author     Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:52:30 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:53:17 +0100
commit     b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree       83114aac64286fe616506c0b3dfaec2ab86ef835
parent     ca33590b6af032bff57d9cc70455660466a654b2 (diff)
New upstream version 18.08  (tag: upstream/18.08)
Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>

Diffstat:
-rw-r--r--  .gitignore | 1
-rw-r--r--  GNUmakefile | 2
-rw-r--r--  MAINTAINERS | 286
-rw-r--r--  app/Makefile | 2
-rw-r--r--  app/meson.build | 56
-rw-r--r--  app/pdump/main.c | 146
-rw-r--r--  app/pdump/meson.build | 6
-rw-r--r--  app/proc-info/Makefile (renamed from app/proc_info/Makefile) | 0
-rw-r--r--  app/proc-info/main.c (renamed from app/proc_info/main.c) | 14
-rw-r--r--  app/proc-info/meson.build | 6
-rw-r--r--  app/test-bbdev/Makefile | 2
-rw-r--r--  app/test-bbdev/meson.build | 9
-rwxr-xr-x  app/test-bbdev/test-bbdev.py | 4
-rw-r--r--  app/test-bbdev/test_bbdev.c | 29
-rw-r--r--  app/test-bbdev/test_bbdev_perf.c | 407
-rw-r--r--  app/test-bbdev/test_bbdev_vector.c | 8
-rw-r--r--  app/test-bbdev/test_vectors/bbdev_null.data (renamed from app/test-bbdev/test_vectors/bbdev_vector_null.data) | 0
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c1_k40_r0_e17280_sbd_negllr.data (renamed from app/test-bbdev/test_vectors/bbdev_vector_td_default.data) | 7
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data | 643
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data | 643
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_negllr.data | 645
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_posllr.data | 645
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data | 1224
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data | 1225
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data | 676
-rw-r--r--  app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data | 677
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data | 36
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data | 36
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data | 36
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e272_rm.data (renamed from app/test-bbdev/test_vectors/bbdev_vector_te_default.data) | 0
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e120_rm_rvidx.data | 63
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18444.data | 156
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18448_crc24a.data | 159
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data | 180
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c2_k5952_r0_e17868_crc24b.data | 300
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c3_k4800_r2_e14412_crc24b.data | 153
-rw-r--r--  app/test-bbdev/test_vectors/turbo_enc_c4_k4800_r2_e14412_crc24b.data | 252
l---------  app/test-bbdev/turbo_dec_default.data | 1
l---------  app/test-bbdev/turbo_enc_default.data | 1
-rw-r--r--  app/test-crypto-perf/Makefile | 2
-rw-r--r--  app/test-crypto-perf/cperf_ops.c | 3
-rw-r--r--  app/test-crypto-perf/cperf_options.h | 5
-rw-r--r--  app/test-crypto-perf/cperf_test_common.c | 41
-rw-r--r--  app/test-crypto-perf/cperf_test_pmd_cyclecount.c | 2
-rw-r--r--  app/test-crypto-perf/cperf_test_vector_parsing.c | 7
-rw-r--r--  app/test-crypto-perf/main.c | 70
-rw-r--r--  app/test-crypto-perf/meson.build | 15
-rw-r--r--  app/test-eventdev/evt_main.c | 42
-rw-r--r--  app/test-eventdev/evt_options.c | 132
-rw-r--r--  app/test-eventdev/evt_options.h | 35
-rw-r--r--  app/test-eventdev/evt_test.h | 3
-rw-r--r--  app/test-eventdev/meson.build | 18
-rw-r--r--  app/test-eventdev/test_order_atq.c | 12
-rw-r--r--  app/test-eventdev/test_order_queue.c | 12
-rw-r--r--  app/test-eventdev/test_perf_atq.c | 12
-rw-r--r--  app/test-eventdev/test_perf_common.c | 276
-rw-r--r--  app/test-eventdev/test_perf_common.h | 14
-rw-r--r--  app/test-eventdev/test_perf_queue.c | 9
-rw-r--r--  app/test-eventdev/test_pipeline_atq.c | 4
-rw-r--r--  app/test-eventdev/test_pipeline_common.c | 33
-rw-r--r--  app/test-eventdev/test_pipeline_queue.c | 6
-rw-r--r--  app/test-pmd/Makefile | 10
-rw-r--r--  app/test-pmd/bpf_cmd.c | 198
-rw-r--r--  app/test-pmd/bpf_cmd.h | 16
-rw-r--r--  app/test-pmd/cmdline.c | 1783
-rw-r--r--  app/test-pmd/cmdline_flow.c | 1388
-rw-r--r--  app/test-pmd/cmdline_tm.c | 185
-rw-r--r--  app/test-pmd/cmdline_tm.h | 2
-rw-r--r--  app/test-pmd/config.c | 480
-rw-r--r--  app/test-pmd/csumonly.c | 108
-rw-r--r--  app/test-pmd/macfwd.c | 3
-rw-r--r--  app/test-pmd/macswap.c | 3
-rw-r--r--  app/test-pmd/meson.build | 33
-rw-r--r--  app/test-pmd/parameters.c | 86
-rw-r--r--  app/test-pmd/softnicfwd.c (renamed from app/test-pmd/tm.c) | 408
-rw-r--r--  app/test-pmd/testpmd.c | 469
-rw-r--r--  app/test-pmd/testpmd.h | 141
-rw-r--r--  buildtools/Makefile | 33
-rwxr-xr-x  buildtools/auto-config-h.sh | 32
-rw-r--r--  buildtools/pmdinfogen/Makefile | 35
-rw-r--r--  config/arm/arm64_dpaa2_linuxapp_gcc | 15
-rw-r--r--  config/arm/arm64_dpaa_linuxapp_gcc | 15
-rw-r--r--  config/arm/arm64_thunderx_linuxapp_gcc | 1
-rw-r--r--  config/arm/meson.build | 24
-rw-r--r--  config/common_armv8a_linuxapp | 58
-rw-r--r--  config/common_base | 171
-rw-r--r--  config/common_linuxapp | 21
-rw-r--r--  config/defconfig_arm-armv7a-linuxapp-gcc | 31
-rw-r--r--  config/defconfig_arm64-dpaa-linuxapp-gcc | 9
-rw-r--r--  config/defconfig_arm64-dpaa2-linuxapp-gcc | 22
-rw-r--r--  config/defconfig_arm64-stingray-linuxapp-gcc | 16
-rw-r--r--  config/defconfig_i686-native-linuxapp-gcc | 3
-rw-r--r--  config/defconfig_i686-native-linuxapp-icc | 8
-rw-r--r--  config/defconfig_x86_x32-native-linuxapp-gcc | 3
-rw-r--r--  config/meson.build | 21
-rw-r--r--  config/rte_config.h | 33
-rwxr-xr-x  devtools/check-dup-includes.sh | 23
-rwxr-xr-x  devtools/check-git-log.sh | 38
-rwxr-xr-x  devtools/check-includes.sh | 32
-rwxr-xr-x  devtools/check-maintainers.sh | 30
-rwxr-xr-x  devtools/check-symbol-change.sh | 159
-rwxr-xr-x  devtools/check-symbol-maps.sh | 30
-rwxr-xr-x  devtools/checkpatches.sh | 166
-rw-r--r--  devtools/cocci/strlcpy.cocci | 8
-rwxr-xr-x  devtools/get-maintainer.sh | 5
-rwxr-xr-x  devtools/git-log-fixes.sh | 30
-rwxr-xr-x  devtools/test-build.sh | 51
-rwxr-xr-x  devtools/test-meson-builds.sh | 60
-rwxr-xr-x  devtools/test-null.sh | 30
-rw-r--r--  doc/api/doxy-api-index.md | 46
-rw-r--r--  doc/api/doxy-api.conf | 37
-rwxr-xr-x  doc/api/doxy-html-custom.sh | 30
-rw-r--r--  doc/guides/bbdevs/null.rst | 8
-rw-r--r--  doc/guides/bbdevs/turbo_sw.rst | 70
-rw-r--r--  doc/guides/compressdevs/features/default.ini | 26
-rw-r--r--  doc/guides/compressdevs/features/isal.ini | 16
-rw-r--r--  doc/guides/compressdevs/features/octeontx.ini | 10
-rw-r--r--  doc/guides/compressdevs/features/qat.ini | 15
-rw-r--r--  doc/guides/compressdevs/features/zlib.ini | 10
-rw-r--r--  doc/guides/compressdevs/index.rst | 16
-rw-r--r--  doc/guides/compressdevs/isal.rst | 123
-rw-r--r--  doc/guides/compressdevs/octeontx.rst | 105
-rw-r--r--  doc/guides/compressdevs/overview.rst | 32
-rw-r--r--  doc/guides/compressdevs/qat_comp.rst | 47
-rw-r--r--  doc/guides/compressdevs/zlib.rst | 69
-rw-r--r--  doc/guides/conf.py | 24
-rw-r--r--  doc/guides/contributing/cheatsheet.rst | 3
-rw-r--r--  doc/guides/contributing/coding_style.rst | 7
-rw-r--r--  doc/guides/contributing/design.rst | 3
-rw-r--r--  doc/guides/contributing/documentation.rst | 3
-rw-r--r--  doc/guides/contributing/index.rst | 3
-rw-r--r--  doc/guides/contributing/patches.rst | 77
-rw-r--r--  doc/guides/contributing/stable.rst | 23
-rw-r--r--  doc/guides/contributing/versioning.rst | 57
-rw-r--r--  doc/guides/cryptodevs/aesni_gcm.rst | 13
-rw-r--r--  doc/guides/cryptodevs/aesni_mb.rst | 15
-rw-r--r--  doc/guides/cryptodevs/ccp.rst | 140
-rw-r--r--  doc/guides/cryptodevs/dpaa2_sec.rst | 38
-rw-r--r--  doc/guides/cryptodevs/dpaa_sec.rst | 40
-rw-r--r--  doc/guides/cryptodevs/features/aesni_gcm.ini | 3
-rw-r--r--  doc/guides/cryptodevs/features/aesni_mb.ini | 2
-rw-r--r--  doc/guides/cryptodevs/features/ccp.ini | 59
-rw-r--r--  doc/guides/cryptodevs/features/default.ini | 20
-rw-r--r--  doc/guides/cryptodevs/features/dpaa2_sec.ini | 6
-rw-r--r--  doc/guides/cryptodevs/features/dpaa_sec.ini | 6
-rw-r--r--  doc/guides/cryptodevs/features/mvsam.ini (renamed from doc/guides/cryptodevs/features/mrvl.ini) | 2
-rw-r--r--  doc/guides/cryptodevs/features/null.ini | 2
-rw-r--r--  doc/guides/cryptodevs/features/openssl.ini | 14
-rw-r--r--  doc/guides/cryptodevs/features/qat.ini | 6
-rw-r--r--  doc/guides/cryptodevs/features/virtio.ini | 26
-rw-r--r--  doc/guides/cryptodevs/index.rst | 4
-rw-r--r--  doc/guides/cryptodevs/kasumi.rst | 10
-rw-r--r--  doc/guides/cryptodevs/mvsam.rst (renamed from doc/guides/cryptodevs/mrvl.rst) | 26
-rw-r--r--  doc/guides/cryptodevs/openssl.rst | 1
-rw-r--r--  doc/guides/cryptodevs/overview.rst | 27
-rw-r--r--  doc/guides/cryptodevs/qat.rst | 191
-rw-r--r--  doc/guides/cryptodevs/scheduler.rst | 12
-rw-r--r--  doc/guides/cryptodevs/snow3g.rst | 10
-rw-r--r--  doc/guides/cryptodevs/virtio.rst | 117
-rw-r--r--  doc/guides/cryptodevs/zuc.rst | 10
-rw-r--r--  doc/guides/eventdevs/dpaa2.rst | 14
-rw-r--r--  doc/guides/eventdevs/octeontx.rst | 33
-rw-r--r--  doc/guides/faq/faq.rst | 23
-rw-r--r--  doc/guides/freebsd_gsg/build_sample_apps.rst | 7
-rw-r--r--  doc/guides/howto/rte_flow.rst | 34
-rw-r--r--  doc/guides/howto/virtio_user_for_container_networking.rst | 3
-rw-r--r--  doc/guides/index.rst | 2
-rw-r--r--  doc/guides/linux_gsg/build_sample_apps.rst | 16
-rw-r--r--  doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst | 132
-rw-r--r--  doc/guides/linux_gsg/index.rst | 1
-rw-r--r--  doc/guides/linux_gsg/linux_drivers.rst | 2
-rw-r--r--  doc/guides/nics/axgbe.rst | 89
-rw-r--r--  doc/guides/nics/bnx2x.rst | 1
-rw-r--r--  doc/guides/nics/bnxt.rst | 36
-rw-r--r--  doc/guides/nics/cxgbe.rst | 194
-rw-r--r--  doc/guides/nics/dpaa.rst | 10
-rw-r--r--  doc/guides/nics/dpaa2.rst | 53
-rw-r--r--  doc/guides/nics/enic.rst | 239
-rw-r--r--  doc/guides/nics/fail_safe.rst | 28
-rw-r--r--  doc/guides/nics/features.rst | 43
-rw-r--r--  doc/guides/nics/features/avf.ini | 1
-rw-r--r--  doc/guides/nics/features/avf_vec.ini | 1
-rw-r--r--  doc/guides/nics/features/axgbe.ini | 19
-rw-r--r--  doc/guides/nics/features/cxgbe.ini | 3
-rw-r--r--  doc/guides/nics/features/cxgbevf.ini | 29
-rw-r--r--  doc/guides/nics/features/default.ini | 4
-rw-r--r--  doc/guides/nics/features/enic.ini | 10
-rw-r--r--  doc/guides/nics/features/fm10k.ini | 4
-rw-r--r--  doc/guides/nics/features/fm10k_vf.ini | 2
-rw-r--r--  doc/guides/nics/features/i40e.ini | 3
-rw-r--r--  doc/guides/nics/features/i40e_vec.ini | 1
-rw-r--r--  doc/guides/nics/features/i40e_vf.ini | 1
-rw-r--r--  doc/guides/nics/features/i40e_vf_vec.ini | 1
-rw-r--r--  doc/guides/nics/features/ifcvf.ini | 8
-rw-r--r--  doc/guides/nics/features/igb.ini | 1
-rw-r--r--  doc/guides/nics/features/igb_vf.ini | 1
-rw-r--r--  doc/guides/nics/features/ixgbe.ini | 1
-rw-r--r--  doc/guides/nics/features/ixgbe_vec.ini | 1
-rw-r--r--  doc/guides/nics/features/mlx4.ini | 1
-rw-r--r--  doc/guides/nics/features/mlx5.ini | 5
-rw-r--r--  doc/guides/nics/features/mvpp2.ini (renamed from doc/guides/nics/features/mrvl.ini) | 4
-rw-r--r--  doc/guides/nics/features/netvsc.ini | 23
-rw-r--r--  doc/guides/nics/features/qede.ini | 12
-rw-r--r--  doc/guides/nics/features/qede_vf.ini | 2
-rw-r--r--  doc/guides/nics/features/softnic.ini | 9
-rw-r--r--  doc/guides/nics/features/vhost.ini | 1
-rw-r--r--  doc/guides/nics/features/virtio.ini | 1
-rw-r--r--  doc/guides/nics/features/virtio_vec.ini | 1
-rw-r--r--  doc/guides/nics/fm10k.rst | 27
-rw-r--r--  doc/guides/nics/i40e.rst | 129
-rw-r--r--  doc/guides/nics/ifc.rst | 96
-rw-r--r--  doc/guides/nics/img/szedata2_nfb200g_architecture.svg | 214
-rw-r--r--  doc/guides/nics/index.rst | 6
-rw-r--r--  doc/guides/nics/ixgbe.rst | 43
-rw-r--r--  doc/guides/nics/liquidio.rst | 4
-rw-r--r--  doc/guides/nics/mlx4.rst | 63
-rw-r--r--  doc/guides/nics/mlx5.rst | 264
-rw-r--r--  doc/guides/nics/mrvl.rst | 275
-rw-r--r--  doc/guides/nics/mvpp2.rst | 520
-rw-r--r--  doc/guides/nics/netvsc.rst | 105
-rw-r--r--  doc/guides/nics/nfp.rst | 65
-rw-r--r--  doc/guides/nics/octeontx.rst | 3
-rw-r--r--  doc/guides/nics/overview.rst | 28
-rw-r--r--  doc/guides/nics/pcap_ring.rst | 25
-rw-r--r--  doc/guides/nics/qede.rst | 34
-rw-r--r--  doc/guides/nics/sfc_efx.rst | 153
-rw-r--r--  doc/guides/nics/softnic.rst | 250
-rw-r--r--  doc/guides/nics/szedata2.rst | 104
-rw-r--r--  doc/guides/nics/tap.rst | 30
-rw-r--r--  doc/guides/nics/thunderx.rst | 24
-rw-r--r--  doc/guides/nics/vdev_netvsc.rst | 14
-rw-r--r--  doc/guides/nics/virtio.rst | 40
-rw-r--r--  doc/guides/platform/octeontx.rst | 6
-rw-r--r--  doc/guides/prog_guide/bbdev.rst | 259
-rw-r--r--  doc/guides/prog_guide/bpf_lib.rst | 38
-rw-r--r--  doc/guides/prog_guide/compressdev.rst | 623
-rw-r--r--  doc/guides/prog_guide/cryptodev_lib.rst | 304
-rw-r--r--  doc/guides/prog_guide/env_abstraction_layer.rst | 239
-rw-r--r--  doc/guides/prog_guide/event_crypto_adapter.rst | 296
-rw-r--r--  doc/guides/prog_guide/event_ethernet_rx_adapter.rst | 47
-rw-r--r--  doc/guides/prog_guide/event_timer_adapter.rst | 296
-rw-r--r--  doc/guides/prog_guide/eventdev.rst | 57
-rw-r--r--  doc/guides/prog_guide/generic_segmentation_offload_lib.rst | 10
-rw-r--r--  doc/guides/prog_guide/glossary.rst | 3
-rw-r--r--  doc/guides/prog_guide/hash_lib.rst | 29
-rw-r--r--  doc/guides/prog_guide/img/event_crypto_adapter_op_forward.svg | 1078
-rw-r--r--  doc/guides/prog_guide/img/event_crypto_adapter_op_new.svg | 1061
-rw-r--r--  doc/guides/prog_guide/img/eventdev_usage.svg | 1519
-rw-r--r--  doc/guides/prog_guide/img/malloc_heap.svg | 1348
-rw-r--r--  doc/guides/prog_guide/img/stateful-op.svg | 116
-rw-r--r--  doc/guides/prog_guide/img/stateless-op-shared.svg | 124
-rw-r--r--  doc/guides/prog_guide/img/stateless-op.svg | 140
-rw-r--r--  doc/guides/prog_guide/img/turbo_tb_decode.svg | 1471
-rw-r--r--  doc/guides/prog_guide/img/turbo_tb_encode.svg | 1948
-rw-r--r--  doc/guides/prog_guide/index.rst | 5
-rw-r--r--  doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst | 43
-rw-r--r--  doc/guides/prog_guide/mbuf_lib.rst | 11
-rw-r--r--  doc/guides/prog_guide/multi_proc_support.rst | 172
-rw-r--r--  doc/guides/prog_guide/overview.rst | 4
-rw-r--r--  doc/guides/prog_guide/poll_mode_drv.rst | 60
-rw-r--r--  doc/guides/prog_guide/rawdev.rst | 2
-rw-r--r--  doc/guides/prog_guide/rte_flow.rst | 1226
-rw-r--r--  doc/guides/prog_guide/source_org.rst | 4
-rw-r--r--  doc/guides/prog_guide/switch_representation.rst | 837
-rw-r--r--  doc/guides/prog_guide/traffic_metering_and_policing.rst | 2
-rw-r--r--  doc/guides/prog_guide/vhost_lib.rst | 128
-rw-r--r--  doc/guides/rawdevs/dpaa2_cmdif.rst | 144
-rw-r--r--  doc/guides/rawdevs/dpaa2_qdma.rst | 140
-rw-r--r--  doc/guides/rawdevs/ifpga_rawdev.rst | 112
-rw-r--r--  doc/guides/rawdevs/index.rst | 16
-rw-r--r--  doc/guides/rel_notes/deprecation.rst | 188
-rw-r--r--  doc/guides/rel_notes/index.rst | 2
-rw-r--r--  doc/guides/rel_notes/known_issues.rst | 45
-rw-r--r--  doc/guides/rel_notes/release_16_04.rst | 3
-rw-r--r--  doc/guides/rel_notes/release_16_07.rst | 3
-rw-r--r--  doc/guides/rel_notes/release_16_11.rst | 3
-rw-r--r--  doc/guides/rel_notes/release_17_02.rst | 3
-rw-r--r--  doc/guides/rel_notes/release_17_05.rst | 3
-rw-r--r--  doc/guides/rel_notes/release_17_08.rst | 3
-rw-r--r--  doc/guides/rel_notes/release_17_11.rst | 7
-rw-r--r--  doc/guides/rel_notes/release_18_02.rst | 3
-rw-r--r--  doc/guides/rel_notes/release_18_05.rst | 983
-rw-r--r--  doc/guides/rel_notes/release_18_08.rst | 549
-rw-r--r--  doc/guides/rel_notes/release_2_2.rst | 3
-rw-r--r--  doc/guides/sample_app_ug/bbdev_app.rst | 11
-rw-r--r--  doc/guides/sample_app_ug/ethtool.rst | 3
-rw-r--r--  doc/guides/sample_app_ug/flow_classify.rst | 14
-rw-r--r--  doc/guides/sample_app_ug/flow_filtering.rst | 34
-rw-r--r--  doc/guides/sample_app_ug/img/ip_pipelines_1.svg | 738
-rw-r--r--  doc/guides/sample_app_ug/img/ip_pipelines_2.svg | 997
-rw-r--r--  doc/guides/sample_app_ug/img/ip_pipelines_3.svg | 826
-rw-r--r--  doc/guides/sample_app_ug/img/master_slave_proc.png | bin 195232 -> 0 bytes
-rw-r--r--  doc/guides/sample_app_ug/img/slave_proc_recov.png | bin 85287 -> 0 bytes
-rw-r--r--  doc/guides/sample_app_ug/index.rst | 5
-rw-r--r--  doc/guides/sample_app_ug/ip_pipeline.rst | 1448
-rw-r--r--  doc/guides/sample_app_ug/kernel_nic_interface.rst | 363
-rw-r--r--  doc/guides/sample_app_ug/l2_forward_job_stats.rst | 26
-rw-r--r--  doc/guides/sample_app_ug/l2_forward_real_virtual.rst | 26
-rw-r--r--  doc/guides/sample_app_ug/link_status_intr.rst | 11
-rw-r--r--  doc/guides/sample_app_ug/multi_process.rst | 406
-rw-r--r--  doc/guides/sample_app_ug/quota_watermark.rst | 2
-rw-r--r--  doc/guides/sample_app_ug/rxtx_callbacks.rst | 3
-rw-r--r--  doc/guides/sample_app_ug/skeleton.rst | 11
-rw-r--r--  doc/guides/sample_app_ug/vhost.rst | 30
-rw-r--r--  doc/guides/sample_app_ug/vhost_crypto.rst | 82
-rw-r--r--  doc/guides/sample_app_ug/vm_power_management.rst | 165
-rw-r--r--  doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst | 4
-rw-r--r--  doc/guides/testpmd_app_ug/run_app.rst | 21
-rw-r--r--  doc/guides/testpmd_app_ug/testpmd_funcs.rst | 463
-rw-r--r--  doc/guides/tools/cryptoperf.rst | 2
-rw-r--r--  doc/guides/tools/testbbdev.rst | 189
-rw-r--r--  doc/guides/tools/testeventdev.rst | 60
-rw-r--r--  drivers/Makefile | 19
-rw-r--r--  drivers/baseband/Makefile (renamed from drivers/bbdev/Makefile) | 0
-rw-r--r--  drivers/baseband/null/Makefile (renamed from drivers/bbdev/null/Makefile) | 0
-rw-r--r--  drivers/baseband/null/bbdev_null.c (renamed from drivers/bbdev/null/bbdev_null.c) | 10
-rw-r--r--  drivers/baseband/null/rte_pmd_bbdev_null_version.map (renamed from drivers/bbdev/null/rte_pmd_bbdev_null_version.map) | 0
-rw-r--r--  drivers/baseband/turbo_sw/Makefile (renamed from drivers/bbdev/turbo_sw/Makefile) | 0
-rw-r--r--  drivers/baseband/turbo_sw/bbdev_turbo_software.c (renamed from drivers/bbdev/turbo_sw/bbdev_turbo_software.c) | 284
-rw-r--r--  drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map (renamed from drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map) | 0
-rw-r--r--  drivers/bus/Makefile | 4
-rw-r--r--  drivers/bus/dpaa/base/fman/fman.c | 6
-rw-r--r--  drivers/bus/dpaa/base/fman/fman_hw.c | 62
-rw-r--r--  drivers/bus/dpaa/base/fman/netcfg_layer.c | 5
-rw-r--r--  drivers/bus/dpaa/base/fman/of.c | 44
-rw-r--r--  drivers/bus/dpaa/base/qbman/bman_driver.c | 6
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman.c | 36
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman_driver.c | 14
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman_priv.h | 1
-rw-r--r--  drivers/bus/dpaa/dpaa_bus.c | 148
-rw-r--r--  drivers/bus/dpaa/include/compat.h | 36
-rw-r--r--  drivers/bus/dpaa/include/fsl_fman.h | 6
-rw-r--r--  drivers/bus/dpaa/include/fsl_qman.h | 27
-rw-r--r--  drivers/bus/dpaa/include/of.h | 2
-rw-r--r--  drivers/bus/dpaa/meson.build | 29
-rw-r--r--  drivers/bus/dpaa/rte_bus_dpaa_version.map | 10
-rw-r--r--  drivers/bus/dpaa/rte_dpaa_bus.h | 40
-rw-r--r--  drivers/bus/dpaa/rte_dpaa_logs.h | 11
-rw-r--r--  drivers/bus/fslmc/Makefile | 19
-rw-r--r--  drivers/bus/fslmc/fslmc_bus.c | 179
-rw-r--r--  drivers/bus/fslmc/fslmc_logs.h | 69
-rw-r--r--  drivers/bus/fslmc/fslmc_vfio.c | 384
-rw-r--r--  drivers/bus/fslmc/fslmc_vfio.h | 2
-rw-r--r--  drivers/bus/fslmc/mc/dpdmai.c | 429
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpdmai.h | 189
-rw-r--r--  drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h | 107
-rw-r--r--  drivers/bus/fslmc/mc/fsl_mc_cmd.h | 2
-rw-r--r--  drivers/bus/fslmc/meson.build | 27
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c | 12
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpci.c | 100
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 123
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_dpio.h | 8
-rw-r--r--  drivers/bus/fslmc/portal/dpaa2_hw_pvt.h | 94
-rw-r--r--  drivers/bus/fslmc/qbman/include/fsl_qbman_base.h | 2
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_portal.c | 17
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_portal.h | 1
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_sys.h | 30
-rw-r--r--  drivers/bus/fslmc/qbman/qbman_sys_decl.h | 23
-rw-r--r--  drivers/bus/fslmc/rte_bus_fslmc_version.map | 17
-rw-r--r--  drivers/bus/fslmc/rte_fslmc.h | 12
-rw-r--r--  drivers/bus/ifpga/Makefile | 32
-rw-r--r--  drivers/bus/ifpga/ifpga_bus.c | 467
-rw-r--r--  drivers/bus/ifpga/ifpga_common.c | 88
-rw-r--r--  drivers/bus/ifpga/ifpga_common.h | 18
-rw-r--r--  drivers/bus/ifpga/ifpga_logs.h | 31
-rw-r--r--  drivers/bus/ifpga/meson.build | 8
-rw-r--r--  drivers/bus/ifpga/rte_bus_ifpga.h | 149
-rw-r--r--  drivers/bus/ifpga/rte_bus_ifpga_version.map | 10
-rw-r--r--  drivers/bus/meson.build | 2
-rw-r--r--  drivers/bus/pci/Makefile | 36
-rw-r--r--  drivers/bus/pci/bsd/Makefile | 32
-rw-r--r--  drivers/bus/pci/linux/Makefile | 32
-rw-r--r--  drivers/bus/pci/linux/pci.c | 36
-rw-r--r--  drivers/bus/pci/linux/pci_uio.c | 47
-rw-r--r--  drivers/bus/pci/linux/pci_vfio.c | 118
-rw-r--r--  drivers/bus/pci/meson.build | 3
-rw-r--r--  drivers/bus/pci/pci_common.c | 124
-rw-r--r--  drivers/bus/pci/private.h | 50
-rw-r--r--  drivers/bus/pci/rte_bus_pci.h | 8
-rw-r--r--  drivers/bus/vdev/Makefile | 1
-rw-r--r--  drivers/bus/vdev/meson.build | 2
-rw-r--r--  drivers/bus/vdev/rte_bus_vdev.h | 38
-rw-r--r--  drivers/bus/vdev/vdev.c | 262
-rw-r--r--  drivers/bus/vmbus/Makefile | 36
-rw-r--r--  drivers/bus/vmbus/linux/Makefile | 3
-rw-r--r--  drivers/bus/vmbus/linux/vmbus_bus.c | 355
-rw-r--r--  drivers/bus/vmbus/linux/vmbus_uio.c | 398
-rw-r--r--  drivers/bus/vmbus/meson.build | 18
-rw-r--r--  drivers/bus/vmbus/private.h | 132
-rw-r--r--  drivers/bus/vmbus/rte_bus_vmbus.h | 407
-rw-r--r--  drivers/bus/vmbus/rte_bus_vmbus_version.map | 29
-rw-r--r--  drivers/bus/vmbus/rte_vmbus_reg.h | 344
-rw-r--r--  drivers/bus/vmbus/vmbus_bufring.c | 244
-rw-r--r--  drivers/bus/vmbus/vmbus_channel.c | 405
-rw-r--r--  drivers/bus/vmbus/vmbus_common.c | 286
-rw-r--r--  drivers/bus/vmbus/vmbus_common_uio.c | 232
-rw-r--r--  drivers/common/Makefile | 11
-rw-r--r--  drivers/common/meson.build | 7
-rw-r--r--  drivers/common/octeontx/Makefile | 24
-rw-r--r--  drivers/common/octeontx/meson.build | 5
-rw-r--r--  drivers/common/octeontx/octeontx_mbox.c (renamed from drivers/mempool/octeontx/octeontx_mbox.c) | 65
-rw-r--r--  drivers/common/octeontx/octeontx_mbox.h | 37
-rw-r--r--  drivers/common/octeontx/rte_common_octeontx_version.map | 7
-rw-r--r--  drivers/common/qat/Makefile | 66
-rw-r--r--  drivers/common/qat/meson.build | 14
-rw-r--r--  drivers/common/qat/qat_adf/adf_transport_access_macros.h (renamed from drivers/crypto/qat/qat_adf/adf_transport_access_macros.h) | 56
-rw-r--r--  drivers/common/qat/qat_adf/icp_qat_fw.h (renamed from drivers/crypto/qat/qat_adf/icp_qat_fw.h) | 116
-rw-r--r--  drivers/common/qat/qat_adf/icp_qat_fw_comp.h | 482
-rw-r--r--  drivers/common/qat/qat_adf/icp_qat_fw_la.h (renamed from drivers/crypto/qat/qat_adf/icp_qat_fw_la.h) | 47
-rw-r--r--  drivers/common/qat/qat_adf/icp_qat_hw.h (renamed from drivers/crypto/qat/qat_adf/icp_qat_hw.h) | 177
-rw-r--r--  drivers/common/qat/qat_common.c | 123
-rw-r--r--  drivers/common/qat/qat_common.h | 79
-rw-r--r--  drivers/common/qat/qat_device.c | 279
-rw-r--r--  drivers/common/qat/qat_device.h | 102
-rw-r--r--  drivers/common/qat/qat_logs.c | 38
-rw-r--r--  drivers/common/qat/qat_logs.h | 34
-rw-r--r--  drivers/common/qat/qat_qp.c | 642
-rw-r--r--  drivers/common/qat/qat_qp.h | 111
-rw-r--r--  drivers/compress/Makefile | 10
-rw-r--r--  drivers/compress/isal/Makefile | 31
-rw-r--r--  drivers/compress/isal/isal_compress_pmd.c | 694
-rw-r--r--  drivers/compress/isal/isal_compress_pmd_ops.c | 351
-rw-r--r--  drivers/compress/isal/isal_compress_pmd_private.h | 57
-rw-r--r--  drivers/compress/isal/meson.build | 14
-rw-r--r--  drivers/compress/isal/rte_pmd_isal_version.map | 3
-rw-r--r--  drivers/compress/meson.build | 8
-rw-r--r--  drivers/compress/octeontx/Makefile | 30
-rw-r--r--  drivers/compress/octeontx/include/zip_regs.h | 711
-rw-r--r--  drivers/compress/octeontx/meson.build | 9
-rw-r--r--  drivers/compress/octeontx/otx_zip.c | 180
-rw-r--r--  drivers/compress/octeontx/otx_zip.h | 277
-rw-r--r--  drivers/compress/octeontx/otx_zip_pmd.c | 658
-rw-r--r--  drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map | 3
-rw-r--r--  drivers/compress/qat/meson.build | 18
-rw-r--r--  drivers/compress/qat/qat_comp.c | 393
-rw-r--r--  drivers/compress/qat/qat_comp.h | 65
-rw-r--r--  drivers/compress/qat/qat_comp_pmd.c | 429
-rw-r--r--  drivers/compress/qat/qat_comp_pmd.h | 39
-rw-r--r--  drivers/compress/qat/rte_pmd_qat_version.map | 3
-rw-r--r--  drivers/compress/zlib/Makefile | 29
-rw-r--r--  drivers/compress/zlib/meson.build | 14
-rw-r--r--  drivers/compress/zlib/rte_pmd_zlib_version.map | 3
-rw-r--r--  drivers/compress/zlib/zlib_pmd.c | 436
-rw-r--r--  drivers/compress/zlib/zlib_pmd_ops.c | 307
-rw-r--r--  drivers/compress/zlib/zlib_pmd_private.h | 71
-rw-r--r--  drivers/crypto/Makefile | 9
-rw-r--r--  drivers/crypto/aesni_gcm/Makefile | 10
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 3
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 53
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 54
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h | 36
-rw-r--r--  drivers/crypto/aesni_mb/Makefile | 10
-rw-r--r--  drivers/crypto/aesni_mb/aesni_mb_ops.h | 31
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 178
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 116
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 78
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd.c | 15
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_ops.c | 47
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_private.h | 2
-rw-r--r--  drivers/crypto/ccp/Makefile | 35
-rw-r--r--  drivers/crypto/ccp/ccp_crypto.c | 2951
-rw-r--r--  drivers/crypto/ccp/ccp_crypto.h | 388
-rw-r--r--  drivers/crypto/ccp/ccp_dev.c | 810
-rw-r--r--  drivers/crypto/ccp/ccp_dev.h | 495
-rw-r--r--  drivers/crypto/ccp/ccp_pci.c | 236
-rw-r--r--  drivers/crypto/ccp/ccp_pci.h | 27
-rw-r--r--  drivers/crypto/ccp/ccp_pmd_ops.c | 833
-rw-r--r--  drivers/crypto/ccp/ccp_pmd_private.h | 107
-rw-r--r--  drivers/crypto/ccp/meson.build | 21
-rw-r--r--  drivers/crypto/ccp/rte_ccp_pmd.c | 397
-rw-r--r--  drivers/crypto/ccp/rte_pmd_ccp_version.map | 4
-rw-r--r--  drivers/crypto/dpaa2_sec/Makefile | 5
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 564
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h | 62
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 28
-rw-r--r--  drivers/crypto/dpaa2_sec/meson.build | 14
-rw-r--r--  drivers/crypto/dpaa_sec/Makefile | 5
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.c | 412
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.h | 36
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec_log.h | 65
-rw-r--r--  drivers/crypto/dpaa_sec/meson.build | 13
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd.c | 50
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 53
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_private.h | 28
-rw-r--r--  drivers/crypto/meson.build | 4
-rw-r--r--  drivers/crypto/mrvl/Makefile | 67
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_compat.h | 51
-rw-r--r--  drivers/crypto/mvsam/Makefile | 42
-rw-r--r--  drivers/crypto/mvsam/meson.build | 21
-rw-r--r--  drivers/crypto/mvsam/rte_mrvl_compat.h | 23
-rw-r--r--  drivers/crypto/mvsam/rte_mrvl_pmd.c (renamed from drivers/crypto/mrvl/rte_mrvl_pmd.c) | 172
-rw-r--r--  drivers/crypto/mvsam/rte_mrvl_pmd_ops.c (renamed from drivers/crypto/mrvl/rte_mrvl_pmd_ops.c) | 82
-rw-r--r--  drivers/crypto/mvsam/rte_mrvl_pmd_private.h (renamed from drivers/crypto/mrvl/rte_mrvl_pmd_private.h) | 38
-rw-r--r--  drivers/crypto/mvsam/rte_pmd_mvsam_version.map (renamed from drivers/crypto/mrvl/rte_pmd_mrvl_version.map) | 0
-rw-r--r--  drivers/crypto/null/null_crypto_pmd.c | 28
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_ops.c | 68
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_private.h | 24
-rw-r--r--  drivers/crypto/openssl/compat.h | 108
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd.c | 527
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c | 581
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_private.h | 55
-rw-r--r--  drivers/crypto/qat/Makefile | 35
-rw-r--r--  drivers/crypto/qat/README | 7
-rw-r--r--  drivers/crypto/qat/meson.build | 24
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h | 169
-rw-r--r--  drivers/crypto/qat/qat_crypto.c | 1696
-rw-r--r--  drivers/crypto/qat/qat_crypto.h | 150
-rw-r--r--  drivers/crypto/qat/qat_logs.h | 49
-rw-r--r--  drivers/crypto/qat/qat_qp.c | 470
-rw-r--r--  drivers/crypto/qat/qat_sym.c | 569
-rw-r--r--  drivers/crypto/qat/qat_sym.h | 174
-rw-r--r--  drivers/crypto/qat/qat_sym_capabilities.h (renamed from drivers/crypto/qat/qat_crypto_capabilities.h) | 10
-rw-r--r--  drivers/crypto/qat/qat_sym_pmd.c | 331
-rw-r--r--  drivers/crypto/qat/qat_sym_pmd.h | 41
-rw-r--r--  drivers/crypto/qat/qat_sym_session.c (renamed from drivers/crypto/qat/qat_adf/qat_algs_build_desc.c) | 890
-rw-r--r--  drivers/crypto/qat/qat_sym_session.h | 145
-rw-r--r--  drivers/crypto/qat/rte_pmd_qat_version.map | 3
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c | 180
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 101
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 3
-rw-r--r--  drivers/crypto/scheduler/scheduler_failover.c | 4
-rw-r--r--  drivers/crypto/scheduler/scheduler_multicore.c | 60
-rw-r--r--  drivers/crypto/scheduler/scheduler_pkt_size_distr.c | 18
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c | 182
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_ops.c | 101
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_private.h | 26
-rw-r--r--  drivers/crypto/scheduler/scheduler_roundrobin.c | 2
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd.c | 42
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 51
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_private.h | 30
-rw-r--r--  drivers/crypto/virtio/Makefile | 35
-rw-r--r--  drivers/crypto/virtio/meson.build | 8
-rw-r--r--  drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map | 3
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_algs.h | 28
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_capabilities.h | 51
-rw-r--r--  drivers/crypto/virtio/virtio_cryptodev.c | 1505
-rw-r--r--  drivers/crypto/virtio/virtio_cryptodev.h | 64
-rw-r--r--  drivers/crypto/virtio/virtio_logs.h | 91
-rw-r--r--  drivers/crypto/virtio/virtio_pci.c | 462
-rw-r--r--  drivers/crypto/virtio/virtio_pci.h | 253
-rw-r--r--  drivers/crypto/virtio/virtio_ring.h | 137
-rw-r--r--  drivers/crypto/virtio/virtio_rxtx.c | 527
-rw-r--r--  drivers/crypto/virtio/virtqueue.c | 43
-rw-r--r--  drivers/crypto/virtio/virtqueue.h | 171
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd.c | 152
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_ops.c | 52
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_private.h | 29
-rw-r--r--  drivers/event/Makefile | 4
-rw-r--r--  drivers/event/dpaa/dpaa_eventdev.c | 4
-rw-r--r--  drivers/event/dpaa/dpaa_eventdev.h | 2
-rw-r--r--  drivers/event/dpaa/meson.build | 10
-rw-r--r--  drivers/event/dpaa2/Makefile | 6
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.c | 84
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev.h | 1
-rw-r--r--  drivers/event/dpaa2/dpaa2_eventdev_logs.h | 12
-rw-r--r--  drivers/event/dpaa2/dpaa2_hw_dpcon.c | 15
-rw-r--r--  drivers/event/dpaa2/meson.build | 11
-rw-r--r--  drivers/event/meson.build | 2
-rw-r--r--  drivers/event/octeontx/Makefile | 12
-rw-r--r--  drivers/event/octeontx/meson.build | 9
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.c | 77
-rw-r--r--  drivers/event/octeontx/ssovf_evdev.h | 17
-rw-r--r--  drivers/event/octeontx/ssovf_evdev_selftest.c | 36
-rw-r--r--  drivers/event/octeontx/ssovf_probe.c (renamed from drivers/mempool/octeontx/octeontx_ssovf.c) | 25
-rw-r--r--  drivers/event/octeontx/ssovf_worker.c | 30
-rw-r--r--  drivers/event/octeontx/ssovf_worker.h | 2
-rw-r--r--  drivers/event/octeontx/timvf_evdev.c | 405
-rw-r--r--  drivers/event/octeontx/timvf_evdev.h | 225
-rw-r--r--  drivers/event/octeontx/timvf_probe.c | 148
-rw-r--r--  drivers/event/octeontx/timvf_worker.c | 199
-rw-r--r--  drivers/event/octeontx/timvf_worker.h | 443
-rw-r--r--  drivers/event/opdl/opdl_evdev.c | 7
-rw-r--r--  drivers/event/opdl/opdl_evdev_init.c | 3
-rw-r--r--  drivers/event/opdl/opdl_ring.c | 97
-rw-r--r--  drivers/event/opdl/opdl_ring.h | 16
-rw-r--r--  drivers/event/skeleton/skeleton_eventdev.c | 2
-rw-r--r--  drivers/event/sw/sw_evdev.c | 151
-rw-r--r--  drivers/event/sw/sw_evdev_scheduler.c | 17
-rw-r--r--  drivers/event/sw/sw_evdev_selftest.c | 81
-rw-r--r--  drivers/event/sw/sw_evdev_worker.c | 6
-rw-r--r--  drivers/mempool/Makefile | 5
-rw-r--r--  drivers/mempool/bucket/Makefile | 27
-rw-r--r--  drivers/mempool/bucket/meson.build | 9
-rw-r--r--  drivers/mempool/bucket/rte_mempool_bucket.c | 628
-rw-r--r--  drivers/mempool/bucket/rte_mempool_bucket_version.map | 4
-rw-r--r--  drivers/mempool/dpaa/Makefile | 3
-rw-r--r--  drivers/mempool/dpaa/dpaa_mempool.c | 54
-rw-r--r--  drivers/mempool/dpaa/dpaa_mempool.h | 2
-rw-r--r--  drivers/mempool/dpaa/meson.build | 12
-rw-r--r--  drivers/mempool/dpaa/rte_mempool_dpaa_version.map | 1
-rw-r--r--  drivers/mempool/dpaa2/Makefile | 11
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 139
-rw-r--r--  drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h | 38
-rw-r--r--  drivers/mempool/dpaa2/meson.build | 12
-rw-r--r--  drivers/mempool/dpaa2/rte_dpaa2_mempool.h | 53
-rw-r--r--  drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map | 9
-rw-r--r--  drivers/mempool/meson.build | 2
-rw-r--r--  drivers/mempool/octeontx/Makefile | 5
-rw-r--r--  drivers/mempool/octeontx/meson.build | 6
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.c | 63
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.h | 9
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.h | 36
-rw-r--r--  drivers/mempool/octeontx/octeontx_pool_logs.h | 9
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx.c | 64
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx_version.map | 6
-rw-r--r--  drivers/meson.build | 25
-rw-r--r--  drivers/net/Makefile | 13
-rw-r--r--  drivers/net/af_packet/Makefile | 37
-rw-r--r--  drivers/net/af_packet/rte_eth_af_packet.c | 175
-rw-r--r--  drivers/net/ark/Makefile | 32
-rw-r--r--  drivers/net/ark/ark_ddm.c | 33
-rw-r--r--  drivers/net/ark/ark_ddm.h | 33
-rw-r--r--  drivers/net/ark/ark_ethdev.c | 46
-rw-r--r--  drivers/net/ark/ark_ethdev_rx.c | 33
-rw-r--r--  drivers/net/ark/ark_ethdev_rx.h | 33
-rw-r--r--  drivers/net/ark/ark_ethdev_tx.c | 33
-rw-r--r--  drivers/net/ark/ark_ethdev_tx.h | 33
-rw-r--r--  drivers/net/ark/ark_ext.h | 33
-rw-r--r--  drivers/net/ark/ark_global.h | 33
-rw-r--r--  drivers/net/ark/ark_logs.h | 33
-rw-r--r--  drivers/net/ark/ark_mpu.c | 33
-rw-r--r--  drivers/net/ark/ark_mpu.h | 33
-rw-r--r--  drivers/net/ark/ark_pktchkr.c | 33
-rw-r--r--  drivers/net/ark/ark_pktchkr.h | 33
-rw-r--r--  drivers/net/ark/ark_pktdir.c | 33
-rw-r--r--  drivers/net/ark/ark_pktdir.h | 33
-rw-r--r--  drivers/net/ark/ark_pktgen.c | 33
-rw-r--r--  drivers/net/ark/ark_pktgen.h | 33
-rw-r--r--  drivers/net/ark/ark_rqp.c | 33
-rw-r--r--  drivers/net/ark/ark_rqp.h | 33
-rw-r--r--  drivers/net/ark/ark_udm.c | 33
-rw-r--r--  drivers/net/ark/ark_udm.h | 33
-rw-r--r--  drivers/net/ark/meson.build | 13
-rw-r--r--  drivers/net/avf/avf_ethdev.c | 56
-rw-r--r--  drivers/net/avf/avf_rxtx.c | 9
-rw-r--r--  drivers/net/avf/avf_rxtx.h | 10
-rw-r--r--  drivers/net/avp/Makefile | 33
-rw-r--r--  drivers/net/avp/avp_ethdev.c | 69
-rw-r--r--  drivers/net/avp/avp_logs.h | 32
-rw-r--r--  drivers/net/avp/meson.build | 5
-rw-r--r--  drivers/net/avp/rte_avp_common.h | 57
-rw-r--r--  drivers/net/avp/rte_avp_fifo.h | 57
-rw-r--r--  drivers/net/axgbe/Makefile | 35
-rw-r--r--  drivers/net/axgbe/axgbe_common.h | 1710
-rw-r--r--  drivers/net/axgbe/axgbe_dev.c | 1103
-rw-r--r--  drivers/net/axgbe/axgbe_ethdev.c | 770
-rw-r--r--  drivers/net/axgbe/axgbe_ethdev.h | 586
-rw-r--r--  drivers/net/axgbe/axgbe_i2c.c | 331
-rw-r--r--  drivers/net/axgbe/axgbe_logs.h | 26
-rw-r--r--  drivers/net/axgbe/axgbe_mdio.c | 1066
-rw-r--r--  drivers/net/axgbe/axgbe_phy.h | 192
-rw-r--r--  drivers/net/axgbe/axgbe_phy_impl.c | 2191
-rw-r--r--  drivers/net/axgbe/axgbe_rxtx.c | 674
-rw-r--r--  drivers/net/axgbe/axgbe_rxtx.h | 186
-rw-r--r--  drivers/net/axgbe/axgbe_rxtx_vec_sse.c | 93
-rw-r--r--  drivers/net/axgbe/meson.build | 19
-rw-r--r--  drivers/net/axgbe/rte_pmd_axgbe_version.map | 4
-rw-r--r--  drivers/net/bnx2x/LICENSE.bnx2x_pmd | 28
-rw-r--r--  drivers/net/bnx2x/Makefile | 8
-rw-r--r--  drivers/net/bnx2x/bnx2x.c | 64
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 9
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.c | 148
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.h | 12
-rw-r--r--  drivers/net/bnx2x/bnx2x_logs.h | 9
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.c | 20
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.h | 9
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 10
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.h | 10
-rw-r--r--  drivers/net/bnx2x/bnx2x_vfpf.c | 9
-rw-r--r--  drivers/net/bnx2x/bnx2x_vfpf.h | 9
-rw-r--r--  drivers/net/bnx2x/ecore_fw_defs.h | 10
-rw-r--r--  drivers/net/bnx2x/ecore_hsi.h | 10
-rw-r--r--  drivers/net/bnx2x/ecore_init.h | 10
-rw-r--r--  drivers/net/bnx2x/ecore_init_ops.h | 10
-rw-r--r--  drivers/net/bnx2x/ecore_mfw_req.h | 10
-rw-r--r--  drivers/net/bnx2x/ecore_reg.h | 10
-rw-r--r--  drivers/net/bnx2x/ecore_sp.c | 10
-rw-r--r--  drivers/net/bnx2x/ecore_sp.h | 10
-rw-r--r--  drivers/net/bnx2x/elink.c | 354
-rw-r--r--  drivers/net/bnx2x/elink.h | 10
-rw-r--r--  drivers/net/bnx2x/meson.build | 14
-rw-r--r--  drivers/net/bnxt/Makefile | 39
-rw-r--r--  drivers/net/bnxt/bnxt.h | 74
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.c | 119
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.h | 51
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c | 418
-rw-r--r--  drivers/net/bnxt/bnxt_filter.c | 1113
-rw-r--r--  drivers/net/bnxt/bnxt_filter.h | 35
-rw-r--r--  drivers/net/bnxt/bnxt_flow.c | 1171
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c | 501
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.h | 47
-rw-r--r--  drivers/net/bnxt/bnxt_irq.c | 62
-rw-r--r--  drivers/net/bnxt/bnxt_irq.h | 34
-rw-r--r--  drivers/net/bnxt/bnxt_nvm_defs.h | 11
-rw-r--r--  drivers/net/bnxt/bnxt_ring.c | 237
-rw-r--r--  drivers/net/bnxt/bnxt_ring.h | 40
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.c | 113
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.h | 40
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.c | 99
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.h | 58
-rw-r--r--  drivers/net/bnxt/bnxt_stats.c | 62
-rw-r--r--  drivers/net/bnxt/bnxt_stats.h | 34
-rw-r--r--  drivers/net/bnxt/bnxt_txq.c | 49
-rw-r--r--  drivers/net/bnxt/bnxt_txq.h | 37
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c | 190
-rw-r--r--  drivers/net/bnxt/bnxt_txr.h | 44
-rw-r--r--  drivers/net/bnxt/bnxt_util.c | 18
-rw-r--r--  drivers/net/bnxt/bnxt_util.h | 11
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.c | 49
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.h | 41
-rw-r--r--  drivers/net/bnxt/hsi_struct_def_dpdk.h | 32583
-rw-r--r--  drivers/net/bnxt/meson.build | 20
-rw-r--r--  drivers/net/bnxt/rte_pmd_bnxt.c | 34
-rw-r--r--  drivers/net/bnxt/rte_pmd_bnxt.h | 34
-rw-r--r--  drivers/net/bonding/Makefile | 1
-rw-r--r--  drivers/net/bonding/meson.build | 3
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.c | 126
-rw-r--r--  drivers/net/bonding/rte_eth_bond_alb.c | 4
-rw-r--r--  drivers/net/bonding/rte_eth_bond_api.c | 118
-rw-r--r--  drivers/net/bonding/rte_eth_bond_args.c | 11
-rw-r--r--  drivers/net/bonding/rte_eth_bond_flow.c | 228
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c | 561
-rw-r--r--  drivers/net/bonding/rte_eth_bond_private.h | 40
-rw-r--r--  drivers/net/bonding/rte_pmd_bond_version.map | 1
-rw-r--r--  drivers/net/cxgbe/Makefile | 47
-rw-r--r--  drivers/net/cxgbe/base/adapter.h | 169
-rw-r--r--  drivers/net/cxgbe/base/common.h | 205
-rw-r--r--  drivers/net/cxgbe/base/t4_chip_type.h | 34
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c | 1159
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.h | 38
-rw-r--r--  drivers/net/cxgbe/base/t4_msg.h | 244
-rw-r--r--  drivers/net/cxgbe/base/t4_pci_id_tbl.h | 34
-rw-r--r--  drivers/net/cxgbe/base/t4_regs.h | 172
-rw-r--r--  drivers/net/cxgbe/base/t4_regs_values.h | 34
-rw-r--r--  drivers/net/cxgbe/base/t4_tcb.h | 26
-rw-r--r--  drivers/net/cxgbe/base/t4fw_interface.h | 667
-rw-r--r--  drivers/net/cxgbe/base/t4vf_hw.c | 880
-rw-r--r--  drivers/net/cxgbe/base/t4vf_hw.h | 15
-rw-r--r--  drivers/net/cxgbe/clip_tbl.c | 193
-rw-r--r--  drivers/net/cxgbe/clip_tbl.h | 31
-rw-r--r--  drivers/net/cxgbe/cxgbe.h | 65
-rw-r--r--  drivers/net/cxgbe/cxgbe_compat.h | 55
-rw-r--r--  drivers/net/cxgbe/cxgbe_ethdev.c | 436
-rw-r--r--  drivers/net/cxgbe/cxgbe_filter.c | 1252
-rw-r--r--  drivers/net/cxgbe/cxgbe_filter.h | 235
-rw-r--r--  drivers/net/cxgbe/cxgbe_flow.c | 845
-rw-r--r--  drivers/net/cxgbe/cxgbe_flow.h | 42
-rw-r--r--  drivers/net/cxgbe/cxgbe_main.c | 768
-rw-r--r--  drivers/net/cxgbe/cxgbe_ofld.h | 89
-rw-r--r--  drivers/net/cxgbe/cxgbe_pfvf.h | 45
-rw-r--r--  drivers/net/cxgbe/cxgbevf_ethdev.c | 201
-rw-r--r--  drivers/net/cxgbe/cxgbevf_main.c | 295
-rw-r--r--  drivers/net/cxgbe/meson.build | 14
-rw-r--r--  drivers/net/cxgbe/sge.c | 627
-rw-r--r--  drivers/net/dpaa/Makefile | 3
-rw-r--r--  drivers/net/dpaa/dpaa_ethdev.c | 213
-rw-r--r--  drivers/net/dpaa/dpaa_ethdev.h | 24
-rw-r--r--  drivers/net/dpaa/dpaa_rxtx.c | 41
-rw-r--r--  drivers/net/dpaa/meson.build | 14
-rw-r--r--  drivers/net/dpaa/rte_pmd_dpaa.h | 5
-rw-r--r--  drivers/net/dpaa/rte_pmd_dpaa_version.map | 4
-rw-r--r--  drivers/net/dpaa2/Makefile | 10
-rw-r--r--  drivers/net/dpaa2/base/dpaa2_hw_dpni.c | 30
-rw-r--r--  drivers/net/dpaa2/dpaa2_ethdev.c | 502
-rw-r--r--  drivers/net/dpaa2/dpaa2_ethdev.h | 6
-rw-r--r--  drivers/net/dpaa2/dpaa2_pmd_logs.h | 41
-rw-r--r--  drivers/net/dpaa2/dpaa2_rxtx.c | 179
-rw-r--r--  drivers/net/dpaa2/mc/dpni.c | 2
-rw-r--r--  drivers/net/dpaa2/meson.build | 18
-rw-r--r--  drivers/net/e1000/Makefile | 4
-rw-r--r--  drivers/net/e1000/base/e1000_82575.c | 5
-rw-r--r--  drivers/net/e1000/base/e1000_defines.h | 1
-rw-r--r--  drivers/net/e1000/base/e1000_phy.h | 8
-rw-r--r--  drivers/net/e1000/e1000_ethdev.h | 27
-rw-r--r--  drivers/net/e1000/e1000_logs.c | 26
-rw-r--r--  drivers/net/e1000/e1000_logs.h | 6
-rw-r--r--  drivers/net/e1000/em_ethdev.c | 146
-rw-r--r--  drivers/net/e1000/em_rxtx.c | 113
-rw-r--r--  drivers/net/e1000/igb_ethdev.c | 246
-rw-r--r--  drivers/net/e1000/igb_flow.c | 110
-rw-r--r--  drivers/net/e1000/igb_rxtx.c | 188
-rw-r--r--  drivers/net/e1000/meson.build | 1
-rw-r--r--  drivers/net/ena/Makefile | 4
-rw-r--r--  drivers/net/ena/base/ena_com.c | 711
-rw-r--r--  drivers/net/ena/base/ena_com.h | 112
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_admin_defs.h | 1164
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_common_defs.h | 8
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_eth_io_defs.h | 758
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_gen_info.h | 4
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_includes.h | 2
-rw-r--r--  drivers/net/ena/base/ena_defs/ena_regs_defs.h | 36
-rw-r--r--  drivers/net/ena/base/ena_eth_com.c | 78
-rw-r--r--  drivers/net/ena/base/ena_eth_com.h | 10
-rw-r--r--  drivers/net/ena/base/ena_plat.h | 4
-rw-r--r--  drivers/net/ena/base/ena_plat_dpdk.h | 79
-rw-r--r--  drivers/net/ena/ena_ethdev.c | 800
-rw-r--r--  drivers/net/ena/ena_ethdev.h | 32
-rw-r--r--  drivers/net/ena/meson.build | 11
-rw-r--r--  drivers/net/enic/base/cq_desc.h | 1
-rw-r--r--  drivers/net/enic/base/vnic_dev.c | 88
-rw-r--r--  drivers/net/enic/base/vnic_dev.h | 12
-rw-r--r--  drivers/net/enic/base/vnic_devcmd.h | 38
-rw-r--r--  drivers/net/enic/base/vnic_enet.h | 3
-rw-r--r--  drivers/net/enic/base/vnic_nic.h | 6
-rw-r--r--  drivers/net/enic/base/vnic_rq.h | 4
-rw-r--r--  drivers/net/enic/base/vnic_wq.c | 9
-rw-r--r--  drivers/net/enic/base/vnic_wq.h | 13
-rw-r--r--  drivers/net/enic/enic.h | 142
-rw-r--r--  drivers/net/enic/enic_clsf.c | 21
-rw-r--r--  drivers/net/enic/enic_compat.h | 5
-rw-r--r--  drivers/net/enic/enic_ethdev.c | 483
-rw-r--r--  drivers/net/enic/enic_flow.c | 87
-rw-r--r--  drivers/net/enic/enic_main.c | 641
-rw-r--r--  drivers/net/enic/enic_res.c | 94
-rw-r--r--  drivers/net/enic/enic_res.h | 22
-rw-r--r--  drivers/net/enic/enic_rxtx.c | 397
-rw-r--r--  drivers/net/enic/meson.build | 19
-rw-r--r--  drivers/net/failsafe/Makefile | 33
-rw-r--r--  drivers/net/failsafe/failsafe.c | 52
-rw-r--r--  drivers/net/failsafe/failsafe_args.c | 9
-rw-r--r--  drivers/net/failsafe/failsafe_eal.c | 61
-rw-r--r--  drivers/net/failsafe/failsafe_ether.c | 55
-rw-r--r--  drivers/net/failsafe/failsafe_flow.c | 6
-rw-r--r--  drivers/net/failsafe/failsafe_intr.c | 2
-rw-r--r--  drivers/net/failsafe/failsafe_ops.c | 151
-rw-r--r--  drivers/net/failsafe/failsafe_private.h | 18
-rw-r--r--  drivers/net/failsafe/failsafe_rxtx.c | 2
-rw-r--r--  drivers/net/failsafe/meson.build | 23
-rw-r--r--  drivers/net/fm10k/Makefile | 3
-rw-r--r--  drivers/net/fm10k/fm10k.h | 13
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c | 228
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx.c | 78
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx_vec.c | 6
-rw-r--r--  drivers/net/i40e/Makefile | 5
-rw-r--r--  drivers/net/i40e/base/i40e_register.h | 24
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c | 1016
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h | 120
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c | 325
-rw-r--r--  drivers/net/i40e/i40e_fdir.c | 3
-rw-r--r--  drivers/net/i40e/i40e_flow.c | 218
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 383
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h | 5
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_avx2.c | 2
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h | 4
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c | 35
-rw-r--r--  drivers/net/i40e/i40e_vf_representor.c | 531
-rw-r--r--  drivers/net/i40e/meson.build | 12
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c | 86
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.h | 18
-rw-r--r--  drivers/net/ifc/Makefile | 35
-rw-r--r--  drivers/net/ifc/base/ifcvf.c | 298
-rw-r--r--  drivers/net/ifc/base/ifcvf.h | 154
-rw-r--r--  drivers/net/ifc/base/ifcvf_osdep.h | 52
-rw-r--r--  drivers/net/ifc/ifcvf_vdpa.c | 793
-rw-r--r--  drivers/net/ifc/meson.build | 8
-rw-r--r--  drivers/net/ifc/rte_pmd_ifc_version.map | 4
-rw-r--r--  drivers/net/ixgbe/Makefile | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 764
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h | 37
-rw-r--r--  drivers/net/ixgbe/ixgbe_fdir.c | 30
-rw-r--r--  drivers/net/ixgbe/ixgbe_flow.c | 209
-rw-r--r--  drivers/net/ixgbe/ixgbe_ipsec.c | 13
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c | 18
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c | 434
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.h | 24
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 5
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_vf_representor.c | 231
-rw-r--r--  drivers/net/ixgbe/meson.build | 3
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.c | 230
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.h | 84
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe_version.map | 10
-rw-r--r--  drivers/net/kni/Makefile | 1
-rw-r--r--  drivers/net/kni/meson.build | 8
-rw-r--r--  drivers/net/kni/rte_eth_kni.c | 66
-rw-r--r--  drivers/net/liquidio/Makefile | 2
-rw-r--r--  drivers/net/liquidio/lio_ethdev.c | 76
-rw-r--r--  drivers/net/liquidio/meson.build | 8
-rw-r--r--  drivers/net/liquidio/rte_pmd_liquidio_version.map (renamed from drivers/net/liquidio/rte_pmd_lio_version.map) | 0
-rw-r--r--  drivers/net/meson.build | 29
-rw-r--r--  drivers/net/mlx4/Makefile | 50
-rw-r--r--  drivers/net/mlx4/mlx4.c | 255
-rw-r--r--  drivers/net/mlx4/mlx4.h | 57
-rw-r--r--  drivers/net/mlx4/mlx4_ethdev.c | 208
-rw-r--r--  drivers/net/mlx4/mlx4_flow.c | 240
-rw-r--r--  drivers/net/mlx4/mlx4_flow.h | 6
-rw-r--r--  drivers/net/mlx4/mlx4_glue.c | 2
-rw-r--r--  drivers/net/mlx4/mlx4_glue.h | 2
-rw-r--r--  drivers/net/mlx4/mlx4_intr.c | 2
-rw-r--r--  drivers/net/mlx4/mlx4_mr.c | 1247
-rw-r--r--  drivers/net/mlx4/mlx4_mr.h | 122
-rw-r--r--  drivers/net/mlx4/mlx4_prm.h | 18
-rw-r--r--  drivers/net/mlx4/mlx4_rxq.c | 109
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.c | 566
-rw-r--r--  drivers/net/mlx4/mlx4_rxtx.h | 94
-rw-r--r--  drivers/net/mlx4/mlx4_txq.c | 130
-rw-r--r--  drivers/net/mlx4/mlx4_utils.c | 2
-rw-r--r--  drivers/net/mlx4/mlx4_utils.h | 2
-rw-r--r--  drivers/net/mlx5/Makefile | 303
-rw-r--r--  drivers/net/mlx5/mlx5.c | 1528
-rw-r--r--  drivers/net/mlx5/mlx5.h | 381
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h | 76
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 1216
-rw-r--r--  drivers/net/mlx5/mlx5_flow.c | 5017
-rw-r--r--  drivers/net/mlx5/mlx5_glue.c | 44
-rw-r--r--  drivers/net/mlx5/mlx5_glue.h | 18
-rw-r--r--  drivers/net/mlx5/mlx5_mac.c | 159
-rw-r--r--  drivers/net/mlx5/mlx5_mr.c | 1316
-rw-r--r--  drivers/net/mlx5/mlx5_mr.h | 120
-rw-r--r--  drivers/net/mlx5/mlx5_nl.c | 916
-rw-r--r--  drivers/net/mlx5/mlx5_nl_flow.c | 1248
-rw-r--r--  drivers/net/mlx5/mlx5_prm.h | 53
-rw-r--r--  drivers/net/mlx5/mlx5_rss.c | 179
-rw-r--r--  drivers/net/mlx5/mlx5_rxmode.c | 56
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 1558
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c | 748
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h | 538
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.c | 40
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.h | 15
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 53
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 37
-rw-r--r--  drivers/net/mlx5/mlx5_socket.c | 179
-rw-r--r--  drivers/net/mlx5/mlx5_stats.c | 246
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c | 299
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c | 434
-rw-r--r--  drivers/net/mlx5/mlx5_utils.h | 31
-rw-r--r--  drivers/net/mlx5/mlx5_vlan.c | 114
-rw-r--r--  drivers/net/mrvl/Makefile | 68
-rw-r--r--  drivers/net/mvpp2/Makefile | 42
-rw-r--r--  drivers/net/mvpp2/meson.build | 25
-rw-r--r--  drivers/net/mvpp2/mrvl_ethdev.c (renamed from drivers/net/mrvl/mrvl_ethdev.c) | 650
-rw-r--r--  drivers/net/mvpp2/mrvl_ethdev.h (renamed from drivers/net/mrvl/mrvl_ethdev.h) | 55
-rw-r--r--  drivers/net/mvpp2/mrvl_flow.c | 2779
-rw-r--r--  drivers/net/mvpp2/mrvl_qos.c (renamed from drivers/net/mrvl/mrvl_qos.c) | 368
-rw-r--r--  drivers/net/mvpp2/mrvl_qos.h (renamed from drivers/net/mrvl/mrvl_qos.h) | 58
-rw-r--r--  drivers/net/mvpp2/rte_pmd_mvpp2_version.map (renamed from drivers/net/mrvl/rte_pmd_mrvl_version.map) | 0
-rw-r--r--  drivers/net/netvsc/Makefile | 23
-rw-r--r--  drivers/net/netvsc/hn_ethdev.c | 761
-rw-r--r--  drivers/net/netvsc/hn_logs.h | 36
-rw-r--r--  drivers/net/netvsc/hn_nvs.c | 546
-rw-r--r--  drivers/net/netvsc/hn_nvs.h | 229
-rw-r--r--  drivers/net/netvsc/hn_rndis.c | 1099
-rw-r--r--  drivers/net/netvsc/hn_rndis.h | 32
-rw-r--r--  drivers/net/netvsc/hn_rxtx.c | 1334
-rw-r--r--  drivers/net/netvsc/hn_var.h | 158
-rw-r--r--  drivers/net/netvsc/meson.build | 10
-rw-r--r--  drivers/net/netvsc/ndis.h | 378
-rw-r--r--  drivers/net/netvsc/rndis.h | 414
-rw-r--r--  drivers/net/netvsc/rte_pmd_netvsc_version.map | 5
-rw-r--r--  drivers/net/nfp/Makefile | 17
-rw-r--r--  drivers/net/nfp/meson.build | 16
-rw-r--r--  drivers/net/nfp/nfp_net.c | 873
-rw-r--r--  drivers/net/nfp/nfp_net_ctrl.h | 6
-rw-r--r--  drivers/net/nfp/nfp_net_eth.h | 82
-rw-r--r--  drivers/net/nfp/nfp_net_logs.h | 9
-rw-r--r--  drivers/net/nfp/nfp_net_pmd.h | 46
-rw-r--r--  drivers/net/nfp/nfp_nfpu.c | 108
-rw-r--r--  drivers/net/nfp/nfp_nfpu.h | 55
-rw-r--r--  drivers/net/nfp/nfp_nspu.c | 642
-rw-r--r--  drivers/net/nfp/nfp_nspu.h | 83
-rw-r--r--  drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h | 722
-rw-r--r--  drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h | 35
-rw-r--r--  drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h | 592
-rw-r--r--  drivers/net/nfp/nfpcore/nfp6000/nfp6000.h | 40
-rw-r--r--  drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h | 26
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_cpp.h | 781
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c | 845
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_cppcore.c | 858
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_crc.c | 49
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_crc.h | 19
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_hwinfo.c | 199
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_hwinfo.h | 85
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_mip.c | 154
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_mip.h | 21
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_mutex.c | 424
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_nffw.c | 235
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_nffw.h | 86
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_nsp.c | 427
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_nsp.h | 304
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_nsp_cmds.c | 109
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_nsp_eth.c | 665
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_resource.c | 266
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_resource.h | 52
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_rtsym.c | 327
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_rtsym.h | 61
-rw-r--r--  drivers/net/nfp/nfpcore/nfp_target.h | 579
-rw-r--r--  drivers/net/null/meson.build | 1
-rw-r--r--  drivers/net/null/rte_eth_null.c | 82
-rw-r--r--  drivers/net/octeontx/Makefile | 3
-rw-r--r--  drivers/net/octeontx/base/octeontx_bgx.c | 22
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkivf.c | 10
-rw-r--r--  drivers/net/octeontx/base/octeontx_pkivf.h | 10
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.c | 124
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.h | 4
-rw-r--r--  drivers/net/octeontx/octeontx_rxtx.c | 2
-rw-r--r--  drivers/net/pcap/meson.build | 20
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c | 274
-rw-r--r--  drivers/net/qede/LICENSE.qede_pmd | 28
-rw-r--r--  drivers/net/qede/Makefile | 10
-rw-r--r--  drivers/net/qede/base/bcm_osal.c | 24
-rw-r--r--  drivers/net/qede/base/bcm_osal.h | 8
-rw-r--r--  drivers/net/qede/base/common_hsi.h | 16
-rw-r--r--  drivers/net/qede/base/ecore.h | 36
-rw-r--r--  drivers/net/qede/base/ecore_attn_values.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_chain.h | 57
-rw-r--r--  drivers/net/qede/base/ecore_cxt.c | 38
-rw-r--r--  drivers/net/qede/base/ecore_cxt.h | 12
-rw-r--r--  drivers/net/qede/base/ecore_cxt_api.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.c | 15
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_dcbx_api.h | 9
-rw-r--r--  drivers/net/qede/base/ecore_dev.c | 344
-rw-r--r--  drivers/net/qede/base/ecore_dev_api.h | 28
-rw-r--r--  drivers/net/qede/base/ecore_gtt_reg_addr.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_gtt_values.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_hsi_common.h | 114
-rw-r--r--  drivers/net/qede/base/ecore_hsi_debug_tools.h | 116
-rw-r--r--  drivers/net/qede/base/ecore_hsi_eth.h | 37
-rw-r--r--  drivers/net/qede/base/ecore_hsi_init_func.h | 44
-rw-r--r--  drivers/net/qede/base/ecore_hsi_init_tool.h | 115
-rw-r--r--  drivers/net/qede/base/ecore_hw.c | 130
-rw-r--r--  drivers/net/qede/base/ecore_hw.h | 12
-rw-r--r--  drivers/net/qede/base/ecore_hw_defs.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.c | 218
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.h | 41
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.c | 40
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_int.c | 56
-rw-r--r--  drivers/net/qede/base/ecore_int.h | 11
-rw-r--r--  drivers/net/qede/base/ecore_int_api.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_iov_api.h | 19
-rw-r--r--  drivers/net/qede/base/ecore_iro.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_iro_values.h | 72
-rw-r--r--  drivers/net/qede/base/ecore_l2.c | 73
-rw-r--r--  drivers/net/qede/base/ecore_l2.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_l2_api.h | 10
-rw-r--r--  drivers/net/qede/base/ecore_mcp.c | 228
-rw-r--r--  drivers/net/qede/base/ecore_mcp.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h | 54
-rw-r--r--  drivers/net/qede/base/ecore_mng_tlv.c | 6
-rw-r--r--  drivers/net/qede/base/ecore_proto_if.h | 11
-rw-r--r--  drivers/net/qede/base/ecore_rt_defs.h | 599
-rw-r--r--  drivers/net/qede/base/ecore_sp_api.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.c | 37
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_spq.c | 44
-rw-r--r--  drivers/net/qede/base/ecore_spq.h | 21
-rw-r--r--  drivers/net/qede/base/ecore_sriov.c | 96
-rw-r--r--  drivers/net/qede/base/ecore_sriov.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_status.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_utils.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_vf.c | 46
-rw-r--r--  drivers/net/qede/base/ecore_vf.h | 17
-rw-r--r--  drivers/net/qede/base/ecore_vf_api.h | 8
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h | 32
-rw-r--r--  drivers/net/qede/base/eth_common.h | 11
-rw-r--r--  drivers/net/qede/base/mcp_public.h | 11
-rw-r--r--  drivers/net/qede/base/nvm_cfg.h | 8
-rw-r--r--  drivers/net/qede/base/reg_addr.h | 18
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 618
-rw-r--r--  drivers/net/qede/qede_ethdev.h | 18
-rw-r--r--  drivers/net/qede/qede_fdir.c | 15
-rw-r--r--  drivers/net/qede/qede_if.h | 8
-rw-r--r--  drivers/net/qede/qede_logs.h | 8
-rw-r--r--  drivers/net/qede/qede_main.c | 24
-rw-r--r--  drivers/net/qede/qede_rxtx.c | 51
-rw-r--r--  drivers/net/qede/qede_rxtx.h | 14
-rw-r--r--  drivers/net/ring/meson.build | 1
-rw-r--r--  drivers/net/ring/rte_eth_ring.c | 65
-rw-r--r--  drivers/net/sfc/Makefile | 7
-rw-r--r--  drivers/net/sfc/base/ef10_ev.c | 112
-rw-r--r--  drivers/net/sfc/base/ef10_filter.c | 163
-rw-r--r--  drivers/net/sfc/base/ef10_image.c | 885
-rw-r--r--  drivers/net/sfc/base/ef10_impl.h | 74
-rw-r--r--  drivers/net/sfc/base/ef10_intr.c | 13
-rw-r--r--  drivers/net/sfc/base/ef10_mac.c | 193
-rw-r--r--  drivers/net/sfc/base/ef10_mcdi.c | 25
-rw-r--r--  drivers/net/sfc/base/ef10_nic.c | 923
-rw-r--r--  drivers/net/sfc/base/ef10_nvram.c | 33
-rw-r--r--  drivers/net/sfc/base/ef10_phy.c | 126
-rw-r--r--  drivers/net/sfc/base/ef10_rx.c | 184
-rw-r--r--  drivers/net/sfc/base/ef10_signed_image_layout.h | 62
-rw-r--r--  drivers/net/sfc/base/ef10_tlv_layout.h | 115
-rw-r--r--  drivers/net/sfc/base/ef10_tx.c | 67
-rw-r--r--  drivers/net/sfc/base/ef10_vpd.c | 37
-rw-r--r--  drivers/net/sfc/base/efx.h | 425
-rw-r--r--  drivers/net/sfc/base/efx_bootcfg.c | 89
-rw-r--r--  drivers/net/sfc/base/efx_check.h | 122
-rw-r--r--  drivers/net/sfc/base/efx_ev.c | 10
-rw-r--r--  drivers/net/sfc/base/efx_filter.c | 71
-rw-r--r--  drivers/net/sfc/base/efx_impl.h | 134
-rw-r--r--  drivers/net/sfc/base/efx_intr.c | 21
-rw-r--r--  drivers/net/sfc/base/efx_lic.c | 26
-rw-r--r--  drivers/net/sfc/base/efx_mac.c | 38
-rw-r--r--  drivers/net/sfc/base/efx_mcdi.c | 95
-rw-r--r--  drivers/net/sfc/base/efx_mcdi.h | 4
-rw-r--r--  drivers/net/sfc/base/efx_mon.c | 6
-rw-r--r--  drivers/net/sfc/base/efx_nic.c | 304
-rw-r--r--  drivers/net/sfc/base/efx_nvram.c | 10
-rw-r--r--  drivers/net/sfc/base/efx_phy.c | 14
-rw-r--r--  drivers/net/sfc/base/efx_port.c | 5
-rw-r--r--  drivers/net/sfc/base/efx_regs_ef10.h | 230
-rw-r--r--  drivers/net/sfc/base/efx_regs_mcdi.h | 9375
-rw-r--r--  drivers/net/sfc/base/efx_regs_mcdi_aoe.h | 2914
-rw-r--r--  drivers/net/sfc/base/efx_rx.c | 245
-rw-r--r--  drivers/net/sfc/base/efx_sram.c | 14
-rw-r--r--  drivers/net/sfc/base/efx_tunnel.c | 32
-rw-r--r--  drivers/net/sfc/base/efx_tx.c | 56
-rw-r--r--  drivers/net/sfc/base/efx_types.h | 38
-rw-r--r--  drivers/net/sfc/base/efx_vpd.c | 10
-rw-r--r--  drivers/net/sfc/base/hunt_nic.c | 172
-rw-r--r--  drivers/net/sfc/base/mcdi_mon.c | 9
-rw-r--r--  drivers/net/sfc/base/medford2_impl.h | 35
-rw-r--r--  drivers/net/sfc/base/medford2_nic.c | 162
-rw-r--r--  drivers/net/sfc/base/medford_nic.c | 240
-rw-r--r--  drivers/net/sfc/base/meson.build | 4
-rw-r--r--  drivers/net/sfc/base/siena_flash.h | 9
-rw-r--r--  drivers/net/sfc/base/siena_mac.c | 31
-rw-r--r--  drivers/net/sfc/base/siena_mcdi.c | 12
-rw-r--r--  drivers/net/sfc/base/siena_nic.c | 24
-rw-r--r--  drivers/net/sfc/base/siena_nvram.c | 17
-rw-r--r--  drivers/net/sfc/base/siena_phy.c | 9
-rw-r--r--  drivers/net/sfc/base/siena_vpd.c | 25
-rw-r--r--  drivers/net/sfc/efsys.h | 17
-rw-r--r--  drivers/net/sfc/meson.build | 7
-rw-r--r--  drivers/net/sfc/sfc.c | 350
-rw-r--r--  drivers/net/sfc/sfc.h | 41
-rw-r--r--  drivers/net/sfc/sfc_dp.c | 5
-rw-r--r--  drivers/net/sfc/sfc_dp.h | 9
-rw-r--r--  drivers/net/sfc/sfc_dp_rx.h | 30
-rw-r--r--  drivers/net/sfc/sfc_dp_tx.h | 4
-rw-r--r--  drivers/net/sfc/sfc_ef10.h | 34
-rw-r--r--  drivers/net/sfc/sfc_ef10_essb_rx.c | 723
-rw-r--r--  drivers/net/sfc/sfc_ef10_rx.c | 184
-rw-r--r--  drivers/net/sfc/sfc_ef10_rx_ev.h | 175
-rw-r--r--  drivers/net/sfc/sfc_ef10_tx.c | 2
-rw-r--r--  drivers/net/sfc/sfc_ethdev.c | 232
-rw-r--r--  drivers/net/sfc/sfc_ev.c | 56
-rw-r--r--  drivers/net/sfc/sfc_filter.c | 14
-rw-r--r--  drivers/net/sfc/sfc_filter.h | 10
-rw-r--r--  drivers/net/sfc/sfc_flow.c | 1326
-rw-r--r--  drivers/net/sfc/sfc_flow.h | 23
-rw-r--r--  drivers/net/sfc/sfc_intr.c | 6
-rw-r--r--  drivers/net/sfc/sfc_kvargs.c | 4
-rw-r--r--  drivers/net/sfc/sfc_kvargs.h | 24
-rw-r--r--  drivers/net/sfc/sfc_log.h | 77
-rw-r--r--  drivers/net/sfc/sfc_mcdi.c | 25
-rw-r--r--  drivers/net/sfc/sfc_port.c | 68
-rw-r--r--  drivers/net/sfc/sfc_rx.c | 437
-rw-r--r--  drivers/net/sfc/sfc_rx.h | 11
-rw-r--r--  drivers/net/sfc/sfc_tso.c | 3
-rw-r--r--  drivers/net/sfc/sfc_tweak.h | 8
-rw-r--r--  drivers/net/sfc/sfc_tx.c | 98
-rw-r--r--  drivers/net/sfc/sfc_tx.h | 1
-rw-r--r--  drivers/net/softnic/Makefile | 23
-rw-r--r--  drivers/net/softnic/conn.c | 332
-rw-r--r--  drivers/net/softnic/conn.h | 49
-rw-r--r--  drivers/net/softnic/firmware.cli | 21
-rw-r--r--  drivers/net/softnic/hash_func.h | 359
-rw-r--r--  drivers/net/softnic/hash_func_arm64.h (renamed from examples/ip_pipeline/pipeline/hash_func_arm64.h) | 0
-rw-r--r--  drivers/net/softnic/meson.build | 18
-rw-r--r--  drivers/net/softnic/parser.c | 703
-rw-r--r--  drivers/net/softnic/parser.h | 68
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.c | 758
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.h | 49
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_action.c | 389
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_cli.c | 5259
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_internals.h | 812
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_link.c | 98
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_mempool.c | 103
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_pipeline.c | 966
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_swq.c | 114
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_tap.c | 118
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_thread.c | 2929
-rw-r--r--  drivers/net/softnic/rte_eth_softnic_tm.c | 345
-rw-r--r--  drivers/net/softnic/rte_pmd_softnic_version.map (renamed from drivers/net/softnic/rte_pmd_eth_softnic_version.map) | 6
-rw-r--r--  drivers/net/szedata2/Makefile | 33
-rw-r--r--  drivers/net/szedata2/meson.build | 7
-rw-r--r--  drivers/net/szedata2/rte_eth_szedata2.c | 929
-rw-r--r--  drivers/net/szedata2/rte_eth_szedata2.h | 37
-rw-r--r--  drivers/net/szedata2/szedata2_iobuf.c | 203
-rw-r--r--  drivers/net/szedata2/szedata2_iobuf.h | 356
-rw-r--r--  drivers/net/szedata2/szedata2_logs.h | 22
-rw-r--r--  drivers/net/tap/Makefile | 2
-rw-r--r--  drivers/net/tap/rte_eth_tap.c | 1009
-rw-r--r--  drivers/net/tap/rte_eth_tap.h | 16
-rw-r--r--  drivers/net/tap/tap_bpf.h | 2
-rw-r--r--  drivers/net/tap/tap_bpf_api.c | 2
-rw-r--r--  drivers/net/tap/tap_bpf_insns.h | 2
-rw-r--r--  drivers/net/tap/tap_bpf_program.c | 4
-rw-r--r--  drivers/net/tap/tap_flow.c | 144
-rw-r--r--  drivers/net/tap/tap_flow.h | 2
-rw-r--r--  drivers/net/tap/tap_intr.c | 4
-rw-r--r--  drivers/net/tap/tap_log.h | 10
-rw-r--r--  drivers/net/tap/tap_netlink.c | 20
-rw-r--r--  drivers/net/tap/tap_netlink.h | 2
-rw-r--r--  drivers/net/tap/tap_rss.h | 8
-rw-r--r--  drivers/net/tap/tap_tcmsgs.c | 11
-rw-r--r--  drivers/net/tap/tap_tcmsgs.h | 2
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw.c | 13
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw.h | 1
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw_defs.h | 5
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c | 302
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.h | 2
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c | 144
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.h | 24
-rw-r--r--  drivers/net/thunderx/nicvf_struct.h | 29
-rw-r--r--  drivers/net/vdev_netvsc/Makefile | 2
-rw-r--r--  drivers/net/vdev_netvsc/vdev_netvsc.c | 275
-rw-r--r--  drivers/net/vhost/meson.build | 8
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.c | 396
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.h | 35
-rw-r--r--  drivers/net/virtio/meson.build | 27
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 240
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h | 11
-rw-r--r--  drivers/net/virtio/virtio_pci.h | 12
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c | 698
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.c | 67
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.h | 49
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel.c | 86
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.c | 14
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.h | 3
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_user.c | 76
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c | 201
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.h | 12
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c | 158
-rw-r--r--  drivers/net/virtio/virtqueue.c | 8
-rw-r--r--  drivers/net/virtio/virtqueue.h | 2
-rw-r--r--  drivers/net/vmxnet3/Makefile | 4
-rw-r--r--  drivers/net/vmxnet3/base/upt1_defs.h | 7
-rw-r--r--  drivers/net/vmxnet3/base/vmxnet3_defs.h | 34
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c | 156
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.h | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ring.h | 2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c | 212
-rw-r--r--  drivers/raw/Makefile | 5
-rw-r--r--  drivers/raw/dpaa2_cmdif/Makefile | 36
-rw-r--r--  drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c | 297
-rw-r--r--  drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h | 46
-rw-r--r--  drivers/raw/dpaa2_cmdif/meson.build | 10
-rw-r--r--drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h35
-rw-r--r--drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map4
-rw-r--r--drivers/raw/dpaa2_qdma/Makefile37
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma.c1001
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma.h150
-rw-r--r--drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h46
-rw-r--r--drivers/raw/dpaa2_qdma/meson.build10
-rw-r--r--drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h286
-rw-r--r--drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map20
-rw-r--r--drivers/raw/ifpga_rawdev/Makefile36
-rw-r--r--drivers/raw/ifpga_rawdev/base/Makefile26
-rw-r--r--drivers/raw/ifpga_rawdev/base/README31
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_api.c294
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_api.h28
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_compat.h58
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_defines.h1663
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c821
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h11
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c253
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h168
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme.c734
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c301
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c381
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c715
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c352
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_hw.h127
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_port.c388
-rw-r--r--drivers/raw/ifpga_rawdev/base/ifpga_port_error.c144
-rw-r--r--drivers/raw/ifpga_rawdev/base/meson.build34
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_debug.c99
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_debug.h19
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_hw_api.c381
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_hw_api.h253
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c145
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h279
-rw-r--r--drivers/raw/ifpga_rawdev/base/opae_osdep.h79
-rw-r--r--drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h75
-rw-r--r--drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h45
-rw-r--r--drivers/raw/ifpga_rawdev/ifpga_rawdev.c617
-rw-r--r--drivers/raw/ifpga_rawdev/ifpga_rawdev.h37
-rw-r--r--drivers/raw/ifpga_rawdev/meson.build15
-rw-r--r--drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map4
-rw-r--r--drivers/raw/meson.build7
-rw-r--r--drivers/raw/skeleton_rawdev/Makefile1
-rw-r--r--drivers/raw/skeleton_rawdev/meson.build6
-rw-r--r--drivers/raw/skeleton_rawdev/skeleton_rawdev.c18
-rw-r--r--drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c16
-rw-r--r--examples/Makefile34
-rw-r--r--examples/bbdev_app/main.c16
-rw-r--r--examples/bond/Makefile3
-rw-r--r--examples/bond/main.c25
-rw-r--r--examples/bond/meson.build1
-rw-r--r--examples/distributor/main.c42
-rw-r--r--examples/ethtool/Makefile2
-rw-r--r--examples/ethtool/ethtool-app/ethapp.c74
-rw-r--r--examples/ethtool/ethtool-app/main.c4
-rw-r--r--examples/ethtool/lib/Makefile3
-rw-r--r--examples/ethtool/lib/rte_ethtool.c46
-rw-r--r--examples/ethtool/lib/rte_ethtool.h34
-rw-r--r--examples/ethtool/meson.build10
-rw-r--r--examples/eventdev_pipeline/main.c38
-rw-r--r--examples/eventdev_pipeline/pipeline_worker_generic.c7
-rw-r--r--examples/eventdev_pipeline/pipeline_worker_tx.c6
-rw-r--r--examples/exception_path/main.c33
-rw-r--r--examples/flow_classify/flow_classify.c19
-rw-r--r--examples/flow_filtering/Makefile33
-rw-r--r--examples/flow_filtering/flow_blocks.c32
-rw-r--r--examples/flow_filtering/main.c53
-rw-r--r--examples/ip_fragmentation/main.c16
-rw-r--r--examples/ip_pipeline/Makefile55
-rw-r--r--examples/ip_pipeline/action.c358
-rw-r--r--examples/ip_pipeline/action.h77
-rw-r--r--examples/ip_pipeline/app.h1401
-rw-r--r--examples/ip_pipeline/cli.c5240
-rw-r--r--examples/ip_pipeline/cli.h18
-rw-r--r--examples/ip_pipeline/common.h12
-rw-r--r--examples/ip_pipeline/config/action.cfg68
-rw-r--r--examples/ip_pipeline/config/action.sh119
-rw-r--r--examples/ip_pipeline/config/action.txt8
-rwxr-xr-xexamples/ip_pipeline/config/diagram-generator.py317
-rw-r--r--examples/ip_pipeline/config/edge_router_downstream.cfg97
-rw-r--r--examples/ip_pipeline/config/edge_router_downstream.sh13
-rw-r--r--examples/ip_pipeline/config/edge_router_upstream.cfg124
-rw-r--r--examples/ip_pipeline/config/edge_router_upstream.sh33
-rw-r--r--examples/ip_pipeline/config/firewall.cfg68
-rw-r--r--examples/ip_pipeline/config/firewall.sh13
-rw-r--r--examples/ip_pipeline/config/firewall.txt9
-rw-r--r--examples/ip_pipeline/config/flow.cfg72
-rw-r--r--examples/ip_pipeline/config/flow.sh25
-rw-r--r--examples/ip_pipeline/config/flow.txt17
-rw-r--r--examples/ip_pipeline/config/ip_pipeline.cfg9
-rw-r--r--examples/ip_pipeline/config/ip_pipeline.sh5
-rw-r--r--examples/ip_pipeline/config/kni.cfg67
-rw-r--r--examples/ip_pipeline/config/l2fwd.cfg58
-rw-r--r--examples/ip_pipeline/config/l3fwd.cfg68
-rw-r--r--examples/ip_pipeline/config/l3fwd.sh33
-rw-r--r--examples/ip_pipeline/config/l3fwd_arp.cfg70
-rw-r--r--examples/ip_pipeline/config/l3fwd_arp.sh43
-rw-r--r--examples/ip_pipeline/config/network_layers.cfg227
-rw-r--r--examples/ip_pipeline/config/network_layers.sh79
-rwxr-xr-xexamples/ip_pipeline/config/pipeline-to-core-mapping.py906
-rw-r--r--examples/ip_pipeline/config/tap.cfg64
-rw-r--r--examples/ip_pipeline/config/tm_profile.cfg105
-rw-r--r--examples/ip_pipeline/config_check.c488
-rw-r--r--examples/ip_pipeline/config_parse.c3395
-rw-r--r--examples/ip_pipeline/config_parse_tm.c419
-rw-r--r--examples/ip_pipeline/conn.c329
-rw-r--r--examples/ip_pipeline/conn.h47
-rw-r--r--examples/ip_pipeline/cpu_core_map.c471
-rw-r--r--examples/ip_pipeline/cpu_core_map.h40
-rw-r--r--examples/ip_pipeline/examples/firewall.cli59
-rw-r--r--examples/ip_pipeline/examples/flow.cli60
-rw-r--r--examples/ip_pipeline/examples/kni.cli69
-rw-r--r--examples/ip_pipeline/examples/l2fwd.cli51
-rw-r--r--examples/ip_pipeline/examples/route.cli60
-rw-r--r--examples/ip_pipeline/examples/route_ecmp.cli57
-rw-r--r--examples/ip_pipeline/examples/rss.cli112
-rw-r--r--examples/ip_pipeline/examples/tap.cli66
-rw-r--r--examples/ip_pipeline/hash_func.h (renamed from examples/ip_pipeline/pipeline/hash_func.h)3
-rw-r--r--examples/ip_pipeline/hash_func_arm64.h232
-rw-r--r--examples/ip_pipeline/init.c1927
-rw-r--r--examples/ip_pipeline/kni.c175
-rw-r--r--examples/ip_pipeline/kni.h46
-rw-r--r--examples/ip_pipeline/link.c267
-rw-r--r--examples/ip_pipeline/link.h66
-rw-r--r--examples/ip_pipeline/main.c259
-rw-r--r--examples/ip_pipeline/mempool.c82
-rw-r--r--examples/ip_pipeline/mempool.h40
-rw-r--r--examples/ip_pipeline/meson.build36
-rw-r--r--examples/ip_pipeline/parser.c17
-rw-r--r--examples/ip_pipeline/parser.h8
-rw-r--r--examples/ip_pipeline/pipeline.c988
-rw-r--r--examples/ip_pipeline/pipeline.h389
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_actions_common.h202
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_common_be.c176
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_common_be.h134
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_common_fe.c1455
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_common_fe.h231
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_firewall.c1421
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_firewall.h60
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_firewall_be.c856
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_firewall_be.h147
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_actions.c1286
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_actions.h60
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c960
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h139
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification.c1878
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification.h106
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c723
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h113
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_master.c20
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_master.h12
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_master_be.c141
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_master_be.h12
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_passthrough.c45
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_passthrough.h12
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_passthrough_be.c929
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_passthrough_be.h44
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing.c1613
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing.h71
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing_be.c1966
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing_be.h283
-rw-r--r--examples/ip_pipeline/pipeline_be.h322
-rw-r--r--examples/ip_pipeline/swq.c76
-rw-r--r--examples/ip_pipeline/swq.h37
-rw-r--r--examples/ip_pipeline/tap.c102
-rw-r--r--examples/ip_pipeline/tap.h29
-rw-r--r--examples/ip_pipeline/thread.c3053
-rw-r--r--examples/ip_pipeline/thread.h75
-rw-r--r--examples/ip_pipeline/thread_fe.c457
-rw-r--r--examples/ip_pipeline/thread_fe.h72
-rw-r--r--examples/ip_pipeline/tmgr.c229
-rw-r--r--examples/ip_pipeline/tmgr.h70
-rw-r--r--examples/ip_reassembly/main.c26
-rw-r--r--examples/ipsec-secgw/ipsec-secgw.c248
-rw-r--r--examples/ipsec-secgw/ipsec.c75
-rw-r--r--examples/ipsec-secgw/ipsec.h5
-rw-r--r--examples/ipsec-secgw/parser.c23
-rw-r--r--examples/ipsec-secgw/sa.c2
-rw-r--r--examples/ipv4_multicast/main.c12
-rw-r--r--examples/kni/Makefile2
-rw-r--r--examples/kni/main.c37
-rw-r--r--examples/kni/meson.build2
-rw-r--r--examples/l2fwd-cat/l2fwd-cat.c11
-rw-r--r--examples/l2fwd-cat/meson.build4
-rw-r--r--examples/l2fwd-crypto/Makefile6
-rw-r--r--examples/l2fwd-crypto/main.c593
-rw-r--r--examples/l2fwd-jobstats/main.c23
-rw-r--r--examples/l2fwd-keepalive/main.c23
-rw-r--r--examples/l2fwd/main.c25
-rw-r--r--examples/l3fwd-acl/main.c32
-rw-r--r--examples/l3fwd-power/Makefile6
-rw-r--r--examples/l3fwd-power/main.c111
-rw-r--r--examples/l3fwd-power/main.h20
-rw-r--r--examples/l3fwd-power/meson.build7
-rw-r--r--examples/l3fwd-power/perf_core.c230
-rw-r--r--examples/l3fwd-power/perf_core.h12
-rw-r--r--examples/l3fwd-vf/main.c29
-rw-r--r--examples/l3fwd/l3fwd_common.h35
-rw-r--r--examples/l3fwd/l3fwd_em.c1
-rw-r--r--examples/l3fwd/l3fwd_em_hlm.h35
-rw-r--r--examples/l3fwd/l3fwd_em_hlm_neon.h35
-rw-r--r--examples/l3fwd/l3fwd_lpm.c1
-rw-r--r--examples/l3fwd/l3fwd_lpm_neon.h35
-rw-r--r--examples/l3fwd/l3fwd_neon.h36
-rw-r--r--examples/l3fwd/main.c34
-rw-r--r--examples/link_status_interrupt/main.c4
-rw-r--r--examples/load_balancer/config.c2
-rw-r--r--examples/load_balancer/init.c14
-rw-r--r--examples/meson.build55
-rw-r--r--examples/multi_process/client_server_mp/mp_client/client.c2
-rw-r--r--examples/multi_process/client_server_mp/mp_server/Makefile3
-rw-r--r--examples/multi_process/client_server_mp/mp_server/init.c2
-rw-r--r--examples/multi_process/l2fwd_fork/flib.c280
-rw-r--r--examples/multi_process/l2fwd_fork/flib.h120
-rw-r--r--examples/multi_process/l2fwd_fork/main.c1261
-rw-r--r--examples/multi_process/meson.build10
-rw-r--r--examples/multi_process/symmetric_mp/main.c22
-rw-r--r--examples/netmap_compat/bridge/Makefile2
-rw-r--r--examples/netmap_compat/bridge/bridge.c3
-rw-r--r--examples/netmap_compat/lib/compat_netmap.c1
-rw-r--r--examples/netmap_compat/meson.build10
-rw-r--r--examples/packet_ordering/main.c34
-rw-r--r--examples/performance-thread/common/arch/x86/ctx.c62
-rw-r--r--examples/performance-thread/common/lthread.c64
-rw-r--r--examples/performance-thread/common/lthread.h62
-rw-r--r--examples/performance-thread/common/lthread_api.h64
-rw-r--r--examples/performance-thread/common/lthread_cond.c61
-rw-r--r--examples/performance-thread/common/lthread_cond.h61
-rw-r--r--examples/performance-thread/common/lthread_int.h61
-rw-r--r--examples/performance-thread/common/lthread_pool.h68
-rw-r--r--examples/performance-thread/common/lthread_queue.h68
-rw-r--r--examples/performance-thread/common/lthread_sched.c62
-rw-r--r--examples/performance-thread/common/lthread_sched.h61
-rw-r--r--examples/performance-thread/l3fwd-thread/main.c52
-rw-r--r--examples/performance-thread/meson.build10
-rw-r--r--examples/performance-thread/pthread_shim/main.c6
-rw-r--r--examples/performance-thread/pthread_shim/pthread_shim.c4
-rw-r--r--examples/ptpclient/ptpclient.c9
-rw-r--r--examples/qos_meter/Makefile2
-rw-r--r--examples/qos_meter/main.c64
-rw-r--r--examples/qos_meter/main.h32
-rw-r--r--examples/qos_sched/Makefile2
-rw-r--r--examples/qos_sched/init.c4
-rw-r--r--examples/quota_watermark/meson.build10
-rw-r--r--examples/quota_watermark/qw/init.c4
-rw-r--r--examples/quota_watermark/qw/main.c14
-rw-r--r--examples/rxtx_callbacks/main.c15
-rw-r--r--examples/server_node_efd/meson.build10
-rw-r--r--examples/server_node_efd/node/node.c2
-rw-r--r--examples/server_node_efd/server/Makefile2
-rw-r--r--examples/server_node_efd/server/init.c4
-rw-r--r--examples/service_cores/Makefile3
-rw-r--r--examples/service_cores/meson.build1
-rw-r--r--examples/skeleton/basicfwd.c13
-rw-r--r--examples/tep_termination/Makefile5
-rw-r--r--examples/tep_termination/main.c27
-rw-r--r--examples/tep_termination/meson.build4
-rw-r--r--examples/tep_termination/vxlan_setup.c4
-rw-r--r--examples/vhost/Makefile5
-rw-r--r--examples/vhost/main.c35
-rw-r--r--examples/vhost/meson.build4
-rw-r--r--examples/vhost/virtio_net.c94
-rw-r--r--examples/vhost_crypto/Makefile (renamed from examples/multi_process/l2fwd_fork/Makefile)26
-rw-r--r--examples/vhost_crypto/main.c536
-rw-r--r--examples/vhost_crypto/meson.build14
-rw-r--r--examples/vhost_scsi/Makefile2
-rw-r--r--examples/vhost_scsi/meson.build3
-rw-r--r--examples/vhost_scsi/scsi.c14
-rw-r--r--examples/vhost_scsi/scsi_spec.h2
-rw-r--r--examples/vhost_scsi/vhost_scsi.c56
-rw-r--r--examples/vm_power_manager/Makefile7
-rw-r--r--examples/vm_power_manager/channel_monitor.c28
-rw-r--r--examples/vm_power_manager/guest_cli/Makefile2
-rw-r--r--examples/vm_power_manager/guest_cli/main.c151
-rw-r--r--examples/vm_power_manager/guest_cli/parse.c82
-rw-r--r--examples/vm_power_manager/guest_cli/parse.h19
-rw-r--r--examples/vm_power_manager/guest_cli/vm_power_cli_guest.c113
-rw-r--r--examples/vm_power_manager/guest_cli/vm_power_cli_guest.h6
-rw-r--r--examples/vm_power_manager/main.c177
-rw-r--r--examples/vm_power_manager/meson.build10
-rw-r--r--examples/vm_power_manager/oob_monitor.h68
-rw-r--r--examples/vm_power_manager/oob_monitor_nop.c38
-rw-r--r--examples/vm_power_manager/oob_monitor_x86.c258
-rw-r--r--examples/vm_power_manager/parse.c81
-rw-r--r--examples/vm_power_manager/parse.h20
-rw-r--r--examples/vm_power_manager/power_manager.c139
-rw-r--r--examples/vm_power_manager/power_manager.h23
-rw-r--r--examples/vmdq/main.c14
-rw-r--r--examples/vmdq_dcb/main.c27
-rw-r--r--kernel/Makefile9
-rw-r--r--kernel/freebsd/BSDmakefile.meson18
-rw-r--r--kernel/freebsd/Makefile9
-rw-r--r--kernel/freebsd/contigmem/BSDmakefile (renamed from lib/librte_eal/bsdapp/contigmem/BSDmakefile)0
-rw-r--r--kernel/freebsd/contigmem/Makefile (renamed from lib/librte_eal/bsdapp/contigmem/Makefile)0
-rw-r--r--kernel/freebsd/contigmem/contigmem.c (renamed from lib/librte_eal/bsdapp/contigmem/contigmem.c)0
-rw-r--r--kernel/freebsd/contigmem/meson.build (renamed from lib/librte_eal/bsdapp/contigmem/meson.build)0
-rw-r--r--kernel/freebsd/meson.build32
-rw-r--r--kernel/freebsd/nic_uio/BSDmakefile (renamed from lib/librte_eal/bsdapp/nic_uio/BSDmakefile)0
-rw-r--r--kernel/freebsd/nic_uio/Makefile (renamed from lib/librte_eal/bsdapp/nic_uio/Makefile)0
-rw-r--r--kernel/freebsd/nic_uio/meson.build (renamed from lib/librte_eal/bsdapp/nic_uio/meson.build)0
-rw-r--r--kernel/freebsd/nic_uio/nic_uio.c (renamed from lib/librte_eal/bsdapp/nic_uio/nic_uio.c)0
-rw-r--r--kernel/linux/Makefile9
-rw-r--r--kernel/linux/igb_uio/Kbuild2
-rw-r--r--kernel/linux/igb_uio/Makefile (renamed from lib/librte_eal/linuxapp/igb_uio/Makefile)0
-rw-r--r--kernel/linux/igb_uio/compat.h (renamed from lib/librte_eal/linuxapp/igb_uio/compat.h)24
-rw-r--r--kernel/linux/igb_uio/igb_uio.c (renamed from lib/librte_eal/linuxapp/igb_uio/igb_uio.c)32
-rw-r--r--kernel/linux/igb_uio/meson.build (renamed from lib/librte_eal/linuxapp/igb_uio/meson.build)8
-rw-r--r--kernel/linux/kni/Makefile (renamed from lib/librte_eal/linuxapp/kni/Makefile)0
-rw-r--r--kernel/linux/kni/compat.h (renamed from lib/librte_eal/linuxapp/kni/compat.h)6
-rw-r--r--kernel/linux/kni/ethtool/README (renamed from lib/librte_eal/linuxapp/kni/ethtool/README)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_82575.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_82575.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_api.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_api.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_defines.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_hw.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_i210.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_i210.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_mac.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_mac.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_manage.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_manage.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_mbx.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_mbx.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_nvm.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_nvm.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_osdep.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_phy.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_phy.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/e1000_regs.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/igb.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/igb_ethtool.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c)7
-rw-r--r--kernel/linux/kni/ethtool/igb/igb_main.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/igb_param.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/igb_regtest.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/igb_vmdq.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c)0
-rw-r--r--kernel/linux/kni/ethtool/igb/igb_vmdq.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h)0
-rw-r--r--kernel/linux/kni/ethtool/igb/kcompat.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h)18
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_api.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_api.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_common.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_common.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_dcb.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_ethtool.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_fcoe.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_main.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_mbx.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_osdep.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_type.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/kcompat.c (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c)0
-rw-r--r--kernel/linux/kni/ethtool/ixgbe/kcompat.h (renamed from lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h)0
-rw-r--r--kernel/linux/kni/kni_dev.h (renamed from lib/librte_eal/linuxapp/kni/kni_dev.h)1
-rw-r--r--kernel/linux/kni/kni_ethtool.c (renamed from lib/librte_eal/linuxapp/kni/kni_ethtool.c)0
-rw-r--r--kernel/linux/kni/kni_fifo.h (renamed from lib/librte_eal/linuxapp/kni/kni_fifo.h)0
-rw-r--r--kernel/linux/kni/kni_misc.c (renamed from lib/librte_eal/linuxapp/kni/kni_misc.c)2
-rw-r--r--kernel/linux/kni/kni_net.c (renamed from lib/librte_eal/linuxapp/kni/kni_net.c)40
-rw-r--r--kernel/linux/meson.build42
-rw-r--r--kernel/meson.build4
-rw-r--r--lib/Makefile44
-rw-r--r--lib/librte_bbdev/rte_bbdev.c17
-rw-r--r--lib/librte_bbdev/rte_bbdev.h8
-rw-r--r--lib/librte_bbdev/rte_bbdev_op.h28
-rw-r--r--lib/librte_bitratestats/meson.build1
-rw-r--r--lib/librte_bitratestats/rte_bitrate.c6
-rw-r--r--lib/librte_bpf/Makefile41
-rw-r--r--lib/librte_bpf/bpf.c61
-rw-r--r--lib/librte_bpf/bpf_def.h143
-rw-r--r--lib/librte_bpf/bpf_exec.c453
-rw-r--r--lib/librte_bpf/bpf_impl.h55
-rw-r--r--lib/librte_bpf/bpf_jit_x86.c1356
-rw-r--r--lib/librte_bpf/bpf_load.c148
-rw-r--r--lib/librte_bpf/bpf_load_elf.c322
-rw-r--r--lib/librte_bpf/bpf_pkt.c605
-rw-r--r--lib/librte_bpf/bpf_validate.c2248
-rw-r--r--lib/librte_bpf/meson.build25
-rw-r--r--lib/librte_bpf/rte_bpf.h203
-rw-r--r--lib/librte_bpf/rte_bpf_ethdev.h117
-rw-r--r--lib/librte_bpf/rte_bpf_version.map16
-rw-r--r--lib/librte_cmdline/cmdline_parse.c13
-rw-r--r--lib/librte_cmdline/cmdline_parse_etheraddr.c2
-rw-r--r--lib/librte_cmdline/cmdline_parse_ipaddr.c223
-rw-r--r--lib/librte_cmdline/cmdline_parse_portlist.c2
-rw-r--r--lib/librte_cmdline/cmdline_parse_string.c4
-rw-r--r--lib/librte_compat/Makefile33
-rw-r--r--lib/librte_compressdev/Makefile31
-rw-r--r--lib/librte_compressdev/meson.build12
-rw-r--r--lib/librte_compressdev/rte_comp.c215
-rw-r--r--lib/librte_compressdev/rte_comp.h485
-rw-r--r--lib/librte_compressdev/rte_compressdev.c772
-rw-r--r--lib/librte_compressdev/rte_compressdev.h540
-rw-r--r--lib/librte_compressdev/rte_compressdev_internal.h114
-rw-r--r--lib/librte_compressdev/rte_compressdev_pmd.c160
-rw-r--r--lib/librte_compressdev/rte_compressdev_pmd.h390
-rw-r--r--lib/librte_compressdev/rte_compressdev_version.map39
-rw-r--r--lib/librte_cryptodev/Makefile1
-rw-r--r--lib/librte_cryptodev/meson.build5
-rw-r--r--lib/librte_cryptodev/rte_crypto.h82
-rw-r--r--lib/librte_cryptodev/rte_crypto_asym.h496
-rw-r--r--lib/librte_cryptodev/rte_crypto_sym.h17
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.c430
-rw-r--r--lib/librte_cryptodev/rte_cryptodev.h407
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_pmd.c12
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_pmd.h157
-rw-r--r--lib/librte_cryptodev/rte_cryptodev_version.map33
-rw-r--r--lib/librte_eal/bsdapp/Makefile2
-rw-r--r--lib/librte_eal/bsdapp/eal/Makefile11
-rw-r--r--lib/librte_eal/bsdapp/eal/eal.c290
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_alarm.c299
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_alarm_private.h19
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_cpuflags.c21
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_dev.c21
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_hugepage_info.c69
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_interrupts.c464
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_memalloc.c54
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_memory.c471
-rw-r--r--lib/librte_eal/bsdapp/eal/eal_thread.c2
-rw-r--r--lib/librte_eal/bsdapp/eal/meson.build5
-rw-r--r--lib/librte_eal/common/Makefile4
-rw-r--r--lib/librte_eal/common/arch/arm/rte_cpuflags.c54
-rw-r--r--lib/librte_eal/common/arch/arm/rte_hypervisor.c2
-rw-r--r--lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c15
-rw-r--r--lib/librte_eal/common/arch/ppc_64/rte_hypervisor.c2
-rw-r--r--lib/librte_eal/common/arch/x86/rte_hypervisor.c2
-rw-r--r--lib/librte_eal/common/eal_common_bus.c3
-rw-r--r--lib/librte_eal/common/eal_common_class.c64
-rw-r--r--lib/librte_eal/common/eal_common_dev.c443
-rw-r--r--lib/librte_eal/common/eal_common_devargs.c228
-rw-r--r--lib/librte_eal/common/eal_common_fbarray.c1239
-rw-r--r--lib/librte_eal/common/eal_common_hypervisor.c2
-rw-r--r--lib/librte_eal/common/eal_common_lcore.c75
-rw-r--r--lib/librte_eal/common/eal_common_log.c121
-rw-r--r--lib/librte_eal/common/eal_common_memalloc.c364
-rw-r--r--lib/librte_eal/common/eal_common_memory.c528
-rw-r--r--lib/librte_eal/common/eal_common_memzone.c290
-rw-r--r--lib/librte_eal/common/eal_common_options.c192
-rw-r--r--lib/librte_eal/common/eal_common_proc.c713
-rw-r--r--lib/librte_eal/common/eal_common_thread.c98
-rw-r--r--lib/librte_eal/common/eal_common_uuid.c193
-rw-r--r--lib/librte_eal/common/eal_filesystem.h70
-rw-r--r--lib/librte_eal/common/eal_hugepages.h11
-rw-r--r--lib/librte_eal/common/eal_internal_cfg.h20
-rw-r--r--lib/librte_eal/common/eal_memalloc.h82
-rw-r--r--lib/librte_eal/common/eal_options.h8
-rw-r--r--lib/librte_eal/common/eal_private.h99
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_atomic.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_atomic_32.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_byteorder.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_cpuflags.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_cycles.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_cycles_32.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_memcpy.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_prefetch.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_prefetch_32.h32
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_rwlock.h2
-rw-r--r--lib/librte_eal/common/include/arch/arm/rte_spinlock.h32
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h23
-rw-r--r--lib/librte_eal/common/include/arch/ppc_64/rte_rwlock.h2
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_atomic.h24
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_atomic_32.h12
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_atomic_64.h12
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_memcpy.h24
-rw-r--r--lib/librte_eal/common/include/arch/x86/rte_spinlock.h4
-rw-r--r--lib/librte_eal/common/include/generic/rte_atomic.h90
-rw-r--r--lib/librte_eal/common/include/generic/rte_byteorder.h6
-rw-r--r--lib/librte_eal/common/include/generic/rte_cpuflags.h21
-rw-r--r--lib/librte_eal/common/include/generic/rte_rwlock.h4
-rw-r--r--lib/librte_eal/common/include/rte_bitmap.h8
-rw-r--r--lib/librte_eal/common/include/rte_bus.h4
-rw-r--r--lib/librte_eal/common/include/rte_class.h134
-rw-r--r--lib/librte_eal/common/include/rte_common.h160
-rw-r--r--lib/librte_eal/common/include/rte_dev.h211
-rw-r--r--lib/librte_eal/common/include/rte_devargs.h173
-rw-r--r--lib/librte_eal/common/include/rte_eal.h54
-rw-r--r--lib/librte_eal/common/include/rte_eal_interrupts.h1
-rw-r--r--lib/librte_eal/common/include/rte_eal_memconfig.h28
-rw-r--r--lib/librte_eal/common/include/rte_fbarray.h470
-rw-r--r--lib/librte_eal/common/include/rte_hypervisor.h2
-rw-r--r--lib/librte_eal/common/include/rte_lcore.h60
-rw-r--r--lib/librte_eal/common/include/rte_log.h40
-rw-r--r--lib/librte_eal/common/include/rte_malloc.h10
-rw-r--r--lib/librte_eal/common/include/rte_malloc_heap.h6
-rw-r--r--lib/librte_eal/common/include/rte_memory.h330
-rw-r--r--lib/librte_eal/common/include/rte_memzone.h45
-rw-r--r--lib/librte_eal/common/include/rte_pci_dev_feature_defs.h58
-rw-r--r--lib/librte_eal/common/include/rte_pci_dev_features.h58
-rw-r--r--lib/librte_eal/common/include/rte_random.h6
-rw-r--r--lib/librte_eal/common/include/rte_service.h167
-rw-r--r--lib/librte_eal/common/include/rte_service_component.h38
-rw-r--r--lib/librte_eal/common/include/rte_string_fns.h31
-rw-r--r--lib/librte_eal/common/include/rte_tailq.h3
-rw-r--r--lib/librte_eal/common/include/rte_uuid.h129
-rw-r--r--lib/librte_eal/common/include/rte_version.h2
-rw-r--r--lib/librte_eal/common/include/rte_vfio.h243
-rw-r--r--lib/librte_eal/common/malloc_elem.c479
-rw-r--r--lib/librte_eal/common/malloc_elem.h51
-rw-r--r--lib/librte_eal/common/malloc_heap.c868
-rw-r--r--lib/librte_eal/common/malloc_heap.h19
-rw-r--r--lib/librte_eal/common/malloc_mp.c743
-rw-r--r--lib/librte_eal/common/malloc_mp.h86
-rw-r--r--lib/librte_eal/common/meson.build8
-rw-r--r--lib/librte_eal/common/rte_malloc.c85
-rw-r--r--lib/librte_eal/common/rte_service.c130
-rw-r--r--lib/librte_eal/linuxapp/Makefile2
-rw-r--r--lib/librte_eal/linuxapp/eal/Makefile12
-rw-r--r--lib/librte_eal/linuxapp/eal/eal.c269
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_alarm.c9
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_cpuflags.c84
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_dev.c224
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_hugepage_info.c253
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_interrupts.c44
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_log.c13
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_memalloc.c1363
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_memory.c1520
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_thread.c6
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_timer.c12
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_vfio.c1580
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_vfio.h60
-rw-r--r--lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c402
-rw-r--r--lib/librte_eal/linuxapp/eal/meson.build4
-rw-r--r--lib/librte_eal/linuxapp/igb_uio/Kbuild1
-rw-r--r--lib/librte_eal/meson.build27
-rw-r--r--lib/librte_eal/rte_eal_version.map120
-rw-r--r--lib/librte_ethdev/Makefile (renamed from lib/librte_ether/Makefile)2
-rw-r--r--lib/librte_ethdev/ethdev_profile.c (renamed from lib/librte_ether/ethdev_profile.c)0
-rw-r--r--lib/librte_ethdev/ethdev_profile.h (renamed from lib/librte_ether/ethdev_profile.h)0
-rw-r--r--lib/librte_ethdev/meson.build (renamed from lib/librte_ether/meson.build)4
-rw-r--r--lib/librte_ethdev/rte_dev_info.h (renamed from lib/librte_ether/rte_dev_info.h)18
-rw-r--r--lib/librte_ethdev/rte_eth_ctrl.h (renamed from lib/librte_ether/rte_eth_ctrl.h)3
-rw-r--r--lib/librte_ethdev/rte_ethdev.c (renamed from lib/librte_ether/rte_ethdev.c)1429
-rw-r--r--lib/librte_ethdev/rte_ethdev.h (renamed from lib/librte_ether/rte_ethdev.h)396
-rw-r--r--lib/librte_ethdev/rte_ethdev_core.h (renamed from lib/librte_ether/rte_ethdev_core.h)14
-rw-r--r--lib/librte_ethdev/rte_ethdev_driver.h357
-rw-r--r--lib/librte_ethdev/rte_ethdev_pci.h (renamed from lib/librte_ether/rte_ethdev_pci.h)24
-rw-r--r--lib/librte_ethdev/rte_ethdev_vdev.h (renamed from lib/librte_ether/rte_ethdev_vdev.h)0
-rw-r--r--lib/librte_ethdev/rte_ethdev_version.map (renamed from lib/librte_ether/rte_ethdev_version.map)47
-rw-r--r--lib/librte_ethdev/rte_flow.c (renamed from lib/librte_ether/rte_flow.c)311
-rw-r--r--lib/librte_ethdev/rte_flow.h (renamed from lib/librte_ether/rte_flow.h)1003
-rw-r--r--lib/librte_ethdev/rte_flow_driver.h (renamed from lib/librte_ether/rte_flow_driver.h)67
-rw-r--r--lib/librte_ethdev/rte_mtr.c (renamed from lib/librte_ether/rte_mtr.c)0
-rw-r--r--lib/librte_ethdev/rte_mtr.h (renamed from lib/librte_ether/rte_mtr.h)0
-rw-r--r--lib/librte_ethdev/rte_mtr_driver.h (renamed from lib/librte_ether/rte_mtr_driver.h)0
-rw-r--r--lib/librte_ethdev/rte_tm.c (renamed from lib/librte_ether/rte_tm.c)0
-rw-r--r--lib/librte_ethdev/rte_tm.h (renamed from lib/librte_ether/rte_tm.h)59
-rw-r--r--lib/librte_ethdev/rte_tm_driver.h (renamed from lib/librte_ether/rte_tm_driver.h)0
-rw-r--r--lib/librte_ether/rte_ethdev_driver.h132
-rw-r--r--lib/librte_eventdev/Makefile15
-rw-r--r--lib/librte_eventdev/meson.build19
-rw-r--r--lib/librte_eventdev/rte_event_crypto_adapter.c1128
-rw-r--r--lib/librte_eventdev/rte_event_crypto_adapter.h575
-rw-r--r--lib/librte_eventdev/rte_event_eth_rx_adapter.c1662
-rw-r--r--lib/librte_eventdev/rte_event_eth_rx_adapter.h129
-rw-r--r--lib/librte_eventdev/rte_event_ring.c15
-rw-r--r--lib/librte_eventdev/rte_event_ring.h4
-rw-r--r--lib/librte_eventdev/rte_event_timer_adapter.c1299
-rw-r--r--lib/librte_eventdev/rte_event_timer_adapter.h766
-rw-r--r--lib/librte_eventdev/rte_event_timer_adapter_pmd.h114
-rw-r--r--lib/librte_eventdev/rte_eventdev.c79
-rw-r--r--lib/librte_eventdev/rte_eventdev.h167
-rw-r--r--lib/librte_eventdev/rte_eventdev_pmd.h225
-rw-r--r--lib/librte_eventdev/rte_eventdev_version.map39
-rw-r--r--lib/librte_flow_classify/rte_flow_classify.c9
-rw-r--r--lib/librte_flow_classify/rte_flow_classify_parse.c24
-rw-r--r--lib/librte_gso/Makefile1
-rw-r--r--lib/librte_gso/gso_common.h3
-rw-r--r--lib/librte_gso/gso_udp4.c81
-rw-r--r--lib/librte_gso/gso_udp4.h42
-rw-r--r--lib/librte_gso/meson.build2
-rw-r--r--lib/librte_gso/rte_gso.c24
-rw-r--r--lib/librte_gso/rte_gso.h6
-rw-r--r--lib/librte_hash/meson.build1
-rw-r--r--lib/librte_hash/rte_cuckoo_hash.c700
-rw-r--r--lib/librte_hash/rte_cuckoo_hash.h22
-rw-r--r--lib/librte_hash/rte_cuckoo_hash_x86.h164
-rw-r--r--lib/librte_hash/rte_hash.h88
-rw-r--r--lib/librte_hash/rte_hash_crc.h11
-rw-r--r--lib/librte_hash/rte_hash_version.map8
-rw-r--r--lib/librte_ip_frag/ip_frag_internal.c8
-rw-r--r--lib/librte_ip_frag/rte_ipv4_reassembly.c2
-rw-r--r--lib/librte_ip_frag/rte_ipv6_reassembly.c2
-rw-r--r--lib/librte_kni/meson.build2
-rw-r--r--lib/librte_kni/rte_kni.c7
-rw-r--r--lib/librte_kvargs/Makefile34
-rw-r--r--lib/librte_kvargs/meson.build5
-rw-r--r--lib/librte_kvargs/rte_kvargs.c61
-rw-r--r--lib/librte_kvargs/rte_kvargs.h58
-rw-r--r--lib/librte_kvargs/rte_kvargs_version.map8
-rw-r--r--lib/librte_latencystats/rte_latencystats.c16
-rw-r--r--lib/librte_mbuf/Makefile3
-rw-r--r--lib/librte_mbuf/meson.build1
-rw-r--r--lib/librte_mbuf/rte_mbuf.c26
-rw-r--r--lib/librte_mbuf/rte_mbuf.h540
-rw-r--r--lib/librte_mbuf/rte_mbuf_pool_ops.c14
-rw-r--r--lib/librte_mbuf/rte_mbuf_pool_ops.h13
-rw-r--r--lib/librte_mbuf/rte_mbuf_ptype.c3
-rw-r--r--lib/librte_mbuf/rte_mbuf_ptype.h53
-rw-r--r--lib/librte_mbuf/rte_mbuf_version.map4
-rw-r--r--lib/librte_member/rte_member.c5
-rw-r--r--lib/librte_mempool/Makefile7
-rw-r--r--lib/librte_mempool/meson.build18
-rw-r--r--lib/librte_mempool/rte_mempool.c546
-rw-r--r--lib/librte_mempool/rte_mempool.h588
-rw-r--r--lib/librte_mempool/rte_mempool_ops.c51
-rw-r--r--lib/librte_mempool/rte_mempool_ops_default.c70
-rw-r--r--lib/librte_mempool/rte_mempool_version.map23
-rw-r--r--lib/librte_meter/Makefile2
-rw-r--r--lib/librte_meter/meson.build1
-rw-r--r--lib/librte_meter/rte_meter.c93
-rw-r--r--lib/librte_meter/rte_meter.h197
-rw-r--r--lib/librte_meter/rte_meter_version.map7
-rw-r--r--lib/librte_metrics/rte_metrics.c23
-rw-r--r--lib/librte_net/Makefile1
-rw-r--r--lib/librte_net/meson.build1
-rw-r--r--lib/librte_net/rte_esp.h2
-rw-r--r--lib/librte_net/rte_ether.h31
-rw-r--r--lib/librte_net/rte_ip.h42
-rw-r--r--lib/librte_net/rte_net.c21
-rw-r--r--lib/librte_net/rte_net.h27
-rw-r--r--lib/librte_net/rte_net_version.map3
-rw-r--r--lib/librte_pci/Makefile32
-rw-r--r--lib/librte_pci/rte_pci.c11
-rw-r--r--lib/librte_pci/rte_pci_version.map5
-rw-r--r--lib/librte_pdump/Makefile3
-rw-r--r--lib/librte_pdump/meson.build2
-rw-r--r--lib/librte_pdump/rte_pdump.c428
-rw-r--r--lib/librte_pdump/rte_pdump.h9
-rw-r--r--lib/librte_pipeline/Makefile7
-rw-r--r--lib/librte_pipeline/meson.build7
-rw-r--r--lib/librte_pipeline/rte_pipeline_version.map28
-rw-r--r--lib/librte_pipeline/rte_port_in_action.c531
-rw-r--r--lib/librte_pipeline/rte_port_in_action.h301
-rw-r--r--lib/librte_pipeline/rte_table_action.c2386
-rw-r--r--lib/librte_pipeline/rte_table_action.h905
-rw-r--r--lib/librte_port/meson.build12
-rw-r--r--lib/librte_power/channel_commands.h3
-rw-r--r--lib/librte_power/power_acpi_cpufreq.c21
-rw-r--r--lib/librte_power/power_acpi_cpufreq.h16
-rw-r--r--lib/librte_power/power_kvm_vm.c8
-rw-r--r--lib/librte_power/power_kvm_vm.h17
-rw-r--r--lib/librte_power/rte_power.c3
-rw-r--r--lib/librte_power/rte_power.h32
-rw-r--r--lib/librte_power/rte_power_version.map9
-rw-r--r--lib/librte_rawdev/Makefile1
-rw-r--r--lib/librte_rawdev/meson.build5
-rw-r--r--lib/librte_rawdev/rte_rawdev.c76
-rw-r--r--lib/librte_rawdev/rte_rawdev.h55
-rw-r--r--lib/librte_rawdev/rte_rawdev_pmd.h28
-rw-r--r--lib/librte_rawdev/rte_rawdev_version.map3
-rw-r--r--lib/librte_ring/Makefile2
-rw-r--r--lib/librte_ring/rte_ring.h27
-rw-r--r--lib/librte_ring/rte_ring_c11_mem.h10
-rw-r--r--lib/librte_ring/rte_ring_generic.h10
-rw-r--r--lib/librte_sched/rte_sched.c239
-rw-r--r--lib/librte_sched/rte_sched.h21
-rw-r--r--lib/librte_sched/rte_sched_version.map6
-rw-r--r--lib/librte_security/rte_security.c37
-rw-r--r--lib/librte_security/rte_security.h50
-rw-r--r--lib/librte_security/rte_security_driver.h40
-rw-r--r--lib/librte_table/Makefile1
-rw-r--r--lib/librte_table/meson.build37
-rw-r--r--lib/librte_table/rte_table_acl.c6
-rw-r--r--lib/librte_table/rte_table_hash.h3
-rw-r--r--lib/librte_table/rte_table_hash_cuckoo.c11
-rw-r--r--lib/librte_table/rte_table_hash_cuckoo.h57
-rw-r--r--lib/librte_timer/rte_timer.c2
-rw-r--r--lib/librte_vhost/Makefile13
-rw-r--r--lib/librte_vhost/fd_man.c98
-rw-r--r--lib/librte_vhost/fd_man.h17
-rw-r--r--lib/librte_vhost/iotlb.c10
-rw-r--r--lib/librte_vhost/iotlb.h2
-rw-r--r--lib/librte_vhost/meson.build11
-rw-r--r--lib/librte_vhost/rte_vdpa.h87
-rw-r--r--lib/librte_vhost/rte_vhost.h212
-rw-r--r--lib/librte_vhost/rte_vhost_crypto.h109
-rw-r--r--lib/librte_vhost/rte_vhost_version.map24
-rw-r--r--lib/librte_vhost/socket.c288
-rw-r--r--lib/librte_vhost/vdpa.c115
-rw-r--r--lib/librte_vhost/vhost.c312
-rw-r--r--lib/librte_vhost/vhost.h358
-rw-r--r--lib/librte_vhost/vhost_crypto.c1372
-rw-r--r--lib/librte_vhost/vhost_user.c581
-rw-r--r--lib/librte_vhost/vhost_user.h56
-rw-r--r--lib/librte_vhost/virtio_crypto.h422
-rw-r--r--lib/librte_vhost/virtio_net.c1460
-rw-r--r--lib/meson.build30
-rw-r--r--meson.build7
-rw-r--r--meson_options.txt2
-rw-r--r--mk/arch/arm/rte.vars.mk32
-rw-r--r--mk/machine/armv7a/rte.vars.mk31
-rw-r--r--mk/machine/dpaa/rte.vars.mk3
-rw-r--r--mk/machine/dpaa2/rte.vars.mk3
-rw-r--r--mk/rte.app.mk135
-rw-r--r--mk/rte.extshared.mk29
-rw-r--r--mk/rte.extsubdir.mk31
-rw-r--r--mk/rte.sdkbuild.mk1
-rw-r--r--mk/rte.sdkconfig.mk19
-rw-r--r--mk/rte.sdkdoc.mk34
-rw-r--r--mk/rte.sdkexamples.mk31
-rw-r--r--mk/rte.sdkinstall.mk70
-rw-r--r--mk/rte.sdkroot.mk4
-rw-r--r--mk/rte.sdktest.mk32
-rw-r--r--mk/rte.shared.mk29
-rw-r--r--mk/toolchain/gcc/rte.toolchain-compat.mk13
-rw-r--r--mk/toolchain/gcc/rte.vars.mk9
-rw-r--r--pkg/dpdk.spec32
-rw-r--r--test/bpf/dummy.c20
-rw-r--r--test/bpf/mbuf.h578
-rw-r--r--test/bpf/t1.c52
-rw-r--r--test/bpf/t2.c31
-rw-r--r--test/bpf/t3.c36
-rw-r--r--test/test-pipeline/init.c6
-rw-r--r--test/test-pipeline/main.h4
-rw-r--r--test/test-pipeline/pipeline_hash.c26
-rw-r--r--test/test/Makefile19
-rw-r--r--test/test/autotest.py13
-rw-r--r--test/test/autotest_data.py1087
-rw-r--r--test/test/autotest_runner.py519
-rw-r--r--test/test/commands.c5
-rw-r--r--test/test/meson.build22
-rw-r--r--test/test/process.h29
-rw-r--r--test/test/resource.c33
-rw-r--r--test/test/resource.h33
-rw-r--r--test/test/test_bpf.c1926
-rw-r--r--test/test/test_cmdline_cirbuf.c2
-rw-r--r--test/test/test_cmdline_ipaddr.c2
-rw-r--r--test/test/test_common.c38
-rw-r--r--test/test/test_compressdev.c1486
-rw-r--r--test/test/test_compressdev_test_buffer.h295
-rw-r--r--test/test/test_cryptodev.c472
-rw-r--r--test/test/test_cryptodev.h5
-rw-r--r--test/test/test_cryptodev_aes_test_vectors.h109
-rw-r--r--test/test/test_cryptodev_asym.c1369
-rw-r--r--test/test/test_cryptodev_asym_util.h42
-rw-r--r--test/test/test_cryptodev_blockcipher.c103
-rw-r--r--test/test/test_cryptodev_blockcipher.h4
-rw-r--r--test/test/test_cryptodev_des_test_vectors.h90
-rw-r--r--test/test/test_cryptodev_dh_test_vectors.h80
-rw-r--r--test/test/test_cryptodev_dsa_test_vectors.h117
-rw-r--r--test/test/test_cryptodev_hash_test_vectors.h158
-rw-r--r--test/test/test_cryptodev_mod_test_vectors.h103
-rw-r--r--test/test/test_cryptodev_rsa_test_vectors.h88
-rw-r--r--test/test/test_devargs.c103
-rw-r--r--test/test/test_distributor_perf.c3
-rw-r--r--test/test/test_eal_flags.c92
-rw-r--r--test/test/test_event_crypto_adapter.c928
-rw-r--r--test/test/test_event_eth_rx_adapter.c344
-rw-r--r--test/test/test_event_timer_adapter.c1830
-rw-r--r--test/test/test_fbarray.c576
-rw-r--r--test/test/test_flow_classify.c20
-rw-r--r--test/test/test_hash.c12
-rw-r--r--test/test/test_hash_multiwriter.c58
-rw-r--r--test/test/test_hash_perf.c36
-rw-r--r--test/test/test_hash_readwrite.c637
-rw-r--r--test/test/test_interrupts.c39
-rw-r--r--test/test/test_kni.c43
-rw-r--r--test/test/test_link_bonding.c31
-rw-r--r--test/test/test_link_bonding_mode4.c8
-rw-r--r--test/test/test_link_bonding_rssconf.c12
-rw-r--r--test/test/test_malloc.c32
-rw-r--r--test/test/test_memory.c27
-rw-r--r--test/test/test_mempool.c46
-rw-r--r--test/test/test_memzone.c227
-rw-r--r--test/test/test_meter.c194
-rw-r--r--test/test/test_mp_secondary.c26
-rw-r--r--test/test/test_pmd_perf.c35
-rw-r--r--test/test/test_pmd_ring.c6
-rw-r--r--test/test/test_power_acpi_cpufreq.c46
-rw-r--r--test/test/test_power_kvm_vm.c21
-rw-r--r--test/test/test_reorder.c47
-rw-r--r--test/test/test_resource.c33
-rw-r--r--test/test/test_service_cores.c116
-rw-r--r--test/test/test_table.c11
-rw-r--r--test/test/test_table.h6
-rw-r--r--test/test/test_table_combined.c4
-rw-r--r--test/test/test_table_pipeline.c14
-rw-r--r--test/test/test_table_tables.c6
-rw-r--r--test/test/virtual_pmd.c6
-rwxr-xr-xusertools/cpu_layout.py36
-rwxr-xr-xusertools/dpdk-devbind.py19
-rwxr-xr-xusertools/dpdk-pmdinfo.py2
2031 files changed, 272651 insertions, 93265 deletions
diff --git a/.gitignore b/.gitignore
index 6df5ba06..9105e26c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@ doc/guides/cryptodevs/overview_feature_table.txt
doc/guides/cryptodevs/overview_cipher_table.txt
doc/guides/cryptodevs/overview_auth_table.txt
doc/guides/cryptodevs/overview_aead_table.txt
+doc/guides/compressdevs/overview_feature_table.txt
cscope.out.po
cscope.out.in
cscope.out
diff --git a/GNUmakefile b/GNUmakefile
index d07fef0d..ae80720e 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -12,7 +12,7 @@ export RTE_SDK
# directory list
#
-ROOTDIRS-y := buildtools lib drivers app
+ROOTDIRS-y := buildtools lib kernel drivers app
ROOTDIRS- := test
include $(RTE_SDK)/mk/rte.sdkroot.mk
diff --git a/MAINTAINERS b/MAINTAINERS
index a646ca3e..9fd258fa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -27,6 +27,41 @@ M: Thomas Monjalon <thomas@monjalon.net>
M: Ferruh Yigit <ferruh.yigit@intel.com>
T: git://dpdk.org/dpdk
+Next-net Tree
+M: Ferruh Yigit <ferruh.yigit@intel.com>
+T: git://dpdk.org/next/dpdk-next-net
+
+Next-net-intel Tree
+M: Qi Zhang <qi.z.zhang@intel.com>
+M: Beilei Xing <beilei.xing@intel.com>
+T: git://dpdk.org/next/dpdk-next-net-intel
+
+Next-net-mlx Tree
+M: Shahaf Shuler <shahafs@mellanox.com>
+T: git://dpdk.org/next/dpdk-next-net-mlx
+
+Next-virtio Tree
+M: Maxime Coquelin <maxime.coquelin@redhat.com>
+M: Tiwei Bie <tiwei.bie@intel.com>
+T: git://dpdk.org/next/dpdk-next-virtio
+
+Next-crypto Tree
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
+M: Akhil Goyal <akhil.goyal@nxp.com>
+T: git://dpdk.org/next/dpdk-next-crypto
+
+Next-eventdev Tree
+M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+T: git://dpdk.org/next/dpdk-next-eventdev
+
+Next-qos Tree
+M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
+T: git://dpdk.org/next/dpdk-next-qos
+
+Next-pipeline Tree
+M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
+T: git://dpdk.org/next/dpdk-next-pipeline
+
Stable Branches
M: Yuanhan Liu <yliu@fridaylinux.org>
M: Luca Boccassi <bluca@debian.org>
@@ -48,11 +83,13 @@ F: devtools/check-dup-includes.sh
F: devtools/check-maintainers.sh
F: devtools/check-git-log.sh
F: devtools/check-includes.sh
+F: devtools/check-symbol-maps.sh
F: devtools/checkpatches.sh
F: devtools/get-maintainer.sh
F: devtools/git-log-fixes.sh
F: devtools/load-devel-config
F: devtools/test-build.sh
+F: devtools/test-meson-builds.sh
F: license/
@@ -87,6 +124,7 @@ M: Neil Horman <nhorman@tuxdriver.com>
F: lib/librte_compat/
F: doc/guides/rel_notes/deprecation.rst
F: devtools/validate-abi.sh
+F: devtools/check-symbol-change.sh
F: buildtools/check-experimental-syms.sh
Driver information
@@ -113,7 +151,6 @@ F: test/test/test_common.c
F: test/test/test_cpuflags.c
F: test/test/test_cycles.c
F: test/test/test_debug.c
-F: test/test/test_devargs.c
F: test/test/test_eal*
F: test/test/test_errno.c
F: test/test/test_interrupts.c
@@ -130,13 +167,18 @@ F: test/test/test_version.c
Memory Allocation
M: Anatoly Burakov <anatoly.burakov@intel.com>
+F: lib/librte_eal/common/include/rte_fbarray.h
F: lib/librte_eal/common/include/rte_mem*
F: lib/librte_eal/common/include/rte_malloc.h
F: lib/librte_eal/common/*malloc*
+F: lib/librte_eal/common/eal_common_fbarray.c
F: lib/librte_eal/common/eal_common_mem*
F: lib/librte_eal/common/eal_hugepages.h
+F: lib/librte_eal/linuxapp/eal/eal_mem*
+F: lib/librte_eal/bsdapp/eal/eal_mem*
F: doc/guides/prog_guide/env_abstraction_layer.rst
F: test/test/test_func_reentrancy.c
+F: test/test/test_fbarray.c
F: test/test/test_malloc.c
F: test/test/test_memory.c
F: test/test/test_memzone.c
@@ -156,7 +198,7 @@ F: test/test/test_mp_secondary.c
F: examples/multi_process/
F: doc/guides/sample_app_ug/multi_process.rst
-Service Cores - EXPERIMENTAL
+Service Cores
M: Harry van Haaren <harry.van.haaren@intel.com>
F: lib/librte_eal/common/include/rte_service.h
F: lib/librte_eal/common/include/rte_service_component.h
@@ -171,13 +213,13 @@ F: test/test/test_bitmap.c
ARM v7
M: Jan Viktorin <viktorin@rehivetech.com>
-M: Jianbo Liu <jianbo.liu@arm.com>
+M: Gavin Hu <gavin.hu@arm.com>
F: lib/librte_eal/common/arch/arm/
F: lib/librte_eal/common/include/arch/arm/
ARM v8
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
-M: Jianbo Liu <jianbo.liu@arm.com>
+M: Gavin Hu <gavin.hu@arm.com>
F: lib/librte_eal/common/include/arch/arm/*_64.h
F: lib/librte_net/net_crc_neon.h
F: lib/librte_acl/acl_run_neon.*
@@ -209,7 +251,7 @@ F: doc/guides/linux_gsg/
Linux UIO
M: Ferruh Yigit <ferruh.yigit@intel.com>
-F: lib/librte_eal/linuxapp/igb_uio/
+F: kernel/linux/igb_uio/
F: drivers/bus/pci/linux/*uio*
Linux VFIO
@@ -225,11 +267,11 @@ F: doc/guides/freebsd_gsg/
FreeBSD contigmem
M: Bruce Richardson <bruce.richardson@intel.com>
-F: lib/librte_eal/bsdapp/contigmem/
+F: kernel/freebsd/contigmem/
FreeBSD UIO
M: Bruce Richardson <bruce.richardson@intel.com>
-F: lib/librte_eal/bsdapp/nic_uio/
+F: kernel/freebsd/nic_uio/
Core Libraries
@@ -237,6 +279,7 @@ Core Libraries
Memory pool
M: Olivier Matz <olivier.matz@6wind.com>
+M: Andrew Rybchenko <arybchenko@solarflare.com>
F: lib/librte_mempool/
F: drivers/mempool/Makefile
F: drivers/mempool/ring/
@@ -260,29 +303,34 @@ F: test/test/test_mbuf.c
Ethernet API
M: Thomas Monjalon <thomas@monjalon.net>
+M: Ferruh Yigit <ferruh.yigit@intel.com>
+M: Andrew Rybchenko <arybchenko@solarflare.com>
T: git://dpdk.org/next/dpdk-next-net
-F: lib/librte_ether/
+F: lib/librte_ethdev/
F: devtools/test-null.sh
Flow API
M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
T: git://dpdk.org/next/dpdk-next-net
-F: lib/librte_ether/rte_flow*
+F: app/test-pmd/cmdline_flow.c
+F: doc/guides/prog_guide/rte_flow.rst
+F: lib/librte_ethdev/rte_flow*
Traffic Management API - EXPERIMENTAL
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
T: git://dpdk.org/next/dpdk-next-tm
-F: lib/librte_ether/rte_tm*
+F: lib/librte_ethdev/rte_tm*
Traffic Metering and Policing API - EXPERIMENTAL
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
-F: lib/librte_ether/rte_mtr*
+F: lib/librte_ethdev/rte_mtr*
Baseband API - EXPERIMENTAL
M: Amr Mokhtar <amr.mokhtar@intel.com>
+T: git://dpdk.org/next/dpdk-next-crypto
F: lib/librte_bbdev/
F: doc/guides/prog_guide/bbdev.rst
-F: drivers/bbdev/
+F: drivers/baseband/
F: doc/guides/bbdevs/
F: app/test-bbdev/
F: doc/guides/tools/testbbdev.rst
@@ -300,9 +348,21 @@ F: examples/l2fwd-crypto/
Security API - EXPERIMENTAL
M: Akhil Goyal <akhil.goyal@nxp.com>
M: Declan Doherty <declan.doherty@intel.com>
+T: git://dpdk.org/next/dpdk-next-crypto
F: lib/librte_security/
F: doc/guides/prog_guide/rte_security.rst
+Compression API - EXPERIMENTAL
+M: Fiona Trahe <fiona.trahe@intel.com>
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
+M: Ashish Gupta <ashish.gupta@caviumnetworks.com>
+T: git://dpdk.org/next/dpdk-next-crypto
+F: lib/librte_compressdev/
+F: drivers/compress/
+F: test/test/test_compressdev*
+F: doc/guides/prog_guide/compressdev.rst
+F: doc/guides/compressdevs/features/default.ini
+
Eventdev API
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
T: git://dpdk.org/next/dpdk-next-eventdev
@@ -317,6 +377,20 @@ F: lib/librte_eventdev/*eth_rx_adapter*
F: test/test/test_event_eth_rx_adapter.c
F: doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+Eventdev Timer Adapter API - EXPERIMENTAL
+M: Erik Gabriel Carrillo <erik.g.carrillo@intel.com>
+T: git://dpdk.org/next/dpdk-next-eventdev
+F: lib/librte_eventdev/*timer_adapter*
+F: test/test/test_event_timer_adapter.c
+F: doc/guides/prog_guide/event_timer_adapter.rst
+
+Eventdev Crypto Adapter API - EXPERIMENTAL
+M: Abhinandan Gujjar <abhinandan.gujjar@intel.com>
+T: git://dpdk.org/next/dpdk-next-eventdev
+F: lib/librte_eventdev/*crypto_adapter*
+F: test/test/test_event_crypto_adapter.c
+F: doc/guides/prog_guide/event_crypto_adapter.rst
+
Raw device API - EXPERIMENTAL
M: Shreyansh Jain <shreyansh.jain@nxp.com>
M: Hemant Agrawal <hemant.agrawal@nxp.com>
@@ -326,9 +400,22 @@ F: test/test/test_rawdev.c
F: doc/guides/prog_guide/rawdev.rst
+Memory Pool Drivers
+-------------------
+
+Bucket memory pool
+M: Artem V. Andreev <artem.andreev@oktetlabs.ru>
+M: Andrew Rybchenko <arybchenko@solarflare.com>
+F: drivers/mempool/bucket/
+
+
Bus Drivers
-----------
+Intel FPGA bus
+M: Rosen Xu <rosen.xu@intel.com>
+F: drivers/bus/ifpga/
+
NXP buses
M: Hemant Agrawal <hemant.agrawal@nxp.com>
M: Shreyansh Jain <shreyansh.jain@nxp.com>
@@ -339,9 +426,12 @@ PCI bus driver
F: drivers/bus/pci/
VDEV bus driver
-M: Jianfeng Tan <jianfeng.tan@intel.com>
F: drivers/bus/vdev/
+VMBUS bus driver
+M: Stephen Hemminger <sthemmin@microsoft.com>
+F: drivers/bus/vmbus/
+
Networking Drivers
------------------
@@ -351,6 +441,7 @@ F: doc/guides/nics/features/default.ini
Link bonding
M: Declan Doherty <declan.doherty@intel.com>
+M: Chas Williams <chas3@att.com>
F: drivers/net/bonding/
F: doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
F: test/test/test_link_bonding*
@@ -359,7 +450,7 @@ F: doc/guides/nics/features/bonding.ini
Linux KNI
M: Ferruh Yigit <ferruh.yigit@intel.com>
-F: lib/librte_eal/linuxapp/kni/
+F: kernel/linux/kni/
F: lib/librte_kni/
F: doc/guides/prog_guide/kernel_nic_interface.rst
F: test/test/test_kni.c
@@ -380,6 +471,12 @@ F: drivers/net/ena/
F: doc/guides/nics/ena.rst
F: doc/guides/nics/features/ena.ini
+AMD axgbe
+M: Ravi Kumar <ravi1.kumar@amd.com>
+F: drivers/net/axgbe/
+F: doc/guides/nics/axgbe.rst
+F: doc/guides/nics/features/axgbe.ini
+
Atomic Rules ARK
M: Shepard Siegel <shepard.siegel@atomicrules.com>
M: Ed Czeck <ed.czeck@atomicrules.com>
@@ -412,6 +509,7 @@ F: doc/guides/nics/features/liquidio.ini
Cavium OcteonTX
M: Santosh Shukla <santosh.shukla@caviumnetworks.com>
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+F: drivers/common/octeontx/
F: drivers/mempool/octeontx/
F: drivers/net/octeontx/
F: doc/guides/nics/octeontx.rst
@@ -462,48 +560,63 @@ M: Qi Zhang <qi.z.zhang@intel.com>
M: Xiao Wang <xiao.w.wang@intel.com>
T: git://dpdk.org/next/dpdk-next-net-intel
F: drivers/net/fm10k/
+F: doc/guides/nics/fm10k.rst
F: doc/guides/nics/features/fm10k*.ini
Intel avf
M: Jingjing Wu <jingjing.wu@intel.com>
M: Wenzhuo Lu <wenzhuo.lu@intel.com>
+T: git://dpdk.org/next/dpdk-next-net-intel
F: drivers/net/avf/
F: doc/guides/nics/features/avf*.ini
+Intel ifc
+M: Xiao Wang <xiao.w.wang@intel.com>
+T: git://dpdk.org/next/dpdk-next-net-intel
+F: drivers/net/ifc/
+F: doc/guides/nics/ifc.rst
+F: doc/guides/nics/features/ifc*.ini
+
+Marvell mvpp2
+M: Tomasz Duszynski <tdu@semihalf.com>
+M: Dmitri Epshtein <dima@marvell.com>
+M: Natalie Samsonov <nsamsono@marvell.com>
+F: drivers/net/mvpp2/
+F: doc/guides/nics/mvpp2.rst
+F: doc/guides/nics/features/mvpp2.ini
+
Mellanox mlx4
-M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
+M: Matan Azrad <matan@mellanox.com>
+M: Shahaf Shuler <shahafs@mellanox.com>
T: git://dpdk.org/next/dpdk-next-net-mlx
F: drivers/net/mlx4/
F: doc/guides/nics/mlx4.rst
F: doc/guides/nics/features/mlx4.ini
Mellanox mlx5
-M: Adrien Mazarguil <adrien.mazarguil@6wind.com>
-M: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
+M: Shahaf Shuler <shahafs@mellanox.com>
M: Yongseok Koh <yskoh@mellanox.com>
T: git://dpdk.org/next/dpdk-next-net-mlx
F: drivers/net/mlx5/
F: doc/guides/nics/mlx5.rst
F: doc/guides/nics/features/mlx5.ini
-Marvell mrvl
-M: Jacek Siuda <jck@semihalf.com>
-M: Tomasz Duszynski <tdu@semihalf.com>
-M: Dmitri Epshtein <dima@marvell.com>
-M: Natalie Samsonov <nsamsono@marvell.com>
-M: Jianbo Liu <jianbo.liu@arm.com>
-F: drivers/net/mrvl/
-F: doc/guides/nics/mrvl.rst
-F: doc/guides/nics/features/mrvl.ini
-
Microsoft vdev_netvsc - EXPERIMENTAL
M: Matan Azrad <matan@mellanox.com>
F: drivers/net/vdev_netvsc/
F: doc/guides/nics/vdev_netvsc.rst
F: doc/guides/nics/features/vdev_netvsc.ini
+Microsoft Hyper-V netvsc - EXPERIMENTAL
+M: Stephen Hemminger <sthemmin@microsoft.com>
+M: K. Y. Srinivasan <kys@microsoft.com>
+M: Haiyang Zhang <haiyangz@microsoft.com>
+F: drivers/net/netvsc/
+F: doc/guides/nics/netvsc.rst
+F: doc/guides/nics/features/netvsc.ini
+
Netcope szedata2
-M: Matej Vido <vido@cesnet.cz>
+M: Jan Remes <remes@netcope.com>
F: drivers/net/szedata2/
F: doc/guides/nics/szedata2.rst
F: doc/guides/nics/features/szedata2.ini
@@ -552,14 +665,15 @@ F: doc/guides/nics/sfc_efx.rst
F: doc/guides/nics/features/sfc_efx.ini
VMware vmxnet3
-M: Shrikrishna Khare <skhare@vmware.com>
+M: Yong Wang <yongwang@vmware.com>
F: drivers/net/vmxnet3/
F: doc/guides/nics/vmxnet3.rst
F: doc/guides/nics/features/vmxnet3.ini
Vhost-user
-M: Yuanhan Liu <yliu@fridaylinux.org>
M: Maxime Coquelin <maxime.coquelin@redhat.com>
+M: Tiwei Bie <tiwei.bie@intel.com>
+M: Zhihong Wang <zhihong.wang@intel.com>
T: git://dpdk.org/next/dpdk-next-virtio
F: lib/librte_vhost/
F: doc/guides/prog_guide/vhost_lib.rst
@@ -567,19 +681,21 @@ F: examples/vhost/
F: doc/guides/sample_app_ug/vhost.rst
F: examples/vhost_scsi/
F: doc/guides/sample_app_ug/vhost_scsi.rst
+F: examples/vhost_crypto/
Vhost PMD
-M: Tetsuya Mukawa <mtetsuyah@gmail.com>
-M: Yuanhan Liu <yliu@fridaylinux.org>
M: Maxime Coquelin <maxime.coquelin@redhat.com>
+M: Tiwei Bie <tiwei.bie@intel.com>
+M: Zhihong Wang <zhihong.wang@intel.com>
T: git://dpdk.org/next/dpdk-next-virtio
F: drivers/net/vhost/
+F: doc/guides/nics/vhost.rst
F: doc/guides/nics/features/vhost.ini
Virtio PMD
-M: Yuanhan Liu <yliu@fridaylinux.org>
M: Maxime Coquelin <maxime.coquelin@redhat.com>
M: Tiwei Bie <tiwei.bie@intel.com>
+M: Zhihong Wang <zhihong.wang@intel.com>
T: git://dpdk.org/next/dpdk-next-virtio
F: drivers/net/virtio/
F: doc/guides/nics/virtio.rst
@@ -599,7 +715,7 @@ F: doc/guides/nics/pcap_ring.rst
F: doc/guides/nics/features/pcap.ini
Tap PMD
-M: Pascal Mazon <pascal.mazon@6wind.com>
+M: Keith Wiles <keith.wiles@intel.com>
F: drivers/net/tap/
F: doc/guides/nics/tap.rst
F: doc/guides/nics/features/tap.ini
@@ -627,11 +743,14 @@ Fail-safe PMD
M: Gaetan Rivet <gaetan.rivet@6wind.com>
F: drivers/net/failsafe/
F: doc/guides/nics/fail_safe.rst
+F: doc/guides/nics/features/failsafe.ini
Softnic PMD
M: Jasvinder Singh <jasvinder.singh@intel.com>
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
F: drivers/net/softnic/
+F: doc/guides/nics/features/softnic.ini
+F: doc/guides/nics/softnic.rst
Crypto Drivers
@@ -640,6 +759,12 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
T: git://dpdk.org/next/dpdk-next-crypto
F: doc/guides/cryptodevs/features/default.ini
+AMD CCP Crypto
+M: Ravi Kumar <ravi1.kumar@amd.com>
+F: drivers/crypto/ccp/
+F: doc/guides/cryptodevs/ccp.rst
+F: doc/guides/cryptodevs/features/ccp.ini
+
ARMv8 Crypto
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
F: drivers/crypto/armv8/
@@ -668,6 +793,7 @@ M: John Griffin <john.griffin@intel.com>
M: Fiona Trahe <fiona.trahe@intel.com>
M: Deepak Kumar Jain <deepak.k.jain@intel.com>
F: drivers/crypto/qat/
+F: drivers/common/qat/
F: doc/guides/cryptodevs/qat.rst
F: doc/guides/cryptodevs/features/qat.ini
@@ -678,14 +804,12 @@ F: doc/guides/cryptodevs/kasumi.rst
F: doc/guides/cryptodevs/features/kasumi.ini
Marvell Mrvl
-M: Jacek Siuda <jck@semihalf.com>
M: Tomasz Duszynski <tdu@semihalf.com>
M: Dmitri Epshtein <dima@marvell.com>
M: Natalie Samsonov <nsamsono@marvell.com>
-M: Jianbo Liu <jianbo.liu@arm.com>
-F: drivers/crypto/mrvl/
-F: doc/guides/cryptodevs/mrvl.rst
-F: doc/guides/cryptodevs/features/mrvl.ini
+F: drivers/crypto/mvsam/
+F: doc/guides/cryptodevs/mvsam.rst
+F: doc/guides/cryptodevs/features/mvsam.ini
Null Crypto
M: Declan Doherty <declan.doherty@intel.com>
@@ -719,6 +843,12 @@ F: drivers/crypto/snow3g/
F: doc/guides/cryptodevs/snow3g.rst
F: doc/guides/cryptodevs/features/snow3g.ini
+Virtio
+M: Jay Zhou <jianjay.zhou@huawei.com>
+F: drivers/crypto/virtio/
+F: doc/guides/cryptodevs/virtio.rst
+F: doc/guides/cryptodevs/features/virtio.ini
+
ZUC
M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
F: drivers/crypto/zuc/
@@ -726,6 +856,35 @@ F: doc/guides/cryptodevs/zuc.rst
F: doc/guides/cryptodevs/features/zuc.ini
+Compression Drivers
+-------------------
+M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
+T: git://dpdk.org/next/dpdk-next-crypto
+
+Cavium OCTEONTX zipvf
+M: Ashish Gupta <ashish.gupta@cavium.com>
+F: drivers/compress/octeontx/
+F: doc/guides/compressdevs/octeontx.rst
+F: doc/guides/compressdevs/features/octeontx.ini
+
+Intel QuickAssist
+M: Fiona Trahe <fiona.trahe@intel.com>
+F: drivers/compress/qat/
+F: drivers/common/qat/
+
+ISA-L
+M: Lee Daly <lee.daly@intel.com>
+F: drivers/compress/isal/
+F: doc/guides/compressdevs/isal.rst
+F: doc/guides/compressdevs/features/isal.ini
+
+ZLIB
+M: Sunila Sahu <sunila.sahu@caviumnetworks.com>
+F: drivers/compress/zlib/
+F: doc/guides/compressdevs/zlib.rst
+F: doc/guides/compressdevs/features/zlib.ini
+
+
Eventdev Drivers
----------------
M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
@@ -737,11 +896,9 @@ M: Santosh Shukla <santosh.shukla@caviumnetworks.com>
F: drivers/event/octeontx/
F: doc/guides/eventdevs/octeontx.rst
-NXP DPAA2 eventdev
-M: Hemant Agrawal <hemant.agrawal@nxp.com>
-M: Nipun Gupta <nipun.gupta@nxp.com>
-F: drivers/event/dpaa2/
-F: doc/guides/eventdevs/dpaa2.rst
+Cavium OCTEONTX timvf
+M: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
+F: drivers/event/octeontx/timvf_*
NXP DPAA eventdev
M: Hemant Agrawal <hemant.agrawal@nxp.com>
@@ -749,6 +906,12 @@ M: Sunil Kumar Kori <sunil.kori@nxp.com>
F: drivers/event/dpaa/
F: doc/guides/eventdevs/dpaa.rst
+NXP DPAA2 eventdev
+M: Hemant Agrawal <hemant.agrawal@nxp.com>
+M: Nipun Gupta <nipun.gupta@nxp.com>
+F: drivers/event/dpaa2/
+F: doc/guides/eventdevs/dpaa2.rst
+
Software Eventdev PMD
M: Harry van Haaren <harry.van.haaren@intel.com>
F: drivers/event/sw/
@@ -763,6 +926,26 @@ F: drivers/event/opdl/
F: doc/guides/eventdevs/opdl.rst
+Rawdev Drivers
+--------------
+
+Intel FPGA
+M: Rosen Xu <rosen.xu@intel.com>
+M: Tianfei zhang <tianfei.zhang@intel.com>
+F: drivers/raw/ifpga_rawdev/
+F: doc/guides/rawdevs/ifpga_rawdev.rst
+
+NXP DPAA2 QDMA
+M: Nipun Gupta <nipun.gupta@nxp.com>
+F: drivers/raw/dpaa2_qdma/
+F: doc/guides/rawdevs/dpaa2_qdma.rst
+
+DPAA2 CMDIF
+M: Nipun Gupta <nipun.gupta@nxp.com>
+F: drivers/raw/dpaa2_cmdif/
+F: doc/guides/rawdevs/dpaa2_cmdif.rst
+
+
Packet processing
-----------------
@@ -792,7 +975,6 @@ F: doc/guides/prog_guide/generic_receive_offload_lib.rst
Generic Segmentation Offload
M: Jiayu Hu <jiayu.hu@intel.com>
-M: Mark Kavanagh <mark.b.kavanagh@intel.com>
F: lib/librte_gso/
F: doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -837,6 +1019,7 @@ F: doc/guides/prog_guide/pdump_lib.rst
F: app/pdump/
F: doc/guides/tools/pdump.rst
+
Packet Framework
----------------
M: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
@@ -903,6 +1086,7 @@ F: test/test/test_meter.c
F: examples/qos_meter/
F: doc/guides/sample_app_ug/qos_metering.rst
+
Other libraries
---------------
@@ -965,6 +1149,13 @@ Latency statistics
M: Reshma Pattan <reshma.pattan@intel.com>
F: lib/librte_latencystats/
+BPF - EXPERIMENTAL
+M: Konstantin Ananyev <konstantin.ananyev@intel.com>
+F: lib/librte_bpf/
+F: test/bpf/
+F: test/test/test_bpf.c
+F: doc/guides/prog_guide/bpf_lib.rst
+
Test Applications
-----------------
@@ -988,6 +1179,7 @@ F: test/test/virtual_pmd.h
Driver testing tool
M: Wenzhuo Lu <wenzhuo.lu@intel.com>
M: Jingjing Wu <jingjing.wu@intel.com>
+M: Bernard Iremonger <bernard.iremonger@intel.com>
F: app/test-pmd/
F: doc/guides/testpmd_app_ug/
@@ -1006,7 +1198,7 @@ F: test/test/test_event_ring.c
Procinfo tool
M: Maryam Tahhan <maryam.tahhan@intel.com>
M: Reshma Pattan <reshma.pattan@intel.com>
-F: app/proc_info/
+F: app/proc-info/
F: doc/guides/tools/proc_info.rst
@@ -1086,7 +1278,7 @@ M: John McNamara <john.mcnamara@intel.com>
F: examples/skeleton/
F: doc/guides/sample_app_ug/skeleton.rst
-M: Jijiang Liu <jijiang.liu@intel.com>
+M: Xiaoyun Li <xiaoyun.li@intel.com>
F: examples/tep_termination/
F: examples/vmdq/
diff --git a/app/Makefile b/app/Makefile
index 0eaed538..069fa984 100644
--- a/app/Makefile
+++ b/app/Makefile
@@ -4,7 +4,7 @@
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd
-DIRS-$(CONFIG_RTE_PROC_INFO) += proc_info
+DIRS-$(CONFIG_RTE_PROC_INFO) += proc-info
DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += pdump
ifeq ($(CONFIG_RTE_LIBRTE_BBDEV),y)
diff --git a/app/meson.build b/app/meson.build
index 0088de46..99e0b93e 100644
--- a/app/meson.build
+++ b/app/meson.build
@@ -1,5 +1,57 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-subdir('test-pmd')
-subdir('test-eventdev')
+apps = ['pdump',
+ 'proc-info',
+ 'test-bbdev',
+ 'test-crypto-perf',
+ 'test-eventdev',
+ 'test-pmd']
+
+# for BSD only
+lib_execinfo = cc.find_library('execinfo', required: false)
+
+foreach app:apps
+ build = true
+ name = app
+ allow_experimental_apis = false
+ sources = []
+ includes = []
+ cflags = machine_args
+ objs = [] # other object files to link against, used e.g. for
+ # instruction-set optimized versions of code
+
+ # use "deps" for internal DPDK dependencies, and "ext_deps" for
+ # external package/library requirements
+ ext_deps = []
+ deps = []
+
+ subdir(name)
+
+ if build
+ dep_objs = []
+ foreach d:deps
+ dep_objs += get_variable(get_option('default_library')
+ + '_rte_' + d)
+ endforeach
+ dep_objs += lib_execinfo
+
+ link_libs = []
+ if get_option('default_library') == 'static'
+ link_libs = dpdk_drivers
+ endif
+
+ if allow_experimental_apis
+ cflags += '-DALLOW_EXPERIMENTAL_API'
+ endif
+
+ executable('dpdk-' + name,
+ sources,
+ c_args: cflags,
+ link_whole: link_libs,
+ dependencies: dep_objs,
+ install_rpath: join_paths(get_option('prefix'),
+ driver_install_path),
+ install: true)
+ endif
+endforeach
diff --git a/app/pdump/main.c b/app/pdump/main.c
index f6865bdb..ac228712 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -24,6 +24,7 @@
#include <rte_kvargs.h>
#include <rte_mempool.h>
#include <rte_ring.h>
+#include <rte_string_fns.h>
#include <rte_pdump.h>
#define CMD_LINE_OPT_PDUMP "pdump"
@@ -36,11 +37,10 @@
#define PDUMP_RING_SIZE_ARG "ring-size"
#define PDUMP_MSIZE_ARG "mbuf-size"
#define PDUMP_NUM_MBUFS_ARG "total-num-mbufs"
-#define CMD_LINE_OPT_SER_SOCK_PATH "server-socket-path"
-#define CMD_LINE_OPT_CLI_SOCK_PATH "client-socket-path"
-#define VDEV_PCAP "net_pcap_%s_%d,tx_pcap=%s"
-#define VDEV_IFACE "net_pcap_%s_%d,tx_iface=%s"
+#define VDEV_NAME_FMT "net_pcap_%s_%d"
+#define VDEV_PCAP_ARGS_FMT "tx_pcap=%s"
+#define VDEV_IFACE_ARGS_FMT "tx_iface=%s"
#define TX_STREAM_SIZE 64
#define MP_NAME "pdump_pool_%d"
@@ -139,8 +139,6 @@ struct parse_val {
int num_tuples;
static struct rte_eth_conf port_conf_default;
volatile uint8_t quit_signal;
-static char server_socket_path[PATH_MAX];
-static char client_socket_path[PATH_MAX];
/**< display usage */
static void
@@ -153,11 +151,7 @@ pdump_usage(const char *prgname)
" tx-dev=<iface or pcap file>,"
"[ring-size=<ring size>default:16384],"
"[mbuf-size=<mbuf data size>default:2176],"
- "[total-num-mbufs=<number of mbufs>default:65535]'\n"
- "[--server-socket-path=<server socket dir>"
- "default:/var/run/.dpdk/ (or) ~/.dpdk/]\n"
- "[--client-socket-path=<client socket dir>"
- "default:/var/run/.dpdk/ (or) ~/.dpdk/]\n",
+ "[total-num-mbufs=<number of mbufs>default:65535]'\n",
prgname);
}
@@ -382,8 +376,6 @@ launch_args_parse(int argc, char **argv, char *prgname)
int option_index;
static struct option long_option[] = {
{"pdump", 1, 0, 0},
- {"server-socket-path", 1, 0, 0},
- {"client-socket-path", 1, 0, 0},
{NULL, 0, 0, 0}
};
@@ -404,23 +396,6 @@ launch_args_parse(int argc, char **argv, char *prgname)
return -1;
}
}
-
- if (!strncmp(long_option[option_index].name,
- CMD_LINE_OPT_SER_SOCK_PATH,
- sizeof(CMD_LINE_OPT_SER_SOCK_PATH))) {
- snprintf(server_socket_path,
- sizeof(server_socket_path), "%s",
- optarg);
- }
-
- if (!strncmp(long_option[option_index].name,
- CMD_LINE_OPT_CLI_SOCK_PATH,
- sizeof(CMD_LINE_OPT_CLI_SOCK_PATH))) {
- snprintf(client_socket_path,
- sizeof(client_socket_path), "%s",
- optarg);
- }
-
break;
default:
pdump_usage(prgname);
@@ -554,11 +529,10 @@ configure_vdev(uint16_t port_id)
{
struct ether_addr addr;
const uint16_t rxRings = 0, txRings = 1;
- const uint8_t nb_ports = rte_eth_dev_count();
int ret;
uint16_t q;
- if (port_id > nb_ports)
+ if (!rte_eth_dev_is_valid_port(port_id))
return -1;
ret = rte_eth_dev_configure(port_id, rxRings, txRings,
@@ -597,6 +571,7 @@ create_mp_ring_vdev(void)
uint16_t portid;
struct pdump_tuples *pt = NULL;
struct rte_mempool *mbuf_pool = NULL;
+ char vdev_name[SIZE];
char vdev_args[SIZE];
char ring_name[SIZE];
char mempool_name[SIZE];
@@ -646,17 +621,28 @@ create_mp_ring_vdev(void)
}
/* create vdevs */
+ snprintf(vdev_name, sizeof(vdev_name),
+ VDEV_NAME_FMT, RX_STR, i);
(pt->rx_vdev_stream_type == IFACE) ?
- snprintf(vdev_args, SIZE, VDEV_IFACE, RX_STR, i,
- pt->rx_dev) :
- snprintf(vdev_args, SIZE, VDEV_PCAP, RX_STR, i,
- pt->rx_dev);
- if (rte_eth_dev_attach(vdev_args, &portid) < 0) {
+ snprintf(vdev_args, sizeof(vdev_args),
+ VDEV_IFACE_ARGS_FMT, pt->rx_dev) :
+ snprintf(vdev_args, sizeof(vdev_args),
+ VDEV_PCAP_ARGS_FMT, pt->rx_dev);
+ if (rte_eal_hotplug_add("vdev", vdev_name,
+ vdev_args) < 0) {
cleanup_rings();
rte_exit(EXIT_FAILURE,
"vdev creation failed:%s:%d\n",
__func__, __LINE__);
}
+ if (rte_eth_dev_get_port_by_name(vdev_name,
+ &portid) != 0) {
+ rte_eal_hotplug_remove("vdev", vdev_name);
+ cleanup_rings();
+ rte_exit(EXIT_FAILURE,
+ "cannot find added vdev %s:%s:%d\n",
+ vdev_name, __func__, __LINE__);
+ }
pt->rx_vdev_id = portid;
/* configure vdev */
@@ -665,18 +651,29 @@ create_mp_ring_vdev(void)
if (pt->single_pdump_dev)
pt->tx_vdev_id = portid;
else {
- (pt->tx_vdev_stream_type == IFACE) ?
- snprintf(vdev_args, SIZE, VDEV_IFACE, TX_STR, i,
- pt->tx_dev) :
- snprintf(vdev_args, SIZE, VDEV_PCAP, TX_STR, i,
- pt->tx_dev);
- if (rte_eth_dev_attach(vdev_args,
- &portid) < 0) {
+ snprintf(vdev_name, sizeof(vdev_name),
+ VDEV_NAME_FMT, TX_STR, i);
+ (pt->tx_vdev_stream_type == IFACE) ?
+ snprintf(vdev_args, sizeof(vdev_args),
+ VDEV_IFACE_ARGS_FMT, pt->tx_dev) :
+ snprintf(vdev_args, sizeof(vdev_args),
+ VDEV_PCAP_ARGS_FMT, pt->tx_dev);
+ if (rte_eal_hotplug_add("vdev", vdev_name,
+ vdev_args) < 0) {
cleanup_rings();
rte_exit(EXIT_FAILURE,
"vdev creation failed:"
"%s:%d\n", __func__, __LINE__);
}
+ if (rte_eth_dev_get_port_by_name(vdev_name,
+ &portid) != 0) {
+ rte_eal_hotplug_remove("vdev",
+ vdev_name);
+ cleanup_rings();
+ rte_exit(EXIT_FAILURE,
+ "cannot find added vdev %s:%s:%d\n",
+ vdev_name, __func__, __LINE__);
+ }
pt->tx_vdev_id = portid;
/* configure vdev */
@@ -694,17 +691,28 @@ create_mp_ring_vdev(void)
rte_strerror(rte_errno));
}
+ snprintf(vdev_name, sizeof(vdev_name),
+ VDEV_NAME_FMT, RX_STR, i);
(pt->rx_vdev_stream_type == IFACE) ?
- snprintf(vdev_args, SIZE, VDEV_IFACE, RX_STR, i,
- pt->rx_dev) :
- snprintf(vdev_args, SIZE, VDEV_PCAP, RX_STR, i,
- pt->rx_dev);
- if (rte_eth_dev_attach(vdev_args, &portid) < 0) {
+ snprintf(vdev_args, sizeof(vdev_args),
+ VDEV_IFACE_ARGS_FMT, pt->rx_dev) :
+ snprintf(vdev_args, sizeof(vdev_args),
+ VDEV_PCAP_ARGS_FMT, pt->rx_dev);
+ if (rte_eal_hotplug_add("vdev", vdev_name,
+ vdev_args) < 0) {
cleanup_rings();
rte_exit(EXIT_FAILURE,
"vdev creation failed:%s:%d\n",
__func__, __LINE__);
}
+ if (rte_eth_dev_get_port_by_name(vdev_name,
+ &portid) != 0) {
+ rte_eal_hotplug_remove("vdev", vdev_name);
+ cleanup_rings();
+ rte_exit(EXIT_FAILURE,
+ "cannot find added vdev %s:%s:%d\n",
+ vdev_name, __func__, __LINE__);
+ }
pt->rx_vdev_id = portid;
/* configure vdev */
configure_vdev(pt->rx_vdev_id);
@@ -720,16 +728,27 @@ create_mp_ring_vdev(void)
rte_strerror(rte_errno));
}
+ snprintf(vdev_name, sizeof(vdev_name),
+ VDEV_NAME_FMT, TX_STR, i);
(pt->tx_vdev_stream_type == IFACE) ?
- snprintf(vdev_args, SIZE, VDEV_IFACE, TX_STR, i,
- pt->tx_dev) :
- snprintf(vdev_args, SIZE, VDEV_PCAP, TX_STR, i,
- pt->tx_dev);
- if (rte_eth_dev_attach(vdev_args, &portid) < 0) {
+ snprintf(vdev_args, sizeof(vdev_args),
+ VDEV_IFACE_ARGS_FMT, pt->tx_dev) :
+ snprintf(vdev_args, sizeof(vdev_args),
+ VDEV_PCAP_ARGS_FMT, pt->tx_dev);
+ if (rte_eal_hotplug_add("vdev", vdev_name,
+ vdev_args) < 0) {
cleanup_rings();
rte_exit(EXIT_FAILURE,
"vdev creation failed\n");
}
+ if (rte_eth_dev_get_port_by_name(vdev_name,
+ &portid) != 0) {
+ rte_eal_hotplug_remove("vdev", vdev_name);
+ cleanup_rings();
+ rte_exit(EXIT_FAILURE,
+ "cannot find added vdev %s:%s:%d\n",
+ vdev_name, __func__, __LINE__);
+ }
pt->tx_vdev_id = portid;
/* configure vdev */
@@ -745,22 +764,6 @@ enable_pdump(void)
struct pdump_tuples *pt;
int ret = 0, ret1 = 0;
- if (server_socket_path[0] != 0)
- ret = rte_pdump_set_socket_dir(server_socket_path,
- RTE_PDUMP_SOCKET_SERVER);
- if (ret == 0 && client_socket_path[0] != 0) {
- ret = rte_pdump_set_socket_dir(client_socket_path,
- RTE_PDUMP_SOCKET_CLIENT);
- }
- if (ret < 0) {
- cleanup_pdump_resources();
- rte_exit(EXIT_FAILURE,
- "failed to set socket paths of server:%s, "
- "client:%s\n",
- server_socket_path,
- client_socket_path);
- }
-
for (i = 0; i < num_tuples; i++) {
pt = &pdump_t[i];
if (pt->dir == RTE_PDUMP_FLAG_RXTX) {
@@ -863,6 +866,9 @@ main(int argc, char **argv)
if (diag < 0)
rte_panic("Cannot init EAL\n");
+ if (rte_eth_dev_count_avail() == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
argc -= diag;
argv += (diag - 3);
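The pdump hunks above replace the removed rte_eth_dev_attach() call with an explicit two-step flow: rte_eal_hotplug_add() creates the vdev, then rte_eth_dev_get_port_by_name() resolves its port id, with a hotplug-remove rollback when the lookup fails. A minimal sketch of that pattern (the vdev name and tx_pcap argument below are illustrative, not from the patch):

```c
#include <rte_dev.h>
#include <rte_ethdev.h>

/* Sketch of the hotplug-then-lookup flow used above; the vdev name
 * and tx_pcap argument here are made-up examples. */
static int
attach_pcap_vdev(uint16_t *port_id)
{
	const char *name = "net_pcap_rx_0";

	if (rte_eal_hotplug_add("vdev", name, "tx_pcap=/tmp/rx.pcap") < 0)
		return -1; /* vdev creation failed */

	if (rte_eth_dev_get_port_by_name(name, port_id) != 0) {
		/* roll back the half-attached device, as the patch does */
		rte_eal_hotplug_remove("vdev", name);
		return -1;
	}
	return 0;
}
```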
diff --git a/app/pdump/meson.build b/app/pdump/meson.build
new file mode 100644
index 00000000..988cb4eb
--- /dev/null
+++ b/app/pdump/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('main.c')
+allow_experimental_apis = true
+deps = ['ethdev', 'kvargs', 'pdump']
diff --git a/app/proc_info/Makefile b/app/proc-info/Makefile
index 9e87f524..9e87f524 100644
--- a/app/proc_info/Makefile
+++ b/app/proc-info/Makefile
diff --git a/app/proc_info/main.c b/app/proc-info/main.c
index 2f53e3ca..c20effa4 100644
--- a/app/proc_info/main.c
+++ b/app/proc-info/main.c
@@ -159,7 +159,7 @@ proc_info_preparse_args(int argc, char **argv)
proc_info_usage(prgname);
return -1;
}
- strncpy(host_id, argv[i+1], sizeof(host_id));
+ snprintf(host_id, sizeof(host_id), "%s", argv[i+1]);
}
}
@@ -488,14 +488,18 @@ nic_xstats_display(uint16_t port_id)
if (enable_collectd_format) {
char counter_type[MAX_STRING_LEN];
char buf[MAX_STRING_LEN];
+ size_t n;
collectd_resolve_cnt_type(counter_type,
sizeof(counter_type),
xstats_names[i].name);
- sprintf(buf, "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%"
+ n = snprintf(buf, MAX_STRING_LEN,
+ "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%"
PRIu64"\n", host_id, port_id, counter_type,
xstats_names[i].name, values[i]);
- ret = write(stdout_fd, buf, strlen(buf));
+ if (n > sizeof(buf) - 1)
+ n = sizeof(buf) - 1;
+ ret = write(stdout_fd, buf, n);
if (ret < 0)
goto err;
} else {
@@ -628,7 +632,7 @@ main(int argc, char **argv)
return 0;
}
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
@@ -636,7 +640,7 @@ main(int argc, char **argv)
if (enabled_port_mask == 0)
enabled_port_mask = 0xffff;
- for (i = 0; i < nb_ports; i++) {
+ RTE_ETH_FOREACH_DEV(i) {
if (enabled_port_mask & (1 << i)) {
if (enable_stats)
nic_stats_display(i);
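Two patterns in the proc-info hunks above are worth noting: port iteration moves from a raw 0..nb_ports loop to RTE_ETH_FOREACH_DEV(), which skips invalid port ids, and the sprintf/strlen pair becomes snprintf with an explicit clamp, because snprintf returns the length it would have written, which can exceed the buffer. A small sketch of the clamping idiom (buffer size and format string are illustrative):

```c
#include <stdio.h>
#include <unistd.h>

/* Sketch of the truncation-safe write used above: clamp snprintf's
 * "would-have-written" return before handing it to write(). */
static ssize_t
emit_stat(int fd, const char *host, unsigned int port)
{
	char buf[64];
	size_t n = snprintf(buf, sizeof(buf),
			"PUTVAL %s/dpdkstat-port.%u N:0\n", host, port);

	if (n > sizeof(buf) - 1) /* output was truncated */
		n = sizeof(buf) - 1;
	return write(fd, buf, n);
}
```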
diff --git a/app/proc-info/meson.build b/app/proc-info/meson.build
new file mode 100644
index 00000000..9c148e36
--- /dev/null
+++ b/app/proc-info/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('main.c')
+allow_experimental_apis = true
+deps = ['ethdev', 'metrics']
diff --git a/app/test-bbdev/Makefile b/app/test-bbdev/Makefile
index 9aedd776..6da0c8e0 100644
--- a/app/test-bbdev/Makefile
+++ b/app/test-bbdev/Makefile
@@ -20,4 +20,6 @@ SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev.c
SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev_perf.c
SRCS-$(CONFIG_RTE_TEST_BBDEV) += test_bbdev_vector.c
+LDLIBS += -lm
+
include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-bbdev/meson.build b/app/test-bbdev/meson.build
new file mode 100644
index 00000000..653907de
--- /dev/null
+++ b/app/test-bbdev/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('main.c',
+ 'test_bbdev.c',
+ 'test_bbdev_perf.c',
+ 'test_bbdev_vector.c')
+allow_experimental_apis = true
+deps = ['bbdev', 'bus_vdev']
diff --git a/app/test-bbdev/test-bbdev.py b/app/test-bbdev/test-bbdev.py
index ce781497..acab9eb1 100755
--- a/app/test-bbdev/test-bbdev.py
+++ b/app/test-bbdev/test-bbdev.py
@@ -33,7 +33,7 @@ parser.add_argument("-p", "--testapp-path",
default=dpdk_path + "/" + dpdk_target + "/app/testbbdev")
parser.add_argument("-e", "--eal-params",
help="EAL arguments which are passed to the test app",
- default="--vdev=bbdev_null0")
+ default="--vdev=baseband_null0")
parser.add_argument("-t", "--timeout",
type=int,
help="Timeout in seconds",
@@ -45,7 +45,7 @@ parser.add_argument("-v", "--test-vector",
nargs="+",
help="Specifies paths to the test vector files.",
default=[dpdk_path +
- "/app/test-bbdev/test_vectors/bbdev_vector_null.data"])
+ "/app/test-bbdev/test_vectors/bbdev_null.data"])
parser.add_argument("-n", "--num-ops",
type=int,
help="Number of operations to process on device.",
diff --git a/app/test-bbdev/test_bbdev.c b/app/test-bbdev/test_bbdev.c
index 10579ea0..a914817b 100644
--- a/app/test-bbdev/test_bbdev.c
+++ b/app/test-bbdev/test_bbdev.c
@@ -273,7 +273,7 @@ test_bbdev_configure_stop_queue(void)
/* Valid queue configuration */
ts_params->qconf.queue_size = info.drv.queue_size_lim;
- ts_params->qconf.priority = info.drv.max_queue_priority;
+ ts_params->qconf.priority = info.drv.max_ul_queue_priority;
/* Device - started; queue - started */
rte_bbdev_start(dev_id);
@@ -413,14 +413,7 @@ test_bbdev_configure_invalid_queue_configure(void)
ts_params->qconf.queue_size);
ts_params->qconf.queue_size = info.drv.queue_size_lim;
- ts_params->qconf.priority = info.drv.max_queue_priority + 1;
- TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id,
- &ts_params->qconf),
- "Failed test for rte_bbdev_queue_configure: "
- "invalid value qconf.queue_size: %u",
- ts_params->qconf.queue_size);
-
- ts_params->qconf.priority = info.drv.max_queue_priority;
+ ts_params->qconf.priority = info.drv.max_ul_queue_priority;
queue_id = info.num_queues;
TEST_ASSERT_FAIL(rte_bbdev_queue_configure(dev_id, queue_id,
&ts_params->qconf),
@@ -902,12 +895,12 @@ test_bbdev_callback(void)
"Failed to callback rgstr for RTE_BBDEV_EVENT_UNKNOWN");
rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL);
- TEST_ASSERT(event_status == 0,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN,
"Failed test for rte_bbdev_pmd_callback_process "
"for RTE_BBDEV_EVENT_UNKNOWN ");
rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL);
- TEST_ASSERT(event_status == 0,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN,
"Failed test for rte_bbdev_pmd_callback_process: "
"event RTE_BBDEV_EVENT_ERROR was not registered ");
@@ -926,12 +919,12 @@ test_bbdev_callback(void)
event_status = -1;
rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL);
- TEST_ASSERT(event_status == 0,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN,
"Failed test for rte_bbdev_pmd_callback_process "
"for RTE_BBDEV_EVENT_UNKNOWN ");
rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL);
- TEST_ASSERT(event_status == 1,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_ERROR,
"Failed test for rte_bbdev_pmd_callback_process "
"for RTE_BBDEV_EVENT_ERROR ");
@@ -945,12 +938,12 @@ test_bbdev_callback(void)
event_status = -1;
rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL);
- TEST_ASSERT(event_status == 0,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN,
"Failed test for rte_bbdev_pmd_callback_process "
"for RTE_BBDEV_EVENT_UNKNOWN ");
rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_ERROR, NULL);
- TEST_ASSERT(event_status == 0,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN,
"Failed test for rte_bbdev_pmd_callback_process: "
"event RTE_BBDEV_EVENT_ERROR was unregistered ");
@@ -999,7 +992,7 @@ test_bbdev_callback(void)
"for RTE_BBDEV_EVENT_ERROR ");
rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_ERROR, NULL);
- TEST_ASSERT(event_status == 1,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_ERROR,
"Failed test for rte_bbdev_pmd_callback_process in dev2 "
"for RTE_BBDEV_EVENT_ERROR ");
@@ -1013,7 +1006,7 @@ test_bbdev_callback(void)
"in dev 2 ");
rte_bbdev_pmd_callback_process(dev2, RTE_BBDEV_EVENT_UNKNOWN, NULL);
- TEST_ASSERT(event_status == 0,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN,
"Failed test for rte_bbdev_pmd_callback_process in dev2"
" for RTE_BBDEV_EVENT_UNKNOWN ");
@@ -1033,7 +1026,7 @@ test_bbdev_callback(void)
" for RTE_BBDEV_EVENT_UNKNOWN ");
rte_bbdev_pmd_callback_process(dev1, RTE_BBDEV_EVENT_UNKNOWN, NULL);
- TEST_ASSERT(event_status == 0,
+ TEST_ASSERT(event_status == (int) RTE_BBDEV_EVENT_UNKNOWN,
"Failed test for rte_bbdev_pmd_callback_process in dev2 "
"for RTE_BBDEV_EVENT_UNKNOWN ");
diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index 00f3b085..6861edc4 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -4,6 +4,7 @@
#include <stdio.h>
#include <inttypes.h>
+#include <math.h>
#include <rte_eal.h>
#include <rte_common.h>
@@ -83,6 +84,30 @@ struct thread_params {
struct test_op_params *op_params;
};
+#ifdef RTE_BBDEV_OFFLOAD_COST
+/* Stores time statistics */
+struct test_time_stats {
+ /* Stores software enqueue total working time */
+ uint64_t enq_sw_tot_time;
+ /* Stores minimum value of software enqueue working time */
+ uint64_t enq_sw_min_time;
+ /* Stores maximum value of software enqueue working time */
+ uint64_t enq_sw_max_time;
+ /* Stores turbo enqueue total working time */
+ uint64_t enq_tur_tot_time;
+ /* Stores minimum value of turbo enqueue working time */
+ uint64_t enq_tur_min_time;
+ /* Stores maximum value of turbo enqueue working time */
+ uint64_t enq_tur_max_time;
+ /* Stores dequeue total working time */
+ uint64_t deq_tot_time;
+ /* Stores minimum value of dequeue working time */
+ uint64_t deq_min_time;
+ /* Stores maximum value of dequeue working time */
+ uint64_t deq_max_time;
+};
+#endif
+
typedef int (test_case_function)(struct active_device *ad,
struct test_op_params *op_params);
@@ -609,10 +634,32 @@ allocate_buffers_on_socket(struct rte_bbdev_op_data **buffers, const int len,
return (*buffers == NULL) ? TEST_FAILED : TEST_SUCCESS;
}
+static void
+limit_input_llr_val_range(struct rte_bbdev_op_data *input_ops,
+ uint16_t n, int8_t max_llr_modulus)
+{
+ uint16_t i, byte_idx;
+
+ for (i = 0; i < n; ++i) {
+ struct rte_mbuf *m = input_ops[i].data;
+ while (m != NULL) {
+ int8_t *llr = rte_pktmbuf_mtod_offset(m, int8_t *,
+ input_ops[i].offset);
+ for (byte_idx = 0; byte_idx < input_ops[i].length;
+ ++byte_idx)
+ llr[byte_idx] = round((double)max_llr_modulus *
+ llr[byte_idx] / INT8_MAX);
+
+ m = m->next;
+ }
+ }
+}
+
static int
fill_queue_buffers(struct test_op_params *op_params,
struct rte_mempool *in_mp, struct rte_mempool *hard_out_mp,
struct rte_mempool *soft_out_mp, uint16_t queue_id,
+ const struct rte_bbdev_op_cap *capabilities,
uint16_t min_alignment, const int socket_id)
{
int ret;
@@ -649,6 +696,10 @@ fill_queue_buffers(struct test_op_params *op_params,
"Couldn't init rte_bbdev_op_data structs");
}
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ limit_input_llr_val_range(*queue_ops[DATA_INPUT], n,
+ capabilities->cap.turbo_dec.max_llr_modulus);
+
return 0;
}
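limit_input_llr_val_range() above rescales every input LLR from the test vector's full int8 range into the device's advertised max_llr_modulus; this round() call is also why the Makefile hunk adds -lm. A standalone sketch of the arithmetic, assuming a limit of 16:

```c
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Worked example of the LLR rescaling above with an assumed
 * max_llr_modulus of 16: 127 -> 16, -64 -> -8, 8 -> 1.
 * Build with -lm for round(). */
int main(void)
{
	const int8_t max_llr_modulus = 16;
	const int8_t llr[] = { 127, -64, 8 };
	unsigned int i;

	for (i = 0; i < sizeof(llr); i++)
		printf("%4d -> %4d\n", llr[i],
			(int) round((double) max_llr_modulus *
				llr[i] / INT8_MAX));
	return 0;
}
```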
@@ -995,6 +1046,7 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
struct active_device *ad;
unsigned int burst_sz = get_burst_sz();
enum rte_bbdev_op_type op_type = test_vector.op_type;
+ const struct rte_bbdev_op_cap *capabilities = NULL;
ad = &active_devs[dev_id];
@@ -1027,9 +1079,20 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
goto fail;
}
- if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC)
+ if (test_vector.op_type == RTE_BBDEV_OP_TURBO_DEC) {
+ /* Find Decoder capabilities */
+ const struct rte_bbdev_op_cap *cap = info.drv.capabilities;
+ while (cap->type != RTE_BBDEV_OP_NONE) {
+ if (cap->type == RTE_BBDEV_OP_TURBO_DEC) {
+ capabilities = cap;
+ break;
+ }
+ cap++;
+ }
+ TEST_ASSERT_NOT_NULL(capabilities,
+ "Couldn't find Decoder capabilities");
+
create_reference_dec_op(op_params->ref_dec_op);
- else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
+ } else if (test_vector.op_type == RTE_BBDEV_OP_TURBO_ENC)
create_reference_enc_op(op_params->ref_enc_op);
for (i = 0; i < ad->nb_queues; ++i) {
@@ -1038,6 +1101,7 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
ad->hard_out_mbuf_pool,
ad->soft_out_mbuf_pool,
ad->queue_ids[i],
+ capabilities,
info.drv.min_alignment,
socket_id);
if (f_ret != TEST_SUCCESS) {
@@ -1104,7 +1168,6 @@ dequeue_event_callback(uint16_t dev_id,
double in_len;
struct thread_params *tp = cb_arg;
-
RTE_SET_USED(ret_param);
queue_id = tp->queue_id;
@@ -1649,25 +1712,28 @@ throughput_test(struct active_device *ad,
}
static int
-operation_latency_test_dec(struct rte_mempool *mempool,
+latency_test_dec(struct rte_mempool *mempool,
struct test_buffers *bufs, struct rte_bbdev_dec_op *ref_op,
int vector_mask, uint16_t dev_id, uint16_t queue_id,
const uint16_t num_to_process, uint16_t burst_sz,
- uint64_t *total_time)
+ uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
{
int ret = TEST_SUCCESS;
uint16_t i, j, dequeued;
struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
- uint64_t start_time = 0;
+ uint64_t start_time = 0, last_time = 0;
for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
uint16_t enq = 0, deq = 0;
bool first_time = true;
+ last_time = 0;
if (unlikely(num_to_process - dequeued < burst_sz))
burst_sz = num_to_process - dequeued;
- rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ ret = rte_bbdev_dec_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ TEST_ASSERT_SUCCESS(ret,
+ "rte_bbdev_dec_op_alloc_bulk() failed");
if (test_vector.op_type != RTE_BBDEV_OP_NONE)
copy_reference_dec_op(ops_enq, burst_sz, dequeued,
bufs->inputs,
@@ -1692,11 +1758,15 @@ operation_latency_test_dec(struct rte_mempool *mempool,
deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&ops_deq[deq], burst_sz - deq);
if (likely(first_time && (deq > 0))) {
- *total_time += rte_rdtsc_precise() - start_time;
+ last_time = rte_rdtsc_precise() - start_time;
first_time = false;
}
} while (unlikely(burst_sz != deq));
+ *max_time = RTE_MAX(*max_time, last_time);
+ *min_time = RTE_MIN(*min_time, last_time);
+ *total_time += last_time;
+
if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
ret = validate_dec_op(ops_deq, burst_sz, ref_op,
vector_mask);
@@ -1711,25 +1781,28 @@ operation_latency_test_dec(struct rte_mempool *mempool,
}
static int
-operation_latency_test_enc(struct rte_mempool *mempool,
+latency_test_enc(struct rte_mempool *mempool,
struct test_buffers *bufs, struct rte_bbdev_enc_op *ref_op,
uint16_t dev_id, uint16_t queue_id,
const uint16_t num_to_process, uint16_t burst_sz,
- uint64_t *total_time)
+ uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
{
int ret = TEST_SUCCESS;
uint16_t i, j, dequeued;
struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
- uint64_t start_time = 0;
+ uint64_t start_time = 0, last_time = 0;
for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
uint16_t enq = 0, deq = 0;
bool first_time = true;
+ last_time = 0;
if (unlikely(num_to_process - dequeued < burst_sz))
burst_sz = num_to_process - dequeued;
- rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ ret = rte_bbdev_enc_op_alloc_bulk(mempool, ops_enq, burst_sz);
+ TEST_ASSERT_SUCCESS(ret,
+ "rte_bbdev_enc_op_alloc_bulk() failed");
if (test_vector.op_type != RTE_BBDEV_OP_NONE)
copy_reference_enc_op(ops_enq, burst_sz, dequeued,
bufs->inputs,
@@ -1753,11 +1826,15 @@ operation_latency_test_enc(struct rte_mempool *mempool,
deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&ops_deq[deq], burst_sz - deq);
if (likely(first_time && (deq > 0))) {
- *total_time += rte_rdtsc_precise() - start_time;
+ last_time += rte_rdtsc_precise() - start_time;
first_time = false;
}
} while (unlikely(burst_sz != deq));
+ *max_time = RTE_MAX(*max_time, last_time);
+ *min_time = RTE_MIN(*min_time, last_time);
+ *total_time += last_time;
+
if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
ret = validate_enc_op(ops_deq, burst_sz, ref_op);
TEST_ASSERT_SUCCESS(ret, "Validation failed!");
@@ -1771,7 +1848,7 @@ operation_latency_test_enc(struct rte_mempool *mempool,
}
static int
-operation_latency_test(struct active_device *ad,
+latency_test(struct active_device *ad,
struct test_op_params *op_params)
{
int iter;
@@ -1781,9 +1858,12 @@ operation_latency_test(struct active_device *ad,
const uint16_t queue_id = ad->queue_ids[0];
struct test_buffers *bufs = NULL;
struct rte_bbdev_info info;
- uint64_t total_time = 0;
+ uint64_t total_time, min_time, max_time;
const char *op_type_str;
+ total_time = max_time = 0;
+ min_time = UINT64_MAX;
+
TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
"BURST_SIZE should be <= %u", MAX_BURST);
@@ -1798,36 +1878,66 @@ operation_latency_test(struct active_device *ad,
info.dev_name, burst_sz, num_to_process, op_type_str);
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
- iter = operation_latency_test_dec(op_params->mp, bufs,
+ iter = latency_test_dec(op_params->mp, bufs,
op_params->ref_dec_op, op_params->vector_mask,
ad->dev_id, queue_id, num_to_process,
- burst_sz, &total_time);
+ burst_sz, &total_time, &min_time, &max_time);
else
- iter = operation_latency_test_enc(op_params->mp, bufs,
+ iter = latency_test_enc(op_params->mp, bufs,
op_params->ref_enc_op, ad->dev_id, queue_id,
- num_to_process, burst_sz, &total_time);
+ num_to_process, burst_sz, &total_time,
+ &min_time, &max_time);
if (iter <= 0)
return TEST_FAILED;
- printf("\toperation avg. latency: %lg cycles, %lg us\n",
+ printf("\toperation latency:\n"
+ "\t\tavg latency: %lg cycles, %lg us\n"
+ "\t\tmin latency: %lg cycles, %lg us\n"
+ "\t\tmax latency: %lg cycles, %lg us\n",
(double)total_time / (double)iter,
(double)(total_time * 1000000) / (double)iter /
+ (double)rte_get_tsc_hz(), (double)min_time,
+ (double)(min_time * 1000000) / (double)rte_get_tsc_hz(),
+ (double)max_time, (double)(max_time * 1000000) /
(double)rte_get_tsc_hz());
return TEST_SUCCESS;
}
+#ifdef RTE_BBDEV_OFFLOAD_COST
+static int
+get_bbdev_queue_stats(uint16_t dev_id, uint16_t queue_id,
+ struct rte_bbdev_stats *stats)
+{
+ struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
+ struct rte_bbdev_stats *q_stats;
+
+ if (queue_id >= dev->data->num_queues)
+ return -1;
+
+ q_stats = &dev->data->queues[queue_id].queue_stats;
+
+ stats->enqueued_count = q_stats->enqueued_count;
+ stats->dequeued_count = q_stats->dequeued_count;
+ stats->enqueue_err_count = q_stats->enqueue_err_count;
+ stats->dequeue_err_count = q_stats->dequeue_err_count;
+ stats->offload_time = q_stats->offload_time;
+
+ return 0;
+}
+
static int
offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
uint16_t queue_id, const uint16_t num_to_process,
- uint16_t burst_sz, uint64_t *enq_total_time,
- uint64_t *deq_total_time)
+ uint16_t burst_sz, struct test_time_stats *time_st)
{
- int i, dequeued;
+ int i, dequeued, ret;
struct rte_bbdev_dec_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
uint64_t enq_start_time, deq_start_time;
+ uint64_t enq_sw_last_time, deq_last_time;
+ struct rte_bbdev_stats stats;
for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
uint16_t enq = 0, deq = 0;
@@ -1843,24 +1953,54 @@ offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
bufs->soft_outputs,
ref_op);
- /* Start time measurment for enqueue function offload latency */
- enq_start_time = rte_rdtsc();
+ /* Start time meas for enqueue function offload latency */
+ enq_start_time = rte_rdtsc_precise();
do {
enq += rte_bbdev_enqueue_dec_ops(dev_id, queue_id,
&ops_enq[enq], burst_sz - enq);
} while (unlikely(burst_sz != enq));
- *enq_total_time += rte_rdtsc() - enq_start_time;
+
+ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to get stats for queue (%u) of device (%u)",
+ queue_id, dev_id);
+
+ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+ stats.offload_time;
+ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+ enq_sw_last_time);
+ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+ enq_sw_last_time);
+ time_st->enq_sw_tot_time += enq_sw_last_time;
+
+ time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
+ stats.offload_time);
+ time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
+ stats.offload_time);
+ time_st->enq_tur_tot_time += stats.offload_time;
/* ensure enqueue has been completed */
rte_delay_ms(10);
- /* Start time measurment for dequeue function offload latency */
- deq_start_time = rte_rdtsc();
+ /* Start time meas for dequeue function offload latency */
+ deq_start_time = rte_rdtsc_precise();
+ /* Dequeue one operation */
do {
deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
+ &ops_deq[deq], 1);
+ } while (unlikely(deq != 1));
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
+ deq_last_time);
+ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
+ deq_last_time);
+ time_st->deq_tot_time += deq_last_time;
+
+ /* Dequeue remaining operations if needed */
+ while (burst_sz != deq)
+ deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
&ops_deq[deq], burst_sz - deq);
- } while (unlikely(burst_sz != deq));
- *deq_total_time += rte_rdtsc() - deq_start_time;
rte_bbdev_dec_op_free_bulk(ops_enq, deq);
dequeued += deq;
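All the statistics printouts in these hunks convert TSC cycles to microseconds with the same expression, cycles * 1e6 / rte_get_tsc_hz(); for example, 5000 cycles on a 2.0 GHz TSC is 2.5 us. A helper capturing that formula (a sketch, computed in floating point to sidestep the uint64 overflow the inline multiplications risk):

```c
#include <stdint.h>
#include <rte_cycles.h>

/* Sketch: the cycles-to-microseconds conversion repeated in the
 * printfs above, done in double to avoid uint64_t overflow. */
static inline double
cycles_to_us(uint64_t cycles)
{
	return (double) cycles * 1000000.0 / (double) rte_get_tsc_hz();
}
```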
@@ -1873,12 +2013,13 @@ static int
offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
struct rte_bbdev_enc_op *ref_op, uint16_t dev_id,
uint16_t queue_id, const uint16_t num_to_process,
- uint16_t burst_sz, uint64_t *enq_total_time,
- uint64_t *deq_total_time)
+ uint16_t burst_sz, struct test_time_stats *time_st)
{
- int i, dequeued;
+ int i, dequeued, ret;
struct rte_bbdev_enc_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
uint64_t enq_start_time, deq_start_time;
+ uint64_t enq_sw_last_time, deq_last_time;
+ struct rte_bbdev_stats stats;
for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
uint16_t enq = 0, deq = 0;
@@ -1893,24 +2034,53 @@ offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
bufs->hard_outputs,
ref_op);
- /* Start time measurment for enqueue function offload latency */
- enq_start_time = rte_rdtsc();
+ /* Start time meas for enqueue function offload latency */
+ enq_start_time = rte_rdtsc_precise();
do {
enq += rte_bbdev_enqueue_enc_ops(dev_id, queue_id,
&ops_enq[enq], burst_sz - enq);
} while (unlikely(burst_sz != enq));
- *enq_total_time += rte_rdtsc() - enq_start_time;
+
+ ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+ TEST_ASSERT_SUCCESS(ret,
+ "Failed to get stats for queue (%u) of device (%u)",
+ queue_id, dev_id);
+
+ enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+ stats.offload_time;
+ time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+ enq_sw_last_time);
+ time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+ enq_sw_last_time);
+ time_st->enq_sw_tot_time += enq_sw_last_time;
+
+ time_st->enq_tur_max_time = RTE_MAX(time_st->enq_tur_max_time,
+ stats.offload_time);
+ time_st->enq_tur_min_time = RTE_MIN(time_st->enq_tur_min_time,
+ stats.offload_time);
+ time_st->enq_tur_tot_time += stats.offload_time;
/* ensure enqueue has been completed */
rte_delay_ms(10);
- /* Start time measurment for dequeue function offload latency */
- deq_start_time = rte_rdtsc();
+ /* Start time meas for dequeue function offload latency */
+ deq_start_time = rte_rdtsc_precise();
+ /* Dequeue one operation */
do {
deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
+ &ops_deq[deq], 1);
+ } while (unlikely(deq != 1));
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ time_st->deq_max_time = RTE_MAX(time_st->deq_max_time,
+ deq_last_time);
+ time_st->deq_min_time = RTE_MIN(time_st->deq_min_time,
+ deq_last_time);
+ time_st->deq_tot_time += deq_last_time;
+
+ while (burst_sz != deq)
+ deq += rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
&ops_deq[deq], burst_sz - deq);
- } while (unlikely(burst_sz != deq));
- *deq_total_time += rte_rdtsc() - deq_start_time;
rte_bbdev_enc_op_free_bulk(ops_enq, deq);
dequeued += deq;
@@ -1918,13 +2088,20 @@ offload_latency_test_enc(struct rte_mempool *mempool, struct test_buffers *bufs,
return i;
}
+#endif
static int
-offload_latency_test(struct active_device *ad,
+offload_cost_test(struct active_device *ad,
struct test_op_params *op_params)
{
+#ifndef RTE_BBDEV_OFFLOAD_COST
+ RTE_SET_USED(ad);
+ RTE_SET_USED(op_params);
+ printf("Offload latency test is disabled.\n");
+ printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
+ return TEST_SKIPPED;
+#else
int iter;
- uint64_t enq_total_time = 0, deq_total_time = 0;
uint16_t burst_sz = op_params->burst_sz;
const uint16_t num_to_process = op_params->num_to_process;
const enum rte_bbdev_op_type op_type = test_vector.op_type;
@@ -1932,6 +2109,12 @@ offload_latency_test(struct active_device *ad,
struct test_buffers *bufs = NULL;
struct rte_bbdev_info info;
const char *op_type_str;
+ struct test_time_stats time_st;
+
+ memset(&time_st, 0, sizeof(struct test_time_stats));
+ time_st.enq_sw_min_time = UINT64_MAX;
+ time_st.enq_tur_min_time = UINT64_MAX;
+ time_st.deq_min_time = UINT64_MAX;
TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
"BURST_SIZE should be <= %u", MAX_BURST);
@@ -1949,48 +2132,82 @@ offload_latency_test(struct active_device *ad,
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
iter = offload_latency_test_dec(op_params->mp, bufs,
op_params->ref_dec_op, ad->dev_id, queue_id,
- num_to_process, burst_sz, &enq_total_time,
- &deq_total_time);
+ num_to_process, burst_sz, &time_st);
else
iter = offload_latency_test_enc(op_params->mp, bufs,
op_params->ref_enc_op, ad->dev_id, queue_id,
- num_to_process, burst_sz, &enq_total_time,
- &deq_total_time);
+ num_to_process, burst_sz, &time_st);
if (iter <= 0)
return TEST_FAILED;
- printf("\tenq offload avg. latency: %lg cycles, %lg us\n",
- (double)enq_total_time / (double)iter,
- (double)(enq_total_time * 1000000) / (double)iter /
- (double)rte_get_tsc_hz());
-
- printf("\tdeq offload avg. latency: %lg cycles, %lg us\n",
- (double)deq_total_time / (double)iter,
- (double)(deq_total_time * 1000000) / (double)iter /
- (double)rte_get_tsc_hz());
+ printf("\tenq offload cost latency:\n"
+ "\t\tsoftware avg %lg cycles, %lg us\n"
+ "\t\tsoftware min %lg cycles, %lg us\n"
+ "\t\tsoftware max %lg cycles, %lg us\n"
+ "\t\tturbo avg %lg cycles, %lg us\n"
+ "\t\tturbo min %lg cycles, %lg us\n"
+ "\t\tturbo max %lg cycles, %lg us\n",
+ (double)time_st.enq_sw_tot_time / (double)iter,
+ (double)(time_st.enq_sw_tot_time * 1000000) /
+ (double)iter / (double)rte_get_tsc_hz(),
+ (double)time_st.enq_sw_min_time,
+ (double)(time_st.enq_sw_min_time * 1000000) /
+ rte_get_tsc_hz(), (double)time_st.enq_sw_max_time,
+ (double)(time_st.enq_sw_max_time * 1000000) /
+ rte_get_tsc_hz(), (double)time_st.enq_tur_tot_time /
+ (double)iter,
+ (double)(time_st.enq_tur_tot_time * 1000000) /
+ (double)iter / (double)rte_get_tsc_hz(),
+ (double)time_st.enq_tur_min_time,
+ (double)(time_st.enq_tur_min_time * 1000000) /
+ rte_get_tsc_hz(), (double)time_st.enq_tur_max_time,
+ (double)(time_st.enq_tur_max_time * 1000000) /
+ rte_get_tsc_hz());
+
+ printf("\tdeq offload cost latency - one op:\n"
+ "\t\tavg %lg cycles, %lg us\n"
+ "\t\tmin %lg cycles, %lg us\n"
+ "\t\tmax %lg cycles, %lg us\n",
+ (double)time_st.deq_tot_time / (double)iter,
+ (double)(time_st.deq_tot_time * 1000000) /
+ (double)iter / (double)rte_get_tsc_hz(),
+ (double)time_st.deq_min_time,
+ (double)(time_st.deq_min_time * 1000000) /
+ rte_get_tsc_hz(), (double)time_st.deq_max_time,
+ (double)(time_st.deq_max_time * 1000000) /
+ rte_get_tsc_hz());
return TEST_SUCCESS;
+#endif
}
+#ifdef RTE_BBDEV_OFFLOAD_COST
static int
offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
const uint16_t num_to_process, uint16_t burst_sz,
- uint64_t *deq_total_time)
+ uint64_t *deq_tot_time, uint64_t *deq_min_time,
+ uint64_t *deq_max_time)
{
int i, deq_total;
struct rte_bbdev_dec_op *ops[MAX_BURST];
- uint64_t deq_start_time;
+ uint64_t deq_start_time, deq_last_time;
/* Test deq offload latency from an empty queue */
- deq_start_time = rte_rdtsc_precise();
+
for (i = 0, deq_total = 0; deq_total < num_to_process;
++i, deq_total += burst_sz) {
+ deq_start_time = rte_rdtsc_precise();
+
if (unlikely(num_to_process - deq_total < burst_sz))
burst_sz = num_to_process - deq_total;
rte_bbdev_dequeue_dec_ops(dev_id, queue_id, ops, burst_sz);
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
+ *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
+ *deq_tot_time += deq_last_time;
}
- *deq_total_time = rte_rdtsc_precise() - deq_start_time;
return i;
}
@@ -1998,31 +2215,45 @@ offload_latency_empty_q_test_dec(uint16_t dev_id, uint16_t queue_id,
static int
offload_latency_empty_q_test_enc(uint16_t dev_id, uint16_t queue_id,
const uint16_t num_to_process, uint16_t burst_sz,
- uint64_t *deq_total_time)
+ uint64_t *deq_tot_time, uint64_t *deq_min_time,
+ uint64_t *deq_max_time)
{
int i, deq_total;
struct rte_bbdev_enc_op *ops[MAX_BURST];
- uint64_t deq_start_time;
+ uint64_t deq_start_time, deq_last_time;
/* Test deq offload latency from an empty queue */
- deq_start_time = rte_rdtsc_precise();
for (i = 0, deq_total = 0; deq_total < num_to_process;
++i, deq_total += burst_sz) {
+ deq_start_time = rte_rdtsc_precise();
+
if (unlikely(num_to_process - deq_total < burst_sz))
burst_sz = num_to_process - deq_total;
rte_bbdev_dequeue_enc_ops(dev_id, queue_id, ops, burst_sz);
+
+ deq_last_time = rte_rdtsc_precise() - deq_start_time;
+ *deq_max_time = RTE_MAX(*deq_max_time, deq_last_time);
+ *deq_min_time = RTE_MIN(*deq_min_time, deq_last_time);
+ *deq_tot_time += deq_last_time;
}
- *deq_total_time = rte_rdtsc_precise() - deq_start_time;
return i;
}
+#endif
static int
offload_latency_empty_q_test(struct active_device *ad,
struct test_op_params *op_params)
{
+#ifndef RTE_BBDEV_OFFLOAD_COST
+ RTE_SET_USED(ad);
+ RTE_SET_USED(op_params);
+ printf("Offload latency empty dequeue test is disabled.\n");
+ printf("Set RTE_BBDEV_OFFLOAD_COST to 'y' to turn the test on.\n");
+ return TEST_SKIPPED;
+#else
int iter;
- uint64_t deq_total_time = 0;
+ uint64_t deq_tot_time, deq_min_time, deq_max_time;
uint16_t burst_sz = op_params->burst_sz;
const uint16_t num_to_process = op_params->num_to_process;
const enum rte_bbdev_op_type op_type = test_vector.op_type;
@@ -2030,6 +2261,9 @@ offload_latency_empty_q_test(struct active_device *ad,
struct rte_bbdev_info info;
const char *op_type_str;
+ deq_tot_time = deq_max_time = 0;
+ deq_min_time = UINT64_MAX;
+
TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST),
"BURST_SIZE should be <= %u", MAX_BURST);
@@ -2044,20 +2278,29 @@ offload_latency_empty_q_test(struct active_device *ad,
if (op_type == RTE_BBDEV_OP_TURBO_DEC)
iter = offload_latency_empty_q_test_dec(ad->dev_id, queue_id,
- num_to_process, burst_sz, &deq_total_time);
+ num_to_process, burst_sz, &deq_tot_time,
+ &deq_min_time, &deq_max_time);
else
iter = offload_latency_empty_q_test_enc(ad->dev_id, queue_id,
- num_to_process, burst_sz, &deq_total_time);
+ num_to_process, burst_sz, &deq_tot_time,
+ &deq_min_time, &deq_max_time);
if (iter <= 0)
return TEST_FAILED;
- printf("\tempty deq offload avg. latency: %lg cycles, %lg us\n",
- (double)deq_total_time / (double)iter,
- (double)(deq_total_time * 1000000) / (double)iter /
- (double)rte_get_tsc_hz());
+ printf("\tempty deq offload\n"
+ "\t\tavg. latency: %lg cycles, %lg us\n"
+ "\t\tmin. latency: %lg cycles, %lg us\n"
+ "\t\tmax. latency: %lg cycles, %lg us\n",
+ (double)deq_tot_time / (double)iter,
+ (double)(deq_tot_time * 1000000) / (double)iter /
+ (double)rte_get_tsc_hz(), (double)deq_min_time,
+ (double)(deq_min_time * 1000000) / rte_get_tsc_hz(),
+ (double)deq_max_time, (double)(deq_max_time * 1000000) /
+ rte_get_tsc_hz());
return TEST_SUCCESS;
+#endif
}
static int
@@ -2067,9 +2310,9 @@ throughput_tc(void)
}
static int
-offload_latency_tc(void)
+offload_cost_tc(void)
{
- return run_test_case(offload_latency_test);
+ return run_test_case(offload_cost_test);
}
static int
@@ -2079,9 +2322,9 @@ offload_latency_empty_q_tc(void)
}
static int
-operation_latency_tc(void)
+latency_tc(void)
{
- return run_test_case(operation_latency_test);
+ return run_test_case(latency_test);
}
static int
@@ -2105,7 +2348,7 @@ static struct unit_test_suite bbdev_validation_testsuite = {
.setup = testsuite_setup,
.teardown = testsuite_teardown,
.unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown, operation_latency_tc),
+ TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
@@ -2115,9 +2358,18 @@ static struct unit_test_suite bbdev_latency_testsuite = {
.setup = testsuite_setup,
.teardown = testsuite_teardown,
.unit_test_cases = {
- TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_tc),
+ TEST_CASE_ST(ut_setup, ut_teardown, latency_tc),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static struct unit_test_suite bbdev_offload_cost_testsuite = {
+ .suite_name = "BBdev Offload Cost Tests",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown, offload_cost_tc),
TEST_CASE_ST(ut_setup, ut_teardown, offload_latency_empty_q_tc),
- TEST_CASE_ST(ut_setup, ut_teardown, operation_latency_tc),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
@@ -2135,4 +2387,5 @@ static struct unit_test_suite bbdev_interrupt_testsuite = {
REGISTER_TEST_COMMAND(throughput, bbdev_throughput_testsuite);
REGISTER_TEST_COMMAND(validation, bbdev_validation_testsuite);
REGISTER_TEST_COMMAND(latency, bbdev_latency_testsuite);
+REGISTER_TEST_COMMAND(offload, bbdev_offload_cost_testsuite);
REGISTER_TEST_COMMAND(interrupt, bbdev_interrupt_testsuite);
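Across the test_bbdev_perf.c hunks above, single running totals were replaced by min/avg/max triplets seeded with (UINT64_MAX, 0, 0) and folded per burst with RTE_MIN/RTE_MAX. The pattern, reduced to a sketch (struct and function names are illustrative):

```c
#include <stdint.h>
#include <rte_common.h>

/* Running min/avg/max accumulation as used by the reworked latency
 * and offload-cost tests; the average is tot_time / iterations. */
struct time_triplet {
	uint64_t min_time; /* seed with UINT64_MAX */
	uint64_t max_time; /* seed with 0 */
	uint64_t tot_time; /* seed with 0 */
};

static inline void
triplet_add_sample(struct time_triplet *t, uint64_t sample)
{
	t->min_time = RTE_MIN(t->min_time, sample);
	t->max_time = RTE_MAX(t->max_time, sample);
	t->tot_time += sample;
}
```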
diff --git a/app/test-bbdev/test_bbdev_vector.c b/app/test-bbdev/test_bbdev_vector.c
index addef057..81b8ee78 100644
--- a/app/test-bbdev/test_bbdev_vector.c
+++ b/app/test-bbdev/test_bbdev_vector.c
@@ -144,6 +144,8 @@ op_decoder_flag_strtoul(char *token, uint32_t *op_flag_value)
*op_flag_value = RTE_BBDEV_TURBO_MAP_DEC;
else if (!strcmp(token, "RTE_BBDEV_TURBO_DEC_SCATTER_GATHER"))
*op_flag_value = RTE_BBDEV_TURBO_DEC_SCATTER_GATHER;
+ else if (!strcmp(token, "RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP"))
+ *op_flag_value = RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP;
else {
printf("The given value is not a turbo decoder flag\n");
return -1;
@@ -889,8 +891,7 @@ test_bbdev_vector_read(const char *filename,
goto exit;
}
- memset(entry, 0, strlen(line) + 1);
- strncpy(entry, line, strlen(line));
+ strcpy(entry, line);
/* check if entry ends with , or = */
if (entry[strlen(entry) - 1] == ','
@@ -912,7 +913,8 @@ test_bbdev_vector_read(const char *filename,
}
entry = entry_extended;
- strncat(entry, line, strlen(line));
+ /* entry has been allocated accordingly */
+ strcpy(&entry[strlen(entry)], line);
if (entry[strlen(entry) - 1] != ',')
break;
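The vector-parser hunk above swaps strncat() for a strcpy() at the old end of a buffer that has just been realloc'd to the combined size, which is safe because the allocation was sized first. A standalone sketch of that grow-then-append step (hypothetical helper name):

```c
#include <stdlib.h>
#include <string.h>

/* Grow 'entry' to hold 'line' as well, then append at the old end.
 * On allocation failure the caller still owns the original buffer. */
static char *
append_line(char *entry, const char *line)
{
	size_t old_len = strlen(entry);
	char *grown = realloc(entry, old_len + strlen(line) + 1);

	if (grown == NULL)
		return NULL;
	strcpy(grown + old_len, line);
	return grown;
}
```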
diff --git a/app/test-bbdev/test_vectors/bbdev_vector_null.data b/app/test-bbdev/test_vectors/bbdev_null.data
index c9a9abe9..c9a9abe9 100644
--- a/app/test-bbdev/test_vectors/bbdev_vector_null.data
+++ b/app/test-bbdev/test_vectors/bbdev_null.data
diff --git a/app/test-bbdev/test_vectors/bbdev_vector_td_default.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k40_r0_e17280_sbd_negllr.data
index b5c30278..d98210ff 100644
--- a/app/test-bbdev/test_vectors/bbdev_vector_td_default.data
+++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k40_r0_e17280_sbd_negllr.data
@@ -46,9 +46,12 @@ ext_scale =
num_maps =
0
+code_block_mode =
+1
+
op_flags =
-RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN,
-RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT
+RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE,
+RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT
expected_status =
OK
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data
new file mode 100644
index 00000000..3472c992
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data
@@ -0,0 +1,643 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0xCE000000, 0x2F00F6D9, 0xCC1AF942, 0x22F8F4E1, 0xBED8FAFF, 0x43DC19B5, 0x35E91CC0, 0x5B070D30,
+0xF9DACDFD, 0x170121DA, 0x012AEF53, 0x39E7D90E, 0xD5FBEFDD, 0xCCF0FDC8, 0xD62B0D54, 0xEAE601BE,
+0xE5001328, 0x0CF00DC7, 0x3EE0E4B8, 0xC2D8EB00, 0xBE1EEBF7, 0xCDF5E7E4, 0x4FDFF4FA, 0xF402DA2A,
+0xFEFEE32B, 0xD62CDA55, 0x17DE0106, 0xF92411B5, 0x301B2142, 0xE50C09E3, 0x51030EDB, 0x48DB2A4C,
+0x03E420BC, 0x1AE1DCF8, 0xB6EE0D3A, 0xC2FADF22, 0xBF211508, 0x3228E8AF, 0xE0170BEE, 0x26F8091F,
+0xDBFA02D2, 0xC810FE19, 0xE4DE1005, 0x18FF0D29, 0xE723F04B, 0xFDF2F1E7, 0xCF05DBD3, 0x0412F616,
+0x00DC234C, 0x1501EDD7, 0xD9EB4FED, 0x1C03BC2B, 0xE2FDF6D4, 0xE9EC10C4, 0x2426034D, 0xEDE33B45,
+0x05DC2304, 0xEDE81411, 0x11E416BD, 0x03D1DAA8, 0x04F72431, 0x30F15718, 0xF3F33534, 0xED043AD3,
+0x1CE8BC11, 0xEDE1EBF7, 0x242C0454, 0x0118D740, 0x07F3D01B, 0x0205D623, 0x15233E05, 0xF2EBE5ED,
+0xED2D3BAC, 0xEBF83CD1, 0xFCF9D421, 0x0B0934E0, 0x0A09E1E0, 0x1AEB0EC4, 0xF8FEDF26, 0xFCE8D53F,
+0x20E10846, 0xE327F5B1, 0x18E340F5, 0xF71ED0BB, 0x0EF5CBE3, 0xF4F11C36, 0x141BC343, 0xF4E7E5F1,
+0x30205809, 0x10EAC713, 0x22DBB603, 0x250AFC1E, 0xE4F3BB1A, 0x2316FA12, 0xDEE105F7, 0x12FDC7D4,
+0xFC212507, 0xDB290300, 0x1500ED00, 0x1EABBA27, 0xF14ECA2D, 0xF2D93625, 0x23F20502, 0xF0B9C8E6,
+0xEAD83EE1, 0x0C331CFF, 0x23D205F5, 0x0EC61906, 0xF128C9EF, 0xEF3CC800, 0x0B9DCDED, 0xF3C2CC3C,
+0x23EB04E9, 0x0B51CD13, 0xEADEC3D7, 0x222D06FB, 0xF0331904, 0x16C93F0C, 0xDAE5FE0F, 0x1F14B80C,
+0x001B28ED, 0x3F3B67F3, 0xEEEDE9EC, 0x1CE94514, 0x1D4145EF, 0xD8BA5019, 0xFC06D41E, 0x17BDC0DF,
+0x16D712E4, 0xE8041001, 0xEC2AEDDB, 0x11BE3803, 0xEBD2ECE5, 0xEF5A17F9, 0x2BFF52CD, 0xE958C126,
+0xEE1A1531, 0x0EBF1BF2, 0x18BC401A, 0x2AC2521C, 0xEE4817EA, 0x07482E20, 0x141314E0, 0xF41DCD15,
+0xDB10B40C, 0xC23700E9, 0x4221EB10, 0x4BE7E607, 0xEEF9DC0E, 0x62AC16DF, 0xE2E1C5D3, 0x27E4F70A,
+0x3213020D, 0x4ABE0A14, 0xACC1DE1B, 0xD636D416, 0xDFAF020E, 0xCE230828, 0x2615F6FB, 0xEA1002EE,
+0xD1351117, 0xB71C07F3, 0x03C9DF0C, 0x2B52DB0F, 0xD9AB02D7, 0xD20702D3, 0x48EEFB21, 0x3AE5E116,
+0xE728EE0E, 0x33C50F00, 0x363EF5EC, 0xD4FBF217, 0xB00FFDDE, 0x2333281A, 0x3624040C, 0x02DB0E04,
+0x3FDE27FD, 0x173716FB, 0xF20BEFF1, 0x3EF0191E, 0xB6B01719, 0x1BC7DD27, 0xE80AF3EE, 0x20BE101E,
+0xF42EF8E6, 0x40531BF9, 0x4646172B, 0x44DA1E1D, 0xC7F5E503, 0xF3E8F0E4, 0xA4E2E5EF, 0x42B935F6,
+0xEB1D1A1E, 0xE500ED0B, 0x2909F2CF, 0xCE0E0037, 0xD1320AA6, 0xEB01F9D9, 0x45E7ECC0, 0x4CFEE3D7,
+0x0EE024B8, 0xD3F31B1A, 0xE846056E, 0xF522EFFB, 0x5F1AE443, 0xCAF93620, 0x2CE9F23E, 0xDDE604F2,
+0x3BDDFC04, 0xC8EBEE13, 0xC11FF0F6, 0x18EBE9EC, 0xFAEEF1C7, 0xB6EFDDEA, 0xF8E123F7, 0x4EF4E1CC,
+0x430C271C, 0x3BE01CB8, 0xABE813EF, 0xCAEED439, 0xC1FAF3D1, 0x4E2F1656, 0x11E4DB0C, 0xDAF816DF,
+0x44E3FE0C, 0x2B1D1C0B, 0x3300FD28, 0xC320F4F9, 0x2A1DEB0A, 0xEA2502B4, 0x41061322, 0xC81518C2,
+0x1F2510FD, 0xD1E1F7F7, 0xBD1906F1, 0xF3C9E55F, 0xB3D91A4F, 0x0D0D26CC, 0xBD071B20, 0x41141BC4,
+0x2A1B19F2, 0x4902FDD5, 0xD6F9DE2E, 0x19190341, 0x09E80F11, 0x31FC1E2C, 0xEC1DF8F4, 0xCC0B14E2,
+0x51DFF3B7, 0x1AF028C8, 0x0F0E0E37, 0x2B041A25, 0x1A03FD25, 0xC5030FD5, 0xBCFAED22, 0x9A1F1CB9,
+0xCCE8C2F0, 0xEF1BF4BD, 0x21EC163C, 0xBAED07C6, 0x03F7E3D0, 0xC503DB2A, 0xC20C13E4, 0xD4F8EA30,
+0x391B0544, 0x59F3EECB, 0xAD0A311D, 0xFE0C2C33, 0x250CDBE4, 0xC5E80341, 0x3B0914CE, 0xC0F2ED36,
+0xC204182B, 0x3B0C16E5, 0xFFFF1429, 0xC316D8C3, 0xE408EB1F, 0x3713F4C4, 0xE4F2F01A, 0x0A280C01,
+0xEC0FE318, 0xCB16153E, 0x5C04F2D4, 0x281F35F8, 0xE4D80051, 0x4E0A0CE3, 0xB107DB21, 0xD1DED9FB,
+0x380AFACE, 0x50CC105C, 0xD9D80100, 0xF40ECCE6, 0xDADC03B4, 0x0AE5E243, 0x0C22E4FA, 0x08CD1F5A,
+0xE2D1F7A9, 0x12CE39A5, 0xFFDDD7FB, 0x032C2BAC, 0xF1D9E700, 0x14003B28, 0xFC08D420, 0x08093131,
+0xFEFFD7D7, 0xBB219348, 0x07DC2E4C, 0xDF0DB8CB, 0x22EB06EC, 0x0BE034F8, 0xF10FC819, 0x1623EEFA,
+0x0D1DE6BA, 0xDA1AB142, 0x1C16F4EE, 0x27F500E2, 0x1137C85E, 0x1C0BF434, 0xF004C924, 0x02EBD9ED,
+0x1BF943D2, 0xE7F80F30, 0xEFFE39D9, 0x2CFA5323, 0x04242DFB, 0xF420E347, 0x15F43C33, 0xE9F41134,
+0x20EBB8C2, 0x0205D523, 0xECE03DB8, 0x10F3E935, 0x180BC11E, 0x12E2C7BA, 0xE70B4233, 0xEED8EAB0,
+0x110E3936, 0xFEE327F5, 0x12E60017, 0xFE4026F3, 0xED26C518, 0xE5E9F2FF, 0x200C08EE, 0xEF1439E4,
+0xE92010ED, 0xE1B80908, 0x18584021, 0x24FBFC31, 0x00AE2823, 0x040CDC2A, 0xDE34FBE5, 0x1439C30D,
+0x0DB7E6EF, 0xE6090F21, 0xECAF3C20, 0x10DFE8D8, 0x0CC0CCF9, 0x15061318, 0x043723DD, 0xEA3A3DF0,
+0xEC18C413, 0xFE2BD611, 0xE3BC45FE, 0x16FCC11D, 0x120E39DC, 0xEC00141B, 0x13F03B27, 0x24D2FC18,
+0x0917E1FB, 0x113BEAEE, 0xE9FE11EE, 0x2C505427, 0x0D0B3529, 0x15E73E1E, 0x0FD91AF1, 0xDFF8B6FF,
+0xF413CCE1, 0x1EEFB9EA, 0xDCD04C17, 0xCEC25AF8, 0x1AB2BE17, 0xD8BEAFDA, 0xE6CEBD19, 0xFCC42CF6,
+0xDE19FA15, 0x250203F2, 0xA90042DB, 0x242A2F1A, 0x004D0401, 0x0B4C2825, 0xACB61DDD, 0x07422CDE,
+0xCDFEDFE7, 0xAFF8F527, 0x35052820, 0x0B34F2DD, 0xD648E2F3, 0x3DDF02E0, 0xFE42EBF8, 0xC1F6251A,
+0xBA0DE9E2, 0x442FE2E5, 0xD014E308, 0x2DB409EC, 0x50F8FADC, 0x264BD8E0, 0x2E0FFEDC, 0x1E2D051A,
+0x0FD20AFB, 0xCE1CE7FA, 0x35CDF6F4, 0xFC49F3F4, 0x642FDCDF, 0x3625C507, 0xDF140FFC, 0xDB21F913,
+0xC7D5FDF9, 0xCEE112FE, 0xD1D4F7F8, 0x5D410704, 0xD914CC19, 0xCCF800EC, 0xE3E00C1F, 0xCD05F408,
+0xC952F4DD, 0x444EF02A, 0x1E15E526, 0xD73909ED, 0xFA1900EF, 0xFFB6210F, 0x06EB27DE, 0x0B5BDF13,
+0x1B4F1D33, 0xCDF5F4D9, 0x00F40C1D, 0xC6031CDC, 0xF91C13F4, 0x061321C6, 0xBFDD2206, 0xEE13193C,
+0xFF1C17BB, 0xA91E27F5, 0x25ECD13C, 0x0CF90321, 0xBA1FE5B8, 0x50151F3D, 0xE935285D, 0x330A10CE,
+0x3EE6F542, 0xE80AEA1E, 0x45251103, 0xC014E3EC, 0x2D2C1854, 0x9F0E0536, 0x2E14C7C3, 0xECE50642,
+0xE6E214F7, 0x1021F208, 0xBCDC19FB, 0x4B281BFF, 0x0CDFDEB7, 0x21E91CEE, 0xC22FF856, 0xA2F3EAE4,
+0x5810CB18, 0xE416D03E, 0x25E3F40C, 0xF4180340, 0xFE0EE4C9, 0x433C2763, 0x38F11BE7, 0xE5FC11D4,
+0x1E11F4C6, 0x491D0ABB, 0xC0E72140, 0xB226E84F, 0x122E26AB, 0xEAF116E7, 0xD5E4EE0B, 0xF4FB0323,
+0xDCE61D42, 0x05F1FCE7, 0x0DDCDDB4, 0xF9E42000, 0x181610EE, 0xE1F246CA, 0xEDEDC53B, 0x14DEC5B6,
+0x1E02F629, 0xDEE749F0, 0x1C02BDD7, 0x1EF3F5CB, 0x11E8C640, 0xFA272301, 0xF3FC352C, 0xFDD52553,
+0x09191FF1, 0xDB11FEE9, 0xF7EB1FEE, 0xFFF9292E, 0x0A2032B8, 0x0C1BCDBC, 0xDDFFFBD9, 0x1BEABDEF,
+0x19F84130, 0xECE5ECF3, 0x271302EB, 0xE7EB423D, 0x1E2146B7, 0x14E13CF7, 0x16FE112B, 0xEB211308,
+0xDAD802B0, 0x0017D811, 0xFE26D901, 0x0D0ACBE2, 0xF2ED1A3B, 0xFBDED206, 0x18F0F1C8, 0xD1EFA9C6,
+0xE5E1BDF7, 0x1514C314, 0xF9FCD1D3, 0x02EB2BED, 0x0EEB1BC2, 0x2427B5FF, 0x0B2233FA, 0xFE0FDBE8,
+0xF7DDCE04, 0x03F42BE3, 0x0525D202, 0xF9B20021, 0xEF0FE926, 0xF505E318, 0xF7DF1F23, 0xEADF12F8,
+0xF12C3807, 0xDF01FA04, 0x1D23F426, 0xF1231A05, 0x20F549FB, 0xE0F0F9E3, 0xDB67B418, 0xC6E5623F,
+0x212AF90D, 0xE6C7BD02, 0x24CA4C11, 0xE249F6F1, 0xE8383F21, 0xFC212310, 0xF7561F07, 0x25C4032E,
+0x070EDE14, 0x1437151A, 0xE93C4010, 0xF10CE615, 0xDB0D4D1C, 0xFAF6221B, 0xF5F3E21E, 0xE2470B1B,
+0xE71BC01F, 0x07B0220C, 0xF311CAD8, 0xFF052816, 0x1DC20BDC, 0xE7BB40EB, 0xF5DEE21E, 0x013527FB,
+0x0FF519F2, 0x1AE9BDE4, 0x2B0F53F0, 0x13C6C5E8, 0xF12636ED, 0x10BD38FE, 0xDD0606E4, 0xDA5C4FDE,
+0x1329C5CC, 0x0DC8CA00, 0xD33956F0, 0x0700D711, 0x31B1DE02, 0x210C09D9, 0x0C2EF81B, 0xF5FFE406,
+0x27C1E2DA, 0xCB1801E8, 0x38C70EF0, 0x0D1EF0EF, 0x142E1C0B, 0x0B00EB06, 0x5D441DD8, 0x1E4035E5,
+0xFD3C0AE8, 0x3732DCEC, 0xC8B5F00A, 0xB9BF1023, 0x0AF22018, 0xC6DDE21A, 0x18EF1306, 0x1AC8F1E9,
+0x1F020EF0, 0xEA0209DB, 0x41CFEEDA, 0x21E31AF8, 0x4BBD070A, 0xEECFDD1A, 0xD4DB15F8, 0x430FFC02,
+0x15361A18, 0xCF17ECF1, 0xE42009EF, 0xC21CF4F8, 0x3DDFEAF3, 0x3E0916F9, 0xF028151F, 0x03CA1700,
+0x983B26F1, 0xDE2AC112, 0x209FF9FD, 0x1935F7C6, 0xED18F1F3, 0xBFD0EA0F, 0xC5071908, 0x1F4E13DE,
+0x17F00926, 0xF80DEE19, 0x4DF01FE6, 0xB7D2DB18, 0x0AF5E0F9, 0x4043E21D, 0x4AD517E5, 0xB0CFDE03,
+0xFB22D90A, 0xDF5023F9, 0xDABDF828, 0x1911011A, 0xBDF30FE8, 0xCEAA1A1A, 0xB0D2F62D, 0x3AAC28F9,
+0x42D6ED2D, 0xA83D1B02, 0x1831D1EA, 0xF11D10F7, 0x1D201AF5, 0x394F0C07, 0xB327F0D9, 0x3254DA01,
+0xEBF40AD5, 0x26A413E4, 0x5113FECC, 0xD4F32915, 0xDDB3FD1A, 0xC0D2FB25, 0x02DEE7FB, 0xC33DDB05,
+0x01021615, 0x0B1D27DA, 0xCBC2E2F6, 0x193CF3E9, 0x27E2F1EC, 0x5A1BFF0A, 0xE8CF320D, 0xBD4DF0F8,
+0x0F291A26, 0x1817E7FF, 0xEFCB10F0, 0xD40CE9F3, 0x0606041B, 0xFABC2322, 0xE0E9DEE5, 0x1BD80710,
+0xC015F301, 0x4718E8EE, 0x69D81E11, 0x19F6BF00, 0xF60F1E00, 0x3002E225, 0x29EA08C1, 0xF4EA00EF,
+0x3910E5E7, 0xDB0B12E4, 0x2A3E0265, 0x3BE8FE11, 0xBBEFEEE9, 0x151BE2BE, 0x1E2314FB, 0x4C2DF754,
+0xD5F823E0, 0x10200247, 0x01DB194D, 0x1B17D93F, 0x5806F42E, 0xFA19D040, 0x040BDF1D, 0xFCCB25A4,
+0x39D9DC00, 0x2C0EEFE7, 0x20F3FDCB, 0xC2E70841, 0x3EEFEAE9, 0x32EDEAC5, 0xAC1BF70D, 0x0C15D4C3,
+0x4CE7E5BF, 0x26F024E9, 0x061AFE0D, 0xDE13DEC5, 0x09DE0706, 0xFC0C1FE5, 0xD41CDC0B, 0xCC1DFBBB,
+0xC6020CDB, 0xCD16133E, 0x3FF30CCC, 0x0B2D16AB, 0x3800E3D9, 0x3AFCF0DC, 0xE4FFED27, 0xFCF9F3E0,
+0x05FF24D9, 0xE1F9DE2F, 0x391908F2, 0xC8F211CB, 0xEAEF003D, 0xDC1C040D, 0x25F74ECF, 0xE41F0CBA,
+0xDF1708EE, 0xE016F83F, 0x1D124515, 0xF325344D, 0xE114F63C, 0x261FFEB9, 0x02F02739, 0xF12018F8,
+0xD9124F3A, 0x1E1A45F3, 0x1504142C, 0xDDF506E3, 0xEAEAC113, 0xDE1AFB42, 0xF113C93B, 0x1721C207,
+0xE3D4BB54, 0x070A2FCE, 0x0C0AE41D, 0xF90D3035, 0xE6DCF2B4, 0xFB15D4C3, 0xFE2425FC, 0x03002B28,
+0xE50AF433, 0x07EC2FC4, 0x07E921EF, 0xFE09DAE1, 0xE21EF6B9, 0xF3E6360D, 0x17EE3F39, 0xE7FA0E22,
+0xF5151DED, 0x1EF945DE, 0x0D1236EB, 0x0AFD32D6, 0x0928CFFF, 0xDCDAFD02, 0xFB272D00, 0xCE025B25,
+0xEA0D3ECB, 0xE3E2BBF5, 0x06D32354, 0xF4DDCCB6, 0xE4002245, 0x213AB706, 0xECCCC3ED, 0x3CED9C0B,
+0x15D23C14, 0x0BE7E3F9, 0x1FE70A0E, 0xF30D350F, 0x1928F01B, 0xD50AAE01, 0x120AC6E1, 0x0ED4C91E,
+0xFFF7D804, 0x0D0CCA1F, 0x0433DBE4, 0xEA0DEFF5, 0x36FB5E1C, 0xEB11ECDC, 0x23C0FB17, 0xEBC63C17,
+0x272800EF, 0xFC34D4FF, 0x0AB8E2F5, 0x01FB2AE0, 0x090630DD, 0xFE0AD522, 0x1B10BD1D, 0xDF520719,
+0xF12436D5, 0x1D20BAFC, 0xE442BCF8, 0x05E0DDE6, 0x183FF1F8, 0xF816E118, 0xE3C9F4EE, 0xF2F51AF0,
+0xFE5126E4, 0x1E31F7D7, 0xF9362F09, 0x15E43CF2, 0x0D3EE50C, 0xF74931EA, 0x23EEB621, 0x02B9DAE9,
+0xDFE5B71F, 0xFD46D50D, 0x103E181F, 0xDF580717, 0x00C3EC31, 0x0CF714EC, 0x4ACCE41E, 0xF2E0DEF4,
+0xB0431A08, 0xBD38D8E6, 0xEED6E510, 0x41B8EA02, 0xEC36E61F, 0xBEE9EBF3, 0xBE28E712, 0xF6051BFF,
+0x2FE5E222, 0x2ADCFAF2, 0x3834FF04, 0x2AD1100D, 0xF4C502F8, 0x4DE7E414, 0x07A5DA10, 0xD1A92133,
+0xB6ACFAD2, 0xCF51222C, 0xCD1BF8D7, 0x3F16F50E, 0xEEF4EA12, 0xA52F161D, 0xC4B3CE07, 0xB622EDDC,
+0xDE2ADE06, 0x4FC8FB02, 0x6EDED9F1, 0xFC19B9F9, 0xD5B4DDF1, 0xDFC30423, 0xDFFBF9EA, 0x012C07DD,
+0x12AF2805, 0x1A2EEAD6, 0xF8A1F207, 0xC0BA21CA, 0xFFDCE9E3, 0x36BFDA03, 0x05DD0E19, 0xF905DDFB,
+0x32AEDF23, 0xFDFB09D5, 0x45D5DA23, 0x4B3E1CFD, 0x0905DDEA, 0xDFE2DE00, 0xB1FB07DD, 0x33EA2813,
+0x02ECF5C3, 0xED0CD91B, 0x0D1BEABD, 0x4A17E5C0, 0xF707DFDF, 0xEE191F41, 0x19EC15C5, 0x4112F1EB,
+0xD4F01AE9, 0xBF21FC06, 0x19DCE605, 0x2F0C10E4, 0x181FF947, 0xC00AEF1E, 0xF72EE8AB, 0x42F8E2E1,
+0xA3EFE7C7, 0x31133514, 0x1EF608E2, 0x0DECF6EC, 0x0CECE5C5, 0x3DD31D54, 0xECE21545, 0xAE0CECE5,
+0x49202B08, 0x0EFE2026, 0x13F41B35, 0x1B0BEB32, 0x1719F30F, 0xECDAF0FF, 0xECE9ECC0, 0xF212EC3B,
+0x3DE8E6EF, 0x13F2EB1B, 0xB008EBE0, 0xBAE22845, 0x1A111DEA, 0xDBFC0EDD, 0x3F29FE50, 0xCDF817E0,
+0x4B030CD5, 0xEADCDE05, 0xC31312C6, 0x272EEAA9, 0x340201DA, 0x17F400EF, 0xED1F1608, 0x24D24B55,
+0xF4091DCE, 0xF51633C2, 0x03D32B56, 0x2325FA4D, 0xDEDA05FD, 0x01F72831, 0x04F12C19, 0x2716B1C3,
+0x1B19F3C0, 0x10E2C909, 0x050C2233, 0x03F22519, 0x1C06F422, 0xE7D70EAF, 0x03D725AF, 0x13EC14EB,
+0xF6F631E3, 0xF900DE28, 0x1FE2F7F6, 0xFBE9D2C1, 0xFBEFDDE9, 0xE110F7C8, 0x1C290C50, 0x27D9FFFF,
+0x07E421BB, 0x3BE89CF0, 0xF50733D0, 0x2514B2EC, 0xF5EB1DED, 0x0619D241, 0x1FE8B840, 0xEEEA3B3E,
+0xED1F1648, 0xFF15D73E, 0x0123D805, 0x0310D5E8, 0x010ED819, 0xECE53B43, 0x03E1D547, 0x01D9D6FF,
+0xDFE3B745, 0x1C14443C, 0xDF0908E1, 0x03FE2ADA, 0x1306C5D2, 0xDD001004, 0x1F3AF818, 0x1A3BF3ED,
+0x1EBC47EE, 0xF83E1F1B, 0xE21445EA, 0x2101B713, 0xE81AF126, 0x1930BFF3, 0x1533C408, 0xEDBFC60C,
+0x060F2EE8, 0x06BDD11A, 0x04F524E4, 0xD94C001E, 0x0334DCDD, 0x06F1D30C, 0xFBDBD219, 0x0EBE1A03,
+0x08D230E5, 0x250B4DFA, 0x0903311C, 0x14233C25, 0x1ABAF205, 0x30C5581D, 0x02FAD712, 0xDF2108DE,
+0x02DDDB07, 0x1D47F606, 0x0EEF191F, 0x1B3AF2E9, 0xF9E52E12, 0xF0B1C8F4, 0x290CAF28, 0x0013D8E5,
+0xE1B50AEC, 0x1A224322, 0xFDE8DB06, 0xF8A531EF, 0x0DD1CCCD, 0xE81AC007, 0x201048F3, 0x1800F1E8,
+0x000BD8D9, 0xEC2A13E2, 0xF3EE3502, 0x282F0117, 0x08F8E006, 0xFE542620, 0x19E9C0D3, 0xDD140412,
+0x1119C6EB, 0xF6C2E10E, 0x1EC50BEB, 0x15BD13ED, 0x143DC51B, 0xE5F2BE14, 0xF552E41A, 0xE10FF62A,
+0x2A0B5218, 0xE33A0CE2, 0x02D7D612, 0xE1A7B800, 0xD8C10031, 0x0810E0E9, 0xD93AB0E8, 0xF8113011,
+0x15B73C17, 0xFE46D520, 0xF6C51EE3, 0xD51A52ED, 0x1A0ABD0E, 0xFBF8DEE2, 0xE245BA20, 0x2225FB1E,
+0xEC3A3CFD, 0x19340EEE, 0x08FE20F4, 0x1F154727, 0x05F8D213, 0x16F2EEE1, 0x22B6FA1A, 0xF404CCDF,
+0xE22E4624, 0xFB3FDDFB, 0xE6F7BF17, 0xF0B6E71F, 0xF013E723, 0x17081114, 0x24EBFCE0, 0xFBE52DEC,
+0xE9C310F3, 0x26F04E15, 0xF145E8E8, 0x122B17E2, 0x0D40CCFD, 0x43CA96E8, 0x421D0F00, 0x27DCE70B,
+0x233B00FC, 0xCC44FAEE, 0xE0BC0BE3, 0x1BE7071C, 0x01C7F20F, 0xB29B28F0, 0xFB02DBC4, 0x32092326,
+0xE648F5E2, 0x4707F2DF, 0xDF36E2E0, 0x1CC308F2, 0xBBA50DEB, 0x1DE71D34, 0xCE36F5F1, 0xCDF90AF2,
+0x0FDE0CE0, 0xCF0C1906, 0xE0E509E4, 0xBC37F9F3, 0xDA06E510, 0x1FD3FDDF, 0xDB09F705, 0xF445031E,
+0x24311CE2, 0x55EEFDF8, 0x3CEDD216, 0xAFF01516, 0xCA3D29E8, 0xCCAEF1EA, 0xE0F60CD5, 0x4CECF7E2,
+0xDCD5DBEB, 0xAEC2FC02, 0x4825D7EA, 0x252D2004, 0x24F604FB, 0xD9D7051F, 0x1AF201FF, 0x3DE70EE6,
+0x43BCEC0F, 0x43D8E5E4, 0x20EB1BFF, 0x2AAFF814, 0x021A02D8, 0xD80C26F2, 0xDB00001C, 0x2BF2FCE6,
+0x181F0347, 0xAFE8F0F1, 0xE31D2A0B, 0xD015F4C2, 0xEF02072B, 0x361717EF, 0xFA25F2B3, 0xEEFADE22,
+0xEE21EBF8, 0xD6F5EA1D, 0xBF160212, 0x03E6190F, 0x452126B7, 0x32D91DFF, 0x2D000A28, 0x25F205CA,
+0xDEED043B, 0x36DCFA04, 0x13EAF112, 0x141915F1, 0xF2F0EC38, 0xBC20E6F8, 0x0BF2E519, 0xD0F11D19,
+0xC7F6F932, 0xBFE2EFB9, 0xA4F919DF, 0xFFEACCEE, 0x3BDBDAFC, 0x2B1E1245, 0x5CEBFC14, 0xDB10CD18,
+0xB718FE40, 0x0BE92012, 0x2414E2C5, 0x3B09FCE0, 0x17DF1306, 0x212F12A9, 0xC71407C4, 0x0901EFD7,
+0x22EAE23F, 0xE7E805C1, 0xC6F21037, 0x4A111218, 0x1AEC22EC, 0x27E30EF6, 0x0C000134, 0x1BF10D37,
+0xFD1F2A09, 0x1AEFF3E9, 0x160F3EC8, 0xEC173B3F, 0xE8F711E1, 0x050A22CD, 0x15263DB1, 0x07012127,
+0x1D22BBB7, 0x34E3A5BB, 0x19210FB8, 0x17D41254, 0xD822B1FA, 0xE6DE0E06, 0x1519ED40, 0xDDE54CBE,
+0x1EFC0BD4, 0xFE0D2A35, 0xE8EF40C7, 0x09E5E1F3, 0xF60631DE, 0xD6EDADC5, 0xE61A0FBE, 0x091C30BC,
+0x0514DE3C, 0x1D1A45BF, 0x03E0DC08, 0xF4FE1C25, 0x1D260BFF, 0xF6FD3126, 0x1013C8EA, 0xFE18DB3F,
+0xDF114916, 0x1806BF2D, 0x0E0535DC, 0xE5ED0DEB, 0x1EE445F5, 0x16DC12B4, 0x23FA4B2F, 0xFFD6DAAF,
+0xECF714E2, 0x273D0066, 0x1B15F313, 0x1D0CF533, 0x17003E28, 0x1FD3F654, 0x00EFB117, 0x1C13BBD9,
+0xF5C21D15, 0xF1CF3715, 0x244A0309, 0x09321FDD, 0xEFF81609, 0xE1ED091F, 0xFDFA2A16, 0xF742D022,
+0xDCECB51B, 0x16FAEE14, 0x140C3C21, 0xEA0AED1B, 0x24E94CE2, 0x17E03F12, 0x1E01BAF9, 0xE82E4128,
+0xEE311506, 0xF7FEE1F6, 0x0661DFD9, 0xDFB1FAC8, 0xFF282928, 0xEA3412FF, 0x104FC80C, 0x00ABD927,
+0xEE32EA2D, 0x153B3D09, 0xFCB82DEC, 0xE9E8C021, 0x09BECEF0, 0xFCCA2D19, 0xDF09F90F, 0x013F2AE1,
+0xFC44D4E9, 0xFD24D6E5, 0x1C0CF3FB, 0x2601021C, 0x1AB6BED9, 0x04B7DCDD, 0x102BC8E0, 0x09331EFC,
+0xE8ACC1F5, 0x222E4BD4, 0xF715E1FA, 0x23484BED, 0xEB3913E0, 0xDFE4B711, 0x0C411B0D, 0x18FEE800,
+0x42231127, 0x493B19FC, 0x39BCDE12, 0xFB46121B, 0x452DDDE2, 0xEA0DE3FB, 0x2109EF1A, 0x1B3CF81E,
+0xE228F2EC, 0x48ADF6FF, 0xC2C0202C, 0x9F3FEB19, 0x192BC8E9, 0x2B0F0F04, 0x4F2DFD19, 0x1045D9FB,
+0x0B3FE8E2, 0xF2FBE316, 0xBC0EE623, 0xB301E419, 0x1F2226D9, 0xBE0009FA, 0x27121A27, 0xF95002EA,
+0x4714DED8, 0xBAE6E1ED, 0xC5191EF2, 0x3B2E14F1, 0xFB0FEC06, 0xDAB123E7, 0x3DDBFFD9, 0xC318ECFD,
+0xDFD014EF, 0xB23D0608, 0x11E326EB, 0xC4CAE90B, 0xD847EC0D, 0xDFC3FFE2, 0x55BD0815, 0x17F9D3E5,
+0x3EE91121, 0xF9C41712, 0x22AFE0ED, 0xCDE7FBD7, 0x1BC9F5F0, 0x0422F4F0, 0x200EDC06, 0xC3F700E6,
+0x3B20EBF7, 0xD115EDEE, 0xC3ED08EA, 0x5AFEEC29, 0xC21F3247, 0x3315163E, 0xE330F659, 0xEAF5F5CC,
+0x1716113E, 0x4CE611BF, 0xD517DBC2, 0xB2EF043A, 0x45DD25B5, 0xC0151E13, 0x131CE843, 0xFFE4EB44,
+0x5FFB27DC, 0x381336EB, 0xB8EFF017, 0xECF720CF, 0x200B131D, 0x0622F805, 0xBA07DD30, 0xD9061FDE,
+0x30EA013D, 0x40300858, 0x9AD917B2, 0x270FC1C9, 0x1209FFCF, 0x201716EE, 0x2A050723, 0x1BEF0239,
+0xBF18F310, 0xA83719A0, 0xA9DDD105, 0xE70F3037, 0xDE0D0E1B, 0xE1D4F955, 0x0CF00AC8, 0xE80FE4C9,
+0x1BE2100A, 0x40FB0EDC, 0xDD1A17BD, 0x4E1604C2, 0x5CE7270F, 0x2BF635E3, 0x00D80350, 0xFD0027DB,
+0xDD22B54A, 0xEAE11247, 0xF705CE22, 0x01F2D736, 0x1D17443E, 0x1D0145D8, 0x18181111, 0x04FEDCDA,
+0x01EAD7C3, 0x05DC23B5, 0x1FCFB959, 0x1A0A431E, 0xFCDC2CFC, 0xD61452EC, 0xECF23DCB, 0x31D6A652,
+0x261BFE0D, 0x0CD73552, 0xEAFB11D3, 0x10291952, 0xF7F41F34, 0xEC1E3CF6, 0x180A41E2, 0x03FBDB23,
+0x2026F9B3, 0x0C02E42A, 0x08F030C9, 0x0E19E70E, 0x0F1519C4, 0xDCE703F2, 0x100CC833, 0x0105272C,
+0x01252903, 0x0DCCCB5C, 0xD21AAA0E, 0x2917AFC0, 0x1BEA0E3F, 0x2A1B52BE, 0xF2E8CAC0, 0x1DDDBB4C,
+0x0215D73D, 0x0DF7E5E1, 0x08E230F5, 0x0EDFCA4A, 0x19FE0ED9, 0xE6EC42C4, 0xFB1D23F4, 0x22090632,
+0xEA073F21, 0x06ED2115, 0x180BBFCC, 0xE6E20EF6, 0xE512F23A, 0xE21BF7F4, 0xF9F22E37, 0xF82B2F53,
+0x1CF94421, 0x16D8C2B1, 0xD9F4FF1C, 0x23EC4B14, 0xF5F03339, 0xFE172B40, 0x1C2F0D57, 0x230A0632,
+0xECDA3CFD, 0x1306C52D, 0x01D3D856, 0x1D2846FF, 0xF9D82150, 0x1B0CF31D, 0xEE03C626, 0x1AFFBED8,
+0xD605AEDD, 0x2403FD25, 0x01EC28C3, 0xF1E1C8F7, 0x06E7D140, 0x09CF325A, 0xF2151AC3, 0xE7F8F120,
+0x0A2BE353, 0x23E5FBBD, 0x1C1645C1, 0xF716D011, 0xF6ED1D15, 0xFB2123B7, 0xE2070A2E, 0x16F7C2E0,
+0xE6100D18, 0xFB19D440, 0x2FF15619, 0x1F0FB938, 0x0101272A, 0xF8F42F35, 0xF3DFE649, 0x21E549F4,
+0x0D430000, 0x400EE51B, 0x452E181A, 0xF1411DFB, 0xF4DC1919, 0xF247E3FC, 0x33141AE1, 0xF81CF515,
+0x3115200D, 0x3FCFF713, 0xE9DA17F6, 0x0BF31102, 0x17D1E3E5, 0x0E9712FA, 0x2EAFE6C0, 0x5900F928,
+0x50D8CE27, 0xCDACD9FF, 0x41BB0B2B, 0x2AC9181E, 0x13E1020F, 0x0708EC08, 0x09D220E1, 0x1EC0E206,
+0x1F2BF6E9, 0xD23809FD, 0x46D3FBF0, 0xF617E2FB, 0xF4F61EEF, 0x31D71BE2, 0x1AD4F7FE, 0xB534F103,
+0xDE15DD0C, 0xC7FBFA12, 0x1931F0DC, 0x4AD8F1F7, 0x4937DEFF, 0xF908210F, 0x35D121E0, 0xECCC0EFA,
+0x34CBEDF5, 0x062A0CF3, 0xE4B12103, 0xD73DF427, 0xDCE8FE15, 0xB4FC04F0, 0x0B06DC23, 0x16391D22,
+0x23051310, 0xE6FCFADC, 0x1C95F2DB, 0xE3DE0DBD, 0xDB270A05, 0xB4EEFC00, 0x1845DD16, 0x0149F11D,
+0xC6D52621, 0x1E34EE03, 0x17D20AF4, 0xA10611FA, 0x20F937DE, 0x5D17F8DF, 0xA00435EF, 0x3ECEC7DC,
+0xF9D8EB0A, 0xFC332200, 0x1DE9DBF5, 0x2E580CEE, 0x3E1AF9D1, 0xC64715F2, 0xD7CB121F, 0xEFF400F2,
+0x084CEAE4, 0x064DE0DD, 0x2B10DE25, 0xC1FB0218, 0xD4F8EADE, 0x32D6FBE1, 0x0F65F602, 0xEC32E83C,
+0xD2F613F5, 0xC526F9E1, 0xF0F51302, 0xF5B017E3, 0xFA0FE4D7, 0xFACBDFE8, 0xC0F222F2, 0xD630E81A,
+0xD6DEFEF8, 0x33A602F9, 0xC826F633, 0x10E51002, 0x0308E9F3, 0xE011251F, 0xBAEAF8EA, 0x26F21E12,
+0x0000021A, 0xE00E08CB, 0xE0ED0715, 0xE71CC0F4, 0x0A1ECE0A, 0xDB260303, 0xE7E2BEF5, 0xDDE705F0,
+0xDA0BFEE3, 0xF6D83250, 0xF4FA1CD3, 0x2805FFDC, 0x1DDBBAB4, 0x1611C218, 0x18E13F09, 0x1C1D0DF5,
+0xDC25B403, 0x2BF6AE32, 0x180F4136, 0x39FC61D3, 0xF3DE1BFA, 0x1AD94100, 0xE8F8BFDF, 0x0826E0FE,
+0x1AE142B8, 0xF80F20C8, 0x0116D9EE, 0x22E84BF1, 0xEB22C3B6, 0xEE01EBD7, 0x0EE8CA10, 0xD8FA00D1,
+0xEB073DD1, 0x1EE3BBBB, 0x12EFC639, 0x3708A121, 0xDFDB49B3, 0x28D85000, 0x0C003429, 0x000D28CA,
+0x12D1C6A9, 0x0D061B2D, 0xEBDCC304, 0xEF05C8D4, 0xDACD02A5, 0xF3F7CAE2, 0xE5144313, 0xFC222B4A,
+0x23F4B51C, 0x18270FFE, 0xEDDC3CB4, 0xE9213F49, 0x1EF309CB, 0xE3E5BB44, 0x0AF6E333, 0xE1EF473A,
+0x080DD036, 0x0FEB38EE, 0xFE0629DE, 0xDAFCB224, 0x0DF9CB21, 0x07D3DFAC, 0x06F822D0, 0x1E1ABAF1,
+0x00F32835, 0xE00AF71E, 0x08E2D009, 0xE003F9D5, 0xD5F2AECA, 0xF4EFCCEA, 0x0B0E33E5, 0x0D271A00,
+0x1232EB59, 0x07DBDFB4, 0xD2ECAAED, 0x17FC40D4, 0xDAEB0313, 0x07E8E010, 0x07F5201E, 0x13173CC1,
+0xDBDCFDFD, 0x24ECFBC3, 0xF2D9E6FE, 0x420695DE, 0xFC172C11, 0xDF0F06C9, 0xF413CC3C, 0x1B11F217,
+0xE01148C7, 0x1EEEBA3B, 0xDDDCFCB3, 0x24E1B4F6, 0xF4DA3303, 0xF1FC192C, 0xEAFC11D4, 0xEAD41354,
+0x22ED06EC, 0x23090000, 0x0BE7FBE1, 0xE0BFE4F0, 0xF2C4F818, 0xE5BEE6EC, 0xDD3E0DE7, 0xEB360516,
+0xFBC6ECF2, 0xFCEC23EE, 0x0A20DD13, 0x12211FF8, 0xF23A1706, 0xCC36E6EF, 0x1C26F5F2, 0x2F05F303,
+0xF540FA23, 0xFE4FE3E9, 0x23CED927, 0x18C2FA0A, 0xD1A411EA, 0x1A4306CB, 0x474E0EE6, 0x21331FDB,
+0x2C33070B, 0xC60804F6, 0x515A12DF, 0xE8FDD7CE, 0xD7171126, 0x1043FF11, 0x48F8E81C, 0xCF48E021,
+0xF9CA09E0, 0x2B26210F, 0x1F10FD02, 0xC9ECF8E7, 0xF9D30FED, 0x33222205, 0xC44E0A05, 0x41381425,
+0xCEC21811, 0xCFFFF5EA, 0xC0E7F727, 0xF5481810, 0xE247E3E1, 0x11150AE0, 0x40C116EE, 0xFDEC18E9,
+0xCF0F2415, 0x46E5F6E6, 0x430D1DF3, 0x11EEE61A, 0xA45017EA, 0xF1BBCB28, 0x35DC19E3, 0xECBC0CFB,
+0x3631ED1D, 0x202C0EF8, 0xE7D2F8FC, 0x441910FA, 0xBED4E4F2, 0xCE3DE6FD, 0xD8CB0B15, 0xDE3C01F2,
+0xD7F1F914, 0xFDCE00E7, 0xBDEF240A, 0x2CC5E616, 0x1C16FCED, 0xE805F412, 0xE4F11023, 0x14110DE8,
+0xE2ED1418, 0xD7F7F6EC, 0xF8E9001E, 0x18C3E111, 0xAA4BF1EA, 0x2BB02DDE, 0xEB18FDD9, 0xEECFED11,
+0xAEF8E9F7, 0xB0D129E0, 0x1952D9FA, 0x07D40ED6, 0xB13EDF03, 0xC74C28E9, 0xD7DB12DC, 0x0B11FFFD,
+0xCEFC1D17, 0xC34CF623, 0x4A011523, 0x0410DDDA, 0xF2072317, 0x33101B21, 0x35F7F4E7, 0xF2CBF3E0,
+0xAACDE6F4, 0x00002F0B, 0xE51344C5, 0x1211E938, 0x091EE1F6, 0x26CD02A4, 0xE028F84F, 0x2522B206,
+0x1BFDF3D5, 0x0C23CC06, 0xE809C031, 0x19D30F54, 0x1B0DBD1C, 0x2EE256F5, 0xE11A080E, 0x17DF11B8,
+0x16E4C20C, 0xEECA3B5F, 0x05EA2D11, 0x140AC432, 0xE0F6481D, 0xE024094B, 0x16F6121D, 0x14083C20,
+0x200A481F, 0xE1F308E5, 0xFB0623D1, 0xDE1EF946, 0xEE211606, 0x15EC3D13, 0xF402E4DA, 0xF1D8E700,
+0xE6D0F358, 0xEEF9C621, 0xE3F3BB36, 0x0A171E3F, 0x0D0ACBE1, 0x09071F21, 0x04DDDDB5, 0x051422C4,
+0x07F2211B, 0xFD1BD5F3, 0xEFFE3926, 0x261602C1, 0xF40F3438, 0xEE07EB21, 0xF4DA1CB2, 0x241EFCBA,
+0x2600B2D8, 0x11DC174C, 0x1A0542D4, 0xDF1349C6, 0x19E10F47, 0x09131F16, 0x010A27E2, 0x0A12E2C5,
+0x01E229F5, 0xE3090AE1, 0xDEFEB629, 0x150E1319, 0x221D050B, 0xE3F4F6CD, 0x21F6071D, 0x23FC4C24,
+0x1312C6EA, 0x12F01738, 0xEFFF3AD7, 0x1FFD48D5, 0x1C0B0CE4, 0xF5F71C30, 0x03F725E0, 0x0D38349F,
+0xEB0614DE, 0xFB03DCD5, 0xEAC9EDA2, 0xDEF8B6E1, 0xDD0B0532, 0x17ECC2C3, 0x27E1FEB9, 0xEF14EA14,
+0xF9FC2F2C, 0x160AC232, 0x1C0445DD, 0xE92311FB, 0x06162F3D, 0x03F12538, 0x0AFE1E26, 0x06E72141,
+0xE1E6F841, 0x3221A5B7, 0x00E6D70D, 0xF8F8E030, 0x24FF4CD8, 0xFB02DCDB, 0x162313FB, 0xEAF1C2CA,
+0x190CBFCC, 0xEFF4E834, 0x60370000, 0x25C139F2, 0xF731FCEA, 0xF734E009, 0xF10AE00B, 0xD2C81AE2,
+0x26590710, 0x1ACE0231, 0xF41D0EF5, 0xD62BE4F6, 0xEF340303, 0x0AB9EA0C, 0xBA251EE1, 0xCE6BE204,
+0xC1530A42, 0xC411E82A, 0xD4CD15E9, 0x35D4040C, 0xBEBE0C04, 0x01D4E7E5, 0xE9CAD9FD, 0x58F1EFF2,
+0xAC46D1E6, 0x21DED4E2, 0x2C240706, 0xE0330304, 0x4931F80B, 0xF44C2109, 0xB74A1DDB, 0x2AC320DE,
+0xBC2D02EC, 0x07EEE4FB, 0x2DCB20E9, 0xC910FBF4, 0xAF52F217, 0xD720D72B, 0xDBCC02F8, 0x1342FDF3,
+0x26B315E7, 0x3D450325, 0xC7C0151E, 0xE9471219, 0x42E4111F, 0x4AA3E60B, 0x5537DF36, 0xB4E92E0F,
+0xDCFEDC10, 0x18F1FCDA, 0xBFFA10E7, 0xD706E721, 0x2BD20022, 0x16500305, 0x4CC2EF29, 0xD71F2417,
+0x20120009, 0xE7EC08EA, 0xE520F1EC, 0x42C8F3F8, 0xF23A19F0, 0x34111BEE, 0xCBF00DE8, 0x02F50DE8,
+0xC3F0DA1C, 0x67CC1518, 0x09463FF4, 0xFCF7E11D, 0x01F6DC20, 0x2F1D27E1, 0x2816F9F4, 0xAA1800EE,
+0x2E592DF0, 0xF4DC07CE, 0xCF49E504, 0xD33BF6E0, 0x17040514, 0xC445EE25, 0x4C33EB1D, 0xBB39250C,
+0x4BAFE310, 0x58F8222A, 0xDD34CF1F, 0x4CCBFBF4, 0x32FDDC0D, 0xCF230ADC, 0x2D13F7FC, 0x49D6FA15,
+0x09E2E002, 0x4425E20A, 0x170D1C02, 0x47DDF0E4, 0x2A441F05, 0xC0B502E4, 0x40B71823, 0xCFEE19DF,
+0xD34A0917, 0xE7370521, 0x0000F1F1, 0x1CFDBD25, 0x1B080D30, 0xC61F61F7, 0x05122C3A, 0xDEF606CF,
+0xF805D023, 0xD91D50F6, 0x1E0D0AE6, 0xE111F7E9, 0x1B1F42F7, 0xE9221106, 0xF7101EC8, 0x061C2FBD,
+0xE8E711F2, 0xFB0E2335, 0x0DFDE6D5, 0x10E7E742, 0xF1FFE62A, 0x04F424CC, 0xF82B1F54, 0x10F8E821,
+0xF40B1C33, 0xE2F3BA1B, 0xFBFBD32C, 0xF6E531BD, 0x12F7171E, 0x13073BDF, 0xDDEFFAC8, 0xDC1DFB0B,
+0x0D00E4D9, 0x120AC7E2, 0xFEE1D546, 0xD9EFFEEA, 0x1EF0F737, 0x021225E9, 0xD4DE54FA, 0xF3FC1B2D,
+0xF80DD1CC, 0x1EE1470A, 0x01E6D7F2, 0xE1E6F70D, 0xF9F0DEE9, 0xFB14D2C5, 0xE603F2D5, 0xD82750FF,
+0x011AD941, 0xE2040A23, 0xE425BD4C, 0xFC28DC01, 0x22D94BFF, 0x0AE6E242, 0x16EDC2C5, 0xD90B011C,
+0x050AD232, 0xDB114E18, 0xF2EB3512, 0x1AE642F3, 0xCFF1A7E7, 0xF220C9F8, 0x171FC148, 0x0735225D,
+0xF2FF1926, 0x14D914FF, 0x0DFC36DC, 0x191BF00E, 0xE50CF3CB, 0x07F5221C, 0x2305FAD4, 0x1FD9094F,
+0x25FFB4D6, 0x170FC1CA, 0x0FF3C91A, 0x02E82BF0, 0xDFEA07ED, 0xECE9143F, 0xE2E44645, 0xF2F6E71E,
+0x26FAFE21, 0xFD17DB3F, 0xF0F6181E, 0xF818D0F0, 0xDD24B5B5, 0x00E8D8BF, 0xFE08DA20, 0x1104C6DC,
+0x22FCB5D4, 0xE5FFF3D7, 0xE4CEF5A6, 0xF1EBE6EC, 0xEC06EBD3, 0xF5E91DC1, 0xE70540D4, 0xFC1A2D0E,
+0x08E7E10F, 0xFAFE22D9, 0x06F82130, 0x09E80000, 0xF4F51F10, 0x49EE1CE2, 0xEEFADF17, 0x44ECE9DE,
+0x0ECEE415, 0x354C1AF6, 0xCC020DDC, 0x50100C25, 0x1F362818, 0xCCB6090D, 0xE5C30C21, 0xFDDA0DEC,
+0x350DDA01, 0x131A0C1A, 0x1AAB15F2, 0x2B590DD2, 0xC4A80331, 0xC517EC30, 0xCED3EEEF, 0xCC44F505,
+0xF40DF5E3, 0xE2341BE5, 0x20C50AF3, 0xF9D308ED, 0xE345DF06, 0x3DF10CE4, 0x203D15E6, 0x1F4BF8EB,
+0x1D37F8DD, 0xFEDC0BF1, 0xC14726FC, 0x0EDD1820, 0xCC221A05, 0xC1CB0D06, 0xC1CAE90C, 0x45EC180F,
+0x9A3AE3EB, 0x15D23EED, 0xABCDEE06, 0x5EB52DF4, 0x19D33523, 0x0C41F1FB, 0xE8F8E4E8, 0xCD24F01F,
+0x1239F604, 0xE6EA1611, 0xF23E0E11, 0x14F01A16, 0x450F14E8, 0x44BD1DE8, 0x3DE3E41C, 0x13E316F5,
+0xEABEECF6, 0xEE00121B, 0x3AEEEB28, 0xE33AED15, 0x229AF5ED, 0xBF26FAC1, 0xE7351A02, 0xA6D80FF3,
+0x5303CEFF, 0xCF082ADB, 0xB7BFF6E1, 0x3521E0E6, 0x07120DFA, 0x25E9E0EA, 0xB423FDEE, 0x0E012305,
+0x1DF21B28, 0xCE1AF6E6, 0x2AF50AF2, 0xF80602E4, 0x0AB71FDF, 0x1F2A1E21, 0x0316F8FE, 0x0F4F2512,
+0xEBCE1A27, 0xC0B3EC0A, 0x23E818DA, 0xEB0AFBEF, 0x164F13E3, 0xCDE61326, 0xC4DF0BF3, 0xBE3C14FA,
+0x51E8E714, 0x10F2D710, 0xD3DB191B, 0xEA1F06FD, 0x344311F7, 0x21D60C1A, 0x301207FE, 0x2FF3F9EA,
+0xEED1F9E4, 0xD335EAF9, 0xFA0505F3, 0x4B62DDDD, 0xD130DC3A, 0xD1D60708, 0x21D508FD, 0x4112FAFE,
+0xEE431816, 0xAE0BEAE4, 0x03E0D51D, 0xD0AEDC07, 0x092BF72A, 0x07C92003, 0x20BDDEF1, 0xDC45F9E5,
+0xCC1905E4, 0x0AE50CF2, 0x53D41E0D, 0xC928D505, 0x48B90F00, 0xF9CCDF1F, 0x3D4D21F4, 0xCB0B15DB,
+0xF0050D1D, 0xE1F2E8DE, 0x36E5091A, 0x3EF20E0C, 0xD8E6161A, 0x241D00F3, 0xC1E7FD0C, 0xCFB8EA0F,
+0xE33CF6DF, 0xF1AA0CEB, 0x45F11A2E, 0x3BFDE3E7, 0x28971425, 0x2DDC00BF, 0xE9DFFC03, 0x05B6EE07,
+0xF1DE23DD, 0xB6E01807, 0x1F31DDF7, 0xEB390A0A, 0xD44B13F0, 0x11E2FC22, 0x5021E80A, 0x072E28FA,
+0xEEBBDF07, 0xEC30E9E3, 0xD2F2EDF8, 0x3AE8061B, 0x4CB6EF10, 0xCBD2DC23, 0x302F0D07, 0xB73308FA,
+0xAFE821F6, 0xD63FD6F0, 0x4CF20216, 0xBFEC23E6, 0x17DB19ED, 0xC9E8EF03, 0x3A34F10F, 0x404711F3,
+0x1814181E, 0xEF17F1EB, 0x551F17EE, 0xD7D62E09, 0xF519FFFD, 0xFC16E4F1, 0x28E62412, 0xDD2600F2,
+0xFD220501, 0x5DE52407, 0xB9B6CC0E, 0x180220DE, 0x27351026, 0xFA13010D, 0x050222EC, 0x2BE0DDDA,
+0xCEB2FC08, 0x0CD00A27, 0xC3FBE509, 0xF6F51422, 0xEEAC1EE3, 0x10CFEAD5, 0x1FDBE9F7, 0x47C20AFE,
+0x3AC8E2E9, 0xC80DEEEF, 0xECD9F1E5, 0x0FCEEC00, 0x1F2CE70B, 0xE5BCF8FC, 0x37B10DE4, 0x4200F1DA,
+0xFB081A27, 0x385C231F, 0xC7210FCB, 0xFEF610F9, 0x26E30000, 0xEBF8132F, 0x051ADEBF, 0xEDE13CBA,
+0x1819C0BF, 0xFE0F2AE7, 0xFC1D24F5, 0xDBE90311, 0x02F02918, 0x190FF037, 0x1CE10B46, 0x0AD0CF58,
+0x170410DD, 0xF72231FA, 0x1D15BBED, 0x16053EDD, 0x0A1D1D45, 0xF412E316, 0x191510EE, 0xE3DE0BF9,
+0x10113838, 0x02D92AB2, 0xFF07D7E0, 0x21ECF83C, 0xEDEBC4C2, 0xE8EDC13B, 0xF9042025, 0xDB35B4A4,
+0x1AE3BF45, 0xFEEB2514, 0xEDFAC522, 0x0B08E3E0, 0x13F2151A, 0xF3D7E551, 0x1D110BE8, 0x25170312,
+0x19C9F1A2, 0xEA0C1234, 0xE3FBF52D, 0xF3DC1BFD, 0x11F518CC, 0xFDF92521, 0xF61E31F6, 0xE0EA47EF,
+0x09E6E142, 0x09F8E031, 0xDDF9B52F, 0xDAE34EBA, 0x160111D8, 0x1524C34C, 0xF01C170C, 0xDDE14BB9,
+0xE7EEC0EB, 0x34005CD8, 0x17E1C2F7, 0x0F1B1943, 0xFD11DCEA, 0xD5EF5417, 0x08D73052, 0x1CDAF4B2,
+0x162AED52, 0xFBE9D3EF, 0xF31C1CBC, 0xF225CA03, 0xEB231405, 0x13FFEC2A, 0xDAF4B135, 0xDC21FDB8,
+0xDBF90420, 0x0F23184B, 0xE40945E1, 0xFE03DBD4, 0xDF1B48BE, 0x0CE01C49, 0x1200C728, 0xEFF016C7,
+0xFC12D43A, 0x0AE9CDC1, 0x15101318, 0xDFE949C1, 0x2314B6C4, 0xFADF2349, 0xFCE4D5F4, 0x1EFA462E,
+0x03DEDBFA, 0x19230F05, 0x210D061B, 0xFF2327FB, 0xDA21B207, 0x1302ECD6, 0xEF14E93D, 0xE4010D27,
+0xEEE7E942, 0x1B1DF30B, 0x1A180D3F, 0xFF17DAC1, 0x0000F2FA, 0xB1351ADD, 0x4FE626F3, 0x2A12D80E,
+0x1434FEEA, 0x36F2EC0D, 0x42C6F2E7, 0xB8C1E512, 0xE23821E9, 0x0E3B0AF0, 0xFC3E1BEE, 0xB631DB15,
+0x0623DDF8, 0xD81B2306, 0x28B5FFF3, 0xEF270023, 0x1718E9FF, 0x33EB110F, 0x30F0F5ED, 0x1426F8E7,
+0xC727ECFD, 0xB34010FF, 0x3CFFDBE9, 0x44141326, 0x5EA2E4EC, 0x3325CBC9, 0xD8130BFC, 0x0C1E0016,
+0xF92EE4F7, 0x1109DEFA, 0x2632E9E0, 0x520EFEF6, 0x342ED619, 0x3229F4F9, 0x0D0DF700, 0x1D111BE5,
+0x15BC0C18, 0xA728ED1B, 0x54283000, 0x47C32B00, 0xCAEE1FEB, 0x11EEF2E9, 0xC00C1717, 0xB215181C,
+0x030CDA13, 0x480325E4, 0xEDCAE126, 0xD8D915F2, 0x23DAFF01, 0x16240503, 0x374AED04, 0x2904F1DE,
+0x3D3DFF25, 0xB3CF1516, 0xDE0A25F8, 0x3306F9E2, 0xCDB9F622, 0xBCD0F5E0, 0xEC291DF8, 0xA8DE14FE,
+0x07FC2F06, 0xCB3CE0DC, 0xFBC4F213, 0x2ED0DE15, 0x35DEFB08, 0x38C8F305, 0xCF36F010, 0x3ACE0A0E,
+0xEC3A1209, 0x2152EDEF, 0x40440729, 0xCC04E91C, 0x0C56F424, 0xB04A1C2E, 0x28CAD721, 0xCDD8FF0F,
+0xCC1CF400, 0xE6BDF3F3, 0x0F020E1C, 0xFBFD19D9, 0x3DC12326, 0x00D8EA17, 0xCB2ED900, 0x5008F3FA,
+0xEF2B28DF, 0xA726E9FE, 0xEAF5CFFD, 0x3FE4EEE3, 0x3F27180B, 0x3C47EA01, 0xC8B5EBE0, 0xBEC1F023,
+0xD7231A16, 0xE15501FA, 0x40AAF82E, 0x4511E72D, 0xF8DAE317, 0x20030000, 0x0BF7E232, 0x0BF1CDE6,
+0xCFF25835, 0xFAE12DF7, 0xCFEAA613, 0x22FFF928, 0xE8E93FC1, 0xE327BAFE, 0x1E030A24, 0xDFDFFAF8,
+0x06F2D3E6, 0x00FED8D6, 0xE7DFF2F8, 0x10FB39DD, 0xDBD74CB0, 0x072BDEAE, 0x1EE90A11, 0x26DB4DFD,
+0x16E8EEC0, 0x17FEC126, 0xEB03C4DB, 0xFCF924D0, 0xDBD8FC00, 0xE510F3E9, 0xEBF6C4E2, 0xFF2127F8,
+0x0DE335F6, 0xE1E7B840, 0x1724C203, 0x0D23E5FB, 0xFD1ADB0F, 0x2C0C5434, 0x22EEFBC6, 0xDB130316,
+0x1EE60A0F, 0x27E94F3F, 0xF7E21F46, 0xF1D837FF, 0xECFB3B22, 0x330FA638, 0x0CE21CF7, 0x22FFB728,
+0x281AB042, 0xF714CE13, 0x11E0C707, 0x11D9E9FE, 0xD9EBFF3E, 0xF8043023, 0x1C15BCC3, 0xCAC95DA0,
+0x13ECEB15, 0xDFEDB73B, 0x201CF8BD, 0xF033185B, 0x160B12CD, 0x01092631, 0x0E06192D, 0x0E001BD8,
+0xEC0FEC37, 0xE4E143B9, 0xDD2405B4, 0x1EDAF501, 0xEFFF16D6, 0xD8F70130, 0xF1EF3738, 0xFBEB2E3D,
+0x1F0147D6, 0xFAF52EE4, 0x00D92800, 0xE31B440D, 0x02112617, 0xF1DCE6B4, 0x17E3C144, 0xE117B9C1,
+0x10F1E719, 0x0C15E3EE, 0xE315F53C, 0xE5260D4E, 0x02F8DB31, 0xEA14EF3C, 0x4B178DC1, 0xDC070320,
+0x0F1CC8F4, 0xEAFB122D, 0xE71042C8, 0xEBE8EC40, 0x0EEDE6C5, 0xEDEF1618, 0xFFDE2AFA, 0x260B4DCE,
+0xF3D9E600, 0xF816DFC2, 0xEA26EE4E, 0x14F23CCA, 0xD51AAD43, 0x0000133C, 0xFF5116EB, 0xCEF7D9D7,
+0x3EFCF7E2, 0xE13E16DC, 0x39D5F7EA, 0x583312FD, 0x3FEB2FF6, 0x35B3E9EE, 0xDD01F424, 0x26230528,
+0x5EB4FFFB, 0xB91E3623, 0x0AE11E09, 0xF6201F09, 0x4CC6E208, 0x1CBA2413, 0xC4370D1E, 0xD1001310,
+0x35150627, 0x122AF4ED, 0x0BF71502, 0xC618E3E1, 0xF202ED10, 0xF2C31A25, 0xB3F71BEB, 0x2312DB1F,
+0xC20804EA, 0x11FAEBDF, 0xA8DE18DE, 0x28113006, 0xE9CC0016, 0x4021120C, 0xEFC41908, 0xEB51E9EB,
+0x331113D7, 0x5ACBF5E8, 0x10C431F4, 0x0927E914, 0xB403E0FE, 0xFBE925DA, 0x1B24DD10, 0xB43B0CFB,
+0xE7C52413, 0xB94A0F13, 0x1DD7E1DE, 0xE5D10A01, 0xAE560C07, 0x290E292E, 0xF3F3001B, 0x5A131B1B,
+0xB4E0CE15, 0xBAE6DDF7, 0xC801E20D, 0xEEC61028, 0x16F1EB11, 0x2FC5EEE6, 0x4D2FF814, 0x2DB526FA,
+0x15B105DD, 0x2932EDD9, 0x0FD1FEF6, 0x2DD719F9, 0x29F30601, 0x3AFFFFE5, 0x44BFEFDA, 0xA437E5E8,
+0x0E38350F, 0xDF07E510, 0x06AA0821, 0x0052222E, 0xEFEB27D5, 0x1ED11814, 0x0B0E0908, 0x2DA11D1A,
+0xEB3405C9, 0x161C140C, 0xC2FBEF0D, 0x02E8E923, 0xD5BADAF1, 0x24C303E3, 0x374F04EA, 0xE8F10FD8,
+0x21B01118, 0xB536F8D9, 0x3BF1DDF2, 0xFD0012E6, 0x2F4FDB28, 0x0D2CF826, 0xBEBC1C04, 0xC64C1A1B,
+0xC62911DB, 0xF3AAEEFF, 0x40D5E52E, 0xA21C18FC, 0x18ED36F3, 0x00000F15, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+hard_output0 =
+0x7FE75E62, 0xB98D686A, 0x16FBEDEE, 0xB22861F2, 0x0A1180B7, 0x666C4407, 0x1D4C462A, 0xB81A3416,
+0xC6948E41, 0x3CCAE97E, 0x55352D5A, 0x85474DAC, 0xB8DBF013, 0xB9AF7B16, 0xEABEE865, 0x1EB02EF7,
+0xBDE86A3E, 0x24EDAAC9, 0x88A3D4B9, 0x8CA15C44, 0x42487190, 0xF4BF8BFF, 0x05EA0214, 0x4708DC42,
+0x222993B3, 0x24AAA18B, 0x4D49B2EA, 0xBBA8E2B3, 0x40426630, 0x5676818F, 0x1B79CC4B, 0x1595A8CD,
+0x9088385F, 0x351D2A23, 0x0EDC4446, 0x8F17D03E, 0xEA8B3464, 0x8799E718, 0x79E11F5A, 0xE771403F,
+0x38C65263, 0xA969930A, 0x59942FBB, 0x74138F31, 0x80518035, 0xB42D7BE0, 0x0C4A7E00, 0x4ACA8CB5,
+0x76C89CCB, 0xB56F2806, 0x1C8CF4E5, 0xA192BBF1, 0x1139DA94, 0x46B1C7C9, 0x8B630748, 0xA882704B,
+0x69DA8BE2, 0xF2508EA5, 0x05E4AEAF, 0x5AA0A9C8, 0x6177DDCE, 0x8D06A0E1, 0xECD023CA, 0x34FC0800,
+0x5AE69AAD, 0xFE8E2F72, 0x5ED3DC49, 0x8B46EB93, 0xC0C3D55F, 0xAC8C8B89, 0x54A20A93, 0x81F6659B,
+0x412B7086, 0x09D572AB, 0x85891AB0, 0x96DC6C57, 0xD7117C38, 0xFBA06F16, 0x9EAAFCE5, 0x44AD2185,
+0x618D398E, 0xD9AC89E7, 0x031F1D0B, 0xE7D05975, 0x9E500DF3, 0x06BFEF20, 0x42888C91, 0xD6AF7FA6,
+0xC839E4C7, 0x0E7B7724, 0x48693BD9, 0xD17B6519, 0x48004F1A, 0xF5A147D5, 0x521C8AD6, 0x26E19AE7,
+0xAA17116F, 0x7196631D, 0x89643E51, 0x8CE6B02E, 0x128B9CF6, 0x2F45C206, 0x4EB6C731, 0xCFB17835,
+0xCFE85026, 0xA4B9B9B0, 0xD2C152AC, 0xA4F89035, 0x79ED0C5F, 0xBC030479, 0x51D91130, 0x570C019C,
+0x4E0306B7, 0x27978CCD, 0x5F8DA612, 0x33C121FE, 0x4407BB22, 0x6FF06E2F, 0x5D7F378B, 0x491B9A93,
+0xA0BDE688, 0x0EA89618, 0xE91C3661, 0x27C69F6D, 0xC46463FD, 0x67649FE9, 0x8F6E077F, 0x4D456A5F,
+0x396B7218, 0x0C26C824, 0xA4E8FD71, 0xDCA1C865, 0x6F1CC581, 0x97C974D3, 0x38D0F6FC, 0x04190419,
+0xED8C7FC5, 0x794BA619, 0x0090798D, 0x3044F72D, 0xBB77EEEF, 0xF9BCFDE1, 0x2C1580F7, 0xC0292305,
+0x86827E71, 0x356C7D12, 0xA84DBECC, 0xCA53E3D9, 0x236E73F0, 0x9FD2AFBA, 0x652E7A66, 0x744A1147,
+0xBFE9AF5B, 0x59F2552F, 0xBD77D3C0, 0xCD726524, 0x1D56A0FB, 0x85207831, 0xB6F1861B, 0x369CF946,
+0x71CA18BD, 0x5EE3F7CF, 0x2C3FC902, 0x4301214D, 0x53A64F16, 0x2956B34F, 0x3DF40618, 0x41A36C68,
+0xF7BB2B5F, 0x0C5CD46D, 0x3E4354AC, 0xC29AFEF7, 0xC1A310AF, 0xFFEF128F, 0xBE1C8D14, 0x8B123664,
+0x854B7E00, 0xAE175861, 0xE4067FE2, 0xA264F00A, 0x988634EF, 0xEC9E38E7, 0xFE2968A6, 0x2B30719D,
+0xFE3DC88D, 0x09E222C8, 0x6B4F1B50, 0x436C7137, 0x161DFE10, 0xA68C403E, 0x12A2856F, 0xAAB8DFF0
+
+k =
+6144
+
+e =
+10376
+
+iter_max =
+8
+
+iter_min =
+4
+
+expected_iter_count =
+8
+
+ext_scale =
+15
+
+num_maps =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_CRC_TYPE_24B,
+RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN
+
+expected_status =
+OK
\ No newline at end of file
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data
new file mode 100644
index 00000000..8ff71aac
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data
@@ -0,0 +1,643 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x16000000, 0x081112e9, 0xcd2d1fac, 0xf9fdf4db, 0x110edf1a, 0x0dd9e84f, 0xdbe9e611, 0x0fde0206,
+0x43df19f9, 0x071ee4f7, 0x1f1322c5, 0xd4e5f6f3, 0xe3fafcde, 0xd002f52a, 0x4427f801, 0xbadce403,
+0xce101dc9, 0x09fff729, 0xd5f91f2e, 0xf3f0fee8, 0x4f061cde, 0x4a2b2753, 0x1d24dffb, 0xd3040bdb,
+0xcaf2fbe6, 0x1cd9f350, 0x00fff42a, 0x49d02759, 0x4005de2d, 0xd3fb1923, 0x36cbfc5c, 0x020a0de1,
+0x39072621, 0x3b0defe5, 0xb4ebec13, 0xddee2416, 0x24f206e5, 0xd1e1fcb9, 0x0e150713, 0x21f6191f,
+0x0d0bf9e3, 0xf0061bd2, 0xae2717ff, 0x0de9d740, 0x13031a2b, 0x4c0215d6, 0x4534dc5c, 0xa1f41c1c,
+0x001b3842, 0xeaf83ecf, 0x2519b4be, 0x1d220afb, 0x05162312, 0xee00ea29, 0x25ef4d39, 0x0cc03398,
+0x08f5e0ce, 0xb7e68f43, 0x14f7ec31, 0xe7f5f11e, 0xfd29d6ae, 0x0af133c9, 0x03fdd5db, 0x0bca1d5f,
+0x00172840, 0x061add0e, 0x0516ddee, 0xe10cf735, 0x1709101f, 0xdf0908d0, 0x06fe2dd6, 0x18ed11c6,
+0xd315ac14, 0x06f0d3c8, 0x1ed9474f, 0xef223806, 0x28f3ffe5, 0x0eeb1aee, 0x080de0cb, 0x29f452e4,
+0xdbd702b0, 0x041b2cbd, 0x1df30b34, 0x1927f102, 0x2010b719, 0xf0fde8d5, 0xe9eaef3e, 0xf10b36ce,
+0xdff4fa1b, 0xfd0cd5e4, 0x04f923d0, 0x110f3936, 0x06fe2e26, 0x00e42844, 0x11e839c0, 0x13ecea13,
+0xf621e349, 0xff332600, 0xe604f1f5, 0x0533d425, 0xf240c9f5, 0x0b5933e8, 0xdf1bf931, 0x02c929f2,
+0xeee6eb0f, 0x12a2160f, 0xfdde26ca, 0xf016c8f9, 0xdd03b5ee, 0xe2ee09da, 0xfdf4db17, 0xdb0603e4,
+0xffda2821, 0xf6f9cefd, 0xd5ca5421, 0x12f03af2, 0x182f0fe9, 0x10423808, 0x0c19e41a, 0x27af02f0,
+0xfc14d42a, 0x08c9d114, 0x274700f2, 0xe8c110e1, 0x0e0f19e9, 0xeb58c3e7, 0xe20fbbd1, 0xe52e0e1a,
+0xf145e607, 0xe316bc1d, 0xe3e04511, 0xd93c0009, 0x27db01ed, 0x0a30cd03, 0x23434af8, 0xe13f09e6,
+0xe4d00de9, 0x07202208, 0xdb3ffd07, 0x0955cfea, 0xeae8112c, 0x041324f1, 0x1011e815, 0xf6d01f17,
+0xe5eb0cf9, 0x17be00ee, 0xa2dbeee6, 0xb2e73603, 0x08ebda0f, 0x250fe0ed, 0xe61a0218, 0xf1dbf2f3,
+0x420b18fd, 0xa1e9e5e4, 0xd9e1caf0, 0xe7cffe08, 0xeba8f009, 0xc45213d0, 0x20a1ed29, 0x28fa0737,
+0x1ad900dd, 0x1bc10dfe, 0xab360c18, 0x023bd3f1, 0xacfd2514, 0x4d382c25, 0xf25adbf1, 0x1eda1a32,
+0xf11e0902, 0x23a5e70a, 0x1f32fcce, 0x0ef0f8f5, 0x2029e6e9, 0xdfda0701, 0x1860fafe, 0xd8e5f139,
+0x5612010d, 0xf2572d15, 0xba23192f, 0x2ac1e2fb, 0xe91e02e8, 0x44d2100b, 0x08f9e307, 0x5edddfe0,
+0x240fc906, 0xc1c3fbe7, 0x0f231614, 0xaa111806, 0x1e2d2eea, 0xc6ba0a06, 0xedc9121e, 0x1ffc150e,
+0xd11b0a23, 0xe800f80d, 0x2f27f101, 0x06110638, 0xd01fdef8, 0x15ee0717, 0xba0e131a, 0xcde51f43,
+0xd21df6bc, 0x051006c8, 0x2616ddc3, 0x3ef8fee0, 0xf3cde9a4, 0x3bd81b4f, 0x25001229, 0xf6eb023e,
+0xd81ee3b9, 0x1cecff14, 0x29fef4d9, 0x9f2d0255, 0x121e38f5, 0x18eb16c2, 0x271a1041, 0xf7f700cf,
+0x28f3e1e4, 0x49e90011, 0x4b0fde1a, 0xd9e723f1, 0x0de402bd, 0x261f1b0a, 0xbe0afe1e, 0x0823e606,
+0x3d041f2c, 0xf1f21436, 0xec1ae7f1, 0x02ebebc3, 0x52f1da1a, 0xc9e5d60d, 0xa9dc0e04, 0xc504d22d,
+0x09fd1325, 0x3706e02e, 0x26dc0f4b, 0x2012fe3b, 0xeef109e7, 0xb509161f, 0xf01dde45, 0x13d51853,
+0xc823ebfb, 0x2d23f0fc, 0xcbf6fbcd, 0xd505f22d, 0xee0afd1e, 0xa6e615bd, 0xa020ce07, 0x411937c0,
+0xbcd318ab, 0x2a1d1cf5, 0xc400ff28, 0xe32014f7, 0x2bf30b1b, 0x0e03fc25, 0xe1e41bf3, 0x2f1b0943,
+0x06060721, 0xc601de26, 0xd3faee2e, 0xe51c05bc, 0x0411f4c8, 0xcfe7dcbf, 0x2d12083a, 0x0bedfb3a,
+0x0efc1d2c, 0xca1f1909, 0x13f80e30, 0x3a21eab7, 0x3c2b1252, 0xf2e4140b, 0x2527e5b1, 0x2ffe03d6,
+0x44e4f90b, 0xcf21e449, 0xd8e9f6c2, 0xeef8001f, 0xfb151513, 0x48f9de22, 0x2ad6e0ad, 0xe63602a3,
+0xdae50e0d, 0xe3e5ff0c, 0xc721f449, 0xa0d5efac, 0xe70039d8, 0xc7f71031, 0x2b22eefa, 0xe5fb04d3,
+0x0021f2f9, 0x2bce28a5, 0xdffefa00, 0x1af642e2, 0x1d190bf1, 0xecf515cd, 0x0af4cf34, 0xf406cbd1,
+0x08ebd013, 0x14173b40, 0xe40e0bca, 0x29ff51d9, 0xd303abda, 0xe303f5d5, 0xf6facddf, 0xecef3cc8,
+0x1324ecb4, 0xd213563c, 0xe00ff91a, 0xef121639, 0xf5d2cdaa, 0x1914f013, 0xe80f3fe8, 0xe31c0bbc,
+0xe3d5bcad, 0xe610be38, 0xe72df155, 0x1cef44c7, 0x13ccc5a3, 0xe1e20909, 0xecf2c337, 0xf2fd35da,
+0xe5cd435b, 0xdc1304c5, 0x050722d2, 0x121fea46, 0x36f8a1e1, 0x1223c6fb, 0x18040fd4, 0xef1517ee,
+0xf8e32ff5, 0xf22cc953, 0xfe04d7dc, 0x10e7390e, 0x1ef4091b, 0xdbf2fde6, 0x1afa432e, 0x2211b5e8,
+0x211eb8ba, 0xff1529c4, 0xdaea004e, 0x11e539ed, 0x13b4eaf2, 0xe10af7db, 0x11e9c81e, 0x1eeef6f0,
+0x17b73fea, 0x09c61fde, 0xeefdebef, 0xd92fffdc, 0xd7e05007, 0xfe42d9f8, 0xd2feaae6, 0xf3001cd9,
+0xee12c727, 0xe5d90cea, 0x2c21ac01, 0xe3de0b08, 0x265703fa, 0x1a410d30, 0x2f1f5719, 0xe742bff6,
+0xfc2e23e5, 0x2746b105, 0x262efe1d, 0xe43ff4fb, 0xf01018e9, 0xd9014e18, 0xef043927, 0x23fb0524,
+0x27094e23, 0x19ce41e1, 0xf70a200b, 0x1daff5e3, 0x274eb22a, 0xf612cd26, 0xf903df17, 0xe3f1ba25,
+0xe45bbc18, 0x062a2132, 0x11bfea01, 0xf4fe34e7, 0x0637ddda, 0xede0c510, 0x02022509, 0xfbbf2c26,
+0xeda8c619, 0xecb0edd0, 0xeb00d0d8, 0xf054edf7, 0x0356182c, 0x5fd3252e, 0xd74e3805, 0xc2100026,
+0xe4161517, 0xf106f412, 0x26f219df, 0x440bfde5, 0x1b031d1d, 0xe6c4f325, 0xd349f213, 0x2b0605df,
+0x4bdbfcdf, 0xe32e23fd, 0x49460a07, 0xcff022e3, 0xf9c2f617, 0xdfbc2116, 0xed2f071b, 0xd6a91406,
+0xc33bfe2f, 0xc623eb12, 0xc2d0ee06, 0x243aeb07, 0x1fe8fb13, 0xffc409f0, 0x1ed6d814, 0xde4bf602,
+0x31dcfb22, 0xf5da0afd, 0x0afd1e03, 0x1cc01fdc, 0xf82bf4e8, 0xcccce003, 0xfb0f0df4, 0xd2ff23e8,
+0x0845fbd9, 0x16e821e2, 0xe5c5eef0, 0x14bbf4ec, 0x1cefeb1d, 0xffcd0de9, 0x5bbad90b, 0x0630331d,
+0xf826dd09, 0xc62c2001, 0x00ed12fc, 0x16e8eb40, 0xf3e4120c, 0xc105e52d, 0xca181610, 0xb9e0f149,
+0xfc011f28, 0xfc17233f, 0xc5f12318, 0xea1dedbb, 0x37e91240, 0x47d9f0b2, 0x19271e01, 0x09240ffb,
+0x1edd1f4b, 0x02240ab3, 0xf3e6d90e, 0xfd03e425, 0xb12325b5, 0xb92cd9ac, 0xcee71e41, 0x47fa0bdf,
+0x0fcb1fa3, 0xd406e7de, 0x31defcfa, 0x300af71e, 0xfa0608dd, 0x10e922c1, 0xb2f3e936, 0xf336265e,
+0x0610e517, 0x16e72242, 0xab23134b, 0xbaddd34b, 0x1afee225, 0xfc100d38, 0x44e6dcbe, 0x02ede4c4,
+0xbe19da41, 0x2ff1e6c9, 0xea14f8c3, 0xc21bed0d, 0xac1017e8, 0x35202df8, 0xd9f10dca, 0x03fc02dc,
+0xf2e425bb, 0x29efe5e9, 0x3ce10046, 0x09153000, 0x0b061d2e, 0x1f1a0abe, 0xfb18d2c0, 0x0008d730,
+0xf3071b30, 0x17dfeff9, 0xc5d09da8, 0x23d7fbaf, 0x07e921ee, 0x16e4ee45, 0x1624c304, 0x01f828d0,
+0x20ddb8fb, 0x030325d5, 0x0424d5fc, 0x0216dac3, 0xe5040c23, 0xd71552ec, 0x28ef00e9, 0x16de13fb,
+0x1520edb8, 0x0416db12, 0xdeee07c7, 0x07fc20d4, 0x08fadf2e, 0xf0f639ce, 0x30f0a7e8, 0xef0d39e6,
+0x210d071c, 0xf2f235cb, 0x05192242, 0xd6095130, 0x2605ff2d, 0x19f8bf30, 0x1320eb48, 0xfbd72e51,
+0x01d92701, 0xe127f601, 0xdc0a051f, 0xfdf1dc37, 0x0b0bce1e, 0x09ff1f29, 0x1507c3df, 0xf7e3e1bb,
+0xdb204df8, 0x11e517bd, 0x3004a7dc, 0x0d1300cc, 0xf42134ea, 0xe9b9c1f8, 0x1fbef720, 0x07bcdfe5,
+0xeccf3ce5, 0x17301109, 0xf2b4e6f7, 0x1258c723, 0xdaf84dd0, 0xd507ae1f, 0x011d2a22, 0xfd3f25f5,
+0xfd4edb17, 0xf9cadf26, 0x144dc3f1, 0x09003124, 0xec503b27, 0xfed1d628, 0x0ffbc9f8, 0xf2e619de,
+0xf832dff1, 0xf3d034f5, 0x1dea0b09, 0xe80e0fef, 0xf1dd38e6, 0xf8fce104, 0xde1f07db, 0x06ccdef7,
+0x24554b0c, 0xcde0a6d2, 0xe2e0baf8, 0xeae6ef09, 0xe31b440f, 0xe5c6f3f2, 0xf447ccee, 0x1944f2e1,
+0xe33fbb1b, 0xebad3eea, 0x0b1de32b, 0x0d24e50c, 0x18e711fc, 0x2830b110, 0x14413cf8, 0xe7e7f219,
+0x1c49bc0e, 0x0734e0df, 0xe255f50c, 0x540045d2, 0xe3c0d3e3, 0xef0ff518, 0x34c017e6, 0x052af517,
+0xff4e2402, 0xc4eadada, 0x00d3ed12, 0x4bc727fa, 0xaaf62211, 0x28e1d21e, 0x530a00f7, 0xfd40d5e1,
+0x0dafdbe8, 0x2b0de5d6, 0xc722fce5, 0xca241007, 0xe1eb0efc, 0xfa63f8ee, 0x28dfdec5, 0x07420107,
+0xccf6dfe7, 0xcb1df4e1, 0xd9def40b, 0x47ef0105, 0x1041e0e9, 0xe849e919, 0x27f10fdf, 0xef2c02e7,
+0xe74fe805, 0x4cb7f2d9, 0xe7ee2521, 0xee430f17, 0x5b09161b, 0xdce2cd1f, 0x2343050a, 0xc22c05e5,
+0x5ae7eafc, 0x9749cef1, 0x4dea4221, 0x0e2726ee, 0x24a8e6ff, 0xcc5afcd0, 0xbcd2f432, 0xf514e306,
+0xbd0ee414, 0xc4dbe41b, 0x310a1302, 0x090cf8e2, 0xd5dce11d, 0x1c13fdfb, 0xeacbf515, 0xc5eeeef3,
+0x3027eee9, 0x29420701, 0xc01700e6, 0xc4b7e910, 0xaaebec21, 0x129fd2ee, 0x2ec5eac7, 0x0eecf9ec,
+0xe10b1aec, 0x57b3f7e3, 0x3939d1db, 0xcf42f012, 0x163ef719, 0xd5111217, 0xe92203e9, 0xf13eef05,
+0xece5e6ea, 0x3301ecf4, 0xfff6f527, 0x39ded9e2, 0x110deffa, 0x262fe91a, 0xb7bd0207, 0x39d8dfe4,
+0x0a2e1101, 0x2adf1ef9, 0xe52ffef9, 0xb70ff307, 0x1cdd22e7, 0x2b3bf4fb, 0xe4eb02ec, 0xd80bf4ed,
+0xfebaff1d, 0xcd12271e, 0xfeee0aea, 0xc70726e9, 0x0f4fefdf, 0xd9b6e6da, 0xf5d300df, 0xccbc1dfa,
+0x34270de4, 0xf0d3f400, 0xc649e7fb, 0x18deeede, 0x4d0f0600, 0x2bdf2549, 0x1f07fe21, 0xf0e6f60f,
+0x0c1b1743, 0x0f0a1b1d, 0x46efe7e9, 0x0ae6e242, 0x0ff61f33, 0xae21e84a, 0x500ad6e2, 0xfcddd74b,
+0x4a17ddc2, 0xe9d9dd4f, 0xc7241205, 0xb9e8110f, 0xda111ec7, 0x37e1fe46, 0x39f3f1e5, 0x000512d2,
+0x0e05d92d, 0xfeec193c, 0xf519daf0, 0x1e291d52, 0x362bf652, 0xbce9f311, 0x30fae5d2, 0xe6fcf7db,
+0x36ec0fc4, 0xddfa0f23, 0xf1f605e3, 0x1ed1e857, 0xb9def6b6, 0xc0091ee0, 0xcff1e8ca, 0x3fe5090d,
+0xf3edea15, 0x23111b16, 0x0207fbd2, 0x1d2cdbac, 0xbdf6f6cd, 0x1a311ba8, 0x420af2cd, 0x1bde1ab7,
+0xc512f217, 0x0fe3edbc, 0x1afe19d6, 0x200bf1e3, 0x100900c8, 0xfe1a29f2, 0x12c51663, 0xdfe606f2,
+0xf7ea1f12, 0xf841df96, 0xe2f50a33, 0xe9f5efcd, 0x13fdeb25, 0x2524044d, 0x0d1a35be, 0xfaee23c7,
+0xf0043824, 0xda200108, 0x04f225c9, 0xe101f7d6, 0xf30dcb36, 0x07fb22dd, 0xd1fba92c, 0xdf03f824,
+0xd0efa8e9, 0xf5dd1d05, 0xfe332a5c, 0xf4221bb6, 0xe5ddbdb5, 0x1401c3d9, 0xfc12d33a, 0x1b14bd3b,
+0xddf2fb36, 0xf0e13709, 0x12d9c601, 0x0cf7cce1, 0x1b06bc22, 0x22d6b7ae, 0xf7291eae, 0x15f81321,
+0x1a04f2db, 0xe425f404, 0x05f32435, 0x060f2218, 0x0bd93250, 0x26f9fd20, 0xfe2fd5aa, 0x121deb45,
+0xf6e731bf, 0xf90c2e1d, 0xe9dbc2b3, 0xfb0edd36, 0x1500b93e, 0x281db01e, 0x061ddff5, 0x2712fe0a,
+0x0f461ae9, 0xe32f0b1f, 0xed691507, 0xf61d32bf, 0x0d24e5f5, 0xfae422fd, 0x03d126f4, 0xee0ac6f9,
+0xd8a4b11e, 0xebf51335, 0x15f7c31e, 0x07fd22e1, 0xe40a0cdb, 0xdd04b5e3, 0xd1f25825, 0xec033de6,
+0x12fee925, 0x292051da, 0xdff54af7, 0x34d0a4e3, 0xf53de209, 0xedeceb14, 0x0f4cca13, 0xe3340b25,
+0xf023e90c, 0xf6be32fa, 0x2254fae7, 0x07c1212b, 0x31015816, 0x1bd24227, 0x1438edf9, 0x151f3df0,
+0x02d1270a, 0x0fffc9f9, 0x0d4235da, 0xf638e2e6, 0x241ffc0f, 0x1d380b08, 0x1a93bdf0, 0x1b4f44bb,
+0xfe57da26, 0x24c8fbd1, 0x1112c711, 0x1f3ef6eb, 0x00173116, 0x100311f8, 0xf2dfe7db, 0x292ce5f8,
+0x46e1fffb, 0x45c71ef7, 0x2bc31c11, 0x4abcfceb, 0x00e0dd1c, 0x202628f9, 0x42f2f801, 0x0bb41b1a,
+0x0d16e225, 0xd2f11a12, 0xf6ec06e6, 0x13ef1deb, 0x48c51617, 0x09ff1f13, 0x314b1f28, 0x1ecf0823,
+0xf9adf6f7, 0xd4da222a, 0xef0f0501, 0xe146e8e8, 0xfe4308e1, 0x4803dbe5, 0x21bc2025, 0x0fc5f9e4,
+0xd341e813, 0xa9d9fae6, 0xaaf5d1ff, 0xb546d2e3, 0x12e2221f, 0xbee715f6, 0xd325e60f, 0x1adc05fd,
+0xf533f3fb, 0xf3d0e3f6, 0xcb11e5f7, 0x33fb0ce9, 0xa3be0b23, 0x29b8351a, 0x312f0120, 0xebbb0afa,
+0xc31a12e3, 0x0c47150d, 0x21c21be2, 0xe1570715, 0x19e0082e, 0xc8f2f800, 0x2a0b11cd, 0xcd3001a8,
+0x2f1e0bf7, 0x1909071f, 0x1c15f2ed, 0x4612f53b, 0x40021d27, 0x3108e821, 0x0c1108c7, 0xcd0d1ce4,
+0xe51c0b44, 0xb4ebf412, 0xf1dddc04, 0xd6f7e6e1, 0xc32efe56, 0xfd10ea18, 0xdb2eda55, 0x48ff0329,
+0xe2fce12c, 0xd5f9f5df, 0x2cda04fe, 0xdf0dfdcb, 0xc5e70741, 0xdf16eec2, 0x3ce1faf7, 0xcc0eec35,
+0xfa28f450, 0xf1e72210, 0x451de7f5, 0x4dfde324, 0xfdf3db35, 0x31eadc3d, 0x391c09bc, 0xcc09ee1f,
+0xdaf90dd1, 0xf6e1ff09, 0x22f8e1d0, 0xe70efae5, 0x13faf02e, 0x271e15ba, 0x03df0007, 0x11ff2627,
+0xb8e9173f, 0x38ecdfec, 0xc4ed0f3b, 0xe4d91402, 0xd5010c29, 0x050400d3, 0x1e01ba26, 0xe3390b60,
+0xe11d0a0c, 0x2adaaefe, 0x1222e906, 0x0ff6e7ce, 0xebeb3cc4, 0xfd082b31, 0xf52a1e52, 0xf1dde7b4,
+0x0ee8e6c1, 0x0c2ae352, 0x0df3cbca, 0x131bebf2, 0x23214bf9, 0xd6f5ad1c, 0x21fff8d6, 0xe3e00a48,
+0xf00119d8, 0x03d02ca7, 0xf1fcc92c, 0x0c0b1b1c, 0xf4daccb1, 0x131ac50f, 0xd6295250, 0xe4f6f3e2,
+0xe9f91221, 0xe6e0be49, 0xdcebb5c3, 0x1f1bf8f3, 0x27f6b2e3, 0x1a1c0e45, 0x0504d224, 0x0918e20f,
+0xe4dabb4f, 0x0a001d27, 0xec193b42, 0x08e9d1c2, 0x0c09e51e, 0xff28d700, 0x0d0ae5cf, 0x0704202c,
+0xfce0d408, 0x0c0b341e, 0x27ee00ea, 0x1206c7d1, 0x1000e9d8, 0xf8000830, 0xe334f5e1, 0x0bc81ef4,
+0xeed33b10, 0x280050fb, 0x02d1d928, 0x0e3be608, 0xdc130412, 0xf6ffceea, 0x03142a28, 0xfeea2aec,
+0xfdc4d613, 0xf1151aed, 0x1425c4ee, 0xfdd0db04, 0x10df1807, 0x0af7cd07, 0x20c108e2, 0xf61bce17,
+0xe6e6f10e, 0x03d0da0e, 0x20edf808, 0x37dc60eb, 0x1bfd0e04, 0x18b73f24, 0xe8f91021, 0x0ed31ade,
+0x1fe50afc, 0x07e020f3, 0x0c2ccb07, 0xedd91405, 0xf700e202, 0xdce903d9, 0xf4b31c12, 0xfcbbd425,
+0x08d431e2, 0x1f00b905, 0xd04158d9, 0xd2acabe8, 0x18e911d3, 0xedca3c12, 0xe467bb0f, 0xd9c350c2,
+0x00c12816, 0x082220e9, 0xfe692bfa, 0xfc322cc0, 0x0d1ae5f5, 0x1de446f2, 0xe136b80d, 0xf2001af2,
+0x1be3bdd8, 0x013e29f4, 0x272301ea, 0x0a021f04, 0x22d1b5da, 0xedafecfa, 0xf958d0d8, 0xb1f48ad1,
+0xf816d01b, 0xe7caf2ee, 0xf7e11ef1, 0x0ae31df7, 0x0feb180b, 0xece61513, 0x3f10670e, 0x0baa1c17,
+0xf233e6d2, 0xce46a7f5, 0xe6c642e2, 0x033225ee, 0x0b3de20a, 0x00f7d814, 0xf8cccf20, 0xe2ee0bf4,
+0xd93601ea, 0xefdf3a0e, 0x2202fb07, 0xe5c70c25, 0x01da29ee, 0xf8182101, 0xdef7b610, 0xeb0aed1e,
+0x0017d8e2, 0x2fa4a9ef, 0x0c25cd35, 0x05f62304, 0x08e7e01f, 0xe11409f2, 0xf5e81ded, 0xeefeeb10,
+0xfe122b25, 0xe9153f16, 0xe34f44ed, 0x1cca0c27, 0xf3dbcb0e, 0x08c231fc, 0xe2cfea00, 0xf031f5f7,
+0xc7e91809, 0xdfe5ef10, 0x1234fa0e, 0xee28ea0c, 0xc6061700, 0xccb8eede, 0xed190b20, 0x21441510,
+0xb1e007e5, 0xe1c7d9f9, 0xcacbf611, 0x3acb0f0c, 0xd7e4110d, 0x41c2010c, 0xef0418ea, 0xffc917dc,
+0xc7e4270f, 0x40e3eff5, 0xaf4c19f6, 0x413f28db, 0x2e12e616, 0xd1e6fbeb, 0x1008f80f, 0x070719e0,
+0xbdebdfde, 0x16f31b12, 0x2e1c12e6, 0x41eafaf4, 0xccc7e6ef, 0x26ff0bef, 0xc201fed9, 0xe733ead8,
+0x373cf10a, 0xfdd5f213, 0xe1dcdcfc, 0xa9400904, 0xe432d018, 0x15340c0a, 0xcd3aedf4, 0x24210cef,
+0x02d90407, 0x43c827ff, 0x27341c11, 0x1226fef4, 0x192cebfe, 0x280cf204, 0x96ff00e4, 0xb5dd4204,
+0x1a2adc52, 0x1bf00ec8, 0x1d07f320, 0x09ed0a15, 0xb5e3200a, 0x12f7de1f, 0xe915eaee, 0x3bf11219,
+0x3df1ec19, 0xe3ecec13, 0x1ce6f543, 0x0d18f4ef, 0x21ea1a13, 0xc8f507ce, 0x22e511be, 0x0b1cfaf3,
+0xcfe8e3f0, 0xc104f6dc, 0x301817f0, 0xb825084d, 0x3fe920c2, 0xeff3171a, 0xc7e01809, 0x2efc10d3,
+0xb602f92b, 0xdecb215c, 0x0d13053c, 0x3908e5d1, 0xb5efefc7, 0x44e0dcb9, 0xe8161def, 0x1de8f040,
+0x2f140b3c, 0xc5ec07ec, 0xd403ed25, 0x0aeefce9, 0xd412e23b, 0xf4f804e0, 0xf218e5c1, 0xca17e612,
+0xc5050ed3, 0x1afc142b, 0x2e2c0dab, 0x39ecfb13, 0x1de6100e, 0xf81ef4ba, 0xf600211d, 0x12c03968,
+0xe8f90f2e, 0xfbe42c44, 0x260bfee3, 0x051b22be, 0xe50842df, 0xfef6d632, 0x130514d3, 0xe6def24a,
+0x09213107, 0xdfddfa05, 0x0418d310, 0xf00739d1, 0x0afa1ed3, 0x1bfff4d7, 0xfddc2b4c, 0xf1e0c9b8,
+0x04e1d4f6, 0xd423acb5, 0x320c5b1b, 0x160eefcb, 0xe11347ec, 0xedfc16d4, 0x10f8c7e0, 0x220e4ae6,
+0xf0f5c834, 0x272002f8, 0xdf0df9cb, 0xe02ff8a9, 0xd7df5106, 0x152512b2, 0x02fe2bd6, 0x1707112f,
+0x2519b2bf, 0x051e2eba, 0x0bdbcefd, 0xf1eae8c2, 0x08dee007, 0x02142b3c, 0x001628ef, 0xe21fb9f6,
+0xe0e9f7c1, 0x31ed593c, 0xe4ff442a, 0x16e1c247, 0xe6e3be0b, 0xdbfc0324, 0x001110c7, 0xda2affe8,
+0x05b6d302, 0xf94a2023, 0xe5350dde, 0x1ee0f60d, 0x282a00f8, 0x28ba50ff, 0x08e5211f, 0xf1f8190d,
+0xd5abad20, 0x16fbc22c, 0xf8cc1fdd, 0xda1efe0d, 0xeb0ec3f5, 0x014c26e7, 0xf0e118dc, 0x1d11bb08,
+0xe4bc44e8, 0xd4d0541d, 0xe43a44f8, 0x28445112, 0xdae0fde5, 0x13e3c6f8, 0xe11b09f4, 0x2ad2520d,
+0xef59eafb, 0x1dcaf530, 0xde4d06f2, 0xf617e325, 0xf3c5e512, 0xf4b1cc13, 0xf91d2128, 0xd8affff4,
+0xe54d43d8, 0x1a060fdc, 0xe7c3be22, 0x2441b415, 0xf3171a19, 0x180ff011, 0x30e4a919, 0xfaed2ff4,
+0xd54253ec, 0xe7df0e1a, 0x232afa08, 0xe9f7effe, 0x1cb5f31f, 0xf4083422, 0xdfd4b620, 0x2223fc00,
+0xddcf06fb, 0x6cc004f8, 0x041c4418, 0x4b04230d, 0xc6f92225, 0xf61aee22, 0xba001d0d, 0x06c3e2d9,
+0xd5c2dd15, 0x240ffde9, 0x405a0319, 0xe3a01731, 0xde3c0b37, 0x2206faeb, 0x27fcfb22, 0x33af00dc,
+0x3ba60a29, 0x2cc913cd, 0x4ef505f1, 0xccbbda1c, 0x203df4e2, 0xe407f815, 0x1a15f321, 0x18d30eed,
+0x2d1f10fa, 0x3c47fb09, 0x3cf9eb1e, 0x4cf114e0, 0x0caadde6, 0x594ee5d2, 0x1ed6d026, 0x2229f602,
+0x18f4faff, 0xddf5f0e4, 0xaf07fae3, 0xdc0fd8df, 0x14c3fce7, 0xf2d2edea, 0xb106e6fb, 0xdb0ada22,
+0xc6bdfce1, 0xcdf111e4, 0x1dcf0ae7, 0x27c4f6f7, 0xe5e5ffeb, 0x20380d0c, 0x4b3e070f, 0x322300e9,
+0xede80a0f, 0x65f7ebe0, 0xcbddc306, 0x572b0d53, 0xb0082f2f, 0x5d0729e0, 0xbf0f3537, 0x18c2199a,
+0x57e3110c, 0x4512d23b, 0x18f01e17, 0xf9f3f11a, 0xc6f1df37, 0xeae1ef47, 0x0df111c9, 0x19fce6d4,
+0xd3f90fd2, 0x08f70520, 0x15f4e0cc, 0xefff12d8, 0xaee6ea0f, 0xc1db2afd, 0x1cdaeab3, 0xbe0bf433,
+0xe6e01b47, 0x5503f125, 0x30fbd422, 0x3723f7b4, 0xcb1510c3, 0x09fdf4d5, 0xeeee1e39, 0x0ae7ebc0,
+0x02d81d00, 0xbc33db5a, 0xc501e429, 0x12e0edb8, 0x03f1eac8, 0xf0eb25c4, 0x09e4e7bd, 0xd71de0f6,
+0xc8ee0017, 0xdf00f0d9, 0x4209f8e2, 0x3100e6d8, 0xe2190942, 0xd5070921, 0x0b0c02e3, 0xee00e4c7,
+0xf5eaceef, 0x2011f7e9, 0xdf1106c7, 0xfede29fa, 0x1c1f0d0a, 0x1f0d08e6, 0x0d2ccc54, 0xf6e61df3,
+0x121f1647, 0x1610c3c8, 0xded7b6af, 0x1ef8f61f, 0xf9172ef0, 0x391f60f7, 0xf3ebcced, 0x273050a9,
+0x1305ebd3, 0x16221206, 0x12e5c6f4, 0x0006d822, 0xecfa152f, 0xf9f92f2e, 0x1fe2470b, 0x2425fc4e,
+0x33285cb1, 0x070430d3, 0xe421bd48, 0xede1c5b9, 0xf4ea1c12, 0xe1c809a1, 0xfa05d223, 0xedf83a2f,
+0xc4259d03, 0x1f180810, 0xdb10fee8, 0x11daeab2, 0xec2614fe, 0xb31e74ba, 0x14143b14, 0x07ff2fd7,
+0xfb23d305, 0x25fffcda, 0xe7dbbefe, 0x13e43abc, 0x070c301c, 0xff0726d1, 0x16dc3eb3, 0x04ee2dc6,
+0xdeff05d7, 0xf8dfd0fa, 0xde13fb15, 0x0b1acebe, 0xe2f1f6e6, 0xee1d3a0b, 0x3003572b, 0xf9d32fab,
+0x0420ddb8, 0xcef05919, 0xd126a9b2, 0xdac9fda1, 0xe0ee083a, 0x30e259ba, 0xecf1ed19, 0xd0da58fe,
+0x071f2e46, 0xe2da4602, 0xf3cecba5, 0xd8e20045, 0x18d6ef53, 0x11dbe9b3, 0x1819c00e, 0x09f930e0,
+0xf2183640, 0x250bfd1e, 0xcae3a2bb, 0x17d13fa8, 0xefe6c8bf, 0x1ff8f620, 0xfaee22c7, 0x0afdce25,
+0xd9010029, 0x1727c2ff, 0xf2221bf9, 0xd8e10147, 0x0726224d, 0xe0ea4811, 0x1a2e0ea9, 0x0d3635a3,
+0xf0f1e7e8, 0x0c0234d9, 0x0ef8c920, 0xecf4151d, 0xe817c112, 0x0e0637dd, 0xddf305cb, 0x16e2ef0b,
+0xf1c10000, 0xfd2ce717, 0x092ada04, 0xb929e202, 0x2902e1ff, 0xcecaff25, 0xd23c09f2, 0xee2107eb,
+0xd9c0e908, 0xd3bc0018, 0xb3bc051d, 0x29ab261b, 0x0b0bff2c, 0xdf5ae41c, 0xe6ef07cf, 0x2ea10fea,
+0x1db5fac8, 0x083a0cde, 0x4ff5e1ee, 0x4643281c, 0x08dee1e6, 0x1a0e1ffa, 0x2a30f11b, 0x0b4a02f8,
+0xe1f5e321, 0xf5eaf71e, 0x21e3e312, 0xccfffaf6, 0x3932f5d8, 0xe3efeff5, 0x314df518, 0xbdc709db,
+0x1336e5ef, 0xeb2bec0d, 0xc4a6ec02, 0xe3b2eccf, 0x33210b27, 0x1a2b0a08, 0xb536f304, 0x51f7def2,
+0xa946d6e1, 0x0f1fd2e2, 0xd5cee6f8, 0xe60303f5, 0xdc25f225, 0xab360303, 0xd9aa2c0f, 0x30f9ffd2,
+0x611ff8e0, 0xc6fa3909, 0x33241323, 0x023ff505, 0xc3f827e8, 0x1601ebdf, 0x35d91228, 0x0cf70d01,
+0xbc271ce1, 0xbae8e400, 0xed09e310, 0x13ec16e2, 0xbffb1513, 0x3452e724, 0xff010cd7, 0x53ddd9d8,
+0x18da2cfc, 0x3b28f102, 0x1ae1ed00, 0x051c0ff7, 0xe16b23f4, 0xfb4f0844, 0x091fdcd8, 0xbea1e109,
+0xbcdc1a36, 0xd908e5fc, 0xd7f301e1, 0xbd1d02e5, 0x2319e5f4, 0xed0cfbf0, 0xed46eae5, 0x3442ebe2,
+0x0323f4e6, 0xd62a25fb, 0xbec7fd03, 0x3841e6ee, 0xd3cc1018, 0x4d06faf4, 0x31422521, 0x15b1f8e7,
+0x354b12d9, 0xe3ca0dde, 0x1fd30cf1, 0x25ddf705, 0xf5e8fdfb, 0x0a121df0, 0x5bc81e17, 0xd8e8cdef,
+0x000000f1, 0xdfec4a13, 0x0d1e1cba, 0x0ff918d1, 0x00de274a, 0x07242105, 0xe5030cd4, 0xe0f40734,
+0xee02ead7, 0x15dc3d4d, 0x0de3cc46, 0xe817f1ee, 0xfbe3d40b, 0x0d09cbe2, 0xd22256b6, 0x1904bf2d,
+0xea17ee10, 0xe019f842, 0x15ebc33d, 0x39ef6217, 0xe6f8f1df, 0xf1001ad8, 0x1cf90dde, 0x19e40fbc,
+0xe9e6ef0e, 0x0103d72a, 0xeffe3929, 0xf7d83151, 0x11273901, 0x28fdb0dc, 0x07f22f36, 0xddddb505,
+0xe0e3f8ba, 0xf3dc354d, 0xe613f1eb, 0x09f4e2e3, 0xe70df1cc, 0x28d950b1, 0x001128e9, 0xfe312658,
+0xf624cdb5, 0xee1217eb, 0x1ac04268, 0xd414ab15, 0x0d191b0f, 0x0e021b26, 0xe10cba1b, 0xfc09d4cf,
+0x1b2b0c53, 0x0be8343f, 0x04d92c01, 0x01f4d733, 0x0d01cad8, 0x16f2121b, 0xddebfbc4, 0xdff2fa37,
+0x1dea0bee, 0x1b0cf4cd, 0x2ff7a9cf, 0x13da15fd, 0x35fb5c2d, 0x0b0fcd36, 0x0e19ca40, 0xef1e380b,
+0x0303d5db, 0x02fdda24, 0xe30845e0, 0x14fc1424, 0xeef115ca, 0x04e5d40e, 0xde2bfaad, 0x2ddaaaff,
+0xf2e91a3e, 0xd51e530b, 0xdde405f3, 0xd908b11f, 0x0fe0c807, 0xecf0c318, 0x20064723, 0xf814d03c,
+0xe9081120, 0xe0f808cf, 0xdbf4b235, 0xfdf225e6, 0x10da19fd, 0x170fef19, 0xedc53a64, 0xea133e16,
+0x1725effd, 0xe4fbbcdc, 0x0208dadf, 0x1ff50933, 0xf706d0d2, 0x0901cfd6, 0x130515d2, 0x1e1e0909,
+0xe01247e9, 0xbe3f0000, 0x35dbe517, 0xdfcd0d03, 0xff29f9f5, 0xb0c0d900, 0x30eed8e7, 0xdabff816,
+0xaa4203e7, 0xd9f62f19, 0x5453ff1d, 0xdbd32bd6, 0xec07fd06, 0xeedd14df, 0x462debfc, 0x173e1d04,
+0xe423efea, 0x4f6cf505, 0xd701da45, 0xb13602d9, 0xa2fd28f3, 0xeec7c9dc, 0xfae21610, 0xd4c9de0a,
+0xd5db04f0, 0x1503fd03, 0x0360eddb, 0x2abf2639, 0x34fdfde7, 0x462bf5dc, 0x3c1f1e04, 0x1d04ec09,
+0xbdd20cdd, 0x1c561a06, 0xfb1d0c2e, 0x19b824f6, 0xd1f4f120, 0xbe23061b, 0x1dcb1bfb, 0x22f10cf4,
+0xc5f8fa18, 0x5227ec20, 0x33d1d501, 0x03caf6f8, 0xd3fc260d, 0xb9150523, 0x5811e114, 0x211bd0e8,
+0xcd1d08f2, 0x44c2f50b, 0xf3d6e417, 0x0535e5ff, 0xfbccddf2, 0xc102def4, 0xf5d01825, 0xfb27e207,
+0xeff62300, 0x3a1417e1, 0x17b6efec, 0xddd9ef23, 0x2ddefb02, 0x0a2bfbfb, 0xfb561e03, 0x2ceedd2e,
+0x17f50416, 0xc4f3efe3, 0xb9e2ece5, 0xe3e71f0b, 0x47e5f4f1, 0xebdbe1f3, 0xe4e514fc, 0xfc280c0d,
+0xfdbb2501, 0x0508251c, 0x0d0add1f, 0xcb201b1e, 0x2e480df9, 0xf3f2061f, 0xc7181b19, 0xe42310f0,
+0x32b3f4fc, 0xe5c70b26, 0x1ceef3ee, 0x3def0d16, 0x2309ec18, 0x3544fb1e, 0x5050f3e5, 0x000d29d9,
+0xef5b271a, 0x57b0ea33, 0xecc6d0d7, 0xd619ecee, 0xc60101f2, 0x3954efd9, 0xf2db11d4, 0xf7b31afc,
+0x433d20da, 0x0000e515, 0x06f5d234, 0xdef0b6c8, 0xed1dc4f4, 0xe8fef126, 0xe215bb3d, 0x14fcedd5,
+0x0deb1aed, 0xe528f2b0, 0xe0e84840, 0xdd1ab542, 0xe91ec10b, 0xe11ff648, 0xbf0968e0, 0x0ddccab5,
+0x0b1ae30e, 0xe6020ed7, 0x07f42f33, 0xf4e91cef, 0xfbe9d3c0, 0x24e4fb44, 0x12fa162f, 0xe9063f21,
+0x01f6d931, 0x22f806df, 0x1d04f423, 0xdffc4923, 0xe4eff3c8, 0x190af11e, 0x18f4111c, 0x170311da,
+0x16f73e20, 0xdbcb04a2, 0x0dfc1bdc, 0xedf43a1d, 0x10cfc8a6, 0xfb402d68, 0xf2273500, 0x21fc4a24,
+0xfae2d345, 0xe7001028, 0xeaf1ed36, 0x00f329e6, 0x0d1234c6, 0xdf27f8ff, 0xecf8eb20, 0x09f432e5,
+0x0bfacdde, 0xeffbe9de, 0x14e73c40, 0x2007f821, 0xe005b9dc, 0xeafe3eda, 0x0701d0d7, 0x29015027,
+0xe9fdee24, 0xe31ff408, 0x24244db4, 0x03f5dcce, 0xf3151c3c, 0xf5da1e01, 0xe01b0843, 0x02db2b4c,
+0xdcf34c35, 0x07f620e1, 0xf40e1b19, 0x0cffcc27, 0xf9112ec8, 0x3c0e9bca, 0x1615123e, 0xf7ddcf04,
+0xfe0326da, 0x2a26524d, 0x09031fd4, 0xe6dc0dfd, 0xfbf32ecb, 0x18121115, 0x231d050a, 0x011a2abe,
+0xfc0bdc34, 0x0a0632d1, 0xe3dabbb2, 0xeee2c60a, 0x11eae9ee, 0x031ad6bd, 0xf6131e3a, 0x0d22cbb6,
+0x13103c18, 0xf5161c3e, 0xf01ae8f2, 0xf306e42d, 0xed04c623, 0xf1211a08, 0xdbf34d34, 0xe9e6eff2,
+0xd90db1cb, 0x0aedcf3b, 0x3f110000, 0x2dffe818, 0x20fbfbda, 0x08fc07dd, 0xd6182025, 0x9d27fff0,
+0xb5e63c01, 0x295323f2, 0x06d401d5, 0xa0ee22fd, 0x15ec3816, 0xba24ed14, 0xe13be1fb, 0x39360aed,
+0xb60f10f3, 0x15e2dee7, 0x174aedf6, 0x1146ee22, 0xee16171e, 0xed1d1512, 0xd7fdec0b, 0x110200db,
+0xfb5116db, 0xf50923d8, 0xd7bae3e1, 0xc960021e, 0x14400e38, 0x33d01419, 0xc737f407, 0xdc6011f0,
+0xebfd05c8, 0x120ced25, 0x0845e9e4, 0xfaf620e3, 0x394923e1, 0x5217efde, 0xc1c8d6ef, 0x030fe810,
+0xf418db19, 0x0ff3e510, 0x0ec7e71c, 0xfbaae5ef, 0x27c823d2, 0xc3c6ff0f, 0x0ed0ecef, 0xbbd11b07,
+0xb8411cf8, 0x42e6e1e7, 0xafcee50e, 0x29bdd8f7, 0x4cf6ff1b, 0xefe2dc1e, 0xc518e9f5, 0xc313ec10,
+0x0ccfea14, 0xba27e309, 0x0c9ae201, 0x19d31bc3, 0x4aec0e06, 0xfa0e23ec, 0xeac5211b, 0xe9281313,
+0xdd711101, 0xed41fab7, 0xbb2f14e7, 0xd6a1e4fa, 0xb6b3fec9, 0xd0b32324, 0xcbdbf8db, 0x4fb60cfc,
+0x3503d9df, 0x06c3f225, 0xd2c7de15, 0xbefff9ef, 0x0aec1ada, 0xbe401eec, 0xc92fe519, 0xb6bf0ff9,
+0xddbcde1a, 0x2047fb1b, 0xcbbaf91f, 0xa7fdf3e2, 0x30eed025, 0x435b07e9, 0x2efc1b34, 0x0ac70723,
+0xbefce311, 0x4ca719dd, 0x1d04db31, 0xead20a24, 0x2a5b12fa, 0xc613ffce, 0xb52c12ea, 0x03bd23fc,
+0xc9b9dc1b, 0xd0e40e1f, 0x000007f4, 0x0cde354a, 0xd2dcaafc, 0x21d908fe, 0x1bf5f3ce, 0xe40d0d1a,
+0xcd06a42f, 0x0fe7180f, 0xda0c4e35, 0xfe28db00, 0x13efc5e9, 0x08032024, 0xff35275d, 0x22eeb639,
+0xeb2bc3ad, 0xe9ec3e14, 0x2523fd06, 0x06f3d1cc, 0xdb2903b0, 0x0fdfc8f9, 0x18e50ff4, 0x0efbcb2e,
+0x11fae8d3, 0x33e4a545, 0xe22af6ae, 0x1f10ba38, 0xe30bf633, 0x0c06e322, 0x0eeb1b3e, 0x09f1cf18,
+0x10ef1939, 0xe9f21036, 0xee17e9c2, 0x1aec0f13, 0xeff4ea1c, 0x16dbedb4, 0x03eb253d, 0x1400c5d9,
+0xfd00db28, 0xdcda03b2, 0xcdff5a28, 0x0bd6ceae, 0x24f8fbd0, 0xfdecd5c4, 0xdfe4faf3, 0x0cedccec,
+0x0807e030, 0xf6213208, 0xebdfc208, 0xf312cbea, 0x1d21ba49, 0x03f02517, 0xda0e4ee6, 0x14ed3dea,
+0x090ae132, 0xfdefd5c7, 0xe019f9f2, 0xf4121dc7, 0xe7febfda, 0x27ee0116, 0x18fbbf23, 0xff04d625,
+0x252d4eab, 0x271d000c, 0xf8ed3116, 0x2009f831, 0x1b17bd3f, 0x201209c6, 0x2af95220, 0xee0816d0,
+0xd6e352bc, 0x240f04e6, 0xf21d19f6, 0xf41b1cf3, 0x171b3ff4, 0x0c26cd4e, 0xfe05d6d4, 0x021dd7f5,
+0x22cf4a59, 0x14efec39, 0x1c0abbce, 0x12d3c655, 0xe8fb4023, 0x07e82fc1, 0xdae6b30e, 0x1d0af5e1,
+0x27f0ffc8, 0x01ed29eb, 0xe625be02, 0xdfe2490b, 0xf11c1945, 0xca21a1f9, 0xe91810c0, 0x20190842,
+0xf1fec929, 0xe2e9b9ef, 0xe814c0c4, 0x6d0c0000, 0x14d0451b, 0x11aeebf8, 0x393d16d6, 0xa9dbeeec,
+0xdbf7d103, 0xa45e0320, 0x1ab1cbc9, 0x0b25f2d9, 0xbf111e02, 0x2b2e1818, 0xb2000305, 0x1608dbd8,
+0x210e1220, 0xfdf1fa1b, 0x2d9e2519, 0x01d0fcc7, 0xd5b8d9f8, 0x08d603df, 0xce25e0fe, 0xea000a03,
+0x234b12d9, 0xe72ffa23, 0xedf00efa, 0x361614e9, 0x2c3a0e13, 0xe114fd11, 0x2a3a0915, 0xbfe3fdef,
+0xd95a180b, 0xbd20fece, 0xbd391c07, 0x610de4ee, 0xbb0d38e4, 0xee08e4e5, 0x10fe1720, 0x2f0fe8d9,
+0xc92af919, 0xdf24f001, 0x64f0f9fc, 0x1551c317, 0xccf61229, 0xbd39f41e, 0xe9ece511, 0xf9f21214,
+0x1dc5e01a, 0x3ac00aec, 0xbf1012e8, 0x2f00e8e8, 0xdffc07d9, 0x1477f9dc, 0x0c301450, 0xfb0d1cf9,
+0xd92ddd1b, 0x43df0006, 0xcc541cf9, 0x0de40d2d, 0x043c1b0b, 0x48ec24eb, 0xcc27e0eb, 0x2ea1f401,
+0x02e00536, 0xec2c25f7, 0x380dec04, 0xe9faf01c, 0xb01b10de, 0x41f527f3, 0x31a6191c, 0xdfeef6ce,
+0x4632f916, 0x2a2c1ef6, 0xff17fefb, 0x3fc2d9ef, 0xc3e717eb, 0xe70915f2, 0xc7110fe1, 0xe9caf0e9,
+0x3a26eff2, 0x44361301, 0x40f11b0e, 0xedc81818, 0x09d8ebf1, 0x2013e2ff, 0x35ccf8eb, 0xaa2c0ef4,
+0xb033d2fd, 0x03dd280b, 0xd0d926fa, 0x4203f7ff, 0xcff31a25, 0xe8cef7e5, 0x403ef00a, 0x0bd8e817,
+0x30251d00, 0xaa10f8fd, 0x3ea7d218, 0xa708e9ce, 0xdde231e0, 0xc3e5040a, 0xeda7140d, 0xb51715cf,
+0xd2dc22f0, 0xedb70603, 0xfc19ecdf, 0xb2e2ddf1, 0x1e06d90b, 0x28ee0a21, 0xb3c90117, 0x21212510,
+0x31b8f8f8, 0xeae0f8df, 0x48bbeef7, 0xd20e20e2, 0xd029f9e6, 0xdd01f9ff, 0x0726fa27, 0x3a0edf01,
+0x160ced1a, 0x1cb412e3, 0xd24c0b23, 0x01b50624, 0xe1e526dd, 0x321ef7f3, 0x200e0af5, 0x362bf819,
+0x04130efd, 0x1af72315, 0xc7f3f31f, 0x2ad8f0e4, 0x47f1fe00, 0xa9411fe6, 0x0be92ee7, 0x1f2be4ef,
+0x14c30804, 0x5c1214ea, 0xf1cd34ea, 0x5535e7f5, 0x3410d30e, 0xfa2e0c18, 0x4218defa, 0xbcbd19f0,
+0xe6fd1c1c, 0xf5fe0eda, 0xf5e0e426, 0xb1041d08, 0xbf06da24, 0xc0d41823, 0xcfaae8fc, 0x25b0f82e,
+0xd9d5fc29, 0xc8ccff02, 0xf04cf00b, 0x32d0e725, 0xda090af9, 0x5604ff1f, 0x2fef2edd, 0xdce2f818,
+0x1fd30409, 0x4c300afb, 0xe018dcf8, 0xb54308f1, 0x281fdde5, 0xc202fff8, 0x3ca11525, 0xc241ecc9,
+0xc06d15e7, 0xc11ce8bb, 0xf51a180c, 0xb43b1cf2, 0xcd2b2413, 0xefd3f4fc, 0x3e30e8fb, 0x43df1708,
+0xd44ae4fa, 0xe912fd22, 0xeb16eee9, 0xe1aa12ee, 0xe2be0a2e, 0x24f90a19, 0x02b1fbdf, 0x4f33d9d9,
+0x1fe4280c, 0x0336f60c, 0x09bc250e, 0x2b36e11d, 0xf2fffd0e, 0xfd33e727, 0xd2ecdb0c, 0xebdc0614,
+0x16ec13fb, 0x2427ee15, 0x0a5d03ff, 0x2f0a1e36, 0x08e20000, 0xe0eaf813, 0xfe1c2bf5, 0x300c57cc,
+0x13e8c510, 0xe2e50a43, 0x0cdd35fa, 0x040f2c37, 0x0001d828, 0x4404932d, 0xdaddfe4c, 0x012d29ab,
+0xee1ec6ba, 0x1b0ef236, 0x16f6121e, 0xd4faab2d, 0xe9e5eef3, 0xe80d4035, 0xe5f642ce, 0xd8010028,
+0x141d3c0c, 0xe7110fe9, 0x14eac5ee, 0xed1c3af3, 0xd616523f, 0xdef7facf, 0xf2e01b47, 0x2211fa16,
+0x290cb033, 0x29e05007, 0x300d58e5, 0xf82b30ad, 0xf01c370d, 0xd4ebadc4, 0x080c1fe5, 0x1324eb4c,
+0xec13c5eb, 0x08fbcfdc, 0xe8ff4029, 0x08e920ee, 0x02fddb2b, 0xf20ce6cd, 0xe6184140, 0xfd1624c2,
+0x27f0b0e8, 0x1b15423d, 0x151c3d0b, 0x21e0f9f7, 0x16e1c148, 0x11161812, 0x0b0e3435, 0x07fb20dd,
+0xe30045d7, 0xebecc33c, 0xe10b471d, 0x1ceb0cc3, 0xf020e809, 0xf412e53a, 0xe905c124, 0x2eef56c7,
+0x10dbc804, 0x13181541, 0x0913cfc5, 0x0be3cef4, 0x34ed5dc5, 0xff10da37, 0xd827ff4f, 0x11d81701,
+0x13e81540, 0xcddd5a04, 0x090ce233, 0x22db49fd, 0x3d09651f, 0xf5edcc3b, 0x25224c06, 0xf7c8e15f,
+0x190dc0e4, 0xf81fe009, 0x08272fb1, 0xec11c417, 0xefe7e9f2, 0x13e1eab9, 0xe6fdf325, 0xf1f01918,
+0x0c27cb01, 0x0e0d36e6, 0x1a0bbf1c, 0x1122c649, 0x0020d9b8, 0x2120f9f9, 0xd50f531a, 0xdbeeb416,
+0xf0fdc9d5, 0x1ee20bf6, 0x24ebfcec, 0xebf1c31a, 0x00003030, 0xd1570709, 0x5a3707d1, 0x482033f1,
+0xfb262008, 0xb8fddcfd, 0x33301fdb, 0x39fff4f8, 0xf9ac11d8, 0x0115e0d4, 0x0401d814, 0xda3323d8,
+0xc72803f5, 0x0bba1100, 0x48e11de2, 0xe81f1f09, 0xfe3af00a, 0xd105da12, 0xacc1fa23, 0x1bc92d16,
+0x05f90cf1, 0x49f52421, 0x35e6211d, 0xe52f0df3, 0xdf50f3f9, 0xf6c3f9d8, 0xf3df1e15, 0xc45e1bf8,
+0xf9c6ebcb, 0x07ec2111, 0xace8dfed, 0xc606d311, 0x3f161223, 0x3e071712, 0x5e3ae9de, 0xdef5c912,
+0x34fcfae2, 0xd0e9f525, 0x0431f9ee, 0xc2bdddf6, 0x0aee161b, 0x0827e217, 0xdf322001, 0xcfbd06f5,
+0xe0daf71c, 0xde3bf802, 0xe11c0613, 0x013ff7f3, 0x1df1d917, 0x46ecf419, 0x40d41e15, 0xd22618fb,
+0xa1360702, 0x1d25c8f3, 0xe0310a04, 0x0939f8f7, 0xe8a51e11, 0x1fbe10cd, 0x4126091b, 0x3c2ee6fe,
+0xdae21406, 0x2624fff6, 0x37380205, 0xb4db0ff1, 0xfddd2504, 0xe51eda05, 0x09c1f30a, 0x15f420e9,
+0xb138ed1b, 0x26142811, 0xc41e0315, 0xba1eedf7, 0xe3ce1e0a, 0x9cef0b0a, 0xfbe2c4e9, 0x370f23f6,
+0xc4f9f019, 0xdb5fece0, 0x140f03c8, 0x1ec115e6, 0xbecd0be8, 0xd1cce60b, 0xc3b007f4, 0xe9ca1628,
+0x1adbf00e, 0xbdbc0efc, 0xc930e51d, 0xb42cf2f9, 0xc2a723fc, 0x35fa17cf, 0x539b0edd, 0xfaffd63d,
+0xe4b62228, 0xe3fc0d22, 0x4ab40a24, 0x3a0cdd24, 0x13bc121c, 0x15e30000, 0xfcee2c3a, 0xf7f2e1e7,
+0x1528c2b0, 0xe51c0df4, 0xdc03fbdc, 0xffe12af8, 0x1c294451, 0xfc18db10, 0x1f0048d8, 0xdc06032f,
+0x0bdee2fa, 0x04162cef, 0x09e7d0c0, 0x0c24e34b, 0xbddc6bb4, 0x09f72032, 0xe11d4746, 0x0bf2e4e6,
+0x2122fab5, 0xd60653d3, 0x30dda84a, 0xfded2b3a, 0x1de80b10, 0x08ead13f, 0xd3055522, 0x2610fe37,
+0x34fc5cd4, 0x1fecf614, 0x0cf41ce4, 0x16f7ed1f, 0xe6150e12, 0x0c00e427, 0x11ee3917, 0x0eee363a,
+0xe3de0cfa, 0x24054c23, 0x2efb5623, 0x27d00058, 0xe71441eb, 0x231b4bbc, 0xf3141c3d, 0xb9f66f32,
+0xd3ecab3c, 0xeb04122d, 0x1a1ebef7, 0x0c0fcce6, 0xeae81240, 0x14ec3c3c, 0x41ea9711, 0xfeded64a,
+0x00ccd95c, 0x22fef926, 0xec063b2d, 0xeae93f3f, 0x1ffff827, 0x25264cb3, 0x180a101e, 0xdcfc0523,
+0xf1dbe7b2, 0xfbe424f4, 0xe9053ed3, 0xdafc4ddc, 0xea23c2b4, 0xf2fe1a26, 0x252403fd, 0xfd0e2537,
+0xd01b59f3, 0x101be80d, 0xd9dd4fb5, 0x1edf0a08, 0x27e702bf, 0xd9f400cc, 0xf914d214, 0x1debf53e,
+0x1524ee05, 0x121f3a0a, 0x01dcd604, 0x3901a029, 0x1dfef629, 0x11dc1804, 0x23f00538, 0xfd1124c8,
+0x32215a07, 0xf7173112, 0xdc0e4dc9, 0xe00008d8, 0x1c1dbc0b, 0x200c09cc, 0x0805e0dc, 0x14e03bb8,
+0x2bf3ac36, 0x2f0aaacf, 0xeadc124c, 0xff16d8ee, 0x01f3da1a, 0x0000163b, 0xfaf213ed, 0x0a08221a,
+0xcebf1f20, 0x46dd0a1a, 0x3fcd1e05, 0xe7cee8f5, 0xf23410f6, 0xedf0e50b, 0xef42ec17, 0x08fdeae6,
+0x975ce0db, 0xca19bf34, 0x241b0df0, 0x27bd04f2, 0xf8f8001b, 0xccbfdfe0, 0xf607f4e6, 0xec4ce3e0,
+0x17d614db, 0xdfd0effd, 0xf751fa07, 0xc7081fd8, 0x225011e0, 0x16e7fbd8, 0x2af71210, 0xef32fd1e,
+0xf5e9ea0a, 0x11f2e411, 0x26cceae5, 0x4e00fff3, 0x724f2728, 0x29ea4a26, 0x1aca00ee, 0x460df30e,
+0x201c1ee4, 0x46dff7f3, 0xd3d41ffa, 0x3beb04fc, 0x4401ed14, 0xd3b3e4da, 0xc03cfb25, 0x1c1de7eb,
+0xcccc0bf4, 0x2ae10df5, 0x402802f7, 0x0117e901, 0xc201d9f0, 0xf7fc15d9, 0xfedc2024, 0x24c82503,
+0xc1fafdef, 0xfef2e9de, 0xd4c1da1a, 0xaff104e8, 0xc12a29e6, 0xb625e8fd, 0xd7c72204, 0xb5c4ffef,
+0xcd45dd15, 0x25330a1d, 0x3ff0030b, 0xa9b2e917, 0x25c12fd9, 0xc23a0218, 0xd2bfea11, 0xe3170518,
+0x282a0aee, 0xda0300ff, 0xfeb6fe26, 0xebd5db22, 0x1ce6edfd, 0xe4d4f5f2, 0x16f2f404, 0x26c8ee1a,
+0xe2e9feef, 0xa8fbf5ee, 0x01073123, 0xe843d920, 0x1416f0e6, 0x0b24ebed, 0x5123e204, 0x0000d7fa,
+0xbd290000, 0xbef21bfe, 0x19eee7e7, 0x0000f1e9, 0x00000000, 0xfa490000, 0x17e0df22, 0x000012f7,
+0x00000000, 0xbadc0000, 0x0f3fe1fd, 0x00001a17, 0x00000000, 0x31c60000, 0x09ee0000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+hard_output0 =
+0xcddb5769, 0x627f0a91, 0xbcf446fb, 0xc930d133, 0xdd7b82f0, 0xbd591856, 0xb73094b3, 0x32436132,
+0xac268fdc, 0xeeb03845, 0x0087cc79, 0xd372eb62, 0xb595a1bd, 0x511605f7, 0xa428b83f, 0xfb9380a8,
+0xe43c2efb, 0x0a04017a, 0x674608a3, 0xc2464812, 0x208bc693, 0xb03d0580, 0xdc62b803, 0xc2808805,
+0xccaf7f16, 0x848dacef, 0xedf69b49, 0x222d2bd9, 0x78ffa7a5, 0x749cb755, 0x9f4abc69, 0xa27f3a72,
+0x9652d070, 0x8a83be54, 0xa9820b7f, 0x8b00e52a, 0xb02159e4, 0xeddabc6a, 0x47b69064, 0xf161d47f,
+0xc243b0df, 0xebb2ac39, 0x4fd6cddb, 0xc0d38ba3, 0xd9ddef17, 0x8c638875, 0xa132cfef, 0x5893a4f6,
+0x1a29ce5f, 0x9072f3eb, 0x486bda03, 0xf5250cda, 0xdc10edbc, 0x392e6b7f, 0x4664de06, 0xfde2f3f2,
+0x37c7d442, 0x32b60d29, 0xbcb7c8ff, 0xa1542cd0, 0x91b36e18, 0x231f9e19, 0x97cbc4a6, 0x51ceb1df,
+0xc02dedf8, 0x4bfd9728, 0x569deda7, 0xc0790501, 0x132e47b0, 0xb8229185, 0x81cbe103, 0xa30dbf5f,
+0x72c54db6, 0x4abdbdf7, 0x557d21d3, 0x3b75be17, 0xf540476b, 0xc48d78e1, 0x767d44ec, 0x87dc6193,
+0x7a4c52fc, 0x306c3548, 0xa66e51b3, 0xfc627273, 0x5f349268, 0xf5e74974, 0x232388b6, 0x01d8e49e,
+0xbf29f75f, 0x378a9d02, 0x59a45656, 0x46fb25b1, 0x75219b62, 0x4f0d6f71, 0x220c6763, 0xdde47b70,
+0x8c0dce8f, 0x3e3db100, 0xabad4262, 0x800eddcb, 0x32416450, 0x61d260ce, 0x6e2a0fac, 0x73cdb847,
+0xeee01947, 0xe5a3a37f, 0xa710e211, 0x98ecaa36, 0x69197474, 0x6e73e13f, 0xb2100f2e, 0x7714b2fd,
+0xec3d81cb, 0xc50e5194, 0x5c69a3d8, 0xce0bdc13, 0x08e7df1c, 0x5fb014a5, 0x75b5801e, 0xf57d3b75,
+0xac5dd149, 0xa5096b77, 0x4b5da0af, 0x656506d3, 0x3823d675, 0x3840530c, 0x1c0980ba, 0x3bd4150f,
+0x3df8c0f8, 0xa2834aa4, 0x479ea855, 0x4f4557b5, 0x15d730db, 0xe6809738, 0x0aec8f7d, 0xf5e2188e,
+0xa9a0fac7, 0x785c3166, 0x049dd244, 0x72cada2a, 0x5655bab3, 0xff73ca65, 0xb3dc51f1, 0x259f555b,
+0xdaa1933d, 0x0f716b8f, 0x1e29bde6, 0xbe8bf3a9, 0x908b5a60, 0x667636c0, 0xfce80b0b, 0xcf43d154,
+0xfffc60f5, 0x324b1632, 0x1609e45b, 0xf8e09aed, 0x1a01f6ee, 0x013cea78, 0xdd89ba42, 0xfe5bca8f,
+0x894e3aef, 0xda292f76, 0x457a93bd, 0x73a9c254, 0x7b5ca9d1, 0x947355ba, 0xb8a797c8, 0x3b5e97f4,
+0x946889ec, 0xd49316a3, 0xa5383830, 0x55f63bbf, 0x47b3a4ca, 0x415bd007, 0x1227c60a, 0x3610fab0,
+0xa68c75fc, 0xa1441c62, 0x68a02596, 0x6d083d07, 0xceeb9057, 0x915054bc, 0x6f2c7fa8, 0x62be6166,
+0x963703a3, 0x5af6e614, 0x5bc3e4f0, 0xf517a95e, 0x408a2bfd, 0xab7fe7a8, 0x37aac030, 0x9956f2f5
+
+k =
+6144
+
+e =
+10376
+
+iter_max =
+8
+
+iter_min =
+4
+
+expected_iter_count =
+8
+
+ext_scale =
+15
+
+num_maps =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_CRC_TYPE_24B,
+RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN
+
+expected_status =
+OK
\ No newline at end of file
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_negllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_negllr.data
new file mode 100644
index 00000000..e946963a
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_negllr.data
@@ -0,0 +1,645 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0x7f7fb2e1, 0x467f05ef, 0x3cabba6a, 0x1f826cac, 0xbe817fe7, 0x584dab0f, 0xdcb55f81, 0xe47f915f,
+0x37b1a534, 0x4b478145, 0x9db5727f, 0x7fa6943f, 0x819550ea, 0xd18eb72a, 0xa0ba9c10, 0xcf7f817f,
+0x915a957f, 0x2c699d0a, 0x89815c0f, 0x7f569358, 0x1002587f, 0x54314130, 0x817f1b7e, 0x81a85e2a,
+0x5981bdf2, 0x0c81b00f, 0x6781be0e, 0x297fb17f, 0xfd811fb3, 0x7f077fdb, 0xd37fc2ab, 0x817f1024,
+0x81817f7f, 0x3d848ec6, 0x4486cd9c, 0x7f6d7f81, 0x1d7ffa93, 0x813648ca, 0xdc7f125e, 0xaa81a798,
+0x60838ef4, 0x815fcdbd, 0x81cb9d93, 0x817f8188, 0xc8c3d697, 0x817af2d6, 0x36888781, 0x818117ee,
+0x37a67fbf, 0x89f464b2, 0x7f1e7f81, 0x7fe5c17f, 0x8ca1effc, 0xed7f82cf, 0x4a7f5ac6, 0x7fffb8d6,
+0x427f7f73, 0x747f7f06, 0xdfcd22a9, 0x7fb99981, 0x76f77381, 0x2cd70f12, 0xd8b138e1, 0x81a4ea29,
+0x222db281, 0xc3a07fd2, 0x217f8177, 0xd1cd87c4, 0xd181679f, 0x7f7fdee2, 0x7c81a72f, 0x6e7f5a7f,
+0x77b0c96d, 0xff7f8110, 0xe8c6ad46, 0x376ade0a, 0x447f687f, 0x5916d238, 0x88816b81, 0xda7f6d88,
+0xe2b2b55f, 0x750f7fc8, 0xc624d47f, 0xc6837f79, 0x81de5272, 0xd0847f29, 0x81ad587f, 0x3e7f7f61,
+0xc803e070, 0x237ff600, 0x8c01815e, 0x814e0e81, 0x81657eef, 0xab2a62bc, 0x73d42cb4, 0xaaf03a8a,
+0x817fa57f, 0x5e7fe2c3, 0x6deda37f, 0x6e87818d, 0x767f6e4d, 0x5c76819a, 0x8182b6f0, 0xdcd3037f,
+0x81184c7f, 0x7ff1501c, 0x22c7f159, 0x45a6817f, 0x9b53c5e7, 0x57966a5e, 0x0a7f7f81, 0x4a5c155e,
+0xf37fff81, 0xad97f8f2, 0x052f397f, 0x79638181, 0x7f7f7f7f, 0x81e3fae4, 0x7659cf03, 0xbd7a5a81,
+0x46bcab7f, 0xa57f69a1, 0xba818186, 0x38a347a2, 0x91a88ec0, 0xd76087fc, 0x81818b8c, 0x7fa3424e,
+0x7f89ef7f, 0x81ac183d, 0x3aa8912a, 0xc4937da6, 0x7a4c136b, 0x81568e05, 0x8182812b, 0xebb17f1d,
+0x84b87f4c, 0xf4eab6c9, 0x81034197, 0x8183bd0a, 0xa1b92676, 0xceca2ef1, 0x9b7f7f7f, 0x5d813181,
+0x817fea35, 0x0a6c81b0, 0x9a559b4f, 0x93987691, 0xad819510, 0xa47ab324, 0xfa577f7f, 0xbba8ac3e,
+0x6694b753, 0xee63c4d7, 0x17b4ce81, 0x83c0dc81, 0x6a7f4481, 0x81d87fe0, 0x7f5cc5c0, 0xf828ea77,
+0xbb817fa3, 0x3e816619, 0x817f6aab, 0x26417f7f, 0x0c40817f, 0x9cf01834, 0x32b37f7f, 0xe74fd0a7,
+0x7f7f4167, 0x0e7f6ae0, 0x813d81b2, 0x8172027f, 0x4ea6c275, 0x814525de, 0x46817f7f, 0x7fcb2cd1,
+0x7f8198ca, 0x3d7f60aa, 0x8d7fac81, 0xe2c081ed, 0x656f8181, 0xb2ae9365, 0x6ddd8d81, 0x86e0be65,
+0x5495ec58, 0xcb981829, 0x429ee669, 0x81445fc5, 0xd3435724, 0xce51b323, 0x92b16972, 0xf0137f8e,
+0x7f199a7f, 0x81be813c, 0x8155817f, 0xd481037f, 0x167f8181, 0x5ad131d4, 0x7fbc4149, 0x81046d60,
+0x87174b9c, 0x94bc63ba, 0x68817c7f, 0x12ef7f8d, 0x7a81f4f6, 0x7f7f6db9, 0x4823417e, 0xa4989a48,
+0x7e4c787f, 0xa47f8123, 0x819afb20, 0xbf7f447f, 0x8d95af5c, 0x33a6812e, 0x07c75cd5, 0x337f8151,
+0xa2e5e15a, 0x847f7209, 0xf9f4b934, 0x72a77f2f, 0x1a9f2f7e, 0xac224bb2, 0x81467f0c, 0x3f8fd28d,
+0x4a78b07a, 0x5269a71e, 0x4fd57fb2, 0x0b5262bf, 0xea39ca0a, 0x577f3d1e, 0x509481d7, 0xb7816b48,
+0x35c31d7f, 0x818bdc1d, 0x24087fb9, 0x7fe27f07, 0x55866481, 0x7f9edd28, 0x07811ed1, 0x1b317fb2,
+0x8cb19dee, 0x396920a6, 0xd965dd85, 0x9ed5ff47, 0xc97f7f7f, 0x7f8c1d55, 0x06a69701, 0x9e348881,
+0x7f6cb281, 0x388181d1, 0xd07d61ac, 0x7fdef7f1, 0x08177f7f, 0xd87fa1c0, 0xfe9ebeb5, 0x813d3430,
+0x2181811e, 0xf4dab306, 0xa36bbb7f, 0x05e87f7f, 0x957ff3cd, 0xe4814219, 0x7fbf3f81, 0x818a7fab,
+0x43e081d9, 0x87c08729, 0x0db52785, 0x8181b0da, 0x4c94697e, 0x817fa9b2, 0x7f7f81d9, 0x8bda8193,
+0xb6811781, 0x81796220, 0xdb082ac1, 0x2c7f5d3b, 0x8c8103f3, 0x4dcbd01e, 0x7fc1a87f, 0x457f7f23,
+0x5e26bad6, 0x7f8174f3, 0x30109f08, 0xef8526cc, 0xe8690849, 0xd2b981b0, 0x7f7f7fb5, 0x8156ef7f,
+0x921a7f98, 0x7fe6681e, 0x244e497c, 0x5ef68149, 0xbfbcaa7f, 0xf5011204, 0xfe7f8163, 0x8cd08f31,
+0xc88a6b43, 0x7c8105b6, 0xf6d881aa, 0x074c2c2d, 0xd6815c7f, 0x1d15d265, 0xce816f0c, 0x14c77f2e,
+0x9a7f08c4, 0x1a8b8ed2, 0xbe9637ed, 0xb59fac5a, 0xa4d07f91, 0x7f847fab, 0xd981597f, 0x7328575d,
+0xb83af760, 0xb536b941, 0x81c1937f, 0x16b7814d, 0x3f7fb081, 0x7f41e102, 0x727f77ea, 0x9558c381,
+0x48ac819e, 0xa87f99f2, 0x819e7f05, 0x92816eb0, 0x59447f81, 0x4f6cb056, 0x818163a8, 0xe00a64e2,
+0x81bb7fa3, 0x7f816de7, 0x86ac6a50, 0x48a277b0, 0x8e37cd81, 0x7f88e646, 0x023351d8, 0x1ee7a64b,
+0x8145f239, 0x81810859, 0x557f7f7f, 0x6971d292, 0x813d7fdf, 0xe86cade7, 0xdab67f7f, 0x6d4f247f,
+0x9df2297f, 0x1e8172ec, 0x3f81a56d, 0x907f02d3, 0x45663970, 0x817f46a4, 0x817fd1ce, 0x9843b181,
+0x7fdf817f, 0x784881da, 0xd8a281c2, 0xd77f88d8, 0x8c81817f, 0xeb625633, 0x73fe4eae, 0x57787f19,
+0x7f81ea7f, 0xca8143c8, 0x767cbf1d, 0x7fbc81e1, 0x7f9bbeb5, 0x7fea8a12, 0x7f5c7fde, 0xb5fe1685,
+0xa4da81c8, 0x7f6b542e, 0x81814b7f, 0x6252f7d3, 0x55eebb81, 0x6a0824cf, 0x586128f0, 0xded58181,
+0xe32a819b, 0x79c67f20, 0xf2812781, 0x7f58c369, 0x7ffa2d7f, 0x817f81f4, 0x7ffc2c07, 0x457f9981,
+0xff4281c7, 0xd78e6955, 0x3e10e57f, 0x632b817f, 0x7f0197d1, 0x813e7feb, 0xf981ac81, 0x8f81317f,
+0xc37f137d, 0x01918101, 0x81b007a4, 0xaab059ad, 0x7f7f81f7, 0x6ac590a9, 0x817f81a0, 0x68816067,
+0x0ae08182, 0xb47fb268, 0x33d94a7f, 0xbf43d243, 0xccff8e4e, 0x44ebe23f, 0x0d4cfd2e, 0x7f81947f,
+0x81d1cee8, 0xda0e16a3, 0x34a74376, 0x117b81ae, 0x1d853f6b, 0x81d6d2ed, 0x8d7f5be6, 0xcf1fb4a0,
+0x3e1ec13a, 0x7fed0cf4, 0x65d3817f, 0x0a1fec83, 0x7fc4811f, 0x666381c1, 0x8147c754, 0xa9cf7f53,
+0xa076cf81, 0x4c7a4712, 0x1a814ff5, 0x9e7f8175, 0x8181698b, 0x7f1a7f63, 0x8191f19f, 0xb1037fa8,
+0x7f52aa2e, 0x42b65c09, 0x94afab7f, 0xbda37dda, 0x81f2d66b, 0x3e737f1f, 0xcb816031, 0xd56181ef,
+0x2c6ffb81, 0x44852329, 0x1b4b81cc, 0x055a7bb7, 0x4c6f2e1d, 0xb14ae1b7, 0x7f43c2c4, 0x2368b80d,
+0x7f7ffb9f, 0x990b0523, 0x74b33ea5, 0xd64c8199, 0x98ed81a2, 0x311bc8e9, 0x599963cf, 0xb0eeed4c,
+0x8181ed70, 0x42023428, 0xd00270bc, 0x812a7fd3, 0x7fc4101a, 0x3448d8db, 0x068814a8, 0xfeb5701c,
+0x46db1481, 0x3cc5b001, 0x8181c2b3, 0x9c3d7f7f, 0x7f13e346, 0x2b75819c, 0xf57fcfe1, 0xfdaac581,
+0x7f814c6f, 0x7f186b39, 0x7f818150, 0xea7f5b7f, 0x816776b0, 0xb8657f57, 0x7f447f5e, 0xc3628122,
+0x84849d7f, 0x7f8cb914, 0x92813dd5, 0x81557a3a, 0x6b4cbf7f, 0xc6818b94, 0x7fbb7f98, 0x3c88812d,
+0x8de4267f, 0x8dee81fd, 0x37f98131, 0x3d8164d2, 0xd97f8181, 0x3ccaebf9, 0x8165cb7f, 0x63cb81d3,
+0x817fea94, 0x48817fa1, 0x4ea0b16b, 0x3cb5ab6f, 0x3148f975, 0x8c7f1ab5, 0x7fae5a87, 0xad06c881,
+0x0681c187, 0x81a5a11b, 0xe29ea383, 0xf07f536d, 0x81b0e702, 0xc37fbb59, 0x9e6167e8, 0x3f55c9be,
+0x819a40a9, 0x6450b823, 0x15927b65, 0x72a149d4, 0x6c280b5c, 0x5d7fd8f3, 0x998d831a, 0xbb7f817f,
+0x10ab3f81, 0x96f65101, 0x257f743a, 0x7f04a024, 0xe0c981b4, 0x81163a22, 0x8e197481, 0x21addfdc,
+0x7f2dcdaa, 0x787f4407, 0x6b7f81ce, 0x9fd252c5, 0x85e8979b, 0xb049819d, 0x8f7f7f4d, 0xbe284c7f,
+0x78135181, 0x5d33db0a, 0x8123817f, 0x81496a7f, 0x81889281, 0x8145bf37, 0x2e9f7fdf, 0xe4543a2d,
+0x811e6024, 0x7f62e6ba, 0xaad07f1f, 0x4f73894f, 0x4c7f8181, 0x57817f06, 0xd27feb81, 0xcf1a815c,
+0xa57fbe7f, 0x684f7fdc, 0x81f6cbc0, 0xb72bf82f, 0xda6d448f, 0xc6157f36, 0x7f81407f, 0x9a8c4719,
+0x5fdf81dd, 0xaf817f0f, 0x813c11da, 0xd7ec1181, 0x570186e7, 0x8181f41e, 0xe5b8a2bb, 0x819051f3,
+0x2a7fd68f, 0x37a13ac3, 0x5b81fd38, 0x6342817f, 0x84697f6e, 0x7f5eddfa, 0x6abc81d1, 0x8151d9d8,
+0x3e5c6c53, 0x2b397fc6, 0x60d58548, 0xc540b213, 0x67815781, 0x89983030, 0x767c7f81, 0x71effe3a,
+0x817feb42, 0x05818897, 0x810ea318, 0x99bb0e8a, 0xf1dd4b7b, 0xae6dcbae, 0x8155adc0, 0x15b4c0c1,
+0x81ac9b14, 0x895057be, 0x9599d781, 0x38968fcd, 0x8171e981, 0x7a62d80c, 0x8681509a, 0x7f5d489b,
+0xa381467f, 0x7fa12952, 0xa07f8109, 0x5acd5d7f, 0xbb7fc77f, 0x531f3429, 0x7fb77f5a, 0x566d89f6,
+0x817d4c23, 0x406a81e6, 0x1381c99c, 0xad89d258, 0xa67f0cad, 0x510b916e, 0x7ffa81a1, 0x0b6a11e1,
+0x2be02284, 0x7f4e4338, 0x817fd023, 0x058174bf, 0x677f950a, 0x97a6a01d, 0x81817f81, 0x136881da,
+0x717f7f9a, 0xbf811404, 0x6714c965, 0x51828181, 0xcaf329a1, 0x8881bc0a, 0x7f7d8305, 0x2c7f5a7f,
+0x7f7f63e9, 0x797fced4, 0x818154f5, 0xf13681c4, 0xc04b1d81, 0x0e81d244, 0xbf3f66bf, 0x5a3f2d7f,
+0xfa931714, 0xce485844, 0x263ae55b, 0x7fd37f81, 0x79477ee4, 0x7f8187e0, 0x81ab8144, 0x7f8d7f7f,
+0x6f715068, 0x7f5577f6, 0x7f487f41, 0x81813b84, 0x33431981, 0x81a181c6, 0xca1b667f, 0xe75a7f71,
+0x168128b0, 0xe0a092d5, 0x6e8164de, 0x2e817f61, 0xc4b1818d, 0xea7f8a32, 0x1098817f, 0x14a18156,
+0xdd568862, 0xd3176a4b, 0x81308181, 0x697b3faa, 0xe256cecb, 0x697f1691, 0x4e228c05, 0x7fa6682a,
+0x7449816e, 0x4558a15f, 0x9f513638, 0x7f7f819c, 0x7fb77aa1, 0x5eaf4912, 0x14817f7f, 0x0853817c,
+0xa47fbe41, 0xdfd77f0e, 0x81371d7f, 0xc9cd7299, 0x81d0673a, 0xb133492f, 0xfd7f4003, 0x897f7f1c,
+0x1990d405, 0x9f3ee2bd, 0xc168817f, 0xc6f57fa4, 0x83ee483e, 0x90f781f6, 0x81dcb39d, 0xd37fe881,
+0x28819181, 0x83da81d4, 0x7251dc7a, 0x7928ffc3, 0x7bca8181, 0x7f7f817b, 0xf6fa907f, 0x37916bd4,
+0x0d3fbc7b, 0xde81bdd0, 0xa0147fdc, 0xbf0f7f0b, 0x25958132, 0x7f6c9931, 0x7f44812c, 0xd4868181,
+0x6d9622ad, 0x817f30b6, 0x3c7f817f, 0x81238823, 0x7f2d5c81, 0x6f81531b, 0x947faff2, 0xa5ea038c,
+0x4268817f, 0x814f8cfc, 0x81481809, 0x814d7f1a, 0x3c87507f, 0xa47f1219, 0x4f65f27f, 0x5d7f2c21,
+0x7f4631a5, 0x5a81475a, 0x7f81ef60, 0x73aaa890, 0x327f7f7f, 0x81b614c1, 0x7fc6477f, 0xe5b7a6be,
+0x55816e73, 0x7ffaffb1, 0xb93d1b81, 0x3ce80fe2, 0x37ec6194, 0xe681e2db, 0x81c1d37f, 0x33a2bad3,
+0x582b8181, 0x7fdc7f25, 0x81978169, 0x9733486f, 0x3de13ef3, 0x247fd5cf, 0xa91d817f, 0x606a7f56,
+0x817f8178, 0xe61d7b08, 0x7fa08181, 0x058bd29b, 0x28237fbf, 0x4f81b057, 0x488d8103, 0xe1817fa5,
+0x81666297, 0xfabc75c8, 0x7ff75edb, 0x4a62fab5, 0x687fad84, 0xa7221c28, 0x773a5a6e, 0xf27f8581,
+0x7f7f9569, 0xa31d812f, 0x81d48457, 0x7f88bcfe, 0xd743b981, 0x7f608102, 0x18de2185, 0x81d77fc4,
+0x937f337f, 0xe67fe52e, 0x95d481da, 0x887f6071, 0x81301872, 0xd9088132, 0x7f7f5a81, 0x8181aba8,
+0x7f9a7fcd, 0x7f7f4136, 0x3d87d3a8, 0xd581d381, 0x5d306ed0, 0x615b98dc, 0x812b3281, 0xdbe5c443,
+0x817f7fe9, 0x815462da, 0x8122da0a, 0xb5817d81, 0xe12bd56c, 0xfa819a68, 0x907f8183, 0x236aa83f,
+0x1b576c9b, 0x5b6424ab, 0x3f3e3953, 0xec81507f, 0xddb43aa9, 0x818134f1, 0x81c37fb0, 0xbf4a7fcf,
+0x810b7f7c, 0x705b81d5, 0x23636581, 0x3caf7f9d, 0xbe765d81, 0x189a11ae, 0x8194cabc, 0x6063c5b0,
+0x0e3c5353, 0x388235bb, 0x81904632, 0x2b9781d3, 0xd78db758, 0x953fe437, 0x7423f22d, 0x81ad9c95,
+0x7fd74748, 0x329b582d, 0xe0e07f7f, 0x7f81ae8e, 0x962fe50c, 0x3b7faf1d, 0xcaeb8e37, 0x7f54276a,
+0x927f6c9a, 0x828143c1, 0x90456105, 0x7f757ff6, 0x796681eb, 0x7478b225, 0x70d852bb, 0x7f280bdb,
+0x56597f47, 0x51e6fff8, 0x1e7f818d, 0xdc4a8175, 0x10712985, 0x2ee6daa1, 0x81c7d046, 0xa1a36e79,
+0x9adf7f7f, 0x812ee70d, 0x91bc1062, 0x443b7fa9, 0x818f47b3, 0x7f817f3b, 0x7981a09c, 0x7f9c817f,
+0x1b46d2cc, 0x786179de, 0xd97f61f2, 0x81b4f213, 0xd381874a, 0x7f4f7fc4, 0x9c81c3d5, 0x17d59842,
+0x7fc9a8fc, 0x996840b5, 0xd5a79281, 0x88378181, 0x7fd4ca81, 0x81ba7fee, 0x5dae297b, 0xc8fa0e5c,
+0x987a5757, 0x5debc016, 0xa64eb973, 0x61a51b81, 0x66509c7f, 0xeb7ad965, 0x7ada817f, 0x4d5bf7bc,
+0x8876b8a4, 0x7d73b4d0, 0x762cb5ad, 0x6ec14d66, 0x6bfdad7f, 0x7f7f810e, 0x81bc2b15, 0x0f817f9c,
+0xc72fe940, 0xc9168b0c, 0x7f6e8d79, 0x7f81fadd, 0x706181f6, 0x678181e7, 0x3f68c5e8, 0xa06d8195,
+0x78d73d7f, 0x7295109a, 0x05897f55, 0xbdc07281, 0x81817fe3, 0xcd157f30, 0x489ca757, 0x53468119,
+0x3f6a81b8, 0x03fe7c43, 0xb511cda3, 0xe581a17f, 0x5e3e3ec9, 0x7f507f96, 0xbe12a481, 0x81006ddd,
+0x49322cb4, 0x27f7a81e, 0xc6085618, 0x8148d270, 0x68668181, 0xec5ed498, 0xa011833a, 0x81475300,
+0x071d3a63, 0xa2c55142, 0x7fe9767f, 0x6e3c21fe, 0xeb63814a, 0x7fd57ffd, 0x9f91630b, 0x475f219f,
+0xbbb5c7bc, 0x686de142, 0xb652909f, 0x6b8e5a7f, 0x7f885858, 0x7f027ee1, 0x7f818191, 0x818881d4,
+0x1b7c017e, 0x8581aac9, 0x627fa790, 0xaf837f7f, 0x69064681, 0x311d32e9, 0x6f7f0ba1, 0x7b81a1c5,
+0x7f4f8144, 0x81e38144, 0x528daa3d, 0x1c098175, 0xba6b0ac7, 0x732614ab, 0x5b2c5d7f, 0x81815ce8,
+0xb37f7fbb, 0xc87f8ec5, 0x81b08181, 0x5bcb8136, 0x7f2524c5, 0xf5e881d8, 0x50ceaf7f, 0xf72387a1,
+0xd4c74e74, 0x663f7fe8, 0x26ae4a7f, 0x5d7f6358, 0x8178bbdb, 0x46f38102, 0xb0327f53, 0xa019f297,
+0xeff57f12, 0x7f7f79e2, 0x8120817f, 0x73e4b322, 0x999c6349, 0x92ba973e, 0x7f7fbc5c, 0x81946449,
+0xaf4b817f, 0xca523d00, 0x81817fbf, 0x3b145a81, 0xec54886e, 0x81cb173e, 0x5f02667f, 0x967a8181,
+0x8a817f81, 0x3cb93c41, 0xfa811336, 0x74feb5a9, 0x821b9081, 0xaea865af, 0xf17f4e71, 0x52810059,
+0xaebc3b36, 0x6a8f7fd2, 0x109de3eb, 0x4c555f48, 0x8166cc78, 0x7f4c7b48, 0xc7d1d381, 0x1381537f,
+0xa0ab5c1f, 0xa00d0039, 0x81157f81, 0x2f206f81, 0xacb45b81, 0x2ec382f3, 0x7ff0383e, 0x44dc5aeb,
+0xc37f81cd, 0x43553306, 0xa63d7f81, 0x7f7f8140, 0x7f7f818a, 0x7d82bd27, 0x817f8b7f, 0xa3579535,
+0xc2e47871, 0x81815a0b, 0x49977081, 0x724cc372, 0xc681c27f, 0x2b7cabf0, 0x3081a6ac, 0xec7f964e,
+0x7fda3ad3, 0x422671a4, 0x60008158, 0x7f738e7f, 0x538e6f81, 0xb73e0ab0, 0x796a6551, 0x81ad178b,
+0x19731c7f, 0x1bf8c26a, 0x4acb7fd4, 0x5121b547, 0xda547fff, 0x7e505ff7, 0x817fff48, 0xa0813934,
+0x3d4f77d1, 0x83f327ef, 0xc7937f6b, 0x357c4a84, 0xc183ca7f, 0xbd6081db, 0x30e0d783, 0x492aa57f,
+0xbba6671e, 0xb062baef, 0x7f817f20, 0x7f267f58, 0x7fb94b3f, 0x447f5b21, 0xb864997f, 0x7fb057f8,
+0xb1401a7f, 0x7fb57f9d, 0xefbb8175, 0x66892581, 0x7f06fa7f, 0x817a90f0, 0x7bd5793b, 0xc37ead24,
+0x6feaa4be, 0x3a548cde, 0x8e81f98c, 0x81b78199, 0xe6c11940, 0x4d647f2f, 0xbc5c8181, 0x8175fa91,
+0x8188864b, 0x85591890, 0x47ce5d57, 0x38a4efb7, 0xce5e739f, 0x81737fd4, 0x6d7f635e, 0x9c861774,
+0x89818262, 0x45bc8114, 0xd1552a89, 0xc68174f8, 0xaf9081d8, 0xcb08e35b, 0xf5c404b2, 0x2ce96576,
+0x11895a36, 0xcf20e0ea, 0x2548e7dc, 0x8f774c59, 0xf85a8f56, 0x7f81ddae, 0x7f7f627f, 0x1481491c,
+0x7f97661b, 0x787f81e0, 0xa2192f7f, 0x6cb37fb0, 0x8181ae27, 0x3f7fafec, 0xce7f5a86, 0xae067f7f,
+0x3c1c8181, 0x817eb443, 0x8e741041, 0x5a4ef2ba, 0x61029de4, 0x9d637e34, 0x819d7fca, 0x8181cd7f,
+0x8175b96c, 0xbc4f6ecf, 0xf826aead, 0x567fa36d, 0x81968197, 0x9f9dcf3e, 0x265c8131, 0x81d9deab,
+0xc21cf772, 0x7f6bd907, 0x313f795b, 0x817f197f, 0x09ecfe43, 0x6114402f, 0xe0995a81, 0x5f263281,
+0xbe7f8158, 0x7f7f26dd, 0xfd7f7f7f, 0x3da2c609, 0x6ce3810d, 0x7f81cd1b, 0x81d68809, 0x42be9c6c,
+0xf3d37f5c, 0x7f81e031, 0x774e0c30, 0x7f8f468f, 0x9ebb42b2, 0x7f817fc0, 0x1376817f, 0x25f78181,
+0x7f8eb3be, 0x20228505, 0x229fe181, 0x81ac277f, 0x4f308129, 0xb8818111, 0x81d47fc7, 0x818181f0,
+0xc27fd34a, 0x9dbc810a, 0x8481d981, 0x61817581, 0x8a2c817f, 0xe7815625, 0x96a7946f, 0x3a7f5d5d,
+0x835f2fc1, 0xb6082dd7, 0x567f687f, 0x838181b6, 0xc663e9ad, 0x517f7fbb, 0xa570adb0, 0x12cfc2b4,
+0xc1587f39, 0xbd8140ba, 0x7f814366, 0xd00981ec, 0xc27f217f, 0x5cd1b2ad, 0x81992c7f, 0xb1b57f62,
+0x3bb9b1de, 0x9213cd8a, 0x814f136e, 0xa5c27fc3, 0x947f7f59, 0x8ef30ddd, 0xd3f290e9, 0x39bc7f8c,
+0x7f814e81, 0x5381ff22, 0x7f81f04f, 0x0adde113, 0x5b93867f, 0x687f8111, 0x7f7f1c60, 0xb856814f,
+0x7f8e8196, 0x5cfb66fc, 0x20310ab4, 0xa2599964, 0x973f0065, 0x817f95d6, 0xa43b7f7f, 0x816bb1c3,
+0x7c7581e9, 0x521770cf, 0x81456be1, 0x5e26818d, 0x3b54057f, 0xd4e231d6, 0x977f0346, 0xe777814e,
+0x81b6095a, 0x7b337fd7, 0x18bb202b, 0x219b7cf9, 0x2bdeaa81, 0x55dcd1d2, 0x8123905c, 0xe463fd74,
+0xd16ec17b, 0xd90f4e3b, 0x817fd97f, 0x817f7f7f, 0x816afd35, 0x3dd87f51, 0xa62f0e5f, 0x9f4766ce,
+0xa933c505, 0x81049129, 0x627f3e7f, 0x025ea181, 0x7b5e4119, 0x0d11aff9, 0x817faaa2, 0xe3bc817f,
+0x7fb27f81, 0xc07bc500, 0x9451c181, 0x1fc6811e, 0x7f9f9e3b, 0x7f4c1a91, 0x7f1f7148, 0x8358819a,
+0x81119a81, 0x16d55f0c, 0xc07f7f2e, 0x467f9991, 0x237f8c7f, 0x81638101, 0x2aacb9ae, 0x4f439f32,
+0xc65e96aa, 0x7f826508, 0x7fa86ab6, 0x602ddbc4, 0xd07fd00a, 0xb0e381dd, 0xca81cc2e, 0x7f7b7f15,
+0x7f81b9bd, 0x21627f38, 0xa481ed5c, 0xeee8d79b, 0x64369481, 0xb97f81b1, 0xf443d7e8, 0x50a410b8,
+0xd87ffbbe, 0x18589760, 0x814998d5, 0xc1814b2f, 0x7fc33b7f, 0xd25b7335, 0xb6b2987f, 0x988a7f48,
+0x9aa4fb81, 0x597fd920, 0x7f4e7f9a, 0x7f819892, 0x81daeac9, 0x7f538fcf, 0xac7f8198, 0xd2529c90,
+0x0ff00ba8, 0x765cb53e, 0x7f0fd3ab, 0x0d81cb33, 0x81989961, 0xc13c7f25, 0x20d6a6bf, 0x7f817fd0,
+0x6f6594db, 0xec8c68eb, 0x638194a0, 0x3b89815c, 0x81734c65, 0xaeb69ec3, 0xd20b6440, 0x2f71b6dc,
+0xc990816e, 0xb6077fda, 0x7f4cc48a, 0x816aa8e8, 0x817f397a, 0xfb8181b7, 0xe64d11e2, 0xe97f1576,
+0x10706f7f, 0x9b935d4c, 0xaf7f3547, 0x7f175ec9, 0x7632a981, 0x2f61d4f4, 0x7a597f81, 0x499e7f00,
+0x7f81315e, 0x81c4230c, 0x8cb66a2e, 0xe76a8105, 0x81817fc6, 0x7f0e50bd, 0x83ba337f, 0x81009281,
+0xcac68465, 0x617fd326, 0x893981e4, 0x7f8ed9b3, 0x59467f7f, 0x0181e258, 0x7f81b281, 0xc9816629,
+0x7fbc8181, 0x08e1cadd, 0xa8328181, 0x76b6e071, 0xb36d7feb, 0x505490d7, 0x09d5dd31, 0xba81632d,
+0x7f7f6fd0, 0x9e45813c, 0x7c4c4f70, 0xd97f5090, 0x247f335c, 0x357fbb4d, 0x267fd5c8, 0x26d08181,
+0x51447fa8, 0x81358efb, 0xb9197f52, 0x40aadca1, 0x7f6781aa, 0xaf272a09, 0x8177a4b2, 0x497fb515,
+0x819b7f7f, 0x8b817f41, 0x257f7f92, 0xa8d2244e, 0x477af9ef, 0xa8817f36, 0x5ca681f5, 0x7f1a76cb,
+0x8181c5f1, 0xc4ddc7aa, 0x81a1c2bc, 0x7f466fa2, 0x625c237f, 0x3ddd1f44, 0x4f54fdde, 0xa281f2d5,
+0x95d14e95, 0xf26e7fd4, 0x9a813a90, 0x8154361c, 0x327f7f42, 0x7fa2983b, 0xd9c28197, 0xf00d1898,
+0xb95848d4, 0x387f81f0, 0x9f8e7f4a, 0x1c5cb9ec, 0x1f63b5fc, 0x187f7dcd, 0x7f1c7f05, 0xa855e2cc,
+0x5a747f98, 0x7fa1b4ee, 0xe8e51aaa, 0x7f81ec81, 0x38818175, 0x81815f07, 0x84888181, 0x81810852,
+0x316e0060, 0x819b1c6e, 0x3d814577, 0x5ecc5d81, 0x5e81814c, 0x7f76914d, 0xfc7f814b, 0xd2819d96,
+0x70a34ad0, 0x7fd544f3, 0x71e51581, 0x817d45a4, 0x7f479a68, 0x42447fae, 0x9a974f7f, 0x817fbbd2,
+0x989f367f, 0x78fa30c0, 0xbb902e75, 0x7fda7f02, 0x7f82c5b8, 0x33c325c4, 0xdf89815c, 0x81c37f33,
+0x52f37ffc, 0x7fcc4702, 0x81488124, 0x7f548196, 0x4214a4a3, 0xa034b302, 0x6a817e04, 0xc67f795e,
+0x17a98157, 0x7f7f7f5b, 0x7f7f4c56, 0x7fe0b181, 0x7f907fba, 0x42f82fd9, 0x223ea236, 0x3d2025d1,
+0x097f81b1, 0x81a35969, 0x373cb170, 0x44a57c3b, 0x26bc5a7f, 0x34657ff6, 0xc83b3d3f, 0x99be6c2c,
+0x1c811f7f, 0x5c7f81ac, 0x33fa49f6, 0xa5c38181, 0x81ed8144, 0xc88a652a, 0x7f817fe3, 0x953cfc91,
+0x466681d3, 0x366f16d6, 0x187f2ff5, 0x077ff351, 0xbe1b817f, 0x3884ec81, 0xb17f8181, 0x9a39437f,
+0x476fac61, 0x7f911514, 0x24b0038f, 0x5647567d, 0x8143c7ae, 0x81b03a31, 0xec7f085e, 0xc10d7f55,
+0x0a50b9f6, 0x7f1fd244, 0x7f81a1c7, 0xc48148ca, 0x877fc97f, 0x81115e37, 0x827ffdf3, 0x48b741c2,
+0x7f9fc5c0, 0xf87f81ce, 0x43a1029c, 0x2a78427f, 0xff9a164d, 0x7ff981f6, 0x1b7f81aa, 0x6f48ca6a,
+0x547f7f2a, 0xa9ce5cf0, 0xc07f7f7f, 0x7f6fa54d, 0x58817fbb, 0x7f96fa32, 0xfdc18381, 0x7abd7e7f,
+0xc604b981, 0x64217ffe, 0x8893bd7d, 0x7f140281, 0x7759b9e3, 0xcc648129, 0x81c981c6, 0xaf2ba351,
+0x9ecd819f, 0xac816ff2, 0xb516672e, 0x4157ab81, 0x81d21796, 0x35643433, 0xf13d5a7f, 0x9881d003,
+0x5081bbd0, 0x7d7fbdcc, 0x6ba50d3a, 0x8170677f, 0x85b29f81, 0x8aa72210, 0xfbaaa381, 0x7f0e8f34,
+0xcf367b7f, 0xe60399ab, 0x7f7f7f01, 0x817f7f11, 0x81818932, 0xe39a6e31, 0x8d3b8181, 0x107d2bc1,
+0x908164f7, 0xe41a7fc5, 0xe944503a, 0xeb4b6559, 0x22817f81, 0x816ea8ff, 0x1557e37f, 0xaa7f701c,
+0x817f828e, 0x738106c4, 0x7e0e7f05, 0xac768181, 0x17898158, 0xc081ea33, 0x827f9958, 0x3b7fd4fc,
+0x16181f7f, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x0c7f3a86, 0x7750813d, 0xd2b385bc, 0x6f7f7f72, 0x7d487fa8, 0x7f7f8d65, 0xb6578196, 0x4d93815a,
+0xf9a54748, 0x317d6469, 0xe0466f8a, 0xef869693, 0x3da5a9e2, 0x7f7f05a1, 0x14c68157, 0xc2148135,
+0xad9ed181, 0x7a7f6081, 0xf10d659b, 0x4a38dce0, 0x114d7f2a, 0x2bc7f79f, 0x0f7881f1, 0x423e5553,
+0x9c2b817f, 0xde7ffb85, 0xe18117ca, 0x67fd7f7f, 0x3fbbf57f, 0x9b34ad7a, 0x487a543f, 0xf06322f8,
+0x9ce93023, 0x3581818a, 0x6a7f4d69, 0xad3d627f, 0xbd812c92, 0x8be4cc7f, 0x23b47fc4, 0xc87f819b,
+0x104d6e7f, 0xcfcb8134, 0x0d8152cf, 0x467f7f19, 0x113f1ec5, 0x589bff9e, 0x457fa8c8, 0x2a1246d0,
+0x731e257f, 0x40e460e8, 0x13d3057f, 0x817c8de1, 0xc09dc798, 0xab81d4f1, 0xa1a49d8c, 0xbf50df7f,
+0xbc8192f7, 0x73aa4d5a, 0x1c817f43, 0x428fd6fc, 0xc282884e, 0xb9f67fd8, 0xd7777f4d, 0x1aa2d89a,
+0xa5537f81, 0x8878b631, 0x4bdfd881, 0x7f21b57f, 0xcd7f7f7c, 0x4d2aa1d0, 0x3e138144, 0x3c7fe433,
+0x146e5fe3, 0xa53b7f85, 0xa9537ffd, 0x819eda1c, 0xf93e2881, 0xc3814f7f, 0x018d69b4, 0xf87fa19f,
+0xb09f9144, 0x6ec23f94, 0xdf7f39c6, 0x48087f7f, 0xfc818184, 0x18815bc3, 0xc764598b, 0x11ed68c7,
+0x81df81bd, 0x7feceb38, 0xc90a473a, 0x81817f81, 0xef3a537f, 0x7f4d3873, 0xf45c846f, 0xfeb68afc,
+0xd326d181, 0x7fe4ae7f, 0x3e817fb5, 0x81c95de9, 0xbebdcd7f, 0x23053726, 0xdd8d8181, 0x1d545233,
+0xe37c933d, 0x81e52c1d, 0xc0ab2d81, 0x81e2667f, 0xcf2e70db, 0x9370319c, 0x21add116, 0xe6c34081,
+0x076912e7, 0xa781aca0, 0xd6f5a5d0, 0xa9af5d7f, 0x2e817c77, 0x81e1661c, 0xd5308181, 0xae7f6ec4,
+0x45815295, 0x2dbc2581, 0x50bde53d, 0xcd8cb1bc, 0x2e67977f, 0x32fc789f, 0x2b81ca3f, 0xef7f7f61,
+0x19a7d611, 0x81427f81, 0xe98181fc, 0xd4397d7f, 0xd9ab7fb7, 0x819c8c92, 0xd5478339, 0xf0555aae,
+0xfe09f481, 0x8128527f, 0x337f1081, 0x6dae3aa2, 0xccb67f81, 0xb1d8d87f, 0xc2d3dd02, 0x97af814d,
+0x812f1c5a, 0xc50c8181, 0xcd70610a, 0x7f555df4, 0x567f7f23, 0x0a0d917f, 0x4b81815c, 0x184a5b8e,
+0x5054c67f, 0x7f7f8ced, 0xf0b31f7f, 0x11698131, 0xe0ac5442, 0x57ffc57f, 0xb681cc02, 0xb78c8193,
+0xa629b48c, 0xf2a8d43b, 0x59167d37, 0x81818bf2, 0xc87c817f, 0xbf7f8e7f, 0xcdbc2243, 0x2ed78e74,
+0xfe817d61, 0xa3613c01, 0x25e281af, 0xbd6a7f36, 0xe57f4b7f, 0x81898161, 0x2a415081, 0x50b07fb6,
+0xe77f9c72, 0xd43681aa, 0xd07fb648, 0x3c7fbd5b, 0xa555c475, 0x8169dc64, 0x09106247, 0xf9b660ab,
+0x389542ac, 0x7fa22c4e, 0x81304e71, 0x29208c90, 0x317fa43a, 0x8281819d, 0x7f212065, 0xc0682681,
+0x3f2b5dc7, 0x87d86d81, 0x5c7f4192, 0x7f54332b, 0xc564a449, 0x7f817f58, 0x3c6576aa, 0x147f816d,
+0x4957d091, 0xbdaba3a7, 0xa17fa381, 0x49628138, 0x372f815b, 0x81676748, 0x8dba817f, 0xc89e8129,
+0x866fc855, 0x43ba4a81, 0x8f4b7ec2, 0xa47f054d, 0xc3b07185, 0x7f8d3ad0, 0x2c7f9f8e, 0xc17f27d3,
+0x33956381, 0x8e12a081, 0x7be52e32, 0x44b28122, 0xe4947f6d, 0x6eae4f7f, 0x34df816a, 0xd7938168,
+0xa7a9528b, 0x7f81ec45, 0x41857f6f, 0xbc28817f, 0xcbd67c06, 0xaeb678d2, 0x529a547f, 0x44813b6e,
+0xd44f247f, 0xbe1b9879, 0x877f4996, 0x5b9a8dcd, 0x2e558281, 0x1d817f84, 0xda7f8186, 0xdda9f4a6,
+0xe698ae70, 0x7fb54c8a, 0x0aae8550, 0x857ff8fb, 0x1ac42e8e, 0x3f27814b, 0xe27fdc2d, 0x2db1eb81,
+0xca817e5a, 0xec7f62e2, 0xc7dea079, 0x7f7a7f26, 0x7fdfbf81, 0x7fe89da8, 0xa9187264, 0xf98192c9,
+0x6af88181, 0x667e3981, 0xf97f3c48, 0x7fdf817f, 0x3b9bbc81, 0x55bfbc81, 0x096fa6e1, 0x1b75a67f,
+0x357f83ed, 0x7f7f6f7f, 0xf3b43812, 0x9d4c0d92, 0xa8817f4d, 0xb96c877f, 0x34818178, 0x5d7f997f,
+0xdb7f5c7f, 0x39b056a4, 0x33bcffaf, 0xb398b57f, 0x285a4bf1, 0xb181574b, 0xc1e56d31, 0x4b717fd2,
+0x56337f81, 0xed938157, 0x7f4b7f47, 0x49a3812b, 0x2e667fcd, 0xcd817240, 0x8b5470e6, 0xf693814e,
+0xd3e07f7a, 0x7f7a4781, 0xf44cd60c, 0xfd546281, 0xef995981, 0x81e70628, 0xd3b97f47, 0x07819aa8,
+0x707f447f, 0x9181c079, 0x72ae8173, 0x988d2a78, 0x7f814478, 0xb57fa076, 0xde7fb260, 0x17817f3b,
+0xf045c31b, 0x7f7feb13, 0xbee5d07f, 0x5f8192ae, 0x0c9381f7, 0x28817f7b, 0x985964d5, 0x81817d81,
+0xb381b799, 0x38b0817f, 0x3a887f0f, 0xf353fe81, 0x53817fdb, 0x5c814d56, 0x39b415ff, 0xc9c78c59,
+0xb2817fad, 0x5884bec5, 0xd629b07f, 0xb67f81cb, 0xcb81abe9, 0xab7f9b7f, 0x35815781, 0x85878181,
+0xb0b2177f, 0x86223495, 0x32cd7f69, 0x3c664743, 0x1d7981c2, 0x817f817f, 0x16cadc06, 0x03786a7f,
+0x03818107, 0x5a8dd1d1, 0x58e57f20, 0x7f818152, 0x444c50f0, 0x3d81810c, 0x66ae7f62, 0x39d01614,
+0xbd817f81, 0x789b7fa6, 0xadcd819c, 0xd37f8171, 0x291ebf81, 0xd5923ac9, 0xc37f63bf, 0x1c7981ae,
+0xda5e969d, 0x248c5899, 0x8fbdc4b3, 0x6c50d2cb, 0x2a5fab0b, 0xc42d818d, 0xa4776781, 0xaa6281fd,
+0x20534bc6, 0xb729727f, 0x6481de5c, 0xffc8a5e9, 0x707f25f2, 0xc674ba57, 0xe2ff9cb8, 0x02433e79,
+0x108e223b, 0x617f9cd7, 0xb5507f81, 0xa6f63ee6, 0x09b45856, 0x7e7fe581, 0xe4f8417f, 0xe11d98be,
+0x44817f7f, 0xc9c581df, 0xf7817fb0, 0xac6da566, 0xc681ed7f, 0x3249810f, 0xab55b281, 0x35e64c81,
+0x61e9b982, 0x8d8c4781, 0x4eafd663, 0x7fb8302e, 0x40a9bcb1, 0x17b17f9e, 0x817f92c0, 0xe35d2fb3,
+0x4ca815dd, 0x817086b9, 0x389b817f, 0x3c0d955f, 0x447f7f1a, 0x7f11307f, 0xada0815a, 0xafca7f7f,
+0xce48b124, 0x7fd94e81, 0x2edc81a4, 0xaa098a97, 0x14030263, 0x97813a7f, 0x8181d97f, 0x2581c9e6,
+0xe57f7f05, 0x16880f81, 0xa6e88184, 0x818490a2, 0xd5a87f94, 0x8b81a081, 0x38813781, 0x14a59a5a,
+0x486b8c40, 0x893449c7, 0x1f03d659, 0x81bb2831, 0xe0d8c66b, 0xa1bb7066, 0x1b7f8c71, 0x07815581,
+0x3820cb84, 0x8d3926b5, 0x81f27fd2, 0x46ff3a84, 0xfddc057f, 0x16d21ad1, 0xdbc27852, 0xf9817f34,
+0x7f81397f, 0x6c0f8101, 0xf22a100c, 0x7c998171, 0x4b68bad6, 0x7f588114, 0xc3465c81, 0x439e037f,
+0xd9817f52, 0xf1bd5594, 0xf93f9f81, 0x39447f7f, 0x46dca006, 0x93f30f7f, 0x6e215e81, 0x092ffdc3,
+0xad49e8a0, 0x7f4e7fba, 0x9b9c4fc6, 0x8131f77f, 0x0700b27f, 0xf0de61ab, 0x258181fb, 0xd8e8bb56,
+0x36932155, 0x9c7f2981, 0xb2bfe055, 0x7f0095b9, 0xaf73327f, 0x70817fbb, 0xb3a281d6, 0xfac08a53,
+0x24e4727f, 0xcbb5dbbb, 0x38be99fa, 0xbcb94e7f, 0xdf2653d0, 0x93df5efe, 0xfcdc697f, 0xe754909d,
+0xc6810060, 0x7f0ce4d1, 0xaec4a5ca, 0x4a7f81bd, 0xc17f817a, 0x7feecc3f, 0x2981285d, 0x3e0e6e81,
+0xab81814f, 0xbf27187f, 0x12409322, 0x1c9dc451, 0xc48e03e7, 0x64013b81, 0xb5847aa7, 0x350a1b84,
+0x27458181, 0xa1237f61, 0x9714287f, 0x817f8170, 0x1a7f7f09, 0xf07fecc7, 0xd686d4c9, 0x9abd7fa9,
+0x449dc0bc, 0xc1177f9d, 0x7f547f9a, 0x7fbd7fe3, 0xcab19d2b, 0xc981b27f, 0xdc8ba548, 0xe95304a6,
+0x22225820, 0xd29262ec, 0xf27f9f78, 0x1b94427f, 0x4738a436, 0x7f814fe0, 0x52817f7f, 0x1d818181,
+0xf8d92d89, 0x24817fa3, 0x66d6aadb, 0x4e55b9ca, 0x7f81de8c, 0x7f814e64, 0x24141e31, 0xa5232778,
+0x34e27f81, 0xdc87c78f, 0xc016dfb1, 0xce91829c, 0xe662c230, 0xda74e681, 0xcfb57bb8, 0xf18179ff,
+0xcc818481, 0x4736ed45, 0xcc6ee881, 0x3a815a81, 0x81b77fb2, 0x56816f2f, 0xe82bcb89, 0x3875ad5f,
+0xe05197e1, 0x1a7bc17f, 0x31a3f841, 0x7f90a77f, 0x1df3b910, 0x583f815b, 0x257f814c, 0xee7f7f8e,
+0x7dbdc37f, 0xb6810a1c, 0xce816d85, 0x459bf72d, 0xd4fe0481, 0x0d49024c, 0xbfa3a055, 0xed9baf30,
+0xf27f7f7f, 0x7dd24f9b, 0xc481817e, 0x7f899052, 0x074d5bdb, 0x2335c3fe, 0x00759f7f, 0x251bacbb,
+0x147fc935, 0x0b180994, 0x9f7fa637, 0x387ff150, 0xcbac8967, 0xf37f60cb, 0xe4c216be, 0x7829816a,
+0xd7b89981, 0xf8ae2592, 0xbeca818c, 0xf3817a81, 0xc681017f, 0x2f2f2e8f, 0xfe577f1b, 0xd503d778,
+0x2d7f7f67, 0xb281c0f0, 0xf7d7812c, 0x817f9598, 0x1af6fb36, 0x157f3781, 0x5e31a27f, 0x038181ae,
+0xf9fb81ad, 0x7a8eec81, 0xc8652e7f, 0xc67ba487, 0xbfce7f7f, 0xdf02a27f, 0xf9c1813d, 0xcaac0f90,
+0xdc448181, 0xb17f921f, 0x99be8181, 0x7fc09993, 0x11ad1bc3, 0x5c7532bf, 0x25a8ce28, 0x0ab6a823,
+0xc3c29055, 0xad55b6ea, 0xed81e0dd, 0x8caa6553, 0xff817fc8, 0x7f70d513, 0x433671dc, 0x5db37f6d,
+0x7f318170, 0x55bd4dd5, 0x00c181b7, 0x41c3bb90, 0x441a818e, 0x84e78140, 0xad8a2c81, 0x2bc38158,
+0x82654c81, 0x29d6600e, 0x227f7fc8, 0xa43d577f, 0x2b8364a3, 0x144e2a57, 0x07817f1c, 0x4a8171cd,
+0x47d5a981, 0x3c7d0781, 0xba636c7f, 0x2c7a7f7f, 0x90cf187a, 0x6b4c3671, 0x3a2c436d, 0xef81de81,
+0x4a828117, 0x814dabc0, 0xf930e17f, 0x4c3265c0, 0xe5ef7a7f, 0x819b8504, 0xd5427f81, 0xd881e992,
+0x946a45fe, 0x310162a7, 0x14819c81, 0x7f817f4a, 0x552fcf42, 0x7f707f18, 0xb9dc4881, 0xa17f81c2,
+0x9e476d98, 0x7f7f7ff0, 0x37662d4e, 0xdb7f99cc, 0xf9fe8cc1, 0x1f4c7fd1, 0x3588817f, 0x547abea9,
+0x356e7f7f, 0xab1e857f, 0xda0e7f5c, 0x9d777f7f, 0x451f1bb6, 0xa82e087f, 0xb71d347f, 0x0916d881,
+0x177e817e, 0x7f5a66a4, 0x237f2e6e, 0x816dafde, 0x2f8124c1, 0xcee67158, 0x067f667f, 0xe1258187,
+0x3a3165f7, 0x1f7fac81, 0xf636364d, 0xf2818e36, 0x0b9e207f, 0xed2d7e81, 0xaa6e2281, 0xcb50b6a3,
+0x237f72c6, 0x95819fa0, 0xd7d0c888, 0x39988141, 0xc8b07f8d, 0x5a43c942, 0xf3cd687f, 0xa2e77aec,
+0x9909bb1b, 0x7ff8e681, 0x5d39837c, 0x5625817f, 0xa9738110, 0x8b604bb2, 0xf3ffa881, 0x5f359eb5,
+0x07bf7f49, 0x7f63c67f, 0xc9db7fcb, 0xa65881c5, 0xf2818130, 0xa5817f81, 0x26b4174f, 0xc06f817f,
+0x3b8181c2, 0xbfeb4d6f, 0x9e857f91, 0x58a9a1b7, 0x67147fdc, 0xcd257081, 0xb78d4081, 0x3749ebb8,
+0x22ac7f89, 0x4af84134, 0xe3f97943, 0x2d8187c6, 0x027c8181, 0x81c1bfef, 0xc32b2267, 0x42257f81,
+0xb49b3eb3, 0xa3d47fc2, 0x27ff4d81, 0x69b4a46e, 0x81818181, 0x7fd5817f, 0x00a97d91, 0x10897f7f,
+0xd87f0d3b, 0xcbec8b7f, 0x7f814de6, 0x8286209d, 0x06e2bf7c, 0x81e28881, 0xae64afdd, 0xabc5667f,
+0xee22bee0, 0xe157d171, 0xb39a5bd6, 0xabb6d1c0, 0x53c19d81, 0x447fdb81, 0xfc086996, 0xd65f0481,
+0xc87f4d35, 0x313a12db, 0x5d81a1a0, 0x01231072, 0x327f81d6, 0x7eb1ba7b, 0xe9485dad, 0x5781767f,
+0x047f2481, 0x817f5b50, 0xda064bd7, 0x7ff07b3c, 0xfde4a68a, 0x7f83816b, 0xe67f7f34, 0x1a56f181,
+0xe2bbb681, 0xe4687f74, 0xd5a20bd8, 0x81d581a6, 0xb2395342, 0x977f0fa3, 0x81e3b5dd, 0xf1cd8147,
+0xe34f48a1, 0x258142c3, 0xe24d510b, 0x46d6816a, 0x2a608ae6, 0x5781ef81, 0xcce97f0d, 0x677f7f81,
+0xaa2381bc, 0xbde77f4d, 0xefa38185, 0x81a67f75, 0x6cceb856, 0x4976bb25, 0x3efc977f, 0x86cdf691,
+0x13497f81, 0x7fe7730a, 0xd71d8163, 0x8c7f7fbd, 0x1bec817a, 0x8154b897, 0xc71e45b6, 0xbed568c3,
+0x256c6514, 0xd481af7f, 0x19ad697f, 0x817f745f, 0xd27fa187, 0xd5e769d9, 0xd816cf7f, 0x1aa4815b,
+0x035e7ff6, 0x85de810b, 0x3ea73657, 0x216281b2, 0xe48144b7, 0x3277dfe8, 0xf8c57f20, 0x907f5091,
+0xf481b081, 0x9d8152cf, 0x0e6a81c0, 0x7fff7f06, 0xf8884d81, 0x687f81b3, 0x2657ba35, 0x527fc24c,
+0x9945694c, 0x1faa4e5b, 0x37819334, 0x57f52a7f, 0xec7f817f, 0x817fa0a8, 0xe3a8818a, 0x16beacdb,
+0x2ad2b97f, 0xb981245b, 0x359a8181, 0x7f97265e, 0x9de4387f, 0xed7f058a, 0x0e817f81, 0x4e69bdb5,
+0xc5e38552, 0x77482e81, 0x6c817542, 0x447f2990, 0x07b1f9dc, 0xc4247f82, 0x077f7f81, 0xe58bdca6,
+0x55e07f74, 0x0e7fe47f, 0x04883e81, 0x4981f87f, 0x7d7fce29, 0xe07f7d7f, 0xee817fa6, 0x7262da81,
+0xe158813b, 0xe35e41b9, 0xf17f6d65, 0xef7560ea, 0xae408493, 0x97812300, 0xf29f816e, 0x557fd5aa,
+0x328bb778, 0xa27f7f25, 0xde7fb5cb, 0x03577f0b, 0x577f18a5, 0xb9816d20, 0xe28d2a7f, 0x248c7fb9,
+0xff5fb965, 0x93b27f9c, 0x2d817581, 0x41958195, 0x3a7ff044, 0x7f7f6faa, 0xdb81b784, 0x5081603d,
+0x363ff38d, 0xc747d8b3, 0x537f812b, 0x8176a97f, 0x1d8181d3, 0x81811929, 0xe893d4c7, 0x3c7fcf7f,
+0xf6385c81, 0xaa63b42e, 0x81c3a781, 0x488142a7, 0xcbef89be, 0x3f748159, 0x50c52932, 0x298198d7,
+0x14c68e7f, 0x7fd8a798, 0x085a632f, 0x7f1d7f7f, 0xb2e34289, 0x88c3b262, 0xb044817b, 0xbf7f238a,
+0x5ae6b554, 0x9e833081, 0x607f985d, 0x0203226c, 0x333f63fb, 0xc77f8181, 0xe8297f7f, 0x9dea7f33,
+0x6f2e1f54, 0x2e41327f, 0xff0f7f81, 0x7ffa7f30, 0x343f74a6, 0x81f59f81, 0xaac67dbb, 0x6f81da1f,
+0x79ec8153, 0x78eae674, 0xec7f6e71, 0xa46a48bf, 0xe5810efa, 0x0f41b9a7, 0x3a1124a5, 0x5ff97f7f,
+0x0bd5be75, 0x7f8181f0, 0x242f409e, 0xf47ac606, 0x1e96d94f, 0x43a66a81, 0x18fc815c, 0xa51a7f7f,
+0x1185d27f, 0xa00c810f, 0x078ae481, 0x71d7866c, 0xd47ff796, 0x813cf240, 0x45814d48, 0xbc6c3618,
+0xa8b937ea, 0x838e7f7d, 0x19b3817f, 0x7f73be95, 0x00fa46b3, 0x4c43655a, 0x20f67fc2, 0x98fdda7f,
+0xe879817f, 0x4c9e7f2d, 0xcd7fefc2, 0x9681a909, 0x1e59e7b3, 0x35818136, 0x9e3a977f, 0xaf7f207e,
+0x9633227f, 0x44450e81, 0xc2c25f7f, 0x7f1a3f81, 0x08acf783, 0xd083a838, 0xd2627f59, 0x6d327f32,
+0x362c2b5e, 0xee8e507f, 0x48b431c5, 0x00816381, 0x1e6c1150, 0x7df48149, 0x36f94f81, 0xc5667844,
+0xf7cbf52c, 0x520081b8, 0xc6967f7f, 0xc88981b8, 0x328181ee, 0x697fa9f2, 0xc99a7f81, 0xb5a08181,
+0x1d74dc89, 0x643ab5cb, 0xeb81f9be, 0x987f277f, 0xad38747f, 0x61778c52, 0x59817de7, 0x05ae8144,
+0x217aa1de, 0xc0817fe9, 0xa1a6b4dc, 0xece08189, 0x4118207f, 0x9a8181a8, 0xcf0f81c9, 0xba81d081,
+0x138181b8, 0xd537957f, 0xfc7f9b8e, 0x7fc9607f, 0x16c085fe, 0x81fa7fb6, 0xf7847f66, 0xc20ee0fd,
+0xf983ec97, 0xce209f7f, 0xe2815922, 0x39b07aa8, 0xd3a82353, 0x6ee38287, 0xc60d7f29, 0xcf478f57,
+0x594d731b, 0x0e8c42fc, 0x6a702ce3, 0x7b81b301, 0x3181a450, 0x7fe94481, 0xe581b66f, 0xd87f7f2a,
+0xc1b385ee, 0x81816964, 0xbb7f5093, 0x8151817f, 0x51e3547f, 0xb3fa7549, 0xca81b044, 0x258143bf,
+0xb18181c8, 0x7f54eb08, 0xd4c9db57, 0x68a981b2, 0x4ef17f69, 0x917f977f, 0xda38865e, 0xe97f3e5f,
+0x0bbc63dc, 0xec34daaf, 0x1dc38186, 0xb381810e, 0xcbacafd2, 0xdb3ec56a, 0x22817381, 0xbc44cc4d,
+0xd681d0b0, 0xe614c8a9, 0x53577f7f, 0xdabc3012, 0x353ecdc2, 0x81728179, 0xb660cf7f, 0xef49ca49,
+0xc645dd7f, 0xd16a7f81, 0x1fb99d7f, 0x7d36d57b, 0x04ce7f46, 0x602adede, 0x1142cbb1, 0x60de427f,
+0x29814ccb, 0xe9ad7d99, 0x127b2881, 0x5c5aa08f, 0x38c0db9f, 0x7f76d981, 0x63b32381, 0xdd41ce81,
+0xce14e57f, 0x74ed7398, 0xdd81e6eb, 0xb6f2d581, 0xa581b77f, 0x468c81d7, 0x5f2d88b8, 0xf481fd21,
+0x6ec1e381, 0x71568485, 0xcbfa7f6c, 0xa02e0c9b, 0xd9a09a7f, 0x813d8176, 0xcf43c52b, 0x31a8bca1,
+0x2432dce5, 0x78277f94, 0x474e592e, 0xdefb7f81, 0xff147f51, 0xbb7f3858, 0x25f1e591, 0x36fad833,
+0x0ac87f39, 0x815d9781, 0xaebaaa81, 0x4981815f, 0xd7817781, 0xb44838b5, 0x32626981, 0x417fe2f6,
+0xdf638c92, 0x8624067f, 0x4432e69f, 0xd8688a7f, 0x06815215, 0x25bbbb7f, 0x5b5b6f63, 0x417b5b81,
+0xcc4a7b7f, 0x6ea47fd3, 0xe866b481, 0xc630c9aa, 0x083781ed, 0x69de817f, 0x0381f9f2, 0x84812ce2,
+0x027f4dd9, 0x7f5a9e81, 0x00ea319c, 0x222b9a9b, 0x34a143f0, 0x7fefd1d0, 0xb4947981, 0x11474bc1,
+0xcd44df7f, 0xa2878439, 0xd6f040b9, 0x6f86817f, 0xc67f7f81, 0x7f693443, 0x8d21b1ce, 0xdab1aed5,
+0x3f17ae71, 0x45b7ba7f, 0x05229255, 0xb181d8f5, 0x688681e3, 0xea81e181, 0xf57f817f, 0x7fdc5881,
+0x6e817f7f, 0x817f537a, 0x3987247f, 0x300a7f05, 0x3128057f, 0xe731b77f, 0x4881b50b, 0xb86dae65,
+0x00c015a0, 0x81397552, 0xb9bcab24, 0xf5818181, 0x57d18190, 0x81c3648d, 0xed813a67, 0x4c5d3a06,
+0x3581a4e3, 0x64db6467, 0x21af7d27, 0x5adf7f57, 0x66d9c99f, 0x5b183375, 0xbf4c41e5, 0x577f7ff0,
+0x4e818181, 0xf295493c, 0xd2c28170, 0x5c3a7f8e, 0x09c18181, 0x7f7f7f9c, 0xae675fa9, 0xf6d781a4,
+0x66ba10dc, 0x7f882981, 0xfe7f7f7f, 0x7f7ffea4, 0x2798d845, 0x899cb6fb, 0x4982bcd9, 0xc0815ab0,
+0x558181aa, 0x52110702, 0xd179e3c5, 0x482923bd, 0x187f8181, 0x81b3f6d9, 0xfd96a581, 0x2c5d1a81,
+0x6fde23dd, 0x9e815063, 0xfba49bcc, 0xfa775f4d, 0xb1b8b681, 0x81894f1b, 0x3c8163fa, 0x2d7f3d81,
+0xc57f6681, 0xbd7f7e81, 0x137f7f81, 0x6b3d6f81, 0xe5e2610d, 0xb4683186, 0x7f6e817f, 0x90c12abc,
+0x13a3502a, 0xdddeaa8c, 0x07a67f79, 0x1a5481ce, 0xb87fb01b, 0x0a4b1e7f, 0xd77ffcb7, 0x03535781,
+0x5a5f697f, 0x02ca7966, 0x379638bb, 0xc507c941, 0x5e7fdf8e, 0x7ce0c99b, 0xa162ccfd, 0xff8196cc,
+0xd43aa081, 0x67798185, 0xd6277f2e, 0x7f6dcb7f, 0x217fcd76, 0x2c88fb91, 0x06815b8f, 0xe37f7dae,
+0xe937257f, 0x648181ae, 0x21652c4f, 0xbf81685b, 0x36abe921, 0x3cb5de30, 0xa88e8162, 0xd87f812b,
+0xb981d14d, 0x75c181a7, 0xf7c08aa1, 0x57904e7f, 0xf6f688d8, 0x7fcf6281, 0xd33dbf7f, 0xc07f9a73,
+0x197f8135, 0xcabf8bc6, 0x3f81c37f, 0x487fac4f, 0x8ddc924b, 0x50257f7a, 0xf75e1496, 0x28d2d48f,
+0xcd91817f, 0x2a7f81b2, 0xbd7fb67f, 0x3b877fe1, 0x407fd8fd, 0xbc815f20, 0x7f50e581, 0x8bdfae4d,
+0xb1eb7f81, 0x07819f40, 0x3765274c, 0xde7fd081, 0xa0817f7f, 0x8dbe5081, 0xfa436f7f, 0x4848463b,
+0xc5c45cdb, 0x7f8b1865, 0x5bf381b6, 0x45aa497f, 0x7f813694, 0x54f15918, 0x72886c49, 0xe3fb31b0,
+0x0db1a582, 0x4aadce1b, 0x39926d8b, 0x9a30a85f, 0x1b606181, 0x1c6d165b, 0xfb057f4b, 0xcd7f81ec,
+0x12bf7ffb, 0x7f5f91d4, 0xca8158b7, 0x86134234, 0xf0f17f3c, 0x7f4e05c4, 0x34814f7f, 0xe5c1106a,
+0x2d50c67f, 0xb57fa18b, 0xb7816481, 0x7fffaadb, 0x607f817f, 0x5048ea81, 0x82495d02, 0x17819b7b,
+0xaba67981, 0xb5817f7f, 0xee57a2b6, 0x3591a602, 0x2885a153, 0x429ba160, 0x757fd0f5, 0x344e9d7f,
+0xc77a7f8d, 0x2a7f8160, 0x3198b27f, 0x81c552bd, 0x0374e058, 0x31605de1, 0x40d0817f, 0xb281a37c,
+0x156ce27f, 0x91893090, 0x06f58181, 0xc4487f6d, 0x0a7f7f9d, 0xc3a17141, 0xc03c39c5, 0xb581b2b5,
+0x526a7f7f, 0x7f7f7f4e, 0x7fec7f7f, 0x95cead7f, 0x161ba8d2, 0x8ad0d081, 0xe4f19557, 0x5bb6fca9,
+0x2d813a6d, 0xddbb7f81, 0x1cb1e281, 0xc2b43734, 0xd07f4641, 0x7f81bb39, 0xae9b1b81, 0xbe817f36,
+0x1caf7f81, 0x7f704922, 0x108f777f, 0x81662d7f, 0x479ca35b, 0x818513c2, 0xa41d8607, 0xd081b235,
+0xf6817fcb, 0x7f015960, 0xadb15f5b, 0x81be2abf, 0xf2b0e29f, 0x81819224, 0x38a7407f, 0xc9817b09,
+0xf307556c, 0x7f818181, 0x0281817f, 0x3fce810c, 0x18b47cd4, 0x81e2a53c, 0xe359840f, 0xe27f81ad,
+0xa1665081, 0x7ae062b6, 0xe0b27f35, 0xcd376e73, 0xd9a6d373, 0x817ffe7f, 0x02810946, 0x06498116,
+0x9b7fbb1b, 0x8d2b812a, 0xff14f781, 0x937de081, 0xb77ff44b, 0x4b2732c7, 0x0720bd96, 0x9e647f13,
+0xb831d474, 0x811d8183, 0xa5db837f, 0x8aa1b04a, 0xd4377cfd, 0x7f814241, 0xee8fec39, 0xcc81d881,
+0x2e29d770, 0x7ff76366, 0x7fbd363b, 0x74bbd13e, 0x2e817192, 0x9f6b81db, 0x44913bb3, 0x327f0de1,
+0x2d7f8881, 0x54f2afaa, 0x107f513c, 0x4c8191d1, 0x117fe67f, 0xffa21764, 0x2c931727, 0xd09ba693,
+0xf74e4882, 0x51ed8bc6, 0x3a23d683, 0x47437f70, 0x7fad8181, 0x9a610813, 0xa57ff47f, 0xbe1b5b9a,
+0xf4bf8c33, 0x43074944, 0xe4ef729b, 0x1e7165d6, 0x74e081c6, 0xe281810c, 0x2a4b81e4, 0x10ede221,
+0xc8375d7f, 0x81c07f81, 0x49cb7f83, 0x5f589c22, 0x276441a9, 0x7fe67f0b, 0x4d7f1d7f, 0x1872814a,
+0x05b6a577, 0x8140fa5d, 0xe47f9bc3, 0x44166064, 0x73896729, 0xb5c0867f, 0x247f445f, 0xccc92994,
+0x29611b37, 0x27531258, 0xeac27f9c, 0xd43f4e7f, 0xc27f170f, 0x6d9bfc37, 0xfc819181, 0x077f598a,
+0xe0c5d398, 0x7f23182f, 0xed171743, 0x20bff85d, 0xb0d0be7f, 0xb8938149, 0x45017f32, 0xcf2cc65d,
+0xdf7fc481, 0x7f380777, 0x81b8f0dc, 0xf6696596, 0xff007f29, 0x896e1abf, 0xfaac2d81, 0x5bc357ba,
+0x6a397f2e, 0xbfaeb45a, 0xd64e2b7f, 0xe49b4587, 0x565c6c51, 0x7f206a7f, 0xc381540b, 0xfc3d7f73,
+0xd4ec9f39, 0x887b7f27, 0xe3c1e07f, 0xb721523e, 0x0310819f, 0xc0c793ce, 0x387da141, 0x814f61ac,
+0xd1347fbe, 0x19244c1f, 0x8d3d58f9, 0x813a7fc9, 0xfae27f54, 0x7f39f481, 0xe37f818d, 0x034632d9,
+0x5b06c07f, 0x65ab8b36, 0x127f34b5, 0x18a681ad, 0x73510e20, 0x2f2ffa85, 0x6814ca9e, 0x5b7f7ead,
+0x08da93da, 0xc826da54, 0xc1686fd2, 0xb6f00fbc, 0x28bb5dbb, 0x7fe5297a, 0xb5ed8181, 0xccd9067f,
+0x177f1561, 0x71b41a19, 0xd9817f7f, 0x505b50c5, 0x309dc978, 0xb97f4f81, 0x4e81c219, 0x2d81d04a,
+0xdf3d5fa1, 0xfc6e5c30, 0xf7869481, 0xfc81e47f, 0x67b5817f, 0x7f72577f, 0x2c938136, 0xd576ad7f,
+0x63beb36f, 0x7fb3817f, 0xce56ca81, 0xd801e58d, 0x425f2522, 0x8a25817a, 0xdc7673c9, 0xd7b57fca,
+0x927fbe2a, 0x149b9881, 0x9ad781a9, 0x214b9d4d, 0x2dae1287, 0xfa772ca4, 0x3b7fdd35, 0xf59e4174,
+0xad3a28f5, 0x8144698d, 0x1ca792df, 0xdb7c7fd1, 0xe96d497f, 0x567fa7d9, 0x489b70cb, 0x3442de7f,
+0xa2554c37, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x4dc76c95, 0x29b78ec8, 0x3d927f7f, 0x307f93c2, 0x8181da7f, 0x99f741df, 0x6e1d417f, 0x1bbda834,
+0x954b5a9f, 0xc6817f07, 0x7fc01005, 0x3b5a81e4, 0x73cd7f5b, 0x7f817f3c, 0x8e7f812b, 0x59d48a03,
+0x982475ef, 0x65a1c068, 0x400cc79b, 0x81f231fe, 0x840329e0, 0x1f6054eb, 0xf8813d3a, 0x7f4a9296,
+0xbff677c5, 0x7f3c6f0c, 0x7f2890cd, 0xe0aaaad6, 0x81ae9428, 0x7f8117f1, 0x7f097f88, 0x7f27cfcc,
+0xca67205e, 0x815f7f37, 0x7f0ac07f, 0x717f95d2, 0x7f9232a7, 0x2bb803d4, 0x3c12c7e7, 0x937c81ea,
+0x7f1cdd3b, 0x0622b6c0, 0x62d71247, 0x811cb00b, 0xaef9e981, 0x0f818ff5, 0x740e6f53, 0x30616410,
+0x81fed3e0, 0x49167f3a, 0x4577315b, 0xe49130fa, 0x817c7f23, 0x7ffef7ec, 0x4fb47f17, 0xb365b1f8,
+0x81a09c38, 0x7f818134, 0x9568e717, 0x4ead1921, 0x8ebd9806, 0x7f911cf8, 0x8c7f5efc, 0xf29114a8,
+0x90bc6ae9, 0x0b5f26c1, 0x81bc9281, 0xf0fb5532, 0xc89ab77f, 0x7ec0b9d4, 0xf00bdcf7, 0x7f7f7a24,
+0x097ff51e, 0x64d62dfc, 0x7f7f819e, 0x34c05cc3, 0xa2957f19, 0x81101e05, 0x7fd68181, 0x1dff81af,
+0x7f81dc24, 0x7b3c6769, 0x7ac490dc, 0x814032c5, 0x9d81747f, 0x7fb46342, 0x72607faa, 0xd40c8161,
+0xa5ab574f, 0xf0838bba, 0x33fd7f47, 0x289f81be, 0x817f480f, 0x4bfa552d, 0x8ca1d5d5, 0x817f492b,
+0x418125e0, 0xe45d5af0, 0x811b1751, 0xe04d81f8, 0x7ff2917f, 0x65811a53, 0xe65da37f, 0xbafdd439,
+0x53818189, 0x752c7ebb, 0xc301567f, 0xb0816f27, 0x708166db, 0xc0b57f49, 0x3a818169, 0x4f16189c,
+0x13d6e83b, 0xd258a20d, 0x7fadbdb2, 0x7f93560d, 0x81fd5399, 0x6014dab2, 0x9cf45f1b, 0x107f71d5,
+0x352e8109, 0x5a5d6efa, 0xb458ef47, 0xb4a96098, 0x7ab91c37, 0x81bd762e, 0x816cb0d7, 0x3f9819d4,
+0x8113b97f, 0x7f09fa33, 0x9e7ffa99, 0xe98181ce, 0x4ffb3064, 0x7f0681ea, 0x81ef06ad, 0x385abdf5,
+0x1414457f, 0x18dabc5a, 0xc27e0d7f, 0xd87f8115, 0x703f7fcb, 0x300b7ff1, 0xb081f55d, 0x024d78f8,
+0x72949a16, 0x90603c38, 0x7fba6fd4, 0x4820e44f, 0xd57f81af, 0x9c7f7ef9, 0x61c6b27f, 0x81b6141f,
+0x7c52514a, 0x5f817f78, 0x668119a7, 0xe441aa7f, 0x53819553, 0x7f721521, 0xbd7f7f17, 0x593081de,
+0x8b252de8, 0x7f7f81c1, 0xaca9436e, 0xb5e0b104, 0x81e49d81, 0x75b48102, 0x5c278118, 0x9f6c36c9,
+0x509481fa, 0x53d1e640, 0x52108181, 0xeb7f7fff, 0xde417381, 0xa781cb33, 0x4fab55f4, 0x3e7d75f1,
+0xf17f0c1a, 0xe8e1817f, 0xcf1f7f81, 0x96d8ba40, 0x1318fc72, 0xd7ae3d1d, 0xf7e88126, 0x32bfe432,
+0x453f694b, 0x7f7fbab6, 0xeae7de30, 0x3d81d7d1, 0x438d9cc7, 0x387f4a05, 0x7f46ab81, 0x635bdf10,
+0xc281ae24, 0xc5c67fc4, 0x81acb53d, 0xba4f4d04, 0x424f72cc, 0xbf7faa1f, 0x55de1de8, 0x81429a1d,
+0xdec94147, 0xff007f42, 0x3fa74821, 0x0cb74e9c, 0xff81627e, 0x6343810f, 0xf7393529, 0x7f44ad81,
+0x7f811f39, 0x81e981a0, 0x8c8c4421, 0xc4819190, 0x81816881, 0x7f7fd234, 0x3688c16c, 0x9c34b357,
+0x6046e3f6, 0x86123bdc, 0xbd45957f, 0xe6cd777f, 0xe0951ce7, 0x9b7f63a6, 0x817f29c1, 0x7f5a81ca,
+0xde7ffcbe, 0x812c3207, 0xa099537b, 0x2a2f81d0, 0x16817f9a, 0xbb81817c, 0x7f81e538, 0x957a4200,
+0x81753bab, 0xaf63a506, 0x3081691c, 0xb0a47f2c, 0x0f157feb, 0x8e4330fe, 0x635b8189, 0x7f81ee34,
+0x7f40caae, 0xbce88e05, 0xabb52199, 0xd2ba102f, 0x7f7f8173, 0x810b32ab, 0x2e222781, 0x7f819951,
+0xfbc73dca, 0x5d81d181, 0x7fb09d81, 0x7faa3098, 0x7e81435b, 0x61e995d6, 0x7f7974f3, 0x7fef81f9,
+0x402faba7, 0x7a647f3c, 0x815c6826, 0xba2b2ffe, 0x7f095b7f, 0x7f9236b9, 0x9953636b, 0x344c64d2,
+0xd784acc0, 0x7f818426, 0x61ab43f8, 0xd5813c70, 0x7f817f81, 0xc9867fa8, 0x6f4b4181, 0x3881b1df,
+0x69813f27, 0x81d0780c, 0x17c84a36, 0xbf908ccc, 0x7f5e812b, 0x317f7fea, 0x7f817881, 0xf731c537,
+0x7fb221e7, 0xbae09bf7, 0xcc5d947f, 0xcfc13926, 0x6e0db67f, 0xdf17be28, 0xcaa4ea2f, 0x9870810f,
+0x357f701d, 0xa281815d, 0x91497468, 0x5fbcbeb0, 0x7fd54d81, 0x7d5e91f3, 0x8d6555f9, 0x35d416e3,
+0x9a7f8c11, 0xd875779b, 0x6b7c6681, 0x24f77f50, 0x288bd781, 0x7fad2b50, 0x637f1a23, 0x527f2ea7,
+0x277f7fc3, 0x819c13cb, 0xe1bb65f1, 0x48151c1b, 0x8e7fd27f, 0x7f7065ac, 0xfe0bb14d, 0x588e7f0c,
+0x819d7f34, 0x81c455a5, 0x4f6b7fae, 0x7fb56506, 0x7fcfd5a2, 0x3e6981dd, 0xcc814569, 0xc5fb811a,
+0xc0e07fbc, 0x817f8153, 0xac356315, 0xc94db6d8, 0x7f88e57f, 0x81e57fd0, 0x7f1f6c0f, 0x78d581e1,
+0x7f2137f6, 0x1c57815a, 0x627f55c0, 0x69e4dbcb, 0xa68181c3, 0xb9907fea, 0x55897fa0, 0x7f54a91d,
+0x3e7bb314, 0x7f167f5f, 0x75cc8181, 0x177f2fd0, 0x6d39f7e9, 0x7f817fd3, 0x7f6eb0ea, 0x817fc7ad,
+0xae5b5b04, 0x86cd281f, 0x3528fe7f, 0x87217fff, 0x87860479, 0x8b7f4510, 0x7faa816c, 0xb9d62c33,
+0xdd1c6ccc, 0xb48104fd, 0xbf508f81, 0x34994944, 0x8181b843, 0x74e47f47, 0xc6baf38e, 0x624b06cc,
+0xfdadfaeb, 0x948189ac, 0x7f7f992d, 0x813edf00, 0x81c3587f, 0x5eb2819f, 0xcee54cef, 0x277f38d9,
+0x8a187fb6, 0x9736ed05, 0xed70487f, 0x4f307fa9, 0x81ba975e, 0x81dd81ed, 0xd0da7f88, 0xf24881cc,
+0x12e656d0, 0x7f9b6a1f, 0xcf7fcf7f, 0x78787ebd, 0x7f8319f2, 0x7c7fe9ed, 0x29b381ae, 0xfa4f81cb,
+0x13cc81e2, 0x9a2aa0a8, 0x817f8112, 0xfda22318, 0x7f65e85a, 0x727f6812, 0x7f814b0b, 0xcfe2bcd5,
+0x6b816ce3, 0xb2965ab7, 0x819981bf, 0x73799635, 0x82854b71, 0xbc3e8132, 0x91c714a2, 0x81817f2e,
+0x2cc55af1, 0xb87fb038, 0x7f7f1718, 0x02ba14fe, 0x5e348128, 0x817ff97f, 0x81d03daa, 0xde37813b,
+0x443781c0, 0x0e4e32e2, 0x85a7ad81, 0xba276c16, 0xb61ed9c6, 0x81ca27d3, 0x7f762e7f, 0x4181d581,
+0x7f7f5508, 0x6eb1b250, 0x5ad47f4a, 0x7fd56756, 0xbc817f81, 0xd481a9ba, 0xf8a74db3, 0x7d1aae25,
+0x81397f36, 0x7f62a402, 0xab3b3081, 0x4c45c301, 0x3eb57f88, 0x6d2e7fdd, 0xeb1ac681, 0x813881c0,
+0x7fd127b4, 0x67e57f90, 0x4fcb6d3a, 0xc566afe7, 0xc58bdb7f, 0xe0486a7a, 0x363f8181, 0x7fdd8191,
+0x67149cc2, 0xfe7fcf00, 0x7feeb9dc, 0xccabbd95, 0x7fab347f, 0x98eda0d8, 0x7faf81a3, 0x3b8c7f0a,
+0x1c387fa7, 0x7f055ed9, 0x567fbd7f, 0x4d7f7244, 0xbb8175af, 0xef7f7f5f, 0xa9a58119, 0xada9eec7,
+0x592da3bd, 0xb5a98177, 0xe17f813e, 0x94816af7, 0x69ec7fc5, 0x7f29dd4a, 0x9481c981, 0x5d3198da,
+0xf84e81bb, 0x41817f7c, 0x7fb31b60, 0x7f7f5e03, 0xea81c490, 0x7681571c, 0x9b54b581, 0x812d993b,
+0x7fefd381, 0x7f907fe3, 0x7f81a862, 0x758181bc, 0x46d6507f, 0xae81a50a, 0xd1562a1d, 0x3dcb58d2,
+0x489e40fd, 0x7f8fa98a, 0xb07f7f81, 0x238b7b40, 0x49686384, 0xca817f4f, 0x607f7fd7, 0x3a0d81ab,
+0xb0fa42e3, 0x7f81563d, 0x829a817f, 0x51d48167, 0xcc817781, 0xd54f5638, 0x7fee1218, 0x81a49e22,
+0x0081ec2f, 0x5f5eecf5, 0x6bbc9ce7, 0xa28138d6, 0x86811081, 0x7f0283cf, 0xfff67fb7, 0x81c2814a,
+0xc7817f52, 0x85818146, 0x8781a77f, 0x8181e9f6, 0x4731f881, 0xf51a35d8, 0x236931b2, 0x6eaefadb,
+0x7f81c61c, 0x947f3d7f, 0xbcf43b52, 0xd5814849, 0x17b6e07f, 0x7f6a81b6, 0x817f8163, 0x817f595f,
+0x7fa5ae33, 0x2b63a90a, 0x77627e81, 0x81816660, 0xae7f6281, 0x26c6dedd, 0x757ff9c4, 0x7f7fce30,
+0xab9d7800, 0x68fc444b, 0x3d7ff681, 0x93264a1d, 0x81361db5, 0x817fb6d3, 0x1681136f, 0xc5655027,
+0x792781e2, 0xeaada9c7, 0x34d1817f, 0x81687fc6, 0xa88d451a, 0xdce97f0d, 0x443935cd, 0x810a68b9,
+0x73ef4023, 0x66372979, 0x9a31076e, 0x81813dee, 0x56b11a7f, 0xc76a81da, 0xe0e48181, 0x6681b8b7,
+0xa99858ec, 0x7fb38de5, 0x7f6f817f, 0x92168381, 0x53819b02, 0xc3c27f2d, 0x5299d77f, 0x81e47f60,
+0x687ffbc6, 0x7f5f9df9, 0x0ab4c09d, 0x44856927, 0x5e63405f, 0x7f9fe864, 0x8c884d25, 0x794dd01f,
+0x7f81fb68, 0x31bed5f5, 0x3e658638, 0x3e7f8151, 0x7f7ba8e4, 0x7f304a8b, 0x37c18157, 0x7fb2c6fe,
+0x7fa39bef, 0xa37ff9a1, 0x29983fc2, 0x777facc9, 0x547ff37b, 0xe6815b17, 0xec5a457f, 0xacade303,
+0x7f2603dc, 0x697fcd23, 0x7f7fd042, 0x4a81a54d, 0xd481bf87, 0x7f4e7f0c, 0x0d5c9d05, 0x26ca3c2c,
+0x4f817fe3, 0xc9368b0f, 0xaddb8181, 0xc210e075, 0x248f7f81, 0x697f7f11, 0x817f2f81, 0xef697ff1,
+0xd98181b9, 0x810a4f50, 0xbb9e045a, 0x8ea3771d, 0x787f6931, 0xfbb9812d, 0xb778f981, 0x7f354b27,
+0xfc813658, 0x25818137, 0x7f3402a1, 0x27eabd01, 0x81c18181, 0xfd7fa22c, 0x9784817f, 0x7fa57fb3,
+0xd881f3ce, 0x887f60bf, 0xdc816b0c, 0x3890b818, 0xc37fa281, 0x8f4681f5, 0xb07fb08b, 0x82ad24d0,
+0xc189ab02, 0xe3fddb8f, 0x7f744610, 0x435781b0, 0x8104f653, 0x41bb81be, 0x9a4c717f, 0xf9d4c1e4,
+0xbdc36404, 0x8ff9818a, 0x54d3ab9c, 0x24aec8ef, 0x81ac5bc8, 0x81908100, 0x74f05a0f, 0x0c5b7f2d,
+0x5279a3e3, 0x2d8169b9, 0x7fdc4897, 0x7f5f89bb, 0x9f457581, 0xde817fd7, 0x59813d7f, 0x67817fe7,
+0x7f817fd5, 0x7f5978ca, 0x23ab813a, 0x70819ce8, 0x41575433, 0xb17fa99e, 0x697f7f7f, 0x60aeb15a,
+0x7fa5a9df, 0x39818181, 0x818ea205, 0xbd819fb2, 0x7f0fa491, 0x814981d8, 0xb9c186e4, 0x817f29e5,
+0x81afcd17, 0x842f81ce, 0x058b8e1d, 0xea3da29e, 0x379f5ab7, 0x223c84c9, 0x93bcd8bc, 0xc895baea,
+0x6b7d3cc9, 0x7f2b8152, 0x7feccb81, 0x7f71634c, 0x810f7f29, 0x89c14aff, 0xc0ce05d7, 0x7aed49e1,
+0x817f4d0c, 0xeeaa0afb, 0xd7817f81, 0x6681d76a, 0xa37f4881, 0x38708103, 0x7f8181cb, 0x0a8f6639,
+0x8581aec3, 0xe82836d2, 0xda7f4781, 0x40d67ff3, 0xcc335f7f, 0xce207f44, 0xff9181f8, 0xbae52614,
+0xfccdf700, 0x9dc19f54, 0x5d7faefc, 0x5bcc2357, 0x817f7fb1, 0x6f5a9dd7, 0x81af5633, 0x810a7f59,
+0x551c8a18, 0x4b816452, 0xe981d940, 0x81d175f6, 0xee7f5746, 0x276f51b7, 0x16ae817e, 0x652063ba,
+0xd84e5e44, 0x7f385f2a, 0xa2e1b28f, 0x7f817fd2, 0xd8477f34, 0x508f31fc, 0x7e357fc7, 0x7fa07f7f,
+0x7f6d7f52, 0x7f717038, 0x7f819c3a, 0x2f973937, 0x71ac7d7f, 0x790950c2, 0x9aa97f81, 0x7f427ffe,
+0xf3f974a1, 0x8107812c, 0x817fb77f, 0x4d458151, 0xc93d2395, 0xb497d049, 0x4b955c87, 0x047eaef1,
+0xcc6c07cc, 0x5eff94ad, 0x3bed1246, 0x74d8c531, 0x815afc81, 0xaccf6eca, 0xbe4e3f64, 0x079b37b9,
+0xa0898136, 0xfe8a81b9, 0xb7df45fc, 0x7f7f7157, 0x725581a3, 0x7f0c56ec, 0xe6430781, 0x4356def3,
+0x54ca7fc7, 0x234f3eee, 0x7f270367, 0x704981c1, 0xfe623836, 0x816d2725, 0x7b9d9e81, 0x3b49d3ae,
+0xd2925440, 0x819a7fa5, 0xb025c9a5, 0xbbd77fc1, 0x81737f81, 0xbcb891ae, 0x442ab466, 0x812e41cd,
+0xbdb081a8, 0x8e7f5c2d, 0x8458dfb7, 0xd77f8130, 0x9bcbbc47, 0x783a7f5f, 0x27c4d502, 0xadf8cb31,
+0xa51e9baa, 0x814d812f, 0x7f1a7f8c, 0x1874f4be, 0x4ac57f81, 0xc7b67f37, 0xc0817ff9, 0x8c81e1ae,
+0x6f7f9331, 0xa17f7f9f, 0x7bec5242, 0x81b79554, 0xa27f7f7f, 0x181da812, 0x52e681f5, 0x5c3781fb,
+0x895981ae, 0x7f7fa003, 0x9555817f, 0x32391fdf, 0x0c7fff6a, 0xb681981d, 0x1b93a683, 0x7fc3d495,
+0x83f5810b, 0x854399a0, 0x997fd57f, 0xe87f6929, 0x7fbafb81, 0x7fe201e3, 0x7f7234b3, 0xb6889ebb,
+0x8ea7cdcf, 0xd27fab96, 0x9a562c7f, 0x6c7f7fd8, 0x2277467f, 0xb3108ed9, 0x818d0171, 0x81bc477f,
+0x650719f9, 0xba7fe501, 0x7b5796ca, 0x3fc581eb, 0x9a90469b, 0x7f041112, 0x527f7947, 0x8d3d4657,
+0x7f627fdd, 0x64053a56, 0x817739c6, 0x812268ee, 0x69ed9b7f, 0x3d7f7f1a, 0x7f2264f4, 0xb76c55f2,
+0xd06981fa, 0xa72359d6, 0x3708812f, 0x7f7a4c29, 0x467a227f, 0x685eb4c5, 0xeeeb81c1, 0x2999b5e3,
+0x7f7fa0be, 0x73586e92, 0xc7817f0a, 0xc32e7f4f, 0x819b9046, 0x8179acee, 0x29b1a517, 0x557f0ef9,
+0x8116589d, 0x557f26f3, 0x861970dd, 0xcb817fef, 0x9bb6ba72, 0xaee4ddfa, 0xd17f81c9, 0x257f76bf,
+0xb04b4df4, 0x8f7f8124, 0x818181fc, 0x4fcc8f06, 0x2f7cc181, 0x4900141a, 0xf0709c16, 0x6865b5fd,
+0xd9ab9128, 0xa2321900, 0xa1a6de7f, 0x58e47f23, 0x54ac4a45, 0x1af1e618, 0xed633c24, 0xde7f7ff7,
+0xd48139b1, 0x5a640dcf, 0x63d9de7f, 0x81669358, 0x4f4571bb, 0x7f7fa692, 0x7debaf85, 0x9d7f7f10,
+0xd9c34e9a, 0x708981e8, 0xfc838ac6, 0x430bf6b2, 0x81a1a4ee, 0x7f8181ad, 0x35e98d7b, 0x4e3681de,
+0x7f7256e9, 0x7fa3b47f, 0x5a813e7f, 0x8128aa15, 0xff42357c, 0x581e98a3, 0x81534281, 0xe58196fb,
+0x11b90c6b, 0x7f81e5e5, 0x7473f57f, 0xa9475b2c, 0x24d77fa7, 0xa27f591e, 0x7fba81a5, 0xc9f19310,
+0x3c64d05f, 0x86aca9f0, 0x69b57f01, 0xbb747fb6, 0xdd81e923, 0xbe814a26, 0xe6811b22, 0x7f817fdf,
+0x7f81e46a, 0x647f8170, 0xf2812d9b, 0x7f765fa5, 0x812dfe0e, 0x2584ed72, 0x811a7f7f, 0x417f7fee,
+0x81d28138, 0x7f8e7fff, 0x9c7e28d0, 0xa71eb5f5, 0x65993e7c, 0x927ff0e8, 0x7f5cd4ce, 0x2886b35e,
+0x7f81d3bf, 0xc2d1441e, 0x2f2cbadd, 0x8c9c4cb4, 0x34dba16b, 0xaa37c5c8, 0x5881ec6f, 0xfc7f64a4,
+0x81bb8128, 0x7395489a, 0x7f647f7f, 0x44ef7f9a, 0xc3b4d6a1, 0x8693af02, 0x87fab099, 0x7f7f2f05,
+0x0191d5f8, 0x0b4281cb, 0x615b990b, 0x554b7f4c, 0x7f7f66a2, 0x817f793b, 0x5681aa97, 0x21aea0bc,
+0x81925b0b, 0x81c69839, 0xb115814b, 0x9e81d8b0, 0xc70c6f7f, 0x8f14de16, 0x3f81b4bc, 0xaa0c25dd,
+0x81f8346b, 0x6981226b, 0x81b1817f, 0x7f2081e4, 0xa2848117, 0xe4cbcc32, 0x817081d5, 0x817dbf75,
+0x157f2614, 0x5a0477d6, 0x8b818105, 0x9e7f34d4, 0x48da3235, 0x397ff6d2, 0xa02f7092, 0x817f8b13,
+0x307b35ca, 0x783c7fe6, 0xa2c29081, 0x555b8101, 0xf764ee86, 0x463a8149, 0x577f837e, 0xb1948151,
+0xbd2f86f5, 0x6e5fc338, 0x7f67a373, 0x55817003, 0xa1e17f77, 0x8a817fff, 0x49365e44, 0x817f8933,
+0x209ba8a9, 0x7f311515, 0x7f519140, 0xa9549352, 0xbd817f8a, 0xafc4a005, 0x620d407f, 0x88302fd2,
+0x5cf481f2, 0xbd7fff40, 0x7f7f9cfb, 0x91ae81fc, 0x73f15c81, 0x93814581, 0xc7e8818c, 0xc481a1fa,
+0x99818124, 0x7fecdbf3, 0x207d8d39, 0x64818328, 0x7f9dbf5d, 0x2181f9c2, 0x817f7f8c, 0x367c1801,
+0x2a019011, 0x815f5d39, 0x81817fb8, 0x7f22815e, 0xb3ceca8f, 0xa6560501, 0xfb548181, 0x2b7ffe18,
+0x5394bb1c, 0x7f9981d9, 0x7f7fce32, 0x815ba6e4, 0xabd0d732, 0x7f5c81fa, 0x7f369f77, 0xa42b7f51,
+0x900db9cd, 0x9993bde1, 0x7fc8819a, 0x116f9310, 0x8ef767b0, 0x7a817f02, 0x56819b81, 0x29aee511,
+0x7f2b8d3a, 0x818a442a, 0x555c3ff5, 0x1d6c4929, 0x7ea9697f, 0x780b8fa6, 0x097fa426, 0x9b7f89d9,
+0x165081de, 0x45a184c0, 0x405c7fc7, 0x6bbcd6d3, 0x7b81837f, 0x33867f12, 0x878f7f62, 0xc6cf1002,
+0xad819f7f, 0x3df9422a, 0x77ca9e81, 0x684f7fca, 0x30847f81, 0x81817fda, 0xedc6a081, 0x682c00eb,
+0xca767fdd, 0x1f7fb0e3, 0x377f8a7f, 0x5e85d200, 0xb9237f1f, 0xf2817f2d, 0x0dd88132, 0x81cc0037,
+0x739b39bb, 0xb47fb83c, 0xdf7f8181, 0x7f54f610, 0xbc9e3f06, 0x5762b272, 0x81ebc628, 0x7f9845b9,
+0x7d901f63, 0x81ce7f36, 0x0ebd8e7d, 0x9a81d963, 0x644c3f81, 0x227fbdeb, 0x8e2d5f81, 0x54efc59a,
+0x94ed5511, 0x817f944b, 0x38393d81, 0x592f7ad2, 0x75814881, 0x63275d7c, 0x4a65de10, 0xeb818ff6,
+0x70f93fc1, 0xbbe83610, 0x7d94beba, 0xfd077f2f, 0x7f818181, 0xbb7f47d1, 0x4c8f7f6d, 0x587fa2fb,
+0xbd817f02, 0xae7f8123, 0x4fb981ac, 0x3fd2c446, 0x817f7fa1, 0xb28d81ce, 0x7f813fbf, 0x5a8181a8,
+0x2f6355c9, 0x75624fb5, 0x811842d0, 0x70a38146, 0xea52aef5, 0x7fffa3f0, 0x8f7fdcb0, 0xe31d81e5,
+0x7fe15a51, 0x21894817, 0x9c810f41, 0xe9428145, 0x7f819edc, 0x7feb1761, 0x81368181, 0x2c71f15e,
+0x26813c54, 0xf09c69aa, 0xd17751da, 0x25a1c23c, 0x7f812281, 0x4317b30f, 0x66908149, 0x81869b95,
+0x062874c1, 0x843481a4, 0x81743c89, 0x813e9cae, 0xd8397fd1, 0x7b495f55, 0x7f7f7f53, 0xc0b419e7,
+0xacf11d1b, 0x90a51003, 0x9efa5bb6, 0x77b8989f, 0x8b1ea17f, 0xc46537c3, 0xcca93d93, 0xa955d1db,
+0xc42743ed, 0x7f8a6442, 0x79ab8152, 0xfea48ff7, 0xfbbb819c, 0xa1be81ed, 0xa481da42, 0x477f7fd0,
+0xcc1acdfb, 0x767f1533, 0x99cdda46, 0xfb9c8109, 0x7f757f7f, 0x747f88d6, 0xb5404df1, 0x8b69b739,
+0xc762670a, 0x8125bded, 0x6481a452, 0x943e5aef, 0x7fe3557f, 0x8181bfc5, 0xb14d907f, 0x7f747f9c,
+0x54b4ff28, 0xe19d7fc2, 0x7f25887f, 0x7f6cc0b5, 0x34653ba1, 0xacd70015, 0x7901cc3b, 0xb081815e,
+0x812781fd, 0x871cd81d, 0xd1818494, 0xa97fc1ed, 0x81817f86, 0x9f3cf7da, 0x9857bb81, 0x9d1661d1,
+0x167f5814, 0x037fd0e4, 0x7f337f7f, 0x7fd40904, 0x376a4a69, 0x7f77a308, 0x73b61d36, 0xb4a318cc,
+0x7f9cc3e7, 0xe0d55532, 0xab58707f, 0x9c467f07, 0x8c7f2581, 0x60155a41, 0x7f3fd21d, 0x7f7f59a3,
+0x38176d64, 0xf77fd7a6, 0x0ed2ae81, 0x7f81cdec, 0x3c0f81a2, 0x747d9be6, 0x818c5f83, 0x432e7cfb,
+0x4a44b96b, 0xc8748423, 0x6b818181, 0xe34914d7, 0x980b64bb, 0x7fbe165f, 0x7f104bbd, 0x5c7f9d02,
+0x7f307fcc, 0x8399c1a3, 0x7233316c, 0x92aae7a6, 0x81deda79, 0x812805f0, 0x7f566985, 0xdeffbea8,
+0x2bbf427f, 0x7f819e05, 0x90811687, 0x817f81a0, 0xb581c481, 0xa05cfdd4, 0xe4983e52, 0x813e81e7,
+0x2b5ba42c, 0xf75aaf0c, 0x4bbc8181, 0x817fa502, 0x1a7f2d7f, 0xe97f0700, 0x724a5029, 0xe3700308,
+0xae7fb93c, 0x7f9581ba, 0x819d7f3d, 0x2b2ef875, 0x341c52ae, 0x21b23dfc, 0x6a82a2e1, 0xaa81ecc7,
+0xcf817ff6, 0x7fa17fb5, 0x1fc21a81, 0x817f895d, 0x54973cd6, 0x81a8c919, 0x9f4c7f38, 0x89817fd1,
+0x325d7fea, 0x959581c9, 0x711638c8, 0x07bb81c4, 0x1bf0818c, 0x346b81aa, 0x5d55817f, 0x7f7f8192,
+0x3b7f81d5, 0xe783f367, 0x863038b6, 0x7f53589b, 0x7f4de47f, 0x128165ac, 0x7fa6ccfa, 0xc19f8132,
+0xc3b3483e, 0xa63d628e, 0x2d707fd1, 0xd93a1319, 0x9197277f, 0xd77f0fbf, 0x8156da7f, 0x88819a98,
+0x3eb240f2, 0x81007fbe, 0x067fe653, 0xd3aa8150, 0xa625819b, 0x767ab13b, 0x937f817b, 0xd08181e1,
+0xa47f81b9, 0x3481bd4a, 0x65507fa7, 0xb79181d1, 0xbf1d7d3e, 0xb2818178, 0x8f329d1c, 0x2081b481,
+0x7b84a853, 0x7b679ce3, 0x9ba49881, 0x695ab122, 0x23be9081, 0xd7322a4a, 0x21db3fad, 0x81fc8145,
+0x84a1bad2, 0x9f307bb8, 0x81815581, 0xb84f1450, 0x6d3acdb7, 0xfd64811d, 0x7f8132e6, 0xd7458fee,
+0x81c54610, 0x7f7f73d1, 0x81a5c3bf, 0xe15f7fa0, 0x3c7f517f, 0x047f7fd0, 0x261e438d, 0xb3c47f34,
+0x5c81a1f6, 0xa2c62f5a, 0xe67f9255, 0x4df92530, 0x58877578, 0x817fdbd6, 0x9381d867, 0xb3815eec,
+0x3d8b8ad4, 0x814aab55, 0xa5477f7b, 0xed7f98d4, 0xae7f3681, 0x3b938138, 0x58e37f90, 0x7356b00c,
+0x754181fd, 0x3181503e, 0xc58dc451, 0x818b9cbe, 0x747fcbc4, 0x0918d1f1, 0x5b5ae00e, 0x761d6116,
+0x535ba62c, 0x8c0f22b4, 0xc98fb9ad, 0xad7f6d1d, 0x4f7fec7f, 0x1b437f35, 0x9004b258, 0x787c8d15,
+0xd04b81e6, 0x703d10f0, 0x7f7f8b89, 0x1674b61c, 0x9789ac81, 0xac6c7f58, 0x0e81ac48, 0x0881812d,
+0x117f7cc2, 0xa6bd8145, 0x59817f7f, 0x89cbb4fb, 0x697f9a81, 0x813e5281, 0xf57f4898, 0xee994b1d,
+0x60228cc5, 0x4c341db8, 0x7f6e7f63, 0x107f8151, 0x14397f55, 0x81a951d6, 0xb762ed40, 0x81c39dd6,
+0x546a0438, 0x7f8e88d7, 0x81810e7f, 0x7f7f654c, 0x3b412da8, 0x8aabcfe0, 0x4b83444b, 0x65bea3ee,
+0x507fb43f, 0x16c3c7c5, 0xe322a2d9, 0xfd5a0fd9, 0xc757577f, 0x6cd0b9af, 0xc6078100, 0x3256b728,
+0xc47fdc2f, 0x81dc4bb9, 0xd8d02073, 0xc38671f2, 0xbb9104b4, 0x7fb281b6, 0xcb817f81, 0x819f44af,
+0x29f781bc, 0xdfc231b9, 0x7fae7f43, 0x575b7f79, 0xf4c4414e, 0xe807d15c, 0xd08181ae, 0x8e81cd9f,
+0x942472c9, 0x816362dc, 0x9cba9281, 0xdc6fce74, 0x2b7f7f35, 0x37810c4f, 0xe8709e85, 0x5c7f984d,
+0x8153bc24, 0xbfe581bd, 0x9a7f9c84, 0x3be9e533, 0xb6938127, 0x82ca63d5, 0x08eeb19f, 0x59ebeb0f,
+0x4d7f14f7, 0x3c558f40, 0x62d26981, 0x8181f3a8, 0xe42585ea, 0x7f08817f, 0xaf8181a0, 0x0c5d16cd,
+0xa4dfee3d, 0x3a8182fa, 0x6381883a, 0x7fe9d01c, 0x5e659f90, 0x7f7f812f, 0x4a9d8179, 0xb1e4acf3,
+0x1b8189f9, 0xd983a4d8, 0x817f81df, 0xa2cf1f55, 0x817f337f, 0x7fa3564e, 0x14ae7c44, 0x4ca3859d,
+0x81b37fa2, 0xf8a0b7b1, 0x7f79c0cc, 0x7f3010f6, 0xa5be8c16, 0x2420b129, 0x7f7898f1, 0x7e818139,
+0x7fc919eb, 0x61819a67, 0x88928b15, 0x7eade0d3, 0x4e7fc323, 0x8deb5fe6, 0x7f3f7f9c, 0x81817fbf,
+0x989026db, 0xef81c203, 0x81e4a1e1, 0x35939d41, 0xbbaae870, 0x3d217fca, 0xddd9a1ce, 0x7f7f81a3,
+0x5bf18122, 0x81c55a52, 0xc95c450c, 0x7f5a03e6, 0x65654a14, 0x75d401f6, 0xe0cb297f, 0x5f3c8132,
+0xf7a312d3, 0x81c6d3af, 0x978e8181, 0x1476539f, 0x502534be, 0xd2909112, 0x6ad663ad, 0x7f7fbbac,
+0x8b70da23, 0x4d78af31, 0x7f8159f5, 0x81bad147, 0x348151dd, 0x5e68eace, 0xc2737fa0, 0xf97fa32e,
+0x7e7f81de, 0xa68baddb, 0xbb39817f, 0x7f9bc454, 0x6d09c327, 0xc37f3c33, 0x0a865f7f, 0xba8281e1,
+0x1a3ff406, 0x652c3fd1, 0x94af367b, 0xb381d8cb, 0x4045cce5, 0x7f7fd613, 0x795f7f42, 0xb1818193,
+0x7f371d3c, 0xef816b41, 0x2ca91acc, 0x48b5aa83, 0xaae807de, 0x26813f13, 0x81817f2a, 0x6083604f,
+0xd29e81f6, 0x7fb218ca, 0x4aa50170, 0xc2d74a22, 0x4f4971a3, 0xe9dc5f10, 0x7fb8bedf, 0x1f93814a,
+0xa97f7f0e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+hard_output0=
+0xb5bc6ac8, 0xf5373664, 0x1310345c, 0xd5bae4e7, 0x1fc9e83e, 0xebfdfded, 0x84bd86ab, 0xb7aabe00,
+0x60b44fea, 0xb9067464, 0x30325378, 0xa9195955, 0xf70c6e5c, 0x90922632, 0xc90b1cdb, 0xf2f5fb69,
+0x73056b63, 0x1a33bf3f, 0x17755b5c, 0xc58bff6d, 0x2f4390b2, 0x2869d508, 0xe7c7dfe8, 0x38552963,
+0x21da5367, 0x07282b9b, 0xa4767105, 0x1e294251, 0xe350a940, 0xb8a6aa27, 0xed12d778, 0xf10d9ece,
+0xab93527f, 0xcf2da7e7, 0x68f6d0b1, 0x811f4bca, 0x577b06b2, 0x3234f13e, 0x30bab7df, 0x8dc47655,
+0xbb843bed, 0x86da3aba, 0x30950c97, 0xdd096d7a, 0xa871fd6c, 0x8bee4e6b, 0x8fea30d0, 0x6c05b4d2,
+0xf3e144d3, 0xd24ebb1f, 0x065635e5, 0x8d3f2cf9, 0x536c6c6a, 0xfbb0a5d0, 0x3d707b42, 0xc44d5982,
+0xa5f4ad8f, 0xf32c0970, 0x1bccf1a6, 0x05916020, 0xa64fb176, 0x5ede6a35, 0xaf4966da, 0x9df5e0e7,
+0x75042abc, 0x9ef10481, 0x11ddcbc8, 0xa0f5518c, 0xd5c23418, 0x2393d558, 0xfbe7dfeb, 0xed1c64c2,
+0x86a36508, 0xde2dfb1e, 0xb8d0fef9, 0x24505232, 0xc894e71c, 0xbcc752a0, 0x40b74e83, 0x90d23c8c,
+0x728e4a61, 0x108f0b08, 0x66f522ee, 0xc258d851, 0x35a31c44, 0x11311b5b, 0xfd3d5be9, 0x5ae448ff,
+0x4f64994b, 0x5b8247a9, 0x4021114d, 0x2f0b6e82, 0x5eaa9828, 0x50ac71c0, 0xfb86ee52, 0x0dc1ac9b,
+0xbbd47645, 0x8f357115, 0x978ceea0, 0xd557db99, 0x99b30388, 0xfc9a8a1c, 0x0f75be1a, 0x50143e22,
+0x8840989b, 0x738ec50e, 0xe6b2783d, 0xf67899c8, 0x27ebed69, 0x6c415a16, 0x3a6cc2dc, 0xcd4e4e5d,
+0x6cb12b2e, 0xdb88d7c0, 0x79cd1582, 0xbc422413, 0xe72ad2f4, 0x8eaac30f, 0x0bd86747, 0x6d87f69d,
+0x15d62038, 0x4b375630, 0x0d51b859, 0x16db2cb2, 0xf210603a, 0x0abeb833, 0x55c694d0, 0xe57ca43b,
+0x0ba94428, 0x1398a406, 0xe47d3889, 0x5a20203d, 0x250d7a1a, 0xd930ffec, 0x03992e79, 0xf2759376,
+0x024ec121, 0x91fc3a2c, 0xb7e11cc5, 0x4ff7d459, 0xb8700134, 0xd6e61758, 0x4eba0a32, 0xb747e3ec,
+0x7073fad7, 0xded80f99, 0x331e2f1b, 0xfa1f1bed, 0x056424a2, 0x1d1d95e0, 0x550b9ec8, 0x51ee2a38,
+0x19525153, 0xd70c4cd5, 0x0d6cd7ad, 0xe44d1cf2, 0x30dfecda, 0xdacd7fe8, 0x7321d795, 0xddf48ef4,
+0xe271e6a4, 0x9c1feecb, 0x951fcd7b, 0x8acc5a03, 0x3fb83527, 0xe306de74, 0x7b9cd6ee, 0x8e140885,
+0xd4c91e8d, 0xe8c39733, 0x0f02f87f, 0xfb06b1b9, 0x0dc9349c, 0xf76bae8e, 0x4f642a07, 0x3d48a9aa,
+0xe3ea323a, 0xa1cd5c8a, 0x40aa0e70, 0x132042d3, 0xa9732f6c, 0xd15a00c4, 0x43d3b046, 0x9a51ebd4,
+0xc46ee0ed, 0xe2a2148b, 0xf5c478f0, 0x1fb01cf3, 0xf4f321ec, 0xd973811f, 0x11ad11b9, 0x5c67adda
+
+e =
+34560
+
+k =
+6144
+
+rv_index =
+0
+
+iter_max =
+8
+
+iter_min =
+4
+
+expected_iter_count =
+8
+
+ext_scale =
+15
+
+num_maps =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN
+
+expected_status =
+OK
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_posllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_posllr.data
new file mode 100644
index 00000000..fe4f5eef
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_posllr.data
@@ -0,0 +1,645 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0x81814e1f, 0xba81fb11, 0xc4554696, 0xe17e9454, 0x427f8119, 0xa8b355f1, 0x244ba17f, 0x1c816fa1,
+0xc94f5bcc, 0xb5b97fbb, 0x634b8e81, 0x815a6cc1, 0x7f6bb016, 0x2f7249d6, 0x604664f0, 0x31817f81,
+0x6fa66b81, 0xd49763f6, 0x777fa4f1, 0x81aa6da8, 0xf0fea881, 0xaccfbfd0, 0x7f81e582, 0x7f58a2d6,
+0xa77f430e, 0xf47f50f1, 0x997f42f2, 0xd7814f81, 0x037fe14d, 0x81f98125, 0x2d813e55, 0x7f81f0dc,
+0x7f7f8181, 0xc37c723a, 0xbc7a3364, 0x8193817f, 0xe381066d, 0x7fcab836, 0x2481eea2, 0x567f5968,
+0xa07d720c, 0x7fa13343, 0x7f35636d, 0x7f817f78, 0x383d2a69, 0x7f860e2a, 0xca78797f, 0x7f7fe912,
+0xc95a8141, 0x770c9c4e, 0x81e2817f, 0x811b3f81, 0x745f1104, 0x13817e31, 0xb681a63a, 0x8101482a,
+0xbe81818d, 0x8c8181fa, 0x2133de57, 0x8147677f, 0x8a098d7f, 0xd429f1ee, 0x284fc81f, 0x7f5c16d7,
+0xded34e7f, 0x3d60812e, 0xdf817f89, 0x2f33793c, 0x2f7f9961, 0x8181221e, 0x847f59d1, 0x9281a681,
+0x89503793, 0x01817ff0, 0x183a53ba, 0xc99622f6, 0xbc819881, 0xa7ea2ec8, 0x787f957f, 0x26819378,
+0x1e4e4ba1, 0x8bf18138, 0x3adc2c81, 0x3a7d8187, 0x7f22ae8e, 0x307c81d7, 0x7f53a881, 0xc281819f,
+0x38fd2090, 0xdd810a00, 0x74ff7fa2, 0x7fb2f27f, 0x7f9b8211, 0x55d69e44, 0x8d2cd44c, 0x5610c676,
+0x7f815b81, 0xa2811e3d, 0x93135d81, 0x92797f73, 0x8a8192b3, 0xa48a7f66, 0x7f7e4a10, 0x242dfd81,
+0x7fe8b481, 0x810fb0e4, 0xde390fa7, 0xbb5a7f81, 0x65ad3b19, 0xa96a96a2, 0xf681817f, 0xb6a4eba2,
+0x0d81017f, 0x5369080e, 0xfbd1c781, 0x879d7f7f, 0x81818181, 0x7f1d061c, 0x8aa731fd, 0x4386a67f,
+0xba445581, 0x5b81975f, 0x467f7f7a, 0xc85db95e, 0x6f587240, 0x29a07904, 0x7f7f7574, 0x815dbeb2,
+0x81771181, 0x7f54e8c3, 0xc6586fd6, 0x3c6d835a, 0x86b4ed95, 0x7faa72fb, 0x7f7e7fd5, 0x154f81e3,
+0x7c4881b4, 0x0c164a37, 0x7ffdbf69, 0x7f7d43f6, 0x5f47da8a, 0x3236d20f, 0x65818181, 0xa37fcf7f,
+0x7f8116cb, 0xf6947f50, 0x66ab65b1, 0x6d688a6f, 0x537f6bf0, 0x5c864ddc, 0x06a98181, 0x455854c2,
+0x9a6c49ad, 0x129d3c29, 0xe94c327f, 0x7d40247f, 0x9681bc7f, 0x7f288120, 0x81a43b40, 0x08d81689,
+0x457f815d, 0xc27f9ae7, 0x7f819655, 0xdabf8181, 0xf4c07f81, 0x6410e8cc, 0xce4d8181, 0x19b13059,
+0x8181bf99, 0xf2819620, 0x7fc37f4e, 0x7f8efe81, 0xb25a3e8b, 0x7fbbdb22, 0xba7f8181, 0x8135d42f,
+0x817f6836, 0xc381a056, 0x7381547f, 0x1e407f13, 0x9b917f7f, 0x4e526d9b, 0x9323737f, 0x7a20429b,
+0xac6b14a8, 0x3568e8d7, 0xbe621a97, 0x7fbca13b, 0x2dbda9dc, 0x32af4ddd, 0x6e4f978e, 0x10ed8172,
+0x81e76681, 0x7f427fc4, 0x7fab7f81, 0x2c7ffd81, 0xea817f7f, 0xa62fcf2c, 0x8144bfb7, 0x7ffc93a0,
+0x79e9b564, 0x6c449d46, 0x987f8481, 0xee118173, 0x867f0c0a, 0x81819347, 0xb8ddbf82, 0x5c6866b8,
+0x82b48881, 0x5c817fdd, 0x7f6605e0, 0x4181bc81, 0x736b51a4, 0xcd5a7fd2, 0xf939a42b, 0xcd817faf,
+0x5e1b1fa6, 0x7c818ef7, 0x070c47cc, 0x8e5981d1, 0xe661d182, 0x54deb54e, 0x7fba81f4, 0xc1712e73,
+0xb6885086, 0xae9759e2, 0xb12b814e, 0xf5ae9e41, 0x16c736f6, 0xa981c3e2, 0xb06c7f29, 0x497f95b8,
+0xcb3de381, 0x7f7524e3, 0xdcf88147, 0x811e81f9, 0xab7a9c7f, 0x816223d8, 0xf97fe22f, 0xe5cf814e,
+0x744f6312, 0xc797e05a, 0x279b237b, 0x622b01b9, 0x37818181, 0x8174e3ab, 0xfa5a69ff, 0x62cc787f,
+0x81944e7f, 0xc87f7f2f, 0x30839f54, 0x8122090f, 0xf8e98181, 0x28815f40, 0x0262424b, 0x7fc3ccd0,
+0xdf7f7fe2, 0x0c264dfa, 0x5d954581, 0xfb188181, 0x6b810d33, 0x1c7fbee7, 0x8141c17f, 0x7f768155,
+0xbd207f27, 0x794079d7, 0xf34bd97b, 0x7f7f5026, 0xb46c9782, 0x7f81574e, 0x81817f27, 0x75267f6d,
+0x4a7fe97f, 0x7f879ee0, 0x25f8d63f, 0xd481a3c5, 0x747ffd0d, 0xb33530e2, 0x813f5881, 0xbb8181dd,
+0xa2da462a, 0x817f8c0d, 0xd0f061f8, 0x117bda34, 0x1897f8b7, 0x2e477f50, 0x8181814b, 0x7faa1181,
+0x6ee68168, 0x811a98e2, 0xdcb2b784, 0xa20a7fb7, 0x41445681, 0x0bffeefc, 0x02817f9d, 0x743071cf,
+0x387695bd, 0x847ffb4a, 0x0a287f56, 0xf9b4d4d3, 0x2a7fa481, 0xe3eb2e9b, 0x327f91f4, 0xec3981d2,
+0x6681f83c, 0xe675722e, 0x426ac913, 0x4b6154a6, 0x5c30816f, 0x817c8155, 0x277fa781, 0x8dd8a9a3,
+0x48c609a0, 0x4bca47bf, 0x7f3f6d81, 0xea497fb3, 0xc181507f, 0x81bf1ffe, 0x8e818916, 0x6ba83d7f,
+0xb8547f62, 0x5881670e, 0x7f6281fb, 0x6e7f9250, 0xa7bc817f, 0xb19450aa, 0x7f7f9d58, 0x20f69c1e,
+0x7f45815d, 0x817f9319, 0x7a5496b0, 0xb85e8950, 0x72c9337f, 0x81781aba, 0xfecdaf28, 0xe2195ab5,
+0x7fbb0ec7, 0x7f7ff8a7, 0xab818181, 0x978f2e6e, 0x7fc38121, 0x18945319, 0x264a8181, 0x93b1dc81,
+0x630ed781, 0xe27f8e14, 0xc17f5b93, 0x7081fe2d, 0xbb9ac790, 0x7f81ba5c, 0x7f812f32, 0x68bd4f7f,
+0x81217f81, 0x88b87f26, 0x285e7f3e, 0x29817828, 0x747f7f81, 0x159eaacd, 0x8d02b252, 0xa98881e7,
+0x817f1681, 0x367fbd38, 0x8a8441e3, 0x81447f1f, 0x8165424b, 0x811676ee, 0x81a48122, 0x4b02ea7b,
+0x5c267f38, 0x8195acd2, 0x7f7fb581, 0x9eae092d, 0xab12457f, 0x96f8dc31, 0xa89fd810, 0x222b7f7f,
+0x1dd67f65, 0x873a81e0, 0x0e7fd97f, 0x81a83d97, 0x8106d381, 0x7f817f0c, 0x8104d4f9, 0xbb81677f,
+0x01be7f39, 0x297297ab, 0xc2f01b81, 0x9dd57f81, 0x81ff692f, 0x7fc28115, 0x077f547f, 0x717fcf81,
+0x3d81ed83, 0xff6f7fff, 0x7f50f95c, 0x5650a753, 0x81817f09, 0x963b7057, 0x7f817f60, 0x987fa099,
+0xf6207f7e, 0x4c814e98, 0xcd27b681, 0x41bd2ebd, 0x340172b2, 0xbc151ec1, 0xf3b403d2, 0x817f6c81,
+0x7f2f3218, 0x26f2ea5d, 0xcc59bd8a, 0xef857f52, 0xe37bc195, 0x7f2a2e13, 0x7381a51a, 0x31e14c60,
+0xc2e23fc6, 0x8113f40c, 0x9b2d7f81, 0xf6e1147d, 0x813c7fe1, 0x9a9d7f3f, 0x7fb939ac, 0x573181ad,
+0x608a317f, 0xb486b9ee, 0xe67fb10b, 0x62817f8b, 0x7f7f9775, 0x81e6819d, 0x7f6f0f61, 0x4ffd8158,
+0x81ae56d2, 0xbe4aa4f7, 0x6c515581, 0x435d8326, 0x7f0e2a95, 0xc28d81e1, 0x357fa0cf, 0x2b9f7f11,
+0xd491057f, 0xbc7bddd7, 0xe5b57f34, 0xfba68549, 0xb491d2e3, 0x4fb61f49, 0x81bd3e3c, 0xdd9848f3,
+0x81810561, 0x67f5fbdd, 0x8c4dc25b, 0x2ab47f67, 0x68137f5e, 0xcfe53817, 0xa7679d31, 0x501213b4,
+0x7f7f1390, 0xbefeccd8, 0x30fe9044, 0x7fd6812d, 0x813cf0e6, 0xccb82825, 0xfa78ec58, 0x024b90e4,
+0xba25ec7f, 0xc43b50ff, 0x7f7f3e4d, 0x64c38181, 0x81ed1dba, 0xd58b7f64, 0x0b81311f, 0x03563b7f,
+0x817fb491, 0x81e895c7, 0x817f7fb0, 0x1681a581, 0x7f998a50, 0x489b81a9, 0x81bc81a2, 0x3d9e7fde,
+0x7c7c6381, 0x817447ec, 0x6e7fc32b, 0x7fab86c6, 0x95b44181, 0x3a7f756c, 0x81458168, 0xc4787fd3,
+0x731cda81, 0x73127f03, 0xc9077fcf, 0xc37f9c2e, 0x27817f7f, 0xc4361507, 0x7f9b3581, 0x9d357f2d,
+0x7f81166c, 0xb87f815f, 0xb2604f95, 0xc44b5591, 0xcfb8078b, 0x7481e64b, 0x8152a679, 0x53fa387f,
+0xfa7f3f79, 0x7f5b5fe5, 0x1e625d7d, 0x1081ad93, 0x7f5019fe, 0x3d8145a7, 0x629f9918, 0xc1ab3742,
+0x7f66c057, 0x9cb048dd, 0xeb6e859b, 0x8e5fb72c, 0x94d8f5a4, 0xa381280d, 0x67737de6, 0x45817f81,
+0xf055c17f, 0x6a0aafff, 0xdb818cc6, 0x81fc60dc, 0x20377f4c, 0x7feac6de, 0x72e78c7f, 0xdf532124,
+0x81d33356, 0x8881bcf9, 0x95817f32, 0x612eae3b, 0x7b186965, 0x50b77f63, 0x718181b3, 0x42d8b481,
+0x88edaf7f, 0xa3cd25f6, 0x7fdd7f81, 0x7fb79681, 0x7f786e7f, 0x7fbb41c9, 0xd2618121, 0x1cacc6d3,
+0x7fe2a0dc, 0x819e1a46, 0x563081e1, 0xb18d77b1, 0xb4817f7f, 0xa97f81fa, 0x2e81157f, 0x31e67fa4,
+0x5b814281, 0x98b18124, 0x7f0a3540, 0x49d508d1, 0x2693bc71, 0x3aeb81ca, 0x817fc081, 0x6674b9e7,
+0xa1217f23, 0x517f81f1, 0x7fc4ef26, 0x2914ef7f, 0xa9ff7a19, 0x7f7f0ce2, 0x1b485e45, 0x7f70af0d,
+0xd6812a71, 0xc95fc63d, 0xa57f03c8, 0x9dbe7f81, 0x7c978192, 0x81a22306, 0x96447f2f, 0x7faf2728,
+0xc2a494ad, 0xd5c7813a, 0xa02b7bb8, 0x3bc04eed, 0x997fa97f, 0x7768d0d0, 0x8a84817f, 0x8f1102c6,
+0x7f8115be, 0xfb7f7869, 0x7ff25de8, 0x6745f276, 0x0f23b585, 0x52933552, 0x7fab5340, 0xeb4c403f,
+0x7f5465ec, 0x77b0a942, 0x6b67297f, 0xc86a7133, 0x7f8f177f, 0x869e28f4, 0x7a7fb066, 0x81a3b865,
+0x5d7fba81, 0x815fd7ae, 0x60817ff7, 0xa633a381, 0x45813981, 0xade1ccd7, 0x814981a6, 0xaa93770a,
+0x7f83b4dd, 0xc0967f1a, 0xed7f3764, 0x53772ea8, 0x5a81f453, 0xaff56f92, 0x81067f5f, 0xf596ef1f,
+0xd520de7c, 0x81b2bdc8, 0x7f8130dd, 0xfb7f8c41, 0x99816bf6, 0x695a60e3, 0x7f7f817f, 0xed987f26,
+0x8f818166, 0x417fecfc, 0x99ec379b, 0xaf7e7f7f, 0x360dd75f, 0x787f44f6, 0x81837dfb, 0xd481a681,
+0x81819d17, 0x8781322c, 0x7f7fac0b, 0x0fca7f3c, 0x40b5e37f, 0xf27f2ebc, 0x41c19a41, 0xa6c1d381,
+0x066de9ec, 0x32b8a8bc, 0xdac61ba5, 0x812d817f, 0x87b9821c, 0x817f7920, 0x7f557fbc, 0x81738181,
+0x918fb098, 0x81ab890a, 0x81b881bf, 0x7f7fc57c, 0xcdbde77f, 0x7f5f7f3a, 0x36e59a81, 0x19a6818f,
+0xea7fd850, 0x20606e2b, 0x927f9c22, 0xd27f819f, 0x3c4f7f73, 0x168176ce, 0xf0687f81, 0xec5f7faa,
+0x23aa789e, 0x2de996b5, 0x7fd07f7f, 0x9785c156, 0x1eaa3235, 0x9781ea6f, 0xb2de74fb, 0x815a98d6,
+0x8cb77f92, 0xbba85fa1, 0x61afcac8, 0x81817f64, 0x8149865f, 0xa251b7ee, 0xec7f8181, 0xf8ad7f84,
+0x5c8142bf, 0x212981f2, 0x7fc9e381, 0x37338e67, 0x7f3099c6, 0x4fcdb7d1, 0x0381c0fd, 0x778181e4,
+0xe7702cfb, 0x61c21e43, 0x3f987f81, 0x3a0b815c, 0x7d12b8c2, 0x70097f0a, 0x7f244d63, 0x2d81187f,
+0xd87f6f7f, 0x7d267f2c, 0x8eaf2486, 0x87d8013d, 0x85367f7f, 0x81817f85, 0x0a067081, 0xc96f952c,
+0xf3c14485, 0x227f4330, 0x60ec8124, 0x41f181f5, 0xdb6b7fce, 0x819467cf, 0x81bc7fd4, 0x2c7a7f7f,
+0x936ade53, 0x7f81d04a, 0xc4817f81, 0x7fdd78dd, 0x81d3a47f, 0x917fade5, 0x6c81510e, 0x5b16fd74,
+0xbe987f81, 0x7fb17404, 0x7fb8e8f7, 0x7fb381e6, 0xc479b081, 0x5c81eee7, 0xb19b0e81, 0xa381d4df,
+0x81bacf5b, 0xa67fb9a6, 0x817f11a0, 0x8d565870, 0xce818181, 0x7f4aec3f, 0x813ab981, 0x1b495a42,
+0xab7f928d, 0x8106014f, 0x47c3e57f, 0xc418f11e, 0xc9149f6c, 0x1a7f1e25, 0x7f3f2d81, 0xcd5e462d,
+0xa8d57f7f, 0x812481db, 0x7f697f97, 0x69cdb891, 0xc31fc20d, 0xdc812b31, 0x57e37f81, 0xa09681aa,
+0x7f817f88, 0x1ae385f8, 0x81607f7f, 0xfb752e65, 0xd8dd8141, 0xb17f50a9, 0xb8737ffd, 0x1f7f815b,
+0x7f9a9e69, 0x06448b38, 0x8109a225, 0xb69e064b, 0x9881537c, 0x59dee4d8, 0x89c6a692, 0x0e817b7f,
+0x81816b97, 0x5de37fd1, 0x7f2c7ca9, 0x81784402, 0x29bd477f, 0x81a07ffe, 0xe822df7b, 0x7f29813c,
+0x6d81cd81, 0x1a811bd2, 0x6b2c7f26, 0x7881a08f, 0x7fd0e88e, 0x27f87fce, 0x8181a67f, 0x7f7f5558,
+0x81668133, 0x8181bfca, 0xc3792d58, 0x2b7f2d7f, 0xa3d09230, 0x9fa56824, 0x7fd5ce7f, 0x251b3cbd,
+0x7f818117, 0x7fac9e26, 0x7fde26f6, 0x4b7f837f, 0x1fd52b94, 0x067f6698, 0x70817f7d, 0xdd9658c1,
+0xe5a99465, 0xa59cdc55, 0xc1c2c7ad, 0x147fb081, 0x234cc657, 0x7f7fcc0f, 0x7f3d8150, 0x41b68131,
+0x7ff58184, 0x90a57f2b, 0xdd9d9b7f, 0xc4518163, 0x428aa37f, 0xe866ef52, 0x7f6c3644, 0xa09d3b50,
+0xf2c4adad, 0xc87ecb45, 0x7f70bace, 0xd5697f2d, 0x297349a8, 0x6bc11cc9, 0x8cdd0ed3, 0x7f53646b,
+0x8129b9b8, 0xce65a8d3, 0x20208181, 0x817f5272, 0x6ad11bf4, 0xc58151e3, 0x361572c9, 0x81acd996,
+0x6e819466, 0x7e7fbd3f, 0x70bb9ffb, 0x818b810a, 0x879a7f15, 0x8c884edb, 0x9028ae45, 0x81d8f525,
+0xaaa781b9, 0xaf1a0108, 0xe2817f73, 0x24b67f8b, 0xf08fd77b, 0xd21a265f, 0x7f3930ba, 0x5f5d9287,
+0x66218181, 0x7fd219f3, 0x6f44f09e, 0xbcc58157, 0x7f71b94d, 0x817f81c5, 0x877f6064, 0x81647f81,
+0xe5ba2e34, 0x889f8722, 0x27819f0e, 0x7f4c0eed, 0x2d7f79b6, 0x81b1813c, 0x647f3d2b, 0xe92b68be,
+0x81375804, 0x6798c04b, 0x2b596e7f, 0x78c97f7f, 0x812c367f, 0x7f468112, 0xa352d785, 0x3806f2a4,
+0x6886a9a9, 0xa31540ea, 0x5ab2478d, 0x9f5be57f, 0x9ab06481, 0x1586279b, 0x86267f81, 0xb3a50944,
+0x788a485c, 0x838d4c30, 0x8ad44b53, 0x923fb39a, 0x95035381, 0x81817ff2, 0x7f44d5eb, 0xf17f8164,
+0x39d117c0, 0x37ea75f4, 0x81927387, 0x817f0623, 0x909f7f0a, 0x997f7f19, 0xc1983b18, 0x60937f6b,
+0x8829c381, 0x8e6bf066, 0xfb7781ab, 0x43408e7f, 0x7f7f811d, 0x33eb81d0, 0xb86459a9, 0xadba7fe7,
+0xc1967f48, 0xfd0284bd, 0x4bef335d, 0x1b7f5f81, 0xa2c2c237, 0x81b0816a, 0x42ee5c7f, 0x7f009323,
+0xb7ced44c, 0xd90958e2, 0x3af8aae8, 0x7fb82e90, 0x989a7f7f, 0x14a22c68, 0x60ef7dc6, 0x7fb9ad00,
+0xf9e3c69d, 0x5e3bafbe, 0x81178a81, 0x92c4df02, 0x159d7fb6, 0x812b8103, 0x616f9df5, 0xb9a1df61,
+0x454b3944, 0x98931fbe, 0x4aae7061, 0x9572a681, 0x8178a8a8, 0x81fe821f, 0x817f7f6f, 0x7f787f2c,
+0xe584ff82, 0x7b7f5637, 0x9e815970, 0x517d8181, 0x97faba7f, 0xcfe3ce17, 0x9181f55f, 0x857f5f3b,
+0x81b17fbc, 0x7f1d7fbc, 0xae7356c3, 0xe4f77f8b, 0x4695f639, 0x8ddaec55, 0xa5d4a381, 0x7f7fa418,
+0x4d818145, 0x3881723b, 0x7f507f7f, 0xa5357fca, 0x81dbdc3b, 0x0b187f28, 0xb0325181, 0x09dd795f,
+0x2c39b28c, 0x9ac18118, 0xda52b681, 0xa3819da8, 0x7f884525, 0xba0d7ffe, 0x50ce81ad, 0x60e70e69,
+0x110b81ee, 0x8181871e, 0x7fe07f81, 0x8d1c4dde, 0x67649db7, 0x6e4669c2, 0x818144a4, 0x7f6c9cb7,
+0x51b57f81, 0x36aec300, 0x7f7f8141, 0xc5eca67f, 0x14ac7892, 0x7f35e9c2, 0xa1fe9a81, 0x6a867f7f,
+0x767f817f, 0xc447c4bf, 0x067fedca, 0x8c024b57, 0x7ee5707f, 0x52589b51, 0x0f81b28f, 0xae7f00a7,
+0x5244c5ca, 0x9671812e, 0xf0631d15, 0xb4aba1b8, 0x7f9a3488, 0x81b485b8, 0x392f2d7f, 0xed7fad81,
+0x6055a4e1, 0x60f300c7, 0x7feb817f, 0xd1e0917f, 0x544ca57f, 0xd23d7e0d, 0x8110c8c2, 0xbc24a615,
+0x3d817f33, 0xbdabcdfa, 0x5ac3817f, 0x81817fc0, 0x81817f76, 0x837e43d9, 0x7f817581, 0x5da96bcb,
+0x3e1c888f, 0x7f7fa6f5, 0xb769907f, 0x8eb43d8e, 0x3a7f3e81, 0xd5845510, 0xd07f5a54, 0x14816ab2,
+0x8126c62d, 0xbeda8f5c, 0xa0007fa8, 0x818d7281, 0xad72917f, 0x49c2f650, 0x87969baf, 0x7f53e975,
+0xe78de481, 0xe5083e96, 0xb635812c, 0xafdf4bb9, 0x26ac8101, 0x82b0a109, 0x7f8101b8, 0x607fc7cc,
+0xc3b1892f, 0x7d0dd911, 0x396d8195, 0xcb84b67c, 0x3f7d3681, 0x43a07f25, 0xd020297d, 0xb7d65b81,
+0x455a99e2, 0x509e4611, 0x817f81e0, 0x81da81a8, 0x8147b5c1, 0xbc81a5df, 0x489c6781, 0x8150a908,
+0x4fc0e681, 0x814b8163, 0x11457f8b, 0x9a77db7f, 0x81fa0681, 0x7f867010, 0x852b87c5, 0x3d8253dc,
+0x91165c42, 0xc6ac7422, 0x727f0774, 0x7f497f67, 0x1a3fe7c0, 0xb39c81d1, 0x44a47f7f, 0x7f8b066f,
+0x7f787ab5, 0x7ba7e870, 0xb932a3a9, 0xc85c1149, 0x32a28d61, 0x7f8d812c, 0x93819da2, 0x647ae98c,
+0x777f7e9e, 0xbb447fec, 0x2fabd677, 0x3a7f8c08, 0x51707f28, 0x35f81da5, 0x0b3cfc4e, 0xd4179b8a,
+0xef77a6ca, 0x31e02016, 0xdbb81924, 0x7189b4a7, 0x08a671aa, 0x817f2352, 0x81819e81, 0xec7fb7e4,
+0x81699ae5, 0x88817f20, 0x5ee7d181, 0x944d8150, 0x7f7f52d9, 0xc1815114, 0x3281a67a, 0x52fa8181,
+0xc4e47f7f, 0x7f824cbd, 0x728cf0bf, 0xa6b20e46, 0x9ffe631c, 0x639d82cc, 0x7f638136, 0x7f7f3381,
+0x7f8b4794, 0x44b19231, 0x08da5253, 0xaa815d93, 0x7f6a7f69, 0x616331c2, 0xdaa47fcf, 0x7f272255,
+0x3ee4098e, 0x819527f9, 0xcfc187a5, 0x7f81e781, 0xf71402bd, 0x9fecc0d1, 0x2067a67f, 0xa1dace7f,
+0x42817fa8, 0x8181da23, 0x03818181, 0xc35e3af7, 0x941d7ff3, 0x817f33e5, 0x7f2a78f7, 0xbe426494,
+0x0d2d81a4, 0x817f20cf, 0x89b2f4d0, 0x8171ba71, 0x6245be4e, 0x817f8140, 0xed8a7f81, 0xdb097f7f,
+0x81724d42, 0xe0de7bfb, 0xde611f7f, 0x7f54d981, 0xb1d07fd7, 0x487f7fef, 0x7f2c8139, 0x7f7f7f10,
+0x3e812db6, 0x63447ff6, 0x7c7f277f, 0x9f7f8b7f, 0x76d47f81, 0x197faadb, 0x6a596c91, 0xc681a3a3,
+0x7da1d13f, 0x4af8d329, 0xaa819881, 0x7d7f7f4a, 0x3a9d1753, 0xaf818145, 0x5b905350, 0xee313e4c,
+0x3fa881c7, 0x437fc046, 0x817fbd9a, 0x30f77f14, 0x3e81df81, 0xa42f4e53, 0x7f67d481, 0x4f4b819e,
+0xc5474f22, 0x6eed3376, 0x7fb1ed92, 0x5b3e813d, 0x6c8181a7, 0x720df323, 0x2d0e7017, 0xc7448174,
+0x817fb27f, 0xad7f01de, 0x817f10b1, 0xf6231fed, 0xa56d7a81, 0x98817fef, 0x8181e4a0, 0x48aa7fb1,
+0x81727f6a, 0xa4059a04, 0xe0cff64c, 0x5ea7679c, 0x69c1009b, 0x7f816b2a, 0x5cc58181, 0x7f954f3d,
+0x848b7f17, 0xaee99031, 0x7fbb951f, 0xa2da7f73, 0xc5acfb81, 0x2c1ecf2a, 0x6981fdba, 0x19897fb2,
+0x7f4af7a6, 0x85cd8129, 0xe845e0d5, 0xdf658407, 0xd522567f, 0xab242f2e, 0x7fdd70a4, 0x1c9d038c,
+0x2f923f85, 0x27f1b2c5, 0x7f812781, 0x7f818181, 0x7f9603cb, 0xc32881af, 0x5ad1f2a1, 0x61b99a32,
+0x57cd3bfb, 0x7ffc6fd7, 0x9e81c281, 0xfea25f7f, 0x85a2bfe7, 0xf3ef5107, 0x7f81565e, 0x1d447f81,
+0x814e817f, 0x40853b00, 0x6caf3f7f, 0xe13a7fe2, 0x816162c5, 0x81b4e66f, 0x81e18fb8, 0x7da87f66,
+0x7fef667f, 0xea2ba1f4, 0x408181d2, 0xba81676f, 0xdd817481, 0x7f9d7fff, 0xd6544752, 0xb1bd61ce,
+0x3aa26a56, 0x817e9bf8, 0x8158964a, 0xa0d3253c, 0x308130f6, 0x501d7f23, 0x367f34d2, 0x818581eb,
+0x817f4743, 0xdf9e81c8, 0x5c7f13a4, 0x12182965, 0x9cca6c7f, 0x47817f4f, 0x0cbd2918, 0xb05cf048,
+0x28810542, 0xe8a869a0, 0x7fb7682b, 0x3f7fb5d1, 0x813dc581, 0x2ea58dcb, 0x4a4e6881, 0x687681b8,
+0x665c057f, 0xa78127e0, 0x81b28166, 0x817f686e, 0x7f261637, 0x81ad7131, 0x54817f68, 0x2eae6470,
+0xf110f558, 0x8aa44bc2, 0x81f12d55, 0xf37f35cd, 0x7f68679f, 0x3fc481db, 0xe02a5a41, 0x817f8130,
+0x919b6c25, 0x14749815, 0x9d7f6c60, 0xc5777fa4, 0x7f8db49b, 0x524a623d, 0x2ef59cc0, 0xd18f4a24,
+0x37707f92, 0x4af98126, 0x81b43c76, 0x7f965818, 0x7f81c786, 0x057f7f49, 0x1ab3ef1e, 0x1781eb8a,
+0xf0909181, 0x656da3b4, 0x5181cbb9, 0x81e9a237, 0x8ace577f, 0xd19f2c0c, 0x86a7817f, 0xb7628100,
+0x817fcfa2, 0x7f3cddf4, 0x744a96d2, 0x19967ffb, 0x7f7f813a, 0x81f2b043, 0x7d46cd81, 0x7f006e7f,
+0x363a7c9b, 0x9f812dda, 0x77c77f1c, 0x8172274d, 0xa7ba8181, 0xff7f1ea8, 0x817f4e7f, 0x377f9ad7,
+0x81447f7f, 0xf81f3623, 0x58ce7f7f, 0x8a4a208f, 0x4d938115, 0xb0ac7029, 0xf72b23cf, 0x467f9dd3,
+0x81819130, 0x62bb7fc4, 0x84b4b190, 0x2781b070, 0xdc81cda4, 0xcb8145b3, 0xda812b38, 0xda307f7f,
+0xafbc8158, 0x7fcb7205, 0x47e781ae, 0xc056245f, 0x81997f56, 0x51d9d6f7, 0x7f895c4e, 0xb7814beb,
+0x7f658181, 0x757f81bf, 0xdb81816e, 0x582edcb2, 0xb9860711, 0x587f81ca, 0xa45a7f0b, 0x81e68a35,
+0x7f7f3b0f, 0x3c233956, 0x7f5f3e44, 0x81ba915e, 0x9ea4dd81, 0xc323e1bc, 0xb1ac0322, 0x5e7f0e2b,
+0x6b2fb26b, 0x0e92812c, 0x667fc670, 0x7faccae4, 0xce8181be, 0x815e68c5, 0x273e7f69, 0x10f3e868,
+0x47a8b82c, 0xc8817f10, 0x617281b6, 0xe4a44714, 0xe19d4b04, 0xe8818333, 0x81e481fb, 0x58ab1e34,
+0xa68c8168, 0x815f4c12, 0x181be656, 0x817f147f, 0xc87f7f8b, 0x7f7fa1f9, 0x7c787f7f, 0x7f7ff8ae,
+0xcf9200a0, 0x7f65e492, 0xc37fbb89, 0xa234a37f, 0xa27f7fb4, 0x818a6fb3, 0x04817fb5, 0x2e7f636a,
+0x905db630, 0x812bbc0d, 0x8f1beb7f, 0x7f83bb5c, 0x81b96698, 0xbebc8152, 0x6669b181, 0x7f81452e,
+0x6861ca81, 0x8806d040, 0x4570d28b, 0x812681fe, 0x817e3b48, 0xcd3ddb3c, 0x21777fa4, 0x7f3d81cd,
+0xae0d8104, 0x8134b9fe, 0x7fb87fdc, 0x81ac7f6a, 0xbeec5c5d, 0x60cc4dfe, 0x967f82fc, 0x3a8187a2,
+0xe9577fa9, 0x818181a5, 0x8181b4aa, 0x81204f7f, 0x81708146, 0xbe08d127, 0xdec25eca, 0xc3e0db2f,
+0xf7817f4f, 0x7f5da797, 0xc9c44f90, 0xbc5b84c5, 0xda44a681, 0xcc9b810a, 0x38c5c3c1, 0x674294d4,
+0xe47fe181, 0xa4817f54, 0xcd06b70a, 0x5b3d7f7f, 0x7f137fbc, 0x38769bd6, 0x817f811d, 0x6bc4046f,
+0xba9a7f2d, 0xca91ea2a, 0xe881d10b, 0xf9810daf, 0x42e57f81, 0xc87c147f, 0x4f817f7f, 0x66c7bd81,
+0xb991549f, 0x816febec, 0xdc50fd71, 0xaab9aa83, 0x7fbd3952, 0x7f50c6cf, 0x1481f8a2, 0x3ff381ab,
+0xf6b0470a, 0x81e12ebc, 0x817f5f39, 0x3c7fb836, 0x79813781, 0x7fefa2c9, 0x7e81030d, 0xb849bf3e,
+0x81613b40, 0x08817f32, 0xbd5ffe64, 0xd688be81, 0x0166eab3, 0x81077f0a, 0xe5817f56, 0x91b83696,
+0xac8181d6, 0x5732a410, 0x40818181, 0x81915bb3, 0xa87f8145, 0x816a06ce, 0x033f7d7f, 0x86438281,
+0x3afc477f, 0x9cdf8102, 0x786d4383, 0x81ecfe7f, 0x89a7471d, 0x349c7fd7, 0x7f377f3a, 0x51d55daf,
+0x62337f61, 0x547f910e, 0x4bea99d2, 0xbfa9557f, 0x7f2ee96a, 0xcb9ccccd, 0x0fc3a681, 0x687f30fd,
+0xb07f4530, 0x83814334, 0x955bf3c6, 0x7f909981, 0x7b4e617f, 0x7659def0, 0x05565d7f, 0x81f271cc,
+0x31ca8581, 0x1afd6755, 0x818181ff, 0x7f8181ef, 0x7f7f77ce, 0x1d6692cf, 0x73c57f7f, 0xf083d53f,
+0x707f9c09, 0x1ce6813b, 0x17bcb0c6, 0x15b59ba7, 0xde7f817f, 0x7f925801, 0xeba91d81, 0x568190e4,
+0x7f817e72, 0x8d7ffa3c, 0x82f281fb, 0x548a7f7f, 0xe9777fa8, 0x407f16cd, 0x7e8167a8, 0xc5812c04,
+0xeae8e181, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0xf481c67a, 0x89b07fc3, 0x2e4d7b44, 0x9181818e, 0x83b88158, 0x8181739b, 0x4aa97f6a, 0xb36d7fa6,
+0x075bb9b8, 0xcf839c97, 0x20ba9176, 0x117a6a6d, 0xc35b571e, 0x8181fb5f, 0xec3a7fa9, 0x3eec7fcb,
+0x53622f7f, 0x8681a07f, 0x0ff39b65, 0xb6c82420, 0xefb381d6, 0xd5390961, 0xf1887f0f, 0xbec2abad,
+0x64d57f81, 0x2281057b, 0x1f7fe936, 0x99038181, 0xc1450b81, 0x65cc5386, 0xb886acc1, 0x109dde08,
+0x6417d0dd, 0xcb7f7f76, 0x9681b397, 0x53c39e81, 0x437fd46e, 0x751c3481, 0xdd4c813c, 0x38817f65,
+0xf0b39281, 0x31357fcc, 0xf37fae31, 0xba8181e7, 0xefc1e23b, 0xa8650162, 0xbb815838, 0xd6eeba30,
+0x8de2db81, 0xc01ca018, 0xed2dfb81, 0x7f84731f, 0x40633968, 0x557f2c0f, 0x5f5c6374, 0x41b02181,
+0x447f6e09, 0x8d56b3a6, 0xe47f81bd, 0xbe712a04, 0x3e7e78b2, 0x470a8128, 0x298981b3, 0xe65e2866,
+0x5bad817f, 0x78884acf, 0xb521287f, 0x81df4b81, 0x33818184, 0xb3d65f30, 0xc2ed7fbc, 0xc4811ccd,
+0xec92a11d, 0x5bc5817b, 0x57ad8103, 0x7f6226e4, 0x07c2d87f, 0x3d7fb181, 0xff73974c, 0x08815f61,
+0x50616fbc, 0x923ec16c, 0x2181c73a, 0xb8f88181, 0x047f7f7c, 0xe87fa53d, 0x399ca775, 0xef139839,
+0x7f217f43, 0x811415c8, 0x37f6b9c6, 0x7f7f817f, 0x11c6ad81, 0x81b3c88d, 0x0ca47c91, 0x024a7604,
+0x2dda2f7f, 0x811c5281, 0xc27f814b, 0x7f37a317, 0x42433381, 0xddfbc9da, 0x23737f7f, 0xe3acaecd,
+0x1d846dc3, 0x7f1bd4e3, 0x4055d37f, 0x7f1e9a81, 0x31d29025, 0x6d90cf64, 0xdf532fea, 0x1a3dc07f,
+0xf997ee19, 0x597f5460, 0x2a0b5b30, 0x5751a381, 0xd27f8489, 0x7f1f9ae4, 0x2bd07f7f, 0x5281923c,
+0xbb7fae6b, 0xd344db7f, 0xb0431bc3, 0x33744f44, 0xd2996981, 0xce048861, 0xd57f36c1, 0x1181819f,
+0xe7592aef, 0x7fbe817f, 0x177f7f04, 0x2cc78381, 0x27558149, 0x7f64746e, 0x2bb97dc7, 0x10aba652,
+0x02f70c7f, 0x7fd8ae81, 0xcd81f07f, 0x9352c65e, 0x344a817f, 0x4f282881, 0x3e2d23fe, 0x69517fb3,
+0x7fd1e4a6, 0x3bf47f7f, 0x33909ff6, 0x81aba30c, 0xaa8181dd, 0xf6f36f81, 0xb57f7fa4, 0xe8b6a572,
+0xb0ac3a81, 0x81817413, 0x104de181, 0xef977fcf, 0x2054acbe, 0xa9013b81, 0x4a7f34fe, 0x49747f6d,
+0x5ad74c74, 0x0e582cc5, 0xa7ea83c9, 0x7f7f750e, 0x38847f81, 0x41817281, 0x3344debd, 0xd229728c,
+0x027f839f, 0x5d9fc4ff, 0xdb1e7f51, 0x439681ca, 0x1b81b581, 0x7f777f9f, 0xd6bfb07f, 0xb050814a,
+0x1981648e, 0x2cca7f56, 0x30814ab8, 0xc48143a5, 0x5bab3c8b, 0x7f97249c, 0xf7f09eb9, 0x074aa055,
+0xc86bbe54, 0x815ed4b2, 0x7fd0b28f, 0xd7e07470, 0xcf815cc6, 0x7e7f7f63, 0x81dfe09b, 0x4098da7f,
+0xc1d5a339, 0x7928937f, 0xa481bf6e, 0x81accdd5, 0x3b9c5cb7, 0x817f81a8, 0xc49b8a56, 0xec817f93,
+0xb7a9306f, 0x43555d59, 0x5f815d7f, 0xb79e7fc8, 0xc9d17fa5, 0x7f9999b8, 0x73467f81, 0x38627fd7,
+0x7a9138ab, 0xbd46b67f, 0x71b5823e, 0x5c81fbb3, 0x3d508f7b, 0x8173c630, 0xd4816172, 0x3f81d92d,
+0xcd6b9d7f, 0x72ee607f, 0x851bd2ce, 0xbc4e7fde, 0x1c6c8193, 0x9252b181, 0xcc217f96, 0x296d7f98,
+0x5957ae75, 0x817f14bb, 0xbf7b8191, 0x44d87f81, 0x352a84fa, 0x524a882e, 0xae66ac81, 0xbc7fc592,
+0x2cb1dc81, 0x42e56887, 0x7981b76a, 0xa5667333, 0xd2ab7e7f, 0xe37f817c, 0x26817f7a, 0x23570c5a,
+0x1a685290, 0x814bb476, 0xf6527bb0, 0x7b810805, 0xe63cd272, 0xc1d97fb5, 0x1e8124d3, 0xd34f157f,
+0x367f82a6, 0x14819e1e, 0x39226087, 0x818681da, 0x8121417f, 0x81186358, 0x57e88e9c, 0x077f6e37,
+0x96087f7f, 0x9a82c77f, 0x0781c4b8, 0x81217f81, 0xc565447f, 0xab41447f, 0xf7915a1f, 0xe58b5a81,
+0xcb817d13, 0x81819181, 0x0d4cc8ee, 0x63b4f36e, 0x587f81b3, 0x47947981, 0xcc7f7f88, 0xa3816781,
+0x2581a481, 0xc750aa5c, 0xcd440151, 0x4d684b81, 0xd8a6b50f, 0x4f7fa9b5, 0x3f1b93cf, 0xb58f812e,
+0xaacd817f, 0x136d7fa9, 0x81b581b9, 0xb75d7fd5, 0xd29a8133, 0x337f8ec0, 0x75ac901a, 0x0a6d7fb2,
+0x2d208186, 0x8186b97f, 0x0cb42af4, 0x03ac9e7f, 0x1167a77f, 0x7f19fad8, 0x2d4781b9, 0xf97f6658,
+0x9081bc81, 0x6f7f4087, 0x8e527f8d, 0x6873d688, 0x817fbc88, 0x4b81608a, 0x22814ea0, 0xe97f81c5,
+0x10bb3de5, 0x818115ed, 0x421b3081, 0xa17f6e52, 0xf46d7f09, 0xd87f8185, 0x68a79c2b, 0x7f7f837f,
+0x4d7f4967, 0xc8507f81, 0xc67881f1, 0x0dad027f, 0xad7f8125, 0xa47fb3aa, 0xc74ceb01, 0x373974a7,
+0x4e7f8153, 0xa87c423b, 0x2ad75081, 0x4a817f35, 0x357f5517, 0x55816581, 0xcb7fa97f, 0x7b797f7f,
+0x504ee981, 0x7adecc6b, 0xce338197, 0xc49ab9bd, 0xe3877f3e, 0x7f817f81, 0xea3624fa, 0xfd889681,
+0xfd7f7ff9, 0xa6732f2f, 0xa81b81e0, 0x817f7fae, 0xbcb4b010, 0xc37f7ff4, 0x9a52819e, 0xc730eaec,
+0x437f817f, 0x8865815a, 0x53337f64, 0x2d817f8f, 0xd7e2417f, 0x2b6ec637, 0x3d819d41, 0xe4877f52,
+0x26a26a63, 0xdc74a867, 0x71433c4d, 0x94b02e35, 0xd6a155f5, 0x3cd37f73, 0x5c89997f, 0x569e7f03,
+0xe0adb53a, 0x49d78e81, 0x9c7f22a4, 0x01385b17, 0x9081db0e, 0x3a8c46a9, 0x1e016448, 0xfebdc287,
+0xf072dec5, 0x9f816429, 0x4bb0817f, 0x5a0ac21a, 0xf74ca8aa, 0x82811b7f, 0x1c08bf81, 0x1fe36842,
+0xbc7f8181, 0x373b7f21, 0x097f8150, 0x54935b9a, 0x3a7f1381, 0xceb77ff1, 0x55ab4e7f, 0xcb1ab47f,
+0x9f17477e, 0x7374b97f, 0xb2512a9d, 0x8148d0d2, 0xc057444f, 0xe94f8162, 0x7f816e40, 0x1da3d14d,
+0xb458eb23, 0x7f907a47, 0xc8657f81, 0xc4f36ba1, 0xbc8181e6, 0x81efd081, 0x53607fa6, 0x51368181,
+0x32b84fdc, 0x8127b27f, 0xd2247f5c, 0x56f77669, 0xecfdfe9d, 0x697fc681, 0x7f7f2781, 0xdb7f371a,
+0x1b8181fb, 0xea78f17f, 0x5a187f7c, 0x7f7c705e, 0x2b58816c, 0x757f607f, 0xc87fc97f, 0xec5b66a6,
+0xb89574c0, 0x77ccb739, 0xe1fd2aa7, 0x7f45d8cf, 0x20283a95, 0x5f45909a, 0xe581748f, 0xf97fab7f,
+0xc8e0357c, 0x73c7da4b, 0x7f0e812e, 0xba01c67c, 0x0324fb81, 0xea2ee62f, 0x253e88ae, 0x077f81cc,
+0x817fc781, 0x94f17fff, 0x0ed6f0f4, 0x84677f8f, 0xb598462a, 0x81a87fec, 0x3dbaa47f, 0xbd62fd81,
+0x277f81ae, 0x0f43ab6c, 0x07c1617f, 0xc7bc8181, 0xba2460fa, 0x6d0df181, 0x92dfa27f, 0xf7d1033d,
+0x53b71860, 0x81b28146, 0x6564b13a, 0x7fcf0981, 0xf9004e81, 0x10229f55, 0xdb7f7f05, 0x281845aa,
+0xca6ddfab, 0x6481d77f, 0x4e4120ab, 0x81006b47, 0x518dce81, 0x907f8145, 0x4d5e7f2a, 0x064076ad,
+0xdc1c8e81, 0x354b2545, 0xc8426706, 0x4447b281, 0x21daad30, 0x6d21a202, 0x04249781, 0x19ac7063,
+0x3a7f00a0, 0x81f41c2f, 0x523c5b36, 0xb6817f43, 0x3f817f86, 0x811234c1, 0xd77fd8a3, 0xc2f2927f,
+0x557f7fb1, 0x41d9e881, 0xeec06dde, 0xe4633caf, 0x3c72fd19, 0x9cffc57f, 0x4b7c8659, 0xcbf6e57c,
+0xd9bb7f7f, 0x5fdd819f, 0x69ecd881, 0x7f817f90, 0xe68181f7, 0x10811439, 0x2a7a2c37, 0x66438157,
+0xbc634044, 0x3fe98163, 0x81ac8166, 0x8143811d, 0x364f63d5, 0x377f4e81, 0x24755bb8, 0x17adfc5a,
+0xdedea8e0, 0x2e6e9e14, 0x0e816188, 0xe56cbe81, 0xb9c85cca, 0x817fb120, 0xae7f8181, 0xe37f7f7f,
+0x0827d377, 0xdc7f815d, 0x9a2a5625, 0xb2ab4736, 0x817f2274, 0x817fb29c, 0xdcece2cf, 0x5bddd988,
+0xcc1e817f, 0x24793971, 0x40ea214f, 0x326f7e64, 0x1a9e3ed0, 0x268c1a7f, 0x314b8548, 0x0f7f8701,
+0x347f7c7f, 0xb9ca13bb, 0x3492187f, 0xc67fa67f, 0x7f49814e, 0xaa7f91d1, 0x18d53577, 0xc88b53a1,
+0x20af691f, 0xe6853f81, 0xcf5d08bf, 0x81705981, 0xe30d47f0, 0xa8c17fa5, 0xdb817fb4, 0x12818172,
+0x83433d81, 0x4a7ff6e4, 0x327f937b, 0xbb6509d3, 0x2c02fc7f, 0xf3b7feb4, 0x415d60ab, 0x136551d0,
+0x0e818181, 0x832eb165, 0x3c7f7f82, 0x817770ae, 0xf9b3a525, 0xddcb3d02, 0x008b6181, 0xdbe55445,
+0xec8137cb, 0xf5e8f76c, 0x61815ac9, 0xc8810fb0, 0x35547799, 0x0d81a035, 0x1c3eea42, 0x88d77f96,
+0x2948677f, 0x0852db6e, 0x42367f74, 0x0d7f867f, 0x3a7fff81, 0xd1d1d271, 0x02a981e5, 0x2bfd2988,
+0xd3818199, 0x4e7f4010, 0x09297fd4, 0x7f816b68, 0xe60a05ca, 0xeb81c97f, 0xa2cf5e81, 0xfd7f7f52,
+0x07057f53, 0x8672147f, 0x389bd281, 0x3a855c79, 0x41328181, 0x21fe5e81, 0x073f7fc3, 0x3654f170,
+0x24bc7f7f, 0x4f816ee1, 0x67427f7f, 0x8140676d, 0xef53e53d, 0xa48bce41, 0xdb5832d8, 0xf64a58dd,
+0x3d3e70ab, 0x53ab4a16, 0x137f2023, 0x74569bad, 0x017f8138, 0x81902bed, 0xbdca8f24, 0xa34d8193,
+0x81cf7f90, 0xab43b32b, 0x003f7f49, 0xbf3d4570, 0xbce67f72, 0x7c197fc0, 0x5376d47f, 0xd53d7fa8,
+0x7e9bb47f, 0xd72aa0f2, 0xde818138, 0x5cc3a981, 0xd57d9c5d, 0xecb2d6a9, 0xf97f81e4, 0xb67f8f33,
+0xb92b577f, 0xc483f97f, 0x469d9481, 0xd4868181, 0x7031e886, 0x95b4ca8f, 0xc6d4bd93, 0x117f227f,
+0xb67e7fe9, 0x7fb35540, 0x07d01f81, 0xb4ce9b40, 0x1b118681, 0x7f657bfc, 0x2bbe817f, 0x287f176e,
+0x6c96bb02, 0xcfff9e59, 0xec7f647f, 0x817f81b6, 0xabd131be, 0x819081e8, 0x4724b87f, 0x5f817f3e,
+0x62b99368, 0x81818110, 0xc99ad3b2, 0x25816734, 0x0702743f, 0xe1b4812f, 0xcb787f81, 0xac864257,
+0xcb928181, 0x55e27b81, 0x26f281a4, 0x63898181, 0xbbe1e54a, 0x58d2f881, 0x49e3cc81, 0xf7ea287f,
+0xe9827f82, 0x81a69a5c, 0xdd81d292, 0x7f935122, 0xd17fdc3f, 0x321a8fa8, 0xfa819a81, 0x1fdb7f79,
+0xc6cf9b09, 0xe181547f, 0x0acacab3, 0x0e7f72ca, 0xf562e081, 0x13d3827f, 0x5692de7f, 0x35b04a5d,
+0xdd818e3a, 0x6b7f6160, 0x29303878, 0xc7687fbf, 0x38508173, 0xa6bd37be, 0x0d339881, 0x5e198614,
+0x67f745e5, 0x81081a7f, 0xa3c77d84, 0xaadb7f81, 0x578d7ff0, 0x75a0b54e, 0x0d01587f, 0xa1cb624b,
+0xf94181b7, 0x819d3a81, 0x37258135, 0x5aa87f3b, 0x0e7f7fd0, 0x5b7f817f, 0xda4ce9b1, 0x40917f81,
+0xc57f7f3e, 0x4115b391, 0x627b816f, 0xa8575f49, 0x99ec8124, 0x33db907f, 0x4973c07f, 0xc9b71548,
+0xde548177, 0xb608bfcc, 0x1d0787bd, 0xd37f793a, 0xfe847f7f, 0x7f3f4111, 0x3dd5de99, 0xbedb817f,
+0x4c65c24d, 0x5d2c813e, 0xd901b37f, 0x974c5c92, 0x7f7f7f7f, 0x812b7f81, 0x0057836f, 0xf0778181,
+0x2881f3c5, 0x35147581, 0x817fb31a, 0x7e7ae063, 0xfa1e4184, 0x7f1e787f, 0x529c5123, 0x553b9a81,
+0x12de4220, 0x1fa92f8f, 0x4d66a52a, 0x554a2f40, 0xad3f637f, 0xbc81257f, 0x04f8976a, 0x2aa1fc7f,
+0x3881b3cb, 0xcfc6ee25, 0xa37f5f60, 0xffddf08e, 0xce817f2a, 0x824f4685, 0x17b8a353, 0xa97f8a81,
+0xfc81dc7f, 0x7f81a5b0, 0x26fab529, 0x811085c4, 0x031c5a76, 0x817d7f95, 0x1a8181cc, 0xe6aa0f7f,
+0x1e454a7f, 0x1c98818c, 0x2b5ef528, 0x7f2b7f5a, 0x4ec7adbe, 0x6981f15d, 0x7f1d4b23, 0x0f337fb9,
+0x1db1b85f, 0xdb7fbe3d, 0x1eb3aff5, 0xba2a7f96, 0xd6a0761a, 0xa97f117f, 0x341781f3, 0x9981817f,
+0x56dd7f44, 0x431981b3, 0x115d7f7b, 0x7f5a818b, 0x943248aa, 0xb78a45db, 0xc2046981, 0x7a330a6f,
+0xedb7817f, 0x81198df6, 0x29e37f9d, 0x74818143, 0xe5147f86, 0x7fac4869, 0x39e2bb4a, 0x422b983d,
+0xdb949bec, 0x2c7f5181, 0xe7539781, 0x7f818ca1, 0x2e815f79, 0x2b199727, 0x28ea3181, 0xe65c7fa5,
+0xfda2810a, 0x7b227ff5, 0xc259caa9, 0xdf9e7f4e, 0x1c7fbc49, 0xce892118, 0x083b81e0, 0x7081b06f,
+0x0c7f507f, 0x637fae31, 0xf2967f40, 0x810181fa, 0x0878b37f, 0x98817f4d, 0xdaa946cb, 0xae813eb4,
+0x67bb97b4, 0xe156b2a5, 0xc97f6dcc, 0xa90bd681, 0x14817f81, 0x7f816058, 0x1d587f76, 0xea425425,
+0xd62e4781, 0x477fdca5, 0xcb667f7f, 0x8169daa2, 0x631cc881, 0x1381fb76, 0xf27f817f, 0xb297434b,
+0x3b1d7bae, 0x89b8d27f, 0x947f8bbe, 0xbc81d770, 0xf94f0724, 0x3cdc817e, 0xf981817f, 0x1b75245a,
+0xab20818c, 0xf2811c81, 0xfc78c27f, 0xb77f0881, 0x838132d7, 0x20818381, 0x127f815a, 0x8e9e267f,
+0x1fa87fc5, 0x1da2bf47, 0x0f81939b, 0x118ba016, 0x52c07c6d, 0x697fdd00, 0x0e617f92, 0xab812b56,
+0xce754988, 0x5e8181db, 0x22814b35, 0xfda981f5, 0xa981e85b, 0x477f93e0, 0x1e73d681, 0xdc748147,
+0x01a1479b, 0x6d4e8164, 0xd37f8b7f, 0xbf6b7f6b, 0xc68110bc, 0x81819156, 0x257f497c, 0xb07fa0c3,
+0xcac10d73, 0x39b9284d, 0xad817fd5, 0x7f8a5781, 0xe37f7f2d, 0x7f7fe7d7, 0x186d2c39, 0xc4813181,
+0x0ac8a47f, 0x569d4cd2, 0x7f3d597f, 0xb87fbe59, 0x35117742, 0xc18c7fa7, 0xb03bd7ce, 0xd77f6829,
+0xec3a7281, 0x81285968, 0xf8a69dd1, 0x81e38181, 0x4e1dbe77, 0x783d4e9e, 0x50bc7f85, 0x4181dd76,
+0xa61a4bac, 0x627dd07f, 0xa08168a3, 0xfefdde94, 0xcdc19d05, 0x39817f7f, 0x18d78181, 0x631681cd,
+0x91d2e1ac, 0xd2bfce81, 0x01f1817f, 0x810681d0, 0xccc18c5a, 0x7f0b617f, 0x563a8345, 0x917f26e1,
+0x87147fad, 0x88161a8c, 0x1481928f, 0x5c96b841, 0x1b7ff206, 0xf1bf4759, 0xc6efdc5b, 0xa1078181,
+0xf52b428b, 0x817f7f10, 0xdcd1c062, 0x0c863afa, 0xe26a27b1, 0xbd5a967f, 0xe8047fa4, 0x5be68181,
+0xef7b2e81, 0x60f47ff1, 0xf9761c7f, 0x8f297a94, 0x2c81096a, 0x7fc40ec0, 0xbb7fb3b8, 0x4494cae8,
+0x5847c916, 0x7d728183, 0xe74d7f81, 0x818d426b, 0x0006ba4d, 0xb4bd9ba6, 0xe00a813e, 0x68032681,
+0x18877f81, 0xb46281d3, 0x3381113e, 0x6a7f57f7, 0xe2a7194d, 0xcb7f7fca, 0x62c66981, 0x5181e082,
+0x6acdde81, 0xbcbbf27f, 0x3e3ea181, 0x81e6c17f, 0xf854097d, 0x307d58c8, 0x2e9e81a7, 0x93ce81ce,
+0xcad4d5a2, 0x1272b081, 0xb84ccf3b, 0x007f9d7f, 0xe294efb0, 0x830c7fb7, 0xca07b17f, 0x3b9a88bc,
+0x09350bd4, 0xae007f48, 0x3a6a8181, 0x38777f48, 0xce7f7f12, 0x9781570e, 0x3766817f, 0x4b607f7f,
+0xe38c2477, 0x9cc64b35, 0x157f0742, 0x6881d981, 0x53c88c81, 0x9f8974ae, 0xa77f8319, 0xfb527fbc,
+0xdf865f22, 0x407f8117, 0x5f5a4c24, 0x14207f77, 0xbfe8e081, 0x667f7f58, 0x31f17f37, 0x467f307f,
+0xed7f7f48, 0x2bc96b81, 0x04816572, 0x8137a081, 0xea407b02, 0x7f06814a, 0x097c819a, 0x3ef22003,
+0x077d1469, 0x32e06181, 0x1e7fa7de, 0xc7508658, 0x2d58ddad, 0x921d7e79, 0x3af381d7, 0x31b971a9,
+0xa7b38de5, 0xf274be04, 0x9690d41d, 0x857f4dff, 0xcf7f5cb0, 0x8117bc7f, 0x1b7f4a91, 0x288181d6,
+0x3f4d7b12, 0x7f7f979c, 0x4581b06d, 0x7faf7f81, 0xaf1dac81, 0x4d068bb7, 0x367f50bc, 0xdb7fbd41,
+0x4f7f7f38, 0x81ac15f8, 0x2c3725a9, 0x98577f4e, 0xb20f8197, 0x6f816981, 0x26c87aa2, 0x1781c2a1,
+0xf5449d24, 0x14cc2651, 0xe33d7f7a, 0x4d7f7ff2, 0x3554512e, 0x25c23b96, 0xde7f8d7f, 0x44bc34b3,
+0x2a7f3050, 0x1aec3857, 0xada98181, 0x2644d0ee, 0xcbc2333e, 0x7f8e7f87, 0x4aa03181, 0x11b736b7,
+0x3abb2381, 0x2f96817f, 0xe1476381, 0x83ca2b85, 0xfc3281ba, 0xa0d62222, 0xefbe354f, 0xa022be81,
+0xd77fb435, 0x17538367, 0xee85d87f, 0xa4a66071, 0xc8402561, 0x818a277f, 0x9d4ddd7f, 0x23bf327f,
+0x32ec1b81, 0x8c138d68, 0x237f1a15, 0x4a0e2b7f, 0x5b7f4981, 0xba747f29, 0xa1d37848, 0x0c7f03df,
+0x923f1d7f, 0x8faa7c7b, 0x35068194, 0x60d2f465, 0x27606681, 0x7fc37f8a, 0x31bd3bd5, 0xcf58445f,
+0xdcce241b, 0x88d9816c, 0xb9b2a7d2, 0x2205817f, 0x01ec81af, 0x4581c8a8, 0xdb0f1b6f, 0xca0628cd,
+0xf63881c7, 0x7fa3697f, 0x5246567f, 0xb77f7fa1, 0x297f897f, 0x4cb8c84b, 0xce9e977f, 0xbf811e0a,
+0x219d746e, 0x7adcfa81, 0xbcce1a61, 0x28987681, 0xfa7faeeb, 0xdb454581, 0xa5a5919d, 0xbf85a57f,
+0x34b68581, 0x925c812d, 0x189a4c7f, 0x3ad03756, 0xf8c97f13, 0x97227f81, 0xfd7f070e, 0x7c7fd41e,
+0xfe81b327, 0x81a6627f, 0x0016cf64, 0xded56665, 0xcc5fbd10, 0x81112f30, 0x4c6c877f, 0xefb9b53f,
+0x33bc2181, 0x5e797cc7, 0x2a10c047, 0x917a7f81, 0x3a81817f, 0x8197ccbd, 0x73df4f32, 0x264f522b,
+0xc1e9528f, 0xbb494681, 0xfbde6eab, 0x4f7f280b, 0x987a7f1d, 0x167f1f7f, 0x0b817f81, 0x8124a87f,
+0x927f8181, 0x7f81ad86, 0xc779dc81, 0xd0f681fb, 0xcfd8fb81, 0x19cf4981, 0xb87f4bf5, 0x4893529b,
+0x0040eb60, 0x7fc78bae, 0x474455dc, 0x0b7f7f7f, 0xa92f7f70, 0x7f3d9c73, 0x137fc699, 0xb4a3c6fa,
+0xcb7f5c1d, 0x9c259c99, 0xdf5183d9, 0xa62181a9, 0x9a273761, 0xa5e8cd8b, 0x41b4bf1b, 0xa9818110,
+0xb27f7f7f, 0x0e6bb7c4, 0x2e3e7f90, 0xa4c68172, 0xf73f7f7f, 0x81818164, 0x5299a157, 0x0a297f5c,
+0x9a46f024, 0x8178d77f, 0x02818181, 0x8181025c, 0xd96828bb, 0x77644a05, 0xb77e4427, 0x407fa650,
+0xab7f7f56, 0xaeeff9fe, 0x2f871d3b, 0xb8d7dd43, 0xe8817f7f, 0x7f4d0a27, 0x036a5b7f, 0xd4a3e67f,
+0x9122dd23, 0x627fb09d, 0x055c6534, 0x0689a1b3, 0x4f484a7f, 0x7f77b1e5, 0xc47f9d06, 0xd381c37f,
+0x3b819a7f, 0x4381827f, 0xed81817f, 0x95c3917f, 0x1b1e9ff3, 0x4c98cf7a, 0x81927f81, 0x703fd644,
+0xed5db0d6, 0x23225674, 0xf95a8187, 0xe6ac7f32, 0x488150e5, 0xf6b5e281, 0x29810449, 0xfdada97f,
+0xa6a19781, 0xfe36879a, 0xc96ac845, 0x3bf937bf, 0xa2812172, 0x84203765, 0x5f9e3403, 0x017f6a34,
+0x2cc6607f, 0x99877f7b, 0x2ad981d2, 0x81933581, 0xdf81338a, 0xd478056f, 0xfa7fa571, 0x1d818352,
+0x17c9db81, 0x9c7f7f52, 0xdf9bd4b1, 0x417f98a5, 0xca5517df, 0xc44b22d0, 0x58727f9e, 0x28817fd5,
+0x477f2fb3, 0x8b3f7f59, 0x0940765f, 0xa970b281, 0x0a0a7828, 0x81319e7f, 0x2dc34181, 0x4081668d,
+0xe7817fcb, 0x3641753a, 0xc17f3d81, 0xb88154b1, 0x73246eb5, 0xb0db8186, 0x09a2ec6a, 0xd82e2c71,
+0x336f7f81, 0xd6817f4e, 0x43814a81, 0xc579811f, 0xc0812803, 0x447fa1e0, 0x81b01b7f, 0x752152b3,
+0x4f15817f, 0xf97f61c0, 0xc99bd9b4, 0x2281307f, 0x607f8181, 0x7342b07f, 0x06bd9181, 0xb8b8bac5,
+0x3b3ca425, 0x8175e89b, 0xa50d7f4a, 0xbb56b781, 0x817fca6c, 0xac0fa7e8, 0x8e7894b7, 0x1d05cf50,
+0xf34f5b7e, 0xb65332e5, 0xc76e9375, 0x66d058a1, 0xe5a09f7f, 0xe493eaa5, 0x05fb81b5, 0x33817f14,
+0xee418105, 0x81a16f2c, 0x367fa849, 0x7aedbecc, 0x100f81c4, 0x81b2fb3c, 0xcc7fb181, 0x1b3ff096,
+0xd3b03a81, 0x4b815f75, 0x497f9c7f, 0x81015625, 0xa0817f81, 0xb0b8167f, 0x7eb7a3fe, 0xe97f6585,
+0x555a877f, 0x4b7f8181, 0x12a95e4a, 0xcb6f5afe, 0xd87b5fad, 0xbe655fa0, 0x8b81300b, 0xccb26381,
+0x39868173, 0xd6817fa0, 0xcf684e81, 0x7f3bae43, 0xfd8c20a8, 0xcfa0a31f, 0xc0307f81, 0x4e7f5d84,
+0xeb941e81, 0x6f77d070, 0xfa0b7f7f, 0x3cb88193, 0xf6818163, 0x3d5f8fbf, 0x40c4c73b, 0x4b7f4e4b,
+0xae968181, 0x818181b2, 0x81148181, 0x6b325381, 0xeae5582e, 0x7630307f, 0x1c0f6ba9, 0xa54a0457,
+0xd37fc693, 0x2345817f, 0xe44f1e7f, 0x3e4cc9cc, 0x3081babf, 0x817f45c7, 0x5265e57f, 0x427f81ca,
+0xe451817f, 0x8190b7de, 0xf0718981, 0x7f9ad381, 0xb9645da5, 0x7f7bed3e, 0x5ce37af9, 0x307f4ecb,
+0x0a7f8135, 0x81ffa7a0, 0x534fa1a5, 0x7f42d641, 0x0e501e61, 0x7f7f6edc, 0xc859c081, 0x377f85f7,
+0x0df9ab94, 0x817f7f7f, 0xfe7f7f81, 0xc1327ff4, 0xe84c842c, 0x7f1e5bc4, 0x1da77cf1, 0x1e817f53,
+0x5f9ab07f, 0x86209e4a, 0x204e81cb, 0x33c9928d, 0x275a2d8d, 0x7f810281, 0xfe7ff7ba, 0xfab77fea,
+0x658145e5, 0x73d57fd6, 0x01ec097f, 0x6d83207f, 0x49810cb5, 0xb5d9ce39, 0xf9e0436a, 0x629c81ed,
+0x48cf2c8c, 0x7fe37f7d, 0x5b257d81, 0x765f50b6, 0x2cc98403, 0x817fbebf, 0x127114c7, 0x347f287f,
+0xd2d72990, 0x81099d9a, 0x8143cac5, 0x8c452fc2, 0xd27f8f6e, 0x61957f25, 0xbc6fc54d, 0xce81f31f,
+0xd381787f, 0xac0e5156, 0xf081afc4, 0xb47f6f2f, 0xef811a81, 0x015ee99c, 0xd46de9d9, 0x30655a6d,
+0x09b2b87e, 0xaf13753a, 0xc6dd2a7d, 0xb9bd8190, 0x81537f7f, 0x669ff8ed, 0x5b810c81, 0x42e5a566,
+0x0c4174cd, 0xbdf9b7bc, 0x1c118e65, 0xe28f9b2a, 0x8c207f3a, 0x1e7f7ff4, 0xd6b57f1c, 0xf0131edf,
+0x38c9a381, 0x7f40817f, 0xb735817d, 0xa1a864de, 0xd99cbf57, 0x811a81f5, 0xb381e381, 0xe88e7fb6,
+0xfb4a5b89, 0x7fc006a3, 0x1c81653d, 0xbceaa09c, 0x8d7799d7, 0x4b407a81, 0xdc81bca1, 0x3437d76c,
+0xd79fe5c9, 0xd9adeea8, 0x163e8164, 0x2cc1b281, 0x3e81e9f1, 0x936504c9, 0x047f6f7f, 0xf981a776,
+0x203b2d68, 0x81dde8d1, 0x13e9e9bd, 0xe04108a3, 0x50304281, 0x486d7fb7, 0xbbff81ce, 0x31d43aa3,
+0x21813c7f, 0x81c8f989, 0x7f481024, 0x0a979b6a, 0x010081d7, 0x7792e641, 0x0654d37f, 0xa53da946,
+0x96c781d2, 0x41524ca6, 0x2ab2d581, 0x1c65bb79, 0xaaa494af, 0x81e09681, 0x3d7facf5, 0x04c3818d,
+0x2c1461c7, 0x788581d9, 0x1d3f2081, 0x49dfaec2, 0xfdf07f61, 0x40396d32, 0xc8835fbf, 0x7fb19f54,
+0x2fcc8142, 0xe7dcb4e1, 0x73c3a807, 0x7fc68137, 0x061e81ac, 0x81c70c7f, 0x1d817f73, 0xfdbace27,
+0xa5fa4081, 0x9b5575ca, 0xee81cc4b, 0xe85a7f53, 0x8daff2e0, 0xd1d1067b, 0x98ec3662, 0xa5818253,
+0xf8266d26, 0x38da26ac, 0x3f98912e, 0x4a10f144, 0xd845a345, 0x811bd786, 0x4b137f7f, 0x3427fa81,
+0xe981eb9f, 0x8f4ce6e7, 0x277f8181, 0xb0a5b03b, 0xd0633788, 0x4781b17f, 0xb27f3ee7, 0xd37f30b6,
+0x21c3a15f, 0x0492a4d0, 0x097a6c7f, 0x047f1c81, 0x994b7f81, 0x818ea981, 0xd46d7fca, 0x2b8a5381,
+0x9d424d91, 0x814d7f81, 0x32aa367f, 0x28ff1b73, 0xbea1dbde, 0x76db7f86, 0x248a8d37, 0x294b8136,
+0x6e8142d6, 0xec65687f, 0x66297f57, 0xdfb563b3, 0xd352ee79, 0x0689d45c, 0xc58123cb, 0x0b62bf8c,
+0x53c6d80b, 0x7fbc9773, 0xe4596e21, 0x2584812f, 0x1793b781, 0xaa815927, 0xb8659035, 0xccbe2281,
+0x5eabb4c9, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0xb339946b, 0xd7497238, 0xc36e8181, 0xd0816d3e, 0x7f7f2681, 0x6709bf21, 0x92e3bf81, 0xe54358cc,
+0x6bb5a661, 0x3a7f81f9, 0x8140f0fb, 0xc5a67f1c, 0x8d3381a5, 0x817f81c4, 0x72817fd5, 0xa72c76fd,
+0x68dc8b11, 0x9b5f4098, 0xc0f43965, 0x7f0ecf02, 0x7cfdd720, 0xe1a0ac15, 0x087fc3c6, 0x81b66e6a,
+0x410a893b, 0x81c491f4, 0x81d87033, 0x2056562a, 0x7f526cd8, 0x817fe90f, 0x81f78178, 0x81d93134,
+0x3699e0a2, 0x7fa181c9, 0x81f64081, 0x8f816b2e, 0x816ece59, 0xd548fd2c, 0xc4ee3919, 0x6d847f16,
+0x81e423c5, 0xfade4a40, 0x9e29eeb9, 0x7fe450f5, 0x5207177f, 0xf17f710b, 0x8cf291ad, 0xd09f9cf0,
+0x7f022d20, 0xb7ea81c6, 0xbb89cfa5, 0x1c6fd006, 0x7f8481dd, 0x81020914, 0xb14c81e9, 0x4d9b4f08,
+0x7f6064c8, 0x817f7fcc, 0x6b9819e9, 0xb253e7df, 0x724368fa, 0x816fe408, 0x7481a204, 0x0e6fec58,
+0x70449617, 0xf5a1da3f, 0x7f446e7f, 0x1005abce, 0x38664981, 0x8240472c, 0x10f52409, 0x818186dc,
+0xf7810be2, 0x9c2ad304, 0x81817f62, 0xcc40a43d, 0x5e6b81e7, 0x7ff0e2fb, 0x812a7f7f, 0xe3017f51,
+0x817f24dc, 0x85c49997, 0x863c7024, 0x7fc0ce3b, 0x637f8c81, 0x814c9dbe, 0x8ea08156, 0x2cf47f9f,
+0x5b55a9b1, 0x107d7546, 0xcd0381b9, 0xd8617f42, 0x7f81b8f1, 0xb506abd3, 0x745f2b2b, 0x7f81b7d5,
+0xbf7fdb20, 0x1ca3a610, 0x7fe5e9af, 0x20b37f08, 0x810e6f81, 0x9b7fe6ad, 0x1aa35d81, 0x46032cc7,
+0xad7f7f77, 0x8bd48245, 0x3dffaa81, 0x507f91d9, 0x907f9a25, 0x404b81b7, 0xc67f7f97, 0xb1eae864,
+0xed2a18c5, 0x2ea85ef3, 0x8153434e, 0x816daaf3, 0x7f03ad67, 0xa0ec264e, 0x640ca1e5, 0xf0818f2b,
+0xcbd27ff7, 0xa6a39206, 0x4ca811b9, 0x4c57a068, 0x8647e4c9, 0x7f438ad2, 0x7f945029, 0xc168e72c,
+0x7fed4781, 0x81f706cd, 0x62810667, 0x177f7f32, 0xb105d09c, 0x81fa7f16, 0x7f11fa53, 0xc8a6430b,
+0xececbb81, 0xe82644a6, 0x3e82f381, 0x28817feb, 0x90c18135, 0xd0f5810f, 0x507f0ba3, 0xfeb38808,
+0x8e6c66ea, 0x70a0c4c8, 0x8146912c, 0xb8e01cb1, 0x2b817f51, 0x64818207, 0x9f3a4e81, 0x7f4aece1,
+0x84aeafb6, 0xa17f8188, 0x9a7fe759, 0x1cbf5681, 0xad7f6bad, 0x818eebdf, 0x438181e9, 0xa7d07f22,
+0x75dbd318, 0x81817f3f, 0x5457bd92, 0x4b204ffc, 0x7f1c637f, 0x8b4c7ffe, 0xa4d97fe8, 0x6194ca37,
+0xb06c7f06, 0xad2f1ac0, 0xaef07f7f, 0x15818101, 0x22bf8d7f, 0x597f35cd, 0xb155ab0c, 0xc2838b0f,
+0x0f81f4e6, 0x181f7f81, 0x31e1817f, 0x6a2846c0, 0xede8048e, 0x2952c3e3, 0x09187fda, 0xce411cce,
+0xbbc197b5, 0x8181464a, 0x161922d0, 0xc37f292f, 0xbd736439, 0xc881b6fb, 0x81ba557f, 0x9da521f0,
+0x3e7f52dc, 0x3b3a813c, 0x7f544bc3, 0x46b1b3fc, 0xbeb18e34, 0x418156e1, 0xab22e318, 0x7fbe66e3,
+0x2237bfb9, 0x010081be, 0xc159b8df, 0xf449b264, 0x017f9e82, 0x9dbd7ff1, 0x09c7cbd7, 0x81bc537f,
+0x817fe1c7, 0x7f177f60, 0x7474bcdf, 0x3c7f6f70, 0x7f7f987f, 0x81812ecc, 0xca783f94, 0x64cc4da9,
+0xa0ba1d0a, 0x7aeec524, 0x43bb6b81, 0x1a338981, 0x206be419, 0x65819d5a, 0x7f81d73f, 0x81a67f36,
+0x22810442, 0x7fd4cef9, 0x6067ad85, 0xd6d17f30, 0xea7f8166, 0x457f7f84, 0x817f1bc8, 0x6b86be00,
+0x7f8bc555, 0x519d5bfa, 0xd07f97e4, 0x505c81d4, 0xf1eb8115, 0x72bdd002, 0x9da57f77, 0x817f12cc,
+0x81c03652, 0x441872fb, 0x554bdf67, 0x2e46f0d1, 0x81817f8d, 0x7ff5ce55, 0xd2ded97f, 0x817f67af,
+0x0539c336, 0xa37f2f7f, 0x8150637f, 0x8156d068, 0x827fbda5, 0x9f176b2a, 0x81878c0d, 0x81117f07,
+0xc0d15559, 0x869c81c4, 0x7fa498da, 0x46d5d102, 0x81f7a581, 0x816eca47, 0x67ad9d95, 0xccb49c2e,
+0x297c5440, 0x817f7cda, 0x9f55bd08, 0x2b7fc490, 0x817f817f, 0x377a8158, 0x91b5bf7f, 0xc87f4f21,
+0x977fc1d9, 0x7f3088f4, 0xe938b6ca, 0x41707434, 0x81a27fd5, 0xcf818116, 0x817f887f, 0x09cf3bc9,
+0x814edf19, 0x46206509, 0x34a36c81, 0x313fc7da, 0x92f34a81, 0x21e942d8, 0x365c16d1, 0x68907ff1,
+0xcb8190e3, 0x5e7f7fa3, 0x6fb78c98, 0xa1444250, 0x812bb37f, 0x83a26f0d, 0x739bab07, 0xcb2cea1d,
+0x668174ef, 0x288b8965, 0x95849a7f, 0xdc0981b0, 0xd875297f, 0x8153d5b0, 0x9d81e6dd, 0xae81d259,
+0xd981813d, 0x7f64ed35, 0x1f459b0f, 0xb8ebe4e5, 0x72812e81, 0x81909b54, 0x02f54fb3, 0xa87281f4,
+0x7f6381cc, 0x7f3cab5b, 0xb1958152, 0x814b9bfa, 0x81312b5e, 0xc2977f23, 0x347fbb97, 0x3b057fe6,
+0x40208144, 0x7f817fad, 0x54cb9deb, 0x37b34a28, 0x81781b81, 0x7f1b8130, 0x81e194f1, 0x882b7f1f,
+0x81dfc90a, 0xe4a97fa6, 0x9e81ab40, 0x971c2535, 0x5a7f7f3d, 0x47708116, 0xab778160, 0x81ac57e3,
+0xc2854dec, 0x81ea81a1, 0x8b347f7f, 0xe981d130, 0x93c70917, 0x817f812d, 0x81925016, 0x7f813953,
+0x52a5a5fc, 0x7a33d8e1, 0xcbd80281, 0x79df8101, 0x797afc87, 0x7581bbf0, 0x81567f94, 0x472ad4cd,
+0x23e49434, 0x4c7ffc03, 0x41b0717f, 0xcc67b7bc, 0x7f7f48bd, 0x8c1c81b9, 0x3a460d72, 0x9eb5fa34,
+0x03530615, 0x6c7f7754, 0x818167d3, 0x7fc22100, 0x7f3da881, 0xa24e7f61, 0x321bb411, 0xd981c827,
+0x76e8814a, 0x69ca13fb, 0x1390b881, 0xb1d08157, 0x7f4669a2, 0x7f237f13, 0x30268178, 0x0eb87f34,
+0xee1aaa30, 0x816596e1, 0x31813181, 0x88888243, 0x817de70e, 0x84811713, 0xd74d7f52, 0x06b17f35,
+0xed347f1e, 0x66d66058, 0x7f817fee, 0x035edde8, 0x819b18a6, 0x8e8198ee, 0x817fb5f5, 0x311e442b,
+0x957f941d, 0x4e6aa649, 0x7f677f41, 0x8d876acb, 0x7e7bb58f, 0x44c27fce, 0x6f39ec5e, 0x7f7f81d2,
+0xd43ba60f, 0x488150c8, 0x8181e9e8, 0xfe46ec02, 0xa2cc7fd8, 0x7f810781, 0x7f30c356, 0x22c97fc5,
+0xbcc97f40, 0xf2b2ce1e, 0x7b59537f, 0x46d994ea, 0x4ae2273a, 0x7f36d92d, 0x818ad281, 0xbf7f2b7f,
+0x8181abf8, 0x924f4eb0, 0xa62c81b6, 0x812b99aa, 0x447f817f, 0x2c7f5746, 0x0859b34d, 0x83e652db,
+0x7fc781ca, 0x819e5cfe, 0x55c5d07f, 0xb4bb3dff, 0xc24b8178, 0x93d28123, 0x15e63a7f, 0x7fc87f40,
+0x812fd94c, 0x991b8170, 0xb13593c6, 0x3b9a5119, 0x3b752581, 0x20b89686, 0xcac17f7f, 0x81237f6f,
+0x99ec643e, 0x02813100, 0x81124724, 0x3455436b, 0x8155cc81, 0x68136028, 0x81517f5d, 0xc57481f6,
+0xe4c88159, 0x81fba227, 0xaa814381, 0xb3818ebc, 0x457f8b51, 0x118181a1, 0x575b7fe7, 0x53571239,
+0xa7d35d43, 0x4b577f89, 0x1f817fc2, 0x6c7f9609, 0x9714813b, 0x81d723b6, 0x6c7f377f, 0xa3cf6826,
+0x08b27f45, 0xbf7f8184, 0x814de5a0, 0x8181a2fd, 0x167f3c70, 0x8a7fa9e4, 0x65ac4b7f, 0x7fd367c5,
+0x81112d7f, 0x8170811d, 0x817f589e, 0x8b7f7f44, 0xba2ab081, 0x527f5bf6, 0x2faad6e3, 0xc335a82e,
+0xb862c003, 0x81715776, 0x5081817f, 0xdd7585c0, 0xb7989d7c, 0x367f81b1, 0xa0818129, 0xc6f37f55,
+0x5006be1d, 0x817faac3, 0x7e667f81, 0xaf2c7f99, 0x347f897f, 0x2bb1aac8, 0x8112eee8, 0x7f5c62de,
+0x007f14d1, 0xa1a2140b, 0x95446419, 0x5e7fc82a, 0x7a7ff07f, 0x81fe7d31, 0x010a8149, 0x7f3e7fb6,
+0x397f81ae, 0x7b7f7fba, 0x797f5981, 0x7f7f170a, 0xb9cf087f, 0x0be6cb28, 0xdd97cf4e, 0x92520625,
+0x817f3ae4, 0x6c81c381, 0x440cc5ae, 0x2b7fb8b7, 0xe94a2081, 0x81967f4a, 0x7f817f9d, 0x7f81a7a1,
+0x815b52cd, 0xd59d57f6, 0x899e827f, 0x7f7f9aa0, 0x52819e7f, 0xda3a2223, 0x8b81073c, 0x818132d0,
+0x55638800, 0x9804bcb5, 0xc3810a7f, 0x6ddab6e3, 0x7fcae34b, 0x7f814a2d, 0xea7fed91, 0x3b9bb0d9,
+0x87d97f1e, 0x16535739, 0xcc2f7f81, 0x7f98813a, 0x5873bbe6, 0x241781f3, 0xbcc7cb33, 0x7ff69847,
+0x8d11c0dd, 0x9ac9d787, 0x66cff992, 0x7f7fc312, 0xaa4fe681, 0x39967f26, 0x201c7f7f, 0x9a7f4849,
+0x5768a814, 0x814d731b, 0x81917f81, 0x6eea7d7f, 0xad7f65fe, 0x3d3e81d3, 0xae672981, 0x7f1c81a0,
+0x9881053a, 0x81a16307, 0xf64c4063, 0xbc7b97d9, 0xa29dc0a1, 0x8161189c, 0x7478b3db, 0x87b330e1,
+0x817f0598, 0xcf422b0b, 0xc29b7ac8, 0xc2817faf, 0x8185581c, 0x81d0b675, 0xc93f7fa9, 0x814e3a02,
+0x815d6511, 0x5d81075f, 0xd768c13e, 0x89815437, 0xac810d85, 0x1a7fa5e9, 0x14a6bb81, 0x54531dfd,
+0x81dafd24, 0x978133dd, 0x818130be, 0xb67f5bb3, 0x2c7f4179, 0x81b281f4, 0xf3a463fb, 0xda36c4d4,
+0xb17f811d, 0x37ca75f1, 0x53257f7f, 0x3ef0208b, 0xdc71817f, 0x978181ef, 0x7f81d17f, 0x1197810f,
+0x277f7f47, 0x7ff6b1b0, 0x4562fca6, 0x725d89e3, 0x888197cf, 0x05477fd3, 0x4988077f, 0x81cbb5d9,
+0x047fcaa8, 0xdb7f7fc9, 0x81ccfe5f, 0xd91643ff, 0x7f3f7f7f, 0x03815ed4, 0x697c7f81, 0x815b814d,
+0x287f0d32, 0x7881a041, 0x247f95f4, 0xc87048e8, 0x3d815e7f, 0x71ba7f0b, 0x50815075, 0x7e53dc30,
+0x3f7755fe, 0x1d032571, 0x818cbaf0, 0xbda97f50, 0x7ffc0aad, 0xbf457f42, 0x66b48f81, 0x072c3f1c,
+0x433d9cfc, 0x71077f76, 0xac2d5564, 0xdc523811, 0x7f54a538, 0x7f707f00, 0x8c10a6f1, 0xf4a581d3,
+0xae875d1d, 0xd37f9747, 0x8124b869, 0x81a17745, 0x61bb8b7f, 0x227f8129, 0xa77fc381, 0x997f8119,
+0x817f812b, 0x81a78836, 0xdd557fc6, 0x907f6418, 0xbfa9accd, 0x4f815762, 0x97818181, 0xa0524fa6,
+0x815b5721, 0xc77f7f7f, 0x7f725efb, 0x437f614e, 0x81f15c6f, 0x7fb77f28, 0x473f7a1c, 0x7f81d71b,
+0x7f5133e9, 0x7cd17f32, 0xfb7572e3, 0x16c35e62, 0xc961a649, 0xdec47c37, 0x6d442844, 0x386b4616,
+0x9583c437, 0x81d57fae, 0x8114357f, 0x818f9db4, 0x7ff181d7, 0x773fb601, 0x4032fb29, 0x8613b71f,
+0x7f81b3f4, 0x1256f605, 0x297f817f, 0x9a7f2996, 0x5d81b87f, 0xc8907ffd, 0x817f7f35, 0xf6719ac7,
+0x7b7f523d, 0x18d8ca2e, 0x2681b97f, 0xc02a810d, 0x34cda181, 0x32e081bc, 0x016f7f08, 0x461bdaec,
+0x04330900, 0x633f61ac, 0xa3815204, 0xa534dda9, 0x7f81814f, 0x91a66329, 0x7f51aacd, 0x7ff681a7,
+0xabe476e8, 0xb57f9cae, 0x177f27c0, 0x7f2f8b0a, 0x1281a9ba, 0xd991af49, 0xea527f82, 0x9be09d46,
+0x28b2a2bc, 0x81c8a1d6, 0x5e1f4e71, 0x817f812e, 0x28b981cc, 0xb071cf04, 0x82cb8139, 0x81608181,
+0x819381ae, 0x818f90c8, 0x817f64c6, 0xd169c7c9, 0x8f548381, 0x87f7b03e, 0x6657817f, 0x81be8102,
+0x0d078c5f, 0x7ff97fd4, 0x7f814981, 0xb3bb7faf, 0x37c3dd6b, 0x4c6930b7, 0xb56ba479, 0xfc82520f,
+0x3494f934, 0xa2016c53, 0xc513eeba, 0x8c283bcf, 0x7fa6047f, 0x54319236, 0x42b2c19c, 0xf965c947,
+0x60777fca, 0x02767f47, 0x4921bb04, 0x81818fa9, 0x8eab7f5d, 0x81f4aa14, 0x1abdf97f, 0xbdaa220d,
+0xac368139, 0xddb1c212, 0x81d9fd99, 0x90b77f3f, 0x029ec8ca, 0x7f93d9db, 0x8563627f, 0xc5b72d52,
+0x2e6eacc0, 0x7f66815b, 0x50db375b, 0x4529813f, 0x7f8d817f, 0x44486f52, 0xbcd64c9a, 0x7fd2bf33,
+0x43507f58, 0x7281a4d3, 0x7ca82149, 0x29817fd0, 0x653544b9, 0x88c681a1, 0xd93c2bfe, 0x530835cf,
+0x5be26556, 0x7fb37fd1, 0x81e68174, 0xe88c0c42, 0xb63b817f, 0x394a81c9, 0x407f8107, 0x747f1f52,
+0x91816dcf, 0x5f818161, 0x8514aebe, 0x7f496bac, 0x5e818181, 0xe8e358ee, 0xae1a7f0b, 0xa4c97f05,
+0x77a77f52, 0x818160fd, 0x6bab7f81, 0xcec7e121, 0xf4810196, 0x4a7f68e3, 0xe56d5a7d, 0x813d2c6b,
+0x7d0b7ff5, 0x7bbd6760, 0x67812b81, 0x188197d7, 0x8146057f, 0x811eff1d, 0x818ecc4d, 0x4a786245,
+0x72593331, 0x2e81556a, 0x66aad481, 0x94818128, 0xde89ba81, 0x4df07227, 0x7f73ff8f, 0x7f44b981,
+0x9bf9e707, 0x46811bff, 0x85a96a36, 0xc13b7f15, 0x6670ba65, 0x81fcefee, 0xae8187b9, 0x73c3baa9,
+0x819e8123, 0x9cfbc6aa, 0x7f89c73a, 0x7fde9812, 0x97136581, 0xc38181e6, 0x81de9c0c, 0x4994ab0e,
+0x30977f06, 0x59dda72a, 0xc9f87fd1, 0x8186b4d7, 0xba86de81, 0x98a24c3b, 0x12157f3f, 0xd7674b1d,
+0x81816042, 0x8da8926e, 0x397f81f6, 0x3dd281b1, 0x7f6570ba, 0x7f875412, 0xd74f5be9, 0xab81f207,
+0x7feaa863, 0xab81da0d, 0x7ae79023, 0x357f8111, 0x654a468e, 0x521c2306, 0x2f817f37, 0xdb818a41,
+0x50b5b30c, 0x71817fdc, 0x7f7f7f04, 0xb13471fa, 0xd1843f7f, 0xb700ece6, 0x109064ea, 0x989b4b03,
+0x27556fd8, 0x5ecee700, 0x5f5a2281, 0xa81c81dd, 0xac54b6bb, 0xe60f1ae8, 0x139dc4dc, 0x22818109,
+0x2c7fc74f, 0xa69cf331, 0x9d272281, 0x7f9a6da8, 0xb1bb8f45, 0x81815a6e, 0x8315517b, 0x638181f0,
+0x273db266, 0x90777f18, 0x047d763a, 0xbdf50a4e, 0x7f5f5c12, 0x817f7f53, 0xcb177385, 0xb2ca7f22,
+0x818eaa17, 0x815d4c81, 0xa67fc281, 0x7fd856eb, 0x01becb84, 0xa8e2685d, 0x7fadbe7f, 0x1b7f6a05,
+0xef47f495, 0x817f1b1b, 0x8c8d0b81, 0x57b9a5d4, 0xdc298159, 0x5e81a7e2, 0x81467f5b, 0x370f6df0,
+0xc49c30a1, 0x7a545710, 0x974b81ff, 0x458c814a, 0x237f17dd, 0x427fb6da, 0x1a7fe5de, 0x817f8121,
+0x817f1c96, 0x9c817f90, 0x0e7fd365, 0x818aa15b, 0x7fd302f2, 0xdb7c138e, 0x7fe68181, 0xbf818112,
+0x7f2e7fc8, 0x81728101, 0x6482d830, 0x59e24b0b, 0x9b67c284, 0x6e811018, 0x81a42c32, 0xd87a4da2,
+0x817f2d41, 0x3e2fbce2, 0xd1d44623, 0x7464b44c, 0xcc255f95, 0x56c93b38, 0xa87f1491, 0x04819c5c,
+0x7f457fd8, 0x8d6bb866, 0x819c8181, 0xbc118166, 0x3d4c2a5f, 0x7a6d51fe, 0x79065067, 0x8181d1fb,
+0xff6f2b08, 0xf5be7f35, 0x9fa567f5, 0xabb581b4, 0x81819a5e, 0x7f8187c5, 0xaa7f5669, 0xdf526044,
+0x7f6ea5f5, 0x7f3a68c7, 0x4feb7fb5, 0x627f2850, 0x39f49181, 0x71ec22ea, 0xc17f4c44, 0x56f4db23,
+0x7f08cc95, 0x977fde95, 0x7f4f7f81, 0x81e07f1c, 0x5e7c7fe9, 0x1c3534ce, 0x7f907f2b, 0x7f83418b,
+0xeb81daec, 0xa6fc892a, 0x757f7ffb, 0x6281cc2c, 0xb826cecb, 0xc7810a2e, 0x60d1906e, 0x7f8175ed,
+0xd085cb36, 0x88c4811a, 0x5e3e707f, 0xaba57fff, 0x099c127a, 0xbac67fb7, 0xa9817d82, 0x4f6c7faf,
+0x43d17a0b, 0x92a13dc8, 0x81995d8d, 0xab7f90fd, 0x5f1f8189, 0x767f8101, 0xb7caa2bc, 0x7f8177cd,
+0xe0655857, 0x81cfebeb, 0x81af6fc0, 0x57ac6dae, 0x437f8176, 0x513c60fb, 0x9ef3c081, 0x78d0d12e,
+0xa40c7f0e, 0x438101c0, 0x81816405, 0x6f527f04, 0x8d0fa47f, 0x6d7fbb7f, 0x39187f74, 0x3c7f5f06,
+0x677f7fdc, 0x8114250d, 0xe08373c7, 0x9c7f7dd8, 0x816341a3, 0xdf7f073e, 0x7f818174, 0xca84e8ff,
+0xd6ff70ef, 0x7fa1a3c7, 0x7f7f8148, 0x81de7fa2, 0x4d323671, 0x5aaafbff, 0x05ac7f7f, 0xd58102e8,
+0xad6c45e4, 0x81677f27, 0x818132ce, 0x7fa55a1c, 0x553029ce, 0x81a47f06, 0x81ca6189, 0x5cd581af,
+0x70f34733, 0x676d431f, 0x81387f66, 0xef916df0, 0x72099950, 0x867f81fe, 0xaa7f657f, 0xd7521bef,
+0x81d573c6, 0x7f76bcd6, 0xaba4c10b, 0xe394b7d7, 0x82579781, 0x88f5715a, 0xf7815cda, 0x65817727,
+0xeab07f22, 0xbb5f7c40, 0xc0a48139, 0x95442a2d, 0x857f7d81, 0xcd7a81ee, 0x7971819e, 0x3a31f0fe,
+0x537f6181, 0xc307bed6, 0x8936627f, 0x98b18136, 0xd07c817f, 0x7f7f8126, 0x133a607f, 0x98d40015,
+0x368a8123, 0xe181501d, 0xc9817681, 0xa27b2e00, 0x47dd81e1, 0x0e7f81d3, 0xf3287fce, 0x7f3400c9,
+0x8d65c745, 0x4c8148c4, 0x21817f7f, 0x81ac0af0, 0x4462c1fa, 0xa99e4e8e, 0x7f153ad8, 0x8168bb47,
+0x8370e19d, 0x7f3281ca, 0xf2437283, 0x667f279d, 0x9cb4c17f, 0xde814315, 0x72d3a17f, 0xac113b66,
+0x6c13abef, 0x7f816cb5, 0xc8c7c37f, 0xa7d1862e, 0x8b7fb87f, 0x9dd9a384, 0xb69b22f0, 0x157f710a,
+0x9007c13f, 0x4518caf0, 0x836c4246, 0x03f981d1, 0x817f7f7f, 0x4581b92f, 0xb4718193, 0xa8815e05,
+0x437f81fe, 0x52817fdd, 0xb1477f54, 0xc12e3cba, 0x7f81815f, 0x4e737f32, 0x817fc141, 0xa67f7f58,
+0xd19dab37, 0x8b9eb14b, 0x7fe8be30, 0x905d7fba, 0x16ae520b, 0x81015d10, 0x71812450, 0x1de37f1b,
+0x811fa6af, 0xdf77b8e9, 0x647ff1bf, 0x17be7fbb, 0x817f6224, 0x8115e99f, 0x7fca7f7f, 0xd48f0fa2,
+0xda7fc4ac, 0x10649756, 0x2f89af26, 0xdb5f3ec4, 0x817fde7f, 0xbde94df1, 0x9a707fb7, 0x7f7a656b,
+0xfad88c3f, 0x7ccc7f5c, 0x7f8cc477, 0x7fc26452, 0x28c7812f, 0x85b7a1ab, 0x818181ad, 0x404ce719,
+0x540fe3e5, 0x705bf0fd, 0x6206a54a, 0x89486861, 0x75e25f81, 0x3c9bc93d, 0x3457c36d, 0x57ab2f25,
+0x3cd9bd13, 0x81769cbe, 0x87557fae, 0x025c7109, 0x05457f64, 0x5f427f13, 0x5c7f26be, 0xb9818130,
+0x34e63305, 0x8a81ebcd, 0x673326ba, 0x05647ff7, 0x818b8181, 0x8c81782a, 0x4bc0b30f, 0x759749c7,
+0x399e99f6, 0x7fdb4313, 0x9c7f5cae, 0x6cc2a611, 0x811dab81, 0x7f7f413b, 0x4fb37081, 0x818c8164,
+0xac4c01d8, 0x1f63813e, 0x81db7881, 0x8194404b, 0xcc9bc55f, 0x542900eb, 0x87ff34c5, 0x507f7fa2,
+0x7fd97f03, 0x79e428e3, 0x2f7f7c6c, 0x57813f13, 0x7f7f817a, 0x61c40926, 0x68a9457f, 0x63ea9f2f,
+0xea81a8ec, 0xfd81301c, 0x81cd8181, 0x812cf7fc, 0xc996b697, 0x81895df8, 0x8d4ae3ca, 0x4c5de834,
+0x81643d19, 0x202babce, 0x55a89081, 0x64ba81f9, 0x7481db7f, 0xa0eba6bf, 0x81c12ee3, 0x8181a75d,
+0xc8e9939c, 0x0981295a, 0xf22e527f, 0x817f3314, 0xc4f17f5e, 0x8c83651a, 0x7f74a17d, 0xbdd28405,
+0xb6bc4795, 0x388c7cdd, 0x957f7f7f, 0x1db7ec29, 0x68f59c45, 0x8142eaa1, 0x81f0b543, 0xa48163fe,
+0x81d08134, 0x7d673f5d, 0x8ecdcf94, 0x6e56195a, 0x7f222687, 0x7fd8fb10, 0x81aa977b, 0x22014258,
+0xd541be81, 0x817f62fb, 0x707fea79, 0x7f817f60, 0x4b7f3c7f, 0x60a4032c, 0x1c68c2ae, 0x7fc27f19,
+0xd5a55cd4, 0x09a651f4, 0xb5447f7f, 0x7f815bfe, 0xe681d381, 0x1781f900, 0x8eb6b0d7, 0x1d90fdf8,
+0x528147c4, 0x816b7f46, 0x7f6381c3, 0xd5d2088b, 0xcce4ae52, 0xdf4ec304, 0x967e5e1f, 0x567f1439,
+0x317f810a, 0x815f814b, 0xe13ee67f, 0x7f8177a3, 0xac69c42a, 0x7f5837e7, 0x61b481c8, 0x777f812f,
+0xcea38116, 0x6b6b7f37, 0x8feac838, 0xf9457f3c, 0xe5107f74, 0xcc957f56, 0xa3ab7f81, 0x81817f6e,
+0xc5817f2b, 0x197d0d99, 0x7ad0c84a, 0x81ada865, 0x81b31c81, 0xee7f9b54, 0x815a3406, 0x3f617fce,
+0x3d4db8c2, 0x5ac39e72, 0xd390812f, 0x27c6ede7, 0x6f69d981, 0x2981f141, 0x7faa2681, 0x787f6668,
+0xc24ec00e, 0x7f008142, 0xfa811aad, 0x2d567fb0, 0x5adb7f65, 0x8a864fc5, 0x6d817f85, 0x307f7f1f,
+0x5c817f47, 0xcc7f43b6, 0x9bb08159, 0x496f7f2f, 0x41e383c2, 0x4e7f7f88, 0x71ce63e4, 0xe07f4c7f,
+0x857c58ad, 0x8599641d, 0x655c687f, 0x97a64fde, 0xdd42707f, 0x29ced6b6, 0xdf25c153, 0x7f047fbb,
+0x7c5f462e, 0x61d08548, 0x7f7fab7f, 0x48b1ecb0, 0x93c63349, 0x039c7fe3, 0x817fce1a, 0x29bb7112,
+0x7f3bbaf0, 0x81818d2f, 0x7f5b3d41, 0x1fa18160, 0xc481af81, 0xfc818130, 0xdae2bd73, 0x4d3c81cc,
+0xa47f5f0a, 0x5e3ad1a6, 0x1a816eab, 0xb307dbd0, 0xa8798b88, 0x7f81252a, 0x6d7f2899, 0x4d7fa214,
+0xc375762c, 0x7fb655ab, 0x5bb98185, 0x1381682c, 0x5281ca7f, 0xc56d7fc8, 0xa81d8170, 0x8daa50f4,
+0x8bbf7f03, 0xcf7fb0c2, 0x3b733caf, 0x7f756442, 0x8c81353c, 0xf7e82f0f, 0xa5a620f2, 0x8ae39fea,
+0xada55ad4, 0x74f1de4c, 0x37714753, 0x538193e3, 0xb1811481, 0xe5bd81cb, 0x70fc4ea8, 0x888473eb,
+0x30b57f1a, 0x90c3f010, 0x81817577, 0xea8c4ae4, 0x6977547f, 0x549481a8, 0xf27f54b8, 0xf87f7fd3,
+0xef81843e, 0x5a437fbb, 0xa77f8181, 0x77354c05, 0x9781667f, 0x7fc2ae7f, 0x0b81b868, 0x1267b5e3,
+0xa0de743b, 0xb4cce348, 0x8192819d, 0xf0817faf, 0xecc781ab, 0x7f57af2a, 0x499e13c0, 0x7f3d632a,
+0xac96fcc8, 0x81727829, 0x7f7ff281, 0x81819bb4, 0xc5bfd358, 0x76553120, 0xb57dbcb5, 0x9b425d12,
+0xb0814cc1, 0xea3d393b, 0x1dde5e27, 0x03a6f127, 0x39a9a981, 0x94304751, 0x3af97f00, 0xceaa49d8,
+0x3c8124d1, 0x7f24b547, 0x2830e08d, 0x3d7a8f0e, 0x456ffc4c, 0x814e7f4a, 0x357f817f, 0x7f61bc51,
+0xd7097f44, 0x213ecf47, 0x815281bd, 0xa9a58187, 0x0c3cbfb2, 0x18f92fa4, 0x307f7f52, 0x727f3361,
+0x6cdc8e37, 0x7f9d9e24, 0x64466e7f, 0x2491328c, 0xd58181cb, 0xc97ff4b1, 0x1890627b, 0xa48168b3,
+0x7fad44dc, 0x411b7f43, 0x6681647c, 0xc5171bcd, 0x4a6d7fd9, 0x7e369d2b, 0xf8124f61, 0xa71515f1,
+0xb381ec09, 0xc4ab71c0, 0x9e2e977f, 0x7f7f0d58, 0x1cdb7b16, 0x81f87f81, 0x517f7f60, 0xf4a3ea33,
+0x5c2112c3, 0xc67f7e06, 0x9d7f78c6, 0x811730e4, 0xa29b6170, 0x81817fd1, 0xb6637f87, 0x4f1c540d,
+0xe57f7707, 0x277d5c28, 0x7f817f21, 0x5e31e1ab, 0x7f81cd81, 0x815daab2, 0xec5284bc, 0xb45d7b63,
+0x7f4d815e, 0x0860494f, 0x81874034, 0x81d0f00a, 0x5b4274ea, 0xdce04fd7, 0x8188680f, 0x827f7fc7,
+0x8137e715, 0x9f7f6699, 0x786e75eb, 0x8253202d, 0xb2813ddd, 0x7315a11a, 0x81c18164, 0x7f7f8141,
+0x6870da25, 0x117f3efd, 0x7f1c5f1f, 0xcb6d63bf, 0x45561890, 0xc3df8136, 0x23275f32, 0x81817f5d,
+0xa50f7fde, 0x7f3ba6ae, 0x37a4bbf4, 0x81a6fd1a, 0x9b9bb6ec, 0x8b2cff0a, 0x2035d781, 0xa1c47fce,
+0x095dee2d, 0x7f3a2d51, 0x69727f7f, 0xec8aad61, 0xb0dbcc42, 0x2e706fee, 0x962a9d53, 0x81814554,
+0x759026dd, 0xb38851cf, 0x817fa70b, 0x7f462fb9, 0xcc7faf23, 0xa2981632, 0x3e8d8160, 0x07815dd2,
+0x82817f22, 0x5a755325, 0x45c77f81, 0x81653cac, 0x93f73dd9, 0x3d81c4cd, 0xf67aa181, 0x467e7f1f,
+0xe6c10cfa, 0x9bd4c12f, 0x6c51ca85, 0x4d7f2835, 0xc0bb341b, 0x81812aed, 0x87a181be, 0x4f7f7f6d,
+0x81c9e3c4, 0x117f95bf, 0xd457e634, 0xb84b567d, 0x5618f922, 0xda7fc1ed, 0x7f7f81d6, 0xa07da0b1,
+0x2e627f0a, 0x814ee836, 0xb65bff90, 0x3e29b6de, 0xb1b78f5d, 0x1724a1f0, 0x81484221, 0xe16d7fb6,
+0x578181f2, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+hard_output0 =
+0xb5bc6ac8, 0xf5373664, 0x1310345c, 0xd5bae4e7, 0x1fc9e83e, 0xebfdfded, 0x84bd86ab, 0xb7aabe00,
+0x60b44fea, 0xb9067464, 0x30325378, 0xa9195955, 0xf70c6e5c, 0x90922632, 0xc90b1cdb, 0xf2f5fb69,
+0x73056b63, 0x1a33bf3f, 0x17755b5c, 0xc58bff6d, 0x2f4390b2, 0x2869d508, 0xe7c7dfe8, 0x38552963,
+0x21da5367, 0x07282b9b, 0xa4767105, 0x1e294251, 0xe350a940, 0xb8a6aa27, 0xed12d778, 0xf10d9ece,
+0xab93527f, 0xcf2da7e7, 0x68f6d0b1, 0x811f4bca, 0x577b06b2, 0x3234f13e, 0x30bab7df, 0x8dc47655,
+0xbb843bed, 0x86da3aba, 0x30950c97, 0xdd096d7a, 0xa871fd6c, 0x8bee4e6b, 0x8fea30d0, 0x6c05b4d2,
+0xf3e144d3, 0xd24ebb1f, 0x065635e5, 0x8d3f2cf9, 0x536c6c6a, 0xfbb0a5d0, 0x3d707b42, 0xc44d5982,
+0xa5f4ad8f, 0xf32c0970, 0x1bccf1a6, 0x05916020, 0xa64fb176, 0x5ede6a35, 0xaf4966da, 0x9df5e0e7,
+0x75042abc, 0x9ef10481, 0x11ddcbc8, 0xa0f5518c, 0xd5c23418, 0x2393d558, 0xfbe7dfeb, 0xed1c64c2,
+0x86a36508, 0xde2dfb1e, 0xb8d0fef9, 0x24505232, 0xc894e71c, 0xbcc752a0, 0x40b74e83, 0x90d23c8c,
+0x728e4a61, 0x108f0b08, 0x66f522ee, 0xc258d851, 0x35a31c44, 0x11311b5b, 0xfd3d5be9, 0x5ae448ff,
+0x4f64994b, 0x5b8247a9, 0x4021114d, 0x2f0b6e82, 0x5eaa9828, 0x50ac71c0, 0xfb86ee52, 0x0dc1ac9b,
+0xbbd47645, 0x8f357115, 0x978ceea0, 0xd557db99, 0x99b30388, 0xfc9a8a1c, 0x0f75be1a, 0x50143e22,
+0x8840989b, 0x738ec50e, 0xe6b2783d, 0xf67899c8, 0x27ebed69, 0x6c415a16, 0x3a6cc2dc, 0xcd4e4e5d,
+0x6cb12b2e, 0xdb88d7c0, 0x79cd1582, 0xbc422413, 0xe72ad2f4, 0x8eaac30f, 0x0bd86747, 0x6d87f69d,
+0x15d62038, 0x4b375630, 0x0d51b859, 0x16db2cb2, 0xf210603a, 0x0abeb833, 0x55c694d0, 0xe57ca43b,
+0x0ba94428, 0x1398a406, 0xe47d3889, 0x5a20203d, 0x250d7a1a, 0xd930ffec, 0x03992e79, 0xf2759376,
+0x024ec121, 0x91fc3a2c, 0xb7e11cc5, 0x4ff7d459, 0xb8700134, 0xd6e61758, 0x4eba0a32, 0xb747e3ec,
+0x7073fad7, 0xded80f99, 0x331e2f1b, 0xfa1f1bed, 0x056424a2, 0x1d1d95e0, 0x550b9ec8, 0x51ee2a38,
+0x19525153, 0xd70c4cd5, 0x0d6cd7ad, 0xe44d1cf2, 0x30dfecda, 0xdacd7fe8, 0x7321d795, 0xddf48ef4,
+0xe271e6a4, 0x9c1feecb, 0x951fcd7b, 0x8acc5a03, 0x3fb83527, 0xe306de74, 0x7b9cd6ee, 0x8e140885,
+0xd4c91e8d, 0xe8c39733, 0x0f02f87f, 0xfb06b1b9, 0x0dc9349c, 0xf76bae8e, 0x4f642a07, 0x3d48a9aa,
+0xe3ea323a, 0xa1cd5c8a, 0x40aa0e70, 0x132042d3, 0xa9732f6c, 0xd15a00c4, 0x43d3b046, 0x9a51ebd4,
+0xc46ee0ed, 0xe2a2148b, 0xf5c478f0, 0x1fb01cf3, 0xf4f321ec, 0xd973811f, 0x11ad11b9, 0x5c67adda
+
+e =
+34560
+
+k =
+6144
+
+rv_index =
+0
+
+iter_max =
+8
+
+iter_min =
+4
+
+expected_iter_count =
+8
+
+ext_scale =
+15
+
+num_maps =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN
+
+expected_status =
+OK
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data
new file mode 100644
index 00000000..b5ef6246
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data
@@ -0,0 +1,1224 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0x0a45ef00, 0xb2bdc60f, 0xc810d206, 0xf21cc300, 0xb0c93da1, 0xaae019d7, 0x23ba3c29, 0xa61d1e09,
+0x202906d1, 0xd2b61ef3, 0x59e7f241, 0x2ec8daec, 0x68015520, 0x0912f4a3, 0x01282329, 0xa1fd1439,
+0x0701231b, 0x0fdcba0a, 0xbe97c6c3, 0x0438e652, 0xd5f644d4, 0xbd0e5f4b, 0xfcb6d0d4, 0x0825b15a,
+0x362e2fc8, 0xbbd5abda, 0x0df8c12d, 0xd016b5de, 0x1e439a0c, 0x44c94242, 0x00e2e8c5, 0x0639d241,
+0xef6aa40b, 0x90de9def, 0x43e0ea14, 0x31dd07cf, 0xbad70a05, 0xcffc228a, 0x00293bd7, 0x6038080c,
+0xdaeb3e20, 0xdd260c4c, 0xaa41fb3c, 0x6eeef0d4, 0x5b02c0f3, 0x14d6ac69, 0xfef0ce44, 0xc5abccf2,
+0x2a0f00c4, 0xd6cadb30, 0x38e212cf, 0x5e9abc29, 0xf105fce4, 0xde34e024, 0xb9d42365, 0x281eb22e,
+0xb219c055, 0x6504b01e, 0x465602ab, 0x1233a4e7, 0xa9ebf4cf, 0x63c1ed3f, 0xdbe9b71f, 0xf994579c,
+0x22f359b5, 0x3606379d, 0xae30fa1e, 0x1d6e290c, 0xc6e0440a, 0x2f129132, 0x1b317bf6, 0xcfdbc119,
+0x32022857, 0xaef168dc, 0xa1251d37, 0x65eec43b, 0x9630e70e, 0xe9e1fd98, 0x3e02d8ab, 0xf348af3e,
+0xf7b0f027, 0x2ff021db, 0xecae5bd4, 0x1b2f3e34, 0xbb2511c0, 0xd611ddad, 0xf951d2d6, 0xb1dd0191,
+0xc325cf35, 0x58bdf4b7, 0x36094dd7, 0x07cd3b44, 0x02c4ae4d, 0x812af6d9, 0x32f63731, 0x31103329,
+0xac0033ff, 0x817f583f, 0xc4817f88, 0x8d81790a, 0xa6a2817f, 0x7f81910a, 0x7fc5ed7f, 0xbf2f7f8d,
+0x7ff14707, 0x49cc3bda, 0xb04d5a2d, 0xd8d392b0, 0x7f69d3e1, 0x83ae43ad, 0x99b7da75, 0x3a7f7fd3,
+0xd46d6fd2, 0x4f7fc524, 0x137f812f, 0x587fcd8a, 0x81c481bf, 0x9caa6184, 0x0bc3a499, 0xe2901a23,
+0xfeb59b6f, 0x7f818171, 0xf68ed39d, 0x8113a975, 0x81dd6681, 0x7ffe707f, 0x5836757f, 0x48a98122,
+0x7f724081, 0x81588447, 0x59f8b799, 0x7f6dbab0, 0x817f8f09, 0x13c3ecb6, 0x7ff98d64, 0xc4911e81,
+0x33922f9b, 0x05c9e85c, 0xa19071b3, 0xec1ca24e, 0x02a48181, 0x813b8196, 0x7fca7d51, 0x7f81814d,
+0x00815911, 0x242a7f5f, 0x29d6ee98, 0x8a61887f, 0x4e815e7f, 0x773e811d, 0x8e65d1a7, 0x8d514860,
+0x3081b248, 0x7f2393ab, 0x815d2e31, 0x817f4be2, 0x81818519, 0xa07f677f, 0x0defa853, 0x22811c4c,
+0xbe81d32d, 0x2d7fdc7f, 0xd8f3195c, 0xf69bc13a, 0x7f7fdae1, 0x2a56717f, 0xd4811c7c, 0xbe218c81,
+0x81a556d3, 0x3f43a8c4, 0x6a95b0cf, 0x427f79db, 0x959cbc5c, 0x9f00dd19, 0xa1e8c5d4, 0x59814997,
+0x4e35eb7f, 0xf87f348b, 0x76749124, 0xab7f7f1c, 0xf0816c81, 0x8c62b45d, 0x744ec34f, 0x329a7fce,
+0x9048b815, 0x0076dcd0, 0x812d2981, 0x98d5cb15, 0xd29652cc, 0x2cd15e33, 0xc2557f91, 0x03517f6a,
+0xfc1cc134, 0x0f7f6a00, 0x81939c0e, 0x7f4677a9, 0x7f597f5e, 0x4f972a86, 0x81b2ab81, 0x207f7f69,
+0x85b9b234, 0xc1857fac, 0xedaa7c08, 0x7f50057f, 0x7f1dc26d, 0x7fa47f81, 0x7ff57f76, 0xb3bca5cc,
+0x6b31d550, 0xce3a6583, 0xdac01f7f, 0x81184838, 0x65239c09, 0xde415bf5, 0x7f7f3881, 0x097fdc7a,
+0x81698160, 0xa8da57db, 0x3281530a, 0x628d057f, 0xad7381f2, 0x18a35579, 0x3d909f7f, 0xbf7f7f81,
+0x8181eb36, 0x6bd45881, 0x578c7520, 0x417fdc89, 0x307f5bad, 0x667f8181, 0xe1b44f6e, 0x817f7f2b,
+0xd55cb62e, 0x8aa0ab9a, 0x81e42e47, 0xbc925270, 0x77aa4a90, 0x56247581, 0x8ff5f670, 0x7d7f9cc7,
+0x3a013a2e, 0x10810005, 0x815eab7e, 0x812fe1c6, 0x81f0b47f, 0x7f2b8c03, 0x7f7fc07f, 0x7e497281,
+0xd1d70cd5, 0xd981b501, 0x0c63b57f, 0xd8a8ea7f, 0xdeaece7f, 0xa08107f0, 0x9f54e62e, 0xa8cfc431,
+0x7f985ee1, 0x811ae887, 0x7f81df4d, 0xc081d1bb, 0x81a15a9a, 0x7f44bf05, 0x037f057f, 0xf22c7f9d,
+0x7f7f7f7f, 0x81856e03, 0xbcb08381, 0x46bb372d, 0x7f7bd59c, 0x8157e815, 0xa1910b3a, 0x5c537f7f,
+0x3e81717f, 0x4851ac7f, 0x813b7f83, 0x867fb25e, 0x098131ca, 0xb06fc77f, 0x7f60e97f, 0xa25f5c46,
+0xe82eae48, 0x40bf987f, 0x817f81e2, 0xf5b2c831, 0x810597de, 0x045c7f4b, 0x81e33f36, 0x81aaf35e,
+0x81817fc6, 0xe700587f, 0x93b37fea, 0x9f81fc97, 0x4def727f, 0x6bc07fe7, 0x7f811076, 0x81248175,
+0x0a7e5cf6, 0xcd7f7f81, 0x7f49f37e, 0x8181917f, 0x7f70df81, 0xd17f81b5, 0x1f6b4ef7, 0xa21d6b8b,
+0x7fb0461a, 0x5c027581, 0x81819bb4, 0x816ee78f, 0xad7f817b, 0xe481a10a, 0xa1cb8d81, 0x32813e3a,
+0x947f7f81, 0x8184bff3, 0xa96cd072, 0xeb0c5881, 0x814ab385, 0xe3f67f7f, 0x584a81c9, 0xdbc5c781,
+0x78816e49, 0x817f8a81, 0x7f3f7fff, 0x56d89f40, 0x4397e427, 0x7f29b20d, 0x7f597fad, 0x35817f65,
+0x0a7f3b19, 0x61c97f81, 0xc6817a65, 0xaa5ceb7f, 0xfc427fef, 0xb8684c75, 0x447fbaa3, 0x4d7fae7f,
+0x8196e3bb, 0xe1588132, 0x7ff27f34, 0x8173bff4, 0x7f705f6d, 0x7f7f817f, 0xa353354c, 0x7f58ca67,
+0x7a5a7f9c, 0x1e81ee7f, 0x98d681d9, 0x9e60c443, 0x7f7f39a3, 0xc79bc87f, 0x3ae8827d, 0x9f812e81,
+0x7f6f8170, 0xa987947f, 0x2481aa81, 0x538fdd7f, 0x237f1442, 0x14e99a84, 0x6e62b068, 0x7b810541,
+0x73a57fad, 0x69977881, 0x9be9cd7f, 0x9a48537c, 0xfccc7f47, 0x7f40a457, 0xbc63b4b8, 0x74bb447e,
+0x36817f12, 0xd371cd1f, 0x7f1ed17f, 0x36624bbe, 0x726c811b, 0x4abe5c58, 0x81de39c1, 0x7b5ae996,
+0xaa818105, 0xa881bebd, 0x5e7f6edb, 0xa8d08165, 0xd495f17f, 0x7fd06098, 0x7fb157fc, 0xc0f661d3,
+0xd09f812a, 0x7f8ef77f, 0x69477f00, 0xf45f8481, 0x0f7fa07f, 0x97f17f7f, 0x6ceaac7f, 0x7f7f8163,
+0x7fbcbe98, 0x698b697f, 0x79c0da81, 0x8b81e681, 0x81817f36, 0x6b814881, 0x7f918ec6, 0xb67aed0e,
+0xc5020b85, 0x81ee8c18, 0x7ff650a5, 0x814f6233, 0x508139a1, 0x814e6aa1, 0xa055487f, 0x3ed75817,
+0x4f7f81da, 0x1ddcfa81, 0x7f7f1dbc, 0x825b6454, 0x2ee6819b, 0x73eb6861, 0xf7fe9516, 0xe3816dc5,
+0x527f3f7f, 0x550d8fb9, 0xf3f82681, 0x5954b562, 0x7e7f20bc, 0x817f6b4f, 0x8108bc22, 0x17fb8113,
+0x7b040f33, 0x586282d5, 0x078c5c7f, 0xe17fc493, 0xdd813545, 0x9ba17f6e, 0x7fccfad5, 0x916f7fa3,
+0x21ce7f1f, 0x1a037f81, 0x8e4d0081, 0x7a360731, 0x167fd77f, 0x96762a84, 0xca5660e3, 0x45f0d87a,
+0x7fd151ae, 0x9e7f22a6, 0x7f817f8c, 0x1501b9cb, 0x886c4184, 0xea627f6c, 0xc53e7f08, 0x1a63d6eb,
+0x481b4a73, 0xca816575, 0x167f7f7f, 0x15814549, 0x6d985e81, 0xa60b1f62, 0xa1818181, 0x33af7f7f,
+0x816c7ff7, 0x7f81b67f, 0x08602281, 0x9a81815b, 0xe6787f3f, 0x7aba4f81, 0x5015817f, 0x1d02d55e,
+0xbaf3e826, 0xc34ca8cb, 0x503e7c82, 0x647a7f60, 0x7f810873, 0x81149d63, 0x7f818181, 0x7f7ff3d1,
+0x11d8dce2, 0x7fe3634c, 0xb63c535b, 0x810e6181, 0x81277f54, 0x817fa2dd, 0x34c34476, 0x848a65f8,
+0x96f911b0, 0x9aa76464, 0x8200816e, 0x6d7f56a6, 0xcdb9e57f, 0x874e836a, 0x93a363a6, 0x41c09883,
+0x8144c072, 0x52a77fef, 0xe8ded5e2, 0xf6857f81, 0x81b79f4c, 0x7f7f71a2, 0x2b5852bc, 0x1f7b43b0,
+0x4c5aa37f, 0x557f3d2a, 0xa17fb581, 0x7349d204, 0x4042ec2b, 0x89cd96bb, 0xd3368281, 0x7f7b8181,
+0x0f28f5cd, 0xe8aa4d23, 0x88628b33, 0x8181817f, 0x758197af, 0x37b43b4a, 0xc081c1a5, 0x8e3c4881,
+0x7fcb0983, 0x55fe14e4, 0x734c7f20, 0x89267c21, 0x7781a4b7, 0x7f7f4eb3, 0x81ac8fa2, 0xddc20981,
+0x7f9b2659, 0x2d7fc65e, 0x818181e8, 0x6a176a89, 0xaa7fb68e, 0x5c5446d2, 0xda7dcc81, 0xc3a5e054,
+0x7881477f, 0x7057146f, 0x00764b7f, 0x7fa87f7f, 0xa4ff8181, 0xf07f7f7f, 0xa37a5cd3, 0x28a881b1,
+0x13e0cb4f, 0x8f7f9804, 0x3d343181, 0x567fda8a, 0x5828c7d0, 0x434fe70a, 0x7fd5fe78, 0x1f818181,
+0x686103cf, 0x62aab5ee, 0x5506cb88, 0x5428ad7f, 0x51908c1a, 0x6d5db4ef, 0x3f7f686a, 0xa6a15a8d,
+0x917f7f53, 0xb77fea86, 0x7f816aa2, 0x6ae581d7, 0x54ad634a, 0xd59ca328, 0x6d815bfa, 0x5f470046,
+0x23818188, 0x817a9419, 0x7f57dc81, 0xb02a81ad, 0xe986757e, 0xd9810681, 0x81f7be26, 0xbcb5cf7f,
+0x63776b56, 0x4358bc47, 0x528aa47b, 0x9e7f7181, 0xd0818100, 0x0d811a7f, 0x7f818155, 0xbe207fc3,
+0xb70d393c, 0x812bbd48, 0x7f7f7d0e, 0x81b5ab00, 0x1ecb8681, 0x24c67fcd, 0x2fc7ed01, 0x5503a881,
+0x7f3d7fb4, 0x9a81559e, 0x6508d5f4, 0x08b56b7d, 0x96d84e10, 0x7fac9ec1, 0x817ca281, 0xd9b01081,
+0xaf81d3a7, 0x8102b34b, 0xa0f98181, 0x7f7f929e, 0x3cf6d023, 0x990ed581, 0x147f817f, 0x81483a81,
+0x68375130, 0x487f1451, 0xa0973d81, 0x87d4d4f7, 0x90633e22, 0xbc7f45e0, 0x2c4ea77f, 0x0811896e,
+0x8d7f52e9, 0x8120aeb0, 0x3d159d81, 0x93cb0097, 0xce81bb81, 0x74194855, 0x4e7f3f26, 0x817f819f,
+0x4531814f, 0x517f7fbb, 0x4981a87f, 0x4c810f4e, 0x3239b67f, 0xa17f194c, 0x81e58e81, 0x7f4890e5,
+0xb07ffa3c, 0x937fa181, 0x447fa516, 0xbab5000e, 0x887f7f7f, 0x8181b17f, 0x7f82d4ad, 0x7f828159,
+0x81b35c57, 0x23bcb1dd, 0x819446c7, 0x7fbf9ea6, 0x817f7fc1, 0x33817f81, 0x5cfe7fb6, 0x7f81fc61,
+0x91477f4c, 0x88994381, 0x65bb447f, 0x198d61ae, 0x817f9f7f, 0x557cbcb8, 0x81fab781, 0x1bab3f7d,
+0x7f812298, 0x7f44fadc, 0x1dc1c665, 0x7fde3a8d, 0x94c37f2b, 0xc7d8eb23, 0xdaae8181, 0x129c68bc,
+0x7f819111, 0x7f32ce2c, 0xf0d17f02, 0x7f6a817f, 0x5cd564e0, 0x7f7fc47f, 0xd6995c9d, 0x70a7d476,
+0x3b7ff299, 0x7f2f237f, 0x4381ac1f, 0x0bd67fb2, 0x81ba594d, 0xa6777fd5, 0x881cc254, 0x8189977f,
+0x7f813b3e, 0xc17f7f7f, 0x3baa3dc9, 0x81007f57, 0x7f810295, 0x81f7a1c3, 0x7f65de7f, 0x4ca87f53,
+0x407f81b9, 0x7f436fa6, 0x399f9581, 0x7f177f86, 0xbc698194, 0x447fd081, 0x81663d37, 0x01faee9b,
+0xc485ff7f, 0xed6ff281, 0x4c6713c4, 0x28b0487f, 0x7f88e8c9, 0x8169016d, 0x7f7f71dd, 0x474bf37f,
+0xb756b143, 0x95caeed0, 0xec7f872d, 0x437f23e1, 0xb42b3030, 0x662f8d76, 0xd4818f71, 0x8161fd50,
+0x8863663e, 0x78256b06, 0x661b549c, 0x8e817fb4, 0x06b98354, 0x5a905ec1, 0xec960281, 0x2c30bbe3,
+0x937f7f63, 0x6ade543f, 0x7f7f9f5e, 0x98dac336, 0x81327f73, 0x677f6d46, 0x637f5c7a, 0x82478181,
+0xedbc9014, 0x9a7f431b, 0xb2d25981, 0x7f898181, 0x81815ab1, 0x2d7fa683, 0x7f03b2b0, 0x89bc7f18,
+0x81947fb8, 0x1995817f, 0x78e54c17, 0x816cb1c3, 0x1a2681e0, 0xac3a7f8a, 0xdff245bb, 0x422ada81,
+0x1ed1e07f, 0x7f6f5276, 0x8481db81, 0x9a817fe4, 0x1e132dab, 0x5c7fdf7f, 0x7d81ac7f, 0x937f7fe0,
+0x49568171, 0x3f81907f, 0x81466896, 0x7f667f2b, 0x577f9a7f, 0x7fd73c0b, 0xc946df59, 0xd72f767a,
+0xb51d326a, 0xc77f4f7c, 0xbc814bf5, 0xdae47fab, 0x40a64f73, 0x898188ea, 0x1c751c97, 0x7f8ed37f,
+0x81b9585f, 0x6eb6758e, 0x5e11b233, 0xf0a47f81, 0x81709065, 0x447fbcc6, 0x58d1819b, 0x9fa36e74,
+0x817fa9f3, 0x9f506f66, 0x81cd047f, 0x187f8136, 0x9d810500, 0x64cd8eb0, 0x7f817f7f, 0xf850e2f6,
+0x81b61869, 0x606a66c4, 0x81638118, 0x20dca772, 0x6287b381, 0x8e056874, 0x086d99b9, 0x54438172,
+0xb281697f, 0x5c470c16, 0xb0340523, 0x7f81b96b, 0x4451b8a1, 0x7f7fe6db, 0x57887f3a, 0x14438129,
+0x927758ce, 0xe27fa16a, 0x8c30bd81, 0x7b7fff47, 0x41e58175, 0x35812462, 0xe7ff4358, 0xb4c04079,
+0xa87c108b, 0x81aae151, 0x3d797f8e, 0x33007f3c, 0x27c2715a, 0x188c7fba, 0xb481e081, 0xe026d96e,
+0x402d8185, 0x7066ffcd, 0xc5914e7f, 0x977f655f, 0x7f68b5d9, 0xcad3235d, 0xc77f8e81, 0x1cb4817f,
+0x7f473044, 0x15168159, 0x7f5c81d2, 0x7f99bd6f, 0xb7ab0006, 0xf2487f41, 0xd2de0f82, 0x6a81627f,
+0x2e8e87fa, 0x25187fb3, 0x6d31b393, 0xdd3d4b81, 0xa942a11d, 0xd21281d0, 0xe6b0e17f, 0x8a5646ad,
+0x907f8124, 0x7f81d2e2, 0xd8c8e17f, 0xeb8b7f81, 0x3ad8bb1a, 0x7f7fbf81, 0xcb30ddf4, 0xa09134d8,
+0x8187d2bc, 0x4949168a, 0x53998181, 0xd5e21412, 0x81811cb0, 0x11349a98, 0xdab2afe4, 0xd97f7f7f,
+0x7f7f8181, 0x327e7fd4, 0x97818114, 0x827b6517, 0x5f0aabbd, 0x7f905b81, 0xafdde37f, 0xcd40cf7e,
+0x7f56817f, 0x95810db2, 0xaf7fd131, 0x8181811a, 0x9e7f8f73, 0xe250d481, 0x7f2abb90, 0x5f7d981f,
+0xb3257f91, 0xec657f2f, 0xfa815e3a, 0x6e223481, 0x6c00eaa8, 0x7fb19394, 0x8799c181, 0x810e7fde,
+0x7d478181, 0x7fdc76bd, 0x035f8102, 0x627f447f, 0x7ff7ff7f, 0x81265db0, 0x6e81ac2c, 0x8802d277,
+0x81c3f781, 0xec81d259, 0x817b7d81, 0x7a5b7f7f, 0x4953ab64, 0x896a52a0, 0xb28111f8, 0xd25d8f0e,
+0x7f818174, 0x813f7f3b, 0x7fff7f72, 0x0fa87f88, 0xbcfad248, 0x507dd360, 0x7fae817f, 0x81f27f81,
+0x72fa4d1b, 0x5a21d2a1, 0x6381817f, 0x5fb55ab3, 0x8ec3816f, 0x257f4ab5, 0x4c74ef81, 0x19a3f27f,
+0x752746c6, 0xe17f8181, 0x7f7c8199, 0xdb9981a1, 0xcb984bd7, 0x815ea881, 0xdc50e0d9, 0xb9366f24,
+0x7f455dec, 0x817cb181, 0x424856f3, 0x67ab02a5, 0x0081657f, 0x105e8191, 0xeab817a7, 0x3a7f6d5a,
+0x425a1503, 0xeaac317f, 0x7fbe2cd0, 0xd2819a6d, 0x34887f6b, 0xef7f817f, 0xc3577f8f, 0xb124a664,
+0x9981167f, 0xb4946031, 0xb8817f7f, 0x81c570ed, 0xc9c88181, 0x3a4cdf81, 0xd9514781, 0x8948c0fe,
+0x2d5a8111, 0x68817f7f, 0x6be87f81, 0xa62c0381, 0x857f7fba, 0xa8c4ab7f, 0x279cc57f, 0x98816e0b,
+0x817ff70e, 0x21536d81, 0x875ca181, 0x008164f2, 0x96955a53, 0x57a53917, 0x6517faad, 0xdecd7f49,
+0x81819c32, 0x7f7fc25d, 0xfd81b181, 0x9f818166, 0x9c7f107f, 0x7f15b67f, 0x81636692, 0x18f276b5,
+0xbb9d08e2, 0x6c25797f, 0x417f43fc, 0xd0a37eca, 0xd4702b8f, 0x5c72ba00, 0x7f9dcdbe, 0xd4ad8122,
+0x39f1a381, 0x9b419181, 0xac816ace, 0xfb7c81e6, 0xdd7f7fb9, 0x2a27bb61, 0x3781499f, 0x7f6a7f93,
+0x4bbf81a5, 0x4a07e527, 0xab4f8143, 0xc2703e81, 0xb1813d81, 0x81747ba3, 0x11cb7f81, 0xd7a385fd,
+0xc9d0c981, 0x647fe554, 0x811d3681, 0x18817fdc, 0x81811bef, 0xd381845e, 0x466539da, 0x1081617f,
+0xb5b99261, 0x56cd7f8d, 0xaaa79076, 0x7f814a81, 0x7f7fe313, 0x7f7f8170, 0x5df9817f, 0x102fe72a,
+0x0c7f79ae, 0x4368d9e1, 0x6b0af013, 0xc13ed920, 0x98ed6a7f, 0xc494d37f, 0x81816a35, 0xc27f7f4f,
+0x451a7f3a, 0x4c812e15, 0x032f49b1, 0xbd7f02a1, 0x507f0d67, 0x9c5f007f, 0x8712c21b, 0x6ba7385a,
+0x7fb62c58, 0x7f818bcf, 0x7f7fc57f, 0x4141698d, 0x1e817f5c, 0x813fbe97, 0x6f817fa8, 0x51637759,
+0x7f4ed17f, 0x81ac2c28, 0xf1c75bfd, 0x1463c260, 0xcb7f7fcf, 0x7483675a, 0x40eb7f7f, 0xad7f81a2,
+0x7f817f50, 0x66816683, 0x407f8c81, 0xaf8190b3, 0x81d347f2, 0x5a215a81, 0xca7f8132, 0xd0528ef2,
+0x8129c3a0, 0xa4a7c52b, 0x0b816383, 0xbc7faf5d, 0x38d34e66, 0xff65a68b, 0x817999d7, 0x5a620463,
+0x885a817f, 0xad947f81, 0x7f1c902c, 0xaa0e9003, 0xd7ccb971, 0x64a68198, 0xb2337f11, 0x81a4d5dd,
+0x817f81fd, 0x7e814f81, 0x817f3da2, 0x8381fd08, 0x81a35a81, 0x7f0099e3, 0xfa1f5850, 0x6773efd6,
+0x6e7e5268, 0x138e7fc5, 0x81449526, 0x815781c2, 0xca2faff4, 0xf37f7f64, 0xaa080369, 0x7fb07f5c,
+0x81397fcd, 0x972dbbbe, 0x813f8e81, 0x812ed669, 0xbf76e310, 0x0be7f981, 0x81929781, 0x577f8644,
+0x0cc7e94b, 0x7e1d2995, 0x7ace8119, 0x81814867, 0x617f505c, 0xb9ad7f3e, 0x3ad56e18, 0x81e5b75d,
+0xca874729, 0x7f81ad9c, 0x5881813e, 0xbb240a46, 0xcc908863, 0x6fc2815b, 0xfa4bca7f, 0x8f817319,
+0xfe819dae, 0x81814281, 0x867f21e9, 0xfdaa0500, 0xd08c9e41, 0x99ea3b94, 0x7fa9394c, 0x81337f7f,
+0xb57f23f9, 0xc59a8181, 0x815a7fa4, 0x16c9c781, 0x9f17b97f, 0xb2817f89, 0x7fbd95a5, 0xb27f7f8e,
+0xa5e0b5c9, 0xefabff4c, 0x7fb7ea7f, 0x9aec9841, 0xb0e1784b, 0x81b29d1d, 0x7fba1781, 0x81f7086b,
+0x8129f27f, 0x818181ea, 0xc1ce8113, 0xfbfbaacf, 0x9d4c14ed, 0x40c1ea26, 0x6051cd3f, 0x6cd681be,
+0x4c469beb, 0x17637f22, 0x81882850, 0xbc91d4be, 0x6e318122, 0x95628181, 0x6c7f7f33, 0x6c47537f,
+0xa8d27f7f, 0x3de9b857, 0xc73a2c81, 0x4e7f8101, 0x3b7f817f, 0x3a78815c, 0x1a67771c, 0x5a8286a4,
+0xf7b98166, 0xd3b37f81, 0x4eb17f2f, 0xc1098181, 0x969a7fc5, 0x0bfbfbb9, 0x316f8194, 0x7f6f8184,
+0x484ec57f, 0x364a007f, 0x1f81817f, 0xc5b9ac81, 0xbb81b97f, 0x1f82647b, 0x2c4b4600, 0x89813d0c,
+0x75ffc374, 0xad7f5e23, 0x0af481a5, 0x3d0e3eee, 0xa49481cb, 0x39815284, 0x8187f438, 0x1a7c7f7f,
+0x817fa8b5, 0x7fca781e, 0xb401d779, 0x424c7fda, 0x3c429944, 0x488d7f7f, 0x78966481, 0xaf687f5d,
+0x89052b37, 0xbf7f407f, 0xe07fce79, 0x9fdf45d3, 0x8181de83, 0xe67f7f5a, 0x7fe6a3fa, 0x38705b81,
+0x81518232, 0x7d5d9978, 0x270372c9, 0x818568a2, 0xca7f66c8, 0x43a06a3c, 0x831b4281, 0x853a7fb0,
+0x8178cf45, 0x7f7f7fbc, 0xbdb69d20, 0x525c5392, 0xc081d97b, 0x18217f16, 0xb6ec7659, 0x0861819b,
+0xc48b819e, 0x817f38f2, 0x7f7f787f, 0x7f365c81, 0x64a9f87f, 0xe4e67dac, 0xd1580073, 0x81817f54,
+0x597f2ced, 0x575cabd0, 0xce81d781, 0x819c81a4, 0x7f5aceb2, 0x7f57ac33, 0x81e4d87f, 0x1df5d24d,
+0x7f4f7f7f, 0x7feb81e8, 0x6a81816a, 0x7f668144, 0x3431b13e, 0x3cc6b82b, 0x815dc38c, 0xc65781b0,
+0xae897f81, 0x9751537a, 0x817f0e88, 0xb15e69ea, 0x6f7f7f90, 0x24e681a4, 0xd97fa74f, 0x1881fa61,
+0x2e743b95, 0xeb817f7f, 0x7fcd677f, 0x317f7fec, 0x9246f573, 0x2e7fae81, 0x7eb72b7d, 0x4d8144bd,
+0x3f7fcb81, 0x7f619f9d, 0x51e7b87f, 0x81688e5c, 0x0d3d55d4, 0xb9b0817f, 0xaec17fd2, 0x017f2ffb,
+0xa8af3550, 0x81187f3d, 0xa033427f, 0x38c83442, 0x7f7f8181, 0xe38a35cc, 0x1f00c081, 0x7f297f7f,
+0xd17f7f81, 0x6e81c637, 0xc4387945, 0x26839381, 0xd481e281, 0x0b72bf12, 0x057f9e7f, 0x5eef2c81,
+0x9216b507, 0xd7906948, 0x637f627f, 0x0a11bfaa, 0xd605bd9e, 0x81ea9c81, 0x72f03c3d, 0x4f819f7f,
+0xc563d7b7, 0xad5a3899, 0x7ff15105, 0x7f692e81, 0xbf79c6c9, 0x3c738181, 0x7f4a0597, 0xecb5d588,
+0x7f7f2b3c, 0x888144dc, 0xbd7f6e61, 0x6b6e81e5, 0x5d5b1caf, 0x4c743b73, 0x7f727f2f, 0x667f3551,
+0x8fc63881, 0x81565a6c, 0x61817f3d, 0x0aa5d083, 0x81215ea2, 0x60461f02, 0x0d7fc1ee, 0xe77f813b,
+0x40d9767f, 0x1c817fa8, 0x7f815e7f, 0xa5447f7f, 0x2ac45607, 0x81417f7f, 0x00aceb81, 0x8181cfe4,
+0x817f81aa, 0xaa3eda6e, 0x7fbd4adc, 0xf8bb5deb, 0xf0867fe7, 0x3f33a481, 0x819e1bb7, 0x81458b81,
+0x9573148c, 0x986d1ee0, 0x45deb557, 0xcf7f688f, 0x23d5b1a9, 0xc3fdfeb0, 0x3fad633c, 0xe4be21bb,
+0x81819acf, 0x567f1571, 0x5a2c130b, 0x7f14e77f, 0x37d38908, 0xe55da5d4, 0xf2e16033, 0x23db8181,
+0x7f8160bf, 0x177fa17f, 0xa00f4dc8, 0x47818153, 0xf7817b81, 0x529681a0, 0xeca34413, 0x7f49a081,
+0x2c9c81c3, 0x8181ae14, 0x8125425f, 0x39b1123a, 0xe4e781b8, 0x4f83e39f, 0xd298507f, 0x49e92f7f,
+0x26bac981, 0xf0a27f49, 0x81d281a8, 0x993dc681, 0x48c19a95, 0x98af7a6f, 0x3baa107f, 0x899d3c00,
+0x7f814467, 0xc6e821df, 0x05226d8c, 0x9a813aba, 0x8d818117, 0x81688142, 0xd9244ff9, 0xdb0da3d0,
+0xbef62430, 0x55868181, 0x8176d83f, 0x33813ef2, 0x941a6534, 0x81d0741b, 0x4e37927f, 0x6b2515e2,
+0x8181aa81, 0x9581605b, 0x678113a0, 0x6e7f2681, 0xc1819f81, 0x813ca072, 0x7f81b97f, 0x3d95817f,
+0x81233f81, 0x911e90e0, 0x76a6d5d9, 0xc6b5057f, 0x5262b67f, 0x81812681, 0xa68110fa, 0xc74a6049,
+0x478eef7f, 0x8ea225d1, 0x77fd31f8, 0x7f568422, 0x81207f81, 0x94628118, 0x81a47fc0, 0x7f637f7f,
+0xa8898caf, 0x8125b97c, 0x3de89f9a, 0x7f81bb71, 0x24183337, 0x88c0437f, 0xe97f6bb5, 0xa0dc007e,
+0x36dcd381, 0x887cd84a, 0x0a817381, 0x9b818176, 0x46327ffa, 0x487f926d, 0x07508107, 0x7f7ffe06,
+0xcefe7f7f, 0x028172d9, 0x7f7381da, 0x81f97f58, 0x81818d0d, 0x06597fcb, 0x817f7ff5, 0x8e999e7f,
+0x7fd22e8f, 0x81766ae5, 0x817f7f86, 0xca81bf7f, 0xfd144e10, 0x947ff681, 0xa9817f4f, 0x7f187748,
+0x81819081, 0x8170ca74, 0x7a5d9c79, 0xbe483f81, 0x6f7f9fa0, 0x7fb0505b, 0x7fc7f15f, 0x81793081,
+0xbc7bb830, 0xce7ff56d, 0x81e02681, 0xa5968113, 0xa47fd381, 0x81a68197, 0xf4ca2a7f, 0xd220acb6,
+0x7f837ae6, 0x5c812609, 0x847fd94f, 0x6adf9afc, 0xb17fc822, 0xfd1b82ec, 0x8dfbf181, 0xbe008215,
+0x1dfd1081, 0xd1768cc8, 0x76818144, 0x7a917f9b, 0x0c6aada1, 0x16d3654e, 0xea1a8d7a, 0x9508c955,
+0xbfe88c4c, 0x593fa4d6, 0x8c45818e, 0x7f7f557f, 0x7f1dcc7f, 0x984c8181, 0x6b817f7f, 0x6c8131d9,
+0x4c8185e0, 0x678457da, 0xa6bb81f1, 0x79c0ca67, 0x7fe2c433, 0x257b8381, 0x37323c7f, 0xd768283d,
+0xdde15d81, 0x7996d7be, 0x7fd38110, 0x81706b66, 0x7feb685e, 0x817fba69, 0x8182ec99, 0x53c67fac,
+0x7f7fc1da, 0xf8afcee6, 0x09816181, 0x8a4f9e6c, 0x5b94c2c6, 0x812b3b97, 0xd0237f7b, 0x81817f64,
+0x81768181, 0x7f24b359, 0x1f326247, 0x7f7f5e38, 0x81267f42, 0xff8781be, 0x85817758, 0x7f172281,
+0x81599137, 0x22423760, 0x81c8e277, 0x7f46f381, 0xbb668184, 0x7f547f7f, 0x4aa27e87, 0x217f8c35,
+0x925eb643, 0x48b89ac8, 0x7f9d8181, 0xffe3a47f, 0x3e810ac3, 0x7f2c7fa0, 0x847f4681, 0x8106818d,
+0x81787f10, 0x3e2a5fa5, 0x81a38181, 0xfa7f712b, 0x74dd166f, 0x0d2819a4, 0x557f426d, 0x7f818158,
+0x1b817f93, 0x927f0e81, 0x7f1b9a56, 0x78c78898, 0xbb07493f, 0xd4b37f1b, 0xae8aafef, 0x7fc2c3a0,
+0xb1bb3d19, 0x1189816f, 0xc2813c7f, 0xc27ff3be, 0x7f3bc183, 0xd1817c7f, 0xc6817fa9, 0x0f9ad87f,
+0x7f10c96f, 0x517f7fca, 0xb9958181, 0x9870315a, 0x1c091752, 0x7f0a4746, 0x509ec654, 0x168190cf,
+0x8e3d0000, 0xc0817f69, 0x7f8a6f85, 0x7fe8b634, 0x2631815a, 0x67942d85, 0x5a7f8b38, 0xa2a07e1d,
+0xfa816e81, 0x3c81bc7f, 0x813b7fed, 0x81aae601, 0x7f81ba4e, 0x81817fa7, 0x32453b81, 0x8e8aa579,
+0x7f81d1e2, 0x78a4847f, 0x81819b57, 0x13137779, 0x81c5557f, 0x7fd18195, 0x049928a6, 0xedd7897f,
+0xa0816adf, 0xb0815ab9, 0xb2c73281, 0x7f01a4b5, 0x5ebacf94, 0x7fbb8181, 0xa97f7fd1, 0xec9d5661,
+0x3da381ec, 0x4445a98f, 0x291ca97f, 0x9d948d9b, 0xf9f0d592, 0x8b1fcd81, 0x81d54fea, 0xdb81600e,
+0x69a781c0, 0x817f78f0, 0x818181a4, 0x36810aa0, 0x646f9f7f, 0x70c25f34, 0x9471817f, 0x3e5081db,
+0x5cc37f74, 0x7f0a814d, 0x990ba07f, 0xe55babcf, 0x59813a5b, 0x26b96e7f, 0x199c8125, 0x812e0db3,
+0xe581b498, 0x8174a97f, 0x440f7ff0, 0x812d487d, 0x227f9881, 0x7fcb77b8, 0x157fc3e9, 0xdbfcff7f,
+0x81085d64, 0x44a9bdaf, 0x42998481, 0xb885b098, 0x94817f94, 0x81d3367f, 0x48394f81, 0x817a697f,
+0x64671052, 0xbd81153c, 0xd8637f02, 0x558cd081, 0x8485d766, 0x9ea7c1ae, 0x81b2afc6, 0x81657f40,
+0x62d4f31b, 0xbd7f7f8b, 0x7b909c60, 0x2f81734e, 0x5060ab22, 0x10b62281, 0x1d83812a, 0xc7aa8866,
+0x31444bc6, 0x815d6281, 0x822f8f58, 0xb75aa477, 0xc21f9a27, 0xd3545a36, 0xad30af19, 0x6b813f7f,
+0x0000188d, 0x7fa14165, 0x177a549f, 0x8f9e037f, 0x1cd8f7f1, 0x1e7fb9d0, 0x557363c3, 0x7f9c1a26,
+0x769fda1c, 0x7f7f8192, 0x157f7e7f, 0xcb61817f, 0x4a9d3d64, 0x8148aa58, 0x637fd2d0, 0x308481d2,
+0x95a8324b, 0x7f7f3681, 0xbe407f4b, 0x2b769128, 0x8156657b, 0x7f7f7f7f, 0x45c97f0c, 0x81577f8d,
+0xe90f8181, 0x817f689e, 0x2781f97f, 0x7fd1a966, 0xa07f6a14, 0xddbb7fab, 0xa53f57fe, 0x56c77f81,
+0x35e0837f, 0xde818164, 0x7f5bb62f, 0x7ffe814c, 0x4a8fe8cb, 0x7f7f5b81, 0x81137fbf, 0x8157a240,
+0x81048171, 0xa9d17f18, 0x8458817f, 0x81424a81, 0x9d817fb2, 0x31ef5181, 0xd081507f, 0x567b6e81,
+0x91a3276b, 0x7f257f81, 0x98d9a897, 0x8eb301e8, 0x7f8a11a8, 0xac7fb482, 0x1420dd00, 0xa629e6aa,
+0x98628159, 0x4a815981, 0xf081eda7, 0xaf5ac540, 0xde387936, 0xf6f2cc49, 0x7fa88152, 0x4587a0b6,
+0x0549f981, 0x7f6a817f, 0x7fde8f79, 0x7fd77f81, 0xbd58b276, 0x477f5db5, 0xa3d0817f, 0xb3811743,
+0x378d5f7f, 0x889c8175, 0x00d9bffb, 0xa386f71b, 0x9b9b5a7f, 0x05301691, 0x077afd81, 0xc9813d20,
+0x655b8118, 0xb1810fc4, 0x2ae18160, 0x7f818141, 0x81c2db39, 0x7f3cd124, 0x52c77f7f, 0xcfdb5141,
+0x8113b964, 0x0c0bd10c, 0x8137637f, 0x56bf8149, 0x5fceb17f, 0x01857f81, 0xea81917a, 0xd67a3c7f,
+0x5fd93fa4, 0x93720000, 0x31e08193, 0x957faa7f, 0x30e1b019, 0x557f19fc, 0x327f5c1c, 0x81e98181,
+0x567f6f7f, 0x817f60bc, 0xe4f481a2, 0xb1f2aa31, 0xba5b7f36, 0x4d2bd790, 0x914d4e38, 0x817f7722,
+0x10fb7fcd, 0x2f7f3026, 0x8c7f3c92, 0xbe81392b, 0x1cae7f78, 0xb6cb6581, 0x2f52db43, 0x49cb7f71,
+0x7fe6dfe9, 0x232e7e66, 0x1497965f, 0x67316ca2, 0xaf71c384, 0x727fbd7f, 0x5e7f6ab9, 0x7b5181bd,
+0x38e38170, 0x48cae97f, 0x4a81669c, 0x3d2d7f7f, 0x69508352, 0xac988181, 0xe093a587, 0xbd907753,
+0x817fb87f, 0x894ac8c0, 0x9f7f9ccc, 0x6336a2de, 0x7f7fd741, 0x75b723c5, 0x396e7fc6, 0xc5c0819d,
+0x813c7172, 0x816a7fa6, 0x95bdf475, 0x69b21f5f, 0x817f7f06, 0x4c90685e, 0x7fea7f7f, 0x7f958f0b,
+0xf6a7937f, 0x5b6caa7f, 0x5fbf7f30, 0x4c6cb506, 0x7f097f95, 0x8181d881, 0x817f34b8, 0x937f7089,
+0x830181a8, 0xa6b2817f, 0x4912930e, 0x7f8fd67b, 0xf69bd281, 0x7a5fd981, 0xc4aa7f7f, 0x817f819b,
+0x9c05c2f5, 0x8f579881, 0x5aa4818e, 0xc14dc0bd, 0x7fce0981, 0x147fcd41, 0x817fe75b, 0xf8e1a54f,
+0x817f8981, 0x1334585f, 0x810281db, 0x146db1bd, 0x25347f7f, 0x9cbf987f, 0xb6736d0c, 0x814ab481,
+0x0fd1653e, 0x7fd67170, 0xe564ce22, 0xd05df37f, 0x10871f96, 0x9dc9e03e, 0x53bc03ad, 0xc47fd1c5,
+0xaa4dd88d, 0x00004ad1, 0x8a35a85a, 0xcff89253, 0x64d0819b, 0x149ab17f, 0x819f7a33, 0x49fc81c7,
+0x1881d433, 0x196171c4, 0x784dbdae, 0x8193148e, 0x75b63674, 0xdf81e4ab, 0xad299a6d, 0x8168b3d3,
+0xeea6426e, 0x81c99981, 0xb17f647f, 0x814ec5d2, 0x2e3b16a8, 0x81597f81, 0xa97f8181, 0x2caec714,
+0x387906fd, 0x818181be, 0x7f7fbcb3, 0xd55a81e6, 0x8134ae81, 0x7fc3817f, 0x9853ee56, 0x5881999d,
+0x9ea98184, 0xfa8181a6, 0xceff5978, 0x688e505f, 0x7fbbb830, 0xc678d06a, 0x3c90e3ae, 0x4b6d7f23,
+0x24cd7f58, 0x7f92c181, 0xb1a97fc2, 0xba872981, 0x66ec49a3, 0x7f7f26b5, 0x7f8163b8, 0xae7f7f7f,
+0xde7f3781, 0x4147d381, 0xe191cb81, 0xd45b81c3, 0x474c9e91, 0x55b546db, 0x0e81b5a6, 0xb5b976aa,
+0x7f7f7f3d, 0x968a81d7, 0x7f1f9333, 0xb37f7f7f, 0x2f7f6418, 0x2532a07e, 0x8b81bf44, 0x89818144,
+0xa1572ffd, 0xfebf182a, 0xe54d7f5f, 0x107f8949, 0x00210081, 0xc53345a1, 0xa2818ff6, 0x81c181e2,
+0x9b81f1d5, 0xd1061965, 0xb7a47ff0, 0x81817fb0, 0x18bc6181, 0x7ccc5981, 0xbe2b9dae, 0x038f8173,
+0x7f3bec4d, 0x81ec81b0, 0x817b9a6a, 0x817cb47f, 0x7fa98fb5, 0xb0355e36, 0x8dad6109, 0x4b138116,
+0xa3e19d81, 0x449ab793, 0x984acd21, 0x168aeb94, 0x85baac5d, 0x7fac8173, 0x81ad81d9, 0xa34abb7f,
+0x81ca817f, 0x817f6074, 0x7fbc0000, 0xc79b108a, 0xc06990ca, 0x317f12cf, 0x9281e743, 0x90c681fd,
+0x17b57f3a, 0xbdd05681, 0xfafcef3d, 0x6f0a0d81, 0x4337197f, 0x7f4881af, 0xb592de71, 0x44c24881,
+0x536f9532, 0x21506996, 0x68489d79, 0x4aaf4312, 0x740c9447, 0x657f6673, 0x637f7f0f, 0x81205569,
+0x8fb3fe9c, 0x4881995c, 0x8163cfb0, 0x17a4817f, 0x7f59ad84, 0x6d0c30d2, 0xbdc6b981, 0x1bfa8155,
+0x7f22a8ca, 0x9c9a817f, 0x3bdba778, 0xf6817eb1, 0x07858141, 0xc037817e, 0x3f2c868c, 0x8181d07f,
+0x02b704dd, 0x467f6bc8, 0x4881ab7f, 0xa25c814e, 0xcb4d8e6e, 0x477c7f88, 0xd991aecb, 0x9c81b243,
+0x12d6b7e6, 0x03d745a0, 0xdf0bc9d8, 0x52637f85, 0xd557817f, 0x96342cc0, 0x81423981, 0x70657f81,
+0xde8181cb, 0x8a81de2b, 0xf55d3e2f, 0x2d717f81, 0xba81289e, 0x99c27f7f, 0x81c5817f, 0x90be817f,
+0x918ea3dc, 0x8de39c22, 0xce577f93, 0x3f7f8186, 0x9e817f7f, 0x816c8aeb, 0x3d818e2e, 0x8181be9f,
+0x0fb9429c, 0x3c7f5155, 0x81275b24, 0xa47fda70, 0x84cc88c5, 0x70797f81, 0x812eaebb, 0x16a1314f,
+0x7f7f817f, 0x38b61a4c, 0x7fb7388b, 0x7fb6e681, 0x5581987f, 0x9281c37f, 0xc45b7f7f, 0x8b35b97f,
+0x7f7f7f81, 0xa23c0e3b, 0x7f9b2083, 0x9cc39283, 0x8843699c, 0xc07f81dc, 0xa1f98b7f, 0x81d245b5,
+0x8181597f, 0x1aa93681, 0x000001df, 0x81574196, 0x7f3f3df1, 0x6fc8c7c4, 0x5e4d7f8c, 0x81b4dc44,
+0xd56f7f8b, 0x8116a381, 0xb03f5f81, 0xf5020639, 0x7f02b25c, 0x55818143, 0xab658147, 0x357f1daa,
+0x296ac18e, 0x8186e57f, 0x7464272d, 0x417863e1, 0xeae67831, 0x1a605547, 0x45ffb1d5, 0x7f066c81,
+0x81bfb062, 0x4cb8f381, 0x81817f7f, 0x145a4bc0, 0x2e813d7f, 0xc6524d71, 0x81818181, 0xc9d681fb,
+0x2a5db57f, 0x12c97fa7, 0x317f7f48, 0xf9b88131, 0x354c1389, 0xd77f8155, 0x811b4dbe, 0x9d3d457f,
+0xf9dc2f28, 0xb01c8181, 0x5a81716d, 0x7f7f3d81, 0xd87f867f, 0x817f0581, 0x564f8181, 0x7f678181,
+0x5cdd7f91, 0x07ad3f96, 0xb4dd9e34, 0x7f7fd50d, 0xa67f81b6, 0x01353420, 0x6481798a, 0xa5a68181,
+0x9c7f816e, 0xafc73c84, 0x427b8d32, 0x1bbb817f, 0xd45c7fa5, 0xb0c2ec48, 0xb459aa7f, 0x70818181,
+0x5ec983e7, 0x81294066, 0x81447f6f, 0x9b819f5e, 0x7fb1a47f, 0x81b8a081, 0x5f91c62b, 0x7f63de81,
+0xdc813ff2, 0x817f81ce, 0x3d677f0b, 0x4da9dae5, 0xcc8190d9, 0x1d7fbbfa, 0x5ffdd2b7, 0x69624b8f,
+0x50963e7f, 0x7f7fa281, 0xcc4b8149, 0x8102da7f, 0x3f7f9df5, 0x435732c5, 0x7f07d881, 0xb20fe07f,
+0x4896ac46, 0x44b3ed39, 0x7f7f8127, 0x9e7f81e4, 0x8181b15f, 0x7c818132, 0x7f41980b, 0x299ea18d,
+0x7f196381, 0x7fc95f36, 0xbecb7f35, 0xdaa80000, 0x292a7fe2, 0x3292947f, 0x7f98e9c5, 0xb77c984e,
+0x74847f81, 0x917f487f, 0x537766db, 0x30b71c7f, 0x81237f81, 0x9d7f9542, 0xfc75737f, 0x72499c3a,
+0x6885625b, 0x7f061c6d, 0x818e7f81, 0x5b814381, 0x81f17f4d, 0x4d81b6cd, 0xd2f7d778, 0xe5e9d5db,
+0xf7f081c2, 0xb80b0481, 0x975658f2, 0xe8b1197f, 0x81634b1a, 0x7f6bd994, 0xdbd67f7f, 0x757f3406,
+0xc4d07f7f, 0x63e7507a, 0x102b7709, 0xe08cf836, 0x1db26230, 0x1a814510, 0x40679bdb, 0xf336a87f,
+0x7fc3bf7f, 0x818e69c8, 0xf67aa2a3, 0x75425b7f, 0xa4b654c1, 0x7f7f5ac1, 0x5f10488d, 0x57dc7f30,
+0x7d817f81, 0xfc81237c, 0x388a81d6, 0xbce67f42, 0x7f7a7f56, 0xfbb7ff87, 0x467f4681, 0x22dc9b7f,
+0xba939029, 0x4a44c1a5, 0xa4be71d3, 0x7ffb3589, 0xfefae9a6, 0xa1963e4f, 0x66b3d6b3, 0x81506f83,
+0xee7f32ee, 0x7ffe7f7f, 0xbf505c53, 0xd769ca7f, 0x69c267d2, 0x7f9f8346, 0x3f7f7f7f, 0x48813f51,
+0x7fed8115, 0x9e81aef0, 0x7f7f22e3, 0x819fa190, 0x55457f81, 0x7f813b81, 0x251b4a0d, 0x6476818e,
+0xc4d8da21, 0x52fd2d4b, 0x81943c7f, 0x273ce481, 0x7d53817f, 0xcd9d9058, 0x754151d2, 0xcb9f365b,
+0xac73ecd4, 0x7ffd9a4b, 0x577f2d92, 0x41c60481, 0x81297fa9, 0x9f7f850f, 0x8c513329, 0xe854c39f,
+0x34bb4a20, 0xc37f5178, 0x0787cc22, 0x6c86717f, 0x75815a48, 0x2023777f, 0xd37fdd7f, 0x6a819cf7,
+0xdc44f5e3, 0x258157bd, 0xe8e7813d, 0xb9118195, 0x9a5a4581, 0x2d8c517f, 0x0c728161, 0xaec769ac,
+0x1f554191, 0xfc8be381, 0xca703b7f, 0xab813d5a, 0x3f7faced, 0x707a2181, 0x7f1b8c7f, 0x7fad7f99,
+0xb307377f, 0x6c9d5b81, 0x7f3bfac6, 0x8182567f, 0x5a246cdd, 0x55408105, 0x277f7f84, 0x7fa09c52,
+0x817fa355, 0x404fd360, 0xecbc4281, 0xc6897f20, 0x7881ae81, 0x407f81e1, 0xfb35587f, 0x9b67fb81,
+0x7f8103ad, 0x36708155, 0xab81f381, 0xa3fe6417, 0xa97f7f98, 0x3cf7cd7e, 0xae1b4dc6, 0x8ac2f749,
+0x7fb35e89, 0x07e0743b, 0x7f818135, 0x81a15481, 0x93819bbc, 0x81f68114, 0x194ccd81, 0x81527f7f,
+0x583ba074, 0x91654d78, 0x4e81398d, 0x0c54567f, 0xe453d054, 0xd37f8175, 0xd57f81ea, 0x345e5b7f,
+0x3589262c, 0xa8b886de, 0x811b8197, 0xbbc890ee, 0x8db0b9dc, 0x9fcb817f, 0x39817f7f, 0x55391fe5,
+0x7f7f3f92, 0x5a7f55d9, 0x747f3c71, 0x43e31da0, 0x67dccd81, 0x81ddffaa, 0xc32a5881, 0xb9816d7f,
+0x424d7f7f, 0xb97fa435, 0x7fdb7f81, 0x48fb8182, 0x8181407f, 0xba7fa88d, 0xa16d467f, 0x81cb8a81,
+0x8181a66c, 0x8c747c1b, 0xb4810470, 0x8133dc82, 0xbc77727f, 0xee981437, 0x7f2e8981, 0x26be1939,
+0x12da817f, 0x81a1da61, 0x1d2af46f, 0x7f3781f5, 0x29500000, 0x657fc67d, 0x81817f7f, 0x49e406cb,
+0x0b787faa, 0x7bc2643b, 0xe4e4f0ec, 0xd28175e5, 0x7f425abc, 0x900c1828, 0x7fa85f7f, 0xe8365361,
+0xc5d87fa2, 0x81baffab, 0x81818612, 0xbcb5af1b, 0x7a7e5d7f, 0x81b07f7f, 0xa27aba93, 0x817fd881,
+0x818481b0, 0x7f8d1c22, 0xb48c869b, 0x977f9429, 0x9a8c7fc5, 0xb8d9b270, 0x6e340e88, 0x670f7f39,
+0x7f4efebd, 0x41b5b57f, 0x7f277f0c, 0x5f177f23, 0x94818592, 0x68362b87, 0x6681ea7b, 0x7f187fd2,
+0xa38131ae, 0xc97f698e, 0x25bd8155, 0xe37d88d6, 0x2d018f4d, 0x391e7f7f, 0x7f7f845a, 0xe8f8ee81,
+0x4beb9d63, 0x7fd47ff8, 0x5e5781ec, 0x237ffe3a, 0x8e818168, 0xa1e781e7, 0x85de7f81, 0xbaaad281,
+0xa7486481, 0x555e737f, 0xa2b28f7f, 0x70635a47, 0x7f837fd8, 0x64ea8641, 0xc20c7f81, 0x0b9e738e,
+0x698e8145, 0x783a5a00, 0x7f376e81, 0x7f8cbd20, 0x7f548181, 0x81149934, 0x3dad456a, 0xb4561fed,
+0x815d8127, 0xaea4bb24, 0x2187755a, 0x847ff0b7, 0x7fdb9039, 0x81887695, 0x8781e111, 0xe0de037f,
+0xc879f7ca, 0x7fc18381, 0x7f7ff7bf, 0x958b7f81, 0xa65fe7ad, 0x3481817f, 0x9f897b7f, 0xa2bb7f7f,
+0x31018170, 0x70e08c81, 0x4c1da62b, 0x16f27ff7, 0xdf0781ed, 0xbf4081c0, 0x3a233c53, 0xf8aed938,
+0xef24617b, 0x812681ab, 0xa66e4db4, 0xef9b65b3, 0x00007f44, 0x7f7f997f, 0x7f341fc7, 0x0f9b2be4,
+0x7ff67f81, 0x81817e2a, 0x4b4d7f81, 0xc0706505, 0x81fc60e1, 0x30d87f9c, 0x7fff9c0d, 0xa789757f,
+0x3881d769, 0x6367bf81, 0x9bae7f8d, 0x8e81bbb6, 0x61e88127, 0xc96c7fbf, 0xdf813181, 0x7f7f7de7,
+0x3e817f81, 0xb97f817f, 0x8b927f81, 0x5e74742d, 0x7c49817f, 0xbc1172b1, 0x81818181, 0x6dd2d4bb,
+0x98f3e058, 0x7f81efde, 0xaeee76df, 0xd57fca01, 0xf5817f81, 0x26747f81, 0xdc3f8181, 0xc335c749,
+0x7f2f7f7f, 0x7f02e67f, 0xfb706975, 0x8f4efde7, 0x819b414c, 0xb14cde70, 0x22e6812e, 0x3843892d,
+0x6f81ce60, 0x50c12725, 0xb4e279d5, 0x7fb1ac7f, 0xbc7f8183, 0xc7767881, 0xb6e71854, 0xb37f7f77,
+0x3d7f7f7f, 0x817f6824, 0x4981ae81, 0x7f811a7f, 0x58c37f74, 0xbef5a27f, 0x92a62541, 0x8643aa3c,
+0x8f838181, 0x397fe4f4, 0x8a814677, 0x93e3affa, 0xa6fa21e9, 0x7a3e7f7f, 0x332a7872, 0xf28c8176,
+0x227f573d, 0xbbbb6348, 0x7fefb2de, 0x43817f69, 0xc4c37b31, 0x747fa118, 0xacb3819c, 0x7f689f89,
+0x74e0604b, 0x81b57f88, 0xe925a0cf, 0x81be2181, 0x126d34f1, 0x7648d74e, 0xd760b29b, 0x04d0fda1,
+0x3b858181, 0x1be20981, 0x8127ac7f, 0x8a6b8181, 0x7f616ca2, 0x37e6e881, 0x7f9b82c0, 0x7f6e7f93,
+0x8dc72420, 0x752f3d39, 0x5e7fd2e5, 0x7f25c372, 0xe97f2677, 0x307f0000, 0x81383b86, 0x713de0fd,
+0xe47c817f, 0xf0214e8f, 0x8108349e, 0xe0c92881, 0x7fafb0e2, 0xe939b48c, 0x4855d8ae, 0xb581e469,
+0x967feb6a, 0xba543d20, 0xc47f0c62, 0x2a28e6b2, 0xd27fb09a, 0xbadf7f7a, 0xbf98d54c, 0x5f54cfa3,
+0x4881248d, 0xc97f7f53, 0x17816966, 0x3450877f, 0x4ff681c8, 0xfdb8786d, 0x0209730d, 0x7fbbba84,
+0xc5994cff, 0x4d31cc44, 0x7fb99400, 0x239d757f, 0xa2bd517f, 0xd5558194, 0x93818191, 0x819b8190,
+0x447f9289, 0x777f3e81, 0xc2c04a7b, 0x27c38eaa, 0x437a383d, 0x7f812432, 0xbd77707f, 0x7f81ea6d,
+0x40256698, 0x81a95b58, 0x2fb47f81, 0x74b64d86, 0x70f07f23, 0xd7d6bbd5, 0x817f18a6, 0xe862327f,
+0x3ff56cff, 0x7f7f8197, 0xcb75c381, 0x58954f57, 0x43818176, 0xa903811d, 0x7f6abbfa, 0x8cd7a77a,
+0x55814473, 0x7f819e1a, 0x557f9e89, 0xa9c955e0, 0x648191b0, 0x81a97f51, 0x1dbc1181, 0x685a6b36,
+0x7f2e5ef2, 0x59819afb, 0x3f30fd68, 0xe986702b, 0x810a2581, 0xfedf7781, 0x947ffb3a, 0xa9777f29,
+0x9c547f3d, 0xe36d7f07, 0x81909281, 0x2b87817f, 0x07aa817f, 0xd9137f30, 0xb791d3ff, 0xb84869c5,
+0x4db4e1ce, 0x81beed66, 0x1637adce, 0x10a1897d, 0xfd817fbb, 0x5771c343, 0x3b16dc58, 0x7fbf813f,
+0x7f9ba269, 0x353a7e21, 0x14f07fa6, 0x7f81815b, 0x484bb301, 0x0000c27c, 0x59141b93, 0x7f637f3e,
+0x3012937f, 0xf2a2b350, 0x1d7f7f7f, 0x81b6d4ed, 0x4fc3ba54, 0x3f7f107f, 0x02af3855, 0x598c814a,
+0x3eb09fd7, 0x636832b6, 0x7f9e817f, 0x7f939c7f, 0x7fa99581, 0x7f817fb1, 0x387f3475, 0x9893f771,
+0x52813581, 0xc5c75881, 0x7f787887, 0xb97981d0, 0x27436262, 0xfae6f21d, 0x81cacf5d, 0x41a5de81,
+0x81817d81, 0x3b2f7f9e, 0x5dc0ade8, 0x3d0e8154, 0x81bd3a0a, 0x6e818153, 0x7f818123, 0x817fc575,
+0x811b669b, 0x7f037929, 0x26acac81, 0x7fb3efb6, 0x82817fc3, 0x0c81f981, 0x607a677f, 0xc8258116,
+0x0ae77a50, 0x816fba35, 0x7f256549, 0x04c57f89, 0x4381075f, 0x81cd3b56, 0x8ccdad7f, 0x7fa45cd5,
+0x817fb67f, 0xb7698dbe, 0x5562298b, 0x688c257f, 0x9d7fde81, 0xe57f4e81, 0x7f81c9ea, 0x281a41f9,
+0x7ffdfc6c, 0xaa32217f, 0x81a08166, 0x8181b1ae, 0xc447880e, 0x2b81367f, 0x2944a47f, 0xc6de9b49,
+0x81816841, 0x54fa7fa8, 0x587beb7f, 0xe3475a81, 0x81dc2cb1, 0xa95dc06d, 0x8bd7477f, 0xb05d7f81,
+0xb4c19d7f, 0x43817f53, 0xde7f5c7f, 0xe3d2817f, 0x8948aadf, 0xc17f7ffb, 0xd08188c1, 0x8181204e,
+0xb3b6d781, 0x7381b381, 0x787f7681, 0xee640849, 0x657f8181, 0x811b329b, 0x5c728eed, 0x0c7f59c9,
+0x4cc3b12c, 0x814f7e3d, 0x5f7f7f46, 0xf9817fd9, 0xb1b5ba76, 0x1f42609e, 0x3db30000, 0x400d7f46,
+0x7f7f7f81, 0x45d36281, 0x81df9581, 0x7a7f7f53, 0x8181330a, 0x7ff5c3ab, 0x9e81b4bd, 0x7f70c27f,
+0xac1666b3, 0xcf7f52e2, 0x817fea30, 0x8c4b3f7f, 0xa085bde5, 0xabae307f, 0x817f7fde, 0x17bc61b4,
+0x914ccc4b, 0xe1e56bae, 0xac294f88, 0x75e562cd, 0xbfbd35cd, 0xed507f81, 0x81afcf81, 0x7fdc819b,
+0x5a0385e8, 0x4f2aabf2, 0x569c7f3f, 0x7fbee1bf, 0xb0407fc4, 0x6b548214, 0xbcd6877f, 0x3d6e7716,
+0x9a8134a3, 0x0a7f7f81, 0x29d73eca, 0xadbe7f65, 0x7fc1bb81, 0x7f63dc7f, 0x7f815430, 0x810e2366,
+0x7f36057f, 0xda39d7d0, 0xe9855ddb, 0x7fffa2f9, 0x3b9a8181, 0x7f06b781, 0x844db0a2, 0x7b1d7fa3,
+0x99a795ad, 0x7b819a6a, 0x3781819a, 0x867fc788, 0xa181817f, 0xfcc3637f, 0x747f5a5a, 0xf27f690f,
+0x2f8a9c2f, 0x617f7fb3, 0x81b4b1c2, 0xa2818b96, 0x7f7f7fa6, 0x20707f81, 0x7fc9817f, 0x55577fc3,
+0x777b40b9, 0xdffa3781, 0x38ba0e4e, 0x4f667d32, 0x9cf081ea, 0x8187d122, 0x79af9ebc, 0x647f99c2,
+0xd1a47f79, 0xaba67f7f, 0x6b270e96, 0x90c07265, 0x817f4b81, 0x71f31f65, 0x2d818692, 0x65570681,
+0x81f59b98, 0xe6b181ec, 0xc5b1a58f, 0x7fb2c981, 0x7fdb5914, 0xe37f81bd, 0x7fefd823, 0x9a7f9ccb,
+0x631762c2, 0x7f4e81b8, 0x813d88c1, 0x9768c97f, 0xbb867f81, 0x2cd79456, 0x00004aa7, 0x8ec66e57,
+0x7f7af878, 0x747f3cb4, 0x8c774fa4, 0x7f8df013, 0x8c5c7264, 0x3aade68d, 0x81819c30, 0xb0d38147,
+0xbd816181, 0x4f415cbc, 0x7f21f710, 0xf7ba5565, 0x81df367f, 0x637f7f9a, 0x7f182e7f, 0x6f81996f,
+0xca547fe5, 0x637f8db9, 0xccb4fe59, 0x55ca7f81, 0x7f7f7fae, 0xceffc677, 0x2955d0f8, 0x91a07f7f,
+0x7f818181, 0xebc2f87f, 0x7f213646, 0x94a2a981, 0xd1819bdc, 0x7f866084, 0x2381ff8b, 0x75b58114,
+0x447f162b, 0x5275e0a3, 0x37578cc2, 0x0dc1ec31, 0xb73681a8, 0xb081978a, 0x74429a2c, 0x698859dc,
+0x937fb91d, 0x7fcdc06e, 0x81b4ffff, 0x7e2b168d, 0x4b649aa9, 0xe648be08, 0x44e37b7f, 0xc0fc27e9,
+0x1b16521e, 0x81577fc5, 0x7f8152a8, 0x2981ee7f, 0xf08dd19f, 0x7d93ed81, 0x814435c5, 0xe6c67f29,
+0x7ffc8111, 0x87f65881, 0x3f62563a, 0xa09a81f9, 0x490f5781, 0xc70d6284, 0xfb818181, 0x56817f38,
+0x87420960, 0x0d2dedb3, 0x8ef18143, 0x4c5b4a62, 0x8f947f81, 0x667f8121, 0xcc817f81, 0xb567a44c,
+0x7996b182, 0x736e9881, 0x81627f7f, 0x7f8e7f81, 0x725ee43d, 0x9f436a50, 0x7f055d88, 0x93498181,
+0x21d08f7f, 0x26f17f3c, 0x581d939b, 0x90595ba7, 0xf5200e81, 0x4b91b78f, 0xcb7fc693, 0xe87fd04b,
+0xaf81087f, 0x14ac4a01, 0x7f7d7f81, 0xe014dd7f, 0xc2816aed, 0x79760a93, 0x7f9b817f, 0x81480000,
+0x844d73a5, 0x7f8181bb, 0x819dae3f, 0xc87f8e82, 0x9d81a23e, 0x7fbd813a, 0x8181702e, 0x4fab7a67,
+0xd57f70b6, 0x817c53ac, 0x1355de7f, 0x4264437f, 0x81b0ff2f, 0x16d6e094, 0x7fc40f55, 0x7f9b7edf,
+0x7f5a7f81, 0x7f996e66, 0x8e932881, 0x7f817f81, 0x6d4ca679, 0x815f871e, 0x81b4817f, 0x7fa97f81,
+0x5e03827f, 0xbcd8b6a8, 0xc5683edc, 0xbb007fdc, 0xea266973, 0x498e467f, 0x86b1cc7f, 0x17814738,
+0x81b7ae62, 0x56fea8f3, 0x5eac534d, 0x54f67f81, 0x24add4ce, 0x811a7881, 0x81cfc383, 0x9f2f81ef,
+0x7f1f41fe, 0x819e3781, 0xcc73a3b0, 0xee148181, 0x7181d87c, 0x81c1c9e2, 0xfee4727f, 0x9b608139,
+0xa2ec4ace, 0x7f810c7f, 0x9a7f2288, 0x46b169e4, 0x9b40817f, 0x547f2f7f, 0x81ef4f81, 0x243fffe3,
+0x8181dd3f, 0x347f6596, 0x7f59c3fa, 0xa26cc7ac, 0xf7384881, 0xbdc0a118, 0x7f8173a8, 0xabf1b3e3,
+0x7e3e8eac, 0x30c07bce, 0xbca0b981, 0x75816414, 0x81377f81, 0x7f7feaa1, 0xd8287f86, 0xfbd98bd1,
+0x7f987fc1, 0x81b8347f, 0x8c7f37e2, 0x987f3c7f, 0xb5f681ab, 0x347f1adc, 0x1b815481, 0x91f17f60,
+0xbf85a67f, 0x6d7f2374, 0x587f3c1b, 0x74b0ae9c, 0x97a64fb4, 0x1437697f, 0xc77f3b81, 0xf4e0bbad,
+0xb6892b64, 0x5ed0e47f, 0xa55c8100, 0xbbe24e10, 0x50bb6551, 0x6db5349d, 0xaaae405f, 0x4d7f4f6d,
+0x989e95a5, 0xcae9bf2b, 0x811e7f4d, 0x90538181, 0x7f9f096e, 0x4126a5df, 0x1369537c, 0x81a73581,
+0x722f1409, 0x8b297c54, 0xf17f5081, 0xc22b4595, 0x7f6fde57, 0xdea96095, 0x7f98814f, 0x40f8fb81,
+0x697fd77f, 0x35e07f33, 0x27459a7f, 0xc0818181, 0x3e817fb2, 0xdd5eae81, 0x8a8efd53, 0x13e91281,
+0x2c486ba8, 0x7f6b447f, 0x7f818120, 0x1c496781, 0xf8e45993, 0x48817f81, 0x009db045, 0x7fd9c722,
+0xab817fe2, 0x73bd7951, 0x687fa97f, 0x7f7f7fb8, 0x4f447ffb, 0xfc31d9c2, 0xc1d5d865, 0x526abd82,
+0x7f6e7f47, 0x6b31817e, 0x8509817f, 0x5581fcbf, 0x7f9bd8ac, 0xcc22f37f, 0x547fa07f, 0xbd4fd2bb,
+0x6f49a523, 0x835e896c, 0x65458e81, 0xd0e37fd2, 0x81587fe0, 0xd95fb08b, 0xd938d43f, 0x11e67fc6,
+0x7fec3c2e, 0x7f8581d5, 0x017981b9, 0x812c8133, 0x307415cb, 0x2081bd7a, 0x994d5c83, 0x53812ab3,
+0x7f8190bc, 0xad811645, 0x73c1ca14, 0x94c87d32, 0xbd4a7063, 0x7f442f7f, 0x06812617, 0xc481acc0,
+0xc7bacc81, 0x81de5481, 0x7fa3167f, 0x4a3a385f, 0x2b817f37, 0xae912b7f, 0x32c4cfeb, 0xc3bf3bb1,
+0xa4a63e50, 0x846c7b7a, 0x5c81816a, 0x75813daf, 0xd0665307, 0x6031117f, 0x507f5429, 0x29bfc44e,
+0x81b69437, 0xa4c54d61, 0x81391b7f, 0x98347fec, 0xf7da5b06, 0x7e3d8b7f, 0x7f7f1abe, 0xa955d23a,
+0xb7810000, 0xa1608164, 0x5f813cfb, 0x16602281, 0x5fb6814d, 0x3c3fd67f, 0x5dae83eb, 0x58ac2c2c,
+0x097f5d25, 0x6081da52, 0x7fd4818c, 0xe181d13c, 0xc66d7f2c, 0xe94a00a3, 0x2cec12a0, 0xe84c6398,
+0x64398162, 0xd056816f, 0x8147e081, 0x9ceb75c0, 0x7fbec481, 0x16d15734, 0x8158cd7f, 0x369c8172,
+0x2a479b81, 0x7f4e9686, 0xb1494e0f, 0xe5816226, 0x057f7f55, 0x81dba929, 0x8f1890e4, 0x5e7f817f,
+0x7f7f8162, 0xfced63c7, 0x370aadc1, 0x5f09b34f, 0x7fc0be25, 0x36927fec, 0x814d0ab6, 0xfd077f60,
+0x8162f9ab, 0x8185597f, 0x2bac2f66, 0x28e6aa9f, 0x814dc1c6, 0x717f3841, 0xffd1078b, 0x4f5b8a12,
+0x7f429a7f, 0x7f734d7f, 0x43817faf, 0x7f4e7f52, 0x232e0524, 0x7f4158e4, 0x327f7f7f, 0x89b464d8,
+0x8130a3a7, 0x7fe6ac32, 0xd1818e81, 0x427f957f, 0x8150c60e, 0x3cb50481, 0x31955f7f, 0xec427f9f,
+0x99eb5f69, 0x8ac893da, 0xf97da17f, 0x7f847f73, 0x7f97ce7f, 0x7f7fe806, 0x8984629e, 0x34539cba,
+0x8a64a575, 0x25297f49, 0x1c509d07, 0xd5aa7f7e, 0x74817f79, 0x81819981, 0x95815a8b, 0x9518a19f,
+0x3d9183ce, 0x817f00a1, 0x30306781, 0xc67f7f7f, 0x81594a49, 0x3d620f81, 0x3481bd81, 0xc3af8e63,
+0xc249dc8b, 0xe5fa637f, 0x81185512, 0xa0b48307, 0x814c817f, 0xc6dac58b, 0x8b5c781a, 0x81982c81,
+0x0000b269, 0x8105f78d, 0x81ad60f7, 0x81ffb8cc, 0x917ffed4, 0x104fc0a1, 0xfa38b45b, 0xb5318137,
+0xbd781466, 0x0bd8068c, 0x72c57f91, 0x8181b48e, 0x7f81aedc, 0x43677f7f, 0x7f4f7f3a, 0x437f8178,
+0xe99d0b81, 0x868792bc, 0x17727f57, 0xada05e06, 0x694d707f, 0x9081e59b, 0x7f3a8181, 0xb2bae481,
+0x7f81dde5, 0x3e307f7f, 0xcaa07f3a, 0x2e1a8170, 0xed0f4881, 0x297f7f61, 0x81cc815e, 0x4fec813b,
+0x1a4f02b2, 0xc6e66a4e, 0xe9817f6f, 0xc2c36a02, 0x302e9f60, 0x4ea28137, 0xb9d57f32, 0x462a7f81,
+0x9085bb36, 0x7f7f817f, 0x3c714908, 0x70c9c17e, 0x5a7f204b, 0x8fbf6f70, 0x97880981, 0x0cbacfdb,
+0xb80f6d81, 0xb6bb3aef, 0x81691db8, 0x1081e2df, 0x7f0504a0, 0x797d5e7f, 0x006de423, 0x7f19f16f,
+0x1eb28181, 0x819f7f81, 0x7f6a84b9, 0x936537f2, 0x14a87f81, 0x7fa9cb81, 0x81813a8c, 0x8182c47f,
+0x56758144, 0x81c55c97, 0x86de0b81, 0x818181d9, 0x7f386281, 0x7fbb2738, 0xffd18d81, 0x17e1eb34,
+0x656449b7, 0x7f7fbe33, 0xd7f681b6, 0x77313c4f, 0x7dc9151e, 0x28debefb, 0x7f7f5c62, 0xa850b25f,
+0x81166b59, 0x7aea7f05, 0x325d81a1, 0x7fd06471, 0x93137fbb, 0x43a51892, 0x3e326cfe, 0xab81a942,
+0xb208d017, 0x817f0781, 0x08fcca86, 0xa31a7f81, 0xeb93206a, 0xd4fa21f4, 0x684f9029, 0x7f817f57,
+0xdca7812c, 0x7f7f0000, 0xf2dc5a96, 0x7f62aa7f, 0x918d1c7f, 0xfbb5add6, 0x407fc0da, 0x4d5d9f7f,
+0x935d8166, 0x817da9b1, 0x205d7f3a, 0xe08b4181, 0xd8bd7f7f, 0x4f33818c, 0x8105b781, 0x2f81cd81,
+0xbaf8a48d, 0x2b81aa7f, 0x90b5810d, 0xbc62c181, 0x1592f72a, 0x4d81b5fe, 0x7f81e447, 0x99d22181,
+0x303e3ea5, 0xa23078a5, 0xba8a7995, 0xd5282790, 0x6681453a, 0x7ff7ab7f, 0x7f4e8195, 0x8bc48181,
+0x817fd481, 0x81b98142, 0x265a8182, 0x81f768a7, 0x85f11690, 0x7f957f7a, 0x109981a4, 0xeabba365,
+0x577f9057, 0x5f7fae65, 0x817f8199, 0x718e3daf, 0xd6818181, 0xd1a1cc81, 0x97a48187, 0xd8d14520,
+0x497b7f10, 0x7f81d781, 0xb77f747f, 0x7f813974, 0xc52a7f7f, 0x7a292226, 0x81602ef8, 0xe481cc7f,
+0x0b4266a9, 0x4722287f, 0x7648747f, 0x9c861ec6, 0x4ba9efbe, 0x2063813f, 0x5b277f81, 0x54608181,
+0x81b3ae7a, 0x5b812281, 0x6c306f81, 0x4fa0bcd5, 0x540c85d5, 0x2f81817f, 0xd2c9078a, 0x4281a39a,
+0x3e7fa1d8, 0xa47fb881, 0x3efe9c7f, 0x7f5f6c23, 0x4681d46f, 0x49cb81c9, 0x7f4eaa68, 0x2e7f7fac,
+0xbb497fd0, 0x3a4253a8, 0x91a6aaaa, 0x4f7f5a52, 0xf9375fad, 0x8b2a7f2d, 0x746e7f81, 0x7fb0cbe0,
+0x5a917fd1, 0x5b65867f, 0xe9606f9c, 0xe9f8814e, 0x3045cf65, 0x937fad52, 0x760f5a81, 0x9be4ba50,
+0xb59d81e5, 0x0000d77f, 0xd481bd81, 0x27224a55, 0x61467c81, 0x91d865df, 0xffa17fe4, 0x7f8a0c68,
+0x1640fd52, 0x987f7f6e, 0x4d815a5a, 0x3081b65b, 0x7d7f6c8e, 0x5b26bf60, 0x44814281, 0x5a813427,
+0x81f47a3b, 0xef9281eb, 0x81994ca6, 0x7081317f, 0x7f7fd49a, 0xfb8c8e7d, 0x546ad581, 0xd6817f16,
+0x7f3e4b81, 0x4f4c4898, 0x817fe22f, 0x819a37c9, 0x387f1a55, 0x8cfddd03, 0x318aa9bb, 0xcb6e2d90,
+0xa47f0d1b, 0xae81c204, 0x7f797f27, 0x0a7f65ad, 0xe4ac81af, 0xb2d74d81, 0xca0fad81, 0x357f69a8,
+0xad71a581, 0x5be9d4de, 0xaebe8181, 0x95817fd8, 0x8f7aedb6, 0x0a81e59e, 0xa07f20eb, 0x7e66427f,
+0x56769b04, 0x2e8149f1, 0x81f6f87f, 0xc3813768, 0xbcc28850, 0x6cbd3dac, 0x7fda99dc, 0x657f7fd5,
+0x7fcf7f60, 0x81233698, 0x81daf17f, 0x867f7f7f, 0x7fda7f36, 0x0c7fae20, 0x7f817d78, 0x7fd09481,
+0x818f30e0, 0x7f437c7f, 0xaecc2b3e, 0xcf427fca, 0xccfd2cce, 0xefd898bc, 0x7f5b81e2, 0x1d4b812c,
+0x865871ae, 0x553ab4ae, 0x69817f7f, 0x811a745a, 0xa32a163d, 0x2e967f57, 0xff817f7d, 0x70d43e9a,
+0x814681ae, 0x9f817f31, 0x819b8110, 0xfca3819d, 0xc4fc45b2, 0x56b2817f, 0x7c811d7b, 0x997f8181,
+0xbe0dc3d8, 0x9f5b56a6, 0x7f8181e2, 0x5d59eb29, 0xa357e4c6, 0x8161817f, 0x3c7e7f32, 0x7fd07f06,
+0x817f82ad, 0x93de8341, 0x92850000, 0x0c65c06f, 0x0a4d2817, 0x7705d752, 0xbcd8687f, 0xc4397f7f,
+0x1b7ffd47, 0xada5012d, 0x7f8158e5, 0xba617e10, 0xa97d811f, 0x1fb61081, 0xac41e74e, 0x8c7ea7a3,
+0x997f452e, 0xb5858149, 0x5c3cb0a0, 0xc8ffab38, 0x49d65d7f, 0xbbd07c81, 0x35b06b7f, 0xcc7f7f7f,
+0x50c42881, 0x707f7fde, 0x7fd67f7f, 0x7f819981, 0xd4d6a781, 0xcb103b7f, 0x7f4fee9f, 0xb3997fe0,
+0x7f9381a5, 0xbc7f9a28, 0xf4aa819f, 0x7fe862df, 0x316dd1f8, 0xb4a66f81, 0x98816581, 0xdb817f2e,
+0x34819ee0, 0x746c817f, 0xdc9cd3e1, 0x8e7fab2d, 0xec368b2e, 0x7f8381c8, 0x817f7f7f, 0x814de179,
+0xed5b7f4d, 0x274bdfa1, 0x5851250b, 0xec811a81, 0x7f365569, 0x57935681, 0x08757781, 0x196d813e,
+0xa67581b5, 0x83a7d981, 0x73988163, 0x816eb57f, 0x2ce47e40, 0x5bef6481, 0xb131155f, 0xc2f9817f,
+0x519b67b4, 0x7d2c7f59, 0x7fdb8150, 0x5c7fc881, 0xca285c9d, 0x7f7f7fe6, 0x39aabd59, 0xb9b494e6,
+0x81401831, 0x74247792, 0xab7dfaab, 0x817fcd81, 0x819b25e3, 0x587f337f, 0x817fd238, 0x818a332c,
+0x9db6bcc3, 0x1681c227, 0x7058306d, 0x50a27f64, 0x8181a4b2, 0x7fe2a57f, 0x8d5f4777, 0x7f7f8f81,
+0x6e8381f7, 0x22518136, 0xae72d0d6, 0x7f9bba7f, 0x8117d27f, 0x792b7ff0, 0xe45892e0, 0x8e6f5c34,
+0x3994817f, 0xa981afca, 0x0000a592, 0x7f811d81, 0x09548181, 0x0ea8127f, 0x7f7fb49d, 0xd6690b81,
+0xa1846059, 0x81d15d81, 0x6ccaf481, 0x81ddef83, 0x7fccc681, 0xab502722, 0x4620e862, 0x3981de76,
+0x7f81889f, 0x5b818154, 0x797222dc, 0x4b8153a6, 0xa470816d, 0x7fb2657f, 0x81150b64, 0x89dc1f57,
+0xaa636e7f, 0xe59cba67, 0xb3b2da41, 0xc7818192, 0x7637d0d9, 0x1a78a78c, 0xaf5e3f5c, 0x8181a581,
+0x56285469, 0xeed47f7a, 0x697ff6a5, 0x7f7b7f1e, 0x398181cb, 0x999fe4a0, 0xc17f8816, 0x5c815aa2,
+0x78717fce, 0x7f7f842c, 0xf07f4c43, 0x7f818148, 0xbc66c134, 0x8168ce22, 0xaf1791a8, 0x3522ae40,
+0x95afa97d, 0x435d4e69, 0x2ab59d7f, 0x8197c47f, 0x93cfe645, 0x8dba727f, 0x227f7f81, 0xb17feb7f,
+0x702a7f81, 0xebd463b7, 0x5381e929, 0x817dba7f, 0x5c811a24, 0xfa7f814d, 0x817f8197, 0x2f7f704f,
+0x36817f7d, 0xe87f0d7f, 0x54b07fb6, 0x81733686, 0x8fcb7fcf, 0xd888c623, 0x2de5ebc5, 0x8f6f6569,
+0x7f7981f9, 0x908136b1, 0xa93a7fb5, 0x405f8141, 0x01a54dbc, 0xb6815763, 0x8ccc3ffc, 0x5681105b,
+0x4a1498bf, 0x4c6f82e5, 0xa67f556c, 0x7f5d564f, 0xdb8132d0, 0x1e958139, 0xe386811b, 0x04845a40,
+0x7fbd8109, 0x833b62ec, 0x81f40717, 0x701d8181, 0x8191ee44, 0xae2d9d7f, 0x3fa17854, 0xcbcad981,
+0x73c2d681, 0x5f738681, 0xb87081dd, 0x817f0000, 0x037fcda9, 0x922caef5, 0x7cc7f91e, 0x9a7fbd88,
+0x81819528, 0xf2cd7f53, 0xfd7c8170, 0xfb7fb997, 0x7f7f3f7f, 0xe4818154, 0x18c4414b, 0x4fa48da4,
+0x81718181, 0x817c957f, 0x7f2e1582, 0x09bc81bf, 0x5e4b817f, 0xd5590d7f, 0x7f818b44, 0x88abcf7f,
+0x39508181, 0x81ab86bf, 0xba58c325, 0x65bc83ed, 0x3402857f, 0x81c61e7f, 0x8bbab505, 0x81b2aba0,
+0x8153ec32, 0x6803d681, 0x819d817f, 0xb6de31a4, 0x367f7fc2, 0xb1048db9, 0x6389815b, 0x7ffb7b01,
+0x8f1b817f, 0xc1817f7f, 0x04187f64, 0x45cfac7a, 0x0f1b578c, 0x0f209f24, 0x33817f7f, 0x7f7f7f81,
+0xac814781, 0x5a9d3dbf, 0x62a65581, 0xcb8a7353, 0x7f81c5b8, 0xba447fa1, 0x9081774d, 0x7af9ed38,
+0xb6849bce, 0xacf07c18, 0xa1894581, 0xd7634242, 0x2d0e8174, 0xdbf799d9, 0x7fe7b446, 0x84110cf7,
+0x6474da81, 0x8185e120, 0x9da4f123, 0xd07fce54, 0xa9cdf7af, 0x84db817f, 0x9e9a23b7, 0x81774c7f,
+0x7f818152, 0x817f5243, 0x39058181, 0xbbc91e81, 0xe3d87581, 0x81b66581, 0x7fb06a61, 0x0bcd0fdf,
+0x8188dee9, 0x1cd87f92, 0xf036977f, 0x977f4d61, 0x1da12581, 0x3a7fbee0, 0x87467fa8, 0x7fe27fa3,
+0x89d37f7c, 0x397c7ff4, 0x57e64171, 0xc4819181, 0x93677f41, 0x65be2517, 0xbe6c7f7f, 0xaa7f7f81,
+0x255d650e, 0x098181c9, 0xe8124525, 0xc73a4949, 0x24d14b47, 0x6730f681, 0xfe251c6e, 0xbc7fa092,
+0x81917f5f, 0x81d1ab81, 0xd6128193, 0x13d62e52, 0x941c14f4, 0x25b452c6, 0x7f9c947d, 0x815d3f42,
+0x81c8c9d0, 0x7f524663, 0x40ae7524, 0x2f81c77e, 0x815c8483, 0x7f7fb27f, 0x7fc37f44, 0xe07f9db7,
+0x7b812117, 0x1c965b7f, 0x1822ad4b, 0xccb9e67f, 0xc5b18115, 0x7f8c377f, 0xd13939cb, 0x38e8147f,
+0x4e722d21, 0x9e81ef00, 0x81c0fa81, 0x812d8158, 0x9d84a57f, 0xefc32797, 0x7fc9987f, 0xa37f8199,
+0x81812681, 0x81818190, 0x89a9814c, 0x7945c381, 0xa57f816d, 0x7d65af81, 0x81bb7f72, 0x1c81cd7f,
+0x6d3e4e7f, 0x6cbef90d, 0xca24894d, 0xb04892b6, 0x7f7f1e81, 0xf57f5965, 0x0769a7b0, 0x698562b9,
+0x16817f7f, 0xabb94bb7, 0xc35c81f3, 0xb9b5728e, 0x8181641f, 0x81d2d2be, 0x9181bb37, 0xf82b9222,
+0x7bdc7ff5, 0x9b812fa1, 0x8173f4ec, 0x94810185, 0x2bd00d63, 0x814c50dd, 0x9be376e5, 0xed7f90dc,
+0x817bf98c, 0xe1df634d, 0x287f81ae, 0x27a4f115, 0x62101a81, 0x2723b481, 0x9c507f66, 0x44a01769,
+0xbfd13025, 0x7f815b81, 0x5d5c817f, 0xb37f7fa5, 0x7f79b2c6, 0xa1e2847f, 0x813ac57f, 0x417f8b7f,
+0x4b505b55, 0x22d47fbb, 0x7f886ad7, 0xf78c7f48, 0x53a5245d, 0xdfd37f1b, 0xb37f81c4, 0x907fc99f,
+0xa393f1c0, 0x7f5f7015, 0x37be3fb3, 0x7f4c9e28, 0x7f770000, 0x9b7a0531, 0x7f35cdde, 0x5b4047cf,
+0x81881773, 0xdc6e9ea5, 0x517f477f, 0xb2a77f81, 0x9981472d, 0xd4c57f81, 0x6ef2a77f, 0x81d481a3,
+0x3d87307f, 0x214321bd, 0x7b7f7f8e, 0x997f1cbe, 0x266681ec, 0x3639f87f, 0x687f7fed, 0xf17f8191,
+0x1558ae38, 0x815ac086, 0x81247f78, 0x7f612db7, 0x128d7fc9, 0x187fbf81, 0x4a898116, 0x3a6c818d,
+0x7f7fdcf1, 0x60cb3e9c, 0x81bf627f, 0xe7c17fa1, 0x52247fd2, 0x814781dc, 0x6eb67f1a, 0x9d0b7f7d,
+0xc2b238f8, 0x81b1427a, 0xa1555aad, 0x103c0c29, 0x97319c81, 0x05ab3a7f, 0x811f1d7f, 0x817f8195,
+0x40bffc7f, 0x3aa38f4a, 0x46e17fcb, 0x6781fc31, 0xb725a5e4, 0x427f8cbd, 0x7f857fd4, 0xca1f7f9d,
+0x2f77c6b9, 0xdde30a0e, 0x7f93fca2, 0xc6aa7fc7, 0x7f9e7f7f, 0x9b78012e, 0xdda0d07f, 0x0b4c7f83,
+0x7fee4b44, 0x81640552, 0x40d573c0, 0x390efbce, 0x327fb881, 0xf5e69aec, 0x81e9c7d1, 0x81717f74,
+0x81817d78, 0xac6eba86, 0x41a2d07f, 0x8981da45, 0x5264b681, 0x527f46f2, 0x949e7f52, 0x7fdd7fbd,
+0x81678102, 0x87756c64, 0x3d2a81ca, 0xc87f8107, 0xd17fb64a, 0xa7b553b5, 0x8191812a, 0x55ddbf7f,
+0x517f7b7f, 0x897aad7f, 0x63817f8d, 0xd9547f7f, 0x43437351, 0x84818181, 0x3a7f8127, 0xccbfdf7f,
+0xe1191588, 0x81c80c65, 0x7ffcf571, 0xcc147b7f, 0x00007081, 0x2b7f7f7f, 0x889b3a2b, 0x5358e78b,
+0xfcb917ab, 0x81c3f74d, 0xd57faa18, 0x69937f23, 0xd7321b81, 0x5db1ad81, 0x17577f0a, 0xf48118bf,
+0x81822681, 0x2981e87f, 0xc16e6c7f, 0x891d38ae, 0xf37f813f, 0x81b96b55, 0x2fcd81b1, 0x23b5f981,
+0x695c4d28, 0xa0810fab, 0x6cd5ea3d, 0xefc68ec4, 0xae32887e, 0xa27f0b17, 0x7f8baa97, 0x8116b3a1,
+0xa393817f, 0x817019f0, 0x1d7f8193, 0x18f0d764, 0xb27fb7c9, 0xc4da637f, 0xcd586f56, 0x7f23810d,
+0x572f25f3, 0x05df7f15, 0x817f815c, 0x8b147f84, 0x0f817f6b, 0x7f1f7f7f, 0xbccee4a8, 0xcb5ad7ed,
+0x33a5f88b, 0xc7817ecd, 0x8781817f, 0x817e6444, 0x6697817f, 0xf9490257, 0x83d5f581, 0x7168b332,
+0xf4ed4781, 0x17e0c1c4, 0x16b9c997, 0x8581247f, 0x81887b3f, 0x2281a5c7, 0xce437f0f, 0x994c6f81,
+0xbcd09735, 0x9269d57d, 0x449a7e61, 0x8c6e7f81, 0x81b38c7f, 0x81db7791, 0x62602681, 0x3246817f,
+0x81bb2881, 0x6d2510b4, 0xb07fbf69, 0x49ea817f, 0x938153e7, 0xf17f425b, 0x3b817f89, 0x36b48181,
+0x837c1d0a, 0x853cbd2c, 0x2950527f, 0x388de1bc, 0xfa1c7f54, 0x7b507f7f, 0xad311c42, 0x8d8ae6c3,
+0x9081677f, 0x58810e81, 0x984b4881, 0x4b9f407f, 0x819a00ff, 0x857faee2, 0xa06d9fb5, 0x448979b8,
+0x9cc0f17f, 0x7f2fce7f, 0xa0b9ad7f, 0x428a7f7f, 0xdf562afa, 0x7f6f0000, 0xe04a5bef, 0xa7ad2867,
+0x23818146, 0x7f7f0642, 0x7f481981, 0x7f810f81, 0x99a9db81, 0x64d437cd, 0xaf7fcb6d, 0x81815311,
+0x723c81bd, 0xcc7fc729, 0x81a47e49, 0x9abce744, 0x7385eb5b, 0x7f7f5b7f, 0x2bb3819d, 0x81fd7f49,
+0x7f5f8198, 0x7fb6a2f3, 0xe97fc33c, 0x436c79d3, 0x5ea67fff, 0x5a7ff2ac, 0x28aa713c, 0x8181c681,
+0x7f7c8846, 0xaf817f39, 0x90bcc57f, 0x841c7f4a, 0x817f8181, 0x7f4e811b, 0xb53a81ce, 0x7f451a7f,
+0x5f38027f, 0x7b81e4f3, 0x817f87c6, 0x8141318c, 0x532c81a4, 0x817fc84c, 0x919d33db, 0x29f2b781,
+0x7f568139, 0x4658b1a6, 0x7f69342d, 0x81ab9582, 0x367fa301, 0x47468181, 0x7f8c8181, 0x81216a81,
+0x9b577f7f, 0x7f447f7f, 0x72ef4649, 0x45418103, 0xee48bb81, 0xa7027c7f, 0x0ea4237f, 0x6b717cf4,
+0xa296a17f, 0x17007f7f, 0x869835c8, 0x8a7f77ec, 0x5d7b8139, 0x32688f81, 0x7fdab0b3, 0x815c7f7d,
+0x06a01fb6, 0x814981de, 0xa1c681d8, 0xdc6ff522, 0xd13081b1, 0x9c5a7ff5, 0x7f7f7f5c, 0x86faa148,
+0x811a696b, 0xbb7fa2c5, 0x815779bf, 0xae3b7f48, 0x8c45d6de, 0x7f867f9a, 0x3e359b7f, 0xb7c48181,
+0x78c27f95, 0xc4818181, 0x81cd7f3f, 0x558a8193, 0x7f4ca874, 0x4e1eb447, 0x2744355f, 0x9020ead4,
+0x16e47ff6, 0x708123b7, 0xbeb61418, 0x27fcdd50, 0xde21e5d8, 0x9500a3db, 0xefc29f4d, 0x5ef0c542,
+0xe02a3bc8, 0xe91a38bf, 0x24f81e3c, 0xe0fe4f11, 0x3be6891d, 0x7fef09ae, 0x16977ff0, 0xe8b74a18,
+0x1a50fa2e, 0x24c04bf9, 0x39c84714, 0xbed7f6c1, 0xaeddab44, 0xa7f9ca2d, 0x275dc01b, 0x1df6e74b,
+0xc3171107, 0xbcc93481, 0x1403f685, 0xcc1c0439, 0xb602ebaa, 0xe235d0e1, 0xf1afe3e3, 0x0814c025,
+0xb4f93607, 0xa709c243, 0xbbfabdd8, 0xfd3e81e7, 0x2f9ae335, 0x1c1d52e9, 0x00f133a5, 0x23eee238,
+0xc625eced, 0xefd56878, 0xe3cadc03, 0x585db90a, 0x024ace2b, 0xe3d804ef, 0xdf54d5a1, 0xc9e11709,
+0xc3a20ccb, 0x18c0005f, 0x52424437, 0xccaba110, 0xc75736d6, 0xa8f1401a, 0x3186aa67, 0x0b1aaebe,
+0xf952cf90, 0xfa4edd16, 0x9d72bee5, 0x2824f455, 0x9a3cb150, 0x6bbfe929, 0x6a6f5f9d, 0xbfa5385f,
+0xf89828bc, 0x6b6d0baf, 0xcab514c5, 0xa9baf505, 0x24cff2c2, 0x1c2511d8, 0x3abccde9, 0x7f60deef,
+0xbbf4dddd, 0x11366331, 0x0241c141, 0x5111c984, 0xc17f54da, 0xed4c1bb8, 0x0af6fb57, 0xfd2c28c0,
+0xe790142d, 0x6bff6403, 0x7fd8cce3, 0x3c282cc0, 0xea48f68b, 0x3ecdd5e3, 0xb917f2e5, 0xd2b25334,
+0xf65b10b5, 0xfdd0d4be, 0xe6e22cc9, 0xc59ec206, 0x3f3238cc, 0xbcbe2fd0, 0x2418c910, 0x3d07f7cc,
+0xa25bf9cf, 0xdb81ebfc, 0xd35b2203, 0xde2d23cc, 0x3cd706d5, 0x0e34f6f5, 0xc2d20000, 0xfef1e4e0,
+0xd26ad6e1, 0xfa130b0d, 0x324b211c, 0xc5dfc3a9, 0xf83ebec9, 0x0dd627c0, 0xcee99850, 0x4fcd1533,
+0x04597ff0, 0x40d0ff25, 0x045cd181, 0x908f9ca1, 0xd0417f7b, 0x2f0a2c87, 0xfef998c7, 0xcc3370f3,
+0xb0f4267f, 0x1bbe5072, 0xd8d6063a, 0xd058cb32, 0x448fffad, 0xa9b50064, 0x184ebdf7, 0xfe2e3538,
+0x561f16a6, 0xe7f20181, 0x449b95f9, 0x0338f7b2, 0x4012bcae, 0xd67f6797, 0x4966f6f2, 0x1dcc60c0,
+0xeecec631, 0x279f81c4, 0xc9f751be, 0x75994dc8, 0x01001ded, 0xb0ba1822, 0xbb14eff9, 0xb2dae837,
+0x4cf69e23, 0xf35d6ad7, 0xf69e57c9, 0x3727d2e3, 0x31b3517f, 0xc1da575d, 0x30e2c1d5, 0x54d7beef,
+0x293edf19, 0xeb37d80e, 0x296cee35, 0xeff14f04, 0x232d06de, 0xb2815853, 0x2c601508, 0xa5ecb6ff,
+0xb407f524, 0x4ccd9a19, 0xe448b0c2, 0x01ebd4c6, 0x52fc03a1, 0x286afce2, 0xe4d45ebb, 0x2953101d,
+0xca12d31f, 0x10cb00dd, 0xd2ae6347, 0x46e82f44, 0x45d64600, 0xae393c05, 0xf7219fb9, 0xeffe09d2,
+0xedfbb5d1, 0x07070413, 0xd7d6ec37, 0xa0f7a621, 0x75bd023f, 0xc45b5d37, 0x19ca9b39, 0xd1ee50b7,
+0x50062231, 0x301ca07f, 0xbeadd410, 0x1ce01d02, 0x51a5fbff, 0xd9104c7f, 0x79e4f23a, 0x33e47449,
+0x1ceda8ea, 0xf6d65581, 0x418dd3e3, 0x9fc1e612, 0x54f747d9, 0x839acbce, 0x0000221c, 0x031434b6,
+0xcc48960f, 0x1045ea23, 0xa8d7f8a1, 0xaf01243e, 0x2bf461c7, 0x9c2139dd, 0xd42bd5d5, 0xf8c2f5d5,
+0xdeb61f4b, 0xf12ac9cd, 0x107f3209, 0x818d1d3c, 0xca34572c, 0x34da0052, 0xf9a951e2, 0xdf34d209,
+0x0f8b37c1, 0xa7dee3d3, 0x1a390c98, 0x1d16e135, 0x33c3ad66, 0xd9e2cca4, 0xcbabcce4, 0x2eadd581,
+0x81383b81, 0xc0db251b, 0x0a6e91c3, 0xdab3c725, 0xd2293bfc, 0x22d6abb5, 0xdb524adc, 0x30cf5f24,
+0xb92527e8, 0x6000b7bf, 0xfefe1fe4, 0x2cf9035e, 0x2743f125, 0xd007b3ad, 0x2dd5e43a, 0x5a35e7b9,
+0xea06e5b7, 0x39f3e1aa, 0x592614f3, 0x7fc3bab7, 0xf1aefe00, 0xf3e9b9fc, 0xcd81aee6, 0xae3e31cc,
+0x95d8fbc7, 0x7f26bbf8, 0xf20e57e3, 0xf9eee307, 0xfde2bff2, 0x10e8f7db, 0xfbb0de50, 0xdfaa10e8,
+0x5e18ee3a, 0x0520a445, 0xddd2bc9e, 0x13c97536, 0x33cf5159, 0xfac6d2f7, 0x18ca01e5, 0x112251da,
+0x0211d9b6, 0x375feb63, 0x9a25b9cf, 0xfb5bf632, 0xe5b4a803, 0x95f55e8d, 0xdbede748, 0x39aed0bf,
+0x5efd9c49, 0xcc7fd13c, 0xfba1a3d7, 0xa8a80206, 0x08f7e7d3, 0xd1fac77f, 0x32fb9272, 0xe1829834,
+0x45408175, 0x34e4eec0, 0x0ca4ecae, 0x15e31638, 0x1d072d02, 0xee44d6ee, 0xafa5282c, 0x4d4d9f2a,
+0xcdfc0f24, 0x9dfaf345, 0xbf3839c3, 0x3268a3e3, 0x2e4eacb5, 0x93dce12c, 0x4a484f3b, 0xdf7d0000,
+0xeb113c3d, 0xd4bdf13f, 0xecc0f511, 0xd4cdf8c2, 0x42fc05f9, 0x53be2def, 0xb22e49cf, 0xead92e2e,
+0xf956f1cc, 0x02c821e0, 0x1da533e5, 0x1fc50531, 0x34c30f37, 0x7ccba6e4, 0xab1afe2e, 0xb93bd67f,
+0xea28a8a8, 0xf3ef282e, 0xac0c507f, 0xd0cbdd53, 0xd344ea1d, 0x472a1029, 0xed099f70, 0x1240edc6,
+0x7f143244, 0xbae0d3d5, 0x7a4bddfd, 0x5f07d846, 0x1cdf4aaf, 0x4fc40ac1, 0xcfca381a, 0xb67fd847,
+0xd381dde6, 0xdad40d1d, 0x64cb2d07, 0x171a8bc6, 0x11110cbf, 0x2c442dff, 0xbe90f52b, 0xd75500e5,
+0xd8459ef9, 0xff0bc92f, 0x44a903c8, 0xb767d7f2, 0xc281fc02, 0xca534906, 0x25fdec32, 0x5f2aaeb2,
+0x121b376c, 0xe3e41dd2, 0x12ecd9f8, 0xc5071a9d, 0xfaaeee7d, 0x183a1a57, 0xadcb921d, 0x1e33a3b2,
+0x72e52634, 0xc8d4e81e, 0x3b1e0200, 0x321e1608, 0x49add232, 0x0516ff41, 0xc23181d3, 0xfa4e0151,
+0xa63502cb, 0xda381204, 0x72d92da5, 0x7cd7ebff, 0xce08d106, 0x61c6f034, 0x55310f68, 0xed66c357,
+0xc527d609, 0xdab11518, 0x41b808e5, 0x5f21e65e, 0xd4f6f036, 0xfc40008d, 0xaa7f19a0, 0xbff0ac1b,
+0x78283b60, 0x1d0a4a03, 0xd6d0d016, 0xf1f23847, 0x58d93518, 0xd6d481b7, 0xaf11e02e, 0x5c74b67f,
+0xd5734f27, 0x2fb07fc2, 0x29564eff, 0xcafae603, 0x1228f673, 0x3367ce30, 0x132d1342, 0xc80c10e9,
+0x68ad07f9, 0x379c0c9c, 0x3a73c010, 0xc1a534bc, 0x69b0fc14, 0xf0d3ba81, 0x0d07bbe3, 0x3319fa45,
+0x38815afe, 0xc1a67850, 0x7fe740fe, 0xc43fb638, 0xa0864249, 0x07a7dc33, 0x05e606d4, 0x3c6a81ca,
+0x0cdb2635, 0x5dd3f756, 0xcbf09b70, 0x53b2a5b3, 0x5f035ab0, 0xfdda1fbd, 0x0510ac20, 0xa8611f44,
+0x38ceb74c, 0x5048e2e5, 0x907f0238, 0xd9ad00d9, 0x7c247736, 0x8aabe3c6, 0xf5443d27, 0x7ff84622,
+0x4bcc0a34, 0x797dc7e0, 0xf914e5f2, 0xa12df5d7, 0x0fdc23f9, 0x377f50c3, 0x8f47bf82, 0xb9948a4a,
+0x8135ca9e, 0x523ace17, 0xd299fb23, 0x523b5407, 0x38b42a22, 0xadee2cd8, 0xee04b9c8, 0x2de3a5e2,
+0x9f132faa, 0xa0030325, 0x019996f4, 0xd6c5562a, 0xf3e19255, 0x00ff2432, 0xe8f6cf36, 0xe55a7f14,
+0x7079f06f, 0x1e11ff0b, 0xcbe89aa8, 0x6b363996, 0xe61dd6f7, 0x15133821, 0xf35940f9, 0xd9b139c1,
+0x2ad6e10b, 0x2a29c0c6, 0x3c6ee3ce, 0x4b0a3624, 0x23cc10df, 0x17cdb502, 0xa46eaa3f, 0x42350300,
+0xed66334e, 0x1d6fc255, 0x3213e4c5, 0x23d4a65a, 0x05b9a3e9, 0xbacd0c19, 0xc9c5b5b1, 0x8e12670d,
+0x4aabbe2d, 0xb815e3c7, 0x5a2dd152, 0x3ef6551c, 0xf0a1b4f3, 0xb8b8459b, 0xc52dd72e, 0xb9f4b9f7,
+0xbd05dcc8, 0xfae04029, 0xb16ad8df, 0x03d167d4, 0xaf08525b, 0xdbdf3117, 0x4192d163, 0x00a2caad
+
+hard_output0 =
+0xb5bc6ac8, 0xf5373664, 0x1310345c, 0xd5bae4e7, 0x1fc9e83e, 0xebfdfded, 0x84bd86ab, 0xb7aabe00,
+0x60b44fea, 0xb9067464, 0x30325378, 0xa9195955, 0xf70c6e5c, 0x90922632, 0xc90b1cdb, 0xf2f5fb69,
+0x73056b63, 0x1a33bf3f, 0x17755b5c, 0xc58bff6d, 0x2f4390b2, 0x2869d508, 0xe7c7dfe8, 0x38552963,
+0x21da5367, 0x07282b9b, 0xa4767105, 0x1e294251, 0xe350a940, 0xb8a6aa27, 0xed12d778, 0xf10d9ece,
+0xab93527f, 0xcf2da7e7, 0x68f6d0b1, 0x811f4bca, 0x577b06b2, 0x3234f13e, 0x30bab7df, 0x8dc47655,
+0xbb843bed, 0x86da3aba, 0x30950c97, 0xdd096d7a, 0xa871fd6c, 0x8bee4e6b, 0x8fea30d0, 0x6c05b4d2,
+0xf3e144d3, 0xd24ebb1f, 0x065635e5, 0x8d3f2cf9, 0x536c6c6a, 0xfbb0a5d0, 0x3d707b42, 0xc44d5982,
+0xa5f4ad8f, 0xf32c0970, 0x1bccf1a6, 0x05916020, 0xa64fb176, 0x5ede6a35, 0xaf4966da, 0x9df5e0e7,
+0x75042abc, 0x9ef10481, 0x11ddcbc8, 0xa0f5518c, 0xd5c23418, 0x2393d558, 0xfbe7dfeb, 0xed1c64c2,
+0x86a36508, 0xde2dfb1e, 0xb8d0fef9, 0x24505232, 0xc894e71c, 0xbcc752a0, 0x40b74e83, 0x90d23c8c,
+0x728e4a61, 0x108f0b08, 0x66f522ee, 0xc258d851, 0x35a31c44, 0x11311b5b, 0xfd3d5be9, 0x5ae448ff,
+0x4f64994b, 0x5b8247a9, 0x4021114d, 0x2f0b6e82, 0x5eaa9828, 0x50ac71c0, 0xfb86ee52, 0x0dc1ac9b,
+0xbbd47645, 0x8f357115, 0x978ceea0, 0xd557db99, 0x99b30388, 0xfc9a8a1c, 0x0f75be1a, 0x50143e22,
+0x8840989b, 0x738ec50e, 0xe6b2783d, 0xf67899c8, 0x27ebed69, 0x6c415a16, 0x3a6cc2dc, 0xcd4e4e5d,
+0x6cb12b2e, 0xdb88d7c0, 0x79cd1582, 0xbc422413, 0xe72ad2f4, 0x8eaac30f, 0x0bd86747, 0x6d87f69d,
+0x15d62038, 0x4b375630, 0x0d51b859, 0x16db2cb2, 0xf210603a, 0x0abeb833, 0x55c694d0, 0xe57ca43b,
+0x0ba94428, 0x1398a406, 0xe47d3889, 0x5a20203d, 0x250d7a1a, 0xd930ffec, 0x03992e79, 0xf2759376,
+0x024ec121, 0x91fc3a2c, 0xb7e11cc5, 0x4ff7d459, 0xb8700134, 0xd6e61758, 0x4eba0a32, 0xb747e3ec,
+0x7073fad7, 0xded80f99, 0x331e2f1b, 0xfa1f1bed, 0x056424a2, 0x1d1d95e0, 0x550b9ec8, 0x51ee2a38,
+0x19525153, 0xd70c4cd5, 0x0d6cd7ad, 0xe44d1cf2, 0x30dfecda, 0xdacd7fe8, 0x7321d795, 0xddf48ef4,
+0xe271e6a4, 0x9c1feecb, 0x951fcd7b, 0x8acc5a03, 0x3fb83527, 0xe306de74, 0x7b9cd6ee, 0x8e140885,
+0xd4c91e8d, 0xe8c39733, 0x0f02f87f, 0xfb06b1b9, 0x0dc9349c, 0xf76bae8e, 0x4f642a07, 0x3d48a9aa,
+0xe3ea323a, 0xa1cd5c8a, 0x40aa0e70, 0x132042d3, 0xa9732f6c, 0xd15a00c4, 0x43d3b046, 0x9a51ebd4,
+0xc46ee0ed, 0xe2a2148b, 0xf5c478f0, 0x1fb01cf3, 0xf4f321ec, 0xd973811f, 0x11ad11b9, 0x5c67adda
+
+soft_output0 =
+0x81818181, 0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x817f817f, 0x81817f81,
+0x7f817f81, 0x7f817f81, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x817f8181, 0x81817f81,
+0x7f7f7f81, 0x817f7f81, 0x7f7f8181, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x817f8181, 0x7f7f8181,
+0x81817f7f, 0x81817f7f, 0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f817f, 0x7f817f7f,
+0x7f818181, 0x8181817f, 0x81818181, 0x7f817f7f, 0x7f7f817f, 0x81817f81, 0x7f7f8181, 0x817f817f,
+0x7f817f81, 0x7f7f8181, 0x817f7f81, 0x81818181, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f8181,
+0x8181817f, 0x817f7f81, 0x81818181, 0x817f8181, 0x7f7f817f, 0x7f7f7f81, 0x7f81817f, 0x7f81817f,
+0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x817f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, 0x7f81817f,
+0x817f7f81, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f, 0x7f81817f, 0x7f7f817f,
+0x81817f81, 0x81817f81, 0x81817f81, 0x817f817f, 0x7f817f7f, 0x7f81817f, 0x81818181, 0x8181817f,
+0x7f817f7f, 0x817f7f7f, 0x7f7f817f, 0x817f7f81, 0x7f7f7f81, 0x81818181, 0x817f8181, 0x7f7f7f81,
+0x7f817f81, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x7f7f8181, 0x81817f7f, 0x7f7f8181, 0x7f7f817f,
+0x7f7f817f, 0x81817f7f, 0x8181817f, 0x817f8181, 0x81817f81, 0x817f7f7f, 0x817f7f81, 0x7f81817f,
+0x7f7f7f7f, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x81818181, 0x817f7f81, 0x7f817f81, 0x7f817f81,
+0x817f7f81, 0x7f818181, 0x7f81817f, 0x817f817f, 0x7f7f817f, 0x81818181, 0x7f818181, 0x8181817f,
+0x817f7f81, 0x7f81817f, 0x7f7f7f7f, 0x81817f81, 0x817f8181, 0x7f818181, 0x817f8181, 0x81817f7f,
+0x7f7f8181, 0x81817f81, 0x7f7f817f, 0x817f817f, 0x81818181, 0x7f81817f, 0x817f817f, 0x81818181,
+0x817f8181, 0x817f8181, 0x817f8181, 0x7f817f81, 0x7f7f7f81, 0x7f7f8181, 0x7f817f7f, 0x7f7f817f,
+0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f,
+0x7f817f7f, 0x817f817f, 0x7f817f81, 0x7f817f81, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x81817f81,
+0x817f7f81, 0x7f818181, 0x7f7f8181, 0x817f7f81, 0x7f817f81, 0x817f7f7f, 0x7f7f8181, 0x81817f7f,
+0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f,
+0x8181817f, 0x817f8181, 0x81817f81, 0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x81817f81, 0x7f7f8181,
+0x8181817f, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x8181817f, 0x8181817f, 0x81817f81, 0x817f8181,
+0x81818181, 0x7f817f7f, 0x81817f81, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x8181817f, 0x817f8181,
+0x817f7f81, 0x817f7f81, 0x7f817f81, 0x817f8181, 0x7f7f7f81, 0x7f81817f, 0x81818181, 0x8181817f,
+0x7f7f8181, 0x7f7f7f81, 0x81817f7f, 0x7f818181, 0x817f7f7f, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f,
+0x8181817f, 0x7f81817f, 0x817f7f7f, 0x817f817f, 0x7f7f7f7f, 0x7f817f81, 0x8181817f, 0x7f81817f,
+0x7f81817f, 0x81817f7f, 0x817f7f81, 0x7f817f81, 0x7f81817f, 0x7f7f7f7f, 0x81817f81, 0x81817f7f,
+0x81817f7f, 0x81817f7f, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x7f7f8181, 0x7f7f8181,
+0x817f7f7f, 0x81818181, 0x7f818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f817f,
+0x7f7f8181, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x7f7f817f, 0x81817f81, 0x817f8181, 0x7f7f817f,
+0x81817f7f, 0x7f7f7f81, 0x7f81817f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x817f817f, 0x81817f7f,
+0x817f817f, 0x817f7f81, 0x81818181, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f,
+0x7f81817f, 0x7f818181, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f81,
+0x7f7f7f7f, 0x7f7f8181, 0x7f817f81, 0x7f818181, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x8181817f,
+0x817f817f, 0x7f7f7f81, 0x7f818181, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f817f, 0x81817f7f,
+0x817f7f7f, 0x81818181, 0x8181817f, 0x81817f7f, 0x7f7f7f7f, 0x817f8181, 0x7f817f81, 0x817f8181,
+0x817f7f7f, 0x7f7f7f81, 0x817f8181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f81817f, 0x817f817f,
+0x7f817f7f, 0x7f818181, 0x7f818181, 0x7f817f7f, 0x81817f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f81,
+0x81817f81, 0x817f7f7f, 0x81817f7f, 0x81817f81, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f,
+0x7f817f81, 0x81817f81, 0x7f7f7f81, 0x817f8181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f817f7f,
+0x817f7f81, 0x7f7f817f, 0x7f7f8181, 0x7f7f8181, 0x81818181, 0x7f81817f, 0x7f81817f, 0x817f8181,
+0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f817f81, 0x7f817f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81,
+0x8181817f, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x81817f81, 0x7f818181, 0x7f7f7f7f, 0x7f81817f,
+0x8181817f, 0x8181817f, 0x7f7f7f7f, 0x7f818181, 0x817f7f81, 0x7f7f817f, 0x817f7f81, 0x81818181,
+0x817f817f, 0x7f818181, 0x81817f81, 0x81818181, 0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x817f817f,
+0x817f7f7f, 0x817f8181, 0x81817f81, 0x81818181, 0x817f817f, 0x7f817f7f, 0x7f817f7f, 0x7f817f81,
+0x817f7f7f, 0x81817f81, 0x817f7f81, 0x7f7f817f, 0x7f817f7f, 0x7f81817f, 0x8181817f, 0x81817f81,
+0x81817f7f, 0x7f817f81, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x817f7f7f, 0x7f817f81, 0x8181817f,
+0x817f8181, 0x7f81817f, 0x81817f7f, 0x7f817f81, 0x81818181, 0x7f81817f, 0x7f81817f, 0x7f81817f,
+0x7f7f817f, 0x7f817f7f, 0x7f81817f, 0x817f7f81, 0x7f81817f, 0x81818181, 0x7f7f8181, 0x7f818181,
+0x8181817f, 0x81818181, 0x8181817f, 0x817f817f, 0x7f81817f, 0x81817f7f, 0x7f81817f, 0x7f817f7f,
+0x81817f81, 0x7f81817f, 0x7f7f817f, 0x817f8181, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f817f7f,
+0x817f7f7f, 0x7f7f817f, 0x7f818181, 0x817f8181, 0x7f7f7f81, 0x81817f7f, 0x81817f7f, 0x7f7f7f7f,
+0x7f818181, 0x7f7f8181, 0x7f81817f, 0x7f7f817f, 0x8181817f, 0x7f81817f, 0x8181817f, 0x81817f7f,
+0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x817f8181, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x81818181,
+0x817f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x81817f81, 0x81817f7f,
+0x7f818181, 0x817f7f81, 0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f817f, 0x7f817f81, 0x7f7f7f81,
+0x7f818181, 0x7f7f817f, 0x81818181, 0x817f7f81, 0x81817f7f, 0x8181817f, 0x817f8181, 0x8181817f,
+0x7f818181, 0x7f817f81, 0x8181817f, 0x7f817f7f, 0x8181817f, 0x7f817f7f, 0x7f7f7f81, 0x817f7f7f,
+0x817f8181, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x81817f7f, 0x7f7f8181, 0x81817f81, 0x7f7f7f7f,
+0x7f818181, 0x7f818181, 0x7f7f7f7f, 0x8181817f, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f7f817f,
+0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f817f, 0x7f817f81, 0x81818181, 0x817f8181,
+0x7f7f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, 0x81817f81, 0x7f7f7f81,
+0x817f8181, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x7f7f7f81, 0x7f818181, 0x817f7f7f, 0x7f7f7f7f,
+0x817f7f81, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x81817f81, 0x81817f81, 0x7f7f7f81,
+0x8181817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f,
+0x7f817f81, 0x81818181, 0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x817f817f, 0x8181817f, 0x7f818181,
+0x817f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f7f7f, 0x7f81817f,
+0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f7f8181, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, 0x8181817f,
+0x817f7f81, 0x7f81817f, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f,
+0x817f817f, 0x7f81817f, 0x817f7f81, 0x8181817f, 0x81817f7f, 0x81818181, 0x817f817f, 0x7f7f817f,
+0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x81817f81, 0x7f7f7f81, 0x7f81817f, 0x7f7f7f81,
+0x8181817f, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, 0x7f7f8181, 0x817f817f,
+0x7f7f8181, 0x7f7f7f81, 0x817f8181, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x7f7f817f, 0x7f817f81,
+0x7f817f81, 0x7f817f81, 0x7f817f81, 0x817f8181, 0x7f7f7f7f, 0x7f817f81, 0x817f7f81, 0x7f7f7f81,
+0x7f7f817f, 0x7f817f7f, 0x7f7f7f7f, 0x817f7f81, 0x81817f7f, 0x81817f7f, 0x81818181, 0x7f81817f,
+0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f817f7f,
+0x81818181, 0x7f817f7f, 0x7f7f817f, 0x81818181, 0x8181817f, 0x81817f7f, 0x81818181, 0x817f817f,
+0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f818181, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x81817f7f,
+0x8181817f, 0x817f817f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x817f7f7f, 0x817f817f,
+0x817f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f7f81, 0x7f817f7f, 0x81817f7f, 0x81817f81, 0x7f817f81,
+0x817f7f7f, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x81817f81, 0x81818181, 0x7f817f7f,
+0x8181817f, 0x817f7f7f, 0x81817f7f, 0x7f817f7f, 0x817f7f7f, 0x817f7f81, 0x7f7f7f81, 0x81817f7f,
+0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x817f7f7f, 0x81818181, 0x7f7f817f,
+0x817f817f, 0x81817f81, 0x817f7f81, 0x81817f7f, 0x7f7f7f81, 0x7f817f7f, 0x81817f7f, 0x817f817f,
+0x7f817f7f, 0x7f817f7f, 0x81817f7f, 0x817f7f7f, 0x81818181, 0x8181817f, 0x7f7f8181, 0x7f817f7f,
+0x7f817f81, 0x7f7f8181, 0x81817f81, 0x817f7f81, 0x7f818181, 0x7f817f81, 0x7f7f7f81, 0x8181817f,
+0x81817f7f, 0x817f7f7f, 0x7f818181, 0x7f818181, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x7f817f81,
+0x81817f7f, 0x81818181, 0x7f81817f, 0x7f7f8181, 0x817f817f, 0x817f7f81, 0x81817f81, 0x817f7f81,
+0x81818181, 0x817f7f7f, 0x7f818181, 0x81818181, 0x7f7f817f, 0x817f817f, 0x7f7f817f, 0x7f817f7f,
+0x7f817f81, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x817f8181, 0x817f817f, 0x7f818181,
+0x817f7f81, 0x817f7f7f, 0x81817f81, 0x81818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f8181,
+0x7f818181, 0x81817f81, 0x7f817f7f, 0x7f7f7f81, 0x7f7f817f, 0x817f7f81, 0x7f7f7f81, 0x81818181,
+0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x7f81817f, 0x817f8181, 0x7f81817f,
+0x8181817f, 0x81818181, 0x7f7f817f, 0x817f8181, 0x7f817f7f, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f,
+0x8181817f, 0x7f7f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x817f7f81, 0x7f7f817f, 0x7f81817f,
+0x81817f81, 0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f7f, 0x7f817f81, 0x7f817f81, 0x7f7f817f,
+0x81818181, 0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f81817f, 0x7f7f8181, 0x7f7f8181, 0x81817f7f,
+0x7f817f81, 0x817f817f, 0x7f817f7f, 0x81818181, 0x7f817f7f, 0x81817f7f, 0x7f817f81, 0x81817f81,
+0x7f7f7f81, 0x81818181, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f7f81, 0x7f818181, 0x7f7f8181,
+0x7f7f7f81, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x817f8181, 0x817f8181, 0x817f7f81,
+0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x81817f81, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x81817f7f,
+0x81817f7f, 0x7f817f7f, 0x81817f7f, 0x81817f7f, 0x7f817f81, 0x7f7f8181, 0x7f7f817f, 0x7f7f7f7f,
+0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x7f817f81, 0x81818181, 0x81817f81, 0x817f7f7f, 0x7f7f817f,
+0x817f7f7f, 0x7f7f7f81, 0x81817f81, 0x817f8181, 0x7f7f817f, 0x7f7f817f, 0x7f7f8181, 0x817f7f81,
+0x8181817f, 0x8181817f, 0x7f7f7f81, 0x7f7f817f, 0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x817f817f,
+0x7f7f7f81, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81,
+0x81817f7f, 0x8181817f, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x81817f7f, 0x7f818181, 0x7f817f81,
+0x8181817f, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x8181817f, 0x817f817f, 0x81817f81, 0x7f7f8181,
+0x7f7f7f7f, 0x7f81817f, 0x817f817f, 0x81817f81, 0x81817f81, 0x81817f7f, 0x81817f81, 0x81817f81,
+0x7f7f817f, 0x7f7f7f7f, 0x817f7f7f, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x7f7f8181, 0x817f7f7f,
+0x7f7f7f81, 0x7f817f81, 0x7f7f7f7f, 0x7f817f81, 0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x7f817f7f,
+0x81817f81, 0x7f7f7f81, 0x7f81817f, 0x81817f81, 0x7f817f81, 0x817f8181, 0x7f817f81, 0x7f817f7f,
+0x81818181, 0x817f817f, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x81818181, 0x8181817f, 0x7f817f81,
+0x81817f7f, 0x7f7f7f7f, 0x7f7f817f, 0x7f81817f, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f817f81,
+0x817f817f, 0x817f7f81, 0x81818181, 0x81817f7f, 0x817f7f81, 0x817f8181, 0x81817f7f, 0x81817f7f,
+0x7f7f817f, 0x817f817f, 0x7f817f81, 0x7f817f81, 0x81818181, 0x817f817f, 0x7f7f7f81, 0x81817f7f,
+0x817f817f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f7f, 0x817f7f81, 0x817f7f81, 0x817f8181, 0x8181817f,
+0x81818181, 0x81817f7f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f817f, 0x7f7f7f7f, 0x7f817f81,
+0x8181817f, 0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x81817f7f,
+0x7f7f7f81, 0x81817f81, 0x7f818181, 0x81817f7f, 0x7f7f817f, 0x7f818181, 0x81818181, 0x7f818181,
+0x7f7f7f81, 0x81817f7f, 0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x7f817f7f, 0x7f817f7f, 0x7f817f7f,
+0x7f7f817f, 0x7f817f81, 0x7f81817f, 0x7f7f7f81, 0x81818181, 0x7f7f8181, 0x817f8181, 0x7f817f81,
+0x7f817f81, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x817f7f7f, 0x7f81817f, 0x7f817f7f,
+0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x817f817f, 0x81817f81, 0x7f7f817f, 0x7f817f7f, 0x817f817f,
+0x7f7f7f7f, 0x817f7f81, 0x817f8181, 0x7f7f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x7f7f817f, 0x8181817f,
+0x817f7f81, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x817f7f7f,
+0x817f817f, 0x81817f7f, 0x81818181, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x817f817f, 0x81817f81,
+0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x8181817f, 0x7f7f817f, 0x817f7f81, 0x817f7f81, 0x7f818181,
+0x817f7f7f, 0x8181817f, 0x817f8181, 0x7f7f817f, 0x7f818181, 0x817f8181, 0x7f7f7f81, 0x817f8181,
+0x8181817f, 0x7f817f81, 0x81817f7f, 0x817f8181, 0x817f817f, 0x817f7f81, 0x81818181, 0x81818181,
+0x8181817f, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x817f817f, 0x7f818181, 0x817f8181, 0x7f7f817f,
+0x7f7f7f81, 0x7f818181, 0x7f818181, 0x7f81817f, 0x81817f81, 0x7f818181, 0x7f7f8181, 0x7f817f7f,
+0x8181817f, 0x7f817f7f, 0x817f7f81, 0x7f817f81, 0x8181817f, 0x81818181, 0x817f7f7f, 0x817f817f,
+0x7f7f7f7f, 0x817f8181, 0x81818181, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x817f7f81, 0x81817f81,
+0x7f818181, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x7f81817f,
+0x81817f81, 0x817f7f7f, 0x817f817f, 0x7f7f817f, 0x7f7f8181, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f,
+0x7f81817f, 0x7f81817f, 0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x8181817f, 0x817f7f81, 0x7f818181,
+0x7f818181, 0x817f7f81, 0x7f817f81, 0x7f7f7f7f, 0x81817f81, 0x81818181, 0x7f818181, 0x7f81817f,
+0x81817f81, 0x817f7f7f, 0x7f7f817f, 0x7f7f817f, 0x81817f81, 0x7f817f81, 0x7f7f8181, 0x81817f7f,
+0x817f817f, 0x81817f7f, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f817f81, 0x817f7f7f,
+0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x7f818181, 0x7f817f81, 0x81818181, 0x817f817f, 0x7f7f7f7f,
+0x81818181, 0x7f7f8181, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x8181817f, 0x81817f7f, 0x817f817f,
+0x817f7f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x817f8181, 0x81818181, 0x7f7f7f7f, 0x7f7f817f,
+0x817f7f7f, 0x7f7f817f, 0x817f8181, 0x7f817f81, 0x7f818181, 0x7f81817f, 0x7f81817f, 0x81818181,
+0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x81818181, 0x7f817f81, 0x81818181, 0x7f817f81, 0x81818181,
+0x817f8181, 0x7f7f7f81, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x817f7f81,
+0x7f81817f, 0x817f7f7f, 0x81817f81, 0x817f7f81, 0x81818181, 0x7f7f7f7f, 0x81817f7f, 0x81817f81,
+0x81817f81, 0x81817f81, 0x7f817f81, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f7f817f, 0x81817f81,
+0x7f7f817f, 0x8181817f, 0x817f7f81, 0x81818181, 0x817f7f7f, 0x817f8181, 0x7f7f817f, 0x817f7f7f,
+0x8181817f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x81817f81, 0x817f7f7f, 0x81818181, 0x817f7f7f,
+0x81817f81, 0x7f81817f, 0x7f81817f, 0x7f818181, 0x817f7f81, 0x817f817f, 0x7f81817f, 0x81818181,
+0x81817f81, 0x81818181, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x8181817f, 0x8181817f, 0x81818181,
+0x81818181, 0x81818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f7f81, 0x7f818181,
+0x817f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x7f817f81, 0x817f7f7f, 0x81817f81,
+0x7f7f7f7f, 0x817f7f81, 0x817f7f7f, 0x7f817f81, 0x81817f7f, 0x81817f81, 0x817f7f81, 0x81817f81,
+0x817f817f, 0x7f817f81, 0x81817f7f, 0x7f7f7f81, 0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x7f81817f,
+0x817f8181, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x7f7f817f, 0x7f817f7f, 0x7f818181, 0x81817f7f,
+0x81817f7f, 0x8181817f, 0x817f817f, 0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f818181, 0x7f81817f,
+0x817f7f7f, 0x7f817f7f, 0x81817f81, 0x8181817f, 0x8181817f, 0x7f817f81, 0x8181817f, 0x8181817f,
+0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, 0x7f817f81,
+0x7f818181, 0x7f817f81, 0x81818181, 0x817f817f, 0x817f817f, 0x7f818181, 0x817f8181, 0x7f7f817f,
+0x7f7f7f81, 0x817f7f7f, 0x7f81817f, 0x7f81817f, 0x7f81817f, 0x81817f81, 0x817f817f, 0x7f7f817f,
+0x7f817f81, 0x817f817f, 0x7f817f7f, 0x7f81817f, 0x8181817f, 0x817f7f7f, 0x8181817f, 0x7f7f8181,
+0x7f818181, 0x7f817f81, 0x7f7f817f, 0x817f8181, 0x817f817f, 0x817f7f81, 0x7f7f7f81, 0x81817f81,
+0x817f8181, 0x81818181, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f7f7f81,
+0x7f818181, 0x817f7f81, 0x7f817f7f, 0x81818181, 0x7f817f7f, 0x7f7f817f, 0x7f7f8181, 0x8181817f,
+0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x7f81817f, 0x81817f81, 0x7f817f81, 0x817f8181, 0x817f817f,
+0x817f7f7f, 0x81817f7f, 0x7f818181, 0x81818181, 0x817f7f81, 0x81817f7f, 0x817f8181, 0x817f8181,
+0x81817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x7f7f7f7f, 0x817f8181,
+0x81817f7f, 0x7f818181, 0x7f7f7f81, 0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x7f817f81, 0x817f8181,
+0x817f817f, 0x7f7f7f7f, 0x81818181, 0x81818181, 0x81818181, 0x7f817f7f, 0x81818181, 0x7f81817f,
+0x7f817f81, 0x8181817f, 0x817f7f7f, 0x817f817f, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x81817f81,
+0x7f7f8181, 0x7f817f7f, 0x817f7f7f, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x817f7f81,
+0x817f7f7f, 0x7f817f81, 0x7f817f7f, 0x817f817f, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x8181817f,
+0x7f817f7f, 0x8181817f, 0x7f817f81, 0x7f81817f, 0x7f7f7f81, 0x7f7f7f81, 0x817f817f, 0x81818181,
+0x7f817f81, 0x7f81817f, 0x7f7f8181, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f818181, 0x7f7f7f7f,
+0x7f7f8181, 0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f7f, 0x7f7f8181,
+0x817f7f7f, 0x81817f81, 0x7f818181, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x7f817f81,
+0x7f7f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x8181817f, 0x817f7f7f, 0x7f817f7f, 0x817f8181,
+0x7f817f7f, 0x817f8181, 0x7f818181, 0x81817f7f, 0x7f817f7f, 0x8181817f, 0x7f81817f, 0x7f7f7f7f,
+0x7f81817f, 0x7f817f81, 0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f818181, 0x817f7f7f, 0x7f81817f,
+0x81817f81, 0x7f7f7f81, 0x7f818181, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x817f8181,
+0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x8181817f, 0x81817f81,
+0x817f8181, 0x817f8181, 0x7f7f8181, 0x7f7f8181, 0x8181817f, 0x7f817f7f, 0x8181817f, 0x7f81817f,
+0x7f7f7f81, 0x817f817f, 0x81817f7f, 0x817f7f81, 0x81817f7f, 0x817f7f81, 0x81817f81, 0x7f817f81,
+0x817f8181, 0x7f818181, 0x7f817f81, 0x81817f81, 0x81817f81, 0x817f8181, 0x7f818181, 0x81818181,
+0x817f7f81, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x8181817f, 0x81818181, 0x817f7f81, 0x7f818181,
+0x81817f81, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x81817f81, 0x7f817f81, 0x7f7f817f, 0x8181817f,
+0x817f7f7f, 0x7f7f817f, 0x7f817f81, 0x817f7f7f, 0x7f7f817f, 0x81818181, 0x817f7f81, 0x7f7f8181,
+0x7f7f8181, 0x8181817f, 0x7f7f8181, 0x817f8181, 0x817f7f7f, 0x817f7f7f, 0x7f7f7f7f, 0x81817f81,
+0x817f7f81, 0x7f7f8181, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f7f7f81, 0x81817f7f,
+0x817f817f, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f,
+0x7f7f8181, 0x817f817f, 0x7f7f7f81, 0x7f7f817f, 0x817f8181, 0x7f81817f, 0x7f81817f, 0x817f7f7f,
+0x7f817f7f, 0x817f817f, 0x817f7f81, 0x817f7f81, 0x81817f81, 0x7f817f81, 0x817f817f, 0x7f81817f,
+0x7f7f8181, 0x7f818181, 0x7f818181, 0x817f817f, 0x7f81817f, 0x817f8181, 0x8181817f, 0x7f817f7f,
+0x817f8181, 0x817f7f81, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x7f7f7f7f, 0x81818181, 0x7f7f8181,
+0x7f817f7f, 0x7f7f8181, 0x81817f7f, 0x817f817f, 0x81818181, 0x817f7f7f, 0x8181817f, 0x81817f81,
+0x817f817f, 0x817f817f, 0x7f7f817f, 0x7f7f7f81, 0x7f818181, 0x7f817f7f, 0x7f7f7f81, 0x817f817f,
+0x7f818181, 0x7f7f8181, 0x7f817f7f, 0x81817f81, 0x7f7f8181, 0x817f7f81, 0x817f7f7f, 0x817f8181,
+0x7f7f8181, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, 0x81817f81, 0x817f8181, 0x817f7f7f,
+0x7f7f7f81, 0x817f8181, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x7f7f7f7f, 0x81817f7f,
+0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f7f,
+0x8181817f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x817f8181, 0x7f7f7f81, 0x7f817f81, 0x7f817f7f,
+0x7f818181, 0x817f7f7f, 0x81817f81, 0x817f8181, 0x7f818181, 0x81817f7f, 0x817f8181, 0x7f7f8181,
+0x81817f7f, 0x7f7f7f7f, 0x81818181, 0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x817f8181,
+0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f,
+0x7f817f81, 0x817f8181, 0x7f7f7f7f, 0x7f818181, 0x7f7f7f81, 0x7f7f817f, 0x7f817f81, 0x817f8181,
+0x81818181, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f81817f, 0x7f7f817f, 0x81817f81, 0x7f7f8181,
+0x81818181, 0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x817f7f81, 0x81817f81, 0x7f817f7f, 0x817f817f,
+0x7f81817f, 0x7f818181, 0x817f8181, 0x81817f7f, 0x817f8181, 0x7f7f7f81, 0x7f818181, 0x7f7f817f,
+0x817f8181, 0x817f8181, 0x7f817f81, 0x7f81817f, 0x8181817f, 0x817f7f81, 0x817f817f, 0x81818181,
+0x7f817f7f, 0x817f817f, 0x817f817f, 0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x7f81817f, 0x81818181,
+0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f81817f, 0x7f7f7f81, 0x817f7f81, 0x817f8181, 0x7f818181,
+0x7f7f7f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f818181, 0x817f7f81, 0x81817f7f, 0x81817f7f, 0x81817f81,
+0x7f7f7f81, 0x8181817f, 0x7f7f8181, 0x7f81817f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f7f, 0x7f7f817f,
+0x7f817f7f, 0x81818181, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x817f817f, 0x81818181, 0x81817f81,
+0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x81818181, 0x81818181, 0x7f7f7f7f, 0x817f7f81, 0x7f81817f,
+0x7f81817f, 0x7f7f8181, 0x7f818181, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x81817f81, 0x817f7f81,
+0x7f7f7f81, 0x7f81817f, 0x817f7f81, 0x817f8181, 0x7f81817f, 0x817f8181, 0x81817f7f, 0x7f817f81,
+0x81818181, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x7f7f8181, 0x8181817f, 0x81818181, 0x81818181,
+0x81818181, 0x817f8181, 0x81817f81, 0x817f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x7f81817f,
+0x7f7f7f81, 0x7f7f7f7f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x817f7f7f,
+0x7f817f7f, 0x81818181, 0x81818181, 0x7f7f817f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x817f7f81,
+0x81818181, 0x7f817f7f, 0x81817f7f, 0x81817f81, 0x817f817f, 0x817f7f7f, 0x7f7f817f, 0x7f7f817f,
+0x7f7f8181, 0x817f7f81, 0x7f81817f, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f7f, 0x7f7f7f7f,
+0x7f7f7f7f, 0x7f7f7f81, 0x7f7f8181, 0x817f8181, 0x817f817f, 0x7f7f7f81, 0x81818181, 0x81817f81,
+0x7f7f7f7f, 0x817f817f, 0x81818181, 0x7f7f817f, 0x7f818181, 0x7f7f8181, 0x817f8181, 0x817f7f7f,
+0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x817f8181, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81,
+0x7f818181, 0x7f817f7f, 0x81817f81, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f817f, 0x7f7f7f7f,
+0x817f8181, 0x7f7f7f7f, 0x7f81817f, 0x7f817f81, 0x81818181, 0x817f817f, 0x817f7f7f, 0x81817f7f,
+0x7f81817f, 0x817f817f, 0x7f818181, 0x8181817f, 0x817f817f, 0x81818181, 0x817f7f7f, 0x817f7f81,
+0x7f7f8181, 0x817f817f, 0x817f7f81, 0x7f81817f, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f,
+0x7f7f7f7f, 0x81817f81, 0x7f81817f, 0x7f7f7f7f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x81817f7f,
+0x7f81817f, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, 0x81817f81,
+0x7f7f7f81, 0x817f7f81, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x81817f81, 0x7f817f7f, 0x7f818181,
+0x7f818181, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x81817f81, 0x817f7f7f, 0x817f8181, 0x7f7f817f,
+0x81818181, 0x81817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f, 0x817f7f81, 0x817f7f7f,
+0x81817f7f, 0x7f81817f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x817f7f81,
+0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x7f7f7f7f, 0x7f817f81, 0x7f7f7f81, 0x817f7f7f, 0x81817f81,
+0x7f817f7f, 0x817f7f81, 0x817f817f, 0x81817f7f, 0x7f818181, 0x7f81817f, 0x7f817f81, 0x7f817f81,
+0x7f81817f, 0x7f7f7f7f, 0x7f817f81, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f817f,
+0x7f7f7f81, 0x7f818181, 0x7f818181, 0x7f818181, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x81818181,
+0x7f81817f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81,
+0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x81817f81, 0x7f818181, 0x81817f81, 0x7f7f7f7f, 0x7f81817f,
+0x81817f81, 0x7f817f7f, 0x7f81817f, 0x81817f81, 0x81818181, 0x7f81817f, 0x7f7f817f, 0x81817f7f,
+0x7f7f817f, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x81817f81, 0x8181817f, 0x7f817f81, 0x81817f7f,
+0x817f7f7f, 0x7f7f8181, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x817f817f, 0x817f7f81, 0x7f7f817f,
+0x7f7f8181, 0x7f818181, 0x7f7f8181, 0x81817f7f, 0x8181817f, 0x817f7f7f, 0x7f7f8181, 0x7f817f81,
+0x817f817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x81818181, 0x81817f81,
+0x81817f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f817f, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f,
+0x7f81817f, 0x817f7f81, 0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x7f7f817f,
+0x7f817f7f, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x81818181, 0x8181817f, 0x7f7f7f81, 0x7f818181,
+0x8181817f, 0x7f7f7f7f, 0x817f817f, 0x7f818181, 0x81817f81, 0x7f818181, 0x7f81817f, 0x817f7f81,
+0x81818181, 0x7f7f7f81, 0x81817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x8181817f, 0x7f817f7f,
+0x7f7f8181, 0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f818181,
+0x81817f81, 0x7f818181, 0x81817f81, 0x7f818181, 0x7f7f7f81, 0x817f817f, 0x81817f7f, 0x7f7f7f81,
+0x817f817f, 0x7f817f81, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f81, 0x8181817f, 0x817f8181,
+0x7f7f7f7f, 0x8181817f, 0x817f8181, 0x81818181, 0x7f818181, 0x81818181, 0x7f7f7f81, 0x817f817f,
+0x81817f81, 0x8181817f, 0x8181817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x8181817f, 0x817f7f7f,
+0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81817f81, 0x81817f7f, 0x8181817f,
+0x817f817f, 0x7f817f7f, 0x817f7f7f, 0x7f7f7f81, 0x817f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f7f,
+0x817f817f, 0x817f8181, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f, 0x7f818181, 0x7f7f8181, 0x817f817f,
+0x81818181, 0x7f818181, 0x817f7f81, 0x817f8181, 0x817f817f, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f,
+0x7f7f7f7f, 0x7f817f7f, 0x817f817f, 0x817f817f, 0x7f817f81, 0x81818181, 0x7f817f7f, 0x7f7f8181,
+0x7f7f8181, 0x81817f7f, 0x81817f7f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, 0x81817f7f, 0x7f7f817f,
+0x817f7f81, 0x8181817f, 0x81817f81, 0x7f7f8181, 0x7f7f8181, 0x7f7f7f81, 0x81817f7f, 0x7f7f817f,
+0x81817f81, 0x7f817f81, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f8181, 0x7f817f7f, 0x7f7f8181,
+0x817f817f, 0x817f7f7f, 0x7f817f7f, 0x817f8181, 0x817f7f7f, 0x7f81817f, 0x7f7f817f, 0x817f817f,
+0x7f7f8181, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f7f817f, 0x81817f81, 0x7f817f81, 0x7f817f7f,
+0x817f7f81, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x81817f81, 0x7f817f7f, 0x817f7f7f, 0x7f7f7f7f,
+0x7f817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f7f7f7f, 0x7f818181, 0x817f7f81, 0x817f817f, 0x8181817f,
+0x817f7f7f, 0x817f8181, 0x8181817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x81817f7f, 0x7f7f8181,
+0x81818181, 0x81818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f7f81, 0x8181817f, 0x7f81817f,
+0x7f7f817f, 0x7f818181, 0x81817f7f, 0x7f817f81, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x817f7f7f,
+0x7f818181, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x7f818181, 0x81817f81, 0x817f7f7f, 0x7f818181,
+0x7f817f81, 0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x817f817f, 0x81818181, 0x7f817f7f, 0x7f817f81,
+0x817f7f81, 0x7f7f817f, 0x81817f81, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x817f817f, 0x81817f81,
+0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x7f818181, 0x7f818181, 0x7f7f7f7f, 0x817f7f81,
+0x817f7f81, 0x7f7f7f81, 0x8181817f, 0x7f7f7f7f, 0x817f817f, 0x7f817f7f, 0x817f817f, 0x7f7f7f7f,
+0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x7f81817f,
+0x7f817f7f, 0x81817f81, 0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x817f7f7f,
+0x81817f81, 0x817f7f7f, 0x81817f81, 0x7f81817f, 0x7f818181, 0x7f818181, 0x7f818181, 0x7f81817f,
+0x7f7f8181, 0x817f7f81, 0x817f8181, 0x7f817f81, 0x7f817f7f, 0x81818181, 0x7f817f81, 0x81818181,
+0x81817f7f, 0x81818181, 0x817f7f81, 0x81817f81, 0x81817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f81817f,
+0x7f7f8181, 0x81818181, 0x7f7f7f7f, 0x7f7f8181, 0x81817f7f, 0x81817f81, 0x7f7f7f81, 0x8181817f,
+0x8181817f, 0x7f7f817f, 0x81817f81, 0x817f817f, 0x817f7f7f, 0x817f817f, 0x817f817f, 0x7f81817f,
+0x8181817f, 0x817f7f7f, 0x7f818181, 0x817f7f7f, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x817f817f,
+0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, 0x8181817f, 0x7f818181, 0x7f7f8181, 0x7f81817f, 0x817f8181,
+0x7f81817f, 0x7f7f8181, 0x8181817f, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x8181817f, 0x81817f81,
+0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x7f7f8181, 0x817f7f7f,
+0x7f7f7f81, 0x817f8181, 0x7f817f81, 0x817f7f81, 0x817f7f81, 0x81818181, 0x81818181, 0x81818181,
+0x7f7f8181, 0x7f81817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81,
+0x7f817f81, 0x817f8181, 0x7f81817f, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x817f7f7f, 0x817f8181,
+0x7f7f8181, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x7f81817f, 0x8181817f, 0x8181817f, 0x7f81817f,
+0x7f817f81, 0x7f7f817f, 0x817f817f, 0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x817f7f81, 0x817f817f,
+0x817f817f, 0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x817f817f,
+0x8181817f, 0x81818181, 0x81817f81, 0x817f8181, 0x81818181, 0x81817f7f, 0x817f8181, 0x7f7f8181,
+0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x81817f7f, 0x7f7f7f7f, 0x817f817f,
+0x817f7f7f, 0x7f7f817f, 0x7f817f81, 0x7f7f817f, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x7f817f7f,
+0x7f81817f, 0x7f7f8181, 0x7f817f81, 0x817f7f7f, 0x7f7f817f, 0x7f818181, 0x7f7f817f, 0x81818181,
+0x7f81817f, 0x817f8181, 0x81818181, 0x81817f81, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f7f81,
+0x81817f7f, 0x7f817f81, 0x7f817f7f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f7f, 0x7f81817f, 0x7f818181,
+0x7f818181, 0x817f7f7f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x817f7f81, 0x7f7f7f81, 0x817f7f81,
+0x7f817f7f, 0x7f7f7f7f, 0x7f818181, 0x7f817f81, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x81818181,
+0x7f7f817f, 0x81817f81, 0x8181817f, 0x7f7f7f81, 0x81817f7f, 0x7f817f81, 0x81818181, 0x817f817f,
+0x7f7f7f81, 0x7f81817f, 0x7f7f817f, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x8181817f,
+0x7f7f7f7f, 0x81817f81, 0x7f7f7f7f, 0x7f7f7f7f, 0x81817f7f, 0x7f817f81, 0x7f81817f, 0x81817f81,
+0x817f817f, 0x81817f81, 0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f817f, 0x7f818181, 0x817f817f,
+0x7f7f7f7f, 0x7f817f7f, 0x8181817f, 0x817f7f7f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x817f7f7f,
+0x7f817f81, 0x7f7f8181, 0x8181817f, 0x7f818181, 0x7f817f7f, 0x817f817f, 0x7f7f7f81, 0x817f817f,
+0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x7f81817f,
+0x7f7f817f, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x7f817f81,
+0x7f817f7f, 0x817f817f, 0x7f7f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x81818181, 0x817f7f7f,
+0x817f7f81, 0x817f7f7f, 0x817f7f7f, 0x8181817f, 0x7f7f7f81, 0x7f7f8181, 0x817f7f81, 0x817f7f81,
+0x7f817f7f, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x7f818181, 0x7f7f817f, 0x7f817f81, 0x7f7f817f,
+0x7f817f7f, 0x7f817f81, 0x8181817f, 0x81818181, 0x7f7f7f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f,
+0x817f8181, 0x7f818181, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f818181, 0x81818181, 0x7f7f7f7f,
+0x8181817f, 0x81817f81, 0x817f7f81, 0x81818181, 0x81817f7f, 0x8181817f, 0x7f817f7f, 0x7f7f7f7f,
+0x817f7f81, 0x817f7f81, 0x817f7f7f, 0x7f7f7f81, 0x7f7f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f817f,
+0x81818181, 0x7f7f7f81, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x7f818181, 0x817f817f, 0x7f7f7f81,
+0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x8181817f, 0x7f81817f, 0x817f817f, 0x8181817f, 0x7f7f7f7f,
+0x817f7f7f, 0x7f818181, 0x817f7f81, 0x7f7f8181, 0x817f7f7f, 0x8181817f, 0x817f8181, 0x8181817f,
+0x81817f81, 0x817f8181, 0x817f7f81, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x7f7f7f81, 0x7f818181,
+0x7f817f7f, 0x7f7f8181, 0x7f7f817f, 0x81817f81, 0x81817f7f, 0x81817f7f, 0x81817f7f, 0x817f817f,
+0x7f81817f, 0x7f7f7f7f, 0x817f8181, 0x8181817f, 0x7f817f81, 0x7f817f7f, 0x817f7f7f, 0x7f7f8181,
+0x7f7f8181, 0x7f7f7f81, 0x7f818181, 0x81817f7f, 0x8181817f, 0x817f7f81, 0x7f7f7f7f, 0x7f81817f,
+0x817f8181, 0x7f81817f, 0x8181817f, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f7f7f81, 0x817f7f7f,
+0x81818181, 0x7f7f7f7f, 0x81818181, 0x81818181, 0x7f7f8181, 0x7f817f81, 0x81818181, 0x817f8181,
+0x81817f81, 0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x81817f81, 0x81817f81,
+0x7f7f8181, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x81818181, 0x7f7f7f7f,
+0x81817f7f, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x817f8181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f,
+0x7f7f7f81, 0x817f7f81, 0x7f81817f, 0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x81817f7f, 0x81817f81,
+0x81818181, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, 0x81818181, 0x7f7f817f, 0x7f818181,
+0x7f7f7f7f, 0x81818181, 0x7f7f7f81, 0x81818181, 0x8181817f, 0x7f817f81, 0x7f7f7f7f, 0x7f817f81,
+0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x8181817f, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f817f,
+0x7f7f7f81, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x81817f81,
+0x817f7f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x817f7f81, 0x817f8181, 0x7f7f7f7f,
+0x7f818181, 0x81817f7f, 0x7f817f7f, 0x817f817f, 0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x7f7f8181,
+0x817f7f81, 0x81817f81, 0x817f8181, 0x817f817f, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f7f8181,
+0x7f7f7f81, 0x817f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x81818181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f7f7f,
+0x81818181, 0x8181817f, 0x7f7f7f7f, 0x7f7f817f, 0x817f8181, 0x817f817f, 0x817f7f7f, 0x817f8181,
+0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x7f817f81, 0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f,
+0x7f7f7f7f, 0x817f8181, 0x817f8181, 0x7f7f817f, 0x81817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f,
+0x817f7f7f, 0x817f817f, 0x7f7f8181, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81, 0x7f81817f,
+0x81817f7f, 0x817f7f7f, 0x7f817f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f7f817f, 0x7f817f81,
+0x81817f81, 0x81818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f81, 0x81817f7f, 0x81818181, 0x817f8181,
+0x7f7f817f, 0x7f817f7f, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x7f818181, 0x7f817f81, 0x7f817f81,
+0x7f818181, 0x81817f7f, 0x817f7f81, 0x8181817f, 0x7f7f7f81, 0x81817f7f, 0x7f817f81, 0x8181817f,
+0x7f7f817f, 0x817f817f, 0x81817f81, 0x81817f81, 0x81817f7f, 0x7f818181, 0x817f7f81, 0x817f7f81,
+0x7f7f7f7f, 0x7f81817f, 0x7f817f81, 0x7f7f817f, 0x7f818181, 0x8181817f, 0x81817f81, 0x7f817f81,
+0x7f81817f, 0x817f7f81, 0x7f818181, 0x817f817f, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f,
+0x81817f7f, 0x7f7f8181, 0x81817f81, 0x81817f7f, 0x8181817f, 0x7f7f817f, 0x8181817f, 0x7f818181,
+0x7f817f7f, 0x817f7f7f, 0x81817f81, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x7f7f817f, 0x7f7f7f81,
+0x817f7f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f8181, 0x7f817f81, 0x7f817f81, 0x7f81817f, 0x81818181,
+0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x817f7f81, 0x8181817f,
+0x81818181, 0x8181817f, 0x8181817f, 0x817f7f81, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x817f7f7f,
+0x81818181, 0x81818181, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x817f817f,
+0x7f7f7f7f, 0x7f817f7f, 0x817f8181, 0x7f7f7f81, 0x817f7f7f, 0x7f817f81, 0x7f7f817f, 0x8181817f,
+0x817f7f81, 0x7f81817f, 0x817f817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x817f8181, 0x817f7f7f,
+0x81818181, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x817f817f, 0x817f7f7f, 0x7f7f817f, 0x8181817f,
+0x7f7f7f7f, 0x81817f81, 0x8181817f, 0x7f81817f, 0x817f7f81, 0x7f7f7f7f, 0x817f7f7f, 0x817f817f,
+0x81818181, 0x8181817f, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x8181817f,
+0x7f818181, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x81817f7f, 0x7f818181, 0x81818181, 0x8181817f,
+0x817f817f, 0x7f7f7f7f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81, 0x7f7f817f, 0x7f818181, 0x81818181,
+0x81818181, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x81817f7f, 0x7f7f817f, 0x8181817f, 0x7f7f7f7f,
+0x7f7f8181, 0x7f7f817f, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x817f8181, 0x81818181,
+0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f7f7f, 0x8181817f, 0x7f817f7f, 0x7f7f817f, 0x81818181,
+0x7f817f7f, 0x817f8181, 0x817f7f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f8181,
+0x81817f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f81817f, 0x8181817f, 0x81818181, 0x81817f7f,
+0x7f81817f, 0x81818181, 0x7f7f8181, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x817f817f, 0x7f818181,
+0x7f817f7f, 0x7f7f7f7f, 0x817f817f, 0x817f7f7f, 0x7f81817f, 0x817f7f81, 0x81818181, 0x7f7f817f,
+0x817f817f, 0x81817f81, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x7f81817f, 0x7f817f81, 0x81817f81,
+0x7f7f817f, 0x8181817f, 0x7f818181, 0x7f7f7f7f, 0x81818181, 0x8181817f, 0x7f817f81, 0x8181817f,
+0x7f81817f, 0x7f7f8181, 0x817f817f, 0x817f7f81, 0x7f817f81, 0x81817f7f, 0x7f818181, 0x81818181,
+0x81817f81, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x81817f81, 0x7f7f7f81, 0x7f818181, 0x81817f81,
+0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f817f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f817f,
+0x817f817f, 0x817f817f, 0x817f8181, 0x7f7f7f81, 0x817f7f81, 0x817f8181, 0x7f7f7f81, 0x8181817f,
+0x81817f7f, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x817f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f,
+0x7f817f7f, 0x81818181, 0x81818181, 0x7f7f7f81, 0x81817f7f, 0x8181817f, 0x7f818181, 0x7f817f7f,
+0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x7f817f7f, 0x81817f7f, 0x8181817f, 0x817f8181, 0x817f7f81,
+0x7f7f7f7f, 0x81817f7f, 0x81817f81, 0x81818181, 0x81817f7f, 0x817f817f, 0x817f817f, 0x7f7f817f,
+0x7f81817f, 0x81818181, 0x7f817f81, 0x7f7f8181, 0x7f7f817f, 0x7f7f7f81, 0x81817f7f, 0x817f8181,
+0x7f817f7f, 0x8181817f, 0x817f8181, 0x817f7f7f, 0x7f7f7f81, 0x8181817f, 0x817f7f81, 0x7f817f81,
+0x7f7f8181, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, 0x8181817f,
+0x7f81817f, 0x817f817f, 0x7f817f81, 0x8181817f, 0x7f818181, 0x81817f81, 0x7f818181, 0x81818181,
+0x7f7f7f81, 0x817f7f7f, 0x817f8181, 0x7f817f7f, 0x7f817f81, 0x817f7f81, 0x81818181, 0x7f7f7f81,
+0x7f7f7f81, 0x7f7f7f81, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f817f7f, 0x7f817f81, 0x7f818181,
+0x7f7f8181, 0x817f8181, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x817f8181, 0x7f817f7f, 0x7f817f7f,
+0x7f7f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f817f7f, 0x817f817f, 0x7f817f81, 0x7f7f8181, 0x81817f81,
+0x7f7f8181, 0x7f818181, 0x817f7f81, 0x7f818181, 0x81817f7f, 0x817f7f7f, 0x7f7f8181, 0x81817f7f,
+0x7f817f7f, 0x7f818181, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x7f818181, 0x7f817f7f, 0x81817f81,
+0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, 0x7f817f81, 0x7f81817f,
+0x7f7f7f81, 0x7f7f7f81, 0x817f817f, 0x7f7f7f81, 0x81818181, 0x7f7f7f81, 0x8181817f, 0x817f7f81,
+0x7f7f817f, 0x817f817f, 0x817f8181, 0x7f7f8181, 0x81817f7f, 0x7f817f81, 0x8181817f, 0x7f81817f,
+0x7f818181, 0x7f818181, 0x7f817f81, 0x7f7f7f7f, 0x7f81817f, 0x817f7f81, 0x7f7f7f81, 0x81818181,
+0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f81817f,
+0x81817f81, 0x7f817f81, 0x81817f7f, 0x81818181, 0x817f817f, 0x817f7f7f, 0x817f8181, 0x81818181,
+0x7f81817f, 0x81818181, 0x81818181, 0x7f818181, 0x7f7f7f7f, 0x81817f81, 0x8181817f, 0x7f7f817f,
+0x817f7f81, 0x817f7f81, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x7f7f7f81, 0x817f7f81, 0x81818181,
+0x8181817f, 0x81817f81, 0x7f7f8181, 0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x7f81817f,
+0x81817f7f, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x7f7f7f81, 0x817f7f81, 0x81818181, 0x7f7f7f81,
+0x7f7f8181, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x81817f81, 0x7f81817f, 0x81817f81, 0x7f7f8181,
+0x817f817f, 0x7f817f81, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x7f7f817f,
+0x817f8181, 0x7f7f817f, 0x8181817f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f7f,
+0x7f7f817f, 0x7f81817f, 0x81818181, 0x81817f81, 0x8181817f, 0x7f817f7f, 0x81818181, 0x7f7f7f81,
+0x7f7f7f7f, 0x81817f7f, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x81818181, 0x7f7f7f81,
+0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x817f8181,
+0x7f817f7f, 0x7f817f7f, 0x7f7f817f, 0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x7f818181, 0x817f7f81,
+0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f7f, 0x81817f7f, 0x7f81817f, 0x81818181,
+0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81817f81, 0x8181817f, 0x7f818181, 0x81817f81,
+0x817f7f81, 0x817f7f7f, 0x7f7f7f81, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x7f7f817f, 0x7f7f817f,
+0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f81817f, 0x7f7f8181,
+0x7f818181, 0x817f8181, 0x7f7f8181, 0x7f817f81, 0x81818181, 0x7f817f81, 0x817f7f7f, 0x7f817f7f,
+0x7f7f8181, 0x817f8181, 0x7f818181, 0x81817f81, 0x7f818181, 0x8181817f, 0x8181817f, 0x7f818181,
+0x8181817f, 0x7f7f7f7f, 0x7f7f817f, 0x7f818181, 0x7f7f7f81, 0x7f7f817f, 0x81818181, 0x817f7f7f,
+0x8181817f, 0x817f817f, 0x7f7f8181, 0x7f7f7f81, 0x81818181, 0x8181817f, 0x817f817f, 0x817f817f,
+0x7f818181, 0x7f7f8181, 0x8181817f, 0x817f817f, 0x81817f7f, 0x81818181, 0x7f81817f, 0x817f817f,
+0x7f7f7f7f, 0x7f817f81, 0x7f818181, 0x817f7f7f, 0x817f7f81, 0x7f817f81, 0x7f7f817f, 0x7f7f7f7f,
+0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f817f, 0x8181817f, 0x7f81817f, 0x7f818181, 0x7f7f7f7f,
+0x7f7f7f7f, 0x7f7f7f81, 0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x817f7f81, 0x7f817f7f,
+0x817f7f7f, 0x7f7f7f81, 0x7f7f8181, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f81,
+0x817f817f, 0x817f7f7f, 0x81818181, 0x7f818181, 0x81818181, 0x81818181, 0x7f7f7f81, 0x8181817f,
+0x81818181, 0x7f817f7f, 0x817f7f81, 0x7f7f8181, 0x7f818181, 0x8181817f, 0x8181817f, 0x817f7f81,
+0x7f817f7f, 0x81817f7f, 0x817f8181, 0x7f817f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, 0x817f7f7f,
+0x817f7f7f, 0x7f817f81, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x7f7f7f81,
+0x7f817f81, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x7f818181, 0x81817f81,
+0x817f8181, 0x7f817f7f, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x81817f81, 0x81817f81,
+0x81817f7f, 0x7f81817f, 0x81818181, 0x7f7f8181, 0x817f817f, 0x817f8181, 0x817f7f7f, 0x8181817f,
+0x81817f7f, 0x8181817f, 0x7f817f81, 0x81817f7f, 0x81818181, 0x817f7f7f, 0x7f81817f, 0x817f817f,
+0x81818181, 0x817f7f81, 0x817f817f, 0x817f7f7f, 0x81817f7f, 0x7f7f8181, 0x817f7f81, 0x8181817f,
+0x7f818181, 0x7f817f81, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x7f7f8181, 0x7f7f7f7f,
+0x7f81817f, 0x817f8181, 0x817f817f, 0x7f81817f, 0x81818181, 0x7f817f7f, 0x7f7f7f7f, 0x7f7f7f7f,
+0x7f7f8181, 0x817f8181, 0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, 0x817f8181,
+0x81817f81, 0x817f8181, 0x7f81817f, 0x817f8181, 0x817f8181, 0x817f7f81, 0x7f7f817f, 0x7f818181,
+0x7f7f817f, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f7f, 0x8181817f, 0x817f8181, 0x81818181, 0x817f7f7f,
+0x7f7f817f, 0x817f7f7f, 0x817f817f, 0x7f818181, 0x8181817f, 0x817f817f, 0x7f7f7f7f, 0x7f81817f,
+0x7f817f81, 0x81818181, 0x7f817f7f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x81817f81,
+0x81818181, 0x81817f81, 0x7f7f817f, 0x81818181, 0x817f8181, 0x7f81817f, 0x81817f7f, 0x7f81817f,
+0x817f8181, 0x81818181, 0x7f7f7f81, 0x81817f81, 0x817f7f7f, 0x817f8181, 0x7f7f817f, 0x7f7f8181,
+0x7f81817f, 0x7f818181, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x7f81817f, 0x817f8181,
+0x81817f81, 0x81818181, 0x817f8181, 0x81818181, 0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x81817f81,
+0x7f818181, 0x81818181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x81818181,
+0x81817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f7f, 0x8181817f, 0x81817f81, 0x81817f81, 0x8181817f,
+0x817f817f, 0x81817f7f, 0x7f817f7f, 0x817f817f, 0x817f7f7f, 0x8181817f, 0x7f7f7f7f, 0x817f817f,
+0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f7f,
+0x7f81817f, 0x817f817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x81818181,
+0x7f7f817f, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f8181, 0x81818181, 0x817f7f7f, 0x81818181,
+0x81817f81, 0x817f817f, 0x7f817f81, 0x7f7f8181, 0x7f7f7f81, 0x81817f81, 0x7f7f7f81, 0x817f7f7f,
+0x817f817f, 0x817f8181, 0x7f817f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f81,
+0x7f817f7f, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x7f7f7f7f, 0x7f7f817f,
+0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x7f817f81, 0x817f817f, 0x81817f7f, 0x7f81817f, 0x8181817f,
+0x817f7f81, 0x7f7f7f81, 0x81818181, 0x7f817f81, 0x81818181, 0x7f818181, 0x817f7f7f, 0x81818181,
+0x81818181, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f81817f,
+0x817f817f, 0x7f7f7f81, 0x7f81817f, 0x7f81817f, 0x7f818181, 0x817f817f, 0x81817f7f, 0x7f7f817f,
+0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x817f817f, 0x81817f7f, 0x7f7f7f81, 0x817f817f, 0x7f7f817f,
+0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x7f7f8181, 0x7f7f817f, 0x7f81817f,
+0x81817f81, 0x817f7f81, 0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f,
+0x7f7f7f7f, 0x81817f81, 0x817f817f, 0x81817f81, 0x7f7f7f7f, 0x817f817f, 0x817f7f81, 0x81817f81,
+0x817f7f81, 0x817f7f7f, 0x81817f81, 0x817f7f81, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x8181817f,
+0x7f7f7f81, 0x81817f81, 0x7f817f81, 0x81818181, 0x817f8181, 0x7f7f8181, 0x81818181, 0x817f7f81,
+0x817f8181, 0x81817f7f, 0x8181817f, 0x817f817f, 0x8181817f, 0x7f818181, 0x81817f81, 0x81818181,
+0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x7f7f8181, 0x8181817f,
+0x817f7f81, 0x7f818181, 0x7f7f817f, 0x7f81817f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x817f8181,
+0x7f7f8181, 0x81817f81, 0x7f7f8181, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x81817f81,
+0x7f7f817f, 0x817f817f, 0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x8181817f, 0x817f8181, 0x817f7f7f,
+0x817f7f81, 0x817f8181, 0x8181817f, 0x7f818181, 0x7f818181, 0x7f7f7f81, 0x817f7f81, 0x7f817f81,
+0x81817f81, 0x817f8181, 0x817f7f7f, 0x817f7f7f, 0x81818181, 0x7f7f7f81, 0x817f817f, 0x817f7f81,
+0x81817f7f, 0x7f818181, 0x7f7f817f, 0x7f818181, 0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x81817f81,
+0x81818181, 0x7f817f7f, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x81817f7f, 0x7f817f7f,
+0x7f7f8181, 0x817f8181, 0x7f818181, 0x8181817f, 0x81817f81, 0x817f7f7f, 0x7f817f81, 0x81818181,
+0x817f817f, 0x81818181, 0x817f7f81, 0x817f7f7f, 0x817f817f, 0x7f817f81, 0x7f817f7f, 0x817f7f81,
+0x7f818181, 0x817f7f7f, 0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x81818181, 0x817f7f81,
+0x7f7f817f, 0x81817f7f, 0x8181817f, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x817f7f81, 0x7f81817f,
+0x817f817f, 0x7f7f817f, 0x817f7f7f, 0x8181817f, 0x7f7f817f, 0x7f81817f, 0x8181817f, 0x7f81817f,
+0x81818181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f818181, 0x817f7f7f,
+0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x81817f7f,
+0x7f7f7f7f, 0x817f7f81, 0x81817f81, 0x817f817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x81818181,
+0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x8181817f, 0x7f7f7f81, 0x817f7f7f, 0x81818181, 0x81817f7f,
+0x7f817f7f, 0x817f7f7f, 0x81817f7f, 0x81817f7f, 0x817f7f7f, 0x81817f7f, 0x817f8181, 0x7f81817f,
+0x7f7f7f7f, 0x817f817f, 0x817f7f81, 0x8181817f, 0x817f7f7f, 0x7f817f7f, 0x8181817f, 0x81817f81,
+0x817f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f818181, 0x7f817f81, 0x81818181, 0x817f8181, 0x8181817f,
+0x7f817f7f, 0x817f8181, 0x7f818181, 0x817f7f81, 0x7f817f7f, 0x7f7f8181, 0x817f817f, 0x817f8181,
+0x7f81817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x81817f81, 0x817f8181, 0x7f7f7f81,
+0x8181817f, 0x8181817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x7f818181, 0x81818181, 0x7f7f7f81,
+0x7f817f7f, 0x81818181, 0x817f8181, 0x817f8181, 0x7f7f7f7f, 0x817f7f81, 0x817f817f, 0x817f7f81,
+0x81818181, 0x817f8181, 0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x817f8181,
+0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f818181, 0x7f81817f, 0x81817f7f, 0x817f8181,
+0x817f8181, 0x7f818181, 0x8181817f, 0x7f817f7f, 0x81817f81, 0x7f7f8181, 0x817f7f81, 0x817f8181,
+0x7f7f7f7f, 0x817f7f7f, 0x7f817f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x81817f81,
+0x7f817f7f, 0x7f818181, 0x817f7f81, 0x8181817f, 0x81817f7f, 0x8181817f, 0x7f81817f, 0x7f817f81,
+0x817f8181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f818181, 0x7f81817f, 0x7f7f8181, 0x7f7f7f81,
+0x817f817f, 0x81817f7f, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x817f8181, 0x81818181, 0x8181817f,
+0x81817f7f, 0x7f817f7f, 0x817f8181, 0x81817f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f817f,
+0x817f8181, 0x81817f81, 0x81817f81, 0x7f817f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f,
+0x81817f81, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f817f7f, 0x7f7f817f, 0x81817f81, 0x817f8181,
+0x81817f81, 0x8181817f, 0x7f7f7f81, 0x817f8181, 0x81817f7f, 0x81817f81, 0x817f8181, 0x817f8181,
+0x817f817f, 0x817f7f7f, 0x81817f7f, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f818181, 0x81817f81,
+0x817f8181, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x81818181, 0x7f817f81, 0x81817f7f, 0x81818181,
+0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x817f817f, 0x7f7f7f7f, 0x817f7f7f,
+0x817f7f7f, 0x7f817f81, 0x7f7f817f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f817f,
+0x817f817f, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81818181, 0x7f817f7f,
+0x7f7f7f81, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f7f, 0x817f817f,
+0x81818181, 0x81818181, 0x8181817f, 0x81817f81, 0x81818181, 0x817f8181, 0x7f817f7f, 0x7f817f81,
+0x817f8181, 0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x7f817f7f,
+0x7f7f7f7f, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x81817f81, 0x817f8181, 0x7f817f81, 0x817f8181,
+0x81818181, 0x817f817f, 0x81817f81, 0x817f817f, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f7f7f,
+0x81817f81, 0x7f81817f, 0x817f7f81, 0x7f818181, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f,
+0x8181817f, 0x7f81817f, 0x8181817f, 0x81818181, 0x81817f7f, 0x7f7f817f, 0x81817f81, 0x8181817f,
+0x7f81817f, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x81817f81, 0x7f7f7f7f,
+0x7f817f7f, 0x81817f7f, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x7f818181, 0x7f7f8181,
+0x7f817f81, 0x7f817f7f, 0x8181817f, 0x7f7f7f81, 0x7f7f8181, 0x81818181, 0x7f7f8181, 0x7f817f7f,
+0x7f7f8181, 0x7f81817f, 0x7f7f8181, 0x81818181, 0x7f817f7f, 0x7f7f8181, 0x7f817f7f, 0x8181817f,
+0x7f817f7f, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x817f817f, 0x7f817f81, 0x7f817f7f, 0x81818181,
+0x7f817f81, 0x7f7f8181, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x7f817f81,
+0x7f81817f, 0x817f7f7f, 0x817f8181, 0x7f817f7f, 0x817f7f81, 0x7f7f7f7f, 0x7f817f7f, 0x7f817f81,
+0x817f7f7f, 0x817f8181, 0x7f7f7f7f, 0x817f7f81, 0x81817f7f, 0x81818181, 0x817f817f, 0x81818181,
+0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f,
+0x817f7f81, 0x817f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, 0x81817f7f, 0x7f7f7f7f, 0x7f818181,
+0x7f818181, 0x81818181, 0x817f8181, 0x81817f7f, 0x7f7f8181, 0x817f817f, 0x7f818181, 0x7f7f8181,
+0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x8181817f, 0x81818181, 0x817f8181, 0x7f7f7f7f, 0x7f81817f,
+0x81817f7f, 0x7f7f8181, 0x81817f81, 0x817f7f7f, 0x817f7f7f, 0x7f817f81, 0x817f7f7f, 0x81817f7f,
+0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x817f817f, 0x817f7f7f, 0x817f8181, 0x81818181, 0x7f81817f,
+0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x81817f81, 0x817f7f81, 0x7f7f8181, 0x7f817f81,
+0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x817f8181, 0x8181817f, 0x817f7f81, 0x817f7f81, 0x7f817f81,
+0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f817f81, 0x81818181, 0x81818181, 0x7f7f7f7f, 0x81818181,
+0x7f817f7f, 0x81818181, 0x7f81817f, 0x7f817f81, 0x81817f7f, 0x817f7f81, 0x7f7f817f, 0x7f817f7f,
+0x7f818181, 0x7f7f817f, 0x7f817f7f, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x817f7f81, 0x7f7f7f81,
+0x81818181, 0x7f817f7f, 0x81818181, 0x7f818181, 0x817f7f81, 0x7f817f7f, 0x81818181, 0x81818181,
+0x7f817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f818181, 0x81817f7f, 0x81817f81, 0x8181817f, 0x817f817f,
+0x817f817f, 0x817f7f81, 0x7f7f8181, 0x817f7f7f, 0x817f8181, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f81,
+0x817f7f7f, 0x7f7f8181, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f817f,
+0x817f7f7f, 0x81818181, 0x8181817f, 0x7f817f7f, 0x7f7f8181, 0x81817f81, 0x7f81817f, 0x7f7f8181,
+0x817f7f7f, 0x817f7f81, 0x7f81817f, 0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x817f7f81, 0x7f817f81,
+0x81817f81, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x7f817f81,
+0x817f7f81, 0x7f7f817f, 0x7f7f817f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f817f, 0x8181817f, 0x817f7f7f,
+0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x81817f81, 0x7f817f7f, 0x7f81817f, 0x81817f7f, 0x7f7f7f7f,
+0x817f817f, 0x7f7f7f7f, 0x7f81817f, 0x817f8181, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x8181817f,
+0x817f817f, 0x81818181, 0x81817f81, 0x81817f81, 0x7f7f8181, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f,
+0x7f817f81, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x81817f7f, 0x81818181, 0x7f81817f, 0x7f7f7f7f,
+0x81818181, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f8181, 0x81818181,
+0x81818181, 0x7f7f8181, 0x7f817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f81, 0x7f7f817f, 0x817f817f,
+0x81817f7f, 0x7f7f817f, 0x7f7f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f8181,
+0x7f817f7f, 0x7f817f81, 0x817f8181, 0x817f7f7f, 0x7f818181, 0x7f81817f, 0x817f817f, 0x7f7f8181,
+0x81817f7f, 0x7f7f7f7f, 0x817f7f81, 0x8181817f, 0x81818181, 0x7f817f81, 0x7f817f81, 0x7f81817f,
+0x7f81817f, 0x7f7f817f, 0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f7f7f7f,
+0x7f81817f, 0x7f7f817f, 0x817f817f, 0x817f817f, 0x7f817f7f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f8181,
+0x817f7f81, 0x7f7f7f81, 0x7f818181, 0x817f817f, 0x7f7f817f, 0x7f817f81, 0x817f7f81, 0x81818181,
+0x81817f81, 0x7f7f7f81, 0x7f817f7f, 0x7f818181, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x7f81817f,
+0x817f7f81, 0x81818181, 0x7f817f81, 0x7f7f7f81, 0x7f81817f, 0x7f818181, 0x7f817f7f, 0x81817f81,
+0x81818181, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f7f817f, 0x817f8181, 0x81818181, 0x7f81817f,
+0x81817f81, 0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x817f7f7f, 0x8181817f, 0x81817f81, 0x81818181,
+0x7f817f81, 0x7f7f7f7f, 0x81818181, 0x7f7f8181, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x7f7f7f7f,
+0x7f7f7f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f81, 0x817f8181, 0x7f7f817f, 0x7f7f7f7f,
+0x817f817f, 0x8181817f, 0x817f8181, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f7f, 0x7f7f817f,
+0x7f81817f, 0x7f7f7f81, 0x817f8181, 0x81817f81, 0x817f7f81, 0x7f7f817f, 0x7f81817f, 0x7f7f7f7f,
+0x81818181, 0x81817f81, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x7f81817f,
+0x81818181, 0x7f818181, 0x817f7f81, 0x81817f7f, 0x81818181, 0x817f7f7f, 0x7f7f7f81, 0x817f7f7f,
+0x817f817f, 0x817f7f81, 0x81818181, 0x81817f81, 0x7f818181, 0x81817f7f, 0x817f817f, 0x7f81817f,
+0x817f7f81, 0x7f81817f, 0x8181817f, 0x7f7f7f7f, 0x8181817f, 0x7f7f817f, 0x7f817f7f, 0x817f7f7f,
+0x7f818181, 0x817f8181, 0x7f7f817f, 0x817f7f81, 0x7f817f81, 0x81817f81, 0x7f7f8181, 0x7f81817f,
+0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, 0x81818181, 0x7f7f817f,
+0x817f8181, 0x7f81817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x81818181,
+0x81817f7f, 0x7f817f7f, 0x817f8181, 0x7f7f8181, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x7f817f7f,
+0x7f817f7f, 0x817f817f, 0x817f7f81, 0x81817f81, 0x7f7f8181, 0x817f7f7f, 0x7f7f8181, 0x81817f81,
+0x7f817f7f, 0x817f8181, 0x817f817f, 0x81817f81, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81,
+0x81817f81, 0x7f81817f, 0x7f7f8181, 0x81818181, 0x817f7f7f, 0x7f81817f, 0x7f7f817f, 0x7f817f7f,
+0x7f818181, 0x7f81817f, 0x817f8181, 0x7f7f7f7f, 0x7f81817f, 0x7f818181, 0x817f817f, 0x81818181,
+0x7f818181, 0x7f817f81, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x7f81817f, 0x7f7f7f81,
+0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f818181, 0x81818181, 0x817f7f81, 0x817f8181,
+0x7f7f7f7f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f81, 0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f818181,
+0x81818181, 0x7f7f817f, 0x81818181, 0x7f818181, 0x7f817f7f, 0x7f817f81, 0x7f7f8181, 0x81818181,
+0x7f7f7f81, 0x817f8181, 0x7f7f7f7f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x7f817f7f, 0x817f817f,
+0x7f817f81, 0x817f8181, 0x817f817f, 0x817f817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81,
+0x7f7f7f7f, 0x7f7f7f7f, 0x8181817f
+
+e =
+34560
+
+k =
+6144
+
+rv_index =
+0
+
+iter_max =
+8
+
+iter_min =
+4
+
+expected_iter_count =
+8
+
+ext_scale =
+15
+
+num_maps =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN,
+RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT
+
+expected_status =
+OK
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data
new file mode 100644
index 00000000..13ad0908
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data
@@ -0,0 +1,1225 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0xf6bb1100, 0x4e433af1, 0x38f02efa, 0x0ee43d00, 0x5037c35f, 0x5620e729, 0xdd46c4d7, 0x5ae3e2f7,
+0xe0d7fa2f, 0x2e4ae20d, 0xa7190ebf, 0xd2382614, 0x98ffabe0, 0xf7ee0c5d, 0xffd8ddd7, 0x5f03ecc7,
+0xf9ffdde5, 0xf12446f6, 0x42693a3d, 0xfcc81aae, 0x2b0abc2c, 0x43f2a1b5, 0x044a302c, 0xf8db4fa6,
+0xcad2d138, 0x452b5526, 0xf3083fd3, 0x30ea4b22, 0xe2bd66f4, 0xbc37bebe, 0x001e183b, 0xfac72ebf,
+0x11965cf5, 0x70226311, 0xbd2016ec, 0xcf23f931, 0x4629f6fb, 0x3104de76, 0x00d7c529, 0xa0c8f8f4,
+0x2615c2e0, 0x23daf4b4, 0x56bf05c4, 0x9212102c, 0xa5fe400d, 0xec2a5497, 0x021032bc, 0x3b55340e,
+0xd6f1003c, 0x2a3625d0, 0xc81eee31, 0xa26644d7, 0x0ffb041c, 0x22cc20dc, 0x472cdd9b, 0xd8e24ed2,
+0x4ee740ab, 0x9bfc50e2, 0xbaaafe55, 0xeecd5c19, 0x57150c31, 0x9d3f13c1, 0x251749e1, 0x076ca964,
+0xde0da74b, 0xcafac963, 0x52d006e2, 0xe392d7f4, 0x3a20bcf6, 0xd1ee6fce, 0xe5cf850a, 0x31253fe7,
+0xcefed8a9, 0x520f9824, 0x5fdbe3c9, 0x9b123cc5, 0x6ad019f2, 0x171f0368, 0xc2fe2855, 0x0db851c2,
+0x095010d9, 0xd110df25, 0x1452a52c, 0xe5d1c2cc, 0x45dbef40, 0x2aef2353, 0x07af2e2a, 0x4f23ff6f,
+0x3ddb31cb, 0xa8430c49, 0xcaf7b329, 0xf933c5bc, 0xfe3c52b3, 0x7fd60a27, 0xce0ac9cf, 0xcff0cdd7,
+0x5400cd01, 0x7f81a8c1, 0x3c7f8178, 0x737f87f6, 0x5a5e7f81, 0x817f6ff6, 0x813b1381, 0x41d18173,
+0x810fb9f9, 0xb734c526, 0x50b3a6d3, 0x282d6e50, 0x81972d1f, 0x7d52bd53, 0x6749268b, 0xc681812d,
+0x2c93912e, 0xb1813bdc, 0xed817fd1, 0xa8813376, 0x7f3c7f41, 0x64569f7c, 0xf53d5c67, 0x1e70e6dd,
+0x024b6591, 0x817f7f8f, 0x0a722d63, 0x7fed578b, 0x7f239a7f, 0x81029081, 0xa8ca8b81, 0xb8577fde,
+0x818ec07f, 0x7fa87cb9, 0xa7084967, 0x81934650, 0x7f8171f7, 0xed3d144a, 0x8107739c, 0x3c6fe27f,
+0xcd6ed165, 0xfb3718a4, 0x5f708f4d, 0x14e45eb2, 0xfe5c7f7f, 0x7fc57f6a, 0x813683af, 0x817f7fb3,
+0x007fa7ef, 0xdcd681a1, 0xd72a1268, 0x769f7881, 0xb27fa281, 0x89c27fe3, 0x729b2f59, 0x73afb8a0,
+0xd07f4eb8, 0x81dd6d55, 0x7fa3d2cf, 0x7f81b51e, 0x7f7f7be7, 0x60819981, 0xf31158ad, 0xde7fe4b4,
+0x427f2dd3, 0xd3812481, 0x280de7a4, 0x0a653fc6, 0x8181261f, 0xd6aa8f81, 0x2c7fe484, 0x42df747f,
+0x7f5baa2d, 0xc1bd583c, 0x966b5031, 0xbe818725, 0x6b6444a4, 0x610023e7, 0x5f183b2c, 0xa77fb769,
+0xb2cb1581, 0x0881cc75, 0x8a8c6fdc, 0x558181e4, 0x107f947f, 0x749e4ca3, 0x8cb23db1, 0xce668132,
+0x70b848eb, 0x008a2430, 0x7fd3d77f, 0x682b35eb, 0x2e6aae34, 0xd42fa2cd, 0x3eab816f, 0xfdaf8196,
+0x04e43fcc, 0xf1819600, 0x7f6d64f2, 0x81ba8957, 0x81a781a2, 0xb169d67a, 0x7f4e557f, 0xe0818197,
+0x7b474ecc, 0x3f7b8154, 0x135684f8, 0x81b0fb81, 0x81e33e93, 0x815c817f, 0x810b818a, 0x4d445b34,
+0x95cf2bb0, 0x32c69b7d, 0x2640e181, 0x7fe8b8c8, 0x9bdd64f7, 0x22bfa50b, 0x8181c87f, 0xf7812486,
+0x7f977fa0, 0x5826a925, 0xce7fadf6, 0x9e73fb81, 0x538d7f0e, 0xe85dab87, 0xc3706181, 0x4181817f,
+0x7f7f15ca, 0x952ca87f, 0xa9748be0, 0xbf812477, 0xd081a553, 0x9a817f7f, 0x1f4cb192, 0x7f8181d5,
+0x2ba44ad2, 0x76605566, 0x7f1cd2b9, 0x446eae90, 0x8956b670, 0xaadc8b7f, 0x710b0a90, 0x83816439,
+0xc6ffc6d2, 0xf07f00fb, 0x7fa25582, 0x7fd11f3a, 0x7f104c81, 0x81d574fd, 0x81814081, 0x82b78e7f,
+0x2f29f42b, 0x277f4bff, 0xf49d4b81, 0x28581681, 0x22523281, 0x607ff910, 0x61ac1ad2, 0x58313ccf,
+0x8168a21f, 0x7fe61879, 0x817f21b3, 0x407f2f45, 0x7f5fa666, 0x81bc41fb, 0xfd81fb81, 0x0ed48163,
+0x81818181, 0x7f7b92fd, 0x44507d7f, 0xba45c9d3, 0x81852b64, 0x7fa918eb, 0x5f6ff5c6, 0xa4ad8181,
+0xc27f8f81, 0xb8af5481, 0x7fc5817d, 0x7a814ea2, 0xf77fcf36, 0x50913981, 0x81a01781, 0x5ea1a4ba,
+0x18d252b8, 0xc0416881, 0x7f817f1e, 0x0b4e38cf, 0x7ffb6922, 0xfca481b5, 0x7f1dc1ca, 0x7f560da2,
+0x7f7f813a, 0x1900a881, 0x6d4d8116, 0x617f0469, 0xb3118e81, 0x95408119, 0x817ff08a, 0x7fdc7f8b,
+0xf682a40a, 0x3381817f, 0x81b70d82, 0x7f7f6f81, 0x8190217f, 0x2f817f4b, 0xe195b209, 0x5ee39575,
+0x8150bae6, 0xa4fe8b7f, 0x7f7f654c, 0x7f921971, 0x53817f85, 0x1c7f5ff6, 0x5f35737f, 0xce7fc2c6,
+0x6c81817f, 0x7f7c410d, 0x5794308e, 0x15f4a87f, 0x7fb64d7b, 0x1d0a8181, 0xa8b67f37, 0x253b397f,
+0x887f92b7, 0x7f81767f, 0x81c18101, 0xaa2861c0, 0xbd691cd9, 0x81d74ef3, 0x81a78153, 0xcb7f819b,
+0xf681c5e7, 0x9f37817f, 0x3a7f869b, 0x56a41581, 0x04be8111, 0x4898b48b, 0xbc81465d, 0xb3815281,
+0x7f6a1d45, 0x1fa87fce, 0x810e81cc, 0x7f8d410c, 0x8190a193, 0x81817f81, 0x5dadcbb4, 0x81a83699,
+0x86a68164, 0xe27f1281, 0x682a7f27, 0x62a03cbd, 0x8181c75d, 0x39653881, 0xc6187e83, 0x617fd27f,
+0x81917f90, 0x57796c81, 0xdc7f567f, 0xad712381, 0xdd81ecbe, 0xec17667c, 0x929e5098, 0x857ffbbf,
+0x8d5b8153, 0x9769887f, 0x65173381, 0x66b8ad84, 0x043481b9, 0x81c05ca9, 0x449d4c48, 0x8c45bc82,
+0xca7f81ee, 0x2d8f33e1, 0x81e22f81, 0xca9eb542, 0x8e947fe5, 0xb642a4a8, 0x7f22c73f, 0x85a6176a,
+0x567f7ffb, 0x587f4243, 0xa2819225, 0x58307f9b, 0x2c6b0f81, 0x8130a068, 0x814fa904, 0x400a9f2d,
+0x30617fd6, 0x81720981, 0x97b98100, 0x0ca17c7f, 0xf1816081, 0x690f8181, 0x94165481, 0x81817f9d,
+0x81444268, 0x97759781, 0x8740267f, 0x757f1a7f, 0x7f7f81ca, 0x957fb87f, 0x816f723a, 0x4a8613f2,
+0x3bfef57b, 0x7f1274e8, 0x810ab05b, 0x7fb19ecd, 0xb07fc75f, 0x7fb2965f, 0x60abb881, 0xc229a8e9,
+0xb1817f26, 0xe324067f, 0x8181e344, 0x7ea59cac, 0xd21a7f65, 0x8d15989f, 0x09026bea, 0x1d7f933b,
+0xae81c181, 0xabf37147, 0x0d08da7f, 0xa7ac4b9e, 0x8281e044, 0x7f8195b1, 0x7ff844de, 0xe9057fed,
+0x85fcf1cd, 0xa89e7e2b, 0xf974a481, 0x1f813c6d, 0x237fcbbb, 0x655f8192, 0x8134062b, 0x6f91815d,
+0xdf3281e1, 0xe6fd817f, 0x72b3007f, 0x86caf9cf, 0xea812981, 0x6a8ad67c, 0x36aaa01d, 0xbb102886,
+0x812faf52, 0x6281de5a, 0x817f8174, 0xebff4735, 0x7894bf7c, 0x169e8194, 0x3bc281f8, 0xe69d2a15,
+0xb8e5b68d, 0x367f9b8b, 0xea818181, 0xeb7fbbb7, 0x9368a27f, 0x5af5e19e, 0x5f7f7f7f, 0xcd518181,
+0x7f948109, 0x817f4a81, 0xf8a0de7f, 0x667f7fa5, 0x1a8881c1, 0x8646b17f, 0xb0eb7f81, 0xe3fe2ba2,
+0x460d18da, 0x3db45835, 0xb0c2847e, 0x9c8681a0, 0x817ff88d, 0x7fec639d, 0x817f7f7f, 0x81810d2f,
+0xef28241e, 0x811d9db4, 0x4ac4ada5, 0x7ff29f7f, 0x7fd981ac, 0x7f815e23, 0xcc3dbc8a, 0x7c769b08,
+0x6a07ef50, 0x66599c9c, 0x7e007f92, 0x9381aa5a, 0x33471b81, 0x79b27d96, 0x6d5d9d5a, 0xbf40687d,
+0x7fbc408e, 0xae598111, 0x18222b1e, 0x0a7b817f, 0x7f4961b4, 0x81818f5e, 0xd5a8ae44, 0xe185bd50,
+0xb4a65d81, 0xab81c3d6, 0x5f814b7f, 0x8db72efc, 0xc0be14d5, 0x77336a45, 0x2dca7e7f, 0x81857f7f,
+0xf1d80b33, 0x1856b3dd, 0x789e75cd, 0x7f7f7f81, 0x8b7f6951, 0xc94cc5b6, 0x407f3f5b, 0x72c4b87f,
+0x8135f77d, 0xab02ec1c, 0x8db481e0, 0x77da84df, 0x897f5c49, 0x8181b24d, 0x7f54715e, 0x233ef77f,
+0x8165daa7, 0xd3813aa2, 0x7f7f7f18, 0x96e99677, 0x56814a72, 0xa4acba2e, 0x2683347f, 0x3d5b20ac,
+0x887fb981, 0x90a9ec91, 0x008ab581, 0x81588181, 0x5c017f7f, 0x10818181, 0x5d86a42d, 0xd8587f4f,
+0xed2035b1, 0x718168fc, 0xc3cccf7f, 0xaa812676, 0xa8d83930, 0xbdb119f6, 0x812b0288, 0xe17f7f7f,
+0x989ffd31, 0x9e564b12, 0xabfa3578, 0xacd85381, 0xaf7074e6, 0x93a34c11, 0xc1819896, 0x5a5fa673,
+0x6f8181ad, 0x4981167a, 0x817f965e, 0x961b7f29, 0xac539db6, 0x2b645dd8, 0x937fa506, 0xa1b900ba,
+0xdd7f7f78, 0x7f866ce7, 0x81a9247f, 0x50d67f53, 0x177a8b82, 0x277ffa7f, 0x7f0942da, 0x444b3181,
+0x9d8995aa, 0xbda844b9, 0xae765c85, 0x62818f7f, 0x307f7f00, 0xf37fe681, 0x817f7fab, 0x42e0813d,
+0x49f3c7c4, 0x7fd543b8, 0x818183f2, 0x7f4b5500, 0xe2357a7f, 0xdc3a8133, 0xd13913ff, 0xabfd587f,
+0x81c3814c, 0x667fab62, 0x9bf82b0c, 0xf84b9583, 0x6a28b2f0, 0x8154623f, 0x7f845e7f, 0x2750f07f,
+0x517f2d59, 0x7ffe4db5, 0x60077f7f, 0x81816e62, 0xc40a30dd, 0x67f22b7f, 0xec817f81, 0x7fb8c67f,
+0x98c9afd0, 0xb881ecaf, 0x6069c37f, 0x792c2c09, 0x709dc2de, 0x4481bb20, 0xd4b25981, 0xf8ef7792,
+0x7381ae17, 0x7fe05250, 0xc3eb637f, 0x6d350069, 0x327f457f, 0x8ce7b8ab, 0xb281c1da, 0x7f817f61,
+0xbbcf7fb1, 0xaf818145, 0xb77f5881, 0xb47ff1b2, 0xcec74a81, 0x5f81e7b4, 0x7f1b727f, 0x81b8701b,
+0x508106c4, 0x6d815f7f, 0xbc815bea, 0x464b00f2, 0x78818181, 0x7f7f4f81, 0x817e2c53, 0x817e7fa7,
+0x7f4da4a9, 0xdd444f23, 0x7f6cba39, 0x8141625a, 0x7f81813f, 0xcd7f817f, 0xa402814a, 0x817f049f,
+0x6fb981b4, 0x7867bd7f, 0x9b45bc81, 0xe7739f52, 0x7f816181, 0xab844448, 0x7f06497f, 0xe555c183,
+0x817fde68, 0x81bc0624, 0xe33f3a9b, 0x8122c673, 0x6c3d81d5, 0x392815dd, 0x26527f7f, 0xee649844,
+0x817f6fef, 0x81ce32d4, 0x102f81fe, 0x81967f81, 0xa42b9c20, 0x81813c81, 0x2a67a463, 0x90592c8a,
+0xc5810e67, 0x81d1dd81, 0xbd7f54e1, 0xf52a814e, 0x7f46a7b3, 0x5a89812b, 0x78e43eac, 0x7f776981,
+0x817fc5c2, 0x3f818181, 0xc556c337, 0x7f0081a9, 0x817ffe6b, 0x7f095f3d, 0x819b2281, 0xb45881ad,
+0xc0817f47, 0x81bd915a, 0xc7616b7f, 0x81e9817a, 0x44977f6c, 0xbc81307f, 0x7f9ac3c9, 0xff061265,
+0x3c7b0181, 0x13910e7f, 0xb499ed3c, 0xd850b881, 0x81781837, 0x7f97ff93, 0x81818f23, 0xb9b50d81,
+0x49aa4fbd, 0x6b361230, 0x148179d3, 0xbd81dd1f, 0x4cd5d0d0, 0x9ad1738a, 0x2c7f718f, 0x7f9f03b0,
+0x789d9ac2, 0x88db95fa, 0x9ae5ac64, 0x727f814c, 0xfa477dac, 0xa670a23f, 0x146afe7f, 0xd4d0451d,
+0x6d81819d, 0x9622acc1, 0x818161a2, 0x68263dca, 0x7fce818d, 0x998193ba, 0x9d81a486, 0x7eb97f7f,
+0x134470ec, 0x6681bde5, 0x4e2ea77f, 0x81777f7f, 0x7f7fa64f, 0xd3815a7d, 0x81fd4e50, 0x774481e8,
+0x7f6c8148, 0xe76b7f81, 0x881bb4e9, 0x7f944f3d, 0xe6da7f20, 0x54c68176, 0x210ebb45, 0xbed6267f,
+0xe22f2081, 0x8191ae8a, 0x7c7f257f, 0x667f811c, 0xe2edd355, 0xa4812181, 0x837f5481, 0x6d818120,
+0xb7aa7f8f, 0xc17f7081, 0x7fba986a, 0x819a81d5, 0xa9816681, 0x8129c4f5, 0x37ba21a7, 0x29d18a86,
+0x4be3ce96, 0x3981b184, 0x447fb50b, 0x261c8155, 0xc05ab18d, 0x777f7816, 0xe48be469, 0x81722d81,
+0x7f47a8a1, 0x924a8b72, 0xa2ef4ecd, 0x105c817f, 0x7f90709b, 0xbc81443a, 0xa82f7f65, 0x615d928c,
+0x7f81570d, 0x61b0919a, 0x7f33fc81, 0xe8817fca, 0x637ffb00, 0x9c337250, 0x817f8181, 0x08b01e0a,
+0x7f4ae897, 0xa0969a3c, 0x7f9d7fe8, 0xe024598e, 0x9e794d7f, 0x72fb988c, 0xf8936747, 0xacbd7f8e,
+0x4e7f9781, 0xa4b9f4ea, 0x50ccfbdd, 0x817f4795, 0xbcaf485f, 0x81811a25, 0xa97881c6, 0xecbd7fd7,
+0x6e89a832, 0x1e815f96, 0x74d0437f, 0x858101b9, 0xbf1b7f8b, 0xcb7fdc9e, 0x1901bda8, 0x4c40c087,
+0x5884f075, 0x7f561faf, 0xc3878172, 0xcd0081c4, 0xd93e8fa6, 0xe8748146, 0x4c7f207f, 0x20da2792,
+0xc0d37f7b, 0x909a0133, 0x3b6fb281, 0x69819ba1, 0x81984b27, 0x362ddda3, 0x3981727f, 0xe44c7f81,
+0x81b9d0bc, 0xebea7fa7, 0x81a47f2e, 0x81674391, 0x495500fa, 0x0eb881bf, 0x2e22f17e, 0x967f9e81,
+0xd2727906, 0xdbe8814d, 0x93cf4d6d, 0x23c3b57f, 0x57be5fe3, 0x2eee7f30, 0x1a501f81, 0x76aaba53,
+0x70817fdc, 0x817f2e1e, 0x28381f81, 0x1575817f, 0xc62845e6, 0x8181417f, 0x35d0230c, 0x606fcc28,
+0x7f792e44, 0xb7b7ea76, 0xad677f7f, 0x2b1eecee, 0x7f7fe450, 0xefcc6668, 0x264e511c, 0x27818181,
+0x81817f7f, 0xce82812c, 0x697f7fec, 0x7e859be9, 0xa1f65543, 0x8170a57f, 0x51231d81, 0x33c03182,
+0x81aa7f81, 0x6b7ff34e, 0x51812fcf, 0x7f7f7fe6, 0x6281718d, 0x1eb02c7f, 0x81d64570, 0xa18368e1,
+0x4ddb816f, 0x149b81d1, 0x067fa2c6, 0x92decc7f, 0x94001658, 0x814f6d6c, 0x79673f7f, 0x7ff28122,
+0x83b97f7f, 0x81248a43, 0xfda17ffe, 0x9e81bc81, 0x81090181, 0x7fdaa350, 0x927f54d4, 0x78fe2e89,
+0x7f3d097f, 0x147f2ea7, 0x7f85837f, 0x86a58181, 0xb7ad559c, 0x7796ae60, 0x4e7fef08, 0x2ea371f2,
+0x817f7f8c, 0x7fc181c5, 0x8101818e, 0xf1588178, 0x44062eb8, 0xb0832da0, 0x81527f81, 0x7f0e817f,
+0x8e06b3e5, 0xa6df2e5f, 0x9d7f7f81, 0xa14ba64d, 0x723d7f91, 0xdb81b64b, 0xb48c117f, 0xe75d0e81,
+0x8bd9ba3a, 0x1f817f7f, 0x81847f67, 0x25677f5f, 0x3568b529, 0x7fa2587f, 0x24b02027, 0x47ca91dc,
+0x81bba314, 0x7f844f7f, 0xbeb8aa0d, 0x9955fe5b, 0x007f9b81, 0xf0a27f6f, 0x1648e959, 0xc68193a6,
+0xbea6ebfd, 0x1654cf81, 0x8142d430, 0x2e7f6693, 0xcc788195, 0x11817f81, 0x3da98171, 0x4fdc5a9c,
+0x677fea81, 0x4c6ca0cf, 0x487f8181, 0x7f3b9013, 0x37387f7f, 0xc6b4217f, 0x27afb97f, 0x77b84002,
+0xd3a67fef, 0x987f8181, 0x9518817f, 0x5ad4fd7f, 0x7b818146, 0x583c5581, 0xd9643b81, 0x687f92f5,
+0x7f8109f2, 0xdfad937f, 0x79a45f7f, 0x007f9c0e, 0x6a6ba6ad, 0xa95bc7e9, 0x9be90653, 0x223381b7,
+0x7f7f64ce, 0x81813ea3, 0x037f4f7f, 0x617f7f9a, 0x6481f081, 0x81eb4a81, 0x7f9d9a6e, 0xe80e8a4b,
+0x4563f81e, 0x94db8781, 0xbf81bd04, 0x305d8236, 0x2c90d571, 0xa48e4600, 0x81633342, 0x2c537fde,
+0xc70f5d7f, 0x65bf6f7f, 0x547f9632, 0x05847f1a, 0x23818147, 0xd6d9459f, 0xc97fb761, 0x8196816d,
+0xb5417f5b, 0xb6f91bd9, 0x55b17fbd, 0x3e90c27f, 0x4f7fc37f, 0x7f8c855d, 0xef35817f, 0x295d7b03,
+0x3730377f, 0x9c811bac, 0x7fe3ca7f, 0xe87f8124, 0x7f7fe511, 0x2d7f7ca2, 0xba9bc726, 0xf07f9f81,
+0x4b476e9f, 0xaa338173, 0x5659708a, 0x817fb67f, 0x81811ded, 0x81817f90, 0xa3077f81, 0xf0d119d6,
+0xf4818752, 0xbd98271f, 0x95f610ed, 0x3fc227e0, 0x68139681, 0x3c6c2d81, 0x7f7f96cb, 0x3e8181b1,
+0xbbe681c6, 0xb47fd2eb, 0xfdd1b74f, 0x4381fe5f, 0xb081f399, 0x64a10081, 0x79ee3ee5, 0x9559c8a6,
+0x814ad4a8, 0x817f7531, 0x81813b81, 0xbfbf9773, 0xe27f81a4, 0x7fc14269, 0x917f8158, 0xaf9d89a7,
+0x81b22f81, 0x7f54d4d8, 0x0f39a503, 0xec9d3ea0, 0x35818131, 0x8c7d99a6, 0xc0158181, 0x53817f5e,
+0x817f81b0, 0x9a7f9a7d, 0xc081747f, 0x517f704d, 0x7f2db90e, 0xa6dfa67f, 0x36817fce, 0x30ae720e,
+0x7fd73d60, 0x5c593bd5, 0xf57f9d7d, 0x448151a3, 0xc82db29a, 0x019b5a75, 0x7f876729, 0xa69efc9d,
+0x78a67f81, 0x536c817f, 0x81e470d4, 0x56f270fd, 0x2934478f, 0x9c5a7f68, 0x4ecd81ef, 0x7f5c2b23,
+0x7f817f03, 0x827fb17f, 0x7f81c35e, 0x7d7f03f8, 0x7f5da67f, 0x8100671d, 0x06e1a8b0, 0x998d112a,
+0x9282ae98, 0xed72813b, 0x7fbc6bda, 0x7fa97f3e, 0x36d1510c, 0x0d81819c, 0x56f8fd97, 0x815081a4,
+0x7fc78133, 0x69d34542, 0x7fc1727f, 0x7fd22a97, 0x418a1df0, 0xf519077f, 0x7f6e697f, 0xa9817abc,
+0xf43917b5, 0x82e3d76b, 0x86327fe7, 0x7f7fb899, 0x9f81b0a4, 0x475381c2, 0xc62b92e8, 0x7f1b49a3,
+0x3679b9d7, 0x817f5364, 0xa87f7fc2, 0x45dcf6ba, 0x3470789d, 0x913e7fa5, 0x06b53681, 0x717f8de7,
+0x027f6352, 0x7f7fbe7f, 0x7a81df17, 0x0356fb00, 0x307462bf, 0x6716c56c, 0x8157c7b4, 0x7fcd8181,
+0x4b81dd07, 0x3b667f7f, 0x7fa6815c, 0xea37397f, 0x61e94781, 0x4e7f8177, 0x81436b5b, 0x4e818172,
+0x5b204b37, 0x115501b4, 0x81491681, 0x661468bf, 0x501f88b5, 0x7f4e63e3, 0x8146e97f, 0x7f09f895,
+0x7fd70e81, 0x7f7f7f16, 0x3f327fed, 0x05055631, 0x63b4ec13, 0xc03f16da, 0xa0af33c1, 0x942a7f42,
+0xb4ba6515, 0xe99d81de, 0x7f78d8b0, 0x446f2c42, 0x92cf7fde, 0x6b9e7f7f, 0x948181cd, 0x94b9ad81,
+0x582e8181, 0xc31748a9, 0x39c6d47f, 0xb2817fff, 0xc5817f81, 0xc6887fa4, 0xe69989e4, 0xa67e7a5c,
+0x09477f9a, 0x2d4d817f, 0xb24f81d1, 0x3ff77f7f, 0x6a66813b, 0xf5050547, 0xcf917f6c, 0x81917f7c,
+0xb8b23b81, 0xcab60081, 0xe17f7f81, 0x3b47547f, 0x457f4781, 0xe17e9c85, 0xd4b5ba00, 0x777fc3f4,
+0x8b013d8c, 0x5381a2dd, 0xf60c7f5b, 0xc3f2c212, 0x5c6c7f35, 0xc77fae7c, 0x7f790cc8, 0xe6848181,
+0x7f81584b, 0x813688e2, 0x4cff2987, 0xbeb48126, 0xc4be67bc, 0xb8738181, 0x886a9c7f, 0x519881a3,
+0x77fbd5c9, 0x4181c081, 0x20813287, 0x6121bb2d, 0x7f7f227d, 0x1a8181a6, 0x811a5d06, 0xc890a57f,
+0x7faf7ece, 0x83a36788, 0xd9fd8e37, 0x7f7b985e, 0x36819a38, 0xbd6096c4, 0x7de5be7f, 0x7bc68150,
+0x7f8831bb, 0x81818144, 0x434a63e0, 0xaea4ad6e, 0x407f2785, 0xe8df81ea, 0x4a148aa7, 0xf89f7f65,
+0x3c757f62, 0x7f81c80e, 0x81818881, 0x81caa47f, 0x9c570881, 0x1c1a8354, 0x2fa8008d, 0x7f7f81ac,
+0xa781d413, 0xa9a45530, 0x327f297f, 0x7f647f5c, 0x81a6324e, 0x81a954cd, 0x7f1c2881, 0xe30b2eb3,
+0x81b18181, 0x81157f18, 0x967f7f96, 0x819a7fbc, 0xcccf4fc2, 0xc43a48d5, 0x7fa33d74, 0x3aa97f50,
+0x5277817f, 0x69afad86, 0x7f81f278, 0x4fa29716, 0x91818170, 0xdc1a7f5c, 0x278159b1, 0xe87f069f,
+0xd28cc56b, 0x157f8181, 0x81339981, 0xcf818114, 0x6eba0b8d, 0xd281527f, 0x8249d583, 0xb37fbc43,
+0xc181357f, 0x819f6163, 0xaf194881, 0x7f9872a4, 0xf3c3ab2c, 0x47507f81, 0x523f812e, 0xff81d105,
+0x5851cbb0, 0x7fe881c3, 0x60cdbe81, 0xc838ccbe, 0x81817f7f, 0x1d76cb34, 0xe100407f, 0x81d78181,
+0x2f81817f, 0x927f3ac9, 0x3cc887bb, 0xda7d6d7f, 0x2c7f1e7f, 0xf58e41ee, 0xfb816281, 0xa211d47f,
+0x6eea4bf9, 0x297097b8, 0x9d819e81, 0xf6ef4156, 0x2afb4362, 0x7f16647f, 0x8e10c4c3, 0xb17f6181,
+0x3b9d2949, 0x53a6c867, 0x810faffb, 0x8197d27f, 0x41873a37, 0xc48d7f7f, 0x81b6fb69, 0x144b2b78,
+0x8181d5c4, 0x787fbc24, 0x4381929f, 0x95927f1b, 0xa3a5e451, 0xb48cc58d, 0x818e81d1, 0x9a81cbaf,
+0x713ac87f, 0x7faaa694, 0x9f7f81c3, 0xf65b307d, 0x7fdfa25e, 0xa0bae1fe, 0xf3813f12, 0x19817fc5,
+0xc0278a81, 0xe47f8158, 0x817fa281, 0x5bbc8181, 0xd63caaf9, 0x7fbf8181, 0x0054157f, 0x7f7f311c,
+0x7f817f56, 0x56c22692, 0x8143b624, 0x0845a315, 0x107a8119, 0xc1cd5c7f, 0x7f62e549, 0x7fbb757f,
+0x6b8dec74, 0x6893e220, 0xbb224ba9, 0x31819871, 0xdd2b4f57, 0x3d030250, 0xc1539dc4, 0x1c42df45,
+0x7f7f6631, 0xaa81eb8f, 0xa6d4edf5, 0x81ec1981, 0xc92d77f8, 0x1ba35b2c, 0x0e1fa0cd, 0xdd257f7f,
+0x817fa041, 0xe9815f81, 0x60f1b338, 0xb97f7fad, 0x097f857f, 0xae6a7f60, 0x145dbced, 0x81b7607f,
+0xd4647f3d, 0x7f7f52ec, 0x7fdbbea1, 0xc74feec6, 0x1c197f48, 0xb17d1d61, 0x2e68b081, 0xb717d181,
+0xda46377f, 0x105e81b7, 0x7f2e7f58, 0x67c33a7f, 0xb83f666b, 0x68518691, 0xc556f081, 0x7763c400,
+0x817fbc99, 0x3a18df21, 0xfbde9374, 0x667fc646, 0x737f7fe9, 0x7f987fbe, 0x27dcb107, 0x25f35d30,
+0x420adcd0, 0xab7a7f7f, 0x7f8a28c1, 0xcd7fc20e, 0x6ce69bcc, 0x7f308ce5, 0xb2c96e81, 0x95dbeb1e,
+0x7f7f567f, 0x6b7fa0a5, 0x997fed60, 0x9281da7f, 0x3f7f617f, 0x7fc4608e, 0x817f4781, 0xc36b7f81,
+0x7fddc17f, 0x6fe27020, 0x8a5a2b27, 0x3a4bfb81, 0xae9e4a81, 0x7f7fda7f, 0x5a7ff006, 0x39b6a0b7,
+0xb9721181, 0x725edb2f, 0x8903cf08, 0x81aa7cde, 0x7fe0817f, 0x6c9e7fe8, 0x7f5c8140, 0x819d8181,
+0x58777451, 0x7fdb4784, 0xc3186166, 0x817f458f, 0xdce8cdc9, 0x7840bd81, 0x1781954b, 0x60240082,
+0xca242d7f, 0x788428b6, 0xf67f8d7f, 0x657f7f8a, 0xbace8106, 0xb8816e93, 0xf9b07ff9, 0x818102fa,
+0x32028181, 0xfe7f8e27, 0x818d7f26, 0x7f0781a8, 0x7f7f73f3, 0xfaa78135, 0x7f81810b, 0x72676281,
+0x812ed271, 0x7f8a961b, 0x7f81817a, 0x367f4181, 0x03ecb2f0, 0x6c810a7f, 0x577f81b1, 0x81e889b8,
+0x7f7f707f, 0x7f90368c, 0x86a36487, 0x42b8c17f, 0x91816160, 0x8150b0a5, 0x81390fa1, 0x7f87d07f,
+0x448548d0, 0x32810b93, 0x7f20da7f, 0x5b6a7fed, 0x5c812d7f, 0x7f5a7f69, 0x0c36d681, 0x2ee0544a,
+0x817d861a, 0xa47fdaf7, 0x7c8127b1, 0x96216604, 0x4f8138de, 0x03e57e14, 0x73050f7f, 0x42007eeb,
+0xe303f07f, 0x2f8a7438, 0x8a7f7fbc, 0x866f8165, 0xf496535f, 0xea2d9bb2, 0x16e67386, 0x6bf837ab,
+0x411874b4, 0xa7c15c2a, 0x74bb7f72, 0x8181ab81, 0x81e33481, 0x68b47f7f, 0x957f8181, 0x947fcf27,
+0xb47f7b20, 0x997ca926, 0x5a457f0f, 0x87403699, 0x811e3ccd, 0xdb857d7f, 0xc9cec481, 0x2998d8c3,
+0x231fa37f, 0x876a2942, 0x812d7ff0, 0x7f90959a, 0x811598a2, 0x7f814697, 0x7f7e1467, 0xad3a8154,
+0x81813f26, 0x0851321a, 0xf77f9f7f, 0x76b16294, 0xa56c3e3a, 0x7fd5c569, 0x30dd8185, 0x7f7f819c,
+0x7f8a7f7f, 0x81dc4da7, 0xe1ce9eb9, 0x8181a2c8, 0x7fda81be, 0x01797f42, 0x7b7f89a8, 0x81e9de7f,
+0x7fa76fc9, 0xdebec9a0, 0x7f381e89, 0x81ba0d7f, 0x459a7f7c, 0x81ac8181, 0xb65e8279, 0xdf8174cb,
+0x6ea24abd, 0xb8486638, 0x81637f7f, 0x011d5c81, 0xc27ff63d, 0x81d48160, 0x7c81ba7f, 0x7ffa7f73,
+0x7f8881f0, 0xc2d6a15b, 0x7f5d7f7f, 0x06818fd5, 0x8c23ea91, 0xf3d8e75c, 0xab81be93, 0x817f7fa8,
+0xe57f816d, 0x6e81f27f, 0x81e566aa, 0x88397868, 0x45f9b7c1, 0x2c4d81e5, 0x52765111, 0x813e3d60,
+0x4f45c3e7, 0xef777f91, 0x3e7fc481, 0x3e810d42, 0x81c53f7d, 0x2f7f8481, 0x3a7f8157, 0xf1662881,
+0x81f03791, 0xaf818136, 0x476b7f7f, 0x6890cfa6, 0xe4f7e9ae, 0x81f6b9ba, 0xb0623aac, 0xea7f7031,
+0x72c30000, 0x407f8197, 0x8176917b, 0x81184acc, 0xdacf7fa6, 0x996cd37b, 0xa68175c8, 0x5e6082e3,
+0x067f927f, 0xc47f4481, 0x7fc58113, 0x7f561aff, 0x817f46b2, 0x7f7f8159, 0xcebbc57f, 0x72765b87,
+0x817f2f1e, 0x885c7c81, 0x7f7f65a9, 0xeded8987, 0x7f3bab81, 0x812f7f6b, 0xfc67d85a, 0x13297781,
+0x607f9621, 0x507fa647, 0x4e39ce7f, 0x81ff5c4b, 0xa246316c, 0x81457f7f, 0x5781812f, 0x1463aa9f,
+0xc35d7f14, 0xbcbb5771, 0xd7e45781, 0x636c7365, 0x07102b6e, 0x75e1337f, 0x7f2bb116, 0x257fa0f2,
+0x97597f40, 0x7f818810, 0x7f7f7f5c, 0xca7ff660, 0x9c916181, 0x903ea1cc, 0x6c8f7f81, 0xc2b07f25,
+0xa43d818c, 0x81f67fb3, 0x67f56081, 0x1ba55531, 0xa77fc6a5, 0xda479281, 0xe7647fdb, 0x7fd2f34d,
+0x1b7f4c68, 0x7f8c5781, 0xbcf18110, 0x7fd3b883, 0xde81687f, 0x81358948, 0xeb813d17, 0x25040181,
+0x7ff8a39c, 0xbc574351, 0xbe677c7f, 0x487b5068, 0x6c7f816c, 0x7f2dca81, 0xb8c7b17f, 0x7f869781,
+0x9c99f0ae, 0x437febc4, 0x289d81fe, 0xab74307f, 0x7c7b299a, 0x62593f52, 0x7f4e513a, 0x7f9b81c0,
+0x9e2c0de5, 0x43818175, 0x857064a0, 0xd17f8db2, 0xb0a055de, 0xf04ade7f, 0xe37d7fd6, 0x3956789a,
+0xcfbcb53a, 0x7fa39e7f, 0x7ed171a8, 0x49a65c89, 0x3ee166d9, 0x2daca6ca, 0x53d051e7, 0x957fc181,
+0x0000e873, 0x815fbf9b, 0xe986ac61, 0x7162fd81, 0xe428090f, 0xe2814730, 0xab8d9d3d, 0x8164e6da,
+0x8a6126e4, 0x81817f6e, 0xeb818281, 0x359f7f81, 0xb663c39c, 0x7fb856a8, 0x9d812e30, 0xd07c7f2e,
+0x6b58ceb5, 0x8181ca7f, 0x42c081b5, 0xd58a6fd8, 0x7faa9b85, 0x81818181, 0xbb3781f4, 0x7fa98173,
+0x17f17f7f, 0x7f819862, 0xd97f0781, 0x812f579a, 0x608196ec, 0x23458155, 0x5bc1a902, 0xaa39817f,
+0xcb207d81, 0x227f7f9c, 0x81a54ad1, 0x81027fb4, 0xb6711835, 0x8181a57f, 0x7fed8141, 0x7fa95ec0,
+0x7ffc7f8f, 0x572f81e8, 0x7ca87f81, 0x7fbeb67f, 0x637f814e, 0xcf11af7f, 0x307fb081, 0xaa85927f,
+0x6f5dd995, 0x81db817f, 0x68275869, 0x724dff18, 0x8176ef58, 0x54814c7e, 0xece02300, 0x5ad71a56,
+0x689e7fa7, 0xb67fa77f, 0x107f1359, 0x51a63bc0, 0x22c887ca, 0x0a0e34b7, 0x81587fae, 0xbb79604a,
+0xfbb7077f, 0x81967f81, 0x81227187, 0x8129817f, 0x43a84e8a, 0xb981a34b, 0x5d307f81, 0x4d7fe9bd,
+0xc973a181, 0x78647f8b, 0x00274105, 0x5d7a09e5, 0x6565a681, 0xfbd0ea6f, 0xf986037f, 0x377fc3e0,
+0x9ba57fe8, 0x4f7ff13c, 0xd61f7fa0, 0x817f7fbf, 0x7f3e25c7, 0x81c42fdc, 0xae398181, 0x3125afbf,
+0x7fed479c, 0xf4f52ff4, 0x7fc99d81, 0xaa417fb7, 0xa1324f81, 0xff7b817f, 0x167f6f86, 0x2a86c481,
+0xa127c15c, 0x6d8e0000, 0xcf207f6d, 0x6b815681, 0xd01f50e7, 0xab81e704, 0xce81a4e4, 0x7f177f7f,
+0xaa819181, 0x7f81a044, 0x1c0c7f5e, 0x4f0e56cf, 0x46a581ca, 0xb3d52970, 0x6fb3b2c8, 0x7f8189de,
+0xf0058133, 0xd181d0da, 0x7481c46e, 0x427fc7d5, 0xe4528188, 0x4a359b7f, 0xd1ae25bd, 0xb735818f,
+0x811a2117, 0xddd2829a, 0xec696aa1, 0x99cf945e, 0x518f3d7c, 0x8e814381, 0xa2819647, 0x85af7f43,
+0xc81d7f90, 0xb8361781, 0xb67f9a64, 0xc3d38181, 0x97b07dae, 0x54687f7f, 0x206d5b79, 0x437089ad,
+0x7f814881, 0x77b63840, 0x61816434, 0x9dca5e22, 0x818129bf, 0x8b49dd3b, 0xc792813a, 0x3b407f63,
+0x7fc48f8e, 0x7f96815a, 0x6b430c8b, 0x974ee1a1, 0x7f8181fa, 0xb47098a2, 0x81168181, 0x816b71f5,
+0x0a596d81, 0xa5945681, 0xa14181d0, 0xb4944bfa, 0x81f7816b, 0x7f7f287f, 0x7f81cc48, 0x6d819077,
+0x7dff7f58, 0x5a4e7f81, 0xb7ee6df2, 0x81712a85, 0x0a652e7f, 0x86a1277f, 0x3c568181, 0x7f817f65,
+0x64fb3e0b, 0x71a9687f, 0xa65c7f72, 0x3fb34043, 0x8132f77f, 0xec8133bf, 0x7f8119a5, 0x081f5bb1,
+0x7f81777f, 0xedcca8a1, 0x7ffe7f25, 0xec934f43, 0xdbcc8181, 0x64416881, 0x4a8d93f4, 0x7fb64c7f,
+0xf12f9bc2, 0x812a8f90, 0x1b9c32de, 0x30a30d81, 0xf079e16a, 0x633720c2, 0xad44fd53, 0x3c812f3b,
+0x56b32873, 0x0000b62f, 0x76cb58a6, 0x31086ead, 0x9c307f65, 0xec664f81, 0x7f6186cd, 0xb7047f39,
+0xe87f2ccd, 0xe79f8f3c, 0x88b34352, 0x7f6dec72, 0x8b4aca8c, 0x217f1c55, 0x53d76693, 0x7f984d2d,
+0x125abe92, 0x7f37677f, 0x4f819c81, 0x7fb23b2e, 0xd2c5ea58, 0x7fa7817f, 0x57817f7f, 0xd45239ec,
+0xc887fa03, 0x7f7f7f42, 0x8181444d, 0x2ba67f1a, 0x7fcc527f, 0x813d7f81, 0x68ad12aa, 0xa87f6763,
+0x62577f7c, 0x067f7f5a, 0x3201a788, 0x9872b0a1, 0x814548d0, 0x3a883096, 0xc4701d52, 0xb59381dd,
+0xdc3381a8, 0x816e3f7f, 0x4f57813e, 0x4679d77f, 0x9a14b75d, 0x8181da4b, 0x817f9d48, 0x52818181,
+0x2281c97f, 0xbfb92d7f, 0x1f6f357f, 0x2ca57f3d, 0xb9b4626f, 0xab4bba25, 0xf27f4b5a, 0x4b478a56,
+0x818181c3, 0x6a767f29, 0x81e16dcd, 0x4d818181, 0xd1819ce8, 0xdbce6082, 0x757f41bc, 0x777f7fbc,
+0x5fa9d103, 0x0241e8d6, 0x1bb381a1, 0xf08177b7, 0x00df007f, 0x3bcdbb5f, 0x5e7f710a, 0x7f3f7f1e,
+0x657f0f2b, 0x2ffae79b, 0x495c8110, 0x7f7f8150, 0xe8449f7f, 0x8434a77f, 0x42d56352, 0xfd717f8d,
+0x81c514b3, 0x7f147f50, 0x7f856696, 0x7f844c81, 0x8157714b, 0x50cba2ca, 0x73539ff7, 0xb5ed7fea,
+0x5d1f637f, 0xbc66496d, 0x68b633df, 0xea76156c, 0x7b4654a3, 0x81547f8d, 0x7f537f27, 0x5db64581,
+0x7f367f81, 0x7f81a08c, 0x81440000, 0x3965f076, 0x40977036, 0xcf81ee31, 0x6e7f19bd, 0x703a7f03,
+0xe94b81c6, 0x4330aa7f, 0x060411c3, 0x91f6f37f, 0xbdc9e781, 0x81b87f51, 0x4b6e228f, 0xbc3eb87f,
+0xad916bce, 0xdfb0976a, 0x98b86387, 0xb651bdee, 0x8cf46cb9, 0x9b819a8d, 0x9d8181f1, 0x7fe0ab97,
+0x714d0264, 0xb87f67a4, 0x7f9d3150, 0xe95c7f81, 0x81a7537c, 0x93f4d02e, 0x433a477f, 0xe5067fab,
+0x81de5836, 0x64667f81, 0xc5255988, 0x0a7f824f, 0xf97b7fbf, 0x40c97f82, 0xc1d47a74, 0x7f7f3081,
+0xfe49fc23, 0xba819538, 0xb87f5581, 0x5ea47fb2, 0x35b37292, 0xb9848178, 0x276f5235, 0x647f4ebd,
+0xee2a491a, 0xfd29bb60, 0x21f53728, 0xae9d817b, 0x2ba97f81, 0x6accd440, 0x7fbec77f, 0x909b817f,
+0x227f7f35, 0x767f22d5, 0x0ba3c2d1, 0xd38f817f, 0x467fd862, 0x673e8181, 0x7f3b7f81, 0x70427f81,
+0x6f725d24, 0x731d64de, 0x32a9816d, 0xc1817f7a, 0x627f8181, 0x7f947615, 0xc37f72d2, 0x7f7f4261,
+0xf147be64, 0xc481afab, 0x7fd9a5dc, 0x5c812690, 0x7c34783b, 0x9087817f, 0x7fd25245, 0xea5fcfb1,
+0x81817f81, 0xc84ae6b4, 0x8149c875, 0x814a1a7f, 0xab7f6881, 0x6e7f3d81, 0x3ca58181, 0x75cb4781,
+0x8181817f, 0x5ec4f2c5, 0x8165e07d, 0x643d6e7d, 0x78bd9764, 0x40817f24, 0x5f077581, 0x7f2ebb4b,
+0x7f7fa781, 0xe657ca7f, 0x0000ff21, 0x7fa9bf6a, 0x81c1c30f, 0x9138393c, 0xa2b38174, 0x7f4c24bc,
+0x2b918175, 0x7fea5d7f, 0x50c1a17f, 0x0bfefac7, 0x81fe4ea4, 0xab7f7fbd, 0x559b7fb9, 0xcb81e356,
+0xd7963f72, 0x7f7a1b81, 0x8c9cd9d3, 0xbf889d1f, 0x161a88cf, 0xe6a0abb9, 0xbb014f2b, 0x81fa947f,
+0x7f41509e, 0xb4480d7f, 0x7f7f8181, 0xeca6b540, 0xd27fc381, 0x3aaeb38f, 0x7f7f7f7f, 0x372a7f05,
+0xd6a34b81, 0xee378159, 0xcf8181b8, 0x07487fcf, 0xcbb4ed77, 0x29817fab, 0x7fe5b342, 0x63c3bb81,
+0x0724d1d8, 0x50e47f7f, 0xa67f8f93, 0x8181c37f, 0x28817a81, 0x7f81fb7f, 0xaab17f7f, 0x81997f7f,
+0xa423816f, 0xf953c16a, 0x4c2362cc, 0x81812bf3, 0x5a817f4a, 0xffcbcce0, 0x9c7f8776, 0x5b5a7f7f,
+0x64817f92, 0x5139c47c, 0xbe8573ce, 0xe5457f81, 0x2ca4815b, 0x503e14b8, 0x4ca75681, 0x907f7f7f,
+0xa2377d19, 0x7fd7c09a, 0x7fbc8191, 0x657f61a2, 0x814f5c81, 0x7f48607f, 0xa16f3ad5, 0x819d227f,
+0x247fc10e, 0x7f817f32, 0xc39981f5, 0xb357261b, 0x347f7027, 0xe3814506, 0xa1032e49, 0x979eb571,
+0xb06ac281, 0x81815e7f, 0x34b57fb7, 0x7ffe2681, 0xc181630b, 0xbda9ce3b, 0x81f9287f, 0x4ef12081,
+0xb86a54ba, 0xbc4d13c7, 0x81817fd9, 0x62817f1c, 0x7f7f4fa1, 0x847f7fce, 0x81bf68f5, 0xd7625f73,
+0x81e79d7f, 0x8137a1ca, 0x423581cb, 0x26580000, 0xd7d6811e, 0xce6e6c81, 0x8168173b, 0x498468b2,
+0x8c7c817f, 0x6f81b881, 0xad899a25, 0xd049e481, 0x7fdd817f, 0x63816bbe, 0x048b8d81, 0x8eb764c6,
+0x987b9ea5, 0x81fae493, 0x7f72817f, 0xa57fbd7f, 0x7f0f81b3, 0xb37f4a33, 0x2e092988, 0x1b172b25,
+0x09107f3e, 0x48f5fc7f, 0x69aaa80e, 0x184fe781, 0x7f9db5e6, 0x8195276c, 0x252a8181, 0x8b81ccfa,
+0x3c308181, 0x9d19b086, 0xf0d589f7, 0x207408ca, 0xe34e9ed0, 0xe67fbbf0, 0xc0996525, 0x0dca5881,
+0x813d4181, 0x7f729738, 0x0a865e5d, 0x8bbea581, 0x5c4aac3f, 0x8181a63f, 0xa1f0b873, 0xa92481d0,
+0x837f817f, 0x047fdd84, 0xc8767f2a, 0x441a81be, 0x818681aa, 0x05490179, 0xba81ba7f, 0xde246581,
+0x466d70d7, 0xb6bc3f5b, 0x5c428f2d, 0x8105cb77, 0x0206175a, 0x5f6ac2b1, 0x9a4d2a4d, 0x7fb0917d,
+0x1281ce12, 0x81028181, 0x41b0a4ad, 0x29973681, 0x973e992e, 0x81617dba, 0xc1818181, 0xb87fc1af,
+0x81137feb, 0x627f5210, 0x8181de1d, 0x7f615f70, 0xabbb817f, 0x817fc57f, 0xdbe5b6f3, 0x9c8a7f72,
+0x3c2826df, 0xae03d3b5, 0x7f6cc481, 0xd9c41c7f, 0x83ad7f81, 0x336370a8, 0x8bbfaf2e, 0x3561caa5,
+0x548d142c, 0x810366b5, 0xa981d36e, 0xbf3afc7f, 0x7fd78157, 0x61817bf1, 0x74afcdd7, 0x18ac3d61,
+0xcc45b6e0, 0x3d81af88, 0xf97934de, 0x947a8f81, 0x8b7fa6b8, 0xe0dd8981, 0x2d812381, 0x967f6409,
+0x24bc0b1d, 0xdb7fa943, 0x18197fc3, 0x47ef7f6b, 0x66a6bb7f, 0xd374af81, 0xf48e7f9f, 0x52399754,
+0xe1abbf6f, 0x04751d7f, 0x3690c581, 0x557fc3a6, 0xc1815413, 0x9086df7f, 0x81e57481, 0x81538167,
+0x4df9c981, 0x9463a57f, 0x81c5063a, 0x7f7eaa81, 0xa6dc9423, 0xabc07ffb, 0xd981817c, 0x816064ae,
+0x7f815dab, 0xc0b12da0, 0x1444be7f, 0x3a7781e0, 0x887f527f, 0xc0817f1f, 0x05cba881, 0x6599057f,
+0x817ffd53, 0xca907fab, 0x557f0d7f, 0x5d029ce9, 0x57818168, 0xc4093382, 0x52e5b33a, 0x763e09b7,
+0x814da277, 0xf9208cc5, 0x817f7fcb, 0x7f5fac7f, 0x6d7f6544, 0x7f0a7fec, 0xe7b4337f, 0x7fae8181,
+0xa8c5608c, 0x6f9bb388, 0xb27fc773, 0xf4acaa81, 0x1cad30ac, 0x2d817f8b, 0x2b817f16, 0xcca2a581,
+0xcb77dad4, 0x58487a22, 0x7fe57f69, 0x45387012, 0x73504724, 0x61357f81, 0xc77f8181, 0xabc7e11b,
+0x8181c16e, 0xa681ab27, 0x8c81c48f, 0xbd1de360, 0x9924337f, 0x7f230156, 0x3dd6a87f, 0x477f9381,
+0xbeb38181, 0x47815ccb, 0x8125817f, 0xb8057f7e, 0x7f7fc081, 0x46815873, 0x5f93ba81, 0x7f35767f,
+0x7f7f5a94, 0x748c84e5, 0x4c7ffc90, 0x7fcd247e, 0x44898e81, 0x1268ecc9, 0x81d2777f, 0xda42e7c7,
+0xee267f81, 0x7f5f269f, 0xe3d60c91, 0x81c97f0b, 0xd7b00000, 0x9b813a83, 0x7f7f8181, 0xb71cfa35,
+0xf5888156, 0x853e9cc5, 0x1c1c1014, 0x2e7f8b1b, 0x81bea644, 0x70f4e8d8, 0x8158a181, 0x18caad9f,
+0x3b28815e, 0x7f460155, 0x7f7f7aee, 0x444b51e5, 0x8682a381, 0x7f508181, 0x5e86466d, 0x7f81287f,
+0x7f7c7f50, 0x8173e4de, 0x4c747a65, 0x69816cd7, 0x6674813b, 0x48274e90, 0x92ccf278, 0x99f181c7,
+0x81b20243, 0xbf4b4b81, 0x81d981f4, 0xa1e981dd, 0x6c7f7b6e, 0x98cad579, 0x9a7f1685, 0x81e8812e,
+0x5d7fcf52, 0x37819772, 0xdb437fab, 0x1d83782a, 0xd3ff71b3, 0xc7e28181, 0x81817ca6, 0x1808127f,
+0xb515639d, 0x812c8108, 0xa2a97f14, 0xdd8102c6, 0x727f7f98, 0x5f197f19, 0x7b22817f, 0x46562e7f,
+0x59b89c7f, 0xaba28d81, 0x5e4e7181, 0x909da6b9, 0x817d8128, 0x9c167abf, 0x3ef4817f, 0xf5628d72,
+0x97727fbb, 0x88c6a600, 0x81c9927f, 0x817443e0, 0x81ac7f7f, 0x7fec67cc, 0xc353bb96, 0x4caae113,
+0x7fa37fd9, 0x525c45dc, 0xdf798ba6, 0x7c811049, 0x812570c7, 0x7f788a6b, 0x797f1fef, 0x2022fd81,
+0x38870936, 0x813f7d7f, 0x81810941, 0x6b75817f, 0x5aa11953, 0xcc7f7f81, 0x61778581, 0x5e458181,
+0xcfff7f90, 0x9020747f, 0xb4e35ad5, 0xea0e8109, 0x21f97f13, 0x41c07f40, 0xc6ddc4ad, 0x085227c8,
+0x11dc9f85, 0x7fda7f55, 0x5a92b34c, 0x11659b4d, 0x000081bc, 0x81816781, 0x81cce139, 0xf165d51c,
+0x810a817f, 0x7f7f82d6, 0xb5b3817f, 0x40909bfb, 0x7f04a01f, 0xd0288164, 0x810164f3, 0x59778b81,
+0xc87f2997, 0x9d99417f, 0x65528173, 0x727f454a, 0x9f187fd9, 0x37948141, 0x217fcf7f, 0x81818319,
+0xc27f817f, 0x47817f81, 0x756e817f, 0xa28c8cd3, 0x84b77f81, 0x44ef8e4f, 0x7f7f7f7f, 0x932e2c45,
+0x680d20a8, 0x817f1122, 0x52128a21, 0x2b8136ff, 0x0b7f817f, 0xda8c817f, 0x24c17f7f, 0x3dcb39b7,
+0x81d18181, 0x81fe1a81, 0x0590978b, 0x71b20319, 0x7f65bfb4, 0x4fb42290, 0xde1a7fd2, 0xc8bd77d3,
+0x917f32a0, 0xb03fd9db, 0x4c1e872b, 0x814f5481, 0x44817f7d, 0x398a887f, 0x4a19e8ac, 0x4d818189,
+0xc3818181, 0x7f8198dc, 0xb77f527f, 0x817fe681, 0xa83d818c, 0x420b5e81, 0x6e5adbbf, 0x7abd56c4,
+0x717d7f7f, 0xc7811c0c, 0x767fba89, 0x6d1d5106, 0x5a06df17, 0x86c28181, 0xcdd6888e, 0x0e747f8a,
+0xde81a9c3, 0x45459db8, 0x81114e22, 0xbd7f8197, 0x3c3d85cf, 0x8c815fe8, 0x544d7f64, 0x81986177,
+0x8c20a0b5, 0x7f4b8178, 0x17db6031, 0x7f42df7f, 0xee93cc0f, 0x8ab829b2, 0x29a04e65, 0xfc30035f,
+0xc57b7f7f, 0xe51ef77f, 0x7fd95481, 0x76957f7f, 0x819f945e, 0xc91a187f, 0x81657e40, 0x8192816d,
+0x7339dce0, 0x8bd1c3c7, 0xa2812e1b, 0x81db3d8e, 0x1781da89, 0xd0810000, 0x7fc8c57a, 0x8fc32003,
+0x1c847f81, 0x10dfb271, 0x7ff8cc62, 0x2037d87f, 0x8151501e, 0x17c74c74, 0xb8ab2852, 0x4b7f1c97,
+0x6a811596, 0x46acc3e0, 0x3c81f49e, 0xd6d81a4e, 0x2e815066, 0x46218186, 0x41682bb4, 0xa1ac315d,
+0xb87fdc73, 0x378181ad, 0xe97f979a, 0xccb07981, 0xb10a7f38, 0x03488893, 0xfef78df3, 0x8145467c,
+0x3b67b401, 0xb3cf34bc, 0x81476c00, 0xdd638b81, 0x5e43af81, 0x2bab7f6c, 0x6d7f7f6f, 0x7f657f70,
+0xbc816e77, 0x8981c27f, 0x3e40b685, 0xd93d7256, 0xbd86c8c3, 0x817fdcce, 0x43899081, 0x817f1693,
+0xc0db9a68, 0x7f57a5a8, 0xd14c817f, 0x8c4ab37a, 0x901081dd, 0x292a452b, 0x7f81e85a, 0x189ece81,
+0xc10b9401, 0x81817f69, 0x358b3d7f, 0xa86bb1a9, 0xbd7f7f8a, 0x57fd7fe3, 0x81964506, 0x74295986,
+0xab7fbc8d, 0x817f62e6, 0xab816277, 0x5737ab20, 0x9c7f6f50, 0x7f5781af, 0xe344ef7f, 0x98a695ca,
+0x81d2a20e, 0xa77f6605, 0xc1d00398, 0x177a90d5, 0x7ff6db7f, 0x0221897f, 0x6c8105c6, 0x578981d7,
+0x64ac81c3, 0x1d9381f9, 0x7f706e7f, 0xd5797f81, 0xf9567f81, 0x27ed81d0, 0x496f2d01, 0x48b8973b,
+0xb34c1f32, 0x7f42139a, 0xeac95332, 0xf05f7783, 0x037f8145, 0xa98f3dbd, 0xc5ea24a8, 0x81417fc1,
+0x81655e97, 0xcbc682df, 0xec10815a, 0x817f7fa5, 0xb8b54dff, 0x00003e84, 0xa7ece56d, 0x819d81c2,
+0xd0ee6d81, 0x0e5e4db0, 0xe3818181, 0x7f4a2c13, 0xb13d46ac, 0xc181f081, 0xfe51c8ab, 0xa7747fb6,
+0xc2506129, 0x9d98ce4a, 0x81627f81, 0x816d6481, 0x81576b7f, 0x817f814f, 0xc881cc8b, 0x686d098f,
+0xae7fcb7f, 0x3b39a87f, 0x81888879, 0x47877f30, 0xd9bd9e9e, 0x061a0ee3, 0x7f3631a3, 0xbf5b227f,
+0x7f7f837f, 0xc5d18162, 0xa3405318, 0xc3f27fac, 0x7f43c6f6, 0x927f7fad, 0x817f7fdd, 0x7f813b8b,
+0x7fe59a65, 0x81fd87d7, 0xda54547f, 0x814d114a, 0x7e7f813d, 0xf47f077f, 0xa0869981, 0x38db7fea,
+0xf61986b0, 0x7f9146cb, 0x81db9bb7, 0xfc3b8177, 0xbd7ff9a1, 0x7f33c5aa, 0x74335381, 0x815ca42b,
+0x7f814a81, 0x49977342, 0xab9ed775, 0x9874db81, 0x6381227f, 0x1b81b27f, 0x817f3716, 0xd8e6bf07,
+0x81030494, 0x56cedf81, 0x7f607f9a, 0x7f7f4f52, 0x3cb978f2, 0xd57fca81, 0xd7bc5c81, 0x3a2265b7,
+0x7f7f98bf, 0xac068158, 0xa8851581, 0x1db9a67f, 0x7f24d44f, 0x57a34093, 0x7529b981, 0x50a3817f,
+0x4c3f6381, 0xbd7f81ad, 0x2281a481, 0x1d2e7f81, 0x77b85621, 0x3f818105, 0x307f783f, 0x7f7fe0b2,
+0x4d4a297f, 0x8d7f4d7f, 0x88818a7f, 0x129cf8b7, 0x9b817f7f, 0x7fe5ce65, 0xa48e7213, 0xf481a737,
+0xb43d4fd4, 0x7fb182c3, 0xa18181ba, 0x077f8127, 0x4f4b468a, 0xe1bea062, 0xc34d0000, 0xc0f381ba,
+0x8181817f, 0xbb2d9e7f, 0x7f216b7f, 0x868181ad, 0x7f7fcdf6, 0x810b3d55, 0x627f4c43, 0x81903e81,
+0x54ea9a4d, 0x3181ae1e, 0x7f8116d0, 0x74b5c181, 0x607b431b, 0x5552d081, 0x7f818122, 0xe9449f4c,
+0x6fb434b5, 0x1f1b9552, 0x54d7b178, 0x8b1b9e33, 0x4143cb33, 0x13b0817f, 0x7f51317f, 0x81247f65,
+0xa6fd7b18, 0xb1d6550e, 0xaa6481c1, 0x81421f41, 0x50c0813c, 0x95ac7eec, 0x442a7981, 0xc39289ea,
+0x667fcc5d, 0xf681817f, 0xd729c236, 0x5342819b, 0x813f457f, 0x819d2481, 0x817facd0, 0x7ff2dd9a,
+0x81cafb81, 0x26c72930, 0x177ba325, 0x81015e07, 0xc5667f7f, 0x81fa497f, 0x7cb3505e, 0x85e3815d,
+0x67596b53, 0x857f6696, 0xc97f7f66, 0x7a813978, 0x5f7f7f81, 0x043d9d81, 0x8c81a6a6, 0x0e8197f1,
+0xd17664d1, 0x9f81814d, 0x7f4c4f3e, 0x5e7f756a, 0x8181815a, 0xe090817f, 0x81377f81, 0xaba9813d,
+0x8985c047, 0x2106c97f, 0xc846f2b2, 0xb19a83ce, 0x64107f16, 0x7f792fde, 0x87516244, 0x9c81673e,
+0x2f5c8187, 0x555a8181, 0x95d9f26a, 0x70408e9b, 0x7f81b57f, 0x8f0de19b, 0xd37f7a6e, 0x9ba9fa7f,
+0x7f0b6568, 0x1a4f7f14, 0x3b4f5b71, 0x814e377f, 0x8125a7ec, 0x1d817f43, 0x811128dd, 0x66816435,
+0x9de99e3e, 0x81b27f48, 0x7fc3783f, 0x69983781, 0x457a817f, 0xd4296caa, 0x0000b659, 0x723a92a9,
+0x81860888, 0x8c81c44c, 0x7489b15c, 0x817310ed, 0x74a48e9c, 0xc6531a73, 0x7f7f64d0, 0x502d7fb9,
+0x437f9f7f, 0xb1bfa444, 0x81df09f0, 0x0946ab9b, 0x7f21ca81, 0x9d818166, 0x81e8d281, 0x917f6791,
+0x36ac811b, 0x9d817347, 0x344c02a7, 0xab36817f, 0x81818152, 0x32013a89, 0xd7ab3008, 0x6f608181,
+0x817f7f7f, 0x153e0881, 0x81dfcaba, 0x6c5e577f, 0x2f7f6524, 0x817aa07c, 0xdd7f0175, 0x8b4b7fec,
+0xbc81ead5, 0xae8b205d, 0xc9a9743e, 0xf33f14cf, 0x49ca7f58, 0x507f6976, 0x8cbe66d4, 0x9778a724,
+0x6d8147e3, 0x81334092, 0x7f4c0101, 0x82d5ea73, 0xb59c6657, 0x1ab842f8, 0xbc1d8581, 0x4004d917,
+0xe5eaaee2, 0x7fa9813b, 0x817fae58, 0xd77f1281, 0x10732f61, 0x836d137f, 0x7fbccb3b, 0x1a3a81d7,
+0x81047fef, 0x790aa87f, 0xc19eaac6, 0x60667f07, 0xb7f1a97f, 0x39f39e7c, 0x057f7f7f, 0xaa7f81c8,
+0x79bef7a0, 0xf3d3134d, 0x720f7fbd, 0xb4a5b69e, 0x716c817f, 0x9a817fdf, 0x347f817f, 0x4b995cb4,
+0x876a4f7e, 0x8d92687f, 0x7f9e8181, 0x8172817f, 0x8ea21cc3, 0x61bd96b0, 0x81fba378, 0x6db77f7f,
+0xdf307181, 0xda0f81c4, 0xa8e36d65, 0x70a7a559, 0x0be0f27f, 0xb56f4971, 0x35813a6d, 0x188130b5,
+0x517ff881, 0xec54b6ff, 0x8183817f, 0x20ec2381, 0x3e7f9613, 0x878af66d, 0x81657f81, 0x7fb80000,
+0x7cb38d5b, 0x817f7f45, 0x7f6352c1, 0x3881727e, 0x637f5ec2, 0x81437fc6, 0x7f7f90d2, 0xb1558699,
+0x2b81904a, 0x7f84ad54, 0xedab2281, 0xbe9cbd81, 0x7f5001d1, 0xea2a206c, 0x813cf1ab, 0x81658221,
+0x81a6817f, 0x8167929a, 0x726dd87f, 0x817f817f, 0x93b45a87, 0x7fa179e2, 0x7f4c7f81, 0x8157817f,
+0xa2fd7e81, 0x44284a58, 0x3b98c224, 0x45008124, 0x16da978d, 0xb772ba81, 0x7a4f3481, 0xe97fb9c8,
+0x7f49529e, 0xaa02580d, 0xa254adb3, 0xac0a817f, 0xdc532c32, 0x7fe6887f, 0x7f313d7d, 0x61d17f11,
+0x81e1bf02, 0x7f62c97f, 0x348d5d50, 0x12ec7f7f, 0x8f7f2884, 0x7f3f371e, 0x021c8e81, 0x65a07fc7,
+0x5e14b632, 0x817ff481, 0x6681de78, 0xba4f971c, 0x65c07f81, 0xac81d181, 0x7f11b17f, 0xdcc1011d,
+0x7f7f23c1, 0xcc819b6a, 0x81a73d06, 0x5e943954, 0x09c8b87f, 0x43405fe8, 0x817f8d58, 0x550f4d1d,
+0x82c27254, 0xd0408532, 0x4460477f, 0x8b7f9cec, 0x7fc9817f, 0x8181165f, 0x28d8817a, 0x0527752f,
+0x8168813f, 0x7f48cc81, 0x7481c91e, 0x6881c481, 0x4b0a7f55, 0xcc81e624, 0xe57fac7f, 0x6f0f81a0,
+0x417b5a81, 0x9381dd8c, 0xa881c4e5, 0x8c505264, 0x695ab14c, 0xecc99781, 0x3981c57f, 0x0c204553,
+0x4a77d59c, 0xa2301c81, 0x5ba47f00, 0x451eb2f0, 0xb0459baf, 0x934bcc63, 0x5652c0a1, 0xb381b193,
+0x68626b5b, 0x361741d5, 0x7fe281b3, 0x70ad7f7f, 0x8161f792, 0xbfda5b21, 0xed97ad84, 0x7f59cb7f,
+0x8ed1ecf7, 0x75d784ac, 0x0f81b07f, 0x3ed5bb6b, 0x819122a9, 0x2257a06b, 0x81687fb1, 0xc008057f,
+0x97812981, 0xcb2081cd, 0xd9bb6681, 0x407f7f7f, 0xc27f814e, 0x23a2527f, 0x767203ad, 0xed17ee7f,
+0xd4b89558, 0x8195bc81, 0x817f7fe0, 0xe4b7997f, 0x081ca76d, 0xb87f817f, 0x006350bb, 0x812739de,
+0x557f811e, 0x8d4387af, 0x98815781, 0x81818148, 0xb1bc8105, 0x04cf273e, 0x3f2b289b, 0xae96437e,
+0x819281b9, 0x95cf7f82, 0x7bf77f81, 0xab7f0441, 0x81652854, 0x34de0d81, 0xac816081, 0x43b12e45,
+0x91b75bdd, 0x7da27794, 0x9bbb727f, 0x301d812e, 0x7fa88120, 0x27a15075, 0x27c82cc1, 0xef1a813a,
+0x8114c4d2, 0x817b7f2b, 0xff877f47, 0x7fd47fcd, 0xd08ceb35, 0xe07f4386, 0x67b3a47d, 0xad7fd64d,
+0x817f7044, 0x537feabb, 0x8d3f36ec, 0x6c3883ce, 0x43b6909d, 0x81bcd181, 0xfa7fdae9, 0x3c7f5440,
+0x3946347f, 0x7f22ac7f, 0x815dea81, 0xb6c6c8a1, 0xd57f81c9, 0x526fd581, 0xce3c3115, 0x3d41c54f,
+0x5c5ac2b0, 0x7c948586, 0xa47f7f96, 0x8b7fc351, 0x309aadf9, 0xa0cfef81, 0xb081acd7, 0xd7413cb2,
+0x7f4a6cc9, 0x5c3bb39f, 0x7fc7e581, 0x68cc8114, 0x0926a5fa, 0x82c37581, 0x8181e642, 0x57ab2ec6,
+0x497f0000, 0x5fa07f9c, 0xa17fc405, 0xeaa0de7f, 0xa14a7fb3, 0xc4c12a81, 0xa3527d15, 0xa854d4d4,
+0xf781a3db, 0xa07f26ae, 0x812c7f74, 0x1f7f2fc4, 0x3a9381d4, 0x17b6005d, 0xd414ee60, 0x18b49d68,
+0x9cc77f9e, 0x30aa7f91, 0x7fb9207f, 0x64158b40, 0x81423c7f, 0xea2fa9cc, 0x7fa83381, 0xca647f8e,
+0xd6b9657f, 0x81b26a7a, 0x4fb7b2f1, 0x1b7f9eda, 0xfb8181ab, 0x7f2557d7, 0x71e8701c, 0xa2817f81,
+0x81817f9e, 0x04139d39, 0xc9f6533f, 0xa1f74db1, 0x814042db, 0xca6e8114, 0x7fb3f64a, 0x03f981a0,
+0x7f9e0755, 0x7f7ba781, 0xd554d19a, 0xd81a5661, 0x7fb33f3a, 0x8f81c8bf, 0x012ff975, 0xb1a576ee,
+0x81be6681, 0x818db381, 0xbd7f8151, 0x81b281ae, 0xddd2fbdc, 0x81bfa81c, 0xce818181, 0x774c9c28,
+0x7fd05d59, 0x811a54ce, 0x2f7f727f, 0xbe816b81, 0x7fb03af2, 0xc44bfc7f, 0xcf6ba181, 0x14be8161,
+0x6715a197, 0x76386d26, 0x07835f81, 0x817c818d, 0x81693281, 0x818118fa, 0x777c9e62, 0xccad6446,
+0x769c5b8b, 0xdbd781b7, 0xe4b063f9, 0x2b568182, 0x8c7f8187, 0x7f7f677f, 0x6b7fa675, 0x6be85f61,
+0xc36f7d32, 0x7f81005f, 0xd0d0997f, 0x3a818181, 0x7fa7b6b7, 0xc39ef17f, 0xcc7f437f, 0x3d51729d,
+0x3eb72475, 0x1b069d81, 0x7fe8abee, 0x604c7df9, 0x7fb47f81, 0x3a263b75, 0x75a488e6, 0x7f68d47f,
+0x00004e97, 0x7ffb0973, 0x7f53a009, 0x7f014834, 0x6f81022c, 0xf0b1405f, 0x06c84ca5, 0x4bcf7fc9,
+0x4388ec9a, 0xf528fa74, 0x8e3b816f, 0x7f7f4c72, 0x817f5224, 0xbd998181, 0x81b181c6, 0xbd817f88,
+0x1763f57f, 0x7a796e44, 0xe98e81a9, 0x5360a2fa, 0x97b39081, 0x707f1b65, 0x81c67f7f, 0x4e461c7f,
+0x817f231b, 0xc2d08181, 0x366081c6, 0xd2e67f90, 0x13f1b87f, 0xd781819f, 0x7f347fa2, 0xb1147fc5,
+0xe6b1fe4e, 0x3a1a96b2, 0x177f8191, 0x3e3d96fe, 0xd0d261a0, 0xb25e7fc9, 0x472b81ce, 0xbad6817f,
+0x707b45ca, 0x81817f81, 0xc48fb7f8, 0x90373f82, 0xa681e0b5, 0x71419190, 0x6978f77f, 0xf4463125,
+0x48f1937f, 0x4a45c611, 0x7f97e348, 0xf07f1e21, 0x81fbfc60, 0x8783a281, 0x00931cdd, 0x81e70f91,
+0xe24e7f7f, 0x7f61817f, 0x81967c47, 0x6d9bc90e, 0xec58817f, 0x8157357f, 0x7f7fc674, 0x7f7e3c81,
+0xaa8b7fbc, 0x7f3ba469, 0x7a22f57f, 0x7f7f7f27, 0x81c89e7f, 0x8145d9c8, 0x012f737f, 0xe91f15cc,
+0x9b9cb749, 0x818142cd, 0x290a7f4a, 0x89cfc4b1, 0x8337ebe2, 0xd8224205, 0x8181a49e, 0x58b04ea1,
+0x7fea95a7, 0x861681fb, 0xcea37f5f, 0x81309c8f, 0x6ded8145, 0xbd5be86e, 0xc2ce9402, 0x557f57be,
+0x4ef830e9, 0x7f81f97f, 0xf804367a, 0x5de6817f, 0x156de096, 0x2c06df0c, 0x98b170d7, 0x817f81a9,
+0x24597fd4, 0x81810000, 0x0e24a66a, 0x819e5681, 0x6f73e481, 0x054b532a, 0xc0814026, 0xb3a36181,
+0x6da37f9a, 0x7f83574f, 0xe0a381c6, 0x2075bf7f, 0x28438181, 0xb1cd7f74, 0x7ffb497f, 0xd17f337f,
+0x46085c73, 0xd57f5681, 0x704b7ff3, 0x449e3f7f, 0xeb6e09d6, 0xb37f4b02, 0x817f1cb9, 0x672edf7f,
+0xd0c2c25b, 0x5ed0885b, 0x4676876b, 0x2bd8d970, 0x9a7fbbc6, 0x81095581, 0x81b27f6b, 0x753c7f7f,
+0x7f812c7f, 0x7f477fbe, 0xdaa67f7e, 0x7f099859, 0x7b0fea70, 0x816b8186, 0xf0677f5c, 0x16455d9b,
+0xa98170a9, 0xa181529b, 0x7f817f67, 0x8f72c351, 0x2a7f7f7f, 0x2f5f347f, 0x695c7f79, 0x282fbbe0,
+0xb78581f0, 0x817f297f, 0x49818c81, 0x817fc78c, 0x3bd68181, 0x86d7deda, 0x7fa0d208, 0x1c7f3481,
+0xf5be9a57, 0xb9ded881, 0x8ab88c81, 0x647ae23a, 0xb5571142, 0xe09d7fc1, 0xa5d9817f, 0xaca07f7f,
+0x7f4d5286, 0xa57fde7f, 0x94d0917f, 0xb160442b, 0xacf47b2b, 0xd17f7f81, 0x2e37f976, 0xbe7f5d66,
+0xc2815f28, 0x5c81487f, 0xc2026481, 0x81a194dd, 0xba7f2c91, 0xb7357f37, 0x81b25698, 0xd2818154,
+0x45b78130, 0xc6bead58, 0x6f5a5656, 0xb181a6ae, 0x07c9a153, 0x75d681d3, 0x8c92817f, 0x81503520,
+0xa66f812f, 0xa59b7a81, 0x17a09164, 0x17087fb2, 0xd0bb319b, 0x6d8153ae, 0x8af1a67f, 0x651c46b0,
+0x4b637f1b, 0x00002981, 0x2c7f437f, 0xd9deb6ab, 0x9fba847f, 0x6f289b21, 0x015f811c, 0x8176f498,
+0xeac003ae, 0x68818192, 0xb37fa6a6, 0xd07f4aa5, 0x83819472, 0xa5da41a0, 0xbc7fbe7f, 0xa67fccd9,
+0x7f0c86c5, 0x116e7f15, 0x7f67b45a, 0x907fcf81, 0x81812c66, 0x05747283, 0xac962b7f, 0x2a7f81ea,
+0x81c2b57f, 0xb1b4b868, 0x7f811ed1, 0x7f66c937, 0xc881e6ab, 0x740323fd, 0xcf765745, 0x3592d370,
+0x5c81f3e5, 0x527f3efc, 0x818781d9, 0xf6819b53, 0x1c547f51, 0x4e29b37f, 0x36f1537f, 0xcb819758,
+0x538f5b7f, 0xa5172c22, 0x52427f7f, 0x6b7f8128, 0x7186134a, 0xf67f1b62, 0x6081e015, 0x829abe81,
+0xaa8a65fc, 0xd27fb70f, 0x7f0a0881, 0x3d7fc998, 0x443e78b0, 0x9443c354, 0x81266724, 0x9b81812b,
+0x813181a0, 0x7fddca68, 0x7f260f81, 0x7a818181, 0x812681ca, 0xf48152e0, 0x817f8388, 0x81306c7f,
+0x7f71d020, 0x81bd8481, 0x5234d5c2, 0x31be8136, 0x3403d432, 0x11286844, 0x81a57f1e, 0xe3b57fd4,
+0x7aa88f52, 0xabc64c52, 0x977f8181, 0x7fe68ca6, 0x5dd6eac3, 0xd26a81a9, 0x017f8183, 0x902cc266,
+0x7fba7f52, 0x617f81cf, 0x7f657ff0, 0x045d7f63, 0x3c04bb4e, 0xaa4e7f81, 0x847fe385, 0x67817f7f,
+0x42f33d28, 0x61a5aa5a, 0x817f7f1e, 0xa3a715d7, 0x5da91c3a, 0x7f9f7f81, 0xc48281ce, 0x813081fa,
+0x7f817e53, 0x6d227dbf, 0x6e7b0000, 0xf49b4091, 0xf6b3d8e9, 0x89fb29ae, 0x44289881, 0x3cc78181,
+0xe58103b9, 0x535bffd3, 0x817fa81b, 0x469f82f0, 0x57837fe1, 0xe14af07f, 0x54bf19b2, 0x7482595d,
+0x6781bbd2, 0x4b7b7fb7, 0xa4c45060, 0x380155c8, 0xb72aa381, 0x4530847f, 0xcb509581, 0x34818181,
+0xb03cd87f, 0x90818122, 0x812a8181, 0x817f677f, 0x2c2a597f, 0x35f0c581, 0x81b11261, 0x4d678120,
+0x816d7f5b, 0x448166d8, 0x0c567f61, 0x81189e21, 0xcf932f08, 0x4c5a917f, 0x687f9b7f, 0x257f81d2,
+0xcc7f6220, 0x8c947f81, 0x24642d1f, 0x728155d3, 0x14ca75d2, 0x817d7f38, 0x7f818181, 0x7fb31f87,
+0x13a581b3, 0xd9b5215f, 0xa8afdbf5, 0x147fe67f, 0x81caab97, 0xa96daa7f, 0xf88b897f, 0xe7937fc2,
+0x5a8b7f4b, 0x7d59277f, 0x8d687f9d, 0x7f924b81, 0xd41c82c0, 0xa5119c7f, 0x4fcfeba1, 0x3e077f81,
+0xaf65994c, 0x83d481a7, 0x81257fb0, 0xa481387f, 0x36d8a463, 0x8181811a, 0xc75643a7, 0x474c6c1a,
+0x7fc0e8cf, 0x8cdc896e, 0x55830655, 0x7f81337f, 0x7f65db1d, 0xa881cd81, 0x7f812ec8, 0x7f76cdd4,
+0x634a443d, 0xea7f3ed9, 0x90a8d093, 0xb05e819c, 0x7f7f5c4e, 0x811e5b81, 0x73a1b989, 0x8181717f,
+0x927d7f09, 0xdeaf7fca, 0x528e302a, 0x81654681, 0x7fe92e81, 0x87d58110, 0x1ca86e20, 0x7291a4cc,
+0xc76c7f81, 0x577f5136, 0x00005b6e, 0x817fe37f, 0xf7ac7f7f, 0xf258ee81, 0x81814c63, 0x2a97f57f,
+0x5f7ca0a7, 0x7f2fa37f, 0x94360c7f, 0x7f23117d, 0x81343a7f, 0x55b0d9de, 0xbae0189e, 0xc77f228a,
+0x817f7861, 0xa57f7fac, 0x878ede24, 0xb57fad5a, 0x5c907f93, 0x814e9b81, 0x7febf59c, 0x7724e1a9,
+0x569d9281, 0x1b644699, 0x4d4e26bf, 0x397f7f6e, 0x8ac93027, 0xe6885974, 0x51a2c1a4, 0x7f7f5b7f,
+0xaad8ac97, 0x122c8186, 0x97810a5b, 0x818581e2, 0xc77f7f35, 0x67611c60, 0x3f8178ea, 0xa47fa65e,
+0x888f8132, 0x81817cd4, 0x1081b4bd, 0x817f7fb8, 0x449a3fcc, 0x7f9832de, 0x51e96f58, 0xcbde52c0,
+0x6b515783, 0xbda3b297, 0xd64b6381, 0x7f693c81, 0x6d311abb, 0x73468e81, 0xde81817f, 0x4f811581,
+0x90d6817f, 0x152c9d49, 0xad7f17d7, 0x7f834681, 0xa47fe6dc, 0x06817fb3, 0x7f817f69, 0xd18190b1,
+0xca7f8183, 0x1881f381, 0xac50814a, 0x7f8dca7a, 0x71358131, 0x28783add, 0xd31b153b, 0x71919b97,
+0x81877f07, 0x707fca4f, 0x57c6814b, 0xc0a17fbf, 0xff5bb344, 0x4a7fa99d, 0x7434c104, 0xaa7ff0a5,
+0xb6ec6841, 0xb4917e1b, 0x5a81ab94, 0x81a3aab1, 0x257fce30, 0xe26b7fc7, 0x1d7a7fe5, 0xfc7ca6c0,
+0x81437ff7, 0x7dc59e14, 0x7f0cf9e9, 0x90e37f7f, 0x7f6f12bc, 0x52d36381, 0xc15f88ac, 0x3536277f,
+0x8d3e2a7f, 0xa18d7a7f, 0x48907f23, 0x7f810000, 0xfd813357, 0x6ed4520b, 0x843907e2, 0x66814378,
+0x7f7f6bd8, 0x0e3381ad, 0x03847f90, 0x05814769, 0x8181c181, 0x1c7f7fac, 0xe83cbfb5, 0xb15c735c,
+0x7f8f7f7f, 0x7f846b81, 0x81d2eb7e, 0xf7447f41, 0xa2b57f81, 0x2ba7f381, 0x817f75bc, 0x78553181,
+0xc7b07f7f, 0x7f557a41, 0x46a83ddb, 0x9b447d13, 0xccfe7b81, 0x7f3ae281, 0x75464bfb, 0x7f4e5560,
+0x7fad14ce, 0x98fd2a7f, 0x7f637f81, 0x4a22cf5c, 0xca81813e, 0x4ffc7347, 0x9d777fa5, 0x810585ff,
+0x71e57f81, 0x3f7f8181, 0xfce8819c, 0xbb315486, 0xf1e5a974, 0xf1e061dc, 0xcd7f8181, 0x8181817f,
+0x547fb97f, 0xa663c341, 0x9e5aab7f, 0x35768dad, 0x817f3b48, 0x46bc815f, 0x707f89b3, 0x860713c8,
+0x4a7c6532, 0x541084e8, 0x5f77bb7f, 0x299dbebe, 0xd3f27f8c, 0x25096727, 0x81194cba, 0x7ceff409,
+0x9c8c267f, 0x7f7b1fe0, 0x635c0fdd, 0x308132ac, 0x57330951, 0x7c257f81, 0x6266dd49, 0x7f89b481,
+0x817f7fae, 0x7f81aebd, 0xc7fb7f7f, 0x4537e27f, 0x1d288b7f, 0x7f4a9b7f, 0x8150969f, 0xf533f121,
+0x7f782217, 0xe428816e, 0x10ca6981, 0x6981b39f, 0xe35fdb7f, 0xc6814220, 0x79ba8158, 0x811e815d,
+0x772d8184, 0xc784810c, 0xa91abf8f, 0x3c7f6f7f, 0x6d9981bf, 0x9b42dbe9, 0x42948181, 0x5681817f,
+0xdba39bf2, 0xf77f7f37, 0x18eebbdb, 0x39c6b7b7, 0xdc2fb5b9, 0x99d00a7f, 0x02dbe492, 0x4481606e,
+0x7f6f81a1, 0x7f2f557f, 0x2aee7f6d, 0xed2ad2ae, 0x6ce4ec0c, 0xdb4cae3a, 0x81646c83, 0x7fa3c1be,
+0x7f383730, 0x81aeba9d, 0xc0528bdc, 0xd17f3982, 0x7fa47c7d, 0x81814e81, 0x813d81bc, 0x20816349,
+0x857fdfe9, 0xe46aa581, 0xe8de53b5, 0x34471a81, 0x3b4f7feb, 0x8174c981, 0x2fc7c735, 0xc818ec81,
+0xb28ed3df, 0x627f1100, 0x7f40067f, 0x7fd37fa8, 0x637c5b81, 0x113dd969, 0x81376881, 0x5d817f67,
+0x7f7fda7f, 0x7f7f7f70, 0x77577fb4, 0x87bb3d7f, 0x5b817f93, 0x839b517f, 0x7f45818e, 0xe47f3381,
+0x93c2b281, 0x944207f3, 0x36dc77b3, 0x50b86e4a, 0x8181e27f, 0x0b81a79b, 0xf9975950, 0x977b9e47,
+0xea7f8181, 0x5547b549, 0x3da47f0d, 0x474b8e72, 0x7f7f9ce1, 0x7f2e2e42, 0x6f7f45c9, 0x08d56ede,
+0x8524810b, 0x657fd15f, 0x7f8d0c14, 0x6c7fff7b, 0xd530f39d, 0x7fb4b023, 0x651d8a1b, 0x13817024,
+0x7f850774, 0x1f219db3, 0xd8817f52, 0xd95c0feb, 0x9ef0e67f, 0xd9dd4c7f, 0x64b0819a, 0xbc60e997,
+0x412fd0db, 0x817fa57f, 0xa3a47f81, 0x4d81815b, 0x81874e3a, 0x5f1e7c81, 0x7fc63b81, 0xbf817581,
+0xb5b0a5ab, 0xde2c8145, 0x81789629, 0x097481b8, 0xad5bdca3, 0x212d81e5, 0x4d817f3c, 0x70813761,
+0x5d6d0f40, 0x81a190eb, 0xc942c14d, 0x81b462d8, 0x81890000, 0x6586fbcf, 0x81cb3322, 0xa5c0b931,
+0x7f78e98d, 0x2492625b, 0xaf81b981, 0x4e59817f, 0x677fb9d3, 0x2c3b817f, 0x920e5981, 0x7f2c7f5d,
+0xc379d081, 0xdfbddf43, 0x85818172, 0x6781e442, 0xda9a7f14, 0xcac70881, 0x98818113, 0x0f817f6f,
+0xeba852c8, 0x7fa6407a, 0x7fdc8188, 0x819fd349, 0xee738137, 0xe881417f, 0xb6777fea, 0xc6947f73,
+0x8181240f, 0xa035c264, 0x7f419e81, 0x193f815f, 0xaedc812e, 0x7fb97f24, 0x924a81e6, 0x63f58183,
+0x3e4ec808, 0x7f4fbe86, 0x5faba653, 0xf0c4f4d7, 0x69cf647f, 0xfb55c681, 0x7fe1e381, 0x7f817f6b,
+0xc0410481, 0xc65d71b6, 0xba1f8135, 0x997f04cf, 0x49db5b1c, 0xbe817443, 0x817b812c, 0x36e18163,
+0xd1893a47, 0x231df6f2, 0x816d045e, 0x3a568139, 0x81628181, 0x6588ffd2, 0x23603081, 0xf5b4817d,
+0x8112b5bc, 0x7f9cfbae, 0xc02b8d40, 0xc7f20532, 0xce81487f, 0x0b1a6614, 0x7f17392f, 0x7f8f818c,
+0x7f7f8388, 0x5492467a, 0xbf5e3081, 0x777f26bb, 0xae9c4a7f, 0xae81ba0e, 0x6c6281ae, 0x81238143,
+0x7f997ffe, 0x798b949c, 0xc3d67f36, 0x38817ff9, 0x2f814ab6, 0x594bad4b, 0x7f6f7fd6, 0xab234181,
+0xaf818581, 0x77865381, 0x9d7f8173, 0x27ac8181, 0xbdbd8daf, 0x7c7f7f7f, 0xc6817fd9, 0x34412181,
+0x1fe7eb78, 0x7f38f49b, 0x81040b8f, 0x34ec8581, 0x0000907f, 0xd5818181, 0x7865c6d5, 0xada81975,
+0x0447e955, 0x7f3d09b3, 0x2b8156e8, 0x976d81dd, 0x29cee57f, 0xa34f537f, 0xe9a981f6, 0x0c7fe841,
+0x7f7eda7f, 0xd77f1881, 0x3f929481, 0x77e3c852, 0x0d817fc1, 0x7f4795ab, 0xd1337f4f, 0xdd4b077f,
+0x97a4b3d8, 0x607ff155, 0x942b16c3, 0x113a723c, 0x52ce7882, 0x5e81f5e9, 0x81755669, 0x7fea4d5f,
+0x5d6d7f81, 0x7f90e710, 0xe3817f6d, 0xe810299c, 0x4e814937, 0x3c269d81, 0x33a891aa, 0x81dd7ff3,
+0xa9d1db0d, 0xfb2181eb, 0x7f817fa4, 0x75ec817c, 0xf17f8195, 0x81e18181, 0x44321c58, 0x35a62913,
+0xcd5b0875, 0x397f8233, 0x797f7f81, 0x7f829cbc, 0x9a697f81, 0x07b7fea9, 0x7d2b0b7f, 0x8f984dce,
+0x0c13b97f, 0xe9203f3c, 0xea473769, 0x7b7fdc81, 0x7f7885c1, 0xde7f5b39, 0x32bd81f1, 0x67b4917f,
+0x443069cb, 0x6e972b83, 0xbc66829f, 0x7492817f, 0x7f4d7481, 0x7f25896f, 0x9ea0da7f, 0xceba7f81,
+0x7f45d87f, 0x93dbf04c, 0x50814197, 0xb7167f81, 0x6d7fad19, 0x0f81bea5, 0xc57f8177, 0xca4c7f7f,
+0x7d84e3f6, 0x7bc443d4, 0xd7b0ae81, 0xc8731f44, 0x06e481ac, 0x85b08181, 0x53cfe4be, 0x73761a3d,
+0x707f9981, 0xa87ff27f, 0x68b5b87f, 0xb561c081, 0x7f660001, 0x7b81521e, 0x6093614b, 0xbc778748,
+0x64400f81, 0x81d13281, 0x60475381, 0xbe768181, 0x21aad606, 0x81910000, 0x20b6a511, 0x5953d899,
+0xdd7f7fba, 0x8181fabe, 0x81b8e77f, 0x817ff17f, 0x6757257f, 0x9c2cc933, 0x51813593, 0x7f7fadef,
+0x8ec47f43, 0x348139d7, 0x7f5c82b7, 0x664419bc, 0x8d7b15a5, 0x8181a581, 0xd54d7f63, 0x7f0381b7,
+0x81a17f68, 0x814a5e0d, 0x17813dc4, 0xbd94872d, 0xa25a8101, 0xa6810e54, 0xd8568fc4, 0x7f7f3a7f,
+0x818478ba, 0x517f81c7, 0x70443b81, 0x7ce481b6, 0x7f817f7f, 0x81b27fe5, 0x4bc67f32, 0x81bbe681,
+0xa1c8fe81, 0x857f1c0d, 0x7f81793a, 0x7fbfcf74, 0xadd47f5c, 0x7f8138b4, 0x6f63cd25, 0xd70e497f,
+0x81aa7fc7, 0xbaa84f5a, 0x8197ccd3, 0x7f556b7e, 0xca815dff, 0xb9ba7f7f, 0x81747f7f, 0x7fdf967f,
+0x65a98181, 0x81bc8181, 0x8e11bab7, 0xbbbf7ffd, 0x12b8457f, 0x59fe8481, 0xf25cdd81, 0x958f840c,
+0x5e6a5f81, 0xe9008181, 0x7a68cb38, 0x76818914, 0xa3857fc7, 0xce98717f, 0x8126504d, 0x7fa48183,
+0xfa60e14a, 0x7fb77f22, 0x5f3a7f28, 0x24910bde, 0x2fd07f4f, 0x64a6810b, 0x818181a4, 0x7a065fb8,
+0x7fe69795, 0x45815e3b, 0x7fa98741, 0x52c581b8, 0x74bb2a22, 0x817a8166, 0xc2cb6581, 0x493c7f7f,
+0x883e816b, 0x3c7f7f7f, 0x7f3381c1, 0xab767f6d, 0x81b4588c, 0xb2e24cb9, 0xd9bccba1, 0x70e0162c,
+0xea1c810a, 0x907fdd49, 0x424aece8, 0xd90423b0, 0x22df1b28, 0x6b005d25, 0x113e61b3, 0xa2103bbe,
+0x20d6c538, 0x17e6c841, 0xdc08e2c4, 0x2002b1ef, 0xc51a77e3, 0x8111f752, 0xea698110, 0x1849b6e8,
+0xe6b006d2, 0xdc40b507, 0xc738b9ec, 0x42290a3f, 0x522355bc, 0x590736d3, 0xd9a340e5, 0xe30a19b5,
+0x3de9eff9, 0x4437cc7f, 0xecfd0a7b, 0x34e4fcc7, 0x4afe1556, 0x1ecb301f, 0x0f511d1d, 0xf8ec40db,
+0x4c07caf9, 0x59f73ebd, 0x45064328, 0x03c27f19, 0xd1661dcb, 0xe4e3ae17, 0x000fcd5b, 0xdd121ec8,
+0x3adb1413, 0x112b9888, 0x1d3624fd, 0xa8a347f6, 0xfeb632d5, 0x1d28fc11, 0x21ac2b5f, 0x371fe9f7,
+0x3d5ef435, 0xe84000a1, 0xaebebcc9, 0x34555ff0, 0x39a9ca2a, 0x580fc0e6, 0xcf7a5699, 0xf5e65242,
+0x07ae3170, 0x06b223ea, 0x638e421b, 0xd8dc0cab, 0x66c44fb0, 0x954117d7, 0x9691a163, 0x415bc8a1,
+0x0868d844, 0x9593f551, 0x364bec3b, 0x57460bfb, 0xdc310e3e, 0xe4dbef28, 0xc6443317, 0x81a02211,
+0x450c2323, 0xefca9dcf, 0xfebf3fbf, 0xafef377c, 0x3f81ac26, 0x13b4e548, 0xf60a05a9, 0x03d4d840,
+0x1970ecd3, 0x95019cfd, 0x8128341d, 0xc4d8d440, 0x16b80a75, 0xc2332b1d, 0x47e90e1b, 0x2e4eadcc,
+0x0aa5f04b, 0x03302c42, 0x1a1ed437, 0x3b623efa, 0xc1cec834, 0x4442d130, 0xdce837f0, 0xc3f90934,
+0x5ea50731, 0x257f1504, 0x2da5defd, 0x22d3dd34, 0xc429fa2b, 0xf2cc0a0b, 0x3e2e0000, 0x020f1c20,
+0x2e962a1f, 0x06edf5f3, 0xceb5dfe4, 0x3b213d57, 0x08c24237, 0xf32ad940, 0x321768b0, 0xb133ebcd,
+0xfca78110, 0xc03001db, 0xfca42f7f, 0x7071645f, 0x30bf8185, 0xd1f6d479, 0x02076839, 0x34cd900d,
+0x500cda81, 0xe542b08e, 0x282afac6, 0x30a835ce, 0xbc710153, 0x574b009c, 0xe8b24309, 0x02d2cbc8,
+0xaae1ea5a, 0x190eff7f, 0xbc656b07, 0xfdc8094e, 0xc0ee4452, 0x2a819969, 0xb79a0a0e, 0xe334a040,
+0x12323acf, 0xd9617f3c, 0x3709af42, 0x8b67b338, 0xff00e313, 0x5046e8de, 0x45ec1107, 0x4e2618c9,
+0xb40a62dd, 0x0da39629, 0x0a62a937, 0xc9d92e1d, 0xcf4daf81, 0x3f26a9a3, 0xd01e3f2b, 0xac294211,
+0xd7c221e7, 0x15c928f2, 0xd79412cb, 0x110fb1fc, 0xddd3fa22, 0x4e7fa8ad, 0xd4a0ebf8, 0x5b144a01,
+0x4cf90bdc, 0xb43366e7, 0x1cb8503e, 0xff152c3a, 0xae04fd5f, 0xd896041e, 0x1c2ca245, 0xd7adf0e3,
+0x36ee2de1, 0xf0350023, 0x2e529db9, 0xba18d1bc, 0xbb2aba00, 0x52c7c4fb, 0x09df6147, 0x1102f72e,
+0x13054b2f, 0xf9f9fced, 0x292a14c9, 0x60095adf, 0x8b43fec1, 0x3ca5a3c9, 0xe73665c7, 0x2f12b049,
+0xb0fadecf, 0xd0e46081, 0x42532cf0, 0xe420e3fe, 0xaf5b0501, 0x27f0b481, 0x871c0ec6, 0xcd1c8cb7,
+0xe4135816, 0x0a2aab7f, 0xbf732d1d, 0x613f1aee, 0xac09b927, 0x7d663532, 0x0000dee4, 0xfdeccc4a,
+0x34b86af1, 0xf0bb16dd, 0x5829085f, 0x51ffdcc2, 0xd50c9f39, 0x64dfc723, 0x2cd52b2b, 0x083e0b2b,
+0x224ae1b5, 0x0fd63733, 0xf081cef7, 0x7f73e3c4, 0x36cca9d4, 0xcc2600ae, 0x0757af1e, 0x21cc2ef7,
+0xf175c93f, 0x59221d2d, 0xe6c7f468, 0xe3ea1fcb, 0xcd3d539a, 0x271e345c, 0x3555341c, 0xd2532b7f,
+0x7fc8c57f, 0x4025dbe5, 0xf6926f3d, 0x264d39db, 0x2ed7c504, 0xde2a554b, 0x25aeb624, 0xd031a1dc,
+0x47dbd918, 0xa0004941, 0x0202e11c, 0xd407fda2, 0xd9bd0fdb, 0x30f94d53, 0xd32b1cc6, 0xa6cb1947,
+0x16fa1b49, 0xc70d1f56, 0xa7daec0d, 0x813d4649, 0x0f520200, 0x0d174704, 0x337f521a, 0x52c2cf34,
+0x6b280539, 0x81da4508, 0x0ef2a91d, 0x07121df9, 0x031e410e, 0xf0180925, 0x055022b0, 0x2156f018,
+0xa2e812c6, 0xfbe05cbb, 0x232e4462, 0xed378bca, 0xcd31afa7, 0x063a2e09, 0xe836ff1b, 0xefdeaf26,
+0xfeef274a, 0xc9a1159d, 0x66db4731, 0x05a50ace, 0x1b4c58fd, 0x6b0ba273, 0x251319b8, 0xc7523041,
+0xa20364b7, 0x34812fc4, 0x055f5d29, 0x5858fefa, 0xf809192d, 0x2f063981, 0xce056e8e, 0x1f7e68cc,
+0xbbc07f8b, 0xcc1c1240, 0xf45c1452, 0xeb1deac8, 0xe3f9d3fe, 0x12bc2a12, 0x515bd8d4, 0xb3b361d6,
+0x3304f1dc, 0x63060dbb, 0x41c8c73d, 0xce985d1d, 0xd2b2544b, 0x6d241fd4, 0xb6b8b1c5, 0x21830000,
+0x15efc4c3, 0x2c430fc1, 0x14400bef, 0x2c33083e, 0xbe04fb07, 0xad42d311, 0x4ed2b731, 0x1627d2d2,
+0x07aa0f34, 0xfe38df20, 0xe35bcd1b, 0xe13bfbcf, 0xcc3df1c9, 0x84355a1c, 0x55e602d2, 0x47c52a81,
+0x16d85858, 0x0d11d8d2, 0x54f4b081, 0x303523ad, 0x2dbc16e3, 0xb9d6f0d7, 0x13f76190, 0xeec0133a,
+0x81eccebc, 0x46202d2b, 0x86b52303, 0xa1f928ba, 0xe421b651, 0xb13cf63f, 0x3136c8e6, 0x4a8128b9,
+0x2d7f231a, 0x262cf3e3, 0x9c35d3f9, 0xe9e6753a, 0xefeff441, 0xd4bcd301, 0x42700bd5, 0x29ab001b,
+0x28bb6207, 0x01f537d1, 0xbc57fd38, 0x4999290e, 0x3e7f04fe, 0x36adb7fa, 0xdb0314ce, 0xa1d6524e,
+0xeee5c994, 0x1d1ce32e, 0xee142708, 0x3bf9e663, 0x06521283, 0xe8c6e6a9, 0x53356ee3, 0xe2cd5d4e,
+0x8e1bdacc, 0x382c18e2, 0xc5e2fe00, 0xcee2eaf8, 0xb7532ece, 0xfbea01bf, 0x3ecf7f2d, 0x06b2ffaf,
+0x5acbfe35, 0x26c8eefc, 0x8e27d35b, 0x84291501, 0x32f82ffa, 0x9f3a10cc, 0xabcff198, 0x139a3da9,
+0x3bd92af7, 0x264febe8, 0xbf48f81b, 0xa1df1aa2, 0x2c0a10ca, 0x04c00073, 0x5681e760, 0x411054e5,
+0x88d8c5a0, 0xe3f6b6fd, 0x2a3030ea, 0x0f0ec8b9, 0xa827cbe8, 0x2a2c7f49, 0x51ef20d2, 0xa48c4a81,
+0x2b8db1d9, 0xd150813e, 0xd7aab201, 0x36061afd, 0xeed80a8d, 0xcd9932d0, 0xedd3edbe, 0x38f4f017,
+0x9853f907, 0xc964f464, 0xc68d40f0, 0x3f5bcc44, 0x975004ec, 0x102d467f, 0xf3f9451d, 0xcde706bb,
+0xc87fa602, 0x3f5a88b0, 0x8119c002, 0x3cc14ac8, 0x607abeb7, 0xf95924cd, 0xfb1afa2c, 0xc4967f36,
+0xf425dacb, 0xa32d09aa, 0x35106590, 0xad4e5b4d, 0xa1fda650, 0x0326e143, 0xfbf054e0, 0x589fe1bc,
+0xc83249b4, 0xb0b81e1b, 0x7081fec8, 0x27530027, 0x84dc89ca, 0x76551d3a, 0x0bbcc3d9, 0x8108bade,
+0xb534f6cc, 0x87833920, 0x07ec1b0e, 0x5fd30b29, 0xf124dd07, 0xc981b03d, 0x71b9417e, 0x476c76b6,
+0x7fcb3662, 0xaec632e9, 0x2e6705dd, 0xaec5acf9, 0xc84cd6de, 0x5312d428, 0x12fc4738, 0xd31d5b1e,
+0x61edd156, 0x60fdfddb, 0xff676a0c, 0x2a3baad6, 0x0d1f6eab, 0x0001dcce, 0x180a31ca, 0x1ba681ec,
+0x90871091, 0xe2ef01f5, 0x35186658, 0x95cac76a, 0x1ae32a09, 0xebedc8df, 0x0da7c007, 0x274fc73f,
+0xd62a1ff5, 0xd6d7403a, 0xc4921d32, 0xb5f6cadc, 0xdd34f021, 0xe9334bfe, 0x5c9256c1, 0xbecbfd00,
+0x139acdb2, 0xe3913eab, 0xceed1c3b, 0xdd2c5aa6, 0xfb475d17, 0x4633f4e7, 0x373b4b4f, 0x72ee99f3,
+0xb65542d3, 0x48eb1d39, 0xa6d32fae, 0xc20aabe4, 0x105f4c0d, 0x4848bb65, 0x3bd329d2, 0x470c4709,
+0x43fb2438, 0x0620c0d7, 0x4f962821, 0xfd2f992c, 0x51f8aea5, 0x2521cfe9, 0xbf6e2f9d, 0x005e3653
+
+hard_output0 =
+0xb5bc6ac8, 0xf5373664, 0x1310345c, 0xd5bae4e7, 0x1fc9e83e, 0xebfdfded, 0x84bd86ab, 0xb7aabe00,
+0x60b44fea, 0xb9067464, 0x30325378, 0xa9195955, 0xf70c6e5c, 0x90922632, 0xc90b1cdb, 0xf2f5fb69,
+0x73056b63, 0x1a33bf3f, 0x17755b5c, 0xc58bff6d, 0x2f4390b2, 0x2869d508, 0xe7c7dfe8, 0x38552963,
+0x21da5367, 0x07282b9b, 0xa4767105, 0x1e294251, 0xe350a940, 0xb8a6aa27, 0xed12d778, 0xf10d9ece,
+0xab93527f, 0xcf2da7e7, 0x68f6d0b1, 0x811f4bca, 0x577b06b2, 0x3234f13e, 0x30bab7df, 0x8dc47655,
+0xbb843bed, 0x86da3aba, 0x30950c97, 0xdd096d7a, 0xa871fd6c, 0x8bee4e6b, 0x8fea30d0, 0x6c05b4d2,
+0xf3e144d3, 0xd24ebb1f, 0x065635e5, 0x8d3f2cf9, 0x536c6c6a, 0xfbb0a5d0, 0x3d707b42, 0xc44d5982,
+0xa5f4ad8f, 0xf32c0970, 0x1bccf1a6, 0x05916020, 0xa64fb176, 0x5ede6a35, 0xaf4966da, 0x9df5e0e7,
+0x75042abc, 0x9ef10481, 0x11ddcbc8, 0xa0f5518c, 0xd5c23418, 0x2393d558, 0xfbe7dfeb, 0xed1c64c2,
+0x86a36508, 0xde2dfb1e, 0xb8d0fef9, 0x24505232, 0xc894e71c, 0xbcc752a0, 0x40b74e83, 0x90d23c8c,
+0x728e4a61, 0x108f0b08, 0x66f522ee, 0xc258d851, 0x35a31c44, 0x11311b5b, 0xfd3d5be9, 0x5ae448ff,
+0x4f64994b, 0x5b8247a9, 0x4021114d, 0x2f0b6e82, 0x5eaa9828, 0x50ac71c0, 0xfb86ee52, 0x0dc1ac9b,
+0xbbd47645, 0x8f357115, 0x978ceea0, 0xd557db99, 0x99b30388, 0xfc9a8a1c, 0x0f75be1a, 0x50143e22,
+0x8840989b, 0x738ec50e, 0xe6b2783d, 0xf67899c8, 0x27ebed69, 0x6c415a16, 0x3a6cc2dc, 0xcd4e4e5d,
+0x6cb12b2e, 0xdb88d7c0, 0x79cd1582, 0xbc422413, 0xe72ad2f4, 0x8eaac30f, 0x0bd86747, 0x6d87f69d,
+0x15d62038, 0x4b375630, 0x0d51b859, 0x16db2cb2, 0xf210603a, 0x0abeb833, 0x55c694d0, 0xe57ca43b,
+0x0ba94428, 0x1398a406, 0xe47d3889, 0x5a20203d, 0x250d7a1a, 0xd930ffec, 0x03992e79, 0xf2759376,
+0x024ec121, 0x91fc3a2c, 0xb7e11cc5, 0x4ff7d459, 0xb8700134, 0xd6e61758, 0x4eba0a32, 0xb747e3ec,
+0x7073fad7, 0xded80f99, 0x331e2f1b, 0xfa1f1bed, 0x056424a2, 0x1d1d95e0, 0x550b9ec8, 0x51ee2a38,
+0x19525153, 0xd70c4cd5, 0x0d6cd7ad, 0xe44d1cf2, 0x30dfecda, 0xdacd7fe8, 0x7321d795, 0xddf48ef4,
+0xe271e6a4, 0x9c1feecb, 0x951fcd7b, 0x8acc5a03, 0x3fb83527, 0xe306de74, 0x7b9cd6ee, 0x8e140885,
+0xd4c91e8d, 0xe8c39733, 0x0f02f87f, 0xfb06b1b9, 0x0dc9349c, 0xf76bae8e, 0x4f642a07, 0x3d48a9aa,
+0xe3ea323a, 0xa1cd5c8a, 0x40aa0e70, 0x132042d3, 0xa9732f6c, 0xd15a00c4, 0x43d3b046, 0x9a51ebd4,
+0xc46ee0ed, 0xe2a2148b, 0xf5c478f0, 0x1fb01cf3, 0xf4f321ec, 0xd973811f, 0x11ad11b9, 0x5c67adda
+
+soft_output0 =
+0x81818181, 0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x817f817f, 0x81817f81,
+0x7f817f81, 0x7f817f81, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x817f8181, 0x81817f81,
+0x7f7f7f81, 0x817f7f81, 0x7f7f8181, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x817f8181, 0x7f7f8181,
+0x81817f7f, 0x81817f7f, 0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f817f, 0x7f817f7f,
+0x7f818181, 0x8181817f, 0x81818181, 0x7f817f7f, 0x7f7f817f, 0x81817f81, 0x7f7f8181, 0x817f817f,
+0x7f817f81, 0x7f7f8181, 0x817f7f81, 0x81818181, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f8181,
+0x8181817f, 0x817f7f81, 0x81818181, 0x817f8181, 0x7f7f817f, 0x7f7f7f81, 0x7f81817f, 0x7f81817f,
+0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x817f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, 0x7f81817f,
+0x817f7f81, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f, 0x7f81817f, 0x7f7f817f,
+0x81817f81, 0x81817f81, 0x81817f81, 0x817f817f, 0x7f817f7f, 0x7f81817f, 0x81818181, 0x8181817f,
+0x7f817f7f, 0x817f7f7f, 0x7f7f817f, 0x817f7f81, 0x7f7f7f81, 0x81818181, 0x817f8181, 0x7f7f7f81,
+0x7f817f81, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x7f7f8181, 0x81817f7f, 0x7f7f8181, 0x7f7f817f,
+0x7f7f817f, 0x81817f7f, 0x8181817f, 0x817f8181, 0x81817f81, 0x817f7f7f, 0x817f7f81, 0x7f81817f,
+0x7f7f7f7f, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x81818181, 0x817f7f81, 0x7f817f81, 0x7f817f81,
+0x817f7f81, 0x7f818181, 0x7f81817f, 0x817f817f, 0x7f7f817f, 0x81818181, 0x7f818181, 0x8181817f,
+0x817f7f81, 0x7f81817f, 0x7f7f7f7f, 0x81817f81, 0x817f8181, 0x7f818181, 0x817f8181, 0x81817f7f,
+0x7f7f8181, 0x81817f81, 0x7f7f817f, 0x817f817f, 0x81818181, 0x7f81817f, 0x817f817f, 0x81818181,
+0x817f8181, 0x817f8181, 0x817f8181, 0x7f817f81, 0x7f7f7f81, 0x7f7f8181, 0x7f817f7f, 0x7f7f817f,
+0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f,
+0x7f817f7f, 0x817f817f, 0x7f817f81, 0x7f817f81, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x81817f81,
+0x817f7f81, 0x7f818181, 0x7f7f8181, 0x817f7f81, 0x7f817f81, 0x817f7f7f, 0x7f7f8181, 0x81817f7f,
+0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f,
+0x8181817f, 0x817f8181, 0x81817f81, 0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x81817f81, 0x7f7f8181,
+0x8181817f, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x8181817f, 0x8181817f, 0x81817f81, 0x817f8181,
+0x81818181, 0x7f817f7f, 0x81817f81, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x8181817f, 0x817f8181,
+0x817f7f81, 0x817f7f81, 0x7f817f81, 0x817f8181, 0x7f7f7f81, 0x7f81817f, 0x81818181, 0x8181817f,
+0x7f7f8181, 0x7f7f7f81, 0x81817f7f, 0x7f818181, 0x817f7f7f, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f,
+0x8181817f, 0x7f81817f, 0x817f7f7f, 0x817f817f, 0x7f7f7f7f, 0x7f817f81, 0x8181817f, 0x7f81817f,
+0x7f81817f, 0x81817f7f, 0x817f7f81, 0x7f817f81, 0x7f81817f, 0x7f7f7f7f, 0x81817f81, 0x81817f7f,
+0x81817f7f, 0x81817f7f, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x7f7f8181, 0x7f7f8181,
+0x817f7f7f, 0x81818181, 0x7f818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f817f,
+0x7f7f8181, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x7f7f817f, 0x81817f81, 0x817f8181, 0x7f7f817f,
+0x81817f7f, 0x7f7f7f81, 0x7f81817f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x817f817f, 0x81817f7f,
+0x817f817f, 0x817f7f81, 0x81818181, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f,
+0x7f81817f, 0x7f818181, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f81,
+0x7f7f7f7f, 0x7f7f8181, 0x7f817f81, 0x7f818181, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x8181817f,
+0x817f817f, 0x7f7f7f81, 0x7f818181, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f817f, 0x81817f7f,
+0x817f7f7f, 0x81818181, 0x8181817f, 0x81817f7f, 0x7f7f7f7f, 0x817f8181, 0x7f817f81, 0x817f8181,
+0x817f7f7f, 0x7f7f7f81, 0x817f8181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f81817f, 0x817f817f,
+0x7f817f7f, 0x7f818181, 0x7f818181, 0x7f817f7f, 0x81817f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f81,
+0x81817f81, 0x817f7f7f, 0x81817f7f, 0x81817f81, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f,
+0x7f817f81, 0x81817f81, 0x7f7f7f81, 0x817f8181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f817f7f,
+0x817f7f81, 0x7f7f817f, 0x7f7f8181, 0x7f7f8181, 0x81818181, 0x7f81817f, 0x7f81817f, 0x817f8181,
+0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f817f81, 0x7f817f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81,
+0x8181817f, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x81817f81, 0x7f818181, 0x7f7f7f7f, 0x7f81817f,
+0x8181817f, 0x8181817f, 0x7f7f7f7f, 0x7f818181, 0x817f7f81, 0x7f7f817f, 0x817f7f81, 0x81818181,
+0x817f817f, 0x7f818181, 0x81817f81, 0x81818181, 0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x817f817f,
+0x817f7f7f, 0x817f8181, 0x81817f81, 0x81818181, 0x817f817f, 0x7f817f7f, 0x7f817f7f, 0x7f817f81,
+0x817f7f7f, 0x81817f81, 0x817f7f81, 0x7f7f817f, 0x7f817f7f, 0x7f81817f, 0x8181817f, 0x81817f81,
+0x81817f7f, 0x7f817f81, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x817f7f7f, 0x7f817f81, 0x8181817f,
+0x817f8181, 0x7f81817f, 0x81817f7f, 0x7f817f81, 0x81818181, 0x7f81817f, 0x7f81817f, 0x7f81817f,
+0x7f7f817f, 0x7f817f7f, 0x7f81817f, 0x817f7f81, 0x7f81817f, 0x81818181, 0x7f7f8181, 0x7f818181,
+0x8181817f, 0x81818181, 0x8181817f, 0x817f817f, 0x7f81817f, 0x81817f7f, 0x7f81817f, 0x7f817f7f,
+0x81817f81, 0x7f81817f, 0x7f7f817f, 0x817f8181, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f817f7f,
+0x817f7f7f, 0x7f7f817f, 0x7f818181, 0x817f8181, 0x7f7f7f81, 0x81817f7f, 0x81817f7f, 0x7f7f7f7f,
+0x7f818181, 0x7f7f8181, 0x7f81817f, 0x7f7f817f, 0x8181817f, 0x7f81817f, 0x8181817f, 0x81817f7f,
+0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x817f8181, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x81818181,
+0x817f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x81817f81, 0x81817f7f,
+0x7f818181, 0x817f7f81, 0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f817f, 0x7f817f81, 0x7f7f7f81,
+0x7f818181, 0x7f7f817f, 0x81818181, 0x817f7f81, 0x81817f7f, 0x8181817f, 0x817f8181, 0x8181817f,
+0x7f818181, 0x7f817f81, 0x8181817f, 0x7f817f7f, 0x8181817f, 0x7f817f7f, 0x7f7f7f81, 0x817f7f7f,
+0x817f8181, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x81817f7f, 0x7f7f8181, 0x81817f81, 0x7f7f7f7f,
+0x7f818181, 0x7f818181, 0x7f7f7f7f, 0x8181817f, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f7f817f,
+0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f817f, 0x7f817f81, 0x81818181, 0x817f8181,
+0x7f7f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, 0x81817f81, 0x7f7f7f81,
+0x817f8181, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x7f7f7f81, 0x7f818181, 0x817f7f7f, 0x7f7f7f7f,
+0x817f7f81, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x81817f81, 0x81817f81, 0x7f7f7f81,
+0x8181817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f,
+0x7f817f81, 0x81818181, 0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x817f817f, 0x8181817f, 0x7f818181,
+0x817f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f7f7f, 0x7f81817f,
+0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f7f8181, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, 0x8181817f,
+0x817f7f81, 0x7f81817f, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f,
+0x817f817f, 0x7f81817f, 0x817f7f81, 0x8181817f, 0x81817f7f, 0x81818181, 0x817f817f, 0x7f7f817f,
+0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x81817f81, 0x7f7f7f81, 0x7f81817f, 0x7f7f7f81,
+0x8181817f, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, 0x7f7f8181, 0x817f817f,
+0x7f7f8181, 0x7f7f7f81, 0x817f8181, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x7f7f817f, 0x7f817f81,
+0x7f817f81, 0x7f817f81, 0x7f817f81, 0x817f8181, 0x7f7f7f7f, 0x7f817f81, 0x817f7f81, 0x7f7f7f81,
+0x7f7f817f, 0x7f817f7f, 0x7f7f7f7f, 0x817f7f81, 0x81817f7f, 0x81817f7f, 0x81818181, 0x7f81817f,
+0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f817f7f,
+0x81818181, 0x7f817f7f, 0x7f7f817f, 0x81818181, 0x8181817f, 0x81817f7f, 0x81818181, 0x817f817f,
+0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f818181, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x81817f7f,
+0x8181817f, 0x817f817f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x817f7f7f, 0x817f817f,
+0x817f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f7f81, 0x7f817f7f, 0x81817f7f, 0x81817f81, 0x7f817f81,
+0x817f7f7f, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x81817f81, 0x81818181, 0x7f817f7f,
+0x8181817f, 0x817f7f7f, 0x81817f7f, 0x7f817f7f, 0x817f7f7f, 0x817f7f81, 0x7f7f7f81, 0x81817f7f,
+0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x817f7f7f, 0x81818181, 0x7f7f817f,
+0x817f817f, 0x81817f81, 0x817f7f81, 0x81817f7f, 0x7f7f7f81, 0x7f817f7f, 0x81817f7f, 0x817f817f,
+0x7f817f7f, 0x7f817f7f, 0x81817f7f, 0x817f7f7f, 0x81818181, 0x8181817f, 0x7f7f8181, 0x7f817f7f,
+0x7f817f81, 0x7f7f8181, 0x81817f81, 0x817f7f81, 0x7f818181, 0x7f817f81, 0x7f7f7f81, 0x8181817f,
+0x81817f7f, 0x817f7f7f, 0x7f818181, 0x7f818181, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x7f817f81,
+0x81817f7f, 0x81818181, 0x7f81817f, 0x7f7f8181, 0x817f817f, 0x817f7f81, 0x81817f81, 0x817f7f81,
+0x81818181, 0x817f7f7f, 0x7f818181, 0x81818181, 0x7f7f817f, 0x817f817f, 0x7f7f817f, 0x7f817f7f,
+0x7f817f81, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x817f8181, 0x817f817f, 0x7f818181,
+0x817f7f81, 0x817f7f7f, 0x81817f81, 0x81818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f8181,
+0x7f818181, 0x81817f81, 0x7f817f7f, 0x7f7f7f81, 0x7f7f817f, 0x817f7f81, 0x7f7f7f81, 0x81818181,
+0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x7f81817f, 0x817f8181, 0x7f81817f,
+0x8181817f, 0x81818181, 0x7f7f817f, 0x817f8181, 0x7f817f7f, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f,
+0x8181817f, 0x7f7f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x817f7f81, 0x7f7f817f, 0x7f81817f,
+0x81817f81, 0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f7f, 0x7f817f81, 0x7f817f81, 0x7f7f817f,
+0x81818181, 0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f81817f, 0x7f7f8181, 0x7f7f8181, 0x81817f7f,
+0x7f817f81, 0x817f817f, 0x7f817f7f, 0x81818181, 0x7f817f7f, 0x81817f7f, 0x7f817f81, 0x81817f81,
+0x7f7f7f81, 0x81818181, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f7f81, 0x7f818181, 0x7f7f8181,
+0x7f7f7f81, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x817f8181, 0x817f8181, 0x817f7f81,
+0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x81817f81, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x81817f7f,
+0x81817f7f, 0x7f817f7f, 0x81817f7f, 0x81817f7f, 0x7f817f81, 0x7f7f8181, 0x7f7f817f, 0x7f7f7f7f,
+0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x7f817f81, 0x81818181, 0x81817f81, 0x817f7f7f, 0x7f7f817f,
+0x817f7f7f, 0x7f7f7f81, 0x81817f81, 0x817f8181, 0x7f7f817f, 0x7f7f817f, 0x7f7f8181, 0x817f7f81,
+0x8181817f, 0x8181817f, 0x7f7f7f81, 0x7f7f817f, 0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x817f817f,
+0x7f7f7f81, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81,
+0x81817f7f, 0x8181817f, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x81817f7f, 0x7f818181, 0x7f817f81,
+0x8181817f, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x8181817f, 0x817f817f, 0x81817f81, 0x7f7f8181,
+0x7f7f7f7f, 0x7f81817f, 0x817f817f, 0x81817f81, 0x81817f81, 0x81817f7f, 0x81817f81, 0x81817f81,
+0x7f7f817f, 0x7f7f7f7f, 0x817f7f7f, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x7f7f8181, 0x817f7f7f,
+0x7f7f7f81, 0x7f817f81, 0x7f7f7f7f, 0x7f817f81, 0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x7f817f7f,
+0x81817f81, 0x7f7f7f81, 0x7f81817f, 0x81817f81, 0x7f817f81, 0x817f8181, 0x7f817f81, 0x7f817f7f,
+0x81818181, 0x817f817f, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x81818181, 0x8181817f, 0x7f817f81,
+0x81817f7f, 0x7f7f7f7f, 0x7f7f817f, 0x7f81817f, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x7f817f81,
+0x817f817f, 0x817f7f81, 0x81818181, 0x81817f7f, 0x817f7f81, 0x817f8181, 0x81817f7f, 0x81817f7f,
+0x7f7f817f, 0x817f817f, 0x7f817f81, 0x7f817f81, 0x81818181, 0x817f817f, 0x7f7f7f81, 0x81817f7f,
+0x817f817f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f7f, 0x817f7f81, 0x817f7f81, 0x817f8181, 0x8181817f,
+0x81818181, 0x81817f7f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f817f, 0x7f7f7f7f, 0x7f817f81,
+0x8181817f, 0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x81817f7f,
+0x7f7f7f81, 0x81817f81, 0x7f818181, 0x81817f7f, 0x7f7f817f, 0x7f818181, 0x81818181, 0x7f818181,
+0x7f7f7f81, 0x81817f7f, 0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x7f817f7f, 0x7f817f7f, 0x7f817f7f,
+0x7f7f817f, 0x7f817f81, 0x7f81817f, 0x7f7f7f81, 0x81818181, 0x7f7f8181, 0x817f8181, 0x7f817f81,
+0x7f817f81, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x817f7f7f, 0x7f81817f, 0x7f817f7f,
+0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x817f817f, 0x81817f81, 0x7f7f817f, 0x7f817f7f, 0x817f817f,
+0x7f7f7f7f, 0x817f7f81, 0x817f8181, 0x7f7f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x7f7f817f, 0x8181817f,
+0x817f7f81, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x817f7f7f,
+0x817f817f, 0x81817f7f, 0x81818181, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x817f817f, 0x81817f81,
+0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x8181817f, 0x7f7f817f, 0x817f7f81, 0x817f7f81, 0x7f818181,
+0x817f7f7f, 0x8181817f, 0x817f8181, 0x7f7f817f, 0x7f818181, 0x817f8181, 0x7f7f7f81, 0x817f8181,
+0x8181817f, 0x7f817f81, 0x81817f7f, 0x817f8181, 0x817f817f, 0x817f7f81, 0x81818181, 0x81818181,
+0x8181817f, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x817f817f, 0x7f818181, 0x817f8181, 0x7f7f817f,
+0x7f7f7f81, 0x7f818181, 0x7f818181, 0x7f81817f, 0x81817f81, 0x7f818181, 0x7f7f8181, 0x7f817f7f,
+0x8181817f, 0x7f817f7f, 0x817f7f81, 0x7f817f81, 0x8181817f, 0x81818181, 0x817f7f7f, 0x817f817f,
+0x7f7f7f7f, 0x817f8181, 0x81818181, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x817f7f81, 0x81817f81,
+0x7f818181, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x7f81817f,
+0x81817f81, 0x817f7f7f, 0x817f817f, 0x7f7f817f, 0x7f7f8181, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f,
+0x7f81817f, 0x7f81817f, 0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x8181817f, 0x817f7f81, 0x7f818181,
+0x7f818181, 0x817f7f81, 0x7f817f81, 0x7f7f7f7f, 0x81817f81, 0x81818181, 0x7f818181, 0x7f81817f,
+0x81817f81, 0x817f7f7f, 0x7f7f817f, 0x7f7f817f, 0x81817f81, 0x7f817f81, 0x7f7f8181, 0x81817f7f,
+0x817f817f, 0x81817f7f, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f817f81, 0x817f7f7f,
+0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x7f818181, 0x7f817f81, 0x81818181, 0x817f817f, 0x7f7f7f7f,
+0x81818181, 0x7f7f8181, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x8181817f, 0x81817f7f, 0x817f817f,
+0x817f7f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x817f8181, 0x81818181, 0x7f7f7f7f, 0x7f7f817f,
+0x817f7f7f, 0x7f7f817f, 0x817f8181, 0x7f817f81, 0x7f818181, 0x7f81817f, 0x7f81817f, 0x81818181,
+0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x81818181, 0x7f817f81, 0x81818181, 0x7f817f81, 0x81818181,
+0x817f8181, 0x7f7f7f81, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x817f7f81,
+0x7f81817f, 0x817f7f7f, 0x81817f81, 0x817f7f81, 0x81818181, 0x7f7f7f7f, 0x81817f7f, 0x81817f81,
+0x81817f81, 0x81817f81, 0x7f817f81, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f7f817f, 0x81817f81,
+0x7f7f817f, 0x8181817f, 0x817f7f81, 0x81818181, 0x817f7f7f, 0x817f8181, 0x7f7f817f, 0x817f7f7f,
+0x8181817f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x81817f81, 0x817f7f7f, 0x81818181, 0x817f7f7f,
+0x81817f81, 0x7f81817f, 0x7f81817f, 0x7f818181, 0x817f7f81, 0x817f817f, 0x7f81817f, 0x81818181,
+0x81817f81, 0x81818181, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x8181817f, 0x8181817f, 0x81818181,
+0x81818181, 0x81818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f7f81, 0x7f818181,
+0x817f7f7f, 0x7f817f81, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x7f817f81, 0x817f7f7f, 0x81817f81,
+0x7f7f7f7f, 0x817f7f81, 0x817f7f7f, 0x7f817f81, 0x81817f7f, 0x81817f81, 0x817f7f81, 0x81817f81,
+0x817f817f, 0x7f817f81, 0x81817f7f, 0x7f7f7f81, 0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x7f81817f,
+0x817f8181, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x7f7f817f, 0x7f817f7f, 0x7f818181, 0x81817f7f,
+0x81817f7f, 0x8181817f, 0x817f817f, 0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f818181, 0x7f81817f,
+0x817f7f7f, 0x7f817f7f, 0x81817f81, 0x8181817f, 0x8181817f, 0x7f817f81, 0x8181817f, 0x8181817f,
+0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, 0x7f817f81,
+0x7f818181, 0x7f817f81, 0x81818181, 0x817f817f, 0x817f817f, 0x7f818181, 0x817f8181, 0x7f7f817f,
+0x7f7f7f81, 0x817f7f7f, 0x7f81817f, 0x7f81817f, 0x7f81817f, 0x81817f81, 0x817f817f, 0x7f7f817f,
+0x7f817f81, 0x817f817f, 0x7f817f7f, 0x7f81817f, 0x8181817f, 0x817f7f7f, 0x8181817f, 0x7f7f8181,
+0x7f818181, 0x7f817f81, 0x7f7f817f, 0x817f8181, 0x817f817f, 0x817f7f81, 0x7f7f7f81, 0x81817f81,
+0x817f8181, 0x81818181, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f7f7f81,
+0x7f818181, 0x817f7f81, 0x7f817f7f, 0x81818181, 0x7f817f7f, 0x7f7f817f, 0x7f7f8181, 0x8181817f,
+0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x7f81817f, 0x81817f81, 0x7f817f81, 0x817f8181, 0x817f817f,
+0x817f7f7f, 0x81817f7f, 0x7f818181, 0x81818181, 0x817f7f81, 0x81817f7f, 0x817f8181, 0x817f8181,
+0x81817f7f, 0x8181817f, 0x7f7f817f, 0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x7f7f7f7f, 0x817f8181,
+0x81817f7f, 0x7f818181, 0x7f7f7f81, 0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x7f817f81, 0x817f8181,
+0x817f817f, 0x7f7f7f7f, 0x81818181, 0x81818181, 0x81818181, 0x7f817f7f, 0x81818181, 0x7f81817f,
+0x7f817f81, 0x8181817f, 0x817f7f7f, 0x817f817f, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x81817f81,
+0x7f7f8181, 0x7f817f7f, 0x817f7f7f, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x817f7f81,
+0x817f7f7f, 0x7f817f81, 0x7f817f7f, 0x817f817f, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x8181817f,
+0x7f817f7f, 0x8181817f, 0x7f817f81, 0x7f81817f, 0x7f7f7f81, 0x7f7f7f81, 0x817f817f, 0x81818181,
+0x7f817f81, 0x7f81817f, 0x7f7f8181, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f818181, 0x7f7f7f7f,
+0x7f7f8181, 0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f7f, 0x7f7f8181,
+0x817f7f7f, 0x81817f81, 0x7f818181, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x7f817f81,
+0x7f7f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x8181817f, 0x817f7f7f, 0x7f817f7f, 0x817f8181,
+0x7f817f7f, 0x817f8181, 0x7f818181, 0x81817f7f, 0x7f817f7f, 0x8181817f, 0x7f81817f, 0x7f7f7f7f,
+0x7f81817f, 0x7f817f81, 0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f818181, 0x817f7f7f, 0x7f81817f,
+0x81817f81, 0x7f7f7f81, 0x7f818181, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x817f8181,
+0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x8181817f, 0x81817f81,
+0x817f8181, 0x817f8181, 0x7f7f8181, 0x7f7f8181, 0x8181817f, 0x7f817f7f, 0x8181817f, 0x7f81817f,
+0x7f7f7f81, 0x817f817f, 0x81817f7f, 0x817f7f81, 0x81817f7f, 0x817f7f81, 0x81817f81, 0x7f817f81,
+0x817f8181, 0x7f818181, 0x7f817f81, 0x81817f81, 0x81817f81, 0x817f8181, 0x7f818181, 0x81818181,
+0x817f7f81, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x8181817f, 0x81818181, 0x817f7f81, 0x7f818181,
+0x81817f81, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x81817f81, 0x7f817f81, 0x7f7f817f, 0x8181817f,
+0x817f7f7f, 0x7f7f817f, 0x7f817f81, 0x817f7f7f, 0x7f7f817f, 0x81818181, 0x817f7f81, 0x7f7f8181,
+0x7f7f8181, 0x8181817f, 0x7f7f8181, 0x817f8181, 0x817f7f7f, 0x817f7f7f, 0x7f7f7f7f, 0x81817f81,
+0x817f7f81, 0x7f7f8181, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f7f7f81, 0x81817f7f,
+0x817f817f, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f,
+0x7f7f8181, 0x817f817f, 0x7f7f7f81, 0x7f7f817f, 0x817f8181, 0x7f81817f, 0x7f81817f, 0x817f7f7f,
+0x7f817f7f, 0x817f817f, 0x817f7f81, 0x817f7f81, 0x81817f81, 0x7f817f81, 0x817f817f, 0x7f81817f,
+0x7f7f8181, 0x7f818181, 0x7f818181, 0x817f817f, 0x7f81817f, 0x817f8181, 0x8181817f, 0x7f817f7f,
+0x817f8181, 0x817f7f81, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x7f7f7f7f, 0x81818181, 0x7f7f8181,
+0x7f817f7f, 0x7f7f8181, 0x81817f7f, 0x817f817f, 0x81818181, 0x817f7f7f, 0x8181817f, 0x81817f81,
+0x817f817f, 0x817f817f, 0x7f7f817f, 0x7f7f7f81, 0x7f818181, 0x7f817f7f, 0x7f7f7f81, 0x817f817f,
+0x7f818181, 0x7f7f8181, 0x7f817f7f, 0x81817f81, 0x7f7f8181, 0x817f7f81, 0x817f7f7f, 0x817f8181,
+0x7f7f8181, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, 0x81817f81, 0x817f8181, 0x817f7f7f,
+0x7f7f7f81, 0x817f8181, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x7f7f7f7f, 0x81817f7f,
+0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f7f,
+0x8181817f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x817f8181, 0x7f7f7f81, 0x7f817f81, 0x7f817f7f,
+0x7f818181, 0x817f7f7f, 0x81817f81, 0x817f8181, 0x7f818181, 0x81817f7f, 0x817f8181, 0x7f7f8181,
+0x81817f7f, 0x7f7f7f7f, 0x81818181, 0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x817f8181,
+0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f,
+0x7f817f81, 0x817f8181, 0x7f7f7f7f, 0x7f818181, 0x7f7f7f81, 0x7f7f817f, 0x7f817f81, 0x817f8181,
+0x81818181, 0x7f818181, 0x7f7f7f7f, 0x7f818181, 0x7f81817f, 0x7f7f817f, 0x81817f81, 0x7f7f8181,
+0x81818181, 0x817f7f81, 0x7f817f81, 0x7f7f8181, 0x817f7f81, 0x81817f81, 0x7f817f7f, 0x817f817f,
+0x7f81817f, 0x7f818181, 0x817f8181, 0x81817f7f, 0x817f8181, 0x7f7f7f81, 0x7f818181, 0x7f7f817f,
+0x817f8181, 0x817f8181, 0x7f817f81, 0x7f81817f, 0x8181817f, 0x817f7f81, 0x817f817f, 0x81818181,
+0x7f817f7f, 0x817f817f, 0x817f817f, 0x7f817f7f, 0x7f817f7f, 0x817f817f, 0x7f81817f, 0x81818181,
+0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f81, 0x7f81817f, 0x7f7f7f81, 0x817f7f81, 0x817f8181, 0x7f818181,
+0x7f7f7f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f818181, 0x817f7f81, 0x81817f7f, 0x81817f7f, 0x81817f81,
+0x7f7f7f81, 0x8181817f, 0x7f7f8181, 0x7f81817f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f7f, 0x7f7f817f,
+0x7f817f7f, 0x81818181, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x817f817f, 0x81818181, 0x81817f81,
+0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x81818181, 0x81818181, 0x7f7f7f7f, 0x817f7f81, 0x7f81817f,
+0x7f81817f, 0x7f7f8181, 0x7f818181, 0x7f818181, 0x7f817f7f, 0x8181817f, 0x81817f81, 0x817f7f81,
+0x7f7f7f81, 0x7f81817f, 0x817f7f81, 0x817f8181, 0x7f81817f, 0x817f8181, 0x81817f7f, 0x7f817f81,
+0x81818181, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x7f7f8181, 0x8181817f, 0x81818181, 0x81818181,
+0x81818181, 0x817f8181, 0x81817f81, 0x817f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x7f81817f,
+0x7f7f7f81, 0x7f7f7f7f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x817f7f7f,
+0x7f817f7f, 0x81818181, 0x81818181, 0x7f7f817f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x817f7f81,
+0x81818181, 0x7f817f7f, 0x81817f7f, 0x81817f81, 0x817f817f, 0x817f7f7f, 0x7f7f817f, 0x7f7f817f,
+0x7f7f8181, 0x817f7f81, 0x7f81817f, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f7f, 0x7f7f7f7f,
+0x7f7f7f7f, 0x7f7f7f81, 0x7f7f8181, 0x817f8181, 0x817f817f, 0x7f7f7f81, 0x81818181, 0x81817f81,
+0x7f7f7f7f, 0x817f817f, 0x81818181, 0x7f7f817f, 0x7f818181, 0x7f7f8181, 0x817f8181, 0x817f7f7f,
+0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x817f8181, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81,
+0x7f818181, 0x7f817f7f, 0x81817f81, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f817f, 0x7f7f7f7f,
+0x817f8181, 0x7f7f7f7f, 0x7f81817f, 0x7f817f81, 0x81818181, 0x817f817f, 0x817f7f7f, 0x81817f7f,
+0x7f81817f, 0x817f817f, 0x7f818181, 0x8181817f, 0x817f817f, 0x81818181, 0x817f7f7f, 0x817f7f81,
+0x7f7f8181, 0x817f817f, 0x817f7f81, 0x7f81817f, 0x7f818181, 0x7f7f817f, 0x817f7f7f, 0x7f7f7f7f,
+0x7f7f7f7f, 0x81817f81, 0x7f81817f, 0x7f7f7f7f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x81817f7f,
+0x7f81817f, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x817f817f, 0x81817f81,
+0x7f7f7f81, 0x817f7f81, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x81817f81, 0x7f817f7f, 0x7f818181,
+0x7f818181, 0x81818181, 0x7f7f817f, 0x7f7f7f81, 0x81817f81, 0x817f7f7f, 0x817f8181, 0x7f7f817f,
+0x81818181, 0x81817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f818181, 0x7f7f817f, 0x817f7f81, 0x817f7f7f,
+0x81817f7f, 0x7f81817f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x817f7f81,
+0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x7f7f7f7f, 0x7f817f81, 0x7f7f7f81, 0x817f7f7f, 0x81817f81,
+0x7f817f7f, 0x817f7f81, 0x817f817f, 0x81817f7f, 0x7f818181, 0x7f81817f, 0x7f817f81, 0x7f817f81,
+0x7f81817f, 0x7f7f7f7f, 0x7f817f81, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f817f,
+0x7f7f7f81, 0x7f818181, 0x7f818181, 0x7f818181, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x81818181,
+0x7f81817f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81,
+0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x81817f81, 0x7f818181, 0x81817f81, 0x7f7f7f7f, 0x7f81817f,
+0x81817f81, 0x7f817f7f, 0x7f81817f, 0x81817f81, 0x81818181, 0x7f81817f, 0x7f7f817f, 0x81817f7f,
+0x7f7f817f, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x81817f81, 0x8181817f, 0x7f817f81, 0x81817f7f,
+0x817f7f7f, 0x7f7f8181, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x817f817f, 0x817f7f81, 0x7f7f817f,
+0x7f7f8181, 0x7f818181, 0x7f7f8181, 0x81817f7f, 0x8181817f, 0x817f7f7f, 0x7f7f8181, 0x7f817f81,
+0x817f817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x81818181, 0x81817f81,
+0x81817f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f817f, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f,
+0x7f81817f, 0x817f7f81, 0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x7f7f817f,
+0x7f817f7f, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x81818181, 0x8181817f, 0x7f7f7f81, 0x7f818181,
+0x8181817f, 0x7f7f7f7f, 0x817f817f, 0x7f818181, 0x81817f81, 0x7f818181, 0x7f81817f, 0x817f7f81,
+0x81818181, 0x7f7f7f81, 0x81817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x8181817f, 0x7f817f7f,
+0x7f7f8181, 0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f818181,
+0x81817f81, 0x7f818181, 0x81817f81, 0x7f818181, 0x7f7f7f81, 0x817f817f, 0x81817f7f, 0x7f7f7f81,
+0x817f817f, 0x7f817f81, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f81, 0x8181817f, 0x817f8181,
+0x7f7f7f7f, 0x8181817f, 0x817f8181, 0x81818181, 0x7f818181, 0x81818181, 0x7f7f7f81, 0x817f817f,
+0x81817f81, 0x8181817f, 0x8181817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x8181817f, 0x817f7f7f,
+0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81817f81, 0x81817f7f, 0x8181817f,
+0x817f817f, 0x7f817f7f, 0x817f7f7f, 0x7f7f7f81, 0x817f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f7f,
+0x817f817f, 0x817f8181, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f, 0x7f818181, 0x7f7f8181, 0x817f817f,
+0x81818181, 0x7f818181, 0x817f7f81, 0x817f8181, 0x817f817f, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f,
+0x7f7f7f7f, 0x7f817f7f, 0x817f817f, 0x817f817f, 0x7f817f81, 0x81818181, 0x7f817f7f, 0x7f7f8181,
+0x7f7f8181, 0x81817f7f, 0x81817f7f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, 0x81817f7f, 0x7f7f817f,
+0x817f7f81, 0x8181817f, 0x81817f81, 0x7f7f8181, 0x7f7f8181, 0x7f7f7f81, 0x81817f7f, 0x7f7f817f,
+0x81817f81, 0x7f817f81, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f8181, 0x7f817f7f, 0x7f7f8181,
+0x817f817f, 0x817f7f7f, 0x7f817f7f, 0x817f8181, 0x817f7f7f, 0x7f81817f, 0x7f7f817f, 0x817f817f,
+0x7f7f8181, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f7f817f, 0x81817f81, 0x7f817f81, 0x7f817f7f,
+0x817f7f81, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x81817f81, 0x7f817f7f, 0x817f7f7f, 0x7f7f7f7f,
+0x7f817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f7f7f7f, 0x7f818181, 0x817f7f81, 0x817f817f, 0x8181817f,
+0x817f7f7f, 0x817f8181, 0x8181817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x81817f7f, 0x7f7f8181,
+0x81818181, 0x81818181, 0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f7f81, 0x8181817f, 0x7f81817f,
+0x7f7f817f, 0x7f818181, 0x81817f7f, 0x7f817f81, 0x817f8181, 0x7f7f7f81, 0x7f7f8181, 0x817f7f7f,
+0x7f818181, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x7f818181, 0x81817f81, 0x817f7f7f, 0x7f818181,
+0x7f817f81, 0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x817f817f, 0x81818181, 0x7f817f7f, 0x7f817f81,
+0x817f7f81, 0x7f7f817f, 0x81817f81, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x817f817f, 0x81817f81,
+0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x7f818181, 0x7f818181, 0x7f7f7f7f, 0x817f7f81,
+0x817f7f81, 0x7f7f7f81, 0x8181817f, 0x7f7f7f7f, 0x817f817f, 0x7f817f7f, 0x817f817f, 0x7f7f7f7f,
+0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x7f81817f,
+0x7f817f7f, 0x81817f81, 0x8181817f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x817f7f7f,
+0x81817f81, 0x817f7f7f, 0x81817f81, 0x7f81817f, 0x7f818181, 0x7f818181, 0x7f818181, 0x7f81817f,
+0x7f7f8181, 0x817f7f81, 0x817f8181, 0x7f817f81, 0x7f817f7f, 0x81818181, 0x7f817f81, 0x81818181,
+0x81817f7f, 0x81818181, 0x817f7f81, 0x81817f81, 0x81817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f81817f,
+0x7f7f8181, 0x81818181, 0x7f7f7f7f, 0x7f7f8181, 0x81817f7f, 0x81817f81, 0x7f7f7f81, 0x8181817f,
+0x8181817f, 0x7f7f817f, 0x81817f81, 0x817f817f, 0x817f7f7f, 0x817f817f, 0x817f817f, 0x7f81817f,
+0x8181817f, 0x817f7f7f, 0x7f818181, 0x817f7f7f, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x817f817f,
+0x817f7f81, 0x7f7f7f7f, 0x7f7f7f81, 0x8181817f, 0x7f818181, 0x7f7f8181, 0x7f81817f, 0x817f8181,
+0x7f81817f, 0x7f7f8181, 0x8181817f, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x8181817f, 0x81817f81,
+0x7f7f817f, 0x7f817f7f, 0x817f817f, 0x7f81817f, 0x7f817f7f, 0x81817f7f, 0x7f7f8181, 0x817f7f7f,
+0x7f7f7f81, 0x817f8181, 0x7f817f81, 0x817f7f81, 0x817f7f81, 0x81818181, 0x81818181, 0x81818181,
+0x7f7f8181, 0x7f81817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81,
+0x7f817f81, 0x817f8181, 0x7f81817f, 0x81817f81, 0x7f7f7f81, 0x817f7f81, 0x817f7f7f, 0x817f8181,
+0x7f7f8181, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x7f81817f, 0x8181817f, 0x8181817f, 0x7f81817f,
+0x7f817f81, 0x7f7f817f, 0x817f817f, 0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x817f7f81, 0x817f817f,
+0x817f817f, 0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x817f817f,
+0x8181817f, 0x81818181, 0x81817f81, 0x817f8181, 0x81818181, 0x81817f7f, 0x817f8181, 0x7f7f8181,
+0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x81817f7f, 0x7f7f7f7f, 0x817f817f,
+0x817f7f7f, 0x7f7f817f, 0x7f817f81, 0x7f7f817f, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x7f817f7f,
+0x7f81817f, 0x7f7f8181, 0x7f817f81, 0x817f7f7f, 0x7f7f817f, 0x7f818181, 0x7f7f817f, 0x81818181,
+0x7f81817f, 0x817f8181, 0x81818181, 0x81817f81, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f7f81,
+0x81817f7f, 0x7f817f81, 0x7f817f7f, 0x7f7f7f7f, 0x7f7f8181, 0x817f7f7f, 0x7f81817f, 0x7f818181,
+0x7f818181, 0x817f7f7f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x817f7f81, 0x7f7f7f81, 0x817f7f81,
+0x7f817f7f, 0x7f7f7f7f, 0x7f818181, 0x7f817f81, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x81818181,
+0x7f7f817f, 0x81817f81, 0x8181817f, 0x7f7f7f81, 0x81817f7f, 0x7f817f81, 0x81818181, 0x817f817f,
+0x7f7f7f81, 0x7f81817f, 0x7f7f817f, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x8181817f,
+0x7f7f7f7f, 0x81817f81, 0x7f7f7f7f, 0x7f7f7f7f, 0x81817f7f, 0x7f817f81, 0x7f81817f, 0x81817f81,
+0x817f817f, 0x81817f81, 0x81817f7f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f817f, 0x7f818181, 0x817f817f,
+0x7f7f7f7f, 0x7f817f7f, 0x8181817f, 0x817f7f7f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x817f7f7f,
+0x7f817f81, 0x7f7f8181, 0x8181817f, 0x7f818181, 0x7f817f7f, 0x817f817f, 0x7f7f7f81, 0x817f817f,
+0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x7f81817f,
+0x7f7f817f, 0x7f817f7f, 0x7f818181, 0x7f7f817f, 0x817f7f81, 0x7f818181, 0x817f8181, 0x7f817f81,
+0x7f817f7f, 0x817f817f, 0x7f7f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x81818181, 0x817f7f7f,
+0x817f7f81, 0x817f7f7f, 0x817f7f7f, 0x8181817f, 0x7f7f7f81, 0x7f7f8181, 0x817f7f81, 0x817f7f81,
+0x7f817f7f, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x7f818181, 0x7f7f817f, 0x7f817f81, 0x7f7f817f,
+0x7f817f7f, 0x7f817f81, 0x8181817f, 0x81818181, 0x7f7f7f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f,
+0x817f8181, 0x7f818181, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x7f818181, 0x81818181, 0x7f7f7f7f,
+0x8181817f, 0x81817f81, 0x817f7f81, 0x81818181, 0x81817f7f, 0x8181817f, 0x7f817f7f, 0x7f7f7f7f,
+0x817f7f81, 0x817f7f81, 0x817f7f7f, 0x7f7f7f81, 0x7f7f817f, 0x81817f7f, 0x7f7f8181, 0x7f7f817f,
+0x81818181, 0x7f7f7f81, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x7f818181, 0x817f817f, 0x7f7f7f81,
+0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x8181817f, 0x7f81817f, 0x817f817f, 0x8181817f, 0x7f7f7f7f,
+0x817f7f7f, 0x7f818181, 0x817f7f81, 0x7f7f8181, 0x817f7f7f, 0x8181817f, 0x817f8181, 0x8181817f,
+0x81817f81, 0x817f8181, 0x817f7f81, 0x7f7f8181, 0x817f8181, 0x7f817f81, 0x7f7f7f81, 0x7f818181,
+0x7f817f7f, 0x7f7f8181, 0x7f7f817f, 0x81817f81, 0x81817f7f, 0x81817f7f, 0x81817f7f, 0x817f817f,
+0x7f81817f, 0x7f7f7f7f, 0x817f8181, 0x8181817f, 0x7f817f81, 0x7f817f7f, 0x817f7f7f, 0x7f7f8181,
+0x7f7f8181, 0x7f7f7f81, 0x7f818181, 0x81817f7f, 0x8181817f, 0x817f7f81, 0x7f7f7f7f, 0x7f81817f,
+0x817f8181, 0x7f81817f, 0x8181817f, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f7f7f81, 0x817f7f7f,
+0x81818181, 0x7f7f7f7f, 0x81818181, 0x81818181, 0x7f7f8181, 0x7f817f81, 0x81818181, 0x817f8181,
+0x81817f81, 0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x81817f81, 0x81817f81,
+0x7f7f8181, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, 0x7f7f8181, 0x7f818181, 0x81818181, 0x7f7f7f7f,
+0x81817f7f, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x817f8181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f7f,
+0x7f7f7f81, 0x817f7f81, 0x7f81817f, 0x817f7f81, 0x817f817f, 0x7f7f7f7f, 0x81817f7f, 0x81817f81,
+0x81818181, 0x7f817f7f, 0x81817f81, 0x7f7f7f81, 0x817f7f7f, 0x81818181, 0x7f7f817f, 0x7f818181,
+0x7f7f7f7f, 0x81818181, 0x7f7f7f81, 0x81818181, 0x8181817f, 0x7f817f81, 0x7f7f7f7f, 0x7f817f81,
+0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x8181817f, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f817f,
+0x7f7f7f81, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x81817f81,
+0x817f7f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x817f7f81, 0x817f8181, 0x7f7f7f7f,
+0x7f818181, 0x81817f7f, 0x7f817f7f, 0x817f817f, 0x7f7f8181, 0x7f7f817f, 0x817f7f81, 0x7f7f8181,
+0x817f7f81, 0x81817f81, 0x817f8181, 0x817f817f, 0x81818181, 0x7f817f81, 0x81817f81, 0x7f7f8181,
+0x7f7f7f81, 0x817f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x81818181, 0x7f7f7f7f, 0x7f817f7f, 0x7f7f7f7f,
+0x81818181, 0x8181817f, 0x7f7f7f7f, 0x7f7f817f, 0x817f8181, 0x817f817f, 0x817f7f7f, 0x817f8181,
+0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x7f817f81, 0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f,
+0x7f7f7f7f, 0x817f8181, 0x817f8181, 0x7f7f817f, 0x81817f81, 0x7f7f8181, 0x7f81817f, 0x81817f7f,
+0x817f7f7f, 0x817f817f, 0x7f7f8181, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f81, 0x7f81817f,
+0x81817f7f, 0x817f7f7f, 0x7f817f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f7f817f, 0x7f817f81,
+0x81817f81, 0x81818181, 0x7f7f8181, 0x817f7f7f, 0x7f7f7f81, 0x81817f7f, 0x81818181, 0x817f8181,
+0x7f7f817f, 0x7f817f7f, 0x7f817f81, 0x817f7f81, 0x7f7f817f, 0x7f818181, 0x7f817f81, 0x7f817f81,
+0x7f818181, 0x81817f7f, 0x817f7f81, 0x8181817f, 0x7f7f7f81, 0x81817f7f, 0x7f817f81, 0x8181817f,
+0x7f7f817f, 0x817f817f, 0x81817f81, 0x81817f81, 0x81817f7f, 0x7f818181, 0x817f7f81, 0x817f7f81,
+0x7f7f7f7f, 0x7f81817f, 0x7f817f81, 0x7f7f817f, 0x7f818181, 0x8181817f, 0x81817f81, 0x7f817f81,
+0x7f81817f, 0x817f7f81, 0x7f818181, 0x817f817f, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f,
+0x81817f7f, 0x7f7f8181, 0x81817f81, 0x81817f7f, 0x8181817f, 0x7f7f817f, 0x8181817f, 0x7f818181,
+0x7f817f7f, 0x817f7f7f, 0x81817f81, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x7f7f817f, 0x7f7f7f81,
+0x817f7f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f8181, 0x7f817f81, 0x7f817f81, 0x7f81817f, 0x81818181,
+0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x817f7f81, 0x8181817f,
+0x81818181, 0x8181817f, 0x8181817f, 0x817f7f81, 0x7f7f8181, 0x817f7f81, 0x7f817f7f, 0x817f7f7f,
+0x81818181, 0x81818181, 0x81817f7f, 0x7f81817f, 0x7f817f81, 0x81817f81, 0x7f7f7f7f, 0x817f817f,
+0x7f7f7f7f, 0x7f817f7f, 0x817f8181, 0x7f7f7f81, 0x817f7f7f, 0x7f817f81, 0x7f7f817f, 0x8181817f,
+0x817f7f81, 0x7f81817f, 0x817f817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x817f8181, 0x817f7f7f,
+0x81818181, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x817f817f, 0x817f7f7f, 0x7f7f817f, 0x8181817f,
+0x7f7f7f7f, 0x81817f81, 0x8181817f, 0x7f81817f, 0x817f7f81, 0x7f7f7f7f, 0x817f7f7f, 0x817f817f,
+0x81818181, 0x8181817f, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x8181817f,
+0x7f818181, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x81817f7f, 0x7f818181, 0x81818181, 0x8181817f,
+0x817f817f, 0x7f7f7f7f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81, 0x7f7f817f, 0x7f818181, 0x81818181,
+0x81818181, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x81817f7f, 0x7f7f817f, 0x8181817f, 0x7f7f7f7f,
+0x7f7f8181, 0x7f7f817f, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x817f8181, 0x81818181,
+0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f7f7f, 0x8181817f, 0x7f817f7f, 0x7f7f817f, 0x81818181,
+0x7f817f7f, 0x817f8181, 0x817f7f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f8181,
+0x81817f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f81817f, 0x8181817f, 0x81818181, 0x81817f7f,
+0x7f81817f, 0x81818181, 0x7f7f8181, 0x7f817f81, 0x7f817f81, 0x7f817f7f, 0x817f817f, 0x7f818181,
+0x7f817f7f, 0x7f7f7f7f, 0x817f817f, 0x817f7f7f, 0x7f81817f, 0x817f7f81, 0x81818181, 0x7f7f817f,
+0x817f817f, 0x81817f81, 0x817f7f81, 0x81817f7f, 0x7f81817f, 0x7f81817f, 0x7f817f81, 0x81817f81,
+0x7f7f817f, 0x8181817f, 0x7f818181, 0x7f7f7f7f, 0x81818181, 0x8181817f, 0x7f817f81, 0x8181817f,
+0x7f81817f, 0x7f7f8181, 0x817f817f, 0x817f7f81, 0x7f817f81, 0x81817f7f, 0x7f818181, 0x81818181,
+0x81817f81, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x81817f81, 0x7f7f7f81, 0x7f818181, 0x81817f81,
+0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f817f7f, 0x817f817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f817f,
+0x817f817f, 0x817f817f, 0x817f8181, 0x7f7f7f81, 0x817f7f81, 0x817f8181, 0x7f7f7f81, 0x8181817f,
+0x81817f7f, 0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x817f817f, 0x81817f7f, 0x7f7f7f7f, 0x7f81817f,
+0x7f817f7f, 0x81818181, 0x81818181, 0x7f7f7f81, 0x81817f7f, 0x8181817f, 0x7f818181, 0x7f817f7f,
+0x81817f81, 0x817f7f7f, 0x7f7f8181, 0x7f817f7f, 0x81817f7f, 0x8181817f, 0x817f8181, 0x817f7f81,
+0x7f7f7f7f, 0x81817f7f, 0x81817f81, 0x81818181, 0x81817f7f, 0x817f817f, 0x817f817f, 0x7f7f817f,
+0x7f81817f, 0x81818181, 0x7f817f81, 0x7f7f8181, 0x7f7f817f, 0x7f7f7f81, 0x81817f7f, 0x817f8181,
+0x7f817f7f, 0x8181817f, 0x817f8181, 0x817f7f7f, 0x7f7f7f81, 0x8181817f, 0x817f7f81, 0x7f817f81,
+0x7f7f8181, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f817f7f, 0x7f7f7f81, 0x7f817f7f, 0x8181817f,
+0x7f81817f, 0x817f817f, 0x7f817f81, 0x8181817f, 0x7f818181, 0x81817f81, 0x7f818181, 0x81818181,
+0x7f7f7f81, 0x817f7f7f, 0x817f8181, 0x7f817f7f, 0x7f817f81, 0x817f7f81, 0x81818181, 0x7f7f7f81,
+0x7f7f7f81, 0x7f7f7f81, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f817f7f, 0x7f817f81, 0x7f818181,
+0x7f7f8181, 0x817f8181, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f81, 0x817f8181, 0x7f817f7f, 0x7f817f7f,
+0x7f7f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f817f7f, 0x817f817f, 0x7f817f81, 0x7f7f8181, 0x81817f81,
+0x7f7f8181, 0x7f818181, 0x817f7f81, 0x7f818181, 0x81817f7f, 0x817f7f7f, 0x7f7f8181, 0x81817f7f,
+0x7f817f7f, 0x7f818181, 0x7f81817f, 0x81818181, 0x7f7f817f, 0x7f818181, 0x7f817f7f, 0x81817f81,
+0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f, 0x7f817f81, 0x7f81817f,
+0x7f7f7f81, 0x7f7f7f81, 0x817f817f, 0x7f7f7f81, 0x81818181, 0x7f7f7f81, 0x8181817f, 0x817f7f81,
+0x7f7f817f, 0x817f817f, 0x817f8181, 0x7f7f8181, 0x81817f7f, 0x7f817f81, 0x8181817f, 0x7f81817f,
+0x7f818181, 0x7f818181, 0x7f817f81, 0x7f7f7f7f, 0x7f81817f, 0x817f7f81, 0x7f7f7f81, 0x81818181,
+0x7f7f7f81, 0x817f8181, 0x7f81817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f81817f,
+0x81817f81, 0x7f817f81, 0x81817f7f, 0x81818181, 0x817f817f, 0x817f7f7f, 0x817f8181, 0x81818181,
+0x7f81817f, 0x81818181, 0x81818181, 0x7f818181, 0x7f7f7f7f, 0x81817f81, 0x8181817f, 0x7f7f817f,
+0x817f7f81, 0x817f7f81, 0x817f8181, 0x7f7f8181, 0x817f8181, 0x7f7f7f81, 0x817f7f81, 0x81818181,
+0x8181817f, 0x81817f81, 0x7f7f8181, 0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x7f81817f,
+0x81817f7f, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x7f7f7f81, 0x817f7f81, 0x81818181, 0x7f7f7f81,
+0x7f7f8181, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x81817f81, 0x7f81817f, 0x81817f81, 0x7f7f8181,
+0x817f817f, 0x7f817f81, 0x7f7f8181, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x7f7f7f81, 0x7f7f817f,
+0x817f8181, 0x7f7f817f, 0x8181817f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x7f7f817f, 0x81817f7f,
+0x7f7f817f, 0x7f81817f, 0x81818181, 0x81817f81, 0x8181817f, 0x7f817f7f, 0x81818181, 0x7f7f7f81,
+0x7f7f7f7f, 0x81817f7f, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x8181817f, 0x81818181, 0x7f7f7f81,
+0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x817f8181,
+0x7f817f7f, 0x7f817f7f, 0x7f7f817f, 0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x7f818181, 0x817f7f81,
+0x7f81817f, 0x7f817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f7f, 0x81817f7f, 0x7f81817f, 0x81818181,
+0x7f7f8181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81817f81, 0x8181817f, 0x7f818181, 0x81817f81,
+0x817f7f81, 0x817f7f7f, 0x7f7f7f81, 0x7f7f7f7f, 0x7f7f8181, 0x7f81817f, 0x7f7f817f, 0x7f7f817f,
+0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x817f817f, 0x817f817f, 0x817f817f, 0x7f81817f, 0x7f7f8181,
+0x7f818181, 0x817f8181, 0x7f7f8181, 0x7f817f81, 0x81818181, 0x7f817f81, 0x817f7f7f, 0x7f817f7f,
+0x7f7f8181, 0x817f8181, 0x7f818181, 0x81817f81, 0x7f818181, 0x8181817f, 0x8181817f, 0x7f818181,
+0x8181817f, 0x7f7f7f7f, 0x7f7f817f, 0x7f818181, 0x7f7f7f81, 0x7f7f817f, 0x81818181, 0x817f7f7f,
+0x8181817f, 0x817f817f, 0x7f7f8181, 0x7f7f7f81, 0x81818181, 0x8181817f, 0x817f817f, 0x817f817f,
+0x7f818181, 0x7f7f8181, 0x8181817f, 0x817f817f, 0x81817f7f, 0x81818181, 0x7f81817f, 0x817f817f,
+0x7f7f7f7f, 0x7f817f81, 0x7f818181, 0x817f7f7f, 0x817f7f81, 0x7f817f81, 0x7f7f817f, 0x7f7f7f7f,
+0x81817f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f817f, 0x8181817f, 0x7f81817f, 0x7f818181, 0x7f7f7f7f,
+0x7f7f7f7f, 0x7f7f7f81, 0x7f818181, 0x7f818181, 0x8181817f, 0x7f7f8181, 0x817f7f81, 0x7f817f7f,
+0x817f7f7f, 0x7f7f7f81, 0x7f7f8181, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f81,
+0x817f817f, 0x817f7f7f, 0x81818181, 0x7f818181, 0x81818181, 0x81818181, 0x7f7f7f81, 0x8181817f,
+0x81818181, 0x7f817f7f, 0x817f7f81, 0x7f7f8181, 0x7f818181, 0x8181817f, 0x8181817f, 0x817f7f81,
+0x7f817f7f, 0x81817f7f, 0x817f8181, 0x7f817f7f, 0x7f81817f, 0x81817f7f, 0x7f817f7f, 0x817f7f7f,
+0x817f7f7f, 0x7f817f81, 0x8181817f, 0x817f7f7f, 0x7f817f81, 0x81817f81, 0x817f817f, 0x7f7f7f81,
+0x7f817f81, 0x7f817f7f, 0x817f817f, 0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x7f818181, 0x81817f81,
+0x817f8181, 0x7f817f7f, 0x81817f81, 0x817f7f81, 0x7f7f7f7f, 0x81817f81, 0x81817f81, 0x81817f81,
+0x81817f7f, 0x7f81817f, 0x81818181, 0x7f7f8181, 0x817f817f, 0x817f8181, 0x817f7f7f, 0x8181817f,
+0x81817f7f, 0x8181817f, 0x7f817f81, 0x81817f7f, 0x81818181, 0x817f7f7f, 0x7f81817f, 0x817f817f,
+0x81818181, 0x817f7f81, 0x817f817f, 0x817f7f7f, 0x81817f7f, 0x7f7f8181, 0x817f7f81, 0x8181817f,
+0x7f818181, 0x7f817f81, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x81818181, 0x7f7f8181, 0x7f7f7f7f,
+0x7f81817f, 0x817f8181, 0x817f817f, 0x7f81817f, 0x81818181, 0x7f817f7f, 0x7f7f7f7f, 0x7f7f7f7f,
+0x7f7f8181, 0x817f8181, 0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x7f7f817f, 0x7f817f7f, 0x817f8181,
+0x81817f81, 0x817f8181, 0x7f81817f, 0x817f8181, 0x817f8181, 0x817f7f81, 0x7f7f817f, 0x7f818181,
+0x7f7f817f, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f7f, 0x8181817f, 0x817f8181, 0x81818181, 0x817f7f7f,
+0x7f7f817f, 0x817f7f7f, 0x817f817f, 0x7f818181, 0x8181817f, 0x817f817f, 0x7f7f7f7f, 0x7f81817f,
+0x7f817f81, 0x81818181, 0x7f817f7f, 0x817f817f, 0x7f7f7f7f, 0x817f7f81, 0x7f7f8181, 0x81817f81,
+0x81818181, 0x81817f81, 0x7f7f817f, 0x81818181, 0x817f8181, 0x7f81817f, 0x81817f7f, 0x7f81817f,
+0x817f8181, 0x81818181, 0x7f7f7f81, 0x81817f81, 0x817f7f7f, 0x817f8181, 0x7f7f817f, 0x7f7f8181,
+0x7f81817f, 0x7f818181, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x7f7f7f81, 0x7f81817f, 0x817f8181,
+0x81817f81, 0x81818181, 0x817f8181, 0x81818181, 0x7f81817f, 0x7f7f8181, 0x7f81817f, 0x81817f81,
+0x7f818181, 0x81818181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x81817f7f, 0x7f81817f, 0x81818181,
+0x81817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f7f, 0x8181817f, 0x81817f81, 0x81817f81, 0x8181817f,
+0x817f817f, 0x81817f7f, 0x7f817f7f, 0x817f817f, 0x817f7f7f, 0x8181817f, 0x7f7f7f7f, 0x817f817f,
+0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x7f81817f, 0x7f7f817f, 0x7f817f7f, 0x7f7f8181, 0x7f7f7f7f,
+0x7f81817f, 0x817f817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f81817f, 0x81818181,
+0x7f7f817f, 0x7f81817f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f8181, 0x81818181, 0x817f7f7f, 0x81818181,
+0x81817f81, 0x817f817f, 0x7f817f81, 0x7f7f8181, 0x7f7f7f81, 0x81817f81, 0x7f7f7f81, 0x817f7f7f,
+0x817f817f, 0x817f8181, 0x7f817f81, 0x817f7f7f, 0x7f817f7f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f81,
+0x7f817f7f, 0x7f81817f, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x7f7f7f7f, 0x7f7f817f,
+0x7f7f7f7f, 0x7f817f7f, 0x7f7f817f, 0x7f817f81, 0x817f817f, 0x81817f7f, 0x7f81817f, 0x8181817f,
+0x817f7f81, 0x7f7f7f81, 0x81818181, 0x7f817f81, 0x81818181, 0x7f818181, 0x817f7f7f, 0x81818181,
+0x81818181, 0x7f7f7f7f, 0x8181817f, 0x7f7f7f81, 0x81818181, 0x7f7f817f, 0x817f7f81, 0x7f81817f,
+0x817f817f, 0x7f7f7f81, 0x7f81817f, 0x7f81817f, 0x7f818181, 0x817f817f, 0x81817f7f, 0x7f7f817f,
+0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x817f817f, 0x81817f7f, 0x7f7f7f81, 0x817f817f, 0x7f7f817f,
+0x81817f7f, 0x7f7f7f7f, 0x7f81817f, 0x817f7f7f, 0x7f7f8181, 0x7f7f8181, 0x7f7f817f, 0x7f81817f,
+0x81817f81, 0x817f7f81, 0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f,
+0x7f7f7f7f, 0x81817f81, 0x817f817f, 0x81817f81, 0x7f7f7f7f, 0x817f817f, 0x817f7f81, 0x81817f81,
+0x817f7f81, 0x817f7f7f, 0x81817f81, 0x817f7f81, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x8181817f,
+0x7f7f7f81, 0x81817f81, 0x7f817f81, 0x81818181, 0x817f8181, 0x7f7f8181, 0x81818181, 0x817f7f81,
+0x817f8181, 0x81817f7f, 0x8181817f, 0x817f817f, 0x8181817f, 0x7f818181, 0x81817f81, 0x81818181,
+0x817f817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x7f7f8181, 0x8181817f,
+0x817f7f81, 0x7f818181, 0x7f7f817f, 0x7f81817f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x817f8181,
+0x7f7f8181, 0x81817f81, 0x7f7f8181, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f817f7f, 0x81817f81,
+0x7f7f817f, 0x817f817f, 0x817f8181, 0x7f817f7f, 0x7f817f7f, 0x8181817f, 0x817f8181, 0x817f7f7f,
+0x817f7f81, 0x817f8181, 0x8181817f, 0x7f818181, 0x7f818181, 0x7f7f7f81, 0x817f7f81, 0x7f817f81,
+0x81817f81, 0x817f8181, 0x817f7f7f, 0x817f7f7f, 0x81818181, 0x7f7f7f81, 0x817f817f, 0x817f7f81,
+0x81817f7f, 0x7f818181, 0x7f7f817f, 0x7f818181, 0x7f7f8181, 0x7f817f7f, 0x817f8181, 0x81817f81,
+0x81818181, 0x7f817f7f, 0x81817f81, 0x7f817f81, 0x817f7f81, 0x7f7f7f81, 0x81817f7f, 0x7f817f7f,
+0x7f7f8181, 0x817f8181, 0x7f818181, 0x8181817f, 0x81817f81, 0x817f7f7f, 0x7f817f81, 0x81818181,
+0x817f817f, 0x81818181, 0x817f7f81, 0x817f7f7f, 0x817f817f, 0x7f817f81, 0x7f817f7f, 0x817f7f81,
+0x7f818181, 0x817f7f7f, 0x7f81817f, 0x81817f7f, 0x817f8181, 0x817f8181, 0x81818181, 0x817f7f81,
+0x7f7f817f, 0x81817f7f, 0x8181817f, 0x7f7f8181, 0x7f81817f, 0x8181817f, 0x817f7f81, 0x7f81817f,
+0x817f817f, 0x7f7f817f, 0x817f7f7f, 0x8181817f, 0x7f7f817f, 0x7f81817f, 0x8181817f, 0x7f81817f,
+0x81818181, 0x81818181, 0x7f81817f, 0x817f7f7f, 0x817f7f7f, 0x817f7f81, 0x7f818181, 0x817f7f7f,
+0x817f7f7f, 0x81817f81, 0x7f817f81, 0x7f817f7f, 0x817f8181, 0x7f81817f, 0x7f7f817f, 0x81817f7f,
+0x7f7f7f7f, 0x817f7f81, 0x81817f81, 0x817f817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x81818181,
+0x7f7f7f81, 0x81817f7f, 0x817f7f81, 0x8181817f, 0x7f7f7f81, 0x817f7f7f, 0x81818181, 0x81817f7f,
+0x7f817f7f, 0x817f7f7f, 0x81817f7f, 0x81817f7f, 0x817f7f7f, 0x81817f7f, 0x817f8181, 0x7f81817f,
+0x7f7f7f7f, 0x817f817f, 0x817f7f81, 0x8181817f, 0x817f7f7f, 0x7f817f7f, 0x8181817f, 0x81817f81,
+0x817f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f818181, 0x7f817f81, 0x81818181, 0x817f8181, 0x8181817f,
+0x7f817f7f, 0x817f8181, 0x7f818181, 0x817f7f81, 0x7f817f7f, 0x7f7f8181, 0x817f817f, 0x817f8181,
+0x7f81817f, 0x7f7f817f, 0x7f7f8181, 0x7f7f817f, 0x8181817f, 0x81817f81, 0x817f8181, 0x7f7f7f81,
+0x8181817f, 0x8181817f, 0x8181817f, 0x7f7f7f81, 0x8181817f, 0x7f818181, 0x81818181, 0x7f7f7f81,
+0x7f817f7f, 0x81818181, 0x817f8181, 0x817f8181, 0x7f7f7f7f, 0x817f7f81, 0x817f817f, 0x817f7f81,
+0x81818181, 0x817f8181, 0x7f7f8181, 0x7f7f8181, 0x817f817f, 0x81817f7f, 0x7f7f8181, 0x817f8181,
+0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f818181, 0x7f81817f, 0x81817f7f, 0x817f8181,
+0x817f8181, 0x7f818181, 0x8181817f, 0x7f817f7f, 0x81817f81, 0x7f7f8181, 0x817f7f81, 0x817f8181,
+0x7f7f7f7f, 0x817f7f7f, 0x7f817f81, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f81, 0x81818181, 0x81817f81,
+0x7f817f7f, 0x7f818181, 0x817f7f81, 0x8181817f, 0x81817f7f, 0x8181817f, 0x7f81817f, 0x7f817f81,
+0x817f8181, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f818181, 0x7f81817f, 0x7f7f8181, 0x7f7f7f81,
+0x817f817f, 0x81817f7f, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x817f8181, 0x81818181, 0x8181817f,
+0x81817f7f, 0x7f817f7f, 0x817f8181, 0x81817f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x81817f81, 0x817f817f,
+0x817f8181, 0x81817f81, 0x81817f81, 0x7f817f7f, 0x7f7f817f, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f,
+0x81817f81, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f817f7f, 0x7f7f817f, 0x81817f81, 0x817f8181,
+0x81817f81, 0x8181817f, 0x7f7f7f81, 0x817f8181, 0x81817f7f, 0x81817f81, 0x817f8181, 0x817f8181,
+0x817f817f, 0x817f7f7f, 0x81817f7f, 0x7f817f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f818181, 0x81817f81,
+0x817f8181, 0x817f7f7f, 0x817f817f, 0x8181817f, 0x81818181, 0x7f817f81, 0x81817f7f, 0x81818181,
+0x7f818181, 0x817f817f, 0x7f7f7f81, 0x817f817f, 0x7f7f817f, 0x817f817f, 0x7f7f7f7f, 0x817f7f7f,
+0x817f7f7f, 0x7f817f81, 0x7f7f817f, 0x81817f7f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f817f,
+0x817f817f, 0x7f817f81, 0x7f818181, 0x7f7f7f81, 0x7f817f81, 0x81817f81, 0x81818181, 0x7f817f7f,
+0x7f7f7f81, 0x817f817f, 0x7f817f7f, 0x817f7f81, 0x7f7f7f7f, 0x817f8181, 0x7f7f7f7f, 0x817f817f,
+0x81818181, 0x81818181, 0x8181817f, 0x81817f81, 0x81818181, 0x817f8181, 0x7f817f7f, 0x7f817f81,
+0x817f8181, 0x7f818181, 0x817f7f81, 0x817f7f7f, 0x7f7f817f, 0x8181817f, 0x817f7f7f, 0x7f817f7f,
+0x7f7f7f7f, 0x7f81817f, 0x7f7f817f, 0x7f81817f, 0x81817f81, 0x817f8181, 0x7f817f81, 0x817f8181,
+0x81818181, 0x817f817f, 0x81817f81, 0x817f817f, 0x817f8181, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f7f7f,
+0x81817f81, 0x7f81817f, 0x817f7f81, 0x7f818181, 0x81817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f817f7f,
+0x8181817f, 0x7f81817f, 0x8181817f, 0x81818181, 0x81817f7f, 0x7f7f817f, 0x81817f81, 0x8181817f,
+0x7f81817f, 0x7f817f81, 0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x817f8181, 0x81817f81, 0x7f7f7f7f,
+0x7f817f7f, 0x81817f7f, 0x7f7f7f7f, 0x817f817f, 0x7f7f7f81, 0x817f7f81, 0x7f818181, 0x7f7f8181,
+0x7f817f81, 0x7f817f7f, 0x8181817f, 0x7f7f7f81, 0x7f7f8181, 0x81818181, 0x7f7f8181, 0x7f817f7f,
+0x7f7f8181, 0x7f81817f, 0x7f7f8181, 0x81818181, 0x7f817f7f, 0x7f7f8181, 0x7f817f7f, 0x8181817f,
+0x7f817f7f, 0x817f7f7f, 0x81817f81, 0x7f817f81, 0x817f817f, 0x7f817f81, 0x7f817f7f, 0x81818181,
+0x7f817f81, 0x7f7f8181, 0x7f81817f, 0x81818181, 0x7f7f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x7f817f81,
+0x7f81817f, 0x817f7f7f, 0x817f8181, 0x7f817f7f, 0x817f7f81, 0x7f7f7f7f, 0x7f817f7f, 0x7f817f81,
+0x817f7f7f, 0x817f8181, 0x7f7f7f7f, 0x817f7f81, 0x81817f7f, 0x81818181, 0x817f817f, 0x81818181,
+0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x81817f81, 0x7f81817f, 0x7f7f7f81, 0x7f7f7f81, 0x81817f7f,
+0x817f7f81, 0x817f7f81, 0x7f817f7f, 0x7f7f7f7f, 0x8181817f, 0x81817f7f, 0x7f7f7f7f, 0x7f818181,
+0x7f818181, 0x81818181, 0x817f8181, 0x81817f7f, 0x7f7f8181, 0x817f817f, 0x7f818181, 0x7f7f8181,
+0x7f7f817f, 0x7f7f8181, 0x7f7f7f7f, 0x8181817f, 0x81818181, 0x817f8181, 0x7f7f7f7f, 0x7f81817f,
+0x81817f7f, 0x7f7f8181, 0x81817f81, 0x817f7f7f, 0x817f7f7f, 0x7f817f81, 0x817f7f7f, 0x81817f7f,
+0x817f817f, 0x7f7f7f81, 0x817f7f7f, 0x817f817f, 0x817f7f7f, 0x817f8181, 0x81818181, 0x7f81817f,
+0x7f7f8181, 0x7f81817f, 0x81817f7f, 0x7f818181, 0x81817f81, 0x817f7f81, 0x7f7f8181, 0x7f817f81,
+0x7f7f7f81, 0x7f7f7f81, 0x7f7f7f7f, 0x817f8181, 0x8181817f, 0x817f7f81, 0x817f7f81, 0x7f817f81,
+0x7f7f817f, 0x7f817f81, 0x7f817f7f, 0x7f817f81, 0x81818181, 0x81818181, 0x7f7f7f7f, 0x81818181,
+0x7f817f7f, 0x81818181, 0x7f81817f, 0x7f817f81, 0x81817f7f, 0x817f7f81, 0x7f7f817f, 0x7f817f7f,
+0x7f818181, 0x7f7f817f, 0x7f817f7f, 0x7f7f7f7f, 0x817f8181, 0x81817f81, 0x817f7f81, 0x7f7f7f81,
+0x81818181, 0x7f817f7f, 0x81818181, 0x7f818181, 0x817f7f81, 0x7f817f7f, 0x81818181, 0x81818181,
+0x7f817f81, 0x7f7f7f7f, 0x81817f7f, 0x7f818181, 0x81817f7f, 0x81817f81, 0x8181817f, 0x817f817f,
+0x817f817f, 0x817f7f81, 0x7f7f8181, 0x817f7f7f, 0x817f8181, 0x7f7f7f7f, 0x7f7f7f7f, 0x817f7f81,
+0x817f7f7f, 0x7f7f8181, 0x7f817f81, 0x7f81817f, 0x817f7f7f, 0x7f7f7f7f, 0x7f7f8181, 0x7f7f817f,
+0x817f7f7f, 0x81818181, 0x8181817f, 0x7f817f7f, 0x7f7f8181, 0x81817f81, 0x7f81817f, 0x7f7f8181,
+0x817f7f7f, 0x817f7f81, 0x7f81817f, 0x7f7f7f81, 0x7f817f81, 0x8181817f, 0x817f7f81, 0x7f817f81,
+0x81817f81, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f81, 0x7f81817f, 0x817f817f, 0x7f817f81,
+0x817f7f81, 0x7f7f817f, 0x7f7f817f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f817f, 0x8181817f, 0x817f7f7f,
+0x7f817f81, 0x7f817f7f, 0x7f81817f, 0x81817f81, 0x7f817f7f, 0x7f81817f, 0x81817f7f, 0x7f7f7f7f,
+0x817f817f, 0x7f7f7f7f, 0x7f81817f, 0x817f8181, 0x81818181, 0x7f81817f, 0x7f817f7f, 0x8181817f,
+0x817f817f, 0x81818181, 0x81817f81, 0x81817f81, 0x7f7f8181, 0x7f7f7f81, 0x7f7f7f7f, 0x8181817f,
+0x7f817f81, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x81817f7f, 0x81818181, 0x7f81817f, 0x7f7f7f7f,
+0x81818181, 0x7f81817f, 0x7f7f7f81, 0x8181817f, 0x7f7f7f81, 0x7f7f7f81, 0x7f7f8181, 0x81818181,
+0x81818181, 0x7f7f8181, 0x7f817f81, 0x81817f7f, 0x7f7f817f, 0x817f7f81, 0x7f7f817f, 0x817f817f,
+0x81817f7f, 0x7f7f817f, 0x7f7f7f7f, 0x81817f81, 0x817f7f7f, 0x7f7f7f7f, 0x8181817f, 0x7f7f8181,
+0x7f817f7f, 0x7f817f81, 0x817f8181, 0x817f7f7f, 0x7f818181, 0x7f81817f, 0x817f817f, 0x7f7f8181,
+0x81817f7f, 0x7f7f7f7f, 0x817f7f81, 0x8181817f, 0x81818181, 0x7f817f81, 0x7f817f81, 0x7f81817f,
+0x7f81817f, 0x7f7f817f, 0x817f7f7f, 0x7f817f7f, 0x817f7f81, 0x7f7f817f, 0x817f8181, 0x7f7f7f7f,
+0x7f81817f, 0x7f7f817f, 0x817f817f, 0x817f817f, 0x7f817f7f, 0x7f7f7f7f, 0x7f81817f, 0x7f7f8181,
+0x817f7f81, 0x7f7f7f81, 0x7f818181, 0x817f817f, 0x7f7f817f, 0x7f817f81, 0x817f7f81, 0x81818181,
+0x81817f81, 0x7f7f7f81, 0x7f817f7f, 0x7f818181, 0x817f817f, 0x7f7f8181, 0x7f81817f, 0x7f81817f,
+0x817f7f81, 0x81818181, 0x7f817f81, 0x7f7f7f81, 0x7f81817f, 0x7f818181, 0x7f817f7f, 0x81817f81,
+0x81818181, 0x7f817f7f, 0x81817f81, 0x81817f81, 0x7f7f817f, 0x817f8181, 0x81818181, 0x7f81817f,
+0x81817f81, 0x7f7f817f, 0x7f7f817f, 0x7f817f81, 0x817f7f7f, 0x8181817f, 0x81817f81, 0x81818181,
+0x7f817f81, 0x7f7f7f7f, 0x81818181, 0x7f7f8181, 0x81817f81, 0x81817f7f, 0x7f7f817f, 0x7f7f7f7f,
+0x7f7f7f81, 0x7f7f8181, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f81, 0x817f8181, 0x7f7f817f, 0x7f7f7f7f,
+0x817f817f, 0x8181817f, 0x817f8181, 0x7f7f7f7f, 0x7f7f7f81, 0x81817f7f, 0x7f7f7f7f, 0x7f7f817f,
+0x7f81817f, 0x7f7f7f81, 0x817f8181, 0x81817f81, 0x817f7f81, 0x7f7f817f, 0x7f81817f, 0x7f7f7f7f,
+0x81818181, 0x81817f81, 0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x81817f81, 0x8181817f, 0x7f81817f,
+0x81818181, 0x7f818181, 0x817f7f81, 0x81817f7f, 0x81818181, 0x817f7f7f, 0x7f7f7f81, 0x817f7f7f,
+0x817f817f, 0x817f7f81, 0x81818181, 0x81817f81, 0x7f818181, 0x81817f7f, 0x817f817f, 0x7f81817f,
+0x817f7f81, 0x7f81817f, 0x8181817f, 0x7f7f7f7f, 0x8181817f, 0x7f7f817f, 0x7f817f7f, 0x817f7f7f,
+0x7f818181, 0x817f8181, 0x7f7f817f, 0x817f7f81, 0x7f817f81, 0x81817f81, 0x7f7f8181, 0x7f81817f,
+0x7f7f817f, 0x7f817f7f, 0x7f817f7f, 0x817f7f81, 0x817f817f, 0x817f7f81, 0x81818181, 0x7f7f817f,
+0x817f8181, 0x7f81817f, 0x7f817f7f, 0x81817f81, 0x7f7f817f, 0x81817f81, 0x7f7f817f, 0x81818181,
+0x81817f7f, 0x7f817f7f, 0x817f8181, 0x7f7f8181, 0x817f7f81, 0x81818181, 0x7f7f8181, 0x7f817f7f,
+0x7f817f7f, 0x817f817f, 0x817f7f81, 0x81817f81, 0x7f7f8181, 0x817f7f7f, 0x7f7f8181, 0x81817f81,
+0x7f817f7f, 0x817f8181, 0x817f817f, 0x81817f81, 0x81817f81, 0x7f7f817f, 0x7f7f8181, 0x7f7f7f81,
+0x81817f81, 0x7f81817f, 0x7f7f8181, 0x81818181, 0x817f7f7f, 0x7f81817f, 0x7f7f817f, 0x7f817f7f,
+0x7f818181, 0x7f81817f, 0x817f8181, 0x7f7f7f7f, 0x7f81817f, 0x7f818181, 0x817f817f, 0x81818181,
+0x7f818181, 0x7f817f81, 0x7f81817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x7f81817f, 0x7f7f7f81,
+0x7f818181, 0x7f817f7f, 0x7f7f8181, 0x7f7f8181, 0x7f818181, 0x81818181, 0x817f7f81, 0x817f8181,
+0x7f7f7f7f, 0x7f818181, 0x7f7f7f7f, 0x7f7f7f81, 0x817f8181, 0x7f7f7f7f, 0x817f7f7f, 0x7f818181,
+0x81818181, 0x7f7f817f, 0x81818181, 0x7f818181, 0x7f817f7f, 0x7f817f81, 0x7f7f8181, 0x81818181,
+0x7f7f7f81, 0x817f8181, 0x7f7f7f7f, 0x81818181, 0x81817f7f, 0x7f817f7f, 0x7f817f7f, 0x817f817f,
+0x7f817f81, 0x817f8181, 0x817f817f, 0x817f817f, 0x7f7f8181, 0x817f7f81, 0x7f7f8181, 0x7f7f7f81,
+0x7f7f7f7f, 0x7f7f7f7f, 0x8181817f
+
+e =
+34560
+
+k =
+6144
+
+rv_index =
+0
+
+iter_max =
+8
+
+iter_min =
+4
+
+expected_iter_count =
+8
+
+ext_scale =
+15
+
+num_maps =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_SOFT_OUTPUT, RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN,
+RTE_BBDEV_TURBO_NEG_LLR_1_BIT_SOFT_OUT
+
+expected_status =
+OK
\ No newline at end of file
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data b/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data
new file mode 100644
index 00000000..cbf4e72e
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data
@@ -0,0 +1,676 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0xD0000000, 0x33CDC8CE, 0x4FAEE4CC, 0xC7DC23C3, 0xC306D6CA, 0x2B360A24, 0xE91C423D,
+0x1F323427, 0x4B1C33B6, 0x3EC9D0E7, 0x39204DD1, 0xCCD73C37, 0xC6F6D8E1, 0x1DF828F3, 0xDEE34025,
+0xC41EC235, 0xD035E1D8, 0x3CC32843, 0x29B60C3C, 0xE92E122E, 0x454BD4C8, 0x35D02543, 0x33E4D0AC,
+0x07AF2228, 0x38C62DD1, 0x233800C5, 0x3CC02DD1, 0x35E73B19, 0xDEBED026, 0x33C5EDD9, 0x33E51527,
+0x1F21EA18, 0xCC3E3BD2, 0x013CC14C, 0x3724D23B, 0x23CDED2D, 0x21424630, 0xC5B0E64D, 0xCFC03BEF,
+0x24294241, 0x46C526BC, 0xD82F334C, 0x1E283DCB, 0x3F3F33C7, 0x4A14D2A1, 0xD92F2AF3, 0xCFA820CC,
+0xDD30C6CB, 0x2B3ACAB7, 0x4AFE29CD, 0x25BD3200, 0x2D293323, 0xD32A3B29, 0x29E64D0C, 0xBE4BFB3C,
+0xB1DD242D, 0x0427F11D, 0x3046CFDA, 0xD633C0D5, 0xC0DDBE01, 0xDA3DD9B4, 0x3BCE3638, 0x23262CC6,
+0x432BD2AE, 0x23A9E1C7, 0x1BE45609, 0x22CD35AA, 0x32D6371F, 0xCE27352A, 0xE2B73F40, 0xC2D0CE2C,
+0xE72E3A3E, 0x2FF52147, 0xEFCF2E33, 0x003103CA, 0xC6C14A33, 0x45CAC0CF, 0x011E1FD3, 0x23D3C72B,
+0x2EB644C4, 0x484BD930, 0x3AC22E1A, 0x1AB6DD42, 0xCEDDE232, 0x3ACCCC31, 0xB830CA2D, 0xC930D2D7,
+0xD0E049BB, 0xE6243524, 0x3EF53914, 0xCB083BE3, 0x411EDBE9, 0x30C3C3E8, 0x3721F5E7, 0xBBDF3FD9,
+0x2AD21F33, 0xEE402858, 0xD3AFD237, 0x3D39401D, 0x30003623, 0x4734DC46, 0x192ACD32, 0x3324CAC2,
+0xD714D4B5, 0xDC2E21D0, 0x2327CDBA, 0x15B739C3, 0x31C9EEB8, 0xFAB5372E, 0xC239B5DA, 0xE628440F,
+0x34D425C6, 0x293BE6FD, 0xBDC04BBC, 0xE0DDB7CF, 0xD627BC34, 0xA5422FCB, 0x13211ABC, 0x1708E51C,
+0xCE33DAD1, 0x51AEC731, 0x2E383824, 0xC722D0D3, 0xCB32292B, 0xCECC402B, 0x1B312EF0, 0x34CF46CA,
+0xE3BD252C, 0x0C21EC36, 0x4ADFECBE, 0xC4D2E62C, 0xADE5BAD3, 0xDDCC1D18, 0x521AC135, 0x3F302D37,
+0x22CE2DCC, 0x262A1BCF, 0x4EE7D7CA, 0xD43D3526, 0x1840E020, 0xC3201F2F, 0x3ACB453E, 0x41EAD5B6,
+0xB6C4CF17, 0x243A55D4, 0xC7501E28, 0x351DCD1C, 0xE3A8D502, 0x3235271C, 0xAC342F00, 0x34B4E3D8,
+0x36473AE4, 0xAE403CEB, 0xBC3FDA3F, 0x552CDBEC, 0xE7C42B5C, 0xE743E13D, 0x18D44650, 0xBA2EDF31,
+0xB5C3EC42, 0x432EBE2E, 0xADDE4F2A, 0x401BCAD6, 0x03D6EAEB, 0xD0DEB133, 0x153FC4D4, 0x30DA2FD3,
+0xEE282FD9, 0x242ADC39, 0xD72B42D9, 0x44C7E0D5, 0xC72CB3C7, 0x27C9472B, 0x0036DFB6, 0xD9C53112,
+0xC0360E0E, 0x4EEA0E27, 0xEDCF27C1, 0x1739D5DC, 0x22DE2CE1, 0xDCCAF633, 0x31D02417, 0xD7DCD0DA,
+0xDE1DE0DC, 0xD827DD22, 0x1CDBD7EC, 0xCE171BCB, 0xEE2347CF, 0xC22C312D, 0xDBCC2A38, 0xBE0ADED4,
+0xDC20CC2D, 0xD5380DB2, 0x2A32C701, 0x3EC7D41E, 0xCDD7D74C, 0xE83E2A44, 0xC12B26E6, 0xE70046D7,
+0xD626D1EC, 0x35DDE41D, 0x303F27BD, 0xC94D4517, 0x31D1E229, 0xAD3529BF, 0x3AC5D21C, 0xCF1DB731,
+0x2738DBEB, 0x1F382B33, 0x1ED52213, 0xFCE22CD2, 0x1330331E, 0x4A30BA0F, 0x1F38B42A, 0x2E34CB1F,
+0xE3E125D5, 0x3AC72D17, 0xBD3C342C, 0x37272810, 0x1D34E4C7, 0xE5CC2EF8, 0x1E2FF1DF, 0xB9294236,
+0x26F10017, 0xBDE3FBD1, 0xB7DB52D0, 0x2E3CB8E9, 0xB3343D3C, 0x3E3BDFDD, 0x1F46393A, 0x25FA20C1,
+0x2D194CD1, 0xE9471D26, 0x34C3242E, 0xAE012034, 0xC8F3FED7, 0xB825362B, 0x322DC136, 0x42F41511,
+0x181855C2, 0x4937D5EB, 0xE13CDC23, 0x08242A28, 0x2DCCDFC0, 0x13CEC64F, 0xB3E6F7ED, 0xD93401EE,
+0x3D02DEE0, 0xCFC52300, 0xD2F1BE38, 0x2D092924, 0x253E2AF0, 0x37DACAD0, 0xDB1ECFE7, 0x40CED84D,
+0x3706C42A, 0x4231D91A, 0xDD30471D, 0xBD272DC8, 0xCB1941D9, 0xC22FC7C5, 0x333F3C0C, 0x3F3D2518,
+0xDDCCD143, 0xB3D834D5, 0xD231BEE4, 0x2BDB3B3D, 0x2F2754E7, 0xEAF3CE64, 0x3632E4C0, 0xD83324C2,
+0x11A4D5CC, 0x00D627D3, 0xD51F2731, 0x17C92BE2, 0x1AD62E1E, 0x16DB2418, 0x16B83823, 0x2711D54B,
+0xDFB02BDF, 0x28CDA600, 0x2AE9ED3E, 0xD314CD20, 0xD04F272D, 0xD0F9CD37, 0xCC1735E5, 0xD328EC37,
+0xA7D31A26, 0x214BD9A5, 0xCFCB4226, 0x233C4AD5, 0x26EF37D8, 0xD83E3230, 0x3EC8C93C, 0x07B9282B,
+0xD1E6DAF2, 0x31C52EDD, 0x3A0038A5, 0x56C1E0DF, 0x22E437C0, 0xDBC8143B, 0xCF1DB03F, 0x1D4BE636,
+0xB8D3C23A, 0xBCC3372E, 0xAF284228, 0xDCE02F1E, 0xB9D4E3D4, 0x56D3D0D8, 0x494816C1, 0xFB1240C3,
+0x23B1BDD2, 0x1236322E, 0xC2CBCB47, 0x102FE7C8, 0xBB0AE1D9, 0x3831BC29, 0x39AECBD9, 0xC7CF1ED2,
+0x0AE43913, 0xDDD62F2C, 0x4FD522E6, 0x3CB90A55, 0x2101C8EC, 0x432604BD, 0x29E3E31E, 0xC25B3C29,
+0x20E927D4, 0xD4300C41, 0xD9AECBD5, 0x48D8E143, 0x2CEE272F, 0xD2C11FBA, 0x2F2DB8D7, 0x1CF4DFE6,
+0xCCDCCEBF, 0xE052D0EA, 0xDADDEB1E, 0xC72ACB3A, 0x332E2B1B, 0x2D26E827, 0x2A29E6A6, 0xE614D6D5,
+0x221D1CC5, 0x51102031, 0x1735C248, 0x31D4DCD5, 0x3C20DC00, 0x2D231732, 0xE54AD9CD, 0x1D24D826,
+0xE936373A, 0x20C0D3C9, 0x2B231520, 0xD6CED133, 0xDFC8C7B5, 0xDC2FE8B8, 0x3332249B, 0xC6E6E11F,
+0xB7DA17AF, 0x1FD91321, 0xEE29CD41, 0xE1262FEF, 0xB6181DC6, 0x24F618C6, 0x3BE5DA2D, 0xFC2C35C8,
+0x51C232CC, 0x36183EDF, 0x2B2CD6EF, 0x2F2DBDB9, 0x00D03413, 0xCDC9CBC4, 0xDB3309CC, 0x264EB22C,
+0xCED1EAC9, 0x38D53F27, 0x2EE7D6F3, 0xC7232C23, 0xCCD42515, 0xC9C8C8E3, 0x231E3930, 0x5BBEDFCA,
+0x34D7E5D7, 0x25CCE9A7, 0xCD4BF1E8, 0x5E2542E6, 0x38DCE43E, 0xDA34F0CA, 0x2EBFF841, 0xDA422D3E,
+0x13CA2231, 0xD5C6DA27, 0xBAC0C6B6, 0xC82C49FF, 0x31312123, 0xBF00DFBF, 0xE8F23819, 0x2C34E81E,
+0xDC4C2B0C, 0x3A5650F2, 0x2CCB0FD0, 0xC42D4C28, 0x4F19DA4B, 0xF63DE630, 0x29F51C1F, 0xD6BE14C4,
+0xD2DDCDD6, 0xE0D02825, 0x2FE0E3D1, 0x41342FE8, 0x25D547ED, 0xC5A732B5, 0x19BD503F, 0x4CD9DA3D,
+0x24D013BF, 0x4FE93FCA, 0x12DBE8B9, 0x33DF151F, 0x1E24AECD, 0x2E2E3656, 0x133500AD, 0x3641403C,
+0xBDDF2BD4, 0xE33130E0, 0x3DC02726, 0x18390C3C, 0x26C04DC9, 0xDBF22D35, 0xC3E1EC45, 0xCED12623,
+0x1CE92E39, 0xCF2FD222, 0xD8DE31E6, 0x1EE916D1, 0x35DCB621, 0x19C2EB54, 0x41B34ED4, 0xD9373BDF,
+0x203ED6D8, 0x2CD4A8C8, 0x14E0C6C6, 0xDF242131, 0xCDD921CF, 0x33AACBCC, 0x2AD1A5C6, 0x3ED42B00,
+0xBF181FD0, 0xC0CC2329, 0x1BD1EDC9, 0x3A2D47CB, 0xDD27D8CB, 0xD8E8BAE6, 0xD5DE3FF5, 0x2C30D641,
+0xDCDCDF26, 0xC2C34433, 0x2837F53D, 0x29D8E12D, 0xBF3FEBD0, 0x34CACC45, 0x30E1DA21, 0x39DDDCB3,
+0x30193518, 0xC0132E19, 0x313D3736, 0xB2D4FF34, 0xF4D92945, 0xC70D3AEA, 0x3E3034F0, 0x00DA2F07,
+0x1DD415EB, 0xCC1AC4B2, 0x232EB21C, 0xD7431E16, 0x2FCE0835, 0xCF39242C, 0x1BCA2CD4, 0xD8B6D0FA,
+0x18C3262F, 0x2CE3BDCB, 0xD839DC38, 0x26161D24, 0x1223B3DC, 0xD6D63515, 0xCFBABFC6, 0xC1BF18C7,
+0x2637C715, 0x264020EA, 0xD024D3C4, 0x4126A11D, 0x2E34442D, 0x32BB1CC9, 0x36E7DD40, 0x3113C3C5,
+0xD100C8CE, 0xD21651E8, 0xC3CFCDD9, 0x403139F1, 0xD3D02119, 0x32171AC8, 0xD2F9B62E, 0xB5B335D4,
+0xC1462835, 0x1F1C2C35, 0xD8DDBF35, 0x422D2DDA, 0x38ED19D7, 0x3326BD2F, 0x4534DFCE, 0x46403132,
+0x19CDD51E, 0xCBC3CBC0, 0xE3CF2139, 0xC646ECD8, 0x29E7C7DD, 0xE94CCAF8, 0xB8C6EA37, 0xEC2B09DA,
+0xC8C82738, 0xC6CCDDFB, 0x3FCA3B4D, 0xC6482BD1, 0x2DE1D333, 0x3530BB3A, 0xBF4838EA, 0xE4411ACB,
+0x3514D939, 0xCD372B27, 0x4BEC1C39, 0xAB21C221, 0xD8D83F37, 0xCCE0512D, 0xDA4640C2, 0x36AE1CD6,
+0xBC2F303B, 0x20C3B8CD, 0xBFE6D4E0, 0xBECCC4EB, 0xC7CC1E4E, 0xCC2BD03C, 0x3738C518, 0x23B824CC,
+0x2BECD0BC, 0x2107B945, 0x2AC21B00, 0xBE49F2EB, 0x31D2E3DC, 0xDAD23F11, 0xCDED3137, 0x3ABADA3A,
+0xD9C0C12C, 0x36CEE8CF, 0xD0D5BDD3, 0x2AC9CFCC, 0xE61FD747, 0xC7B7B12A, 0x2BD520DF, 0xD5BC1D31,
+0xE6D8423B, 0xCC21EB43, 0x1F46292D, 0x171EE038, 0xDFA5E244, 0x1BD5C4AD, 0xC622C2B5, 0x4E33CFD3,
+0x3F263F30, 0x34E4CF15, 0x0041CC36, 0x4021DB29, 0xD62E2231, 0x2C22AE21, 0x1A201F44, 0xC6D7C100,
+0x3A335125, 0x42473A2A, 0xDF32DA1F, 0xE5D8B51F, 0x3049E832, 0xE6DED531, 0x315DD01E, 0xDDD92034,
+0x2EBF0AEB, 0x4B4639DD, 0xBED6C8DA, 0xDABA3714, 0xDD133BD7, 0xC934EACF, 0x0D35C3E2, 0xB4CCC213,
+0x37EE202B, 0x0AD21AD9, 0x28D2C9CD, 0xD800DB2D, 0xD34B274A, 0xBDD33644, 0x38EFF1CC, 0xBDABD734,
+0xB11E3E32, 0x2F1C23DC, 0xE80BD7DE, 0xDBE7D6F7, 0x240248BF, 0x23CEF1F6, 0x1F56E634, 0x33CD2230,
+0xB4DB182F, 0xD338BA36, 0x33392B24, 0xBB3B3649, 0x282F30E7, 0x45CC35B1, 0x46261B30, 0x1FD9A722,
+0x3BE3E1C3, 0x25334FF0, 0xB8BB28C0, 0xD751E2CC, 0x403600E1, 0x461FC3ED, 0xCB30C202, 0xD8D2B92A,
+0x2923D71B, 0xB9DDE6CC, 0xB935CB0A, 0xEF37D723, 0xD1DA06CD, 0x303CE928, 0xF8B9BA27, 0x37370A1F,
+0xD6C1DDD9, 0x18B6463F, 0x28C71FC3, 0x38D8B53A, 0xD22DE9A1, 0x18DDB236, 0x0A28D515, 0x363B263D,
+0x1F3A122D, 0x202AE323, 0xC72337E8, 0x32ED08CC, 0x2658C347, 0xC6F4E200, 0x45D1C2C3, 0xF92D343A,
+0x3EBE1E65, 0x10EB2FD3, 0xC64FD5E5, 0xC5D3F521, 0xE9CA1A37, 0xD736CDD6, 0xDCC7C233, 0xC9C8E2D4,
+0x2B99D60A, 0x1EE3DA0D, 0x30BDD8E1, 0x16D3BE1F, 0x1C4524D4, 0xC8D5D432, 0x19AEDA3D, 0xCDD4CAD5,
+0xD835332E, 0x2412DA1B, 0xD0CBEBD7, 0xC3CEF425, 0xD1D743E3, 0x000A163B, 0x23DC3129, 0x202332D1,
+0x322F2216, 0x28F5131A, 0xD316CF0D, 0xD0344C30, 0x423325D7, 0x37204237, 0xEEDDC721, 0x2038E805,
+0xD525C522, 0xBEDDE327, 0x3AC8BCC5, 0x45402E27, 0x34DFC1D9, 0x2DB0D049, 0x322F2ACF, 0xDB3AC3C5,
+0xD8D0EB4F, 0x2A1DCCCC, 0xD0D33DC2, 0x1349DC45, 0x39CBBC32, 0x30301EBF, 0x2C00DFC0, 0x5DD0CA11,
+0xD0220C3A, 0xC527CD1A, 0x31CD372C, 0x2E0ED7DC, 0xC6F92338, 0xF537E8D0, 0x3626DBBF, 0xC9C3C6C3,
+0x30DDBE17, 0x50394C39, 0x3A11343B, 0x27E8DC2E, 0x0DDB303F, 0x1818BCF0, 0xC5DC17DB, 0x24C53A26,
+0xECB0E7E2, 0xC21B1928, 0x15D7C920, 0x4126AACE, 0xE214D8C7, 0xD40BCD42, 0xB2CBD018, 0x554436DB,
+0x3C2D25D9, 0x21CEC0DC, 0xE9251919, 0xE41ACDC3, 0xDECF67C3, 0xEDDE3225, 0xCA30EEDD, 0xCFE01E37,
+0xD231BF41, 0x34DF1EBD, 0x1DD2CC2C, 0x21142E29, 0xEA322B37, 0xCEC311DE, 0xF0F738DD, 0xEBED1A40,
+0xDCE640E8, 0xD0D1BA2B, 0xE906D6DB, 0xC71938B1, 0x13C59F39, 0x1ED6DBB6, 0x231BD3A9, 0x2C2B30DC,
+0x0ED50000, 0x2DD4EED6, 0xD9D21BEE, 0xE1442E36, 0x38CEC4AE, 0xC626CCF4, 0xC2CD3F36, 0xD71F2BD9,
+0x2F421837, 0x31B11A23, 0xD4D5282D, 0xD0E6BED7, 0x30D737CB, 0xC8142A09, 0xD8D2CFDC, 0xC22D3C44,
+0xF62DF1C9, 0x3D38DBC9, 0xBFC2E5D3, 0xDCE42128, 0x3A304038, 0xDBE6D626, 0x28250FE0, 0x27B52BCF,
+0xE0D8DC3C, 0xC8DA09DA, 0x2DF02FD4, 0xE539D7D1, 0xCDE729C2, 0x4816E313, 0x4E392AC5, 0x112CF50F,
+0x3AD94924, 0x4B22273A, 0x43D2DE9E, 0xDA22DE30, 0x3FE324DB, 0xD9CC2811, 0x29D253D4, 0xE2BC353C,
+0x0CC92147, 0xB8F0C6F5, 0x3D35DEBF, 0xED4ACBEB, 0xAC34E532, 0xEA2AC526, 0xEAC0BFC8, 0x4431D9DD,
+0xC0CE4E21, 0x0000F2DD, 0xE0D8C7BA, 0xDDDC2E36, 0xDA42C22B, 0x2B312614, 0xDC391EF4, 0xCC2E31D8,
+0x3DF4D630, 0xC72C21D9, 0xDB2BD531, 0xD916DB3C, 0xBEC11DD2, 0xD1D5CEA4, 0xBA072ED7, 0x402F2829,
+0xE3C0F627, 0xC7CC3CBD, 0xF41D3345, 0xD706DB14, 0xB2DF36AC, 0xEBC4551F, 0x32B01929, 0xC5B6DDEC,
+0xD91DE130, 0xE1213222, 0x4126D927, 0xAF52300D, 0x291B301C, 0x241A1843, 0xE43C1FDC, 0xBBCC191C,
+0x2C25E011, 0xBD3EE9DB, 0xC9BF1345, 0x2F333AD2, 0x1CD822DB, 0x23BE43EA, 0x2527EADC, 0xB52E1C1E,
+0x391CCF17, 0xD2C82C0C, 0x242E3830, 0xD328D808, 0x35CEF4CD, 0xD81BE035, 0xD0DF38DA, 0xD1D3C42E,
+0x3A2C1EEA, 0xC6D03AD2, 0x1E362E4E, 0xE6080000, 0xD018C312, 0xEFAA323B, 0xD1294BD2, 0xBAD52C3B,
+0xE0373E10, 0x260CDDE2, 0xC902E040, 0x30472BD5, 0x534BE7D2, 0x20CCB523, 0x21CCD843, 0x2259E24E,
+0xDE35FDD1, 0xD4D91D3E, 0xCFCA11D5, 0x35D724BD, 0x3C3F0CB8, 0x3EC447CB, 0xE835DDC4, 0xC3C73E31,
+0xDD432B2F, 0x2FDCBDC2, 0x2D3CCE17, 0x25B8AB31, 0x2ACCFFE5, 0x40E03C2B, 0xE7DA1841, 0x17E53020,
+0xD44E0B2F, 0x46DA22EC, 0x33422425, 0x211840C5, 0x23C1CFD5, 0xC8350BE5, 0xD833CDD0, 0xE63DC528,
+0x292ADC17, 0xD8BCD3C1, 0xCD39F02E, 0xA71EC343, 0xD6CC3A3E, 0x2E12D2CF, 0xC40B1FB1, 0x37DCDC33,
+0x20E52EEA, 0x271629E5, 0xC4BEE0C2, 0x1B3CDBD6, 0x00004934, 0x18E145C7, 0x4BE93522, 0x3029C309,
+0x31D1D50F, 0x3DB42C1A, 0x21A643B2, 0x3E26D8C2, 0xCDDE28D1, 0x44E835E1, 0xEDC4DED1, 0xC4F9C9ED,
+0xC5E1BBCC, 0x4426C1D4, 0xD9BB372B, 0x37232E0D, 0xE5D0D0AE, 0xC9D23CC5, 0xF638E2C7, 0x2DB6E6D9,
+0xC0C9B72D, 0xDF28E6D0, 0x11E2DCC8, 0xD8E2F31C, 0xEA3937DE, 0x2741B953, 0x19D5CEE6, 0x33DDC22D,
+0x2BF72326, 0x2143363C, 0x13CA1A1F, 0xB9D7DAC9, 0x22C6B72E, 0xC5DA2631, 0xCB32ECCF, 0x032D15D4,
+0xBB1EE613, 0x38C42452, 0xDBCD2726, 0x1AF44039, 0x22CF2EC6, 0xCAEDE6B4, 0x2823D423, 0xDCC61C39,
+0xC12D19BF, 0xC612C31B, 0x31F1D32D, 0x3E1C09E4, 0xD824C5E7, 0xDFFC2839, 0x2AD40000, 0xDFCCBBBC,
+0x3FC9CC12, 0xEF2626E1, 0x0FCDEA2A, 0xEBD735CF, 0x4E1A2834, 0xCDEDE1D5, 0x31CCCD29, 0x2F4618DF,
+0x48C11936, 0x29DADE41, 0xD71C1AE0, 0xDDD2285A, 0xB13DCDC8, 0xF020B8DB, 0xD0B837C2, 0xDC3B48E8,
+0x18C3C530, 0x192C2940, 0x2ADA3AE4, 0xEA34C55A, 0xDAB2BED4, 0x1A51E838, 0xC832AEBE, 0x10DE2DE2,
+0x3B2408CC, 0x22CEBD1E, 0xC919D6BE, 0x21D2D128, 0xCFE03A2A, 0xB7DEDC3B, 0x33C630ED, 0x5A0EEAD1,
+0x3CB03FCE, 0x24EC15E4, 0xD0385BBF, 0x20CB4E4B, 0x30E0C535, 0x301BCD3C, 0x3642DDD3, 0xD90FDC50,
+0xCA35BBCC, 0x393B3B2F, 0xE2BE352D, 0x362BE3BF, 0x1922B73D, 0xDE3448BD, 0xF7C0C02D, 0x00002F43,
+0x2FCAD0E4, 0xDE412A34, 0xFBF339DA, 0x2BB94330, 0x39B521D5, 0x2DE31F43, 0x382C4F23, 0x35314C2D,
+0x2DE3DBDC, 0x3A18EAB9, 0xD0DF131C, 0x45EA41E0, 0x48BBC1BF, 0x2220D7E4, 0x29C8D0C5, 0x3C0527CA,
+0xC1ED061C, 0xCB48CA3E, 0xE846E633, 0x0EB2D7CF, 0x44C42335, 0x27F0D440, 0x332FD847, 0xD72F2CE3,
+0xD7ECD6C9, 0x4026D51F, 0xD334BEE6, 0xE01B40C5, 0xD62BDA2F, 0xDEEA36AF, 0xABD039C7, 0x2E123FCE,
+0xFD3924A8, 0xD0C6D529, 0xD52AEED4, 0x211D2A35, 0x1EE9C5F1, 0xCECAF921, 0x38C92740, 0x3E4E2A1A,
+0xB8FAC7C3, 0xF11F1C36, 0xD4DA163A, 0x2701DC38, 0x0E3019D5, 0x57DB23E1, 0x43CD26D0, 0xADCC4934,
+0xE0D3D706, 0x403A0000, 0xDECED5E1, 0xDAE6CF1D, 0xD5E33C40, 0x35B3D038, 0x24D73231, 0x2EC01D40,
+0xC2D23CDC, 0x44C20BC6, 0x44C1D7A8, 0xE9D1C745, 0x36E5C2C6, 0xB81B2D42, 0x413917CC, 0xD8E031BA,
+0xCE3CC1C4, 0x3EC91AC5, 0x0DE406CE, 0x35D1DA27, 0xC8E5CAE3, 0x313EC127, 0xB61631CA, 0xD73B3444,
+0xD834C850, 0x37D31DE2, 0xCCC9153D, 0xBF41E9CA, 0xD649F9DC, 0x3A31D501, 0x50CD1DE3, 0xC8E54012,
+0x4432D4D5, 0xCBB93023, 0xE8FCD6C2, 0xCEE0CAD1, 0xE41B2E39, 0x28CC0E40, 0xDFEAEFFD, 0xB3EA2723,
+0x3534B935, 0x42D12A09, 0xED3DCA0A, 0x263C352B, 0xAD224EDF, 0x2CDF2A10, 0xF0BE4CAD, 0xBD45D8D5,
+0x263F27D4, 0xE2372FB4, 0xC35240DF, 0x1EF3DCDA, 0x3AD0D224, 0x27DAD527, 0x303A2CBE, 0xCDDCC829,
+0xE22B35C9, 0xD5DC222F, 0xEEBDCB63, 0x2042DAD8, 0x06CC3524, 0xD11BBB3E, 0xAE2229E6, 0x1FC81E1F,
+0xEF4B12DA, 0xE1D846DD, 0xBA3B1CDA, 0xE154C801, 0xD6092CD2, 0xD0F132CB, 0x3A40D027, 0x123530BB,
+0x1EC5B4DC, 0xD11825CB, 0xC603DECC, 0x28BCC5ED, 0x2C03E1CD, 0xE9AA12A5, 0xDB2DD53A, 0xCA32D01E,
+0x2F4A09C5, 0xFE411E26, 0x1354CEC8, 0x3736C5F4, 0xC62F3C3B, 0x252DDFB9, 0xD0353723, 0xD8DDC21E,
+0xE3DF21ED, 0xDAE33AC0, 0x2ADA323E, 0xDC43E1D9, 0x3A23463E, 0xD71934F8, 0xDB1037C8, 0x38B7BCCC,
+0xDA2C1CC1, 0x3EEEE349, 0x2ACB2A26, 0xD4BCBCD8, 0x4AB50000, 0x9EC941D8, 0x3824C9D0, 0x203CCD36,
+0x0DCDEDD8, 0x5B333036, 0x3125C7DD, 0xD7383F30, 0xD6C72617, 0xE1C245DD, 0xBDD3E818, 0x3210422F,
+0xD00ECF2E, 0x44411A3A, 0x02E72E22, 0xF106D24C, 0xCFC847CF, 0x0421AFCF, 0xD2C525A9, 0x3CBCE63A,
+0xE02A1E36, 0xE036E3E4, 0xDDC22B25, 0x2EC7452E, 0xE1E3EFE3, 0xD82ED6CE, 0x36C1E115, 0xD129E026,
+0x251E1C44, 0xE8E5D7CA, 0xDDE7D40F, 0xC5D8D9D8, 0xA1C52550, 0x3ACDDA25, 0x3548C4E1, 0x2913D9C8,
+0x1E28243A, 0xCA3324E2, 0xD0D7D1C0, 0xBDE9D0C4, 0x35BF4FE0, 0xCA42E33C, 0xBBFA33DA, 0xD753C9CD,
+0xDA2CE11D, 0xD9BEE407, 0xE31DD4C1, 0xCBCCD51B, 0x3BB8D6C9, 0x0000DDDD, 0x28D3CDDC, 0xD62445B5,
+0xB3211F13, 0xD7B9BF29, 0xD429D5E5, 0xB9C23CCE, 0xC219EDC1, 0x211E0B34, 0xB8F0E1C1, 0x33C0EF4C,
+0xD8D6C7D0, 0x30E8D3D8, 0xB06514D1, 0x21B9D915, 0xE2323DCD, 0xDA41311F, 0xE24AB11F, 0xE9B92BE7,
+0x33EA23C3, 0x04C430C0, 0xE849EF0D, 0xD1232C2C, 0x260FD13E, 0x002DCBDD, 0xEB3129E2, 0x20B4C7C6,
+0x25D32335, 0x2CCB39D0, 0xEA13DAC6, 0xCDEE2E14, 0xCDAA2CD0, 0xE1D339E6, 0x36DCD03B, 0xF5453114,
+0x34D428ED, 0xEAEDF458, 0xD226CC2D, 0xEDD03AD8, 0xEE1CC83D, 0x10DBB5E5, 0xF018CBDB, 0x5E33E31E,
+0xC6D9232B, 0x1A373130, 0xDFE7DDCB, 0xB21FC5C6, 0xC1CDB1B7, 0x3F2DF128, 0x482BB426, 0x323E0000,
+0xF5D43741, 0xCC1C1FB9, 0x00004E2E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x13000000, 0xC9112F26, 0xE3DF1FED, 0x284020CF, 0xC6ED13CA, 0x38B1E217, 0xD90FE8DB,
+0xD7C0C3B7, 0x341336CE, 0x45E2D239, 0x3E22F0D8, 0x461D2C29, 0x3825D628, 0xD733E5F1, 0xD7B6C532,
+0xC62B28C7, 0xD0BF3024, 0xF0BA3A26, 0x2D1D2836, 0x1BCF2DDE, 0xD02B251A, 0xB938CD2C, 0x34E220D9,
+0xC2BEC23B, 0xCC2908CD, 0x1D080039, 0x42BB25C5, 0x32EB3206, 0x36D3D1E0, 0xC32E10C3, 0x243B13E6,
+0xC02A1FF0, 0xC325D6DD, 0x23E33D2C, 0xDCD7DED5, 0x9C23C21D, 0x32E62056, 0x06314421, 0xE9E4ED24,
+0xC0C9C233, 0x44BE3FDD, 0xD25134E5, 0x292EE2E7, 0x4EE0D1DF, 0x29C82E3A, 0xF5392A22, 0x4BE2D133,
+0x58C2C4C8, 0xD2212920, 0x2935D4DB, 0xDECF4400, 0x31D7CCC9, 0x12DCFB2D, 0x2B2F0723, 0x2DE7B54E,
+0x2AD24ADA, 0x261E2931, 0xD538CEDF, 0xE12A1C2C, 0x33DDEDE9, 0x2FFC36C4, 0xE61823CE, 0xC225E1DF,
+0xC2102FEF, 0xCFD22617, 0x3EFBDA37, 0xDAD6BCD8, 0x110623E7, 0x3320E2D1, 0x1932B8D5, 0xCCEBBFC9,
+0x47234011, 0x0A2B45D8, 0x25C11244, 0x00EF3221, 0xC634CC38, 0x0F34D0E6, 0x1A3F40EA, 0x2DEDC82D,
+0xFFD2C8BD, 0x40E321CD, 0x24441BD3, 0xD932EDDD, 0x48362127, 0x1230EADE, 0x39E632F9, 0xD0D451DC,
+0x18DD2DC0, 0xC22AC72E, 0xD71E172E, 0xAFDED12B, 0x1DBAA948, 0xF927B647, 0xD7C31C23, 0x3328E73B,
+0xDBD72DD9, 0x3E23D424, 0xCB09E025, 0x16BA21CF, 0xDD00B9CB, 0x29E3CAD7, 0x3A19D011, 0xEA0EC4E0,
+0xD51CDDBF, 0x2AD1C640, 0x2CD6CBD3, 0x36D6D43C, 0xA72851C2, 0xE7EE3C3F, 0x432A4721, 0x46C726E5,
+0x253FD426, 0xD03B2C27, 0x1AEC343B, 0x5A2BC735, 0xCE31D2D2, 0xCFEDF30D, 0x492B4141, 0x20DC1BAD,
+0xDAF81CD9, 0xD6EAD21D, 0x2339CD25, 0xA31546BB, 0xD0C0F63F, 0x382BE719, 0xDA34BFDB, 0x1EDBBADF,
+0x5151E646, 0xC164D5E2, 0xD8292FCA, 0x33CF1F3B, 0x3FF531F4, 0xD3EB3453, 0xD433E236, 0x1B102B1C,
+0xC6C320E8, 0xD8393C2E, 0x17EAB7EB, 0xE0C42CE5, 0x313720CC, 0x3F39C42D, 0x33374AD5, 0xCE1ED41E,
+0xF7D2BE0D, 0xF114311E, 0xE22CD6EB, 0x4E23CA2A, 0xCDCF36D3, 0x463B1EC4, 0x312DE400, 0x1A1219CE,
+0x0CD1E2C3, 0x3850CBEA, 0x31D7CCCD, 0x3F2D3936, 0xC518E33E, 0xC31F31B7, 0xC0AED6C9, 0x2AC6E5D9,
+0xC334B8BF, 0x35EFD72E, 0xD72B3AD8, 0x31073AD7, 0x25E22D38, 0xF0CD4A64, 0x28DFDB2D, 0x22D9E714,
+0xB8E43935, 0x35FCBFEC, 0x1DC1391E, 0xD6E731A5, 0xBBC8B837, 0xBD3FBE3A, 0x00BADC53, 0x28C43F33,
+0x1CE0BAC2, 0xE0E125D9, 0x3BD6CB3D, 0x313230BE, 0xC4243D34, 0x1F24D2B1, 0x2058384C, 0x2AE9192D,
+0x302CCCEE, 0xD435C4C7, 0xB0D53217, 0xE52D32D9, 0xE0AF2127, 0x3E47D0CA, 0x4112BFCA, 0x27D0D0BD,
+0xE81E5CF9, 0xD50CBCE5, 0x2DC9B4F7, 0x3FBA33BC, 0xCBCC2821, 0xDFD51326, 0xD20ADA3A, 0x2000C3BB,
+0xC4DC4BE5, 0xCD39D7A7, 0xC40FF54F, 0xCC3BD1CC, 0x2C19EACC, 0xCD2E2129, 0x44D9F0DB, 0x2B40E11C,
+0x31343F4C, 0x0E142817, 0xD0104135, 0xE304CAC7, 0x259A1FD4, 0xC3EAED3C, 0x1A311AD7, 0xD8242BEE,
+0x20BF4EFD, 0x3FCA16C5, 0xDDEED03A, 0x0A22C52E, 0xD53B2D3A, 0xE22A2CB5, 0x291D3228, 0x2CCC2E45,
+0x48BA00D5, 0x4FE12D1D, 0xD8D91DD1, 0xCEDBEC31, 0xFBD526B9, 0x3C1DE845, 0xAFDCE91C, 0xD622D31E,
+0xCF2620D9, 0x28D63427, 0x362C3DDF, 0xD72C2530, 0xD6A7C203, 0x442C3233, 0x29B24E3F, 0xDD21412C,
+0x400BD6D0, 0xC8D4B831, 0x171EF12A, 0x34FFC9D0, 0x3B3ED02D, 0x0BC7E2CB, 0xC43D28C6, 0x42F6C948,
+0xD24AD429, 0xCB2C2A00, 0x43E2A236, 0xBC353312, 0xD91324B5, 0x30B4D013, 0xC92FDE2F, 0xBFD6D432,
+0xC82DCABC, 0xC032DC1D, 0x30C9D2CB, 0xE0341DC5, 0xDDCEBE39, 0x2CC41D58, 0xE213BF14, 0xDFCDCBD9,
+0x2E3C36DD, 0xE5262833, 0xF841291D, 0x15E324F5, 0x0315D732, 0xD1C5C5CA, 0xD73144D3, 0x2ABEBA1A,
+0xBCF42E2C, 0x00E1E53B, 0x4423AD2C, 0xB021CF20, 0xD8D5EEB6, 0xBD35BCCD, 0xE6D85334, 0xD549DF2D,
+0x35C74D19, 0x3F4BBE3D, 0x1F3FA3EB, 0xCEB2B335, 0x35D0BCBA, 0xD5D8CF27, 0xB11DCFC4, 0x24D3D408,
+0x183B35D0, 0x1C14D619, 0x35D445BB, 0xDE452444, 0xC83315C5, 0x3F2122D0, 0xB623C737, 0x33321DD0,
+0xD6DF33DE, 0xDE39AA20, 0xCF00F239, 0x412410D5, 0xC5DC2339, 0x1D3BD1B6, 0xC0E3481E, 0x1030451A,
+0x30EA30D1, 0xC6DCB9C7, 0x2C36DC17, 0xCBB0ED30, 0x26C20FBE, 0x35D6D528, 0xD6CC2242, 0xEDE4241E,
+0xC036DED9, 0x26303627, 0xB6B6371B, 0x26E3D6DF, 0x0D36C729, 0xBEEE4FD7, 0xE3C839D1, 0xC0313031,
+0xAEAD2828, 0x36323D23, 0x13342728, 0x2904EE20, 0x261DDCD6, 0x0CBCE1E1, 0xD2CBD5C3, 0xFD1C4B2C,
+0x1CD8CDDB, 0xD4BE53B1, 0x2BD6CDDE, 0xC61A01E1, 0x3D41BED3, 0xE0333BD0, 0x4040E821, 0xC4A32449,
+0xD3D3A8BF, 0x36E11C2C, 0x3CC1321B, 0xC5D03E1C, 0xB813D9DD, 0xD6C51838, 0xCEBED141, 0x1FEA3813,
+0x3611CAC0, 0xD12EE2C6, 0xE1BFD22D, 0xD0DAE9CC, 0x0E03DB00, 0xBC37393F, 0x422BC916, 0x49C4D72A,
+0xBFC0243B, 0xC1C2F1C3, 0xDF36CC32, 0x49E53EE4, 0xBFC02CDF, 0x2BEE2BE1, 0xC23224C5, 0xD63F16DA,
+0x15DBC21F, 0x49E4CC57, 0xBE272CC7, 0x274E0643, 0x5C1DDBE7, 0x34B73DF0, 0xCD2D2EB9, 0x1AD43BCD,
+0xFD3047D3, 0xDAC53936, 0x1328DCC8, 0xBC32331D, 0x001F2634, 0xC845410F, 0xDBDB1C2B, 0xD1D819D0,
+0xEBDDE12D, 0x37E335D7, 0xCDA84635, 0x0F362FBE, 0x30C229C4, 0xBC1DC43E, 0x3CD02F3D, 0x2EF5F5F4,
+0x462D15E6, 0x362F1FC5, 0xC0C9CD49, 0x14C92639, 0x2BD135D0, 0x49C6B8DA, 0xDBC7E1D9, 0xC6382328,
+0xEDC217BC, 0xE4D1331C, 0xDCC741CF, 0x283A341C, 0x38B93420, 0x46001BE7, 0x3E1030C4, 0x42A8C1CD,
+0xBB3339C5, 0xE538DD20, 0xDF3F2602, 0xE52BE5C1, 0xBE2B41BA, 0x3BD4D337, 0x2D39C3DB, 0xC3BB36F0,
+0xD12CCF17, 0x4130B93A, 0xE31DEAEC, 0xF9DEF52D, 0xADD35231, 0xD62ED0EF, 0xBE303521, 0x1E2ADB41,
+0x310AC62F, 0x22DA2DC5, 0x26E1D9D0, 0x1F4D2AC1, 0xC8E6DCE1, 0xC6B3D62E, 0xD6D20019, 0x4EDC2D39,
+0x342AE224, 0x07CBD4CB, 0xE43FB3BE, 0x1BE53748, 0xDD44C8A2, 0xBC34B7BA, 0xA1DD1D43, 0x23D31827,
+0x3519E93F, 0xC83B3720, 0x1BF0B72A, 0xEAC7DCC0, 0xCA4B28EC, 0xD54D242F, 0xF35435D1, 0x3DDEC824,
+0xC31128CE, 0x0B0FD136, 0x442D1E2B, 0xCBD1C3EC, 0xD635341B, 0xDD1F2931, 0x3442CDD0, 0xDADD3600,
+0xC31E40E3, 0xB0BD1B22, 0x38CA1F0D, 0xD42D3043, 0xE65127E4, 0x2F15C9D6, 0x3E360B30, 0xB2CB1524,
+0xC7BBC4DF, 0x38D244C7, 0x483B31C0, 0xDB43C8D1, 0x43DCDBBC, 0xDCC7571D, 0x24BAC2C6, 0x3B4ED6DC,
+0x3AE03245, 0x0C40E1E2, 0x493724C0, 0xD4ECC537, 0x36CC30BA, 0x1C303FC6, 0xD12F27D5, 0x00C4272F,
+0xFFFD061D, 0xC2D9251D, 0xC1D7D625, 0xBFCD3AD3, 0xB42E11D4, 0xC7230E1E, 0x24CD2C21, 0x0BD436F9,
+0xCECCD331, 0xE6BDE0D5, 0xD2C6C941, 0xE1D1D3EF, 0xEB4123CD, 0xDA1755E5, 0xCEE4C529, 0xDC43C5D1,
+0xD8FDDFEA, 0x1AC9C317, 0x22E13DCE, 0x01D7BA29, 0x1551E449, 0x43CD363E, 0xC6281B24, 0xCBD23725,
+0x3200D61C, 0x35D6CCBC, 0x262CB22C, 0xE2E43DB1, 0x26493335, 0x3AD72ACA, 0xC4244430, 0x3AD7C9C3,
+0xC91ED8D1, 0x40EAD2AC, 0x38C9CDAC, 0x413BC137, 0xCD304B1F, 0xEFCA2323, 0xAC3FE4CD, 0x3616F6E0,
+0x41C2272A, 0xDCD7B2E2, 0x27D9E018, 0xDEE2D943, 0xD5C01F22, 0xB0D93540, 0xB411C13D, 0xDE39CBE0,
+0xE2C6F126, 0x31450828, 0xD2BC2927, 0xBACA1DD1, 0x34DDCDC3, 0xD8302AD5, 0xE2D02824, 0x0B2BE03E,
+0x3BCAFD2C, 0x2E461D45, 0xBBF6F32E, 0x31E5DED2, 0x2ED4E847, 0x3BDDCD29, 0x231331D8, 0xF521CF2E,
+0xD024C718, 0x20D91011, 0xDB1E0F34, 0xD931C0BB, 0xBB310837, 0x29B7C5CB, 0x3B31DEE0, 0x0836B926,
+0x28CB1FD4, 0x31072A30, 0xEE41F300, 0x35CB501E, 0x603A2B47, 0x2C2739CE, 0xC0CFC4D9, 0xE0CED43C,
+0x22BDC9CA, 0xBAEC122D, 0x4D16F2C5, 0xB6E5C3DA, 0x19D5C9D0, 0x1D1DDFC2, 0x223A23FB, 0xCD382B09,
+0x3025C2E6, 0x3918BDD5, 0x33DFEF0E, 0x291A302B, 0x14184930, 0xD414D233, 0x473CCED7, 0xD2D318B7,
+0x3E2738D2, 0x2928D537, 0x002E14BB, 0xE2E03734, 0x1B1EDFD1, 0xCF483FD8, 0x3D043844, 0xD52E1C4C,
+0x2823EAD6, 0x1F2D241A, 0xBC4B22E6, 0x2EE7DFB8, 0x3424E2F0, 0xD41BD7CB, 0x2BBCDF1E, 0x155B5033,
+0xB527D543, 0xE3C5E9F4, 0xD9B5C92B, 0xD3CAE0DF, 0x31C4CED7, 0x0BD33B25, 0xCF3B111D, 0xE62F241F,
+0x22D93D22, 0xCF27EF43, 0xDAD2C5DD, 0xC60019BE, 0x2935D5C3, 0x2D0BCD22, 0x224714EA, 0xE7CAB817,
+0xD1282222, 0x3C25EBD0, 0x45F0A8DC, 0x3ACFB92D, 0x34B4E7E6, 0xE93523E0, 0x2C292FCA, 0x44D4C7AC,
+0xEA22C3FE, 0xEF37E524, 0x313ADCDA, 0x2212D846, 0x1BE0FF4B, 0x1CBC2E34, 0x174AF041, 0xD245223E,
+0xE1DB2BDC, 0xF2E9A515, 0xE3C33BCC, 0x43DBD5CE, 0xF11700DB, 0xDE44DD36, 0x26E1D604, 0xC2ACE419,
+0xB9D8C7D4, 0x37EFE21E, 0xE2BAC2D1, 0x3926E91A, 0xD013E8BC, 0x2C43ECD9, 0x232B30F8, 0xD6C9E0BD,
+0x26A718BC, 0xEFF6C840, 0xC13332C2, 0xC4B9BD36, 0x1B4921BD, 0xD0EBECE0, 0xBCE4D647, 0x35D4DCD5,
+0x1BBE4C27, 0xECC1B7CB, 0xD820284D, 0x0BC33DBF, 0x34D03140, 0x38EC3900, 0x29EBDBC9, 0xD43A3041,
+0x3AE0DD0C, 0x09CFCFDB, 0x44E0C7CE, 0x20DBC4E3, 0x26DC41DE, 0xBA3AD2E5, 0x2ABA34E1, 0xD70FBEB7,
+0x40E03FE2, 0xF8D62041, 0xBD47B7EA, 0xD2E03E34, 0x1FC6D2DE, 0xCF1CD155, 0x32D52630, 0x3C463D2C,
+0x253130E3, 0x1ED8BB4A, 0x411BDFDD, 0x3FD8EE35, 0x33D02BBB, 0x002ECAAE, 0xDF1FDFEB, 0x2CD8DA3C,
+0xB6352D59, 0xD5BDD3B9, 0x1946630D, 0xD9181731, 0xD419D31F, 0xCE2FB7D0, 0x32EFE3C3, 0xCDDB2FD3,
+0xD247BB3E, 0xBA41DECA, 0x23244AD2, 0xDEAAF2B2, 0xB8D525F2, 0xD327A4D0, 0x3718C0E2, 0x2D3E20D9,
+0x24CFDF12, 0x26D0BE1C, 0xC8D9282E, 0xEB54CADF, 0x44DB1AED, 0xC927BF29, 0x2E002B39, 0xBB2625E4,
+0xBB4421D2, 0x18CBAA3A, 0x213ECD37, 0x0F33163C, 0x3622E432, 0xE5C7C62B, 0xDE39E7C7, 0xF3151CCA,
+0x2ACED6D2, 0x2ADADFC4, 0xD6241ACC, 0xB92F0622, 0x1BC639E4, 0x33D92BC4, 0xAF14E0DD, 0xE6EEC2C9,
+0x16BDCAE3, 0x272E1AC3, 0x292E1EBE, 0xDCE439B1, 0x4225E1C6, 0xCD372420, 0x27D716B1, 0xD83EE339,
+0xDBCAD4B3, 0x26385033, 0xC5373528, 0x2DD2C936, 0xF9D5BAE6, 0x2E2DC7C2, 0xCD4BD045, 0x1DDF2DE8,
+0x0E3740CE, 0xE5D82C36, 0x29D1360A, 0xCD03C8C6, 0x37CCCCD8, 0xD83AD330, 0xBFD619CD, 0xDEDDC432,
+0xD0DADAEC, 0xD3E14F26, 0xA8CD35D9, 0xA3F325BD, 0xBC1626D7, 0x3AB81BD5, 0xBBFF1832, 0xC9DB2126,
+0x16250000, 0xE003BB39, 0x35D533CE, 0x1801C610, 0xD9D44644, 0x474BD3C3, 0x1C30C51E, 0x2E3342C6,
+0xB7D6B3E9, 0x5AE44BD2, 0x46D62830, 0xDBDD4B0C, 0x38CF2F1D, 0x2D331ACB, 0x36CCDED6, 0x10D7DA10,
+0xD42246E0, 0x0FC73FC0, 0x2C3E1E37, 0x2CF0D1B8, 0x17412EDD, 0x2630D0DD, 0xDBD535E4, 0xD927DA43,
+0xB636C11E, 0x25CBE2D8, 0x4711F64B, 0xBC2C2029, 0xD0E3F538, 0xB92C27DF, 0x272DE2D2, 0x0CC5BCEB,
+0xC0BC2EDF, 0x26D131C9, 0x1F092FEE, 0x2D342B34, 0x31B825D5, 0xC6BBC9C1, 0x38F1D0E4, 0xCBF7FDC7,
+0xC3CAC9B6, 0xDBC436C6, 0x19DC1FD8, 0x28CCAED1, 0x1921BDB4, 0x24BCBF33, 0x301EE244, 0x36BDBDE1,
+0x31C5B724, 0x00003ECD, 0x220B1EE7, 0xDEBAD3DC, 0x21CAB942, 0xC9193B33, 0xE3CD3A0A, 0x0DC22CC3,
+0x3EC0C7CB, 0x43F2BFE4, 0x0ECFE9C6, 0x3ECA2FD5, 0xCB492DD9, 0x23E7BDC9, 0xD33DC151, 0xFCBE2933,
+0x5ACEC51E, 0xF2172E1C, 0x3C24DE23, 0x3320D4D5, 0x2AB53C20, 0xC328323A, 0x45EA4133, 0x24391A32,
+0xC7D8DA2B, 0xF4EAAF39, 0xDFCA36E3, 0xDEBED5C8, 0x36252C36, 0x04D7DBC7, 0x30CAC23D, 0xB60BF73D,
+0xEACA2A30, 0x34C12ABE, 0xE2D747D8, 0x19D21FE3, 0x1DFD24EC, 0xB6B5D9C4, 0xBF2C1DD2, 0xD2B442E0,
+0x37CCE1BD, 0x2AD1E1CC, 0x18DAC6BD, 0x213AD93E, 0x2031D8CB, 0xD82F2D12, 0xD112D2F2, 0x19CD43F1,
+0x36BCB9CA, 0xD7C5DDFC, 0x34011D28, 0x3BDB0000, 0xCD00E1AA, 0x5228252C, 0x3512CA34, 0x0EE53A41,
+0x242CD2C2, 0xF425DD2A, 0xF02FDDF8, 0xD82026D3, 0x4ABEE7B8, 0x2DC9DDDC, 0x3BEECD3C, 0x2ED1442A,
+0xC713E92A, 0x310CC12B, 0xE128D520, 0x48E5C1CF, 0xC91CE3ED, 0xCCCBC72F, 0x0B23E3E9, 0xC1D2C8B6,
+0x511ED534, 0x2AC7D4C3, 0xCFDCE335, 0xD4C9F643, 0xD433EBF1, 0x1BF8DA31, 0x223FC515, 0xCC2F13C2,
+0xF825FFC8, 0x35E51633, 0x36293C4A, 0xC72ACA36, 0x4544E736, 0x1BD9315C, 0xCF16DFD4, 0xD0AFEFC3,
+0xCECBBDE2, 0x05D6AA32, 0xD851F045, 0x240FD948, 0x3D35B2E6, 0x17D7C7EE, 0x0B22DACE, 0xF53AD637,
+0xB2372DDB, 0xF6DF14CB, 0xBED95B24, 0xD4D5190B, 0x0000CB29, 0x3DC1ECF5, 0x30C6B34F, 0x1D2629B6,
+0xD429D1CC, 0xD50716E3, 0xD0C3C2CC, 0xC82CC7D4, 0x5F27C718, 0xE6201BE4, 0xDACFD42F, 0x2E2EE9D4,
+0x3AD036C0, 0x3828DB20, 0x27D63439, 0xD21026DE, 0xD60710BC, 0x13AEB9E9, 0x28C41944, 0x49D5E3D7,
+0x27163429, 0x1BBAD528, 0xB12A14BC, 0xC72FD9E2, 0x1F43213A, 0x133B3EE5, 0x3622FE21, 0xD438DCBE,
+0x41CB25E8, 0xD0D83E2C, 0x32BFC2CE, 0x362BE1DA, 0xC3CBCE24, 0x22DECDC2, 0x30CBD5CD, 0xD42DBC26,
+0xDDF6AAD0, 0xCADC0EC8, 0xDC333434, 0x333114F5, 0xCE24361A, 0xB1C5D123, 0x4B25380B, 0x2FEC3634,
+0x3EE2A610, 0xB4CEE72F, 0x29BCC721, 0xB23850CD, 0xA51AE4DD, 0xC81BBC2F, 0xD4DF0000, 0x2DE5D1E8,
+0x2E0A28D0, 0x29E3C1E1, 0x56F62DD6, 0x3828DD4B, 0xC41ACFDF, 0xD2390A3A, 0xDF3028CC, 0xE8C5E22B,
+0xD11CDCC1, 0x44D1CBEC, 0x33C73DDC, 0x1D25B7DC, 0xE7DD0525, 0x2743C41A, 0x2AE2D3DC, 0x21D6E82A,
+0x133CBAC7, 0x593F34B0, 0xDA5FE339, 0x5206252C, 0x394FD5BD, 0x21D2C92C, 0x292426E0, 0x48E0362B,
+0xCC272DE4, 0xC815C4DE, 0x49BD5D37, 0x1912342F, 0x2CE9E01B, 0x30D8D2D9, 0xDEBF46E8, 0x392F473A,
+0x3A18D324, 0x2FE31FD0, 0x2B2B2710, 0xB53B2AC7, 0xB3481CFF, 0x2DD7CFBC, 0x2232351A, 0xD7C91C16,
+0x1CB4DA16, 0x1BCBDF3C, 0xD2213822, 0x2121DB1F, 0xDBD1C02A, 0x27DA33C7, 0x351FC4CD, 0x0000B9D8,
+0xC2142C29, 0x38B9DD31, 0x54E60CDF, 0x2A4A42DB, 0x35CF2BB5, 0x0CEA3323, 0xC2D8BEE2, 0xC61F3B33,
+0x10CE11D0, 0x3336CE2E, 0x303ADD25, 0x3347B7D9, 0x33222C1A, 0x1A43E4C1, 0x4326E019, 0x2AE2E92E,
+0xD2C5DC40, 0xBA45D7D6, 0xDF341BDC, 0x3A361925, 0xD4FEF4D2, 0x3018DA31, 0xDDC12817, 0x3038DE24,
+0xEDC0292B, 0x3E3E3336, 0xCB1BEF17, 0xE10B2EBF, 0x2B20BABF, 0xC80C421B, 0xC2411D21, 0xECD8C7D6,
+0xC824E4AD, 0x65B53DD7, 0x304325D4, 0xDD1DEAE6, 0xD33131CF, 0x2F36452B, 0x23B219E9, 0xC1C23529,
+0x21DC3836, 0xD618E933, 0x393E2C14, 0x362ACA3A, 0xD6E2C237, 0xD6085111, 0xD721FCB1, 0xCCE6DEBC,
+0xD1DCD920, 0xC3320000, 0xEA26CB19, 0x18BB26B7, 0x233BDF4F, 0xDE23FD3F, 0x2ADA1ECB, 0x3122E839,
+0xEF42D7D7, 0xCAC7202D, 0xCD4CDED1, 0x2633C824, 0xD0123E4E, 0x104429B6, 0xC8CD4B00, 0x5136B738,
+0xCFC8202B, 0x163135DD, 0x2C3821E8, 0x29314CE0, 0x0F3CE4EC, 0xE5432DAF, 0xC63AE340, 0x4639DB18,
+0xDF282511, 0x25B5CBD3, 0xCAF50F37, 0x1E443B33, 0xE32CD32D, 0x10DA47B9, 0xDA410526, 0x34F0E4EA,
+0xCEED3134, 0xC0B31AA7, 0xD12B1AD3, 0xC722C743, 0xBFBEDD1F, 0xD328D52B, 0xCC384024, 0x18B43FB8,
+0x281742BB, 0xD8322FD2, 0xB5D628C6, 0xE420C0CB, 0xC9EB391D, 0x41E6B419, 0xBCC543E1, 0x2C37DDEF,
+0x1F2D41E9, 0xD0F0D205, 0x1FEAE5C5, 0x15DF14B8, 0x47E327D9, 0xD328CE25, 0x36DB2E24, 0xD92E1EBF,
+0x2B3230F4, 0xD1EAD5D9, 0xC7212ADA, 0xCD1235DC, 0xC519D5DB, 0xBFC631DF, 0xD5222B35, 0x35B6DEC6,
+0x153C4428, 0xC5C3E42F, 0x1B2E2D2A, 0x3CD422A6, 0xC22842CC, 0xCADAC43D, 0x2C10DAD2, 0x18C708B5,
+0x36D636BD, 0xCD29C5D3, 0xBFED433A, 0xE0384939, 0x005629F2, 0x36E2C425, 0x2830DF1C, 0xC9CB351C,
+0xB0D7D3E6, 0xF3CEC214, 0xDC25D1C7, 0x1BD5CBCC, 0x19DD21C9, 0xC854EE30, 0x4FE14EE3, 0x38D5D3E8,
+0xC9D126E4, 0x2FDD2130, 0xCE2130D1, 0xCFCAB0C9, 0xC71F0E1A, 0x2627D4DE, 0xCBE9DACD, 0xE7121F22,
+0x2CC8E435, 0x3838391D, 0xDD37242E, 0xEBBE2F10, 0x233C0000, 0xAFC91CE7, 0xCE3536F6, 0x0F25D248,
+0x2D04EBE4, 0x3FF634E5, 0xE929CE26, 0x24E0BABE, 0x302CD52B, 0xD1E2073F, 0x4333D11C, 0xDA48C2D1,
+0x2D33F315, 0x29B2C8C3, 0x3B26E3EB, 0x3821C6DA, 0x32163DD6, 0x1DC737BC, 0x463CCD24, 0x19D63DDD,
+0x200BDDB7, 0xBDD41E22, 0xC40A444F, 0x34BF49ED, 0xD4C0C0F4, 0xA41FB721, 0xBEDF51E8, 0x15D82A17,
+0x383FDF21, 0x2B2918D0, 0xE6EB4718, 0x244530DB, 0x25DD3919, 0x3BA819BE, 0xDAEFBE38, 0xEB3FCCD9,
+0xD1DF2E1C, 0x2520C3E3, 0x32D0BFD5, 0x37312BD5, 0xDC29D0C9, 0xBED02432, 0x310C3A18, 0x411ED8D7,
+0xA4E5CE22, 0x163CBD30, 0xC3E2BE45, 0xCFBBAAC3, 0xC8C0E4CF, 0x0000CEFC, 0xE8CC2433, 0x39ED223F,
+0x243AD83B, 0xD44D39C6, 0xCABF24DE, 0x252050EB, 0x29C73B40, 0xD4F434CF, 0xCAE4D5DF, 0xC5F2EEF1,
+0x2919C8D2, 0x2FC4D20B, 0x1CBF5528, 0x3DBBD52D, 0xE03AD031, 0x3D2CCA17, 0xBE23BDB8, 0xE7DA3B30,
+0xD1D2D331, 0xCC1825EA, 0x2F2D39D2, 0xF9313526, 0xCFBCC2E1, 0xE5BE3AD4, 0xBF59BE1C, 0x22FFC8B5,
+0x32B14015, 0x31D6BEE6, 0x46DF3843, 0xDCD1D8B2, 0xE728C8BE, 0xEA1E2A28, 0xD1D8C7D2, 0x30CAF134,
+0xEDD0EADA, 0x13C1E3D0, 0xB9D7DC29, 0xEAB93624, 0x24E49F38, 0x2E3FDEDC, 0xE6D0D23D, 0x1FBB3ED2,
+0x23D540DD, 0xD727D92B, 0x251E33E6, 0x4BA7EECE, 0xC1CD203C, 0xC5CFED32, 0xB9CF35C3, 0x29710000,
+0x29D7C636, 0x3A3ED1D0, 0xCC1B271F, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+hard_output0 =
+0x42441F44, 0x006DFAE9, 0xDBB63AC2, 0xA3365CB3, 0xFA44372B, 0xC2603862, 0xA6C86E09, 0x416F0BF7,
+0x8E58011D, 0x47C54B99, 0x269AD14C, 0x0B286902, 0x093E4C64, 0x8D84D8E1, 0xB8AE0993, 0x49567118,
+0x8CEAABD2, 0x12D33409, 0xE3358237, 0x130176F6, 0x0986718F, 0xA18C83B5, 0x04EEF058, 0x59704040,
+0x7478FE81, 0x01D27193, 0xEA1AD7F3, 0x21E6C2B9, 0x6478A1FF, 0xED9959AD, 0xE39E57BE, 0x4D859105,
+0x056EF72D, 0xBD170BB7, 0xF01F9ADD, 0x99BF0C05, 0x44BAD09F, 0xF6ABDD61, 0xC0F8F116, 0x2972B53C,
+0x0FEC944B, 0xD1675432, 0x0025F563, 0xF42B2EA8, 0x808E5C37, 0xB6E79AD5, 0x3706284D, 0xB9C0AFA5,
+0xFF0E7E28, 0x5FA45C6F, 0x9CD5244E, 0xA013DDEA, 0x0D27D1A2, 0x2AE414AD, 0x41614379, 0xE6B68872,
+0x5585D926, 0x5098D45B, 0xF8980ABD, 0x65821418, 0xEF8968A3, 0x301DEC3A, 0x57EF2A7F, 0xC17BE446,
+0x94B65D62, 0xC9E6F350, 0x2E6130BC, 0x235F2E5E, 0xF1C13241, 0x94B291D6, 0x8C342458, 0x175B1FCA,
+0xE07310FD, 0xD24934DD, 0x3CE8D053, 0x5C8F243D, 0x945B0AB3, 0x50EB8CC1, 0x8EF499D4, 0xA67801BF,
+0x1680F061, 0x283FE705, 0xB8D7E773, 0x13AD3D2F, 0x4A6C305B, 0x1C1E5B12, 0x6F57D880, 0x8A666E5E,
+0x26511296, 0xFFC09750, 0xFF91760A, 0xAC26795C, 0x75F321FD, 0x4221B9CB, 0x2E119188, 0x0772A832,
+0x74D6036B, 0xA001B764, 0xF0D72816, 0xCBE119F5, 0xC5D6B248, 0x1BBAB091, 0x1BBE518E, 0xC647DCB1,
+0x240CE252, 0x3F8AAE65, 0xE0CDEAAB, 0xBE724062, 0x5720636C, 0xC816E67F, 0x8E5A6C7F, 0x9E2738B3,
+0x2419CBDF, 0xA6BF9B75, 0x48B21FC3, 0x8C4AED8D, 0xC1D8BCA8, 0x01353DE3, 0xA99D7D85, 0x9C6DDD20,
+0xA650398D, 0x4E7E5777, 0x623FA183, 0xF02B261F, 0xF0BBA66C, 0x35AC701F, 0xD818E590, 0x7CA713BE,
+0x01153743, 0xD1302B91, 0x55459E3C, 0x63FDA677, 0xECF93759, 0xBBE0E349, 0x73FD4FDE, 0x5C48C9E8,
+0x70F46F73, 0x0B15437C, 0x9F4159A0, 0x7C343E78, 0xB3D085E1, 0x04266351, 0x1933DAD9, 0x83AF8C77,
+0x2AC11BE1, 0x76C25CF6, 0x083E6CF7, 0x00DDBA22, 0x6AD2F284, 0xF87DBC6D, 0x7D40B45D, 0x6EBF21BD,
+0x972E8535, 0x632B51D8, 0x5BFBA23A, 0xCDF1D44D, 0x11749158, 0x06DC45D1, 0x93D63A59, 0x9D0552F7,
+0x61C8048F, 0xBFDE9FD2, 0x77827B70, 0xFFB130DC, 0xF18BBCE9, 0xD50C324F, 0x15521E17, 0x7D2470C3,
+0x5EE34C4C, 0x2AC0C8AB, 0xB2283269, 0x4107FE15, 0xE6DA1ED3, 0x66A2A3A7, 0xB204B429, 0x0E02F9D0,
+0xE5A45550, 0xEAF94102, 0x18F57A81, 0x8EEC9C68, 0xD575F397, 0xFC1BE8BF, 0x104E073F, 0xDF219F97,
+0x2EC93D4A, 0x6033EE18, 0xC1351A38, 0x420C6A87, 0x0B5DC831, 0xD90D23E2, 0x13B9E883, 0xCD65BE3C,
+0xDB17A9CC, 0xC73C2A7E, 0x3281
+
+c =
+2
+
+cab =
+1
+
+ea =
+4918
+
+eb =
+4920
+
+c_neg =
+0
+
+k_neg =
+3072
+
+k_pos =
+3136
+
+rv_index =
+0
+
+iter_max =
+8
+
+iter_min =
+4
+
+expected_iter_count =
+8
+
+ext_scale =
+15
+
+num_maps =
+0
+
+code_block_mode =
+0
+
+op_flags =
+RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN
+
+expected_status =
+OK
\ No newline at end of file
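For reference, the scalar keys in the block above map one-to-one onto the fields of the bbdev turbo-decode operation that the test harness (test_bbdev_vector.c) fills in before enqueueing. Below is a minimal sketch, not part of the patch, assuming the rte_bbdev_op.h layout of this release; the input/hard_output mbuf wiring performed by the harness is elided, and the *_crc24b comment is an assumption since that file's op_flags block is outside the quoted hunk.

/* Sketch only: populate the transport-block parameters of a turbo
 * decode op from the vector values above. */
#include <rte_bbdev_op.h>

static void
fill_turbo_dec_tb_op(struct rte_bbdev_dec_op *op)
{
	struct rte_bbdev_op_turbo_dec *td = &op->turbo_dec;

	td->code_block_mode = 0;   /* 0 = transport block mode */
	td->rv_index = 0;
	td->iter_min = 4;
	td->iter_max = 8;
	td->ext_scale = 15;
	td->num_maps = 0;
	td->op_flags = RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
		       RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN;
	/* The *_crc24b variant introduced next would additionally set
	 * RTE_BBDEV_TURBO_CRC_TYPE_24B (assumption). */

	/* c code blocks in the transport block; the first cab of them
	 * are rate-matched to ea bits, the remaining c - cab to eb. */
	td->tb_params.c = 2;
	td->tb_params.cab = 1;
	td->tb_params.c_neg = 0;
	td->tb_params.k_neg = 3072;  /* CB size K-; unused here, c_neg = 0 */
	td->tb_params.k_pos = 3136;  /* CB size K+, the k3136 in the name */
	td->tb_params.ea = 4918;
	td->tb_params.eb = 4920;
	td->tb_params.r = 0;         /* first CB index, the r0 in the name */
}

In practice such a vector is exercised through app/test-bbdev/test-bbdev.py (typically via its -v option naming the .data file), which parses these key = value blocks at run time rather than compiling them in.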
diff --git a/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data b/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data
new file mode 100644
index 00000000..a866878a
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data
@@ -0,0 +1,677 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_DEC
+
+input0 =
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0xD0000000, 0x33CDC8CE, 0x4FAEE4CC, 0xC7DC23C3, 0xC306D6CA, 0x2B360A24, 0xE91C423D,
+0x1F323427, 0x4B1C33B6, 0x3EC9D0E7, 0x39204DD1, 0xCCD73C37, 0xC6F6D8E1, 0x1DF828F3, 0xDEE34025,
+0xC41EC235, 0xD035E1D8, 0x3CC32843, 0x29B60C3C, 0xE92E122E, 0x454BD4C8, 0x35D02543, 0x33E4D0AC,
+0x07AF2228, 0x38C62DD1, 0x233800C5, 0x3CC02DD1, 0x35E73B19, 0xDEBED026, 0x33C5EDD9, 0x33E51527,
+0x1F21EA18, 0xCC3E3BD2, 0x013CC14C, 0x3724D23B, 0x23CDED2D, 0x21424630, 0xC5B0E64D, 0xCFC03BEF,
+0x24294241, 0x46C526BC, 0xD82F334C, 0x1E283DCB, 0x3F3F33C7, 0x4A14D2A1, 0xD92F2AF3, 0xCFA820CC,
+0xDD30C6CB, 0x2B3ACAB7, 0x4AFE29CD, 0x25BD3200, 0x2D293323, 0xD32A3B29, 0x29E64D0C, 0xBE4BFB3C,
+0xB1DD242D, 0x0427F11D, 0x3046CFDA, 0xD633C0D5, 0xC0DDBE01, 0xDA3DD9B4, 0x3BCE3638, 0x23262CC6,
+0x432BD2AE, 0x23A9E1C7, 0x1BE45609, 0x22CD35AA, 0x32D6371F, 0xCE27352A, 0xE2B73F40, 0xC2D0CE2C,
+0xE72E3A3E, 0x2FF52147, 0xEFCF2E33, 0x003103CA, 0xC6C14A33, 0x45CAC0CF, 0x011E1FD3, 0x23D3C72B,
+0x2EB644C4, 0x484BD930, 0x3AC22E1A, 0x1AB6DD42, 0xCEDDE232, 0x3ACCCC31, 0xB830CA2D, 0xC930D2D7,
+0xD0E049BB, 0xE6243524, 0x3EF53914, 0xCB083BE3, 0x411EDBE9, 0x30C3C3E8, 0x3721F5E7, 0xBBDF3FD9,
+0x2AD21F33, 0xEE402858, 0xD3AFD237, 0x3D39401D, 0x30003623, 0x4734DC46, 0x192ACD32, 0x3324CAC2,
+0xD714D4B5, 0xDC2E21D0, 0x2327CDBA, 0x15B739C3, 0x31C9EEB8, 0xFAB5372E, 0xC239B5DA, 0xE628440F,
+0x34D425C6, 0x293BE6FD, 0xBDC04BBC, 0xE0DDB7CF, 0xD627BC34, 0xA5422FCB, 0x13211ABC, 0x1708E51C,
+0xCE33DAD1, 0x51AEC731, 0x2E383824, 0xC722D0D3, 0xCB32292B, 0xCECC402B, 0x1B312EF0, 0x34CF46CA,
+0xE3BD252C, 0x0C21EC36, 0x4ADFECBE, 0xC4D2E62C, 0xADE5BAD3, 0xDDCC1D18, 0x521AC135, 0x3F302D37,
+0x22CE2DCC, 0x262A1BCF, 0x4EE7D7CA, 0xD43D3526, 0x1840E020, 0xC3201F2F, 0x3ACB453E, 0x41EAD5B6,
+0xB6C4CF17, 0x243A55D4, 0xC7501E28, 0x351DCD1C, 0xE3A8D502, 0x3235271C, 0xAC342F00, 0x34B4E3D8,
+0x36473AE4, 0xAE403CEB, 0xBC3FDA3F, 0x552CDBEC, 0xE7C42B5C, 0xE743E13D, 0x18D44650, 0xBA2EDF31,
+0xB5C3EC42, 0x432EBE2E, 0xADDE4F2A, 0x401BCAD6, 0x03D6EAEB, 0xD0DEB133, 0x153FC4D4, 0x30DA2FD3,
+0xEE282FD9, 0x242ADC39, 0xD72B42D9, 0x44C7E0D5, 0xC72CB3C7, 0x27C9472B, 0x0036DFB6, 0xD9C53112,
+0xC0360E0E, 0x4EEA0E27, 0xEDCF27C1, 0x1739D5DC, 0x22DE2CE1, 0xDCCAF633, 0x31D02417, 0xD7DCD0DA,
+0xDE1DE0DC, 0xD827DD22, 0x1CDBD7EC, 0xCE171BCB, 0xEE2347CF, 0xC22C312D, 0xDBCC2A38, 0xBE0ADED4,
+0xDC20CC2D, 0xD5380DB2, 0x2A32C701, 0x3EC7D41E, 0xCDD7D74C, 0xE83E2A44, 0xC12B26E6, 0xE70046D7,
+0xD626D1EC, 0x35DDE41D, 0x303F27BD, 0xC94D4517, 0x31D1E229, 0xAD3529BF, 0x3AC5D21C, 0xCF1DB731,
+0x2738DBEB, 0x1F382B33, 0x1ED52213, 0xFCE22CD2, 0x1330331E, 0x4A30BA0F, 0x1F38B42A, 0x2E34CB1F,
+0xE3E125D5, 0x3AC72D17, 0xBD3C342C, 0x37272810, 0x1D34E4C7, 0xE5CC2EF8, 0x1E2FF1DF, 0xB9294236,
+0x26F10017, 0xBDE3FBD1, 0xB7DB52D0, 0x2E3CB8E9, 0xB3343D3C, 0x3E3BDFDD, 0x1F46393A, 0x25FA20C1,
+0x2D194CD1, 0xE9471D26, 0x34C3242E, 0xAE012034, 0xC8F3FED7, 0xB825362B, 0x322DC136, 0x42F41511,
+0x181855C2, 0x4937D5EB, 0xE13CDC23, 0x08242A28, 0x2DCCDFC0, 0x13CEC64F, 0xB3E6F7ED, 0xD93401EE,
+0x3D02DEE0, 0xCFC52300, 0xD2F1BE38, 0x2D092924, 0x253E2AF0, 0x37DACAD0, 0xDB1ECFE7, 0x40CED84D,
+0x3706C42A, 0x4231D91A, 0xDD30471D, 0xBD272DC8, 0xCB1941D9, 0xC22FC7C5, 0x333F3C0C, 0x3F3D2518,
+0xDDCCD143, 0xB3D834D5, 0xD231BEE4, 0x2BDB3B3D, 0x2F2754E7, 0xEAF3CE64, 0x3632E4C0, 0xD83324C2,
+0x11A4D5CC, 0x00D627D3, 0xD51F2731, 0x17C92BE2, 0x1AD62E1E, 0x16DB2418, 0x16B83823, 0x2711D54B,
+0xDFB02BDF, 0x28CDA600, 0x2AE9ED3E, 0xD314CD20, 0xD04F272D, 0xD0F9CD37, 0xCC1735E5, 0xD328EC37,
+0xA7D31A26, 0x214BD9A5, 0xCFCB4226, 0x233C4AD5, 0x26EF37D8, 0xD83E3230, 0x3EC8C93C, 0x07B9282B,
+0xD1E6DAF2, 0x31C52EDD, 0x3A0038A5, 0x56C1E0DF, 0x22E437C0, 0xDBC8143B, 0xCF1DB03F, 0x1D4BE636,
+0xB8D3C23A, 0xBCC3372E, 0xAF284228, 0xDCE02F1E, 0xB9D4E3D4, 0x56D3D0D8, 0x494816C1, 0xFB1240C3,
+0x23B1BDD2, 0x1236322E, 0xC2CBCB47, 0x102FE7C8, 0xBB0AE1D9, 0x3831BC29, 0x39AECBD9, 0xC7CF1ED2,
+0x0AE43913, 0xDDD62F2C, 0x4FD522E6, 0x3CB90A55, 0x2101C8EC, 0x432604BD, 0x29E3E31E, 0xC25B3C29,
+0x20E927D4, 0xD4300C41, 0xD9AECBD5, 0x48D8E143, 0x2CEE272F, 0xD2C11FBA, 0x2F2DB8D7, 0x1CF4DFE6,
+0xCCDCCEBF, 0xE052D0EA, 0xDADDEB1E, 0xC72ACB3A, 0x332E2B1B, 0x2D26E827, 0x2A29E6A6, 0xE614D6D5,
+0x221D1CC5, 0x51102031, 0x1735C248, 0x31D4DCD5, 0x3C20DC00, 0x2D231732, 0xE54AD9CD, 0x1D24D826,
+0xE936373A, 0x20C0D3C9, 0x2B231520, 0xD6CED133, 0xDFC8C7B5, 0xDC2FE8B8, 0x3332249B, 0xC6E6E11F,
+0xB7DA17AF, 0x1FD91321, 0xEE29CD41, 0xE1262FEF, 0xB6181DC6, 0x24F618C6, 0x3BE5DA2D, 0xFC2C35C8,
+0x51C232CC, 0x36183EDF, 0x2B2CD6EF, 0x2F2DBDB9, 0x00D03413, 0xCDC9CBC4, 0xDB3309CC, 0x264EB22C,
+0xCED1EAC9, 0x38D53F27, 0x2EE7D6F3, 0xC7232C23, 0xCCD42515, 0xC9C8C8E3, 0x231E3930, 0x5BBEDFCA,
+0x34D7E5D7, 0x25CCE9A7, 0xCD4BF1E8, 0x5E2542E6, 0x38DCE43E, 0xDA34F0CA, 0x2EBFF841, 0xDA422D3E,
+0x13CA2231, 0xD5C6DA27, 0xBAC0C6B6, 0xC82C49FF, 0x31312123, 0xBF00DFBF, 0xE8F23819, 0x2C34E81E,
+0xDC4C2B0C, 0x3A5650F2, 0x2CCB0FD0, 0xC42D4C28, 0x4F19DA4B, 0xF63DE630, 0x29F51C1F, 0xD6BE14C4,
+0xD2DDCDD6, 0xE0D02825, 0x2FE0E3D1, 0x41342FE8, 0x25D547ED, 0xC5A732B5, 0x19BD503F, 0x4CD9DA3D,
+0x24D013BF, 0x4FE93FCA, 0x12DBE8B9, 0x33DF151F, 0x1E24AECD, 0x2E2E3656, 0x133500AD, 0x3641403C,
+0xBDDF2BD4, 0xE33130E0, 0x3DC02726, 0x18390C3C, 0x26C04DC9, 0xDBF22D35, 0xC3E1EC45, 0xCED12623,
+0x1CE92E39, 0xCF2FD222, 0xD8DE31E6, 0x1EE916D1, 0x35DCB621, 0x19C2EB54, 0x41B34ED4, 0xD9373BDF,
+0x203ED6D8, 0x2CD4A8C8, 0x14E0C6C6, 0xDF242131, 0xCDD921CF, 0x33AACBCC, 0x2AD1A5C6, 0x3ED42B00,
+0xBF181FD0, 0xC0CC2329, 0x1BD1EDC9, 0x3A2D47CB, 0xDD27D8CB, 0xD8E8BAE6, 0xD5DE3FF5, 0x2C30D641,
+0xDCDCDF26, 0xC2C34433, 0x2837F53D, 0x29D8E12D, 0xBF3FEBD0, 0x34CACC45, 0x30E1DA21, 0x39DDDCB3,
+0x30193518, 0xC0132E19, 0x313D3736, 0xB2D4FF34, 0xF4D92945, 0xC70D3AEA, 0x3E3034F0, 0x00DA2F07,
+0x1DD415EB, 0xCC1AC4B2, 0x232EB21C, 0xD7431E16, 0x2FCE0835, 0xCF39242C, 0x1BCA2CD4, 0xD8B6D0FA,
+0x18C3262F, 0x2CE3BDCB, 0xD839DC38, 0x26161D24, 0x1223B3DC, 0xD6D63515, 0xCFBABFC6, 0xC1BF18C7,
+0x2637C715, 0x264020EA, 0xD024D3C4, 0x4126A11D, 0x2E34442D, 0x32BB1CC9, 0x36E7DD40, 0x3113C3C5,
+0xD100C8CE, 0xD21651E8, 0xC3CFCDD9, 0x403139F1, 0xD3D02119, 0x32171AC8, 0xD2F9B62E, 0xB5B335D4,
+0xC1462835, 0x1F1C2C35, 0xD8DDBF35, 0x422D2DDA, 0x38ED19D7, 0x3326BD2F, 0x4534DFCE, 0x46403132,
+0x19CDD51E, 0xCBC3CBC0, 0xE3CF2139, 0xC646ECD8, 0x29E7C7DD, 0xE94CCAF8, 0xB8C6EA37, 0xEC2B09DA,
+0xC8C82738, 0xC6CCDDFB, 0x3FCA3B4D, 0xC6482BD1, 0x2DE1D333, 0x3530BB3A, 0xBF4838EA, 0xE4411ACB,
+0x3514D939, 0xCD372B27, 0x4BEC1C39, 0xAB21C221, 0xD8D83F37, 0xCCE0512D, 0xDA4640C2, 0x36AE1CD6,
+0xBC2F303B, 0x20C3B8CD, 0xBFE6D4E0, 0xBECCC4EB, 0xC7CC1E4E, 0xCC2BD03C, 0x3738C518, 0x23B824CC,
+0x2BECD0BC, 0x2107B945, 0x2AC21B00, 0xBE49F2EB, 0x31D2E3DC, 0xDAD23F11, 0xCDED3137, 0x3ABADA3A,
+0xD9C0C12C, 0x36CEE8CF, 0xD0D5BDD3, 0x2AC9CFCC, 0xE61FD747, 0xC7B7B12A, 0x2BD520DF, 0xD5BC1D31,
+0xE6D8423B, 0xCC21EB43, 0x1F46292D, 0x171EE038, 0xDFA5E244, 0x1BD5C4AD, 0xC622C2B5, 0x4E33CFD3,
+0x3F263F30, 0x34E4CF15, 0x0041CC36, 0x4021DB29, 0xD62E2231, 0x2C22AE21, 0x1A201F44, 0xC6D7C100,
+0x3A335125, 0x42473A2A, 0xDF32DA1F, 0xE5D8B51F, 0x3049E832, 0xE6DED531, 0x315DD01E, 0xDDD92034,
+0x2EBF0AEB, 0x4B4639DD, 0xBED6C8DA, 0xDABA3714, 0xDD133BD7, 0xC934EACF, 0x0D35C3E2, 0xB4CCC213,
+0x37EE202B, 0x0AD21AD9, 0x28D2C9CD, 0xD800DB2D, 0xD34B274A, 0xBDD33644, 0x38EFF1CC, 0xBDABD734,
+0xB11E3E32, 0x2F1C23DC, 0xE80BD7DE, 0xDBE7D6F7, 0x240248BF, 0x23CEF1F6, 0x1F56E634, 0x33CD2230,
+0xB4DB182F, 0xD338BA36, 0x33392B24, 0xBB3B3649, 0x282F30E7, 0x45CC35B1, 0x46261B30, 0x1FD9A722,
+0x3BE3E1C3, 0x25334FF0, 0xB8BB28C0, 0xD751E2CC, 0x403600E1, 0x461FC3ED, 0xCB30C202, 0xD8D2B92A,
+0x2923D71B, 0xB9DDE6CC, 0xB935CB0A, 0xEF37D723, 0xD1DA06CD, 0x303CE928, 0xF8B9BA27, 0x37370A1F,
+0xD6C1DDD9, 0x18B6463F, 0x28C71FC3, 0x38D8B53A, 0xD22DE9A1, 0x18DDB236, 0x0A28D515, 0x363B263D,
+0x1F3A122D, 0x202AE323, 0xC72337E8, 0x32ED08CC, 0x2658C347, 0xC6F4E200, 0x45D1C2C3, 0xF92D343A,
+0x3EBE1E65, 0x10EB2FD3, 0xC64FD5E5, 0xC5D3F521, 0xE9CA1A37, 0xD736CDD6, 0xDCC7C233, 0xC9C8E2D4,
+0x2B99D60A, 0x1EE3DA0D, 0x30BDD8E1, 0x16D3BE1F, 0x1C4524D4, 0xC8D5D432, 0x19AEDA3D, 0xCDD4CAD5,
+0xD835332E, 0x2412DA1B, 0xD0CBEBD7, 0xC3CEF425, 0xD1D743E3, 0x000A163B, 0x23DC3129, 0x202332D1,
+0x322F2216, 0x28F5131A, 0xD316CF0D, 0xD0344C30, 0x423325D7, 0x37204237, 0xEEDDC721, 0x2038E805,
+0xD525C522, 0xBEDDE327, 0x3AC8BCC5, 0x45402E27, 0x34DFC1D9, 0x2DB0D049, 0x322F2ACF, 0xDB3AC3C5,
+0xD8D0EB4F, 0x2A1DCCCC, 0xD0D33DC2, 0x1349DC45, 0x39CBBC32, 0x30301EBF, 0x2C00DFC0, 0x5DD0CA11,
+0xD0220C3A, 0xC527CD1A, 0x31CD372C, 0x2E0ED7DC, 0xC6F92338, 0xF537E8D0, 0x3626DBBF, 0xC9C3C6C3,
+0x30DDBE17, 0x50394C39, 0x3A11343B, 0x27E8DC2E, 0x0DDB303F, 0x1818BCF0, 0xC5DC17DB, 0x24C53A26,
+0xECB0E7E2, 0xC21B1928, 0x15D7C920, 0x4126AACE, 0xE214D8C7, 0xD40BCD42, 0xB2CBD018, 0x554436DB,
+0x3C2D25D9, 0x21CEC0DC, 0xE9251919, 0xE41ACDC3, 0xDECF67C3, 0xEDDE3225, 0xCA30EEDD, 0xCFE01E37,
+0xD231BF41, 0x34DF1EBD, 0x1DD2CC2C, 0x21142E29, 0xEA322B37, 0xCEC311DE, 0xF0F738DD, 0xEBED1A40,
+0xDCE640E8, 0xD0D1BA2B, 0xE906D6DB, 0xC71938B1, 0x13C59F39, 0x1ED6DBB6, 0x231BD3A9, 0x2C2B30DC,
+0x0ED50000, 0x2DD4EED6, 0xD9D21BEE, 0xE1442E36, 0x38CEC4AE, 0xC626CCF4, 0xC2CD3F36, 0xD71F2BD9,
+0x2F421837, 0x31B11A23, 0xD4D5282D, 0xD0E6BED7, 0x30D737CB, 0xC8142A09, 0xD8D2CFDC, 0xC22D3C44,
+0xF62DF1C9, 0x3D38DBC9, 0xBFC2E5D3, 0xDCE42128, 0x3A304038, 0xDBE6D626, 0x28250FE0, 0x27B52BCF,
+0xE0D8DC3C, 0xC8DA09DA, 0x2DF02FD4, 0xE539D7D1, 0xCDE729C2, 0x4816E313, 0x4E392AC5, 0x112CF50F,
+0x3AD94924, 0x4B22273A, 0x43D2DE9E, 0xDA22DE30, 0x3FE324DB, 0xD9CC2811, 0x29D253D4, 0xE2BC353C,
+0x0CC92147, 0xB8F0C6F5, 0x3D35DEBF, 0xED4ACBEB, 0xAC34E532, 0xEA2AC526, 0xEAC0BFC8, 0x4431D9DD,
+0xC0CE4E21, 0x0000F2DD, 0xE0D8C7BA, 0xDDDC2E36, 0xDA42C22B, 0x2B312614, 0xDC391EF4, 0xCC2E31D8,
+0x3DF4D630, 0xC72C21D9, 0xDB2BD531, 0xD916DB3C, 0xBEC11DD2, 0xD1D5CEA4, 0xBA072ED7, 0x402F2829,
+0xE3C0F627, 0xC7CC3CBD, 0xF41D3345, 0xD706DB14, 0xB2DF36AC, 0xEBC4551F, 0x32B01929, 0xC5B6DDEC,
+0xD91DE130, 0xE1213222, 0x4126D927, 0xAF52300D, 0x291B301C, 0x241A1843, 0xE43C1FDC, 0xBBCC191C,
+0x2C25E011, 0xBD3EE9DB, 0xC9BF1345, 0x2F333AD2, 0x1CD822DB, 0x23BE43EA, 0x2527EADC, 0xB52E1C1E,
+0x391CCF17, 0xD2C82C0C, 0x242E3830, 0xD328D808, 0x35CEF4CD, 0xD81BE035, 0xD0DF38DA, 0xD1D3C42E,
+0x3A2C1EEA, 0xC6D03AD2, 0x1E362E4E, 0xE6080000, 0xD018C312, 0xEFAA323B, 0xD1294BD2, 0xBAD52C3B,
+0xE0373E10, 0x260CDDE2, 0xC902E040, 0x30472BD5, 0x534BE7D2, 0x20CCB523, 0x21CCD843, 0x2259E24E,
+0xDE35FDD1, 0xD4D91D3E, 0xCFCA11D5, 0x35D724BD, 0x3C3F0CB8, 0x3EC447CB, 0xE835DDC4, 0xC3C73E31,
+0xDD432B2F, 0x2FDCBDC2, 0x2D3CCE17, 0x25B8AB31, 0x2ACCFFE5, 0x40E03C2B, 0xE7DA1841, 0x17E53020,
+0xD44E0B2F, 0x46DA22EC, 0x33422425, 0x211840C5, 0x23C1CFD5, 0xC8350BE5, 0xD833CDD0, 0xE63DC528,
+0x292ADC17, 0xD8BCD3C1, 0xCD39F02E, 0xA71EC343, 0xD6CC3A3E, 0x2E12D2CF, 0xC40B1FB1, 0x37DCDC33,
+0x20E52EEA, 0x271629E5, 0xC4BEE0C2, 0x1B3CDBD6, 0x00004934, 0x18E145C7, 0x4BE93522, 0x3029C309,
+0x31D1D50F, 0x3DB42C1A, 0x21A643B2, 0x3E26D8C2, 0xCDDE28D1, 0x44E835E1, 0xEDC4DED1, 0xC4F9C9ED,
+0xC5E1BBCC, 0x4426C1D4, 0xD9BB372B, 0x37232E0D, 0xE5D0D0AE, 0xC9D23CC5, 0xF638E2C7, 0x2DB6E6D9,
+0xC0C9B72D, 0xDF28E6D0, 0x11E2DCC8, 0xD8E2F31C, 0xEA3937DE, 0x2741B953, 0x19D5CEE6, 0x33DDC22D,
+0x2BF72326, 0x2143363C, 0x13CA1A1F, 0xB9D7DAC9, 0x22C6B72E, 0xC5DA2631, 0xCB32ECCF, 0x032D15D4,
+0xBB1EE613, 0x38C42452, 0xDBCD2726, 0x1AF44039, 0x22CF2EC6, 0xCAEDE6B4, 0x2823D423, 0xDCC61C39,
+0xC12D19BF, 0xC612C31B, 0x31F1D32D, 0x3E1C09E4, 0xD824C5E7, 0xDFFC2839, 0x2AD40000, 0xDFCCBBBC,
+0x3FC9CC12, 0xEF2626E1, 0x0FCDEA2A, 0xEBD735CF, 0x4E1A2834, 0xCDEDE1D5, 0x31CCCD29, 0x2F4618DF,
+0x48C11936, 0x29DADE41, 0xD71C1AE0, 0xDDD2285A, 0xB13DCDC8, 0xF020B8DB, 0xD0B837C2, 0xDC3B48E8,
+0x18C3C530, 0x192C2940, 0x2ADA3AE4, 0xEA34C55A, 0xDAB2BED4, 0x1A51E838, 0xC832AEBE, 0x10DE2DE2,
+0x3B2408CC, 0x22CEBD1E, 0xC919D6BE, 0x21D2D128, 0xCFE03A2A, 0xB7DEDC3B, 0x33C630ED, 0x5A0EEAD1,
+0x3CB03FCE, 0x24EC15E4, 0xD0385BBF, 0x20CB4E4B, 0x30E0C535, 0x301BCD3C, 0x3642DDD3, 0xD90FDC50,
+0xCA35BBCC, 0x393B3B2F, 0xE2BE352D, 0x362BE3BF, 0x1922B73D, 0xDE3448BD, 0xF7C0C02D, 0x00002F43,
+0x2FCAD0E4, 0xDE412A34, 0xFBF339DA, 0x2BB94330, 0x39B521D5, 0x2DE31F43, 0x382C4F23, 0x35314C2D,
+0x2DE3DBDC, 0x3A18EAB9, 0xD0DF131C, 0x45EA41E0, 0x48BBC1BF, 0x2220D7E4, 0x29C8D0C5, 0x3C0527CA,
+0xC1ED061C, 0xCB48CA3E, 0xE846E633, 0x0EB2D7CF, 0x44C42335, 0x27F0D440, 0x332FD847, 0xD72F2CE3,
+0xD7ECD6C9, 0x4026D51F, 0xD334BEE6, 0xE01B40C5, 0xD62BDA2F, 0xDEEA36AF, 0xABD039C7, 0x2E123FCE,
+0xFD3924A8, 0xD0C6D529, 0xD52AEED4, 0x211D2A35, 0x1EE9C5F1, 0xCECAF921, 0x38C92740, 0x3E4E2A1A,
+0xB8FAC7C3, 0xF11F1C36, 0xD4DA163A, 0x2701DC38, 0x0E3019D5, 0x57DB23E1, 0x43CD26D0, 0xADCC4934,
+0xE0D3D706, 0x403A0000, 0xDECED5E1, 0xDAE6CF1D, 0xD5E33C40, 0x35B3D038, 0x24D73231, 0x2EC01D40,
+0xC2D23CDC, 0x44C20BC6, 0x44C1D7A8, 0xE9D1C745, 0x36E5C2C6, 0xB81B2D42, 0x413917CC, 0xD8E031BA,
+0xCE3CC1C4, 0x3EC91AC5, 0x0DE406CE, 0x35D1DA27, 0xC8E5CAE3, 0x313EC127, 0xB61631CA, 0xD73B3444,
+0xD834C850, 0x37D31DE2, 0xCCC9153D, 0xBF41E9CA, 0xD649F9DC, 0x3A31D501, 0x50CD1DE3, 0xC8E54012,
+0x4432D4D5, 0xCBB93023, 0xE8FCD6C2, 0xCEE0CAD1, 0xE41B2E39, 0x28CC0E40, 0xDFEAEFFD, 0xB3EA2723,
+0x3534B935, 0x42D12A09, 0xED3DCA0A, 0x263C352B, 0xAD224EDF, 0x2CDF2A10, 0xF0BE4CAD, 0xBD45D8D5,
+0x263F27D4, 0xE2372FB4, 0xC35240DF, 0x1EF3DCDA, 0x3AD0D224, 0x27DAD527, 0x303A2CBE, 0xCDDCC829,
+0xE22B35C9, 0xD5DC222F, 0xEEBDCB63, 0x2042DAD8, 0x06CC3524, 0xD11BBB3E, 0xAE2229E6, 0x1FC81E1F,
+0xEF4B12DA, 0xE1D846DD, 0xBA3B1CDA, 0xE154C801, 0xD6092CD2, 0xD0F132CB, 0x3A40D027, 0x123530BB,
+0x1EC5B4DC, 0xD11825CB, 0xC603DECC, 0x28BCC5ED, 0x2C03E1CD, 0xE9AA12A5, 0xDB2DD53A, 0xCA32D01E,
+0x2F4A09C5, 0xFE411E26, 0x1354CEC8, 0x3736C5F4, 0xC62F3C3B, 0x252DDFB9, 0xD0353723, 0xD8DDC21E,
+0xE3DF21ED, 0xDAE33AC0, 0x2ADA323E, 0xDC43E1D9, 0x3A23463E, 0xD71934F8, 0xDB1037C8, 0x38B7BCCC,
+0xDA2C1CC1, 0x3EEEE349, 0x2ACB2A26, 0xD4BCBCD8, 0x4AB50000, 0x9EC941D8, 0x3824C9D0, 0x203CCD36,
+0x0DCDEDD8, 0x5B333036, 0x3125C7DD, 0xD7383F30, 0xD6C72617, 0xE1C245DD, 0xBDD3E818, 0x3210422F,
+0xD00ECF2E, 0x44411A3A, 0x02E72E22, 0xF106D24C, 0xCFC847CF, 0x0421AFCF, 0xD2C525A9, 0x3CBCE63A,
+0xE02A1E36, 0xE036E3E4, 0xDDC22B25, 0x2EC7452E, 0xE1E3EFE3, 0xD82ED6CE, 0x36C1E115, 0xD129E026,
+0x251E1C44, 0xE8E5D7CA, 0xDDE7D40F, 0xC5D8D9D8, 0xA1C52550, 0x3ACDDA25, 0x3548C4E1, 0x2913D9C8,
+0x1E28243A, 0xCA3324E2, 0xD0D7D1C0, 0xBDE9D0C4, 0x35BF4FE0, 0xCA42E33C, 0xBBFA33DA, 0xD753C9CD,
+0xDA2CE11D, 0xD9BEE407, 0xE31DD4C1, 0xCBCCD51B, 0x3BB8D6C9, 0x0000DDDD, 0x28D3CDDC, 0xD62445B5,
+0xB3211F13, 0xD7B9BF29, 0xD429D5E5, 0xB9C23CCE, 0xC219EDC1, 0x211E0B34, 0xB8F0E1C1, 0x33C0EF4C,
+0xD8D6C7D0, 0x30E8D3D8, 0xB06514D1, 0x21B9D915, 0xE2323DCD, 0xDA41311F, 0xE24AB11F, 0xE9B92BE7,
+0x33EA23C3, 0x04C430C0, 0xE849EF0D, 0xD1232C2C, 0x260FD13E, 0x002DCBDD, 0xEB3129E2, 0x20B4C7C6,
+0x25D32335, 0x2CCB39D0, 0xEA13DAC6, 0xCDEE2E14, 0xCDAA2CD0, 0xE1D339E6, 0x36DCD03B, 0xF5453114,
+0x34D428ED, 0xEAEDF458, 0xD226CC2D, 0xEDD03AD8, 0xEE1CC83D, 0x10DBB5E5, 0xF018CBDB, 0x5E33E31E,
+0xC6D9232B, 0x1A373130, 0xDFE7DDCB, 0xB21FC5C6, 0xC1CDB1B7, 0x3F2DF128, 0x482BB426, 0x323E0000,
+0xF5D43741, 0xCC1C1FB9, 0x00004E2E, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x13000000, 0xC9112F26, 0xE3DF1FED, 0x284020CF, 0xC6ED13CA, 0x38B1E217, 0xD90FE8DB,
+0xD7C0C3B7, 0x341336CE, 0x45E2D239, 0x3E22F0D8, 0x461D2C29, 0x3825D628, 0xD733E5F1, 0xD7B6C532,
+0xC62B28C7, 0xD0BF3024, 0xF0BA3A26, 0x2D1D2836, 0x1BCF2DDE, 0xD02B251A, 0xB938CD2C, 0x34E220D9,
+0xC2BEC23B, 0xCC2908CD, 0x1D080039, 0x42BB25C5, 0x32EB3206, 0x36D3D1E0, 0xC32E10C3, 0x243B13E6,
+0xC02A1FF0, 0xC325D6DD, 0x23E33D2C, 0xDCD7DED5, 0x9C23C21D, 0x32E62056, 0x06314421, 0xE9E4ED24,
+0xC0C9C233, 0x44BE3FDD, 0xD25134E5, 0x292EE2E7, 0x4EE0D1DF, 0x29C82E3A, 0xF5392A22, 0x4BE2D133,
+0x58C2C4C8, 0xD2212920, 0x2935D4DB, 0xDECF4400, 0x31D7CCC9, 0x12DCFB2D, 0x2B2F0723, 0x2DE7B54E,
+0x2AD24ADA, 0x261E2931, 0xD538CEDF, 0xE12A1C2C, 0x33DDEDE9, 0x2FFC36C4, 0xE61823CE, 0xC225E1DF,
+0xC2102FEF, 0xCFD22617, 0x3EFBDA37, 0xDAD6BCD8, 0x110623E7, 0x3320E2D1, 0x1932B8D5, 0xCCEBBFC9,
+0x47234011, 0x0A2B45D8, 0x25C11244, 0x00EF3221, 0xC634CC38, 0x0F34D0E6, 0x1A3F40EA, 0x2DEDC82D,
+0xFFD2C8BD, 0x40E321CD, 0x24441BD3, 0xD932EDDD, 0x48362127, 0x1230EADE, 0x39E632F9, 0xD0D451DC,
+0x18DD2DC0, 0xC22AC72E, 0xD71E172E, 0xAFDED12B, 0x1DBAA948, 0xF927B647, 0xD7C31C23, 0x3328E73B,
+0xDBD72DD9, 0x3E23D424, 0xCB09E025, 0x16BA21CF, 0xDD00B9CB, 0x29E3CAD7, 0x3A19D011, 0xEA0EC4E0,
+0xD51CDDBF, 0x2AD1C640, 0x2CD6CBD3, 0x36D6D43C, 0xA72851C2, 0xE7EE3C3F, 0x432A4721, 0x46C726E5,
+0x253FD426, 0xD03B2C27, 0x1AEC343B, 0x5A2BC735, 0xCE31D2D2, 0xCFEDF30D, 0x492B4141, 0x20DC1BAD,
+0xDAF81CD9, 0xD6EAD21D, 0x2339CD25, 0xA31546BB, 0xD0C0F63F, 0x382BE719, 0xDA34BFDB, 0x1EDBBADF,
+0x5151E646, 0xC164D5E2, 0xD8292FCA, 0x33CF1F3B, 0x3FF531F4, 0xD3EB3453, 0xD433E236, 0x1B102B1C,
+0xC6C320E8, 0xD8393C2E, 0x17EAB7EB, 0xE0C42CE5, 0x313720CC, 0x3F39C42D, 0x33374AD5, 0xCE1ED41E,
+0xF7D2BE0D, 0xF114311E, 0xE22CD6EB, 0x4E23CA2A, 0xCDCF36D3, 0x463B1EC4, 0x312DE400, 0x1A1219CE,
+0x0CD1E2C3, 0x3850CBEA, 0x31D7CCCD, 0x3F2D3936, 0xC518E33E, 0xC31F31B7, 0xC0AED6C9, 0x2AC6E5D9,
+0xC334B8BF, 0x35EFD72E, 0xD72B3AD8, 0x31073AD7, 0x25E22D38, 0xF0CD4A64, 0x28DFDB2D, 0x22D9E714,
+0xB8E43935, 0x35FCBFEC, 0x1DC1391E, 0xD6E731A5, 0xBBC8B837, 0xBD3FBE3A, 0x00BADC53, 0x28C43F33,
+0x1CE0BAC2, 0xE0E125D9, 0x3BD6CB3D, 0x313230BE, 0xC4243D34, 0x1F24D2B1, 0x2058384C, 0x2AE9192D,
+0x302CCCEE, 0xD435C4C7, 0xB0D53217, 0xE52D32D9, 0xE0AF2127, 0x3E47D0CA, 0x4112BFCA, 0x27D0D0BD,
+0xE81E5CF9, 0xD50CBCE5, 0x2DC9B4F7, 0x3FBA33BC, 0xCBCC2821, 0xDFD51326, 0xD20ADA3A, 0x2000C3BB,
+0xC4DC4BE5, 0xCD39D7A7, 0xC40FF54F, 0xCC3BD1CC, 0x2C19EACC, 0xCD2E2129, 0x44D9F0DB, 0x2B40E11C,
+0x31343F4C, 0x0E142817, 0xD0104135, 0xE304CAC7, 0x259A1FD4, 0xC3EAED3C, 0x1A311AD7, 0xD8242BEE,
+0x20BF4EFD, 0x3FCA16C5, 0xDDEED03A, 0x0A22C52E, 0xD53B2D3A, 0xE22A2CB5, 0x291D3228, 0x2CCC2E45,
+0x48BA00D5, 0x4FE12D1D, 0xD8D91DD1, 0xCEDBEC31, 0xFBD526B9, 0x3C1DE845, 0xAFDCE91C, 0xD622D31E,
+0xCF2620D9, 0x28D63427, 0x362C3DDF, 0xD72C2530, 0xD6A7C203, 0x442C3233, 0x29B24E3F, 0xDD21412C,
+0x400BD6D0, 0xC8D4B831, 0x171EF12A, 0x34FFC9D0, 0x3B3ED02D, 0x0BC7E2CB, 0xC43D28C6, 0x42F6C948,
+0xD24AD429, 0xCB2C2A00, 0x43E2A236, 0xBC353312, 0xD91324B5, 0x30B4D013, 0xC92FDE2F, 0xBFD6D432,
+0xC82DCABC, 0xC032DC1D, 0x30C9D2CB, 0xE0341DC5, 0xDDCEBE39, 0x2CC41D58, 0xE213BF14, 0xDFCDCBD9,
+0x2E3C36DD, 0xE5262833, 0xF841291D, 0x15E324F5, 0x0315D732, 0xD1C5C5CA, 0xD73144D3, 0x2ABEBA1A,
+0xBCF42E2C, 0x00E1E53B, 0x4423AD2C, 0xB021CF20, 0xD8D5EEB6, 0xBD35BCCD, 0xE6D85334, 0xD549DF2D,
+0x35C74D19, 0x3F4BBE3D, 0x1F3FA3EB, 0xCEB2B335, 0x35D0BCBA, 0xD5D8CF27, 0xB11DCFC4, 0x24D3D408,
+0x183B35D0, 0x1C14D619, 0x35D445BB, 0xDE452444, 0xC83315C5, 0x3F2122D0, 0xB623C737, 0x33321DD0,
+0xD6DF33DE, 0xDE39AA20, 0xCF00F239, 0x412410D5, 0xC5DC2339, 0x1D3BD1B6, 0xC0E3481E, 0x1030451A,
+0x30EA30D1, 0xC6DCB9C7, 0x2C36DC17, 0xCBB0ED30, 0x26C20FBE, 0x35D6D528, 0xD6CC2242, 0xEDE4241E,
+0xC036DED9, 0x26303627, 0xB6B6371B, 0x26E3D6DF, 0x0D36C729, 0xBEEE4FD7, 0xE3C839D1, 0xC0313031,
+0xAEAD2828, 0x36323D23, 0x13342728, 0x2904EE20, 0x261DDCD6, 0x0CBCE1E1, 0xD2CBD5C3, 0xFD1C4B2C,
+0x1CD8CDDB, 0xD4BE53B1, 0x2BD6CDDE, 0xC61A01E1, 0x3D41BED3, 0xE0333BD0, 0x4040E821, 0xC4A32449,
+0xD3D3A8BF, 0x36E11C2C, 0x3CC1321B, 0xC5D03E1C, 0xB813D9DD, 0xD6C51838, 0xCEBED141, 0x1FEA3813,
+0x3611CAC0, 0xD12EE2C6, 0xE1BFD22D, 0xD0DAE9CC, 0x0E03DB00, 0xBC37393F, 0x422BC916, 0x49C4D72A,
+0xBFC0243B, 0xC1C2F1C3, 0xDF36CC32, 0x49E53EE4, 0xBFC02CDF, 0x2BEE2BE1, 0xC23224C5, 0xD63F16DA,
+0x15DBC21F, 0x49E4CC57, 0xBE272CC7, 0x274E0643, 0x5C1DDBE7, 0x34B73DF0, 0xCD2D2EB9, 0x1AD43BCD,
+0xFD3047D3, 0xDAC53936, 0x1328DCC8, 0xBC32331D, 0x001F2634, 0xC845410F, 0xDBDB1C2B, 0xD1D819D0,
+0xEBDDE12D, 0x37E335D7, 0xCDA84635, 0x0F362FBE, 0x30C229C4, 0xBC1DC43E, 0x3CD02F3D, 0x2EF5F5F4,
+0x462D15E6, 0x362F1FC5, 0xC0C9CD49, 0x14C92639, 0x2BD135D0, 0x49C6B8DA, 0xDBC7E1D9, 0xC6382328,
+0xEDC217BC, 0xE4D1331C, 0xDCC741CF, 0x283A341C, 0x38B93420, 0x46001BE7, 0x3E1030C4, 0x42A8C1CD,
+0xBB3339C5, 0xE538DD20, 0xDF3F2602, 0xE52BE5C1, 0xBE2B41BA, 0x3BD4D337, 0x2D39C3DB, 0xC3BB36F0,
+0xD12CCF17, 0x4130B93A, 0xE31DEAEC, 0xF9DEF52D, 0xADD35231, 0xD62ED0EF, 0xBE303521, 0x1E2ADB41,
+0x310AC62F, 0x22DA2DC5, 0x26E1D9D0, 0x1F4D2AC1, 0xC8E6DCE1, 0xC6B3D62E, 0xD6D20019, 0x4EDC2D39,
+0x342AE224, 0x07CBD4CB, 0xE43FB3BE, 0x1BE53748, 0xDD44C8A2, 0xBC34B7BA, 0xA1DD1D43, 0x23D31827,
+0x3519E93F, 0xC83B3720, 0x1BF0B72A, 0xEAC7DCC0, 0xCA4B28EC, 0xD54D242F, 0xF35435D1, 0x3DDEC824,
+0xC31128CE, 0x0B0FD136, 0x442D1E2B, 0xCBD1C3EC, 0xD635341B, 0xDD1F2931, 0x3442CDD0, 0xDADD3600,
+0xC31E40E3, 0xB0BD1B22, 0x38CA1F0D, 0xD42D3043, 0xE65127E4, 0x2F15C9D6, 0x3E360B30, 0xB2CB1524,
+0xC7BBC4DF, 0x38D244C7, 0x483B31C0, 0xDB43C8D1, 0x43DCDBBC, 0xDCC7571D, 0x24BAC2C6, 0x3B4ED6DC,
+0x3AE03245, 0x0C40E1E2, 0x493724C0, 0xD4ECC537, 0x36CC30BA, 0x1C303FC6, 0xD12F27D5, 0x00C4272F,
+0xFFFD061D, 0xC2D9251D, 0xC1D7D625, 0xBFCD3AD3, 0xB42E11D4, 0xC7230E1E, 0x24CD2C21, 0x0BD436F9,
+0xCECCD331, 0xE6BDE0D5, 0xD2C6C941, 0xE1D1D3EF, 0xEB4123CD, 0xDA1755E5, 0xCEE4C529, 0xDC43C5D1,
+0xD8FDDFEA, 0x1AC9C317, 0x22E13DCE, 0x01D7BA29, 0x1551E449, 0x43CD363E, 0xC6281B24, 0xCBD23725,
+0x3200D61C, 0x35D6CCBC, 0x262CB22C, 0xE2E43DB1, 0x26493335, 0x3AD72ACA, 0xC4244430, 0x3AD7C9C3,
+0xC91ED8D1, 0x40EAD2AC, 0x38C9CDAC, 0x413BC137, 0xCD304B1F, 0xEFCA2323, 0xAC3FE4CD, 0x3616F6E0,
+0x41C2272A, 0xDCD7B2E2, 0x27D9E018, 0xDEE2D943, 0xD5C01F22, 0xB0D93540, 0xB411C13D, 0xDE39CBE0,
+0xE2C6F126, 0x31450828, 0xD2BC2927, 0xBACA1DD1, 0x34DDCDC3, 0xD8302AD5, 0xE2D02824, 0x0B2BE03E,
+0x3BCAFD2C, 0x2E461D45, 0xBBF6F32E, 0x31E5DED2, 0x2ED4E847, 0x3BDDCD29, 0x231331D8, 0xF521CF2E,
+0xD024C718, 0x20D91011, 0xDB1E0F34, 0xD931C0BB, 0xBB310837, 0x29B7C5CB, 0x3B31DEE0, 0x0836B926,
+0x28CB1FD4, 0x31072A30, 0xEE41F300, 0x35CB501E, 0x603A2B47, 0x2C2739CE, 0xC0CFC4D9, 0xE0CED43C,
+0x22BDC9CA, 0xBAEC122D, 0x4D16F2C5, 0xB6E5C3DA, 0x19D5C9D0, 0x1D1DDFC2, 0x223A23FB, 0xCD382B09,
+0x3025C2E6, 0x3918BDD5, 0x33DFEF0E, 0x291A302B, 0x14184930, 0xD414D233, 0x473CCED7, 0xD2D318B7,
+0x3E2738D2, 0x2928D537, 0x002E14BB, 0xE2E03734, 0x1B1EDFD1, 0xCF483FD8, 0x3D043844, 0xD52E1C4C,
+0x2823EAD6, 0x1F2D241A, 0xBC4B22E6, 0x2EE7DFB8, 0x3424E2F0, 0xD41BD7CB, 0x2BBCDF1E, 0x155B5033,
+0xB527D543, 0xE3C5E9F4, 0xD9B5C92B, 0xD3CAE0DF, 0x31C4CED7, 0x0BD33B25, 0xCF3B111D, 0xE62F241F,
+0x22D93D22, 0xCF27EF43, 0xDAD2C5DD, 0xC60019BE, 0x2935D5C3, 0x2D0BCD22, 0x224714EA, 0xE7CAB817,
+0xD1282222, 0x3C25EBD0, 0x45F0A8DC, 0x3ACFB92D, 0x34B4E7E6, 0xE93523E0, 0x2C292FCA, 0x44D4C7AC,
+0xEA22C3FE, 0xEF37E524, 0x313ADCDA, 0x2212D846, 0x1BE0FF4B, 0x1CBC2E34, 0x174AF041, 0xD245223E,
+0xE1DB2BDC, 0xF2E9A515, 0xE3C33BCC, 0x43DBD5CE, 0xF11700DB, 0xDE44DD36, 0x26E1D604, 0xC2ACE419,
+0xB9D8C7D4, 0x37EFE21E, 0xE2BAC2D1, 0x3926E91A, 0xD013E8BC, 0x2C43ECD9, 0x232B30F8, 0xD6C9E0BD,
+0x26A718BC, 0xEFF6C840, 0xC13332C2, 0xC4B9BD36, 0x1B4921BD, 0xD0EBECE0, 0xBCE4D647, 0x35D4DCD5,
+0x1BBE4C27, 0xECC1B7CB, 0xD820284D, 0x0BC33DBF, 0x34D03140, 0x38EC3900, 0x29EBDBC9, 0xD43A3041,
+0x3AE0DD0C, 0x09CFCFDB, 0x44E0C7CE, 0x20DBC4E3, 0x26DC41DE, 0xBA3AD2E5, 0x2ABA34E1, 0xD70FBEB7,
+0x40E03FE2, 0xF8D62041, 0xBD47B7EA, 0xD2E03E34, 0x1FC6D2DE, 0xCF1CD155, 0x32D52630, 0x3C463D2C,
+0x253130E3, 0x1ED8BB4A, 0x411BDFDD, 0x3FD8EE35, 0x33D02BBB, 0x002ECAAE, 0xDF1FDFEB, 0x2CD8DA3C,
+0xB6352D59, 0xD5BDD3B9, 0x1946630D, 0xD9181731, 0xD419D31F, 0xCE2FB7D0, 0x32EFE3C3, 0xCDDB2FD3,
+0xD247BB3E, 0xBA41DECA, 0x23244AD2, 0xDEAAF2B2, 0xB8D525F2, 0xD327A4D0, 0x3718C0E2, 0x2D3E20D9,
+0x24CFDF12, 0x26D0BE1C, 0xC8D9282E, 0xEB54CADF, 0x44DB1AED, 0xC927BF29, 0x2E002B39, 0xBB2625E4,
+0xBB4421D2, 0x18CBAA3A, 0x213ECD37, 0x0F33163C, 0x3622E432, 0xE5C7C62B, 0xDE39E7C7, 0xF3151CCA,
+0x2ACED6D2, 0x2ADADFC4, 0xD6241ACC, 0xB92F0622, 0x1BC639E4, 0x33D92BC4, 0xAF14E0DD, 0xE6EEC2C9,
+0x16BDCAE3, 0x272E1AC3, 0x292E1EBE, 0xDCE439B1, 0x4225E1C6, 0xCD372420, 0x27D716B1, 0xD83EE339,
+0xDBCAD4B3, 0x26385033, 0xC5373528, 0x2DD2C936, 0xF9D5BAE6, 0x2E2DC7C2, 0xCD4BD045, 0x1DDF2DE8,
+0x0E3740CE, 0xE5D82C36, 0x29D1360A, 0xCD03C8C6, 0x37CCCCD8, 0xD83AD330, 0xBFD619CD, 0xDEDDC432,
+0xD0DADAEC, 0xD3E14F26, 0xA8CD35D9, 0xA3F325BD, 0xBC1626D7, 0x3AB81BD5, 0xBBFF1832, 0xC9DB2126,
+0x16250000, 0xE003BB39, 0x35D533CE, 0x1801C610, 0xD9D44644, 0x474BD3C3, 0x1C30C51E, 0x2E3342C6,
+0xB7D6B3E9, 0x5AE44BD2, 0x46D62830, 0xDBDD4B0C, 0x38CF2F1D, 0x2D331ACB, 0x36CCDED6, 0x10D7DA10,
+0xD42246E0, 0x0FC73FC0, 0x2C3E1E37, 0x2CF0D1B8, 0x17412EDD, 0x2630D0DD, 0xDBD535E4, 0xD927DA43,
+0xB636C11E, 0x25CBE2D8, 0x4711F64B, 0xBC2C2029, 0xD0E3F538, 0xB92C27DF, 0x272DE2D2, 0x0CC5BCEB,
+0xC0BC2EDF, 0x26D131C9, 0x1F092FEE, 0x2D342B34, 0x31B825D5, 0xC6BBC9C1, 0x38F1D0E4, 0xCBF7FDC7,
+0xC3CAC9B6, 0xDBC436C6, 0x19DC1FD8, 0x28CCAED1, 0x1921BDB4, 0x24BCBF33, 0x301EE244, 0x36BDBDE1,
+0x31C5B724, 0x00003ECD, 0x220B1EE7, 0xDEBAD3DC, 0x21CAB942, 0xC9193B33, 0xE3CD3A0A, 0x0DC22CC3,
+0x3EC0C7CB, 0x43F2BFE4, 0x0ECFE9C6, 0x3ECA2FD5, 0xCB492DD9, 0x23E7BDC9, 0xD33DC151, 0xFCBE2933,
+0x5ACEC51E, 0xF2172E1C, 0x3C24DE23, 0x3320D4D5, 0x2AB53C20, 0xC328323A, 0x45EA4133, 0x24391A32,
+0xC7D8DA2B, 0xF4EAAF39, 0xDFCA36E3, 0xDEBED5C8, 0x36252C36, 0x04D7DBC7, 0x30CAC23D, 0xB60BF73D,
+0xEACA2A30, 0x34C12ABE, 0xE2D747D8, 0x19D21FE3, 0x1DFD24EC, 0xB6B5D9C4, 0xBF2C1DD2, 0xD2B442E0,
+0x37CCE1BD, 0x2AD1E1CC, 0x18DAC6BD, 0x213AD93E, 0x2031D8CB, 0xD82F2D12, 0xD112D2F2, 0x19CD43F1,
+0x36BCB9CA, 0xD7C5DDFC, 0x34011D28, 0x3BDB0000, 0xCD00E1AA, 0x5228252C, 0x3512CA34, 0x0EE53A41,
+0x242CD2C2, 0xF425DD2A, 0xF02FDDF8, 0xD82026D3, 0x4ABEE7B8, 0x2DC9DDDC, 0x3BEECD3C, 0x2ED1442A,
+0xC713E92A, 0x310CC12B, 0xE128D520, 0x48E5C1CF, 0xC91CE3ED, 0xCCCBC72F, 0x0B23E3E9, 0xC1D2C8B6,
+0x511ED534, 0x2AC7D4C3, 0xCFDCE335, 0xD4C9F643, 0xD433EBF1, 0x1BF8DA31, 0x223FC515, 0xCC2F13C2,
+0xF825FFC8, 0x35E51633, 0x36293C4A, 0xC72ACA36, 0x4544E736, 0x1BD9315C, 0xCF16DFD4, 0xD0AFEFC3,
+0xCECBBDE2, 0x05D6AA32, 0xD851F045, 0x240FD948, 0x3D35B2E6, 0x17D7C7EE, 0x0B22DACE, 0xF53AD637,
+0xB2372DDB, 0xF6DF14CB, 0xBED95B24, 0xD4D5190B, 0x0000CB29, 0x3DC1ECF5, 0x30C6B34F, 0x1D2629B6,
+0xD429D1CC, 0xD50716E3, 0xD0C3C2CC, 0xC82CC7D4, 0x5F27C718, 0xE6201BE4, 0xDACFD42F, 0x2E2EE9D4,
+0x3AD036C0, 0x3828DB20, 0x27D63439, 0xD21026DE, 0xD60710BC, 0x13AEB9E9, 0x28C41944, 0x49D5E3D7,
+0x27163429, 0x1BBAD528, 0xB12A14BC, 0xC72FD9E2, 0x1F43213A, 0x133B3EE5, 0x3622FE21, 0xD438DCBE,
+0x41CB25E8, 0xD0D83E2C, 0x32BFC2CE, 0x362BE1DA, 0xC3CBCE24, 0x22DECDC2, 0x30CBD5CD, 0xD42DBC26,
+0xDDF6AAD0, 0xCADC0EC8, 0xDC333434, 0x333114F5, 0xCE24361A, 0xB1C5D123, 0x4B25380B, 0x2FEC3634,
+0x3EE2A610, 0xB4CEE72F, 0x29BCC721, 0xB23850CD, 0xA51AE4DD, 0xC81BBC2F, 0xD4DF0000, 0x2DE5D1E8,
+0x2E0A28D0, 0x29E3C1E1, 0x56F62DD6, 0x3828DD4B, 0xC41ACFDF, 0xD2390A3A, 0xDF3028CC, 0xE8C5E22B,
+0xD11CDCC1, 0x44D1CBEC, 0x33C73DDC, 0x1D25B7DC, 0xE7DD0525, 0x2743C41A, 0x2AE2D3DC, 0x21D6E82A,
+0x133CBAC7, 0x593F34B0, 0xDA5FE339, 0x5206252C, 0x394FD5BD, 0x21D2C92C, 0x292426E0, 0x48E0362B,
+0xCC272DE4, 0xC815C4DE, 0x49BD5D37, 0x1912342F, 0x2CE9E01B, 0x30D8D2D9, 0xDEBF46E8, 0x392F473A,
+0x3A18D324, 0x2FE31FD0, 0x2B2B2710, 0xB53B2AC7, 0xB3481CFF, 0x2DD7CFBC, 0x2232351A, 0xD7C91C16,
+0x1CB4DA16, 0x1BCBDF3C, 0xD2213822, 0x2121DB1F, 0xDBD1C02A, 0x27DA33C7, 0x351FC4CD, 0x0000B9D8,
+0xC2142C29, 0x38B9DD31, 0x54E60CDF, 0x2A4A42DB, 0x35CF2BB5, 0x0CEA3323, 0xC2D8BEE2, 0xC61F3B33,
+0x10CE11D0, 0x3336CE2E, 0x303ADD25, 0x3347B7D9, 0x33222C1A, 0x1A43E4C1, 0x4326E019, 0x2AE2E92E,
+0xD2C5DC40, 0xBA45D7D6, 0xDF341BDC, 0x3A361925, 0xD4FEF4D2, 0x3018DA31, 0xDDC12817, 0x3038DE24,
+0xEDC0292B, 0x3E3E3336, 0xCB1BEF17, 0xE10B2EBF, 0x2B20BABF, 0xC80C421B, 0xC2411D21, 0xECD8C7D6,
+0xC824E4AD, 0x65B53DD7, 0x304325D4, 0xDD1DEAE6, 0xD33131CF, 0x2F36452B, 0x23B219E9, 0xC1C23529,
+0x21DC3836, 0xD618E933, 0x393E2C14, 0x362ACA3A, 0xD6E2C237, 0xD6085111, 0xD721FCB1, 0xCCE6DEBC,
+0xD1DCD920, 0xC3320000, 0xEA26CB19, 0x18BB26B7, 0x233BDF4F, 0xDE23FD3F, 0x2ADA1ECB, 0x3122E839,
+0xEF42D7D7, 0xCAC7202D, 0xCD4CDED1, 0x2633C824, 0xD0123E4E, 0x104429B6, 0xC8CD4B00, 0x5136B738,
+0xCFC8202B, 0x163135DD, 0x2C3821E8, 0x29314CE0, 0x0F3CE4EC, 0xE5432DAF, 0xC63AE340, 0x4639DB18,
+0xDF282511, 0x25B5CBD3, 0xCAF50F37, 0x1E443B33, 0xE32CD32D, 0x10DA47B9, 0xDA410526, 0x34F0E4EA,
+0xCEED3134, 0xC0B31AA7, 0xD12B1AD3, 0xC722C743, 0xBFBEDD1F, 0xD328D52B, 0xCC384024, 0x18B43FB8,
+0x281742BB, 0xD8322FD2, 0xB5D628C6, 0xE420C0CB, 0xC9EB391D, 0x41E6B419, 0xBCC543E1, 0x2C37DDEF,
+0x1F2D41E9, 0xD0F0D205, 0x1FEAE5C5, 0x15DF14B8, 0x47E327D9, 0xD328CE25, 0x36DB2E24, 0xD92E1EBF,
+0x2B3230F4, 0xD1EAD5D9, 0xC7212ADA, 0xCD1235DC, 0xC519D5DB, 0xBFC631DF, 0xD5222B35, 0x35B6DEC6,
+0x153C4428, 0xC5C3E42F, 0x1B2E2D2A, 0x3CD422A6, 0xC22842CC, 0xCADAC43D, 0x2C10DAD2, 0x18C708B5,
+0x36D636BD, 0xCD29C5D3, 0xBFED433A, 0xE0384939, 0x005629F2, 0x36E2C425, 0x2830DF1C, 0xC9CB351C,
+0xB0D7D3E6, 0xF3CEC214, 0xDC25D1C7, 0x1BD5CBCC, 0x19DD21C9, 0xC854EE30, 0x4FE14EE3, 0x38D5D3E8,
+0xC9D126E4, 0x2FDD2130, 0xCE2130D1, 0xCFCAB0C9, 0xC71F0E1A, 0x2627D4DE, 0xCBE9DACD, 0xE7121F22,
+0x2CC8E435, 0x3838391D, 0xDD37242E, 0xEBBE2F10, 0x233C0000, 0xAFC91CE7, 0xCE3536F6, 0x0F25D248,
+0x2D04EBE4, 0x3FF634E5, 0xE929CE26, 0x24E0BABE, 0x302CD52B, 0xD1E2073F, 0x4333D11C, 0xDA48C2D1,
+0x2D33F315, 0x29B2C8C3, 0x3B26E3EB, 0x3821C6DA, 0x32163DD6, 0x1DC737BC, 0x463CCD24, 0x19D63DDD,
+0x200BDDB7, 0xBDD41E22, 0xC40A444F, 0x34BF49ED, 0xD4C0C0F4, 0xA41FB721, 0xBEDF51E8, 0x15D82A17,
+0x383FDF21, 0x2B2918D0, 0xE6EB4718, 0x244530DB, 0x25DD3919, 0x3BA819BE, 0xDAEFBE38, 0xEB3FCCD9,
+0xD1DF2E1C, 0x2520C3E3, 0x32D0BFD5, 0x37312BD5, 0xDC29D0C9, 0xBED02432, 0x310C3A18, 0x411ED8D7,
+0xA4E5CE22, 0x163CBD30, 0xC3E2BE45, 0xCFBBAAC3, 0xC8C0E4CF, 0x0000CEFC, 0xE8CC2433, 0x39ED223F,
+0x243AD83B, 0xD44D39C6, 0xCABF24DE, 0x252050EB, 0x29C73B40, 0xD4F434CF, 0xCAE4D5DF, 0xC5F2EEF1,
+0x2919C8D2, 0x2FC4D20B, 0x1CBF5528, 0x3DBBD52D, 0xE03AD031, 0x3D2CCA17, 0xBE23BDB8, 0xE7DA3B30,
+0xD1D2D331, 0xCC1825EA, 0x2F2D39D2, 0xF9313526, 0xCFBCC2E1, 0xE5BE3AD4, 0xBF59BE1C, 0x22FFC8B5,
+0x32B14015, 0x31D6BEE6, 0x46DF3843, 0xDCD1D8B2, 0xE728C8BE, 0xEA1E2A28, 0xD1D8C7D2, 0x30CAF134,
+0xEDD0EADA, 0x13C1E3D0, 0xB9D7DC29, 0xEAB93624, 0x24E49F38, 0x2E3FDEDC, 0xE6D0D23D, 0x1FBB3ED2,
+0x23D540DD, 0xD727D92B, 0x251E33E6, 0x4BA7EECE, 0xC1CD203C, 0xC5CFED32, 0xB9CF35C3, 0x29710000,
+0x29D7C636, 0x3A3ED1D0, 0xCC1B271F, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+hard_output0 =
+0x42441F44, 0x006DFAE9, 0xDBB63AC2, 0xA3365CB3, 0xFA44372B, 0xC2603862, 0xA6C86E09, 0x416F0BF7,
+0x8E58011D, 0x47C54B99, 0x269AD14C, 0x0B286902, 0x093E4C64, 0x8D84D8E1, 0xB8AE0993, 0x49567118,
+0x8CEAABD2, 0x12D33409, 0xE3358237, 0x130176F6, 0x0986718F, 0xA18C83B5, 0x04EEF058, 0x59704040,
+0x7478FE81, 0x01D27193, 0xEA1AD7F3, 0x21E6C2B9, 0x6478A1FF, 0xED9959AD, 0xE39E57BE, 0x4D859105,
+0x056EF72D, 0xBD170BB7, 0xF01F9ADD, 0x99BF0C05, 0x44BAD09F, 0xF6ABDD61, 0xC0F8F116, 0x2972B53C,
+0x0FEC944B, 0xD1675432, 0x0025F563, 0xF42B2EA8, 0x808E5C37, 0xB6E79AD5, 0x3706284D, 0xB9C0AFA5,
+0xFF0E7E28, 0x5FA45C6F, 0x9CD5244E, 0xA013DDEA, 0x0D27D1A2, 0x2AE414AD, 0x41614379, 0xE6B68872,
+0x5585D926, 0x5098D45B, 0xF8980ABD, 0x65821418, 0xEF8968A3, 0x301DEC3A, 0x57EF2A7F, 0xC17BE446,
+0x94B65D62, 0xC9E6F350, 0x2E6130BC, 0x235F2E5E, 0xF1C13241, 0x94B291D6, 0x8C342458, 0x175B1FCA,
+0xE07310FD, 0xD24934DD, 0x3CE8D053, 0x5C8F243D, 0x945B0AB3, 0x50EB8CC1, 0x8EF499D4, 0xA67801BF,
+0x1680F061, 0x283FE705, 0xB8D7E773, 0x13AD3D2F, 0x4A6C305B, 0x1C1E5B12, 0x6F57D880, 0x8A666E5E,
+0x26511296, 0xFFC09750, 0xFF91760A, 0xAC26795C, 0x75F321FD, 0x4221B9CB, 0x2E119188, 0x0772A832,
+0x74D6036B, 0x505D6D64, 0x16A001B7, 0xF5F0D728, 0x48CBE119, 0x91C5D6B2, 0x8E1BBAB0, 0xB11BBE51,
+0x52C647DC, 0x65240CE2, 0xAB3F8AAE, 0x62E0CDEA, 0x6CBE7240, 0x7F572063, 0x7FC816E6, 0xB38E5A6C,
+0xDF9E2738, 0x752419CB, 0xC3A6BF9B, 0x8D48B21F, 0xA88C4AED, 0xE3C1D8BC, 0x8501353D, 0x20A99D7D,
+0x8D9C6DDD, 0x77A65039, 0x834E7E57, 0x1F623FA1, 0x6CF02B26, 0x1FF0BBA6, 0x9035AC70, 0xBED818E5,
+0x437CA713, 0x91011537, 0x3CD1302B, 0x7755459E, 0x5963FDA6, 0x49ECF937, 0xDEBBE0E3, 0xE873FD4F,
+0x735C48C9, 0x7C70F46F, 0xA00B1543, 0x789F4159, 0xE17C343E, 0x51B3D085, 0xD9042663, 0x771933DA,
+0xE183AF8C, 0xF62AC11B, 0xF776C25C, 0x22083E6C, 0x8400DDBA, 0x6D6AD2F2, 0x5DF87DBC, 0xBD7D40B4,
+0x356EBF21, 0xD8972E85, 0x3A632B51, 0x4D5BFBA2, 0x58CDF1D4, 0xD1117491, 0x5906DC45, 0xF793D63A,
+0x8F9D0552, 0xD261C804, 0x70BFDE9F, 0xDC77827B, 0xE9FFB130, 0x4FF18BBC, 0x17D50C32, 0xC315521E,
+0x4C7D2470, 0xAB5EE34C, 0x692AC0C8, 0x15B22832, 0xD34107FE, 0xA7E6DA1E, 0x2966A2A3, 0xD0B204B4,
+0x500E02F9, 0x02E5A455, 0x81EAF941, 0x6818F57A, 0x978EEC9C, 0xBFD575F3, 0x3FFC1BE8, 0x97104E07,
+0x4ADF219F, 0x182EC93D, 0x386033EE, 0x87C1351A, 0x31420C6A, 0xE20B5DC8, 0x83D90D23, 0x3C13B9E8,
+0xCCCD65BE, 0x7EDB17A9, 0x81C73C2A, 0xE202E332
+
+c =
+2
+
+cab =
+1
+
+ea =
+4918
+
+eb =
+4920
+
+c_neg =
+0
+
+k_neg =
+3072
+
+k_pos =
+3136
+
+rv_index =
+0
+
+iter_max =
+8
+
+iter_min =
+4
+
+expected_iter_count =
+8
+
+ext_scale =
+15
+
+num_maps =
+0
+
+code_block_mode =
+0
+
+op_flags =
+RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE, RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN,
+RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP
+
+expected_status =
+OK
\ No newline at end of file
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data
new file mode 100644
index 00000000..6221756a
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1190_rm.data
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0x11D2BCAC, 0x4D
+
+output0 =
+0xD2399179, 0x640EB999, 0x2CBAF577, 0xAF224AE2, 0x9D139927, 0xE6909B29, 0xA25B7F47, 0x2AA224CE,
+0x399179F2, 0x0EB999D2, 0xBAF57764, 0x224AE22C, 0x139927AF, 0x909B299D, 0x5B7F47E6, 0xA224CEA2,
+0x9179F22A, 0xB999D239, 0xF577640E, 0x4AE22CBA, 0x9927AF22, 0x9B299D13, 0x7F47E690, 0x24CEA25B,
+0x79F22AA2, 0x99D23991, 0x77640EB9, 0xE22CBAF5, 0x27AF224A, 0x299D1399, 0x47E6909B, 0xCEA25B7F,
+0xF22AA224, 0xD2399179, 0x640EB999, 0x2CBAF577, 0xAF224AE2, 0x24
+
+e =
+1190
+
+k =
+40
+
+ncb =
+192
+
+rv_index =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_RATE_MATCH
+
+expected_status =
+OK
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data
new file mode 100644
index 00000000..c569abd7
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1194_rm.data
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0x11D2BCAC, 0x4D
+
+output0 =
+0xB3E8D6DF, 0xBC8A2889, 0x744E649E, 0x99436EA6, 0x8B6EFD1D, 0xAB889238, 0xE744E6C9, 0x39E4664A,
+0xE8D6DF91, 0x8A2889B3, 0x4E649EBC, 0x436EA674, 0x6EFD1D99, 0x8892388B, 0x44E6C9AB, 0xE4664AE7,
+0xD6DF9139, 0x2889B3E8, 0x649EBC8A, 0x6EA6744E, 0xFD1D9943, 0x92388B6E, 0xE6C9AB88, 0x664AE744,
+0xDF9139E4, 0x89B3E8D6, 0x9EBC8A28, 0xA6744E64, 0x1D99436E, 0x388B6EFD, 0xC9AB8892, 0x4AE744E6,
+0x9139E466, 0xB3E8D6DF, 0xBC8A2889, 0x744E649E, 0x99436EA6, 0xC01D
+
+e =
+1194
+
+k =
+40
+
+ncb =
+192
+
+rv_index =
+2
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_RATE_MATCH
+
+expected_status =
+OK
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data
new file mode 100644
index 00000000..72be6f5d
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e1196_rm.data
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0x11D2BCAC, 0x4D
+
+output0 =
+0xBC8A2889, 0x744E649E, 0x99436EA6, 0x8B6EFD1D, 0xAB889238, 0xE744E6C9, 0x39E4664A, 0xE8D6DF91,
+0x8A2889B3, 0x4E649EBC, 0x436EA674, 0x6EFD1D99, 0x8892388B, 0x44E6C9AB, 0xE4664AE7, 0xD6DF9139,
+0x2889B3E8, 0x649EBC8A, 0x6EA6744E, 0xFD1D9943, 0x92388B6E, 0xE6C9AB88, 0x664AE744, 0xDF9139E4,
+0x89B3E8D6, 0x9EBC8A28, 0xA6744E64, 0x1D99436E, 0x388B6EFD, 0xC9AB8892, 0x4AE744E6, 0x9139E466,
+0xB3E8D6DF, 0xBC8A2889, 0x744E649E, 0x99436EA6, 0x8B6EFD1D, 0x9038
+
+e =
+1196
+
+k =
+40
+
+ncb =
+192
+
+rv_index =
+3
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_RATE_MATCH
+
+expected_status =
+OK
diff --git a/app/test-bbdev/test_vectors/bbdev_vector_te_default.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e272_rm.data
index 883a76cf..883a76cf 100644
--- a/app/test-bbdev/test_vectors/bbdev_vector_te_default.data
+++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k40_r0_e272_rm.data
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e120_rm_rvidx.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e120_rm_rvidx.data
new file mode 100644
index 00000000..b3867f35
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e120_rm_rvidx.data
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189,
+0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e,
+0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf,
+0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e,
+0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223,
+0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b,
+0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54,
+0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062,
+0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb,
+0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096,
+0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a,
+0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb,
+0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7,
+0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e,
+0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735,
+0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a,
+0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c,
+0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2,
+0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a,
+0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3,
+0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f,
+0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb,
+0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6,
+0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d,
+0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd,
+0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f,
+0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d,
+0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0,
+0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66,
+0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256,
+0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc,
+0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0xfaa42c5f
+
+output0 =
+0xd0d936d6, 0x870c26f3, 0x3f12805d, 0x4a6016
+
+e =
+120
+
+k =
+6144
+
+ncb =
+18528
+
+rv_index =
+1
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_RATE_MATCH, RTE_BBDEV_TURBO_RV_INDEX_BYPASS
+
+expected_status =
+OK
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18444.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18444.data
new file mode 100644
index 00000000..dcad5587
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18444.data
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189,
+0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e,
+0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf,
+0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e,
+0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223,
+0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b,
+0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54,
+0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062,
+0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb,
+0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096,
+0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a,
+0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb,
+0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7,
+0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e,
+0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735,
+0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a,
+0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c,
+0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2,
+0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a,
+0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3,
+0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f,
+0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb,
+0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6,
+0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d,
+0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd,
+0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f,
+0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d,
+0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0,
+0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66,
+0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256,
+0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc,
+0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0xfaa42c5f
+
+output0 =
+0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189,
+0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e,
+0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf,
+0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e,
+0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223,
+0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b,
+0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54,
+0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062,
+0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb,
+0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096,
+0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a,
+0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb,
+0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7,
+0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e,
+0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735,
+0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a,
+0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c,
+0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2,
+0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a,
+0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3,
+0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f,
+0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb,
+0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6,
+0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d,
+0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd,
+0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f,
+0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d,
+0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0,
+0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66,
+0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256,
+0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc,
+0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0xfaa42c5f,
+0x4fde636c, 0x10ea20a0, 0xe6da8721, 0xbfde5b08, 0x9c3739da, 0xb6dc015a,
+0x427db088, 0xdfdb8e6f, 0x756be6c1, 0x21f5297b, 0x06135665, 0xc1602b7d,
+0x049536c5, 0xbbb3b801, 0x0cdb0c19, 0x7b2ad622, 0xfee8218f, 0xc5c7f123,
+0x8abd3301, 0xa15b534d, 0x29dd2053, 0xd409abf9, 0x3ef19d6b, 0x70a3cbc2,
+0x7a51423a, 0x4505b2ad, 0xdc74c75e, 0x068751a9, 0xb0b56437, 0x14a10371,
+0x76af806f, 0xa8a47e19, 0x7c97a26e, 0x7998a3d6, 0xdc1ad1e2, 0xb532a301,
+0xca8a3e7d, 0xd0aef374, 0x204990c0, 0xc7011aec, 0xa69151ea, 0x53390026,
+0x7bf0d762, 0x735c2202, 0x64159e54, 0x5a3b1a56, 0x9ef1def2, 0x0ab8a961,
+0x587b0886, 0xb8cc5975, 0x2a5a0f23, 0x069d05be, 0x9cc3c207, 0x40ef1a02,
+0x4fae3f5b, 0x1f127aae, 0xd4e6d411, 0x17ac43ef, 0xe4bf891b, 0xfbb21765,
+0x2c560c7e, 0x8561988c, 0x73a01032, 0x0cfef73a, 0x694c4991, 0x885d7a3f,
+0x4218d1ff, 0xc2efaffb, 0xaf9d9715, 0xf76de6b2, 0xcce8e8ff, 0x370e3800,
+0x493675eb, 0xd8fbcbda, 0xa5b382c2, 0x86c8f1ea, 0x3d724ea4, 0xb067034c,
+0x6491d87e, 0x1a745ce4, 0xbb27180b, 0x1a2f0acc, 0xac4b7b3b, 0xe324578b,
+0xc87928df, 0x9c1de566, 0x0ce2a17d, 0xaf2e13ce, 0x146a8659, 0x8727f6ae,
+0xe2df7d03, 0x1a8e4cb4, 0xfa590976, 0x13a7c236, 0xc07489d0, 0xbe905e17,
+0xafeb3d4b, 0x201e73f2, 0x5bdca12e, 0x3e15a852, 0xbcfc3271, 0x5d398410,
+0x6bfacc15, 0x011fc61f, 0x43e43fd7, 0x0640717c, 0x96bfb3ff, 0x158eac19,
+0x3b852e91, 0x74f9ceda, 0xcac71326, 0xfc0e312a, 0x20e8137b, 0xa1162611,
+0x239ac7fe, 0xb9d00f8a, 0xea0b5241, 0x019f0d25, 0xc5153264, 0xb48a5547,
+0xe54e339f, 0x17a6cca5, 0x5065af0d, 0x5ce648b9, 0xb457b342, 0xc1cb3f0e,
+0x28d03c8b, 0x5144ed7a, 0xdb80779f, 0x53ce1b87, 0xbc071147, 0xbfe46c11,
+0x7296785e, 0x83e4a63e, 0xc58982e9, 0x9538c5b9, 0xf14abaaa, 0xd915124c,
+0x73540cd6, 0xe333696b, 0x58f9e00a, 0xd4dad10f, 0xc0de1420, 0x355e2bdc,
+0xb2faa8fd, 0xbe6a12f1, 0x45d415cc, 0x47f5aed9, 0x4754e770, 0x2bb07385,
+0x41374352, 0xf80beb47, 0xef02f35c, 0xc9c1b86e, 0x94b5785b, 0xba33123f,
+0x7e39f0c9, 0x028a9286, 0x7d52c9f1, 0x06f04da6, 0xbc6a68d1, 0xfc70bace,
+0x95b6a129, 0xfff224bb, 0x701ef3fb, 0x3309286f, 0x544ae8c1, 0xdca62c4a,
+0xf8862ee2, 0xf9e3cd29, 0x2c07cce2, 0x8d93652a, 0xf47e4611, 0x4635f586,
+0x1c03e0f4, 0x819724c7, 0x96b2a3f0, 0xeeb1ad95, 0xff08e517, 0xbd4ba6ed,
+0x49ddb12d, 0x365734b5, 0x5edf7459, 0x2ee117a9, 0x067b9462, 0xb703685f,
+0x72499876, 0xb71dd772, 0x5d02b478, 0x4b6fed1d, 0x2df232c1, 0x733eaaff,
+0x1618c029, 0x0adda94b, 0xd271e560, 0xd120ba30, 0x6dfecc73, 0x4e751af7,
+0x0e7a6b63, 0xa1ad5dad, 0xb41201b1, 0x245f3e86, 0x3a5114ac, 0x5cd532c9,
+0x27ff9c3a, 0x7a847392, 0x6b2d0514, 0x82d0cc45, 0x0c779ba2, 0x8690f3ef,
+0x0c058474, 0x47a8ee9a, 0xac4c2475, 0x496b67a5, 0x033f43f5, 0x6e6ce3c9,
+0x644f5163, 0x8f4b92d9, 0x217798ee, 0x4b8e5368, 0xce751989, 0x7ab05365,
+0xe227cee3, 0xf35e2851, 0x6f304c87, 0x1389c81d, 0x6ebc3989, 0x2b32ac3e,
+0x59c5d111, 0xa8e8699c, 0xa5b15150, 0x8cacebe8, 0x40c2ede6, 0x71ea78c9,
+0xa9e40f49, 0x58309eab, 0xd2eb879c, 0x54b9086f, 0xce206d93, 0x47e7087a,
+0xac1e1dd9, 0x1d3a7bf1, 0x07d21fd4, 0x3a84a2a7, 0xe3ce33de, 0x55c94ddf,
+0xd827c1ea, 0x3b4dcf5a, 0x7d5fbeb6, 0xd71ccdae, 0x516a9035, 0x33b3bee0,
+0x61201364, 0xcf344f8d, 0x8c887934, 0x1998c127, 0xe24f1190, 0x75e8ea20,
+0x8a379eda, 0x8a894b14, 0xa3d7c264, 0xa62b0119, 0x87f4d316, 0xecdc5f2b,
+0xbc2424ec, 0x71169a71, 0x61aa2d5d, 0xb5f5f160, 0xc15a4969, 0xc5419315,
+0xbf84483e, 0x2b1687c1, 0xd1aa06df, 0x22d5befd, 0x8b09b15f, 0xa88ffc01,
+0xbb33617f, 0xb01a3e2e, 0x912a939c, 0xc649d802, 0xeba14b11, 0x3c902b57,
+0xbcf35a8d, 0x45a964a1, 0x0c9416ef, 0xec9ae2d1, 0xc5e56fee, 0x88bfa336,
+0x653cfe85, 0x92d21037, 0x96bc60e4, 0xf5317c1e, 0x5e7118f4, 0x91a9e04b,
+0x43f0cfec, 0xb763dc3c, 0xad6bac40, 0x21685f69, 0x5e4d6f70, 0xd8d5a4c4,
+0x15ed5efe, 0x3bbefa38, 0xbf7cf912, 0xb1427ffd, 0x652235ff, 0x927a7046,
+0x8806625f, 0x56967b59, 0x6ec3dba6, 0x0ee31e0f, 0x25111a51, 0x091c763e,
+0x1867732b, 0x28365594, 0xa589c975, 0x585b906f, 0xb85c4f20, 0xced98475,
+0x52f29d16, 0x1ca0de39, 0xede3c4c8, 0xda808e91, 0x449784ab, 0xe3caf590,
+0xabf23939, 0xe97f259a, 0x0a879f81, 0x5c6268ac, 0x0a4f1b62, 0xfe15825e,
+0xbea5e6f9, 0x0d2db98f, 0xeec8cd64, 0x1747ddf5, 0x8b4f161c, 0xd236c9f6,
+0x1a5b5a2f, 0x08918e6d, 0xdde43ad9, 0x8c316d0f, 0x7f1b3342, 0x3e385cee,
+0x55ff6f87, 0x89bbe534, 0x67c23afe, 0x250dc97a, 0x06b3b332, 0x61a03930,
+0x2ca17159, 0x3260c4f6, 0x3c810bde, 0x28d94372, 0xa9701f5a, 0x3880475b,
+0x352ff389, 0x2bf653e0, 0xa4a6cc35, 0x4d211cfc, 0xeda0b37d, 0x14c0e6d8,
+0xe457aaea, 0x1c6ed15f, 0x155d3b90, 0x7f43aed4, 0x289a98f9, 0x42246910,
+0x62d047f5, 0x75714462, 0xe2c2a4df, 0x9dfc0f13, 0x03361f4c, 0xc55b5ca7,
+0x9dd89720, 0x17376073, 0xfb6ee849, 0x280e697e, 0x57b24f3a, 0x2089b956,
+0xa8baa602, 0x5b7ab88c, 0xd69f8b56, 0x64f80d52, 0x223427b9, 0xfd393c95,
+0xb074
+
+e =
+18444
+
+k =
+6144
+
+ncb =
+18528
+
+rv_index =
+0
+
+code_block_mode =
+1
+
+expected_status =
+OK
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18448_crc24a.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18448_crc24a.data
new file mode 100644
index 00000000..345d1410
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e18448_crc24a.data
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189,
+0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e,
+0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf,
+0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e,
+0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223,
+0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b,
+0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54,
+0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062,
+0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb,
+0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096,
+0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a,
+0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb,
+0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7,
+0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e,
+0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735,
+0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a,
+0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c,
+0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2,
+0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a,
+0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3,
+0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f,
+0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb,
+0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6,
+0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d,
+0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd,
+0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f,
+0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d,
+0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0,
+0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66,
+0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256,
+0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc,
+0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0x5f
+
+output0 =
+0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189,
+0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e,
+0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf,
+0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e,
+0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223,
+0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b,
+0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54,
+0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062,
+0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb,
+0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096,
+0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a,
+0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb,
+0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7,
+0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e,
+0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735,
+0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a,
+0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c,
+0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2,
+0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a,
+0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3,
+0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f,
+0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb,
+0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6,
+0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d,
+0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd,
+0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f,
+0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d,
+0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0,
+0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66,
+0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256,
+0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0x3d8353bc,
+0x0d005559, 0xd457eb38, 0xb1931313, 0xd4f12824, 0xcc444f1b, 0xa533d85f,
+0x4fde63ac, 0x10ea20a0, 0xe6da8721, 0xbfde5b08, 0x9c3739da, 0xb6dc015a,
+0x427db088, 0xdfdb8e6f, 0x756be6c1, 0x21f5297b, 0x06135665, 0xc1602b7d,
+0x049536c5, 0xbbb3b801, 0x0cdb0c19, 0x7b2ad622, 0xfee8218f, 0xc5c7f123,
+0x8abd3301, 0xa15b534d, 0x29dd2053, 0xd409abf9, 0x3ef19d6b, 0x70a3cbc2,
+0x7a51423a, 0x4505b2ad, 0xdc74c75e, 0x068751a9, 0xb0b56437, 0x14a10371,
+0x76af806f, 0xa8a47e19, 0x7c97a26e, 0x7998a3d6, 0xdc1ad1e2, 0xb532a301,
+0xca8a3e7d, 0xd0aef374, 0x204990c0, 0xc7011aec, 0xa69151ea, 0x53390026,
+0x7bf0d762, 0x735c2202, 0x64159e54, 0x5a3b1a56, 0x9ef1def2, 0x0ab8a961,
+0x587b0886, 0xb8cc5975, 0x2a5a0f23, 0x069d05be, 0x9cc3c207, 0x40ef1a02,
+0x4fae3f5b, 0x1f127aae, 0xd4e6d411, 0x17ac43ef, 0xe4bf891b, 0xfbb21765,
+0x2c560c7e, 0x8561988c, 0x73a01032, 0x0cfef73a, 0x694c4991, 0x885d7a3f,
+0x4218d1ff, 0xc2efaffb, 0xaf9d9715, 0xf76de6b2, 0xcce8e8ff, 0x370e3800,
+0x493675eb, 0xd8fbcbda, 0xa5b382c2, 0x86c8f1ea, 0x3d724ea4, 0xb067034c,
+0x6491d87e, 0x1a745ce4, 0xbb27180b, 0x1a2f0acc, 0xac4b7b3b, 0xe324578b,
+0xc87928df, 0x9c1de566, 0x0ce2a17d, 0xaf2e13ce, 0x146a8659, 0x8727f6ae,
+0xe2df7d03, 0x1a8e4cb4, 0xfa590976, 0x13a7c236, 0xc07489d0, 0xbe905e17,
+0xafeb3d4b, 0x201e73f2, 0x5bdca12e, 0x3e15a852, 0xbcfc3271, 0x5d398410,
+0x6bfacc15, 0x011fc61f, 0x43e43fd7, 0x0640717c, 0x96bfb3ff, 0x158eac19,
+0x3b852e91, 0x74f9ceda, 0xcac71326, 0xfc0e312a, 0x20e8137b, 0xa1162611,
+0x239ac7fe, 0xb9d00f8a, 0xea0b5241, 0x019f0d25, 0xc5153264, 0xb48a5547,
+0xe54e339f, 0x17a6cca5, 0x5065af0d, 0x5ce648b9, 0xb457b342, 0xc1cb3f0e,
+0x28d03c8b, 0x5144ed7a, 0xdb80779f, 0x53ce1b87, 0xbc071147, 0xbfe46c11,
+0x7296785e, 0x83e4a63e, 0xc58982e9, 0x9538c5b9, 0xf14abaaa, 0xd915124c,
+0x73540cd6, 0xe333696b, 0x58f9e00a, 0xd4dad10f, 0xc0de1420, 0x355e2bdc,
+0xb2faa8fd, 0xbe6a12f1, 0x45d415cc, 0x47f5aed9, 0x4754e770, 0x2bb07385,
+0x41374352, 0xf80beb47, 0xef02f35c, 0xc9c1b86e, 0x94b5785b, 0xba33123f,
+0x7e39f0c9, 0x028a9286, 0x7d52c9f1, 0x06f04da6, 0xbc6a68d1, 0xfc70bace,
+0x95b6a129, 0xfff224bb, 0x701ef3fb, 0x3309286f, 0x544ae8c1, 0xdca62c4a,
+0xf8862ee2, 0xf9e3cd29, 0x2c07cce2, 0x8d93652a, 0xf47e4611, 0x4635f586,
+0x1c03e0f4, 0x819724c7, 0x96b2a3f0, 0xeeb1ad95, 0xff08e517, 0xbd4ba6ed,
+0x49ddb12d, 0x365734b5, 0x5edf7459, 0x2ee117a9, 0x067b9462, 0xb703685f,
+0x72499876, 0xb71dd772, 0x5d02b478, 0x4b6fed1d, 0x2df232c1, 0xb9dea0ff,
+0x1618c046, 0x0adda94b, 0xd271e560, 0xd120ba30, 0x6dfecc73, 0x4e751af7,
+0x0e7a6b63, 0xa1ad5dad, 0xb41201b1, 0x245f3e86, 0x3a5114ac, 0x5cd532c9,
+0x27ff9c3a, 0x7a847392, 0x6b2d0514, 0x82d0cc45, 0x30779ba2, 0x3f0c8156,
+0x9bce6106, 0x3511b2b4, 0x82dbef90, 0xac19def9, 0x5f11d43e, 0xa5899170,
+0xdd137ff4, 0x188077ab, 0x53cec4c0, 0x6519988d, 0x2b07a0d5, 0x269ec4ae,
+0x29c2bc5a, 0x4a0206c6, 0xf8fba9f5, 0x61309433, 0x402bf26c, 0xce401562,
+0xf7eb46da, 0x4d9ad0c0, 0xf99fc69b, 0x1b670e56, 0x327bb1c8, 0x5f7db32c,
+0x4c96b615, 0x041e0960, 0x190ef525, 0xede526f8, 0x59eb88e1, 0x355e5454,
+0x8289d63c, 0xf848c2ad, 0x5bfc881f, 0xf161d01e, 0x5a921d49, 0xc202a8ad,
+0xaa9e9dc4, 0x8211e14d, 0xea945bc4, 0xa5a59180, 0x7ffd5bd0, 0xd6c107bc,
+0x3d0e84af, 0x04d13d34, 0xa21f54a3, 0xfcea787b, 0xbe61865b, 0xbe0d9899,
+0x336bb04d, 0xf843ae66, 0x8d400981, 0x4359b845, 0xdbda44dd, 0x27392d92,
+0x05780a7b, 0xe6dd7f03, 0x13137173, 0x9b623a85, 0x2428f035, 0x996f04de,
+0x74613a87, 0x924aa956, 0x4661e3ad, 0x506ce2d3, 0xa59e7aba, 0x34fd455d,
+0x70d613c6, 0x094610b9, 0x06e176ee, 0xb4f0842c, 0xc53680f4, 0xd9e2920b,
+0xe0ddcd46, 0x8e4c1618, 0xb5c83878, 0x7b5107a3, 0xb75c33c0, 0xa62868d3,
+0x804e47d9, 0xcefc87fc, 0xca5e125d, 0x3ed40ea7, 0xe72d3663, 0x06620539,
+0x314993c2, 0x99f417d9, 0x4819151c, 0x7d46c8a2, 0x95a81dc9, 0x61898a53,
+0x8226bb8c, 0x4907a616, 0x91eb32f7, 0x5430c6a1, 0x390ca234, 0x599f02ff,
+0x315a4cc8, 0xc15d9e2b, 0x1c7a8788, 0x2074d5ea, 0xc063a30d, 0x5532e1f5,
+0xd3820192, 0x916a7b03, 0x32422c07, 0x2ae2cc41, 0x96cb84c5, 0x2bab3d29,
+0x0edc0add, 0x408e4981, 0x2606b671, 0x63dca006, 0x8f4261d9, 0xbfe4623b,
+0x60174b80, 0x50230b0d, 0x9d4c7af3, 0x2edb3482, 0x24d8d087, 0x1b673b02,
+0xe28b7132, 0xc6c8cb36, 0x5794e3f3, 0x808c3887, 0xf9f64a32, 0xfca10213,
+0xff29e373, 0x54bf19a6, 0x16014860, 0x356d4398, 0xe8d0d630, 0x4c8100c0,
+0x7b68a462, 0x6cc95c68, 0x3becad35, 0xb2c6b4c3, 0x740aef1c, 0x4f37f2d5,
+0xc9d3c805, 0x6e4e533d, 0xf7647967, 0x91856de5, 0x87e7e428, 0xddf2fe07,
+0x69016442, 0xe0132159, 0x1afae2a2, 0x63b6d719, 0x08d20a21, 0x48ee7113,
+0x2fb2d853, 0xa532ffc8, 0x8296dee2, 0x0dfaf2fa, 0x060d531c, 0xa756d04c,
+0x3efed03e, 0xbe9436db, 0x5b9e8a48, 0x0a37ea61, 0x718f4362, 0xebcc9742,
+0x78aa2e7c, 0x4b19f7b8, 0x308b9af0, 0x915247e9, 0xc079aa48, 0x5230e578,
+0x862d6de7, 0xbe0801d0, 0x8ab11c9d, 0xaf1d7feb, 0xe9d15530, 0x44651202,
+0xc006
+
+e =
+18448
+
+k =
+6144
+
+ncb =
+18528
+
+rv_index =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_CRC_24A_ATTACH
+
+expected_status =
+OK
\ No newline at end of file
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data
new file mode 100644
index 00000000..9ce68b72
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data
@@ -0,0 +1,180 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0xe5e0a392, 0x6f5468f1, 0xc8f1c505, 0x4502caf2, 0x23df5578, 0xc2f29afe, 0x06838d34, 0x264eab9b,
+0x1e97c133, 0xabad27aa, 0x684fa9b4, 0x0fed735e, 0x6ab6bcad, 0x9c3e401a, 0x66a63035, 0xa72f7f6b,
+0x509d2953, 0xf03c574a, 0x48f12185, 0x27318395, 0x008fc1ce, 0x7efcf19d, 0x0120de12, 0x6ed28bff,
+0x2044547a, 0x04d95124, 0x6baf2e1e, 0x60d93509, 0x299aa27f, 0x8b67d93a, 0xc33d9631, 0xe468dbe1,
+0x1a82d0b6, 0x8ee2ff7b, 0x39298170, 0x33a90b6a, 0x273d17f7, 0xb4368615, 0x60c390b8, 0x24696570,
+0xa1f4abcc, 0x08b68dd0, 0x91f8c883, 0xc9ceed85, 0x6467da9b, 0x48dfae24, 0xf4ff4992, 0x8c8200cf,
+0xaac02792, 0xf506f71d, 0x096df98a, 0x3074be38, 0x9b675626, 0x4ccace33, 0x50f0f60d, 0xf4b59cc4,
+0x748a8799, 0x9032c151, 0x1fb945f6, 0x01bd098a, 0x90a7907b, 0xe086172c, 0x7ed3dc8d, 0xea0dd126,
+0xf9b35bc1, 0x8ad4e233, 0x37b5e1a1, 0x95bf8c3a, 0x9d288b43, 0x23a860f1, 0x4e993fc3, 0x3c774bd4,
+0xe411be48, 0x22ab80c7, 0x9dd09a47, 0xc9e6988e, 0xb4cbf0b2, 0xb9e2a389, 0xf1494438, 0x2c87638b,
+0x892468ec, 0x7915d1aa, 0x4d9cb9c4, 0x643a388e, 0x48c4358f, 0x937ad5a1, 0xdae53508, 0xa1305607,
+0xbfb63a5c, 0xb4bb9324, 0x71daab0b, 0xa582346b, 0x0e5402e1, 0x75af83e7, 0x2b241b17, 0xfa17aca1,
+0x9637cb2e, 0xc61fe511, 0x093934f7, 0x4b703b44, 0x992ab229, 0x4ddc99b5, 0xe2be6b60, 0xa2217861,
+0x8f621931, 0x272c0ec6, 0x23a4d5b2, 0xc6f4e002, 0xb76397c0, 0xc00e3c01, 0xb89d29b3, 0x5e9bc984,
+0x00391d56, 0x55fae02f, 0x4bc4bc9d, 0x9a147b22, 0x0fb492ed, 0x753dc9a6, 0x9f493eec, 0x6e135dc2,
+0x0eeca515, 0x1c1512f9, 0xb88075e3, 0x326a97f0, 0xc755faf5, 0x06b7961b, 0xbd50fd69, 0x5f071c92,
+0x2806c099, 0xe364f4ab, 0x2c4a114a, 0x7655b100, 0xf5e66750, 0xf9212705, 0x245e3b8d, 0xb0f1fb0d,
+0xd7be9558, 0x27030e24, 0x9ea9b62e, 0xbde9be45, 0xce0adae6, 0xc28780a4, 0x1664ccde, 0xa7a62e3f,
+0xa4f8ec00, 0x964e813c, 0x108c95e0, 0x097fe10b, 0x08da3505, 0xde413158, 0x046a31c0, 0xd9cdd804,
+0xdfbdff9a, 0xfd77493f, 0xdffb2388, 0xe2ffa111, 0x29eb6ac7, 0xf9e1ac68, 0xc2ed68c7, 0x34d65405,
+0x25f187ba, 0x3df23a2c, 0x96a2a5a9, 0x2bef4b26, 0x28d1c41d, 0xf380c0f1, 0xc4d5456e, 0x6b699c28,
+0x0aafd6b3, 0xf88f3078, 0x114438d0, 0x93836544, 0xebd5767d, 0x9185b5c2, 0x593e4fe7, 0x29d3348c,
+0x881f16c4, 0x77e33d03, 0x6794bbf5, 0x6b6c38f3, 0xa860673a, 0xb1d2d152, 0x08f9f310, 0x82c37500,
+0xce0ae34c, 0xd5478e86, 0xcfe1adfc, 0xfaf06d37, 0xf7e21bfc, 0x3b077287, 0x45b33c08, 0x41
+
+output0 =
+0x25832957, 0x1235fe50, 0xa3b0b09b, 0x0bb7c797, 0xce815b03, 0x7b702251, 0x2c25fc60, 0x23ac55e0,
+0x8bea35cb, 0xe33b833c, 0xec1d38a4, 0xb59acb59, 0x0f1fa8b7, 0xcb77fce6, 0x6564fc78, 0x9e6c39cc,
+0x94f1bed1, 0xe02684ad, 0x074045a4, 0x4dc9b344, 0xd187bed8, 0x2a129ecb, 0xf6c5bd4d, 0xf98d5c26,
+0x78addaae, 0xbe87f7d2, 0xf42a7df6, 0xa4683306, 0xbfa9b509, 0x3a67ddfe, 0x013d68d5, 0xb2a1fb8a,
+0x0dfc5da7, 0x792f6b24, 0xaa220ec2, 0x6cb0a7a4, 0xd6e5ea4a, 0x1cd6156c, 0x9bac3cad, 0x926a3950,
+0x658815b0, 0x2a2e4349, 0x9684d70f, 0x67a62f63, 0x7a8aae48, 0x8aa046e5, 0x3cad66d5, 0xe7e269b8,
+0x95a9a8ca, 0x426f46ae, 0xf780d265, 0x2e0d244d, 0x5480bc07, 0x66554568, 0x938aeda7, 0x1144006c,
+0x450320b3, 0xce9dfce9, 0x59a277a8, 0xde38cc1a, 0x1cd2bd60, 0x0079ca20, 0x58dd4633, 0x55398bbd,
+0x5fcf982f, 0xdbbd3e30, 0xdcaab5e4, 0xc368c59f, 0xfba8e7d2, 0x9902b581, 0x2ec0a87f, 0x0ebea4ee,
+0x13bb2fd2, 0xbc1df5be, 0xe34baa5e, 0x4a66fc5d, 0x7fc9d926, 0x1d2dc178, 0xe780752f, 0x160e82fd,
+0x2e6205ba, 0x3d2fd1d7, 0x22b03620, 0x1c1ce57b, 0x5a8f8e09, 0xc9522523, 0x712e5cc0, 0xa27b10ad,
+0x52d2df01, 0x3fe886d6, 0x29f37f12, 0x479e3783, 0xa37a445c, 0x338f9df1, 0x74dca176, 0x11acbbc4,
+0xac50920a, 0xe9375593, 0xc483a084, 0xed281b58, 0x21ea3cd3, 0x5cf2e76a, 0x50c5e5f4, 0x68d14253,
+0x374cea8e, 0xf3533631, 0xb8f1bece, 0x3a188078, 0xb15325de, 0x989b4e70, 0x40f142f9, 0x4a55cc30,
+0x72a442b2, 0xe583a6c7, 0x4cb90a7d, 0xe19187c0, 0x899ea4e0, 0x42598555, 0x7d012357, 0x3e9cd281,
+0x75094cbb, 0x80afef50, 0x8a834f90, 0xa6182605, 0xdfb7c49e, 0xde57caf6, 0x62720457, 0x5a40705e,
+0x2728f823, 0xde22d20c, 0x8389268a, 0x2aad9f81, 0xa376314c, 0xd9288769, 0xa7a1cc5d, 0x1dcdb0ed,
+0x0b23e2d8, 0x024e9165, 0x0e375ea5, 0x27c561c7, 0x48687976, 0xe97021bc, 0xbf82aea2, 0x6cb87bd3,
+0xaa3f7ed5, 0x1e5c3aaf, 0xccad60b5, 0xb672d5da, 0xde0ff047, 0xf7cd4216, 0x45220a3b, 0x060d16c7,
+0xb6dc8617, 0xc10d9dc7, 0x3d229a69, 0x8deea79a, 0xda6883e1, 0x610f41f9, 0xfa0f57f0, 0x6f97b547,
+0xf00577ee, 0xe1be3d28, 0x809810e0, 0x7d4b8416, 0xdcc82557, 0xce6a01b1, 0xbc4cb663, 0x8bc88eac,
+0xab9e918d, 0x6ec29c41, 0xbd0e745e, 0xa97bc23e, 0x1e1370c2, 0xc2744139, 0xf5d2f67b, 0x9d5ae9dd,
+0x1040a10a, 0x1b54944c, 0x9a6810b1, 0xc7079d11, 0x8c89d60d, 0xc9523c76, 0x304b7adc, 0x2fd915eb,
+0x076192e8, 0x8cbd3bc1, 0x90440a9c, 0x81961a26, 0x6a53f026, 0xd2b07ce5, 0xc72e767a, 0x9d47c3d9,
+0xab2b7cb3, 0xf4b3aeec, 0x18441c12, 0x56b3007f, 0x81c3cfff, 0x128b72ed, 0x214dee41, 0xc735baeb,
+0x89cf54e7, 0xcdfce851, 0x77cc0a00, 0x57cc6e0a, 0x7ffc8dad, 0xb036445b, 0x0699a9f7, 0x8781d7b5,
+0xdd0f9d01, 0x43f01de8, 0x6a2e7dee, 0xf802d6b3, 0x10094fc1, 0x954cf6ad, 0xd7aee91a, 0x1e3369c8,
+0x2acd8c15, 0x3303a61d, 0x79344e4d, 0x1253cd91, 0x34c34c77, 0xbd075dce, 0xe0fdd1cc, 0xf9015c56,
+0x931b9096, 0x604f4bbf, 0x3b1d991a, 0x24007891, 0x7c0c1709, 0xe0d43230, 0x11821166, 0xf21e2558,
+0x2c37009d, 0x98718786, 0xa9a112e6, 0xd7e20b10, 0xfe917f72, 0xb76101ff, 0x42a8d327, 0x9039f71b,
+0x6d1628cb, 0xfd01251c, 0x6f0572a2, 0xb36a7e7a, 0x2b1a4556, 0xe679b027, 0x313573d0, 0xb5f7c20c,
+0xc8d1530a, 0x338e756e, 0xe6befc47, 0xfe12dccd, 0x6ad1e5b7, 0x40b078ff, 0x96aaacba, 0xedfdeb8f,
+0x8da3673a, 0xc965882e, 0xa4dec6e9, 0x7bace08f, 0xf144852a, 0xd5d787d0, 0x204d32e6, 0x733098aa,
+0x12f3a53a, 0xe060086d, 0x3ab1739a, 0xd391e860, 0xc23d9687, 0x0d3a84e0, 0x855ea4cf, 0x0d0ea5b7,
+0xfd082430, 0x70df7cb4, 0xe712f026, 0x84ce32ac, 0x67b04602, 0xac0b1faa, 0xed119dcd, 0x184afb0b,
+0x9abbefd2, 0x41b3e2f9, 0xe18d5703, 0x6e5f4de7, 0xb47380a2, 0x239a3714, 0x810bb522, 0x5ad95c77,
+0xe4b37e0c, 0x1cc200a6, 0x404c4390, 0x90533af1, 0xb49f2403, 0x1dada65f, 0x96f86d1b, 0x58f679a9,
+0x1fe3c66d, 0xcc5eb28d, 0x504b7aff, 0x7c18a397, 0x47ad4d5f, 0x348f12c5, 0x80bac6e0, 0x47bb3c74,
+0xccf16f02, 0xa932a5af, 0x51ebb452, 0x25808fcc, 0x47d14a43, 0xf0c2a0af, 0x30f938b5, 0x428420c7,
+0x61a66d9c, 0x082caa4f, 0xd9c74e3d, 0xb4d161b9, 0xb409f331, 0x14245a44, 0xddb67f1e, 0x1018c62c,
+0xc1e58e6d, 0x4ba86bd6, 0xf4a282aa, 0x4a85f90d, 0xbf0efb7b, 0x03706f08, 0xbe06a5f2, 0xe37f45f1,
+0xcbc4001c, 0x14decb05, 0xf3c6050f, 0xd1df33d7, 0xdab729c7, 0x8898dad3, 0x4a44a313, 0x30bee51e,
+0xd67b07ef, 0xf233eb19, 0x4c52bca3, 0x5d824a4b, 0x275faf2b, 0x428f8a95, 0x03f70c8b, 0x609e4036,
+0x59ce2147, 0xc7eecc6b, 0xa22d21c2, 0xc5764d9a, 0xb4524bc6, 0x6d9bdfc9, 0x0918b4f7, 0x62bb8022,
+0x7a98bdcc, 0xc963131d, 0x949036b8, 0x649ae51d, 0x93038d33, 0xe26cb45e, 0x4813516a, 0xc06c759c,
+0xfa6a9793, 0xdd59c686, 0x37b4b5b0, 0x66a02fac, 0xde159ecf, 0x90cdb66e, 0x1c9f7f94, 0x3a31f69e,
+0xcd93bc45, 0x32744ee0, 0x0f6ae2b1, 0x3bc9e963, 0x8cddbae4, 0x8f9517ff, 0x7d304ec1, 0x344e0357,
+0x7cd2ed30, 0x1f9fa3a4, 0x9dbeb67b, 0x59253b51, 0x41b6911a, 0x12b574af, 0x62a777da, 0x9a6c834a,
+0xaa16081d, 0x3f105c37, 0x6eac5dc0, 0x76e56fd1, 0x67bffbfb, 0xce069b80, 0x8fe970b1, 0x61945ee0,
+0xaf9d44c2, 0xf2cd16cc, 0x07a768c5, 0xd6eecaa6, 0x6e8ce063, 0x4b30b443, 0x360822c9, 0xa6442b33,
+0xf02d09a0, 0x22056421, 0xc552aa94, 0x1346ea1a, 0xab6c54a1, 0x35a69bdf, 0xfc6095ea, 0xbde0d55a,
+0x1b433ec0, 0x137483a0, 0x8c1cb29d, 0xf2bc4e17, 0x45df6882, 0xbd702bc9, 0xabbead61, 0xc97ef61d,
+0x01b68438, 0xbdd10231, 0x2e046323, 0x80e619bd, 0xa5126a2a, 0x74106e0d, 0x9e57b499, 0xcb275127,
+0x9e01f21e, 0xbaf4a744, 0x38604f7e, 0xa85493d6, 0x9a4b4541, 0x3f397269, 0xdde71734, 0x157dfe98,
+0x40334ad9, 0xc889f52f, 0xbddf4d23, 0xd2200018, 0x2a4b7180, 0x8a1a357f, 0xcd6af7e4, 0x1426b52b,
+0x2a45481f, 0xfc38394c, 0x05123e4e, 0xa0215834, 0xe2c2306e, 0x4ee9fe1a, 0x6bbbf30c, 0x19a39072,
+0x846b0bdf, 0xde4ef7f9, 0x47f1d67c, 0xf507e9e8, 0x14c1f617, 0xaa756ccf, 0xececc02a, 0x183cd616,
+0x9d4bac81, 0xd959d1ea, 0xefb81e2b, 0xd8ca27fc, 0x5c7e9eb4, 0x64d5d550, 0x32ed3c41, 0x8f62e8a8,
+0x5e2b65c5, 0x0a7e5e1b, 0x88abb657, 0x781ee74a, 0x840c27fd, 0xec83df4c, 0x163acaf0, 0xb2649960,
+0x2a706f9a, 0x5226c811, 0x5bcb501c, 0xb87caa99, 0xecb4a37e, 0x8927c424, 0xfea9ac62, 0x17da41d1,
+0x571c5987, 0x4217ea55, 0x5043b664, 0x18fb3ece, 0xfb2d13b9, 0xb6599d4b, 0xbb8a536d, 0xa719683f,
+0x81cc2d0f, 0xc888f1d3, 0xccb0a428, 0xd2cdff48, 0x0b3440e6, 0x45700619, 0xcea80987, 0xa5ad1cc3,
+0xd29deaf7, 0x4a8c95af, 0x9ac63a98, 0xf46d26b0, 0xf9a09352, 0x5f8b3fb7, 0x367c8da1, 0x3244b6a2,
+0x9634b973, 0x27292ae5, 0xc4f7ae33, 0x9b73a4f7, 0x086a8bb4, 0x616cd17b, 0x74fdb210, 0x5f483d60,
+0xf7666630, 0x107cd026, 0xfa2a4a07, 0x171b319e, 0x509d50c8, 0xf2d15061, 0x981d5759, 0xf23bc5c6,
+0xe9befe22, 0x1e784009, 0x7cda3751, 0x1c5b2b69, 0x3ba09115, 0x756f1ef5, 0x2b47353b, 0x553a33d9,
+0x1f029262, 0x6393d0ee, 0x97e01180, 0x0c53980c, 0x5da79e34, 0x1e0b53c4, 0x06a9f2ba, 0x622181b0,
+0x4898ac50, 0x874c780e, 0xf8cc5072, 0x4a5a63ca, 0xf6dea6a2, 0xc3134004, 0x4505bae6, 0xc3f4b638,
+0x0dda8e07, 0x0c4135bf, 0x61e80f25, 0x4b31a76c, 0xd91f0d09, 0x4ba0d577, 0xc95d0086, 0x21a61725,
+0x16862c1f, 0x9588795d, 0x6ecde7c4, 0x353288cd, 0x94e4647e, 0xeceaba51, 0x7c602ff8, 0xbba48cef,
+0x6556c6bf, 0xde912190, 0x86602d55, 0xf549554a, 0xd4d52081, 0x9728c5db, 0x7d09e129, 0x068008ab,
+0x353a6ea4, 0x90b44e4e, 0x5c97a08a, 0x6293b3c7, 0xa3e4114b, 0x7ce44646, 0x8f2b0269, 0x291ca1bb,
+0x98721528, 0xe30f5532, 0x0bbb2951, 0x7b7c390a, 0xb835b070, 0x2712e51c, 0xc20fb607, 0x5a05ce52,
+0x5eb33cc2, 0x33c8b3a8, 0x81433abe, 0xb99cc5de, 0x817a5bab, 0xc76ffef0, 0xc68fb77c, 0x96c35c46,
+0xef1bedc9, 0x42d84a19, 0x54440a6e, 0x3c4b7400, 0xe88bdd94, 0xe1b91c7d, 0xdcdba422, 0xc865625f,
+0xaaed9adf, 0x782f8dd7, 0xd267ef7b, 0x366340af, 0x5a9b408a, 0xd6edff9b, 0x8356ad73, 0xbaaf18d0,
+0xdf752a1b, 0xb246d2c0, 0xe2209cf7, 0x7b4aaa2a, 0xaeaec406, 0x5dc1665d, 0xcad3ca61, 0x9603b5c9,
+0x58012ba9, 0x32945486, 0x78fda0e2, 0xfa326649, 0xe88a7466, 0x6a54aea7, 0x6a56ad08, 0x9e86cbd3,
+0x8aaa7c2e, 0x66e45a99, 0x285d26f4, 0x40d2740f, 0xc87be0d2, 0x55844605, 0xd87e6a56, 0x04c036a9,
+0x00321b41, 0xc99f5e34, 0x7a87eadc, 0xc3ac9125, 0xdd0be68d, 0xa70cc221, 0x6d340390, 0xb3d88bd5,
+0x8cf95295, 0xeb03f3f5, 0x5a4bbedd, 0x56fcc9ad, 0x7a2e3d8c, 0x501bb88f, 0x8cfa9729, 0x4beaee02,
+0xfb22ede0, 0x51ef3bb1, 0xa4eac5db, 0xc6df35be, 0x9c6da264, 0x128cf797, 0x58f7d2d1, 0x20d87f0e,
+0x56a06be1, 0x127ded22, 0x6b03d2f3, 0x51be2702, 0xe898c0c1, 0x5532a2f5, 0xc2059c2c, 0x07d11ae7,
+0xfd1d20ba, 0x6e682d25, 0xff27f183, 0x79339832, 0x47c475e4, 0xd8193faa, 0x1d6a37f3, 0xba4b4cc7,
+0x25a910c1, 0x5335c90a, 0x084a987e, 0xb281453c, 0xce33dd8e, 0x7fae16a2, 0x5c4ecf25, 0x2d340555,
+0xa4ee8816, 0x651373c3, 0xefeb3c3f, 0x0188871b, 0x55e2ad83, 0xe904173b, 0x2f948fb9, 0xc50c0314,
+0x2a24ab54, 0x687a2c47, 0xabd0573e, 0x7908cc94, 0x490a1e1e, 0x555895e8, 0x30722594, 0x291dd817,
+0xc0b4ebc3, 0xfa0e5597, 0xf80409f8, 0x6152a038, 0x4bec698a, 0xa56cff7d, 0x4770e57d, 0x04e72526,
+0x823fa205, 0x22cd7082, 0x68a2e82d, 0xfa193898, 0x17c3a4d2, 0x7298366a, 0xcadc958d, 0x0cdb7e1a,
+0x228eddd1, 0x1459b630, 0xe3552ae0, 0x1c76ec70, 0x96677752, 0x17c28b84, 0xe82a9a0e, 0xbb37fd2b,
+0xe357cd86, 0xa5f3aafa, 0x0a56ebc1, 0x57adcddc, 0x007f642b, 0x2c64e1fd, 0xa2b073df, 0x60715c24,
+0x6d7861d0, 0xd0796ccb, 0xa29916dc, 0x7eaad923, 0x3618dee8, 0x1094af8d, 0x70051ff6, 0x597ba4ff,
+0x70e7fe76, 0xdb83025f, 0x09011eee, 0x44680188, 0x5c72d5b7, 0x1610cb8d, 0x643be6ac, 0xecc8cacb,
+0x19d9b888, 0xcc19b4ea, 0x40e7e526, 0x27ecd3eb, 0x01279cba, 0x1794e331, 0x6dbf274c, 0x95de5d2f,
+0x14aad0a9, 0x45c90401, 0x0611bb41, 0xd019a189, 0x68dd707c, 0xc563c798, 0xa4c79d2c, 0x5db10eb3,
+0x2689fe92, 0xbb137c10, 0xa4c0c9d8, 0xa9610249, 0x056f1268, 0xcb57ae36, 0x62a7270d, 0x349c7dec,
+0xc237db79, 0xebcabeba, 0xc421413f, 0x0bf08741, 0xfcfc6f35, 0x28d71e38, 0xe41e24b1, 0xa3bb1ed2,
+0x4c757e5c, 0x8f1e95f8, 0xac00d0cc, 0xeca670c7, 0xdfd87ac5, 0x43b4f5c7, 0x997a0f6b, 0x785d6b90,
+0xd0197018, 0xdf81defd, 0xd2e73e04, 0x603dabe6, 0xf0148c2f, 0x64df0a91, 0x9aae51c9, 0x93867ced,
+0xcc58e131, 0x60daa1d2, 0xe3d43433, 0xd51c9947, 0xcc742731, 0xd0e54c33, 0x1fcddc7b, 0xc06505de,
+0x0169991f, 0xb4f43bb9, 0x91a901f6, 0x8017b9d3, 0x70914002, 0x2d03c3c7, 0x1861064e, 0x51821521,
+0x03d029ef, 0x7768c872, 0x2a618e19, 0xbe00911a, 0xf927772d, 0x16f0ef1f, 0x3a7d721b, 0x73bf2184,
+0x81b20c99, 0x50c2d166, 0x2027da1f, 0xe6a7f756, 0x516435ab, 0x077bb2a2, 0x33076d9e, 0x2fcc1053,
+0x3da5507b, 0x58e7861c, 0xcb7f34e3, 0xc1dd6cee, 0x5d7eeb2f, 0x8bf7af16, 0xcaaa0b04, 0xbffe68a9,
+0x7aa6d3de, 0x86e8d238, 0x6d9c9e5c, 0x0afe48ea, 0x54a8b2c7, 0x7d081d4f, 0x24635e7d, 0x83a90ad2,
+0x5faa3307, 0x86d02631, 0x3ba7090e, 0x890ea613, 0x6379381d, 0x43082edc, 0x45fadca0, 0x507a5be8,
+0x4002d3e0, 0xcd47db8f, 0x016f02f7, 0x2cc37a2e, 0x6b2440e8, 0xf0a17a06, 0xd1d9ccba, 0xb4bfd01e,
+0xfb2e8da1, 0x2b9eafb9, 0x78351034, 0xd5741ede, 0x0728eaf6, 0x7943413b, 0x502b32a2, 0xcd7517b8,
+0xebc7a095, 0x0c604a3e, 0x3404c921, 0xa5130fc4, 0x49320039, 0x6afa45fb, 0xdfb6d1d1, 0x9f976a89,
+0x6edc8665, 0x25dbf831, 0xa4f7cfec, 0x317a09b5, 0xdaf4c587, 0x28517cd4, 0x6b0c4ef3, 0xcb4307a8,
+0xff2670b4, 0x53faca1c, 0x4e2b952a, 0xf8c81cb5, 0xad345402, 0x0cfa7a14, 0x8f530b2f, 0x08720c93,
+0xdac62944, 0xa2fa1466, 0xecd483c0, 0x1d969b7d, 0x301f431b, 0xa245449b, 0xfbe74141, 0x61ccd26d,
+0xeed80681, 0xba661d5c, 0x2aa8ba84, 0x98df402f, 0xb0bfa754, 0xf786f0eb, 0x502a3f00, 0x5714ef6b,
+0x0cc031fe, 0xbd5cb04c, 0x5cf040e1, 0x3d733d6f, 0x9b721cfd, 0xa93dad7d, 0x343a8188, 0x5beea144,
+0x77f00ee3, 0xb39e61bd, 0xc53b2a3f, 0xa8b4c424, 0xf5bad225, 0xa85879f2, 0xcfb028f4, 0x09643370,
+0x1c7204e6, 0xcebc96e5, 0x12227cec, 0xd7a429da, 0xb5645c6c, 0xf99d4c2b, 0x417bdfb6, 0x0b289280,
+0xd9cb2cb6, 0x36d1a187, 0x69839b3c, 0x59de4109, 0xd03843a6, 0x46eb3539, 0x11a526ce, 0x56c78934,
+0x763909cc, 0x656ca8af, 0x5b0bdb9d, 0xfac27a43, 0xe1f96c06, 0x6cebe65d, 0xf94709d9, 0x63efc9f1,
+0xc95ba413, 0xe704de3c, 0x261e2b43, 0x9c3ef6a0, 0xad4bbe93, 0x79f1cfd8, 0xe314fc58, 0x3470d507,
+0xdd0e43e3, 0x394aca27, 0x6bbbf7f1, 0xb213d5e9, 0x1ba99155, 0x4bf71a64, 0x7aa72d51, 0x36a82476,
+0x81d0a1c9, 0xc175a36a, 0xda05fc03, 0xfe16edc6, 0xbbbf6f57, 0xb00978f6, 0x0e17eb6c, 0xe905fe98,
+0x49241c46, 0x6cc1fcda, 0x8a562cdf, 0xae6c7a70, 0x083e66ed, 0x433be4c6, 0x2092bc04, 0xb4326383,
+0x92006a4a, 0x401602df, 0xa54a2952, 0xa4ae512c, 0x46153a61, 0xbaf9bdca, 0x56a95e63, 0x5eadc50f,
+0xe403dc0b, 0x3708ba31, 0x21db3941, 0xeb74c1c8, 0x8d2628cf, 0xb7925cf4, 0xdb1ad60b, 0x67dfb1ea,
+0x4b8893ec, 0x2d101360, 0x3036d21b, 0x9ed1eb42, 0xa1a60268, 0xe1d6502a, 0x459b4907, 0x1275e279,
+0x20efb17c, 0x7f4ae419, 0xf6e4a74b, 0x35698d03, 0x5414844a, 0x2397a6b9, 0x7e41f393, 0xe78fd97d
+
+e =
+32256
+
+k =
+6144
+
+ncb =
+18528
+
+rv_index =
+0
+
+code_block_mode =
+1
+
+op_flags =
+RTE_BBDEV_TURBO_RATE_MATCH, RTE_BBDEV_TURBO_CRC_24B_ATTACH
+
+expected_status =
+OK
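
A note on the fields above: ncb is derived from k rather than chosen freely. The following is a minimal sketch, assuming the 3GPP TS 36.212 sub-block interleaver with no soft-buffer limitation (not code from this patch), that reproduces ncb = 18528 from k = 6144:

    #include <stdint.h>
    #include <stdio.h>

    /* The sub-block interleaver pads K + 4 tail bits up to a whole
     * number of 32-bit columns; the circular buffer then holds three
     * such streams (systematic plus two parity). */
    static uint32_t turbo_ncb(uint32_t k)
    {
        uint32_t rows = (k + 4 + 31) / 32;   /* ceil((K + 4) / 32) */
        return 3 * rows * 32;
    }

    int main(void)
    {
        printf("%u\n", turbo_ncb(6144));     /* prints 18528 */
        return 0;
    }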
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c2_k5952_r0_e17868_crc24b.data b/app/test-bbdev/test_vectors/turbo_enc_c2_k5952_r0_e17868_crc24b.data
new file mode 100644
index 00000000..13006501
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c2_k5952_r0_e17868_crc24b.data
@@ -0,0 +1,300 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189,
+0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e,
+0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf,
+0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e,
+0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223,
+0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b,
+0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54,
+0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062,
+0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb,
+0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096,
+0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a,
+0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb,
+0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7,
+0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e,
+0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735,
+0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a,
+0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c,
+0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2,
+0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a,
+0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3,
+0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f,
+0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb,
+0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6,
+0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d,
+0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd,
+0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f,
+0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d,
+0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0,
+0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66,
+0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256,
+0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0xaf9437bc,
+0x341f8045, 0x93f6ff86, 0x2e019ec9, 0xc80116e8, 0xf984ff34, 0xe4f1be20,
+0xfe455510, 0xc561cdba, 0x2d6f560f, 0xb239fd9d, 0xd254e343, 0x8090448a,
+0xa724c4f1, 0xa00fe4f0, 0xf0379ac6, 0x0c61b383, 0x44b2228e, 0x9d223bc0,
+0xe7cfbc38, 0x62ae6a40, 0x6f8d445c, 0xc6045eaf, 0x8507df18, 0xe64c057e,
+0x4e589113, 0x3ba3bf55, 0x6a702b19, 0xea23af7d, 0xa1b98022, 0x7096c45c,
+0xace3b8c1, 0x47ed7bb4, 0x198a2b8f, 0x586b57db, 0xbf9d24d4, 0x96054c66,
+0x13f26306, 0x2a43524b, 0x1ecd419b, 0xb3a24852, 0x4ed7765f, 0x4c99fbe3,
+0x6c3b9ebb, 0x1ef7bfeb, 0x0daf1256, 0xe9c90c7f, 0xbe067ed7, 0x469102ba,
+0xedccc098, 0x96691fb1, 0x2a9b850f, 0x58aba5f2, 0x0bcad31d, 0x90b1f25b,
+0x78a55fe4, 0xdd41e626, 0xdcba3e83, 0x2209d8a9, 0x6ee2f76f, 0x55643570,
+0xd181a0ef, 0xd1b2790a, 0xc7793587, 0x3aebe10c, 0xbbde5fe0, 0x5c308948,
+0x01439ece, 0x5319fbef, 0x10b0de1b, 0x6595f4f3, 0xa3e6a7c9, 0xf97b2e10,
+0x4513cf3d, 0x8ded3394, 0x0949a772, 0x934f2269, 0x2b636628, 0x92101c68,
+0x804dfd94, 0x178315be, 0x384adf65, 0xe64d11d3, 0xc44881c0, 0x7a8e7886,
+0xb721c0c8, 0x6d0d0fc0, 0x97f0cc5a, 0x3c67c31f, 0x4906dd09, 0x6202c01a,
+0xad8d6c5b, 0x53158056, 0x13704ed5, 0xa1dfeaaa, 0xb35e4b40, 0xd9c3e6fa,
+0x0c9650ce, 0xbdfe5c22, 0x874bedb3, 0xbd07d0bd, 0x4eef4a4f, 0x2970d932,
+0xfdd003ac, 0x609fbea4, 0xad817794, 0xb4ee9f96, 0x559b3faa, 0x1edcf35f,
+0x8342e9fb, 0x69a73981, 0x5fc86a07, 0x929b45aa, 0x5c32e847, 0x43421484,
+0x9962abf5, 0x8cf07ae4, 0xba7b556d, 0x2da42ac1, 0xac0eef6f, 0xdc118ef8,
+0x6061597a, 0xc5f7a007, 0xb9b92c51, 0x87049008, 0x72c9da8a, 0x54dc48c6,
+0xb4c4b392, 0x50204c8d, 0x00b2ef59, 0xcdac687c, 0xb2158dc3, 0x4f1c65e3,
+0x8af22286, 0x4444ee49, 0x2d8921ce, 0x3c0a8d11, 0xb97325da, 0x056aec7b,
+0xa96ca31b, 0x71a5febb, 0xa3166c47, 0x5e920dcd, 0xa58b2e61, 0x4d3b392f,
+0x1b9b177b, 0x058b0dfa, 0x4bdc16ab, 0x8a4eeac3, 0xfbf2d2fc, 0x636f04a6,
+0x75a14bda, 0xa28f1947, 0xf2f248e4, 0x934e7dd6, 0xeb4401cb, 0x37f693a2,
+0xd1cb7e16, 0xbd5d9abb, 0x6f613f63, 0x9bfb5fd5, 0x6b70c5ea, 0xbdcc6c7a,
+0x41efaee1, 0xf45d3965, 0x0499b12b, 0x7820873a, 0xed53961f, 0x303c4f04,
+0x2e4bd363, 0x1bcdd589, 0xf7317de0, 0x7ba3d53a, 0x61fb36a8, 0x23e55b43,
+0x0ef838e9, 0x7b9d91a3, 0x3cd7d835, 0x5f28e517, 0xa100fad7, 0xff39800d,
+0xd7894433, 0xb61a62cd, 0xde618b70, 0x8560d770, 0xed02
+
+output0 =
+0x11d2bcac, 0x7715ae4d, 0xc9f4dbc6, 0x2fda3c1a, 0x09349b16, 0x2cd3c189,
+0xe5650429, 0xc73c42c1, 0xe7336bb7, 0xbb7de593, 0x83f986aa, 0xc0ade12e,
+0x6730b095, 0x78f7c059, 0xaa907199, 0x75c323d1, 0x7061f9ba, 0x97e067bf,
+0xc155cd55, 0x6a4cb08d, 0x4260e2fa, 0xff35e496, 0x37f251d3, 0x02fd9f9e,
+0x6f53345c, 0x790cda6d, 0x3b8549e4, 0x56d0c6ea, 0x70a38006, 0xfff18223,
+0x6f2035b5, 0x6cf508d3, 0x98a0a308, 0x432353fc, 0x0eb818eb, 0xdd9a128b,
+0xf92431b2, 0xad788286, 0xda07d5de, 0x44b4b3a8, 0xbe62fd67, 0x61a4eb54,
+0x90d7ac9f, 0xc4d9a930, 0xfddc3c24, 0xf5e3b1c8, 0x38143538, 0xcb1d3062,
+0xcae36df3, 0x50a73437, 0x542dcab7, 0x875973a0, 0x34690309, 0x49cb1ddb,
+0xf8a62b92, 0x82af6103, 0xc9f8c928, 0x3835b822, 0x16b044a6, 0xdae89096,
+0xa5be47d4, 0xaf61189e, 0x5cd70faf, 0x037331cf, 0xef7fa5f6, 0xb2f2b41a,
+0xa6f222c6, 0xdb60fe4c, 0x2a857a9a, 0x0b821f9d, 0x348afd17, 0x7eecbaeb,
+0x92bb9509, 0x8a3cec24, 0xd02549a2, 0x155ffa81, 0x2b7feac6, 0x3ee461e7,
+0xc981f936, 0x89b544c7, 0x9a431e36, 0x62511734, 0x769f9647, 0x211a747e,
+0x567abef4, 0xad87e2b4, 0xa3e0c3bf, 0x6d325dd5, 0xf561cc46, 0x39925735,
+0x3d8abbfd, 0xc3724c88, 0x8bdf03c9, 0x1b02a12a, 0x4f233a0c, 0x9ca9444a,
+0xc5d1f7e6, 0x4d995f37, 0xd9aefb32, 0xd0465248, 0x0f3a3b21, 0x62ea8c0c,
+0x91f8d54e, 0x5cf75514, 0x14618a01, 0x8fe9b87e, 0xf2b424f9, 0x49724ce2,
+0xa1464587, 0x5e00dc83, 0x59475455, 0x444119b1, 0x4fb9f036, 0x65fcbc1a,
+0x1c63a990, 0x767a6114, 0xb0ede06a, 0xcfb91ae3, 0x7874af5f, 0xf78772a3,
+0xa2932c81, 0x77f2759d, 0x930dc8f1, 0x95ce14a5, 0x134363ee, 0x61ee143f,
+0xf0034b35, 0xfdc75fce, 0x3be2dcf3, 0xff3a07eb, 0xdc43f0eb, 0x23ba73bb,
+0x45f7649e, 0xcacc297c, 0xa3dd98db, 0x058d46a9, 0x6bcfc154, 0x7be8e1e6,
+0x618a4754, 0x8d193c46, 0xba39e1ce, 0xc3b85cfc, 0xd80d853b, 0x38d6440d,
+0x9c1a6185, 0x90c9dfcb, 0x01c6e841, 0xeedfe6ac, 0x7b61a3ae, 0xad3924dd,
+0x514bc1a8, 0x6ed60d7e, 0x1b74b79b, 0xea295947, 0x5f9a5d33, 0xcd24311f,
+0x0bd3d10c, 0xd214ecfe, 0xcb37035d, 0x8ce95168, 0x7020cb52, 0xe432107d,
+0x042d63ac, 0x6201c8dd, 0x4bea65a2, 0x1a3cf453, 0x5b8e868d, 0x3d653bb0,
+0x24c1967e, 0x37f183a9, 0x3700e703, 0xb168a02b, 0x2592cd82, 0x5c0cdb66,
+0x70b64ebd, 0xf8dcb9a7, 0x634a8335, 0x06642398, 0xfe2f4497, 0x2e775256,
+0x30f85cbe, 0xf4d2fdaf, 0x46ca6c9d, 0x022097d6, 0xcedeb03e, 0xb4ca20bc,
+0x4fde636c, 0x10ea20a0, 0xe6da8721, 0xbfde5b08, 0x9c3739da, 0xb6dc015a,
+0x427db088, 0xdfdb8e6f, 0x756be6c1, 0x21f5297b, 0x06135665, 0xc1602b7d,
+0x049536c5, 0xbbb3b801, 0x0cdb0c19, 0x7b2ad622, 0xfee8218f, 0xc5c7f123,
+0x8abd3301, 0xa15b534d, 0x29dd2053, 0xd409abf9, 0x3ef19d6b, 0x70a3cbc2,
+0x7a51423a, 0x4505b2ad, 0xdc74c75e, 0x068751a9, 0xb0b56437, 0x14a10371,
+0x76af806f, 0xa8a47e19, 0x7c97a26e, 0x7998a3d6, 0xdc1ad1e2, 0xb532a301,
+0xca8a3e7d, 0xd0aef374, 0x204990c0, 0xc7011aec, 0xa69151ea, 0x53390026,
+0x7bf0d762, 0x735c2202, 0x64159e54, 0x5a3b1a56, 0x9ef1def2, 0x0ab8a961,
+0x587b0886, 0xb8cc5975, 0x2a5a0f23, 0x069d05be, 0x9cc3c207, 0x40ef1a02,
+0x4fae3f5b, 0x1f127aae, 0xd4e6d411, 0x17ac43ef, 0xe4bf891b, 0xfbb21765,
+0x2c560c7e, 0x8561988c, 0x73a01032, 0x0cfef73a, 0x694c4991, 0x885d7a3f,
+0x4218d1ff, 0xc2efaffb, 0xaf9d9715, 0xf76de6b2, 0xcce8e8ff, 0x370e3800,
+0x493675eb, 0xd8fbcbda, 0xa5b382c2, 0x86c8f1ea, 0x3d724ea4, 0xb067034c,
+0x6491d87e, 0x1a745ce4, 0xbb27180b, 0x1a2f0acc, 0xac4b7b3b, 0xe324578b,
+0xc87928df, 0x9c1de566, 0x0ce2a17d, 0xaf2e13ce, 0x146a8659, 0x8727f6ae,
+0xe2df7d03, 0x1a8e4cb4, 0xfa590976, 0x13a7c236, 0xc07489d0, 0xbe905e17,
+0xafeb3d4b, 0x201e73f2, 0x5bdca12e, 0x3e15a852, 0xbcfc3271, 0x5d398410,
+0x6bfacc15, 0x011fc61f, 0x43e43fd7, 0x0640717c, 0x96bfb3ff, 0x158eac19,
+0x3b852e91, 0x74f9ceda, 0xcac71326, 0xfc0e312a, 0x20e8137b, 0xa1162611,
+0x239ac7fe, 0xb9d00f8a, 0xea0b5241, 0x019f0d25, 0xc5153264, 0xb48a5547,
+0xe54e339f, 0x17a6cca5, 0x5065af0d, 0x5ce648b9, 0xb457b342, 0xc1cb3f0e,
+0x28d03c8b, 0x5144ed7a, 0xdb80779f, 0x53ce1b87, 0xbc071147, 0xbfe46c11,
+0x7296785e, 0x83e4a63e, 0xc58982e9, 0x9538c5b9, 0xf14abaaa, 0xd915124c,
+0x73540cd6, 0xe333696b, 0x58f9e00a, 0xd4dad10f, 0xc0de1420, 0x355e2bdc,
+0xb2faa8fd, 0xbe6a12f1, 0x45d415cc, 0x47f5aed9, 0x4754e770, 0x2bb07385,
+0x41374352, 0xf80beb47, 0xef02f35c, 0xc9c1b86e, 0x94b5785b, 0xba33123f,
+0x7e39f0c9, 0x028a9286, 0x7d52c9f1, 0x06f04da6, 0xbc6a68d1, 0xfc70bace,
+0x95b6a129, 0xfff224bb, 0x701ef3fb, 0x3309286f, 0x544ae8c1, 0xdca62c4a,
+0xf8862ee2, 0xf9e3cd29, 0x2c07cce2, 0x8d93652a, 0xf47e4611, 0x4635f586,
+0x1c03e0f4, 0x819724c7, 0x96b2a3f0, 0xeeb1ad95, 0xff08e517, 0xbd4ba6ed,
+0x49ddb12d, 0x365734b5, 0x5edf7459, 0x2ee117a9, 0x067b9462, 0x549f6d5f,
+0xe1dd8309, 0x12bb8e0a, 0x4382fe0f, 0x57fce11b, 0x1bc1c809, 0xd2741876,
+0xcffd36b9, 0x45a64a67, 0xb77955d5, 0xb5825f24, 0x86eef2a8, 0xb66cac3b,
+0xfec661f8, 0x531d5963, 0xa0a2f109, 0xe1795b68, 0x6bfd44e0, 0x0849af41,
+0xf56d7d73, 0x3eab025f, 0xd109b015, 0xec24d23b, 0x8b3f603c, 0x648f421f,
+0xc833f32a, 0x53cbda14, 0xb9b3fee4, 0xcf9ac8ab, 0x300f0548, 0xfe0bc595,
+0x4f437a1a, 0xdacfae1f, 0x284e4a6d, 0x57815e28, 0x15d32b39, 0x68f85b97,
+0x18b21602, 0x941259ef, 0x68598d90, 0x6dfd81db, 0xa8fc9a55, 0x60fef2b3,
+0xcf07961b, 0x11b96588, 0xd9928a88, 0x2bbc72fd, 0x025f81b4, 0x44794d6d,
+0x6a7420aa, 0xa73d4d89, 0x25b523ff, 0x521dbea3, 0xdacde374, 0xbf41776f,
+0x9c1e3cb6, 0xb7b3499b, 0x8e154ed3, 0x1ac8f8c8, 0x4f2cf203, 0xa293c979,
+0x2792a2e1, 0x329ca1dd, 0x01e1aa4a, 0x01a78d3e, 0x100c74ed, 0xb0db13c7,
+0x99d53541, 0xd435a3b9, 0x898bd713, 0x069287bd, 0xe3175d24, 0xb09dc7fb,
+0x7340d7d3, 0x81369cc3, 0x8f16a7a8, 0x43963d57, 0x58e0f6a3, 0x39803b6c,
+0xa419a5ac, 0xf45fd6fa, 0xec389d5b, 0x636ce34f, 0x71405df6, 0xfc504724,
+0xa3a3b192, 0x19df1379, 0xaf028f42, 0x3e1838e8, 0x6b7a64a9, 0xaa90ffee,
+0x238ee398, 0x67eae4bd, 0x0a549a15, 0x125873d3, 0x00ece005, 0x9f7b2b8f,
+0x7571f73c, 0x98940ab4, 0x192ed328, 0x8fae66da, 0x9063b323, 0x2542f666,
+0x07ac7c8e, 0xce92857a, 0x3ded4b69, 0x50204cb4, 0x2a81cd17, 0xfd6ac536,
+0xafb46424, 0xe802147b, 0x30a37858, 0x90697235, 0x1606e767, 0x26c1048e,
+0x3a20df7b, 0xe127de97, 0x481abd6c, 0xc3a61d15, 0xbb2e929c, 0xd2ec0d65,
+0xc0e53693, 0x500c2e9a, 0x316d0f4b, 0x18354452, 0xb2dd7728, 0x5a874b3e,
+0x6dcf6f9c, 0x9d7c15ce, 0x1b25566d, 0xe13d6e7a, 0x9d9d06ae, 0x4d0e7d23,
+0x33100e24, 0x7c20e0f8, 0x2cfc95bf, 0xb8e702a0, 0x5e60215b, 0xa516093e,
+0x4cf3bed7, 0x4a0d6f56, 0xaf4901cc, 0xaeb7137e, 0x07d3c34d, 0x38641bc6,
+0xf7366ca3, 0x6e01bd0d, 0x86818282, 0xf0f8712a, 0x431f57ad, 0x46b832c3,
+0xec1893b8, 0xfa43c94f, 0x8b4c382a, 0x84cc0cec, 0x3f1016f3, 0xdbc7b88d,
+0xa3c34e52, 0x50691492, 0x8b10e709, 0x5c5db670, 0xcda5888b, 0xa1ef2917,
+0x1d4689ba, 0xc6470dcc, 0x7010373d, 0x4f27b426, 0x06b0de9d, 0xba83e005,
+0x97cd5aad, 0xc1d0f41a, 0x01bae35a, 0xb30c805e, 0x8d7982eb, 0xe1a7b51d,
+0x88a9f4d2, 0x7a6742a8, 0x0d9cb569, 0x06e9bc87, 0xf8c93b93, 0x89f3e80a,
+0x96b971de, 0xcd548ef1, 0x5fbeb12a, 0x39c798e3, 0x90183352, 0x9b2476a7,
+0x9437b0ae, 0x1f8045af, 0xf6ff8634, 0x019ec993, 0x0116e82e, 0x84ff34c8,
+0xf1be20f9, 0x455510e4, 0x61cdbafe, 0x6f560fc5, 0x39fd9d2d, 0x54e343b2,
+0x90448ad2, 0x24c4f180, 0x0fe4f0a7, 0x379ac6a0, 0x61b383f0, 0xb2228e0c,
+0x223bc044, 0xcfbc389d, 0xae6a40e7, 0x8d445c62, 0x045eaf6f, 0x07df18c6,
+0x4c057e85, 0x589113e6, 0xa3bf554e, 0x702b193b, 0x23af7d6a, 0xb98022ea,
+0x96c45ca1, 0xe3b8c170, 0xed7bb4ac, 0x8a2b8f47, 0x6b57db19, 0x9d24d458,
+0x054c66bf, 0xf2630696, 0x43524b13, 0xcd419b2a, 0xa248521e, 0xd7765fb3,
+0x99fbe34e, 0x3b9ebb4c, 0xf7bfeb6c, 0xaf12561e, 0xc90c7f0d, 0x067ed7e9,
+0x9102babe, 0xccc09846, 0x691fb1ed, 0x9b850f96, 0xaba5f22a, 0xcad31d58,
+0xb1f25b0b, 0xa55fe490, 0x41e62678, 0xba3e83dd, 0x09d8a9dc, 0xe2f76f22,
+0x6435706e, 0x81a0ef55, 0xb2790ad1, 0x793587d1, 0xebe10cc7, 0xde5fe03a,
+0x308948bb, 0x439ece5c, 0x19fbef01, 0xb0de1b53, 0x95f4f310, 0xe6a7c965,
+0x7b2e10a3, 0x13cf3df9, 0xed339445, 0x49a7728d, 0x4f226909, 0x63662893,
+0x101c682b, 0x4dfd9492, 0x8315be80, 0x4adf6517, 0x4d11d338, 0x4881c0e6,
+0x8e7886c4, 0x21c0c87a, 0x0d0fc0b7, 0xf0cc5a6d, 0x67c31f97, 0x06dd093c,
+0x02c01a49, 0x8d6c5b62, 0x158056ad, 0x704ed553, 0xdfeaaa13, 0x5e4b40a1,
+0xc3e6fab3, 0x9650ced9, 0xfe5c220c, 0x4bedb3bd, 0x07d0bd87, 0xef4a4fbd,
+0x70d9324e, 0xd003ac29, 0x9fbea4fd, 0x81779460, 0xee9f96ad, 0x9b3faab4,
+0xdcf35f55, 0x42e9fb1e, 0xa7398183, 0xc86a0769, 0x9b45aa5f, 0x32e84792,
+0x4214845c, 0x62abf543, 0xf07ae499, 0x7b556d8c, 0xa42ac1ba, 0x0eef6f2d,
+0x118ef8ac, 0x61597adc, 0xf7a00760, 0xb92c51c5, 0x049008b9, 0xc9da8a87,
+0xdc48c672, 0xc4b39254, 0x204c8db4, 0xb2ef5950, 0xac687c00, 0x158dc3cd,
+0x1c65e3b2, 0xf222864f, 0x44ee498a, 0x8921ce44, 0x0a8d112d, 0x7325da3c,
+0x6aec7bb9, 0x6ca31b05, 0xa5febba9, 0x166c4771, 0x920dcda3, 0x8b2e615e,
+0x3b392fa5, 0x9b177b4d, 0x8b0dfa1b, 0xdc16ab05, 0x4eeac34b, 0xf2d2fc8a,
+0x6f04a6fb, 0xa14bda63, 0x8f194775, 0xf248e4a2, 0x4e7dd6f2, 0x4401cb93,
+0xf693a2eb, 0xcb7e1637, 0x5d9abbd1, 0x613f63bd, 0xfb5fd56f, 0x70c5ea9b,
+0xcc6c7a6b, 0xefaee1bd, 0x5d396541, 0x99b12bf4, 0x20873a04, 0x53961f78,
+0x3c4f04ed, 0x4bd36330, 0xcdd5892e, 0x317de01b, 0xa3d53af7, 0xfb36a87b,
+0xe55b4361, 0xf838e923, 0x9d91a30e, 0xd7d8357b, 0x28e5173c, 0x00fad75f,
+0x39800da1, 0x894433ff, 0x1a62cdd7, 0x618b70b6, 0x60d770de, 0xbaed0285,
+0x7bd24f54, 0x96a33975, 0x6884bd60, 0xeb75cf39, 0x19dd058a, 0xd4b8f064,
+0x045178ca, 0xd9e3c1de, 0x91b494a6, 0x3316ac81, 0xb926c7d8, 0xc190f058,
+0xce70a42c, 0x54f3b3c4, 0x79b20d90, 0x75177ad7, 0x9f010f51, 0x889a4fd5,
+0x8fbbc8f0, 0xf1340040, 0xc22acb69, 0xe1473a0a, 0x52ddd7e3, 0x4cd54494,
+0x4eb7a9ed, 0xd35e6a65, 0x3236b14c, 0x89786899, 0x3dc59704, 0x51d674be,
+0xbcf13659, 0x6ef6266e, 0xf50ef497, 0x141354bc, 0xc8a42e76, 0x59b14233,
+0x8b6cad7d, 0x061d8efe, 0xaca42513, 0x161cee21, 0xfeac5471, 0x4c499d31,
+0xac2892cb, 0x9bacf4c5, 0xfff36588, 0x898f7a06, 0xad87be87, 0xebe7cb95,
+0xa52b5fdf, 0x9f1f3945, 0xad435317, 0x1b316bcc, 0x8e6c0fd3, 0x767981d6,
+0x1f86412b, 0x32b457f5, 0x30ab14db, 0x7120e8ae, 0x3972f57e, 0x2268af9d,
+0xf45e5738, 0xc48bd523, 0x76726cae, 0x10bcdaa2, 0x49dcbe96, 0x8e28c545,
+0x25fd1cc8, 0xc272f106, 0x1a34497e, 0x5f850d98, 0x92e324e4, 0x7e59f811,
+0x0bba7ff0, 0x86b3b23f, 0x29558cee, 0xcbd55b0d, 0x15c10dc2, 0xd6617dd5,
+0x41168898, 0x920132eb, 0xd10f51c6, 0xdc6eba51, 0xa07f89cc, 0xfcea8627,
+0x44a52afa, 0x03d4230b, 0xdbb998d8, 0xf30d2a06, 0x08d35aeb, 0x9597f9bc,
+0x720d2689, 0xf64314dd, 0xcadd04c5, 0x9cab9213, 0x22205cca, 0xd6cc196e,
+0xa820e3c8, 0x7c262f92, 0xaf481f6b, 0xcecc0fe6, 0x2ec0b33a, 0x20aebc0a,
+0xb5699b4b, 0xe0a559ba, 0xb9cd5224, 0x9f7ceb48, 0x35807e3c, 0x30be0794,
+0x05463d41, 0x0e10046f, 0x776e71d6, 0xc6bac22d, 0xfee7f911, 0x3bc76715,
+0x5024f666, 0x88a07482, 0x7dc55ed7, 0x3ee1d3b6, 0xec419af3, 0x4cd0e39f,
+0x988f3802, 0x23baa080, 0xef9ce568, 0xf46ff60f, 0x2efcb093, 0x23b5b12a,
+0xd5fb68c7, 0x58d45968, 0x6dce76cd, 0xf82c5a91, 0xfe6a2bf7, 0x0303b383,
+0xf1b1196a, 0xa18d6d69, 0xb0025e33, 0x3f51e6c7, 0x0263bac1, 0x87743b29,
+0x8f62b573, 0x7a80ad05, 0x95869387, 0x33e6a0f5, 0x50950449, 0x5da456ad,
+0xb2ab1617, 0x7b345bf9, 0x7f923fd1, 0x4ca337fe, 0x95ccf61b, 0xfd8221ca,
+0x0f19808f, 0x80029266, 0x915a3c47, 0x415045d7, 0x92f91b36, 0xd23576e5,
+0x74a5652e, 0xe3d7288c, 0xba8213f7, 0x3a0ecd53, 0xa34fa01a, 0x62b4db3b,
+0x3a96a793, 0xcef7b0da, 0xb64b8d17, 0x8716c3ed, 0xbd0ab055, 0xa6896492,
+0x1250e75b, 0xb2bce1a7, 0x4100ca8f, 0x2e05625f, 0x8eb5bc83, 0x3dac8027,
+0x62b3376d, 0xd6b2cc64, 0x4c9502dc, 0x6c1cfcbe, 0x380c1dce, 0x2b238ea8,
+0x7051d59b, 0x580b122d, 0x6174b6e5, 0x4353d726, 0xba555765, 0xb645490f,
+0x58427e5f, 0x06fc96e1, 0xae8f4528, 0xab0fee5e, 0x5de960b3, 0x63e79c19,
+0x8552eeb8, 0x9344db26, 0x3326d17c, 0xa4174885, 0x6fe45472, 0x7bd20a9e,
+0xa5e4aba0, 0x77b509af, 0x22f64240, 0x40bc5144, 0x57ca1b2d, 0xc1260090,
+0x008997aa, 0xe8029716, 0x6efb01f3, 0xc73e1366, 0x9de737ff, 0x1b5fabe8,
+0x957bb682, 0x5e27f5a5, 0x7777491a, 0x9b6fc6c1, 0xb0802205, 0x16e94d46,
+0x58079aee, 0x2b24c4c4, 0xfffb2e7b, 0x7c5560e1, 0xe1ad3009, 0x6a0c0027,
+0x09aea741, 0x9af375a6, 0x7106b7bf, 0x1cdcff69, 0xa007f9c2, 0x6f7729c0,
+0xeac4297f, 0x8622ede7, 0x89346bfa, 0xc24674b5, 0xd1eabf00, 0xfa6b2d39,
+0x04b0c153, 0x6adf49dc, 0x48ea0937, 0xae771ea6, 0xd4208255, 0x4052586d,
+0x9c6aa640, 0x9adbb652, 0x017d8661, 0xb20855f0, 0x1e2ed7b6, 0x70af7f25,
+0x354df0de, 0xf2fdfde7, 0x0844478c, 0x63993fb2, 0xc49e71ae, 0x1e15ae81,
+0x932ad241, 0xba83cf8f, 0xc7e227f3, 0xd1171a85, 0x63ea34a0, 0xe74f042b,
+0xd88173d9, 0xba7d8443, 0x9d47c6c4, 0xbe282a93, 0x6142fbac, 0x69307e44,
+0xbb86a7f1, 0x9fa3418f, 0x3a3eadce, 0x9804167b, 0x2d5eb47a, 0x5aa2372d,
+0x66eb8e1c, 0xf56da1fd, 0x3753eee9, 0xac78c709, 0x787ae648, 0x42d4fa7f,
+0x5a036af5, 0xfc6dfa54, 0x269f1195, 0xef1c698d, 0x2932b2d6, 0xba10b26c,
+0xf9b58396, 0x8f04a38a, 0x31c1264d, 0xf11c5ac6, 0x693d8ef6, 0x25b1d015,
+0x01c06e8f, 0x9c250a6f, 0x4b8fffcd, 0x1eff0743, 0x5a4974ec, 0x9d2d24c1,
+0x5562b2ae, 0x2a5b2deb, 0xd4a32596, 0x8ec43665, 0xde3ae654, 0x99e979d0,
+0xb6250c7c, 0x478a2891, 0xe1d6c035, 0x1dc453e8, 0xc7490c8d, 0xd4e85297,
+0x8fa94952, 0xc863cde8, 0x724dbbc8, 0x89ba9607, 0x0144ba1b, 0x3fed833c,
+0xd16278d4, 0x0080f227, 0xfbe241c5, 0x61892e7f, 0x503dced0, 0xa54d6ea5,
+0xcb5024a3, 0x591e11d1, 0xabeb85db, 0xc8314e58, 0xfb7792cb, 0xc70e5ada,
+0xcc07e739, 0xdb3ad4d6, 0x7a2d24e8, 0x2a0e216a, 0xa04aa774, 0x1762ac59,
+0xed53f5f9, 0x261a791e, 0x7a4102ae, 0x4650dcc9, 0xffa48ffa, 0x35a8f4e1,
+0x98e0f041, 0x3e3a9a83, 0xfb43c8e9, 0x7e0e6a1e, 0x06f718e5, 0x6812c114,
+0xaf21e4a1, 0x10ce04ca, 0x6e465a05, 0x250758f2, 0xfdf02f68, 0x426ec74c,
+0x51e44567, 0x17e637cc, 0x9da657e4, 0x39ecef6e, 0xd31c4423, 0x12bc06b4,
+0x35af82e0, 0x76379d09, 0x7fb7aa1e, 0x0b73e469, 0x7b3697a4, 0x6bbbca38,
+0x018d2e07, 0x8d74a290, 0x02e00314, 0xdc9d1f4f, 0x62cbdd73, 0xd1173c86,
+0x62c1d5f2, 0xdfb16100, 0x37cc28e8, 0x43954e64, 0xeaa699d9, 0x0cd0a997,
+0xa0f26045
+
+ea =
+17868
+
+eb =
+17868
+
+cab =
+2
+
+c =
+2
+
+c_neg =
+0
+
+k_pos =
+5952
+
+k_neg =
+5888
+
+ncb_pos =
+17952
+
+ncb_neg =
+17760
+
+r =
+0
+
+rv_index =
+0
+
+code_block_mode =
+0
+
+op_flags =
+RTE_BBDEV_TURBO_CRC_24B_ATTACH
+
+expected_status =
+OK
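
The per-code-block fields above are mutually constrained: ncb_pos/ncb_neg follow from k_pos/k_neg by the same sub-block interleaver arithmetic sketched earlier, and the first cab of the c blocks are rate-matched to ea bits with the remainder at eb. A hedged self-check against this vector's values:

    #include <assert.h>

    static unsigned turbo_ncb(unsigned k)
    {
        return 3 * (((k + 4 + 31) / 32) * 32);
    }

    int main(void)
    {
        /* Values copied from the vector above. */
        assert(turbo_ncb(5952) == 17952);      /* k_pos -> ncb_pos */
        assert(turbo_ncb(5888) == 17760);      /* k_neg -> ncb_neg */
        /* cab blocks at ea bits, the remaining c - cab at eb bits. */
        assert(2 * 17868 + (2 - 2) * 17868 == 35736);
        return 0;
    }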
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c3_k4800_r2_e14412_crc24b.data b/app/test-bbdev/test_vectors/turbo_enc_c3_k4800_r2_e14412_crc24b.data
new file mode 100644
index 00000000..9e17429d
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c3_k4800_r2_e14412_crc24b.data
@@ -0,0 +1,153 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0xb0fe86aa, 0xffbf2968, 0x4f55a631, 0xf2152dbc, 0x195a49a5, 0xa4aa7c6c,
+0x48a7be38, 0xd32c84f2, 0xb040da60, 0xb39eb07c, 0x3443aad6, 0x19f52e13,
+0xb2914341, 0x0d5dfdb3, 0x1ba8aec0, 0x62d0c7be, 0x1d55475c, 0x10f7dd79,
+0x967e5333, 0xc0d524c1, 0xdca8da87, 0x1146ecf7, 0xfa09a85c, 0x83be800f,
+0x11b814cc, 0x362a4704, 0x4f22e7ce, 0x9985a0e2, 0x9de4be72, 0x488a45b3,
+0x6996ade2, 0xdb18c09a, 0x487d62a3, 0x94f82d13, 0xa0743f83, 0x75d2f5dd,
+0x0e470346, 0x6e2ec2e5, 0x1a67fe5f, 0xc369330d, 0x860e8766, 0x42c110f6,
+0x804a9985, 0x55db02b4, 0xb899fc5c, 0x57811d20, 0xda4f1f9d, 0x2a2efc30,
+0x953e7487, 0x4b0c3065, 0x247d5d47, 0xdde382e7, 0x1d6d2ad7, 0x4358ac24,
+0x2d0fd97f, 0x1483821e, 0xf6c74ee5, 0x23b5c2f4, 0x95d4a5fc, 0x486f370b,
+0x8dbae43d, 0x248f17d5, 0x2df76099, 0xd19ec872, 0xf592aa0e, 0x8de04c3c,
+0x3ada94fb, 0x9dd001a3, 0x2b4db6d1, 0x791cf495, 0x57dc764b, 0x21789f1f,
+0x0d38adf5, 0xd8cefdc2, 0xed98f8bf, 0x024bc4ed, 0x4f9d3062, 0x332d27c5,
+0xde70ad4e, 0xd85872d2, 0x5d0e29c0, 0xa4fd1887, 0x3fa67ba1, 0x46b9b008,
+0x055bbf61, 0xf78e59f0, 0x287d2e3b, 0x5b473fe7, 0xd7ad6e04, 0x9cdf4986,
+0x830432ae, 0xffded1ba, 0x35c00f77, 0xcf0a6648, 0x196c03aa, 0xba4e2dd3,
+0xdaf70fe4, 0x883de2cd, 0xedc3b52e, 0x14e638c1, 0xe607103a, 0xc4cfb700,
+0x4ed9843d, 0xc442a0cb, 0xc0d3bba2, 0x36789ba8, 0x9c9b7a24, 0xb943b6a4,
+0x8f2c6879, 0x0fb16a0c, 0x2677bada, 0xd14f2f7e, 0x662ec398, 0x1e37b7ae,
+0x8ad0312d, 0x21adb6a1, 0x072a11fd, 0x7ae828d1, 0xb510655f, 0x09f7b477,
+0x10a175fc, 0xb7086f51, 0x24546af3, 0xfae81b50, 0xcb328832, 0x63693615,
+0xa37c3678, 0x15fb6364, 0xb5f75a78, 0xfa9aa5f6, 0x03804a0e, 0x1d245890,
+0x06d6cdcd, 0x7485a3c7, 0x35a008ae, 0x1abeaf1c, 0xd9c55966, 0x2b694666,
+0x5341f2df, 0xcb647280, 0xfb88d81e, 0xf99a44d0, 0xb0585534, 0x0877b530,
+0x752ab39a, 0xd493d8bc, 0xfaa6a57a, 0xccb7c008, 0x9c35994b, 0xb8
+
+output0 =
+0xb0fe86aa, 0xffbf2968, 0x4f55a631, 0xf2152dbc, 0x195a49a5, 0xa4aa7c6c,
+0x48a7be38, 0xd32c84f2, 0xb040da60, 0xb39eb07c, 0x3443aad6, 0x19f52e13,
+0xb2914341, 0x0d5dfdb3, 0x1ba8aec0, 0x62d0c7be, 0x1d55475c, 0x10f7dd79,
+0x967e5333, 0xc0d524c1, 0xdca8da87, 0x1146ecf7, 0xfa09a85c, 0x83be800f,
+0x11b814cc, 0x362a4704, 0x4f22e7ce, 0x9985a0e2, 0x9de4be72, 0x488a45b3,
+0x6996ade2, 0xdb18c09a, 0x487d62a3, 0x94f82d13, 0xa0743f83, 0x75d2f5dd,
+0x0e470346, 0x6e2ec2e5, 0x1a67fe5f, 0xc369330d, 0x860e8766, 0x42c110f6,
+0x804a9985, 0x55db02b4, 0xb899fc5c, 0x57811d20, 0xda4f1f9d, 0x2a2efc30,
+0x953e7487, 0x4b0c3065, 0x247d5d47, 0xdde382e7, 0x1d6d2ad7, 0x4358ac24,
+0x2d0fd97f, 0x1483821e, 0xf6c74ee5, 0x23b5c2f4, 0x95d4a5fc, 0x486f370b,
+0x8dbae43d, 0x248f17d5, 0x2df76099, 0xd19ec872, 0xf592aa0e, 0x8de04c3c,
+0x3ada94fb, 0x9dd001a3, 0x2b4db6d1, 0x791cf495, 0x57dc764b, 0x21789f1f,
+0x0d38adf5, 0xd8cefdc2, 0xed98f8bf, 0x024bc4ed, 0x4f9d3062, 0x332d27c5,
+0xde70ad4e, 0xd85872d2, 0x5d0e29c0, 0xa4fd1887, 0x3fa67ba1, 0x46b9b008,
+0x055bbf61, 0xf78e59f0, 0x287d2e3b, 0x5b473fe7, 0xd7ad6e04, 0x9cdf4986,
+0x830432ae, 0xffded1ba, 0x35c00f77, 0xcf0a6648, 0x196c03aa, 0xba4e2dd3,
+0xdaf70fe4, 0x883de2cd, 0xedc3b52e, 0x14e638c1, 0xe607103a, 0xc4cfb700,
+0x4ed9843d, 0xc442a0cb, 0xc0d3bba2, 0x36789ba8, 0x9c9b7a24, 0xb943b6a4,
+0x8f2c6879, 0x0fb16a0c, 0x2677bada, 0xd14f2f7e, 0x662ec398, 0x1e37b7ae,
+0x8ad0312d, 0x21adb6a1, 0x072a11fd, 0x7ae828d1, 0xb510655f, 0x09f7b477,
+0x10a175fc, 0xb7086f51, 0x24546af3, 0xfae81b50, 0xcb328832, 0x63693615,
+0xa37c3678, 0x15fb6364, 0xb5f75a78, 0xfa9aa5f6, 0x03804a0e, 0x1d245890,
+0x06d6cdcd, 0x7485a3c7, 0x35a008ae, 0x1abeaf1c, 0xd9c55966, 0x2b694666,
+0x5341f2df, 0xcb647280, 0xfb88d81e, 0xf99a44d0, 0xb0585534, 0x0877b530,
+0x752ab39a, 0xd493d8bc, 0xfaa6a57a, 0xccb7c008, 0x9c35994b, 0x47ee26b8,
+0x94f624ac, 0xa3bd4876, 0xf7d4854e, 0x8871d433, 0x9321d942, 0x7b626be8,
+0x72c934b0, 0x3b7af8a4, 0x5102c29f, 0x710e4dbc, 0x99708292, 0x1458c4c1,
+0x61026bc5, 0xe776e388, 0x4a0222b3, 0x760e5aaf, 0x662f3583, 0x5ab1005b,
+0xe527ef70, 0x4170d611, 0x307bebc4, 0xfdd00caf, 0xbaae1044, 0xcab4d459,
+0x38281dcf, 0x90580c89, 0x49cf5986, 0xa27da769, 0xceced49b, 0x5ea37953,
+0x8a7e6c1c, 0x1e01b4e2, 0xe08026ae, 0x3754534a, 0x903b0ecf, 0x65f97a55,
+0x90798ed9, 0x9d1133bc, 0xe356a39f, 0xe47acbce, 0x01ccf326, 0x1954fd3d,
+0x240e69f8, 0x1da20bb4, 0xe1ab1684, 0x44c65d48, 0xd265e6c2, 0x51d4ef07,
+0x000970ef, 0xfeb939f4, 0x5dcc0132, 0x2bd27ae5, 0xba5dbd25, 0xa9d190e4,
+0x61556bec, 0x7fd6caba, 0x7fe312cb, 0xdd319413, 0x92a5dbbf, 0x17e61915,
+0x56067b67, 0x3dc348c0, 0x58c17fe0, 0xbe6bffcc, 0xd379026c, 0xc4174780,
+0xcce7f026, 0xb74b7eb0, 0xe4d5f625, 0x6bc16d3b, 0xff3e300e, 0x83f0d55a,
+0xf2e537df, 0x75c18f78, 0xa5458d1a, 0x47c778b0, 0x92eb8716, 0xcb5816fd,
+0xc2cc7079, 0xa7f1dc75, 0xed9e5ffa, 0xb7d6747f, 0xa2dc6907, 0x99b4d187,
+0x69f3138a, 0xbd285a1f, 0xb8ee18e0, 0x734b77a5, 0xc0700f69, 0x6c72a77a,
+0x76609ba9, 0xcfae9b73, 0x2cd329f0, 0x0d45aa12, 0x419fbcd7, 0x03e00f00,
+0x0effee99, 0x7f879eb4, 0x29a4c8df, 0x4432400a, 0x22b9cf55, 0xa1c4c645,
+0x200251b0, 0xff293906, 0xe11288e2, 0xde1e8ec7, 0x675752d1, 0x9630743e,
+0xd848c67e, 0x1eedfda9, 0x58b954fc, 0xa0dc7f1c, 0x0aea313e, 0x062b9449,
+0x0f17e57f, 0x96def6ec, 0x2ba45f14, 0xd9a5f8c5, 0x231483c8, 0x29b8783e,
+0xa6d24594, 0x1a62ffcd, 0x54c87a7f, 0x6fa9ca9a, 0xfff0f890, 0x51d6ae46,
+0xa96e82b0, 0x68cd9808, 0x56214df5, 0x2169defa, 0x72c1ecce, 0x2448e3c9,
+0x8eb2b3b3, 0x62c89c11, 0xbba20ee2, 0xea33f53a, 0x8b96a6b8, 0x9d33d551,
+0x363bd14e, 0x5fdd2b5f, 0xf8187546, 0xb692beab, 0x8df6b4c0, 0x753e2302,
+0xb90f7a37, 0x2b6bac6a, 0x13a07bc5, 0xb67f6d2b, 0x47b21569, 0xc2ab143f,
+0xf86ce30a, 0xde2dab70, 0x6f258860, 0x2878735c, 0x2aaeac20, 0xda80fb3e,
+0xe7a8ccf6, 0xbf011844, 0x40a610f8, 0x82f3fdf7, 0xadc4ec2c, 0xb8551030,
+0x76004380, 0xa384ff18, 0x080cf587, 0x6c4991d3, 0x2daea9aa, 0xa92d0c4f,
+0x4a9d117e, 0x2761f025, 0x96b40443, 0x57a82be4, 0x62374c44, 0x55dc64ae,
+0x28aa9f0c, 0x03a3b963, 0x41dbaa26, 0xb3c23735, 0x971cbd31, 0x939a9f80,
+0x76439fdc, 0xa9df79d6, 0x926ae3e2, 0x5ee75745, 0xf2396e52, 0xe18bd816,
+0x3f9834b9, 0x816a07bc, 0x8f873310, 0x45cf9b96, 0x0ce634dd, 0xe64a16d9,
+0x2733775a, 0x2b648c7e, 0xe600ee8e, 0xd99d8ae3, 0x10dadf2a, 0x904d3f87,
+0x3963e9e7, 0x47fcce89, 0xc256c898, 0x7db6cb66, 0xe1611a8b, 0xed81b10d,
+0x75eff974, 0x8a0a3d67, 0xa44311ff, 0x6f876783, 0x43dc93ce, 0x88616a33,
+0xa8706e8f, 0x33a2cbed, 0x3a6d20c3, 0x55175086, 0x39680945, 0x2d779a7b,
+0x0818a4ce, 0x55558918, 0xf7c08d35, 0x980a3038, 0x9cf068db, 0x3385d333,
+0xd81b33fb, 0x188018d5, 0x47c57bd3, 0x9ec2324f, 0x6901cd77, 0xc3bac44f,
+0x4d96aba8, 0x9094da5b, 0xa67a1353, 0x1fdfc4db, 0xa2fbeefa, 0xab4828e3,
+0x37d1db45, 0x0d33b3e9, 0x1ad72bb9, 0x7257bf9c, 0x2ec35167, 0xa4488b7f,
+0xf9dae588, 0x1038905a, 0x88ddf410, 0xaac11693, 0x24ac025d, 0x56cefbb5,
+0x6afe7f59, 0xc7f989e8, 0x15872570, 0x1bf16cdb, 0xfe9c93ce, 0x1fc9a076,
+0x85d37185, 0x1078cd31, 0xe1cd0327, 0x6d5315bc, 0x298cd836, 0xc8e21f06,
+0xe561c32d, 0x8ec404b4, 0x4d39bfbb, 0x24ede8c8, 0x451d6034, 0x3bafeea2,
+0x202f0ccf, 0x1fad37ce, 0xf04b5693, 0xeee57cd9, 0x5ef70007, 0x018e8a4f,
+0xfa61c9a9, 0x09989fcf, 0xe66b558b, 0x966efd48, 0x7525021d, 0xe7978b5e,
+0x7eb1d6dc, 0xa10c5e5b, 0xb7815e69, 0x7d486cfb, 0xcffdeb2a, 0x7375cb32,
+0x599008dc, 0xff91c796, 0x560ed4ad, 0x14e9a876, 0xfccf6a66, 0xa58be028,
+0xea9d408b, 0x3afc373b, 0xee008458, 0x19b6042e, 0x84806314, 0x431a4ba4,
+0x009ad6a1, 0xd7c62bf4, 0x1bebecba, 0x5c662f69, 0x83bfdea1, 0x45872a9a,
+0x00c9
+
+
+ea =
+14412
+
+eb =
+14412
+
+cab =
+4
+
+c =
+4
+
+c_neg =
+0
+
+k_pos =
+4800
+
+k_neg =
+4736
+
+ncb_pos =
+14496
+
+ncb_neg =
+14304
+
+r =
+2
+
+rv_index =
+0
+
+code_block_mode =
+0
+
+op_flags =
+RTE_BBDEV_TURBO_CRC_24B_ATTACH
+
+expected_status =
+OK
\ No newline at end of file
diff --git a/app/test-bbdev/test_vectors/turbo_enc_c4_k4800_r2_e14412_crc24b.data b/app/test-bbdev/test_vectors/turbo_enc_c4_k4800_r2_e14412_crc24b.data
new file mode 100644
index 00000000..ff95a3f9
--- /dev/null
+++ b/app/test-bbdev/test_vectors/turbo_enc_c4_k4800_r2_e14412_crc24b.data
@@ -0,0 +1,252 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+op_type =
+RTE_BBDEV_OP_TURBO_ENC
+
+input0 =
+0xb0fe86aa, 0xffbf2968, 0x4f55a631, 0xf2152dbc, 0x195a49a5, 0xa4aa7c6c,
+0x48a7be38, 0xd32c84f2, 0xb040da60, 0xb39eb07c, 0x3443aad6, 0x19f52e13,
+0xb2914341, 0x0d5dfdb3, 0x1ba8aec0, 0x62d0c7be, 0x1d55475c, 0x10f7dd79,
+0x967e5333, 0xc0d524c1, 0xdca8da87, 0x1146ecf7, 0xfa09a85c, 0x83be800f,
+0x11b814cc, 0x362a4704, 0x4f22e7ce, 0x9985a0e2, 0x9de4be72, 0x488a45b3,
+0x6996ade2, 0xdb18c09a, 0x487d62a3, 0x94f82d13, 0xa0743f83, 0x75d2f5dd,
+0x0e470346, 0x6e2ec2e5, 0x1a67fe5f, 0xc369330d, 0x860e8766, 0x42c110f6,
+0x804a9985, 0x55db02b4, 0xb899fc5c, 0x57811d20, 0xda4f1f9d, 0x2a2efc30,
+0x953e7487, 0x4b0c3065, 0x247d5d47, 0xdde382e7, 0x1d6d2ad7, 0x4358ac24,
+0x2d0fd97f, 0x1483821e, 0xf6c74ee5, 0x23b5c2f4, 0x95d4a5fc, 0x486f370b,
+0x8dbae43d, 0x248f17d5, 0x2df76099, 0xd19ec872, 0xf592aa0e, 0x8de04c3c,
+0x3ada94fb, 0x9dd001a3, 0x2b4db6d1, 0x791cf495, 0x57dc764b, 0x21789f1f,
+0x0d38adf5, 0xd8cefdc2, 0xed98f8bf, 0x024bc4ed, 0x4f9d3062, 0x332d27c5,
+0xde70ad4e, 0xd85872d2, 0x5d0e29c0, 0xa4fd1887, 0x3fa67ba1, 0x46b9b008,
+0x055bbf61, 0xf78e59f0, 0x287d2e3b, 0x5b473fe7, 0xd7ad6e04, 0x9cdf4986,
+0x830432ae, 0xffded1ba, 0x35c00f77, 0xcf0a6648, 0x196c03aa, 0xba4e2dd3,
+0xdaf70fe4, 0x883de2cd, 0xedc3b52e, 0x14e638c1, 0xe607103a, 0xc4cfb700,
+0x4ed9843d, 0xc442a0cb, 0xc0d3bba2, 0x36789ba8, 0x9c9b7a24, 0xb943b6a4,
+0x8f2c6879, 0x0fb16a0c, 0x2677bada, 0xd14f2f7e, 0x662ec398, 0x1e37b7ae,
+0x8ad0312d, 0x21adb6a1, 0x072a11fd, 0x7ae828d1, 0xb510655f, 0x09f7b477,
+0x10a175fc, 0xb7086f51, 0x24546af3, 0xfae81b50, 0xcb328832, 0x63693615,
+0xa37c3678, 0x15fb6364, 0xb5f75a78, 0xfa9aa5f6, 0x03804a0e, 0x1d245890,
+0x06d6cdcd, 0x7485a3c7, 0x35a008ae, 0x1abeaf1c, 0xd9c55966, 0x2b694666,
+0x5341f2df, 0xcb647280, 0xfb88d81e, 0xf99a44d0, 0xb0585534, 0x0877b530,
+0x752ab39a, 0xd493d8bc, 0xfaa6a57a, 0xccb7c008, 0x9c35994b, 0x9cca47b8,
+0xff47dda2, 0xa557841a, 0x7459fb6a, 0x93ec6495, 0x0ef7e6d2, 0xa8a6e6d4,
+0x46364f91, 0xd52c5776, 0x4dfd0d25, 0xd458c769, 0x24864930, 0xf7789991,
+0xfcc231f2, 0xc8e4cfd0, 0x50975038, 0x989398d1, 0x04504b14, 0x5e529570,
+0xf8c17202, 0xaf459a8a, 0xac2fecb2, 0x0eb3a176, 0xf6bac24f, 0x6fbddade,
+0x42456542, 0x83c10198, 0xbea4efaf, 0xff2f2966, 0x5edae82b, 0x09c27f13,
+0xa094c3b4, 0xc53b810e, 0x7f1cc39d, 0x25731c8a, 0x8d34b6ee, 0x5fb78b80,
+0x2980eef8, 0x7b76a9fb, 0x37bfacb9, 0x80f7a0ea, 0xd1e3e69d, 0xcda19b8b,
+0x9af26246, 0xf55495cf, 0x6f45ad1b, 0xfe2cb329, 0xc33cb7e6, 0x67bbde4a,
+0x32310c7b, 0x80e6b00a, 0xdd4004d2, 0x0e0eb8b6, 0x6f8d951f, 0xb196cb40,
+0x6dbe29dd, 0xa138a375, 0xd4f5b058, 0x69ea638f, 0x50438a7a, 0xe5530eb3,
+0x527cb218, 0xbfa67b9d, 0x7efb86df, 0x724ed65b, 0x38006782, 0x6a9924a4,
+0x749955c2, 0x5cb82627, 0xa74ceecb, 0x89eb103d, 0x89a12973, 0x660613c4,
+0x8bdf96ae, 0xf08afcce, 0xa4023081, 0x79c6786c, 0x8026bdf0, 0x621f29a1,
+0xa8d8d3ec, 0x4ee5d537, 0xda83c99f, 0x9a69d0a8, 0xae027f9a, 0x639d9cc9,
+0xe80e2be1, 0x89d78cfb, 0x862fabb7, 0xb248c138, 0x3fe35f29, 0xf8b74cc6,
+0x27741dcf, 0xf9236485, 0x05225155, 0x4ed40ee6, 0xb30e4a95, 0x7d890ee0,
+0x0a97eb0d, 0xe7877652, 0x3de6fa67, 0xa3a5961e, 0x1bdf2af5, 0x5a7dcbbe,
+0x1641f1af, 0x4db5845b, 0x735761f2, 0x32a1431e, 0xab1e00b1, 0x495ddeb7,
+0xb480f29c, 0x47d83714, 0x8ba04129, 0x8c96062e, 0x657e7f7f, 0x30dfba23,
+0xfd4ee1bc, 0x9ffe10b1, 0xc917d2db, 0x18f0bd8d, 0x4b6c35d7, 0x54896222,
+0x7ce0f6c6, 0xb7813f4a, 0x71c52b4b, 0xac65da3a, 0x9aef9b4e, 0x85f9657e,
+0x62ba1965, 0xa1e39267, 0x40f66fb2, 0x10433b94, 0x851df333, 0xf0c6aa26,
+0xe0e19368, 0x5a706636, 0xf5de1e8c, 0x5ebc735a, 0x0967ef39, 0xf76c1fb7,
+0xe53f7f16, 0x51f2c4eb, 0x047dda3d, 0x73518b4a, 0x3ad90dd3, 0xaee51fc1,
+0x78f01060, 0x98d1502c, 0x990d5499, 0x7a19453c, 0x104c
+
+output0 =
+0xb0fe86aa, 0xffbf2968, 0x4f55a631, 0xf2152dbc, 0x195a49a5, 0xa4aa7c6c,
+0x48a7be38, 0xd32c84f2, 0xb040da60, 0xb39eb07c, 0x3443aad6, 0x19f52e13,
+0xb2914341, 0x0d5dfdb3, 0x1ba8aec0, 0x62d0c7be, 0x1d55475c, 0x10f7dd79,
+0x967e5333, 0xc0d524c1, 0xdca8da87, 0x1146ecf7, 0xfa09a85c, 0x83be800f,
+0x11b814cc, 0x362a4704, 0x4f22e7ce, 0x9985a0e2, 0x9de4be72, 0x488a45b3,
+0x6996ade2, 0xdb18c09a, 0x487d62a3, 0x94f82d13, 0xa0743f83, 0x75d2f5dd,
+0x0e470346, 0x6e2ec2e5, 0x1a67fe5f, 0xc369330d, 0x860e8766, 0x42c110f6,
+0x804a9985, 0x55db02b4, 0xb899fc5c, 0x57811d20, 0xda4f1f9d, 0x2a2efc30,
+0x953e7487, 0x4b0c3065, 0x247d5d47, 0xdde382e7, 0x1d6d2ad7, 0x4358ac24,
+0x2d0fd97f, 0x1483821e, 0xf6c74ee5, 0x23b5c2f4, 0x95d4a5fc, 0x486f370b,
+0x8dbae43d, 0x248f17d5, 0x2df76099, 0xd19ec872, 0xf592aa0e, 0x8de04c3c,
+0x3ada94fb, 0x9dd001a3, 0x2b4db6d1, 0x791cf495, 0x57dc764b, 0x21789f1f,
+0x0d38adf5, 0xd8cefdc2, 0xed98f8bf, 0x024bc4ed, 0x4f9d3062, 0x332d27c5,
+0xde70ad4e, 0xd85872d2, 0x5d0e29c0, 0xa4fd1887, 0x3fa67ba1, 0x46b9b008,
+0x055bbf61, 0xf78e59f0, 0x287d2e3b, 0x5b473fe7, 0xd7ad6e04, 0x9cdf4986,
+0x830432ae, 0xffded1ba, 0x35c00f77, 0xcf0a6648, 0x196c03aa, 0xba4e2dd3,
+0xdaf70fe4, 0x883de2cd, 0xedc3b52e, 0x14e638c1, 0xe607103a, 0xc4cfb700,
+0x4ed9843d, 0xc442a0cb, 0xc0d3bba2, 0x36789ba8, 0x9c9b7a24, 0xb943b6a4,
+0x8f2c6879, 0x0fb16a0c, 0x2677bada, 0xd14f2f7e, 0x662ec398, 0x1e37b7ae,
+0x8ad0312d, 0x21adb6a1, 0x072a11fd, 0x7ae828d1, 0xb510655f, 0x09f7b477,
+0x10a175fc, 0xb7086f51, 0x24546af3, 0xfae81b50, 0xcb328832, 0x63693615,
+0xa37c3678, 0x15fb6364, 0xb5f75a78, 0xfa9aa5f6, 0x03804a0e, 0x1d245890,
+0x06d6cdcd, 0x7485a3c7, 0x35a008ae, 0x1abeaf1c, 0xd9c55966, 0x2b694666,
+0x5341f2df, 0xcb647280, 0xfb88d81e, 0xf99a44d0, 0xb0585534, 0x0877b530,
+0x752ab39a, 0xd493d8bc, 0xfaa6a57a, 0xccb7c008, 0x9c35994b, 0x47ee26b8,
+0x94f624ac, 0xa3bd4876, 0xf7d4854e, 0x8871d433, 0x9321d942, 0x7b626be8,
+0x72c934b0, 0x3b7af8a4, 0x5102c29f, 0x710e4dbc, 0x99708292, 0x1458c4c1,
+0x61026bc5, 0xe776e388, 0x4a0222b3, 0x760e5aaf, 0x662f3583, 0x5ab1005b,
+0xe527ef70, 0x4170d611, 0x307bebc4, 0xfdd00caf, 0xbaae1044, 0xcab4d459,
+0x38281dcf, 0x90580c89, 0x49cf5986, 0xa27da769, 0xceced49b, 0x5ea37953,
+0x8a7e6c1c, 0x1e01b4e2, 0xe08026ae, 0x3754534a, 0x903b0ecf, 0x65f97a55,
+0x90798ed9, 0x9d1133bc, 0xe356a39f, 0xe47acbce, 0x01ccf326, 0x1954fd3d,
+0x240e69f8, 0x1da20bb4, 0xe1ab1684, 0x44c65d48, 0xd265e6c2, 0x51d4ef07,
+0x000970ef, 0xfeb939f4, 0x5dcc0132, 0x2bd27ae5, 0xba5dbd25, 0xa9d190e4,
+0x61556bec, 0x7fd6caba, 0x7fe312cb, 0xdd319413, 0x92a5dbbf, 0x17e61915,
+0x56067b67, 0x3dc348c0, 0x58c17fe0, 0xbe6bffcc, 0xd379026c, 0xc4174780,
+0xcce7f026, 0xb74b7eb0, 0xe4d5f625, 0x6bc16d3b, 0xff3e300e, 0x83f0d55a,
+0xf2e537df, 0x75c18f78, 0xa5458d1a, 0x47c778b0, 0x92eb8716, 0xcb5816fd,
+0xc2cc7079, 0xa7f1dc75, 0xed9e5ffa, 0xb7d6747f, 0xa2dc6907, 0x99b4d187,
+0x69f3138a, 0xbd285a1f, 0xb8ee18e0, 0x734b77a5, 0xc0700f69, 0x6c72a77a,
+0x76609ba9, 0xcfae9b73, 0x2cd329f0, 0x0d45aa12, 0x419fbcd7, 0x03e00f00,
+0x0effee99, 0x7f879eb4, 0x29a4c8df, 0x4432400a, 0x22b9cf55, 0xa1c4c645,
+0x200251b0, 0xff293906, 0xe11288e2, 0xde1e8ec7, 0x675752d1, 0x9630743e,
+0xd848c67e, 0x1eedfda9, 0x58b954fc, 0xa0dc7f1c, 0x0aea313e, 0x062b9449,
+0x0f17e57f, 0x96def6ec, 0x2ba45f14, 0xd9a5f8c5, 0x231483c8, 0x29b8783e,
+0xa6d24594, 0x1a62ffcd, 0x54c87a7f, 0x6fa9ca9a, 0xfff0f890, 0x51d6ae46,
+0xa96e82b0, 0x68cd9808, 0x56214df5, 0x2169defa, 0x72c1ecce, 0x2448e3c9,
+0x8eb2b3b3, 0x62c89c11, 0xbba20ee2, 0xea33f53a, 0x8b96a6b8, 0x9d33d551,
+0x363bd14e, 0x5fdd2b5f, 0xf8187546, 0xb692beab, 0x8df6b4c0, 0x753e2302,
+0xb90f7a37, 0x2b6bac6a, 0x13a07bc5, 0xb67f6d2b, 0x47b21569, 0xc2ab143f,
+0xf86ce30a, 0xde2dab70, 0x6f258860, 0x2878735c, 0x2aaeac20, 0xda80fb3e,
+0xe7a8ccf6, 0xbf011844, 0x40a610f8, 0x82f3fdf7, 0xadc4ec2c, 0xb8551030,
+0x76004380, 0xa384ff18, 0x080cf587, 0x6c4991d3, 0x2daea9aa, 0xa92d0c4f,
+0x4a9d117e, 0x2761f025, 0x96b40443, 0x57a82be4, 0x62374c44, 0x55dc64ae,
+0x28aa9f0c, 0x03a3b963, 0x41dbaa26, 0xb3c23735, 0x971cbd31, 0x939a9f80,
+0x76439fdc, 0xa9df79d6, 0x926ae3e2, 0x5ee75745, 0xf2396e52, 0xe18bd816,
+0x3f9834b9, 0x816a07bc, 0x8f873310, 0x45cf9b96, 0x0ce634dd, 0xe64a16d9,
+0x2733775a, 0x2b648c7e, 0xe600ee8e, 0xd99d8ae3, 0x10dadf2a, 0x904d3f87,
+0x3963e9e7, 0x47fcce89, 0xc256c898, 0x7db6cb66, 0xe1611a8b, 0xed81b10d,
+0x75eff974, 0x8a0a3d67, 0xa44311ff, 0x6f876783, 0x43dc93ce, 0x88616a33,
+0xa8706e8f, 0x33a2cbed, 0x3a6d20c3, 0x55175086, 0x39680945, 0x2d779a7b,
+0x0818a4ce, 0x55558918, 0xf7c08d35, 0x980a3038, 0x9cf068db, 0x3385d333,
+0xd81b33fb, 0x188018d5, 0x47c57bd3, 0x9ec2324f, 0x6901cd77, 0xc3bac44f,
+0x4d96aba8, 0x9094da5b, 0xa67a1353, 0x1fdfc4db, 0xa2fbeefa, 0xab4828e3,
+0x37d1db45, 0x0d33b3e9, 0x1ad72bb9, 0x7257bf9c, 0x2ec35167, 0xa4488b7f,
+0xf9dae588, 0x1038905a, 0x88ddf410, 0xaac11693, 0x24ac025d, 0x56cefbb5,
+0x6afe7f59, 0xc7f989e8, 0x15872570, 0x1bf16cdb, 0xfe9c93ce, 0x1fc9a076,
+0x85d37185, 0x1078cd31, 0xe1cd0327, 0x6d5315bc, 0x298cd836, 0xc8e21f06,
+0xe561c32d, 0x8ec404b4, 0x4d39bfbb, 0x24ede8c8, 0x451d6034, 0x3bafeea2,
+0x202f0ccf, 0x1fad37ce, 0xf04b5693, 0xeee57cd9, 0x5ef70007, 0x018e8a4f,
+0xfa61c9a9, 0x09989fcf, 0xe66b558b, 0x966efd48, 0x7525021d, 0xe7978b5e,
+0x7eb1d6dc, 0xa10c5e5b, 0xb7815e69, 0x7d486cfb, 0xcffdeb2a, 0x7375cb32,
+0x599008dc, 0xff91c796, 0x560ed4ad, 0x14e9a876, 0xfccf6a66, 0xa58be028,
+0xea9d408b, 0x3afc373b, 0xee008458, 0x19b6042e, 0x84806314, 0x431a4ba4,
+0x009ad6a1, 0xd7c62bf4, 0x1bebecba, 0x5c662f69, 0x83bfdea1, 0x45872a9a,
+0xca4700c9, 0x47dda29c, 0x57841aff, 0x59fb6aa5, 0xec649574, 0xf7e6d293,
+0xa6e6d40e, 0x364f91a8, 0x2c577646, 0xfd0d25d5, 0x58c7694d, 0x864930d4,
+0x78999124, 0xc231f2f7, 0xe4cfd0fc, 0x975038c8, 0x9398d150, 0x504b1498,
+0x52957004, 0xc172025e, 0x459a8af8, 0x2fecb2af, 0xb3a176ac, 0xbac24f0e,
+0xbddadef6, 0x4565426f, 0xc1019842, 0xa4efaf83, 0x2f2966be, 0xdae82bff,
+0xc27f135e, 0x94c3b409, 0x3b810ea0, 0x1cc39dc5, 0x731c8a7f, 0x34b6ee25,
+0xb78b808d, 0x80eef85f, 0x76a9fb29, 0xbfacb97b, 0xf7a0ea37, 0xe3e69d80,
+0xa19b8bd1, 0xf26246cd, 0x5495cf9a, 0x45ad1bf5, 0x2cb3296f, 0x3cb7e6fe,
+0xbbde4ac3, 0x310c7b67, 0xe6b00a32, 0x4004d280, 0x0eb8b6dd, 0x8d951f0e,
+0x96cb406f, 0xbe29ddb1, 0x38a3756d, 0xf5b058a1, 0xea638fd4, 0x438a7a69,
+0x530eb350, 0x7cb218e5, 0xa67b9d52, 0xfb86dfbf, 0x4ed65b7e, 0x00678272,
+0x9924a438, 0x9955c26a, 0xb8262774, 0x4ceecb5c, 0xeb103da7, 0xa1297389,
+0x0613c489, 0xdf96ae66, 0x8afcce8b, 0x023081f0, 0xc6786ca4, 0x26bdf079,
+0x1f29a180, 0xd8d3ec62, 0xe5d537a8, 0x83c99f4e, 0x69d0a8da, 0x027f9a9a,
+0x9d9cc9ae, 0x0e2be163, 0xd78cfbe8, 0x2fabb789, 0x48c13886, 0xe35f29b2,
+0xb74cc63f, 0x741dcff8, 0x23648527, 0x225155f9, 0xd40ee605, 0x0e4a954e,
+0x890ee0b3, 0x97eb0d7d, 0x8776520a, 0xe6fa67e7, 0xa5961e3d, 0xdf2af5a3,
+0x7dcbbe1b, 0x41f1af5a, 0xb5845b16, 0x5761f24d, 0xa1431e73, 0x1e00b132,
+0x5ddeb7ab, 0x80f29c49, 0xd83714b4, 0xa0412947, 0x96062e8b, 0x7e7f7f8c,
+0xdfba2365, 0x4ee1bc30, 0xfe10b1fd, 0x17d2db9f, 0xf0bd8dc9, 0x6c35d718,
+0x8962224b, 0xe0f6c654, 0x813f4a7c, 0xc52b4bb7, 0x65da3a71, 0xef9b4eac,
+0xf9657e9a, 0xba196585, 0xe3926762, 0xf66fb2a1, 0x433b9440, 0x1df33310,
+0xc6aa2685, 0xe19368f0, 0x706636e0, 0xde1e8c5a, 0xbc735af5, 0x67ef395e,
+0x6c1fb709, 0x3f7f16f7, 0xf2c4ebe5, 0x7dda3d51, 0x518b4a04, 0xd90dd373,
+0xe51fc13a, 0xf01060ae, 0xd1502c78, 0x0d549998, 0x19453c99, 0x52104c7a,
+0xc477d443, 0xce6910c0, 0x0dc163b4, 0x1dc4dab2, 0x5c4dbc0c, 0x6145ced9,
+0xf02e295b, 0x60196b15, 0x2aa68b89, 0x9ff0dd3b, 0xe4b874ce, 0x9175ec6c,
+0x9b629234, 0x644ce81d, 0x359fac36, 0x5211675f, 0xd24c092f, 0x1de9385e,
+0x1352b7e5, 0x8d650e83, 0x7057d366, 0x43371dd2, 0x61678e6e, 0xd436279b,
+0x267562b6, 0xabaf1706, 0x814bd74e, 0x3269cbda, 0x0a34b3cd, 0xa74c3d1a,
+0xd3b098c8, 0x02030412, 0xd75d7207, 0x519d1b3d, 0x1958436b, 0x69ba4221,
+0x81b6b4cf, 0xb83234e4, 0x7e652d03, 0x63bcf36e, 0xefecb5f7, 0x60550e08,
+0x394963ce, 0xfd6f2b38, 0x1342c68b, 0xbc39f1ca, 0x21bdd863, 0x59ade0af,
+0x2d0c793a, 0xa74702d4, 0xc00885f9, 0xc73c27af, 0x6566ea9e, 0xa31e0f7c,
+0x499f1706, 0xe19617ad, 0x0e19900a, 0x0a8d3669, 0xcc482af9, 0x5eb35096,
+0x269b51c1, 0x80d1145b, 0x4be232ff, 0x3d31fc83, 0x89127a6c, 0x90af3379,
+0x7726d002, 0x35f15151, 0x8393c3b5, 0x27a9ada8, 0x25940510, 0x05c49bf1,
+0xc7c1b886, 0xdb00826f, 0xf658f61f, 0xe5d77d98, 0xa637b6fb, 0x2f515fa5,
+0xb1f80c38, 0xe082d248, 0x4220acd0, 0x06360060, 0xcf42c277, 0xf5972529,
+0xf7e274fb, 0xfe41cc28, 0xde661de0, 0xa157bd26, 0x8e1f4788, 0x35c4111a,
+0x11a7360e, 0x751188e8, 0x544d9fc3, 0x33d853cd, 0x754542f7, 0x05d9979b,
+0x73e59071, 0x4909bb7e, 0x64ae94be, 0x0eb4c8e9, 0xacb903fe, 0xc716288e,
+0x1e914aa8, 0x19d127bd, 0x913a6dd4, 0x6af354d9, 0x72c29a95, 0x2fa731a5,
+0x9f206402, 0xa44abe92, 0xeb090ab4, 0x85b7584c, 0xf27cd398, 0x3b828e38,
+0x0cb8dd56, 0x37657f3c, 0x78fa0f2a, 0x3df7a0cb, 0x2621740f, 0x4d92422f,
+0x334a8fa2, 0x124f947d, 0x31be3505, 0xf6a1e561, 0x5d0c4087, 0x62971b45,
+0x312472a7, 0x933af4e3, 0x45c28196, 0x8ddd4f00, 0xa5ec20fb, 0x9acde751,
+0x23ac64e8, 0xbea00461, 0xf9d65eac, 0xe21db306, 0xcacb4f76, 0x83950ca3,
+0x66069329, 0xf7d72838, 0x5d13a747, 0x5c9ca583, 0x1d9d225e, 0xb2c705fe,
+0xd0fac625, 0x0a73b38e, 0xec8d6692, 0x0bb64587, 0xf147c00c, 0xcdd6020c,
+0x1608cede, 0xb531a423, 0x1c8b1b08, 0xdd74f03e, 0x6d3076fb, 0x2dcaaa44,
+0xe869582b, 0x0f19cde9, 0x44d15927, 0xee751f17, 0x655f24c4, 0x7508164e,
+0xab414b0d, 0x381d3525, 0x7a18cb74, 0xc91e435a, 0xb3397aa0, 0xa5567595,
+0x1da36e52, 0xb43ba598, 0x3ef0d4fd, 0x11d348c4, 0xfcb4cbfa, 0xa3dee1a7,
+0x8c6be341, 0xefafff59, 0xeaa95a58, 0x01ba74bb, 0x5b1d3193, 0x2ac942bb,
+0xbb5816ca, 0xe79720dd, 0x1683bbad, 0xea2f3992, 0xa89cdd39, 0x3a389386,
+0xc54d671f, 0xe8564365, 0x373196e0, 0xc08acfd2, 0x32f556b2, 0x15340220,
+0x817d0d1b, 0x4b9afaa3, 0xd3bbf932, 0x6c9ddf75, 0x0fca9bea, 0x2cd2d913,
+0x026e3647, 0x6e769b35, 0xdab34e6e, 0xc9b75a74, 0xfdc90304, 0xc6442959,
+0xa8f44fb4, 0x73fb26de, 0x833d2d60, 0x8cf7a461, 0xdc5c4bf9, 0x1bce371f,
+0xd1d6b743, 0xc1124cae, 0xf4d33161, 0x696956a5, 0xa5ce9c72, 0x5f84e109,
+0xa0dae0ff, 0xdfdf169e, 0xb734307c, 0x25843b5d, 0x0b710ae4, 0xafd25b2b,
+0xc013b89b, 0x5246e064, 0xeb28ab92, 0x4f92747c, 0x2f3c0c8b, 0x3268720d,
+0xef638533, 0xd0fc40ab, 0x5d29c943, 0x3fd9b311, 0x6833bf43, 0xa188ad03,
+0xacba3ad2, 0x9696f4da, 0x07b1b2fa, 0xb8f925f6, 0x082573f4, 0x506b5c0e,
+0xe290e707, 0x80933f06, 0x38dcad25, 0x276ab82f, 0x788f3a2c, 0x4b0e14bc,
+0xc1b38b43, 0x962a0efe, 0x77f19522, 0xd5bd951b, 0x90415ed2, 0x67a6a806,
+0x82d0503f, 0x814e505f, 0x6448341a, 0x2c88ba72, 0x1f783411, 0x5dcfc5d9,
+0x15cea1e2, 0x351ebdaa, 0xff083e5e, 0x173297d6, 0xbdadb9f2, 0xe82ebe50,
+0x33fff936, 0xaed0e402, 0xcd08e297, 0xeefee410, 0x023aedc0, 0x493fdd4c,
+0xd4454937, 0xa4c21893, 0xf9740292, 0x14308130, 0x2aa05568, 0xbd88a714,
+0x3f264976, 0x62203300, 0x5d9aa0bf, 0x3b9be4b0, 0xcb1c6dff, 0xe7f5ded8,
+0xb6ce0ec7, 0xdc1f094b, 0x0f93747c, 0x1768e49a, 0x1d7ebe93, 0x25d53887,
+0x144a1fe2, 0x32bc280c, 0x1d0b7884, 0x106b8928, 0x5aa38780, 0x87ca0d93,
+0x11a81f71, 0xe0957877, 0xb21fb0f7, 0x1e5a7ac0, 0x09db53a1, 0x5210dd59,
+0xa0566364
+
+ea =
+14412
+
+eb =
+14412
+
+cab =
+4
+
+c =
+4
+
+c_neg =
+0
+
+k_pos =
+4800
+
+k_neg =
+4736
+
+ncb_pos =
+14496
+
+ncb_neg =
+14304
+
+r =
+2
+
+rv_index =
+0
+
+code_block_mode =
+0
+
+op_flags =
+RTE_BBDEV_TURBO_CRC_24B_ATTACH
+
+expected_status =
+OK
\ No newline at end of file
diff --git a/app/test-bbdev/turbo_dec_default.data b/app/test-bbdev/turbo_dec_default.data
new file mode 120000
index 00000000..371cbc69
--- /dev/null
+++ b/app/test-bbdev/turbo_dec_default.data
@@ -0,0 +1 @@
+test_vectors/turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data
\ No newline at end of file
diff --git a/app/test-bbdev/turbo_enc_default.data b/app/test-bbdev/turbo_enc_default.data
new file mode 120000
index 00000000..5587f9cc
--- /dev/null
+++ b/app/test-bbdev/turbo_enc_default.data
@@ -0,0 +1 @@
+test_vectors/turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data
\ No newline at end of file
diff --git a/app/test-crypto-perf/Makefile b/app/test-crypto-perf/Makefile
index 3935aec4..78135f38 100644
--- a/app/test-crypto-perf/Makefile
+++ b/app/test-crypto-perf/Makefile
@@ -7,6 +7,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
APP = dpdk-test-crypto-perf
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
# all source are stored in SRCS-y
SRCS-y := main.c
diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c
index 8f320099..44808f50 100644
--- a/app/test-crypto-perf/cperf_ops.c
+++ b/app/test-crypto-perf/cperf_ops.c
@@ -514,6 +514,7 @@ cperf_create_session(struct rte_mempool *sess_mp,
auth_xform.next = NULL;
auth_xform.auth.algo = options->auth_algo;
auth_xform.auth.op = options->auth_op;
+ auth_xform.auth.iv.offset = iv_offset;
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
@@ -568,6 +569,8 @@ cperf_create_session(struct rte_mempool *sess_mp,
auth_xform.next = NULL;
auth_xform.auth.algo = options->auth_algo;
auth_xform.auth.op = options->auth_op;
+ auth_xform.auth.iv.offset = iv_offset +
+ cipher_xform.cipher.iv.length;
/* auth different than null */
if (options->auth_algo != RTE_CRYPTO_AUTH_NULL) {
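
The two hunks above start recording where the authentication IV lives inside each crypto op's private area. The layout assumed here (a sketch, not an excerpt from the patch): the auth IV sits at iv_offset for auth-only sessions, and directly after the cipher IV for chained cipher-plus-auth sessions.

    /* Hypothetical helper mirroring the offsets set above:
     *   op | sym op | cipher IV (if any) | auth IV
     *        ^ iv_offset points at the first IV byte */
    static uint16_t
    auth_iv_offset(uint16_t iv_offset, uint16_t cipher_iv_len, int chained)
    {
        return chained ? iv_offset + cipher_iv_len : iv_offset;
    }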
diff --git a/app/test-crypto-perf/cperf_options.h b/app/test-crypto-perf/cperf_options.h
index 54a3ad5c..f5bf03c8 100644
--- a/app/test-crypto-perf/cperf_options.h
+++ b/app/test-crypto-perf/cperf_options.h
@@ -1,3 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
#ifndef _CPERF_OPTIONS_
#define _CPERF_OPTIONS_
@@ -73,6 +76,8 @@ struct cperf_options {
uint32_t pool_sz;
uint32_t total_ops;
+ uint32_t headroom_sz;
+ uint32_t tailroom_sz;
uint32_t segment_sz;
uint32_t test_buffer_size;
uint32_t *imix_buffer_sizes;
diff --git a/app/test-crypto-perf/cperf_test_common.c b/app/test-crypto-perf/cperf_test_common.c
index 21cb1c22..e803dc10 100644
--- a/app/test-crypto-perf/cperf_test_common.c
+++ b/app/test-crypto-perf/cperf_test_common.c
@@ -3,6 +3,7 @@
*/
#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
#include "cperf_test_common.h"
@@ -10,12 +11,15 @@ struct obj_params {
uint32_t src_buf_offset;
uint32_t dst_buf_offset;
uint16_t segment_sz;
+ uint16_t headroom_sz;
+ uint16_t data_len;
uint16_t segments_nb;
};
static void
fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
- void *obj, uint32_t mbuf_offset, uint16_t segment_sz)
+ void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
+ uint16_t headroom, uint16_t data_len)
{
uint32_t mbuf_hdr_size = sizeof(struct rte_mbuf);
@@ -25,10 +29,10 @@ fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
m->buf_iova = rte_mempool_virt2iova(obj) +
mbuf_offset + mbuf_hdr_size;
m->buf_len = segment_sz;
- m->data_len = segment_sz;
+ m->data_len = data_len;
- /* No headroom needed for the buffer */
- m->data_off = 0;
+ /* Use headroom specified for the buffer */
+ m->data_off = headroom;
/* init some constant fields */
m->pool = mp;
@@ -41,7 +45,7 @@ fill_single_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
static void
fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
void *obj, uint32_t mbuf_offset, uint16_t segment_sz,
- uint16_t segments_nb)
+ uint16_t headroom, uint16_t data_len, uint16_t segments_nb)
{
uint16_t mbuf_hdr_size = sizeof(struct rte_mbuf);
uint16_t remaining_segments = segments_nb;
@@ -56,10 +60,10 @@ fill_multi_seg_mbuf(struct rte_mbuf *m, struct rte_mempool *mp,
m->buf_iova = next_seg_phys_addr;
next_seg_phys_addr += mbuf_hdr_size + segment_sz;
m->buf_len = segment_sz;
- m->data_len = segment_sz;
+ m->data_len = data_len;
- /* No headroom needed for the buffer */
- m->data_off = 0;
+ /* Use headroom specified for the buffer */
+ m->data_off = headroom;
/* init some constant fields */
m->pool = mp;
@@ -91,17 +95,19 @@ mempool_obj_init(struct rte_mempool *mp,
op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
- op->phys_addr = rte_mem_virt2phy(obj);
+ op->phys_addr = rte_mem_virt2iova(obj);
op->mempool = mp;
/* Set source buffer */
op->sym->m_src = m;
if (params->segments_nb == 1)
fill_single_seg_mbuf(m, mp, obj, params->src_buf_offset,
- params->segment_sz);
+ params->segment_sz, params->headroom_sz,
+ params->data_len);
else
fill_multi_seg_mbuf(m, mp, obj, params->src_buf_offset,
- params->segment_sz, params->segments_nb);
+ params->segment_sz, params->headroom_sz,
+ params->data_len, params->segments_nb);
/* Set destination buffer */
@@ -109,7 +115,8 @@ mempool_obj_init(struct rte_mempool *mp,
m = (struct rte_mbuf *) ((uint8_t *) obj +
params->dst_buf_offset);
fill_single_seg_mbuf(m, mp, obj, params->dst_buf_offset,
- params->segment_sz);
+ params->segment_sz, params->headroom_sz,
+ params->data_len);
op->sym->m_dst = m;
} else
op->sym->m_dst = NULL;
@@ -124,6 +131,7 @@ cperf_alloc_common_memory(const struct cperf_options *options,
uint32_t *dst_buf_offset,
struct rte_mempool **pool)
{
+ const char *mp_ops_name;
char pool_name[32] = "";
int ret;
@@ -170,6 +178,11 @@ cperf_alloc_common_memory(const struct cperf_options *options,
struct obj_params params = {
.segment_sz = options->segment_sz,
+ .headroom_sz = options->headroom_sz,
+ /* Data len = segment size - (headroom + tailroom) */
+ .data_len = options->segment_sz -
+ options->headroom_sz -
+ options->tailroom_sz,
.segments_nb = segments_nb,
.src_buf_offset = crypto_op_total_size_padded,
.dst_buf_offset = 0
@@ -193,8 +206,10 @@ cperf_alloc_common_memory(const struct cperf_options *options,
return -1;
}
+ mp_ops_name = rte_mbuf_best_mempool_ops();
+
ret = rte_mempool_set_ops_byname(*pool,
- RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
+ mp_ops_name, NULL);
if (ret != 0) {
RTE_LOG(ERR, USER1,
"Error setting mempool handler for device %u\n",
diff --git a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
index 8f761608..c8d16db6 100644
--- a/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
+++ b/app/test-crypto-perf/cperf_test_pmd_cyclecount.c
@@ -145,7 +145,7 @@ pmd_cyclecount_bench_ops(struct pmd_cyclecount_state *state, uint32_t cur_op,
for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
cur_iter_op += test_burst_size) {
- uint32_t burst_size = RTE_MIN(state->opts->total_ops - cur_op,
+ uint32_t burst_size = RTE_MIN(iter_ops_needed - cur_iter_op,
test_burst_size);
struct rte_crypto_op **ops = &state->ctx->ops[cur_iter_op];
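
The one-line fix above clamps each burst to what the current iteration still needs rather than to the remainder of the whole run, which previously let the final burst overrun the ops prepared for this iteration. A toy loop showing the corrected arithmetic (numbers are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t iter_ops_needed = 100, test_burst_size = 32;
        uint32_t cur_iter_op;

        for (cur_iter_op = 0; cur_iter_op < iter_ops_needed;
                cur_iter_op += test_burst_size) {
            uint32_t left = iter_ops_needed - cur_iter_op;
            uint32_t burst = left < test_burst_size ? left : test_burst_size;
            printf("burst of %u\n", burst);   /* 32, 32, 32, 4 */
        }
        return 0;
    }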
diff --git a/app/test-crypto-perf/cperf_test_vector_parsing.c b/app/test-crypto-perf/cperf_test_vector_parsing.c
index 26321d00..92932a23 100644
--- a/app/test-crypto-perf/cperf_test_vector_parsing.c
+++ b/app/test-crypto-perf/cperf_test_vector_parsing.c
@@ -506,8 +506,7 @@ parse_file(struct cperf_test_vector *vector, struct cperf_options *opts)
if (entry == NULL)
return -1;
- memset(entry, 0, strlen(line) + 1);
- strncpy(entry, line, strlen(line));
+ strcpy(entry, line);
/* check if entry ends with , or = */
if (entry[strlen(entry) - 1] == ','
@@ -524,8 +523,8 @@ parse_file(struct cperf_test_vector *vector, struct cperf_options *opts)
if (entry_extended == NULL)
goto err;
entry = entry_extended;
-
- strncat(entry, line, strlen(line));
+ /* entry has been reallocated with room for line and the NUL */
+ strcpy(&entry[strlen(entry)], line);
if (entry[strlen(entry) - 1] != ',')
break;
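
Both replacements above drop strncpy/strncat in favour of strcpy: strncpy(entry, line, strlen(line)) copies exactly the payload bytes and never writes a terminating NUL, so the old code only worked because of the preceding memset. Since entry is always allocated (or reallocated) with room for the terminator, strcpy is equivalent and simpler. A small sketch of the hazard:

    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *line = "0xdeadbeef,";
        char *entry = malloc(strlen(line) + 1);

        if (entry == NULL)
            return 1;
        /* strncpy(entry, line, strlen(line)) would leave entry
         * unterminated unless the buffer was zeroed first. */
        strcpy(entry, line);        /* copies the NUL terminator too */
        free(entry);
        return 0;
    }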
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 019d8359..5c7dadb6 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -21,8 +21,6 @@
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"
-#define NUM_SESSIONS 2048
-#define SESS_MEMPOOL_CACHE_SIZE 64
const char *cperf_test_type_strs[] = {
[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
@@ -67,6 +65,7 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
struct rte_mempool *session_pool_socket[])
{
uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
+ uint32_t sessions_needed = 0;
unsigned int i, j;
int ret;
@@ -80,18 +79,24 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
nb_lcores = rte_lcore_count() - 1;
- if (enabled_cdev_count > nb_lcores) {
- printf("Number of capable crypto devices (%d) "
- "has to be less or equal to number of slave "
- "cores (%d)\n", enabled_cdev_count, nb_lcores);
+ if (nb_lcores < 1) {
+ RTE_LOG(ERR, USER1,
+ "Number of enabled cores need to be higher than 1\n");
return -EINVAL;
}
+ /*
+ * Use fewer devices
+ * if there are more available than cores.
+ */
+ if (enabled_cdev_count > nb_lcores)
+ enabled_cdev_count = nb_lcores;
+
/* Create a mempool shared by all the devices */
uint32_t max_sess_size = 0, sess_size;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
- sess_size = rte_cryptodev_get_private_session_size(cdev_id);
+ sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
if (sess_size > max_sess_size)
max_sess_size = sess_size;
}
@@ -143,17 +148,62 @@ cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
.nb_descriptors = opts->nb_descriptors
};
+ /**
+ * Device info specifies the min headroom and tailroom
+ * requirements for the crypto PMD. These need to be honoured
+ * by the application when creating mbufs.
+ */
+ if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
+ /* Update headroom */
+ opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
+ }
+ if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
+ /* Update tailroom */
+ opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
+ }
+
+ /* Update segment size to include headroom & tailroom */
+ opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);
+
+ uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
+ /*
+ * Two session objects are required for each session
+ * (one for the header, one for the private data)
+ */
+ if (!strcmp((const char *)opts->device_type,
+ "crypto_scheduler")) {
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ uint32_t nb_slaves =
+ rte_cryptodev_scheduler_slaves_get(cdev_id,
+ NULL);
+
+ sessions_needed = 2 * enabled_cdev_count *
+ opts->nb_qps * nb_slaves;
+#endif
+ } else
+ sessions_needed = 2 * enabled_cdev_count *
+ opts->nb_qps;
+
+ /*
+ * A single session is required per queue pair
+ * in each device
+ */
+ if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
+ RTE_LOG(ERR, USER1,
+ "Device does not support at least "
+ "%u sessions\n", opts->nb_qps);
+ return -ENOTSUP;
+ }
if (session_pool_socket[socket_id] == NULL) {
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
"sess_mp_%u", socket_id);
-
sess_mp = rte_mempool_create(mp_name,
- NUM_SESSIONS,
+ sessions_needed,
max_sess_size,
- SESS_MEMPOOL_CACHE_SIZE,
+ 0,
0, NULL, NULL, NULL,
NULL, socket_id,
0);
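
The session mempool is now sized from the test parameters instead of the fixed NUM_SESSIONS, using two objects per session (header plus private data) per device queue pair. A quick illustration with hypothetical counts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t enabled_cdev_count = 2, nb_qps = 4;   /* assumed setup */
        uint32_t sessions_needed = 2 * enabled_cdev_count * nb_qps;

        printf("%u session objects\n", sessions_needed);  /* 16 */
        return 0;
    }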
diff --git a/app/test-crypto-perf/meson.build b/app/test-crypto-perf/meson.build
new file mode 100644
index 00000000..eacd7a0f
--- /dev/null
+++ b/app/test-crypto-perf/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('cperf_ops.c',
+ 'cperf_options_parsing.c',
+ 'cperf_test_common.c',
+ 'cperf_test_latency.c',
+ 'cperf_test_pmd_cyclecount.c',
+ 'cperf_test_throughput.c',
+ 'cperf_test_vector_parsing.c',
+ 'cperf_test_vectors.c',
+ 'cperf_test_verify.c',
+ 'main.c')
+deps = ['cryptodev']
diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c
index 57bb9457..a8d304ba 100644
--- a/app/test-eventdev/evt_main.c
+++ b/app/test-eventdev/evt_main.c
@@ -20,29 +20,41 @@ struct evt_test *test;
static void
signal_handler(int signum)
{
- if (signum == SIGINT || signum == SIGTERM) {
+ int i;
+ static uint8_t once;
+
+ if ((signum == SIGINT || signum == SIGTERM) && !once) {
+ once = true;
printf("\nSignal %d received, preparing to exit...\n",
signum);
- /* request all lcores to exit from the main loop */
- *(int *)test->test_priv = true;
- rte_wmb();
- rte_eal_mp_wait_lcore();
+ if (test != NULL) {
+ /* request all lcores to exit from the main loop */
+ *(int *)test->test_priv = true;
+ rte_wmb();
+
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
- if (test->ops.test_result)
- test->ops.test_result(test, &opt);
+ rte_eal_mp_wait_lcore();
- if (test->ops.eventdev_destroy)
- test->ops.eventdev_destroy(test, &opt);
+ if (test->ops.test_result)
+ test->ops.test_result(test, &opt);
- if (test->ops.ethdev_destroy)
- test->ops.ethdev_destroy(test, &opt);
+ if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(i)
+ rte_eth_dev_close(i);
+ }
- if (test->ops.mempool_destroy)
- test->ops.mempool_destroy(test, &opt);
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
- if (test->ops.test_destroy)
- test->ops.test_destroy(test, &opt);
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+ }
/* exit with the expected status */
signal(signum, SIG_DFL);
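
The rework also makes the handler idempotent: the static once flag means a second SIGINT during teardown is ignored rather than re-running the destroy callbacks on half-freed state. A stripped-down sketch of the pattern (illustrative, not part of the patch):

    #include <signal.h>

    static void
    handler(int signum)
    {
        static volatile sig_atomic_t once;

        if ((signum == SIGINT || signum == SIGTERM) && !once) {
            once = 1; /* later signals fall through to SIG_DFL */
            /* ... ordered teardown as in the hunk above ... */
            signal(signum, SIG_DFL);
        }
    }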
diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c
index 9683b222..cfa43a16 100644
--- a/app/test-eventdev/evt_options.c
+++ b/app/test-eventdev/evt_options.c
@@ -27,6 +27,11 @@ evt_options_default(struct evt_options *opt)
opt->pool_sz = 16 * 1024;
opt->wkr_deq_dep = 16;
opt->nb_pkts = (1ULL << 26); /* do ~64M packets */
+ opt->nb_timers = 1E8;
+ opt->nb_timer_adptrs = 1;
+ opt->timer_tick_nsec = 1E3; /* 1000ns ~ 1us */
+ opt->max_tmo_nsec = 1E5; /* 100000ns ~100us */
+ opt->expiry_nsec = 1E4; /* 10000ns ~10us */
opt->prod_type = EVT_PROD_TYPE_SYNT;
}
@@ -87,6 +92,22 @@ evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused)
}
static int
+evt_parse_timer_prod_type(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR;
+ return 0;
+}
+
+static int
+evt_parse_timer_prod_type_burst(struct evt_options *opt,
+ const char *arg __rte_unused)
+{
+ opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR;
+ opt->timdev_use_burst = 1;
+ return 0;
+}
+
+static int
evt_parse_test_name(struct evt_options *opt, const char *arg)
{
snprintf(opt->test_name, EVT_TEST_NAME_MAX_LEN, "%s", arg);
@@ -120,6 +141,56 @@ evt_parse_nb_pkts(struct evt_options *opt, const char *arg)
}
static int
+evt_parse_nb_timers(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->nb_timers), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_timer_tick_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->timer_tick_nsec), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_max_tmo_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->max_tmo_nsec), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_expiry_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->expiry_nsec), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_nb_timer_adptrs(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint8(&(opt->nb_timer_adptrs), arg);
+
+ return ret;
+}
+
+static int
evt_parse_pool_sz(struct evt_options *opt, const char *arg)
{
opt->pool_sz = atoi(arg);
@@ -169,7 +240,17 @@ usage(char *program)
"\t--worker_deq_depth : dequeue depth of the worker\n"
"\t--fwd_latency : perform fwd_latency measurement\n"
"\t--queue_priority : enable queue priority\n"
- "\t--prod_type_ethdev : use ethernet device as producer\n."
+ "\t--prod_type_ethdev : use ethernet device as producer.\n"
+ "\t--prod_type_timerdev : use event timer device as producer.\n"
+ "\t expity_nsec would be the timeout\n"
+ "\t in ns.\n"
+ "\t--prod_type_timerdev_burst : use timer device as producer\n"
+ "\t burst mode.\n"
+ "\t--nb_timers : number of timers to arm.\n"
+ "\t--nb_timer_adptrs : number of timer adapters to use.\n"
+ "\t--timer_tick_nsec : timer tick interval in ns.\n"
+ "\t--max_tmo_nsec : max timeout interval in ns.\n"
+ "\t--expiry_nsec : event timer expiry ns.\n"
);
printf("available tests:\n");
evt_test_dump_names();
@@ -217,22 +298,29 @@ evt_parse_sched_type_list(struct evt_options *opt, const char *arg)
}
static struct option lgopts[] = {
- { EVT_NB_FLOWS, 1, 0, 0 },
- { EVT_DEVICE, 1, 0, 0 },
- { EVT_VERBOSE, 1, 0, 0 },
- { EVT_TEST, 1, 0, 0 },
- { EVT_PROD_LCORES, 1, 0, 0 },
- { EVT_WORK_LCORES, 1, 0, 0 },
- { EVT_SOCKET_ID, 1, 0, 0 },
- { EVT_POOL_SZ, 1, 0, 0 },
- { EVT_NB_PKTS, 1, 0, 0 },
- { EVT_WKR_DEQ_DEP, 1, 0, 0 },
- { EVT_SCHED_TYPE_LIST, 1, 0, 0 },
- { EVT_FWD_LATENCY, 0, 0, 0 },
- { EVT_QUEUE_PRIORITY, 0, 0, 0 },
- { EVT_PROD_ETHDEV, 0, 0, 0 },
- { EVT_HELP, 0, 0, 0 },
- { NULL, 0, 0, 0 }
+ { EVT_NB_FLOWS, 1, 0, 0 },
+ { EVT_DEVICE, 1, 0, 0 },
+ { EVT_VERBOSE, 1, 0, 0 },
+ { EVT_TEST, 1, 0, 0 },
+ { EVT_PROD_LCORES, 1, 0, 0 },
+ { EVT_WORK_LCORES, 1, 0, 0 },
+ { EVT_SOCKET_ID, 1, 0, 0 },
+ { EVT_POOL_SZ, 1, 0, 0 },
+ { EVT_NB_PKTS, 1, 0, 0 },
+ { EVT_WKR_DEQ_DEP, 1, 0, 0 },
+ { EVT_SCHED_TYPE_LIST, 1, 0, 0 },
+ { EVT_FWD_LATENCY, 0, 0, 0 },
+ { EVT_QUEUE_PRIORITY, 0, 0, 0 },
+ { EVT_PROD_ETHDEV, 0, 0, 0 },
+ { EVT_PROD_TIMERDEV, 0, 0, 0 },
+ { EVT_PROD_TIMERDEV_BURST, 0, 0, 0 },
+ { EVT_NB_TIMERS, 1, 0, 0 },
+ { EVT_NB_TIMER_ADPTRS, 1, 0, 0 },
+ { EVT_TIMER_TICK_NSEC, 1, 0, 0 },
+ { EVT_MAX_TMO_NSEC, 1, 0, 0 },
+ { EVT_EXPIRY_NSEC, 1, 0, 0 },
+ { EVT_HELP, 0, 0, 0 },
+ { NULL, 0, 0, 0 }
};
static int
@@ -255,11 +343,18 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt)
{ EVT_FWD_LATENCY, evt_parse_fwd_latency},
{ EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
{ EVT_PROD_ETHDEV, evt_parse_eth_prod_type},
+ { EVT_PROD_TIMERDEV, evt_parse_timer_prod_type},
+ { EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst},
+ { EVT_NB_TIMERS, evt_parse_nb_timers},
+ { EVT_NB_TIMER_ADPTRS, evt_parse_nb_timer_adptrs},
+ { EVT_TIMER_TICK_NSEC, evt_parse_timer_tick_nsec},
+ { EVT_MAX_TMO_NSEC, evt_parse_max_tmo_nsec},
+ { EVT_EXPIRY_NSEC, evt_parse_expiry_nsec},
};
for (i = 0; i < RTE_DIM(parsermap); i++) {
if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name,
- strlen(parsermap[i].lgopt_name)) == 0)
+ strlen(lgopts[opt_idx].name)) == 0)
return parsermap[i].parser_fn(opt, optarg);
}
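
The strlen() operand change matters now that option names share prefixes: with the old length, "prod_type_timerdev_burst" matched the shorter "prod_type_timerdev" table entry first. Comparing over the full user-supplied name rejects the partial match, as this small standalone example shows:

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *name = "prod_type_timerdev_burst"; /* parsed option */
        const char *entry = "prod_type_timerdev";      /* table entry */

        /* old operand: only strlen(entry) chars compared -> 0, false match */
        printf("old: %d\n", strncmp(name, entry, strlen(entry)));
        /* new operand: strlen(name) chars compared -> nonzero, since the
         * entry ends where the name continues; the loop moves on to the
         * correct burst row */
        printf("new: %d\n", strncmp(name, entry, strlen(name)));
        return 0;
    }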
@@ -305,6 +400,7 @@ evt_options_dump(struct evt_options *opt)
evt_dump("pool_sz", "%d", opt->pool_sz);
evt_dump("master lcore", "%d", rte_get_master_lcore());
evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts);
+ evt_dump("nb_timers", "%"PRIu64, opt->nb_timers);
evt_dump_begin("available lcores");
RTE_LCORE_FOREACH(lcore_id)
printf("%d ", lcore_id);
diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h
index 46d12222..f3de48a1 100644
--- a/app/test-eventdev/evt_options.h
+++ b/app/test-eventdev/evt_options.h
@@ -9,6 +9,7 @@
#include <stdbool.h>
#include <rte_common.h>
+#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
@@ -31,12 +32,20 @@
#define EVT_FWD_LATENCY ("fwd_latency")
#define EVT_QUEUE_PRIORITY ("queue_priority")
#define EVT_PROD_ETHDEV ("prod_type_ethdev")
+#define EVT_PROD_TIMERDEV ("prod_type_timerdev")
+#define EVT_PROD_TIMERDEV_BURST ("prod_type_timerdev_burst")
+#define EVT_NB_TIMERS ("nb_timers")
+#define EVT_NB_TIMER_ADPTRS ("nb_timer_adptrs")
+#define EVT_TIMER_TICK_NSEC ("timer_tick_nsec")
+#define EVT_MAX_TMO_NSEC ("max_tmo_nsec")
+#define EVT_EXPIRY_NSEC ("expiry_nsec")
#define EVT_HELP ("help")
enum evt_prod_type {
EVT_PROD_TYPE_NONE,
EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */
EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. */
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
EVT_PROD_TYPE_MAX,
};
@@ -52,11 +61,19 @@ struct evt_options {
int nb_stages;
int verbose_level;
uint64_t nb_pkts;
+ uint8_t nb_timer_adptrs;
+ uint64_t nb_timers;
+ uint64_t timer_tick_nsec;
+ uint64_t optm_timer_tick_nsec;
+ uint64_t max_tmo_nsec;
+ uint64_t expiry_nsec;
uint16_t wkr_deq_dep;
uint8_t dev_id;
uint32_t fwd_latency:1;
uint32_t q_priority:1;
enum evt_prod_type prod_type;
+ uint8_t timdev_use_burst;
+ uint8_t timdev_cnt;
};
void evt_options_default(struct evt_options *opt);
@@ -262,6 +279,24 @@ evt_dump_producer_type(struct evt_options *opt)
case EVT_PROD_TYPE_ETH_RX_ADPTR:
snprintf(name, EVT_PROD_MAX_NAME_LEN,
"Ethdev Rx Adapter producers");
+ evt_dump("nb_ethdev", "%d", rte_eth_dev_count_avail());
+ break;
+ case EVT_PROD_TYPE_EVENT_TIMER_ADPTR:
+ if (opt->timdev_use_burst)
+ snprintf(name, EVT_PROD_MAX_NAME_LEN,
+ "Event timer adapter burst mode producer");
+ else
+ snprintf(name, EVT_PROD_MAX_NAME_LEN,
+ "Event timer adapter producer");
+ evt_dump("nb_timer_adapters", "%d", opt->nb_timer_adptrs);
+ evt_dump("max_tmo_nsec", "%"PRIu64"", opt->max_tmo_nsec);
+ evt_dump("expiry_nsec", "%"PRIu64"", opt->expiry_nsec);
+ if (opt->optm_timer_tick_nsec)
+ evt_dump("optm_timer_tick_nsec", "%"PRIu64"",
+ opt->optm_timer_tick_nsec);
+ else
+ evt_dump("timer_tick_nsec", "%"PRIu64"",
+ opt->timer_tick_nsec);
break;
}
evt_dump("prod_type", "%s", name);
diff --git a/app/test-eventdev/evt_test.h b/app/test-eventdev/evt_test.h
index 7477a325..f07d2c33 100644
--- a/app/test-eventdev/evt_test.h
+++ b/app/test-eventdev/evt_test.h
@@ -77,8 +77,7 @@ void evt_test_dump_names(void);
#define EVT_TEST_REGISTER(nm) \
static struct evt_test_entry _evt_test_entry_ ##nm; \
-RTE_INIT(evt_test_ ##nm); \
-static void evt_test_ ##nm(void) \
+RTE_INIT(evt_test_ ##nm) \
{ \
_evt_test_entry_ ##nm.test.name = RTE_STR(nm);\
memcpy(&_evt_test_entry_ ##nm.test.ops, &nm, \
diff --git a/app/test-eventdev/meson.build b/app/test-eventdev/meson.build
index 7c373c87..a81dcd13 100644
--- a/app/test-eventdev/meson.build
+++ b/app/test-eventdev/meson.build
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Cavium, Inc
+allow_experimental_apis = true
sources = files('evt_main.c',
'evt_options.c',
'evt_test.c',
@@ -11,19 +12,4 @@ sources = files('evt_main.c',
'test_perf_common.c',
'test_perf_atq.c',
'test_perf_queue.c')
-
-dep_objs = [get_variable(get_option('default_library') + '_rte_eventdev')]
-dep_objs += cc.find_library('execinfo', required: false) # BSD only
-
-link_libs = []
-if get_option('default_library') == 'static'
- link_libs = dpdk_drivers
-endif
-
-executable('dpdk-test-eventdev',
- sources,
- c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'],
- link_whole: link_libs,
- dependencies: dep_objs,
- install_rpath: join_paths(get_option('prefix'), driver_install_path),
- install: true)
+deps += 'eventdev'
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index c57fbbfa..35debcfd 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -151,10 +151,14 @@ order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
if (ret)
return ret;
- ret = evt_service_setup(opt->dev_id);
- if (ret) {
- evt_err("No service lcore found to run event dev.");
- return ret;
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ uint32_t service_id;
+ rte_event_dev_service_id_get(opt->dev_id, &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
}
ret = rte_event_dev_start(opt->dev_id);
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index f603a023..17f7b984 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -164,10 +164,14 @@ order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
if (ret)
return ret;
- ret = evt_service_setup(opt->dev_id);
- if (ret) {
- evt_err("No service lcore found to run event dev.");
- return ret;
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ uint32_t service_id;
+ rte_event_dev_service_id_get(opt->dev_id, &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
}
ret = rte_event_dev_start(opt->dev_id);
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index b36b22a7..9715a2ce 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -11,7 +11,7 @@ atq_nb_event_queues(struct evt_options *opt)
{
/* nb_queues = number of producers */
return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
- rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores);
+ rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}
static inline __attribute__((always_inline)) void
@@ -43,15 +43,12 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
while (t->done == false) {
uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
- if (enable_fwd_latency)
- rte_prefetch0(ev.event_ptr);
-
if (!event) {
rte_pause();
continue;
}
- if (enable_fwd_latency)
+ if (enable_fwd_latency && !prod_timer_type)
/* first stage in pipeline, mark ts to compute fwd latency */
atq_mark_fwd_latency(&ev);
@@ -90,7 +87,7 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
}
for (i = 0; i < nb_rx; i++) {
- if (enable_fwd_latency) {
+ if (enable_fwd_latency && !prod_timer_type) {
rte_prefetch0(ev[i+1].event_ptr);
/* first stage in pipeline.
* mark time stamp to compute fwd latency
@@ -163,7 +160,8 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
struct rte_event_dev_info dev_info;
nb_ports = evt_nr_active_lcores(opt->wlcores);
- nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+ nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
evt_nr_active_lcores(opt->plcores);
nb_queues = atq_nb_event_queues(opt);
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 59fa0a49..d33cb2cd 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -72,6 +72,128 @@ perf_producer(void *arg)
return 0;
}
+static inline int
+perf_event_timer_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ uint64_t arm_latency = 0;
+ const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_timers = opt->nb_timers;
+ struct rte_mempool *pool = t->pool;
+ struct perf_elt *m;
+ struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
+ uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+ memset(&tim, 0, sizeof(struct rte_event_timer));
+ timeout_ticks = opt->optm_timer_tick_nsec ?
+ (timeout_ticks * opt->timer_tick_nsec)
+ / opt->optm_timer_tick_nsec : timeout_ticks;
+ timeout_ticks += timeout_ticks ? 0 : 1;
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = t->opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+ while (count < nb_timers && t->done == false) {
+ if (rte_mempool_get(pool, (void **)&m) < 0)
+ continue;
+
+ m->tim = tim;
+ m->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m->tim.ev.event_ptr = m;
+ m->timestamp = rte_get_timer_cycles();
+ while (rte_event_timer_arm_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)&m, 1) != 1) {
+ if (t->done)
+ break;
+ rte_pause();
+ m->timestamp = rte_get_timer_cycles();
+ }
+ arm_latency += rte_get_timer_cycles() - m->timestamp;
+ count++;
+ }
+ fflush(stdout);
+ rte_delay_ms(1000);
+ printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+ __func__, rte_lcore_id(), ((float)arm_latency / count) /
+ (rte_get_timer_hz() / 1000000));
+ return 0;
+}
+
+static inline int
+perf_event_timer_producer_burst(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ uint64_t arm_latency = 0;
+ const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_timers = opt->nb_timers;
+ struct rte_mempool *pool = t->pool;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
+ uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+ memset(&tim, 0, sizeof(struct rte_event_timer));
+ timeout_ticks = opt->optm_timer_tick_nsec ?
+ (timeout_ticks * opt->timer_tick_nsec)
+ / opt->optm_timer_tick_nsec : timeout_ticks;
+ timeout_ticks += timeout_ticks ? 0 : 1;
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = t->opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+ while (count < nb_timers && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
+ rte_prefetch0(m[i + 1]);
+ m[i]->tim = tim;
+ m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m[i]->tim.ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ rte_event_timer_arm_tmo_tick_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)m,
+ tim.timeout_ticks,
+ BURST_SIZE);
+ arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
+ count += BURST_SIZE;
+ }
+ fflush(stdout);
+ rte_delay_ms(1000);
+ printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+ __func__, rte_lcore_id(), ((float)arm_latency / count) /
+ (rte_get_timer_hz() / 1000000));
+ return 0;
+}
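
Both producers normalize the requested expiry into adapter ticks, rescaling when rte_event_timer_adapter_get_info() reported a different achievable resolution (stored in optm_timer_tick_nsec). A sketch of that arithmetic; 2500ns is an assumed adapter resolution, the other values are the option defaults:

    #include <stdint.h>

    static uint64_t
    norm_ticks(uint64_t expiry_ns, uint64_t tick_ns, uint64_t optm_tick_ns)
    {
        uint64_t ticks = expiry_ns / tick_ns;

        if (optm_tick_ns) /* adapter granted a different resolution */
            ticks = (ticks * tick_ns) / optm_tick_ns;
        return ticks ? ticks : 1; /* never arm a zero-tick timer */
    }

    /* norm_ticks(10000, 1000, 0) == 10; norm_ticks(10000, 1000, 2500) == 4 */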
+
static int
perf_producer_wrapper(void *arg)
{
@@ -80,6 +202,12 @@ perf_producer_wrapper(void *arg)
/* Launch the producer function only in case of synthetic producer. */
if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
return perf_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+ !t->opt->timdev_use_burst)
+ return perf_event_timer_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+ t->opt->timdev_use_burst)
+ return perf_event_timer_producer_burst(arg);
return 0;
}
@@ -146,8 +274,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
port_idx++;
}
- const uint64_t total_pkts = opt->nb_pkts *
- evt_nr_active_lcores(opt->plcores);
+ const uint64_t total_pkts = t->outstand_pkts;
uint64_t dead_lock_cycles = rte_get_timer_cycles();
int64_t dead_lock_remaining = total_pkts;
@@ -189,7 +316,9 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
if (remaining <= 0) {
t->result = EVT_TEST_SUCCESS;
- if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
t->done = true;
rte_smp_wmb();
break;
@@ -226,7 +355,7 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
memset(&queue_conf, 0,
sizeof(struct rte_event_eth_rx_adapter_queue_conf));
queue_conf.ev.sched_type = opt->sched_type_list[0];
- for (prod = 0; prod < rte_eth_dev_count(); prod++) {
+ RTE_ETH_FOREACH_DEV(prod) {
uint32_t cap;
ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
@@ -283,6 +412,65 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
return ret;
}
+static int
+perf_event_timer_adapter_setup(struct test_perf *t)
+{
+ int i;
+ int ret;
+ struct rte_event_timer_adapter_info adapter_info;
+ struct rte_event_timer_adapter *wl;
+ uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
+ uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
+
+ if (nb_producers == 1)
+ flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;
+
+ for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
+ struct rte_event_timer_adapter_conf config = {
+ .event_dev_id = t->opt->dev_id,
+ .timer_adapter_id = i,
+ .timer_tick_ns = t->opt->timer_tick_nsec,
+ .max_tmo_ns = t->opt->max_tmo_nsec,
+ .nb_timers = 2 * 1024 * 1024,
+ .flags = flags,
+ };
+
+ wl = rte_event_timer_adapter_create(&config);
+ if (wl == NULL) {
+ evt_err("failed to create event timer ring %d", i);
+ return rte_errno;
+ }
+
+ memset(&adapter_info, 0,
+ sizeof(struct rte_event_timer_adapter_info));
+ rte_event_timer_adapter_get_info(wl, &adapter_info);
+ t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;
+
+ if (!(adapter_info.caps &
+ RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id;
+
+ rte_event_timer_adapter_service_id_get(wl,
+ &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for timer adapter\n");
+ return ret;
+ }
+ rte_service_runstate_set(service_id, 1);
+ }
+
+ ret = rte_event_timer_adapter_start(wl);
+ if (ret) {
+ evt_err("failed to Start event timer adapter %d", i);
+ return ret;
+ }
+ t->timer_adptr[i] = wl;
+ }
+ return 0;
+}
+
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
uint8_t stride, uint8_t nb_queues,
@@ -326,6 +514,18 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
if (ret)
return ret;
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ prod = 0;
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+ p->queue_id = prod * stride;
+ p->t = t;
+ prod++;
+ }
+
+ ret = perf_event_timer_adapter_setup(t);
+ if (ret)
+ return ret;
} else {
prod = 0;
for ( ; port < perf_nb_event_ports(opt); port++) {
@@ -415,10 +615,13 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
}
/* Fixups */
- if (opt->nb_stages == 1 && opt->fwd_latency) {
+ if ((opt->nb_stages == 1 &&
+ opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
+ opt->fwd_latency) {
evt_info("fwd_latency is valid when nb_stages > 1, disabling");
opt->fwd_latency = 0;
}
+
if (opt->fwd_latency && !opt->q_priority) {
evt_info("enabled queue priority for latency measurement");
opt->q_priority = 1;
@@ -447,8 +650,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
- RTE_SET_USED(test);
+ int i;
+ struct test_perf *t = evt_test_priv(test);
+ if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ for (i = 0; i < opt->nb_timer_adptrs; i++)
+ rte_event_timer_adapter_stop(t->timer_adptr[i]);
+ }
rte_event_dev_stop(opt->dev_id);
rte_event_dev_close(opt->dev_id);
}
@@ -465,20 +673,14 @@ perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
- int i;
+ uint16_t i;
struct test_perf *t = evt_test_priv(test);
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0,
- .hw_ip_checksum = 0,
- .hw_vlan_filter = 0,
- .hw_vlan_strip = 0,
- .hw_vlan_extend = 0,
- .jumbo_frame = 0,
- .hw_strip_crc = 1,
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
.rss_conf = {
@@ -488,19 +690,33 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
},
};
- if (opt->prod_type == EVT_PROD_TYPE_SYNT)
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
return 0;
- if (!rte_eth_dev_count()) {
+ if (!rte_eth_dev_count_avail()) {
evt_err("No ethernet ports found.");
return -ENODEV;
}
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ rte_eth_dev_info_get(i, &dev_info);
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ evt_info("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ i,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
- if (rte_eth_dev_configure(i, 1, 1,
- &port_conf)
- < 0) {
+ if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
evt_err("Failed to configure eth port [%d]", i);
return -EINVAL;
}
@@ -527,14 +743,13 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
- int i;
+ uint16_t i;
RTE_SET_USED(test);
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
rte_event_eth_rx_adapter_stop(i);
rte_eth_dev_stop(i);
- rte_eth_dev_close(i);
}
}
}
@@ -544,7 +759,8 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
struct test_perf *t = evt_test_priv(test);
- if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
t->pool = rte_mempool_create(test->name, /* mempool name */
opt->pool_sz, /* number of elements*/
sizeof(struct perf_elt), /* element size*/
@@ -594,10 +810,18 @@ perf_test_setup(struct evt_test *test, struct evt_options *opt)
struct test_perf *t = evt_test_priv(test);
- t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
+ if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ t->outstand_pkts = opt->nb_timers *
+ evt_nr_active_lcores(opt->plcores);
+ t->nb_pkts = opt->nb_timers;
+ } else {
+ t->outstand_pkts = opt->nb_pkts *
+ evt_nr_active_lcores(opt->plcores);
+ t->nb_pkts = opt->nb_pkts;
+ }
+
t->nb_workers = evt_nr_active_lcores(opt->wlcores);
t->done = false;
- t->nb_pkts = opt->nb_pkts;
t->nb_flows = opt->nb_flows;
t->result = EVT_TEST_FAILED;
t->opt = opt;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 9ad99733..d8fbee6d 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -13,6 +13,7 @@
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_timer_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
@@ -39,6 +40,7 @@ struct prod_data {
struct test_perf *t;
} __rte_cache_aligned;
+
struct test_perf {
/* Don't change the offset of "done". Signal handler use this memory
* to terminate all lcores work.
@@ -54,10 +56,18 @@ struct test_perf {
struct worker_data worker[EVT_MAX_PORTS];
struct evt_options *opt;
uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
+ struct rte_event_timer_adapter *timer_adptr[
+ RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
} __rte_cache_aligned;
struct perf_elt {
- uint64_t timestamp;
+ union {
+ struct rte_event_timer tim;
+ struct {
+ char pad[offsetof(struct rte_event_timer, user_meta)];
+ uint64_t timestamp;
+ };
+ };
} __rte_cache_aligned;
#define BURST_SIZE 16
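
The union overlays the arm timestamp on the bytes the event timer reserves for user metadata, so one cache-aligned element serves both the synthetic and the timer producers without growing. The invariant behind the pad[] trick can be checked at compile time; a sketch, assuming the 18.08 rte_event_timer layout:

    #include <stddef.h>
    #include <rte_event_timer_adapter.h>

    _Static_assert(offsetof(struct perf_elt, timestamp) ==
            offsetof(struct rte_event_timer, user_meta),
            "timestamp must overlay the timer's user_meta bytes");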
@@ -68,6 +78,8 @@ struct perf_elt {
struct evt_options *opt = t->opt;\
const uint8_t dev = w->dev_id;\
const uint8_t port = w->port_id;\
+ const uint8_t prod_timer_type = \
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
uint8_t *const sched_type_list = &t->sched_type_list[0];\
struct rte_mempool *const pool = t->pool;\
const uint8_t nb_stages = t->opt->nb_stages;\
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index db8f2f3e..04ce9419 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -11,7 +11,7 @@ perf_queue_nb_event_queues(struct evt_options *opt)
{
/* nb_queues = number of producers * number of stages */
uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
- rte_eth_dev_count() : evt_nr_active_lcores(opt->plcores);
+ rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
return nb_prod * opt->nb_stages;
}
@@ -49,7 +49,7 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
rte_pause();
continue;
}
- if (enable_fwd_latency)
+ if (enable_fwd_latency && !prod_timer_type)
/* first q in pipeline, mark timestamp to compute fwd latency */
mark_fwd_latency(&ev, nb_stages);
@@ -88,7 +88,7 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
}
for (i = 0; i < nb_rx; i++) {
- if (enable_fwd_latency) {
+ if (enable_fwd_latency && !prod_timer_type) {
rte_prefetch0(ev[i+1].event_ptr);
/* first queue in pipeline.
* mark time stamp to compute fwd latency
@@ -161,7 +161,8 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
struct rte_event_dev_info dev_info;
nb_ports = evt_nr_active_lcores(opt->wlcores);
- nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+ nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
evt_nr_active_lcores(opt->plcores);
nb_queues = perf_queue_nb_event_queues(opt);
diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index dd718977..26dc79f9 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -12,7 +12,7 @@ pipeline_atq_nb_event_queues(struct evt_options *opt)
{
RTE_SET_USED(opt);
- return rte_eth_dev_count();
+ return rte_eth_dev_count_avail();
}
static int
@@ -324,7 +324,7 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
uint8_t nb_worker_queues = 0;
nb_ports = evt_nr_active_lcores(opt->wlcores);
- nb_queues = rte_eth_dev_count();
+ nb_queues = rte_eth_dev_count_avail();
/* One extra port and queue for Tx service */
if (t->mt_unsafe) {
diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 6cad9357..a54068df 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -166,7 +166,7 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
*/
lcores = 2;
- if (!rte_eth_dev_count()) {
+ if (!rte_eth_dev_count_avail()) {
evt_err("test needs minimum 1 ethernet dev");
return -1;
}
@@ -213,7 +213,7 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
int
pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
- int i;
+ uint16_t i;
uint8_t nb_queues = 1;
uint8_t mt_state = 0;
struct test_pipeline *t = evt_test_priv(test);
@@ -223,7 +223,6 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
@@ -234,23 +233,34 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
};
RTE_SET_USED(opt);
- if (!rte_eth_dev_count()) {
+ if (!rte_eth_dev_count_avail()) {
evt_err("No ethernet ports found.\n");
return -ENODEV;
}
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_port_conf = port_conf;
- memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
rte_eth_dev_info_get(i, &dev_info);
mt_state = !(dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MT_LOCKFREE);
rx_conf = dev_info.default_rxconf;
rx_conf.offloads = port_conf.rxmode.offloads;
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ evt_info("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ i,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
if (rte_eth_dev_configure(i, nb_queues, nb_queues,
- &port_conf)
+ &local_port_conf)
< 0) {
evt_err("Failed to configure eth port [%d]\n", i);
return -EINVAL;
@@ -337,7 +347,7 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
memset(&queue_conf, 0,
sizeof(struct rte_event_eth_rx_adapter_queue_conf));
queue_conf.ev.sched_type = opt->sched_type_list[0];
- for (prod = 0; prod < rte_eth_dev_count(); prod++) {
+ RTE_ETH_FOREACH_DEV(prod) {
uint32_t cap;
ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
@@ -419,7 +429,7 @@ pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt,
tx->dev_id = opt->dev_id;
tx->queue_id = tx_queue_id;
tx->port_id = tx_port_id;
- tx->nb_ethports = rte_eth_dev_count();
+ tx->nb_ethports = rte_eth_dev_count_avail();
tx->t = t;
/* Register Tx service */
@@ -453,7 +463,7 @@ pipeline_event_tx_service_setup(struct evt_test *test, struct evt_options *opt,
void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
- int i;
+ uint16_t i;
RTE_SET_USED(test);
RTE_SET_USED(opt);
struct test_pipeline *t = evt_test_priv(test);
@@ -464,10 +474,9 @@ pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
rte_service_component_unregister(t->tx_service.service_id);
}
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
rte_event_eth_rx_adapter_stop(i);
rte_eth_dev_stop(i);
- rte_eth_dev_close(i);
}
}
diff --git a/app/test-eventdev/test_pipeline_queue.c b/app/test-eventdev/test_pipeline_queue.c
index 02fc27cf..ca5f4578 100644
--- a/app/test-eventdev/test_pipeline_queue.c
+++ b/app/test-eventdev/test_pipeline_queue.c
@@ -10,7 +10,7 @@
static __rte_always_inline int
pipeline_queue_nb_event_queues(struct evt_options *opt)
{
- uint16_t eth_count = rte_eth_dev_count();
+ uint16_t eth_count = rte_eth_dev_count_avail();
return (eth_count * opt->nb_stages) + eth_count;
}
@@ -333,7 +333,7 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
uint8_t nb_worker_queues = 0;
nb_ports = evt_nr_active_lcores(opt->wlcores);
- nb_queues = rte_eth_dev_count() * (nb_stages);
+ nb_queues = rte_eth_dev_count_avail() * (nb_stages);
/* Extra port for Tx service. */
if (t->mt_unsafe) {
@@ -341,7 +341,7 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
nb_ports++;
nb_queues++;
} else
- nb_queues += rte_eth_dev_count();
+ nb_queues += rte_eth_dev_count_avail();
rte_event_dev_info_get(opt->dev_id, &info);
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
index ed588ab6..2b4d604b 100644
--- a/app/test-pmd/Makefile
+++ b/app/test-pmd/Makefile
@@ -13,6 +13,7 @@ APP = testpmd
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -Wno-deprecated-declarations
#
# all source are stored in SRCS-y
@@ -33,9 +34,10 @@ SRCS-y += txonly.c
SRCS-y += csumonly.c
SRCS-y += icmpecho.c
SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_cmd.c
-ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC)$(CONFIG_RTE_LIBRTE_SCHED),yy)
-SRCS-y += tm.c
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC), y)
+SRCS-y += softnicfwd.c
endif
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
@@ -44,8 +46,10 @@ ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y)
LDLIBS += -lrte_pmd_bond
endif
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA_PMD),y)
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS)$(CONFIG_RTE_LIBRTE_DPAA_PMD),yy)
LDLIBS += -lrte_pmd_dpaa
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
endif
ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
diff --git a/app/test-pmd/bpf_cmd.c b/app/test-pmd/bpf_cmd.c
new file mode 100644
index 00000000..830bfc13
--- /dev/null
+++ b/app/test-pmd/bpf_cmd.c
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_bpf_ethdev.h>
+
+#include <cmdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include "testpmd.h"
+
+static const struct rte_bpf_xsym bpf_xsym[] = {
+ {
+ .name = RTE_STR(stdout),
+ .type = RTE_BPF_XTYPE_VAR,
+ .var = {
+ .val = &stdout,
+ .desc = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(stdout),
+ },
+ },
+ },
+ {
+ .name = RTE_STR(rte_pktmbuf_dump),
+ .type = RTE_BPF_XTYPE_FUNC,
+ .func = {
+ .val = (void *)rte_pktmbuf_dump,
+ .nb_args = 3,
+ .args = {
+ [0] = {
+ .type = RTE_BPF_ARG_RAW,
+ .size = sizeof(uintptr_t),
+ },
+ [1] = {
+ .type = RTE_BPF_ARG_PTR_MBUF,
+ .size = sizeof(struct rte_mbuf),
+ },
+ [2] = {
+ .type = RTE_BPF_ARG_RAW,
+ .size = sizeof(uint32_t),
+ },
+ },
+ },
+ },
+};
+
+/* *** load BPF program *** */
+struct cmd_bpf_ld_result {
+ cmdline_fixed_string_t bpf;
+ cmdline_fixed_string_t dir;
+ uint8_t port;
+ uint16_t queue;
+ cmdline_fixed_string_t op;
+ cmdline_fixed_string_t flags;
+ cmdline_fixed_string_t prm;
+};
+
+static void
+bpf_parse_flags(const char *str, struct rte_bpf_arg *arg, uint32_t *flags)
+{
+ uint32_t i, v;
+
+ *flags = RTE_BPF_ETH_F_NONE;
+ arg->type = RTE_BPF_ARG_PTR;
+ arg->size = mbuf_data_size;
+
+ for (i = 0; str[i] != 0; i++) {
+ v = toupper(str[i]);
+ if (v == 'J')
+ *flags |= RTE_BPF_ETH_F_JIT;
+ else if (v == 'M') {
+ arg->type = RTE_BPF_ARG_PTR_MBUF;
+ arg->size = sizeof(struct rte_mbuf);
+ arg->buf_size = mbuf_data_size;
+ } else if (v == '-')
+ continue;
+ else
+ printf("unknown flag: \'%c\'", v);
+ }
+}
+
+static void cmd_operate_bpf_ld_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int32_t rc;
+ uint32_t flags;
+ struct cmd_bpf_ld_result *res;
+ struct rte_bpf_prm prm;
+ const char *fname, *sname;
+
+ res = parsed_result;
+ memset(&prm, 0, sizeof(prm));
+ prm.xsym = bpf_xsym;
+ prm.nb_xsym = RTE_DIM(bpf_xsym);
+
+ bpf_parse_flags(res->flags, &prm.prog_arg, &flags);
+ fname = res->prm;
+ sname = ".text";
+
+ if (strcmp(res->dir, "rx") == 0) {
+ rc = rte_bpf_eth_rx_elf_load(res->port, res->queue, &prm,
+ fname, sname, flags);
+ printf("%d:%s\n", rc, strerror(-rc));
+ } else if (strcmp(res->dir, "tx") == 0) {
+ rc = rte_bpf_eth_tx_elf_load(res->port, res->queue, &prm,
+ fname, sname, flags);
+ printf("%d:%s\n", rc, strerror(-rc));
+ } else
+ printf("invalid value: %s\n", res->dir);
+}
+
+cmdline_parse_token_string_t cmd_load_bpf_start =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result,
+ bpf, "bpf-load");
+cmdline_parse_token_string_t cmd_load_bpf_dir =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result,
+ dir, "rx#tx");
+cmdline_parse_token_num_t cmd_load_bpf_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, port, UINT8);
+cmdline_parse_token_num_t cmd_load_bpf_queue =
+ TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, queue, UINT16);
+cmdline_parse_token_string_t cmd_load_bpf_flags =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result,
+ flags, NULL);
+cmdline_parse_token_string_t cmd_load_bpf_prm =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result,
+ prm, NULL);
+
+cmdline_parse_inst_t cmd_operate_bpf_ld_parse = {
+ .f = cmd_operate_bpf_ld_parsed,
+ .data = NULL,
+ .help_str = "bpf-load rx|tx <port> <queue> <J|M|B> <file_name>",
+ .tokens = {
+ (void *)&cmd_load_bpf_start,
+ (void *)&cmd_load_bpf_dir,
+ (void *)&cmd_load_bpf_port,
+ (void *)&cmd_load_bpf_queue,
+ (void *)&cmd_load_bpf_flags,
+ (void *)&cmd_load_bpf_prm,
+ NULL,
+ },
+};
+
+/* *** unload BPF program *** */
+struct cmd_bpf_unld_result {
+ cmdline_fixed_string_t bpf;
+ cmdline_fixed_string_t dir;
+ uint8_t port;
+ uint16_t queue;
+};
+
+static void cmd_operate_bpf_unld_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_bpf_unld_result *res;
+
+ res = parsed_result;
+
+ if (strcmp(res->dir, "rx") == 0)
+ rte_bpf_eth_rx_unload(res->port, res->queue);
+ else if (strcmp(res->dir, "tx") == 0)
+ rte_bpf_eth_tx_unload(res->port, res->queue);
+ else
+ printf("invalid value: %s\n", res->dir);
+}
+
+cmdline_parse_token_string_t cmd_unload_bpf_start =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_unld_result,
+ bpf, "bpf-unload");
+cmdline_parse_token_string_t cmd_unload_bpf_dir =
+ TOKEN_STRING_INITIALIZER(struct cmd_bpf_unld_result,
+ dir, "rx#tx");
+cmdline_parse_token_num_t cmd_unload_bpf_port =
+ TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, port, UINT8);
+cmdline_parse_token_num_t cmd_unload_bpf_queue =
+ TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, queue, UINT16);
+
+cmdline_parse_inst_t cmd_operate_bpf_unld_parse = {
+ .f = cmd_operate_bpf_unld_parsed,
+ .data = NULL,
+ .help_str = "bpf-unload rx|tx <port> <queue>",
+ .tokens = {
+ (void *)&cmd_unload_bpf_start,
+ (void *)&cmd_unload_bpf_dir,
+ (void *)&cmd_unload_bpf_port,
+ (void *)&cmd_unload_bpf_queue,
+ NULL,
+ },
+};
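
Both commands register with the help strings shown above. A usage sketch from the testpmd prompt; the object file name is hypothetical, and 'J' requests the JIT path per bpf_parse_flags():

    testpmd> bpf-load rx 0 0 J ./rx_filter.o
    testpmd> bpf-unload rx 0 0

Passing 'M' instead would hand the program a full rte_mbuf argument rather than a raw data pointer.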
diff --git a/app/test-pmd/bpf_cmd.h b/app/test-pmd/bpf_cmd.h
new file mode 100644
index 00000000..5ee4c9f7
--- /dev/null
+++ b/app/test-pmd/bpf_cmd.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _BPF_CMD_H_
+#define _BPF_CMD_H_
+
+#ifdef RTE_LIBRTE_BPF
+
+ /* BPF CLI */
+extern cmdline_parse_inst_t cmd_operate_bpf_ld_parse;
+extern cmdline_parse_inst_t cmd_operate_bpf_unld_parse;
+
+#endif /* RTE_LIBRTE_BPF */
+
+#endif /* _BPF_CMD_H_ */
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index d1dc1de6..589121d6 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -60,7 +60,7 @@
#include <rte_eth_bond.h>
#include <rte_eth_bond_8023ad.h>
#endif
-#ifdef RTE_LIBRTE_DPAA_PMD
+#if defined RTE_LIBRTE_DPAA_BUS && defined RTE_LIBRTE_DPAA_PMD
#include <rte_pmd_dpaa.h>
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
@@ -75,6 +75,7 @@
#include "testpmd.h"
#include "cmdline_mtr.h"
#include "cmdline_tm.h"
+#include "bpf_cmd.h"
static struct cmdline *testpmd_cl;
@@ -771,9 +772,36 @@ static void cmd_help_long_parsed(void *parsed_result,
" (priority) (weight)\n"
" Set port tm node parent.\n\n"
+ "suspend port tm node (port_id) (node_id)"
+ " Suspend tm node.\n\n"
+
+ "resume port tm node (port_id) (node_id)"
+ " Resume tm node.\n\n"
+
"port tm hierarchy commit (port_id) (clean_on_fail)\n"
" Commit tm hierarchy.\n\n"
+ "vxlan ip-version (ipv4|ipv6) vni (vni) udp-src"
+ " (udp-src) udp-dst (udp-dst) ip-src (ip-src) ip-dst"
+ " (ip-dst) eth-src (eth-src) eth-dst (eth-dst)\n"
+ " Configure the VXLAN encapsulation for flows.\n\n"
+
+ "vxlan-with-vlan ip-version (ipv4|ipv6) vni (vni)"
+ " udp-src (udp-src) udp-dst (udp-dst) ip-src (ip-src)"
+ " ip-dst (ip-dst) vlan-tci (vlan-tci) eth-src (eth-src)"
+ " eth-dst (eth-dst)\n"
+ " Configure the VXLAN encapsulation for flows.\n\n"
+
+ "nvgre ip-version (ipv4|ipv6) tni (tni) ip-src"
+ " (ip-src) ip-dst (ip-dst) eth-src (eth-src) eth-dst"
+ " (eth-dst)\n"
+ " Configure the NVGRE encapsulation for flows.\n\n"
+
+ "nvgre-with-vlan ip-version (ipv4|ipv6) tni (tni)"
+ " ip-src (ip-src) ip-dst (ip-dst) vlan-tci (vlan-tci)"
+ " eth-src (eth-src) eth-dst (eth-dst)\n"
+ " Configure the NVGRE encapsulation for flows.\n\n"
+
, list_pkt_forwarding_modes()
);
}
@@ -806,6 +834,9 @@ static void cmd_help_long_parsed(void *parsed_result,
" duplex (half|full|auto)\n"
" Set speed and duplex for all ports or port_id\n\n"
+ "port config (port_id|all) loopback (mode)\n"
+ " Set loopback mode for all ports or port_id\n\n"
+
"port config all (rxq|txq|rxd|txd) (value)\n"
" Set number for rxq/txq/rxd/txd.\n\n"
@@ -818,8 +849,8 @@ static void cmd_help_long_parsed(void *parsed_result,
" Set crc-strip/scatter/rx-checksum/hardware-vlan/drop_en"
" for ports.\n\n"
- "port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|"
- "geneve|nvgre|none|<flowtype_id>)\n"
+ "port config all rss (all|default|ip|tcp|udp|sctp|"
+ "ether|port|vxlan|geneve|nvgre|none|<flowtype_id>)\n"
" Set the RSS mode.\n\n"
"port config port-id rss reta (hash,queue)[,(hash,queue)]\n"
@@ -843,10 +874,18 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config mtu X value\n"
" Set the MTU of port X to a given value\n\n"
+ "port config (port_id) (rxq|txq) (queue_id) ring_size (value)\n"
+ " Set a rx/tx queue's ring size configuration, the new"
+ " value will take effect after command that (re-)start the port"
+ " or command that setup the specific queue\n\n"
+
"port (port_id) (rxq|txq) (queue_id) (start|stop)\n"
" Start/stop a rx/tx queue of port X. Only take effect"
" when port X is started\n\n"
+ "port (port_id) (rxq|txq) (queue_id) setup\n"
+ " Setup a rx/tx queue of port X.\n\n"
+
"port config (port_id|all) l2-tunnel E-tag ether-type"
" (value)\n"
" Set the value of E-tag ether-type.\n\n"
@@ -870,6 +909,9 @@ static void cmd_help_long_parsed(void *parsed_result,
"port config (port_id) pctype (pctype_id) hash_inset|"
"fdir_inset|fdir_flx_inset clear all"
" Clear RSS|FDIR|FDIR_FLX input set completely for some pctype\n\n"
+
+ "port config (port_id) udp_tunnel_port add|rm vxlan|geneve (udp_port)\n\n"
+ " Add/remove UDP tunnel port for tunneling offload\n\n"
);
}
@@ -1415,7 +1457,7 @@ cmdline_parse_inst_t cmd_config_speed_all = {
struct cmd_config_speed_specific {
cmdline_fixed_string_t port;
cmdline_fixed_string_t keyword;
- uint8_t id;
+ portid_t id;
cmdline_fixed_string_t item1;
cmdline_fixed_string_t item2;
cmdline_fixed_string_t value1;
@@ -1455,7 +1497,7 @@ cmdline_parse_token_string_t cmd_config_speed_specific_keyword =
TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, keyword,
"config");
cmdline_parse_token_num_t cmd_config_speed_specific_id =
- TOKEN_NUM_INITIALIZER(struct cmd_config_speed_specific, id, UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_config_speed_specific, id, UINT16);
cmdline_parse_token_string_t cmd_config_speed_specific_item1 =
TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, item1,
"speed");
@@ -1487,6 +1529,122 @@ cmdline_parse_inst_t cmd_config_speed_specific = {
},
};
+/* *** configure loopback for all ports *** */
+struct cmd_config_loopback_all {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t keyword;
+ cmdline_fixed_string_t all;
+ cmdline_fixed_string_t item;
+ uint32_t mode;
+};
+
+static void
+cmd_config_loopback_all_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_loopback_all *res = parsed_result;
+ portid_t pid;
+
+ if (!all_ports_stopped()) {
+ printf("Please stop all ports first\n");
+ return;
+ }
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ ports[pid].dev_conf.lpbk_mode = res->mode;
+ }
+
+ cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_loopback_all_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, port, "port");
+cmdline_parse_token_string_t cmd_config_loopback_all_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, keyword,
+ "config");
+cmdline_parse_token_string_t cmd_config_loopback_all_all =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, all, "all");
+cmdline_parse_token_string_t cmd_config_loopback_all_item =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, item,
+ "loopback");
+cmdline_parse_token_num_t cmd_config_loopback_all_mode =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_all, mode, UINT32);
+
+cmdline_parse_inst_t cmd_config_loopback_all = {
+ .f = cmd_config_loopback_all_parsed,
+ .data = NULL,
+ .help_str = "port config all loopback <mode>",
+ .tokens = {
+ (void *)&cmd_config_loopback_all_port,
+ (void *)&cmd_config_loopback_all_keyword,
+ (void *)&cmd_config_loopback_all_all,
+ (void *)&cmd_config_loopback_all_item,
+ (void *)&cmd_config_loopback_all_mode,
+ NULL,
+ },
+};
+
+/* *** configure loopback for specific port *** */
+struct cmd_config_loopback_specific {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t keyword;
+ uint16_t port_id;
+ cmdline_fixed_string_t item;
+ uint32_t mode;
+};
+
+static void
+cmd_config_loopback_specific_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_loopback_specific *res = parsed_result;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ if (!port_is_stopped(res->port_id)) {
+ printf("Please stop port %u first\n", res->port_id);
+ return;
+ }
+
+ ports[res->port_id].dev_conf.lpbk_mode = res->mode;
+
+ cmd_reconfig_device_queue(res->port_id, 1, 1);
+}
+
+
+cmdline_parse_token_string_t cmd_config_loopback_specific_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, port,
+ "port");
+cmdline_parse_token_string_t cmd_config_loopback_specific_keyword =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, keyword,
+ "config");
+cmdline_parse_token_num_t cmd_config_loopback_specific_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_specific, port_id,
+ UINT16);
+cmdline_parse_token_string_t cmd_config_loopback_specific_item =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, item,
+ "loopback");
+cmdline_parse_token_num_t cmd_config_loopback_specific_mode =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_specific, mode,
+ UINT32);
+
+cmdline_parse_inst_t cmd_config_loopback_specific = {
+ .f = cmd_config_loopback_specific_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> loopback <mode>",
+ .tokens = {
+ (void *)&cmd_config_loopback_specific_port,
+ (void *)&cmd_config_loopback_specific_keyword,
+ (void *)&cmd_config_loopback_specific_id,
+ (void *)&cmd_config_loopback_specific_item,
+ (void *)&cmd_config_loopback_specific_mode,
+ NULL,
+ },
+};
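
Both loopback forms refuse to run until the affected ports are stopped, and the numeric mode is passed straight through to dev_conf.lpbk_mode, whose interpretation is PMD-specific. A usage sketch:

    testpmd> port stop all
    testpmd> port config all loopback 1
    testpmd> port start all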
+
/* *** configure txq/rxq, txd/rxd *** */
struct cmd_config_rx_tx {
cmdline_fixed_string_t port;
@@ -1739,11 +1897,13 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result,
port = &ports[pid];
rx_offloads = port->dev_conf.rxmode.offloads;
if (!strcmp(res->name, "crc-strip")) {
- if (!strcmp(res->value, "on"))
+ if (!strcmp(res->value, "on")) {
rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- else if (!strcmp(res->value, "off"))
+ rx_offloads &= ~DEV_RX_OFFLOAD_KEEP_CRC;
+ } else if (!strcmp(res->value, "off")) {
+ rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
- else {
+ } else {
printf("Unknown parameter\n");
return;
}
@@ -1879,8 +2039,11 @@ cmd_config_rss_parsed(void *parsed_result,
{
struct cmd_config_rss *res = parsed_result;
struct rte_eth_rss_conf rss_conf = { .rss_key_len = 0, };
+ struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
+ int use_default = 0;
+ int all_updated = 1;
int diag;
- uint8_t i;
+ uint16_t i;
if (!strcmp(res->value, "all"))
rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP |
@@ -1906,6 +2069,8 @@ cmd_config_rss_parsed(void *parsed_result,
rss_conf.rss_hf = ETH_RSS_NVGRE;
else if (!strcmp(res->value, "none"))
rss_conf.rss_hf = 0;
+ else if (!strcmp(res->value, "default"))
+ use_default = 1;
else if (isdigit(res->value[0]) && atoi(res->value) > 0 &&
atoi(res->value) < 64)
rss_conf.rss_hf = 1ULL << atoi(res->value);
@@ -1914,13 +2079,32 @@ cmd_config_rss_parsed(void *parsed_result,
return;
}
rss_conf.rss_key = NULL;
- for (i = 0; i < rte_eth_dev_count(); i++) {
- diag = rte_eth_dev_rss_hash_update(i, &rss_conf);
- if (diag < 0)
+ /* Update global configuration for RSS types. */
+ RTE_ETH_FOREACH_DEV(i) {
+ struct rte_eth_rss_conf local_rss_conf;
+
+ rte_eth_dev_info_get(i, &dev_info);
+ if (use_default)
+ rss_conf.rss_hf = dev_info.flow_type_rss_offloads;
+
+ local_rss_conf = rss_conf;
+ local_rss_conf.rss_hf = rss_conf.rss_hf &
+ dev_info.flow_type_rss_offloads;
+ if (local_rss_conf.rss_hf != rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ i, rss_conf.rss_hf, local_rss_conf.rss_hf);
+ }
+ diag = rte_eth_dev_rss_hash_update(i, &local_rss_conf);
+ if (diag < 0) {
+ all_updated = 0;
printf("Configuration of RSS hash at ethernet port %d "
"failed with error (%d): %s.\n",
i, -diag, strerror(-diag));
+ }
}
+ if (all_updated && !use_default)
+ rss_hf = rss_conf.rss_hf;
}
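
The new "default" keyword resolves per port to dev_info.flow_type_rss_offloads, i.e. every hash type the hardware supports, and deliberately leaves the global rss_hf untouched. A usage sketch:

    testpmd> port config all rss default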
cmdline_parse_token_string_t cmd_config_rss_port =
@@ -1938,7 +2122,7 @@ cmdline_parse_inst_t cmd_config_rss = {
.f = cmd_config_rss_parsed,
.data = NULL,
.help_str = "port config all rss "
- "all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none|<flowtype_id>",
+ "all|default|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none|<flowtype_id>",
.tokens = {
(void *)&cmd_config_rss_port,
(void *)&cmd_config_rss_keyword,
@@ -2067,6 +2251,102 @@ cmdline_parse_inst_t cmd_config_rss_hash_key = {
},
};
+/* *** configure port rxq/txq ring size *** */
+struct cmd_config_rxtx_ring_size {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t portid;
+ cmdline_fixed_string_t rxtxq;
+ uint16_t qid;
+ cmdline_fixed_string_t rsize;
+ uint16_t size;
+};
+
+static void
+cmd_config_rxtx_ring_size_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_rxtx_ring_size *res = parsed_result;
+ struct rte_port *port;
+ uint8_t isrx;
+
+ if (port_id_is_invalid(res->portid, ENABLED_WARN))
+ return;
+
+ if (res->portid == (portid_t)RTE_PORT_ALL) {
+ printf("Invalid port id\n");
+ return;
+ }
+
+ port = &ports[res->portid];
+
+ if (!strcmp(res->rxtxq, "rxq"))
+ isrx = 1;
+ else if (!strcmp(res->rxtxq, "txq"))
+ isrx = 0;
+ else {
+ printf("Unknown parameter\n");
+ return;
+ }
+
+ if (isrx && rx_queue_id_is_invalid(res->qid))
+ return;
+ else if (!isrx && tx_queue_id_is_invalid(res->qid))
+ return;
+
+ if (isrx && res->size != 0 && res->size <= rx_free_thresh) {
+ printf("Invalid rx ring_size, must > rx_free_thresh: %d\n",
+ rx_free_thresh);
+ return;
+ }
+
+ if (isrx)
+ port->nb_rx_desc[res->qid] = res->size;
+ else
+ port->nb_tx_desc[res->qid] = res->size;
+
+ cmd_reconfig_device_queue(res->portid, 0, 1);
+}
+
+cmdline_parse_token_string_t cmd_config_rxtx_ring_size_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ port, "port");
+cmdline_parse_token_string_t cmd_config_rxtx_ring_size_config =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ config, "config");
+cmdline_parse_token_num_t cmd_config_rxtx_ring_size_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ portid, UINT16);
+cmdline_parse_token_string_t cmd_config_rxtx_ring_size_rxtxq =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ rxtxq, "rxq#txq");
+cmdline_parse_token_num_t cmd_config_rxtx_ring_size_qid =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ qid, UINT16);
+cmdline_parse_token_string_t cmd_config_rxtx_ring_size_rsize =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ rsize, "ring_size");
+cmdline_parse_token_num_t cmd_config_rxtx_ring_size_size =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size,
+ size, UINT16);
+
+cmdline_parse_inst_t cmd_config_rxtx_ring_size = {
+ .f = cmd_config_rxtx_ring_size_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> rxq|txq <queue_id> ring_size <value>",
+ .tokens = {
+ (void *)&cmd_config_rxtx_ring_size_port,
+ (void *)&cmd_config_rxtx_ring_size_config,
+ (void *)&cmd_config_rxtx_ring_size_portid,
+ (void *)&cmd_config_rxtx_ring_size_rxtxq,
+ (void *)&cmd_config_rxtx_ring_size_qid,
+ (void *)&cmd_config_rxtx_ring_size_rsize,
+ (void *)&cmd_config_rxtx_ring_size_size,
+ NULL,
+ },
+};
+
/* *** configure port rxq/txq start/stop *** */
struct cmd_config_rxtx_queue {
cmdline_fixed_string_t port;
@@ -2152,7 +2432,7 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = {
.data = NULL,
.help_str = "port <port_id> rxq|txq <queue_id> start|stop",
.tokens = {
- (void *)&cmd_config_speed_all_port,
+ (void *)&cmd_config_rxtx_queue_port,
(void *)&cmd_config_rxtx_queue_portid,
(void *)&cmd_config_rxtx_queue_rxtxq,
(void *)&cmd_config_rxtx_queue_qid,
@@ -2161,6 +2441,117 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = {
},
};
+/* *** configure port rxq/txq setup *** */
+struct cmd_setup_rxtx_queue {
+ cmdline_fixed_string_t port;
+ portid_t portid;
+ cmdline_fixed_string_t rxtxq;
+ uint16_t qid;
+ cmdline_fixed_string_t setup;
+};
+
+/* Common CLI fields for queue setup */
+cmdline_parse_token_string_t cmd_setup_rxtx_queue_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, port, "port");
+cmdline_parse_token_num_t cmd_setup_rxtx_queue_portid =
+ TOKEN_NUM_INITIALIZER(struct cmd_setup_rxtx_queue, portid, UINT16);
+cmdline_parse_token_string_t cmd_setup_rxtx_queue_rxtxq =
+ TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, rxtxq, "rxq#txq");
+cmdline_parse_token_num_t cmd_setup_rxtx_queue_qid =
+ TOKEN_NUM_INITIALIZER(struct cmd_setup_rxtx_queue, qid, UINT16);
+cmdline_parse_token_string_t cmd_setup_rxtx_queue_setup =
+ TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, setup, "setup");
+
+static void
+cmd_setup_rxtx_queue_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_setup_rxtx_queue *res = parsed_result;
+ struct rte_port *port;
+ struct rte_mempool *mp;
+ unsigned int socket_id;
+ uint8_t isrx = 0;
+ int ret;
+
+ if (port_id_is_invalid(res->portid, ENABLED_WARN))
+ return;
+
+ if (res->portid == (portid_t)RTE_PORT_ALL) {
+ printf("Invalid port id\n");
+ return;
+ }
+
+ if (!strcmp(res->rxtxq, "rxq"))
+ isrx = 1;
+ else if (!strcmp(res->rxtxq, "txq"))
+ isrx = 0;
+ else {
+ printf("Unknown parameter\n");
+ return;
+ }
+
+ if (isrx && rx_queue_id_is_invalid(res->qid)) {
+ printf("Invalid rx queue\n");
+ return;
+ } else if (!isrx && tx_queue_id_is_invalid(res->qid)) {
+ printf("Invalid tx queue\n");
+ return;
+ }
+
+ port = &ports[res->portid];
+ if (isrx) {
+ socket_id = rxring_numa[res->portid];
+ if (!numa_support || socket_id == NUMA_NO_CONFIG)
+ socket_id = port->socket_id;
+
+ mp = mbuf_pool_find(socket_id);
+ if (mp == NULL) {
+ printf("Failed to setup RX queue: "
+ "No mempool allocation"
+ " on the socket %d\n",
+				socket_id);
+ return;
+ }
+ ret = rte_eth_rx_queue_setup(res->portid,
+ res->qid,
+ port->nb_rx_desc[res->qid],
+ socket_id,
+ &port->rx_conf[res->qid],
+ mp);
+ if (ret)
+ printf("Failed to setup RX queue\n");
+ } else {
+ socket_id = txring_numa[res->portid];
+ if (!numa_support || socket_id == NUMA_NO_CONFIG)
+ socket_id = port->socket_id;
+
+ ret = rte_eth_tx_queue_setup(res->portid,
+ res->qid,
+ port->nb_tx_desc[res->qid],
+ socket_id,
+ &port->tx_conf[res->qid]);
+ if (ret)
+ printf("Failed to setup TX queue\n");
+ }
+}
+
+cmdline_parse_inst_t cmd_setup_rxtx_queue = {
+ .f = cmd_setup_rxtx_queue_parsed,
+ .data = NULL,
+ .help_str = "port <port_id> rxq|txq <queue_idx> setup",
+ .tokens = {
+ (void *)&cmd_setup_rxtx_queue_port,
+ (void *)&cmd_setup_rxtx_queue_portid,
+ (void *)&cmd_setup_rxtx_queue_rxtxq,
+ (void *)&cmd_setup_rxtx_queue_qid,
+ (void *)&cmd_setup_rxtx_queue_setup,
+ NULL,
+ },
+};
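+/*
+ * Illustrative usage (example values; the port must be stopped): after
+ * changing a ring size, e.g.:
+ *   testpmd> port 0 rxq 0 setup
+ * re-runs rte_eth_rx_queue_setup() (or rte_eth_tx_queue_setup() for txq)
+ * on that single queue.
+ */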
+
/* *** Configure RSS RETA *** */
struct cmd_config_rss_reta {
cmdline_fixed_string_t port;
@@ -2599,6 +2990,8 @@ cmd_config_burst_parsed(void *parsed_result,
__attribute__((unused)) void *data)
{
struct cmd_config_burst *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ uint16_t rec_nb_pkts;
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
@@ -2606,11 +2999,34 @@ cmd_config_burst_parsed(void *parsed_result,
}
if (!strcmp(res->name, "burst")) {
- if (res->value < 1 || res->value > MAX_PKT_BURST) {
+ if (res->value == 0) {
+ /* If user gives a value of zero, query the PMD for
+ * its recommended Rx burst size. Testpmd uses a single
+ * size for all ports, so assume all ports are the same
+ * NIC model and use the values from Port 0.
+ */
+ rte_eth_dev_info_get(0, &dev_info);
+ rec_nb_pkts = dev_info.default_rxportconf.burst_size;
+
+ if (rec_nb_pkts == 0) {
+ printf("PMD does not recommend a burst size.\n"
+ "User provided value must be between"
+ " 1 and %d\n", MAX_PKT_BURST);
+ return;
+ } else if (rec_nb_pkts > MAX_PKT_BURST) {
+ printf("PMD recommended burst size of %d"
+ " exceeds maximum value of %d\n",
+ rec_nb_pkts, MAX_PKT_BURST);
+ return;
+ }
+ printf("Using PMD-provided burst value of %d\n",
+ rec_nb_pkts);
+ nb_pkt_per_burst = rec_nb_pkts;
+ } else if (res->value > MAX_PKT_BURST) {
printf("burst must be >= 1 && <= %d\n", MAX_PKT_BURST);
return;
- }
- nb_pkt_per_burst = res->value;
+ } else
+ nb_pkt_per_burst = res->value;
} else {
printf("Unknown parameter\n");
return;
@@ -4013,6 +4429,12 @@ check_tunnel_tso_nic_support(portid_t port_id)
if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
printf("Warning: GENEVE TUNNEL TSO not supported therefore "
"not enabled for port %d\n", port_id);
+ if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO))
+ printf("Warning: IP TUNNEL TSO not supported therefore "
+ "not enabled for port %d\n", port_id);
+ if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO))
+ printf("Warning: UDP TUNNEL TSO not supported therefore "
+ "not enabled for port %d\n", port_id);
return dev_info;
}
@@ -4040,13 +4462,17 @@ cmd_tunnel_tso_set_parsed(void *parsed_result,
~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
printf("TSO for tunneled packets is disabled\n");
} else {
uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
ports[res->port_id].dev_conf.txmode.offloads |=
(tso_offloads & dev_info.tx_offload_capa);
@@ -4399,8 +4825,9 @@ cmd_gso_show_parsed(void *parsed_result,
if (gso_ports[res->cmd_pid].enable) {
printf("Max GSO'd packet size: %uB\n"
"Supported GSO types: TCP/IPv4, "
- "VxLAN with inner TCP/IPv4 packet, "
- "GRE with inner TCP/IPv4 packet\n",
+ "UDP/IPv4, VxLAN with inner "
+ "TCP/IPv4 packet, GRE with inner "
+ "TCP/IPv4 packet\n",
gso_max_segment_size);
} else
printf("GSO is not enabled on Port %u\n", res->cmd_pid);
@@ -5402,7 +5829,7 @@ static void cmd_create_bonded_device_parsed(void *parsed_result,
port_id);
/* Update number of ports */
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
reconfig(port_id, res->socket);
rte_eth_promiscuous_enable(port_id);
}
@@ -5511,11 +5938,6 @@ static void cmd_set_bond_mon_period_parsed(void *parsed_result,
struct cmd_set_bond_mon_period_result *res = parsed_result;
int ret;
- if (res->port_num >= nb_ports) {
- printf("Port id %d must be less than %d\n", res->port_num, nb_ports);
- return;
- }
-
ret = rte_eth_bond_link_monitoring_set(res->port_num, res->period_ms);
/* check the return value and print it if is < 0 */
@@ -5572,12 +5994,6 @@ cmd_set_bonding_agg_mode(void *parsed_result,
struct cmd_set_bonding_agg_mode_policy_result *res = parsed_result;
uint8_t policy = AGG_BANDWIDTH;
- if (res->port_num >= nb_ports) {
- printf("Port id %d must be less than %d\n",
- res->port_num, nb_ports);
- return;
- }
-
if (!strcmp(res->policy, "bandwidth"))
policy = AGG_BANDWIDTH;
else if (!strcmp(res->policy, "stable"))
@@ -8278,6 +8694,89 @@ cmdline_parse_inst_t cmd_tunnel_udp_config = {
},
};
+struct cmd_config_tunnel_udp_port {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t port_id;
+ cmdline_fixed_string_t udp_tunnel_port;
+ cmdline_fixed_string_t action;
+ cmdline_fixed_string_t tunnel_type;
+ uint16_t udp_port;
+};
+
+static void
+cmd_cfg_tunnel_udp_port_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_tunnel_udp_port *res = parsed_result;
+ struct rte_eth_udp_tunnel tunnel_udp;
+ int ret = 0;
+
+ if (port_id_is_invalid(res->port_id, ENABLED_WARN))
+ return;
+
+ tunnel_udp.udp_port = res->udp_port;
+
+ if (!strcmp(res->tunnel_type, "vxlan")) {
+ tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN;
+ } else if (!strcmp(res->tunnel_type, "geneve")) {
+ tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE;
+ } else {
+ printf("Invalid tunnel type\n");
+ return;
+ }
+
+ if (!strcmp(res->action, "add"))
+ ret = rte_eth_dev_udp_tunnel_port_add(res->port_id,
+ &tunnel_udp);
+ else
+ ret = rte_eth_dev_udp_tunnel_port_delete(res->port_id,
+ &tunnel_udp);
+
+ if (ret < 0)
+ printf("udp tunneling port add error: (%s)\n", strerror(-ret));
+}
+
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, port,
+ "port");
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_config =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, config,
+ "config");
+cmdline_parse_token_num_t cmd_config_tunnel_udp_port_port_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, port_id,
+ UINT16);
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_port =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port,
+ udp_tunnel_port,
+ "udp_tunnel_port");
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_action =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, action,
+ "add#rm");
+cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_type =
+ TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, tunnel_type,
+ "vxlan#geneve");
+cmdline_parse_token_num_t cmd_config_tunnel_udp_port_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, udp_port,
+ UINT16);
+
+cmdline_parse_inst_t cmd_cfg_tunnel_udp_port = {
+ .f = cmd_cfg_tunnel_udp_port_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> udp_tunnel_port add|rm vxlan|geneve <udp_port>",
+ .tokens = {
+ (void *)&cmd_config_tunnel_udp_port_port,
+ (void *)&cmd_config_tunnel_udp_port_config,
+ (void *)&cmd_config_tunnel_udp_port_port_id,
+ (void *)&cmd_config_tunnel_udp_port_tunnel_port,
+ (void *)&cmd_config_tunnel_udp_port_action,
+ (void *)&cmd_config_tunnel_udp_port_tunnel_type,
+ (void *)&cmd_config_tunnel_udp_port_value,
+ NULL,
+ },
+};
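+/*
+ * Illustrative usage (port and UDP port numbers are examples; 4789 and
+ * 6081 are the IANA-assigned VXLAN and GENEVE ports):
+ *   testpmd> port config 0 udp_tunnel_port add vxlan 4789
+ *   testpmd> port config 0 udp_tunnel_port rm geneve 6081
+ */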
+
/* *** GLOBAL CONFIG *** */
struct cmd_global_config_result {
cmdline_fixed_string_t cmd;
@@ -8621,7 +9120,7 @@ static void cmd_dump_parsed(void *parsed_result,
else if (!strcmp(res->dump, "dump_mempool"))
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
- rte_eal_devargs_dump(stdout);
+ rte_devargs_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
}
@@ -10725,11 +11224,6 @@ cmd_flow_director_mask_parsed(void *parsed_result,
struct rte_eth_fdir_masks *mask;
struct rte_port *port;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
port = &ports[res->port_id];
/** Check if the port is not started **/
if (port->port_status != RTE_PORT_STOPPED) {
@@ -10926,11 +11420,6 @@ cmd_flow_director_flex_mask_parsed(void *parsed_result,
uint16_t i;
int ret;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
port = &ports[res->port_id];
/** Check if the port is not started **/
if (port->port_status != RTE_PORT_STOPPED) {
@@ -11082,11 +11571,6 @@ cmd_flow_director_flxpld_parsed(void *parsed_result,
struct rte_port *port;
int ret = 0;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
port = &ports[res->port_id];
/** Check if the port is not started **/
if (port->port_status != RTE_PORT_STOPPED) {
@@ -11728,7 +12212,7 @@ struct cmd_config_l2_tunnel_eth_type_result {
cmdline_fixed_string_t port;
cmdline_fixed_string_t config;
cmdline_fixed_string_t all;
- uint8_t id;
+ portid_t id;
cmdline_fixed_string_t l2_tunnel;
cmdline_fixed_string_t l2_tunnel_type;
cmdline_fixed_string_t eth_type;
@@ -11750,7 +12234,7 @@ cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_all_str =
cmdline_parse_token_num_t cmd_config_l2_tunnel_eth_type_id =
TOKEN_NUM_INITIALIZER
(struct cmd_config_l2_tunnel_eth_type_result,
- id, UINT8);
+ id, UINT16);
cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_l2_tunnel =
TOKEN_STRING_INITIALIZER
(struct cmd_config_l2_tunnel_eth_type_result,
@@ -11863,7 +12347,7 @@ struct cmd_config_l2_tunnel_en_dis_result {
cmdline_fixed_string_t port;
cmdline_fixed_string_t config;
cmdline_fixed_string_t all;
- uint8_t id;
+ portid_t id;
cmdline_fixed_string_t l2_tunnel;
cmdline_fixed_string_t l2_tunnel_type;
cmdline_fixed_string_t en_dis;
@@ -11884,7 +12368,7 @@ cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_all_str =
cmdline_parse_token_num_t cmd_config_l2_tunnel_en_dis_id =
TOKEN_NUM_INITIALIZER
(struct cmd_config_l2_tunnel_en_dis_result,
- id, UINT8);
+ id, UINT16);
cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_l2_tunnel =
TOKEN_STRING_INITIALIZER
(struct cmd_config_l2_tunnel_en_dis_result,
@@ -12861,7 +13345,7 @@ cmd_set_tx_loopback_parsed(
if (ret == -ENOTSUP)
ret = rte_pmd_bnxt_set_tx_loopback(res->port_id, is_on);
#endif
-#ifdef RTE_LIBRTE_DPAA_PMD
+#if defined RTE_LIBRTE_DPAA_BUS && defined RTE_LIBRTE_DPAA_PMD
if (ret == -ENOTSUP)
ret = rte_pmd_dpaa_set_tx_loopback(res->port_id, is_on);
#endif
@@ -14356,20 +14840,14 @@ static void cmd_set_port_tm_hierarchy_default_parsed(void *parsed_result,
p = &ports[port_id];
- /* Port tm flag */
- if (p->softport.tm_flag == 0) {
- printf(" tm not enabled on port %u (error)\n", port_id);
- return;
- }
-
/* Forward mode: tm */
- if (strcmp(cur_fwd_config.fwd_eng->fwd_mode_name, "tm")) {
- printf(" tm mode not enabled(error)\n");
+ if (strcmp(cur_fwd_config.fwd_eng->fwd_mode_name, "softnic")) {
+ printf(" softnicfwd mode not enabled(error)\n");
return;
}
/* Set the default tm hierarchy */
- p->softport.tm.default_hierarchy_enable = 1;
+ p->softport.default_tm_hierarchy_enable = 1;
}
cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = {
@@ -14388,6 +14866,326 @@ cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = {
};
#endif
+/** Set VXLAN encapsulation details */
+struct cmd_set_vxlan_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t vxlan;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t vlan_present:1;
+ uint32_t vni;
+ uint16_t udp_src;
+ uint16_t udp_dst;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t tci;
+ struct ether_addr eth_src;
+ struct ether_addr eth_dst;
+};
+
+cmdline_parse_token_string_t cmd_set_vxlan_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, set, "set");
+cmdline_parse_token_string_t cmd_set_vxlan_vxlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan, "vxlan");
+cmdline_parse_token_string_t cmd_set_vxlan_vxlan_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan,
+ "vxlan-with-vlan");
+cmdline_parse_token_string_t cmd_set_vxlan_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "ip-version");
+cmdline_parse_token_string_t cmd_set_vxlan_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, ip_version,
+ "ipv4#ipv6");
+cmdline_parse_token_string_t cmd_set_vxlan_vni =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "vni");
+cmdline_parse_token_num_t cmd_set_vxlan_vni_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, vni, UINT32);
+cmdline_parse_token_string_t cmd_set_vxlan_udp_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "udp-src");
+cmdline_parse_token_num_t cmd_set_vxlan_udp_src_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_src, UINT16);
+cmdline_parse_token_string_t cmd_set_vxlan_udp_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "udp-dst");
+cmdline_parse_token_num_t cmd_set_vxlan_udp_dst_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_dst, UINT16);
+cmdline_parse_token_string_t cmd_set_vxlan_ip_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "ip-src");
+cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_src_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_src);
+cmdline_parse_token_string_t cmd_set_vxlan_ip_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "ip-dst");
+cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_dst_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_dst);
+cmdline_parse_token_string_t cmd_set_vxlan_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "vlan-tci");
+cmdline_parse_token_num_t cmd_set_vxlan_vlan_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, tci, UINT16);
+cmdline_parse_token_string_t cmd_set_vxlan_eth_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "eth-src");
+cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_src_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_src);
+cmdline_parse_token_string_t cmd_set_vxlan_eth_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token,
+ "eth-dst");
+cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_dst_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_dst);
+
+static void cmd_set_vxlan_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_vxlan_result *res = parsed_result;
+ union {
+ uint32_t vxlan_id;
+ uint8_t vni[4];
+ } id = {
+ .vxlan_id = rte_cpu_to_be_32(res->vni) & RTE_BE32(0x00ffffff),
+ };
+
+ if (strcmp(res->vxlan, "vxlan") == 0)
+ vxlan_encap_conf.select_vlan = 0;
+ else if (strcmp(res->vxlan, "vxlan-with-vlan") == 0)
+ vxlan_encap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ vxlan_encap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ vxlan_encap_conf.select_ipv4 = 0;
+ else
+ return;
+ rte_memcpy(vxlan_encap_conf.vni, &id.vni[1], 3);
+ vxlan_encap_conf.udp_src = rte_cpu_to_be_16(res->udp_src);
+ vxlan_encap_conf.udp_dst = rte_cpu_to_be_16(res->udp_dst);
+ if (vxlan_encap_conf.select_ipv4) {
+ IPV4_ADDR_TO_UINT(res->ip_src, vxlan_encap_conf.ipv4_src);
+ IPV4_ADDR_TO_UINT(res->ip_dst, vxlan_encap_conf.ipv4_dst);
+ } else {
+ IPV6_ADDR_TO_ARRAY(res->ip_src, vxlan_encap_conf.ipv6_src);
+ IPV6_ADDR_TO_ARRAY(res->ip_dst, vxlan_encap_conf.ipv6_dst);
+ }
+ if (vxlan_encap_conf.select_vlan)
+ vxlan_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
+ rte_memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+ ETHER_ADDR_LEN);
+}
+
+cmdline_parse_inst_t cmd_set_vxlan = {
+ .f = cmd_set_vxlan_parsed,
+ .data = NULL,
+ .help_str = "set vxlan ip-version ipv4|ipv6 vni <vni> udp-src"
+ " <udp-src> udp-dst <udp-dst> ip-src <ip-src> ip-dst <ip-dst>"
+ " eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_vxlan_set,
+ (void *)&cmd_set_vxlan_vxlan,
+ (void *)&cmd_set_vxlan_ip_version,
+ (void *)&cmd_set_vxlan_ip_version_value,
+ (void *)&cmd_set_vxlan_vni,
+ (void *)&cmd_set_vxlan_vni_value,
+ (void *)&cmd_set_vxlan_udp_src,
+ (void *)&cmd_set_vxlan_udp_src_value,
+ (void *)&cmd_set_vxlan_udp_dst,
+ (void *)&cmd_set_vxlan_udp_dst_value,
+ (void *)&cmd_set_vxlan_ip_src,
+ (void *)&cmd_set_vxlan_ip_src_value,
+ (void *)&cmd_set_vxlan_ip_dst,
+ (void *)&cmd_set_vxlan_ip_dst_value,
+ (void *)&cmd_set_vxlan_eth_src,
+ (void *)&cmd_set_vxlan_eth_src_value,
+ (void *)&cmd_set_vxlan_eth_dst,
+ (void *)&cmd_set_vxlan_eth_dst_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_vxlan_with_vlan = {
+ .f = cmd_set_vxlan_parsed,
+ .data = NULL,
+ .help_str = "set vxlan-with-vlan ip-version ipv4|ipv6 vni <vni>"
+ " udp-src <udp-src> udp-dst <udp-dst> ip-src <ip-src> ip-dst"
+ " <ip-dst> vlan-tci <vlan-tci> eth-src <eth-src> eth-dst"
+ " <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_vxlan_set,
+ (void *)&cmd_set_vxlan_vxlan_with_vlan,
+ (void *)&cmd_set_vxlan_ip_version,
+ (void *)&cmd_set_vxlan_ip_version_value,
+ (void *)&cmd_set_vxlan_vni,
+ (void *)&cmd_set_vxlan_vni_value,
+ (void *)&cmd_set_vxlan_udp_src,
+ (void *)&cmd_set_vxlan_udp_src_value,
+ (void *)&cmd_set_vxlan_udp_dst,
+ (void *)&cmd_set_vxlan_udp_dst_value,
+ (void *)&cmd_set_vxlan_ip_src,
+ (void *)&cmd_set_vxlan_ip_src_value,
+ (void *)&cmd_set_vxlan_ip_dst,
+ (void *)&cmd_set_vxlan_ip_dst_value,
+ (void *)&cmd_set_vxlan_vlan,
+ (void *)&cmd_set_vxlan_vlan_value,
+ (void *)&cmd_set_vxlan_eth_src,
+ (void *)&cmd_set_vxlan_eth_src_value,
+ (void *)&cmd_set_vxlan_eth_dst,
+ (void *)&cmd_set_vxlan_eth_dst_value,
+ NULL,
+ },
+};
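+/*
+ * Illustrative usage (entered as one command; all values are examples):
+ *   testpmd> set vxlan ip-version ipv4 vni 4000 udp-src 4789
+ *            udp-dst 4789 ip-src 10.0.0.1 ip-dst 10.0.0.2
+ *            eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ * The stored header fields in vxlan_encap_conf are intended to be
+ * consumed later by the "vxlan_encap" flow action.
+ */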
+
+/** Set NVGRE encapsulation details */
+struct cmd_set_nvgre_result {
+ cmdline_fixed_string_t set;
+ cmdline_fixed_string_t nvgre;
+ cmdline_fixed_string_t pos_token;
+ cmdline_fixed_string_t ip_version;
+ uint32_t tni;
+ cmdline_ipaddr_t ip_src;
+ cmdline_ipaddr_t ip_dst;
+ uint16_t tci;
+ struct ether_addr eth_src;
+ struct ether_addr eth_dst;
+};
+
+cmdline_parse_token_string_t cmd_set_nvgre_set =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, set, "set");
+cmdline_parse_token_string_t cmd_set_nvgre_nvgre =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre, "nvgre");
+cmdline_parse_token_string_t cmd_set_nvgre_nvgre_with_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre,
+ "nvgre-with-vlan");
+cmdline_parse_token_string_t cmd_set_nvgre_ip_version =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "ip-version");
+cmdline_parse_token_string_t cmd_set_nvgre_ip_version_value =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, ip_version,
+ "ipv4#ipv6");
+cmdline_parse_token_string_t cmd_set_nvgre_tni =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "tni");
+cmdline_parse_token_num_t cmd_set_nvgre_tni_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tni, UINT32);
+cmdline_parse_token_string_t cmd_set_nvgre_ip_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "ip-src");
+cmdline_parse_token_ipaddr_t cmd_set_nvgre_ip_src_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_src);
+cmdline_parse_token_string_t cmd_set_nvgre_ip_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "ip-dst");
+cmdline_parse_token_ipaddr_t cmd_set_nvgre_ip_dst_value =
+ TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_dst);
+cmdline_parse_token_string_t cmd_set_nvgre_vlan =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "vlan-tci");
+cmdline_parse_token_num_t cmd_set_nvgre_vlan_value =
+ TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tci, UINT16);
+cmdline_parse_token_string_t cmd_set_nvgre_eth_src =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "eth-src");
+cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_src_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_src);
+cmdline_parse_token_string_t cmd_set_nvgre_eth_dst =
+ TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token,
+ "eth-dst");
+cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_dst_value =
+ TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_dst);
+
+static void cmd_set_nvgre_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_set_nvgre_result *res = parsed_result;
+ union {
+ uint32_t nvgre_tni;
+ uint8_t tni[4];
+ } id = {
+ .nvgre_tni = rte_cpu_to_be_32(res->tni) & RTE_BE32(0x00ffffff),
+ };
+
+ if (strcmp(res->nvgre, "nvgre") == 0)
+ nvgre_encap_conf.select_vlan = 0;
+ else if (strcmp(res->nvgre, "nvgre-with-vlan") == 0)
+ nvgre_encap_conf.select_vlan = 1;
+ if (strcmp(res->ip_version, "ipv4") == 0)
+ nvgre_encap_conf.select_ipv4 = 1;
+ else if (strcmp(res->ip_version, "ipv6") == 0)
+ nvgre_encap_conf.select_ipv4 = 0;
+ else
+ return;
+ rte_memcpy(nvgre_encap_conf.tni, &id.tni[1], 3);
+ if (nvgre_encap_conf.select_ipv4) {
+ IPV4_ADDR_TO_UINT(res->ip_src, nvgre_encap_conf.ipv4_src);
+ IPV4_ADDR_TO_UINT(res->ip_dst, nvgre_encap_conf.ipv4_dst);
+ } else {
+ IPV6_ADDR_TO_ARRAY(res->ip_src, nvgre_encap_conf.ipv6_src);
+ IPV6_ADDR_TO_ARRAY(res->ip_dst, nvgre_encap_conf.ipv6_dst);
+ }
+ if (nvgre_encap_conf.select_vlan)
+ nvgre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci);
+ rte_memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes,
+ ETHER_ADDR_LEN);
+ rte_memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes,
+ ETHER_ADDR_LEN);
+}
+
+cmdline_parse_inst_t cmd_set_nvgre = {
+ .f = cmd_set_nvgre_parsed,
+ .data = NULL,
+ .help_str = "set nvgre ip-version <ipv4|ipv6> tni <tni> ip-src"
+ " <ip-src> ip-dst <ip-dst> eth-src <eth-src>"
+ " eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_nvgre_set,
+ (void *)&cmd_set_nvgre_nvgre,
+ (void *)&cmd_set_nvgre_ip_version,
+ (void *)&cmd_set_nvgre_ip_version_value,
+ (void *)&cmd_set_nvgre_tni,
+ (void *)&cmd_set_nvgre_tni_value,
+ (void *)&cmd_set_nvgre_ip_src,
+ (void *)&cmd_set_nvgre_ip_src_value,
+ (void *)&cmd_set_nvgre_ip_dst,
+ (void *)&cmd_set_nvgre_ip_dst_value,
+ (void *)&cmd_set_nvgre_eth_src,
+ (void *)&cmd_set_nvgre_eth_src_value,
+ (void *)&cmd_set_nvgre_eth_dst,
+ (void *)&cmd_set_nvgre_eth_dst_value,
+ NULL,
+ },
+};
+
+cmdline_parse_inst_t cmd_set_nvgre_with_vlan = {
+ .f = cmd_set_nvgre_parsed,
+ .data = NULL,
+ .help_str = "set nvgre-with-vlan ip-version <ipv4|ipv6> tni <tni>"
+ " ip-src <ip-src> ip-dst <ip-dst> vlan-tci <vlan-tci>"
+ " eth-src <eth-src> eth-dst <eth-dst>",
+ .tokens = {
+ (void *)&cmd_set_nvgre_set,
+ (void *)&cmd_set_nvgre_nvgre_with_vlan,
+ (void *)&cmd_set_nvgre_ip_version,
+ (void *)&cmd_set_nvgre_ip_version_value,
+ (void *)&cmd_set_nvgre_tni,
+ (void *)&cmd_set_nvgre_tni_value,
+ (void *)&cmd_set_nvgre_ip_src,
+ (void *)&cmd_set_nvgre_ip_src_value,
+ (void *)&cmd_set_nvgre_ip_dst,
+ (void *)&cmd_set_nvgre_ip_dst_value,
+ (void *)&cmd_set_nvgre_vlan,
+ (void *)&cmd_set_nvgre_vlan_value,
+ (void *)&cmd_set_nvgre_eth_src,
+ (void *)&cmd_set_nvgre_eth_src_value,
+ (void *)&cmd_set_nvgre_eth_dst,
+ (void *)&cmd_set_nvgre_eth_dst_value,
+ NULL,
+ },
+};
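+/*
+ * Illustrative usage (entered as one command; all values are examples):
+ *   testpmd> set nvgre ip-version ipv4 tni 4000 ip-src 10.0.0.1
+ *            ip-dst 10.0.0.2 eth-src 11:11:11:11:11:11
+ *            eth-dst 22:22:22:22:22:22
+ * The stored header in nvgre_encap_conf is intended for the
+ * "nvgre_encap" flow action.
+ */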
+
/* Strict link priority scheduling mode setting */
static void
cmd_strict_link_prio_parsed(
@@ -14467,11 +15265,6 @@ cmd_ddp_add_parsed(
int file_num;
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
@@ -14549,11 +15342,6 @@ cmd_ddp_del_parsed(
uint32_t size;
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
@@ -14850,12 +15638,12 @@ cmdline_parse_token_num_t cmd_ddp_get_list_port_id =
static void
cmd_ddp_get_list_parsed(
- void *parsed_result,
+ __attribute__((unused)) void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_ddp_get_list_result *res = parsed_result;
#ifdef RTE_LIBRTE_I40E_PMD
+ struct cmd_ddp_get_list_result *res = parsed_result;
struct rte_pmd_i40e_profile_list *p_list;
struct rte_pmd_i40e_profile_info *p_info;
uint32_t p_num;
@@ -14864,11 +15652,6 @@ cmd_ddp_get_list_parsed(
#endif
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
#ifdef RTE_LIBRTE_I40E_PMD
size = PROFILE_INFO_SIZE * MAX_PROFILE_NUM + 4;
p_list = (struct rte_pmd_i40e_profile_list *)malloc(size);
@@ -14931,22 +15714,17 @@ struct cmd_cfg_input_set_result {
static void
cmd_cfg_input_set_parsed(
- void *parsed_result,
+ __attribute__((unused)) void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_cfg_input_set_result *res = parsed_result;
#ifdef RTE_LIBRTE_I40E_PMD
+ struct cmd_cfg_input_set_result *res = parsed_result;
enum rte_pmd_i40e_inset_type inset_type = INSET_NONE;
struct rte_pmd_i40e_inset inset;
#endif
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
@@ -15059,22 +15837,17 @@ struct cmd_clear_input_set_result {
static void
cmd_clear_input_set_parsed(
- void *parsed_result,
+ __attribute__((unused)) void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
{
- struct cmd_clear_input_set_result *res = parsed_result;
#ifdef RTE_LIBRTE_I40E_PMD
+ struct cmd_clear_input_set_result *res = parsed_result;
enum rte_pmd_i40e_inset_type inset_type = INSET_NONE;
struct rte_pmd_i40e_inset inset;
#endif
int ret = -ENOTSUP;
- if (res->port_id > nb_ports) {
- printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
- return;
- }
-
if (!all_ports_stopped()) {
printf("Please stop all ports first\n");
return;
@@ -16030,6 +16803,805 @@ cmdline_parse_inst_t cmd_load_from_file = {
},
};
+/* Get Rx offload capabilities */
+struct cmd_rx_offload_get_capa_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t rx_offload;
+ cmdline_fixed_string_t capabilities;
+};
+
+cmdline_parse_token_string_t cmd_rx_offload_get_capa_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_rx_offload_get_capa_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_rx_offload_get_capa_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_rx_offload_get_capa_rx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ rx_offload, "rx_offload");
+cmdline_parse_token_string_t cmd_rx_offload_get_capa_capabilities =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_capa_result,
+ capabilities, "capabilities");
+
+static void
+print_rx_offloads(uint64_t offloads)
+{
+ uint64_t single_offload;
+ int begin;
+ int end;
+ int bit;
+
+ if (offloads == 0)
+ return;
+
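+	/* Scan only the populated bit range: __builtin_ctzll() gives the
+	 * lowest set bit; sizeof(offloads) * CHAR_BIT - __builtin_clzll()
+	 * is one past the highest. */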
+ begin = __builtin_ctzll(offloads);
+ end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads);
+
+ single_offload = 1 << begin;
+ for (bit = begin; bit < end; bit++) {
+ if (offloads & single_offload)
+ printf(" %s",
+ rte_eth_dev_rx_offload_name(single_offload));
+ single_offload <<= 1;
+ }
+}
+
+static void
+cmd_rx_offload_get_capa_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_rx_offload_get_capa_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ uint64_t queue_offloads;
+ uint64_t port_offloads;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ queue_offloads = dev_info.rx_queue_offload_capa;
+ port_offloads = dev_info.rx_offload_capa ^ queue_offloads;
+
+ printf("Rx Offloading Capabilities of port %d :\n", port_id);
+ printf(" Per Queue :");
+ print_rx_offloads(queue_offloads);
+
+ printf("\n");
+ printf(" Per Port :");
+ print_rx_offloads(port_offloads);
+ printf("\n\n");
+}
+
+cmdline_parse_inst_t cmd_rx_offload_get_capa = {
+ .f = cmd_rx_offload_get_capa_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> rx_offload capabilities",
+ .tokens = {
+ (void *)&cmd_rx_offload_get_capa_show,
+ (void *)&cmd_rx_offload_get_capa_port,
+ (void *)&cmd_rx_offload_get_capa_port_id,
+ (void *)&cmd_rx_offload_get_capa_rx_offload,
+ (void *)&cmd_rx_offload_get_capa_capabilities,
+ NULL,
+ }
+};
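+/*
+ * Illustrative usage:
+ *   testpmd> show port 0 rx_offload capabilities
+ * Queue-capable bits are XORed out of rx_offload_capa above, so every
+ * capability appears under exactly one of the two headings.
+ */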
+
+/* Get Rx offload configuration */
+struct cmd_rx_offload_get_configuration_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t rx_offload;
+ cmdline_fixed_string_t configuration;
+};
+
+cmdline_parse_token_string_t cmd_rx_offload_get_configuration_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_rx_offload_get_configuration_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_rx_offload_get_configuration_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_rx_offload_get_configuration_rx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ rx_offload, "rx_offload");
+cmdline_parse_token_string_t cmd_rx_offload_get_configuration_configuration =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_rx_offload_get_configuration_result,
+ configuration, "configuration");
+
+static void
+cmd_rx_offload_get_configuration_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_rx_offload_get_configuration_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ struct rte_port *port = &ports[port_id];
+ uint64_t port_offloads;
+ uint64_t queue_offloads;
+ uint16_t nb_rx_queues;
+ int q;
+
+ printf("Rx Offloading Configuration of port %d :\n", port_id);
+
+ port_offloads = port->dev_conf.rxmode.offloads;
+ printf(" Port :");
+ print_rx_offloads(port_offloads);
+ printf("\n");
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ nb_rx_queues = dev_info.nb_rx_queues;
+ for (q = 0; q < nb_rx_queues; q++) {
+ queue_offloads = port->rx_conf[q].offloads;
+ printf(" Queue[%2d] :", q);
+ print_rx_offloads(queue_offloads);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+cmdline_parse_inst_t cmd_rx_offload_get_configuration = {
+ .f = cmd_rx_offload_get_configuration_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> rx_offload configuration",
+ .tokens = {
+ (void *)&cmd_rx_offload_get_configuration_show,
+ (void *)&cmd_rx_offload_get_configuration_port,
+ (void *)&cmd_rx_offload_get_configuration_port_id,
+ (void *)&cmd_rx_offload_get_configuration_rx_offload,
+ (void *)&cmd_rx_offload_get_configuration_configuration,
+ NULL,
+ }
+};
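+/*
+ * Illustrative usage:
+ *   testpmd> show port 0 rx_offload configuration
+ */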
+
+/* Enable/disable a per-port Rx offload */
+struct cmd_config_per_port_rx_offload_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t port_id;
+ cmdline_fixed_string_t rx_offload;
+ cmdline_fixed_string_t offload;
+ cmdline_fixed_string_t on_off;
+};
+
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_config =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ config, "config");
+cmdline_parse_token_num_t cmd_config_per_port_rx_offload_result_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_rx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ rx_offload, "rx_offload");
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
+ "qinq_strip#outer_ipv4_cksum#macsec_strip#"
+ "header_split#vlan_filter#vlan_extend#jumbo_frame#"
+ "crc_strip#scatter#timestamp#security#keep_crc");
+cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_rx_offload_result,
+ on_off, "on#off");
+
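+/* Map an offload name (matched case-insensitively, e.g. "vlan_strip")
+ * to its single-bit DEV_RX_OFFLOAD_* flag; returns 0 if the name is
+ * unknown.
+ */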
+static uint64_t
+search_rx_offload(const char *name)
+{
+ uint64_t single_offload;
+ const char *single_name;
+ int found = 0;
+ unsigned int bit;
+
+ single_offload = 1;
+ for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) {
+		single_name = rte_eth_dev_rx_offload_name(single_offload);
+		if (single_name == NULL)
+			break;
+		if (!strcasecmp(single_name, name)) {
+			found = 1;
+			break;
+		} else if (!strcasecmp(single_name, "UNKNOWN"))
+			break;
+ single_offload <<= 1;
+ }
+
+ if (found)
+ return single_offload;
+
+ return 0;
+}
+
+static void
+cmd_config_per_port_rx_offload_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_per_port_rx_offload_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ struct rte_eth_dev_info dev_info;
+ struct rte_port *port = &ports[port_id];
+ uint64_t single_offload;
+ uint16_t nb_rx_queues;
+ int q;
+
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Error: Can't config offload when Port %d "
+ "is not stopped\n", port_id);
+ return;
+ }
+
+ single_offload = search_rx_offload(res->offload);
+ if (single_offload == 0) {
+ printf("Unknown offload name: %s\n", res->offload);
+ return;
+ }
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ nb_rx_queues = dev_info.nb_rx_queues;
+ if (!strcmp(res->on_off, "on")) {
+ port->dev_conf.rxmode.offloads |= single_offload;
+ for (q = 0; q < nb_rx_queues; q++)
+ port->rx_conf[q].offloads |= single_offload;
+ } else {
+ port->dev_conf.rxmode.offloads &= ~single_offload;
+ for (q = 0; q < nb_rx_queues; q++)
+ port->rx_conf[q].offloads &= ~single_offload;
+ }
+
+ cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_port_rx_offload = {
+ .f = cmd_config_per_port_rx_offload_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> rx_offload vlan_strip|ipv4_cksum|"
+ "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
+ "macsec_strip|header_split|vlan_filter|vlan_extend|"
+ "jumbo_frame|crc_strip|scatter|timestamp|security|keep_crc "
+ "on|off",
+ .tokens = {
+ (void *)&cmd_config_per_port_rx_offload_result_port,
+ (void *)&cmd_config_per_port_rx_offload_result_config,
+ (void *)&cmd_config_per_port_rx_offload_result_port_id,
+ (void *)&cmd_config_per_port_rx_offload_result_rx_offload,
+ (void *)&cmd_config_per_port_rx_offload_result_offload,
+ (void *)&cmd_config_per_port_rx_offload_result_on_off,
+ NULL,
+ }
+};
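+/*
+ * Illustrative usage (the port must be stopped first):
+ *   testpmd> port config 0 rx_offload scatter on
+ * This toggles the flag both port-wide and on every Rx queue.
+ */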
+
+/* Enable/disable a per-queue Rx offload */
+struct cmd_config_per_queue_rx_offload_result {
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t rxq;
+ uint16_t queue_id;
+ cmdline_fixed_string_t rx_offload;
+ cmdline_fixed_string_t offload;
+ cmdline_fixed_string_t on_off;
+};
+
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_config_per_queue_rx_offload_result_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_rxq =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ rxq, "rxq");
+cmdline_parse_token_num_t cmd_config_per_queue_rx_offload_result_queue_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ queue_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_rxoffload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ rx_offload, "rx_offload");
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#"
+ "qinq_strip#outer_ipv4_cksum#macsec_strip#"
+ "header_split#vlan_filter#vlan_extend#jumbo_frame#"
+ "crc_strip#scatter#timestamp#security#keep_crc");
+cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_rx_offload_result,
+ on_off, "on#off");
+
+static void
+cmd_config_per_queue_rx_offload_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_per_queue_rx_offload_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ uint16_t queue_id = res->queue_id;
+ struct rte_port *port = &ports[port_id];
+ uint64_t single_offload;
+
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Error: Can't config offload when Port %d "
+ "is not stopped\n", port_id);
+ return;
+ }
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (queue_id >= dev_info.nb_rx_queues) {
+ printf("Error: input queue_id should be 0 ... "
+ "%d\n", dev_info.nb_rx_queues - 1);
+ return;
+ }
+
+ single_offload = search_rx_offload(res->offload);
+ if (single_offload == 0) {
+ printf("Unknown offload name: %s\n", res->offload);
+ return;
+ }
+
+ if (!strcmp(res->on_off, "on"))
+ port->rx_conf[queue_id].offloads |= single_offload;
+ else
+ port->rx_conf[queue_id].offloads &= ~single_offload;
+
+ cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_queue_rx_offload = {
+ .f = cmd_config_per_queue_rx_offload_parsed,
+ .data = NULL,
+ .help_str = "port <port_id> rxq <queue_id> rx_offload "
+ "vlan_strip|ipv4_cksum|"
+ "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|"
+ "macsec_strip|header_split|vlan_filter|vlan_extend|"
+ "jumbo_frame|crc_strip|scatter|timestamp|security|keep_crc "
+ "on|off",
+ .tokens = {
+ (void *)&cmd_config_per_queue_rx_offload_result_port,
+ (void *)&cmd_config_per_queue_rx_offload_result_port_id,
+ (void *)&cmd_config_per_queue_rx_offload_result_rxq,
+ (void *)&cmd_config_per_queue_rx_offload_result_queue_id,
+ (void *)&cmd_config_per_queue_rx_offload_result_rxoffload,
+ (void *)&cmd_config_per_queue_rx_offload_result_offload,
+ (void *)&cmd_config_per_queue_rx_offload_result_on_off,
+ NULL,
+ }
+};
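+/*
+ * Illustrative usage (per-queue; the port must be stopped first):
+ *   testpmd> port 0 rxq 1 rx_offload vlan_strip off
+ */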
+
+/* Get Tx offload capabilities */
+struct cmd_tx_offload_get_capa_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t tx_offload;
+ cmdline_fixed_string_t capabilities;
+};
+
+cmdline_parse_token_string_t cmd_tx_offload_get_capa_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_tx_offload_get_capa_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_tx_offload_get_capa_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_tx_offload_get_capa_tx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ tx_offload, "tx_offload");
+cmdline_parse_token_string_t cmd_tx_offload_get_capa_capabilities =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_capa_result,
+ capabilities, "capabilities");
+
+static void
+print_tx_offloads(uint64_t offloads)
+{
+ uint64_t single_offload;
+ int begin;
+ int end;
+ int bit;
+
+ if (offloads == 0)
+ return;
+
+ begin = __builtin_ctzll(offloads);
+ end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads);
+
+ single_offload = 1 << begin;
+ for (bit = begin; bit < end; bit++) {
+ if (offloads & single_offload)
+ printf(" %s",
+ rte_eth_dev_tx_offload_name(single_offload));
+ single_offload <<= 1;
+ }
+}
+
+static void
+cmd_tx_offload_get_capa_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tx_offload_get_capa_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ uint64_t queue_offloads;
+ uint64_t port_offloads;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ queue_offloads = dev_info.tx_queue_offload_capa;
+ port_offloads = dev_info.tx_offload_capa ^ queue_offloads;
+
+ printf("Tx Offloading Capabilities of port %d :\n", port_id);
+ printf(" Per Queue :");
+ print_tx_offloads(queue_offloads);
+
+ printf("\n");
+ printf(" Per Port :");
+ print_tx_offloads(port_offloads);
+ printf("\n\n");
+}
+
+cmdline_parse_inst_t cmd_tx_offload_get_capa = {
+ .f = cmd_tx_offload_get_capa_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> tx_offload capabilities",
+ .tokens = {
+ (void *)&cmd_tx_offload_get_capa_show,
+ (void *)&cmd_tx_offload_get_capa_port,
+ (void *)&cmd_tx_offload_get_capa_port_id,
+ (void *)&cmd_tx_offload_get_capa_tx_offload,
+ (void *)&cmd_tx_offload_get_capa_capabilities,
+ NULL,
+ }
+};
+
+/* Get Tx offload configuration */
+struct cmd_tx_offload_get_configuration_result {
+ cmdline_fixed_string_t show;
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t tx_offload;
+ cmdline_fixed_string_t configuration;
+};
+
+cmdline_parse_token_string_t cmd_tx_offload_get_configuration_show =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ show, "show");
+cmdline_parse_token_string_t cmd_tx_offload_get_configuration_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_tx_offload_get_configuration_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_tx_offload_get_configuration_tx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ tx_offload, "tx_offload");
+cmdline_parse_token_string_t cmd_tx_offload_get_configuration_configuration =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_tx_offload_get_configuration_result,
+ configuration, "configuration");
+
+static void
+cmd_tx_offload_get_configuration_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_tx_offload_get_configuration_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ struct rte_port *port = &ports[port_id];
+ uint64_t port_offloads;
+ uint64_t queue_offloads;
+ uint16_t nb_tx_queues;
+ int q;
+
+ printf("Tx Offloading Configuration of port %d :\n", port_id);
+
+ port_offloads = port->dev_conf.txmode.offloads;
+ printf(" Port :");
+ print_tx_offloads(port_offloads);
+ printf("\n");
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ nb_tx_queues = dev_info.nb_tx_queues;
+ for (q = 0; q < nb_tx_queues; q++) {
+ queue_offloads = port->tx_conf[q].offloads;
+ printf(" Queue[%2d] :", q);
+ print_tx_offloads(queue_offloads);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+cmdline_parse_inst_t cmd_tx_offload_get_configuration = {
+ .f = cmd_tx_offload_get_configuration_parsed,
+ .data = NULL,
+ .help_str = "show port <port_id> tx_offload configuration",
+ .tokens = {
+ (void *)&cmd_tx_offload_get_configuration_show,
+ (void *)&cmd_tx_offload_get_configuration_port,
+ (void *)&cmd_tx_offload_get_configuration_port_id,
+ (void *)&cmd_tx_offload_get_configuration_tx_offload,
+ (void *)&cmd_tx_offload_get_configuration_configuration,
+ NULL,
+ }
+};
+
+/* Enable/disable a per-port Tx offload */
+struct cmd_config_per_port_tx_offload_result {
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t config;
+ portid_t port_id;
+ cmdline_fixed_string_t tx_offload;
+ cmdline_fixed_string_t offload;
+ cmdline_fixed_string_t on_off;
+};
+
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ port, "port");
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_config =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ config, "config");
+cmdline_parse_token_num_t cmd_config_per_port_tx_offload_result_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_tx_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ tx_offload, "tx_offload");
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ offload, "vlan_insert#ipv4_cksum#udp_cksum#tcp_cksum#"
+ "sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#"
+ "qinq_insert#vxlan_tnl_tso#gre_tnl_tso#"
+ "ipip_tnl_tso#geneve_tnl_tso#macsec_insert#"
+ "mt_lockfree#multi_segs#mbuf_fast_free#security");
+cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_port_tx_offload_result,
+ on_off, "on#off");
+
+static uint64_t
+search_tx_offload(const char *name)
+{
+ uint64_t single_offload;
+ const char *single_name;
+ int found = 0;
+ unsigned int bit;
+
+ single_offload = 1;
+ for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) {
+		single_name = rte_eth_dev_tx_offload_name(single_offload);
+		if (single_name == NULL)
+			break;
+		if (!strcasecmp(single_name, name)) {
+			found = 1;
+			break;
+		} else if (!strcasecmp(single_name, "UNKNOWN"))
+			break;
+ single_offload <<= 1;
+ }
+
+ if (found)
+ return single_offload;
+
+ return 0;
+}
+
+static void
+cmd_config_per_port_tx_offload_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_per_port_tx_offload_result *res = parsed_result;
+ portid_t port_id = res->port_id;
+ struct rte_eth_dev_info dev_info;
+ struct rte_port *port = &ports[port_id];
+ uint64_t single_offload;
+ uint16_t nb_tx_queues;
+ int q;
+
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Error: Can't config offload when Port %d "
+ "is not stopped\n", port_id);
+ return;
+ }
+
+ single_offload = search_tx_offload(res->offload);
+ if (single_offload == 0) {
+ printf("Unknown offload name: %s\n", res->offload);
+ return;
+ }
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ nb_tx_queues = dev_info.nb_tx_queues;
+ if (!strcmp(res->on_off, "on")) {
+ port->dev_conf.txmode.offloads |= single_offload;
+ for (q = 0; q < nb_tx_queues; q++)
+ port->tx_conf[q].offloads |= single_offload;
+ } else {
+ port->dev_conf.txmode.offloads &= ~single_offload;
+ for (q = 0; q < nb_tx_queues; q++)
+ port->tx_conf[q].offloads &= ~single_offload;
+ }
+
+ cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_port_tx_offload = {
+ .f = cmd_config_per_port_tx_offload_parsed,
+ .data = NULL,
+ .help_str = "port config <port_id> tx_offload "
+ "vlan_insert|ipv4_cksum|udp_cksum|tcp_cksum|"
+ "sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|"
+ "qinq_insert|vxlan_tnl_tso|gre_tnl_tso|"
+ "ipip_tnl_tso|geneve_tnl_tso|macsec_insert|"
+ "mt_lockfree|multi_segs|mbuf_fast_free|security "
+ "on|off",
+ .tokens = {
+ (void *)&cmd_config_per_port_tx_offload_result_port,
+ (void *)&cmd_config_per_port_tx_offload_result_config,
+ (void *)&cmd_config_per_port_tx_offload_result_port_id,
+ (void *)&cmd_config_per_port_tx_offload_result_tx_offload,
+ (void *)&cmd_config_per_port_tx_offload_result_offload,
+ (void *)&cmd_config_per_port_tx_offload_result_on_off,
+ NULL,
+ }
+};
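+/*
+ * Illustrative usage (the port must be stopped first):
+ *   testpmd> port config 0 tx_offload multi_segs on
+ */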
+
+/* Enable/disable a per-queue Tx offload */
+struct cmd_config_per_queue_tx_offload_result {
+ cmdline_fixed_string_t port;
+ portid_t port_id;
+ cmdline_fixed_string_t txq;
+ uint16_t queue_id;
+ cmdline_fixed_string_t tx_offload;
+ cmdline_fixed_string_t offload;
+ cmdline_fixed_string_t on_off;
+};
+
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_port =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ port, "port");
+cmdline_parse_token_num_t cmd_config_per_queue_tx_offload_result_port_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ port_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txq =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ txq, "txq");
+cmdline_parse_token_num_t cmd_config_per_queue_tx_offload_result_queue_id =
+ TOKEN_NUM_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ queue_id, UINT16);
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txoffload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ tx_offload, "tx_offload");
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_offload =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ offload, "vlan_insert#ipv4_cksum#udp_cksum#tcp_cksum#"
+ "sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#"
+ "qinq_insert#vxlan_tnl_tso#gre_tnl_tso#"
+ "ipip_tnl_tso#geneve_tnl_tso#macsec_insert#"
+ "mt_lockfree#multi_segs#mbuf_fast_free#security");
+cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_on_off =
+ TOKEN_STRING_INITIALIZER
+ (struct cmd_config_per_queue_tx_offload_result,
+ on_off, "on#off");
+
+static void
+cmd_config_per_queue_tx_offload_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_config_per_queue_tx_offload_result *res = parsed_result;
+ struct rte_eth_dev_info dev_info;
+ portid_t port_id = res->port_id;
+ uint16_t queue_id = res->queue_id;
+ struct rte_port *port = &ports[port_id];
+ uint64_t single_offload;
+
+ if (port->port_status != RTE_PORT_STOPPED) {
+ printf("Error: Can't config offload when Port %d "
+ "is not stopped\n", port_id);
+ return;
+ }
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (queue_id >= dev_info.nb_tx_queues) {
+ printf("Error: input queue_id should be 0 ... "
+ "%d\n", dev_info.nb_tx_queues - 1);
+ return;
+ }
+
+ single_offload = search_tx_offload(res->offload);
+ if (single_offload == 0) {
+ printf("Unknown offload name: %s\n", res->offload);
+ return;
+ }
+
+ if (!strcmp(res->on_off, "on"))
+ port->tx_conf[queue_id].offloads |= single_offload;
+ else
+ port->tx_conf[queue_id].offloads &= ~single_offload;
+
+ cmd_reconfig_device_queue(port_id, 1, 1);
+}
+
+cmdline_parse_inst_t cmd_config_per_queue_tx_offload = {
+ .f = cmd_config_per_queue_tx_offload_parsed,
+ .data = NULL,
+ .help_str = "port <port_id> txq <queue_id> tx_offload "
+ "vlan_insert|ipv4_cksum|udp_cksum|tcp_cksum|"
+ "sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|"
+ "qinq_insert|vxlan_tnl_tso|gre_tnl_tso|"
+ "ipip_tnl_tso|geneve_tnl_tso|macsec_insert|"
+ "mt_lockfree|multi_segs|mbuf_fast_free|security "
+ "on|off",
+ .tokens = {
+ (void *)&cmd_config_per_queue_tx_offload_result_port,
+ (void *)&cmd_config_per_queue_tx_offload_result_port_id,
+ (void *)&cmd_config_per_queue_tx_offload_result_txq,
+ (void *)&cmd_config_per_queue_tx_offload_result_queue_id,
+ (void *)&cmd_config_per_queue_tx_offload_result_txoffload,
+ (void *)&cmd_config_per_queue_tx_offload_result_offload,
+ (void *)&cmd_config_per_queue_tx_offload_result_on_off,
+ NULL,
+ }
+};
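+/*
+ * Illustrative usage (per-queue; the port must be stopped first):
+ *   testpmd> port 0 txq 1 tx_offload mbuf_fast_free off
+ */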
+
/* ******************************************************************************** */
/* list of instructions */
@@ -16130,12 +17702,16 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_operate_detach_port,
(cmdline_parse_inst_t *)&cmd_config_speed_all,
(cmdline_parse_inst_t *)&cmd_config_speed_specific,
+ (cmdline_parse_inst_t *)&cmd_config_loopback_all,
+ (cmdline_parse_inst_t *)&cmd_config_loopback_specific,
(cmdline_parse_inst_t *)&cmd_config_rx_tx,
(cmdline_parse_inst_t *)&cmd_config_mtu,
(cmdline_parse_inst_t *)&cmd_config_max_pkt_len,
(cmdline_parse_inst_t *)&cmd_config_rx_mode_flag,
(cmdline_parse_inst_t *)&cmd_config_rss,
+ (cmdline_parse_inst_t *)&cmd_config_rxtx_ring_size,
(cmdline_parse_inst_t *)&cmd_config_rxtx_queue,
+ (cmdline_parse_inst_t *)&cmd_setup_rxtx_queue,
(cmdline_parse_inst_t *)&cmd_config_rss_reta,
(cmdline_parse_inst_t *)&cmd_showport_reta,
(cmdline_parse_inst_t *)&cmd_config_burst,
@@ -16234,6 +17810,10 @@ cmdline_parse_ctx_t main_ctx[] = {
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
(cmdline_parse_inst_t *)&cmd_set_port_tm_hierarchy_default,
#endif
+ (cmdline_parse_inst_t *)&cmd_set_vxlan,
+ (cmdline_parse_inst_t *)&cmd_set_vxlan_with_vlan,
+ (cmdline_parse_inst_t *)&cmd_set_nvgre,
+ (cmdline_parse_inst_t *)&cmd_set_nvgre_with_vlan,
(cmdline_parse_inst_t *)&cmd_ddp_add,
(cmdline_parse_inst_t *)&cmd_ddp_del,
(cmdline_parse_inst_t *)&cmd_ddp_get_list,
@@ -16271,7 +17851,22 @@ cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_add_port_tm_leaf_node,
(cmdline_parse_inst_t *)&cmd_del_port_tm_node,
(cmdline_parse_inst_t *)&cmd_set_port_tm_node_parent,
+ (cmdline_parse_inst_t *)&cmd_suspend_port_tm_node,
+ (cmdline_parse_inst_t *)&cmd_resume_port_tm_node,
(cmdline_parse_inst_t *)&cmd_port_tm_hierarchy_commit,
+ (cmdline_parse_inst_t *)&cmd_cfg_tunnel_udp_port,
+ (cmdline_parse_inst_t *)&cmd_rx_offload_get_capa,
+ (cmdline_parse_inst_t *)&cmd_rx_offload_get_configuration,
+ (cmdline_parse_inst_t *)&cmd_config_per_port_rx_offload,
+ (cmdline_parse_inst_t *)&cmd_config_per_queue_rx_offload,
+ (cmdline_parse_inst_t *)&cmd_tx_offload_get_capa,
+ (cmdline_parse_inst_t *)&cmd_tx_offload_get_configuration,
+ (cmdline_parse_inst_t *)&cmd_config_per_port_tx_offload,
+ (cmdline_parse_inst_t *)&cmd_config_per_queue_tx_offload,
+#ifdef RTE_LIBRTE_BPF
+ (cmdline_parse_inst_t *)&cmd_operate_bpf_ld_parse,
+ (cmdline_parse_inst_t *)&cmd_operate_bpf_unld_parse,
+#endif
NULL,
};
diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index a5cf84f7..f9260600 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -14,6 +14,7 @@
#include <sys/socket.h>
#include <rte_common.h>
+#include <rte_eth_ctrl.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <cmdline_parse.h>
@@ -68,6 +69,7 @@ enum index {
PRIORITY,
INGRESS,
EGRESS,
+ TRANSFER,
/* Validate/create pattern. */
PATTERN,
@@ -85,8 +87,12 @@ enum index {
ITEM_PF,
ITEM_VF,
ITEM_VF_ID,
- ITEM_PORT,
- ITEM_PORT_INDEX,
+ ITEM_PHY_PORT,
+ ITEM_PHY_PORT_INDEX,
+ ITEM_PORT_ID,
+ ITEM_PORT_ID_ID,
+ ITEM_MARK,
+ ITEM_MARK_ID,
ITEM_RAW,
ITEM_RAW_RELATIVE,
ITEM_RAW_SEARCH,
@@ -98,11 +104,11 @@ enum index {
ITEM_ETH_SRC,
ITEM_ETH_TYPE,
ITEM_VLAN,
- ITEM_VLAN_TPID,
ITEM_VLAN_TCI,
ITEM_VLAN_PCP,
ITEM_VLAN_DEI,
ITEM_VLAN_VID,
+ ITEM_VLAN_INNER_TYPE,
ITEM_IPV4,
ITEM_IPV4_TOS,
ITEM_IPV4_TTL,
@@ -150,6 +156,28 @@ enum index {
ITEM_GENEVE,
ITEM_GENEVE_VNI,
ITEM_GENEVE_PROTO,
+ ITEM_VXLAN_GPE,
+ ITEM_VXLAN_GPE_VNI,
+ ITEM_ARP_ETH_IPV4,
+ ITEM_ARP_ETH_IPV4_SHA,
+ ITEM_ARP_ETH_IPV4_SPA,
+ ITEM_ARP_ETH_IPV4_THA,
+ ITEM_ARP_ETH_IPV4_TPA,
+ ITEM_IPV6_EXT,
+ ITEM_IPV6_EXT_NEXT_HDR,
+ ITEM_ICMP6,
+ ITEM_ICMP6_TYPE,
+ ITEM_ICMP6_CODE,
+ ITEM_ICMP6_ND_NS,
+ ITEM_ICMP6_ND_NS_TARGET_ADDR,
+ ITEM_ICMP6_ND_NA,
+ ITEM_ICMP6_ND_NA_TARGET_ADDR,
+ ITEM_ICMP6_ND_OPT,
+ ITEM_ICMP6_ND_OPT_TYPE,
+ ITEM_ICMP6_ND_OPT_SLA_ETH,
+ ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
+ ITEM_ICMP6_ND_OPT_TLA_ETH,
+ ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
/* Validate/create actions. */
ACTIONS,
@@ -157,6 +185,8 @@ enum index {
ACTION_END,
ACTION_VOID,
ACTION_PASSTHRU,
+ ACTION_JUMP,
+ ACTION_JUMP_GROUP,
ACTION_MARK,
ACTION_MARK_ID,
ACTION_FLAG,
@@ -164,33 +194,106 @@ enum index {
ACTION_QUEUE_INDEX,
ACTION_DROP,
ACTION_COUNT,
- ACTION_DUP,
- ACTION_DUP_INDEX,
+ ACTION_COUNT_SHARED,
+ ACTION_COUNT_ID,
ACTION_RSS,
+ ACTION_RSS_FUNC,
+ ACTION_RSS_LEVEL,
+ ACTION_RSS_FUNC_DEFAULT,
+ ACTION_RSS_FUNC_TOEPLITZ,
+ ACTION_RSS_FUNC_SIMPLE_XOR,
+ ACTION_RSS_TYPES,
+ ACTION_RSS_TYPE,
+ ACTION_RSS_KEY,
+ ACTION_RSS_KEY_LEN,
ACTION_RSS_QUEUES,
ACTION_RSS_QUEUE,
ACTION_PF,
ACTION_VF,
ACTION_VF_ORIGINAL,
ACTION_VF_ID,
+ ACTION_PHY_PORT,
+ ACTION_PHY_PORT_ORIGINAL,
+ ACTION_PHY_PORT_INDEX,
+ ACTION_PORT_ID,
+ ACTION_PORT_ID_ORIGINAL,
+ ACTION_PORT_ID_ID,
ACTION_METER,
ACTION_METER_ID,
+ ACTION_OF_SET_MPLS_TTL,
+ ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
+ ACTION_OF_DEC_MPLS_TTL,
+ ACTION_OF_SET_NW_TTL,
+ ACTION_OF_SET_NW_TTL_NW_TTL,
+ ACTION_OF_DEC_NW_TTL,
+ ACTION_OF_COPY_TTL_OUT,
+ ACTION_OF_COPY_TTL_IN,
+ ACTION_OF_POP_VLAN,
+ ACTION_OF_PUSH_VLAN,
+ ACTION_OF_PUSH_VLAN_ETHERTYPE,
+ ACTION_OF_SET_VLAN_VID,
+ ACTION_OF_SET_VLAN_VID_VLAN_VID,
+ ACTION_OF_SET_VLAN_PCP,
+ ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
+ ACTION_OF_POP_MPLS,
+ ACTION_OF_POP_MPLS_ETHERTYPE,
+ ACTION_OF_PUSH_MPLS,
+ ACTION_OF_PUSH_MPLS_ETHERTYPE,
+ ACTION_VXLAN_ENCAP,
+ ACTION_VXLAN_DECAP,
+ ACTION_NVGRE_ENCAP,
+ ACTION_NVGRE_DECAP,
};
-/** Size of pattern[] field in struct rte_flow_item_raw. */
-#define ITEM_RAW_PATTERN_SIZE 36
+/** Maximum size for pattern in struct rte_flow_item_raw. */
+#define ITEM_RAW_PATTERN_SIZE 40
/** Storage size for struct rte_flow_item_raw including pattern. */
#define ITEM_RAW_SIZE \
- (offsetof(struct rte_flow_item_raw, pattern) + ITEM_RAW_PATTERN_SIZE)
+ (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
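+/*
+ * struct rte_flow_item_raw now carries a pattern pointer rather than a
+ * flexible array: the pattern bytes live in ITEM_RAW_PATTERN_SIZE spare
+ * bytes past the structure, and parse_string() stores their address in
+ * the pattern field (see the ITEM_RAW token's ARGS() below).
+ */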
-/** Number of queue[] entries in struct rte_flow_action_rss. */
-#define ACTION_RSS_NUM 32
+/** Maximum number of queue indices in struct rte_flow_action_rss. */
+#define ACTION_RSS_QUEUE_NUM 32
-/** Storage size for struct rte_flow_action_rss including queues. */
-#define ACTION_RSS_SIZE \
- (offsetof(struct rte_flow_action_rss, queue) + \
- sizeof(*((struct rte_flow_action_rss *)0)->queue) * ACTION_RSS_NUM)
+/** Storage for struct rte_flow_action_rss including external data. */
+struct action_rss_data {
+ struct rte_flow_action_rss conf;
+ uint8_t key[RSS_HASH_KEY_LENGTH];
+ uint16_t queue[ACTION_RSS_QUEUE_NUM];
+};
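+
+/*
+ * parse_vc_action_rss() points conf.key and conf.queue at the key[] and
+ * queue[] arrays above, so a single allocation carries the action
+ * configuration together with the external data it references.
+ */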
+
+/** Maximum number of items in struct rte_flow_action_vxlan_encap. */
+#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
+
+/** Storage for struct rte_flow_action_vxlan_encap including external data. */
+struct action_vxlan_encap_data {
+ struct rte_flow_action_vxlan_encap conf;
+ struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
+ struct rte_flow_item_eth item_eth;
+ struct rte_flow_item_vlan item_vlan;
+ union {
+ struct rte_flow_item_ipv4 item_ipv4;
+ struct rte_flow_item_ipv6 item_ipv6;
+ };
+ struct rte_flow_item_udp item_udp;
+ struct rte_flow_item_vxlan item_vxlan;
+};
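+
+/*
+ * parse_vc_action_vxlan_encap() chains items[] as
+ * ETH / VLAN / IPV4-or-IPV6 / UDP / VXLAN / END and points
+ * conf.definition at it, which is why one slot is reserved for the
+ * terminating END item.
+ */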
+
+/** Maximum number of items in struct rte_flow_action_nvgre_encap. */
+#define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
+
+/** Storage for struct rte_flow_action_nvgre_encap including external data. */
+struct action_nvgre_encap_data {
+ struct rte_flow_action_nvgre_encap conf;
+ struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
+ struct rte_flow_item_eth item_eth;
+ struct rte_flow_item_vlan item_vlan;
+ union {
+ struct rte_flow_item_ipv4 item_ipv4;
+ struct rte_flow_item_ipv6 item_ipv6;
+ };
+ struct rte_flow_item_nvgre item_nvgre;
+};
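+
+/*
+ * Same layout idea as action_vxlan_encap_data: items[] is chained as
+ * ETH / VLAN / IPV4-or-IPV6 / NVGRE / END and conf.definition points at it.
+ */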
/** Maximum number of subsequent tokens and arguments on the stack. */
#define CTX_STACK_SIZE 16
@@ -217,6 +320,9 @@ struct context {
struct arg {
uint32_t hton:1; /**< Use network byte ordering. */
uint32_t sign:1; /**< Value is signed. */
+ uint32_t bounded:1; /**< Value is bounded. */
+ uintmax_t min; /**< Minimum value if bounded. */
+ uintmax_t max; /**< Maximum value if bounded. */
uint32_t offset; /**< Relative offset from ctx->object. */
uint32_t size; /**< Field size. */
const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
@@ -309,11 +415,21 @@ struct token {
.size = sizeof(*((s *)0)->f), \
})
-/** Static initializer for ARGS() with arbitrary size. */
-#define ARGS_ENTRY_USZ(s, f, sz) \
+/** Static initializer for ARGS() with arbitrary offset and size. */
+#define ARGS_ENTRY_ARB(o, s) \
(&(const struct arg){ \
- .offset = offsetof(s, f), \
- .size = (sz), \
+ .offset = (o), \
+ .size = (s), \
+ })
+
+/** Same as ARGS_ENTRY_ARB() with bounded values. */
+#define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
+ (&(const struct arg){ \
+ .bounded = 1, \
+ .min = (i), \
+ .max = (a), \
+ .offset = (o), \
+ .size = (s), \
})
/** Same as ARGS_ENTRY() using network byte ordering. */
@@ -343,7 +459,7 @@ struct buffer {
} destroy; /**< Destroy arguments. */
struct {
uint32_t rule;
- enum rte_flow_action_type action;
+ struct rte_flow_action action;
} query; /**< Query arguments. */
struct {
uint32_t *group;
@@ -384,6 +500,7 @@ static const enum index next_vc_attr[] = {
PRIORITY,
INGRESS,
EGRESS,
+ TRANSFER,
PATTERN,
ZERO,
};
@@ -416,7 +533,9 @@ static const enum index next_item[] = {
ITEM_ANY,
ITEM_PF,
ITEM_VF,
- ITEM_PORT,
+ ITEM_PHY_PORT,
+ ITEM_PORT_ID,
+ ITEM_MARK,
ITEM_RAW,
ITEM_ETH,
ITEM_VLAN,
@@ -436,6 +555,15 @@ static const enum index next_item[] = {
ITEM_GTPC,
ITEM_GTPU,
ITEM_GENEVE,
+ ITEM_VXLAN_GPE,
+ ITEM_ARP_ETH_IPV4,
+ ITEM_IPV6_EXT,
+ ITEM_ICMP6,
+ ITEM_ICMP6_ND_NS,
+ ITEM_ICMP6_ND_NA,
+ ITEM_ICMP6_ND_OPT,
+ ITEM_ICMP6_ND_OPT_SLA_ETH,
+ ITEM_ICMP6_ND_OPT_TLA_ETH,
ZERO,
};
@@ -457,8 +585,20 @@ static const enum index item_vf[] = {
ZERO,
};
-static const enum index item_port[] = {
- ITEM_PORT_INDEX,
+static const enum index item_phy_port[] = {
+ ITEM_PHY_PORT_INDEX,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_port_id[] = {
+ ITEM_PORT_ID_ID,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_mark[] = {
+ ITEM_MARK_ID,
ITEM_NEXT,
ZERO,
};
@@ -482,11 +622,11 @@ static const enum index item_eth[] = {
};
static const enum index item_vlan[] = {
- ITEM_VLAN_TPID,
ITEM_VLAN_TCI,
ITEM_VLAN_PCP,
ITEM_VLAN_DEI,
ITEM_VLAN_VID,
+ ITEM_VLAN_INNER_TYPE,
ITEM_NEXT,
ZERO,
};
@@ -586,20 +726,96 @@ static const enum index item_geneve[] = {
ZERO,
};
+static const enum index item_vxlan_gpe[] = {
+ ITEM_VXLAN_GPE_VNI,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_arp_eth_ipv4[] = {
+ ITEM_ARP_ETH_IPV4_SHA,
+ ITEM_ARP_ETH_IPV4_SPA,
+ ITEM_ARP_ETH_IPV4_THA,
+ ITEM_ARP_ETH_IPV4_TPA,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_ipv6_ext[] = {
+ ITEM_IPV6_EXT_NEXT_HDR,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6[] = {
+ ITEM_ICMP6_TYPE,
+ ITEM_ICMP6_CODE,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_ns[] = {
+ ITEM_ICMP6_ND_NS_TARGET_ADDR,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_na[] = {
+ ITEM_ICMP6_ND_NA_TARGET_ADDR,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_opt[] = {
+ ITEM_ICMP6_ND_OPT_TYPE,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_opt_sla_eth[] = {
+ ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
+ ITEM_NEXT,
+ ZERO,
+};
+
+static const enum index item_icmp6_nd_opt_tla_eth[] = {
+ ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
+ ITEM_NEXT,
+ ZERO,
+};
+
static const enum index next_action[] = {
ACTION_END,
ACTION_VOID,
ACTION_PASSTHRU,
+ ACTION_JUMP,
ACTION_MARK,
ACTION_FLAG,
ACTION_QUEUE,
ACTION_DROP,
ACTION_COUNT,
- ACTION_DUP,
ACTION_RSS,
ACTION_PF,
ACTION_VF,
+ ACTION_PHY_PORT,
+ ACTION_PORT_ID,
ACTION_METER,
+ ACTION_OF_SET_MPLS_TTL,
+ ACTION_OF_DEC_MPLS_TTL,
+ ACTION_OF_SET_NW_TTL,
+ ACTION_OF_DEC_NW_TTL,
+ ACTION_OF_COPY_TTL_OUT,
+ ACTION_OF_COPY_TTL_IN,
+ ACTION_OF_POP_VLAN,
+ ACTION_OF_PUSH_VLAN,
+ ACTION_OF_SET_VLAN_VID,
+ ACTION_OF_SET_VLAN_PCP,
+ ACTION_OF_POP_MPLS,
+ ACTION_OF_PUSH_MPLS,
+ ACTION_VXLAN_ENCAP,
+ ACTION_VXLAN_DECAP,
+ ACTION_NVGRE_ENCAP,
+ ACTION_NVGRE_DECAP,
ZERO,
};
@@ -615,13 +831,19 @@ static const enum index action_queue[] = {
ZERO,
};
-static const enum index action_dup[] = {
- ACTION_DUP_INDEX,
+static const enum index action_count[] = {
+ ACTION_COUNT_ID,
+ ACTION_COUNT_SHARED,
ACTION_NEXT,
ZERO,
};
static const enum index action_rss[] = {
+ ACTION_RSS_FUNC,
+ ACTION_RSS_LEVEL,
+ ACTION_RSS_TYPES,
+ ACTION_RSS_KEY,
+ ACTION_RSS_KEY_LEN,
ACTION_RSS_QUEUES,
ACTION_NEXT,
ZERO,
@@ -634,12 +856,74 @@ static const enum index action_vf[] = {
ZERO,
};
+static const enum index action_phy_port[] = {
+ ACTION_PHY_PORT_ORIGINAL,
+ ACTION_PHY_PORT_INDEX,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_port_id[] = {
+ ACTION_PORT_ID_ORIGINAL,
+ ACTION_PORT_ID_ID,
+ ACTION_NEXT,
+ ZERO,
+};
+
static const enum index action_meter[] = {
ACTION_METER_ID,
ACTION_NEXT,
ZERO,
};
+static const enum index action_of_set_mpls_ttl[] = {
+ ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_set_nw_ttl[] = {
+ ACTION_OF_SET_NW_TTL_NW_TTL,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_push_vlan[] = {
+ ACTION_OF_PUSH_VLAN_ETHERTYPE,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_set_vlan_vid[] = {
+ ACTION_OF_SET_VLAN_VID_VLAN_VID,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_set_vlan_pcp[] = {
+ ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_pop_mpls[] = {
+ ACTION_OF_POP_MPLS_ETHERTYPE,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_of_push_mpls[] = {
+ ACTION_OF_PUSH_MPLS_ETHERTYPE,
+ ACTION_NEXT,
+ ZERO,
+};
+
+static const enum index action_jump[] = {
+ ACTION_JUMP_GROUP,
+ ACTION_NEXT,
+ ZERO,
+};
+
static int parse_init(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -650,9 +934,24 @@ static int parse_vc_spec(struct context *, const struct token *,
const char *, unsigned int, void *, unsigned int);
static int parse_vc_conf(struct context *, const struct token *,
const char *, unsigned int, void *, unsigned int);
+static int parse_vc_action_rss(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_rss_func(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_rss_type(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
static int parse_vc_action_rss_queue(struct context *, const struct token *,
const char *, unsigned int, void *,
unsigned int);
+static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
+static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
+ const char *, unsigned int, void *,
+ unsigned int);
static int parse_destroy(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
@@ -705,6 +1004,8 @@ static int comp_port(struct context *, const struct token *,
unsigned int, char *, unsigned int);
static int comp_rule_id(struct context *, const struct token *,
unsigned int, char *, unsigned int);
+static int comp_vc_action_rss_type(struct context *, const struct token *,
+ unsigned int, char *, unsigned int);
static int comp_vc_action_rss_queue(struct context *, const struct token *,
unsigned int, char *, unsigned int);
@@ -856,7 +1157,7 @@ static const struct token token_list[] = {
.next = NEXT(NEXT_ENTRY(QUERY_ACTION),
NEXT_ENTRY(RULE_ID),
NEXT_ENTRY(PORT_ID)),
- .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action),
+ .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
ARGS_ENTRY(struct buffer, args.query.rule),
ARGS_ENTRY(struct buffer, port)),
.call = parse_query,
@@ -928,6 +1229,12 @@ static const struct token token_list[] = {
.next = NEXT(next_vc_attr),
.call = parse_vc,
},
+ [TRANSFER] = {
+ .name = "transfer",
+ .help = "apply rule directly to endpoints found in pattern",
+ .next = NEXT(next_vc_attr),
+ .call = parse_vc,
+ },
/* Validate/create pattern. */
[PATTERN] = {
.name = "pattern",
@@ -1001,36 +1308,64 @@ static const struct token token_list[] = {
},
[ITEM_PF] = {
.name = "pf",
- .help = "match packets addressed to the physical function",
+ .help = "match traffic from/to the physical function",
.priv = PRIV_ITEM(PF, 0),
.next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
.call = parse_vc,
},
[ITEM_VF] = {
.name = "vf",
- .help = "match packets addressed to a virtual function ID",
+ .help = "match traffic from/to a virtual function ID",
.priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
.next = NEXT(item_vf),
.call = parse_vc,
},
[ITEM_VF_ID] = {
.name = "id",
- .help = "destination VF ID",
+ .help = "VF ID",
.next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
.args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
},
- [ITEM_PORT] = {
- .name = "port",
- .help = "device-specific physical port index to use",
- .priv = PRIV_ITEM(PORT, sizeof(struct rte_flow_item_port)),
- .next = NEXT(item_port),
+ [ITEM_PHY_PORT] = {
+ .name = "phy_port",
+ .help = "match traffic from/to a specific physical port",
+ .priv = PRIV_ITEM(PHY_PORT,
+ sizeof(struct rte_flow_item_phy_port)),
+ .next = NEXT(item_phy_port),
.call = parse_vc,
},
- [ITEM_PORT_INDEX] = {
+ [ITEM_PHY_PORT_INDEX] = {
.name = "index",
.help = "physical port index",
- .next = NEXT(item_port, NEXT_ENTRY(UNSIGNED), item_param),
- .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port, index)),
+ .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
+ },
+ [ITEM_PORT_ID] = {
+ .name = "port_id",
+ .help = "match traffic from/to a given DPDK port ID",
+ .priv = PRIV_ITEM(PORT_ID,
+ sizeof(struct rte_flow_item_port_id)),
+ .next = NEXT(item_port_id),
+ .call = parse_vc,
+ },
+ [ITEM_PORT_ID_ID] = {
+ .name = "id",
+ .help = "DPDK port ID",
+ .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
+ },
+ [ITEM_MARK] = {
+ .name = "mark",
+ .help = "match traffic against value set in previously matched rule",
+ .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
+ .next = NEXT(item_mark),
+ .call = parse_vc,
+ },
+ [ITEM_MARK_ID] = {
+ .name = "id",
+ .help = "Integer value to match against",
+ .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
},
[ITEM_RAW] = {
.name = "raw",
@@ -1073,9 +1408,9 @@ static const struct token token_list[] = {
NEXT_ENTRY(ITEM_PARAM_IS,
ITEM_PARAM_SPEC,
ITEM_PARAM_MASK)),
- .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, length),
- ARGS_ENTRY_USZ(struct rte_flow_item_raw,
- pattern,
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
+ ARGS_ENTRY(struct rte_flow_item_raw, length),
+ ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
ITEM_RAW_PATTERN_SIZE)),
},
[ITEM_ETH] = {
@@ -1110,12 +1445,6 @@ static const struct token token_list[] = {
.next = NEXT(item_vlan),
.call = parse_vc,
},
- [ITEM_VLAN_TPID] = {
- .name = "tpid",
- .help = "tag protocol identifier",
- .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
- .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tpid)),
- },
[ITEM_VLAN_TCI] = {
.name = "tci",
.help = "tag control information",
@@ -1143,6 +1472,13 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
tci, "\x0f\xff")),
},
+ [ITEM_VLAN_INNER_TYPE] = {
+ .name = "inner_type",
+ .help = "inner EtherType",
+ .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
+ inner_type)),
+ },
[ITEM_IPV4] = {
.name = "ipv4",
.help = "match IPv4 header",
@@ -1473,6 +1809,182 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
protocol)),
},
+ [ITEM_VXLAN_GPE] = {
+ .name = "vxlan-gpe",
+ .help = "match VXLAN-GPE header",
+ .priv = PRIV_ITEM(VXLAN_GPE,
+ sizeof(struct rte_flow_item_vxlan_gpe)),
+ .next = NEXT(item_vxlan_gpe),
+ .call = parse_vc,
+ },
+ [ITEM_VXLAN_GPE_VNI] = {
+ .name = "vni",
+ .help = "VXLAN-GPE identifier",
+ .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
+ vni)),
+ },
+ [ITEM_ARP_ETH_IPV4] = {
+ .name = "arp_eth_ipv4",
+ .help = "match ARP header for Ethernet/IPv4",
+ .priv = PRIV_ITEM(ARP_ETH_IPV4,
+ sizeof(struct rte_flow_item_arp_eth_ipv4)),
+ .next = NEXT(item_arp_eth_ipv4),
+ .call = parse_vc,
+ },
+ [ITEM_ARP_ETH_IPV4_SHA] = {
+ .name = "sha",
+ .help = "sender hardware address",
+ .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
+ sha)),
+ },
+ [ITEM_ARP_ETH_IPV4_SPA] = {
+ .name = "spa",
+ .help = "sender IPv4 address",
+ .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
+ spa)),
+ },
+ [ITEM_ARP_ETH_IPV4_THA] = {
+ .name = "tha",
+ .help = "target hardware address",
+ .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
+ tha)),
+ },
+ [ITEM_ARP_ETH_IPV4_TPA] = {
+ .name = "tpa",
+ .help = "target IPv4 address",
+ .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
+ tpa)),
+ },
+ [ITEM_IPV6_EXT] = {
+ .name = "ipv6_ext",
+ .help = "match presence of any IPv6 extension header",
+ .priv = PRIV_ITEM(IPV6_EXT,
+ sizeof(struct rte_flow_item_ipv6_ext)),
+ .next = NEXT(item_ipv6_ext),
+ .call = parse_vc,
+ },
+ [ITEM_IPV6_EXT_NEXT_HDR] = {
+ .name = "next_hdr",
+ .help = "next header",
+ .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
+ next_hdr)),
+ },
+ [ITEM_ICMP6] = {
+ .name = "icmp6",
+ .help = "match any ICMPv6 header",
+ .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
+ .next = NEXT(item_icmp6),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_TYPE] = {
+ .name = "type",
+ .help = "ICMPv6 type",
+ .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
+ type)),
+ },
+ [ITEM_ICMP6_CODE] = {
+ .name = "code",
+ .help = "ICMPv6 code",
+ .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
+ code)),
+ },
+ [ITEM_ICMP6_ND_NS] = {
+ .name = "icmp6_nd_ns",
+ .help = "match ICMPv6 neighbor discovery solicitation",
+ .priv = PRIV_ITEM(ICMP6_ND_NS,
+ sizeof(struct rte_flow_item_icmp6_nd_ns)),
+ .next = NEXT(item_icmp6_nd_ns),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
+ .name = "target_addr",
+ .help = "target address",
+ .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
+ target_addr)),
+ },
+ [ITEM_ICMP6_ND_NA] = {
+ .name = "icmp6_nd_na",
+ .help = "match ICMPv6 neighbor discovery advertisement",
+ .priv = PRIV_ITEM(ICMP6_ND_NA,
+ sizeof(struct rte_flow_item_icmp6_nd_na)),
+ .next = NEXT(item_icmp6_nd_na),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
+ .name = "target_addr",
+ .help = "target address",
+ .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
+ target_addr)),
+ },
+ [ITEM_ICMP6_ND_OPT] = {
+ .name = "icmp6_nd_opt",
+ .help = "match presence of any ICMPv6 neighbor discovery"
+ " option",
+ .priv = PRIV_ITEM(ICMP6_ND_OPT,
+ sizeof(struct rte_flow_item_icmp6_nd_opt)),
+ .next = NEXT(item_icmp6_nd_opt),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_OPT_TYPE] = {
+ .name = "type",
+ .help = "ND option type",
+ .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
+ type)),
+ },
+ [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
+ .name = "icmp6_nd_opt_sla_eth",
+ .help = "match ICMPv6 neighbor discovery source Ethernet"
+ " link-layer address option",
+ .priv = PRIV_ITEM
+ (ICMP6_ND_OPT_SLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
+ .next = NEXT(item_icmp6_nd_opt_sla_eth),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
+ .name = "sla",
+ .help = "source Ethernet LLA",
+ .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
+ },
+ [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
+ .name = "icmp6_nd_opt_tla_eth",
+ .help = "match ICMPv6 neighbor discovery target Ethernet"
+ " link-layer address option",
+ .priv = PRIV_ITEM
+ (ICMP6_ND_OPT_TLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
+ .next = NEXT(item_icmp6_nd_opt_tla_eth),
+ .call = parse_vc,
+ },
+ [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
+ .name = "tla",
+ .help = "target Ethernet LLA",
+ .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
+ item_param),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
+ },
/* Validate/create actions. */
[ACTIONS] = {
@@ -1506,6 +2018,20 @@ static const struct token token_list[] = {
.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
.call = parse_vc,
},
+ [ACTION_JUMP] = {
+ .name = "jump",
+ .help = "redirect traffic to a given group",
+ .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
+ .next = NEXT(action_jump),
+ .call = parse_vc,
+ },
+ [ACTION_JUMP_GROUP] = {
+ .name = "group",
+ .help = "group to redirect traffic to",
+ .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
+ .call = parse_vc_conf,
+ },
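+	/*
+	 * Example (illustrative): redirect matching traffic to group 2:
+	 *   flow create 0 ingress pattern eth / end actions jump group 2 / end
+	 */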
[ACTION_MARK] = {
.name = "mark",
.help = "attach 32 bit value to packets",
@@ -1552,30 +2078,100 @@ static const struct token token_list[] = {
[ACTION_COUNT] = {
.name = "count",
.help = "enable counters for this rule",
- .priv = PRIV_ACTION(COUNT, 0),
- .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .priv = PRIV_ACTION(COUNT,
+ sizeof(struct rte_flow_action_count)),
+ .next = NEXT(action_count),
.call = parse_vc,
},
- [ACTION_DUP] = {
- .name = "dup",
- .help = "duplicate packets to a given queue index",
- .priv = PRIV_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
- .next = NEXT(action_dup),
- .call = parse_vc,
+ [ACTION_COUNT_ID] = {
+ .name = "identifier",
+ .help = "counter identifier to use",
+ .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
+ .call = parse_vc_conf,
},
- [ACTION_DUP_INDEX] = {
- .name = "index",
- .help = "queue index to duplicate packets to",
- .next = NEXT(action_dup, NEXT_ENTRY(UNSIGNED)),
- .args = ARGS(ARGS_ENTRY(struct rte_flow_action_dup, index)),
+ [ACTION_COUNT_SHARED] = {
+ .name = "shared",
+ .help = "shared counter",
+ .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
+ shared, 1)),
.call = parse_vc_conf,
},
[ACTION_RSS] = {
.name = "rss",
.help = "spread packets among several queues",
- .priv = PRIV_ACTION(RSS, ACTION_RSS_SIZE),
+ .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
.next = NEXT(action_rss),
- .call = parse_vc,
+ .call = parse_vc_action_rss,
+ },
+ [ACTION_RSS_FUNC] = {
+ .name = "func",
+ .help = "RSS hash function to apply",
+ .next = NEXT(action_rss,
+ NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
+ ACTION_RSS_FUNC_TOEPLITZ,
+ ACTION_RSS_FUNC_SIMPLE_XOR)),
+ },
+ [ACTION_RSS_FUNC_DEFAULT] = {
+ .name = "default",
+ .help = "default hash function",
+ .call = parse_vc_action_rss_func,
+ },
+ [ACTION_RSS_FUNC_TOEPLITZ] = {
+ .name = "toeplitz",
+ .help = "Toeplitz hash function",
+ .call = parse_vc_action_rss_func,
+ },
+ [ACTION_RSS_FUNC_SIMPLE_XOR] = {
+ .name = "simple_xor",
+ .help = "simple XOR hash function",
+ .call = parse_vc_action_rss_func,
+ },
+ [ACTION_RSS_LEVEL] = {
+ .name = "level",
+ .help = "encapsulation level for \"types\"",
+ .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_ARB
+ (offsetof(struct action_rss_data, conf) +
+ offsetof(struct rte_flow_action_rss, level),
+ sizeof(((struct rte_flow_action_rss *)0)->
+ level))),
+ },
+ [ACTION_RSS_TYPES] = {
+ .name = "types",
+ .help = "specific RSS hash types",
+ .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
+ },
+ [ACTION_RSS_TYPE] = {
+ .name = "{type}",
+ .help = "RSS hash type",
+ .call = parse_vc_action_rss_type,
+ .comp = comp_vc_action_rss_type,
+ },
+ [ACTION_RSS_KEY] = {
+ .name = "key",
+ .help = "RSS hash key",
+ .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
+ .args = ARGS(ARGS_ENTRY_ARB(0, 0),
+ ARGS_ENTRY_ARB
+ (offsetof(struct action_rss_data, conf) +
+ offsetof(struct rte_flow_action_rss, key_len),
+ sizeof(((struct rte_flow_action_rss *)0)->
+ key_len)),
+ ARGS_ENTRY(struct action_rss_data, key)),
+ },
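+	/*
+	 * The leading ARGS_ENTRY_ARB(0, 0) above is the address argument
+	 * consumed by parse_string(); its zero size tells parse_string()
+	 * not to store the key address, since conf.key is already wired
+	 * up by parse_vc_action_rss().
+	 */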
+ [ACTION_RSS_KEY_LEN] = {
+ .name = "key_len",
+ .help = "RSS hash key length in bytes",
+ .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
+ (offsetof(struct action_rss_data, conf) +
+ offsetof(struct rte_flow_action_rss, key_len),
+ sizeof(((struct rte_flow_action_rss *)0)->
+ key_len),
+ 0,
+ RSS_HASH_KEY_LENGTH)),
},
[ACTION_RSS_QUEUES] = {
.name = "queues",
@@ -1591,14 +2187,14 @@ static const struct token token_list[] = {
},
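+	/*
+	 * Example (illustrative):
+	 *   flow create 0 ingress pattern eth / end actions
+	 *        rss func toeplitz types ipv4-udp end queues 0 1 end / end
+	 */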
[ACTION_PF] = {
.name = "pf",
- .help = "redirect packets to physical device function",
+ .help = "direct traffic to physical function",
.priv = PRIV_ACTION(PF, 0),
.next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
.call = parse_vc,
},
[ACTION_VF] = {
.name = "vf",
- .help = "redirect packets to virtual device function",
+ .help = "direct traffic to a virtual function ID",
.priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
.next = NEXT(action_vf),
.call = parse_vc,
@@ -1613,11 +2209,58 @@ static const struct token token_list[] = {
},
[ACTION_VF_ID] = {
.name = "id",
- .help = "VF ID to redirect packets to",
+ .help = "VF ID",
.next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
.args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
.call = parse_vc_conf,
},
+ [ACTION_PHY_PORT] = {
+ .name = "phy_port",
+ .help = "direct packets to physical port index",
+ .priv = PRIV_ACTION(PHY_PORT,
+ sizeof(struct rte_flow_action_phy_port)),
+ .next = NEXT(action_phy_port),
+ .call = parse_vc,
+ },
+ [ACTION_PHY_PORT_ORIGINAL] = {
+ .name = "original",
+ .help = "use original port index if possible",
+ .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
+ original, 1)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_PHY_PORT_INDEX] = {
+ .name = "index",
+ .help = "physical port index",
+ .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
+ index)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_PORT_ID] = {
+ .name = "port_id",
+ .help = "direct matching traffic to a given DPDK port ID",
+ .priv = PRIV_ACTION(PORT_ID,
+ sizeof(struct rte_flow_action_port_id)),
+ .next = NEXT(action_port_id),
+ .call = parse_vc,
+ },
+ [ACTION_PORT_ID_ORIGINAL] = {
+ .name = "original",
+ .help = "use original DPDK port ID if possible",
+ .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
+ .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
+ original, 1)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_PORT_ID_ID] = {
+ .name = "id",
+ .help = "DPDK port ID",
+ .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
+ .call = parse_vc_conf,
+ },
[ACTION_METER] = {
.name = "meter",
.help = "meter the directed packets at given id",
@@ -1633,6 +2276,200 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
.call = parse_vc_conf,
},
+ [ACTION_OF_SET_MPLS_TTL] = {
+ .name = "of_set_mpls_ttl",
+ .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
+ .priv = PRIV_ACTION
+ (OF_SET_MPLS_TTL,
+ sizeof(struct rte_flow_action_of_set_mpls_ttl)),
+ .next = NEXT(action_of_set_mpls_ttl),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
+ .name = "mpls_ttl",
+ .help = "MPLS TTL",
+ .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
+ mpls_ttl)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_DEC_MPLS_TTL] = {
+ .name = "of_dec_mpls_ttl",
+ .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
+ .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_NW_TTL] = {
+ .name = "of_set_nw_ttl",
+ .help = "OpenFlow's OFPAT_SET_NW_TTL",
+ .priv = PRIV_ACTION
+ (OF_SET_NW_TTL,
+ sizeof(struct rte_flow_action_of_set_nw_ttl)),
+ .next = NEXT(action_of_set_nw_ttl),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_NW_TTL_NW_TTL] = {
+ .name = "nw_ttl",
+ .help = "IP TTL",
+ .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
+ nw_ttl)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_DEC_NW_TTL] = {
+ .name = "of_dec_nw_ttl",
+ .help = "OpenFlow's OFPAT_DEC_NW_TTL",
+ .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_COPY_TTL_OUT] = {
+ .name = "of_copy_ttl_out",
+ .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
+ .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_COPY_TTL_IN] = {
+ .name = "of_copy_ttl_in",
+ .help = "OpenFlow's OFPAT_COPY_TTL_IN",
+ .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_POP_VLAN] = {
+ .name = "of_pop_vlan",
+ .help = "OpenFlow's OFPAT_POP_VLAN",
+ .priv = PRIV_ACTION(OF_POP_VLAN, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_OF_PUSH_VLAN] = {
+ .name = "of_push_vlan",
+ .help = "OpenFlow's OFPAT_PUSH_VLAN",
+ .priv = PRIV_ACTION
+ (OF_PUSH_VLAN,
+ sizeof(struct rte_flow_action_of_push_vlan)),
+ .next = NEXT(action_of_push_vlan),
+ .call = parse_vc,
+ },
+ [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
+ .name = "ethertype",
+ .help = "EtherType",
+ .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_push_vlan,
+ ethertype)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_SET_VLAN_VID] = {
+ .name = "of_set_vlan_vid",
+ .help = "OpenFlow's OFPAT_SET_VLAN_VID",
+ .priv = PRIV_ACTION
+ (OF_SET_VLAN_VID,
+ sizeof(struct rte_flow_action_of_set_vlan_vid)),
+ .next = NEXT(action_of_set_vlan_vid),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
+ .name = "vlan_vid",
+ .help = "VLAN id",
+ .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_set_vlan_vid,
+ vlan_vid)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_SET_VLAN_PCP] = {
+ .name = "of_set_vlan_pcp",
+ .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
+ .priv = PRIV_ACTION
+ (OF_SET_VLAN_PCP,
+ sizeof(struct rte_flow_action_of_set_vlan_pcp)),
+ .next = NEXT(action_of_set_vlan_pcp),
+ .call = parse_vc,
+ },
+ [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
+ .name = "vlan_pcp",
+ .help = "VLAN priority",
+ .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_set_vlan_pcp,
+ vlan_pcp)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_POP_MPLS] = {
+ .name = "of_pop_mpls",
+ .help = "OpenFlow's OFPAT_POP_MPLS",
+ .priv = PRIV_ACTION(OF_POP_MPLS,
+ sizeof(struct rte_flow_action_of_pop_mpls)),
+ .next = NEXT(action_of_pop_mpls),
+ .call = parse_vc,
+ },
+ [ACTION_OF_POP_MPLS_ETHERTYPE] = {
+ .name = "ethertype",
+ .help = "EtherType",
+ .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_pop_mpls,
+ ethertype)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_OF_PUSH_MPLS] = {
+ .name = "of_push_mpls",
+ .help = "OpenFlow's OFPAT_PUSH_MPLS",
+ .priv = PRIV_ACTION
+ (OF_PUSH_MPLS,
+ sizeof(struct rte_flow_action_of_push_mpls)),
+ .next = NEXT(action_of_push_mpls),
+ .call = parse_vc,
+ },
+ [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
+ .name = "ethertype",
+ .help = "EtherType",
+ .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
+ .args = ARGS(ARGS_ENTRY_HTON
+ (struct rte_flow_action_of_push_mpls,
+ ethertype)),
+ .call = parse_vc_conf,
+ },
+ [ACTION_VXLAN_ENCAP] = {
+ .name = "vxlan_encap",
+ .help = "VXLAN encapsulation, uses configuration set by \"set"
+ " vxlan\"",
+ .priv = PRIV_ACTION(VXLAN_ENCAP,
+ sizeof(struct action_vxlan_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_vxlan_encap,
+ },
+ [ACTION_VXLAN_DECAP] = {
+ .name = "vxlan_decap",
+ .help = "Performs a decapsulation action by stripping all"
+ " headers of the VXLAN tunnel network overlay from the"
+ " matched flow.",
+ .priv = PRIV_ACTION(VXLAN_DECAP, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
+ [ACTION_NVGRE_ENCAP] = {
+ .name = "nvgre_encap",
+ .help = "NVGRE encapsulation, uses configuration set by \"set"
+ " nvgre\"",
+ .priv = PRIV_ACTION(NVGRE_ENCAP,
+ sizeof(struct action_nvgre_encap_data)),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc_action_nvgre_encap,
+ },
+ [ACTION_NVGRE_DECAP] = {
+ .name = "nvgre_decap",
+ .help = "Performs a decapsulation action by stripping all"
+ " headers of the NVGRE tunnel network overlay from the"
+ " matched flow.",
+ .priv = PRIV_ACTION(NVGRE_DECAP, 0),
+ .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
+ .call = parse_vc,
+ },
};
/** Remove and return last entry from argument stack. */
@@ -1858,6 +2695,9 @@ parse_vc(struct context *ctx, const struct token *token,
case EGRESS:
out->args.vc.attr.egress = 1;
return len;
+ case TRANSFER:
+ out->args.vc.attr.transfer = 1;
+ return len;
case PATTERN:
out->args.vc.pattern =
(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
@@ -1909,6 +2749,7 @@ parse_vc(struct context *ctx, const struct token *token,
return -1;
*action = (struct rte_flow_action){
.type = priv->type,
+ .conf = data_size ? data : NULL,
};
++out->args.vc.actions_n;
ctx->object = action;
@@ -1989,7 +2830,6 @@ parse_vc_conf(struct context *ctx, const struct token *token,
void *buf, unsigned int size)
{
struct buffer *out = buf;
- struct rte_flow_action *action;
(void)size;
/* Token name must match. */
@@ -1998,14 +2838,147 @@ parse_vc_conf(struct context *ctx, const struct token *token,
/* Nothing else to do if there is no buffer. */
if (!out)
return len;
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ return len;
+}
+
+/** Parse RSS action. */
+static int
+parse_vc_action_rss(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_rss_data *action_rss_data;
+ unsigned int i;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
if (!out->args.vc.actions_n)
return -1;
action = &out->args.vc.actions[out->args.vc.actions_n - 1];
/* Point to selected object. */
ctx->object = out->args.vc.data;
ctx->objmask = NULL;
- /* Update configuration pointer. */
- action->conf = ctx->object;
+ /* Set up default configuration. */
+ action_rss_data = ctx->object;
+ *action_rss_data = (struct action_rss_data){
+ .conf = (struct rte_flow_action_rss){
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = rss_hf,
+ .key_len = sizeof(action_rss_data->key),
+ .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
+ .key = action_rss_data->key,
+ .queue = action_rss_data->queue,
+ },
+ .key = "testpmd's default RSS hash key, "
+ "override it for better balancing",
+ .queue = { 0 },
+ };
+ for (i = 0; i < action_rss_data->conf.queue_num; ++i)
+ action_rss_data->queue[i] = i;
+ if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
+ ctx->port != (portid_t)RTE_PORT_ALL) {
+ struct rte_eth_dev_info info;
+
+ rte_eth_dev_info_get(ctx->port, &info);
+ action_rss_data->conf.key_len =
+ RTE_MIN(sizeof(action_rss_data->key),
+ info.hash_key_size);
+ }
+ action->conf = &action_rss_data->conf;
+ return ret;
+}
+
+/**
+ * Parse func field for RSS action.
+ *
+ * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
+ * ACTION_RSS_FUNC_* token (ctx->curr) that triggered this call.
+ */
+static int
+parse_vc_action_rss_func(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct action_rss_data *action_rss_data;
+ enum rte_eth_hash_function func;
+
+ (void)buf;
+ (void)size;
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ switch (ctx->curr) {
+ case ACTION_RSS_FUNC_DEFAULT:
+ func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+ break;
+ case ACTION_RSS_FUNC_TOEPLITZ:
+ func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ break;
+ case ACTION_RSS_FUNC_SIMPLE_XOR:
+ func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ break;
+ default:
+ return -1;
+ }
+ if (!ctx->object)
+ return len;
+ action_rss_data = ctx->object;
+ action_rss_data->conf.func = func;
+ return len;
+}
+
+/**
+ * Parse type field for RSS action.
+ *
+ * Valid tokens are type field names and the "end" token. The first
+ * type token clears the default type set; subsequent tokens are OR'ed
+ * into it.
+ */
+static int
+parse_vc_action_rss_type(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
+ struct action_rss_data *action_rss_data;
+ unsigned int i;
+
+ (void)token;
+ (void)buf;
+ (void)size;
+ if (ctx->curr != ACTION_RSS_TYPE)
+ return -1;
+ if (!(ctx->objdata >> 16) && ctx->object) {
+ action_rss_data = ctx->object;
+ action_rss_data->conf.types = 0;
+ }
+ if (!strcmp_partial("end", str, len)) {
+ ctx->objdata &= 0xffff;
+ return len;
+ }
+ for (i = 0; rss_type_table[i].str; ++i)
+ if (!strcmp_partial(rss_type_table[i].str, str, len))
+ break;
+ if (!rss_type_table[i].str)
+ return -1;
+ ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
+ /* Repeat token. */
+ if (ctx->next_num == RTE_DIM(ctx->next))
+ return -1;
+ ctx->next[ctx->next_num++] = next;
+ if (!ctx->object)
+ return len;
+ action_rss_data = ctx->object;
+ action_rss_data->conf.types |= rss_type_table[i].rss_type;
return len;
}
@@ -2020,6 +2993,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
void *buf, unsigned int size)
{
static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
+ struct action_rss_data *action_rss_data;
int ret;
int i;
@@ -2031,11 +3005,14 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
i = ctx->objdata >> 16;
if (!strcmp_partial("end", str, len)) {
ctx->objdata &= 0xffff;
- return len;
+ goto end;
}
- if (i >= ACTION_RSS_NUM)
+ if (i >= ACTION_RSS_QUEUE_NUM)
return -1;
- if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_rss, queue[i])))
+ if (push_args(ctx,
+ ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
+ i * sizeof(action_rss_data->queue[i]),
+ sizeof(action_rss_data->queue[i]))))
return -1;
ret = parse_int(ctx, token, str, len, NULL, 0);
if (ret < 0) {
@@ -2048,12 +3025,206 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
if (ctx->next_num == RTE_DIM(ctx->next))
return -1;
ctx->next[ctx->next_num++] = next;
+end:
if (!ctx->object)
return len;
- ((struct rte_flow_action_rss *)ctx->object)->num = i;
+ action_rss_data = ctx->object;
+ action_rss_data->conf.queue_num = i;
+ action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
return len;
}
+/** Parse VXLAN encap action. */
+static int
+parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_vxlan_encap_data *action_vxlan_encap_data;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Set up default configuration. */
+ action_vxlan_encap_data = ctx->object;
+ *action_vxlan_encap_data = (struct action_vxlan_encap_data){
+ .conf = (struct rte_flow_action_vxlan_encap){
+ .definition = action_vxlan_encap_data->items,
+ },
+ .items = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &action_vxlan_encap_data->item_eth,
+ .mask = &rte_flow_item_eth_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &action_vxlan_encap_data->item_vlan,
+ .mask = &rte_flow_item_vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &action_vxlan_encap_data->item_ipv4,
+ .mask = &rte_flow_item_ipv4_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &action_vxlan_encap_data->item_udp,
+ .mask = &rte_flow_item_udp_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ .spec = &action_vxlan_encap_data->item_vxlan,
+ .mask = &rte_flow_item_vxlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ .item_eth.type = 0,
+ .item_vlan = {
+ .tci = vxlan_encap_conf.vlan_tci,
+ .inner_type = 0,
+ },
+ .item_ipv4.hdr = {
+ .src_addr = vxlan_encap_conf.ipv4_src,
+ .dst_addr = vxlan_encap_conf.ipv4_dst,
+ },
+ .item_udp.hdr = {
+ .src_port = vxlan_encap_conf.udp_src,
+ .dst_port = vxlan_encap_conf.udp_dst,
+ },
+ .item_vxlan.flags = 0,
+ };
+ memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
+ vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
+ vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
+ if (!vxlan_encap_conf.select_ipv4) {
+ memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
+ &vxlan_encap_conf.ipv6_src,
+ sizeof(vxlan_encap_conf.ipv6_src));
+ memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
+ &vxlan_encap_conf.ipv6_dst,
+ sizeof(vxlan_encap_conf.ipv6_dst));
+ action_vxlan_encap_data->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &action_vxlan_encap_data->item_ipv6,
+ .mask = &rte_flow_item_ipv6_mask,
+ };
+ }
+ if (!vxlan_encap_conf.select_vlan)
+ action_vxlan_encap_data->items[1].type =
+ RTE_FLOW_ITEM_TYPE_VOID;
+ memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
+ RTE_DIM(vxlan_encap_conf.vni));
+ action->conf = &action_vxlan_encap_data->conf;
+ return ret;
+}
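+
+/*
+ * The outer header values above come from vxlan_encap_conf, which the
+ * "set vxlan" command fills in beforehand; vxlan_encap only snapshots
+ * that configuration into the flow rule.
+ */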
+
+/** Parse NVGRE encap action. */
+static int
+parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_nvgre_encap_data *action_nvgre_encap_data;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Set up default configuration. */
+ action_nvgre_encap_data = ctx->object;
+ *action_nvgre_encap_data = (struct action_nvgre_encap_data){
+ .conf = (struct rte_flow_action_nvgre_encap){
+ .definition = action_nvgre_encap_data->items,
+ },
+ .items = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &action_nvgre_encap_data->item_eth,
+ .mask = &rte_flow_item_eth_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .spec = &action_nvgre_encap_data->item_vlan,
+ .mask = &rte_flow_item_vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &action_nvgre_encap_data->item_ipv4,
+ .mask = &rte_flow_item_ipv4_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
+ .spec = &action_nvgre_encap_data->item_nvgre,
+ .mask = &rte_flow_item_nvgre_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ },
+ .item_eth.type = 0,
+ .item_vlan = {
+ .tci = nvgre_encap_conf.vlan_tci,
+ .inner_type = 0,
+ },
+ .item_ipv4.hdr = {
+ .src_addr = nvgre_encap_conf.ipv4_src,
+ .dst_addr = nvgre_encap_conf.ipv4_dst,
+ },
+ .item_nvgre.flow_id = 0,
+ };
+ memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
+ nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
+ nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ if (!nvgre_encap_conf.select_ipv4) {
+ memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
+ &nvgre_encap_conf.ipv6_src,
+ sizeof(nvgre_encap_conf.ipv6_src));
+ memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
+ &nvgre_encap_conf.ipv6_dst,
+ sizeof(nvgre_encap_conf.ipv6_dst));
+ action_nvgre_encap_data->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &action_nvgre_encap_data->item_ipv6,
+ .mask = &rte_flow_item_ipv6_mask,
+ };
+ }
+ if (!nvgre_encap_conf.select_vlan)
+ action_nvgre_encap_data->items[1].type =
+ RTE_FLOW_ITEM_TYPE_VOID;
+ memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
+ RTE_DIM(nvgre_encap_conf.tni));
+ action->conf = &action_nvgre_encap_data->conf;
+ return ret;
+}
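+
+/*
+ * As with VXLAN above, the NVGRE outer headers are snapshots of
+ * nvgre_encap_conf as previously populated by "set nvgre".
+ */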
+
/** Parse tokens for destroy command. */
static int
parse_destroy(struct context *ctx, const struct token *token,
@@ -2269,6 +3440,11 @@ parse_int(struct context *ctx, const struct token *token,
strtoumax(str, &end, 0);
if (errno || (size_t)(end - str) != len)
goto error;
+ if (arg->bounded &&
+ ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
+ (intmax_t)u > (intmax_t)arg->max)) ||
+ (!arg->sign && (u < arg->min || u > arg->max))))
+ goto error;
if (!ctx->object)
return len;
if (arg->mask) {
@@ -2323,8 +3499,8 @@ error:
/**
* Parse a string.
*
- * Two arguments (ctx->args) are retrieved from the stack to store data and
- * its length (in that order).
+ * Three arguments (ctx->args) are retrieved from the stack to store data,
+ * its actual length, and its address (in that order).
*/
static int
parse_string(struct context *ctx, const struct token *token,
@@ -2333,6 +3509,7 @@ parse_string(struct context *ctx, const struct token *token,
{
const struct arg *arg_data = pop_args(ctx);
const struct arg *arg_len = pop_args(ctx);
+ const struct arg *arg_addr = pop_args(ctx);
char tmp[16]; /* Ought to be enough. */
int ret;
@@ -2343,6 +3520,11 @@ parse_string(struct context *ctx, const struct token *token,
push_args(ctx, arg_data);
return -1;
}
+ if (!arg_addr) {
+ push_args(ctx, arg_len);
+ push_args(ctx, arg_data);
+ return -1;
+ }
size = arg_data->size;
/* Bit-mask fill is not supported. */
if (arg_data->mask || size < len)
@@ -2362,11 +3544,26 @@ parse_string(struct context *ctx, const struct token *token,
buf = (uint8_t *)ctx->object + arg_data->offset;
/* Output buffer is not necessarily NUL-terminated. */
memcpy(buf, str, len);
- memset((uint8_t *)buf + len, 0x55, size - len);
+ memset((uint8_t *)buf + len, 0x00, size - len);
if (ctx->objmask)
memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
+ /* Save address if requested. */
+ if (arg_addr->size) {
+ memcpy((uint8_t *)ctx->object + arg_addr->offset,
+ (void *[]){
+ (uint8_t *)ctx->object + arg_data->offset
+ },
+ arg_addr->size);
+ if (ctx->objmask)
+ memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
+ (void *[]){
+ (uint8_t *)ctx->objmask + arg_data->offset
+ },
+ arg_addr->size);
+ }
return len;
error:
+ push_args(ctx, arg_addr);
push_args(ctx, arg_len);
push_args(ctx, arg_data);
return -1;
@@ -2509,6 +3706,7 @@ static const char *const boolean_name[] = {
"false", "true",
"no", "yes",
"N", "Y",
+ "off", "on",
NULL,
};
@@ -2658,22 +3856,40 @@ comp_rule_id(struct context *ctx, const struct token *token,
return i;
}
+/** Complete type field for RSS action. */
+static int
+comp_vc_action_rss_type(struct context *ctx, const struct token *token,
+ unsigned int ent, char *buf, unsigned int size)
+{
+ unsigned int i;
+
+ (void)ctx;
+ (void)token;
+ for (i = 0; rss_type_table[i].str; ++i)
+ ;
+ if (!buf)
+ return i + 1;
+ if (ent < i)
+ return snprintf(buf, size, "%s", rss_type_table[ent].str);
+ if (ent == i)
+ return snprintf(buf, size, "end");
+ return -1;
+}
+
/** Complete queue field for RSS action. */
static int
comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
unsigned int ent, char *buf, unsigned int size)
{
- static const char *const str[] = { "", "end", NULL };
- unsigned int i;
-
(void)ctx;
(void)token;
- for (i = 0; str[i] != NULL; ++i)
- if (buf && i == ent)
- return snprintf(buf, size, "%s", str[i]);
- if (buf)
- return -1;
- return i;
+ if (!buf)
+ return nb_rxq + 1;
+ if (ent < nb_rxq)
+ return snprintf(buf, size, "%u", ent);
+ if (ent == nb_rxq)
+ return snprintf(buf, size, "end");
+ return -1;
}
/** Internal context. */
@@ -2927,7 +4143,7 @@ cmd_flow_parsed(const struct buffer *in)
break;
case QUERY:
port_flow_query(in->port, in->args.query.rule,
- in->args.query.action);
+ &in->args.query.action);
break;
case LIST:
port_flow_list(in->port, in->args.list.group_n,
diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c
index 35cad543..631f1799 100644
--- a/app/test-pmd/cmdline_tm.c
+++ b/app/test-pmd/cmdline_tm.c
@@ -234,6 +234,7 @@ static void cmd_show_port_tm_cap_parsed(void *parsed_result,
return;
memset(&cap, 0, sizeof(struct rte_tm_capabilities));
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_capabilities_get(port_id, &cap, &error);
if (ret) {
print_err_msg(&error);
@@ -374,6 +375,7 @@ static void cmd_show_port_tm_level_cap_parsed(void *parsed_result,
return;
memset(&lcap, 0, sizeof(struct rte_tm_level_capabilities));
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_level_capabilities_get(port_id, level_id, &lcap, &error);
if (ret) {
print_err_msg(&error);
@@ -498,6 +500,7 @@ static void cmd_show_port_tm_node_cap_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Node id must be valid */
ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error);
if (ret != 0) {
@@ -615,6 +618,7 @@ static void cmd_show_port_tm_node_stats_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Port status */
if (!port_is_started(port_id)) {
printf(" Port %u not started (error)\n", port_id);
@@ -727,6 +731,7 @@ static void cmd_show_port_tm_node_type_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -832,6 +837,7 @@ static void cmd_add_port_tm_node_shaper_profile_parsed(void *parsed_result,
/* Private shaper profile params */
memset(&sp, 0, sizeof(struct rte_tm_shaper_params));
+ memset(&error, 0, sizeof(struct rte_tm_error));
sp.peak.rate = res->tb_rate;
sp.peak.size = res->tb_size;
sp.pkt_length_adjust = pkt_len_adjust;
@@ -919,6 +925,7 @@ static void cmd_del_port_tm_node_shaper_profile_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_shaper_profile_delete(port_id, shaper_id, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -1004,6 +1011,7 @@ static void cmd_add_port_tm_node_shared_shaper_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Command type: add */
if ((strcmp(res->cmd_type, "add") == 0) &&
(port_is_started(port_id))) {
@@ -1098,6 +1106,7 @@ static void cmd_del_port_tm_node_shared_shaper_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_shared_shaper_delete(port_id, shared_shaper_id, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -1254,6 +1263,7 @@ static void cmd_add_port_tm_node_wred_profile_parsed(void *parsed_result,
return;
memset(&wp, 0, sizeof(struct rte_tm_wred_params));
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* WRED Params (Green Color)*/
color = RTE_TM_GREEN;
@@ -1369,6 +1379,7 @@ static void cmd_del_port_tm_node_wred_profile_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_wred_profile_delete(port_id, wred_profile_id, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -1455,6 +1466,7 @@ static void cmd_set_port_tm_node_shaper_profile_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Port status */
if (!port_is_started(port_id)) {
printf(" Port %u not started (error)\n", port_id);
@@ -1500,7 +1512,7 @@ struct cmd_add_port_tm_nonleaf_node_result {
uint32_t priority;
uint32_t weight;
uint32_t level_id;
- uint32_t shaper_profile_id;
+ int32_t shaper_profile_id;
uint32_t n_sp_priorities;
uint64_t stats_mask;
cmdline_multi_string_t multi_shared_shaper_id;
@@ -1542,7 +1554,7 @@ cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_level_id =
level_id, UINT32);
cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_shaper_profile_id =
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
- shaper_profile_id, UINT32);
+ shaper_profile_id, INT32);
cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_n_sp_priorities =
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result,
n_sp_priorities, UINT32);
@@ -1571,6 +1583,7 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result,
return;
memset(&np, 0, sizeof(struct rte_tm_node_params));
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Node parameters */
if (res->parent_node_id < 0)
@@ -1593,12 +1606,18 @@ static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result,
return;
}
- np.shaper_profile_id = res->shaper_profile_id;
+ if (res->shaper_profile_id < 0)
+ np.shaper_profile_id = UINT32_MAX;
+ else
+ np.shaper_profile_id = res->shaper_profile_id;
+
np.n_shared_shapers = n_shared_shapers;
- if (np.n_shared_shapers)
+ if (np.n_shared_shapers) {
np.shared_shaper_id = &shared_shaper_id[0];
- else
- np.shared_shaper_id = NULL;
+ } else {
+ free(shared_shaper_id);
+ shared_shaper_id = NULL;
+ }
np.nonleaf.n_sp_priorities = res->n_sp_priorities;
np.stats_mask = res->stats_mask;
@@ -1651,7 +1670,7 @@ struct cmd_add_port_tm_leaf_node_result {
uint32_t priority;
uint32_t weight;
uint32_t level_id;
- uint32_t shaper_profile_id;
+ int32_t shaper_profile_id;
uint32_t cman_mode;
uint32_t wred_profile_id;
uint64_t stats_mask;
@@ -1693,7 +1712,7 @@ cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_level_id =
level_id, UINT32);
cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_shaper_profile_id =
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
- shaper_profile_id, UINT32);
+ shaper_profile_id, INT32);
cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_cman_mode =
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result,
cman_mode, UINT32);
@@ -1725,6 +1744,7 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result,
return;
memset(&np, 0, sizeof(struct rte_tm_node_params));
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Node parameters */
if (res->parent_node_id < 0)
@@ -1747,13 +1767,19 @@ static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result,
return;
}
- np.shaper_profile_id = res->shaper_profile_id;
+ if (res->shaper_profile_id < 0)
+ np.shaper_profile_id = UINT32_MAX;
+ else
+ np.shaper_profile_id = res->shaper_profile_id;
+
np.n_shared_shapers = n_shared_shapers;
- if (np.n_shared_shapers)
+ if (np.n_shared_shapers) {
np.shared_shaper_id = &shared_shaper_id[0];
- else
- np.shared_shaper_id = NULL;
+ } else {
+ free(shared_shaper_id);
+ shared_shaper_id = NULL;
+ }
np.leaf.cman = res->cman_mode;
np.leaf.wred.wred_profile_id = res->wred_profile_id;
@@ -1836,6 +1862,7 @@ static void cmd_del_port_tm_node_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Port status */
if (port_is_started(port_id)) {
printf(" Port %u not stopped (error)\n", port_id);
@@ -1925,6 +1952,7 @@ static void cmd_set_port_tm_node_parent_parsed(void *parsed_result,
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
+ memset(&error, 0, sizeof(struct rte_tm_error));
/* Port status */
if (!port_is_started(port_id)) {
printf(" Port %u not started (error)\n", port_id);
@@ -1958,6 +1986,136 @@ cmdline_parse_inst_t cmd_set_port_tm_node_parent = {
},
};
+/* *** Suspend Port TM Node *** */
+struct cmd_suspend_port_tm_node_result {
+ cmdline_fixed_string_t suspend;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ uint16_t port_id;
+ uint32_t node_id;
+};
+
+cmdline_parse_token_string_t cmd_suspend_port_tm_node_suspend =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, suspend, "suspend");
+cmdline_parse_token_string_t cmd_suspend_port_tm_node_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, port, "port");
+cmdline_parse_token_string_t cmd_suspend_port_tm_node_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, tm, "tm");
+cmdline_parse_token_string_t cmd_suspend_port_tm_node_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, node, "node");
+cmdline_parse_token_num_t cmd_suspend_port_tm_node_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_suspend_port_tm_node_node_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_suspend_port_tm_node_result, node_id, UINT32);
+
+static void cmd_suspend_port_tm_node_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_suspend_port_tm_node_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&error, 0, sizeof(struct rte_tm_error));
+ ret = rte_tm_node_suspend(port_id, node_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_suspend_port_tm_node = {
+ .f = cmd_suspend_port_tm_node_parsed,
+ .data = NULL,
+	.help_str = "Suspend port tm node <port_id> <node_id>",
+ .tokens = {
+ (void *)&cmd_suspend_port_tm_node_suspend,
+ (void *)&cmd_suspend_port_tm_node_port,
+ (void *)&cmd_suspend_port_tm_node_tm,
+ (void *)&cmd_suspend_port_tm_node_node,
+ (void *)&cmd_suspend_port_tm_node_port_id,
+ (void *)&cmd_suspend_port_tm_node_node_id,
+ NULL,
+ },
+};
+
+/* *** Resume Port TM Node *** */
+struct cmd_resume_port_tm_node_result {
+ cmdline_fixed_string_t resume;
+ cmdline_fixed_string_t port;
+ cmdline_fixed_string_t tm;
+ cmdline_fixed_string_t node;
+ uint16_t port_id;
+ uint32_t node_id;
+};
+
+cmdline_parse_token_string_t cmd_resume_port_tm_node_resume =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, resume, "resume");
+cmdline_parse_token_string_t cmd_resume_port_tm_node_port =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, port, "port");
+cmdline_parse_token_string_t cmd_resume_port_tm_node_tm =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, tm, "tm");
+cmdline_parse_token_string_t cmd_resume_port_tm_node_node =
+ TOKEN_STRING_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, node, "node");
+cmdline_parse_token_num_t cmd_resume_port_tm_node_port_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, port_id, UINT16);
+cmdline_parse_token_num_t cmd_resume_port_tm_node_node_id =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_resume_port_tm_node_result, node_id, UINT32);
+
+static void cmd_resume_port_tm_node_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_resume_port_tm_node_result *res = parsed_result;
+ struct rte_tm_error error;
+ uint32_t node_id = res->node_id;
+ portid_t port_id = res->port_id;
+ int ret;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
+ memset(&error, 0, sizeof(struct rte_tm_error));
+ ret = rte_tm_node_resume(port_id, node_id, &error);
+ if (ret != 0) {
+ print_err_msg(&error);
+ return;
+ }
+}
+
+cmdline_parse_inst_t cmd_resume_port_tm_node = {
+ .f = cmd_resume_port_tm_node_parsed,
+ .data = NULL,
+ .help_str = "Resume port tm node",
+ .tokens = {
+ (void *)&cmd_resume_port_tm_node_resume,
+ (void *)&cmd_resume_port_tm_node_port,
+ (void *)&cmd_resume_port_tm_node_tm,
+ (void *)&cmd_resume_port_tm_node_node,
+ (void *)&cmd_resume_port_tm_node_port_id,
+ (void *)&cmd_resume_port_tm_node_node_id,
+ NULL,
+ },
+};
+
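+
The suspend/resume commands are thin wrappers over the rte_tm API; a minimal sketch of the underlying call sequence, assuming a committed hierarchy and a valid node_id:

    struct rte_tm_error error;

    memset(&error, 0, sizeof(error));
    if (rte_tm_node_suspend(port_id, node_id, &error) != 0)
        print_err_msg(&error);
    /* ... traffic through the node is paused; re-enable it: */
    if (rte_tm_node_resume(port_id, node_id, &error) != 0)
        print_err_msg(&error);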
/* *** Port TM Hierarchy Commit *** */
struct cmd_port_tm_hierarchy_commit_result {
cmdline_fixed_string_t port;
@@ -2007,6 +2165,7 @@ static void cmd_port_tm_hierarchy_commit_parsed(void *parsed_result,
else
clean_on_fail = 0;
+ memset(&error, 0, sizeof(struct rte_tm_error));
ret = rte_tm_hierarchy_commit(port_id, clean_on_fail, &error);
if (ret != 0) {
print_err_msg(&error);
@@ -2017,7 +2176,7 @@ static void cmd_port_tm_hierarchy_commit_parsed(void *parsed_result,
cmdline_parse_inst_t cmd_port_tm_hierarchy_commit = {
.f = cmd_port_tm_hierarchy_commit_parsed,
.data = NULL,
- .help_str = "Set port tm node shaper profile",
+ .help_str = "Commit port tm hierarchy",
.tokens = {
(void *)&cmd_port_tm_hierarchy_commit_port,
(void *)&cmd_port_tm_hierarchy_commit_tm,
diff --git a/app/test-pmd/cmdline_tm.h b/app/test-pmd/cmdline_tm.h
index ba303607..b3a14ade 100644
--- a/app/test-pmd/cmdline_tm.h
+++ b/app/test-pmd/cmdline_tm.h
@@ -22,6 +22,8 @@ extern cmdline_parse_inst_t cmd_add_port_tm_nonleaf_node;
extern cmdline_parse_inst_t cmd_add_port_tm_leaf_node;
extern cmdline_parse_inst_t cmd_del_port_tm_node;
extern cmdline_parse_inst_t cmd_set_port_tm_node_parent;
+extern cmdline_parse_inst_t cmd_suspend_port_tm_node;
+extern cmdline_parse_inst_t cmd_resume_port_tm_node;
extern cmdline_parse_inst_t cmd_port_tm_hierarchy_commit;
#endif /* _CMDLINE_TM_H_ */
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 4bb255c6..14ccd686 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -73,12 +73,7 @@ static const struct {
},
};
-struct rss_type_info {
- char str[32];
- uint64_t rss_type;
-};
-
-static const struct rss_type_info rss_type_table[] = {
+const struct rss_type_info rss_type_table[] = {
{ "ipv4", ETH_RSS_IPV4 },
{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
@@ -99,7 +94,12 @@ static const struct rss_type_info rss_type_table[] = {
{ "vxlan", ETH_RSS_VXLAN },
{ "geneve", ETH_RSS_GENEVE },
{ "nvgre", ETH_RSS_NVGRE },
-
+ { "ip", ETH_RSS_IP },
+ { "udp", ETH_RSS_UDP },
+ { "tcp", ETH_RSS_TCP },
+ { "sctp", ETH_RSS_SCTP },
+ { "tunnel", ETH_RSS_TUNNEL },
+ { NULL, 0 },
};
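
The table is now exported and terminated by a NULL-string sentinel (the loops further below switch from RTE_DIM() to that sentinel); a minimal consumer sketch:

    /* Iterate the exported table without knowing its length. */
    for (i = 0; rss_type_table[i].str != NULL; i++)
        if (rss_hf & rss_type_table[i].rss_type)
            printf("%s ", rss_type_table[i].str);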
static void
@@ -121,15 +121,11 @@ nic_stats_display(portid_t port_id)
struct rte_eth_stats stats;
struct rte_port *port = &ports[port_id];
uint8_t i;
- portid_t pid;
static const char *nic_stats_border = "########################";
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return;
}
rte_eth_stats_get(port_id, &stats);
@@ -203,13 +199,8 @@ nic_stats_display(portid_t port_id)
void
nic_stats_clear(portid_t port_id)
{
- portid_t pid;
-
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return;
}
rte_eth_stats_reset(port_id);
@@ -286,15 +277,11 @@ nic_stats_mapping_display(portid_t port_id)
{
struct rte_port *port = &ports[port_id];
uint16_t i;
- portid_t pid;
static const char *nic_stats_mapping_border = "########################";
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return;
}
@@ -405,14 +392,11 @@ port_infos_display(portid_t port_id)
int vlan_offload;
struct rte_mempool * mp;
static const char *info_border = "*********************";
- portid_t pid;
uint16_t mtu;
+ char name[RTE_ETH_NAME_MAX_LEN];
if (port_id_is_invalid(port_id, ENABLED_WARN)) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return;
}
port = &ports[port_id];
@@ -423,6 +407,8 @@ port_infos_display(portid_t port_id)
info_border, port_id, info_border);
rte_eth_macaddr_get(port_id, &mac_addr);
print_ethaddr("MAC address: ", &mac_addr);
+ rte_eth_dev_get_name_by_port(port_id, name);
+ printf("\nDevice name: %s", name);
printf("\nDriver name: %s", dev_info.driver_name);
printf("\nConnect to socket: %u", port->socket_id);
@@ -517,6 +503,18 @@ port_infos_display(portid_t port_id)
printf("Min possible number of TXDs per queue: %hu\n",
dev_info.tx_desc_lim.nb_min);
printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
+
+	/* Show switch info only if a valid switch domain and port id are set */
+ if (dev_info.switch_info.domain_id !=
+ RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ if (dev_info.switch_info.name)
+ printf("Switch name: %s\n", dev_info.switch_info.name);
+
+ printf("Switch domain Id: %u\n",
+ dev_info.switch_info.domain_id);
+ printf("Switch Port Id: %u\n",
+ dev_info.switch_info.port_id);
+ }
}
void
@@ -722,6 +720,23 @@ port_offload_cap_display(portid_t port_id)
printf("off\n");
}
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) {
+ printf("IP tunnel TSO: ");
+ if (ports[port_id].dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_IP_TNL_TSO)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) {
+ printf("UDP tunnel TSO: ");
+ if (ports[port_id].dev_conf.txmode.offloads &
+ DEV_TX_OFFLOAD_UDP_TNL_TSO)
+ printf("on\n");
+ else
+ printf("off\n");
+ }
}
int
@@ -742,6 +757,17 @@ port_id_is_invalid(portid_t port_id, enum print_warning warning)
return 1;
}
+void print_valid_ports(void)
+{
+ portid_t pid;
+
+ printf("The valid ports array is [");
+ RTE_ETH_FOREACH_DEV(pid) {
+ printf(" %d", pid);
+ }
+ printf(" ]\n");
+}
+
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
@@ -754,6 +780,8 @@ vlan_id_is_invalid(uint16_t vlan_id)
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus;
uint64_t pci_len;
if (reg_off & 0x3) {
@@ -762,7 +790,21 @@ port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
(unsigned)reg_off);
return 1;
}
- pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
+
+ if (!ports[port_id].dev_info.device) {
+ printf("Invalid device\n");
+ return 0;
+ }
+
+ bus = rte_bus_find_by_device(ports[port_id].dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device);
+ } else {
+ printf("Not a PCI device\n");
+ return 1;
+ }
+
+ pci_len = pci_dev->mem_resource[0].len;
if (reg_off >= pci_len) {
printf("Port %d: register offset %u (0x%X) out of port PCI "
"resource (length=%"PRIu64")\n",
@@ -960,8 +1002,9 @@ static const struct {
MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
MK_FLOW_ITEM(PF, 0),
MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
- MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
- MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
+ MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
+ MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
+ MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
@@ -980,33 +1023,89 @@ static const struct {
MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
+ MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
+ MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
+ MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
+ MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
+ MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
+ MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};
-/** Compute storage space needed by item specification. */
-static void
-flow_item_spec_size(const struct rte_flow_item *item,
- size_t *size, size_t *pad)
+/** Pattern item specification types. */
+enum item_spec_type {
+ ITEM_SPEC,
+ ITEM_LAST,
+ ITEM_MASK,
+};
+
+/** Compute storage space needed by item specification and copy it. */
+static size_t
+flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
+ enum item_spec_type type)
{
- if (!item->spec) {
- *size = 0;
+ size_t size = 0;
+ const void *data =
+ type == ITEM_SPEC ? item->spec :
+ type == ITEM_LAST ? item->last :
+ type == ITEM_MASK ? item->mask :
+ NULL;
+
+ if (!item->spec || !data)
goto empty;
- }
switch (item->type) {
union {
const struct rte_flow_item_raw *raw;
} spec;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } last;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } mask;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } src;
+ union {
+ struct rte_flow_item_raw *raw;
+ } dst;
+ size_t off;
case RTE_FLOW_ITEM_TYPE_RAW:
spec.raw = item->spec;
- *size = offsetof(struct rte_flow_item_raw, pattern) +
- spec.raw->length * sizeof(*spec.raw->pattern);
+ last.raw = item->last ? item->last : item->spec;
+ mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
+ src.raw = data;
+ dst.raw = buf;
+ off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
+ sizeof(*src.raw->pattern));
+ if (type == ITEM_SPEC ||
+ (type == ITEM_MASK &&
+ ((spec.raw->length & mask.raw->length) >=
+ (last.raw->length & mask.raw->length))))
+ size = spec.raw->length & mask.raw->length;
+ else
+ size = last.raw->length & mask.raw->length;
+ size = off + size * sizeof(*src.raw->pattern);
+ if (dst.raw) {
+ memcpy(dst.raw, src.raw, sizeof(*src.raw));
+ dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
+ src.raw->pattern,
+ size - off);
+ }
break;
default:
- *size = flow_item[item->type].size;
+ size = flow_item[item->type].size;
+ if (buf)
+ memcpy(buf, data, size);
break;
}
empty:
- *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+ return RTE_ALIGN_CEIL(size, sizeof(double));
}
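
flow_item_spec_copy() doubles as a size query: a NULL buffer measures, a real buffer copies, and both passes return the same double-aligned size. A usage sketch (the allocation is assumed):

    size_t need = flow_item_spec_copy(NULL, item, ITEM_SPEC); /* measure */
    uint8_t *buf = calloc(1, need);

    if (buf != NULL)
        flow_item_spec_copy(buf, item, ITEM_SPEC);            /* copy */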
/** Generate flow_action[] entry. */
@@ -1028,39 +1127,92 @@ static const struct {
MK_FLOW_ACTION(FLAG, 0),
MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
MK_FLOW_ACTION(DROP, 0),
- MK_FLOW_ACTION(COUNT, 0),
- MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
- MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
+ MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
+ MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
MK_FLOW_ACTION(PF, 0),
MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
+ MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
+ MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
+ MK_FLOW_ACTION(OF_SET_MPLS_TTL,
+ sizeof(struct rte_flow_action_of_set_mpls_ttl)),
+ MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
+ MK_FLOW_ACTION(OF_SET_NW_TTL,
+ sizeof(struct rte_flow_action_of_set_nw_ttl)),
+ MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
+ MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
+ MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
+ MK_FLOW_ACTION(OF_POP_VLAN, 0),
+ MK_FLOW_ACTION(OF_PUSH_VLAN,
+ sizeof(struct rte_flow_action_of_push_vlan)),
+ MK_FLOW_ACTION(OF_SET_VLAN_VID,
+ sizeof(struct rte_flow_action_of_set_vlan_vid)),
+ MK_FLOW_ACTION(OF_SET_VLAN_PCP,
+ sizeof(struct rte_flow_action_of_set_vlan_pcp)),
+ MK_FLOW_ACTION(OF_POP_MPLS,
+ sizeof(struct rte_flow_action_of_pop_mpls)),
+ MK_FLOW_ACTION(OF_PUSH_MPLS,
+ sizeof(struct rte_flow_action_of_push_mpls)),
};
-/** Compute storage space needed by action configuration. */
-static void
-flow_action_conf_size(const struct rte_flow_action *action,
- size_t *size, size_t *pad)
+/** Compute storage space needed by action configuration and copy it. */
+static size_t
+flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
- if (!action->conf) {
- *size = 0;
+ size_t size = 0;
+
+ if (!action->conf)
goto empty;
- }
switch (action->type) {
union {
const struct rte_flow_action_rss *rss;
- } conf;
+ } src;
+ union {
+ struct rte_flow_action_rss *rss;
+ } dst;
+ size_t off;
case RTE_FLOW_ACTION_TYPE_RSS:
- conf.rss = action->conf;
- *size = offsetof(struct rte_flow_action_rss, queue) +
- conf.rss->num * sizeof(*conf.rss->queue);
+ src.rss = action->conf;
+ dst.rss = buf;
+ off = 0;
+ if (dst.rss)
+ *dst.rss = (struct rte_flow_action_rss){
+ .func = src.rss->func,
+ .level = src.rss->level,
+ .types = src.rss->types,
+ .key_len = src.rss->key_len,
+ .queue_num = src.rss->queue_num,
+ };
+ off += sizeof(*src.rss);
+ if (src.rss->key_len) {
+ off = RTE_ALIGN_CEIL(off, sizeof(double));
+ size = sizeof(*src.rss->key) * src.rss->key_len;
+ if (dst.rss)
+ dst.rss->key = memcpy
+ ((void *)((uintptr_t)dst.rss + off),
+ src.rss->key, size);
+ off += size;
+ }
+ if (src.rss->queue_num) {
+ off = RTE_ALIGN_CEIL(off, sizeof(double));
+ size = sizeof(*src.rss->queue) * src.rss->queue_num;
+ if (dst.rss)
+ dst.rss->queue = memcpy
+ ((void *)((uintptr_t)dst.rss + off),
+ src.rss->queue, size);
+ off += size;
+ }
+ size = off;
break;
default:
- *size = flow_action[action->type].size;
+ size = flow_action[action->type].size;
+ if (buf)
+ memcpy(buf, action->conf, size);
break;
}
empty:
- *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+ return RTE_ALIGN_CEIL(size, sizeof(double));
}
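
For reference, the buffer layout this produces for an RSS action, with each trailing array aligned to sizeof(double):

    /*
     * struct rte_flow_action_rss | pad | key[key_len] | pad | queue[queue_num]
     *   (each "pad" is RTE_ALIGN_CEIL(off, sizeof(double)) - off bytes)
     */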
/** Generate a port_flow entry from attributes/pattern/actions. */
@@ -1073,7 +1225,6 @@ port_flow_new(const struct rte_flow_attr *attr,
const struct rte_flow_action *action;
struct port_flow *pf = NULL;
size_t tmp;
- size_t pad;
size_t off1 = 0;
size_t off2 = 0;
int err = ENOTSUP;
@@ -1091,24 +1242,23 @@ store:
if (pf)
dst = memcpy(pf->data + off1, item, sizeof(*item));
off1 += sizeof(*item);
- flow_item_spec_size(item, &tmp, &pad);
if (item->spec) {
if (pf)
- dst->spec = memcpy(pf->data + off2,
- item->spec, tmp);
- off2 += tmp + pad;
+ dst->spec = pf->data + off2;
+ off2 += flow_item_spec_copy
+ (pf ? pf->data + off2 : NULL, item, ITEM_SPEC);
}
if (item->last) {
if (pf)
- dst->last = memcpy(pf->data + off2,
- item->last, tmp);
- off2 += tmp + pad;
+ dst->last = pf->data + off2;
+ off2 += flow_item_spec_copy
+ (pf ? pf->data + off2 : NULL, item, ITEM_LAST);
}
if (item->mask) {
if (pf)
- dst->mask = memcpy(pf->data + off2,
- item->mask, tmp);
- off2 += tmp + pad;
+ dst->mask = pf->data + off2;
+ off2 += flow_item_spec_copy
+ (pf ? pf->data + off2 : NULL, item, ITEM_MASK);
}
off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
@@ -1125,12 +1275,11 @@ store:
if (pf)
dst = memcpy(pf->data + off1, action, sizeof(*action));
off1 += sizeof(*action);
- flow_action_conf_size(action, &tmp, &pad);
if (action->conf) {
if (pf)
- dst->conf = memcpy(pf->data + off2,
- action->conf, tmp);
- off2 += tmp + pad;
+ dst->conf = pf->data + off2;
+ off2 += flow_action_conf_copy
+ (pf ? pf->data + off2 : NULL, action);
}
off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
@@ -1168,10 +1317,15 @@ port_flow_complain(struct rte_flow_error *error)
[RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
[RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
[RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
+ [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field",
[RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
[RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
+ [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification",
+ [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range",
+ [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask",
[RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
[RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
+ [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration",
[RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
};
const char *errstr;
@@ -1326,7 +1480,7 @@ port_flow_flush(portid_t port_id)
/** Query a flow rule. */
int
port_flow_query(portid_t port_id, uint32_t rule,
- enum rte_flow_action_type action)
+ const struct rte_flow_action *action)
{
struct rte_flow_error error;
struct rte_port *port;
@@ -1347,16 +1501,17 @@ port_flow_query(portid_t port_id, uint32_t rule,
printf("Flow rule #%u not found\n", rule);
return -ENOENT;
}
- if ((unsigned int)action >= RTE_DIM(flow_action) ||
- !flow_action[action].name)
+ if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
+ !flow_action[action->type].name)
name = "unknown";
else
- name = flow_action[action].name;
- switch (action) {
+ name = flow_action[action->type].name;
+ switch (action->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
break;
default:
- printf("Cannot query action type %d (%s)\n", action, name);
+ printf("Cannot query action type %d (%s)\n",
+ action->type, name);
return -ENOTSUP;
}
/* Poisoning to make sure PMDs update it in case of error. */
@@ -1364,7 +1519,7 @@ port_flow_query(portid_t port_id, uint32_t rule,
memset(&query, 0, sizeof(query));
if (rte_flow_query(port_id, pf->flow, action, &query, &error))
return port_flow_complain(&error);
- switch (action) {
+ switch (action->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
printf("%s:\n"
" hits_set: %u\n"
@@ -1379,7 +1534,7 @@ port_flow_query(portid_t port_id, uint32_t rule,
break;
default:
printf("Cannot display result for action type %d (%s)\n",
- action, name);
+ action->type, name);
break;
}
return 0;
@@ -1429,12 +1584,13 @@ port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
const struct rte_flow_item *item = pf->pattern;
const struct rte_flow_action *action = pf->actions;
- printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t",
+ printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t",
pf->id,
pf->attr.group,
pf->attr.priority,
pf->attr.ingress ? 'i' : '-',
- pf->attr.egress ? 'e' : '-');
+ pf->attr.egress ? 'e' : '-',
+ pf->attr.transfer ? 't' : '-');
while (item->type != RTE_FLOW_ITEM_TYPE_END) {
if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
printf("%s ", flow_item[item->type].name);
@@ -1664,6 +1820,7 @@ void
rxtx_config_display(void)
{
portid_t pid;
+ queueid_t qid;
printf(" %s packet forwarding%s packets/burst=%d\n",
cur_fwd_eng->fwd_mode_name,
@@ -1678,30 +1835,63 @@ rxtx_config_display(void)
nb_fwd_lcores, nb_fwd_ports);
RTE_ETH_FOREACH_DEV(pid) {
- struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf;
- struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf;
-
- printf(" port %d:\n", (unsigned int)pid);
- printf(" CRC stripping %s\n",
- (ports[pid].dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ?
- "enabled" : "disabled");
- printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n",
- nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
- printf(" RX threshold registers: pthresh=%d hthresh=%d "
- " wthresh=%d\n",
- rx_conf->rx_thresh.pthresh,
- rx_conf->rx_thresh.hthresh,
- rx_conf->rx_thresh.wthresh);
- printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n",
- nb_txq, nb_txd, tx_conf->tx_free_thresh);
- printf(" TX threshold registers: pthresh=%d hthresh=%d "
- " wthresh=%d\n",
- tx_conf->tx_thresh.pthresh,
- tx_conf->tx_thresh.hthresh,
- tx_conf->tx_thresh.wthresh);
- printf(" TX RS bit threshold=%d - TXQ offloads=0x%"PRIx64"\n",
- tx_conf->tx_rs_thresh, tx_conf->offloads);
+ struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0];
+ struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0];
+ uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0];
+ uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0];
+ uint16_t nb_rx_desc_tmp;
+ uint16_t nb_tx_desc_tmp;
+ struct rte_eth_rxq_info rx_qinfo;
+ struct rte_eth_txq_info tx_qinfo;
+ int32_t rc;
+
+ /* per port config */
+ printf(" port %d: RX queue number: %d Tx queue number: %d\n",
+ (unsigned int)pid, nb_rxq, nb_txq);
+
+ printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n",
+ ports[pid].dev_conf.rxmode.offloads,
+ ports[pid].dev_conf.txmode.offloads);
+
+		/* per-Rx-queue config, shown only for the first queue to limit verbosity */
+ for (qid = 0; qid < 1; qid++) {
+ rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo);
+ if (rc)
+ nb_rx_desc_tmp = nb_rx_desc[qid];
+ else
+ nb_rx_desc_tmp = rx_qinfo.nb_desc;
+
+ printf(" RX queue: %d\n", qid);
+ printf(" RX desc=%d - RX free threshold=%d\n",
+ nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh);
+ printf(" RX threshold registers: pthresh=%d hthresh=%d "
+ " wthresh=%d\n",
+ rx_conf[qid].rx_thresh.pthresh,
+ rx_conf[qid].rx_thresh.hthresh,
+ rx_conf[qid].rx_thresh.wthresh);
+ printf(" RX Offloads=0x%"PRIx64"\n",
+ rx_conf[qid].offloads);
+ }
+
+		/* per-Tx-queue config, shown only for the first queue to limit verbosity */
+ for (qid = 0; qid < 1; qid++) {
+ rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo);
+ if (rc)
+ nb_tx_desc_tmp = nb_tx_desc[qid];
+ else
+ nb_tx_desc_tmp = tx_qinfo.nb_desc;
+
+ printf(" TX queue: %d\n", qid);
+ printf(" TX desc=%d - TX free threshold=%d\n",
+ nb_tx_desc_tmp, tx_conf[qid].tx_free_thresh);
+ printf(" TX threshold registers: pthresh=%d hthresh=%d "
+ " wthresh=%d\n",
+ tx_conf[qid].tx_thresh.pthresh,
+ tx_conf[qid].tx_thresh.hthresh,
+ tx_conf[qid].tx_thresh.wthresh);
+ printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n",
+			       tx_conf[qid].offloads, tx_conf[qid].tx_rs_thresh);
+ }
}
}
@@ -1761,7 +1951,7 @@ port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
}
rss_conf.rss_hf = 0;
- for (i = 0; i < RTE_DIM(rss_type_table); i++) {
+ for (i = 0; rss_type_table[i].str; i++) {
if (!strcmp(rss_info, rss_type_table[i].str))
rss_conf.rss_hf = rss_type_table[i].rss_type;
}
@@ -1790,7 +1980,7 @@ port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
return;
}
printf("RSS functions:\n ");
- for (i = 0; i < RTE_DIM(rss_type_table); i++) {
+ for (i = 0; rss_type_table[i].str; i++) {
if (rss_hf & rss_type_table[i].rss_type)
printf("%s ", rss_type_table[i].str);
}
@@ -1814,7 +2004,7 @@ port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
rss_conf.rss_key = NULL;
rss_conf.rss_key_len = hash_key_len;
rss_conf.rss_hf = 0;
- for (i = 0; i < RTE_DIM(rss_type_table); i++) {
+ for (i = 0; rss_type_table[i].str; i++) {
if (!strcmp(rss_type_table[i].str, rss_type))
rss_conf.rss_hf = rss_type_table[i].rss_type;
}
@@ -1987,15 +2177,11 @@ rss_fwd_config_setup(void)
fs->tx_queue = rxq;
fs->peer_addr = fs->tx_port;
fs->retry_enabled = retry_enabled;
- rxq = (queueid_t) (rxq + 1);
- if (rxq < nb_q)
- continue;
- /*
- * rxq == nb_q
- * Restart from RX queue 0 on next RX port
- */
- rxq = 0;
rxp++;
+ if (rxp < nb_fwd_ports)
+ continue;
+ rxp = 0;
+ rxq++;
}
}
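
The RSS stream order changes here: queues now advance in the outer position and ports in the inner, so streams interleave across ports first (with 2 ports and 2 queues: p0q0, p1q0, p0q1, p1q1 instead of p0q0, p0q1, p1q0, p1q1). An equivalent nested-loop sketch, with a hypothetical helper:

    for (rxq = 0; rxq < nb_q; rxq++)
        for (rxp = 0; rxp < nb_fwd_ports; rxp++)
            setup_stream(rxp, rxq);   /* hypothetical per-stream setup */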
@@ -2142,6 +2328,55 @@ icmp_echo_config_setup(void)
}
}
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+static void
+softnic_fwd_config_setup(void)
+{
+ struct rte_port *port;
+ portid_t pid, softnic_portid;
+ queueid_t i;
+ uint8_t softnic_enable = 0;
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ port = &ports[pid];
+ const char *driver = port->dev_info.driver_name;
+
+ if (strcmp(driver, "net_softnic") == 0) {
+ softnic_portid = pid;
+ softnic_enable = 1;
+ break;
+ }
+ }
+
+ if (softnic_enable == 0) {
+ printf("Softnic mode not configured(%s)!\n", __func__);
+ return;
+ }
+
+ cur_fwd_config.nb_fwd_ports = 1;
+ cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq;
+
+ /* Re-initialize forwarding streams */
+ init_fwd_streams();
+
+ /*
+ * In the softnic forwarding test, the number of forwarding cores
+	 * is set to one; the remaining cores are used for softnic packet processing.
+ */
+ cur_fwd_config.nb_fwd_lcores = 1;
+ setup_fwd_config_of_each_lcore(&cur_fwd_config);
+
+ for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) {
+ fwd_streams[i]->rx_port = softnic_portid;
+ fwd_streams[i]->rx_queue = i;
+ fwd_streams[i]->tx_port = softnic_portid;
+ fwd_streams[i]->tx_queue = i;
+ fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
+ fwd_streams[i]->retry_enabled = retry_enabled;
+ }
+}
+#endif
+
void
fwd_config_setup(void)
{
@@ -2150,6 +2385,14 @@ fwd_config_setup(void)
icmp_echo_config_setup();
return;
}
+
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+ if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
+ softnic_fwd_config_setup();
+ return;
+ }
+#endif
+
if ((nb_rxq > 1) && (nb_txq > 1)){
if (dcb_config)
dcb_fwd_config_setup();
@@ -3018,6 +3261,7 @@ flowtype_to_str(uint16_t flow_type)
{"vxlan", RTE_ETH_FLOW_VXLAN},
{"geneve", RTE_ETH_FLOW_GENEVE},
{"nvgre", RTE_ETH_FLOW_NVGRE},
+ {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE},
};
for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 5f5ab64a..49482926 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -49,9 +49,12 @@
#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
-#define GRE_KEY_PRESENT 0x2000
-#define GRE_KEY_LEN 4
-#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT
+#define GRE_CHECKSUM_PRESENT 0x8000
+#define GRE_KEY_PRESENT 0x2000
+#define GRE_SEQUENCE_PRESENT 0x1000
+#define GRE_EXT_LEN 4
+#define GRE_SUPPORTED_FIELDS (GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
+ GRE_SEQUENCE_PRESENT)
/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -60,6 +63,8 @@
#define _htons(x) (x)
#endif
+uint16_t vxlan_gpe_udp_port = 4790;
+
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
uint16_t ethertype;
@@ -194,6 +199,70 @@ parse_vxlan(struct udp_hdr *udp_hdr,
info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
+/* Parse a vxlan-gpe header */
+static void
+parse_vxlan_gpe(struct udp_hdr *udp_hdr,
+ struct testpmd_offload_info *info)
+{
+ struct ether_hdr *eth_hdr;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct vxlan_gpe_hdr *vxlan_gpe_hdr;
+ uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);
+
+ /* Check udp destination port. */
+ if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port))
+ return;
+
+ vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)((char *)udp_hdr +
+ sizeof(struct udp_hdr));
+
+ if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
+ VXLAN_GPE_TYPE_IPV4) {
+ info->is_tunnel = 1;
+ info->outer_ethertype = info->ethertype;
+ info->outer_l2_len = info->l2_len;
+ info->outer_l3_len = info->l3_len;
+ info->outer_l4_proto = info->l4_proto;
+
+ ipv4_hdr = (struct ipv4_hdr *)((char *)vxlan_gpe_hdr +
+ vxlan_gpe_len);
+
+ parse_ipv4(ipv4_hdr, info);
+ info->ethertype = _htons(ETHER_TYPE_IPv4);
+ info->l2_len = 0;
+
+ } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_IPV6) {
+ info->is_tunnel = 1;
+ info->outer_ethertype = info->ethertype;
+ info->outer_l2_len = info->l2_len;
+ info->outer_l3_len = info->l3_len;
+ info->outer_l4_proto = info->l4_proto;
+
+ ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +
+ vxlan_gpe_len);
+
+ info->ethertype = _htons(ETHER_TYPE_IPv6);
+ parse_ipv6(ipv6_hdr, info);
+ info->l2_len = 0;
+
+ } else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_ETH) {
+ info->is_tunnel = 1;
+ info->outer_ethertype = info->ethertype;
+ info->outer_l2_len = info->l2_len;
+ info->outer_l3_len = info->l3_len;
+ info->outer_l4_proto = info->l4_proto;
+
+ eth_hdr = (struct ether_hdr *)((char *)vxlan_gpe_hdr +
+ vxlan_gpe_len);
+
+ parse_ethernet(eth_hdr, info);
+ } else
+ return;
+
+ info->l2_len += ETHER_VXLAN_GPE_HLEN;
+}
+
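+
Detection is keyed purely on the UDP destination port (default 4790, overridable with --vxlan-gpe-port); the GPE next-protocol field then selects the inner parser:

    /* proto == 0 or VXLAN_GPE_TYPE_IPV4 -> parse_ipv4(), no inner Ethernet  */
    /* VXLAN_GPE_TYPE_IPV6               -> parse_ipv6(), no inner Ethernet  */
    /* VXLAN_GPE_TYPE_ETH                -> parse_ethernet(), full inner frame */
    /* anything else                     -> left as plain UDP                */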
/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
@@ -203,14 +272,14 @@ parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
struct ipv6_hdr *ipv6_hdr;
uint8_t gre_len = 0;
- /* check which fields are supported */
- if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
- return;
-
gre_len += sizeof(struct simple_gre_hdr);
if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
- gre_len += GRE_KEY_LEN;
+ gre_len += GRE_EXT_LEN;
+ if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT))
+ gre_len += GRE_EXT_LEN;
+ if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
+ gre_len += GRE_EXT_LEN;
if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
info->is_tunnel = 1;
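
GRE parsing no longer rejects flagged headers; the header length now accounts for each optional 4-byte extension, as in this condensed sketch:

    static uint8_t
    gre_hdr_len(uint16_t flags)
    {
        uint8_t len = sizeof(struct simple_gre_hdr);

        if (flags & _htons(GRE_CHECKSUM_PRESENT))
            len += GRE_EXT_LEN;   /* checksum + reserved word */
        if (flags & _htons(GRE_KEY_PRESENT))
            len += GRE_EXT_LEN;
        if (flags & _htons(GRE_SEQUENCE_PRESENT))
            len += GRE_EXT_LEN;
        return len;
    }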
@@ -342,6 +411,8 @@ process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
info->ethertype);
}
}
+ if (info->gso_enable)
+ ol_flags |= PKT_TX_UDP_SEG;
} else if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
tcp_hdr->cksum = 0;
@@ -588,6 +659,10 @@ pkt_copy_split(const struct rte_mbuf *pkt)
* Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
* Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
* UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
+ * UDP|TCP|SCTP
+ * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
+ * UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
* Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
@@ -664,7 +739,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
m = pkts_burst[i];
info.is_tunnel = 0;
info.pkt_len = rte_pktmbuf_pkt_len(m);
- tx_ol_flags = 0;
+ tx_ol_flags = m->ol_flags &
+ (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF);
rx_ol_flags = m->ol_flags;
/* Update the L3/L4 checksum error packet statistics */
@@ -691,9 +767,16 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
info.l3_len);
- parse_vxlan(udp_hdr, &info, m->packet_type);
- if (info.is_tunnel)
- tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
+ parse_vxlan_gpe(udp_hdr, &info);
+ if (info.is_tunnel) {
+ tx_ol_flags |= PKT_TX_TUNNEL_VXLAN_GPE;
+ } else {
+ parse_vxlan(udp_hdr, &info,
+ m->packet_type);
+ if (info.is_tunnel)
+ tx_ol_flags |=
+ PKT_TX_TUNNEL_VXLAN;
+ }
} else if (info.l4_proto == IPPROTO_GRE) {
struct simple_gre_hdr *gre_hdr;
@@ -738,6 +821,7 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
/* step 3: fill the mbuf meta data (flags and header lengths) */
+ m->tx_offload = 0;
if (info.is_tunnel == 1) {
if (info.tunnel_tso_segsz ||
(tx_offloads &
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
index 2adce701..7cac757a 100644
--- a/app/test-pmd/macfwd.c
+++ b/app/test-pmd/macfwd.c
@@ -96,7 +96,8 @@ pkt_burst_mac_forward(struct fwd_stream *fs)
&eth_hdr->d_addr);
ether_addr_copy(&ports[fs->tx_port].eth_addr,
&eth_hdr->s_addr);
- mb->ol_flags = ol_flags;
+ mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+ mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct ether_hdr);
mb->l3_len = sizeof(struct ipv4_hdr);
mb->vlan_tci = txp->tx_vlan_id;
diff --git a/app/test-pmd/macswap.c b/app/test-pmd/macswap.c
index e2cc4812..a8384d5b 100644
--- a/app/test-pmd/macswap.c
+++ b/app/test-pmd/macswap.c
@@ -127,7 +127,8 @@ pkt_burst_mac_swap(struct fwd_stream *fs)
ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
ether_addr_copy(&addr, &eth_hdr->s_addr);
- mb->ol_flags = ol_flags;
+ mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+ mb->ol_flags |= ol_flags;
mb->l2_len = sizeof(struct ether_hdr);
mb->l3_len = sizeof(struct ipv4_hdr);
mb->vlan_tci = txp->tx_vlan_id;
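
macfwd and macswap now preserve the mbuf attachment flags instead of overwriting ol_flags wholesale; the mask-then-set pattern, as a hypothetical helper:

    static inline void
    mbuf_set_ol_flags(struct rte_mbuf *m, uint64_t new_flags)
    {
        /* external/indirect attachment state must survive */
        m->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
        m->ol_flags |= new_flags;
    }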
diff --git a/app/test-pmd/meson.build b/app/test-pmd/meson.build
index 7ed74db2..a0b3be07 100644
--- a/app/test-pmd/meson.build
+++ b/app/test-pmd/meson.build
@@ -1,6 +1,10 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+# override default name to drop the hyphen
+name = 'testpmd'
+allow_experimental_apis = true
+cflags += '-Wno-deprecated-declarations'
sources = files('cmdline.c',
'cmdline_flow.c',
'cmdline_mtr.c',
@@ -22,6 +26,9 @@ deps = ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci']
if dpdk_conf.has('RTE_LIBRTE_PDUMP')
deps += 'pdump'
endif
+if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD')
+ deps += 'pmd_bnxt'
+endif
if dpdk_conf.has('RTE_LIBRTE_I40E_PMD')
deps += 'pmd_i40e'
endif
@@ -29,25 +36,13 @@ if dpdk_conf.has('RTE_LIBRTE_IXGBE_PMD')
deps += 'pmd_ixgbe'
endif
if dpdk_conf.has('RTE_LIBRTE_SOFTNIC_PMD')
- sources += files('tm.c')
+ sources += files('softnicfwd.c')
deps += 'pmd_softnic'
endif
-
-dep_objs = []
-foreach d:deps
- dep_objs += get_variable(get_option('default_library') + '_rte_' + d)
-endforeach
-dep_objs += cc.find_library('execinfo', required: false) # for BSD only
-
-link_libs = []
-if get_option('default_library') == 'static'
- link_libs = dpdk_drivers
+if dpdk_conf.has('RTE_LIBRTE_DPAA_PMD')
+ deps += ['bus_dpaa', 'mempool_dpaa', 'pmd_dpaa']
+endif
+if dpdk_conf.has('RTE_LIBRTE_BPF')
+ sources += files('bpf_cmd.c')
+ deps += 'bpf'
endif
-
-executable('dpdk-testpmd',
- sources,
- c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'],
- link_whole: link_libs,
- dependencies: dep_objs,
- install_rpath: join_paths(get_option('prefix'), driver_install_path),
- install: true)
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 97d22b86..962fad78 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -70,7 +70,7 @@ usage(char* progname)
"--rss-ip | --rss-udp | "
"--rxpt= | --rxht= | --rxwt= | --rxfreet= | "
"--txpt= | --txht= | --txwt= | --txfreet= | "
- "--txrst= | --tx-offloads ]\n",
+ "--txrst= | --tx-offloads= | --vxlan-gpe-port= ]\n",
progname);
#ifdef RTE_LIBRTE_CMDLINE
printf(" --interactive: run in interactive mode.\n");
@@ -186,6 +186,10 @@ usage(char* progname)
printf(" --flow-isolate-all: "
"requests flow API isolated mode on all ports at initialization time.\n");
printf(" --tx-offloads=0xXXXXXXXX: hexadecimal bitmask of TX queue offloads\n");
+ printf(" --hot-plug: enable hot plug for device.\n");
+ printf(" --vxlan-gpe-port=N: UPD port of tunnel VXLAN-GPE\n");
+ printf(" --mlockall: lock all memory\n");
+ printf(" --no-mlockall: do not lock all memory\n");
}
#ifdef RTE_LIBRTE_CMDLINE
@@ -373,7 +377,6 @@ parse_portnuma_config(const char *q_arg)
};
unsigned long int_fld[_NUM_FLD];
char *str_fld[_NUM_FLD];
- portid_t pid;
/* reset from value set at definition */
while ((p = strchr(p0,'(')) != NULL) {
@@ -397,10 +400,7 @@ parse_portnuma_config(const char *q_arg)
port_id = (portid_t)int_fld[FLD_PORT];
if (port_id_is_invalid(port_id, ENABLED_WARN) ||
port_id == (portid_t)RTE_PORT_ALL) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return -1;
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
@@ -431,7 +431,6 @@ parse_ringnuma_config(const char *q_arg)
};
unsigned long int_fld[_NUM_FLD];
char *str_fld[_NUM_FLD];
- portid_t pid;
#define RX_RING_ONLY 0x1
#define TX_RING_ONLY 0x2
#define RXTX_RING 0x3
@@ -458,10 +457,7 @@ parse_ringnuma_config(const char *q_arg)
port_id = (portid_t)int_fld[FLD_PORT];
if (port_id_is_invalid(port_id, ENABLED_WARN) ||
port_id == (portid_t)RTE_PORT_ALL) {
- printf("Valid port range is [0");
- RTE_ETH_FOREACH_DEV(pid)
- printf(", %d", pid);
- printf("]\n");
+ print_valid_ports();
return -1;
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
@@ -512,6 +508,8 @@ parse_event_printing_config(const char *optarg, int enable)
mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET;
else if (!strcmp(optarg, "vf_mbox"))
mask = UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX;
+ else if (!strcmp(optarg, "ipsec"))
+ mask = UINT32_C(1) << RTE_ETH_EVENT_IPSEC;
else if (!strcmp(optarg, "macsec"))
mask = UINT32_C(1) << RTE_ETH_EVENT_MACSEC;
else if (!strcmp(optarg, "intr_rmv"))
@@ -544,6 +542,8 @@ launch_args_parse(int argc, char** argv)
/* Default offloads for all ports. */
uint64_t rx_offloads = rx_mode.offloads;
uint64_t tx_offloads = tx_mode.offloads;
+ struct rte_eth_dev_info dev_info;
+ uint16_t rec_nb_pkts;
static struct option lgopts[] = {
{ "help", 0, 0, 0 },
@@ -621,6 +621,10 @@ launch_args_parse(int argc, char** argv)
{ "print-event", 1, 0, 0 },
{ "mask-event", 1, 0, 0 },
{ "tx-offloads", 1, 0, 0 },
+ { "hot-plug", 0, 0, 0 },
+ { "vxlan-gpe-port", 1, 0, 0 },
+ { "mlockall", 0, 0, 0 },
+ { "no-mlockall", 0, 0, 0 },
{ 0, 0, 0, 0 },
};
@@ -658,9 +662,8 @@ launch_args_parse(int argc, char** argv)
if (!strcmp(lgopts[opt_idx].name, "cmdline-file")) {
printf("CLI commands to be read from %s\n",
optarg);
- snprintf(cmdline_filename,
- sizeof(cmdline_filename), "%s",
- optarg);
+ strlcpy(cmdline_filename, optarg,
+ sizeof(cmdline_filename));
}
if (!strcmp(lgopts[opt_idx].name, "auto-start")) {
printf("Auto-start selected\n");
@@ -875,8 +878,10 @@ launch_args_parse(int argc, char** argv)
" must be >= 0\n", n);
}
#endif
- if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip"))
+ if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip")) {
rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
+ rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+ }
if (!strcmp(lgopts[opt_idx].name, "enable-lro"))
rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO;
if (!strcmp(lgopts[opt_idx].name, "enable-scatter"))
@@ -948,12 +953,38 @@ launch_args_parse(int argc, char** argv)
}
if (!strcmp(lgopts[opt_idx].name, "burst")) {
n = atoi(optarg);
- if ((n >= 1) && (n <= MAX_PKT_BURST))
- nb_pkt_per_burst = (uint16_t) n;
- else
+ if (n == 0) {
+ /* A burst size of zero means that the
+ * PMD should be queried for
+ * recommended Rx burst size. Since
+ * testpmd uses a single size for all
+ * ports, port 0 is queried for the
+ * value, on the assumption that all
+ * ports are of the same NIC model.
+ */
+ rte_eth_dev_info_get(0, &dev_info);
+ rec_nb_pkts = dev_info
+ .default_rxportconf.burst_size;
+
+ if (rec_nb_pkts == 0)
+ rte_exit(EXIT_FAILURE,
+ "PMD does not recommend a burst size. "
+ "Provided value must be between "
+ "1 and %d\n", MAX_PKT_BURST);
+ else if (rec_nb_pkts > MAX_PKT_BURST)
+ rte_exit(EXIT_FAILURE,
+ "PMD recommended burst size of %d"
+ " exceeds maximum value of %d\n",
+ rec_nb_pkts, MAX_PKT_BURST);
+ printf("Using PMD-provided burst value of %d\n",
+ rec_nb_pkts);
+ nb_pkt_per_burst = rec_nb_pkts;
+ } else if (n > MAX_PKT_BURST)
rte_exit(EXIT_FAILURE,
- "burst must >= 1 and <= %d]",
- MAX_PKT_BURST);
+ "burst must be between1 and %d\n",
+ MAX_PKT_BURST);
+ else
+ nb_pkt_per_burst = (uint16_t) n;
}
if (!strcmp(lgopts[opt_idx].name, "mbcache")) {
n = atoi(optarg);
@@ -1092,6 +1123,14 @@ launch_args_parse(int argc, char** argv)
rte_exit(EXIT_FAILURE,
"tx-offloads must be >= 0\n");
}
+ if (!strcmp(lgopts[opt_idx].name, "vxlan-gpe-port")) {
+ n = atoi(optarg);
+ if (n >= 0)
+ vxlan_gpe_udp_port = (uint16_t)n;
+ else
+ rte_exit(EXIT_FAILURE,
+ "vxlan-gpe-port must be >= 0\n");
+ }
if (!strcmp(lgopts[opt_idx].name, "print-event"))
if (parse_event_printing_config(optarg, 1)) {
rte_exit(EXIT_FAILURE,
@@ -1102,7 +1141,12 @@ launch_args_parse(int argc, char** argv)
rte_exit(EXIT_FAILURE,
"invalid mask-event argument\n");
}
-
+ if (!strcmp(lgopts[opt_idx].name, "hot-plug"))
+ hot_plug = 1;
+ if (!strcmp(lgopts[opt_idx].name, "mlockall"))
+ do_mlockall = 1;
+ if (!strcmp(lgopts[opt_idx].name, "no-mlockall"))
+ do_mlockall = 0;
break;
case 'h':
usage(argv[0]);
diff --git a/app/test-pmd/tm.c b/app/test-pmd/softnicfwd.c
index 7231552a..7ff62280 100644
--- a/app/test-pmd/tm.c
+++ b/app/test-pmd/softnicfwd.c
@@ -6,6 +6,7 @@
#include <rte_cycles.h>
#include <rte_mbuf.h>
+#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_meter.h>
@@ -71,170 +72,17 @@ struct tm_hierarchy {
uint32_t n_shapers;
};
-#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr) \
-({ \
- uint64_t slab = *((uint64_t *) &byte_array[slab_pos]); \
- uint64_t val = \
- (rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr; \
- val; \
-})
-
-#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, \
- traffic_class, queue, color) \
- ((((uint64_t) (queue)) & 0x3) | \
- ((((uint64_t) (traffic_class)) & 0x3) << 2) | \
- ((((uint64_t) (color)) & 0x3) << 4) | \
- ((((uint64_t) (subport)) & 0xFFFF) << 16) | \
- ((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
-
-
-static void
-pkt_metadata_set(struct rte_port *p, struct rte_mbuf **pkts,
- uint32_t n_pkts)
-{
- struct softnic_port_tm *tm = &p->softport.tm;
- uint32_t i;
-
- for (i = 0; i < (n_pkts & (~0x3)); i += 4) {
- struct rte_mbuf *pkt0 = pkts[i];
- struct rte_mbuf *pkt1 = pkts[i + 1];
- struct rte_mbuf *pkt2 = pkts[i + 2];
- struct rte_mbuf *pkt3 = pkts[i + 3];
-
- uint8_t *pkt0_data = rte_pktmbuf_mtod(pkt0, uint8_t *);
- uint8_t *pkt1_data = rte_pktmbuf_mtod(pkt1, uint8_t *);
- uint8_t *pkt2_data = rte_pktmbuf_mtod(pkt2, uint8_t *);
- uint8_t *pkt3_data = rte_pktmbuf_mtod(pkt3, uint8_t *);
-
- uint64_t pkt0_subport = BITFIELD(pkt0_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt0_pipe = BITFIELD(pkt0_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt0_dscp = BITFIELD(pkt0_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt0_tc = tm->tm_tc_table[pkt0_dscp & 0x3F] >> 2;
- uint32_t pkt0_tc_q = tm->tm_tc_table[pkt0_dscp & 0x3F] & 0x3;
- uint64_t pkt1_subport = BITFIELD(pkt1_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt1_pipe = BITFIELD(pkt1_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt1_dscp = BITFIELD(pkt1_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt1_tc = tm->tm_tc_table[pkt1_dscp & 0x3F] >> 2;
- uint32_t pkt1_tc_q = tm->tm_tc_table[pkt1_dscp & 0x3F] & 0x3;
-
- uint64_t pkt2_subport = BITFIELD(pkt2_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt2_pipe = BITFIELD(pkt2_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt2_dscp = BITFIELD(pkt2_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt2_tc = tm->tm_tc_table[pkt2_dscp & 0x3F] >> 2;
- uint32_t pkt2_tc_q = tm->tm_tc_table[pkt2_dscp & 0x3F] & 0x3;
-
- uint64_t pkt3_subport = BITFIELD(pkt3_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt3_pipe = BITFIELD(pkt3_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt3_dscp = BITFIELD(pkt3_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt3_tc = tm->tm_tc_table[pkt3_dscp & 0x3F] >> 2;
- uint32_t pkt3_tc_q = tm->tm_tc_table[pkt3_dscp & 0x3F] & 0x3;
-
- uint64_t pkt0_sched = RTE_SCHED_PORT_HIERARCHY(pkt0_subport,
- pkt0_pipe,
- pkt0_tc,
- pkt0_tc_q,
- 0);
- uint64_t pkt1_sched = RTE_SCHED_PORT_HIERARCHY(pkt1_subport,
- pkt1_pipe,
- pkt1_tc,
- pkt1_tc_q,
- 0);
- uint64_t pkt2_sched = RTE_SCHED_PORT_HIERARCHY(pkt2_subport,
- pkt2_pipe,
- pkt2_tc,
- pkt2_tc_q,
- 0);
- uint64_t pkt3_sched = RTE_SCHED_PORT_HIERARCHY(pkt3_subport,
- pkt3_pipe,
- pkt3_tc,
- pkt3_tc_q,
- 0);
-
- pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
- pkt0->hash.sched.hi = pkt0_sched >> 32;
- pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
- pkt1->hash.sched.hi = pkt1_sched >> 32;
- pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
- pkt2->hash.sched.hi = pkt2_sched >> 32;
- pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
- pkt3->hash.sched.hi = pkt3_sched >> 32;
- }
-
- for (; i < n_pkts; i++) {
- struct rte_mbuf *pkt = pkts[i];
-
- uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);
-
- uint64_t pkt_subport = BITFIELD(pkt_data,
- tm->tm_pktfield0_slabpos,
- tm->tm_pktfield0_slabmask,
- tm->tm_pktfield0_slabshr);
- uint64_t pkt_pipe = BITFIELD(pkt_data,
- tm->tm_pktfield1_slabpos,
- tm->tm_pktfield1_slabmask,
- tm->tm_pktfield1_slabshr);
- uint64_t pkt_dscp = BITFIELD(pkt_data,
- tm->tm_pktfield2_slabpos,
- tm->tm_pktfield2_slabmask,
- tm->tm_pktfield2_slabshr);
- uint32_t pkt_tc = tm->tm_tc_table[pkt_dscp & 0x3F] >> 2;
- uint32_t pkt_tc_q = tm->tm_tc_table[pkt_dscp & 0x3F] & 0x3;
-
- uint64_t pkt_sched = RTE_SCHED_PORT_HIERARCHY(pkt_subport,
- pkt_pipe,
- pkt_tc,
- pkt_tc_q,
- 0);
-
- pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
- pkt->hash.sched.hi = pkt_sched >> 32;
- }
-}
+static struct fwd_lcore *softnic_fwd_lcore;
+static uint16_t softnic_port_id;
+struct fwd_engine softnic_fwd_engine;
/*
- * Soft port packet forward
+ * Softnic packet forward
*/
static void
-softport_packet_fwd(struct fwd_stream *fs)
+softnic_fwd(struct fwd_stream *fs)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_port *rte_tx_port = &ports[fs->tx_port];
uint16_t nb_rx;
uint16_t nb_tx;
uint32_t retry;
@@ -258,14 +106,6 @@ softport_packet_fwd(struct fwd_stream *fs)
fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
- if (rte_tx_port->softnic_enable) {
- /* Set packet metadata if tm flag enabled */
- if (rte_tx_port->softport.tm_flag)
- pkt_metadata_set(rte_tx_port, pkts_burst, nb_rx);
-
- /* Softport run */
- rte_pmd_softnic_run(fs->tx_port);
- }
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
pkts_burst, nb_rx);
@@ -298,7 +138,34 @@ softport_packet_fwd(struct fwd_stream *fs)
}
static void
-set_tm_hiearchy_nodes_shaper_rate(portid_t port_id, struct tm_hierarchy *h)
+softnic_fwd_run(struct fwd_stream *fs)
+{
+ rte_pmd_softnic_run(softnic_port_id);
+ softnic_fwd(fs);
+}
+
+/**
+ * Softnic init
+ */
+static int
+softnic_begin(void *arg __rte_unused)
+{
+ for (;;) {
+ if (!softnic_fwd_lcore->stopped)
+ break;
+ }
+
+ do {
+ /* Run softnic */
+ rte_pmd_softnic_run(softnic_port_id);
+ } while (!softnic_fwd_lcore->stopped);
+
+ return 0;
+}
+
+static void
+set_tm_hiearchy_nodes_shaper_rate(portid_t port_id,
+ struct tm_hierarchy *h)
{
struct rte_eth_link link_params;
uint64_t tm_port_rate;
@@ -306,10 +173,7 @@ set_tm_hiearchy_nodes_shaper_rate(portid_t port_id, struct tm_hierarchy *h)
memset(&link_params, 0, sizeof(link_params));
rte_eth_link_get(port_id, &link_params);
- tm_port_rate = (uint64_t)link_params.link_speed * BYTES_IN_MBPS;
-
- if (tm_port_rate > UINT32_MAX)
- tm_port_rate = UINT32_MAX;
+ tm_port_rate = (uint64_t)ETH_SPEED_NUM_10G * BYTES_IN_MBPS;
/* Set tm hierarchy shapers rate */
h->root_node_shaper_rate = tm_port_rate;
@@ -374,7 +238,8 @@ softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h,
}
static int
-softport_tm_subport_node_add(portid_t port_id, struct tm_hierarchy *h,
+softport_tm_subport_node_add(portid_t port_id,
+ struct tm_hierarchy *h,
struct rte_tm_error *error)
{
uint32_t subport_parent_node_id, subport_node_id = 0;
@@ -442,7 +307,8 @@ softport_tm_subport_node_add(portid_t port_id, struct tm_hierarchy *h,
}
static int
-softport_tm_pipe_node_add(portid_t port_id, struct tm_hierarchy *h,
+softport_tm_pipe_node_add(portid_t port_id,
+ struct tm_hierarchy *h,
struct rte_tm_error *error)
{
uint32_t pipe_parent_node_id;
@@ -511,7 +377,8 @@ softport_tm_pipe_node_add(portid_t port_id, struct tm_hierarchy *h,
}
static int
-softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h,
+softport_tm_tc_node_add(portid_t port_id,
+ struct tm_hierarchy *h,
struct rte_tm_error *error)
{
uint32_t tc_parent_node_id;
@@ -674,63 +541,9 @@ softport_tm_queue_node_add(portid_t port_id, struct tm_hierarchy *h,
return 0;
}
-/*
- * TM Packet Field Setup
- */
-static void
-softport_tm_pktfield_setup(portid_t port_id)
-{
- struct rte_port *p = &ports[port_id];
- uint64_t pktfield0_mask = 0;
- uint64_t pktfield1_mask = 0x0000000FFF000000LLU;
- uint64_t pktfield2_mask = 0x00000000000000FCLLU;
-
- p->softport.tm = (struct softnic_port_tm) {
- .n_subports_per_port = SUBPORT_NODES_PER_PORT,
- .n_pipes_per_subport = PIPE_NODES_PER_SUBPORT,
-
- /* Packet field to identify subport
- *
- * Default configuration assumes only one subport, thus
- * the subport ID is hardcoded to 0
- */
- .tm_pktfield0_slabpos = 0,
- .tm_pktfield0_slabmask = pktfield0_mask,
- .tm_pktfield0_slabshr =
- __builtin_ctzll(pktfield0_mask),
-
- /* Packet field to identify pipe.
- *
- * Default value assumes Ethernet/IPv4/UDP packets,
- * UDP payload bits 12 .. 23
- */
- .tm_pktfield1_slabpos = 40,
- .tm_pktfield1_slabmask = pktfield1_mask,
- .tm_pktfield1_slabshr =
- __builtin_ctzll(pktfield1_mask),
-
- /* Packet field used as index into TC translation table
- * to identify the traffic class and queue.
- *
- * Default value assumes Ethernet/IPv4 packets, IPv4
- * DSCP field
- */
- .tm_pktfield2_slabpos = 8,
- .tm_pktfield2_slabmask = pktfield2_mask,
- .tm_pktfield2_slabshr =
- __builtin_ctzll(pktfield2_mask),
-
- .tm_tc_table = {
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
- }, /**< TC translation table */
- };
-}
-
static int
-softport_tm_hierarchy_specify(portid_t port_id, struct rte_tm_error *error)
+softport_tm_hierarchy_specify(portid_t port_id,
+ struct rte_tm_error *error)
{
struct tm_hierarchy h;
@@ -766,75 +579,96 @@ softport_tm_hierarchy_specify(portid_t port_id, struct rte_tm_error *error)
if (status)
return status;
- /* TM packet fields setup */
- softport_tm_pktfield_setup(port_id);
-
return 0;
}
/*
- * Soft port Init
+ * Softnic TM default configuration
*/
static void
-softport_tm_begin(portid_t pi)
+softnic_tm_default_config(portid_t pi)
{
struct rte_port *port = &ports[pi];
+ struct rte_tm_error error;
+ int status;
- /* Soft port TM flag */
- if (port->softport.tm_flag == 1) {
- printf("\n\n TM feature available on port %u\n", pi);
-
- /* Soft port TM hierarchy configuration */
- if ((port->softport.tm.hierarchy_config == 0) &&
- (port->softport.tm.default_hierarchy_enable == 1)) {
- struct rte_tm_error error;
- int status;
-
- /* Stop port */
- rte_eth_dev_stop(pi);
-
- /* TM hierarchy specification */
- status = softport_tm_hierarchy_specify(pi, &error);
- if (status) {
- printf(" TM Hierarchy built error(%d) - %s\n",
- error.type, error.message);
- return;
- }
- printf("\n TM Hierarchy Specified!\n\v");
-
- /* TM hierarchy commit */
- status = rte_tm_hierarchy_commit(pi, 0, &error);
- if (status) {
- printf(" Hierarchy commit error(%d) - %s\n",
- error.type, error.message);
- return;
- }
- printf(" Hierarchy Committed (port %u)!", pi);
- port->softport.tm.hierarchy_config = 1;
-
- /* Start port */
- status = rte_eth_dev_start(pi);
- if (status) {
- printf("\n Port %u start error!\n", pi);
- return;
- }
- printf("\n Port %u started!\n", pi);
- return;
- }
+ /* Stop port */
+ rte_eth_dev_stop(pi);
+
+ /* TM hierarchy specification */
+ status = softport_tm_hierarchy_specify(pi, &error);
+ if (status) {
+ printf(" TM Hierarchy built error(%d) - %s\n",
+ error.type, error.message);
+ return;
+ }
+ printf("\n TM Hierarchy Specified!\n");
+
+ /* TM hierarchy commit */
+ status = rte_tm_hierarchy_commit(pi, 0, &error);
+ if (status) {
+ printf(" Hierarchy commit error(%d) - %s\n",
+ error.type, error.message);
+ return;
+ }
+ printf(" Hierarchy Committed (port %u)!\n", pi);
+
+ /* Start port */
+ status = rte_eth_dev_start(pi);
+ if (status) {
+ printf("\n Port %u start error!\n", pi);
+ return;
}
- printf("\n TM feature not available on port %u", pi);
+
+ /* Reset the default hierarchy flag */
+ port->softport.default_tm_hierarchy_enable = 0;
}
-struct fwd_engine softnic_tm_engine = {
- .fwd_mode_name = "tm",
- .port_fwd_begin = softport_tm_begin,
- .port_fwd_end = NULL,
- .packet_fwd = softport_packet_fwd,
-};
+/*
+ * Softnic forwarding init
+ */
+static void
+softnic_fwd_begin(portid_t pi)
+{
+ struct rte_port *port = &ports[pi];
+ uint32_t lcore, fwd_core_present = 0, softnic_run_launch = 0;
+ int status;
+
+ softnic_fwd_lcore = port->softport.fwd_lcore_arg[0];
+ softnic_port_id = pi;
+
+ /* Launch softnic_run function on lcores */
+ for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) {
+ if (!rte_lcore_is_enabled(lcore))
+ continue;
+
+ if (lcore == rte_get_master_lcore())
+ continue;
+
+ if (fwd_core_present == 0) {
+ fwd_core_present++;
+ continue;
+ }
+
+ status = rte_eal_remote_launch(softnic_begin, NULL, lcore);
+ if (status)
+ printf("softnic launch on lcore %u failed (%d)\n",
+ lcore, status);
+
+ softnic_run_launch = 1;
+ }
+
+ if (!softnic_run_launch)
+ softnic_fwd_engine.packet_fwd = softnic_fwd_run;
+
+ /* Softnic TM default configuration */
+ if (port->softport.default_tm_hierarchy_enable == 1)
+ softnic_tm_default_config(pi);
+}
-struct fwd_engine softnic_tm_bypass_engine = {
- .fwd_mode_name = "tm-bypass",
- .port_fwd_begin = NULL,
+struct fwd_engine softnic_fwd_engine = {
+ .fwd_mode_name = "softnic",
+ .port_fwd_begin = softnic_fwd_begin,
.port_fwd_end = NULL,
- .packet_fwd = softport_packet_fwd,
+ .packet_fwd = softnic_fwd,
};
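For context: testpmd drives every forwarding mode through the struct
fwd_engine callbacks used above, so this rework is essentially a rename
("tm" -> "softnic") plus the new begin hook. A minimal sketch of how such
an engine is defined, assuming the 18.08 testpmd internals (all demo_
names are hypothetical):

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>
    #include "testpmd.h"

    /* Per-port start hook. */
    static void
    demo_fwd_begin(portid_t pi)
    {
            printf("demo engine starting on port %u\n", pi);
    }

    /* Per-stream loop body: bounce each received burst back out. */
    static void
    demo_packet_fwd(struct fwd_stream *fs)
    {
            struct rte_mbuf *pkts[MAX_PKT_BURST];
            uint16_t nb_rx, nb_tx;

            nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
                            pkts, MAX_PKT_BURST);
            if (nb_rx == 0)
                    return;
            nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
                            pkts, nb_rx);
            while (nb_tx < nb_rx)   /* free what could not be sent */
                    rte_pktmbuf_free(pkts[nb_tx++]);
    }

    struct fwd_engine demo_fwd_engine = {
            .fwd_mode_name  = "demo",
            .port_fwd_begin = demo_fwd_begin,
            .port_fwd_end   = NULL,
            .packet_fwd     = demo_packet_fwd,
    };

An engine only becomes selectable ("set fwd <name>") once it is also
added to the fwd_engines[] table, which the testpmd.c hunk below does
for softnic_fwd_engine.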
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 4c0e2586..ee48db2a 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -12,6 +12,7 @@
#include <sys/mman.h>
#include <sys/types.h>
#include <errno.h>
+#include <stdbool.h>
#include <sys/queue.h>
#include <sys/stat.h>
@@ -126,6 +127,8 @@ portid_t nb_ports; /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */
+portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
+
/*
* Test Forwarding Configuration.
* nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
@@ -154,9 +157,8 @@ struct fwd_engine * fwd_engines[] = {
&tx_only_engine,
&csum_fwd_engine,
&icmp_echo_engine,
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
- &softnic_tm_engine,
- &softnic_tm_bypass_engine,
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+ &softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
@@ -210,9 +212,10 @@ queueid_t nb_txq = 1; /**< Number of TX queues per port. */
/*
* Configurable number of RX/TX ring descriptors.
+ * Defaults are supplied by drivers via ethdev.
*/
-#define RTE_TEST_RX_DESC_DEFAULT 1024
-#define RTE_TEST_TX_DESC_DEFAULT 1024
+#define RTE_TEST_RX_DESC_DEFAULT 0
+#define RTE_TEST_TX_DESC_DEFAULT 0
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
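Dropping these defaults to 0 means the ring sizes actually used now come
from each driver. A hedged sketch of inspecting the driver-recommended
sizes through the ethdev info fields available in 18.08 (the demo_
helper is hypothetical; the port is assumed to be probed):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    demo_show_ring_defaults(uint16_t port_id)
    {
            struct rte_eth_dev_info dev_info;

            rte_eth_dev_info_get(port_id, &dev_info);
            /* Passing 0 descriptors to rte_eth_rx_queue_setup() or
             * rte_eth_tx_queue_setup() selects these recommended
             * sizes automatically. */
            printf("port %u: preferred rxd=%u txd=%u\n", port_id,
                    dev_info.default_rxportconf.ring_size,
                    dev_info.default_txportconf.ring_size);
    }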
@@ -284,6 +287,8 @@ uint8_t lsc_interrupt = 1; /* enabled by default */
*/
uint8_t rmv_interrupt = 1; /* enabled by default */
+uint8_t hot_plug = 0; /**< hotplug disabled by default. */
+
/*
* Display or mask ether events
* Default to all events except VF_MBOX
@@ -292,8 +297,13 @@ uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
(UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
(UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
(UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
+ (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
(UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
(UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
+/*
+ * Decide if all memory is locked for performance.
+ */
+int do_mlockall = 0;
/*
* NIC bypass mode configuration options.
@@ -325,7 +335,6 @@ lcoreid_t latencystats_lcore_id = -1;
struct rte_eth_rxmode rx_mode = {
.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
};
struct rte_eth_txmode tx_mode = {
@@ -337,7 +346,7 @@ struct rte_fdir_conf fdir_conf = {
.pballoc = RTE_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.mask = {
- .vlan_tci_mask = 0x0,
+ .vlan_tci_mask = 0xFFEF,
.ipv4_mask = {
.src_ip = 0xFFFFFFFF,
.dst_ip = 0xFFFFFFFF,
@@ -384,6 +393,38 @@ uint8_t bitrate_enabled;
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
+struct vxlan_encap_conf vxlan_encap_conf = {
+ .select_ipv4 = 1,
+ .select_vlan = 0,
+ .vni = "\x00\x00\x00",
+ .udp_src = 0,
+ .udp_dst = RTE_BE16(4789),
+ .ipv4_src = IPv4(127, 0, 0, 1),
+ .ipv4_dst = IPv4(255, 255, 255, 255),
+ .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x01",
+ .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x11\x11",
+ .vlan_tci = 0,
+ .eth_src = "\x00\x00\x00\x00\x00\x00",
+ .eth_dst = "\xff\xff\xff\xff\xff\xff",
+};
+
+struct nvgre_encap_conf nvgre_encap_conf = {
+ .select_ipv4 = 1,
+ .select_vlan = 0,
+ .tni = "\x00\x00\x00",
+ .ipv4_src = IPv4(127, 0, 0, 1),
+ .ipv4_dst = IPv4(255, 255, 255, 255),
+ .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x01",
+ .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x11\x11",
+ .vlan_tci = 0,
+ .eth_src = "\x00\x00\x00\x00\x00\x00",
+ .eth_dst = "\xff\xff\xff\xff\xff\xff",
+};
+
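The vni and tni fields above hold the 24-bit tunnel identifier as
big-endian bytes. A small sketch of filling one from a host-order value
(hypothetical helper operating on the vxlan_encap_conf global above):

    #include <stdint.h>

    static void
    demo_set_vxlan_vni(uint32_t vni)
    {
            /* store the low 24 bits in network byte order */
            vxlan_encap_conf.vni[0] = (vni >> 16) & 0xff;
            vxlan_encap_conf.vni[1] = (vni >> 8) & 0xff;
            vxlan_encap_conf.vni[2] = vni & 0xff;
    }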
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
struct rte_port *port);
@@ -391,6 +432,12 @@ static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
enum rte_eth_event_type type,
void *param, void *ret_param);
+static void eth_dev_event_callback(char *device_name,
+ enum rte_dev_event_type type,
+ void *param);
+static int eth_dev_event_callback_register(void);
+static int eth_dev_event_callback_unregister(void);
+
/*
* Check if all the ports are started.
@@ -656,6 +703,7 @@ init_config(void)
uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
struct rte_gro_param gro_param;
uint32_t gso_types;
+ int k;
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
@@ -690,6 +738,11 @@ init_config(void)
port->dev_conf.txmode = tx_mode;
port->dev_conf.rxmode = rx_mode;
rte_eth_dev_info_get(pid, &port->dev_info);
+
+ if (!(port->dev_info.rx_offload_capa &
+ DEV_RX_OFFLOAD_CRC_STRIP))
+ port->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_CRC_STRIP;
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
@@ -707,6 +760,15 @@ init_config(void)
}
}
+ /* Apply Rx offloads configuration */
+ for (k = 0; k < port->dev_info.max_rx_queues; k++)
+ port->rx_conf[k].offloads =
+ port->dev_conf.rxmode.offloads;
+ /* Apply Tx offloads configuration */
+ for (k = 0; k < port->dev_info.max_tx_queues; k++)
+ port->tx_conf[k].offloads =
+ port->dev_conf.txmode.offloads;
+
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
@@ -747,7 +809,7 @@ init_config(void)
init_port_config();
gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO;
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
@@ -786,6 +848,19 @@ init_config(void)
"rte_gro_ctx_create() failed\n");
}
}
+
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+ if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
+ RTE_ETH_FOREACH_DEV(pid) {
+ port = &ports[pid];
+ const char *driver = port->dev_info.driver_name;
+
+ if (strcmp(driver, "net_softnic") == 0)
+ port->softport.fwd_lcore_arg = fwd_lcores;
+ }
+ }
+#endif
+
}
@@ -871,18 +946,23 @@ init_fwd_streams(void)
/* init new */
nb_fwd_streams = nb_fwd_streams_new;
- fwd_streams = rte_zmalloc("testpmd: fwd_streams",
- sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
- if (fwd_streams == NULL)
- rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
- "failed\n", nb_fwd_streams);
+ if (nb_fwd_streams) {
+ fwd_streams = rte_zmalloc("testpmd: fwd_streams",
+ sizeof(struct fwd_stream *) * nb_fwd_streams,
+ RTE_CACHE_LINE_SIZE);
+ if (fwd_streams == NULL)
+ rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
+ " (struct fwd_stream *)) failed\n",
+ nb_fwd_streams);
- for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
- fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
- sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
- if (fwd_streams[sm_id] == NULL)
- rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
- " failed\n");
+ for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
+ fwd_streams[sm_id] = rte_zmalloc("testpmd:"
+ " struct fwd_stream", sizeof(struct fwd_stream),
+ RTE_CACHE_LINE_SIZE);
+ if (fwd_streams[sm_id] == NULL)
+ rte_exit(EXIT_FAILURE, "rte_zmalloc"
+ "(struct fwd_stream) failed\n");
+ }
}
return 0;
@@ -916,6 +996,9 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
pktnb_stats[1] = pktnb_stats[0];
burst_stats[0] = nb_burst;
pktnb_stats[0] = nb_pkt;
+ } else if (nb_burst > burst_stats[1]) {
+ burst_stats[1] = nb_burst;
+ pktnb_stats[1] = nb_pkt;
}
}
if (total_burst == 0)
@@ -1110,9 +1193,9 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
uint64_t tics_per_1sec;
uint64_t tics_datum;
uint64_t tics_current;
- uint8_t idx_port, cnt_ports;
+ uint16_t i, cnt_ports;
- cnt_ports = rte_eth_dev_count();
+ cnt_ports = nb_ports;
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
@@ -1127,11 +1210,9 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
tics_current = rte_rdtsc();
if (tics_current - tics_datum >= tics_per_1sec) {
/* Periodic bitrate calculation */
- for (idx_port = 0;
- idx_port < cnt_ports;
- idx_port++)
+ for (i = 0; i < cnt_ports; i++)
rte_stats_bitrate_calc(bitrate_data,
- idx_port);
+ ports_ids[i]);
tics_datum = tics_current;
}
}
@@ -1202,6 +1283,31 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
}
/*
+ * Update the forward ports list.
+ */
+void
+update_fwd_ports(portid_t new_pid)
+{
+ unsigned int i;
+ unsigned int new_nb_fwd_ports = 0;
+ int move = 0;
+
+ for (i = 0; i < nb_fwd_ports; ++i) {
+ if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
+ move = 1;
+ else if (move)
+ fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
+ else
+ new_nb_fwd_ports++;
+ }
+ if (new_pid < RTE_MAX_ETHPORTS)
+ fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
+
+ nb_fwd_ports = new_nb_fwd_ports;
+ nb_cfg_ports = new_nb_fwd_ports;
+}
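The sentinel argument matters here: attach_port() below calls
update_fwd_ports() with the new port id so it is appended, while
detach_port() passes RTE_MAX_ETHPORTS so the list is only compacted.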
+
+/*
* Launch packet forwarding configuration.
*/
void
@@ -1236,10 +1342,6 @@ start_packet_forwarding(int with_tx_first)
return;
}
- if (init_fwd_streams() < 0) {
- printf("Fail from init_fwd_streams()\n");
- return;
- }
if(dcb_test) {
for (i = 0; i < nb_fwd_ports; i++) {
@@ -1259,10 +1361,11 @@ start_packet_forwarding(int with_tx_first)
}
test_done = 0;
+ fwd_config_setup();
+
if(!no_flush_rx)
flush_fwd_rx_queues();
- fwd_config_setup();
pkt_fwd_config_display(&cur_fwd_config);
rxtx_config_display();
@@ -1586,20 +1689,19 @@ start_port(portid_t pid)
}
if (port->need_reconfig_queues > 0) {
port->need_reconfig_queues = 0;
- port->tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
- /* Apply Tx offloads configuration */
- port->tx_conf.offloads = port->dev_conf.txmode.offloads;
/* setup tx queues */
for (qi = 0; qi < nb_txq; qi++) {
if ((numa_support) &&
(txring_numa[pi] != NUMA_NO_CONFIG))
diag = rte_eth_tx_queue_setup(pi, qi,
- nb_txd,txring_numa[pi],
- &(port->tx_conf));
+ port->nb_tx_desc[qi],
+ txring_numa[pi],
+ &(port->tx_conf[qi]));
else
diag = rte_eth_tx_queue_setup(pi, qi,
- nb_txd,port->socket_id,
- &(port->tx_conf));
+ port->nb_tx_desc[qi],
+ port->socket_id,
+ &(port->tx_conf[qi]));
if (diag == 0)
continue;
@@ -1610,15 +1712,14 @@ start_port(portid_t pid)
RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set back "
"to stopped\n", pi);
- printf("Fail to configure port %d tx queues\n", pi);
+ printf("Fail to configure port %d tx queues\n",
+ pi);
/* try to reconfigure queues next time */
port->need_reconfig_queues = 1;
return -1;
}
- /* Apply Rx offloads configuration */
- port->rx_conf.offloads = port->dev_conf.rxmode.offloads;
- /* setup rx queues */
for (qi = 0; qi < nb_rxq; qi++) {
+ /* setup rx queues */
if ((numa_support) &&
(rxring_numa[pi] != NUMA_NO_CONFIG)) {
struct rte_mempool * mp =
@@ -1632,8 +1733,10 @@ start_port(portid_t pid)
}
diag = rte_eth_rx_queue_setup(pi, qi,
- nb_rxd,rxring_numa[pi],
- &(port->rx_conf),mp);
+ port->nb_rx_desc[qi],
+ rxring_numa[pi],
+ &(port->rx_conf[qi]),
+ mp);
} else {
struct rte_mempool *mp =
mbuf_pool_find(port->socket_id);
@@ -1645,8 +1748,10 @@ start_port(portid_t pid)
return -1;
}
diag = rte_eth_rx_queue_setup(pi, qi,
- nb_rxd,port->socket_id,
- &(port->rx_conf), mp);
+ port->nb_rx_desc[qi],
+ port->socket_id,
+ &(port->rx_conf[qi]),
+ mp);
}
if (diag == 0)
continue;
@@ -1657,7 +1762,8 @@ start_port(portid_t pid)
RTE_PORT_STOPPED) == 0)
printf("Port %d can not be set back "
"to stopped\n", pi);
- printf("Fail to configure port %d rx queues\n", pi);
+ printf("Fail to configure port %d rx queues\n",
+ pi);
/* try to reconfigure queues next time */
port->need_reconfig_queues = 1;
return -1;
@@ -1853,6 +1959,39 @@ reset_port(portid_t pid)
printf("Done\n");
}
+static int
+eth_dev_event_callback_register(void)
+{
+ int ret;
+
+ /* register the device event callback */
+ ret = rte_dev_event_callback_register(NULL,
+ eth_dev_event_callback, NULL);
+ if (ret) {
+ printf("Failed to register device event callback\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+static int
+eth_dev_event_callback_unregister(void)
+{
+ int ret;
+
+ /* unregister the device event callback */
+ ret = rte_dev_event_callback_unregister(NULL,
+ eth_dev_event_callback, NULL);
+ if (ret < 0) {
+ printf("Failed to unregister device event callback\n");
+ return -1;
+ }
+
+ return 0;
+}
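Passing NULL as the first argument to rte_dev_event_callback_register()
and its unregister counterpart matches events from every device, so a
single handler serves all ports.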
+
void
attach_port(char *identifier)
{
@@ -1876,10 +2015,13 @@ attach_port(char *identifier)
reconfig(pi, socket_id);
rte_eth_promiscuous_enable(pi);
- nb_ports = rte_eth_dev_count();
+ ports_ids[nb_ports] = pi;
+ nb_ports = rte_eth_dev_count_avail();
ports[pi].port_status = RTE_PORT_STOPPED;
+ update_fwd_ports(pi);
+
printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
printf("Done\n");
}
@@ -1888,6 +2030,7 @@ void
detach_port(portid_t port_id)
{
char name[RTE_ETH_NAME_MAX_LEN];
+ uint16_t i;
printf("Detaching a port...\n");
@@ -1900,14 +2043,23 @@ detach_port(portid_t port_id)
port_flow_flush(port_id);
if (rte_eth_dev_detach(port_id, name)) {
- TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
+ TESTPMD_LOG(ERR, "Failed to detach port %u\n", port_id);
return;
}
- nb_ports = rte_eth_dev_count();
+ for (i = 0; i < nb_ports; i++) {
+ if (ports_ids[i] == port_id) {
+ ports_ids[i] = ports_ids[nb_ports-1];
+ ports_ids[nb_ports-1] = 0;
+ break;
+ }
+ }
+ nb_ports = rte_eth_dev_count_avail();
+
+ update_fwd_ports(RTE_MAX_ETHPORTS);
- printf("Port '%s' is detached. Now total ports is %d\n",
- name, nb_ports);
+ printf("Port %u is detached. Now total ports is %d\n",
+ port_id, nb_ports);
printf("Done\n");
return;
}
@@ -1915,7 +2067,9 @@ detach_port(portid_t port_id)
void
pmd_test_exit(void)
{
+ struct rte_device *device;
portid_t pt_id;
+ int ret;
if (test_done == 0)
stop_packet_forwarding();
@@ -1927,8 +2081,33 @@ pmd_test_exit(void)
fflush(stdout);
stop_port(pt_id);
close_port(pt_id);
+
+ /*
+ * This is a workaround to fix a virtio-user issue that
+ * requires calling the clean-up routine to remove the
+ * existing socket.
+ * This workaround is valid only for testpmd; a fix valid
+ * for all applications is still needed.
+ * TODO: Implement proper resource cleanup
+ */
+ device = rte_eth_devices[pt_id].device;
+ if (device && !strcmp(device->driver->name, "net_virtio_user"))
+ detach_port(pt_id);
}
}
+
+ if (hot_plug) {
+ ret = rte_dev_event_monitor_stop();
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "fail to stop device event monitor.");
+
+ ret = eth_dev_event_callback_unregister();
+ if (ret)
+ RTE_LOG(ERR, EAL,
+ "fail to unregister all event callbacks.");
+ }
+
printf("\nBye...\n");
}
@@ -1999,18 +2178,23 @@ check_all_ports_link_status(uint32_t port_mask)
static void
rmv_event_callback(void *arg)
{
- struct rte_eth_dev *dev;
+ int need_to_start = 0;
+ int org_no_link_check = no_link_check;
portid_t port_id = (intptr_t)arg;
RTE_ETH_VALID_PORTID_OR_RET(port_id);
- dev = &rte_eth_devices[port_id];
+ if (!test_done && port_is_forwarding(port_id)) {
+ need_to_start = 1;
+ stop_packet_forwarding();
+ }
+ no_link_check = 1;
stop_port(port_id);
+ no_link_check = org_no_link_check;
close_port(port_id);
- printf("removing device %s\n", dev->device->name);
- if (rte_eal_dev_detach(dev->device))
- TESTPMD_LOG(ERR, "Failed to detach device %s\n",
- dev->device->name);
+ detach_port(port_id);
+ if (need_to_start)
+ start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
@@ -2024,6 +2208,7 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
+ [RTE_ETH_EVENT_IPSEC] = "IPsec",
[RTE_ETH_EVENT_MACSEC] = "MACsec",
[RTE_ETH_EVENT_INTR_RMV] = "device removal",
[RTE_ETH_EVENT_NEW] = "device probed",
@@ -2059,6 +2244,37 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
return 0;
}
+/* This function is used by the interrupt thread */
+static void
+eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
+ __rte_unused void *arg)
+{
+ if (type >= RTE_DEV_EVENT_MAX) {
+ fprintf(stderr, "%s called upon invalid event %d\n",
+ __func__, type);
+ fflush(stderr);
+ }
+
+ switch (type) {
+ case RTE_DEV_EVENT_REMOVE:
+ RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
+ device_name);
+ /* TODO: Once failure handling is finished, stop packet
+ * forwarding, stop the port, close the port and detach
+ * the port.
+ */
+ break;
+ case RTE_DEV_EVENT_ADD:
+ RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
+ device_name);
+ /* TODO: Once kernel driver binding is finished,
+ * attach the port.
+ */
+ break;
+ default:
+ break;
+ }
+}
+
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
@@ -2140,39 +2356,51 @@ map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
static void
rxtx_port_config(struct rte_port *port)
{
- port->rx_conf = port->dev_info.default_rxconf;
- port->tx_conf = port->dev_info.default_txconf;
+ uint16_t qid;
+
+ for (qid = 0; qid < nb_rxq; qid++) {
+ port->rx_conf[qid] = port->dev_info.default_rxconf;
+
+ /* Check if any Rx parameters have been passed */
+ if (rx_pthresh != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
+
+ if (rx_hthresh != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
+
+ if (rx_wthresh != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
- /* Check if any RX/TX parameters have been passed */
- if (rx_pthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_thresh.pthresh = rx_pthresh;
+ if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
- if (rx_hthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_thresh.hthresh = rx_hthresh;
+ if (rx_drop_en != RTE_PMD_PARAM_UNSET)
+ port->rx_conf[qid].rx_drop_en = rx_drop_en;
- if (rx_wthresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_thresh.wthresh = rx_wthresh;
+ port->nb_rx_desc[qid] = nb_rxd;
+ }
- if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_free_thresh = rx_free_thresh;
+ for (qid = 0; qid < nb_txq; qid++) {
+ port->tx_conf[qid] = port->dev_info.default_txconf;
- if (rx_drop_en != RTE_PMD_PARAM_UNSET)
- port->rx_conf.rx_drop_en = rx_drop_en;
+ /* Check if any Tx parameters have been passed */
+ if (tx_pthresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
- if (tx_pthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_thresh.pthresh = tx_pthresh;
+ if (tx_hthresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
- if (tx_hthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_thresh.hthresh = tx_hthresh;
+ if (tx_wthresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
- if (tx_wthresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_thresh.wthresh = tx_wthresh;
+ if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
- if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_rs_thresh = tx_rs_thresh;
+ if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
+ port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
- if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
- port->tx_conf.tx_free_thresh = tx_free_thresh;
+ port->nb_tx_desc[qid] = nb_txd;
+ }
}
void
@@ -2184,9 +2412,11 @@ init_port_config(void)
RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
port->dev_conf.fdir_conf = fdir_conf;
+ rte_eth_dev_info_get(pid, &port->dev_info);
if (nb_rxq > 1) {
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
- port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
+ port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+ rss_hf & port->dev_info.flow_type_rss_offloads;
} else {
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
@@ -2216,17 +2446,6 @@ init_port_config(void)
(rte_eth_devices[pid].data->dev_flags &
RTE_ETH_DEV_INTR_RMV))
port->dev_conf.intr_conf.rmv = 1;
-
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
- /* Detect softnic port */
- if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
- port->softnic_enable = 1;
- memset(&port->softport, 0, sizeof(struct softnic_port));
-
- if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
- port->softport.tm_flag = 1;
- }
-#endif
}
}
@@ -2251,7 +2470,10 @@ uint8_t port_is_bonding_slave(portid_t slave_pid)
struct rte_port *port;
port = &ports[slave_pid];
- return port->slave_flag;
+ if ((rte_eth_devices[slave_pid].data->dev_flags &
+ RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
+ return 1;
+ return 0;
}
const uint16_t vlan_tags[] = {
@@ -2262,12 +2484,14 @@ const uint16_t vlan_tags[] = {
};
static int
-get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
enum dcb_mode_enable dcb_mode,
enum rte_eth_nb_tcs num_tcs,
uint8_t pfc_en)
{
uint8_t i;
+ int32_t rc;
+ struct rte_eth_rss_conf rss_conf;
/*
* Builds up the correct configuration for dcb+vt based on the vlan tags array
@@ -2307,6 +2531,10 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
struct rte_eth_dcb_tx_conf *tx_conf =
&eth_conf->tx_adv_conf.dcb_tx_conf;
+ rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
+ if (rc != 0)
+ return rc;
+
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
@@ -2314,8 +2542,9 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
rx_conf->dcb_tc[i] = i % num_tcs;
tx_conf->dcb_tc[i] = i % num_tcs;
}
+
eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
- eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
+ eth_conf->rx_adv_conf.rss_conf = rss_conf;
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
}
@@ -2349,17 +2578,13 @@ init_port_dcb_config(portid_t pid,
port_conf.txmode = rte_port->dev_conf.txmode;
/*set configuration of DCB in vt mode and DCB in non-vt mode*/
- retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
+ retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
if (retval < 0)
return retval;
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
- /**
- * Write the configuration into the device.
- * Set the numbers of RX & TX queues to 0, so
- * the RX & TX queues will not be setup.
- */
- rte_eth_dev_configure(pid, 0, 0, &port_conf);
+ /* Re-configure the device. */
+ rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
rte_eth_dev_info_get(pid, &rte_port->dev_info);
@@ -2474,8 +2699,10 @@ signal_handler(int signum)
int
main(int argc, char** argv)
{
- int diag;
+ int diag;
portid_t port_id;
+ uint16_t count;
+ int ret;
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
@@ -2489,17 +2716,17 @@ main(int argc, char** argv)
rte_panic("Cannot register log type");
rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
- if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
- TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
- strerror(errno));
- }
-
#ifdef RTE_LIBRTE_PDUMP
/* initialize packet capture framework */
rte_pdump_init(NULL);
#endif
- nb_ports = (portid_t) rte_eth_dev_count();
+ count = 0;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ ports_ids[count] = port_id;
+ count++;
+ }
+ nb_ports = (portid_t) count;
if (nb_ports == 0)
TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
@@ -2519,11 +2746,23 @@ main(int argc, char** argv)
latencystats_enabled = 0;
#endif
+ /* on FreeBSD, mlockall() is disabled by default */
+#ifdef RTE_EXEC_ENV_BSDAPP
+ do_mlockall = 0;
+#else
+ do_mlockall = 1;
+#endif
+
argc -= diag;
argv += diag;
if (argc > 1)
launch_args_parse(argc, argv);
+ if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
+ TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
+ strerror(errno));
+ }
+
if (tx_first && interactive)
rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
"interactive mode.\n");
@@ -2543,6 +2782,18 @@ main(int argc, char** argv)
nb_rxq, nb_txq);
init_config();
+
+ if (hot_plug) {
+ /* enable hot plug monitoring */
+ ret = rte_dev_event_monitor_start();
+ if (ret) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ eth_dev_event_callback_register();
+
+ }
+
if (start_port(RTE_PORT_ALL) != 0)
rte_exit(EXIT_FAILURE, "Start ports failed\n");
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 153abea0..a1f66147 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -57,10 +57,10 @@ typedef uint16_t streamid_t;
#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
-#define TM_MODE 1
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+#define SOFTNIC 1
#else
-#define TM_MODE 0
+#define SOFTNIC 0
#endif
enum {
@@ -79,6 +79,19 @@ struct pkt_burst_stats {
};
#endif
+/** Information for a given RSS type. */
+struct rss_type_info {
+ const char *str; /**< Type name. */
+ uint64_t rss_type; /**< Type value. */
+};
+
+/**
+ * RSS type information table.
+ *
+ * An entry with a NULL type name terminates the list.
+ */
+extern const struct rss_type_info rss_type_table[];
+
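Because the table is NULL-terminated, a name lookup is a linear scan; a
minimal sketch (the demo_ helper is hypothetical):

    #include <stdint.h>
    #include <string.h>

    static uint64_t
    demo_rss_type_from_str(const char *name)
    {
            unsigned int i;

            /* walk the table up to the NULL name terminator */
            for (i = 0; rss_type_table[i].str != NULL; i++)
                    if (strcmp(rss_type_table[i].str, name) == 0)
                            return rss_type_table[i].rss_type;
            return 0;   /* unknown name */
    }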
/**
* The data structure associated with a forwarding stream between a receive
* port/queue and a transmit port/queue.
@@ -122,35 +135,13 @@ struct port_flow {
uint8_t data[]; /**< Storage for pattern/actions. */
};
-#ifdef TM_MODE
-/**
- * Soft port tm related parameters
- */
-struct softnic_port_tm {
- uint32_t default_hierarchy_enable; /**< def hierarchy enable flag */
- uint32_t hierarchy_config; /**< set to 1 if hierarchy configured */
-
- uint32_t n_subports_per_port; /**< Num of subport nodes per port */
- uint32_t n_pipes_per_subport; /**< Num of pipe nodes per subport */
-
- uint64_t tm_pktfield0_slabpos; /**< Pkt field position for subport */
- uint64_t tm_pktfield0_slabmask; /**< Pkt field mask for the subport */
- uint64_t tm_pktfield0_slabshr;
- uint64_t tm_pktfield1_slabpos; /**< Pkt field position for the pipe */
- uint64_t tm_pktfield1_slabmask; /**< Pkt field mask for the pipe */
- uint64_t tm_pktfield1_slabshr;
- uint64_t tm_pktfield2_slabpos; /**< Pkt field position table index */
- uint64_t tm_pktfield2_slabmask; /**< Pkt field mask for tc table idx */
- uint64_t tm_pktfield2_slabshr;
- uint64_t tm_tc_table[64]; /**< TC translation table */
-};
-
+#ifdef SOFTNIC
/**
* The data structure associated with a softnic port
*/
struct softnic_port {
- unsigned int tm_flag; /**< set to 1 if tm feature is enabled */
- struct softnic_port_tm tm; /**< softnic port tm parameters */
+ uint32_t default_tm_hierarchy_enable; /**< default tm hierarchy */
+ struct fwd_lcore **fwd_lcore_arg; /**< softnic fwd core parameters */
};
#endif
@@ -181,15 +172,16 @@ struct rte_port {
uint8_t need_reconfig_queues; /**< need reconfiguring queues or not */
uint8_t rss_flag; /**< enable rss or not */
uint8_t dcb_flag; /**< enable dcb */
- struct rte_eth_rxconf rx_conf; /**< rx configuration */
- struct rte_eth_txconf tx_conf; /**< tx configuration */
+ uint16_t nb_rx_desc[MAX_QUEUE_ID+1]; /**< per queue rx desc number */
+ uint16_t nb_tx_desc[MAX_QUEUE_ID+1]; /**< per queue tx desc number */
+ struct rte_eth_rxconf rx_conf[MAX_QUEUE_ID+1]; /**< per queue rx configuration */
+ struct rte_eth_txconf tx_conf[MAX_QUEUE_ID+1]; /**< per queue tx configuration */
struct ether_addr *mc_addr_pool; /**< pool of multicast addrs */
uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */
uint8_t slave_flag; /**< bonding slave port */
struct port_flow *flow_list; /**< Associated flows. */
-#ifdef TM_MODE
- unsigned int softnic_enable; /**< softnic flag */
- struct softnic_port softport; /**< softnic port params */
+#ifdef SOFTNIC
+ struct softnic_port softport; /**< softnic params */
#endif
};
@@ -251,9 +243,8 @@ extern struct fwd_engine rx_only_engine;
extern struct fwd_engine tx_only_engine;
extern struct fwd_engine csum_fwd_engine;
extern struct fwd_engine icmp_echo_engine;
-#ifdef TM_MODE
-extern struct fwd_engine softnic_tm_engine;
-extern struct fwd_engine softnic_tm_bypass_engine;
+#ifdef SOFTNIC
+extern struct fwd_engine softnic_fwd_engine;
#endif
#ifdef RTE_LIBRTE_IEEE1588
extern struct fwd_engine ieee1588_fwd_engine;
@@ -320,6 +311,8 @@ extern uint8_t lsc_interrupt; /**< disabled by "--no-lsc-interrupt" parameter */
extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */
extern uint32_t event_print_mask;
/**< set by "--print-event xxxx" and "--mask-event xxxx parameters */
+extern uint8_t hot_plug; /**< enabled by "--hot-plug" parameter */
+extern int do_mlockall; /**< set by "--mlockall" or "--no-mlockall" parameter */
#ifdef RTE_LIBRTE_IXGBE_BYPASS
extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */
@@ -433,6 +426,8 @@ extern uint32_t retry_enabled;
extern struct fwd_lcore **fwd_lcores;
extern struct fwd_stream **fwd_streams;
+extern uint16_t vxlan_gpe_udp_port; /**< UDP port of tunnel VXLAN-GPE. */
+
extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */
extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
@@ -460,6 +455,38 @@ struct gso_status {
extern struct gso_status gso_ports[RTE_MAX_ETHPORTS];
extern uint16_t gso_max_segment_size;
+/* VXLAN encap/decap parameters. */
+struct vxlan_encap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+ uint8_t vni[3];
+ rte_be16_t udp_src;
+ rte_be16_t udp_dst;
+ rte_be32_t ipv4_src;
+ rte_be32_t ipv4_dst;
+ uint8_t ipv6_src[16];
+ uint8_t ipv6_dst[16];
+ rte_be16_t vlan_tci;
+ uint8_t eth_src[ETHER_ADDR_LEN];
+ uint8_t eth_dst[ETHER_ADDR_LEN];
+};
+struct vxlan_encap_conf vxlan_encap_conf;
+
+/* NVGRE encap/decap parameters. */
+struct nvgre_encap_conf {
+ uint32_t select_ipv4:1;
+ uint32_t select_vlan:1;
+ uint8_t tni[3];
+ rte_be32_t ipv4_src;
+ rte_be32_t ipv4_dst;
+ uint8_t ipv6_src[16];
+ uint8_t ipv6_dst[16];
+ rte_be16_t vlan_tci;
+ uint8_t eth_src[ETHER_ADDR_LEN];
+ uint8_t eth_dst[ETHER_ADDR_LEN];
+};
+struct nvgre_encap_conf nvgre_encap_conf;
+
static inline unsigned int
lcore_num(void)
{
@@ -500,12 +527,25 @@ mbuf_pool_find(unsigned int sock_id)
static inline uint32_t
port_pci_reg_read(struct rte_port *port, uint32_t reg_off)
{
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus;
void *reg_addr;
uint32_t reg_v;
- reg_addr = (void *)
- ((char *)port->dev_info.pci_dev->mem_resource[0].addr +
- reg_off);
+ if (!port->dev_info.device) {
+ printf("Invalid device\n");
+ return 0;
+ }
+
+ bus = rte_bus_find_by_device(port->dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(port->dev_info.device);
+ } else {
+ printf("Not a PCI device\n");
+ return 0;
+ }
+
+ reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off);
reg_v = *((volatile uint32_t *)reg_addr);
return rte_le_to_cpu_32(reg_v);
}
@@ -516,11 +556,24 @@ port_pci_reg_read(struct rte_port *port, uint32_t reg_off)
static inline void
port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v)
{
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus;
void *reg_addr;
- reg_addr = (void *)
- ((char *)port->dev_info.pci_dev->mem_resource[0].addr +
- reg_off);
+ if (!port->dev_info.device) {
+ printf("Invalid device\n");
+ return;
+ }
+
+ bus = rte_bus_find_by_device(port->dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(port->dev_info.device);
+ } else {
+ printf("Not a PCI device\n");
+ return;
+ }
+
+ reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off);
*((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v);
}
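With the bus check in place, both accessors now fail safely on non-PCI
devices instead of dereferencing a stale pci_dev pointer. A sketch of a
read-modify-write built on them (hypothetical helper; the offset and bit
are illustrative, not from any datasheet):

    static void
    demo_set_reg_bit(struct rte_port *port, uint32_t off, uint32_t bit)
    {
            /* read-modify-write through the checked BAR 0 accessors */
            uint32_t v = port_pci_reg_read(port, off);

            port_pci_reg_write(port, off, v | bit);
    }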
@@ -551,6 +604,7 @@ void fwd_config_setup(void);
void set_def_fwd_config(void);
void reconfig(portid_t new_port_id, unsigned socket_id);
int init_fwd_streams(void);
+void update_fwd_ports(portid_t new_pid);
void set_fwd_eth_peer(portid_t port_id, char *peer_addr);
@@ -575,7 +629,7 @@ int port_flow_create(portid_t port_id,
int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule);
int port_flow_flush(portid_t port_id);
int port_flow_query(portid_t port_id, uint32_t rule,
- enum rte_flow_action_type action);
+ const struct rte_flow_action *action);
void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group);
int port_flow_isolate(portid_t port_id, int set);
@@ -681,6 +735,7 @@ enum print_warning {
DISABLED_WARN
};
int port_id_is_invalid(portid_t port_id, enum print_warning warning);
+void print_valid_ports(void);
int new_socket_id(unsigned int socket_id);
queueid_t get_allowed_max_nb_rxq(portid_t *pid);
diff --git a/buildtools/Makefile b/buildtools/Makefile
index 35a42ff5..7f76fd7d 100644
--- a/buildtools/Makefile
+++ b/buildtools/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Neil Horman. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Neil Horman <nhorman@tuxdriver.com>
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/buildtools/auto-config-h.sh b/buildtools/auto-config-h.sh
index cb8bce9b..d28a5c3a 100755
--- a/buildtools/auto-config-h.sh
+++ b/buildtools/auto-config-h.sh
@@ -1,34 +1,6 @@
#!/bin/sh
-#
-# BSD LICENSE
-#
-# Copyright 2014-2015 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2014-2015 6WIND S.A.
#
# Crude script to detect whether particular types, macros and functions are
# defined by trying to compile a file with a given header. Can be used to
diff --git a/buildtools/pmdinfogen/Makefile b/buildtools/pmdinfogen/Makefile
index bf07b6f2..a97a7648 100644
--- a/buildtools/pmdinfogen/Makefile
+++ b/buildtools/pmdinfogen/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 Neil Horman. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Neil Horman <nhorman@tuxdriver.com>
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -41,7 +14,7 @@ HOSTAPP = dpdk-pmdinfogen
#
SRCS-y += pmdinfogen.c
-HOST_CFLAGS += $(WERROR_FLAGS) -g
+HOST_CFLAGS += $(HOST_WERROR_FLAGS) -g
HOST_CFLAGS += -I$(RTE_OUTPUT)/include
include $(RTE_SDK)/mk/rte.hostapp.mk
diff --git a/config/arm/arm64_dpaa2_linuxapp_gcc b/config/arm/arm64_dpaa2_linuxapp_gcc
new file mode 100644
index 00000000..7ec74ec4
--- /dev/null
+++ b/config/arm/arm64_dpaa2_linuxapp_gcc
@@ -0,0 +1,15 @@
+[binaries]
+c = 'aarch64-linux-gnu-gcc'
+cpp = 'aarch64-linux-gnu-cpp'
+ar = 'aarch64-linux-gnu-ar'
+as = 'aarch64-linux-gnu-as'
+strip = 'aarch64-linux-gnu-strip'
+
+[host_machine]
+system = 'linux'
+cpu_family = 'aarch64'
+cpu = 'armv8-a'
+endian = 'little'
+
+[properties]
+implementor_id = 'dpaa2'
diff --git a/config/arm/arm64_dpaa_linuxapp_gcc b/config/arm/arm64_dpaa_linuxapp_gcc
new file mode 100644
index 00000000..73a8f0b8
--- /dev/null
+++ b/config/arm/arm64_dpaa_linuxapp_gcc
@@ -0,0 +1,15 @@
+[binaries]
+c = 'aarch64-linux-gnu-gcc'
+cpp = 'aarch64-linux-gnu-cpp'
+ar = 'aarch64-linux-gnu-ar'
+as = 'aarch64-linux-gnu-as'
+strip = 'aarch64-linux-gnu-strip'
+
+[host_machine]
+system = 'linux'
+cpu_family = 'aarch64'
+cpu = 'armv8-a'
+endian = 'little'
+
+[properties]
+implementor_id = 'dpaa'
diff --git a/config/arm/arm64_thunderx_linuxapp_gcc b/config/arm/arm64_thunderx_linuxapp_gcc
index 7ff34af7..967d9d46 100644
--- a/config/arm/arm64_thunderx_linuxapp_gcc
+++ b/config/arm/arm64_thunderx_linuxapp_gcc
@@ -2,6 +2,7 @@
c = 'aarch64-linux-gnu-gcc'
cpp = 'aarch64-linux-gnu-cpp'
ar = 'aarch64-linux-gnu-gcc-ar'
+strip = 'aarch64-linux-gnu-strip'
[host_machine]
system = 'linux'
diff --git a/config/arm/meson.build b/config/arm/meson.build
index 4e788a4e..40dbc87f 100644
--- a/config/arm/meson.build
+++ b/config/arm/meson.build
@@ -8,7 +8,7 @@ march_opt = '-march=@0@'.format(machine)
arm_force_native_march = false
machine_args_generic = [
- ['default', ['-march=armv8-a']],
+ ['default', ['-march=armv8-a+crc+crypto']],
['native', ['-march=native']],
['0xd03', ['-mcpu=cortex-a53']],
['0xd04', ['-mcpu=cortex-a35']],
@@ -54,6 +54,17 @@ flags_cavium = [
['RTE_MAX_LCORE', 96],
['RTE_MAX_VFIO_GROUPS', 128],
['RTE_RING_USE_C11_MEM_MODEL', false]]
+flags_dpaa = [
+ ['RTE_MACHINE', '"dpaa"'],
+ ['RTE_CACHE_LINE_SIZE', 64],
+ ['RTE_MAX_NUMA_NODES', 1],
+ ['RTE_MAX_LCORE', 16]]
+flags_dpaa2 = [
+ ['RTE_MACHINE', '"dpaa2"'],
+ ['RTE_CACHE_LINE_SIZE', 64],
+ ['RTE_MAX_NUMA_NODES', 1],
+ ['RTE_MAX_LCORE', 16],
+ ['RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', false]]
## Arm implementer ID (ARM DDI 0487C.a, Section G7.2.106, Page G7-5321)
impl_generic = ['Generic armv8', flags_generic, machine_args_generic]
@@ -69,15 +80,8 @@ impl_0x51 = ['Qualcomm', flags_generic, machine_args_generic]
impl_0x53 = ['Samsung', flags_generic, machine_args_generic]
impl_0x56 = ['Marvell', flags_generic, machine_args_generic]
impl_0x69 = ['Intel', flags_generic, machine_args_generic]
-
-
-if cc.get_define('__clang__') != ''
- dpdk_conf.set_quoted('RTE_TOOLCHAIN', 'clang')
- dpdk_conf.set('RTE_TOOLCHAIN_CLANG', 1)
-else
- dpdk_conf.set_quoted('RTE_TOOLCHAIN', 'gcc')
- dpdk_conf.set('RTE_TOOLCHAIN_GCC', 1)
-endif
+impl_dpaa = ['NXP DPAA', flags_dpaa, machine_args_generic]
+impl_dpaa2 = ['NXP DPAA2', flags_dpaa2, machine_args_generic]
dpdk_conf.set('RTE_FORCE_INTRINSICS', 1)
diff --git a/config/common_armv8a_linuxapp b/config/common_armv8a_linuxapp
index 507b28a8..111c0056 100644
--- a/config/common_armv8a_linuxapp
+++ b/config/common_armv8a_linuxapp
@@ -36,61 +36,3 @@ CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_AVP_PMD=n
CONFIG_RTE_SCHED_VECTOR=n
-
-#
-# ARMv8 Specific driver compilation flags
-#
-
-#
-# Compile NXP DPAA Bus
-#
-CONFIG_RTE_LIBRTE_DPAA_BUS=y
-CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
-
-#
-# Compile NXP DPAA2 FSL-MC Bus
-#
-CONFIG_RTE_LIBRTE_FSLMC_BUS=y
-
-#
-# Compile NXP DPAA Mempool
-#
-CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=y
-
-#
-# Compile NXP DPAA2 Mempool
-#
-CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=y
-
-#
-# Compile bust-oriented NXP DPAA PMD
-#
-CONFIG_RTE_LIBRTE_DPAA_PMD=y
-
-#
-# Compile burst-oriented NXP DPAA2 PMD driver
-#
-CONFIG_RTE_LIBRTE_DPAA2_PMD=y
-
-#
-# Compile schedule-oriented NXP DPAA Event Dev PMD
-#
-CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV=y
-
-#
-# Compile schedule-oriented NXP DPAA2 EVENTDEV driver
-#
-CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=y
-
-#
-# Compile NXP DPAA caam - crypto driver
-#
-CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=y
-CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
-CONFIG_RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS=2048
-
-#
-# Compile NXP DPAA2 crypto sec driver for CAAM HW
-#
-CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=y
-CONFIG_RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS=2048
diff --git a/config/common_base b/config/common_base
index ad03cf43..4bcbaf92 100644
--- a/config/common_base
+++ b/config/common_base
@@ -61,7 +61,20 @@ CONFIG_RTE_CACHE_LINE_SIZE=64
CONFIG_RTE_LIBRTE_EAL=y
CONFIG_RTE_MAX_LCORE=128
CONFIG_RTE_MAX_NUMA_NODES=8
-CONFIG_RTE_MAX_MEMSEG=256
+CONFIG_RTE_MAX_MEMSEG_LISTS=64
+# each memseg list will be limited to either RTE_MAX_MEMSEG_PER_LIST pages
+# or RTE_MAX_MEM_MB_PER_LIST megabytes worth of memory, whichever is smaller
+CONFIG_RTE_MAX_MEMSEG_PER_LIST=8192
+CONFIG_RTE_MAX_MEM_MB_PER_LIST=32768
+# a "type" is a combination of page size and NUMA node. total number of memseg
+# lists per type will be limited to either RTE_MAX_MEMSEG_PER_TYPE pages (split
+# over multiple lists of RTE_MAX_MEMSEG_PER_LIST pages), or
+# RTE_MAX_MEM_MB_PER_TYPE megabytes of memory (split over multiple lists of
+# RTE_MAX_MEM_MB_PER_LIST), whichever is smaller
+CONFIG_RTE_MAX_MEMSEG_PER_TYPE=32768
+CONFIG_RTE_MAX_MEM_MB_PER_TYPE=131072
+# global maximum usable amount of VA, in megabytes
+CONFIG_RTE_MAX_MEM_MB=524288
CONFIG_RTE_MAX_MEMZONE=2560
CONFIG_RTE_MAX_TAILQ=32
CONFIG_RTE_ENABLE_ASSERT=n
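To see how the two caps interact: with 2 MB hugepages a full list would
hold 8192 pages * 2 MB = 16384 MB, so RTE_MAX_MEMSEG_PER_LIST binds
before the 32768 MB RTE_MAX_MEM_MB_PER_LIST ceiling; with 1 GB pages the
megabyte cap binds instead, limiting a list to 32768 MB / 1024 MB = 32
pages.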
@@ -74,8 +87,10 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MAX_VFIO_GROUPS=64
+CONFIG_RTE_MAX_VFIO_CONTAINERS=64
CONFIG_RTE_MALLOC_DEBUG=n
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_USE_LIBBSD=n
#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
@@ -124,6 +139,11 @@ CONFIG_RTE_ETHDEV_PROFILE_ITT_WASTED_RX_ITERATIONS=n
CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n
#
+# Compile the Intel FPGA bus
+#
+CONFIG_RTE_LIBRTE_IFPGA_BUS=y
+
+#
# Compile PCI bus driver
#
CONFIG_RTE_LIBRTE_PCI_BUS=y
@@ -144,6 +164,12 @@ CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n
#
+# Compile AMD PMD
+#
+CONFIG_RTE_LIBRTE_AXGBE_PMD=y
+CONFIG_RTE_LIBRTE_AXGBE_PMD_DEBUG=n
+
+#
# Compile burst-oriented Broadcom PMD driver
#
CONFIG_RTE_LIBRTE_BNX2X_PMD=n
@@ -172,6 +198,7 @@ CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
CONFIG_RTE_LIBRTE_DPAA_BUS=n
CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
CONFIG_RTE_LIBRTE_DPAA_PMD=n
+CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
#
# Compile NXP DPAA2 FSL-MC Bus
@@ -188,11 +215,7 @@ CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
# Compile burst-oriented NXP DPAA2 PMD driver
#
CONFIG_RTE_LIBRTE_DPAA2_PMD=n
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n
CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n
#
# Compile burst-oriented Amazon ENA PMD driver
@@ -241,8 +264,6 @@ CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
-# interval up to 8160 us, aligned to 2 (or default value)
-CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL=-1
#
# Compile burst-oriented FM10K PMD
@@ -270,15 +291,14 @@ CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_MLX4_PMD=n
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS=n
-CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8
#
-# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD
+# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield
+# (MLX5) PMD
#
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=n
-CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8
#
# Compile burst-oriented Netronome NFP PMD driver
@@ -306,11 +326,6 @@ CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
# Compile software PMD backed by SZEDATA2 device
#
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
-#
-# Defines firmware type address space.
-# See documentation for supported values.
-# Other values raise compile time error.
-CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS=0
#
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
@@ -382,7 +397,20 @@ CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y
#
# Compile Marvell PMD driver
#
-CONFIG_RTE_LIBRTE_MRVL_PMD=n
+CONFIG_RTE_LIBRTE_MVPP2_PMD=n
+
+#
+# Compile support for VMBus library
+#
+CONFIG_RTE_LIBRTE_VMBUS=n
+
+#
+# Compile native PMD for Hyper-V/Azure
+#
+CONFIG_RTE_LIBRTE_NETVSC_PMD=n
+CONFIG_RTE_LIBRTE_NETVSC_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_NETVSC_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_NETVSC_DEBUG_DUMP=n
#
# Compile virtual device driver for NetVSC on Hyper-V/Azure
@@ -409,7 +437,7 @@ CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16
#
# Compile SOFTNIC PMD
#
-CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y
+CONFIG_RTE_LIBRTE_PMD_SOFTNIC=n
#
# Compile the TAP PMD
@@ -427,6 +455,7 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
#
CONFIG_RTE_LIBRTE_BBDEV=y
CONFIG_RTE_BBDEV_MAX_DEVS=128
+CONFIG_RTE_BBDEV_OFFLOAD_COST=n
#
# Compile PMD for NULL bbdev device
@@ -442,7 +471,6 @@ CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n
# Compile generic crypto device library
#
CONFIG_RTE_LIBRTE_CRYPTODEV=y
-CONFIG_RTE_LIBRTE_CRYPTODEV_DEBUG=n
CONFIG_RTE_CRYPTO_MAX_DEVS=64
#
@@ -455,49 +483,47 @@ CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n
# Compile NXP DPAA2 crypto sec driver for CAAM HW
#
CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n
-CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n
-CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n
#
# NXP DPAA caam - crypto driver
#
CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n
-CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n
-CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
#
-# Compile PMD for QuickAssist based devices
+# Compile PMD for QuickAssist based devices - see docs for details
#
-CONFIG_RTE_LIBRTE_PMD_QAT=n
-CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_TX=n
-CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER=n
+CONFIG_RTE_LIBRTE_PMD_QAT=y
+CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
#
-# Number of sessions to create in the session memory pool
-# on a single QuickAssist device.
+# Max. number of QuickAssist devices that can be detected and attached
#
-CONFIG_RTE_QAT_PMD_MAX_NB_SESSIONS=2048
+CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
+CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
+
+#
+# Compile PMD for virtio crypto devices
+#
+CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=y
+#
+# Number of maximum virtio crypto devices
+#
+CONFIG_RTE_MAX_VIRTIO_CRYPTO=32
#
# Compile PMD for AESNI backed device
#
CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
-CONFIG_RTE_LIBRTE_PMD_AESNI_MB_DEBUG=n
#
# Compile PMD for Software backed device
#
CONFIG_RTE_LIBRTE_PMD_OPENSSL=n
-CONFIG_RTE_LIBRTE_PMD_OPENSSL_DEBUG=n
#
# Compile PMD for AESNI GCM device
#
CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
-CONFIG_RTE_LIBRTE_PMD_AESNI_GCM_DEBUG=n
#
# Compile PMD for SNOW 3G device
@@ -509,19 +535,15 @@ CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n
# Compile PMD for KASUMI device
#
CONFIG_RTE_LIBRTE_PMD_KASUMI=n
-CONFIG_RTE_LIBRTE_PMD_KASUMI_DEBUG=n
#
# Compile PMD for ZUC device
#
CONFIG_RTE_LIBRTE_PMD_ZUC=n
-CONFIG_RTE_LIBRTE_PMD_ZUC_DEBUG=n
-#
# Compile PMD for Crypto Scheduler device
#
CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=y
-CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
#
# Compile PMD for NULL Crypto device
@@ -529,10 +551,15 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
#
+# Compile PMD for AMD CCP crypto device
+#
+CONFIG_RTE_LIBRTE_PMD_CCP=n
+
+#
# Compile PMD for Marvell Crypto device
#
-CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
-CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n
+CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO=n
+CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO_DEBUG=n
#
# Compile generic security library
@@ -540,12 +567,41 @@ CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG=n
CONFIG_RTE_LIBRTE_SECURITY=y
#
+# Compile generic compression device library
+#
+CONFIG_RTE_LIBRTE_COMPRESSDEV=y
+CONFIG_RTE_COMPRESS_MAX_DEVS=64
+
+#
+# Compile compressdev unit test
+#
+CONFIG_RTE_COMPRESSDEV_TEST=n
+
+#
+# Compile PMD for Octeontx ZIPVF compression device
+#
+CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF=y
+
+#
+# Compile PMD for ISA-L compression device
+#
+CONFIG_RTE_LIBRTE_PMD_ISAL=n
+
+#
+# Compile PMD for ZLIB compression device
+#
+CONFIG_RTE_LIBRTE_PMD_ZLIB=n
+
+#
# Compile generic event device library
#
CONFIG_RTE_LIBRTE_EVENTDEV=y
CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n
CONFIG_RTE_EVENT_MAX_DEVS=16
CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64
+CONFIG_RTE_EVENT_TIMER_ADAPTER_NUM_MAX=32
+CONFIG_RTE_EVENT_ETH_INTR_RING_SIZE=1024
+CONFIG_RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE=32
#
# Compile PMD for skeleton event device
@@ -587,6 +643,21 @@ CONFIG_RTE_RAWDEV_MAX_DEVS=10
CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=y
#
+# Compile PMD for NXP DPAA2 CMDIF raw device
+#
+CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
+
+#
+# Compile PMD for NXP DPAA2 QDMA raw device
+#
+CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=n
+
+#
+# Compile PMD for Intel FPGA raw device
+#
+CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=y
+
+#
# Compile librte_ring
#
CONFIG_RTE_LIBRTE_RING=y
@@ -602,6 +673,8 @@ CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
#
# Compile Mempool drivers
#
+CONFIG_RTE_DRIVER_MEMPOOL_BUCKET=y
+CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB=64
CONFIG_RTE_DRIVER_MEMPOOL_RING=y
CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
@@ -792,6 +865,20 @@ CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
CONFIG_RTE_LIBRTE_PMD_VHOST=n
#
+# Compile IFC driver
+# To compile, CONFIG_RTE_LIBRTE_VHOST and CONFIG_RTE_EAL_VFIO
+# should be enabled.
+#
+CONFIG_RTE_LIBRTE_IFC_PMD=n
+
+#
+# Compile librte_bpf
+#
+CONFIG_RTE_LIBRTE_BPF=y
+# allow loading BPF from ELF files (requires libelf)
+CONFIG_RTE_LIBRTE_BPF_ELF=n
+
+#
# Compile the test application
#
CONFIG_RTE_APP_TEST=y
diff --git a/config/common_linuxapp b/config/common_linuxapp
index ff98f235..9c5ea9d8 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -15,7 +15,9 @@ CONFIG_RTE_LIBRTE_PMD_KNI=y
CONFIG_RTE_LIBRTE_VHOST=y
CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_PMD_VHOST=y
+CONFIG_RTE_LIBRTE_IFC_PMD=y
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=y
+CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y
CONFIG_RTE_LIBRTE_PMD_TAP=y
CONFIG_RTE_LIBRTE_AVP_PMD=y
CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD=y
@@ -23,3 +25,22 @@ CONFIG_RTE_LIBRTE_NFP_PMD=y
CONFIG_RTE_LIBRTE_POWER=y
CONFIG_RTE_VIRTIO_USER=y
CONFIG_RTE_PROC_INFO=y
+
+CONFIG_RTE_LIBRTE_VMBUS=y
+CONFIG_RTE_LIBRTE_NETVSC_PMD=y
+
+# NXP DPAA BUS and drivers
+CONFIG_RTE_LIBRTE_DPAA_BUS=y
+CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=y
+CONFIG_RTE_LIBRTE_DPAA_PMD=y
+CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV=y
+CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=y
+
+# NXP FSLMC BUS and DPAA2 drivers
+CONFIG_RTE_LIBRTE_FSLMC_BUS=y
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=y
+CONFIG_RTE_LIBRTE_DPAA2_PMD=y
+CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=y
+CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=y
+CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=y
+CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=y
diff --git a/config/defconfig_arm-armv7a-linuxapp-gcc b/config/defconfig_arm-armv7a-linuxapp-gcc
index a20b7a85..13be308d 100644
--- a/config/defconfig_arm-armv7a-linuxapp-gcc
+++ b/config/defconfig_arm-armv7a-linuxapp-gcc
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright (C) 2015 RehiveTech. All right reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of RehiveTech nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) 2015 RehiveTech. All rights reserved.
#include "common_linuxapp"
diff --git a/config/defconfig_arm64-dpaa-linuxapp-gcc b/config/defconfig_arm64-dpaa-linuxapp-gcc
index 52bfc792..c47aec0a 100644
--- a/config/defconfig_arm64-dpaa-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa-linuxapp-gcc
@@ -13,7 +13,7 @@ CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
#
# Compile Environment Abstraction Layer
#
-CONFIG_RTE_MAX_LCORE=4
+CONFIG_RTE_MAX_LCORE=16
CONFIG_RTE_MAX_NUMA_NODES=1
CONFIG_RTE_CACHE_LINE_SIZE=64
CONFIG_RTE_PKTMBUF_HEADROOM=128
@@ -21,10 +21,3 @@ CONFIG_RTE_PKTMBUF_HEADROOM=128
# NXP DPAA Bus
CONFIG_RTE_LIBRTE_DPAA_DEBUG_DRIVER=n
CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
-
-#
-# FSL DPAA caam - crypto driver
-#
-CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER=n
-CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index afdbc347..96f478a0 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -9,9 +9,6 @@
CONFIG_RTE_MACHINE="dpaa2"
CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72"
-#
-# Compile Environment Abstraction Layer
-#
CONFIG_RTE_MAX_LCORE=16
CONFIG_RTE_MAX_NUMA_NODES=1
CONFIG_RTE_CACHE_LINE_SIZE=64
@@ -22,23 +19,4 @@ CONFIG_RTE_PKTMBUF_HEADROOM=128
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
CONFIG_RTE_LIBRTE_VHOST_NUMA=n
-#
-# Compile Support Libraries for DPAA2
-#
CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=n
-
-#
-# Compile burst-oriented NXP DPAA2 PMD driver
-#
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX=n
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX=n
-CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE=n
-
-#
-# Compile NXP DPAA2 crypto sec driver for CAAM HW
-#
-CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT=n
-CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER=n
-CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX=n
diff --git a/config/defconfig_arm64-stingray-linuxapp-gcc b/config/defconfig_arm64-stingray-linuxapp-gcc
new file mode 100644
index 00000000..99925072
--- /dev/null
+++ b/config/defconfig_arm64-stingray-linuxapp-gcc
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) Broadcom 2017-2018. All rights reserved.
+#
+
+#include "defconfig_arm64-armv8a-linuxapp-gcc"
+
+# Broadcom - Stingray
+CONFIG_RTE_MACHINE="armv8a"
+CONFIG_RTE_ARCH_ARM_TUNE="cortex-a72"
+
+# Doesn't support NUMA
+CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_LIBRTE_VHOST_NUMA=n
+
+CONFIG_RTE_EAL_IGB_UIO=y
+CONFIG_RTE_KNI_KMOD=n
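The new target can be exercised with the legacy make system; a minimal sketch (the cross-compiler prefix is illustrative and only needed on a non-arm64 host):

    make config T=arm64-stingray-linuxapp-gcc
    make -j4 CROSS=aarch64-linux-gnu-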
diff --git a/config/defconfig_i686-native-linuxapp-gcc b/config/defconfig_i686-native-linuxapp-gcc
index a42ba4f5..1178fe35 100644
--- a/config/defconfig_i686-native-linuxapp-gcc
+++ b/config/defconfig_i686-native-linuxapp-gcc
@@ -46,3 +46,6 @@ CONFIG_RTE_LIBRTE_PMD_ZUC=n
# AVP PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_AVP_PMD=n
+
+# 32-bit doesn't break memory up into lists, but does have a VA allocation limit
+CONFIG_RTE_MAX_MEM_MB=2048
diff --git a/config/defconfig_i686-native-linuxapp-icc b/config/defconfig_i686-native-linuxapp-icc
index 144ba0ae..016c73f3 100644
--- a/config/defconfig_i686-native-linuxapp-icc
+++ b/config/defconfig_i686-native-linuxapp-icc
@@ -18,11 +18,6 @@ CONFIG_RTE_TOOLCHAIN_ICC=y
CONFIG_RTE_LIBRTE_KNI=n
#
-# Vectorized PMD is not supported on 32-bit
-#
-CONFIG_RTE_IXGBE_INC_VECTOR=n
-
-#
# Solarflare PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
@@ -51,3 +46,6 @@ CONFIG_RTE_LIBRTE_PMD_ZUC=n
# AVP PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_AVP_PMD=n
+
+# 32-bit doesn't break memory up into lists, but does have a VA allocation limit
+CONFIG_RTE_MAX_MEM_MB=2048
diff --git a/config/defconfig_x86_x32-native-linuxapp-gcc b/config/defconfig_x86_x32-native-linuxapp-gcc
index b6206a5c..57d000dc 100644
--- a/config/defconfig_x86_x32-native-linuxapp-gcc
+++ b/config/defconfig_x86_x32-native-linuxapp-gcc
@@ -26,3 +26,6 @@ CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
# AVP PMD is not supported on 32-bit
#
CONFIG_RTE_LIBRTE_AVP_PMD=n
+
+# 32-bit doesn't break memory up into lists, but does have a VA allocation limit
+CONFIG_RTE_MAX_MEM_MB=2048
diff --git a/config/meson.build b/config/meson.build
index f8c67578..4d755323 100644
--- a/config/meson.build
+++ b/config/meson.build
@@ -11,6 +11,10 @@ dpdk_conf.set('RTE_MACHINE', machine)
machine_args = []
machine_args += '-march=' + machine
+toolchain = cc.get_id()
+dpdk_conf.set_quoted('RTE_TOOLCHAIN', toolchain)
+dpdk_conf.set('RTE_TOOLCHAIN_' + toolchain.to_upper(), 1)
+
# use pthreads
add_project_link_arguments('-pthread', language: 'c')
dpdk_extra_ldflags += '-pthread'
@@ -38,6 +42,14 @@ if numa_dep.found() and cc.has_header('numaif.h')
dpdk_extra_ldflags += '-lnuma'
endif
+# check for strlcpy
+if host_machine.system() == 'linux' and cc.find_library('bsd',
+ required: false).found() and cc.has_header('bsd/string.h')
+ dpdk_conf.set('RTE_USE_LIBBSD', 1)
+ add_project_link_arguments('-lbsd', language: 'c')
+ dpdk_extra_ldflags += '-lbsd'
+endif
+
# add -include rte_config to cflags
add_project_arguments('-include', 'rte_config.h', language: 'c')
@@ -45,9 +57,12 @@ add_project_arguments('-include', 'rte_config.h', language: 'c')
warning_flags = [
'-Wsign-compare',
'-Wcast-qual',
- '-Wno-address-of-packed-member',
- '-Wno-format-truncation'
+ '-Wno-address-of-packed-member'
]
+if cc.sizeof('void *') == 4
+# for 32-bit, don't warn about casting a 32-bit pointer to a 64-bit int - it's fine
+ warning_flags += '-Wno-pointer-to-int-cast'
+endif
foreach arg: warning_flags
if cc.has_argument(arg)
add_project_arguments(arg, language: 'c')
@@ -61,6 +76,8 @@ dpdk_conf.set('RTE_LIBEAL_USE_HPET', get_option('use_hpet'))
dpdk_conf.set('RTE_EAL_ALLOW_INV_SOCKET_ID', get_option('allow_invalid_socket_id'))
# values which have defaults which may be overridden
dpdk_conf.set('RTE_MAX_VFIO_GROUPS', 64)
+dpdk_conf.set('RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB', 64)
+dpdk_conf.set('RTE_LIBRTE_DPAA2_USE_PHYS_IOVA', true)
compile_time_cpuflags = []
if host_machine.cpu_family().startswith('x86')
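The strlcpy detection added above requires both the libbsd library and its header; the same two conditions can be checked by hand. A sketch, assuming a Debian-style include layout:

    # mirror the meson test: libbsd linkable and bsd/string.h present
    echo 'int main(void){return 0;}' | cc -xc - -lbsd -o /dev/null &&
        test -e /usr/include/bsd/string.h && echo "RTE_USE_LIBBSD would be set"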
diff --git a/config/rte_config.h b/config/rte_config.h
index 699878ad..a8e47977 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -21,13 +21,18 @@
/****** library defines ********/
/* EAL defines */
-#define RTE_MAX_MEMSEG 512
+#define RTE_MAX_MEMSEG_LISTS 128
+#define RTE_MAX_MEMSEG_PER_LIST 8192
+#define RTE_MAX_MEM_MB_PER_LIST 32768
+#define RTE_MAX_MEMSEG_PER_TYPE 32768
+#define RTE_MAX_MEM_MB_PER_TYPE 65536
+#define RTE_MAX_MEM_MB 524288
#define RTE_MAX_MEMZONE 2560
#define RTE_MAX_TAILQ 32
-#define RTE_LOG_LEVEL RTE_LOG_INFO
#define RTE_LOG_DP_LEVEL RTE_LOG_INFO
#define RTE_BACKTRACE 1
#define RTE_EAL_VFIO 1
+#define RTE_MAX_VFIO_CONTAINERS 64
/* bsd module defines */
#define RTE_CONTIGMEM_MAX_NUM_BUFS 64
@@ -52,9 +57,18 @@
#define RTE_CRYPTO_MAX_DEVS 64
#define RTE_CRYPTODEV_NAME_LEN 64
+/* compressdev defines */
+#define RTE_COMPRESS_MAX_DEVS 64
+
/* eventdev defines */
#define RTE_EVENT_MAX_DEVS 16
#define RTE_EVENT_MAX_QUEUES_PER_DEV 64
+#define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32
+#define RTE_EVENT_ETH_INTR_RING_SIZE 1024
+#define RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE 32
+
+/* rawdev defines */
+#define RTE_RAWDEV_MAX_DEVS 10
/* ip_fragmentation defines */
#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4
@@ -72,11 +86,16 @@
/****** driver defines ********/
-/*
- * Number of sessions to create in the session memory pool
- * on a single QuickAssist device.
- */
-#define RTE_QAT_PMD_MAX_NB_SESSIONS 2048
+/* QuickAssist device */
+/* Max. number of QuickAssist devices which can be attached */
+#define RTE_PMD_QAT_MAX_PCI_DEVICES 48
+#define RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS 16
+
+/* virtio crypto defines */
+#define RTE_MAX_VIRTIO_CRYPTO 32
+
+/* DPAA SEC max cryptodev devices*/
+#define RTE_LIBRTE_DPAA_MAX_CRYPTODEV 4
/* fm10k defines */
#define RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE 1
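These memory limits are baked in at compile time; after a legacy-make build they can be read back from the generated header. A sketch, assuming the default build output directory:

    # confirm the new memseg-list limits made it into the build
    grep -E 'RTE_MAX_(MEMSEG_LISTS|MEMSEG_PER_LIST|MEM_MB)' build/include/rte_config.h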
diff --git a/devtools/check-dup-includes.sh b/devtools/check-dup-includes.sh
index 10365e4a..e4c2748c 100755
--- a/devtools/check-dup-includes.sh
+++ b/devtools/check-dup-includes.sh
@@ -1,25 +1,6 @@
#! /bin/sh -e
-
-# Copyright 2017 Mellanox Technologies, Ltd. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 Mellanox Technologies, Ltd
# Check C files in git repository for duplicated includes.
# Usage: devtools/check-dup-includes.sh [directory]
diff --git a/devtools/check-git-log.sh b/devtools/check-git-log.sh
index c601f6ae..97dae4be 100755
--- a/devtools/check-git-log.sh
+++ b/devtools/check-git-log.sh
@@ -1,34 +1,6 @@
#! /bin/sh
-
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Check commit logs (headlines and references)
#
@@ -106,8 +78,8 @@ bad=$(echo "$headlines" | grep --color=always \
# check headline lowercase for first words
bad=$(echo "$headlines" | grep --color=always \
- -e '^.*[A-Z].*:' \
- -e ': *[A-Z]' \
+ -e '^.*[[:upper:]].*:' \
+ -e ': *[[:upper:]]' \
| sed 's,^,\t,')
[ -z "$bad" ] || printf "Wrong headline uppercase:\n$bad\n"
@@ -123,12 +95,14 @@ bad=$(echo "$headlines" | grep -E --color=always \
-e ':.*\<armv8\>' \
-e ':.*\<crc\>' \
-e ':.*\<dma\>' \
+ -e ':.*\<eeprom\>' \
-e ':.*\<freebsd\>' \
-e ':.*\<iova\>' \
-e ':.*\<linux\>' \
-e ':.*\<lro\>' \
-e ':.*\<lsc\>' \
-e ':.*\<mac\>' \
+ -e ':.*\<mss\>' \
-e ':.*\<mtu\>' \
-e ':.*\<nic\>' \
-e ':.*\<nvm\>' \
@@ -138,7 +112,9 @@ bad=$(echo "$headlines" | grep -E --color=always \
-e ':.*\<rss\>' \
-e ':.*\<sctp\>' \
-e ':.*\<tso\>' \
+ -e ':.*\<udp\>' \
-e ':.*\<[Vv]lan\>' \
+ -e ':.*\<vdpa\>' \
-e ':.*\<vsi\>' \
| sed 's,^,\t,')
[ -z "$bad" ] || printf "Wrong headline lowercase:\n$bad\n"
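A usage sketch for the updated headline checks, assuming the script accepts an optional git revision range (defaulting to origin/master..):

    # check the last three commits for headline style issues
    devtools/check-git-log.sh HEAD~3..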
diff --git a/devtools/check-includes.sh b/devtools/check-includes.sh
index 685a3e77..9057633e 100755
--- a/devtools/check-includes.sh
+++ b/devtools/check-includes.sh
@@ -1,34 +1,6 @@
#!/bin/sh -e
-#
-# BSD LICENSE
-#
-# Copyright 2016 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2016 6WIND S.A.
# This script checks that header files in a given directory do not miss
# dependencies when included on their own, do not conflict and accept being
diff --git a/devtools/check-maintainers.sh b/devtools/check-maintainers.sh
index ac5326b8..85a300f0 100755
--- a/devtools/check-maintainers.sh
+++ b/devtools/check-maintainers.sh
@@ -1,34 +1,6 @@
#! /bin/sh
-
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2015 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Do some basic checks in MAINTAINERS file
diff --git a/devtools/check-symbol-change.sh b/devtools/check-symbol-change.sh
new file mode 100755
index 00000000..daaf45e1
--- /dev/null
+++ b/devtools/check-symbol-change.sh
@@ -0,0 +1,159 @@
+#!/bin/sh
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Neil Horman <nhorman@tuxdriver.com>
+
+build_map_changes()
+{
+ local fname="$1"
+ local mapdb="$2"
+
+ cat "$fname" | awk '
+ # Initialize our variables
+ BEGIN {map="";sym="";ar="";sec=""; in_sec=0; in_map=0}
+
+ # Anything that starts with + or -, followed by an a
+ # and ends in the string .map is the name of our map file
+ # This may appear multiple times in a patch if multiple
+ # map files are altered, and all section/symbol names
+ # appearing between a triggering of this rule and the
+ # next trigger of this rule are associated with this file
+ /[-+] a\/.*\.map/ {map=$2; in_map=1}
+
+ # Same pattern as above, only it matches on anything that
+ # does not end in 'map', indicating we have left the map chunk.
+ # When we hit this, turn off the in_map variable, which
+	# suppresses the subordinate rules below
+ /[-+] a\/.*\.^(map)/ {in_map=0}
+
+ # Triggering this rule, which starts a line with a + and ends it
+ # with a { identifies a versioned section. The section name is
+	# the rest of the line with the + and { symbols removed.
+	# Triggering this rule sets in_sec to 1, which activates the
+ # symbol rule below
+ /+.*{/ {gsub("+","");
+ if (in_map == 1) {
+ sec=$1; in_sec=1;
+ }
+ }
+
+	# This rule identifies the end of a section, and disables the
+ # symbol rule
+ /.*}/ {in_sec=0}
+
+ # This rule matches on a + followed by any characters except a :
+ # (which denotes a global vs local segment), and ends with a ;.
+ # The semicolon is removed and the symbol is printed with its
+ # association file name and version section, along with an
+	# associated map file name and version section, along with an
+ # only works if we have found a version section in the rule
+	# above (hence the in_sec check) and found a map file (the
+ # in_map check). If we are not in a map chunk, do nothing. If
+ # we are in a map chunk but not a section chunk, record it as
+ # unknown.
+ /^+[^}].*[^:*];/ {gsub(";","");sym=$2;
+ if (in_map == 1) {
+ if (in_sec == 1) {
+ print map " " sym " " sec " add"
+ } else {
+ print map " " sym " unknown add"
+ }
+ }
+ }
+
+ # This is the same rule as above, but the rule matches on a
+ # leading - rather than a +, denoting that the symbol is being
+ # removed.
+ /^-[^}].*[^:*];/ {gsub(";","");sym=$2;
+ if (in_map == 1) {
+ if (in_sec == 1) {
+ print map " " sym " " sec " del"
+ } else {
+ print map " " sym " unknown del"
+ }
+ }
+ }' > "$mapdb"
+
+ sort -u "$mapdb" > "$mapdb.2"
+ mv -f "$mapdb.2" "$mapdb"
+
+}
+
+check_for_rule_violations()
+{
+ local mapdb="$1"
+ local mname
+ local symname
+ local secname
+ local ar
+ local ret=0
+
+ while read mname symname secname ar
+ do
+ if [ "$ar" = "add" ]
+ then
+
+ if [ "$secname" = "unknown" ]
+ then
+ # Just inform the user of this occurrence, but
+ # don't flag it as an error
+			echo -n "INFO: symbol $symname is added but "
+			echo -n "patch has insufficient context "
+			echo -n "to determine the section name; "
+			echo -n "please ensure the version is "
+ echo "EXPERIMENTAL"
+ continue
+ fi
+
+ if [ "$secname" != "EXPERIMENTAL" ]
+ then
+			# Symbols that are being added to a section
+			# other than the experimental section need
+			# to be moving from an already supported
+			# section, or it is a violation
+ grep -q \
+ "$mname $symname [^EXPERIMENTAL] del" "$mapdb"
+ if [ $? -ne 0 ]
+ then
+ echo -n "ERROR: symbol $symname "
+ echo -n "is added in a section "
+ echo -n "other than the EXPERIMENTAL "
+ echo "section of the version map"
+ ret=1
+ fi
+ fi
+ else
+
+ if [ "$secname" != "EXPERIMENTAL" ]
+ then
+			# Just inform users that non-experimental
+ # symbols need to go through a deprecation
+ # process
+ echo -n "INFO: symbol $symname is being "
+ echo -n "removed, ensure that it has "
+ echo "gone through the deprecation process"
+ fi
+ fi
+ done < "$mapdb"
+
+ return $ret
+}
+
+trap clean_and_exit_on_sig EXIT
+
+mapfile=`mktemp mapdb.XXXXXX`
+patch=$1
+exit_code=1
+
+clean_and_exit_on_sig()
+{
+ rm -f "$mapfile"
+ exit $exit_code
+}
+
+build_map_changes "$patch" "$mapfile"
+check_for_rule_violations "$mapfile"
+exit_code=$?
+
+rm -f "$mapfile"
+
+exit $exit_code
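A usage sketch for the new symbol-change checker (the patch file path is illustrative):

    # flag symbols added outside the EXPERIMENTAL section of a .map file
    git format-patch -1 --stdout > /tmp/one.patch
    devtools/check-symbol-change.sh /tmp/one.patch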
diff --git a/devtools/check-symbol-maps.sh b/devtools/check-symbol-maps.sh
new file mode 100755
index 00000000..e137d48a
--- /dev/null
+++ b/devtools/check-symbol-maps.sh
@@ -0,0 +1,30 @@
+#! /bin/sh -e
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 Mellanox Technologies, Ltd
+
+cd $(dirname $0)/..
+
+# speed up by ignoring Unicode details
+export LC_ALL=C
+
+find_orphan_symbols ()
+{
+ for map in $(find lib drivers -name '*.map') ; do
+ for sym in $(sed -rn 's,^([^}]*_.*);,\1,p' $map) ; do
+ if echo $sym | grep -q '^per_lcore_' ; then
+ continue
+ fi
+ if ! grep -q -r --exclude=$(basename $map) \
+ -w $sym $(dirname $map) ; then
+ echo "$map: $sym"
+ fi
+ done
+ done
+}
+
+orphan_symbols=$(find_orphan_symbols)
+if [ -n "$orphan_symbols" ] ; then
+ echo "Found only in symbol map file:"
+ echo "$orphan_symbols" | sed 's,^,\t,'
+ exit 1
+fi
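The new map checker takes no arguments and scans lib and drivers from the tree root; a sketch of a CI-style invocation:

    # non-zero exit means at least one orphan symbol was found
    devtools/check-symbol-maps.sh || echo "orphan symbols found in version maps"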
diff --git a/devtools/checkpatches.sh b/devtools/checkpatches.sh
index 7676a6b5..ba795ad1 100755
--- a/devtools/checkpatches.sh
+++ b/devtools/checkpatches.sh
@@ -1,40 +1,14 @@
#! /bin/sh
-
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2015 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Load config options:
# - DPDK_CHECKPATCH_PATH
# - DPDK_CHECKPATCH_LINE_LENGTH
. $(dirname $(readlink -e $0))/load-devel-config
+VALIDATE_NEW_API=$(dirname $(readlink -e $0))/check-symbol-change.sh
+
length=${DPDK_CHECKPATCH_LINE_LENGTH:-80}
# override default Linux options
@@ -42,13 +16,21 @@ options="--no-tree"
options="$options --max-line-length=$length"
options="$options --show-types"
options="$options --ignore=LINUX_VERSION_CODE,\
-FILE_PATH_CHANGES,MAINTAINERS_STYLE,\
+FILE_PATH_CHANGES,MAINTAINERS_STYLE,SPDX_LICENSE_TAG,\
VOLATILE,PREFER_PACKED,PREFER_ALIGNED,PREFER_PRINTF,\
PREFER_KERNEL_TYPES,BIT_MACRO,CONST_STRUCT,\
SPLIT_STRING,LONG_LINE_STRING,\
LINE_SPACING,PARENTHESIS_ALIGNMENT,NETWORKING_BLOCK_COMMENT_STYLE,\
NEW_TYPEDEFS,COMPARISON_TO_NULL"
+clean_tmp_files() {
+ if echo $tmpinput | grep -q '^checkpatches\.' ; then
+ rm -f "$tmpinput"
+ fi
+}
+
+trap "clean_tmp_files" INT
+
print_usage () {
cat <<- END_OF_HELP
usage: $(basename $0) [-q] [-v] [-nX|patch1 [patch2] ...]]
@@ -61,6 +43,88 @@ print_usage () {
END_OF_HELP
}
+check_forbidden_additions() {
+ # This awk script receives a list of expressions to monitor
+ # and a list of folders to search these expressions in
+ # - No search is done inside comments
+ # - Both additions and removals of the expressions are checked
+ # A positive balance of additions fails the check
+ read -d '' awk_script << 'EOF'
+ BEGIN {
+ split(FOLDERS,deny_folders," ");
+ split(EXPRESSIONS,deny_expr," ");
+ in_file=0;
+ in_comment=0;
+ count=0;
+ comment_start="/*"
+ comment_end="*/"
+ }
+ # search for add/remove instances in current file
+	# state machine assumes the comment structure is enforced by
+ # checkpatches.pl
+ (in_file) {
+ # comment start
+ if (index($0,comment_start) > 0) {
+ in_comment = 1
+ }
+ # non comment code
+ if (in_comment == 0) {
+ for (i in deny_expr) {
+ forbidden_added = "^\+.*" deny_expr[i];
+				forbidden_removed = "^-.*" deny_expr[i];
+ current = expressions[deny_expr[i]]
+ if ($0 ~ forbidden_added) {
+ count = count + 1;
+ expressions[deny_expr[i]] = current + 1
+ }
+ if ($0 ~ forbidden_removed) {
+ count = count - 1;
+ expressions[deny_expr[i]] = current - 1
+ }
+ }
+ }
+ # comment end
+ if (index($0,comment_end) > 0) {
+ in_comment = 0
+ }
+ }
+	# switch to the next file; check if the balance of add/remove
+	# of the previous file had new additions
+ ($0 ~ "^\+\+\+ b/") {
+ in_file = 0;
+ if (count > 0) {
+ exit;
+ }
+ for (i in deny_folders) {
+ re = "^\+\+\+ b/" deny_folders[i];
+			if ($0 ~ re) {
+ in_file = 1
+ last_file = $0
+ }
+ }
+ }
+ END {
+ if (count > 0) {
+ print "Warning in " substr(last_file,6) ":"
+ print "are you sure you want to add the following:"
+ for (key in expressions) {
+ if (expressions[key] > 0) {
+ print key
+ }
+ }
+ exit RET_ON_FAIL
+ }
+ }
+EOF
+ # ---------------------------------
+ # refrain from new additions of rte_panic() and rte_exit()
+ # multiple folders and expressions are separated by spaces
+ awk -v FOLDERS="lib drivers" \
+ -v EXPRESSIONS="rte_panic\\\( rte_exit\\\(" \
+ -v RET_ON_FAIL=1 \
+ "$awk_script" -
+}
+
number=0
quiet=false
verbose=false
@@ -75,7 +139,7 @@ while getopts hn:qv ARG ; do
done
shift $(($OPTIND - 1))
-if [ ! -x "$DPDK_CHECKPATCH_PATH" ] ; then
+if [ ! -f "$DPDK_CHECKPATCH_PATH" ] || [ ! -x "$DPDK_CHECKPATCH_PATH" ] ; then
print_usage >&2
echo
echo 'Cannot execute DPDK_CHECKPATCH_PATH' >&2
@@ -86,19 +150,45 @@ total=0
status=0
check () { # <patch> <commit> <title>
+ local ret=0
+
total=$(($total + 1))
! $verbose || printf '\n### %s\n\n' "$3"
if [ -n "$1" ] ; then
- report=$($DPDK_CHECKPATCH_PATH $options "$1" 2>/dev/null)
+ tmpinput=$1
elif [ -n "$2" ] ; then
- report=$(git format-patch --find-renames --no-stat --stdout -1 $commit |
- $DPDK_CHECKPATCH_PATH $options - 2>/dev/null)
+ tmpinput=$(mktemp checkpatches.XXXXXX)
+ git format-patch --find-renames \
+ --no-stat --stdout -1 $commit > "$tmpinput"
else
- report=$($DPDK_CHECKPATCH_PATH $options - 2>/dev/null)
+ tmpinput=$(mktemp checkpatches.XXXXXX)
+ cat > "$tmpinput"
fi
- [ $? -ne 0 ] || return 0
- $verbose || printf '\n### %s\n\n' "$3"
- printf '%s\n' "$report" | sed -n '1,/^total:.*lines checked$/p'
+
+ report=$($DPDK_CHECKPATCH_PATH $options "$tmpinput" 2>/dev/null)
+ if [ $? -ne 0 ] ; then
+ $verbose || printf '\n### %s\n\n' "$3"
+ printf '%s\n' "$report" | sed -n '1,/^total:.*lines checked$/p'
+ ret=1
+ fi
+
+ ! $verbose || printf '\nChecking API additions/removals:\n'
+ report=$($VALIDATE_NEW_API "$tmpinput")
+ if [ $? -ne 0 ] ; then
+ printf '%s\n' "$report"
+ ret=1
+ fi
+
+ ! $verbose || printf '\nChecking forbidden tokens additions:\n'
+ report=$(check_forbidden_additions <"$tmpinput")
+ if [ $? -ne 0 ] ; then
+ printf '%s\n' "$report"
+ ret=1
+ fi
+
+ clean_tmp_files
+ [ $ret -eq 0 ] && return 0
+
status=$(($status + 1))
}
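One run of the updated script now covers checkpatch style, symbol-map changes and forbidden-token additions; a sketch, assuming DPDK_CHECKPATCH_PATH points at a kernel checkpatch.pl (the path is illustrative):

    export DPDK_CHECKPATCH_PATH=/path/to/linux/scripts/checkpatch.pl
    devtools/checkpatches.sh -n1    # check the latest commit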
diff --git a/devtools/cocci/strlcpy.cocci b/devtools/cocci/strlcpy.cocci
new file mode 100644
index 00000000..335e2712
--- /dev/null
+++ b/devtools/cocci/strlcpy.cocci
@@ -0,0 +1,8 @@
+@use_strlcpy@
+identifier src, dst;
+expression size;
+@@
+(
+- snprintf(dst, size, "%s", src)
++ strlcpy(dst, src, size)
+)
diff --git a/devtools/get-maintainer.sh b/devtools/get-maintainer.sh
index 904b7785..b9160486 100755
--- a/devtools/get-maintainer.sh
+++ b/devtools/get-maintainer.sh
@@ -23,7 +23,8 @@ print_usage () {
}
# Requires DPDK_GETMAINTAINER_PATH devel config option set
-if [ ! -x "$DPDK_GETMAINTAINER_PATH" ] ; then
+if [ ! -f "$DPDK_GETMAINTAINER_PATH" ] ||
+ [ ! -x "$DPDK_GETMAINTAINER_PATH" ] ; then
print_usage >&2
echo
echo 'Cannot execute DPDK_GETMAINTAINER_PATH' >&2
@@ -31,7 +32,7 @@ if [ ! -x "$DPDK_GETMAINTAINER_PATH" ] ; then
fi
FILES="COPYING CREDITS Kbuild"
-FOLDERS="Documentation arch include fs init ipc kernel scripts"
+FOLDERS="Documentation arch include fs init ipc scripts"
# Kernel script checks for some files and folders to run
workaround () {
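A usage sketch for the script with the stricter executable check, assuming DPDK_GETMAINTAINER_PATH points at the kernel's get_maintainer.pl (paths and patch name illustrative):

    export DPDK_GETMAINTAINER_PATH=/path/to/linux/scripts/get_maintainer.pl
    devtools/get-maintainer.sh my-feature.patch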
diff --git a/devtools/git-log-fixes.sh b/devtools/git-log-fixes.sh
index cd5cf893..e37ee226 100755
--- a/devtools/git-log-fixes.sh
+++ b/devtools/git-log-fixes.sh
@@ -1,34 +1,6 @@
#! /bin/sh -e
-
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
print_usage ()
{
diff --git a/devtools/test-build.sh b/devtools/test-build.sh
index 3362edcc..1eee2417 100755
--- a/devtools/test-build.sh
+++ b/devtools/test-build.sh
@@ -1,48 +1,21 @@
#! /bin/sh -e
-
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2015 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
default_path=$PATH
# Load config options:
-# - AESNI_MULTI_BUFFER_LIB_PATH
# - ARMV8_CRYPTO_LIB_PATH
# - DPDK_BUILD_TEST_CONFIGS (defconfig1+option1+option2 defconfig2)
# - DPDK_DEP_ARCHIVE
# - DPDK_DEP_CFLAGS
+# - DPDK_DEP_ISAL (y/[n])
# - DPDK_DEP_LDFLAGS
# - DPDK_DEP_MLX (y/[n])
# - DPDK_DEP_NUMA ([y]/n)
# - DPDK_DEP_PCAP (y/[n])
# - DPDK_DEP_SSL (y/[n])
+# - DPDK_DEP_IPSEC_MB (y/[n])
# - DPDK_DEP_SZE (y/[n])
# - DPDK_DEP_ZLIB (y/[n])
# - DPDK_MAKE_JOBS (int)
@@ -122,14 +95,15 @@ reset_env ()
unset CROSS
unset DPDK_DEP_ARCHIVE
unset DPDK_DEP_CFLAGS
+ unset DPDK_DEP_ISAL
unset DPDK_DEP_LDFLAGS
unset DPDK_DEP_MLX
unset DPDK_DEP_NUMA
unset DPDK_DEP_PCAP
unset DPDK_DEP_SSL
+ unset DPDK_DEP_IPSEC_MB
unset DPDK_DEP_SZE
unset DPDK_DEP_ZLIB
- unset AESNI_MULTI_BUFFER_LIB_PATH
unset ARMV8_CRYPTO_LIB_PATH
unset FLEXRAN_SDK
unset LIBMUSDK_PATH
@@ -171,20 +145,23 @@ config () # <directory> <target> <options>
sed -ri 's,(BYPASS=)n,\1y,' $1/.config
test "$DPDK_DEP_ARCHIVE" != y || \
sed -ri 's,(RESOURCE_TAR=)n,\1y,' $1/.config
+ test "$DPDK_DEP_ISAL" != y || \
+ sed -ri 's,(ISAL_PMD=)n,\1y,' $1/.config
test "$DPDK_DEP_MLX" != y || \
sed -ri 's,(MLX._PMD=)n,\1y,' $1/.config
test "$DPDK_DEP_SZE" != y || \
sed -ri 's,(PMD_SZEDATA2=)n,\1y,' $1/.config
test "$DPDK_DEP_ZLIB" != y || \
sed -ri 's,(BNX2X_PMD=)n,\1y,' $1/.config
- sed -ri 's,(NFP_PMD=)n,\1y,' $1/.config
+ test "$DPDK_DEP_ZLIB" != y || \
+ sed -ri 's,(COMPRESSDEV_TEST=)n,\1y,' $1/.config
test "$DPDK_DEP_PCAP" != y || \
sed -ri 's,(PCAP=)n,\1y,' $1/.config
test -z "$ARMV8_CRYPTO_LIB_PATH" || \
sed -ri 's,(PMD_ARMV8_CRYPTO=)n,\1y,' $1/.config
- test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
+ test "$DPDK_DEP_IPSEC_MB" != y || \
sed -ri 's,(PMD_AESNI_MB=)n,\1y,' $1/.config
- test -z "$AESNI_MULTI_BUFFER_LIB_PATH" || \
+ test "$DPDK_DEP_IPSEC_MB" != y || \
sed -ri 's,(PMD_AESNI_GCM=)n,\1y,' $1/.config
test -z "$LIBSSO_SNOW3G_PATH" || \
sed -ri 's,(PMD_SNOW3G=)n,\1y,' $1/.config
@@ -193,6 +170,8 @@ config () # <directory> <target> <options>
test -z "$LIBSSO_ZUC_PATH" || \
sed -ri 's,(PMD_ZUC=)n,\1y,' $1/.config
test "$DPDK_DEP_SSL" != y || \
+ sed -ri 's,(PMD_CCP=)n,\1y,' $1/.config
+ test "$DPDK_DEP_SSL" != y || \
sed -ri 's,(PMD_OPENSSL=)n,\1y,' $1/.config
test "$DPDK_DEP_SSL" != y || \
sed -ri 's,(PMD_QAT=)n,\1y,' $1/.config
@@ -200,9 +179,9 @@ config () # <directory> <target> <options>
sed -ri 's,(BBDEV_TURBO_SW=)n,\1y,' $1/.config
sed -ri 's,(SCHED_.*=)n,\1y,' $1/.config
test -z "$LIBMUSDK_PATH" || \
- sed -ri 's,(PMD_MRVL_CRYPTO=)n,\1y,' $1/.config
+ sed -ri 's,(PMD_MVSAM_CRYPTO=)n,\1y,' $1/.config
test -z "$LIBMUSDK_PATH" || \
- sed -ri 's,(MRVL_PMD=)n,\1y,' $1/.config
+ sed -ri 's,(MVPP2_PMD=)n,\1y,' $1/.config
build_config_hook $1 $2 $3
# Explicit enabler/disabler (uppercase)
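With AESNI_MULTI_BUFFER_LIB_PATH retired, the AESNI PMDs are now enabled through a single switch; a sketch of a build-test run (the config name follows the defconfig naming):

    export DPDK_DEP_IPSEC_MB=y
    devtools/test-build.sh x86_64-native-linuxapp-gcc+shared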
diff --git a/devtools/test-meson-builds.sh b/devtools/test-meson-builds.sh
new file mode 100755
index 00000000..951c9067
--- /dev/null
+++ b/devtools/test-meson-builds.sh
@@ -0,0 +1,60 @@
+#! /bin/sh -e
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# Run meson to auto-configure the various builds.
+# * all builds get put in a directory whose name starts with "build-"
+# * if a build-directory already exists we assume it was properly configured
+# Run ninja after configuration is done.
+
+srcdir=$(dirname $(readlink -m $0))/..
+MESON=${MESON:-meson}
+
+if command -v ninja >/dev/null 2>&1 ; then
+ ninja_cmd=ninja
+elif command -v ninja-build >/dev/null 2>&1 ; then
+ ninja_cmd=ninja-build
+else
+ echo "ERROR: ninja is not found" >&2
+ exit 1
+fi
+
+build () # <directory> <meson options>
+{
+ builddir=$1
+ shift
+ if [ ! -d "$builddir" ] ; then
+ options="--werror -Dexamples=all $*"
+ echo "$MESON $options $srcdir $builddir"
+ $MESON $options $srcdir $builddir
+ unset CC
+ fi
+ echo "$ninja_cmd -C $builddir"
+ $ninja_cmd -C $builddir
+}
+
+# shared and static linked builds with gcc and clang
+for c in gcc clang ; do
+ for s in static shared ; do
+ export CC="ccache $c"
+ build build-$c-$s --default-library=$s
+ done
+done
+
+# test compilation with minimal x86 instruction set
+build build-x86-default -Dmachine=nehalem
+
+# enable cross compilation if gcc cross-compiler is found
+c=aarch64-linux-gnu-gcc
+if command -v $c >/dev/null 2>&1 ; then
+	# also build the generic armv8a config with clang to increase coverage
+ export CC="ccache clang"
+ build build-arm64-host-clang --cross-file \
+ config/arm/arm64_armv8_linuxapp_gcc
+
+ for f in config/arm/arm*gcc ; do
+ export CC="ccache gcc"
+ build build-$(basename $f | tr '_' '-' | cut -d'-' -f-2) \
+ --cross-file $f
+ done
+fi
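The new script needs no arguments; a sketch of a typical run from the source tree, assuming meson, ninja and ccache are available:

    # builds land in directories named build-<compiler>-<linkage>
    ./devtools/test-meson-builds.sh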
diff --git a/devtools/test-null.sh b/devtools/test-null.sh
index 30cd0b03..61879e3e 100755
--- a/devtools/test-null.sh
+++ b/devtools/test-null.sh
@@ -1,34 +1,6 @@
#! /bin/sh -e
-
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2015 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Run a quick testpmd forwarding with null PMD without hugepage
diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index d77f205b..9265907f 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -2,35 +2,8 @@ API {#index}
===
<!--
- BSD LICENSE
-
- Copyright 2013-2017 6WIND S.A.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of 6WIND S.A. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2013-2017 6WIND S.A.
-->
The public API headers are grouped by topics:
@@ -45,14 +18,19 @@ The public API headers are grouped by topics:
[bbdev] (@ref rte_bbdev.h),
[cryptodev] (@ref rte_cryptodev.h),
[security] (@ref rte_security.h),
+ [compressdev] (@ref rte_compressdev.h),
+ [compress] (@ref rte_comp.h),
[eventdev] (@ref rte_eventdev.h),
[event_eth_rx_adapter] (@ref rte_event_eth_rx_adapter.h),
+ [event_timer_adapter] (@ref rte_event_timer_adapter.h),
+ [event_crypto_adapter] (@ref rte_event_crypto_adapter.h),
[rawdev] (@ref rte_rawdev.h),
[metrics] (@ref rte_metrics.h),
[bitrate] (@ref rte_bitrate.h),
[latency] (@ref rte_latencystats.h),
[devargs] (@ref rte_devargs.h),
- [PCI] (@ref rte_pci.h)
+ [PCI] (@ref rte_pci.h),
+ [vfio] (@ref rte_vfio.h)
- **device specific**:
[softnic] (@ref rte_eth_softnic.h),
@@ -63,6 +41,9 @@ The public API headers are grouped by topics:
[i40e] (@ref rte_pmd_i40e.h),
[bnxt] (@ref rte_pmd_bnxt.h),
[dpaa] (@ref rte_pmd_dpaa.h),
+ [dpaa2_mempool] (@ref rte_dpaa2_mempool.h),
+ [dpaa2_cmdif] (@ref rte_pmd_dpaa2_cmdif.h),
+ [dpaa2_qdma] (@ref rte_pmd_dpaa2_qdma.h),
[crypto_scheduler] (@ref rte_cryptodev_scheduler.h)
- **memory**:
@@ -133,7 +114,8 @@ The public API headers are grouped by topics:
[EFD] (@ref rte_efd.h),
[ACL] (@ref rte_acl.h),
[member] (@ref rte_member.h),
- [flow classify] (@ref rte_flow_classify.h)
+ [flow classify] (@ref rte_flow_classify.h),
+ [BPF] (@ref rte_bpf.h)
- **containers**:
[mbuf] (@ref rte_mbuf.h),
@@ -159,6 +141,8 @@ The public API headers are grouped by topics:
[array] (@ref rte_table_array.h),
[stub] (@ref rte_table_stub.h)
* [pipeline] (@ref rte_pipeline.h)
+ [port_in_action] (@ref rte_port_in_action.h)
+ [table_action] (@ref rte_table_action.h)
- **basic**:
[approx fraction] (@ref rte_approx.h),
diff --git a/doc/api/doxy-api.conf b/doc/api/doxy-api.conf
index cda52fdf..66693c38 100644
--- a/doc/api/doxy-api.conf
+++ b/doc/api/doxy-api.conf
@@ -1,54 +1,32 @@
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2013-2017 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
PROJECT_NAME = DPDK
INPUT = doc/api/doxy-api-index.md \
drivers/crypto/scheduler \
+ drivers/mempool/dpaa2 \
drivers/net/bnxt \
drivers/net/bonding \
drivers/net/dpaa \
drivers/net/i40e \
drivers/net/ixgbe \
drivers/net/softnic \
+ drivers/raw/dpaa2_cmdif \
+ drivers/raw/dpaa2_qdma \
lib/librte_eal/common/include \
lib/librte_eal/common/include/generic \
lib/librte_acl \
lib/librte_bbdev \
lib/librte_bitratestats \
+ lib/librte_bpf \
lib/librte_cfgfile \
lib/librte_cmdline \
lib/librte_compat \
+ lib/librte_compressdev \
lib/librte_cryptodev \
lib/librte_distributor \
lib/librte_efd \
- lib/librte_ether \
+ lib/librte_ethdev \
lib/librte_eventdev \
lib/librte_flow_classify \
lib/librte_gro \
@@ -82,6 +60,7 @@ INPUT = doc/api/doxy-api-index.md \
FILE_PATTERNS = rte_*.h \
cmdline.h
PREDEFINED = __DOXYGEN__ \
+ VFIO_PRESENT \
__attribute__(x)=
OPTIMIZE_OUTPUT_FOR_C = YES
diff --git a/doc/api/doxy-html-custom.sh b/doc/api/doxy-html-custom.sh
index e684a75a..3802007c 100755
--- a/doc/api/doxy-html-custom.sh
+++ b/doc/api/doxy-html-custom.sh
@@ -1,34 +1,6 @@
#! /bin/sh -e
-
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2013 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
CSS=$1
diff --git a/doc/guides/bbdevs/null.rst b/doc/guides/bbdevs/null.rst
index 9baf2a99..0b885d17 100644
--- a/doc/guides/bbdevs/null.rst
+++ b/doc/guides/bbdevs/null.rst
@@ -4,7 +4,7 @@
BBDEV null Poll Mode Driver
============================
-The (**bbdev_null**) is a bbdev poll mode driver which provides a minimal
+The null bbdev PMD (**baseband_null**) is a bbdev poll mode driver which provides a minimal
implementation of a software bbdev device. As a null device it does not modify
the data in the mbuf on which the bbdev operation is to operate and it only
works for operation type ``RTE_BBDEV_OP_NONE``.
@@ -30,9 +30,9 @@ Initialization
To use the PMD in an application, user must:
-- Call ``rte_vdev_init("bbdev_null")`` within the application.
+- Call ``rte_vdev_init("baseband_null")`` within the application.
-- Use ``--vdev="bbdev_null"`` in the EAL options, which will call ``rte_vdev_init()`` internally.
+- Use ``--vdev="baseband_null"`` in the EAL options, which will call ``rte_vdev_init()`` internally.
The following parameters (all optional) can be provided in the previous two calls:
@@ -46,4 +46,4 @@ Example:
.. code-block:: console
- ./test-bbdev.py -e="--vdev=bbdev_null,socket_id=0,max_nb_queues=8"
+ ./test-bbdev.py -e="--vdev=baseband_null,socket_id=0,max_nb_queues=8"
diff --git a/doc/guides/bbdevs/turbo_sw.rst b/doc/guides/bbdevs/turbo_sw.rst
index b3fed163..0b96fbb1 100644
--- a/doc/guides/bbdevs/turbo_sw.rst
+++ b/doc/guides/bbdevs/turbo_sw.rst
@@ -4,7 +4,7 @@
SW Turbo Poll Mode Driver
=========================
-The SW Turbo PMD (**turbo_sw**) provides a poll mode bbdev driver that utilizes
+The SW Turbo PMD (**baseband_turbo_sw**) provides a poll mode bbdev driver that utilizes
Intel optimized libraries for LTE Layer 1 workloads acceleration. This PMD
supports the functions: Turbo FEC, Rate Matching and CRC functions.
@@ -26,6 +26,8 @@ For the decode operation:
* ``RTE_BBDEV_TURBO_CRC_TYPE_24B``
* ``RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN``
* ``RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN``
+* ``RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP``
+* ``RTE_BBDEV_TURBO_EARLY_TERMINATION``
Limitations
@@ -39,25 +41,39 @@ Installation
FlexRAN SDK Download
~~~~~~~~~~~~~~~~~~~~
-To build DPDK with the *turbo_sw* PMD the user is required to download
-the export controlled ``FlexRAN SDK`` Libraries. An account at Intel Resource
-Design Center needs to be registered from
-`<https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_.
+To build DPDK with the *baseband_turbo_sw* PMD, the user is required to download
+the export controlled ``FlexRAN SDK`` Libraries. An account at `Intel Resource
+Design Center <https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_
+needs to be registered.
Once registered, the user needs to log in, and look for
-*Intel SWA_SW_FlexRAN_Release_Package R1_3_0* and click for download. Or use
-this direct download link `<https://cdrd.intel.com/v1/dl/getContent/575367>`_.
+*Intel FlexRAN Software Release Package -1-6-0* and download it, or use
+this direct `link <https://cdrdv2.intel.com/v1/dl/getContent/600609>`_.
After download is complete, the user needs to unpack and compile on their
system before building DPDK.
+The following table maps DPDK versions with past FlexRAN SDK releases:
+
+.. _table_flexran_releases:
+
+.. table:: DPDK and FlexRAN SDK releases compliance
+
+ ===================== ============================
+ DPDK version FlexRAN SDK release
+ ===================== ============================
+ 18.02 1.3.0
+ 18.05 1.4.0
+ 18.08 1.6.0
+ ===================== ============================
+
FlexRAN SDK Installation
~~~~~~~~~~~~~~~~~~~~~~~~
The following are pre-requisites for building FlexRAN SDK Libraries:
(a) An AVX2 supporting machine
- (b) Windriver TS 2 or CentOS 7 operating systems
- (c) Intel ICC compiler installed
+ (b) CentOS Linux release 7.2.1511 (Core) operating system
+ (c) Intel ICC 18.0.1 20171018 compiler installed
The following instructions should be followed in this exact order:
@@ -67,31 +83,25 @@ The following instructions should be followed in this exact order:
source <path-to-icc-compiler-install-folder>/linux/bin/compilervars.sh intel64 -platform linux
-
-#. Extract the ``FlexRAN-1.3.0.tar.gz.zip`` package, then run the SDK extractor
- script and accept the license:
+#. Extract the ``flexran-1-6-0-tar.gz.zip`` package:
.. code-block:: console
- cd <path-to-workspace>/FlexRAN-1.3.0/
- ./SDK-R1.3.0.sh
+ unzip flexran-1-6-0-tar.gz.zip
+ tar xvzf flexran-1-6-0-tar.gz -C FlexRAN-1.6.0/
-#. To allow ``FlexRAN SDK R1.3.0`` to work with bbdev properly, the following
- hotfix is required. Change the return of function ``rate_matching_turbo_lte_avx2()``
- located in file
- ``<path-to-workspace>/FlexRAN-1.3.0/SDK-R1.3.0/sdk/source/phy/lib_rate_matching/phy_rate_match_avx2.cpp``
- to return 0 instead of 1.
+#. Run the SDK extractor script and accept the license:
- .. code-block:: c
+ .. code-block:: console
- - return 1;
- + return 0;
+ cd <path-to-workspace>/FlexRAN-1.6.0/
+ ./SDK-R1.6.0.sh
#. Generate makefiles based on system configuration:
.. code-block:: console
- cd <path-to-workspace>/FlexRAN-1.3.0/SDK-R1.3.0/sdk/
+ cd <path-to-workspace>/FlexRAN-1.6.0/SDK-R1.6.0/sdk/
./create-makefiles-linux.sh
#. A build folder is generated in this form ``build-<ISA>-<CC>``, enter that
@@ -100,7 +110,7 @@ The following instructions should be followed in this exact order:
.. code-block:: console
cd build-avx2-icc/
- make install
+ make && make install
Initialization
@@ -118,8 +128,8 @@ Example:
.. code-block:: console
- export FLEXRAN_SDK=<path-to-workspace>/FlexRAN-1.3.0/SDK-R1.3.0/sdk/build-avx2-icc/install
- export DIR_WIRELESS_SDK=<path-to-workspace>/FlexRAN-1.3.0/SDK-R1.3.0/sdk/
+ export FLEXRAN_SDK=<path-to-workspace>/FlexRAN-1.6.0/SDK-R1.6.0/sdk/build-avx2-icc/install
+ export DIR_WIRELESS_SDK=<path-to-workspace>/FlexRAN-1.6.0/SDK-R1.6.0/sdk/
* Set ``CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=y`` in DPDK common configuration
@@ -127,9 +137,9 @@ Example:
To use the PMD in an application, user must:
-- Call ``rte_vdev_init("turbo_sw")`` within the application.
+- Call ``rte_vdev_init("baseband_turbo_sw")`` within the application.
-- Use ``--vdev="turbo_sw"`` in the EAL options, which will call ``rte_vdev_init()`` internally.
+- Use ``--vdev="baseband_turbo_sw"`` in the EAL options, which will call ``rte_vdev_init()`` internally.
The following parameters (all optional) can be provided in the previous two calls:
@@ -143,5 +153,5 @@ Example:
.. code-block:: console
- ./test-bbdev.py -e="--vdev=turbo_sw,socket_id=0,max_nb_queues=8" \
- -c validation -v ./test_vectors/bbdev_vector_t?_default.data
+ ./test-bbdev.py -e="--vdev=baseband_turbo_sw,socket_id=0,max_nb_queues=8" \
+ -c validation -v ./turbo_*_default.data
diff --git a/doc/guides/compressdevs/features/default.ini b/doc/guides/compressdevs/features/default.ini
new file mode 100644
index 00000000..829e4df6
--- /dev/null
+++ b/doc/guides/compressdevs/features/default.ini
@@ -0,0 +1,26 @@
+;
+; Features of a default compression driver.
+;
+; This file defines the features that are valid for inclusion in
+; the other driver files and also the order that they appear in
+; the features table in the documentation.
+;
+[Features]
+HW Accelerated =
+CPU SSE =
+CPU AVX =
+CPU AVX2 =
+CPU AVX512 =
+CPU NEON =
+Stateful =
+Pass-through =
+OOP SGL In SGL Out =
+OOP SGL In LB Out =
+OOP LB In SGL Out =
+Deflate =
+LZS =
+Adler32 =
+Crc32 =
+Adler32&Crc32 =
+Fixed =
+Dynamic =
diff --git a/doc/guides/compressdevs/features/isal.ini b/doc/guides/compressdevs/features/isal.ini
new file mode 100644
index 00000000..919cf705
--- /dev/null
+++ b/doc/guides/compressdevs/features/isal.ini
@@ -0,0 +1,16 @@
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+; Supported features of 'ISA-L' compression driver.
+;
+[Features]
+CPU SSE = Y
+CPU AVX = Y
+CPU AVX2 = Y
+CPU AVX512 = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
+Deflate = Y
+Fixed = Y
+Dynamic = Y
diff --git a/doc/guides/compressdevs/features/octeontx.ini b/doc/guides/compressdevs/features/octeontx.ini
new file mode 100644
index 00000000..884a8b07
--- /dev/null
+++ b/doc/guides/compressdevs/features/octeontx.ini
@@ -0,0 +1,10 @@
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+; Supported features of 'OCTEONTX ZIP' compression driver.
+;
+[Features]
+HW Accelerated = Y
+Deflate = Y
+Fixed = Y
+Dynamic = Y
diff --git a/doc/guides/compressdevs/features/qat.ini b/doc/guides/compressdevs/features/qat.ini
new file mode 100644
index 00000000..5cd4524b
--- /dev/null
+++ b/doc/guides/compressdevs/features/qat.ini
@@ -0,0 +1,15 @@
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+; Supported features of 'QAT' compression driver.
+;
+[Features]
+HW Accelerated = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
+Deflate = Y
+Adler32 = Y
+Crc32 = Y
+Adler32&Crc32 = Y
+Fixed = Y
diff --git a/doc/guides/compressdevs/features/zlib.ini b/doc/guides/compressdevs/features/zlib.ini
new file mode 100644
index 00000000..58a4ee3a
--- /dev/null
+++ b/doc/guides/compressdevs/features/zlib.ini
@@ -0,0 +1,10 @@
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+; Supported features of 'ZLIB' compression driver.
+;
+[Features]
+Pass-through = Y
+Deflate = Y
+Fixed = Y
+Dynamic = Y
diff --git a/doc/guides/compressdevs/index.rst b/doc/guides/compressdevs/index.rst
new file mode 100644
index 00000000..1f37e260
--- /dev/null
+++ b/doc/guides/compressdevs/index.rst
@@ -0,0 +1,16 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation.
+
+Compression Device Drivers
+==========================
+
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ overview
+ isal
+ octeontx
+ qat_comp
+ zlib
diff --git a/doc/guides/compressdevs/isal.rst b/doc/guides/compressdevs/isal.rst
new file mode 100644
index 00000000..3bc30229
--- /dev/null
+++ b/doc/guides/compressdevs/isal.rst
@@ -0,0 +1,123 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation.
+
+ISA-L Compression Poll Mode Driver
+==================================
+
+The ISA-L PMD (**librte_pmd_isal_comp**) provides poll mode compression &
+decompression driver support utilizing the Intel ISA-L library,
+which implements the deflate algorithm for both Deflate (compression) and Inflate (decompression).
+
+
+Features
+--------
+
+ISA-L PMD has support for:
+
+Compression/Decompression algorithm:
+
+ * DEFLATE
+
+Huffman code type:
+
+ * FIXED
+ * DYNAMIC
+
+Window size support:
+
+ * 32K
+
+Level guide:
+
+The ISA-L compression levels have been mapped to roughly correspond to the
+equivalent ZLIB levels, i.e. ZLIB L1 gives a compression ratio similar to ISA-L L1.
+Compressdev level 0 enables "No Compression", which passes the uncompressed
+data to the output buffer, plus deflate headers.
+The ISA-L library does not support this; therefore compressdev level 0 is not supported.
+
+The compressdev API has 10 levels, 0-9. ISA-L has 4 levels of compression, 0-3.
+As a result the level mappings from the API to the PMD are shown below.
+
+.. _table_ISA-L_compression_levels:
+
+.. table:: Level mapping from Compressdev to ISA-L PMD.
+
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | Compressdev | PMD Functionality | Internal ISA-L |
+ | API Level | | Level |
+ +=============+==============================================+===============================================+
+ | 0 | No compression, Not Supported | --- |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 1 | Dynamic (Fast compression) | 1 |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 2 | Dynamic | 2 |
+ | | (Higher compression ratio) | |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 3 | Dynamic | 3 |
+ | | (Best compression ratio) | (Level 2 if |
+ | | | no AVX512/AVX2) |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 4 | Dynamic (Best compression ratio) | Same as above |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 5 | Dynamic (Best compression ratio) | Same as above |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 6 | Dynamic (Best compression ratio) | Same as above |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 7 | Dynamic (Best compression ratio) | Same as above |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 8 | Dynamic (Best compression ratio) | Same as above |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+ | 9 | Dynamic (Best compression ratio) | Same as above |
+ +-------------+----------------------------------------------+-----------------------------------------------+
+
+.. Note::
+
+ The above table only shows mapping when API calls for dynamic compression.
+ For fixed compression, regardless of API level, internally ISA-L level 0 is always used.
+
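+As an illustration of requesting one of these levels from an application, below
+is a minimal sketch of a compression transform (the level and window size
+values are illustrative only):
+
+.. code-block:: c
+
+    #include <rte_comp.h>
+
+    /* Request dynamic Huffman at compressdev API level 3, which the
+     * table above maps to ISA-L level 3 (or 2 without AVX512/AVX2). */
+    struct rte_comp_xform xform = {
+        .type = RTE_COMP_COMPRESS,
+        .compress = {
+            .algo = RTE_COMP_ALGO_DEFLATE,
+            .deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
+            .level = 3,
+            .window_size = 15, /* log2 of the 32K window */
+        },
+    };
+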
+Limitations
+-----------
+
+* Compressdev level 0, no compression, is not supported.
+
+* Checksums will not be supported until a future release.
+
+Installation
+------------
+
+* To build DPDK with Intel's ISA-L library, the user is required to download the library from `<https://github.com/01org/isa-l>`_.
+
+* Once downloaded, the user needs to build the library; the ISA-L autotools are usually sufficient::
+
+ ./autogen.sh
+ ./configure
+
+* ``make`` can then be used to build and install the library on the system, before building DPDK::
+
+ make
+ sudo make install
+
+* To build with meson, the **libisal.pc** file must be copied into a "pkgconfig" directory,
+  e.g. /usr/lib/pkgconfig or /usr/lib64/pkgconfig depending on your system,
+  for meson to find the ISA-L library. The **libisal.pc** file is located in the library sources::
+
+ cp isal/libisal.pc /usr/lib/pkgconfig/
+
+
+Initialization
+--------------
+
+In order to enable this virtual compression PMD, user must:
+
+* Set ``CONFIG_RTE_LIBRTE_PMD_ISAL=y`` in config/common_base.
+
+To use the PMD in an application, user must:
+
+* Call ``rte_vdev_init("compress_isal")`` within the application.
+
+* Use ``--vdev="compress_isal"`` in the EAL options, which will call ``rte_vdev_init()`` internally.
+
+The following parameter (optional) can be provided in the previous two calls:
+
+* ``socket_id:`` Specify the socket where the memory for the device is going to be allocated
+ (by default, socket_id will be the socket where the core that is creating the PMD is running on).
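+
+As a minimal sketch (error handling elided), creating and then discovering the
+device from application code could look as follows:
+
+.. code-block:: c
+
+    #include <rte_bus_vdev.h>
+    #include <rte_compressdev.h>
+
+    /* Create the ISA-L vdev; "socket_id=0" is the optional
+     * parameter described above. */
+    rte_vdev_init("compress_isal", "socket_id=0");
+
+    /* The new device is now enumerated by the compressdev API. */
+    uint8_t nb_devs = rte_compressdev_count();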
diff --git a/doc/guides/compressdevs/octeontx.rst b/doc/guides/compressdevs/octeontx.rst
new file mode 100644
index 00000000..5a32d5d1
--- /dev/null
+++ b/doc/guides/compressdevs/octeontx.rst
@@ -0,0 +1,105 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Cavium Networks.
+
+Octeontx ZIP Compression Poll Mode Driver
+=========================================
+
+The Octeontx ZIP PMD (**librte_pmd_octeontx_zip**) provides poll mode
+compression & decompression driver support for the ZIP HW offload device found in
+the **Cavium OCTEONTX** SoC family.
+
+More information can be found at `Cavium, Inc Official Website
+<http://www.cavium.com/OCTEON-TX_ARM_Processors.html>`_.
+
+Features
+--------
+
+Octeontx ZIP PMD has support for:
+
+Compression/Decompression algorithm:
+
+* DEFLATE
+
+Huffman code type:
+
+* FIXED
+* DYNAMIC
+
+Window size support:
+
+* 2 to 2^14
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+
+Supported OCTEONTX SoCs
+-----------------------
+
+- CN83xx
+
+Steps To Setup Platform
+-----------------------
+
+The Octeontx SDK includes a kernel image which provides the Octeontx ZIP PF
+driver to manage configuration of the ZIPVF device.
+The required version of the SDK is "OCTEONTX-SDK-6.2.0-build35" or above.
+
+The SDK can be installed using the following command::
+
+   rpm -ivh OCTEONTX-SDK-6.2.0-build35.x86_64.rpm --force --nodeps
+
+It will install the OCTEONTX-SDK at the following default location:
+/usr/local/Cavium_Networks/OCTEONTX-SDK/
+
+For more information on building and booting the Linux kernel on OCTEONTX,
+please refer to /usr/local/Cavium_Networks/OCTEONTX-SDK/docs/OcteonTX-SDK-UG_6.2.0.pdf.
+
+The SDK and related information can be obtained from the `Cavium support site <https://support.cavium.com/>`_.
+
+Installation
+------------
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile the OCTEONTX ZIP PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+ .. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-thunderx-linuxapp-gcc install
+
+
+Initialization
+--------------
+
+The OCTEONTX ZIP device is exposed as a PCI device which consists of a set of
+PCIe VF devices. On EAL initialization, the ZIP PCIe VF devices will be
+probed. To use the PMD in an application, user must:
+
+* Run the ``dpdk-devbind.py`` script to bind the eight ZIP PCIe VFs to the ``vfio-pci`` driver:
+
+ .. code-block:: console
+
+ ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.1
+ ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.2
+ ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.3
+ ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.4
+ ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.5
+ ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.6
+ ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:00.7
+ ./usertools/dpdk-devbind.py -b vfio-pci 0001:04:01.0
+
+* The unit test cases can be run as below:
+
+  .. code-block:: console
+
+     # reserve enough huge pages first, then from the top-level DPDK directory:
+     export RTE_TARGET=arm64-thunderx-linuxapp-gcc
+     export RTE_SDK=`pwd`
+     cd test/test
+     make
+     ./test
+     # at the test application prompt, run:
+     compressdev_autotest
diff --git a/doc/guides/compressdevs/overview.rst b/doc/guides/compressdevs/overview.rst
new file mode 100644
index 00000000..70bbe82b
--- /dev/null
+++ b/doc/guides/compressdevs/overview.rst
@@ -0,0 +1,32 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation.
+
+Compression Device Supported Functionality Matrices
+===================================================
+
+Supported Feature Flags
+-----------------------
+
+.. _table_compression_pmd_features:
+
+.. include:: overview_feature_table.txt
+
+.. Note::
+
+ - "Pass-through" feature flag refers to the ability of the PMD
+ to let input buffers pass-through it, copying the input to the output,
+ without making any modifications to it (no compression done).
+
+ - "OOP SGL In SGL Out" feature flag stands for
+ "Out-of-place Scatter-gather list Input, Scatter-gater list Output",
+ which means PMD supports different scatter-gather styled input and output buffers
+ (i.e. both can consists of multiple segments).
+
+ - "OOP SGL In LB Out" feature flag stands for
+ "Out-of-place Scatter-gather list Input, Linear Buffers Output",
+ which means PMD supports input from scatter-gathered styled buffers, outputting linear buffers
+ (i.e. single segment).
+
+ - "OOP LB In SGL Out" feature flag stands for
+ "Out-of-place Linear Buffers Input, Scatter-gather list Output",
+ which means PMD supports input from linear buffer, outputting scatter-gathered styled buffers.
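+
+In mbuf terms, a scatter-gather list buffer is simply a chained mbuf. Below is
+a minimal sketch of an "OOP SGL In LB Out" style setup, assuming a hypothetical
+mempool ``mp``, destination mbuf ``m_dst`` and previously allocated operation
+``op``, with error handling elided:
+
+.. code-block:: c
+
+    #include <rte_mbuf.h>
+    #include <rte_comp.h>
+
+    /* Build a two-segment (scatter-gather) source buffer. */
+    struct rte_mbuf *seg0 = rte_pktmbuf_alloc(mp);
+    struct rte_mbuf *seg1 = rte_pktmbuf_alloc(mp);
+    rte_pktmbuf_chain(seg0, seg1); /* seg0 is now a 2-segment chain */
+
+    op->m_src = seg0;  /* scatter-gather (multi-segment) input */
+    op->m_dst = m_dst; /* linear (single-segment) output */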
diff --git a/doc/guides/compressdevs/qat_comp.rst b/doc/guides/compressdevs/qat_comp.rst
new file mode 100644
index 00000000..8b1270b7
--- /dev/null
+++ b/doc/guides/compressdevs/qat_comp.rst
@@ -0,0 +1,47 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation.
+
+Intel(R) QuickAssist (QAT) Compression Poll Mode Driver
+=======================================================
+
+The QAT compression PMD provides poll mode compression & decompression driver
+support for the following hardware accelerator devices:
+
+* ``Intel QuickAssist Technology C62x``
+* ``Intel QuickAssist Technology C3xxx``
+
+
+Features
+--------
+
+QAT compression PMD has support for:
+
+Compression/Decompression algorithm:
+
+ * DEFLATE
+
+Huffman code type:
+
+ * FIXED
+
+Window size support:
+
+ * 32K
+
+Checksum generation:
+
+ * CRC32, Adler and combined checksum
+
+Limitations
+-----------
+
+* Compressdev level 0, no compression, is not supported.
+
+* Dynamic Huffman encoding is not yet supported.
+
+Installation
+------------
+
+The QAT compression PMD is built by default with a standard DPDK build.
+
+It depends on a QAT kernel driver, see :ref:`qat_kernel_installation`.
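+
+As an illustration, a transform requesting the supported Huffman type together
+with the combined checksum could be set up as in the following minimal sketch
+(values illustrative only):
+
+.. code-block:: c
+
+    #include <rte_comp.h>
+
+    /* Fixed Huffman only (dynamic is not yet supported), with a
+     * combined CRC32 and Adler32 checksum. */
+    struct rte_comp_xform xform = {
+        .type = RTE_COMP_COMPRESS,
+        .compress = {
+            .algo = RTE_COMP_ALGO_DEFLATE,
+            .deflate.huffman = RTE_COMP_HUFFMAN_FIXED,
+            .chksum = RTE_COMP_CHECKSUM_CRC32_ADLER32,
+            .level = RTE_COMP_LEVEL_PMD_DEFAULT,
+            .window_size = 15, /* 32K */
+        },
+    };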
diff --git a/doc/guides/compressdevs/zlib.rst b/doc/guides/compressdevs/zlib.rst
new file mode 100644
index 00000000..986c59d4
--- /dev/null
+++ b/doc/guides/compressdevs/zlib.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Cavium Networks.
+
+ZLIB Compression Poll Mode Driver
+==================================
+
+The ZLIB PMD (**librte_pmd_zlib**) provides poll mode compression &
+decompression driver support based on the software zlib library.
+
+Features
+--------
+
+ZLIB PMD has support for:
+
+Compression/Decompression algorithm:
+
+* DEFLATE
+
+Huffman code type:
+
+* FIXED
+* DYNAMIC
+
+Window size support:
+
+* Min - 256 bytes
+* Max - 32K
+
+Limitations
+-----------
+
+* Scatter-gather and stateful operations are not supported.
+
+Installation
+------------
+
+* To build DPDK with the ZLIB library, the user is required to install the ``libz`` library.
+* Use one of the following commands for installation.
+
+* For Fedora users::
+
+    sudo yum install zlib-devel
+
+* For Ubuntu users::
+
+    sudo apt-get install zlib1g-dev
+
+* Alternatively, to build from sources,
+  download the zlib sources from http://zlib.net/ and do the following before building DPDK::
+
+    make
+    sudo make install
+
+Initialization
+--------------
+
+In order to enable this virtual compression PMD, user must:
+
+* Set ``CONFIG_RTE_LIBRTE_PMD_ZLIB=y`` in config/common_base.
+
+To use the PMD in an application, user must:
+
+* Call ``rte_vdev_init("compress_zlib")`` within the application.
+
+* Use ``--vdev="compress_zlib"`` in the EAL options, which will call ``rte_vdev_init()`` internally.
+
+The following parameter (optional) can be provided in the previous two calls:
+
+* ``socket_id:`` Specify the socket where the memory for the device is going to be allocated
+ (by default, socket_id will be the socket where the core that is creating the PMD is running on).
diff --git a/doc/guides/conf.py b/doc/guides/conf.py
index cf06f257..c883306d 100644
--- a/doc/guides/conf.py
+++ b/doc/guides/conf.py
@@ -190,18 +190,23 @@ def generate_overview_table(output_filename, table_id, section, table_name, titl
ini_files.sort()
# Build up a list of the table header names from the ini filenames.
- header_names = []
+ pmd_names = []
for ini_filename in ini_files:
name = ini_filename[:-4]
name = name.replace('_vf', 'vf')
+ pmd_names.append(name)
- # Pad the table header names to match the existing format.
+ # Pad the table header names.
+ max_header_len = len(max(pmd_names, key=len))
+ header_names = []
+ for name in pmd_names:
if '_vec' in name:
pmd, vec = name.split('_')
- name = '{0:{fill}{align}7}vec'.format(pmd, fill='.', align='<')
+ name = '{0:{fill}{align}{width}}vec'.format(pmd,
+ fill='.', align='<', width=max_header_len-3)
else:
- name = '{0:{fill}{align}10}'.format(name, fill=' ', align='<')
-
+ name = '{0:{fill}{align}{width}}'.format(name,
+ fill=' ', align='<', width=max_header_len)
header_names.append(name)
# Create a dict of the defined features for each driver from the ini files.
@@ -253,7 +258,7 @@ def print_table_header(outfile, num_cols, header_names, title):
print_table_row(outfile, title, line)
- for i in range(1, 10):
+ for i in range(1, len(header_names[0])):
line = ''
for name in header_names:
line += ' ' + name[i]
@@ -310,7 +315,7 @@ def print_table_css(outfile, table_id):
text-align: center;
}
table#idx th {
- font-size: 80%;
+ font-size: 72%;
white-space: pre-wrap;
vertical-align: top;
padding: 0.5em 0;
@@ -383,6 +388,11 @@ def setup(app):
'AEAD',
'AEAD algorithms in crypto drivers',
'AEAD algorithm')
+ table_file = dirname(__file__) + '/compressdevs/overview_feature_table.txt'
+ generate_overview_table(table_file, 1,
+ 'Features',
+ 'Features availability in compression drivers',
+ 'Feature')
if LooseVersion(sphinx_version) < LooseVersion('1.3.1'):
print('Upgrade sphinx to version >= 1.3.1 for '
diff --git a/doc/guides/contributing/cheatsheet.rst b/doc/guides/contributing/cheatsheet.rst
index 7bc07715..0debd118 100644
--- a/doc/guides/contributing/cheatsheet.rst
+++ b/doc/guides/contributing/cheatsheet.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
Patch Cheatsheet
================
diff --git a/doc/guides/contributing/coding_style.rst b/doc/guides/contributing/coding_style.rst
index b0f0adb8..b1bf0d15 100644
--- a/doc/guides/contributing/coding_style.rst
+++ b/doc/guides/contributing/coding_style.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
.. _coding_style:
DPDK Coding Style
@@ -614,8 +617,8 @@ In the DPDK environment, use the logging interface provided:
* is DEBUG) */
rte_log_set_level(my_logtype2, RTE_LOG_NOTICE);
- /* enable all PMD logs (whose identifier string starts with "pmd") */
- rte_log_set_level_regexp("pmd.*", RTE_LOG_DEBUG);
+ /* enable all PMD logs (whose identifier string starts with "pmd.") */
+ rte_log_set_level_pattern("pmd.*", RTE_LOG_DEBUG);
/* log in debug level */
rte_log_set_global_level(RTE_LOG_DEBUG);
diff --git a/doc/guides/contributing/design.rst b/doc/guides/contributing/design.rst
index 88d3a433..651fd224 100644
--- a/doc/guides/contributing/design.rst
+++ b/doc/guides/contributing/design.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
Design
======
diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst
index 82f2e1bb..6a075553 100644
--- a/doc/guides/contributing/documentation.rst
+++ b/doc/guides/contributing/documentation.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
.. _doc_guidelines:
DPDK Documentation Guidelines
diff --git a/doc/guides/contributing/index.rst b/doc/guides/contributing/index.rst
index 329b678a..f90df451 100644
--- a/doc/guides/contributing/index.rst
+++ b/doc/guides/contributing/index.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
Contributor's Guidelines
========================
diff --git a/doc/guides/contributing/patches.rst b/doc/guides/contributing/patches.rst
index 2287835f..a3d78802 100644
--- a/doc/guides/contributing/patches.rst
+++ b/doc/guides/contributing/patches.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
.. submitting_patches:
Contributing Code to DPDK
@@ -256,6 +259,66 @@ In addition to the ``Signed-off-by:`` name the commit messages can also have
tags for who reported, suggested, tested and reviewed the patch being
posted. Please refer to the `Tested, Acked and Reviewed by`_ section.
+Patch Fix Related Issues
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+`Coverity <https://scan.coverity.com/projects/dpdk-data-plane-development-kit>`_
+is a tool for static code analysis.
+It is a cloud-based service used to scan the DPDK source code,
+and alert developers to any potential defects in the source code.
+When fixing an issue found by Coverity, the patch must contain a Coverity issue ID
+in the body of the commit message. For example::
+
+
+ doc: fix some parameter description
+
+ Update the docs, fixing description of some parameter.
+
+ Coverity issue: 12345
+ Fixes: abcdefgh1234 ("doc: add some parameter")
+ Cc: author@example.com
+
+ Signed-off-by: Alex Smith <alex.smith@example.com>
+
+
+`Bugzilla <https://dpdk.org/tracker>`_
+is a bug- or issue-tracking system.
+Bug-tracking systems allow individuals or groups of developers
+to effectively keep track of outstanding problems with their product.
+When fixing an issue raised in Bugzilla, the patch must contain
+a Bugzilla issue ID in the body of the commit message.
+For example::
+
+ doc: fix some parameter description
+
+ Update the docs, fixing description of some parameter.
+
+ Bugzilla ID: 12345
+ Fixes: abcdefgh1234 ("doc: add some parameter")
+ Cc: author@example.com
+
+ Signed-off-by: Alex Smith <alex.smith@example.com>
+
+Patch for Stable Releases
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All fix patches to the master branch that are candidates for backporting
+should also be CCed to the `stable@dpdk.org <http://dpdk.org/ml/listinfo/stable>`_
+mailing list.
+In the commit message body the ``Cc: stable@dpdk.org`` tag should be inserted as follows::
+
+ doc: fix some parameter description
+
+ Update the docs, fixing description of some parameter.
+
+ Fixes: abcdefgh1234 ("doc: add some parameter")
+ Cc: stable@dpdk.org
+
+ Signed-off-by: Alex Smith <alex.smith@example.com>
+
+For further information on stable contribution you can go to
+:doc:`Stable Contribution Guide <stable>`.
+
Creating Patches
----------------
@@ -450,6 +513,20 @@ Experienced committers may send patches directly with ``git send-email`` without
The options ``--annotate`` and ``confirm = always`` are recommended for checking patches before sending.
+Backporting patches for Stable Releases
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sometimes a maintainer or contributor wishes, or is asked, to send a patch
+for a stable release rather than mainline.
+In this case the patch(es) should be sent to ``stable@dpdk.org``,
+not to ``dev@dpdk.org``.
+
+Given that there are multiple stable releases being maintained at the same time,
+please specify exactly which branch(es) the patch is for
+using ``git send-email --subject-prefix='PATCH 16.11' ...``
+and also optionally in the cover letter or in the annotation.
+
+
The Review Process
------------------
diff --git a/doc/guides/contributing/stable.rst b/doc/guides/contributing/stable.rst
index 0f2f1f37..1746c046 100644
--- a/doc/guides/contributing/stable.rst
+++ b/doc/guides/contributing/stable.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
.. stable_lts_releases:
DPDK Stable Releases and Long Term Support
@@ -57,7 +60,25 @@ that a tagged release has been tested.
What changes should be backported
---------------------------------
-Backporting should be limited to bug fixes.
+Backporting should be limited to bug fixes. All patches accepted on the master
+branch with a Fixes: tag should be backported to the relevant stable/LTS
+branches, unless the submitter indicates otherwise. If there are exceptions,
+they will be discussed on the mailing lists.
+
+Fixes suitable for backport should have a ``Cc: stable@dpdk.org`` tag in the
+commit message body as follows::
+
+ doc: fix some parameter description
+
+ Update the docs, fixing description of some parameter.
+
+ Fixes: abcdefgh1234 ("doc: add some parameter")
+ Cc: stable@dpdk.org
+
+ Signed-off-by: Alex Smith <alex.smith@example.com>
+
+
+Fixes not suitable for backport should not include the ``Cc: stable@dpdk.org`` tag.
Features should not be backported to stable releases. It may be acceptable, in
limited cases, to back port features for the LTS release where:
diff --git a/doc/guides/contributing/versioning.rst b/doc/guides/contributing/versioning.rst
index c495294d..01b36247 100644
--- a/doc/guides/contributing/versioning.rst
+++ b/doc/guides/contributing/versioning.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
Managing ABI updates
====================
@@ -43,29 +46,6 @@ ABI versions are set at the time of major release labeling, and the ABI may
change multiple times, without warning, between the last release label and the
HEAD label of the git tree.
-APIs marked as ``experimental`` are not considered part of the ABI and may
-change without warning at any time. Since changes to APIs are most likely
-immediately after their introduction, as users begin to take advantage of
-those new APIs and start finding issues with them, new DPDK APIs will be
-automatically marked as ``experimental`` to allow for a period of stabilization
-before they become part of a tracked ABI.
-
-Note that marking an API as experimental is a multi step process.
-To mark an API as experimental, the symbols which are desired to be exported
-must be placed in an EXPERIMENTAL version block in the corresponding libraries'
-version map script.
-Secondly, the corresponding definitions of those exported functions, and
-their forward declarations (in the development header files), must be marked
-with the ``__rte_experimental`` tag (see ``rte_compat.h``).
-The DPDK build makefiles perform a check to ensure that the map file and the
-C code reflect the same list of symbols.
-This check can be circumvented by defining ``ALLOW_EXPERIMENTAL_API``
-during compilation in the corresponding library Makefile.
-
-In addition to tagging the code with ``__rte_experimental``,
-the doxygen markup must also contain the EXPERIMENTAL string,
-and the MAINTAINERS file should note the EXPERIMENTAL libraries.
-
ABI versions, once released, are available until such time as their
deprecation has been noted in the Release Notes for at least one major release
cycle. For example consider the case where the ABI for DPDK 2.0 has been
@@ -119,6 +99,37 @@ readability purposes should be avoided.
follow the relevant deprecation policy procedures as above: 3 acks and
announcement at least one release in advance.
+Experimental APIs
+~~~~~~~~~~~~~~~~~
+
+APIs marked as ``experimental`` are not considered part of the ABI and may
+change without warning at any time. Since changes to APIs are most likely
+immediately after their introduction, as users begin to take advantage of
+those new APIs and start finding issues with them, new DPDK APIs will be
+automatically marked as ``experimental`` to allow for a period of stabilization
+before they become part of a tracked ABI.
+
+Note that marking an API as experimental is a multi step process.
+To mark an API as experimental, the symbols which are desired to be exported
+must be placed in an EXPERIMENTAL version block in the corresponding libraries'
+version map script.
+Secondly, the corresponding definitions of those exported functions, and
+their forward declarations (in the development header files), must be marked
+with the ``__rte_experimental`` tag (see ``rte_compat.h``).
+The DPDK build makefiles perform a check to ensure that the map file and the
+C code reflect the same list of symbols.
+This check can be circumvented by defining ``ALLOW_EXPERIMENTAL_API``
+during compilation in the corresponding library Makefile.
+
+In addition to tagging the code with ``__rte_experimental``,
+the doxygen markup must also contain the EXPERIMENTAL string,
+and the MAINTAINERS file should note the EXPERIMENTAL libraries.
+
+Removing the experimental tag associated with an API does not require a
+deprecation notice. However, an API should remain in the experimental state for
+at least one release. Thereafter, the normal process of posting a patch for
+review to the mailing list can be followed.
+
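+A minimal sketch of the tagging steps described above, using a hypothetical
+``rte_foo`` library purely for illustration (the version map fragment is shown
+as a comment next to the C declaration):
+
+.. code-block:: c
+
+    /* rte_foo.h: the forward declaration carries the tag. */
+    #include <rte_compat.h>
+
+    __rte_experimental
+    int rte_foo_do_something(int arg);
+
+    /*
+     * rte_foo_version.map must list the symbol in the
+     * EXPERIMENTAL version block:
+     *
+     *     EXPERIMENTAL {
+     *             global:
+     *             rte_foo_do_something;
+     *     };
+     */
+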
Examples of Deprecation Notices
-------------------------------
diff --git a/doc/guides/cryptodevs/aesni_gcm.rst b/doc/guides/cryptodevs/aesni_gcm.rst
index ffd6ba90..e0346080 100644
--- a/doc/guides/cryptodevs/aesni_gcm.rst
+++ b/doc/guides/cryptodevs/aesni_gcm.rst
@@ -36,12 +36,13 @@ Installation
To build DPDK with the AESNI_GCM_PMD the user is required to download the multi-buffer
library from `here <https://github.com/01org/intel-ipsec-mb>`_
and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v0.48, which
-can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.48.zip>`_.
+The latest version of the library supported by this PMD is v0.50, which
+can be downloaded in `<https://github.com/01org/intel-ipsec-mb/archive/v0.50.zip>`_.
.. code-block:: console
- make
+ make
+ make install
As a reference, the following table shows a mapping between the past DPDK versions
and the external crypto libraries supported by them:
@@ -55,7 +56,8 @@ and the external crypto libraries supported by them:
============= ================================
16.04 - 16.11 Multi-buffer library 0.43 - 0.44
17.02 - 17.05 ISA-L Crypto v2.18
- 17.08+ Multi-buffer library 0.46+
+ 17.08 - 18.02 Multi-buffer library 0.46 - 0.48
+ 18.05+ Multi-buffer library 0.49+
============= ================================
@@ -64,9 +66,6 @@ Initialization
In order to enable this virtual crypto PMD, user must:
-* Export the environmental variable AESNI_MULTI_BUFFER_LIB_PATH with the path where
- the library was extracted.
-
* Build the multi buffer library (explained in Installation section).
* Set CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=y in config/common_base.
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index 3950daae..c2929500 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -27,6 +27,7 @@ Cipher algorithms:
* RTE_CRYPTO_CIPHER_AES256_CTR
* RTE_CRYPTO_CIPHER_AES_DOCSISBPI
* RTE_CRYPTO_CIPHER_DES_CBC
+* RTE_CRYPTO_CIPHER_3DES_CBC
* RTE_CRYPTO_CIPHER_DES_DOCSISBPI
Hash algorithms:
@@ -38,6 +39,7 @@ Hash algorithms:
* RTE_CRYPTO_HASH_SHA384_HMAC
* RTE_CRYPTO_HASH_SHA512_HMAC
* RTE_CRYPTO_HASH_AES_XCBC_HMAC
+* RTE_CRYPTO_HASH_AES_CMAC
AEAD algorithms:
@@ -56,12 +58,13 @@ Installation
To build DPDK with the AESNI_MB_PMD the user is required to download the multi-buffer
library from `here <https://github.com/01org/intel-ipsec-mb>`_
and compile it on their user system before building DPDK.
-The latest version of the library supported by this PMD is v0.48, which
-can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.48.zip>`_.
+The latest version of the library supported by this PMD is v0.50, which
+can be downloaded from `<https://github.com/01org/intel-ipsec-mb/archive/v0.50.zip>`_.
.. code-block:: console
- make
+ make
+ make install
As a reference, the following table shows a mapping between the past DPDK versions
and the Multi-Buffer library version supported by them:
@@ -77,7 +80,8 @@ and the Multi-Buffer library version supported by them:
17.02 0.44
17.05 - 17.08 0.45 - 0.48
17.11 0.47 - 0.48
- 18.02+ 0.48
+ 18.02 0.48
+ 18.05+ 0.49+
============== ============================
@@ -86,9 +90,6 @@ Initialization
In order to enable this virtual crypto PMD, user must:
-* Export the environmental variable AESNI_MULTI_BUFFER_LIB_PATH with the path where
- the library was extracted.
-
* Build the multi buffer library (explained in Installation section).
* Set CONFIG_RTE_LIBRTE_PMD_AESNI_MB=y in config/common_base.
diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
new file mode 100644
index 00000000..034d2036
--- /dev/null
+++ b/doc/guides/cryptodevs/ccp.rst
@@ -0,0 +1,140 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+AMD CCP Poll Mode Driver
+========================
+
+This code provides the initial implementation of the ccp poll mode driver.
+The CCP poll mode driver library (librte_pmd_ccp) implements support for
+AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
+poll mode driver which schedules crypto operations to one or more available
+CCP hardware engines on the platform. The CCP PMD provides poll mode crypto
+driver support for the following hardware accelerator devices::
+
+ AMD Cryptographic Co-processor (0x1456)
+ AMD Cryptographic Co-processor (0x1468)
+
+Features
+--------
+
+CCP crypto PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+* ``RTE_CRYPTO_CIPHER_AES_ECB``
+* ``RTE_CRYPTO_CIPHER_AES_CTR``
+* ``RTE_CRYPTO_CIPHER_3DES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1``
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA224``
+* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA256``
+* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA384``
+* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA512``
+* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
+* ``RTE_CRYPTO_AUTH_MD5_HMAC``
+* ``RTE_CRYPTO_AUTH_AES_CMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_224``
+* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_256``
+* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_384``
+* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
+* ``RTE_CRYPTO_AUTH_SHA3_512``
+* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
+
+AEAD algorithms:
+
+* ``RTE_CRYPTO_AEAD_AES_GCM``
+
+Installation
+------------
+
+To compile the CCP PMD, it has to be enabled in the config/common_base file and the openssl
+packages have to be installed in the build environment.
+
+* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
+
+For Ubuntu 16.04 LTS use the following command to install openssl in the build system:
+
+.. code-block:: console
+
+ sudo apt-get install openssl
+
+This code was verified on Ubuntu 16.04.
+
+Initialization
+--------------
+
+Bind the CCP devices to the DPDK UIO driver module before running the CCP PMD stack,
+e.g. for the 0x1456 device::
+
+ cd to the top-level DPDK directory
+ modprobe uio
+ insmod ./build/kmod/igb_uio.ko
+ echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
+
+Another way to bind the CCP devices to the DPDK UIO driver is by using the ``dpdk-devbind.py`` script.
+The following command assumes the device ``BDF`` is ``0000:09:00.2``::
+
+ cd to the top-level DPDK directory
+ ./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
+
+In order to enable the ccp crypto PMD, user must set CONFIG_RTE_LIBRTE_PMD_CCP=y in config/common_base.
+
+To use the PMD in an application, user must:
+
+* Call rte_vdev_init("crypto_ccp") within the application.
+
+* Use --vdev="crypto_ccp" in the EAL options, which will call rte_vdev_init() internally.
+
+The following parameters (all optional) can be provided in the previous two calls:
+
+* socket_id: Specify the socket where the memory for the device is going to be allocated.
+ (by default, socket_id will be the socket where the core that is creating the PMD is running on).
+
+* max_nb_queue_pairs: Specify the maximum number of queue pairs in the device.
+
+* max_nb_sessions: Specify the maximum number of sessions that can be created (2048 by default).
+
+* ccp_auth_opt: Specify authentication operations to perform on CPU using openssl APIs.
+
+To validate the CCP PMD, the l2fwd-crypto example can be used with the following command:
+
+.. code-block:: console
+
+ sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
+ --chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+ --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+ --iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+ --auth_op GENERATE --auth_algo SHA1_HMAC
+ --auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+ :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+ :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+The CCP PMD also supports computing authentication over CPU with cipher offloaded to CCP.
+To enable this feature, pass the additional argument ``ccp_auth_opt=1`` in the --vdev parameters as
+follows:
+
+.. code-block:: console
+
+ sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp,ccp_auth_opt=1" -- -p 0x1
+ --chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
+ --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
+ --iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
+ --auth_op GENERATE --auth_algo SHA1_HMAC
+ --auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+ :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+ :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
+
+Limitations
+-----------
+
+* Chained mbufs are not supported.
+* MD5_HMAC is supported only for CPU based authentication.
diff --git a/doc/guides/cryptodevs/dpaa2_sec.rst b/doc/guides/cryptodevs/dpaa2_sec.rst
index 5460a92d..9191704e 100644
--- a/doc/guides/cryptodevs/dpaa2_sec.rst
+++ b/doc/guides/cryptodevs/dpaa2_sec.rst
@@ -134,10 +134,20 @@ Supported DPAA2 SoCs
* LS2088A/LS2048A
* LS1088A/LS1048A
+Whitelisting & Blacklisting
+---------------------------
+
+For blacklisting a DPAA2 SEC device, the following command can be used,
+
+ .. code-block:: console
+
+ <dpdk app> <EAL args> -b "fslmc:dpseci.x" -- ...
+
+where ``x`` is the device object ID as configured in the resource container.
+
Limitations
-----------
-* Chained mbufs are not supported.
* Hash followed by Cipher mode is not supported
* Only supports the session-oriented API implementation (session-less APIs are not supported).
@@ -189,20 +199,6 @@ Please note that enabling debugging options may affect system performance.
By default it is only enabled in defconfig_arm64-dpaa2-* config.
Toggle compilation of the ``librte_pmd_dpaa2_sec`` driver.
-* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT`` (default ``n``)
- Toggle display of initialization related driver messages
-
-* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER`` (default ``n``)
- Toggle display of driver runtime messages
-
-* ``CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_RX`` (default ``n``)
- Toggle display of receive fast path run-time message
-
-* ``CONFIG_RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS``
- By default it is set as 2048 in defconfig_arm64-dpaa2-* config.
- It indicates Number of sessions to create in the session memory pool
- on a single DPAA2 SEC device.
-
Installations
-------------
To compile the DPAA2_SEC PMD for Linux arm64 gcc target, run the
@@ -212,3 +208,15 @@ following ``make`` command:
cd <DPDK-source-directory>
make config T=arm64-dpaa2-linuxapp-gcc install
+
+Enabling logs
+-------------
+
+For enabling logs, use the following EAL parameter:
+
+.. code-block:: console
+
+ ./your_crypto_application <EAL args> --log-level=pmd.crypto.dpaa2:<level>
+
+Using ``pmd.crypto.dpaa2`` as the log matching criterion, all crypto PMD logs
+at or below the given ``level`` are enabled.
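+
+The same can also be done programmatically from within the application; below
+is a minimal sketch using the generic log API (not DPAA2-specific):
+
+.. code-block:: c
+
+    #include <rte_log.h>
+
+    /* Enable all logs matching "pmd.crypto.dpaa2*" up to DEBUG level. */
+    rte_log_set_level_pattern("pmd.crypto.dpaa2*", RTE_LOG_DEBUG);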
diff --git a/doc/guides/cryptodevs/dpaa_sec.rst b/doc/guides/cryptodevs/dpaa_sec.rst
index b98f7864..dd683894 100644
--- a/doc/guides/cryptodevs/dpaa_sec.rst
+++ b/doc/guides/cryptodevs/dpaa_sec.rst
@@ -78,10 +78,22 @@ Supported DPAA SoCs
* LS1046A/LS1026A
* LS1043A/LS1023A
+Whitelisting & Blacklisting
+---------------------------
+
+For blacklisting a DPAA SEC device, the following command can be used,
+where ``X`` is the device index, e.g. ``dpaa_bus:dpaa-sec0``:
+
+ .. code-block:: console
+
+    <dpdk app> <EAL args> -b "dpaa_bus:dpaa-secX" -- ...
+
+To disable all four SEC devices:
+
+ .. code-block:: console
+
+    -b "dpaa_bus:dpaa-sec0" -b "dpaa_bus:dpaa-sec1" -b "dpaa_bus:dpaa-sec2" -b "dpaa_bus:dpaa-sec3"
+
Limitations
-----------
-* Chained mbufs are not supported.
* Hash followed by Cipher mode is not supported
* Only supports the session-oriented API implementation (session-less APIs are not supported).
@@ -132,20 +144,6 @@ Please note that enabling debugging options may affect system performance.
By default it is only enabled in defconfig_arm64-dpaa-* config.
Toggle compilation of the ``librte_pmd_dpaa_sec`` driver.
-* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT`` (default ``n``)
- Toggle display of initialization related driver messages
-
-* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER`` (default ``n``)
- Toggle display of driver runtime messages
-
-* ``CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_RX`` (default ``n``)
- Toggle display of receive fast path run-time message
-
-* ``CONFIG_RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS``
- By default it is set as 2048 in defconfig_arm64-dpaa-* config.
- It indicates Number of sessions to create in the session memory pool
- on a single DPAA SEC device.
-
Installations
-------------
To compile the DPAA_SEC PMD for Linux arm64 gcc target, run the
@@ -155,3 +153,15 @@ following ``make`` command:
cd <DPDK-source-directory>
make config T=arm64-dpaa-linuxapp-gcc install
+
+Enabling logs
+-------------
+
+For enabling logs, use the following EAL parameter:
+
+.. code-block:: console
+
+ ./your_crypto_application <EAL args> --log-level=pmd.crypto.dpaa:<level>
+
+Using ``pmd.crypto.dpaa`` as the log matching criterion, all crypto PMD logs
+at or below the given ``level`` are enabled.
diff --git a/doc/guides/cryptodevs/features/aesni_gcm.ini b/doc/guides/cryptodevs/features/aesni_gcm.ini
index 920b6b6a..b9e9c906 100644
--- a/doc/guides/cryptodevs/features/aesni_gcm.ini
+++ b/doc/guides/cryptodevs/features/aesni_gcm.ini
@@ -10,7 +10,8 @@ CPU AESNI = Y
CPU SSE = Y
CPU AVX = Y
CPU AVX2 = Y
-Mbuf scatter gather = Y
+OOP SGL In LB Out = Y
+OOP LB In LB Out = Y
;
; Supported crypto algorithms of the 'aesni_gcm' crypto driver.
;
diff --git a/doc/guides/cryptodevs/features/aesni_mb.ini b/doc/guides/cryptodevs/features/aesni_mb.ini
index a5a45a6d..f7295745 100644
--- a/doc/guides/cryptodevs/features/aesni_mb.ini
+++ b/doc/guides/cryptodevs/features/aesni_mb.ini
@@ -24,6 +24,7 @@ AES CTR (192) = Y
AES CTR (256) = Y
AES DOCSIS BPI = Y
DES CBC = Y
+3DES CBC = Y
DES DOCSIS BPI = Y
;
@@ -37,6 +38,7 @@ SHA256 HMAC = Y
SHA384 HMAC = Y
SHA512 HMAC = Y
AES XCBC MAC = Y
+AES CMAC (128) = Y
;
; Supported AEAD algorithms of the 'aesni_mb' crypto driver.
diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
new file mode 100644
index 00000000..4722e135
--- /dev/null
+++ b/doc/guides/cryptodevs/features/ccp.ini
@@ -0,0 +1,59 @@
+;
+; Supported features of the 'ccp' crypto poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+HW Accelerated = Y
+
+;
+; Supported crypto algorithms of the 'ccp' crypto driver.
+;
+[Cipher]
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+AES ECB (128) = Y
+AES ECB (192) = Y
+AES ECB (256) = Y
+AES CTR (128) = Y
+AES CTR (192) = Y
+AES CTR (256) = Y
+3DES CBC = Y
+
+;
+; Supported authentication algorithms of the 'ccp' crypto driver.
+;
+[Auth]
+MD5 HMAC = Y
+SHA1 = Y
+SHA1 HMAC = Y
+SHA224 = Y
+SHA224 HMAC = Y
+SHA256 = Y
+SHA256 HMAC = Y
+SHA384 = Y
+SHA384 HMAC = Y
+SHA512 = Y
+SHA512 HMAC = Y
+AES CMAC (128) = Y
+AES CMAC (192) = Y
+AES CMAC (256) = Y
+SHA3_224 = Y
+SHA3_224 HMAC = Y
+SHA3_256 = Y
+SHA3_256 HMAC = Y
+SHA3_384 = Y
+SHA3_384 HMAC = Y
+SHA3_512 = Y
+SHA3_512 HMAC = Y
+
+;
+; Supported AEAD algorithms of the 'ccp' crypto driver.
+;
+[AEAD]
+AES GCM (128) = Y
+AES GCM (192) = Y
+AES GCM (256) = Y
diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
index 728ce3b7..92a7ccf3 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -18,7 +18,11 @@ CPU AVX512 =
CPU AESNI =
CPU NEON =
CPU ARM CE =
-Mbuf scatter gather =
+In Place SGL =
+OOP SGL In SGL Out =
+OOP SGL In LB Out =
+OOP LB In SGL Out =
+OOP LB In LB Out =
;
; Supported crypto algorithms of a default crypto driver.
@@ -28,6 +32,9 @@ NULL =
AES CBC (128) =
AES CBC (192) =
AES CBC (256) =
+AES ECB (128) =
+AES ECB (192) =
+AES ECB (256) =
AES CTR (128) =
AES CTR (192) =
AES CTR (256) =
@@ -62,6 +69,17 @@ AES GMAC =
SNOW3G UIA2 =
KASUMI F9 =
ZUC EIA3 =
+AES CMAC (128) =
+AES CMAC (192) =
+AES CMAC (256) =
+SHA3_224 =
+SHA3_224 HMAC =
+SHA3_256 =
+SHA3_256 HMAC =
+SHA3_384 =
+SHA3_384 HMAC =
+SHA3_512 =
+SHA3_512 HMAC =
;
; Supported AEAD algorithms of a default crypto driver.
diff --git a/doc/guides/cryptodevs/features/dpaa2_sec.ini b/doc/guides/cryptodevs/features/dpaa2_sec.ini
index 68c9960d..69700df4 100644
--- a/doc/guides/cryptodevs/features/dpaa2_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa2_sec.ini
@@ -8,7 +8,11 @@ Symmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
Protocol offload = Y
-Mbuf scatter gather = Y
+In Place SGL = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
+OOP LB In LB Out = Y
;
; Supported crypto algorithms of the 'dpaa2_sec' crypto driver.
diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini b/doc/guides/cryptodevs/features/dpaa_sec.ini
index 260fae72..937b621c 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -8,7 +8,11 @@ Symmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
Protocol offload = Y
-Mbuf scatter gather = Y
+In Place SGL = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
+OOP LB In LB Out = Y
;
; Supported crypto algorithms of the 'dpaa_sec' crypto driver.
diff --git a/doc/guides/cryptodevs/features/mrvl.ini b/doc/guides/cryptodevs/features/mvsam.ini
index 6d2fe6aa..b7c105af 100644
--- a/doc/guides/cryptodevs/features/mrvl.ini
+++ b/doc/guides/cryptodevs/features/mvsam.ini
@@ -1,4 +1,4 @@
-; Supported features of the 'mrvl' crypto driver.
+; Supported features of the 'mvsam' crypto driver.
;
; Refer to default.ini for the full list of available PMD features.
;
diff --git a/doc/guides/cryptodevs/features/null.ini b/doc/guides/cryptodevs/features/null.ini
index a9e172da..ecf5779a 100644
--- a/doc/guides/cryptodevs/features/null.ini
+++ b/doc/guides/cryptodevs/features/null.ini
@@ -6,7 +6,7 @@
[Features]
Symmetric crypto = Y
Sym operation chaining = Y
-Mbuf scatter gather = Y
+In Place SGL = Y
;
; Supported crypto algorithms of the 'null' crypto driver.
diff --git a/doc/guides/cryptodevs/features/openssl.ini b/doc/guides/cryptodevs/features/openssl.ini
index 69156586..b9c0bdcc 100644
--- a/doc/guides/cryptodevs/features/openssl.ini
+++ b/doc/guides/cryptodevs/features/openssl.ini
@@ -6,7 +6,9 @@
[Features]
Symmetric crypto = Y
Sym operation chaining = Y
-Mbuf scatter gather = Y
+OOP SGL In LB Out = Y
+OOP LB In LB Out = Y
+Asymmetric crypto = Y
;
; Supported crypto algorithms of the 'openssl' crypto driver.
@@ -49,3 +51,13 @@ AES GCM (256) = Y
AES CCM (128) = Y
AES CCM (192) = Y
AES CCM (256) = Y
+
+;
+; Supported Asymmetric algorithms of the 'openssl' crypto driver.
+;
+[Asymmetric]
+RSA = Y
+DSA = Y
+Modular Exponentiation = Y
+Modular Inversion = Y
+Diffie-hellman = Y
diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
index 51ed5967..29d865e0 100644
--- a/doc/guides/cryptodevs/features/qat.ini
+++ b/doc/guides/cryptodevs/features/qat.ini
@@ -7,7 +7,11 @@
Symmetric crypto = Y
Sym operation chaining = Y
HW Accelerated = Y
-Mbuf scatter gather = Y
+In Place SGL = Y
+OOP SGL In SGL Out = Y
+OOP SGL In LB Out = Y
+OOP LB In SGL Out = Y
+OOP LB In LB Out = Y
;
; Supported crypto algorithms of the 'qat' crypto driver.
diff --git a/doc/guides/cryptodevs/features/virtio.ini b/doc/guides/cryptodevs/features/virtio.ini
new file mode 100644
index 00000000..168fc174
--- /dev/null
+++ b/doc/guides/cryptodevs/features/virtio.ini
@@ -0,0 +1,26 @@
+; Supported features of the 'virtio' crypto driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Symmetric crypto = Y
+Sym operation chaining = Y
+
+;
+; Supported crypto algorithms of the 'virtio' crypto driver.
+;
+[Cipher]
+AES CBC (128) = Y
+AES CBC (192) = Y
+AES CBC (256) = Y
+
+;
+; Supported authentication algorithms of the 'virtio' crypto driver.
+;
+[Auth]
+SHA1 HMAC = Y
+
+;
+; Supported AEAD algorithms of the 'virtio' crypto driver.
+;
+[AEAD]
diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
index 558c9267..e9928a4e 100644
--- a/doc/guides/cryptodevs/index.rst
+++ b/doc/guides/cryptodevs/index.rst
@@ -13,13 +13,15 @@ Crypto Device Drivers
aesni_mb
aesni_gcm
armv8
+ ccp
dpaa2_sec
dpaa_sec
kasumi
openssl
- mrvl
+ mvsam
null
scheduler
snow3g
qat
+ virtio
zuc
diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst
index f56b5475..2265eee4 100644
--- a/doc/guides/cryptodevs/kasumi.rst
+++ b/doc/guides/cryptodevs/kasumi.rst
@@ -34,11 +34,11 @@ Installation
------------
To build DPDK with the KASUMI_PMD the user is required to download
-the export controlled ``libsso_kasumi`` library, by requesting it from
-`<https://networkbuilders.intel.com/network-technologies/dpdk>`_.
-Once approval has been granted, the user needs to log in
-`<https://networkbuilders.intel.com/dpdklogin>`_
-and click on "Kasumi Bit Stream crypto library" link, to download the library.
+the export controlled ``libsso_kasumi`` library, by registering in
+`Intel Resource & Design Center <https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_.
+Once approval has been granted, the user needs to search for
+*Kasumi F8 F9 3GPP cryptographic algorithms Software Library* to download the
+library or directly through this `link <https://cdrdv2.intel.com/v1/dl/getContent/575866>`_.
After downloading the library, the user needs to unpack and compile it
on their system before building DPDK::
diff --git a/doc/guides/cryptodevs/mrvl.rst b/doc/guides/cryptodevs/mvsam.rst
index 6a0b08c5..fd418c26 100644
--- a/doc/guides/cryptodevs/mrvl.rst
+++ b/doc/guides/cryptodevs/mvsam.rst
@@ -29,10 +29,10 @@
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-MRVL Crypto Poll Mode Driver
-============================
+MVSAM Crypto Poll Mode Driver
+=============================
-The MRVL CRYPTO PMD (**librte_crypto_mrvl_pmd**) provides poll mode crypto driver
+The MVSAM CRYPTO PMD (**librte_crypto_mvsam_pmd**) provides poll mode crypto driver
support by utilizing MUSDK library, which provides cryptographic operations
acceleration by using Security Acceleration Engine (EIP197) directly from
user-space with minimum overhead and high performance.
@@ -40,7 +40,7 @@ user-space with minimum overhead and high performance.
Features
--------
-MRVL CRYPTO PMD has support for:
+MVSAM CRYPTO PMD has support for:
* Symmetric crypto
* Sym operation chaining
@@ -73,22 +73,22 @@ Limitations
Installation
------------
-MRVL CRYPTO PMD driver compilation is disabled by default due to external dependencies.
+MVSAM CRYPTO PMD driver compilation is disabled by default due to external dependencies.
Currently there are two driver specific compilation options in
``config/common_base`` available:
-- ``CONFIG_RTE_LIBRTE_MRVL_CRYPTO`` (default ``n``)
+- ``CONFIG_RTE_LIBRTE_MVSAM_CRYPTO`` (default ``n``)
- Toggle compilation of the librte_pmd_mrvl driver.
+ Toggle compilation of the librte_pmd_mvsam driver.
-- ``CONFIG_RTE_LIBRTE_MRVL_CRYPTO_DEBUG`` (default ``n``)
+- ``CONFIG_RTE_LIBRTE_MVSAM_CRYPTO_DEBUG`` (default ``n``)
Toggle display of debugging messages.
For a list of prerequisites please refer to `Prerequisites` section in
-:ref:`MRVL Poll Mode Driver <mrvl_poll_mode_driver>` guide.
+:ref:`MVPP2 Poll Mode Driver <mvpp2_poll_mode_driver>` guide.
-MRVL CRYPTO PMD requires MUSDK built with EIP197 support thus following
+MVSAM CRYPTO PMD requires MUSDK built with EIP197 support thus following
extra option must be passed to the library configuration script:
.. code-block:: console
@@ -101,7 +101,7 @@ to `doc/musdk_get_started.txt`.
Initialization
--------------
-After successfully building MRVL CRYPTO PMD, the following modules need to be
+After successfully building MVSAM CRYPTO PMD, the following modules need to be
loaded:
.. code-block:: console
@@ -118,12 +118,12 @@ The following parameters (all optional) are exported by the driver:
* max_nb_sessions: maximum number of sessions that can be created (2048 by default).
* socket_id: socket on which to allocate the device resources on.
-l2fwd-crypto example application can be used to verify MRVL CRYPTO PMD
+l2fwd-crypto example application can be used to verify MVSAM CRYPTO PMD
operation:
.. code-block:: console
- ./l2fwd-crypto --vdev=net_mrvl,iface=eth0 --vdev=crypto_mrvl -- \
+ ./l2fwd-crypto --vdev=eth_mvpp2,iface=eth0 --vdev=crypto_mvsam -- \
--cipher_op ENCRYPT --cipher_algo aes-cbc \
--cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f \
--auth_op GENERATE --auth_algo sha1-hmac \
diff --git a/doc/guides/cryptodevs/openssl.rst b/doc/guides/cryptodevs/openssl.rst
index 427fc807..bdc30f66 100644
--- a/doc/guides/cryptodevs/openssl.rst
+++ b/doc/guides/cryptodevs/openssl.rst
@@ -80,6 +80,7 @@ crypto processing.
Test name is cryptodev_openssl_autotest.
For performance test cryptodev_openssl_perftest can be used.
+For asymmetric crypto operations testing, run cryptodev_openssl_asym_autotest.
To verify real traffic l2fwd-crypto example can be used with this command:
diff --git a/doc/guides/cryptodevs/overview.rst b/doc/guides/cryptodevs/overview.rst
index b3cb6cae..3f776f07 100644
--- a/doc/guides/cryptodevs/overview.rst
+++ b/doc/guides/cryptodevs/overview.rst
@@ -11,6 +11,33 @@ Supported Feature Flags
.. include:: overview_feature_table.txt
+.. Note::
+
+ - "In Place SGL" feature flag stands for "In place Scatter-gather list",
+ which means that an input buffer can consist of multiple segments,
+ being the operation in-place (input address = output address).
+
+ - "OOP SGL In SGL Out" feature flag stands for
+ "Out-of-place Scatter-gather list Input, Scatter-gater list Output",
+ which means pmd supports different scatter-gather styled input and output buffers
+ (i.e. both can consists of multiple segments).
+
+ - "OOP SGL In LB Out" feature flag stands for
+ "Out-of-place Scatter-gather list Input, Linear Buffers Output",
+ which means PMD supports input from scatter-gathered styled buffers,
+ outputting linear buffers (i.e. single segment).
+
+ - "OOP LB In SGL Out" feature flag stands for
+ "Out-of-place Linear Buffers Input, Scatter-gather list Output",
+ which means PMD supports input from linear buffer, outputting
+ scatter-gathered styled buffers.
+
+ - "OOP LB In LB Out" feature flag stands for
+ "Out-of-place Linear Buffers Input, Scatter-gather list Output",
+ which means that Out-of-place operation is supported,
+ with linear input and output buffers.
+
+
Supported Cipher Algorithms
---------------------------
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 8c8fefaa..bdc58eb2 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -68,12 +68,32 @@ Limitations
* Queue pairs are not thread-safe (that is, within a single queue pair, RX and TX from different lcores is not supported).
-Installation
-------------
+Extra notes on KASUMI F9
+------------------------
+
+When using the KASUMI F9 authentication algorithm, the input buffer must be
+constructed according to the 3GPP KASUMI specification (section 4.4, page 13):
+`<http://cryptome.org/3gpp/35201-900.pdf>`_.
+The input buffer has to have COUNT (4 bytes), FRESH (4 bytes), MESSAGE and DIRECTION (1 bit)
+concatenated. After the DIRECTION bit, a single '1' bit is appended, followed by
+between 0 and 7 '0' bits, so that the total length of the buffer is a multiple of 8 bits.
+Note that the actual message can be any length, specified in bits.
+
+Once the buffer is constructed this way, when creating the crypto operation,
+the length of data to authenticate (op.sym.auth.data.length) must be the length
+of all the items described above, including the padding at the end.
+Also, the offset of data to authenticate (op.sym.auth.data.offset)
+must point at the start of the COUNT bytes.
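+
+A minimal sketch of constructing such a buffer for a byte-aligned message with
+DIRECTION = 0 is shown below; ``m_src``, ``op``, ``msg`` and ``msg_len`` are
+assumed to exist already, and the COUNT and FRESH values are illustrative only:
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <rte_crypto.h>
+    #include <rte_mbuf.h>
+
+    const uint8_t count[4] = {0x38, 0xA6, 0xF0, 0x56};
+    const uint8_t fresh[4] = {0x05, 0xD2, 0xEC, 0x49};
+    uint8_t *buf = rte_pktmbuf_mtod(m_src, uint8_t *);
+
+    memcpy(buf, count, 4);          /* COUNT, 4 bytes */
+    memcpy(buf + 4, fresh, 4);      /* FRESH, 4 bytes */
+    memcpy(buf + 8, msg, msg_len);  /* MESSAGE, byte-aligned here */
+    /* DIRECTION bit '0', the single '1' bit, then six '0' padding bits. */
+    buf[8 + msg_len] = 0x40;
+
+    op->sym->auth.data.offset = 0;                     /* start of COUNT */
+    op->sym->auth.data.length = (8 + msg_len + 1) * 8; /* in bits, with padding */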
+
+
+Building the DPDK QAT cryptodev PMD
+-----------------------------------
+
-To enable QAT in DPDK, follow the instructions for modifying the compile-time
+To enable QAT crypto in DPDK, follow the instructions for modifying the compile-time
configuration file as described `here <http://dpdk.org/doc/guides/linux_gsg/build_dpdk.html>`_.
+
Quick instructions are as follows:
.. code-block:: console
@@ -81,29 +101,95 @@ Quick instructions are as follows:
cd to the top-level DPDK directory
make config T=x86_64-native-linuxapp-gcc
sed -i 's,\(CONFIG_RTE_LIBRTE_PMD_QAT\)=n,\1=y,' build/.config
+ sed -i 's,\(CONFIG_RTE_LIBRTE_PMD_QAT_SYM\)=n,\1=y,' build/.config
make
-To use the DPDK QAT PMD an SRIOV-enabled QAT kernel driver is required. The VF
-devices exposed by this driver will be used by the QAT PMD. The devices and
-available kernel drivers and device ids are :
+
+.. _qat_kernel_installation:
+
+Dependency on the QAT kernel driver
+-----------------------------------
+
+To use the QAT PMD an SRIOV-enabled QAT kernel driver is required. The VF
+devices created and initialised by this driver will be used by the QAT PMD.
+
+Instructions for installation are below, but first is an explanation of the
+relationships between the PF/VF devices and the PMDs visible to
+DPDK applications.
+
+
+Acceleration services - cryptography and compression - are provided to DPDK
+applications via PMDs which register to implement the corresponding
+cryptodev and compressdev APIs.
+
+Each QuickAssist VF device can expose one cryptodev PMD and/or one compressdev PMD.
+These QAT PMDs share the same underlying device and pci-mgmt code, but are
+enumerated independently on their respective APIs and appear as independent
+devices to applications.
+
+.. Note::
+
+ Each VF can only be used by one DPDK process. It is not possible to share
+ the same VF across multiple processes, even if these processes are using
+ different acceleration services.
+
+ Conversely one DPDK process can use one or more QAT VFs and can expose both
+ cryptodev and compressdev instances on each of those VFs.
+
+
+
+Device and driver naming
+------------------------
+
+* The qat cryptodev driver name is "crypto_qat".
+ The rte_cryptodev_devices_get() returns the devices exposed by this driver.
+
+* Each qat crypto device has a unique name, in format
+ <pci bdf>_<service>, e.g. "0000:41:01.0_qat_sym".
+ This name can be passed to rte_cryptodev_get_dev_id() to get the device_id.
+
+.. Note::
+
+ The qat crypto driver name is passed to the dpdk-test-crypto-perf tool in the -devtype parameter.
+
+ The qat crypto device name is in the format of the slave parameter passed to the crypto scheduler.
+
+* The qat compressdev driver name is "comp_qat".
+ The rte_compressdev_devices_get() returns the devices exposed by this driver.
+
+* Each qat compression device has a unique name, in format
+ <pci bdf>_<service>, e.g. "0000:41:01.0_qat_comp".
+ This name can be passed to rte_compressdev_get_dev_id() to get the device_id.
+
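+A minimal sketch of looking up the device ids by these unique names (the BDF
+shown is illustrative):
+
+.. code-block:: c
+
+    #include <rte_cryptodev.h>
+    #include <rte_compressdev.h>
+
+    int crypto_id = rte_cryptodev_get_dev_id("0000:41:01.0_qat_sym");
+    int comp_id = rte_compressdev_get_dev_id("0000:41:01.0_qat_comp");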
+
+Available kernel drivers
+------------------------
+
+Kernel drivers for each device are listed in the following table. Scroll right
+to check that the driver and device support the service you require.
+
.. _table_qat_pmds_drivers:
.. table:: QAT device generations, devices and drivers
- +-----+----------+--------+---------------+------------+--------+------+--------+--------+
- | Gen | Device | Driver | Kernel Module | Pci Driver | PF Did | #PFs | Vf Did | VFs/PF |
- +=====+==========+========+===============+============+========+======+========+========+
- | 1 | DH895xCC | 01.org | icp_qa_al | n/a | 435 | 1 | 443 | 32 |
- +-----+----------+--------+---------------+------------+--------+------+--------+--------+
- | 1 | DH895xCC | 4.4+ | qat_dh895xcc | dh895xcc | 435 | 1 | 443 | 32 |
- +-----+----------+--------+---------------+------------+--------+------+--------+--------+
- | 2 | C62x | 4.5+ | qat_c62x | c6xx | 37c8 | 3 | 37c9 | 16 |
- +-----+----------+--------+---------------+------------+--------+------+--------+--------+
- | 2 | C3xxx | 4.5+ | qat_c3xxx | c3xxx | 19e2 | 1 | 19e3 | 16 |
- +-----+----------+--------+---------------+------------+--------+------+--------+--------+
- | 2 | D15xx | p | qat_d15xx | d15xx | 6f54 | 1 | 6f55 | 16 |
- +-----+----------+--------+---------------+------------+--------+------+--------+--------+
+ +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+ | Gen | Device | Driver/ver | Kernel Module | Pci Driver | PF Did | #PFs | VF Did | VFs/PF | cryptodev | compressdev |
+ +=====+==========+===============+===============+============+========+======+========+========+===========+=============+
+ | 1 | DH895xCC | linux/4.4+ | qat_dh895xcc | dh895xcc | 435 | 1 | 443 | 32 | Yes | No |
+ +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+ | " | " | 01.org/4.2.0+ | " | " | " | " | " | " | Yes | No |
+ +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+ | 2 | C62x | linux/4.5+ | qat_c62x | c6xx | 37c8 | 3 | 37c9 | 16 | Yes | No |
+ +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+ | " | " | 01.org/4.2.0+ | " | " | " | " | " | " | Yes | Yes |
+ +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+ | 2 | C3xxx | linux/4.5+ | qat_c3xxx | c3xxx | 19e2 | 1 | 19e3 | 16 | Yes | No |
+ +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+ | " | " | 01.org/4.2.0+ | " | " | " | " | " | " | Yes | Yes |
+ +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
+ | 2 | D15xx | p | qat_d15xx | d15xx | 6f54 | 1 | 6f55 | 16 | Yes | No |
+ +-----+----------+---------------+---------------+------------+--------+------+--------+--------+-----------+-------------+
The ``Driver`` column indicates either the Linux kernel version in which
@@ -196,9 +282,9 @@ Consult the *Getting Started Guide* at the same URL for further information.
The steps below assume you are:
-* Building on a platform with one ``DH895xCC`` device.
-* Using package ``qatmux.l.2.3.0-34.tgz``.
-* On Fedora21 kernel ``3.17.4-301.fc21.x86_64``.
+* Building on a platform with one ``C62x`` device.
+* Using package ``qat1.7.l.4.2.0-000xx.tar.gz``.
+* On Fedora26 kernel ``4.11.11-300.fc26.x86_64``.
In the BIOS ensure that SRIOV is enabled and VT-d is disabled.
@@ -206,21 +292,30 @@ Uninstall any existing QAT driver, for example by running:
* ``./installer.sh uninstall`` in the directory where originally installed.
-* or ``rmmod qat_dh895xcc; rmmod intel_qat``.
Build and install the SRIOV-enabled QAT driver::
mkdir /QAT
cd /QAT
- # Copy qatmux.l.2.3.0-34.tgz to this location
- tar zxof qatmux.l.2.3.0-34.tgz
+ # Copy the package to this location and unpack
+ tar zxof qat1.7.l.4.2.0-000xx.tar.gz
+
+ ./configure --enable-icp-sriov=host
+ make install
+
+Confirm the driver is correctly installed and is using firmware version 4.2.0::
+
+ cat /sys/kernel/debug/qat<your device type and bdf>/version/fw
- export ICP_WITHOUT_IOMMU=1
- ./installer.sh install QAT1.6 host
-You can use ``cat /proc/icp_dh895xcc_dev0/version`` to confirm the driver is correctly installed.
-You can use ``lspci -d:443`` to confirm the of the 32 VF devices available per ``DH895xCC`` device.
+Confirm the presence of 48 VF devices - 16 per PF::
+
+ lspci -d:37c9
+
To complete the installation - follow instructions in `Binding the available VFs to the DPDK UIO driver`_.
@@ -261,6 +356,7 @@ To complete the installation - follow instructions in `Binding the available VFs
sudo yum install zlib-devel
sudo yum install openssl-devel
+ sudo yum install libudev-devel
.. Note::
@@ -343,19 +439,28 @@ Another way to bind the VFs to the DPDK UIO driver is by using the
./usertools/dpdk-devbind.py -b igb_uio 0000:03:01.1
-Extra notes on KASUMI F9
-------------------------
+Debugging
+---------
-When using KASUMI F9 authentication algorithm, the input buffer must be
-constructed according to the 3GPP KASUMI specifications (section 4.4, page 13):
-`<http://cryptome.org/3gpp/35201-900.pdf>`_.
-Input buffer has to have COUNT (4 bytes), FRESH (4 bytes), MESSAGE and DIRECTION (1 bit)
-concatenated. After the DIRECTION bit, a single '1' bit is appended, followed by
-between 0 and 7 '0' bits, so that the total length of the buffer is multiple of 8 bits.
-Note that the actual message can be any length, specified in bits.
+There are two sets of trace available via the dynamic logging feature:
-Once this buffer is passed this way, when creating the crypto operation,
-length of data to authenticate (op.sym.auth.data.length) must be the length
-of all the items described above, including the padding at the end.
-Also, offset of data to authenticate (op.sym.auth.data.offset)
-must be such that points at the start of the COUNT bytes.
+* ``pmd.qat_dp`` exposes trace on the data-path.
+* ``pmd.qat_general`` exposes all other trace.
+
+``pmd.qat`` exposes both sets of traces.
+They can be enabled using the log-level option (where 8=maximum log level) on
+the process cmdline, e.g. using any of the following::
+
+ --log-level="pmd.qat_general,8"
+ --log-level="pmd.qat_dp,8"
+ --log-level="pmd.qat,8"
+
+.. Note::
+
+   The global RTE_LOG_DP_LEVEL overrides the data-path trace, so it must be set
+   to RTE_LOG_DEBUG to see all of that trace. This variable is in config/rte_config.h
+   for the meson build and in config/common_base for the gnu make build.
+   The dynamic global log level also overrides both sets of trace, so e.g. no
+   QAT trace would be displayed in this case::
+
+ --log-level="7" --log-level="pmd.qat_general,8"
diff --git a/doc/guides/cryptodevs/scheduler.rst b/doc/guides/cryptodevs/scheduler.rst
index d67894d5..a754a27e 100644
--- a/doc/guides/cryptodevs/scheduler.rst
+++ b/doc/guides/cryptodevs/scheduler.rst
@@ -71,6 +71,11 @@ two calls:
mode parameter values are specified in the "Cryptodev Scheduler Modes
Overview" section.
+* mode_param: Specify a mode-specific parameter. Some scheduling modes
+  may be initialized with specific parameters other than the default ones,
+  such as the **threshold** packet size of the **packet-size-distr** mode. This
+  parameter carries that value.
+
* ordering: Specify the status of the crypto operations ordering feature.
The value of this parameter can be "enable" or "disable". This feature
is disabled by default.
@@ -132,7 +137,12 @@ operation:
**option_type** must be **CDEV_SCHED_OPTION_THRESHOLD** and **option** should
point to a rte_cryptodev_scheduler_threshold_option structure filled with
appropriate threshold value. Please NOTE this threshold has to be a power-of-2
- unsigned integer.
+  unsigned integer. It is possible to use the **mode_param** initialization
+  parameter to achieve the same purpose. For example:
+
+ ... --vdev "crypto_scheduler,mode=packet-size-distr,mode_param=threshold:512" ...
+
+  The above parameter sets the threshold value to 512.
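+
+  Below is a minimal sketch of the equivalent runtime call (``scheduler_id``,
+  assumed here to hold the scheduler's cryptodev id, is illustrative):
+
+  .. code-block:: c
+
+     #include <rte_cryptodev_scheduler.h>
+
+     static int
+     set_distr_threshold(uint8_t scheduler_id)
+     {
+         struct rte_cryptodev_scheduler_threshold_option opt = {
+             .threshold = 512, /* must be a power-of-2 */
+         };
+
+         return rte_cryptodev_scheduler_option_set(scheduler_id,
+                 CDEV_SCHED_OPTION_THRESHOLD, &opt);
+     }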
* **CDEV_SCHED_MODE_FAILOVER:**
diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index 24b4f661..7cba712c 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -33,11 +33,11 @@ Installation
------------
To build DPDK with the SNOW3G_PMD the user is required to download
-the export controlled ``libsso_snow3g`` library, by requesting it from
-`<https://networkbuilders.intel.com/network-technologies/dpdk>`_.
-Once approval has been granted, the user needs to log in
-`<https://networkbuilders.intel.com/dpdklogin>`_
-and click on "Snow3G Bit Stream crypto library" link, to download the library.
+the export controlled ``libsso_snow3g`` library, by registering in
+`Intel Resource & Design Center <https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_.
+Once approval has been granted, the user needs to search for
+*Snow3G F8 F9 3GPP cryptographic algorithms Software Library* to download the
+library or directly through this `link <https://cdrdv2.intel.com/v1/dl/getContent/575867>`_.
After downloading the library, the user needs to unpack and compile it
on their system before building DPDK::
diff --git a/doc/guides/cryptodevs/virtio.rst b/doc/guides/cryptodevs/virtio.rst
new file mode 100644
index 00000000..f3aa7c65
--- /dev/null
+++ b/doc/guides/cryptodevs/virtio.rst
@@ -0,0 +1,117 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+Virtio Crypto Poll Mode Driver
+==============================
+
+The virtio crypto PMD provides poll mode driver support for the virtio crypto
+device.
+
+Features
+--------
+
+The virtio crypto PMD has support for:
+
+Cipher algorithms:
+
+* ``RTE_CRYPTO_CIPHER_AES_CBC``
+
+Hash algorithms:
+
+* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
+
+Limitations
+-----------
+
+* Only supports the session-oriented API implementation (session-less APIs are
+ not supported).
+* Only supports modern mode since virtio crypto conforms to virtio-1.0.
+* Only has two types of queues: data queue and control queue. These two queues
+  only support indirect buffers for communication with the virtio backend.
+* Only supports the AES_CBC cipher-only algorithm and the AES_CBC with
+  HMAC_SHA1 chaining algorithm, since these are the only algorithms
+  supported by the vhost crypto backend.
+* Does not support Link State interrupt.
+* Does not support runtime configuration.
+
+Virtio crypto PMD Rx/Tx Callbacks
+---------------------------------
+
+Rx callbacks:
+
+* ``virtio_crypto_pkt_rx_burst``
+
+Tx callbacks:
+
+* ``virtio_crypto_pkt_tx_burst``
+
+Installation
+------------
+
+Quick instructions are as follows:
+
+Firstly, run the DPDK vhost crypto sample as the server side and build QEMU
+with vhost crypto enabled.
+QEMU can then be started using the following parameters:
+
+.. code-block:: console
+
+    qemu-system-x86_64 \
+        [...] \
+        -chardev socket,id=charcrypto0,path=/path/to/your/socket \
+        -object cryptodev-vhost-user,id=cryptodev0,chardev=charcrypto0 \
+        -device virtio-crypto-pci,id=crypto0,cryptodev=cryptodev0 \
+        [...]
+
+Secondly, bind the virtio-crypto device to the ``uio_pci_generic`` driver.
+For example, if 0000:00:04.0 is the domain, bus, device and function
+number of the virtio-crypto device:
+
+.. code-block:: console
+
+ modprobe uio_pci_generic
+ echo -n 0000:00:04.0 > /sys/bus/pci/drivers/virtio-pci/unbind
+ echo "1af4 1054" > /sys/bus/pci/drivers/uio_pci_generic/new_id
+
+Finally, the front-end virtio crypto PMD can be installed:
+
+.. code-block:: console
+
+    # from the top-level DPDK directory:
+ sed -i 's,\(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO\)=n,\1=y,' config/common_base
+ make config T=x86_64-native-linuxapp-gcc
+ make install T=x86_64-native-linuxapp-gcc
+
+Tests
+-----
+
+The unit test cases can be run as below:
+
+.. code-block:: console
+
+    # reserve enough hugepages first, then from the top-level DPDK directory:
+    export RTE_TARGET=x86_64-native-linuxapp-gcc
+    export RTE_SDK=`pwd`
+    cd test/test
+    make
+    ./test
+    RTE>> cryptodev_virtio_autotest
+
+The performance can be tested as below:
+
+.. code-block:: console
+
+    # reserve enough hugepages first, then from the top-level DPDK directory:
+    export RTE_TARGET=x86_64-native-linuxapp-gcc
+    export RTE_SDK=`pwd`
+    cd app/test-crypto-perf
+    make
+    # then run the test with the following command:
+
+ ./dpdk-test-crypto-perf -l 0,1 -- --devtype crypto_virtio \
+ --ptest throughput --optype cipher-then-auth --cipher-algo aes-cbc \
+ --cipher-op encrypt --cipher-key-sz 16 --auth-algo sha1-hmac \
+ --auth-op generate --auth-key-sz 64 --digest-sz 12 \
+ --total-ops 100000000 --burst-sz 64 --buffer-sz 2048
diff --git a/doc/guides/cryptodevs/zuc.rst b/doc/guides/cryptodevs/zuc.rst
index e226ef9d..e3898996 100644
--- a/doc/guides/cryptodevs/zuc.rst
+++ b/doc/guides/cryptodevs/zuc.rst
@@ -35,11 +35,11 @@ Installation
------------
To build DPDK with the ZUC_PMD the user is required to download
-the export controlled ``libsso_zuc`` library, by requesting it from
-`<https://networkbuilders.intel.com/network-technologies/dpdk>`_.
-Once approval has been granted, the user needs to log in
-`<https://networkbuilders.intel.com/dpdklogin>`_
-and click on "ZUC Library" link, to download the library.
+the export controlled ``libsso_zuc`` library, by registering in
+`Intel Resource & Design Center <https://www.intel.com/content/www/us/en/design/resource-design-center.html>`_.
+Once approval has been granted, the user needs to search for
+*ZUC 128-EAA3 and 128-EIA3 3GPP cryptographic algorithms Software Library* to download the
+library or directly through this `link <https://cdrdv2.intel.com/v1/dl/getContent/575868>`_.
After downloading the library, the user needs to unpack and compile it
on their system before building DPDK::
diff --git a/doc/guides/eventdevs/dpaa2.rst b/doc/guides/eventdevs/dpaa2.rst
index 5b8da95d..ad94f24b 100644
--- a/doc/guides/eventdevs/dpaa2.rst
+++ b/doc/guides/eventdevs/dpaa2.rst
@@ -129,7 +129,19 @@ Example:
.. code-block:: console
- ./your_eventdev_application --vdev="event_dpaa2"
+ ./your_eventdev_application --vdev="event_dpaa2"
+
+Enabling logs
+-------------
+
+For enabling logs, use the following EAL parameter:
+
+.. code-block:: console
+
+ ./your_eventdev_application <EAL args> --log-level=pmd.event.dpaa2,<level>
+
+Using ``pmd.event.dpaa2`` as the log matching criterion, all event PMD logs
+with a level lower than or equal to the given ``level`` are enabled.
Limitations
-----------
diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index 4fabe54f..18cfc7a9 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -28,6 +28,9 @@ Features of the OCTEONTX SSOVF PMD are:
- Open system with configurable amount of outstanding events
- HW accelerated dequeue timeout support to enable power management
- SR-IOV VF
+- HW managed event timer support through TIMVF, with high precision and
+  time granularity of 1us.
+- Up to 64 event timer adapters.
Supported OCTEONTX SoCs
-----------------------
@@ -36,7 +39,7 @@ Supported OCTEONTX SoCs
Prerequisites
-------------
-See :doc: `../platform/octeontx` for setup information.
+See :doc:`../platform/octeontx` for setup information.
Pre-Installation Configuration
------------------------------
@@ -93,7 +96,17 @@ The tests are run once the vdev creation is successfully complete.
.. code-block:: console
- --vdev="event_octeontx,self_test=1"
+ --vdev="event_octeontx,selftest=1"
+
+
+Enable TIMvf stats
+------------------
+TIMvf stats can be enabled by using the following option; by default the
+stats are disabled.
+
+.. code-block:: console
+
+ --vdev="event_octeontx,timvf_stats=1"
Limitations
@@ -110,3 +123,19 @@ Rx adapter support
When eth_octeontx is used as Rx adapter, event schedule type
``RTE_SCHED_TYPE_PARALLEL`` is not supported.
+
+Event timer adapter support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When timvf is used as an event timer adapter, the clock source mapping is as
+follows:
+
+.. code-block:: console
+
+ RTE_EVENT_TIMER_ADAPTER_CPU_CLK = TIM_CLK_SRC_SCLK
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK0 = TIM_CLK_SRC_GPIO
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK1 = TIM_CLK_SRC_GTI
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK2 = TIM_CLK_SRC_PTP
+
+When timvf is used as an event timer adapter, event schedule type
+``RTE_SCHED_TYPE_PARALLEL`` is not supported.
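+
+Below is a minimal sketch of selecting one of these clock sources when
+creating the adapter (the adapter id and the timing values are illustrative
+only):
+
+.. code-block:: c
+
+    #include <rte_event_timer_adapter.h>
+
+    static struct rte_event_timer_adapter *
+    create_timvf_adapter(uint8_t dev_id)
+    {
+        const struct rte_event_timer_adapter_conf conf = {
+            .event_dev_id = dev_id,
+            .timer_adapter_id = 0,
+            .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK, /* TIM_CLK_SRC_SCLK */
+            .timer_tick_ns = 1000,     /* 1us tick, the TIMVF granularity */
+            .max_tmo_ns = 10000000,    /* 10ms maximum timeout */
+            .nb_timers = 1024,
+            .flags = 0,
+        };
+
+        return rte_event_timer_adapter_create(&conf);
+    }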
diff --git a/doc/guides/faq/faq.rst b/doc/guides/faq/faq.rst
index c1132fb4..f19c1389 100644
--- a/doc/guides/faq/faq.rst
+++ b/doc/guides/faq/faq.rst
@@ -62,19 +62,16 @@ the wrong socket, the application simply will not start.
On application startup, there is a lot of EAL information printed. Is there any way to reduce this?
---------------------------------------------------------------------------------------------------
-Yes, the option ``--log-level=`` accepts one of these numbers:
-
-.. code-block:: c
-
- #define RTE_LOG_EMERG 1U /* System is unusable. */
- #define RTE_LOG_ALERT 2U /* Action must be taken immediately. */
- #define RTE_LOG_CRIT 3U /* Critical conditions. */
- #define RTE_LOG_ERR 4U /* Error conditions. */
- #define RTE_LOG_WARNING 5U /* Warning conditions. */
- #define RTE_LOG_NOTICE 6U /* Normal but significant condition. */
- #define RTE_LOG_INFO 7U /* Informational. */
- #define RTE_LOG_DEBUG 8U /* Debug-level messages. */
-
+Yes, the option ``--log-level=`` accepts either a symbolic name or a number:
+
+1. emergency
+2. alert
+3. critical
+4. error
+5. warning
+6. notice
+7. info
+8. debug
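+
+These map to the RTE_LOG_* constants in ``rte_log.h``. As a minimal sketch, an
+application can also lower the global level at runtime (this does not affect
+messages already printed during EAL init):
+
+.. code-block:: c
+
+    #include <rte_log.h>
+
+    static void
+    quiet_logs(void)
+    {
+        /* Show only notice-and-higher-priority messages from now on. */
+        rte_log_set_global_level(RTE_LOG_NOTICE);
+    }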
How can I tune my network application to achieve lower latency?
---------------------------------------------------------------
diff --git a/doc/guides/freebsd_gsg/build_sample_apps.rst b/doc/guides/freebsd_gsg/build_sample_apps.rst
index 90f05023..a085b618 100644
--- a/doc/guides/freebsd_gsg/build_sample_apps.rst
+++ b/doc/guides/freebsd_gsg/build_sample_apps.rst
@@ -128,6 +128,9 @@ The EAL options for FreeBSD are as follows:
* ``--proc-type``:
The type of process instance.
+* ``-m MB``:
+ Memory to allocate from hugepages, regardless of processor socket.
+
Other options, which are specific to Linux and are not supported under FreeBSD, are as follows:
* ``socket-mem``:
@@ -142,10 +145,6 @@ Other options, specific to Linux and are not supported under FreeBSD are as foll
* ``--file-prefix``:
The prefix text used for hugepage filenames.
-* ``-m MB``:
- Memory to allocate from hugepages, regardless of processor socket.
- It is recommended that ``--socket-mem`` be used instead of this option.
-
The ``-c`` or ``-l`` option is mandatory; the others are optional.
Copy the DPDK application binary to your target, then run the application
diff --git a/doc/guides/howto/rte_flow.rst b/doc/guides/howto/rte_flow.rst
index 4a27c995..caa4e1af 100644
--- a/doc/guides/howto/rte_flow.rst
+++ b/doc/guides/howto/rte_flow.rst
@@ -1,33 +1,5 @@
-.. BSD LICENSE
- Copyright(c) 2017 Mellanox Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Mellanox Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2017 Mellanox Technologies, Ltd
Generic flow API - examples
===========================
@@ -276,7 +248,7 @@ Code
/* end the pattern array */
pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
- /* create the drop action */
+ /* create the queue action */
actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
actions[0].conf = &queue;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
diff --git a/doc/guides/howto/virtio_user_for_container_networking.rst b/doc/guides/howto/virtio_user_for_container_networking.rst
index aa68b531..476ce3a6 100644
--- a/doc/guides/howto/virtio_user_for_container_networking.rst
+++ b/doc/guides/howto/virtio_user_for_container_networking.rst
@@ -109,7 +109,8 @@ We have below limitations in this solution:
* Cannot work with --no-huge option. Currently, DPDK uses anonymous mapping
under this option which cannot be reopened to share with vhost backend.
* Cannot work when there are more than VHOST_MEMORY_MAX_NREGIONS(8) hugepages.
- In another word, do not use 2MB hugepage so far.
+  If you have more regions (especially when 2MB hugepages are used), the
+  ``--single-file-segments`` option can help to reduce the number of shared files.
* Applications should not use file name like HUGEFILE_FMT ("%smap_%d"). That
will bring confusion when sharing hugepage files with backend by name.
* Root privilege is a must. DPDK resolves physical addresses of hugepages
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index d60529dd..8a9ed65c 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -17,7 +17,9 @@ DPDK documentation
nics/index
bbdevs/index
cryptodevs/index
+ compressdevs/index
eventdevs/index
+ rawdevs/index
mempool/index
platform/index
contributing/index
diff --git a/doc/guides/linux_gsg/build_sample_apps.rst b/doc/guides/linux_gsg/build_sample_apps.rst
index 39a47b71..332424e0 100644
--- a/doc/guides/linux_gsg/build_sample_apps.rst
+++ b/doc/guides/linux_gsg/build_sample_apps.rst
@@ -110,7 +110,13 @@ The EAL options are as follows:
``[domain:]bus:devid.func`` values. Cannot be used with ``-b`` option.
* ``--socket-mem``:
- Memory to allocate from hugepages on specific sockets.
+ Memory to allocate from hugepages on specific sockets. In dynamic memory mode,
+  this memory will also be pinned (i.e. not released back to the system until
+  the application closes).
+
+* ``--socket-limit``:
+  Limit the maximum memory available for allocation on each socket. Not
+  supported in legacy memory mode.
* ``-d``:
Add a driver or driver directory to be loaded.
@@ -148,6 +154,14 @@ The EAL options are as follows:
* ``--vfio-intr``:
Specify interrupt type to be used by VFIO (has no effect if VFIO is not used).
+* ``--legacy-mem``:
+ Run DPDK in legacy memory mode (disable memory reserve/unreserve at runtime,
+ but provide more IOVA-contiguous memory).
+
+* ``--single-file-segments``:
+ Store memory segments in fewer files (dynamic memory mode only - does not
+ affect legacy memory mode).
+
The ``-c`` or ``-l`` option is mandatory; the others are optional.
Copy the DPDK application binary to your target, then run the application as follows
diff --git a/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst b/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
new file mode 100644
index 00000000..9d1f0fa0
--- /dev/null
+++ b/doc/guides/linux_gsg/cross_build_dpdk_for_arm64.rst
@@ -0,0 +1,132 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 ARM Corporation.
+
+Cross compile DPDK for ARM64
+============================
+This chapter describes how to cross compile DPDK for ARM64 from x86 build hosts.
+
+.. note::
+
+ Whilst it is recommended to natively build DPDK on ARM64 (just
+ like with x86), it is also possible to cross-build DPDK for ARM64. An
+ ARM64 cross compile GNU toolchain is used for this.
+
+Obtain the cross tool chain
+---------------------------
+The latest cross compile tool chain can be downloaded from:
+https://releases.linaro.org/components/toolchain/binaries/latest/aarch64-linux-gnu/.
+
+The following steps get version 7.2.1, the latest at the time of writing.
+
+.. code-block:: console
+
+ wget https://releases.linaro.org/components/toolchain/binaries/latest/aarch64-linux-gnu/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu.tar.xz
+
+Unpack and add to the PATH
+---------------------------
+
+.. code-block:: console
+
+ tar -xvf gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu.tar.xz
+ export PATH=$PATH:<cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/bin
+
+.. note::
+
+ For the host requirements and other info, refer to the release note section: https://releases.linaro.org/components/toolchain/binaries/latest/
+
+Getting the prerequisite library
+--------------------------------
+
+The NUMA library is required on most modern machines, but is not needed for non-NUMA architectures.
+
+.. note::
+
+   Before compiling the NUMA lib, run ``libtool --version`` to ensure the
+   libtool version is >= 2.2, otherwise the compilation will fail with errors.
+
+.. code-block:: console
+
+ git clone https://github.com/numactl/numactl.git
+ cd numactl
+ git checkout v2.0.11 -b v2.0.11
+ ./autogen.sh
+ autoconf -i
+ ./configure --host=aarch64-linux-gnu CC=aarch64-linux-gnu-gcc --prefix=<numa install dir>
+ make install
+
+The numa header files and library are generated in the include and lib folders respectively under <numa install dir>.
+
+.. _augment_the_cross_toolchain_with_numa_support:
+
+Augment the cross toolchain with NUMA support
+---------------------------------------------
+
+.. note::
+
+   This step is optional; an alternative is to use extra CFLAGS and LDFLAGS, as depicted in :ref:`configure_and_cross_compile_dpdk_build` below.
+
+Copy the NUMA header files and lib to the cross compiler's directories:
+
+.. code-block:: console
+
+ cp <numa_install_dir>/include/numa*.h <cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/bin/../aarch64-linux-gnu/libc/usr/include/
+ cp <numa_install_dir>/lib/libnuma.a <cross_install_dir>/gcc-linaro-7.2.1-2017.11-x86_64_aarch64-linux-gnu/lib/gcc/aarch64-linux-gnu/7.2.1/
+
+.. _configure_and_cross_compile_dpdk_build:
+
+Configure and cross compile DPDK Build
+--------------------------------------
+To configure a build, choose one of the target configurations, such as arm64-dpaa2-linuxapp-gcc or arm64-thunderx-linuxapp-gcc.
+
+.. code-block:: console
+
+ make config T=arm64-armv8a-linuxapp-gcc
+
+To cross-compile without compiling the kernel modules, use the following command:
+
+.. code-block:: console
+
+ make -j CROSS=aarch64-linux-gnu- CONFIG_RTE_KNI_KMOD=n CONFIG_RTE_EAL_IGB_UIO=n
+
+To cross-compile, including the kernel modules, the kernel source tree needs to be specified by setting
+RTE_KERNELDIR:
+
+.. code-block:: console
+
+ make -j CROSS=aarch64-linux-gnu- RTE_KERNELDIR=<kernel_src_rootdir> CROSS_COMPILE=aarch64-linux-gnu-
+
+To compile for non-NUMA targets without compiling the kernel modules, use the following command:
+
+.. code-block:: console
+
+ make -j CROSS=aarch64-linux-gnu- CONFIG_RTE_KNI_KMOD=n CONFIG_RTE_EAL_IGB_UIO=n CONFIG_RTE_LIBRTE_VHOST_NUMA=n CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+
+.. note::
+
+   1. EXTRA_CFLAGS and EXTRA_LDFLAGS should be added to include the NUMA headers
+      and link the library respectively, if the step
+      :ref:`augment_the_cross_toolchain_with_numa_support` above was skipped and
+      the toolchain was therefore not augmented with NUMA support.
+
+   2. "-isystem <numa_install_dir>/include" should be added to EXTRA_CFLAGS,
+      otherwise compiling against numa.h will produce many errors such as
+      Werror=cast-qual, Werror=strict-prototypes and Werror=old-style-definition.
+
+ An example is given below:
+
+ .. code-block:: console
+
+ make -j CROSS=aarch64-linux-gnu- CONFIG_RTE_KNI_KMOD=n CONFIG_RTE_EAL_IGB_UIO=n EXTRA_CFLAGS="-isystem <numa_install_dir>/include" EXTRA_LDFLAGS="-L<numa_install_dir>/lib -lnuma"
+
+Meson Cross Compiling DPDK
+--------------------------
+
+To cross-compile DPDK for a desired target machine we can use the following
+command::
+
+ meson cross-build --cross-file <target_machine_configuration>
+ ninja -C cross-build
+
+For example, if the target machine is arm64, we can use the following
+command::
+
+ meson arm64-build --cross-file config/arm/arm64_armv8_linuxapp_gcc
+ ninja -C arm64-build
diff --git a/doc/guides/linux_gsg/index.rst b/doc/guides/linux_gsg/index.rst
index 2a7bdfe9..077f9302 100644
--- a/doc/guides/linux_gsg/index.rst
+++ b/doc/guides/linux_gsg/index.rst
@@ -13,6 +13,7 @@ Getting Started Guide for Linux
intro
sys_reqs
build_dpdk
+ cross_build_dpdk_for_arm64
linux_drivers
build_sample_apps
enable_func
diff --git a/doc/guides/linux_gsg/linux_drivers.rst b/doc/guides/linux_gsg/linux_drivers.rst
index 14381eed..371a817f 100644
--- a/doc/guides/linux_gsg/linux_drivers.rst
+++ b/doc/guides/linux_gsg/linux_drivers.rst
@@ -1,6 +1,6 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2010-2015 Intel Corporation.
- Copyright(c) 2017 Mellanox Corporation.
+ Copyright 2017 Mellanox Technologies, Ltd
All rights reserved.
.. _linux_gsg_linux_drivers:
diff --git a/doc/guides/nics/axgbe.rst b/doc/guides/nics/axgbe.rst
new file mode 100644
index 00000000..e30f4944
--- /dev/null
+++ b/doc/guides/nics/axgbe.rst
@@ -0,0 +1,89 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+AXGBE Poll Mode Driver
+======================
+
+The AXGBE poll mode driver library (**librte_pmd_axgbe**) implements support
+for the AMD 10 Gbps family of adapters. It is compiled and tested on standard Linux distributions such as Ubuntu.
+
+Detailed information about SoCs that use these devices can be found here:
+
+- `AMD EPYC™ EMBEDDED 3000 family <https://www.amd.com/en/products/embedded-epyc-3000-series>`_.
+
+
+Supported Features
+------------------
+
+AXGBE PMD has support for:
+
+- Base L2 features
+- TSS (Transmit Side Scaling)
+- Promiscuous mode
+- Port statistics
+- Multicast mode
+- RSS (Receive Side Scaling)
+- Checksum offload
+- Jumbo frames up to 9K
+
+
+Configuration Information
+-------------------------
+
+The following options can be modified in the ``.config`` file. Please note that
+enabling debugging options may affect system performance.
+
+- ``CONFIG_RTE_LIBRTE_AXGBE_PMD`` (default **y**)
+
+ Toggle compilation of axgbe PMD.
+
+- ``CONFIG_RTE_LIBRTE_AXGBE_PMD_DEBUG`` (default **n**)
+
+ Toggle display for PMD debug related messages.
+
+
+Building DPDK
+-------------
+
+See the :ref:`DPDK Getting Started Guide for Linux <linux_gsg>` for
+instructions on how to build DPDK.
+
+By default the AXGBE PMD library will be built into the DPDK library.
+
+For configuring and using UIO frameworks, please also refer to :ref:`the
+documentation that comes with DPDK suite <linux_gsg>`.
+
+
+Prerequisites and Pre-conditions
+--------------------------------
+- Prepare the system as recommended by the DPDK suite.
+
+- Bind the intended AMD device to ``igb_uio`` or ``vfio-pci`` module.
+
+The system is now ready to run DPDK applications.
+
+
+Usage Example
+-------------
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+for details.
+
+Example output:
+
+.. code-block:: console
+
+ [...]
+ EAL: PCI device 0000:02:00.4 on NUMA socket 0
+ EAL: probe driver: 1022:1458 net_axgbe
+ Interactive-mode selected
+ USER1: create a new mbuf pool <mbuf_pool_socket_0>: n=171456, size=2176, socket=0
+ USER1: create a new mbuf pool <mbuf_pool_socket_1>: n=171456, size=2176, socket=1
+ USER1: create a new mbuf pool <mbuf_pool_socket_2>: n=171456, size=2176, socket=2
+ USER1: create a new mbuf pool <mbuf_pool_socket_3>: n=171456, size=2176, socket=3
+ Configuring Port 0 (socket 0)
+ Port 0: 00:00:1A:1C:6A:17
+ Checking link statuses...
+ Port 0 Link Up - speed 10000 Mbps - full-duplex
+ Done
+ testpmd>
diff --git a/doc/guides/nics/bnx2x.rst b/doc/guides/nics/bnx2x.rst
index 31f146a0..cecbfc2e 100644
--- a/doc/guides/nics/bnx2x.rst
+++ b/doc/guides/nics/bnx2x.rst
@@ -194,6 +194,7 @@ This section provides instructions to configure SR-IOV with Linux OS.
using the instructions outlined in the Application notes below.
#. Running testpmd:
+   (Supply ``--log-level="pmd.net.bnx2x.driver,7"`` to view informational messages):
Follow instructions available in the document
:ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
diff --git a/doc/guides/nics/bnxt.rst b/doc/guides/nics/bnxt.rst
index 9826b350..697b97e6 100644
--- a/doc/guides/nics/bnxt.rst
+++ b/doc/guides/nics/bnxt.rst
@@ -1,46 +1,20 @@
-.. BSD LICENSE
- Copyright 2016 Broadcom Limited
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Broadcom Limited nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2016-2018 Broadcom
BNXT Poll Mode Driver
=====================
The bnxt poll mode library (**librte_pmd_bnxt**) implements support for:
- * **Broadcom NetXtreme-C®/NetXtreme-E® BCM5730X and BCM574XX family of
- Ethernet Network Controllers**
+ * **Broadcom NetXtreme-C®/NetXtreme-E®/NetXtreme-S®
+ BCM5730X / BCM574XX / BCM58000 family of Ethernet Network Controllers**
These adapters support Standards compliant 10/25/50/100Gbps 30MPPS
full-duplex throughput.
Information about the NetXtreme family of adapters can be found in the
`NetXtreme® Brand section
- <https://www.broadcom.com/products/ethernet-communication-and-switching?technology%5B%5D=88>`_
+ <https://www.broadcom.com/products/ethernet-connectivity/controllers/>`_
of the `Broadcom website <http://www.broadcom.com/>`_.
* **Broadcom StrataGX® BCM5871X Series of Communications Processors**
diff --git a/doc/guides/nics/cxgbe.rst b/doc/guides/nics/cxgbe.rst
index 8651a7be..58d88eef 100644
--- a/doc/guides/nics/cxgbe.rst
+++ b/doc/guides/nics/cxgbe.rst
@@ -1,32 +1,6 @@
-.. BSD LICENSE
- Copyright 2015-2017 Chelsio Communications.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Chelsio Communications nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2014-2018 Chelsio Communications.
+ All rights reserved.
CXGBE Poll Mode Driver
======================
@@ -35,22 +9,28 @@ The CXGBE PMD (**librte_pmd_cxgbe**) provides poll mode driver support
for **Chelsio Terminator** 10/25/40/100 Gbps family of adapters. CXGBE PMD
has support for the latest Linux and FreeBSD operating systems.
+CXGBEVF PMD provides poll mode driver support for SR-IOV Virtual functions
+and has support for the latest Linux operating systems.
+
More information can be found at `Chelsio Communications Official Website
<http://www.chelsio.com>`_.
Features
--------
-CXGBE PMD has support for:
+CXGBE and CXGBEVF PMDs have support for:
- Multiple queues for TX and RX
- Receiver Side Steering (RSS)
+ Receiver Side Steering (RSS) on IPv4, IPv6, IPv4-TCP/UDP, IPv6-TCP/UDP.
+ For 4-tuple, enabling 'RSS on TCP' and 'RSS on TCP + UDP' is supported.
- VLAN filtering
- Checksum offload
- Promiscuous mode
- All multicast mode
- Port hardware statistics
- Jumbo frames
+- Flow API - Support for both Wildcard (LE-TCAM) and Exact (HASH) match filters.
Limitations
-----------
@@ -63,6 +43,8 @@ port.
For this reason, one cannot whitelist/blacklist a single port without
whitelisting/blacklisting the other ports on the same device.
+.. _t5-nics:
+
Supported Chelsio T5 NICs
-------------------------
@@ -71,16 +53,24 @@ Supported Chelsio T5 NICs
- 40G NICs: T580-CR, T580-LP-CR, T580-SO-CR
- Other T5 NICs: T522-CR
+.. _t6-nics:
+
Supported Chelsio T6 NICs
-------------------------
- 25G NICs: T6425-CR, T6225-CR, T6225-LL-CR, T6225-SO-CR
- 100G NICs: T62100-CR, T62100-LP-CR, T62100-SO-CR
+Supported SR-IOV Chelsio NICs
+-----------------------------
+
+SR-IOV virtual functions are supported on all the Chelsio NICs listed
+in :ref:`t5-nics` and :ref:`t6-nics`.
+
Prerequisites
-------------
-- Requires firmware version **1.16.43.0** and higher. Visit
+- Requires firmware version **1.17.14.0** and higher. Visit
`Chelsio Download Center <http://service.chelsio.com>`_ to get latest firmware
bundled with the latest Chelsio Unified Wire package.
@@ -110,6 +100,10 @@ enabling debugging options may affect system performance.
Toggle compilation of librte_pmd_cxgbe driver.
+ .. note::
+
+ This controls compilation of both CXGBE and CXGBEVF PMD.
+
- ``CONFIG_RTE_LIBRTE_CXGBE_DEBUG`` (default **n**)
Toggle display of generic debugging messages.
@@ -134,6 +128,28 @@ enabling debugging options may affect system performance.
Toggle behaviour to prefer Throughput or Latency.
+Runtime Options
+~~~~~~~~~~~~~~~
+
+The following ``devargs`` options can be enabled at runtime. They must
+be passed as part of EAL arguments. For example,
+
+.. code-block:: console
+
+ testpmd -w 02:00.4,keep_ovlan=1 -- -i
+
+- ``keep_ovlan`` (default **0**)
+
+ Toggle behaviour to keep/strip outer VLAN in Q-in-Q packets. If
+ enabled, the outer VLAN tag is preserved in Q-in-Q packets. Otherwise,
+ the outer VLAN tag is stripped in Q-in-Q packets.
+
+- ``force_link_up`` (default **0**)
+
+ When set to 1, CXGBEVF PMD always forces link as up for all VFs on
+ underlying Chelsio NICs. This enables multiple VFs on the same NIC
+ to send traffic to each other even when the physical link is down.
+
.. _driver-compilation:
Driver compilation and testing
@@ -208,7 +224,7 @@ Unified Wire package for Linux operating system are as follows:
.. code-block:: console
- firmware-version: 1.16.43.0, TP 0.1.4.9
+ firmware-version: 1.17.14.0, TP 0.1.4.9
Running testpmd
~~~~~~~~~~~~~~~
@@ -266,7 +282,7 @@ devices managed by librte_pmd_cxgbe in Linux operating system.
EAL: PCI memory mapped at 0x7fd7c0200000
EAL: PCI memory mapped at 0x7fd77cdfd000
EAL: PCI memory mapped at 0x7fd7c10b7000
- PMD: rte_cxgbe_pmd: fw: 1.16.43.0, TP: 0.1.4.9
+ PMD: rte_cxgbe_pmd: fw: 1.17.14.0, TP: 0.1.4.9
PMD: rte_cxgbe_pmd: Coming up as MASTER: Initializing adapter
Interactive-mode selected
Configuring Port 0 (socket 0)
@@ -286,6 +302,114 @@ devices managed by librte_pmd_cxgbe in Linux operating system.
Flow control pause TX/RX is disabled by default and can be enabled via
testpmd. Refer section :ref:`flow-control` for more details.
+Configuring SR-IOV Virtual Functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This section demonstrates how to enable SR-IOV virtual functions
+on Chelsio NICs and how to run testpmd with SR-IOV virtual functions.
+
+#. Load the kernel module:
+
+ .. code-block:: console
+
+ modprobe cxgb4
+
+#. Get the PCI bus addresses of the interfaces bound to cxgb4 driver:
+
+ .. code-block:: console
+
+ dmesg | tail -2
+
+ Example output:
+
+ .. code-block:: console
+
+ cxgb4 0000:02:00.4 p1p1: renamed from eth0
+ cxgb4 0000:02:00.4 p1p2: renamed from eth1
+
+ .. note::
+
+ Both the interfaces of a Chelsio 2-port adapter are bound to the
+ same PCI bus address.
+
+#. Use ifconfig to get the interface name assigned to Chelsio card:
+
+ .. code-block:: console
+
+ ifconfig -a | grep "00:07:43"
+
+ Example output:
+
+ .. code-block:: console
+
+ p1p1 Link encap:Ethernet HWaddr 00:07:43:2D:EA:C0
+ p1p2 Link encap:Ethernet HWaddr 00:07:43:2D:EA:C8
+
+#. Bring up the interfaces:
+
+ .. code-block:: console
+
+ ifconfig p1p1 up
+ ifconfig p1p2 up
+
+#. Instantiate SR-IOV Virtual Functions. PF0..3 can be used for
+ SR-IOV VFs. Multiple VFs can be instantiated on each of PF0..3.
+ To instantiate one SR-IOV VF on each PF0 and PF1:
+
+ .. code-block:: console
+
+ echo 1 > /sys/bus/pci/devices/0000\:02\:00.0/sriov_numvfs
+ echo 1 > /sys/bus/pci/devices/0000\:02\:00.1/sriov_numvfs
+
+#. Get the PCI bus addresses of the virtual functions:
+
+ .. code-block:: console
+
+ lspci | grep -i "Chelsio" | grep -i "VF"
+
+ Example output:
+
+ .. code-block:: console
+
+ 02:01.0 Ethernet controller: Chelsio Communications Inc T540-CR Unified Wire Ethernet Controller [VF]
+ 02:01.1 Ethernet controller: Chelsio Communications Inc T540-CR Unified Wire Ethernet Controller [VF]
+
+#. Running testpmd
+
+ Follow instructions available in the document
+ :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
+ to bind virtual functions and run testpmd.
+
+ Example output:
+
+ .. code-block:: console
+
+ [...]
+ EAL: PCI device 0000:02:01.0 on NUMA socket 0
+ EAL: probe driver: 1425:5803 net_cxgbevf
+ PMD: rte_cxgbe_pmd: Firmware version: 1.17.14.0
+ PMD: rte_cxgbe_pmd: TP Microcode version: 0.1.4.9
+ PMD: rte_cxgbe_pmd: Chelsio rev 0
+ PMD: rte_cxgbe_pmd: No bootstrap loaded
+ PMD: rte_cxgbe_pmd: No Expansion ROM loaded
+ PMD: rte_cxgbe_pmd: 0000:02:01.0 Chelsio rev 0 1G/10GBASE-SFP
+ EAL: PCI device 0000:02:01.1 on NUMA socket 0
+ EAL: probe driver: 1425:5803 net_cxgbevf
+ PMD: rte_cxgbe_pmd: Firmware version: 1.17.14.0
+ PMD: rte_cxgbe_pmd: TP Microcode version: 0.1.4.9
+ PMD: rte_cxgbe_pmd: Chelsio rev 0
+ PMD: rte_cxgbe_pmd: No bootstrap loaded
+ PMD: rte_cxgbe_pmd: No Expansion ROM loaded
+ PMD: rte_cxgbe_pmd: 0000:02:01.1 Chelsio rev 0 1G/10GBASE-SFP
+ Configuring Port 0 (socket 0)
+ Port 0: 06:44:29:44:40:00
+ Configuring Port 1 (socket 0)
+ Port 1: 06:44:29:44:40:10
+ Checking link statuses...
+ Done
+ testpmd>
+
FreeBSD
-------
@@ -350,7 +474,7 @@ Unified Wire package for FreeBSD operating system are as follows:
.. code-block:: console
- dev.t5nex.0.firmware_version: 1.16.43.0
+ dev.t5nex.0.firmware_version: 1.17.14.0
Running testpmd
~~~~~~~~~~~~~~~
@@ -468,7 +592,7 @@ devices managed by librte_pmd_cxgbe in FreeBSD operating system.
EAL: PCI memory mapped at 0x8007ec000
EAL: PCI memory mapped at 0x842800000
EAL: PCI memory mapped at 0x80086c000
- PMD: rte_cxgbe_pmd: fw: 1.16.43.0, TP: 0.1.4.9
+ PMD: rte_cxgbe_pmd: fw: 1.17.14.0, TP: 0.1.4.9
PMD: rte_cxgbe_pmd: Coming up as MASTER: Initializing adapter
Interactive-mode selected
Configuring Port 0 (socket 0)
diff --git a/doc/guides/nics/dpaa.rst b/doc/guides/nics/dpaa.rst
index 0a13996c..620c045d 100644
--- a/doc/guides/nics/dpaa.rst
+++ b/doc/guides/nics/dpaa.rst
@@ -162,6 +162,16 @@ Manager.
this pool.
+Whitelisting & Blacklisting
+---------------------------
+
+For blacklisting a DPAA device, the following command can be used.
+
+ .. code-block:: console
+
+ <dpdk app> <EAL args> -b "dpaa_bus:fmX-macY" -- ...
+ e.g. "dpaa_bus:fm1-mac4"
+
Supported DPAA SoCs
-------------------
diff --git a/doc/guides/nics/dpaa2.rst b/doc/guides/nics/dpaa2.rst
index 9c66edd4..66c03e10 100644
--- a/doc/guides/nics/dpaa2.rst
+++ b/doc/guides/nics/dpaa2.rst
@@ -494,28 +494,12 @@ Please note that enabling debugging options may affect system performance.
- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER`` (default ``n``)
- Toggle display of generic debugging messages
+ Toggle display of debugging messages/logic
- ``CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA`` (default ``y``)
Toggle to use physical address vs virtual address for hardware accelerators.
-- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT`` (default ``n``)
-
- Toggle display of initialization related messages.
-
-- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_RX`` (default ``n``)
-
- Toggle display of receive fast path run-time message
-
-- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX`` (default ``n``)
-
- Toggle display of transmit fast path run-time message
-
-- ``CONFIG_RTE_LIBRTE_DPAA2_DEBUG_TX_FREE`` (default ``n``)
-
- Toggle display of transmit fast path buffer free run-time message
-
Driver compilation and testing
------------------------------
@@ -532,8 +516,7 @@ for details.
.. code-block:: console
- ./arm64-dpaa2-linuxapp-gcc/testpmd -c 0xff -n 1 \
- -- -i --portmask=0x3 --nb-cores=1 --no-flush-rx
+ ./testpmd -c 0xff -n 1 -- -i --portmask=0x3 --nb-cores=1 --no-flush-rx
.....
EAL: Registered [pci] bus.
@@ -557,6 +540,38 @@ for details.
Done
testpmd>
+Enabling logs
+-------------
+
+For enabling logging for the DPAA2 PMD, the following log-level prefix can be used:
+
+ .. code-block:: console
+
+ <dpdk app> <EAL args> --log-level=bus.fslmc:<level> -- ...
+
+Using ``bus.fslmc`` as the log matching criterion, all FSLMC bus logs with a
+level lower than or equal to the given ``level`` are enabled.
+
+ Or
+
+ .. code-block:: console
+
+ <dpdk app> <EAL args> --log-level=pmd.net.dpaa2:<level> -- ...
+
+Using ``pmd.net.dpaa2`` as the log matching criterion, all PMD logs with a
+level lower than or equal to the given ``level`` are enabled.
+
+Whitelisting & Blacklisting
+---------------------------
+
+For blacklisting a DPAA2 device, the following command can be used.
+
+ .. code-block:: console
+
+ <dpdk app> <EAL args> -b "fslmc:dpni.x" -- ...
+
+Where x is the device object id as configured in the resource container.
+
Limitations
-----------
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index 4dffce1a..438a83d5 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -1,32 +1,7 @@
-.. BSD LICENSE
+.. SPDX-License-Identifier: BSD-3-Clause
Copyright (c) 2017, Cisco Systems, Inc.
All rights reserved.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
ENIC Poll Mode Driver
=====================
@@ -114,11 +89,24 @@ Configuration information
- **Interrupts**
- Only one interrupt per vNIC interface should be configured in the UCS
+ At least one interrupt per vNIC interface should be configured in the UCS
manager regardless of the number of receive/transmit queues. The ENIC PMD
uses this interrupt to get information about link status and errors
in the fast path.
+ In addition to the interrupt for link status and errors, when using Rx queue
+ interrupts, increase the number of configured interrupts so that there is at
+ least one interrupt for each Rx queue. For example, if the app uses 3 Rx
+ queues and wants to use per-queue interrupts, configure 4 (3 + 1) interrupts.
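+
+    Below is a minimal sketch of requesting per-queue Rx interrupts on the
+    DPDK side (3 Rx queues assumed, matching the example above; the queue id
+    passed to the enable call is illustrative only):
+
+    .. code-block:: c
+
+        #include <rte_ethdev.h>
+
+        static int
+        setup_rxq_interrupts(uint16_t port_id)
+        {
+            struct rte_eth_conf port_conf = {
+                .intr_conf = {
+                    .rxq = 1, /* request Rx queue interrupts */
+                },
+            };
+            int ret;
+
+            ret = rte_eth_dev_configure(port_id, 3, 3, &port_conf);
+            if (ret < 0)
+                return ret;
+            /* ... set up the queues and start the port, then per queue: */
+            return rte_eth_dev_rx_intr_enable(port_id, 0);
+        }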
+
+ - **Receive Side Scaling**
+
+ In order to fully utilize RSS in DPDK, enable all RSS related settings in
+ CIMC or UCSM. These include the following items listed under
+ Receive Side Scaling:
+ TCP, IPv4, TCP-IPv4, IPv6, TCP-IPv6, IPv6 Extension, TCP-IPv6 Extension.
+
+
.. _enic-flow-director:
Flow director support
@@ -140,20 +128,21 @@ perfect filtering of the 5-tuple with no masking of fields supported.
SR-IOV mode utilization
-----------------------
-UCS blade servers configured with dynamic vNIC connection policies in UCS
-manager are capable of supporting assigned devices on virtual machines (VMs)
-through a KVM hypervisor. Assigned devices, also known as 'passthrough'
-devices, are SR-IOV virtual functions (VFs) on the host which are exposed
-to VM instances.
+UCS blade servers configured with dynamic vNIC connection policies in UCSM
+are capable of supporting SR-IOV. SR-IOV virtual functions (VFs) are
+specialized vNICs, distinct from regular Ethernet vNICs. These VFs can be
+directly assigned to virtual machines (VMs) as 'passthrough' devices.
-The Cisco Virtual Machine Fabric Extender (VM-FEX) gives the VM a dedicated
+In UCS, SR-IOV VFs require the use of the Cisco Virtual Machine Fabric Extender
+(VM-FEX), which gives the VM a dedicated
interface on the Fabric Interconnect (FI). Layer 2 switching is done at
the FI. This may eliminate the requirement for software switching on the
host to route intra-host VM traffic.
Please refer to `Creating a Dynamic vNIC Connection Policy
<http://www.cisco.com/c/en/us/td/docs/unified_computing/ucs/sw/vm_fex/vmware/gui/config_guide/b_GUI_VMware_VM-FEX_UCSM_Configuration_Guide/b_GUI_VMware_VM-FEX_UCSM_Configuration_Guide_chapter_010.html#task_433E01651F69464783A68E66DA8A47A5>`_
-for information on configuring SR-IOV adapter policies using UCS manager.
+for information on configuring SR-IOV adapter policies and port profiles
+using UCSM.
Once the policies are in place and the host OS is rebooted, VFs should be
visible on the host, E.g.:
@@ -170,30 +159,37 @@ visible on the host, E.g.:
0d:00.6 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
0d:00.7 Ethernet controller: Cisco Systems Inc VIC SR-IOV VF (rev a2)
-Enable Intel IOMMU on the host and install KVM and libvirt. A VM instance should
-be created with an assigned device. When using libvirt, this configuration can
-be done within the domain (i.e. VM) config file. For example this entry maps
-host VF 0d:00:01 into the VM.
+Enable Intel IOMMU on the host and install KVM and libvirt, and reboot again as
+required. Then, using libvirt, create a VM instance with an assigned device.
+Below is an example ``interface`` block (part of the domain configuration XML)
+that adds the host VF 0d:00:01 to the VM. ``profileid='pp-vlan-25'`` indicates
+the port profile that has been configured in UCSM.
.. code-block:: console
<interface type='hostdev' managed='yes'>
<mac address='52:54:00:ac:ff:b6'/>
+ <driver name='vfio'/>
<source>
<address type='pci' domain='0x0000' bus='0x0d' slot='0x00' function='0x1'/>
</source>
+ <virtualport type='802.1Qbh'>
+ <parameters profileid='pp-vlan-25'/>
+ </virtualport>
+ </interface>
+
Alternatively, the configuration can be done in a separate file using the
``network`` keyword. These methods are described in the libvirt documentation for
`Network XML format <https://libvirt.org/formatnetwork.html>`_.
-When the VM instance is started, the ENIC KVM driver will bind the host VF to
+When the VM instance is started, libvirt will bind the host VF to
vfio, complete provisioning on the FI and bring up the link.
.. note::
It is not possible to use a VF directly from the host because it is not
- fully provisioned until the hypervisor brings up the VM that it is assigned
+ fully provisioned until libvirt brings up the VM that it is assigned
to.
In the VM instance, the VF will now be visible. E.g., here the VF 00:04.0 is
@@ -207,9 +203,27 @@ seen on the VM instance and should be available for binding to a DPDK.
Follow the normal DPDK install procedure, binding the VF to either ``igb_uio``
or ``vfio`` in non-IOMMU mode.
+In the VM, the kernel enic driver may be automatically bound to the VF during
+boot. Unbinding it currently hangs due to a known issue with the driver. To
+work around the issue, blacklist the enic module as follows.
Please see :ref:`Limitations <enic_limitations>` for limitations in
the use of SR-IOV.
+.. code-block:: console
+
+ # cat /etc/modprobe.d/enic.conf
+ blacklist enic
+
+ # dracut --force
+
+.. note::
+
+ Passthrough does not require SR-IOV. If VM-FEX is not desired, the user
+ may create as many regular vNICs as necessary and assign them to VMs as
+ passthrough devices. Since these vNICs are not SR-IOV VFs, using them as
+   passthrough devices does not require libvirt, port profiles, or VM-FEX.
+
+
.. _enic-genic-flow-api:
Generic Flow API support
@@ -227,7 +241,7 @@ Generic Flow API is supported. The baseline support is:
- Actions: queue and void
- Selectors: 'is'
-- **1300 series VICS with advanced filters disabled**
+- **1300 and later series VICS with advanced filters disabled**
With advanced filters disabled, an IPv4 or IPv6 item must be specified
in the pattern.
@@ -238,17 +252,99 @@ Generic Flow API is supported. The baseline support is:
- Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
- In total, up to 64 bytes of mask is allowed across all headers
-- **1300 series VICS with advanced filters enabled**
+- **1300 and later series VICS with advanced filters enabled**
- Attributes: ingress
- Items: eth, ipv4, ipv6, udp, tcp, vxlan, inner eth, ipv4, ipv6, udp, tcp
- - Actions: queue, mark, flag and void
+ - Actions: queue, mark, drop, flag and void
- Selectors: 'is', 'spec' and 'mask'. 'last' is not supported
- In total, up to 64 bytes of mask is allowed across all headers
More features may be added in future firmware and new versions of the VIC.
Please refer to the release notes.
+.. _overlay_offload:
+
+Overlay Offload
+---------------
+
+Recent hardware models support overlay offload. When enabled, the NIC performs
+the following operations for VXLAN, NVGRE, and GENEVE packets. In all cases,
+inner and outer packets can be IPv4 or IPv6.
+
+- TSO for VXLAN and GENEVE packets.
+
+ Hardware supports NVGRE TSO, but DPDK currently has no NVGRE offload flags.
+
+- Tx checksum offloads.
+
+ The NIC fills in IPv4/UDP/TCP checksums for both inner and outer packets.
+
+- Rx checksum offloads.
+
+ The NIC validates IPv4/UDP/TCP checksums of both inner and outer packets.
+ Good checksum flags (e.g. ``PKT_RX_L4_CKSUM_GOOD``) indicate that the inner
+ packet has the correct checksum, and if applicable, the outer packet also
+ has the correct checksum. Bad checksum flags (e.g. ``PKT_RX_L4_CKSUM_BAD``)
+ indicate that the inner and/or outer packets have invalid checksum values.
+
+- Inner Rx packet type classification
+
+ PMD sets inner L3/L4 packet types (e.g. ``RTE_PTYPE_INNER_L4_TCP``), and
+ ``RTE_PTYPE_TUNNEL_GRENAT`` to indicate that the packet is tunneled.
+ PMD does not set L3/L4 packet types for outer packets.
+
+- Inner RSS
+
+  RSS hash calculation, and therefore queue selection, is done on inner packets.
+
+In order to enable overlay offload, the 'Enable VXLAN' box should be checked
+via CIMC or UCSM followed by a reboot of the server. When PMD successfully
+enables overlay offload, it prints the following message on the console.
+
+.. code-block:: console
+
+ Overlay offload is enabled
+
+By default, PMD enables overlay offload if hardware supports it. To disable
+it, set ``devargs`` parameter ``disable-overlay=1``. For example::
+
+ -w 12:00.0,disable-overlay=1
+
+By default, the NIC uses 4789 as the VXLAN port. The user may change
+it through ``rte_eth_dev_udp_tunnel_port_{add,delete}``. However, as
+the current NIC has a single VXLAN port number, the user cannot
+configure multiple port numbers.
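+
+Below is a minimal sketch of changing the VXLAN port via this API (the new
+port number 4790 is illustrative only):
+
+.. code-block:: c
+
+    #include <rte_ethdev.h>
+
+    static int
+    set_vxlan_port(uint16_t port_id)
+    {
+        struct rte_eth_udp_tunnel tunnel = {
+            .udp_port = 4789, /* the current (default) VXLAN port */
+            .prot_type = RTE_TUNNEL_TYPE_VXLAN,
+        };
+
+        /* Remove the default port, then install the new one. */
+        rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
+        tunnel.udp_port = 4790;
+        return rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
+    }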
+
+Ingress VLAN Rewrite
+--------------------
+
+VIC adapters can tag, untag, or modify the VLAN headers of ingress
+packets. The ingress VLAN rewrite mode controls this behavior. By
+default, it is set to pass-through, where the NIC does not modify the
+VLAN header in any way so that the application can see the original
+header. This mode is sufficient for many applications, but may not be
+suitable for others. Such applications may change the mode by setting
+``devargs`` parameter ``ig-vlan-rewrite`` to one of the following.
+
+- ``pass``: Pass-through mode. The NIC does not modify the VLAN
+ header. This is the default mode.
+
+- ``priority``: Priority-tag default VLAN mode. If the ingress packet
+ is tagged with the default VLAN, the NIC replaces its VLAN header
+ with the priority tag (VLAN ID 0).
+
+- ``trunk``: Default trunk mode. The NIC tags untagged ingress packets
+ with the default VLAN. Tagged ingress packets are not modified. To
+ the application, every packet appears as tagged.
+
+- ``untag``: Untag default VLAN mode. If the ingress packet is tagged
+ with the default VLAN, the NIC removes or untags its VLAN header so
+ that the application sees an untagged packet. As a result, the
+ default VLAN becomes `untagged`. This mode can be useful for
+ applications such as OVS-DPDK performance benchmarks that utilize
+ only the default VLAN and want to see only untagged packets.
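+
+For example, to select priority-tag mode (the device address is
+illustrative)::
+
+   -w 12:00.0,ig-vlan-rewrite=priority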
+
.. _enic_limitations:
Limitations
@@ -264,9 +360,10 @@ Limitations
In test setups where an Ethernet port of a Cisco adapter in TRUNK mode is
connected point-to-point to another adapter port or connected through a router
instead of a switch, all ingress packets will be VLAN tagged. Programs such
- as l3fwd which do not account for VLAN tags in packets will misbehave. The
- solution is to enable VLAN stripping on ingress. The following code fragment is
- an example of how to accomplish this:
+ as l3fwd may not account for VLAN tags in packets and may misbehave. One
+ solution is to enable VLAN stripping on ingress so the VLAN tag is removed
+ from the packet and put into the mbuf->vlan_tci field. Here is an example
+ of how to accomplish this:
.. code-block:: console
@@ -274,6 +371,14 @@ Limitations
vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
rte_eth_dev_set_vlan_offload(port, vlan_offload);
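+
+For context, the fragment above fits into a fuller sequence along these lines
+(a sketch, assuming an initialized ``port``):
+
+.. code-block:: c
+
+    /* Sketch: read current VLAN offload flags, enable stripping, write back. */
+    int vlan_offload = rte_eth_dev_get_vlan_offload(port);
+
+    vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
+    rte_eth_dev_set_vlan_offload(port, vlan_offload);
+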
+Another alternative is to modify the adapter's ingress VLAN rewrite mode so that
+packets with the default VLAN tag are stripped by the adapter and presented to
+DPDK as untagged packets. In this case, mbuf->vlan_tci and the PKT_RX_VLAN and
+PKT_RX_VLAN_STRIPPED mbuf flags would not be set. This mode is enabled with the
+``devargs`` parameter ``ig-vlan-rewrite=untag``. For example::
+
+ -w 12:00.0,ig-vlan-rewrite=untag
+
- Limited flow director support on 1200 series and 1300 series Cisco VIC
adapters with old firmware. Please see :ref:`enic-flow-director`.
@@ -305,6 +410,24 @@ Limitations
were added. Since there currently is no grouping or priority support,
'catch-all' filters should be added last.
+- **Statistics**
+
+ - ``rx_good_bytes`` (ibytes) always includes VLAN header (4B) and CRC bytes (4B).
+ This behavior applies to 1300 and older series VIC adapters.
+ 1400 series VICs do not count CRC bytes, and count VLAN header only when VLAN
+ stripping is disabled.
+ - When the NIC drops a packet because the Rx queue has no free buffers,
+ ``rx_good_bytes`` still increments by 4B if the packet is not VLAN tagged or
+ VLAN stripping is disabled, or by 8B if the packet is VLAN tagged and stripping
+ is enabled.
+ This behavior applies to 1300 and older series VIC adapters. 1400 series VICs
+ do not increment this byte counter when packets are dropped.
+
+- **RSS Hashing**
+
+ - Hardware enables and disables UDP and TCP RSS hashing together. The driver
+ cannot control UDP and TCP hashing individually.
+
How to build the suite
----------------------
@@ -322,17 +445,9 @@ Supported Cisco VIC adapters
ENIC PMD supports all recent generations of Cisco VIC adapters including:
-- VIC 1280
-- VIC 1240
-- VIC 1225
-- VIC 1285
-- VIC 1225T
-- VIC 1227
-- VIC 1227T
-- VIC 1380
-- VIC 1340
-- VIC 1385
-- VIC 1387
+- VIC 1200 series
+- VIC 1300 series
+- VIC 1400 series
Supported Operating Systems
---------------------------
@@ -356,10 +471,16 @@ Supported features
- VLAN filtering (supported via UCSM/CIMC only)
- Execution of application by unprivileged system users
- IPV4, IPV6 and TCP RSS hashing
+- UDP RSS hashing (1400 series and later adapters)
- Scattered Rx
- MTU update
- SR-IOV on UCS managed servers connected to Fabric Interconnects
- Flow API
+- Overlay offload
+
+ - Rx/Tx checksum offloads for VXLAN, NVGRE, GENEVE
+ - TSO for VXLAN and GENEVE packets
+ - Inner RSS
Known bugs and unsupported features in this release
---------------------------------------------------
@@ -369,8 +490,8 @@ Known bugs and unsupported features in this release
- VLAN based flow direction
- Non-IPV4 flow direction
- Setting of extended VLAN
-- UDP RSS hashing
- MTU update only works if Scattered Rx mode is disabled
+- Maximum receive packet length is ignored if Scattered Rx mode is used
Prerequisites
-------------
@@ -427,4 +548,4 @@ Any questions or bugs should be reported to DPDK community and to the ENIC PMD
maintainers:
- John Daley <johndale@cisco.com>
-- Nelson Escobar <neescoba@cisco.com>
+- Hyong Youb Kim <hyonkim@cisco.com>
diff --git a/doc/guides/nics/fail_safe.rst b/doc/guides/nics/fail_safe.rst
index 3f72b593..6c02d7ef 100644
--- a/doc/guides/nics/fail_safe.rst
+++ b/doc/guides/nics/fail_safe.rst
@@ -1,32 +1,6 @@
-.. BSD LICENSE
+.. SPDX-License-Identifier: BSD-3-Clause
Copyright 2017 6WIND S.A.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of 6WIND S.A. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
Fail-safe poll mode driver library
==================================
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index 1b4fb979..cddc877d 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -278,6 +278,17 @@ Supports RSS hashing on RX.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
+.. _nic_features_inner_rss:
+
+Inner RSS
+---------
+
+Supports RX RSS hashing on inner headers.
+
+* **[users] rte_flow_action_rss**: ``level``.
+* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_RSS_HASH``, ``mbuf.rss``.
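+
+A sketch of requesting inner-header RSS through the flow API (field layout as
+in this release's ``struct rte_flow_action_rss``; the values and the
+``queue_ids`` array are illustrative placeholders):
+
+.. code-block:: c
+
+    /* Sketch: level 2 hashes on the innermost encapsulated headers. */
+    struct rte_flow_action_rss rss = {
+        .level = 2,
+        .types = ETH_RSS_IP | ETH_RSS_UDP,
+        .queue_num = 4,
+        .queue = queue_ids,
+    };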
+
+
.. _nic_features_rss_key_update:
RSS key update
@@ -503,7 +514,7 @@ CRC offload
Supports CRC stripping by hardware.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_CRC_STRIP``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_CRC_STRIP,DEV_RX_OFFLOAD_KEEP_CRC``.
.. _nic_features_vlan_offload:
@@ -566,7 +577,6 @@ Supports L4 checksum offload.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
-* **[uses] user config**: ``dev_conf.rxmode.hw_ip_checksum``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
@@ -749,6 +759,17 @@ Supports getting/setting device eeprom data.
``rte_eth_dev_set_eeprom()``.
+.. _nic_features_module_eeprom_dump:
+
+Module EEPROM dump
+------------------
+
+Supports retrieving information and data from the plug-in module EEPROM.
+
+* **[implements] eth_dev_ops**: ``get_module_info``, ``get_module_eeprom``.
+* **[related] API**: ``rte_eth_dev_get_module_info()``, ``rte_eth_dev_get_module_eeprom()``.
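+
+A sketch of the two-call pattern (a hedged illustration; error handling is
+omitted and the buffer size is arbitrary):
+
+.. code-block:: c
+
+    /* Sketch: query module info, then read the module EEPROM contents. */
+    struct rte_eth_dev_module_info modinfo;
+    struct rte_dev_eeprom_info eeprom;
+    uint8_t buf[256];
+
+    rte_eth_dev_get_module_info(port_id, &modinfo);
+    eeprom.data = buf;
+    eeprom.offset = 0;
+    eeprom.length = RTE_MIN(modinfo.eeprom_len, (uint32_t)sizeof(buf));
+    rte_eth_dev_get_module_eeprom(port_id, &eeprom);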
+
+
.. _nic_features_register_dump:
Registers dump
@@ -892,7 +913,25 @@ Documentation describes performance values.
See ``dpdk.org/doc/perf/*``.
+.. _nic_features_runtime_rx_queue_setup:
+
+Runtime Rx queue setup
+----------------------
+
+Supports Rx queue setup after the device has started.
+
+* **[provides] rte_eth_dev_info**: ``dev_capa:RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP``.
+* **[related] API**: ``rte_eth_dev_info_get()``.
+.. _nic_features_runtime_tx_queue_setup:
+
+Runtime Tx queue setup
+----------------------
+
+Supports Tx queue setup after the device has started.
+
+* **[provides] rte_eth_dev_info**: ``dev_capa:RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP``.
+* **[related] API**: ``rte_eth_dev_info_get()``.
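+
+A sketch of gating a late Rx queue setup on the advertised capability (the Tx
+case is analogous; ``new_queue_id`` and ``mbuf_pool`` are placeholders):
+
+.. code-block:: c
+
+    /* Sketch: add an Rx queue after start only if the device allows it. */
+    struct rte_eth_dev_info dev_info;
+
+    rte_eth_dev_info_get(port_id, &dev_info);
+    if (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP)
+        rte_eth_rx_queue_setup(port_id, new_queue_id, 512,
+                               rte_socket_id(), NULL, mbuf_pool);
+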
.. _nic_features_other:
diff --git a/doc/guides/nics/features/avf.ini b/doc/guides/nics/features/avf.ini
index ccb9edde..35ceada2 100644
--- a/doc/guides/nics/features/avf.ini
+++ b/doc/guides/nics/features/avf.ini
@@ -6,7 +6,6 @@
[Features]
Speed capabilities = Y
Link status = Y
-Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
diff --git a/doc/guides/nics/features/avf_vec.ini b/doc/guides/nics/features/avf_vec.ini
index 89249948..3050bc4a 100644
--- a/doc/guides/nics/features/avf_vec.ini
+++ b/doc/guides/nics/features/avf_vec.ini
@@ -6,7 +6,6 @@
[Features]
Speed capabilities = Y
Link status = Y
-Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
new file mode 100644
index 00000000..ab4da559
--- /dev/null
+++ b/doc/guides/nics/features/axgbe.ini
@@ -0,0 +1,19 @@
+;
+; Supported features of the 'axgbe' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = Y
+Link status = Y
+Jumbo frame = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+RSS hash = Y
+CRC offload = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Basic stats = Y
+Linux UIO = Y
+x86-32 = Y
+x86-64 = Y
diff --git a/doc/guides/nics/features/cxgbe.ini b/doc/guides/nics/features/cxgbe.ini
index 3d0fde2f..88f2f92b 100644
--- a/doc/guides/nics/features/cxgbe.ini
+++ b/doc/guides/nics/features/cxgbe.ini
@@ -14,7 +14,9 @@ TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
RSS hash = Y
+RSS key update = Y
Flow control = Y
+Flow API = Y
CRC offload = Y
VLAN offload = Y
L3 checksum offload = Y
@@ -24,6 +26,7 @@ Basic stats = Y
Stats per queue = Y
EEPROM dump = Y
Registers dump = Y
+Multiprocess aware = Y
BSD nic_uio = Y
Linux UIO = Y
Linux VFIO = Y
diff --git a/doc/guides/nics/features/cxgbevf.ini b/doc/guides/nics/features/cxgbevf.ini
new file mode 100644
index 00000000..b41fc365
--- /dev/null
+++ b/doc/guides/nics/features/cxgbevf.ini
@@ -0,0 +1,29 @@
+;
+; Supported features of the 'cxgbevf' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = Y
+Link status = Y
+Queue start/stop = Y
+MTU update = Y
+Jumbo frame = Y
+Scattered Rx = Y
+TSO = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+RSS hash = Y
+CRC offload = Y
+VLAN offload = Y
+L3 checksum offload = Y
+L4 checksum offload = Y
+Packet type parsing = Y
+Basic stats = Y
+Stats per queue = Y
+Multiprocess aware = Y
+Linux UIO = Y
+Linux VFIO = Y
+x86-32 = Y
+x86-64 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/default.ini b/doc/guides/nics/features/default.ini
index dae2ad77..f1a39d0f 100644
--- a/doc/guides/nics/features/default.ini
+++ b/doc/guides/nics/features/default.ini
@@ -17,6 +17,8 @@ Lock-free Tx queue =
Fast mbuf free =
Free Tx mbuf on demand =
Queue start/stop =
+Runtime Rx queue setup =
+Runtime Tx queue setup =
MTU update =
Jumbo frame =
Scattered Rx =
@@ -29,6 +31,7 @@ Multicast MAC filter =
RSS hash =
RSS key update =
RSS reta update =
+Inner RSS =
VMDq =
SR-IOV =
DCB =
@@ -63,6 +66,7 @@ Extended stats =
Stats per queue =
FW version =
EEPROM dump =
+Module EEPROM dump =
Registers dump =
LED =
Multiprocess aware =
diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini
index 498341f0..8a4bad29 100644
--- a/doc/guides/nics/features/enic.ini
+++ b/doc/guides/nics/features/enic.ini
@@ -6,23 +6,29 @@
[Features]
Link status = Y
Link status event = Y
+Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
TSO = Y
Promiscuous mode = Y
+Allmulticast mode = Y
Unicast MAC filter = Y
-Multicast MAC filter = Y
+Multicast MAC filter =
RSS hash = Y
+RSS key update = Y
+RSS reta update = Y
+Inner RSS = Y
SR-IOV = Y
-VLAN filter = Y
CRC offload = Y
VLAN offload = Y
Flow director = Y
Flow API = Y
L3 checksum offload = Y
L4 checksum offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
Packet type parsing = Y
Basic stats = Y
Multiprocess aware = Y
diff --git a/doc/guides/nics/features/fm10k.ini b/doc/guides/nics/features/fm10k.ini
index f0f61a7d..0acdf0d3 100644
--- a/doc/guides/nics/features/fm10k.ini
+++ b/doc/guides/nics/features/fm10k.ini
@@ -5,6 +5,8 @@
;
[Features]
Speed capabilities = P
+Link status = Y
+Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
Jumbo frame = Y
@@ -24,6 +26,8 @@ VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
diff --git a/doc/guides/nics/features/fm10k_vf.ini b/doc/guides/nics/features/fm10k_vf.ini
index 32b93df4..44b50faa 100644
--- a/doc/guides/nics/features/fm10k_vf.ini
+++ b/doc/guides/nics/features/fm10k_vf.ini
@@ -5,6 +5,8 @@
;
[Features]
Speed capabilities = P
+Link status = Y
+Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
Jumbo frame = Y
diff --git a/doc/guides/nics/features/i40e.ini b/doc/guides/nics/features/i40e.ini
index e862712c..16eab7f4 100644
--- a/doc/guides/nics/features/i40e.ini
+++ b/doc/guides/nics/features/i40e.ini
@@ -9,6 +9,8 @@ Link status = Y
Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
+Runtime Rx queue setup = Y
+Runtime Tx queue setup = Y
Jumbo frame = Y
Scattered Rx = Y
TSO = Y
@@ -44,6 +46,7 @@ Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
FW version = Y
+Module EEPROM dump = Y
Multiprocess aware = Y
BSD nic_uio = Y
Linux UIO = Y
diff --git a/doc/guides/nics/features/i40e_vec.ini b/doc/guides/nics/features/i40e_vec.ini
index 7d7b3a92..c65e8b03 100644
--- a/doc/guides/nics/features/i40e_vec.ini
+++ b/doc/guides/nics/features/i40e_vec.ini
@@ -34,6 +34,7 @@ Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
+Module EEPROM dump = Y
Multiprocess aware = Y
BSD nic_uio = Y
Linux UIO = Y
diff --git a/doc/guides/nics/features/i40e_vf.ini b/doc/guides/nics/features/i40e_vf.ini
index 46e0d9fc..ba2d8cbe 100644
--- a/doc/guides/nics/features/i40e_vf.ini
+++ b/doc/guides/nics/features/i40e_vf.ini
@@ -5,6 +5,7 @@
;
[Features]
Rx interrupt = Y
+Link status = Y
Queue start/stop = Y
Jumbo frame = Y
Scattered Rx = Y
diff --git a/doc/guides/nics/features/i40e_vf_vec.ini b/doc/guides/nics/features/i40e_vf_vec.ini
index c2c6c19f..421ed919 100644
--- a/doc/guides/nics/features/i40e_vf_vec.ini
+++ b/doc/guides/nics/features/i40e_vf_vec.ini
@@ -5,6 +5,7 @@
;
[Features]
Rx interrupt = Y
+Link status = Y
Queue start/stop = Y
Jumbo frame = Y
Scattered Rx = Y
diff --git a/doc/guides/nics/features/ifcvf.ini b/doc/guides/nics/features/ifcvf.ini
new file mode 100644
index 00000000..ef1fc471
--- /dev/null
+++ b/doc/guides/nics/features/ifcvf.ini
@@ -0,0 +1,8 @@
+;
+; Supported features of the 'ifcvf' vDPA driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+x86-32 = Y
+x86-64 = Y
diff --git a/doc/guides/nics/features/igb.ini b/doc/guides/nics/features/igb.ini
index 33d64d99..c53fd075 100644
--- a/doc/guides/nics/features/igb.ini
+++ b/doc/guides/nics/features/igb.ini
@@ -41,6 +41,7 @@ Basic stats = Y
Extended stats = Y
FW version = Y
EEPROM dump = Y
+Module EEPROM dump = Y
Registers dump = Y
BSD nic_uio = Y
Linux UIO = Y
diff --git a/doc/guides/nics/features/igb_vf.ini b/doc/guides/nics/features/igb_vf.ini
index e641a2c9..d9653234 100644
--- a/doc/guides/nics/features/igb_vf.ini
+++ b/doc/guides/nics/features/igb_vf.ini
@@ -4,6 +4,7 @@
; Refer to default.ini for the full list of available PMD features.
;
[Features]
+Link status = Y
Rx interrupt = Y
Scattered Rx = Y
TSO = Y
diff --git a/doc/guides/nics/features/ixgbe.ini b/doc/guides/nics/features/ixgbe.ini
index 1d68ee8e..41431117 100644
--- a/doc/guides/nics/features/ixgbe.ini
+++ b/doc/guides/nics/features/ixgbe.ini
@@ -51,6 +51,7 @@ Extended stats = Y
Stats per queue = Y
FW version = Y
EEPROM dump = Y
+Module EEPROM dump = Y
Registers dump = Y
Multiprocess aware = Y
BSD nic_uio = Y
diff --git a/doc/guides/nics/features/ixgbe_vec.ini b/doc/guides/nics/features/ixgbe_vec.ini
index 28bc0547..ef3ee688 100644
--- a/doc/guides/nics/features/ixgbe_vec.ini
+++ b/doc/guides/nics/features/ixgbe_vec.ini
@@ -40,6 +40,7 @@ Basic stats = Y
Extended stats = Y
Stats per queue = Y
EEPROM dump = Y
+Module EEPROM dump = Y
Registers dump = Y
Multiprocess aware = Y
BSD nic_uio = Y
diff --git a/doc/guides/nics/features/mlx4.ini b/doc/guides/nics/features/mlx4.ini
index f6efd21d..98a3f611 100644
--- a/doc/guides/nics/features/mlx4.ini
+++ b/doc/guides/nics/features/mlx4.ini
@@ -13,6 +13,7 @@ Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
+TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index c3636391..b28b43e5 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -21,6 +21,7 @@ Multicast MAC filter = Y
RSS hash = Y
RSS key update = Y
RSS reta update = Y
+Inner RSS = Y
SR-IOV = Y
VLAN filter = Y
Flow director = Y
@@ -29,6 +30,9 @@ CRC offload = Y
VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
+Timestamp offload = Y
+Inner L3 checksum = Y
+Inner L4 checksum = Y
Packet type parsing = Y
Rx descriptor status = Y
Tx descriptor status = Y
@@ -39,5 +43,6 @@ Multiprocess aware = Y
Other kdrv = Y
ARMv8 = Y
Power8 = Y
+x86-32 = Y
x86-64 = Y
Usage doc = Y
diff --git a/doc/guides/nics/features/mrvl.ini b/doc/guides/nics/features/mvpp2.ini
index 00d96218..ef47546d 100644
--- a/doc/guides/nics/features/mrvl.ini
+++ b/doc/guides/nics/features/mvpp2.ini
@@ -1,5 +1,5 @@
;
-; Supported features of the 'mrvl' network poll mode driver.
+; Supported features of the 'mvpp2' network poll mode driver.
;
; Refer to default.ini for the full list of available PMD features.
;
@@ -13,11 +13,13 @@ Allmulticast mode = Y
Unicast MAC filter = Y
Multicast MAC filter = Y
RSS hash = Y
+Flow control = Y
VLAN filter = Y
CRC offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
Packet type parsing = Y
Basic stats = Y
+Extended stats = Y
ARMv8 = Y
Usage doc = Y
diff --git a/doc/guides/nics/features/netvsc.ini b/doc/guides/nics/features/netvsc.ini
new file mode 100644
index 00000000..2ff6042b
--- /dev/null
+++ b/doc/guides/nics/features/netvsc.ini
@@ -0,0 +1,23 @@
+;
+; Supported features of the 'netvsc' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Speed capabilities = P
+Link status = Y
+Queue start/stop = Y
+Scattered Rx = Y
+Promiscuous mode = Y
+Allmulticast mode = Y
+Basic stats = Y
+Stats per queue = Y
+Extended stats = Y
+Multiprocess aware = Y
+Other kdrv = Y
+ARMv7 = Y
+ARMv8 = Y
+x86-32 = Y
+x86-64 = Y
+Usage doc = Y
+MTU update = Y
diff --git a/doc/guides/nics/features/qede.ini b/doc/guides/nics/features/qede.ini
index cbadc194..0d081002 100644
--- a/doc/guides/nics/features/qede.ini
+++ b/doc/guides/nics/features/qede.ini
@@ -6,10 +6,11 @@
[Features]
Speed capabilities = Y
Link status = Y
-Link status event = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
+LRO = Y
+TSO = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y
@@ -18,12 +19,14 @@ RSS hash = Y
RSS key update = Y
RSS reta update = Y
VLAN filter = Y
+N-tuple filter = Y
+Tunnel filter = Y
+Flow director = Y
Flow control = Y
CRC offload = Y
VLAN offload = Y
L3 checksum offload = Y
L4 checksum offload = Y
-Tunnel filter = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
@@ -32,11 +35,8 @@ Extended stats = Y
Stats per queue = Y
Multiprocess aware = Y
Linux UIO = Y
+Linux VFIO = Y
ARMv8 = Y
x86-32 = Y
x86-64 = Y
Usage doc = Y
-N-tuple filter = Y
-Flow director = Y
-LRO = Y
-TSO = Y
diff --git a/doc/guides/nics/features/qede_vf.ini b/doc/guides/nics/features/qede_vf.ini
index 18857b6e..e796b313 100644
--- a/doc/guides/nics/features/qede_vf.ini
+++ b/doc/guides/nics/features/qede_vf.ini
@@ -6,7 +6,6 @@
[Features]
Speed capabilities = Y
Link status = Y
-Link status event = Y
MTU update = Y
Jumbo frame = Y
Scattered Rx = Y
@@ -30,6 +29,7 @@ Extended stats = Y
Stats per queue = Y
Multiprocess aware = Y
Linux UIO = Y
+Linux VFIO = Y
ARMv8 = Y
x86-32 = Y
x86-64 = Y
diff --git a/doc/guides/nics/features/softnic.ini b/doc/guides/nics/features/softnic.ini
new file mode 100644
index 00000000..0583381c
--- /dev/null
+++ b/doc/guides/nics/features/softnic.ini
@@ -0,0 +1,9 @@
+;
+; Supported features of the 'softnic' poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+x86-32 = Y
+x86-64 = Y
+Usage doc = Y
diff --git a/doc/guides/nics/features/vhost.ini b/doc/guides/nics/features/vhost.ini
index dffd1f49..ef81abb4 100644
--- a/doc/guides/nics/features/vhost.ini
+++ b/doc/guides/nics/features/vhost.ini
@@ -5,7 +5,6 @@
;
[Features]
Link status = Y
-Link status event = Y
Free Tx mbuf on demand = Y
Queue status event = Y
Basic stats = Y
diff --git a/doc/guides/nics/features/virtio.ini b/doc/guides/nics/features/virtio.ini
index 16e577df..a16b8172 100644
--- a/doc/guides/nics/features/virtio.ini
+++ b/doc/guides/nics/features/virtio.ini
@@ -6,6 +6,7 @@
[Features]
Speed capabilities = P
Link status = Y
+Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
Scattered Rx = Y
diff --git a/doc/guides/nics/features/virtio_vec.ini b/doc/guides/nics/features/virtio_vec.ini
index c06c860d..e60fe36a 100644
--- a/doc/guides/nics/features/virtio_vec.ini
+++ b/doc/guides/nics/features/virtio_vec.ini
@@ -6,6 +6,7 @@
[Features]
Speed capabilities = P
Link status = Y
+Link status event = Y
Rx interrupt = Y
Queue start/stop = Y
Promiscuous mode = Y
diff --git a/doc/guides/nics/fm10k.rst b/doc/guides/nics/fm10k.rst
index c44e226e..d1391e99 100644
--- a/doc/guides/nics/fm10k.rst
+++ b/doc/guides/nics/fm10k.rst
@@ -79,14 +79,14 @@ Other features are supported using optional MACRO configuration. They include:
To enable via ``RX_OLFLAGS`` use ``RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y``.
-To guarantee the constraint, the following configuration flags in ``dev_conf.rxmode``
+To guarantee the constraint, the following capabilities in ``dev_conf.rxmode.offloads``
will be checked:
-* ``hw_vlan_extend``
+* ``DEV_RX_OFFLOAD_VLAN_EXTEND``
-* ``hw_ip_checksum``
+* ``DEV_RX_OFFLOAD_CHECKSUM``
-* ``header_split``
+* ``DEV_RX_OFFLOAD_HEADER_SPLIT``
* ``fdir_conf->mode``
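+
+A sketch of a configuration that keeps the RX vector path eligible (an
+illustrative fragment; ``port_conf`` and ``port_id`` are placeholders):
+
+.. code-block:: c
+
+    /* Sketch: leave the checked capabilities unset in rxmode.offloads. */
+    port_conf.rxmode.offloads &= ~(DEV_RX_OFFLOAD_VLAN_EXTEND |
+                                   DEV_RX_OFFLOAD_CHECKSUM |
+                                   DEV_RX_OFFLOAD_HEADER_SPLIT);
+    rte_eth_dev_configure(port_id, 1, 1, &port_conf);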
@@ -106,19 +106,9 @@ TX Constraint
Features not Supported by TX Vector PMD
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-TX vPMD only works when ``txq_flags`` is set to ``FM10K_SIMPLE_TX_FLAG``.
-This means that it does not support TX multi-segment, VLAN offload or TX csum
-offload. The following MACROs are used for these three features:
+TX vPMD only works when ``offloads`` is set to 0.
-* ``ETH_TXQ_FLAGS_NOMULTSEGS``
-
-* ``ETH_TXQ_FLAGS_NOVLANOFFL``
-
-* ``ETH_TXQ_FLAGS_NOXSUMSCTP``
-
-* ``ETH_TXQ_FLAGS_NOXSUMUDP``
-
-* ``ETH_TXQ_FLAGS_NOXSUMTCP``
+This means that it does not support any TX offload.
Limitations
-----------
@@ -149,9 +139,8 @@ CRC striping
~~~~~~~~~~~~
The FM10000 family of NICs strips the CRC for every packet coming into the
-host interface. So, CRC will be stripped even when the
-``rxmode.hw_strip_crc`` member is set to 0 in ``struct rte_eth_conf``.
-
+host interface. So, CRC will be stripped even when ``DEV_RX_OFFLOAD_CRC_STRIP``
+in ``rxmode.offloads`` is NOT set in ``struct rte_eth_conf``.
Maximum packet length
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/nics/i40e.rst b/doc/guides/nics/i40e.rst
index e1b8083c..65d87f86 100644
--- a/doc/guides/nics/i40e.rst
+++ b/doc/guides/nics/i40e.rst
@@ -4,14 +4,16 @@
I40E Poll Mode Driver
======================
-The I40E PMD (librte_pmd_i40e) provides poll mode driver support
-for the Intel X710/XL710/X722 10/40 Gbps family of adapters.
+The i40e PMD (librte_pmd_i40e) provides poll mode driver support for
+10/25/40 Gbps Intel® Ethernet 700 Series Network Adapters based on
+the Intel Ethernet Controller X710/XL710/XXV710 and Intel Ethernet
+Connection X722 (which supports only a subset of the features).
Features
--------
-Features of the I40E PMD are:
+Features of the i40e PMD are:
- Multiple queues for TX and RX
- Receiver Side Scaling (RSS)
@@ -40,6 +42,7 @@ Features of the I40E PMD are:
- VF Daemon (VFD) - EXPERIMENTAL
- Dynamic Device Personalization (DDP)
- Queue region configuration
+- Virtual Function Port Representors
Prerequisites
-------------
@@ -53,7 +56,37 @@ Prerequisites
section of the :ref:`Getting Started Guide for Linux <linux_gsg>`.
- Upgrade the NVM/FW version following the `Intel® Ethernet NVM Update Tool Quick Usage Guide for Linux
- <https://www-ssl.intel.com/content/www/us/en/embedded/products/networking/nvm-update-tool-quick-linux-usage-guide.html>`_ if needed.
+ <https://www-ssl.intel.com/content/www/us/en/embedded/products/networking/nvm-update-tool-quick-linux-usage-guide.html>`_ and `Intel® Ethernet NVM Update Tool: Quick Usage Guide for EFI <https://www.intel.com/content/www/us/en/embedded/products/networking/nvm-update-tool-quick-efi-usage-guide.html>`_ if needed.
+
+Recommended Matching List
+-------------------------
+
+It is highly recommended to upgrade the i40e kernel driver and firmware to
+avoid compatibility issues with the i40e PMD. The suggested matching list
+below has been tested and verified. For details, refer to the Tested
+Platforms/Tested NICs chapter in the release notes.
+
+ +--------------+-----------------------+------------------+
+ | DPDK version | Kernel driver version | Firmware version |
+ +==============+=======================+==================+
+ | 18.05 | 2.4.6 | 6.01 |
+ +--------------+-----------------------+------------------+
+ | 18.02 | 2.4.3 | 6.01 |
+ +--------------+-----------------------+------------------+
+ | 17.11 | 2.1.26 | 6.01 |
+ +--------------+-----------------------+------------------+
+ | 17.08 | 2.0.19 | 6.01 |
+ +--------------+-----------------------+------------------+
+ | 17.05 | 1.5.23 | 5.05 |
+ +--------------+-----------------------+------------------+
+ | 17.02 | 1.5.23 | 5.05 |
+ +--------------+-----------------------+------------------+
+ | 16.11 | 1.5.23 | 5.05 |
+ +--------------+-----------------------+------------------+
+ | 16.07 | 1.4.25 | 5.04 |
+ +--------------+-----------------------+------------------+
+ | 16.04 | 1.4.25 | 5.02 |
+ +--------------+-----------------------+------------------+
Pre-Installation Configuration
------------------------------
@@ -93,11 +126,6 @@ Please note that enabling debugging options may affect system performance.
Number of queues reserved for each VMDQ Pool.
-- ``CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL`` (default ``-1``)
-
- Interrupt Throttling interval.
-
-
Runtime Config Options
~~~~~~~~~~~~~~~~~~~~~~
@@ -121,6 +149,20 @@ Runtime Config Options
will switch PF interrupt from IntN to Int0 to avoid interrupt conflict between
DPDK and Linux Kernel.
+- ``Support VF Port Representor`` (default ``not enabled``)
+
+ The i40e PF PMD supports the creation of VF port representors for the control
+ and monitoring of i40e virtual function devices. Each port representor
+ corresponds to a single virtual function of that device. Using the ``devargs``
+ option ``representor``, the user can specify at initialization of the PF PMD
+ which virtual functions to create port representors for, by passing the IDs
+ of the required VFs::
+
+ -w DBDF,representor=[0,1,4]
+
+ Currently hot-plugging of representor ports is not supported, so all required
+ representors must be specified on creation of the PF.
+
Driver compilation and testing
------------------------------
@@ -324,7 +366,7 @@ Delete all flow director rules on a port:
Floating VEB
~~~~~~~~~~~~~
-The Intel® Ethernet Controller X710 and XL710 Family support a feature called
+The Intel® Ethernet 700 Series supports a feature called
"Floating VEB".
A Virtual Ethernet Bridge (VEB) is an IEEE Edge Virtual Bridging (EVB) term
@@ -370,21 +412,22 @@ or greater.
Dynamic Device Personalization (DDP)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The Intel® Ethernet Controller X*710 support a feature called "Dynamic Device
-Personalization (DDP)", which is used to configure hardware by downloading
-a profile to support protocols/filters which are not supported by default.
-The DDP functionality requires a NIC firmware version of 6.0 or greater.
+The Intel® Ethernet 700 Series, except for the Intel Ethernet Connection
+X722, supports a feature called "Dynamic Device Personalization (DDP)",
+which is used to configure hardware by downloading a profile to support
+protocols/filters which are not supported by default. The DDP
+functionality requires a NIC firmware version of 6.0 or greater.
-Current implementation supports MPLSoUDP/MPLSoGRE/GTP-C/GTP-U/PPPoE/PPPoL2TP,
+The current implementation supports GTP-C/GTP-U/PPPoE/PPPoL2TP;
steering can be used with rte_flow API.
-Load a profile which supports MPLSoUDP/MPLSoGRE and store backup profile:
+Load a profile which supports GTP and store backup profile:
.. code-block:: console
- testpmd> ddp add 0 ./mpls.pkgo,./backup.pkgo
+ testpmd> ddp add 0 ./gtp.pkgo,./backup.pkgo
-Delete a MPLS profile and restore backup profile:
+Delete a GTP profile and restore backup profile:
.. code-block:: console
@@ -396,11 +439,11 @@ Get loaded DDP package info list:
testpmd> ddp get list 0
-Display information about a MPLS profile:
+Display information about a GTP profile:
.. code-block:: console
- testpmd> ddp get info ./mpls.pkgo
+ testpmd> ddp get info ./gtp.pkgo
Input set configuration
~~~~~~~~~~~~~~~~~~~~~~~
@@ -416,7 +459,7 @@ For example, to use only 48bit prefix for IPv6 src address for IPv6 TCP RSS:
Queue region configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The Ethernet Controller X710/XL710 supports a feature of queue regions
+The Intel® Ethernet 700 Series supports a feature of queue regions
configuration for RSS in the PF, so that different traffic classes or
different packet classification types can be separated to different
queues in different queue regions. There is an API for configuration
@@ -440,8 +483,8 @@ details please refer to :doc:`../testpmd_app_ug/index`.
Limitations or Known issues
---------------------------
-MPLS packet classification on X710/XL710
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MPLS packet classification
+~~~~~~~~~~~~~~~~~~~~~~~~~~
For firmware versions prior to 5.0, MPLS packets are not recognized by the NIC.
The L2 Payload flow type in flow director can be used to classify MPLS packet
@@ -489,14 +532,14 @@ Incorrect Rx statistics when packet is oversize
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When a packet is over maximum frame size, the packet is dropped.
-However the Rx statistics, when calling `rte_eth_stats_get` incorrectly
+However, the Rx statistics, when calling `rte_eth_stats_get`, incorrectly
show it as received.
VF & TC max bandwidth setting
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The per VF max bandwidth and per TC max bandwidth cannot be enabled in parallel.
-The dehavior is different when handling per VF and per TC max bandwidth setting.
+The behavior is different when handling per VF and per TC max bandwidth setting.
When enabling per VF max bandwidth, SW will check if per TC max bandwidth is
enabled. If so, return failure.
When enabling per TC max bandwidth, SW will check if per VF max bandwidth
@@ -517,11 +560,11 @@ VF performance is impacted by PCI extended tag setting
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To reach maximum NIC performance in the VF the PCI extended tag must be
-enabled. The DPDK I40E PF driver will set this feature during initialization,
+enabled. The DPDK i40e PF driver will set this feature during initialization,
but the kernel PF driver does not. So when running traffic on a VF which is
managed by the kernel PF driver, a significant NIC performance downgrade has
-been observed (for 64 byte packets, there is about 25% linerate downgrade for
-a 25G device and about 35% for a 40G device).
+been observed (for 64 byte packets, there is about 25% line-rate downgrade for
+a 25GbE device and about 35% for a 40GbE device).
For kernel version >= 4.11, the kernel's PCI driver will enable the extended
tag if it detects that the device supports it. So by default, this is not an
@@ -562,12 +605,12 @@ with DPDK, then the configuration will also impact port B in the NIC with
kernel driver, which doesn't want to use the TPID.
So the PMD reports a warning to clarify what is changed by writing to the global register.
-High Performance of Small Packets on 40G NIC
---------------------------------------------
+High Performance of Small Packets on 40GbE NIC
+----------------------------------------------
As there might be firmware fixes for performance enhancement in the latest version
of the firmware image, a firmware update might be needed to get high performance.
-Check with the local Intel's Network Division application engineers for firmware updates.
+Check the Intel support website for the latest firmware updates.
Users should consult the release notes specific to a DPDK release to identify
the validated firmware version for a NIC using the i40e driver.
@@ -577,23 +620,13 @@ Use 16 Bytes RX Descriptor Size
The i40e PMD supports both 16 and 32 byte RX descriptor sizes, and the 16 byte size can help improve performance for small packets.
Set ``CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC`` in the config file to use 16 byte RX descriptors.
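+
+For example, in the target's ``.config`` file::
+
+   CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=y
+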
-High Performance and per Packet Latency Tradeoff
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Due to the hardware design, the interrupt signal inside NIC is needed for per
-packet descriptor write-back. The minimum interval of interrupts could be set
-at compile time by ``CONFIG_RTE_LIBRTE_I40E_ITR_INTERVAL`` in configuration files.
-Though there is a default configuration, the interval could be tuned by the
-users with that configuration item depends on what the user cares about more,
-performance or per packet latency.
-
Example of getting best performance with l3fwd example
------------------------------------------------------
-The following is an example of running the DPDK ``l3fwd`` sample application to get high performance with an
-Intel server platform and Intel XL710 NICs.
+The following is an example of running the DPDK ``l3fwd`` sample application to get high performance on a
+server with Intel Xeon processors and an Intel Ethernet CNA XL710.
-The example scenario is to get best performance with two Intel XL710 40GbE ports.
+The example scenario is to get best performance with two Intel Ethernet CNA XL710 40GbE ports.
See :numref:`figure_intel_perf_test_setup` for the performance test setup.
.. _figure_intel_perf_test_setup:
@@ -603,9 +636,9 @@ See :numref:`figure_intel_perf_test_setup` for the performance test setup.
Performance Test Setup
-1. Add two Intel XL710 NICs to the platform, and use one port per card to get best performance.
- The reason for using two NICs is to overcome a PCIe Gen3's limitation since it cannot provide 80G bandwidth
- for two 40G ports, but two different PCIe Gen3 x8 slot can.
+1. Add two Intel Ethernet CNA XL710 cards to the platform, and use one port per card to get best performance.
+ The reason for using two NICs is to overcome a PCIe v3.0 limitation: a single slot cannot provide 80 Gbps of bandwidth
+ for two 40GbE ports, but two different PCIe v3.0 x8 slots can.
Refer to the sample NICs output above, then we can select ``82:00.0`` and ``85:00.0`` as test ports::
82:00.0 Ethernet [0200]: Intel XL710 for 40GbE QSFP+ [8086:1583]
@@ -621,7 +654,7 @@ See :numref:`figure_intel_perf_test_setup` for the performance test setup.
4. Bind these two ports to igb_uio.
-5. As to XL710 40G port, we need at least two queue pairs to achieve best performance, then two queues per port
+5. As to Intel Ethernet CNA XL710 40GbE port, we need at least two queue pairs to achieve best performance, then two queues per port
will be required, and each queue pair will need a dedicated CPU core for receiving/transmitting packets.
6. The DPDK sample application ``l3fwd`` will be used for performance testing, using two ports for bi-directional forwarding.
diff --git a/doc/guides/nics/ifc.rst b/doc/guides/nics/ifc.rst
new file mode 100644
index 00000000..48f9adf1
--- /dev/null
+++ b/doc/guides/nics/ifc.rst
@@ -0,0 +1,96 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation.
+
+IFCVF vDPA driver
+=================
+
+The IFCVF vDPA (vhost data path acceleration) driver provides support for the
+Intel FPGA 100G VF (IFCVF). IFCVF's datapath is virtio ring compatible; it
+works as a HW vhost backend which can send/receive packets to/from virtio
+directly by DMA. It also supports dirty page logging and device state
+report/restore, and this driver enables its vDPA functionality.
+
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following option can be modified in the ``config`` file.
+
+- ``CONFIG_RTE_LIBRTE_IFCVF_VDPA_PMD`` (default ``y`` for linux)
+
+ Toggle compilation of the ``librte_ifcvf_vdpa`` driver.
+
+
+IFCVF vDPA Implementation
+-------------------------
+
+IFCVF's vendor ID and device ID are the same as those of the virtio net PCI
+device, but it has its own subsystem vendor ID and device ID. To let the
+device be probed by the IFCVF driver, add the "vdpa=1" parameter to specify
+that this device is to be used in vDPA mode rather than polling mode; the
+virtio PMD will skip the device when it detects this parameter.
+
+Different VF devices serve different virtio frontends which are in different
+VMs, so each VF needs to have its own DMA address translation service. During
+the driver probe a new container is created for this device; with this
+container the vDPA driver can program the DMA remapping table with the VM's
+memory region information.
+
+Key IFCVF vDPA driver ops
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- ifcvf_dev_config:
+ Enable the VF data path with the virtio information provided by the vhost
+ lib, including IOMMU programming to enable VF DMA to the VM's memory, VFIO
+ interrupt setup to route HW interrupts to the virtio driver, creation of a
+ notify relay thread to translate the virtio driver's kick into an MMIO write
+ onto HW, and HW queue configuration.
+
+ This function gets called to set up the HW data path backend when the
+ virtio driver in the VM gets ready.
+
+- ifcvf_dev_close:
+ Revoke all the setup in ifcvf_dev_config.
+
+ This function gets called when the virtio driver stops the device in the VM.
+
+To create a vhost port with IFC VF
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Create a vhost socket and assign a VF's device ID to this socket via
+ vhost API. When QEMU vhost connection gets ready, the assigned VF will
+ get configured automatically.
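+
+  A sketch of this flow, assuming a vDPA device ID ``did`` already obtained
+  via this release's vDPA API (the socket path and flags are illustrative):
+
+  .. code-block:: c
+
+      /* Sketch: bind a vhost-user socket to the IFC VF's vDPA device ID. */
+      rte_vhost_driver_register("/tmp/vhost-user-0", 0);
+      rte_vhost_driver_attach_vdpa_device("/tmp/vhost-user-0", did);
+      rte_vhost_driver_start("/tmp/vhost-user-0");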
+
+
+Features
+--------
+
+Features of the IFCVF driver are:
+
+- Compatibility with virtio 0.95 and 1.0.
+
+
+Prerequisites
+-------------
+
+- Platform with IOMMU feature. IFC VF needs the address translation service to
+  Rx/Tx directly with the virtio driver in the VM.
+
+
+Limitations
+-----------
+
+Dependency on vfio-pci
+~~~~~~~~~~~~~~~~~~~~~~
+
+The vDPA driver needs to set up VF MSI-X interrupts; each queue's interrupt
+vector is mapped to a callfd associated with a virtio ring. Currently only
+vfio-pci allows multiple interrupts, so the IFCVF driver depends on vfio-pci.
+
+Live Migration with VIRTIO_NET_F_GUEST_ANNOUNCE
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+IFC VF doesn't support RARP packet generation; a virtio frontend supporting
+the VIRTIO_NET_F_GUEST_ANNOUNCE feature can help to do that.
diff --git a/doc/guides/nics/img/szedata2_nfb200g_architecture.svg b/doc/guides/nics/img/szedata2_nfb200g_architecture.svg
new file mode 100644
index 00000000..e152e4a8
--- /dev/null
+++ b/doc/guides/nics/img/szedata2_nfb200g_architecture.svg
@@ -0,0 +1,214 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ id="svg2"
+ stroke-miterlimit="10"
+ stroke-linecap="square"
+ stroke="none"
+ fill="none"
+ viewBox="0.0 0.0 568.7322834645669 352.3937007874016"
+ version="1.1">
+ <metadata
+ id="metadata65">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <defs
+ id="defs63" />
+ <clipPath
+ id="p.0">
+ <path
+ id="path5"
+ clip-rule="nonzero"
+ d="m0 0l568.7323 0l0 352.3937l-568.7323 0l0 -352.3937z" />
+ </clipPath>
+ <g
+ id="g7"
+ clip-path="url(#p.0)">
+ <path
+ id="path9"
+ fill-rule="evenodd"
+ d="m0 0l568.7323 0l0 352.3937l-568.7323 0z"
+ fill-opacity="0.0"
+ fill="#000000" />
+ <path
+ id="path11"
+ d="m 40.564137,14.365075 254.362203,0 0,131.842535 -254.362203,0 z"
+ style="fill:#47c3d3;fill-rule:evenodd" />
+ <path
+ id="path15"
+ d="m 54.075948,146.2076 227.338592,0 0,32.94488 -227.338592,0 z"
+ style="fill:#c2c2c2;fill-rule:evenodd" />
+ <path
+ id="path19"
+ d="m 321.90535,146.2076 227.33856,0 0,32.94488 -227.33856,0 z"
+ style="fill:#c2c2c2;fill-rule:evenodd" />
+ <path
+ id="path23"
+ d="m 440.30217,146.24338 -11.82364,-20.50632 6.86313,0 0,-44.550399 -120.12924,0 0,6.938519 -20.28345,-11.953539 20.28345,-11.953547 0,6.93852 130.0503,0 0,54.580446 6.8631,0 z"
+ style="fill:#9a9a9a;fill-rule:evenodd" />
+ <path
+ id="path25"
+ d="m 112.39353,263.09765 0,0 c 0,-8.08875 6.55722,-14.64597 14.64597,-14.64597 l 58.58208,0 0,0 c 3.88435,0 7.60962,1.54305 10.35626,4.28971 2.74666,2.74664 4.28971,6.47189 4.28971,10.35626 l 0,58.58209 c 0,8.08875 -6.55722,14.64597 -14.64597,14.64597 l -58.58208,0 c -8.08875,0 -14.64597,-6.55722 -14.64597,-14.64597 z"
+ style="fill:#c2c2c2;fill-rule:evenodd" />
+ <path
+ id="path29"
+ d="m 391.63763,263.09765 0,0 c 0,-8.08875 6.55722,-14.64597 14.64597,-14.64597 l 58.58209,0 0,0 c 3.88437,0 7.60962,1.54305 10.35626,4.28971 2.74664,2.74664 4.2897,6.47189 4.2897,10.35626 l 0,58.58209 c 0,8.08875 -6.55722,14.64597 -14.64596,14.64597 l -58.58209,0 c -8.08875,0 -14.64597,-6.55722 -14.64597,-14.64597 z"
+ style="fill:#c2c2c2;fill-rule:evenodd" />
+ <path
+ id="path33"
+ d="m 135.20981,199.01075 19.85826,-19.85826 19.85828,19.85826 -9.92914,0 0,29.5748 9.92914,0 -19.85828,19.85827 -19.85826,-19.85827 9.92914,0 0,-29.5748 z"
+ style="fill:#9a9a9a;fill-rule:evenodd" />
+ <path
+ id="path35"
+ d="m 415.71635,199.01064 19.85828,-19.85826 19.85827,19.85826 -9.92914,0 0,29.57481 9.92914,0 -19.85827,19.85826 -19.85828,-19.85826 9.92914,0 0,-29.57481 z"
+ style="fill:#9a9a9a;fill-rule:evenodd" />
+ <path
+ id="path37"
+ d="m 15.205,31.273212 74.362206,0 0,32.944885 -74.362206,0 z"
+ style="fill:#ff8434;fill-rule:evenodd" />
+ <path
+ id="path41"
+ d="m 16.05531,80.231216 74.3622,0 0,32.944884 -74.3622,0 z"
+ style="fill:#ff8434;fill-rule:evenodd" />
+ <path
+ id="path45"
+ d="m 275.44377,174.07111 0,111.55905 -37.16536,0 0,-111.55905 z"
+ style="fill:#ff8434;fill-rule:evenodd" />
+ <path
+ id="path49"
+ d="m 97.923493,174.07111 0,111.55905 -37.16535,0 0,-111.55905 z"
+ style="fill:#ff8434;fill-rule:evenodd" />
+ <path
+ id="path53"
+ d="m 366.27543,174.07111 0,111.55905 -37.16537,0 0,-111.55905 z"
+ style="fill:#ff8434;fill-rule:evenodd" />
+ <path
+ id="path57"
+ d="m 542.0392,174.07111 0,111.55905 -37.16534,0 0,-111.55905 z"
+ style="fill:#ff8434;fill-rule:evenodd" />
+ <text
+ id="text4480"
+ y="54.570911"
+ x="24.425898"
+ style="font-style:normal;font-weight:normal;font-size:18.75px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ y="54.570911"
+ x="24.425898"
+ id="tspan4482">ETH 0</tspan></text>
+ <text
+ id="text4480-3"
+ y="103.53807"
+ x="25.51882"
+ style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4502"
+ y="103.53807"
+ x="25.51882">ETH 1</tspan></text>
+ <text
+ id="text4480-7"
+ y="86.200645"
+ x="103.15979"
+ style="font-style:normal;font-weight:normal;font-size:18.75px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ id="tspan4524"
+ y="86.200645"
+ x="103.15979">NFB-200G2QL card</tspan></text>
+ <text
+ id="text4480-7-3"
+ y="169.2041"
+ x="92.195312"
+ style="font-style:normal;font-weight:normal;font-size:18.75px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4546"
+ y="169.2041"
+ x="92.195312">PCI-E master slot</tspan></text>
+ <text
+ id="text4480-7-3-6"
+ y="169.20409"
+ x="367.98856"
+ style="font-style:normal;font-weight:normal;font-size:18.75px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4546-2"
+ y="169.20409"
+ x="367.98856">PCI-E slave slot</tspan></text>
+ <text
+ transform="matrix(0,1,-1,0,0,0)"
+ id="text4480-3-9"
+ y="-73.591309"
+ x="182.29367"
+ style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4502-1"
+ y="-73.591309"
+ x="182.29367">QUEUE 0</tspan></text>
+ <text
+ transform="matrix(0,1.0000002,-0.99999976,0,0,0)"
+ id="text4480-3-9-2"
+ y="-251.11163"
+ x="182.29283"
+ style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4502-1-7"
+ y="-251.11163"
+ x="182.29283">QUEUE 15</tspan></text>
+ <text
+ transform="matrix(0,1,-1,0,0,0)"
+ id="text4480-3-9-2-0"
+ y="-341.94324"
+ x="182.29311"
+ style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4502-1-7-9"
+ y="-341.94324"
+ x="182.29311">QUEUE 16</tspan></text>
+ <text
+ transform="matrix(0,1,-1,0,0,0)"
+ id="text4480-3-9-2-3"
+ y="-517.70703"
+ x="182.29356"
+ style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4502-1-7-6"
+ y="-517.70703"
+ x="182.29356">QUEUE 31</tspan></text>
+ <text
+ id="text4480-3-0"
+ y="299.21396"
+ x="128.3978"
+ style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4502-6"
+ y="299.21396"
+ x="128.3978">CPU 0</tspan></text>
+ <text
+ id="text4480-3-0-2"
+ y="299.21396"
+ x="407.88452"
+ style="font-style:normal;font-weight:normal;font-size:20px;line-height:125%;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ xml:space="preserve"><tspan
+ style="font-size:18.75px"
+ id="tspan4502-6-6"
+ y="299.21396"
+ x="407.88452">CPU 1</tspan></text>
+ </g>
+</svg>
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 59419f43..59f6063d 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -13,6 +13,7 @@ Network Interface Controller Drivers
build_and_test
ark
avp
+ axgbe
bnx2x
bnxt
cxgbe
@@ -23,6 +24,7 @@ Network Interface Controller Drivers
enic
fm10k
i40e
+ ifc
igb
ixgbe
intel_vf
@@ -30,11 +32,13 @@ Network Interface Controller Drivers
liquidio
mlx4
mlx5
- mrvl
+ mvpp2
+ netvsc
nfp
octeontx
qede
sfc_efx
+ softnic
szedata2
tap
thunderx
diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 0c660f29..16d63902 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -68,15 +68,15 @@ Other features are supported using optional MACRO configuration. They include:
* HW extend dual VLAN
-To guarantee the constraint, configuration flags in dev_conf.rxmode will be checked:
+To guarantee the constraint, capabilities in dev_conf.rxmode.offloads will be checked:
-* hw_vlan_strip
+* DEV_RX_OFFLOAD_VLAN_STRIP
-* hw_vlan_extend
+* DEV_RX_OFFLOAD_VLAN_EXTEND
-* hw_ip_checksum
+* DEV_RX_OFFLOAD_CHECKSUM
-* header_split
+* DEV_RX_OFFLOAD_HEADER_SPLIT
* dev_conf
@@ -102,20 +102,9 @@ Consequently, by default the tx_rs_thresh value is in the range 32 to 64.
Feature not Supported by TX Vector PMD
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-TX vPMD only works when txq_flags is set to IXGBE_SIMPLE_FLAGS.
+TX vPMD only works when ``offloads`` is set to 0.
-This means that it does not support TX multi-segment, VLAN offload and TX csum offload.
-The following MACROs are used for these three features:
-
-* ETH_TXQ_FLAGS_NOMULTSEGS
-
-* ETH_TXQ_FLAGS_NOVLANOFFL
-
-* ETH_TXQ_FLAGS_NOXSUMSCTP
-
-* ETH_TXQ_FLAGS_NOXSUMUDP
-
-* ETH_TXQ_FLAGS_NOXSUMTCP
+This means that it does not support any TX offload.
Application Programming Interface
---------------------------------
@@ -130,13 +119,13 @@ l3fwd
~~~~~
When running l3fwd with vPMD, there is one thing to note.
-In the configuration, ensure that port_conf.rxmode.hw_ip_checksum=0.
+In the configuration, ensure that DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads is NOT set.
Otherwise, by default, RX vPMD is disabled.
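+
+A sketch of the relevant fragment (``port_conf``, ``port_id`` and the queue
+counts are placeholders):
+
+.. code-block:: c
+
+    /* Sketch: keep checksum offload out of rxmode so RX vPMD can be used. */
+    port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
+    rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
+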
load_balancer
~~~~~~~~~~~~~
-As in the case of l3fwd, set configure port_conf.rxmode.hw_ip_checksum=0 to enable vPMD.
+As in the case of l3fwd, to enable vPMD, do NOT set DEV_RX_OFFLOAD_CHECKSUM in port_conf.rxmode.offloads.
In addition, for improved performance, use -bsz "(32,32),(64,64),(32,32)" in load_balancer to avoid using the default burst size of 144.
@@ -228,6 +217,20 @@ For more details see the IPsec Security Gateway Sample Application and Security
library documentation.
+Virtual Function Port Representors
+----------------------------------
+The IXGBE PF PMD supports the creation of VF port representors for the control
+and monitoring of IXGBE virtual function devices. Each port representor
+corresponds to a single virtual function of that device. Using the ``devargs``
+option ``representor``, the user can specify at initialization of the PF PMD
+which virtual functions to create port representors for, by passing the IDs
+of the required VFs::
+
+ -w DBDF,representor=[0,1,4]
+
+Currently hot-plugging of representor ports is not supported, so all required
+representors must be specified on creation of the PF.
+
Supported Chipsets and NICs
---------------------------
diff --git a/doc/guides/nics/liquidio.rst b/doc/guides/nics/liquidio.rst
index 61485ade..87b42cdc 100644
--- a/doc/guides/nics/liquidio.rst
+++ b/doc/guides/nics/liquidio.rst
@@ -201,6 +201,4 @@ Number of descriptors for Rx/Tx ring should be in the range 128 to 512.
CRC striping
~~~~~~~~~~~~
-LiquidIO adapters strip ethernet FCS of every packet coming to the host
-interface. So, CRC will be stripped even when the ``rxmode.hw_strip_crc``
-member is set to 0 in ``struct rte_eth_conf``.
+LiquidIO adapters strip the Ethernet FCS of every packet coming to the host interface.
diff --git a/doc/guides/nics/mlx4.rst b/doc/guides/nics/mlx4.rst
index 98b97166..4a57c7a6 100644
--- a/doc/guides/nics/mlx4.rst
+++ b/doc/guides/nics/mlx4.rst
@@ -1,32 +1,6 @@
-.. BSD LICENSE
+.. SPDX-License-Identifier: BSD-3-Clause
Copyright 2012 6WIND S.A.
- Copyright 2015 Mellanox
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of 6WIND S.A. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ Copyright 2015 Mellanox Technologies, Ltd
MLX4 poll mode driver library
=============================
@@ -98,9 +72,10 @@ These options can be modified in the ``.config`` file.
missing with ``ldd(1)``.
It works by moving these dependencies to a purpose-built rdma-core "glue"
- plug-in, which must either be installed in ``CONFIG_RTE_EAL_PMD_PATH`` if
- set, or in a standard location for the dynamic linker (e.g. ``/lib``) if
- left to the default empty string (``""``).
+ plug-in which must either be installed in a directory whose name is based
+ on ``CONFIG_RTE_EAL_PMD_PATH`` suffixed with ``-glue`` if set, or in a
+ standard location for the dynamic linker (e.g. ``/lib``) if left to the
+ default empty string (``""``).
This option has no performance impact.
@@ -110,14 +85,6 @@ These options can be modified in the ``.config`` file.
adds additional run-time checks and debugging messages at the cost of
lower performance.
-- ``CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE`` (default **8**)
-
- Maximum number of cached memory pools (MPs) per TX queue. Each MP from
- which buffers are to be transmitted must be associated to memory regions
- (MRs). This is a slow operation that must be cached.
-
- This value is always 1 for RX queues since they use a single MP.
-
Environment variables
~~~~~~~~~~~~~~~~~~~~~
@@ -168,6 +135,16 @@ below.
following limitation: VLAN filtering is not supported with this mode.
This is the recommended mode in case VLAN filter is not needed.
+Limitations
+-----------
+
+- CRC stripping is supported by default and always reported as "true".
+ The ability to enable/disable CRC stripping requires OFED version
+ 4.3-1.5.0.0 and above or rdma-core version v18 and above.
+
+- TSO (Transmit Segmentation Offload) is supported in OFED version
+ 4.4 and above.
+
Prerequisites
-------------
@@ -236,7 +213,7 @@ Current RDMA core package and Linux kernel (recommended)
Mellanox OFED as a fallback
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- `Mellanox OFED`_ version: **4.2, 4.3**.
+- `Mellanox OFED`_ version: **4.3, 4.4**.
- firmware version: **2.42.5000** and above.
.. _`Mellanox OFED`: http://www.mellanox.com/page/products_dyn?product_family=26&mtag=linux_sw_drivers
@@ -396,6 +373,12 @@ Performance tuning
The XXX can be different on different systems. Make sure to configure
according to the setpci output.
+6. To minimize the overhead of searching Memory Regions:
+
+ - ``--socket-mem`` is recommended to reserve a predictable amount of memory.
+ - Configure a per-lcore cache when creating mempools for packet buffers.
+ - Refrain from dynamically allocating/freeing memory at run time.
+
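+ For instance, a hypothetical testpmd invocation reserving memory per socket
+ up front could look like this (the PCI address and sizes are illustrative
+ only):
+
+ .. code-block:: console
+
+    testpmd --socket-mem=2048,2048 -w 0000:83:00.0 -- -i
+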
Usage example
-------------
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 0e6e525c..52e1213c 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -1,40 +1,14 @@
-.. BSD LICENSE
+.. SPDX-License-Identifier: BSD-3-Clause
Copyright 2015 6WIND S.A.
- Copyright 2015 Mellanox
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of 6WIND S.A. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ Copyright 2015 Mellanox Technologies, Ltd
MLX5 poll mode driver
=====================
The MLX5 poll mode driver library (**librte_pmd_mlx5**) provides support
-for **Mellanox ConnectX-4**, **Mellanox ConnectX-4 Lx** and **Mellanox
-ConnectX-5** families of 10/25/40/50/100 Gb/s adapters as well as their
-virtual functions (VF) in SR-IOV context.
+for **Mellanox ConnectX-4**, **Mellanox ConnectX-4 Lx**, **Mellanox
+ConnectX-5** and **Mellanox Bluefield** families of 10/25/40/50/100 Gb/s
+adapters as well as their virtual functions (VF) in SR-IOV context.
Information and documentation about these adapters can be found on the
`Mellanox website <http://www.mellanox.com>`__. Help is also provided by the
@@ -75,7 +49,7 @@ libibverbs.
Features
--------
-- Multi arch support: x86_64, POWER8, ARMv8.
+- Multi arch support: x86_64, POWER8, ARMv8, i686.
- Multiple TX and RX queues.
- Support for scattered TX and RX frames.
- IPv4, IPv6, TCPv4, TCPv6, UDPv4 and UDPv6 RSS on any number of queues.
@@ -95,17 +69,17 @@ Features
- Multiple process.
- KVM and VMware ESX SR-IOV modes are supported.
- RSS hash result is supported.
-- Hardware TSO.
-- Hardware checksum TX offload for VXLAN and GRE.
+- Hardware TSO for generic IP or UDP tunnel, including VXLAN and GRE.
+- Hardware checksum Tx offload for generic IP or UDP tunnel, including VXLAN and GRE.
- RX interrupts.
- Statistics query including Basic, Extended and per queue.
- Rx HW timestamp.
+- Tunnel types: VXLAN, L3 VXLAN, VXLAN-GPE, GRE, MPLSoGRE, MPLSoUDP.
+- Tunnel HW offloads: packet type, inner/outer RSS, IP and UDP checksum verification.
Limitations
-----------
-- Inner RSS for VXLAN frames is not supported yet.
-- Hardware checksum RX offloads for VXLAN inner header are not supported yet.
- For secondary process:
- Forked secondary process not supported.
@@ -131,11 +105,37 @@ Limitations
- A multi segment packet must have less than 6 segments in case the Tx burst function
is set to multi-packet send or Enhanced multi-packet send. Otherwise it must have
less than 50 segments.
+
- Count action for RTE flow is **only supported in Mellanox OFED**.
+
- Flows with a VXLAN Network Identifier equal (or ends to be equal)
to 0 are not supported.
+
- VXLAN TSO and checksum offloads are not supported on VM.
+- L3 VXLAN and VXLAN-GPE tunnels cannot be supported together with MPLSoGRE and MPLSoUDP.
+
+- VF: flow rules created on VF devices can only match traffic targeted at the
+ configured MAC addresses (see ``rte_eth_dev_mac_addr_add()``).
+
+.. note::
+
+ MAC addresses not already present in the bridge table of the associated
+ kernel network device will be added and cleaned up by the PMD when closing
+ the device. In case of ungraceful program termination, some entries may
+ remain present and should be removed manually by other means.
+
+- When Multi-Packet Rx queue is configured (``mprq_en``), an Rx packet can be
+ externally attached to a user-provided mbuf, with EXT_ATTACHED_MBUF set in
+ ol_flags. As the mempool for the external buffer is managed by the PMD, all
+ the Rx mbufs must be freed before the device is closed. Otherwise, the
+ mempool of the external buffers will be freed by the PMD and the application
+ still holding the external buffers may be corrupted (see the sketch after
+ this list).
+
+- If Multi-Packet Rx queue is configured (``mprq_en``) and Rx CQE compression is
+ enabled (``rxq_cqe_comp_en``) at the same time, RSS hash result is not fully
+ supported. Some Rx packets may not have PKT_RX_RSS_HASH.
+
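+A minimal sketch of the rule above, assuming the application drains its Rx
+queues before closing the device (the function name and burst size are
+illustrative only):
+
+.. code-block:: c
+
+   #include <rte_ethdev.h>
+   #include <rte_mbuf.h>
+
+   /* Free pending Rx mbufs, including MPRQ packets attached as external
+    * buffers, before rte_eth_dev_close() is called. */
+   static void
+   drain_rx_queue(uint16_t port_id, uint16_t queue_id)
+   {
+           struct rte_mbuf *pkts[32];
+           uint16_t i, nb;
+
+           while ((nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32)) > 0)
+                   for (i = 0; i != nb; i++) {
+                           /* RTE_MBUF_HAS_EXTBUF() tells whether this mbuf
+                            * carries a PMD-managed external buffer; keep
+                            * EXT_ATTACHED_MBUF intact and free the mbuf so
+                            * the stride returns to the PMD's mempool. */
+                           rte_pktmbuf_free(pkts[i]);
+                   }
+   }
+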
Statistics
----------
@@ -171,9 +171,10 @@ These options can be modified in the ``.config`` file.
missing with ``ldd(1)``.
It works by moving these dependencies to a purpose-built rdma-core "glue"
- plug-in, which must either be installed in ``CONFIG_RTE_EAL_PMD_PATH`` if
- set, or in a standard location for the dynamic linker (e.g. ``/lib``) if
- left to the default empty string (``""``).
+ plug-in which must either be installed in a directory whose name is based
+ on ``CONFIG_RTE_EAL_PMD_PATH`` suffixed with ``-glue`` if set, or in a
+ standard location for the dynamic linker (e.g. ``/lib``) if left to the
+ default empty string (``""``).
This option has no performance impact.
@@ -183,14 +184,6 @@ These options can be modified in the ``.config`` file.
adds additional run-time checks and debugging messages at the cost of
lower performance.
-- ``CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE`` (default **8**)
-
- Maximum number of cached memory pools (MPs) per TX queue. Each MP from
- which buffers are to be transmitted must be associated to memory regions
- (MRs). This is a slow operation that must be cached.
-
- This value is always 1 for RX queues since they use a single MP.
-
Environment variables
~~~~~~~~~~~~~~~~~~~~~
@@ -250,8 +243,55 @@ Run-time configuration
Supported on:
- - x86_64 with ConnectX-4, ConnectX-4 LX and ConnectX-5.
- - POWER8 and ARMv8 with ConnectX-4 LX and ConnectX-5.
+ - x86_64 with ConnectX-4, ConnectX-4 LX, ConnectX-5 and Bluefield.
+ - POWER8 and ARMv8 with ConnectX-4 LX, ConnectX-5 and Bluefield.
+
+- ``mprq_en`` parameter [int]
+
+ A nonzero value enables configuring Multi-Packet Rx queues. An Rx queue is
+ configured as Multi-Packet RQ if the total number of Rx queues is
+ ``rxqs_min_mprq`` or more and Rx scatter isn't configured. Disabled by
+ default.
+
+ Multi-Packet Rx Queue (MPRQ a.k.a Striding RQ) can further save PCIe
+ bandwidth by posting a single large buffer for multiple packets. Instead of
+ posting one buffer per packet, one large buffer is posted in order to receive
+ multiple packets on it. An MPRQ buffer consists of multiple fixed-size
+ strides and each stride receives one packet. MPRQ can improve throughput for
+ small-packet traffic.
+
+ When MPRQ is enabled, max_rx_pkt_len can be larger than the size of the
+ user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. The PMD will
+ configure a stride size large enough to accommodate max_rx_pkt_len as long as
+ the device allows. Note that this can waste system memory compared to
+ enabling Rx scatter and multi-segment packets.
+
+- ``mprq_log_stride_num`` parameter [int]
+
+ Log 2 of the number of strides for Multi-Packet Rx queue. Configuring more
+ strides can reduce PCIe traffic further. If the configured value is not
+ within the range supported by the device, the default value will be set with
+ a warning message. The default value is 4, which is 16 strides per buffer,
+ valid only if ``mprq_en`` is set.
+
+ The Rx queue size should be bigger than the number of strides.
+
+- ``mprq_max_memcpy_len`` parameter [int]
+
+ The maximum length of packet to memcpy in case of Multi-Packet Rx queue. An
+ Rx packet is mem-copied to a user-provided mbuf if the size of the Rx packet
+ is less than or equal to this parameter. Otherwise, the PMD will attach the
+ Rx packet to the mbuf by external buffer attachment -
+ ``rte_pktmbuf_attach_extbuf()``. A mempool for the external buffers will be
+ allocated and managed by the PMD. If an Rx packet is externally attached,
+ the ol_flags field of the mbuf will have EXT_ATTACHED_MBUF set and this flag
+ must be preserved. ``RTE_MBUF_HAS_EXTBUF()`` checks the flag. The default
+ value is 128, valid only if ``mprq_en`` is set.
+
+- ``rxqs_min_mprq`` parameter [int]
+
+ Configure Rx queues as Multi-Packet RQ if the total number of Rx queues is
+ greater than or equal to this value. The default value is 12, valid only if
+ ``mprq_en`` is set.
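+
+ For instance, a hypothetical testpmd invocation enabling MPRQ on two Rx
+ queues could look like this (the PCI address is illustrative only):
+
+ .. code-block:: console
+
+    testpmd -w 0000:03:00.0,mprq_en=1,rxqs_min_mprq=2 -- -i --rxq=2 --txq=2
+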
- ``txq_inline`` parameter [int]
@@ -270,34 +310,35 @@ Run-time configuration
This option should be used in combination with ``txq_inline`` above.
- On ConnectX-4, ConnectX-4 LX and ConnectX-5 without Enhanced MPW:
+ On ConnectX-4, ConnectX-4 LX, ConnectX-5 and Bluefield without
+ Enhanced MPW:
- Disabled by default.
- In case ``txq_inline`` is set recommendation is 4.
- On ConnectX-5 with Enhanced MPW:
+ On ConnectX-5 and Bluefield with Enhanced MPW:
- Set to 8 by default.
- ``txq_mpw_en`` parameter [int]
A nonzero value enables multi-packet send (MPS) for ConnectX-4 Lx and
- enhanced multi-packet send (Enhanced MPS) for ConnectX-5. MPS allows the
- TX burst function to pack up multiple packets in a single descriptor
- session in order to save PCI bandwidth and improve performance at the
- cost of a slightly higher CPU usage. When ``txq_inline`` is set along
- with ``txq_mpw_en``, TX burst function tries to copy entire packet data
- on to TX descriptor instead of including pointer of packet only if there
- is enough room remained in the descriptor. ``txq_inline`` sets
- per-descriptor space for either pointers or inlined packets. In addition,
- Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
- in the same descriptor.
+ enhanced multi-packet send (Enhanced MPS) for ConnectX-5 and Bluefield.
+ MPS allows the TX burst function to pack multiple packets in a
+ single descriptor session in order to save PCI bandwidth and improve
+ performance at the cost of a slightly higher CPU usage. When
+ ``txq_inline`` is set along with ``txq_mpw_en``, the TX burst function
+ copies the entire packet data onto the TX descriptor instead of including
+ a pointer to the packet, provided there is enough room remaining in the
+ descriptor. ``txq_inline`` sets the per-descriptor space for either
+ pointers or inlined packets. In addition, Enhanced MPS supports hybrid
+ mode - mixing inlined packets and pointers in the same descriptor.
This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``.
When those offloads are requested the MPS send function will not be used.
- It is currently only supported on the ConnectX-4 Lx and ConnectX-5
+ It is currently only supported on the ConnectX-4 Lx, ConnectX-5 and Bluefield
families of adapters. Enabled by default.
- ``txq_mpw_hdr_dseg_en`` parameter [int]
@@ -318,14 +359,14 @@ Run-time configuration
- ``tx_vec_en`` parameter [int]
- A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of
+ A nonzero value enables Tx vector on ConnectX-5 and Bluefield NICs if the number of
global Tx queues on the port is lesser than MLX5_VPMD_MIN_TXQS.
This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO,
DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, DEV_TX_OFFLOAD_VLAN_INSERT``.
When those offloads are requested the MPS send function will not be used.
- Enabled by default on ConnectX-5.
+ Enabled by default on ConnectX-5 and Bluefield.
- ``rx_vec_en`` parameter [int]
@@ -334,6 +375,53 @@ Run-time configuration
Enabled by default.
+- ``vf_nl_en`` parameter [int]
+
+ A nonzero value enables Netlink requests from the VF to add/remove MAC
+ addresses and/or enable/disable promiscuous/all-multicast mode on the
+ Netdevice. Otherwise the relevant configuration must be done with Linux
+ iproute2 tools, as shown below. This is a prerequisite to receive this
+ kind of traffic.
+
+ Enabled by default, valid only on VF devices and ignored otherwise.
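+
+ For instance, a hypothetical iproute2 equivalent for setting a MAC address
+ on the VF Netdevice (the interface name and address are illustrative only):
+
+ .. code-block:: console
+
+    ip link set dev enp3s0f0v0 address 52:54:00:12:34:56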
+
+- ``l3_vxlan_en`` parameter [int]
+
+ A nonzero value allows L3 VXLAN and VXLAN-GPE flow creation. To enable
+ L3 VXLAN or VXLAN-GPE, users have to configure the firmware and enable this
+ parameter. This is a prerequisite to receive this kind of traffic.
+
+ Disabled by default.
+
+- ``representor`` parameter [list]
+
+ This parameter can be used to instantiate DPDK Ethernet devices from
+ existing port (or VF) representors configured on the device.
+
+ It is a standard parameter whose format is described in
+ :ref:`ethernet_device_standard_device_arguments`.
+
+ For instance, to probe port representors 0 through 2::
+
+ representor=[0-2]
+
+Firmware configuration
+~~~~~~~~~~~~~~~~~~~~~~
+
+- L3 VXLAN and VXLAN-GPE destination UDP port
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> set IP_OVER_VXLAN_EN=1
+ mlxconfig -d <mst device> set IP_OVER_VXLAN_PORT=<udp dport>
+
+ Verify configurations are set:
+
+ .. code-block:: console
+
+ mlxconfig -d <mst device> query | grep IP_OVER_VXLAN
+ IP_OVER_VXLAN_EN True(1)
+ IP_OVER_VXLAN_PORT <udp dport>
+
Prerequisites
-------------
@@ -353,12 +441,19 @@ DPDK and must be installed separately:
- **libmlx5**
- Low-level user space driver library for Mellanox ConnectX-4/ConnectX-5
- devices, it is automatically loaded by libibverbs.
+ Low-level user space driver library for Mellanox
+ ConnectX-4/ConnectX-5/Bluefield devices; it is automatically loaded
+ by libibverbs.
This library basically implements send/receive calls to the hardware
queues.
+- **libmnl**
+
+ Minimalistic Netlink library mainly relied on to manage E-Switch flow
+ rules (i.e. those with the "transfer" attribute and typically involving
+ port representors).
+
- **Kernel modules**
They provide the kernel-side Verbs API and low level device drivers that
@@ -368,15 +463,16 @@ DPDK and must be installed separately:
Unlike most other PMDs, these modules must remain loaded and bound to
their devices:
- - mlx5_core: hardware driver managing Mellanox ConnectX-4/ConnectX-5
- devices and related Ethernet kernel network devices.
+ - mlx5_core: hardware driver managing Mellanox
+ ConnectX-4/ConnectX-5/Bluefield devices and related Ethernet kernel
+ network devices.
- mlx5_ib: InifiniBand device driver.
- ib_uverbs: user space driver for Verbs (entry point for libibverbs).
- **Firmware update**
- Mellanox OFED releases include firmware updates for ConnectX-4/ConnectX-5
- adapters.
+ Mellanox OFED releases include firmware updates for
+ ConnectX-4/ConnectX-5/Bluefield adapters.
Because each release provides new features, these updates must be applied to
match the kernel modules and libraries they come with.
@@ -399,6 +495,10 @@ RMDA Core with Linux Kernel
- Minimal kernel version : v4.14 or the most recent 4.14-rc (see `Linux installation documentation`_)
- Minimal rdma-core version: v15+ commit 0c5f5765213a ("Merge pull request #227 from yishaih/tm")
(see `RDMA Core installation documentation`_)
+- When building for i686 use:
+
+ - rdma-core version 18.0 or above built with 32bit support.
+ - Kernel version 4.14.41 or above.
.. _`Linux installation documentation`: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git/plain/Documentation/admin-guide/README.rst
.. _`RDMA Core installation documentation`: https://raw.githubusercontent.com/linux-rdma/rdma-core/master/README.md
@@ -406,13 +506,14 @@ RMDA Core with Linux Kernel
Mellanox OFED
^^^^^^^^^^^^^
-- Mellanox OFED version: **4.2, 4.3**.
+- Mellanox OFED version: **4.3, 4.4**.
- firmware version:
- ConnectX-4: **12.21.1000** and above.
- ConnectX-4 Lx: **14.21.1000** and above.
- ConnectX-5: **16.21.1000** and above.
- ConnectX-5 Ex: **16.21.1000** and above.
+ - Bluefield: **18.99.3950** and above.
While these libraries and kernel modules are available on OpenFabrics
Alliance's `website <https://www.openfabrics.org/>`__ and provided by package
@@ -431,6 +532,19 @@ required from that distribution.
this DPDK release was developed and tested against is strongly
recommended. Please check the `prerequisites`_.
+Libmnl
+^^^^^^
+
+Minimal version for libmnl is **1.0.3**.
+
+As a dependency of the **iproute2** suite, this library is often installed
+by default. It is otherwise readily available through standard system
+packages.
+
+Its development headers must be installed in order to compile this PMD.
+These packages are usually named **libmnl-dev** or **libmnl-devel**
+depending on the Linux distribution.
+
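+For instance, on a Debian-based distribution the development headers can
+hypothetically be installed with:
+
+.. code-block:: console
+
+   apt-get install libmnl-dev
+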
Supported NICs
--------------
@@ -603,6 +717,12 @@ Performance tuning
The XXX can be different on different systems. Make sure to configure
according to the setpci output.
+7. To minimize the overhead of searching Memory Regions:
+
+ - ``--socket-mem`` is recommended to reserve a predictable amount of memory.
+ - Configure a per-lcore cache when creating mempools for packet buffers.
+ - Refrain from dynamically allocating/freeing memory at run time.
+
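+ A minimal sketch of the mempool advice above, assuming mbufs are used as
+ packet buffers (the pool name and sizes are illustrative only):
+
+ .. code-block:: c
+
+    #include <rte_lcore.h>
+    #include <rte_mbuf.h>
+
+    /* A nonzero cache_size keeps recently used mbufs per lcore, which also
+     * keeps their Memory Region lookups warm. */
+    struct rte_mempool *pool = rte_pktmbuf_pool_create("mbuf_pool",
+            8191,                      /* number of mbufs */
+            256,                       /* per-lcore cache size */
+            0,                         /* application private area size */
+            RTE_MBUF_DEFAULT_BUF_SIZE, /* data room size */
+            rte_socket_id());
+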
Notes for testpmd
-----------------
@@ -624,7 +744,7 @@ Usage example
-------------
This section demonstrates how to launch **testpmd** with Mellanox
-ConnectX-4/ConnectX-5 devices managed by librte_pmd_mlx5.
+ConnectX-4/ConnectX-5/Bluefield devices managed by librte_pmd_mlx5.
#. Load the kernel modules:
diff --git a/doc/guides/nics/mrvl.rst b/doc/guides/nics/mrvl.rst
deleted file mode 100644
index b7f32921..00000000
--- a/doc/guides/nics/mrvl.rst
+++ /dev/null
@@ -1,275 +0,0 @@
-.. BSD LICENSE
- Copyright(c) 2017 Marvell International Ltd.
- Copyright(c) 2017 Semihalf.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-.. _mrvl_poll_mode_driver:
-
-MRVL Poll Mode Driver
-======================
-
-The MRVL PMD (librte_pmd_mrvl) provides poll mode driver support
-for the Marvell PPv2 (Packet Processor v2) 1/10 Gbps adapter.
-
-Detailed information about SoCs that use PPv2 can be obtained here:
-
-* https://www.marvell.com/embedded-processors/armada-70xx/
-* https://www.marvell.com/embedded-processors/armada-80xx/
-
-.. Note::
-
- Due to external dependencies, this driver is disabled by default. It must
- be enabled manually by setting relevant configuration option manually.
- Please refer to `Config File Options`_ section for further details.
-
-
-Features
---------
-
-Features of the MRVL PMD are:
-
-- Speed capabilities
-- Link status
-- Queue start/stop
-- MTU update
-- Jumbo frame
-- Promiscuous mode
-- Allmulticast mode
-- Unicast MAC filter
-- Multicast MAC filter
-- RSS hash
-- VLAN filter
-- CRC offload
-- L3 checksum offload
-- L4 checksum offload
-- Packet type parsing
-- Basic stats
-- QoS
-
-
-Limitations
------------
-
-- Number of lcores is limited to 9 by MUSDK internal design. If more lcores
- need to be allocated, locking will have to be considered. Number of available
- lcores can be changed via ``MRVL_MUSDK_HIFS_RESERVED`` define in
- ``mrvl_ethdev.c`` source file.
-
-- Flushing vlans added for filtering is not possible due to MUSDK missing
- functionality. Current workaround is to reset board so that PPv2 has a
- chance to start in a sane state.
-
-
-Prerequisites
--------------
-
-- Custom Linux Kernel sources
-
- .. code-block:: console
-
- git clone https://github.com/MarvellEmbeddedProcessors/linux-marvell.git -b linux-4.4.52-armada-17.10
-
-- Out of tree `mvpp2x_sysfs` kernel module sources
-
- .. code-block:: console
-
- git clone https://github.com/MarvellEmbeddedProcessors/mvpp2x-marvell.git -b mvpp2x-armada-17.10
-
-- MUSDK (Marvell User-Space SDK) sources
-
- .. code-block:: console
-
- git clone https://github.com/MarvellEmbeddedProcessors/musdk-marvell.git -b musdk-armada-17.10
-
- MUSDK is a light-weight library that provides direct access to Marvell's
- PPv2 (Packet Processor v2). Alternatively prebuilt MUSDK library can be
- requested from `Marvell Extranet <https://extranet.marvell.com>`_. Once
- approval has been granted, library can be found by typing ``musdk`` in
- the search box.
-
- MUSDK must be configured with the following features:
-
- .. code-block:: console
-
- --enable-bpool-dma=64
-
-- DPDK environment
-
- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to setup
- DPDK environment.
-
-
-Config File Options
--------------------
-
-The following options can be modified in the ``config`` file.
-
-- ``CONFIG_RTE_LIBRTE_MRVL_PMD`` (default ``n``)
-
- Toggle compilation of the librte_pmd_mrvl driver.
-
-
-QoS Configuration
------------------
-
-QoS configuration is done through external configuration file. Path to the
-file must be given as `cfg` in driver's vdev parameter list.
-
-Configuration syntax
-~~~~~~~~~~~~~~~~~~~~
-
-.. code-block:: console
-
- [port <portnum> default]
- default_tc = <default_tc>
- mapping_priority = <mapping_priority>
-
- [port <portnum> tc <traffic_class>]
- rxq = <rx_queue_list>
- pcp = <pcp_list>
- dscp = <dscp_list>
-
- [port <portnum> tc <traffic_class>]
- rxq = <rx_queue_list>
- pcp = <pcp_list>
- dscp = <dscp_list>
-
-Where:
-
-- ``<portnum>``: DPDK Port number (0..n).
-
-- ``<default_tc>``: Default traffic class (e.g. 0)
-
-- ``<mapping_priority>``: QoS priority for mapping (`ip`, `vlan`, `ip/vlan` or `vlan/ip`).
-
-- ``<traffic_class>``: Traffic Class to be configured.
-
-- ``<rx_queue_list>``: List of DPDK RX queues (e.g. 0 1 3-4)
-
-- ``<pcp_list>``: List of PCP values to handle in particular TC (e.g. 0 1 3-4 7).
-
-- ``<dscp_list>``: List of DSCP values to handle in particular TC (e.g. 0-12 32-48 63).
-
-Setting PCP/DSCP values for the default TC is not required. All PCP/DSCP
-values not assigned explicitly to particular TC will be handled by the
-default TC.
-
-Configuration file example
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. code-block:: console
-
- [port 0 default]
- default_tc = 0
- qos_mode = ip
-
- [port 0 tc 0]
- rxq = 0 1
-
- [port 0 tc 1]
- rxq = 2
- pcp = 5 6 7
- dscp = 26-38
-
- [port 1 default]
- default_tc = 0
- qos_mode = vlan/ip
-
- [port 1 tc 0]
- rxq = 0
-
- [port 1 tc 1]
- rxq = 1 2
- pcp = 5 6 7
- dscp = 26-38
-
-Usage example
-^^^^^^^^^^^^^
-
-.. code-block:: console
-
- ./testpmd --vdev=eth_mrvl,iface=eth0,iface=eth2,cfg=/home/user/mrvl.conf \
- -c 7 -- -i -a --rxq=2
-
-
-Building DPDK
--------------
-
-Driver needs precompiled MUSDK library during compilation.
-
-.. code-block:: console
-
- export CROSS_COMPILE=<toolchain>/bin/aarch64-linux-gnu-
- ./bootstrap
- ./configure --host=aarch64-linux-gnu --enable-bpool-dma=64
- make install
-
-MUSDK will be installed to `usr/local` under current directory.
-For the detailed build instructions please consult ``doc/musdk_get_started.txt``.
-
-Before the DPDK build process the environmental variable ``LIBMUSDK_PATH`` with
-the path to the MUSDK installation directory needs to be exported.
-
-.. code-block:: console
-
- export LIBMUSDK_PATH=<musdk>/usr/local
- export CROSS=aarch64-linux-gnu-
- make config T=arm64-armv8a-linuxapp-gcc
- sed -ri 's,(MRVL_PMD=)n,\1y,' build/.config
- make
-
-Usage Example
--------------
-
-MRVL PMD requires extra out of tree kernel modules to function properly.
-`musdk_uio` and `mv_pp_uio` sources are part of the MUSDK. Please consult
-``doc/musdk_get_started.txt`` for the detailed build instructions.
-For `mvpp2x_sysfs` please consult ``Documentation/pp22_sysfs.txt`` for the
-detailed build instructions.
-
-.. code-block:: console
-
- insmod musdk_uio.ko
- insmod mv_pp_uio.ko
- insmod mvpp2x_sysfs.ko
-
-Additionally interfaces used by DPDK application need to be put up:
-
-.. code-block:: console
-
- ip link set eth0 up
- ip link set eth2 up
-
-In order to run testpmd example application following command can be used:
-
-.. code-block:: console
-
- ./testpmd --vdev=eth_mrvl,iface=eth0,iface=eth2 -c 7 -- \
- --burst=128 --txd=2048 --rxd=1024 --rxq=2 --txq=2 --nb-cores=2 \
- -i -a --rss-udp
diff --git a/doc/guides/nics/mvpp2.rst b/doc/guides/nics/mvpp2.rst
new file mode 100644
index 00000000..0408752c
--- /dev/null
+++ b/doc/guides/nics/mvpp2.rst
@@ -0,0 +1,520 @@
+.. BSD LICENSE
+ Copyright(c) 2017 Marvell International Ltd.
+ Copyright(c) 2017 Semihalf.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.. _mvpp2_poll_mode_driver:
+
+MVPP2 Poll Mode Driver
+======================
+
+The MVPP2 PMD (librte_pmd_mvpp2) provides poll mode driver support
+for the Marvell PPv2 (Packet Processor v2) 1/10 Gbps adapter.
+
+Detailed information about SoCs that use PPv2 can be obtained here:
+
+* https://www.marvell.com/embedded-processors/armada-70xx/
+* https://www.marvell.com/embedded-processors/armada-80xx/
+
+.. Note::
+
+ Due to external dependencies, this driver is disabled by default. It must
+ be enabled manually by setting the relevant configuration option.
+ Please refer to `Config File Options`_ section for further details.
+
+
+Features
+--------
+
+Features of the MVPP2 PMD are:
+
+- Speed capabilities
+- Link status
+- Queue start/stop
+- MTU update
+- Jumbo frame
+- Promiscuous mode
+- Allmulticast mode
+- Unicast MAC filter
+- Multicast MAC filter
+- RSS hash
+- VLAN filter
+- CRC offload
+- L3 checksum offload
+- L4 checksum offload
+- Packet type parsing
+- Basic stats
+- Extended stats
+- QoS
+- RX flow control
+- TX queue start/stop
+
+
+Limitations
+-----------
+
+- The number of lcores is limited to 9 by the MUSDK internal design. If more
+ lcores need to be allocated, locking will have to be considered. The number
+ of available lcores can be changed via the ``MRVL_MUSDK_HIFS_RESERVED``
+ define in the ``mrvl_ethdev.c`` source file.
+
+- Flushing VLANs added for filtering is not possible due to missing MUSDK
+ functionality. The current workaround is to reset the board so that PPv2 has
+ a chance to start in a sane state.
+
+
+Prerequisites
+-------------
+
+- Custom Linux Kernel sources
+
+ .. code-block:: console
+
+ git clone https://github.com/MarvellEmbeddedProcessors/linux-marvell.git -b linux-4.4.52-armada-17.10
+
+- Out of tree `mvpp2x_sysfs` kernel module sources
+
+ .. code-block:: console
+
+ git clone https://github.com/MarvellEmbeddedProcessors/mvpp2x-marvell.git -b mvpp2x-armada-17.10
+
+- MUSDK (Marvell User-Space SDK) sources
+
+ .. code-block:: console
+
+ git clone https://github.com/MarvellEmbeddedProcessors/musdk-marvell.git -b musdk-armada-17.10
+
+ MUSDK is a light-weight library that provides direct access to Marvell's
+ PPv2 (Packet Processor v2). Alternatively, a prebuilt MUSDK library can be
+ requested from the `Marvell Extranet <https://extranet.marvell.com>`_. Once
+ approval has been granted, the library can be found by typing ``musdk`` in
+ the search box.
+
+ To get a better understanding of the library, consult the documentation
+ available in the ``doc`` top-level directory of the MUSDK sources.
+
+ MUSDK must be configured with the following features:
+
+ .. code-block:: console
+
+ --enable-bpool-dma=64
+
+- DPDK environment
+
+ Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to set up
+ the DPDK environment.
+
+
+Config File Options
+-------------------
+
+The following options can be modified in the ``config`` file.
+
+- ``CONFIG_RTE_LIBRTE_MVPP2_PMD`` (default ``n``)
+
+ Toggle compilation of the ``librte_pmd_mvpp2`` driver.
+
+
+QoS Configuration
+-----------------
+
+QoS configuration is done through an external configuration file. The path to
+the file must be given as `cfg` in the driver's vdev parameter list.
+
+Configuration syntax
+~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ [port <portnum> default]
+ default_tc = <default_tc>
+ mapping_priority = <mapping_priority>
+ policer_enable = <policer_enable>
+ token_unit = <token_unit>
+ color = <color_mode>
+ cir = <cir>
+ ebs = <ebs>
+ cbs = <cbs>
+
+ rate_limit_enable = <rate_limit_enable>
+ rate_limit = <rate_limit>
+ burst_size = <burst_size>
+
+ [port <portnum> tc <traffic_class>]
+ rxq = <rx_queue_list>
+ pcp = <pcp_list>
+ dscp = <dscp_list>
+ default_color = <default_color>
+
+ [port <portnum> tc <traffic_class>]
+ rxq = <rx_queue_list>
+ pcp = <pcp_list>
+ dscp = <dscp_list>
+
+ [port <portnum> txq <txqnum>]
+ sched_mode = <sched_mode>
+ wrr_weight = <wrr_weight>
+
+ rate_limit_enable = <rate_limit_enable>
+ rate_limit = <rate_limit>
+ burst_size = <burst_size>
+
+Where:
+
+- ``<portnum>``: DPDK Port number (0..n).
+
+- ``<default_tc>``: Default traffic class (e.g. 0)
+
+- ``<mapping_priority>``: QoS priority for mapping (`ip`, `vlan`, `ip/vlan` or `vlan/ip`).
+
+- ``<traffic_class>``: Traffic Class to be configured.
+
+- ``<rx_queue_list>``: List of DPDK RX queues (e.g. 0 1 3-4)
+
+- ``<pcp_list>``: List of PCP values to handle in particular TC (e.g. 0 1 3-4 7).
+
+- ``<dscp_list>``: List of DSCP values to handle in particular TC (e.g. 0-12 32-48 63).
+
+- ``<policer_enable>``: Enable ingress policer.
+
+- ``<token_unit>``: Policer token unit (`bytes` or `packets`).
+
+- ``<color_mode>``: Policer color mode (`aware` or `blind`).
+
+- ``<cir>``: Committed information rate in unit of kilo bits per second (data rate) or packets per second.
+
+- ``<cbs>``: Committed burst size in unit of kilo bytes or number of packets.
+
+- ``<ebs>``: Excess burst size in unit of kilo bytes or number of packets.
+
+- ``<default_color>``: Default color for specific tc.
+
+- ``<rate_limit_enable>``: Enables per port or per txq rate limiting.
+
+- ``<rate_limit>``: Committed information rate, in kilo bits per second.
+
+- ``<burst_size>``: Committed burst size, in kilo bytes.
+
+- ``<sched_mode>``: Egress scheduler mode (`wrr` or `sp`).
+
+- ``<wrr_weight>``: Txq weight.
+
+Setting PCP/DSCP values for the default TC is not required. All PCP/DSCP
+values not assigned explicitly to particular TC will be handled by the
+default TC.
+
+Configuration file example
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ [port 0 default]
+ default_tc = 0
+ mapping_priority = ip
+
+ rate_limit_enable = 1
+ rate_limit = 1000
+ burst_size = 2000
+
+ [port 0 tc 0]
+ rxq = 0 1
+
+ [port 0 txq 0]
+ sched_mode = wrr
+ wrr_weight = 10
+
+ [port 0 txq 1]
+ sched_mode = wrr
+ wrr_weight = 100
+
+ [port 0 txq 2]
+ sched_mode = sp
+
+ [port 0 tc 1]
+ rxq = 2
+ pcp = 5 6 7
+ dscp = 26-38
+
+ [port 1 default]
+ default_tc = 0
+ mapping_priority = vlan/ip
+
+ policer_enable = 1
+ token_unit = bytes
+ color = blind
+ cir = 100000
+ ebs = 64
+ cbs = 64
+
+ [port 1 tc 0]
+ rxq = 0
+ dscp = 10
+
+ [port 1 tc 1]
+ rxq = 1
+ dscp = 11-20
+
+ [port 1 tc 2]
+ rxq = 2
+ dscp = 30
+
+ [port 1 txq 0]
+ rate_limit_enable = 1
+ rate_limit = 10000
+ burst_size = 2000
+
+Usage example
+^^^^^^^^^^^^^
+
+.. code-block:: console
+
+ ./testpmd --vdev=eth_mvpp2,iface=eth0,iface=eth2,cfg=/home/user/mrvl.conf \
+ -c 7 -- -i -a --disable-hw-vlan-strip --rxq=3 --txq=3
+
+
+Building DPDK
+-------------
+
+The driver needs a precompiled MUSDK library during compilation.
+
+.. code-block:: console
+
+ export CROSS_COMPILE=<toolchain>/bin/aarch64-linux-gnu-
+ ./bootstrap
+ ./configure --host=aarch64-linux-gnu --enable-bpool-dma=64
+ make install
+
+MUSDK will be installed to `usr/local` under the current directory.
+For the detailed build instructions please consult ``doc/musdk_get_started.txt``.
+
+Before the DPDK build process, the environment variable ``LIBMUSDK_PATH`` with
+the path to the MUSDK installation directory needs to be exported.
+
+.. code-block:: console
+
+ export LIBMUSDK_PATH=<musdk>/usr/local
+ export CROSS=aarch64-linux-gnu-
+ make config T=arm64-armv8a-linuxapp-gcc
+ sed -ri 's,(MVPP2_PMD=)n,\1y,' build/.config
+ make
+
+Flow API
+--------
+
+PPv2 offers packet classification capabilities via a classifier engine which
+can be configured through the generic flow API offered by DPDK.
+
+Supported flow actions
+~~~~~~~~~~~~~~~~~~~~~~
+
+The following flow action items are supported by the driver:
+
+* DROP
+* QUEUE
+
+Supported flow items
+~~~~~~~~~~~~~~~~~~~~
+
+The following flow items and their respective fields are supported by the driver:
+
+* ETH
+
+ * source MAC
+ * destination MAC
+ * ethertype
+
+* VLAN
+
+ * PCP
+ * VID
+
+* IPV4
+
+ * DSCP
+ * protocol
+ * source address
+ * destination address
+
+* IPV6
+
+ * flow label
+ * next header
+ * source address
+ * destination address
+
+* UDP
+
+ * source port
+ * destination port
+
+* TCP
+
+ * source port
+ * destination port
+
+Classifier match engine
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Classifier has an internal match engine which can be configured to
+operate in either exact or maskable mode.
+
+The mode is selected upon creation of the first unique flow rule as follows:
+
+* maskable, if the key size is up to 8 bytes.
+* exact otherwise, i.e. for keys bigger than 8 bytes.
+
+Where the key size equals the number of bytes of all fields specified
+in the flow items.
+
+.. table:: Examples of key size calculation
+
+ +----------------------------------------------------------------------------+-------------------+-------------+
+ | Flow pattern | Key size in bytes | Used engine |
+ +============================================================================+===================+=============+
+ | ETH (destination MAC) / VLAN (VID) | 6 + 2 = 8 | Maskable |
+ +----------------------------------------------------------------------------+-------------------+-------------+
+ | VLAN (VID) / IPV4 (source address) | 2 + 4 = 6 | Maskable |
+ +----------------------------------------------------------------------------+-------------------+-------------+
+ | TCP (source port, destination port) | 2 + 2 = 4 | Maskable |
+ +----------------------------------------------------------------------------+-------------------+-------------+
+ | VLAN (priority) / IPV4 (source address) | 1 + 4 = 5 | Maskable |
+ +----------------------------------------------------------------------------+-------------------+-------------+
+ | IPV4 (destination address) / UDP (source port, destination port) | 6 + 2 + 2 = 10 | Exact |
+ +----------------------------------------------------------------------------+-------------------+-------------+
+ | VLAN (VID) / IPV6 (flow label, destination address) | 2 + 3 + 16 = 21 | Exact |
+ +----------------------------------------------------------------------------+-------------------+-------------+
+ | IPV4 (DSCP, source address, destination address) | 1 + 4 + 4 = 9 | Exact |
+ +----------------------------------------------------------------------------+-------------------+-------------+
+ | IPV6 (flow label, source address, destination address) | 3 + 16 + 16 = 35 | Exact |
+ +----------------------------------------------------------------------------+-------------------+-------------+
+
+From the user's perspective, maskable mode means that masks specified
+via flow rules are respected. In exact match mode, masks
+which do not provide exact matching (all bits masked) are ignored.
+
+If a flow matches more than one classifier rule, the first
+matched rule (the one with the lowest index) takes precedence.
+
+Flow rules usage example
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before proceeding, run the testpmd user application:
+
+.. code-block:: console
+
+ ./testpmd --vdev=eth_mvpp2,iface=eth0,iface=eth2 -c 3 -- -i --p 3 -a --disable-hw-vlan-strip
+
+Example #1
+^^^^^^^^^^
+
+.. code-block:: console
+
+ testpmd> flow create 0 ingress pattern eth src is 10:11:12:13:14:15 / end actions drop / end
+
+In this case the key size is 6 bytes, thus the maskable type is selected.
+Testpmd will set the mask to ff:ff:ff:ff:ff:ff, i.e. traffic explicitly
+matching the above rule will be dropped.
+
+Example #2
+^^^^^^^^^^
+
+.. code-block:: console
+
+ testpmd> flow create 0 ingress pattern ipv4 src spec 10.10.10.0 src mask 255.255.255.0 / tcp src spec 0x10 src mask 0x10 / end actions drop / end
+
+In this case the key size is 8 bytes, thus the maskable type is selected.
+Flows which have IPv4 source addresses ranging from 10.10.10.0 to 10.10.10.255
+and the TCP source port set to 16 will be dropped.
+
+Example #3
+^^^^^^^^^^
+
+.. code-block:: console
+
+ testpmd> flow create 0 ingress pattern vlan vid spec 0x10 vid mask 0x10 / ipv4 src spec 10.10.1.1 src mask 255.255.0.0 dst spec 11.11.11.1 dst mask 255.255.255.0 / end actions drop / end
+
+In this case the key size is 10 bytes, thus the exact type is selected.
+Even though each item has a partial mask set, the masks will be ignored.
+As a result only flows with the VID set to 16 and the IPv4 source and
+destination addresses set to 10.10.1.1 and 11.11.11.1, respectively, will
+be dropped.
+
+Limitations
+~~~~~~~~~~~
+
+The following limitations need to be taken into account while creating flow rules:
+
+* For IPv4 exact match type the key size must be up to 12 bytes.
+* For IPv6 exact match type the key size must be up to 36 bytes.
+* Following fields cannot be partially masked (all masks are treated as
+ if they were exact):
+
+ * ETH: ethertype
+ * VLAN: PCP, VID
+ * IPv4: protocol
+ * IPv6: next header
+ * TCP/UDP: source port, destination port
+
+* Only one classifier table can be created, thus all rules in the table
+ have to match the table format. The table format is set during creation of
+ the first unique flow rule.
+* Up to 5 fields can be specified per flow rule.
+* Up to 20 flow rules can be added.
+
+For additional information about the classifier please consult
+``doc/musdk_cls_user_guide.txt``.
+
+Usage Example
+-------------
+
+The MVPP2 PMD requires extra out-of-tree kernel modules to function properly.
+`musdk_uio` and `mv_pp_uio` sources are part of the MUSDK. Please consult
+``doc/musdk_get_started.txt`` for the detailed build instructions.
+For `mvpp2x_sysfs` please consult ``Documentation/pp22_sysfs.txt`` for the
+detailed build instructions.
+
+.. code-block:: console
+
+ insmod musdk_uio.ko
+ insmod mv_pp_uio.ko
+ insmod mvpp2x_sysfs.ko
+
+Additionally, interfaces used by the DPDK application need to be brought up:
+
+.. code-block:: console
+
+ ip link set eth0 up
+ ip link set eth2 up
+
+In order to run the testpmd example application, the following command can be used:
+
+.. code-block:: console
+
+ ./testpmd --vdev=eth_mvpp2,iface=eth0,iface=eth2 -c 7 -- \
+ --burst=128 --txd=2048 --rxd=1024 --rxq=2 --txq=2 --nb-cores=2 \
+ -i -a --rss-udp
diff --git a/doc/guides/nics/netvsc.rst b/doc/guides/nics/netvsc.rst
new file mode 100644
index 00000000..345f393c
--- /dev/null
+++ b/doc/guides/nics/netvsc.rst
@@ -0,0 +1,105 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) Microsoft Corporation.
+
+Netvsc poll mode driver
+=======================
+
+The Netvsc Poll Mode driver (PMD) provides support for the paravirtualized
+network device for Microsoft Hyper-V. It can be used with
+Windows Server 2008/2012/2016 and Windows 10.
+The device offers multi-queue support (if kernel and host support it),
+checksum and segmentation offloads.
+
+
+Features and Limitations of Hyper-V PMD
+---------------------------------------
+
+In this release, the Netvsc PMD provides the basic functionality of packet reception and transmission.
+
+* It supports mergeable buffers per packet when receiving packets and scattered buffers per packet
+ when transmitting packets. The supported packet size ranges from 64 to 65536 bytes.
+
+* The PMD supports multicast packets and promiscuous mode subject to restrictions on the host.
+ In order for this to work, the guest network configuration on Hyper-V must be configured to allow
+ MAC address spoofing.
+
+* The device has only a single MAC address.
+ The Hyper-V driver does not support MAC or VLAN filtering because the Hyper-V host does not support it.
+
+* VLAN tags are always stripped and presented in the mbuf tci field.
+
+* The Hyper-V driver does not use or support Link State or Rx interrupt.
+
+* The maximum number of queues is limited by the host (currently 64).
+ When used with the 4.16 kernel only a single queue is available.
+
+.. note::
+ This driver is intended for use with **Hyper-V only** and is
+ not recommended for use on Azure because Accelerated Networking
+ (SR-IOV) is not supported.
+
+ On Azure, use the :doc:`vdev_netvsc` driver, which
+ automatically configures the necessary TAP and failsafe drivers.
+
+
+Installation
+------------
+
+The Netvsc PMD is a standalone driver, similar to virtio and vmxnet3.
+Using the Netvsc PMD requires that the associated VMBUS device be bound to the userspace
+I/O device driver for Hyper-V (uio_hv_generic). By default, all netvsc devices
+will be bound to the Linux kernel driver; in order to use the netvsc PMD the
+device binding must first be overridden.
+
+The first step is to identify the network device to override.
+VMBUS uses Universally Unique Identifiers
+(`UUID`_) to identify devices on the bus similar to how PCI uses Domain:Bus:Function.
+The UUID associated with a Linux kernel network device can be determined
+by looking at the sysfs information. To find the UUID for eth1 and
+store it in a shell variable:
+
+ .. code-block:: console
+
+ DEV_UUID=$(basename $(readlink /sys/class/net/eth1/device))
+
+
+.. _`UUID`: https://en.wikipedia.org/wiki/Universally_unique_identifier
+
+There are several possible ways to assign the uio device driver for a device.
+The easiest way (but only on kernel 4.18 or later)
+is to use the `driverctl Device Driver control utility`_ to override
+the normal kernel device.
+
+ .. code-block:: console
+
+ driverctl -b vmbus set-override $DEV_UUID uio_hv_generic
+
+.. _`driverctl Device Driver control utility`: https://gitlab.com/driverctl/driverctl
+
+Any settings done with driverctl are by default persistent and will be reapplied
+on reboot.
+
+On older kernels, the same effect can be achieved by manual sysfs bind and unbind
+operations:
+
+ .. code-block:: console
+
+ NET_UUID="f8615163-df3e-46c5-913f-f2d2f965ed0e"
+ modprobe uio_hv_generic
+ echo $NET_UUID > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
+ echo $DEV_UUID > /sys/bus/vmbus/drivers/hv_netvsc/unbind
+ echo $DEV_UUID > /sys/bus/vmbus/drivers/uio_hv_generic/bind
+
+.. Note::
+
+ The dpdk-devbind.py script cannot be used since it only handles PCI devices.
+
+
+Prerequisites
+-------------
+
+The following prerequisites apply:
+
+* Linux kernel support for UIO on vmbus is done with the uio_hv_generic driver.
+ Full support of multiple queues requires the 4.17 kernel. It is possible
+ to use the netvsc PMD with the 4.16 kernel but it is limited to a single queue.
diff --git a/doc/guides/nics/nfp.rst b/doc/guides/nics/nfp.rst
index 99a3b76e..927c03c6 100644
--- a/doc/guides/nics/nfp.rst
+++ b/doc/guides/nics/nfp.rst
@@ -34,14 +34,14 @@ NFP poll mode driver library
Netronome's sixth generation of flow processors pack 216 programmable
cores and over 100 hardware accelerators that uniquely combine packet,
flow, security and content processing in a single device that scales
-up to 400 Gbps.
+up to 400 Gb/s.
This document explains how to use DPDK with the Netronome Poll Mode
Driver (PMD) supporting Netronome's Network Flow Processor 6xxx
(NFP-6xxx) and Netronome's Flow Processor 4xxx (NFP-4xxx).
NFP is a SRIOV capable device and the PMD driver supports the physical
-function (PF) and virtual functions (VFs).
+function (PF) and the virtual functions (VFs).
Dependencies
------------
@@ -49,17 +49,18 @@ Dependencies
Before using the Netronome's DPDK PMD some NFP configuration,
which is not related to DPDK, is required. The system requires
installation of **Netronome's BSP (Board Support Package)** along
-with some specific NFP firmware application. Netronome's NSP ABI
+with a specific NFP firmware application. Netronome's NSP ABI
version should be 0.20 or higher.
If you have a NFP device you should already have the code and
-documentation for doing all this configuration. Contact
+documentation for this configuration. Contact
**support@netronome.com** to obtain the latest available firmware.
-The NFP Linux netdev kernel driver for VFs is part of vanilla kernel
-since kernel version 4.5, and support for the PF since kernel version
-4.11. Support for older kernels can be obtained on Github at
-**https://github.com/Netronome/nfp-drv-kmods** along with build
+The NFP Linux netdev kernel driver for VFs has been a part of the
+vanilla kernel since kernel version 4.5, and support for the PF
+since kernel version 4.11. Support for older kernels can be obtained
+on Github at
+**https://github.com/Netronome/nfp-drv-kmods** along with the build
instructions.
NFP PMD needs to be used along with UIO ``igb_uio`` or VFIO (``vfio-pci``)
@@ -70,15 +71,15 @@ Building the software
Netronome's PMD code is provided in the **drivers/net/nfp** directory.
Although NFP PMD has Netronome´s BSP dependencies, it is possible to
-compile it along with other DPDK PMDs even if no BSP was installed before.
+compile it along with other DPDK PMDs even if no BSP was installed previously.
Of course, a DPDK app will require such a BSP installed for using the
NFP PMD, along with a specific NFP firmware application.
-Default PMD configuration is at **common_linuxapp configuration** file:
+Default PMD configuration is at the **common_linuxapp configuration** file:
- **CONFIG_RTE_LIBRTE_NFP_PMD=y**
-Once DPDK is built all the DPDK apps and examples include support for
+Once the DPDK is built all the DPDK apps and examples include support for
the NFP PMD.
@@ -91,37 +92,55 @@ for details.
Using the PF
------------
-NFP PMD has support for using the NFP PF as another DPDK port, but it does not
+NFP PMD supports using the NFP PF as another DPDK port, but it does not
have any functionality for controlling VFs. In fact, it is not possible to use
the PMD with the VFs if the PF is being used by DPDK, that is, with the NFP PF
-bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK version will
+bound to ``igb_uio`` or ``vfio-pci`` kernel drivers. Future DPDK versions will
have a PMD able to work with the PF and VFs at the same time and with the PF
implementing VF management along with other PF-only functionalities/offloads.
The PMD PF has extra work to do which will delay the DPDK app initialization
-like checking if a firmware is already available in the device, uploading the
-firmware if necessary, and configure the Link state properly when starting or
-stopping a PF port. Note that firmware upload is not always necessary which is
-the main delay for NFP PF PMD initialization.
+like uploading the firmware and configuring the link state properly when starting or
+stopping a PF port. Since DPDK 18.05 the firmware upload happens when
+a PF is initialized, which was not always true with older DPDK versions.
Depending on the Netronome product installed in the system, firmware files
should be available under ``/lib/firmware/netronome``. DPDK PMD supporting the
-PF requires a specific link, ``/lib/firmware/netronome/nic_dpdk_default.nffw``,
-which should be created automatically with Netronome's Agilio products
-installation.
+PF looks for a firmware file in this order:
+
+ 1) First try to find a firmware image specific for this device using the
+ NFP serial number:
+
+ serial-00-15-4d-12-20-65-10-ff.nffw
+
+ 2) Then try the PCI name:
+
+ pci-0000:04:00.0.nffw
+
+ 3) Finally try the card type and media:
+
+ nic_AMDA0099-0001_2x25.nffw
+
+Netronome's software packages install firmware files under ``/lib/firmware/netronome``
+to support all the Netronome's SmartNICs and different firmware applications.
+This is usually done using file names based on SmartNIC type and media and with a
+directory per firmware application. Options 1 and 2 for firmware filenames make
+it possible to have more than one SmartNIC installed, of the same or different
+types, and to upload a different firmware to each SmartNIC.
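+
+For instance, a hypothetical way to dedicate a firmware to one SmartNIC is to
+link a serial-based name (reusing the example names above) to the installed
+firmware file:
+
+.. code-block:: console
+
+   cd /lib/firmware/netronome
+   ln -s nic_AMDA0099-0001_2x25.nffw serial-00-15-4d-12-20-65-10-ff.nffw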
+
PF multiport support
--------------------
Some NFP cards support several physical ports with just one single PCI device.
-DPDK core is designed with the 1:1 relationship between PCI devices and DPDK
+The DPDK core is designed with a 1:1 relationship between PCI devices and DPDK
ports, so NFP PMD PF support requires handling the multiport case specifically.
During NFP PF initialization, the PMD will extract the information about the
number of PF ports from the firmware and will create as many DPDK ports as
needed.
Because the unusual relationship between a single PCI device and several DPDK
-ports, there are some limitations when using more than one PF DPDK ports: there
+ports, there are some limitations when using more than one PF DPDK port: there
is no support for RX interrupts and it is not possible either to use those PF
ports with the device hotplug functionality.
@@ -136,7 +155,7 @@ System configuration
get the drivers from the above Github repository and follow the instructions
for building and installing it.
- Virtual Functions need to be enabled before they can be used with the PMD.
+ VFs need to be enabled before they can be used with the PMD.
Before enabling the VFs it is useful to obtain information about the
current NFP PCI device detected by the system:
diff --git a/doc/guides/nics/octeontx.rst b/doc/guides/nics/octeontx.rst
index 8e2a2b75..f8eaaa63 100644
--- a/doc/guides/nics/octeontx.rst
+++ b/doc/guides/nics/octeontx.rst
@@ -165,8 +165,7 @@ CRC striping
~~~~~~~~~~~~
The OCTEONTX SoC family NICs strip the CRC for every packets coming into the
-host interface. So, CRC will be stripped even when the
-``rxmode.hw_strip_crc`` member is set to 0 in ``struct rte_eth_conf``.
+host interface irrespective of the offload configuration.
Maximum packet length
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/nics/overview.rst b/doc/guides/nics/overview.rst
index 0df0ef81..20cd52b0 100644
--- a/doc/guides/nics/overview.rst
+++ b/doc/guides/nics/overview.rst
@@ -1,32 +1,6 @@
-.. BSD LICENSE
+.. SPDX-License-Identifier: BSD-3-Clause
Copyright 2016 6WIND S.A.
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of 6WIND S.A. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
Overview of Networking Drivers
==============================
diff --git a/doc/guides/nics/pcap_ring.rst b/doc/guides/nics/pcap_ring.rst
index 7fd063c9..879e5430 100644
--- a/doc/guides/nics/pcap_ring.rst
+++ b/doc/guides/nics/pcap_ring.rst
@@ -71,11 +71,19 @@ The different stream types are:
tx_pcap=/path/to/file.pcap
* rx_iface: Defines a reception stream based on a network interface name.
- The driver reads packets coming from the given interface using the Linux kernel driver for that interface.
+ The driver reads packets from the given interface using the Linux kernel driver for that interface.
+ The driver captures both the incoming and outgoing packets on that interface.
The value is an interface name.
rx_iface=eth0
+* rx_iface_in: Defines a reception stream based on a network interface name.
+ The driver reads packets from the given interface using the Linux kernel driver for that interface.
+ The driver captures only the incoming packets on that interface.
+ The value is an interface name.
+
+ rx_iface_in=eth0
+
* tx_iface: Defines a transmission stream based on a network interface name.
The driver sends packets to the given interface using the Linux kernel driver for that interface.
The value is an interface name.
@@ -122,6 +130,21 @@ Forward packets through two network interfaces:
$RTE_TARGET/app/testpmd -l 0-3 -n 4 \
--vdev 'net_pcap0,iface=eth0' --vdev='net_pcap1,iface=eth1'
+Enable 2 tx queues on a network interface:
+
+.. code-block:: console
+
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
+ --vdev 'net_pcap0,rx_iface=eth1,tx_iface=eth1,tx_iface=eth1' \
+ -- --txq 2
+
+Read only incoming packets from a network interface and write them back to the same network interface:
+
+.. code-block:: console
+
+ $RTE_TARGET/app/testpmd -l 0-3 -n 4 \
+ --vdev 'net_pcap0,rx_iface_in=eth1,tx_iface=eth1'
+
Using libpcap-based PMD with the testpmd Application
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/guides/nics/qede.rst b/doc/guides/nics/qede.rst
index 63ce9b4c..cba38868 100644
--- a/doc/guides/nics/qede.rst
+++ b/doc/guides/nics/qede.rst
@@ -35,15 +35,15 @@ Supported Features
- N-tuple filter and flow director (limited support)
- NPAR (NIC Partitioning)
- SR-IOV VF
-- VXLAN Tunneling offload
+- GRE Tunneling offload
- GENEVE Tunneling offload
+- VXLAN Tunneling offload
- MPLSoUDP Tx Tunneling offload
Non-supported Features
----------------------
- SR-IOV PF
-- GRE and NVGRE Tunneling offloads
Co-existence considerations
---------------------------
@@ -58,19 +58,21 @@ Supported QLogic Adapters
Prerequisites
-------------
-- Requires storm firmware version **8.30.12.0**. Firmware may be available
+- Requires storm firmware version **8.33.12.0**. Firmware may be available
inbox in certain newer Linux distros under the standard directory
- ``E.g. /lib/firmware/qed/qed_init_values-8.30.12.0.bin``
+ ``E.g. /lib/firmware/qed/qed_init_values-8.33.12.0.bin``.
If the required firmware files are not available then download them from
- `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
- For downloading firmware file, select adapter category, model and DPDK Poll Mode Driver.
-
-- Requires management firmware (MFW) version **8.30.x.x** or higher to be
- flashed on to the adapter. If the required management firmware is not
- available then download from
- `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
- For downloading firmware upgrade utility, select adapter category, model and Linux distro.
- To flash the management firmware refer to the instructions in the QLogic Firmware Upgrade Utility Readme document.
+ `linux-firmware git repository <http://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/qed>`_
+ or `QLogic Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
+ To download the firmware file from the QLogic website, select the adapter category, model and DPDK Poll Mode Driver.
+
+- Requires the NIC to be updated minimally with the **8.30.x.x** Management Firmware (MFW) version supported for that NIC.
+ It is highly recommended that the NIC be updated with the latest available management firmware version to get the latest feature set.
+ Management Firmware and the Firmware Upgrade Utility for Cavium FastLinQ(r) branded adapters can be downloaded from
+ `Driver Download Center <http://driverdownloads.qlogic.com/QLogicDriverDownloads_UI/DefaultNewSearch.aspx>`_.
+ For downloading the Firmware Upgrade Utility, select the NIC category, model and Linux distro.
+ To update the management firmware, refer to the instructions in the Firmware Upgrade Utility Readme document.
+ For OEM branded adapters, please follow the instructions provided by the OEM to update the Management Firmware on the NIC.
- SR-IOV requires Linux PF driver version **8.20.x.x** or higher.
If the required PF driver is not available then download it from
@@ -104,7 +106,7 @@ enabling debugging options may affect system performance.
- ``CONFIG_RTE_LIBRTE_QEDE_FW`` (default **""**)
Gives absolute path of firmware file.
- ``Eg: "/lib/firmware/qed/qed_init_values-8.30.12.0.bin"``
+ ``Eg: "/lib/firmware/qed/qed_init_values-8.33.12.0.bin"``
Empty string indicates driver will pick up the firmware file
from the default location /lib/firmware/qed.
CAUTION this option is more for custom firmware, it is not
@@ -121,7 +123,7 @@ SR-IOV: Prerequisites and Sample Application Notes
This section provides instructions to configure SR-IOV with Linux OS.
-**Note**: librte_pmd_qede will be used to bind to SR-IOV VF device and Linux native kernel driver (qede) will function as SR-IOV PF driver. Requires PF driver to be 8.10.x.x or higher.
+**Note**: librte_pmd_qede will be used to bind to SR-IOV VF device and Linux native kernel driver (qede) will function as SR-IOV PF driver. Requires PF driver to be 8.20.x.x or higher.
#. Verify SR-IOV and ARI capability is enabled on the adapter using ``lspci``:
@@ -193,7 +195,7 @@ This section provides instructions to configure SR-IOV with Linux OS.
#. Running testpmd
- (Supply ``--log-level="pmd.net.qede.driver",7`` to view informational messages):
+ (Supply ``--log-level="pmd.net.qede.driver:info"`` to view informational messages):
Refer to the document
:ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>` to run
diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index ccdf5ff0..63939ec8 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -30,7 +30,8 @@ Solarflare libefx-based Poll Mode Driver
========================================
The SFC EFX PMD (**librte_pmd_sfc_efx**) provides poll mode driver support
-for **Solarflare SFN7xxx and SFN8xxx** family of 10/40 Gbps adapters.
+for **Solarflare SFN7xxx and SFN8xxx** family of 10/40 Gbps adapters and
+**Solarflare XtremeScale X2xxx** family of 10/25/40/50/100 Gbps adapters.
SFC EFX PMD has support for the latest Linux and FreeBSD operating systems.
More information can be found at `Solarflare Communications website
@@ -87,6 +88,8 @@ SFC EFX PMD has support for:
- Flow API
+- Loopback
+
Non-supported Features
----------------------
@@ -97,8 +100,6 @@ The features not yet supported include:
- Priority-based flow control
-- Loopback
-
- Configurable RX CRC stripping (always stripped)
- Header split on receive
@@ -120,22 +121,37 @@ required in the receive buffer.
It should be taken into account when mbuf pool for receive is created.
+Equal stride super-buffer mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the receive queue uses equal stride super-buffer DMA mode, one HW Rx
+descriptor carries many Rx buffers which contiguously follow each other
+with some stride (equal to total size of rte_mbuf as mempool object).
+Each Rx buffer is an independent rte_mbuf.
+However, a dedicated mempool manager must be used when the mempool for the Rx
+queue is created. The manager must support dequeue of a contiguous
+block of objects and provide the mempool info API to get the block size.
+
+Another limitation of the equal stride super-buffer mode, imposed by the
+firmware, is that it allows for a single RSS context.
+
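+The following is a minimal sketch of creating such a mempool. It assumes the
+"bucket" mempool driver (enabled via ``CONFIG_RTE_DRIVER_MEMPOOL_BUCKET``) as
+the manager supporting contiguous block dequeue; the pool name and sizing
+values are illustrative only.
+
+.. code-block:: c
+
+    #include <stdio.h>
+    #include <rte_lcore.h>
+    #include <rte_mbuf.h>
+    #include <rte_mempool.h>
+
+    /* Create an Rx mbuf pool backed by a contiguous-block capable
+     * mempool manager and report the block size it provides.
+     */
+    static struct rte_mempool *
+    create_essb_rx_pool(void)
+    {
+        struct rte_mempool *mp;
+        struct rte_mempool_info info;
+
+        mp = rte_pktmbuf_pool_create_by_ops("rx_essb_pool", 8192, 256, 0,
+                RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id(), "bucket");
+        if (mp != NULL && rte_mempool_ops_get_info(mp, &info) == 0)
+            printf("contiguous block size: %u objects\n",
+                   info.contig_block_size);
+
+        return mp;
+    }
+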
+
Tunnels support
---------------
-NVGRE, VXLAN and GENEVE tunnels are supported on SFN8xxx family adapters
-with full-feature firmware variant running.
+NVGRE, VXLAN and GENEVE tunnels are supported on SFN8xxx and X2xxx family
+adapters with full-feature firmware variant running.
**sfboot** should be used to configure NIC to run full-feature firmware variant.
See Solarflare Server Adapter User's Guide for details.
-SFN8xxx family adapters provide either inner or outer packet classes.
+SFN8xxx and X2xxx family adapters provide either inner or outer packet classes.
If adapter firmware advertises support for tunnels then the PMD
configures the hardware to report inner classes, and outer classes are
not reported in received packets.
However, for VXLAN and GENEVE tunnels the PMD does report UDP as the
outer layer 4 packet type.
-SFN8xxx family adapters report GENEVE packets as VXLAN.
+SFN8xxx and X2xxx family adapters report GENEVE packets as VXLAN.
If UDP ports are configured for only one tunnel type then it is safe to
treat VXLAN packet type indication as the corresponding UDP tunnel type.
@@ -152,7 +168,9 @@ Supported pattern items:
- VOID
- ETH (exact match of source/destination addresses, individual/group match
- of destination address, EtherType)
+ of destination address, EtherType in the outer frame and exact match of
+ destination addresses, individual/group match of destination address in
+ the inner frame)
- VLAN (exact match of VID, double-tagging is supported)
@@ -166,6 +184,13 @@ Supported pattern items:
- UDP (exact match of source/destination ports)
+- VXLAN (exact match of VXLAN network identifier)
+
+- GENEVE (exact match of virtual network identifier, only Ethernet (0x6558)
+ protocol type is supported)
+
+- NVGRE (exact match of virtual subnet ID)
+
Supported actions:
- VOID
@@ -174,6 +199,12 @@ Supported actions:
- RSS
+- DROP
+
+- FLAG (supported only with ef10_essb Rx datapath)
+
+- MARK (supported only with ef10_essb Rx datapath)
+
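+As an illustration of the flow rules above, the following sketch creates a rule
+matching VXLAN network identifier 42 and directing such packets to Rx queue 1
+(generic rte_flow usage; the queue index and VNI are arbitrary examples):
+
+.. code-block:: c
+
+    #include <rte_flow.h>
+
+    static struct rte_flow *
+    add_vni_rule(uint16_t port_id)
+    {
+        struct rte_flow_attr attr = { .ingress = 1 };
+        struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0, 0, 42 } };
+        struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
+        struct rte_flow_action_queue queue = { .index = 1 };
+        struct rte_flow_item pattern[] = {
+            { .type = RTE_FLOW_ITEM_TYPE_ETH },
+            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+            { .type = RTE_FLOW_ITEM_TYPE_UDP },
+            { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+              .spec = &vxlan_spec, .mask = &vxlan_mask },
+            { .type = RTE_FLOW_ITEM_TYPE_END },
+        };
+        struct rte_flow_action actions[] = {
+            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+            { .type = RTE_FLOW_ACTION_TYPE_END },
+        };
+        struct rte_flow_error err;
+
+        return rte_flow_create(port_id, &attr, pattern, actions, &err);
+    }
+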
Validating flow rules depends on the firmware variant.
Ethernet destination individual/group match
@@ -184,10 +215,31 @@ in the mask of destination address. If destination address in the spec is
multicast, it matches all multicast (and broadcast) packets, otherwise it
matches unicast packets that are not filtered by other flow rules.
+Exceptions to flow rules
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+There is a list of exceptional flow rule patterns which will not be
+accepted by the PMD. A pattern will be rejected if at least one of the
+following conditions is met:
+
+- Filtering by IPv4 or IPv6 EtherType without pattern items of internet
+ layer and above.
+
+- The last item is IPV4 or IPV6, and it's empty.
+
+- Filtering by TCP or UDP IP transport protocol without pattern items of
+ transport layer and above.
+
+- The last item is TCP or UDP, and it's empty.
+
Supported NICs
--------------
+- Solarflare XtremeScale Adapters:
+
+ - Solarflare X2522 Dual Port SFP28 10/25GbE Adapter
+
- Solarflare Flareon [Ultra] Server Adapters:
- Solarflare SFN8522 Dual Port SFP+ Server Adapter
@@ -258,15 +310,18 @@ whitelist option like "-w 02:00.0,arg1=value1,...".
Case-insensitive 1/y/yes/on or 0/n/no/off may be used to specify
boolean parameters value.
-- ``rx_datapath`` [auto|efx|ef10] (default **auto**)
+- ``rx_datapath`` [auto|efx|ef10|ef10_esps] (default **auto**)
Choose receive datapath implementation.
**auto** allows the driver itself to make a choice based on firmware
features available and required by the datapath implementation.
**efx** chooses libefx-based datapath which supports Rx scatter.
- **ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
+ **ef10** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which is
more efficient than libefx-based and provides richer packet type
classification, but lacks Rx scatter support.
+ **ef10_esps** chooses SFNX2xxx equal stride packed stream datapath
+ which may be used with the DPDK firmware variant only
+ (see notes about its limitations above).
- ``tx_datapath`` [auto|efx|ef10|ef10_simple] (default **auto**)
@@ -277,12 +332,12 @@ boolean parameters value.
(full-feature firmware variant only), TSO and multi-segment mbufs.
Mbuf segments may come from different mempools, and mbuf reference
counters are treated responsibly.
- **ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
+ **ef10** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which is
more efficient than libefx-based but has no VLAN insertion and TSO
support yet.
Mbuf segments may come from different mempools, and mbuf reference
counters are treated responsibly.
- **ef10_simple** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which
+ **ef10_simple** chooses EF10 (SFN7xxx, SFN8xxx, X2xxx) native datapath which
is even faster than **ef10** but does not support multi-segment
mbufs, disallows multiple mempools and neglects mbuf reference counters.
@@ -293,21 +348,73 @@ boolean parameters value.
**auto** allows NIC firmware to make a choice based on
installed licences and firmware variant configured using **sfboot**.
-- ``debug_init`` [bool] (default **n**)
-
- Enable extra logging during device initialization and startup.
-
-- ``mcdi_logging`` [bool] (default **n**)
-
- Enable extra logging of the communication with the NIC's management CPU.
- The logging is done using RTE_LOG() with INFO level and PMD type.
- The format is consumed by the Solarflare netlogdecode cross-platform tool.
-
- ``stats_update_period_ms`` [long] (default **1000**)
Adjust period in milliseconds to update port hardware statistics.
The accepted range is 0 to 65535. The value of **0** may be used
to disable periodic statistics update. One should note that it's
- only possible to set an arbitrary value on SFN8xxx provided that
+ only possible to set an arbitrary value on SFN8xxx and X2xxx provided that
firmware version is 6.2.1.1033 or higher, otherwise any positive
value will select a fixed update period of **1000** milliseconds
+
+- ``fw_variant`` [dont-care|full-feature|ultra-low-latency|
+ capture-packed-stream|dpdk] (default **dont-care**)
+
+ Choose the preferred firmware variant to use. In order for the selected
+ option to have an effect, the **sfboot** utility must be configured with the
+ **auto** firmware-variant option. The preferred firmware variant applies to
+ all ports on the NIC.
+ **dont-care** ensures that the driver can attach to an unprivileged function.
+ The datapath firmware type to use is controlled by the **sfboot**
+ utility.
+ **full-feature** chooses full featured firmware.
+ **ultra-low-latency** chooses firmware with fewer features but lower latency.
+ **capture-packed-stream** chooses firmware for SolarCapture packed stream
+ mode.
+ **dpdk** chooses DPDK firmware with equal stride super-buffer Rx mode
+ for a higher Rx packet rate and packet-mark support, and a firmware subvariant
+ without checksumming on transmit for a higher Tx packet rate when
+ checksumming is not required.
+
+- ``rxd_wait_timeout_ns`` [long] (default **200 us**)
+
+ Adjust the timeout in nanoseconds for which to head-of-line block while
+ waiting for Rx descriptors.
+ The accepted range is 0 to 400 ms.
+ Flow control should be enabled to make it work.
+ The value of **0** disables it and packets are dropped immediately.
+ When a packet is dropped because of no Rx descriptors,
+ ``rx_nodesc_drop_cnt`` counter grows.
+ The feature is supported only by the DPDK firmware variant when equal
+ stride super-buffer Rx mode is used.
+
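+As an illustration, these parameters may be combined with the whitelist
+option described above (the PCI address is hypothetical):
+
+.. code-block:: console
+
+   testpmd -w 0000:02:00.0,fw_variant=dpdk,rxd_wait_timeout_ns=100000 -- -i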
+
+Dynamic Logging Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One may leverage EAL option "--log-level" to change default levels
+for the log types supported by the driver. The option is used with
+an argument typically consisting of two parts separated by a colon.
+
+Level value is the last part, which takes a symbolic name (or integer).
+Log type is the former part, which may use shell match syntax.
+Depending on the choice of the expression, the given log level may
+be used either for some specific log type or for a subset of types.
+
+SFC EFX PMD provides the following log types available for control:
+
+- ``pmd.net.sfc.driver`` (default level is **notice**)
+
+ Affects driver-wide messages unrelated to any particular devices.
+
+- ``pmd.net.sfc.main`` (default level is **notice**)
+
+ Matches a subset of per-port log types registered during runtime.
+ A full name for a particular type may be obtained by appending a
+ dot and a PCI device identifier (``XXXX:XX:XX.X``) to the prefix.
+
+- ``pmd.net.sfc.mcdi`` (default level is **notice**)
+
+ Extra logging of the communication with the NIC's management CPU.
+ The format of the log is consumed by the Solarflare netlogdecode
+ cross-platform tool. May be managed per-port, as explained above.
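+
+For example, to raise all SFC log types to the debug level at once using the
+shell match form (the PCI address is hypothetical):
+
+.. code-block:: console
+
+   testpmd -w 0000:02:00.0 --log-level='pmd.net.sfc.*:debug' -- -i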
diff --git a/doc/guides/nics/softnic.rst b/doc/guides/nics/softnic.rst
new file mode 100644
index 00000000..6c2287a1
--- /dev/null
+++ b/doc/guides/nics/softnic.rst
@@ -0,0 +1,250 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation.
+
+Soft NIC Poll Mode Driver
+=========================
+
+The Soft NIC allows building custom NIC pipelines in software. The Soft NIC pipeline
+is DIY and reconfigurable through ``firmware`` (DPDK Packet Framework script).
+
+The Soft NIC leverages the DPDK Packet Framework libraries (librte_port,
+librte_table and librte_pipeline) to make it modular, flexible and extensible
+with new functionality. Please refer to DPDK Programmer's Guide, Chapter
+``Packet Framework`` and DPDK Sample Application User Guide,
+Chapter ``IP Pipeline Application`` for more details.
+
+The Soft NIC is configured through the standard DPDK ethdev API (ethdev, flow,
+QoS, security). The internal framework is not externally visible.
+
+Key benefits:
+ - Can be used to augment missing features to HW NICs.
+ - Allows consumption of advanced DPDK features without application redesign.
+ - Allows an out-of-the-box performance boost for DPDK consumer applications simply by
+ instantiating this type of Ethernet device.
+
+Flow
+----
+* ``Device creation``: Each Soft NIC instance is a virtual device.
+
+* ``Device start``: The Soft NIC firmware script is executed every time the device
+ is started. The firmware script typically creates several internal objects,
+ such as: memory pools, SW queues, traffic manager, action profiles, pipelines,
+ etc.
+
+* ``Device stop``: All the internal objects that were previously created by the
+ firmware script during device start are now destroyed.
+
+* ``Device run``: Each Soft NIC device needs one or several CPU cores to run.
+ The firmware script maps each internal pipeline to a CPU core. Multiple
+ pipelines can be mapped to the same CPU core. In order for a given pipeline
+ assigned to CPU core X to run, the application needs to periodically call on
+ CPU core X the ``rte_pmd_softnic_run()`` function for the current Soft NIC
+ device (see the sketch after this list).
+
+* ``Application run``: The application reads packets from the Soft NIC device RX
+ queues and writes packets to the Soft NIC device TX queues.
+
+Supported Operating Systems
+---------------------------
+
+Any Linux distribution fulfilling the conditions described in the ``System Requirements``
+section of :ref:`the DPDK documentation <linux_gsg>`; also refer to the *DPDK
+Release Notes*.
+
+Build options
+-------------
+
+The default PMD configuration is available in the common_linuxapp configuration file:
+
+CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y
+
+Once the DPDK is built, all the DPDK applications include support for the
+Soft NIC PMD.
+
+Soft NIC PMD arguments
+----------------------
+
+The user can specify the below arguments in EAL ``--vdev`` options to create the
+Soft NIC device instance:
+
+ --vdev "net_softnic0,firmware=firmware.cli,conn_port=8086"
+
+#. ``firmware``: path to the firmware script used for Soft NIC configuration.
+ The example "firmware" script is provided at `drivers/net/softnic/`.
+ (Optional: No, Default = NA)
+
+#. ``conn_port``: TCP connection port (non-zero value) used by a remote client
+ (for example telnet, netcat, etc.) to connect and configure the Soft NIC device at run-time.
+ (Optional: yes, Default value: 0, no connection with external client)
+
+#. ``cpu_id``: NUMA node ID. (Optional: yes, Default value: 0)
+
+#. ``tm_n_queues``: number of traffic manager's scheduler queues. The traffic manager
+ is based on DPDK *librte_sched* library. (Optional: yes, Default value: 65,536 queues)
+
+#. ``tm_qsize0``: size of scheduler queue 0 per traffic class of the pipes/subscribers.
+ (Optional: yes, Default: 64)
+
+#. ``tm_qsize1``: size of scheduler queue 1 per traffic class of the pipes/subscribers.
+ (Optional: yes, Default: 64)
+
+#. ``tm_qsize2``: size of scheduler queue 2 per traffic class of the pipes/subscribers.
+ (Optional: yes, Default: 64)
+
+#. ``tm_qsize3``: size of scheduler queue 3 per traffic class of the pipes/subscribers.
+ (Optional: yes, Default: 64)
+
+
+Soft NIC testing
+----------------
+
+* Run testpmd application in Soft NIC forwarding mode with loopback feature
+ enabled on Soft NIC port:
+
+ .. code-block:: console
+
+ ./testpmd -c 0x3 --vdev 'net_softnic0,firmware=<script path>/firmware.cli,cpu_id=0,conn_port=8086' -- -i
+ --forward-mode=softnic --portmask=0x2
+
+ .. code-block:: console
+
+ ...
+ Interactive-mode selected
+ Set softnic packet forwarding mode
+ ...
+ Configuring Port 0 (socket 0)
+ Port 0: 90:E2:BA:37:9D:DC
+ Configuring Port 1 (socket 0)
+
+ ; SPDX-License-Identifier: BSD-3-Clause
+ ; Copyright(c) 2018 Intel Corporation
+
+ link LINK dev 0000:02:00.0
+
+ pipeline RX period 10 offset_port_id 0
+ pipeline RX port in bsz 32 link LINK rxq 0
+ pipeline RX port out bsz 32 swq RXQ0
+ pipeline RX table match stub
+ pipeline RX port in 0 table 0
+
+ pipeline TX period 10 offset_port_id 0
+ pipeline TX port in bsz 32 swq TXQ0
+ pipeline TX port out bsz 32 link LINK txq 0
+ pipeline TX table match stub
+ pipeline TX port in 0 table 0
+
+ thread 1 pipeline RX enable
+ thread 1 pipeline TX enable
+ Port 1: 00:00:00:00:00:00
+ Checking link statuses...
+ Done
+ testpmd>
+
+* Start forwarding
+
+ .. code-block:: console
+
+ testpmd> start
+ softnic packet forwarding - ports=1 - cores=1 - streams=1 - NUMA support enabled, MP over anonymous pages disabled
+ Logical Core 1 (socket 0) forwards packets on 1 streams:
+ RX P=2/Q=0 (socket 0) -> TX P=2/Q=0 (socket 0) peer=02:00:00:00:00:02
+
+ softnic packet forwarding packets/burst=32
+ nb forwarding cores=1 - nb forwarding ports=1
+ port 0: RX queue number: 1 Tx queue number: 1
+ Rx offloads=0x1000 Tx offloads=0x0
+ RX queue: 0
+ RX desc=512 - RX free threshold=32
+ RX threshold registers: pthresh=8 hthresh=8 wthresh=0
+ RX Offloads=0x0
+ TX queue: 0
+ TX desc=512 - TX free threshold=32
+ TX threshold registers: pthresh=32 hthresh=0 wthresh=0
+ TX offloads=0x0 - TX RS bit threshold=32
+ port 1: RX queue number: 1 Tx queue number: 1
+ Rx offloads=0x0 Tx offloads=0x0
+ RX queue: 0
+ RX desc=0 - RX free threshold=0
+ RX threshold registers: pthresh=0 hthresh=0 wthresh=0
+ RX Offloads=0x0
+ TX queue: 0
+ TX desc=0 - TX free threshold=0
+ TX threshold registers: pthresh=0 hthresh=0 wthresh=0
+ TX offloads=0x0 - TX RS bit threshold=0
+
+* Start remote client (e.g. telnet) to communicate with the softnic device:
+
+ .. code-block:: console
+
+ $ telnet 127.0.0.1 8086
+ Trying 127.0.0.1...
+ Connected to 127.0.0.1.
+ Escape character is '^]'.
+
+ Welcome to Soft NIC!
+
+ softnic>
+
+* Add/update Soft NIC pipeline table match-action entries from telnet client:
+
+ .. code-block:: console
+
+ softnic> pipeline RX table 0 rule add match default action fwd port 0
+ softnic> pipeline TX table 0 rule add match default action fwd port 0
+
+Soft NIC Firmware
+-----------------
+
+The Soft NIC firmware, for example `softnic/firmware.cli`, consists of the following CLI commands
+for creating and managing software-based NIC pipelines. For more details, please refer to the CLI
+command description provided in `softnic/rte_eth_softnic_cli.c`.
+
+* Physical port for packet send/receive:
+
+ .. code-block:: console
+
+ link LINK dev 0000:02:00.0
+
+* Pipeline create:
+
+ .. code-block:: console
+
+ pipeline RX period 10 offset_port_id 0 (Soft NIC rx-path pipeline)
+ pipeline TX period 10 offset_port_id 0 (Soft NIC tx-path pipeline)
+
+* Pipeline input/output port create
+
+ .. code-block:: console
+
+ pipeline RX port in bsz 32 link LINK rxq 0 (Soft NIC rx pipeline input port)
+ pipeline RX port out bsz 32 swq RXQ0 (Soft NIC rx pipeline output port)
+ pipeline TX port in bsz 32 swq TXQ0 (Soft NIC tx pipeline input port)
+ pipeline TX port out bsz 32 link LINK txq 0 (Soft NIC tx pipeline output port)
+
+* Pipeline table create
+
+ .. code-block:: console
+
+ pipeline RX table match stub (Soft NIC rx pipeline match-action table)
+ pipeline TX table match stub (Soft NIC tx pipeline match-action table)
+
+* Pipeline input port connection with table
+
+ .. code-block:: console
+
+ pipeline RX port in 0 table 0 (Soft NIC rx pipeline input port 0 connection with table 0)
+ pipeline TX port in 0 table 0 (Soft NIC tx pipeline input port 0 connection with table 0)
+
+* Pipeline table match-action rules add
+
+ .. code-block:: console
+
+ pipeline RX table 0 rule add match default action fwd port 0 (Soft NIC rx pipeline table 0 rule)
+ pipeline TX table 0 rule add match default action fwd port 0 (Soft NIC tx pipeline table 0 rule)
+
+* Enable pipeline on CPU thread
+
+ .. code-block:: console
+
+ thread 1 pipeline RX enable (Soft NIC rx pipeline enable on cpu thread id 1)
+ thread 1 pipeline TX enable (Soft NIC tx pipeline enable on cpu thread id 1)
diff --git a/doc/guides/nics/szedata2.rst b/doc/guides/nics/szedata2.rst
index 1a5d4138..a2092f9c 100644
--- a/doc/guides/nics/szedata2.rst
+++ b/doc/guides/nics/szedata2.rst
@@ -1,32 +1,5 @@
-.. BSD LICENSE
+.. SPDX-License-Identifier: BSD-3-Clause
Copyright 2015 - 2016 CESNET
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of CESNET nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
SZEDATA2 poll mode driver library
=================================
@@ -70,8 +43,10 @@ separately:
* **Kernel modules**
+ * combo6core
* combov3
- * szedata2_cv3
+ * szedata2
+ * szedata2_cv3 or szedata2_cv3_fdt
Kernel modules manage initialization of hardware, allocation and
sharing of resources for user space applications.
@@ -79,6 +54,15 @@ separately:
Information about getting the dependencies can be found `here
<http://www.netcope.com/en/company/community-support/dpdk-libsze2>`_.
+Versions of the packages
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+The minimum version of the provided packages:
+
+* for DPDK from 18.05: **4.4.1**
+
+* for DPDK up to 18.02 (including): **3.0.5**
+
Configuration
-------------
@@ -89,45 +73,53 @@ These configuration options can be modified before compilation in the
Value **y** enables compilation of szedata2 PMD.
-* ``CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS`` default value: **0**
-
- This option defines type of firmware address space and must be set
- according to the used card and mode.
- Currently supported values are:
-
- * **0** - for cards (modes):
-
- * NFB-100G1 (100G1)
+Using the SZEDATA2 PMD
+----------------------
- * **1** - for cards (modes):
+Since DPDK version 16.04, the type of the SZEDATA2 PMD is PMD_PDEV.
+SZEDATA2 device is automatically recognized during EAL initialization.
+No special command line options are needed.
- * NFB-100G2Q (100G1)
+Kernel modules have to be loaded before running the DPDK application.
- * **2** - for cards (modes):
+NFB card architecture
+---------------------
- * NFB-40G2 (40G2)
- * NFB-100G2C (100G2)
- * NFB-100G2Q (40G2)
+The NFB cards are multi-port multi-queue cards, where (generally) data from any
+Ethernet port may be sent to any queue.
+They were historically represented in DPDK as a single port.
- * **3** - for cards (modes):
+However, the new NFB-200G2QL card employs an addon cable which allows it to be
+connected to two physical PCI-E slots at the same time (see the diagram below).
+This is done to allow 200 Gbps of traffic to be transferred through the PCI-E
+bus (note that a single PCI-E 3.0 x16 slot provides only 125 Gbps theoretical
+throughput).
- * NFB-40G2 (10G8)
- * NFB-100G2Q (10G8)
+Since each slot may be connected to a different CPU and therefore to a different
+NUMA node, the card is represented as two ports in DPDK (each with half of the
+queues), which allows DPDK to work with data from the individual queues on the
+right NUMA node.
- * **4** - for cards (modes):
+.. figure:: img/szedata2_nfb200g_architecture.*
+ :align: center
- * NFB-100G1 (10G10)
+ NFB-200G2QL high-level diagram
- * **5** - for experimental firmwares and future use
+Limitations
+-----------
-Using the SZEDATA2 PMD
-----------------------
+The SZEDATA2 PMD does not support operations related to Ethernet ports
+(link_up, link_down, set_mac_address, etc.).
-From DPDK version 16.04 the type of SZEDATA2 PMD is changed to PMD_PDEV.
-SZEDATA2 device is automatically recognized during EAL initialization.
-No special command line options are needed.
+NFB cards employ multiple Ethernet ports.
+Until now, Ethernet port-related operations were performed on all of them
+(since the whole card was represented as a single port).
+With the NFB-200G2QL card, this is no longer viable (see above).
-Kernel modules have to be loaded before running the DPDK application.
+Since there is no fixed mapping between the queues and Ethernet ports, and since
+a single card can be represented as two ports in DPDK, there is no way of
+telling which (if any) physical ports should be associated with individual
+ports in DPDK.
Example of usage
----------------
diff --git a/doc/guides/nics/tap.rst b/doc/guides/nics/tap.rst
index ea61be38..27148681 100644
--- a/doc/guides/nics/tap.rst
+++ b/doc/guides/nics/tap.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2016 Intel Corporation.
-Tap Poll Mode Driver
-====================
+Tun|Tap Poll Mode Driver
+========================
The ``rte_eth_tap.c`` PMD creates a device using TAP interfaces on the
local host. The PMD allows for DPDK and the host to communicate using a raw
@@ -37,6 +37,12 @@ for each interface string containing ``mac=fixed``. The MAC address is formatted
as 00:'d':'t':'a':'p':[00-FF]. Convert the characters to hex and you get the
actual MAC address: ``00:64:74:61:70:[00-FF]``.
+ --vdev=net_tap0,mac="00:64:74:61:70:11"
+
+The MAC address will take the user value passed as a string. The MAC address
+must be in the standard format with ``:`` as the delimiter. Each byte of the
+string is converted to hex, giving the actual MAC address: ``00:64:74:61:70:11``.
+
It is possible to specify a remote netdevice to capture packets from by adding
``remote=foo1``, for example::
@@ -77,6 +83,17 @@ can utilize that stack to handle the network protocols. Plus you would be able
to address the interface using an IP address assigned to the internal
interface.
+The TUN PMD allows the user to create a TUN device on the host. The PMD allows
+the user to transmit and receive packets via DPDK API calls with L3 header and
+payload. The devices on the host can be accessed via the ``ifconfig`` or ``ip``
+command. TUN interfaces are passed to the DPDK ``rte_eal_init`` arguments as
+``--vdev=net_tunX``, where X stands for a unique id, for example::
+
+ --vdev=net_tun0 --vdev=net_tun1,iface=foo1, ...
+
+Unlike the TAP PMD, the TUN PMD does not support user arguments such as ``MAC``
+or ``remote``. The default interface name is ``dtunX``, where X stands for a unique id.
+
Flow API support
----------------
@@ -91,7 +108,7 @@ The kernel support can be checked with this command::
Supported items:
- eth: src and dst (with variable masks), and eth_type (0xffff mask).
-- vlan: vid, pcp, tpid, but not eid. (requires kernel 4.9)
+- vlan: vid, pcp, but not eid. (requires kernel 4.9)
- ipv4/6: src and dst (with variable masks), and ip_proto (0xffff mask).
- udp/tcp: src and dst port (0xffff) mask.
@@ -149,7 +166,7 @@ Run pktgen from the pktgen directory in a terminal with a commandline like the
following::
sudo ./app/app/x86_64-native-linuxapp-gcc/app/pktgen -l 1-5 -n 4 \
- --proc-type auto --log-level 8 --socket-mem 512,512 --file-prefix pg \
+ --proc-type auto --log-level debug --socket-mem 512,512 --file-prefix pg \
--vdev=net_tap0 --vdev=net_tap1 -b 05:00.0 -b 05:00.1 \
-b 04:00.0 -b 04:00.1 -b 04:00.2 -b 04:00.3 \
-b 81:00.0 -b 81:00.1 -b 81:00.2 -b 81:00.3 \
@@ -241,6 +258,11 @@ Please refer to ``iproute2`` package file ``lib/bpf.c`` function
An example utility for eBPF instruction generation in the format of C arrays will
be added in future releases.
+TAP reports its supported RSS functions as part of the ``dev_infos_get`` callback:
+``ETH_RSS_IP``, ``ETH_RSS_UDP`` and ``ETH_RSS_TCP``.
+**Known limitation:** TAP supports all of the above hash functions together
+and not in partial combinations.
+
Systems supporting flow API
---------------------------
diff --git a/doc/guides/nics/thunderx.rst b/doc/guides/nics/thunderx.rst
index 5270ef23..e84eaafe 100644
--- a/doc/guides/nics/thunderx.rst
+++ b/doc/guides/nics/thunderx.rst
@@ -30,6 +30,7 @@ Features of the ThunderX PMD are:
- SR-IOV VF
- NUMA support
- Multi queue set support (up to 96 queues (12 queue sets)) per port
+- Skip data bytes
Supported ThunderX SoCs
-----------------------
@@ -312,6 +313,21 @@ We will choose four secondary queue sets from the ending of the list (0002:01:01
The nicvf thunderx driver will make use of attached secondary VFs automatically during the interface configuration stage.
+
+Module params
+--------------
+
+skip_data_bytes
+~~~~~~~~~~~~~~~
+This feature is used to create a hole between HEADROOM and the actual data. The size of the hole is
+specified in bytes as the module param ``skip_data_bytes`` to the PMD.
+This scheme is useful when the application would like to insert a VLAN header without disturbing HEADROOM.
+
+Example:
+ .. code-block:: console
+
+ -w 0002:01:00.2,skip_data_bytes=8
+
Limitations
-----------
@@ -319,8 +335,7 @@ CRC striping
~~~~~~~~~~~~
The ThunderX SoC family NICs strip the CRC for every packet coming into the
-host interface. So, CRC will be stripped even when the
-``rxmode.hw_strip_crc`` member is set to 0 in ``struct rte_eth_conf``.
+host interface irrespective of the offload configuration.
Maximum packet length
~~~~~~~~~~~~~~~~~~~~~
@@ -336,3 +351,8 @@ Maximum packet segments
The ThunderX SoC family NICs support up to 12 segments per packet when working
in scatter/gather mode. So, setting MTU will result with ``EINVAL`` when the
frame size does not fit in the maximum number of segments.
+
+skip_data_bytes
+~~~~~~~~~~~~~~~
+
+The maximum limit of ``skip_data_bytes`` is 128 bytes and the number of bytes should be a multiple of 8.
diff --git a/doc/guides/nics/vdev_netvsc.rst b/doc/guides/nics/vdev_netvsc.rst
index 55d130a3..d1da0711 100644
--- a/doc/guides/nics/vdev_netvsc.rst
+++ b/doc/guides/nics/vdev_netvsc.rst
@@ -1,6 +1,6 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright 2017 6WIND S.A.
- Copyright 2017 Mellanox Technologies, Ltd.
+ Copyright 2017 Mellanox Technologies, Ltd
VDEV_NETVSC driver
==================
@@ -89,12 +89,16 @@ The following device parameters are supported:
- ``force`` [int]
If nonzero, forces the use of specified interfaces even if not detected as
- NetVSC or detected as routed NETVSC.
+ NetVSC.
- ``ignore`` [int]
- If nonzero, ignores the driver runnig (actually used to disable the
+ If nonzero, ignores the driver running (actually used to disable the
auto-detection in Hyper-V VM).
-Not specifying either ``iface`` or ``mac`` makes this driver attach itself to
-all unrouted NetVSC interfaces found on the system.
+.. note::
+
+ Not specifying either ``iface`` or ``mac`` makes this driver attach itself to
+ all unrouted NetVSC interfaces found on the system.
+ Specifying the device makes this driver attach itself to the device
+ regardless of the device routes.
diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index ca09cd20..7c099fb7 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -201,7 +201,7 @@ The packet transmission flow is:
Virtio PMD Rx/Tx Callbacks
--------------------------
-Virtio driver has 3 Rx callbacks and 2 Tx callbacks.
+Virtio driver has 4 Rx callbacks and 3 Tx callbacks.
Rx callbacks:
@@ -215,6 +215,9 @@ Rx callbacks:
Vector version without mergeable Rx buffer support, also fixes the available
ring indexes and uses vector instructions to optimize performance.
+#. ``virtio_recv_mergeable_pkts_inorder``:
+ In-order version with mergeable Rx buffer support.
+
Tx callbacks:
#. ``virtio_xmit_pkts``:
@@ -223,6 +226,8 @@ Tx callbacks:
#. ``virtio_xmit_pkts_simple``:
Vector version fixes the available ring indexes to optimize performance.
+#. ``virtio_xmit_pkts_inorder``:
+ In-order version.
By default, the non-vector callbacks are used:
@@ -234,7 +239,7 @@ By default, the non-vector callbacks are used:
Vector callbacks will be used when:
-* ``txq_flags`` is set to ``VIRTIO_SIMPLE_FLAGS`` (0xF01), which implies:
+* ``txmode.offloads`` is set to ``0x0``, which implies:
* Single segment is specified.
@@ -252,8 +257,14 @@ The corresponding callbacks are:
Example of using the vector version of the virtio poll mode driver in
``testpmd``::
- testpmd -l 0-2 -n 4 -- -i --txqflags=0xF01 --rxq=1 --txq=1 --nb-cores=1
+ testpmd -l 0-2 -n 4 -- -i --tx-offloads=0x0 --rxq=1 --txq=1 --nb-cores=1
+
+In-order callbacks only work on the simulated virtio user vdev.
+
+* For Rx: If mergeable Rx buffers and in-order are both enabled then
+ ``virtio_recv_mergeable_pkts_inorder`` is used.
+* For Tx: If in-order is enabled then ``virtio_xmit_pkts_inorder`` is used.
Interrupt mode
--------------
@@ -318,3 +329,26 @@ Here we use l3fwd-power as an example to show how to get started.
$ l3fwd-power -l 0-1 -- -p 1 -P --config="(0,0,1)" \
--no-numa --parse-ptype
+
+
+Virtio PMD arguments
+--------------------
+
+The user can specify the below arguments in devargs.
+
+#. ``vdpa``:
+
+ A virtio device could also be driven by a vDPA (vhost data path acceleration)
+ driver, and work as a HW vhost backend. This argument is used to specify that
+ a virtio device needs to work in vDPA mode.
+ (Default: 0 (disabled))
+
+#. ``mrg_rxbuf``:
+
+ It is used to enable virtio device mergeable Rx buffer feature.
+ (Default: 1 (enabled))
+
+#. ``in_order``:
+
+ It is used to enable virtio device in-order feature.
+ (Default: 1 (enabled))
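+
+For instance, these devargs may be supplied through the whitelist option, as
+with other PCI devices (the PCI address is hypothetical):
+
+.. code-block:: console
+
+   testpmd -w 0000:00:04.0,vdpa=1 -- -i
+   testpmd -w 0000:00:04.0,mrg_rxbuf=0,in_order=0 -- -i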
diff --git a/doc/guides/platform/octeontx.rst b/doc/guides/platform/octeontx.rst
index dc1aa4fe..b0a99c38 100644
--- a/doc/guides/platform/octeontx.rst
+++ b/doc/guides/platform/octeontx.rst
@@ -16,11 +16,11 @@ Common Offload HW Block Drivers
-------------------------------
1. **Eventdev Driver**
- See :doc: `../eventdevs/octeontx.rst` for octeontx ssovf eventdev driver
+ See :doc:`../eventdevs/octeontx` for octeontx ssovf eventdev driver
information.
2. **Mempool Driver**
- See :doc: `../mempool/octeontx.rst` for octeontx fpavf mempool driver
+ See :doc:`../mempool/octeontx` for octeontx fpavf mempool driver
information.
Steps To Setup Platform
@@ -52,4 +52,4 @@ OCTEONTX compatible board:
SDK and related information can be obtained from: `Cavium support site <https://support.cavium.com/>`_.
-- Follow the DPDK :doc: `../linux_gsg/index.rst` to setup the basic DPDK environment.
+- Follow the DPDK :doc:`../linux_gsg/index` to set up the basic DPDK environment.
diff --git a/doc/guides/prog_guide/bbdev.rst b/doc/guides/prog_guide/bbdev.rst
index d40c7f4c..9de14443 100644
--- a/doc/guides/prog_guide/bbdev.rst
+++ b/doc/guides/prog_guide/bbdev.rst
@@ -6,12 +6,12 @@ Wireless Baseband Device Library
The Wireless Baseband library provides a common programming framework that
abstracts HW accelerators based on FPGA and/or Fixed Function Accelerators that
-assist with 3gpp Physical Layer processing. Furthermore, it decouples the
+assist with 3GPP Physical Layer processing. Furthermore, it decouples the
application from the compute-intensive wireless functions by abstracting their
optimized libraries to appear as virtual bbdev devices.
The functional scope of the BBDEV library are those functions in relation to
-the 3gpp Layer 1 signal processing (channel coding, modulation, ...).
+the 3GPP Layer 1 signal processing (channel coding, modulation, ...).
The framework currently only supports Turbo Code FEC function.
@@ -42,13 +42,13 @@ From the command line using the --vdev EAL option
.. code-block:: console
- --vdev 'turbo_sw,max_nb_queues=8,socket_id=0'
+ --vdev 'baseband_turbo_sw,max_nb_queues=8,socket_id=0'
Or using the rte_vdev_init API within the application code.
.. code-block:: c
- rte_vdev_init("turbo_sw", "max_nb_queues=2,socket_id=0")
+ rte_vdev_init("baseband_turbo_sw", "max_nb_queues=2,socket_id=0")
All virtual bbdev devices support the following initialization parameters:
@@ -167,7 +167,7 @@ Logical Cores, Memory and Queues Relationships
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The bbdev device library, like the Poll Mode Driver library, supports NUMA for when
-a processor’s logical cores and interfaces utilize its local memory. Therefore
+a processor's logical cores and interfaces utilize its local memory. Therefore, for
baseband operations, the mbuf being operated on should be allocated from memory
pools created in the local memory. The buffers should, if possible, remain on
the local processor to obtain the best performance results and buffer
@@ -202,7 +202,7 @@ structure in the *DPDK API Reference*.
A device reports its capabilities when registering itself in the bbdev framework.
With the aid of this capabilities mechanism, an application can query devices to
-discover which operations within the 3gpp physical layer they are capable of
+discover which operations within the 3GPP physical layer they are capable of
performing. Below is an example of the capabilities a PMD supports in
relation to Turbo Encoding and Decoding operations.
@@ -216,7 +216,10 @@ relation to Turbo Encoding and Decoding operations.
RTE_BBDEV_TURBO_SUBBLOCK_DEINTERLEAVE |
RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
- RTE_BBDEV_TURBO_CRC_TYPE_24B,
+ RTE_BBDEV_TURBO_CRC_TYPE_24B |
+ RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
+ RTE_BBDEV_TURBO_EARLY_TERMINATION,
+ .max_llr_modulus = 16,
.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
.num_buffers_hard_out =
RTE_BBDEV_MAX_CODE_BLOCKS,
@@ -228,6 +231,7 @@ relation to Turbo Encoding and Decoding operations.
.cap.turbo_enc = {
.capability_flags =
RTE_BBDEV_TURBO_CRC_24B_ATTACH |
+ RTE_BBDEV_TURBO_CRC_24A_ATTACH |
RTE_BBDEV_TURBO_RATE_MATCH |
RTE_BBDEV_TURBO_RV_INDEX_BYPASS,
.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
@@ -391,27 +395,101 @@ operation to its allocating pool.
void rte_bbdev_enc_op_free_bulk(struct rte_bbdev_enc_op **ops,
unsigned int num_ops)
-BBDEV Operations
-~~~~~~~~~~~~~~~~
+BBDEV Inbound/Outbound Memory
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The bbdev operation structure contains all the mutable data relating to
-performing Turbo code processing on a referenced mbuf data buffer. It is used
-for either encode or decode operations.
+performing Turbo coding on a referenced mbuf data buffer. It is used for either
+encode or decode operations.
Turbo Encode operation accepts one input and one output.
-
Turbo Decode operation accepts one input and two outputs, called *hard-decision*
and *soft-decision* outputs. *Soft-decision* output is optional.
-It is expected that the application provides input and output ``mbuf`` pointers
+It is expected that the application provides input and output mbuf pointers
allocated and ready to use. The baseband framework supports turbo coding on
Code Blocks (CB) and Transport Blocks (TB).
-For the output buffer(s), the application needs only to provide an allocated and
-free mbuf (containing only one mbuf segment), so that bbdev can write the
-operation outcome.
+For the output buffer(s), the application is required to provide an allocated
+and free mbuf, so that bbdev can write back the resulting output.
+
+The support of split "scattered" buffers is a driver-specific feature, so it is
+reported individually by the supporting driver as a capability.
+
+Input and output data buffers are identified by ``rte_bbdev_op_data`` structure,
+as follows:
+
+.. code-block:: c
+
+ struct rte_bbdev_op_data {
+ struct rte_mbuf *data;
+ uint32_t offset;
+ uint32_t length;
+ };
+
+
+This structure has three elements:
+
+- ``data``: This is the mbuf data structure representing the data for BBDEV
+ operation.
+
+ This mbuf pointer can point to one Code Block (CB) data buffer or multiple CBs
+ contiguously located next to each other. A Transport Block (TB) represents a
+ whole piece of data that is divided into one or more CBs. The maximum number of
+ CBs that can be contained in one TB is defined by ``RTE_BBDEV_MAX_CODE_BLOCKS``.
+
+ An mbuf data structure cannot represent more than one TB. The smallest piece
+ of data that can be contained in one mbuf is one CB.
+ An mbuf can include one contiguous CB, a subset of contiguous CBs that
+ belong to one TB, or all contiguous CBs that belong to one TB.
+
+ If a BBDEV PMD supports the extended capability "Scatter-Gather", then it is
+ capable of collecting (gathering) non-contiguous (scattered) data from
+ multiple locations in the memory.
+ This capability is reported by the capability flags:
+
+ - ``RTE_BBDEV_TURBO_ENC_SCATTER_GATHER``, and
-**Turbo Encode Op structure**
+ - ``RTE_BBDEV_TURBO_DEC_SCATTER_GATHER``.
+
+ Only if a BBDEV PMD supports this feature, chained mbuf data structures are
+ accepted. A chained mbuf can represent one non-contiguous CB or multiple
+ non-contiguous CBs.
+ The first mbuf segment in the given chained mbuf represents the first piece
+ of the CB. Offset is only applicable to the first segment. ``length`` is the
+ total length of the CB.
+
+ The BBDEV driver is responsible for identifying where the split is and enqueuing
+ the split data to its internal queues.
+
+ If the BBDEV PMD does not support this feature, it will assume the inbound mbuf
+ data contains one segment.
+
+ The output mbuf data though is always one segment, even if the input was a
+ chained mbuf.
+
+
+- ``offset``: This is the starting point of the BBDEV (encode/decode) operation,
+ in bytes.
+
+ BBDEV starts to read data past this offset.
+ In case of chained mbuf, this offset applies only to the first mbuf segment.
+
+
+- ``length``: This is the total data length to be processed in one operation,
+ in bytes.
+
+ In case the mbuf data is representing one CB, this is the length of the CB
+ undergoing the operation.
+ If it is for multiple CBs, this is the total length of those CBs undergoing
+ the operation.
+ If it is for one TB, this is the total length of the TB under operation.
+ In case of chained mbuf, this data length includes the lengths of the
+ "scattered" data segments undergoing the operation.
+
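+As a brief sketch, describing one contiguous CB for an operation could look as
+follows; the helper name and the ``cb_len_bytes`` parameter are illustrative:
+
+.. code-block:: c
+
+    #include <rte_bbdev_op.h>
+    #include <rte_mbuf.h>
+
+    /* Point an rte_bbdev_op_data at one contiguous CB held in an mbuf */
+    static void
+    fill_bbdev_op_data(struct rte_bbdev_op_data *d, struct rte_mbuf *m,
+            uint32_t cb_len_bytes)
+    {
+        d->data = m;              /* mbuf holding the CB */
+        d->offset = 0;            /* CB starts at the beginning of the data */
+        d->length = cb_len_bytes; /* total bytes undergoing the operation */
+    }
+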
+
+BBDEV Turbo Encode Operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: c
@@ -428,8 +506,78 @@ operation outcome.
};
};
+The Turbo encode structure is composed of the ``input`` and ``output`` mbuf
+data pointers. The provided mbuf pointer of ``input`` needs to be big enough to
+stretch for extra CRC trailers.
+
+The ``op_flags`` parameter holds all operation-related flags, like whether CRC24A is
+included by the application or not.
+
+The ``code_block_mode`` flag identifies the mode in which bbdev is operating.
+
+The encode interface works on both the code block (CB) and the transport block
+(TB). An operation executes in "CB-mode" when the CB is standalone, while
+"TB-mode" executes when an operation is performed on one or multiple CBs that
+belong to a TB. Therefore, given data can be a standalone CB, a full-size TB or
+a partial TB. Partial TB means that only a subset of CBs belonging to a bigger
+TB are being enqueued.
-**Turbo Decode Op structure**
+ **NOTE:** It is assumed that all enqueued ops in one ``rte_bbdev_enqueue_enc_ops()``
+ call belong to one mode, either CB-mode or TB-mode.
+
+In case that the CB is smaller than Z (6144 bits), then effectively the TB = CB.
+CRC24A is appended to the tail of the CB. The application is responsible for
+calculating and appending CRC24A before calling BBDEV in case that the
+underlying driver does not support CRC24A generation.
+
+In CB-mode, CRC24A/B is an optional operation.
+The input ``k`` is the size of the CB (this maps to K as described in 3GPP TS
+36.212 section 5.1.2), this size is inclusive of CRC24A/B.
+The ``length`` is inclusive of CRC24A/B and equals to ``k`` in this case.
+
+Not all BBDEV PMDs are capable of CRC24A/B calculation. Flags
+``RTE_BBDEV_TURBO_CRC_24A_ATTACH`` and ``RTE_BBDEV_TURBO_CRC_24B_ATTACH``
+inform the application of the relevant capability. These flags can be set in the
+``op_flags`` parameter to direct BBDEV to calculate and append CRC24A to the CB
+before going forward with Turbo encoding.
+
+Output format of the CB encode will have the encoded CB in ``e`` size output
+(this maps to E described in 3GPP TS 36.212 section 5.1.4.1.2). The output mbuf
+buffer size needs to be big enough to hold the encoded buffer of size ``e``.
+
+In TB-mode, CRC24A is assumed to be pre-calculated and appended to the inbound
+TB mbuf data buffer.
+The output mbuf data structure is expected to be allocated by the application
+with enough room for the output data.
+
+The difference between the partial and full-size TB is that we need to know the
+index of the first CB in this group and the number of CBs contained within.
+The first CB index is given by ``r`` but the number of the remaining CBs is
+calculated automatically by BBDEV before passing down to the driver.
+
+The number of remaining CBs should not be confused with ``c``. ``c`` is the
+total number of CBs that composes the whole TB (this maps to C as
+described in 3GPP TS 36.212 section 5.1.2).
+
+The ``length`` is the total size of the CBs, inclusive of any CRC24A and CRC24B
+in case they were appended by the application.
+
+The case when one CB belonging to a TB is enqueued individually to BBDEV is
+considered a special case of a partial TB whose number of CBs is 1. Therefore,
+it must be processed in TB-mode.
+
+The figure below visualizes the encoding of CBs using BBDEV interface in
+TB-mode. CB-mode is a reduced version, where only one CB exists:
+
+.. _figure_turbo_tb_encode:
+
+.. figure:: img/turbo_tb_encode.*
+
+ Turbo encoding of Code Blocks in mbuf structure
+
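+As an illustrative sketch (not a complete application), enqueuing a single
+standalone CB for encoding could look as follows. The mempool, mbufs, ``k``
+and ``e`` are assumed to be prepared by the application, and the field and
+flag names follow the 18.08 ``rte_bbdev_op.h`` definitions:
+
+.. code-block:: c
+
+    #include <rte_bbdev.h>
+
+    /* Enqueue one standalone CB (CB-mode) for Turbo encoding on
+     * device 0, queue 0. Returns 0 on success.
+     */
+    static int
+    enqueue_one_cb(struct rte_mempool *enc_op_pool, struct rte_mbuf *in_mbuf,
+            struct rte_mbuf *out_mbuf, uint16_t k, uint32_t e)
+    {
+        struct rte_bbdev_enc_op *op;
+
+        if (rte_bbdev_enc_op_alloc_bulk(enc_op_pool, &op, 1) != 0)
+            return -1;
+
+        op->turbo_enc.code_block_mode = 1; /* CB-mode (assumed encoding) */
+        op->turbo_enc.op_flags = RTE_BBDEV_TURBO_CRC_24A_ATTACH;
+        op->turbo_enc.cb_params.k = k;     /* CB size in bits, incl. CRC */
+        op->turbo_enc.cb_params.e = e;     /* rate-matched output, in bits */
+        op->turbo_enc.input.data = in_mbuf;
+        op->turbo_enc.input.offset = 0;
+        op->turbo_enc.input.length = k / 8;
+        op->turbo_enc.output.data = out_mbuf; /* free mbuf for the result */
+        op->turbo_enc.output.offset = 0;
+
+        return rte_bbdev_enqueue_enc_ops(0, 0, &op, 1) == 1 ? 0 : -1;
+    }
+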
+
+BBDEV Turbo Decode Operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: c
@@ -452,18 +600,75 @@ operation outcome.
};
};
-Input and output data buffers are identified by ``rte_bbdev_op_data`` structure.
-This structure has three elements:
+The Turbo decode structure is composed of the ``input`` and ``output`` mbuf
+data pointers.
+
+The ``op_flags`` parameter holds all operation-related flags, like whether CRC24B is
+retained or not.
+
+The ``code_block_mode`` flag identifies the mode in which bbdev is operating.
+
+Similarly, the decode interface works on both the code block (CB) and the
+transport block (TB). An operation executes in "CB-mode" when the CB is
+standalone, while "TB-mode" executes when an operation is performed on one or
+multiple CBs that belong to a TB. Therefore, given data can be a standalone CB,
+a full-size TB or a partial TB. Partial TB means that only a subset of CBs
+belonging to a bigger TB are being enqueued.
+
+ **NOTE:** It is assumed that all enqueued ops in one ``rte_bbdev_enqueue_dec_ops()``
+ call belong to one mode, either CB-mode or TB-mode.
+
+The input ``k`` is the size of the decoded CB (this maps to K as described in
+3GPP TS 36.212 section 5.1.2), this size is inclusive of CRC24A/B.
+The ``length`` is inclusive of CRC24A/B and equals to ``k`` in this case.
+
+The input encoded CB data is the Virtual Circular Buffer data stream, wk, with
+the null padding included as described in 3GPP TS 36.212 section 5.1.4.1.2 and
+shown in 3GPP TS 36.212 section 5.1.4.1 Figure 5.1.4-1.
+The size of the virtual circular buffer is 3*Kpi, where Kpi is the 32 byte
+aligned value of K, as specified in 3GPP TS 36.212 section 5.1.4.1.1.
+
+Each byte in the input circular buffer is the LLR value of each bit of the
+original CB.
+
+``hard_output`` is a mandatory capability that all BBDEV PMDs support. This is
+the decoded CBs of K sizes (CRC24A/B is the last 24-bit in each decoded CB).
+Soft output is an optional capability for BBDEV PMDs. Setting flag
+``RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP`` in ``op_flags`` directs BBDEV to retain
+CRC24B at the end of each CB. This might be useful for the application in debug
+mode.
+An LLR rate matched output is computed in the ``soft_output`` buffer structure
+for the given ``e`` size (this maps to E described in 3GPP TS 36.212 section
+5.1.4.1.2). The output mbuf buffer size needs to be big enough to hold the
+encoded buffer of size ``e``.
+
+The first CB Virtual Circular Buffer (VCB) index is given by ``r`` but the
+number of the remaining CB VCBs is calculated automatically by BBDEV before
+passing down to the driver.
+
+The number of remaining CB VCBs should not be confused with ``c``. ``c`` is the
+total number of CBs that composes the whole TB (this maps to C as
+described in 3GPP TS 36.212 section 5.1.2).
+
+The ``length`` is the total size of the CBs, inclusive of any CRC24A and CRC24B
+in case they were appended by the application.
+
+The case when one CB belonging to a TB is enqueued individually to BBDEV is
+considered a special case of a partial TB whose number of CBs is 1. Therefore,
+it must be processed in TB-mode.
+
+The output mbuf data structure is expected to be allocated by the application
+with enough room for the output data.
+
+The figure below visualizes the decoding of CBs using BBDEV interface in
+TB-mode. CB-mode is a reduced version, where only one CB exists:
+
+.. _figure_turbo_tb_decode:
-- ``data`` - This is the mbuf reference
+.. figure:: img/turbo_tb_decode.*
-- ``offset`` - The starting point for the Turbo input/output, in bytes, from the
- start of the data in the data buffer. It must be smaller than data_len of the
- mbuf's first segment
+ Turbo decoding of Code Blocks in mbuf structure
-- ``length`` - The length, in bytes, of the buffer on which the Turbo operation
- will or has been computed. For the input, the length is set by the application.
- For the output(s), the length is computed by the bbdev PMD driver.
Sample code
-----------
diff --git a/doc/guides/prog_guide/bpf_lib.rst b/doc/guides/prog_guide/bpf_lib.rst
new file mode 100644
index 00000000..7c08e6b2
--- /dev/null
+++ b/doc/guides/prog_guide/bpf_lib.rst
@@ -0,0 +1,38 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation.
+
+Berkeley Packet Filter Library
+==============================
+
+The DPDK provides a BPF library that gives the ability
+to load and execute Enhanced Berkeley Packet Filter (eBPF) bytecode within
+a user-space DPDK application.
+
+It supports a basic set of features from the eBPF spec.
+Please refer to the
+`eBPF spec <https://www.kernel.org/doc/Documentation/networking/filter.txt>`_
+for more information.
+It also introduces a basic framework to load/unload BPF-based filters
+on eth devices (right now only via SW RX/TX callbacks).
+
+The library API provides the following basic operations:
+
+* Create a new BPF execution context and load user provided eBPF code into it.
+
+* Destroy a BPF execution context and its runtime structures and free the associated memory.
+
+* Execute eBPF bytecode associated with provided input parameter.
+
+* Provide information about natively compiled code for given BPF context.
+
+* Load BPF program from the ELF file and install callback to execute it on given ethdev port/queue.
+
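+As a brief sketch, loading section ``.text`` from a BPF ELF object and
+attaching it to the Rx path of a port/queue could look as follows (the file
+name is an example only; ``RTE_BPF_ETH_F_JIT`` requests JIT compilation):
+
+.. code-block:: c
+
+    #include <string.h>
+    #include <rte_bpf_ethdev.h>
+
+    static int
+    install_rx_filter(uint16_t port, uint16_t queue)
+    {
+        struct rte_bpf_prm prm;
+
+        memset(&prm, 0, sizeof(prm)); /* no external symbols in this sketch */
+
+        return rte_bpf_eth_rx_elf_load(port, queue, &prm,
+                "/path/to/filter.o", ".text", RTE_BPF_ETH_F_JIT);
+    }
+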
+Not currently supported eBPF features
+-------------------------------------
+
+ - JIT for non X86_64 platforms
+ - cBPF
+ - tail-pointer call
+ - eBPF MAP
+ - skb
+ - external function calls for 32-bit platforms
diff --git a/doc/guides/prog_guide/compressdev.rst b/doc/guides/prog_guide/compressdev.rst
new file mode 100644
index 00000000..87e26490
--- /dev/null
+++ b/doc/guides/prog_guide/compressdev.rst
@@ -0,0 +1,623 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2017-2018 Cavium Networks.
+
+Compression Device Library
+===========================
+
+The compression framework provides a generic set of APIs to perform compression services
+as well as to query and configure compression devices both physical(hardware) and virtual(software)
+to perform those services. The framework currently only supports lossless compression schemes:
+Deflate and LZS.
+
+Device Management
+-----------------
+
+Device Creation
+~~~~~~~~~~~~~~~
+
+Physical compression devices are discovered during the EAL bus probe, which is
+executed at DPDK initialization, based on their unique device identifier.
+For example, PCI devices can be identified using a PCI BDF (bus, device, function).
+Specific physical compression devices, like other physical devices in DPDK, can be
+white-listed or black-listed using the EAL command line options.
+
+Virtual devices can be created by two mechanisms, either using the EAL command
+line options or from within the application using an EAL API directly.
+
+From the command line using the ``--vdev`` EAL option:
+
+.. code-block:: console
+
+ --vdev '<pmd name>,socket_id=0'
+
+.. Note::
+
+ * If a DPDK application requires multiple software compression PMD devices then the required
+   number of ``--vdev`` options with the appropriate libraries must be added.
+
+ * An application with multiple compression device instances exposed by the same PMD must
+   specify a unique name for each device.
+
+ Example: ``--vdev 'pmd0' --vdev 'pmd1'``
+
+Or by using the ``rte_vdev_init`` API within the application code:
+
+.. code-block:: c
+
+ rte_vdev_init("<pmd_name>","socket_id=0")
+
+All virtual compression devices support the following initialization parameters:
+
+* ``socket_id`` - socket on which to allocate the device resources.
+
+Device Identification
+~~~~~~~~~~~~~~~~~~~~~
+
+Each device, whether virtual or physical, is uniquely designated by two
+identifiers:
+
+- A unique device index used to designate the compression device in all functions
+ exported by the compressdev API.
+
+- A device name used to designate the compression device in console messages, for
+ administration or debugging purposes.
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+The configuration of each compression device includes the following operations:
+
+- Allocation of resources, including hardware resources if a physical device.
+- Resetting the device into a well-known default state.
+- Initialization of statistics counters.
+
+The ``rte_compressdev_configure`` API is used to configure a compression device.
+
+The ``rte_compressdev_config`` structure is used to pass the configuration
+parameters.
+
+See *DPDK API Reference* for details.
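+
+As an illustration, a minimal configuration might look as below (a sketch; the
+field values are application choices, not defaults):
+
+.. code-block:: c
+
+    struct rte_compressdev_config conf = {
+        .socket_id = rte_socket_id(),
+        .nb_queue_pairs = 1,
+        .max_nb_priv_xforms = 16,
+        .max_nb_streams = 0,
+    };
+
+    if (rte_compressdev_configure(cdev_id, &conf) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to configure compressdev %u", cdev_id);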
+
+Configuration of Queue Pairs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each compression device queue pair is individually configured through the
+``rte_compressdev_queue_pair_setup`` API.
+
+The ``max_inflight_ops`` parameter is used to pass the maximum number of
+``rte_comp_op`` structures that can be present in a queue at a time.
+The PMD can then allocate resources accordingly on the specified socket.
+
+See *DPDK API Reference* for details.
+
+Logical Cores, Memory and Queues Pair Relationships
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The library supports NUMA similarly to what is described in the Cryptodev library section.
+
+A queue pair cannot be shared, and should be exclusively used by a single processing
+context for enqueuing operations or dequeuing operations on the same compression device,
+since sharing would require global locks and hinder performance. It is, however, possible
+to use a different logical core to dequeue an operation on a queue pair from the logical
+core on which it was enqueued. This means that the compression burst enqueue/dequeue
+APIs are a logical place to transition from one logical core to another in a
+data processing pipeline.
+
+Device Features and Capabilities
+---------------------------------
+
+Compression devices define their functionality through two mechanisms: global device
+features and algorithm features. Global device features identify device-wide
+features which are applicable to the whole device, such as supported hardware
+acceleration and CPU features. The list of compression device features can be seen
+in the RTE_COMPDEV_FF_XXX macros.
+
+The algorithm features list the individual features which a device supports per
+algorithm, such as stateful compression/decompression, checksum operations etc.
+The list of algorithm features can be seen in the RTE_COMP_FF_XXX macros.
+
+Capabilities
+~~~~~~~~~~~~
+Each PMD has a list of capabilities, including the algorithms listed in
+enum ``rte_comp_algorithm``, their associated feature flags and the
+sliding window range in log base 2 values. The sliding window gives
+the minimum and maximum size of the lookup window that the algorithm uses
+to find duplicates.
+
+See *DPDK API Reference* for details.
+
+Each compression poll mode driver defines its array of capabilities
+for each algorithm it supports. See the PMD implementation for capability
+initialization.
+
+Capabilities Discovery
+~~~~~~~~~~~~~~~~~~~~~~
+
+PMD capabilities and features are discovered via the ``rte_compressdev_info_get`` function.
+
+The ``rte_compressdev_info`` structure contains all the relevant information for the device.
+
+See *DPDK API Reference* for details.
+
+Compression Operation
+----------------------
+
+DPDK compression supports two types of compression methodologies:
+
+- Stateless - data associated with a compression operation is compressed without any
+  reference to another compression operation.
+
+- Stateful - data in each compression operation is compressed with reference to previous
+  compression operations in the same data stream, i.e. history of data is maintained
+  between the operations.
+
+For more explanation, please refer to RFC 1951: https://www.ietf.org/rfc/rfc1951.txt
+
+Operation Representation
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+A compression operation is described via ``struct rte_comp_op``, which contains both input and
+output data. The operation structure includes the operation type (stateless or stateful),
+the operation status, the priv_xform/stream handle, and the source, destination and checksum buffer
+pointers. It also contains the source mempool from which the operation is allocated.
+The PMD updates the consumed field with the amount of data read from the source buffer, and the
+produced field with the amount of data written into the destination buffer, along with the
+status of the operation. See the section *Produced, Consumed And Operation Status* for more details.
+
+The compression operation mempool also has the ability to allocate private memory with the
+operation for the application's purposes. Application software is responsible for specifying
+all the operation specific fields in the ``rte_comp_op`` structure which are then used
+by the compression PMD to process the requested operation.
+
+
+Operation Management and Allocation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The compressdev library provides an API set for managing compression operations which
+utilizes the Mempool Library to allocate operation buffers. Therefore, it ensures
+that compression operations are interleaved optimally across the channels and
+ranks for optimal processing.
+
+A ``rte_comp_op`` contains a field indicating the pool it originated from.
+
+``rte_comp_op_alloc()`` and ``rte_comp_op_bulk_alloc()`` are used to allocate
+compression operations from a given compression operation mempool.
+The operation gets reset before being returned to the user, so that the operation
+is always in a known good state before use by the application.
+
+``rte_comp_op_free()`` is called by the application to return an operation to
+its allocating pool.
+
+See *DPDK API Reference* for details.
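+
+A minimal allocation sketch (the pool name and sizes are illustrative
+assumptions):
+
+.. code-block:: c
+
+    #define NUM_OPS 16
+
+    struct rte_comp_op *comp_ops[NUM_OPS];
+
+    /* Create a pool of compression operations on the caller's socket. */
+    struct rte_mempool *op_pool = rte_comp_op_pool_create("comp_op_pool",
+            NUM_OPS, 0 /* cache size */, 0 /* user size */, rte_socket_id());
+    if (op_pool == NULL)
+        rte_exit(EXIT_FAILURE, "Cannot create op pool\n");
+
+    if (rte_comp_op_bulk_alloc(op_pool, comp_ops, NUM_OPS) == 0)
+        rte_exit(EXIT_FAILURE, "Cannot allocate ops\n");
+
+    /* ... fill in, enqueue and dequeue the ops ... */
+
+    /* Return an operation to its pool once processed. */
+    rte_comp_op_free(comp_ops[0]);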
+
+Passing source data as mbuf-chain
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+If input data is scattered across several different buffers, then the
+application can either parse through all such buffers and make one
+mbuf-chain and enqueue it for processing or, alternatively, it can
+make multiple sequential enqueue_burst() calls for each of them,
+processing them statefully. See *Compression API Stateful Operation*
+for stateful processing of ops.
+
+Operation Status
+~~~~~~~~~~~~~~~~
+Each operation carries status information updated by the PMD after it is processed.
+The following statuses are currently supported:
+
+- RTE_COMP_OP_STATUS_SUCCESS,
+ Operation is successfully completed
+
+- RTE_COMP_OP_STATUS_NOT_PROCESSED,
+ Operation has not yet been processed by the device
+
+- RTE_COMP_OP_STATUS_INVALID_ARGS,
+ Operation failed due to invalid arguments in request
+
+- RTE_COMP_OP_STATUS_ERROR,
+ Operation failed because of internal error
+
+- RTE_COMP_OP_STATUS_INVALID_STATE,
+ Operation is invoked in invalid state
+
+- RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
+  Output buffer ran out of space during processing. This is an error case;
+  the PMD cannot continue from here.
+
+- RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
+ Output buffer ran out of space before operation completed, but this
+ is not an error case. Output data up to op.produced can be used and
+ next op in the stream should continue on from op.consumed+1.
+
+Produced, Consumed And Operation Status
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- If status is RTE_COMP_OP_STATUS_SUCCESS,
+ consumed = amount of data read from input buffer, and
+ produced = amount of data written in destination buffer
+- If status is RTE_COMP_OP_STATUS_ERROR,
+  consumed = produced = 0 or undefined
+- If status is RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
+  consumed = 0 and
+  produced = usually 0, but in decompression cases a PMD may return > 0,
+  i.e. the amount of data successfully produced until the out of space
+  condition was hit. The application can consume output data in this case,
+  if required.
+- If status is RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
+  consumed = amount of data read, and
+  produced = amount of data successfully produced until the
+  out of space condition was hit. The PMD has the ability to recover
+  from here, so the application can submit the next op from
+  consumed+1 with a destination buffer that has available space.
+
+Transforms
+----------
+
+Compression transforms (``rte_comp_xform``) are the mechanism used
+to specify the details of the compression operation, such as algorithm,
+window size and checksum.
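+
+For illustration, a decompression transform for Deflate without checksums could
+be set up as below (a sketch; the field values are application choices):
+
+.. code-block:: c
+
+    struct rte_comp_xform decompress_xform = {
+        .type = RTE_COMP_DECOMPRESS,
+        .decompress = {
+            .algo = RTE_COMP_ALGO_DEFLATE,
+            .chksum = RTE_COMP_CHECKSUM_NONE,
+            .window_size = 15, /* log2 of the sliding window size */
+            .hash_algo = RTE_COMP_HASH_ALGO_NONE
+        }
+    };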
+
+Compression API Hash support
+----------------------------
+
+The compression API allows an application to enable digest calculation
+alongside compression and decompression of data. A PMD reflects its
+support for hash algorithms via the capability algo feature flags.
+If supported, the PMD always calculates the digest on the plaintext, i.e.
+before compression and after decompression.
+
+The currently supported hash algorithms are SHA-1 and the SHA2 family's
+SHA256.
+
+See *DPDK API Reference* for details.
+
+If required, the application should set a valid hash algo in the compress
+or decompress xforms passed to ``rte_compressdev_stream_create()``
+or ``rte_compressdev_private_xform_create()``, and pass a valid
+output buffer in the ``rte_comp_op`` hash field struct to store the
+resulting digest. The buffer passed should be contiguous and large
+enough to store the digest, which is 20 bytes for SHA-1 and
+32 bytes for SHA2-256.
+
+Compression API Stateless operation
+------------------------------------
+
+An op is processed statelessly if it has:
+
+- op_type set to RTE_COMP_OP_STATELESS
+- flush value set to RTE_COMP_FLUSH_FULL or RTE_COMP_FLUSH_FINAL
+  (required only on the compression side),
+- all required input in the source buffer
+
+When all of the above conditions are met, the PMD initiates stateless processing
+and releases the acquired resources after processing of the current operation is
+complete. The application can enqueue multiple stateless ops in a single burst
+and must attach a priv_xform handle to such ops.
+
+priv_xform in Stateless operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A priv_xform is private data managed internally by the PMD to perform stateless processing.
+priv_xforms are initialized by an application providing a generic xform structure in a call
+to ``rte_compressdev_private_xform_create``; as output, the PMD returns an opaque priv_xform
+reference. If the PMD supports a SHAREABLE priv_xform, indicated via an algorithm feature
+flag, then the application can attach the same priv_xform to many stateless ops at a time.
+If not, then the application needs to create as many priv_xforms as the number of stateless
+operations it expects to have in flight.
+
+.. figure:: img/stateless-op.*
+
+ Stateless Ops using Non-Shareable priv_xform
+
+
+.. figure:: img/stateless-op-shared.*
+
+ Stateless Ops using Shareable priv_xform
+
+
+The application should call ``rte_compressdev_private_xform_create()`` and attach the result
+to each stateless op before enqueuing it for processing, and free it via
+``rte_compressdev_private_xform_free()`` during termination.
+
+Example pseudocode to set up and process NUM_OPS stateless ops, each of length OP_LEN,
+using a priv_xform would look like:
+
+.. code-block:: c
+
+    /*
+     * pseudocode for stateless compression
+     */
+
+    uint8_t cdev_id = rte_compressdev_get_dev_id(<pmd name>);
+
+    /* configure the device. */
+    if (rte_compressdev_configure(cdev_id, &conf) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to configure compressdev %u", cdev_id);
+
+    if (rte_compressdev_queue_pair_setup(cdev_id, 0, NUM_MAX_INFLIGHT_OPS,
+                socket_id()) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to setup queue pair\n");
+
+    if (rte_compressdev_start(cdev_id) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to start device\n");
+
+    /* setup compress transform */
+    struct rte_comp_xform compress_xform = {
+        .type = RTE_COMP_COMPRESS,
+        .compress = {
+            .algo = RTE_COMP_ALGO_DEFLATE,
+            .deflate = {
+                .huffman = RTE_COMP_HUFFMAN_DEFAULT
+            },
+            .level = RTE_COMP_LEVEL_PMD_DEFAULT,
+            .chksum = RTE_COMP_CHECKSUM_NONE,
+            .window_size = DEFAULT_WINDOW_SIZE,
+            .hash_algo = RTE_COMP_HASH_ALGO_NONE
+        }
+    };
+
+    /* create a priv_xform, shared across ops if the device supports it. */
+    int shareable = 0;
+    void *priv_xform = NULL;
+    struct rte_compressdev_info dev_info;
+    rte_compressdev_info_get(cdev_id, &dev_info);
+    if (dev_info.capabilities->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
+        rte_compressdev_private_xform_create(cdev_id, &compress_xform, &priv_xform);
+        shareable = 1;
+    }
+
+    /* create operation pool via call to rte_comp_op_pool_create and alloc ops */
+    rte_comp_op_bulk_alloc(op_pool, comp_ops, NUM_OPS);
+
+    /* prepare ops for compression operations */
+    for (i = 0; i < NUM_OPS; i++) {
+        struct rte_comp_op *op = comp_ops[i];
+        if (!shareable)
+            rte_compressdev_private_xform_create(cdev_id, &compress_xform,
+                    &op->priv_xform);
+        else
+            op->priv_xform = priv_xform;
+        op->type = RTE_COMP_OP_STATELESS;
+        op->flush = RTE_COMP_FLUSH_FINAL;
+
+        op->src.offset = 0;
+        op->dst.offset = 0;
+        op->src.length = OP_LEN;
+        op->input_chksum = 0;
+        setup op->m_src and op->m_dst;
+    }
+    num_enqd = rte_compressdev_enqueue_burst(cdev_id, 0, comp_ops, NUM_OPS);
+    /* wait for this to complete before enqueuing next */
+    do {
+        num_deqd = rte_compressdev_dequeue_burst(cdev_id, 0, &processed_ops, NUM_OPS);
+    } while (num_deqd < num_enqd);
+
+
+Stateless and OUT_OF_SPACE
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+OUT_OF_SPACE is a condition where the output buffer runs out of space and the PMD
+still has more data to produce. If the PMD runs into such a condition, it returns
+an RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED error. In such a case, the PMD resets
+itself and can set consumed = 0 and produced = the amount of output it could produce
+before hitting the out of space condition. The application would need to resubmit
+the whole input with a larger output buffer, if it wants the operation to be completed.
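+
+A sketch of that recovery path (``grow_mbuf()`` is a hypothetical application
+helper, not library API):
+
+.. code-block:: c
+
+    if (op->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
+        /* Nothing can be salvaged: enlarge the destination buffer
+         * and resubmit the complete input. */
+        op->m_dst = grow_mbuf(op->m_dst); /* hypothetical helper */
+        op->dst.offset = 0;
+        num_enqd = rte_compressdev_enqueue_burst(cdev_id, 0, &op, 1);
+    }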
+
+Hash in Stateless
+~~~~~~~~~~~~~~~~~
+If hash is enabled, the digest buffer will contain valid data after an op is successfully
+processed, i.e. dequeued with status = RTE_COMP_OP_STATUS_SUCCESS.
+
+Checksum in Stateless
+~~~~~~~~~~~~~~~~~~~~~
+If checksum is enabled, the checksum will only be available after an op is successfully
+processed, i.e. dequeued with status = RTE_COMP_OP_STATUS_SUCCESS.
+
+Compression API Stateful operation
+-----------------------------------
+
+The compression API provides the RTE_COMP_FF_STATEFUL_COMPRESSION and
+RTE_COMP_FF_STATEFUL_DECOMPRESSION feature flags for a PMD to reflect
+its support for stateful operations.
+
+A stateful operation in DPDK compression means the application invokes
+enqueue_burst() multiple times to process related chunks of data, because
+the application broke the data into several ops.
+
+In such a case:
+
+- ops are set up with op_type RTE_COMP_OP_STATEFUL,
+- all ops except the last have their flush value set to RTE_COMP_FLUSH_NONE or
+  RTE_COMP_FLUSH_SYNC, and the last has its flush value set to
+  RTE_COMP_FLUSH_FULL or RTE_COMP_FLUSH_FINAL.
+
+When the above conditions are met, the PMD initiates
+stateful processing and releases the acquired resources after processing of the
+operation with flush value = RTE_COMP_FLUSH_FULL/FINAL is complete.
+Unlike stateless, the application can enqueue only one stateful op from
+a particular stream at a time and must attach a stream handle
+to each op.
+
+Stream in Stateful operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A `stream` in DPDK compression is a logical entity which identifies a related set of ops.
+For example, if one large file is broken into multiple chunks, then the file is represented
+by a stream and each chunk of that file is represented by a compression op `rte_comp_op`.
+Whenever an application wants stateful processing of such data, it must get a stream handle
+via a call to ``rte_compressdev_stream_create()`` with an xform; as output, the target PMD
+will return an opaque stream handle to the application, which it must attach to all of the
+ops carrying data of that stream. In stateful processing, every op requires the previous
+op's data for compression/decompression. A PMD allocates and sets up resources such
+as history, states, etc. within the stream, which are maintained during the processing of
+the related ops.
+
+Unlike priv_xforms, a stream is always a NON_SHAREABLE entity. One stream handle must be
+attached to only one set of related ops and cannot be reused until all of them are processed
+with status success or failure.
+
+.. figure:: img/stateful-op.*
+
+ Stateful Ops
+
+
+The application should call ``rte_compressdev_stream_create()`` and attach the stream to each
+op before enqueuing it for processing, and free the stream via ``rte_compressdev_stream_free()``
+during termination. All ops that are to be processed statefully should carry the *same* stream.
+
+See *DPDK API Reference* document for details.
+
+Example pseudocode to set up and process a stream having NUM_CHUNKS, each chunk of size CHUNK_LEN, would look like:
+
+.. code-block:: c
+
+    /*
+     * pseudocode for stateful compression
+     */
+
+    uint8_t cdev_id = rte_compressdev_get_dev_id(<pmd name>);
+
+    /* configure the device. */
+    if (rte_compressdev_configure(cdev_id, &conf) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to configure compressdev %u", cdev_id);
+
+    if (rte_compressdev_queue_pair_setup(cdev_id, 0, NUM_MAX_INFLIGHT_OPS,
+                socket_id()) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to setup queue pair\n");
+
+    if (rte_compressdev_start(cdev_id) < 0)
+        rte_exit(EXIT_FAILURE, "Failed to start device\n");
+
+    /* setup compress transform. */
+    struct rte_comp_xform compress_xform = {
+        .type = RTE_COMP_COMPRESS,
+        .compress = {
+            .algo = RTE_COMP_ALGO_DEFLATE,
+            .deflate = {
+                .huffman = RTE_COMP_HUFFMAN_DEFAULT
+            },
+            .level = RTE_COMP_LEVEL_PMD_DEFAULT,
+            .chksum = RTE_COMP_CHECKSUM_NONE,
+            .window_size = DEFAULT_WINDOW_SIZE,
+            .hash_algo = RTE_COMP_HASH_ALGO_NONE
+        }
+    };
+
+    /* create stream */
+    void *stream;
+    rte_compressdev_stream_create(cdev_id, &compress_xform, &stream);
+
+    /* create an op pool and allocate ops */
+    rte_comp_op_bulk_alloc(op_pool, comp_ops, NUM_CHUNKS);
+
+    /* Prepare source and destination mbufs for compression operations */
+    unsigned int i;
+    for (i = 0; i < NUM_CHUNKS; i++) {
+        if (rte_pktmbuf_append(src_mbufs[i], CHUNK_LEN) == NULL)
+            rte_exit(EXIT_FAILURE, "Not enough room in the mbuf\n");
+        if (rte_pktmbuf_append(dst_mbufs[i], CHUNK_LEN) == NULL)
+            rte_exit(EXIT_FAILURE, "Not enough room in the mbuf\n");
+    }
+
+    /* Set up the compress operations. */
+    for (i = 0; i < NUM_CHUNKS; i++) {
+        struct rte_comp_op *op = comp_ops[i];
+        op->stream = stream;
+        op->m_src = src_mbufs[i];
+        op->m_dst = dst_mbufs[i];
+        op->type = RTE_COMP_OP_STATEFUL;
+        if (i == NUM_CHUNKS - 1) {
+            /* set to final, if last chunk */
+            op->flush = RTE_COMP_FLUSH_FINAL;
+        } else {
+            /* set to NONE, for all intermediary ops */
+            op->flush = RTE_COMP_FLUSH_NONE;
+        }
+        op->src.offset = 0;
+        op->dst.offset = 0;
+        op->src.length = CHUNK_LEN;
+        op->input_chksum = 0;
+        num_enqd = rte_compressdev_enqueue_burst(cdev_id, 0, &comp_ops[i], 1);
+        /* wait for this to complete before enqueuing next */
+        do {
+            num_deqd = rte_compressdev_dequeue_burst(cdev_id, 0, &processed_ops, 1);
+        } while (num_deqd < num_enqd);
+        /* push next op */
+    }
+
+
+Stateful and OUT_OF_SPACE
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the PMD supports stateful operation, then an OUT_OF_SPACE status is not an actual
+error for the PMD. In such a case, the PMD returns with status
+RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE, with consumed = the number of input bytes
+read and produced = the length of the complete output buffer.
+The application should enqueue the next op with source starting at consumed+1 and an
+output buffer with available space.
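+
+A sketch of that recovery path (``next_op`` and the fresh destination mbuf are
+illustrative assumptions):
+
+.. code-block:: c
+
+    if (op->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE) {
+        /* Output up to op->produced is valid; continue the stream
+         * from the first unconsumed input byte. */
+        next_op->stream = op->stream;
+        next_op->m_src = op->m_src;
+        next_op->src.offset = op->src.offset + op->consumed;
+        next_op->src.length = op->src.length - op->consumed;
+        next_op->m_dst = fresh_dst_mbuf; /* new buffer with free space */
+        next_op->dst.offset = 0;
+        num_enqd = rte_compressdev_enqueue_burst(cdev_id, 0, &next_op, 1);
+    }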
+
+Hash in Stateful
+~~~~~~~~~~~~~~~~
+If enabled, the digest buffer will contain a valid digest after the last op in the stream
+(having flush = RTE_COMP_FLUSH_FINAL) is successfully processed, i.e. dequeued
+with status = RTE_COMP_OP_STATUS_SUCCESS.
+
+Checksum in Stateful
+~~~~~~~~~~~~~~~~~~~~
+If enabled, the checksum will only be available after the last op in the stream
+(having flush = RTE_COMP_FLUSH_FINAL) is successfully processed, i.e. dequeued
+with status = RTE_COMP_OP_STATUS_SUCCESS.
+
+Burst in compression API
+-------------------------
+
+Scheduling of compression operations on DPDK's application data path is
+performed using a burst oriented asynchronous API set. A queue pair on a compression
+device accepts a burst of compression operations using the enqueue burst API. On physical
+devices the enqueue burst API will place the operations to be processed
+on the device's hardware input queue; for virtual devices the processing of the
+operations is usually completed during the enqueue call to the compression
+device. The dequeue burst API will retrieve any processed operations available
+from the queue pair on the compression device; for physical devices this is usually
+directly from the device's processed queue, and for virtual devices from a
+``rte_ring`` where processed operations are placed after being processed on the
+enqueue call.
+
+A burst in DPDK compression can be a combination of stateless and stateful operations, with
+the condition that for stateful ops only one op at a time should be enqueued from a particular
+stream, i.e. no two ops should belong to the same stream in a single burst. However, a burst
+may contain multiple stateful ops, as long as each op is attached to a different stream,
+i.e. a burst can look like:
+
++---------------+--------------+--------------+-----------------+--------------+--------------+
+| enqueue_burst | op1.no_flush | op2.no_flush | op3.flush_final | op4.no_flush | op5.no_flush |
++---------------+--------------+--------------+-----------------+--------------+--------------+
+
+Where op1 .. op5 all belong to different independent data units. op1, op2, op4 and op5 must
+be stateful, as stateless ops can only use flush full or final, while op3 can be of type
+stateless or stateful.
+Every op with type set to RTE_COMP_OP_STATELESS must be attached to a priv_xform, and
+every op with type set to RTE_COMP_OP_STATEFUL *must* be attached to a stream.
+
+Since each operation in a burst is independent and thus can be completed
+out of order, applications which need ordering should set up a per-op user data
+area with reordering information, so that the enqueue order can be determined at
+dequeue.
+
+Also, if multiple threads call enqueue_burst() on the same queue pair, it is the
+application's responsibility to use a proper locking mechanism to ensure exclusive
+enqueuing of operations.
+
+Enqueue / Dequeue Burst APIs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The burst enqueue API uses a compression device identifier and a queue pair
+identifier to specify the compression device queue pair to schedule the processing on.
+The ``nb_ops`` parameter is the number of operations to process which are
+supplied in the ``ops`` array of ``rte_comp_op`` structures.
+The enqueue function returns the number of operations it actually enqueued for
+processing; a return value equal to ``nb_ops`` means that all operations have been
+enqueued.
+
+The dequeue API uses the same format as the enqueue API, but
+the ``nb_ops`` and ``ops`` parameters are now used to specify the maximum number of
+processed operations the user wishes to retrieve and the location in which to store them.
+The API call returns the actual number of processed operations returned; this
+can never be larger than ``nb_ops``.
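+
+A sketch of handling a partial enqueue (the retry policy is an application
+choice, not mandated by the API):
+
+.. code-block:: c
+
+    uint16_t sent = 0;
+
+    /* Retry until the queue pair has accepted the whole burst. */
+    while (sent < nb_ops)
+        sent += rte_compressdev_enqueue_burst(cdev_id, qp_id,
+                &ops[sent], nb_ops - sent);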
+
+Sample code
+-----------
+
+There is a unit test application that shows how to use the compressdev library, inside
+test/test/test_compressdev.c.
+
+Compression Device API
+~~~~~~~~~~~~~~~~~~~~~~
+
+The compressdev Library API is described in the *DPDK API Reference* document.
diff --git a/doc/guides/prog_guide/cryptodev_lib.rst b/doc/guides/prog_guide/cryptodev_lib.rst
index 066fe2d2..90d01e93 100644
--- a/doc/guides/prog_guide/cryptodev_lib.rst
+++ b/doc/guides/prog_guide/cryptodev_lib.rst
@@ -8,7 +8,7 @@ The cryptodev library provides a Crypto device framework for management and
provisioning of hardware and software Crypto poll mode drivers, defining generic
APIs which support a number of different Crypto operations. The framework
currently only supports cipher, authentication, chained cipher/authentication
-and AEAD symmetric Crypto operations.
+and AEAD symmetric and asymmetric Crypto operations.
Design Principles
@@ -41,7 +41,7 @@ From the command line using the --vdev EAL option
.. code-block:: console
- --vdev 'crypto_aesni_mb0,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0'
+ --vdev 'crypto_aesni_mb0,max_nb_queue_pairs=2,socket_id=0'
.. Note::
@@ -57,12 +57,11 @@ Or using the rte_vdev_init API within the application code.
.. code-block:: c
rte_vdev_init("crypto_aesni_mb",
- "max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0")
+ "max_nb_queue_pairs=2,socket_id=0")
All virtual Crypto devices support the following initialization parameters:
* ``max_nb_queue_pairs`` - maximum number of queue pairs supported by the device.
-* ``max_nb_sessions`` - maximum number of sessions supported by the device
* ``socket_id`` - socket on which to allocate the device resources on.
@@ -159,8 +158,8 @@ Device Features and Capabilities
Crypto devices define their functionality through two mechanisms, global device
features and algorithm capabilities. Global devices features identify device
wide level features which are applicable to the whole device such as
-the device having hardware acceleration or supporting symmetric Crypto
-operations,
+the device having hardware acceleration or supporting symmetric and/or asymmetric
+Crypto operations.
The capabilities mechanism defines the individual algorithms/functions which
the device supports, such as a specific symmetric Crypto cipher,
@@ -269,7 +268,7 @@ relevant information for the device.
struct rte_cryptodev_info {
const char *driver_name;
uint8_t driver_id;
- struct rte_pci_device *pci_dev;
+ struct rte_device *device;
uint64_t feature_flags;
@@ -299,6 +298,33 @@ directly from the devices processed queue, and for virtual device's from a
enqueue call.
+Private data
+~~~~~~~~~~~~
+For session-based operations, the set and get APIs provide a mechanism for an
+application to store and retrieve private user data information along
+with the crypto session.
+
+For example, suppose an application is submitting a crypto operation with a session
+associated and wants to indicate private user data information which is required to be
+used after completion of the crypto operation. In this case, the application can use
+the set API to set the user data and retrieve it using the get API.
+
+.. code-block:: c
+
+ int rte_cryptodev_sym_session_set_user_data(
+ struct rte_cryptodev_sym_session *sess, void *data, uint16_t size);
+
+ void * rte_cryptodev_sym_session_get_user_data(
+ struct rte_cryptodev_sym_session *sess);
+
+
+For session-less mode, the private user data information can be placed along with the
+``struct rte_crypto_op``. The ``rte_crypto_op::private_data_offset`` indicates the
+start of private data information. The offset is counted from the start of the
+rte_crypto_op including other crypto information such as the IVs (since there can
+be an IV also for authentication).
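+
+A minimal usage sketch (the ``struct app_ctx`` contents are an illustrative
+assumption, and the session mempool must have been sized with room for the
+user data):
+
+.. code-block:: c
+
+    struct app_ctx {
+        uint32_t flow_id;
+    } ctx = { .flow_id = 42 };
+
+    /* Store application context with the session before enqueuing ops. */
+    if (rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx)) != 0)
+        rte_exit(EXIT_FAILURE, "No room for user data in session\n");
+
+    /* After dequeue, recover the context from the op's session. */
+    struct app_ctx *p =
+        rte_cryptodev_sym_session_get_user_data(op->sym->session);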
+
+
Enqueue / Dequeue Burst APIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -419,7 +445,7 @@ Crypto workloads.
.. figure:: img/cryptodev_sym_sess.*
-The Crypto device framework provides APIs to allocate and initizalize sessions
+The Crypto device framework provides APIs to allocate and initialize sessions
for crypto devices, where sessions are mempool objects.
It is the application's responsibility to create and manage the session mempools.
This approach allows for different scenarios such as having a single session
@@ -427,12 +453,12 @@ mempool for all crypto devices (where the mempool object size is big
enough to hold the private session of any crypto device), as well as having
multiple session mempools of different sizes for better memory usage.
-An application can use ``rte_cryptodev_get_private_session_size()`` to
+An application can use ``rte_cryptodev_sym_get_private_session_size()`` to
get the private session size of given crypto device. This function would allow
an application to calculate the max device session size of all crypto devices
to create a single session mempool.
If instead an application creates multiple session mempools, the Crypto device
-framework also provides ``rte_cryptodev_get_header_session_size`` to get
+framework also provides ``rte_cryptodev_sym_get_header_session_size`` to get
the size of an uninitialized session.
Once the session mempools have been created, ``rte_cryptodev_sym_session_create()``
@@ -635,7 +661,7 @@ using one of the crypto PMDs available in DPDK.
uint8_t cdev_id = rte_cryptodev_get_dev_id(crypto_name);
/* Get private session data size. */
- session_size = rte_cryptodev_get_private_session_size(cdev_id);
+ session_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
/*
* Create session mempool, with two objects per session,
@@ -761,14 +787,260 @@ using one of the crypto PMDs available in DPDK.
num_dequeued_ops);
} while (total_num_dequeued_ops < num_enqueued_ops);
-
Asymmetric Cryptography
-----------------------
-Asymmetric functionality is currently not supported by the cryptodev API.
+The cryptodev library currently provides support for the following asymmetric
+Crypto operations: RSA, Modular exponentiation and inversion, Diffie-Hellman
+public and/or private key generation and shared secret computation, and DSA
+signature generation and verification.
+
+Session and Session Management
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Sessions are used in asymmetric cryptographic processing to store the immutable
+data defined in the asymmetric cryptographic transform which is used in the
+operation processing. Sessions typically store information such as public
+and private key information, domain params or prime modulus data, i.e. data immutable
+across data sets. Crypto sessions cache this immutable data in an optimal way for the
+underlying PMD, and this allows further acceleration of the offload of Crypto workloads.
+
+Like symmetric, the Crypto device framework provides APIs to allocate and initialize
+asymmetric sessions for crypto devices, where sessions are mempool objects.
+It is the application's responsibility to create and manage the session mempools.
+Applications using both symmetric and asymmetric sessions should allocate and maintain
+separate session pools for each type.
+
+An application can use ``rte_cryptodev_asym_get_private_session_size()`` to
+get the private size of asymmetric session on a given crypto device. This
+function would allow an application to calculate the max device asymmetric
+session size of all crypto devices to create a single session mempool.
+If instead an application creates multiple asymmetric session mempools,
+the Crypto device framework also provides ``rte_cryptodev_asym_get_header_session_size()`` to get
+the size of an uninitialized session.
+
+Once the session mempools have been created, ``rte_cryptodev_asym_session_create()``
+is used to allocate an uninitialized asymmetric session from the given mempool.
+The session then must be initialized using ``rte_cryptodev_asym_session_init()``
+for each of the required crypto devices. An asymmetric transform chain
+is used to specify the operation and its parameters. See the section below for
+details on transforms.
+
+When a session is no longer used, the user must call ``rte_cryptodev_asym_session_clear()``
+for each of the crypto devices that are using the session, to free all driver
+private asymmetric session data. Once this is done, the session should be freed using
+``rte_cryptodev_asym_session_free()``, which returns it to its mempool.
+
+Asymmetric Sessionless Support
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The asymmetric crypto framework does not currently support sessionless operations.
+
+Transforms and Transform Chaining
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Asymmetric Crypto transforms (``rte_crypto_asym_xform``) are the mechanism used
+to specify the details of the asymmetric Crypto operation. The next pointer within an
+xform allows transforms to be chained together. It is also important to note that
+the order in which the transforms are passed indicates the order of the chaining.
+
+Not all asymmetric crypto xforms are supported for chaining. The currently supported
+asymmetric crypto chaining is Diffie-Hellman private key generation followed by
+public key generation. Also, the API does not currently support chaining of symmetric
+and asymmetric crypto xforms.
+
+Each xform defines a specific asymmetric crypto algorithm. Currently supported are:
+
+* RSA
+* Modular operations (Exponentiation and Inverse)
+* Diffie-Hellman
+* DSA
+* None - a special case where the PMD may support a passthrough mode, mostly for diagnostic purposes
+
+See *DPDK API Reference* for details on each rte_crypto_xxx_xform struct.
+
+Asymmetric Operations
+~~~~~~~~~~~~~~~~~~~~~
+
+The asymmetric Crypto operation structure contains all the mutable data relating
+to asymmetric cryptographic processing on an input data buffer. It uses either
+RSA, Modular, Diffie-Hellman or DSA operations, depending upon the session it is
+attached to.
+
+Every operation must carry a valid session handle, which further carries information
+on the xform or xform-chain to be performed on the op. Every xform type defines its
+own set of operational params in its respective rte_crypto_xxx_op_param struct.
+Depending on the xform information within the session, the PMD picks up and processes
+the respective op_param struct.
+Unlike symmetric, asymmetric operations do not use mbufs for input/output.
+They operate on data buffers of type ``rte_crypto_param``.
+
+See *DPDK API Reference* for details on each rte_crypto_xxx_op_param struct.
+
+Asymmetric crypto Sample code
+-----------------------------
+
+There is a unit test application test_cryptodev_asym.c inside the unit test framework
+that shows how to set up and process asymmetric operations using the cryptodev library.
+
+The following sample code shows the basic steps to compute modular exponentiation
+with a 1024-bit modulus length using the openssl PMD available in DPDK (performing
+other crypto operations is similar, except for the respective op and xform setup).
+
+.. code-block:: c
+
+ /*
+ * Simple example to compute modular exponentiation with 1024-bit key
+ *
+ */
+ #define MAX_ASYM_SESSIONS 10
+ #define NUM_ASYM_BUFS 10
+
+ struct rte_mempool *crypto_op_pool, *asym_session_pool;
+ unsigned int asym_session_size;
+ int ret;
+
+ /* Initialize EAL. */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+
+ uint8_t socket_id = rte_socket_id();
+
+ /* Create crypto operation pool. */
+ crypto_op_pool = rte_crypto_op_pool_create(
+ "crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ NUM_ASYM_BUFS, 0, 0,
+ socket_id);
+ if (crypto_op_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
+ /* Create the virtual crypto device. */
+ char args[128];
+ const char *crypto_name = "crypto_openssl";
+ snprintf(args, sizeof(args), "socket_id=%d", socket_id);
+ ret = rte_vdev_init(crypto_name, args);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE, "Cannot create virtual device");
-Crypto Device API
-~~~~~~~~~~~~~~~~~
+ uint8_t cdev_id = rte_cryptodev_get_dev_id(crypto_name);
+
+ /* Get private asym session data size. */
+ asym_session_size = rte_cryptodev_asym_get_private_session_size(cdev_id);
+
+ /*
+ * Create session mempool, with two objects per session,
+ * one for the session header and another one for the
+ * private asym session data for the crypto device.
+ */
+ asym_session_pool = rte_mempool_create("asym_session_pool",
+ MAX_ASYM_SESSIONS * 2,
+ asym_session_size,
+ 0,
+ 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+
+ /* Configure the crypto device. */
+ struct rte_cryptodev_config conf = {
+ .nb_queue_pairs = 1,
+ .socket_id = socket_id
+ };
+ struct rte_cryptodev_qp_conf qp_conf = {
+ .nb_descriptors = 2048
+ };
+
+ if (rte_cryptodev_configure(cdev_id, &conf) < 0)
+ rte_exit(EXIT_FAILURE, "Failed to configure cryptodev %u", cdev_id);
+
+ if (rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
+ socket_id, asym_session_pool) < 0)
+ rte_exit(EXIT_FAILURE, "Failed to setup queue pair\n");
+
+ if (rte_cryptodev_start(cdev_id) < 0)
+ rte_exit(EXIT_FAILURE, "Failed to start device\n");
+
+ /* Setup crypto xform to do modular exponentiation with 1024 bit
+ * length modulus
+ */
+ struct rte_crypto_asym_xform modex_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .modex = {
+ .modulus = {
+ .data =
+ (uint8_t *)
+ ("\xb3\xa1\xaf\xb7\x13\x08\x00\x0a\x35\xdc\x2b\x20\x8d"
+ "\xa1\xb5\xce\x47\x8a\xc3\x80\xf4\x7d\x4a\xa2\x62\xfd\x61\x7f"
+ "\xb5\xa8\xde\x0a\x17\x97\xa0\xbf\xdf\x56\x5a\x3d\x51\x56\x4f"
+ "\x70\x70\x3f\x63\x6a\x44\x5b\xad\x84\x0d\x3f\x27\x6e\x3b\x34"
+ "\x91\x60\x14\xb9\xaa\x72\xfd\xa3\x64\xd2\x03\xa7\x53\x87\x9e"
+ "\x88\x0b\xc1\x14\x93\x1a\x62\xff\xb1\x5d\x74\xcd\x59\x63\x18"
+ "\x11\x3d\x4f\xba\x75\xd4\x33\x4e\x23\x6b\x7b\x57\x44\xe1\xd3"
+ "\x03\x13\xa6\xf0\x8b\x60\xb0\x9e\xee\x75\x08\x9d\x71\x63\x13"
+ "\xcb\xa6\x81\x92\x14\x03\x22\x2d\xde\x55"),
+ .length = 128
+ },
+ .exponent = {
+ .data = (uint8_t *)("\x01\x00\x01"),
+ .length = 3
+ }
+ }
+ };
+ /* Create asym crypto session and initialize it for the crypto device. */
+ struct rte_cryptodev_asym_session *asym_session;
+ asym_session = rte_cryptodev_asym_session_create(asym_session_pool);
+ if (asym_session == NULL)
+ rte_exit(EXIT_FAILURE, "Session could not be created\n");
+
+ if (rte_cryptodev_asym_session_init(cdev_id, asym_session,
+ &modex_xform, asym_session_pool) < 0)
+ rte_exit(EXIT_FAILURE, "Session could not be initialized "
+ "for the crypto device\n");
+
+ /* Get a burst of crypto operations. */
+ struct rte_crypto_op *crypto_ops[1];
+ if (rte_crypto_op_bulk_alloc(crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ crypto_ops, 1) == 0)
+ rte_exit(EXIT_FAILURE, "Not enough crypto operations available\n");
+
+ /* Set up the crypto operations. */
+ struct rte_crypto_asym_op *asym_op = crypto_ops[0]->asym;
+
+ /* calculate mod exp of value 0xf8 */
+ static unsigned char base[] = {0xF8};
+ asym_op->modex.base.data = base;
+ asym_op->modex.base.length = sizeof(base);
+ asym_op->modex.base.iova = rte_mem_virt2iova(base);
+
+ /* Attach the asym crypto session to the operation */
+ rte_crypto_op_attach_asym_session(crypto_ops[0], asym_session);
+
+ /* Enqueue the crypto operations in the crypto device. */
+ uint16_t num_enqueued_ops = rte_cryptodev_enqueue_burst(cdev_id, 0,
+ crypto_ops, 1);
+
+ /*
+ * Dequeue the crypto operations until all the operations
+ * are processed in the crypto device.
+ */
+ uint16_t num_dequeued_ops, total_num_dequeued_ops = 0;
+ do {
+ struct rte_crypto_op *dequeued_ops[1];
+ num_dequeued_ops = rte_cryptodev_dequeue_burst(cdev_id, 0,
+ dequeued_ops, 1);
+ total_num_dequeued_ops += num_dequeued_ops;
+
+ /* Check if operation was processed successfully */
+ if (dequeued_ops[0]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
+ rte_exit(EXIT_FAILURE,
+ "Some operations were not processed correctly");
+
+ } while (total_num_dequeued_ops < num_enqueued_ops);
+
+
+Asymmetric Crypto Device API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The cryptodev Library API is described in the *DPDK API Reference* document.
+The cryptodev Library API is described in the
+`DPDK API Reference <http://dpdk.org/doc/api/>`_
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst
index 9a2fab1e..d362c920 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -94,10 +94,125 @@ The allocation of large contiguous physical memory is done using the hugetlbfs k
The EAL provides an API to reserve named memory zones in this contiguous memory.
The physical address of the reserved memory for that memory zone is also returned to the user by the memory zone reservation API.
+There are two modes in which DPDK memory subsystem can operate: dynamic mode,
+and legacy mode. Both modes are explained below.
+
.. note::
Memory reservations done using the APIs provided by rte_malloc are also backed by pages from the hugetlbfs filesystem.
++ Dynamic memory mode
+
+Currently, this mode is only supported on Linux.
+
+In this mode, usage of hugepages by a DPDK application will grow and shrink based
+on the application's requests. Any memory allocation through ``rte_malloc()``,
+``rte_memzone_reserve()`` or other methods can potentially result in more
+hugepages being reserved from the system. Similarly, any memory deallocation can
+potentially result in hugepages being released back to the system.
+
+Memory allocated in this mode is not guaranteed to be IOVA-contiguous. If large
+chunks of IOVA-contiguous memory are required (with "large" defined as "more than one
+page"), it is recommended to either use the VFIO driver for all physical devices (so
+that IOVA and VA addresses can be the same, thereby bypassing physical addresses
+entirely), or use legacy memory mode.
+
+For chunks of memory which must be IOVA-contiguous, it is recommended to use the
+``rte_memzone_reserve()`` function with the ``RTE_MEMZONE_IOVA_CONTIG`` flag
+specified. This way, the memory allocator will ensure that, whatever memory mode is
+in use, either the reserved memory will satisfy the requirements, or the allocation
+will fail.
+
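+For instance, a physically contiguous DMA region might be reserved as below
+(a sketch; the zone name and size are illustrative assumptions):
+
+.. code-block:: c
+
+    #include <rte_memzone.h>
+
+    /* Reserve 2 MB of IOVA-contiguous memory on the caller's socket. */
+    const struct rte_memzone *mz = rte_memzone_reserve("dma_region",
+            2 * 1024 * 1024, rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG);
+    if (mz == NULL)
+        rte_exit(EXIT_FAILURE, "Cannot reserve IOVA-contiguous memory\n");
+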
+There is no need to preallocate any memory at startup using the ``-m`` or
+``--socket-mem`` command-line parameters. However, it is still possible to do so,
+in which case the preallocated memory will be "pinned" (i.e. will never be released
+by the application back to the system). It will be possible to allocate more
+hugepages, and deallocate those, but any preallocated pages will not be freed.
+If neither ``-m`` nor ``--socket-mem`` were specified, no memory will be
+preallocated, and all memory will be allocated at runtime, as needed.
+
+Another option available in dynamic memory mode is the
+``--single-file-segments`` command-line option. This option will put pages in
+single files (per memseg list), as opposed to creating a file per page. This is
+normally not needed, but can be useful for use cases like userspace vhost, where
+there is a limited number of page file descriptors that can be passed to VirtIO.
+
+If the application (or DPDK-internal code, such as device drivers) wishes to
+receive notifications about newly allocated memory, it is possible to register
+for memory event callbacks via the ``rte_mem_event_callback_register()`` function.
+This will call a callback function any time DPDK's memory map has changed.
+
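+A registration sketch, assuming the callback signature from this release (the
+callback body is illustrative):
+
+.. code-block:: c
+
+    #include <rte_memory.h>
+
+    static void
+    mem_event_cb(enum rte_mem_event event_type, const void *addr,
+            size_t len, void *arg __rte_unused)
+    {
+        /* React to RTE_MEM_EVENT_ALLOC / RTE_MEM_EVENT_FREE here. */
+        printf("mem event %d: %p, len %zu\n", (int)event_type, addr, len);
+    }
+
+    /* ... */
+    if (rte_mem_event_callback_register("my-app", mem_event_cb, NULL) < 0)
+        rte_exit(EXIT_FAILURE, "Cannot register memory event callback\n");
+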
+If the application (or DPDK-internal code, such as device drivers) wishes to be
+notified about memory allocations above a specified threshold (and have a chance
+to deny them), allocation validator callbacks are also available via the
+``rte_mem_alloc_validator_callback_register()`` function.
+
+A default validator callback is provided by the EAL, which can be enabled with the
+``--socket-limit`` command-line option, for a simple way to limit the maximum amount
+of memory that can be used by a DPDK application.
+
+.. note::
+
+ In a multiprocess scenario, all related processes (i.e. the primary process, and
+ secondary processes running with the same prefix) must use the same memory
+ mode. That is, if the primary process is run in dynamic memory mode, all of its
+ secondary processes must be run in the same mode. The same is applicable to the
+ ``--single-file-segments`` command-line option - both primary and secondary
+ processes must share this mode.
+
++ Legacy memory mode
+
+This mode is enabled by specifying ``--legacy-mem`` command-line switch to the
+EAL. This switch will have no effect on FreeBSD as FreeBSD only supports
+legacy mode anyway.
+
+This mode mimics historical behavior of EAL. That is, EAL will reserve all
+memory at startup, sort all memory into large IOVA-contiguous chunks, and will
+not allow acquiring or releasing hugepages from the system at runtime.
+
+If neither ``-m`` nor ``--socket-mem`` were specified, the entire available
+hugepage memory will be preallocated.
+
++ 32-bit support
+
+Additional restrictions are present when running in 32-bit mode. In dynamic
+memory mode, by default a maximum of 2 gigabytes of VA space will be preallocated,
+and all of it will be on the master lcore NUMA node unless the ``--socket-mem`` flag
+is used.
+
+In legacy mode, VA space will only be preallocated for segments that were
+requested (plus padding, to keep IOVA-contiguousness).
+
++ Maximum amount of memory
+
+All possible virtual memory space that can ever be used for hugepage mapping in
+a DPDK process is preallocated at startup, thereby placing an upper limit on how
+much memory a DPDK application can have. DPDK memory is stored in segment lists,
+where each segment is strictly one physical page. It is possible to change the amount
+of virtual memory being preallocated at startup by editing the following config
+variables:
+
+* ``CONFIG_RTE_MAX_MEMSEG_LISTS`` controls how many segment lists DPDK can have
+* ``CONFIG_RTE_MAX_MEM_MB_PER_LIST`` controls how many megabytes of memory each
+  segment list can address
+* ``CONFIG_RTE_MAX_MEMSEG_PER_LIST`` controls how many segments each segment list
+  can have
+* ``CONFIG_RTE_MAX_MEMSEG_PER_TYPE`` controls how many segments each memory type
+  can have (where "type" is defined as a "page size + NUMA node" combination)
+* ``CONFIG_RTE_MAX_MEM_MB_PER_TYPE`` controls how many megabytes of memory each
+  memory type can address
+* ``CONFIG_RTE_MAX_MEM_MB`` places a global maximum on the amount of memory
+  DPDK can reserve
+
+Normally, these options do not need to be changed.
+
+.. note::
+
+ Preallocated virtual memory is not to be confused with preallocated hugepage
+ memory! All DPDK processes preallocate virtual memory at startup. Hugepages
+ can later be mapped into that preallocated VA space (if dynamic memory mode
+ is enabled), and can optionally be mapped into it at startup.
+
PCI Access
~~~~~~~~~~
@@ -211,7 +326,7 @@ Memory Segments and Memory Zones (memzone)
The mapping of physical memory is provided by this feature in the EAL.
As physical memory can have gaps, the memory is described in a table of descriptors,
-and each descriptor (called rte_memseg ) describes a contiguous portion of memory.
+and each descriptor (called rte_memseg ) describes a physical page.
On top of this, the memzone allocator's role is to reserve contiguous portions of physical memory.
These zones are identified by a unique name when the memory is reserved.
@@ -225,6 +340,9 @@ Memory zones can be reserved with specific start address alignment by supplying
The alignment value should be a power of two and not less than the cache line size (64 bytes).
Memory zones can also be reserved from either 2 MB or 1 GB hugepages, provided that both are available on the system.
+Both memsegs and memzones are stored using ``rte_fbarray`` structures. Please
+refer to *DPDK API Reference* for more information.
+
Multiple pthread
----------------
@@ -331,17 +449,21 @@ Known Issues
Bypassing this constraint may cause the 2nd pthread to spin until the 1st one is scheduled again.
Moreover, if the 1st pthread is preempted by a context that has an higher priority, it may even cause a dead lock.
- This does not mean it cannot be used, simply, there is a need to narrow down the situation when it is used by multi-pthread on the same core.
+ This means that use cases involving preemptible pthreads should consider using rte_ring carefully.
+
+ 1. It CAN be used for preemptible single-producer and single-consumer use case.
- 1. It CAN be used for any single-producer or single-consumer situation.
+ 2. It CAN be used for non-preemptible multi-producer and preemptible single-consumer use case.
- 2. It MAY be used by multi-producer/consumer pthread whose scheduling policy are all SCHED_OTHER(cfs). User SHOULD be aware of the performance penalty before using it.
+ 3. It CAN be used for preemptible single-producer and non-preemptible multi-consumer use case.
- 3. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.
+ 4. It MAY be used by preemptible multi-producer and/or preemptible multi-consumer pthreads whose scheduling policy are all SCHED_OTHER(cfs), SCHED_IDLE or SCHED_BATCH. User SHOULD be aware of the performance penalty before using it.
+
+ 5. It MUST not be used by multi-producer/consumer pthreads, whose scheduling policies are SCHED_FIFO or SCHED_RR.
+ rte_timer
- Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
+ Running ``rte_timer_manage()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
+ rte_log
@@ -453,11 +575,9 @@ The key fields of the heap structure and their function are described below
* free_head - this points to the first element in the list of free nodes for
this malloc heap.
-.. note::
+* first - this points to the first element in the heap.
- The malloc_heap structure does not keep track of in-use blocks of memory,
- since these are never touched except when they are to be freed again -
- at which point the pointer to the block is an input to the free() function.
+* last - this points to the last element in the heap.
.. _figure_malloc_heap:
@@ -473,16 +593,21 @@ Structure: malloc_elem
The malloc_elem structure is used as a generic header structure for various
blocks of memory.
-It is used in three different ways - all shown in the diagram above:
+It is used in two different ways - both shown in the diagram above:
#. As a header on a block of free or allocated memory - normal case
#. As a padding header inside a block of memory
-#. As an end-of-memseg marker
-
The most important fields in the structure and how they are used are described below.
+The malloc heap is a doubly-linked list, where each element keeps track of its
+previous and next elements. Because hugepage memory can come and
+go, neighbouring malloc elements may not necessarily be adjacent in memory.
+Also, since a malloc element may span multiple pages, its contents may not
+necessarily be IOVA-contiguous either - each malloc element is only guaranteed
+to be virtually contiguous.
+
.. note::
If the usage of a particular field in one of the above three usages is not
@@ -495,13 +620,20 @@ The most important fields in the structure and how they are used are described b
It is used for normal memory blocks when they are being freed, to add the
newly-freed block to the heap's free-list.
-* prev - this pointer points to the header element/block in the memseg
- immediately behind the current one. When freeing a block, this pointer is
- used to reference the previous block to check if that block is also free.
- If so, then the two free blocks are merged to form a single larger block.
+* prev - this pointer points to previous header element/block in memory. When
+ freeing a block, this pointer is used to reference the previous block to
+ check if that block is also free. If so, and the two blocks are immediately
+ adjacent to each other, then the two free blocks are merged to form a single
+ larger block.
-* next_free - this pointer is used to chain the free-list of unallocated
- memory blocks together.
+* next - this pointer points to next header element/block in memory. When
+ freeing a block, this pointer is used to reference the next block to check
+ if that block is also free. If so, and the two blocks are immediately
+ adjacent to each other, then the two free blocks are merged to form a single
+ larger block.
+
+* free_list - this is a structure pointing to previous and next elements in
+ this heap's free list.
It is only used in normal memory blocks; on ``malloc()`` to find a suitable
free block to allocate and on ``free()`` to add the newly freed element to
the free-list.
@@ -515,9 +647,6 @@ The most important fields in the structure and how they are used are described b
constraints.
In that case, the pad header is used to locate the actual malloc element
header for the block.
- For the end-of-memseg structure, this is always a ``BUSY`` value, which
- ensures that no element, on being freed, searches beyond the end of the
- memseg for other blocks to merge with into a larger free area.
* pad - this holds the length of the padding present at the start of the block.
In the case of a normal block header, it is added to the address of the end
@@ -528,22 +657,19 @@ The most important fields in the structure and how they are used are described b
actual block header.
* size - the size of the data block, including the header itself.
- For end-of-memseg structures, this size is given as zero, though it is never
- actually checked.
- For normal blocks which are being freed, this size value is used in place of
- a "next" pointer to identify the location of the next block of memory that
- in the case of being ``FREE``, the two free blocks can be merged into one.
Memory Allocation
^^^^^^^^^^^^^^^^^
-On EAL initialization, all memsegs are setup as part of the malloc heap.
-This setup involves placing a dummy structure at the end with ``BUSY`` state,
-which may contain a sentinel value if ``CONFIG_RTE_MALLOC_DEBUG`` is enabled,
-and a proper :ref:`element header<malloc_elem>` with ``FREE`` at the start
-for each memseg.
+On EAL initialization, all preallocated memory segments are set up as part of the
+malloc heap. This setup involves placing an :ref:`element header<malloc_elem>`
+with ``FREE`` at the start of each virtually contiguous segment of memory.
The ``FREE`` element is then added to the ``free_list`` for the malloc heap.
+This setup also happens whenever memory is allocated at runtime (if supported),
+in which case newly allocated pages are also added to the heap, merging with any
+adjacent free segments if there are any.
+
When an application makes a call to a malloc-like function, the malloc function
will first index the ``lcore_config`` structure for the calling thread, and
determine the NUMA node of that thread.
@@ -574,8 +700,34 @@ the start and/or end of the element, resulting in the following behavior:
The advantage of allocating the memory from the end of the existing element is
that no adjustment of the free list needs to take place - the existing element
-on the free list just has its size pointer adjusted, and the following element
-has its "prev" pointer redirected to the newly created element.
+on the free list just has its size value adjusted, and the next/previous elements
+have their "prev"/"next" pointers redirected to the newly created element.
+
+In the case when there is not enough memory in the heap to satisfy an allocation
+request, the EAL will attempt to allocate more memory from the system (if supported)
+and, following a successful allocation, will retry reserving the memory. In
+a multiprocessing scenario, all primary and secondary processes will synchronize
+their memory maps to ensure that any valid pointer to DPDK memory is guaranteed
+to be valid at all times in all currently running processes.
+
+Failure to synchronize memory maps in one of the processes will cause the allocation
+to fail, even though some of the processes may have allocated the memory
+successfully. The memory is not added to the malloc heap unless the primary process
+has ensured that all other processes have mapped this memory successfully.
+
+Any successful allocation event will trigger a callback, for which user
+applications and other DPDK subsystems can register. Additionally, validation
+callbacks will be triggered before allocation if the newly allocated memory would
+exceed the threshold set by the user, giving a chance to allow or deny the allocation.
+
+.. note::
+
+ Any allocation of new pages has to go through the primary process. If the
+ primary process is not active, no memory will be allocated, even if it was
+ theoretically possible to do so. This is because the primary process's memory map
+ acts as an authority on what should or should not be mapped, while each secondary
+ process has its own, local memory map. Secondary processes do not update the
+ shared memory map, they only copy its contents to their local memory map.
Freeing Memory
^^^^^^^^^^^^^^
@@ -589,8 +741,17 @@ the pointer to get the proper element header for the entire block.
From this element header, we get pointers to the heap from which the block was
allocated and to where it must be freed, as well as the pointer to the previous
-element, and via the size field, we can calculate the pointer to the next element.
-These next and previous elements are then checked to see if they are also
-``FREE``, and if so, they are merged with the current element.
-This means that we can never have two ``FREE`` memory blocks adjacent to one
-another, as they are always merged into a single block.
+and next elements. These next and previous elements are then checked to see if
+they are also ``FREE`` and are immediately adjacent to the current one, and if
+so, they are merged with the current element. This means that we can never have
+two ``FREE`` memory blocks adjacent to one another, as they are always merged
+into a single block.
+
+If deallocating pages at runtime is supported, and the free element encloses
+one or more pages, those pages can be deallocated and removed from the heap.
+If DPDK was started with command-line parameters for preallocating memory
+(``-m`` or ``--socket-mem``), then those pages that were allocated at startup
+will not be deallocated.
+
+Any successful deallocation event will trigger a callback, for which user
+applications and other DPDK subsystems can register.
diff --git a/doc/guides/prog_guide/event_crypto_adapter.rst b/doc/guides/prog_guide/event_crypto_adapter.rst
new file mode 100644
index 00000000..9fe09c80
--- /dev/null
+++ b/doc/guides/prog_guide/event_crypto_adapter.rst
@@ -0,0 +1,296 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation. All rights reserved.
+
+Event Crypto Adapter Library
+============================
+
+The DPDK :doc:`Eventdev library <eventdev>` provides an event driven
+programming model with features to schedule events.
+The :doc:`Cryptodev library <cryptodev_lib>` provides an interface to
+the crypto poll mode drivers which support different crypto operations.
+The Event Crypto Adapter is one of the adapters intended to
+bridge between the event device and the crypto device.
+
+The packet flow from the crypto device to the event device can be accomplished
+using SW- or HW-based transfer mechanisms.
+The adapter queries an eventdev PMD to determine which mechanism should be used.
+The adapter uses an EAL service core function for SW based packet transfer
+and uses the eventdev PMD functions to configure HW based packet transfer
+between the crypto device and the event device. The crypto adapter uses a new
+event type called ``RTE_EVENT_TYPE_CRYPTODEV`` to indicate the event source.
+
+The application can choose to submit a crypto operation directly to the
+crypto device or send it to the crypto adapter via the eventdev, based on the
+RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
+The first mode is known as the event new (RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+mode and the second as the event forward (RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD)
+mode. The choice of mode can be specified while creating the adapter.
+In the former mode, it is the application's responsibility to enable ingress
+packet ordering; in the latter mode, it is the adapter's responsibility to
+enable ingress packet ordering.
+
+
+Adapter Mode
+------------
+
+RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, the application submits crypto
+operations directly to the crypto device. The adapter then dequeues crypto
+completions from the crypto device and enqueues them as events to the event
+device. This mode does not ensure ingress ordering if the application directly
+enqueues to the cryptodev without going through a crypto/atomic stage.
+In this mode, events dequeued from the adapter are treated as new events.
+The application needs to specify the event information (response information)
+needed to enqueue an event after the crypto operation is completed.
+
+.. _figure_event_crypto_adapter_op_new:
+
+.. figure:: img/event_crypto_adapter_op_new.*
+
+ Working model of ``RTE_EVENT_CRYPTO_ADAPTER_OP_NEW`` mode
+
+
+RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, if HW supports the
+RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability, the application
+can directly submit the crypto operations to the cryptodev.
+If not, the application retrieves the crypto adapter's event port using the
+rte_event_crypto_adapter_event_port_get() API, links its event
+queue to this port and starts enqueuing crypto operations as events
+to the eventdev. The adapter then dequeues the events and submits the
+crypto operations to the cryptodev. After the crypto operations complete, the
+adapter enqueues events to the event device.
+The application can use this mode when ingress packet ordering is needed.
+In this mode, events dequeued from the adapter will be treated as
+forwarded events. The application needs to specify the cryptodev ID
+and queue pair ID (request information) needed to enqueue a crypto
+operation in addition to the event information (response information)
+needed to enqueue an event after the crypto operation has completed.
+
+.. _figure_event_crypto_adapter_op_forward:
+
+.. figure:: img/event_crypto_adapter_op_forward.*
+
+ Working model of ``RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD`` mode
+
+
+API Overview
+------------
+
+This section gives a brief introduction to the event crypto adapter APIs.
+The application is expected to create an adapter which is associated with
+a single eventdev, then add a cryptodev and queue pair to the adapter instance.
+
+Create an adapter instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An adapter instance is created using ``rte_event_crypto_adapter_create()``. This
+function is called with the event device to be associated with the adapter and
+the port configuration used to set up an event port (if the adapter needs to
+use a service function).
+
+The adapter can be started in ``RTE_EVENT_CRYPTO_ADAPTER_OP_NEW`` or
+``RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD`` mode.
+
+.. code-block:: c
+
+ int err;
+ uint8_t dev_id, id;
+ struct rte_event_dev_info dev_info;
+ struct rte_event_port_conf conf;
+ enum rte_event_crypto_adapter_mode mode;
+
+ err = rte_event_dev_info_get(dev_id, &dev_info);
+
+ conf.new_event_threshold = dev_info.max_num_events;
+ conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+ mode = RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD;
+ err = rte_event_crypto_adapter_create(id, dev_id, &conf, mode);
+
+If the application desires to have finer control of eventdev port allocation
+and setup, it can use the ``rte_event_crypto_adapter_create_ext()`` function.
+The ``rte_event_crypto_adapter_create_ext()`` function is passed a callback
+function. The callback function is invoked if the adapter needs to use a
+service function and needs to create an event port for it. The callback is
+expected to fill the ``struct rte_event_crypto_adapter_conf`` structure
+passed to it.
+
+For RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, the event port created by the
+adapter can be retrieved using the ``rte_event_crypto_adapter_event_port_get()``
+API. The application can link this event port to the event queue on which it
+enqueues events towards the crypto adapter.
+
+.. code-block:: c
+
+ uint8_t id, evdev, crypto_ev_port_id, app_qid, app_ev_port_id;
+ struct rte_event ev;
+ int ret;
+
+ ret = rte_event_crypto_adapter_event_port_get(id, &crypto_ev_port_id);
+ ret = rte_event_queue_setup(evdev, app_qid, NULL);
+ ret = rte_event_port_link(evdev, crypto_ev_port_id, &app_qid, NULL, 1);
+
+ // Fill in event info and update event_ptr with rte_crypto_op
+ memset(&ev, 0, sizeof(ev));
+ ev.queue_id = app_qid;
+ .
+ .
+ ev.event_ptr = op;
+ ret = rte_event_enqueue_burst(evdev, app_ev_port_id, &ev, 1);
+
+Querying adapter capabilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_crypto_adapter_caps_get()`` function allows
+the application to query the adapter capabilities for an eventdev and cryptodev
+combination. This API indicates whether the cryptodev and eventdev are
+connected via an internal HW port.
+
+.. code-block:: c
+
+ rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &cap);
+
+Adding queue pair to the adapter instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A cryptodev id and queue pair are created using the cryptodev APIs.
+For more information, see the :doc:`cryptodev library guide <cryptodev_lib>`.
+
+.. code-block:: c
+
+ struct rte_cryptodev_config conf;
+ struct rte_cryptodev_qp_conf qp_conf;
+ uint8_t cdev_id = 0;
+ uint16_t qp_id = 0;
+
+ rte_cryptodev_configure(cdev_id, &conf);
+ rte_cryptodev_queue_pair_setup(cdev_id, qp_id, &qp_conf);
+
+The cryptodev id and queue pair are added to the adapter instance using the
+``rte_event_crypto_adapter_queue_pair_add()`` API and removed using the
+``rte_event_crypto_adapter_queue_pair_del()`` API.
+If HW supports the RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+capability, event information must be passed to the add API.
+
+.. code-block:: c
+
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(evdev, cdev_id, &cap);
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+ struct rte_event event;
+
+ // Fill in event information & pass it to add API
+ rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, &event);
+ } else
+ rte_event_crypto_adapter_queue_pair_add(id, cdev_id, qp_id, NULL);
+
+Configure the service function
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the adapter uses a service function, the application is required to assign
+a service core to the service function as shown below.
+
+.. code-block:: c
+
+ uint32_t service_id;
+
+ if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0)
+ rte_service_map_lcore_set(service_id, CORE_ID);
+
+Set event request/response information
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, the application needs
+to specify the cryptodev ID and queue pair ID (request information) in
+addition to the event information (response information) needed to enqueue
+an event after the crypto operation has completed. The request and response
+information are specified in the ``struct rte_crypto_op`` private data or in
+the session's private data.
+
+In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, the application is required
+to provide only the response information.
+
+The SW adapter or HW PMD uses ``rte_crypto_op::sess_type`` to
+decide whether the request/response data is located in the crypto session /
+crypto security session or at an offset in the ``struct rte_crypto_op``.
+The ``rte_crypto_op::private_data_offset`` field is used to locate the
+request/response data in the ``rte_crypto_op``.
+
+For a crypto session, the ``rte_cryptodev_sym_session_set_user_data()`` API
+is used to set the request/response data, which can later be retrieved with
+the ``rte_cryptodev_sym_session_get_user_data()`` API. The
+RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA capability indicates
+whether HW or SW supports this feature.
+
+For a security session, the ``rte_security_session_set_private_data()`` API
+is used to set the request/response data, which can later be retrieved with
+the ``rte_security_session_get_private_data()`` API.
+
+For session-less operations, it is mandatory to place the request/response
+data within the ``rte_crypto_op``.
+
+.. code-block:: c
+
+ union rte_event_crypto_metadata m_data;
+ struct rte_event ev;
+ struct rte_crypto_op *op;
+
+ /* Allocate & fill op structure; op_mempool is an application-created
+  * pool of crypto ops and is shown here as an assumed variable. */
+ op = rte_crypto_op_alloc(op_mempool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+ memset(&m_data, 0, sizeof(m_data));
+ memset(&ev, 0, sizeof(ev));
+ /* Fill event information and update event_ptr to rte_crypto_op */
+ ev.event_ptr = op;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ /* Copy response information */
+ rte_memcpy(&m_data.response_info, &ev, sizeof(ev));
+ /* Copy request information */
+ m_data.request_info.cdev_id = cdev_id;
+ m_data.request_info.queue_pair_id = qp_id;
+ /* Call set API to store private data information */
+ rte_cryptodev_sym_session_set_user_data(
+ op->sym->session,
+ &m_data,
+ sizeof(m_data));
+ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
+ (sizeof(struct rte_crypto_sym_xform) * 2);
+ op->private_data_offset = len;
+ /* Copy response information */
+ rte_memcpy(&m_data.response_info, &ev, sizeof(ev));
+ /* Copy request information */
+ m_data.request_info.cdev_id = cdev_id;
+ m_data.request_info.queue_pair_id = qp_id;
+ /* Store private data information along with rte_crypto_op */
+ /* private_data_offset is a byte offset, so cast before adding. */
+ rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data));
+ }
+
+Start the adapter instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application calls ``rte_event_crypto_adapter_start()`` to start the adapter.
+This function calls the start callbacks of the eventdev PMDs for hardware based
+eventdev-cryptodev connections and ``rte_service_run_state_set()`` to enable the
+service function if one exists.
+
+.. code-block:: c
+
+ rte_event_crypto_adapter_start(id);
+
+Get adapter statistics
+~~~~~~~~~~~~~~~~~~~~~~
+
+The ``rte_event_crypto_adapter_stats_get()`` function reports counters defined
+in struct ``rte_event_crypto_adapter_stats``. The received packet and
+enqueued event counts are a sum of the counts from the eventdev PMD callbacks
+if the callback is supported, and the counts maintained by the service function,
+if one exists.
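+
+For illustration, a minimal sketch (``id`` identifies an adapter created
+earlier):
+
+.. code-block:: c
+
+    struct rte_event_crypto_adapter_stats stats;
+
+    ret = rte_event_crypto_adapter_stats_get(id, &stats);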
diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 4ab87a37..0166bb45 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -12,7 +12,11 @@ be supported in hardware or require a software thread to receive packets from
the ethdev port using ethdev poll mode APIs and enqueue these as events to the
event device using the eventdev API. Both transfer mechanisms may be present on
the same platform depending on the particular combination of the ethdev and
-the event device.
+the event device. For SW based packet transfer, if the mbuf does not have a
+timestamp set, the adapter adds a timestamp to the mbuf using
+rte_get_tsc_cycles(). This provides a more accurate timestamp than one set by
+the application after the event has been scheduled, since it avoids including
+the event device scheduling latency.
The Event Ethernet Rx Adapter library is intended for the application code to
configure both transfer mechanisms using a common API. A capability API allows
@@ -140,3 +144,44 @@ enqueued event counts are a sum of the counts from the eventdev PMD callbacks
if the callback is supported, and the counts maintained by the service function,
if one exists. The service function also maintains a count of cycles for which
it was not able to enqueue to the event device.
+
+Interrupt Based Rx Queues
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The service core function is typically set up to poll ethernet Rx queues for
+packets. Certain queues may have low packet rates and it would be more
+efficient to enable the Rx queue interrupt and read packets after receiving
+the interrupt.
+
+The servicing_weight member of struct rte_event_eth_rx_adapter_queue_conf
+is applicable when the adapter uses a service core function. The application
+has to enable Rx queue interrupts when configuring the ethernet device
+using the ``rte_eth_dev_configure()`` function and then use a servicing_weight
+of zero when adding the Rx queue to the adapter.
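+
+A minimal sketch of this configuration, assuming ``eth_dev_id``,
+``rx_queue_id``, the event queue ``ev_queue_id`` and the adapter ``id`` have
+been set up earlier:
+
+.. code-block:: c
+
+    struct rte_eth_conf port_conf = { 0 };
+    struct rte_event_eth_rx_adapter_queue_conf queue_conf = { 0 };
+
+    /* Enable Rx queue interrupts on the ethernet device. */
+    port_conf.intr_conf.rxq = 1;
+    rte_eth_dev_configure(eth_dev_id, nb_rx_queues, nb_tx_queues, &port_conf);
+
+    /* A servicing weight of zero selects interrupt mode for this queue. */
+    queue_conf.servicing_weight = 0;
+    queue_conf.ev.queue_id = ev_queue_id;
+    queue_conf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+    rte_event_eth_rx_adapter_queue_add(id, eth_dev_id, rx_queue_id,
+            &queue_conf);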
+
+The adapter creates a thread blocked on the interrupt; on an interrupt, this
+thread enqueues the port id and the queue id to a ring buffer. The adapter
+service function dequeues the port id and queue id from the ring buffer,
+invokes ``rte_eth_rx_burst()`` to receive packets on the queue and
+converts the received packets to events in the same manner as packets
+received on a polled Rx queue. The interrupt thread is affinitized to the same
+CPUs as the lcores of the Rx adapter service function; if the Rx adapter
+service function has not been mapped to any lcores, the interrupt thread
+is mapped to the master lcore.
+
+Rx Callback for SW Rx Adapter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For SW based packet transfers, i.e., when the
+``RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT`` is not set in the adapter's
+capabilities flags for a particular ethernet device, the service function
+temporarily enqueues mbufs to an event buffer before batch enqueueing these
+to the event device. If the buffer fills up, the service function stops
+dequeueing packets from the ethernet device. The application may want to
+monitor the buffer fill level and instruct the service function to selectively
+enqueue packets to the event device. The application may also use some other
+criteria to decide which packets should enter the event device even when
+the event buffer fill level is low. The
+``rte_event_eth_rx_adapter_cb_register()`` function allows the application
+to register a callback that selects which packets to enqueue to the event
+device.
diff --git a/doc/guides/prog_guide/event_timer_adapter.rst b/doc/guides/prog_guide/event_timer_adapter.rst
new file mode 100644
index 00000000..7bbbdfe9
--- /dev/null
+++ b/doc/guides/prog_guide/event_timer_adapter.rst
@@ -0,0 +1,296 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+
+Event Timer Adapter Library
+===========================
+
+The DPDK
+`Event Device library <http://dpdk.org/doc/guides/prog_guide/eventdev.html>`_
+introduces an event driven programming model which presents applications with
+an alternative to the polling model traditionally used in DPDK
+applications. Event devices can be coupled with arbitrary components to provide
+new event sources by using **event adapters**. The Event Timer Adapter is one
+such adapter; it bridges event devices and timer mechanisms.
+
+The Event Timer Adapter library extends the event driven model
+by introducing a :ref:`new type of event <timer_expiry_event>` that represents
+a timer expiration, and providing an API with which adapters can be created or
+destroyed, and :ref:`event timers <event_timer>` can be armed and canceled.
+
+The Event Timer Adapter library is designed to interface with hardware or
+software implementations of the timer mechanism; it will query an eventdev PMD
+to determine which implementation should be used. The default software
+implementation manages timers using the DPDK
+`Timer library <http://dpdk.org/doc/guides/prog_guide/timer_lib.html>`_.
+
+Examples of using the API are presented in the `API Overview`_ and
+`Processing Timer Expiry Events`_ sections. Code samples are abstracted and
+are based on the example of handling a TCP retransmission.
+
+.. _event_timer:
+
+Event Timer struct
+------------------
+Event timers are timers that enqueue a timer expiration event to an event
+device upon timer expiration.
+
+The Event Timer Adapter API represents each event timer with a generic struct,
+which contains an event and user metadata. The ``rte_event_timer`` struct is
+defined in ``lib/librte_eventdev/rte_event_timer_adapter.h``.
+
+.. _timer_expiry_event:
+
+Timer Expiry Event
+~~~~~~~~~~~~~~~~~~
+
+The event contained by an event timer is enqueued in the event device when the
+timer expires, and the event device uses the attributes below when scheduling
+it:
+
+* ``event_queue_id`` - Application should set this to specify an event queue to
+ which the timer expiry event should be enqueued
+* ``event_priority`` - Application can set this to indicate the priority of the
+ timer expiry event in the event queue relative to other events
+* ``sched_type`` - Application can set this to specify the scheduling type of
+ the timer expiry event
+* ``flow_id`` - Application can set this to indicate which flow this timer
+ expiry event corresponds to
+* ``op`` - Will be set to ``RTE_EVENT_OP_NEW`` by the event timer adapter
+* ``event_type`` - Will be set to ``RTE_EVENT_TYPE_TIMER`` by the event timer
+ adapter
+
+Timeout Ticks
+~~~~~~~~~~~~~
+
+The number of ticks from now in which the timer will expire. The ticks value
+has a resolution (``timer_tick_ns``) that is specified in the event timer
+adapter configuration.
+
+State
+~~~~~
+
+Before arming an event timer, the application should initialize its state to
+RTE_EVENT_TIMER_NOT_ARMED. The event timer's state will be updated when a
+request to arm or cancel it takes effect.
+
+If the application wishes to rearm the timer after it has expired, it should
+reset the state back to RTE_EVENT_TIMER_NOT_ARMED before doing so.
+
+User Metadata
+~~~~~~~~~~~~~
+
+Memory to store user specific metadata. The event timer adapter implementation
+will not modify this area.
+
+API Overview
+------------
+
+This section will introduce the reader to the event timer adapter API, showing
+how to create and configure an event timer adapter and use it to manage event
+timers.
+
+From a high level, the setup steps are:
+
+* rte_event_timer_adapter_create()
+* rte_event_timer_adapter_start()
+
+And to start and stop timers:
+
+* rte_event_timer_arm_burst()
+* rte_event_timer_cancel_burst()
+
+Create and Configure an Adapter Instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To create an event timer adapter instance, initialize an
+``rte_event_timer_adapter_conf`` struct with the desired values, and pass it
+to ``rte_event_timer_adapter_create()``.
+
+.. code-block:: c
+
+ #define NSECPERSEC 1E9 // No of ns in 1 sec
+ const struct rte_event_timer_adapter_conf adapter_config = {
+         .event_dev_id = event_dev_id,
+         .timer_adapter_id = 0,
+         .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+         .timer_tick_ns = NSECPERSEC / 10, // 100 milliseconds
+         .max_tmo_ns = 180 * NSECPERSEC, // 3 minutes
+         .nb_timers = 40000,
+         .flags = 0,
+ };
+
+ struct rte_event_timer_adapter *adapter = NULL;
+ adapter = rte_event_timer_adapter_create(&adapter_config);
+
+ if (adapter == NULL) { ... };
+
+Before creating an instance of a timer adapter, the application should create
+and configure an event device along with its event ports. Depending on the
+event device's capabilities, an additional event port may need to be created
+for use by the timer adapter. If required, the
+``rte_event_timer_adapter_create()`` function will use a default method to
+configure an event port; it will examine the current event device
+configuration, determine the next available port identifier number, and create
+a new event port with a default port configuration.
+
+If the application desires to have finer control of event port allocation
+and setup, it can use the ``rte_event_timer_adapter_create_ext()`` function.
+This function is passed a callback function that will be invoked if the
+adapter needs to create an event port, giving the application the opportunity
+to control how it is done.
+
+Retrieve Event Timer Adapter Contextual Information
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The event timer adapter implementation may have constraints on tick resolution
+or maximum timer expiry timeout based on the given event timer adapter or
+system. In this case, the implementation may adjust the tick resolution or
+maximum timeout to the best possible configuration.
+
+Upon successful event timer adapter creation, the application can get the
+configured resolution and max timeout with
+``rte_event_timer_adapter_get_info()``. This function will return an
+``rte_event_timer_adapter_info`` struct, which contains the following members:
+
+* ``min_resolution_ns`` - Minimum timer adapter tick resolution in ns.
+* ``max_tmo_ns`` - Maximum timer timeout (expiry) in ns.
+* ``adapter_conf`` - Configured event timer adapter attributes
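+
+For illustration, a minimal sketch of querying these values (assuming
+``adapter`` was created as above):
+
+.. code-block:: c
+
+    #include <inttypes.h>
+
+    struct rte_event_timer_adapter_info info;
+
+    rte_event_timer_adapter_get_info(adapter, &info);
+    printf("resolution: %" PRIu64 " ns, max timeout: %" PRIu64 " ns\n",
+            info.min_resolution_ns, info.max_tmo_ns);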
+
+Configuring the Service Component
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If the adapter uses a service component, the application is required to map
+the service to a service core before starting the adapter:
+
+.. code-block:: c
+
+ uint32_t service_id;
+
+ if (rte_event_timer_adapter_service_id_get(adapter, &service_id) == 0)
+ rte_service_map_lcore_set(service_id, EVTIM_CORE_ID);
+
+An event timer adapter uses a service component if the event device PMD
+indicates that the adapter should use a software implementation.
+
+Starting the Adapter Instance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The application should call ``rte_event_timer_adapter_start()`` to start
+running the event timer adapter. This function calls the start entry points
+defined by eventdev PMDs for hardware implementations or puts a service
+component into the running state in the software implementation.
+
+Arming Event Timers
+~~~~~~~~~~~~~~~~~~~
+
+Once an event timer adapter has been started, an application can begin to
+manage event timers with it.
+
+The application should allocate ``struct rte_event_timer`` objects from a
+mempool or huge-page backed application buffers of required size. Upon
+successful allocation, the application should initialize the event timer, and
+then set any of the necessary event attributes described in the
+`Timer Expiry Event`_ section. In the following example, assume ``conn``
+represents a TCP connection and that ``event_timer_pool`` is a mempool that
+was created previously:
+
+.. code-block:: c
+
+ /* rte_mempool_get() returns < 0 on failure and does not guarantee that
+  * the output pointer is set, so check the return value. */
+ if (rte_mempool_get(event_timer_pool, (void **)&conn->evtim) < 0) { ... }
+
+ /* Set up the event timer. */
+ conn->evtim->ev.op = RTE_EVENT_OP_NEW;
+ conn->evtim->ev.queue_id = event_queue_id;
+ conn->evtim->ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ conn->evtim->ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ conn->evtim->ev.event_type = RTE_EVENT_TYPE_TIMER;
+ conn->evtim->ev.event_ptr = conn;
+ conn->evtim->state = RTE_EVENT_TIMER_NOT_ARMED;
+ conn->evtim->timeout_ticks = 30; // 3 sec at 100 ms resolution (RFC 1122 TCP retransmission)
+
+Note that it is necessary to initialize the event timer state to
+RTE_EVENT_TIMER_NOT_ARMED. Also note that we have saved a pointer to the
+``conn`` object in the timer's event payload. This will allow us to locate
+the connection object again once we dequeue the timer expiry event from the
+event device later. As a convenience, the application may specify no value for
+ev.event_ptr, and the adapter will by default set it to point at the event
+timer itself.
+
+Now we can arm the event timer with ``rte_event_timer_arm_burst()``:
+
+.. code-block:: c
+
+ ret = rte_event_timer_arm_burst(adapter, &conn->evtim, 1);
+ if (ret != 1) { ... }
+
+Once an event timer expires, the application may free it or rearm it as
+necessary. If the application will rearm the timer, the state should be reset
+to RTE_EVENT_TIMER_NOT_ARMED by the application before rearming it.
+
+Multiple Event Timers with Same Expiry Value
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the special case that there is a set of event timers that should all expire
+at the same time, the application may call
+``rte_event_timer_arm_tmo_tick_burst()``, which allows the implementation to
+optimize the operation if possible.
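+
+A minimal sketch, assuming ``evtims`` is an array of ``N`` initialized event
+timers that should all expire 30 ticks from now:
+
+.. code-block:: c
+
+    struct rte_event_timer *evtims[N];
+
+    /* All timers in the burst share the same expiry time. */
+    ret = rte_event_timer_arm_tmo_tick_burst(adapter, evtims, 30, N);
+    if (ret != N) { ... }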
+
+Canceling Event Timers
+~~~~~~~~~~~~~~~~~~~~~~
+
+An event timer that has been armed as described in `Arming Event Timers`_ can
+be canceled by calling ``rte_event_timer_cancel_burst()``:
+
+.. code-block:: c
+
+ /* An ack for the previous TCP data packet has been received;
+  * cancel the retransmission timer.
+  */
+ rte_event_timer_cancel_burst(adapter, &conn->evtim, 1);
+
+Processing Timer Expiry Events
+------------------------------
+
+Once an event timer has successfully enqueued a timer expiry event in the event
+device, the application will subsequently dequeue it from the event device.
+The application can use the event payload to retrieve a pointer to the object
+associated with the event timer. It can then re-arm the event timer or free the
+event timer object as desired:
+
+.. code-block:: c
+
+ void
+ event_processing_loop(...)
+ {
+ while (...) {
+ /* Receive events from the configured event port. */
+ rte_event_dequeue_burst(event_dev_id, event_port, &ev, 1, 0);
+ ...
+ switch(ev.event_type) {
+ ...
+ case RTE_EVENT_TYPE_TIMER:
+ process_timer_event(ev);
+ ...
+ break;
+ }
+ }
+ }
+
+ uint8_t
+ process_timer_event(...)
+ {
+ /* A retransmission timeout for the connection has been received. */
+ conn = ev.event_ptr;
+ /* Retransmit last packet (e.g. TCP segment). */
+ ...
+ /* Re-arm timer using original values. */
+ rte_event_timer_arm_burst(adapter, &conn->evtim, 1);
+ }
+
+Summary
+-------
+
+The Event Timer Adapter library extends the DPDK event-based programming model
+by representing timer expirations as events in the system and allowing
+applications to use existing event processing loops to arm and cancel event
+timers or handle timer expiry events.
diff --git a/doc/guides/prog_guide/eventdev.rst b/doc/guides/prog_guide/eventdev.rst
index ce19997d..8fcae546 100644
--- a/doc/guides/prog_guide/eventdev.rst
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: BSD-3-Clause
Copyright(c) 2017 Intel Corporation.
+ Copyright(c) 2018 Arm Limited.
Event Device Library
====================
@@ -129,8 +130,10 @@ API Walk-through
This section will introduce the reader to the eventdev API, showing how to
create and configure an eventdev and use it for a two-stage atomic pipeline
-with a single core for TX. The diagram below shows the final state of the
-application after this walk-through:
+with one core each for RX and TX. The RX and TX cores are shown here for
+illustration; refer to the Eventdev Adapter documentation for further details.
+The diagram below shows the final state of the application after this
+walk-through:
.. _figure_eventdev-usage1:
@@ -196,23 +199,29 @@ calling the setup function. Repeat this step for each queue, starting from
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
};
+ struct rte_event_queue_conf single_link_conf = {
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
+ };
int dev_id = 0;
- int queue_id = 0;
- int err = rte_event_queue_setup(dev_id, queue_id, &atomic_conf);
+ int atomic_q_1 = 0;
+ int atomic_q_2 = 1;
+ int single_link_q = 2;
+ int err = rte_event_queue_setup(dev_id, atomic_q_1, &atomic_conf);
+ err = rte_event_queue_setup(dev_id, atomic_q_2, &atomic_conf);
+ err = rte_event_queue_setup(dev_id, single_link_q, &single_link_conf);
-The remainder of this walk-through assumes that the queues are configured as
-follows:
+As shown above, queue IDs are as follows:
* id 0, atomic queue #1
* id 1, atomic queue #2
* id 2, single-link queue
+These queues are used for the remainder of this walk-through.
+
Setting up Ports
~~~~~~~~~~~~~~~~
-Once queues are set up successfully, create the ports as required. Each port
-should be set up with its corresponding port_conf type, worker for worker cores,
-rx and tx for the RX and TX cores:
+Once queues are set up successfully, create the ports as required.
.. code-block:: c
@@ -232,15 +241,24 @@ rx and tx for the RX and TX cores:
.new_event_threshold = 4096,
};
int dev_id = 0;
- int port_id = 0;
- int err = rte_event_port_setup(dev_id, port_id, &CORE_FUNCTION_conf);
+ int rx_port_id = 0;
+ int err = rte_event_port_setup(dev_id, rx_port_id, &rx_conf);
+
+ for(int worker_port_id = 1; worker_port_id <= 4; worker_port_id++) {
+ err = rte_event_port_setup(dev_id, worker_port_id, &worker_conf);
+ }
-It is now assumed that:
+ int tx_port_id = 5;
+ err = rte_event_port_setup(dev_id, tx_port_id, &tx_conf);
+
+As shown above:
* port 0: RX core
* ports 1,2,3,4: Workers
* port 5: TX core
+These ports are used for the remainder of this walk-through.
+
Linking Queues and Ports
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -254,15 +272,14 @@ can be achieved like this:
.. code-block:: c
- uint8_t port_id = 0;
+ uint8_t rx_port_id = 0;
+ uint8_t tx_port_id = 5;
uint8_t atomic_qs[] = {0, 1};
uint8_t single_link_q = 2;
- uint8_t tx_port_id = 5;
uint8_t priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
- for(int i = 0; i < 4; i++) {
- int worker_port = i + 1;
- int links_made = rte_event_port_link(dev_id, worker_port, atomic_qs, NULL, 2);
+ for(int worker_port_id = 1; worker_port_id <= 4; worker_port_id++) {
+ int links_made = rte_event_port_link(dev_id, worker_port_id, atomic_qs, NULL, 2);
}
int links_made = rte_event_port_link(dev_id, tx_port_id, &single_link_q, &priority, 1);
@@ -295,14 +312,14 @@ The following code shows how those packets can be enqueued into the eventdev:
ev[i].flow_id = mbufs[i]->hash.rss;
ev[i].op = RTE_EVENT_OP_NEW;
ev[i].sched_type = RTE_SCHED_TYPE_ATOMIC;
- ev[i].queue_id = 0;
+ ev[i].queue_id = atomic_q_1;
ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
ev[i].sub_event_type = 0;
ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
ev[i].mbuf = mbufs[i];
}
- const int nb_tx = rte_event_enqueue_burst(dev_id, port_id, ev, nb_rx);
+ const int nb_tx = rte_event_enqueue_burst(dev_id, rx_port_id, ev, nb_rx);
if (nb_tx != nb_rx) {
for(i = nb_tx; i < nb_rx; i++)
rte_pktmbuf_free(mbufs[i]);
@@ -334,7 +351,7 @@ the event to the next stage in the pipeline.
events[i].queue_id++;
}
- uint16_t nb_tx = rte_event_enqueue_burst(dev_id, port_id, events, nb_rx);
+ uint16_t nb_tx = rte_event_enqueue_burst(dev_id, worker_port_id, events, nb_rx);
Egress of Events
diff --git a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
index 9959f0d2..0cfc1198 100644
--- a/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
+++ b/doc/guides/prog_guide/generic_segmentation_offload_lib.rst
@@ -43,6 +43,7 @@ Limitations
#. Currently, the GSO library supports the following IPv4 packet types:
- TCP
+ - UDP
- VxLAN
- GRE
@@ -146,6 +147,15 @@ TCP/IPv4 GSO
TCP/IPv4 GSO supports segmentation of suitably large TCP/IPv4 packets, which
may also contain an optional VLAN tag.
+UDP/IPv4 GSO
+~~~~~~~~~~~~
+UDP/IPv4 GSO supports segmentation of suitably large UDP/IPv4 packets, which
+may also contain an optional VLAN tag. UDP GSO is the same as IP fragmentation.
+Specifically, UDP GSO treats the UDP header as a part of the payload and
+does not modify it during segmentation. Therefore, after UDP GSO, only the
+first output packet has the original UDP header, while the others have only
+L2 and L3 headers.
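+
+For illustration, a minimal UDP GSO sketch (the mempools, ``MAX_SEGS`` and the
+output segment size are application-defined placeholders; see the GSO API for
+the exact flag requirements):
+
+.. code-block:: c
+
+    struct rte_gso_ctx gso_ctx = {
+            .direct_pool = direct_mp,
+            .indirect_pool = indirect_mp,
+            .gso_types = DEV_TX_OFFLOAD_UDP_TSO,
+            .gso_size = 1500, /* max size of an output segment */
+            .flag = 0,
+    };
+    struct rte_mbuf *segs[MAX_SEGS];
+    int nb_segs;
+
+    /* Mark the packet for UDP segmentation; l2_len/l3_len must be set. */
+    pkt->ol_flags |= PKT_TX_UDP_SEG;
+    nb_segs = rte_gso_segment(pkt, &gso_ctx, segs, MAX_SEGS);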
+
VxLAN GSO
~~~~~~~~~
VxLAN packets GSO supports segmentation of suitably large VxLAN packets,
diff --git a/doc/guides/prog_guide/glossary.rst b/doc/guides/prog_guide/glossary.rst
index e101bc02..dda45bd1 100644
--- a/doc/guides/prog_guide/glossary.rst
+++ b/doc/guides/prog_guide/glossary.rst
@@ -41,9 +41,6 @@ CPU
CRC
Cyclic Redundancy Check
-ctrlmbuf
- An *mbuf* carrying control data.
-
Data Plane
In contrast to the control plane, the data plane in a network architecture
are the layers involved when forwarding packets. These layers must be
diff --git a/doc/guides/prog_guide/hash_lib.rst b/doc/guides/prog_guide/hash_lib.rst
index 180c776e..76a1f323 100644
--- a/doc/guides/prog_guide/hash_lib.rst
+++ b/doc/guides/prog_guide/hash_lib.rst
@@ -19,6 +19,8 @@ The main configuration parameters for the hash are:
* Size of the key in bytes
+* An extra flag used to describe additional settings, for example the multithreading mode of operation (as will be described later)
+
The hash also allows the configuration of some low-level implementation related parameters such as:
* Hash function to translate the key into a bucket index
@@ -49,8 +51,7 @@ Apart from these method explained above, the API allows the user three more opti
Also, the API contains a method to allow the user to look up entries in bursts, achieving higher performance
than looking up individual entries, as the function prefetches next entries at the time it is operating
with the first ones, which reduces significantly the impact of the necessary memory accesses.
-Notice that this method uses a pipeline of 8 entries (4 stages of 2 entries), so it is highly recommended
-to use at least 8 entries per burst.
+
The actual data associated with each key can be either managed by the user using a separate table that
mirrors the hash in terms of number of entries and position of each entry,
@@ -63,11 +64,33 @@ However, this table could also be used for more sophisticated features and provi
Multi-process support
---------------------
-The hash library can be used in a multi-process environment, minding that only lookups are thread-safe.
+The hash library can be used in a multi-process environment.
The only function that can only be used in single-process mode is rte_hash_set_cmp_func(), which sets up
a custom compare function, which is assigned to a function pointer (therefore, it is not supported in
multi-process mode).
+
+Multi-thread support
+---------------------
+
+The hash library supports multithreading, and the user specifies the needed
+mode of operation when creating the hash table by appropriately setting the flag.
+In all modes of operation, lookups are thread-safe, meaning they can be called
+from multiple threads concurrently.
+
+For concurrent writes, and for concurrent reads and writes, the following flag values define the corresponding modes of operation:
+
+* If the multi-writer flag (RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) is set, multiple threads may write to the table concurrently.
+ Key add, delete, and table reset are protected from other writer threads. With only this flag set, readers are not protected from ongoing writes.
+
+* If the read/write concurrency flag (RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) is set, multithreaded read/write operation is safe
+  (i.e., there is no need to stop readers from accessing the hash table until writers finish their updates; reads and writes can operate on the table concurrently).
+
+* In addition to these two flag values, if the transactional memory flag (RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT) is also set,
+ hardware transactional memory will be used to guarantee the thread safety as long as it is supported by the hardware (for example the Intel® TSX support).
+
+If the platform supports Intel® TSX, it is advised to set the transactional memory flag, as this will speed up concurrent table operations.
+Otherwise concurrent operations will be slower because of the overhead associated with the software locking mechanisms.
+
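+For illustration, a minimal sketch creating a table that allows concurrent
+writers and read/write concurrency (the name, sizes and hash function are
+placeholders):
+
+.. code-block:: c
+
+    #include <rte_hash.h>
+    #include <rte_hash_crc.h>
+    #include <rte_lcore.h>
+
+    struct rte_hash_parameters params = {
+            .name = "flow_table",
+            .entries = 1024,
+            .key_len = sizeof(uint32_t),
+            .hash_func = rte_hash_crc,
+            .hash_func_init_val = 0,
+            .socket_id = rte_socket_id(),
+            .extra_flag = RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD |
+                    RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
+    };
+    struct rte_hash *h = rte_hash_create(&params);
+
+    if (h == NULL) { ... }
+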
Implementation Details
----------------------
diff --git a/doc/guides/prog_guide/img/event_crypto_adapter_op_forward.svg b/doc/guides/prog_guide/img/event_crypto_adapter_op_forward.svg
new file mode 100644
index 00000000..54466f2e
--- /dev/null
+++ b/doc/guides/prog_guide/img/event_crypto_adapter_op_forward.svg
@@ -0,0 +1,1078 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="720px"
+ height="486px"
+ id="svg13237"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="event_crypto_adapter_enq_deq.svg">
+ <defs
+ id="defs13239">
+ <marker
+ inkscape:stockid="Arrow1Sstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Sstart"
+ style="overflow:visible">
+ <path
+ id="path8416"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.2) translate(6,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Send"
+ style="overflow:visible;">
+ <path
+ id="path8419"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+ transform="scale(0.2) rotate(180) translate(6,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="DiamondL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="DiamondL"
+ style="overflow:visible">
+ <path
+ id="path8483"
+ d="M 0,-7.0710768 L -7.0710894,0 L 0,7.0710589 L 7.0710462,0 L 0,-7.0710768 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8)" />
+ </marker>
+ <marker
+ inkscape:stockid="DotL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="DotL"
+ style="overflow:visible">
+ <path
+ id="path8465"
+ d="M -2.5,-1.0 C -2.5,1.7600000 -4.7400000,4.0 -7.5,4.0 C -10.260000,4.0 -12.5,1.7600000 -12.5,-1.0 C -12.5,-3.7600000 -10.260000,-6.0 -7.5,-6.0 C -4.7400000,-6.0 -2.5,-3.7600000 -2.5,-1.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8) translate(7.4, 1)" />
+ </marker>
+ <marker
+ inkscape:stockid="SquareL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="SquareL"
+ style="overflow:visible">
+ <path
+ id="path8474"
+ d="M -5.0,-5.0 L -5.0,5.0 L 5.0,5.0 L 5.0,-5.0 L -5.0,-5.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="TriangleOutL"
+ style="overflow:visible">
+ <path
+ id="path8546"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Lstart"
+ style="overflow:visible">
+ <path
+ id="path8404"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8) translate(12.5,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mend"
+ style="overflow:visible;">
+ <path
+ id="path8413"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+ transform="scale(0.4) rotate(180) translate(10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Lend"
+ style="overflow:visible;">
+ <path
+ id="path8425"
+ style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(1.1) rotate(180) translate(1,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Lend"
+ style="overflow:visible;">
+ <path
+ id="path8407"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+ transform="scale(0.8) rotate(180) translate(12.5,0)" />
+ </marker>
+ <filter
+ id="filter_2"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15" />
+ </filter>
+ <filter
+ id="filter_2-3"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1" />
+ </filter>
+ <filter
+ id="filter_2-0"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-7" />
+ </filter>
+ <filter
+ id="filter_2-0-8"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-7-7" />
+ </filter>
+ <filter
+ id="filter_2-3-9"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-6" />
+ </filter>
+ <filter
+ id="filter_2-3-6"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-63" />
+ </filter>
+ <filter
+ id="filter_2-3-91"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-3" />
+ </filter>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-5"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-6"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8404-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-51"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-62"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-2"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart-7-9"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8404-0-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <filter
+ id="filter_2-3-6-1"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-63-8" />
+ </filter>
+ <filter
+ id="filter_2-3-92"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-2" />
+ </filter>
+ <filter
+ id="filter_2-3-94"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-7" />
+ </filter>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart-7-6"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8404-0-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-55"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-4"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1"
+ inkscape:cx="359.77003"
+ inkscape:cy="287.74194"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1200"
+ inkscape:window-height="898"
+ inkscape:window-x="0"
+ inkscape:window-y="31"
+ inkscape:window-maximized="1"
+ inkscape:snap-nodes="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid13454" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata13242">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ id="layer1"
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer">
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2-4"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.5671361,88.874699,-812.39909)">
+ <title
+ id="title22-7-5">Square</title>
+ <desc
+ id="desc24-7-8">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9-5"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8-7"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3-91)" />
+ </g>
+ <g
+ id="g13515-33">
+ <g
+ id="g13534-8">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1-95"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.71226478;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Lstart-7);marker-end:none"
+ d="m 312.28671,240.74335 -84.28774,0"
+ id="path17209"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.71898615px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-mid:none;marker-end:url(#Arrow1Lend)"
+ d="m 221.6484,77.57125 94.28101,0"
+ id="path17209-8"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.5671361,314.24227,-811.89589)">
+ <title
+ id="title22-7">Square</title>
+ <desc
+ id="desc24-7">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3)" />
+ </g>
+ <g
+ id="g13515">
+ <g
+ id="g13534">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.72471404;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Lstart);marker-end:none"
+ d="m 89.025329,74.39932 -64.275286,0"
+ id="path17209-3"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,-4.325033,28.642983)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-3"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,-1.93108,192.80833)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-1"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.75141162;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Lstart-7);marker-end:none"
+ d="m 18.763392,120.7432 68.995153,0"
+ id="path17209-3-0"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z"
+ transform="matrix(0.73232502,0,0,0.75477602,-218.16394,72.68276)" />
+ <path
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-2"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z"
+ transform="matrix(0.73232502,0,0,0.75477602,-217.40136,26.716271)" />
+ <g
+ id="g29167-4"
+ transform="matrix(0.73232502,0,0,0.75477602,-217.31662,28.007562)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-9"
+ sodipodi:role="line">1</tspan></text>
+ </g>
+ <g
+ id="g29167-9"
+ transform="matrix(0.73232502,0,0,0.75477602,-4.9726112,28.689051)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-3"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-3"
+ sodipodi:role="line">2</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.67803264px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-7);marker-end:none"
+ d="m 181,214.66098 0,-69.32196"
+ id="path17211-7-1-6"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <g
+ id="g29167"
+ transform="matrix(0.73232502,0,0,0.75477602,-218.07919,73.10621)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165"
+ sodipodi:role="line">8</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.67803264px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-start:url(#Arrow1Lstart-7);marker-end:none"
+ d="m 131,145.8531 0,69.32197"
+ id="path17211-7-1"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,-140.37076,129.97088)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-8"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <g
+ id="g29167-2"
+ transform="matrix(0.73232502,0,0,0.75477602,-140.28602,131.01695)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-92"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-8"
+ sodipodi:role="line">7</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.71898615px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-mid:none;marker-end:url(#Arrow1Lend)"
+ d="m 317.1405,116 -94.281,0"
+ id="path17209-8-0"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,-3.4914,66.68745)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-6"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <g
+ id="g29167-46"
+ transform="matrix(0.73232502,0,0,0.75477602,-4.40666,67.48829)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-1"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-5"
+ sodipodi:role="line">3</tspan></text>
+ </g>
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,-90.692582,130.31695)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-8-6"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <g
+ id="g29167-6"
+ transform="matrix(0.73232502,0,0,0.75477602,-90.84634,131.60918)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-17"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-2"
+ sodipodi:role="line">4</tspan></text>
+ </g>
+ <g
+ id="g29167-2-0"
+ transform="matrix(0.73232502,0,0,0.75477602,-2.424397,194.0216)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-92-6"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-8-2"
+ sodipodi:role="line">5</tspan></text>
+ </g>
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2-8"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.5671361,93.82055,-648.98949)">
+ <title
+ id="title22-7-97">Square</title>
+ <desc
+ id="desc24-7-3">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9-6"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8-12"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3-92)" />
+ </g>
+ <g
+ id="g13515-9">
+ <g
+ id="g13534-3">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1-1"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2-84"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.5671361,314.82055,-648.98949)">
+ <title
+ id="title22-7-50">Square</title>
+ <desc
+ id="desc24-7-36">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9-1"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8-0"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3-94)" />
+ </g>
+ <g
+ id="g13515-6">
+ <g
+ id="g13534-32">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1-0"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.71226478;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:none;marker-end:url(#Arrow1Lend)"
+ d="m 313.14387,285 -84.28774,0"
+ id="path17209-7"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,-2.692582,236.31695)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-1-6"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <g
+ id="g29167-2-0-5"
+ transform="matrix(0.73232502,0,0,0.75477602,-2.424397,237.0216)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-92-6-6"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-8-2-9"
+ sodipodi:role="line">6</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3"
+ transform="matrix(0.73232502,0,0,0.75477602,-154.60784,51.117791)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-9-7"
+ sodipodi:role="line">Eventdev</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3-5"
+ transform="matrix(0.73232502,0,0,0.75477602,-144.65044,201.97821)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6-3"
+ y="70"
+ x="412.93716"
+ style="font-size:40px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="412.93716"
+ id="tspan29165-9-7-5"
+ sodipodi:role="line">Crypto</tspan><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="100.26366"
+ x="412.93716"
+ sodipodi:role="line"
+ id="tspan3201">Adapter</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3-5-6"
+ transform="matrix(0.73232502,0,0,0.75477602,79.53518,46.62529)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6-3-2"
+ y="48.801659"
+ x="412.93716"
+ style="font-size:40px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="48.801659"
+ x="412.93716"
+ sodipodi:role="line"
+ id="tspan3155">Application</tspan><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="79.065323"
+ x="412.93716"
+ sodipodi:role="line"
+ id="tspan3201-1">in ordered</tspan><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="109.32899"
+ x="412.93716"
+ sodipodi:role="line"
+ id="tspan3161">stage</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3-5-2"
+ transform="matrix(0.73232502,0,0,0.75477602,77.535182,213.62529)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6-3-7"
+ y="70"
+ x="412.93716"
+ style="font-size:40px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="412.93716"
+ sodipodi:role="line"
+ id="tspan3201-9">Cryptodev</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3-5-3"
+ transform="matrix(0.73232502,0,0,0.75477602,188.53518,-3.37471)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6-3-6"
+ y="70"
+ x="375.65271"
+ style="font-size:40px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3201-6">1. Events from the previous stage.</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="93.538406"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3260" /><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="117.07681"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3262">2. Application in ordered stage</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="140.61522"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3288"> dequeues events from eventdev.</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="164.15363"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3264" /><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="187.69203"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3266">3. Application enqueues crypto</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="211.23044"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3290"> operations as events to eventdev.</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="234.76884"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3268" /><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="258.30725"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3270">4. Crypto adapter dequeues event</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="281.84564"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3292"> from eventdev.</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="305.38406"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3272" /><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="328.92245"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3274">5. Crypto adapter submits crypto</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="352.46088"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3294"> operations to cryptodev (Atomic</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="375.99927"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3296"> stage)</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="399.53769"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3276" /><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="423.07608"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3278">6. Crypto adapter dequeues crypto</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="446.6145"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3298"> completions from cryptodev</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="470.15289"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3280" /><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="493.69131"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3282">7. Crypto adapter enqueues events</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="517.22974"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3300"> to the eventdev</tspan><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="540.76813"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3284" /><tspan
+ style="font-size:18.83072472px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="564.30652"
+ x="375.65271"
+ sodipodi:role="line"
+ id="tspan3286">8. Events to the next stage</tspan></text>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/event_crypto_adapter_op_new.svg b/doc/guides/prog_guide/img/event_crypto_adapter_op_new.svg
new file mode 100644
index 00000000..256862ed
--- /dev/null
+++ b/doc/guides/prog_guide/img/event_crypto_adapter_op_new.svg
@@ -0,0 +1,1061 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="720px"
+ height="486px"
+ id="svg13237"
+ version="1.1"
+ inkscape:version="0.48.4 r9939"
+ sodipodi:docname="event_crypto_adapter_deq_only.svg">
+ <defs
+ id="defs13239">
+ <marker
+ inkscape:stockid="Arrow1Sstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Sstart"
+ style="overflow:visible">
+ <path
+ id="path8416"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.2) translate(6,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Send"
+ style="overflow:visible;">
+ <path
+ id="path8419"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+ transform="scale(0.2) rotate(180) translate(6,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="DiamondL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="DiamondL"
+ style="overflow:visible">
+ <path
+ id="path8483"
+ d="M 0,-7.0710768 L -7.0710894,0 L 0,7.0710589 L 7.0710462,0 L 0,-7.0710768 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8)" />
+ </marker>
+ <marker
+ inkscape:stockid="DotL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="DotL"
+ style="overflow:visible">
+ <path
+ id="path8465"
+ d="M -2.5,-1.0 C -2.5,1.7600000 -4.7400000,4.0 -7.5,4.0 C -10.260000,4.0 -12.5,1.7600000 -12.5,-1.0 C -12.5,-3.7600000 -10.260000,-6.0 -7.5,-6.0 C -4.7400000,-6.0 -2.5,-3.7600000 -2.5,-1.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8) translate(7.4, 1)" />
+ </marker>
+ <marker
+ inkscape:stockid="SquareL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="SquareL"
+ style="overflow:visible">
+ <path
+ id="path8474"
+ d="M -5.0,-5.0 L -5.0,5.0 L 5.0,5.0 L 5.0,-5.0 L -5.0,-5.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8)" />
+ </marker>
+ <marker
+ inkscape:stockid="TriangleOutL"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="TriangleOutL"
+ style="overflow:visible">
+ <path
+ id="path8546"
+ d="M 5.77,0.0 L -2.88,5.0 L -2.88,-5.0 L 5.77,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Lstart"
+ style="overflow:visible">
+ <path
+ id="path8404"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
+ transform="scale(0.8) translate(12.5,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Mend"
+ style="overflow:visible;">
+ <path
+ id="path8413"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+ transform="scale(0.4) rotate(180) translate(10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow2Lend"
+ style="overflow:visible;">
+ <path
+ id="path8425"
+ style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+ d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+ transform="scale(1.1) rotate(180) translate(1,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0.0"
+ refX="0.0"
+ id="Arrow1Lend"
+ style="overflow:visible;">
+ <path
+ id="path8407"
+ d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+ transform="scale(0.8) rotate(180) translate(12.5,0)" />
+ </marker>
+ <filter
+ id="filter_2"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15" />
+ </filter>
+ <filter
+ id="filter_2-3"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1" />
+ </filter>
+ <filter
+ id="filter_2-0"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-7" />
+ </filter>
+ <filter
+ id="filter_2-0-8"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-7-7" />
+ </filter>
+ <filter
+ id="filter_2-3-9"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-6" />
+ </filter>
+ <filter
+ id="filter_2-3-6"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-63" />
+ </filter>
+ <filter
+ id="filter_2-3-91"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-3" />
+ </filter>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-5"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-3"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-6"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart-7"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8404-0"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(0.8,0,0,0.8,10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-51"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-1"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-3"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-6"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-62"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-9"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend-2"
+ style="overflow:visible">
+ <path
+ inkscape:connector-curvature="0"
+ id="path8407-7"
+ d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)" />
+ </marker>
+ <filter
+ id="filter_2-3-91-3"
+ color-interpolation-filters="sRGB">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur15-1-3-6" />
+ </filter>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="1"
+ inkscape:cx="511.66308"
+ inkscape:cy="233.69667"
+ inkscape:document-units="px"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:window-width="1200"
+ inkscape:window-height="898"
+ inkscape:window-x="0"
+ inkscape:window-y="31"
+ inkscape:window-maximized="1"
+ inkscape:snap-nodes="false">
+ <inkscape:grid
+ type="xygrid"
+ id="grid13454" />
+ </sodipodi:namedview>
+ <metadata
+ id="metadata13242">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ id="layer1"
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer">
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2-0"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.5671361,323.2187,-540.25927)">
+ <title
+ id="title22-7-6">Square</title>
+ <desc
+ id="desc24-7-4">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9-0"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8-9"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3-9)" />
+ </g>
+ <g
+ id="g13515-4">
+ <g
+ id="g13534-5">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1-4"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2-4"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.165886,88.874699,-447.8809)">
+ <title
+ id="title22-7-5">Square</title>
+ <desc
+ id="desc24-7-8">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9-5"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8-7"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3-91)" />
+ </g>
+ <g
+ id="g13515-33">
+ <g
+ id="g13534-8">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1-95"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2-9"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.5671361,88.874699,-538.24651)">
+ <title
+ id="title22-7-9">Square</title>
+ <desc
+ id="desc24-7-5">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9-2"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8-1"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3-6)" />
+ </g>
+ <g
+ id="g13515-3">
+ <g
+ id="g13534-0">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1-9"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.74346578px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-mid:none;marker-end:url(#Arrow1Lend)"
+ d="m 220.66064,98.57125 101.22528,0"
+ id="path17209-8"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.5671361,322.24227,-811.89589)">
+ <title
+ id="title22-7">Square</title>
+ <desc
+ id="desc24-7">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3)" />
+ </g>
+ <g
+ id="g13515">
+ <g
+ id="g13534">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <g
+ id="g13518"
+ transform="matrix(0.73232502,0,0,0.75477602,25.29268,348.89752)">
+ <g
+ id="g13526">
+ <flowRoot
+ xml:space="preserve"
+ id="flowRoot13464-9"
+ style="font-size:40px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ transform="translate(-12.00521,-129.65179)"><flowRegion
+ id="flowRegion13466-1"><rect
+ id="rect13468-2"
+ width="195.99997"
+ height="112.00001"
+ x="273.33334"
+ y="175.33333"
+ style="text-align:center;text-anchor:middle" /></flowRegion><flowPara
+ style="font-size:20px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ id="flowPara13511" /></flowRoot> </g>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.75145501;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Lstart);marker-end:none"
+ d="m 176.26096,124.64833 0,69.24854"
+ id="path17209-3"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,-4.325033,50.642983)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-3"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.74346578px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-mid:none;marker-end:url(#Arrow1Lend)"
+ d="m 322.61264,375 -101.22528,0"
+ id="path17209-8-0"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,0.0689171,324.80833)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-1"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.62908167px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)"
+ d="m 155,324.19955 0,-59.37092"
+ id="path17211-7-1"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,-116.37076,245.97088)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-8"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.75058991;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow1Lstart-7);marker-end:none"
+ d="m 126.26097,124.99178 0,69.24941"
+ id="path17209-3-0"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z"
+ transform="matrix(0.73232502,0,0,0.75477602,-146.16394,110.68276)" />
+ <path
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-2"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z"
+ transform="matrix(0.73232502,0,0,0.75477602,-95.40136,110.71627)" />
+ <g
+ id="g29167-4"
+ transform="matrix(0.73232502,0,0,0.75477602,-95.31662,112.00756)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-9"
+ sodipodi:role="line">1</tspan></text>
+ </g>
+ <g
+ id="g29167-9"
+ transform="matrix(0.73232502,0,0,0.75477602,-4.9726112,50.689051)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-3"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-3"
+ sodipodi:role="line">2</tspan></text>
+ </g>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:1.04032874px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow1Lend)"
+ d="m 388.20118,147.93341 0,173.95967"
+ id="path17211-7"
+ inkscape:connector-type="orthogonal"
+ inkscape:connector-curvature="0" />
+ <path
+ transform="matrix(0.73232502,0,0,0.75477602,116.5086,136.68745)"
+ sodipodi:type="arc"
+ style="fill:#539de6;fill-opacity:1;stroke:#0000ea;stroke-width:1;stroke-linecap:square;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
+ id="path29161-6"
+ sodipodi:cx="371"
+ sodipodi:cy="64.5"
+ sodipodi:rx="17"
+ sodipodi:ry="15.5"
+ d="m 388,64.5 a 17,15.5 0 1 1 -34,0 17,15.5 0 1 1 34,0 z" />
+ <g
+ id="g29167-46"
+ transform="matrix(0.73232502,0,0,0.75477602,116.59334,137.48829)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-1"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-5"
+ sodipodi:role="line">3</tspan></text>
+ </g>
+ <g
+ id="g29167-6"
+ transform="matrix(0.73232502,0,0,0.75477602,0.1536639,325.60918)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-17"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-2"
+ sodipodi:role="line">4</tspan></text>
+ </g>
+ <g
+ id="g29167"
+ transform="matrix(0.73232502,0,0,0.75477602,-146.07919,111.10621)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165"
+ sodipodi:role="line">6</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3"
+ transform="matrix(0.73232502,0,0,0.75477602,-117.60784,180.11779)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6"
+ y="70"
+ x="321.30356"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="321.30356"
+ id="tspan29165-9-7"
+ sodipodi:role="line">Eventdev</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3-5"
+ transform="matrix(0.73232502,0,0,0.75477602,55.34956,26.97821)">
+ <text
+ sodipodi:linespacing="100%"
+ id="text29163-9-6-3"
+ y="70"
+ x="454.74152"
+ style="font-size:40px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="454.74152"
+ id="tspan29165-9-7-5"
+ sodipodi:role="line">A<tspan
+ style="line-height:100%"
+ id="tspan3374">tomic stage</tspan></tspan><tspan
+ style="font-size:21.52082825000000099px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="96.901031"
+ x="454.74152"
+ sodipodi:role="line"
+ id="tspan3320">+</tspan><tspan
+ style="font-size:21.52082825000000099px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="123.80207"
+ x="454.74152"
+ sodipodi:role="line"
+ id="tspan3322">enqueue to</tspan><tspan
+ style="font-size:21.52082825000000099px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:100%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="150.70311"
+ x="454.74152"
+ sodipodi:role="line"
+ id="tspan3324">cryptodev</tspan></text>
+ </g>
+ <g
+ id="g29167-2"
+ transform="matrix(0.73232502,0,0,0.75477602,-116.28602,248.01695)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-92"
+ y="70"
+ x="365"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:18px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="365"
+ id="tspan29165-8"
+ sodipodi:role="line">5</tspan></text>
+ </g>
+ <flowRoot
+ xml:space="preserve"
+ id="flowRoot3376"
+ style="fill:black;stroke:none;stroke-opacity:1;stroke-width:1px;stroke-linejoin:miter;stroke-linecap:butt;fill-opacity:1;font-family:Sans;font-style:normal;font-weight:normal;font-size:18px;line-height:125%;letter-spacing:0px;word-spacing:0px"><flowRegion
+ id="flowRegion3378"><rect
+ id="rect3380"
+ width="100"
+ height="37"
+ x="109"
+ y="259" /></flowRegion><flowPara
+ id="flowPara3382" /></flowRoot> <g
+ id="g29167-4-3-1"
+ transform="matrix(0.73232502,0,0,0.75477602,109.34956,323.97821)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6-8"
+ y="70"
+ x="321.30356"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="321.30356"
+ id="tspan29165-9-7-7"
+ sodipodi:role="line">Cryptodev</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3-1-9"
+ transform="matrix(0.73232502,0,0,0.75477602,-114.48565,314.20704)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6-8-2"
+ y="70"
+ x="368.01718"
+ style="font-size:40px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="368.01718"
+ id="tspan29165-9-7-7-0"
+ sodipodi:role="line">Crypto</tspan><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Sans;-inkscape-font-specification:Sans"
+ y="100.26366"
+ x="368.01718"
+ sodipodi:role="line"
+ id="tspan3488">adapter</tspan></text>
+ </g>
+ <g
+ id="g29167-4-3-1-9-2"
+ transform="matrix(0.73232502,0,0,0.75477602,250.96804,192.62529)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6-8-2-3"
+ y="-188.35481"
+ x="318.61978"
+ style="font-size:40px;font-style:normal;font-weight:normal;text-align:center;line-height:125%;letter-spacing:0px;word-spacing:0px;text-anchor:middle;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="-188.35481"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3543">1. Application dequeues</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="-161.45378"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3196"> events from the previous</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="-134.55275"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3232"> stage</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="-107.65171"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3519" /><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="-80.750671"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3551">2. Application prepares the</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="-53.84964"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3203"> crypto operations.</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="-26.948603"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3523" /><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="-0.047568277"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3541">3. Crypto operations are</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="26.853468"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3207"> submitted to cryptodev</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="53.754501"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3209"> by application..</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="80.65554"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3527" /><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="107.55657"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3547">4. Crypto adapter dequeues</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="134.45761"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3216"> crypto completions from</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="161.35864"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3218"> cryptodev.</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="188.25969"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3531" /><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="215.16072"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3549">5. Crypto adapter enqueues</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="242.06175"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3222"> events to the eventdev.</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="268.96277"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3535" /><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="295.8638"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3537">6. Application dequeues from</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="322.76483"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3224"> eventdev and prepare for</tspan><tspan
+ style="font-size:21.52082825px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="349.66586"
+ x="318.61978"
+ sodipodi:role="line"
+ id="tspan3226"> further processing</tspan></text>
+ </g>
+ <g
+ style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
+ id="shape1-1-2-4-7"
+ v:mID="1"
+ v:groupContext="shape"
+ transform="matrix(2.1604167,0,0,1.165886,90.820551,-587.97129)">
+ <title
+ id="title22-7-5-5">Square</title>
+ <desc
+ id="desc24-7-8-3">Atomic Queue #1</desc>
+ <v:userDefs>
+ <v:ud
+ v:nameU="visVersion"
+ v:val="VT0(15):26" />
+ </v:userDefs>
+ <v:textBlock
+ v:margins="rect(4,4,4,4)" />
+ <v:textRect
+ cx="30.75"
+ cy="581.25"
+ width="61.5"
+ height="61.5" />
+ <g
+ id="shadow1-2-9-5-5"
+ v:groupContext="shadow"
+ v:shadowOffsetX="0.345598"
+ v:shadowOffsetY="-1.97279"
+ v:shadowType="1"
+ transform="translate(0.345598,1.97279)"
+ class="st1"
+ style="visibility:visible">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st2"
+ id="rect27-8-7-6"
+ style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-3-91-3)" />
+ </g>
+ <g
+ id="g13515-33-2">
+ <g
+ id="g13534-8-9">
+ <rect
+ x="0"
+ y="550.5"
+ width="61.5"
+ height="61.5"
+ class="st3"
+ id="rect29-1-95-1"
+ style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
+ </g>
+ </g>
+ </g>
+ <g
+ id="g29167-4-3-2"
+ transform="matrix(0.73232502,0,0,0.75477602,-125.66199,44.027402)">
+ <text
+ sodipodi:linespacing="125%"
+ id="text29163-9-6-7"
+ y="70"
+ x="321.30356"
+ style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+ xml:space="preserve"><tspan
+ style="font-size:24.21093369px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
+ y="70"
+ x="321.30356"
+ id="tspan29165-9-7-0"
+ sodipodi:role="line">Application</tspan></text>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/eventdev_usage.svg b/doc/guides/prog_guide/img/eventdev_usage.svg
index 7765649b..c19818b9 100644
--- a/doc/guides/prog_guide/img/eventdev_usage.svg
+++ b/doc/guides/prog_guide/img/eventdev_usage.svg
@@ -1,994 +1,549 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export eventdev_usage.svg Page-1 -->
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2018 Arm -->
-<svg
- xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/"
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:xlink="http://www.w3.org/1999/xlink"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="683.12061"
- height="184.672"
- viewBox="0 0 546.49648 147.7376"
- xml:space="preserve"
- color-interpolation-filters="sRGB"
- class="st9"
- id="svg2"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="eventdev_usage.svg"
- style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"><metadata
- id="metadata214"><rdf:RDF><cc:Work
- rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" /></cc:Work></rdf:RDF></metadata><sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1920"
- inkscape:window-height="1017"
- id="namedview212"
- showgrid="false"
- fit-margin-top="2"
- fit-margin-left="2"
- fit-margin-right="2"
- fit-margin-bottom="2"
- inkscape:zoom="1.2339869"
- inkscape:cx="501.15554"
- inkscape:cy="164.17693"
- inkscape:window-x="-8"
- inkscape:window-y="406"
- inkscape:window-maximized="1"
- inkscape:current-layer="g17" />
- <v:documentProperties
- v:langID="1033"
- v:viewMarkup="false">
- <v:userDefs>
- <v:ud
- v:nameU="msvSubprocessMaster"
- v:prompt=""
- v:val="VT4(Rectangle)" />
- <v:ud
- v:nameU="msvNoAutoConnect"
- v:val="VT0(1):26" />
- </v:userDefs>
- </v:documentProperties>
-
- <style
- type="text/css"
- id="style4">
-
- .st1 {visibility:visible}
- .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
- .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
- .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
- .st5 {font-size:1em}
- .st6 {fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25}
- .st7 {marker-end:url(#mrkr4-33);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
- .st8 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.28409090909091}
- .st9 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="9.04167in" height="1.84602in"
+ viewBox="0 0 651 132.913" xml:space="preserve" color-interpolation-filters="sRGB" class="st12">
+ <v:documentProperties v:langID="1033" v:viewMarkup="false"/>
+ <style type="text/css">
+ <![CDATA[
+ .st1 {fill:#3c63ac;stroke:#30518f;stroke-width:0.75}
+ .st2 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st3 {fill:none;stroke:#203864;stroke-width:0.25}
+ .st4 {stroke:#203864;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st5 {fill:#ffd965;stroke:#203864;stroke-width:0.25}
+ .st6 {fill:#000000;font-family:Calibri;font-size:1.16666em}
+ .st7 {font-size:1em}
+ .st8 {fill:none;stroke:none;stroke-width:0.25}
+ .st9 {fill:#000000;font-family:Calibri;font-size:0.833336em}
+ .st10 {fill:#000000;font-family:Calibri;font-size:1.00001em}
+ .st11 {font-size:1.16665em}
+ .st12 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
</style>
- <defs
- id="Markers">
- <g
- id="lend4">
- <path
- d="M 2,1 0,0 2,-1 2,1"
- style="stroke:none"
- id="path8"
- inkscape:connector-curvature="0" />
- </g>
- <marker
- id="mrkr4-33"
- class="st8"
- v:arrowType="4"
- v:arrowSize="2"
- v:setback="7.04"
- refX="-7.04"
- orient="auto"
- markerUnits="strokeWidth"
- overflow="visible"
- style="fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-width:0.28409091;stroke-opacity:1;overflow:visible">
- <use
- xlink:href="#lend4"
- transform="scale(-3.52,-3.52)"
- id="use11"
- x="0"
- y="0"
- width="3"
- height="3" />
- </marker>
- <filter
- id="filter_2-7"
- color-interpolation-filters="sRGB"><feGaussianBlur
- stdDeviation="2"
- id="feGaussianBlur15-1" /></filter><marker
- id="mrkr4-33-2"
- class="st8"
- v:arrowType="4"
- v:arrowSize="2"
- v:setback="7.04"
- refX="-7.04"
- orient="auto"
- markerUnits="strokeWidth"
- overflow="visible"
- style="fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-width:0.28409091;stroke-opacity:1;overflow:visible"><use
- xlink:href="#lend4"
- transform="scale(-3.52,-3.52)"
- id="use11-3"
- x="0"
- y="0"
- width="3"
- height="3" /></marker><marker
- id="mrkr4-33-6"
- class="st8"
- v:arrowType="4"
- v:arrowSize="2"
- v:setback="7.04"
- refX="-7.04"
- orient="auto"
- markerUnits="strokeWidth"
- overflow="visible"
- style="fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-width:0.28409091;stroke-opacity:1;overflow:visible"><use
- xlink:href="#lend4"
- transform="scale(-3.52,-3.52)"
- id="use11-8"
- x="0"
- y="0"
- width="3"
- height="3" /></marker></defs>
- <defs
- id="Filters">
- <filter
- id="filter_2"
- color-interpolation-filters="sRGB">
- <feGaussianBlur
- stdDeviation="2"
- id="feGaussianBlur15" />
- </filter>
- </defs>
- <g
- v:mID="0"
- v:index="1"
- v:groupContext="foregroundPage"
- id="g17"
- transform="translate(-47.323579,-90.784072)">
- <v:userDefs>
- <v:ud
- v:nameU="msvThemeOrder"
- v:val="VT0(0):26" />
- </v:userDefs>
- <title
- id="title19">Page-1</title>
- <v:pageProperties
- v:drawingScale="1"
- v:pageScale="1"
- v:drawingUnits="0"
- v:shadowOffsetX="9"
- v:shadowOffsetY="-9" />
- <v:layer
- v:name="Connector"
- v:index="0" />
- <g
- id="shape1-1"
- v:mID="1"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,128.62352,-288.18843)">
- <title
- id="title22">Square</title>
- <desc
- id="desc24">Atomic Queue #1</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="30.75"
- cy="581.25"
- width="61.5"
- height="61.5" />
- <g
- id="shadow1-2"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <rect
- x="0"
- y="550.5"
- width="61.5"
- height="61.5"
- class="st2"
- id="rect27"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
- </g>
- <rect
- x="0"
- y="550.5"
- width="61.5"
- height="61.5"
- class="st3"
- id="rect29"
- style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
-
- </g>
- <g
- id="shape3-8"
- v:mID="3"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,297.37175,-288.18843)">
- <title
- id="title36">Square.3</title>
- <desc
- id="desc38">Atomic Queue #2</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="30.75"
- cy="581.25"
- width="61.5"
- height="61.5" />
- <g
- id="shadow3-9"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <rect
- x="0"
- y="550.5"
- width="61.5"
- height="61.5"
- class="st2"
- id="rect41"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
- </g>
- <rect
- x="0"
- y="550.5"
- width="61.5"
- height="61.5"
- class="st3"
- id="rect43"
- style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
-
- </g>
- <g
- id="shape4-15"
- v:mID="4"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,466.1192,-288.18843)">
- <title
- id="title50">Square.4</title>
- <desc
- id="desc52">Single Link Queue # 1</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="30.75"
- cy="581.25"
- width="61.5"
- height="61.5" />
- <g
- id="shadow4-16"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <rect
- x="0"
- y="550.5"
- width="61.5"
- height="61.5"
- class="st2"
- id="rect55"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
- </g>
- <rect
- x="0"
- y="550.5"
- width="61.5"
- height="61.5"
- class="st3"
- id="rect57"
- style="fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25" />
-
- </g>
- <g
- id="shape5-22"
- v:mID="5"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,52.208527,-296.14701)">
- <title
- id="title64">Circle</title>
- <desc
- id="desc66">RX</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="20.5"
- cy="591.5"
- width="35.88"
- height="30.75" />
- <g
- id="shadow5-23"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st2"
- id="path69"
- inkscape:connector-curvature="0"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
- </g>
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st6"
- id="path71"
- inkscape:connector-curvature="0"
- style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
- <text
- x="15.19"
- y="594.5"
- class="st4"
- v:langID="1033"
- id="text73"
- style="fill:#feffff;font-family:Calibri"><v:paragraph
- v:horizAlign="1" /><v:tabList />RX</text>
-
- </g>
- <g
- id="shape6-28"
- v:mID="6"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,84.042834,-305.07614)">
- <title
- id="title76">Dynamic connector</title>
- <path
- d="m 0,603 50.38,0"
- class="st7"
- id="path78"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape7-34"
- v:mID="7"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,220.95621,-296.14701)">
- <title
- id="title81">Circle.7</title>
- <desc
- id="desc83">W ..</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="20.5"
- cy="591.5"
- width="35.88"
- height="30.75" />
- <g
- id="shadow7-35"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st2"
- id="path86"
- inkscape:connector-curvature="0"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
- </g>
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st6"
- id="path88"
- inkscape:connector-curvature="0"
- style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
- <text
- x="12.4"
- y="594.5"
- class="st4"
- v:langID="1033"
- id="text90"
- style="fill:#feffff;font-family:Calibri"><v:paragraph
- v:horizAlign="1" /><v:tabList />W ..</text>
-
- </g>
- <g
- id="shape9-40"
- v:mID="9"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,220.95621,-243.34865)">
- <title
- id="title93">Circle.9</title>
- <desc
- id="desc95">W N</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="20.5"
- cy="591.5"
- width="35.88"
- height="30.75" />
- <g
- id="shadow9-41"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st2"
- id="path98"
- inkscape:connector-curvature="0"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="1" v:pageScale="1" v:drawingUnits="0" v:shadowOffsetX="9" v:shadowOffsetY="-9"/>
+ <v:layer v:name="Connector" v:index="0"/>
+ <g id="group1068-1" transform="translate(0.75,-0.25)" v:mID="1068" v:groupContext="group">
+ <title>Sheet.1068</title>
+ <g id="shape3-2" v:mID="3" v:groupContext="shape" transform="translate(63,184.827) rotate(180)">
+ <title>Simple Arrow</title>
+ <desc>In Intf</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ArrowType" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="31.5" cy="132.913" width="63.01" height="0" transform="rotate(180)"/>
+ <path d="M0 132.91 L12 120.92 L12 126.92 L63 126.92 L63 132.91 L63 138.91 L12 138.91 L12 144.91 L0 132.91 Z"
+ class="st1"/>
+ <text x="-43.6" y="-129.91" transform="rotate(180)" class="st2" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>In Intf</text> </g>
+ <g id="group1010-5" transform="translate(130.5,-73.3167)" v:mID="1010" v:groupContext="group">
+ <title>Sheet.1010</title>
+ <g id="group1000-6" transform="translate(-2.19824E-14,-0.0534178)" v:mID="1000" v:groupContext="group">
+ <title>Sheet.1000</title>
+ <g id="shape1001-7" v:mID="1001" v:groupContext="shape" transform="translate(0,-4.86)">
+ <title>Rectangle.38</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <rect x="0" y="123.913" width="22.3966" height="8.99999" class="st3"/>
+ </g>
+ <g id="shape1002-9" v:mID="1002" v:groupContext="shape" v:layerMember="0" transform="translate(2.19832,-18.18)">
+ <title>Dynamic connector.162</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1003-12" v:mID="1003" v:groupContext="shape" v:layerMember="0"
+ transform="translate(7.79747,-18.18)">
+ <title>Dynamic connector.163</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1004-15" v:mID="1004" v:groupContext="shape" v:layerMember="0" transform="translate(-3.40084,-18)">
+ <title>Dynamic connector.164</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ </g>
+ <g id="group1005-18" transform="translate(22.6034,0)" v:mID="1005" v:groupContext="group">
+ <title>Sheet.1005</title>
+ <g id="shape1006-19" v:mID="1006" v:groupContext="shape" transform="translate(0,-4.86)">
+ <title>Rectangle.38</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <rect x="0" y="123.913" width="22.3966" height="8.99999" class="st3"/>
+ </g>
+ <g id="shape1007-21" v:mID="1007" v:groupContext="shape" v:layerMember="0"
+ transform="translate(2.19832,-18.18)">
+ <title>Dynamic connector.162</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1008-24" v:mID="1008" v:groupContext="shape" v:layerMember="0"
+ transform="translate(7.79747,-18.18)">
+ <title>Dynamic connector.163</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1009-27" v:mID="1009" v:groupContext="shape" v:layerMember="0" transform="translate(-3.40084,-18)">
+ <title>Dynamic connector.164</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ </g>
</g>
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st6"
- id="path100"
- inkscape:connector-curvature="0"
- style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
- <text
- x="11.69"
- y="594.5"
- class="st4"
- v:langID="1033"
- id="text102"
- style="fill:#feffff;font-family:Calibri"><v:paragraph
- v:horizAlign="1" /><v:tabList />W N</text>
-
- </g>
- <g
- id="shape10-46"
- v:mID="10"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,220.95621,-348.94537)">
- <title
- id="title105">Circle.10</title>
- <desc
- id="desc107">W 1</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="20.5"
- cy="591.5"
- width="35.88"
- height="30.75" />
- <g
- id="shadow10-47"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st2"
- id="path110"
- inkscape:connector-curvature="0"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ <g id="group1016-30" transform="translate(301.5,-73.3167)" v:mID="1016" v:groupContext="group">
+ <title>Sheet.1016</title>
+ <g id="group1017-31" transform="translate(-2.19824E-14,-0.0534178)" v:mID="1017" v:groupContext="group">
+ <title>Sheet.1017</title>
+ <g id="shape1018-32" v:mID="1018" v:groupContext="shape" transform="translate(0,-4.86)">
+ <title>Rectangle.38</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <rect x="0" y="123.913" width="22.3966" height="8.99999" class="st3"/>
+ </g>
+ <g id="shape1019-34" v:mID="1019" v:groupContext="shape" v:layerMember="0"
+ transform="translate(2.19832,-18.18)">
+ <title>Dynamic connector.162</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1020-37" v:mID="1020" v:groupContext="shape" v:layerMember="0"
+ transform="translate(7.79747,-18.18)">
+ <title>Dynamic connector.163</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1021-40" v:mID="1021" v:groupContext="shape" v:layerMember="0" transform="translate(-3.40084,-18)">
+ <title>Dynamic connector.164</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ </g>
+ <g id="group1022-43" transform="translate(22.6034,0)" v:mID="1022" v:groupContext="group">
+ <title>Sheet.1022</title>
+ <g id="shape1023-44" v:mID="1023" v:groupContext="shape" transform="translate(0,-4.86)">
+ <title>Rectangle.38</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <rect x="0" y="123.913" width="22.3966" height="8.99999" class="st3"/>
+ </g>
+ <g id="shape1024-46" v:mID="1024" v:groupContext="shape" v:layerMember="0"
+ transform="translate(2.19832,-18.18)">
+ <title>Dynamic connector.162</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1025-49" v:mID="1025" v:groupContext="shape" v:layerMember="0"
+ transform="translate(7.79747,-18.18)">
+ <title>Dynamic connector.163</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1026-52" v:mID="1026" v:groupContext="shape" v:layerMember="0" transform="translate(-3.40084,-18)">
+ <title>Dynamic connector.164</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ </g>
</g>
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st6"
- id="path112"
- inkscape:connector-curvature="0"
- style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
- <text
- x="12.39"
- y="594.5"
- class="st4"
- v:langID="1033"
- id="text114"
- style="fill:#feffff;font-family:Calibri"><v:paragraph
- v:horizAlign="1" /><v:tabList />W 1</text>
-
- </g>
- <g
- id="shape11-52"
- v:mID="11"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,195.91581,-312.06416)">
- <title
- id="title117">Dynamic connector.11</title>
- <path
- d="m 0,612 0,-68 25.21,0"
- class="st7"
- id="path119"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape12-57"
- v:mID="12"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,176.37498,-305.07614)">
- <title
- id="title122">Dynamic connector.12</title>
- <path
- d="m 0,603 50.38,0"
- class="st7"
- id="path124"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape13-62"
- v:mID="13"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,176.37498,-312.06416)">
- <title
- id="title127">Dynamic connector.13</title>
- <path
- d="m 0,612 25.17,0 0,68 25.21,0"
- class="st7"
- id="path129"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape14-67"
- v:mID="14"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,252.79052,-259.2658)">
- <title
- id="title132">Dynamic connector.14</title>
- <path
- d="m 0,612 26.88,0 0,-68 23.5,0"
- class="st7"
- id="path134"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape15-72"
- v:mID="15"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,252.79052,-305.07614)">
- <title
- id="title137">Dynamic connector.15</title>
- <path
- d="m 0,603 50.38,0"
- class="st7"
- id="path139"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape19-77"
- v:mID="19"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,389.70366,-296.14701)">
- <title
- id="title142">Circle.19</title>
- <desc
- id="desc144">W ..</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="20.5"
- cy="591.5"
- width="35.88"
- height="30.75" />
- <g
- id="shadow19-78"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st2"
- id="path147"
- inkscape:connector-curvature="0"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ <g id="group1032-55" transform="translate(468,-73.2)" v:mID="1032" v:groupContext="group">
+ <title>Sheet.1032</title>
+ <g id="group1033-56" transform="translate(-2.19824E-14,-0.0534178)" v:mID="1033" v:groupContext="group">
+ <title>Sheet.1033</title>
+ <g id="shape1034-57" v:mID="1034" v:groupContext="shape" transform="translate(0,-4.86)">
+ <title>Rectangle.38</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <rect x="0" y="123.913" width="22.3966" height="8.99999" class="st3"/>
+ </g>
+ <g id="shape1035-59" v:mID="1035" v:groupContext="shape" v:layerMember="0"
+ transform="translate(2.19832,-18.18)">
+ <title>Dynamic connector.162</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1036-62" v:mID="1036" v:groupContext="shape" v:layerMember="0"
+ transform="translate(7.79747,-18.18)">
+ <title>Dynamic connector.163</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1037-65" v:mID="1037" v:groupContext="shape" v:layerMember="0" transform="translate(-3.40084,-18)">
+ <title>Dynamic connector.164</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ </g>
+ <g id="group1038-68" transform="translate(22.6034,0)" v:mID="1038" v:groupContext="group">
+ <title>Sheet.1038</title>
+ <g id="shape1039-69" v:mID="1039" v:groupContext="shape" transform="translate(0,-4.86)">
+ <title>Rectangle.38</title>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <rect x="0" y="123.913" width="22.3966" height="8.99999" class="st3"/>
+ </g>
+ <g id="shape1040-71" v:mID="1040" v:groupContext="shape" v:layerMember="0"
+ transform="translate(2.19832,-18.18)">
+ <title>Dynamic connector.162</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1041-74" v:mID="1041" v:groupContext="shape" v:layerMember="0"
+ transform="translate(7.79747,-18.18)">
+ <title>Dynamic connector.163</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ <g id="shape1042-77" v:mID="1042" v:groupContext="shape" v:layerMember="0" transform="translate(-3.40084,-18)">
+ <title>Dynamic connector.164</title>
+ <path d="M9 137.41 L9 146.41" class="st4"/>
+ </g>
+ </g>
</g>
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st6"
- id="path149"
- inkscape:connector-curvature="0"
- style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
- <text
- x="12.4"
- y="594.5"
- class="st4"
- v:langID="1033"
- id="text151"
- style="fill:#feffff;font-family:Calibri"><v:paragraph
- v:horizAlign="1" /><v:tabList />W ..</text>
-
- </g>
- <g
- id="shape20-83"
- v:mID="20"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,389.70366,-243.34865)">
- <title
- id="title154">Circle.20</title>
- <desc
- id="desc156">W N</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="20.5"
- cy="591.5"
- width="35.88"
- height="30.75" />
- <g
- id="shadow20-84"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st2"
- id="path159"
- inkscape:connector-curvature="0"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ <g id="shape1044-80" v:mID="1044" v:groupContext="shape" transform="translate(651.291,179.381) rotate(179.228)">
+ <title>Simple Arrow.1044</title>
+ <desc>Out Intf</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ <v:ud v:nameU="ArrowType" v:prompt="" v:val="VT0(2):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="30.3028" cy="132.913" width="60.61" height="0" transform="rotate(180)"/>
+ <path d="M0 132.91 L12 120.92 L12 126.92 L60.61 126.92 L60.61 132.91 L60.61 138.91 L12 138.91 L12 144.91 L0 132.91
+ Z" class="st1"/>
+ <text x="-46.13" y="-129.91" transform="rotate(180)" class="st2" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Out Intf</text> </g>
+ <g id="shape1045-83" v:mID="1045" v:groupContext="shape" transform="translate(67.8,-50.9334)">
+ <title>Rounded Rectangle.1045</title>
+ <desc>RX Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.0703125):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.0703125):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.0703125):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.0703125):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="25.3125" cy="101.413" width="50.63" height="63"/>
+ <path d="M5.06 132.91 L45.56 132.91 A5.06242 5.06242 -180 0 0 50.62 127.85 L50.62 74.98 A5.06242 5.06242 -180 0 0
+ 45.56 69.91 L5.06 69.91 A5.06242 5.06242 -180 0 0 0 74.98 L0 127.85 A5.06242 5.06242 -180 0 0 5.06 132.91
+ Z" class="st5"/>
+ <text x="17.88" y="88.81" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>RX<v:newlineChar/><v:newlineChar/><tspan
+ x="11.97" dy="2.4em" class="st7">Core</tspan></text> </g>
+ <g id="shape1056-87" v:mID="1056" v:groupContext="shape" transform="translate(532.5,-54)">
+ <title>Rounded Rectangle.1056</title>
+ <desc>TX Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.0703125):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.0703125):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.0703125):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.0703125):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="25.3125" cy="101.413" width="50.63" height="63"/>
+ <path d="M5.06 132.91 L45.56 132.91 A5.06242 5.06242 -180 0 0 50.62 127.85 L50.62 74.98 A5.06242 5.06242 -180 0 0
+ 45.56 69.91 L5.06 69.91 A5.06242 5.06242 -180 0 0 0 74.98 L0 127.85 A5.06242 5.06242 -180 0 0 5.06 132.91
+ Z" class="st5"/>
+ <text x="18.27" y="88.81" class="st6" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>TX<v:newlineChar/><v:newlineChar/><tspan
+ x="11.97" dy="2.4em" class="st7">Core</tspan></text> </g>
+ <g id="shape1057-91" v:mID="1057" v:groupContext="shape" transform="translate(123.188,-59.0334)">
+ <title>Rectangle.1057</title>
+ <desc>Atomic Q 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="29.8125" cy="123.797" width="59.63" height="18.2334"/>
+ <rect x="0" y="114.68" width="59.625" height="18.2334" class="st8"/>
+ <text x="7.19" y="126.8" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Atomic Q 1</text> </g>
+ <g id="shape1058-94" v:mID="1058" v:groupContext="shape" transform="translate(295.5,-59.4)">
+ <title>Rectangle.1058</title>
+ <desc>Atomic Q 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="29.8125" cy="123.797" width="59.63" height="18.2334"/>
+ <rect x="0" y="114.68" width="59.625" height="18.2334" class="st8"/>
+ <text x="7.19" y="126.8" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Atomic Q 2</text> </g>
+ <g id="shape1059-97" v:mID="1059" v:groupContext="shape" transform="translate(460.687,-58.3167)">
+ <title>Rectangle.1059</title>
+ <desc>Single Link</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="29.8125" cy="123.797" width="59.63" height="18.2334"/>
+ <rect x="0" y="114.68" width="59.625" height="18.2334" class="st8"/>
+ <text x="8.47" y="126.8" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Single Link</text> </g>
+ <g id="shape1060-100" v:mID="1060" v:groupContext="shape" transform="translate(198,-1.2)">
+ <title>Rectangle.1060</title>
+ <desc>Stage 1</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="29.8125" cy="123.797" width="59.63" height="18.2334"/>
+ <rect x="0" y="114.68" width="59.625" height="18.2334" class="st8"/>
+ <text x="14.94" y="126.8" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Stage 1</text> </g>
+ <g id="shape1061-103" v:mID="1061" v:groupContext="shape" transform="translate(366.188,0)">
+ <title>Rectangle.1061</title>
+ <desc>Stage 2</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)"/>
+ <v:textRect cx="29.8125" cy="123.797" width="59.63" height="18.2334"/>
+ <rect x="0" y="114.68" width="59.625" height="18.2334" class="st8"/>
+ <text x="14.94" y="126.8" class="st9" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Stage 2</text> </g>
+ <g id="group1062-106" transform="translate(199.2,-18.7134)" v:mID="1062" v:groupContext="group">
+ <title>Sheet.1062</title>
+ <g id="shape1052-107" v:mID="1052" v:groupContext="shape" transform="translate(18.66,-50.7)">
+ <title>Rounded Rectangle.1049</title>
+ <desc>Worker4 Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.097416666666665):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.097416666666665):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.097416666666665):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.097416666666665):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:verticalAlign="0"/>
+ <v:textRect cx="35.07" cy="101.413" width="70.14" height="63"/>
+ <path d="M7.01 132.91 L63.13 132.91 A7.01389 7.01389 -180 0 0 70.14 125.9 L70.14 76.93 A7.01389 7.01389 -180
+ 0 0 63.13 69.91 L7.01 69.91 A7.01389 7.01389 -180 0 0 0 76.93 L0 125.9 A7.01389 7.01389 -180 0 0
+ 7.01 132.91 Z" class="st5"/>
+ <text x="13.63" y="84.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Worker4<v:newlineChar/><v:newlineChar/><tspan
+ x="21.72" dy="2.357em" class="st11">Core</tspan></text> </g>
+ <g id="shape1053-111" v:mID="1053" v:groupContext="shape" transform="translate(12.9,-33.6)">
+ <title>Rounded Rectangle.1048</title>
+ <desc>Worker3 Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.099166666666667):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.099166666666667):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.099166666666667):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.099166666666667):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:verticalAlign="0"/>
+ <v:textRect cx="35.7" cy="101.413" width="71.4" height="63"/>
+ <path d="M7.14 132.91 L64.26 132.91 A7.13988 7.13988 -180 0 0 71.4 125.77 L71.4 77.05 A7.13988 7.13988 -180 0
+ 0 64.26 69.91 L7.14 69.91 A7.13988 7.13988 -180 0 0 0 77.05 L0 125.77 A7.13988 7.13988 -180 0 0
+ 7.14 132.91 Z" class="st5"/>
+ <text x="14.26" y="84.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Worker3<v:newlineChar/><v:newlineChar/><tspan
+ x="22.35" dy="2.357em" class="st11">Core</tspan></text> </g>
+ <g id="shape1054-115" v:mID="1054" v:groupContext="shape" transform="translate(5.89875,-16.8)">
+ <title>Rounded Rectangle.1047</title>
+ <desc>Worker2 Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.097953125):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.097953125):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.097953125):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.097953125):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:verticalAlign="0"/>
+ <v:textRect cx="35.2631" cy="101.413" width="70.53" height="63"/>
+ <path d="M7.05 132.91 L63.47 132.91 A7.05251 7.05251 -180 0 0 70.53 125.86 L70.53 76.97 A7.05251 7.05251 -180
+ 0 0 63.47 69.91 L7.05 69.91 A7.05251 7.05251 -180 0 0 0 76.97 L0 125.86 A7.05251 7.05251 -180 0
+ 0 7.05 132.91 Z" class="st5"/>
+ <text x="13.82" y="84.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Worker2<v:newlineChar/><v:newlineChar/><tspan
+ x="21.92" dy="2.357em" class="st11">Core</tspan></text> </g>
+ <g id="shape1055-119" v:mID="1055" v:groupContext="shape">
+ <title>Rounded Rectangle.1046</title>
+ <desc>Worker1 Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.098333333333333):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.098333333333333):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.098333333333333):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.098333333333333):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:verticalAlign="0"/>
+ <v:textRect cx="35.4" cy="101.413" width="70.8" height="63"/>
+ <path d="M7.08 132.91 L63.72 132.91 A7.07988 7.07988 -180 0 0 70.8 125.83 L70.8 76.99 A7.07988 7.07988 -180 0
+ 0 63.72 69.91 L7.08 69.91 A7.07988 7.07988 -180 0 0 0 76.99 L0 125.83 A7.07988 7.07988 -180 0 0
+ 7.08 132.91 Z" class="st5"/>
+ <text x="13.96" y="84.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Worker1<v:newlineChar/><v:newlineChar/><tspan
+ x="22.05" dy="2.357em" class="st11">Core</tspan></text> </g>
</g>
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st6"
- id="path161"
- inkscape:connector-curvature="0"
- style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
- <text
- x="11.69"
- y="594.5"
- class="st4"
- v:langID="1033"
- id="text163"
- style="fill:#feffff;font-family:Calibri"><v:paragraph
- v:horizAlign="1" /><v:tabList />W N</text>
-
- </g>
- <g
- id="shape21-89"
- v:mID="21"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,389.70366,-348.94537)">
- <title
- id="title166">Circle.21</title>
- <desc
- id="desc168">W 1</desc>
- <v:userDefs>
- <v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" />
- </v:userDefs>
- <v:textBlock
- v:margins="rect(4,4,4,4)" />
- <v:textRect
- cx="20.5"
- cy="591.5"
- width="35.88"
- height="30.75" />
- <g
- id="shadow21-90"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible">
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st2"
- id="path171"
- inkscape:connector-curvature="0"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2)" />
+ <g id="group1063-123" transform="translate(369.6,-18.6)" v:mID="1063" v:groupContext="group">
+ <title>Sheet.1063</title>
+ <g id="shape1064-124" v:mID="1064" v:groupContext="shape" transform="translate(18.66,-50.7)">
+ <title>Rounded Rectangle.1049</title>
+ <desc>Worker4 Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.097416666666665):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.097416666666665):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.097416666666665):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.097416666666665):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:verticalAlign="0"/>
+ <v:textRect cx="35.07" cy="101.413" width="70.14" height="63"/>
+ <path d="M7.01 132.91 L63.13 132.91 A7.01389 7.01389 -180 0 0 70.14 125.9 L70.14 76.93 A7.01389 7.01389 -180
+ 0 0 63.13 69.91 L7.01 69.91 A7.01389 7.01389 -180 0 0 0 76.93 L0 125.9 A7.01389 7.01389 -180 0 0
+ 7.01 132.91 Z" class="st5"/>
+ <text x="13.63" y="84.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Worker4<v:newlineChar/><v:newlineChar/><tspan
+ x="21.72" dy="2.357em" class="st11">Core</tspan></text> </g>
+ <g id="shape1065-128" v:mID="1065" v:groupContext="shape" transform="translate(12.9,-33.6)">
+ <title>Rounded Rectangle.1048</title>
+ <desc>Worker3 Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.099166666666667):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.099166666666667):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.099166666666667):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.099166666666667):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:verticalAlign="0"/>
+ <v:textRect cx="35.7" cy="101.413" width="71.4" height="63"/>
+ <path d="M7.14 132.91 L64.26 132.91 A7.13988 7.13988 -180 0 0 71.4 125.77 L71.4 77.05 A7.13988 7.13988 -180 0
+ 0 64.26 69.91 L7.14 69.91 A7.13988 7.13988 -180 0 0 0 77.05 L0 125.77 A7.13988 7.13988 -180 0 0
+ 7.14 132.91 Z" class="st5"/>
+ <text x="14.26" y="84.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Worker3<v:newlineChar/><v:newlineChar/><tspan
+ x="22.35" dy="2.357em" class="st11">Core</tspan></text> </g>
+ <g id="shape1066-132" v:mID="1066" v:groupContext="shape" transform="translate(5.89875,-16.8)">
+ <title>Rounded Rectangle.1047</title>
+ <desc>Worker2 Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.097953125):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.097953125):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.097953125):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.097953125):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:verticalAlign="0"/>
+ <v:textRect cx="35.2631" cy="101.413" width="70.53" height="63"/>
+ <path d="M7.05 132.91 L63.47 132.91 A7.05251 7.05251 -180 0 0 70.53 125.86 L70.53 76.97 A7.05251 7.05251 -180
+ 0 0 63.47 69.91 L7.05 69.91 A7.05251 7.05251 -180 0 0 0 76.97 L0 125.86 A7.05251 7.05251 -180 0
+ 0 7.05 132.91 Z" class="st5"/>
+ <text x="13.82" y="84.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Worker2<v:newlineChar/><v:newlineChar/><tspan
+ x="21.92" dy="2.357em" class="st11">Core</tspan></text> </g>
+ <g id="shape1067-136" v:mID="1067" v:groupContext="shape">
+ <title>Rounded Rectangle.1046</title>
+ <desc>Worker1 Core</desc>
+ <v:userDefs>
+ <v:ud v:nameU="CTypeTopLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeTopRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotLeftSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CTypeBotRightSnip" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="CornerLockHoriz" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockVert" v:prompt="" v:val="VT0(1):5"/>
+ <v:ud v:nameU="CornerLockDiag" v:prompt="" v:val="VT0(0):5"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.15):1"/>
+ <v:ud v:nameU="visVersion" v:prompt="" v:val="VT0(15):26"/>
+ <v:ud v:nameU="TopLeftOffset" v:prompt="" v:val="VT0(0.098333333333333):1"/>
+ <v:ud v:nameU="TopRightOffset" v:prompt="" v:val="VT0(0.098333333333333):1"/>
+ <v:ud v:nameU="BotLeftOffset" v:prompt="" v:val="VT0(0.098333333333333):1"/>
+ <v:ud v:nameU="BotRightOffset" v:prompt="" v:val="VT0(0.098333333333333):1"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:verticalAlign="0"/>
+ <v:textRect cx="35.4" cy="101.413" width="70.8" height="63"/>
+ <path d="M7.08 132.91 L63.72 132.91 A7.07988 7.07988 -180 0 0 70.8 125.83 L70.8 76.99 A7.07988 7.07988 -180 0
+ 0 63.72 69.91 L7.08 69.91 A7.07988 7.07988 -180 0 0 0 76.99 L0 125.83 A7.07988 7.07988 -180 0 0
+ 7.08 132.91 Z" class="st5"/>
+ <text x="13.96" y="84.71" class="st10" v:langID="1033"><v:paragraph v:horizAlign="1"/><v:tabList/>Worker1<v:newlineChar/><v:newlineChar/><tspan
+ x="22.05" dy="2.357em" class="st11">Core</tspan></text> </g>
</g>
- <path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st6"
- id="path173"
- inkscape:connector-curvature="0"
- style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" />
- <text
- x="12.39"
- y="594.5"
- class="st4"
- v:langID="1033"
- id="text175"
- style="fill:#feffff;font-family:Calibri"><v:paragraph
- v:horizAlign="1" /><v:tabList />W 1</text>
-
- </g>
- <g
- id="shape28-95"
- v:mID="28"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,345.12321,-305.07614)">
- <title
- id="title178">Dynamic connector.28</title>
- <path
- d="m 0,603 50.38,0"
- class="st7"
- id="path180"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape29-100"
- v:mID="29"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,345.12321,-312.06416)">
- <title
- id="title183">Dynamic connector.29</title>
- <path
- d="m 0,612 28.33,0 0,-68 22.05,0"
- class="st7"
- id="path185"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape30-105"
- v:mID="30"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,345.12321,-312.06416)">
- <title
- id="title188">Dynamic connector.30</title>
- <path
- d="m 0,612 28.33,0 0,68 22.05,0"
- class="st7"
- id="path190"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape31-110"
- v:mID="31"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,421.53797,-259.2658)">
- <title
- id="title193">Dynamic connector.31</title>
- <path
- d="m 0,612 24.42,0 0,-68 25.96,0"
- class="st7"
- id="path195"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape32-115"
- v:mID="32"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,421.53797,-305.07614)">
- <title
- id="title198">Dynamic connector.32</title>
- <path
- d="m 0,603 50.38,0"
- class="st7"
- id="path200"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape33-120"
- v:mID="33"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,421.53797,-364.86253)">
- <title
- id="title203">Dynamic connector.33</title>
- <path
- d="m 0,612 24.42,0 0,68 25.96,0"
- class="st7"
- id="path205"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
- </g>
- <g
- id="shape34-125"
- v:mID="34"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,252.79052,-364.86253)">
- <title
- id="title208">Dynamic connector.34</title>
- <path
- d="m 0,612 26.88,0 0,68 23.5,0"
- class="st7"
- id="path210"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" />
</g>
- <text
- xml:space="preserve"
- style="font-size:24.84628868px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="153.38116"
- y="165.90149"
- id="text3106"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="153.38116"
- y="165.90149"
- id="tspan3110"
- style="font-size:8.69620132px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;font-family:Sans;-inkscape-font-specification:Sans">Atomic #1</tspan></text>
-<text
- xml:space="preserve"
- style="font-size:24.84628868px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;overflow:visible;font-family:Sans"
- x="322.12939"
- y="165.90149"
- id="text3106-1"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="322.12939"
- y="165.90149"
- id="tspan3110-4"
- style="font-size:8.69620132px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;font-family:Sans;-inkscape-font-specification:Sans">Atomic #2</tspan></text>
-<text
- xml:space="preserve"
- style="font-size:24.84628868px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;overflow:visible;font-family:Sans"
- x="491.82089"
- y="172.79289"
- id="text3106-0"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="491.82089"
- y="172.79289"
- style="font-size:8.69620132px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan3923" /></text>
-<text
- xml:space="preserve"
- style="font-size:24.84628868px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;overflow:visible;font-family:Sans"
- x="491.02899"
- y="165.03951"
- id="text3106-8-5"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="491.02899"
- y="165.03951"
- id="tspan3110-2-1"
- style="font-size:8.69620132px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#ffffff;fill-opacity:1;font-family:Sans;-inkscape-font-specification:Sans">Single Link</tspan></text>
-<g
- style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
- id="shape5-22-1"
- v:mID="5"
- v:groupContext="shape"
- transform="matrix(0.77644652,0,0,0.77644652,556.00223,-296.89447)"><title
- id="title64-5">Circle</title><desc
- id="desc66-2">RX</desc><v:userDefs><v:ud
- v:nameU="visVersion"
- v:val="VT0(15):26" /></v:userDefs><v:textBlock
- v:margins="rect(4,4,4,4)" /><v:textRect
- cx="20.5"
- cy="591.5"
- width="35.88"
- height="30.75" /><g
- id="shadow5-23-7"
- v:groupContext="shadow"
- v:shadowOffsetX="0.345598"
- v:shadowOffsetY="-1.97279"
- v:shadowType="1"
- transform="translate(0.345598,1.97279)"
- class="st1"
- style="visibility:visible"><path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st2"
- id="path69-6"
- inkscape:connector-curvature="0"
- style="fill:#5b9bd5;fill-opacity:0.22000002;stroke:#5b9bd5;stroke-opacity:0.22000002;filter:url(#filter_2-7)" /></g><path
- d="m 0,591.5 a 20.5,20.5 0 0 1 41,0 20.5,20.5 0 1 1 -41,0 z"
- class="st6"
- id="path71-1"
- inkscape:connector-curvature="0"
- style="fill:#ffd965;stroke:#c7c8c8;stroke-width:0.25" /><text
- x="11.06866"
- y="596.56067"
- class="st4"
- v:langID="1033"
- id="text73-4"
- style="fill:#feffff;font-family:Calibri"> TX</text>
-</g><g
- style="font-size:12px;fill:none;stroke-linecap:square;stroke-miterlimit:3;overflow:visible"
- id="shape28-95-5"
- v:mID="28"
- v:groupContext="shape"
- v:layerMember="0"
- transform="matrix(0.77644652,0,0,0.77644652,512.00213,-305.42637)"><title
- id="title178-7">Dynamic connector.28</title><path
- d="m 0,603 50.38,0"
- class="st7"
- id="path180-6"
- inkscape:connector-curvature="0"
- style="stroke:#5b9bd5;stroke-width:1;stroke-linecap:round;stroke-linejoin:round;marker-end:url(#mrkr4-33)" /></g></g>
+ </g>
</svg>
diff --git a/doc/guides/prog_guide/img/malloc_heap.svg b/doc/guides/prog_guide/img/malloc_heap.svg
index 14e50088..f70bd666 100644
--- a/doc/guides/prog_guide/img/malloc_heap.svg
+++ b/doc/guides/prog_guide/img/malloc_heap.svg
@@ -1,1021 +1,333 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export malloc_heap.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ width="11in" height="8.5in" viewBox="0 0 792 612" xml:space="preserve" color-interpolation-filters="sRGB" class="st34">
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#deebf6;stroke:#c7c8c8;stroke-width:0.25}
+ .st5 {fill:#ed7d31;stroke:#c7c8c8;stroke-width:0.25}
+ .st6 {fill:#fbe5d5;stroke:#c7c8c8;stroke-width:0.25}
+ .st7 {fill:#e2efd9;stroke:#c7c8c8;stroke-width:0.25}
+ .st8 {fill:#a8d08d;stroke:#c7c8c8;stroke-width:0.25}
+ .st9 {fill:url(#ptrn2-71);shape-rendering:crispEdges;stroke:#c7c8c8;stroke-width:0.25}
+ .st10 {fill:#5b9bd5;stroke:#2e75b5;stroke-width:0.25}
+ .st11 {fill:none;stroke:none;stroke-width:0.25}
+ .st12 {fill:#000000;font-family:Calibri;font-size:1.00001em}
+ .st13 {fill:#ed7d31;stroke:#2e75b5;stroke-width:0.25}
+ .st14 {fill:#deebf6;stroke:#2e75b5;stroke-width:0.25}
+ .st15 {fill:#fbe5d5;stroke:#2e75b5;stroke-width:0.25}
+ .st16 {fill:#a8d08d;stroke:#2e75b5;stroke-width:0.25}
+ .st17 {fill:#e2efd9;stroke:#2e75b5;stroke-width:0.25}
+ .st18 {fill:url(#ptrn2-71);shape-rendering:crispEdges;stroke:#2e75b5;stroke-width:0.25}
+ .st19 {fill:#f4b183;stroke:#4f87bb;stroke-width:0.75}
+ .st20 {fill:#305497;font-family:Calibri;font-size:0.833336em}
+ .st21 {fill:#5b9bd5;fill-opacity:0.25;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.25}
+ .st22 {fill:#538135;stroke:#40709c;stroke-width:0.75}
+ .st23 {fill:#e2efd9;font-family:Calibri;font-size:0.833336em}
+ .st24 {marker-end:url(#mrkr10-146);marker-start:url(#mrkr10-144);stroke:#70ad47;stroke-width:0.75}
+ .st25 {fill:#70ad47;fill-opacity:1;stroke:#70ad47;stroke-opacity:1;stroke-width:0.22935779816514}
+ .st26 {fill:#ffffff;stroke:none;stroke-linecap:butt;stroke-width:7.2}
+ .st27 {fill:#538135;font-family:Calibri;font-size:1.00001em}
+ .st28 {fill:#ffffff;stroke:none;stroke-linecap:butt}
+ .st29 {fill:#bdd7ee;stroke:#40709c;stroke-width:0.75}
+ .st30 {fill:#1e4e79;font-family:Calibri;font-size:0.833336em}
+ .st31 {marker-end:url(#mrkr5-171);stroke:#4f87bb;stroke-dasharray:11.25,6.75;stroke-width:0.75}
+ .st32 {fill:#4f87bb;fill-opacity:1;stroke:#4f87bb;stroke-opacity:1;stroke-width:0.22935779816514}
+ .st33 {fill:#1e4e79;font-family:Calibri;font-size:1.00001em}
+ .st34 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
-<!-- SPDX-License-Identifier: BSD-3-Clause -->
-<!-- Copyright(c) 2015 Intel Corporation -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- id="svg2985"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- width="983.76233"
- height="643.91644"
- sodipodi:docname="malloc_heap_svg.svg">
- <metadata
- id="metadata2991">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <defs
- id="defs2989">
- <marker
- inkscape:stockid="Arrow2Mstart"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mstart"
- style="overflow:visible">
- <path
- id="path4265"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(0.6,0.6)"
- inkscape:connector-curvature="0" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Lstart"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Lstart"
- style="overflow:visible">
- <path
- id="path4259"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="matrix(1.1,0,0,1.1,1.1,0)"
- inkscape:connector-curvature="0" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mend"
- style="overflow:visible">
- <path
- id="path4268"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(-0.6,-0.6)"
- inkscape:connector-curvature="0" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Lend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Lend"
- style="overflow:visible">
- <path
- id="path4262"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="matrix(-1.1,0,0,-1.1,-1.1,0)"
- inkscape:connector-curvature="0" />
- </marker>
- <marker
- inkscape:stockid="Arrow1Lend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow1Lend"
- style="overflow:visible">
- <path
- id="path4244"
- d="M 0,0 5,-5 -12.5,0 5,5 0,0 z"
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1pt"
- transform="matrix(-0.8,0,0,-0.8,-10,0)"
- inkscape:connector-curvature="0" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mend-1"
- style="overflow:visible">
- <path
- inkscape:connector-curvature="0"
- id="path4268-4"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(-0.6,-0.6)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mend-1-1"
- style="overflow:visible">
- <path
- inkscape:connector-curvature="0"
- id="path4268-4-8"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(-0.6,-0.6)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mend-1-9"
- style="overflow:visible">
- <path
- inkscape:connector-curvature="0"
- id="path4268-4-6"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(-0.6,-0.6)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mstart"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mstart-7"
- style="overflow:visible">
- <path
- inkscape:connector-curvature="0"
- id="path4265-8"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(0.6,0.6)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mend-1-8"
- style="overflow:visible">
- <path
- inkscape:connector-curvature="0"
- id="path4268-4-2"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(-0.6,-0.6)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mend-1-2"
- style="overflow:visible">
- <path
- inkscape:connector-curvature="0"
- id="path4268-4-0"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(-0.6,-0.6)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mstart"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mstart-5"
- style="overflow:visible">
- <path
- inkscape:connector-curvature="0"
- id="path4265-7"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(0.6,0.6)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0"
- refX="0"
- id="Arrow2Mend-1-5"
- style="overflow:visible">
- <path
- inkscape:connector-curvature="0"
- id="path4268-4-4"
- style="fill-rule:evenodd;stroke-width:0.625;stroke-linejoin:round"
- d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
- transform="scale(-0.6,-0.6)" />
- </marker>
- </defs>
- <sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#30ff00"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="1920"
- inkscape:window-height="1139"
- id="namedview2987"
- showgrid="false"
- inkscape:zoom="0.8"
- inkscape:cx="346.31962"
- inkscape:cy="474.02351"
- inkscape:window-x="-8"
- inkscape:window-y="-8"
- inkscape:window-maximized="1"
- inkscape:current-layer="layer4"
- borderlayer="false"
- fit-margin-top="-100.6"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- showborder="true"
- inkscape:showpageshadow="false" />
- <g
- inkscape:groupmode="layer"
- id="layer4"
- inkscape:label="bg"
- style="display:inline"
- transform="translate(79.549515,-4.4031235)">
- <rect
- style="fill:#d1d1d1;fill-opacity:1;stroke-width:1.79999995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0"
- id="rect13505-6"
- width="98.575218"
- height="70.808708"
- x="328.8374"
- y="317.09564" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="boxes"
- style="display:inline"
- transform="translate(79.549515,-4.4031235)">
- <rect
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- id="rect2996-1"
- width="187.88171"
- height="52.881706"
- x="75.764778"
- y="5.5253706" />
- <rect
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7"
- width="634.0592"
- height="73.027374"
- x="60.830574"
- y="130.24477" />
- <rect
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:2.02648067;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-4"
- width="635.80048"
- height="74.768661"
- x="62.169655"
- y="315.43158" />
- <rect
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:2.85834479;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-0"
- width="886.87543"
- height="106.64049"
- x="-48.78373"
- y="540.24988" />
- <rect
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:3.13159013;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:6.26318017, 3.13159009;stroke-dashoffset:0;display:inline"
- id="rect2996-1-5"
- width="223.0157"
- height="109.20289"
- x="409.68008"
- y="420.63235" />
- <rect
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:2.90856051;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:5.81712091, 2.90856046;stroke-dashoffset:0;display:inline"
- id="rect2996-1-5-4"
- width="191.98872"
- height="109.42592"
- x="644.63062"
- y="419.66205" />
- <rect
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:2.08755708;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:4.17511403, 2.08755702;stroke-dashoffset:0;display:inline"
- id="rect2996-1-5-4-6"
- width="154.05972"
- height="70.246925"
- x="678.59509"
- y="214.87654" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer3"
- inkscape:label="blue headers"
- style="display:inline"
- transform="translate(79.549515,-4.4031235)">
- <rect
- style="fill:#749aba;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.85091281;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9"
- width="16.994427"
- height="73.79715"
- x="59.561817"
- y="129.601" />
- <rect
- style="fill:#749aba;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.83000004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-4"
- width="17.015339"
- height="72.050293"
- x="384.61731"
- y="130.22485" />
- <rect
- style="fill:#749aba;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.86642051;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-8"
- width="16.978918"
- height="75.107468"
- x="261.76944"
- y="315.16946" />
- <rect
- style="fill:#749aba;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.36914372;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-82"
- width="48.412117"
- height="14.17484"
- x="-42.956367"
- y="549.14984" />
- <rect
- style="fill:#97ffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.83000004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-4-1"
- width="17.015339"
- height="72.050293"
- x="241.39912"
- y="131.17525" />
- <rect
- style="fill:#97ffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.36399999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-4-1-3"
- width="16.981569"
- height="74.882637"
- x="568.40881"
- y="315.33447" />
- <rect
- style="fill:#97ffff;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.95599997;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-4-1-3-7"
- width="49.319912"
- height="12.752681"
- x="-43.016232"
- y="595.7439" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer5"
- inkscape:label="red headers"
- style="display:inline"
- transform="translate(79.549515,-4.4031235)">
- <rect
- style="fill:#ff7b6d;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.83000004;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45"
- width="17.015339"
- height="72.050293"
- x="501.49307"
- y="130.29137" />
- <rect
- style="fill:#ff7b6d;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.84049058;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-5"
- width="17.004848"
- height="72.923683"
- x="678.04279"
- y="130.29662" />
- <rect
- style="fill:#ff7b6d;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.85091281;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-1"
- width="16.994427"
- height="73.79715"
- x="681.8158"
- y="316.14957" />
- <rect
- style="fill:#ff7b6d;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.86126781;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-7"
- width="16.984072"
- height="74.670677"
- x="500.62485"
- y="315.92252" />
- <rect
- style="fill:#ff7b6d;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.82472873;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-11"
- width="17.020611"
- height="71.613625"
- x="175.33748"
- y="131.40486" />
- <rect
- style="fill:#ff7b6d;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.86642051;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-52"
- width="16.978918"
- height="75.107468"
- x="62.221222"
- y="315.0412" />
- <rect
- style="fill:#ff7b6d;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.39574718;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-76"
- width="48.805244"
- height="14.612387"
- x="-42.996674"
- y="572.61749" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer9"
- inkscape:label="unused space"
- style="display:inline"
- transform="translate(79.549515,-4.4031235)">
- <rect
- style="fill:#dddddd;fill-opacity:1;stroke-width:1.79999995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0"
- id="rect13505"
- width="98.575218"
- height="70.808708"
- x="402.22061"
- y="131.06841" />
- <rect
- style="fill:#dddddd;fill-opacity:1;stroke-width:1.79999995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0"
- id="rect13505-8"
- width="96.700218"
- height="70.808708"
- x="77.587402"
- y="131.47064" />
- <rect
- style="fill:#dddddd;fill-opacity:1;stroke-width:1.79999995;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
- id="rect13505-5"
- width="220.21585"
- height="72.839958"
- x="279.26709"
- y="316.08002" />
- <rect
- style="fill:#dddddd;fill-opacity:1;stroke:#000000;stroke-width:1.12016988;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
- id="rect13505-59"
- width="51.879829"
- height="15.10388"
- x="445.6301"
- y="550.76691" />
- <rect
- style="fill:none;stroke:#000000;stroke-width:1.12016988;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;stroke-dashoffset:0;display:inline"
- id="rect13505-59-3"
- width="51.879829"
- height="15.10388"
- x="445.62964"
- y="574.00262" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer8"
- inkscape:label="pad headers"
- style="display:inline"
- transform="translate(79.549515,-4.4031235)">
- <rect
- style="fill:#fffec5;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-7-3"
- width="49.88493"
- height="73.447571"
- x="518.21405"
- y="316.16635" />
- <rect
- style="fill:#fffec5;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.86126781;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-7-3-2"
- width="16.98407"
- height="74.670677"
- x="245.17551"
- y="315.48059" />
- <rect
- style="fill:#fffec5;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:2.02099991;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-7-3-4"
- width="49.474121"
- height="72.084908"
- x="193.07074"
- y="130.93698" />
- <rect
- style="fill:#fffec5;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;display:inline"
- id="rect2996-1-7-9-45-7-3-6"
- width="51.75993"
- height="14.072571"
- x="445.05756"
- y="596.40125" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer6"
- inkscape:label="arrows"
- style="display:inline"
- transform="translate(79.549515,-4.4031235)">
- <path
- style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:9, 9;stroke-dashoffset:0;marker-mid:none;marker-end:url(#Arrow2Mend)"
- d="m 262.87951,51.152779 c 0,0 148.12631,-3.276651 187.01718,76.272861"
- id="path3973"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
- d="m 681.9161,128.72302 c -22.09709,-49.497478 -148.13393,-45.873109 -179.42835,0"
- id="path3988"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1)"
- d="M 386.69903,129.58525 C 361.95029,80.971668 231.48641,62.20327 177.21864,130.46914"
- id="path3990"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
- d="m 60.546017,172.89554 c 0,0 -32.703692,23.86486 -60.10407166,-3.53553"
- id="path3992"
- inkscape:connector-curvature="0" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1)"
- d="m 176.82896,203.22242 c -47.24941,74.32926 -107.438064,49.90804 -116.0476,3.53553"
- id="path4035"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1)"
- d="m 502.04581,203.43962 c -25.63262,33.58757 -82.31601,45.11485 -116.67261,2.65165"
- id="path4037"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1)"
- d="M 763.23339,214.04621 C 748.83403,184.37018 738.54555,166.795 699.15183,161.8971"
- id="path4039"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-mid:none;marker-end:url(#Arrow2Mend-1)"
- d="m 769.42057,285.19885 c -0.88389,83.96892 -68.50098,75.57203 -68.50098,75.57203"
- id="path4041"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1)"
- d="M 682.35804,313.04117 C 652.306,280.33749 539.16892,270.61477 501.16193,313.92506"
- id="path4043"
- inkscape:connector-curvature="0" />
- <path
- style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:9, 9;stroke-dashoffset:0;marker-end:url(#Arrow2Mend)"
- d="m 415.42523,202.55574 c 0,36.23922 -4.41941,88.38835 -35.35533,109.60155"
- id="path4045"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:3;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:9, 9;stroke-dashoffset:0;marker-end:url(#Arrow2Mend)"
- d="M 375.65048,315.69282 C 336.75961,232.60777 166.1701,311.27341 143.18912,205.20739"
- id="path4047"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1)"
- d="M 263.39727,315.69282 C 245.7196,288.29244 86.62058,275.91807 62.755726,313.04117"
- id="path4051"
- inkscape:connector-curvature="0" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend)"
- d="m 61.790091,352.05822 c -25.819377,20.1091 -49.573204,20.1091 -61.96650422,1.43636"
- id="path4053"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:2.54999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:7.65, 7.65;stroke-dashoffset:0;marker-end:url(#Arrow2Mend)"
- d="m 448.12892,630.25126 48.61359,0"
- id="path5241"
- inkscape:connector-curvature="0" />
- <path
- style="fill:none;stroke:#000000;stroke-width:2.09116507px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Mend);display:inline"
- d="m -39.741559,626.33548 c 10.599699,-0.12345 25.528414,-0.12564 43.719789,-0.81161"
- id="path4053-2"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1)"
- d="m 499.39416,389.93904 c -46.84583,17.67767 -206.82873,31.8198 -238.64854,1.76776"
- id="path13236"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1);display:inline"
- d="m 502.12201,419.58783 c 2.37436,-10.40132 1.73096,-5.65101 4.38262,-26.86421"
- id="path4043-4"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow2Mstart);marker-end:url(#Arrow2Mend-1);display:inline"
- d="m 517.94842,353.38466 c 19.7099,0 43.91577,-0.61421 66.57012,-0.61421"
- id="path4043-4-3"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow2Mstart);marker-end:url(#Arrow2Mend-1);display:inline"
- d="m 501.71494,363.4321 c 19.7099,0 157.04077,-0.61421 179.69512,-0.61421"
- id="path4043-4-3-9"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow2Mend-1);display:inline"
- d="M 728.67747,419.79091 C 702.92683,395.63959 592.90843,427.2649 577.43509,389.1767"
- id="path4043-4-9"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- <path
- style="fill:none;stroke:#000000;stroke-width:1.79999995;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-start:url(#Arrow2Mstart);marker-end:url(#Arrow2Mend-1);display:inline"
- d="m 60.975741,169.05711 c 19.709901,0 90.307569,-0.61421 112.961919,-0.61421"
- id="path4043-4-3-9-1"
- inkscape:connector-curvature="0"
- sodipodi:nodetypes="cc" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer7"
- inkscape:label="text"
- style="display:inline"
- transform="translate(79.549515,-4.4031235)">
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="90.732231"
- y="36.767765"
- id="text10506"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508"
- x="90.732231"
- y="36.767765"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">struct malloc_heap</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="580.66718"
- y="107.47876"
- id="text10506-2"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1"
- x="580.66718"
- y="107.47876"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="438.12686"
- y="223.50792"
- id="text10506-2-5"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-7"
- x="438.12686"
- y="223.50792"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="581.31598"
- y="298.638"
- id="text10506-2-61"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-89"
- x="581.31598"
- y="298.638"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="274.6084"
- y="99.764236"
- id="text10506-2-2"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-79"
- x="274.6084"
- y="99.764236"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="301.12491"
- y="423.26556"
- id="text10506-2-54"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-3"
- x="301.12491"
- y="423.26556"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="133.18704"
- y="303.94128"
- id="text10506-2-1"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-2"
- x="133.18704"
- y="303.94128"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="16.340637"
- y="561.27954"
- id="text10506-2-3"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-34"
- x="16.340637"
- y="561.27954"
- style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Free element header(struct malloc_elem, state = FREE)</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="16.996887"
- y="583.24792"
- id="text10506-2-3-1"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-34-1"
- x="16.996887"
- y="583.24792"
- style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Used element header(struct malloc_elem, state = BUSY)</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="108.84206"
- y="161.39597"
- id="text10506-2-6-8"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-7"
- x="108.84206"
- y="161.39597"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">size</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="62.299515"
- y="119.27286"
- id="text10506-2-6-4"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-2"
- x="62.299515"
- y="119.27286"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Memseg 0</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="63.905106"
- y="406.73242"
- id="text10506-2-6-4-7"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-2-7"
- x="63.905106"
- y="406.73242"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Memseg 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="-25.028084"
- y="192.57199"
- id="text10506-2-9"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-31"
- x="-25.028084"
- y="192.57199"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="-26.795866"
- y="379.95526"
- id="text10506-2-98"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-6"
- x="-26.795866"
- y="379.95526"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="416.73682"
- y="269.53305"
- id="text10506-2-6-5"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-0"
- x="416.73682"
- y="269.53305"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">next_free</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="228.00418"
- y="259.55359"
- id="text10506-2-6-5-2"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-0-8"
- x="228.00418"
- y="259.55359"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">next_free</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="356.16727"
- y="55.376503"
- id="text10506-2-6-5-6"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-0-0"
- x="356.16727"
- y="55.376503"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">free_head</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="49.218113"
- y="254.00189"
- id="text10506-2-9-0"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-31-9"
- x="49.218113"
- y="254.00189"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">prev</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="690.51538"
- y="236.82936"
- id="text10506-2-6-0"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-06"
- x="690.51538"
- y="236.82936"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Dummy Elements:</tspan><tspan
- sodipodi:role="line"
- x="690.51538"
- y="256.02936"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan13581">Size = 0</tspan><tspan
- sodipodi:role="line"
- x="690.51538"
- y="275.22937"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan13583">State = BUSY</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="541.03906"
- y="347.20566"
- id="text10506-2-6-8-8"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-7-9"
- x="541.03906"
- y="347.20566"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">pad</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="16.661926"
- y="605.21631"
- id="text10506-2-3-1-4"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-34-1-4"
- x="16.661926"
- y="605.21631"
- style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Pad element header(struct malloc_elem, state = PAD)</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="17.290833"
- y="627.77881"
- id="text10506-2-3-1-6"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-34-1-0"
- x="17.290833"
- y="627.77881"
- style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Generic element pointers</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="429.11118"
- y="449.84528"
- id="text10506-2-6-6"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- x="429.11118"
- y="449.84528"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan13711">Malloc element header:</tspan><tspan
- sodipodi:role="line"
- x="429.11118"
- y="469.04529"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan13713">state = BUSY</tspan><tspan
- sodipodi:role="line"
- x="429.11118"
- y="488.24527"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan13715">size = &lt;size&gt;</tspan><tspan
- sodipodi:role="line"
- x="429.11118"
- y="507.44528"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan13717">pad = &lt;padsize&gt;</tspan></text>
- <flowRoot
- xml:space="preserve"
- id="flowRoot13719"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"><flowRegion
- id="flowRegion13721"><rect
- id="rect13723"
- width="968.73627"
- height="188.26718"
- x="-81.317276"
- y="460.64972" /></flowRegion><flowPara
- id="flowPara13725"></flowPara></flowRoot> <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="594.30859"
- y="378.91797"
- id="text10506-2-6-8-8-1"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-8-7-9-3"
- x="594.30859"
- y="378.91797"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">size</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="505.86865"
- y="563.34613"
- id="text10506-2-3-1-6-8"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-34-1-0-4"
- x="505.86865"
- y="563.34613"
- style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Free / Unallocated data space</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="660.39099"
- y="449.92532"
- id="text10506-2-6-6-0"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- x="660.39099"
- y="449.92532"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan14527">Pad element header:</tspan><tspan
- sodipodi:role="line"
- x="660.39099"
- y="469.12534"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan14531">state = PAD</tspan><tspan
- sodipodi:role="line"
- x="660.39099"
- y="488.32532"
- style="font-size:16px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas"
- id="tspan14533">pad = padsize</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="506.5249"
- y="584.28369"
- id="text10506-2-3-1-6-8-7"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-34-1-0-4-2"
- x="506.5249"
- y="584.28369"
- style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Used / allocated data space</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:120.00000477%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;display:inline;font-family:Sans"
- x="506.18994"
- y="605.30322"
- id="text10506-2-3-1-6-8-7-0"
- sodipodi:linespacing="120%"><tspan
- sodipodi:role="line"
- id="tspan10508-1-34-1-0-4-2-1"
- x="506.18994"
- y="605.30322"
- style="font-size:14px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:120.00000477%;writing-mode:lr-tb;text-anchor:start;font-family:Consolas;-inkscape-font-specification:Consolas">Padding / unavailable space</tspan></text>
- </g>
+ <defs id="Patterns_And_Gradients">
+ <pattern id="ptrn2-71" patternUnits="userSpaceOnUse" width="6" height="6" viewBox="0 0 64 64">
+ <image x="0" y="0" width="64" height="64" image-rendering="optimizeSpeed"
+ xlink:href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsMAAA7DAcdvqGQAAAA7SURBVChTY/i3f7/Pv3//MDFIXETEhwGfJIjGVIAkCeKjKkCTRFWARRKhAIckRAEeSYgCPJL/9u/3AQC1aLsBz7wFUwAAAABJRU5ErkJggg=="/>
+ </pattern>
+ <linearGradient id="grad0-168" x1="0" y1="0" x2="1" y2="0" gradientTransform="rotate(60 0.5 0.5)">
+ <stop offset="0" stop-color="#e9eff7" stop-opacity="1"/>
+ <stop offset="0.24" stop-color="#f4f7fb" stop-opacity="1"/>
+ <stop offset="0.54" stop-color="#feffff" stop-opacity="1"/>
+ </linearGradient>
+ </defs>
+ <defs id="Markers">
+ <g id="lend10">
+ <path
+ d="M 0 0.75 C -0.414214 0.75 -0.75 0.414214 -0.75 0 -0.75 -0.414214 -0.414214 -0.75 0 -0.75 0.414214 -0.75 0.75 -0.414214 0.75 0 0.75 0.414214 0.414214 0.75 0 0.75 Z "
+ style="stroke:none"/>
+ </g>
+ <marker id="mrkr10-144" class="st25" refX="2.79" orient="auto" markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend10" transform="scale(4.36) "/>
+ </marker>
+ <marker id="mrkr10-146" class="st25" refX="-2.79" orient="auto" markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend10" transform="scale(-4.36,-4.36) "/>
+ </marker>
+ <g id="lend5">
+ <path d="M 2 1 L 0 0 L 1.98117 -0.993387 C 1.67173 -0.364515 1.67301 0.372641 1.98465 1.00043 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr5-171" class="st32" refX="-7.15" orient="auto" markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend5" transform="scale(-4.36,-4.36) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g>
+ <title>Page-1</title>
+ <g id="group14-1" transform="translate(45,-360)">
+ <title>Sheet.14</title>
+ <g id="shape3-2">
+ <title>Sheet.3</title>
+ <g id="shadow3-3" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="18" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="18" height="81" class="st3"/>
+ </g>
+ <g id="shape4-7" transform="translate(18,0)">
+ <title>Sheet.4</title>
+ <g id="shadow4-8" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="117" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="117" height="81" class="st4"/>
+ </g>
+ </g>
+ <g id="group15-12" transform="translate(180,-360)">
+ <title>Sheet.15</title>
+ <g id="shape5-13">
+ <title>Sheet.5</title>
+ <g id="shadow5-14" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="18" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="18" height="81" class="st5"/>
+ </g>
+ <g id="shape6-18" transform="translate(18,0)">
+ <title>Sheet.6</title>
+ <g id="shadow6-19" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="117" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="117" height="81" class="st6"/>
+ </g>
+ </g>
+ <g id="shape7-23" transform="translate(612,-360)">
+ <title>Sheet.7</title>
+ <g id="shadow7-24" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="18" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="18" height="81" class="st5"/>
+ </g>
+ <g id="shape10-28" transform="translate(630,-360)">
+ <title>Sheet.10</title>
+ <g id="shadow10-29" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="51.75" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="51.75" height="81" class="st7"/>
+ </g>
+ <g id="shape12-33" transform="translate(681.75,-360)">
+ <title>Sheet.12</title>
+ <g id="shadow12-34" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="18" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="18" height="81" class="st8"/>
+ </g>
+ <g id="shape13-38" transform="translate(699.75,-360)">
+ <title>Sheet.13</title>
+ <g id="shadow13-39" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="47.25" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="47.25" height="81" class="st6"/>
+ </g>
+ <g id="group29-43" transform="translate(315,-360)">
+ <title>Sheet.29</title>
+ <g id="shape23-44">
+ <title>Sheet.23</title>
+ <g id="shadow23-45" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="18" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="18" height="81" class="st3"/>
+ </g>
+ <g id="shape24-49" transform="translate(18,0)">
+ <title>Sheet.24</title>
+ <g id="shadow24-50" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="36" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="36" height="81" class="st4"/>
+ </g>
+ </g>
+ <g id="group30-54" transform="translate(477,-360)">
+ <title>Sheet.30</title>
+ <g id="shape27-55">
+ <title>Sheet.27</title>
+ <g id="shadow27-56" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="18" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="18" height="81" class="st3"/>
+ </g>
+ <g id="shape28-60" transform="translate(18,0)">
+ <title>Sheet.28</title>
+ <g id="shadow28-61" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="117" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="117" height="81" class="st4"/>
+ </g>
+ </g>
+ <g id="shape31-65" transform="translate(369,-360)">
+ <title>Sheet.31</title>
+ <g id="shadow31-66" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="531" width="108" height="81" class="st2"/>
+ </g>
+ <rect x="0" y="531" width="108" height="81" class="st9"/>
+ </g>
+ <g id="shape32-72" transform="translate(184.5,-260)">
+ <title>Sheet.32</title>
+ <g id="shadow32-73" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="594" width="63" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="594" width="63" height="18" class="st10"/>
+ </g>
+ <g id="shape39-77" transform="translate(252,-259)">
+ <title>Sheet.39</title>
+ <desc>Free element header</desc>
+ <rect x="0" y="592" width="135" height="20" class="st11"/>
+ <text x="4" y="605.6" class="st12">Free element header</text> </g>
+ <g id="shape43-80" transform="translate(184.5,-232)">
+ <title>Sheet.43</title>
+ <g id="shadow43-81" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="594" width="63" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="594" width="63" height="18" class="st13"/>
+ </g>
+ <g id="shape44-85" transform="translate(252,-231)">
+ <title>Sheet.44</title>
+ <desc>Used element header</desc>
+ <rect x="0" y="592" width="135" height="20" class="st11"/>
+ <text x="4" y="605.6" class="st12">Used element header</text> </g>
+ <g id="shape46-88" transform="translate(409.5,-260)">
+ <title>Sheet.46</title>
+ <g id="shadow46-89" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="594" width="63" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="594" width="63" height="18" class="st14"/>
+ </g>
+ <g id="shape47-93" transform="translate(477,-259)">
+ <title>Sheet.47</title>
+ <desc>Free space</desc>
+ <rect x="0" y="592" width="135" height="20" class="st11"/>
+ <text x="4" y="605.6" class="st12">Free space</text> </g>
+ <g id="shape49-96" transform="translate(409.5,-232)">
+ <title>Sheet.49</title>
+ <g id="shadow49-97" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="594" width="63" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="594" width="63" height="18" class="st15"/>
+ </g>
+ <g id="shape50-101" transform="translate(477,-231)">
+ <title>Sheet.50</title>
+ <desc>Allocated data</desc>
+ <rect x="0" y="592" width="135" height="20" class="st11"/>
+ <text x="4" y="605.6" class="st12">Allocated data</text> </g>
+ <g id="shape52-104" transform="translate(184.5,-204)">
+ <title>Sheet.52</title>
+ <g id="shadow52-105" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="594" width="63" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="594" width="63" height="18" class="st16"/>
+ </g>
+ <g id="shape53-109" transform="translate(252,-203)">
+ <title>Sheet.53</title>
+ <desc>Pad element header</desc>
+ <rect x="0" y="592" width="135" height="20" class="st11"/>
+ <text x="4" y="605.6" class="st12">Pad element header</text> </g>
+ <g id="shape62-112" transform="translate(409.5,-204)">
+ <title>Sheet.62</title>
+ <g id="shadow62-113" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="594" width="63" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="594" width="63" height="18" class="st17"/>
+ </g>
+ <g id="shape63-117" transform="translate(477,-203)">
+ <title>Sheet.63</title>
+ <desc>Padding</desc>
+ <rect x="0" y="592" width="135" height="20" class="st11"/>
+ <text x="4" y="605.6" class="st12">Padding</text> </g>
+ <g id="shape65-120" transform="translate(184.5,-176)">
+ <title>Sheet.65</title>
+ <g id="shadow65-121" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <rect x="0" y="594" width="63" height="18" class="st2"/>
+ </g>
+ <rect x="0" y="594" width="63" height="18" class="st18"/>
+ </g>
+ <g id="shape66-126" transform="translate(252,-175)">
+ <title>Sheet.66</title>
+ <desc>Unavailable space</desc>
+ <rect x="0" y="592" width="135" height="20" class="st11"/>
+ <text x="4" y="605.6" class="st12">Unavailable space</text> </g>
+ <g id="shape97-129" transform="translate(612,-375.75)">
+ <title>Simple Double Arrow</title>
+ <desc>size</desc>
+ <path d="M0 612 L18 598.5 L18 605.25 L117 605.25 L117 598.5 L135 612 L117 625.5 L117 618.75 L18 618.75 L18 625.5 L0 612
+ Z" class="st19"/>
+ <text x="59.93" y="615" class="st20">size</text> </g>
+ <g id="shape99-132" transform="translate(630,-400.5)">
+ <title>Simple Double Arrow.99</title>
+ <desc>pad</desc>
+ <g id="shadow99-133" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 612 L12 600 L12 606 L57.75 606 L57.75 600 L69.75 612 L57.75 624 L57.75 618 L12 618 L12 624 L0 612 Z"
+ class="st21"/>
+ </g>
+ <path d="M0 612 L12 600 L12 606 L57.75 606 L57.75 600 L69.75 612 L57.75 624 L57.75 618 L12 618 L12 624 L0 612 Z"
+ class="st22"/>
+ <text x="27.23" y="615" class="st23">pad</text> </g>
+ <g id="shape113-138" transform="translate(54,-337.5)">
+ <title>Sheet.113</title>
+ <desc>prev/next</desc>
+ <path d="M134.64 591.56 L134.58 591.92 A72 22.5 0 0 1 63 612 A63 22.5 0 0 1 0.37 591.92 L0.31 591.57" class="st24"/>
+ <rect x="43.4968" y="593.55" width="48.0064" height="14.4001" class="st26"/>
+ <text x="43.5" y="604.35" class="st27">prev/next</text> </g>
+ <g id="shape115-149" transform="translate(324,-337.5)">
+ <title>Sheet.115</title>
+ <desc>prev/next</desc>
+ <path d="M0.44 591.55 L0.51 591.9 A90 22.5 -180 0 0 90 612 A72 22.5 -180 0 0 161.58 591.92 L161.64 591.56" class="st24"/>
+ <rect x="56.9968" y="593.55" width="48.0064" height="14.4001" class="st28"/>
+ <text x="57" y="604.35" class="st27">prev/next</text> </g>
+ <g id="shape118-158" transform="translate(315,-390.375)">
+ <title>Simple Double Arrow.118</title>
+ <desc>size</desc>
+ <g id="shadow118-159" transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 612 L12 600 L12 606 L42 606 L42 600 L54 612 L42 624 L42 618 L12 618 L12 624 L0 612 Z" class="st21"/>
+ </g>
+ <path d="M0 612 L12 600 L12 606 L42 606 L42 600 L54 612 L42 624 L42 618 L12 618 L12 624 L0 612 Z" class="st29"/>
+ <text x="19.43" y="615" class="st30">size</text> </g>
+ <g id="shape119-164" transform="translate(54,-441)">
+ <title>Sheet.119</title>
+ <desc>next free</desc>
+ <path d="M-0 612 A135 22.5 0 0 1 135 589.5 A134.606 21.534 0 0 1 266.35 606.33 L266.56 606.62" class="st31"/>
+ <rect x="112.807" y="593.55" width="43.9926" height="14.4001" class="st26"/>
+ <text x="112.81" y="604.35" class="st33">next free</text> </g>
+ <g id="shape120-174" transform="translate(323.739,-441.34)">
+ <title>Sheet.120</title>
+ <desc>next free</desc>
+ <path d="M0.24 612 A78.4445 18.5592 178.15 0 1 72.26 589.84 A81.2523 26.5101 179.07 0 1 159.23 607.01 L159.43 607.31"
+ class="st31"/>
+ <rect x="59.193" y="593.55" width="43.9926" height="14.4001" class="st28"/>
+ <text x="59.19" y="604.35" class="st33">next free</text> </g>
+ <g id="shape122-182" transform="translate(189,-337.5)">
+ <title>Sheet.122</title>
+ <desc>prev/next</desc>
+ <path d="M0.33 591.57 L0.39 591.92 A67.5 22.5 -180 0 0 67.5 612 A69.1875 22.5 -180 0 0 136.29 591.92 L136.35 591.56"
+ class="st24"/>
+ <rect x="44.3405" y="593.55" width="48.0064" height="14.4001" class="st26"/>
+ <text x="44.34" y="604.35" class="st27">prev/next</text> </g>
+ <g id="shape123-191" transform="translate(486.563,-337.5)">
+ <title>Sheet.123</title>
+ <desc>prev/next</desc>
+ <path d="M0.35 591.56 L0.41 591.92 A71.4375 22.5 -180 0 0 71.44 612 A63 22.5 -180 0 0 134.07 591.92 L134.12 591.57"
+ class="st24"/>
+ <rect x="43.2155" y="593.55" width="48.0064" height="14.4001" class="st26"/>
+ <text x="43.22" y="604.35" class="st27">prev/next</text> </g>
+ </g>
</svg>
diff --git a/doc/guides/prog_guide/img/stateful-op.svg b/doc/guides/prog_guide/img/stateful-op.svg
new file mode 100644
index 00000000..e6ef6353
--- /dev/null
+++ b/doc/guides/prog_guide/img/stateful-op.svg
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export stateful-ops.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="1.49139in" height="1.35359in"
+ viewBox="0 0 107.38 97.4587" xml:space="preserve" color-interpolation-filters="sRGB" class="st6">
+ <v:documentProperties v:langID="16393" v:metric="true" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st6 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="0.0393701" v:pageScale="0.0393701" v:drawingUnits="24" v:shadowOffsetX="8.50394"
+ v:shadowOffsetY="-8.50394"/>
+ <g id="shape38-1" v:mID="38" v:groupContext="shape" transform="translate(58.305,-28.025)">
+ <title>Circle</title>
+ <desc>stream</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="22.6772" cy="74.7815" width="39.69" height="34.0157"/>
+ <g id="shadow38-2" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 74.78 A22.6772 22.6772 0 0 1 45.35 74.78 A22.6772 22.6772 0 1 1 0 74.78 Z" class="st2"/>
+ </g>
+ <path d="M0 74.78 A22.6772 22.6772 0 0 1 45.35 74.78 A22.6772 22.6772 0 1 1 0 74.78 Z" class="st3"/>
+ <text x="8.43" y="77.78" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>stream</text> </g>
+ <g id="shape39-7" v:mID="39" v:groupContext="shape" transform="translate(3.0294,-73.3793)">
+ <title>Circle.39</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="11.3386" cy="86.1201" width="19.85" height="17.0079"/>
+ <g id="shadow39-8" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 86.12 A11.3386 11.3386 0 0 1 22.68 86.12 A11.3386 11.3386 0 1 1 0 86.12 Z" class="st2"/>
+ </g>
+ <path d="M0 86.12 A11.3386 11.3386 0 0 1 22.68 86.12 A11.3386 11.3386 0 1 1 0 86.12 Z" class="st3"/>
+ <text x="6.07" y="89.12" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape40-13" v:mID="40" v:groupContext="shape" transform="translate(3.0294,-50.7021)">
+ <title>Circle.40</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="11.3386" cy="86.1201" width="19.85" height="17.0079"/>
+ <g id="shadow40-14" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 86.12 A11.3386 11.3386 0 0 1 22.68 86.12 A11.3386 11.3386 0 1 1 0 86.12 Z" class="st2"/>
+ </g>
+ <path d="M0 86.12 A11.3386 11.3386 0 0 1 22.68 86.12 A11.3386 11.3386 0 1 1 0 86.12 Z" class="st3"/>
+ <text x="6.07" y="89.12" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape41-19" v:mID="41" v:groupContext="shape" transform="translate(3.0294,-28.025)">
+ <title>Circle.41</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="11.3386" cy="86.1201" width="19.85" height="17.0079"/>
+ <g id="shadow41-20" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 86.12 A11.3386 11.3386 0 0 1 22.68 86.12 A11.3386 11.3386 0 1 1 0 86.12 Z" class="st2"/>
+ </g>
+ <path d="M0 86.12 A11.3386 11.3386 0 0 1 22.68 86.12 A11.3386 11.3386 0 1 1 0 86.12 Z" class="st3"/>
+ <text x="6.07" y="89.12" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape42-25" v:mID="42" v:groupContext="shape" transform="translate(3.0294,-5.34779)">
+ <title>Circle.249</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="11.3386" cy="86.1201" width="19.85" height="17.0079"/>
+ <g id="shadow42-26" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 86.12 A11.3386 11.3386 0 0 1 22.68 86.12 A11.3386 11.3386 0 1 1 0 86.12 Z" class="st2"/>
+ </g>
+ <path d="M0 86.12 A11.3386 11.3386 0 0 1 22.68 86.12 A11.3386 11.3386 0 1 1 0 86.12 Z" class="st3"/>
+ <text x="6.07" y="89.12" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape43-31" v:mID="43" v:groupContext="shape" transform="translate(66.3024,-75.8604) rotate(24.6166)">
+ <title>Sheet.43</title>
+ <path d="M0 97.46 L43.16 97.46" class="st5"/>
+ </g>
+ <g id="shape44-34" v:mID="44" v:groupContext="shape" transform="translate(37.2064,-61.3598) rotate(6.77654)">
+ <title>Sheet.44</title>
+ <path d="M0 97.46 L34.05 97.46" class="st5"/>
+ </g>
+ <g id="shape45-37" v:mID="45" v:groupContext="shape" transform="translate(-6.31062,-33.9543) rotate(-19.179)">
+ <title>Sheet.45</title>
+ <path d="M0 97.46 L34.51 97.46" class="st5"/>
+ </g>
+ <g id="shape46-40" v:mID="46" v:groupContext="shape" transform="translate(-14.8893,-7.82888) rotate(-24.6166)">
+ <title>Sheet.46</title>
+ <path d="M0 97.46 L43.16 97.46" class="st5"/>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/stateless-op-shared.svg b/doc/guides/prog_guide/img/stateless-op-shared.svg
new file mode 100644
index 00000000..257a69a5
--- /dev/null
+++ b/doc/guides/prog_guide/img/stateless-op-shared.svg
@@ -0,0 +1,124 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export Drawing5.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="1.89687in" height="1.60662in"
+ viewBox="0 0 136.575 115.676" xml:space="preserve" color-interpolation-filters="sRGB" class="st7">
+ <v:documentProperties v:langID="16393" v:metric="true" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {fill:none}
+ .st6 {stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:0.999999}
+ .st7 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="0.0393701" v:pageScale="0.0393701" v:drawingUnits="24" v:shadowOffsetX="8.50394"
+ v:shadowOffsetY="-8.50394"/>
+ <g id="group47-1" transform="translate(3.02997,-5.34779)" v:mID="47" v:groupContext="group">
+ <title>Sheet.47</title>
+ <g id="shape36-2" v:mID="36" v:groupContext="shape" transform="translate(66.2255,-27.0553)">
+ <title>Circle</title>
+ <desc>priv_xform</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(3.99999,3.99999,3.99999,3.99999)" v:tabSpace="42.5196"/>
+ <v:textRect cx="31.7998" cy="88.2699" width="55.66" height="40.7542"/>
+ <g id="shadow36-3" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 88.27 A31.7998 27.1694 0 1 1 63.6 88.27 A31.7998 27.1694 0 1 1 0 88.27 Z" class="st2"/>
+ </g>
+ <path d="M0 88.27 A31.7998 27.1694 0 1 1 63.6 88.27 A31.7998 27.1694 0 1 1 0 88.27 Z" class="st3"/>
+ <text x="9.47" y="91.27" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>priv_xform</text> </g>
+ <g id="shape39-8" v:mID="39" v:groupContext="shape" transform="translate(-5.9952E-015,-81.5083)">
+ <title>Circle.40</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(3.99999,3.99999,3.99999,3.99999)" v:tabSpace="42.5196"/>
+ <v:textRect cx="13.5848" cy="101.968" width="23.78" height="20.3771"/>
+ <g id="shadow39-9" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 101.97 A13.5848 13.5848 0 1 1 27.17 101.97 A13.5848 13.5848 0 1 1 0 101.97 Z" class="st2"/>
+ </g>
+ <path d="M0 101.97 A13.5848 13.5848 0 1 1 27.17 101.97 A13.5848 13.5848 0 1 1 0 101.97 Z" class="st3"/>
+ <text x="8.32" y="104.97" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape40-14" v:mID="40" v:groupContext="shape" transform="translate(-5.9952E-015,-54.3389)">
+ <title>Circle.41</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(3.99999,3.99999,3.99999,3.99999)" v:tabSpace="42.5196"/>
+ <v:textRect cx="13.5848" cy="101.968" width="23.78" height="20.3771"/>
+ <g id="shadow40-15" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 101.97 A13.5848 13.5848 0 1 1 27.17 101.97 A13.5848 13.5848 0 1 1 0 101.97 Z" class="st2"/>
+ </g>
+ <path d="M0 101.97 A13.5848 13.5848 0 1 1 27.17 101.97 A13.5848 13.5848 0 1 1 0 101.97 Z" class="st3"/>
+ <text x="8.32" y="104.97" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape41-20" v:mID="41" v:groupContext="shape" transform="translate(-5.9952E-015,-27.1694)">
+ <title>Circle.42</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(3.99999,3.99999,3.99999,3.99999)" v:tabSpace="42.5196"/>
+ <v:textRect cx="13.5848" cy="101.968" width="23.78" height="20.3771"/>
+ <g id="shadow41-21" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 101.97 A13.5848 13.5848 0 1 1 27.17 101.97 A13.5848 13.5848 0 1 1 0 101.97 Z" class="st2"/>
+ </g>
+ <path d="M0 101.97 A13.5848 13.5848 0 1 1 27.17 101.97 A13.5848 13.5848 0 1 1 0 101.97 Z" class="st3"/>
+ <text x="8.32" y="104.97" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape42-26" v:mID="42" v:groupContext="shape">
+ <title>Circle.249</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(3.99999,3.99999,3.99999,3.99999)" v:tabSpace="42.5196"/>
+ <v:textRect cx="13.5848" cy="101.968" width="23.78" height="20.3771"/>
+ <g id="shadow42-27" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 101.97 A13.5848 13.5848 0 1 1 27.17 101.97 A13.5848 13.5848 0 1 1 0 101.97 Z" class="st2"/>
+ </g>
+ <path d="M0 101.97 A13.5848 13.5848 0 1 1 27.17 101.97 A13.5848 13.5848 0 1 1 0 101.97 Z" class="st3"/>
+ <text x="8.32" y="104.97" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape43-32" v:mID="43" v:groupContext="shape" transform="translate(75.3544,-84.7046) rotate(24.6166)">
+ <title>Sheet.43</title>
+ <path d="M0 115.68 L51.71 115.68 L0 115.68 Z" class="st5"/>
+ <path d="M0 115.68 L51.71 115.68" class="st6"/>
+ </g>
+ <g id="shape44-35" v:mID="44" v:groupContext="shape" transform="translate(40.8189,-67.2403) rotate(6.77654)">
+ <title>Sheet.44</title>
+ <path d="M0 115.68 L40.8 115.68 L0 115.68 Z" class="st5"/>
+ <path d="M0 115.68 L40.8 115.68" class="st6"/>
+ </g>
+ <g id="shape45-38" v:mID="45" v:groupContext="shape" transform="translate(-10.8336,-34.4585) rotate(-19.179)">
+ <title>Sheet.45</title>
+ <path d="M0 115.68 L41.35 115.68 L0 115.68 Z" class="st5"/>
+ <path d="M0 115.68 L41.35 115.68" class="st6"/>
+ </g>
+ <g id="shape46-41" v:mID="46" v:groupContext="shape" transform="translate(-21.0159,-3.19618) rotate(-24.6166)">
+ <title>Sheet.46</title>
+ <path d="M0 115.68 L51.71 115.68 L0 115.68 Z" class="st5"/>
+ <path d="M0 115.68 L51.71 115.68" class="st6"/>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/stateless-op.svg b/doc/guides/prog_guide/img/stateless-op.svg
new file mode 100644
index 00000000..fd951b7a
--- /dev/null
+++ b/doc/guides/prog_guide/img/stateless-op.svg
@@ -0,0 +1,140 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<!-- Generated by Microsoft Visio, SVG Export stateless-ops.svg Page-1 -->
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:ev="http://www.w3.org/2001/xml-events"
+ xmlns:v="http://schemas.microsoft.com/visio/2003/SVGExtensions/" width="2.24024in" height="2.70592in"
+ viewBox="0 0 161.298 194.826" xml:space="preserve" color-interpolation-filters="sRGB" class="st8">
+ <v:documentProperties v:langID="16393" v:metric="true" v:viewMarkup="false"/>
+
+ <style type="text/css">
+ <![CDATA[
+ .st1 {visibility:visible}
+ .st2 {fill:#5b9bd5;fill-opacity:0.22;filter:url(#filter_2);stroke:#5b9bd5;stroke-opacity:0.22}
+ .st3 {fill:#5b9bd5;stroke:#c7c8c8;stroke-width:0.25}
+ .st4 {fill:#feffff;font-family:Calibri;font-size:0.833336em}
+ .st5 {fill:#feffff;font-family:Calibri;font-size:0.75em}
+ .st6 {marker-start:url(#mrkr13-19);stroke:#5b9bd5;stroke-linecap:round;stroke-linejoin:round;stroke-width:1}
+ .st7 {fill:#5b9bd5;fill-opacity:1;stroke:#5b9bd5;stroke-opacity:1;stroke-width:0.28409094308259}
+ .st8 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3}
+ ]]>
+ </style>
+
+ <defs id="Markers">
+ <g id="lend13">
+ <path d="M 3 1 L 0 0 L 3 -1 L 3 1 " style="stroke:none"/>
+ </g>
+ <marker id="mrkr13-19" class="st7" v:arrowType="13" v:arrowSize="2" v:setback="0" refX="0" orient="auto"
+ markerUnits="strokeWidth" overflow="visible">
+ <use xlink:href="#lend13" transform="scale(3.5199995788296) "/>
+ </marker>
+ </defs>
+ <defs id="Filters">
+ <filter id="filter_2">
+ <feGaussianBlur stdDeviation="2"/>
+ </filter>
+ </defs>
+ <g v:mID="0" v:index="1" v:groupContext="foregroundPage">
+ <title>Page-1</title>
+ <v:pageProperties v:drawingScale="0.0393701" v:pageScale="0.0393701" v:drawingUnits="24" v:shadowOffsetX="9"
+ v:shadowOffsetY="-9"/>
+ <g id="group61-1" transform="translate(3.02943,-5.34782)" v:mID="61" v:groupContext="group">
+ <title>Sheet.61</title>
+ <g id="shape52-2" v:mID="52" v:groupContext="shape" transform="translate(97.856,-133.39)">
+ <title>Circle.40</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="28.3463" cy="167.479" width="49.62" height="41.4408"/>
+ <g id="shadow52-3" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M-0 167.48 A28.3465 27.6272 180 0 1 56.69 167.48 A28.3465 27.6272 180 0 1 -0 167.48 Z" class="st2"/>
+ </g>
+ <path d="M-0 167.48 A28.3465 27.6272 180 0 1 56.69 167.48 A28.3465 27.6272 180 0 1 -0 167.48 Z" class="st3"/>
+ <text x="23.08" y="170.48" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape53-8" v:mID="53" v:groupContext="shape" transform="translate(-3.9968E-015,-133.39)">
+ <title>Circle.299</title>
+ <desc>priv_xform</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="28.3463" cy="167.479" width="49.62" height="41.4408"/>
+ <g id="shadow53-9" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M-0 167.48 A28.3465 27.6272 180 0 1 56.69 167.48 A28.3465 27.6272 180 0 1 -0 167.48 Z" class="st2"/>
+ </g>
+ <path d="M-0 167.48 A28.3465 27.6272 180 0 1 56.69 167.48 A28.3465 27.6272 180 0 1 -0 167.48 Z" class="st3"/>
+ <text x="8.25" y="170.18" class="st5" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>priv_xform</text> </g>
+ <g id="shape54-14" v:mID="54" v:groupContext="shape" transform="translate(56.693,-160.74)">
+ <title>Sheet.54</title>
+ <path d="M0 194.83 L10.2 194.83 L10.56 194.83 L41.16 194.83" class="st6"/>
+ </g>
+ <g id="shape55-20" v:mID="55" v:groupContext="shape" transform="translate(97.856,-65.1969)">
+ <title>Circle.479</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="28.3463" cy="166.185" width="49.62" height="42.5197"/>
+ <g id="shadow55-21" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 166.19 A28.3465 28.3465 0 1 1 56.69 166.19 A28.3465 28.3465 0 1 1 0 166.19 Z" class="st2"/>
+ </g>
+ <path d="M0 166.19 A28.3465 28.3465 0 1 1 56.69 166.19 A28.3465 28.3465 0 1 1 0 166.19 Z" class="st3"/>
+ <text x="23.08" y="169.19" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape56-26" v:mID="56" v:groupContext="shape" transform="translate(-3.9968E-015,-65.7801)">
+ <title>Circle.480</title>
+ <desc>priv_xform</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="28.3463" cy="166.768" width="49.62" height="42.5197"/>
+ <g id="shadow56-27" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 166.77 A28.3465 28.3465 0 0 1 56.69 166.77 A28.3465 28.3465 0 0 1 0 166.77 Z" class="st2"/>
+ </g>
+ <path d="M0 166.77 A28.3465 28.3465 0 0 1 56.69 166.77 A28.3465 28.3465 0 0 1 0 166.77 Z" class="st3"/>
+ <text x="8.25" y="169.47" class="st5" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>priv_xform</text> </g>
+ <g id="shape57-32" v:mID="57" v:groupContext="shape" transform="translate(56.693,-93.8414)">
+ <title>Sheet.57</title>
+ <path d="M0 194.83 L10.2 194.83 L10.56 194.83 L41.16 194.83" class="st6"/>
+ </g>
+ <g id="shape58-37" v:mID="58" v:groupContext="shape" transform="translate(97.856,0)">
+ <title>Circle.482</title>
+ <desc>op</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="28.3463" cy="166.185" width="49.62" height="42.5197"/>
+ <g id="shadow58-38" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 166.19 A28.3465 28.3465 0 1 1 56.69 166.19 A28.3465 28.3465 0 1 1 0 166.19 Z" class="st2"/>
+ </g>
+ <path d="M0 166.19 A28.3465 28.3465 0 1 1 56.69 166.19 A28.3465 28.3465 0 1 1 0 166.19 Z" class="st3"/>
+ <text x="23.08" y="169.19" class="st4" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>op</text> </g>
+ <g id="shape59-43" v:mID="59" v:groupContext="shape" transform="translate(-3.9968E-015,-0.583223)">
+ <title>Circle.483</title>
+ <desc>priv_xform</desc>
+ <v:userDefs>
+ <v:ud v:nameU="visVersion" v:val="VT0(15):26"/>
+ </v:userDefs>
+ <v:textBlock v:margins="rect(4,4,4,4)" v:tabSpace="42.5197"/>
+ <v:textRect cx="28.3463" cy="166.768" width="49.62" height="42.5197"/>
+ <g id="shadow59-44" v:groupContext="shadow" v:shadowOffsetX="0.345598" v:shadowOffsetY="-1.97279" v:shadowType="1"
+ transform="matrix(1,0,0,1,0.345598,1.97279)" class="st1">
+ <path d="M0 166.77 A28.3465 28.3465 0 0 1 56.69 166.77 A28.3465 28.3465 0 0 1 0 166.77 Z" class="st2"/>
+ </g>
+ <path d="M0 166.77 A28.3465 28.3465 0 0 1 56.69 166.77 A28.3465 28.3465 0 0 1 0 166.77 Z" class="st3"/>
+ <text x="8.25" y="169.47" class="st5" v:langID="16393"><v:paragraph v:horizAlign="1"/><v:tabList/>priv_xform</text> </g>
+ <g id="shape60-49" v:mID="60" v:groupContext="shape" transform="translate(56.693,-28.6446)">
+ <title>Sheet.60</title>
+ <path d="M0 194.83 L10.2 194.83 L10.56 194.83 L41.16 194.83" class="st6"/>
+ </g>
+ </g>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/turbo_tb_decode.svg b/doc/guides/prog_guide/img/turbo_tb_decode.svg
new file mode 100644
index 00000000..a259f458
--- /dev/null
+++ b/doc/guides/prog_guide/img/turbo_tb_decode.svg
@@ -0,0 +1,1471 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2018 Intel Corporation -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="194.21973mm"
+ height="163.25349mm"
+ viewBox="0 0 194.21973 163.25349"
+ version="1.1"
+ id="svg8"
+ inkscape:version="0.92.3 (2405546, 2018-03-11)"
+ sodipodi:docname="turbo_tb_decode.svg">
+ <defs
+ id="defs2">
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker8474"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path8472"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path6507"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5140"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5122"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5134"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(1.1,0,0,1.1,1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5116"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Sstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Sstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5128"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.2,0,0,0.2,1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ id="path5143"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5131"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5119"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <filter
+ id="filter_2">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur4" />
+ </filter>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-9"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-11"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-2"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5-6"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0-1"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker8474-2"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend">
+ <path
+ inkscape:connector-curvature="0"
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path8472-4" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.98994949"
+ inkscape:cx="148.9027"
+ inkscape:cy="256.96386"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:snap-text-baseline="true"
+ inkscape:window-width="1920"
+ inkscape:window-height="1137"
+ inkscape:window-x="1072"
+ inkscape:window-y="185"
+ inkscape:window-maximized="1"
+ fit-margin-top="0"
+ fit-margin-left="0"
+ fit-margin-right="0"
+ fit-margin-bottom="0" />
+ <metadata
+ id="metadata5">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title></dc:title>
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ style="display:inline"
+ transform="translate(-9.7553377,-54.351435)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="41.159508"
+ y="61.13464"
+ id="text873"><tspan
+ sodipodi:role="line"
+ id="tspan871"
+ x="41.159508"
+ y="64.996841"
+ style="font-size:4.23333311px;stroke-width:0.26458332" /></text>
+ <g
+ id="g4997">
+ <a
+ id="a990">
+ <rect
+ style="opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.1950596;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect854"
+ width="44.771584"
+ height="14.03559"
+ x="18.573261"
+ y="54.450935" />
+ </a>
+ <text
+ id="text877"
+ y="62.738258"
+ x="21.648832"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:3.52777767px;stroke-width:0.26458332"
+ y="62.738258"
+ x="21.648832"
+ id="tspan875"
+ sodipodi:role="line">w<tspan
+ id="tspan885"
+ style="font-size:64.99999762%;baseline-shift:sub">k</tspan> LLR circular buffer</tspan></text>
+ </g>
+ <rect
+ style="opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.1981452;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967"
+ width="25.196077"
+ height="14.03559"
+ x="63.344845"
+ y="54.450935" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="73.52343"
+ y="62.445942"
+ id="text877-8"><tspan
+ sodipodi:role="line"
+ id="tspan875-6"
+ x="73.52343"
+ y="62.445942"
+ style="font-size:3.52777767px;stroke-width:0.26458332">...</tspan></text>
+ <rect
+ style="opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.1950596;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect854-5"
+ width="44.771584"
+ height="14.03559"
+ x="88.540924"
+ y="54.450935" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="91.90699"
+ y="62.531521"
+ id="text877-4"><tspan
+ sodipodi:role="line"
+ id="tspan875-0"
+ x="91.90699"
+ y="62.531521"
+ style="font-size:3.52777767px;stroke-width:0.26458332">w<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan885-9">k</tspan> LLR circular buffer</tspan></text>
+ <rect
+ style="opacity:1;fill:#d8d8d8;fill-opacity:1;stroke:#000000;stroke-width:0.199;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4735"
+ width="6.6797671"
+ height="14.033618"
+ x="11.893495"
+ y="54.450935" />
+ <g
+ id="g4807"
+ transform="translate(0,0.188984)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741"
+ d="m 11.929873,70.237907 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5"
+ d="m 18.686199,70.237907 c 0,0 -0.4016,2.480469 -1.370164,2.456847 -0.968564,-0.02363 -2.007999,-0.02363 -2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760"
+ d="m 15.308036,72.671124 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <g
+ id="g5063">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-0"
+ d="m 18.686199,70.426891 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-8"
+ d="m 132.59878,70.332401 c 0,0 -0.4016,2.480469 -1.37017,2.456847 -0.96856,-0.02363 -2.008,-0.02363 -2.008,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-2"
+ d="m 75.831475,72.954606 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4809"
+ d="M 22.064362,72.860108 129.22061,72.765618"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <flowRoot
+ xml:space="preserve"
+ id="flowRoot4811"
+ style="font-style:normal;font-weight:normal;font-size:13.33333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ transform="scale(0.26458333)"><flowRegion
+ id="flowRegion4813"><rect
+ id="rect4815"
+ width="41.785713"
+ height="14.642858"
+ x="39.285713"
+ y="287.16254" /></flowRegion><flowPara
+ id="flowPara4817">offse</flowPara></flowRoot> <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="74.16684"
+ y="75.043541"
+ id="text4821-3"
+ transform="scale(0.95903923,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0"
+ x="74.16684"
+ y="75.043541"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">length</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="11.603812"
+ y="75.449066"
+ id="text4821-3-9"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0"
+ x="11.603812"
+ y="75.449066"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">offset</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#7f8085;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="150.53461"
+ y="58.039307"
+ id="text4821-3-6"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-1"
+ x="150.53461"
+ y="58.039307"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144">The encoded TB is given as a</tspan><tspan
+ sodipodi:role="line"
+ x="150.53461"
+ y="61.97533"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan4877">contiguous buffer</tspan></text>
+ <path
+ style="fill:none;stroke:#7f7f7f;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.1199999, 0.26499999;stroke-dashoffset:0;stroke-opacity:1"
+ d="M 10.423511,83.31801 H 72.162772"
+ id="path4885"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#818181;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.1199999, 0.26499999;stroke-dashoffset:0;stroke-opacity:1"
+ d="M 82.319012,83.31801 176.93243,83.184377"
+ id="path4887"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:5.39796209px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#818181;fill-opacity:1;stroke:none;stroke-width:0.40484715"
+ x="77.368088"
+ y="81.855705"
+ id="text4891"
+ transform="scale(0.96100989,1.040572)"><tspan
+ sodipodi:role="line"
+ id="tspan4889"
+ x="77.368088"
+ y="81.855705"
+ style="fill:#818181;fill-opacity:1;stroke-width:0.40484715">or</tspan></text>
+ <path
+ style="fill:none;stroke:#7f7f7f;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.11999992, 0.26499999;stroke-dashoffset:0;stroke-opacity:1"
+ d="M 9.7553377,181.2723 H 71.494599"
+ id="path4885-9"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:none;stroke:#818181;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.11999992, 0.26499999;stroke-dashoffset:0;stroke-opacity:1"
+ d="m 81.786039,181.24224 94.613421,-0.13363"
+ id="path4887-9"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:5.39796209px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#818181;fill-opacity:1;stroke:none;stroke-width:0.40484715"
+ x="76.813484"
+ y="175.96187"
+ id="text4891-9"
+ transform="scale(0.9610099,1.040572)"><tspan
+ sodipodi:role="line"
+ id="tspan4889-7"
+ x="76.813484"
+ y="175.96187"
+ style="fill:#818181;fill-opacity:1;stroke-width:0.40484715">or</tspan></text>
+ <rect
+ style="opacity:1;fill:#d8d8d8;fill-opacity:1;stroke:#000000;stroke-width:0.199;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4735-7"
+ width="6.6797671"
+ height="14.033618"
+ x="10.825401"
+ y="101.15727" />
+ <g
+ id="g4807-3"
+ transform="translate(-0.98393573,46.759016)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-2"
+ d="m 11.929873,70.237907 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-81"
+ d="m 18.686199,70.237907 c 0,0 -0.4016,2.480469 -1.370164,2.456847 -0.968564,-0.02363 -2.007999,-0.02363 -2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-3"
+ d="m 15.308036,72.671124 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="10.57785"
+ y="120.11156"
+ id="text4821-3-9-7"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-3"
+ x="10.57785"
+ y="120.11156"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">offset</tspan></text>
+ <g
+ transform="matrix(0.99106501,0,0,1.0000618,-0.90257595,46.700562)"
+ id="g4997-1">
+ <a
+ id="a990-9">
+ <rect
+ style="opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.1950596;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect854-4"
+ width="44.771584"
+ height="14.03559"
+ x="18.573261"
+ y="54.450935" />
+ </a>
+ <text
+ id="text877-1"
+ y="62.738258"
+ x="21.648832"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:3.52777767px;stroke-width:0.26458332"
+ y="62.738258"
+ x="21.648832"
+ id="tspan875-9"
+ sodipodi:role="line">w<tspan
+ id="tspan885-5"
+ style="font-size:64.99999762%;baseline-shift:sub">k</tspan> LLR circular buffer</tspan></text>
+ </g>
+ <g
+ transform="matrix(0.98808659,0,0,1.0000825,72.450284,46.566642)"
+ id="g4997-1-4">
+ <a
+ id="a990-9-5">
+ <rect
+ style="opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.1950596;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect854-4-6"
+ width="44.771584"
+ height="14.03559"
+ x="18.573261"
+ y="54.450935" />
+ </a>
+ <text
+ id="text877-1-0"
+ y="62.738258"
+ x="21.648832"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:3.52777767px;stroke-width:0.26458332"
+ y="62.738258"
+ x="21.648832"
+ id="tspan875-9-8"
+ sodipodi:role="line">w<tspan
+ id="tspan885-5-7"
+ style="font-size:64.99999762%;baseline-shift:sub">k</tspan> LLR circular buffer</tspan></text>
+ </g>
+ <g
+ transform="matrix(1.0292712,0,0,0.99978365,-1.5276486,46.585803)"
+ id="g5063-5">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-0-0"
+ d="m 18.686199,70.426891 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-8-2"
+ d="m 132.59878,70.332401 c 0,0 -0.4016,2.480469 -1.37017,2.456847 -0.96856,-0.02363 -2.008,-0.02363 -2.008,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-2-4"
+ d="m 75.831475,72.954606 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4809-8"
+ d="M 22.064362,72.860108 129.22061,72.765618"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="74.729973"
+ y="119.73718"
+ id="text4821-3-3"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7"
+ x="74.729973"
+ y="119.73718"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">length</tspan></text>
+ <path
+ style="fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 61.876283,101.15486 9.985811,-0.03 v 4.04245 l -2.53906,1.46999 5.57925,2.87314 -3.006781,1.60362 v 4.04245 l -10.01922,0.0348 z"
+ id="path5575"
+ inkscape:connector-curvature="0" />
+ <path
+ style="fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 90.802274,101.02207 -9.953242,0.003 -0.03341,3.94222 -2.639287,1.63703 5.679477,2.87314 -2.939965,1.57021 v 4.00905 l 9.886426,0.003 z"
+ id="path5581"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="64.84626"
+ y="109.37679"
+ id="text5879"><tspan
+ sodipodi:role="line"
+ id="tspan5877"
+ x="64.84626"
+ y="109.37679"
+ style="stroke-width:0.26458332">..</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="85.559654"
+ y="109.51042"
+ id="text5883"><tspan
+ sodipodi:role="line"
+ id="tspan5881"
+ x="85.559654"
+ y="109.51042"
+ style="stroke-width:0.26458332">..</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Mend)"
+ d="m 66.783973,101.05803 c 0,0 5.144937,-13.096212 18.942727,-0.33409"
+ id="path5899"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.06663418px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#7f8085;fill-opacity:1;stroke:none;stroke-width:0.22999756"
+ x="155.38319"
+ y="97.845207"
+ id="text4821-3-6-2"
+ transform="scale(0.93400804,1.0706546)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-1-0"
+ x="155.38319"
+ y="97.845207"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756">The encoded TB is given as a</tspan><tspan
+ sodipodi:role="line"
+ x="155.38319"
+ y="101.6785"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan4877-1">&quot;scattered&quot; buffer through a</tspan><tspan
+ sodipodi:role="line"
+ x="155.38319"
+ y="105.5118"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan5992">chained mbuf</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.06663418px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#7f8085;fill-opacity:1;stroke:none;stroke-width:0.22999756"
+ x="147.15628"
+ y="145.52435"
+ id="text4821-3-6-2-4"
+ transform="scale(0.93400803,1.0706546)"><tspan
+ sodipodi:role="line"
+ x="147.15628"
+ y="145.52435"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan5992-2">Result is decoded back into the given output</tspan><tspan
+ sodipodi:role="line"
+ x="147.15628"
+ y="149.35765"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan6023">mbuf as one contiguous buffer with no </tspan><tspan
+ sodipodi:role="line"
+ x="147.15628"
+ y="153.19093"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+               id="tspan6025">CRC24B retained</tspan></text>
+ <g
+ id="g6253">
+ <g
+ transform="translate(10.356694,1.2027129)"
+ id="g6079">
+ <path
+ style="fill:#fec000;fill-opacity:1;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 64.144685,140.61392 -0.03341,-11.65963 h 4.209497 l -0.03341,11.65963 1.971111,-0.0334 -4.042449,3.90882 -4.04245,-3.90882 z"
+ id="path6066"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-142.49652"
+ y="67.226768"
+ id="text6074"
+ transform="rotate(-90)"><tspan
+ sodipodi:role="line"
+ id="tspan6072"
+ x="-142.49652"
+ y="67.226768"
+ style="font-size:2.82222223px;stroke-width:0.26458332">decode</tspan></text>
+ </g>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.06663418px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#7f8085;fill-opacity:1;stroke:none;stroke-width:0.22999756"
+ x="146.80908"
+ y="180.29805"
+ id="text4821-3-6-2-4-6"
+ transform="scale(0.93400803,1.0706546)"><tspan
+ sodipodi:role="line"
+ x="146.80908"
+ y="180.29805"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan6025-0">Result is decoded back into the given output</tspan><tspan
+ sodipodi:role="line"
+ x="146.80908"
+ y="184.13135"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan6110">mbuf as one contiguous buffer with CRC24B</tspan><tspan
+ sodipodi:role="line"
+ x="146.80908"
+ y="187.96463"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan6112">retained in place when</tspan><tspan
+ sodipodi:role="line"
+ x="146.80908"
+ y="191.79793"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan6114">RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP</tspan><tspan
+ sodipodi:role="line"
+ x="146.80908"
+ y="195.63123"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.22999756"
+ id="tspan6116">is set in op_flags</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="25.9951"
+ y="169.83803"
+ id="text4821-3-9-7-1"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-3-0"
+ x="25.9951"
+ y="169.83803"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">offset</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="75.794968"
+ y="168.951"
+ id="text4821-3-3-0"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8"
+ x="75.794968"
+ y="168.951"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">length</tspan></text>
+ <path
+ style="display:inline;fill:none;stroke:#a8d08d;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart);marker-end:url(#marker6509)"
+ d="m 32.606882,149.73449 c 19.777945,-0.0668 19.777945,-0.0668 19.777945,-0.0668"
+ id="path6255"
+ inkscape:connector-curvature="0" />
+ <g
+ style="display:inline"
+ id="g4807-5"
+ transform="translate(13.985119,97.480562)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-6"
+ d="m 11.929873,70.237907 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-88"
+ d="m 18.686199,70.237907 c 0,0 -0.4016,2.480469 -1.370164,2.456847 -0.968564,-0.02363 -2.007999,-0.02363 -2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-9"
+ d="m 15.308036,72.671124 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <g
+ style="display:inline"
+ transform="matrix(0.78752716,0,0,1.0016782,17.928141,97.168708)"
+ id="g5063-5-2">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-0-0-0"
+ d="m 18.686199,70.426891 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-8-2-1"
+ d="m 132.59878,70.332401 c 0,0 -0.4016,2.480469 -1.37017,2.456847 -0.96856,-0.02363 -2.008,-0.02363 -2.008,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-2-4-5"
+ d="m 75.831475,72.954606 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4809-8-8"
+ d="M 22.064362,72.860108 129.22061,72.765618"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#d8d8d8;fill-opacity:1;stroke:#000000;stroke-width:0.199;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4735-7-3"
+ width="6.6797671"
+ height="14.033618"
+ x="25.781357"
+ y="152.21487" />
+ <g
+ id="g8093">
+ <rect
+ y="152.21487"
+ x="32.461124"
+ height="14.0336"
+ width="16.248745"
+ id="rect6210"
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ id="text6214"
+ y="158.15347"
+ x="40.321487"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="158.15347"
+ x="40.321487"
+ id="tspan6212"
+ sodipodi:role="line">CB<tspan
+ id="tspan6218"
+ style="font-size:64.99999762%;text-align:center;baseline-shift:sub;text-anchor:middle">1</tspan></tspan><tspan
+ id="tspan6216"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="162.5632"
+ x="40.321487"
+ sodipodi:role="line">hard</tspan></text>
+ </g>
+ <g
+ id="g8100">
+ <rect
+ y="152.21487"
+ x="48.709869"
+ height="14.0336"
+ width="16.248745"
+ id="rect6210-1"
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ id="text6214-7"
+ y="158.15347"
+ x="56.570232"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="158.15347"
+ x="56.570232"
+ id="tspan6212-1"
+ sodipodi:role="line">CB<tspan
+ id="tspan6247"
+ style="font-size:64.99999762%;baseline-shift:sub">2</tspan></tspan><tspan
+ id="tspan6216-4"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="162.5632"
+ x="56.570232"
+ sodipodi:role="line">hard</tspan></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="39.47636"
+ y="140.81966"
+ id="text4821-3-3-0-5"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3"
+ x="39.47636"
+ y="140.81966"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">k_neg</tspan></text>
+ <g
+ id="g8252">
+ <rect
+ y="152.21487"
+ x="64.958618"
+ height="14.033598"
+ width="24.977577"
+ id="rect6693"
+ style="opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.26701048;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ id="text6697"
+ y="160.55891"
+ x="75.637276"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="stroke-width:0.26458332"
+ y="160.55891"
+ x="75.637276"
+ id="tspan6695"
+ sodipodi:role="line">...</tspan></text>
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6210-8"
+ width="16.248745"
+ height="14.0336"
+ x="89.936195"
+ y="152.21487" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="98.231049"
+ y="158.29669"
+ id="text6214-78"><tspan
+ sodipodi:role="line"
+ id="tspan6212-2"
+ x="98.231049"
+ y="158.29669"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;text-align:center;baseline-shift:sub;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6218-3">c-1</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="98.231049"
+ y="162.70642"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6216-2">hard</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.23060164;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6210-8-3"
+ width="12.2741"
+ height="14.067998"
+ x="106.16774"
+ y="152.19768" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="112.17137"
+ y="158.42459"
+ id="text6214-78-6"><tspan
+ sodipodi:role="line"
+ id="tspan6212-2-9"
+ x="112.17137"
+ y="158.42459"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;text-align:center;baseline-shift:sub;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6218-3-7">c</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="112.17137"
+ y="162.83432"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6216-2-9">hard</tspan></text>
+ <g
+ id="g6838"
+ transform="translate(-4.1092682)">
+ <rect
+ y="152.22496"
+ x="122.55111"
+ height="14.040706"
+ width="4.0010114"
+ id="rect6777"
+ style="opacity:1;fill:#375623;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781"
+ y="125.5383"
+ x="-164.82439"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="125.5383"
+ x="-164.82439"
+ id="tspan6779"
+ sodipodi:role="line">CRC24A</tspan></text>
+ </g>
+ <path
+ style="display:inline;fill:none;stroke:#a8d08d;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-0);marker-end:url(#marker6509-1)"
+ d="m 105.83873,149.28245 c 19.77795,-0.0668 19.77795,-0.0668 19.77795,-0.0668"
+ id="path6255-2"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="115.83596"
+ y="140.38614"
+ id="text4821-3-3-0-5-1"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-7"
+ x="115.83596"
+ y="140.38614"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">k_pos</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#d8d8d8;fill-opacity:1;stroke:#000000;stroke-width:0.199;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4735-7-3-8"
+ width="6.6797671"
+ height="14.033618"
+ x="17.908045"
+ y="192.83072" />
+ <g
+ style="display:inline"
+ id="g4807-5-7"
+ transform="translate(6.0254188,138.42182)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-6-7"
+ d="m 11.929873,70.237907 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-88-6"
+ d="m 18.686199,70.237907 c 0,0 -0.4016,2.480469 -1.370164,2.456847 -0.968564,-0.02363 -2.007999,-0.02363 -2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-9-4"
+ d="m 15.308036,72.671124 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="17.775927"
+ y="208.62222"
+ id="text4821-3-9-7-1-7"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-3-0-0"
+ x="17.775927"
+ y="208.62222"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">offset</tspan></text>
+ <path
+ style="display:inline;fill:none;stroke:#a8d08d;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-4);marker-end:url(#marker6509-11)"
+ d="m 24.97933,190.59164 c 19.777946,-0.0668 19.777946,-0.0668 19.777946,-0.0668"
+ id="path6255-0"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="31.523031"
+ y="180.00327"
+ id="text4821-3-3-0-5-5"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-9"
+ x="31.523031"
+ y="180.00327"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">k_neg</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="75.525093"
+ y="208.04131"
+ id="text4821-3-3-0-9"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-8"
+ x="75.525093"
+ y="208.04131"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">length</tspan></text>
+ <g
+ style="display:inline"
+ transform="matrix(0.91917288,0,0,1.0006169,7.5499955,138.19202)"
+ id="g5063-5-2-9">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-0-0-0-9"
+ d="m 18.686199,70.426891 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-8-2-1-3"
+ d="m 132.59878,70.332401 c 0,0 -0.4016,2.480469 -1.37017,2.456847 -0.96856,-0.02363 -2.008,-0.02363 -2.008,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-2-4-5-3"
+ d="m 75.831475,72.954606 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4809-8-8-4"
+ d="M 22.064362,72.860108 129.22061,72.765618"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <path
+ style="display:inline;fill:none;stroke:#a8d08d;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-0-1);marker-end:url(#marker6509-1-3)"
+ d="m 109.83498,190.21366 c 19.77795,-0.0668 19.77795,-0.0668 19.77795,-0.0668"
+ id="path6255-2-4"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="120.00289"
+ y="179.64078"
+ id="text4821-3-3-0-5-1-5"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-7-6"
+ x="120.00289"
+ y="179.64078"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">k_pos</tspan></text>
+ <g
+ transform="translate(-7.873312,40.61586)"
+ style="display:inline"
+ id="g8093-1">
+ <rect
+ y="152.21487"
+ x="32.461124"
+ height="14.0336"
+ width="16.248745"
+ id="rect6210-9"
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ id="text6214-9"
+ y="158.15347"
+ x="40.321487"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="158.15347"
+ x="40.321487"
+ id="tspan6212-7"
+ sodipodi:role="line">CB<tspan
+ id="tspan6218-8"
+ style="font-size:64.99999762%;text-align:center;baseline-shift:sub;text-anchor:middle">1</tspan></tspan><tspan
+ id="tspan6216-0"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="162.5632"
+ x="40.321487"
+ sodipodi:role="line">hard</tspan></text>
+ </g>
+ <g
+ style="display:inline"
+ id="g6838-7"
+ transform="translate(-81.714552,40.598663)">
+ <rect
+ y="152.22496"
+ x="122.55111"
+ height="14.040706"
+ width="4.0010114"
+ id="rect6777-5"
+ style="opacity:1;fill:#375623;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1"
+ y="125.5383"
+ x="-164.82439"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="125.5383"
+ x="-164.82439"
+ id="tspan6779-7"
+ sodipodi:role="line">CRC24B</tspan></text>
+ </g>
+ <g
+ transform="translate(-3.8722974,40.608749)"
+ style="display:inline"
+ id="g8100-3">
+ <rect
+ y="152.21487"
+ x="48.709869"
+ height="14.0336"
+ width="16.248745"
+ id="rect6210-1-8"
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ id="text6214-7-4"
+ y="158.15347"
+ x="56.570232"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="158.15347"
+ x="56.570232"
+ id="tspan6212-1-0"
+ sodipodi:role="line">CB<tspan
+ id="tspan6247-0"
+ style="font-size:64.99999762%;baseline-shift:sub">2</tspan></tspan><tspan
+ id="tspan6216-4-1"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ y="162.5632"
+ x="56.570232"
+ sodipodi:role="line">hard</tspan></text>
+ </g>
+ <g
+ style="display:inline"
+ id="g6838-7-2"
+ transform="translate(-61.464789,40.591552)">
+ <rect
+ y="152.22496"
+ x="122.55111"
+ height="14.040706"
+ width="4.0010114"
+ id="rect6777-5-2"
+ style="opacity:1;fill:#375623;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7"
+ y="125.5383"
+ x="-164.82439"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="125.5383"
+ x="-164.82439"
+ id="tspan6779-7-1"
+ sodipodi:role="line">CRC24B</tspan></text>
+ </g>
+ <g
+ transform="translate(0.12871686,40.608749)"
+ style="display:inline"
+ id="g8252-9">
+ <rect
+ y="152.21487"
+ x="64.958618"
+ height="14.033598"
+ width="24.977577"
+ id="rect6693-0"
+ style="opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.26701048;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ id="text6697-7"
+ y="160.55891"
+ x="75.637276"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="stroke-width:0.26458332"
+ y="160.55891"
+ x="75.637276"
+ id="tspan6695-1"
+ sodipodi:role="line">...</tspan></text>
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6210-8-5"
+ width="16.248745"
+ height="14.0336"
+ x="90.064911"
+ y="192.82362" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="98.359756"
+ y="198.90544"
+ id="text6214-78-69"><tspan
+ sodipodi:role="line"
+ id="tspan6212-2-98"
+ x="98.359756"
+ y="198.90544"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;text-align:center;baseline-shift:sub;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6218-3-1">c-1</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="98.359756"
+ y="203.31517"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6216-2-8">hard</tspan></text>
+ <g
+ style="display:inline"
+ id="g6838-7-2-9"
+ transform="translate(-16.23745,40.591553)">
+ <rect
+ y="152.22496"
+ x="122.55111"
+ height="14.040706"
+ width="4.0010114"
+ id="rect6777-5-2-0"
+ style="opacity:1;fill:#375623;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-2"
+ y="125.5383"
+ x="-164.82439"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="125.5383"
+ x="-164.82439"
+ id="tspan6779-7-1-9"
+ sodipodi:role="line">CRC24B</tspan></text>
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.23060165;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6210-8-3-2"
+ width="12.2741"
+ height="14.067998"
+ x="110.31467"
+ y="192.81651" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="116.3183"
+ y="199.04343"
+ id="text6214-78-6-8"><tspan
+ sodipodi:role="line"
+ id="tspan6212-2-9-7"
+ x="116.3183"
+ y="199.04343"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;text-align:center;baseline-shift:sub;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6218-3-7-9">c</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="116.3183"
+ y="203.45316"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6216-2-9-1">hard</tspan></text>
+ <g
+ style="display:inline"
+ id="g6838-8"
+ transform="translate(0.03765869,40.591553)">
+ <rect
+ y="152.22496"
+ x="122.55111"
+ height="14.040706"
+ width="4.0010114"
+ id="rect6777-6"
+ style="opacity:1;fill:#375623;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-15"
+ y="125.5383"
+ x="-164.82439"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="125.5383"
+ x="-164.82439"
+ id="tspan6779-6"
+ sodipodi:role="line">CRC24A</tspan></text>
+ </g>
+ <g
+ style="display:inline"
+ id="g6838-7-2-7"
+ transform="translate(4.0386734,40.591553)">
+ <rect
+ y="152.22496"
+ x="122.55111"
+ height="14.040706"
+ width="4.0010114"
+ id="rect6777-5-2-04"
+ style="opacity:1;fill:#375623;fill-opacity:1;stroke:#000000;stroke-width:0.26499999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-5"
+ y="125.5383"
+ x="-164.82439"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="125.5383"
+ x="-164.82439"
+ id="tspan6779-7-1-0"
+ sodipodi:role="line">CRC24B</tspan></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="27.138876"
+ y="85.186432"
+ id="text4821-3-9-0"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-1"
+ x="27.138876"
+ y="85.186432"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">mbuf seg 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="112.34333"
+ y="85.141403"
+ id="text4821-3-9-0-8"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-1-0"
+ x="112.34333"
+ y="85.141403"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">mbuf seg 2</tspan></text>
+ <path
+ style="fill:none;stroke:#000000;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker8474)"
+ d="m 34.076864,90.734741 5.946746,5.746295"
+ id="path8464"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:none;stroke:#000000;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker8474-2)"
+ d="m 115.35315,91.80841 -5.7463,5.946752"
+ id="path8464-8"
+ inkscape:connector-curvature="0" />
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/img/turbo_tb_encode.svg b/doc/guides/prog_guide/img/turbo_tb_encode.svg
new file mode 100644
index 00000000..e3708a93
--- /dev/null
+++ b/doc/guides/prog_guide/img/turbo_tb_encode.svg
@@ -0,0 +1,1948 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<!-- SPDX-License-Identifier: BSD-3-Clause -->
+<!-- Copyright(c) 2018 Intel Corporation -->
+
+<svg
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:cc="http://creativecommons.org/ns#"
+ xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:svg="http://www.w3.org/2000/svg"
+ xmlns="http://www.w3.org/2000/svg"
+ xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+ xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+ width="189.87321mm"
+ height="166.91023mm"
+ viewBox="0 0 189.87321 166.91023"
+ version="1.1"
+ id="svg8"
+ inkscape:version="0.92.3 (2405546, 2018-03-11)"
+ sodipodi:docname="turbo_tb_encode.svg">
+ <defs
+ id="defs2">
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker8474"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend">
+ <path
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path8472"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path6507"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5140"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Mstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5122"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.4,0,0,0.4,4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Lstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5134"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="matrix(1.1,0,0,1.1,1.1,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5116"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.8,0,0,0.8,10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Sstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Sstart"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5128"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(0.2,0,0,0.2,1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Send"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Send"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5131"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.2,0,0,-0.2,-1.2,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path5119"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <filter
+ id="filter_2">
+ <feGaussianBlur
+ stdDeviation="2"
+ id="feGaussianBlur4" />
+ </filter>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-9"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-11"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-2"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5-6"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1-3"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0-1"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker8474-2"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend">
+ <path
+ inkscape:connector-curvature="0"
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path8472-4" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0-9"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5-7"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1-38"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0-6"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-6"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-22"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-1-2"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-6-4"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-7-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-22-7"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0-9-1"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5-7-6"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1-38-0"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0-6-0"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-1-2-5"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-6-4-6"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-7-7-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-22-7-9"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0-9-1-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5-7-6-8"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1-38-0-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0-6-0-8"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0-9-1-4"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5-7-6-2"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1-38-0-7"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0-6-0-5"
+ style="fill:#9cc3e5;fill-opacity:1;fill-rule:evenodd;stroke:#9cc3e5;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker8474-2-2"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend">
+ <path
+ inkscape:connector-curvature="0"
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#818181;fill-opacity:1;fill-rule:evenodd;stroke:#818181;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path8472-4-6" />
+ </marker>
+ <marker
+ inkscape:isstock="true"
+ style="overflow:visible"
+ id="marker8474-3"
+ refX="0"
+ refY="0"
+ orient="auto"
+ inkscape:stockid="Arrow2Mend">
+ <path
+ inkscape:connector-curvature="0"
+ transform="scale(-0.6)"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ style="fill:#818181;fill-opacity:1;fill-rule:evenodd;stroke:#818181;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ id="path8472-3" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mend-1"
+ style="overflow:visible"
+ inkscape:isstock="true"
+ inkscape:collect="always">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5143-7"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-0-6"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-5-77"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-1-39"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-0-9"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mstart"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow2Mstart-8"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path5140-99"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(0.6)" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow2Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="marker6509-78"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ inkscape:connector-curvature="0"
+ id="path6507-02"
+ style="fill:#a8d08d;fill-opacity:1;fill-rule:evenodd;stroke:#a8d08d;stroke-width:0.625;stroke-linejoin:round;stroke-opacity:1"
+ d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
+ transform="scale(-0.6)" />
+ </marker>
+ </defs>
+ <sodipodi:namedview
+ id="base"
+ pagecolor="#ffffff"
+ bordercolor="#666666"
+ borderopacity="1.0"
+ inkscape:pageopacity="0.0"
+ inkscape:pageshadow="2"
+ inkscape:zoom="0.98994949"
+ inkscape:cx="213.35065"
+ inkscape:cy="360.88227"
+ inkscape:document-units="mm"
+ inkscape:current-layer="layer1"
+ showgrid="false"
+ inkscape:snap-text-baseline="true"
+ inkscape:window-width="1920"
+ inkscape:window-height="1137"
+ inkscape:window-x="1072"
+ inkscape:window-y="185"
+ inkscape:window-maximized="1" />
+ <metadata
+ id="metadata5">
+ <rdf:RDF>
+ <cc:Work
+ rdf:about="">
+ <dc:format>image/svg+xml</dc:format>
+ <dc:type
+ rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+ <dc:title />
+ </cc:Work>
+ </rdf:RDF>
+ </metadata>
+ <g
+ inkscape:groupmode="layer"
+ id="layer3"
+ inkscape:label="bgImage"
+ style="display:inline"
+ transform="translate(-10.86151,-57.361626)" />
+ <g
+ inkscape:label="Layer 1"
+ inkscape:groupmode="layer"
+ id="layer1"
+ style="display:inline"
+ transform="translate(-10.86151,-57.361626)">
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="41.159508"
+ y="61.13464"
+ id="text873"><tspan
+ sodipodi:role="line"
+ id="tspan871"
+ x="41.159508"
+ y="64.996841"
+ style="font-size:4.23333311px;stroke-width:0.26458332" /></text>
+ <flowRoot
+ xml:space="preserve"
+ id="flowRoot4811"
+ style="font-style:normal;font-weight:normal;font-size:13.33333302px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
+ transform="scale(0.26458333)"><flowRegion
+ id="flowRegion4813"><rect
+ id="rect4815"
+ width="41.785713"
+ height="14.642858"
+ x="39.285713"
+ y="287.16254" /></flowRegion><flowPara
+             id="flowPara4817">offset</flowPara></flowRoot> <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="16.351753"
+ y="215.03786"
+ id="text4821-3-9-7-1-7"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-3-0-0"
+ x="16.351753"
+ y="215.03786"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">offset</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#d8d8d8;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4735-7-3-8-5"
+ width="6.6797671"
+ height="14.033618"
+ x="13.480058"
+ y="65.465332" />
+ <g
+ style="display:inline"
+ id="g4807-4"
+ transform="translate(1.6626143,11.103676)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-8"
+ d="m 11.929873,70.237907 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-5"
+ d="m 18.686199,70.237907 c 0,0 -0.4016,2.480469 -1.370164,2.456847 -0.968564,-0.02363 -2.007999,-0.02363 -2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-5"
+ d="m 15.308036,72.671124 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <g
+ style="display:inline"
+ id="g5063-4"
+ transform="matrix(0.96955809,0,0,1.0002284,2.2280641,10.898039)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-0-01"
+ d="m 18.686199,70.426891 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-8-9"
+ d="m 132.59878,70.332401 c 0,0 -0.4016,2.480469 -1.37017,2.456847 -0.96856,-0.02363 -2.008,-0.02363 -2.008,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-2-0"
+ d="m 75.831475,72.954606 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4809-2"
+ d="M 22.064362,72.860108 129.22061,72.765618"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="74.602684"
+ y="85.144012"
+ id="text4821-3-0"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-5"
+ x="74.602684"
+ y="85.144012"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">length</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="13.386705"
+ y="85.690132"
+ id="text4821-3-9-4"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-4"
+ x="13.386705"
+ y="85.690132"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">offset</tspan></text>
+ <g
+ transform="translate(209.08086,-15.131588)"
+ style="display:inline"
+ id="g10789-0">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4885-7"
+ d="m -198.21935,107.88646 h 61.73926"
+ style="fill:none;stroke:#7f7f7f;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.1199999, 0.26499999;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4887-5"
+ d="m -126.32385,107.88646 94.613422,-0.13363"
+ style="fill:none;stroke:#818181;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.1199999, 0.26499999;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="scale(0.9610099,1.040572)"
+ id="text4891-3"
+ y="105.46623"
+ x="-139.73984"
+ style="font-style:normal;font-weight:normal;font-size:5.39796209px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#818181;fill-opacity:1;stroke:none;stroke-width:0.40484715"
+ xml:space="preserve"><tspan
+ style="fill:#818181;fill-opacity:1;stroke-width:0.40484715"
+ y="105.46623"
+ x="-139.73984"
+ id="tspan4889-9"
+ sodipodi:role="line">or</tspan></text>
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15867083;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6"
+ width="16.144258"
+ height="14.033618"
+ x="20.159824"
+ y="65.465332" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="25.127815"
+ y="73.846748"
+ id="text877-8-6"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8"
+ x="25.127815"
+ y="73.846748"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:64.99999762%;baseline-shift:sub"
+ id="tspan10851">1</tspan></tspan></text>
+ <g
+ id="g10891">
+ <rect
+ y="65.443756"
+ x="36.304085"
+ height="14.074809"
+ width="4.0010114"
+ id="rect6777-5-2-04-3"
+ style="opacity:1;fill:#0070c0;fill-opacity:1;stroke:#000000;stroke-width:0.15919298;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-5-6"
+ y="39.324684"
+ x="-78.343857"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="39.324684"
+ x="-78.343857"
+ id="tspan6779-7-1-0-4"
+ sodipodi:role="line">CRC24B</tspan></text>
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-1"
+ width="16.163868"
+ height="14.074809"
+ x="40.305096"
+ y="65.443756" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="44.893223"
+ y="73.780594"
+ id="text877-8-6-7"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-8"
+ x="44.893223"
+ y="73.780594"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:64.99999762%;baseline-shift:sub"
+ id="tspan10935">2</tspan></tspan></text>
+ <g
+ transform="translate(20.164879)"
+ style="display:inline"
+ id="g10891-1">
+ <rect
+ y="65.443756"
+ x="36.304085"
+ height="14.074809"
+ width="4.0010114"
+ id="rect6777-5-2-04-3-5"
+ style="opacity:1;fill:#0070c0;fill-opacity:1;stroke:#000000;stroke-width:0.15919298;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-5-6-4"
+ y="39.324684"
+ x="-78.343857"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="39.324684"
+ x="-78.343857"
+ id="tspan6779-7-1-0-4-5"
+ sodipodi:role="line">CRC24B</tspan></text>
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-8"
+ width="30.921177"
+ height="14.014396"
+ x="60.480572"
+ y="65.493568" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="73.468872"
+ y="73.892609"
+ id="text877-8-4"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-3"
+ x="73.468872"
+ y="73.892609"
+ style="font-size:3.52777767px;stroke-width:0.26458332">...</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-0"
+ width="16.163868"
+ height="14.074809"
+ x="91.401749"
+ y="65.433159" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="95.206711"
+ y="73.84742"
+ id="text877-8-6-1"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-9"
+ x="95.206711"
+ y="73.84742"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10851-4">c-1</tspan></tspan></text>
+ <g
+ transform="translate(71.261528,-0.01059723)"
+ style="display:inline"
+ id="g10891-1-1">
+ <rect
+ y="65.443756"
+ x="36.304085"
+ height="14.074809"
+ width="4.0010114"
+ id="rect6777-5-2-04-3-5-4"
+ style="opacity:1;fill:#0070c0;fill-opacity:1;stroke:#000000;stroke-width:0.15919298;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-5-6-4-0"
+ y="39.324684"
+ x="-78.343857"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="39.324684"
+ x="-78.343857"
+ id="tspan6779-7-1-0-4-5-4"
+ sodipodi:role="line">CRC24B</tspan></text>
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-0-8"
+ width="11.644219"
+ height="14.098742"
+ x="111.55466"
+ y="65.421196" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="114.16887"
+ y="73.780602"
+ id="text877-8-6-1-0"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-9-9"
+ x="114.16887"
+ y="73.780602"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10851-4-4">c</tspan></tspan></text>
+ <g
+ transform="translate(86.894791,0.00137329)"
+ style="display:inline"
+ id="g10891-1-1-1">
+ <rect
+ y="65.443756"
+ x="36.304085"
+ height="14.074809"
+ width="4.0010114"
+ id="rect6777-5-2-04-3-5-4-9"
+ style="opacity:1;fill:#0070c0;fill-opacity:1;stroke:#000000;stroke-width:0.15919298;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-5-6-4-0-4"
+ y="39.324684"
+ x="-78.343857"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="39.324684"
+ x="-78.343857"
+ id="tspan6779-7-1-0-4-5-4-7"
+ sodipodi:role="line">CRC24A</tspan></text>
+ </g>
+ <g
+ transform="translate(90.895802,0.00137329)"
+ style="display:inline"
+ id="g10891-1-1-6">
+ <rect
+ y="65.443756"
+ x="36.304085"
+ height="14.074809"
+ width="4.0010114"
+ id="rect6777-5-2-04-3-5-4-95"
+ style="opacity:1;fill:#0070c0;fill-opacity:1;stroke:#000000;stroke-width:0.15919298;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-5-6-4-0-6"
+ y="39.324684"
+ x="-78.343857"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="39.324684"
+ x="-78.343857"
+ id="tspan6779-7-1-0-4-5-4-2"
+ sodipodi:role="line">CRC24B</tspan></text>
+ </g>
+ <path
+ style="display:inline;fill:none;stroke:#9cc3e5;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-1);marker-end:url(#marker6509-7)"
+ d="m 20.359726,62.756584 c 19.77795,-0.0668 19.77795,-0.0668 19.77795,-0.0668"
+ id="path6255-4"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="26.706127"
+ y="57.404415"
+ id="text4821-3-3-0-5-4"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-95"
+ x="26.706127"
+ y="57.404415"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">k_neg</tspan></text>
+ <path
+ style="display:inline;fill:none;stroke:#9cc3e5;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-0-9);marker-end:url(#marker6509-1-38)"
+ d="m 111.58503,62.795193 c 19.77795,-0.0668 19.77795,-0.0668 19.77795,-0.0668"
+ id="path6255-2-3"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="121.82767"
+ y="57.441448"
+ id="text4821-3-3-0-5-1-6"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-7-9"
+ x="121.82767"
+ y="57.441448"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">k_pos</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#7f8085;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="144.51123"
+ y="64.972511"
+ id="text4821-3-6-0"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ x="144.51123"
+ y="64.972511"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan4877-6">- CRC24B &amp; CRC24A were pre-calculated</tspan><tspan
+ sodipodi:role="line"
+ x="144.51123"
+ y="68.908539"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11806">by the application</tspan><tspan
+ sodipodi:role="line"
+ x="144.51123"
+ y="72.844559"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11808">- The raw TB is given as a contiguous</tspan><tspan
+ sodipodi:role="line"
+ x="144.51123"
+ y="76.780586"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11810">buffer</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#7f8085;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="143.92094"
+ y="97.043495"
+ id="text4821-3-6-0-2"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ x="143.92094"
+ y="97.043495"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11810-4">- Only CRC24A was pre-calculated by the</tspan><tspan
+ sodipodi:role="line"
+ x="143.92094"
+ y="100.97952"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11848">application, therefore</tspan><tspan
+ sodipodi:role="line"
+ x="143.92094"
+ y="104.91554"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11850">RTE_BBDEV_TURBO_CRC_24B_ATTACH</tspan><tspan
+ sodipodi:role="line"
+ x="143.92094"
+ y="108.85157"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11852">is set in op_flags</tspan><tspan
+ sodipodi:role="line"
+ x="143.92094"
+ y="112.78759"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11854">- The raw TB is given as a contiguous</tspan><tspan
+ sodipodi:role="line"
+ x="143.92094"
+ y="116.72362"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11856">buffer</tspan></text>
+ <g
+ style="display:inline"
+ id="g4807-4-6"
+ transform="translate(9.4628222,49.06356)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-8-0"
+ d="m 11.929873,70.237907 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-5-5"
+ d="m 18.686199,70.237907 c 0,0 -0.4016,2.480469 -1.370164,2.456847 -0.968564,-0.02363 -2.007999,-0.02363 -2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-5-6"
+ d="m 15.308036,72.671124 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="21.520061"
+ y="122.09515"
+ id="text4821-3-9-4-8"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-4-4"
+ x="21.520061"
+ y="122.09515"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">offset</tspan></text>
+ <g
+ style="display:inline"
+ id="g5063-4-7"
+ transform="matrix(0.83046983,0,0,1.0013214,12.615148,48.778811)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-0-01-3"
+ d="m 18.686199,70.426891 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-8-9-4"
+ d="m 132.59878,70.332401 c 0,0 -0.4016,2.480469 -1.37017,2.456847 -0.96856,-0.02363 -2.008,-0.02363 -2.008,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-2-0-6"
+ d="m 75.831475,72.954606 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4809-2-9"
+ d="M 22.064362,72.860108 129.22061,72.765618"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="72.49247"
+ y="121.91208"
+ id="text4821-3-0-1"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-5-7"
+ x="72.49247"
+ y="121.91208"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">length</tspan></text>
+ <g
+ transform="translate(213.42759,24.366924)"
+ style="display:inline"
+ id="g10789-0-0">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4885-7-2"
+ d="m -198.21935,107.88646 h 61.73926"
+ style="fill:none;stroke:#7f7f7f;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.1199999, 0.26499999;stroke-dashoffset:0;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4887-5-0"
+ d="m -126.32385,107.88646 94.613422,-0.13363"
+ style="fill:none;stroke:#818181;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:2.1199999, 0.26499999;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="scale(0.9610099,1.040572)"
+ id="text4891-3-9"
+ y="105.46623"
+ x="-139.73984"
+ style="font-style:normal;font-weight:normal;font-size:5.39796209px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#818181;fill-opacity:1;stroke:none;stroke-width:0.40484715"
+ xml:space="preserve"><tspan
+ style="fill:#818181;fill-opacity:1;stroke-width:0.40484715"
+ y="105.46623"
+ x="-139.73984"
+ id="tspan4889-9-9"
+ sodipodi:role="line">or</tspan></text>
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#d8d8d8;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4735-7-3-8-5-5"
+ width="6.6797671"
+ height="14.033618"
+ x="20.985983"
+ y="103.46108" />
+ <path
+ style="display:inline;fill:none;stroke:#9cc3e5;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-1-2);marker-end:url(#marker6509-7-7)"
+ d="m 28.597837,100.50577 c 19.777951,-0.0668 19.777951,-0.0668 19.777951,-0.0668"
+ id="path6255-4-7"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="35.296089"
+ y="93.607361"
+ id="text4821-3-3-0-5-4-7"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-95-5"
+ x="35.296089"
+ y="93.607361"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">k_neg</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-3"
+ width="16.163868"
+ height="14.074809"
+ x="27.665751"
+ y="103.46108" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="32.131104"
+ y="111.79969"
+ id="text877-8-6-9"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-0"
+ x="32.131104"
+ y="111.79969"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10851-5">1</tspan></tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-1-0"
+ width="16.163868"
+ height="14.074809"
+ x="43.82962"
+ y="103.46108" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="48.417747"
+ y="111.79792"
+ id="text877-8-6-7-1"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-8-5"
+ x="48.417747"
+ y="111.79792"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10935-8">2</tspan></tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-8-8"
+ width="30.921177"
+ height="14.014396"
+ x="59.993488"
+ y="103.46108" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="73.38308"
+ y="111.96056"
+ id="text877-8-4-6"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-3-9"
+ x="73.38308"
+ y="111.96056"
+ style="font-size:3.52777767px;stroke-width:0.26458332">...</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-0-7"
+ width="16.163868"
+ height="14.074809"
+ x="90.914665"
+ y="103.46108" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="94.719627"
+ y="111.87534"
+ id="text877-8-6-1-5"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-9-4"
+ x="94.719627"
+ y="111.87534"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10851-4-2">c-1</tspan></tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-0-8-6"
+ width="12.207969"
+ height="14.095527"
+ x="107.08015"
+ y="103.43876" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="109.69276"
+ y="111.79655"
+ id="text877-8-6-1-0-1"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-9-9-0"
+ x="109.69276"
+ y="111.79655"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10851-4-4-5">c</tspan></tspan></text>
+ <g
+ transform="translate(82.984032,37.995003)"
+ style="display:inline"
+ id="g10891-1-1-1-9">
+ <rect
+ y="65.443756"
+ x="36.304085"
+ height="14.074809"
+ width="4.0010114"
+ id="rect6777-5-2-04-3-5-4-9-2"
+ style="opacity:1;fill:#0070c0;fill-opacity:1;stroke:#000000;stroke-width:0.15919298;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-5-6-4-0-4-0"
+ y="39.324684"
+ x="-78.343857"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="39.324684"
+ x="-78.343857"
+ id="tspan6779-7-1-0-4-5-4-7-9"
+ sodipodi:role="line">CRC24A</tspan></text>
+ </g>
+ <path
+ style="display:inline;fill:none;stroke:#9cc3e5;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-0-9-1);marker-end:url(#marker6509-1-38-0)"
+ d="m 107.37813,100.63331 c 19.77795,-0.0668 19.77795,-0.0668 19.77795,-0.0668"
+ id="path6255-2-3-9"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="117.44109"
+ y="93.729691"
+ id="text4821-3-3-0-5-1-6-2"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-7-9-7"
+ x="117.44109"
+ y="93.729691"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">k_pos</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#d8d8d8;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4735-7-3-8-5-5-2"
+ width="6.6797671"
+ height="14.033618"
+ x="13.230828"
+ y="152.20575" />
+ <path
+ style="display:inline;fill:none;stroke:#9cc3e5;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-1-2-5);marker-end:url(#marker6509-7-7-6)"
+ d="m 20.312481,149.54973 c 19.777952,-0.0668 19.777952,-0.0668 19.777952,-0.0668"
+ id="path6255-4-7-7"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="26.656864"
+ y="140.64244"
+ id="text4821-3-3-0-5-4-7-0"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-95-5-0"
+ x="26.656864"
+ y="140.64244"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">k_neg</tspan></text>
+ <path
+ style="display:inline;fill:none;stroke:#9cc3e5;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-0-9-1-4);marker-end:url(#marker6509-1-38-0-7)"
+ d="m 114.65863,149.41609 c 19.77795,-0.0668 19.77795,-0.0668 19.77795,-0.0668"
+ id="path6255-2-3-9-5"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="125.03253"
+ y="140.5143"
+ id="text4821-3-3-0-5-1-6-2-0"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-7-9-7-4"
+ x="125.03253"
+ y="140.5143"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">k_pos</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="26.321789"
+ y="132.12051"
+ id="text4821-3-9-0-4"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-1-04"
+ x="26.321789"
+ y="132.12051"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">mbuf seg 1</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="111.52624"
+ y="132.07547"
+ id="text4821-3-9-0-8-6"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-1-0-4"
+ x="111.52624"
+ y="132.07547"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">mbuf seg 2</tspan></text>
+ <path
+ style="display:inline;fill:none;stroke:#818181;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker8474-3)"
+ d="m 33.293244,139.67339 5.946743,5.7463"
+ id="path8464-83"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:none;stroke:#818181;stroke-width:0.26499999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker8474-2-2)"
+ d="m 114.56953,140.74706 -5.7463,5.94675"
+ id="path8464-8-8"
+ inkscape:connector-curvature="0" />
+ <g
+ style="display:inline"
+ id="g4807-4-6-1"
+ transform="translate(1.5234255,97.894043)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-8-0-9"
+ d="m 11.929873,70.237907 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-5-5-9"
+ d="m 18.686199,70.237907 c 0,0 -0.4016,2.480469 -1.370164,2.456847 -0.968564,-0.02363 -2.007999,-0.02363 -2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-5-6-4"
+ d="m 15.308036,72.671124 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="13.241572"
+ y="168.92551"
+ id="text4821-3-9-4-8-3"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-0-4-4-2"
+ x="13.241572"
+ y="168.92551"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">offset</tspan></text>
+ <g
+ style="display:inline"
+ id="g5063-4-7-3"
+ transform="matrix(0.96708187,0,0,1.0002483,2.1378366,97.68752)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-0-01-3-5"
+ d="m 18.686199,70.426891 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-8-9-4-4"
+ d="m 132.59878,70.332401 c 0,0 -0.4016,2.480469 -1.37017,2.456847 -0.96856,-0.02363 -2.008,-0.02363 -2.008,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-2-0-6-6"
+ d="m 75.831475,72.954606 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4809-2-9-4"
+ d="M 22.064362,72.860108 129.22061,72.765618"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="74.165497"
+ y="168.19868"
+ id="text4821-3-0-1-2"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-5-7-5"
+ x="74.165497"
+ y="168.19868"
+ style="fill:#9cc3e5;fill-opacity:1;stroke-width:0.23616144">length</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-3-0"
+ width="16.163868"
+ height="14.074809"
+ x="19.910595"
+ y="152.20575" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="24.37595"
+ y="160.54436"
+ id="text877-8-6-9-1"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-0-4"
+ x="24.37595"
+ y="160.54436"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10851-5-1">1</tspan></tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-1-0-2"
+ width="16.163868"
+ height="14.074809"
+ x="36.074467"
+ y="152.20575" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="40.662594"
+ y="160.54259"
+ id="text877-8-6-7-1-7"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-8-5-6"
+ x="40.662594"
+ y="160.54259"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10935-8-3">2</tspan></tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-8-8-7"
+ width="8.9107389"
+ height="14.115565"
+ x="52.187752"
+ y="152.21559" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="54.513344"
+ y="160.1346"
+ id="text877-8-4-6-7"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-3-9-3"
+ x="54.513344"
+ y="160.1346"
+ style="font-size:3.52777767px;stroke-width:0.26458332">...</tspan></text>
+ <path
+ style="display:inline;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 61.098492,152.29471 9.985814,-0.03 v 4.04245 l -2.539064,1.46999 5.579254,2.87314 -3.00678,1.60362 v 4.04245 l -10.019224,0.0348 z"
+ id="path5575-6"
+ inkscape:connector-curvature="0" />
+ <path
+ style="display:inline;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ d="m 90.024487,152.16192 -9.953251,0.003 -0.0334,3.94222 -2.63928,1.63703 5.67947,2.87314 -2.93996,1.57021 v 4.00905 l 9.886431,0.003 z"
+ id="path5581-0"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="62.273087"
+ y="160.61116"
+ id="text5879-0"><tspan
+ sodipodi:role="line"
+ id="tspan5877-5"
+ x="62.273087"
+ y="160.61116"
+ style="stroke-width:0.26458332">CB<tspan
+ style="font-size:64.99999762%;baseline-shift:sub"
+ id="tspan15311">N</tspan></tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="83.078026"
+ y="160.61731"
+ id="text5883-5"><tspan
+ sodipodi:role="line"
+ id="tspan5881-6"
+ x="83.078026"
+ y="160.61731"
+ style="stroke-width:0.26458332">CB<tspan
+ style="font-size:64.99999762%;baseline-shift:sub"
+ id="tspan15313">N</tspan></tspan></text>
+ <path
+ style="display:inline;fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Mend-1)"
+ d="m 66.006182,152.19788 c 0,0 5.144934,-13.09621 18.942724,-0.3341"
+ id="path5899-9"
+ inkscape:connector-curvature="0" />
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-8-8-7-8"
+ width="8.9107389"
+ height="14.115566"
+ x="90.024498"
+ y="152.084" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="92.247215"
+ y="160.20142"
+ id="text877-8-4-6-7-9"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-3-9-3-4"
+ x="92.247215"
+ y="160.20142"
+ style="font-size:3.52777767px;stroke-width:0.26458332">...</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-3-0-3"
+ width="16.163868"
+ height="14.074809"
+ x="98.935234"
+ y="152.12476" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="102.77936"
+ y="160.27509"
+ id="text877-8-6-9-1-0"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-0-4-7"
+ x="102.77936"
+ y="160.27509"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10851-5-1-1">c-1</tspan></tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#9cc3e5;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect967-6-0-8-6-9"
+ width="12.207969"
+ height="14.095527"
+ x="115.09911"
+ y="152.10405" />
+ <g
+ transform="translate(91.002987,86.660285)"
+ style="display:inline"
+ id="g10891-1-1-1-9-9">
+ <rect
+ y="65.443756"
+ x="36.304085"
+ height="14.074809"
+ width="4.0010114"
+ id="rect6777-5-2-04-3-5-4-9-2-3"
+ style="opacity:1;fill:#0070c0;fill-opacity:1;stroke:#000000;stroke-width:0.15919298;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
+ <text
+ transform="rotate(-90)"
+ id="text6781-1-7-5-6-4-0-4-0-6"
+ y="39.324684"
+ x="-78.343857"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ xml:space="preserve"><tspan
+ style="font-size:2.82222223px;fill:#ffffff;fill-opacity:1;stroke-width:0.26458332"
+ y="39.324684"
+ x="-78.343857"
+ id="tspan6779-7-1-0-4-5-4-7-9-7"
+ sodipodi:role="line">CRC24A</tspan></text>
+ </g>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:4.23333311px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="118.12826"
+ y="160.26109"
+ id="text877-8-6-1-0-1-8"><tspan
+ sodipodi:role="line"
+ id="tspan875-6-8-9-9-0-1"
+ x="118.12826"
+ y="160.26109"
+ style="font-size:3.52777767px;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;baseline-shift:sub;stroke-width:0.26458332"
+ id="tspan10851-4-4-5-0">c</tspan></tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#7f8085;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="144.46991"
+ y="144.41747"
+ id="text4821-3-6-0-2-7"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ x="144.46991"
+ y="144.41747"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan11856-1">- CRC24A was pre-calculated and </tspan><tspan
+ sodipodi:role="line"
+ x="144.46991"
+ y="148.35349"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan15485">RTE_BBDEV_TURBO_CRC_24B_ATTACH</tspan><tspan
+ sodipodi:role="line"
+ x="144.46991"
+ y="152.28952"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan15487">is set in op_flags</tspan><tspan
+ sodipodi:role="line"
+ x="144.46991"
+ y="156.22554"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan15489">- The raw TB is given as a &quot;scattered&quot;</tspan><tspan
+ sodipodi:role="line"
+ x="144.46991"
+ y="160.16156"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan15491">buffer through a chained mbuf</tspan></text>
+ <path
+ style="fill:#fec000;fill-opacity:1;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
+ d="m 73.879064,190.90497 -0.03341,-11.65962 h 4.209497 l -0.03341,11.65962 1.971111,-0.0334 -4.042449,3.90882 -4.04245,-3.90882 z"
+ id="path6066-6"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="-190.93837"
+ y="76.819031"
+ id="text6074-7"
+ transform="rotate(-90)"><tspan
+ sodipodi:role="line"
+ id="tspan6072-6"
+ x="-190.93837"
+ y="76.819031"
+ style="font-size:2.82222223px;stroke-width:0.26458332">encode</tspan></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="74.319527"
+ y="213.86391"
+ id="text4821-3-3-0-8"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-6"
+ x="74.319527"
+ y="213.86391"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">length</tspan></text>
+ <path
+ style="display:inline;fill:none;stroke:#a8d08d;stroke-width:0.25237256;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-8);marker-end:url(#marker6509-78)"
+ d="m 22.905754,197.23808 c 15.904037,-0.0753 15.904037,-0.0753 15.904037,-0.0753"
+ id="path6255-22"
+ inkscape:connector-curvature="0" />
+ <g
+ style="display:inline"
+ id="g4807-5-2"
+ transform="translate(4.1511086,145.38078)">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-6-72"
+ d="m 11.929873,70.237907 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-88-63"
+ d="m 18.686199,70.237907 c 0,0 -0.4016,2.480469 -1.370164,2.456847 -0.968564,-0.02363 -2.007999,-0.02363 -2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-9-0"
+ d="m 15.308036,72.671124 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <g
+ style="display:inline"
+ transform="matrix(0.9338915,0,0,1.0005032,5.402228,145.15927)"
+ id="g5063-5-2-4">
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-0-0-0-2"
+ d="m 18.686199,70.426891 c 0,0 0.4016,2.480469 1.370164,2.456847 0.968564,-0.02363 2.007999,-0.02363 2.007999,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4741-5-8-2-1-6"
+ d="m 132.59878,70.332401 c 0,0 -0.4016,2.480469 -1.37017,2.456847 -0.96856,-0.02363 -2.008,-0.02363 -2.008,-0.02363"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4760-2-4-5-5"
+ d="m 75.831475,72.954606 -0.02362,2.527721"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ <path
+ inkscape:connector-curvature="0"
+ id="path4809-8-8-9"
+ d="M 22.064362,72.860108 129.22061,72.765618"
+ style="fill:none;stroke:#000000;stroke-width:0.26458332px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1" />
+ </g>
+ <rect
+ style="display:inline;opacity:1;fill:#d8d8d8;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"
+ id="rect4735-7-3-3"
+ width="6.6797671"
+ height="14.033618"
+ x="15.753516"
+ y="199.83667" />
+ <rect
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6210-97"
+ width="16.248745"
+ height="14.0336"
+ x="22.433283"
+ y="199.83669" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="30.577127"
+ y="208.42111"
+ id="text6214-8"><tspan
+ sodipodi:role="line"
+ id="tspan6212-4"
+ x="30.577127"
+ y="208.42111"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332">CB<tspan
+ style="font-size:64.99999762%;text-align:center;baseline-shift:sub;text-anchor:middle"
+ id="tspan6218-7">1</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="30.577127"
+ y="212.83084"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6216-3" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="29.919048"
+ y="186.11703"
+ id="text4821-3-3-0-5-7"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-0"
+ x="29.919048"
+ y="186.11703"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">ea</tspan></text>
+ <rect
+ style="opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.15930426;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6693-01"
+ width="42.577148"
+ height="13.993384"
+ x="54.930775"
+ y="199.87691" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="74.028419"
+ y="208.31436"
+ id="text6697-5"><tspan
+ sodipodi:role="line"
+ id="tspan6695-3"
+ x="74.028419"
+ y="208.31436"
+ style="stroke-width:0.26458332">...</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.15922768;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6210-8-1"
+ width="16.248745"
+ height="14.073822"
+ x="97.507919"
+ y="199.79646" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="105.80277"
+ y="207.61554"
+ id="text6214-78-1"><tspan
+ sodipodi:role="line"
+ id="tspan6212-2-2"
+ x="105.80277"
+ y="207.61554"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;text-align:center;baseline-shift:sub;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6218-3-0">c-1</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="105.80277"
+ y="212.02527"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6216-2-3" /></text>
+ <rect
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.15929575;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6210-8-3-28"
+ width="15.999747"
+ height="14.073821"
+ x="113.75667"
+ y="199.79646" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="121.63119"
+ y="208.12714"
+ id="text6214-78-6-0"><tspan
+ sodipodi:role="line"
+ id="tspan6212-2-9-72"
+ x="121.63119"
+ y="208.12714"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332">CB<tspan
+ style="font-size:2.2930553px;text-align:center;baseline-shift:sub;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6218-3-7-8">c</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="121.63119"
+ y="212.53687"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6216-2-9-4" /></text>
+ <path
+ style="display:inline;fill:none;stroke:#a8d08d;stroke-width:0.25187415;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-start:url(#Arrow2Mstart-0-6);marker-end:url(#marker6509-1-39)"
+ d="m 114.04452,197.18711 c 15.77071,-0.0757 15.77071,-0.0757 15.77071,-0.0757"
+ id="path6255-2-9"
+ inkscape:connector-curvature="0" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="125.36855"
+ y="186.19617"
+ id="text4821-3-3-0-5-1-9"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ id="tspan4819-0-7-8-3-7-5"
+ x="125.36855"
+ y="186.19617"
+ style="fill:#a8d08d;fill-opacity:1;stroke-width:0.23616144">eb</tspan></text>
+ <rect
+ style="display:inline;opacity:1;fill:#a8d08d;fill-opacity:1;stroke:#000000;stroke-width:0.15899999;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
+ id="rect6210-97-3"
+ width="16.248745"
+ height="14.0336"
+ x="38.68203"
+ y="199.83669" />
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.52777767px;line-height:1.25;font-family:sans-serif;text-align:center;letter-spacing:0px;word-spacing:0px;text-anchor:middle;display:inline;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.26458332"
+ x="46.825874"
+ y="208.42111"
+ id="text6214-8-2"><tspan
+ sodipodi:role="line"
+ id="tspan6212-4-7"
+ x="46.825874"
+ y="208.42111"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332">CB<tspan
+ style="font-size:64.99999762%;text-align:center;baseline-shift:sub;text-anchor:middle"
+ id="tspan6218-7-0">2</tspan></tspan><tspan
+ sodipodi:role="line"
+ x="46.825874"
+ y="212.83084"
+ style="text-align:center;text-anchor:middle;stroke-width:0.26458332"
+ id="tspan6216-3-5" /></text>
+ <text
+ xml:space="preserve"
+ style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#7f8085;fill-opacity:1;stroke:none;stroke-width:0.23616144"
+ x="145.06511"
+ y="197.72243"
+ id="text4821-3-6-0-2-7-2"
+ transform="scale(0.95903924,1.0427102)"><tspan
+ sodipodi:role="line"
+ x="145.06511"
+ y="197.72243"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan15491-4">Result is encoded back into the given</tspan><tspan
+ sodipodi:role="line"
+ x="145.06511"
+ y="201.65845"
+ style="fill:#7f8085;fill-opacity:1;stroke-width:0.23616144"
+ id="tspan16671">output mbuf as one contiguous buffer</tspan></text>
+ </g>
+</svg>
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index bbbe7895..3b920e53 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -17,10 +17,12 @@ Programmer's Guide
mbuf_lib
poll_mode_drv
rte_flow
+ switch_representation
traffic_metering_and_policing
traffic_management
bbdev
cryptodev_lib
+ compressdev
rte_security
rawdev
link_bonding_poll_mode_drv_lib
@@ -42,6 +44,8 @@ Programmer's Guide
thread_safety_dpdk_functions
eventdev
event_ethernet_rx_adapter
+ event_timer_adapter
+ event_crypto_adapter
qos_framework
power_man
packet_classif_access_ctrl
@@ -49,6 +53,7 @@ Programmer's Guide
vhost_lib
metrics_lib
port_hotplug_framework
+ bpf_lib
source_org
dev_kit_build_system
dev_kit_root_make_help
diff --git a/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst b/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
index 0da1e637..56abee54 100644
--- a/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
+++ b/doc/guides/prog_guide/link_bonding_poll_mode_drv_lib.rst
@@ -148,6 +148,8 @@ the RX and TX queues are also reconfigured using ``rte_eth_tx_queue_setup`` /
``rte_eth_rx_queue_setup`` with the parameters use to configure the bonding
device. If RSS is enabled for bonding device, this mode is also enabled on new
slave and configured as well.
+Any flow that was configured on the bonding device is also configured on the
+added slave.
Setting up multi-queue mode for bonding device to RSS, makes it fully
RSS-capable, so all slaves are synchronized with its configuration. This mode is
@@ -166,6 +168,43 @@ it can be easily used as a pattern providing expected behavior, even if slave
RETAs' sizes are different. If RSS Key is not set for bonded device, it's not
changed on the slaves and default key for device is used.
+As with the RSS configuration, flow consistency is maintained across the bonded
+slaves for the following rte_flow operations (see the sketch after this list):
+
+Validate:
+ - Validate the flow on each slave; a failure on at least one slave causes
+   the bond validation to fail.
+
+Create:
+ - Create the flow on all slaves.
+ - Save the flow objects created on the slaves in the bonding internal flow
+   structure.
+ - A flow creation failure on an existing slave rejects the flow.
+ - A flow creation failure on a new slave, at the time the slave is added,
+   rejects the slave.
+
+Destroy:
+ - Destroy the flow in all slaves and release the bond internal flow
+ memory.
+
+Flush:
+ - Destroy all the bonding PMD flows in all the slaves.
+
+.. note::
+
+ Don't call slaves flush directly, It destroys all the slave flows which
+ may include external flows or the bond internal LACP flow.
+
+Query:
+ - Summarize flow counters from all the slaves, relevant only for
+ ``RTE_FLOW_ACTION_TYPE_COUNT``.
+
+Isolate:
+ - Call to flow isolate for all slaves.
+ - Failure in flow isolation for existed slave rejects the isolate mode.
+ - Failure in flow isolation for new slaves in slave adding time rejects
+ the slave.
+
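+Below is a minimal sketch of creating a flow through the bonding port. The
+``bond_port_id``, ``attr``, ``pattern`` and ``actions`` variables are assumed
+to be prepared elsewhere; the point is only that the bonding port ID is used:
+
+.. code-block:: c
+
+    /* Flows must be created through the bonding port; the bonding PMD then
+     * replicates them to every slave. */
+    struct rte_flow_error error;
+    struct rte_flow *flow;
+
+    flow = rte_flow_create(bond_port_id, &attr, pattern, actions, &error);
+    if (flow == NULL)
+        printf("flow creation failed: %s\n",
+               error.message ? error.message : "(no message)");
+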
All settings are managed through the bonding port API and always are propagated
in one direction (from bonding to slaves).
@@ -207,8 +246,8 @@ common hash function available for each of them. Changing RSS key is only
possible, when all slave devices support the same key size.
To prevent inconsistency on how slaves process packets, once a device is added
-to a bonding device, RSS configuration should be managed through the bonding
-device API, and not directly on the slave.
+to a bonding device, RSS and rte_flow configurations should be managed through
+the bonding device API, and not directly on the slave.
Like all other PMD, all functions exported by a PMD are lock-free functions
that are assumed not to be invoked in parallel on different logical cores to
diff --git a/doc/guides/prog_guide/mbuf_lib.rst b/doc/guides/prog_guide/mbuf_lib.rst
index 210a9af9..0d3223b0 100644
--- a/doc/guides/prog_guide/mbuf_lib.rst
+++ b/doc/guides/prog_guide/mbuf_lib.rst
@@ -10,9 +10,8 @@ The mbuf library provides the ability to allocate and free buffers (mbufs)
that may be used by the DPDK application to store message buffers.
The message buffers are stored in a mempool, using the :ref:`Mempool Library <Mempool_Library>`.
-A rte_mbuf struct can carry network packet buffers
-or generic control buffers (indicated by the CTRL_MBUF_FLAG).
-This can be extended to other types.
+An rte_mbuf struct generally carries network packet buffers, but it can
+actually carry any data (control data, events, ...).
The rte_mbuf header structure is kept as small as possible and currently uses
just two cache lines, with the most frequently used fields being on the first
of the two cache lines.
@@ -68,13 +67,13 @@ Buffers Stored in Memory Pools
The Buffer Manager uses the :ref:`Mempool Library <Mempool_Library>` to allocate buffers.
Therefore, it ensures that the packet header is interleaved optimally across the channels and ranks for L3 processing.
An mbuf contains a field indicating the pool that it originated from.
-When calling rte_ctrlmbuf_free(m) or rte_pktmbuf_free(m), the mbuf returns to its original pool.
+When calling rte_pktmbuf_free(m), the mbuf returns to its original pool.
Constructors
------------
-Packet and control mbuf constructors are provided by the API.
-The rte_pktmbuf_init() and rte_ctrlmbuf_init() functions initialize some fields in the mbuf structure that
+Packet mbuf constructors are provided by the API.
+The rte_pktmbuf_init() function initializes some fields in the mbuf structure that
are not modified by the user once created (mbuf type, origin pool, buffer start address, and so on).
This function is given as a callback function to the rte_mempool_create() function at pool creation time.
diff --git a/doc/guides/prog_guide/multi_proc_support.rst b/doc/guides/prog_guide/multi_proc_support.rst
index 90596794..1384fe33 100644
--- a/doc/guides/prog_guide/multi_proc_support.rst
+++ b/doc/guides/prog_guide/multi_proc_support.rst
@@ -29,6 +29,9 @@ after a primary process has already configured the hugepage shared memory for th
Secondary processes should run alongside primary process with same DPDK version.
+ Secondary processes which require access to physical devices in the primary
+ process must be run with the same whitelist and blacklist options.
+
To support these two process types, and other multi-process setups described later,
two additional command-line parameters are available to the EAL:
@@ -60,6 +63,10 @@ and point to the same objects, in both processes.
Refer to `Multi-process Limitations`_ for details of
how Linux kernel Address-Space Layout Randomization (ASLR) can affect memory sharing.
+ If the primary process was run with the ``--legacy-mem`` or
+ ``--single-file-segments`` switch, secondary processes must be run with the
+ same switch specified. Otherwise, memory corruption may occur.
+
.. _figure_multi_process_memory:
.. figure:: img/multi_process_memory.*
@@ -113,8 +120,13 @@ The rte part of the filenames of each of the above is configurable using the fil
In addition to specifying the file-prefix parameter,
any DPDK applications that are to be run side-by-side must explicitly limit their memory use.
-This is done by passing the -m flag to each process to specify how much hugepage memory, in megabytes,
-each process can use (or passing ``--socket-mem`` to specify how much hugepage memory on each socket each process can use).
+This is less of a problem on Linux, as by default, applications will not
+allocate more memory than they need. However, if ``--legacy-mem`` is used, DPDK
+will attempt to preallocate all the memory it can get, and memory use must be
+explicitly limited. This is done by passing the ``-m`` flag to each process to
+specify how much hugepage memory, in megabytes, each process can use (or passing
+``--socket-mem`` to specify how much hugepage memory on each socket each process
+can use).
.. note::
@@ -141,8 +153,10 @@ There are a number of limitations to what can be done when running DPDK multi-pr
Some of these are documented below:
* The multi-process feature requires that the exact same hugepage memory mappings be present in all applications.
- The Linux security feature - Address-Space Layout Randomization (ASLR) can interfere with this mapping,
- so it may be necessary to disable this feature in order to reliably run multi-process applications.
+ This makes secondary process startup generally unreliable. Disabling the
+ Linux security feature Address-Space Layout Randomization (ASLR) may
+ help in getting more consistent mappings, but not necessarily more reliable
+ ones - if the mappings are wrong, they will be consistently wrong!
.. warning::
@@ -175,3 +189,153 @@ instead of the functions which do the hashing internally, such as rte_hash_add()
which means that only the first, primary DPDK process instance can open and mmap /dev/hpet.
If the number of required DPDK processes exceeds that of the number of available HPET comparators,
the TSC (which is the default timer in this release) must be used as a time source across all processes instead of the HPET.
+
+Communication between multiple processes
+----------------------------------------
+
+While there are multiple ways one can approach inter-process communication in
+DPDK, there is also a native DPDK IPC API available. It is not intended for
+performance-critical paths, but rather as a convenient, general purpose API
+to exchange short messages between primary and secondary processes.
+DPDK IPC API supports the following communication modes:
+
+* Unicast message from secondary to primary
+* Broadcast message from primary to all secondaries
+
+In other words, any IPC message sent in a primary process will be delivered to
+all secondaries, while any IPC message sent in a secondary process will only be
+delivered to the primary process. Unicast from primary to secondary or from
+secondary to secondary is not supported.
+
+There are three types of communications that are available within DPDK IPC API:
+
+* Message
+* Synchronous request
+* Asynchronous request
+
+A "message" type does not expect a response and is meant to be a best-effort
+notification mechanism, while the two types of "requests" are meant to be a two
+way communication mechanism, with the requester expecting a response from the
+other side.
+
+Both messages and requests will trigger a named callback on the receiver side.
+These callbacks will be called from within a dedicated IPC or interrupt thread
+that is not part of the EAL lcore threads.
+
+Registering for incoming messages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before any messages can be received, a callback must be registered.
+This is accomplished by calling the ``rte_mp_action_register()`` function. It
+accepts a unique callback name, and a function pointer to a callback
+that will be called when a message or a request matching this callback name
+arrives.
+
+If the application is no longer willing to receive messages intended for a
+specific callback function, the ``rte_mp_action_unregister()`` function can be
+called to ensure that the callback will not be triggered again.
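+
+A minimal registration sketch (the callback body and the ``"example_action"``
+name are illustrative, not part of the API):
+
+.. code-block:: c
+
+    static int
+    example_cb(const struct rte_mp_msg *msg, const void *peer __rte_unused)
+    {
+        printf("received message for %s\n", msg->name);
+        return 0;
+    }
+
+    /* Register the callback under a unique name. */
+    if (rte_mp_action_register("example_action", example_cb) < 0)
+        rte_exit(EXIT_FAILURE, "cannot register IPC action\n");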
+
+Sending messages
+~~~~~~~~~~~~~~~~
+
+To send a message, an ``rte_mp_msg`` descriptor must be populated first. The
+list of fields to be populated is as follows:
+
+* ``name`` - message name. This name must match the receiver's callback name.
+* ``param`` - message data (up to 256 bytes).
+* ``len_param`` - length of message data.
+* ``fds`` - file descriptors to pass along with the data (up to 8 fds).
+* ``num_fds`` - number of file descriptors to send.
+
+Once the structure is populated, calling ``rte_mp_sendmsg()`` will send the
+descriptor either to all secondary processes (if sent from the primary
+process), or to the primary process (if sent from a secondary process). The
+function will return a value indicating whether sending the message succeeded
+or not.
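+
+A minimal sending sketch (message name and payload are illustrative):
+
+.. code-block:: c
+
+    struct rte_mp_msg msg;
+
+    memset(&msg, 0, sizeof(msg));
+    snprintf(msg.name, sizeof(msg.name), "example_action");
+    msg.len_param = snprintf((char *)msg.param, sizeof(msg.param),
+            "hello") + 1; /* include terminating NUL */
+    msg.num_fds = 0; /* no file descriptors attached */
+
+    if (rte_mp_sendmsg(&msg) < 0)
+        printf("failed to send IPC message\n");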
+
+Sending requests
+~~~~~~~~~~~~~~~~
+
+Sending requests involves waiting for the other side to reply, so they can block
+for a relatively long time.
+
+To send a request, a message descriptor ``rte_mp_msg`` must be populated.
+Additionally, a ``timespec`` value must be specified as a timeout, after which
+IPC will stop waiting and return.
+
+For synchronous requests, the ``rte_mp_reply`` descriptor must also be
+created. This is where the responses will be stored. The list of fields that
+will be populated by IPC is as follows:
+
+* ``nb_sent`` - number indicating how many requests were sent (i.e. how many
+  peer processes were active at the time of the request).
+* ``nb_received`` - number indicating how many responses were received (i.e. of
+  those peer processes that were active at the time of the request, how many
+  have replied).
+* ``msgs`` - pointer to where all of the responses are stored. The order in
+  which responses appear is undefined. When doing synchronous requests, this
+  memory must be freed by the requestor after the request completes!
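+
+A minimal synchronous request sketch with a 5 second timeout
+(``handle_response()`` is a hypothetical application function):
+
+.. code-block:: c
+
+    struct rte_mp_msg req;
+    struct rte_mp_reply reply;
+    struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
+    int i;
+
+    memset(&req, 0, sizeof(req));
+    snprintf(req.name, sizeof(req.name), "example_action");
+
+    if (rte_mp_request_sync(&req, &reply, &ts) == 0) {
+        for (i = 0; i < reply.nb_received; i++)
+            handle_response(&reply.msgs[i]);
+        free(reply.msgs); /* responses must be freed by the requestor */
+    }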
+
+For asynchronous requests, a function pointer to the callback function must be
+provided instead. This callback will be called when the request either has
+timed out, or has received a response to all the messages that were sent.
+
+.. warning::
+
+ When an asynchronous request times out, the callback will be called not by
+ a dedicated IPC thread, but rather from the EAL interrupt thread. Because of
+ this, it may not be possible for DPDK to trigger another interrupt-based
+ event (such as an alarm) while handling an asynchronous IPC callback.
+
+When the callback is called, the original request descriptor will be provided
+(so that it is possible to determine which sent message this is a callback
+for), along with a response descriptor like the one described above.
+When doing asynchronous requests, there is no need to free the resulting
+``rte_mp_reply`` descriptor.
+
+Receiving and responding to messages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To receive a message, a named callback must be registered using the
+``rte_mp_action_register()`` function. The name of the callback must match the
+``name`` field in the sender's ``rte_mp_msg`` message descriptor in order for
+this message to be delivered and for the callback to be triggered.
+
+The callback's prototype is ``rte_mp_t``, and it consists of the incoming
+message pointer ``msg`` and an opaque pointer ``peer``. The contents of ``msg``
+will be identical to the ones sent by the sender.
+
+If a response is required, a new ``rte_mp_msg`` message descriptor must be
+constructed and sent via the ``rte_mp_reply()`` function, along with the
+``peer`` pointer. The resulting response will then be delivered to the correct
+requestor.
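+
+A minimal response sketch, from within a registered callback (the payload is
+illustrative):
+
+.. code-block:: c
+
+    static int
+    example_reply_cb(const struct rte_mp_msg *msg, const void *peer)
+    {
+        struct rte_mp_msg resp;
+
+        memset(&resp, 0, sizeof(resp));
+        /* the response name must match the request name */
+        snprintf(resp.name, sizeof(resp.name), "%s", msg->name);
+        resp.len_param = snprintf((char *)resp.param, sizeof(resp.param),
+                "ack") + 1;
+
+        /* peer identifies the requestor the response is routed back to */
+        return rte_mp_reply(&resp, peer);
+    }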
+
+Misc considerations
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Due to the underlying IPC implementation being single-threaded, recursive
+requests (i.e. sending a request while responding to another request) are not
+supported. However, since sending messages (not requests) does not involve an
+IPC thread, sending messages while processing another message or request is
+supported.
+
+Asynchronous request callbacks may be triggered either from IPC thread or from
+interrupt thread, depending on whether the request has timed out. It is
+therefore suggested to avoid waiting for interrupt-based events (such as alarms)
+inside asynchronous IPC request callbacks. This limitation does not apply to
+messages or synchronous requests.
+
+If callbacks spend a long time processing the incoming requests, the requestor
+might time out, so setting the right timeout value on the requestor side is
+imperative.
+
+If some of the messages timed out, the ``nb_sent`` and ``nb_received`` fields
+in the ``rte_mp_reply`` descriptor will not have matching values. This is not
+treated as an error by the IPC API, and it is expected that the user will be
+responsible for deciding how to handle such cases.
+
+If a callback has been registered, IPC will assume that it is safe to call it.
+This is important when registering callbacks during DPDK initialization.
+During initialization, IPC will consider the receiving side as non-existing if
+the callback has not been registered yet. However, once the callback has been
+registered, it is expected that IPC should be safe to trigger it, even if the
+rest of the DPDK initialization hasn't finished yet.
diff --git a/doc/guides/prog_guide/overview.rst b/doc/guides/prog_guide/overview.rst
index 2663fe0e..c01f37e3 100644
--- a/doc/guides/prog_guide/overview.rst
+++ b/doc/guides/prog_guide/overview.rst
@@ -130,8 +130,8 @@ The mbuf library provides the facility to create and destroy buffers
that may be used by the DPDK application to store message buffers.
The message buffers are created at startup time and stored in a mempool, using the DPDK mempool library.
-This library provides an API to allocate/free mbufs, manipulate control message buffers (ctrlmbuf) which are generic message buffers,
-and packet buffers (pktmbuf) which are used to carry network packets.
+This library provides an API to allocate/free mbufs, manipulate
+packet buffers which are used to carry network packets.
Network Packet Buffer Management is described in :ref:`Mbuf Library <Mbuf_Library>`.
diff --git a/doc/guides/prog_guide/poll_mode_drv.rst b/doc/guides/prog_guide/poll_mode_drv.rst
index e5d01874..b2cf4835 100644
--- a/doc/guides/prog_guide/poll_mode_drv.rst
+++ b/doc/guides/prog_guide/poll_mode_drv.rst
@@ -296,22 +296,37 @@ described in the mbuf API documentation and in the in :ref:`Mbuf Library
Per-Port and Per-Queue Offloads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-In the DPDK offload API, offloads are divided into per-port and per-queue offloads.
+In the DPDK offload API, offloads are divided into per-port and per-queue offloads as follows:
+
+* A per-queue offload can be enabled on one queue and disabled on another queue at the same time.
+* A pure per-port offload is one supported by the device but not as a per-queue type.
+* A pure per-port offload can't be enabled on one queue and disabled on another queue at the same time.
+* A pure per-port offload must be enabled or disabled on all queues at the same time.
+* Any offload is either per-queue or pure per-port type, but can't be both types on the same device.
+* Port capabilities = per-queue capabilities + pure per-port capabilities.
+* Any supported offload can be enabled on all queues.
+
The different offloads capabilities can be queried using ``rte_eth_dev_info_get()``.
+The ``dev_info->[rt]x_queue_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all per-queue offloading capabilities.
+The ``dev_info->[rt]x_offload_capa`` returned from ``rte_eth_dev_info_get()`` includes all pure per-port and per-queue offloading capabilities.
Supported offloads can be either per-port or per-queue.
Offloads are enabled using the existing ``DEV_TX_OFFLOAD_*`` or ``DEV_RX_OFFLOAD_*`` flags.
-Per-port offload configuration is set using ``rte_eth_dev_configure``.
-Per-queue offload configuration is set using ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup``.
-To enable per-port offload, the offload should be set on both device configuration and queue setup.
-In case of a mixed configuration the queue setup shall return with an error.
-To enable per-queue offload, the offload can be set only on the queue setup.
-Offloads which are not enabled are disabled by default.
-
-For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
-In such cases it is not required to set other flags in ``txq_flags``.
-For an application to use the Rx offloads API it should set the ``ignore_offload_bitfield`` bit in the ``rte_eth_rxmode`` struct.
-In such cases it is not required to set other bitfield offloads in the ``rxmode`` struct.
+Any offload requested by an application must be within the device capabilities.
+Any offload is disabled by default if it is not set in the parameter
+``dev_conf->[rt]xmode.offloads`` to ``rte_eth_dev_configure()`` and
+``[rt]x_conf->offloads`` to ``rte_eth_[rt]x_queue_setup()``.
+
+If any offload is enabled in ``rte_eth_dev_configure()`` by an application,
+it is enabled on all queues no matter whether it is per-queue or
+per-port type and no matter whether it is set or cleared in
+``[rt]x_conf->offloads`` to ``rte_eth_[rt]x_queue_setup()``.
+
+If a per-queue offload hasn't been enabled in ``rte_eth_dev_configure()``,
+it can be enabled or disabled in ``rte_eth_[rt]x_queue_setup()`` for an
+individual queue. A newly added offload in ``[rt]x_conf->offloads`` to
+``rte_eth_[rt]x_queue_setup()`` is one which hasn't been enabled in
+``rte_eth_dev_configure()`` and is requested to be enabled in
+``rte_eth_[rt]x_queue_setup()``. It must be of the per-queue type, otherwise
+an error log is triggered.
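+
+A minimal configuration sketch (``port_id`` and ``mb_pool`` are assumed to
+exist; the chosen offload flags are illustrative and must first be checked
+against the ``dev_info`` capabilities):
+
+.. code-block:: c
+
+    struct rte_eth_conf port_conf;
+    struct rte_eth_rxconf rxq_conf;
+    struct rte_eth_dev_info dev_info;
+
+    rte_eth_dev_info_get(port_id, &dev_info);
+
+    memset(&port_conf, 0, sizeof(port_conf));
+    /* Enabled at configure time, this offload applies to all Rx queues. */
+    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
+    rte_eth_dev_configure(port_id, 1, 1, &port_conf);
+
+    /* A per-queue offload can additionally be enabled on a single queue,
+     * provided it is listed in dev_info.rx_queue_offload_capa. */
+    rxq_conf = dev_info.default_rxconf;
+    rxq_conf.offloads = port_conf.rxmode.offloads | DEV_RX_OFFLOAD_SCATTER;
+    rte_eth_rx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
+            &rxq_conf, mb_pool);
+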
Poll Mode Driver API
--------------------
@@ -345,6 +360,27 @@ Ethernet Device API
The Ethernet device API exported by the Ethernet PMDs is described in the *DPDK API Reference*.
+.. _ethernet_device_standard_device_arguments:
+
+Ethernet Device Standard Device Arguments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Standard Ethernet device arguments allow a set of commonly used arguments/
+parameters, applicable to all Ethernet devices, to be used for specifying a
+particular device and for passing common configuration
+parameters to those ports.
+
+* ``representor`` for a device which supports the creation of representor ports,
+  this argument allows the user to specify which switch ports to enable port
+  representors for::
+
+ -w BDBF,representor=0
+ -w BDBF,representor=[0,4,6,9]
+ -w BDBF,representor=[0-31]
+
+Note: PMDs are not required to support the standard device arguments and users
+should consult the relevant PMD documentation to see which devargs are
+supported.
+
Extended Statistics API
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/prog_guide/rawdev.rst b/doc/guides/prog_guide/rawdev.rst
index 54bffc58..42c195ce 100644
--- a/doc/guides/prog_guide/rawdev.rst
+++ b/doc/guides/prog_guide/rawdev.rst
@@ -43,7 +43,7 @@ Key factors guiding design of the Rawdevice library:
* Firmware Management - Firmware load/unload/status
2. Application API should be able to pass along arbitrary state information
- to/fro device driver. This can be achieved by maintaining context
+ to/from device driver. This can be achieved by maintaining context
information through opaque data or pointers.
Figure below outlines the layout of the rawdevice library and device vis-a-vis
diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 961943dd..b305a72a 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -1,32 +1,6 @@
-.. BSD LICENSE
+.. SPDX-License-Identifier: BSD-3-Clause
Copyright 2016 6WIND S.A.
- Copyright 2016 Mellanox.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of 6WIND S.A. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ Copyright 2016 Mellanox Technologies, Ltd
.. _Generic_flow_API:
@@ -55,9 +29,6 @@ encompasses and supersedes (including all functions and filter types) in
order to expose a single interface with an unambiguous behavior that is
common to all poll-mode drivers (PMDs).
-Several methods to migrate existing applications are described in `API
-migration`_.
-
Flow rule
---------
@@ -93,8 +64,12 @@ Thus predictable results for a given priority level can only be achieved
with non-overlapping rules, using perfect matching on all protocol layers.
Flow rules can also be grouped, the flow rule priority is specific to the
-group they belong to. All flow rules in a given group are thus processed
-either before or after another group.
+group they belong to. All flow rules in a given group are thus processed within
+the context of that group. Groups are not linked by default, so the logical
+hierarchy of groups must be explicitly defined by flow rules themselves in each
+group using the JUMP action to define the next group to redirect to. Only flow
+rules defined in the default group 0 are guaranteed to be matched against; this
+makes group 0 the origin of any group hierarchy defined by an application.
Support for multiple actions per rule may be implemented internally on top
of non-default hardware priorities, as a result both features may not be
@@ -141,29 +116,34 @@ Attributes
Attribute: Group
^^^^^^^^^^^^^^^^
-Flow rules can be grouped by assigning them a common group number. Lower
-values have higher priority. Group 0 has the highest priority.
+Flow rules can be grouped by assigning them a common group number. Groups
+allow a logical hierarchy of flow rule groups (tables) to be defined. These
+groups can be supported virtually in the PMD or in the physical device.
+Group 0 is the default group and the only group which flows are
+guaranteed to be matched against; all subsequent groups can only be reached by
+way of the JUMP action from a matched flow rule.
Although optional, applications are encouraged to group similar rules as
much as possible to fully take advantage of hardware capabilities
(e.g. optimized matching) and work around limitations (e.g. a single pattern
-type possibly allowed in a given group).
+type possibly allowed in a given group), while being aware that the group
+hierarchies must be programmed explicitly.
Note that support for more than a single group is not guaranteed.
Attribute: Priority
^^^^^^^^^^^^^^^^^^^
-A priority level can be assigned to a flow rule. Like groups, lower values
+A priority level can be assigned to a flow rule; lower values
denote higher priority, with 0 as the maximum.
-A rule with priority 0 in group 8 is always matched after a rule with
-priority 8 in group 0.
-
-Group and priority levels are arbitrary and up to the application, they do
+Priority levels are arbitrary and up to the application, they do
not need to be contiguous nor start from 0, however the maximum number
varies between devices and may be affected by existing flow rules.
+A flow which matches multiple rules in the same group will always be matched by
+the rule with the highest priority in that group.
+
If a packet is matched by several rules of a given group for a given
priority level, the outcome is undefined. It can take any path, may be
duplicated or even cause unrecoverable errors.
@@ -173,7 +153,13 @@ Note that support for more than a single priority level is not guaranteed.
Attribute: Traffic direction
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Flow rules can apply to inbound and/or outbound traffic (ingress/egress).
+Flow rule patterns apply to inbound and/or outbound traffic.
+
+In the context of this API, **ingress** and **egress** respectively stand
+for **inbound** and **outbound** based on the standpoint of the application
+creating a flow rule.
+
+There are no exceptions to this definition.
Several pattern items and actions are valid and can be used in both
directions. At least one direction must be specified.
@@ -181,17 +167,36 @@ directions. At least one direction must be specified.
Specifying both directions at once for a given rule is not recommended but
may be valid in a few cases (e.g. shared counters).
+Attribute: Transfer
+^^^^^^^^^^^^^^^^^^^
+
+Instead of simply matching the properties of traffic as it would appear on a
+given DPDK port ID, enabling this attribute transfers a flow rule to the
+lowest possible level of any device endpoints found in the pattern.
+
+When supported, this effectively enables an application to reroute traffic
+not necessarily intended for it (e.g. coming from or addressed to different
+physical ports, VFs or applications) at the device level.
+
+It complements the behavior of some pattern items such as `Item: PHY_PORT`_
+and is meaningless without them.
+
+When transferring flow rules, **ingress** and **egress** attributes
+(`Attribute: Traffic direction`_) keep their original meaning, as if
+processing traffic emitted or received by the application.
+
Pattern item
~~~~~~~~~~~~
Pattern items fall in two categories:
-- Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
- IPV6, ICMP, UDP, TCP, SCTP, VXLAN, MPLS, GRE, ESP and so on), usually
- associated with a specification structure.
+- Matching protocol headers and packet data, usually associated with a
+ specification structure. These must be stacked in the same order as the
+ protocol layers to match inside packets, starting from the lowest.
-- Matching meta-data or affecting pattern processing (END, VOID, INVERT, PF,
- VF, PORT and so on), often without a specification structure.
+- Matching meta-data or affecting pattern processing, often without a
+ specification structure. Since they do not match packet contents, their
+ position in the list is usually not relevant.
Item specification structures are used to match specific values among
protocol fields (or item properties). Documentation describes for each item
@@ -506,15 +511,12 @@ Usage example, matching non-TCPv4 packets only:
Item: ``PF``
^^^^^^^^^^^^
-Matches packets addressed to the physical function of the device.
+Matches traffic originating from (ingress) or going to (egress) the physical
+function of the current device.
-If the underlying device function differs from the one that would normally
-receive the matched traffic, specifying this item prevents it from reaching
-that device unless the flow rule contains a `Action: PF`_. Packets are not
-duplicated between device instances by default.
+If supported, should work even if the physical function is not managed by
+the application and thus not associated with a DPDK port ID.
-- Likely to return an error or never match any traffic if applied to a VF
- device.
- Can be combined with any number of `Item: VF`_ to match both PF and VF
traffic.
- ``spec``, ``last`` and ``mask`` must not be set.
@@ -536,15 +538,15 @@ duplicated between device instances by default.
Item: ``VF``
^^^^^^^^^^^^
-Matches packets addressed to a virtual function ID of the device.
+Matches traffic originating from (ingress) or going to (egress) a given
+virtual function of the current device.
+
+If supported, should work even if the virtual function is not managed by the
+application and thus not associated with a DPDK port ID.
-If the underlying device function differs from the one that would normally
-receive the matched traffic, specifying this item prevents it from reaching
-that device unless the flow rule contains a `Action: VF`_. Packets are not
-duplicated between device instances by default.
+Note this pattern item does not match VF representors traffic which, as
+separate entities, should be addressed through their own DPDK port IDs.
-- Likely to return an error or never match any traffic if this causes a VF
- device to match traffic addressed to a different VF.
- Can be specified multiple times to match traffic addressed to several VF
IDs.
- Can be combined with a PF item to match both PF and VF traffic.
@@ -564,15 +566,15 @@ duplicated between device instances by default.
| ``mask`` | ``id`` | zeroed to match any VF ID |
+----------+----------+---------------------------+
-Item: ``PORT``
-^^^^^^^^^^^^^^
+Item: ``PHY_PORT``
+^^^^^^^^^^^^^^^^^^
-Matches packets coming from the specified physical port of the underlying
-device.
+Matches traffic originating from (ingress) or going to (egress) a physical
+port of the underlying device.
-The first PORT item overrides the physical port normally associated with the
-specified DPDK input port (port_id). This item can be provided several times
-to match additional physical ports.
+The first PHY_PORT item overrides the physical port normally associated with
+the specified DPDK input port (port_id). This item can be provided several
+times to match additional physical ports.
Note that physical ports are not necessarily tied to DPDK input ports
(port_id) when those are not under DPDK control. Possible values are
@@ -584,9 +586,9 @@ associated with a port_id should be retrieved by other means.
- Default ``mask`` matches any port index.
-.. _table_rte_flow_item_port:
+.. _table_rte_flow_item_phy_port:
-.. table:: PORT
+.. table:: PHY_PORT
+----------+-----------+--------------------------------+
| Field | Subfield | Value |
@@ -598,6 +600,66 @@ associated with a port_id should be retrieved by other means.
| ``mask`` | ``index`` | zeroed to match any port index |
+----------+-----------+--------------------------------+
+Item: ``PORT_ID``
+^^^^^^^^^^^^^^^^^
+
+Matches traffic originating from (ingress) or going to (egress) a given DPDK
+port ID.
+
+Normally only supported if the port ID in question is known by the
+underlying PMD and related to the device the flow rule is created against.
+
+This must not be confused with `Item: PHY_PORT`_ which refers to the
+physical port of a device, whereas `Item: PORT_ID`_ refers to a ``struct
+rte_eth_dev`` object on the application side (also known as "port
+representor" depending on the kind of underlying device).
+
+- Default ``mask`` matches the specified DPDK port ID.
+
+.. _table_rte_flow_item_port_id:
+
+.. table:: PORT_ID
+
+ +----------+----------+-----------------------------+
+ | Field | Subfield | Value |
+ +==========+==========+=============================+
+ | ``spec`` | ``id`` | DPDK port ID |
+ +----------+----------+-----------------------------+
+ | ``last`` | ``id`` | upper range value |
+ +----------+----------+-----------------------------+
+ | ``mask`` | ``id`` | zeroed to match any port ID |
+ +----------+----------+-----------------------------+
+
+Item: ``MARK``
+^^^^^^^^^^^^^^
+
+Matches an arbitrary integer value which was set using the ``MARK`` action in
+a previously matched rule.
+
+This item can only be specified once as a match criterion, as the ``MARK``
+action can only be specified once in a flow rule's actions.
+
+Note that the value of the MARK field is arbitrary and application defined.
+
+Depending on the underlying implementation, the MARK item may be supported on
+the physical device, with virtual groups in the PMD, or not at all.
+
+- Default ``mask`` matches any integer value.
+
+.. _table_rte_flow_item_mark:
+
+.. table:: MARK
+
+ +----------+----------+---------------------------+
+ | Field | Subfield | Value |
+ +==========+==========+===========================+
+ | ``spec`` | ``id`` | integer value |
+ +----------+----------+---------------------------+
+ | ``last`` | ``id`` | upper range value |
+ +----------+----------+---------------------------+
+ | ``mask`` | ``id`` | zeroed to match any value |
+ +----------+----------+---------------------------+
+
Data matching item types
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -786,9 +848,15 @@ Item: ``ETH``
Matches an Ethernet header.
+The ``type`` field either stands for "EtherType" or "TPID" when followed by
+so-called layer 2.5 pattern items such as ``RTE_FLOW_ITEM_TYPE_VLAN``. In
+the latter case, ``type`` refers to that of the outer header, with the inner
+EtherType/TPID provided by the subsequent pattern item. This is the same
+order as on the wire.
+
- ``dst``: destination MAC.
- ``src``: source MAC.
-- ``type``: EtherType.
+- ``type``: EtherType or TPID.
- Default ``mask`` matches destination and source addresses only.
Item: ``VLAN``
@@ -796,9 +864,13 @@ Item: ``VLAN``
Matches an 802.1Q/ad VLAN tag.
-- ``tpid``: tag protocol identifier.
+The corresponding standard outer EtherType (TPID) values are
+``ETHER_TYPE_VLAN`` or ``ETHER_TYPE_QINQ``. It can be overridden by the
+preceding pattern item.
+
- ``tci``: tag control information.
-- Default ``mask`` matches TCI only.
+- ``inner_type``: inner EtherType or TPID.
+- Default ``mask`` matches the VID part of TCI only (lower 12 bits).
Item: ``IPV4``
^^^^^^^^^^^^^^
@@ -815,7 +887,8 @@ Item: ``IPV6``
Matches an IPv6 header.
-Note: IPv6 options are handled by dedicated pattern items.
+Note: IPv6 options are handled by dedicated pattern items, see `Item:
+IPV6_EXT`_.
- ``hdr``: IPv6 header definition (``rte_ip.h``).
- Default ``mask`` matches source and destination addresses only.
@@ -868,12 +941,15 @@ Item: ``E_TAG``
Matches an IEEE 802.1BR E-Tag header.
-- ``tpid``: tag protocol identifier (0x893F)
+The corresponding standard outer EtherType (TPID) value is
+``ETHER_TYPE_ETAG``. It can be overridden by the preceding pattern item.
+
- ``epcp_edei_in_ecid_b``: E-Tag control information (E-TCI), E-PCP (3b),
E-DEI (1b), ingress E-CID base (12b).
- ``rsvd_grp_ecid_b``: reserved (2b), GRP (2b), E-CID base (12b).
- ``in_ecid_e``: ingress E-CID ext.
- ``ecid_e``: E-CID ext.
+- ``inner_type``: inner EtherType or TPID.
- Default ``mask`` simultaneously matches GRP and E-CID base.
Item: ``NVGRE``
@@ -992,35 +1068,155 @@ Matches a GENEVE header.
- ``rsvd1``: reserved, normally 0x00.
- Default ``mask`` matches VNI only.
+Item: ``VXLAN-GPE``
+^^^^^^^^^^^^^^^^^^^
+
+Matches a VXLAN-GPE header (draft-ietf-nvo3-vxlan-gpe-05).
+
+- ``flags``: normally 0x0C (I and P flags).
+- ``rsvd0``: reserved, normally 0x0000.
+- ``protocol``: protocol type.
+- ``vni``: VXLAN network identifier.
+- ``rsvd1``: reserved, normally 0x00.
+- Default ``mask`` matches VNI only.
+
+Item: ``ARP_ETH_IPV4``
+^^^^^^^^^^^^^^^^^^^^^^
+
+Matches an ARP header for Ethernet/IPv4.
+
+- ``hdr``: hardware type, normally 1.
+- ``pro``: protocol type, normally 0x0800.
+- ``hln``: hardware address length, normally 6.
+- ``pln``: protocol address length, normally 4.
+- ``op``: opcode (1 for request, 2 for reply).
+- ``sha``: sender hardware address.
+- ``spa``: sender IPv4 address.
+- ``tha``: target hardware address.
+- ``tpa``: target IPv4 address.
+- Default ``mask`` matches SHA, SPA, THA and TPA.
+
+Item: ``IPV6_EXT``
+^^^^^^^^^^^^^^^^^^
+
+Matches the presence of any IPv6 extension header.
+
+- ``next_hdr``: next header.
+- Default ``mask`` matches ``next_hdr``.
+
+Normally preceded by any of:
+
+- `Item: IPV6`_
+- `Item: IPV6_EXT`_
+
+Item: ``ICMP6``
+^^^^^^^^^^^^^^^
+
+Matches any ICMPv6 header.
+
+- ``type``: ICMPv6 type.
+- ``code``: ICMPv6 code.
+- ``checksum``: ICMPv6 checksum.
+- Default ``mask`` matches ``type`` and ``code``.
+
+Item: ``ICMP6_ND_NS``
+^^^^^^^^^^^^^^^^^^^^^
+
+Matches an ICMPv6 neighbor discovery solicitation.
+
+- ``type``: ICMPv6 type, normally 135.
+- ``code``: ICMPv6 code, normally 0.
+- ``checksum``: ICMPv6 checksum.
+- ``reserved``: reserved, normally 0.
+- ``target_addr``: target address.
+- Default ``mask`` matches target address only.
+
+Item: ``ICMP6_ND_NA``
+^^^^^^^^^^^^^^^^^^^^^
+
+Matches an ICMPv6 neighbor discovery advertisement.
+
+- ``type``: ICMPv6 type, normally 136.
+- ``code``: ICMPv6 code, normally 0.
+- ``checksum``: ICMPv6 checksum.
+- ``rso_reserved``: route flag (1b), solicited flag (1b), override flag
+ (1b), reserved (29b).
+- ``target_addr``: target address.
+- Default ``mask`` matches target address only.
+
+Item: ``ICMP6_ND_OPT``
+^^^^^^^^^^^^^^^^^^^^^^
+
+Matches the presence of any ICMPv6 neighbor discovery option.
+
+- ``type``: ND option type.
+- ``length``: ND option length.
+- Default ``mask`` matches type only.
+
+Normally preceded by any of:
+
+- `Item: ICMP6_ND_NA`_
+- `Item: ICMP6_ND_NS`_
+- `Item: ICMP6_ND_OPT`_
+
+Item: ``ICMP6_ND_OPT_SLA_ETH``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches an ICMPv6 neighbor discovery source Ethernet link-layer address
+option.
+
+- ``type``: ND option type, normally 1.
+- ``length``: ND option length, normally 1.
+- ``sla``: source Ethernet LLA.
+- Default ``mask`` matches source link-layer address only.
+
+Normally preceded by any of:
+
+- `Item: ICMP6_ND_NA`_
+- `Item: ICMP6_ND_OPT`_
+
+Item: ``ICMP6_ND_OPT_TLA_ETH``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Matches an ICMPv6 neighbor discovery target Ethernet link-layer address
+option.
+
+- ``type``: ND option type, normally 2.
+- ``length``: ND option length, normally 1.
+- ``tla``: target Ethernet LLA.
+- Default ``mask`` matches target link-layer address only.
+
+Normally preceded by any of:
+
+- `Item: ICMP6_ND_NS`_
+- `Item: ICMP6_ND_OPT`_
+
Actions
~~~~~~~
Each possible action is represented by a type. Some have associated
configuration structures. Several actions combined in a list can be assigned
-to a flow rule. That list is not ordered.
+to a flow rule and are performed in order.
They fall in three categories:
-- Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
- processing matched packets by subsequent flow rules, unless overridden
- with PASSTHRU.
+- Actions that modify the fate of matching traffic, for instance by dropping
+ or assigning it a specific destination.
-- Non-terminating actions (PASSTHRU, DUP) that leave matched packets up for
- additional processing by subsequent flow rules.
+- Actions that modify matching traffic contents or its properties. This
+ includes adding/removing encapsulation, encryption, compression and marks.
-- Other non-terminating meta actions that do not affect the fate of packets
- (END, VOID, MARK, FLAG, COUNT, SECURITY).
+- Actions related to the flow rule itself, such as updating counters or
+ making it non-terminating.
-When several actions are combined in a flow rule, they should all have
-different types (e.g. dropping a packet twice is not possible).
+Flow rules are terminating by default; not specifying any action of the
+fate kind results in undefined behavior. This applies to both ingress and
+egress.
-Only the last action of a given type is taken into account. PMDs still
-perform error checking on the entire list.
+PASSTHRU, when supported, makes a flow rule non-terminating.
Like matching patterns, action lists are terminated by END items.
-*Note that PASSTHRU is the only action able to override a terminating rule.*
-
Example of action that redirects packets to queue index 10:
.. _table_rte_flow_action_example:
@@ -1033,12 +1229,11 @@ Example of action that redirects packets to queue index 10:
| ``index`` | 10 |
+-----------+-------+
-Action lists examples, their order is not significant, applications must
-consider all actions to be performed simultaneously:
+Actions are performed in list order:
-.. _table_rte_flow_count_and_drop:
+.. _table_rte_flow_count_then_drop:
-.. table:: Count and drop
+.. table:: Count then drop
+-------+--------+
| Index | Action |
@@ -1054,19 +1249,21 @@ consider all actions to be performed simultaneously:
.. _table_rte_flow_mark_count_redirect:
-.. table:: Mark, count and redirect
-
- +-------+--------+-----------+-------+
- | Index | Action | Field | Value |
- +=======+========+===========+=======+
- | 0 | MARK | ``mark`` | 0x2a |
- +-------+--------+-----------+-------+
- | 1 | COUNT |
- +-------+--------+-----------+-------+
- | 2 | QUEUE | ``queue`` | 10 |
- +-------+--------+-----------+-------+
- | 3 | END |
- +-------+----------------------------+
+.. table:: Mark, count then redirect
+
+ +-------+--------+------------+-------+
+ | Index | Action | Field | Value |
+ +=======+========+============+=======+
+ | 0 | MARK | ``mark`` | 0x2a |
+ +-------+--------+------------+-------+
+ | 1 | COUNT | ``shared`` | 0 |
+ | | +------------+-------+
+ | | | ``id`` | 0 |
+ +-------+--------+------------+-------+
+ | 2 | QUEUE | ``queue`` | 10 |
+ +-------+--------+------------+-------+
+ | 3 | END |
+ +-------+-----------------------------+
|
@@ -1084,12 +1281,15 @@ consider all actions to be performed simultaneously:
| 2 | END |
+-------+----------------------------+
-In the above example, considering both actions are performed simultaneously,
-the end result is that only QUEUE has any effect.
+In the above example, while DROP and QUEUE must be performed in order, both
+have to happen before reaching END. Only QUEUE has a visible effect.
+
+Note that such a list may be thought of as ambiguous and rejected on that
+basis.
-.. _table_rte_flow_redirect_queue_3:
+.. _table_rte_flow_redirect_queue_5_3:
-.. table:: Redirect to queue 3
+.. table:: Redirect to queues 5 and 3
+-------+--------+-----------+-------+
| Index | Action | Field | Value |
@@ -1103,9 +1303,9 @@ the end result is that only QUEUE has any effect.
| 3 | END |
+-------+----------------------------+
-As previously described, only the last action of a given type found in the
-list is taken into account. The above example also shows that VOID is
-ignored.
+As previously described, all actions must be taken into account. This
+effectively duplicates traffic to both queues. The above example also shows
+that VOID is ignored.
Action types
~~~~~~~~~~~~
@@ -1155,9 +1355,8 @@ PMDs.
Action: ``PASSTHRU``
^^^^^^^^^^^^^^^^^^^^
-Leaves packets up for additional processing by subsequent flow rules. This
-is the default when a rule does not contain a terminating action, but can be
-specified to force a rule to become non-terminating.
+Leaves traffic up for additional processing by subsequent flow rules; makes
+a flow rule non-terminating.
- No configurable properties.
@@ -1188,6 +1387,38 @@ flow rules:
| 2 | END |
+-------+----------------------------+
+Action: ``JUMP``
+^^^^^^^^^^^^^^^^
+
+Redirects packets to a group on the current device.
+
+In a hierarchy of groups, which can be used to represent physical or logical
+flow group/tables on the device, this action redirects the matched flow to
+the specified group on that device.
+
+If a matched flow is redirected to a table which doesn't contain a matching
+rule for that flow, then the behavior is undefined and the result is up to
+the specific device. Best practice when using groups is to define a default
+flow rule for each group which defines the default actions in that group so
+that consistent behavior is defined.
+
+Defining an action for matched flow in a group to jump to a group which is
+higher in the group hierarchy may not be supported by physical devices,
+depending on how groups are mapped to the physical devices. In the
+definitions of jump actions, applications should be aware that it may be
+possible to define flow rules which trigger an undefined behavior causing
+flows to loop between groups.
+
+.. _table_rte_flow_action_jump:
+
+.. table:: JUMP
+
+ +-----------+------------------------------+
+ | Field | Value |
+ +===========+==============================+
+ | ``group`` | Group to redirect packets to |
+ +-----------+------------------------------+
+
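+A minimal sketch of a rule in group 0 jumping to group 1 (the match pattern
+and the ``rte_flow_create()`` call are omitted):
+
+.. code-block:: c
+
+    struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
+    struct rte_flow_action_jump jump = { .group = 1 };
+    struct rte_flow_action actions[] = {
+        { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
+        { .type = RTE_FLOW_ACTION_TYPE_END },
+    };
+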
Action: ``MARK``
^^^^^^^^^^^^^^^^
@@ -1231,8 +1462,6 @@ Action: ``QUEUE``
Assigns packets to a given queue index.
-- Terminating by default.
-
.. _table_rte_flow_action_queue:
.. table:: QUEUE
@@ -1249,8 +1478,6 @@ Action: ``DROP``
Drop packets.
- No configurable properties.
-- Terminating by default.
-- PASSTHRU overrides this action if both are specified.
.. _table_rte_flow_action_drop:
@@ -1265,23 +1492,36 @@ Drop packets.
Action: ``COUNT``
^^^^^^^^^^^^^^^^^
-Enables counters for this rule.
+Adds a counter action to a matched flow.
+
+If more than one count action is specified in a single flow rule, then each
+action must specify a unique id.
-These counters can be retrieved and reset through ``rte_flow_query()``, see
+Counters can be retrieved and reset through ``rte_flow_query()``, see
``struct rte_flow_query_count``.
-- Counters can be retrieved with ``rte_flow_query()``.
-- No configurable properties.
+The shared flag indicates whether the counter is unique to the flow rule the
+action is specified with, or whether it is a shared counter.
+
+For a count action with the shared flag set, a global device
+namespace is assumed for the counter id, so that any matched flow rules using
+a count action with the same counter id on the same port will contribute to
+that counter.
+
+For ports within the same switch domain, the counter id namespace extends
+to all ports within that switch domain.
.. _table_rte_flow_action_count:
.. table:: COUNT
- +---------------+
- | Field |
- +===============+
- | no properties |
- +---------------+
+ +------------+---------------------+
+ | Field | Value |
+ +============+=====================+
+ | ``shared`` | shared counter flag |
+ +------------+---------------------+
+ | ``id`` | counter id |
+ +------------+---------------------+
Query structure to retrieve and reset flow rule counters:
@@ -1303,59 +1543,75 @@ Query structure to retrieve and reset flow rule counters:
| ``bytes`` | out | number of bytes through this rule |
+---------------+-----+-----------------------------------+
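+
+A minimal query sketch (``flow`` is the handle returned by
+``rte_flow_create()``; ``port_id`` is assumed):
+
+.. code-block:: c
+
+    struct rte_flow_query_count stats = { .reset = 1 };
+    struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
+    struct rte_flow_error error;
+
+    /* Read and reset the counter attached to the rule. */
+    if (rte_flow_query(port_id, flow, &count, &stats, &error) == 0 &&
+            stats.hits_set)
+        printf("hits: %" PRIu64 "\n", stats.hits);
+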
-Action: ``DUP``
+Action: ``RSS``
^^^^^^^^^^^^^^^
-Duplicates packets to a given queue index.
+Similar to QUEUE, except RSS is additionally performed on packets to spread
+them among several queues according to the provided parameters.
-This is normally combined with QUEUE, however when used alone, it is
-actually similar to QUEUE + PASSTHRU.
+Unlike global RSS settings used by other DPDK APIs, unsetting the ``types``
+field does not disable RSS in a flow rule. Doing so instead requests safe
+unspecified "best-effort" settings from the underlying PMD, which depending
+on the flow rule, may result in anything ranging from empty (single queue)
+to all-inclusive RSS.
-- Non-terminating by default.
+Note: RSS hash result is stored in the ``hash.rss`` mbuf field which
+overlaps ``hash.fdir.lo``. Since `Action: MARK`_ sets the ``hash.fdir.hi``
+field only, both can be requested simultaneously.
-.. _table_rte_flow_action_dup:
+Also, regarding packet encapsulation ``level``:
-.. table:: DUP
+- ``0`` requests the default behavior. Depending on the packet type, it can
+ mean outermost, innermost, anything in between or even no RSS.
- +-----------+------------------------------------+
- | Field | Value |
- +===========+====================================+
- | ``index`` | queue index to duplicate packet to |
- +-----------+------------------------------------+
+ It basically stands for the innermost encapsulation level RSS can be
+ performed on according to PMD and device capabilities.
-Action: ``RSS``
-^^^^^^^^^^^^^^^
+- ``1`` requests RSS to be performed on the outermost packet encapsulation
+ level.
-Similar to QUEUE, except RSS is additionally performed on packets to spread
-them among several queues according to the provided parameters.
+- ``2`` and subsequent values request RSS to be performed on the specified
+ inner packet encapsulation level, from outermost to innermost (lower to
+ higher values).
-Note: RSS hash result is stored in the ``hash.rss`` mbuf field which
-overlaps ``hash.fdir.lo``. Since `Action: MARK`_ sets the ``hash.fdir.hi``
-field only, both can be requested simultaneously.
+Values other than ``0`` are not necessarily supported.
-- Terminating by default.
+Requesting a specific RSS level on unrecognized traffic results in undefined
+behavior. For predictable results, it is recommended to make the flow rule
+pattern match packet headers up to the requested encapsulation level so that
+only matching traffic goes through.
.. _table_rte_flow_action_rss:
.. table:: RSS
- +--------------+------------------------------+
- | Field | Value |
- +==============+==============================+
- | ``rss_conf`` | RSS parameters |
- +--------------+------------------------------+
- | ``num`` | number of entries in queue[] |
- +--------------+------------------------------+
- | ``queue[]`` | queue indices to use |
- +--------------+------------------------------+
+ +---------------+---------------------------------------------+
+ | Field | Value |
+ +===============+=============================================+
+ | ``func`` | RSS hash function to apply |
+ +---------------+---------------------------------------------+
+ | ``level`` | encapsulation level for ``types`` |
+ +---------------+---------------------------------------------+
+ | ``types`` | specific RSS hash types (see ``ETH_RSS_*``) |
+ +---------------+---------------------------------------------+
+ | ``key_len`` | hash key length in bytes |
+ +---------------+---------------------------------------------+
+ | ``queue_num`` | number of entries in ``queue`` |
+ +---------------+---------------------------------------------+
+ | ``key`` | hash key |
+ +---------------+---------------------------------------------+
+ | ``queue`` | queue indices to use |
+ +---------------+---------------------------------------------+
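+
+A minimal action configuration sketch spreading matched traffic over two
+queues with best-effort hash settings:
+
+.. code-block:: c
+
+    uint16_t queues[] = { 0, 1 };
+    struct rte_flow_action_rss rss = {
+        .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+        .level = 0,              /* default encapsulation level */
+        .types = ETH_RSS_IP,
+        .key_len = 0,            /* let the PMD pick a default key */
+        .key = NULL,
+        .queue_num = 2,
+        .queue = queues,
+    };
+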
Action: ``PF``
^^^^^^^^^^^^^^
-Redirects packets to the physical function (PF) of the current device.
+Directs matching traffic to the physical function (PF) of the current
+device.
+
+See `Item: PF`_.
- No configurable properties.
-- Terminating by default.
.. _table_rte_flow_action_pf:
@@ -1370,14 +1626,14 @@ Redirects packets to the physical function (PF) of the current device.
Action: ``VF``
^^^^^^^^^^^^^^
-Redirects packets to a virtual function (VF) of the current device.
+Directs matching traffic to a given virtual function of the current device.
Packets matched by a VF pattern item can be redirected to their original VF
ID instead of the specified one. This parameter may not be available and is
not guaranteed to work properly if the VF part is matched by a prior flow
rule or if packets are not addressed to a VF in the first place.
-- Terminating by default.
+See `Item: VF`_.
.. _table_rte_flow_action_vf:
@@ -1388,9 +1644,47 @@ rule or if packets are not addressed to a VF in the first place.
+==============+================================+
| ``original`` | use original VF ID if possible |
+--------------+--------------------------------+
- | ``vf`` | VF ID to redirect packets to |
+ | ``id`` | VF ID |
+--------------+--------------------------------+
+Action: ``PHY_PORT``
+^^^^^^^^^^^^^^^^^^^^
+
+Directs matching traffic to a given physical port index of the underlying
+device.
+
+See `Item: PHY_PORT`_.
+
+.. _table_rte_flow_action_phy_port:
+
+.. table:: PHY_PORT
+
+ +--------------+-------------------------------------+
+ | Field | Value |
+ +==============+=====================================+
+ | ``original`` | use original port index if possible |
+ +--------------+-------------------------------------+
+ | ``index`` | physical port index |
+ +--------------+-------------------------------------+
+
+Action: ``PORT_ID``
+^^^^^^^^^^^^^^^^^^^
+Directs matching traffic to a given DPDK port ID.
+
+See `Item: PORT_ID`_.
+
+.. _table_rte_flow_action_port_id:
+
+.. table:: PORT_ID
+
+ +--------------+---------------------------------------+
+ | Field | Value |
+ +==============+=======================================+
+ | ``original`` | use original DPDK port ID if possible |
+ +--------------+---------------------------------------+
+ | ``id`` | DPDK port ID |
+ +--------------+---------------------------------------+
+
Action: ``METER``
^^^^^^^^^^^^^^^^^
@@ -1402,8 +1696,6 @@ action parameter. More than one flow can use the same MTR object through
the meter action. The MTR object can be further updated or queried using
the rte_mtr* API.
-- Non-terminating by default.
-
.. _table_rte_flow_action_meter:
.. table:: METER
@@ -1439,8 +1731,6 @@ direction.
Multiple flows can be configured to use the same security session.
-- Non-terminating by default.
-
.. _table_rte_flow_action_security:
.. table:: SECURITY
@@ -1486,6 +1776,306 @@ fields in the pattern items.
| 1 | END |
+-------+----------+
+Action: ``OF_SET_MPLS_TTL``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_SET_MPLS_TTL`` ("MPLS TTL") as defined by the `OpenFlow
+Switch Specification`_.
+
+.. _table_rte_flow_action_of_set_mpls_ttl:
+
+.. table:: OF_SET_MPLS_TTL
+
+ +--------------+----------+
+ | Field | Value |
+ +==============+==========+
+ | ``mpls_ttl`` | MPLS TTL |
+ +--------------+----------+
+
+Action: ``OF_DEC_MPLS_TTL``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_DEC_MPLS_TTL`` ("decrement MPLS TTL") as defined by the
+`OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_dec_mpls_ttl:
+
+.. table:: OF_DEC_MPLS_TTL
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``OF_SET_NW_TTL``
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_SET_NW_TTL`` ("IP TTL") as defined by the `OpenFlow
+Switch Specification`_.
+
+.. _table_rte_flow_action_of_set_nw_ttl:
+
+.. table:: OF_SET_NW_TTL
+
+ +------------+--------+
+ | Field | Value |
+ +============+========+
+ | ``nw_ttl`` | IP TTL |
+ +------------+--------+
+
+Action: ``OF_DEC_NW_TTL``
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_DEC_NW_TTL`` ("decrement IP TTL") as defined by the
+`OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_dec_nw_ttl:
+
+.. table:: OF_DEC_NW_TTL
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``OF_COPY_TTL_OUT``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_COPY_TTL_OUT`` ("copy TTL "outwards" -- from
+next-to-outermost to outermost") as defined by the `OpenFlow Switch
+Specification`_.
+
+.. _table_rte_flow_action_of_copy_ttl_out:
+
+.. table:: OF_COPY_TTL_OUT
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``OF_COPY_TTL_IN``
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_COPY_TTL_IN`` ("copy TTL "inwards" -- from outermost to
+next-to-outermost") as defined by the `OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_copy_ttl_in:
+
+.. table:: OF_COPY_TTL_IN
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``OF_POP_VLAN``
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_POP_VLAN`` ("pop the outer VLAN tag") as defined
+by the `OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_pop_vlan:
+
+.. table:: OF_POP_VLAN
+
+ +---------------+
+ | Field |
+ +===============+
+ | no properties |
+ +---------------+
+
+Action: ``OF_PUSH_VLAN``
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_PUSH_VLAN`` ("push a new VLAN tag") as defined by the
+`OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_push_vlan:
+
+.. table:: OF_PUSH_VLAN
+
+ +---------------+-----------+
+ | Field | Value |
+ +===============+===========+
+ | ``ethertype`` | EtherType |
+ +---------------+-----------+
+
+Action: ``OF_SET_VLAN_VID``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_SET_VLAN_VID`` ("set the 802.1q VLAN id") as defined by
+the `OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_set_vlan_vid:
+
+.. table:: OF_SET_VLAN_VID
+
+ +--------------+---------+
+ | Field | Value |
+ +==============+=========+
+ | ``vlan_vid`` | VLAN id |
+ +--------------+---------+
+
+Action: ``OF_SET_VLAN_PCP``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_SET_VLAN_PCP`` ("set the 802.1q priority") as defined by
+the `OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_set_vlan_pcp:
+
+.. table:: OF_SET_VLAN_PCP
+
+ +--------------+---------------+
+ | Field | Value |
+ +==============+===============+
+ | ``vlan_pcp`` | VLAN priority |
+ +--------------+---------------+
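+
+These OpenFlow-style VLAN actions are typically combined in a single action
+list. Below is a minimal C sketch, assuming a rule that pushes an 802.1q
+tag, sets its VID and queues the result; the TPID, VID and queue index
+values are placeholders:
+
+.. code-block:: c
+
+   struct rte_flow_action_of_push_vlan push = {
+       .ethertype = rte_cpu_to_be_16(0x8100), /* 802.1q TPID */
+   };
+   struct rte_flow_action_of_set_vlan_vid vid = {
+       .vlan_vid = rte_cpu_to_be_16(42), /* placeholder VLAN id */
+   };
+   struct rte_flow_action_queue queue = { .index = 6 };
+   struct rte_flow_action actions[] = {
+       { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
+       { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
+       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+       { .type = RTE_FLOW_ACTION_TYPE_END },
+   };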
+
+Action: ``OF_POP_MPLS``
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_POP_MPLS`` ("pop the outer MPLS tag") as defined by the
+`OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_pop_mpls:
+
+.. table:: OF_POP_MPLS
+
+ +---------------+-----------+
+ | Field | Value |
+ +===============+===========+
+ | ``ethertype`` | EtherType |
+ +---------------+-----------+
+
+Action: ``OF_PUSH_MPLS``
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Implements ``OFPAT_PUSH_MPLS`` ("push a new MPLS tag") as defined by the
+`OpenFlow Switch Specification`_.
+
+.. _table_rte_flow_action_of_push_mpls:
+
+.. table:: OF_PUSH_MPLS
+
+ +---------------+-----------+
+ | Field | Value |
+ +===============+===========+
+ | ``ethertype`` | EtherType |
+ +---------------+-----------+
+
+Action: ``VXLAN_ENCAP``
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Performs a VXLAN encapsulation action by encapsulating the matched flow in a
+VXLAN tunnel as defined in the ``rte_flow_action_vxlan_encap`` flow item
+definition.
+
+This action modifies the payload of matched flows. The flow definition specified
+in the ``rte_flow_action_vxlan_encap`` action structure must define a valid
+VXLAN network overlay which conforms with RFC 7348 (Virtual eXtensible Local
+Area Network (VXLAN): A Framework for Overlaying Virtualized Layer 2 Networks
+over Layer 3 Networks). The pattern must be terminated with the
+``RTE_FLOW_ITEM_TYPE_END`` item type.
+
+.. _table_rte_flow_action_vxlan_encap:
+
+.. table:: VXLAN_ENCAP
+
+ +----------------+-------------------------------------+
+ | Field | Value |
+ +================+=====================================+
+ | ``definition`` | Tunnel end-point overlay definition |
+ +----------------+-------------------------------------+
+
+.. _table_rte_flow_action_vxlan_encap_example:
+
+.. table:: IPv4 VXLAN flow pattern example.
+
+ +-------+----------+
+ | Index | Item |
+ +=======+==========+
+ | 0 | Ethernet |
+ +-------+----------+
+ | 1 | IPv4 |
+ +-------+----------+
+ | 2 | UDP |
+ +-------+----------+
+ | 3 | VXLAN |
+ +-------+----------+
+ | 4 | END |
+ +-------+----------+
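+
+The table above maps to a ``definition`` array as sketched below; the header
+field values (left zeroed here) and the VNI are placeholders to be filled in
+by the application:
+
+.. code-block:: c
+
+   static struct rte_flow_item_eth l2;   /* outer Ethernet header */
+   static struct rte_flow_item_ipv4 l3;  /* outer IPv4 header */
+   static struct rte_flow_item_udp l4;   /* outer UDP header */
+   static struct rte_flow_item_vxlan vxlan = {
+       .vni = { 0, 0, 42 },              /* placeholder VNI */
+   };
+   static struct rte_flow_item vxlan_def[] = {
+       { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &l2 },
+       { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &l3 },
+       { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &l4 },
+       { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
+       { .type = RTE_FLOW_ITEM_TYPE_END },
+   };
+   static struct rte_flow_action_vxlan_encap encap = {
+       .definition = vxlan_def,
+   };
+
+The same layout applies to ``NVGRE_ENCAP`` below, with an NVGRE item in
+place of the UDP and VXLAN pair.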
+
+Action: ``VXLAN_DECAP``
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Performs a decapsulation action by stripping all headers of the VXLAN tunnel
+network overlay from the matched flow.
+
+The flow item pattern defined for the flow rule in which a ``VXLAN_DECAP``
+action is specified must define a valid VXLAN tunnel as per RFC 7348. If the
+flow pattern does not specify a valid VXLAN tunnel, then a
+``RTE_FLOW_ERROR_TYPE_ACTION`` error should be returned.
+
+This action modifies the payload of matched flows.
+
+Action: ``NVGRE_ENCAP``
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Performs a NVGRE encapsulation action by encapsulating the matched flow in a
+NVGRE tunnel as defined in the ``rte_flow_action_nvgre_encap`` flow item
+definition.
+
+This action modifies the payload of matched flows. The flow definition specified
+in the ``rte_flow_action_nvgre_encap`` action structure must define a valid
+NVGRE network overlay which conforms with RFC 7637 (NVGRE: Network
+Virtualization Using Generic Routing Encapsulation). The pattern must be
+terminated with the ``RTE_FLOW_ITEM_TYPE_END`` item type.
+
+.. _table_rte_flow_action_nvgre_encap:
+
+.. table:: NVGRE_ENCAP
+
+ +----------------+-------------------------------------+
+ | Field | Value |
+ +================+=====================================+
+ | ``definition`` | NVGRE end-point overlay definition |
+ +----------------+-------------------------------------+
+
+.. _table_rte_flow_action_nvgre_encap_example:
+
+.. table:: IPv4 NVGRE flow pattern example.
+
+ +-------+----------+
+ | Index | Item |
+ +=======+==========+
+ | 0 | Ethernet |
+ +-------+----------+
+ | 1 | IPv4 |
+ +-------+----------+
+ | 2 | NVGRE |
+ +-------+----------+
+ | 3 | END |
+ +-------+----------+
+
+Action: ``NVGRE_DECAP``
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Performs a decapsulation action by stripping all headers of the NVGRE tunnel
+network overlay from the matched flow.
+
+The flow item pattern defined for the flow rule in which a ``NVGRE_DECAP``
+action is specified must define a valid NVGRE tunnel as per RFC 7637. If the
+flow pattern does not specify a valid NVGRE tunnel, then a
+``RTE_FLOW_ERROR_TYPE_ACTION`` error should be returned.
+
+This action modifies the payload of matched flows.
+
Negative types
~~~~~~~~~~~~~~
@@ -1681,7 +2271,7 @@ definition.
int
rte_flow_query(uint16_t port_id,
struct rte_flow *flow,
- enum rte_flow_action_type action,
+ const struct rte_flow_action *action,
void *data,
struct rte_flow_error *error);
@@ -1689,7 +2279,7 @@ Arguments:
- ``port_id``: port identifier of Ethernet device.
- ``flow``: flow rule handle to query.
-- ``action``: action type to query.
+- ``action``: action to query; this must match the prototype from the flow rule.
- ``data``: pointer to storage for the associated query data type.
- ``error``: perform verbose error reporting if not NULL. PMDs initialize
this structure in case of error only.
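+
+For example, a ``COUNT`` action could be queried as sketched below
+(``port_id`` and ``flow`` are assumed to exist, and the flow rule must
+contain an identical ``COUNT`` action):
+
+.. code-block:: c
+
+   struct rte_flow_action_count count = { .id = 0 };
+   struct rte_flow_action action = {
+       .type = RTE_FLOW_ACTION_TYPE_COUNT,
+       .conf = &count,
+   };
+   struct rte_flow_query_count stats = { .reset = 1 };
+   struct rte_flow_error error;
+
+   if (rte_flow_query(port_id, flow, &action, &stats, &error) == 0)
+       printf("hits: %" PRIu64 "\n", stats.hits); /* needs <inttypes.h> */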
@@ -2014,9 +2604,6 @@ Unsupported actions
and tagging (`Action: MARK`_ or `Action: FLAG`_) may be implemented in
software as long as the target queue is used by a single rule.
-- A rule specifying both `Action: DUP`_ + `Action: QUEUE`_ may be translated
- to two hidden rules combining `Action: QUEUE`_ and `Action: PASSTHRU`_.
-
- When a single target queue is provided, `Action: RSS`_ can also be
implemented through `Action: QUEUE`_.
@@ -2070,297 +2657,4 @@ Future evolutions
- Optional software fallback when PMDs are unable to handle requested flow
rules so applications do not have to implement their own.
-API migration
--------------
-
-Exhaustive list of deprecated filter types (normally prefixed with
-*RTE_ETH_FILTER_*) found in ``rte_eth_ctrl.h`` and methods to convert them
-to *rte_flow* rules.
-
-``MACVLAN`` to ``ETH`` → ``VF``, ``PF``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*MACVLAN* can be translated to a basic `Item: ETH`_ flow rule with a
-terminating `Action: VF`_ or `Action: PF`_.
-
-.. _table_rte_flow_migration_macvlan:
-
-.. table:: MACVLAN conversion
-
- +--------------------------+---------+
- | Pattern | Actions |
- +===+=====+==========+=====+=========+
- | 0 | ETH | ``spec`` | any | VF, |
- | | +----------+-----+ PF |
- | | | ``last`` | N/A | |
- | | +----------+-----+ |
- | | | ``mask`` | any | |
- +---+-----+----------+-----+---------+
- | 1 | END | END |
- +---+----------------------+---------+
-
-``ETHERTYPE`` to ``ETH`` → ``QUEUE``, ``DROP``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*ETHERTYPE* is basically an `Item: ETH`_ flow rule with a terminating
-`Action: QUEUE`_ or `Action: DROP`_.
-
-.. _table_rte_flow_migration_ethertype:
-
-.. table:: ETHERTYPE conversion
-
- +--------------------------+---------+
- | Pattern | Actions |
- +===+=====+==========+=====+=========+
- | 0 | ETH | ``spec`` | any | QUEUE, |
- | | +----------+-----+ DROP |
- | | | ``last`` | N/A | |
- | | +----------+-----+ |
- | | | ``mask`` | any | |
- +---+-----+----------+-----+---------+
- | 1 | END | END |
- +---+----------------------+---------+
-
-``FLEXIBLE`` to ``RAW`` → ``QUEUE``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*FLEXIBLE* can be translated to one `Item: RAW`_ pattern with a terminating
-`Action: QUEUE`_ and a defined priority level.
-
-.. _table_rte_flow_migration_flexible:
-
-.. table:: FLEXIBLE conversion
-
- +--------------------------+---------+
- | Pattern | Actions |
- +===+=====+==========+=====+=========+
- | 0 | RAW | ``spec`` | any | QUEUE |
- | | +----------+-----+ |
- | | | ``last`` | N/A | |
- | | +----------+-----+ |
- | | | ``mask`` | any | |
- +---+-----+----------+-----+---------+
- | 1 | END | END |
- +---+----------------------+---------+
-
-``SYN`` to ``TCP`` → ``QUEUE``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*SYN* is a `Item: TCP`_ rule with only the ``syn`` bit enabled and masked,
-and a terminating `Action: QUEUE`_.
-
-Priority level can be set to simulate the high priority bit.
-
-.. _table_rte_flow_migration_syn:
-
-.. table:: SYN conversion
-
- +-----------------------------------+---------+
- | Pattern | Actions |
- +===+======+==========+=============+=========+
- | 0 | ETH | ``spec`` | unset | QUEUE |
- | | +----------+-------------+ |
- | | | ``last`` | unset | |
- | | +----------+-------------+ |
- | | | ``mask`` | unset | |
- +---+------+----------+-------------+---------+
- | 1 | IPV4 | ``spec`` | unset | END |
- | | +----------+-------------+ |
- | | | ``mask`` | unset | |
- | | +----------+-------------+ |
- | | | ``mask`` | unset | |
- +---+------+----------+---------+---+ |
- | 2 | TCP | ``spec`` | ``syn`` | 1 | |
- | | +----------+---------+---+ |
- | | | ``mask`` | ``syn`` | 1 | |
- +---+------+----------+---------+---+ |
- | 3 | END | |
- +---+-------------------------------+---------+
-
-``NTUPLE`` to ``IPV4``, ``TCP``, ``UDP`` → ``QUEUE``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*NTUPLE* is similar to specifying an empty L2, `Item: IPV4`_ as L3 with
-`Item: TCP`_ or `Item: UDP`_ as L4 and a terminating `Action: QUEUE`_.
-
-A priority level can be specified as well.
-
-.. _table_rte_flow_migration_ntuple:
-
-.. table:: NTUPLE conversion
-
- +-----------------------------+---------+
- | Pattern | Actions |
- +===+======+==========+=======+=========+
- | 0 | ETH | ``spec`` | unset | QUEUE |
- | | +----------+-------+ |
- | | | ``last`` | unset | |
- | | +----------+-------+ |
- | | | ``mask`` | unset | |
- +---+------+----------+-------+---------+
- | 1 | IPV4 | ``spec`` | any | END |
- | | +----------+-------+ |
- | | | ``last`` | unset | |
- | | +----------+-------+ |
- | | | ``mask`` | any | |
- +---+------+----------+-------+ |
- | 2 | TCP, | ``spec`` | any | |
- | | UDP +----------+-------+ |
- | | | ``last`` | unset | |
- | | +----------+-------+ |
- | | | ``mask`` | any | |
- +---+------+----------+-------+ |
- | 3 | END | |
- +---+-------------------------+---------+
-
-``TUNNEL`` to ``ETH``, ``IPV4``, ``IPV6``, ``VXLAN`` (or other) → ``QUEUE``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*TUNNEL* matches common IPv4 and IPv6 L3/L4-based tunnel types.
-
-In the following table, `Item: ANY`_ is used to cover the optional L4.
-
-.. _table_rte_flow_migration_tunnel:
-
-.. table:: TUNNEL conversion
-
- +-------------------------------------------------------+---------+
- | Pattern | Actions |
- +===+==========================+==========+=============+=========+
- | 0 | ETH | ``spec`` | any | QUEUE |
- | | +----------+-------------+ |
- | | | ``last`` | unset | |
- | | +----------+-------------+ |
- | | | ``mask`` | any | |
- +---+--------------------------+----------+-------------+---------+
- | 1 | IPV4, IPV6 | ``spec`` | any | END |
- | | +----------+-------------+ |
- | | | ``last`` | unset | |
- | | +----------+-------------+ |
- | | | ``mask`` | any | |
- +---+--------------------------+----------+-------------+ |
- | 2 | ANY | ``spec`` | any | |
- | | +----------+-------------+ |
- | | | ``last`` | unset | |
- | | +----------+---------+---+ |
- | | | ``mask`` | ``num`` | 0 | |
- +---+--------------------------+----------+---------+---+ |
- | 3 | VXLAN, GENEVE, TEREDO, | ``spec`` | any | |
- | | NVGRE, GRE, ... +----------+-------------+ |
- | | | ``last`` | unset | |
- | | +----------+-------------+ |
- | | | ``mask`` | any | |
- +---+--------------------------+----------+-------------+ |
- | 4 | END | |
- +---+---------------------------------------------------+---------+
-
-``FDIR`` to most item types → ``QUEUE``, ``DROP``, ``PASSTHRU``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*FDIR* is more complex than any other type, there are several methods to
-emulate its functionality. It is summarized for the most part in the table
-below.
-
-A few features are intentionally not supported:
-
-- The ability to configure the matching input set and masks for the entire
- device, PMDs should take care of it automatically according to the
- requested flow rules.
-
- For example if a device supports only one bit-mask per protocol type,
- source/address IPv4 bit-masks can be made immutable by the first created
- rule. Subsequent IPv4 or TCPv4 rules can only be created if they are
- compatible.
-
- Note that only protocol bit-masks affected by existing flow rules are
- immutable, others can be changed later. They become mutable again after
- the related flow rules are destroyed.
-
-- Returning four or eight bytes of matched data when using flex bytes
- filtering. Although a specific action could implement it, it conflicts
- with the much more useful 32 bits tagging on devices that support it.
-
-- Side effects on RSS processing of the entire device. Flow rules that
- conflict with the current device configuration should not be
- allowed. Similarly, device configuration should not be allowed when it
- affects existing flow rules.
-
-- Device modes of operation. "none" is unsupported since filtering cannot be
- disabled as long as a flow rule is present.
-
-- "MAC VLAN" or "tunnel" perfect matching modes should be automatically set
- according to the created flow rules.
-
-- Signature mode of operation is not defined but could be handled through
- "FUZZY" item.
-
-.. _table_rte_flow_migration_fdir:
-
-.. table:: FDIR conversion
-
- +----------------------------------------+-----------------------+
- | Pattern | Actions |
- +===+===================+==========+=====+=======================+
- | 0 | ETH, RAW | ``spec`` | any | QUEUE, DROP, PASSTHRU |
- | | +----------+-----+ |
- | | | ``last`` | N/A | |
- | | +----------+-----+ |
- | | | ``mask`` | any | |
- +---+-------------------+----------+-----+-----------------------+
- | 1 | IPV4, IPv6 | ``spec`` | any | MARK |
- | | +----------+-----+ |
- | | | ``last`` | N/A | |
- | | +----------+-----+ |
- | | | ``mask`` | any | |
- +---+-------------------+----------+-----+-----------------------+
- | 2 | TCP, UDP, SCTP | ``spec`` | any | END |
- | | +----------+-----+ |
- | | | ``last`` | N/A | |
- | | +----------+-----+ |
- | | | ``mask`` | any | |
- +---+-------------------+----------+-----+ |
- | 3 | VF, PF, FUZZY | ``spec`` | any | |
- | | (optional) +----------+-----+ |
- | | | ``last`` | N/A | |
- | | +----------+-----+ |
- | | | ``mask`` | any | |
- +---+-------------------+----------+-----+ |
- | 4 | END | |
- +---+------------------------------------+-----------------------+
-
-``HASH``
-~~~~~~~~
-
-There is no counterpart to this filter type because it translates to a
-global device setting instead of a pattern item. Device settings are
-automatically set according to the created flow rules.
-
-``L2_TUNNEL`` to ``VOID`` → ``VXLAN`` (or others)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-All packets are matched. This type alters incoming packets to encapsulate
-them in a chosen tunnel type, optionally redirect them to a VF as well.
-
-The destination pool for tag based forwarding can be emulated with other
-flow rules using `Action: DUP`_.
-
-.. _table_rte_flow_migration_l2tunnel:
-
-.. table:: L2_TUNNEL conversion
-
- +---------------------------+--------------------+
- | Pattern | Actions |
- +===+======+==========+=====+====================+
- | 0 | VOID | ``spec`` | N/A | VXLAN, GENEVE, ... |
- | | | | | |
- | | | | | |
- | | +----------+-----+ |
- | | | ``last`` | N/A | |
- | | +----------+-----+ |
- | | | ``mask`` | N/A | |
- | | | | | |
- +---+------+----------+-----+--------------------+
- | 1 | END | VF (optional) |
- +---+ +--------------------+
- | 2 | | END |
- +---+-----------------------+--------------------+
+.. _OpenFlow Switch Specification: https://www.opennetworking.org/software-defined-standards/specifications/
diff --git a/doc/guides/prog_guide/source_org.rst b/doc/guides/prog_guide/source_org.rst
index a8f5832b..cee4ce6b 100644
--- a/doc/guides/prog_guide/source_org.rst
+++ b/doc/guides/prog_guide/source_org.rst
@@ -40,13 +40,13 @@ The lib directory contains::
+-- librte_cmdline # Command line interface helper
+-- librte_distributor # Packet distributor
+-- librte_eal # Environment abstraction layer
- +-- librte_ether # Generic interface to poll mode driver
+ +-- librte_ethdev # Generic interface to poll mode driver
+-- librte_hash # Hash library
+-- librte_ip_frag # IP fragmentation library
+-- librte_kni # Kernel NIC interface
+-- librte_kvargs # Argument parsing library
+-- librte_lpm # Longest prefix match library
- +-- librte_mbuf # Packet and control mbuf manipulation
+ +-- librte_mbuf # Packet buffer manipulation
+-- librte_mempool # Memory pool manager (fixed sized objects)
+-- librte_meter # QoS metering library
+-- librte_net # Various IP-related headers
diff --git a/doc/guides/prog_guide/switch_representation.rst b/doc/guides/prog_guide/switch_representation.rst
new file mode 100644
index 00000000..f5ee516f
--- /dev/null
+++ b/doc/guides/prog_guide/switch_representation.rst
@@ -0,0 +1,837 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 6WIND S.A.
+
+.. _switch_representation:
+
+Switch Representation within DPDK Applications
+==============================================
+
+.. contents:: :local:
+
+Introduction
+------------
+
+Network adapters with multiple physical ports and/or SR-IOV capabilities
+usually support the offload of traffic steering rules between their virtual
+functions (VFs), physical functions (PFs) and ports.
+
+Like for standard Ethernet switches, this involves a combination of
+automatic MAC learning and manual configuration. For most purposes it is
+managed by the host system and fully transparent to users and applications.
+
+On the other hand, applications typically found on hypervisors that process
+layer 2 (L2) traffic (such as OVS) need to steer traffic themselves
+according to their own criteria.
+
+Without a standard software interface to manage traffic steering rules
+between VFs, PFs and the various physical ports of a given device,
+applications cannot take advantage of these offloads; software processing is
+mandatory even for traffic which ends up re-injected into the device it
+originates from.
+
+This document describes how such steering rules can be configured through
+the DPDK flow API (**rte_flow**), with emphasis on the SR-IOV use case
+(PF/VF steering) using a single physical port for clarity; however, the same
+logic applies to any number of ports without necessarily involving SR-IOV.
+
+Port Representors
+-----------------
+
+In many cases, traffic steering rules cannot be determined in advance;
+applications usually have to process a bit of traffic in software before
+thinking about offloading specific flows to hardware.
+
+Applications therefore need the ability to receive and inject traffic to
+various device endpoints (other VFs, PFs or physical ports) before
+connecting them together. Device drivers must provide means to hook the
+"other end" of these endpoints and to refer them when configuring flow
+rules.
+
+This role is left to so-called "port representors" (also known as "VF
+representors" in the specific context of VFs), which are to DPDK what the
+Ethernet switch device driver model (**switchdev**) [1]_ is to Linux, and
+which can be thought of as a software "patch panel" front-end for applications.
+
+- DPDK port representors are implemented as additional virtual Ethernet
+ device (**ethdev**) instances, spawned on an as-needed basis through
+ configuration parameters passed to the driver of the underlying
+ device using devargs.
+
+::
+
+ -w pci:dbdf,representor=0
+ -w pci:dbdf,representor=[0-3]
+ -w pci:dbdf,representor=[0,5-11]
+
+- As virtual devices, they may be more limited than their physical
+ counterparts, for instance by exposing only a subset of device
+ configuration callbacks and/or by not necessarily having Rx/Tx capability.
+
+- Among other things, they can be used to assign MAC addresses to the
+ resource they represent.
+
+- Applications can tell port representors apart from other physical or virtual
+ ports by checking the ``dev_flags`` field within their device information
+ structure for the ``RTE_ETH_DEV_REPRESENTOR`` bit-field.
+
+.. code-block:: c
+
+ struct rte_eth_dev_info {
+ ...
+ uint32_t dev_flags; /**< Device flags */
+ ...
+ };
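+
+For instance, a minimal check could look as follows (a sketch based on the
+structure above; ``port_id`` is the port to examine and error handling is
+omitted):
+
+.. code-block:: c
+
+   struct rte_eth_dev_info dev_info;
+
+   rte_eth_dev_info_get(port_id, &dev_info);
+   if (dev_info.dev_flags & RTE_ETH_DEV_REPRESENTOR)
+       printf("port %u is a port representor\n", port_id);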
+
+- The device or group relationship of ports can be discovered using the
+ switch ``domain_id`` field within the device's switch information structure.
+ By default the switch ``domain_id`` of a port is
+ ``RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID``, indicating that the port doesn't
+ support the concept of a switch domain. Ports which do support the concept
+ are allocated a unique switch ``domain_id``, and ports within the same switch
+ domain share the same ``domain_id``. The switch ``port_id`` is used to
+ identify the port in terms of the switch, so in the case of SR-IOV devices
+ the switch ``port_id`` would represent the virtual function identifier of the
+ port.
+
+.. code-block:: c
+
+ /**
+ * Ethernet device associated switch information
+ */
+ struct rte_eth_switch_info {
+ const char *name; /**< switch name */
+ uint16_t domain_id; /**< switch domain id */
+ uint16_t port_id; /**< switch port id */
+ };
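+
+A sketch of reading this information, assuming ``switch_info`` is exposed
+through ``struct rte_eth_dev_info``:
+
+.. code-block:: c
+
+   struct rte_eth_dev_info dev_info;
+
+   rte_eth_dev_info_get(port_id, &dev_info);
+   if (dev_info.switch_info.domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
+       printf("port %u: switch domain %u, switch port %u\n", port_id,
+              dev_info.switch_info.domain_id, dev_info.switch_info.port_id);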
+
+
+.. [1] `Ethernet switch device driver model (switchdev)
+ <https://www.kernel.org/doc/Documentation/networking/switchdev.txt>`_
+
+Basic SR-IOV
+------------
+
+"Basic" in the sense that it is not managed by applications, which
+nonetheless expect traffic to flow between the various endpoints and the
+outside as if everything was linked by an Ethernet hub.
+
+The following diagram pictures a setup involving a device with one PF, two
+VFs and one shared physical port
+
+::
+
+ .-------------. .-------------. .-------------.
+ | hypervisor | | VM 1 | | VM 2 |
+ | application | | application | | application |
+ `--+----------' `----------+--' `--+----------'
+ | | |
+ .-----+-----. | |
+ | port_id 3 | | |
+ `-----+-----' | |
+ | | |
+ .-+--. .---+--. .--+---.
+ | PF | | VF 1 | | VF 2 |
+ `-+--' `---+--' `--+---'
+ | | |
+ `---------. .-----------------------' |
+ | | .-------------------------'
+ | | |
+ .--+-----+-----+--.
+ | interconnection |
+ `--------+--------'
+ |
+ .----+-----.
+ | physical |
+ | port 0 |
+ `----------'
+
+- A DPDK application running on the hypervisor owns the PF device, which is
+ arbitrarily assigned port index 3.
+
+- Both VFs are assigned to VMs and used by unknown applications; they may be
+ DPDK-based or anything else.
+
+- Interconnection is not necessarily done through a true Ethernet switch and
+ may not even exist as a separate entity. The role of this block is to show
+ that something brings PF, VFs and physical ports together and enables
+ communication between them, with a number of built-in restrictions.
+
+Subsequent sections in this document describe means for DPDK applications
+running on the hypervisor to freely assign specific flows between PF, VFs
+and physical ports based on traffic properties, by managing this
+interconnection.
+
+Controlled SR-IOV
+-----------------
+
+Initialization
+~~~~~~~~~~~~~~
+
+When a DPDK application gets assigned a PF device and is deliberately not
+started in `basic SR-IOV`_ mode, any traffic coming from physical ports is
+received by PF according to default rules, while VFs remain isolated.
+
+::
+
+ .-------------. .-------------. .-------------.
+ | hypervisor | | VM 1 | | VM 2 |
+ | application | | application | | application |
+ `--+----------' `----------+--' `--+----------'
+ | | |
+ .-----+-----. | |
+ | port_id 3 | | |
+ `-----+-----' | |
+ | | |
+ .-+--. .---+--. .--+---.
+ | PF | | VF 1 | | VF 2 |
+ `-+--' `------' `------'
+ |
+ `-----.
+ |
+ .--+----------------------.
+ | managed interconnection |
+ `------------+------------'
+ |
+ .----+-----.
+ | physical |
+ | port 0 |
+ `----------'
+
+In this mode, interconnection must be configured by the application to
+enable VF communication, for instance by explicitly directing traffic with a
+given destination MAC address to VF 1 and allowing that with the same source
+MAC address to come out of it.
+
+For this to work, hypervisor applications need a way to refer to either VF 1
+or VF 2 in addition to the PF. This is addressed by `VF representors`_.
+
+VF Representors
+~~~~~~~~~~~~~~~
+
+VF representors are virtual but standard DPDK network devices (albeit with
+limited capabilities) created by PMDs when managing a PF device.
+
+Since they represent VF instances used by other applications, configuring
+them (e.g. assigning a MAC address or setting up promiscuous mode) affects
+interconnection accordingly. If supported, they may also be used as two-way
+communication ports with VFs (assuming **switchdev** topology).
+
+
+::
+
+ .-------------. .-------------. .-------------.
+ | hypervisor | | VM 1 | | VM 2 |
+ | application | | application | | application |
+ `--+---+---+--' `----------+--' `--+----------'
+ | | | | |
+ | | `-------------------. | |
+ | `---------. | | |
+ | | | | |
+ .-----+-----. .-----+-----. .-----+-----. | |
+ | port_id 3 | | port_id 4 | | port_id 5 | | |
+ `-----+-----' `-----+-----' `-----+-----' | |
+ | | | | |
+ .-+--. .-----+-----. .-----+-----. .---+--. .--+---.
+ | PF | | VF 1 rep. | | VF 2 rep. | | VF 1 | | VF 2 |
+ `-+--' `-----+-----' `-----+-----' `---+--' `--+---'
+ | | | | |
+ | | .---------' | |
+ `-----. | | .-----------------' |
+ | | | | .---------------------'
+ | | | | |
+ .--+-------+---+---+---+--.
+ | managed interconnection |
+ `------------+------------'
+ |
+ .----+-----.
+ | physical |
+ | port 0 |
+ `----------'
+
+- VF representors are assigned arbitrary port indices 4 and 5 in the
+ hypervisor application and are respectively associated with VF 1 and VF 2.
+
+- They can't be dissociated; even if VF 1 and VF 2 were not connected,
+ representors could still be used for configuration.
+
+- In this context, port index 3 can be thought of as a representor for physical
+ port 0.
+
+As previously described, the "interconnection" block represents a logical
+concept. Interconnection occurs when hardware configuration enables traffic
+flows from one place to another (e.g. physical port 0 to VF 1) according to
+some criteria.
+
+This is discussed in more detail in `traffic steering`_.
+
+Traffic Steering
+~~~~~~~~~~~~~~~~
+
+In the following diagram, each meaningful traffic origin or endpoint as seen
+by the hypervisor application is tagged with a unique letter from A to F.
+
+::
+
+ .-------------. .-------------. .-------------.
+ | hypervisor | | VM 1 | | VM 2 |
+ | application | | application | | application |
+ `--+---+---+--' `----------+--' `--+----------'
+ | | | | |
+ | | `-------------------. | |
+ | `---------. | | |
+ | | | | |
+ .----(A)----. .----(B)----. .----(C)----. | |
+ | port_id 3 | | port_id 4 | | port_id 5 | | |
+ `-----+-----' `-----+-----' `-----+-----' | |
+ | | | | |
+ .-+--. .-----+-----. .-----+-----. .---+--. .--+---.
+ | PF | | VF 1 rep. | | VF 2 rep. | | VF 1 | | VF 2 |
+ `-+--' `-----+-----' `-----+-----' `--(D)-' `-(E)--'
+ | | | | |
+ | | .---------' | |
+ `-----. | | .-----------------' |
+ | | | | .---------------------'
+ | | | | |
+ .--+-------+---+---+---+--.
+ | managed interconnection |
+ `------------+------------'
+ |
+ .---(F)----.
+ | physical |
+ | port 0 |
+ `----------'
+
+- **A**: PF device.
+- **B**: port representor for VF 1.
+- **C**: port representor for VF 2.
+- **D**: VF 1 proper.
+- **E**: VF 2 proper.
+- **F**: physical port.
+
+Although uncommon, some devices do not enforce a one-to-one mapping between
+PF and physical ports. For instance, by default all ports of **mlx4**
+adapters are available to all their PF/VF instances, in which case
+additional ports appear next to **F** in the above diagram.
+
+Assuming no interconnection is provided by default in this mode, setting up
+a `basic SR-IOV`_ configuration involving physical port 0 could be broken
+down as:
+
+PF:
+
+- **A to F**: let everything through.
+- **F to A**: PF MAC as destination.
+
+VF 1:
+
+- **A to D**, **E to D** and **F to D**: VF 1 MAC as destination.
+- **D to A**: VF 1 MAC as source and PF MAC as destination.
+- **D to E**: VF 1 MAC as source and VF 2 MAC as destination.
+- **D to F**: VF 1 MAC as source.
+
+VF 2:
+
+- **A to E**, **D to E** and **F to E**: VF 2 MAC as destination.
+- **E to A**: VF 2 MAC as source and PF MAC as destination.
+- **E to D**: VF 2 MAC as source and VF 1 MAC as destination.
+- **E to F**: VF 2 MAC as source.
+
+Devices may additionally support advanced matching criteria such as
+IPv4/IPv6 addresses or TCP/UDP ports.
+
+The combination of matching criteria with target endpoints fits well with
+**rte_flow** [6]_, which expresses flow rules as combinations of patterns
+and actions.
+
+Enhancing **rte_flow** with the ability to make flow rules match and target
+these endpoints provides a standard interface to manage their
+interconnection without introducing new concepts and whole new API to
+implement them. This is described in `flow API (rte_flow)`_.
+
+.. [6] `Generic flow API (rte_flow)
+ <http://dpdk.org/doc/guides/prog_guide/rte_flow.html>`_
+
+Flow API (rte_flow)
+-------------------
+
+Extensions
+~~~~~~~~~~
+
+Compared to creating a brand new dedicated interface, **rte_flow** was
+deemed flexible enough to manage representor traffic only with minor
+extensions:
+
+- Using physical ports, PF, VF or port representors as targets.
+
+- Affecting traffic that is not necessarily addressed to the DPDK port ID a
+ flow rule is associated with (e.g. forcing VF traffic redirection to PF).
+
+For advanced uses:
+
+- Rule-based packet counters.
+
+- The ability to combine several identical actions for traffic duplication
+ (e.g. VF representor in addition to a physical port).
+
+- Dedicated actions for traffic encapsulation / decapsulation before
+ reaching an endpoint.
+
+Traffic Direction
+~~~~~~~~~~~~~~~~~
+
+From an application standpoint, "ingress" and "egress" flow rule attributes
+apply to the DPDK port ID they are associated with. They select a traffic
+direction for matching patterns, but have no impact on actions.
+
+When matching traffic coming from or going to a different place than the
+immediate port ID a flow rule is associated with, these attributes keep
+their meaning while applying to the chosen origin, as highlighted by the
+following diagram
+
+::
+
+ .-------------. .-------------. .-------------.
+ | hypervisor | | VM 1 | | VM 2 |
+ | application | | application | | application |
+ `--+---+---+--' `----------+--' `--+----------'
+ | | | | |
+ | | `-------------------. | |
+ | `---------. | | |
+ | ^ | ^ | ^ | |
+ | | ingress | | ingress | | ingress | |
+ | | egress | | egress | | egress | |
+ | v | v | v | |
+ .----(A)----. .----(B)----. .----(C)----. | |
+ | port_id 3 | | port_id 4 | | port_id 5 | | |
+ `-----+-----' `-----+-----' `-----+-----' | |
+ | | | | |
+ .-+--. .-----+-----. .-----+-----. .---+--. .--+---.
+ | PF | | VF 1 rep. | | VF 2 rep. | | VF 1 | | VF 2 |
+ `-+--' `-----+-----' `-----+-----' `--(D)-' `-(E)--'
+ | | | ^ | | ^
+ | | | egress | | | | egress
+ | | | ingress | | | | ingress
+ | | .---------' v | | v
+ `-----. | | .-----------------' |
+ | | | | .---------------------'
+ | | | | |
+ .--+-------+---+---+---+--.
+ | managed interconnection |
+ `------------+------------'
+ ^ |
+ ingress | |
+ egress | |
+ v |
+ .---(F)----.
+ | physical |
+ | port 0 |
+ `----------'
+
+Ingress and egress are defined as relative to the application creating the
+flow rule.
+
+For instance, matching traffic sent by VM 2 would be done through an ingress
+flow rule on VF 2 (**E**). Likewise for incoming traffic on physical port
+(**F**). This also applies to **C** and **A** respectively.
+
+Transferring Traffic
+~~~~~~~~~~~~~~~~~~~~
+
+Without Port Representors
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+`Traffic direction`_ describes how an application could match traffic coming
+from or going to a specific place reachable from a DPDK port ID. This makes
+sense when the traffic in question is normally seen (i.e. sent or received)
+by the application creating the flow rule (e.g. as in "redirect all traffic
+coming from VF 1 to local queue 6").
+
+However this does not force such traffic to take a specific route. Creating
+a flow rule on **A** matching traffic coming from **D** is only meaningful
+if it can be received by **A** in the first place; otherwise doing so simply
+has no effect.
+
+A new flow rule attribute named "transfer" is necessary for that. Combining
+it with "ingress" or "egress" and a specific origin requests a flow rule to
+be applied at the lowest level
+
+::
+
+ ingress only : ingress + transfer
+ :
+ .-------------. .-------------. : .-------------. .-------------.
+ | hypervisor | | VM 1 | : | hypervisor | | VM 1 |
+ | application | | application | : | application | | application |
+ `------+------' `--+----------' : `------+------' `--+----------'
+ | | | traffic : | | | traffic
+ .----(A)----. | v : .----(A)----. | v
+ | port_id 3 | | : | port_id 3 | |
+ `-----+-----' | : `-----+-----' |
+ | | : | ^ |
+ | | : | | traffic |
+ .-+--. .---+--. : .-+--. .---+--.
+ | PF | | VF 1 | : | PF | | VF 1 |
+ `-+--' `--(D)-' : `-+--' `--(D)-'
+ | | | traffic : | ^ | | traffic
+ | | v : | | traffic | v
+ .--+-----------+--. : .--+-----------+--.
+ | interconnection | : | interconnection |
+ `--------+--------' : `--------+--------'
+ | | traffic : |
+ | v : |
+ .---(F)----. : .---(F)----.
+ | physical | : | physical |
+ | port 0 | : | port 0 |
+ `----------' : `----------'
+
+With "ingress" only, traffic is matched on **A** thus still goes to physical
+port **F** by default
+
+
+::
+
+ testpmd> flow create 3 ingress pattern vf id is 1 / end
+ actions queue index 6 / end
+
+With "ingress + transfer", traffic is matched on **D** and is therefore
+successfully assigned to queue 6 on **A**
+
+
+::
+
+ testpmd> flow create 3 ingress transfer pattern vf id is 1 / end
+ actions queue index 6 / end
+
+
+With Port Representors
+^^^^^^^^^^^^^^^^^^^^^^
+
+When port representors exist, implicit flow rules with the "transfer"
+attribute (described in `without port representors`_) are assumed to
+exist between them and their represented resources. These may be immutable.
+
+In this case, traffic is received by default through the representor and
+neither the "transfer" attribute nor traffic origin in flow rule patterns
+are necessary. They simply have to be created on the representor port
+directly and may target a different representor as described in `PORT_ID
+action`_.
+
+Implicit traffic flow with port representor
+
+::
+
+ .-------------. .-------------.
+ | hypervisor | | VM 1 |
+ | application | | application |
+ `--+-------+--' `----------+--'
+ | | ^ | | traffic
+ | | | traffic | v
+ | `-----. |
+ | | |
+ .----(A)----. .----(B)----. |
+ | port_id 3 | | port_id 4 | |
+ `-----+-----' `-----+-----' |
+ | | |
+ .-+--. .-----+-----. .---+--.
+ | PF | | VF 1 rep. | | VF 1 |
+ `-+--' `-----+-----' `--(D)-'
+ | | |
+ .--|-------------|-----------|--.
+ | | | | |
+ | | `-----------' |
+ | | <-- traffic |
+ `--|----------------------------'
+ |
+ .---(F)----.
+ | physical |
+ | port 0 |
+ `----------'
+
+Pattern Items And Actions
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+PORT Pattern Item
+^^^^^^^^^^^^^^^^^
+
+Matches traffic originating from (ingress) or going to (egress) a physical
+port of the underlying device.
+
+Using this pattern item without specifying a port index matches the physical
+port associated with the current DPDK port ID by default. As described in
+`traffic steering`_, specifying it should rarely be needed.
+
+- Matches **F** in `traffic steering`_.
+
+PORT Action
+^^^^^^^^^^^
+
+Directs matching traffic to a given physical port index.
+
+- Targets **F** in `traffic steering`_.
+
+PORT_ID Pattern Item
+^^^^^^^^^^^^^^^^^^^^
+
+Matches traffic originating from (ingress) or going to (egress) a given DPDK
+port ID.
+
+Normally only supported if the port ID in question is known by the
+underlying PMD and related to the device the flow rule is created against.
+
+This must not be confused with the `PORT pattern item`_ which refers to the
+physical port of a device. ``PORT_ID`` refers to a ``struct rte_eth_dev``
+object on the application side (also known as "port representor" depending
+on the kind of underlying device).
+
+- Matches **A**, **B** or **C** in `traffic steering`_.
+
+PORT_ID Action
+^^^^^^^^^^^^^^
+
+Directs matching traffic to a given DPDK port ID.
+
+Same restrictions as `PORT_ID pattern item`_.
+
+- Targets **A**, **B** or **C** in `traffic steering`_.
+
+PF Pattern Item
+^^^^^^^^^^^^^^^
+
+Matches traffic originating from (ingress) or going to (egress) the physical
+function of the current device.
+
+If supported, should work even if the physical function is not managed by
+the application and thus not associated with a DPDK port ID. Its behavior is
+otherwise similar to `PORT_ID pattern item`_ using PF port ID.
+
+- Matches **A** in `traffic steering`_.
+
+PF Action
+^^^^^^^^^
+
+Directs matching traffic to the physical function of the current device.
+
+Same restrictions as `PF pattern item`_.
+
+- Targets **A** in `traffic steering`_.
+
+VF Pattern Item
+^^^^^^^^^^^^^^^
+
+Matches traffic originating from (ingress) or going to (egress) a given
+virtual function of the current device.
+
+If supported, should work even if the virtual function is not managed by
+the application and thus not associated with a DPDK port ID. Its behavior is
+otherwise similar to `PORT_ID pattern item`_ using VF port ID.
+
+Note this pattern item does not match VF representor traffic which, as
+separate entities, should be addressed through their own port IDs.
+
+- Matches **D** or **E** in `traffic steering`_.
+
+VF Action
+^^^^^^^^^
+
+Directs matching traffic to a given virtual function of the current device.
+
+Same restrictions as `VF pattern item`_.
+
+- Targets **D** or **E** in `traffic steering`_.
+
+\*_ENCAP actions
+^^^^^^^^^^^^^^^^
+
+These actions are named according to the protocol they encapsulate traffic
+with (e.g. ``VXLAN_ENCAP``) and use protocol-specific parameters (e.g. VNI
+for VXLAN).
+
+While they modify traffic and can be used multiple times (order matters),
+unlike `PORT_ID action`_ and friends, they have no impact on steering.
+
+As described in `actions order and repetition`_, this means they are useless
+if used alone in an action list: the resulting traffic gets dropped unless
+combined with either ``PASSTHRU`` or other endpoint-targeting actions.
+
+\*_DECAP actions
+^^^^^^^^^^^^^^^^
+
+They perform the reverse of `\*_ENCAP actions`_ by popping protocol headers
+from traffic instead of pushing them. They can be used multiple times as
+well.
+
+Note that using these actions on non-matching traffic results in undefined
+behavior. It is recommended to match the protocol headers to decapsulate on
+the pattern side of a flow rule in order to use these actions or otherwise
+make sure only matching traffic goes through.
+
+Actions Order and Repetition
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Flow rules are currently restricted to at most a single action of each
+supported type, performed in an unpredictable order (or all at once). To
+repeat actions in a predictable fashion, applications have to make rules
+pass-through and use priority levels.
+
+It's now clear that PMD support for chaining multiple non-terminating flow
+rules of varying priority levels is prohibitively difficult to implement
+compared to simply allowing multiple identical actions performed in a
+defined order by a single flow rule.
+
+- This change is required to support protocol encapsulation offloads and the
+ ability to perform them multiple times (e.g. VLAN then VXLAN).
+
+- It makes the ``DUP`` action redundant since multiple ``QUEUE`` actions can
+ be combined for duplication.
+
+- The (non-)terminating property of actions must be discarded. Instead, flow
+ rules themselves must be considered terminating by default (i.e. dropping
+ traffic if there is no specific target) unless a ``PASSTHRU`` action is
+ also specified.
+
+Switching Examples
+------------------
+
+This section provides practical examples based on the established testpmd
+flow command syntax [2]_, in the context described in `traffic steering`_
+
+::
+
+ .-------------. .-------------. .-------------.
+ | hypervisor | | VM 1 | | VM 2 |
+ | application | | application | | application |
+ `--+---+---+--' `----------+--' `--+----------'
+ | | | | |
+ | | `-------------------. | |
+ | `---------. | | |
+ | | | | |
+ .----(A)----. .----(B)----. .----(C)----. | |
+ | port_id 3 | | port_id 4 | | port_id 5 | | |
+ `-----+-----' `-----+-----' `-----+-----' | |
+ | | | | |
+ .-+--. .-----+-----. .-----+-----. .---+--. .--+---.
+ | PF | | VF 1 rep. | | VF 2 rep. | | VF 1 | | VF 2 |
+ `-+--' `-----+-----' `-----+-----' `--(D)-' `-(E)--'
+ | | | | |
+ | | .---------' | |
+ `-----. | | .-----------------' |
+ | | | | .---------------------'
+ | | | | |
+ .--|-------|---|---|---|--.
+ | | | `---|---' |
+ | | `-------' |
+ | `---------. |
+ `------------|------------'
+ |
+ .---(F)----.
+ | physical |
+ | port 0 |
+ `----------'
+
+By default, PF (**A**) can communicate with the physical port it is
+associated with (**F**), while VF 1 (**D**) and VF 2 (**E**) are isolated
+and restricted to communicate with the hypervisor application through their
+respective representors (**B** and **C**) if supported.
+
+Examples in subsequent sections apply to hypervisor applications only and
+are based on port representors **A**, **B** and **C**.
+
+.. [2] `Flow syntax
+ <http://dpdk.org/doc/guides/testpmd_app_ug/testpmd_funcs.html#flow-syntax>`_
+
+Associating VF 1 with Physical Port 0
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Assign all port traffic (**F**) to VF 1 (**D**) indiscriminately through
+their representors
+
+::
+
+ flow create 3 ingress pattern / end actions port_id id 4 / end
+ flow create 4 ingress pattern / end actions port_id id 3 / end
+
+A more practical example with MAC address restrictions
+
+::
+
+ flow create 3 ingress
+ pattern eth dst is {VF 1 MAC} / end
+ actions port_id id 4 / end
+
+::
+
+ flow create 4 ingress
+ pattern eth src is {VF 1 MAC} / end
+ actions port_id id 3 / end
+
+
+Sharing Broadcasts
+~~~~~~~~~~~~~~~~~~
+
+From outside to PF and VFs
+
+::
+
+ flow create 3 ingress
+ pattern eth dst is ff:ff:ff:ff:ff:ff / end
+ actions port_id id 3 / port_id id 4 / port_id id 5 / end
+
+Note ``port_id id 3`` is necessary; otherwise only VFs would receive matching
+traffic.
+
+From PF to outside and VFs
+
+::
+
+ flow create 3 egress
+ pattern eth dst is ff:ff:ff:ff:ff:ff / end
+ actions port / port_id id 4 / port_id id 5 / end
+
+From VFs to outside and PF
+
+::
+
+ flow create 4 ingress
+ pattern eth dst is ff:ff:ff:ff:ff:ff src is {VF 1 MAC} / end
+ actions port_id id 3 / port_id id 5 / end
+
+ flow create 5 ingress
+ pattern eth dst is ff:ff:ff:ff:ff:ff src is {VF 2 MAC} / end
+ actions port_id id 3 / port_id id 4 / end
+
+Similar ``33:33:*`` rules based on known MAC addresses should be added for
+IPv6 traffic.
+
+Encapsulating VF 2 Traffic in VXLAN
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Assuming pass-through flow rules are supported
+
+::
+
+ flow create 5 ingress
+ pattern eth / end
+ actions vxlan_encap vni 42 / passthru / end
+
+::
+
+ flow create 5 egress
+ pattern vxlan vni is 42 / end
+ actions vxlan_decap / passthru / end
+
+Here ``passthru`` is needed since, as described in `actions order and
+repetition`_, flow rules are otherwise terminating; if supported, a rule
+without a target endpoint will drop traffic.
+
+Without pass-through support, ingress encapsulation on the destination
+endpoint might not be supported and the action list must provide one
+
+::
+
+ flow create 5 ingress
+ pattern eth src is {VF 2 MAC} / end
+ actions vxlan_encap vni 42 / port_id id 3 / end
+
+ flow create 3 ingress
+ pattern vxlan vni is 42 / end
+ actions vxlan_decap / port_id id 5 / end
diff --git a/doc/guides/prog_guide/traffic_metering_and_policing.rst b/doc/guides/prog_guide/traffic_metering_and_policing.rst
index dc9bdd5b..90c781eb 100644
--- a/doc/guides/prog_guide/traffic_metering_and_policing.rst
+++ b/doc/guides/prog_guide/traffic_metering_and_policing.rst
@@ -28,7 +28,7 @@ The metering and policing stage typically sits on top of flow classification,
which is why the MTR objects are enabled through a special "meter" action.
The MTR objects are created and updated in their own name space (``rte_mtr``)
-within the ``librte_ether`` library. Whether an MTR object is private to a
+within the ``librte_ethdev`` library. Whether an MTR object is private to a
flow or potentially shared by several flows has to be specified at its
creation time.
diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index 18227b6a..77af4d77 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -65,14 +65,18 @@ The following is an overview of some key Vhost API functions:
* zero copy is really good for VM2VM case. For iperf between two VMs, the
boost could be above 70% (when TSO is enabled).
- * for VM2NIC case, the ``nb_tx_desc`` has to be small enough: <= 64 if virtio
- indirect feature is not enabled and <= 128 if it is enabled.
+ * For zero copy in VM2NIC case, the guest Tx used vring may be starved if
+ the PMD driver consumes the mbufs but does not release them in a timely
+ manner.
- This is because when dequeue zero copy is enabled, guest Tx used vring will
- be updated only when corresponding mbuf is freed. Thus, the nb_tx_desc
- has to be small enough so that the PMD driver will run out of available
- Tx descriptors and free mbufs timely. Otherwise, guest Tx vring would be
- starved.
+ For example, the i40e driver has an optimization to maximize NIC pipeline
+ utilization which postpones returning transmitted mbufs until only
+ tx_free_threshold free descriptors are left. The virtio Tx used ring will
+ be starved if (num_i40e_tx_desc - num_virtio_tx_desc > tx_free_threshold)
+ holds, since i40e will not return the mbufs in time.
+
+ A performance tip for tuning zero copy in the VM2NIC case is to adjust the
+ frequency of mbuf freeing (i.e. adjust the tx_free_threshold of the i40e
+ driver) to balance consumer and producer.
* Guest memory should be backended with huge pages to achieve better
performance. Using 1G page size is the best.
@@ -83,6 +87,11 @@ The following is an overview of some key Vhost API functions:
of those segments, thus the fewer the segments, the quicker we will get
the mapping. NOTE: we may speed it by using tree searching in future.
+ * Zero copy currently cannot work when using vfio-pci in IOMMU mode; this
+ is because IOMMU DMA mapping is not set up for guest memory. If the
+ vfio-pci driver has to be used, please insert the vfio-pci kernel module
+ in noiommu mode.
+
- ``RTE_VHOST_USER_IOMMU_SUPPORT``
IOMMU support will be enabled when this flag is set. It is disabled by
@@ -160,6 +169,31 @@ The following is an overview of some key Vhost API functions:
Receives (dequeues) ``count`` packets from guest, and stores them at ``pkts``.
+* ``rte_vhost_crypto_create(vid, cryptodev_id, sess_mempool, socket_id)``
+
+ As an extension of new_device(), this function adds virtio-crypto workload
+ acceleration capability to the device. All crypto workload is processed by
+ DPDK cryptodev with the device ID of ``cryptodev_id``.
+
+* ``rte_vhost_crypto_free(vid)``
+
+ Frees the memory and vhost-user message handlers created in
+ rte_vhost_crypto_create().
+
+* ``rte_vhost_crypto_fetch_requests(vid, queue_id, ops, nb_ops)``
+
+ Receives (dequeues) ``nb_ops`` virtio-crypto requests from guest, parses
+ them to DPDK Crypto Operations, and fills the ``ops`` with parsing results.
+
+* ``rte_vhost_crypto_finalize_requests(queue_id, ops, nb_ops)``
+
+ After the ``ops`` are dequeued from Cryptodev, finalizes the jobs and
+ notifies the guest(s).
+
+* ``rte_vhost_crypto_set_zero_copy(vid, option)``
+
+ Enable or disable zero copy feature of the vhost crypto backend.
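+
+Taken together, a vhost-crypto data path might look like the sketch below,
+following the call forms listed above (``vid``, the queue ids, the cryptodev
+id and the burst size are assumptions from the surrounding application):
+
+.. code-block:: c
+
+   struct rte_crypto_op *ops[BURST_SIZE];
+   uint16_t nb;
+
+   /* Parse virtio-crypto requests from the guest into crypto ops. */
+   nb = rte_vhost_crypto_fetch_requests(vid, vqid, ops, BURST_SIZE);
+   /* Process them on the cryptodev given to rte_vhost_crypto_create(). */
+   nb = rte_cryptodev_enqueue_burst(cryptodev_id, qp_id, ops, nb);
+   /* A real application polls until all enqueued ops have completed. */
+   nb = rte_cryptodev_dequeue_burst(cryptodev_id, qp_id, ops, BURST_SIZE);
+   /* Finalize the completed jobs and notify the guest(s). */
+   rte_vhost_crypto_finalize_requests(vqid, ops, nb);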
+
Vhost-user Implementations
--------------------------
@@ -214,8 +248,88 @@ the vhost device from the data plane.
When the socket connection is closed, vhost will destroy the device.
+Guest memory requirement
+------------------------
+
+* Memory pre-allocation
+
+ For non-zerocopy, guest memory pre-allocation is not a must. This can help
+ save memory. If users really want the guest memory to be pre-allocated
+ (e.g., for performance reasons), the ``-mem-prealloc`` option can be added
+ when starting QEMU. Alternatively, all memory can be locked at the vhost
+ side, which forces memory to be allocated when it is mmapped there; the
+ ``--mlockall`` option in ovs-dpdk is one example.
+
+ For zerocopy, the vhost library forces the VM memory to be pre-allocated
+ when mapping the guest memory; the memory also needs to be locked to
+ prevent pages from being swapped out to disk.
+
+* Memory sharing
+
+ Make sure the ``share=on`` QEMU option is given. vhost-user will not work
+ with a QEMU version without shared memory mapping.
+
Vhost supported vSwitch reference
---------------------------------
For more vhost details and how to support vhost in vSwitch, please refer to
the vhost example in the DPDK Sample Applications Guide.
+
+Vhost data path acceleration (vDPA)
+-----------------------------------
+
+vDPA supports a selective datapath in the vhost-user lib by enabling virtio
+ring compatible devices to serve the virtio driver directly for datapath
+acceleration.
+
+``rte_vhost_driver_attach_vdpa_device`` is used to configure the vhost device
+with accelerated backend.
+
+Also, vhost device capabilities are made configurable to accommodate various
+devices. Such capabilities include supported features, protocol features and
+queue number.
+
+Finally, a set of device ops is defined for device specific operations:
+
+* ``get_queue_num``
+
+ Called to get supported queue number of the device.
+
+* ``get_features``
+
+ Called to get supported features of the device.
+
+* ``get_protocol_features``
+
+ Called to get supported protocol features of the device.
+
+* ``dev_conf``
+
+ Called to configure the actual device when the virtio device becomes ready.
+
+* ``dev_close``
+
+ Called to close the actual device when the virtio device is stopped.
+
+* ``set_vring_state``
+
+ Called to change the state of the vring in the actual device when vring state
+ changes.
+
+* ``set_features``
+
+ Called to set the negotiated features to the device.
+
+* ``migration_done``
+
+ Called to allow the device to respond to RARP sending.
+
+* ``get_vfio_group_fd``
+
+ Called to get the VFIO group fd of the device.
+
+* ``get_vfio_device_fd``
+
+ Called to get the VFIO device fd of the device.
+
+* ``get_notify_area``
+
+ Called to get the notify area info of the queue.
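+
+A driver would typically gather these callbacks in one ops structure and
+register it with the vDPA framework; the sketch below assumes ``struct
+rte_vdpa_dev_ops`` from ``rte_vdpa.h`` and uses hypothetical ``my_*``
+callbacks:
+
+.. code-block:: c
+
+   static struct rte_vdpa_dev_ops my_vdpa_ops = {
+       .get_queue_num         = my_get_queue_num,
+       .get_features          = my_get_features,
+       .get_protocol_features = my_get_protocol_features,
+       .dev_conf              = my_dev_conf,
+       .dev_close             = my_dev_close,
+       .set_vring_state       = my_set_vring_state,
+       .set_features          = my_set_features,
+       .migration_done        = my_migration_done,
+       .get_vfio_group_fd     = my_get_vfio_group_fd,
+       .get_vfio_device_fd    = my_get_vfio_device_fd,
+       .get_notify_area       = my_get_notify_area,
+   };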
diff --git a/doc/guides/rawdevs/dpaa2_cmdif.rst b/doc/guides/rawdevs/dpaa2_cmdif.rst
new file mode 100644
index 00000000..20a60993
--- /dev/null
+++ b/doc/guides/rawdevs/dpaa2_cmdif.rst
@@ -0,0 +1,144 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 NXP
+
+NXP DPAA2 CMDIF Driver
+======================
+
+The DPAA2 CMDIF is an implementation of the rawdev API that provides
+communication between the GPP and AIOP (Firmware). This is achieved
+by using the DPCI devices exposed by MC for GPP <--> AIOP interaction.
+
+More information can be found at `NXP Official Website
+<http://www.nxp.com/products/microcontrollers-and-processors/arm-processors/qoriq-arm-processors:QORIQ-ARM>`_.
+
+Features
+--------
+
+The DPAA2 CMDIF implements the following features in the rawdev API:
+
+- Getting the object ID of the device (DPCI) using attributes
+- I/O to and from the AIOP device using DPCI
+
+Supported DPAA2 SoCs
+--------------------
+
+- LS2084A/LS2044A
+- LS2088A/LS2048A
+- LS1088A/LS1048A
+
+Prerequisites
+-------------
+
+There are three main prerequisites for executing DPAA2 CMDIF on a DPAA2
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+ For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.3-2017.02/aarch64-linux-gnu>`_.
+
+2. **Linux Kernel**
+
+ It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
+
+3. **Root file system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
+ from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
+
+As an alternative method, DPAA2 CMDIF can also be executed using images provided
+as part of the SDK from NXP. The SDK includes all the above prerequisites necessary
+to bring up a DPAA2 board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+ NXP Linux software development kit (SDK) includes support for family
+ of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+ and corresponding boards.
+
+ It includes the Linux board support packages (BSPs) for NXP SoCs,
+ a fully operational tool chain, kernel and board specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+- **DPDK Extra Scripts**
+
+ DPAA2 based resources can be configured easily with the help of ready scripts
+ as provided in the DPDK Extra repository.
+
+ `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+- NXP SDK **2.0+**.
+- MC Firmware version **10.0.0** and higher.
+- Supported architectures: **arm64 LE**.
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to set up the basic DPDK environment.
+
+.. note::
+
+ Some part of fslmc bus code (mc flib - object library) routines are
+ dual licensed (BSD & GPLv2).
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+
+- ``CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV`` (default ``y``)
+
+ Toggle compilation of the ``lrte_pmd_dpaa2_cmdif`` driver.
+
+Enabling logs
+-------------
+
+For enabling logs, use the following EAL parameter:
+
+.. code-block:: console
+
+ ./your_cmdif_application <EAL args> --log-level=pmd.raw.dpaa2.cmdif,<level>
+
+Using ``pmd.raw.dpaa2.cmdif`` as the log matching criterion, all CMDIF PMD
+logs at or below the given logging ``level`` are enabled.
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile the DPAA2 CMDIF PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-dpaa2-linuxapp-gcc install
+
+Initialization
+--------------
+
+The DPAA2 CMDIF is exposed as a vdev device which consists of dpci devices.
+On EAL initialization, dpci devices will be probed, and then the vdev device
+can be created from the application code by
+
+* Invoking ``rte_vdev_init("dpaa2_dpci")`` from the application
+
+* Using ``--vdev="dpaa2_dpci"`` in the EAL options, which will call
+ rte_vdev_init() internally
+
+Example:
+
+.. code-block:: console
+
+ ./your_cmdif_application <EAL args> --vdev="dpaa2_dpci"
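+
+The programmatic path is a single call (a sketch; error handling shortened):
+
+.. code-block:: c
+
+   #include <rte_bus_vdev.h>
+
+   /* Equivalent of passing --vdev="dpaa2_dpci" on the EAL command line. */
+   if (rte_vdev_init("dpaa2_dpci", NULL) != 0)
+       rte_exit(EXIT_FAILURE, "cannot create dpaa2_dpci vdev\n");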
+
+Platform Requirement
+~~~~~~~~~~~~~~~~~~~~
+
+DPAA2 drivers for DPDK can only work on NXP SoCs as listed in the
+``Supported DPAA2 SoCs`` section.
diff --git a/doc/guides/rawdevs/dpaa2_qdma.rst b/doc/guides/rawdevs/dpaa2_qdma.rst
new file mode 100644
index 00000000..b9bc4ec6
--- /dev/null
+++ b/doc/guides/rawdevs/dpaa2_qdma.rst
@@ -0,0 +1,140 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 NXP
+
+NXP DPAA2 QDMA Driver
+=====================
+
+The DPAA2 QDMA is an implementation of the rawdev API that provides means
+to initiate a DMA transaction from the CPU. The initiated DMA is performed
+without the CPU being involved in the actual DMA transaction. This is
+achieved by using the DPDMAI device exposed by MC.
+
+More information can be found at `NXP Official Website
+<http://www.nxp.com/products/microcontrollers-and-processors/arm-processors/qoriq-arm-processors:QORIQ-ARM>`_.
+
+Features
+--------
+
+The DPAA2 QDMA implements the following features in the rawdev API:
+
+- Supports issuing DMA of data within memory without hogging the CPU while
+  performing the DMA operation.
+- Supports optionally requesting the status of a DMA transaction on a
+  per-operation basis.
+
+Supported DPAA2 SoCs
+--------------------
+
+- LS2084A/LS2044A
+- LS2088A/LS2048A
+- LS1088A/LS1048A
+
+Prerequisites
+-------------
+
+There are three main prerequisites for executing DPAA2 QDMA on a DPAA2
+compatible board:
+
+1. **ARM 64 Tool Chain**
+
+ For example, the `*aarch64* Linaro Toolchain <https://releases.linaro.org/components/toolchain/binaries/6.3-2017.02/aarch64-linux-gnu>`_.
+
+2. **Linux Kernel**
+
+ It can be obtained from `NXP's Github hosting <https://github.com/qoriq-open-source/linux>`_.
+
+3. **Root file system**
+
+ Any *aarch64* supporting filesystem can be used. For example,
+ Ubuntu 15.10 (Wily) or 16.04 LTS (Xenial) userland which can be obtained
+ from `here <http://cdimage.ubuntu.com/ubuntu-base/releases/16.04/release/ubuntu-base-16.04.1-base-arm64.tar.gz>`_.
+
+As an alternative method, DPAA2 QDMA can also be executed using images provided
+as part of SDK from NXP. The SDK includes all the above prerequisites necessary
+to bring up a DPAA2 board.
+
+The following dependencies are not part of DPDK and must be installed
+separately:
+
+- **NXP Linux SDK**
+
+  The NXP Linux software development kit (SDK) includes support for the
+  family of QorIQ® ARM-Architecture-based system on chip (SoC) processors
+  and the corresponding boards.
+
+  It includes the Linux board support packages (BSPs) for NXP SoCs,
+  a fully operational tool chain, and kernel and board-specific modules.
+
+ SDK and related information can be obtained from: `NXP QorIQ SDK <http://www.nxp.com/products/software-and-tools/run-time-software/linux-sdk/linux-sdk-for-qoriq-processors:SDKLINUX>`_.
+
+- **DPDK Extra Scripts**
+
+  DPAA2-based resources can be configured easily with the help of
+  ready-to-use scripts provided in the DPDK Extras repository.
+
+ `DPDK Extras Scripts <https://github.com/qoriq-open-source/dpdk-extras>`_.
+
+Currently supported by DPDK:
+
+- NXP LSDK **17.12+**.
+- MC Firmware version **10.3.0** and higher.
+- Supported architectures: **arm64 LE**.
+
+Follow the DPDK :ref:`Getting Started Guide for Linux <linux_gsg>` to set up
+the basic DPDK environment.
+
+.. note::
+
+   Some of the fslmc bus code (MC flib - object library) routines are
+   dual licensed (BSD & GPLv2).
+
+Pre-Installation Configuration
+------------------------------
+
+Config File Options
+~~~~~~~~~~~~~~~~~~~
+
+The following options can be modified in the ``config`` file.
+
+- ``CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV`` (default ``y``)
+
+ Toggle compilation of the ``lrte_pmd_dpaa2_qdma`` driver.
+
+Enabling logs
+-------------
+
+To enable logs, use the following EAL parameter:
+
+.. code-block:: console
+
+ ./your_qdma_application <EAL args> --log-level=pmd.raw.dpaa2.qdma,<level>
+
+Using ``pmd.raw.dpaa2.qdma`` as the log matching criterion, all QDMA PMD
+logs at or below the given ``level`` are enabled.
+
+Driver Compilation
+~~~~~~~~~~~~~~~~~~
+
+To compile the DPAA2 QDMA PMD for Linux arm64 gcc target, run the
+following ``make`` command:
+
+.. code-block:: console
+
+ cd <DPDK-source-directory>
+ make config T=arm64-dpaa2-linuxapp-gcc install
+
+Initialization
+--------------
+
+The DPAA2 QDMA is exposed as a vdev device which consists of dpdmai devices.
+On EAL initialization, the dpdmai devices will be probed and populated into
+the rawdevices. The rawdev ID of a device can be obtained by:
+
+* Invoking ``rte_rawdev_get_dev_id("dpdmai.x")`` from the application,
+  where x is the object ID of the DPDMAI object created by the MC. Users can
+  use this index for further rawdev function calls.
+
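+A minimal sketch of this lookup (the object name ``dpdmai.1`` is illustrative
+only; the actual object ID is assigned by the MC):
+
+.. code-block:: c
+
+    #include <rte_rawdev.h>
+
+    /* Look up the rawdev ID of the MC-created DPDMAI object. */
+    uint16_t dev_id = rte_rawdev_get_dev_id("dpdmai.1");
+
+    /* dev_id can now be used in further rte_rawdev_* calls. */
+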
+Platform Requirement
+~~~~~~~~~~~~~~~~~~~~
+
+DPAA2 drivers for DPDK can only work on the NXP SoCs listed in
+``Supported DPAA2 SoCs``.
diff --git a/doc/guides/rawdevs/ifpga_rawdev.rst b/doc/guides/rawdevs/ifpga_rawdev.rst
new file mode 100644
index 00000000..d400db6e
--- /dev/null
+++ b/doc/guides/rawdevs/ifpga_rawdev.rst
@@ -0,0 +1,112 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2018 Intel Corporation.
+
+IFPGA Rawdev Driver
+===================
+
+FPGAs are used more and more widely in Cloud and NFV. One primary reason is
+that an FPGA not only provides ASIC-level performance but is also more
+flexible than an ASIC.
+
+An FPGA achieves its flexibility through Partial Reconfiguration (PR) of
+parts of its bit stream. That means one FPGA device bit stream is divided
+into many partial bit streams, each defined as an AFU (Accelerated Function
+Unit), and each AFU is a hardware acceleration unit which can be dynamically
+reloaded on its own.
+
+By partially reconfiguring AFUs, the resources of one FPGA can be time-shared
+by different users, and FPGA hot upgrade and fault tolerance can be provided
+easily.
+
+The SW IFPGA Rawdev Driver (**ifpga_rawdev**) provides a Rawdev driver
+that utilizes the Intel FPGA Software Stack OPAE (Open Programmable
+Acceleration Engine) for FPGA management.
+
+Implementation details
+----------------------
+
+Each instance of the IFPGA Rawdev Driver is probed by an Intel FpgaDev. In
+coordination with the OPAE shared code, the IFPGA Rawdev Driver provides
+common FPGA management ops for FPGA operation; OPAE provides all of the
+following operations:
+
+- FPGA PR (Partial Reconfiguration) management
+- FPGA AFU identification
+- FPGA thermal management
+- FPGA power management
+- FPGA performance reporting
+- FPGA remote debug
+
+All configuration parameters are taken by the vdev_ifpga_cfg driver. Besides
+configuration, the vdev_ifpga_cfg driver also hot-plugs into the IFPGA Bus.
+
+All of the AFUs of one FPGA may share the same PCI BDF, and AFU scanning
+depends on the IFPGA Rawdev Driver, so the IFPGA Bus performs the AFU device
+scan and the AFU driver probe. Each AFU device driver binds to its AFU device
+by UUID (Universally Unique Identifier).
+
+To avoid unnecessary code duplication and ensure maximum performance,
+handling of AFU devices is left to different PMDs; the overall design is
+summarized by the following block diagram::
+
+ +---------------------------------------------------------------+
+ | Application(s) |
+ +----------------------------.----------------------------------+
+ |
+ |
+ +----------------------------'----------------------------------+
+ | DPDK Framework (APIs) |
+ +----------|------------|--------.---------------------|--------+
+ / \ |
+ / \ |
+ +-------'-------+ +-------'-------+ +--------'--------+
+ | Eth PMD | | Crypto PMD | | |
+ +-------.-------+ +-------.-------+ | |
+ | | | |
+ | | | |
+ +-------'-------+ +-------'-------+ | IFPGA |
+ | Eth AFU Dev | |Crypto AFU Dev | | Rawdev Driver |
+ +-------.-------+ +-------.-------+ |(OPAE Share Code)|
+ | | | |
+ | | Rawdev | |
+ +-------'------------------'-------+ Ops | |
+ | IFPGA Bus | -------->| |
+ +-----------------.----------------+ +--------.--------+
+ | |
+ Hot-plugin -->| |
+ | |
+ +-----------------'------------------+ +--------'--------+
+ | vdev_ifpga_cfg driver | | Intel FpgaDev |
+ +------------------------------------+ +-----------------+
+
+Build options
+-------------
+
+- ``CONFIG_RTE_LIBRTE_IFPGA_BUS`` (default ``y``)
+
+ Toggle compilation of IFPGA Bus library.
+
+- ``CONFIG_RTE_LIBRTE_IFPGA_RAWDEV`` (default ``y``)
+
+ Toggle compilation of the ``ifpga_rawdev`` driver.
+
+Run-time parameters
+-------------------
+
+This driver is invoked automatically on systems with an Intel FPGA installed,
+but PR and the IFPGA Bus scan are triggered from the command line using the
+``--vdev 'ifpga_rawdev_cfg'`` EAL option.
+
+The following device parameters are supported:
+
+- ``ifpga`` [string]
+
+ Provide a specific Intel FPGA device PCI BDF. Can be provided multiple
+ times for additional instances.
+
+- ``port`` [int]
+
+  Each FPGA can provide many channels for PR of AFUs by software; each
+  channel is identified by this parameter.
+
+- ``afu_bts`` [string]
+
+  If null, the AFU bit stream is assumed to have already been programmed (PR)
+  into the FPGA; otherwise, this forces PR and identifies the AFU bit stream
+  file.
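+
+A hypothetical invocation combining these parameters is shown below; the BDF
+and port values are placeholders only:
+
+.. code-block:: console
+
+    ./your_application <EAL args> --vdev 'ifpga_rawdev_cfg,ifpga=b3:00.0,port=0'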
diff --git a/doc/guides/rawdevs/index.rst b/doc/guides/rawdevs/index.rst
new file mode 100644
index 00000000..7c3bd958
--- /dev/null
+++ b/doc/guides/rawdevs/index.rst
@@ -0,0 +1,16 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 NXP
+
+Rawdev Drivers
+==============
+
+The following is a list of raw device PMDs, which can be used from an
+application through the rawdev API.
+
+.. toctree::
+ :maxdepth: 2
+ :numbered:
+
+ dpaa2_cmdif
+ dpaa2_qdma
+ ifpga_rawdev
diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
index 9b557615..e2dbee31 100644
--- a/doc/guides/rel_notes/deprecation.rst
+++ b/doc/guides/rel_notes/deprecation.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
ABI and API Deprecation
=======================
@@ -8,23 +11,31 @@ API and ABI deprecation notices are to be posted here.
Deprecation Notices
-------------------
-* eal: both declaring and identifying devices will be streamlined in v18.05.
+* eal: certain structures will change in EAL on account of upcoming external
+ memory support. Aside from internal changes leading to an ABI break, the
+ following externally visible changes will also be implemented:
+
+ - ``rte_memseg_list`` will change to include a boolean flag indicating
+ whether a particular memseg list is externally allocated. This will have
+ implications for any users of memseg-walk-related functions, as they will
+ now have to skip externally allocated segments in most cases if the intent
+ is to only iterate over internal DPDK memory.
+  - The ``socket_id`` parameter across the entire DPDK will gain additional
+    meaning, as some socket IDs will now represent externally allocated memory.
+    No changes will be required for existing code as backwards compatibility
+    will be kept, and those who do not use this feature will not see these
+    extra socket IDs.
+
+* eal: both declaring and identifying devices will be streamlined in v18.11.
New functions will appear to query a specific port from buses, classes of
device and device drivers. Device declaration will be made coherent with the
new scheme of device identification.
As such, ``rte_devargs`` device representation will change.
- - removal of ``name`` and ``args`` fields.
- The enum ``rte_devtype`` was used to identify a bus and will disappear.
- - The ``rte_devargs_list`` will be made private.
- Functions previously deprecated will change or disappear:
- + ``rte_eal_devargs_add``
+ ``rte_eal_devargs_type_count``
- + ``rte_eal_parse_devargs_str``, replaced by ``rte_eal_devargs_parse``
- + ``rte_eal_devargs_parse`` will change its format and use.
- + all ``rte_devargs`` related functions will be renamed, changing the
- ``rte_eal_devargs_`` prefix to ``rte_devargs_``.
* pci: Several exposed functions are misnamed.
The following functions are deprecated starting from v17.11 and are replaced:
@@ -33,75 +44,23 @@ Deprecation Notices
- ``eal_parse_pci_DomBDF`` replaced by ``rte_pci_addr_parse``
- ``rte_eal_compare_pci_addr`` replaced by ``rte_pci_addr_cmp``
-* eal: The semantics of the return value for the ``rte_lcore_has_role`` function
- are planned to change in v18.05. The function currently returns 0 and <0 for
- success and failure, respectively. This will change to 1 and 0 for true and
- false, respectively, to make use of the function more intuitive.
-
-* eal: new ``numa_node_count`` member will be added to ``rte_config`` structure
- in v18.05.
-
-* eal: due to internal data layout reorganization, there will be changes to
- several structures and functions as a result of coming changes to support
- memory hotplug in v18.05.
- ``rte_eal_get_physmem_layout`` will be deprecated and removed in subsequent
- releases.
- ``rte_mem_config`` contents will change due to switch to memseg lists.
- ``rte_memzone`` member ``memseg_id`` will no longer serve any useful purpose
- and will be removed.
-
-* eal: a new set of mbuf mempool ops name APIs for user, platform and best
- mempool names have been defined in ``rte_mbuf`` in v18.02. The uses of
- ``rte_eal_mbuf_default_mempool_ops`` shall be replaced by
- ``rte_mbuf_best_mempool_ops``.
- The following function is now redundant and it is target to be deprecated
- in 18.05:
-
- - ``rte_eal_mbuf_default_mempool_ops``
-
-* mempool: several API and ABI changes are planned in v18.05.
- The following functions, introduced for Xen, which is not supported
- anymore since v17.11, are hard to use, not used anywhere else in DPDK.
- Therefore they will be deprecated in v18.05 and removed in v18.08:
-
- - ``rte_mempool_xmem_create``
- - ``rte_mempool_xmem_size``
- - ``rte_mempool_xmem_usage``
-
- The following changes are planned:
-
- - removal of ``get_capabilities`` mempool ops and related flags.
- - substitute ``register_memory_area`` with ``populate`` ops.
- - addition of new ops to customize required memory chunk calculation,
- customize objects population and allocate contiguous
- block of objects if underlying driver supports it.
-
-* mbuf: The control mbuf API will be removed in v18.05. The impacted
- functions and macros are:
-
- - ``rte_ctrlmbuf_init()``
- - ``rte_ctrlmbuf_alloc()``
- - ``rte_ctrlmbuf_free()``
- - ``rte_ctrlmbuf_data()``
- - ``rte_ctrlmbuf_len()``
- - ``rte_is_ctrlmbuf()``
- - ``CTRL_MBUF_FLAG``
-
- The packet mbuf API should be used as a replacement.
-
* mbuf: The opaque ``mbuf->hash.sched`` field will be updated to support generic
definition in line with the ethdev TM and MTR APIs. Currently, this field
is defined in librte_sched in a non-generic way. The new generic format
will contain: queue ID, traffic class, color. Field size will not change.
-* ethdev: a new Tx and Rx offload API was introduced on 17.11.
- In the new API, offloads are divided into per-port and per-queue offloads.
- Offloads are disabled by default and enabled per application request.
- The old offloads API is target to be deprecated on 18.05. This includes:
+* mbuf: the macro ``RTE_MBUF_INDIRECT()`` will be removed in v18.08 or later and
+ replaced with ``RTE_MBUF_CLONED()`` which is already added in v18.05. As
+ ``EXT_ATTACHED_MBUF`` is newly introduced in v18.05, ``RTE_MBUF_INDIRECT()``
+ can no longer be mutually exclusive with ``RTE_MBUF_DIRECT()`` if the new
+ experimental API ``rte_pktmbuf_attach_extbuf()`` is used. Removal of the macro
+ is to fix this semantic inconsistency.
- - removal of ``ETH_TXQ_FLAGS_NO*`` flags.
- - removal of ``txq_flags`` field from ``rte_eth_txconf`` struct.
- - removal of the offloads bit-field from ``rte_eth_rxmode`` struct.
+* ethdev: In v18.11 the ``DEV_RX_OFFLOAD_CRC_STRIP`` offload flag will be
+  removed, and the default behavior without any flag will change to stripping
+  the CRC. To keep the CRC, the ``DEV_RX_OFFLOAD_KEEP_CRC`` flag will be
+  required:
+
+  - ``KEEP_CRC``: Keep CRC in packet
+  - No flag: Strip CRC from packet
* ethdev: the legacy filter API, including
``rte_eth_dev_filter_supported()``, ``rte_eth_dev_filter_ctrl()`` as well
@@ -111,65 +70,28 @@ Deprecation Notices
Target release for removal of the legacy API will be defined once most
PMDs have switched to rte_flow.
-* ethdev: A new rss level field planned in 18.05.
- The new API add rss_level field to ``rte_eth_rss_conf`` to enable a choice
- of RSS hash calculation on outer or inner header of tunneled packet.
-
-* ethdev: Currently, if the rte_eth_rx_burst() function returns a value less
- than *nb_pkts*, the application will assume that no more packets are present.
- Some of the hw queue based hardware can only support smaller burst for RX
- and TX and thus break the expectation of the rx_burst API. Similar is the
- case for TX burst as well as ring sizes. ``rte_eth_dev_info`` will be added
- with following new parameters so as to support semantics for drivers to
- define a preferred size for Rx/Tx burst and rings.
-
- - Member ``struct preferred_size`` would be added to enclose all preferred
- size to be fetched from driver/implementation.
- - Members ``uint16_t rx_burst``, ``uint16_t tx_burst``, ``uint16_t rx_ring``,
- and ``uint16_t tx_ring`` would be added to ``struct preferred_size``.
-
-* ethdev: A work is being planned for 18.05 to expose VF port representors
- as a mean to perform control and data path operation on the different VFs.
- As VF representor is an ethdev port, new fields are needed in order to map
- between the VF representor and the VF or the parent PF. Those new fields
- are to be included in ``rte_eth_dev_info`` struct.
-
-* ethdev: The prototype and the behavior of
- ``dev_ops->eth_mac_addr_set()`` will change in v18.05. A return code
- will be added to notify the caller if an error occurred in the PMD. In
- ``rte_eth_dev_default_mac_addr_set()``, the new default MAC address
- will be copied in ``dev->data->mac_addrs[0]`` only if the operation is
- successful. This modification will only impact the PMDs, not the
- applications.
-
-* ethdev: functions add rx/tx callback will return named opaque type
- ``rte_eth_add_rx_callback()``, ``rte_eth_add_first_rx_callback()`` and
- ``rte_eth_add_tx_callback()`` functions currently return callback object as
- ``void \*`` but APIs to delete callbacks get ``struct rte_eth_rxtx_callback \*``
- as parameter. For consistency functions adding callback will return
- ``struct rte_eth_rxtx_callback \*`` instead of ``void \*``.
-
-* ethdev: The size of variables ``flow_types_mask`` in
- ``rte_eth_fdir_info structure``, ``sym_hash_enable_mask`` and
- ``valid_bit_mask`` in ``rte_eth_hash_global_conf`` structure
- will be increased from 32 to 64 bits to fulfill hardware requirements.
- This change will break existing ABI as size of the structures will increase.
-
-* ethdev: ``rte_eth_dev_get_sec_ctx()`` fix port id storage
- ``rte_eth_dev_get_sec_ctx()`` is using ``uint8_t`` for ``port_id``,
- which should be ``uint16_t``.
-
-* i40e: The default flexible payload configuration which extracts the first 16
- bytes of the payload for RSS will be deprecated starting from 18.02. If
- required the previous behavior can be configured using existing flow
- director APIs. There is no ABI/API break. This change will just remove a
- global configuration setting and require explicit configuration.
-
-* librte_meter: The API will change to accommodate configuration profiles.
- Most of the API functions will have an additional opaque parameter.
-
-* ring: The alignment constraints on the ring structure will be relaxed
- to one cache line instead of two, and an empty cache line padding will
- be added between the producer and consumer structures. The size of the
- structure and the offset of the fields will remain the same on
- platforms with 64B cache line, but will change on other platforms.
+* ethdev: In v18.11 ``rte_eth_dev_attach()`` and ``rte_eth_dev_detach()``
+ will be removed.
+  The hotplug functions ``rte_eal_hotplug_add()`` and ``rte_eal_hotplug_remove()``
+  should be used instead.
+  The function ``rte_eth_dev_get_port_by_name()`` may be used to find the
+  identifier of the added port.
+
+* eal: In v18.11 ``rte_eal_dev_attach()`` and ``rte_eal_dev_detach()``
+ will be removed.
+ Hotplug functions ``rte_eal_hotplug_add()`` and ``rte_eal_hotplug_remove()``
+ should be used directly.
+
+* pdump: As pdump has changed to use generic IPC, some changes in APIs and
+  structures are expected in a subsequent release.
+
+ - ``rte_pdump_set_socket_dir`` will be removed;
+ - The parameter, ``path``, of ``rte_pdump_init`` will be removed;
+ - The enum ``rte_pdump_socktype`` will be removed.
+
+* ethdev: flow API function ``rte_flow_copy()`` will be deprecated in v18.11
+ in favor of ``rte_flow_conv()`` (which will appear in that version) and
+  subsequently removed in v19.02.
+
+ This is due to a lack of flexibility and reliance on a type unusable with
+  C++ programs (``struct rte_flow_desc``).
diff --git a/doc/guides/rel_notes/index.rst b/doc/guides/rel_notes/index.rst
index d8b5dfeb..d125342c 100644
--- a/doc/guides/rel_notes/index.rst
+++ b/doc/guides/rel_notes/index.rst
@@ -9,6 +9,8 @@ Release Notes
:numbered:
rel_description
+ release_18_08
+ release_18_05
release_18_02
release_17_11
release_17_08
diff --git a/doc/guides/rel_notes/known_issues.rst b/doc/guides/rel_notes/known_issues.rst
index afcc2c44..95e4ce69 100644
--- a/doc/guides/rel_notes/known_issues.rst
+++ b/doc/guides/rel_notes/known_issues.rst
@@ -714,3 +714,48 @@ igb_uio can not be used when running l3fwd-power
**Driver/Module**:
``igb_uio`` module.
+
+
+Linux kernel 4.10.0 iommu attribute read error
+----------------------------------------------
+
+**Description**:
+   When VT-d is enabled (``iommu=pt intel_iommu=on``), reading IOMMU attributes
+   from ``/sys/devices/virtual/iommu/dmarXXX/intel-iommu/cap`` on Linux kernel
+   4.10.0 fails.
+   The bug was introduced in `Linux commit 39ab9555c241
+   <https://patchwork.kernel.org/patch/9554403/>`_
+   and fixed in `Linux commit a7fdb6e648fb
+   <https://patchwork.kernel.org/patch/9595727/>`_.
+
+**Implication**:
+ When binding devices to VFIO and attempting to run testpmd application,
+ testpmd (and other DPDK applications) will not initialize.
+
+**Resolution/Workaround**:
+   Use another Linux kernel version. The issue only occurs with Linux kernel 4.10.0.
+
+**Affected Environment/Platform**:
+   All OSes running Linux kernel 4.10.0.
+
+**Driver/Module**:
+ ``vfio-pci`` module.
+
+Netvsc driver and application restart
+-------------------------------------
+
+**Description**:
+   The Linux kernel uio_hv_generic driver does not completely shut down and
+   clean up resources properly if an application using the Netvsc PMD exits.
+
+**Implication**:
+   When an application using the Netvsc PMD is restarted, it cannot complete
+   the initialization handshake sequence with the host.
+
+**Resolution/Workaround**:
+   Either reboot the guest or remove and reinsert the uio_hv_generic module.
+
+**Affected Environment/Platform**:
+ Linux Hyper-V.
+
+**Driver/Module**:
+ ``uio_hv_generic`` module.
diff --git a/doc/guides/rel_notes/release_16_04.rst b/doc/guides/rel_notes/release_16_04.rst
index d0a09eff..e9f1e6ff 100644
--- a/doc/guides/rel_notes/release_16_04.rst
+++ b/doc/guides/rel_notes/release_16_04.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2016 The DPDK contributors
+
DPDK Release 16.04
==================
diff --git a/doc/guides/rel_notes/release_16_07.rst b/doc/guides/rel_notes/release_16_07.rst
index a8a3fc11..2904aacf 100644
--- a/doc/guides/rel_notes/release_16_07.rst
+++ b/doc/guides/rel_notes/release_16_07.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2016 The DPDK contributors
+
DPDK Release 16.07
==================
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index 8c9ec65c..92e0ec69 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2016 The DPDK contributors
+
DPDK Release 16.11
==================
diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index 357965ac..d6c1c567 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2017 The DPDK contributors
+
DPDK Release 17.02
==================
diff --git a/doc/guides/rel_notes/release_17_05.rst b/doc/guides/rel_notes/release_17_05.rst
index 68922840..64182406 100644
--- a/doc/guides/rel_notes/release_17_05.rst
+++ b/doc/guides/rel_notes/release_17_05.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2017 The DPDK contributors
+
DPDK Release 17.05
==================
diff --git a/doc/guides/rel_notes/release_17_08.rst b/doc/guides/rel_notes/release_17_08.rst
index 0bcdfb7b..dc622409 100644
--- a/doc/guides/rel_notes/release_17_08.rst
+++ b/doc/guides/rel_notes/release_17_08.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2017 The DPDK contributors
+
DPDK Release 17.08
==================
diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
index 088778bf..2a93af32 100644
--- a/doc/guides/rel_notes/release_17_11.rst
+++ b/doc/guides/rel_notes/release_17_11.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2017 The DPDK contributors
+
DPDK Release 17.11
==================
@@ -56,7 +59,7 @@ New Features
* **Added a new driver for Marvell Armada 7k/8k devices.**
Added the new ``mrvl`` net driver for Marvell Armada 7k/8k devices. See the
- :doc:`../nics/mrvl` NIC guide for more details on this new driver.
+ :doc:`../nics/mvpp2` NIC guide for more details on this new driver.
* **Updated mlx4 driver.**
@@ -184,7 +187,7 @@ New Features
A new crypto PMD has been added, which provides several ciphering and hashing
algorithms. All cryptography operations use the MUSDK library crypto API.
- See the :doc:`../cryptodevs/mrvl` document for more details.
+ See the :doc:`../cryptodevs/mvsam` document for more details.
* **Add new benchmarking mode to dpdk-test-crypto-perf application.**
diff --git a/doc/guides/rel_notes/release_18_02.rst b/doc/guides/rel_notes/release_18_02.rst
index 44b7de59..8e403118 100644
--- a/doc/guides/rel_notes/release_18_02.rst
+++ b/doc/guides/rel_notes/release_18_02.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
DPDK Release 18.02
==================
diff --git a/doc/guides/rel_notes/release_18_05.rst b/doc/guides/rel_notes/release_18_05.rst
new file mode 100644
index 00000000..8dc22b01
--- /dev/null
+++ b/doc/guides/rel_notes/release_18_05.rst
@@ -0,0 +1,983 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
+DPDK Release 18.05
+==================
+
+.. **Read this first.**
+
+ The text in the sections below explains how to update the release notes.
+
+ Use proper spelling, capitalization and punctuation in all sections.
+
+ Variable and config names should be quoted as fixed width text:
+ ``LIKE_THIS``.
+
+ Build the docs and view the output file to ensure the changes are correct::
+
+ make doc-guides-html
+
+ xdg-open build/doc/html/guides/rel_notes/release_18_05.html
+
+
+New Features
+------------
+
+.. This section should contain new features added in this release. Sample
+ format:
+
+ * **Add a title in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description in the past tense. The description
+ should be enough to allow someone scanning the release notes to
+ understand the new feature.
+
+ If the feature adds a lot of sub-features you can use a bullet list like
+ this:
+
+ * Added feature foo to do something.
+ * Enhanced feature bar to do something else.
+
+ Refer to the previous release notes for examples.
+
+ This section is a comment. Do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Reworked memory subsystem.**
+
+  The memory subsystem has been reworked to support new functionality.
+
+  On Linux, support for reserving/unreserving hugepage memory at runtime has
+  been added, so applications no longer need to pre-reserve memory at startup.
+  Due to the reorganized internal workings of the memory subsystem, any memory
+  allocated through ``rte_malloc()`` or ``rte_memzone_reserve()`` is no longer
+  guaranteed to be IOVA-contiguous.
+
+ This functionality has introduced the following changes:
+
+ * ``rte_eal_get_physmem_layout()`` was removed.
+ * A new flag for memzone reservation (``RTE_MEMZONE_IOVA_CONTIG``) was added
+ to ensure reserved memory will be IOVA-contiguous, for use with device
+ drivers and other cases requiring such memory.
+  * New callbacks for memory allocation/deallocation events, allowing users
+    (or drivers) to be notified of new memory being allocated or deallocated.
+  * New callbacks for validating memory allocations above a specified limit,
+    allowing the user to permit or deny memory allocations.
+ * A new command-line switch ``--legacy-mem`` to enable EAL behavior similar to
+ how older versions of DPDK worked (memory segments that are IOVA-contiguous,
+ but hugepages are reserved at startup only, and can never be released).
+ * A new command-line switch ``--single-file-segments`` to put all memory
+ segments within a segment list in a single file.
+ * A set of convenience function calls to look up and iterate over allocated
+ memory segments.
+ * ``-m`` and ``--socket-mem`` command-line arguments now carry an additional
+ meaning and mark pre-reserved hugepages as "unfree-able", thereby acting as
+ a mechanism guaranteeing minimum availability of hugepage memory to the
+ application.
+
+ Reserving/unreserving memory at runtime is not currently supported on FreeBSD.
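+
+  A minimal sketch of requesting IOVA-contiguous memory with the new flag
+  (the zone name and length are illustrative only):
+
+  .. code-block:: c
+
+     #include <rte_lcore.h>
+     #include <rte_memzone.h>
+
+     /* Explicitly request IOVA-contiguous memory, e.g. for a device ring. */
+     const struct rte_memzone *mz = rte_memzone_reserve(
+             "example_zone", 4096, rte_socket_id(),
+             RTE_MEMZONE_IOVA_CONTIG);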
+
+* **Added bucket mempool driver.**
+
+  Added a bucket mempool driver which provides a way to allocate contiguous
+  blocks of objects.
+  The number of objects in a block depends on how many objects fit in the
+  ``RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB`` memory chunk, which is a build-time option.
+  The number may be obtained using the ``rte_mempool_ops_get_info()`` API.
+  Contiguous blocks may be allocated using the ``rte_mempool_get_contig_blocks()`` API.
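+
+  A minimal sketch of the new calls (``mp`` is assumed to be a mempool created
+  with the bucket driver ops; error handling is trimmed):
+
+  .. code-block:: c
+
+     #include <rte_mempool.h>
+
+     struct rte_mempool_info info;
+     void *first_obj;
+
+     if (rte_mempool_ops_get_info(mp, &info) == 0 &&
+                     rte_mempool_get_contig_blocks(mp, &first_obj, 1) == 0) {
+             /* A contiguous run of info.contig_block_size objects
+              * starts at first_obj. */
+     }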
+
+* **Added support for port representors.**
+
+ Added DPDK port representors (also known as "VF representors" in the specific
+ context of VFs), which are to DPDK what the Ethernet switch device driver
+  model (**switchdev**) is to Linux, and which can be thought of as a software
+ "patch panel" front-end for applications. DPDK port representors are
+ implemented as additional virtual Ethernet device (**ethdev**) instances,
+ spawned on an as-needed basis through configuration parameters passed to the
+ driver of the underlying device using devargs.
+
+* **Added support for VXLAN and NVGRE tunnel endpoint.**
+
+ New actions types have been added to support encapsulation and decapsulation
+ operations for a tunnel endpoint. The new action types are
+ ``RTE_FLOW_ACTION_TYPE_[VXLAN/NVGRE]_ENCAP``, ``RTE_FLOW_ACTION_TYPE_[VXLAN/NVGRE]_DECAP``,
+ ``RTE_FLOW_ACTION_TYPE_JUMP``. A new item type ``RTE_FLOW_ACTION_TYPE_MARK`` has been
+ added to match a flow against a previously marked flow. A shared counter has also been
+ introduced to the flow API to count a group of flows.
+
+* **Added PMD-recommended Tx and Rx parameters.**
+
+ Applications can now query drivers for device-tuned values of
+ ring sizes, burst sizes, and number of queues.
+
+* **Added RSS hash and key update to CXGBE PMD.**
+
+ Added support for updating the RSS hash and key to the CXGBE PMD.
+
+* **Added CXGBE VF PMD.**
+
+ CXGBE VF Poll Mode Driver has been added to run DPDK over Chelsio
+ T5/T6 NIC VF instances.
+
+* **Updated mlx5 driver.**
+
+ Updated the mlx5 driver including the following changes:
+
+ * Introduced Multi-packet Rx to enable 100Gb/sec with 64B frames.
+ * Support for being run by non-root users given a reduced set of capabilities
+ ``CAP_NET_ADMIN``, ``CAP_NET_RAW`` and ``CAP_IPC_LOCK``.
+ * Support for TSO and checksum for generic UDP and IP tunnels.
+ * Support for inner checksum and RSS for GRE, VXLAN-GPE, MPLSoGRE
+ and MPLSoUDP tunnels.
+ * Accommodate the new memory hotplug model.
+ * Support for non virtually contiguous mempools.
+ * Support for MAC adding along with allmulti and promiscuous modes from VF.
+ * Support for Mellanox BlueField SoC device.
+  * Support for PMD defaults for queue number and depth to improve the
+    out-of-the-box performance.
+
+* **Updated mlx4 driver.**
+
+ Updated the mlx4 driver including the following changes:
+
+  * Support for being run by non-root users given a reduced set of capabilities
+ ``CAP_NET_ADMIN``, ``CAP_NET_RAW`` and ``CAP_IPC_LOCK``.
+ * Supported CRC strip toggling.
+ * Accommodate the new memory hotplug model.
+ * Support non virtually contiguous mempools.
+ * Dropped support for Mellanox OFED 4.2.
+
+* **Updated Solarflare network PMD.**
+
+ Updated the sfc_efx driver including the following changes:
+
+ * Added support for Solarflare XtremeScale X2xxx family adapters.
+ * Added support for NVGRE, VXLAN and GENEVE filters in flow API.
+ * Added support for DROP action in flow API.
+ * Added support for equal stride super-buffer Rx mode (X2xxx only).
+ * Added support for MARK and FLAG actions in flow API (X2xxx only).
+
+* **Added Ethernet poll mode driver for AMD XGBE devices.**
+
+ Added the new ``axgbe`` ethernet poll mode driver for AMD XGBE devices.
+ See the :doc:`../nics/axgbe` nic driver guide for more details on this
+ new driver.
+
+* **Updated szedata2 PMD.**
+
+  Added support for the new NFB-200G2QL card.
+  A new API was introduced in the libsze2 library, on which the szedata2 PMD
+  depends, so a new version of the library is needed.
+  New versions of the packages are available and the minimum required version
+  is 4.4.1.
+
+* **Added support for Broadcom NetXtreme-S (BCM58800) family of controllers (aka Stingray).**
+
+ Added support for the Broadcom NetXtreme-S (BCM58800) family of controllers
+ (aka Stingray). The BCM58800 devices feature a NetXtreme E-Series advanced
+ network controller, a high-performance ARM CPU block, PCI Express (PCIe)
+ Gen3 interfaces, key accelerators for compute offload and a high-speed
+ memory subsystem including L3 cache and DDR4 interfaces, all interconnected
+ by a coherent Network-on-chip (NOC) fabric.
+
+ The ARM CPU subsystem features eight ARMv8 Cortex-A72 CPUs at 3.0 GHz,
+ arranged in a multi-cluster configuration.
+
+* **Added vDPA in vhost-user lib.**
+
+  Added support for selective datapath in the vhost-user lib. vDPA stands for
+  vhost Data Path Acceleration. It allows virtio ring compatible devices to
+  serve the virtio driver directly, enabling datapath acceleration.
+
+* **Added IFCVF vDPA driver.**
+
+  Added the IFCVF vDPA driver to support Intel FPGA 100G VF devices. IFCVF
+  works as a HW vhost data path accelerator; it supports live migration and
+  is compatible with virtio 0.95 and 1.0. This driver registers the ifcvf
+  vDPA driver with the vhost lib. When virtio connects, with the help of the
+  registered vDPA driver, the assigned VF gets configured to Rx/Tx directly
+  to the VM's virtio vrings.
+
+* **Added support for vhost dequeue interrupt mode.**
+
+  Added support for vhost dequeue interrupt mode to release CPUs for other
+  tasks when there is no data to transmit. Applications can register an epoll
+  event file descriptor to associate Rx queues with interrupt vectors.
+
+* **Added support for virtio-user server mode.**
+
+  In a container environment, if the vhost-user backend restarts, there is no way
+ for it to reconnect to virtio-user. To address this, support for server mode
+ has been added. In this mode the socket file is created by virtio-user, which the
+ backend connects to. This means that if the backend restarts, it can reconnect
+ to virtio-user and continue communications.
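+
+  A hypothetical invocation of virtio-user in server mode (the socket path is
+  a placeholder, and the ``path``/``server`` devarg names are assumptions):
+
+  .. code-block:: console
+
+     ./your_application <EAL args> --vdev 'virtio_user0,path=/tmp/vu.sock,server=1'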
+
+* **Added crypto workload support to vhost library.**
+
+ New APIs have been introduced in the vhost library to enable virtio crypto support
+ including session creation/deletion handling and translating virtio-crypto
+ requests into DPDK crypto operations. A sample application has also been introduced.
+
+* **Added virtio crypto PMD.**
+
+ Added a new Poll Mode Driver for virtio crypto devices, which provides
+ AES-CBC ciphering and AES-CBC with HMAC-SHA1 algorithm-chaining. See the
+ :doc:`../cryptodevs/virtio` crypto driver guide for more details on
+ this new driver.
+
+* **Added AMD CCP Crypto PMD.**
+
+ Added the new ``ccp`` crypto driver for AMD CCP devices. See the
+ :doc:`../cryptodevs/ccp` crypto driver guide for more details on
+ this new driver.
+
+* **Updated AESNI MB PMD.**
+
+ The AESNI MB PMD has been updated with additional support for:
+
+ * AES-CMAC (128-bit key).
+
+* **Added the Compressdev Library, a generic compression service library.**
+
+ Added the Compressdev library which provides an API for offload of compression and
+ decompression operations to hardware or software accelerator devices.
+
+* **Added a new compression poll mode driver using Intel's ISA-L.**
+
+ Added the new ``ISA-L`` compression driver, for compression and decompression
+ operations in software. See the :doc:`../compressdevs/isal` compression driver
+ guide for details on this new driver.
+
+* **Added the Event Timer Adapter Library.**
+
+ The Event Timer Adapter Library extends the event-based model by introducing
+ APIs that allow applications to arm/cancel event timers that generate
+ timer expiry events. This new type of event is scheduled by an event device
+ along with existing types of events.
+
+* **Added OcteonTx TIM Driver (Event timer adapter).**
+
+  The OcteonTx Timer block enables software to schedule events for a future
+  time; it is exposed to an application via the Event timer adapter library.
+
+  See the :doc:`../eventdevs/octeontx` guide for more details.
+
+* **Added Event Crypto Adapter Library.**
+
+ Added the Event Crypto Adapter Library. This library extends the
+ event-based model by introducing APIs that allow applications to
+ enqueue/dequeue crypto operations to/from cryptodev as events scheduled
+ by an event device.
+
+* **Added Ifpga Bus, a generic Intel FPGA Bus library.**
+
+ Added the Ifpga Bus library which provides support for integrating any Intel
+ FPGA device with the DPDK framework. It provides Intel FPGA Partial Bit
+  Stream AFU (Accelerated Function Unit) scanning and driver probing.
+
+* **Added IFPGA (Intel FPGA) Rawdev Driver.**
+
+ Added a new Rawdev driver called IFPGA (Intel FPGA) Rawdev Driver, which cooperates
+ with OPAE (Open Programmable Acceleration Engine) shared code to provide common FPGA
+ management ops for FPGA operation.
+
+ See the :doc:`../rawdevs/ifpga_rawdev` programmer's guide for more details.
+
+* **Added DPAA2 QDMA Driver (in rawdev).**
+
+  The DPAA2 QDMA is an implementation of the rawdev API that provides a means
+  of initiating a DMA transaction from the CPU. The initiated DMA is performed
+  without the CPU being involved in the actual DMA transaction.
+
+ See the :doc:`../rawdevs/dpaa2_qdma` guide for more details.
+
+* **Added DPAA2 Command Interface Driver (in rawdev).**
+
+  The DPAA2 CMDIF is an implementation of the rawdev API that provides
+  communication between the GPP and NXP's QorIQ-based AIOP Block (Firmware).
+  The Advanced IO Processor (AIOP) consists of clusters of programmable RISC
+  engines optimized for flexible networking and I/O operations. The
+  communication between the GPP and the AIOP is achieved via DPCI devices
+  exposed by the MC for GPP <--> AIOP interaction.
+
+ See the :doc:`../rawdevs/dpaa2_cmdif` guide for more details.
+
+* **Added device event monitor framework.**
+
+  Added a general device event monitor framework to EAL for dynamic device
+  management, to facilitate device hotplug awareness and associated
+  actions. The list of new APIs is:
+
+ * ``rte_dev_event_monitor_start`` and ``rte_dev_event_monitor_stop`` for
+ the event monitor enabling and disabling.
+ * ``rte_dev_event_callback_register`` and ``rte_dev_event_callback_unregister``
+ for registering and un-registering user callbacks.
+
+ Linux uevent is supported as a backend of this device event notification framework.
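+
+  A minimal sketch of using these APIs (a hedged illustration; passing NULL as
+  the device name to register for all devices is an assumption, and error
+  handling is trimmed):
+
+  .. code-block:: c
+
+     #include <stdio.h>
+     #include <rte_dev.h>
+
+     static void
+     dev_event_cb(const char *device_name, enum rte_dev_event_type event,
+                     void *cb_arg)
+     {
+             if (event == RTE_DEV_EVENT_ADD)
+                     printf("device %s added\n", device_name);
+     }
+
+     /* After rte_eal_init(): start the monitor and register the callback. */
+     rte_dev_event_monitor_start();
+     rte_dev_event_callback_register(NULL, dev_event_cb, NULL);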
+
+* **Added support for procinfo and pdump on eth vdev.**
+
+  With this feature, stats/xstats of Ethernet virtual devices (such as TAP and
+  PCAP) can be retrieved over shared memory from a secondary process, and
+  packets on those virtual devices can also be captured with pdump.
+
+* **Enhancements to the Packet Framework Library.**
+
+  Added new API functions to the Packet Framework library that implement a
+  common set of actions, such as traffic metering, packet encapsulation,
+  network address translation and TTL update, for pipeline tables and input
+  ports, to speed up application development. The API functions include
+  creating action profiles, registering actions to the profiles, and
+  instantiating action profiles for pipeline tables and input ports.
+
+* **Added the BPF Library.**
+
+ The BPF Library provides the ability to load and execute
+  extended Berkeley Packet Filters (eBPF) within user-space DPDK applications.
+ It also introduces a basic framework to load/unload BPF-based filters
+ on Eth devices (right now only via SW RX/TX callbacks).
+ It also adds a dependency on libelf.
+
+
+API Changes
+-----------
+
+.. This section should contain API changes. Sample format:
+
+ * Add a short 1-2 sentence description of the API change. Use fixed width
+ quotes for ``rte_function_names`` or ``rte_struct_names``. Use the past
+ tense.
+
+ This section is a comment. Do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* service cores: No longer marked as experimental.
+
+ The service cores functions are no longer marked as experimental, and have
+ become part of the normal DPDK API and ABI. Any future ABI changes will be
+ announced at least one release before the ABI change is made. There are no
+ ABI breaking changes planned.
+
+* eal: The ``rte_lcore_has_role()`` return value changed.
+
+  This function now returns true or false rather than 0 or < 0 for
+  success or failure, which makes use of the function more intuitive.
+
+* mempool: The capability flags and related functions have been removed.
+
+ Flags ``MEMPOOL_F_CAPA_PHYS_CONTIG`` and
+ ``MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS`` were used by octeontx mempool
+ driver to customize generic mempool library behavior.
+  Now the new driver callbacks ``calc_mem_size`` and ``populate`` may be
+  used to achieve the same behavior without driver-specific knowledge in
+  the generic code.
+
+* mempool: The following xmem functions have been deprecated:
+
+ - ``rte_mempool_xmem_create``
+ - ``rte_mempool_xmem_size``
+ - ``rte_mempool_xmem_usage``
+ - ``rte_mempool_populate_iova_tab``
+
+* mbuf: The control mbuf API has been removed in v18.05. The impacted
+ functions and macros are:
+
+ - ``rte_ctrlmbuf_init()``
+ - ``rte_ctrlmbuf_alloc()``
+ - ``rte_ctrlmbuf_free()``
+ - ``rte_ctrlmbuf_data()``
+ - ``rte_ctrlmbuf_len()``
+ - ``rte_is_ctrlmbuf()``
+ - ``CTRL_MBUF_FLAG``
+
+ The packet mbuf API should be used as a replacement.
+
+* meter: API updated to accommodate configuration profiles.
+
+ The meter API has been changed to support meter configuration profiles. The
+ configuration profile represents the set of configuration parameters
+ for a given meter object, such as the rates and sizes for the token
+ buckets. These configuration parameters were previously part of the meter
+ object internal data structure. The separation of the configuration
+ parameters from the meter object data structure results in reducing its
+ memory footprint which helps in better cache utilization when a large number
+ of meter objects are used.
+
+* ethdev: The function ``rte_eth_dev_count()``, often misused to iterate
+  over ports, is deprecated and replaced by ``rte_eth_dev_count_avail()``.
+  There is also a new function ``rte_eth_dev_count_total()`` to get the
+  total number of allocated ports, available or not.
+  Hotplug-proof applications should use ``RTE_ETH_FOREACH_DEV`` or
+  ``RTE_ETH_FOREACH_DEV_OWNED_BY`` as port iterators.
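+
+  A minimal sketch of the hotplug-proof iteration:
+
+  .. code-block:: c
+
+     #include <rte_ethdev.h>
+
+     uint16_t port_id;
+
+     /* Visits only valid, available ports. */
+     RTE_ETH_FOREACH_DEV(port_id) {
+             /* use port_id here */
+     }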
+
+* ethdev: In ``struct rte_eth_dev_info``, the field ``rte_pci_device *pci_dev``
+  has been replaced with the field ``struct rte_device *device``.
+
+* ethdev: Changes to the semantics of ``rte_eth_dev_configure()`` parameters.
+
+ If both the ``nb_rx_q`` and ``nb_tx_q`` parameters are zero,
+ ``rte_eth_dev_configure()`` will now use PMD-recommended queue sizes, or if
+ recommendations are not provided by the PMD the function will use ethdev
+ fall-back values. Previously setting both of the parameters to zero would
+ have resulted in ``-EINVAL`` being returned.
+
+* ethdev: Changes to the semantics of ``rte_eth_rx_queue_setup()`` parameters.
+
+ If the ``nb_rx_desc`` parameter is zero, ``rte_eth_rx_queue_setup`` will
+ now use the PMD-recommended Rx ring size, or in the case where the PMD
+ does not provide a recommendation, will use an ethdev-provided
+ fall-back value. Previously, setting ``nb_rx_desc`` to zero would have
+ resulted in an error.
+
+* ethdev: Changes to the semantics of ``rte_eth_tx_queue_setup()`` parameters.
+
+ If the ``nb_tx_desc`` parameter is zero, ``rte_eth_tx_queue_setup`` will
+ now use the PMD-recommended Tx ring size, or in the case where the PMD
+ does not provide a recommendation, will use an ethdev-provided
+ fall-back value. Previously, setting ``nb_tx_desc`` to zero would have
+ resulted in an error.
+
+* ethdev: Several changes were made to the flow API.
+
+ * The unused DUP action was removed.
+ * Actions semantics in flow rules: list order now matters ("first
+ to last" instead of "all simultaneously"), repeated actions are now
+ all performed, and they do not individually have (non-)terminating
+ properties anymore.
+ * Flow rules are now always terminating unless a ``PASSTHRU`` action is
+ present.
+ * C99-style flexible arrays were replaced with standard pointers in RSS
+ action and in RAW pattern item structures due to compatibility issues.
+ * The RSS action was modified to not rely on external
+ ``struct rte_eth_rss_conf`` anymore to instead expose its own and more
+ appropriately named configuration fields directly
+ (``rss_conf->rss_key`` => ``key``,
+ ``rss_conf->rss_key_len`` => ``key_len``,
+ ``rss_conf->rss_hf`` => ``types``,
+ ``num`` => ``queue_num``), and the addition of missing RSS parameters
+ (``func`` for RSS hash function to apply and ``level`` for the
+ encapsulation level).
+ * The VLAN pattern item (``struct rte_flow_item_vlan``) was modified to
+ include inner EtherType instead of outer TPID. Its default mask was also
+ modified to cover the VID part (lower 12 bits) of TCI only.
+ * A new transfer attribute was added to ``struct rte_flow_attr`` in order
+ to clarify the behavior of some pattern items.
+ * PF and VF pattern items are now only accepted by PMDs that implement
+ them (bnxt and i40e) when the transfer attribute is also present, for
+ consistency.
+ * Pattern item PORT was renamed PHY_PORT to avoid confusion with DPDK port
+ IDs.
+ * An action counterpart to the PHY_PORT pattern item was added in order to
+ redirect matching traffic to a specific physical port.
+ * PORT_ID pattern item and actions were added to match and target DPDK
+ port IDs at a higher level than PHY_PORT.
+ * ``RTE_FLOW_ACTION_TYPE_[VXLAN/NVGRE]_ENCAP`` action items were added to support
+ tunnel encapsulation operation for VXLAN and NVGRE type tunnel endpoint.
+ * ``RTE_FLOW_ACTION_TYPE_[VXLAN/NVGRE]_DECAP`` action items were added to support
+ tunnel decapsulation operation for VXLAN and NVGRE type tunnel endpoint.
+ * ``RTE_FLOW_ACTION_TYPE_JUMP`` action item was added to support a matched flow
+ to be redirected to the specific group.
+  * ``RTE_FLOW_ITEM_TYPE_MARK`` item type has been added to match a flow against
+    a previously marked flow.
+
+* ethdev: Change flow APIs regarding count action:
+
+ * ``rte_flow_create()`` API count action now requires the ``struct rte_flow_action_count``.
+ * ``rte_flow_query()`` API parameter changed from action type to action structure.
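+
+  A minimal sketch of the new query form (``port_id`` and ``flow`` are assumed
+  to exist already; error handling is trimmed):
+
+  .. code-block:: c
+
+     #include <rte_flow.h>
+
+     struct rte_flow_action_count count = { .id = 0 };
+     struct rte_flow_action action = {
+             .type = RTE_FLOW_ACTION_TYPE_COUNT,
+             .conf = &count,
+     };
+     struct rte_flow_query_count stats = { .reset = 0 };
+     struct rte_flow_error error;
+
+     /* The action structure, not just the action type, is now passed. */
+     rte_flow_query(port_id, flow, &action, &stats, &error);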
+
+* ethdev: Changes to offload API
+
+  A pure per-port offload no longer needs to be repeated in ``[rt]x_conf->offloads``
+  passed to ``rte_eth_[rt]x_queue_setup()``. Any offload enabled in
+  ``rte_eth_dev_configure()`` can no longer be disabled by
+  ``rte_eth_[rt]x_queue_setup()``. Any newly added offload which has not been
+  enabled in ``rte_eth_dev_configure()`` and is requested to be enabled in
+  ``rte_eth_[rt]x_queue_setup()`` must be a per-queue offload; otherwise an
+  error is logged.
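+
+  A minimal sketch of a port-level offload set once at configure time
+  (``DEV_RX_OFFLOAD_CHECKSUM`` is just an example offload; ``port_id`` is
+  assumed):
+
+  .. code-block:: c
+
+     #include <rte_ethdev.h>
+
+     struct rte_eth_conf port_conf = {
+             .rxmode = {
+                     .ignore_offload_bitfield = 1, /* use the offloads field */
+                     .offloads = DEV_RX_OFFLOAD_CHECKSUM,
+             },
+     };
+
+     /* Offloads enabled here apply to all queues and cannot be
+      * disabled per queue in rte_eth_rx_queue_setup(). */
+     rte_eth_dev_configure(port_id, 1, 1, &port_conf);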
+
+* ethdev: Runtime queue setup
+
+  ``rte_eth_rx_queue_setup`` and ``rte_eth_tx_queue_setup`` can be called after
+  ``rte_eth_dev_start`` if the device supports runtime queue setup. The device
+  driver can expose this capability through ``rte_eth_dev_info_get``. An Rx or
+  Tx queue set up at runtime needs to be started explicitly by
+  ``rte_eth_dev_rx_queue_start`` or ``rte_eth_dev_tx_queue_start``.
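+
+  A minimal sketch of runtime setup for Rx queue 1 (``port_id`` and
+  ``mb_pool`` are assumed; the capability and error checks are trimmed):
+
+  .. code-block:: c
+
+     #include <rte_ethdev.h>
+     #include <rte_lcore.h>
+
+     /* After rte_eth_dev_start(port_id)... */
+     rte_eth_rx_queue_setup(port_id, 1, 1024, rte_socket_id(),
+             NULL, mb_pool);
+     /* Queues set up at runtime must be started explicitly. */
+     rte_eth_dev_rx_queue_start(port_id, 1);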
+
+
+ABI Changes
+-----------
+
+.. This section should contain ABI changes. Sample format:
+
+ * Add a short 1-2 sentence description of the ABI change that was announced
+ in the previous releases and made in this release. Use fixed width quotes
+ for ``rte_function_names`` or ``rte_struct_names``. Use the past tense.
+
+ This section is a comment. Do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* ring: The alignment constraints on the ring structure have been relaxed
+  to one cache line instead of two, and an empty cache line of padding is
+  added between the producer and consumer structures. The size of the
+  structure and the offsets of the fields remain the same on platforms
+  with a 64B cache line, but change on other platforms.
+
+* mempool: Some ops have changed.
+
+ A new callback ``calc_mem_size`` has been added to ``rte_mempool_ops``
+ to allow customization of the required memory size calculation.
+ A new callback ``populate`` has been added to ``rte_mempool_ops``
+ to allow customized object population.
+ Callback ``get_capabilities`` has been removed from ``rte_mempool_ops``
+ since its features are covered by ``calc_mem_size`` and ``populate``
+ callbacks.
+ Callback ``register_memory_area`` has been removed from ``rte_mempool_ops``
+ since the new callback ``populate`` may be used instead of it.
+
+* ethdev: Additional fields in ``rte_eth_dev_info``.
+
+  The ``rte_eth_dev_info`` structure has had two extra entries appended to the
+  end of it: ``default_rxportconf`` and ``default_txportconf``. Each of these
+  in turn is an ``rte_eth_dev_portconf`` structure containing three fields of
+  type ``uint16_t``: ``burst_size``, ``ring_size``, and ``nb_queues``. These
+  are parameter values recommended for use by the PMD.
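+
+  A minimal sketch of reading the recommended values (``port_id`` is assumed,
+  and treating 0 as "no recommendation" is an assumption):
+
+  .. code-block:: c
+
+     #include <rte_ethdev.h>
+
+     struct rte_eth_dev_info dev_info;
+
+     rte_eth_dev_info_get(port_id, &dev_info);
+     /* Fall back to an application default when the PMD reports 0. */
+     uint16_t rx_ring_size = dev_info.default_rxportconf.ring_size ?
+             dev_info.default_rxportconf.ring_size : 1024;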
+
+* ethdev: ABI for all flow API functions was updated.
+
+ This includes functions ``rte_flow_copy``, ``rte_flow_create``,
+ ``rte_flow_destroy``, ``rte_flow_error_set``, ``rte_flow_flush``,
+ ``rte_flow_isolate``, ``rte_flow_query`` and ``rte_flow_validate``, due to
+ changes in error type definitions (``enum rte_flow_error_type``), removal
+ of the unused DUP action (``enum rte_flow_action_type``), modified
+ behavior for flow rule actions (see API changes), removal of C99 flexible
+ array from RAW pattern item (``struct rte_flow_item_raw``), complete
+ rework of the RSS action definition (``struct rte_flow_action_rss``),
+ sanity fix in the VLAN pattern item (``struct rte_flow_item_vlan``) and
+ new transfer attribute (``struct rte_flow_attr``).
+
+* bbdev: New parameter added to ``rte_bbdev_op_cap_turbo_dec``.
+
+  A new parameter ``max_llr_modulus`` has been added to the
+  ``rte_bbdev_op_cap_turbo_dec`` structure to specify the maximum LLR
+  (log-likelihood ratio) absolute value.
+
+* bbdev: Queue Groups split into UL/DL Groups.
+
+  Queue Groups have been split into UL/DL Groups in the Turbo Software Driver.
+  They are independent for Decode/Encode. ``rte_bbdev_driver_info`` reflects
+  the introduced changes.
+
+
+Known Issues
+------------
+
+.. This section should contain new known issues in this release. Sample format:
+
+ * **Add title in present tense with full stop.**
+
+ Add a short 1-2 sentence description of the known issue in the present
+ tense. Add information on any known workarounds.
+
+ This section is a comment. Do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Secondary process launch is not reliable.**
+
+ Recent memory hotplug patches have made multiprocess startup less reliable
+ than it was in past releases. A number of workarounds are known to work depending
+ on the circumstances. As such it isn't recommended to use the secondary
+ process mechanism for critical systems. The underlying issues will be
+ addressed in upcoming releases.
+
+ The issue is explained in more detail, including potential workarounds,
+ in the Bugzilla entry referenced below.
+
+ Bugzilla entry: https://dpdk.org/tracker/show_bug.cgi?id=50
+
+* **pdump is not compatible with old applications.**
+
+  As pdump has changed to use generic multi-process communication for
+  negotiation instead of the previous dedicated Unix socket approach, pdump
+  applications, including the dpdk-pdump example and any other applications
+  using ``librte_pdump``, will not work with primary applications built with
+  older DPDK versions.
+
+* **rte_abort takes a long time on FreeBSD.**
+
+  DPDK processes now allocate a large area of virtual memory address space.
+ As a result ``rte_abort`` on FreeBSD now dumps the contents of the
+ whole reserved memory range, not just the used portion, to a core dump file.
+ Writing this large core file can take a significant amount of time, causing
+ processes to appear to hang on the system.
+
+  The workaround for the issue is to set the system resource limits for core
+ dumps before running any tests, e.g. ``limit coredumpsize 0``. This will
+ effectively disable core dumps on FreeBSD. If they are not to be completely
+ disabled, a suitable limit, e.g. 1G might be specified instead of 0. This
+ needs to be run per-shell session, or before every test run. This change
+ can also be made persistent by adding ``kern.coredump=0`` to ``/etc/sysctl.conf``.
+
+ Bugzilla entry: https://dpdk.org/tracker/show_bug.cgi?id=53
+
+* **ixgbe PMD crash on hotplug detach when no VF created.**
+
+  The ixgbe PMD uninit path causes a null pointer dereference during port
+  representor cleanup when the number of VFs is zero.
+
+ Bugzilla entry: https://dpdk.org/tracker/show_bug.cgi?id=57
+
+* **Bonding PMD may fail to accept new slave ports in certain conditions.**
+
+ In certain conditions when using testpmd,
+ bonding may fail to register new slave ports.
+
+ Bugzilla entry: https://dpdk.org/tracker/show_bug.cgi?id=52.
+
+* **Unexpected performance regression in Vhost library.**
+
+ Patches fixing CVE-2018-1059 were expected to introduce a small performance
+ drop. However, in some setups, bigger performance drops have been measured
+ when running micro-benchmarks.
+
+ Bugzilla entry: https://dpdk.org/tracker/show_bug.cgi?id=48
+
+
+Shared Library Versions
+-----------------------
+
+.. Update any library version updated in this release and prepend with a ``+``
+ sign, like this:
+
+ librte_acl.so.2
+ + librte_cfgfile.so.2
+ librte_cmdline.so.2
+
+ This section is a comment. Do not overwrite or remove it.
+ =========================================================
+
+
+The libraries prepended with a plus sign were incremented in this version.
+
+.. code-block:: diff
+
+ librte_acl.so.2
+ librte_bbdev.so.1
+ librte_bitratestats.so.2
+ + librte_bpf.so.1
+ librte_bus_dpaa.so.1
+ librte_bus_fslmc.so.1
+ librte_bus_pci.so.1
+ librte_bus_vdev.so.1
+ librte_cfgfile.so.2
+ librte_cmdline.so.2
+ + librte_common_octeontx.so.1
+ + librte_compressdev.so.1
+ librte_cryptodev.so.4
+ librte_distributor.so.1
+ + librte_eal.so.7
+ + librte_ethdev.so.9
+ + librte_eventdev.so.4
+ librte_flow_classify.so.1
+ librte_gro.so.1
+ librte_gso.so.1
+ librte_hash.so.2
+ librte_ip_frag.so.1
+ librte_jobstats.so.1
+ librte_kni.so.2
+ librte_kvargs.so.1
+ librte_latencystats.so.1
+ librte_lpm.so.2
+ + librte_mbuf.so.4
+ + librte_mempool.so.4
+ + librte_meter.so.2
+ librte_metrics.so.1
+ librte_net.so.1
+ librte_pci.so.1
+ librte_pdump.so.2
+ librte_pipeline.so.3
+ librte_pmd_bnxt.so.2
+ librte_pmd_bond.so.2
+ librte_pmd_i40e.so.2
+ librte_pmd_ixgbe.so.2
+ + librte_pmd_dpaa2_cmdif.so.1
+ + librte_pmd_dpaa2_qdma.so.1
+ librte_pmd_ring.so.2
+ librte_pmd_softnic.so.1
+ librte_pmd_vhost.so.2
+ librte_port.so.3
+ librte_power.so.1
+ librte_rawdev.so.1
+ librte_reorder.so.1
+ + librte_ring.so.2
+ librte_sched.so.1
+ librte_security.so.1
+ librte_table.so.3
+ librte_timer.so.1
+ librte_vhost.so.3
+
+
+Tested Platforms
+----------------
+
+.. This section should contain a list of platforms that were tested with this
+ release.
+
+ The format is:
+
+ * <vendor> platform with <vendor> <type of devices> combinations
+
+ * List of CPU
+ * List of OS
+ * List of devices
+ * Other relevant details...
+
+ This section is a comment. Do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* Intel(R) platforms with Intel(R) NICs combinations
+
+ * CPU
+
+ * Intel(R) Atom(TM) CPU C2758 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU D-1541 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-4667 v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+ * Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v2 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
+ * Intel(R) Xeon(R) Platinum 8180 CPU @ 2.50GHz
+
+ * OS:
+
+ * CentOS 7.4
+ * Fedora 25
+ * Fedora 27
+ * Fedora 28
+ * FreeBSD 11.1
+ * Red Hat Enterprise Linux Server release 7.3
+ * SUSE Enterprise Linux 12
+ * Wind River Linux 8
+ * Ubuntu 14.04
+ * Ubuntu 16.04
+ * Ubuntu 16.10
+ * Ubuntu 17.10
+
+ * NICs:
+
+ * Intel(R) 82599ES 10 Gigabit Ethernet Controller
+
+ * Firmware version: 0x61bf0001
+ * Device id (pf/vf): 8086:10fb / 8086:10ed
+ * Driver version: 5.2.3 (ixgbe)
+
+ * Intel(R) Corporation Ethernet Connection X552/X557-AT 10GBASE-T
+
+ * Firmware version: 0x800003e7
+ * Device id (pf/vf): 8086:15ad / 8086:15a8
+ * Driver version: 4.4.6 (ixgbe)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA4 (4x10G)
+
+ * Firmware version: 6.01 0x80003221
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 2.4.6 (i40e)
+
+ * Intel Corporation Ethernet Connection X722 for 10GbE SFP+ (4x10G)
+
+ * Firmware version: 3.33 0x80000fd5 0.0.0
+ * Device id (pf/vf): 8086:37d0 / 8086:37cd
+ * Driver version: 2.4.3 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XXV710-DA2 (2x25G)
+
+ * Firmware version: 6.01 0x80003221
+ * Device id (pf/vf): 8086:158b / 8086:154c
+ * Driver version: 2.4.6 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XL710-QDA2 (2X40G)
+
+ * Firmware version: 6.01 0x8000321c
+ * Device id (pf/vf): 8086:1583 / 8086:154c
+ * Driver version: 2.4.6 (i40e)
+
+ * Intel(R) Corporation I350 Gigabit Network Connection
+
+ * Firmware version: 1.63, 0x80000dda
+ * Device id (pf/vf): 8086:1521 / 8086:1520
+ * Driver version: 5.4.0-k (igb)
+
+* Intel(R) platforms with Mellanox(R) NICs combinations
+
+ * CPU:
+
+ * Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz
+ * Intel(R) Xeon(R) CPU E5-2697A v4 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2697 v3 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz
+ * Intel(R) Xeon(R) CPU E5-2640 @ 2.50GHz
+ * Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz
+
+ * OS:
+
+ * Red Hat Enterprise Linux Server release 7.5 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.4 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.3 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.2 (Maipo)
+ * Ubuntu 18.04
+ * Ubuntu 17.10
+ * Ubuntu 16.10
+ * Ubuntu 16.04
+ * SUSE Linux Enterprise Server 15
+
+ * MLNX_OFED: 4.2-1.0.0.0
+ * MLNX_OFED: 4.3-2.0.2.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1007
+ * Firmware version: 2.42.5000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT/MCX413A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT/MCX413A-GCAT (1x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT/MCX416A-BCAT/MCX416A-GCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 Lx 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.21.1000 and above
+
+ * Mellanox(R) ConnectX-5 Ex EN 100G MCX516A-CDAT (2x100G)
+
+ * Host interface: PCI Express 4.0 x16
+ * Device ID: 15b3:1019
+ * Firmware version: 16.21.1000 and above
+
+* ARM platforms with Mellanox(R) NICs combinations
+
+ * CPU:
+
+ * Qualcomm ARM 1.1 2500MHz
+
+ * OS:
+
+ * Red Hat Enterprise Linux Server release 7.5 (Maipo)
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.22.0428
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.22.0428
+
+* ARM SoC combinations from Cavium (with integrated NICs)
+
+ * SoC:
+
+ * Cavium CN81xx
+ * Cavium CN83xx
+
+ * OS:
+
+ * Ubuntu 16.04.2 LTS with Cavium SDK-6.2.0-Patch2 release support package.
+
+* ARM SoC combinations from NXP (with integrated NICs)
+
+ * SoC:
+
+ * NXP/Freescale QorIQ LS1046A with ARM Cortex A72
+ * NXP/Freescale QorIQ LS2088A with ARM Cortex A72
+
+ * OS:
+
+ * Ubuntu 16.04.3 LTS with NXP QorIQ LSDK 1803 support packages
diff --git a/doc/guides/rel_notes/release_18_08.rst b/doc/guides/rel_notes/release_18_08.rst
new file mode 100644
index 00000000..321fa845
--- /dev/null
+++ b/doc/guides/rel_notes/release_18_08.rst
@@ -0,0 +1,549 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2018 The DPDK contributors
+
+DPDK Release 18.08
+==================
+
+.. **Read this first.**
+
+ The text in the sections below explains how to update the release notes.
+
+ Use proper spelling, capitalization and punctuation in all sections.
+
+ Variable and config names should be quoted as fixed width text:
+ ``LIKE_THIS``.
+
+ Build the docs and view the output file to ensure the changes are correct::
+
+ make doc-guides-html
+
+ xdg-open build/doc/html/guides/rel_notes/release_18_08.html
+
+
+New Features
+------------
+
+.. This section should contain new features added in this release.
+ Sample format:
+
+ * **Add a title in the past tense with a full stop.**
+
+ Add a short 1-2 sentence description in the past tense.
+ The description should be enough to allow someone scanning
+ the release notes to understand the new feature.
+
+ If the feature adds a lot of sub-features you can use a bullet list
+ like this:
+
+ * Added feature foo to do something.
+ * Enhanced feature bar to do something else.
+
+ Refer to the previous release notes for examples.
+
+ This section is a comment. Do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* **Added support for Hyper-V netvsc PMD.**
+
+ The new ``netvsc`` poll mode driver provides native support for
+ networking on Hyper-V. See the :doc:`../nics/netvsc` NIC driver guide
+ for more details on this new driver.
+
+* **Added Flow API support for CXGBE PMD.**
+
+  Flow API support has been added to the CXGBE Poll Mode Driver to offload
+  flows to Chelsio T5/T6 NICs; a usage sketch follows the list. Support has
+  been added for:
+
+ * Wildcard (LE-TCAM) and Exact (HASH) match filters.
+ * Match items: physical ingress port, IPv4, IPv6, TCP and UDP.
+ * Action items: queue, drop, count, and physical egress port redirect.
+
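+  The sketch below offloads one such flow through the generic ``rte_flow``
+  API; the port ID, queue index and matched address are hypothetical, and
+  whether the rule lands in the LE-TCAM or HASH region is decided by the
+  PMD.
+
+  .. code-block:: c
+
+     #include <rte_flow.h>
+
+     /* Steer ingress IPv4 traffic destined to 192.168.1.1 to Rx queue 1. */
+     static struct rte_flow *
+     create_ipv4_queue_flow(uint16_t port_id)
+     {
+         struct rte_flow_attr attr = { .ingress = 1 };
+         struct rte_flow_item_ipv4 ip_spec = {
+             .hdr.dst_addr = RTE_BE32(0xc0a80101), /* 192.168.1.1 */
+         };
+         struct rte_flow_item_ipv4 ip_mask = {
+             .hdr.dst_addr = RTE_BE32(0xffffffff),
+         };
+         struct rte_flow_item pattern[] = {
+             { .type = RTE_FLOW_ITEM_TYPE_ETH },
+             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
+               .spec = &ip_spec, .mask = &ip_mask },
+             { .type = RTE_FLOW_ITEM_TYPE_END },
+         };
+         struct rte_flow_action_queue queue = { .index = 1 };
+         struct rte_flow_action actions[] = {
+             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+             { .type = RTE_FLOW_ACTION_TYPE_END },
+         };
+         struct rte_flow_error error;
+
+         /* Validate first so that unsupported rules fail cleanly. */
+         if (rte_flow_validate(port_id, &attr, pattern, actions, &error))
+             return NULL;
+         return rte_flow_create(port_id, &attr, pattern, actions, &error);
+     }
+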
+* **Added ixgbe preferred Rx/Tx parameters.**
+
+ Rather than applications providing explicit Rx and Tx parameters such as
+  queue and burst sizes, they can request that the EAL instead use preferred
+ values provided by the PMD, falling back to defaults within the EAL if the
+ PMD does not provide any. The provision of such tuned values now includes
+ the ixgbe PMD.
+
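+  A minimal sketch of how an application might consume these hints is shown
+  below; the fallback values, ``port_id`` and ``mb_pool`` are illustrative
+  and assumed to be set up elsewhere.
+
+  .. code-block:: c
+
+     #include <rte_ethdev.h>
+
+     struct rte_eth_dev_info dev_info;
+     uint16_t ring_size, burst_size;
+
+     rte_eth_dev_info_get(port_id, &dev_info);
+
+     /* A value of 0 means the PMD expressed no preference. */
+     ring_size = dev_info.default_rxportconf.ring_size ?
+                 dev_info.default_rxportconf.ring_size : 512;
+     burst_size = dev_info.default_rxportconf.burst_size ?
+                  dev_info.default_rxportconf.burst_size : 32;
+
+     /* Alternatively, passing 0 descriptors to the queue setup call lets
+      * ethdev apply the PMD preferred ring size directly. */
+     rte_eth_rx_queue_setup(port_id, 0, 0,
+                            rte_eth_dev_socket_id(port_id), NULL, mb_pool);
+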
+* **Added descriptor status check support for fm10k.**
+
+  The ``rte_eth_rx_descriptor_status`` and ``rte_eth_tx_descriptor_status``
+  APIs are now supported by the fm10k PMD.
+
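+  A short generic usage sketch (the port, queue and offset values are
+  illustrative):
+
+  .. code-block:: c
+
+     #include <rte_ethdev.h>
+
+     /* Check the descriptor 64 entries ahead of the next one to be
+      * received on Rx queue 0. */
+     int status = rte_eth_rx_descriptor_status(port_id, 0, 64);
+
+     if (status == RTE_ETH_RX_DESC_DONE) {
+         /* At least 64 packets are pending; keep polling aggressively. */
+     } else if (status == RTE_ETH_RX_DESC_AVAIL) {
+         /* Descriptor still owned by the NIC; the queue is not backed up. */
+     }
+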
+* **Updated the enic driver.**
+
+  * Added a low cycle count Tx handler for no-offload Tx.
+  * Added a low cycle count Rx handler for non-scattered Rx.
+  * Made minor performance improvements to the scattered Rx handler.
+  * Added handlers to add/delete VxLAN port numbers.
+  * Added a devarg to specify the ingress VLAN rewrite mode.
+
+* **Updated mlx5 driver.**
+
+ Updated the mlx5 driver including the following changes:
+
+  * Added support for port representors.
+ * Added Flow API support for e-switch rules.
+ Added support for ACTION_PORT_ID, ACTION_DROP, ACTION_OF_POP_VLAN,
+ ACTION_OF_PUSH_VLAN, ACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP
+ and ITEM_PORT_ID.
+ * Added support for 32-bit compilation.
+
+* **Added TSO support for the mlx4 driver.**
+
+  Added TSO support to the mlx4 driver when using MLNX_OFED 4.4 and above.
+
+* **Reworked the SoftNIC PMD.**
+
+  The SoftNIC PMD infrastructure has been restructured to use the Packet
+  Framework, which makes it more flexible and modular, and easier to extend
+  with new functionality in the future.
+
+* **Updated the AESNI MB PMD.**
+
+ The AESNI MB PMD has been updated with additional support for:
+
+ * 3DES for 8, 16 and 24 byte keys.
+
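+  For illustration, a minimal sketch of a 3DES-CBC cipher transform with a
+  24 byte key; the key material and ``IV_OFFSET`` are hypothetical.
+
+  .. code-block:: c
+
+     #include <rte_cryptodev.h>
+
+     uint8_t key[24] = { 0 }; /* fill with real 3-key 3DES key material */
+
+     struct rte_crypto_sym_xform cipher_xform = {
+         .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+         .cipher = {
+             .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+             .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+             .key = { .data = key, .length = sizeof(key) },
+             /* The IV itself travels inside the crypto op. */
+             .iv = { .offset = IV_OFFSET, .length = 8 },
+         },
+     };
+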
+* **Added a new compression PMD using Intel's QuickAssist (QAT) device family.**
+
+  Added the new ``QAT`` compression driver, which offloads compression and
+  decompression operations to Intel(R) QuickAssist hardware. See the
+  :doc:`../compressdevs/qat_comp` compression driver guide for details on
+  this new driver.
+
+* **Updated the ISA-L PMD.**
+
+ Added support for chained mbufs (input and output).
+
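+  For illustration, a chained (multi-segment) mbuf can be built with the
+  standard mbuf API and then used as the operation's source or destination;
+  ``pool`` is an illustrative, already created mbuf pool.
+
+  .. code-block:: c
+
+     #include <rte_mbuf.h>
+
+     struct rte_mbuf *head = rte_pktmbuf_alloc(pool);
+     struct rte_mbuf *tail = rte_pktmbuf_alloc(pool);
+
+     /* ... fill both segments with data ... */
+
+     /* Link the segments; nb_segs and pkt_len of head are updated. */
+     if (rte_pktmbuf_chain(head, tail) != 0) {
+         /* too many segments, handle the error */
+     }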
+
+API Changes
+-----------
+
+.. This section should contain API changes. Sample format:
+
+ * Add a short 1-2 sentence description of the API change.
+ Use fixed width quotes for ``function_names`` or ``struct_names``.
+ Use the past tense.
+
+ This section is a comment. Do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* The path to the runtime config file has changed. The new path is determined
+ as follows:
+
+ - If DPDK is running as root, ``/var/run/dpdk/<prefix>/config``
+ - If DPDK is not running as root:
+
+ * If ``$XDG_RUNTIME_DIR`` is set, ``${XDG_RUNTIME_DIR}/dpdk/<prefix>/config``
+ * Otherwise, ``/tmp/dpdk/<prefix>/config``
+
+* eal: The function ``rte_eal_mbuf_default_mempool_ops`` was deprecated
+  and is removed in 18.08. It is replaced by
+  ``rte_mbuf_best_mempool_ops``, as sketched below.
+
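+  A minimal migration sketch; the pool name and sizing parameters are
+  illustrative.
+
+  .. code-block:: c
+
+     #include <rte_lcore.h>
+     #include <rte_mbuf.h>
+     #include <rte_mbuf_pool_ops.h>
+
+     /* Previously: ops = rte_eal_mbuf_default_mempool_ops(); */
+     const char *ops = rte_mbuf_best_mempool_ops();
+
+     struct rte_mempool *mp = rte_pktmbuf_pool_create_by_ops(
+         "mbuf_pool", 8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+         rte_socket_id(), ops);
+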
+* mempool: The following functions were deprecated and are removed in 18.08
+  (see the migration sketch after the list):
+
+ - ``rte_mempool_populate_iova_tab``
+ - ``rte_mempool_populate_phys_tab``
+ - ``rte_mempool_populate_phys`` (``rte_mempool_populate_iova`` should be used)
+ - ``rte_mempool_virt2phy`` (``rte_mempool_virt2iova`` should be used)
+ - ``rte_mempool_xmem_create``
+ - ``rte_mempool_xmem_size``
+ - ``rte_mempool_xmem_usage``
+
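+  The replacements are generally drop-in; e.g. a physical address lookup
+  becomes an IOVA lookup (``elt`` being an illustrative pointer to a
+  mempool element):
+
+  .. code-block:: c
+
+     #include <rte_mempool.h>
+
+     /* Previously: phys_addr_t pa = rte_mempool_virt2phy(mp, elt); */
+     rte_iova_t iova = rte_mempool_virt2iova(elt);
+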
+* ethdev: The old offload API is removed (a migration sketch follows):
+
+ - Rx per-port ``rte_eth_conf.rxmode.[bit-fields]``
+ - Tx per-queue ``rte_eth_txconf.txq_flags``
+ - ``ETH_TXQ_FLAGS_NO*``
+
+ The transition bits are removed:
+
+ - ``rte_eth_conf.rxmode.ignore_offload_bitfield``
+ - ``ETH_TXQ_FLAGS_IGNORE``
+
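+  A minimal migration sketch is shown below; the chosen offloads, queue
+  sizes and ``port_id`` are illustrative.
+
+  .. code-block:: c
+
+     #include <rte_ethdev.h>
+
+     struct rte_eth_conf port_conf = {
+         /* Offloads are now expressed as DEV_RX_OFFLOAD_ and
+          * DEV_TX_OFFLOAD_ flag masks instead of bit-fields and txq_flags. */
+         .rxmode = { .offloads = DEV_RX_OFFLOAD_CHECKSUM },
+         .txmode = { .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE },
+     };
+     struct rte_eth_dev_info dev_info;
+     struct rte_eth_txconf txconf;
+
+     rte_eth_dev_configure(port_id, 1, 1, &port_conf);
+
+     /* Per-queue Tx offloads: start from the PMD default and apply the
+      * port-level offloads, instead of setting ETH_TXQ_FLAGS_*. */
+     rte_eth_dev_info_get(port_id, &dev_info);
+     txconf = dev_info.default_txconf;
+     txconf.offloads = port_conf.txmode.offloads;
+     rte_eth_tx_queue_setup(port_id, 0, 512,
+                            rte_eth_dev_socket_id(port_id), &txconf);
+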
+* cryptodev: The following API changes have been made in 18.08:
+
+  - In the structure ``struct rte_cryptodev_info``, the field
+    ``rte_pci_device *pci_dev`` has been replaced with the field
+    ``struct rte_device *device``.
+ - Value 0 is accepted in ``sym.max_nb_sessions``, meaning that a device
+ supports an unlimited number of sessions.
+ - Two new fields of type ``uint16_t`` have been added:
+ ``min_mbuf_headroom_req`` and ``min_mbuf_tailroom_req``. These parameters
+ specify the recommended headroom and tailroom for mbufs to be processed by
+ the PMD.
+
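+  A short sketch of honouring the new requirements when sizing mbufs
+  (``cdev_id`` is an illustrative device ID):
+
+  .. code-block:: c
+
+     #include <rte_common.h>
+     #include <rte_cryptodev.h>
+     #include <rte_mbuf.h>
+
+     struct rte_cryptodev_info info;
+     uint16_t headroom, tailroom;
+
+     rte_cryptodev_info_get(cdev_id, &info);
+
+     /* Reserve at least the headroom/tailroom the PMD recommends. */
+     headroom = RTE_MAX(info.min_mbuf_headroom_req,
+                        (uint16_t)RTE_PKTMBUF_HEADROOM);
+     tailroom = info.min_mbuf_tailroom_req;
+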
+* cryptodev: The following functions were deprecated and are removed in 18.08:
+
+ - ``rte_cryptodev_queue_pair_start``
+ - ``rte_cryptodev_queue_pair_stop``
+ - ``rte_cryptodev_queue_pair_attach_sym_session``
+ - ``rte_cryptodev_queue_pair_detach_sym_session``
+
+* cryptodev: The following functions were deprecated and are replaced by
+  other functions in 18.08 (see the sketch after the list):
+
+ - ``rte_cryptodev_get_header_session_size`` is replaced with
+ ``rte_cryptodev_sym_get_header_session_size``
+ - ``rte_cryptodev_get_private_session_size`` is replaced with
+ ``rte_cryptodev_sym_get_private_session_size``
+
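+  The renamed functions keep the same semantics; e.g. when sizing the
+  elements of a session mempool (``cdev_id`` is illustrative):
+
+  .. code-block:: c
+
+     #include <rte_cryptodev.h>
+
+     /* Space for the generic session header ... */
+     unsigned int hdr_sz = rte_cryptodev_sym_get_header_session_size();
+     /* ... plus the device's private session data. */
+     unsigned int priv_sz =
+         rte_cryptodev_sym_get_private_session_size(cdev_id);
+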
+* cryptodev: Feature flag ``RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER`` is
+  replaced with the following more explicit flags (a capability-check
+  sketch follows the list):
+
+ - ``RTE_CRYPTODEV_FF_IN_PLACE_SGL``
+ - ``RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT``
+ - ``RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT``
+ - ``RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT``
+ - ``RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT``
+
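+  An application that previously tested the single flag can now check the
+  exact combination it relies on; a sketch, with ``cdev_id`` illustrative:
+
+  .. code-block:: c
+
+     #include <rte_cryptodev.h>
+
+     struct rte_cryptodev_info info;
+
+     rte_cryptodev_info_get(cdev_id, &info);
+
+     /* e.g. out-of-place with scatter-gather input, linear output */
+     if (!(info.feature_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT)) {
+         /* fall back to linearizing the input mbuf chain */
+     }
+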
+* cryptodev: Renamed cryptodev experimental APIs:
+
+  ``user_data`` is now used instead of ``private_data`` in the following
+  APIs, to avoid confusion with the existing session parameter
+  ``sess_private_data[]`` and related APIs; a short usage sketch follows
+  the list.
+
+ - ``rte_cryptodev_sym_session_set_private_data()`` changed to
+ ``rte_cryptodev_sym_session_set_user_data()``
+ - ``rte_cryptodev_sym_session_get_private_data()`` changed to
+ ``rte_cryptodev_sym_session_get_user_data()``
+
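+  In the sketch below, the attached ``struct app_sess_ctx`` is hypothetical
+  and ``sess`` is an already created session.
+
+  .. code-block:: c
+
+     #include <rte_cryptodev.h>
+
+     struct app_sess_ctx { uint32_t flow_id; }; /* hypothetical */
+     struct app_sess_ctx ctx = { .flow_id = 42 };
+
+     /* Attach application data to the session... */
+     rte_cryptodev_sym_session_set_user_data(sess, &ctx, sizeof(ctx));
+
+     /* ...and retrieve it later, e.g. after dequeue. */
+     struct app_sess_ctx *p =
+         rte_cryptodev_sym_session_get_user_data(sess);
+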
+* compressdev: Feature flag ``RTE_COMP_FF_MBUF_SCATTER_GATHER`` is
+  replaced with the following more explicit flags (a capability-check
+  sketch follows the list):
+
+ - ``RTE_COMP_FF_OOP_SGL_IN_SGL_OUT``
+ - ``RTE_COMP_FF_OOP_SGL_IN_LB_OUT``
+ - ``RTE_COMP_FF_OOP_LB_IN_SGL_OUT``
+
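+  As with cryptodev, applications should query the capability flags for the
+  exact combination they need; a sketch, with ``cdev_id`` illustrative:
+
+  .. code-block:: c
+
+     #include <rte_compressdev.h>
+
+     const struct rte_compressdev_capabilities *cap =
+         rte_compressdev_capability_get(cdev_id, RTE_COMP_ALGO_DEFLATE);
+
+     if (cap != NULL &&
+         (cap->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT)) {
+         /* scatter-gather supported on both input and output */
+     }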
+
+Shared Library Versions
+-----------------------
+
+.. Update any library version updated in this release
+ and prepend with a ``+`` sign, like this:
+
+ librte_acl.so.2
+ + librte_cfgfile.so.2
+ librte_cmdline.so.2
+
+ This section is a comment. Do not overwrite or remove it.
+ =========================================================
+
+The libraries prepended with a plus sign were incremented in this version.
+
+.. code-block:: diff
+
+ librte_acl.so.2
+ librte_bbdev.so.1
+ librte_bitratestats.so.2
+ librte_bpf.so.1
+ librte_bus_dpaa.so.1
+ librte_bus_fslmc.so.1
+ librte_bus_pci.so.1
+ librte_bus_vdev.so.1
+ + librte_bus_vmbus.so.1
+ librte_cfgfile.so.2
+ librte_cmdline.so.2
+ librte_common_octeontx.so.1
+ librte_compressdev.so.1
+ + librte_cryptodev.so.5
+ librte_distributor.so.1
+ + librte_eal.so.8
+ + librte_ethdev.so.10
+ librte_eventdev.so.4
+ librte_flow_classify.so.1
+ librte_gro.so.1
+ librte_gso.so.1
+ librte_hash.so.2
+ librte_ip_frag.so.1
+ librte_jobstats.so.1
+ librte_kni.so.2
+ librte_kvargs.so.1
+ librte_latencystats.so.1
+ librte_lpm.so.2
+ librte_mbuf.so.4
+ + librte_mempool.so.5
+ librte_meter.so.2
+ librte_metrics.so.1
+ librte_net.so.1
+ librte_pci.so.1
+ librte_pdump.so.2
+ librte_pipeline.so.3
+ librte_pmd_bnxt.so.2
+ librte_pmd_bond.so.2
+ librte_pmd_i40e.so.2
+ librte_pmd_ixgbe.so.2
+ librte_pmd_dpaa2_cmdif.so.1
+ librte_pmd_dpaa2_qdma.so.1
+ librte_pmd_ring.so.2
+ librte_pmd_softnic.so.1
+ librte_pmd_vhost.so.2
+ librte_port.so.3
+ librte_power.so.1
+ librte_rawdev.so.1
+ librte_reorder.so.1
+ librte_ring.so.2
+ librte_sched.so.1
+ librte_security.so.1
+ librte_table.so.3
+ librte_timer.so.1
+ librte_vhost.so.3
+
+
+Tested Platforms
+----------------
+
+.. This section should contain a list of platforms that were tested
+ with this release.
+
+ The format is:
+
+ * <vendor> platform with <vendor> <type of devices> combinations
+
+ * List of CPU
+ * List of OS
+ * List of devices
+ * Other relevant details...
+
+ This section is a comment. Do not overwrite or remove it.
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
+* Intel(R) platforms with Intel(R) NICs combinations
+
+ * CPU
+
+ * Intel(R) Atom(TM) CPU C3858 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU D-1541 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-4667 v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz
+ * Intel(R) Xeon(R) CPU E5-2695 v4 @ 2.10GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v2 @ 2.40GHz
+ * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
+ * Intel(R) Xeon(R) Platinum 8180 CPU @ 2.50GHz
+
+ * OS:
+
+ * CentOS 7.4
+ * Fedora 25
+ * Fedora 27
+ * Fedora 28
+ * FreeBSD 11.1
+ * Red Hat Enterprise Linux Server release 7.5
+ * SUSE Enterprise Linux 12
+ * Wind River Linux 8
+ * Ubuntu 14.04
+ * Ubuntu 16.04
+ * Ubuntu 16.10
+ * Ubuntu 17.10
+ * Ubuntu 18.04
+
+ * NICs:
+
+ * Intel(R) 82599ES 10 Gigabit Ethernet Controller
+
+ * Firmware version: 0x61bf0001
+ * Device id (pf/vf): 8086:10fb / 8086:10ed
+ * Driver version: 5.2.3 (ixgbe)
+
+ * Intel(R) Corporation Ethernet Connection X552/X557-AT 10GBASE-T
+
+ * Firmware version: 0x800003e7
+ * Device id (pf/vf): 8086:15ad / 8086:15a8
+ * Driver version: 4.4.6 (ixgbe)
+
+ * Intel(R) Ethernet Converged Network Adapter X710-DA4 (4x10G)
+
+ * Firmware version: 6.01 0x80003221
+ * Device id (pf/vf): 8086:1572 / 8086:154c
+ * Driver version: 2.4.6 (i40e)
+
+ * Intel Corporation Ethernet Connection X722 for 10GbE SFP+ (4x10G)
+
+ * Firmware version: 3.33 0x80000fd5 0.0.0
+ * Device id (pf/vf): 8086:37d0 / 8086:37cd
+ * Driver version: 2.4.3 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XXV710-DA2 (2x25G)
+
+ * Firmware version: 6.01 0x80003221
+ * Device id (pf/vf): 8086:158b / 8086:154c
+ * Driver version: 2.4.6 (i40e)
+
+ * Intel(R) Ethernet Converged Network Adapter XL710-QDA2 (2X40G)
+
+ * Firmware version: 6.01 0x8000321c
+ * Device id (pf/vf): 8086:1583 / 8086:154c
+ * Driver version: 2.4.6 (i40e)
+
+ * Intel(R) Corporation I350 Gigabit Network Connection
+
+ * Firmware version: 1.63, 0x80000dda
+ * Device id (pf/vf): 8086:1521 / 8086:1520
+ * Driver version: 5.4.0-k (igb)
+
+* Intel(R) platforms with Mellanox(R) NICs combinations
+
+ * CPU:
+
+ * Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz
+ * Intel(R) Xeon(R) CPU E5-2697A v4 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2697 v3 @ 2.60GHz
+ * Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz
+ * Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz
+ * Intel(R) Xeon(R) CPU E5-2640 @ 2.50GHz
+ * Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz
+
+ * OS:
+
+ * Red Hat Enterprise Linux Server release 7.5 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.4 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.3 (Maipo)
+ * Red Hat Enterprise Linux Server release 7.2 (Maipo)
+ * Ubuntu 18.04
+ * Ubuntu 17.10
+ * Ubuntu 16.04
+ * SUSE Linux Enterprise Server 15
+
+ * MLNX_OFED: 4.3-2.0.2.0
+ * MLNX_OFED: 4.4-2.0.1.0
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-3 Pro 40G MCX354A-FCC_Ax (2x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1007
+ * Firmware version: 2.42.5000
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4111A-XCAT (1x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4111A-ACAT (1x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX4131A-BCAT/MCX413A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 40G MCX415A-BCAT (1x40G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX4131A-GCAT/MCX413A-GCAT (1x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX414A-BCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-GCAT/MCX416A-BCAT/MCX416A-GCAT (2x50G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 50G MCX415A-CCAT (1x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 100G MCX416A-CCAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1013
+ * Firmware version: 12.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 Lx 10G MCX4121A-XCAT (2x10G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.21.1000 and above
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.21.1000 and above
+
+ * Mellanox(R) ConnectX-5 Ex EN 100G MCX516A-CDAT (2x100G)
+
+ * Host interface: PCI Express 4.0 x16
+ * Device ID: 15b3:1019
+ * Firmware version: 16.21.1000 and above
+
+* ARM platforms with Mellanox(R) NICs combinations
+
+ * CPU:
+
+ * Qualcomm ARM 1.1 2500MHz
+
+ * OS:
+
+ * Red Hat Enterprise Linux Server release 7.5 (Maipo)
+
+ * NICs:
+
+ * Mellanox(R) ConnectX(R)-4 Lx 25G MCX4121A-ACAT (2x25G)
+
+ * Host interface: PCI Express 3.0 x8
+ * Device ID: 15b3:1015
+ * Firmware version: 14.23.1000
+
+ * Mellanox(R) ConnectX(R)-5 100G MCX556A-ECAT (2x100G)
+
+ * Host interface: PCI Express 3.0 x16
+ * Device ID: 15b3:1017
+ * Firmware version: 16.23.1000
+
+* Mellanox BlueField SmartNIC
+
+  * Mellanox(R) BlueField SmartNIC MT416842 (2x25G)
+
+    * Host interface: PCI Express 3.0 x16
+    * Device ID: 15b3:a2d2
+    * Firmware version: 18.99.3950
+
+  * SoC ARM cores running OS:
+
+    * CentOS Linux release 7.4.1708 (AltArch)
+    * Mellanox MLNX_OFED 4.2-1.4.21.0
+
+  * DPDK application running on ARM cores inside the SmartNIC
+
+    * BlueField representors support is planned for the next release.
diff --git a/doc/guides/rel_notes/release_2_2.rst b/doc/guides/rel_notes/release_2_2.rst
index bb7d15a6..cea5c874 100644
--- a/doc/guides/rel_notes/release_2_2.rst
+++ b/doc/guides/rel_notes/release_2_2.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2016 The DPDK contributors
+
DPDK Release 2.2
================
diff --git a/doc/guides/sample_app_ug/bbdev_app.rst b/doc/guides/sample_app_ug/bbdev_app.rst
index f17125da..653f972f 100644
--- a/doc/guides/sample_app_ug/bbdev_app.rst
+++ b/doc/guides/sample_app_ug/bbdev_app.rst
@@ -31,8 +31,8 @@ Limitations
Compiling the Application
-------------------------
-#. DPDK needs to be built with ``turbo_sw`` PMD driver enabled along with
- ``FLEXRAN SDK`` Libraries. Refer to *SW Turbo Poll Mode Driver*
+#. DPDK needs to be built with ``baseband_turbo_sw`` PMD driver enabled along
+ with ``FLEXRAN SDK`` Libraries. Refer to *SW Turbo Poll Mode Driver*
documentation for more details on this.
#. Go to the example directory:
@@ -84,13 +84,14 @@ issue the command:
.. code-block:: console
- $ ./build/bbdev --vdev='turbo_sw' -w <NIC0PCIADDR> -c 0x38 --socket-mem=2,2 \
+ $ ./build/bbdev --vdev='baseband_turbo_sw' -w <NIC0PCIADDR> -c 0x38 --socket-mem=2,2 \
--file-prefix=bbdev -- -e 0x10 -d 0x20
where NIC0PCIADDR is the PCI address of the Rx port
-This command creates one virtual bbdev devices ``turbo_sw`` where the device
-gets linked to a corresponding ethernet port as whitelisted by the parameter -w.
+This command creates one virtual bbdev device ``baseband_turbo_sw`` where the
+device gets linked to a corresponding Ethernet port as whitelisted by
+the parameter -w.
3 cores are allocated to the application, and assigned as:
- core 3 is the master and used to print the stats live on screen,
diff --git a/doc/guides/sample_app_ug/ethtool.rst b/doc/guides/sample_app_ug/ethtool.rst
index 1b79aca1..47e09f6e 100644
--- a/doc/guides/sample_app_ug/ethtool.rst
+++ b/doc/guides/sample_app_ug/ethtool.rst
@@ -44,6 +44,7 @@ they do as follows:
* ``drvinfo``: Print driver info
* ``eeprom``: Dump EEPROM to file
+* ``module-eeprom``: Dump plugin module EEPROM to file
* ``link``: Print port link states
* ``macaddr``: Gets/sets MAC address
* ``mtu``: Set NIC MTU
@@ -97,6 +98,8 @@ the following functions:
- ``rte_ethtool_get_eeprom_len()``
- ``rte_ethtool_get_eeprom()``
- ``rte_ethtool_set_eeprom()``
+- ``rte_ethtool_get_module_info()``
+- ``rte_ethtool_get_module_eeprom()``
- ``rte_ethtool_get_pauseparam()``
- ``rte_ethtool_set_pauseparam()``
- ``rte_ethtool_net_open()``
diff --git a/doc/guides/sample_app_ug/flow_classify.rst b/doc/guides/sample_app_ug/flow_classify.rst
index 52474774..003ed035 100644
--- a/doc/guides/sample_app_ug/flow_classify.rst
+++ b/doc/guides/sample_app_ug/flow_classify.rst
@@ -187,7 +187,7 @@ The ``main()`` function also initializes all the ports using the user defined
.. code-block:: c
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (port_init(portid, mbuf_pool) != 0) {
rte_exit(EXIT_FAILURE,
"Cannot init port %" PRIu8 "\n", portid);
@@ -279,9 +279,6 @@ Forwarding application is shown below:
int retval;
uint16_t q;
- if (port >= rte_eth_dev_count())
- return -1;
-
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0)
@@ -424,14 +421,13 @@ following:
static __attribute__((noreturn)) void
lcore_main(cls_app)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
* for best performance.
*/
- for (port = 0; port < nb_ports; port++)
+ RTE_ETH_FOREACH_DEV(port)
if (rte_eth_dev_socket_id(port) > 0 &&
rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
printf("\n\n");
@@ -451,7 +447,7 @@ following:
* Receive packets on a port and forward them on the paired
* port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
*/
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Get burst of RX packets, from first port of pair. */
struct rte_mbuf *bufs[BURST_SIZE];
@@ -501,7 +497,7 @@ The main work of the application is done within the loop:
.. code-block:: c
for (;;) {
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Get burst of RX packets, from first port of pair. */
struct rte_mbuf *bufs[BURST_SIZE];
diff --git a/doc/guides/sample_app_ug/flow_filtering.rst b/doc/guides/sample_app_ug/flow_filtering.rst
index 06230c07..bd0ae1e2 100644
--- a/doc/guides/sample_app_ug/flow_filtering.rst
+++ b/doc/guides/sample_app_ug/flow_filtering.rst
@@ -1,33 +1,5 @@
-.. BSD LICENSE
- Copyright(c) 2017 Mellanox Corporation. All rights reserved.
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Mellanox Corporation nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright 2017 Mellanox Technologies, Ltd
Basic RTE Flow Filtering Sample Application
===========================================
@@ -167,7 +139,6 @@ application is shown below:
struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -244,7 +215,6 @@ The Ethernet port is configured with default settings using the
struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
diff --git a/doc/guides/sample_app_ug/img/ip_pipelines_1.svg b/doc/guides/sample_app_ug/img/ip_pipelines_1.svg
deleted file mode 100644
index a114ed82..00000000
--- a/doc/guides/sample_app_ug/img/ip_pipelines_1.svg
+++ /dev/null
@@ -1,738 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="638.18219"
- height="273.16391"
- id="svg2"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="ipPipelines_1_update.svg">
- <defs
- id="defs4">
- <marker
- inkscape:stockid="Arrow1Mend"
- orient="auto"
- refY="0.0"
- refX="0.0"
- id="Arrow1Mend"
- style="overflow:visible;">
- <path
- id="path3887"
- d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
- transform="scale(0.4) rotate(180) translate(10,0)" />
- </marker>
- <marker
- inkscape:stockid="Arrow1Lstart"
- orient="auto"
- refY="0.0"
- refX="0.0"
- id="Arrow1Lstart"
- style="overflow:visible">
- <path
- id="path3878"
- d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
- transform="scale(0.8) translate(12.5,0)" />
- </marker>
- <clipPath
- id="clipEmfPath1"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect2990"
- height="213.83858"
- width="585"
- y="0"
- x="0" />
- </clipPath>
- <clipPath
- id="clipEmfPath2"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect2993"
- height="71.379494"
- width="66.300003"
- y="118.0161"
- x="132.75" />
- </clipPath>
- <clipPath
- id="clipEmfPath3"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect2996"
- height="72.879066"
- width="66.300003"
- y="19.794313"
- x="132.3" />
- </clipPath>
- <clipPath
- id="clipEmfPath4"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect2999"
- height="75.428337"
- width="65.699997"
- y="56.983631"
- x="31.200001" />
- </clipPath>
- <clipPath
- id="clipEmfPath5"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect3002"
- height="157.60474"
- width="168"
- y="16.795176"
- x="331.95001" />
- </clipPath>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="3.959798"
- inkscape:cx="287.6677"
- inkscape:cy="153.39982"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="false"
- inkscape:window-width="1200"
- inkscape:window-height="1857"
- inkscape:window-x="-8"
- inkscape:window-y="-8"
- inkscape:window-maximized="1"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0" />
- <metadata
- id="metadata7">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="Layer 1"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(-82.82706,-337.43917)">
- <text
- xml:space="preserve"
- x="797.93707"
- y="551.73584"
- style="font-size:17.86045074px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Calibri"
- id="text3006"
- transform="scale(0.90359163,1.1066946)"> </text>
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath1)"
- d="m 132.375,113.1425 0,80.97674 66.9,0 0,-80.97674 -66.9,0 z"
- id="path3008"
- transform="matrix(1.1694604,0,0,1.4323235,83.010165,320.85569)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.03640038,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 237.46665,598.89724 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01403 0,-3.43658 0.70167,0 0,3.43658 z m 0,-6.01404 0,-3.43658 0.70167,0 0,3.43658 z m 0,-6.01402 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01403 0,-3.4366 0.70167,0 0,3.4366 z m 0,-6.01404 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01402 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01404 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01402 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01404 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01404 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01403 0,-3.43658 0.70167,0 0,3.43658 z m 0,-6.01402 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01404 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01403 0,-3.43658 0.70167,0 0,3.43658 z m 0,-6.01404 0,-3.43658 0.70167,0 0,3.43658 z m 0,-6.01402 0,-3.43659 0.70167,0 0,3.43659 z m 0,-6.01403 0,
--1.7183 c 0,-0.22821 0.16445,-0.42957 0.35084,-0.42957 l 1.40335,0 0,0.85914 -1.40335,0 0.35083,-0.42957 0,1.7183 z m 3.85922,-2.14787 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 1.05252,0 c 0.19734,0 0.35083,0.20136 0.35083,
-0.42957 l 0,2.14787 -0.70167,0 0,-2.14787 0.35084,0.42957 -1.05252,0 z m 1.40335,5.15488 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01404 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01402 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01404 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43658 -0.70167,0 0,-3.43658 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01404 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43658 -0.70167,0 0,-3.43658 z m 0,6.01402 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01404 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01402 0,3.43661 -0.70167,0 0,-3.43661 z m 0,6.01404 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01404 0,3.43659 -0.70167,
-0 0,-3.43659 z m 0,6.01402 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.00702 c 0,0.24163 -0.15349,0.42958 -0.35083,0.42958 l -0.35084,0 0,-0.85915 0.35084,0 -0.35084,0.42957 0,-3.00702 z m -2.8067,3.4366 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.10502,0 0,-0.85915 2.10502,0 z"
- id="path3010" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath2)"
- d="m 131.925,14.920714 0,82.626267 66.9,0 0,-82.626267 -66.9,0 z"
- id="path3012"
- transform="matrix(1.1694604,0,0,1.4323235,83.010165,320.85569)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.04853384,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 236.94039,460.57452 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01404 0,-3.43658 0.70168,0 0,3.43658 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,
-0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 1.92961,-4.51052 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85914 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85914 -2.8067,0 z m 3.33296,2.79222 0,
-3.43659 -0.70168,0 0,-3.43659 z m 0,6.01404 0,3.43658 -0.70168,0 0,-3.43658 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01404 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 z m 0,6.01404 0,3.43658 -0.70168,0 0,-3.43658 z m 0,6.01403 0,3.43659 -0.70168,
-0 0,-3.43659 z m 0,6.01403 0,1.71829 c 0,0.24164 -0.15349,0.42958 -0.35084,0.42958 l -1.40335,0 0,-0.85915 1.40335,0 -0.35084,0.42957 0,-1.71829 z m -3.85922,2.14787 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 z m -4.91174,0 -1.05251,0 0,-0.85915 1.05251,0 z"
- id="path3014" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath3)"
- d="m 30.825,52.110032 0,85.025578 66.3,0 0,-85.025578 -66.3,0 z"
- id="path3016"
- transform="matrix(1.1694604,0,0,1.4323235,83.010165,320.85569)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.03640038,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 118.70794,517.27825 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.4366 0.70168,0 0,3.4366 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01402 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.4366 0.70168,0 0,3.4366 z m 0,-6.01404 0,-3.43658 0.70168,0 0,3.43658 z m 0,-6.01402 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01404 0,-3.43659 0.70168,
-0 0,3.43659 z m 0,-6.01403 0,-3.43658 0.70168,0 0,3.43658 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 z m 0,-6.01403 0,-1.50351 c 0,-0.22821 0.16446,-0.42957 0.35084,-0.42957 l 1.57877,0 0,0.85915 -1.57877,0 0.35084,-0.42958 0,1.50351 z m 4.03464,-1.93308 2.80671,0 0,0.85915 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 z m 4.91173,0 2.8067,0 0,0.85915 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 z m 4.91173,0 2.8067,0 0,0.85915 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 z m 4.91173,0 2.8067,
-0 0,0.85915 -2.8067,0 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 z m 4.91174,0 0.17542,0 c 0.19734,0 0.35083,0.20136 0.35083,0.42957 l 0,3.22181 -0.70167,0 0,-3.22181 0.35084,0.42958 -0.17542,0 z m 0.52625,6.22882 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01404 0,3.43658 -0.70167,0 0,-3.43658 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01404 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01402 0,3.43659 -0.70167,
-0 0,-3.43659 z m 0,6.01403 0,3.4366 -0.70167,0 0,-3.4366 z m 0,6.01404 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01402 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01404 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,3.43659 -0.70167,0 0,-3.43659 z m 0,6.01403 0,1.7183 c 0,0.24163 -0.15349,0.42957 -0.35083,0.42957 l -1.40336,0 0,-0.85914 1.40336,0 -0.35084,0.42957 0,-1.7183 z m -3.85921,2.14787 -2.80671,0 0,-0.85914 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85914 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85914 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85914 2.8067,0 z m -4.91173,0 -2.8067,0 0,-0.85914 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85914 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85914 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85914 2.80671,0 z m -4.91173,0 -2.80671,0 0,-0.85914 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85914 2.8067,
-0 z m -4.91173,0 -2.80671,0 0,-0.85914 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85914 2.8067,0 z m -4.91173,0 -2.8067,0 0,-0.85914 2.8067,0 z m -4.91173,0 -2.80671,0 0,-0.85914 2.80671,0 z m -4.91174,0 -2.8067,0 0,-0.85914 2.8067,0 z m -4.91173,0 -0.35084,0 0,-0.85914 0.35084,0 z"
- id="path3018" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- clip-path="url(#clipEmfPath4)"
- d="m 15.225,83.001159 3.253125,0 c 0.35625,0 0.646875,0.299914 0.646875,0.656061 l 0,16.73894 -3.9,0 z"
- id="path3020"
- transform="matrix(1.1694604,0,0,1.4323235,83.010165,320.85569)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 105.3761,439.52541 3.8044,0 c 0.41662,0 0.75649,0.42958 0.75649,0.9397 l 0,24.19036 -4.56089,0 z"
- id="path3022" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 109.93699,439.7402 3.66187,0 c 0.40566,0 0.72361,0.40273 0.72361,0.89942 l 0,24.01585 -4.38548,0 z"
- id="path3024" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 130.46102,435.44446 0,75.17539 60.695,0 0,-75.17539 -60.695,0 z"
- id="path3026" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.77654135px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 130.46102,435.44446 60.695,0 0,75.17539 -60.695,0 z"
- id="path3028" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 100.8152,483.12714 3.8044,0 c 0.41662,0 0.7565,0.42957 0.7565,0.93969 l 0,23.97558 -4.5609,0 z"
- id="path3030" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 105.3761,482.91235 3.8044,0 c 0.41662,0 0.75649,0.42957 0.75649,0.93969 l 0,24.19037 -4.56089,0 z"
- id="path3032" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 109.93699,483.12714 3.66187,0 c 0.40566,0 0.72361,0.40273 0.72361,0.89942 l 0,24.01585 -4.38548,0 z"
- id="path3034" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 216.06552,512.55294 3.66187,0 c 0.40566,0 0.72361,0.40273 0.72361,0.89941 l 0,24.23065 -4.38548,0 z"
- id="path3036" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 220.451,512.55294 3.80439,0 c 0.41662,0 0.7565,0.42957 0.7565,0.93969 l 0,23.97558 -4.56089,0 z"
- id="path3038" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 225.01189,512.55294 3.8044,0 c 0.41662,0 0.7565,0.42957 0.7565,0.93969 l 0,24.19037 -4.5609,0 z"
- id="path3040" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 248.86889,377.88159 0,75.17539 60.51957,0 0,-75.17539 -60.51957,0 z"
- id="path3042" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.77654135px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 248.86889,377.88159 60.51957,0 0,75.17539 -60.51957,0 z"
- id="path3044" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 249.57056,488.06724 0,75.17539 60.695,0 0,-75.17539 -60.695,0 z"
- id="path3046" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.77654135px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 249.57056,488.06724 60.695,0 0,75.17539 -60.695,0 z"
- id="path3048" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 216.06552,404.30037 3.66187,0 c 0.40566,0 0.72361,0.40273 0.72361,0.89942 l 0,24.01586 -4.38548,0 z"
- id="path3050" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 220.451,404.30037 3.80439,0 c 0.41662,0 0.7565,0.42958 0.7565,0.9397 l 0,23.97558 -4.56089,0 z"
- id="path3052" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 225.01189,404.30037 3.8044,0 c 0.41662,0 0.7565,0.42958 0.7565,0.9397 l 0,23.97558 -4.5609,0 z"
- id="path3054" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 114.36632,451.31184 9.51649,0.57724 -0.0768,1.78542 -9.51648,-0.57724 0.0768,-1.78542 z m 9.48359,1.47666 -2.70803,-5.54418 8.54072,5.89321 -8.97926,4.8327 3.14657,-5.18173 z"
- id="path3056" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 115.24342,495.34314 9.51648,0.56382 -0.0768,1.78541 -9.51648,-0.56381 0.0768,-1.78542 z m 9.48359,1.46324 -2.70803,-5.54418 8.54071,5.89321 -8.9683,4.8327 3.13562,-5.18173 z"
- id="path3058" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 191.15602,445.78108 12.2574,0 0,-28.37871 -0.73457,0.89942 5.66823,0 0,-1.78542 -6.40279,0 0,28.36529 0.73456,-0.886 -11.52283,0 0,1.78542 z m 17.19106,-28.37871 -2.91634,5.36967 8.77095,-5.36967 -8.77095,-5.36967 2.91634,5.36967 z"
- id="path3060" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 192.03311,499.85366 11.87367,0 0,23.8145 -0.73456,-0.89943 5.2845,0 0,1.78542 -6.01906,0 0,-23.81448 0.73456,0.89941 -11.13911,0 0,-1.78542 z m 16.42361,23.8145 -2.91634,-5.36968 8.77095,5.36968 -8.77095,5.36966 2.91634,-5.36966 z"
- id="path3062" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 82.96631,451.31184 9.516484,0.57724 -0.07675,1.78542 -9.516484,-0.57724 0.07675,-1.78542 z m 9.483593,1.47666 -2.708032,-5.54418 8.540716,5.89321 -8.979263,4.8327 3.146579,-5.18173 z"
- id="path3064" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 83.667986,494.48399 9.516484,0.57724 -0.07675,1.78542 -9.516484,-0.57724 0.07675,-1.78542 z m 9.483593,1.47666 -2.708031,-5.54418 8.540715,5.89321 -8.979263,4.83271 3.146579,-5.18174 z"
- id="path3066" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 230.28542,414.58329 12.57536,0.0268 0,1.78542 -12.58631,-0.0268 0.0105,-1.78542 z m 12.57536,0.92627 -2.91634,-5.38309 8.75999,5.39651 -8.77096,5.34283 2.92731,-5.35625 z"
- id="path3068" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 229.59472,524.12458 14.12123,0.36245 -0.0329,1.78542 -14.12123,-0.36246 0.0329,-1.78541 z m 14.0993,1.24844 -2.82863,-5.43678 8.67228,5.58446 -8.85866,5.15487 3.01501,-5.30255 z"
- id="path3070" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 310.27652,412.865 12.57535,0.0268 0,1.78541 -12.58631,-0.0269 0.0105,-1.78542 z m 12.57535,0.92627 -2.91634,-5.3831 8.75999,5.39652 -8.77096,5.34282 2.92731,-5.35624 z"
- id="path3072" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 310.97819,524.55415 12.57536,0.0268 0,1.78542 -12.58632,-0.0268 0.0105,-1.78542 z m 12.57536,0.92627 -2.91634,-5.3831 8.75998,5.39652 -8.77095,5.34282 2.92731,-5.35624 z"
- id="path3074" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 329.38623,401.50814 3.8044,0 c 0.41662,0 0.7565,0.42958 0.7565,0.9397 l 0,24.19036 -4.5609,0 z"
- id="path3076" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 333.94713,401.50814 3.8044,0 c 0.41662,0 0.7565,0.42958 0.7565,0.9397 l 0,23.97558 -4.5609,0 z"
- id="path3078" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 338.50803,401.50814 3.8044,0 c 0.41661,0 0.75649,0.42958 0.75649,0.9397 l 0,24.19036 -4.56089,0 z"
- id="path3080" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 330.78959,512.55294 3.66187,0 c 0.40566,0 0.7236,0.40273 0.7236,0.89941 l 0,24.23065 -4.38547,0 z"
- id="path3082" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 335.17506,512.55294 3.8044,0 c 0.41663,0 0.7565,0.42957 0.7565,0.93969 l 0,24.19037 -4.5609,0 z"
- id="path3084" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 339.73596,512.55294 3.80439,0 c 0.41664,0 0.7565,0.42957 0.7565,0.93969 l 0,24.19037 -4.56089,0 z"
- id="path3086" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.012;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.012,0.012;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 343.95699,412.865 12.57534,0.0268 0,1.78541 -12.58631,-0.0269 0.0105,-1.78542 z m 12.57534,0.92627 -2.91634,-5.3831 8.75999,5.39652 -8.77096,5.34282 2.92731,-5.35624 z"
- id="path3088" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01213346;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.01213346,0.01213346;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 344.30781,524.76894 12.57536,0.0268 0,1.78542 -12.58632,-0.0268 0.0105,-1.7854 z m 12.57536,0.92627 -2.91634,-5.3831 8.75998,5.39652 -8.77095,5.34283 2.92731,-5.35625 z"
- id="path3090" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.09706768,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 470.42316,577.41856 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43658 0.70168,0 0,3.43658 -0.70168,0 z m 0,-6.01402 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43658 0.70168,0 0,3.43658 -0.70168,0 z m 0,-6.01402 0,-3.4366 0.70168,0 0,3.4366 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01402 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.4366 0.70168,0 0,3.4366 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01402 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,
-0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.4366 0.70168,0 0,3.4366 -0.70168,0 z m 0,-6.01404 0,-3.43658 0.70168,0 0,3.43658 -0.70168,0 z m 0,-6.01402 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43658 0.70168,0 0,3.43658 -0.70168,0 z m 0,-6.01403 0,
--3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01404 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 0,-6.01403 0,-3.43659 0.70168,0 0,3.43659 -0.70168,0 z m 1.22793,-5.36967 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,
-0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,
-0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,
-0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 4.91173,0 2.80671,0 0,0.85915 -2.80671,0 0,-0.85915 z m 4.91174,0 2.8067,0 0,0.85915 -2.8067,0 0,-0.85915 z m 5.08715,0.64436 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,
--3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01402 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,
-0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01402 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43658 -0.70168,0 0,-3.43658 0.70168,0 z m 0,6.01402 0,3.4366 -0.70168,0 0,-3.4366 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.43658 -0.70168,0 0,-3.43658 0.70168,0 z m 0,6.01404 0,3.43658 -0.70168,0 0,-3.43658 0.70168,0 z m 0,6.01402 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01403 0,3.4366 -0.70168,0 0,-3.4366 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01402 0,3.43659 -0.70168,0 0,-3.43659 0.70168,0 z m 0,6.01404 0,3.43659 -0.70168,
-0 0,-3.43659 0.70168,0 z m -1.40335,5.15488 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,
-0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,
-0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z m -4.91173,0 -2.80671,0 0,-0.85915 2.80671,0 0,0.85915 z m -4.91174,0 -2.8067,0 0,-0.85915 2.8067,0 0,0.85915 z"
- id="path3094" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- clip-path="url(#clipEmfPath5)"
- d="m 315.975,85.250513 3.2625,0 c 0.35625,0 0.6375,0.299914 0.6375,0.656061 l 0,16.738946 -3.9,0 z"
- id="path3096"
- transform="matrix(1.1694604,0,0,1.4323235,83.010165,320.85569)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 457.09131,442.74722 3.81537,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,24.19037 -4.5609,0 z"
- id="path3098" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 461.65221,442.962 3.66187,0 c 0.41662,0 0.7236,0.40273 0.7236,0.91285 l 0,24.00243 -4.38547,0 z"
- id="path3100" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 482.17624,438.66627 0,75.17539 60.69499,0 0,-75.17539 -60.69499,0 z"
- id="path3102" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.77654135px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 482.17624,438.66627 60.69499,0 0,75.17539 -60.69499,0 z"
- id="path3104" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 452.53041,486.34894 3.81537,0 c 0.41662,0 0.74553,0.42958 0.74553,0.93969 l 0,23.97559 -4.5609,0 z"
- id="path3106" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 457.09131,486.13415 3.81537,0 c 0.41662,0 0.74553,0.42958 0.74553,0.9397 l 0,24.19037 -4.5609,0 z"
- id="path3108" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 461.65221,486.34894 3.66187,0 c 0.41662,0 0.7236,0.40273 0.7236,0.91285 l 0,24.00243 -4.38547,0 z"
- id="path3110" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 567.78074,515.77474 3.66187,0 c 0.41662,0 0.7236,0.40272 0.7236,0.91285 l 0,24.21721 -4.38547,0 z"
- id="path3112" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 572.16621,515.77474 3.81537,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,23.97557 -4.5609,0 z"
- id="path3114" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 576.72711,515.77474 3.81536,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,24.19037 -4.56089,0 z"
- id="path3116" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 600.5841,381.1034 0,75.17539 60.51958,0 0,-75.17539 -60.51958,0 z"
- id="path3118" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.77654135px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 600.5841,381.1034 60.51958,0 0,75.17539 -60.51958,0 z"
- id="path3120" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 601.28578,491.28904 0,75.1754 60.69499,0 0,-75.1754 -60.69499,0 z"
- id="path3122" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.77654135px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 601.28578,491.28904 60.69499,0 0,75.1754 -60.69499,0 z"
- id="path3124" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 567.78074,407.52218 3.66187,0 c 0.41662,0 0.7236,0.40272 0.7236,0.91284 l 0,24.00243 -4.38547,0 z"
- id="path3126" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 572.16621,407.52218 3.81537,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,23.97558 -4.5609,0 z"
- id="path3128" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 576.72711,407.52218 3.81536,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,23.97558 -4.56089,0 z"
- id="path3130" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 466.08154,454.53364 9.51648,0.59067 -0.0657,1.77199 -9.51649,-0.56382 0.0657,-1.79884 z m 9.49455,1.47666 -2.71899,-5.53076 8.55168,5.87979 -8.99023,4.83271 3.15754,-5.18174 z"
- id="path3132" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 466.95863,498.56494 9.51649,0.56382 -0.0657,1.79884 -9.51648,-0.56382 0.0657,-1.79884 z m 9.47263,1.47667 -2.69707,-5.55762 8.55168,5.90665 -8.9683,4.8327 3.11369,-5.18173 z"
- id="path3134" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 542.87123,449.00288 12.25741,0 0,-28.37871 -0.72361,0.91285 5.65727,0 0,-1.79884 -6.4028,0 0,28.37871 0.74553,-0.886 -11.5338,0 0,1.77199 z m 17.19107,-28.37871 -2.91634,5.36967 8.77095,-5.36967 -8.77095,-5.36967 2.91634,5.36967 z"
- id="path3136" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 543.74833,503.07547 11.88464,0 0,23.81448 -0.74553,-0.88599 5.2845,0 0,1.77199 -6.00811,0 0,-23.81449 0.72361,0.91285 -11.13911,0 0,-1.79884 z m 16.42361,23.81448 -2.91635,-5.36966 8.77096,5.36966 -8.77096,5.36967 2.91635,-5.36967 z"
- id="path3138" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 434.68153,454.53364 9.51648,0.59067 -0.0658,1.77199 -9.51649,-0.56382 0.0657,-1.79884 z m 9.49455,1.47666 -2.71899,-5.53076 8.55168,5.87979 -8.99023,4.83271 3.15754,-5.18174 z"
- id="path3140" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 435.3832,497.70579 9.51649,0.59068 -0.0657,1.77198 -9.51648,-0.56382 0.0657,-1.79884 z m 9.49456,1.47667 -2.719,-5.53076 8.55168,5.87979 -8.99022,4.8327 3.15754,-5.18173 z"
- id="path3142" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 582.01161,417.8051 12.56439,0.0268 0,1.79884 -12.58632,-0.0269 0.022,-1.79884 z m 12.56439,0.93969 -2.91634,-5.39652 8.77095,5.39652 -8.77095,5.34282 2.91634,-5.34282 z"
- id="path3144" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 581.30993,527.34638 14.12124,0.37587 -0.022,1.772 -14.12123,-0.34903 0.022,-1.79884 z m 14.09931,1.26188 -2.82863,-5.45022 8.68324,5.58445 -8.85866,5.15489 3.00405,-5.28912 z"
- id="path3146" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 662.0027,416.0868 12.56439,0.0269 0,1.79884 -12.58632,-0.0269 0.022,-1.79884 z m 12.56439,0.93969 -2.91634,-5.39652 8.77095,5.39652 -8.77095,5.34282 2.91634,-5.34282 z"
- id="path3148" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-dasharray:0.02426692,0.02426692;stroke-miterlimit:4;stroke-dashoffset:0;marker-mid:none"
- d="m 662.70438,527.77595 12.56439,0.0268 0,1.79884 -12.58632,-0.0268 0.022,-1.79884 z m 12.56439,0.9397 -2.91635,-5.39652 8.77096,5.39652 -8.77096,5.34281 2.91635,-5.34281 z"
- id="path3150" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 681.10145,404.72995 3.81536,0 c 0.41662,0 0.74554,0.42957 0.74554,0.93969 l 0,24.19037 -4.5609,0 z"
- id="path3152" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 685.66235,404.72995 3.81536,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,23.97558 -4.56089,0 z"
- id="path3154" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 690.22324,404.72995 3.81537,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,24.19037 -4.5609,0 z"
- id="path3156" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 682.5048,515.77474 3.66188,0 c 0.41662,0 0.7236,0.40272 0.7236,0.91285 l 0,24.21721 -4.38548,0 z"
- id="path3158" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 686.89028,515.77474 3.81536,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,24.19037 -4.56089,0 z"
- id="path3160" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 691.45117,515.77474 3.81537,0 c 0.41662,0 0.74553,0.42957 0.74553,0.93969 l 0,24.19037 -4.5609,0 z"
- id="path3162" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 695.68316,416.0868 12.56439,0.0269 0,1.79884 -12.58632,-0.0269 0.0219,-1.79884 z m 12.56439,0.93969 -2.91634,-5.39652 8.77095,5.39652 -8.77095,5.34282 2.91634,-5.34282 z"
- id="path3164" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.02426692px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 696.034,527.99074 12.56439,0.0269 0,1.79882 -12.58632,-0.0268 0.022,-1.79884 z m 12.56439,0.93969 -2.91634,-5.39652 8.77095,5.39652 -8.77095,5.34282 2.91634,-5.34282 z"
- id="path3166" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 374.1181,472.44149 42.53912,0 0,-3.49028 5.70112,6.98057 -5.70112,6.98057 0,-3.49028 -42.53912,0 z"
- id="path3168" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.5530827px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 374.1181,472.44149 42.53912,0 0,-3.49028 5.70112,6.98057 -5.70112,6.98057 0,-3.49028 -42.53912,0 z"
- id="path3170" />
- </g>
- <g
- inkscape:groupmode="layer"
- id="layer2"
- inkscape:label="blocks"
- transform="translate(-82.82706,-337.43917)">
- <rect
- style="fill:none;stroke:#000000;stroke-width:0;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:0,0;stroke-dashoffset:0;stroke-linejoin:miter"
- id="rect4431"
- width="61.07143"
- height="75.714287"
- x="247.14285"
- y="377.00504" />
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="129.47055"
- y="417.48993"
- id="text4458-5-6"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan4460-7-1"
- x="129.47055"
- y="417.48993"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans">CPU Core 1</tspan><tspan
- sodipodi:role="line"
- x="129.47055"
- y="431.23993"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668" /></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="248.86055"
- y="358.53632"
- id="text4458-5-6-8-7"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan4460-7-1-9-9"
- x="248.86055"
- y="358.53632"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans">CPU Core 2</tspan><tspan
- sodipodi:role="line"
- x="248.86055"
- y="372.28632"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5" /></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="249.61816"
- y="589.6087"
- id="text4458-5-6-8-4"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="249.61816"
- y="589.6087"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-1">CPU Core 3</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:40px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
- x="145.96704"
- y="472.28207"
- id="text4731"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan4733"
- x="145.96704"
- y="472.28207" /></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="154.6613"
- y="476.98529"
- id="text4458-5-6-4"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="154.6613"
- y="476.98529"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-1">P1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="272.26172"
- y="419.42242"
- id="text4458-5-6-4-3"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="272.26172"
- y="419.42242"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-1-8">P2</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="273.77084"
- y="529.60803"
- id="text4458-5-6-4-7"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="273.77084"
- y="529.60803"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-1-4">P3</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="547.64105"
- y="361.29749"
- id="text4458-5-6-3"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- id="tspan4460-7-1-1"
- x="547.64105"
- y="361.29749"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans">CPU Core 1</tspan><tspan
- sodipodi:role="line"
- x="547.64105"
- y="375.04749"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-9" /></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="506.37653"
- y="480.20709"
- id="text4458-5-6-4-8"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="506.37653"
- y="480.20709"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-1-6">P1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="623.97693"
- y="422.64423"
- id="text4458-5-6-4-3-0"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="623.97693"
- y="422.64423"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-1-8-2">P2</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="624.72607"
- y="532.76007"
- id="text4458-5-6-4-7-4"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="624.72607"
- y="532.76007"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-1-4-8">P3</tspan></text>
- </g>
-</svg>
diff --git a/doc/guides/sample_app_ug/img/ip_pipelines_2.svg b/doc/guides/sample_app_ug/img/ip_pipelines_2.svg
deleted file mode 100644
index 5252b174..00000000
--- a/doc/guides/sample_app_ug/img/ip_pipelines_2.svg
+++ /dev/null
@@ -1,997 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="520.43439"
- height="438.61716"
- id="svg5382"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="ipPipelines_2_update.svg">
- <defs
- id="defs5384">
- <marker
- inkscape:stockid="Arrow1Lstart"
- orient="auto"
- refY="0.0"
- refX="0.0"
- id="Arrow1Lstart"
- style="overflow:visible">
- <path
- id="path4018"
- d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt"
- transform="scale(0.8) translate(12.5,0)" />
- </marker>
- <clipPath
- id="clipEmfPath1"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5395"
- height="465.83859"
- width="585"
- y="0"
- x="0" />
- </clipPath>
- <clipPath
- id="clipEmfPath2"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5398"
- height="0"
- width="37.650002"
- y="202.92325"
- x="120.9" />
- </clipPath>
- <clipPath
- id="clipEmfPath3"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5401"
- height="186.87537"
- width="391.64999"
- y="247.76733"
- x="49.650002" />
- </clipPath>
- <clipPath
- id="clipEmfPath4"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5404"
- height="93.137718"
- width="142.64999"
- y="280.01309"
- x="72.150002" />
- </clipPath>
- <clipPath
- id="clipEmfPath5"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5407"
- height="13.648201"
- width="45.900002"
- y="308.50934"
- x="76.650002" />
- </clipPath>
- <clipPath
- id="clipEmfPath6"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5410"
- height="13.648201"
- width="83.400002"
- y="308.50934"
- x="126.15" />
- </clipPath>
- <clipPath
- id="clipEmfPath7"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5413"
- height="90.888016"
- width="161.39999"
- y="333.25607"
- x="257.39999" />
- </clipPath>
- <clipPath
- id="clipEmfPath8"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5416"
- height="13.648201"
- width="54.599998"
- y="364.75192"
- x="262.64999" />
- </clipPath>
- <clipPath
- id="clipEmfPath9"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5419"
- height="13.648201"
- width="90.150002"
- y="364.75192"
- x="321.60001" />
- </clipPath>
- <clipPath
- id="clipEmfPath10"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5422"
- height="133.63239"
- width="240"
- y="75.590034"
- x="240" />
- </clipPath>
- <clipPath
- id="clipEmfPath11"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5425"
- height="137.3819"
- width="191.85001"
- y="20.847252"
- x="39.75" />
- </clipPath>
- <clipPath
- id="clipEmfPath12"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5428"
- height="93.137718"
- width="142.64999"
- y="54.592804"
- x="66.449997" />
- </clipPath>
- <clipPath
- id="clipEmfPath13"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5431"
- height="13.648201"
- width="45.900002"
- y="83.08905"
- x="70.949997" />
- </clipPath>
- <clipPath
- id="clipEmfPath14"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5434"
- height="13.648201"
- width="83.400002"
- y="83.08905"
- x="120.45" />
- </clipPath>
- <clipPath
- id="clipEmfPath15"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5437"
- height="90.888016"
- width="161.39999"
- y="107.83578"
- x="298.95001" />
- </clipPath>
- <clipPath
- id="clipEmfPath16"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5440"
- height="13.648201"
- width="54.599998"
- y="139.33163"
- x="304.20001" />
- </clipPath>
- <clipPath
- id="clipEmfPath17"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect5443"
- height="13.648201"
- width="90.150002"
- y="139.33163"
- x="363.29999" />
- </clipPath>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="2.8"
- inkscape:cx="223.36548"
- inkscape:cy="227.70504"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="false"
- inkscape:window-width="1083"
- inkscape:window-height="851"
- inkscape:window-x="40"
- inkscape:window-y="767"
- inkscape:window-maximized="0"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0" />
- <metadata
- id="metadata5387">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="Layer 1"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(-132.40505,-338.57503)">
- <text
- xml:space="preserve"
- x="652.83942"
- y="777.1922"
- style="font-size:13.80000019px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Calibri"
- id="text5447"> </text>
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath2)"
- d="m 49.275,242.89298 0,196.4741 392.25,0 0,-196.4741 -392.25,0 z"
- id="path5449"
- transform="translate(112.83943,322.9021)" />
- <rect
- style="fill:none;stroke:#000000;stroke-width:1.002;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3.006,1.002;stroke-dashoffset:0"
- id="rect4458-1-7"
- width="393.01892"
- height="197.3044"
- x="161.22458"
- y="565.61206" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945-9"
- width="15.714286"
- height="18.571428"
- x="153.47649"
- y="664.6922" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945-4"
- width="15.714286"
- height="18.571428"
- x="547.40503"
- y="689.6922" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath3)"
- d="m 71.775,275.13873 0,102.73646 143.25,0 0,-102.73646 -143.25,0 z"
- id="path5453"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 184.61443,598.04083 143.25,0 0,102.73646 -143.25,0 z"
- id="path5455" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath4)"
- d="m 76.275,303.63497 0,23.24694 46.5,0 0,-23.24694 -46.5,0 z"
- id="path5457"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 189.11443,626.53707 46.5,0 0,23.24694 -46.5,0 z"
- id="path5459" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath5)"
- d="m 125.775,303.63497 0,23.24694 84,0 0,-23.24694 -84,0 z"
- id="path5461"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 238.61443,626.53707 84,0 0,23.24694 -84,0 z"
- id="path5463" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath6)"
- d="m 76.275,329.88151 0,41.99447 46.5,0 0,-41.99447 -46.5,0 z"
- id="path5465"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 189.11443,652.78361 46.5,0 0,41.99447 -46.5,0 z"
- id="path5467" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 238.61443,652.78361 0,42.74437 84,0 0,-42.74437 -84,0 z"
- id="path5469" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 238.61443,652.78361 84,0 0,42.74437 -84,0 z"
- id="path5471" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 369.86443,651.28381 0,100.48676 162,0 0,-100.48676 -162,0 z"
- id="path5473" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 369.86443,651.28381 162,0 0,100.48676 -162,0 z"
- id="path5475" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath7)"
- d="m 262.275,359.87756 0,23.24694 55.2,0 0,-23.24694 -55.2,0 z"
- id="path5477"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 375.11443,682.77966 55.2,0 0,23.24694 -55.2,0 z"
- id="path5479" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath8)"
- d="m 321.225,359.87756 0,23.24694 90.9,0 0,-23.24694 -90.9,0 z"
- id="path5481"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 434.06443,682.77966 90.9,0 0,23.24694 -90.9,0 z"
- id="path5483" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath9)"
- d="m 261.525,386.874 0,36.74516 55.95,0 0,-36.74516 -55.95,0 z"
- id="path5485"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 374.36443,709.7761 55.95,0 0,36.74516 -55.95,0 z"
- id="path5487" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 434.81443,709.7761 0,36.74516 90.15,0 0,-36.74516 -90.15,0 z"
- id="path5489" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 434.81443,709.7761 90.15,0 0,36.74516 -90.15,0 z"
- id="path5491" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945-0"
- width="15.714286"
- height="18.571428"
- x="152.76219"
- y="617.90649" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 155.21443,618.13818 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65617 l 0,16.74154 -3.9,0 z"
- id="path5493" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 159.11443,617.9882 3.13125,0 c 0.35625,0 0.61875,0.28121 0.61875,0.63742 l 0,16.91027 -3.75,0 z"
- id="path5495" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 162.86443,618.13818 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65617 l 0,16.74154 -3.9,0 z"
- id="path5497" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 159.11443,665.23197 3.13125,0 c 0.35625,0 0.61875,0.28122 0.61875,0.63742 l 0,16.91027 -3.75,0 z"
- id="path5501" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 162.86443,665.38195 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65617 l 0,16.74154 -3.9,0 z"
- id="path5503" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.05625,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 138.13318,627.94314 12.1125,-0.33746 -0.0375,-1.25608 -12.09375,0.33745 0.0188,1.25609 z m 12.09375,-0.95612 -2.4,3.80574 7.3875,-3.95573 -7.59375,-3.52453 2.60625,3.67452 z"
- id="path5505" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.05625,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 167.53318,627.94314 12.1125,-0.33746 -0.0375,-1.25608 -12.09375,0.33745 0.0187,1.25609 z m 12.09375,-0.95612 -2.4,3.80574 7.3875,-3.95573 -7.59375,-3.52453 2.60625,3.67452 z"
- id="path5507" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.05625,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 166.78318,674.43701 12.1125,-0.33746 -0.0375,-1.25608 -12.09375,0.33746 0.0187,1.25608 z m 12.09375,-0.95612 -2.4,3.80574 7.3875,-3.95572 -7.59375,-3.52454 2.60625,3.67452 z"
- id="path5509" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.05625,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 138.13318,674.43701 12.1125,-0.33746 -0.0375,-1.25608 -12.09375,0.33746 0.0188,1.25608 z m 12.09375,-0.95612 -2.4,3.80574 7.3875,-3.95572 -7.59375,-3.52454 2.60625,3.67452 z"
- id="path5511" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 155.21443,665.38195 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65617 l 0,16.74154 -3.9,0 z"
- id="path5499" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945"
- width="15.714286"
- height="18.571428"
- x="547.40509"
- y="612.90656" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 548.96443,613.33881 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65617 l 0,16.89152 -3.9,0 z"
- id="path5513" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 552.86443,613.33881 3.13125,0 c 0.35625,0 0.61875,0.28122 0.61875,0.63742 l 0,16.76029 -3.75,0 z"
- id="path5515" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 556.61443,613.33881 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65617 l 0,16.89152 -3.9,0 z"
- id="path5517" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 328.76443,621.419 215.04375,0 0,1.25608 -215.04375,0 0,-1.25608 z m 215.04375,0.61867 -2.49375,-3.74951 7.5,3.74951 -7.5,3.7495 2.49375,-3.7495 z"
- id="path5519" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.05625,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 560.38318,623.14377 12.1125,-0.33745 -0.0375,-1.25609 -12.09375,0.33746 0.0188,1.25608 z m 12.09375,-0.95612 -2.4,3.80575 7.3875,-3.95573 -7.59375,-3.52454 2.60625,3.67452 z"
- id="path5521" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 329.36443,682.16099 20.86875,0 0,29.93981 -0.61875,-0.63742 15.225,0 0,1.25608 -15.8625,0 0,-29.9398 0.6375,0.63742 -20.25,0 0,-1.25609 z m 35.475,29.93981 -2.49375,-3.74951 7.5,3.74951 -7.5,3.7495 2.49375,-3.7495 z"
- id="path5523" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 548.81443,690.57863 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65617 l 0,16.89152 -3.9,0 z"
- id="path5525" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 552.71443,690.57863 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65617 l 0,16.74154 -3.9,0 z"
- id="path5527" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 556.61443,690.57863 3.13125,0 c 0.35625,0 0.61875,0.28122 0.61875,0.63742 l 0,16.91027 -3.75,0 z"
- id="path5529" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 530.98318,699.18375 12.1125,-0.33746 -0.0375,-1.25608 -12.09375,0.33745 0.0188,1.25609 z m 12.09375,-0.95613 -2.4,3.80575 7.3875,-3.95573 -7.59375,-3.52453 2.60625,3.67451 z"
- id="path5531" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 561.88318,698.73381 12.1125,-0.33746 -0.0375,-1.25608 -12.09375,0.33745 0.0188,1.25609 z m 12.09375,-0.95613 -2.4,3.80575 7.3875,-3.95573 -7.59375,-3.52453 2.60625,3.67451 z"
- id="path5533" />
- <rect
- style="fill:none;stroke:#000000;stroke-width:1.03885794;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3.11657381,1.03885794;stroke-dashoffset:0"
- id="rect4458-1"
- width="212.04707"
- height="146.38971"
- x="384.98563"
- y="391.8602" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945-48"
- width="15.714286"
- height="18.571428"
- x="586.69073"
- y="463.62076" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath10)"
- d="m 39.225,15.972894 0,146.980626 192.6,0 0,-146.980626 -192.6,0 z"
- id="path5539"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath11)"
- d="m 66.075,49.718446 0,102.736454 143.25,0 0,-102.736454 -143.25,0 z"
- id="path5543"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 178.91443,372.62055 143.25,0 0,102.73645 -143.25,0 z"
- id="path5545" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath12)"
- d="m 70.575,78.21469 0,23.24694 46.5,0 0,-23.24694 -46.5,0 z"
- id="path5547"
- transform="translate(112.83943,322.9021)" />
- <rect
- style="fill:none;stroke:#000000;stroke-width:0.99358916;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:2.98076741, 0.99358914;stroke-dashoffset:0"
- id="rect4458"
- width="193.90961"
- height="146.43498"
- x="155.25899"
- y="342.90326" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 183.41443,401.11679 46.5,0 0,23.24694 -46.5,0 z"
- id="path5549" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath13)"
- d="m 120.075,78.21469 0,23.24694 84,0 0,-23.24694 -84,0 z"
- id="path5551"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 232.91443,401.11679 84,0 0,23.24694 -84,0 z"
- id="path5553" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath14)"
- d="m 70.575,104.46123 0,41.99446 46.5,0 0,-41.99446 -46.5,0 z"
- id="path5555"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 183.41443,427.36333 46.5,0 0,41.99446 -46.5,0 z"
- id="path5557" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 232.91443,427.36333 0,42.74437 84,0 0,-42.74437 -84,0 z"
- id="path5559" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 232.91443,427.36333 84,0 0,42.74437 -84,0 z"
- id="path5561" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 408.38397,422.83307 0,100.48675 162,0 0,-100.48675 -162,0 z"
- id="path5563" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 408.38397,422.83307 162,0 0,100.48675 -162,0 z"
- id="path5565" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath15)"
- d="m 303.825,134.45728 0,23.24693 55.2,0 0,-23.24693 -55.2,0 z"
- id="path5567"
- transform="translate(109.80897,319.87164)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 413.63397,454.32892 55.2,0 0,23.24693 -55.2,0 z"
- id="path5569" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath16)"
- d="m 362.775,134.45728 0,23.24693 90.9,0 0,-23.24693 -90.9,0 z"
- id="path5571"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 475.58964,454.32892 90.9,0 0,23.24693 -90.9,0 z"
- id="path5573" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath17)"
- d="m 303.075,161.45372 0,36.74515 55.95,0 0,-36.74515 -55.95,0 z"
- id="path5575"
- transform="translate(112.83943,322.9021)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 412.88397,481.32536 55.95,0 0,36.74515 -55.95,0 z"
- id="path5577" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 473.33397,481.32536 0,36.74515 90.15,0 0,-36.74515 -90.15,0 z"
- id="path5579" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 473.33397,481.32536 90.15,0 0,36.74515 -90.15,0 z"
- id="path5581" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945-45"
- width="15.714286"
- height="18.571428"
- x="147.94077"
- y="392.37076" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 149.51443,392.7179 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74154 -3.9,0 z"
- id="path5583" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 153.41443,392.56792 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.89152 -3.9,0 z"
- id="path5585" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 157.31443,392.7179 3.13125,0 c 0.34687,0 0.61875,0.28121 0.61875,0.62804 l 0,16.76966 -3.75,0 z"
- id="path5587" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945-5"
- width="15.714286"
- height="18.571428"
- x="147.76219"
- y="439.6922" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 149.51443,439.96167 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74155 -3.9,0 z"
- id="path5589" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 153.41443,439.81169 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.89153 -3.9,0 z"
- id="path5591" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 157.31443,439.96167 3.13125,0 c 0.34687,0 0.61875,0.28121 0.61875,0.62804 l 0,16.76967 -3.75,0 z"
- id="path5593" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009,0.009;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 132.43318,402.51348 12.10312,-0.32808 -0.0375,-1.24671 -12.09375,0.32808 0.0281,1.24671 z m 12.08437,-0.95612 -2.39062,3.81512 7.3875,-3.95573 -7.59375,-3.53391 2.59687,3.67452 z"
- id="path5595" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009,0.009;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 161.83318,402.51348 12.10312,-0.32808 -0.0375,-1.24671 -12.09375,0.32808 0.0281,1.24671 z m 12.08437,-0.95612 -2.39062,3.81512 7.3875,-3.95573 -7.59375,-3.53391 2.59687,3.67452 z"
- id="path5597" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 161.08318,449.00735 12.10312,-0.32808 -0.0375,-1.24671 -12.09375,0.32808 0.0281,1.24671 z m 12.08437,-0.95612 -2.39062,3.81512 7.3875,-3.95573 -7.59375,-3.53391 2.59687,3.67452 z"
- id="path5599" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009,0.009;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 132.43318,449.00735 12.10312,-0.32808 -0.0375,-1.24671 -12.09375,0.32808 0.0281,1.24671 z m 12.08437,-0.95612 -2.39062,3.81512 7.3875,-3.95573 -7.59375,-3.53391 2.59687,3.67452 z"
- id="path5601" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945-2"
- width="15.714286"
- height="18.571428"
- x="341.36935"
- y="371.26364" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 343.61443,372.17061 3.13125,0 c 0.34687,0 0.61875,0.28121 0.61875,0.62804 l 0,16.76966 -3.75,0 z"
- id="path5603" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 347.36443,372.17061 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74154 -3.9,0 z"
- id="path5605" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 351.26443,372.17061 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74154 -3.9,0 z"
- id="path5607" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 323.21443,378.75099 13.7625,0 0,1.24671 -13.7625,0 0,-1.24671 z m 13.7625,0.61867 -2.50312,-3.74951 7.5,3.74951 -7.5,3.7495 2.50312,-3.7495 z"
- id="path5609" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:0.009375, 0.009375;stroke-dashoffset:0"
- d="m 356.53318,380.16643 259.24599,-0.32809 -0.0375,-1.24671 -259.23661,0.32809 0.0281,1.24671 z m 259.22723,-0.95613 -2.39062,3.81512 7.3875,-3.95572 -7.59375,-3.53391 z"
- id="path5611"
- sodipodi:nodetypes="ccccccccccc" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 590.51443,465.15835 3.13125,0 c 0.35625,0 0.61875,0.28121 0.61875,0.63741 l 0,16.91027 -3.75,0 z"
- id="path5613" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 594.26443,465.15835 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.74154 -3.9,0 z"
- id="path5615" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 598.16443,465.15835 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.89152 -3.9,0 z"
- id="path5617" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 572.68318,473.76346 12.1125,-0.33745 -0.0375,-1.25609 -12.09375,0.33746 0.0188,1.25608 z m 12.09375,-0.95612 -2.4,3.80575 7.3875,-3.95573 -7.59375,-3.52454 2.60625,3.67452 z"
- id="path5619" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 603.43318,473.31352 12.1125,-0.33745 -0.0375,-1.25609 -12.09375,0.33746 0.0188,1.25608 z m 12.09375,-0.95612 -2.4,3.80575 7.3875,-3.95573 -7.59375,-3.52454 2.60625,3.67452 z"
- id="path5621" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3945-8"
- width="15.714286"
- height="18.571428"
- x="342.40506"
- y="438.62076" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 343.61443,439.96167 3.13125,0 c 0.34687,0 0.61875,0.28121 0.61875,0.62804 l 0,16.76967 -3.75,0 z"
- id="path5623" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 347.36443,439.96167 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74155 -3.9,0 z"
- id="path5625" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 351.26443,439.96167 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74155 -3.9,0 z"
- id="path5627" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 323.21443,446.54205 13.7625,0 0,1.24671 -13.7625,0 0,-1.24671 z m 13.7625,0.61867 -2.50312,-3.74951 7.5,3.74951 -7.5,3.74951 2.50312,-3.74951 z"
- id="path5629" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01777934;stroke-linecap:butt;stroke-linejoin:round;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:0.01777934, 0.01777934;stroke-dashoffset:0"
- d="m 356.50859,445.94165 25.2708,0 0,30.18438 -0.55633,-0.61869 20.19642,0 0,1.23737 -20.7696,0 0,-30.18438 0.57318,0.63744 -24.71447,0 0,-1.25612 z m 44.91089,30.18438 -2.24217,-3.74961 6.74338,3.74961 -6.74338,3.74961 2.24217,-3.74961 z"
- id="path5631" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 256.12693,502.50342 0,36.37021 2.4375,0 -4.875,4.87436 -4.875,-4.87436 2.4375,0 0,-36.37021 z"
- id="path5633" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 256.12693,502.50342 0,36.37021 2.4375,0 -4.875,4.87436 -4.875,-4.87436 2.4375,0 0,-36.37021 z"
- id="path5635" />
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="224.7719"
- y="353.7381"
- id="text4458-5-6-8-7"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="224.7719"
- y="353.7381"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5">Pipeline 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="233.06458"
- y="387.51147"
- id="text4458-5-6-8-7-0"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="233.06458"
- y="387.51147"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6">Table 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="191.77844"
- y="416.61282"
- id="text4458-5-6-8-7-0-1"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="191.77844"
- y="416.61282"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-3">Route</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="251.78381"
- y="415.58426"
- id="text4458-5-6-8-7-0-1-8"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="251.78381"
- y="415.58426"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-3-9">Next Hop</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="462.5943"
- y="411.55435"
- id="text4458-5-6-8-7-3"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="462.5943"
- y="411.55435"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-4">Pipeline 2</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="471.90912"
- y="442.29727"
- id="text4458-5-6-8-7-0-4"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="471.90912"
- y="442.29727"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-6">Table 2</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="431.39413"
- y="468.73193"
- id="text4458-5-6-8-7-0-4-0"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="431.39413"
- y="468.73193"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-6-6">Key</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="486.88754"
- y="469.8894"
- id="text4458-5-6-8-7-0-4-0-6"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="486.88754"
- y="469.8894"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-6-6-1">MAC Address</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="334.64691"
- y="584.99622"
- id="text4458-5-6-8-7-8"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="334.64691"
- y="584.99622"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-49">Pipeline 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="197.47844"
- y="642.03308"
- id="text4458-5-6-8-7-0-1-6"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="197.47844"
- y="642.03308"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-3-3">Route</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="257.48383"
- y="641.00452"
- id="text4458-5-6-8-7-0-1-8-7"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="257.48383"
- y="641.00452"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-3-9-8">Next Hop</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="392.87457"
- y="697.18268"
- id="text4458-5-6-8-7-0-4-0-8"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="392.87457"
- y="697.18268"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-6-6-2">Key</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="445.36234"
- y="698.34015"
- id="text4458-5-6-8-7-0-4-0-6-9"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="445.36234"
- y="698.34015"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-6-6-1-1">MAC Address</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="238.76457"
- y="611.66492"
- id="text4458-5-6-8-7-0-3"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="238.76457"
- y="611.66492"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-5">Table 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="432.66986"
- y="666.95581"
- id="text4458-5-6-8-7-0-4-9"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="432.66986"
- y="666.95581"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-6-8">Table 2</tspan></text>
- </g>
-</svg>
diff --git a/doc/guides/sample_app_ug/img/ip_pipelines_3.svg b/doc/guides/sample_app_ug/img/ip_pipelines_3.svg
deleted file mode 100644
index 1cf54136..00000000
--- a/doc/guides/sample_app_ug/img/ip_pipelines_3.svg
+++ /dev/null
@@ -1,826 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="469.492"
- height="382.11536"
- id="svg6126"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="ipPipelines_3_update.svg">
- <defs
- id="defs6128">
- <clipPath
- id="clipEmfPath1"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6139"
- height="406.59448"
- width="585"
- y="0"
- x="0" />
- </clipPath>
- <clipPath
- id="clipEmfPath2"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6142"
- height="134.38165"
- width="277.79999"
- y="247.76617"
- x="49.650002" />
- </clipPath>
- <clipPath
- id="clipEmfPath3"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6145"
- height="93.137283"
- width="230.55"
- y="280.01178"
- x="72.150002" />
- </clipPath>
- <clipPath
- id="clipEmfPath4"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6148"
- height="13.648136"
- width="45.900002"
- y="308.50787"
- x="76.650002" />
- </clipPath>
- <clipPath
- id="clipEmfPath5"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6151"
- height="13.648136"
- width="83.400002"
- y="308.50787"
- x="126.15" />
- </clipPath>
- <clipPath
- id="clipEmfPath6"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6154"
- height="137.38124"
- width="208.2"
- y="18.147522"
- x="240.75" />
- </clipPath>
- <clipPath
- id="clipEmfPath7"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6157"
- height="137.38124"
- width="190.2"
- y="18.147522"
- x="42.75" />
- </clipPath>
- <clipPath
- id="clipEmfPath8"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6160"
- height="93.137283"
- width="142.64999"
- y="54.592545"
- x="66.449997" />
- </clipPath>
- <clipPath
- id="clipEmfPath9"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6163"
- height="13.648136"
- width="45.900002"
- y="83.088654"
- x="70.949997" />
- </clipPath>
- <clipPath
- id="clipEmfPath10"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6166"
- height="13.648136"
- width="83.400002"
- y="83.088654"
- x="120.45" />
- </clipPath>
- <clipPath
- id="clipEmfPath11"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6169"
- height="90.887589"
- width="161.39999"
- y="53.092751"
- x="265.20001" />
- </clipPath>
- <clipPath
- id="clipEmfPath12"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6172"
- height="13.648136"
- width="54.599998"
- y="84.588448"
- x="270.45001" />
- </clipPath>
- <clipPath
- id="clipEmfPath13"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6175"
- height="13.648136"
- width="90.150002"
- y="84.588448"
- x="329.54999" />
- </clipPath>
- <clipPath
- id="clipEmfPath14"
- clipPathUnits="userSpaceOnUse">
- <rect
- id="rect6178"
- height="13.648136"
- width="83.400002"
- y="308.50787"
- x="213.89999" />
- </clipPath>
- </defs>
- <sodipodi:namedview
- id="base"
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1.0"
- inkscape:pageopacity="0.0"
- inkscape:pageshadow="2"
- inkscape:zoom="1.979899"
- inkscape:cx="228.24994"
- inkscape:cy="175.22545"
- inkscape:document-units="px"
- inkscape:current-layer="layer1"
- showgrid="false"
- inkscape:window-width="1033"
- inkscape:window-height="1284"
- inkscape:window-x="86"
- inkscape:window-y="249"
- inkscape:window-maximized="0"
- fit-margin-top="0"
- fit-margin-left="0"
- fit-margin-right="0"
- fit-margin-bottom="0"
- showborder="true"
- borderlayer="false" />
- <metadata
- id="metadata6131">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <g
- inkscape:label="Layer 1"
- inkscape:groupmode="layer"
- id="layer1"
- transform="translate(-156.65375,-367.02875)">
- <text
- xml:space="preserve"
- x="626.14575"
- y="749.1441"
- style="font-size:13.80000019px;font-style:normal;font-weight:normal;text-align:start;text-anchor:start;fill:#000000;font-family:Calibri"
- id="text6182"> </text>
- <rect
- style="fill:none;stroke:#000000;stroke-width:1.1530993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3.45929811, 1.15309937;stroke-dashoffset:0"
- id="rect3114"
- width="279.14426"
- height="142.7157"
- x="186.01018"
- y="597.78625" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath2)"
- d="m 71.775,275.13743 0,102.73597 231.15,0 0,-102.73597 -231.15,0 z"
- id="path6188"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 208.92075,629.23551 231.15,0 0,102.73597 -231.15,0 z"
- id="path6190" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath3)"
- d="m 76.275,303.63354 0,23.24683 46.5,0 0,-23.24683 -46.5,0 z"
- id="path6192"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 213.42075,657.73162 46.5,0 0,23.24683 -46.5,0 z"
- id="path6194" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath4)"
- d="m 125.775,303.63354 0,23.24683 84,0 0,-23.24683 -84,0 z"
- id="path6196"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 262.92075,657.73162 84,0 0,23.24683 -84,0 z"
- id="path6198" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath5)"
- d="m 76.275,329.87996 0,41.99426 46.5,0 0,-41.99426 -46.5,0 z"
- id="path6200"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 213.42075,683.97804 46.5,0 0,41.99426 -46.5,0 z"
- id="path6202" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 262.92075,683.97804 0,42.74416 84,0 0,-42.74416 -84,0 z"
- id="path6204" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 262.92075,683.97804 84,0 0,42.74416 -84,0 z"
- id="path6206" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3920-48"
- width="16.515228"
- height="21.21936"
- x="177.68185"
- y="647.82019" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 179.52075,649.33277 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.74146 -3.9,0 z"
- id="path6208" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 183.42075,649.18279 3.13125,0 c 0.35625,0 0.61875,0.28121 0.61875,0.63741 l 0,16.91019 -3.75,0 z"
- id="path6210" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 187.17075,649.33277 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.74146 -3.9,0 z"
- id="path6212" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3920-48-8"
- width="16.515228"
- height="21.21936"
- x="176.61043"
- y="694.24872" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 179.52075,696.57632 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.74146 -3.9,0 z"
- id="path6214" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 183.42075,696.42634 3.13125,0 c 0.35625,0 0.61875,0.28121 0.61875,0.63741 l 0,16.91019 -3.75,0 z"
- id="path6216" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 187.17075,696.57632 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.74146 -3.9,0 z"
- id="path6218" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 162.4395,659.13768 12.1125,-0.33745 -0.0375,-1.25608 -12.09375,0.33745 0.0188,1.25608 z m 12.09375,-0.95612 -2.4,3.80573 7.3875,-3.95571 -7.59375,-3.52452 2.60625,3.6745 z"
- id="path6220" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 191.8395,659.13768 12.1125,-0.33745 -0.0375,-1.25608 -12.09375,0.33745 0.0187,1.25608 z m 12.09375,-0.95612 -2.4,3.80573 7.3875,-3.95571 -7.59375,-3.52452 2.60625,3.6745 z"
- id="path6222" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.0375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 191.0895,705.63133 12.1125,-0.33745 -0.0375,-1.25608 -12.09375,0.33745 0.0187,1.25608 z m 12.09375,-0.95612 -2.4,3.80573 7.3875,-3.95571 -7.59375,-3.52452 2.60625,3.6745 z"
- id="path6224" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 162.4395,705.63133 12.1125,-0.33745 -0.0375,-1.25608 -12.09375,0.33745 0.0188,1.25608 z m 12.09375,-0.95612 -2.4,3.80573 7.3875,-3.95571 -7.59375,-3.52452 2.60625,3.6745 z"
- id="path6226" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 377.52075,367.37127 0,146.97993 208.8,0 0,-146.97993 -208.8,0 z"
- id="path6228" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath6)"
- d="m 42.375,13.273188 0,146.979932 190.8,0 0,-146.979932 -190.8,0 z"
- id="path6232"
- transform="translate(137.14575,354.09808)" />
- <rect
- style="fill:none;stroke:#000000;stroke-width:1.1530993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3.45929801, 1.15309934;stroke-dashoffset:0"
- id="rect3114-1"
- width="191.96552"
- height="148.42998"
- x="180.02544"
- y="368.29129" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath7)"
- d="m 66.075,49.718211 0,102.735969 143.25,0 0,-102.735969 -143.25,0 z"
- id="path6236"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 203.22075,403.81629 143.25,0 0,102.73597 -143.25,0 z"
- id="path6238" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath8)"
- d="m 70.575,78.21432 0,23.24683 46.5,0 0,-23.24683 -46.5,0 z"
- id="path6240"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 207.72075,432.3124 46.5,0 0,23.24683 -46.5,0 z"
- id="path6242" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath9)"
- d="m 120.075,78.21432 0,23.24683 84,0 0,-23.24683 -84,0 z"
- id="path6244"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 257.22075,432.3124 84,0 0,23.24683 -84,0 z"
- id="path6246" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath10)"
- d="m 70.575,104.46074 0,41.99426 46.5,0 0,-41.99426 -46.5,0 z"
- id="path6248"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 207.72075,458.55882 46.5,0 0,41.99426 -46.5,0 z"
- id="path6250" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 257.22075,458.55882 0,42.74416 84,0 0,-42.74416 -84,0 z"
- id="path6252" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 257.22075,458.55882 84,0 0,42.74416 -84,0 z"
- id="path6254" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 401.97075,402.3165 0,100.48628 162,0 0,-100.48628 -162,0 z"
- id="path6256" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 401.97075,402.3165 162,0 0,100.48628 -162,0 z"
- id="path6258" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath11)"
- d="m 270.075,79.714116 0,23.246824 55.2,0 0,-23.246824 -55.2,0 z"
- id="path6260"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 407.22075,433.8122 55.2,0 0,23.24682 -55.2,0 z"
- id="path6262" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath12)"
- d="m 329.025,79.714116 0,23.246824 90.9,0 0,-23.246824 -90.9,0 z"
- id="path6264"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 466.17075,433.8122 90.9,0 0,23.24682 -90.9,0 z"
- id="path6266" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath13)"
- d="m 269.325,106.71043 0,36.74498 55.95,0 0,-36.74498 -55.95,0 z"
- id="path6268"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 406.47075,460.80851 55.95,0 0,36.74498 -55.95,0 z"
- id="path6270" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 466.92075,460.80851 0,36.74498 90.15,0 0,-36.74498 -90.15,0 z"
- id="path6272" />
- <rect
- style="fill:none;stroke:#000000;stroke-width:1.1530993;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:3.45929818, 1.15309941;stroke-dashoffset:0"
- id="rect3114-1-7"
- width="208.7556"
- height="148.37921"
- x="377.85892"
- y="367.84274" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 466.92075,460.80851 90.15,0 0,36.74498 -90.15,0 z"
- id="path6274" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3920-9"
- width="16.515228"
- height="21.21936"
- x="171.55568"
- y="422.16541" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 173.82075,423.91355 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74146 -3.9,0 z"
- id="path6276" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 177.72075,423.76357 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.89144 -3.9,0 z"
- id="path6278" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 181.62075,423.91355 3.13125,0 c 0.34687,0 0.61875,0.28121 0.61875,0.62804 l 0,16.76958 -3.75,0 z"
- id="path6280" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3920-0"
- width="16.515228"
- height="21.21936"
- x="171.50339"
- y="469.72925" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 173.82075,471.1571 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74146 -3.9,0 z"
- id="path6282" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 177.72075,471.00712 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.89144 -3.9,0 z"
- id="path6284" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 181.62075,471.1571 3.13125,0 c 0.34687,0 0.61875,0.28121 0.61875,0.62804 l 0,16.76958 -3.75,0 z"
- id="path6286" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 156.7395,433.70908 12.10312,-0.32808 -0.0375,-1.2467 -12.09375,0.32808 0.0281,1.24671 z m 12.08437,-0.95611 -2.39062,3.8151 7.3875,-3.95571 -7.59375,-3.53389 2.59687,3.6745 z"
- id="path6288" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 186.1395,433.70908 12.10312,-0.32808 -0.0375,-1.2467 -12.09375,0.32808 0.0281,1.24671 z m 12.08437,-0.95611 -2.39062,3.8151 7.3875,-3.95571 -7.59375,-3.53389 2.59687,3.6745 z"
- id="path6290" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 185.3895,480.20274 12.10312,-0.32808 -0.0375,-1.24671 -12.09375,0.32808 0.0281,1.24671 z m 12.08437,-0.95612 -2.39062,3.8151 7.3875,-3.95571 -7.59375,-3.53389 2.59687,3.6745 z"
- id="path6292" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 156.7395,480.20274 12.10312,-0.32808 -0.0375,-1.24671 -12.09375,0.32808 0.0281,1.24671 z m 12.08437,-0.95612 -2.39062,3.8151 7.3875,-3.95571 -7.59375,-3.53389 2.59687,3.6745 z"
- id="path6294" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3920-4"
- width="16.515228"
- height="21.21936"
- x="578.84741"
- y="448.11557" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 581.07075,450.6099 3.13125,0 c 0.35625,0 0.61875,0.28121 0.61875,0.63741 l 0,16.9102 -3.75,0 z"
- id="path6296" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 584.82075,450.6099 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.74147 -3.9,0 z"
- id="path6298" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 588.72075,450.6099 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.89145 -3.9,0 z"
- id="path6300" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 563.2395,459.21498 12.1125,-0.33746 -0.0375,-1.25608 -12.09375,0.33746 0.0188,1.25608 z m 12.09375,-0.95612 -2.4,3.80573 7.3875,-3.95571 -7.59375,-3.52452 2.60625,3.6745 z"
- id="path6302" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 593.9895,458.76504 12.1125,-0.33746 -0.0375,-1.25607 -12.09375,0.33745 0.0188,1.25608 z m 12.09375,-0.95612 -2.4,3.80573 7.3875,-3.95571 -7.59375,-3.52452 2.60625,3.6745 z"
- id="path6304" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3920"
- width="16.515228"
- height="21.21936"
- x="366.35788"
- y="448.70401" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 368.67075,450.90986 3.13125,0 c 0.34687,0 0.61875,0.28121 0.61875,0.62804 l 0,16.76959 -3.75,0 z"
- id="path6306" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 372.42075,450.90986 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74147 -3.9,0 z"
- id="path6308" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 376.32075,450.90986 3.25312,0 c 0.35625,0 0.64688,0.29996 0.64688,0.65616 l 0,16.74147 -3.9,0 z"
- id="path6310" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 347.52075,457.79017 13.7625,0 0,1.24671 -13.7625,0 0,-1.24671 z m 13.7625,0.61867 -2.50312,-3.74949 7.5,3.74949 -7.5,3.74949 2.50312,-3.74949 z"
- id="path6312" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.009375;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.009375,0.009375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 381.27075,457.79017 13.7625,0 0,1.24671 -13.7625,0 0,-1.24671 z m 13.7625,0.61867 -2.50313,-3.74949 7.5,3.74949 -7.5,3.74949 2.50313,-3.74949 z"
- id="path6314" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 350.67075,657.73162 0,23.24683 84,0 0,-23.24683 -84,0 z"
- id="path6316" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 350.67075,657.73162 84,0 0,23.24683 -84,0 z"
- id="path6318" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- clip-path="url(#clipEmfPath14)"
- d="m 213.675,329.87996 0,42.74416 84,0 0,-42.74416 -84,0 z"
- id="path6320"
- transform="translate(137.14575,354.09808)" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:0.60000002px;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:none"
- d="m 350.82075,683.97804 84,0 0,42.74416 -84,0 z"
- id="path6322" />
- <rect
- style="fill:#ffffff;fill-opacity:1;stroke:none"
- id="rect3920-48-2"
- width="16.515228"
- height="21.21936"
- x="456.96756"
- y="669.96301" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 459.57075,671.67972 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.89144 -3.9,0 z"
- id="path6324" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 463.47075,671.67972 3.13125,0 c 0.35625,0 0.61875,0.28121 0.61875,0.63741 l 0,16.76021 -3.75,0 z"
- id="path6326" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 467.22075,671.67972 3.2625,0 c 0.35625,0 0.6375,0.29996 0.6375,0.65616 l 0,16.89144 -3.9,0 z"
- id="path6328" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.01875;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 440.0895,681.16592 14.475,-0.11248 -0.0188,-1.25608 -14.475,0.11248 0.0188,1.25608 z m 14.45625,-0.73115 -2.45625,3.76824 7.4625,-3.82448 -7.5375,-3.6745 2.53125,3.73074 z"
- id="path6330" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#000000;fill-opacity:1;fill-rule:nonzero;stroke:#000000;stroke-width:0.01875;stroke-linecap:butt;stroke-linejoin:round;stroke-opacity:1;stroke-dasharray:0.01875,0.0375;stroke-miterlimit:4;stroke-dashoffset:0"
- d="m 471.12075,679.4599 13.48125,0 0,1.25608 -13.48125,0 0,-1.25608 z m 13.48125,0.61867 -2.5125,-3.74949 7.5,3.74949 -7.5,3.74949 2.5125,-3.74949 z"
- id="path6332" />
- <path
- inkscape:connector-curvature="0"
- style="fill:#ffffff;fill-opacity:1;fill-rule:evenodd;stroke:none"
- d="m 316.58325,533.69856 0,36.37003 2.4375,0 -4.875,4.87434 -4.875,-4.87434 2.4375,0 0,-36.37003 z"
- id="path6334" />
- <path
- inkscape:connector-curvature="0"
- style="fill:none;stroke:#000000;stroke-width:1.20000005px;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none"
- d="m 316.58325,533.69856 0,36.37003 2.4375,0 -4.875,4.87434 -4.875,-4.87434 2.4375,0 0,-36.37003 z"
- id="path6336" />
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="251.32822"
- y="385.15088"
- id="text4458-5-6-8-7"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="251.32822"
- y="385.15088"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5">Pipeline 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="458.32822"
- y="385.15088"
- id="text4458-5-6-8-7-4"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="458.32822"
- y="385.15088"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan6590">Pipeline 2</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="252.12177"
- y="418.59369"
- id="text4458-5-6-8-7-7"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="252.12177"
- y="418.59369"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6">Flow Table 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="452.02521"
- y="418.59369"
- id="text4458-5-6-8-7-7-3"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="452.02521"
- y="418.59369"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-6">Flow Table 2</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="214.38213"
- y="447.87283"
- id="text4458-5-6-8-7-7-1"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="214.38213"
- y="447.87283"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-5">Flow #</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="418.23212"
- y="449.37262"
- id="text4458-5-6-8-7-7-1-4"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="418.23212"
- y="449.37262"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-5-2">Flow #</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="489.28775"
- y="449.32428"
- id="text4458-5-6-8-7-7-1-4-0"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="489.28775"
- y="449.32428"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan6678">Actions 2</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="277.60748"
- y="447.82449"
- id="text4458-5-6-8-7-7-1-4-0-7"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="277.60748"
- y="447.82449"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan6678-3">Actions 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="302.02823"
- y="613.45862"
- id="text4458-5-6-8-7-72"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="302.02823"
- y="613.45862"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-60">Pipeline 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="293.5502"
- y="644.90143"
- id="text4458-5-6-8-7-7-16"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="293.5502"
- y="644.90143"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-57">Flow Table 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="220.08212"
- y="673.29205"
- id="text4458-5-6-8-7-7-1-5"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="220.08212"
- y="673.29205"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan4668-2-5-6-5-4">Flow #</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="283.30746"
- y="673.24371"
- id="text4458-5-6-8-7-7-1-4-0-7-1"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="283.30746"
- y="673.24371"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan6678-3-2">Actions 1</tspan></text>
- <text
- xml:space="preserve"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;letter-spacing:0px;word-spacing:0px;writing-mode:lr-tb;text-anchor:start;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans;-inkscape-font-specification:Sans"
- x="370.33774"
- y="673.24371"
- id="text4458-5-6-8-7-7-1-4-0-7-1-0"
- sodipodi:linespacing="125%"><tspan
- sodipodi:role="line"
- x="370.33774"
- y="673.24371"
- style="font-size:11px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Sans;-inkscape-font-specification:Sans"
- id="tspan6678-3-2-0">Actions 2</tspan></text>
- </g>
-</svg>
diff --git a/doc/guides/sample_app_ug/img/master_slave_proc.png b/doc/guides/sample_app_ug/img/master_slave_proc.png
deleted file mode 100644
index a0065e85..00000000
--- a/doc/guides/sample_app_ug/img/master_slave_proc.png
+++ /dev/null
Binary files differ
diff --git a/doc/guides/sample_app_ug/img/slave_proc_recov.png b/doc/guides/sample_app_ug/img/slave_proc_recov.png
deleted file mode 100644
index 62f1f088..00000000
--- a/doc/guides/sample_app_ug/img/slave_proc_recov.png
+++ /dev/null
Binary files differ
diff --git a/doc/guides/sample_app_ug/index.rst b/doc/guides/sample_app_ug/index.rst
index e87afda8..5bedf4f6 100644
--- a/doc/guides/sample_app_ug/index.rst
+++ b/doc/guides/sample_app_ug/index.rst
@@ -44,6 +44,7 @@ Sample Applications User Guides
vmdq_dcb_forwarding
vhost
vhost_scsi
+ vhost_crypto
netmap_compatibility
ip_pipeline
test_pipeline
@@ -82,10 +83,6 @@ Sample Applications User Guides
:numref:`figure_client_svr_sym_multi_proc_app` :ref:`figure_client_svr_sym_multi_proc_app`
-:numref:`figure_master_slave_proc` :ref:`figure_master_slave_proc`
-
-:numref:`figure_slave_proc_recov` :ref:`figure_slave_proc_recov`
-
:numref:`figure_qos_sched_app_arch` :ref:`figure_qos_sched_app_arch`
:numref:`figure_pipeline_overview` :ref:`figure_pipeline_overview`
diff --git a/doc/guides/sample_app_ug/ip_pipeline.rst b/doc/guides/sample_app_ug/ip_pipeline.rst
index 69af6b8d..b75509a0 100644
--- a/doc/guides/sample_app_ug/ip_pipeline.rst
+++ b/doc/guides/sample_app_ug/ip_pipeline.rst
@@ -1,5 +1,5 @@
.. SPDX-License-Identifier: BSD-3-Clause
- Copyright(c) 2015-2016 Intel Corporation.
+ Copyright(c) 2015-2018 Intel Corporation.
Internet Protocol (IP) Pipeline Application
===========================================
@@ -8,1163 +8,523 @@ Application overview
--------------------
The *Internet Protocol (IP) Pipeline* application is intended to be a vehicle for rapid development of packet processing
-applications running on multi-core CPUs.
+applications on multi-core CPUs.
-The application provides a library of reusable functional blocks called pipelines.
-These pipelines can be seen as prefabricated blocks that can be instantiated and inter-connected through packet queues
+Following OpenFlow and P4 design principles, the application can be used to create functional blocks called pipelines out
+of input/output ports, tables and actions in a modular way. Multiple pipelines can be inter-connected through packet queues
to create complete applications (super-pipelines).
-Pipelines are created and inter-connected through the application configuration file.
-By using different configuration files, different applications are effectively created, therefore this application
-can be seen as an application generator.
-The configuration of each pipeline can be updated at run-time through the application Command Line Interface (CLI).
+The pipelines are mapped to application threads, with each pipeline executed by a single thread and each thread able to run
+one or several pipelines. The possibilities of creating pipelines out of ports, tables and actions, connecting multiple
+pipelines together and mapping the pipelines to execution threads are practically endless, so this application can be
+seen as a true application generator.
-Main application components are:
+Pipelines are created and managed through the Command Line Interface (CLI):
-**A Library of reusable pipelines**
+ * Any standard TCP client (e.g. telnet, netcat, a custom script, etc.) can connect to the application, send
+   commands over the network and wait for the response before pushing the next command.
- * Each pipeline represents a functional block, e.g. flow classification, firewall, routing, master, etc.
-
- * Each pipeline type can be instantiated several times in the same application, which each instance configured
- separately and mapped to a single CPU core.
- Each CPU core can run one or several pipeline instances, which can be of same or different type.
-
- * Pipeline instances are inter-connected through packet queues (for packet processing) and message queues
- (for run-time configuration).
-
- * Pipelines are implemented using DPDK Packet Framework.
-
- * More pipeline types can always be built and added to the existing pipeline types.
-
-**The Configuration file**
-
- * The configuration file defines the application structure.
- By using different configuration files, different applications are created.
-
- * All the application resources are created and configured through the application configuration file:
- pipeline instances, buffer pools, links (i.e. network interfaces), hardware device RX/TX queues,
- software queues, traffic manager devices, EAL startup arguments, etc.
-
- * The configuration file syntax is “define by reference”, meaning that resources are defined as they are referenced.
- First time a resource name is detected, it is registered with default parameters.
- Optionally, the resource parameters can be further refined through a configuration file section dedicated to
- that resource.
-
- * Command Line Interface (CLI)
-
-**Global CLI commands: link configuration, etc.**
-
- * Common pipeline CLI commands: ping (keep-alive), statistics, etc.
-
- * Pipeline type specific CLI commands: used to configure instances of specific pipeline type.
- These commands are registered with the application when the pipeline type is registered.
- For example, the commands for routing pipeline instances include: route add, route delete, route list, etc.
-
- * CLI commands can be grouped into scripts that can be invoked at initialization and at runtime.
-
-
-Design goals
-------------
-
-
-Rapid development
-~~~~~~~~~~~~~~~~~
-
-This application enables rapid development through quick connectivity of standard components called pipelines.
-These components are built using DPDK Packet Framework and encapsulate packet processing features at different levels:
-ports, tables, actions, pipelines and complete applications.
-
-Pipeline instances are instantiated, configured and inter-connected through low complexity configuration files loaded
-during application initialization.
-Each pipeline instance is mapped to a single CPU core, with each CPU core able to run one or multiple pipeline
-instances of same or different types. By loading a different configuration file, a different application is
-effectively started.
-
-
-Flexibility
-~~~~~~~~~~~
-
-Each packet processing application is typically represented as a chain of functional stages which is often called
-the functional pipeline of the application.
-These stages are mapped to CPU cores to create chains of CPU cores (pipeline model), clusters of CPU cores
-(run-to-completion model) or chains of clusters of CPU cores (hybrid model).
-
-This application allows all the above programming models.
-By applying changes to the configuration file, the application provides the flexibility to reshuffle its
-building blocks in different ways until the configuration providing the best performance is identified.
-
-
-Move pipelines around
-^^^^^^^^^^^^^^^^^^^^^
-
-The mapping of pipeline instances to CPU cores can be reshuffled through the configuration file.
-One or several pipeline instances can be mapped to the same CPU core.
-
-.. _figure_ip_pipelines_1:
-
-.. figure:: img/ip_pipelines_1.*
-
- Example of moving pipeline instances across different CPU cores
-
-
-Move tables around
-^^^^^^^^^^^^^^^^^^
-
-There is some degree of flexibility for moving tables from one pipeline instance to another.
-Based on the configuration arguments passed to each pipeline instance in the configuration file, specific tables
-can be enabled or disabled.
-This way, a specific table can be “moved” from pipeline instance A to pipeline instance B by simply disabling its
-associated functionality for pipeline instance A while enabling it for pipeline instance B.
-
-Due to requirement to have simple syntax for the configuration file, moving tables across different pipeline
-instances is not as flexible as the mapping of pipeline instances to CPU cores, or mapping actions to pipeline tables.
-Complete flexibility in moving tables from one pipeline to another could be achieved through a complex pipeline
-description language that would detail the structural elements of the pipeline (ports, tables and actions) and
-their connectivity, resulting in complex syntax for the configuration file, which is not acceptable.
-Good configuration file readability through simple syntax is preferred.
-
-*Example*: the IP routing pipeline can run the routing function only (with ARP function run by a different
-pipeline instance), or it can run both the routing and ARP functions as part of the same pipeline instance.
-
-
-.. _figure_ip_pipelines_2:
-
-.. figure:: img/ip_pipelines_2.*
-
- Example of moving tables across different pipeline instances
-
-
-Move actions around
-^^^^^^^^^^^^^^^^^^^
-
-When it makes sense, packet processing actions can be moved from one pipeline instance to another.
-Based on the configuration arguments passed to each pipeline instance in the configuration file, specific actions
-can be enabled or disabled.
-This way, a specific action can be "moved" from pipeline instance A to pipeline instance B by simply disabling its
-associated functionality for pipeline instance A while enabling it for pipeline instance B.
-
-*Example*: The flow actions of accounting, traffic metering, application identification, NAT, etc can be run as part
-of the flow classification pipeline instance or split across several flow actions pipeline instances, depending on
-the number of flow instances and their compute requirements.
-
-
-.. _figure_ip_pipelines_3:
-
-.. figure:: img/ip_pipelines_3.*
-
- Example of moving actions across different tables and pipeline instances
-
-
-Performance
-~~~~~~~~~~~
-
-Performance of the application is the highest priority requirement.
-Flexibility is not provided at the expense of performance.
-
-The purpose of flexibility is to provide an incremental development methodology that allows monitoring the
-performance evolution:
-
-* Apply incremental changes in the configuration (e.g. mapping on pipeline instances to CPU cores)
- in order to identify the configuration providing the best performance for a given application;
-
-* Add more processing incrementally (e.g. by enabling more actions for specific pipeline instances) until
- the application is feature complete while checking the performance impact at each step.
-
-
-Debug capabilities
-~~~~~~~~~~~~~~~~~~
-
-The application provides a significant set of debug capabilities:
-
-* Command Line Interface (CLI) support for statistics polling: pipeline instance ping (keep-alive checks),
- pipeline instance statistics per input port/output port/table, link statistics, etc;
-
-* Logging: Turn on/off application log messages based on priority level;
+ * All the application objects are created and managed through CLI commands (see the sketch after this list):
+ * 'Primitive' objects used to create pipeline ports: memory pools, links (i.e. network interfaces), SW queues, traffic managers, etc.
+ * Action profiles: used to define the actions to be executed by pipeline input/output ports and tables.
+ * Pipeline components: input/output ports, tables, pipelines, mapping of pipelines to execution threads.
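+
+As an illustrative sketch, a minimal CLI sequence that builds a pass-through (L2fwd-style) pipeline is shown
+below. The object names and parameter values (buffer and queue sizes, PCI address, thread ID) are example
+assumptions, not requirements:
+
+.. code-block:: console
+
+   ; Create a buffer pool and a link (network interface) with one RX and one TX queue
+   mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+   link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+   ; Create a pipeline with one input port, one output port and a stub table
+   pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+   pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+   pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+   pipeline PIPELINE0 table match stub
+   pipeline PIPELINE0 port in 0 table 0
+
+   ; Map the pipeline to data plane thread 1
+   thread 1 pipeline PIPELINE0 enable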
Running the application
-----------------------
The application startup command line is::
- ip_pipeline [-f CONFIG_FILE] [-s SCRIPT_FILE] -p PORT_MASK [-l LOG_LEVEL]
+ ip_pipeline [EAL_ARGS] -- [-s SCRIPT_FILE] [-h HOST] [-p PORT]
The application startup arguments are:
-``-f CONFIG_FILE``
-
- * Optional: Yes
-
- * Default: ``./config/ip_pipeline.cfg``
-
- * Argument: Path to the configuration file to be loaded by the application.
- Please refer to the :ref:`ip_pipeline_configuration_file` for details on how to write the configuration file.
-
``-s SCRIPT_FILE``
* Optional: Yes
* Default: Not present
- * Argument: Path to the CLI script file to be run by the master pipeline at application startup.
- No CLI script file will be run at startup if this argument is not present.
-
-``-p PORT_MASK``
-
- * Optional: No
+ * Argument: Path to the CLI script file to be run at application startup.
+ No CLI script file will run at startup if this argument is not present.
- * Default: N/A
-
- * Argument: Hexadecimal mask of NIC port IDs to be used by the application.
- First port enabled in this mask will be referenced as LINK0 as part of the application configuration file,
- next port as LINK1, etc.
-
-``-l LOG_LEVEL``
+``-h HOST``
* Optional: Yes
- * Default: 1 (High priority)
-
- * Argument: Log level to determine which application messages are to be printed to standard output.
- Available log levels are: 0 (None), 1 (High priority), 2 (Low priority).
- Only application messages whose priority is higher than or equal to the application log level will be printed.
-
-
-Application stages
-------------------
-
-
-Configuration
-~~~~~~~~~~~~~
-
-During this stage, the application configuration file is parsed and its content is loaded into the application data
-structures.
-In case of any configuration file parse error, an error message is displayed and the application is terminated.
-Please refer to the :ref:`ip_pipeline_configuration_file` for a description of the application configuration file format.
-
-
-Configuration checking
-~~~~~~~~~~~~~~~~~~~~~~
-
-In the absence of any parse errors, the loaded content of application data structures is checked for overall consistency.
-In case of any configuration check error, an error message is displayed and the application is terminated.
-
-
-Initialization
-~~~~~~~~~~~~~~
-
-During this stage, the application resources are initialized and the handles to access them are saved into the
-application data structures.
-In case of any initialization error, an error message is displayed and the application is terminated.
-
-The typical resources to be initialized are: pipeline instances, buffer pools, links (i.e. network interfaces),
-hardware device RX/TX queues, software queues, traffic management devices, etc.
-
-
-.. _ip_pipeline_runtime:
+ * Default: ``0.0.0.0``
-Run-time
-~~~~~~~~
+ * Argument: IP address on which the ip pipeline application listens for connections
+   from remote TCP based clients (telnet, netcat, etc.).
-Each CPU core runs the pipeline instances assigned to it in time sharing mode and in round robin order:
+``-p PORT``
-1. *Packet processing task*: The pipeline run-time code is typically a packet *processing* task built on top of
- DPDK Packet Framework rte_pipeline library, which reads bursts of packets from the pipeline input ports,
- performs table lookups and executes the identified actions for all tables in the pipeline, with packet
- eventually written to pipeline output ports or dropped.
+ * Optional: Yes
-2. *Message handling task*: Each CPU core will also periodically execute the *message handling* code of each
- of the pipelines mapped to it.
- The pipeline message handling code is processing the messages that are pending in the pipeline input message
- queues, which are typically sent by the master CPU core for the on-the-fly pipeline configuration: check
- that pipeline is still alive (ping), add/delete entries in the pipeline tables, get statistics, etc.
- The frequency of executing the message handling code is usually much smaller than the frequency of executing
- the packet processing work.
+ * Default: ``8086``
-Please refer to the :ref:`ip_pipeline_pipeline_section` for more details about the application pipeline module encapsulation.
+ * Argument: TCP port number on which the ip pipeline application listens.
+   Remote TCP clients (such as telnet, netcat, etc.) should connect to the application on this port.
-.. _ip_pipeline_configuration_file:
+Refer to the *DPDK Getting Started Guide* for general information on running applications and the Environment Abstraction Layer (EAL) options.
-Configuration file syntax
--------------------------
+The following is an example command to run the ip pipeline application configured for ECMP routing:
-Syntax overview
-~~~~~~~~~~~~~~~
+.. code-block:: console
-The syntax of the configuration file is designed to be simple, which favors readability.
-The configuration file is parsed using the DPDK library librte_cfgfile, which supports simple
-`INI file format <http://en.wikipedia.org/wiki/INI_file>`__ for configuration files.
+ $ ./build/ip_pipeline -c 0x3 -- -s examples/route_ecmp.cli
-As result, the configuration file is split into several sections, with each section containing one or more entries.
-The scope of each entry is its section, and each entry specifies a variable that is assigned a specific value.
-Any text after the ``;`` character is considered a comment and is therefore ignored.
+The application should start successfully and display as follows:
-The following are application specific: number of sections, name of each section, number of entries of each section,
-name of the variables used for each section entry, the value format (e.g. signed/unsigned integer, string, etc)
-and range of each section entry variable.
+.. code-block:: console
-Generic example of configuration file section:
+ EAL: Detected 40 lcore(s)
+ EAL: Detected 2 NUMA nodes
+ EAL: Multi-process socket /var/run/.rte_unix
+ EAL: Probing VFIO support...
+ EAL: PCI device 0000:02:00.0 on NUMA socket 0
+ EAL: probe driver: 8086:10fb net_ixgbe
+ ...
-.. code-block:: ini
+To run a remote client (e.g. telnet) that communicates with the ip pipeline application:
- [<section_name>]
+.. code-block:: console
- <variable_name_1> = <value_1>
+ $ telnet 127.0.0.1 8086
- ; ...
+When the telnet client connects as shown above, a command prompt is displayed:
- <variable_name_N> = <value_N>
+.. code-block:: console
+ Trying 127.0.0.1...
+ Connected to 127.0.0.1.
+ Escape character is '^]'.
-Application resources present in the configuration file
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Welcome to IP Pipeline!
-.. _table_ip_pipelines_resource_name:
+ pipeline>
-.. tabularcolumns:: |p{4cm}|p{6cm}|p{6cm}|
+Once the application and the telnet client are running, CLI commands can be sent from the client to the application.
+At any stage, the telnet client can be terminated using the quit command.
-.. table:: Application resource names in the configuration file
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Resource type | Format | Examples |
- +============================+=============================+=================================================+
- | Pipeline | ``PIPELINE<ID>`` | ``PIPELINE0``, ``PIPELINE1`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Mempool | ``MEMPOOL<ID>`` | ``MEMPOOL0``, ``MEMPOOL1`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Link (network interface) | ``LINK<ID>`` | ``LINK0``, ``LINK1`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Link RX queue | ``RXQ<LINK_ID>.<QUEUE_ID>`` | ``RXQ0.0``, ``RXQ1.5`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Link TX queue | ``TXQ<LINK_ID>.<QUEUE_ID>`` | ``TXQ0.0``, ``TXQ1.5`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Software queue | ``SWQ<ID>`` | ``SWQ0``, ``SWQ1`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Traffic Manager | ``TM<LINK_ID>`` | ``TM0``, ``TM1`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | KNI (kernel NIC interface) | ``KNI<LINK_ID>`` | ``KNI0``, ``KNI1`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Source | ``SOURCE<ID>`` | ``SOURCE0``, ``SOURCE1`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Sink | ``SINK<ID>`` | ``SINK0``, ``SINK1`` |
- +----------------------------+-----------------------------+-------------------------------------------------+
- | Message queue | ``MSGQ<ID>`` | ``MSGQ0``, ``MSGQ1``, |
- | | ``MSGQ-REQ-PIPELINE<ID>`` | ``MSGQ-REQ-PIPELINE2``, ``MSGQ-RSP-PIPELINE2,`` |
- | | ``MSGQ-RSP-PIPELINE<ID>`` | ``MSGQ-REQ-CORE-s0c1``, ``MSGQ-RSP-CORE-s0c1`` |
- | | ``MSGQ-REQ-CORE-<CORE_ID>`` | |
- | | ``MSGQ-RSP-CORE-<CORE_ID>`` | |
- +----------------------------+-----------------------------+-------------------------------------------------+
-
-``LINK`` instances are created implicitly based on the ``PORT_MASK`` application startup argument.
-``LINK0`` is the first port enabled in the ``PORT_MASK``, port 1 is the next one, etc.
-The LINK ID is different than the DPDK PMD-level NIC port ID, which is the actual position in the bitmask mentioned above.
-For example, if bit 5 is the first bit set in the bitmask, then ``LINK0`` is having the PMD ID of 5.
-This mechanism creates a contiguous LINK ID space and isolates the configuration file against changes in the board
-PCIe slots where NICs are plugged in.
-
-``RXQ``, ``TXQ``, ``TM`` and ``KNI`` instances have the LINK ID as part of their name.
-For example, ``RXQ2.1``, ``TXQ2.1`` and ``TM2`` are all associated with ``LINK2``.
-
-
-Rules to parse the configuration file
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The main rules used to parse the configuration file are:
-
-1. Application resource name determines the type of resource based on the name prefix.
-
- *Example*: all software queues need to start with ``SWQ`` prefix, so ``SWQ0`` and ``SWQ5`` are valid software
- queue names.
-
-2. An application resource is defined by creating a configuration file section with its name.
- The configuration file section allows fine tuning on any of the resource parameters.
- Some resource parameters are mandatory, in which case it is required to have them specified as part of the
- section, while some others are optional, in which case they get assigned their default value when not present.
-
- *Example*: section ``SWQ0`` defines a software queue named SWQ0, whose parameters are detailed as part of this section.
-
-3. An application resource can also be defined by referencing it.
- Referencing a resource takes place by simply using its name as part of the value assigned to a variable in any
- configuration file section.
- In this case, the resource is registered with all its parameters having their default values.
- Optionally, a section with the resource name can be added to the configuration file to fine tune some or all
- of the resource parameters.
-
- *Example*: in section ``PIPELINE3``, variable ``pktq_in`` includes ``SWQ5`` as part of its list, which results
- in defining a software queue named ``SWQ5``; when there is no ``SWQ5`` section present in the configuration file,
- ``SWQ5`` gets registered with default parameters.
-
-
-.. _ip_pipeline_pipeline_section:
-
-PIPELINE section
-~~~~~~~~~~~~~~~~
-
-.. _table_ip_pipelines_pipeline_section_1:
-
-.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|
-
-.. table:: Configuration file PIPELINE section (1/2)
-
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
- | Section | Description | Optional | Range | Default value |
- +===============+===========================================================+===============+========================+================+
- | type | Pipeline type. Defines the functionality to be | NO | See "List | N/A |
- | | executed. | | of pipeline types" | |
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
- | core | CPU core to run the current pipeline. | YES | See "CPU Core | CPU socket 0, |
- | | | | notation" | core 0, |
- | | | | | hyper-thread 0 |
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
- | pktq_in | Packet queues to serve as input ports for the | YES | List of input | Empty list |
- | | current pipeline instance. The acceptable packet | | packet queue IDs | |
- | | queue types are: ``RXQ``, ``SWQ``, ``TM`` and ``SOURCE``. | | | |
- | | First device in this list is used as pipeline input port | | | |
- | | 0, second as pipeline input port 1, etc. | | | |
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
- | pktq_out | Packet queues to serve as output ports for the | YES | List of output | Empty list |
- | | current pipeline instance. The acceptable packet | | packet queue IDs. | |
- | | queue types are: ``TXQ``, ``SWQ``, ``TM`` and ``SINK``. | | | |
- | | First device in this list is used as pipeline output | | | |
- | | port 0, second as pipeline output port 1, etc. | | | |
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
-
-.. _table_ip_pipelines_pipeline_section_2:
-
-.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|
-
-.. table:: Configuration file PIPELINE section (2/2)
-
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
- | Section | Description | Optional | Range | Default value |
- +===============+===========================================================+===============+========================+================+
- | msgq_in | Input message queues. These queues contain | YES | List of message | Empty list |
- | | request messages that need to be handled by the | | queue IDs | |
- | | current pipeline instance. The type and format of | | | |
- | | request messages is defined by the pipeline type. | | | |
- | | For each pipeline instance, there is an input | | | |
- | | message queue defined implicitly, whose name is: | | | |
- | | ``MSGQ-REQ-<PIPELINE_ID>``. This message queue | | | |
- | | should not be mentioned as part of msgq_in list. | | | |
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
- | msgq_out | Output message queues. These queues are used by | YES | List of message | Empty list |
- | | the current pipeline instance to write response | | queue IDs | |
- | | messages as result of request messages being | | | |
- | | handled. The type and format of response | | | |
- | | messages is defined by the pipeline type. | | | |
- | | For each pipeline instance, there is an output | | | |
- | | message queue defined implicitly, whose name is: | | | |
- | | ``MSGQ-RSP-<PIPELINE_ID>``. This message queue | | | |
- | | should not be mentioned as part of msgq_out list. | | | |
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
- | timer_period | Time period, measured in milliseconds, | YES | milliseconds | 1 ms |
- | | for handling the input message queues. | | | |
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
- | <any other> | Arguments to be passed to the current pipeline | Depends on | Depends on | Depends on |
- | | instance. Format of the arguments, their type, | pipeline type | pipeline type | pipeline type |
- | | whether each argument is optional or mandatory | | | |
- | | and its default value (when optional) are defined | | | |
- | | by the pipeline type. | | | |
- | | The value of the arguments is applicable to the | | | |
- | | current pipeline instance only. | | | |
- +---------------+-----------------------------------------------------------+---------------+------------------------+----------------+
-
-
-CPU core notation
-^^^^^^^^^^^^^^^^^
-
-The CPU Core notation is::
-
- <CPU core> ::= [s|S<CPU socket ID>][c|C]<CPU core ID>[h|H]
-
-For example::
-
- CPU socket 0, core 0, hyper-thread 0: 0, c0, s0c0
-
- CPU socket 0, core 0, hyper-thread 1: 0h, c0h, s0c0h
-
- CPU socket 3, core 9, hyper-thread 1: s3c9h
-
-
-MEMPOOL section
-~~~~~~~~~~~~~~~
+Application stages
+------------------
-.. _table_ip_pipelines_mempool_section:
-
-.. tabularcolumns:: |p{2.5cm}|p{6cm}|p{1.5cm}|p{1.5cm}|p{3cm}|
-
-.. table:: Configuration file MEMPOOL section
-
- +---------------+-----------------------------------------------+----------+----------+---------------------------+
- | Section | Description | Optional | Type | Default value |
- +===============+===============================================+==========+==========+===========================+
- | buffer_size | Buffer size (in bytes) for the current | YES | uint32_t | 2048 |
- | | buffer pool. | | | + sizeof(struct rte_mbuf) |
- | | | | | + HEADROOM |
- +---------------+-----------------------------------------------+----------+----------+---------------------------+
- | pool_size | Number of buffers in the current buffer pool. | YES | uint32_t | 32K |
- +---------------+-----------------------------------------------+----------+----------+---------------------------+
- | cache_size | Per CPU thread cache size (in number of | YES | uint32_t | 256 |
- | | buffers) for the current buffer pool. | | | |
- +---------------+-----------------------------------------------+----------+----------+---------------------------+
- | cpu | CPU socket ID where to allocate memory for | YES | uint32_t | 0 |
- | | the current buffer pool. | | | |
- +---------------+-----------------------------------------------+----------+----------+---------------------------+
-
-
-LINK section
-~~~~~~~~~~~~
-
-.. _table_ip_pipelines_link_section:
-
-.. tabularcolumns:: |p{3cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{2cm}|
-
-.. table:: Configuration file LINK section
-
- +-----------------+----------------------------------------------+----------+----------+-------------------+
- | Section entry | Description | Optional | Type | Default value |
- +=================+==============================================+==========+==========+===================+
- | arp_q | NIC RX queue where ARP packets should | YES | 0 .. 127 | 0 (default queue) |
- | | be filtered. | | | |
- +-----------------+----------------------------------------------+----------+----------+-------------------+
- | tcp_syn_local_q | NIC RX queue where TCP packets with SYN | YES | 0 .. 127 | 0 (default queue) |
- | | flag should be filtered. | | | |
- +-----------------+----------------------------------------------+----------+----------+-------------------+
- | ip_local_q | NIC RX queue where IP packets with local | YES | 0 .. 127 | 0 (default queue) |
- | | destination should be filtered. | | | |
- | | When TCP, UDP and SCTP local queues are | | | |
- | | defined, they take higher priority than this | | | |
- | | queue. | | | |
- +-----------------+----------------------------------------------+----------+----------+-------------------+
- | tcp_local_q | NIC RX queue where TCP packets with local | YES | 0 .. 127 | 0 (default queue) |
- | | destination should be filtered. | | | |
- +-----------------+----------------------------------------------+----------+----------+-------------------+
- | udp_local_q | NIC RX queue where TCP packets with local | YES | 0 .. 127 | 0 (default queue) |
- | | destination should be filtered. | | | |
- +-----------------+----------------------------------------------+----------+----------+-------------------+
- | sctp_local_q | NIC RX queue where TCP packets with local | YES | 0 .. 127 | 0 (default queue) |
- | | destination should be filtered. | | | |
- +-----------------+----------------------------------------------+----------+----------+-------------------+
- | promisc | Indicates whether current link should be | YES | YES/NO | YES |
- | | started in promiscuous mode. | | | |
- +-----------------+----------------------------------------------+----------+----------+-------------------+
-
-
-RXQ section
-~~~~~~~~~~~
-
-.. _table_ip_pipelines_rxq_section:
-
-.. tabularcolumns:: |p{3cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{2cm}|
-
-.. table:: Configuration file RXQ section
-
- +---------------+--------------------------------------------+----------+----------+---------------+
- | Section | Description | Optional | Type | Default value |
- +===============+============================================+==========+==========+===============+
- | mempool | Mempool to use for buffer allocation for | YES | uint32_t | MEMPOOL0 |
- | | current NIC RX queue. The mempool ID has | | | |
- | | to be associated with a valid instance | | | |
- | | defined in the mempool entry of the global | | | |
- | | section. | | | |
- +---------------+--------------------------------------------+----------+----------+---------------+
- | Size | NIC RX queue size (number of descriptors) | YES | uint32_t | 128 |
- +---------------+--------------------------------------------+----------+----------+---------------+
- | burst | Read burst size (number of descriptors) | YES | uint32_t | 32 |
- +---------------+--------------------------------------------+----------+----------+---------------+
-
-
-TXQ section
-~~~~~~~~~~~
-
-.. _table_ip_pipelines_txq_section:
-
-.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{2cm}|p{1.5cm}|
-
-.. table:: Configuration file TXQ section
-
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | Section | Description | Optional | Type | Default value |
- +===============+==============================================+==========+==================+===============+
- | size | NIC TX queue size (number of descriptors) | YES | uint32_t | 512 |
- | | | | power of 2 | |
- | | | | > 0 | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | burst | Write burst size (number of descriptors) | YES | uint32_t | 32 |
- | | | | power of 2 | |
- | | | | 0 < burst < size | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | dropless | When dropless is set to NO, packets can be | YES | YES/NO | NO |
- | | dropped if not enough free slots are | | | |
- | | currently available in the queue, so the | | | |
- | | write operation to the queue is non- | | | |
- | | blocking. | | | |
- | | When dropless is set to YES, packets cannot | | | |
- | | be dropped if not enough free slots are | | | |
- | | currently available in the queue, so the | | | |
- | | write operation to the queue is blocking, as | | | |
- | | the write operation is retried until enough | | | |
- | | free slots become available and all the | | | |
- | | packets are successfully written to the | | | |
- | | queue. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | n_retries | Number of retries. Valid only when dropless | YES | uint32_t | 0 |
- | | is set to YES. When set to 0, it indicates | | | |
- | | unlimited number of retries. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
-
-
-SWQ section
-~~~~~~~~~~~
-
-.. _table_ip_pipelines_swq_section:
-
-.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|
-
-.. table:: Configuration file SWQ section
-
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | Section | Description | Optional | Type | Default value |
- +===============+==============================================+==========+==================+===============+
- | size | Queue size (number of packets) | YES | uint32_t | 256 |
- | | | | power of 2 | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | burst_read | Read burst size (number of packets) | YES | uint32_t | 32 |
- | | | | power of 2 | |
- | | | | 0 < burst < size | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | burst_write | Write burst size (number of packets) | YES | uint32_t | 32 |
- | | | | power of 2 | |
- | | | | 0 < burst < size | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | dropless | When dropless is set to NO, packets can be | YES | YES/NO | NO |
- | | dropped if not enough free slots are | | | |
- | | currently available in the queue, so the | | | |
- | | write operation to the queue is non- | | | |
- | | blocking. | | | |
- | | When dropless is set to YES, packets cannot | | | |
- | | be dropped if not enough free slots are | | | |
- | | currently available in the queue, so the | | | |
- | | write operation to the queue is blocking, as | | | |
- | | the write operation is retried until enough | | | |
- | | free slots become available and all the | | | |
- | | packets are successfully written to the | | | |
- | | queue. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | n_retries | Number of retries. Valid only when dropless | YES | uint32_t | 0 |
- | | is set to YES. When set to 0, it indicates | | | |
- | | unlimited number of retries. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | cpu | CPU socket ID where to allocate memory | YES | uint32_t | 0 |
- | | for this SWQ. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
-
-
-TM section
-~~~~~~~~~~
-
-.. _table_ip_pipelines_tm_section:
-
-.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|
-
-.. table:: Configuration file TM section
-
- +---------------+---------------------------------------------+----------+----------+---------------+
- | Section | Description | Optional | Type | Default value |
- +===============+=============================================+==========+==========+===============+
- | Cfg | File name to parse for the TM configuration | YES | string | tm_profile |
- | | to be applied. The syntax of this file is | | | |
- | | described in the examples/qos_sched DPDK | | | |
- | | application documentation. | | | |
- +---------------+---------------------------------------------+----------+----------+---------------+
- | burst_read | Read burst size (number of packets) | YES | uint32_t | 64 |
- +---------------+---------------------------------------------+----------+----------+---------------+
- | burst_write | Write burst size (number of packets) | YES | uint32_t | 32 |
- +---------------+---------------------------------------------+----------+----------+---------------+
-
-
-KNI section
-~~~~~~~~~~~
-
-.. _table_ip_pipelines_kni_section:
-
-.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|
-
-.. table:: Configuration file KNI section
-
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | Section | Description | Optional | Type | Default value |
- +===============+==============================================+==========+==================+===============+
- | core | CPU core to run the KNI kernel thread. | YES | See "CPU Core | Not set |
- | | When core config is set, the KNI kernel | | notation" | |
- | | thread will be bound to the particular core. | | | |
- | | When core config is not set, the KNI kernel | | | |
- | | thread will be scheduled by the OS. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | mempool | Mempool to use for buffer allocation for | YES | uint32_t | MEMPOOL0 |
- | | current KNI port. The mempool ID has | | | |
- | | to be associated with a valid instance | | | |
- | | defined in the mempool entry of the global | | | |
- | | section. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | burst_read | Read burst size (number of packets) | YES | uint32_t | 32 |
- | | | | power of 2 | |
- | | | | 0 < burst < size | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | burst_write | Write burst size (number of packets) | YES | uint32_t | 32 |
- | | | | power of 2 | |
- | | | | 0 < burst < size | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | dropless | When dropless is set to NO, packets can be | YES | YES/NO | NO |
- | | dropped if not enough free slots are | | | |
- | | currently available in the queue, so the | | | |
- | | write operation to the queue is non- | | | |
- | | blocking. | | | |
- | | When dropless is set to YES, packets cannot | | | |
- | | be dropped if not enough free slots are | | | |
- | | currently available in the queue, so the | | | |
- | | write operation to the queue is blocking, as | | | |
- | | the write operation is retried until enough | | | |
- | | free slots become available and all the | | | |
- | | packets are successfully written to the | | | |
- | | queue. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
- | n_retries | Number of retries. Valid only when dropless | YES | uint64_t | 0 |
- | | is set to YES. When set to 0, it indicates | | | |
- | | unlimited number of retries. | | | |
- +---------------+----------------------------------------------+----------+------------------+---------------+
-
-
-SOURCE section
+Initialization
~~~~~~~~~~~~~~
-.. _table_ip_pipelines_source_section:
-
-.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{2cm}|
-
-.. table:: Configuration file SOURCE section
-
- +---------------+---------------------------------------+----------+----------+---------------+
- | Section | Description | Optional | Type | Default value |
- +===============+=======================================+==========+==========+===============+
- | Mempool | Mempool to use for buffer allocation. | YES | uint32_t | MEMPOOL0 |
- +---------------+---------------------------------------+----------+----------+---------------+
- | Burst | Read burst size (number of packets) | | uint32_t | 32 |
- +---------------+---------------------------------------+----------+----------+---------------+
-
-
-SINK section
-~~~~~~~~~~~~
+During this stage, the EAL layer is initialized and the application specific arguments are parsed. Furthermore, the data
+structures (i.e. linked lists) for the application objects are initialized. In case of any initialization error, an error
+message is displayed and the application is terminated.
-Currently, there are no parameters to be passed to a sink device, so
-SINK section is not allowed.
+.. _ip_pipeline_runtime:
-MSGQ section
-~~~~~~~~~~~~
+Run-time
+~~~~~~~~
-.. _table_ip_pipelines_msgq_section:
+The master thread creates and manages all the application objects based on CLI input.
-.. tabularcolumns:: |p{2.5cm}|p{7cm}|p{1.5cm}|p{1.5cm}|p{1.5cm}|
+Each data plane thread runs the pipelines previously assigned to it (one or several) in round-robin order. Each data
+plane thread executes two tasks in time-sharing mode:
-.. table:: Configuration file MSGQ section
+1. *Packet processing task*: Process bursts of input packets read from the pipeline input ports.
- +---------+--------------------------------------------+----------+------------+---------------+
- | Section | Description | Optional | Type | Default value |
- +=========+============================================+==========+============+===============+
- | size | Queue size (number of packets) | YES | uint32_t | 64 |
- | | | | != 0 | |
- | | | | power of 2 | |
- +---------+--------------------------------------------+----------+------------+---------------+
- | cpu | CPU socket ID where to allocate memory for | YES | uint32_t | 0 |
- | | the current queue. | | | |
- +---------+--------------------------------------------+----------+------------+---------------+
+2. *Message handling task*: Periodically, the data plane thread pauses the packet processing task and polls for request
+   messages sent by the master thread. Examples: add/remove a pipeline to/from the current data plane thread, add/delete
+   rules to/from a given table of a specific pipeline owned by the current data plane thread, read statistics, etc.
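+
+As an illustrative sketch, request messages like the above originate from runtime CLI commands along these
+lines (the pipeline name, table ID, port ID and thread ID are example assumptions):
+
+.. code-block:: console
+
+   ; Assign the pipeline to data plane thread 1
+   thread 1 pipeline PIPELINE0 enable
+
+   ; Add the default rule to table 0 of the pipeline: forward to output port 0
+   pipeline PIPELINE0 table 0 rule add match default action fwd port 0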
+Examples
+--------
-EAL section
-~~~~~~~~~~~
+.. _table_examples:
-The application generates the EAL parameters rather than reading them from the command line.
+.. tabularcolumns:: |p{3cm}|p{5cm}|p{4cm}|p{4cm}|
-The CPU core mask parameter is generated based on the core entry of all PIPELINE sections.
-All the other EAL parameters can be set from this section of the application configuration file.
+.. table:: Pipeline examples provided with the application
+
+ +-----------------------+----------------------+----------------+------------------------------------+
+ | Name | Table(s) | Actions | Messages |
+ +=======================+======================+================+====================================+
+ | L2fwd | Stub | Forward | 1. Mempool create |
+ | | | | 2. Link create |
+ | Note: Implemented | | | 3. Pipeline create |
+ | using a pipeline with |                      |                | 4. Pipeline port in/out            |
+ | a simple pass-through | | | 5. Pipeline table |
+ | connection between | | | 6. Pipeline port in table |
+ | input and output | | | 7. Pipeline enable |
+ | ports. | | | 8. Pipeline table rule add |
+ +-----------------------+----------------------+----------------+------------------------------------+
+ | Flow classification | Exact match | Forward | 1. Mempool create |
+ | | | | 2. Link create |
+ | | * Key = byte array | | 3. Pipeline create |
+ | | (16 bytes) | | 4. Pipeline port in/out |
+ | | * Offset = 278 | | 5. Pipeline table |
+ | | * Table size = 64K | | 6. Pipeline port in table |
+ | | | | 7. Pipeline enable |
+ | | | | 8. Pipeline table rule add default |
+ | | | | 9. Pipeline table rule add |
+ +-----------------------+----------------------+----------------+------------------------------------+
+ | KNI | Stub | Forward | 1. Mempool create |
+ | | | | 2. Link create |
+ | | | | 3. Pipeline create |
+ | | | | 4. Pipeline port in/out |
+ | | | | 5. Pipeline table |
+ | | | | 6. Pipeline port in table |
+ | | | | 7. Pipeline enable |
+ | | | | 8. Pipeline table rule add |
+ +-----------------------+----------------------+----------------+------------------------------------+
+ | Firewall | ACL | Allow/Drop | 1. Mempool create |
+ | | | | 2. Link create |
+ | | * Key = n-tuple | | 3. Pipeline create |
+ | | * Offset = 270 | | 4. Pipeline port in/out |
+ | | * Table size = 4K | | 5. Pipeline table |
+ | | | | 6. Pipeline port in table |
+ | | | | 7. Pipeline enable |
+ | | | | 8. Pipeline table rule add default |
+ | | | | 9. Pipeline table rule add |
+ +-----------------------+----------------------+----------------+------------------------------------+
+ | IP routing           | LPM (IPv4)           | Forward        | 1. Mempool create                  |
+ |                      |                      |                | 2. Link create                     |
+ |                      | * Key = IP dest addr |                | 3. Pipeline create                 |
+ | | * Offset = 286 | | 4. Pipeline port in/out |
+ | | * Table size = 4K | | 5. Pipeline table |
+ | | | | 6. Pipeline port in table |
+ | | | | 7. Pipeline enable |
+ | | | | 8. Pipeline table rule add default |
+ | | | | 9. Pipeline table rule add |
+ +-----------------------+----------------------+----------------+------------------------------------+
+ | Equal-cost multi-path | LPM (IPv4)           | Forward,       | 1. Mempool create                  |
+ | routing (ECMP) | | load balance, | 2. Link create |
+ | | * Key = IP dest addr | encap ether | 3. Pipeline create |
+ | | * Offset = 286 | | 4. Pipeline port in/out |
+ | | * Table size = 4K | | 5. Pipeline table (LPM) |
+ | | | | 6. Pipeline table (Array) |
+ | | | | 7. Pipeline port in table (LPM) |
+ | | Array | | 8. Pipeline enable |
+ | | | | 9. Pipeline table rule add default |
+ | | * Key = Array index | | 10. Pipeline table rule add(LPM) |
+ | | * Offset = 256 | | 11. Pipeline table rule add(Array) |
+ | | * Size = 64K | | |
+ | | | | |
+ +-----------------------+----------------------+----------------+------------------------------------+
+Command Line Interface (CLI)
+----------------------------
-Library of pipeline types
--------------------------
+Link
+~~~~
-Pipeline module
-~~~~~~~~~~~~~~~
-
-A pipeline is a self-contained module that implements a packet processing function and is typically implemented on
-top of the DPDK Packet Framework *librte_pipeline* library.
-The application provides a run-time mechanism to register different pipeline types.
-
-Depending on the required configuration, each registered pipeline type (pipeline class) is instantiated one or
-several times, with each pipeline instance (pipeline object) assigned to one of the available CPU cores.
-Each CPU core can run one or more pipeline instances, which might be of same or different types.
-For more information of the CPU core threading model, please refer to the :ref:`ip_pipeline_runtime` section.
-
-
-Pipeline type
-^^^^^^^^^^^^^
-
-Each pipeline type is made up of a back-end and a front-end. The back-end represents the packet processing engine
-of the pipeline, typically implemented using the DPDK Packet Framework libraries, which reads packets from the
-input packet queues, handles them and eventually writes them to the output packet queues or drops them.
-The front-end represents the run-time configuration interface of the pipeline, which is exposed as CLI commands.
-The front-end communicates with the back-end through message queues.
-
-.. _table_ip_pipelines_back_end:
-
-.. tabularcolumns:: |p{1cm}|p{2cm}|p{12cm}|
-
-.. table:: Pipeline back-end
-
- +------------+------------------+--------------------------------------------------------------------+
- | Field name | Field type | Description |
- +============+==================+====================================================================+
- | f_init | Function pointer | Function to initialize the back-end of the current pipeline |
- | | | instance. Typical work implemented by this function for the |
- | | | current pipeline instance: |
- | | | Memory allocation; |
- | | | Parse the pipeline type specific arguments; |
- | | | Initialize the pipeline input ports, output ports and tables, |
- | | | interconnect input ports to tables; |
- | | | Set the message handlers. |
- +------------+------------------+--------------------------------------------------------------------+
- | f_free | Function pointer | Function to free the resources allocated by the back-end of the |
- | | | current pipeline instance. |
- +------------+------------------+--------------------------------------------------------------------+
- | f_run | Function pointer | Set to NULL for pipelines implemented using the DPDK library |
- | | | librte_pipeline (typical case), and to non-NULL otherwise. This |
- | | | mechanism is made available to support quick integration of |
- | | | legacy code. |
- | | | This function is expected to provide the packet processing |
- | | | related code to be called as part of the CPU thread dispatch |
- | | | loop, so this function is not allowed to contain an infinite loop. |
- +------------+------------------+--------------------------------------------------------------------+
- | f_timer | Function pointer | Function to read the pipeline input message queues, handle |
- | | | the request messages, create response messages and write |
- | | | the response queues. The format of request and response |
- | | | messages is defined by each pipeline type, with the exception |
- | | | of some requests which are mandatory for all pipelines (e.g. |
- | | | ping, statistics). |
- +------------+------------------+--------------------------------------------------------------------+
+ Link configuration ::
+ link <link_name>
+ dev <device_name> | port <port_id>
+ rxq <n_queues> <queue_size> <mempool_name>
+ txq <n_queues> <queue_size>
+ promiscuous on | off
+ [rss <qid_0> ... <qid_n>]
-.. _table_ip_pipelines_front_end:
-
-.. tabularcolumns:: |p{1cm}|p{2cm}|p{12cm}|
+ Note: The PCI device name must be specified in the Domain:Bus:Device.Function format.
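+
+ For illustration, a minimal sketch of a link command over a hypothetical
+ PCI device, assuming a mempool named MEMPOOL0 has already been created
+ (see the Mempool section below); all names and sizes are examples only ::
+
+    link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on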
-.. table:: Pipeline front-end
-
- +------------+-----------------------+-------------------------------------------------------------------+
- | Field name | Field type | Description |
- +============+=======================+===================================================================+
- | f_init | Function pointer | Function to initialize the front-end of the current pipeline |
- | | | instance. |
- +------------+-----------------------+-------------------------------------------------------------------+
- | f_post_init| Function pointer | Function to run once after f_init. |
- +------------+-----------------------+-------------------------------------------------------------------+
- | f_track | Function pointer | See section Tracking pipeline output port to physical link. |
- +------------+-----------------------+-------------------------------------------------------------------+
- | f_free | Function pointer | Function to free the resources allocated by the front-end of |
- | | | the current pipeline instance. |
- +------------+-----------------------+-------------------------------------------------------------------+
- | cmds | Array of CLI commands | Array of CLI commands to be registered to the application CLI |
- | | | for the current pipeline type. Even though the CLI is executed |
- | | | by a different pipeline (typically, this is the master pipeline), |
- | | | from modularity perspective is more efficient to keep the |
- | | | message client side (part of the front-end) together with the |
- | | | message server side (part of the back-end). |
- +------------+-----------------------+-------------------------------------------------------------------+
+Mempool
+~~~~~~~
-Tracking pipeline output port to physical link
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Each pipeline instance is a standalone block that does not have visibility into the other pipeline instances or
-the application-level pipeline inter-connectivity.
-In some cases, it is useful for a pipeline instance to get application level information related to pipeline
-connectivity, such as to identify the output link (e.g. physical NIC port) where one of its output ports connected,
-either directly or indirectly by traversing other pipeline instances.
+ Mempool create ::
-Tracking can be successful or unsuccessful.
-Typically, tracking for a specific pipeline instance is successful when each one of its input ports can be mapped
-to a single output port, meaning that all packets read from the current input port can only go out on a single
-output port.
-Depending on the pipeline type, some exceptions may be allowed: a small portion of the packets, considered exception
-packets, are sent out on an output port that is pre-configured for this purpose.
-
-For pass-through pipeline type, the tracking is always successful.
-For pipeline types as flow classification, firewall or routing, the tracking is only successful when the number of
-output ports for the current pipeline instance is 1.
-
-This feature is used by the IP routing pipeline for adding/removing implicit routes every time a link is brought
-up/down.
+ mempool <mempool_name> buffer <buffer_size>
+ pool <pool_size> cache <cache_size> cpu <cpu_id>
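+
+ A minimal sketch with hypothetical names and sizes; the buffer size here
+ covers a standard Ethernet frame plus mbuf and headroom overhead ::
+
+    mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0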
-Table copies
-^^^^^^^^^^^^
-
-Fast table copy: pipeline table used by pipeline for the packet processing task, updated through messages, table
-data structures are optimized for lookup operation.
-
-Slow table copy: used by the configuration layer, typically updated through CLI commands, kept in sync with the fast
-copy (its update triggers the fast copy update).
-Required for executing advanced table queries without impacting the packet processing task, therefore the slow copy
-is typically organized using different criteria than the fast copy.
-
-Examples:
-
-* Flow classification: Search through current set of flows (e.g. list all flows with a specific source IP address);
-
-* Firewall: List rules in descending order of priority;
-
-* Routing table: List routes sorted by prefix depth and their type (local, remote, default);
-
-* ARP: List entries sorted per output interface.
-
-
-Packet meta-data
-^^^^^^^^^^^^^^^^
-
-Packet meta-data field offsets provided as argument to pipeline instances are essentially defining the data structure
-for the packet meta-data used by the current application use-case.
-It is very useful to put it in the configuration file as a comment in order to facilitate the readability of the
-configuration file.
-
-The reason to use field offsets for defining the data structure for the packet meta-data is due to the C language
-limitation of not being able to define data structures at run-time.
-Feature to consider: have the configuration file parser automatically generate and print the data structure defining
-the packet meta-data for the current application use-case.
-
-Packet meta-data typically contains:
+Software queue
+~~~~~~~~~~~~~~
-1. Pure meta-data: intermediate data per packet that is computed internally, passed between different tables of
- the same pipeline instance (e.g. lookup key for the ARP table is obtained from the routing table), or between
- different pipeline instances (e.g. flow ID, traffic metering color, etc);
+ Create software queue ::
-2. Packet fields: typically, packet header fields that are read directly from the packet, or read from the packet
- and saved (duplicated) as a working copy at a different location within the packet meta-data (e.g. Diffserv
- 5-tuple, IP destination address, etc).
+ swq <swq_name> size <size> cpu <cpu_id>
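+
+ For example, a hypothetical software queue that two pipelines running on
+ the same CPU socket could use to exchange packets ::
+
+    swq SWQ0 size 4096 cpu 0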
-Several strategies are used to design the packet meta-data, as described in the next subsections.
+Traffic manager
+~~~~~~~~~~~~~~~
-Store packet meta-data in a different cache line as the packet headers
-""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+ Add traffic manager subport profile ::
-This approach is able to support protocols with variable header length, like MPLS, where the offset of IP header
-from the start of the packet (and, implicitly, the offset of the IP header in the packet buffer) is not fixed.
-Since the pipelines typically require the specification of a fixed offset to the packet fields (e.g. Diffserv
-5-tuple, used by the flow classification pipeline, or the IP destination address, used by the IP routing pipeline),
-the workaround is to have the packet RX pipeline copy these fields at fixed offsets within the packet meta-data.
+ tmgr subport profile
+ <tb_rate> <tb_size>
+ <tc0_rate> <tc1_rate> <tc2_rate> <tc3_rate>
+ <tc_period>
-As this approach duplicates some of the packet fields, it requires accessing more cache lines per packet for filling
-in selected packet meta-data fields (on RX), as well as flushing selected packet meta-data fields into the
-packet (on TX).
-Example:
+ Add traffic manager pipe profile ::
-.. code-block:: ini
+ tmgr pipe profile
+ <tb_rate> <tb_size>
+ <tc0_rate> <tc1_rate> <tc2_rate> <tc3_rate>
+ <tc_period>
+ <tc_ov_weight> <wrr_weight0..15>
+ Create traffic manager port ::
- ; struct app_pkt_metadata {
- ; uint32_t ip_da;
- ; uint32_t hash;
- ; uint32_t flow_id;
- ; uint32_t color;
- ; } __attribute__((__packed__));
- ;
+ tmgr <tmgr_name>
+ rate <rate>
+ spp <n_subports_per_port>
+ pps <n_pipes_per_subport>
+ qsize <qsize_tc0>
+ <qsize_tc1> <qsize_tc2> <qsize_tc3>
+ fo <frame_overhead> mtu <mtu> cpu <cpu_id>
- [PIPELINE1]
- ; Packet meta-data offsets
- ip_da_offset = 0; Used by: routing
- hash_offset = 4; Used by: RX, flow classification
- flow_id_offset = 8; Used by: flow classification, flow actions
- color_offset = 12; Used by: flow actions, routing
+ Configure traffic manager subport ::
+ tmgr <tmgr_name>
+ subport <subport_id>
+ profile <subport_profile_id>
-Overlay the packet meta-data in the same cache line with the packet headers
-"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+ Configure traffic manager pipe ::
-This approach is minimizing the number of cache line accessed per packet by storing the packet metadata in the
-same cache line with the packet headers.
-To enable this strategy, either some headroom is reserved for meta-data at the beginning of the packet headers
-cache line (e.g. if 16 bytes are needed for meta-data, then the packet headroom can be set to 128+16 bytes, so
-that NIC writes the first byte of the packet at offset 16 from the start of the first packet cache line),
-or meta-data is reusing the space of some packet headers that are discarded from the packet (e.g. input Ethernet
-header).
+ tmgr <tmgr_name>
+ subport <subport_id>
+ pipe from <pipe_id_first> to <pipe_id_last>
+ profile <pipe_profile_id>
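+
+ A minimal sketch that defines one subport profile and one pipe profile,
+ creates a hypothetical traffic manager port and maps all of its pipes to
+ pipe profile 0; all rates are in bytes per second and purely
+ illustrative ::
+
+    tmgr subport profile 1250000000 1000000 1250000000 1250000000 1250000000 1250000000 10
+    tmgr pipe profile 305175 1000000 305175 305175 305175 305175 40 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+    tmgr TMGR0 rate 1250000000 spp 1 pps 4096 qsize 64 64 64 64 fo 24 mtu 1522 cpu 0
+    tmgr TMGR0 subport 0 profile 0
+    tmgr TMGR0 subport 0 pipe from 0 to 4095 profile 0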
-Example:
-.. code-block:: ini
+Tap
+~~~
- ; struct app_pkt_metadata {
- ; uint8_t headroom[RTE_PKTMBUF_HEADROOM]; /* 128 bytes (default) */
- ; union {
- ; struct {
- ; struct ether_hdr ether; /* 14 bytes */
- ; struct qinq_hdr qinq; /* 8 bytes */
- ; };
- ; struct {
- ; uint32_t hash;
- ; uint32_t flow_id;
- ; uint32_t color;
- ; };
- ; };
- ; struct ipv4_hdr ip; /* 20 bytes */
- ; } __attribute__((__packed__));
- ;
- [PIPELINE2]
- ; Packet meta-data offsets
- qinq_offset = 142; Used by: RX, flow classification
- ip_da_offset = 166; Used by: routing
- hash_offset = 128; Used by: RX, flow classification
- flow_id_offset = 132; Used by: flow classification, flow actions
- color_offset = 136; Used by: flow actions, routing
+ Create tap port ::
+ tap <name>
-List of pipeline types
-~~~~~~~~~~~~~~~~~~~~~~
-.. _table_ip_pipelines_types:
+Kni
+~~~
-.. tabularcolumns:: |p{3cm}|p{5cm}|p{4cm}|p{4cm}|
+ Create kni port ::
-.. table:: List of pipeline types provided with the application
-
- +-----------------------+-----------------------------+-----------------------+------------------------------------------+
- | Name | Table(s) | Actions | Messages |
- +=======================+=============================+=======================+==========================================+
- | Pass-through | Passthrough | 1. Pkt metadata build | 1. Ping |
- | | | 2. Flow hash | 2. Stats |
- | Note: depending on | | 3. Pkt checks | |
- | port type, can be | | 4. Load balancing | |
- | used for RX, TX, IP | | | |
- | fragmentation, IP | | | |
- | reassembly or Traffic | | | |
- | Management | | | |
- +-----------------------+-----------------------------+-----------------------+------------------------------------------+
- | Flow classification | Exact match | 1. Flow ID | 1. Ping |
- | | | | |
- | | * Key = byte array | 2. Flow stats | 2. Stats |
- | | (source: pkt metadata) | 3. Metering | 3. Flow stats |
- | | * Data = action dependent | 4. Network Address | 4. Action stats |
- | | | 5. Translation (NAT) | 5. Flow add/ update/ delete |
- | | | | 6. Default flow add/ update/ delete |
- | | | | 7. Action update |
- +-----------------------+-----------------------------+-----------------------+------------------------------------------+
- | Flow actions | Array | 1. Flow stats | 1. Ping |
- | | | | |
- | | * Key = Flow ID | 2. Metering | 2. Stats |
- | | (source: pkt metadata) | 3. Network Address | 3. Action stats |
- | | * Data = action dependent | 4. Translation (NAT) | 4. Action update |
- +-----------------------+-----------------------------+-----------------------+------------------------------------------+
- | Firewall | ACL | 1. Allow/Drop | 1. Ping |
- | | | | |
- | | * Key = n-tuple | | 2. Stats |
- | | (source: pkt headers) | | 3. Rule add/ update/ delete |
- | | * Data = none | | 4. Default rule add/ update/ delete |
- +-----------------------+-----------------------------+-----------------------+------------------------------------------+
- | IP routing | LPM (IPv4 or IPv6, | 1. TTL decrement and | 1. Ping |
- | | depending on pipeline type) | 2. IPv4 checksum | 2. Stats |
- | | | | |
- | | * Key = IP destination | 3. update | 3. Route add/ update/ delete |
- | | (source: pkt metadata) | 4. Header | 4. Default route add/ update/ delete |
- | | * Data = Dependent on | 5. encapsulation | 5. ARP entry add/ update/ delete |
- | | actions and next hop | 6. (based on next hop | 6. Default ARP entry add/ update/ delete |
- | | type | 7. type) | |
- | | | | |
- | | Hash table (for ARP, only | | |
- | | | | |
- | | when ARP is enabled) | | |
- | | | | |
- | | * Key = (Port ID, | | |
- | | next hop IP address) | | |
- | | (source: pkt meta-data) | | |
- | | * Data: MAC address | | |
- +-----------------------+-----------------------------+-----------------------+------------------------------------------+
+ kni <kni_name>
+ link <link_name>
+ mempool <mempool_name>
+ [thread <thread_id>]
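+
+ A minimal sketch, assuming a link LINK0 and a mempool MEMPOOL0 have
+ already been created ::
+
+    kni KNI0 link LINK0 mempool MEMPOOL0
+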
+Action profile
+~~~~~~~~~~~~~~
-Command Line Interface (CLI)
-----------------------------
+ Create action profile for pipeline input port ::
+
+ port in action profile <profile_name>
+ [filter match | mismatch offset <key_offset> mask <key_mask> key <key_value> port <port_id>]
+ [balance offset <key_offset> mask <key_mask> port <port_id0> ... <port_id15>]
+
+ Create action profile for the pipeline table ::
+
+ table action profile <profile_name>
+ ipv4 | ipv6
+ offset <ip_offset>
+ fwd
+ [balance offset <key_offset> mask <key_mask> outoffset <out_offset>]
+ [meter srtcm | trtcm
+ tc <n_tc>
+ stats none | pkts | bytes | both]
+ [tm spp <n_subports_per_port> pps <n_pipes_per_subport>]
+ [encap ether | vlan | qinq | mpls | pppoe]
+ [nat src | dst
+ proto udp | tcp]
+ [ttl drop | fwd
+ stats none | pkts]
+ [stats pkts | bytes | both]
+ [time]
+
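+ As an illustration, a hypothetical table action profile for an IPv4
+ forwarding pipeline with plain forwarding only; offset 270 assumes the
+ IPv4 header starts 14 bytes (Ethernet header) after the packet data,
+ which itself typically begins at offset 256 within the mbuf (128 bytes
+ of mbuf metadata plus 128 bytes of headroom) ::
+
+    table action profile AP0 ipv4 offset 270 fwd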
+
+Pipeline
+~~~~~~~~
-Global CLI commands
-~~~~~~~~~~~~~~~~~~~
+Create pipeline ::
+
+ pipeline <pipeline_name>
+ period <timer_period_ms>
+ offset_port_id <offset_port_id>
+ cpu <cpu_id>
+
+Create pipeline input port ::
+
+ pipeline <pipeline_name> port in
+ bsz <burst_size>
+ link <link_name> rxq <queue_id>
+ | swq <swq_name>
+ | tmgr <tmgr_name>
+ | tap <tap_name> mempool <mempool_name> mtu <mtu>
+ | kni <kni_name>
+ | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>
+ [action <port_in_action_profile_name>]
+ [disabled]
+
+Create pipeline output port ::
+
+ pipeline <pipeline_name> port out
+ bsz <burst_size>
+ link <link_name> txq <txq_id>
+ | swq <swq_name>
+ | tmgr <tmgr_name>
+ | tap <tap_name>
+ | kni <kni_name>
+ | sink [file <file_name> pkts <max_n_pkts>]
+
+Create pipeline table ::
+
+ pipeline <pipeline_name> table
+ match
+ acl
+ ipv4 | ipv6
+ offset <ip_header_offset>
+ size <n_rules>
+ | array
+ offset <key_offset>
+ size <n_keys>
+ | hash
+ ext | lru
+ key <key_size>
+ mask <key_mask>
+ offset <key_offset>
+ buckets <n_buckets>
+ size <n_keys>
+ | lpm
+ ipv4 | ipv6
+ offset <ip_header_offset>
+ size <n_rules>
+ | stub
+ [action <table_action_profile_name>]
+
+Connect pipeline input port to table ::
+
+ pipeline <pipeline_name> port in <port_id> table <table_id>
+
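+Putting these commands together, a minimal sketch of a two-port
+pass-through pipeline (hypothetical names; LINK0 and LINK1 are assumed to
+have been created earlier) ::
+
+   pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+
+   pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+   pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+   pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+   pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
+
+   pipeline PIPELINE0 table match stub
+   pipeline PIPELINE0 table match stub
+
+   pipeline PIPELINE0 port in 0 table 0
+   pipeline PIPELINE0 port in 1 table 1
+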
+Display statistics for specific pipeline input port, output port
+or table ::
+
+ pipeline <pipeline_name> port in <port_id> stats read [clear]
+ pipeline <pipeline_name> port out <port_id> stats read [clear]
+ pipeline <pipeline_name> table <table_id> stats read [clear]
+
+Enable given input port for specific pipeline instance ::
+
+ pipeline <pipeline_name> port in <port_id> enable
+
+Disable given input port for specific pipeline instance ::
+
+ pipeline <pipeline_name> port in <port_id> disable
+
+Add default rule to table for specific pipeline instance ::
+
+ pipeline <pipeline_name> table <table_id> rule add
+ match
+ default
+ action
+ fwd
+ drop
+ | port <port_id>
+ | meta
+ | table <table_id>
+
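+For example, continuing the two-port sketch above, each stub table can send
+every packet out through the opposite port ::
+
+   pipeline PIPELINE0 table 0 rule add match default action fwd port 1
+   pipeline PIPELINE0 table 1 rule add match default action fwd port 0
+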
+Add rule to table for specific pipeline instance ::
+
+ pipeline <pipeline_name> table <table_id> rule add
+
+ match
+ acl
+ priority <priority>
+ ipv4 | ipv6 <sa> <sa_depth> <da> <da_depth>
+ <sp0> <sp1> <dp0> <dp1> <proto>
+ | array <pos>
+ | hash
+ raw <key>
+ | ipv4_5tuple <sa> <da> <sp> <dp> <proto>
+ | ipv6_5tuple <sa> <da> <sp> <dp> <proto>
+ | ipv4_addr <addr>
+ | ipv6_addr <addr>
+ | qinq <svlan> <cvlan>
+ | lpm
+ ipv4 | ipv6 <addr> <depth>
+
+ action
+ fwd
+ drop
+ | port <port_id>
+ | meta
+ | table <table_id>
+ [balance <out0> ... <out7>]
+ [meter
+ tc0 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ [tc1 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ tc2 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ tc3 meter <meter_profile_id> policer g <pa> y <pa> r <pa>]]
+ [tm subport <subport_id> pipe <pipe_id>]
+ [encap
+ ether <da> <sa>
+ | vlan <da> <sa> <pcp> <dei> <vid>
+ | qinq <da> <sa> <pcp> <dei> <vid> <pcp> <dei> <vid>
+ | mpls unicast | multicast
+ <da> <sa>
+ label0 <label> <tc> <ttl>
+ [label1 <label> <tc> <ttl>
+ [label2 <label> <tc> <ttl>
+ [label3 <label> <tc> <ttl>]]]
+ | pppoe <da> <sa> <session_id>]
+ [nat ipv4 | ipv6 <addr> <port>]
+ [ttl dec | keep]
+ [stats]
+ [time]
+
+ where:
+ <pa> ::= g | y | r | drop
+
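+For example, a hypothetical ACL rule that forwards TCP traffic (protocol 6)
+from 100.0.0.0/24 to 200.0.0.0/24, with any source or destination port,
+out through output port 1 ::
+
+   pipeline PIPELINE0 table 0 rule add match acl priority 1 ipv4 100.0.0.0 24 200.0.0.0 24 0 65535 0 65535 6 action fwd port 1
+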
+Add bulk rules to table for specific pipeline instance ::
+
+ pipeline <pipeline_name> table <table_id> rule add bulk <file_name> <n_rules>
-.. _table_ip_pipelines_cli_commands:
+ Where:
+ - file_name = path to file
+ - File line format = match <match> action <action>
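+
+ For example, a hypothetical rules file for an LPM table could contain
+ lines such as ::
+
+    match lpm ipv4 100.0.0.0 24 action fwd port 0
+    match lpm ipv4 100.64.0.0 24 action fwd port 1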
-.. tabularcolumns:: |p{3cm}|p{6cm}|p{6cm}|
+Delete table rule for specific pipeline instance ::
-.. table:: Global CLI commands
+ pipeline <pipeline_name> table <table_id> rule delete
+ match <match>
- +---------+---------------------------------------+--------------------------------------------+
- | Command | Description | Syntax |
- +=========+=======================================+============================================+
- | run | Run CLI commands script file. | run <file> |
- | | | <file> = path to file with CLI commands to |
- | | | execute |
- +---------+---------------------------------------+--------------------------------------------+
- | quit | Gracefully terminate the application. | quit |
- +---------+---------------------------------------+--------------------------------------------+
+Delete default table rule for specific pipeline instance ::
+ pipeline <pipeline_name> table <table_id> rule delete
+ match
+ default
-CLI commands for link configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Add meter profile to the table for specific pipeline instance ::
-.. _table_ip_pipelines_runtime_config:
+ pipeline <pipeline_name> table <table_id> meter profile <meter_profile_id>
+ add srtcm cir <cir> cbs <cbs> ebs <ebs>
+ | trtcm cir <cir> pir <pir> cbs <cbs> pbs <pbs>
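+
+ For example, a hypothetical two-rate three-color marker (trTCM) profile,
+ with rates in bytes per second and burst sizes in bytes ::
+
+    pipeline PIPELINE0 table 0 meter profile 0 add trtcm cir 46000000 pir 69000000 cbs 2048 pbs 2048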
-.. tabularcolumns:: |p{3cm}|p{6cm}|p{6cm}|
+Delete meter profile from the table for specific pipeline instance ::
-.. table:: List of run-time configuration commands for link configuration
+ pipeline <pipeline_name> table <table_id>
+ meter profile <meter_profile_id> delete
- +-------------+--------------------+--------------------------------------------+
- | Command | Description | Syntax |
- +=============+====================+============================================+
- | link config | Link configuration | link <link ID> config <IP address> <depth> |
- +-------------+--------------------+--------------------------------------------+
- | link up | Link up | link <link ID> up |
- +-------------+--------------------+--------------------------------------------+
- | link down | Link down | link <link ID> down |
- +-------------+--------------------+--------------------------------------------+
- | link ls | Link list | link ls |
- +-------------+--------------------+--------------------------------------------+
+Update the dscp table for meter or traffic manager action for specific
+pipeline instance ::
-CLI commands common for all pipeline types
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ pipeline <pipeline_name> table <table_id> dscp <file_name>
-.. _table_ip_pipelines_mandatory:
+ Where:
+ - file_name = path to file
+ - the file must contain exactly 64 lines, one per DSCP value
+ - File line format = <tc_id> <tc_queue_id> <color>, with <color> as: g | y | r
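+
+ For example, a hypothetical DSCP translation file that maps every code
+ point to traffic class 0, traffic class queue 0, color green would repeat
+ the following line 64 times ::
+
+    0 0 g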
+
+
+Pipeline enable/disable
+~~~~~~~~~~~~~~~~~~~~~~~
-.. tabularcolumns:: |p{3cm}|p{6cm}|p{6cm}|
+ Enable given pipeline instance for specific data plane thread ::
-.. table:: CLI commands mandatory for all pipelines
+ thread <thread_id> pipeline <pipeline_name> enable
- +--------------------+------------------------------------------------------+----------------------------------------------+
- | Command | Description | Syntax |
- +====================+======================================================+==============================================+
- | ping | Check whether specific pipeline instance is alive. | p <pipeline ID> ping |
- | | The master pipeline sends a ping request | |
- | | message to given pipeline instance and waits for | |
- | | a response message back. | |
- | | Timeout message is displayed when the response | |
- | | message is not received before the timer | |
- | | expires. | |
- +--------------------+------------------------------------------------------+----------------------------------------------+
- | stats | Display statistics for specific pipeline input port, | p <pipeline ID> stats port in <port in ID> |
- | | output port or table. | p <pipeline ID> stats port out <port out ID> |
- | | | p <pipeline ID> stats table <table ID> |
- +--------------------+------------------------------------------------------+----------------------------------------------+
- | input port enable | Enable given input port for specific pipeline | p <pipeline ID> port in <port ID> enable |
- | | instance. | |
- +--------------------+------------------------------------------------------+----------------------------------------------+
- | input port disable | Disable given input port for specific pipeline | p <pipeline ID> port in <port ID> disable |
- | | instance. | |
- +--------------------+------------------------------------------------------+----------------------------------------------+
-Pipeline type specific CLI commands
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Disable given pipeline instance for specific data plane thread ::
-The pipeline specific CLI commands are part of the pipeline type front-end.
+ thread <thread_id> pipeline <pipeline_name> disable
diff --git a/doc/guides/sample_app_ug/kernel_nic_interface.rst b/doc/guides/sample_app_ug/kernel_nic_interface.rst
index 580d06b5..1b3ee9a5 100644
--- a/doc/guides/sample_app_ug/kernel_nic_interface.rst
+++ b/doc/guides/sample_app_ug/kernel_nic_interface.rst
@@ -202,74 +202,8 @@ Setup of mbuf pool, driver and queues is similar to the setup done in the :doc:`
In addition, one or more kernel NIC interfaces are allocated for each
of the configured ports according to the command line parameters.
-The code for allocating the kernel NIC interfaces for a specific port is as follows:
-
-.. code-block:: c
-
- static int
- kni_alloc(uint16_t port_id)
- {
- uint8_t i;
- struct rte_kni *kni;
- struct rte_kni_conf conf;
- struct kni_port_params **params = kni_port_params_array;
-
- if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
- return -1;
-
- params[port_id]->nb_kni = params[port_id]->nb_lcore_k ? params[port_id]->nb_lcore_k : 1;
-
- for (i = 0; i < params[port_id]->nb_kni; i++) {
-
- /* Clear conf at first */
-
- memset(&conf, 0, sizeof(conf));
- if (params[port_id]->nb_lcore_k) {
- snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u_%u", port_id, i);
- conf.core_id = params[port_id]->lcore_k[i];
- conf.force_bind = 1;
- } else
- snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", port_id);
- conf.group_id = (uint16_t)port_id;
- conf.mbuf_size = MAX_PACKET_SZ;
-
- /*
- * The first KNI device associated to a port
- * is the master, for multiple kernel thread
- * environment.
- */
-
- if (i == 0) {
- struct rte_kni_ops ops;
- struct rte_eth_dev_info dev_info;
-
- memset(&dev_info, 0, sizeof(dev_info)); rte_eth_dev_info_get(port_id, &dev_info);
-
- conf.addr = dev_info.pci_dev->addr;
- conf.id = dev_info.pci_dev->id;
-
- /* Get the interface default mac address */
- rte_eth_macaddr_get(port_id, (struct ether_addr *)&conf.mac_addr);
-
- memset(&ops, 0, sizeof(ops));
-
- ops.port_id = port_id;
- ops.change_mtu = kni_change_mtu;
- ops.config_network_if = kni_config_network_interface;
- ops.config_mac_address = kni_config_mac_address;
-
- kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
- } else
- kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
-
- if (!kni)
- rte_exit(EXIT_FAILURE, "Fail to create kni for "
- "port: %d\n", port_id);
-
- params[port_id]->kni[i] = kni;
- }
- return 0;
- }
+The code for allocating the kernel NIC interfaces for a specific port is
+in the function ``kni_alloc``.
The other step in the initialization process that is unique to this sample application
is the association of each port with lcores for RX, TX and kernel threads.
@@ -280,105 +214,8 @@ is the association of each port with lcores for RX, TX and kernel threads.
* Other lcores for pinning the kernel threads on one by one
-This is done by using the`kni_port_params_array[]` array, which is indexed by the port ID.
-The code is as follows:
-
-.. code-block:: console
-
- static int
- parse_config(const char *arg)
- {
- const char *p, *p0 = arg;
- char s[256], *end;
- unsigned size;
- enum fieldnames {
- FLD_PORT = 0,
- FLD_LCORE_RX,
- FLD_LCORE_TX,
- _NUM_FLD = KNI_MAX_KTHREAD + 3,
- };
- int i, j, nb_token;
- char *str_fld[_NUM_FLD];
- unsigned long int_fld[_NUM_FLD];
- uint16_t port_id, nb_kni_port_params = 0;
-
- memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
-
- while (((p = strchr(p0, '(')) != NULL) && nb_kni_port_params < RTE_MAX_ETHPORTS) {
- p++;
- if ((p0 = strchr(p, ')')) == NULL)
- goto fail;
-
- size = p0 - p;
-
- if (size >= sizeof(s)) {
- printf("Invalid config parameters\n");
- goto fail;
- }
-
- snprintf(s, sizeof(s), "%.*s", size, p);
- nb_token = rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',');
-
- if (nb_token <= FLD_LCORE_TX) {
- printf("Invalid config parameters\n");
- goto fail;
- }
-
- for (i = 0; i < nb_token; i++) {
- errno = 0;
- int_fld[i] = strtoul(str_fld[i], &end, 0);
- if (errno != 0 || end == str_fld[i]) {
- printf("Invalid config parameters\n");
- goto fail;
- }
- }
-
- i = 0;
- port_id = (uint8_t)int_fld[i++];
-
- if (port_id >= RTE_MAX_ETHPORTS) {
- printf("Port ID %u could not exceed the maximum %u\n", port_id, RTE_MAX_ETHPORTS);
- goto fail;
- }
-
- if (kni_port_params_array[port_id]) {
- printf("Port %u has been configured\n", port_id);
- goto fail;
- }
-
- kni_port_params_array[port_id] = (struct kni_port_params*)rte_zmalloc("KNI_port_params", sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE);
- kni_port_params_array[port_id]->port_id = port_id;
- kni_port_params_array[port_id]->lcore_rx = (uint8_t)int_fld[i++];
- kni_port_params_array[port_id]->lcore_tx = (uint8_t)int_fld[i++];
-
- if (kni_port_params_array[port_id]->lcore_rx >= RTE_MAX_LCORE || kni_port_params_array[port_id]->lcore_tx >= RTE_MAX_LCORE) {
- printf("lcore_rx %u or lcore_tx %u ID could not "
- "exceed the maximum %u\n",
- kni_port_params_array[port_id]->lcore_rx, kni_port_params_array[port_id]->lcore_tx, RTE_MAX_LCORE);
- goto fail;
- }
-
- for (j = 0; i < nb_token && j < KNI_MAX_KTHREAD; i++, j++)
- kni_port_params_array[port_id]->lcore_k[j] = (uint8_t)int_fld[i];
- kni_port_params_array[port_id]->nb_lcore_k = j;
- }
-
- print_config();
-
- return 0;
-
- fail:
-
- for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (kni_port_params_array[i]) {
- rte_free(kni_port_params_array[i]);
- kni_port_params_array[i] = NULL;
- }
- }
-
- return -1;
-
- }
+This is done by using the ``kni_port_params_array[]`` array, which is indexed by the port ID.
+The code is in the function ``parse_config``.
Packet Forwarding
~~~~~~~~~~~~~~~~~
@@ -387,99 +224,18 @@ After the initialization steps are completed, the main_loop() function is run on
This function first checks the lcore_id against the user provided lcore_rx and lcore_tx
to see if this lcore is reading from or writing to kernel NIC interfaces.
-For the case that reads from a NIC port and writes to the kernel NIC interfaces,
+For the case that reads from a NIC port and writes to the kernel NIC interfaces (``kni_ingress``),
the packet reception is the same as in L2 Forwarding sample application
(see :ref:`l2_fwd_app_rx_tx_packets`).
The packet transmission is done by sending mbufs into the kernel NIC interfaces by rte_kni_tx_burst().
The KNI library automatically frees the mbufs after the kernel successfully copied the mbufs.
-.. code-block:: c
-
- /**
- * Interface to burst rx and enqueue mbufs into rx_q
- */
-
- static void
- kni_ingress(struct kni_port_params *p)
- {
- uint8_t i, nb_kni, port_id;
- unsigned nb_rx, num;
- struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
-
- if (p == NULL)
- return;
-
- nb_kni = p->nb_kni;
- port_id = p->port_id;
-
- for (i = 0; i < nb_kni; i++) {
- /* Burst rx from eth */
- nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ);
- if (unlikely(nb_rx > PKT_BURST_SZ)) {
- RTE_LOG(ERR, APP, "Error receiving from eth\n");
- return;
- }
-
- /* Burst tx to kni */
- num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
- kni_stats[port_id].rx_packets += num;
- rte_kni_handle_request(p->kni[i]);
-
- if (unlikely(num < nb_rx)) {
- /* Free mbufs not tx to kni interface */
- kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
- kni_stats[port_id].rx_dropped += nb_rx - num;
- }
- }
- }
-
-For the other case that reads from kernel NIC interfaces and writes to a physical NIC port, packets are retrieved by reading
-mbufs from kernel NIC interfaces by `rte_kni_rx_burst()`.
+For the other case that reads from kernel NIC interfaces
+and writes to a physical NIC port (``kni_egress``),
+packets are retrieved by reading mbufs from kernel NIC interfaces by ``rte_kni_rx_burst()``.
The packet transmission is the same as in the L2 Forwarding sample application
(see :ref:`l2_fwd_app_rx_tx_packets`).
-.. code-block:: c
-
- /**
- * Interface to dequeue mbufs from tx_q and burst tx
- */
-
- static void
-
- kni_egress(struct kni_port_params *p)
- {
- uint8_t i, nb_kni, port_id;
- unsigned nb_tx, num;
- struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
-
- if (p == NULL)
- return;
-
- nb_kni = p->nb_kni;
- port_id = p->port_id;
-
- for (i = 0; i < nb_kni; i++) {
- /* Burst rx from kni */
- num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);
- if (unlikely(num > PKT_BURST_SZ)) {
- RTE_LOG(ERR, APP, "Error receiving from KNI\n");
- return;
- }
-
- /* Burst tx to eth */
-
- nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
-
- kni_stats[port_id].tx_packets += nb_tx;
-
- if (unlikely(nb_tx < num)) {
- /* Free mbufs not tx to NIC */
- kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
- kni_stats[port_id].tx_dropped += num - nb_tx;
- }
- }
- }
-
Callbacks for Kernel Requests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -492,106 +248,3 @@ Application may choose to not implement following callbacks:
- ``config_mac_address``
- ``config_promiscusity``
-
-
-.. code-block:: c
-
- static struct rte_kni_ops kni_ops = {
- .change_mtu = kni_change_mtu,
- .config_network_if = kni_config_network_interface,
- .config_mac_address = kni_config_mac_address,
- .config_promiscusity = kni_config_promiscusity,
- };
-
- /* Callback for request of changing MTU */
-
- static int
- kni_change_mtu(uint16_t port_id, unsigned new_mtu)
- {
- int ret;
- struct rte_eth_conf conf;
-
- if (port_id >= rte_eth_dev_count()) {
- RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
- return -EINVAL;
- }
-
- RTE_LOG(INFO, APP, "Change MTU of port %d to %u\n", port_id, new_mtu);
-
- /* Stop specific port */
-
- rte_eth_dev_stop(port_id);
-
- memcpy(&conf, &port_conf, sizeof(conf));
-
- /* Set new MTU */
-
- if (new_mtu > ETHER_MAX_LEN)
- conf.rxmode.jumbo_frame = 1;
- else
- conf.rxmode.jumbo_frame = 0;
-
- /* mtu + length of header + length of FCS = max pkt length */
-
- conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE + KNI_ENET_FCS_SIZE;
-
- ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
- if (ret < 0) {
- RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id);
- return ret;
- }
-
- /* Restart specific port */
-
- ret = rte_eth_dev_start(port_id);
- if (ret < 0) {
- RTE_LOG(ERR, APP, "Fail to restart port %d\n", port_id);
- return ret;
- }
-
- return 0;
- }
-
- /* Callback for request of configuring network interface up/down */
-
- static int
- kni_config_network_interface(uint16_t port_id, uint8_t if_up)
- {
- int ret = 0;
-
- if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
- return -EINVAL;
- }
-
- RTE_LOG(INFO, APP, "Configure network interface of %d %s\n",
-
- port_id, if_up ? "up" : "down");
-
- if (if_up != 0) {
- /* Configure network interface up */
- rte_eth_dev_stop(port_id);
- ret = rte_eth_dev_start(port_id);
- } else /* Configure network interface down */
- rte_eth_dev_stop(port_id);
-
- if (ret < 0)
- RTE_LOG(ERR, APP, "Failed to start port %d\n", port_id);
- return ret;
- }
-
- /* Callback for request of configuring device mac address */
-
- static int
- kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
- {
- .....
- }
-
- /* Callback for request of configuring promiscuous mode */
-
- static int
- kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
- {
- .....
- }
diff --git a/doc/guides/sample_app_ug/l2_forward_job_stats.rst b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
index bfdf9c8f..ba73d855 100644
--- a/doc/guides/sample_app_ug/l2_forward_job_stats.rst
+++ b/doc/guides/sample_app_ug/l2_forward_job_stats.rst
@@ -178,11 +178,6 @@ in the *DPDK Programmer's Guide* and the *DPDK API Reference*.
.. code-block:: c
- nb_ports = rte_eth_dev_count();
-
- if (nb_ports == 0)
- rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
@@ -193,7 +188,7 @@ in the *DPDK Programmer's Guide* and the *DPDK API Reference*.
/*
* Each logical core is assigned a dedicated TX queue on each port.
*/
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -223,25 +218,6 @@ The rte_eth_dev_configure() function is used to configure the number of queues f
"err=%d, port=%u\n",
ret, portid);
-The global configuration is stored in a static structure:
-
-.. code-block:: c
-
- static const struct rte_eth_conf port_conf = {
- .rxmode = {
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc= 0, /**< CRC stripped by hardware */
- },
-
- .txmode = {
- .mq_mode = ETH_DCB_NONE
- },
- };
-
RX Queue Initialization
~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
index f02be05c..2b2d5afa 100644
--- a/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
+++ b/doc/guides/sample_app_ug/l2_forward_real_virtual.rst
@@ -197,11 +197,6 @@ in the *DPDK Programmer's Guide* - Rel 1.4 EAR and the *DPDK API Reference*.
if (rte_pci_probe() < 0)
rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
- nb_ports = rte_eth_dev_count();
-
- if (nb_ports == 0)
- rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
-
/* reset l2fwd_dst_ports */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
@@ -213,7 +208,7 @@ in the *DPDK Programmer's Guide* - Rel 1.4 EAR and the *DPDK API Reference*.
* Each logical core is assigned a dedicated TX queue on each port.
*/
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
@@ -250,25 +245,6 @@ The rte_eth_dev_configure() function is used to configure the number of queues f
"err=%d, port=%u\n",
ret, portid);
-The global configuration is stored in a static structure:
-
-.. code-block:: c
-
- static const struct rte_eth_conf port_conf = {
- .rxmode = {
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc= 0, /**< CRC stripped by hardware */
- },
-
- .txmode = {
- .mq_mode = ETH_DCB_NONE
- },
- };
-
.. _l2_fwd_app_rx_init:
RX Queue Initialization
diff --git a/doc/guides/sample_app_ug/link_status_intr.rst b/doc/guides/sample_app_ug/link_status_intr.rst
index 8fa6d76c..c7665fe5 100644
--- a/doc/guides/sample_app_ug/link_status_intr.rst
+++ b/doc/guides/sample_app_ug/link_status_intr.rst
@@ -91,15 +91,11 @@ To fully understand this code, it is recommended to study the chapters that rela
if (rte_pci_probe() < 0)
rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
- nb_ports = rte_eth_dev_count();
- if (nb_ports == 0)
- rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
-
/*
* Each logical core is assigned a dedicated TX queue on each port.
*/
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((lsi_enabled_port_mask & (1 << portid)) == 0)
@@ -141,10 +137,7 @@ The global configuration is stored in a static structure:
static const struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .hw_strip_crc= 0, /**< CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {},
.intr_conf = {
diff --git a/doc/guides/sample_app_ug/multi_process.rst b/doc/guides/sample_app_ug/multi_process.rst
index b84e9a74..9c374da6 100644
--- a/doc/guides/sample_app_ug/multi_process.rst
+++ b/doc/guides/sample_app_ug/multi_process.rst
@@ -321,409 +321,3 @@ In both the server and the client processes, outgoing packets are buffered befor
so as to allow the sending of multiple packets in a single burst to improve efficiency.
For example, the client process will buffer packets to send,
until either the buffer is full or until we receive no further packets from the server.
-
-Master-slave Multi-process Example
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The fourth example of DPDK multi-process support demonstrates a master-slave model that
-provide the capability of application recovery if a slave process crashes or meets unexpected conditions.
-In addition, it also demonstrates the floating process,
-which can run among different cores in contrast to the traditional way of binding a process/thread to a specific CPU core,
-using the local cache mechanism of mempool structures.
-
-This application performs the same functionality as the L2 Forwarding sample application,
-therefore this chapter does not cover that part but describes functionality that is introduced in this multi-process example only.
-Please refer to :doc:`l2_forward_real_virtual` for more information.
-
-Unlike previous examples where all processes are started from the command line with input arguments, in this example,
-only one process is spawned from the command line and that process creates other processes.
-The following section describes this in more detail.
-
-Master-slave Process Models
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The process spawned from the command line is called the *master process* in this document.
-A process created by the master is called a *slave process*.
-The application has only one master process, but could have multiple slave processes.
-
-Once the master process begins to run, it tries to initialize all the resources such as
-memory, CPU cores, driver, ports, and so on, as the other examples do.
-Thereafter, it creates slave processes, as shown in the following figure.
-
-.. _figure_master_slave_proc:
-
-.. figure:: img/master_slave_proc.*
-
- Master-slave Process Workflow
-
-
-The master process calls the rte_eal_mp_remote_launch() EAL function to launch an application function for each pinned thread through the pipe.
-Then, it waits to check if any slave processes have exited.
-If so, the process tries to re-initialize the resources that belong to that slave and launch them in the pinned thread entry again.
-The following section describes the recovery procedures in more detail.
-
-For each pinned thread in EAL, after reading any data from the pipe, it tries to call the function that the application specified.
-In this master specified function, a fork() call creates a slave process that performs the L2 forwarding task.
-Then, the function waits until the slave exits, is killed or crashes. Thereafter, it notifies the master of this event and returns.
-Finally, the EAL pinned thread waits until the new function is launched.
-
-After discussing the master-slave model, it is necessary to mention another issue, global and static variables.
-
-For multiple-thread cases, all global and static variables have only one copy and they can be accessed by any thread if applicable.
-So, they can be used to sync or share data among threads.
-
-In the previous examples, each process has separate global and static variables in memory and are independent of each other.
-If it is necessary to share the knowledge, some communication mechanism should be deployed, such as, memzone, ring, shared memory, and so on.
-The global or static variables are not a valid approach to share data among processes.
-For variables in this example, on the one hand, the slave process inherits all the knowledge of these variables after being created by the master.
-On the other hand, other processes cannot know if one or more processes modifies them after slave creation since that
-is the nature of a multiple process address space.
-But this does not mean that these variables cannot be used to share or sync data; it depends on the use case.
-The following are the possible use cases:
-
-#. The master process starts and initializes a variable and it will never be changed after slave processes created. This case is OK.
-
-#. After the slave processes are created, the master or slave cores need to change a variable, but other processes do not need to know the change.
- This case is also OK.
-
-#. After the slave processes are created, the master or a slave needs to change a variable.
- In the meantime, one or more other process needs to be aware of the change.
- In this case, global and static variables cannot be used to share knowledge. Another communication mechanism is needed.
- A simple approach without lock protection can be a heap buffer allocated by rte_malloc or mem zone.
-
-Slave Process Recovery Mechanism
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Before talking about the recovery mechanism, it is necessary to know what is needed before a new slave instance can run if a previous one exited.
-
-When a slave process exits, the system returns all the resources allocated for this process automatically.
-However, this does not include the resources that were allocated by the DPDK. All the hardware resources are shared among the processes,
-which include memzone, mempool, ring, a heap buffer allocated by the rte_malloc library, and so on.
-If the new instance runs and the allocated resource is not returned, either resource allocation failed or the hardware resource is lost forever.
-
-When a slave process runs, it may have dependencies on other processes.
-They could have execution sequence orders; they could share the ring to communicate; they could share the same port for reception and forwarding;
-they could use lock structures to do exclusive access in some critical path.
-What happens to the dependent process(es) if the peer leaves?
-The consequence are varied since the dependency cases are complex.
-It depends on what the processed had shared.
-However, it is necessary to notify the peer(s) if one slave exited.
-Then, the peer(s) will be aware of that and wait until the new instance begins to run.
-
-Therefore, to provide the capability to resume the new slave instance if the previous one exited, it is necessary to provide several mechanisms:
-
-#. Keep a resource list for each slave process.
- Before a slave process run, the master should prepare a resource list.
- After it exits, the master could either delete the allocated resources and create new ones,
- or re-initialize those for use by the new instance.
-
-#. Set up a notification mechanism for slave process exit cases. After the specific slave leaves,
- the master should be notified and then help to create a new instance.
- This mechanism is provided in Section `Master-slave Process Models`_.
-
-#. Use a synchronization mechanism among dependent processes.
- The master should have the capability to stop or kill slave processes that have a dependency on the one that has exited.
- Then, after the new instance of exited slave process begins to run, the dependency ones could resume or run from the start.
- The example sends a STOP command to slave processes dependent on the exited one, then they will exit.
- Thereafter, the master creates new instances for the exited slave processes.
-
-The following diagram describes slave process recovery.
-
-.. _figure_slave_proc_recov:
-
-.. figure:: img/slave_proc_recov.*
-
- Slave Process Recovery Process Flow
-
-
-Floating Process Support
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-When the DPDK application runs, there is always a -c option passed in to indicate the cores that are enabled.
-Then, the DPDK creates a thread for each enabled core.
-By doing so, it creates a 1:1 mapping between the enabled core and each thread.
-The enabled core always has an ID, therefore, each thread has a unique core ID in the DPDK execution environment.
-With the ID, each thread can easily access the structures or resources exclusively belonging to it without using function parameter passing.
-It can easily use the rte_lcore_id() function to get the value in every function that is called.
-
-For threads/processes not created in that way, either pinned to a core or not, they will not own a unique ID and the
-rte_lcore_id() function will not work in the correct way.
-However, sometimes these threads/processes still need the unique ID mechanism to do easy access on structures or resources.
-For example, the DPDK mempool library provides a local cache mechanism
-(refer to :ref:`mempool_local_cache`)
-for fast element allocation and freeing.
-If using a non-unique ID or a fake one,
-a race condition occurs if two or more threads/ processes with the same core ID try to use the local cache.
-
-Therefore, unused core IDs from the passing of parameters with the -c option are used to organize the core ID allocation array.
-Once the floating process is spawned, it tries to allocate a unique core ID from the array and release it on exit.
-
-A natural way to spawn a floating process is to use the fork() function and allocate a unique core ID from the unused core ID array.
-However, it is necessary to write new code to provide a notification mechanism for slave exit
-and make sure the process recovery mechanism can work with it.
-
-To avoid producing redundant code, the Master-Slave process model is still used to spawn floating processes,
-then cancel the affinity to specific cores.
-Besides that, clear the core ID assigned to the DPDK spawning a thread that has a 1:1 mapping with the core mask.
-Thereafter, get a new core ID from the unused core ID allocation array.
-
-Run the Application
-^^^^^^^^^^^^^^^^^^^
-
-This example has a command line similar to the L2 Forwarding sample application with a few differences.
-
-To run the application, start one copy of the l2fwd_fork binary in one terminal.
-Unlike the L2 Forwarding example,
-this example requires at least three cores since the master process will wait and be accountable for slave process recovery.
-The command is as follows:
-
-.. code-block:: console
-
- #./build/l2fwd_fork -l 2-4 -n 4 -- -p 3 -f
-
-This example provides another -f option to specify the use of floating process.
-If not specified, the example will use a pinned process to perform the L2 forwarding task.
-
-To verify the recovery mechanism, proceed as follows: First, check the PID of the slave processes:
-
-.. code-block:: console
-
- #ps -fe | grep l2fwd_fork
- root 5136 4843 29 11:11 pts/1 00:00:05 ./build/l2fwd_fork
- root 5145 5136 98 11:11 pts/1 00:00:11 ./build/l2fwd_fork
- root 5146 5136 98 11:11 pts/1 00:00:11 ./build/l2fwd_fork
-
-Then, kill one of the slaves:
-
-.. code-block:: console
-
- #kill -9 5145
-
-After 1 or 2 seconds, check whether the slave has resumed:
-
-.. code-block:: console
-
- #ps -fe | grep l2fwd_fork
- root 5136 4843 3 11:11 pts/1 00:00:06 ./build/l2fwd_fork
- root 5247 5136 99 11:14 pts/1 00:00:01 ./build/l2fwd_fork
- root 5248 5136 99 11:14 pts/1 00:00:01 ./build/l2fwd_fork
-
-It can also monitor the traffic generator statics to see whether slave processes have resumed.
-
-Explanation
-^^^^^^^^^^^
-
-As described in previous sections,
-not all global and static variables need to change to be accessible in multiple processes;
-it depends on how they are used.
-In this example,
-the statics info on packets dropped/forwarded/received count needs to be updated by the slave process,
-and the master needs to see the update and print them out.
-So, it needs to allocate a heap buffer using rte_zmalloc.
-In addition, if the -f option is specified,
-an array is needed to store the allocated core ID for the floating process so that the master can return it
-after a slave has exited accidentally.
-
-.. code-block:: c
-
- static int
- l2fwd_malloc_shared_struct(void)
- {
- port_statistics = rte_zmalloc("port_stat", sizeof(struct l2fwd_port_statistics) * RTE_MAX_ETHPORTS, 0);
-
- if (port_statistics == NULL)
- return -1;
-
- /* allocate mapping_id array */
-
- if (float_proc) {
- int i;
-
- mapping_id = rte_malloc("mapping_id", sizeof(unsigned) * RTE_MAX_LCORE, 0);
- if (mapping_id == NULL)
- return -1;
-
- for (i = 0 ;i < RTE_MAX_LCORE; i++)
- mapping_id[i] = INVALID_MAPPING_ID;
-
- }
- return 0;
- }
-
-For each slave process, packets are received from one port and forwarded to another port that another slave is operating on.
-If the other slave exits accidentally, the port it is operating on may not work normally,
-so the first slave cannot forward packets to that port.
-There is a dependency on the port in this case. So, the master should recognize the dependency.
-The following is the code to detect this dependency:
-
-.. code-block:: c
-
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
-
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- /* Find pair ports' lcores */
-
- find_lcore = find_pair_lcore = 0;
- pair_port = l2fwd_dst_ports[portid];
-
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- if (!rte_lcore_is_enabled(i))
- continue;
-
- for (j = 0; j < lcore_queue_conf[i].n_rx_port;j++) {
- if (lcore_queue_conf[i].rx_port_list[j] == portid) {
- lcore = i;
- find_lcore = 1;
- break;
- }
-
- if (lcore_queue_conf[i].rx_port_list[j] == pair_port) {
- pair_lcore = i;
- find_pair_lcore = 1;
- break;
- }
- }
-
- if (find_lcore && find_pair_lcore)
- break;
- }
-
- if (!find_lcore || !find_pair_lcore)
- rte_exit(EXIT_FAILURE, "Not find port=%d pair\\n", portid);
-
- printf("lcore %u and %u paired\\n", lcore, pair_lcore);
-
- lcore_resource[lcore].pair_id = pair_lcore;
- lcore_resource[pair_lcore].pair_id = lcore;
- }
-
-Before launching the slave process,
-it is necessary to set up the communication channel between the master and slave so that
-the master can notify the slave if its peer process with the dependency exited.
-In addition, the master needs to register a callback function in the case where a specific slave exited.
-
-.. code-block:: c
-
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- if (lcore_resource[i].enabled) {
- /* Create ring for master and slave communication */
-
- ret = create_ms_ring(i);
- if (ret != 0)
- rte_exit(EXIT_FAILURE, "Create ring for lcore=%u failed",i);
-
- if (flib_register_slave_exit_notify(i,slave_exit_cb) != 0)
- rte_exit(EXIT_FAILURE, "Register master_trace_slave_exit failed");
- }
- }
-
-After launching the slave process, the master waits and prints out the port statics periodically.
-If an event indicating that a slave process exited is detected,
-it sends the STOP command to the peer and waits until it has also exited.
-Then, it tries to clean up the execution environment and prepare new resources.
-Finally, the new slave instance is launched.
-
-.. code-block:: c
-
- while (1) {
- sleep(1);
- cur_tsc = rte_rdtsc();
- diff_tsc = cur_tsc - prev_tsc;
-
- /* if timer is enabled */
-
- if (timer_period > 0) {
- /* advance the timer */
- timer_tsc += diff_tsc;
-
- /* if timer has reached its timeout */
- if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
- print_stats();
-
- /* reset the timer */
- timer_tsc = 0;
- }
- }
-
- prev_tsc = cur_tsc;
-
- /* Check any slave need restart or recreate */
-
- rte_spinlock_lock(&res_lock);
-
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- struct lcore_resource_struct *res = &lcore_resource[i];
- struct lcore_resource_struct *pair = &lcore_resource[res->pair_id];
-
- /* If find slave exited, try to reset pair */
-
- if (res->enabled && res->flags && pair->enabled) {
- if (!pair->flags) {
- master_sendcmd_with_ack(pair->lcore_id, CMD_STOP);
- rte_spinlock_unlock(&res_lock);
- sleep(1);
- rte_spinlock_lock(&res_lock);
- if (pair->flags)
- continue;
- }
-
- if (reset_pair(res->lcore_id, pair->lcore_id) != 0)
- rte_exit(EXIT_FAILURE, "failed to reset slave");
-
- res->flags = 0;
- pair->flags = 0;
- }
- }
- rte_spinlock_unlock(&res_lock);
- }
-
-When the slave process is spawned and starts to run, it checks whether the floating process option is applied.
-If so, it clears the affinity to a specific core and also sets the unique core ID to 0.
-Then, it tries to allocate a new core ID.
-Since the core ID has changed, the resource allocated by the master cannot work,
-so it remaps the resource to the new core ID slot.
-
-.. code-block:: c
-
- static int
- l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
- {
- unsigned lcore_id = rte_lcore_id();
-
- if (float_proc) {
- unsigned flcore_id;
-
- /* Change it to floating process, also change it's lcore_id */
-
- clear_cpu_affinity();
-
- RTE_PER_LCORE(_lcore_id) = 0;
-
- /* Get a lcore_id */
-
- if (flib_assign_lcore_id() < 0 ) {
- printf("flib_assign_lcore_id failed\n");
- return -1;
- }
-
- flcore_id = rte_lcore_id();
-
- /* Set mapping id, so master can return it after slave exited */
-
- mapping_id[lcore_id] = flcore_id;
- printf("Org lcore_id = %u, cur lcore_id = %u\n",lcore_id, flcore_id);
- remapping_slave_resource(lcore_id, flcore_id);
- }
-
- l2fwd_main_loop();
-
- /* return lcore_id before return */
- if (float_proc) {
- flib_free_lcore_id(rte_lcore_id());
- mapping_id[lcore_id] = INVALID_MAPPING_ID;
- }
- return 0;
- }
diff --git a/doc/guides/sample_app_ug/quota_watermark.rst b/doc/guides/sample_app_ug/quota_watermark.rst
index 8baec4df..67200e15 100644
--- a/doc/guides/sample_app_ug/quota_watermark.rst
+++ b/doc/guides/sample_app_ug/quota_watermark.rst
@@ -163,7 +163,7 @@ Then, a call to init_dpdk(), defined in init.c, is made to initialize the poll m
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_pci_probe(): error %d\n", ret);
- if (rte_eth_dev_count() < 2)
+ if (rte_eth_dev_count_avail() < 2)
rte_exit(EXIT_FAILURE, "Not enough Ethernet port available\n");
}
diff --git a/doc/guides/sample_app_ug/rxtx_callbacks.rst b/doc/guides/sample_app_ug/rxtx_callbacks.rst
index 0bb0d3e0..85d96d8a 100644
--- a/doc/guides/sample_app_ug/rxtx_callbacks.rst
+++ b/doc/guides/sample_app_ug/rxtx_callbacks.rst
@@ -83,9 +83,6 @@ comments:
int retval;
uint16_t q;
- if (port >= rte_eth_dev_count())
- return -1;
-
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0)
diff --git a/doc/guides/sample_app_ug/skeleton.rst b/doc/guides/sample_app_ug/skeleton.rst
index 0503584d..95956171 100644
--- a/doc/guides/sample_app_ug/skeleton.rst
+++ b/doc/guides/sample_app_ug/skeleton.rst
@@ -81,7 +81,7 @@ The ``main()`` function also initializes all the ports using the user defined
.. code-block:: c
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (port_init(portid, mbuf_pool) != 0) {
rte_exit(EXIT_FAILURE,
"Cannot init port %" PRIu8 "\n", portid);
@@ -119,7 +119,7 @@ Forwarding application is shown below:
int retval;
uint16_t q;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
/* Configure the Ethernet device. */
@@ -192,14 +192,13 @@ looks like the following:
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint16_t nb_ports = rte_eth_dev_count();
uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
* for best performance.
*/
- for (port = 0; port < nb_ports; port++)
+ RTE_ETH_FOREACH_DEV(port)
if (rte_eth_dev_socket_id(port) > 0 &&
rte_eth_dev_socket_id(port) !=
(int)rte_socket_id())
@@ -216,7 +215,7 @@ looks like the following:
* Receive packets on a port and forward them on the paired
* port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
*/
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Get burst of RX packets, from first port of pair. */
struct rte_mbuf *bufs[BURST_SIZE];
@@ -246,7 +245,7 @@ The main work of the application is done within the loop:
.. code-block:: c
for (;;) {
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Get burst of RX packets, from first port of pair. */
struct rte_mbuf *bufs[BURST_SIZE];
diff --git a/doc/guides/sample_app_ug/vhost.rst b/doc/guides/sample_app_ug/vhost.rst
index a4bdc6a4..fd42cb3f 100644
--- a/doc/guides/sample_app_ug/vhost.rst
+++ b/doc/guides/sample_app_ug/vhost.rst
@@ -147,7 +147,10 @@ retries on an RX burst, it takes effect only when rx retry is enabled. The
default value is 15.
**--dequeue-zero-copy**
-Dequeue zero copy will be enabled when this option is given.
+Dequeue zero copy will be enabled when this option is given. It is worth
+noting that if the NIC is bound to a driver with IOMMU enabled, dequeue zero
+copy cannot work in VM2NIC mode (vm2vm=0) because we do not currently set up
+IOMMU DMA mapping for guest memory.
**--vlan-strip 0|1**
VLAN strip option is removed, because different NICs have different behaviors
@@ -155,6 +158,10 @@ when disabling VLAN strip. Such feature, which heavily depends on hardware,
should be removed from this example to reduce confusion. Now, VLAN strip is
enabled and cannot be disabled.
+**--builtin-net-driver**
+A very simple vhost-user net driver which demonstrates how to use the generic
+vhost APIs will be used when this option is given. It is disabled by default.
+
Common Issues
-------------
@@ -175,15 +182,22 @@ Common Issues
The command above indicates how many hugepages are free to support QEMU's
allocation request.
-* vhost-user will not work with QEMU without the ``-mem-prealloc`` option
+* Failed to build DPDK in VM
- The current implementation works properly only when the guest memory is
- pre-allocated.
+ Make sure "-cpu host" QEMU option is given.
-* vhost-user will not work with a QEMU version without shared memory mapping:
+* Device start fails if NIC's max queues > the default number of 128
- Make sure ``share=on`` QEMU option is given.
+ The mbuf pool size depends on the MAX_QUEUES configuration. If the NIC's
+ max queue number is larger than 128, device start will fail due to
+ insufficient mbufs.
-* Failed to build DPDK in VM
+ Change the default number as shown below to make it work; just set the
+ number according to the NIC's properties. ::
- Make sure "-cpu host" QEMU option is given.
+ make EXTRA_CFLAGS="-DMAX_QUEUES=320"
+
+* Option "builtin-net-driver" is incompatible with QEMU
+
+ QEMU vhost net device start will fail if the protocol feature is not
+ negotiated. The DPDK virtio-user PMD can be used as a replacement for QEMU.
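+
+ As an illustrative sketch (the socket path and core list here are example
+ values only), a virtio-user based front-end can be started with testpmd::
+
+     ./testpmd -l 0-1 --no-pci --vdev=virtio_user0,path=/tmp/sock0 -- -i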
diff --git a/doc/guides/sample_app_ug/vhost_crypto.rst b/doc/guides/sample_app_ug/vhost_crypto.rst
new file mode 100644
index 00000000..65c86a53
--- /dev/null
+++ b/doc/guides/sample_app_ug/vhost_crypto.rst
@@ -0,0 +1,82 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2017-2018 Intel Corporation.
+
+Vhost_Crypto Sample Application
+===============================
+
+The vhost_crypto sample application implements a simple Crypto device,
+which is used as the backend of a QEMU vhost-user-crypto device. Similar to
+the vhost-user-net and vhost-user-scsi devices, the sample application uses
+a UNIX domain socket to communicate with QEMU, and the virtio ring is
+processed by the vhost_crypto sample application.
+
+Testing steps
+-------------
+
+This section shows the steps to start a VM with the crypto device as a
+fast data path for a critical application.
+
+Compiling the Application
+-------------------------
+
+To compile the sample application see :doc:`compiling`.
+
+The application is located in the ``examples`` sub-directory.
+
+Start the vhost_crypto example
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ ./vhost_crypto [EAL options] -- [--socket-file PATH]
+ [--cdev-id ID] [--cdev-queue-id ID] [--zero-copy] [--guest-polling]
+
+where,
+
+* socket-file PATH: the path of the UNIX socket file to be created; multiple
+ instances of this config item are supported. Upon absence of this item,
+ the default socket-file `/tmp/vhost_crypto1.socket` is used.
+
+* cdev-id ID: the target DPDK Cryptodev's ID to process the actual crypto
+ workload. Upon absence of this item the default value of `0` will be used.
+ For details of DPDK Cryptodev, please refer to DPDK Cryptodev Library
+ Programmers' Guide.
+
+* cdev-queue-id ID: the target DPDK Cryptodev's queue ID to process the
+ actual crypto workload. Upon absence of this item the default value of `0`
+ will be used. For details of DPDK Cryptodev, please refer to DPDK Cryptodev
+ Library Programmers' Guide.
+
+* zero-copy: the presence of this item means the ZERO-COPY feature will be
+ enabled. Otherwise it is disabled. PLEASE NOTE the ZERO-COPY feature is
+ still at an experimental stage and may cause problems such as segmentation
+ faults. If the user wants to use LKCF in the guest, this feature shall be
+ turned off.
+
+* guest-polling: the presence of this item means the application assumes the
+ guest works in polling mode, and thus will NOT notify the guest of the
+ completion of processing.
+
+The application requires that crypto devices capable of performing
+the specified crypto operation are available on application initialization.
+This means that hardware crypto devices must be bound to a DPDK driver, or
+software crypto devices (virtual crypto PMDs) must be created (using --vdev).
+
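+As a minimal usage sketch (the ``crypto_aesni_mb`` vdev below is just one
+example of a software crypto PMD; any capable cryptodev can be used):
+
+.. code-block:: console
+
+ ./vhost_crypto -l 0-1 --vdev crypto_aesni_mb -- \
+ --socket-file /tmp/vhost_crypto1.socket --cdev-id 0 --cdev-queue-id 0
+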
+.. _vhost_crypto_app_run_vm:
+
+Start the VM
+~~~~~~~~~~~~
+
+.. code-block:: console
+
+ qemu-system-x86_64 -machine accel=kvm \
+ -m $mem -object memory-backend-file,id=mem,size=$mem,\
+ mem-path=/dev/hugepages,share=on -numa node,memdev=mem \
+ -drive file=os.img,if=none,id=disk \
+ -device ide-hd,drive=disk,bootindex=0 \
+ -chardev socket,id={chardev_id},path={PATH} \
+ -object cryptodev-vhost-user,id={obj_id},chardev={chardev_id} \
+ -device virtio-crypto-pci,id={dev_id},cryptodev={obj_id} \
+ ...
+
+.. note::
+ You must check whether your QEMU version supports "vhost-user-crypto".
diff --git a/doc/guides/sample_app_ug/vm_power_management.rst b/doc/guides/sample_app_ug/vm_power_management.rst
index cd7c7f34..855570d6 100644
--- a/doc/guides/sample_app_ug/vm_power_management.rst
+++ b/doc/guides/sample_app_ug/vm_power_management.rst
@@ -20,7 +20,7 @@ running on Virtual Machines(VMs).
The Virtual Machine Power Management solution shows an example of
how a DPDK application can indicate its processing requirements using VM local
-only information(vCPU/lcore) to a Host based Monitor which is responsible
+only information (vCPU/lcore, etc.) to a Host based Monitor which is responsible
for accepting requests for frequency changes for a vCPU, translating the vCPU
to a pCPU via libvirt and affecting the change in frequency.
@@ -38,6 +38,26 @@ The solution is comprised of two high-level components:
to the librte_power ACPI cpufreq sysfs based library.
The Host Application relies on both qemu-kvm and libvirt to function.
+ This monitoring application is responsible for:
+
+ - Accepting requests from client applications: Client applications can
+ request frequency changes for a vCPU, translating
+ the vCPU to a pCPU via libvirt and effecting the change in frequency.
+
+ - Accepting policies from client applications: Client applications can
+ send a policy to the host application. The
+ host application will then apply the rules of the policy independent
+ of the application. For example, the policy can contain time-of-day
+ information for busy/quiet periods, and the host application can scale
+ up/down the relevant cores when required. See the details of the guest
+ application below for more information on setting the policy values.
+
+ - Out-of-band monitoring of workloads via core hardware event counters:
+ The host application can manage power for an application in a virtualised
+ OR non-virtualised environment by looking at the event counters of the
+ cores and taking action based on the branch hit/miss ratio. See the host
+ application '--core-list' command line parameter below.
+
#. librte_power for Virtual Machines
Using an alternate implementation for the librte_power API, requests for
@@ -174,13 +194,20 @@ Compiling and Running the Host Application
Compiling
~~~~~~~~~
-Compiling the Application
--------------------------
-
-To compile the sample application see :doc:`compiling`.
+For information on compiling DPDK and the sample applications
+see :doc:`compiling`.
The application is located in the ``vm_power_manager`` sub-directory.
+To build just the ``vm_power_manager`` application:
+
+.. code-block:: console
+
+ export RTE_SDK=/path/to/rte_sdk
+ export RTE_TARGET=build
+ cd ${RTE_SDK}/examples/vm_power_manager/
+ make
+
Running
~~~~~~~
@@ -287,38 +314,129 @@ Manual control and inspection can also be carried in relation CPU frequency scal
set_cpu_freq {core_num} up|down|min|max
+There are also some command line parameters for enabling the out-of-band
+monitoring of branch ratio on cores doing busy polling via PMDs.
+
+ .. code-block:: console
+
+ --core-list {list of cores}
+
+ When this parameter is used, the specified list of cores will be monitored
+ for the ratio between branch hits and branch misses. A tightly polling PMD
+ thread will have a very low branch ratio, so the core frequency will be
+ scaled down to the minimum allowed value. When packets are received, the
+ code path will alter, causing the branch ratio to increase. When the ratio
+ goes above the ratio threshold, the core frequency will be scaled up to the
+ maximum allowed value (see the combined example below).
+
+ .. code-block:: console
+
+ --branch-ratio {ratio}
+
+ The branch ratio is a floating point number that specifies the threshold at which
+ to scale up or down for the given workload. The default branch ratio is 0.01,
+ and will need to be adjusted for different workloads.
+
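+As a combined usage sketch (the core list and the ratio value here are
+illustrative only), out-of-band monitoring might be started as follows:
+
+ .. code-block:: console
+
+ ./build/vm_power_mgr -l 1-3 -n 4 -- --core-list=4-7 --branch-ratio=0.25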
+
Compiling and Running the Guest Applications
--------------------------------------------
-For compiling and running l3fwd-power, see :doc:`l3_forward_power_man`.
+l3fwd-power is one sample application that can be used with vm_power_manager.
A guest CLI is also provided for validating the setup.
For both l3fwd-power and guest CLI, the channels for the VM must be monitored by the
-host application using the *add_channels* command on the host.
+host application using the *add_channels* command on the host. This typically uses
+the following commands in the host application:
+
+.. code-block:: console
+
+ vm_power> add_vm vmname
+ vm_power> add_channels vmname all
+ vm_power> set_channel_status vmname all enabled
+ vm_power> show_vm vmname
+
Compiling
~~~~~~~~~
-#. export RTE_SDK=/path/to/rte_sdk
-#. cd ${RTE_SDK}/examples/vm_power_manager/guest_cli
-#. make
+For information on compiling DPDK and the sample applications
+see :doc:`compiling`.
+
+For compiling and running l3fwd-power, see :doc:`l3_forward_power_man`.
+
+The application is located in the ``guest_cli`` sub-directory under ``vm_power_manager``.
+
+To build just the ``guest_vm_power_manager`` application:
+
+.. code-block:: console
+
+ export RTE_SDK=/path/to/rte_sdk
+ export RTE_TARGET=build
+ cd ${RTE_SDK}/examples/vm_power_manager/guest_cli/
+ make
Running
~~~~~~~
-The application does not have any specific command line options other than *EAL*:
+The standard *EAL* command line parameters are required:
.. code-block:: console
- ./build/vm_power_mgr [EAL options]
+ ./build/guest_vm_power_mgr [EAL options] -- [guest options]
-The application for example purposes uses a channel for each lcore enabled,
-for example to run on cores 0,1,2,3 on a system with 4 memory channels:
+The guest example uses a channel for each lcore enabled. For example,
+to run on cores 0,1,2,3:
.. code-block:: console
- ./build/guest_vm_power_mgr -l 0-3 -n 4
+ ./build/guest_vm_power_mgr -l 0-3
+
+Optionally, there is a list of command line parameters should the user wish to send a power
+policy down to the host application. These parameters are as follows:
+
+ .. code-block:: console
+
+ --vm-name {name of guest vm}
+
+ This parameter allows the user to change the Virtual Machine name passed down to the
+ host application via the power policy. The default is "ubuntu2".
+
+ .. code-block:: console
+
+ --vcpu-list {list vm cores}
+
+ A comma-separated list of cores in the VM that the user wants the host application to
+ monitor. The list of cores in any VM starts at zero, and these are mapped to the
+ physical cores by the host application once the policy is passed down.
+ Valid syntax includes individual cores '2,3,4', a range of cores '2-4', or a
+ combination of both '1,3,5-7'.
+
+ .. code-block:: console
+
+ --busy-hours {list of busy hours}
+
+ A comma-separated list of hours within which to set the core frequency to maximum.
+ Valid syntax includes individual hours '2,3,4', a range of hours '2-4', or a
+ combination of both '1,3,5-7'. Valid hours are 0 to 23.
+
+ .. code-block:: console
+
+ --quiet-hours {list of quiet hours}
+
+ A comma-separated list of hours within which to set the core frequency to minimum.
+ Valid syntax includes individual hours '2,3,4', a range of hours '2-4', or a
+ combination of both '1,3,5-7'. Valid hours are 0 to 23.
+
+ .. code-block:: console
+
+ --policy {policy type}
+
+ The type of policy. This can be one of the following values:
+ TRAFFIC - based on incoming traffic rates on the NIC.
+ TIME - busy/quiet hours policy.
+ BRANCH_RATIO - uses branch ratio counters to determine core busyness.
+ Not all parameters are needed for all policy types. For example, BRANCH_RATIO
+ only needs the vcpu-list parameter, not any of the hours.
After successful initialization the user is presented with VM Power Manager Guest CLI:
@@ -333,3 +451,20 @@ Where {core_num} is the lcore and channel to change frequency by scaling up/down
.. code-block:: console
set_cpu_freq {core_num} up|down|min|max
+
+To start the application and configure the power policy, and send it to the host:
+
+.. code-block:: console
+
+ ./build/guest_vm_power_mgr -l 0-3 -n 4 -- --vm-name=ubuntu --policy=BRANCH_RATIO --vcpu-list=2-4
+
+Once the VM Power Manager Guest CLI appears, issuing the 'send_policy now' command
+will send the policy to the host:
+
+.. code-block:: console
+
+ send_policy now
+
+Once the policy is sent to the host, the host application takes over the power monitoring
+of the specified cores in the policy.
+
diff --git a/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst b/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
index 83fcdf63..0e9da970 100644
--- a/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
+++ b/doc/guides/sample_app_ug/vmdq_dcb_forwarding.rst
@@ -102,10 +102,6 @@ a default structure is provided for VMDQ and DCB configuration to be filled in l
.rxmode = {
.mq_mode = ETH_MQ_RX_VMDQ_DCB,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
},
.txmode = {
.mq_mode = ETH_MQ_TX_VMDQ_DCB,
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 1fd53958..f301c2b6 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -372,7 +372,9 @@ The commandline options are:
* ``--burst=N``
Set the number of packets per burst to N, where 1 <= N <= 512.
- The default value is 16.
+ The default value is 32.
+ If set to 0, the driver default is used if defined. Otherwise, if the
+ driver default is not defined, the default of 32 is used.
* ``--mbcache=N``
@@ -479,3 +481,20 @@ The commandline options are:
Set the hexadecimal bitmask of TX queue offloads.
The default value is 0.
+
+* ``--hot-plug``
+
+ Enable the device event monitor mechanism for hotplug.
+
+* ``--vxlan-gpe-port=N``
+
+ Set the UDP port number of tunnel VXLAN-GPE to N.
+ The default value is 4790.
+
+* ``--mlockall``
+
+ Enable locking all memory.
+
+* ``--no-mlockall``
+
+ Disable locking all memory.
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index a766ac79..dde205a2 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -320,12 +320,8 @@ The available information categories are:
* ``ieee1588``: Demonstrate L2 IEEE1588 V2 PTP timestamping for RX and TX. Requires ``CONFIG_RTE_LIBRTE_IEEE1588=y``.
-* ``tm``: Traffic Management forwarding mode
- Demonstrates the use of ethdev traffic management APIs and softnic PMD for
- QoS traffic management. In this mode, 5-level hierarchical QoS scheduler is
- available as an default option that can be enabled through CLI. The user can
- also modify the default hierarchy or specify the new hierarchy through CLI for
- implementing QoS scheduler. Requires ``CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y`` ``CONFIG_RTE_LIBRTE_SCHED=y``.
+* ``softnic``: Demonstrates the softnic forwarding operation. In this mode,
+ packet forwarding is similar to I/O mode, except that packets are looped
+ back to the softnic ports only; therefore, the portmask parameter should
+ be set to the softnic ports only. The various software based custom NIC
+ pipelines specified through the softnic firmware (DPDK packet framework
+ script) can be tested in this mode. Furthermore, it allows building a
+ 5-level hierarchical QoS scheduler as a default option that can be enabled
+ through the CLI once the testpmd application is initialised. The user can
+ modify the default scheduler hierarchy or specify a new QoS scheduler
+ hierarchy through the CLI. Requires ``CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y``.
Example::
@@ -393,6 +389,34 @@ List all items from the pctype mapping table::
testpmd> show port (port_id) pctype mapping
+show rx offloading capabilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+List all per queue and per port Rx offloading capabilities of a port::
+
+ testpmd> show port (port_id) rx_offload capabilities
+
+show rx offloading configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+List port level and all queue level Rx offloading configuration::
+
+ testpmd> show port (port_id) rx_offload configuration
+
+show tx offloading capabilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+List all per queue and per port Tx offloading capabilities of a port::
+
+ testpmd> show port (port_id) tx_offload capabilities
+
+show tx offloading configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+List port level and all queue level Tx offloading configuration::
+
+ testpmd> show port (port_id) tx_offload configuration
+
Configuration Functions
-----------------------
@@ -1031,6 +1055,13 @@ By default, GSO is disabled for all ports.
testpmd> csum set tcp hw <port_id>
+ UDP GSO is the same as IP fragmentation, which treats the UDP header
+ as the payload and does not modify it during segmentation. That is,
+ after UDP GSO, only the first output fragment has the original UDP
+ header. Therefore, users need to enable HW IP checksum calculation
+ and SW UDP checksum calculation for GSO-enabled ports, if they want
+ correct checksums for UDP/IPv4 packets.
+
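+ For example, a configuration sketch for port 0 (with the port stopped)::
+
+ testpmd> csum set ip hw 0
+ testpmd> csum set udp sw 0
+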
set gso segsz
~~~~~~~~~~~~~
@@ -1444,6 +1475,101 @@ Reset ptype mapping table::
testpmd> ptype mapping reset (port_id)
+config per port Rx offloading
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Enable or disable a per port Rx offloading on all Rx queues of a port::
+
+ testpmd> port config (port_id) rx_offload (offloading) on|off
+
+* ``offloading``: can be any of these offloading capabilities:
+ vlan_strip, ipv4_cksum, udp_cksum, tcp_cksum, tcp_lro,
+ qinq_strip, outer_ipv4_cksum, macsec_strip,
+ header_split, vlan_filter, vlan_extend, jumbo_frame,
+ crc_strip, scatter, timestamp, security, keep_crc
+
+This command should be run when the port is stopped, or else it will fail.
+
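+For example, to turn VLAN stripping on for all Rx queues of port 0 (a usage
+sketch; stop the port first)::
+
+ testpmd> port stop 0
+ testpmd> port config 0 rx_offload vlan_strip on
+ testpmd> port start 0
+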
+config per queue Rx offloading
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Enable or disable a per queue Rx offloading only on a specific Rx queue::
+
+ testpmd> port (port_id) rxq (queue_id) rx_offload (offloading) on|off
+
+* ``offloading``: can be any of these offloading capabilities:
+ vlan_strip, ipv4_cksum, udp_cksum, tcp_cksum, tcp_lro,
+ qinq_strip, outer_ipv4_cksum, macsec_strip,
+ header_split, vlan_filter, vlan_extend, jumbo_frame,
+ crc_strip, scatter, timestamp, security, keep_crc
+
+This command should be run when the port is stopped, or else it will fail.
+
+config per port Tx offloading
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Enable or disable a per port Tx offloading on all Tx queues of a port::
+
+ testpmd> port config (port_id) tx_offload (offloading) on|off
+
+* ``offloading``: can be any of these offloading capabilities:
+ vlan_insert, ipv4_cksum, udp_cksum, tcp_cksum,
+ sctp_cksum, tcp_tso, udp_tso, outer_ipv4_cksum,
+ qinq_insert, vxlan_tnl_tso, gre_tnl_tso,
+ ipip_tnl_tso, geneve_tnl_tso, macsec_insert,
+ mt_lockfree, multi_segs, mbuf_fast_free, security
+
+This command should be run when the port is stopped, or else it will fail.
+
+config per queue Tx offloading
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Enable or disable a per queue Tx offloading only on a specific Tx queue::
+
+ testpmd> port (port_id) txq (queue_id) tx_offload (offloading) on|off
+
+* ``offloading``: can be any of these offloading capabilities:
+ vlan_insert, ipv4_cksum, udp_cksum, tcp_cksum,
+ sctp_cksum, tcp_tso, udp_tso, outer_ipv4_cksum,
+ qinq_insert, vxlan_tnl_tso, gre_tnl_tso,
+ ipip_tnl_tso, geneve_tnl_tso, macsec_insert,
+ mt_lockfree, multi_segs, mbuf_fast_free, security
+
+This command should be run when the port is stopped, or else it will fail.
+
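+For example, to enable the ``multi_segs`` offload only on Tx queue 1 of
+port 0 (a usage sketch)::
+
+ testpmd> port 0 txq 1 tx_offload multi_segs on
+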
+Config VXLAN Encap outer layers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure the outer layer to encapsulate a packet inside a VXLAN tunnel::
+
+ set vxlan ip-version (ipv4|ipv6) vni (vni) udp-src (udp-src) \
+ udp-dst (udp-dst) ip-src (ip-src) ip-dst (ip-dst) eth-src (eth-src) \
+ eth-dst (eth-dst)
+
+ set vxlan-with-vlan ip-version (ipv4|ipv6) vni (vni) udp-src (udp-src) \
+ udp-dst (udp-dst) ip-src (ip-src) ip-dst (ip-dst) vlan-tci (vlan-tci) \
+ eth-src (eth-src) eth-dst (eth-dst)
+
+These commands will set an internal configuration inside testpmd; any
+following flow rule using the vxlan_encap action will use the last
+configuration set. To have a different encapsulation header, one of these
+commands must be called before the flow rule creation.
+
+Config NVGRE Encap outer layers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure the outer layer to encapsulate a packet inside a NVGRE tunnel::
+
+ set nvgre ip-version (ipv4|ipv6) tni (tni) ip-src (ip-src) ip-dst (ip-dst) \
+ eth-src (eth-src) eth-dst (eth-dst)
+ set nvgre-with-vlan ip-version (ipv4|ipv6) tni (tni) ip-src (ip-src) \
+ ip-dst (ip-dst) vlan-tci (vlan-tci) eth-src (eth-src) eth-dst (eth-dst)
+
+These commands will set an internal configuration inside testpmd; any
+following flow rule using the nvgre_encap action will use the last
+configuration set. To have a different encapsulation header, one of these
+commands must be called before the flow rule creation.
+
Port Functions
--------------
@@ -1623,6 +1749,15 @@ Close all ports or a specific port::
testpmd> port close (port_id|all)
+port config - queue ring size
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Configure a rx/tx queue ring size::
+
+ testpmd> port (port_id) (rxq|txq) (queue_id) ring_size (value)
+
+This only takes effect after a command that (re-)starts the port, or a
+command that sets up the specific queue.
+
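+For example, to set Rx queue 0 of port 0 to 512 descriptors (a usage
+sketch)::
+
+ testpmd> port 0 rxq 0 ring_size 512
+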
port start/stop queue
~~~~~~~~~~~~~~~~~~~~~
@@ -1630,6 +1765,13 @@ Start/stop a rx/tx queue on a specific port::
testpmd> port (port_id) (rxq|txq) (queue_id) (start|stop)
+port setup queue
+~~~~~~~~~~~~~~~~~~~~~
+
+Set up a rx/tx queue on a specific port::
+
+ testpmd> port (port_id) (rxq|txq) (queue_id) setup
+
Only take effect when port is started.
port config - speed
@@ -1751,10 +1893,12 @@ port config - RSS
Set the RSS (Receive Side Scaling) mode on or off::
- testpmd> port config all rss (all|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none)
+ testpmd> port config all rss (all|default|ip|tcp|udp|sctp|ether|port|vxlan|geneve|nvgre|none)
RSS is on by default.
+The ``all`` option is equivalent to ip|tcp|udp|sctp|ether.
+The ``default`` option enables all supported RSS types reported by device info.
The ``none`` option is equivalent to the ``--disable-rss`` command-line option.
port config - RSS Reta
@@ -1856,6 +2000,12 @@ where:
* ``pctype_id``: hardware packet classification types.
* ``field_idx``: hardware field index.
+port config udp_tunnel_port
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Add/remove UDP tunnel port for VXLAN/GENEVE tunneling protocols::
+
+ testpmd> port config (port_id) udp_tunnel_port add|rm vxlan|geneve (udp_port)
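+
+For example, to add the IANA-assigned VXLAN port 4789 on port 0 (a usage
+sketch)::
+
+ testpmd> port config 0 udp_tunnel_port add vxlan 4789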
+
Link Bonding Functions
----------------------
@@ -1871,7 +2021,7 @@ Create a new bonding device::
For example, to create a bonded device in mode 1 on socket 0::
- testpmd> create bonded 1 0
+ testpmd> create bonded device 1 0
created new bonded device (port X)
add bonding slave
@@ -2483,6 +2633,16 @@ success depends on the port support for this operation, as advertised through
the port capability set. This function is valid for all nodes of the traffic
management hierarchy except root node.
+Suspend port traffic management hierarchy node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ testpmd> suspend port tm node (port_id) (node_id)
+
+Resume port traffic management hierarchy node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ testpmd> resume port tm node (port_id) (node_id)
+
Commit port traffic management hierarchy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -2496,8 +2656,8 @@ where:
call failure. On the other hand, hierarchy is preserved when this parameter
is equal to zero.
-Set port traffic management default hierarchy (tm forwarding mode)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Set port traffic management default hierarchy (softnic forwarding mode)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
set the traffic management default hierarchy on the port::
@@ -2970,14 +3130,14 @@ following sections.
- Check whether a flow rule can be created::
flow validate {port_id}
- [group {group_id}] [priority {level}] [ingress] [egress]
+ [group {group_id}] [priority {level}] [ingress] [egress] [transfer]
pattern {item} [/ {item} [...]] / end
actions {action} [/ {action} [...]] / end
- Create a flow rule::
flow create {port_id}
- [group {group_id}] [priority {level}] [ingress] [egress]
+ [group {group_id}] [priority {level}] [ingress] [egress] [transfer]
pattern {item} [/ {item} [...]] / end
actions {action} [/ {action} [...]] / end
@@ -3010,7 +3170,7 @@ underlying device in its current state but stops short of creating it. It is
bound to ``rte_flow_validate()``::
flow validate {port_id}
- [group {group_id}] [priority {level}] [ingress] [egress]
+ [group {group_id}] [priority {level}] [ingress] [egress] [transfer]
pattern {item} [/ {item} [...]] / end
actions {action} [/ {action} [...]] / end
@@ -3047,7 +3207,7 @@ Creating flow rules
to ``rte_flow_create()``::
flow create {port_id}
- [group {group_id}] [priority {level}] [ingress] [egress]
+ [group {group_id}] [priority {level}] [ingress] [egress] [transfer]
pattern {item} [/ {item} [...]] / end
actions {action} [/ {action} [...]] / end
@@ -3061,7 +3221,7 @@ Otherwise it will show an error message of the form::
Parameters describe in the following order:
-- Attributes (*group*, *priority*, *ingress*, *egress* tokens).
+- Attributes (*group*, *priority*, *ingress*, *egress*, *transfer* tokens).
- A matching pattern, starting with the *pattern* token and terminated by an
*end* pattern item.
- Actions, starting with the *actions* token and terminated by an *end*
@@ -3089,6 +3249,7 @@ specified before the ``pattern`` token.
- ``priority {level}``: priority level within group.
- ``ingress``: rule applies to ingress traffic.
- ``egress``: rule applies to egress traffic.
+- ``transfer``: apply rule directly to endpoints found in pattern.
Each instance of an attribute specified several times overrides the previous
value as shown below (group 4 is used)::
@@ -3201,16 +3362,24 @@ This section lists supported pattern items and their attributes, if any.
- ``num {unsigned}``: number of layers covered.
-- ``pf``: match packets addressed to the physical function.
+- ``pf``: match traffic from/to the physical function.
-- ``vf``: match packets addressed to a virtual function ID.
+- ``vf``: match traffic from/to a virtual function ID.
- - ``id {unsigned}``: destination VF ID.
+ - ``id {unsigned}``: VF ID.
-- ``port``: device-specific physical port index to use.
+- ``phy_port``: match traffic from/to a specific physical port.
- ``index {unsigned}``: physical port index.
+- ``port_id``: match traffic from/to a given DPDK port ID.
+
+ - ``id {unsigned}``: DPDK port ID.
+
+- ``mark``: match value set in previously matched flow rule using the mark action.
+
+ - ``id {unsigned}``: arbitrary integer value.
+
- ``raw``: match an arbitrary byte string.
- ``relative {boolean}``: look for pattern after the previous item.
@@ -3223,15 +3392,15 @@ This section lists supported pattern items and their attributes, if any.
- ``dst {MAC-48}``: destination MAC.
- ``src {MAC-48}``: source MAC.
- - ``type {unsigned}``: EtherType.
+ - ``type {unsigned}``: EtherType or TPID.
- ``vlan``: match 802.1Q/ad VLAN tag.
- - ``tpid {unsigned}``: tag protocol identifier.
- ``tci {unsigned}``: tag control information.
- ``pcp {unsigned}``: priority code point.
- ``dei {unsigned}``: drop eligible indicator.
- ``vid {unsigned}``: VLAN identifier.
+ - ``inner_type {unsigned}``: inner EtherType or TPID.
- ``ipv4``: match IPv4 header.
@@ -3305,6 +3474,48 @@ This section lists supported pattern items and their attributes, if any.
- ``vni {unsigned}``: virtual network identifier.
- ``protocol {unsigned}``: protocol type.
+- ``vxlan-gpe``: match VXLAN-GPE header.
+
+ - ``vni {unsigned}``: VXLAN-GPE identifier.
+
+- ``arp_eth_ipv4``: match ARP header for Ethernet/IPv4.
+
+ - ``sha {MAC-48}``: sender hardware address.
+ - ``spa {ipv4 address}``: sender IPv4 address.
+ - ``tha {MAC-48}``: target hardware address.
+ - ``tpa {ipv4 address}``: target IPv4 address.
+
+- ``ipv6_ext``: match presence of any IPv6 extension header.
+
+ - ``next_hdr {unsigned}``: next header.
+
+- ``icmp6``: match any ICMPv6 header.
+
+ - ``type {unsigned}``: ICMPv6 type.
+ - ``code {unsigned}``: ICMPv6 code.
+
+- ``icmp6_nd_ns``: match ICMPv6 neighbor discovery solicitation.
+
+ - ``target_addr {ipv6 address}``: target address.
+
+- ``icmp6_nd_na``: match ICMPv6 neighbor discovery advertisement.
+
+ - ``target_addr {ipv6 address}``: target address.
+
+- ``icmp6_nd_opt``: match presence of any ICMPv6 neighbor discovery option.
+
+ - ``type {unsigned}``: ND option type.
+
+- ``icmp6_nd_opt_sla_eth``: match ICMPv6 neighbor discovery source Ethernet
+ link-layer address option.
+
+ - ``sla {MAC-48}``: source Ethernet LLA.
+
+- ``icmp6_nd_opt_tla_eth``: match ICMPv6 neighbor discovery target Ethernet
+ link-layer address option.
+
+ - ``tla {MAC-48}``: target Ethernet LLA.
+
Actions list
^^^^^^^^^^^^
@@ -3363,10 +3574,6 @@ actions can sometimes be combined when the end result is unambiguous::
::
- drop / dup index 6 / end # same as above
-
-::
-
queue index 6 / rss queues 6 7 8 / end # queue has no effect
::
@@ -3386,6 +3593,10 @@ This section lists supported actions and their attributes, if any.
- ``passthru``: let subsequent rule process matched packets.
+- ``jump``: redirect traffic to group on device.
+
+ - ``group {unsigned}``: group to redirect to.
+
- ``mark``: attach 32 bit value to packets.
- ``id {unsigned}``: 32 bit value to return with packets.
@@ -3400,20 +3611,91 @@ This section lists supported actions and their attributes, if any.
- ``count``: enable counters for this rule.
-- ``dup``: duplicate packets to a given queue index.
+- ``rss``: spread packets among several queues.
- - ``index {unsigned}``: queue index to duplicate packets to.
+ - ``func {hash function}``: RSS hash function to apply, allowed tokens are
+ the same as `set_hash_global_config`_.
-- ``rss``: spread packets among several queues.
+ - ``level {unsigned}``: encapsulation level for ``types``.
+
+ - ``types [{RSS hash type} [...]] end``: specific RSS hash types, allowed
+ tokens are the same as `set_hash_input_set`_, except that an empty list
+ does not disable RSS but instead requests unspecified "best-effort"
+ settings.
+
+ - ``key {string}``: RSS hash key, overrides ``key_len``.
+
+ - ``key_len {unsigned}``: RSS hash key length in bytes, can be used in
+ conjunction with ``key`` to pad or truncate it.
- ``queues [{unsigned} [...]] end``: queue indices to use.
-- ``pf``: redirect packets to physical device function.
+- ``pf``: direct traffic to physical function.
-- ``vf``: redirect packets to virtual device function.
+- ``vf``: direct traffic to a virtual function ID.
- ``original {boolean}``: use original VF ID if possible.
- - ``id {unsigned}``: VF ID to redirect packets to.
+ - ``id {unsigned}``: VF ID.
+
+- ``phy_port``: direct packets to physical port index.
+
+ - ``original {boolean}``: use original port index if possible.
+ - ``index {unsigned}``: physical port index.
+
+- ``port_id``: direct matching traffic to a given DPDK port ID.
+
+ - ``original {boolean}``: use original DPDK port ID if possible.
+ - ``id {unsigned}``: DPDK port ID.
+
+- ``of_set_mpls_ttl``: OpenFlow's ``OFPAT_SET_MPLS_TTL``.
+
+ - ``mpls_ttl``: MPLS TTL.
+
+- ``of_dec_mpls_ttl``: OpenFlow's ``OFPAT_DEC_MPLS_TTL``.
+
+- ``of_set_nw_ttl``: OpenFlow's ``OFPAT_SET_NW_TTL``.
+
+ - ``nw_ttl``: IP TTL.
+
+- ``of_dec_nw_ttl``: OpenFlow's ``OFPAT_DEC_NW_TTL``.
+
+- ``of_copy_ttl_out``: OpenFlow's ``OFPAT_COPY_TTL_OUT``.
+
+- ``of_copy_ttl_in``: OpenFlow's ``OFPAT_COPY_TTL_IN``.
+
+- ``of_pop_vlan``: OpenFlow's ``OFPAT_POP_VLAN``.
+
+- ``of_push_vlan``: OpenFlow's ``OFPAT_PUSH_VLAN``.
+
+ - ``ethertype``: Ethertype.
+
+- ``of_set_vlan_vid``: OpenFlow's ``OFPAT_SET_VLAN_VID``.
+
+ - ``vlan_vid``: VLAN id.
+
+- ``of_set_vlan_pcp``: OpenFlow's ``OFPAT_SET_VLAN_PCP``.
+
+ - ``vlan_pcp``: VLAN priority.
+
+- ``of_pop_mpls``: OpenFlow's ``OFPAT_POP_MPLS``.
+
+ - ``ethertype``: Ethertype.
+
+- ``of_push_mpls``: OpenFlow's ``OFPAT_PUSH_MPLS``.
+
+ - ``ethertype``: Ethertype.
+
+- ``vxlan_encap``: Performs a VXLAN encapsulation; outer layer configuration
+ is done through `Config VXLAN Encap outer layers`_.
+
+- ``vxlan_decap``: Performs a decapsulation action by stripping all headers of
+ the VXLAN tunnel network overlay from the matched flow.
+
+- ``nvgre_encap``: Performs an NVGRE encapsulation; outer layer configuration
+ is done through `Config NVGRE Encap outer layers`_.
+
+- ``nvgre_decap``: Performs a decapsulation action by stripping all headers of
+ the NVGRE tunnel network overlay from the matched flow.
Destroying flow rules
~~~~~~~~~~~~~~~~~~~~~
@@ -3679,3 +3961,122 @@ Validate and create a QinQ rule on port 0 to steer traffic to a queue on the hos
ID Group Prio Attr Rule
0 0 0 i- ETH VLAN VLAN=>VF QUEUE
1 0 0 i- ETH VLAN VLAN=>PF QUEUE
+
+Sample VXLAN encapsulation rule
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The VXLAN encapsulation outer layer has default values pre-configured in the
+testpmd source code; these can be changed by using the following commands.
+
+IPv4 VXLAN outer header::
+
+ testpmd> set vxlan ip-version ipv4 vni 4 udp-src 4 udp-dst 4 ip-src 127.0.0.1
+ ip-dst 128.0.0.1 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions vxlan_encap /
+ queue index 0 / end
+
+ testpmd> set vxlan-with-vlan ip-version ipv4 vni 4 udp-src 4 udp-dst 4 ip-src
+ 127.0.0.1 ip-dst 128.0.0.1 vlan-tci 34 eth-src 11:11:11:11:11:11
+ eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions vxlan_encap /
+ queue index 0 / end
+
+IPv6 VXLAN outer header::
+
+ testpmd> set vxlan ip-version ipv6 vni 4 udp-src 4 udp-dst 4 ip-src ::1
+ ip-dst ::2222 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions vxlan_encap /
+ queue index 0 / end
+
+ testpmd> set vxlan-with-vlan ip-version ipv6 vni 4 udp-src 4 udp-dst 4
+ ip-src ::1 ip-dst ::2222 vlan-tci 34 eth-src 11:11:11:11:11:11
+ eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions vxlan_encap /
+ queue index 0 / end
+
+Sample NVGRE encapsulation rule
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The NVGRE encapsulation outer layer has default values pre-configured in the
+testpmd source code; these can be changed by using the following commands.
+
+IPv4 NVGRE outer header::
+
+ testpmd> set nvgre ip-version ipv4 tni 4 ip-src 127.0.0.1 ip-dst 128.0.0.1
+ eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions nvgre_encap /
+ queue index 0 / end
+
+ testpmd> set nvgre-with-vlan ip-version ipv4 tni 4 ip-src 127.0.0.1
+ ip-dst 128.0.0.1 vlan-tci 34 eth-src 11:11:11:11:11:11
+ eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions nvgre_encap /
+ queue index 0 / end
+
+IPv6 NVGRE outer header::
+
+ testpmd> set nvgre ip-version ipv6 tni 4 ip-src ::1 ip-dst ::2222
+ eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions nvgre_encap /
+ queue index 0 / end
+
+ testpmd> set nvgre-with-vlan ip-version ipv6 tni 4 ip-src ::1 ip-dst ::2222
+ vlan-tci 34 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
+ testpmd> flow create 0 ingress pattern end actions nvgre_encap /
+ queue index 0 / end
+
+BPF Functions
+--------------
+
+The following sections show functions to load/unload eBPF based filters.
+
+bpf-load
+~~~~~~~~
+
+Load an eBPF program as a callback for a particular RX/TX queue::
+
+ testpmd> bpf-load rx|tx (portid) (queueid) (load-flags) (bpf-prog-filename)
+
+The available load-flags are:
+
+* ``J``: use JIT generated native code, otherwise BPF interpreter will be used.
+
+* ``M``: assume input parameter is a pointer to rte_mbuf, otherwise assume it is a pointer to first segment's data.
+
+* ``-``: none.
+
+.. note::
+
+ You'll need clang v3.7 or above to build the BPF program you'd like to load.
+
+For example:
+
+.. code-block:: console
+
+ cd test/bpf
+ clang -O2 -target bpf -c t1.c
+
+Then to load (and JIT compile) t1.o at RX queue 0, port 1:
+
+.. code-block:: console
+
+ testpmd> bpf-load rx 1 0 J ./dpdk.org/test/bpf/t1.o
+
+To load (not JITed) t1.o at TX queue 0, port 0:
+
+.. code-block:: console
+
+ testpmd> bpf-load tx 0 0 - ./dpdk.org/test/bpf/t1.o
+
+bpf-unload
+~~~~~~~~~~
+
+Unload a previously loaded eBPF program for a particular RX/TX queue::
+
+ testpmd> bpf-unload rx|tx (portid) (queueid)
+
+For example, to unload the BPF filter from TX queue 0, port 0:
+
+.. code-block:: console
+
+ testpmd> bpf-unload tx 0 0
diff --git a/doc/guides/tools/cryptoperf.rst b/doc/guides/tools/cryptoperf.rst
index 3c0e7d94..c366af4e 100644
--- a/doc/guides/tools/cryptoperf.rst
+++ b/doc/guides/tools/cryptoperf.rst
@@ -181,7 +181,7 @@ The following are the appication command-line options:
crypto_dpaa2_sec
crypto_armv8
crypto_scheduler
- crypto_mrvl
+ crypto_mvsam
* ``--optype <name>``
diff --git a/doc/guides/tools/testbbdev.rst b/doc/guides/tools/testbbdev.rst
index 5c7112de..234a64f5 100644
--- a/doc/guides/tools/testbbdev.rst
+++ b/doc/guides/tools/testbbdev.rst
@@ -21,7 +21,7 @@ The bbdevice drivers PMD which should be tested can be enabled by setting
``CONFIG_RTE_LIBRTE_PMD_<name>=y``
-Setting example for (*turbo_sw*) PMD
+Setting example for (*baseband_turbo_sw*) PMD
``CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=y``
@@ -70,30 +70,6 @@ The following are the command-line options:
``-c TEST_CASE [TEST_CASE ...], --test_cases TEST_CASE [TEST_CASE ...]``
Defines test cases to run. If not specified all available tests are run.
- The following tests can be run:
-
- * unittest
- Small unit tests witch check basic functionality of bbdev library.
- * latency
- Test calculates three latency metrics:
-
- * offload_latency_tc
- measures the cost of offloading enqueue and dequeue operations.
- * offload_latency_empty_q_tc
- measures the cost of offloading a dequeue operation from an empty queue.
- checks how long last dequeueing if there is no operations to dequeue
- * operation_latency_tc
- measures the time difference from the first attempt to enqueue till the
- first successful dequeue.
- * validation
- Test do enqueue on given vector and compare output after dequeueing.
- * throughput
- Test measures the achieved throughput on the available lcores.
- Results are printed in million operations per second and million bits per second.
- * interrupt
- The same test as 'throughput' but uses interrupts instead of PMD to perform
- the dequeue.
-
**Example usage:**
``./test-bbdev.py -c validation``
@@ -105,18 +81,18 @@ The following are the command-line options:
``-v TEST_VECTOR [TEST_VECTOR ...], --test_vector TEST_VECTOR [TEST_VECTOR ...]``
Specifies paths to the test vector files. If not specified path is set based
on *$RTE_SDK* environment variable concatenated with
- "*/app/test-bbdev/test_vectors/bbdev_vector_null.data*" and indicates default
+ "*/app/test-bbdev/test_vectors/bbdev_null.data*" and indicates default
data file.
**Example usage:**
- ``./test-bbdev.py -v app/test-bbdev/test_vectors/bbdev_vector_td_test1.data``
- Fills vector based on bbdev_vector_td_test1.data file and runs all tests
+ ``./test-bbdev.py -v app/test-bbdev/test_vectors/turbo_dec_test1.data``
+ Fills vector based on turbo_dec_test1.data file and runs all tests
- ``./test-bbdev.py -v bbdev_vector_td_test1.data bbdev_vector_te_test2.data``
+ ``./test-bbdev.py -v turbo_dec_test1.data turbo_enc_test2.data``
The bbdev test app is executed twice. First time vector is filled based on
- *bbdev_vector_td_test1.data* file and second time based on
- *bbdev_vector_te_test2.data* file. For both executions all tests are run.
+ *turbo_dec_test1.data* file and second time based on
+ *turbo_enc_test2.data* file. For both executions all tests are run.
``-n NUM_OPS, --num_ops NUM_OPS``
Specifies number of operations to process on device. If not specified num_ops
@@ -130,8 +106,54 @@ The following are the command-line options:
Specifies operations enqueue/dequeue burst size. If not specified burst_size is
set to 32. Maximum is 512.
-
-Parameter globbing
+Test Cases
+~~~~~~~~~~
+
+There are 6 main test cases that can be executed using the testbbdev tool:
+
+* Sanity checks [-c unittest]
+ - Performs sanity checks on BBDEV interface, validating basic functionality
+
+* Validation tests [-c validation]
+ - Performs full operation of enqueue and dequeue
+ - Compares the dequeued data buffer with the expected values in the test
+ vector (TV) being used
+ - Fails if any dequeued value does not match the data in the TV
+
+* Offload Cost measurement [-c offload]
+ - Measures the CPU cycles consumed from the receipt of a user enqueue
+ until it is put on the device queue
+ - The test measures 4 metrics
+ (a) *SW Enq Offload Cost*: Software only enqueue offload cost, the cycle
+ counts and time (us) from the point the enqueue API is called until
+ the point the operation is put on the accelerator queue.
+ (b) *Acc Enq Offload Cost*: The cycle count and time (us) from the
+ point the operation is put on the accelerator queue until the return
+ from enqueue.
+ (c) *SW Deq Offload Cost*: Software dequeue cost, the cycle counts and
+ time (us) consumed to dequeue one operation.
+ (d) *Empty Queue Enq Offload Cost*: The cycle count and time (us)
+ consumed to dequeue from an empty queue.
+
+* Latency measurement [-c latency]
+ - Measures the time consumed from the first enqueue until the first
+ appearance of a dequeued result
+ - This measurement represents the full latency for a bbdev operation
+ (encode or decode) to execute
+
+* Poll-mode Throughput measurement [-c throughput]
+ - Performs full operation of enqueue and dequeue
+ - Executes in poll mode
+ - Measures the achieved throughput on a subset or all available CPU cores
+ - Dequeued data is not validated against expected values stored in TV
+ - Results are printed in million operations per second and million bits
+ per second
+
+* Interrupt-mode Throughput [-c interrupt]
+ - Similar to Throughput test case, but using interrupts. No polling.
+
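+As a usage sketch, several of the above test cases can be combined in a
+single invocation, for example:
+
+.. code-block:: console
+
+ ./test-bbdev.py -c offload latency throughput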
+
+Parameter Globbing
~~~~~~~~~~~~~~~~~~
Thanks to the globbing functionality in python test-bbdev.py script allows to
@@ -141,26 +163,63 @@ run tests with different set of vector files without giving all of them explicit
.. code-block:: console
- ./test-bbdev.py -v app/test-bbdev/test_vectors/bbdev_vector_*.data
+ ./test-bbdev.py -v app/test-bbdev/test_vectors/turbo_<enc/dec>_c<c>_k<k>_r<r>_e<e>_<extra-info>.data
It runs all tests with following vectors:
-- ``bbdev_vector_null.data``
+- ``bbdev_null.data``
+
+- ``turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data``
+
+- ``turbo_enc_c1_k40_r0_e1196_rm.data``
+
+- ``turbo_enc_c2_k5952_r0_e17868_crc24b.data``
+
+- ``turbo_dec_c1_k40_r0_e17280_sbd_negllr.data``
+
+- ``turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data``
+
+- ``turbo_enc_c1_k40_r0_e272_rm.data``
+
+- ``turbo_enc_c3_k4800_r2_e14412_crc24b.data``
+
+- ``turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data``
+
+- ``turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data``
+
+- ``turbo_enc_c1_k6144_r0_e120_rm_rvidx.data``
+
+- ``turbo_enc_c4_k4800_r2_e14412_crc24b.data``
+
+- ``turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data``
+
+- ``turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data``
+
+- ``turbo_enc_c1_k6144_r0_e18444.data``
+
+- ``turbo_dec_c1_k6144_r0_e34560_negllr.data``
+
+- ``turbo_enc_c1_k40_r0_e1190_rm.data``
+
+- ``turbo_enc_c1_k6144_r0_e18448_crc24a.data``
-- ``bbdev_vector_td_default.data``
+- ``turbo_dec_c1_k6144_r0_e34560_posllr.data``
-- ``bbdev_vector_te_default.data``
+- ``turbo_enc_c1_k40_r0_e1194_rm.data``
+- ``turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data``
.. code-block:: console
- ./test-bbdev.py -v app/test-bbdev/test_vectors/bbdev_vector_t?_default.data
+ ./test-bbdev.py -v app/test-bbdev/turbo_*_default.data
-It runs all tests with "default" vectors:
+It runs all tests with "default" vectors.
-- ``bbdev_vector_te_default.data``
+* ``turbo_dec_default.data`` is a soft link to
+ ``turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data``
-- ``bbdev_vector_td_default.data``
+* ``turbo_enc_default.data`` is a soft link to
+ ``turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data``
Running Tests
@@ -174,9 +233,27 @@ x86_64-native-linuxapp-icc target:
|-- app
|-- test-bbdev
|-- test_vectors
- |-- bbdev_vector_null.data
- |-- bbdev_vector_td_default.data
- |-- bbdev_vector_te_default.data
+ |-- bbdev_null.data
+ |-- turbo_dec_c1_k6144_r0_e34560_sbd_negllr.data
+ |-- turbo_enc_c1_k40_r0_e1196_rm.data
+ |-- turbo_enc_c2_k5952_r0_e17868_crc24b.data
+ |-- turbo_dec_c1_k40_r0_e17280_sbd_negllr.data
+ |-- turbo_dec_c1_k6144_r0_e34560_sbd_posllr.data
+ |-- turbo_enc_c1_k40_r0_e272_rm.data
+ |-- turbo_enc_c3_k4800_r2_e14412_crc24b.data
+ |-- turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_high_snr.data
+ |-- turbo_dec_c2_k3136_r0_e4920_sbd_negllr_crc24b.data
+ |-- turbo_enc_c1_k6144_r0_e120_rm_rvidx.data
+ |-- turbo_enc_c4_k4800_r2_e14412_crc24b.data
+ |-- turbo_dec_c1_k6144_r0_e10376_crc24b_sbd_negllr_low_snr.data
+ |-- turbo_dec_c2_k3136_r0_e4920_sbd_negllr.data
+ |-- turbo_enc_c1_k6144_r0_e18444.data
+ |-- turbo_dec_c1_k6144_r0_e34560_negllr.data
+ |-- turbo_enc_c1_k40_r0_e1190_rm.data
+ |-- turbo_enc_c1_k6144_r0_e18448_crc24a.data
+ |-- turbo_dec_c1_k6144_r0_e34560_posllr.data
+ |-- turbo_enc_c1_k40_r0_e1194_rm.data
+ |-- turbo_enc_c1_k6144_r0_e32256_crc24b_rm.data
|-- x86_64-native-linuxapp-icc
|-- app
@@ -188,43 +265,43 @@ All bbdev devices
.. code-block:: console
./test-bbdev.py -p ../../x86_64-native-linuxapp-icc/app/testbbdev
- -v ./test_vectors/bbdev_vector_td_default.data
+ -v turbo_dec_default.data
It runs all available tests using the test vector filled based on
-*bbdev_vector_td_default.data* file.
+*turbo_dec_default.data* file.
By default number of operations to process on device is set to 32, timeout is
set to 300s and operations enqueue/dequeue burst size is set to 32.
-Moreover a bbdev (*bbdev_null*) device will be created.
+Moreover a bbdev (*baseband_null*) device will be created.
-bbdev turbo_sw device
-~~~~~~~~~~~~~~~~~~~~~
+baseband turbo_sw device
+~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: console
./test-bbdev.py -p ../../x86_64-native-linuxapp-icc/app/testbbdev
- -e="--vdev=turbo_sw" -t 120 -c validation
- -v ./test_vectors/bbdev_vector_t?_default.data -n 64 -b 8 32
+ -e="--vdev=baseband_turbo_sw" -t 120 -c validation
+ -v ./test_vectors/turbo_* -n 64 -b 8 32
It runs **validation** test for each vector file that matches the given pattern.
Number of operations to process on device is set to 64 and operations timeout is
set to 120s and enqueue/dequeue burst size is set to 8 and to 32.
-Moreover a bbdev (*turbo_sw*) device will be created.
+Moreover a bbdev (*baseband_turbo_sw*) device will be created.
bbdev null device
~~~~~~~~~~~~~~~~~
-Executing bbdev null device with *bbdev_vector_null.data* helps in measuring the
+Executing bbdev null device with *bbdev_null.data* helps in measuring the
overhead introduced by the bbdev framework.
.. code-block:: console
- ./test-bbdev.py -e="--vdev=bbdev_null0"
- -v ./test_vectors/bbdev_vector_null.data
+ ./test-bbdev.py -e="--vdev=baseband_null0"
+ -v ./test_vectors/bbdev_null.data
**Note:**
-bbdev_null device does not have to be defined explicitly as it is created by default.
+baseband_null device does not have to be defined explicitly as it is created by default.
diff --git a/doc/guides/tools/testeventdev.rst b/doc/guides/tools/testeventdev.rst
index 77480ffe..46effd87 100644
--- a/doc/guides/tools/testeventdev.rst
+++ b/doc/guides/tools/testeventdev.rst
@@ -123,6 +123,36 @@ The following are the application command-line options:
Use ethernet device as producer.
+* ``--prod_type_timerdev``
+
+ Use event timer adapter as producer.
+
+* ``--prod_type_timerdev_burst``
+
+ Use burst mode event timer adapter as producer.
+
+* ``--timer_tick_nsec``
+
+ Used to dictate the number of nanoseconds between bucket traversals of the
+ event timer adapter. Refer to `rte_event_timer_adapter_conf`.
+
+* ``--max_tmo_nsec``
+
+ Used to configure the event timer adapter's maximum arm timeout in
+ nanoseconds.
+
+* ``--expiry_nsec``
+
+ Dictates the number of nanoseconds after which the event timer expires.
+
+* ``--nb_timers``
+
+ Number of event timers each producer core will generate.
+
+* ``--nb_timer_adptrs``
+
+ Number of event timer adapters to be used. Each adapter is used in a
+ round-robin manner by the producer cores.
+
Eventdev Tests
--------------
@@ -347,6 +377,13 @@ Supported application command line options are following::
--fwd_latency
--queue_priority
--prod_type_ethdev
+ --prod_type_timerdev_burst
+ --prod_type_timerdev
+ --timer_tick_nsec
+ --max_tmo_nsec
+ --expiry_nsec
+ --nb_timers
+ --nb_timer_adptrs
Example
^^^^^^^
@@ -365,6 +402,14 @@ Example command to run perf queue test with ethernet ports:
sudo build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
--test=perf_queue --plcores=2 --wlcore=3 --stlist=p --prod_type_ethdev
+Example command to run perf queue test with event timer adapter:
+
+.. code-block:: console
+
+ sudo build/app/dpdk-test-eventdev --vdev="event_octeontx" -- \
+ --wlcores 4 --plcores 12 --test perf_queue --stlist=a \
+ --prod_type_timerdev --fwd_latency
+
PERF_ATQ Test
~~~~~~~~~~~~~~~
@@ -431,6 +476,13 @@ Supported application command line options are following::
--worker_deq_depth
--fwd_latency
--prod_type_ethdev
+ --prod_type_timerdev_burst
+ --prod_type_timerdev
+ --timer_tick_nsec
+ --max_tmo_nsec
+ --expiry_nsec
+ --nb_timers
+ --nb_timer_adptrs
Example
^^^^^^^
@@ -442,6 +494,14 @@ Example command to run perf ``all types queue`` test:
sudo build/app/dpdk-test-eventdev --vdev=event_octeontx -- \
--test=perf_atq --plcores=2 --wlcore=3 --stlist=p --nb_pkts=0
+Example command to run perf ``all types queue`` test with event timer adapter:
+
+.. code-block:: console
+
+ sudo build/app/dpdk-test-eventdev --vdev="event_octeontx" -- \
+ --wlcores 4 --plcores 12 --test perf_atq --verbose 20 \
+ --stlist=a --prod_type_timerdev --fwd_latency
+
PIPELINE_QUEUE Test
~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/Makefile b/drivers/Makefile
index ee65c87b..75660765 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -3,18 +3,23 @@
include $(RTE_SDK)/mk/rte.vars.mk
+DIRS-y += common
DIRS-y += bus
DIRS-y += mempool
-DEPDIRS-mempool := bus
+DEPDIRS-mempool := common bus
DIRS-y += net
-DEPDIRS-net := bus mempool
-DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += bbdev
-DEPDIRS-bbdev := bus mempool
+DEPDIRS-net := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += baseband
+DEPDIRS-baseband := common bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
-DEPDIRS-crypto := bus mempool
+DEPDIRS-crypto := common bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += common/qat
+DEPDIRS-common/qat := bus mempool
+DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += compress
+DEPDIRS-compress := bus mempool
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += event
-DEPDIRS-event := bus mempool net
+DEPDIRS-event := common bus mempool net
DIRS-$(CONFIG_RTE_LIBRTE_RAWDEV) += raw
-DEPDIRS-raw := bus mempool net event
+DEPDIRS-raw := common bus mempool net event
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bbdev/Makefile b/drivers/baseband/Makefile
index 4ec83b0a..4ec83b0a 100644
--- a/drivers/bbdev/Makefile
+++ b/drivers/baseband/Makefile
diff --git a/drivers/bbdev/null/Makefile b/drivers/baseband/null/Makefile
index f885a97b..f885a97b 100644
--- a/drivers/bbdev/null/Makefile
+++ b/drivers/baseband/null/Makefile
diff --git a/drivers/bbdev/null/bbdev_null.c b/drivers/baseband/null/bbdev_null.c
index 6bc84917..2f251510 100644
--- a/drivers/bbdev/null/bbdev_null.c
+++ b/drivers/baseband/null/bbdev_null.c
@@ -13,7 +13,7 @@
#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
-#define DRIVER_NAME bbdev_null
+#define DRIVER_NAME baseband_null
/* NULL BBDev logging ID */
static int bbdev_null_logtype;
@@ -71,7 +71,8 @@ info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
dev_info->max_num_queues = internals->max_nb_queues;
dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
dev_info->hardware_accelerated = false;
- dev_info->max_queue_priority = 0;
+ dev_info->max_dl_queue_priority = 0;
+ dev_info->max_ul_queue_priority = 0;
dev_info->default_queue_conf = default_queue_conf;
dev_info->capabilities = bbdev_capabilities;
dev_info->cpu_flag_reqs = NULL;
@@ -345,10 +346,9 @@ RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_null_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
BBDEV_NULL_MAX_NB_QUEUES_ARG"=<int> "
BBDEV_NULL_SOCKET_ID_ARG"=<int>");
+RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, bbdev_null);
-RTE_INIT(null_bbdev_init_log);
-static void
-null_bbdev_init_log(void)
+RTE_INIT(null_bbdev_init_log)
{
bbdev_null_logtype = rte_log_register("pmd.bb.null");
if (bbdev_null_logtype >= 0)
diff --git a/drivers/bbdev/null/rte_pmd_bbdev_null_version.map b/drivers/baseband/null/rte_pmd_bbdev_null_version.map
index 58b94270..58b94270 100644
--- a/drivers/bbdev/null/rte_pmd_bbdev_null_version.map
+++ b/drivers/baseband/null/rte_pmd_bbdev_null_version.map
diff --git a/drivers/bbdev/turbo_sw/Makefile b/drivers/baseband/turbo_sw/Makefile
index 79eb5547..79eb5547 100644
--- a/drivers/bbdev/turbo_sw/Makefile
+++ b/drivers/baseband/turbo_sw/Makefile
diff --git a/drivers/bbdev/turbo_sw/bbdev_turbo_software.c b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
index 302abf5c..8ceb2769 100644
--- a/drivers/bbdev/turbo_sw/bbdev_turbo_software.c
+++ b/drivers/baseband/turbo_sw/bbdev_turbo_software.c
@@ -9,6 +9,7 @@
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_kvargs.h>
+#include <rte_cycles.h>
#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>
@@ -18,7 +19,7 @@
#include <phy_rate_match.h>
#include <divide.h>
-#define DRIVER_NAME turbo_sw
+#define DRIVER_NAME baseband_turbo_sw
/* Turbo SW PMD logging ID */
static int bbdev_turbo_sw_logtype;
@@ -32,11 +33,9 @@ static int bbdev_turbo_sw_logtype;
rte_bbdev_log(DEBUG, RTE_STR(__LINE__) ":%s() " fmt, __func__, \
##__VA_ARGS__)
-/* Number of columns in sub-block interleaver (36.212, section 5.1.4.1.1) */
-#define C_SUBBLOCK (32)
-#define MAX_TB_SIZE (391656)
-#define MAX_CB_SIZE (6144)
-#define MAX_KW (18528)
+#define DEINT_INPUT_BUF_SIZE (((RTE_BBDEV_MAX_CB_SIZE >> 3) + 1) * 48)
+#define DEINT_OUTPUT_BUF_SIZE (DEINT_INPUT_BUF_SIZE * 6)
+#define ADAPTER_OUTPUT_BUF_SIZE ((RTE_BBDEV_MAX_CB_SIZE + 4) * 48)
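The removed local bounds (MAX_CB_SIZE 6144, MAX_KW 18528, MAX_TB_SIZE 391656) move behind shared RTE_BBDEV_* names, and the three new macros above derive the scratch-buffer sizes from them. A standalone sketch of the arithmetic, assuming RTE_BBDEV_MAX_CB_SIZE keeps the old 6144 value (which the one-for-one substitutions elsewhere in this diff suggest):

    #include <assert.h>

    #define RTE_BBDEV_MAX_CB_SIZE 6144 /* assumed equal to old MAX_CB_SIZE */
    #define DEINT_INPUT_BUF_SIZE (((RTE_BBDEV_MAX_CB_SIZE >> 3) + 1) * 48)
    #define DEINT_OUTPUT_BUF_SIZE (DEINT_INPUT_BUF_SIZE * 6)
    #define ADAPTER_OUTPUT_BUF_SIZE ((RTE_BBDEV_MAX_CB_SIZE + 4) * 48)

    int main(void)
    {
        assert(DEINT_INPUT_BUF_SIZE == 36912);     /* (768 + 1) * 48 */
        assert(DEINT_OUTPUT_BUF_SIZE == 221472);   /* 36912 * 6 */
        assert(ADAPTER_OUTPUT_BUF_SIZE == 295104); /* 6148 * 48 */
        return 0;
    }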
/* private data structure */
struct bbdev_private {
@@ -90,7 +89,7 @@ compute_idx(uint16_t k)
{
int32_t result = 0;
- if (k < 40 || k > MAX_CB_SIZE)
+ if (k < RTE_BBDEV_MIN_CB_SIZE || k > RTE_BBDEV_MAX_CB_SIZE)
return -1;
if (k > 2048) {
@@ -140,7 +139,9 @@ info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
RTE_BBDEV_TURBO_POS_LLR_1_BIT_IN |
RTE_BBDEV_TURBO_NEG_LLR_1_BIT_IN |
RTE_BBDEV_TURBO_CRC_TYPE_24B |
+ RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP |
RTE_BBDEV_TURBO_EARLY_TERMINATION,
+ .max_llr_modulus = 16,
.num_buffers_src = RTE_BBDEV_MAX_CODE_BLOCKS,
.num_buffers_hard_out =
RTE_BBDEV_MAX_CODE_BLOCKS,
@@ -174,7 +175,8 @@ info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
dev_info->max_num_queues = internals->max_nb_queues;
dev_info->queue_size_lim = RTE_BBDEV_QUEUE_SIZE_LIMIT;
dev_info->hardware_accelerated = false;
- dev_info->max_queue_priority = 0;
+ dev_info->max_dl_queue_priority = 0;
+ dev_info->max_ul_queue_priority = 0;
dev_info->default_queue_conf = default_queue_conf;
dev_info->capabilities = bbdev_capabilities;
dev_info->cpu_flag_reqs = &cpu_flag;
@@ -225,7 +227,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
}
/* Allocate memory for encoder output. */
- ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_out%u:%u",
+ ret = snprintf(name, RTE_RING_NAMESIZE, RTE_STR(DRIVER_NAME)"_enc_o%u:%u",
dev->data->dev_id, q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -234,7 +236,8 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->enc_out = rte_zmalloc_socket(name,
- ((MAX_TB_SIZE >> 3) + 3) * sizeof(*q->enc_out) * 3,
+ ((RTE_BBDEV_MAX_TB_SIZE >> 3) + 3) *
+ sizeof(*q->enc_out) * 3,
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->enc_out == NULL) {
rte_bbdev_log(ERR,
@@ -244,7 +247,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
/* Allocate memory for rate matching output. */
ret = snprintf(name, RTE_RING_NAMESIZE,
- RTE_STR(DRIVER_NAME)"_enc_in%u:%u", dev->data->dev_id,
+ RTE_STR(DRIVER_NAME)"_enc_i%u:%u", dev->data->dev_id,
q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -253,7 +256,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->enc_in = rte_zmalloc_socket(name,
- (MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
+ (RTE_BBDEV_MAX_CB_SIZE >> 3) * sizeof(*q->enc_in),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->enc_in == NULL) {
rte_bbdev_log(ERR,
@@ -271,7 +274,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->ag = rte_zmalloc_socket(name,
- MAX_CB_SIZE * 10 * sizeof(*q->ag),
+ RTE_BBDEV_MAX_CB_SIZE * 10 * sizeof(*q->ag),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->ag == NULL) {
rte_bbdev_log(ERR,
@@ -289,7 +292,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->code_block = rte_zmalloc_socket(name,
- (6144 >> 3) * sizeof(*q->code_block),
+ RTE_BBDEV_MAX_CB_SIZE * sizeof(*q->code_block),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->code_block == NULL) {
rte_bbdev_log(ERR,
@@ -299,7 +302,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
/* Allocate memory for Deinterleaver input. */
ret = snprintf(name, RTE_RING_NAMESIZE,
- RTE_STR(DRIVER_NAME)"_deint_input%u:%u",
+ RTE_STR(DRIVER_NAME)"_de_i%u:%u",
dev->data->dev_id, q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -308,7 +311,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->deint_input = rte_zmalloc_socket(name,
- MAX_KW * sizeof(*q->deint_input),
+ DEINT_INPUT_BUF_SIZE * sizeof(*q->deint_input),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->deint_input == NULL) {
rte_bbdev_log(ERR,
@@ -318,7 +321,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
/* Allocate memory for Deinterleaver output. */
ret = snprintf(name, RTE_RING_NAMESIZE,
- RTE_STR(DRIVER_NAME)"_deint_output%u:%u",
+ RTE_STR(DRIVER_NAME)"_de_o%u:%u",
dev->data->dev_id, q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -327,7 +330,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->deint_output = rte_zmalloc_socket(NULL,
- MAX_KW * sizeof(*q->deint_output),
+ DEINT_OUTPUT_BUF_SIZE * sizeof(*q->deint_output),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->deint_output == NULL) {
rte_bbdev_log(ERR,
@@ -337,7 +340,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
/* Allocate memory for Adapter output. */
ret = snprintf(name, RTE_RING_NAMESIZE,
- RTE_STR(DRIVER_NAME)"_adapter_output%u:%u",
+ RTE_STR(DRIVER_NAME)"_ada_o%u:%u",
dev->data->dev_id, q_id);
if ((ret < 0) || (ret >= (int)RTE_RING_NAMESIZE)) {
rte_bbdev_log(ERR,
@@ -346,7 +349,7 @@ q_setup(struct rte_bbdev *dev, uint16_t q_id,
return -ENAMETOOLONG;
}
q->adapter_output = rte_zmalloc_socket(NULL,
- MAX_CB_SIZE * 6 * sizeof(*q->adapter_output),
+ ADAPTER_OUTPUT_BUF_SIZE * sizeof(*q->adapter_output),
RTE_CACHE_LINE_SIZE, queue_conf->socket);
if (q->adapter_output == NULL) {
rte_bbdev_log(ERR,
@@ -414,9 +417,9 @@ is_enc_input_valid(const uint16_t k, const int32_t k_idx,
return -1;
}
- if (k > MAX_CB_SIZE) {
+ if (k > RTE_BBDEV_MAX_CB_SIZE) {
rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
- k, MAX_CB_SIZE);
+ k, RTE_BBDEV_MAX_CB_SIZE);
return -1;
}
@@ -441,9 +444,9 @@ is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
return -1;
}
- if (kw > MAX_KW) {
+ if (kw > RTE_BBDEV_MAX_KW) {
rte_bbdev_log(ERR, "Input length (%u) is too big, max: %d",
- kw, MAX_KW);
+ kw, RTE_BBDEV_MAX_KW);
return -1;
}
@@ -452,20 +455,28 @@ is_dec_input_valid(int32_t k_idx, int16_t kw, int16_t in_length)
static inline void
process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
- uint8_t cb_idx, uint8_t c, uint16_t k, uint16_t ncb,
+ uint8_t r, uint8_t c, uint16_t k, uint16_t ncb,
uint32_t e, struct rte_mbuf *m_in, struct rte_mbuf *m_out,
- uint16_t in_offset, uint16_t out_offset, uint16_t total_left)
+ uint16_t in_offset, uint16_t out_offset, uint16_t total_left,
+ struct rte_bbdev_stats *q_stats)
{
int ret;
int16_t k_idx;
uint16_t m;
uint8_t *in, *out0, *out1, *out2, *tmp_out, *rm_out;
+ uint64_t first_3_bytes = 0;
struct rte_bbdev_op_turbo_enc *enc = &op->turbo_enc;
struct bblib_crc_request crc_req;
+ struct bblib_crc_response crc_resp;
struct bblib_turbo_encoder_request turbo_req;
struct bblib_turbo_encoder_response turbo_resp;
struct bblib_rate_match_dl_request rm_req;
struct bblib_rate_match_dl_response rm_resp;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ uint64_t start_time;
+#else
+ RTE_SET_USED(q_stats);
+#endif
k_idx = compute_idx(k);
in = rte_pktmbuf_mtod_offset(m_in, uint8_t *, in_offset);
@@ -478,18 +489,31 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
return;
}
- /* copy the input to the temporary buffer to be able to extend
- * it by 3 CRC bytes
+ crc_req.data = in;
+ crc_req.len = k - 24;
+	/* Check if there is room for the CRC bits; if not, use
+	 * the temporary buffer.
*/
- rte_memcpy(q->enc_in, in, (k - 24) >> 3);
- crc_req.data = q->enc_in;
- crc_req.len = (k - 24) >> 3;
- if (bblib_lte_crc24a_gen(&crc_req) == -1) {
- op->status |= 1 << RTE_BBDEV_CRC_ERROR;
- rte_bbdev_log(ERR, "CRC24a generation failed");
- return;
+ if (rte_pktmbuf_append(m_in, 3) == NULL) {
+ rte_memcpy(q->enc_in, in, (k - 24) >> 3);
+ in = q->enc_in;
+ } else {
+	/* Store the first 3 bytes of the next CB, as they will be
+	 * overwritten by the CRC bytes. If this is the last CB there
+	 * is no point in storing the next 3 bytes, and this if..else
+	 * branch is skipped.
+ */
+ first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
}
- in = q->enc_in;
+
+ crc_resp.data = in;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+ bblib_lte_crc24a_gen(&crc_req, &crc_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
} else if (enc->op_flags & RTE_BBDEV_TURBO_CRC_24B_ATTACH) {
/* CRC24B */
ret = is_enc_input_valid(k - 24, k_idx, total_left);
@@ -497,18 +521,31 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
return;
}
- /* copy the input to the temporary buffer to be able to extend
- * it by 3 CRC bytes
+ crc_req.data = in;
+ crc_req.len = k - 24;
+	/* Check if there is room for the CRC bits if this is the last
+	 * CB in the TB; if not, use the temporary buffer.
*/
- rte_memcpy(q->enc_in, in, (k - 24) >> 3);
- crc_req.data = q->enc_in;
- crc_req.len = (k - 24) >> 3;
- if (bblib_lte_crc24b_gen(&crc_req) == -1) {
- op->status |= 1 << RTE_BBDEV_CRC_ERROR;
- rte_bbdev_log(ERR, "CRC24b generation failed");
- return;
+ if ((c - r == 1) && (rte_pktmbuf_append(m_in, 3) == NULL)) {
+ rte_memcpy(q->enc_in, in, (k - 24) >> 3);
+ in = q->enc_in;
+ } else if (c - r > 1) {
+	/* Store the first 3 bytes of the next CB, as they will be
+	 * overwritten by the CRC bytes. If this is the last CB there
+	 * is no point in storing the next 3 bytes, and this if..else
+	 * branch is skipped.
+ */
+ first_3_bytes = *((uint64_t *)&in[(k - 32) >> 3]);
}
- in = q->enc_in;
+
+ crc_resp.data = in;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+ bblib_lte_crc24b_gen(&crc_req, &crc_resp);
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
} else {
ret = is_enc_input_valid(k, k_idx, total_left);
if (ret != 0) {
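The rewritten CRC24A/CRC24B paths above replace the unconditional copy into q->enc_in with an in-place CRC: the copy is now only the fallback when the mbuf lacks 3 bytes of tailroom, and when the CRC would clobber the start of the following code block, those bytes are saved first and restored after encoding. A self-contained toy of the save/restore step (gen_crc24() is a stand-in for the bblib generator; the driver uses an unaligned 64-bit load where this sketch uses memcpy):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Pretend CRC generator: writes 3 bytes just past the payload. */
    static void gen_crc24(uint8_t *data, size_t payload_len)
    {
        memset(data + payload_len, 0xAA, 3);
    }

    int main(void)
    {
        uint8_t tb[16] = {0, 1, 2, 3, 4, 5, 6, 7,
                          8, 9, 10, 11, 12, 13, 14, 15};
        size_t payload_len = 5;  /* CRC lands on bytes 5..7 (next CB) */
        uint64_t saved;

        memcpy(&saved, &tb[payload_len], sizeof(saved)); /* save next CB */
        gen_crc24(tb, payload_len);                      /* clobbers 5..7 */
        /* ... encoder consumes tb[0..7] here ... */
        memcpy(&tb[payload_len], &saved, sizeof(saved)); /* restore */
        assert(tb[5] == 5 && tb[6] == 6 && tb[7] == 7);
        return 0;
    }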
@@ -522,10 +559,32 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
/* Each bit layer output from turbo encoder is (k+4) bits long, i.e.
* input length + 4 tail bits. That's (k/8) + 1 bytes after rounding up.
* So dst_data's length should be 3*(k/8) + 3 bytes.
+	 * In the rate-matching bypass case, the output pointers passed to the
+	 * encoder (out0, out1 and out2) can point directly to the addresses of
+	 * the turbo_enc entity output.
*/
- out0 = q->enc_out;
- out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
- out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
+ if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
+ out0 = q->enc_out;
+ out1 = RTE_PTR_ADD(out0, (k >> 3) + 1);
+ out2 = RTE_PTR_ADD(out1, (k >> 3) + 1);
+ } else {
+ out0 = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3) * 3 + 2);
+ if (out0 == NULL) {
+ op->status |= 1 << RTE_BBDEV_DATA_ERROR;
+ rte_bbdev_log(ERR,
+ "Too little space in output mbuf");
+ return;
+ }
+ enc->output.length += (k >> 3) * 3 + 2;
+ /* rte_bbdev_op_data.offset can be different than the
+ * offset of the appended bytes
+ */
+ out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
+ out1 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
+ out_offset + (k >> 3) + 1);
+ out2 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
+ out_offset + 2 * ((k >> 3) + 1));
+ }
turbo_req.case_id = k_idx;
turbo_req.input_win = in;
@@ -533,16 +592,37 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
turbo_resp.output_win_0 = out0;
turbo_resp.output_win_1 = out1;
turbo_resp.output_win_2 = out2;
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+
if (bblib_turbo_encoder(&turbo_req, &turbo_resp) != 0) {
op->status |= 1 << RTE_BBDEV_DRV_ERROR;
rte_bbdev_log(ERR, "Turbo Encoder failed");
return;
}
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
+
+	/* Restore the first 3 bytes of the next CB if they were overwritten by CRC */
+ if (first_3_bytes != 0)
+ *((uint64_t *)&in[(k - 32) >> 3]) = first_3_bytes;
+
/* Rate-matching */
if (enc->op_flags & RTE_BBDEV_TURBO_RATE_MATCH) {
+ uint8_t mask_id;
+ /* Integer round up division by 8 */
+ uint16_t out_len = (e + 7) >> 3;
+ /* The mask array is indexed using E%8. E is an even number so
+ * there are only 4 possible values.
+ */
+ const uint8_t mask_out[] = {0xFF, 0xC0, 0xF0, 0xFC};
+
/* get output data starting address */
- rm_out = (uint8_t *)rte_pktmbuf_append(m_out, (e >> 3));
+ rm_out = (uint8_t *)rte_pktmbuf_append(m_out, out_len);
if (rm_out == NULL) {
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log(ERR,
@@ -555,7 +635,7 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
rm_out = rte_pktmbuf_mtod_offset(m_out, uint8_t *, out_offset);
/* index of current code block */
- rm_req.r = cb_idx;
+ rm_req.r = r;
/* total number of code block */
rm_req.C = c;
/* For DL - 1, UL - 0 */
@@ -582,17 +662,32 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
rm_req.tin1 = out1;
rm_req.tin2 = out2;
rm_resp.output = rm_out;
- rm_resp.OutputLen = (e >> 3);
+ rm_resp.OutputLen = out_len;
if (enc->op_flags & RTE_BBDEV_TURBO_RV_INDEX_BYPASS)
rm_req.bypass_rvidx = 1;
else
rm_req.bypass_rvidx = 0;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ start_time = rte_rdtsc_precise();
+#endif
+
if (bblib_rate_match_dl(&rm_req, &rm_resp) != 0) {
op->status |= 1 << RTE_BBDEV_DRV_ERROR;
rte_bbdev_log(ERR, "Rate matching failed");
return;
}
+
+ /* SW fills an entire last byte even if E%8 != 0. Clear the
+	 * superfluous data bits for consistency with the HW device.
+ */
+ mask_id = (e & 7) >> 1;
+ rm_out[out_len - 1] &= mask_out[mask_id];
+
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ q_stats->offload_time += rte_rdtsc_precise() - start_time;
+#endif
+
enc->output.length += rm_resp.OutputLen;
} else {
/* Rate matching is bypassed */
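A worked instance of the masking introduced above, with e = 1190 chosen for illustration: out_len = (1190 + 7) >> 3 = 149 bytes, e & 7 = 6, so mask_id = 3 and the last byte is ANDed with 0xFC, clearing the two bits the SW kernel filled beyond E:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint8_t mask_out[] = {0xFF, 0xC0, 0xF0, 0xFC};
        uint32_t e = 1190;               /* E is always even */
        uint16_t out_len = (e + 7) >> 3; /* round up to whole bytes */
        uint8_t mask_id = (e & 7) >> 1;  /* E%8 in {0,2,4,6} -> 0..3 */

        assert(out_len == 149);
        assert(mask_out[mask_id] == 0xFC); /* keep 6 valid bits */
        return 0;
    }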
@@ -616,28 +711,12 @@ process_enc_cb(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
tmp_out++;
}
*tmp_out = 0;
-
- /* copy shifted output to turbo_enc entity */
- out0 = (uint8_t *)rte_pktmbuf_append(m_out,
- (k >> 3) * 3 + 2);
- if (out0 == NULL) {
- op->status |= 1 << RTE_BBDEV_DATA_ERROR;
- rte_bbdev_log(ERR,
- "Too little space in output mbuf");
- return;
- }
- enc->output.length += (k >> 3) * 3 + 2;
- /* rte_bbdev_op_data.offset can be different than the
- * offset of the appended bytes
- */
- out0 = rte_pktmbuf_mtod_offset(m_out, uint8_t *,
- out_offset);
- rte_memcpy(out0, q->enc_out, (k >> 3) * 3 + 2);
}
}
static inline void
-enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
+enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op,
+ struct rte_bbdev_stats *queue_stats)
{
uint8_t c, r, crc24_bits = 0;
uint16_t k, ncb;
@@ -652,9 +731,9 @@ enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
/* Clear op status */
op->status = 0;
- if (total_left > MAX_TB_SIZE >> 3) {
+ if (total_left > RTE_BBDEV_MAX_TB_SIZE >> 3) {
rte_bbdev_log(ERR, "TB size (%u) is too big, max: %d",
- total_left, MAX_TB_SIZE);
+ total_left, RTE_BBDEV_MAX_TB_SIZE);
op->status = 1 << RTE_BBDEV_DATA_ERROR;
return;
}
@@ -692,7 +771,8 @@ enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
}
process_enc_cb(q, op, r, c, k, ncb, e, m_in,
- m_out, in_offset, out_offset, total_left);
+ m_out, in_offset, out_offset, total_left,
+ queue_stats);
/* Update total_left */
total_left -= (k - crc24_bits) >> 3;
/* Update offsets for next CBs (if exist) */
@@ -714,12 +794,15 @@ enqueue_enc_one_op(struct turbo_sw_queue *q, struct rte_bbdev_enc_op *op)
static inline uint16_t
enqueue_enc_all_ops(struct turbo_sw_queue *q, struct rte_bbdev_enc_op **ops,
- uint16_t nb_ops)
+ uint16_t nb_ops, struct rte_bbdev_stats *queue_stats)
{
uint16_t i;
+#ifdef RTE_BBDEV_OFFLOAD_COST
+ queue_stats->offload_time = 0;
+#endif
for (i = 0; i < nb_ops; ++i)
- enqueue_enc_one_op(q, ops[i]);
+ enqueue_enc_one_op(q, ops[i], queue_stats);
return rte_ring_enqueue_burst(q->processed_pkts, (void **)ops, nb_ops,
NULL);
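The offload accounting threaded through the encode path follows one pattern: offload_time is zeroed once per burst, and each SW kernel call is bracketed with rte_rdtsc_precise(). A condensed sketch of that pattern (do_kernel() is a hypothetical stand-in for the bblib_* calls):

    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_cycles.h>
    #include <rte_bbdev.h>

    static void do_kernel(void) { /* stand-in for a bblib_* call */ }

    static inline void
    timed_kernel(struct rte_bbdev_stats *q_stats)
    {
    #ifdef RTE_BBDEV_OFFLOAD_COST
        uint64_t start_time = rte_rdtsc_precise();
    #endif
        do_kernel();
    #ifdef RTE_BBDEV_OFFLOAD_COST
        q_stats->offload_time += rte_rdtsc_precise() - start_time;
    #else
        RTE_SET_USED(q_stats);
    #endif
    }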
@@ -739,11 +822,11 @@ remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
const uint32_t d = k + 4;
const uint32_t kw = (ncb / 3);
const uint32_t nd = kw - d;
- const uint32_t r_subblock = kw / C_SUBBLOCK;
+ const uint32_t r_subblock = kw / RTE_BBDEV_C_SUBBLOCK;
/* Inter-column permutation pattern */
- const uint32_t P[C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10,
- 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19,
- 11, 27, 7, 23, 15, 31};
+ const uint32_t P[RTE_BBDEV_C_SUBBLOCK] = {0, 16, 8, 24, 4, 20, 12, 28,
+ 2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13,
+ 29, 3, 19, 11, 27, 7, 23, 15, 31};
in_idx = 0;
out_idx = 0;
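The renamed table keeps the same contents: the sub-block interleaver's inter-column permutation from 36.212, which is the 5-bit bit-reversed ordering of 0..31. A quick self-check sketch that regenerates it:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t P[32] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10,
                26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13,
                29, 3, 19, 11, 27, 7, 23, 15, 31};
        uint32_t i, r;

        for (i = 0; i < 32; i++) {
            r = ((i & 1) << 4) | ((i & 2) << 2) | (i & 4) |
                ((i & 8) >> 2) | ((i & 16) >> 4); /* reverse 5 bits */
            assert(r == P[i]);
        }
        return 0;
    }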
@@ -786,7 +869,7 @@ remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
}
/* Last interlaced row is different - its last byte is the only padding
- * byte. We can have from 2 up to 26 padding bytes (Nd) per sub-block.
+ * byte. We can have from 4 up to 28 padding bytes (Nd) per sub-block.
* After interlacing the 1st and 2nd parity sub-blocks we can have 0, 1
* or 2 padding bytes each time we make a step of 2 * R_SUBBLOCK bytes
* (moving to another column). 2nd parity sub-block uses the same
@@ -797,10 +880,10 @@ remove_nulls_from_circular_buf(const uint8_t *in, uint8_t *out, uint16_t k,
* 32nd (31+1) byte, then 64th etc. (step is C_SUBBLOCK == 32) and the
* last byte will be the first byte from the sub-block:
* (32 + 32 * (R_SUBBLOCK-1)) % Kw == Kw % Kw == 0. Nd can't be smaller
- * than 2 so we know that bytes with ids 0 and 1 must be the padding
- * bytes. The bytes from the 1st parity sub-block are the bytes from the
- * 31st column - Nd can't be greater than 26 so we are sure that there
- * are no padding bytes in 31st column.
+ * than 4 so we know that bytes with ids 0, 1, 2 and 3 must be the
+ * padding bytes. The bytes from the 1st parity sub-block are the bytes
+ * from the 31st column - Nd can't be greater than 28 so we are sure
+ * that there are no padding bytes in 31st column.
*/
rte_memcpy(&out[out_idx], &in[in_idx], 2 * r_subblock - 1);
}
@@ -815,14 +898,14 @@ move_padding_bytes(const uint8_t *in, uint8_t *out, uint16_t k,
rte_memcpy(&out[nd], in, d);
rte_memcpy(&out[nd + kpi + 64], &in[kpi], d);
- rte_memcpy(&out[nd + 2 * (kpi + 64)], &in[2 * kpi], d);
+ rte_memcpy(&out[(nd - 1) + 2 * (kpi + 64)], &in[2 * kpi], d);
}
static inline void
process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
uint8_t c, uint16_t k, uint16_t kw, struct rte_mbuf *m_in,
struct rte_mbuf *m_out, uint16_t in_offset, uint16_t out_offset,
- bool check_crc_24b, uint16_t total_left)
+ bool check_crc_24b, uint16_t crc24_overlap, uint16_t total_left)
{
int ret;
int32_t k_idx;
@@ -880,7 +963,7 @@ process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
adapter_resp.pharqout = q->adapter_output;
bblib_turbo_adapter_ul(&adapter_req, &adapter_resp);
- out = (uint8_t *)rte_pktmbuf_append(m_out, (k >> 3));
+ out = (uint8_t *)rte_pktmbuf_append(m_out, ((k - crc24_overlap) >> 3));
if (out == NULL) {
op->status |= 1 << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log(ERR, "Too little space in output mbuf");
@@ -898,6 +981,8 @@ process_dec_cb(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op,
turbo_req.k = k;
turbo_req.k_idx = k_idx;
turbo_req.max_iter_num = dec->iter_max;
+ turbo_req.early_term_disable = !check_bit(dec->op_flags,
+ RTE_BBDEV_TURBO_EARLY_TERMINATION);
turbo_resp.ag_buf = q->ag;
turbo_resp.cb_buf = q->code_block;
turbo_resp.output = out;
@@ -920,6 +1005,7 @@ enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
{
uint8_t c, r = 0;
uint16_t kw, k = 0;
+ uint16_t crc24_overlap = 0;
struct rte_bbdev_op_turbo_dec *dec = &op->turbo_dec;
struct rte_mbuf *m_in = dec->input.data;
struct rte_mbuf *m_out = dec->hard_output.data;
@@ -943,6 +1029,10 @@ enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
c = 1;
}
+ if ((c > 1) && !check_bit(dec->op_flags,
+ RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP))
+ crc24_overlap = 24;
+
while (total_left > 0) {
if (dec->code_block_mode == 0)
k = (r < dec->tb_params.c_neg) ?
@@ -958,21 +1048,22 @@ enqueue_dec_one_op(struct turbo_sw_queue *q, struct rte_bbdev_dec_op *op)
* where D is the size of each output from turbo encoder block
* (k + 4).
*/
- kw = RTE_ALIGN_CEIL(k + 4, C_SUBBLOCK) * 3;
+ kw = RTE_ALIGN_CEIL(k + 4, RTE_BBDEV_C_SUBBLOCK) * 3;
process_dec_cb(q, op, c, k, kw, m_in, m_out, in_offset,
out_offset, check_bit(dec->op_flags,
- RTE_BBDEV_TURBO_CRC_TYPE_24B), total_left);
- /* As a result of decoding we get Code Block with included
- * decoded CRC24 at the end of Code Block. Type of CRC24 is
- * specified by flag.
+ RTE_BBDEV_TURBO_CRC_TYPE_24B), crc24_overlap,
+ total_left);
+	/* To keep the CRC24 attached to the end of the Code Block, use the
+	 * RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP flag, as the CRC is
+	 * removed by default once verified.
*/
/* Update total_left */
total_left -= kw;
/* Update offsets for next CBs (if exist) */
in_offset += kw;
- out_offset += (k >> 3);
+ out_offset += ((k - crc24_overlap) >> 3);
r++;
}
if (total_left != 0) {
@@ -1004,7 +1095,7 @@ enqueue_enc_ops(struct rte_bbdev_queue_data *q_data,
struct turbo_sw_queue *q = queue;
uint16_t nb_enqueued = 0;
- nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops);
+ nb_enqueued = enqueue_enc_all_ops(q, ops, nb_ops, &q_data->queue_stats);
q_data->queue_stats.enqueue_err_count += nb_ops - nb_enqueued;
q_data->queue_stats.enqueued_count += nb_enqueued;
@@ -1206,10 +1297,9 @@ RTE_PMD_REGISTER_VDEV(DRIVER_NAME, bbdev_turbo_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(DRIVER_NAME,
TURBO_SW_MAX_NB_QUEUES_ARG"=<int> "
TURBO_SW_SOCKET_ID_ARG"=<int>");
+RTE_PMD_REGISTER_ALIAS(DRIVER_NAME, turbo_sw);
-RTE_INIT(null_bbdev_init_log);
-static void
-null_bbdev_init_log(void)
+RTE_INIT(turbo_sw_bbdev_init_log)
{
bbdev_turbo_sw_logtype = rte_log_register("pmd.bb.turbo_sw");
if (bbdev_turbo_sw_logtype >= 0)
diff --git a/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map b/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
index 58b94270..58b94270 100644
--- a/drivers/bbdev/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
+++ b/drivers/baseband/turbo_sw/rte_pmd_bbdev_turbo_sw_version.map
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 7ef25939..cea3b55e 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -4,8 +4,12 @@
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += dpaa
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga
DIRS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += pci
DIRS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += vdev
+DIRS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/bus/dpaa/base/fman/fman.c b/drivers/bus/dpaa/base/fman/fman.c
index bda62e01..bdb70042 100644
--- a/drivers/bus/dpaa/base/fman/fman.c
+++ b/drivers/bus/dpaa/base/fman/fman.c
@@ -300,7 +300,7 @@ fman_if_init(const struct device_node *dpa_node)
_errno = fman_get_mac_index(regs_addr_host, &__if->__if.mac_idx);
if (_errno) {
- FMAN_ERR(-EINVAL, "Invalid register address: %lu",
+ FMAN_ERR(-EINVAL, "Invalid register address: %" PRIx64,
regs_addr_host);
goto err;
}
@@ -442,6 +442,7 @@ fman_if_init(const struct device_node *dpa_node)
if (!pool_node) {
FMAN_ERR(-ENXIO, "%s: bad fsl,bman-buffer-pools\n",
dname);
+ free(bpool);
goto err;
}
pname = pool_node->full_name;
@@ -449,6 +450,7 @@ fman_if_init(const struct device_node *dpa_node)
prop = of_get_property(pool_node, "fsl,bpid", &proplen);
if (!prop) {
FMAN_ERR(-EINVAL, "%s: no fsl,bpid\n", pname);
+ free(bpool);
goto err;
}
assert(proplen == sizeof(*prop));
@@ -502,7 +504,7 @@ fman_if_init(const struct device_node *dpa_node)
/* Parsing of the network interface is complete, add it to the list */
DPAA_BUS_LOG(DEBUG, "Found %s, Tx Channel = %x, FMAN = %x,"
- "Port ID = %x\n",
+ "Port ID = %x",
dname, __if->__if.tx_channel_id, __if->__if.fman_idx,
__if->__if.mac_idx);
diff --git a/drivers/bus/dpaa/base/fman/fman_hw.c b/drivers/bus/dpaa/base/fman/fman_hw.c
index 0148b98e..4ebbc3d3 100644
--- a/drivers/bus/dpaa/base/fman/fman_hw.c
+++ b/drivers/bus/dpaa/base/fman/fman_hw.c
@@ -16,6 +16,9 @@
#include <fsl_fman_crc64.h>
#include <fsl_bman.h>
+#define FMAN_SP_SG_DISABLE 0x80000000
+#define FMAN_SP_EXT_BUF_MARG_START_SHIFT 16
+
/* Instantiate the global variable that the inline CRC64 implementation (in
* <fsl_fman.h>) depends on.
*/
@@ -422,20 +425,16 @@ fman_if_set_fc_quanta(struct fman_if *fm_if, u16 pause_quanta)
int
fman_if_get_fdoff(struct fman_if *fm_if)
{
- u32 fmbm_ricp;
+ u32 fmbm_rebm;
int fdoff;
- int iceof_mask = 0x001f0000;
- int icsz_mask = 0x0000001f;
struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
assert(fman_ccsr_map_fd != -1);
- fmbm_ricp =
- in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_ricp);
- /*iceof + icsz*/
- fdoff = ((fmbm_ricp & iceof_mask) >> 16) * 16 +
- (fmbm_ricp & icsz_mask) * 16;
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ fdoff = (fmbm_rebm >> FMAN_SP_EXT_BUF_MARG_START_SHIFT) & 0x1ff;
return fdoff;
}
@@ -502,12 +501,16 @@ fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset)
{
struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
unsigned int *fmbm_rebm;
+ int val = 0;
+ int fmbm_mask = 0x01ff0000;
+
+ val = fd_offset << FMAN_SP_EXT_BUF_MARG_START_SHIFT;
assert(fman_ccsr_map_fd != -1);
fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
- out_be32(fmbm_rebm, in_be32(fmbm_rebm) | (fd_offset << 16));
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
}
void
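After this change the get/set pair operates on the same 9-bit buffer-margin field at bit 16 of fmbm_rebm instead of deriving the offset from fmbm_ricp, and the setter no longer ORs into stale bits. A plain-C roundtrip sketch of the mask arithmetic (ordinary variables standing in for the register access):

    #include <assert.h>
    #include <stdint.h>

    #define MARG_SHIFT 16
    #define MARG_MASK  0x01ff0000

    int main(void)
    {
        uint32_t reg = 0xdeadbeef; /* arbitrary prior register content */
        uint32_t fd_offset = 0x80; /* must fit in 9 bits */

        reg = (reg & ~MARG_MASK) | (fd_offset << MARG_SHIFT); /* set */
        assert(((reg >> MARG_SHIFT) & 0x1ff) == fd_offset);   /* get */
        assert((reg & ~MARG_MASK) == (0xdeadbeefu & ~MARG_MASK));
        return 0;
    }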
@@ -536,6 +539,47 @@ fman_if_get_maxfrm(struct fman_if *fm_if)
return (in_be32(reg_maxfrm) | 0x0000FFFF);
}
+/* MSB in fmbm_rebm register
+ * 0 - If the BMI cannot store the frame in a single buffer, it may select a
+ *     smaller buffer and store the frame in scatter/gather (S/G) buffers.
+ * 1 - Scatter/gather format is not enabled for frame storage. If the BMI
+ *     cannot store the frame in a single buffer, the frame is discarded.
+ */
+
+int
+fman_if_get_sg_enable(struct fman_if *fm_if)
+{
+ u32 fmbm_rebm;
+
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = in_be32(&((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm);
+
+ return (fmbm_rebm & FMAN_SP_SG_DISABLE) ? 0 : 1;
+}
+
+void
+fman_if_set_sg(struct fman_if *fm_if, int enable)
+{
+ struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
+ unsigned int *fmbm_rebm;
+ int val;
+ int fmbm_mask = FMAN_SP_SG_DISABLE;
+
+ if (enable)
+ val = 0;
+ else
+ val = FMAN_SP_SG_DISABLE;
+
+ assert(fman_ccsr_map_fd != -1);
+
+ fmbm_rebm = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rebm;
+
+ out_be32(fmbm_rebm, (in_be32(fmbm_rebm) & ~fmbm_mask) | val);
+}
+
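A plausible caller pattern for the new accessor pair, sketched here rather than taken from a caller in this diff (the ensure_sg() wrapper is illustrative, assuming fsl_fman.h is on the include path):

    #include <fsl_fman.h>

    /* Sketch: enable S/G on an interface only when the BMI has it off. */
    static void ensure_sg(struct fman_if *fm_if)
    {
        if (!fman_if_get_sg_enable(fm_if))
            fman_if_set_sg(fm_if, 1);
    }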
void
fman_if_set_dnia(struct fman_if *fm_if, uint32_t nia)
{
diff --git a/drivers/bus/dpaa/base/fman/netcfg_layer.c b/drivers/bus/dpaa/base/fman/netcfg_layer.c
index 3e956ce1..031c6f1a 100644
--- a/drivers/bus/dpaa/base/fman/netcfg_layer.c
+++ b/drivers/bus/dpaa/base/fman/netcfg_layer.c
@@ -18,11 +18,6 @@
#include <rte_dpaa_logs.h>
#include <netcfg.h>
-/* Structure contains information about all the interfaces given by user
- * on command line.
- */
-struct netcfg_interface *netcfg_interface;
-
/* This data structure contains all configuration information
 * related to usage of DPA devices.
*/
diff --git a/drivers/bus/dpaa/base/fman/of.c b/drivers/bus/dpaa/base/fman/of.c
index 1b2dbe26..a7f3174e 100644
--- a/drivers/bus/dpaa/base/fman/of.c
+++ b/drivers/bus/dpaa/base/fman/of.c
@@ -182,6 +182,11 @@ linear_dir(struct dt_dir *d)
DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
d->node.node.full_name);
d->lphandle = f;
+ } else if (!strcmp(f->node.node.name, "phandle")) {
+ if (d->lphandle)
+ DPAA_BUS_LOG(DEBUG, "Duplicate lphandle in %s",
+ d->node.node.full_name);
+ d->lphandle = f;
} else if (!strcmp(f->node.node.name, "#address-cells")) {
if (d->a_cells)
DPAA_BUS_LOG(DEBUG, "Duplicate a_cells in %s",
@@ -541,3 +546,42 @@ of_device_is_compatible(const struct device_node *dev_node,
return true;
return false;
}
+
+static const void *of_get_mac_addr(const struct device_node *np,
+ const char *name)
+{
+ return of_get_property(np, name, NULL);
+}
+
+/**
+ * Search the device tree for the best MAC address to use. 'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the device tree, but were not set by U-Boot. For example, the
+ * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
+ * addresses. Some older U-Boots only initialized 'local-mac-address'. In
+ * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
+ * but is all zeros.
+ */
+const void *of_get_mac_address(const struct device_node *np)
+{
+ const void *addr;
+
+ addr = of_get_mac_addr(np, "mac-address");
+ if (addr)
+ return addr;
+
+ addr = of_get_mac_addr(np, "local-mac-address");
+ if (addr)
+ return addr;
+
+ return of_get_mac_addr(np, "address");
+}
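A sketch of how a PMD might consume the new helper; the fetch_mac() wrapper and the zeroed fallback are illustrative, not part of this diff:

    #include <stdint.h>
    #include <string.h>
    #include <of.h>

    static void fetch_mac(const struct device_node *np, uint8_t mac[6])
    {
        const void *addr = of_get_mac_address(np);

        if (addr != NULL)
            memcpy(mac, addr, 6); /* best DT-provided address */
        else
            memset(mac, 0, 6);    /* caller must assign one itself */
    }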
diff --git a/drivers/bus/dpaa/base/qbman/bman_driver.c b/drivers/bus/dpaa/base/qbman/bman_driver.c
index a1ef3921..b14b5905 100644
--- a/drivers/bus/dpaa/base/qbman/bman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/bman_driver.c
@@ -15,9 +15,9 @@
/*
 * Global variables for the max portal/pool numbers this BMan version supports
*/
-u16 bman_ip_rev;
+static u16 bman_ip_rev;
u16 bman_pool_max;
-void *bman_ccsr_map;
+static void *bman_ccsr_map;
/*****************/
/* Portal driver */
@@ -161,7 +161,7 @@ int bman_init_ccsr(const struct device_node *node)
PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
if (bman_ccsr_map == MAP_FAILED) {
pr_err("Can not map BMan CCSR base Bman: "
- "0x%x Phys: 0x%lx size 0x%lx",
+ "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIu64,
*bman_addr, phys_addr, regs_size);
return -EINVAL;
}
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index 2b97671b..7c17027f 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -314,9 +314,9 @@ loop:
if (!msg)
return 0;
}
- if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
/* We aren't draining anything but FQRNIs */
- pr_err("Found verb 0x%x in MR\n", msg->verb);
+ pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
return -1;
}
qm_mr_next(p);
@@ -483,7 +483,7 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
/* when accessing 'verb', use __raw_readb() to ensure that compiler
* inlining doesn't try to optimise out "excess reads".
*/
- if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
if (!mr->pi)
mr->vbit ^= QM_MR_VERB_VBIT;
@@ -625,7 +625,7 @@ fail_eqcr:
#define MAX_GLOBAL_PORTALS 8
static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
-rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
+static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
static struct qman_portal *
qman_alloc_global_portal(void)
@@ -832,7 +832,7 @@ mr_loop:
goto mr_done;
swapped_msg = *msg;
hw_fd_to_cpu(&swapped_msg.ern.fd);
- verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
/* The message is a software ERN iff the 0x20 bit is set */
if (verb & 0x20) {
switch (verb) {
@@ -1058,7 +1058,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
struct qm_portal *portal = &p->p;
register struct qm_dqrr *dqrr = &portal->dqrr;
struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
- struct qman_fq *fq[QM_DQRR_SIZE];
+ struct qman_fq *fq;
unsigned int limit = 0, rx_number = 0;
uint32_t consume = 0;
@@ -1087,18 +1087,18 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
shadow[rx_number]->fd.opaque =
be32_to_cpu(dq[rx_number]->fd.opaque);
#else
- shadow = dq;
+ shadow[rx_number] = dq[rx_number];
#endif
/* SDQCR: context_b points to the FQ */
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- fq[rx_number] = qman_fq_lookup_table[be32_to_cpu(
- dq[rx_number]->contextB)];
+ fq = qman_fq_lookup_table[be32_to_cpu(dq[rx_number]->contextB)];
#else
- fq[rx_number] = (void *)(uintptr_t)be32_to_cpu(dq->contextB);
+ fq = (void *)be32_to_cpu(dq[rx_number]->contextB);
#endif
- fq[rx_number]->cb.dqrr_prepare(shadow[rx_number],
- &bufs[rx_number]);
+ if (fq->cb.dqrr_prepare)
+ fq->cb.dqrr_prepare(shadow[rx_number],
+ &bufs[rx_number]);
consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
rx_number++;
@@ -1106,7 +1106,7 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
} while (++limit < poll_limit);
if (rx_number)
- fq[0]->cb.dqrr_dpdk_pull_cb(fq, shadow, bufs, rx_number);
+ fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
	/* Consume all the DQRR entries together */
qm_out(DQRR_DCAP, (1 << 8) | consume);
@@ -1665,7 +1665,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
*/
struct qm_mr_entry msg;
- msg.verb = QM_MR_VERB_FQRNI;
+ msg.ern.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs;
msg.fq.fqid = fq->fqid;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
@@ -2002,13 +2002,13 @@ int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
return 0;
}
-int qman_set_vdq(struct qman_fq *fq, u16 num)
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
{
struct qman_portal *p = get_affine_portal();
uint32_t vdqcr;
int ret = -EBUSY;
- vdqcr = QM_VDQCR_EXACT;
+ vdqcr = vdqcr_flags;
vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
if ((fq->state != qman_fq_state_parked) &&
@@ -2642,7 +2642,7 @@ int qman_shutdown_fq(u32 fqid)
qm_mr_pvb_update(low_p);
msg = qm_mr_current(low_p);
while (msg) {
- if ((msg->verb &
+ if ((msg->ern.verb &
QM_MR_VERB_TYPE_MASK)
== QM_MR_VERB_FQRN)
found_fqrn = 1;
@@ -2710,7 +2710,7 @@ int qman_shutdown_fq(u32 fqid)
qm_mr_pvb_update(low_p);
msg = qm_mr_current(low_p);
while (msg) {
- if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
QM_MR_VERB_FQRL)
orl_empty = 1;
qm_mr_next(low_p);
diff --git a/drivers/bus/dpaa/base/qbman/qman_driver.c b/drivers/bus/dpaa/base/qbman/qman_driver.c
index 7cfa8ee4..f6ecd6b2 100644
--- a/drivers/bus/dpaa/base/qbman/qman_driver.c
+++ b/drivers/bus/dpaa/base/qbman/qman_driver.c
@@ -20,9 +20,9 @@ u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
u16 qm_channel_pme = QMAN_CHANNEL_PME;
/* CCSR map address to access CCSR-based registers */
-void *qman_ccsr_map;
+static void *qman_ccsr_map;
/* The qman clock frequency */
-u32 qman_clk;
+static u32 qman_clk;
static __thread int qmfd = -1;
static __thread struct qm_portal_config qpcfg;
@@ -160,6 +160,7 @@ struct qman_portal *fsl_qman_portal_create(void)
&cpuset);
if (ret) {
error(0, ret, "pthread_getaffinity_np()");
+ kfree(q_pcfg);
return NULL;
}
@@ -168,12 +169,14 @@ struct qman_portal *fsl_qman_portal_create(void)
if (CPU_ISSET(loop, &cpuset)) {
if (q_pcfg->cpu != -1) {
pr_err("Thread is not affine to 1 cpu\n");
+ kfree(q_pcfg);
return NULL;
}
q_pcfg->cpu = loop;
}
if (q_pcfg->cpu == -1) {
pr_err("Bug in getaffinity handling!\n");
+ kfree(q_pcfg);
return NULL;
}
@@ -183,6 +186,7 @@ struct qman_portal *fsl_qman_portal_create(void)
ret = process_portal_map(&q_map);
if (ret) {
error(0, ret, "process_portal_map()");
+ kfree(q_pcfg);
return NULL;
}
q_pcfg->channel = q_map.channel;
@@ -217,6 +221,7 @@ err2:
close(q_fd);
err1:
process_portal_unmap(&q_map.addr);
+ kfree(q_pcfg);
return NULL;
}
@@ -246,7 +251,6 @@ int fsl_qman_portal_destroy(struct qman_portal *qp)
int qman_global_init(void)
{
const struct device_node *dt_node;
- int ret = 0;
size_t lenp;
const u32 *chanid;
static int ccsr_map_fd;
@@ -352,9 +356,7 @@ int qman_global_init(void)
qman_clk = be32_to_cpu(*clk);
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
- ret = qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
- if (ret)
- return ret;
+ return qman_setup_fq_lookup_table(CONFIG_FSL_QMAN_FQ_LOOKUP_MAX);
#endif
return 0;
}
diff --git a/drivers/bus/dpaa/base/qbman/qman_priv.h b/drivers/bus/dpaa/base/qbman/qman_priv.h
index 9e4471e6..02f6301f 100644
--- a/drivers/bus/dpaa/base/qbman/qman_priv.h
+++ b/drivers/bus/dpaa/base/qbman/qman_priv.h
@@ -139,7 +139,6 @@ struct qm_portal_config {
#define QMAN_REV31 0x0301
#define QMAN_REV32 0x0302
extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
-extern u32 qman_clk;
int qm_set_wpm(int wpm);
int qm_get_wpm(int *wpm);
diff --git a/drivers/bus/dpaa/dpaa_bus.c b/drivers/bus/dpaa/dpaa_bus.c
index f2bb3b15..16fabd1b 100644
--- a/drivers/bus/dpaa/dpaa_bus.c
+++ b/drivers/bus/dpaa/dpaa_bus.c
@@ -19,7 +19,6 @@
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
-#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
@@ -51,10 +50,12 @@ struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;
/* define a variable to hold the portal_key, once created.*/
-pthread_key_t dpaa_portal_key;
+static pthread_key_t dpaa_portal_key;
unsigned int dpaa_svr_family;
+#define FSL_DPAA_BUS_NAME dpaa_bus
+
RTE_DEFINE_PER_LCORE(bool, dpaa_io);
RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
@@ -130,6 +131,22 @@ dpaa_sec_available(void)
static void dpaa_clean_device_list(void);
+static struct rte_devargs *
+dpaa_devargs_lookup(struct rte_dpaa_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
static int
dpaa_create_device_list(void)
{
@@ -161,8 +178,9 @@ dpaa_create_device_list(void)
memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
fman_intf->mac_idx);
- DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
+ DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
dpaa_add_to_device_list(dev);
}
@@ -198,7 +216,9 @@ dpaa_create_device_list(void)
*/
memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
sprintf(dev->name, "dpaa-sec%d", i);
- DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
+ DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
dpaa_add_to_device_list(dev);
}
@@ -235,7 +255,7 @@ int rte_dpaa_portal_init(void *arg)
BUS_INIT_FUNC_TRACE();
- if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY)
+ if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
cpu = rte_get_master_lcore();
/* if the core id is not supported */
else
@@ -309,9 +329,15 @@ rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
/* Affine above created portal with channel*/
u32 sdqcr;
struct qman_portal *qp;
+ int ret;
- if (unlikely(!RTE_PER_LCORE(dpaa_io)))
- rte_dpaa_portal_init(arg);
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init(arg);
+ if (ret < 0) {
+ DPAA_BUS_LOG(ERR, "portal initialization failure");
+ return ret;
+ }
+ }
/* Initialise qman specific portals */
qp = fsl_qman_portal_create();
@@ -352,6 +378,51 @@ dpaa_portal_finish(void *arg)
RTE_PER_LCORE(dpaa_io) = false;
}
+static int
+rte_dpaa_bus_parse(const char *name, void *out_name)
+{
+ int i, j;
+ int max_fman = 2, max_macs = 16;
+ char *sep = strchr(name, ':');
+
+ if (strncmp(name, RTE_STR(FSL_DPAA_BUS_NAME),
+ strlen(RTE_STR(FSL_DPAA_BUS_NAME)))) {
+ return -EINVAL;
+ }
+
+ if (!sep) {
+ DPAA_BUS_ERR("Incorrect device name observed");
+ return -EINVAL;
+ }
+
+ sep = (char *) (sep + 1);
+
+ for (i = 0; i < max_fman; i++) {
+ for (j = 0; j < max_macs; j++) {
+ char fm_name[16];
+ snprintf(fm_name, 16, "fm%d-mac%d", i, j);
+ if (strcmp(fm_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ return 0;
+ }
+ }
+ }
+
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ char sec_name[16];
+
+ snprintf(sec_name, 16, "dpaa-sec%d", i);
+ if (strcmp(sec_name, sep) == 0) {
+ if (out_name)
+ strcpy(out_name, sep);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
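The parser accepts exactly the names the scan phase generates, prefixed with the bus name and a colon. A contract sketch in assert form (illustrative only, since rte_dpaa_bus_parse() is static to this file):

    #include <assert.h>
    #include <errno.h>
    #include <string.h>

    static void parse_contract_examples(void)
    {
        char out[32];

        /* matches the fm%d-mac%d pattern generated above */
        assert(rte_dpaa_bus_parse("dpaa_bus:fm1-mac3", out) == 0);
        assert(strcmp(out, "fm1-mac3") == 0); /* name copied out */

        /* a name without the ':' separator is rejected */
        assert(rte_dpaa_bus_parse("dpaa_bus", NULL) == -EINVAL);
    }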
#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
@@ -390,14 +461,11 @@ rte_dpaa_bus_scan(void)
return 0;
}
- DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
- dpaa_netcfg, dpaa_netcfg->num_ethports);
-
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
dump_netcfg(dpaa_netcfg);
#endif
- DPAA_BUS_LOG(DEBUG, "Number of devices = %d\n",
+ DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
dpaa_netcfg->num_ethports);
ret = dpaa_create_device_list();
if (ret) {
@@ -415,9 +483,6 @@ rte_dpaa_bus_scan(void)
return ret;
}
- DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d\n",
- (unsigned int)dpaa_portal_key, ret);
-
return 0;
}
@@ -453,22 +518,15 @@ static int
rte_dpaa_device_match(struct rte_dpaa_driver *drv,
struct rte_dpaa_device *dev)
{
- int ret = -1;
-
- BUS_INIT_FUNC_TRACE();
-
if (!drv || !dev) {
DPAA_BUS_DEBUG("Invalid drv or dev received.");
- return ret;
+ return -1;
}
- if (drv->drv_type == dev->device_type) {
- DPAA_BUS_INFO("Device: %s matches for driver: %s",
- dev->name, drv->driver.name);
- ret = 0; /* Found a match */
- }
+ if (drv->drv_type == dev->device_type)
+ return 0;
- return ret;
+ return -1;
}
static int
@@ -479,8 +537,14 @@ rte_dpaa_bus_probe(void)
struct rte_dpaa_driver *drv;
FILE *svr_file = NULL;
unsigned int svr_ver;
+ int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
- BUS_INIT_FUNC_TRACE();
+ svr_file = fopen(DPAA_SOC_ID_FILE, "r");
+ if (svr_file) {
+ if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
+ dpaa_svr_family = svr_ver & SVR_MASK;
+ fclose(svr_file);
+ }
/* For each registered driver, and device, call the driver->probe */
TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
@@ -489,13 +553,19 @@ rte_dpaa_bus_probe(void)
if (ret)
continue;
- if (!drv->probe)
+ if (!drv->probe ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
continue;
- ret = drv->probe(drv, dev);
- if (ret)
- DPAA_BUS_ERR("Unable to probe.\n");
-
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA_BUS_ERR("Unable to probe.\n");
+ }
break;
}
}
@@ -506,13 +576,6 @@ rte_dpaa_bus_probe(void)
if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
- svr_file = fopen(DPAA_SOC_ID_FILE, "r");
- if (svr_file) {
- if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
- dpaa_svr_family = svr_ver & SVR_MASK;
- fclose(svr_file);
- }
-
return 0;
}
@@ -552,6 +615,7 @@ struct rte_dpaa_bus rte_dpaa_bus = {
.bus = {
.scan = rte_dpaa_bus_scan,
.probe = rte_dpaa_bus_probe,
+ .parse = rte_dpaa_bus_parse,
.find_device = rte_dpaa_find_device,
.get_iommu_class = rte_dpaa_get_iommu_class,
},
@@ -562,9 +626,7 @@ struct rte_dpaa_bus rte_dpaa_bus = {
RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
-RTE_INIT(dpaa_init_log);
-static void
-dpaa_init_log(void)
+RTE_INIT(dpaa_init_log)
{
dpaa_logtype_bus = rte_log_register("bus.dpaa");
if (dpaa_logtype_bus >= 0)
@@ -574,11 +636,11 @@ dpaa_init_log(void)
if (dpaa_logtype_mempool >= 0)
rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
- dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
+ dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa");
if (dpaa_logtype_pmd >= 0)
rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
- dpaa_logtype_eventdev = rte_log_register("eventdev.dpaa");
+ dpaa_logtype_eventdev = rte_log_register("pmd.event.dpaa");
if (dpaa_logtype_eventdev >= 0)
rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
}
diff --git a/drivers/bus/dpaa/include/compat.h b/drivers/bus/dpaa/include/compat.h
index 53707bb4..92241d23 100644
--- a/drivers/bus/dpaa/include/compat.h
+++ b/drivers/bus/dpaa/include/compat.h
@@ -39,6 +39,7 @@
#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_debug.h>
+#include <rte_cycles.h>
/* The following definitions are primarily to allow the single-source driver
* interfaces to be included by arbitrary program code. Ie. for interfaces that
@@ -47,9 +48,15 @@
*/
/* Required compiler attributes */
+#ifndef __maybe_unused
#define __maybe_unused __rte_unused
+#endif
+#ifndef __always_unused
#define __always_unused __rte_unused
+#endif
+#ifndef __packed
#define __packed __rte_packed
+#endif
#define noinline __attribute__((noinline))
#define L1_CACHE_BYTES 64
@@ -127,13 +134,15 @@ static inline void out_be32(volatile void *__p, u32 val)
*p = rte_cpu_to_be_32(val);
}
+#define hwsync() rte_rmb()
+#define lwsync() rte_wmb()
+
#define dcbt_ro(p) __builtin_prefetch(p, 0)
#define dcbt_rw(p) __builtin_prefetch(p, 1)
+#if defined(RTE_ARCH_ARM64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define dcbz_64(p) dcbz(p)
-#define hwsync() rte_rmb()
-#define lwsync() rte_wmb()
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
#define dcbf_64(p) dcbf(p)
#define dccivac(p) { asm volatile("dc civac, %0" : : "r"(p) : "memory"); }
@@ -144,9 +153,27 @@ static inline void out_be32(volatile void *__p, u32 val)
asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); \
} while (0)
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset((p), 0, 32)
+#define dcbz_64(p) memset((p), 0, 64)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define dcbz_64(p) dcbz(p)
+#define dcbf(p) RTE_SET_USED(p)
+#define dcbf_64(p) dcbf(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define dcbit_ro(p) RTE_SET_USED(p)
+#endif
+
#define barrier() { asm volatile ("" : : : "memory"); }
#define cpu_relax barrier
+#if defined(RTE_ARCH_ARM64)
static inline uint64_t mfatb(void)
{
uint64_t ret, ret_new, timeout = 200;
@@ -160,6 +187,11 @@ static inline uint64_t mfatb(void)
DPAA_BUG_ON(!timeout && (ret != ret_new));
return ret * 64;
}
+#else
+
+#define mfatb rte_rdtsc
+
+#endif
/* Spin for a few cycles without bothering the bus */
static inline void cpu_spin(int cycles)
diff --git a/drivers/bus/dpaa/include/fsl_fman.h b/drivers/bus/dpaa/include/fsl_fman.h
index c0ef1bff..1d1ce867 100644
--- a/drivers/bus/dpaa/include/fsl_fman.h
+++ b/drivers/bus/dpaa/include/fsl_fman.h
@@ -108,6 +108,12 @@ int fman_if_get_fdoff(struct fman_if *fm_if);
/* Set interface fd->offset value */
void fman_if_set_fdoff(struct fman_if *fm_if, uint32_t fd_offset);
+/* Get interface SG enable status value */
+int fman_if_get_sg_enable(struct fman_if *fm_if);
+
+/* Set interface SG support mode */
+void fman_if_set_sg(struct fman_if *fm_if, int enable);
+
/* Get interface Max Frame length (MTU) */
uint16_t fman_if_get_maxfrm(struct fman_if *fm_if);
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index e9793f30..b18cf037 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -284,20 +284,20 @@ static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
} while (0)
/* See 1.5.8.1: "Enqueue Command" */
-struct qm_eqcr_entry {
+struct __rte_aligned(8) qm_eqcr_entry {
u8 __dont_write_directly__verb;
u8 dca;
u16 seqnum;
u32 orp; /* 24-bit */
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
+ struct qm_fd fd; /* this has alignment 8 */
u8 __reserved3[32];
} __packed;
/* "Frame Dequeue Response" */
-struct qm_dqrr_entry {
+struct __rte_aligned(8) qm_dqrr_entry {
u8 verb;
u8 stat;
u16 seqnum; /* 15-bit */
@@ -305,7 +305,7 @@ struct qm_dqrr_entry {
u8 __reserved2[3];
u32 fqid; /* 24-bit */
u32 contextB;
- struct qm_fd fd;
+ struct qm_fd fd; /* this has alignment 8 */
u8 __reserved4[32];
};
@@ -323,18 +323,19 @@ struct qm_dqrr_entry {
/* "ERN Message Response" */
/* "FQ State Change Notification" */
struct qm_mr_entry {
- u8 verb;
union {
struct {
+ u8 verb;
u8 dca;
u16 seqnum;
u8 rc; /* Rejection Code */
u32 orp:24;
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
- } __packed ern;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) ern;
struct {
+ u8 verb;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
u8 __reserved1:4;
@@ -349,18 +350,19 @@ struct qm_mr_entry {
u32 __reserved3:24;
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
- } __packed dcern;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) dcern;
struct {
+ u8 verb;
u8 fqs; /* Frame Queue Status */
u8 __reserved1[6];
u32 fqid; /* 24-bit */
u32 contextB;
u8 __reserved2[16];
- } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+ } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
};
u8 __reserved2[32];
-} __packed;
+} __packed __rte_aligned(8);
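The net effect of the rework is that each view of the anonymous union now carries its own verb byte, all aliasing offset 0 of the entry, which the msg->ern.verb accesses elsewhere in this diff rely on. A compile-time sketch of that invariant (assuming fsl_qman.h is on the include path and a C11 compiler):

    #include <stddef.h>
    #include <fsl_qman.h>

    _Static_assert(offsetof(struct qm_mr_entry, ern.verb) == 0, "ern");
    _Static_assert(offsetof(struct qm_mr_entry, fq.verb) == 0, "fq");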
#define QM_MR_VERB_VBIT 0x80
/*
* ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
@@ -1330,10 +1332,11 @@ unsigned int qman_portal_poll_rx(unsigned int poll_limit,
* qman_set_vdq - Issue a volatile dequeue command
* @fq: Frame Queue on which the volatile dequeue command is issued
* @num: Number of Frames requested for volatile dequeue
+ * @vdqcr_flags: QM_VDQCR_EXACT flag for the VDQCR command
*
* This function will issue a volatile dequeue command to the QMAN.
*/
-int qman_set_vdq(struct qman_fq *fq, u16 num);
+int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags);
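Callers now pass the VDQCR flags explicitly rather than getting a hardwired QM_VDQCR_EXACT. A usage sketch for an exact volatile dequeue of four frames, assuming the usual qman_dequeue()/qman_dqrr_consume() follow-up pair from this header:

    #include <fsl_qman.h>

    /* Sketch: drain exactly 4 frames from fq with the new signature. */
    static void drain_four(struct qman_fq *fq)
    {
        struct qm_dqrr_entry *dq;

        if (qman_set_vdq(fq, 4, QM_VDQCR_EXACT) != 0)
            return;
        while ((dq = qman_dequeue(fq)) != NULL) {
            /* ... process dq->fd ... */
            qman_dqrr_consume(fq, dq);
        }
    }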
/**
* qman_dequeue - Get the DQRR entry after volatile dequeue command
diff --git a/drivers/bus/dpaa/include/of.h b/drivers/bus/dpaa/include/of.h
index 151be5a3..7ea7608f 100644
--- a/drivers/bus/dpaa/include/of.h
+++ b/drivers/bus/dpaa/include/of.h
@@ -109,6 +109,8 @@ const struct device_node *of_get_parent(const struct device_node *dev_node);
const struct device_node *of_get_next_child(const struct device_node *dev_node,
const struct device_node *prev);
+const void *of_get_mac_address(const struct device_node *np);
+
#define for_each_child_node(parent, child) \
for (child = of_get_next_child(parent, NULL); child != NULL; \
child = of_get_next_child(parent, child))
diff --git a/drivers/bus/dpaa/meson.build b/drivers/bus/dpaa/meson.build
new file mode 100644
index 00000000..d10b62c0
--- /dev/null
+++ b/drivers/bus/dpaa/meson.build
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['eventdev']
+sources = files('base/fman/fman.c',
+ 'base/fman/fman_hw.c',
+ 'base/fman/netcfg_layer.c',
+ 'base/fman/of.c',
+ 'base/qbman/bman.c',
+ 'base/qbman/bman_driver.c',
+ 'base/qbman/dpaa_alloc.c',
+ 'base/qbman/dpaa_sys.c',
+ 'base/qbman/process.c',
+ 'base/qbman/qman.c',
+ 'base/qbman/qman_driver.c',
+ 'dpaa_bus.c')
+
+allow_experimental_apis = true
+
+if cc.has_argument('-Wno-cast-qual')
+ cflags += '-Wno-cast-qual'
+endif
+
+includes += include_directories('include', 'base/qbman')
+cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/bus/dpaa/rte_bus_dpaa_version.map b/drivers/bus/dpaa/rte_bus_dpaa_version.map
index 8d902854..7d6d6243 100644
--- a/drivers/bus/dpaa/rte_bus_dpaa_version.map
+++ b/drivers/bus/dpaa/rte_bus_dpaa_version.map
@@ -92,3 +92,13 @@ DPDK_18.02 {
local: *;
} DPDK_17.11;
+
+DPDK_18.08 {
+ global:
+
+ fman_if_get_sg_enable;
+ fman_if_set_sg;
+ of_get_mac_address;
+
+ local: *;
+} DPDK_18.02;
diff --git a/drivers/bus/dpaa/rte_dpaa_bus.h b/drivers/bus/dpaa/rte_dpaa_bus.h
index 718701b1..15dc6a4a 100644
--- a/drivers/bus/dpaa/rte_dpaa_bus.h
+++ b/drivers/bus/dpaa/rte_dpaa_bus.h
@@ -15,8 +15,6 @@
#include <of.h>
#include <netcfg.h>
-#define FSL_DPAA_BUS_NAME "FSL_DPAA_BUS"
-
#define DPAA_MEMPOOL_OPS_NAME "dpaa"
#define DEV_TO_DPAA_DEVICE(ptr) \
@@ -95,20 +93,35 @@ struct dpaa_portal {
uint64_t tid;/**< Parent Thread id for this portal */
};
-/* TODO - this is costly, need to write a fast coversion routine */
+/* Various structures representing contiguous memory maps */
+struct dpaa_memseg {
+ TAILQ_ENTRY(dpaa_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa_memseg_list, dpaa_memseg);
+extern struct dpaa_memseg_list rte_dpaa_memsegs;
+
+/* Either iterate over the list of internal memseg references or fall back
+ * to the EAL memseg-based iova2virt.
+ */
static inline void *rte_dpaa_mem_ptov(phys_addr_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- int i;
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr != NULL; i++) {
- if (paddr >= memseg[i].iova && paddr <
- memseg[i].iova + memseg[i].len)
- return (uint8_t *)(memseg[i].addr) +
- (paddr - memseg[i].iova);
+ struct dpaa_memseg *ms;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
}
- return NULL;
+	/* If not, fall back to a full memseg list search */
+ return rte_mem_iova2virt(paddr);
}
/**
@@ -151,8 +164,7 @@ void dpaa_portal_finish(void *arg);
/** Helper for DPAA device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_DPAA(nm, dpaa_drv) \
-RTE_INIT(dpaainitfn_ ##nm); \
-static void dpaainitfn_ ##nm(void) \
+RTE_INIT(dpaainitfn_ ##nm) \
{\
(dpaa_drv).driver.name = RTE_STR(nm);\
rte_dpaa_driver_register(&dpaa_drv); \
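/* Illustrative sketch (not part of this patch): how a PMD registers with the
 * reworked macro; example_probe()/example_remove() and the driver object are
 * hypothetical.
 */
static int example_probe(struct rte_dpaa_driver *drv,
			 struct rte_dpaa_device *dev);
static int example_remove(struct rte_dpaa_device *dev);

static struct rte_dpaa_driver example_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = example_probe,
	.remove = example_remove,
};

RTE_PMD_REGISTER_DPAA(net_example, example_dpaa_pmd);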
diff --git a/drivers/bus/dpaa/rte_dpaa_logs.h b/drivers/bus/dpaa/rte_dpaa_logs.h
index 0fd70bbf..e4143543 100644
--- a/drivers/bus/dpaa/rte_dpaa_logs.h
+++ b/drivers/bus/dpaa/rte_dpaa_logs.h
@@ -15,10 +15,7 @@ extern int dpaa_logtype_pmd;
extern int dpaa_logtype_eventdev;
#define DPAA_BUS_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "%s(): " fmt "\n", \
- __func__, ##args)
-
-#define BUS_INIT_FUNC_TRACE() DPAA_BUS_LOG(DEBUG, " >>")
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_bus, "dpaa: " fmt "\n", ##args)
#ifdef RTE_LIBRTE_DPAA_DEBUG_BUS
#define DPAA_BUS_HWWARN(cond, fmt, args...) \
@@ -31,7 +28,11 @@ extern int dpaa_logtype_eventdev;
#endif
#define DPAA_BUS_DEBUG(fmt, args...) \
- DPAA_BUS_LOG(DEBUG, fmt, ## args)
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_bus, "dpaa: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA_BUS_DEBUG(" >>")
+
#define DPAA_BUS_INFO(fmt, args...) \
DPAA_BUS_LOG(INFO, fmt, ## args)
#define DPAA_BUS_ERR(fmt, args...) \
diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index de237f0f..515d0f53 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -9,23 +9,13 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_bus_fslmc.a
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
-CONFIG_RTE_LIBRTE_FSLMC_BUS = $(CONFIG_RTE_LIBRTE_DPAA2_PMD)
-endif
-
CFLAGS += -DALLOW_EXPERIMENTAL_API
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
-CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev
@@ -42,11 +32,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += \
mc/dpmng.c \
- mc/dpbp.c \
- mc/dpio.c \
- mc/mc_sys.c \
+ mc/dpbp.c \
+ mc/dpio.c \
+ mc/mc_sys.c \
mc/dpcon.c \
- mc/dpci.c
+ mc/dpci.c \
+ mc/dpdmai.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpio.c
SRCS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += portal/dpaa2_hw_dpbp.c
diff --git a/drivers/bus/fslmc/fslmc_bus.c b/drivers/bus/fslmc/fslmc_bus.c
index 5ee0beb8..d2900edc 100644
--- a/drivers/bus/fslmc/fslmc_bus.c
+++ b/drivers/bus/fslmc/fslmc_bus.c
@@ -18,11 +18,12 @@
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
+#include "fslmc_logs.h"
-#define FSLMC_BUS_LOG(level, fmt, args...) \
- RTE_LOG(level, EAL, fmt "\n", ##args)
+int dpaa2_logtype_bus;
#define VFIO_IOMMU_GROUP_PATH "/sys/kernel/iommu_groups"
+#define FSLMC_BUS_NAME fslmc
struct rte_fslmc_bus rte_fslmc_bus;
uint8_t dpaa2_virt_mode;
@@ -93,6 +94,41 @@ insert_in_device_list(struct rte_dpaa2_device *newdev)
TAILQ_INSERT_TAIL(&rte_fslmc_bus.device_list, newdev, next);
}
+static struct rte_devargs *
+fslmc_devargs_lookup(struct rte_dpaa2_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
+ RTE_EAL_DEVARGS_FOREACH("fslmc", devargs) {
+ devargs->bus->parse(devargs->name, &dev_name);
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA2_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
+static void
+dump_device_list(void)
+{
+ struct rte_dpaa2_device *dev;
+ uint32_t global_log_level;
+ int local_log_level;
+
+ /* Print the device list only if the log level is set to debug */
+ global_log_level = rte_log_get_global_level();
+ local_log_level = rte_log_get_level(dpaa2_logtype_bus);
+ if (global_log_level == RTE_LOG_DEBUG ||
+ local_log_level == RTE_LOG_DEBUG) {
+ DPAA2_BUS_LOG(DEBUG, "List of devices scanned on bus:");
+ TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
+ DPAA2_BUS_LOG(DEBUG, "\t\t%s", dev->device.name);
+ }
+ }
+}
+
static int
scan_one_fslmc_device(char *dev_name)
{
@@ -109,7 +145,7 @@ scan_one_fslmc_device(char *dev_name)
/* Creating a temporary copy to perform cut-parse over string */
dup_dev_name = strdup(dev_name);
if (!dup_dev_name) {
- FSLMC_BUS_LOG(ERR, "Out of memory.");
+ DPAA2_BUS_ERR("Unable to allocate device name memory");
return -ENOMEM;
}
@@ -120,7 +156,7 @@ scan_one_fslmc_device(char *dev_name)
*/
dev = calloc(1, sizeof(struct rte_dpaa2_device));
if (!dev) {
- FSLMC_BUS_LOG(ERR, "Out of memory.");
+ DPAA2_BUS_ERR("Unable to allocate device object");
free(dup_dev_name);
return -ENOMEM;
}
@@ -128,7 +164,7 @@ scan_one_fslmc_device(char *dev_name)
/* Parse the device name and ID */
t_ptr = strtok(dup_dev_name, ".");
if (!t_ptr) {
- FSLMC_BUS_LOG(ERR, "Incorrect device string observed.");
+ DPAA2_BUS_ERR("Incorrect device name observed");
goto cleanup;
}
if (!strncmp("dpni", t_ptr, 4))
@@ -145,6 +181,8 @@ scan_one_fslmc_device(char *dev_name)
dev->dev_type = DPAA2_CI;
else if (!strncmp("dpmcp", t_ptr, 5))
dev->dev_type = DPAA2_MPORTAL;
+ else if (!strncmp("dpdmai", t_ptr, 6))
+ dev->dev_type = DPAA2_QDMA;
else
dev->dev_type = DPAA2_UNKNOWN;
@@ -153,17 +191,17 @@ scan_one_fslmc_device(char *dev_name)
t_ptr = strtok(NULL, ".");
if (!t_ptr) {
- FSLMC_BUS_LOG(ERR, "Incorrect device string observed (%s).",
- t_ptr);
+ DPAA2_BUS_ERR("Incorrect device string observed");
goto cleanup;
}
sscanf(t_ptr, "%hu", &dev->object_id);
dev->device.name = strdup(dev_name);
if (!dev->device.name) {
- FSLMC_BUS_LOG(ERR, "Out of memory.");
+ DPAA2_BUS_ERR("Unable to clone device name. Out of memory");
goto cleanup;
}
+ dev->device.devargs = fslmc_devargs_lookup(dev);
/* Add device in the fslmc device list */
insert_in_device_list(dev);
@@ -182,6 +220,54 @@ cleanup:
}
static int
+rte_fslmc_parse(const char *name, void *addr)
+{
+ uint16_t dev_id;
+ char *t_ptr;
+ char *sep = strchr(name, ':');
+
+ if (strncmp(name, RTE_STR(FSLMC_BUS_NAME),
+ strlen(RTE_STR(FSLMC_BUS_NAME)))) {
+ return -EINVAL;
+ }
+
+ if (!sep) {
+ DPAA2_BUS_ERR("Incorrect device name observed");
+ return -EINVAL;
+ }
+
+ t_ptr = (char *)(sep + 1);
+
+ if (strncmp("dpni", t_ptr, 4) &&
+ strncmp("dpseci", t_ptr, 6) &&
+ strncmp("dpcon", t_ptr, 5) &&
+ strncmp("dpbp", t_ptr, 4) &&
+ strncmp("dpio", t_ptr, 4) &&
+ strncmp("dpci", t_ptr, 4) &&
+ strncmp("dpmcp", t_ptr, 5) &&
+ strncmp("dpdmai", t_ptr, 6)) {
+ DPAA2_BUS_ERR("Unknown or unsupported device");
+ return -EINVAL;
+ }
+
+ t_ptr = strchr(name, '.');
+ if (!t_ptr) {
+ DPAA2_BUS_ERR("Incorrect device string observed");
+ return -EINVAL;
+ }
+
+ t_ptr = (char *)(t_ptr + 1);
+ if (sscanf(t_ptr, "%hu", &dev_id) <= 0) {
+ DPAA2_BUS_ERR("Incorrect device string observed (%s)", t_ptr);
+ return -EINVAL;
+ }
+
+ if (addr)
+ strcpy(addr, (char *)(sep + 1));
+ return 0;
+}
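/* Worked examples for rte_fslmc_parse() above, read off the code:
 *
 *   "fslmc:dpni.1"   -> 0, addr receives "dpni.1"
 *   "fslmc:dpio.14"  -> 0, addr receives "dpio.14"
 *   "fslmc:dpfoo.1"  -> -EINVAL (unknown object type)
 *   "dpni.1"         -> -EINVAL (missing "fslmc" bus prefix)
 */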
+
+static int
rte_fslmc_scan(void)
{
int ret;
@@ -193,8 +279,7 @@ rte_fslmc_scan(void)
int groupid;
if (process_once) {
- FSLMC_BUS_LOG(DEBUG,
- "Fslmc bus already scanned. Not rescanning");
+ DPAA2_BUS_DEBUG("Fslmc bus already scanned. Not rescanning");
return 0;
}
process_once = 1;
@@ -208,7 +293,7 @@ rte_fslmc_scan(void)
groupid);
dir = opendir(fslmc_dirpath);
if (!dir) {
- FSLMC_BUS_LOG(ERR, "Unable to open VFIO group dir.");
+ DPAA2_BUS_ERR("Unable to open VFIO group directory");
goto scan_fail;
}
@@ -224,9 +309,12 @@ rte_fslmc_scan(void)
device_count += 1;
}
- FSLMC_BUS_LOG(INFO, "fslmc: Bus scan completed");
-
closedir(dir);
+
+ DPAA2_BUS_INFO("FSLMC Bus scan completed");
+ /* If debugging is enabled, the device list is dumped to the log output */
+ dump_device_list();
+
return 0;
scan_fail_cleanup:
@@ -235,7 +323,7 @@ scan_fail_cleanup:
/* Remove all devices in the list */
cleanup_fslmc_device_list();
scan_fail:
- FSLMC_BUS_LOG(DEBUG, "FSLMC Bus Not Available. Skipping.");
+ DPAA2_BUS_DEBUG("FSLMC Bus Not Available. Skipping");
/* Irrespective of failure, scan only return success */
return 0;
}
@@ -254,6 +342,8 @@ static int
rte_fslmc_probe(void)
{
int ret = 0;
+ int probe_all;
+
struct rte_dpaa2_device *dev;
struct rte_dpaa2_driver *drv;
@@ -262,16 +352,29 @@ rte_fslmc_probe(void)
ret = fslmc_vfio_setup_group();
if (ret) {
- FSLMC_BUS_LOG(ERR, "Unable to setup VFIO %d", ret);
+ DPAA2_BUS_ERR("Unable to setup VFIO %d", ret);
+ return 0;
+ }
+
+ /* Map existing memory segments and, in the case of hotpluggable memory,
+ * install a callback handler.
+ */
+ ret = rte_fslmc_vfio_dmamap();
+ if (ret) {
+ DPAA2_BUS_ERR("Unable to DMA map existing VAs: (%d)", ret);
+ /* Not continuing ahead */
+ DPAA2_BUS_ERR("FSLMC VFIO Mapping failed");
return 0;
}
ret = fslmc_vfio_process_group();
if (ret) {
- FSLMC_BUS_LOG(ERR, "Unable to setup devices %d", ret);
+ DPAA2_BUS_ERR("Unable to setup devices %d", ret);
return 0;
}
+ probe_all = rte_fslmc_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;
+
TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
TAILQ_FOREACH(drv, &rte_fslmc_bus.driver_list, next) {
ret = rte_fslmc_match(drv, dev);
@@ -281,9 +384,21 @@ rte_fslmc_probe(void)
if (!drv->probe)
continue;
- ret = drv->probe(drv, dev);
- if (ret)
- FSLMC_BUS_LOG(ERR, "Unable to probe.\n");
+ if (dev->device.devargs &&
+ dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
+ DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
+ dev->device.name);
+ continue;
+ }
+
+ if (probe_all ||
+ (dev->device.devargs &&
+ dev->device.devargs->policy ==
+ RTE_DEV_WHITELISTED)) {
+ ret = drv->probe(drv, dev);
+ if (ret)
+ DPAA2_BUS_ERR("Unable to probe");
+ }
break;
}
}
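/* Illustrative sketch (not part of this patch): with the policy checks
 * above, probing is assumed controllable from the 18.08 EAL command line,
 * e.g.:
 *
 *   testpmd -w fslmc:dpni.1 ...    probe only dpni.1 (whitelist scan mode)
 *   testpmd -b fslmc:dpseci.0 ...  probe everything except dpseci.0
 */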
@@ -298,16 +413,19 @@ static struct rte_device *
rte_fslmc_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
const void *data)
{
+ const struct rte_dpaa2_device *dstart;
struct rte_dpaa2_device *dev;
- TAILQ_FOREACH(dev, &rte_fslmc_bus.device_list, next) {
- if (start && &dev->device == start) {
- start = NULL; /* starting point found */
- continue;
- }
-
+ if (start != NULL) {
+ dstart = RTE_DEV_TO_FSLMC_CONST(start);
+ dev = TAILQ_NEXT(dstart, next);
+ } else {
+ dev = TAILQ_FIRST(&rte_fslmc_bus.device_list);
+ }
+ while (dev != NULL) {
if (cmp(&dev->device, data) == 0)
return &dev->device;
+ dev = TAILQ_NEXT(dev, next);
}
return NULL;
@@ -390,6 +508,7 @@ struct rte_fslmc_bus rte_fslmc_bus = {
.bus = {
.scan = rte_fslmc_scan,
.probe = rte_fslmc_probe,
+ .parse = rte_fslmc_parse,
.find_device = rte_fslmc_find_device,
.get_iommu_class = rte_dpaa2_get_iommu_class,
},
@@ -398,4 +517,12 @@ struct rte_fslmc_bus rte_fslmc_bus = {
.device_count = {0},
};
-RTE_REGISTER_BUS(fslmc, rte_fslmc_bus.bus);
+RTE_REGISTER_BUS(FSLMC_BUS_NAME, rte_fslmc_bus.bus);
+
+RTE_INIT(fslmc_init_log)
+{
+ /* Bus level logs */
+ dpaa2_logtype_bus = rte_log_register("bus.fslmc");
+ if (dpaa2_logtype_bus >= 0)
+ rte_log_set_level(dpaa2_logtype_bus, RTE_LOG_NOTICE);
+}
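/* Illustrative sketch (not part of this patch): raising the bus log level at
 * run time; the same effect is assumed reachable from the 18.08 EAL command
 * line via "--log-level=bus.fslmc,8".
 */
static void example_enable_fslmc_debug(void)
{
	rte_log_set_level(dpaa2_logtype_bus, RTE_LOG_DEBUG);
}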
diff --git a/drivers/bus/fslmc/fslmc_logs.h b/drivers/bus/fslmc/fslmc_logs.h
index d87b6386..dd74cb7d 100644
--- a/drivers/bus/fslmc/fslmc_logs.h
+++ b/drivers/bus/fslmc/fslmc_logs.h
@@ -7,44 +7,35 @@
#ifndef _FSLMC_LOGS_H_
#define _FSLMC_LOGS_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_TX_FREE
-#define PMD_TX_FREE_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+extern int dpaa2_logtype_bus;
+
+#define DPAA2_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_bus, "fslmc: " fmt "\n", \
+ ##args)
+
+/* Debug logs include the calling function name */
+#define DPAA2_BUS_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_bus, "fslmc: %s(): " fmt "\n", \
+ __func__, ##args)
+
+#define BUS_INIT_FUNC_TRACE() DPAA2_BUS_DEBUG(" >>")
+
+#define DPAA2_BUS_INFO(fmt, args...) \
+ DPAA2_BUS_LOG(INFO, fmt, ## args)
+#define DPAA2_BUS_ERR(fmt, args...) \
+ DPAA2_BUS_LOG(ERR, fmt, ## args)
+#define DPAA2_BUS_WARN(fmt, args...) \
+ DPAA2_BUS_LOG(WARNING, fmt, ## args)
+
+/* Datapath logs, compiled out when more verbose than the build-time log level */
+#define DPAA2_BUS_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_BUS_DP_DEBUG(fmt, args...) \
+ DPAA2_BUS_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_BUS_DP_INFO(fmt, args...) \
+ DPAA2_BUS_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_BUS_DP_WARN(fmt, args...) \
+ DPAA2_BUS_DP_LOG(WARNING, fmt, ## args)
#endif /* _FSLMC_LOGS_H_ */
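/* Note: RTE_LOG_DP() behind the DP variants above compiles to a no-op when
 * the message level is more verbose than the build-time RTE_LOG_DP_LEVEL
 * (RTE_LOG_INFO by default), so DPAA2_BUS_DP_DEBUG() is assumed to cost
 * nothing in a default build while DPAA2_BUS_DP_WARN() stays active.
 */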
diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 1241295b..4c2cd2a8 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -30,17 +30,16 @@
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
#include "rte_fslmc.h"
#include "fslmc_vfio.h"
+#include "fslmc_logs.h"
#include <mc/fsl_dpmng.h>
#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"
-#define FSLMC_VFIO_LOG(level, fmt, args...) \
- RTE_LOG(level, EAL, fmt "\n", ##args)
-
/** Pathname of FSL-MC devices directory. */
#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"
@@ -53,7 +52,6 @@ static int container_device_fd;
static char *g_container;
static uint32_t *msi_intr_vaddr;
void *(*rte_mcp_ptr_list);
-static int is_dma_done;
static struct rte_dpaa2_object_list dpaa2_obj_list =
TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);
@@ -76,33 +74,32 @@ fslmc_get_container_group(int *groupid)
if (!g_container) {
container = getenv("DPRC");
if (container == NULL) {
- RTE_LOG(WARNING, EAL, "DPAA2: DPRC not available\n");
+ DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
return -EINVAL;
}
if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
- FSLMC_VFIO_LOG(ERR, "Invalid container name: %s\n",
- container);
+ DPAA2_BUS_ERR("Invalid container name: %s", container);
return -1;
}
g_container = strdup(container);
if (!g_container) {
- FSLMC_VFIO_LOG(ERR, "Out of memory.");
+ DPAA2_BUS_ERR("Memory allocation failure for container name");
return -ENOMEM;
}
}
/* get group number */
- ret = vfio_get_group_no(SYSFS_FSL_MC_DEVICES, g_container, groupid);
+ ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
+ g_container, groupid);
if (ret <= 0) {
- FSLMC_VFIO_LOG(ERR, "Unable to find %s IOMMU group",
- g_container);
+ DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container);
return -1;
}
- FSLMC_VFIO_LOG(DEBUG, "Container: %s has VFIO iommu group id = %d",
- g_container, *groupid);
+ DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
+ g_container, *groupid);
return 0;
}
@@ -113,14 +110,14 @@ vfio_connect_container(void)
int fd, ret;
if (vfio_container.used) {
- FSLMC_VFIO_LOG(DEBUG, "No container available.");
+ DPAA2_BUS_DEBUG("No container available");
return -1;
}
/* Try connecting to vfio container if already created */
if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
&vfio_container.fd)) {
- FSLMC_VFIO_LOG(INFO,
+ DPAA2_BUS_DEBUG(
"Container pre-exists with FD[0x%x] for this group",
vfio_container.fd);
vfio_group.container = &vfio_container;
@@ -128,9 +125,9 @@ vfio_connect_container(void)
}
/* Opens main vfio file descriptor which represents the "container" */
- fd = vfio_get_container_fd();
+ fd = rte_vfio_get_container_fd();
if (fd < 0) {
- FSLMC_VFIO_LOG(ERR, "Failed to open VFIO container");
+ DPAA2_BUS_ERR("Failed to open VFIO container");
return -errno;
}
@@ -139,19 +136,19 @@ vfio_connect_container(void)
/* Connect group to container */
ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
if (ret) {
- FSLMC_VFIO_LOG(ERR, "Failed to setup group container");
+ DPAA2_BUS_ERR("Failed to setup group container");
close(fd);
return -errno;
}
ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
if (ret) {
- FSLMC_VFIO_LOG(ERR, "Failed to setup VFIO iommu");
+ DPAA2_BUS_ERR("Failed to setup VFIO iommu");
close(fd);
return -errno;
}
} else {
- FSLMC_VFIO_LOG(ERR, "No supported IOMMU available");
+ DPAA2_BUS_ERR("No supported IOMMU available");
close(fd);
return -EINVAL;
}
@@ -179,7 +176,7 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
if (vaddr == MAP_FAILED) {
- FSLMC_VFIO_LOG(ERR, "Unable to map region (errno = %d)", errno);
+ DPAA2_BUS_ERR("Unable to map region (errno = %d)", errno);
return -errno;
}
@@ -189,88 +186,195 @@ static int vfio_map_irq_region(struct fslmc_vfio_group *group)
if (ret == 0)
return 0;
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA fails (errno = %d)", errno);
+ DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
return -errno;
}
-int rte_fslmc_vfio_dmamap(void)
+static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
+
+static void
+fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
+ void *arg __rte_unused)
{
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0, map_len = 0;
+ uint64_t virt_addr;
+ rte_iova_t iova_addr;
int ret;
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ while (cur_len < len) {
+ const void *va = RTE_PTR_ADD(addr, cur_len);
+
+ ms = rte_mem_virt2memseg(va, msl);
+ iova_addr = ms->iova;
+ virt_addr = ms->addr_64;
+ map_len = ms->len;
+
+ DPAA2_BUS_DEBUG("Request for %s, va=%p, "
+ "virt_addr=0x%" PRIx64 ", "
+ "iova=0x%" PRIx64 ", map_len=%zu",
+ type == RTE_MEM_EVENT_ALLOC ?
+ "alloc" : "dealloc",
+ va, virt_addr, iova_addr, map_len);
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
+ else
+ ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);
+
+ if (ret != 0) {
+ DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
+ "Map=%d, addr=%p, len=%zu, err:(%d)",
+ type, va, map_len, ret);
+ return;
+ }
+
+ cur_len += map_len;
+ }
+
+ if (type == RTE_MEM_EVENT_ALLOC)
+ DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
+ addr, len);
+ else
+ DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
+ addr, len);
+}
+
+static int
+fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
+{
struct fslmc_vfio_group *group;
struct vfio_iommu_type1_dma_map dma_map = {
.argsz = sizeof(struct vfio_iommu_type1_dma_map),
.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
};
+ int ret;
- int i;
- const struct rte_memseg *memseg;
+ dma_map.size = len;
+ dma_map.vaddr = vaddr;
- if (is_dma_done)
- return 0;
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+ dma_map.iova = iovaddr;
+#else
+ dma_map.iova = dma_map.vaddr;
+#endif
- memseg = rte_eal_get_physmem_layout();
- if (memseg == NULL) {
- FSLMC_VFIO_LOG(ERR, "Cannot get physical layout.");
- return -ENODEV;
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_group;
+
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected");
+ return -1;
}
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (memseg[i].addr == NULL && memseg[i].len == 0) {
- FSLMC_VFIO_LOG(DEBUG, "Total %d segments found.", i);
- break;
- }
+ DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA API(errno = %d)",
+ errno);
+ return -1;
+ }
- dma_map.size = memseg[i].len;
- dma_map.vaddr = memseg[i].addr_64;
-#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- dma_map.iova = dma_map.vaddr;
- else
- dma_map.iova = memseg[i].iova;
-#else
- dma_map.iova = dma_map.vaddr;
-#endif
+ return 0;
+}
- /* SET DMA MAP for IOMMU */
- group = &vfio_group;
+static int
+fslmc_unmap_dma(uint64_t vaddr, uint64_t iovaddr __rte_unused, size_t len)
+{
+ struct fslmc_vfio_group *group;
+ struct vfio_iommu_type1_dma_unmap dma_unmap = {
+ .argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
+ .flags = 0,
+ };
+ int ret;
- if (!group->container) {
- FSLMC_VFIO_LOG(ERR, "Container is not connected ");
- return -1;
- }
+ dma_unmap.size = len;
+ dma_unmap.iova = vaddr;
- FSLMC_VFIO_LOG(DEBUG, "-->Initial SHM Virtual ADDR %llX",
- dma_map.vaddr);
- FSLMC_VFIO_LOG(DEBUG, "-----> DMA size 0x%llX", dma_map.size);
- ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA,
- &dma_map);
- if (ret) {
- FSLMC_VFIO_LOG(ERR, "VFIO_IOMMU_MAP_DMA API(errno = %d)",
- errno);
- return ret;
- }
+ /* SET DMA MAP for IOMMU */
+ group = &vfio_group;
+
+ if (!group->container) {
+ DPAA2_BUS_ERR("Container is not connected");
+ return -1;
}
- /* Verifying that at least single segment is available */
- if (i <= 0) {
- FSLMC_VFIO_LOG(ERR, "No Segments found for VFIO Mapping");
+ DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
+ (uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
+ ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
+ if (ret) {
+ DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA API(errno = %d)",
+ errno);
return -1;
}
+ return 0;
+}
+
+static int
+fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ int *n_segs = arg;
+ int ret;
+
+ ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
+ if (ret)
+ DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
+ ms->addr, ms->len);
+ else
+ (*n_segs)++;
+
+ return ret;
+}
+
+int rte_fslmc_vfio_dmamap(void)
+{
+ int i = 0, ret;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
+
+ /* Lock before parsing and registering callback to memory subsystem */
+ rte_rwlock_read_lock(mem_lock);
+
+ if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
+ rte_rwlock_read_unlock(mem_lock);
+ return -1;
+ }
+
+ ret = rte_mem_event_callback_register("fslmc_memevent_clb",
+ fslmc_memevent_cb, NULL);
+ if (ret && rte_errno == ENOTSUP)
+ DPAA2_BUS_DEBUG("Memory event callbacks not supported");
+ else if (ret)
+ DPAA2_BUS_DEBUG("Unable to install memory handler");
+ else
+ DPAA2_BUS_DEBUG("Installed memory callback handler");
+
+ DPAA2_BUS_DEBUG("Total %d segments found.", i);
+
/* TODO - This is a W.A. as VFIO currently does not add the mapping of
* the interrupt region to SMMU. This should be removed once the
* support is added in the Kernel.
*/
- vfio_map_irq_region(group);
+ vfio_map_irq_region(&vfio_group);
- is_dma_done = 1;
+ /* Existing segments have been mapped and memory callback for hotplug
+ * has been installed.
+ */
+ rte_rwlock_read_unlock(mem_lock);
return 0;
}
static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
{
- int64_t v_addr = (int64_t)MAP_FAILED;
+ intptr_t v_addr = (intptr_t)MAP_FAILED;
int32_t ret, mc_fd;
struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
@@ -279,29 +383,26 @@ static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
/* getting the mcp object's fd*/
mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
if (mc_fd < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO get dev %s fd from group %d",
- mcp_obj, group->fd);
+ DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d",
+ mcp_obj, group->fd);
return v_addr;
}
/* getting device info*/
ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO getting DEVICE_INFO");
+ DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO");
goto MC_FAILURE;
}
/* getting device region info*/
ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR, "error in VFIO getting REGION_INFO");
+ DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO");
goto MC_FAILURE;
}
- FSLMC_VFIO_LOG(DEBUG, "region offset = %llx , region size = %llx",
- reg_info.offset, reg_info.size);
-
- v_addr = (uint64_t)mmap(NULL, reg_info.size,
+ v_addr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
mc_fd, reg_info.offset);
@@ -334,8 +435,8 @@ int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index)
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret) {
- RTE_LOG(ERR, EAL, "Error:dpaa2 SET IRQs fd=%d, err = %d(%s)\n",
- intr_handle->fd, errno, strerror(errno));
+ DPAA2_BUS_ERR("Error:dpaa2 SET IRQs fd=%d, err = %d(%s)",
+ intr_handle->fd, errno, strerror(errno));
return ret;
}
@@ -359,8 +460,8 @@ int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
if (ret)
- RTE_LOG(ERR, EAL,
- "Error disabling dpaa2 interrupts for fd %d\n",
+ DPAA2_BUS_ERR(
+ "Error disabling dpaa2 interrupts for fd %d",
intr_handle->fd);
return ret;
@@ -383,9 +484,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR,
- "cannot get IRQ(%d) info, error %i (%s)",
- i, errno, strerror(errno));
+ DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
+ i, errno, strerror(errno));
return -1;
}
@@ -399,9 +499,8 @@ rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
/* set up an eventfd for interrupts */
fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
if (fd < 0) {
- FSLMC_VFIO_LOG(ERR,
- "cannot set up eventfd, error %i (%s)\n",
- errno, strerror(errno));
+ DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
+ errno, strerror(errno));
return -1;
}
@@ -430,13 +529,14 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD,
dev->device.name);
if (dev_fd <= 0) {
- FSLMC_VFIO_LOG(ERR, "Unable to obtain device FD for device:%s",
- dev->device.name);
+ DPAA2_BUS_ERR("Unable to obtain device FD for device:%s",
+ dev->device.name);
return -1;
}
if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) {
- FSLMC_VFIO_LOG(ERR, "DPAA2 VFIO_DEVICE_GET_INFO fail");
+ DPAA2_BUS_ERR("Unable to obtain information for device:%s",
+ dev->device.name);
return -1;
}
@@ -461,61 +561,74 @@ fslmc_process_iodevices(struct rte_dpaa2_device *dev)
break;
}
- FSLMC_VFIO_LOG(DEBUG, "Device (%s) abstracted from VFIO",
- dev->device.name);
+ DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
+ dev->device.name);
return 0;
}
static int
fslmc_process_mcp(struct rte_dpaa2_device *dev)
{
- int64_t v_addr;
- char *dev_name;
+ int ret;
+ intptr_t v_addr;
+ char *dev_name = NULL;
struct fsl_mc_io dpmng = {0};
struct mc_version mc_ver_info = {0};
rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
if (!rte_mcp_ptr_list) {
- FSLMC_VFIO_LOG(ERR, "Out of memory");
- return -ENOMEM;
+ DPAA2_BUS_ERR("Unable to allocate MC portal memory");
+ ret = -ENOMEM;
+ goto cleanup;
}
dev_name = strdup(dev->device.name);
if (!dev_name) {
- FSLMC_VFIO_LOG(ERR, "Out of memory.");
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
- return -ENOMEM;
+ DPAA2_BUS_ERR("Unable to allocate MC device name memory");
+ ret = -ENOMEM;
+ goto cleanup;
}
v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
- if (v_addr == (int64_t)MAP_FAILED) {
- FSLMC_VFIO_LOG(ERR, "Error mapping region (errno = %d)",
- errno);
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
- return -1;
+ if (v_addr == (intptr_t)MAP_FAILED) {
+ DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
+ ret = -1;
+ goto cleanup;
}
/* check the MC version compatibility */
dpmng.regs = (void *)v_addr;
- if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info))
- RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
+ if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
+ DPAA2_BUS_ERR("Unable to obtain MC version");
+ ret = -1;
+ goto cleanup;
+ }
if ((mc_ver_info.major != MC_VER_MAJOR) ||
(mc_ver_info.minor < MC_VER_MINOR)) {
- RTE_LOG(ERR, PMD, "DPAA2 MC version not compatible!"
- " Expected %d.%d.x, Detected %d.%d.%d\n",
- MC_VER_MAJOR, MC_VER_MINOR,
- mc_ver_info.major, mc_ver_info.minor,
- mc_ver_info.revision);
- free(rte_mcp_ptr_list);
- rte_mcp_ptr_list = NULL;
- return -1;
+ DPAA2_BUS_ERR("DPAA2 MC version not compatible!"
+ " Expected %d.%d.x, Detected %d.%d.%d",
+ MC_VER_MAJOR, MC_VER_MINOR,
+ mc_ver_info.major, mc_ver_info.minor,
+ mc_ver_info.revision);
+ ret = -1;
+ goto cleanup;
}
rte_mcp_ptr_list[0] = (void *)v_addr;
+ free(dev_name);
return 0;
+
+cleanup:
+ if (dev_name)
+ free(dev_name);
+
+ if (rte_mcp_ptr_list) {
+ free(rte_mcp_ptr_list);
+ rte_mcp_ptr_list = NULL;
+ }
+
+ return ret;
}
int
@@ -530,7 +643,7 @@ fslmc_vfio_process_group(void)
if (dev->dev_type == DPAA2_MPORTAL) {
ret = fslmc_process_mcp(dev);
if (ret) {
- FSLMC_VFIO_LOG(DEBUG, "Unable to map Portal.");
+ DPAA2_BUS_ERR("Unable to map MC Portal");
return -1;
}
if (!found_mportal)
@@ -547,23 +660,19 @@ fslmc_vfio_process_group(void)
/* Cannot continue if there is not even a single mportal */
if (!found_mportal) {
- FSLMC_VFIO_LOG(DEBUG,
- "No MC Portal device found. Not continuing.");
+ DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
return -1;
}
TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
- if (!dev)
- break;
-
switch (dev->dev_type) {
case DPAA2_ETH:
case DPAA2_CRYPTO:
+ case DPAA2_QDMA:
ret = fslmc_process_iodevices(dev);
if (ret) {
- FSLMC_VFIO_LOG(DEBUG,
- "Dev (%s) init failed.",
- dev->device.name);
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
return ret;
}
break;
@@ -576,9 +685,8 @@ fslmc_vfio_process_group(void)
*/
ret = fslmc_process_iodevices(dev);
if (ret) {
- FSLMC_VFIO_LOG(DEBUG,
- "Dev (%s) init failed.",
- dev->device.name);
+ DPAA2_BUS_DEBUG("Dev (%s) init failed",
+ dev->device.name);
return -1;
}
@@ -592,8 +700,8 @@ fslmc_vfio_process_group(void)
case DPAA2_UNKNOWN:
default:
/* Unknown - ignore */
- FSLMC_VFIO_LOG(DEBUG, "Found unknown device (%s).",
- dev->device.name);
+ DPAA2_BUS_DEBUG("Found unknown device (%s)",
+ dev->device.name);
TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
free(dev);
dev = NULL;
@@ -622,12 +730,12 @@ fslmc_vfio_setup_group(void)
* processing.
*/
if (vfio_group.groupid == groupid) {
- FSLMC_VFIO_LOG(ERR, "groupid already exists %d", groupid);
+ DPAA2_BUS_ERR("groupid already exists %d", groupid);
return 0;
}
/* Get the actual group fd */
- ret = vfio_get_group_fd(groupid);
+ ret = rte_vfio_get_group_fd(groupid);
if (ret < 0)
return ret;
vfio_group.fd = ret;
@@ -635,14 +743,14 @@ fslmc_vfio_setup_group(void)
/* Check group viability */
ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
if (ret) {
- FSLMC_VFIO_LOG(ERR, "VFIO error getting group status");
+ DPAA2_BUS_ERR("VFIO error getting group status");
close(vfio_group.fd);
rte_vfio_clear_group(vfio_group.fd);
return ret;
}
if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
- FSLMC_VFIO_LOG(ERR, "VFIO group not viable");
+ DPAA2_BUS_ERR("VFIO group not viable");
close(vfio_group.fd);
rte_vfio_clear_group(vfio_group.fd);
return -EPERM;
@@ -655,7 +763,7 @@ fslmc_vfio_setup_group(void)
/* Now connect this IOMMU group to given container */
ret = vfio_connect_container();
if (ret) {
- FSLMC_VFIO_LOG(ERR,
+ DPAA2_BUS_ERR(
"Error connecting container with groupid %d",
groupid);
close(vfio_group.fd);
@@ -667,15 +775,15 @@ fslmc_vfio_setup_group(void)
/* Get Device information */
ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container);
if (ret < 0) {
- FSLMC_VFIO_LOG(ERR, "Error getting device %s fd from group %d",
- g_container, vfio_group.groupid);
+ DPAA2_BUS_ERR("Error getting device %s fd from group %d",
+ g_container, vfio_group.groupid);
close(vfio_group.fd);
rte_vfio_clear_group(vfio_group.fd);
return ret;
}
container_device_fd = ret;
- FSLMC_VFIO_LOG(DEBUG, "VFIO Container FD is [0x%X]",
- container_device_fd);
+ DPAA2_BUS_DEBUG("VFIO Container FD is [0x%X]",
+ container_device_fd);
return 0;
}
diff --git a/drivers/bus/fslmc/fslmc_vfio.h b/drivers/bus/fslmc/fslmc_vfio.h
index e8fb3445..9e2c4fee 100644
--- a/drivers/bus/fslmc/fslmc_vfio.h
+++ b/drivers/bus/fslmc/fslmc_vfio.h
@@ -10,8 +10,6 @@
#include <rte_vfio.h>
-#include "eal_vfio.h"
-
#define DPAA2_MC_DPNI_DEVID 7
#define DPAA2_MC_DPSECI_DEVID 3
#define DPAA2_MC_DPCON_DEVID 5
diff --git a/drivers/bus/fslmc/mc/dpdmai.c b/drivers/bus/fslmc/mc/dpdmai.c
new file mode 100644
index 00000000..528889df
--- /dev/null
+++ b/drivers/bus/fslmc/mc/dpdmai.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpdmai.h>
+#include <fsl_dpdmai_cmd.h>
+
+/**
+ * dpdmai_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: DPDMAI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmai_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token)
+{
+ struct dpdmai_cmd_open *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpdmai_cmd_open *)cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+ cmd_flags, token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
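/* Illustrative sketch (not part of this patch): the assumed lifecycle of a
 * DPDMAI object from a driver's perspective; mc_io and dpdmai_id come from
 * the caller's container setup.
 */
static int example_dpdmai_bringup(struct fsl_mc_io *mc_io, int dpdmai_id)
{
	uint16_t token;
	int err;

	err = dpdmai_open(mc_io, CMD_PRI_LOW, dpdmai_id, &token);
	if (err)
		return err;

	err = dpdmai_enable(mc_io, CMD_PRI_LOW, token);
	if (err) {
		dpdmai_close(mc_io, CMD_PRI_LOW, token);
		return err;
	}

	/* ... configure queues and issue DMA jobs here ... */

	dpdmai_disable(mc_io, CMD_PRI_LOW, token);
	return dpdmai_close(mc_io, CMD_PRI_LOW, token);
}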
+
+/**
+ * dpdmai_create() - Create the DPDMAI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPDMAI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpdmai_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct dpdmai_cmd_create *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmai_cmd_create *)cmd.params;
+ cmd_params->priorities[0] = cfg->priorities[0];
+ cmd_params->priorities[1] = cfg->priorities[1];
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object.
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpdmai_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct dpdmai_cmd_destroy *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmai_cmd_destroy *)cmd.params;
+ cmd_params->dpdmai_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct dpdmai_rsp_is_enabled *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_is_enabled *)cmd.params;
+ *en = dpdmai_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpdmai_attr *attr)
+{
+ struct dpdmai_rsp_get_attr *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_of_priorities = rsp_params->num_of_priorities;
+
+ return 0;
+}
+
+/**
+ * dpdmai_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation; use
+ * DPDMAI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg)
+{
+ struct dpdmai_cmd_set_rx_queue *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->priority = priority;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpdmai_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_rx_queue_attr *attr)
+{
+ struct dpdmai_cmd_get_queue *cmd_params;
+ struct dpdmai_rsp_get_rx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = le32_to_cpu(rsp_params->dest_priority);
+ attr->dest_cfg.dest_type = dpdmai_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMAI object
+ * @priority: Select the queue relative to number of
+ * priorities configured at DPDMAI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr)
+{
+ struct dpdmai_cmd_get_queue *cmd_params;
+ struct dpdmai_rsp_get_tx_queue *rsp_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmai_cmd_get_queue *)cmd.params;
+ cmd_params->priority = priority;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+
+ return 0;
+}
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai.h b/drivers/bus/fslmc/mc/fsl_dpdmai.h
new file mode 100644
index 00000000..03e46ec1
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __FSL_DPDMAI_H
+#define __FSL_DPDMAI_H
+
+struct fsl_mc_io;
+
+/* Data Path DMA Interface API
+ * Contains initialization APIs and runtime control APIs for DPDMAI
+ */
+
+/* General DPDMAI macros */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPDMAI object
+ */
+#define DPDMAI_PRIO_NUM 2
+
+/**
+ * All queues considered; see dpdmai_set_rx_queue()
+ */
+#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
+
+int dpdmai_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpdmai_id,
+ uint16_t *token);
+
+int dpdmai_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
+ * configured with values 1-8; the entry following the last valid
+ * entry should be set to 0
+ */
+struct dpdmai_cfg {
+ uint8_t priorities[DPDMAI_PRIO_NUM];
+};
+
+int dpdmai_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpdmai_cfg *cfg,
+ uint32_t *obj_id);
+
+int dpdmai_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+int dpdmai_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpdmai_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+int dpdmai_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+int dpdmai_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpdmai_attr - Structure representing DPDMAI attributes
+ * @id: DPDMAI object ID
+ * @num_of_priorities: number of priorities
+ */
+struct dpdmai_attr {
+ int id;
+ uint8_t num_of_priorities;
+};
+
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpdmai_attr *attr);
+
+/**
+ * enum dpdmai_dest - DPDMAI destination types
+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpdmai_dest {
+ DPDMAI_DEST_NONE = 0,
+ DPDMAI_DEST_DPIO = 1,
+ DPDMAI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPDMAI_DEST_NONE' option
+ */
+struct dpdmai_dest_cfg {
+ enum dpdmai_dest dest_type;
+ int dest_id;
+ uint8_t priority;
+};
+
+/* DPDMAI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPDMAI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpdmai_rx_queue_cfg {
+ uint32_t options;
+ uint64_t user_ctx;
+ struct dpdmai_dest_cfg dest_cfg;
+
+};
+
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ const struct dpdmai_rx_queue_cfg *cfg);
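/* Illustrative sketch (not part of this patch): configuring Rx queue 0 to
 * raise FQDAN notifications towards a DPIO. dpio_id and the user context
 * value are placeholders; CMD_PRI_LOW comes from fsl_mc_cmd.h.
 */
static inline int example_dpdmai_set_rxq(struct fsl_mc_io *mc_io,
					 uint16_t token, int dpio_id)
{
	struct dpdmai_rx_queue_cfg cfg = {
		.options = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
		.user_ctx = 0x1234,
		.dest_cfg = {
			.dest_type = DPDMAI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	return dpdmai_set_rx_queue(mc_io, CMD_PRI_LOW, token, 0, &cfg);
}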
+
+/**
+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpdmai_rx_queue_attr {
+ uint64_t user_ctx;
+ struct dpdmai_dest_cfg dest_cfg;
+ uint32_t fqid;
+};
+
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_rx_queue_attr *attr);
+
+/**
+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware
+ */
+struct dpdmai_tx_queue_attr {
+ uint32_t fqid;
+};
+
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t priority,
+ struct dpdmai_tx_queue_attr *attr);
+
+#endif /* __FSL_DPDMAI_H */
diff --git a/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
new file mode 100644
index 00000000..618e19ea
--- /dev/null
+++ b/drivers/bus/fslmc/mc/fsl_dpdmai_cmd.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _FSL_DPDMAI_CMD_H
+#define _FSL_DPDMAI_CMD_H
+
+/* DPDMAI Version */
+#define DPDMAI_VER_MAJOR 3
+#define DPDMAI_VER_MINOR 2
+
+/* Command versioning */
+#define DPDMAI_CMD_BASE_VERSION 1
+#define DPDMAI_CMD_ID_OFFSET 4
+
+#define DPDMAI_CMD(id) ((id << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION)
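/* Worked example: DPDMAI_CMDID_OPEN below expands to
 *   DPDMAI_CMD(0x80E) = (0x80E << 4) | 1 = 0x80E1
 * i.e. the command id in the upper bits with the command version in the
 * low nibble.
 */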
+
+/* Command IDs */
+#define DPDMAI_CMDID_CLOSE DPDMAI_CMD(0x800)
+#define DPDMAI_CMDID_OPEN DPDMAI_CMD(0x80E)
+#define DPDMAI_CMDID_CREATE DPDMAI_CMD(0x90E)
+#define DPDMAI_CMDID_DESTROY DPDMAI_CMD(0x98E)
+#define DPDMAI_CMDID_GET_API_VERSION DPDMAI_CMD(0xa0E)
+
+#define DPDMAI_CMDID_ENABLE DPDMAI_CMD(0x002)
+#define DPDMAI_CMDID_DISABLE DPDMAI_CMD(0x003)
+#define DPDMAI_CMDID_GET_ATTR DPDMAI_CMD(0x004)
+#define DPDMAI_CMDID_RESET DPDMAI_CMD(0x005)
+#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMD(0x006)
+
+#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMD(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMD(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMD(0x1A2)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPDMAI_MASK(field) \
+ GENMASK(DPDMAI_##field##_SHIFT + DPDMAI_##field##_SIZE - 1, \
+ DPDMAI_##field##_SHIFT)
+#define dpdmai_set_field(var, field, val) \
+ ((var) |= (((val) << DPDMAI_##field##_SHIFT) & DPDMAI_MASK(field)))
+#define dpdmai_get_field(var, field) \
+ (((var) & DPDMAI_MASK(field)) >> DPDMAI_##field##_SHIFT)
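/* Worked example with the DEST_TYPE field defined later in this file
 * (SHIFT = 0, SIZE = 4):
 *   DPDMAI_MASK(DEST_TYPE)            = GENMASK(3, 0) = 0xF
 *   dpdmai_set_field(v, DEST_TYPE, 2) -> v |= (2 & 0xF)
 *   dpdmai_get_field(v, DEST_TYPE)    -> (v & 0xF) >> 0
 */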
+
+#pragma pack(push, 1)
+struct dpdmai_cmd_open {
+ uint32_t dpdmai_id;
+};
+
+struct dpdmai_cmd_create {
+ uint8_t pad;
+ uint8_t priorities[2];
+};
+
+struct dpdmai_cmd_destroy {
+ uint32_t dpdmai_id;
+};
+
+#define DPDMAI_ENABLE_SHIFT 0
+#define DPDMAI_ENABLE_SIZE 1
+
+struct dpdmai_rsp_is_enabled {
+ /* only the LSB */
+ uint8_t en;
+};
+
+struct dpdmai_rsp_get_attr {
+ uint32_t id;
+ uint8_t num_of_priorities;
+};
+
+#define DPDMAI_DEST_TYPE_SHIFT 0
+#define DPDMAI_DEST_TYPE_SIZE 4
+
+struct dpdmai_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t priority;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+};
+
+struct dpdmai_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t priority;
+};
+
+struct dpdmai_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad1;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad2;
+ uint64_t user_ctx;
+ uint32_t fqid;
+};
+
+struct dpdmai_rsp_get_tx_queue {
+ uint64_t pad;
+ uint32_t fqid;
+};
+
+#pragma pack(pop)
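/* The pack(push, 1)/pack(pop) pair keeps the structures above free of
 * padding so they overlay the command/response parameter words of struct
 * mc_command byte-for-byte; implicit padding would misalign fields relative
 * to the layout the MC firmware expects.
 */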
+#endif /* _FSL_DPDMAI_CMD_H */
diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
index a3c3e798..ac919610 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -27,7 +27,7 @@
#define le32_to_cpu rte_le_to_cpu_32
#define le16_to_cpu rte_le_to_cpu_16
-#define BITS_PER_LONG 64
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) \
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
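/* Rationale (assumed): with the old hardcoded 64, GENMASK() shifted a
 * 32-bit unsigned long by up to 63 bits on ILP32 targets, which is
 * undefined behaviour; __SIZEOF_LONG__ * 8 evaluates to 32 there and keeps
 * the shift counts in range.
 */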
diff --git a/drivers/bus/fslmc/meson.build b/drivers/bus/fslmc/meson.build
new file mode 100644
index 00000000..22a56a6f
--- /dev/null
+++ b/drivers/bus/fslmc/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['eventdev', 'kvargs']
+sources = files('fslmc_bus.c',
+ 'fslmc_vfio.c',
+ 'mc/dpbp.c',
+ 'mc/dpci.c',
+ 'mc/dpcon.c',
+ 'mc/dpdmai.c',
+ 'mc/dpio.c',
+ 'mc/dpmng.c',
+ 'mc/mc_sys.c',
+ 'portal/dpaa2_hw_dpbp.c',
+ 'portal/dpaa2_hw_dpci.c',
+ 'portal/dpaa2_hw_dpio.c',
+ 'qbman/qbman_portal.c',
+ 'qbman/qbman_debug.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'qbman/include', 'portal')
+cflags += ['-D_GNU_SOURCE']
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
index f1f14e29..39c5adf9 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpbp.c
@@ -44,7 +44,7 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
/* Allocate DPAA2 dpbp handle */
dpbp_node = rte_malloc(NULL, sizeof(struct dpaa2_dpbp_dev), 0);
if (!dpbp_node) {
- PMD_INIT_LOG(ERR, "Memory allocation failed for DPBP Device");
+ DPAA2_BUS_ERR("Memory allocation failed for DPBP Device");
return -1;
}
@@ -53,8 +53,8 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
ret = dpbp_open(&dpbp_node->dpbp,
CMD_PRI_LOW, dpbp_id, &dpbp_node->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
- ret);
+ DPAA2_BUS_ERR("Unable to open buffer pool object: err(%d)",
+ ret);
rte_free(dpbp_node);
return -1;
}
@@ -62,8 +62,8 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
/* Clean the device first */
ret = dpbp_reset(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure cleaning dpbp device with"
- " error code %d\n", ret);
+ DPAA2_BUS_ERR("Unable to reset buffer pool device. err(%d)",
+ ret);
dpbp_close(&dpbp_node->dpbp, CMD_PRI_LOW, dpbp_node->token);
rte_free(dpbp_node);
return -1;
@@ -74,8 +74,6 @@ dpaa2_create_dpbp_device(int vdev_fd __rte_unused,
TAILQ_INSERT_TAIL(&dpbp_dev_list, dpbp_node, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpbp.%d]\n", dpbp_id);
-
if (!register_once) {
rte_mbuf_set_platform_mempool_ops(DPAA2_MEMPOOL_OPS_NAME);
register_once = 1;
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
index fb28e497..5ad0374d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpci.c
@@ -39,13 +39,14 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
struct dpci_attr attr;
struct dpci_rx_queue_cfg rx_queue_cfg;
struct dpci_rx_queue_attr rx_attr;
+ struct dpci_tx_queue_attr tx_attr;
int ret, i;
/* Allocate DPAA2 dpci handle */
dpci_node = rte_malloc(NULL, sizeof(struct dpaa2_dpci_dev), 0);
if (!dpci_node) {
- PMD_INIT_LOG(ERR, "Memory allocation failed for DPCI Device");
- return -1;
+ DPAA2_BUS_ERR("Memory allocation failed for DPCI Device");
+ return -ENOMEM;
}
/* Open the dpci object */
@@ -53,43 +54,57 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
ret = dpci_open(&dpci_node->dpci,
CMD_PRI_LOW, dpci_id, &dpci_node->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Resource alloc failure with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ DPAA2_BUS_ERR("Resource alloc failure with err code: %d", ret);
+ goto err;
}
/* Get the device attributes */
ret = dpci_get_attributes(&dpci_node->dpci,
CMD_PRI_LOW, dpci_node->token, &attr);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Reading device failed with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ DPAA2_BUS_ERR("Reading device failed with err code: %d", ret);
+ goto err;
}
- /* Set up the Rx Queue */
- memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
- ret = dpci_set_rx_queue(&dpci_node->dpci,
- CMD_PRI_LOW,
- dpci_node->token,
- 0, &rx_queue_cfg);
- if (ret) {
- PMD_INIT_LOG(ERR, "Setting Rx queue failed with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpci_rx_queue_cfg));
+ ret = dpci_set_rx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token,
+ i, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_BUS_ERR("Setting Rx queue failed with err code: %d",
+ ret);
+ goto err;
+ }
+
+ /* Allocate DQ storage for the DPCI Rx queues */
+ rxq = &(dpci_node->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_BUS_ERR("q_storage allocation failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_BUS_ERR("dpaa2_alloc_dq_storage failed\n");
+ goto err;
+ }
}
/* Enable the device */
ret = dpci_enable(&dpci_node->dpci,
CMD_PRI_LOW, dpci_node->token);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Enabling device failed with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ DPAA2_BUS_ERR("Enabling device failed with err code: %d", ret);
+ goto err;
}
for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
@@ -99,14 +114,22 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
dpci_node->token, i,
&rx_attr);
if (ret != 0) {
- PMD_INIT_LOG(ERR,
- "Reading device failed with err code: %d",
- ret);
- rte_free(dpci_node);
- return -1;
+ DPAA2_BUS_ERR("Rx queue fetch failed with err code: %d",
+ ret);
+ goto err;
}
+ dpci_node->rx_queue[i].fqid = rx_attr.fqid;
- dpci_node->queue[i].fqid = rx_attr.fqid;
+ ret = dpci_get_tx_queue(&dpci_node->dpci,
+ CMD_PRI_LOW,
+ dpci_node->token, i,
+ &tx_attr);
+ if (ret != 0) {
+ DPAA2_BUS_ERR("Reading device failed with err code: %d",
+ ret);
+ goto err;
+ }
+ dpci_node->tx_queue[i].fqid = tx_attr.fqid;
}
dpci_node->dpci_id = dpci_id;
@@ -114,9 +137,20 @@ rte_dpaa2_create_dpci_device(int vdev_fd __rte_unused,
TAILQ_INSERT_TAIL(&dpci_dev_list, dpci_node, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpci.%d]\n", dpci_id);
-
return 0;
+
+err:
+ for (i = 0; i < DPAA2_DPCI_MAX_QUEUES; i++) {
+ struct dpaa2_queue *rxq = &(dpci_node->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+ rte_free(dpci_node);
+
+ return ret;
}
struct dpaa2_dpci_dev *rte_dpaa2_alloc_dpci_dev(void)
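
The dpci rework above replaces per-branch rte_free() calls with a single err: label that unwinds whatever was set up before the failure. A minimal sketch of that idiom; the names here are illustrative, not DPDK API:

    #include <stdlib.h>

    #define NQUEUES 2

    struct queue { void *storage; };

    static int create_device(void)
    {
        struct queue q[NQUEUES] = { { 0 } };
        int i, ret;

        for (i = 0; i < NQUEUES; i++) {
            q[i].storage = malloc(64);
            if (!q[i].storage) {
                ret = -1;
                goto err;    /* one exit path for every failure */
            }
        }
        return 0;

    err:
        /* Release only what was allocated; free(NULL) is a no-op */
        for (i = 0; i < NQUEUES; i++)
            free(q[i].storage);
        return ret;
    }

    int main(void)
    {
        return create_device();
    }
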
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index eefde155..99f70be1 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -101,7 +101,7 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
snprintf(string, STRING_LEN, "dpio.%d", dpio_id);
file = fopen("/proc/interrupts", "r");
if (!file) {
- PMD_DRV_LOG(WARNING, "Failed to open /proc/interrupts file\n");
+ DPAA2_BUS_WARN("Failed to open /proc/interrupts file");
return;
}
while (getline(&temp, &len, file) != -1) {
@@ -112,8 +112,8 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
}
if (!token) {
- PMD_DRV_LOG(WARNING, "Failed to get interrupt id for dpio.%d\n",
- dpio_id);
+ DPAA2_BUS_WARN("Failed to get interrupt id for dpio.%d",
+ dpio_id);
if (temp)
free(temp);
fclose(file);
@@ -125,10 +125,10 @@ static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
cpu_mask, token);
ret = system(command);
if (ret < 0)
- PMD_DRV_LOG(WARNING,
- "Failed to affine interrupts on respective core\n");
+ DPAA2_BUS_WARN(
+ "Failed to affine interrupts on respective core");
else
- PMD_DRV_LOG(WARNING, " %s command is executed\n", command);
+ DPAA2_BUS_DEBUG(" %s command is executed", command);
free(temp);
fclose(file);
@@ -143,7 +143,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
dpio_epoll_fd = epoll_create(1);
ret = rte_dpaa2_intr_enable(&dpio_dev->intr_handle, 0);
if (ret) {
- PMD_DRV_LOG(ERR, "Interrupt registeration failed\n");
+ DPAA2_BUS_ERR("Interrupt registeration failed");
return -1;
}
@@ -166,7 +166,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
ret = epoll_ctl(dpio_epoll_fd, EPOLL_CTL_ADD, eventfd, &epoll_ev);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "epoll_ctl failed\n");
+ DPAA2_BUS_ERR("epoll_ctl failed");
return -1;
}
dpio_dev->epoll_fd = dpio_epoll_fd;
@@ -185,28 +185,27 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
dpio_dev->dpio = malloc(sizeof(struct fsl_mc_io));
if (!dpio_dev->dpio) {
- PMD_INIT_LOG(ERR, "Memory allocation failure\n");
+ DPAA2_BUS_ERR("Memory allocation failure");
return -1;
}
- PMD_DRV_LOG(DEBUG, "Allocated DPIO Portal[%p]", dpio_dev->dpio);
dpio_dev->dpio->regs = dpio_dev->mc_portal;
if (dpio_open(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->hw_id,
&dpio_dev->token)) {
- PMD_INIT_LOG(ERR, "Failed to allocate IO space\n");
+ DPAA2_BUS_ERR("Failed to allocate IO space");
free(dpio_dev->dpio);
return -1;
}
if (dpio_reset(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
- PMD_INIT_LOG(ERR, "Failed to reset dpio\n");
+ DPAA2_BUS_ERR("Failed to reset dpio");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
}
if (dpio_enable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token)) {
- PMD_INIT_LOG(ERR, "Failed to Enable dpio\n");
+ DPAA2_BUS_ERR("Failed to Enable dpio");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
@@ -214,7 +213,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
if (dpio_get_attributes(dpio_dev->dpio, CMD_PRI_LOW,
dpio_dev->token, &attr)) {
- PMD_INIT_LOG(ERR, "DPIO Get attribute failed\n");
+ DPAA2_BUS_ERR("DPIO Get attribute failed");
dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
@@ -231,7 +230,7 @@ configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
dpio_dev->sw_portal = qbman_swp_init(&p_des);
if (dpio_dev->sw_portal == NULL) {
- PMD_DRV_LOG(ERR, " QBMan SW Portal Init failed\n");
+ DPAA2_BUS_ERR("QBMan SW Portal Init failed");
dpio_close(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
free(dpio_dev->dpio);
return -1;
@@ -249,7 +248,7 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
if (cpu_id < 0) {
cpu_id = rte_get_master_lcore();
if (cpu_id < 0) {
- RTE_LOG(ERR, PMD, "\tGetting CPU Index failed\n");
+ DPAA2_BUS_ERR("Getting CPU Index failed");
return -1;
}
}
@@ -258,19 +257,19 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
*/
sdest = dpaa2_core_cluster_sdest(cpu_id);
- PMD_DRV_LOG(DEBUG, "Portal= %d CPU= %u SDEST= %d",
- dpio_dev->index, cpu_id, sdest);
+ DPAA2_BUS_DEBUG("Portal= %d CPU= %u SDEST= %d",
+ dpio_dev->index, cpu_id, sdest);
ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
dpio_dev->token, sdest);
if (ret) {
- PMD_DRV_LOG(ERR, "%d ERROR in SDEST\n", ret);
+ DPAA2_BUS_ERR("%d ERROR in SDEST", ret);
return -1;
}
#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
if (dpaa2_dpio_intr_init(dpio_dev)) {
- PMD_DRV_LOG(ERR, "Interrupt registration failed for dpio\n");
+ DPAA2_BUS_ERR("Interrupt registration failed for dpio");
return -1;
}
#endif
@@ -291,12 +290,12 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id)
if (!dpio_dev)
return NULL;
- PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu",
- dpio_dev, dpio_dev->index, syscall(SYS_gettid));
+ DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu",
+ dpio_dev, dpio_dev->index, syscall(SYS_gettid));
ret = dpaa2_configure_stashing(dpio_dev, cpu_id);
if (ret)
- PMD_DRV_LOG(ERR, "dpaa2_configure_stashing failed");
+ DPAA2_BUS_ERR("dpaa2_configure_stashing failed");
return dpio_dev;
}
@@ -314,8 +313,9 @@ dpaa2_affine_qbman_swp(void)
return -1;
if (dpaa2_io_portal[lcore_id].dpio_dev) {
- PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
- " between thread %lu and current %lu",
+ DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared"
+ " between thread %" PRIu64 " and current "
+ "%" PRIu64 "\n",
dpaa2_io_portal[lcore_id].dpio_dev,
dpaa2_io_portal[lcore_id].dpio_dev->index,
dpaa2_io_portal[lcore_id].net_tid,
@@ -326,7 +326,8 @@ dpaa2_affine_qbman_swp(void)
[lcore_id].dpio_dev->ref_count);
dpaa2_io_portal[lcore_id].net_tid = tid;
- PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
+ DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - "
+ "%" PRIu64 "\n",
dpaa2_io_portal[lcore_id].dpio_dev,
dpaa2_io_portal[lcore_id].dpio_dev->index,
tid);
@@ -348,7 +349,7 @@ dpaa2_affine_qbman_swp(void)
}
int
-dpaa2_affine_qbman_swp_sec(void)
+dpaa2_affine_qbman_ethrx_swp(void)
{
unsigned int lcore_id = rte_lcore_id();
uint64_t tid = syscall(SYS_gettid);
@@ -359,32 +360,36 @@ dpaa2_affine_qbman_swp_sec(void)
else if (lcore_id >= RTE_MAX_LCORE)
return -1;
- if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
- PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
- " between thread %lu and current %lu",
- dpaa2_io_portal[lcore_id].sec_dpio_dev,
- dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
- dpaa2_io_portal[lcore_id].sec_tid,
- tid);
- RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
- = dpaa2_io_portal[lcore_id].sec_dpio_dev;
+ if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+ DPAA2_BUS_DP_INFO(
+ "DPAA Portal=%p (%d) is being shared between thread"
+ " %" PRIu64 " and current %" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
+ dpaa2_io_portal[lcore_id].sec_tid,
+ tid);
+ RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+ = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
rte_atomic16_inc(&dpaa2_io_portal
- [lcore_id].sec_dpio_dev->ref_count);
+ [lcore_id].ethrx_dpio_dev->ref_count);
dpaa2_io_portal[lcore_id].sec_tid = tid;
- PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
- dpaa2_io_portal[lcore_id].sec_dpio_dev,
- dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
- tid);
+ DPAA2_BUS_DP_DEBUG(
+ "Old Portal=%p (%d) affined thread"
+ " - %" PRIu64 "\n",
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
+ tid);
return 0;
}
/* Populate the dpaa2_io_portal structure */
- dpaa2_io_portal[lcore_id].sec_dpio_dev = dpaa2_get_qbman_swp(lcore_id);
+ dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
+ dpaa2_get_qbman_swp(lcore_id);
- if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
- RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
- = dpaa2_io_portal[lcore_id].sec_dpio_dev;
+ if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
+ RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+ = dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
dpaa2_io_portal[lcore_id].sec_tid = tid;
return 0;
} else {
@@ -401,15 +406,14 @@ dpaa2_create_dpio_device(int vdev_fd,
struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
if (obj_info->num_regions < NUM_DPIO_REGIONS) {
- PMD_INIT_LOG(ERR, "ERROR, Not sufficient number "
- "of DPIO regions.\n");
+ DPAA2_BUS_ERR("Not sufficient number of DPIO regions");
return -1;
}
dpio_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpio_dev),
RTE_CACHE_LINE_SIZE);
if (!dpio_dev) {
- PMD_INIT_LOG(ERR, "Memory allocation failed for DPIO Device\n");
+ DPAA2_BUS_ERR("Memory allocation failed for DPIO Device");
return -1;
}
@@ -421,31 +425,31 @@ dpaa2_create_dpio_device(int vdev_fd,
reg_info.index = 0;
if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
- PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
+ DPAA2_BUS_ERR("vfio: error getting region info");
rte_free(dpio_dev);
return -1;
}
dpio_dev->ce_size = reg_info.size;
- dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
+ dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
vdev_fd, reg_info.offset);
reg_info.index = 1;
if (ioctl(vdev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info)) {
- PMD_INIT_LOG(ERR, "vfio: error getting region info\n");
+ DPAA2_BUS_ERR("vfio: error getting region info");
rte_free(dpio_dev);
return -1;
}
dpio_dev->ci_size = reg_info.size;
- dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
+ dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
PROT_WRITE | PROT_READ, MAP_SHARED,
vdev_fd, reg_info.offset);
if (configure_dpio_qbman_swp(dpio_dev)) {
- PMD_INIT_LOG(ERR,
- "Fail to configure the dpio qbman portal for %d\n",
+ DPAA2_BUS_ERR(
+ "Fail to configure the dpio qbman portal for %d",
dpio_dev->hw_id);
rte_free(dpio_dev);
return -1;
@@ -455,8 +459,8 @@ dpaa2_create_dpio_device(int vdev_fd,
dpio_dev->index = io_space_count;
if (rte_dpaa2_vfio_setup_intr(&dpio_dev->intr_handle, vdev_fd, 1)) {
- PMD_INIT_LOG(ERR, "Fail to setup interrupt for %d\n",
- dpio_dev->hw_id);
+ DPAA2_BUS_ERR("Fail to setup interrupt for %d",
+ dpio_dev->hw_id);
rte_free(dpio_dev);
}
@@ -466,21 +470,20 @@ dpaa2_create_dpio_device(int vdev_fd,
if (mc_get_soc_version(dpio_dev->dpio,
CMD_PRI_LOW, &mc_plat_info)) {
- PMD_INIT_LOG(ERR, "\tmc_get_soc_version failed\n");
+ DPAA2_BUS_ERR("Unable to get SoC version information");
} else if ((mc_plat_info.svr & 0xffff0000) == SVR_LS1080A) {
dpaa2_core_cluster_base = 0x02;
dpaa2_cluster_sz = 4;
- PMD_INIT_LOG(DEBUG, "\tLS108x (A53) Platform Detected");
+ DPAA2_BUS_DEBUG("LS108x (A53) Platform Detected");
} else if ((mc_plat_info.svr & 0xffff0000) == SVR_LX2160A) {
dpaa2_core_cluster_base = 0x00;
dpaa2_cluster_sz = 2;
- PMD_INIT_LOG(DEBUG, "\tLX2160 Platform Detected");
+ DPAA2_BUS_DEBUG("LX2160 Platform Detected");
}
dpaa2_svr_family = (mc_plat_info.svr & 0xffff0000);
}
TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpio.%d]\n", object_id);
return 0;
}
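
dpaa2_affine_qbman_swp() and the new dpaa2_affine_qbman_ethrx_swp() both follow the same per-lcore caching pattern: a thread reuses an already-affined portal and only allocates one on first use. A simplified stand-in using plain __thread TLS; the driver itself uses RTE_PER_LCORE slots plus a refcount, so this is illustrative only:

    #include <stdio.h>

    struct portal { int index; int in_use; };

    static struct portal portals[4];
    static __thread struct portal *my_portal;

    static struct portal *get_free_portal(void)
    {
        for (int i = 0; i < 4; i++)
            if (!portals[i].in_use) {
                portals[i].in_use = 1;
                portals[i].index = i;
                return &portals[i];
            }
        return NULL;
    }

    static struct portal *affine_portal(void)
    {
        if (my_portal)    /* this thread already owns a portal */
            return my_portal;
        my_portal = get_free_portal();
        return my_portal;
    }

    int main(void)
    {
        struct portal *p = affine_portal();
        if (p)
            printf("affined portal %d\n", p->index);
        return 0;
    }
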
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index c0bd8782..d593eea7 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -13,7 +13,7 @@
struct dpaa2_io_portal_t {
struct dpaa2_dpio_dev *dpio_dev;
- struct dpaa2_dpio_dev *sec_dpio_dev;
+ struct dpaa2_dpio_dev *ethrx_dpio_dev;
uint64_t net_tid;
uint64_t sec_tid;
void *eventdev;
@@ -25,8 +25,8 @@ RTE_DECLARE_PER_LCORE(struct dpaa2_io_portal_t, _dpaa2_io);
#define DPAA2_PER_LCORE_DPIO RTE_PER_LCORE(_dpaa2_io).dpio_dev
#define DPAA2_PER_LCORE_PORTAL DPAA2_PER_LCORE_DPIO->sw_portal
-#define DPAA2_PER_LCORE_SEC_DPIO RTE_PER_LCORE(_dpaa2_io).sec_dpio_dev
-#define DPAA2_PER_LCORE_SEC_PORTAL DPAA2_PER_LCORE_SEC_DPIO->sw_portal
+#define DPAA2_PER_LCORE_ETHRX_DPIO RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
+#define DPAA2_PER_LCORE_ETHRX_PORTAL DPAA2_PER_LCORE_ETHRX_DPIO->sw_portal
/* Variable to store DPAA2 platform type */
extern uint32_t dpaa2_svr_family;
@@ -39,7 +39,7 @@ struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id);
int dpaa2_affine_qbman_swp(void);
/* Affine additional DPIO portal to current crypto processing thread */
-int dpaa2_affine_qbman_swp_sec(void);
+int dpaa2_affine_qbman_ethrx_swp(void);
/* allocate memory for FQ - dq storage */
int
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index d421dbf0..82075936 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -142,7 +142,8 @@ struct dpaa2_dpci_dev {
uint16_t token;
rte_atomic16_t in_use;
uint32_t dpci_id; /*HW ID for DPCI object */
- struct dpaa2_queue queue[DPAA2_DPCI_MAX_QUEUES];
+ struct dpaa2_queue rx_queue[DPAA2_DPCI_MAX_QUEUES];
+ struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
};
/*! Global MCP list */
@@ -174,7 +175,7 @@ enum qbman_fd_format {
};
/*Macros to define operations on FD*/
#define DPAA2_SET_FD_ADDR(fd, addr) do { \
- (fd)->simple.addr_lo = lower_32_bits((uint64_t)(addr)); \
+ (fd)->simple.addr_lo = lower_32_bits((size_t)(addr)); \
(fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FD_LEN(fd, length) ((fd)->simple.len = length)
@@ -188,50 +189,55 @@ enum qbman_fd_format {
((fd)->simple.frc = (0x80000000 | (len)))
#define DPAA2_GET_FD_FRC_PARSE_SUM(fd) \
((uint16_t)(((fd)->simple.frc & 0xffff0000) >> 16))
-#define DPAA2_SET_FD_FRC(fd, frc) ((fd)->simple.frc = frc)
+#define DPAA2_SET_FD_FRC(fd, _frc) ((fd)->simple.frc = _frc)
#define DPAA2_RESET_FD_CTRL(fd) ((fd)->simple.ctrl = 0)
#define DPAA2_SET_FD_ASAL(fd, asal) ((fd)->simple.ctrl |= (asal << 16))
#define DPAA2_SET_FD_FLC(fd, addr) do { \
- (fd)->simple.flc_lo = lower_32_bits((uint64_t)(addr)); \
+ (fd)->simple.flc_lo = lower_32_bits((size_t)(addr)); \
(fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr)); \
} while (0)
#define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
#define DPAA2_GET_FLE_ADDR(fle) \
- (uint64_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
+ (size_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
#define DPAA2_SET_FLE_ADDR(fle, addr) do { \
- (fle)->addr_lo = lower_32_bits((uint64_t)addr); \
- (fle)->addr_hi = upper_32_bits((uint64_t)addr); \
+ (fle)->addr_lo = lower_32_bits((size_t)addr); \
+ (fle)->addr_hi = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_GET_FLE_CTXT(fle) \
- (uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
- (fle)->reserved[0])
+ ((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0])
#define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
- (fle)->reserved[0] = lower_32_bits((uint64_t)addr); \
- (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \
+ (fle)->reserved[0] = lower_32_bits((size_t)addr); \
+ (fle)->reserved[1] = upper_32_bits((uint64_t)addr); \
} while (0)
#define DPAA2_SET_FLE_OFFSET(fle, offset) \
((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
-#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
+#define DPAA2_SET_FLE_LEN(fle, len) ((fle)->length = len)
+#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)bpid)
#define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
-#define DPAA2_SET_FLE_FIN(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 31)
+#define DPAA2_SET_FLE_FIN(fle)	((fle)->fin_bpid_offset |= 1U << 31)
#define DPAA2_SET_FLE_IVP(fle) (((fle)->fin_bpid_offset |= 0x00004000))
+#define DPAA2_SET_FLE_BMT(fle) (((fle)->fin_bpid_offset |= 0x00008000))
#define DPAA2_SET_FD_COMPOUND_FMT(fd) \
((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
#define DPAA2_GET_FD_ADDR(fd) \
-((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
+(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
#define DPAA2_GET_FD_LEN(fd) ((fd)->simple.len)
#define DPAA2_GET_FD_BPID(fd) (((fd)->simple.bpid_offset & 0x00003FFF))
#define DPAA2_GET_FD_IVP(fd) (((fd)->simple.bpid_offset & 0x00004000) >> 14)
#define DPAA2_GET_FD_OFFSET(fd) (((fd)->simple.bpid_offset & 0x0FFF0000) >> 16)
+#define DPAA2_GET_FD_FRC(fd) ((fd)->simple.frc)
+#define DPAA2_GET_FD_FLC(fd) \
+ (((uint64_t)((fd)->simple.flc_hi) << 32) + (fd)->simple.flc_lo)
+#define DPAA2_GET_FD_ERR(fd) ((fd)->simple.bpid_offset & 0x000000FF)
#define DPAA2_GET_FLE_OFFSET(fle) (((fle)->fin_bpid_offset & 0x0FFF0000) >> 16)
#define DPAA2_SET_FLE_SG_EXT(fle) ((fle)->fin_bpid_offset |= (uint64_t)1 << 29)
#define DPAA2_IS_SET_FLE_SG_EXT(fle) \
(((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
#define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
- ((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size)))
+ ((struct rte_mbuf *)((size_t)(buf) - (meta_data_size)))
#define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
@@ -255,47 +261,53 @@ enum qbman_fd_format {
*/
#define DPAA2_EQ_RESP_ALWAYS 1
+/* Various structures representing contiguous memory maps */
+struct dpaa2_memseg {
+ TAILQ_ENTRY(dpaa2_memseg) next;
+ char *vaddr;
+ rte_iova_t iova;
+ size_t len;
+};
+
+TAILQ_HEAD(dpaa2_memseg_list, dpaa2_memseg);
+extern struct dpaa2_memseg_list rte_dpaa2_memsegs;
+
#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
extern uint8_t dpaa2_virt_mode;
static void *dpaa2_mem_ptov(phys_addr_t paddr) __attribute__((unused));
/* todo - this is costly, need to write a fast conversion routine */
static void *dpaa2_mem_ptov(phys_addr_t paddr)
{
- const struct rte_memseg *memseg;
- int i;
+ struct dpaa2_memseg *ms;
if (dpaa2_virt_mode)
- return (void *)paddr;
-
- memseg = rte_eal_get_physmem_layout();
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].iova &&
- (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
- return (void *)(memseg[i].addr_64
- + (paddr - memseg[i].iova));
+ return (void *)(size_t)paddr;
+
+ /* Check if the address is already part of the memseg list internally
+ * maintained by the dpaa2 driver.
+ */
+ TAILQ_FOREACH(ms, &rte_dpaa2_memsegs, next) {
+ if (paddr >= ms->iova && paddr <
+ ms->iova + ms->len)
+ return RTE_PTR_ADD(ms->vaddr, (uintptr_t)(paddr - ms->iova));
}
- return NULL;
+
+ /* If not, Fallback to full memseg list searching */
+ return rte_mem_iova2virt(paddr);
}
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr) __attribute__((unused));
static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
{
const struct rte_memseg *memseg;
- int i;
if (dpaa2_virt_mode)
return vaddr;
- memseg = rte_eal_get_physmem_layout();
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (vaddr >= memseg[i].addr_64 &&
- vaddr < memseg[i].addr_64 + memseg[i].len)
- return memseg[i].iova
- + (vaddr - memseg[i].addr_64);
- }
- return (phys_addr_t)(NULL);
+ memseg = rte_mem_virt2memseg((void *)(uintptr_t)vaddr, NULL);
+ if (memseg)
+ return memseg->phys_addr + RTE_PTR_DIFF(vaddr, memseg->addr);
+	return (phys_addr_t)0;
}
/**
@@ -306,28 +318,26 @@ static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
*/
#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_iova)
-#define DPAA2_OP_VADDR_TO_IOVA(op) (op->phys_addr)
/**
* macro to convert Virtual address to IOVA
*/
-#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))
+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))
/**
* macro to convert IOVA to Virtual address
*/
-#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))
+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))
/**
* macro to convert modify the memory containing IOVA to Virtual address
*/
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
- {_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }
+ {_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
#else /* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
#define DPAA2_MBUF_VADDR_TO_IOVA(mbuf) ((mbuf)->buf_addr)
-#define DPAA2_OP_VADDR_TO_IOVA(op) (op)
#define DPAA2_VADDR_TO_IOVA(_vaddr) (_vaddr)
#define DPAA2_IOVA_TO_VADDR(_iova) (_iova)
#define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type)
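
The (size_t) casts introduced above let the FD/FLE macros compile cleanly with 32-bit pointers while the hardware descriptor fields stay a 64-bit hi/lo pair. A sketch of the split-and-reassemble round trip, with lower_32_bits/upper_32_bits defined locally in the kernel style these macros assume:

    #include <stdint.h>
    #include <stdio.h>

    #define lower_32_bits(x) ((uint32_t)(x))
    #define upper_32_bits(x) ((uint32_t)(((uint64_t)(x)) >> 32))

    int main(void)
    {
        uint64_t iova = 0x0000123456789abcULL;
        uint32_t lo = lower_32_bits(iova);
        uint32_t hi = upper_32_bits(iova);

        /* Reassemble the way DPAA2_GET_FD_ADDR does */
        uint64_t back = (((uint64_t)hi) << 32) + lo;
        printf("round trip ok: %d\n", back == iova);
        return 0;
    }
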
diff --git a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
index 96269ed4..bb60a98f 100644
--- a/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
+++ b/drivers/bus/fslmc/qbman/include/fsl_qbman_base.h
@@ -6,8 +6,6 @@
#ifndef _FSL_QBMAN_BASE_H
#define _FSL_QBMAN_BASE_H
-typedef uint64_t dma_addr_t;
-
/**
* DOC: QBMan basic structures
*
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index e2217335..07145005 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -122,8 +122,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
p->vdq.valid_bit = QB_VALID_BIT;
p->dqrr.next_idx = 0;
p->dqrr.valid_bit = QB_VALID_BIT;
- qman_version = p->desc.qman_version;
- if ((qman_version & 0xFFFF0000) < QMAN_REV_4100) {
+ if ((p->desc.qman_version & 0xFFFF0000) < QMAN_REV_4100) {
p->dqrr.dqrr_size = 4;
p->dqrr.reset_bug = 1;
} else {
@@ -553,10 +552,9 @@ int qbman_swp_enqueue_multiple(struct qbman_swp *s,
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
- addr_cena = (uint64_t)s->sys.addr_cena;
+ addr_cena = (size_t)s->sys.addr_cena;
for (i = 0; i < num_enqueued; i++) {
- dcbf((uint64_t *)(addr_cena +
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
eqcr_pi++;
eqcr_pi &= 0xF;
}
@@ -620,10 +618,9 @@ int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
- addr_cena = (uint64_t)s->sys.addr_cena;
+ addr_cena = (size_t)s->sys.addr_cena;
for (i = 0; i < num_enqueued; i++) {
- dcbf((uint64_t *)(addr_cena +
- QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+ dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
eqcr_pi++;
eqcr_pi &= 0xF;
}
@@ -690,7 +687,7 @@ void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
dma_addr_t storage_phys,
int stash)
{
- d->pull.rsp_addr_virt = (uint64_t)storage;
+ d->pull.rsp_addr_virt = (size_t)storage;
if (!storage) {
d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
@@ -749,7 +746,7 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
}
d->pull.tok = s->sys.idx + 1;
- s->vdq.storage = (void *)d->pull.rsp_addr_virt;
+ s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
memcpy(&p[1], &cl[1], 12);
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.h b/drivers/bus/fslmc/qbman/qbman_portal.h
index 8bff0b4f..dbea22a1 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.h
+++ b/drivers/bus/fslmc/qbman/qbman_portal.h
@@ -7,7 +7,6 @@
#include "qbman_sys.h"
#include <fsl_qbman_portal.h>
-uint32_t qman_version;
#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
diff --git a/drivers/bus/fslmc/qbman/qbman_sys.h b/drivers/bus/fslmc/qbman/qbman_sys.h
index 846788ea..2bd33ea5 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys.h
@@ -20,6 +20,9 @@
#include "qbman_sys_decl.h"
+#define CENA_WRITE_ENABLE 0
+#define CINH_WRITE_ENABLE 1
+
/* Debugging assists */
static inline void __hexdump(unsigned long start, unsigned long end,
unsigned long p, size_t sz, const unsigned char *c)
@@ -178,7 +181,11 @@ static inline void *qbman_cena_write_start_wo_shadow(struct qbman_swp_sys *s,
s->addr_cena, s->idx, offset);
#endif
QBMAN_BUG_ON(offset & 63);
+#ifdef RTE_ARCH_64
return (s->addr_cena + offset);
+#else
+ return (s->addr_cinh + offset);
+#endif
}
static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
@@ -191,11 +198,19 @@ static inline void qbman_cena_write_complete(struct qbman_swp_sys *s,
s->addr_cena, s->idx, offset, shadow);
hexdump(cmd, 64);
#endif
+#ifdef RTE_ARCH_64
for (loop = 15; loop >= 1; loop--)
__raw_writel(shadow[loop], s->addr_cena +
offset + loop * 4);
lwsync();
__raw_writel(shadow[0], s->addr_cena + offset);
+#else
+ for (loop = 15; loop >= 1; loop--)
+ __raw_writel(shadow[loop], s->addr_cinh +
+ offset + loop * 4);
+ lwsync();
+ __raw_writel(shadow[0], s->addr_cinh + offset);
+#endif
dcbf(s->addr_cena + offset);
}
@@ -224,9 +239,15 @@ static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset)
s->addr_cena, s->idx, offset, shadow);
#endif
+#ifdef RTE_ARCH_64
for (loop = 0; loop < 16; loop++)
shadow[loop] = __raw_readl(s->addr_cena + offset
+ loop * 4);
+#else
+ for (loop = 0; loop < 16; loop++)
+ shadow[loop] = __raw_readl(s->addr_cinh + offset
+ + loop * 4);
+#endif
#ifdef QBMAN_CENA_TRACE
hexdump(shadow, 64);
#endif
@@ -313,6 +334,11 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
uint8_t dqrr_size)
{
uint32_t reg;
+#ifdef RTE_ARCH_64
+ uint8_t wn = CENA_WRITE_ENABLE;
+#else
+ uint8_t wn = CINH_WRITE_ENABLE;
+#endif
s->addr_cena = d->cena_bar;
s->addr_cinh = d->cinh_bar;
@@ -333,10 +359,10 @@ static inline int qbman_swp_sys_init(struct qbman_swp_sys *s,
QBMAN_BUG_ON(reg);
#endif
if (s->eqcr_mode == qman_eqcr_vb_array)
- reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 1, 1, 1, 1,
+ reg = qbman_set_swp_cfg(dqrr_size, wn, 0, 3, 2, 3, 1, 1, 1, 1,
1, 1);
else
- reg = qbman_set_swp_cfg(dqrr_size, 0, 1, 3, 2, 2, 1, 1, 1, 1,
+ reg = qbman_set_swp_cfg(dqrr_size, wn, 1, 3, 2, 2, 1, 1, 1, 1,
1, 1);
qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG);
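
On 64-bit hardware the portal is driven through the cache-enabled (CENA) region; the additions above route 32-bit builds through the cache-inhibited (CINH) region instead and feed that choice (wn) into qbman_set_swp_cfg(). The selection reduces to a compile-time switch, sketched here (RTE_ARCH_64 is only defined inside a DPDK build):

    #include <stdio.h>
    #include <stdint.h>

    #define CENA_WRITE_ENABLE 0
    #define CINH_WRITE_ENABLE 1

    /* Mirrors the #ifdef RTE_ARCH_64 choice in qbman_swp_sys_init() */
    static inline uint8_t qbman_write_mode(void)
    {
    #ifdef RTE_ARCH_64
        return CENA_WRITE_ENABLE;
    #else
        return CINH_WRITE_ENABLE;
    #endif
    }

    int main(void)
    {
        printf("wn = %u\n", qbman_write_mode());
        return 0;
    }
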
diff --git a/drivers/bus/fslmc/qbman/qbman_sys_decl.h b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
index f82bb18c..fa6977fe 100644
--- a/drivers/bus/fslmc/qbman/qbman_sys_decl.h
+++ b/drivers/bus/fslmc/qbman/qbman_sys_decl.h
@@ -15,6 +15,7 @@
/****************/
/* arch assists */
/****************/
+#if defined(RTE_ARCH_ARM64)
#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
#define lwsync() { asm volatile("dmb st" : : : "memory"); }
#define dcbf(p) { asm volatile("dc cvac, %0" : : "r"(p) : "memory"); }
@@ -28,3 +29,25 @@ static inline void prefetch_for_store(void *p)
{
asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p));
}
+#elif defined(RTE_ARCH_ARM)
+#define dcbz(p) memset(p, 0, 64)
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) RTE_SET_USED(p)
+#define dccivac(p) RTE_SET_USED(p)
+#define prefetch_for_load(p) { asm volatile ("pld [%0]" : : "r" (p)); }
+#define prefetch_for_store(p) { asm volatile ("pld [%0]" : : "r" (p)); }
+
+#else
+#define dcbz(p) RTE_SET_USED(p)
+#define lwsync()
+#define dcbf(p) RTE_SET_USED(p)
+#define dccivac(p) RTE_SET_USED(p)
+static inline void prefetch_for_load(void *p)
+{
+ RTE_SET_USED(p);
+}
+static inline void prefetch_for_store(void *p)
+{
+ RTE_SET_USED(p);
+}
+#endif
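
The #if ladder added above keeps common code free of per-architecture #ifdefs: on targets without the cache-maintenance instructions, the helpers collapse to no-ops and callers invoke them unconditionally. The pattern in isolation, with RTE_SET_USED stubbed locally:

    #include <string.h>

    #define RTE_SET_USED(x) (void)(x)

    #if defined(__aarch64__)
    #define dcbf(p) asm volatile("dc cvac, %0" : : "r"(p) : "memory")
    #else
    #define dcbf(p) RTE_SET_USED(p)    /* no cache op available */
    #endif

    int main(void)
    {
        char line[64];

        memset(line, 0, sizeof(line));
        dcbf(line);    /* flushes on arm64, compiles away elsewhere */
        return 0;
    }
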
diff --git a/drivers/bus/fslmc/rte_bus_fslmc_version.map b/drivers/bus/fslmc/rte_bus_fslmc_version.map
index b7db0741..fe45a113 100644
--- a/drivers/bus/fslmc/rte_bus_fslmc_version.map
+++ b/drivers/bus/fslmc/rte_bus_fslmc_version.map
@@ -2,7 +2,6 @@ DPDK_17.05 {
global:
dpaa2_affine_qbman_swp;
- dpaa2_affine_qbman_swp_sec;
dpaa2_alloc_dpbp_dev;
dpaa2_alloc_dq_storage;
dpaa2_free_dpbp_dev;
@@ -101,3 +100,19 @@ DPDK_18.02 {
rte_fslmc_get_device_count;
} DPDK_17.11;
+
+DPDK_18.05 {
+ global:
+
+ dpaa2_affine_qbman_ethrx_swp;
+ dpdmai_close;
+ dpdmai_disable;
+ dpdmai_enable;
+ dpdmai_get_attributes;
+ dpdmai_get_rx_queue;
+ dpdmai_get_tx_queue;
+ dpdmai_open;
+ dpdmai_set_rx_queue;
+ rte_dpaa2_free_dpci_dev;
+
+} DPDK_18.02;
diff --git a/drivers/bus/fslmc/rte_fslmc.h b/drivers/bus/fslmc/rte_fslmc.h
index 69d0fec7..cea5b78f 100644
--- a/drivers/bus/fslmc/rte_fslmc.h
+++ b/drivers/bus/fslmc/rte_fslmc.h
@@ -31,6 +31,7 @@ extern "C" {
#include <rte_dev.h>
#include <rte_bus.h>
#include <rte_tailq.h>
+#include <rte_devargs.h>
#include <fslmc_vfio.h>
@@ -49,6 +50,9 @@ struct rte_dpaa2_driver;
TAILQ_HEAD(rte_fslmc_device_list, rte_dpaa2_device);
TAILQ_HEAD(rte_fslmc_driver_list, rte_dpaa2_driver);
+#define RTE_DEV_TO_FSLMC_CONST(ptr) \
+ container_of(ptr, const struct rte_dpaa2_device, device)
+
extern struct rte_fslmc_bus rte_fslmc_bus;
enum rte_dpaa2_dev_type {
@@ -61,6 +65,7 @@ enum rte_dpaa2_dev_type {
DPAA2_IO, /**< DPIO type device */
DPAA2_CI, /**< DPCI type device */
DPAA2_MPORTAL, /**< DPMCP type device */
+ DPAA2_QDMA, /**< DPDMAI type device */
/* Unknown device placeholder */
DPAA2_UNKNOWN,
DPAA2_DEVTYPE_MAX,
@@ -91,6 +96,7 @@ struct rte_dpaa2_device {
union {
struct rte_eth_dev *eth_dev; /**< ethernet device */
struct rte_cryptodev *cryptodev; /**< Crypto Device */
+ struct rte_rawdev *rawdev; /**< Raw Device */
};
enum rte_dpaa2_dev_type dev_type; /**< Device Type */
uint16_t object_id; /**< DPAA2 Object ID */
@@ -167,8 +173,7 @@ void rte_fslmc_driver_unregister(struct rte_dpaa2_driver *driver);
/** Helper for DPAA2 device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_DPAA2(nm, dpaa2_drv) \
-RTE_INIT(dpaa2initfn_ ##nm); \
-static void dpaa2initfn_ ##nm(void) \
+RTE_INIT(dpaa2initfn_ ##nm) \
{\
(dpaa2_drv).driver.name = RTE_STR(nm);\
rte_fslmc_driver_register(&dpaa2_drv); \
@@ -197,8 +202,7 @@ uint32_t rte_fslmc_get_device_count(enum rte_dpaa2_dev_type device_type);
/** Helper for DPAA2 object registration */
#define RTE_PMD_REGISTER_DPAA2_OBJECT(nm, dpaa2_obj) \
-RTE_INIT(dpaa2objinitfn_ ##nm); \
-static void dpaa2objinitfn_ ##nm(void) \
+RTE_INIT(dpaa2objinitfn_ ##nm) \
{\
(dpaa2_obj).name = RTE_STR(nm);\
rte_fslmc_object_register(&dpaa2_obj); \
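
The RTE_PMD_REGISTER_DPAA2* change reflects the 18.08 RTE_INIT rework: the macro now both declares the constructor and opens its body, so the separate forward declaration disappears. A standalone illustration with a local stand-in for RTE_INIT:

    #include <stdio.h>

    /* Local stand-in for the reworked RTE_INIT: declare and open
     * the constructor in one expansion. */
    #define MY_INIT(func) \
    static void __attribute__((constructor)) func(void)

    MY_INIT(register_my_driver)
    {
        printf("registered before main()\n");
    }

    int main(void)
    {
        return 0;
    }
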
diff --git a/drivers/bus/ifpga/Makefile b/drivers/bus/ifpga/Makefile
new file mode 100644
index 00000000..3ff3bdb8
--- /dev/null
+++ b/drivers/bus/ifpga/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_bus_ifpga.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_bus_ifpga_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_bus.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += ifpga_common.c
+
+#
+# Export include files
+#
+SYMLINK-$(CONFIG_RTE_LIBRTE_IFPGA_BUS)-include += rte_bus_ifpga.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/ifpga/ifpga_bus.c b/drivers/bus/ifpga/ifpga_bus.c
new file mode 100644
index 00000000..b324872e
--- /dev/null
+++ b/drivers/bus/ifpga/ifpga_bus.c
@@ -0,0 +1,467 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_errno.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+#include <rte_string_fns.h>
+
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_logs.h"
+#include "ifpga_common.h"
+
+int ifpga_bus_logtype;
+
+/* Forward declaration to access Intel FPGA bus
+ * on which iFPGA devices are connected
+ */
+static struct rte_bus rte_ifpga_bus;
+
+static struct ifpga_afu_dev_list ifpga_afu_dev_list =
+ TAILQ_HEAD_INITIALIZER(ifpga_afu_dev_list);
+static struct ifpga_afu_drv_list ifpga_afu_drv_list =
+ TAILQ_HEAD_INITIALIZER(ifpga_afu_drv_list);
+
+
+/* register a ifpga bus based driver */
+void rte_ifpga_driver_register(struct rte_afu_driver *driver)
+{
+ RTE_VERIFY(driver);
+
+ TAILQ_INSERT_TAIL(&ifpga_afu_drv_list, driver, next);
+}
+
+/* un-register a fpga bus based driver */
+void rte_ifpga_driver_unregister(struct rte_afu_driver *driver)
+{
+ TAILQ_REMOVE(&ifpga_afu_drv_list, driver, next);
+}
+
+static struct rte_afu_device *
+ifpga_find_afu_dev(const struct rte_rawdev *rdev,
+ const struct rte_afu_id *afu_id)
+{
+ struct rte_afu_device *afu_dev = NULL;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (afu_dev &&
+ afu_dev->rawdev == rdev &&
+ !ifpga_afu_id_cmp(&afu_dev->id, afu_id))
+ return afu_dev;
+ }
+ return NULL;
+}
+
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
+/*
+ * Scan the content of the FPGA bus, and the devices in the devices
+ * list
+ */
+static struct rte_afu_device *
+ifpga_scan_one(struct rte_rawdev *rawdev,
+ struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_afu_device *afu_dev = NULL;
+ struct rte_afu_pr_conf afu_pr_conf;
+ int ret = 0;
+ char *path = NULL;
+
+ memset(&afu_pr_conf, 0, sizeof(struct rte_afu_pr_conf));
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_BUS_ERR("error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg, &afu_pr_conf.afu_id.port) < 0) {
+ IFPGA_BUS_ERR("error to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_AFU_BTS) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_AFU_BTS,
+ &rte_ifpga_get_string_arg, &path) < 0) {
+ IFPGA_BUS_ERR("Failed to parse %s",
+ IFPGA_AFU_BTS);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_AFU_BTS);
+ goto end;
+ }
+
+ afu_pr_conf.afu_id.uuid.uuid_low = 0;
+ afu_pr_conf.afu_id.uuid.uuid_high = 0;
+	afu_pr_conf.pr_enable = path ? 1 : 0;
+
+ if (ifpga_find_afu_dev(rawdev, &afu_pr_conf.afu_id))
+ goto end;
+
+ afu_dev = calloc(1, sizeof(*afu_dev));
+ if (!afu_dev)
+ goto end;
+
+ afu_dev->device.devargs = devargs;
+ afu_dev->device.numa_node = SOCKET_ID_ANY;
+ afu_dev->device.name = devargs->name;
+ afu_dev->rawdev = rawdev;
+ afu_dev->id.uuid.uuid_low = 0;
+ afu_dev->id.uuid.uuid_high = 0;
+ afu_dev->id.port = afu_pr_conf.afu_id.port;
+
+ if (rawdev->dev_ops && rawdev->dev_ops->dev_info_get)
+ rawdev->dev_ops->dev_info_get(rawdev, afu_dev);
+
+ if (rawdev->dev_ops &&
+ rawdev->dev_ops->dev_start &&
+ rawdev->dev_ops->dev_start(rawdev))
+ goto end;
+
+ strlcpy(afu_pr_conf.bs_path, path, sizeof(afu_pr_conf.bs_path));
+	if (rawdev->dev_ops && rawdev->dev_ops->firmware_load) {
+		ret = rawdev->dev_ops->firmware_load(rawdev,
+				&afu_pr_conf);
+		if (ret) {
+			IFPGA_BUS_ERR("firmware load error %d", ret);
+			goto end;
+		}
+	}
+ afu_dev->id.uuid.uuid_low = afu_pr_conf.afu_id.uuid.uuid_low;
+ afu_dev->id.uuid.uuid_high = afu_pr_conf.afu_id.uuid.uuid_high;
+
+ rte_kvargs_free(kvlist);
+ free(path);
+ return afu_dev;
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (path)
+ free(path);
+ if (afu_dev)
+ free(afu_dev);
+
+ return NULL;
+}
+
+/*
+ * Scan the content of the FPGA bus, and the devices in the devices
+ * list
+ */
+static int
+ifpga_scan(void)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ struct rte_rawdev *rawdev = NULL;
+ char *name = NULL;
+ char name1[RTE_RAWDEV_NAME_MAX_LEN];
+ struct rte_afu_device *afu_dev = NULL;
+
+ /* for FPGA devices we scan the devargs_list populated via cmdline */
+ RTE_EAL_DEVARGS_FOREACH(IFPGA_ARG_NAME, devargs) {
+ if (devargs->bus != &rte_ifpga_bus)
+ continue;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+ IFPGA_BUS_ERR("error when parsing param");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &rte_ifpga_get_string_arg, &name) < 0) {
+ IFPGA_BUS_ERR("error to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_BUS_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ memset(name1, 0, sizeof(name1));
+ snprintf(name1, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", name);
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name1);
+ if (!rawdev)
+ goto end;
+
+ afu_dev = ifpga_scan_one(rawdev, devargs);
+ if (afu_dev != NULL)
+ TAILQ_INSERT_TAIL(&ifpga_afu_dev_list, afu_dev, next);
+ }
+
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return 0;
+}
+
+/*
+ * Match the AFU Driver and AFU Device using the ID Table
+ */
+static int
+rte_afu_match(const struct rte_afu_driver *afu_drv,
+ const struct rte_afu_device *afu_dev)
+{
+ const struct rte_afu_uuid *id_table;
+
+ for (id_table = afu_drv->id_table;
+ ((id_table->uuid_low != 0) && (id_table->uuid_high != 0));
+ id_table++) {
+ /* check if device's identifiers match the driver's ones */
+ if ((id_table->uuid_low != afu_dev->id.uuid.uuid_low) ||
+ id_table->uuid_high !=
+ afu_dev->id.uuid.uuid_high)
+ continue;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+ifpga_probe_one_driver(struct rte_afu_driver *drv,
+ struct rte_afu_device *afu_dev)
+{
+ int ret;
+
+ if (!rte_afu_match(drv, afu_dev))
+ /* Match of device and driver failed */
+ return 1;
+
+ /* reference driver structure */
+ afu_dev->driver = drv;
+ afu_dev->device.driver = &drv->driver;
+
+ /* call the driver probe() function */
+ ret = drv->probe(afu_dev);
+ if (ret) {
+ afu_dev->driver = NULL;
+ afu_dev->device.driver = NULL;
+ }
+
+ return ret;
+}
+
+static int
+ifpga_probe_all_drivers(struct rte_afu_device *afu_dev)
+{
+ struct rte_afu_driver *drv = NULL;
+ int ret = 0;
+
+ if (afu_dev == NULL)
+ return -1;
+
+ /* Check if a driver is already loaded */
+ if (afu_dev->driver != NULL)
+ return 0;
+
+ TAILQ_FOREACH(drv, &ifpga_afu_drv_list, next) {
+ if (ifpga_probe_one_driver(drv, afu_dev)) {
+ ret = -1;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
+ * Scan the content of the Intel FPGA bus, and call the probe() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+static int
+ifpga_probe(void)
+{
+ struct rte_afu_device *afu_dev = NULL;
+ int ret = 0;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (afu_dev->device.driver)
+ continue;
+
+ ret = ifpga_probe_all_drivers(afu_dev);
+ if (ret < 0)
+ IFPGA_BUS_ERR("failed to initialize %s device\n",
+ rte_ifpga_device_name(afu_dev));
+ }
+
+ return ret;
+}
+
+static int
+ifpga_plug(struct rte_device *dev)
+{
+ return ifpga_probe_all_drivers(RTE_DEV_TO_AFU(dev));
+}
+
+static int
+ifpga_remove_driver(struct rte_afu_device *afu_dev)
+{
+ const char *name;
+ const struct rte_afu_driver *driver;
+
+ name = rte_ifpga_device_name(afu_dev);
+ if (!afu_dev->device.driver) {
+ IFPGA_BUS_DEBUG("no driver attach to device %s\n", name);
+ return 1;
+ }
+
+ driver = RTE_DRV_TO_AFU_CONST(afu_dev->device.driver);
+ return driver->remove(afu_dev);
+}
+
+static int
+ifpga_unplug(struct rte_device *dev)
+{
+ struct rte_afu_device *afu_dev = NULL;
+ struct rte_devargs *devargs = NULL;
+ int ret;
+
+ if (dev == NULL)
+ return -EINVAL;
+
+ afu_dev = RTE_DEV_TO_AFU(dev);
+ if (!afu_dev)
+ return -ENOENT;
+
+ devargs = dev->devargs;
+
+ ret = ifpga_remove_driver(afu_dev);
+ if (ret)
+ return ret;
+
+ TAILQ_REMOVE(&ifpga_afu_dev_list, afu_dev, next);
+
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(afu_dev);
+ return 0;
+
+}
+
+static struct rte_device *
+ifpga_find_device(const struct rte_device *start,
+ rte_dev_cmp_t cmp, const void *data)
+{
+ struct rte_afu_device *afu_dev;
+
+ TAILQ_FOREACH(afu_dev, &ifpga_afu_dev_list, next) {
+ if (start && &afu_dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&afu_dev->device, data) == 0)
+ return &afu_dev->device;
+ }
+
+ return NULL;
+}
+
+static int
+ifpga_parse(const char *name, void *addr)
+{
+ int *out = addr;
+ struct rte_rawdev *rawdev = NULL;
+ char rawdev_name[RTE_RAWDEV_NAME_MAX_LEN];
+ char *c1 = NULL;
+ char *c2 = NULL;
+ int port = IFPGA_BUS_DEV_PORT_MAX;
+ char str_port[8];
+ int str_port_len = 0;
+ int ret;
+
+ memset(str_port, 0, 8);
+	c1 = strchr(name, '|');
+	if (c1 == NULL)
+		return 1;	/* no "port|rawdev" separator: not an ifpga name */
+	str_port_len = c1 - name;
+	c2 = c1 + 1;
+
+ if (str_port_len < 8 &&
+ str_port_len > 0) {
+ memcpy(str_port, name, str_port_len);
+ ret = sscanf(str_port, "%d", &port);
+ if (ret == -1)
+ return 0;
+ }
+
+ memset(rawdev_name, 0, sizeof(rawdev_name));
+ snprintf(rawdev_name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%s", c2);
+ rawdev = rte_rawdev_pmd_get_named_dev(rawdev_name);
+
+ if ((port < IFPGA_BUS_DEV_PORT_MAX) &&
+ rawdev &&
+ (addr != NULL))
+ *out = port;
+
+ if ((port < IFPGA_BUS_DEV_PORT_MAX) &&
+ rawdev)
+ return 0;
+ else
+ return 1;
+}
+
+static struct rte_bus rte_ifpga_bus = {
+ .scan = ifpga_scan,
+ .probe = ifpga_probe,
+ .find_device = ifpga_find_device,
+ .plug = ifpga_plug,
+ .unplug = ifpga_unplug,
+ .parse = ifpga_parse,
+};
+
+RTE_REGISTER_BUS(IFPGA_BUS_NAME, rte_ifpga_bus);
+
+RTE_INIT(ifpga_init_log)
+{
+ ifpga_bus_logtype = rte_log_register("bus.ifpga");
+ if (ifpga_bus_logtype >= 0)
+ rte_log_set_level(ifpga_bus_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/bus/ifpga/ifpga_common.c b/drivers/bus/ifpga/ifpga_common.c
new file mode 100644
index 00000000..78e2eaee
--- /dev/null
+++ b/drivers/bus/ifpga/ifpga_common.c
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include <rte_errno.h>
+#include <rte_bus.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+
+#include "rte_bus_ifpga.h"
+#include "ifpga_logs.h"
+#include "ifpga_common.h"
+
+int rte_ifpga_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(char **)extra_args = strdup(value);
+
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
+ return 0;
+}
+int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(int *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+int ifpga_get_integer64_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint64_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+int ifpga_get_unsigned_long(const char *str, int base)
+{
+ unsigned long num;
+ char *end = NULL;
+
+ errno = 0;
+
+ num = strtoul(str, &end, base);
+ if ((str[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
+ return -1;
+
+ return num;
+}
+
+int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
+ const struct rte_afu_id *afu_id1)
+{
+ if ((afu_id0->uuid.uuid_low == afu_id1->uuid.uuid_low) &&
+ (afu_id0->uuid.uuid_high == afu_id1->uuid.uuid_high) &&
+ (afu_id0->port == afu_id1->port)) {
+ return 0;
+	}
+
+	return 1;
+}
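
ifpga_get_unsigned_long() shows the full strtoul error-checking recipe: reset errno, require a non-empty input, and require the end pointer to land on the terminator so trailing junk is rejected. A usage sketch with the wrapper body copied from above and example inputs:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int get_unsigned_long(const char *str, int base)
    {
        unsigned long num;
        char *end = NULL;

        errno = 0;
        num = strtoul(str, &end, base);
        if ((str[0] == '\0') || (end == NULL) || (*end != '\0') ||
            (errno != 0))
            return -1;
        return num;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               get_unsigned_long("42", 10),    /* 42 */
               get_unsigned_long("0x1f", 16),  /* 31 */
               get_unsigned_long("12ab", 10)); /* -1: trailing junk */
        return 0;
    }
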
diff --git a/drivers/bus/ifpga/ifpga_common.h b/drivers/bus/ifpga/ifpga_common.h
new file mode 100644
index 00000000..f9254b9d
--- /dev/null
+++ b/drivers/bus/ifpga/ifpga_common.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMMON_H_
+#define _IFPGA_COMMON_H_
+
+int rte_ifpga_get_string_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int rte_ifpga_get_integer32_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int ifpga_get_integer64_arg(const char *key __rte_unused,
+ const char *value, void *extra_args);
+int ifpga_get_unsigned_long(const char *str, int base);
+int ifpga_afu_id_cmp(const struct rte_afu_id *afu_id0,
+ const struct rte_afu_id *afu_id1);
+
+#endif /* _IFPGA_COMMON_H_ */
diff --git a/drivers/bus/ifpga/ifpga_logs.h b/drivers/bus/ifpga/ifpga_logs.h
new file mode 100644
index 00000000..873e0a4f
--- /dev/null
+++ b/drivers/bus/ifpga/ifpga_logs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_LOGS_H_
+#define _IFPGA_LOGS_H_
+
+#include <rte_log.h>
+
+extern int ifpga_bus_logtype;
+
+#define IFPGA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_BUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifpga_bus_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#define IFPGA_BUS_FUNC_TRACE() IFPGA_BUS_LOG(DEBUG, ">>")
+
+#define IFPGA_BUS_DEBUG(fmt, args...) \
+ IFPGA_BUS_LOG(DEBUG, fmt, ## args)
+#define IFPGA_BUS_INFO(fmt, args...) \
+ IFPGA_BUS_LOG(INFO, fmt, ## args)
+#define IFPGA_BUS_ERR(fmt, args...) \
+ IFPGA_BUS_LOG(ERR, fmt, ## args)
+#define IFPGA_BUS_WARN(fmt, args...) \
+ IFPGA_BUS_LOG(WARNING, fmt, ## args)
+
+#endif /* _IFPGA_LOGS_H_ */
diff --git a/drivers/bus/ifpga/meson.build b/drivers/bus/ifpga/meson.build
new file mode 100644
index 00000000..c9b08c86
--- /dev/null
+++ b/drivers/bus/ifpga/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2018 Intel Corporation
+
+deps += ['pci', 'kvargs', 'rawdev']
+install_headers('rte_bus_ifpga.h')
+sources = files('ifpga_common.c', 'ifpga_bus.c')
+
+allow_experimental_apis = true
diff --git a/drivers/bus/ifpga/rte_bus_ifpga.h b/drivers/bus/ifpga/rte_bus_ifpga.h
new file mode 100644
index 00000000..51d5ae0d
--- /dev/null
+++ b/drivers/bus/ifpga/rte_bus_ifpga.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _RTE_BUS_IFPGA_H_
+#define _RTE_BUS_IFPGA_H_
+
+/**
+ * @file
+ *
+ * RTE Intel FPGA Bus Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_bus.h>
+#include <rte_pci.h>
+
+/** Name of Intel FPGA Bus */
+#define IFPGA_BUS_NAME ifpga
+
+/* Forward declarations */
+struct rte_afu_device;
+struct rte_afu_driver;
+
+/** Double linked list of Intel FPGA AFU device. */
+TAILQ_HEAD(ifpga_afu_dev_list, rte_afu_device);
+/** Double linked list of Intel FPGA AFU device drivers. */
+TAILQ_HEAD(ifpga_afu_drv_list, rte_afu_driver);
+
+#define IFPGA_BUS_BITSTREAM_PATH_MAX_LEN 256
+
+struct rte_afu_uuid {
+ uint64_t uuid_low;
+ uint64_t uuid_high;
+} __attribute__ ((packed));
+
+#define IFPGA_BUS_DEV_PORT_MAX 4
+
+/**
+ * A structure describing an ID for an AFU driver. Each driver provides a
+ * table of these IDs for each device that it supports.
+ */
+struct rte_afu_id {
+ struct rte_afu_uuid uuid;
+ int port; /**< port number */
+} __attribute__ ((packed));
+
+/**
+ * A structure describing the PR (Partial Reconfiguration)
+ * configuration for an AFU.
+ */
+struct rte_afu_pr_conf {
+ struct rte_afu_id afu_id;
+ int pr_enable;
+ char bs_path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN];
+};
+
+#define AFU_PRI_STR_SIZE (PCI_PRI_STR_SIZE + 8)
+
+/**
+ * A structure describing an AFU device.
+ */
+struct rte_afu_device {
+ TAILQ_ENTRY(rte_afu_device) next; /**< Next in device list. */
+ struct rte_device device; /**< Inherit core device */
+ struct rte_rawdev *rawdev; /**< Point Rawdev */
+ struct rte_afu_id id; /**< AFU id within FPGA. */
+ uint32_t num_region; /**< number of regions found */
+ struct rte_mem_resource mem_resource[PCI_MAX_RESOURCE];
+ /**< AFU Memory Resource */
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_afu_driver *driver; /**< Associated driver */
+ char path[IFPGA_BUS_BITSTREAM_PATH_MAX_LEN];
+} __attribute__ ((packed));
+
+/**
+ * @internal
+ * Helper macro for drivers that need to convert to struct rte_afu_device.
+ */
+#define RTE_DEV_TO_AFU(ptr) \
+ container_of(ptr, struct rte_afu_device, device)
+
+#define RTE_DRV_TO_AFU_CONST(ptr) \
+ container_of(ptr, const struct rte_afu_driver, driver)
+
+/**
+ * Initialization function for the driver called during FPGA BUS probing.
+ */
+typedef int (afu_probe_t)(struct rte_afu_device *);
+
+/**
+ * Uninitialization function for the driver called during hotplugging.
+ */
+typedef int (afu_remove_t)(struct rte_afu_device *);
+
+/**
+ * A structure describing an AFU device driver.
+ */
+struct rte_afu_driver {
+ TAILQ_ENTRY(rte_afu_driver) next; /**< Next afu driver. */
+ struct rte_driver driver; /**< Inherit core driver. */
+ afu_probe_t *probe; /**< Device Probe function. */
+ afu_remove_t *remove; /**< Device Remove function. */
+ const struct rte_afu_uuid *id_table; /**< AFU uuid within FPGA. */
+};
+
+static inline const char *
+rte_ifpga_device_name(const struct rte_afu_device *afu)
+{
+ if (afu && afu->device.name)
+ return afu->device.name;
+ return NULL;
+}
+
+/**
+ * Register an ifpga AFU device driver.
+ *
+ * @param driver
+ * A pointer to a rte_afu_driver structure describing the driver
+ * to be registered.
+ */
+void rte_ifpga_driver_register(struct rte_afu_driver *driver);
+
+/**
+ * Unregister an ifpga AFU device driver.
+ *
+ * @param driver
+ * A pointer to a rte_afu_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_ifpga_driver_unregister(struct rte_afu_driver *driver);
+
+#define RTE_PMD_REGISTER_AFU(nm, afudrv)\
+static const char *afudrvinit_ ## nm ## _alias;\
+RTE_INIT(afudrvinitfn_ ##afudrv)\
+{\
+ (afudrv).driver.name = RTE_STR(nm);\
+ (afudrv).driver.alias = afudrvinit_ ## nm ## _alias;\
+ rte_ifpga_driver_register(&afudrv);\
+} \
+RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
+
+#define RTE_PMD_REGISTER_AFU_ALIAS(nm, alias)\
+static const char *afudrvinit_ ## nm ## _alias = RTE_STR(alias)
+
+#endif /* _RTE_BUS_IFPGA_H_ */
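
A usage sketch of the registration macros declared in this header: how an AFU PMD would hook itself into the ifpga bus. The UUID values and names are placeholders; the id_table ends when both uuid words are zero, which is the terminator rte_afu_match() checks for:

    #include <rte_bus_ifpga.h>

    static int my_afu_probe(struct rte_afu_device *afu_dev)
    {
        (void)afu_dev;
        return 0;    /* claim the device */
    }

    static int my_afu_remove(struct rte_afu_device *afu_dev)
    {
        (void)afu_dev;
        return 0;
    }

    static const struct rte_afu_uuid my_afu_ids[] = {
        { .uuid_low = 0x1111222233334444ULL,
          .uuid_high = 0x5555666677778888ULL },
        { 0, 0 }    /* table terminator, per rte_afu_match() */
    };

    static struct rte_afu_driver my_afu_drv = {
        .probe = my_afu_probe,
        .remove = my_afu_remove,
        .id_table = my_afu_ids,
    };

    RTE_PMD_REGISTER_AFU(net_my_afu, my_afu_drv);
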
diff --git a/drivers/bus/ifpga/rte_bus_ifpga_version.map b/drivers/bus/ifpga/rte_bus_ifpga_version.map
new file mode 100644
index 00000000..a0279797
--- /dev/null
+++ b/drivers/bus/ifpga/rte_bus_ifpga_version.map
@@ -0,0 +1,10 @@
+DPDK_18.05 {
+ global:
+
+ rte_ifpga_get_integer32_arg;
+ rte_ifpga_get_string_arg;
+ rte_ifpga_driver_register;
+ rte_ifpga_driver_unregister;
+
+ local: *;
+};
diff --git a/drivers/bus/meson.build b/drivers/bus/meson.build
index c6af500e..80de2d91 100644
--- a/drivers/bus/meson.build
+++ b/drivers/bus/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['pci', 'vdev']
+drivers = ['dpaa', 'fslmc', 'ifpga', 'pci', 'vdev', 'vmbus']
std_deps = ['eal']
config_flag_fmt = 'RTE_LIBRTE_@0@_BUS'
driver_name_fmt = 'rte_bus_@0@'
diff --git a/drivers/bus/pci/Makefile b/drivers/bus/pci/Makefile
index f3df1c4c..cf373068 100644
--- a/drivers/bus/pci/Makefile
+++ b/drivers/bus/pci/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -37,6 +9,7 @@ EXPORT_MAP := rte_bus_pci_version.map
CFLAGS := -I$(SRCDIR) $(CFLAGS)
CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
SYSTEM := linux
@@ -49,6 +22,9 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/pci/$(SYSTEM)
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+# memseg walk is not part of stable API yet
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_pci
diff --git a/drivers/bus/pci/bsd/Makefile b/drivers/bus/pci/bsd/Makefile
index 4450913e..c1b54c05 100644
--- a/drivers/bus/pci/bsd/Makefile
+++ b/drivers/bus/pci/bsd/Makefile
@@ -1,32 +1,4 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
SRCS += pci.c
diff --git a/drivers/bus/pci/linux/Makefile b/drivers/bus/pci/linux/Makefile
index 77c5f970..96ea1d54 100644
--- a/drivers/bus/pci/linux/Makefile
+++ b/drivers/bus/pci/linux/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
SRCS += pci.c
SRCS += pci_uio.c
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index abde6411..04648ac9 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -15,7 +15,6 @@
#include <rte_memcpy.h>
#include <rte_vfio.h>
-#include "eal_private.h"
#include "eal_filesystem.h"
#include "private.h"
@@ -33,7 +32,8 @@
extern struct rte_pci_bus rte_pci_bus;
static int
-pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
+pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
+ size_t len)
{
int count;
char path[PATH_MAX];
@@ -54,7 +54,7 @@ pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
name = strrchr(path, '/');
if (name) {
- strncpy(dri_name, name + 1, strlen(name + 1) + 1);
+ strlcpy(dri_name, name + 1, len);
return 0;
}
@@ -116,24 +116,28 @@ rte_pci_unmap_device(struct rte_pci_device *dev)
}
}
-void *
-pci_find_max_end_va(void)
+static int
+find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
- const struct rte_memseg *seg = rte_eal_get_physmem_layout();
- const struct rte_memseg *last = seg;
- unsigned i = 0;
+ size_t sz = msl->memseg_arr.len * msl->page_sz;
+ void *end_va = RTE_PTR_ADD(msl->base_va, sz);
+ void **max_va = arg;
- for (i = 0; i < RTE_MAX_MEMSEG; i++, seg++) {
- if (seg->addr == NULL)
- break;
+ if (*max_va < end_va)
+ *max_va = end_va;
+ return 0;
+}
- if (seg->addr > last->addr)
- last = seg;
+void *
+pci_find_max_end_va(void)
+{
+ void *va = NULL;
- }
- return RTE_PTR_ADD(last->addr, last->len);
+ rte_memseg_list_walk(find_max_end_va, &va);
+ return va;
}
+
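(Editor's note: a minimal usage sketch of the experimental memseg-walk API adopted above — the reason ALLOW_EXPERIMENTAL_API is defined in the Makefile earlier in this patch. It sums the VA span of all memseg lists; only struct fields already referenced in this patch are used, and the helper names are illustrative.)

	#include <rte_memory.h>

	/* Invoked once per memseg list; a non-zero return stops the walk. */
	static int
	sum_msl_span(const struct rte_memseg_list *msl, void *arg)
	{
		size_t *total = arg;

		*total += msl->memseg_arr.len * msl->page_sz;
		return 0;
	}

	static size_t
	total_memseg_va_span(void)
	{
		size_t total = 0;

		rte_memseg_list_walk(sum_msl_span, &total);
		return total;
	}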
/* parse one line of the "resource" sysfs file (note that the 'line'
* string is modified)
*/
@@ -310,7 +314,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
/* parse driver */
snprintf(filename, sizeof(filename), "%s/driver", dirname);
- ret = pci_get_kernel_driver_by_path(filename, driver);
+ ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
if (ret < 0) {
RTE_LOG(ERR, EAL, "Fail to get kernel driver\n");
free(dev);
diff --git a/drivers/bus/pci/linux/pci_uio.c b/drivers/bus/pci/linux/pci_uio.c
index d423e4bb..a7c14421 100644
--- a/drivers/bus/pci/linux/pci_uio.c
+++ b/drivers/bus/pci/linux/pci_uio.c
@@ -282,22 +282,19 @@ int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
struct mapped_pci_resource *uio_res, int map_idx)
{
- int fd;
+ int fd = -1;
char devname[PATH_MAX];
void *mapaddr;
struct rte_pci_addr *loc;
struct pci_map *maps;
+ int wc_activate = 0;
+
+ if (dev->driver != NULL)
+ wc_activate = dev->driver->drv_flags & RTE_PCI_DRV_WC_ACTIVATE;
loc = &dev->addr;
maps = uio_res->maps;
- /* update devname for mmap */
- snprintf(devname, sizeof(devname),
- "%s/" PCI_PRI_FMT "/resource%d",
- rte_pci_get_sysfs_path(),
- loc->domain, loc->bus, loc->devid,
- loc->function, res_idx);
-
/* allocate memory to keep path */
 maps[map_idx].path = rte_malloc(NULL, sizeof(devname), 0);
if (maps[map_idx].path == NULL) {
@@ -309,11 +306,37 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
/*
* open resource file, to mmap it
*/
- fd = open(devname, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
+ if (wc_activate) {
+ /* update devname for mmap */
+ snprintf(devname, sizeof(devname),
+ "%s/" PCI_PRI_FMT "/resource%d_wc",
+ rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ if (access(devname, R_OK|W_OK) != -1) {
+ fd = open(devname, O_RDWR);
+ if (fd < 0)
+ RTE_LOG(INFO, EAL, "%s cannot be mapped. "
+ "Falling back to non-prefetchable mode.\n",
+ devname);
+ }
+ }
+
+ if (!wc_activate || fd < 0) {
+ snprintf(devname, sizeof(devname),
+ "%s/" PCI_PRI_FMT "/resource%d",
+ rte_pci_get_sysfs_path(),
+ loc->domain, loc->bus, loc->devid,
+ loc->function, res_idx);
+
+ /* then try to map resource file */
+ fd = open(devname, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
devname, strerror(errno));
- goto error;
+ goto error;
+ }
}
/* try mapping somewhere close to the end of hugepages */
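(Editor's note: a hedged sketch of how a PMD opts in to the write-combined mapping path added above, via the RTE_PCI_DRV_WC_ACTIVATE flag introduced later in this patch in rte_bus_pci.h; the driver and callback names are hypothetical.)

	static struct rte_pci_driver example_pmd = {
		.id_table = example_pci_id_map,	/* hypothetical id table */
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_WC_ACTIVATE,
		.probe = example_probe,
		.remove = example_remove,
	};

With the flag set, pci_uio_map_resource_by_index() first tries the
resource%d_wc sysfs file and falls back to the regular resource%d file
when the WC node is absent or cannot be opened.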
diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index aeeaa9ed..686386d6 100644
--- a/drivers/bus/pci/linux/pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -584,6 +584,9 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
dev->mem_resource[i].addr = maps[i].addr;
}
+ /* we need to save vfio_dev_fd so it can be used during release */
+ dev->intr_handle.vfio_dev_fd = vfio_dev_fd;
+
return 0;
err_vfio_dev_fd:
close(vfio_dev_fd);
@@ -603,22 +606,58 @@ pci_vfio_map_resource(struct rte_pci_device *dev)
return pci_vfio_map_resource_secondary(dev);
}
-int
-pci_vfio_unmap_resource(struct rte_pci_device *dev)
+static struct mapped_pci_resource *
+find_and_unmap_vfio_resource(struct mapped_pci_res_list *vfio_res_list,
+ struct rte_pci_device *dev,
+ const char *pci_addr)
+{
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct pci_map *maps;
+ int i;
+
+ /* Get vfio_res */
+ TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
+ if (rte_pci_addr_cmp(&vfio_res->pci_addr, &dev->addr))
+ continue;
+ break;
+ }
+
+ if (vfio_res == NULL)
+ return vfio_res;
+
+ RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
+ pci_addr);
+
+ maps = vfio_res->maps;
+ for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+
+ /*
+ * We do not need to be aware of MSI-X table BAR mappings here
+ * as we were when mapping; using the current maps array is enough.
+ */
+ if (maps[i].addr) {
+ RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
+ pci_addr, maps[i].addr);
+ pci_unmap_resource(maps[i].addr, maps[i].size);
+ }
+ }
+
+ return vfio_res;
+}
+
+static int
+pci_vfio_unmap_resource_primary(struct rte_pci_device *dev)
{
char pci_addr[PATH_MAX] = {0};
struct rte_pci_addr *loc = &dev->addr;
- int i, ret;
struct mapped_pci_resource *vfio_res = NULL;
struct mapped_pci_res_list *vfio_res_list;
-
- struct pci_map *maps;
+ int ret;
/* store PCI address string */
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
loc->domain, loc->bus, loc->devid, loc->function);
-
if (close(dev->intr_handle.fd) < 0) {
RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
pci_addr);
@@ -639,13 +678,10 @@ pci_vfio_unmap_resource(struct rte_pci_device *dev)
return ret;
}
- vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
- /* Get vfio_res */
- TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
- if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
- continue;
- break;
- }
+ vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);
+
/* if we haven't found our tailq entry, something's wrong */
if (vfio_res == NULL) {
RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
@@ -653,30 +689,56 @@ pci_vfio_unmap_resource(struct rte_pci_device *dev)
return -1;
}
- /* unmap BARs */
- maps = vfio_res->maps;
+ TAILQ_REMOVE(vfio_res_list, vfio_res, next);
- RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
- pci_addr);
- for (i = 0; i < (int) vfio_res->nb_maps; i++) {
+ return 0;
+}
- /*
- * We do not need to be aware of MSI-X table BAR mappings as
- * when mapping. Just using current maps array is enough
- */
- if (maps[i].addr) {
- RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
- pci_addr, maps[i].addr);
- pci_unmap_resource(maps[i].addr, maps[i].size);
- }
+static int
+pci_vfio_unmap_resource_secondary(struct rte_pci_device *dev)
+{
+ char pci_addr[PATH_MAX] = {0};
+ struct rte_pci_addr *loc = &dev->addr;
+ struct mapped_pci_resource *vfio_res = NULL;
+ struct mapped_pci_res_list *vfio_res_list;
+ int ret;
+
+ /* store PCI address string */
+ snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
+
+ ret = rte_vfio_release_device(rte_pci_get_sysfs_path(), pci_addr,
+ dev->intr_handle.vfio_dev_fd);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot release device\n", __func__);
+ return ret;
}
- TAILQ_REMOVE(vfio_res_list, vfio_res, next);
+ vfio_res_list =
+ RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
+ vfio_res = find_and_unmap_vfio_resource(vfio_res_list, dev, pci_addr);
+
+ /* if we haven't found our tailq entry, something's wrong */
+ if (vfio_res == NULL) {
+ RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
+ pci_addr);
+ return -1;
+ }
return 0;
}
int
+pci_vfio_unmap_resource(struct rte_pci_device *dev)
+{
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return pci_vfio_unmap_resource_primary(dev);
+ else
+ return pci_vfio_unmap_resource_secondary(dev);
+}
+
+int
pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
struct rte_pci_ioport *p)
{
diff --git a/drivers/bus/pci/meson.build b/drivers/bus/pci/meson.build
index 12756a4d..72939e59 100644
--- a/drivers/bus/pci/meson.build
+++ b/drivers/bus/pci/meson.build
@@ -14,3 +14,6 @@ else
sources += files('bsd/pci.c')
includes += include_directories('bsd')
endif
+
+# memseg walk is not part of stable API yet
+allow_experimental_apis = true
diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c
index 2a00f365..7736b3f9 100644
--- a/drivers/bus/pci/pci_common.c
+++ b/drivers/bus/pci/pci_common.c
@@ -26,6 +26,7 @@
#include "private.h"
+
extern struct rte_pci_bus rte_pci_bus;
#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
@@ -45,12 +46,8 @@ static struct rte_devargs *pci_devargs_lookup(struct rte_pci_device *dev)
{
struct rte_devargs *devargs;
struct rte_pci_addr addr;
- struct rte_bus *pbus;
- pbus = rte_bus_find_by_name("pci");
- TAILQ_FOREACH(devargs, &devargs_list, next) {
- if (devargs->bus != pbus)
- continue;
+ RTE_EAL_DEVARGS_FOREACH("pci", devargs) {
devargs->bus->parse(devargs->name, &addr);
if (!rte_pci_addr_cmp(&dev->addr, &addr))
return devargs;
@@ -73,7 +70,7 @@ pci_name_set(struct rte_pci_device *dev)
*/
if (devargs != NULL)
/* If an rte_devargs exists, the generic rte_device uses the
- * given name as its namea
+ * given name as its name.
*/
dev->device.name = dev->device.devargs->name;
else
@@ -159,17 +156,24 @@ rte_pci_probe_one_driver(struct rte_pci_driver *dr,
RTE_LOG(INFO, EAL, " probe driver: %x:%x %s\n", dev->id.vendor_id,
dev->id.device_id, dr->driver.name);
+ /*
+ * reference driver structure
+ * This needs to be before rte_pci_map_device(), as it enables to use
+ * driver flags for adjusting configuration.
+ */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
+
if (dr->drv_flags & RTE_PCI_DRV_NEED_MAPPING) {
/* map resources for devices that use igb_uio */
ret = rte_pci_map_device(dev);
- if (ret != 0)
+ if (ret != 0) {
+ dev->driver = NULL;
+ dev->device.driver = NULL;
return ret;
+ }
}
- /* reference driver structure */
- dev->driver = dr;
- dev->device.driver = &dr->driver;
-
/* call the driver probe() function */
ret = dr->probe(dr, dev);
if (ret) {
@@ -259,81 +263,6 @@ pci_probe_all_drivers(struct rte_pci_device *dev)
}
/*
- * Find the pci device specified by pci address, then invoke probe function of
- * the driver of the device.
- */
-int
-rte_pci_probe_one(const struct rte_pci_addr *addr)
-{
- struct rte_pci_device *dev = NULL;
-
- int ret = 0;
-
- if (addr == NULL)
- return -1;
-
- /* update current pci device in global list, kernel bindings might have
- * changed since last time we looked at it.
- */
- if (pci_update_device(addr) < 0)
- goto err_return;
-
- FOREACH_DEVICE_ON_PCIBUS(dev) {
- if (rte_pci_addr_cmp(&dev->addr, addr))
- continue;
-
- ret = pci_probe_all_drivers(dev);
- if (ret)
- goto err_return;
- return 0;
- }
- return -1;
-
-err_return:
- RTE_LOG(WARNING, EAL,
- "Requested device " PCI_PRI_FMT " cannot be used\n",
- addr->domain, addr->bus, addr->devid, addr->function);
- return -1;
-}
-
-/*
- * Detach device specified by its pci address.
- */
-int
-rte_pci_detach(const struct rte_pci_addr *addr)
-{
- struct rte_pci_device *dev = NULL;
- int ret = 0;
-
- if (addr == NULL)
- return -1;
-
- FOREACH_DEVICE_ON_PCIBUS(dev) {
- if (rte_pci_addr_cmp(&dev->addr, addr))
- continue;
-
- ret = rte_pci_detach_dev(dev);
- if (ret < 0)
- /* negative value is an error */
- goto err_return;
- if (ret > 0)
- /* positive value means driver doesn't support it */
- continue;
-
- rte_pci_remove_device(dev);
- free(dev);
- return 0;
- }
- return -1;
-
-err_return:
- RTE_LOG(WARNING, EAL, "Requested device " PCI_PRI_FMT
- " cannot be used\n", dev->addr.domain, dev->addr.bus,
- dev->addr.devid, dev->addr.function);
- return -1;
-}
-
-/*
* Scan the content of the PCI bus, and call the probe() function for
* all registered drivers that have a matching entry in its id_table
* for discovered devices.
@@ -449,7 +378,7 @@ rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
}
/* Remove a device from PCI bus */
-void
+static void
rte_pci_remove_device(struct rte_pci_device *pci_dev)
{
TAILQ_REMOVE(&rte_pci_bus.device_list, pci_dev, next);
@@ -459,17 +388,20 @@ static struct rte_device *
pci_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
const void *data)
{
- struct rte_pci_device *dev;
+ const struct rte_pci_device *pstart;
+ struct rte_pci_device *pdev;
- FOREACH_DEVICE_ON_PCIBUS(dev) {
- if (start && &dev->device == start) {
- start = NULL; /* starting point found */
- continue;
- }
- if (cmp(&dev->device, data) == 0)
- return &dev->device;
+ if (start != NULL) {
+ pstart = RTE_DEV_TO_PCI_CONST(start);
+ pdev = TAILQ_NEXT(pstart, next);
+ } else {
+ pdev = TAILQ_FIRST(&rte_pci_bus.device_list);
+ }
+ while (pdev != NULL) {
+ if (cmp(&pdev->device, data) == 0)
+ return &pdev->device;
+ pdev = TAILQ_NEXT(pdev, next);
}
-
return NULL;
}
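(Editor's note: pci_find_device() above consumes an rte_dev_cmp_t callback; a minimal sketch of such a comparator, matching a generic device by name — the helper name is illustrative.)

	#include <string.h>
	#include <rte_dev.h>

	/* rte_dev_cmp_t: return 0 to signal a match */
	static int
	cmp_dev_name(const struct rte_device *dev, const void *name)
	{
		return strcmp(dev->name, name);
	}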
diff --git a/drivers/bus/pci/private.h b/drivers/bus/pci/private.h
index 88fa587e..8ddd03e1 100644
--- a/drivers/bus/pci/private.h
+++ b/drivers/bus/pci/private.h
@@ -33,36 +33,6 @@ rte_pci_probe(void);
int rte_pci_scan(void);
/**
- * Probe the single PCI device.
- *
- * Scan the content of the PCI bus, and find the pci device specified by pci
- * address, then call the probe() function for registered driver that has a
- * matching entry in its id_table for discovered device.
- *
- * @param addr
- * The PCI Bus-Device-Function address to probe.
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_pci_probe_one(const struct rte_pci_addr *addr);
-
-/**
- * Close the single PCI device.
- *
- * Scan the content of the PCI bus, and find the pci device specified by pci
- * address, then call the remove() function for registered driver that has a
- * matching entry in its id_table for discovered device.
- *
- * @param addr
- * The PCI Bus-Device-Function address to close.
- * @return
- * - 0 on success.
- * - Negative on error.
- */
-int rte_pci_detach(const struct rte_pci_addr *addr);
-
-/**
* Find the name of a PCI device.
*/
void
@@ -94,16 +64,6 @@ void rte_pci_insert_device(struct rte_pci_device *exist_pci_dev,
struct rte_pci_device *new_pci_dev);
/**
- * Remove a PCI device from the PCI Bus. This sets to NULL the bus references
- * in the PCI device object as well as the generic device object.
- *
- * @param pci_device
- * PCI device to be removed from PCI Bus
- * @return void
- */
-void rte_pci_remove_device(struct rte_pci_device *pci_device);
-
-/**
* Update a pci device object by asking the kernel for the latest information.
*
* This function is private to EAL.
@@ -117,16 +77,6 @@ void rte_pci_remove_device(struct rte_pci_device *pci_device);
int pci_update_device(const struct rte_pci_addr *addr);
/**
- * Unbind kernel driver for this device
- *
- * This function is private to EAL.
- *
- * @return
- * 0 on success, negative on error
- */
-int pci_unbind_kernel_driver(struct rte_pci_device *dev);
-
-/**
* Map the PCI resource of a PCI device in virtual memory
*
* This function is private to EAL.
diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h
index 357afb91..0d1955ff 100644
--- a/drivers/bus/pci/rte_bus_pci.h
+++ b/drivers/bus/pci/rte_bus_pci.h
@@ -74,6 +74,9 @@ struct rte_pci_device {
*/
#define RTE_DEV_TO_PCI(ptr) container_of(ptr, struct rte_pci_device, device)
+#define RTE_DEV_TO_PCI_CONST(ptr) \
+ container_of(ptr, const struct rte_pci_device, device)
+
#define RTE_ETH_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
/** Any PCI device identifier (vendor, device, ...) */
@@ -132,6 +135,8 @@ struct rte_pci_bus {
/** Device needs PCI BAR mapping (done with either IGB_UIO or VFIO) */
#define RTE_PCI_DRV_NEED_MAPPING 0x0001
+/** Device needs PCI BAR mapping with enabled write combining (wc) */
+#define RTE_PCI_DRV_WC_ACTIVATE 0x0002
/** Device driver supports link state interrupt */
#define RTE_PCI_DRV_INTR_LSC 0x0008
/** Device driver supports device removal interrupt */
@@ -186,8 +191,7 @@ void rte_pci_register(struct rte_pci_driver *driver);
/** Helper for PCI device registration from driver (eth, crypto) instance */
#define RTE_PMD_REGISTER_PCI(nm, pci_drv) \
-RTE_INIT(pciinitfn_ ##nm); \
-static void pciinitfn_ ##nm(void) \
+RTE_INIT(pciinitfn_ ##nm) \
{\
(pci_drv).driver.name = RTE_STR(nm);\
rte_pci_register(&pci_drv); \
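(Editor's note: with the reworked one-line RTE_INIT form, PMD registration stays a single macro invocation; a usage sketch with hypothetical names.)

	/* Expands to a constructor that sets the driver name and calls
	 * rte_pci_register() at startup. */
	RTE_PMD_REGISTER_PCI(net_example, example_pmd);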
diff --git a/drivers/bus/vdev/Makefile b/drivers/bus/vdev/Makefile
index 24d424a3..bd0bb895 100644
--- a/drivers/bus/vdev/Makefile
+++ b/drivers/bus/vdev/Makefile
@@ -10,6 +10,7 @@ LIB = librte_bus_vdev.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
# versioning export map
EXPORT_MAP := rte_bus_vdev_version.map
diff --git a/drivers/bus/vdev/meson.build b/drivers/bus/vdev/meson.build
index 03cb87eb..2ee648b4 100644
--- a/drivers/bus/vdev/meson.build
+++ b/drivers/bus/vdev/meson.build
@@ -3,3 +3,5 @@
sources = files('vdev.c')
install_headers('rte_bus_vdev.h')
+
+allow_experimental_apis = true
diff --git a/drivers/bus/vdev/rte_bus_vdev.h b/drivers/bus/vdev/rte_bus_vdev.h
index f9d8a238..9ae3eaae 100644
--- a/drivers/bus/vdev/rte_bus_vdev.h
+++ b/drivers/bus/vdev/rte_bus_vdev.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#ifndef RTE_VDEV_H
@@ -53,6 +25,9 @@ struct rte_vdev_device {
#define RTE_DEV_TO_VDEV(ptr) \
container_of(ptr, struct rte_vdev_device, device)
+#define RTE_DEV_TO_VDEV_CONST(ptr) \
+ container_of(ptr, const struct rte_vdev_device, device)
+
static inline const char *
rte_vdev_device_name(const struct rte_vdev_device *dev)
{
@@ -111,9 +86,8 @@ void rte_vdev_register(struct rte_vdev_driver *driver);
void rte_vdev_unregister(struct rte_vdev_driver *driver);
#define RTE_PMD_REGISTER_VDEV(nm, vdrv)\
-RTE_INIT(vdrvinitfn_ ##vdrv);\
static const char *vdrvinit_ ## nm ## _alias;\
-static void vdrvinitfn_ ##vdrv(void)\
+RTE_INIT(vdrvinitfn_ ##vdrv)\
{\
(vdrv).driver.name = RTE_STR(nm);\
(vdrv).driver.alias = vdrvinit_ ## nm ## _alias;\
diff --git a/drivers/bus/vdev/vdev.c b/drivers/bus/vdev/vdev.c
index e4bc7246..6139dd55 100644
--- a/drivers/bus/vdev/vdev.c
+++ b/drivers/bus/vdev/vdev.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#include <string.h>
@@ -46,11 +18,14 @@
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_spinlock.h>
+#include <rte_string_fns.h>
#include <rte_errno.h>
#include "rte_bus_vdev.h"
#include "vdev_logs.h"
+#define VDEV_MP_KEY "bus_vdev_mp"
+
int vdev_logtype_bus;
/* Forward declare to access virtual bus name */
@@ -61,6 +36,10 @@ TAILQ_HEAD(vdev_device_list, rte_vdev_device);
static struct vdev_device_list vdev_device_list =
TAILQ_HEAD_INITIALIZER(vdev_device_list);
+/* The lock needs to be recursive because a vdev can manage another vdev. */
+static rte_spinlock_recursive_t vdev_device_list_lock =
+ RTE_SPINLOCK_RECURSIVE_INITIALIZER;
+
struct vdev_driver_list vdev_driver_list =
TAILQ_HEAD_INITIALIZER(vdev_driver_list);
@@ -165,7 +144,7 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev)
name = rte_vdev_device_name(dev);
- VDEV_LOG(DEBUG, "Search driver %s to probe device %s\n", name,
+ VDEV_LOG(DEBUG, "Search driver %s to probe device %s", name,
rte_vdev_device_name(dev));
if (vdev_parse(name, &driver))
@@ -177,6 +156,7 @@ vdev_probe_all_drivers(struct rte_vdev_device *dev)
return ret;
}
+/* The caller is responsible for thread safety. */
static struct rte_vdev_device *
find_vdev(const char *name)
{
@@ -188,7 +168,7 @@ find_vdev(const char *name)
TAILQ_FOREACH(dev, &vdev_device_list, next) {
const char *devname = rte_vdev_device_name(dev);
- if (!strncmp(devname, name, strlen(name)))
+ if (!strcmp(devname, name))
return dev;
}
@@ -221,8 +201,8 @@ alloc_devargs(const char *name, const char *args)
return devargs;
}
-int
-rte_vdev_init(const char *name, const char *args)
+static int
+insert_vdev(const char *name, const char *args, struct rte_vdev_device **p_dev)
{
struct rte_vdev_device *dev;
struct rte_devargs *devargs;
@@ -231,10 +211,6 @@ rte_vdev_init(const char *name, const char *args)
if (name == NULL)
return -EINVAL;
- dev = find_vdev(name);
- if (dev)
- return -EEXIST;
-
devargs = alloc_devargs(name, args);
if (!devargs)
return -ENOMEM;
@@ -249,18 +225,18 @@ rte_vdev_init(const char *name, const char *args)
dev->device.numa_node = SOCKET_ID_ANY;
dev->device.name = devargs->name;
- ret = vdev_probe_all_drivers(dev);
- if (ret) {
- if (ret > 0)
- VDEV_LOG(ERR, "no driver found for %s\n", name);
+ if (find_vdev(name)) {
+ ret = -EEXIST;
goto fail;
}
- TAILQ_INSERT_TAIL(&devargs_list, devargs, next);
-
TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
- return 0;
+ rte_devargs_insert(devargs);
+
+ if (p_dev)
+ *p_dev = dev;
+ return 0;
fail:
free(devargs->args);
free(devargs);
@@ -268,6 +244,31 @@ fail:
return ret;
}
+int
+rte_vdev_init(const char *name, const char *args)
+{
+ struct rte_vdev_device *dev;
+ struct rte_devargs *devargs;
+ int ret;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ ret = insert_vdev(name, args, &dev);
+ if (ret == 0) {
+ ret = vdev_probe_all_drivers(dev);
+ if (ret) {
+ if (ret > 0)
+ VDEV_LOG(ERR, "no driver found for %s", name);
+ /* If probe fails, remove the device from the vdev list */
+ devargs = dev->device.devargs;
+ TAILQ_REMOVE(&vdev_device_list, dev, next);
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(dev);
+ }
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ return ret;
+}
+
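(Editor's note: a usage sketch of the hot-plug entry points above; rte_vdev_init() now inserts the device under the recursive list lock and rolls back on probe failure. The device name and arguments below are illustrative.)

	if (rte_vdev_init("net_null0", "size=64,copy=0") == 0) {
		/* ... use the device ... */
		rte_vdev_uninit("net_null0");
	}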
static int
vdev_remove_driver(struct rte_vdev_device *dev)
{
@@ -275,7 +276,7 @@ vdev_remove_driver(struct rte_vdev_device *dev)
const struct rte_vdev_driver *driver;
if (!dev->device.driver) {
- VDEV_LOG(DEBUG, "no driver attach to device %s\n", name);
+ VDEV_LOG(DEBUG, "no driver attach to device %s", name);
return 1;
}
@@ -294,23 +295,98 @@ rte_vdev_uninit(const char *name)
if (name == NULL)
return -EINVAL;
- dev = find_vdev(name);
- if (!dev)
- return -ENOENT;
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
- devargs = dev->device.devargs;
+ dev = find_vdev(name);
+ if (!dev) {
+ ret = -ENOENT;
+ goto unlock;
+ }
ret = vdev_remove_driver(dev);
if (ret)
- return ret;
+ goto unlock;
TAILQ_REMOVE(&vdev_device_list, dev, next);
+ devargs = dev->device.devargs;
+ rte_devargs_remove(devargs->bus->name, devargs->name);
+ free(dev);
- TAILQ_REMOVE(&devargs_list, devargs, next);
+unlock:
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ return ret;
+}
+
+struct vdev_param {
+#define VDEV_SCAN_REQ 1
+#define VDEV_SCAN_ONE 2
+#define VDEV_SCAN_REP 3
+ int type;
+ int num;
+ char name[RTE_DEV_NAME_MAX_LEN];
+};
+
+static int vdev_plug(struct rte_device *dev);
+
+/**
+ * This function works as the action for both primary and secondary process
+ * for static vdev discovery when a secondary process is booting.
+ *
+ * step 1, secondary process sends a sync request to ask for vdevs in primary;
+ * step 2, primary process receives the request, and sends vdevs one by one;
+ * step 3, primary process sends back a reply, which indicates how many vdevs
+ * are sent.
+ */
+static int
+vdev_action(const struct rte_mp_msg *mp_msg, const void *peer)
+{
+ struct rte_vdev_device *dev;
+ struct rte_mp_msg mp_resp;
+ struct vdev_param *ou = (struct vdev_param *)&mp_resp.param;
+ const struct vdev_param *in = (const struct vdev_param *)mp_msg->param;
+ const char *devname;
+ int num;
+
+ strlcpy(mp_resp.name, VDEV_MP_KEY, sizeof(mp_resp.name));
+ mp_resp.len_param = sizeof(*ou);
+ mp_resp.num_fds = 0;
+
+ switch (in->type) {
+ case VDEV_SCAN_REQ:
+ ou->type = VDEV_SCAN_ONE;
+ ou->num = 1;
+ num = 0;
+
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ devname = rte_vdev_device_name(dev);
+ if (strlen(devname) == 0) {
+ VDEV_LOG(INFO, "vdev with no name is not sent");
+ continue;
+ }
+ VDEV_LOG(INFO, "send vdev, %s", devname);
+ strlcpy(ou->name, devname, RTE_DEV_NAME_MAX_LEN);
+ if (rte_mp_sendmsg(&mp_resp) < 0)
+ VDEV_LOG(ERR, "send vdev, %s, failed, %s",
+ devname, strerror(rte_errno));
+ num++;
+ }
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+
+ ou->type = VDEV_SCAN_REP;
+ ou->num = num;
+ if (rte_mp_reply(&mp_resp, peer) < 0)
+ VDEV_LOG(ERR, "Failed to reply a scan request");
+ break;
+ case VDEV_SCAN_ONE:
+ VDEV_LOG(INFO, "receive vdev, %s", in->name);
+ if (insert_vdev(in->name, NULL, NULL) < 0)
+ VDEV_LOG(ERR, "failed to add vdev, %s", in->name);
+ break;
+ default:
+ VDEV_LOG(ERR, "vdev cannot recognize this message");
+ }
- free(devargs->args);
- free(devargs);
- free(dev);
return 0;
}
@@ -321,13 +397,41 @@ vdev_scan(void)
struct rte_devargs *devargs;
struct vdev_custom_scan *custom_scan;
+ if (rte_mp_action_register(VDEV_MP_KEY, vdev_action) < 0 &&
+ rte_errno != EEXIST) {
+ VDEV_LOG(ERR, "Failed to add vdev mp action");
+ return -1;
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply;
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ struct vdev_param *req = (struct vdev_param *)mp_req.param;
+ struct vdev_param *resp;
+
+ strlcpy(mp_req.name, VDEV_MP_KEY, sizeof(mp_req.name));
+ mp_req.len_param = sizeof(*req);
+ mp_req.num_fds = 0;
+ req->type = VDEV_SCAN_REQ;
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ resp = (struct vdev_param *)mp_rep->param;
+ VDEV_LOG(INFO, "Received %d vdevs", resp->num);
+ } else
+ VDEV_LOG(ERR, "Failed to request vdev from primary");
+
+ /* Fall through to allow private vdevs in secondary process */
+ }
+
/* call custom scan callbacks if any */
rte_spinlock_lock(&vdev_custom_scan_lock);
TAILQ_FOREACH(custom_scan, &vdev_custom_scans, next) {
if (custom_scan->callback != NULL)
/*
* the callback should update devargs list
- * by calling rte_eal_devargs_insert() with
+ * by calling rte_devargs_insert() with
* devargs.bus = rte_bus_find_by_name("vdev");
* devargs.type = RTE_DEVTYPE_VIRTUAL;
* devargs.policy = RTE_DEV_WHITELISTED;
@@ -337,24 +441,27 @@ vdev_scan(void)
rte_spinlock_unlock(&vdev_custom_scan_lock);
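(Editor's note: a hedged sketch of a custom scan callback as described in the comment above; it builds a devargs entry for the vdev bus and inserts it with rte_devargs_insert(). All names are illustrative, and it would be registered via rte_vdev_add_custom_scan().)

	#include <stdlib.h>
	#include <rte_devargs.h>
	#include <rte_bus.h>
	#include <rte_string_fns.h>

	static void
	example_custom_scan(void *user_arg)
	{
		const char *name = user_arg;	/* device name supplied by caller */
		struct rte_devargs *devargs;

		devargs = calloc(1, sizeof(*devargs));
		if (devargs == NULL)
			return;
		devargs->bus = rte_bus_find_by_name("vdev");
		devargs->type = RTE_DEVTYPE_VIRTUAL;
		devargs->policy = RTE_DEV_WHITELISTED;
		strlcpy(devargs->name, name, sizeof(devargs->name));
		rte_devargs_insert(devargs);
	}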
/* for virtual devices we scan the devargs_list populated via cmdline */
- TAILQ_FOREACH(devargs, &devargs_list, next) {
-
- if (devargs->bus != &rte_vdev_bus)
- continue;
-
- dev = find_vdev(devargs->name);
- if (dev)
- continue;
+ RTE_EAL_DEVARGS_FOREACH("vdev", devargs) {
dev = calloc(1, sizeof(*dev));
if (!dev)
return -1;
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+
+ if (find_vdev(devargs->name)) {
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+ free(dev);
+ continue;
+ }
+
dev->device.devargs = devargs;
dev->device.numa_node = SOCKET_ID_ANY;
dev->device.name = devargs->name;
TAILQ_INSERT_TAIL(&vdev_device_list, dev, next);
+
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
}
return 0;
@@ -368,12 +475,16 @@ vdev_probe(void)
/* call the init function for each virtual device */
TAILQ_FOREACH(dev, &vdev_device_list, next) {
+ /* we don't use the vdev lock here, as it's only used in DPDK
+ * initialization; and we don't want to hold such a lock when
+ * we call each driver probe.
+ */
if (dev->device.driver)
continue;
if (vdev_probe_all_drivers(dev)) {
- VDEV_LOG(ERR, "failed to initialize %s device\n",
+ VDEV_LOG(ERR, "failed to initialize %s device",
rte_vdev_device_name(dev));
ret = -1;
}
@@ -386,17 +497,24 @@ static struct rte_device *
vdev_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
const void *data)
{
+ const struct rte_vdev_device *vstart;
struct rte_vdev_device *dev;
- TAILQ_FOREACH(dev, &vdev_device_list, next) {
- if (start && &dev->device == start) {
- start = NULL;
- continue;
- }
+ rte_spinlock_recursive_lock(&vdev_device_list_lock);
+ if (start != NULL) {
+ vstart = RTE_DEV_TO_VDEV_CONST(start);
+ dev = TAILQ_NEXT(vstart, next);
+ } else {
+ dev = TAILQ_FIRST(&vdev_device_list);
+ }
+ while (dev != NULL) {
if (cmp(&dev->device, data) == 0)
- return &dev->device;
+ break;
+ dev = TAILQ_NEXT(dev, next);
}
- return NULL;
+ rte_spinlock_recursive_unlock(&vdev_device_list_lock);
+
+ return dev ? &dev->device : NULL;
}
static int
diff --git a/drivers/bus/vmbus/Makefile b/drivers/bus/vmbus/Makefile
new file mode 100644
index 00000000..deee9dd1
--- /dev/null
+++ b/drivers/bus/vmbus/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_bus_vmbus.a
+LIBABIVER := 1
+EXPORT_MAP := rte_bus_vmbus_version.map
+
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),)
+SYSTEM := linux
+endif
+ifneq ($(CONFIG_RTE_EXEC_ENV_BSDAPP),)
+$(error "VMBUS not implemented for BSD yet")
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/$(SYSTEM)app/eal
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev
+
+include $(RTE_SDK)/drivers/bus/vmbus/$(SYSTEM)/Makefile
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) := $(addprefix $(SYSTEM)/,$(SRCS))
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_channel.c vmbus_bufring.c
+SRCS-$(CONFIG_RTE_LIBRTE_VMBUS) += vmbus_common_uio.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_bus_vmbus.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_VMBUS)-include += rte_vmbus_reg.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/bus/vmbus/linux/Makefile b/drivers/bus/vmbus/linux/Makefile
new file mode 100644
index 00000000..ef0d30b2
--- /dev/null
+++ b/drivers/bus/vmbus/linux/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+SRCS += vmbus_bus.c vmbus_uio.c
diff --git a/drivers/bus/vmbus/linux/vmbus_bus.c b/drivers/bus/vmbus/linux/vmbus_bus.c
new file mode 100644
index 00000000..52d6a3c0
--- /dev/null
+++ b/drivers/bus/vmbus/linux/vmbus_bus.c
@@ -0,0 +1,355 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include <rte_eal.h>
+#include <rte_uuid.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_devargs.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_bus_vmbus.h>
+
+#include "eal_filesystem.h"
+#include "private.h"
+
+/** Pathname of VMBUS devices directory. */
+#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
+
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
+/* Read sysfs file to get UUID */
+static int
+parse_sysfs_uuid(const char *filename, rte_uuid_t uu)
+{
+ char buf[BUFSIZ];
+ char *cp, *in = buf;
+ FILE *f;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ VMBUS_LOG(ERR, "cannot open sysfs value %s: %s",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ VMBUS_LOG(ERR, "cannot read sysfs value %s",
+ filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ cp = strchr(buf, '\n');
+ if (cp)
+ *cp = '\0';
+
+ /* strip { } notation */
+ if (buf[0] == '{') {
+ in = buf + 1;
+ cp = strchr(in, '}');
+ if (cp)
+ *cp = '\0';
+ }
+
+ if (rte_uuid_parse(in, uu) < 0) {
+ VMBUS_LOG(ERR, "%s %s not a valid UUID",
+ filename, buf);
+ return -1;
+ }
+
+ return 0;
+}
+
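(Editor's note: parse_sysfs_uuid() above relies on rte_uuid_parse() from the new rte_uuid.h; a minimal call sketch, reusing the example UUID from the sysfs comment later in this file.)

	rte_uuid_t id;

	if (rte_uuid_parse("7a08391f-f5a0-4ac0-9802-d13fd964f8df", id) == 0) {
		/* id now holds the binary UUID */
	}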
+static int
+get_sysfs_string(const char *filename, char *buf, size_t buflen)
+{
+ char *cp;
+ FILE *f;
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ VMBUS_LOG(ERR, "cannot open sysfs value %s:%s",
+ filename, strerror(errno));
+ return -1;
+ }
+
+ if (fgets(buf, buflen, f) == NULL) {
+ VMBUS_LOG(ERR, "cannot read sysfs value %s",
+ filename);
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+
+ /* remove trailing newline */
+ cp = memchr(buf, '\n', buflen);
+ if (cp)
+ *cp = '\0';
+
+ return 0;
+}
+
+static int
+vmbus_get_uio_dev(const struct rte_vmbus_device *dev,
+ char *dstbuf, size_t buflen)
+{
+ char dirname[PATH_MAX];
+ unsigned int uio_num;
+ struct dirent *e;
+ DIR *dir;
+
+ /* Assume recent kernel where uio is in uio/uioX */
+ snprintf(dirname, sizeof(dirname),
+ SYSFS_VMBUS_DEVICES "/%s/uio", dev->device.name);
+
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1; /* Not a UIO device */
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ const int prefix_len = 3;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", prefix_len) != 0)
+ continue;
+
+ /* try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + prefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + prefix_len)) {
+ snprintf(dstbuf, buflen, "%s/uio%u", dirname, uio_num);
+ break;
+ }
+ }
+ closedir(dir);
+
+ if (e == NULL)
+ return -1;
+
+ return uio_num;
+}
+
+/* Check map names with kernel names */
+static const char *map_names[VMBUS_MAX_RESOURCE] = {
+ [HV_TXRX_RING_MAP] = "txrx_rings",
+ [HV_INT_PAGE_MAP] = "int_page",
+ [HV_MON_PAGE_MAP] = "monitor_page",
+ [HV_RECV_BUF_MAP] = "recv:",
+ [HV_SEND_BUF_MAP] = "send:",
+};
+
+
+/* map the resources of a vmbus device in virtual memory */
+int
+rte_vmbus_map_device(struct rte_vmbus_device *dev)
+{
+ char uioname[PATH_MAX], filename[PATH_MAX];
+ char dirname[PATH_MAX], mapname[64];
+ int i;
+
+ dev->uio_num = vmbus_get_uio_dev(dev, uioname, sizeof(uioname));
+ if (dev->uio_num < 0) {
+ VMBUS_LOG(DEBUG, "Not managed by UIO driver, skipped");
+ return 1;
+ }
+
+ /* Extract resource value */
+ for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
+ struct rte_mem_resource *res = &dev->resource[i];
+ unsigned long len, gpad = 0;
+ char *cp;
+
+ snprintf(dirname, sizeof(dirname),
+ "%s/maps/map%d", uioname, i);
+
+ snprintf(filename, sizeof(filename),
+ "%s/name", dirname);
+
+ if (get_sysfs_string(filename, mapname, sizeof(mapname)) < 0) {
+ VMBUS_LOG(ERR, "could not read %s", filename);
+ return -1;
+ }
+
+ if (strncmp(map_names[i], mapname, strlen(map_names[i])) != 0) {
+ VMBUS_LOG(ERR,
+ "unexpected resource %s (expected %s)",
+ mapname, map_names[i]);
+ return -1;
+ }
+
+ snprintf(filename, sizeof(filename),
+ "%s/size", dirname);
+ if (eal_parse_sysfs_value(filename, &len) < 0) {
+ VMBUS_LOG(ERR,
+ "could not read %s", filename);
+ return -1;
+ }
+ res->len = len;
+
+ /* both send and receive buffers have gpad in name */
+ cp = memchr(mapname, ':', sizeof(mapname));
+ if (cp)
+ gpad = strtoul(cp+1, NULL, 0);
+
+ /* put the GPAD value in physical address */
+ res->phys_addr = gpad;
+ }
+
+ return vmbus_uio_map_resource(dev);
+}
+
+void
+rte_vmbus_unmap_device(struct rte_vmbus_device *dev)
+{
+ vmbus_uio_unmap_resource(dev);
+}
+
+/* Scan one vmbus sysfs entry, and fill the devices list from it. */
+static int
+vmbus_scan_one(const char *name)
+{
+ struct rte_vmbus_device *dev, *dev2;
+ char filename[PATH_MAX];
+ char dirname[PATH_MAX];
+ unsigned long tmp;
+
+ dev = calloc(1, sizeof(*dev));
+ if (dev == NULL)
+ return -1;
+
+ dev->device.name = strdup(name);
+ if (!dev->device.name)
+ goto error;
+
+ /* sysfs base directory
+ * /sys/bus/vmbus/devices/7a08391f-f5a0-4ac0-9802-d13fd964f8df
+ * or on older kernel
+ * /sys/bus/vmbus/devices/vmbus_1
+ */
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_VMBUS_DEVICES, name);
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device_id", dirname);
+ if (parse_sysfs_uuid(filename, dev->device_id) < 0)
+ goto error;
+
+ /* get device class */
+ snprintf(filename, sizeof(filename), "%s/class_id", dirname);
+ if (parse_sysfs_uuid(filename, dev->class_id) < 0)
+ goto error;
+
+ /* get relid */
+ snprintf(filename, sizeof(filename), "%s/id", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->relid = tmp;
+
+ /* get monitor id */
+ snprintf(filename, sizeof(filename), "%s/monitor_id", dirname);
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->monitor_id = tmp;
+
+ /* get numa node (if present) */
+ snprintf(filename, sizeof(filename), "%s/numa_node",
+ dirname);
+
+ if (access(filename, R_OK) == 0) {
+ if (eal_parse_sysfs_value(filename, &tmp) < 0)
+ goto error;
+ dev->device.numa_node = tmp;
+ } else {
+ /* if no NUMA support, set default to 0 */
+ dev->device.numa_node = SOCKET_ID_ANY;
+ }
+
+ /* device is valid, add in list (sorted) */
+ VMBUS_LOG(DEBUG, "Adding vmbus device %s", name);
+
+ TAILQ_FOREACH(dev2, &rte_vmbus_bus.device_list, next) {
+ int ret;
+
+ ret = rte_uuid_compare(dev->device_id, dev2->device_id);
+ if (ret > 0)
+ continue;
+
+ if (ret < 0) {
+ vmbus_insert_device(dev2, dev);
+ } else { /* already registered */
+ VMBUS_LOG(NOTICE,
+ "%s already registered", name);
+ free(dev);
+ }
+ return 0;
+ }
+
+ vmbus_add_device(dev);
+ return 0;
+error:
+ VMBUS_LOG(DEBUG, "failed");
+
+ free(dev);
+ return -1;
+}
+
+/*
+ * Scan the content of the vmbus, and the devices in the devices list
+ */
+int
+rte_vmbus_scan(void)
+{
+ struct dirent *e;
+ DIR *dir;
+
+ dir = opendir(SYSFS_VMBUS_DEVICES);
+ if (dir == NULL) {
+ if (errno == ENOENT)
+ return 0;
+
+ VMBUS_LOG(ERR, "opendir %s failed: %s",
+ SYSFS_VMBUS_DEVICES, strerror(errno));
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ if (e->d_name[0] == '.')
+ continue;
+
+ if (vmbus_scan_one(e->d_name) < 0)
+ goto error;
+ }
+ closedir(dir);
+ return 0;
+
+error:
+ closedir(dir);
+ return -1;
+}
+
+void rte_vmbus_irq_mask(struct rte_vmbus_device *device)
+{
+ vmbus_uio_irq_control(device, 1);
+}
+
+void rte_vmbus_irq_unmask(struct rte_vmbus_device *device)
+{
+ vmbus_uio_irq_control(device, 0);
+}
+
+int rte_vmbus_irq_read(struct rte_vmbus_device *device)
+{
+ return vmbus_uio_irq_read(device);
+}
diff --git a/drivers/bus/vmbus/linux/vmbus_uio.c b/drivers/bus/vmbus/linux/vmbus_uio.c
new file mode 100644
index 00000000..856c6d66
--- /dev/null
+++ b/drivers/bus/vmbus/linux/vmbus_uio.c
@@ -0,0 +1,398 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_bus_vmbus.h>
+#include <rte_string_fns.h>
+
+#include "private.h"
+
+/** Pathname of VMBUS devices directory. */
+#define SYSFS_VMBUS_DEVICES "/sys/bus/vmbus/devices"
+
+static void *vmbus_map_addr;
+
+/* Control interrupts */
+void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff)
+{
+ if (write(dev->intr_handle.fd, &onoff, sizeof(onoff)) < 0) {
+ VMBUS_LOG(ERR, "cannot write to %d:%s",
+ dev->intr_handle.fd, strerror(errno));
+ }
+}
+
+int vmbus_uio_irq_read(struct rte_vmbus_device *dev)
+{
+ int32_t count;
+ int cc;
+
+ cc = read(dev->intr_handle.fd, &count, sizeof(count));
+ if (cc < (int)sizeof(count)) {
+ if (cc < 0) {
+ VMBUS_LOG(ERR, "IRQ read failed %s",
+ strerror(errno));
+ return -errno;
+ }
+ VMBUS_LOG(ERR, "can't read IRQ count");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+void
+vmbus_uio_free_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource *uio_res)
+{
+ rte_free(uio_res);
+
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ if (dev->intr_handle.fd >= 0) {
+ close(dev->intr_handle.fd);
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ }
+}
+
+int
+vmbus_uio_alloc_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource **uio_res)
+{
+ char devname[PATH_MAX]; /* contains the /dev/uioX */
+
+ /* save fd if in primary process */
+ snprintf(devname, sizeof(devname), "/dev/uio%u", dev->uio_num);
+ dev->intr_handle.fd = open(devname, O_RDWR);
+ if (dev->intr_handle.fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ devname, strerror(errno));
+ goto error;
+ }
+ dev->intr_handle.type = RTE_INTR_HANDLE_UIO_INTX;
+
+ /* allocate the mapping details for secondary processes */
+ *uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
+ if (*uio_res == NULL) {
+ VMBUS_LOG(ERR, "cannot store uio mmap details");
+ goto error;
+ }
+
+ strlcpy((*uio_res)->path, devname, PATH_MAX);
+ rte_uuid_copy((*uio_res)->id, dev->device_id);
+
+ return 0;
+
+error:
+ vmbus_uio_free_resource(dev, *uio_res);
+ return -1;
+}
+
+static int
+find_max_end_va(const struct rte_memseg_list *msl, void *arg)
+{
+ size_t sz = msl->memseg_arr.len * msl->page_sz;
+ void *end_va = RTE_PTR_ADD(msl->base_va, sz);
+ void **max_va = arg;
+
+ if (*max_va < end_va)
+ *max_va = end_va;
+ return 0;
+}
+
+/*
+ * TODO: this should be part of memseg api.
+ * code is duplicated from PCI.
+ */
+static void *
+vmbus_find_max_end_va(void)
+{
+ void *va = NULL;
+
+ rte_memseg_list_walk(find_max_end_va, &va);
+ return va;
+}
+
+int
+vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int idx,
+ struct mapped_vmbus_resource *uio_res,
+ int flags)
+{
+ size_t size = dev->resource[idx].len;
+ struct vmbus_map *maps = uio_res->maps;
+ void *mapaddr;
+ off_t offset;
+ int fd;
+
+ /* devname for mmap */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ /* try mapping somewhere close to the end of hugepages */
+ if (vmbus_map_addr == NULL)
+ vmbus_map_addr = vmbus_find_max_end_va();
+
+ /* offset is special in uio; it indicates which resource to map */
+ offset = idx * PAGE_SIZE;
+
+ mapaddr = vmbus_map_resource(vmbus_map_addr, fd, offset, size, flags);
+ close(fd);
+
+ if (mapaddr == MAP_FAILED)
+ return -1;
+
+ dev->resource[idx].addr = mapaddr;
+ vmbus_map_addr = RTE_PTR_ADD(mapaddr, size);
+
+ /* Record result of successful mapping for use by secondary */
+ maps[idx].addr = mapaddr;
+ maps[idx].size = size;
+
+ return 0;
+}
+
+static int vmbus_uio_map_primary(struct vmbus_channel *chan,
+ void **ring_buf, uint32_t *ring_size)
+{
+ struct mapped_vmbus_resource *uio_res;
+
+ uio_res = vmbus_uio_find_resource(chan->device);
+ if (!uio_res) {
+ VMBUS_LOG(ERR, "can not find resources!");
+ return -ENOMEM;
+ }
+
+ if (uio_res->nb_maps < VMBUS_MAX_RESOURCE) {
+ VMBUS_LOG(ERR, "VMBUS: only %u resources found!",
+ uio_res->nb_maps);
+ return -EINVAL;
+ }
+
+ *ring_size = uio_res->maps[HV_TXRX_RING_MAP].size / 2;
+ *ring_buf = uio_res->maps[HV_TXRX_RING_MAP].addr;
+ return 0;
+}
+
+static int vmbus_uio_map_subchan(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan,
+ void **ring_buf, uint32_t *ring_size)
+{
+ char ring_path[PATH_MAX];
+ size_t file_size;
+ struct stat sb;
+ int fd;
+
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name,
+ chan->relid);
+
+ fd = open(ring_path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ ring_path, strerror(errno));
+ return -errno;
+ }
+
+ if (fstat(fd, &sb) < 0) {
+ VMBUS_LOG(ERR, "Cannot state %s: %s",
+ ring_path, strerror(errno));
+ close(fd);
+ return -errno;
+ }
+ file_size = sb.st_size;
+
+ if (file_size == 0 || (file_size & (PAGE_SIZE - 1))) {
+ VMBUS_LOG(ERR, "incorrect size %s: %zu",
+ ring_path, file_size);
+
+ close(fd);
+ return -EINVAL;
+ }
+
+ *ring_size = file_size / 2;
+ *ring_buf = vmbus_map_resource(vmbus_map_addr, fd,
+ 0, sb.st_size, 0);
+ close(fd);
+
+ if (*ring_buf == MAP_FAILED)
+ return -EIO;
+
+ vmbus_map_addr = RTE_PTR_ADD(*ring_buf, file_size);
+ return 0;
+}
+
+int vmbus_uio_map_rings(struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *dev = chan->device;
+ uint32_t ring_size;
+ void *ring_buf;
+ int ret;
+
+ /* Primary channel */
+ if (chan->subchannel_id == 0)
+ ret = vmbus_uio_map_primary(chan, &ring_buf, &ring_size);
+ else
+ ret = vmbus_uio_map_subchan(dev, chan, &ring_buf, &ring_size);
+
+ if (ret)
+ return ret;
+
+ vmbus_br_setup(&chan->txbr, ring_buf, ring_size);
+ vmbus_br_setup(&chan->rxbr, (char *)ring_buf + ring_size, ring_size);
+ return 0;
+}
+
+static int vmbus_uio_sysfs_read(const char *dir, const char *name,
+ unsigned long *val, unsigned long max_range)
+{
+ char path[PATH_MAX];
+ FILE *f;
+ int ret;
+
+ snprintf(path, sizeof(path), "%s/%s", dir, name);
+ f = fopen(path, "r");
+ if (!f) {
+ VMBUS_LOG(ERR, "can't open %s:%s",
+ path, strerror(errno));
+ return -errno;
+ }
+
+ if (fscanf(f, "%lu", val) != 1)
+ ret = -EIO;
+ else if (*val > max_range)
+ ret = -ERANGE;
+ else
+ ret = 0;
+ fclose(f);
+
+ return ret;
+}
+
+static bool vmbus_uio_ring_present(const struct rte_vmbus_device *dev,
+ uint32_t relid)
+{
+ char ring_path[PATH_MAX];
+
+ /* Check if kernel has subchannel sysfs files */
+ snprintf(ring_path, sizeof(ring_path),
+ "%s/%s/channels/%u/ring",
+ SYSFS_VMBUS_DEVICES, dev->device.name, relid);
+
+ return access(ring_path, R_OK|W_OK) == 0;
+}
+
+bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ return vmbus_uio_ring_present(dev, chan->relid);
+}
+
+static bool vmbus_isnew_subchannel(struct vmbus_channel *primary,
+ unsigned long id)
+{
+ const struct vmbus_channel *c;
+
+ STAILQ_FOREACH(c, &primary->subchannel_list, next) {
+ if (c->relid == id)
+ return false;
+ }
+ return true;
+}
+
+int vmbus_uio_get_subchan(struct vmbus_channel *primary,
+ struct vmbus_channel **subchan)
+{
+ const struct rte_vmbus_device *dev = primary->device;
+ char chan_path[PATH_MAX], subchan_path[PATH_MAX];
+ struct dirent *ent;
+ DIR *chan_dir;
+
+ snprintf(chan_path, sizeof(chan_path),
+ "%s/%s/channels",
+ SYSFS_VMBUS_DEVICES, dev->device.name);
+
+ chan_dir = opendir(chan_path);
+ if (!chan_dir) {
+ VMBUS_LOG(ERR, "cannot open %s: %s",
+ chan_path, strerror(errno));
+ return -errno;
+ }
+
+ while ((ent = readdir(chan_dir))) {
+ unsigned long relid, subid, monid;
+ char *endp;
+ int err;
+
+ if (ent->d_name[0] == '.')
+ continue;
+
+ errno = 0;
+ relid = strtoul(ent->d_name, &endp, 0);
+ if (*endp || errno != 0 || relid > UINT16_MAX) {
+ VMBUS_LOG(NOTICE, "not a valid channel relid: %s",
+ ent->d_name);
+ continue;
+ }
+
+ snprintf(subchan_path, sizeof(subchan_path), "%s/%lu",
+ chan_path, relid);
+ err = vmbus_uio_sysfs_read(subchan_path, "subchannel_id",
+ &subid, UINT16_MAX);
+ if (err) {
+ VMBUS_LOG(NOTICE, "invalid subchannel id %lu",
+ subid);
+ closedir(chan_dir);
+ return err;
+ }
+
+ if (subid == 0)
+ continue; /* skip primary channel */
+
+ if (!vmbus_isnew_subchannel(primary, relid))
+ continue;
+
+ if (!vmbus_uio_ring_present(dev, relid))
+ continue; /* Ring may not be ready yet */
+
+ err = vmbus_uio_sysfs_read(subchan_path, "monitor_id",
+ &monid, UINT8_MAX);
+ if (err) {
+ VMBUS_LOG(NOTICE, "invalid monitor id %lu",
+ monid);
+ return err;
+ }
+
+ err = vmbus_chan_create(dev, relid, subid, monid, subchan);
+ if (err) {
+ VMBUS_LOG(NOTICE, "subchannel setup failed");
+ return err;
+ }
+ break;
+ }
+ closedir(chan_dir);
+
+ return (ent == NULL) ? -ENOENT : 0;
+}
diff --git a/drivers/bus/vmbus/meson.build b/drivers/bus/vmbus/meson.build
new file mode 100644
index 00000000..18daabec
--- /dev/null
+++ b/drivers/bus/vmbus/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+allow_experimental_apis = true
+
+install_headers('rte_bus_vmbus.h','rte_vmbus_reg.h')
+
+sources = files('vmbus_common.c',
+ 'vmbus_channel.c',
+ 'vmbus_bufring.c',
+ 'vmbus_common_uio.c')
+
+if host_machine.system() == 'linux'
+ sources += files('linux/vmbus_bus.c',
+ 'linux/vmbus_uio.c')
+ includes += include_directories('linux')
+else
+ build = false
+endif
diff --git a/drivers/bus/vmbus/private.h b/drivers/bus/vmbus/private.h
new file mode 100644
index 00000000..9964fc42
--- /dev/null
+++ b/drivers/bus/vmbus/private.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_PRIVATE_H_
+#define _VMBUS_PRIVATE_H_
+
+#include <stdbool.h>
+#include <sys/uio.h>
+#include <rte_log.h>
+#include <rte_vmbus_reg.h>
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+extern int vmbus_logtype_bus;
+#define VMBUS_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vmbus_logtype_bus, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+struct vmbus_br {
+ struct vmbus_bufring *vbr;
+ uint32_t dsize;
+ uint32_t windex; /* next available location */
+};
+
+#define UIO_NAME_MAX 64
+
+struct vmbus_map {
+ void *addr; /* user mmap of resource */
+ uint64_t size; /* length */
+};
+
+/*
+ * For multi-process we need to reproduce all vmbus mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct mapped_vmbus_resource {
+ TAILQ_ENTRY(mapped_vmbus_resource) next;
+
+ rte_uuid_t id;
+ int nb_maps;
+ struct vmbus_map maps[VMBUS_MAX_RESOURCE];
+ char path[PATH_MAX];
+};
+
+TAILQ_HEAD(mapped_vmbus_res_list, mapped_vmbus_resource);
+
+#define HV_MON_TRIG_LEN 32
+#define HV_MON_TRIG_MAX 4
+
+struct vmbus_channel {
+ STAILQ_HEAD(, vmbus_channel) subchannel_list;
+ STAILQ_ENTRY(vmbus_channel) next;
+ const struct rte_vmbus_device *device;
+
+ struct vmbus_br rxbr;
+ struct vmbus_br txbr;
+
+ uint16_t relid;
+ uint16_t subchannel_id;
+ uint8_t monitor_id;
+};
+
+#define VMBUS_MAX_CHANNELS 64
+
+int vmbus_chan_create(const struct rte_vmbus_device *device,
+ uint16_t relid, uint16_t subid, uint8_t monitor_id,
+ struct vmbus_channel **new_chan);
+
+void vmbus_add_device(struct rte_vmbus_device *vmbus_dev);
+void vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev,
+ struct rte_vmbus_device *new_vmbus_dev);
+void vmbus_remove_device(struct rte_vmbus_device *vmbus_device);
+
+void vmbus_uio_irq_control(struct rte_vmbus_device *dev, int32_t onoff);
+int vmbus_uio_irq_read(struct rte_vmbus_device *dev);
+
+int vmbus_uio_map_resource(struct rte_vmbus_device *dev);
+void vmbus_uio_unmap_resource(struct rte_vmbus_device *dev);
+
+int vmbus_uio_alloc_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource **uio_res);
+void vmbus_uio_free_resource(struct rte_vmbus_device *dev,
+ struct mapped_vmbus_resource *uio_res);
+
+struct mapped_vmbus_resource *
+vmbus_uio_find_resource(const struct rte_vmbus_device *dev);
+int vmbus_uio_map_resource_by_index(struct rte_vmbus_device *dev, int res_idx,
+ struct mapped_vmbus_resource *uio_res,
+ int flags);
+
+void *vmbus_map_resource(void *requested_addr, int fd, off_t offset,
+ size_t size, int additional_flags);
+void vmbus_unmap_resource(void *requested_addr, size_t size);
+
+bool vmbus_uio_subchannels_supported(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan);
+int vmbus_uio_get_subchan(struct vmbus_channel *primary,
+ struct vmbus_channel **subchan);
+int vmbus_uio_map_rings(struct vmbus_channel *chan);
+
+void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen);
+
+/* Amount of space available for write */
+static inline uint32_t
+vmbus_br_availwrite(const struct vmbus_br *br, uint32_t windex)
+{
+ uint32_t rindex = br->vbr->rindex;
+
+ if (windex >= rindex)
+ return br->dsize - (windex - rindex);
+ else
+ return rindex - windex;
+}
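
Two worked cases (illustrative numbers) show how the free-space rule behaves on either side of the wrap:

    /* dsize = 4096 (illustration only):
     *   windex = 300, rindex = 100  ->  4096 - (300 - 100) = 3896 free
     *   windex = 100, rindex = 300  ->  300 - 100          =  200 free
     * The writer never fills the last byte (writes require avail > total),
     * so full and empty remain distinguishable.
     */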
+
+static inline uint32_t
+vmbus_br_availread(const struct vmbus_br *br)
+{
+ return br->dsize - vmbus_br_availwrite(br, br->vbr->windex);
+}
+
+int vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
+ bool *need_sig);
+
+int vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen);
+
+int vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t hlen);
+
+#endif /* _VMBUS_PRIVATE_H_ */
diff --git a/drivers/bus/vmbus/rte_bus_vmbus.h b/drivers/bus/vmbus/rte_bus_vmbus.h
new file mode 100644
index 00000000..4a2c1f6f
--- /dev/null
+++ b/drivers/bus/vmbus/rte_bus_vmbus.h
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_H_
+#define _VMBUS_H_
+
+/**
+ * @file
+ *
+ * VMBUS Interface
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_compat.h>
+#include <rte_uuid.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_dev.h>
+#include <rte_vmbus_reg.h>
+
+/* Forward declarations */
+struct rte_vmbus_device;
+struct rte_vmbus_driver;
+struct rte_vmbus_bus;
+struct vmbus_channel;
+struct vmbus_mon_page;
+
+TAILQ_HEAD(rte_vmbus_device_list, rte_vmbus_device);
+TAILQ_HEAD(rte_vmbus_driver_list, rte_vmbus_driver);
+
+/* VMBus iterators */
+#define FOREACH_DEVICE_ON_VMBUS(p) \
+ TAILQ_FOREACH(p, &(rte_vmbus_bus.device_list), next)
+
+#define FOREACH_DRIVER_ON_VMBUS(p) \
+ TAILQ_FOREACH(p, &(rte_vmbus_bus.driver_list), next)
+
+/** VMBUS resource map indices; VMBUS_MAX_RESOURCE below is their count. */
+enum hv_uio_map {
+ HV_TXRX_RING_MAP = 0,
+ HV_INT_PAGE_MAP,
+ HV_MON_PAGE_MAP,
+ HV_RECV_BUF_MAP,
+ HV_SEND_BUF_MAP
+};
+#define VMBUS_MAX_RESOURCE 5
+
+/**
+ * A structure describing a VMBUS device.
+ */
+struct rte_vmbus_device {
+ TAILQ_ENTRY(rte_vmbus_device) next; /**< Next probed VMBUS device */
+ const struct rte_vmbus_driver *driver; /**< Associated driver */
+ struct rte_device device; /**< Inherit core device */
+ rte_uuid_t device_id; /**< VMBUS device id */
+ rte_uuid_t class_id; /**< VMBUS device type */
+ uint32_t relid; /**< id for primary */
+ uint8_t monitor_id; /**< monitor page */
+ int uio_num; /**< UIO device number */
+ uint32_t *int_page; /**< VMBUS interrupt page */
+ struct vmbus_channel *primary; /**< VMBUS primary channel */
+ struct vmbus_mon_page *monitor_page; /**< VMBUS monitor page */
+
+ struct rte_intr_handle intr_handle; /**< Interrupt handle */
+ struct rte_mem_resource resource[VMBUS_MAX_RESOURCE];
+};
+
+/**
+ * Initialization function for the driver called during VMBUS probing.
+ */
+typedef int (vmbus_probe_t)(struct rte_vmbus_driver *,
+ struct rte_vmbus_device *);
+
+/**
+ * Uninitialization function for the driver called during hotplugging.
+ */
+typedef int (vmbus_remove_t)(struct rte_vmbus_device *);
+
+/**
+ * A structure describing a VMBUS driver.
+ */
+struct rte_vmbus_driver {
+ TAILQ_ENTRY(rte_vmbus_driver) next; /**< Next in list. */
+ struct rte_driver driver;
+ struct rte_vmbus_bus *bus; /**< VM bus reference. */
+ vmbus_probe_t *probe; /**< Device Probe function. */
+ vmbus_remove_t *remove; /**< Device Remove function. */
+
+ const rte_uuid_t *id_table; /**< ID table. */
+};
+
+
+/**
+ * Structure describing the VM bus
+ */
+struct rte_vmbus_bus {
+ struct rte_bus bus; /**< Inherit the generic class */
+ struct rte_vmbus_device_list device_list; /**< List of devices */
+ struct rte_vmbus_driver_list driver_list; /**< List of drivers */
+};
+
+/**
+ * Scan the content of the VMBUS bus and add the discovered
+ * devices to the devices list.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_vmbus_scan(void);
+
+/**
+ * Probe the VMBUS bus
+ *
+ * @return
+ * - 0 on success.
+ * - !0 on error.
+ */
+int rte_vmbus_probe(void);
+
+/**
+ * Map the VMBUS device resources in user space virtual memory address
+ *
+ * @param dev
+ * A pointer to a rte_vmbus_device structure describing the device
+ * to use
+ *
+ * @return
+ * 0 on success, negative on error and positive if no driver
+ * is found for the device.
+ */
+int rte_vmbus_map_device(struct rte_vmbus_device *dev);
+
+/**
+ * Unmap this device
+ *
+ * @param dev
+ * A pointer to a rte_vmbus_device structure describing the device
+ * to use
+ */
+void rte_vmbus_unmap_device(struct rte_vmbus_device *dev);
+
+/**
+ * Get connection to primary VMBUS channel
+ *
+ * @param device
+ * A pointer to a rte_vmbus_device structure describing the device
+ * @param chan
+ * A pointer to a VMBUS channel pointer that will be filled.
+ * @return
+ * - 0 Success; channel opened.
+ * - -ENOMEM: Not enough memory available.
+ * - -EINVAL: Regions could not be mapped.
+ */
+int rte_vmbus_chan_open(struct rte_vmbus_device *device,
+ struct vmbus_channel **chan);
+
+/**
+ * Free connection to VMBUS channel
+ *
+ * @param chan
+ * VMBUS channel
+ */
+void rte_vmbus_chan_close(struct vmbus_channel *chan);
+
+/**
+ * Gets the maximum number of channels supported on device
+ *
+ * @param device
+ * A pointer to a rte_vmbus_device structure describing the device
+ * @return
+ * Number of channels available.
+ */
+int rte_vmbus_max_channels(const struct rte_vmbus_device *device);
+
+/**
+ * Get a connection to new secondary vmbus channel
+ *
+ * @param primary
+ * A pointer to primary VMBUS channel
+ * @param chan
+ * A pointer to a secondary VMBUS channel pointer that will be filled.
+ * @return
+ * - 0 Success; channel opened.
+ * - -ENOMEM: Not enough memory available.
+ * - -EINVAL: Regions could not be mapped.
+ */
+int rte_vmbus_subchan_open(struct vmbus_channel *primary,
+ struct vmbus_channel **new_chan);
+
+/**
+ * Disable IRQ for device
+ *
+ * @param device
+ * VMBUS device
+ */
+void rte_vmbus_irq_mask(struct rte_vmbus_device *device);
+
+/**
+ * Enable IRQ for device
+ *
+ * @param device
+ * VMBUS device
+ */
+void rte_vmbus_irq_unmask(struct rte_vmbus_device *device);
+
+/**
+ * Read (and wait) for IRQ
+ *
+ * @param device
+ * VMBUS device
+ */
+int rte_vmbus_irq_read(struct rte_vmbus_device *device);
+
+/**
+ * Test if channel is empty
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @return
+ * Return true if no data present in incoming ring.
+ */
+bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel);
+
+/**
+ * Send the specified buffer on the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param type
+ * Type of packet that is being sent, e.g. negotiate, time
+ * packet etc.
+ * @param data
+ * Pointer to the buffer to send
+ * @param dlen
+ * Number of bytes of data to send
+ * @param xact
+ * Identifier of the request
+ * @param flags
+ * Message type inband, rxbuf, gpa
+ * @param need_sig
+ * Set if the host must be signaled after the send (optional)
+ *
+ * Sends data in buffer directly to Hyper-V via the vmbus
+ */
+int rte_vmbus_chan_send(struct vmbus_channel *channel, uint16_t type,
+ void *data, uint32_t dlen,
+ uint64_t xact, uint32_t flags, bool *need_sig);
+
+/**
+ * Explicitly signal host that data is available
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ *
+ * Used when batching multiple sends and only signaling host
+ * after the last send.
+ */
+void rte_vmbus_chan_signal_tx(const struct vmbus_channel *channel);
+
+/* Structure for scatter/gather I/O */
+struct iova_list {
+ rte_iova_t addr;
+ uint32_t len;
+};
+#define MAX_PAGE_BUFFER_COUNT 32
+
+/**
+ * Send a scattered buffer on the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param type
+ * Type of packet that is being sent, e.g. negotiate, time
+ * packet etc.
+ * @param gpa
+ * Array of buffers to send
+ * @param gpacnt
+ * Number of elements in the gpa array
+ * @param data
+ * Pointer to a buffer of additional data to send
+ * @param dlen
+ * Number of bytes of additional data to send
+ * @param xact
+ * Identifier of the request
+ * @param flags
+ * Message type inband, rxbuf, gpa
+ * @param need_sig
+ * Set if the host must be signaled after the send (optional)
+ *
+ * Sends data in buffer directly to Hyper-V via the vmbus
+ */
+int rte_vmbus_chan_send_sglist(struct vmbus_channel *channel,
+ struct vmbus_gpa gpa[], uint32_t gpacnt,
+ void *data, uint32_t dlen,
+ uint64_t xact, bool *need_sig);
+/**
+ * Receive response to a request on the given channel;
+ * skips the channel header.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param data
+ * Pointer to the buffer you want to receive the data into.
+ * @param len
+ * Pointer to size of receive buffer (in/out)
+ * @param request_id
+ * Pointer to received transaction_id
+ * @return
+ * On success, returns 0
+ * On failure, returns negative errno.
+ */
+int rte_vmbus_chan_recv(struct vmbus_channel *chan,
+ void *data, uint32_t *len,
+ uint64_t *request_id);
+
+/**
+ * Receive response to a request on the given channel;
+ * includes the channel header.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param data
+ * Pointer to the buffer you want to receive the data into.
+ * @param len
+ * Pointer to size of receive buffer (in/out)
+ * @return
+ * On success, returns number of bytes read.
+ * On failure, returns negative errno.
+ */
+int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
+ void *data, uint32_t *len);
+
+/**
+ * Notify host of bytes read (after recv_raw)
+ * Signals host if required.
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @param bytes_read
+ * Number of bytes read since last signal
+ */
+void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read);
+
+/**
+ * Determine sub channel index of the given channel
+ *
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ * @return
+ * Sub channel index (0 for primary)
+ */
+uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan);
+
+/**
+ * Register a VMBUS driver.
+ *
+ * @param driver
+ * A pointer to a rte_vmbus_driver structure describing the driver
+ * to be registered.
+ */
+void rte_vmbus_register(struct rte_vmbus_driver *driver);
+
+/**
+ * For debugging, dump the contents of the channel ring buffers.
+ *
+ * @param f
+ * File to write the dump to.
+ * @param channel
+ * Pointer to vmbus_channel structure.
+ */
+void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan);
+
+/**
+ * Unregister a VMBUS driver.
+ *
+ * @param driver
+ * A pointer to a rte_vmbus_driver structure describing the driver
+ * to be unregistered.
+ */
+void rte_vmbus_unregister(struct rte_vmbus_driver *driver);
+
+/** Helper for VMBUS device registration from driver instance */
+#define RTE_PMD_REGISTER_VMBUS(nm, vmbus_drv) \
+ RTE_INIT(vmbusinitfn_ ##nm); \
+ static void vmbusinitfn_ ##nm(void) \
+ { \
+ (vmbus_drv).driver.name = RTE_STR(nm); \
+ rte_vmbus_register(&vmbus_drv); \
+ } \
+ RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
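
A driver consuming this bus fills in an rte_vmbus_driver and registers it through the macro above. A minimal sketch with hypothetical names; the class GUID shown is a placeholder, not a real Hyper-V device class:

    static const rte_uuid_t mydev_id_table[] = {
            /* placeholder class GUID; a real driver lists its class here */
            { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
              0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
            { 0 }   /* null terminator, checked via rte_uuid_is_null() */
    };

    static int
    mydev_probe(struct rte_vmbus_driver *drv __rte_unused,
                struct rte_vmbus_device *dev __rte_unused)
    {
            return 0;       /* map rings, allocate adapter state, ... */
    }

    static struct rte_vmbus_driver mydev_driver = {
            .id_table = mydev_id_table,
            .probe = mydev_probe,
    };

    RTE_PMD_REGISTER_VMBUS(net_mydev, mydev_driver);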
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VMBUS_H_ */
diff --git a/drivers/bus/vmbus/rte_bus_vmbus_version.map b/drivers/bus/vmbus/rte_bus_vmbus_version.map
new file mode 100644
index 00000000..dabb9203
--- /dev/null
+++ b/drivers/bus/vmbus/rte_bus_vmbus_version.map
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+DPDK_18.08 {
+ global:
+
+ rte_vmbus_chan_close;
+ rte_vmbus_chan_open;
+ rte_vmbus_chan_recv;
+ rte_vmbus_chan_recv_raw;
+ rte_vmbus_chan_rx_empty;
+ rte_vmbus_chan_send;
+ rte_vmbus_chan_send_sglist;
+ rte_vmbus_chan_signal_read;
+ rte_vmbus_chan_signal_tx;
+ rte_vmbus_irq_mask;
+ rte_vmbus_irq_read;
+ rte_vmbus_irq_unmask;
+ rte_vmbus_map_device;
+ rte_vmbus_max_channels;
+ rte_vmbus_probe;
+ rte_vmbus_register;
+ rte_vmbus_scan;
+ rte_vmbus_sub_channel_index;
+ rte_vmbus_subchan_open;
+ rte_vmbus_unmap_device;
+ rte_vmbus_unregister;
+
+ local: *;
+};
diff --git a/drivers/bus/vmbus/rte_vmbus_reg.h b/drivers/bus/vmbus/rte_vmbus_reg.h
new file mode 100644
index 00000000..f5a0693d
--- /dev/null
+++ b/drivers/bus/vmbus/rte_vmbus_reg.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#ifndef _VMBUS_REG_H_
+#define _VMBUS_REG_H_
+
+/*
+ * Hyper-V SynIC message format.
+ */
+#define VMBUS_MSG_DSIZE_MAX 240
+#define VMBUS_MSG_SIZE 256
+
+struct vmbus_message {
+ uint32_t type; /* HYPERV_MSGTYPE_ */
+ uint8_t dsize; /* data size */
+ uint8_t flags; /* VMBUS_MSGFLAG_ */
+ uint16_t rsvd;
+ uint64_t id;
+ uint8_t data[VMBUS_MSG_DSIZE_MAX];
+} __rte_packed;
+
+#define VMBUS_MSGFLAG_PENDING 0x01
+
+/*
+ * Hyper-V Monitor Notification Facility
+ */
+
+struct vmbus_mon_trig {
+ uint32_t pending;
+ uint32_t armed;
+} __rte_packed;
+
+#define VMBUS_MONTRIGS_MAX 4
+#define VMBUS_MONTRIG_LEN 32
+
+/*
+ * Hyper-V Monitor Notification Facility
+ */
+struct hyperv_mon_param {
+ uint32_t connid;
+ uint16_t evtflag_ofs;
+ uint16_t rsvd;
+} __rte_packed;
+
+struct vmbus_mon_page {
+ uint32_t state;
+ uint32_t rsvd1;
+
+ struct vmbus_mon_trig trigs[VMBUS_MONTRIGS_MAX];
+ uint8_t rsvd2[536];
+
+ uint16_t lat[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN];
+ uint8_t rsvd3[256];
+
+ struct hyperv_mon_param
+ param[VMBUS_MONTRIGS_MAX][VMBUS_MONTRIG_LEN];
+ uint8_t rsvd4[1984];
+} __rte_packed;
+
+/*
+ * Buffer ring
+ */
+
+struct vmbus_bufring {
+ volatile uint32_t windex;
+ volatile uint32_t rindex;
+
+ /*
+ * Interrupt mask {0,1}
+ *
+ * For TX bufring, host set this to 1, when it is processing
+ * the TX bufring, so that we can safely skip the TX event
+ * notification to host.
+ *
+ * For RX bufring, once this is set to 1 by us, host will not
+ * further dispatch interrupts to us, even if there are data
+ * pending on the RX bufring. This effectively disables the
+ * interrupt of the channel to which this RX bufring is attached.
+ */
+ volatile uint32_t imask;
+
+ /*
+ * Win8 uses some of the reserved bits to implement
+ * interrupt driven flow management. On the send side
+ * we can request that the receiver interrupt the sender
+ * when the ring transitions from being full to being able
+ * to handle a message of size "pending_send_sz".
+ *
+ * Add necessary state for this enhancement.
+ */
+ volatile uint32_t pending_send;
+ uint32_t reserved1[12];
+
+ union {
+ struct {
+ uint32_t feat_pending_send_sz:1;
+ };
+ uint32_t value;
+ } feature_bits;
+
+ /* Pad it to PAGE_SIZE so that data starts on page boundary */
+ uint8_t reserved2[4028];
+
+ /*
+ * Ring data starts here + RingDataStartOffset
+ * !!! DO NOT place any fields below this !!!
+ */
+ uint8_t data[0];
+} __rte_packed;
+
+/*
+ * Channel packets
+ */
+
+/* Channel packet flags */
+#define VMBUS_CHANPKT_TYPE_INBAND 0x0006
+#define VMBUS_CHANPKT_TYPE_RXBUF 0x0007
+#define VMBUS_CHANPKT_TYPE_GPA 0x0009
+#define VMBUS_CHANPKT_TYPE_COMP 0x000b
+
+#define VMBUS_CHANPKT_FLAG_NONE 0
+#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */
+
+#define VMBUS_CHANPKT_SIZE_SHIFT 3
+#define VMBUS_CHANPKT_SIZE_ALIGN (1 << VMBUS_CHANPKT_SIZE_SHIFT)
+#define VMBUS_CHANPKT_HLEN_MIN \
+ (sizeof(struct vmbus_chanpkt_hdr) >> VMBUS_CHANPKT_SIZE_SHIFT)
+
+static inline uint32_t
+vmbus_chanpkt_getlen(uint16_t pktlen)
+{
+ return (uint32_t)pktlen << VMBUS_CHANPKT_SIZE_SHIFT;
+}
+
+/*
+ * GPA stuffs.
+ */
+struct vmbus_gpa_range {
+ uint32_t len;
+ uint32_t ofs;
+ uint64_t page[0];
+} __rte_packed;
+
+/* This is actually vmbus_gpa_range.page[1] */
+struct vmbus_gpa {
+ uint32_t len;
+ uint32_t ofs;
+ uint64_t page;
+} __rte_packed;
+
+struct vmbus_chanpkt_hdr {
+ uint16_t type; /* VMBUS_CHANPKT_TYPE_ */
+ uint16_t hlen; /* header len, in 8 bytes */
+ uint16_t tlen; /* total len, in 8 bytes */
+ uint16_t flags; /* VMBUS_CHANPKT_FLAG_ */
+ uint64_t xactid;
+} __rte_packed;
+
+static inline uint32_t
+vmbus_chanpkt_datalen(const struct vmbus_chanpkt_hdr *pkt)
+{
+ return vmbus_chanpkt_getlen(pkt->tlen)
+ - vmbus_chanpkt_getlen(pkt->hlen);
+}
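
A worked example of the 8-byte-unit encoding used throughout the channel packet code:

    /* VMBUS_CHANPKT_SIZE_SHIFT = 3, so hlen/tlen count 8-byte units:
     *   hdr.hlen = 2  ->  header = 2 << 3 = 16 bytes
     *   hdr.tlen = 8  ->  total  = 8 << 3 = 64 bytes
     *   vmbus_chanpkt_datalen() = 64 - 16 = 48 bytes (incl. alignment pad)
     */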
+
+struct vmbus_chanpkt {
+ struct vmbus_chanpkt_hdr hdr;
+} __rte_packed;
+
+struct vmbus_rxbuf_desc {
+ uint32_t len;
+ uint32_t ofs;
+} __rte_packed;
+
+struct vmbus_chanpkt_rxbuf {
+ struct vmbus_chanpkt_hdr hdr;
+ uint16_t rxbuf_id;
+ uint16_t rsvd;
+ uint32_t rxbuf_cnt;
+ struct vmbus_rxbuf_desc rxbuf[];
+} __rte_packed;
+
+struct vmbus_chanpkt_sglist {
+ struct vmbus_chanpkt_hdr hdr;
+ uint32_t rsvd;
+ uint32_t gpa_cnt;
+ struct vmbus_gpa gpa[];
+} __rte_packed;
+
+/*
+ * Channel messages
+ * - Embedded in vmbus_message.data, e.g. response and notification.
+ * - Embedded in hypercall_postmsg_in.hc_data, e.g. request.
+ */
+
+#define VMBUS_CHANMSG_TYPE_CHOFFER 1 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHRESCIND 2 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHREQUEST 3 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CHOFFER_DONE 4 /* NOTE */
+#define VMBUS_CHANMSG_TYPE_CHOPEN 5 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CHOPEN_RESP 6 /* RESP */
+#define VMBUS_CHANMSG_TYPE_CHCLOSE 7 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_CONN 8 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_SUBCONN 9 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_CONNRESP 10 /* RESP */
+#define VMBUS_CHANMSG_TYPE_GPADL_DISCONN 11 /* REQ */
+#define VMBUS_CHANMSG_TYPE_GPADL_DISCONNRESP 12 /* RESP */
+#define VMBUS_CHANMSG_TYPE_CHFREE 13 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CONNECT 14 /* REQ */
+#define VMBUS_CHANMSG_TYPE_CONNECT_RESP 15 /* RESP */
+#define VMBUS_CHANMSG_TYPE_DISCONNECT 16 /* REQ */
+#define VMBUS_CHANMSG_TYPE_MAX 22
+
+struct vmbus_chanmsg_hdr {
+ uint32_t type; /* VMBUS_CHANMSG_TYPE_ */
+ uint32_t rsvd;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CONNECT */
+struct vmbus_chanmsg_connect {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t ver;
+ uint32_t rsvd;
+ uint64_t evtflags;
+ uint64_t mnf1;
+ uint64_t mnf2;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CONNECT_RESP */
+struct vmbus_chanmsg_connect_resp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint8_t done;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHREQUEST */
+struct vmbus_chanmsg_chrequest {
+ struct vmbus_chanmsg_hdr hdr;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_DISCONNECT */
+struct vmbus_chanmsg_disconnect {
+ struct vmbus_chanmsg_hdr hdr;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOPEN */
+struct vmbus_chanmsg_chopen {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t openid;
+ uint32_t gpadl;
+ uint32_t vcpuid;
+ uint32_t txbr_pgcnt;
+#define VMBUS_CHANMSG_CHOPEN_UDATA_SIZE 120
+ uint8_t udata[VMBUS_CHANMSG_CHOPEN_UDATA_SIZE];
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOPEN_RESP */
+struct vmbus_chanmsg_chopen_resp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t openid;
+ uint32_t status;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_GPADL_CONN */
+struct vmbus_chanmsg_gpadl_conn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+ uint16_t range_len;
+ uint16_t range_cnt;
+ struct vmbus_gpa_range range;
+} __rte_packed;
+
+#define VMBUS_CHANMSG_GPADL_CONN_PGMAX 26
+
+/* VMBUS_CHANMSG_TYPE_GPADL_SUBCONN */
+struct vmbus_chanmsg_gpadl_subconn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t msgno;
+ uint32_t gpadl;
+ uint64_t gpa_page[];
+} __rte_packed;
+
+#define VMBUS_CHANMSG_GPADL_SUBCONN_PGMAX 28
+
+/* VMBUS_CHANMSG_TYPE_GPADL_CONNRESP */
+struct vmbus_chanmsg_gpadl_connresp {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+ uint32_t status;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHCLOSE */
+struct vmbus_chanmsg_chclose {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_GPADL_DISCONN */
+struct vmbus_chanmsg_gpadl_disconn {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+ uint32_t gpadl;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHFREE */
+struct vmbus_chanmsg_chfree {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHRESCIND */
+struct vmbus_chanmsg_chrescind {
+ struct vmbus_chanmsg_hdr hdr;
+ uint32_t chanid;
+} __rte_packed;
+
+/* VMBUS_CHANMSG_TYPE_CHOFFER */
+struct vmbus_chanmsg_choffer {
+ struct vmbus_chanmsg_hdr hdr;
+ rte_uuid_t chtype;
+ rte_uuid_t chinst;
+ uint64_t chlat; /* unit: 100ns */
+ uint32_t chrev;
+ uint32_t svrctx_sz;
+ uint16_t chflags;
+ uint16_t mmio_sz; /* unit: MB */
+ uint8_t udata[120];
+ uint16_t subidx;
+ uint16_t rsvd;
+ uint32_t chanid;
+ uint8_t montrig;
+ uint8_t flags1; /* VMBUS_CHOFFER_FLAG1_ */
+ uint16_t flags2;
+ uint32_t connid;
+} __rte_packed;
+
+#define VMBUS_CHOFFER_FLAG1_HASMNF 0x01
+
+#endif /* !_VMBUS_REG_H_ */
diff --git a/drivers/bus/vmbus/vmbus_bufring.c b/drivers/bus/vmbus/vmbus_bufring.c
new file mode 100644
index 00000000..c8800160
--- /dev/null
+++ b/drivers/bus/vmbus/vmbus_bufring.c
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2012,2016 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/uio.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_pause.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+/* Increase bufring index by inc with wraparound */
+static inline uint32_t vmbus_br_idxinc(uint32_t idx, uint32_t inc, uint32_t sz)
+{
+ idx += inc;
+ if (idx >= sz)
+ idx -= sz;
+
+ return idx;
+}
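
Quick illustrative checks of the wraparound helper:

    /* sz = 4096:
     *   vmbus_br_idxinc(100, 16, 4096)  == 116
     *   vmbus_br_idxinc(4090, 16, 4096) == 10   (wrapped)
     * Assumes inc < sz, which holds because oversized writes are
     * rejected before any copy is attempted.
     */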
+
+void vmbus_br_setup(struct vmbus_br *br, void *buf, unsigned int blen)
+{
+ br->vbr = buf;
+ br->windex = br->vbr->windex;
+ br->dsize = blen - sizeof(struct vmbus_bufring);
+}
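
Note that sizeof(struct vmbus_bufring) is exactly one 4 KB page (the header is padded so data[] starts page-aligned), so the usable data area is the mapping length minus one page:

    /* e.g. a 5-page (20480-byte) ring mapping:
     *   dsize = 20480 - 4096 = 16384 bytes of data area
     */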
+
+/*
+ * When we write to the ring buffer, check if the host needs to be
+ * signaled.
+ *
+ * The contract:
+ * - The host guarantees that while it is draining the TX bufring,
+ * it will set the br_imask to indicate it does not need to be
+ * interrupted when new data are added.
+ * - The host guarantees that it will completely drain the TX bufring
+ * before exiting the read loop. Further, once the TX bufring is
+ * empty, it will clear the br_imask and re-check to see if new
+ * data have arrived.
+ */
+static inline bool
+vmbus_txbr_need_signal(const struct vmbus_br *tbr, uint32_t old_windex)
+{
+ rte_smp_mb();
+ if (tbr->vbr->imask)
+ return false;
+
+ rte_smp_rmb();
+
+ /*
+ * This is the only case where we need to signal: the
+ * ring transitioned from empty to non-empty.
+ */
+ return old_windex == tbr->vbr->rindex;
+}
+
+static inline uint32_t
+vmbus_txbr_copyto(const struct vmbus_br *tbr, uint32_t windex,
+ const void *src0, uint32_t cplen)
+{
+ uint8_t *br_data = tbr->vbr->data;
+ uint32_t br_dsize = tbr->dsize;
+ const uint8_t *src = src0;
+
+ /* XXX use double mapping like Linux kernel? */
+ if (cplen > br_dsize - windex) {
+ uint32_t fraglen = br_dsize - windex;
+
+ /* Wrap-around detected */
+ memcpy(br_data + windex, src, fraglen);
+ memcpy(br_data, src + fraglen, cplen - fraglen);
+ } else {
+ memcpy(br_data + windex, src, cplen);
+ }
+
+ return vmbus_br_idxinc(windex, cplen, br_dsize);
+}
+
+/*
+ * Write scattered channel packet to TX bufring.
+ *
+ * The offset of this channel packet is written as a 64-bit value
+ * immediately after this channel packet.
+ *
+ * The write goes through three stages:
+ * 1. Reserve space in ring buffer for the new data.
+ * Writer atomically moves priv_write_index.
+ * 2. Copy the new data into the ring.
+ * 3. Update the tail of the ring (visible to host) that indicates
+ * next read location. Writer updates write_index
+ */
+int
+vmbus_txbr_write(struct vmbus_br *tbr, const struct iovec iov[], int iovlen,
+ bool *need_sig)
+{
+ struct vmbus_bufring *vbr = tbr->vbr;
+ uint32_t ring_size = tbr->dsize;
+ uint32_t old_windex, next_windex, windex, total;
+ uint64_t save_windex;
+ int i;
+
+ total = 0;
+ for (i = 0; i < iovlen; i++)
+ total += iov[i].iov_len;
+ total += sizeof(save_windex);
+
+ /* Reserve space in ring */
+ do {
+ uint32_t avail;
+
+ /* Get current free location */
+ old_windex = tbr->windex;
+
+ /* Prevent compiler reordering this with calculation */
+ rte_compiler_barrier();
+
+ avail = vmbus_br_availwrite(tbr, old_windex);
+
+ /* If not enough space in ring, then tell caller. */
+ if (avail <= total)
+ return -EAGAIN;
+
+ next_windex = vmbus_br_idxinc(old_windex, total, ring_size);
+
+ /* Atomic update of next write_index for other threads */
+ } while (!rte_atomic32_cmpset(&tbr->windex, old_windex, next_windex));
+
+ /* Space from old..new is now reserved */
+ windex = old_windex;
+ for (i = 0; i < iovlen; i++) {
+ windex = vmbus_txbr_copyto(tbr, windex,
+ iov[i].iov_base, iov[i].iov_len);
+ }
+
+ /* Set the offset of the current channel packet. */
+ save_windex = ((uint64_t)old_windex) << 32;
+ windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
+ sizeof(save_windex));
+
+ /* The region reserved should match region used */
+ RTE_ASSERT(windex == next_windex);
+
+ /* Ensure that data is available before updating host index */
+ rte_smp_wmb();
+
+ /* Check in our reservation; wait for our turn to update the host index */
+ while (!rte_atomic32_cmpset(&vbr->windex, old_windex, next_windex))
+ rte_pause();
+
+ /* If host had read all data before this, then need to signal */
+ *need_sig |= vmbus_txbr_need_signal(tbr, old_windex);
+ return 0;
+}
+
+static inline uint32_t
+vmbus_rxbr_copyfrom(const struct vmbus_br *rbr, uint32_t rindex,
+ void *dst0, size_t cplen)
+{
+ const uint8_t *br_data = rbr->vbr->data;
+ uint32_t br_dsize = rbr->dsize;
+ uint8_t *dst = dst0;
+
+ if (cplen > br_dsize - rindex) {
+ uint32_t fraglen = br_dsize - rindex;
+
+ /* Wrap-around detected. */
+ memcpy(dst, br_data + rindex, fraglen);
+ memcpy(dst + fraglen, br_data, cplen - fraglen);
+ } else {
+ memcpy(dst, br_data + rindex, cplen);
+ }
+
+ return vmbus_br_idxinc(rindex, cplen, br_dsize);
+}
+
+/* Copy data from receive ring but don't change index */
+int
+vmbus_rxbr_peek(const struct vmbus_br *rbr, void *data, size_t dlen)
+{
+ uint32_t avail;
+
+ /*
+ * At least the requested data plus the 64-bit channel
+ * packet offset must be available.
+ */
+ avail = vmbus_br_availread(rbr);
+ if (avail < dlen + sizeof(uint64_t))
+ return -EAGAIN;
+
+ vmbus_rxbr_copyfrom(rbr, rbr->vbr->rindex, data, dlen);
+ return 0;
+}
+
+/*
+ * Copy data from receive ring and change index
+ * NOTE:
+ * We assume (dlen + skip) == sizeof(channel packet).
+ */
+int
+vmbus_rxbr_read(struct vmbus_br *rbr, void *data, size_t dlen, size_t skip)
+{
+ struct vmbus_bufring *vbr = rbr->vbr;
+ uint32_t br_dsize = rbr->dsize;
+ uint32_t rindex;
+
+ if (vmbus_br_availread(rbr) < dlen + skip + sizeof(uint64_t))
+ return -EAGAIN;
+
+ /* Record where host was when we started read (for debug) */
+ rbr->windex = rbr->vbr->windex;
+
+ /*
+ * Copy channel packet from RX bufring.
+ */
+ rindex = vmbus_br_idxinc(rbr->vbr->rindex, skip, br_dsize);
+ rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen);
+
+ /*
+ * Discard this channel packet's 64-bit offset, which is useless to us.
+ */
+ rindex = vmbus_br_idxinc(rindex, sizeof(uint64_t), br_dsize);
+
+ /* Update the read index _after_ the channel packet is fetched. */
+ rte_compiler_barrier();
+
+ vbr->rindex = rindex;
+
+ return 0;
+}
diff --git a/drivers/bus/vmbus/vmbus_channel.c b/drivers/bus/vmbus/vmbus_channel.c
new file mode 100644
index 00000000..cc5f3e83
--- /dev/null
+++ b/drivers/bus/vmbus/vmbus_channel.c
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/uio.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+static inline void
+vmbus_sync_set_bit(volatile uint32_t *addr, uint32_t mask)
+{
+ /* Use GCC builtin which performs an atomic OR operation */
+ __sync_or_and_fetch(addr, mask);
+}
+
+static inline void
+vmbus_send_interrupt(const struct rte_vmbus_device *dev, uint32_t relid)
+{
+ uint32_t *int_addr;
+ uint32_t int_mask;
+
+ int_addr = dev->int_page + relid / 32;
+ int_mask = 1u << (relid % 32);
+
+ vmbus_sync_set_bit(int_addr, int_mask);
+}
+
+static inline void
+vmbus_set_monitor(const struct rte_vmbus_device *dev, uint32_t monitor_id)
+{
+ uint32_t *monitor_addr, monitor_mask;
+ unsigned int trigger_index;
+
+ trigger_index = monitor_id / HV_MON_TRIG_LEN;
+ monitor_mask = 1u << (monitor_id % HV_MON_TRIG_LEN);
+
+ monitor_addr = &dev->monitor_page->trigs[trigger_index].pending;
+ vmbus_sync_set_bit(monitor_addr, monitor_mask);
+}
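
Both helpers reduce to a word index plus a bit mask. Worked numbers (illustrative), with 32 relids per interrupt-page word and HV_MON_TRIG_LEN = 32:

    /* relid = 40:      int_addr = int_page + 40/32 = int_page + 1,
     *                  int_mask = 1u << (40 % 32) = 1 << 8
     * monitor_id = 40: trigs[40/32] = trigs[1], monitor_mask = 1u << 8
     */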
+
+static void
+vmbus_set_event(const struct rte_vmbus_device *dev,
+ const struct vmbus_channel *chan)
+{
+ vmbus_send_interrupt(dev, chan->relid);
+ vmbus_set_monitor(dev, chan->monitor_id);
+}
+
+/*
+ * Notify host that there are data pending on our TX bufring.
+ *
+ * Since this is in userspace, rely on the monitor page.
+ * Can't do a hypercall from userspace.
+ */
+void
+rte_vmbus_chan_signal_tx(const struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *dev = chan->device;
+ const struct vmbus_br *tbr = &chan->txbr;
+
+ /* Make sure all updates are done before signaling host */
+ rte_smp_wmb();
+
+ /* If host is ignoring interrupts, skip the signal */
+ if (tbr->vbr->imask)
+ return;
+
+ vmbus_set_event(dev, chan);
+}
+
+
+/* Do a simple send directly using transmit ring. */
+int rte_vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
+ void *data, uint32_t dlen,
+ uint64_t xactid, uint32_t flags, bool *need_sig)
+{
+ struct vmbus_chanpkt pkt;
+ unsigned int pktlen, pad_pktlen;
+ const uint32_t hlen = sizeof(pkt);
+ bool send_evt = false;
+ uint64_t pad = 0;
+ struct iovec iov[3];
+ int error;
+
+ pktlen = hlen + dlen;
+ pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
+
+ pkt.hdr.type = type;
+ pkt.hdr.flags = flags;
+ pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.xactid = xactid;
+
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = hlen;
+ iov[1].iov_base = data;
+ iov[1].iov_len = dlen;
+ iov[2].iov_base = &pad;
+ iov[2].iov_len = pad_pktlen - pktlen;
+
+ error = vmbus_txbr_write(&chan->txbr, iov, 3, &send_evt);
+
+ /*
+ * caller sets need_sig to non-NULL if it will handle
+ * signaling if required later.
+ * if need_sig is NULL, signal now if needed.
+ */
+ if (need_sig)
+ *need_sig |= send_evt;
+ else if (error == 0 && send_evt)
+ rte_vmbus_chan_signal_tx(chan);
+ return error;
+}
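
The need_sig parameter exists so a caller can batch several sends and ring the host doorbell once. A sketch, assuming chan, n, pkts[] and lens[] come from the caller:

    bool need_sig = false;
    int i, err = 0;

    for (i = 0; i < n && err == 0; i++)
            err = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
                                      pkts[i], lens[i], /*xactid*/ i,
                                      VMBUS_CHANPKT_FLAG_NONE, &need_sig);

    if (need_sig)
            rte_vmbus_chan_signal_tx(chan); /* one signal for the batch */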
+
+/* Do a scatter/gather send where the descriptor points to data. */
+int rte_vmbus_chan_send_sglist(struct vmbus_channel *chan,
+ struct vmbus_gpa sg[], uint32_t sglen,
+ void *data, uint32_t dlen,
+ uint64_t xactid, bool *need_sig)
+{
+ struct vmbus_chanpkt_sglist pkt;
+ unsigned int pktlen, pad_pktlen, hlen;
+ bool send_evt = false;
+ struct iovec iov[4];
+ uint64_t pad = 0;
+ int error;
+
+ hlen = offsetof(struct vmbus_chanpkt_sglist, gpa[sglen]);
+ pktlen = hlen + dlen;
+ pad_pktlen = RTE_ALIGN(pktlen, sizeof(uint64_t));
+
+ pkt.hdr.type = VMBUS_CHANPKT_TYPE_GPA;
+ pkt.hdr.flags = VMBUS_CHANPKT_FLAG_RC;
+ pkt.hdr.hlen = hlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.tlen = pad_pktlen >> VMBUS_CHANPKT_SIZE_SHIFT;
+ pkt.hdr.xactid = xactid;
+ pkt.rsvd = 0;
+ pkt.gpa_cnt = sglen;
+
+ iov[0].iov_base = &pkt;
+ iov[0].iov_len = sizeof(pkt);
+ iov[1].iov_base = sg;
+ iov[1].iov_len = sizeof(struct vmbus_gpa) * sglen;
+ iov[2].iov_base = data;
+ iov[2].iov_len = dlen;
+ iov[3].iov_base = &pad;
+ iov[3].iov_len = pad_pktlen - pktlen;
+
+ error = vmbus_txbr_write(&chan->txbr, iov, 4, &send_evt);
+
+ /* if caller is batching, just propagate the status */
+ if (need_sig)
+ *need_sig |= send_evt;
+ else if (error == 0 && send_evt)
+ rte_vmbus_chan_signal_tx(chan);
+ return error;
+}
+
+bool rte_vmbus_chan_rx_empty(const struct vmbus_channel *channel)
+{
+ const struct vmbus_br *br = &channel->rxbr;
+
+ return br->vbr->rindex == br->vbr->windex;
+}
+
+/* Signal host after reading N bytes */
+void rte_vmbus_chan_signal_read(struct vmbus_channel *chan, uint32_t bytes_read)
+{
+ struct vmbus_br *rbr = &chan->rxbr;
+ uint32_t write_sz, pending_sz;
+
+ /* No need for signaling on older versions */
+ if (!rbr->vbr->feature_bits.feat_pending_send_sz)
+ return;
+
+ /* Make sure reading of pending happens after new read index */
+ rte_mb();
+
+ pending_sz = rbr->vbr->pending_send;
+ if (!pending_sz)
+ return;
+
+ rte_smp_rmb();
+ write_sz = vmbus_br_availwrite(rbr, rbr->vbr->windex);
+
+ /* If there was space before then host was not blocked */
+ if (write_sz - bytes_read > pending_sz)
+ return;
+
+ /* If pending write will not fit */
+ if (write_sz <= pending_sz)
+ return;
+
+ vmbus_set_event(chan->device, chan);
+}
+
+int rte_vmbus_chan_recv(struct vmbus_channel *chan, void *data, uint32_t *len,
+ uint64_t *request_id)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ uint32_t dlen, hlen, bufferlen = *len;
+ int error;
+
+ *len = 0;
+
+ error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
+ if (error)
+ return error;
+
+ if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
+ /* XXX this channel is dead actually. */
+ return -EIO;
+ }
+
+ if (unlikely(pkt.hlen > pkt.tlen)) {
+ VMBUS_LOG(ERR, "VMBUS recv,invalid hlen %u and tlen %u",
+ pkt.hlen, pkt.tlen);
+ return -EIO;
+ }
+
+ /* Lengths are in quadwords */
+ hlen = pkt.hlen << VMBUS_CHANPKT_SIZE_SHIFT;
+ dlen = (pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT) - hlen;
+ *len = dlen;
+
+ /* If caller buffer is not large enough */
+ if (unlikely(dlen > bufferlen))
+ return -ENOBUFS;
+
+ if (request_id)
+ *request_id = pkt.xactid;
+
+ /* Read data and skip packet header */
+ error = vmbus_rxbr_read(&chan->rxbr, data, dlen, hlen);
+ if (error)
+ return error;
+
+ rte_vmbus_chan_signal_read(chan, dlen + hlen + sizeof(uint64_t));
+ return 0;
+}
+
+/* TODO: replace this with an in-place ring buffer (no copy) */
+int rte_vmbus_chan_recv_raw(struct vmbus_channel *chan,
+ void *data, uint32_t *len)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ uint32_t dlen, bufferlen = *len;
+ int error;
+
+ error = vmbus_rxbr_peek(&chan->rxbr, &pkt, sizeof(pkt));
+ if (error)
+ return error;
+
+ if (unlikely(pkt.hlen < VMBUS_CHANPKT_HLEN_MIN)) {
+ VMBUS_LOG(ERR, "VMBUS recv, invalid hlen %u", pkt.hlen);
+ /* XXX this channel is dead actually. */
+ return -EIO;
+ }
+
+ if (unlikely(pkt.hlen > pkt.tlen)) {
+ VMBUS_LOG(ERR, "VMBUS recv,invalid hlen %u and tlen %u",
+ pkt.hlen, pkt.tlen);
+ return -EIO;
+ }
+
+ /* Lengths are in quadwords */
+ dlen = pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT;
+ *len = dlen;
+
+ /* If caller buffer is not large enough */
+ if (unlikely(dlen > bufferlen))
+ return -ENOBUFS;
+
+ /* Read data and skip packet header */
+ error = vmbus_rxbr_read(&chan->rxbr, data, dlen, 0);
+ if (error)
+ return error;
+
+ /* Return the number of bytes read */
+ return dlen + sizeof(uint64_t);
+}
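
Putting the receive path together: the caller retries on -EAGAIN when the ring is empty and grows its buffer on -ENOBUFS. A sketch, assuming an open channel chan and an arbitrary scratch buffer:

    char buf[2048];
    uint32_t len = sizeof(buf);
    uint64_t xactid;
    int err;

    err = rte_vmbus_chan_recv(chan, buf, &len, &xactid);
    if (err == -EAGAIN) {
            /* ring empty; poll again later */
    } else if (err == -ENOBUFS) {
            /* len now holds the packet's data size; retry with a
             * larger buffer */
    } else if (err == 0) {
            /* len bytes of payload are in buf, request id in xactid */
    }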
+
+int vmbus_chan_create(const struct rte_vmbus_device *device,
+ uint16_t relid, uint16_t subid, uint8_t monitor_id,
+ struct vmbus_channel **new_chan)
+{
+ struct vmbus_channel *chan;
+ int err;
+
+ chan = rte_zmalloc_socket("VMBUS", sizeof(*chan), RTE_CACHE_LINE_SIZE,
+ device->device.numa_node);
+ if (!chan)
+ return -ENOMEM;
+
+ STAILQ_INIT(&chan->subchannel_list);
+ chan->device = device;
+ chan->subchannel_id = subid;
+ chan->relid = relid;
+ chan->monitor_id = monitor_id;
+ *new_chan = chan;
+
+ err = vmbus_uio_map_rings(chan);
+ if (err) {
+ rte_free(chan);
+ return err;
+ }
+
+ return 0;
+}
+
+/* Setup the primary channel */
+int rte_vmbus_chan_open(struct rte_vmbus_device *device,
+ struct vmbus_channel **new_chan)
+{
+ int err;
+
+ err = vmbus_chan_create(device, device->relid, 0,
+ device->monitor_id, new_chan);
+ if (!err)
+ device->primary = *new_chan;
+
+ return err;
+}
+
+int rte_vmbus_max_channels(const struct rte_vmbus_device *device)
+{
+ if (vmbus_uio_subchannels_supported(device, device->primary))
+ return VMBUS_MAX_CHANNELS;
+ else
+ return 1;
+}
+
+/* Setup secondary channel */
+int rte_vmbus_subchan_open(struct vmbus_channel *primary,
+ struct vmbus_channel **new_chan)
+{
+ struct vmbus_channel *chan;
+ int err;
+
+ err = vmbus_uio_get_subchan(primary, &chan);
+ if (err)
+ return err;
+
+ STAILQ_INSERT_TAIL(&primary->subchannel_list, chan, next);
+ *new_chan = chan;
+ return 0;
+}
+
+uint16_t rte_vmbus_sub_channel_index(const struct vmbus_channel *chan)
+{
+ return chan->subchannel_id;
+}
+
+void rte_vmbus_chan_close(struct vmbus_channel *chan)
+{
+ const struct rte_vmbus_device *device = chan->device;
+ struct vmbus_channel *primary = device->primary;
+
+ if (chan != primary)
+ STAILQ_REMOVE(&primary->subchannel_list, chan,
+ vmbus_channel, next);
+
+ rte_free(chan);
+}
+
+static void vmbus_dump_ring(FILE *f, const char *id, const struct vmbus_br *br)
+{
+ const struct vmbus_bufring *vbr = br->vbr;
+ struct vmbus_chanpkt_hdr pkt;
+
+ fprintf(f, "%s windex=%u rindex=%u mask=%u pending=%u feature=%#x\n",
+ id, vbr->windex, vbr->rindex, vbr->imask,
+ vbr->pending_send, vbr->feature_bits.value);
+ fprintf(f, " size=%u avail write=%u read=%u\n",
+ br->dsize, vmbus_br_availwrite(br, vbr->windex),
+ vmbus_br_availread(br));
+
+ if (vmbus_rxbr_peek(br, &pkt, sizeof(pkt)) == 0)
+ fprintf(f, " pkt type %#x len %u flags %#x xactid %#"PRIx64"\n",
+ pkt.type,
+ pkt.tlen << VMBUS_CHANPKT_SIZE_SHIFT,
+ pkt.flags, pkt.xactid);
+}
+
+void rte_vmbus_chan_dump(FILE *f, const struct vmbus_channel *chan)
+{
+ fprintf(f, "channel[%u] relid=%u monitor=%u\n",
+ chan->subchannel_id, chan->relid, chan->monitor_id);
+ vmbus_dump_ring(f, "rxbr", &chan->rxbr);
+ vmbus_dump_ring(f, "txbr", &chan->txbr);
+}
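
During bring-up, the full channel state can be printed in one call, given an open struct vmbus_channel *chan:

    rte_vmbus_chan_dump(stderr, chan); /* indices, masks, next packet header */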
diff --git a/drivers/bus/vmbus/vmbus_common.c b/drivers/bus/vmbus/vmbus_common.c
new file mode 100644
index 00000000..c7165ad5
--- /dev/null
+++ b/drivers/bus/vmbus/vmbus_common.c
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+int vmbus_logtype_bus;
+extern struct rte_vmbus_bus rte_vmbus_bus;
+
+/* map a particular resource from a file */
+void *
+vmbus_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
+ int flags)
+{
+ void *mapaddr;
+
+ /* Map the memory resource of device */
+ mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | flags, fd, offset);
+ if (mapaddr == MAP_FAILED) {
+ VMBUS_LOG(ERR,
+ "mmap(%d, %p, %zu, %ld) failed: %s",
+ fd, requested_addr, size, (long)offset,
+ strerror(errno));
+ }
+ return mapaddr;
+}
+
+/* unmap a particular resource */
+void
+vmbus_unmap_resource(void *requested_addr, size_t size)
+{
+ if (requested_addr == NULL)
+ return;
+
+ /* Unmap the VMBUS memory resource of device */
+ if (munmap(requested_addr, size)) {
+ VMBUS_LOG(ERR, "munmap(%p, 0x%lx) failed: %s",
+ requested_addr, (unsigned long)size,
+ strerror(errno));
+ } else
+ VMBUS_LOG(DEBUG, " VMBUS memory unmapped at %p",
+ requested_addr);
+}
+
+/**
+ * Match the VMBUS driver and device using UUID table
+ *
+ * @param drv
+ * VMBUS driver from which ID table would be extracted
+ * @param dev
+ * VMBUS device to match against the driver
+ * @return
+ * true for successful match
+ * false for unsuccessful match
+ */
+static bool
+vmbus_match(const struct rte_vmbus_driver *dr,
+ const struct rte_vmbus_device *dev)
+{
+ const rte_uuid_t *id_table;
+
+ for (id_table = dr->id_table; !rte_uuid_is_null(*id_table); ++id_table) {
+ if (rte_uuid_compare(*id_table, dev->class_id) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * If device ID match, call the devinit() function of the driver.
+ */
+static int
+vmbus_probe_one_driver(struct rte_vmbus_driver *dr,
+ struct rte_vmbus_device *dev)
+{
+ char guid[RTE_UUID_STRLEN];
+ int ret;
+
+ if (!vmbus_match(dr, dev))
+ return 1; /* not supported */
+
+ rte_uuid_unparse(dev->device_id, guid, sizeof(guid));
+ VMBUS_LOG(INFO, "VMBUS device %s on NUMA socket %i",
+ guid, dev->device.numa_node);
+
+ /* TODO add blacklisted */
+
+ /* map resources for device */
+ ret = rte_vmbus_map_device(dev);
+ if (ret != 0)
+ return ret;
+
+ /* reference driver structure */
+ dev->driver = dr;
+ dev->device.driver = &dr->driver;
+
+ if (dev->device.numa_node < 0) {
+ VMBUS_LOG(WARNING, " Invalid NUMA socket, default to 0");
+ dev->device.numa_node = 0;
+ }
+
+ /* call the driver probe() function */
+ VMBUS_LOG(INFO, " probe driver: %s", dr->driver.name);
+ ret = dr->probe(dr, dev);
+ if (ret) {
+ dev->driver = NULL;
+ rte_vmbus_unmap_device(dev);
+ }
+
+ return ret;
+}
+
+/*
+ * If the device class GUID matches, call the probe function of
+ * all registered drivers for the vmbus device.
+ * Return -1 if initialization failed,
+ * and 1 if no driver found for this device.
+ */
+static int
+vmbus_probe_all_drivers(struct rte_vmbus_device *dev)
+{
+ struct rte_vmbus_driver *dr;
+ int rc;
+
+ /* Check if a driver is already loaded */
+ if (dev->driver != NULL) {
+ VMBUS_LOG(DEBUG, "VMBUS driver already loaded");
+ return 0;
+ }
+
+ FOREACH_DRIVER_ON_VMBUS(dr) {
+ rc = vmbus_probe_one_driver(dr, dev);
+ if (rc < 0) /* negative is an error */
+ return -1;
+
+ if (rc > 0) /* positive means driver doesn't support it */
+ continue;
+
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Scan the vmbus, and call the devinit() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+int
+rte_vmbus_probe(void)
+{
+ struct rte_vmbus_device *dev;
+ size_t probed = 0, failed = 0;
+ char ubuf[RTE_UUID_STRLEN];
+
+ FOREACH_DEVICE_ON_VMBUS(dev) {
+ probed++;
+
+ rte_uuid_unparse(dev->device_id, ubuf, sizeof(ubuf));
+
+ /* TODO: add whitelist/blacklist */
+
+ if (vmbus_probe_all_drivers(dev) < 0) {
+ VMBUS_LOG(NOTICE,
+ "Requested device %s cannot be used", ubuf);
+ rte_errno = errno;
+ failed++;
+ }
+ }
+
+ return (probed && probed == failed) ? -1 : 0;
+}
+
+static int
+vmbus_parse(const char *name, void *addr)
+{
+ rte_uuid_t guid;
+ int ret;
+
+ ret = rte_uuid_parse(name, guid);
+ if (ret == 0 && addr)
+ memcpy(addr, &guid, sizeof(guid));
+
+ return ret;
+}
+
+/* register vmbus driver */
+void
+rte_vmbus_register(struct rte_vmbus_driver *driver)
+{
+ VMBUS_LOG(DEBUG,
+ "Registered driver %s", driver->driver.name);
+
+ TAILQ_INSERT_TAIL(&rte_vmbus_bus.driver_list, driver, next);
+ driver->bus = &rte_vmbus_bus;
+}
+
+/* unregister vmbus driver */
+void
+rte_vmbus_unregister(struct rte_vmbus_driver *driver)
+{
+ TAILQ_REMOVE(&rte_vmbus_bus.driver_list, driver, next);
+ driver->bus = NULL;
+}
+
+/* Add a device to VMBUS bus */
+void
+vmbus_add_device(struct rte_vmbus_device *vmbus_dev)
+{
+ TAILQ_INSERT_TAIL(&rte_vmbus_bus.device_list, vmbus_dev, next);
+}
+
+/* Insert a device into a predefined position in VMBUS bus */
+void
+vmbus_insert_device(struct rte_vmbus_device *exist_vmbus_dev,
+ struct rte_vmbus_device *new_vmbus_dev)
+{
+ TAILQ_INSERT_BEFORE(exist_vmbus_dev, new_vmbus_dev, next);
+}
+
+/* Remove a device from VMBUS bus */
+void
+vmbus_remove_device(struct rte_vmbus_device *vmbus_dev)
+{
+ TAILQ_REMOVE(&rte_vmbus_bus.device_list, vmbus_dev, next);
+}
+
+/* VMBUS doesn't support hotplug */
+static struct rte_device *
+vmbus_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
+ const void *data)
+{
+ struct rte_vmbus_device *dev;
+
+ FOREACH_DEVICE_ON_VMBUS(dev) {
+ if (start && &dev->device == start) {
+ start = NULL;
+ continue;
+ }
+ if (cmp(&dev->device, data) == 0)
+ return &dev->device;
+ }
+
+ return NULL;
+}
+
+
+struct rte_vmbus_bus rte_vmbus_bus = {
+ .bus = {
+ .scan = rte_vmbus_scan,
+ .probe = rte_vmbus_probe,
+ .find_device = vmbus_find_device,
+ .parse = vmbus_parse,
+ },
+ .device_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.device_list),
+ .driver_list = TAILQ_HEAD_INITIALIZER(rte_vmbus_bus.driver_list),
+};
+
+RTE_REGISTER_BUS(vmbus, rte_vmbus_bus.bus);
+
+RTE_INIT(vmbus_init_log)
+{
+ vmbus_logtype_bus = rte_log_register("bus.vmbus");
+ if (vmbus_logtype_bus >= 0)
+ rte_log_set_level(vmbus_logtype_bus, RTE_LOG_NOTICE);
+}
diff --git a/drivers/bus/vmbus/vmbus_common_uio.c b/drivers/bus/vmbus/vmbus_common_uio.c
new file mode 100644
index 00000000..5ddd36ab
--- /dev/null
+++ b/drivers/bus/vmbus/vmbus_common_uio.c
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018, Microsoft Corporation.
+ * All Rights Reserved.
+ */
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include <rte_eal.h>
+#include <rte_tailq.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_bus.h>
+#include <rte_bus_vmbus.h>
+
+#include "private.h"
+
+static struct rte_tailq_elem vmbus_tailq = {
+ .name = "VMBUS_RESOURCE_LIST",
+};
+EAL_REGISTER_TAILQ(vmbus_tailq)
+
+static int
+vmbus_uio_map_secondary(struct rte_vmbus_device *dev)
+{
+ int fd, i;
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list
+ = RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+
+ /* skip this element if it doesn't match our UUID */
+ if (rte_uuid_compare(uio_res->id, dev->device_id) != 0)
+ continue;
+
+ /* open /dev/uioX */
+ fd = open(uio_res->path, O_RDWR);
+ if (fd < 0) {
+ VMBUS_LOG(ERR, "Cannot open %s: %s",
+ uio_res->path, strerror(errno));
+ return -1;
+ }
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ void *mapaddr;
+
+ mapaddr = vmbus_map_resource(uio_res->maps[i].addr,
+ fd, 0,
+ uio_res->maps[i].size, 0);
+
+ if (mapaddr == uio_res->maps[i].addr)
+ continue;
+
+ VMBUS_LOG(ERR,
+ "Cannot mmap device resource file %s to address: %p",
+ uio_res->path, uio_res->maps[i].addr);
+
+ if (mapaddr != MAP_FAILED)
+ /* unmap addr wrongly mapped */
+ vmbus_unmap_resource(mapaddr,
+ (size_t)uio_res->maps[i].size);
+
+ /* unmap addrs correctly mapped */
+ while (--i >= 0)
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+
+ close(fd);
+ return -1;
+ }
+
+ /* fd is not needed in the secondary process, close it */
+ close(fd);
+ return 0;
+ }
+
+ VMBUS_LOG(ERR, "Cannot find resource for device");
+ return 1;
+}
+
+static int
+vmbus_uio_map_primary(struct rte_vmbus_device *dev)
+{
+ int i, ret;
+ struct mapped_vmbus_resource *uio_res = NULL;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ /* allocate uio resource */
+ ret = vmbus_uio_alloc_resource(dev, &uio_res);
+ if (ret)
+ return ret;
+
+ /* Map the resources */
+ for (i = 0; i < VMBUS_MAX_RESOURCE; i++) {
+ /* skip empty BAR */
+ if (dev->resource[i].len == 0)
+ continue;
+
+ ret = vmbus_uio_map_resource_by_index(dev, i, uio_res, 0);
+ if (ret)
+ goto error;
+ }
+
+ uio_res->nb_maps = i;
+
+ TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+ return 0;
+error:
+ while (--i >= 0) {
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ }
+ vmbus_uio_free_resource(dev, uio_res);
+ return -1;
+}
+
+
+struct mapped_vmbus_resource *
+vmbus_uio_find_resource(const struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ if (dev == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(uio_res, uio_res_list, next) {
+ /* skip this element if it doesn't match our VMBUS address */
+ if (rte_uuid_compare(uio_res->id, dev->device_id) == 0)
+ return uio_res;
+ }
+ return NULL;
+}
+
+/* map the VMBUS resource of a VMBUS device in virtual memory */
+int
+vmbus_uio_map_resource(struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ int ret;
+
+ /* TODO: handle rescind */
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.uio_cfg_fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+ /* secondary processes - use already recorded details */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ ret = vmbus_uio_map_secondary(dev);
+ else
+ ret = vmbus_uio_map_primary(dev);
+
+ if (ret != 0)
+ return ret;
+
+ uio_res = vmbus_uio_find_resource(dev);
+ if (!uio_res) {
+ VMBUS_LOG(ERR, "can not find resources!");
+ return -EIO;
+ }
+
+ if (uio_res->nb_maps <= HV_MON_PAGE_MAP) {
+ VMBUS_LOG(ERR, "VMBUS: only %u resources found!",
+ uio_res->nb_maps);
+ return -EINVAL;
+ }
+
+ dev->int_page = (uint32_t *)((char *)uio_res->maps[HV_INT_PAGE_MAP].addr
+ + (PAGE_SIZE >> 1));
+ dev->monitor_page = uio_res->maps[HV_MON_PAGE_MAP].addr;
+ return 0;
+}
+
+static void
+vmbus_uio_unmap(struct mapped_vmbus_resource *uio_res)
+{
+ int i;
+
+ if (uio_res == NULL)
+ return;
+
+ for (i = 0; i != uio_res->nb_maps; i++) {
+ vmbus_unmap_resource(uio_res->maps[i].addr,
+ (size_t)uio_res->maps[i].size);
+ }
+}
+
+/* unmap the VMBUS resource of a VMBUS device in virtual memory */
+void
+vmbus_uio_unmap_resource(struct rte_vmbus_device *dev)
+{
+ struct mapped_vmbus_resource *uio_res;
+ struct mapped_vmbus_res_list *uio_res_list =
+ RTE_TAILQ_CAST(vmbus_tailq.head, mapped_vmbus_res_list);
+
+ if (dev == NULL)
+ return;
+
+ /* find an entry for the device */
+ uio_res = vmbus_uio_find_resource(dev);
+ if (uio_res == NULL)
+ return;
+
+ /* secondary processes - just free maps */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return vmbus_uio_unmap(uio_res);
+
+ TAILQ_REMOVE(uio_res_list, uio_res, next);
+
+ /* unmap all resources */
+ vmbus_uio_unmap(uio_res);
+
+ /* free uio resource */
+ rte_free(uio_res);
+
+ /* close fd if in primary process */
+ close(dev->intr_handle.fd);
+ if (dev->intr_handle.uio_cfg_fd >= 0) {
+ close(dev->intr_handle.uio_cfg_fd);
+ dev->intr_handle.uio_cfg_fd = -1;
+ }
+
+ dev->intr_handle.fd = -1;
+ dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+}
diff --git a/drivers/common/Makefile b/drivers/common/Makefile
new file mode 100644
index 00000000..0fd22376
--- /dev/null
+++ b/drivers/common/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL),yy)
+DIRS-y += octeontx
+endif
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/common/meson.build b/drivers/common/meson.build
new file mode 100644
index 00000000..d7b7d8cf
--- /dev/null
+++ b/drivers/common/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+std_deps = ['eal']
+drivers = ['octeontx', 'qat']
+config_flag_fmt = 'RTE_LIBRTE_@0@_COMMON'
+driver_name_fmt = 'rte_common_@0@'
diff --git a/drivers/common/octeontx/Makefile b/drivers/common/octeontx/Makefile
new file mode 100644
index 00000000..dfdb9f19
--- /dev/null
+++ b/drivers/common/octeontx/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_common_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+EXPORT_MAP := rte_common_octeontx_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y += octeontx_mbox.c
+
+LDLIBS += -lrte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/common/octeontx/meson.build b/drivers/common/octeontx/meson.build
new file mode 100644
index 00000000..203d1ef4
--- /dev/null
+++ b/drivers/common/octeontx/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+sources = files('octeontx_mbox.c')
diff --git a/drivers/mempool/octeontx/octeontx_mbox.c b/drivers/common/octeontx/octeontx_mbox.c
index f8cb6a45..880f8a40 100644
--- a/drivers/mempool/octeontx/octeontx_mbox.c
+++ b/drivers/common/octeontx/octeontx_mbox.c
@@ -11,7 +11,6 @@
#include <rte_spinlock.h>
#include "octeontx_mbox.h"
-#include "octeontx_pool_logs.h"
/* Mbox operation timeout in seconds */
#define MBOX_WAIT_TIME_SEC 3
@@ -60,6 +59,15 @@ struct mbox_ram_hdr {
};
};
+int octeontx_logtype_mbox;
+
+RTE_INIT(otx_init_log)
+{
+ octeontx_logtype_mbox = rte_log_register("pmd.octeontx.mbox");
+ if (octeontx_logtype_mbox >= 0)
+ rte_log_set_level(octeontx_logtype_mbox, RTE_LOG_NOTICE);
+}
+
static inline void
mbox_msgcpy(volatile uint8_t *d, volatile const uint8_t *s, uint16_t size)
{
@@ -181,33 +189,60 @@ mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
return res;
}
-static inline int
-mbox_setup(struct mbox *m)
+int
+octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ if (m->init_once)
+ return -EALREADY;
+
+ if (ram_mbox_base == NULL) {
+ mbox_log_err("Invalid ram_mbox_base=%p", ram_mbox_base);
+ return -EINVAL;
+ }
+
+ m->ram_mbox_base = ram_mbox_base;
+
+ if (m->reg != NULL) {
+ rte_spinlock_init(&m->lock);
+ m->init_once = 1;
+ }
+
+ return 0;
+}
+
+int
+octeontx_mbox_set_reg(uint8_t *reg)
{
- if (unlikely(m->init_once == 0)) {
+ struct mbox *m = &octeontx_mbox;
+
+ if (m->init_once)
+ return -EALREADY;
+
+ if (reg == NULL) {
+ mbox_log_err("Invalid reg=%p", reg);
+ return -EINVAL;
+ }
+
+ m->reg = reg;
+
+ if (m->ram_mbox_base != NULL) {
rte_spinlock_init(&m->lock);
- m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
- m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
- m->reg += SSO_VHGRP_PF_MBOX(1);
-
- if (m->ram_mbox_base == NULL || m->reg == NULL) {
- mbox_log_err("Invalid ram_mbox_base=%p or reg=%p",
- m->ram_mbox_base, m->reg);
- return -EINVAL;
- }
m->init_once = 1;
}
+
return 0;
}
int
-octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
+octeontx_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
uint16_t txlen, void *rxdata, uint16_t rxlen)
{
struct mbox *m = &octeontx_mbox;
RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
- if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m))
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EINVAL;
return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
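
With mbox_setup() removed, BAR discovery now lives with whichever driver probes the SSO PF, and that driver must feed the addresses in before any mailbox traffic. A sketch with hypothetical pointers; ram_bar and reg_bar stand in for BARs mapped elsewhere:

    /* one-time init by the PF driver (each setter returns -EALREADY
     * if the mailbox was already initialized)
     */
    octeontx_mbox_set_ram_mbox_base(ram_bar);
    octeontx_mbox_set_reg(reg_bar + SSO_VHGRP_PF_MBOX(1));

    /* later, from the primary process only; ids and req/resp are
     * illustrative
     */
    struct octeontx_mbox_hdr hdr = {
            .coproc = coproc_id,
            .msg = msg_id,
            .vfid = 0,
    };
    err = octeontx_mbox_send(&hdr, &req, sizeof(req), &resp, sizeof(resp));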
diff --git a/drivers/common/octeontx/octeontx_mbox.h b/drivers/common/octeontx/octeontx_mbox.h
new file mode 100644
index 00000000..43fbda28
--- /dev/null
+++ b/drivers/common/octeontx/octeontx_mbox.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __OCTEONTX_MBOX_H__
+#define __OCTEONTX_MBOX_H__
+
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+#define SSOW_BAR4_LEN (64 * 1024)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
+#define MBOX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, octeontx_logtype_mbox,\
+ "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
+
+#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__)
+#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__)
+#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__)
+#define mbox_func_trace mbox_log_dbg
+
+extern int octeontx_logtype_mbox;
+
+struct octeontx_mbox_hdr {
+ uint16_t vfid; /* VF index or pf resource index local to the domain */
+ uint8_t coproc; /* Coprocessor id */
+ uint8_t msg; /* Message id */
+ uint8_t res_code; /* Functional layer response code */
+};
+
+int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
+int octeontx_mbox_set_reg(uint8_t *reg);
+int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
+ void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
+
+#endif /* __OCTEONTX_MBOX_H__ */
diff --git a/drivers/common/octeontx/rte_common_octeontx_version.map b/drivers/common/octeontx/rte_common_octeontx_version.map
new file mode 100644
index 00000000..f04b3b7f
--- /dev/null
+++ b/drivers/common/octeontx/rte_common_octeontx_version.map
@@ -0,0 +1,7 @@
+DPDK_18.05 {
+ global:
+
+ octeontx_mbox_set_ram_mbox_base;
+ octeontx_mbox_set_reg;
+ octeontx_mbox_send;
+};
diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
new file mode 100644
index 00000000..c68a032a
--- /dev/null
+++ b/drivers/common/qat/Makefile
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# build directories
+QAT_CRYPTO_DIR := $(RTE_SDK)/drivers/crypto/qat
+QAT_COMPRESS_DIR := $(RTE_SDK)/drivers/compress/qat
+VPATH=$(QAT_CRYPTO_DIR):$(QAT_COMPRESS_DIR)
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I$(QAT_CRYPTO_DIR)
+CFLAGS += -I$(QAT_COMPRESS_DIR)
+
+
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+ CFLAGS += -DALLOW_EXPERIMENTAL_API
+ LDLIBS += -lrte_compressdev
+ SRCS-y += qat_comp.c
+ SRCS-y += qat_comp_pmd.c
+ build_qat = yes
+endif
+
+# library symmetric crypto source files
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
+ LDLIBS += -lrte_cryptodev
+ LDLIBS += -lcrypto
+ CFLAGS += -DBUILD_QAT_SYM
+ SRCS-y += qat_sym.c
+ SRCS-y += qat_sym_session.c
+ SRCS-y += qat_sym_pmd.c
+ build_qat = yes
+endif
+endif
+
+ifdef build_qat
+
+ # library name
+ LIB = librte_pmd_qat.a
+
+ # library version
+ LIBABIVER := 1
+ # build flags
+ CFLAGS += $(WERROR_FLAGS)
+ CFLAGS += -O3
+
+ # library common source files
+ SRCS-y += qat_device.c
+ SRCS-y += qat_common.c
+ SRCS-y += qat_logs.c
+ SRCS-y += qat_qp.c
+
+ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+ LDLIBS += -lrte_pci -lrte_bus_pci
+
+ # export include files
+ SYMLINK-y-include +=
+
+ # versioning export map
+ EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
new file mode 100644
index 00000000..80b6b25a
--- /dev/null
+++ b/drivers/common/qat/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# This does not build a driver, but instead holds common files for
+# the crypto and compression drivers.
+build = false
+qat_deps = ['bus_pci']
+qat_sources = files('qat_common.c',
+ 'qat_qp.c',
+ 'qat_device.c',
+ 'qat_logs.c')
+qat_includes = [include_directories('.', 'qat_adf')]
+qat_ext_deps = []
+qat_cflags = []
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
index 4f8f3d13..1eef5513 100644
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
#define ADF_TRANSPORT_ACCESS_MACROS_H
@@ -93,8 +50,10 @@
#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-#define ADF_NUM_BUNDLES_PER_DEV 1
-#define ADF_NUM_SYM_QPS_PER_BUNDLE 2
+/* Maximum number of qps on a device for any service type */
+#define ADF_MAX_QPS_ON_ANY_SERVICE 2
+#define ADF_RING_DIR_TX 0
+#define ADF_RING_DIR_RX 1
/* Valid internal msg size values */
#define ADF_MSG_SIZE_32 0x01
@@ -173,4 +132,5 @@ do { \
#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
ADF_RING_CSR_INT_FLAG_AND_COL, value)
-#endif
+
+#endif /* ADF_TRANSPORT_ACCESS_MACROS_H */
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h
index 5de34d55..8f7cb37b 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_FW_H_
#define _ICP_QAT_FW_H_
@@ -160,6 +117,10 @@ struct icp_qat_fw_comn_resp {
#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6
+#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5
+#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1
#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
icp_qat_fw_comn_req_hdr_t.service_type
@@ -176,6 +137,16 @@ struct icp_qat_fw_comn_resp {
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNVNR_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_CNV_FLAG_MASK)
+
#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
@@ -247,29 +218,44 @@ struct icp_qat_fw_comn_resp {
& ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val) \
+ do { \
+ (next_curr_id) = \
+ (((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ (((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK)); \
+ } while (0)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val) \
+ do { \
+ (next_curr_id) = \
+ (((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); \
+ } while (0)
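These two helpers pack a pair of 4-bit slice ids into one byte: the next id lives in the high nibble, the current id in the low nibble, and each setter leaves the other nibble untouched. A hedged stand-alone demo, assuming this header is on the include path (the slice ids come from the icp_qat_fw_slice enum further down the file):

    #include <stdio.h>
    #include <stdint.h>
    #include "icp_qat_fw.h"

    int main(void)
    {
        uint8_t next_curr = 0;

        /* current slice: NULL; next: write the result to DRAM */
        ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr, ICP_QAT_FW_SLICE_NULL);
        ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr, ICP_QAT_FW_SLICE_DRAM_WR);

        printf("next_curr_id = 0x%02x\n", next_curr);
        return 0;
    }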
+
#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
-
-#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
- ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
- QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
- (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
- QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
- (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
- QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
- (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
- QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2
+#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1
+#define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0
+#define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1
#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+ QAT_COMN_RESP_PKE_STATUS_MASK)
+
#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
QAT_COMN_RESP_CMP_STATUS_MASK)
@@ -278,10 +264,18 @@ struct icp_qat_fw_comn_resp {
QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
QAT_COMN_RESP_XLAT_STATUS_MASK)
+#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \
+ QAT_COMN_RESP_XLT_WA_APPLIED_MASK)
+
#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \
+ QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK)
+
#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
@@ -300,8 +294,16 @@ struct icp_qat_fw_comn_resp {
#define ERR_CODE_OVERFLOW_ERROR -11
#define ERR_CODE_SOFT_ERROR -12
#define ERR_CODE_FATAL_ERROR -13
-#define ERR_CODE_SSM_ERROR -14
-#define ERR_CODE_ENDPOINT_ERROR -15
+#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14
+#define ERR_CODE_HW_INCOMPLETE_FILE -15
+#define ERR_CODE_SSM_ERROR -16
+#define ERR_CODE_ENDPOINT_ERROR -17
+#define ERR_CODE_CNV_ERROR -18
+#define ERR_CODE_EMPTY_DYM_BLOCK -19
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21
+#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22
+#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23
enum icp_qat_fw_slice {
ICP_QAT_FW_SLICE_NULL = 0,
diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
new file mode 100644
index 00000000..81381772
--- /dev/null
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _ICP_QAT_FW_COMP_H_
+#define _ICP_QAT_FW_COMP_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_comp_cmd_id {
+ ICP_QAT_FW_COMP_CMD_STATIC = 0,
+ /*!< Static Compress Request */
+
+ ICP_QAT_FW_COMP_CMD_DYNAMIC = 1,
+ /*!< Dynamic Compress Request */
+
+ ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2,
+ /*!< Decompress Request */
+
+ ICP_QAT_FW_COMP_CMD_DELIMITER
+ /**< Delimiter type */
+};
+
+/**< Flag usage */
+
+#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateless
+ */
+
+#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that session is stateful
+ */
+
+#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is NOT used
+ */
+
+#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header write back is NOT disabled
+ * when enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that type0 header write back is disabled
+ * when enhanced autoselectbest is used
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that use of secure RAM as
+ * an intermediate buffer is DISABLED.
+ */
+
+#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that use of secure RAM as
+ * an intermediate buffer is ENABLED.
+ */
+
+/**< Flag mask & bit position */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the session type
+ */
+
+#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine the session type
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for enhanced auto select best
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for disabling type zero header write back
+ * when Enhanced autoselect best is enabled. If set firmware does
+ * not return type0 store block header, only copies src to dest.
+ * (if best output is Type0)
+ */
+
+#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disabling type zero header write back
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for flag used to disable secure ram from
+ * being used as an intermediate buffer.
+ */
+
+#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for disabling secure RAM for use as an
+ * intermediate buffer.
+ */
+
+#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \
+ ret_uncomp, secure_ram) \
+ ((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \
+ << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \
+ (((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \
+ << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \
+ (((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \
+ << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \
+ (((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \
+ << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \
+ (((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \
+ << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS))
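ICP_QAT_FW_COMP_FLAGS_BUILD does nothing more than mask each one-bit argument and shift it to the position defined above. A hedged fragment, using only identifiers from this header, for the common case of a plain stateless session:

    #include <stdint.h>
    #include "icp_qat_fw_comp.h"

    static uint32_t
    example_comp_flags(void)
    {
        /* Stateless session, no (enhanced) autoselectbest, type0
         * header write back untouched, secure RAM still usable as an
         * intermediate buffer.
         */
        return ICP_QAT_FW_COMP_FLAGS_BUILD(
                ICP_QAT_FW_COMP_STATELESS_SESSION,
                ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
                ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
                ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
                ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF);
    }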
+
+union icp_qat_fw_comp_req_hdr_cd_pars {
+ /**< LWs 2-5 */
+ struct {
+ uint64_t content_desc_addr;
+ /**< Address of the content descriptor */
+
+ uint16_t content_desc_resrvd1;
+ /**< Content descriptor reserved field */
+
+ uint8_t content_desc_params_sz;
+ /**< Size of the content descriptor parameters in quad words.
+ * These parameters describe the session setup configuration
+ * info for the slices that this request relies upon i.e.
+ * the configuration word and cipher key needed by the cipher
+ * slice if there is a request for cipher processing.
+ */
+
+ uint8_t content_desc_hdr_resrvd2;
+ /**< Content descriptor reserved field */
+
+ uint32_t content_desc_resrvd3;
+ /**< Content descriptor reserved field */
+ } s;
+
+ struct {
+ uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /* Compression Slice Config Word */
+
+ uint32_t content_desc_resrvd4;
+ /**< Content descriptor reserved field */
+
+ } sl;
+
+};
+
+struct icp_qat_fw_comp_req_params {
+ /**< LW 14 */
+ uint32_t comp_len;
+ /**< Size of input to process in bytes. Note: only EOP requests can be
+ * odd for decompression. IA must set LSB to zero for odd sized
+ * intermediate inputs.
+ */
+
+ /**< LW 15 */
+ uint32_t out_buffer_sz;
+ /**< Size of output buffer in bytes */
+
+ /**< LW 16 */
+ uint32_t initial_crc32;
+ /**< CRC of previously processed bytes */
+
+ /**< LW 17 */
+ uint32_t initial_adler;
+ /**< Adler of previously processed bytes */
+
+ /**< LW 18 */
+ uint32_t req_par_flags;
+
+ /**< LW 19 */
+ uint32_t rsrvd;
+};
+
+#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr) \
+ ((((sop)&ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \
+ (((eop)&ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \
+ (((bfinal)&ICP_QAT_FW_COMP_BFINAL_MASK) \
+ << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \
+ (((cnv)&ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \
+ (((cnvnr)&ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \
+ << ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS))
+
+#define ICP_QAT_FW_COMP_NOT_SOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_SOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS Start of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_EOP 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request is NOT End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_EOP 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag representing that a request IS End of Packet
+ */
+
+#define ICP_QAT_FW_COMP_NOT_BFINAL 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating to firmware that this is NOT the last block
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating to firmware that this is the last block
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv check is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv check IS to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that NO cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY 1
+/**< @ingroup icp_qat_fw_comp
+ * Flag indicating that a cnv recovery is to be performed on the request
+ */
+
+#define ICP_QAT_FW_COMP_SOP_BITPOS 0
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for SOP
+ */
+
+#define ICP_QAT_FW_COMP_SOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine SOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_BITPOS 1
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for EOP
+ */
+
+#define ICP_QAT_FW_COMP_EOP_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask used to determine EOP
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the bfinal bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_BITPOS 16
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1
+/**< @ingroup icp_qat_fw_comp
+ * One bit mask for the CNV Recovery bit
+ */
+
+#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17
+/**< @ingroup icp_qat_fw_comp
+ * Starting bit position for the CNV Recovery bit
+ */
+
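For a one-shot stateless request the buffer is simultaneously start of packet, end of packet and the final block, and compress-and-verify (CNV) plus its recovery path can be requested in the same word. A hedged sketch of building LW 18 (req_par_flags) from the values defined above:

    #include <stdint.h>
    #include "icp_qat_fw_comp.h"

    static uint32_t
    example_req_par_flags(void)
    {
        /* Single-shot stateless request, CNV and CNV recovery on. */
        return ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
                ICP_QAT_FW_COMP_SOP,
                ICP_QAT_FW_COMP_EOP,
                ICP_QAT_FW_COMP_BFINAL,
                ICP_QAT_FW_COMP_CNV,
                ICP_QAT_FW_COMP_CNV_RECOVERY);
    }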
+struct icp_qat_fw_xlt_req_params {
+ /**< LWs 20-21 */
+ uint64_t inter_buff_ptr;
+ /**< This field specifies the physical address of an intermediate
+ * buffer SGL array. The array contains a pair of 64-bit
+ * intermediate buffer pointers to SGL buffer descriptors, one pair
+ * per CPM. Please refer to the CPM1.6 Firmware Interface HLD
+ * specification for more details.
+ */
+};
+
+
+struct icp_qat_fw_comp_cd_hdr {
+ /**< LW 24 */
+ uint16_t ram_bank_flags;
+ /**< Flags to show which ram banks to access */
+
+ uint8_t comp_cfg_offset;
+ /**< Quad word offset from the content descriptor parameters address
+ * to the parameters for the compression processing
+ */
+
+ uint8_t next_curr_id;
+ /**< This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the compressed data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after compression
+ * Current Id: Initialised with the compression slice type
+ */
+
+ /**< LW 25 */
+ uint32_t resrvd;
+ /**< LWs 26-27 */
+
+ uint64_t comp_state_addr;
+ /**< Pointer to compression state */
+
+ /**< LWs 28-29 */
+ uint64_t ram_banks_addr;
+ /**< Pointer to banks */
+
+};
+
+
+struct icp_qat_fw_xlt_cd_hdr {
+ /**< LW 30 */
+ uint16_t resrvd1;
+ /**< Reserved field and assumed set to 0 */
+
+ uint8_t resrvd2;
+ /**< Reserved field and assumed set to 0 */
+
+ uint8_t next_curr_id;
+ /**< This field combines the next and current id (each four bits) -
+ * the next id is the most significant nibble.
+ * Next Id: Set to the next slice to pass the translated data through.
+ * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through
+ * any more slices after compression
+ * Current Id: Initialised with the translation slice type
+ */
+
+ /**< LW 31 */
+ uint32_t resrvd3;
+ /**< Reserved and should be set to zero, needed for quadword
+ * alignment
+ */
+};
+
+struct icp_qat_fw_comp_req {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ /**< Common request header - for Service Command Id,
+ * use service-specific Compression Command Id.
+ * Service Specific Flags - use Compression Command Flags
+ */
+
+ /**< LWs 2-5 */
+ union icp_qat_fw_comp_req_hdr_cd_pars cd_pars;
+ /**< Compression service-specific content descriptor field which points
+ * either to a content descriptor parameter block or contains the
+ * compression slice config word.
+ */
+
+ /**< LWs 6-13 */
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ /**< Common request middle section */
+
+ /**< LWs 14-19 */
+ struct icp_qat_fw_comp_req_params comp_pars;
+ /**< Compression request Parameters block */
+
+ /**< LWs 20-21 */
+ union {
+ struct icp_qat_fw_xlt_req_params xlt_pars;
+ /**< Translation request Parameters block */
+ uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved if not used for translation */
+
+ } u1;
+
+ /**< LWs 22-23 */
+ union {
+ uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved - not used if Batch and Pack is disabled. */
+
+ uint64_t bnp_res_table_addr;
+ /**< A generic pointer to the unbounded list of
+ * icp_qat_fw_resp_comp_pars members. This pointer is only
+ * used when Batch and Pack is enabled.
+ */
+ } u3;
+
+ /**< LWs 24-29 */
+ struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl;
+ /**< Compression request content descriptor control block header */
+
+ /**< LWs 30-31 */
+ union {
+ struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl;
+ /**< Translation request content descriptor
+ * control block header
+ */
+
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2];
+ /**< Reserved if not used for translation */
+ } u2;
+};
+
+struct icp_qat_fw_resp_comp_pars {
+ /**< LW 4 */
+ uint32_t input_byte_counter;
+ /**< Input byte counter */
+
+ /**< LW 5 */
+ uint32_t output_byte_counter;
+ /**< Output byte counter */
+
+ /**< LW 6 & 7 */
+ union {
+ uint64_t curr_chksum;
+ struct {
+ /**< LW 6 */
+ uint32_t curr_crc32;
+ /**< LW 7 */
+ uint32_t curr_adler_32;
+ };
+ };
+};
+
+struct icp_qat_fw_comp_resp {
+ /**< LWs 0-1 */
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ /**< Common interface response format see icp_qat_fw.h */
+
+ /**< LWs 2-3 */
+ uint64_t opaque_data;
+ /**< Opaque data passed from the request to the response message */
+
+ /**< LWs 4-7 */
+ struct icp_qat_fw_resp_comp_pars comp_resp_pars;
+ /**< Common response params (checksums and byte counts) */
+};
+
+#endif
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
index fbf2b839..c33bc3fe 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_FW_LA_H_
#define _ICP_QAT_FW_LA_H_
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h
index d03688c7..e7961dba 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/common/qat/qat_adf/icp_qat_hw.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_HW_H_
#define _ICP_QAT_HW_H_
@@ -115,19 +72,44 @@ struct icp_qat_hw_auth_config {
#define QAT_AUTH_ALGO_MASK 0xF
#define QAT_AUTH_CMP_BITPOS 8
#define QAT_AUTH_CMP_MASK 0x7F
-#define QAT_AUTH_SHA3_PADDING_BITPOS 16
-#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1
#define QAT_AUTH_ALGO_SHA3_BITPOS 22
#define QAT_AUTH_ALGO_SHA3_MASK 0x3
-#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
- (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
- ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
- (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
- QAT_AUTH_ALGO_SHA3_BITPOS) | \
- (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
- (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
- & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
- ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF
+#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0
+#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1
+#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0
+#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1
+#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0
+#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0
+
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ ((((algo) >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \
+ << QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \
+ QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \
+ << QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \
+ (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \
+ QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \
+ << QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \
+ (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \
+ ((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \
+ QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \
+ << QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \
+ (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \
+ QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \
+ << QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS))
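The reworked builder now hard-codes the default SHA-3 padding behaviour into the lower config word, while BUILD_UPPER supplies the (reserved) programmable padding fields for the upper word. A hedged sketch of its use; the mode and algo values come from enums defined elsewhere in this header (e.g. ICP_QAT_HW_AUTH_ALGO_SHA1), so they are left as parameters here:

    #include <stdint.h>
    #include "icp_qat_hw.h"

    /* digest_len is the compare length in bytes, e.g. 20 for SHA-1 */
    static uint32_t
    example_auth_config(uint32_t mode, uint32_t algo, uint32_t digest_len)
    {
        return ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, digest_len);
    }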
struct icp_qat_hw_auth_counter {
uint32_t counter;
@@ -150,13 +132,13 @@ struct icp_qat_hw_auth_setup {
#define ICP_QAT_HW_MD5_STATE1_SZ 16
#define ICP_QAT_HW_SHA1_STATE1_SZ 20
#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
#define ICP_QAT_HW_SHA256_STATE1_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
#define ICP_QAT_HW_SHA512_STATE1_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
-#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
@@ -164,17 +146,18 @@ struct icp_qat_hw_auth_setup {
#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+
#define ICP_QAT_HW_NULL_STATE2_SZ 32
#define ICP_QAT_HW_MD5_STATE2_SZ 16
#define ICP_QAT_HW_SHA1_STATE2_SZ 20
#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
#define ICP_QAT_HW_SHA256_STATE2_SZ 32
#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
#define ICP_QAT_HW_SHA512_STATE2_SZ 64
#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
@@ -197,6 +180,12 @@ struct icp_qat_hw_auth_sha512 {
uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
};
+struct icp_qat_hw_auth_sha3_512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+};
+
struct icp_qat_hw_auth_algo_blk {
struct icp_qat_hw_auth_sha512 sha;
};
@@ -326,4 +315,72 @@ struct icp_qat_hw_cipher_algo_blk {
uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
} __rte_cache_aligned;
+/* ========================================================================= */
+/* COMPRESSION SLICE */
+/* ========================================================================= */
+
+enum icp_qat_hw_compression_direction {
+ ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0,
+ ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1,
+ ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_delayed_match {
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1,
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2
+};
+
+enum icp_qat_hw_compression_algo {
+ ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0,
+ ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1,
+ ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2
+};
+
+
+enum icp_qat_hw_compression_depth {
+ ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0,
+ ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1,
+ ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2,
+ ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3,
+ ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4
+};
+
+enum icp_qat_hw_compression_file_type {
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4,
+ ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5
+};
+
+struct icp_qat_hw_compression_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+#define QAT_COMPRESSION_DIR_BITPOS 4
+#define QAT_COMPRESSION_DIR_MASK 0x7
+#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16
+#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1
+#define QAT_COMPRESSION_ALGO_BITPOS 31
+#define QAT_COMPRESSION_ALGO_MASK 0x1
+#define QAT_COMPRESSION_DEPTH_BITPOS 28
+#define QAT_COMPRESSION_DEPTH_MASK 0x7
+#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24
+#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF
+
+#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \
+ dir, delayed, algo, depth, filetype) \
+ ((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \
+ (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \
+ << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \
+ (((algo) & QAT_COMPRESSION_ALGO_MASK) \
+ << QAT_COMPRESSION_ALGO_BITPOS) | \
+ (((depth) & QAT_COMPRESSION_DEPTH_MASK) \
+ << QAT_COMPRESSION_DEPTH_BITPOS) | \
+ (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \
+ << QAT_COMPRESSION_FILE_TYPE_BITPOS))
+
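Every argument of the compression config builder maps onto one of the enums above, so a complete slice configuration can be expressed without magic numbers. A hedged example for static deflate compression with a depth-8 history search:

    #include <stdint.h>
    #include "icp_qat_hw.h"

    static void
    example_comp_config(struct icp_qat_hw_compression_config *cfg)
    {
        cfg->val = ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
                ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
                ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
                ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
                ICP_QAT_HW_COMPRESSION_DEPTH_8,
                ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
        cfg->reserved = 0;
    }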
#endif
diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c
new file mode 100644
index 00000000..47538669
--- /dev/null
+++ b/drivers/common/qat/qat_common.c
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "qat_common.h"
+#include "qat_device.h"
+#include "qat_logs.h"
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset,
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs)
+{
+ int res = -EINVAL;
+ uint32_t buf_len, nr;
+ struct qat_sgl *list = (struct qat_sgl *)list_in;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ uint8_t *virt_addr[max_segs];
+#endif
+
+ for (nr = buf_len = 0; buf && nr < max_segs; buf = buf->next) {
+ if (offset >= rte_pktmbuf_data_len(buf)) {
+ offset -= rte_pktmbuf_data_len(buf);
+ continue;
+ }
+
+ list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset;
+ list->buffers[nr].resrvd = 0;
+ list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ virt_addr[nr] = rte_pktmbuf_mtod_offset(buf, uint8_t*, offset);
+#endif
+ offset = 0;
+ buf_len += list->buffers[nr].len;
+
+ if (buf_len >= data_len) {
+ list->buffers[nr].len -= buf_len - data_len;
+ res = 0;
+ break;
+ }
+ ++nr;
+ }
+
+ if (unlikely(res != 0)) {
+ if (nr == max_segs) {
+ QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)",
+ max_segs);
+ } else {
+ QAT_DP_LOG(ERR, "Mbuf chain is too short");
+ }
+ } else {
+
+ list->num_bufs = ++nr;
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs);
+ for (nr = 0; nr < list->num_bufs; nr++) {
+ QAT_DP_LOG(INFO,
+ "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64,
+ nr, list->buffers[nr].len,
+ list->buffers[nr].addr);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL",
+ virt_addr[nr],
+ list->buffers[nr].len);
+ }
+#endif
+ }
+
+ return res;
+}
+
+void qat_stats_get(struct qat_pci_device *dev,
+ struct qat_common_stats *stats,
+ enum qat_service_type service)
+{
+ int i;
+ struct qat_qp **qp;
+
+ if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d",
+ stats, dev, service);
+ return;
+ }
+
+ qp = dev->qps_in_use[service];
+ for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+ if (qp[i] == NULL) {
+ QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+ service, i);
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
+ stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
+ }
+}
+
+void qat_stats_reset(struct qat_pci_device *dev,
+ enum qat_service_type service)
+{
+ int i;
+ struct qat_qp **qp;
+
+ if (dev == NULL || service >= QAT_SERVICE_INVALID) {
+ QAT_LOG(ERR, "invalid param: dev %p, service %d",
+ dev, service);
+ return;
+ }
+
+ qp = dev->qps_in_use[service];
+ for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) {
+ if (qp[i] == NULL) {
+ QAT_LOG(DEBUG, "Service %d Uninitialised qp %d",
+ service, i);
+ continue;
+ }
+ memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
+ }
+
+ QAT_LOG(DEBUG, "QAT: %d stats cleared", service);
+}
diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h
new file mode 100644
index 00000000..d4bef539
--- /dev/null
+++ b/drivers/common/qat/qat_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_COMMON_H_
+#define _QAT_COMMON_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+
+/**< Intel(R) QAT device name for PCI registration */
+#define QAT_PCI_NAME qat
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+
+/* Intel(R) QuickAssist Technology device generations are numbered
+ * from 1, according to the hardware generation of the device.
+ */
+enum qat_device_gen {
+ QAT_GEN1 = 1,
+ QAT_GEN2
+};
+
+enum qat_service_type {
+ QAT_SERVICE_ASYMMETRIC = 0,
+ QAT_SERVICE_SYMMETRIC,
+ QAT_SERVICE_COMPRESSION,
+ QAT_SERVICE_INVALID
+};
+
+#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID)
+
+/**< Common struct for scatter-gather list operations */
+struct qat_flat_buf {
+ uint32_t len;
+ uint32_t resrvd;
+ uint64_t addr;
+} __rte_packed;
+
+#define qat_sgl_hdr struct { \
+ uint64_t resrvd; \
+ uint32_t num_bufs; \
+ uint32_t num_mapped_bufs; \
+}
+
+__extension__
+struct qat_sgl {
+ qat_sgl_hdr;
+ /* flexible array of flat buffers */
+ struct qat_flat_buf buffers[0];
+} __rte_packed __rte_cache_aligned;
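Because buffers[] is a flexible array, a qat_sgl for n segments occupies sizeof(struct qat_sgl) plus n flat-buffer entries and must be carved out accordingly. A hedged sketch; in the driver itself this memory sits in DMA-able queue memory rather than on the heap:

    #include <stdlib.h>
    #include "qat_common.h"

    static struct qat_sgl *
    example_sgl_alloc(uint16_t max_segs)
    {
        /* zeroed so resrvd/num_mapped_bufs start out clean */
        return calloc(1, sizeof(struct qat_sgl) +
                max_segs * sizeof(struct qat_flat_buf));
    }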
+
+/** Common, i.e. not service-specific, statistics */
+struct qat_common_stats {
+ uint64_t enqueued_count;
+ /**< Count of all operations enqueued */
+ uint64_t dequeued_count;
+ /**< Count of all operations dequeued */
+
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations enqueued */
+ uint64_t dequeue_err_count;
+ /**< Total error count on operations dequeued */
+};
+
+struct qat_pci_device;
+
+int
+qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset,
+ void *list_in, uint32_t data_len,
+ const uint16_t max_segs);
+void
+qat_stats_get(struct qat_pci_device *dev,
+ struct qat_common_stats *stats,
+ enum qat_service_type service);
+void
+qat_stats_reset(struct qat_pci_device *dev,
+ enum qat_service_type service);
+
+#endif /* _QAT_COMMON_H_ */
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
new file mode 100644
index 00000000..f32d7235
--- /dev/null
+++ b/drivers/common/qat/qat_device.c
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_string_fns.h>
+
+#include "qat_device.h"
+#include "adf_transport_access_macros.h"
+#include "qat_sym_pmd.h"
+
+/* Hardware device information per generation */
+__extension__
+struct qat_gen_hw_data qat_gen_config[] = {
+ [QAT_GEN1] = {
+ .dev_gen = QAT_GEN1,
+ .qp_hw_data = qat_gen1_qps,
+ },
+ [QAT_GEN2] = {
+ .dev_gen = QAT_GEN2,
+ .qp_hw_data = qat_gen1_qps,
+ /* gen2 has same ring layout as gen1 */
+ },
+};
+
+
+static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES];
+static int qat_nb_pci_devices;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+
+static const struct rte_pci_id pci_id_qat_map[] = {
+ {
+ RTE_PCI_DEVICE(0x8086, 0x0443),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x37c9),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x19e3),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x6f55),
+ },
+ {.device_id = 0},
+};
+
+
+static struct qat_pci_device *
+qat_pci_get_dev(uint8_t dev_id)
+{
+ return &qat_pci_devices[dev_id];
+}
+
+static struct qat_pci_device *
+qat_pci_get_named_dev(const char *name)
+{
+ struct qat_pci_device *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) {
+ dev = &qat_pci_devices[i];
+
+ if ((dev->attached == QAT_ATTACHED) &&
+ (strcmp(dev->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static uint8_t
+qat_pci_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) {
+ if (qat_pci_devices[dev_id].attached == QAT_DETACHED)
+ break;
+ }
+ return dev_id;
+}
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev)
+{
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return qat_pci_get_named_dev(name);
+}
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_dev;
+ uint8_t qat_dev_id;
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+ if (qat_pci_get_named_dev(name) != NULL) {
+ QAT_LOG(ERR, "QAT device with name %s already allocated!",
+ name);
+ return NULL;
+ }
+
+ qat_dev_id = qat_pci_find_free_device_index();
+ if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) {
+ QAT_LOG(ERR, "Reached maximum number of QAT devices");
+ return NULL;
+ }
+
+ qat_dev = qat_pci_get_dev(qat_dev_id);
+ memset(qat_dev, 0, sizeof(*qat_dev));
+ strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN);
+ qat_dev->qat_dev_id = qat_dev_id;
+ qat_dev->pci_dev = pci_dev;
+ switch (qat_dev->pci_dev->id.device_id) {
+ case 0x0443:
+ qat_dev->qat_dev_gen = QAT_GEN1;
+ break;
+ case 0x37c9:
+ case 0x19e3:
+ case 0x6f55:
+ qat_dev->qat_dev_gen = QAT_GEN2;
+ break;
+ default:
+ QAT_LOG(ERR, "Invalid dev_id, can't determine generation");
+ return NULL;
+ }
+
+ rte_spinlock_init(&qat_dev->arb_csr_lock);
+
+ qat_dev->attached = QAT_ATTACHED;
+
+ qat_nb_pci_devices++;
+
+ QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d",
+ qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices);
+
+ return qat_dev;
+}
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_dev;
+ char name[QAT_DEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+ snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat");
+ qat_dev = qat_pci_get_named_dev(name);
+ if (qat_dev != NULL) {
+
+ /* Check that there are no service devs still on pci device */
+ if (qat_dev->sym_dev != NULL)
+ return -EBUSY;
+
+ qat_dev->attached = QAT_DETACHED;
+ qat_nb_pci_devices--;
+ }
+ QAT_LOG(DEBUG, "QAT device %s released, total QATs %d",
+ name, qat_nb_pci_devices);
+ return 0;
+}
+
+static int
+qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev,
+ struct rte_pci_device *pci_dev)
+{
+ qat_sym_dev_destroy(qat_pci_dev);
+ qat_comp_dev_destroy(qat_pci_dev);
+ qat_asym_dev_destroy(qat_pci_dev);
+ return qat_pci_device_release(pci_dev);
+}
+
+static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+ struct qat_pci_device *qat_pci_dev;
+
+ QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ qat_pci_dev = qat_pci_device_allocate(pci_dev);
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ ret = qat_sym_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ ret = qat_comp_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ ret = qat_asym_dev_create(qat_pci_dev);
+ if (ret != 0)
+ goto error_out;
+
+ return 0;
+
+error_out:
+ qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+ return ret;
+
+}
+
+static int qat_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct qat_pci_device *qat_pci_dev;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev);
+ if (qat_pci_dev == NULL)
+ return 0;
+
+ return qat_pci_dev_destroy(qat_pci_dev, pci_dev);
+}
+
+static struct rte_pci_driver rte_qat_pmd = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = qat_pci_probe,
+ .remove = qat_pci_remove
+};
+
+__attribute__((weak)) int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
+__attribute__((weak)) int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
+{
+ return 0;
+}
+
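The weak stubs above are what allow this common PCI driver to link stand-alone: if, say, the compression PMD is not compiled in, qat_comp_dev_create() resolves to the no-op and probing still succeeds; when the real PMD is linked, its strong definition silently wins. The pattern in isolation (GCC/Clang), with hypothetical names:

    /* common.c: default used only when nothing stronger is linked */
    __attribute__((weak)) int
    service_create(void)
    {
        return 0;
    }

    /* pmd.c: a strong definition here replaces the weak one at link
     * time -- no registration hooks or #ifdefs needed in common.c
     */
    int
    service_create(void)
    {
        return 1;
    }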
+RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map);
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
new file mode 100644
index 00000000..9599fc59
--- /dev/null
+++ b/drivers/common/qat/qat_device.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_DEVICE_H_
+#define _QAT_DEVICE_H_
+
+#include <rte_bus_pci.h>
+
+#include "qat_common.h"
+#include "qat_logs.h"
+#include "adf_transport_access_macros.h"
+#include "qat_qp.h"
+
+#define QAT_DETACHED (0)
+#define QAT_ATTACHED (1)
+
+#define QAT_DEV_NAME_MAX_LEN 64
+
+/*
+ * This struct holds all the data about a QAT pci device
+ * including data about all services it supports.
+ * It contains
+ * - hw_data
+ * - config data
+ * - runtime data
+ */
+struct qat_sym_dev_private;
+struct qat_comp_dev_private;
+
+struct qat_pci_device {
+
+ /* Data used by all services */
+ char name[QAT_DEV_NAME_MAX_LEN];
+ /**< Name of qat pci device */
+ uint8_t qat_dev_id;
+ /**< Device instance for this qat pci device */
+ struct rte_pci_device *pci_dev;
+ /**< PCI information. */
+ enum qat_device_gen qat_dev_gen;
+ /**< QAT device generation */
+ rte_spinlock_t arb_csr_lock;
+ /**< lock to protect accesses to the arbiter CSR */
+ __extension__
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+
+ struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE];
+ /**< links to qps set up for each service, index same as on API */
+
+ /* Data relating to symmetric crypto service */
+ struct qat_sym_dev_private *sym_dev;
+ /**< link back to cryptodev private data */
+ struct rte_device sym_rte_dev;
+ /**< This represents the crypto subset of this pci device.
+ * Register with this rather than with the one in
+ * pci_dev so that its driver can have a crypto-specific name
+ */
+
+ /* Data relating to compression service */
+ struct qat_comp_dev_private *comp_dev;
+ /**< link back to compressdev private data */
+
+ /* Data relating to asymmetric crypto service */
+
+};
+
+struct qat_gen_hw_data {
+ enum qat_device_gen dev_gen;
+ const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE];
+};
+
+extern struct qat_gen_hw_data qat_gen_config[];
+
+struct qat_pci_device *
+qat_pci_device_allocate(struct rte_pci_device *pci_dev);
+
+int
+qat_pci_device_release(struct rte_pci_device *pci_dev);
+
+struct qat_pci_device *
+qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev);
+
+/* declaration needed for weak functions */
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused);
+
+#endif /* _QAT_DEVICE_H_ */
diff --git a/drivers/common/qat/qat_logs.c b/drivers/common/qat/qat_logs.c
new file mode 100644
index 00000000..7a861709
--- /dev/null
+++ b/drivers/common/qat/qat_logs.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_log.h>
+#include <rte_hexdump.h>
+
+#include "qat_logs.h"
+
+int qat_gen_logtype;
+int qat_dp_logtype;
+
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len)
+{
+ if (level > rte_log_get_global_level())
+ return 0;
+ if (level > (uint32_t)(rte_log_get_level(logtype)))
+ return 0;
+
+ rte_hexdump(rte_logs.file == NULL ? stderr : rte_logs.file,
+ title, buf, len);
+ return 0;
+}
+
+RTE_INIT(qat_pci_init_log)
+{
+ /* Non-data-path logging for pci device and all services */
+ qat_gen_logtype = rte_log_register("pmd.qat_general");
+ if (qat_gen_logtype >= 0)
+ rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE);
+
+ /* data-path logging for all services */
+ qat_dp_logtype = rte_log_register("pmd.qat_dp");
+ if (qat_dp_logtype >= 0)
+ rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/common/qat/qat_logs.h b/drivers/common/qat/qat_logs.h
new file mode 100644
index 00000000..4baea12c
--- /dev/null
+++ b/drivers/common/qat/qat_logs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_LOGS_H_
+#define _QAT_LOGS_H_
+
+extern int qat_gen_logtype;
+extern int qat_dp_logtype;
+
+#define QAT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qat_gen_logtype, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, qat_dp_logtype, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define QAT_DP_HEXDUMP_LOG(level, title, buf, len) \
+ qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len)
+
+/**
+ * qat_hexdump_log - Dump out memory in a special hex dump format.
+ *
+ * Dump out the message buffer in a special hex dump output format with
+ * characters printed for each line of 16 hex values. The message will be
+ * sent to the stream defined by rte_logs.file, or to stderr if
+ * rte_logs.file is undefined.
+ */
+int
+qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title,
+ const void *buf, unsigned int len);
+
+#endif /* _QAT_LOGS_H_ */
diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c
new file mode 100644
index 00000000..7ca7a45e
--- /dev/null
+++ b/drivers/common/qat/qat_qp.c
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+
+#include "qat_logs.h"
+#include "qat_device.h"
+#include "qat_qp.h"
+#include "qat_sym.h"
+#include "qat_comp.h"
+#include "adf_transport_access_macros.h"
+
+
+#define ADF_MAX_DESC 4096
+#define ADF_MIN_DESC 128
+
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * index), value)
+
+__extension__
+const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+ [ADF_MAX_QPS_ON_ANY_SERVICE] = {
+ /* queue pairs which provide an asymmetric crypto service */
+ [QAT_SERVICE_ASYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 0,
+ .rx_ring_num = 8,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+
+ }, {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 1,
+ .rx_ring_num = 9,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a symmetric crypto service */
+ [QAT_SERVICE_SYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 2,
+ .rx_ring_num = 10,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ },
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 3,
+ .rx_ring_num = 11,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ },
+ /* queue pairs which provide a compression service */
+ [QAT_SERVICE_COMPRESSION] = {
+ {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 6,
+ .rx_ring_num = 14,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }, {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 7,
+ .rx_ring_num = 15,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }
+ }
+};
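+
+/* Note: the gen1 layout above follows a fixed pairing within each
+ * 16-ring bundle: a service's rx ring number is its tx ring number
+ * plus 8 (tx 0/1 -> rx 8/9, tx 2/3 -> rx 10/11, tx 6/7 -> rx 14/15).
+ */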
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes);
+static void qat_queue_delete(struct qat_queue *queue);
+static int qat_queue_create(struct qat_pci_device *qat_dev,
+ struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *queue_size_for_csr);
+static void adf_configure_queues(struct qat_qp *queue);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+
+
+int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service)
+{
+ int i, count;
+
+ for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
+ if (qp_hw_data[i].service_type == service)
+ count++;
+ return count;
+}
+
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+	if (mz != NULL) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ QAT_LOG(DEBUG, "re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+
+ QAT_LOG(ERR, "Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+				"Requested size %u, socket %d",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+	QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
+ queue_name, queue_size, socket_id);
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+int qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr,
+ uint16_t queue_pair_id,
+ struct qat_qp_config *qat_qp_conf)
+
+{
+ struct qat_qp *qp;
+ struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
+
+ QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
+ queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);
+
+ if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
+ (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
+ QAT_LOG(ERR, "Can't create qp for %u descriptors",
+ qat_qp_conf->nb_descriptors);
+ return -EINVAL;
+ }
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ QAT_LOG(ERR, "Could not find VF config space "
+ "(UIO driver attached?).");
+ return -EINVAL;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc("qat PMD qp metadata",
+ sizeof(*qp), RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ QAT_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+ qp->nb_descriptors = qat_qp_conf->nb_descriptors;
+ qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+ qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+ RTE_CACHE_LINE_SIZE);
+ if (qp->op_cookies == NULL) {
+ QAT_LOG(ERR, "Failed to alloc mem for cookie");
+ rte_free(qp);
+ return -ENOMEM;
+ }
+
+ qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
+ qp->inflights16 = 0;
+
+ if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf,
+ ADF_RING_DIR_TX) != 0) {
+ QAT_LOG(ERR, "Tx queue create failed "
+ "queue_pair_id=%u", queue_pair_id);
+ goto create_err;
+ }
+
+ if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf,
+ ADF_RING_DIR_RX) != 0) {
+ QAT_LOG(ERR, "Rx queue create failed "
+ "queue_pair_id=%hu", queue_pair_id);
+ qat_queue_delete(&(qp->tx_q));
+ goto create_err;
+ }
+
+ adf_configure_queues(qp);
+ adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
+ &qat_dev->arb_csr_lock);
+
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
+ "%s%d_cookies_%s_qp%hu",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qat_qp_conf->service_str, queue_pair_id);
+
+ QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
+ qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->op_cookie_pool == NULL)
+ qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+ qp->nb_descriptors,
+ qat_qp_conf->cookie_size, 64, 0,
+ NULL, NULL, NULL, NULL, qat_qp_conf->socket_id,
+ 0);
+ if (!qp->op_cookie_pool) {
+ QAT_LOG(ERR, "QAT PMD Cannot create"
+ " op mempool");
+ goto create_err;
+ }
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+ if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+ QAT_LOG(ERR, "QAT PMD Cannot get op_cookie");
+ goto create_err;
+ }
+ }
+
+ qp->qat_dev_gen = qat_dev->qat_dev_gen;
+ qp->build_request = qat_qp_conf->build_request;
+ qp->service_type = qat_qp_conf->hw->service_type;
+ qp->qat_dev = qat_dev;
+
+ QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
+ queue_pair_id, op_cookie_pool_name);
+
+ *qp_addr = qp;
+ return 0;
+
+create_err:
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ return -EFAULT;
+}
+
+int qat_qp_release(struct qat_qp **qp_addr)
+{
+ struct qat_qp *qp = *qp_addr;
+ uint32_t i;
+
+ if (qp == NULL) {
+ QAT_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ QAT_LOG(DEBUG, "Free qp on qat_pci device %d",
+ qp->qat_dev->qat_dev_id);
+
+ /* Don't free memory if there are still responses to be processed */
+ if (qp->inflights16 == 0) {
+ qat_queue_delete(&(qp->tx_q));
+ qat_queue_delete(&(qp->rx_q));
+ } else {
+ return -EAGAIN;
+ }
+
+ adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
+ &qp->qat_dev->arb_csr_lock);
+
+ for (i = 0; i < qp->nb_descriptors; i++)
+ rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ *qp_addr = NULL;
+ return 0;
+}
+
+
+static void qat_queue_delete(struct qat_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status = 0;
+
+ if (queue == NULL) {
+ QAT_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+ QAT_LOG(DEBUG, "Free ring %d, memzone: %s",
+ queue->hw_queue_number, queue->memz_name);
+
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ /* Write an unused pattern to the queue memory. */
+ memset(queue->base_addr, 0x7F, queue->queue_size);
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ QAT_LOG(ERR, "Error %d on freeing queue %s",
+ status, queue->memz_name);
+ } else {
+ QAT_LOG(DEBUG, "queue %s doesn't exist",
+ queue->memz_name);
+ }
+}
+
+static int
+qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
+ struct qat_qp_config *qp_conf, uint8_t dir)
+{
+ uint64_t queue_base;
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ struct rte_pci_device *pci_dev = qat_dev->pci_dev;
+ int ret = 0;
+ uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
+ qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
+ uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);
+
+ queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
+ queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
+ qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);
+
+ if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ QAT_LOG(ERR, "Invalid descriptor size %d", desc_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a memzone for the queue - create a unique name.
+ */
+ snprintf(queue->memz_name, sizeof(queue->memz_name),
+ "%s_%d_%s_%s_%d_%d",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qp_conf->service_str, "qp_mem",
+ queue->hw_bundle_number, queue->hw_queue_number);
+ qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ qp_conf->socket_id);
+ if (qp_mz == NULL) {
+ QAT_LOG(ERR, "Failed to allocate ring memzone");
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (char *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->iova;
+ if (qat_qp_check_queue_alignment(queue->base_phys_addr,
+ queue_size_bytes)) {
+		QAT_LOG(ERR, "Invalid alignment on queue create "
+				"0x%"PRIx64,
+				queue->base_phys_addr);
+ ret = -EFAULT;
+ goto queue_create_err;
+ }
+
+ if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors,
+ &(queue->queue_size)) != 0) {
+ QAT_LOG(ERR, "Invalid num inflights");
+ ret = -EINVAL;
+ goto queue_create_err;
+ }
+
+ queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
+ ADF_BYTES_TO_MSG_SIZE(desc_size));
+ queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
+
+ if (queue->max_inflights < 2) {
+ QAT_LOG(ERR, "Invalid num inflights");
+ ret = -EINVAL;
+ goto queue_create_err;
+ }
+ queue->head = 0;
+ queue->tail = 0;
+ queue->msg_size = desc_size;
+
+ /*
+ * Write an unused pattern to the queue memory.
+ */
+ memset(queue->base_addr, 0x7F, queue_size_bytes);
+
+ queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+ queue->queue_size);
+
+ io_addr = pci_dev->mem_resource[0].addr;
+
+ WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_base);
+
+ QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
+ " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
+ queue->memz_name,
+ queue->queue_size, queue_size_bytes,
+ qp_conf->nb_descriptors, desc_size,
+ queue->max_inflights, queue->modulo_mask);
+
+ return 0;
+
+queue_create_err:
+ rte_memzone_free(qp_mz);
+ return ret;
+}
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes)
+{
+ if (((queue_size_bytes - 1) & phys_addr) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *p_queue_size_for_csr)
+{
+ uint8_t i = ADF_MIN_RING_SIZE;
+
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) ==
+ (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+ *p_queue_size_for_csr = i;
+ return 0;
+ }
+	QAT_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
+ return -EINVAL;
+}
+
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ rte_spinlock_lock(lock);
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value |= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
+}
+
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ rte_spinlock_lock(lock);
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value &= ~(0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
+}
+
+static void adf_configure_queues(struct qat_qp *qp)
+{
+ uint32_t queue_config;
+ struct qat_queue *queue = &qp->tx_q;
+
+ queue_config = BUILD_RING_CONFIG(queue->queue_size);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+
+ queue = &qp->rx_q;
+ queue_config =
+ BUILD_RESP_RING_CONFIG(queue->queue_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+}
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
+{
+ return data & modulo_mask;
+}
+
+static inline void
+txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
+{
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, q->tail);
+ q->nb_pending_requests = 0;
+ q->csr_tail = q->tail;
+}
+
+static inline void
+rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = q->csr_head;
+ new_head = q->head;
+ max_head = qp->nb_descriptors * q->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
+ memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
+ }
+ q->nb_processed_responses = 0;
+ q->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, new_head);
+}
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register int ret;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+ int overflow;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ tmp_qp->inflights16 += nb_ops;
+ overflow = tmp_qp->inflights16 - queue->max_inflights;
+ if (overflow > 0) {
+ tmp_qp->inflights16 -= overflow;
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = tmp_qp->build_request(*ops, base_addr + tail,
+ tmp_qp->op_cookies[tail / queue->msg_size],
+ tmp_qp->qat_dev_gen);
+ if (ret != 0) {
+ tmp_qp->stats.enqueue_err_count++;
+ /*
+ * This message cannot be enqueued,
+			 * decrease number of ops that were not sent
+ */
+ tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
+ ops++;
+ nb_ops_sent++;
+ }
+kick_tail:
+ queue->tail = tail;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ queue->nb_pending_requests += nb_ops_sent;
+ if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+ queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+ txq_write_tail(tmp_qp, queue);
+ }
+ return nb_ops_sent;
+}
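+
+/* Worked example of the coalescing conditions above, using the default
+ * thresholds from qat_qp.h: with 300 ops in flight and 10 requests
+ * pending since the last CSR write, neither "inflights16 < 256" nor
+ * "nb_pending_requests > 32" holds, so the tail write is deferred; it
+ * happens later, once pending requests exceed 32 or dequeues drain the
+ * in-flight count below 256.
+ */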
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops)
+{
+ struct qat_queue *rx_queue, *tx_queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ uint32_t head;
+ uint32_t resp_counter = 0;
+ uint8_t *resp_msg;
+
+ rx_queue = &(tmp_qp->rx_q);
+ tx_queue = &(tmp_qp->tx_q);
+ head = rx_queue->head;
+ resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head;
+
+ while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+ resp_counter != nb_ops) {
+
+ if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC)
+ qat_sym_process_response(ops, resp_msg);
+ else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
+ qat_comp_process_response(ops, resp_msg);
+
+ head = adf_modulo(head + rx_queue->msg_size,
+ rx_queue->modulo_mask);
+
+ resp_msg = (uint8_t *)rx_queue->base_addr + head;
+ ops++;
+ resp_counter++;
+ }
+ if (resp_counter > 0) {
+ rx_queue->head = head;
+ tmp_qp->stats.dequeued_count += resp_counter;
+ rx_queue->nb_processed_responses += resp_counter;
+ tmp_qp->inflights16 -= resp_counter;
+
+ if (rx_queue->nb_processed_responses >
+ QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(tmp_qp, rx_queue);
+ }
+ /* also check if tail needs to be advanced */
+ if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+ tx_queue->tail != tx_queue->csr_tail) {
+ txq_write_tail(tmp_qp, tx_queue);
+ }
+ return resp_counter;
+}
+
+__attribute__((weak)) int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+ return 0;
+}
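
Because ring sizes are powers of two, adf_modulo() above reduces a byte offset with a single mask; a self-contained sketch of the tail advance, with the ring geometry assumed for illustration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* assume a 1024-byte ring of 128-byte messages;
	 * modulo_mask is the ring size in bytes minus one
	 */
	uint32_t modulo_mask = 1024 - 1;
	uint32_t msg_size = 128;
	uint32_t tail = 896;	/* offset of the last slot */

	/* the same computation adf_modulo() performs */
	tail = (tail + msg_size) & modulo_mask;
	assert(tail == 0);	/* wrapped back to the ring start */
	return 0;
}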
diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h
new file mode 100644
index 00000000..69f8a613
--- /dev/null
+++ b/drivers/common/qat/qat_qp.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#ifndef _QAT_QP_H_
+#define _QAT_QP_H_
+
+#include "qat_common.h"
+#include "adf_transport_access_macros.h"
+
+struct qat_pci_device;
+
+#define QAT_CSR_HEAD_WRITE_THRESH 32U
+/* number of responses to accumulate before writing head CSR */
+#define QAT_CSR_TAIL_WRITE_THRESH 32U
+/* number of requests to accumulate before writing tail CSR */
+#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
+/* number of inflights below which no tail write coalescing should occur */
+
+typedef int (*build_request_t)(void *op,
+ uint8_t *req, void *op_cookie,
+ enum qat_device_gen qat_dev_gen);
+/**< Build a request from an op. */
+
+/**
+ * Structure with data needed for creation of queue pair.
+ */
+struct qat_qp_hw_data {
+ enum qat_service_type service_type;
+ uint8_t hw_bundle_num;
+ uint8_t tx_ring_num;
+ uint8_t rx_ring_num;
+ uint16_t tx_msg_size;
+ uint16_t rx_msg_size;
+};
+/**
+ * Structure with configuration needed to create a queue pair.
+ */
+struct qat_qp_config {
+ const struct qat_qp_hw_data *hw;
+ uint32_t nb_descriptors;
+ uint32_t cookie_size;
+ int socket_id;
+ build_request_t build_request;
+ const char *service_str;
+};
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ void *base_addr; /* Base address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
+ uint32_t head; /* Shadow copy of the head */
+ uint32_t tail; /* Shadow copy of the tail */
+ uint32_t modulo_mask;
+ uint32_t msg_size;
+ uint16_t max_inflights;
+ uint32_t queue_size;
+ uint8_t hw_bundle_number;
+ uint8_t hw_queue_number;
+ /* HW queue aka ring offset on bundle */
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+ uint16_t nb_processed_responses;
+ /* number of responses processed since last CSR head write */
+ uint16_t nb_pending_requests;
+ /* number of requests pending since last CSR tail write */
+};
+
+struct qat_qp {
+ void *mmap_bar_addr;
+ uint16_t inflights16;
+ struct qat_queue tx_q;
+ struct qat_queue rx_q;
+ struct qat_common_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint32_t nb_descriptors;
+ enum qat_device_gen qat_dev_gen;
+ build_request_t build_request;
+ enum qat_service_type service_type;
+ struct qat_pci_device *qat_dev;
+ /**< qat device this qp is on */
+} __rte_cache_aligned;
+
+extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
+
+uint16_t
+qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+uint16_t
+qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);
+
+int
+qat_qp_release(struct qat_qp **qp_addr);
+
+int
+qat_qp_setup(struct qat_pci_device *qat_dev,
+ struct qat_qp **qp_addr, uint16_t queue_pair_id,
+ struct qat_qp_config *qat_qp_conf);
+
+int
+qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service);
+
+/* Needed for weak function */
+int
+qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused);
+
+#endif /* _QAT_QP_H_ */
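
A sketch of how a service PMD might drive this API, pulling hardware constants from the gen1 table and filling a qat_qp_config; my_build_request and MY_COOKIE_SIZE are hypothetical placeholders for the service's request builder and per-op cookie size:

#include "qat_qp.h"

static int
setup_sym_qp(struct qat_pci_device *qat_dev, struct qat_qp **qp,
		uint16_t qp_id, int socket_id)
{
	struct qat_qp_config conf = {
		.hw = &qat_gen1_qps[QAT_SERVICE_SYMMETRIC][qp_id],
		.nb_descriptors = 512,	/* between 128 and 4096 descriptors */
		.cookie_size = MY_COOKIE_SIZE,
		.socket_id = socket_id,
		.build_request = my_build_request,
		.service_str = "sym",
	};

	return qat_qp_setup(qat_dev, qp, qp_id, &conf);
}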
diff --git a/drivers/compress/Makefile b/drivers/compress/Makefile
new file mode 100644
index 00000000..286ea6ee
--- /dev/null
+++ b/drivers/compress/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/compress/isal/Makefile b/drivers/compress/isal/Makefile
new file mode 100644
index 00000000..95904f64
--- /dev/null
+++ b/drivers/compress/isal/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_isal_comp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# external library dependencies
+LDLIBS += -lisal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_isal_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd_ops.c
+
+# export include files
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/isal/isal_compress_pmd.c b/drivers/compress/isal/isal_compress_pmd.c
new file mode 100644
index 00000000..e943336b
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd.c
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <isa-l.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_compressdev_pmd.h>
+
+#include "isal_compress_pmd_private.h"
+
+#define RTE_COMP_ISAL_WINDOW_SIZE 15
+#define RTE_COMP_ISAL_LEVEL_ZERO 0 /* ISA-L Level 0 used for fixed Huffman */
+#define RTE_COMP_ISAL_LEVEL_ONE 1
+#define RTE_COMP_ISAL_LEVEL_TWO 2
+#define RTE_COMP_ISAL_LEVEL_THREE 3 /* Optimised for AVX512 & AVX2 only */
+
+int isal_logtype_driver;
+
+/* Verify and set private xform parameters */
+int
+isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
+ const struct rte_comp_xform *xform)
+{
+ if (xform == NULL)
+ return -EINVAL;
+
+ /* Set compression private xform variables */
+ if (xform->type == RTE_COMP_COMPRESS) {
+ /* Set private xform type - COMPRESS/DECOMPRESS */
+ priv_xform->type = RTE_COMP_COMPRESS;
+
+ /* Set private xform algorithm */
+ if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) {
+ if (xform->compress.algo == RTE_COMP_ALGO_NULL) {
+ ISAL_PMD_LOG(ERR, "By-pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ priv_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
+
+ /* Set private xform checksum - raw deflate by default */
+ if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform window size, 32K supported */
+ if (xform->compress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ priv_xform->compress.window_size =
+ RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform huffman type */
+ switch (xform->compress.deflate.huffman) {
+ case(RTE_COMP_HUFFMAN_DEFAULT):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DEFAULT;
+ break;
+ case(RTE_COMP_HUFFMAN_FIXED):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_FIXED;
+ break;
+ case(RTE_COMP_HUFFMAN_DYNAMIC):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DYNAMIC;
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Huffman code not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform level.
+	 * Checking compliance with compressdev API, -1 <= level <= 9
+ */
+ if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT ||
+ xform->compress.level > RTE_COMP_LEVEL_MAX) {
+ ISAL_PMD_LOG(ERR, "Compression level out of range\n");
+ return -EINVAL;
+ }
+ /* Check for Compressdev API level 0, No compression
+ * not supported in ISA-L
+ */
+ else if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
+ ISAL_PMD_LOG(ERR, "No Compression not supported\n");
+ return -ENOTSUP;
+ }
+ /* If using fixed huffman code, level must be 0 */
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_FIXED) {
+ ISAL_PMD_LOG(DEBUG, "ISA-L level 0 used due to a"
+ " fixed huffman code\n");
+ priv_xform->compress.level = RTE_COMP_ISAL_LEVEL_ZERO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL0_DEFAULT;
+ } else {
+ /* Mapping API levels to ISA-L levels 1,2 & 3 */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ /* Default is 1 if not using fixed huffman */
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_ONE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL1_DEFAULT;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_ONE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL1_DEFAULT;
+ break;
+ case RTE_COMP_ISAL_LEVEL_TWO:
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL2_DEFAULT;
+ break;
+ /* Level 3 or higher requested */
+ default:
+ /* Check for AVX512, to use ISA-L level 3 */
+ if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX512F)) {
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL3_DEFAULT;
+ }
+ /* Check for AVX2, to use ISA-L level 3 */
+ else if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX2)) {
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL3_DEFAULT;
+ } else {
+				ISAL_PMD_LOG(DEBUG, "Requested ISA-L level"
+						" 3 or above; level 3 is optimized"
+						" for AVX512 & AVX2 only."
+						" Level changed to 2.\n");
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL2_DEFAULT;
+ }
+ }
+ }
+ }
+
+ /* Set decompression private xform variables */
+ else if (xform->type == RTE_COMP_DECOMPRESS) {
+
+ /* Set private xform type - COMPRESS/DECOMPRESS */
+ priv_xform->type = RTE_COMP_DECOMPRESS;
+
+ /* Set private xform algorithm */
+ if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) {
+ if (xform->decompress.algo == RTE_COMP_ALGO_NULL) {
+				ISAL_PMD_LOG(ERR, "By-pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ priv_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
+
+ /* Set private xform checksum - raw deflate by default */
+ if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform window size, 32K supported */
+ if (xform->decompress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ priv_xform->decompress.window_size =
+ RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
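+
+/* Summary of the level mapping implemented above (compressdev level on
+ * the left, resulting ISA-L level and level buffer on the right):
+ *   any level with fixed Huffman -> level 0, ISAL_DEF_LVL0_DEFAULT
+ *   PMD_DEFAULT (-1) or MIN (1)  -> level 1, ISAL_DEF_LVL1_DEFAULT
+ *   2                            -> level 2, ISAL_DEF_LVL2_DEFAULT
+ *   3..9, AVX512 or AVX2 present -> level 3, ISAL_DEF_LVL3_DEFAULT
+ *   3..9 otherwise               -> level 2, ISAL_DEF_LVL2_DEFAULT
+ */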
+
+/* Compression using chained mbufs for input/output data */
+static int
+chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret;
+ uint32_t remaining_offset;
+ uint32_t remaining_data = op->src.length;
+ struct rte_mbuf *src = op->m_src;
+ struct rte_mbuf *dst = op->m_dst;
+
+ /* check for source/destination offset passing multiple segments
+ * and point compression stream to input/output buffer.
+ */
+ remaining_offset = op->src.offset;
+ while (remaining_offset >= src->data_len) {
+ remaining_offset -= src->data_len;
+ src = src->next;
+ }
+ qp->stream->avail_in = RTE_MIN(src->data_len - remaining_offset,
+ op->src.length);
+ qp->stream->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
+ remaining_offset);
+
+ remaining_offset = op->dst.offset;
+ while (remaining_offset >= dst->data_len) {
+ remaining_offset -= dst->data_len;
+ dst = dst->next;
+ }
+ qp->stream->avail_out = dst->data_len - remaining_offset;
+ qp->stream->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
+ remaining_offset);
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination buffer\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ while (qp->stream->internal_state.state != ZSTATE_END) {
+ /* Last segment of data */
+ if (remaining_data <= src->data_len)
+ qp->stream->end_of_stream = 1;
+
+ /* Execute compression operation */
+ ret = isal_deflate(qp->stream);
+
+ remaining_data = op->src.length - qp->stream->total_in;
+
+ if (ret != COMP_OK) {
+ ISAL_PMD_LOG(ERR, "Compression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ if (qp->stream->avail_in == 0 &&
+ qp->stream->total_in != op->src.length) {
+ if (src->next != NULL) {
+ src = src->next;
+ qp->stream->next_in =
+ rte_pktmbuf_mtod(src, uint8_t *);
+ qp->stream->avail_in =
+ RTE_MIN(remaining_data, src->data_len);
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough input buffer segments\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+ }
+
+ if (qp->stream->avail_out == 0 &&
+ qp->stream->internal_state.state != ZSTATE_END) {
+ if (dst->next != NULL) {
+ dst = dst->next;
+ qp->stream->next_out =
+ rte_pktmbuf_mtod(dst, uint8_t *);
+ qp->stream->avail_out = dst->data_len;
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough output buffer segments\n");
+ op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Decompression using chained mbufs for input/output data */
+static int
+chained_mbuf_decompression(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret;
+ uint32_t consumed_data, src_remaining_offset, dst_remaining_offset;
+ uint32_t remaining_data = op->src.length;
+ struct rte_mbuf *src = op->m_src;
+ struct rte_mbuf *dst = op->m_dst;
+
+ /* check for offset passing multiple segments
+ * and point decompression state to input/output buffer
+ */
+ src_remaining_offset = op->src.offset;
+ while (src_remaining_offset >= src->data_len) {
+ src_remaining_offset -= src->data_len;
+ src = src->next;
+ }
+ qp->state->avail_in = RTE_MIN(src->data_len - src_remaining_offset,
+ op->src.length);
+ qp->state->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
+ src_remaining_offset);
+
+ dst_remaining_offset = op->dst.offset;
+ while (dst_remaining_offset >= dst->data_len) {
+ dst_remaining_offset -= dst->data_len;
+ dst = dst->next;
+ }
+ qp->state->avail_out = dst->data_len - dst_remaining_offset;
+ qp->state->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
+ dst_remaining_offset);
+
+ while (qp->state->block_state != ISAL_BLOCK_FINISH) {
+
+ ret = isal_inflate(qp->state);
+
+ /* Check for first segment, offset needs to be accounted for */
+ if (remaining_data == op->src.length) {
+ consumed_data = src->data_len - qp->state->avail_in -
+ src_remaining_offset;
+ } else
+ consumed_data = src->data_len - qp->state->avail_in;
+
+ op->consumed += consumed_data;
+ remaining_data -= consumed_data;
+
+ if (ret != ISAL_DECOMP_OK) {
+ ISAL_PMD_LOG(ERR, "Decompression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ if (qp->state->avail_in == 0
+ && op->consumed != op->src.length) {
+ if (src->next != NULL) {
+ src = src->next;
+ qp->state->next_in =
+ rte_pktmbuf_mtod(src, uint8_t *);
+ qp->state->avail_in =
+ RTE_MIN(remaining_data, src->data_len);
+			} else {
+				ISAL_PMD_LOG(ERR,
+					"Not enough input buffer segments\n");
+				op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+				return -1;
+			}
+		}
+
+ if (qp->state->avail_out == 0 &&
+ qp->state->block_state != ISAL_BLOCK_FINISH) {
+ if (dst->next != NULL) {
+ dst = dst->next;
+ qp->state->next_out =
+ rte_pktmbuf_mtod(dst, uint8_t *);
+ qp->state->avail_out = dst->data_len;
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough output buffer segments\n");
+ op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Stateless Compression Function */
+static int
+process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
+ struct isal_priv_xform *priv_xform)
+{
+ int ret = 0;
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Required due to init clearing level_buf */
+ uint8_t *temp_level_buf = qp->stream->level_buf;
+
+ /* Initialize compression stream */
+ isal_deflate_stateless_init(qp->stream);
+
+ qp->stream->level_buf = temp_level_buf;
+
+ /* Stateless operation, input will be consumed in one go */
+ qp->stream->flush = NO_FLUSH;
+
+ /* set compression level & intermediate level buffer size */
+ qp->stream->level = priv_xform->compress.level;
+ qp->stream->level_buf_size = priv_xform->level_buffer_size;
+
+ /* Set op huffman code */
+ if (priv_xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_STATIC);
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DEFAULT)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+ /* Dynamically change the huffman code to suit the input data */
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+
+ if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
+ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ if (op->dst.offset >= op->m_dst->pkt_len) {
+ ISAL_PMD_LOG(ERR, "Output mbuf(s) not big enough"
+ " for offset provided.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Chained mbufs */
+ if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
+ ret = chained_mbuf_compression(op, qp);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Linear buffer */
+ qp->stream->end_of_stream = 1; /* All input consumed in one */
+ /* Point compression stream to input buffer */
+ qp->stream->avail_in = op->src.length;
+ qp->stream->next_in = rte_pktmbuf_mtod_offset(op->m_src,
+ uint8_t *, op->src.offset);
+
+ /* Point compression stream to output buffer */
+ qp->stream->avail_out = op->m_dst->data_len - op->dst.offset;
+ qp->stream->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
+ uint8_t *, op->dst.offset);
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination"
+ " buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Execute compression operation */
+ ret = isal_deflate_stateless(qp->stream);
+
+ /* Check that output buffer did not run out of space */
+ if (ret == STATELESS_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->stream->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read"
+ " entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != COMP_OK) {
+ ISAL_PMD_LOG(ERR, "Compression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+ }
+ op->consumed = qp->stream->total_in;
+ op->produced = qp->stream->total_out;
+
+ return ret;
+}
+
+/* Stateless Decompression Function */
+static int
+process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret = 0;
+
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Initialize decompression state */
+ isal_inflate_init(qp->state);
+
+ if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
+ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ if (op->dst.offset >= op->m_dst->pkt_len) {
+ ISAL_PMD_LOG(ERR, "Output mbuf not big enough for "
+ "offset provided.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Chained mbufs */
+ if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
+ ret = chained_mbuf_decompression(op, qp);
+ if (ret != 0)
+ return ret;
+ } else {
+ /* Linear buffer */
+ /* Point decompression state to input buffer */
+ qp->state->avail_in = op->src.length;
+ qp->state->next_in = rte_pktmbuf_mtod_offset(op->m_src,
+ uint8_t *, op->src.offset);
+
+ /* Point decompression state to output buffer */
+ qp->state->avail_out = op->m_dst->data_len - op->dst.offset;
+ qp->state->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
+ uint8_t *, op->dst.offset);
+
+ if (unlikely(!qp->state->next_in || !qp->state->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination"
+ " buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Execute decompression operation */
+ ret = isal_inflate_stateless(qp->state);
+
+ if (ret == ISAL_OUT_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->state->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read"
+ " entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != ISAL_DECOMP_OK) {
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+ op->consumed = op->src.length - qp->state->avail_in;
+ }
+ op->produced = qp->state->total_out;
+
+ return ret;
+}
+
+/* Process compression/decompression operation */
+static int
+process_op(struct isal_comp_qp *qp, struct rte_comp_op *op,
+ struct isal_priv_xform *priv_xform)
+{
+ switch (priv_xform->type) {
+ case RTE_COMP_COMPRESS:
+ process_isal_deflate(op, qp, priv_xform);
+ break;
+ case RTE_COMP_DECOMPRESS:
+ process_isal_inflate(op, qp);
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Operation Not Supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+/* Enqueue burst */
+static uint16_t
+isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t i;
+ int retval;
+ int16_t num_enq = RTE_MIN(qp->num_free_elements, nb_ops);
+
+ for (i = 0; i < num_enq; i++) {
+ if (unlikely(ops[i]->op_type != RTE_COMP_OP_STATELESS)) {
+ ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+			ISAL_PMD_LOG(ERR, "Stateful operation not supported\n");
+ qp->qp_stats.enqueue_err_count++;
+ continue;
+ }
+ retval = process_op(qp, ops[i], ops[i]->private_xform);
+ if (unlikely(retval < 0) ||
+ ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+ qp->qp_stats.enqueue_err_count++;
+ }
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_pkts, (void *)ops,
+ num_enq, NULL);
+ qp->num_free_elements -= retval;
+ qp->qp_stats.enqueued_count += retval;
+
+ return retval;
+}
+
+/* Dequeue burst */
+static uint16_t
+isal_comp_pmd_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, (void **)ops,
+ nb_ops, NULL);
+ qp->num_free_elements += nb_dequeued;
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/* Create ISA-L compression device */
+static int
+compdev_isal_create(const char *name, struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device,
+ sizeof(struct isal_comp_private), init_params);
+ if (dev == NULL) {
+ ISAL_PMD_LOG(ERR, "failed to create compressdev vdev");
+ return -EFAULT;
+ }
+
+ dev->dev_ops = isal_compress_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = isal_comp_pmd_dequeue_burst;
+ dev->enqueue_burst = isal_comp_pmd_enqueue_burst;
+
+ return 0;
+}
+
+/** Remove compression device */
+static int
+compdev_isal_remove_dev(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compdev);
+}
+
+/** Initialise ISA-L compression device */
+static int
+compdev_isal_probe(struct rte_vdev_device *dev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id(),
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ ISAL_PMD_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]\n", args);
+ return -EINVAL;
+ }
+
+ return compdev_isal_create(name, dev, &init_params);
+}
+
+static struct rte_vdev_driver compdev_isal_pmd_drv = {
+ .probe = compdev_isal_probe,
+ .remove = compdev_isal_remove_dev,
+};
+
+RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD,
+ "socket_id=<int>");
+
+RTE_INIT(isal_init_log)
+{
+ isal_logtype_driver = rte_log_register("pmd.compress.isal");
+ if (isal_logtype_driver >= 0)
+ rte_log_set_level(isal_logtype_driver, RTE_LOG_INFO);
+}
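
End to end, this PMD is instantiated as a vdev (rte_vdev_init("compress_isal", "socket_id=0") or the --vdev EAL option) and driven through the compressdev API. A sketch of the application-side flow for one stateless op, with device setup and error handling elided; because ops are processed synchronously inside enqueue_burst, a dequeue immediately after a successful enqueue can already return the completed op:

#include <rte_compressdev.h>

static void
compress_one(uint8_t dev_id, struct rte_comp_op *op, void *priv_xform)
{
	/* priv_xform comes from rte_compressdev_private_xform_create() */
	op->op_type = RTE_COMP_OP_STATELESS;
	op->private_xform = priv_xform;

	while (rte_compressdev_enqueue_burst(dev_id, 0, &op, 1) == 0)
		;
	while (rte_compressdev_dequeue_burst(dev_id, 0, &op, 1) == 0)
		;
	/* op->status, op->consumed and op->produced are now valid */
}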
diff --git a/drivers/compress/isal/isal_compress_pmd_ops.c b/drivers/compress/isal/isal_compress_pmd_ops.c
new file mode 100644
index 00000000..41cade87
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd_ops.c
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <isa-l.h>
+
+#include <rte_common.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_malloc.h>
+
+#include "isal_compress_pmd_private.h"
+
+static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = {
+ {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ .window_size = {
+ .min = 15,
+ .max = 15,
+ .increment = 0
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+isal_comp_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ int ret = 0;
+ unsigned int n;
+ char mp_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ unsigned int elt_size = sizeof(struct isal_priv_xform);
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ n = snprintf(mp_name, sizeof(mp_name), "compdev_%d_xform_mp",
+ dev->data->dev_id);
+	if (n >= sizeof(mp_name)) {
+ ISAL_PMD_LOG(ERR,
+ "Unable to create unique name for xform mempool");
+ return -ENOMEM;
+ }
+
+ internals->priv_xform_mp = rte_mempool_lookup(mp_name);
+
+ if (internals->priv_xform_mp != NULL) {
+ if (((internals->priv_xform_mp)->elt_size != elt_size) ||
+ ((internals->priv_xform_mp)->size <
+ config->max_nb_priv_xforms)) {
+
+ ISAL_PMD_LOG(ERR, "%s mempool already exists with different"
+ " initialization parameters", mp_name);
+ internals->priv_xform_mp = NULL;
+ return -ENOMEM;
+ }
+ } else { /* First time configuration */
+ internals->priv_xform_mp = rte_mempool_create(
+ mp_name, /* mempool name */
+ /* number of elements*/
+ config->max_nb_priv_xforms,
+ elt_size, /* element size*/
+ 0, /* Cache size*/
+ 0, /* private data size */
+ NULL, /* obj initialization constructor */
+ NULL, /* obj initialization constructor arg */
+ NULL, /**< obj constructor*/
+ NULL, /* obj constructor arg */
+ config->socket_id, /* socket id */
+ 0); /* flags */
+ }
+
+ if (internals->priv_xform_mp == NULL) {
+ ISAL_PMD_LOG(ERR, "%s mempool allocation failed", mp_name);
+ return -ENOMEM;
+ }
+
+ dev->data->dev_private = internals;
+
+ return ret;
+}
+
+/** Start device */
+static int
+isal_comp_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+isal_comp_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+isal_comp_pmd_close(struct rte_compressdev *dev)
+{
+ /* Free private data */
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->priv_xform_mp);
+ return 0;
+}
+
+/** Get device statistics */
+static void
+isal_comp_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Get device info */
+static void
+isal_comp_pmd_info_get(struct rte_compressdev *dev __rte_unused,
+ struct rte_compressdev_info *dev_info)
+{
+ if (dev_info != NULL) {
+ dev_info->capabilities = isal_pmd_capabilities;
+ dev_info->feature_flags = RTE_COMPDEV_FF_CPU_AVX512 |
+ RTE_COMPDEV_FF_CPU_AVX2 |
+ RTE_COMPDEV_FF_CPU_AVX |
+ RTE_COMPDEV_FF_CPU_SSE;
+ }
+}
+
+/** Reset device statistics */
+static void
+isal_comp_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Release queue pair */
+static int
+isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp == NULL)
+ return -EINVAL;
+
+	/* free the level buffer before the stream that owns it */
+	if (qp->stream != NULL) {
+		rte_free(qp->stream->level_buf);
+		rte_free(qp->stream);
+	}
+
+ if (qp->state != NULL)
+ rte_free(qp->state);
+
+ if (qp->processed_pkts != NULL)
+ rte_ring_free(qp->processed_pkts);
+
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+isal_comp_pmd_qp_create_processed_pkts_ring(struct isal_comp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ISAL_PMD_LOG(DEBUG,
+ "Reusing existing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ISAL_PMD_LOG(ERR,
+ "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+isal_comp_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+struct isal_comp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "isal_compression_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/* Setup a queue pair */
+static int
+isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct isal_comp_qp *qp = NULL;
+ int retval;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ isal_comp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Isa-l compression PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+	/* Initialize memory for compression stream structure */
+	qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
+			sizeof(struct isal_zstream),  RTE_CACHE_LINE_SIZE,
+			socket_id);
+	if (qp->stream == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate compression stream");
+		goto qp_setup_cleanup;
+	}
+
+	/* Initialize memory for compression level buffer */
+	qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
+			ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
+			socket_id);
+	if (qp->stream->level_buf == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate compression level buffer");
+		goto qp_setup_cleanup;
+	}
+
+	/* Initialize memory for decompression state structure */
+	qp->state = rte_zmalloc_socket("Isa-l decompression state",
+			sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
+			socket_id);
+	if (qp->state == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to allocate decompression state");
+		goto qp_setup_cleanup;
+	}
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = isal_comp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = isal_comp_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL) {
+		ISAL_PMD_LOG(ERR, "Failed to create ring for processed "
+				"packets");
+ goto qp_setup_cleanup;
+ }
+
+ qp->num_free_elements = rte_ring_free_count(qp->processed_pkts);
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+	if (qp != NULL) {
+		if (qp->stream != NULL)
+			rte_free(qp->stream->level_buf);
+		rte_free(qp->stream);
+		rte_free(qp->state);
+		rte_free(qp);
+	}
+
+ return -1;
+}
+
+/** Set private xform data*/
+static int
+isal_comp_pmd_priv_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **priv_xform)
+{
+ int ret;
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ if (xform == NULL) {
+ ISAL_PMD_LOG(ERR, "Invalid Xform struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(internals->priv_xform_mp, priv_xform)) {
+ ISAL_PMD_LOG(ERR,
+ "Couldn't get object from private xform mempool");
+ return -ENOMEM;
+ }
+
+ ret = isal_comp_set_priv_xform_parameters(*priv_xform, xform);
+ if (ret != 0) {
+ ISAL_PMD_LOG(ERR, "Failed to configure private xform parameters");
+
+ /* Return private xform to mempool */
+ rte_mempool_put(internals->priv_xform_mp, priv_xform);
+ return ret;
+ }
+ return 0;
+}
+
+/** Clear memory of the private xform so it doesn't leave data behind */
+static int
+isal_comp_pmd_priv_xform_free(struct rte_compressdev *dev, void *priv_xform)
+{
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ /* Zero out the whole structure */
+ if (priv_xform) {
+ memset(priv_xform, 0, sizeof(struct isal_priv_xform));
+ rte_mempool_put(internals->priv_xform_mp, priv_xform);
+ }
+ return 0;
+}
+
+struct rte_compressdev_ops isal_pmd_ops = {
+ .dev_configure = isal_comp_pmd_config,
+ .dev_start = isal_comp_pmd_start,
+ .dev_stop = isal_comp_pmd_stop,
+ .dev_close = isal_comp_pmd_close,
+
+ .stats_get = isal_comp_pmd_stats_get,
+ .stats_reset = isal_comp_pmd_stats_reset,
+
+ .dev_infos_get = isal_comp_pmd_info_get,
+
+ .queue_pair_setup = isal_comp_pmd_qp_setup,
+ .queue_pair_release = isal_comp_pmd_qp_release,
+
+ .private_xform_create = isal_comp_pmd_priv_xform_create,
+ .private_xform_free = isal_comp_pmd_priv_xform_free,
+};
+
+struct rte_compressdev_ops *isal_compress_pmd_ops = &isal_pmd_ops;
diff --git a/drivers/compress/isal/isal_compress_pmd_private.h b/drivers/compress/isal/isal_compress_pmd_private.h
new file mode 100644
index 00000000..46e9fcfa
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd_private.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _ISAL_COMP_PMD_PRIVATE_H_
+#define _ISAL_COMP_PMD_PRIVATE_H_
+
+#define COMPDEV_NAME_ISAL_PMD compress_isal
+/**< ISA-L comp PMD device name */
+
+extern int isal_logtype_driver;
+#define ISAL_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, isal_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+/* private data structure for each ISA-L compression device */
+struct isal_comp_private {
+ struct rte_mempool *priv_xform_mp;
+};
+
+/** ISA-L queue pair */
+struct isal_comp_qp {
+ /* Queue Pair Identifier */
+ uint16_t id;
+ /* Unique Queue Pair Name */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /* Ring for placing process packets */
+ struct rte_ring *processed_pkts;
+ /* Queue pair statistics */
+ struct rte_compressdev_stats qp_stats;
+ /* Compression stream information*/
+ struct isal_zstream *stream;
+ /* Decompression state information*/
+ struct inflate_state *state;
+ /* Number of free elements on ring */
+ uint16_t num_free_elements;
+} __rte_cache_aligned;
+
+/** ISA-L private xform structure */
+struct isal_priv_xform {
+ enum rte_comp_xform_type type;
+ union {
+ struct rte_comp_compress_xform compress;
+ struct rte_comp_decompress_xform decompress;
+ };
+ uint32_t level_buffer_size;
+} __rte_cache_aligned;
+
+/** Set and validate ISA-L comp private xform parameters */
+extern int
+isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
+ const struct rte_comp_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_compressdev_ops *isal_compress_pmd_ops;
+
+#endif /* _ISAL_COMP_PMD_PRIVATE_H_ */
diff --git a/drivers/compress/isal/meson.build b/drivers/compress/isal/meson.build
new file mode 100644
index 00000000..94c10fd6
--- /dev/null
+++ b/drivers/compress/isal/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 Intel Corporation
+
+dep = dependency('libisal', required: false)
+if not dep.found()
+	build = false
+endif
+
+deps += 'bus_vdev'
+sources = files('isal_compress_pmd.c', 'isal_compress_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lisal'
+
+allow_experimental_apis = true
diff --git a/drivers/compress/isal/rte_pmd_isal_version.map b/drivers/compress/isal/rte_pmd_isal_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/drivers/compress/isal/rte_pmd_isal_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/drivers/compress/meson.build b/drivers/compress/meson.build
new file mode 100644
index 00000000..817ef3be
--- /dev/null
+++ b/drivers/compress/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+drivers = ['isal', 'octeontx', 'qat', 'zlib']
+
+std_deps = ['compressdev'] # compressdev pulls in all other needed deps
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/compress/octeontx/Makefile b/drivers/compress/octeontx/Makefile
new file mode 100644
index 00000000..f34424c8
--- /dev/null
+++ b/drivers/compress/octeontx/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_octeontx_zip.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -I$(RTE_SDK)/drivers/compress/octeontx/include
+
+# external library include paths
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip.c
+
+# versioning export map
+EXPORT_MAP := rte_pmd_octeontx_compress_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/octeontx/include/zip_regs.h b/drivers/compress/octeontx/include/zip_regs.h
new file mode 100644
index 00000000..1e74db43
--- /dev/null
+++ b/drivers/compress/octeontx/include/zip_regs.h
@@ -0,0 +1,711 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _RTE_OCTEONTX_ZIP_REGS_H_
+#define _RTE_OCTEONTX_ZIP_REGS_H_
+
+
+/**
+ * Enumeration zip_cc
+ *
+ * ZIP compression coding Enumeration
+ * Enumerates ZIP_INST_S[CC].
+ */
+enum zip_cc {
+	ZIP_CC_DEFAULT = 0,
+	ZIP_CC_DYN_HUFF,
+	ZIP_CC_FIXED_HUFF,
+	ZIP_CC_LZS
+};
+
+/**
+ * Register (NCB) zip_vq#_ena
+ *
+ * ZIP VF Queue Enable Register
+ * If a queue is disabled, ZIP CTL stops fetching instructions from the queue.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_vqx_ena_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1;
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_vqx_ena_s cn; */
+} zip_vqx_ena_t;
+
+/**
+ * Register (NCB) zip_vq#_sbuf_addr
+ *
+ * ZIP VF Queue Starting Buffer Address Registers
+ * These registers set the buffer parameters for the instruction queues.
+ * When quiescent (i.e.
+ * outstanding doorbell count is 0), it is safe to rewrite this register
+ * to effectively reset the
+ * command buffer state machine.
+ * These registers must be programmed after software programs the
+ * corresponding ZIP_QUE()_SBUF_CTL.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_vqx_sbuf_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42;
+ uint64_t off : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t off : 7;
+ uint64_t ptr : 42;
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_vqx_sbuf_addr_s cn; */
+} zip_vqx_sbuf_addr_t;
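+
+/*
+ * Note: PTR holds IOVA bits <48:7>, so the queue buffer must be 128-byte
+ * aligned and the address is written pre-shifted, as zipvf_q_init() does:
+ *
+ *     que_sbuf_addr.u = 0ull;
+ *     que_sbuf_addr.s.ptr = (cmdq->iova >> 7);
+ */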
+
+/**
+ * Register (NCB) zip_que#_doorbell
+ *
+ * ZIP Queue Doorbell Registers
+ * Doorbells for the ZIP instruction queues.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_quex_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t dbell_cnt : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t dbell_cnt : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_quex_doorbell_s cn; */
+} zip_quex_doorbell_t;
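+
+/*
+ * Usage sketch (illustrative): after an instruction is copied into the
+ * queue, software advertises it with a single doorbell write carrying
+ * the number of new instructions, as zipvf_push_command() does:
+ *
+ *     zip_quex_doorbell_t dbell;
+ *     dbell.u = 0ull;
+ *     dbell.s.dbell_cnt = 1;
+ *     zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);
+ */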
+
+/**
+ * Structure zip_nptr_s
+ *
+ * ZIP Instruction Next-Chunk-Buffer Pointer (NPTR) Structure
+ * This structure is used to chain all the ZIP instruction buffers
+ * together. ZIP instruction buffers are managed
+ * (allocated and released) by software.
+ */
+union zip_nptr_s {
+ uint64_t u;
+ struct zip_nptr_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_nptr_s_s cn83xx; */
+};
+
+/**
+ * generic ptr address
+ */
+union zip_zptr_addr_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u;
+ /** generic ptr address */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/**
+ * generic ptr ctl
+ */
+union zip_zptr_ctl_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u;
+ /** generic ptr ctl */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ uint64_t reserved_112_127 : 16;
+ uint64_t length : 16;
+ uint64_t reserved_67_95 : 29;
+ uint64_t fw : 1;
+ uint64_t nc : 1;
+ uint64_t data_be : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t data_be : 1;
+ uint64_t nc : 1;
+ uint64_t fw : 1;
+ uint64_t reserved_67_95 : 29;
+ uint64_t length : 16;
+ uint64_t reserved_112_127 : 16;
+#endif /* Word 1 - End */
+ } s;
+
+};
+
+/**
+ * Structure zip_inst_s
+ *
+ * ZIP Instruction Structure
+ * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15
+ * within the structure).
+ */
+union zip_inst_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[16];
+ /** ZIP Instruction Structure */
+ struct zip_inst_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Done interrupt */
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_56_62 : 7;
+ /** Total output length */
+ uint64_t totaloutputlength : 24;
+ /** reserved */
+ uint64_t reserved_27_31 : 5;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** HASH IV */
+ uint64_t iv : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** Hash more-in-file */
+ uint64_t hmif : 1;
+ /** Hash Algorithm and enable */
+ uint64_t halg : 3;
+ /** Sync flush*/
+ uint64_t sf : 1;
+ /** Compression speed/storage */
+ uint64_t ss : 2;
+ /** Compression coding */
+ uint64_t cc : 2;
+ /** End of input data */
+ uint64_t ef : 1;
+ /** Beginning of file */
+ uint64_t bf : 1;
+ /** Comp/decomp operation */
+ uint64_t op : 2;
+ /** Data scatter */
+ uint64_t ds : 1;
+ /** Data gather */
+ uint64_t dg : 1;
+ /** History gather */
+ uint64_t hg : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t hg : 1;
+ uint64_t dg : 1;
+ uint64_t ds : 1;
+ uint64_t op : 2;
+ uint64_t bf : 1;
+ uint64_t ef : 1;
+ uint64_t cc : 2;
+ uint64_t ss : 2;
+ uint64_t sf : 1;
+ uint64_t halg : 3;
+ uint64_t hmif : 1;
+ uint64_t exbits : 7;
+ uint64_t iv : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_27_31 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t reserved_56_62 : 7;
+ uint64_t doneint : 1;
+
+#endif /* Word 0 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** History length */
+ uint64_t historylength : 16;
+ /** reserved */
+ uint64_t reserved_96_111 : 16;
+ /** adler/crc32 checksum*/
+ uint64_t adlercrc32 : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t adlercrc32 : 32;
+ uint64_t reserved_96_111 : 16;
+ uint64_t historylength : 16;
+#endif /* Word 1 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Decompression Context Pointer Address */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#else /* Word 2 - Little Endian */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#endif /* Word 2 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression Context Pointer Control */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#else /* Word 3 - Little Endian */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#endif /* Word 3 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression history pointer address */
+ union zip_zptr_addr_s his_ptr_addr;
+#else /* Word 4 - Little Endian */
+ union zip_zptr_addr_s his_ptr_addr;
+#endif /* Word 4 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression history pointer control */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#else /* Word 5 - Little Endian */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#endif /* Word 5 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Input and compression history pointer address */
+ union zip_zptr_addr_s inp_ptr_addr;
+#else /* Word 6 - Little Endian */
+ union zip_zptr_addr_s inp_ptr_addr;
+#endif /* Word 6 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Input and compression history pointer control */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#else /* Word 7 - Little Endian */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#endif /* Word 7 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Output pointer address */
+ union zip_zptr_addr_s out_ptr_addr;
+#else /* Word 8 - Little Endian */
+ union zip_zptr_addr_s out_ptr_addr;
+#endif /* Word 8 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Output pointer control */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#else /* Word 9 - Little Endian */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#endif /* Word 9 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Result pointer address */
+ union zip_zptr_addr_s res_ptr_addr;
+#else /* Word 10 - Little Endian */
+ union zip_zptr_addr_s res_ptr_addr;
+#endif /* Word 10 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Result pointer control */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#else /* Word 11 - Little Endian */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#endif /* Word 11 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */
+ /** reserved */
+ uint64_t reserved_812_831 : 20;
+ /** SSO guest group */
+ uint64_t ggrp : 10;
+ /** SSO tag type */
+ uint64_t tt : 2;
+ /** SSO tag */
+ uint64_t tag : 32;
+#else /* Word 12 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_812_831 : 20;
+#endif /* Word 12 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */
+ /** Work queue entry pointer */
+ uint64_t wq_ptr : 64;
+#else /* Word 13 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 13 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** reserved */
+ uint64_t reserved_896_959 : 64;
+#else /* Word 14 - Little Endian */
+ uint64_t reserved_896_959 : 64;
+#endif /* Word 14 - End */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Hash structure pointer */
+ uint64_t hash_ptr : 64;
+#else /* Word 15 - Little Endian */
+ uint64_t hash_ptr : 64;
+#endif /* Word 15 - End */
+ } /** ZIP 88xx Instruction Structure */zip88xx;
+
+ /** ZIP Instruction Structure */
+ struct zip_inst_s_cn83xx {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Done interrupt */
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_56_62 : 7;
+ /** Total output length */
+ uint64_t totaloutputlength : 24;
+ /** reserved */
+ uint64_t reserved_27_31 : 5;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** HASH IV */
+ uint64_t iv : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** Hash more-in-file */
+ uint64_t hmif : 1;
+ /** Hash Algorithm and enable */
+ uint64_t halg : 3;
+ /** Sync flush*/
+ uint64_t sf : 1;
+ /** Compression speed/storage */
+ uint64_t ss : 2;
+ /** Compression coding */
+ uint64_t cc : 2;
+ /** End of input data */
+ uint64_t ef : 1;
+ /** Beginning of file */
+ uint64_t bf : 1;
+ /** Comp/decomp operation */
+ uint64_t op : 2;
+ /** Data scatter */
+ uint64_t ds : 1;
+ /** Data gather */
+ uint64_t dg : 1;
+ /** History gather */
+ uint64_t hg : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t hg : 1;
+ uint64_t dg : 1;
+ uint64_t ds : 1;
+ uint64_t op : 2;
+ uint64_t bf : 1;
+ uint64_t ef : 1;
+ uint64_t cc : 2;
+ uint64_t ss : 2;
+ uint64_t sf : 1;
+ uint64_t halg : 3;
+ uint64_t hmif : 1;
+ uint64_t exbits : 7;
+ uint64_t iv : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_27_31 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t reserved_56_62 : 7;
+ uint64_t doneint : 1;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** History length */
+ uint64_t historylength : 16;
+ /** reserved */
+ uint64_t reserved_96_111 : 16;
+ /** adler/crc32 checksum*/
+ uint64_t adlercrc32 : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t adlercrc32 : 32;
+ uint64_t reserved_96_111 : 16;
+ uint64_t historylength : 16;
+#endif /* Word 1 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Decompression Context Pointer Address */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#else /* Word 2 - Little Endian */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#endif /* Word 2 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */
+ /** Decompression Context Pointer Control */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#else /* Word 3 - Little Endian */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#endif /* Word 3 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */
+ /** Decompression history pointer address */
+ union zip_zptr_addr_s his_ptr_addr;
+#else /* Word 4 - Little Endian */
+ union zip_zptr_addr_s his_ptr_addr;
+#endif /* Word 4 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */
+ /** Decompression history pointer control */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#else /* Word 5 - Little Endian */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#endif /* Word 5 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */
+ /** Input and compression history pointer address */
+ union zip_zptr_addr_s inp_ptr_addr;
+#else /* Word 6 - Little Endian */
+ union zip_zptr_addr_s inp_ptr_addr;
+#endif /* Word 6 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */
+ /** Input and compression history pointer control */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#else /* Word 7 - Little Endian */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#endif /* Word 7 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 8 - Big Endian */
+ /** Output pointer address */
+ union zip_zptr_addr_s out_ptr_addr;
+#else /* Word 8 - Little Endian */
+ union zip_zptr_addr_s out_ptr_addr;
+#endif /* Word 8 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 9 - Big Endian */
+ /** Output pointer control */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#else /* Word 9 - Little Endian */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#endif /* Word 9 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 10 - Big Endian */
+ /** Result pointer address */
+ union zip_zptr_addr_s res_ptr_addr;
+#else /* Word 10 - Little Endian */
+ union zip_zptr_addr_s res_ptr_addr;
+#endif /* Word 10 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 11 - Big Endian */
+ /** Result pointer control */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#else /* Word 11 - Little Endian */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#endif /* Word 11 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */
+ /** reserved */
+ uint64_t reserved_812_831 : 20;
+ /** SSO guest group */
+ uint64_t ggrp : 10;
+ /** SSO tag type */
+ uint64_t tt : 2;
+ /** SSO tag */
+ uint64_t tag : 32;
+#else /* Word 12 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_812_831 : 20;
+#endif /* Word 12 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */
+ /** Work queue entry pointer */
+ uint64_t wq_ptr : 64;
+#else /* Word 13 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 13 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 14 - Big Endian */
+ /** reserved */
+ uint64_t reserved_896_959 : 64;
+#else /* Word 14 - Little Endian */
+ uint64_t reserved_896_959 : 64;
+#endif /* Word 14 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 15 - Big Endian */
+ /** Hash structure pointer */
+ uint64_t hash_ptr : 64;
+#else /* Word 15 - Little Endian */
+ uint64_t hash_ptr : 64;
+#endif /* Word 15 - End */
+ } /** ZIP 83xx Instruction Structure */s;
+};
+
+/**
+ * Structure zip_zres_s
+ *
+ * ZIP Result Structure
+ * The ZIP coprocessor writes the result structure after it completes the
+ * invocation. The result structure is exactly 24 bytes, and each invocation
+ * of the ZIP coprocessor produces exactly one result structure.
+ */
+union zip_zres_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[8];
+ /** ZIP Result Structure */
+ struct zip_zres_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** crc32 checksum of uncompressed stream */
+ uint64_t crc32 : 32;
+ /** adler32 checksum of uncompressed stream*/
+ uint64_t adler32 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t adler32 : 32;
+ uint64_t crc32 : 32;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** Total number of bytes produced in the output stream */
+ uint64_t totalbyteswritten : 32;
+ /** Total number of bytes processed from the input stream */
+ uint64_t totalbytesread : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t totalbytesread : 32;
+ uint64_t totalbyteswritten : 32;
+#endif /* Word 1 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Total number of compressed input bits
+ * consumed to decompress all blocks in the file
+ */
+ uint64_t totalbitsprocessed : 32;
+ /** Done interrupt*/
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_155_158 : 4;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** reserved */
+ uint64_t reserved_151 : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** reserved */
+ uint64_t reserved_137_143 : 7;
+ /** End of file */
+ uint64_t ef : 1;
+ /** Completion/error code */
+ uint64_t compcode : 8;
+#else /* Word 2 - Little Endian */
+ uint64_t compcode : 8;
+ uint64_t ef : 1;
+ uint64_t reserved_137_143 : 7;
+ uint64_t exbits : 7;
+ uint64_t reserved_151 : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_155_158 : 4;
+ uint64_t doneint : 1;
+ uint64_t totalbitsprocessed : 32;
+#endif /* Word 2 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */
+ /** reserved */
+ uint64_t reserved_253_255 : 3;
+ /** Hash length in bytes */
+ uint64_t hshlen : 61;
+#else /* Word 3 - Little Endian */
+ uint64_t hshlen : 61;
+ uint64_t reserved_253_255 : 3;
+#endif /* Word 3 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */
+ /** Double-word 0 of computed hash */
+ uint64_t hash0 : 64;
+#else /* Word 4 - Little Endian */
+ uint64_t hash0 : 64;
+#endif /* Word 4 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */
+ /** Double-word 1 of computed hash */
+ uint64_t hash1 : 64;
+#else /* Word 5 - Little Endian */
+ uint64_t hash1 : 64;
+#endif /* Word 5 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */
+ /** Double-word 2 of computed hash */
+ uint64_t hash2 : 64;
+#else /* Word 6 - Little Endian */
+ uint64_t hash2 : 64;
+#endif /* Word 6 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */
+ /** Double-word 3 of computed hash */
+ uint64_t hash3 : 64;
+#else /* Word 7 - Little Endian */
+ uint64_t hash3 : 64;
+#endif /* Word 7 - End */
+ } /** ZIP Result Structure */s;
+
+ /* struct zip_zres_s_s cn83xx; */
+};
+
+/**
+ * Structure zip_zptr_s
+ *
+ * ZIP Generic Pointer Structure
+ * This structure is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[2];
+ /** ZIP Generic Pointer Structure */
+ struct zip_zptr_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Pointer to Data or scatter-gather list */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** reserved */
+ uint64_t reserved_112_127 : 16;
+ /** Length of Data or scatter-gather list*/
+ uint64_t length : 16;
+ /** reserved */
+ uint64_t reserved_67_95 : 29;
+ /** Full-block write */
+ uint64_t fw : 1;
+ /** No cache allocation */
+ uint64_t nc : 1;
+ /** Data big-endian */
+ uint64_t data_be : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t data_be : 1;
+ uint64_t nc : 1;
+ uint64_t fw : 1;
+ uint64_t reserved_67_95 : 29;
+ uint64_t length : 16;
+ uint64_t reserved_112_127 : 16;
+#endif /* Word 1 - End */
+ } /** ZIP Generic Pointer Structure */s;
+};
+
+/**
+ * Enumeration zip_comp_e
+ *
+ * ZIP Completion Enumeration
+ * Enumerates the values of ZIP_ZRES_S[COMPCODE].
+ */
+#define ZIP_COMP_E_NOTDONE (0)
+#define ZIP_COMP_E_SUCCESS (1)
+#define ZIP_COMP_E_DTRUNC (2)
+#define ZIP_COMP_E_DSTOP (3)
+#define ZIP_COMP_E_ITRUNC (4)
+#define ZIP_COMP_E_RBLOCK (5)
+#define ZIP_COMP_E_NLEN (6)
+#define ZIP_COMP_E_BADCODE (7)
+#define ZIP_COMP_E_BADCODE2 (8)
+#define ZIP_COMP_E_ZERO_LEN (9)
+#define ZIP_COMP_E_PARITY (0xa)
+#define ZIP_COMP_E_FATAL (0xb)
+#define ZIP_COMP_E_TIMEOUT (0xc)
+#define ZIP_COMP_E_INSTR_ERR (0xd)
+#define ZIP_COMP_E_HCTX_ERR (0xe)
+#define ZIP_COMP_E_STOP (3)
+
+/**
+ * Enumeration zip_op_e
+ *
+ * ZIP Operation Enumeration
+ * Enumerates ZIP_INST_S[OP].
+ */
+#define ZIP_OP_E_DECOMP (0)
+#define ZIP_OP_E_NOCOMP (1)
+#define ZIP_OP_E_COMP (2)
+
+/**
+ * Enumeration zip compression levels
+ *
+ * ZIP Compression Level Enumeration
+ * Enumerates ZIP_INST_S[SS].
+ */
+#define ZIP_COMP_E_LEVEL_MAX (0)
+#define ZIP_COMP_E_LEVEL_MED (1)
+#define ZIP_COMP_E_LEVEL_LOW (2)
+#define ZIP_COMP_E_LEVEL_MIN (3)
+
+#endif /* _RTE_OCTEONTX_ZIP_REGS_H_ */
diff --git a/drivers/compress/octeontx/meson.build b/drivers/compress/octeontx/meson.build
new file mode 100644
index 00000000..7cd202d0
--- /dev/null
+++ b/drivers/compress/octeontx/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+name = 'octeontx_compress'
+sources = files('otx_zip.c', 'otx_zip_pmd.c')
+allow_experimental_apis = true
+includes += include_directories('include')
+deps += ['mempool_octeontx', 'bus_pci']
+ext_deps += dep
diff --git a/drivers/compress/octeontx/otx_zip.c b/drivers/compress/octeontx/otx_zip.c
new file mode 100644
index 00000000..a9046ff3
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include "otx_zip.h"
+
+uint64_t
+zip_reg_read64(uint8_t *hw_addr, uint64_t offset)
+{
+ uint8_t *base = hw_addr;
+ return *(volatile uint64_t *)(base + offset);
+}
+
+void
+zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val)
+{
+ uint8_t *base = hw_addr;
+ *(volatile uint64_t *)(base + offset) = val;
+}
+
+static void
+zip_q_enable(struct zipvf_qp *qp)
+{
+ zip_vqx_ena_t que_ena;
+
+ /* ZIP VFx command queue init */
+ que_ena.u = 0ull;
+ que_ena.s.ena = 1;
+
+ zip_reg_write64(qp->vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+ rte_wmb();
+}
+
+/* initialize given qp on zip device */
+int
+zipvf_q_init(struct zipvf_qp *qp)
+{
+ zip_vqx_sbuf_addr_t que_sbuf_addr;
+
+ uint64_t size;
+ void *cmdq_addr;
+ uint64_t iova;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ struct zip_vf *vf = qp->vf;
+
+ /* allocate and setup instruction queue */
+ size = ZIP_MAX_CMDQ_SIZE;
+ size = ZIP_ALIGN_ROUNDUP(size, ZIP_CMDQ_ALIGN);
+
+ cmdq_addr = rte_zmalloc(qp->name, size, ZIP_CMDQ_ALIGN);
+ if (cmdq_addr == NULL)
+ return -1;
+
+ cmdq->sw_head = (uint64_t *)cmdq_addr;
+ cmdq->va = (uint8_t *)cmdq_addr;
+ iova = rte_mem_virt2iova(cmdq_addr);
+
+ cmdq->iova = iova;
+
+ que_sbuf_addr.u = 0ull;
+ que_sbuf_addr.s.ptr = (cmdq->iova >> 7);
+ zip_reg_write64(vf->vbar0, ZIP_VQ_SBUF_ADDR, que_sbuf_addr.u);
+
+ zip_q_enable(qp);
+
+ memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+ rte_spinlock_init(&cmdq->qlock);
+
+ return 0;
+}
+
+int
+zipvf_q_term(struct zipvf_qp *qp)
+{
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ zip_vqx_ena_t que_ena;
+ struct zip_vf *vf = qp->vf;
+
+ if (cmdq->va != NULL) {
+ memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+ rte_free(cmdq->va);
+ }
+
+ /* Disable the ZIP queue */
+ que_ena.u = 0ull;
+ zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+
+ return 0;
+}
+
+void
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd)
+{
+ zip_quex_doorbell_t dbell;
+ union zip_nptr_s ncp;
+ uint64_t *ncb_ptr;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ void *reg_base = qp->vf->vbar0;
+
+ /* Hold the queue lock */
+ rte_spinlock_lock(&(cmdq->qlock));
+
+ /* Check space availability in zip cmd queue */
+ if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t)) +
+ ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - ZIP_MAX_NCBP_SIZE)) {
+ /*Last buffer of the command queue*/
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ /* move pointer to next loc in unit of 64-bit word */
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+
+ /* now, point the "Next-Chunk Buffer Ptr" to sw_head */
+ ncb_ptr = cmdq->sw_head;
+ /* Point the head back to the command queue base */
+ cmdq->sw_head = (uint64_t *)cmdq->va;
+
+ ncp.u = 0ull;
+ ncp.s.addr = cmdq->iova;
+ *ncb_ptr = ncp.u;
+ } else {
+ /*Enough buffers available in the command queue*/
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+ }
+
+ rte_wmb();
+
+ /* Ringing ZIP VF doorbell */
+ dbell.u = 0ull;
+ dbell.s.dbell_cnt = 1;
+ zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);
+
+ rte_spinlock_unlock(&(cmdq->qlock));
+}
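+
+/*
+ * Command chunk layout handled above (illustrative):
+ *
+ *     va                                        va + ZIP_MAX_CMDQ_SIZE
+ *     | inst 0 | inst 1 | ... | inst 62 | next-chunk buffer pointer |
+ *       128 B    128 B          128 B              8 B
+ *
+ * Once the last instruction slot is written, the trailing next-chunk
+ * pointer is aimed back at the chunk base, so the single chunk behaves
+ * as a ring.
+ */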
+
+int
+zipvf_create(struct rte_compressdev *compressdev)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(compressdev->device);
+ struct zip_vf *zipvf = NULL;
+ char *dev_name = compressdev->data->name;
+ void *vbar0;
+ uint64_t reg;
+
+ if (pdev->mem_resource[0].phys_addr == 0ULL)
+ return -EIO;
+
+ vbar0 = pdev->mem_resource[0].addr;
+ if (!vbar0) {
+ ZIP_PMD_ERR("Failed to map BAR0 of %s", dev_name);
+ return -ENODEV;
+ }
+
+ zipvf = (struct zip_vf *)(compressdev->data->dev_private);
+
+ if (!zipvf)
+ return -ENOMEM;
+
+ zipvf->vbar0 = vbar0;
+ reg = zip_reg_read64(zipvf->vbar0, ZIP_VF_PF_MBOXX(0));
+ /* Store the mbox domain/subdomain locally in the ZIP VF */
+ zipvf->dom_sdom = reg;
+ zipvf->pdev = pdev;
+ zipvf->max_nb_queue_pairs = ZIP_MAX_VF_QUEUE;
+ return 0;
+}
+
+int
+zipvf_destroy(struct rte_compressdev *compressdev)
+{
+ struct zip_vf *vf = (struct zip_vf *)(compressdev->data->dev_private);
+
+ /* Rewriting the domain_id in ZIP_VF_MBOX for app rerun */
+ zip_reg_write64(vf->vbar0, ZIP_VF_PF_MBOXX(0), vf->dom_sdom);
+
+ return 0;
+}
diff --git a/drivers/compress/octeontx/otx_zip.h b/drivers/compress/octeontx/otx_zip.h
new file mode 100644
index 00000000..99a38d00
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _RTE_OCTEONTX_ZIP_VF_H_
+#define _RTE_OCTEONTX_ZIP_VF_H_
+
+#include <unistd.h>
+
+#include <rte_bus_pci.h>
+#include <rte_comp.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+
+#include <zip_regs.h>
+
+extern int octtx_zip_logtype_driver;
+
+/* ZIP VF Control/Status registers (CSRs): */
+/* VF_BAR0: */
+#define ZIP_VQ_ENA (0x10)
+#define ZIP_VQ_SBUF_ADDR (0x20)
+#define ZIP_VF_PF_MBOXX(x) (0x400 | (x)<<3)
+#define ZIP_VQ_DOORBELL (0x1000)
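+
+/*
+ * Illustration: ZIP_VF_PF_MBOXX(x) selects one of the 8-byte mailbox
+ * registers, e.g. ZIP_VF_PF_MBOXX(0) is 0x400 and ZIP_VF_PF_MBOXX(1)
+ * is 0x408.
+ */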
+
+/**< Vendor ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+/**< PCI device id of ZIP VF */
+#define PCI_DEVICE_ID_OCTEONTX_ZIPVF 0xA037
+
+/* maximum number of ZIP VF devices */
+#define ZIP_MAX_VFS 8
+
+/* max size of one chunk */
+#define ZIP_MAX_CHUNK_SIZE 8192
+
+/* each instruction is fixed 128 bytes */
+#define ZIP_CMD_SIZE 128
+
+#define ZIP_CMD_SIZE_WORDS (ZIP_CMD_SIZE >> 3) /* 16 64-bit words */
+
+/* size of next chunk buffer pointer */
+#define ZIP_MAX_NCBP_SIZE 8
+
+/* size of instruction queue in units of instruction size */
+#define ZIP_MAX_NUM_CMDS ((ZIP_MAX_CHUNK_SIZE - ZIP_MAX_NCBP_SIZE) / \
+ ZIP_CMD_SIZE) /* 63 */
+
+/* size of instruction queue in bytes */
+#define ZIP_MAX_CMDQ_SIZE ((ZIP_MAX_NUM_CMDS * ZIP_CMD_SIZE) + \
+ ZIP_MAX_NCBP_SIZE)/* ~8072ull */
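+
+/*
+ * Worked example of the sizing above: (8192 - 8) / 128 = 63 commands
+ * per chunk, so 63 * 128 + 8 = 8072 bytes of command queue are used.
+ */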
+
+#define ZIP_BUF_SIZE 256
+
+#define ZIP_SGPTR_ALIGN 16
+#define ZIP_CMDQ_ALIGN 128
+#define MAX_SG_LEN ((ZIP_BUF_SIZE - ZIP_SGPTR_ALIGN) / sizeof(void *))
+
+/**< ZIP PMD specified queue pairs */
+#define ZIP_MAX_VF_QUEUE 1
+
+#define ZIP_ALIGN_ROUNDUP(x, _align) \
+ ((_align) * (((x) + (_align) - 1) / (_align)))
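+
+/*
+ * e.g. ZIP_ALIGN_ROUNDUP(8072, 128) == 8192, while an already aligned
+ * value is returned unchanged: ZIP_ALIGN_ROUNDUP(8192, 128) == 8192.
+ */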
+
+/**< ZIP PMD device name */
+#define COMPRESSDEV_NAME_ZIP_PMD compress_octeonx
+
+#define ZIP_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, \
+ octtx_zip_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define ZIP_PMD_INFO(fmt, args...) \
+ ZIP_PMD_LOG(INFO, fmt, ## args)
+#define ZIP_PMD_ERR(fmt, args...) \
+ ZIP_PMD_LOG(ERR, fmt, ## args)
+
+/* resources required to process stream */
+enum NUM_BUFS_PER_STREAM {
+ RES_BUF = 0,
+ CMD_BUF,
+ HASH_CTX_BUF,
+ DECOMP_CTX_BUF,
+ IN_DATA_BUF,
+ OUT_DATA_BUF,
+ HISTORY_DATA_BUF,
+ MAX_BUFS_PER_STREAM
+};
+
+struct zip_stream;
+struct zipvf_qp;
+
+/* Algorithm handler function prototype */
+typedef int (*comp_func_t)(struct rte_comp_op *op,
+ struct zipvf_qp *qp, struct zip_stream *zstrm);
+
+/**
+ * ZIP private stream structure
+ */
+struct zip_stream {
+ union zip_inst_s *inst;
+ /* zip instruction pointer */
+ comp_func_t func;
+ /* function to process comp operation */
+ void *bufs[MAX_BUFS_PER_STREAM];
+} __rte_cache_aligned;
+
+
+/**
+ * ZIP instruction Queue
+ */
+struct zipvf_cmdq {
+ rte_spinlock_t qlock;
+ /* queue lock */
+ uint64_t *sw_head;
+ /* pointer to the queue head, in units of 8-byte words */
+ uint8_t *va;
+ /* pointer to instruction queue virtual address */
+ rte_iova_t iova;
+ /* iova addr of cmdq head*/
+};
+
+/**
+ * ZIP device queue structure
+ */
+struct zipvf_qp {
+ struct zipvf_cmdq cmdq;
+ /* Hardware instruction queue structure */
+ struct rte_ring *processed_pkts;
+ /* Ring for placing processed packets */
+ struct rte_compressdev_stats qp_stats;
+ /* Queue pair statistics */
+ uint16_t id;
+ /* Queue Pair Identifier */
+ const char *name;
+ /* Unique Queue Pair Name */
+ struct zip_vf *vf;
+ /* pointer to the device this queue belongs to */
+} __rte_cache_aligned;
+
+/**
+ * ZIP VF device structure.
+ */
+struct zip_vf {
+ int vfid;
+ /* vf index */
+ struct rte_pci_device *pdev;
+ /* pci device */
+ void *vbar0;
+ /* CSR base address for underlying BAR0 VF.*/
+ uint64_t dom_sdom;
+ /* Storing mbox domain and subdomain id for app rerun*/
+ uint32_t max_nb_queue_pairs;
+ /* maximum number of device queue pairs */
+ struct rte_mempool *zip_mp;
+ /* pointer to the stream resource mempool */
+} __rte_cache_aligned;
+
+
+static inline void
+zipvf_prepare_in_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset, inlen;
+ struct rte_mbuf *m_src;
+ union zip_inst_s *inst = zstrm->inst;
+
+ inlen = op->src.length;
+ offset = op->src.offset;
+ m_src = op->m_src;
+
+ /* Prepare direct input data pointer */
+ inst->s.dg = 0;
+ inst->s.inp_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_src, offset);
+ inst->s.inp_ptr_ctl.s.length = inlen;
+}
+
+static inline void
+zipvf_prepare_out_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset;
+ struct rte_mbuf *m_dst;
+ union zip_inst_s *inst = zstrm->inst;
+
+ offset = op->dst.offset;
+ m_dst = op->m_dst;
+
+ /* Prepare direct output data pointer */
+ inst->s.ds = 0;
+ inst->s.out_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_dst, offset);
+ inst->s.totaloutputlength = rte_pktmbuf_pkt_len(m_dst) -
+ op->dst.offset;
+ inst->s.out_ptr_ctl.s.length = inst->s.totaloutputlength;
+}
+
+static inline void
+zipvf_prepare_cmd_stateless(struct rte_comp_op *op, struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+
+ /* always set end-of-input (EF) flag for stateless ops */
+ inst->s.ef = 1;
+
+ if (inst->s.op == ZIP_OP_E_DECOMP)
+ inst->s.sf = 1;
+ else
+ inst->s.sf = 0;
+
+ /* Set input checksum */
+ inst->s.adlercrc32 = op->input_chksum;
+
+ /* Prepare gather buffers */
+ zipvf_prepare_in_buf(zstrm, op);
+ zipvf_prepare_out_buf(zstrm, op);
+}
+
+#ifdef ZIP_DBG
+static inline void
+zip_dump_instruction(void *inst)
+{
+ union zip_inst_s *cmd83 = (union zip_inst_s *)inst;
+ printf("####### START ########\n");
+ printf("doneint:%d totaloutputlength:%d\n", cmd83->s.doneint,
+ cmd83->s.totaloutputlength);
+ printf("exnum:%d iv:%d exbits:%d hmif:%d halg:%d\n", cmd83->s.exn,
+ cmd83->s.iv, cmd83->s.exbits, cmd83->s.hmif, cmd83->s.halg);
+ printf("flush:%d speed:%d cc:%d\n", cmd83->s.sf,
+ cmd83->s.ss, cmd83->s.cc);
+ printf("eof:%d bof:%d op:%d dscatter:%d dgather:%d hgather:%d\n",
+ cmd83->s.ef, cmd83->s.bf, cmd83->s.op, cmd83->s.ds,
+ cmd83->s.dg, cmd83->s.hg);
+ printf("historylength:%d adler32:%d\n", cmd83->s.historylength,
+ cmd83->s.adlercrc32);
+ printf("ctx_ptr.addr:0x%"PRIx64"\n", cmd83->s.ctx_ptr_addr.s.addr);
+ printf("ctx_ptr.len:%d\n", cmd83->s.ctx_ptr_ctl.s.length);
+ printf("history_ptr.addr:0x%"PRIx64"\n", cmd83->s.his_ptr_addr.s.addr);
+ printf("history_ptr.len:%d\n", cmd83->s.his_ptr_ctl.s.length);
+ printf("inp_ptr.addr:0x%"PRIx64"\n", cmd83->s.inp_ptr_addr.s.addr);
+ printf("inp_ptr.len:%d\n", cmd83->s.inp_ptr_ctl.s.length);
+ printf("out_ptr.addr:0x%"PRIx64"\n", cmd83->s.out_ptr_addr.s.addr);
+ printf("out_ptr.len:%d\n", cmd83->s.out_ptr_ctl.s.length);
+ printf("result_ptr.len:%d\n", cmd83->s.res_ptr_ctl.s.length);
+ printf("####### END ########\n");
+}
+#endif
+
+int
+zipvf_create(struct rte_compressdev *compressdev);
+
+int
+zipvf_destroy(struct rte_compressdev *compressdev);
+
+int
+zipvf_q_init(struct zipvf_qp *qp);
+
+int
+zipvf_q_term(struct zipvf_qp *qp);
+
+void
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *zcmd);
+
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm);
+
+uint64_t
+zip_reg_read64(uint8_t *hw_addr, uint64_t offset);
+
+void
+zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val);
+
+#endif /* _RTE_OCTEONTX_ZIP_VF_H_ */
diff --git a/drivers/compress/octeontx/otx_zip_pmd.c b/drivers/compress/octeontx/otx_zip_pmd.c
new file mode 100644
index 00000000..9d13f933
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip_pmd.c
@@ -0,0 +1,658 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cpuflags.h>
+#include <rte_malloc.h>
+
+#include "otx_zip.h"
+
+static const struct rte_compressdev_capabilities
+ octtx_zip_pmd_capabilities[] = {
+ { .algo = RTE_COMP_ALGO_DEFLATE,
+ /* Deflate */
+ .comp_feature_flags = RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ /* Non sharable Priv XFORM and Stateless */
+ .window_size = {
+ .min = 1,
+ .max = 14,
+ .increment = 1
+ /* size supported 2^1 to 2^14 */
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
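+
+/*
+ * Minimal sketch (not part of this driver) of how an application might
+ * probe these capabilities; dev_id is assumed valid and
+ * use_dynamic_huffman() is a placeholder for application logic:
+ *
+ *     const struct rte_compressdev_capabilities *cap =
+ *             rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);
+ *     if (cap != NULL &&
+ *         (cap->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC))
+ *             use_dynamic_huffman();
+ */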
+
+/*
+ * Reset session to default state for next set of stateless operation
+ */
+static inline void
+reset_stream(struct zip_stream *z_stream)
+{
+ union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
+
+ inst->s.bf = 1;
+ inst->s.ef = 0;
+}
+
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+ volatile union zip_zres_s *zresult = NULL;
+
+
+ if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
+ (op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
+ (op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZIP_PMD_ERR("Segmented packet is not supported\n");
+ return 0;
+ }
+
+ zipvf_prepare_cmd_stateless(op, zstrm);
+
+ zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
+ zresult->s.compcode = 0;
+
+#ifdef ZIP_DBG
+ zip_dump_instruction(inst);
+#endif
+
+ /* Submit zip command */
+ zipvf_push_command(qp, (void *)inst);
+
+ /* Check and Process results in sync mode */
+ do {
+ } while (!zresult->s.compcode);
+
+ if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ } else {
+ /* FATAL error cannot do anything */
+ ZIP_PMD_ERR("operation failed with error code:%d\n",
+ zresult->s.compcode);
+ if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ else
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ }
+
+ ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);
+
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed = zresult->s.totalbytesread;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced = zresult->s.totalbyteswritten;
+ break;
+ default:
+ ZIP_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ break;
+ }
+ /* zstream is reset irrespective of result */
+ reset_stream(zstrm);
+
+ zresult->s.compcode = ZIP_COMP_E_NOTDONE;
+ return 0;
+}
+
+/** Parse xform parameters and setup a stream */
+static int
+zip_set_stream_parameters(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ struct zip_stream *z_stream)
+{
+ int ret;
+ union zip_inst_s *inst;
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+ void *res;
+
+ /* Allocate resources required by a stream */
+ ret = rte_mempool_get_bulk(vf->zip_mp,
+ z_stream->bufs, MAX_BUFS_PER_STREAM);
+ if (ret < 0)
+ return -1;
+
+ /* get one command buffer from pool and set up */
+ inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
+ res = z_stream->bufs[RES_BUF];
+
+ memset(inst->u, 0, sizeof(inst->u));
+
+ /* set bf for only first ops of stream */
+ inst->s.bf = 1;
+
+ if (xform->type == RTE_COMP_COMPRESS) {
+ inst->s.op = ZIP_OP_E_COMP;
+
+ switch (xform->compress.deflate.huffman) {
+ case RTE_COMP_HUFFMAN_DEFAULT:
+ inst->s.cc = ZIP_CC_DEFAULT;
+ break;
+ case RTE_COMP_HUFFMAN_FIXED:
+ inst->s.cc = ZIP_CC_FIXED_HUFF;
+ break;
+ case RTE_COMP_HUFFMAN_DYNAMIC:
+ inst->s.cc = ZIP_CC_DYN_HUFF;
+ break;
+ default:
+ ret = -1;
+ goto err;
+ }
+
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_MIN:
+ inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
+ break;
+ case RTE_COMP_LEVEL_MAX:
+ inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
+ break;
+ case RTE_COMP_LEVEL_NONE:
+ ZIP_PMD_ERR("Compression level not supported");
+ ret = -1;
+ goto err;
+ default:
+ /* for any value between min and max, choose
+ * the PMD default.
+ */
+ inst->s.ss = ZIP_COMP_E_LEVEL_MED; /* PMD default */
+ break;
+ }
+ } else if (xform->type == RTE_COMP_DECOMPRESS) {
+ inst->s.op = ZIP_OP_E_DECOMP;
+ /* from HRM,
+ * For DEFLATE decompression, [CC] must be 0x0.
+ * For decompression, [SS] must be 0x0
+ */
+ inst->s.cc = 0;
+ /* Speed bit should not be set for decompression */
+ inst->s.ss = 0;
+ /* decompression context is supported only for STATEFUL
+ * operations. Currently we support STATELESS ONLY so
+ * skip setting of ctx pointer
+ */
+
+ } else {
+ ZIP_PMD_ERR("xform type not supported");
+ ret = -1;
+ goto err;
+ }
+
+ inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
+ inst->s.res_ptr_ctl.s.length = 0;
+
+ z_stream->inst = inst;
+ z_stream->func = zip_process_op;
+
+ return 0;
+
+err:
+ rte_mempool_put_bulk(vf->zip_mp,
+ (void *)&(z_stream->bufs[0]),
+ MAX_BUFS_PER_STREAM);
+
+ return ret;
+}
+
+/** Configure device */
+static int
+zip_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ int nb_streams;
+ char res_pool[RTE_MEMZONE_NAMESIZE];
+ struct zip_vf *vf;
+ struct rte_mempool *zip_buf_mp;
+
+ if (!config || !dev)
+ return -EIO;
+
+ vf = (struct zip_vf *)(dev->data->dev_private);
+
+ /* create pool with maximum numbers of resources
+ * required by streams
+ */
+
+ /* use common pool for non-shareable priv_xform and stream */
+ nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
+
+ snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
+ dev->data->dev_id);
+
+ /* TBD: should we use the per-core object cache for stream resources? */
+ zip_buf_mp = rte_mempool_create(
+ res_pool,
+ nb_streams * MAX_BUFS_PER_STREAM,
+ ZIP_BUF_SIZE,
+ 0,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ SOCKET_ID_ANY,
+ 0);
+
+ if (zip_buf_mp == NULL) {
+ ZIP_PMD_ERR(
+ "Failed to create buf mempool octtx_zip_res_pool%u",
+ dev->data->dev_id);
+ return -1;
+ }
+
+ vf->zip_mp = zip_buf_mp;
+
+ return 0;
+}
+
+/** Start device */
+static int
+zip_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+
+}
+
+/** Close device */
+static int
+zip_pmd_close(struct rte_compressdev *dev)
+{
+ if (dev == NULL)
+ return -1;
+
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+ rte_mempool_free(vf->zip_mp);
+
+ return 0;
+}
+
+/** Get device statistics */
+static void
+zip_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zip_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Get device info */
+static void
+zip_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_name = dev->device->driver->name;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = octtx_zip_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
+ }
+}
+
+/** Release queue pair */
+static int
+zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ zipvf_q_term(qp);
+
+ if (qp->processed_pkts)
+ rte_ring_free(qp->processed_pkts);
+
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZIP_PMD_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct zipvf_qp *qp = NULL;
+ struct zip_vf *vf;
+ char *name;
+ int ret;
+
+ if (!dev)
+ return -1;
+
+ vf = (struct zip_vf *) (dev->data->dev_private);
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
+ return 0;
+ }
+
+ name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
+ if (name == NULL)
+ return (-ENOMEM);
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+ "zip_pmd_%u_qp_%u",
+ dev->data->dev_id, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket(name, sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->name = name;
+
+ /* Create completion queue up to max_inflight_ops */
+ qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ qp->id = qp_id;
+ qp->vf = vf;
+
+ ret = zipvf_q_init(qp);
+ if (ret < 0)
+ goto qp_setup_cleanup;
+
+ dev->data->queue_pairs[qp_id] = qp;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ rte_ring_free(qp->processed_pkts);
+ rte_free(qp);
+ return -1;
+}
+
+static int
+zip_pmd_stream_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **stream)
+{
+ int ret;
+ struct zip_stream *strm = NULL;
+
+ strm = rte_malloc(NULL,
+ sizeof(struct zip_stream), 0);
+
+ if (strm == NULL)
+ return (-ENOMEM);
+
+ ret = zip_set_stream_parameters(dev, xform, strm);
+ if (ret < 0) {
+ ZIP_PMD_ERR("failed configure xform parameters");
+ rte_free(strm);
+ return ret;
+ }
+ *stream = strm;
+ return 0;
+}
+
+static int
+zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
+{
+ struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
+ struct zip_stream *z_stream;
+
+ if (stream == NULL)
+ return 0;
+
+ z_stream = (struct zip_stream *)stream;
+
+ /* Free resources back to pool */
+ rte_mempool_put_bulk(vf->zip_mp,
+ (void *)&(z_stream->bufs[0]),
+ MAX_BUFS_PER_STREAM);
+
+ /* Zero out the whole structure */
+ memset(stream, 0, sizeof(struct zip_stream));
+ rte_free(stream);
+
+ return 0;
+}
+
+
+static uint16_t
+zip_pmd_enqueue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+ struct rte_comp_op *op;
+ struct zip_stream *zstrm;
+ int i, ret = 0;
+ uint16_t enqd = 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ op = ops[i];
+
+ if (op->op_type == RTE_COMP_OP_STATEFUL) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ } else {
+ /* process stateless ops */
+ zstrm = (struct zip_stream *)op->private_xform;
+ if (unlikely(zstrm == NULL))
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ else
+ ret = zstrm->func(op, qp, zstrm);
+ }
+
+ /* Whatever is out of op, put it into completion queue with
+ * its status
+ */
+ if (!ret)
+ ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
+
+ if (unlikely(ret < 0)) {
+ /* increment count if failed to enqueue op*/
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zip_pmd_dequeue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
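+
+/*
+ * Typical application data path against these handlers (illustrative
+ * sketch; assumes ops[] was taken from a rte_comp_op pool and each op
+ * carries a valid private_xform):
+ *
+ *     uint16_t nb_enq = rte_compressdev_enqueue_burst(dev_id, qp_id,
+ *                     ops, nb_ops);
+ *     uint16_t nb_deq = 0;
+ *     while (nb_deq < nb_enq)
+ *             nb_deq += rte_compressdev_dequeue_burst(dev_id, qp_id,
+ *                             deq_ops + nb_deq, nb_enq - nb_deq);
+ */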
+
+struct rte_compressdev_ops octtx_zip_pmd_ops = {
+ .dev_configure = zip_pmd_config,
+ .dev_start = zip_pmd_start,
+ .dev_stop = zip_pmd_stop,
+ .dev_close = zip_pmd_close,
+
+ .stats_get = zip_pmd_stats_get,
+ .stats_reset = zip_pmd_stats_reset,
+
+ .dev_infos_get = zip_pmd_info_get,
+
+ .queue_pair_setup = zip_pmd_qp_setup,
+ .queue_pair_release = zip_pmd_qp_release,
+
+ .private_xform_create = zip_pmd_stream_create,
+ .private_xform_free = zip_pmd_stream_free,
+ .stream_create = NULL,
+ .stream_free = NULL
+};
+
+static int
+zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+ char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id(),
+ };
+
+ ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
+ (unsigned int)pci_dev->id.vendor_id,
+ (unsigned int)pci_dev->id.device_id);
+
+ rte_pci_device_name(&pci_dev->addr, compressdev_name,
+ sizeof(compressdev_name));
+
+ compressdev = rte_compressdev_pmd_create(compressdev_name,
+ &pci_dev->device, sizeof(struct zip_vf), &init_params);
+ if (compressdev == NULL) {
+ ZIP_PMD_ERR("driver %s: create failed", init_params.name);
+ return -ENODEV;
+ }
+
+ /*
+ * create only if proc_type is primary.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* create vf dev with given pmd dev id */
+ ret = zipvf_create(compressdev);
+ if (ret < 0) {
+ ZIP_PMD_ERR("Device creation failed");
+ rte_compressdev_pmd_destroy(compressdev);
+ return ret;
+ }
+ }
+
+ compressdev->dev_ops = &octtx_zip_pmd_ops;
+ /* register rx/tx burst functions for data path */
+ compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
+ compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+ return ret;
+}
+
+static int
+zip_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_compressdev *compressdev;
+ char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL) {
+ ZIP_PMD_ERR(" Invalid PCI Device\n");
+ return -EINVAL;
+ }
+ rte_pci_device_name(&pci_dev->addr, compressdev_name,
+ sizeof(compressdev_name));
+
+ compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (zipvf_destroy(compressdev) < 0)
+ return -ENODEV;
+ }
+ return rte_compressdev_pmd_destroy(compressdev);
+}
+
+static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_ZIPVF),
+ },
+ {
+ .device_id = 0
+ },
+};
+
+/**
+ * Structure that represents a PCI driver
+ */
+static struct rte_pci_driver octtx_zip_pmd = {
+ .id_table = pci_id_octtx_zipvf_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = zip_pci_probe,
+ .remove = zip_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
+
+RTE_INIT(octtx_zip_init_log);
+
+static void
+octtx_zip_init_log(void)
+{
+ octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
+ if (octtx_zip_logtype_driver >= 0)
+ rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
+}
diff --git a/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map b/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/qat/meson.build b/drivers/compress/qat/meson.build
new file mode 100644
index 00000000..9d15076d
--- /dev/null
+++ b/drivers/compress/qat/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# Add our source files to the list
+allow_experimental_apis = true
+qat_sources += files('qat_comp_pmd.c',
+ 'qat_comp.c')
+qat_includes += include_directories('.')
+qat_deps += 'compressdev'
+qat_ext_deps += dep
+
+# build the whole driver
+sources += qat_sources
+cflags += qat_cflags
+deps += qat_deps
+ext_deps += qat_ext_deps
+includes += qat_includes
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
new file mode 100644
index 00000000..38c8a5b8
--- /dev/null
+++ b/drivers/compress/qat/qat_comp.c
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_hexdump.h>
+#include <rte_comp.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "qat_logs.h"
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused)
+{
+ struct rte_comp_op *op = in_op;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
+ struct qat_comp_xform *qat_xform = op->private_xform;
+ const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+ struct icp_qat_fw_comp_req *comp_req =
+ (struct icp_qat_fw_comp_req *)out_msg;
+
+ if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
+ "operation requests, op (%p) is not a "
+ "stateless operation.", op);
+ return -EINVAL;
+ }
+
+ rte_mov128(out_msg, tmpl);
+ comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+
+ /* common for sgl and flat buffers */
+ comp_req->comp_pars.comp_len = op->src.length;
+ comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
+ op->dst.offset;
+
+ if (op->m_src->next != NULL || op->m_dst->next != NULL) {
+ /* sgl */
+ int ret = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+
+ ret = qat_sgl_fill_array(op->m_src,
+ op->src.offset,
+ &cookie->qat_sgl_src,
+ op->src.length,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
+ return ret;
+ }
+
+ ret = qat_sgl_fill_array(op->m_dst,
+ op->dst.offset,
+ &cookie->qat_sgl_dst,
+ comp_req->comp_pars.out_buffer_sz,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
+ return ret;
+ }
+
+ comp_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ comp_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ comp_req->comn_mid.src_length = 0;
+ comp_req->comn_mid.dst_length = 0;
+
+ } else {
+ /* flat aka linear buffer */
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_FLAT);
+ comp_req->comn_mid.src_length = op->src.length;
+ comp_req->comn_mid.dst_length =
+ comp_req->comp_pars.out_buffer_sz;
+
+ comp_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
+ comp_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+int
+qat_comp_process_response(void **op, uint8_t *resp)
+{
+ struct icp_qat_fw_comp_resp *resp_msg =
+ (struct icp_qat_fw_comp_resp *)resp;
+ struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+ struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
+ (rx_op->private_xform);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comp_resp));
+#endif
+
+ if (likely(qat_xform->qat_comp_request_type
+ != QAT_COMP_REQUEST_DECOMPRESS)) {
+ if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
+ resp_msg->comn_resp.hdr_flags)
+ == ICP_QAT_FW_COMP_NO_CNV)) {
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
+ *op = (void *)rx_op;
+ QAT_DP_LOG(ERR, "QAT has wrong firmware");
+ return 0;
+ }
+ }
+
+ if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status)
+ | ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
+ resp_msg->comn_resp.comn_status)) !=
+ ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
+
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status =
+ *((uint16_t *)(&resp_msg->comn_resp.comn_error));
+ } else {
+ struct icp_qat_fw_resp_comp_pars *comp_resp =
+ (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
+
+ rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ rx_op->consumed = comp_resp->input_byte_counter;
+ rx_op->produced = comp_resp->output_byte_counter;
+
+ if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
+ if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ rx_op->output_chksum = comp_resp->curr_crc32;
+ else if (qat_xform->checksum_type ==
+ RTE_COMP_CHECKSUM_ADLER32)
+ rx_op->output_chksum = comp_resp->curr_adler_32;
+ else
+ rx_op->output_chksum = comp_resp->curr_chksum;
+ }
+ }
+ *op = (void *)rx_op;
+
+ return 0;
+}
+
+unsigned int
+qat_comp_xform_size(void)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
+}
+
+static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_comp_request_type request)
+{
+ if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ else if (request == QAT_COMP_REQUEST_DECOMPRESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
+ QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
+}
+
+static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
+ const struct rte_memzone *interm_buff_mz __rte_unused,
+ const struct rte_comp_xform *xform)
+{
+ struct icp_qat_fw_comp_req *comp_req;
+ int comp_level, algo;
+ uint32_t req_par_flags;
+ int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;
+
+ if (unlikely(qat_xform == NULL)) {
+ QAT_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
+ ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+
+ } else {
+ if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level == 1)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ else if (xform->compress.level == 2)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
+ else if (xform->compress.level == 3)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level >= 4 &&
+ xform->compress.level <= 9)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
+ else {
+ QAT_LOG(ERR, "compression level not supported");
+ return -EINVAL;
+ }
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY);
+ }
+
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
+ break;
+ case RTE_COMP_ALGO_LZS:
+ default:
+ /* RTE_COMP_NULL */
+ QAT_LOG(ERR, "compression algorithm not supported");
+ return -EINVAL;
+ }
+
+ comp_req = &qat_xform->qat_comp_req_tmpl;
+
+ /* Initialize header */
+ qat_comp_create_req_hdr(&comp_req->comn_hdr,
+ qat_xform->qat_comp_request_type);
+
+ comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+ comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
+ direction,
+ /* In CPM 1.6 this is the only valid mode */
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
+ /* Translate level to depth */
+ comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ comp_req->comp_pars.initial_adler = 1;
+ comp_req->comp_pars.initial_crc32 = 0;
+ comp_req->comp_pars.req_par_flags = req_par_flags;
+
+
+ if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_COMP);
+ } else if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
+		QAT_LOG(ERR, "Dynamic Huffman encoding not supported");
+ return -EINVAL;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+/**
+ * Create driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param xform
+ * xform data from application
+ * @param private_xform
+ *   Pointer where the handle of the PMD's private_xform data is stored
+ * @return
+ *  - 0 if successful, with a valid private_xform handle stored
+ *  - <0 in error cases
+ *  - Returns -EINVAL if input parameters are invalid.
+ *  - Returns -ENOTSUP if the comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private_xform could not be allocated.
+ */
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ struct qat_comp_dev_private *qat = dev->data->dev_private;
+
+ if (unlikely(private_xform == NULL)) {
+ QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
+ return -EINVAL;
+ }
+ if (unlikely(qat->xformpool == NULL)) {
+ QAT_LOG(ERR, "QAT device has no private_xform mempool");
+ return -ENOMEM;
+ }
+ if (rte_mempool_get(qat->xformpool, private_xform)) {
+ QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
+ return -ENOMEM;
+ }
+
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)*private_xform;
+
+ if (xform->type == RTE_COMP_COMPRESS) {
+ if (xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC) {
+ QAT_LOG(ERR,
+ "QAT device doesn't support dynamic compression");
+ return -ENOTSUP;
+ }
+
+		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
+		    ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
+				&& qat->interm_buff_mz == NULL))
+			qat_xform->qat_comp_request_type =
+					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
+
+ } else {
+ qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+ }
+
+ qat_xform->checksum_type = xform->compress.chksum;
+
+ if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+		QAT_LOG(ERR, "QAT: failed to set up compression template");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Free driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param private_xform
+ *   Handle of the PMD's private_xform data
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ */
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+ void *private_xform)
+{
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)private_xform;
+
+ if (qat_xform) {
+ memset(qat_xform, 0, qat_comp_xform_size());
+ struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);
+
+ rte_mempool_put(mp, qat_xform);
+ return 0;
+ }
+ return -EINVAL;
+}
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
new file mode 100644
index 00000000..8d315efb
--- /dev/null
+++ b/drivers/compress/qat/qat_comp.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_H_
+#define _QAT_COMP_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_fw_la.h"
+
+#define ERR_CODE_QAT_COMP_WRONG_FW -99
+
+enum qat_comp_request_type {
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
+ QAT_COMP_REQUEST_DECOMPRESS,
+ REQ_COMP_END
+};
+
+struct qat_comp_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_comp_op_cookie {
+ struct qat_comp_sgl qat_sgl_src;
+ struct qat_comp_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+struct qat_comp_xform {
+ struct icp_qat_fw_comp_req qat_comp_req_tmpl;
+ enum qat_comp_request_type qat_comp_request_type;
+ enum rte_comp_checksum_type checksum_type;
+};
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused);
+
+int
+qat_comp_process_response(void **op, uint8_t *resp);
+
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform);
+
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
+
+unsigned int
+qat_comp_xform_size(void);
+
+#endif
+#endif
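
The private_xform path above is driven through the generic compressdev API.
A minimal sketch of how an application would exercise it (the device id,
error handling and the fixed-Huffman choice are illustrative assumptions,
not part of this patch):

    #include <rte_comp.h>
    #include <rte_compressdev.h>

    /* Illustrative helper: create a stateless fixed-Huffman deflate xform */
    static void *
    create_deflate_xform(uint8_t dev_id)
    {
    	struct rte_comp_xform xform = {
    		.type = RTE_COMP_COMPRESS,
    		.compress = {
    			.algo = RTE_COMP_ALGO_DEFLATE,
    			.deflate.huffman = RTE_COMP_HUFFMAN_FIXED,
    			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
    			.window_size = 15,
    			.chksum = RTE_COMP_CHECKSUM_NONE,
    		},
    	};
    	void *priv_xform = NULL;

    	/* On a QAT device this ends up in qat_comp_private_xform_create() */
    	if (rte_compressdev_private_xform_create(dev_id, &xform,
    			&priv_xform) < 0)
    		return NULL;
    	return priv_xform;
    }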
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
new file mode 100644
index 00000000..b89975fc
--- /dev/null
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
+ {/* COMPRESSION - deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_ADLER32_CHECKSUM |
+ RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+ .window_size = {.min = 15, .max = 15, .increment = 0} },
+ {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
+
+static void
+qat_comp_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_comp_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void
+qat_comp_stats_reset(struct rte_compressdev *dev)
+{
+ struct qat_comp_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
+
+}
+
+static int
+qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
+{
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release comp qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int
+qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+ const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_comp_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_comp_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
+ qat_qp_conf.nb_descriptors = max_inflight_ops;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "comp";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
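+	/* Pre-compute the IOVA of each cookie's src/dst SGL once at setup
+	 * time so the data path needs no address translation.
+	 */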
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_comp_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
+
+static struct rte_mempool *
+qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
+ uint32_t num_elements)
+{
+ char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
+ "%s_xforms", comp_dev->qat_dev->name);
+
+ QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
+ mp = rte_mempool_lookup(xform_pool_name);
+
+ if (mp != NULL) {
+ QAT_LOG(DEBUG, "xformpool already created");
+ if (mp->size != num_elements) {
+ QAT_LOG(DEBUG, "xformpool wrong size - delete it");
+ rte_mempool_free(mp);
+ mp = NULL;
+ comp_dev->xformpool = NULL;
+ }
+ }
+
+ if (mp == NULL)
+ mp = rte_mempool_create(xform_pool_name,
+ num_elements,
+ qat_comp_xform_size(), 0, 0,
+ NULL, NULL, NULL, NULL, rte_socket_id(),
+ 0);
+ if (mp == NULL) {
+		QAT_LOG(ERR, "Error creating mempool %s with %d elements of size %d",
+ xform_pool_name, num_elements, qat_comp_xform_size());
+ return NULL;
+ }
+
+ return mp;
+}
+
+static void
+_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
+{
+ /* Free private_xform pool */
+ if (comp_dev->xformpool) {
+ /* Free internal mempool for private xforms */
+ rte_mempool_free(comp_dev->xformpool);
+ comp_dev->xformpool = NULL;
+ }
+}
+
+static int
+qat_comp_dev_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ int ret = 0;
+
+ if (config->max_nb_streams != 0) {
+ QAT_LOG(ERR,
+ "QAT device does not support STATEFUL so max_nb_streams must be 0");
+ return -EINVAL;
+ }
+
+ comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+ config->max_nb_priv_xforms);
+ if (comp_dev->xformpool == NULL) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
+ return 0;
+
+error_out:
+ _qat_comp_dev_config_clear(comp_dev);
+ return ret;
+}
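
The QAT compression service is stateless-only, so a device must be configured
with zero streams. A hedged example of a matching configure call (the
queue-pair and xform counts are arbitrary illustrative values):

    #include <rte_compressdev.h>
    #include <rte_lcore.h>

    /* Illustrative only: configure a QAT compressdev for stateless use */
    static int
    configure_qat_compressdev(uint8_t dev_id)
    {
    	struct rte_compressdev_config cfg = {
    		.socket_id = rte_socket_id(),
    		.nb_queue_pairs = 2,
    		.max_nb_priv_xforms = 16, /* sizes the xform mempool above */
    		.max_nb_streams = 0,      /* must be 0, see the check above */
    	};

    	return rte_compressdev_configure(dev_id, &cfg);
    }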
+
+static int
+qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
+{
+}
+
+static int
+qat_comp_dev_close(struct rte_compressdev *dev)
+{
+ int i;
+ int ret = 0;
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_comp_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ _qat_comp_dev_config_clear(comp_dev);
+
+ return ret;
+}
+
+static void
+qat_comp_dev_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *info)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = comp_dev->qat_dev_capabilities;
+ }
+}
+
+static uint16_t
+qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
+ struct rte_comp_op **ops __rte_unused,
+ uint16_t nb_ops __rte_unused)
+{
+	QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version!");
+ return 0;
+}
+
+static struct rte_compressdev_ops compress_qat_dummy_ops = {
+
+ /* Device related operations */
+ .dev_configure = NULL,
+ .dev_start = NULL,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = NULL,
+
+ .stats_get = NULL,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = NULL,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = NULL,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+static uint16_t
+qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+
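+	/* First successful dequeue on this device: if the firmware returned
+	 * the wrong-FW error code, swap in dummy ops and burst functions;
+	 * otherwise switch to the regular dequeue handler so this check is
+	 * not repeated on the data path.
+	 */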
+ if (ret) {
+ if ((*ops)->debug_status ==
+ (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
+ tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+
+ tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
+ &compress_qat_dummy_ops;
+			QAT_LOG(ERR, "QAT PMD detected wrong FW version!");
+
+ } else {
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_dequeue_op_burst;
+ }
+ }
+ return ret;
+}
+
+static struct rte_compressdev_ops compress_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_comp_dev_config,
+ .dev_start = qat_comp_dev_start,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = qat_comp_dev_info_get,
+
+ .stats_get = qat_comp_stats_get,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = qat_comp_qp_setup,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = qat_comp_private_xform_create,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ if (qat_pci_dev->qat_dev_gen == QAT_GEN1) {
+ QAT_LOG(ERR, "Compression PMD not supported on QAT dh895xcc");
+ return 0;
+ }
+
+ struct rte_compressdev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ };
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct qat_comp_dev_private *comp_dev;
+
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "comp");
+ QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
+
+ compressdev = rte_compressdev_pmd_create(name,
+ &qat_pci_dev->pci_dev->device,
+ sizeof(struct qat_comp_dev_private),
+ &init_params);
+
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ compressdev->dev_ops = &compress_qat_ops;
+
+ compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
+ compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;
+
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+ comp_dev = compressdev->data->dev_private;
+ comp_dev->qat_dev = qat_pci_dev;
+ comp_dev->compressdev = compressdev;
+ qat_pci_dev->comp_dev = comp_dev;
+
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ case QAT_GEN2:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ break;
+ default:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN1",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG,
+ "Created QAT COMP device %s as compressdev instance %d",
+ name, compressdev->data->dev_id);
+ return 0;
+}
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct qat_comp_dev_private *comp_dev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ comp_dev = qat_pci_dev->comp_dev;
+ if (comp_dev == NULL)
+ return 0;
+
+ /* clean up any resources used by the device */
+ qat_comp_dev_close(comp_dev->compressdev);
+
+ rte_compressdev_pmd_destroy(comp_dev->compressdev);
+ qat_pci_dev->comp_dev = NULL;
+
+ return 0;
+}
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
new file mode 100644
index 00000000..9ad2a283
--- /dev/null
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_PMD_H_
+#define _QAT_COMP_PMD_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_device.h"
+
+/** Private data structure for a QAT compression device.
+ * This QAT device offers only a compression service;
+ * there can be one of these on each qat_pci_device (VF).
+ */
+struct qat_comp_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ struct rte_compressdev *compressdev;
+ /**< The pointer to this compression device structure */
+ const struct rte_compressdev_capabilities *qat_dev_capabilities;
+	/**< QAT device compression capabilities */
+ const struct rte_memzone *interm_buff_mz;
+ /**< The device's memory for intermediate buffers */
+ struct rte_mempool *xformpool;
+ /**< The device's pool for qat_comp_xforms */
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_COMP_PMD_H_ */
diff --git a/drivers/compress/qat/rte_pmd_qat_version.map b/drivers/compress/qat/rte_pmd_qat_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/qat/rte_pmd_qat_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/zlib/Makefile b/drivers/compress/zlib/Makefile
new file mode 100644
index 00000000..5cf8de6f
--- /dev/null
+++ b/drivers/compress/zlib/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium Networks
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_zlib.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zlib_version.map
+
+# external library dependencies
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lz
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/zlib/meson.build b/drivers/compress/zlib/meson.build
new file mode 100644
index 00000000..7748de2d
--- /dev/null
+++ b/drivers/compress/zlib/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium Networks
+
+dep = dependency('zlib', required: false)
+if not dep.found()
+ build = false
+endif
+
+deps += 'bus_vdev'
+sources = files('zlib_pmd.c', 'zlib_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lz'
+
+allow_experimental_apis = true
diff --git a/drivers/compress/zlib/rte_pmd_zlib_version.map b/drivers/compress/zlib/rte_pmd_zlib_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/zlib/rte_pmd_zlib_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/zlib/zlib_pmd.c b/drivers/compress/zlib/zlib_pmd.c
new file mode 100644
index 00000000..7d6871b1
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd.c
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+
+#include "zlib_pmd_private.h"
+
+/** Advance to the next mbuf in the chain and update the data pointer
+ *  and length; evaluates to 0 when there is no next mbuf
+ */
+#define COMPUTE_BUF(mbuf, data, len) \
+ ((mbuf = mbuf->next) ? \
+ (data = rte_pktmbuf_mtod(mbuf, uint8_t *)), \
+ (len = rte_pktmbuf_data_len(mbuf)) : 0)
+
+static void
+process_zlib_deflate(struct rte_comp_op *op, z_stream *strm)
+{
+ int ret, flush, fin_flush;
+ struct rte_mbuf *mbuf_src = op->m_src;
+ struct rte_mbuf *mbuf_dst = op->m_dst;
+
+ switch (op->flush_flag) {
+ case RTE_COMP_FLUSH_FULL:
+ case RTE_COMP_FLUSH_FINAL:
+ fin_flush = Z_FINISH;
+ break;
+	default:
+		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+		ZLIB_PMD_ERR("Invalid flush value\n");
+		return;
+	}
+
+ if (unlikely(!strm)) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid z_stream\n");
+ return;
+ }
+ /* Update z_stream with the inputs provided by application */
+ strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->src.offset);
+
+ strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
+
+ strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->dst.offset);
+
+ strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
+
+ /* Set flush value to NO_FLUSH unless it is last mbuf */
+ flush = Z_NO_FLUSH;
+ /* Initialize status to SUCCESS */
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ do {
+ /* Set flush value to Z_FINISH for last block */
+ if ((op->src.length - strm->total_in) <= strm->avail_in) {
+ strm->avail_in = (op->src.length - strm->total_in);
+ flush = fin_flush;
+ }
+ do {
+ ret = deflate(strm, flush);
+ if (unlikely(ret == Z_STREAM_ERROR)) {
+ /* error return, do not process further */
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ goto def_end;
+ }
+ /* Break if Z_STREAM_END is encountered */
+ if (ret == Z_STREAM_END)
+ goto def_end;
+
+ /* Keep looping until input mbuf is consumed.
+ * Exit if destination mbuf gets exhausted.
+ */
+ } while ((strm->avail_out == 0) &&
+ COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
+
+ if (!strm->avail_out) {
+ /* there is no space for compressed output */
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ break;
+ }
+
+ /* Update source buffer to next mbuf
+ * Exit if input buffers are fully consumed
+ */
+ } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
+
+def_end:
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed += strm->total_in;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced += strm->total_out;
+ break;
+ default:
+ ZLIB_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ }
+
+ deflateReset(strm);
+}
+
+static void
+process_zlib_inflate(struct rte_comp_op *op, z_stream *strm)
+{
+ int ret, flush;
+ struct rte_mbuf *mbuf_src = op->m_src;
+ struct rte_mbuf *mbuf_dst = op->m_dst;
+
+ if (unlikely(!strm)) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid z_stream\n");
+ return;
+ }
+ strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->src.offset);
+
+ strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
+
+ strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->dst.offset);
+
+ strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
+
+	/* Ignore the flush value provided by the application for decompression */
+ flush = Z_NO_FLUSH;
+ /* initialize status to SUCCESS */
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ do {
+ do {
+ ret = inflate(strm, flush);
+
+			switch (ret) {
+			case Z_NEED_DICT:
+ ret = Z_DATA_ERROR;
+ /* Fall-through */
+ case Z_DATA_ERROR:
+ /* Fall-through */
+ case Z_MEM_ERROR:
+ /* Fall-through */
+ case Z_STREAM_ERROR:
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ /* Fall-through */
+ case Z_STREAM_END:
+ /* no further computation needed if
+ * Z_STREAM_END is encountered
+ */
+ goto inf_end;
+ default:
+ /* success */
+ break;
+
+ }
+ /* Keep looping until input mbuf is consumed.
+ * Exit if destination mbuf gets exhausted.
+ */
+ } while ((strm->avail_out == 0) &&
+ COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
+
+ if (!strm->avail_out) {
+ /* there is no more space for decompressed output */
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ break;
+ }
+ /* Read next input buffer to be processed, exit if compressed
+ * blocks are fully read
+ */
+ } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
+
+inf_end:
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed += strm->total_in;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced += strm->total_out;
+ break;
+ default:
+		ZLIB_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ }
+
+ inflateReset(strm);
+}
+
+/** Process comp operation for mbuf */
+static inline int
+process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op)
+{
+ struct zlib_stream *stream;
+ struct zlib_priv_xform *private_xform;
+
+ if ((op->op_type == RTE_COMP_OP_STATEFUL) ||
+ (op->src.offset > rte_pktmbuf_data_len(op->m_src)) ||
+ (op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+		ZLIB_PMD_ERR("Invalid source or destination buffers or "
+				"invalid operation requested\n");
+ } else {
+ private_xform = (struct zlib_priv_xform *)op->private_xform;
+ stream = &private_xform->stream;
+ stream->comp(op, &stream->strm);
+ }
+ /* whatever is out of op, put it into completion queue with
+ * its status
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+/** Parse comp xform and set private xform/Stream parameters */
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+ struct zlib_stream *stream)
+{
+ int strategy, level, wbits;
+ z_stream *strm = &stream->strm;
+
+ /* allocate deflate state */
+ strm->zalloc = Z_NULL;
+ strm->zfree = Z_NULL;
+ strm->opaque = Z_NULL;
+
+ switch (xform->type) {
+ case RTE_COMP_COMPRESS:
+ stream->comp = process_zlib_deflate;
+ stream->free = deflateEnd;
+ /** Compression window bits */
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
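+			/* Negative windowBits selects raw deflate, i.e. no
+			 * zlib/gzip wrapper around the compressed data.
+			 */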
+ wbits = -(xform->compress.window_size);
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression algorithm not supported\n");
+ return -1;
+ }
+ /** Compression Level */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ level = Z_DEFAULT_COMPRESSION;
+ break;
+ case RTE_COMP_LEVEL_NONE:
+ level = Z_NO_COMPRESSION;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ level = Z_BEST_SPEED;
+ break;
+ case RTE_COMP_LEVEL_MAX:
+ level = Z_BEST_COMPRESSION;
+ break;
+ default:
+ level = xform->compress.level;
+ if (level < RTE_COMP_LEVEL_MIN ||
+ level > RTE_COMP_LEVEL_MAX) {
+ ZLIB_PMD_ERR("Compression level %d "
+ "not supported\n",
+ level);
+ return -1;
+ }
+ break;
+ }
+ /** Compression strategy */
+ switch (xform->compress.deflate.huffman) {
+ case RTE_COMP_HUFFMAN_DEFAULT:
+ strategy = Z_DEFAULT_STRATEGY;
+ break;
+ case RTE_COMP_HUFFMAN_FIXED:
+ strategy = Z_FIXED;
+ break;
+ case RTE_COMP_HUFFMAN_DYNAMIC:
+ strategy = Z_DEFAULT_STRATEGY;
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression strategy not supported\n");
+ return -1;
+ }
+ if (deflateInit2(strm, level,
+ Z_DEFLATED, wbits,
+ DEF_MEM_LEVEL, strategy) != Z_OK) {
+ ZLIB_PMD_ERR("Deflate init failed\n");
+ return -1;
+ }
+ break;
+
+ case RTE_COMP_DECOMPRESS:
+ stream->comp = process_zlib_inflate;
+ stream->free = inflateEnd;
+ /** window bits */
+ switch (xform->decompress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
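+			/* Raw deflate on the inflate side as well: negative
+			 * windowBits, no zlib/gzip header expected.
+			 */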
+ wbits = -(xform->decompress.window_size);
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression algorithm not supported\n");
+ return -1;
+ }
+
+ if (inflateInit2(strm, wbits) != Z_OK) {
+ ZLIB_PMD_ERR("Inflate init failed\n");
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+
+static uint16_t
+zlib_pmd_enqueue_burst(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zlib_qp *qp = queue_pair;
+ int ret;
+ uint16_t i;
+ uint16_t enqd = 0;
+ for (i = 0; i < nb_ops; i++) {
+ ret = process_zlib_op(qp, ops[i]);
+ if (unlikely(ret < 0)) {
+ /* increment count if failed to push to completion
+ * queue
+ */
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zlib_pmd_dequeue_burst(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zlib_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
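
The enqueue path above processes each op synchronously and parks the result
on the queue pair ring, so a dequeue right after a successful enqueue will
normally return the op. A rough usage sketch (the op, mbufs and priv_xform
handle are assumed to have been allocated already):

    /* Illustrative burst usage against queue pair 0 of device dev_id */
    op->m_src = src_mbuf;
    op->m_dst = dst_mbuf;
    op->src.offset = 0;
    op->src.length = rte_pktmbuf_data_len(src_mbuf);
    op->dst.offset = 0;
    op->flush_flag = RTE_COMP_FLUSH_FINAL;
    op->op_type = RTE_COMP_OP_STATELESS;
    op->private_xform = priv_xform;

    if (rte_compressdev_enqueue_burst(dev_id, 0, &op, 1) == 1)
    	while (rte_compressdev_dequeue_burst(dev_id, 0, &op, 1) == 0)
    		; /* op comes back with op->status and op->produced set */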
+
+static int
+zlib_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device,
+ sizeof(struct zlib_private), init_params);
+ if (dev == NULL) {
+ ZLIB_PMD_ERR("driver %s: create failed", init_params->name);
+ return -ENODEV;
+ }
+
+ dev->dev_ops = rte_zlib_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = zlib_pmd_dequeue_burst;
+ dev->enqueue_burst = zlib_pmd_enqueue_burst;
+
+ return 0;
+}
+
+static int
+zlib_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id()
+ };
+ const char *name;
+ const char *input_args;
+ int retval;
+
+ name = rte_vdev_device_name(vdev);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args);
+ if (retval < 0) {
+ ZLIB_PMD_LOG(ERR,
+			"Failed to parse initialisation arguments [%s]\n",
+ input_args);
+ return -EINVAL;
+ }
+
+ return zlib_create(name, vdev, &init_params);
+}
+
+static int
+zlib_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compressdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compressdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compressdev);
+}
+
+static struct rte_vdev_driver zlib_pmd_drv = {
+ .probe = zlib_probe,
+ .remove = zlib_remove
+};
+
+RTE_PMD_REGISTER_VDEV(COMPRESSDEV_NAME_ZLIB_PMD, zlib_pmd_drv);
+RTE_INIT(zlib_init_log);
+
+static void
+zlib_init_log(void)
+{
+ zlib_logtype_driver = rte_log_register("pmd.compress.zlib");
+ if (zlib_logtype_driver >= 0)
+ rte_log_set_level(zlib_logtype_driver, RTE_LOG_INFO);
+}
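
Besides the EAL --vdev option, the virtual device registered above can be
instantiated at runtime; socket_id is assumed here to be the only device
argument accepted by the compressdev init-params parser used in zlib_probe():

    #include <rte_bus_vdev.h>

    /* Illustrative: create a zlib compressdev instance on socket 0 */
    if (rte_vdev_init("compress_zlib", "socket_id=0") != 0)
    	; /* probe failed: no zlib compressdev was created */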
diff --git a/drivers/compress/zlib/zlib_pmd_ops.c b/drivers/compress/zlib/zlib_pmd_ops.c
new file mode 100644
index 00000000..0a73aed9
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_ops.c
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "zlib_pmd_private.h"
+
+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
+ { /* Deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = (RTE_COMP_FF_NONCOMPRESSED_BLOCKS |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC),
+ .window_size = {
+ .min = 8,
+ .max = 15,
+ .increment = 1
+ },
+ },
+
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+
+};
+
+/** Configure device */
+static int
+zlib_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct rte_mempool *mp;
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct zlib_private *internals = dev->data->dev_private;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "stream_mp_%u", dev->data->dev_id);
+ mp = internals->mp;
+ if (mp == NULL) {
+ mp = rte_mempool_create(mp_name,
+ config->max_nb_priv_xforms +
+ config->max_nb_streams,
+ sizeof(struct zlib_priv_xform),
+ 0, 0, NULL, NULL, NULL,
+ NULL, config->socket_id,
+ 0);
+ if (mp == NULL) {
+ ZLIB_PMD_ERR("Cannot create private xform pool on "
+ "socket %d\n", config->socket_id);
+ return -ENOMEM;
+ }
+ internals->mp = mp;
+ }
+ return 0;
+}
+
+/** Start device */
+zlib_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zlib_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+zlib_pmd_close(struct rte_compressdev *dev)
+{
+ struct zlib_private *internals = dev->data->dev_private;
+ rte_mempool_free(internals->mp);
+ internals->mp = NULL;
+ return 0;
+}
+
+/** Get device statistics */
+static void
+zlib_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zlib_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Get device info */
+static void
+zlib_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ if (dev_info != NULL) {
+ dev_info->driver_name = dev->device->name;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zlib_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zlib_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ rte_ring_free(qp->processed_pkts);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+zlib_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+ struct zlib_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "zlib_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+zlib_pmd_qp_create_processed_pkts_ring(struct zlib_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r = qp->processed_pkts;
+
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZLIB_PMD_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ ZLIB_PMD_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zlib_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct zlib_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zlib_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZLIB PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+		return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zlib_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_pkts = zlib_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ if (qp) {
+ rte_free(qp);
+ qp = NULL;
+ }
+ return -1;
+}
+
+/** Configure stream */
+static int
+zlib_pmd_stream_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **zstream)
+{
+ int ret = 0;
+ struct zlib_stream *stream;
+ struct zlib_private *internals = dev->data->dev_private;
+
+ if (xform == NULL) {
+ ZLIB_PMD_ERR("invalid xform struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(internals->mp, zstream)) {
+ ZLIB_PMD_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ stream = *((struct zlib_stream **)zstream);
+
+ ret = zlib_set_stream_parameters(xform, stream);
+
+ if (ret < 0) {
+		ZLIB_PMD_ERR("failed to configure stream parameters");
+
+ memset(stream, 0, sizeof(struct zlib_stream));
+ /* Return session to mempool */
+ rte_mempool_put(internals->mp, stream);
+ return ret;
+ }
+
+ return 0;
+}
+
+/** Configure private xform */
+static int
+zlib_pmd_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ return zlib_pmd_stream_create(dev, xform, private_xform);
+}
+
+/** Clear the stream memory and return it to the mempool */
+static int
+zlib_pmd_stream_free(__rte_unused struct rte_compressdev *dev,
+ void *zstream)
+{
+ struct zlib_stream *stream = (struct zlib_stream *)zstream;
+ if (!stream)
+ return -EINVAL;
+
+ stream->free(&stream->strm);
+ /* Zero out the whole structure */
+ memset(stream, 0, sizeof(struct zlib_stream));
+ struct rte_mempool *mp = rte_mempool_from_obj(stream);
+ rte_mempool_put(mp, stream);
+
+ return 0;
+}
+
+/** Free the private xform by freeing its underlying stream */
+static int
+zlib_pmd_private_xform_free(struct rte_compressdev *dev,
+ void *private_xform)
+{
+ return zlib_pmd_stream_free(dev, private_xform);
+}
+
+struct rte_compressdev_ops zlib_pmd_ops = {
+ .dev_configure = zlib_pmd_config,
+ .dev_start = zlib_pmd_start,
+ .dev_stop = zlib_pmd_stop,
+ .dev_close = zlib_pmd_close,
+
+ .stats_get = zlib_pmd_stats_get,
+ .stats_reset = zlib_pmd_stats_reset,
+
+ .dev_infos_get = zlib_pmd_info_get,
+
+ .queue_pair_setup = zlib_pmd_qp_setup,
+ .queue_pair_release = zlib_pmd_qp_release,
+
+ .private_xform_create = zlib_pmd_private_xform_create,
+ .private_xform_free = zlib_pmd_private_xform_free,
+
+ .stream_create = NULL,
+ .stream_free = NULL
+};
+
+struct rte_compressdev_ops *rte_zlib_pmd_ops = &zlib_pmd_ops;
diff --git a/drivers/compress/zlib/zlib_pmd_private.h b/drivers/compress/zlib/zlib_pmd_private.h
new file mode 100644
index 00000000..2c6e83d4
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_private.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
+#define _RTE_ZLIB_PMD_PRIVATE_H_
+
+#include <zlib.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#define COMPRESSDEV_NAME_ZLIB_PMD compress_zlib
+/**< ZLIB PMD device name */
+
+#define DEF_MEM_LEVEL 8
+
+int zlib_logtype_driver;
+#define ZLIB_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, zlib_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define ZLIB_PMD_INFO(fmt, args...) \
+ ZLIB_PMD_LOG(INFO, fmt, ## args)
+#define ZLIB_PMD_ERR(fmt, args...) \
+ ZLIB_PMD_LOG(ERR, fmt, ## args)
+#define ZLIB_PMD_WARN(fmt, args...) \
+ ZLIB_PMD_LOG(WARNING, fmt, ## args)
+
+struct zlib_private {
+ struct rte_mempool *mp;
+};
+
+struct zlib_qp {
+ struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+ struct rte_compressdev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+} __rte_cache_aligned;
+
+/* Algorithm handler function prototype */
+typedef void (*comp_func_t)(struct rte_comp_op *op, z_stream *strm);
+
+typedef int (*comp_free_t)(z_stream *strm);
+
+/** ZLIB Stream structure */
+struct zlib_stream {
+ z_stream strm;
+ /**< zlib stream structure */
+ comp_func_t comp;
+ /**< Operation (compression/decompression) */
+ comp_free_t free;
+ /**< Free Operation (compression/decompression) */
+} __rte_cache_aligned;
+
+/** ZLIB private xform structure */
+struct zlib_priv_xform {
+ struct zlib_stream stream;
+} __rte_cache_aligned;
+
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+ struct zlib_stream *stream);
+
+/** Device specific operations function pointer structure */
+extern struct rte_compressdev_ops *rte_zlib_pmd_ops;
+
+#endif /* _RTE_ZLIB_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 628bd142..c480cbd3 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,15 +6,20 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index d06c5444..0a5c1a87 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -3,12 +3,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
# library name
LIB = librte_pmd_aesni_gcm.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_gcm_version.map
# external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 59e504ee..45061669 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -9,8 +9,7 @@
#define LINUX
#endif
-#include <gcm_defines.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
/** Supported vector modes */
enum aesni_gcm_vector_mode {
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 83e54480..752e0cd6 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -31,8 +31,8 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform;
if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
- GCM_LOG_ERR("Only AES GMAC is supported as an "
- "authentication only algorithm");
+ AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an "
+ "authentication only algorithm");
return -ENOTSUP;
}
/* Set IV parameters */
@@ -54,7 +54,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
aead_xform = xform;
if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
- GCM_LOG_ERR("The only combined operation "
+ AESNI_GCM_LOG(ERR, "The only combined operation "
"supported is AES GCM");
return -ENOTSUP;
}
@@ -75,7 +75,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
sess->aad_length = aead_xform->aead.aad_length;
digest_length = aead_xform->aead.digest_length;
} else {
- GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication");
+ AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
return -ENOTSUP;
}
@@ -83,7 +83,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
/* IV check */
if (sess->iv.length != 16 && sess->iv.length != 12 &&
sess->iv.length != 0) {
- GCM_LOG_ERR("Wrong IV length");
+ AESNI_GCM_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
@@ -99,7 +99,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
sess->key = AESNI_GCM_KEY_256;
break;
default:
- GCM_LOG_ERR("Invalid key length");
+ AESNI_GCM_LOG(ERR, "Invalid key length");
return -EINVAL;
}
@@ -109,7 +109,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
if (digest_length != 16 &&
digest_length != 12 &&
digest_length != 8) {
- GCM_LOG_ERR("digest");
+ AESNI_GCM_LOG(ERR, "Invalid digest length");
return -EINVAL;
}
sess->digest_length = digest_length;
@@ -127,7 +127,7 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(sym_op->session != NULL))
sess = (struct aesni_gcm_session *)
- get_session_private_data(
+ get_sym_session_private_data(
sym_op->session,
cryptodev_driver_id);
} else {
@@ -149,8 +149,8 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(sym_op->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(sym_op->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -352,7 +352,7 @@ post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
session->op == AESNI_GMAC_OP_VERIFY) {
uint8_t *digest;
- uint8_t *tag = (uint8_t *)&qp->temp_digest;
+ uint8_t *tag = qp->temp_digest;
if (session->op == AESNI_GMAC_OP_VERIFY)
digest = op->sym->auth.digest.data;
@@ -392,7 +392,7 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct aesni_gcm_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -464,13 +464,13 @@ aesni_gcm_create(const char *name,
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
- GCM_LOG_ERR("AES instructions not supported by CPU");
+ AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU");
return -EFAULT;
}
-
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- GCM_LOG_ERR("driver %s: create failed", init_params->name);
+ AESNI_GCM_LOG(ERR, "driver %s: create failed",
+ init_params->name);
return -ENODEV;
}
@@ -492,7 +492,8 @@ aesni_gcm_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_CPU_AESNI |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
switch (vector_mode) {
case RTE_AESNI_GCM_SSE:
@@ -513,7 +514,13 @@ aesni_gcm_create(const char *name,
internals->vector_mode = vector_mode;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
return 0;
}
@@ -525,8 +532,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct aesni_gcm_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -568,7 +574,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(aesni_gcm_init_log)
+{
+ aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm");
+}
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 6f542137..b6b4dd02 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -143,7 +143,8 @@ aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
dev_info->capabilities = aesni_gcm_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+		/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -183,12 +184,11 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- GCM_LOG_INFO("Reusing existing ring %s for processed"
- " packets", qp->name);
+ AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed"
+ " packets", qp->name);
return r;
}
-
- GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+ AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed"
" packets", qp->name);
return NULL;
}
@@ -242,22 +242,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
@@ -267,14 +251,14 @@ aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the aesni gcm session structure */
static unsigned
-aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct aesni_gcm_session);
}
/** Configure a aesni gcm session from a crypto xform chain */
static int
-aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -284,26 +268,26 @@ aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
struct aesni_gcm_private *internals = dev->data->dev_private;
if (unlikely(sess == NULL)) {
- GCM_LOG_ERR("invalid session struct");
+ AESNI_GCM_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ AESNI_GCM_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
sess_private_data, xform);
if (ret != 0) {
- GCM_LOG_ERR("failed configure session parameters");
+		AESNI_GCM_LOG(ERR, "failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -311,17 +295,17 @@ aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev,
+aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -339,13 +323,11 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
.queue_pair_setup = aesni_gcm_pmd_qp_setup,
.queue_pair_release = aesni_gcm_pmd_qp_release,
- .queue_pair_start = aesni_gcm_pmd_qp_start,
- .queue_pair_stop = aesni_gcm_pmd_qp_stop,
.queue_pair_count = aesni_gcm_pmd_qp_count,
- .session_get_size = aesni_gcm_pmd_session_get_size,
- .session_configure = aesni_gcm_pmd_session_configure,
- .session_clear = aesni_gcm_pmd_session_clear
+ .sym_session_get_size = aesni_gcm_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_gcm_pmd_sym_session_configure,
+ .sym_session_clear = aesni_gcm_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 3d60583b..c13a12a5 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -7,28 +7,24 @@
#include "aesni_gcm_ops.h"
+/*
+ * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
+ * so if macro is not defined, it means that the version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
#define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm
/**< AES-NI GCM PMD device name */
-#define GCM_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
-#define GCM_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
- __func__, __LINE__, ## args)
-
-#define GCM_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define GCM_LOG_INFO(fmt, args...)
-#define GCM_LOG_DBG(fmt, args...)
-#endif
+/** AES-NI GCM PMD LOGTYPE DRIVER */
+int aesni_gcm_logtype_driver;
+#define AESNI_GCM_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 16
@@ -39,8 +35,6 @@ struct aesni_gcm_private {
/**< Vector mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
struct aesni_gcm_qp {
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index d9f8fb98..806a95eb 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -3,12 +3,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
# library name
LIB = librte_pmd_aesni_mb.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_mb_version.map
# external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 4d596e85..5a1cba6c 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -9,8 +9,7 @@
#define LINUX
#endif
-#include <mb_mgr.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
enum aesni_mb_vector_mode {
RTE_AESNI_MB_NOT_SUPPORTED = 0,
@@ -34,9 +33,12 @@ typedef void (*aes_keyexp_192_t)
(const void *key, void *enc_exp_keys, void *dec_exp_keys);
typedef void (*aes_keyexp_256_t)
(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-
typedef void (*aes_xcbc_expand_key_t)
(const void *key, void *exp_k1, void *k2, void *k3);
+typedef void (*aes_cmac_sub_key_gen_t)
+ (const void *exp_key, void *k2, void *k3);
+typedef void (*aes_cmac_keyexp_t)
+ (const void *key, void *keyexp);
/** Multi-buffer library function pointer table */
struct aesni_mb_op_fns {
@@ -78,9 +80,12 @@ struct aesni_mb_op_fns {
/**< AES192 key expansions */
aes_keyexp_256_t aes256;
/**< AES256 key expansions */
-
aes_xcbc_expand_key_t aes_xcbc;
- /**< AES XCBC key expansions */
+	/**< AES XCBC key expansions */
+ aes_cmac_sub_key_gen_t aes_cmac_subkey;
+ /**< AES CMAC subkey expansions */
+ aes_cmac_keyexp_t aes_cmac_expkey;
+ /**< AES CMAC key expansions */
} keyexp;
/**< Key expansion functions */
} aux;
@@ -123,7 +128,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_sse,
aes_keyexp_192_sse,
aes_keyexp_256_sse,
- aes_xcbc_expand_key_sse
+ aes_xcbc_expand_key_sse,
+ aes_cmac_subkey_gen_sse,
+ aes_keyexp_128_enc_sse
}
}
},
@@ -148,7 +155,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx,
aes_keyexp_192_avx,
aes_keyexp_256_avx,
- aes_xcbc_expand_key_avx
+ aes_xcbc_expand_key_avx,
+ aes_cmac_subkey_gen_avx,
+ aes_keyexp_128_enc_avx
}
}
},
@@ -173,7 +182,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx2,
aes_keyexp_192_avx2,
aes_keyexp_256_avx2,
- aes_xcbc_expand_key_avx2
+ aes_xcbc_expand_key_avx2,
+ aes_cmac_subkey_gen_avx2,
+ aes_keyexp_128_enc_avx2
}
}
},
@@ -198,7 +209,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx512,
aes_keyexp_192_avx512,
aes_keyexp_256_avx512,
- aes_xcbc_expand_key_avx512
+ aes_xcbc_expand_key_avx512,
+ aes_cmac_subkey_gen_avx512,
+ aes_keyexp_128_enc_avx512
}
}
}
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 636c6c37..93dc7a44 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -2,7 +2,7 @@
* Copyright(c) 2015-2017 Intel Corporation
*/
-#include <des.h>
+#include <intel-ipsec-mb.h>
#include <rte_common.h>
#include <rte_hexdump.h>
@@ -108,7 +108,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
}
if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
- MB_LOG_ERR("Crypto xform struct not of type auth");
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
return -1;
}
@@ -124,6 +124,17 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
return 0;
}
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+ sess->auth.algo = AES_CMAC;
+ (*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
+ sess->auth.cmac.expkey);
+
+ (*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
+ sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+ return 0;
+ }
+
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_MD5_HMAC:
sess->auth.algo = MD5;
@@ -150,7 +161,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
hash_oneblock_fn = mb_ops->aux.one_block.sha512;
break;
default:
- MB_LOG_ERR("Unsupported authentication algorithm selection");
+ AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
return -ENOTSUP;
}
@@ -171,6 +182,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
const struct rte_crypto_sym_xform *xform)
{
uint8_t is_aes = 0;
+ uint8_t is_3DES = 0;
aes_keyexp_t aes_keyexp_fn;
if (xform == NULL) {
@@ -179,7 +191,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
}
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
- MB_LOG_ERR("Crypto xform struct not of type cipher");
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
return -EINVAL;
}
@@ -192,7 +204,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->cipher.direction = DECRYPT;
break;
default:
- MB_LOG_ERR("Invalid cipher operation parameter");
+ AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
return -EINVAL;
}
@@ -216,8 +228,12 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
sess->cipher.mode = DOCSIS_DES;
break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.mode = DES3;
+ is_3DES = 1;
+ break;
default:
- MB_LOG_ERR("Unsupported cipher mode parameter");
+ AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
return -ENOTSUP;
}
@@ -241,7 +257,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
break;
default:
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
@@ -250,9 +266,52 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->cipher.expanded_aes_keys.encode,
sess->cipher.expanded_aes_keys.decode);
+ } else if (is_3DES) {
+ uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+ sess->cipher.exp_3des_keys.key[1],
+ sess->cipher.exp_3des_keys.key[2]};
+
+ switch (xform->cipher.key.length) {
+ case 24:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+ des_key_schedule(keys[2], xform->cipher.key.data+16);
+
+ /* Initialize keys - 24 bytes: [K1-K2-K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+ break;
+ case 16:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+
+ /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ case 8:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+
+ /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ sess->cipher.key_length_in_bytes = 24;
+#else
+ sess->cipher.key_length_in_bytes = 8;
+#endif
} else {
if (xform->cipher.key.length != 8) {
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
sess->cipher.key_length_in_bytes = 8;
@@ -283,7 +342,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
break;
default:
- MB_LOG_ERR("Invalid aead operation parameter");
+ AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
return -EINVAL;
}
@@ -293,7 +352,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->auth.algo = AES_CCM;
break;
default:
- MB_LOG_ERR("Unsupported aead mode parameter");
+ AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
return -ENOTSUP;
}
@@ -309,7 +368,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
break;
default:
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
@@ -338,16 +397,19 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
+ sess->auth.digest_len = xform->next->auth.digest_length;
break;
case AESNI_MB_OP_HASH_ONLY:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = NULL;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_ONLY:
/*
@@ -366,18 +428,18 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
case AESNI_MB_OP_AEAD_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
sess->aead.aad_len = xform->aead.aad_length;
- sess->aead.digest_len = xform->aead.digest_length;
+ sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_AEAD_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
sess->aead.aad_len = xform->aead.aad_length;
- sess->aead.digest_len = xform->aead.digest_length;
+ sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_NOT_SUPPORTED:
default:
- MB_LOG_ERR("Unsupported operation chain order parameter");
+ AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
@@ -386,14 +448,14 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported authentication parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
return ret;
}
ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
cipher_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported cipher parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
return ret;
}
@@ -401,7 +463,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
ret = aesni_mb_set_session_aead_parameters(mb_ops, sess,
aead_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported aead parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
return ret;
}
}
@@ -444,7 +506,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
sess = (struct aesni_mb_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -466,8 +528,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -510,22 +572,39 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->cipher_mode = session->cipher.mode;
job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
- job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
- job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;
+
+ if (job->cipher_mode == DES3) {
+ job->aes_enc_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ job->aes_dec_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ } else {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ }
+
/* Set authentication parameters */
job->hash_alg = session->auth.algo;
if (job->hash_alg == AES_XCBC) {
- job->_k1_expanded = session->auth.xcbc.k1_expanded;
- job->_k2 = session->auth.xcbc.k2;
- job->_k3 = session->auth.xcbc.k3;
+ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+ job->u.XCBC._k2 = session->auth.xcbc.k2;
+ job->u.XCBC._k3 = session->auth.xcbc.k3;
} else if (job->hash_alg == AES_CCM) {
job->u.CCM.aad = op->sym->aead.aad.data + 18;
job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+ } else if (job->hash_alg == AES_CMAC) {
+ job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+ job->u.CMAC._skey1 = session->auth.cmac.skey1;
+ job->u.CMAC._skey2 = session->auth.cmac.skey2;
} else {
- job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
- job->hashed_auth_key_xor_opad = session->auth.pads.outer;
+ job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+ job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
}
/* Mutable crypto operation parameters */
@@ -536,7 +615,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
char *odata = rte_pktmbuf_append(m_dst,
rte_pktmbuf_data_len(op->sym->m_src));
if (odata == NULL) {
- MB_LOG_ERR("failed to allocate space in destination "
+ AESNI_MB_LOG(ERR, "failed to allocate space in destination "
"mbuf for source data");
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
return -1;
@@ -568,11 +647,11 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
* Multi-buffer library current only support returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
- if (job->hash_alg != AES_CCM)
+ if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC)
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
else
- job->auth_tag_output_len_in_bytes = session->aead.digest_len;
+ job->auth_tag_output_len_in_bytes = session->auth.digest_len;
/* Set IV parameters */
@@ -639,7 +718,7 @@ static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
- struct aesni_mb_session *sess = get_session_private_data(
+ struct aesni_mb_session *sess = get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
@@ -663,7 +742,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct aesni_mb_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -702,7 +781,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
if (processed_jobs == nb_ops)
break;
- job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_completed_job)(qp->mb_mgr);
}
return processed_jobs;
@@ -715,7 +794,7 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
int processed_ops = 0;
/* Flush the remaining jobs */
- JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(qp->mb_mgr);
if (job)
processed_ops += handle_completed_jobs(qp, job,
@@ -760,14 +839,14 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
break;
/* Get next free mb job struct from mb manager */
- job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
if (unlikely(job == NULL)) {
/* if no free mb job structs we need to flush mb_mgr */
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
(nb_ops - processed_jobs) - 1);
- job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
}
retval = set_mb_job_params(job, qp, op, &digest_idx);
@@ -777,7 +856,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
}
/* Submit job to multi-buffer for processing */
- job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.submit)(qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
@@ -813,13 +892,13 @@ cryptodev_aesni_mb_create(const char *name,
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
- MB_LOG_ERR("AES instructions not supported by CPU");
+ AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
return -EFAULT;
}
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- MB_LOG_ERR("failed to create cryptodev vdev");
+ AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
return -ENODEV;
}
@@ -866,7 +945,13 @@ cryptodev_aesni_mb_create(const char *name,
internals->vector_mode = vector_mode;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
return 0;
}
@@ -878,8 +963,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct aesni_mb_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name, *args;
int retval;
@@ -892,7 +976,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
if (retval) {
- MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n",
+ AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
args);
return -EINVAL;
}
@@ -928,8 +1012,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
- cryptodev_aesni_mb_pmd_drv,
+ cryptodev_aesni_mb_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(aesni_mb_init_log)
+{
+ aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
+}
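
The RTE_INIT hook above follows DPDK's dynamic-logging pattern: register a named logtype once at startup, then route every message through it so users can tune verbosity per driver (e.g. --log-level=pmd.crypto.aesni_mb:debug). A self-contained sketch; the logtype name pmd.crypto.example and all identifiers are illustrative, not part of this patch.

#include <rte_common.h>
#include <rte_log.h>

static int example_logtype;

#define EXAMPLE_LOG(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, example_logtype, \
		"%s(): " fmt "\n", __func__, ## __VA_ARGS__)

RTE_INIT(example_log_init)
{
	example_logtype = rte_log_register("pmd.crypto.example");
	if (example_logtype >= 0)
		rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
}
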
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 9d685a09..ab26e5ae 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -239,6 +239,26 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
{ /* DES DOCSIS BPI */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -289,8 +309,27 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
-
-
+ { /* AES CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -368,7 +407,8 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = aesni_mb_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -383,6 +423,8 @@ aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
r = rte_ring_lookup(qp->name);
if (r)
rte_ring_free(r);
+ if (qp->mb_mgr)
+ free_mb_mgr(qp->mb_mgr);
rte_free(qp);
dev->data->queue_pairs[qp_id] = NULL;
}
@@ -422,12 +464,12 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
r = rte_ring_lookup(ring_name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- MB_LOG_INFO("Reusing existing ring %s for processed ops",
+ AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
ring_name);
return r;
}
- MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
+ AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
ring_name);
return NULL;
}
@@ -444,6 +486,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
{
struct aesni_mb_qp *qp = NULL;
struct aesni_mb_private *internals = dev->data->dev_private;
+ int ret = -1;
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
@@ -462,12 +505,20 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
goto qp_setup_cleanup;
+ qp->mb_mgr = alloc_mb_mgr(0);
+ if (qp->mb_mgr == NULL) {
+ ret = -ENOMEM;
+ goto qp_setup_cleanup;
+ }
+
qp->op_fns = &job_ops[internals->vector_mode];
qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
"ingress", qp_conf->nb_descriptors, socket_id);
- if (qp->ingress_queue == NULL)
+ if (qp->ingress_queue == NULL) {
+ ret = -1;
goto qp_setup_cleanup;
+ }
qp->sess_mp = session_pool;
@@ -479,30 +530,17 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
"digest_mp_%u_%u", dev->data->dev_id, qp_id);
/* Initialise multi-buffer manager */
- (*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
+ (*qp->op_fns->job.init_mgr)(qp->mb_mgr);
return 0;
qp_setup_cleanup:
- if (qp)
+ if (qp) {
+ if (qp->mb_mgr)
+ free_mb_mgr(qp->mb_mgr);
rte_free(qp);
+ }
- return -1;
-}
-
-/** Start queue pair */
-static int
-aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
+ return ret;
}
/** Return the number of allocated queue pairs */
@@ -514,14 +552,14 @@ aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the aesni multi-buffer session structure */
static unsigned
-aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct aesni_mb_session);
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
static int
-aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -531,27 +569,27 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
int ret;
if (unlikely(sess == NULL)) {
- MB_LOG_ERR("invalid session struct");
+ AESNI_MB_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ AESNI_MB_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
sess_private_data, xform);
if (ret != 0) {
- MB_LOG_ERR("failed configure session parameters");
+ AESNI_MB_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -559,17 +597,17 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_mb_pmd_session_clear(struct rte_cryptodev *dev,
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct aesni_mb_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -587,13 +625,11 @@ struct rte_cryptodev_ops aesni_mb_pmd_ops = {
.queue_pair_setup = aesni_mb_pmd_qp_setup,
.queue_pair_release = aesni_mb_pmd_qp_release,
- .queue_pair_start = aesni_mb_pmd_qp_start,
- .queue_pair_stop = aesni_mb_pmd_qp_stop,
.queue_pair_count = aesni_mb_pmd_qp_count,
- .session_get_size = aesni_mb_pmd_session_get_size,
- .session_configure = aesni_mb_pmd_session_configure,
- .session_clear = aesni_mb_pmd_session_clear
+ .sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_mb_pmd_sym_session_configure,
+ .sym_session_clear = aesni_mb_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
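
With 3DES-CBC and AES-CMAC now advertised in the capability table, applications can probe for support through the standard query API instead of hard-coding driver knowledge. A sketch under that assumption; the helper name is illustrative.

#include <rte_cryptodev.h>

static int
supports_3des_cbc(uint8_t dev_id, uint16_t key_size)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.algo.cipher = RTE_CRYPTO_CIPHER_3DES_CBC,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return 0;
	/* 8-byte IV, as advertised in the table above */
	return rte_cryptodev_sym_capability_check_cipher(cap,
			key_size, 8) == 0;
}
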
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 948e091c..70e9d18e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -7,28 +7,26 @@
#include "aesni_mb_ops.h"
+/*
+ * The IMB_VERSION_NUM macro was introduced in Multi-buffer version 0.50,
+ * so if the macro is not defined, the library version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< AES-NI Multi buffer PMD device name */
-#define MB_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
-#define MB_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- CRYPTODEV_NAME_AESNI_MB_PMD, \
- __func__, __LINE__, ## args)
-
-#define MB_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- CRYPTODEV_NAME_AESNI_MB_PMD, \
- __func__, __LINE__, ## args)
-#else
-#define MB_LOG_INFO(fmt, args...)
-#define MB_LOG_DBG(fmt, args...)
-#endif
+/** AESNI_MB PMD driver logtype */
+int aesni_mb_logtype_driver;
+
+#define AESNI_MB_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
@@ -66,8 +64,9 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
[SHA_384] = 24,
[SHA_512] = 32,
[AES_XCBC] = 12,
+ [AES_CMAC] = 16,
[AES_CCM] = 8,
- [NULL_HASH] = 0
+ [NULL_HASH] = 0
};
/**
@@ -91,7 +90,8 @@ static const unsigned auth_digest_byte_lengths[] = {
[SHA_384] = 48,
[SHA_512] = 64,
[AES_XCBC] = 16,
- [NULL_HASH] = 0
+ [AES_CMAC] = 16,
+ [NULL_HASH] = 0
};
/**
@@ -122,8 +122,6 @@ struct aesni_mb_private {
/**< CPU vector instruction set mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** AESNI Multi buffer queue pair */
@@ -134,7 +132,7 @@ struct aesni_mb_qp {
/**< Unique Queue Pair Name */
const struct aesni_mb_op_fns *op_fns;
/**< Vector mode dependent pointer table of the multi-buffer APIs */
- MB_MGR mb_mgr;
+ MB_MGR *mb_mgr;
/**< Multi-buffer instance */
struct rte_ring *ingress_queue;
/**< Ring for placing operations ready for processing */
@@ -171,12 +169,18 @@ struct aesni_mb_session {
uint64_t key_length_in_bytes;
- struct {
- uint32_t encode[60] __rte_aligned(16);
- /**< encode key */
- uint32_t decode[60] __rte_aligned(16);
- /**< decode key */
- } expanded_aes_keys;
+ union {
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ /**< encode key */
+ uint32_t decode[60] __rte_aligned(16);
+ /**< decode key */
+ } expanded_aes_keys;
+ struct {
+ const void *ks_ptr[3];
+ uint64_t key[3][16];
+ } exp_3des_keys;
+ };
/**< Expanded AES keys - Allocating space to
* contain the maximum expanded key size which
* is 240 bytes for 256 bit AES, calculate by:
@@ -211,14 +215,24 @@ struct aesni_mb_session {
uint8_t k3[16] __rte_aligned(16);
/**< k3. */
} xcbc;
+
+ struct {
+ uint32_t expkey[60] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint32_t skey1[4] __rte_aligned(16);
+ /**< subkey 1. */
+ uint32_t skey2[4] __rte_aligned(16);
+ /**< subkey 2. */
+ } cmac;
- /**< Expanded XCBC authentication keys */
+ /**< Expanded XCBC/CMAC authentication keys */
};
+ /** digest size */
+ uint16_t digest_len;
+
} auth;
struct {
/** AAD data length */
uint16_t aad_len;
- /** digest size */
- uint16_t digest_len;
} aead;
} __rte_cache_aligned;
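
A sketch of why exp_3des_keys pairs schedule storage (key[3][16]) with a pointer table (ks_ptr[3]): shorter keys alias earlier schedules instead of duplicating them, matching the 16-byte case in the session-setup hunk above. The helper name is illustrative; des_key_schedule() is the intel-ipsec-mb call this patch already uses.

#include <intel-ipsec-mb.h>

/* expand a 16-byte (2-key) 3DES key into an EDE pointer table */
static void
expand_2key_3des(const uint8_t *key, uint64_t ks[3][16],
		const void *ks_ptr[3])
{
	des_key_schedule(ks[0], key);      /* K1 */
	des_key_schedule(ks[1], key + 8);  /* K2 */
	ks_ptr[0] = ks[0];
	ks_ptr[1] = ks[1];
	ks_ptr[2] = ks[0];                 /* K3 = K1: reuse the schedule */
}
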
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 59fffcf1..9d15fee5 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -502,7 +502,7 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
/* get existing session */
if (likely(op->sym->session != NULL)) {
sess = (struct armv8_crypto_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
}
@@ -526,8 +526,8 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -654,7 +654,7 @@ process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct armv8_crypto_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -779,7 +779,6 @@ cryptodev_armv8_crypto_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
@@ -800,8 +799,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
"",
sizeof(struct armv8_crypto_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -848,7 +846,6 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 3817ad7b..ae03117e 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -27,9 +27,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 1,
.max = 32,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -154,7 +154,8 @@ armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = armv8_crypto_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -257,22 +258,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
@@ -282,14 +267,14 @@ armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the session structure */
static unsigned
-armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+armv8_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct armv8_crypto_session);
}
/** Configure the session from a crypto xform chain */
static int
-armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev,
+armv8_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -317,7 +302,7 @@ armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev,
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -325,17 +310,17 @@ armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+armv8_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct armv8_crypto_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -353,13 +338,11 @@ struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
.queue_pair_setup = armv8_crypto_pmd_qp_setup,
.queue_pair_release = armv8_crypto_pmd_qp_release,
- .queue_pair_start = armv8_crypto_pmd_qp_start,
- .queue_pair_stop = armv8_crypto_pmd_qp_stop,
.queue_pair_count = armv8_crypto_pmd_qp_count,
- .session_get_size = armv8_crypto_pmd_session_get_size,
- .session_configure = armv8_crypto_pmd_session_configure,
- .session_clear = armv8_crypto_pmd_session_clear
+ .sym_session_get_size = armv8_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = armv8_crypto_pmd_sym_session_configure,
+ .sym_session_clear = armv8_crypto_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;
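
The widened digest_size ranges above (min 1, increment 1) let callers request truncated tags rather than only full-length ones. A sketch of an auth xform asking for HMAC-SHA1-96; the key fields are omitted and must be supplied by the caller.

struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_length = 12,	/* 96-bit truncated tag */
		/* .key.data and .key.length set by the caller */
	},
};
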
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
index b8966e93..7feb021d 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_private.h
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -106,8 +106,6 @@ typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
struct armv8_crypto_private {
unsigned int max_nb_qpairs;
/**< Max number of queue pairs */
- unsigned int max_nb_sessions;
- /**< Max number of sessions */
};
/** ARMv8 crypto queue pair */
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 00000000..f51d170f
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library dependencies
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 00000000..19ae9153
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,2951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <openssl/sha.h>
+#include <openssl/cmac.h> /*sub key apis*/
+#include <openssl/evp.h> /*sub key apis*/
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA1_H4, SHA1_H3,
+ SHA1_H2, SHA1_H1,
+ SHA1_H0, 0x0U,
+ 0x0U, 0x0U,
+};
+
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA224_H7, SHA224_H6,
+ SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2,
+ SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA256_H7, SHA256_H6,
+ SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2,
+ SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA384_H7, SHA384_H6,
+ SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2,
+ SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA512_H7, SHA512_H6,
+ SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2,
+ SHA512_H1, SHA512_H0,
+};
+
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+ (((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+ uint64_t saved;
+ /**
+ * The portion of the input message that we
+ * didn't consume yet
+ */
+ union {
+ uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+ /* Keccak's state */
+ uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+ /**< total 200-byte context size */
+ };
+ unsigned int byteIndex;
+ /**
+ * 0..7--the next byte after the set one
+ * (starts from 0; 0--none are buffered)
+ */
+ unsigned int wordIndex;
+ /**
+ * 0..24--the next word to integrate input
+ * (starts from 0)
+ */
+ unsigned int capacityWords;
+ /**
+ * the double size of the hash output in
+ * words (e.g. 16 for Keccak 512)
+ */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+ (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+ SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+ SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+ SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+ SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+ SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+ SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+ SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+ SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+ SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+ SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+ 18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+ 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+ 14, 22, 9, 6, 1
+};
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+ if (xform == NULL)
+ return res;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return CCP_CMD_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return CCP_CMD_HASH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return CCP_CMD_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return CCP_CMD_CIPHER_HASH;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return CCP_CMD_COMBINED;
+ return res;
+}
+
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static void
+keccakf(uint64_t s[25])
+{
+ int i, j, round;
+ uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+ /* Theta */
+ for (i = 0; i < 5; i++)
+ bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+ s[i + 20];
+
+ for (i = 0; i < 5; i++) {
+ t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+ for (j = 0; j < 25; j += 5)
+ s[j + i] ^= t;
+ }
+
+ /* Rho Pi */
+ t = s[1];
+ for (i = 0; i < 24; i++) {
+ j = keccakf_piln[i];
+ bc[0] = s[j];
+ s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+ t = bc[0];
+ }
+
+ /* Chi */
+ for (j = 0; j < 25; j += 5) {
+ for (i = 0; i < 5; i++)
+ bc[i] = s[j + i];
+ for (i = 0; i < 5; i++)
+ s[j + i] ^= (~bc[(i + 1) % 5]) &
+ bc[(i + 2) % 5];
+ }
+
+ /* Iota */
+ s[0] ^= keccakf_rndc[round];
+ }
+}
+
+static void
+sha3_Init224(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+/* Absorb input into the Keccak sponge: buffer partial 64-bit words,
+ * XOR complete words into the state, and run keccakf() whenever the
+ * rate portion of the state fills up.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+ unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+ size_t words;
+ unsigned int tail;
+ size_t i;
+ const uint8_t *buf = bufIn;
+
+ if (len < old_tail) {
+ while (len--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+ return;
+ }
+
+ if (old_tail) {
+ len -= old_tail;
+ while (old_tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+
+ ctx->s[ctx->wordIndex] ^= ctx->saved;
+ ctx->byteIndex = 0;
+ ctx->saved = 0;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ words = len / sizeof(uint64_t);
+ tail = len - words * sizeof(uint64_t);
+
+ for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+ const uint64_t t = (uint64_t) (buf[0]) |
+ ((uint64_t) (buf[1]) << 8 * 1) |
+ ((uint64_t) (buf[2]) << 8 * 2) |
+ ((uint64_t) (buf[3]) << 8 * 3) |
+ ((uint64_t) (buf[4]) << 8 * 4) |
+ ((uint64_t) (buf[5]) << 8 * 5) |
+ ((uint64_t) (buf[6]) << 8 * 6) |
+ ((uint64_t) (buf[7]) << 8 * 7);
+ ctx->s[ctx->wordIndex] ^= t;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ while (tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init224(ctx);
+ sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init256(ctx);
+ sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init384(ctx);
+ sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init512(ctx);
+ sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+ uint8_t ipad[sess->auth.block_size];
+ uint8_t opad[sess->auth.block_size];
+ uint8_t *ipad_t, *opad_t;
+ uint32_t *hash_value_be32, hash_temp32[8];
+ uint64_t *hash_value_be64, hash_temp64[8];
+ int i, count;
+ uint8_t *hash_value_sha3;
+
+ opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+ /* the key is assumed to be padded out to the algorithm's block size */
+ for (i = 0; i < sess->auth.block_size; i++) {
+ ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+ opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+ }
+
+ switch (sess->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ count = SHA1_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_224(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_224(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_256(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_256(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_384(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_384(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_512(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_512(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ default:
+ CCP_LOG_ERR("Invalid auth algo");
+ return -1;
+ }
+}
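
In miniature, what generate_partial_hash() precomputes: HMAC's inner and outer midstates come from one compression over the key XORed with the 0x36/0x5C pads, so per-packet work reduces to hashing data between two stored states. A sketch for SHA-1 reusing the helpers above; the function name is illustrative and inner_state/outer_state must each hold SHA_DIGEST_LENGTH bytes.

static int
precompute_hmac_sha1(const uint8_t *key, uint8_t *inner_state,
		uint8_t *outer_state)
{
	uint8_t ipad[SHA1_BLOCK_SIZE];
	uint8_t opad[SHA1_BLOCK_SIZE];
	int i;

	/* key is assumed already padded to the 64-byte block size */
	for (i = 0; i < SHA1_BLOCK_SIZE; i++) {
		ipad[i] = key[i] ^ HMAC_IPAD_VALUE;  /* 0x36 */
		opad[i] = key[i] ^ HMAC_OPAD_VALUE;  /* 0x5C */
	}
	/* one compression each; later hashing resumes from these */
	if (partial_hash_sha1(ipad, inner_state))
		return -1;
	return partial_hash_sha1(opad, outer_state);
}
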
+
+/* derive one CMAC subkey (K1 from E(0), then K2 from K1): left-shift by one bit, XOR in R if the MSB was set */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+ int i;
+ /* Shift block to left, including carry */
+ for (i = 0; i < bl; i++) {
+ k[i] = l[i] << 1;
+ if (i < bl - 1 && l[i + 1] & 0x80)
+ k[i] |= 1;
+ }
+ /* If MSB set fixup with R */
+ if (l[0] & 0x80)
+ k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+ const EVP_CIPHER *algo;
+ EVP_CIPHER_CTX *ctx;
+ unsigned char *ccp_ctx;
+ size_t i;
+ int dstlen, totlen;
+ unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+ unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+ unsigned char k1[AES_BLOCK_SIZE] = {0};
+ unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+ if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+ algo = EVP_aes_128_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+ algo = EVP_aes_192_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+ algo = EVP_aes_256_cbc();
+ else {
+ CCP_LOG_ERR("Invalid CMAC type length");
+ return -1;
+ }
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx) {
+ CCP_LOG_ERR("ctx creation failed");
+ return -1;
+ }
+ if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+ (unsigned char *)zero_iv) <= 0)
+ goto key_generate_err;
+ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+ AES_BLOCK_SIZE) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto key_generate_err;
+
+ memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+ prepare_key(k1, dst, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k1[i];
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+ (2 * CCP_SB_BYTES) - 1);
+ prepare_key(k2, k1, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k2[i];
+
+ EVP_CIPHER_CTX_free(ctx);
+
+ return 0;
+
+key_generate_err:
+ CCP_LOG_ERR("CMAC Init failed");
+ return -1;
+}
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ size_t i, j, x;
+
+ cipher_xform = &xform->cipher;
+
+ /* set cipher direction */
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ else
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+ /* set cipher key */
+ sess->cipher.key_length = cipher_xform->key.length;
+ rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+ cipher_xform->key.length);
+
+ /* set iv parameters */
+ sess->iv.offset = cipher_xform->iv.offset;
+ sess->iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+ sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_3DES;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo");
+ return -1;
+ }
+
+ switch (sess->cipher.engine) {
+ case CCP_ENGINE_AES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length ; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ break;
+ case CCP_ENGINE_3DES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+ for (i = 0; i < 8; i++)
+ sess->cipher.key_ccp[(8 + x) - i - 1] =
+ sess->cipher.key[i + x];
+ break;
+ default:
+ CCP_LOG_ERR("Invalid CCP Engine");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ size_t i;
+
+ auth_xform = &xform->auth;
+
+ sess->auth.digest_length = auth_xform->digest_length;
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ if (sess->auth_opt) {
+ sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ MD5_DIGEST_SIZE);
+ sess->auth.key_length = auth_xform->key.length;
+ sess->auth.block_size = MD5_BLOCK_SIZE;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else
+ return -1; /* HMAC MD5 not supported on CCP */
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx = (void *)ccp_sha1_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx = (void *)ccp_sha224_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+ if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx = (void *)ccp_sha256_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+ if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx = (void *)ccp_sha384_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+ if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx = (void *)ccp_sha512_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+ if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+ sess->auth.key_length = auth_xform->key.length;
+ /* padding and hash result */
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = AES_BLOCK_SIZE;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ if (sess->auth.key_length == 16)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->auth.key_length == 24)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->auth.key_length == 32)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid CMAC key length");
+ return -1;
+ }
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ sess->auth.key_length);
+ for (i = 0; i < sess->auth.key_length; i++)
+ sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+ sess->auth.key[i];
+ if (generate_cmac_subkeys(sess))
+ return -1;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported hash algo");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ size_t i;
+
+ aead_xform = &xform->aead;
+
+ sess->cipher.key_length = aead_xform->key.length;
+ rte_memcpy(sess->cipher.key, aead_xform->key.data,
+ aead_xform->key.length);
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ } else {
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ }
+ sess->aead_algo = aead_xform->algo;
+ sess->auth.aad_length = aead_xform->aad_length;
+ sess->auth.digest_length = aead_xform->digest_length;
+
+ /* set iv parameters */
+ sess->iv.offset = aead_xform->iv.offset;
+ sess->iv.length = aead_xform->iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid aead key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = 0;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ sess->cmd_id = CCP_CMD_COMBINED;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret = 0;
+
+ sess->auth_opt = internals->auth_opt;
+ sess->cmd_id = ccp_get_cmd_id(xform);
+
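+ /* dispatch on the command type derived from the xform chain */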
+ switch (sess->cmd_id) {
+ case CCP_CMD_CIPHER:
+ cipher_xform = xform;
+ break;
+ case CCP_CMD_AUTH:
+ auth_xform = xform;
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case CCP_CMD_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+ if (cipher_xform) {
+ ret = ccp_configure_session_cipher(sess, cipher_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+ return ret;
+ }
+ }
+ if (auth_xform) {
+ ret = ccp_configure_session_auth(sess, auth_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported auth parameters");
+ return ret;
+ }
+ }
+ if (aead_xform) {
+ ret = ccp_configure_session_aead(sess, aead_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ count = 1;
+ /**< only op */
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ }
+ return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ count = 3;
+ /**< op + LSB passthru copy to/from */
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0)
+ count = 6;
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ /**
+ * 1. Load PHash1 = H(k ^ ipad) to LSB
+ * 2. generate IHash = H(hash on message with PHash1
+ * as init values)
+ * 3. Retrieve IHash (2 slots for 384/512)
+ * 4. Load PHash2 = H(k ^ opad) to LSB
+ * 5. generate FHash = H(hash on IHash with PHash2
+ * as init value)
+ * 6. Retrieve HMAC output from LSB to host memory
+ */
+ if (session->auth_opt == 0)
+ count = 7;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ count = 1;
+ /**< only op; ctx and dst in host memory */
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ count = 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ count = 4;
+ /**
+ * 1. Op to perform IHash
+ * 2.-3. Retrieve IHash from LSB to host memory
+ * (two slots for 384/512)
+ * 4. Perform final hash
+ */
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+ /**
+ * 1. Passthru to load the IV
+ * 2. CMAC op on the block-aligned data
+ * 3. extra descriptor in the padding case
+ * (k1/k2(255:128) with iv(127:0))
+ * 4. Passthru to retrieve the result
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ }
+
+ return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->aead_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ }
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ count = 5;
+ /**
+ * 1. Passthru iv
+ * 2. Hash AAD
+ * 3. GCTR
+ * 4. Reload passthru
+ * 5. Hash Final tag
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+ session->auth.algo);
+ }
+ return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ count = ccp_cipher_slot(session);
+ break;
+ case CCP_CMD_AUTH:
+ count = ccp_auth_slot(session);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ case CCP_CMD_HASH_CIPHER:
+ count = ccp_cipher_slot(session);
+ count += ccp_auth_slot(session);
+ break;
+ case CCP_CMD_COMBINED:
+ count = ccp_aead_slot(session);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+
+ }
+
+ return count;
+}
+
+static int
+algo_select(int sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ switch (sessalgo) {
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv,
+ EVP_PKEY *pkey,
+ int srclen,
+ EVP_MD_CTX *ctx,
+ const EVP_MD *algo,
+ uint16_t d_len)
+{
+ size_t dstlen;
+ unsigned char temp_dst[64];
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ memcpy(dst, temp_dst, d_len);
+ return 0;
+process_auth_err:
+ CCP_LOG_ERR("Process cpu auth failed");
+ return -EINVAL;
+}
+
+static int cpu_crypto_auth(struct ccp_qp *qp,
+ struct rte_crypto_op *op,
+ struct ccp_session *sess,
+ EVP_MD_CTX *ctx)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+ struct rte_mbuf *mbuf_src, *mbuf_dst;
+ const EVP_MD *algo = NULL;
+ EVP_PKEY *pkey;
+
+ algo_select(sess->auth.algo, &algo);
+ pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+ sess->auth.key_length);
+ mbuf_src = op->sym->m_src;
+ mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+ } else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL) {
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+ }
+ status = process_cpu_auth_hmac(src, dst, NULL,
+ pkey, srclen,
+ ctx,
+ algo,
+ sess->auth.digest_length);
+ if (status) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ EVP_PKEY_free(pkey);
+ return status;
+ }
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ EVP_PKEY_free(pkey);
+ return 0;
+}
+
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_desc *desc;
+ union ccp_function function;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 0;
+ CCP_CMD_EOM(desc) = 0;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+ CCP_PT_BITWISE(&function) = pst->bit_mod;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = pst->len;
+
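+ /* dir = 1: host memory to LSB; dir = 0: LSB back to host memory */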
+ if (pst->dir) {
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+ } else {
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = 0;
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ }
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
+static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, dest_addr_t;
+ struct ccp_passthru pst;
+ uint64_t auth_msg_bits;
+ void *append_ptr;
+ uint8_t *addr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ addr = session->auth.pre_compute;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ dest_addr_t = dest_addr;
+
+ /* Load PHash1 to LSB */
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* SHA engine command descriptor for intermediate hash */
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
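+ /* the implicit (key ^ ipad) block counts toward the hashed length */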
+ auth_msg_bits = (op->sym->auth.data.length +
+ session->auth.block_size) * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ }
+
+ /* Load PHash2 to LSB */
+ addr += session->auth.ctx_len;
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* SHA engine command descriptor for final hash */
+ dest_addr_t += session->auth.offset;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+ session->auth.offset);
+ auth_msg_bits = (session->auth.block_size +
+ session->auth.ctx_len -
+ session->auth.offset) * 8;
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Retrieve hmac output */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr;
+ struct ccp_passthru pst;
+ void *append_ptr;
+ uint64_t auth_msg_bits;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+ /* Passthru SHA context */
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+ session->auth.ctx);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* prepare SHA command descriptor */
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = op->sym->auth.data.length * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Hash value retrieve */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ struct ccp_passthru pst;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+ *)session->auth.pre_compute);
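+ /* pre_compute holds the ipad context followed by the opad context */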
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* desc1 for SHA3 IHash operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ /* SHA engine command descriptor for final hash */
+ ctx_paddr += CCP_SHA3_CTX_SIZE;
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+ dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+ CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+ dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+ } else {
+ CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+ }
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *ctx_addr, *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ ctx_addr = session->auth.sha3_ctx;
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for SHA3 operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *src_tb, *append_ptr, *ctx_addr;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+ CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
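+ /* block-aligned data: single CMAC op with a zeroed IV staged via passthru */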
+ if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+ ctx_addr = session->auth.pre_compute;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for aes-cmac command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ } else {
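+ /* non-aligned data: CMAC the aligned part, then one padded final block */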
+ ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+ length *= AES_BLOCK_SIZE;
+ non_align_len = op->sym->auth.data.length - length;
+ /* prepare desc for aes-cmac command */
+ /*Command 1*/
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ /*Command 2*/
+ append_ptr = append_ptr + CCP_SB_BYTES;
+ memset(append_ptr, 0, AES_BLOCK_SIZE);
+ src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ length);
+ rte_memcpy(append_ptr, src_tb, non_align_len);
+ append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ }
+ /* Retrieve result */
+ pst.dest_addr = dest_addr;
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *lsb_buf;
+ struct ccp_passthru pst = {0};
+ struct ccp_desc *desc;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ uint8_t *iv;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ function.raw = 0;
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
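+ /* every mode except ECB stages an IV into the LSB first */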
+ if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+ iv, session->iv.length);
+ pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+ CCP_AES_SIZE(&function) = 0x1F;
+ } else {
+ lsb_buf =
+ &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ rte_memcpy(lsb_buf +
+ (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+ pst.src_addr = b_info->lsb_buf_phys +
+ (b_info->lsb_buf_idx * CCP_SB_BYTES);
+ b_info->lsb_buf_idx++;
+ }
+
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (likely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+ key_addr = session->cipher.key_phys;
+
+ /* prepare desc for aes command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ unsigned char *lsb_buf;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *iv;
+ phys_addr_t src_addr, dest_addr, key_addr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ switch (session->cipher.um.des_mode) {
+ case CCP_DES_MODE_CBC:
+ lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ b_info->lsb_buf_idx++;
+
+ rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ break;
+ case CCP_DES_MODE_CFB:
+ case CCP_DES_MODE_ECB:
+ CCP_LOG_ERR("Unsupported DES cipher mode");
+ return -ENOTSUP;
+ }
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+
+ key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for des command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+ CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.des_mode)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *iv;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint64_t *temp;
+ phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+ phys_addr_t digest_dest_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ key_addr = session->cipher.key_phys;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->aead.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->aead.data.offset);
+ else
+ dest_addr = src_addr;
+ if (!rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len)) {
+ CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ digest_dest_addr = op->sym->aead.digest.phys_addr;
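+ /* stage the final GHASH block after the digest: len(AAD) || len(data), in bits */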
+ temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
+ *temp++ = rte_bswap64(session->auth.aad_length << 3);
+ *temp = rte_bswap64(op->sym->aead.data.length << 3);
+
+ non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
+ length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
+
+ aad_addr = op->sym->aead.aad.phys_addr;
+
+ /* CMD1 IV Passthru */
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
+ session->iv.length);
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD2 GHASH-AAD */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD3 : GCTR Plain text */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ if (non_align_len == 0)
+ CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+ else
+ CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD4 : PT to copy IV */
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = AES_BLOCK_SIZE;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD5 : GHASH-Final */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ /* Last block (AAD_len || PT_len)*/
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 1;
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ result = ccp_perform_3des(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ result = ccp_perform_sha(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ if (session->auth_opt == 0)
+ result = -1;
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 6;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 7;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ result = ccp_perform_sha3(op, cmd_q);
+ b_info->desccnt += 1;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ return -ENOTSUP;
+ }
+
+ return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+ CCP_LOG_ERR("Incorrect chain order");
+ return -1;
+ }
+ result = ccp_perform_aes_gcm(op, cmd_q);
+ b_info->desccnt += 5;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+int
+process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req)
+{
+ int i, result = 0;
+ struct ccp_batch_info *b_info;
+ struct ccp_session *session;
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+ CCP_LOG_ERR("batch info allocation failed");
+ return 0;
+ }
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ return 0;
+ }
+ b_info->auth_ctr = 0;
+
+ /* populate batch info necessary for dequeue */
+ b_info->op_idx = 0;
+ b_info->lsb_buf_idx = 0;
+ b_info->desccnt = 0;
+ b_info->cmd_q = cmd_q;
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+ rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+ b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+ for (i = 0; i < nb_ops; i++) {
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt) {
+ b_info->auth_ctr++;
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ if (result)
+ break;
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt) {
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ if (op[i]->status !=
+ RTE_CRYPTO_OP_STATUS_SUCCESS)
+ continue;
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+
+ if (result)
+ break;
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_COMBINED:
+ result = ccp_crypto_aead(op[i], cmd_q, b_info);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ result = -1;
+ }
+ if (unlikely(result < 0)) {
+ rte_atomic64_add(&b_info->cmd_q->free_slots,
+ (slots_req - b_info->desccnt));
+ break;
+ }
+ b_info->op[i] = op[i];
+ }
+
+ b_info->opcnt = i;
+ b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+
+ rte_wmb();
+ /* Write the new tail address back to the queue register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ b_info->tail_offset);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ return i;
+}
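+
+/*
+ * Editor's sketch (not upstream code) of how a burst-enqueue path would
+ * typically drive the helper above: compute the slots the ops need,
+ * reserve a hardware queue, then hand the ops over. "cdev", "session",
+ * "ops" and "nb_ops" are hypothetical placeholders here.
+ *
+ *	int slots = ccp_compute_slot_count(session) * nb_ops;
+ *	struct ccp_queue *cmd_q = ccp_allot_queue(cdev, slots);
+ *
+ *	if (cmd_q != NULL)
+ *		enq = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots);
+ */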
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+ struct ccp_session *session;
+ uint8_t *digest_data, *addr;
+ struct rte_mbuf *m_last;
+ int offset, digest_offset;
+ uint8_t digest_le[64];
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ if (session->cmd_id == CCP_CMD_COMBINED) {
+ digest_data = op->sym->aead.digest.data;
+ digest_offset = op->sym->aead.data.offset +
+ op->sym->aead.data.length;
+ } else {
+ digest_data = op->sym->auth.digest.data;
+ digest_offset = op->sym->auth.data.offset +
+ op->sym->auth.data.length;
+ }
+ m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+ addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+ m_last->data_len - session->auth.ctx_len);
+
+ rte_mb();
+ offset = session->auth.offset;
+
+ if (session->auth.engine == CCP_ENGINE_SHA)
+ if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+			/* All other algorithms require a byte
+			 * swap performed by the host
+			 */
+ unsigned int i;
+
+ offset = session->auth.ctx_len -
+ session->auth.offset - 1;
+ for (i = 0; i < session->auth.digest_length; i++)
+ digest_le[i] = addr[offset - i];
+ offset = 0;
+ addr = digest_le;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(addr + offset, digest_data,
+ session->auth.digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ } else {
+		if (unlikely(digest_data == NULL))
+ digest_data = rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, uint8_t *,
+ digest_offset);
+ rte_memcpy(digest_data, addr + offset,
+ session->auth.digest_length);
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(op->sym->m_src,
+ session->auth.ctx_len);
+}
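+
+/*
+ * Editor's note on the byte swap above: for digests other than
+ * SHA-1/224/256 the engine saves its context in reversed byte order, so
+ * the host copies the digest out back to front. Toy example with a
+ * 4-byte digest and reversed offset 7: digest_le = { addr[7], addr[6],
+ * addr[5], addr[4] }, and the verify/copy step then reads digest_le
+ * from offset 0.
+ */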
+
+static int
+ccp_prepare_ops(struct ccp_qp *qp,
+ struct rte_crypto_op **op_d,
+ struct ccp_batch_info *b_info,
+ uint16_t nb_ops)
+{
+ int i, min_ops;
+ struct ccp_session *session;
+
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+ for (i = 0; i < min_ops; i++) {
+ op_d[i] = b_info->op[b_info->op_idx++];
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op_d[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt == 0)
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ if (session->auth_opt)
+ cpu_crypto_auth(qp, op_d[i],
+ session, auth_ctx);
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt)
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_COMBINED:
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ }
+ }
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ b_info->opcnt -= min_ops;
+ return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops)
+{
+ struct ccp_batch_info *b_info;
+ uint32_t cur_head_offset;
+
+ if (qp->b_info != NULL) {
+ b_info = qp->b_info;
+ if (unlikely(b_info->op_idx > 0))
+ goto success;
+ } else if (rte_ring_dequeue(qp->processed_pkts,
+ (void **)&b_info))
+ return 0;
+
+ if (b_info->auth_ctr == b_info->opcnt)
+ goto success;
+ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+ CMD_Q_HEAD_LO_BASE);
+
+ if (b_info->head_offset < b_info->tail_offset) {
+ if ((cur_head_offset >= b_info->head_offset) &&
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ } else {
+ if ((cur_head_offset >= b_info->head_offset) ||
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ }
+
+
+success:
+ nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
+ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+ b_info->desccnt = 0;
+ if (b_info->opcnt > 0) {
+ qp->b_info = b_info;
+ } else {
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ qp->b_info = NULL;
+ }
+
+ return nb_ops;
+}
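+
+/*
+ * Editor's note on the head/tail check above: the descriptor ring is
+ * circular, so "hardware still busy" means the current head lies in the
+ * half-open range [head_offset, tail_offset) taken around the ring.
+ * Toy example with 8 descriptors, head_offset = 6, tail_offset = 2
+ * (the batch wraps):
+ *   cur_head_offset = 7 -> >= head_offset -> still processing, return 0
+ *   cur_head_offset = 1 -> <  tail_offset -> still processing, return 0
+ *   cur_head_offset = 3 -> outside both   -> batch complete, dequeue
+ */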
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 00000000..882b398a
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
+#define CCP_SHA3_CTX_SIZE 200
+
+/** Macro helpers for CCP command creation */
+#define CCP_AES_SIZE(p) ((p)->aes.size)
+#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
+#define CCP_AES_MODE(p) ((p)->aes.mode)
+#define CCP_AES_TYPE(p) ((p)->aes.type)
+#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt)
+#define CCP_DES_MODE(p) ((p)->des.mode)
+#define CCP_DES_TYPE(p) ((p)->des.type)
+#define CCP_SHA_TYPE(p) ((p)->sha.type)
+#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+#define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+/* MD5 */
+#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE 32
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+#define SHA3_224_BLOCK_SIZE 144
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+#define SHA3_256_BLOCK_SIZE 136
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+#define SHA3_384_BLOCK_SIZE 104
+
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+#define SHA3_512_BLOCK_SIZE 72
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
+
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
+
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
+
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+ CCP_AES_MODE_ECB = 0,
+ CCP_AES_MODE_CBC,
+ CCP_AES_MODE_OFB,
+ CCP_AES_MODE_CFB,
+ CCP_AES_MODE_CTR,
+ CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+ CCP_AES_MODE_GHASH_AAD = 0,
+ CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+ CCP_AES_TYPE_128 = 0,
+ CCP_AES_TYPE_192,
+ CCP_AES_TYPE_256,
+ CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+ CCP_DES_MODE_ECB = 0, /* Not supported */
+ CCP_DES_MODE_CBC,
+ CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+ CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */
+ CCP_DES_TYPE_192, /* 168 + 24 parity */
+ CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+ CCP_SHA_TYPE_1 = 1,
+ CCP_SHA_TYPE_224,
+ CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
+ CCP_SHA_TYPE_RSVD1,
+ CCP_SHA_TYPE_RSVD2,
+ CCP_SHA3_TYPE_224,
+ CCP_SHA3_TYPE_256,
+ CCP_SHA3_TYPE_384,
+ CCP_SHA3_TYPE_512,
+ CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+ CCP_CIPHER_ALGO_AES_CBC = 0,
+ CCP_CIPHER_ALGO_AES_ECB,
+ CCP_CIPHER_ALGO_AES_CTR,
+ CCP_CIPHER_ALGO_AES_GCM,
+ CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+ CCP_CIPHER_DIR_DECRYPT = 0,
+ CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+ CCP_AUTH_ALGO_SHA1 = 0,
+ CCP_AUTH_ALGO_SHA1_HMAC,
+ CCP_AUTH_ALGO_SHA224,
+ CCP_AUTH_ALGO_SHA224_HMAC,
+ CCP_AUTH_ALGO_SHA3_224,
+ CCP_AUTH_ALGO_SHA3_224_HMAC,
+ CCP_AUTH_ALGO_SHA256,
+ CCP_AUTH_ALGO_SHA256_HMAC,
+ CCP_AUTH_ALGO_SHA3_256,
+ CCP_AUTH_ALGO_SHA3_256_HMAC,
+ CCP_AUTH_ALGO_SHA384,
+ CCP_AUTH_ALGO_SHA384_HMAC,
+ CCP_AUTH_ALGO_SHA3_384,
+ CCP_AUTH_ALGO_SHA3_384_HMAC,
+ CCP_AUTH_ALGO_SHA512,
+ CCP_AUTH_ALGO_SHA512_HMAC,
+ CCP_AUTH_ALGO_SHA3_512,
+ CCP_AUTH_ALGO_SHA3_512_HMAC,
+ CCP_AUTH_ALGO_AES_CMAC,
+ CCP_AUTH_ALGO_AES_GCM,
+ CCP_AUTH_ALGO_MD5_HMAC,
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+ CCP_AUTH_OP_GENERATE = 0,
+ CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+ bool auth_opt;
+ enum ccp_cmd_order cmd_id;
+ /**< chain order mode */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ struct {
+ enum ccp_cipher_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ enum ccp_des_mode des_mode;
+ } um;
+ union {
+ enum ccp_aes_type aes_type;
+ enum ccp_des_type des_type;
+ } ut;
+ enum ccp_cipher_dir dir;
+ uint64_t key_length;
+ /**< max cipher key size 256 bits */
+ uint8_t key[32];
+	/**< CCP key format */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+	/**< AES-CTR: nonce (4 bytes), IV (8 bytes), counter */
+ uint8_t nonce[32];
+ phys_addr_t nonce_phys;
+ } cipher;
+ /**< Cipher Parameters */
+
+ struct {
+ enum ccp_hash_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ } um;
+ union {
+ enum ccp_sha_type sha_type;
+ enum ccp_aes_type aes_type;
+ } ut;
+ enum ccp_hash_op op;
+ uint64_t key_length;
+	/**< max hash key size 144 bytes (struct capabilities) */
+ uint8_t key[144];
+	/**< max big-endian key size of AES is 32 bytes */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ uint64_t digest_length;
+ void *ctx;
+ int ctx_len;
+ int offset;
+ int block_size;
+	/* Buffer to store software generated precomputed values:
+	 * for HMAC, H(ipad ^ key) and H(opad ^ key);
+	 * for CMAC, K1 IV and K2 IV.
+	 */
+ uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+	/**< SHA3 initial ctx, all zeros */
+ uint8_t sha3_ctx[200];
+ int aad_length;
+ } auth;
+ /**< Authentication Parameters */
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD Algorithm */
+
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+struct ccp_private;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals);
+
+/**
+ * Compute the number of CCP queue slots (descriptors) a session requires
+ *
+ * @param session CCP private session
+ * @return number of slots required per op for this session
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * Process crypto ops and enqueue them on a CCP command queue
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops no. of ops to be submitted
+ * @param slots_req no. of queue slots required for the ops
+ * @return number of ops successfully enqueued
+ */
+int process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req);
+
+/**
+ * Process completed crypto ops and dequeue them from a queue-pair
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return number of ops dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops);
+
+
+/**
+ * APIs for SHA3 partial hash generation
+ * @param data_in buffer to which the partial hash is applied
+ * @param data_out buffer where the partial hash result is written,
+ *	in CCP big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+ uint8_t *data_out);
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 00000000..80fe6a45
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,810 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+ struct ccp_private *priv = dev->data->dev_private;
+
+ priv->last_dev = TAILQ_FIRST(&ccp_list);
+ return 0;
+}
+
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+ int i, ret = 0;
+ struct ccp_device *dev;
+ struct ccp_private *priv = cdev->data->dev_private;
+
+ dev = TAILQ_NEXT(priv->last_dev, next);
+ if (unlikely(dev == NULL))
+ dev = TAILQ_FIRST(&ccp_list);
+ priv->last_dev = dev;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->qidx++;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ }
+ return NULL;
+}
+
+int
+ccp_read_hwrng(uint32_t *value)
+{
+ struct ccp_device *dev;
+
+ TAILQ_FOREACH(dev, &ccp_list, next) {
+ void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+ *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+ if (*value) {
+ dev->hwrng_retries = 0;
+ return 0;
+ }
+ }
+ dev->hwrng_retries = 0;
+ }
+ return -1;
+}
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+	if (mz != NULL) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ CCP_LOG_INFO("re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+ CCP_LOG_ERR("Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) :
+ BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+ uint32_t i;
+ uint32_t nwords = 0;
+
+ nwords = (limit - 1) / BITS_PER_WORD + 1;
+ for (i = 0; i < nwords; i++) {
+ if (addr[i] == 0UL)
+ return i * BITS_PER_WORD;
+ if (addr[i] < ~(0UL))
+ break;
+ }
+ return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
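+
+/*
+ * Editor's worked example for the two helpers above (64-bit words):
+ *   ccp_ffz(0x7UL): ~0x7 has bit 3 as its first set bit, so 3 is returned
+ *   ccp_ffz(~0UL):  ~word == 0, ffsl() == 0, so BITS_PER_WORD is returned
+ *   ccp_find_first_zero_bit of a one-word map 0x7UL with limit 64 -> 3
+ */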
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_WORD;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_WORD;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+ unsigned long nbits,
+ unsigned long start,
+ unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+ start = ccp_round_down(start, BITS_PER_WORD);
+
+ while (!tmp) {
+ start += BITS_PER_WORD;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+ }
+
+ return RTE_MIN(start + (ffs(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr)
+{
+ unsigned long index, end, i;
+
+again:
+ index = ccp_find_next_zero_bit(map, size, start);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = ccp_find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
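+
+/*
+ * Editor's worked example: map = 0b00001101 (bits 0, 2, 3 set), size = 8,
+ * start = 0, nr = 2:
+ *   pass 1: first zero bit is 1, but bit 2 is set inside [1, 3) -> retry
+ *   pass 2: first zero bit from 3 is 4, and [4, 6) is all zero -> returns 4
+ */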
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+ struct ccp_device *ccp;
+ int start;
+
+ /* First look at the map for the queue */
+ if (cmd_q->lsb >= 0) {
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+ LSB_SIZE, 0,
+ count);
+ if (start < LSB_SIZE) {
+ ccp_bitmap_set(cmd_q->lsbmap, start, count);
+ return start + cmd_q->lsb * LSB_SIZE;
+ }
+ }
+
+ /* try to get an entry from the shared blocks */
+ ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+ MAX_LSB_CNT * LSB_SIZE,
+ 0, count);
+ if (start <= MAX_LSB_CNT * LSB_SIZE) {
+ ccp_bitmap_set(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ return start * LSB_ITEM_SIZE;
+ }
+ CCP_LOG_ERR("NO LSBs available");
+
+ rte_spinlock_unlock(&ccp->lsb_lock);
+
+ return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+ unsigned int start,
+ unsigned int count)
+{
+ int lsbno = start / LSB_SIZE;
+
+ if (!start)
+ return;
+
+ if (cmd_q->lsb == lsbno) {
+ /* An entry from the private LSB */
+ ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ } else {
+ /* From the shared LSBs */
+ struct ccp_device *ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+ ccp_bitmap_clear(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ }
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+ int q_mask = 1 << cmd_q->id;
+ int weight = 0;
+ int j;
+
+	/* Build a bit mask of the LSBs this queue has access to.
+	 * Don't bother with segment 0, as it has special privileges.
+	 */
+ cmd_q->lsbmask = 0;
+ status >>= LSB_REGION_WIDTH;
+ for (j = 1; j < MAX_LSB_CNT; j++) {
+ if (status & q_mask)
+ ccp_set_bit(&cmd_q->lsbmask, j);
+
+ status >>= LSB_REGION_WIDTH;
+ }
+
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ weight++;
+
+ printf("Queue %d can access %d LSB regions of mask %lu\n",
+ (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+ return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+ int lsb_cnt, int n_lsbs,
+ unsigned long *lsb_pub)
+{
+ unsigned long qlsb = 0;
+ int bitno = 0;
+ int qlsb_wgt = 0;
+ int i, j;
+
+ /* For each queue:
+ * If the count of potential LSBs available to a queue matches the
+ * ordinal given to us in lsb_cnt:
+ * Copy the mask of possible LSBs for this queue into "qlsb";
+ * For each bit in qlsb, see if the corresponding bit in the
+ * aggregation mask is set; if so, we have a match.
+ * If we have a match, clear the bit in the aggregation to
+ * mark it as no longer available.
+ * If there is no match, clear the bit in qlsb and keep looking.
+ */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+ qlsb_wgt = 0;
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ qlsb_wgt++;
+
+ if (qlsb_wgt == lsb_cnt) {
+ qlsb = cmd_q->lsbmask;
+
+ bitno = ffs(qlsb) - 1;
+ while (bitno < MAX_LSB_CNT) {
+ if (ccp_get_bit(lsb_pub, bitno)) {
+ /* We found an available LSB
+ * that this queue can access
+ */
+ cmd_q->lsb = bitno;
+ ccp_clear_bit(lsb_pub, bitno);
+ break;
+ }
+ ccp_clear_bit(&qlsb, bitno);
+ bitno = ffs(qlsb) - 1;
+ }
+ if (bitno >= MAX_LSB_CNT)
+ return -EINVAL;
+ n_lsbs--;
+ }
+ }
+ return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+ unsigned long lsb_pub = 0, qlsb = 0;
+ int n_lsbs = 0;
+ int bitno;
+ int i, lsb_cnt;
+ int rc = 0;
+
+ rte_spinlock_init(&ccp->lsb_lock);
+
+ /* Create an aggregate bitmap to get a total count of available LSBs */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+ for (i = 0; i < MAX_LSB_CNT; i++)
+ if (ccp_get_bit(&lsb_pub, i))
+ n_lsbs++;
+
+ if (n_lsbs >= ccp->cmd_q_count) {
+ /* We have enough LSBS to give every queue a private LSB.
+ * Brute force search to start with the queues that are more
+ * constrained in LSB choice. When an LSB is privately
+ * assigned, it is removed from the public mask.
+ * This is an ugly N squared algorithm with some optimization.
+ */
+ for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+ lsb_cnt++) {
+ rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+ &lsb_pub);
+ if (rc < 0)
+ return -EINVAL;
+ n_lsbs = rc;
+ }
+ }
+
+ rc = 0;
+ /* What's left of the LSBs, according to the public mask, now become
+ * shared. Any zero bits in the lsb_pub mask represent an LSB region
+ * that can't be used as a shared resource, so mark the LSB slots for
+ * them as "in use".
+ */
+ qlsb = lsb_pub;
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ while (bitno < MAX_LSB_CNT) {
+ ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+ ccp_set_bit(&qlsb, bitno);
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ }
+
+ return rc;
+}
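+
+/*
+ * Editor's worked example for the assignment above: two queues, where
+ * queue 0 can reach LSBs {1, 2} and queue 1 only LSB {1}. n_lsbs (2) >=
+ * cmd_q_count (2), so the brute-force pass runs: the more constrained
+ * queue 1 (weight 1) is served first and takes LSB 1; queue 0 (weight 2)
+ * then takes LSB 2. lsb_pub ends up empty, so no shared regions remain.
+ */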
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+ int i;
+ uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+ uint64_t status;
+ struct ccp_queue *cmd_q;
+ const struct rte_memzone *q_mz;
+ void *vaddr;
+
+ if (dev == NULL)
+ return -1;
+
+ dev->id = ccp_dev_id++;
+ dev->qidx = 0;
+ vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ if (type == CCP_VERSION_5B) {
+ CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+ CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+ for (i = 0; i < 12; i++) {
+ CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+ CCP_READ_REG(vaddr, TRNG_OUT_REG));
+ }
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+ CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+ CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+ }
+ CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+ /* Copy the private LSB mask to the public registers */
+ status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+ status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+ status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
+
+ dev->cmd_q_count = 0;
+ /* Find available queues */
+ qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+ cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+ cmd_q->dev = dev;
+ cmd_q->id = i;
+ cmd_q->qidx = 0;
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+ cmd_q->reg_base = (uint8_t *)vaddr +
+ CMD_Q_STATUS_INCR * (i + 1);
+
+ /* CCP queue memory */
+ snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+ "%s_%d_%s_%d_%s",
+ "ccp_dev",
+ (int)dev->id, "queue",
+ (int)cmd_q->id, "mem");
+ q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+ cmd_q->qsize, SOCKET_ID_ANY);
+ cmd_q->qbase_addr = (void *)q_mz->addr;
+ cmd_q->qbase_desc = (void *)q_mz->addr;
+ cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+ cmd_q->qcontrol = 0;
+ /* init control reg to zero */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* Disable the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+ /* Clear the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+ ALL_INTERRUPTS);
+
+ /* Configure size of each virtual queue accessible to host */
+ cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+ cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+ dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ (uint32_t)dma_addr_lo);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+ (uint32_t)dma_addr_lo);
+
+ dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* create LSB Mask map */
+ if (ccp_find_lsb_regions(cmd_q, status))
+ CCP_LOG_ERR("queue doesn't have lsb regions");
+ cmd_q->lsb = -1;
+
+ rte_atomic64_init(&cmd_q->free_slots);
+ rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot is left unused as a barrier between head & tail */
+ }
+
+ if (ccp_assign_lsbs(dev))
+ CCP_LOG_ERR("Unable to assign lsb region");
+
+ /* pre-allocate LSB slots */
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->cmd_q[i].sb_key =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_iv =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_sha =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ dev->cmd_q[i].sb_hmac =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ }
+
+ TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+ return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+ if (dev == NULL)
+ return;
+
+ TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+ const struct rte_pci_id *ccp_id,
+ int *type)
+{
+ char filename[PATH_MAX];
+ const struct rte_pci_id *id;
+ uint16_t vendor, device_id;
+ int i;
+ unsigned long tmp;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ vendor = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ device_id = (uint16_t)tmp;
+
+ for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+ if (vendor == id->vendor_id &&
+ device_id == id->device_id) {
+ *type = i;
+ return 1; /* Matched device */
+ }
+ }
+ return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+ uint8_t bus, uint8_t devid,
+ uint8_t function, int ccp_type)
+{
+ struct ccp_device *ccp_dev = NULL;
+ struct rte_pci_device *pci;
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ int uio_fd = -1, i, uio_num;
+ char uio_devname[PATH_MAX];
+ void *map_addr;
+
+ ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+ RTE_CACHE_LINE_SIZE);
+ if (ccp_dev == NULL)
+ goto fail;
+ pci = &(ccp_dev->pci);
+
+ pci->addr.domain = domain;
+ pci->addr.bus = bus;
+ pci->addr.devid = devid;
+ pci->addr.function = function;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+	/* only the least significant 24 bits are valid: class, subclass,
+	 * program interface
+	 */
+ pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+ goto fail;
+
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0) {
+ /*
+ * It may take time for uio device to appear,
+ * wait here and try again
+ */
+ usleep(100000);
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0)
+ goto fail;
+ }
+ snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+ uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+ if (uio_fd < 0)
+ goto fail;
+ if (flock(uio_fd, LOCK_EX | LOCK_NB))
+ goto fail;
+
+ /* Map the PCI memory resource of device */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+ char devname[PATH_MAX];
+ int res_fd;
+
+ if (pci->mem_resource[i].phys_addr == 0)
+ continue;
+ snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+ res_fd = open(devname, O_RDWR);
+ if (res_fd < 0)
+ goto fail;
+ map_addr = mmap(NULL, pci->mem_resource[i].len,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED, res_fd, 0);
+ if (map_addr == MAP_FAILED)
+ goto fail;
+
+ pci->mem_resource[i].addr = map_addr;
+ }
+
+	/* device is valid, add it to the list */
+ if (ccp_add_device(ccp_dev, ccp_type)) {
+ ccp_remove_device(ccp_dev);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ CCP_LOG_ERR("CCP Device probe failed");
+ if (uio_fd > 0)
+ close(uio_fd);
+ if (ccp_dev)
+ rte_free(ccp_dev);
+ return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+ int dev_cnt = 0;
+ int ccp_type = 0;
+ struct dirent *d;
+ DIR *dir;
+ int ret = 0;
+ int module_idx = 0;
+ uint16_t domain;
+ uint8_t bus, devid, function;
+ char dirname[PATH_MAX];
+
+ module_idx = ccp_check_pci_uio_module();
+ if (module_idx < 0)
+ return -1;
+
+ TAILQ_INIT(&ccp_list);
+ dir = opendir(SYSFS_PCI_DEVICES);
+ if (dir == NULL)
+ return -1;
+ while ((d = readdir(dir)) != NULL) {
+ if (d->d_name[0] == '.')
+ continue;
+ if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+ &domain, &bus, &devid, &function) != 0)
+ continue;
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_PCI_DEVICES, d->d_name);
+ if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+ printf("CCP : Detected CCP device with ID = 0x%x\n",
+ ccp_id[ccp_type].device_id);
+ ret = ccp_probe_device(dirname, domain, bus, devid,
+ function, ccp_type);
+ if (ret == 0)
+ dev_cnt++;
+ }
+ }
+ closedir(dir);
+ return dev_cnt;
+}
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 00000000..de3e4bcc
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,495 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES 5
+#define CCP_MAX_TRNG_RETRIES 10
+#define CCP_ALIGN(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET 0x00
+#define CMD_QUEUE_PRIO_OFFSET 0x04
+#define CMD_REQID_CONFIG_OFFSET 0x08
+#define CMD_CMD_TIMEOUT_OFFSET 0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+
+#define CMD_CONFIG_0_OFFSET 0x6000
+#define CMD_TRNG_CTL_OFFSET 0x6008
+#define CMD_AES_MASK_OFFSET 0x6010
+#define CMD_CLK_GATE_CTL_OFFSET 0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN 0x1
+#define CMD_Q_SIZE 0x1F
+#define CMD_Q_SHIFT 3
+#define COMMANDS_PER_QUEUE 2048
+
+#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+ CMD_Q_SIZE)
+#define Q_DESC_SIZE sizeof(struct ccp_desc)
+#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
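+
+/*
+ * Editor's note: a worked example of the encoding above. With
+ * COMMANDS_PER_QUEUE = 2048 (2^11), ffs() returns 12, so
+ * QUEUE_SIZE_VAL = (12 - 2) & 0x1F = 10; the register encodes the
+ * queue size as log2(entries) - 1.
+ */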
+
+#define INT_COMPLETION 0x1
+#define INT_ERROR 0x2
+#define INT_QUEUE_STOPPED 0x4
+#define ALL_INTERRUPTS (INT_COMPLETION| \
+ INT_ERROR| \
+ INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH 5
+#define MAX_LSB_CNT 8
+
+#define LSB_SIZE 16
+#define LSB_ITEM_SIZE 32
+#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR) ((LSB_ADDR) / LSB_ITEM_SIZE)
+
+/* General CCP Defines */
+
+#define CCP_SB_BYTES 32
+/* Word 0 */
+#define CCP_CMD_DW0(p) ((p)->dw0)
+#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p) ((p)->length)
+#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p) ((p)->src_lo)
+#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p) ((p)->dw3)
+#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p) ((p)->dw4)
+#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p) ((p)->key_lo)
+#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p) ((p)->dw7)
+#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
+
+/* bitmap */
+enum {
+ BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+ CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+ (~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+ (~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
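+
+/*
+ * Editor's worked example for the masks above, assuming 64-bit words:
+ *   CCP_BITMAP_FIRST_WORD_MASK(3)  == ~0UL << 3         (bits 3..63 set)
+ *   CCP_BITMAP_LAST_WORD_MASK(67)  == ~0UL >> (-67 & 63)
+ *                                  == ~0UL >> 61        (bits 0..2 set)
+ * i.e. a 67-bit map keeps only the 3 valid bits of its last word.
+ */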
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+ uint32_t value)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+ ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+ ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+ CCP_VERSION_5A = 0,
+ CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+ struct ccp_device *dev;
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+
+ rte_atomic64_t free_slots;
+ /**< available free slots updated from enq/deq calls */
+
+ /* Queue identifier */
+ uint64_t id; /**< queue id */
+ uint64_t qidx; /**< queue index */
+ uint64_t qsize; /**< queue size */
+
+ /* Queue address */
+ struct ccp_desc *qbase_desc;
+ void *qbase_addr;
+ phys_addr_t qbase_phys_addr;
+ /**< queue-page registers addr */
+ void *reg_base;
+
+ uint32_t qcontrol;
+ /**< queue ctrl reg */
+
+ int lsb;
+ /**< lsb region assigned to queue */
+ unsigned long lsbmask;
+ /**< lsb regions queue can access */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+ /**< all lsb resources which queue is using */
+ uint32_t sb_key;
+ /**< lsb assigned for queue */
+ uint32_t sb_iv;
+ /**< lsb assigned for iv */
+ uint32_t sb_sha;
+ /**< lsb assigned for sha ctx */
+ uint32_t sb_hmac;
+ /**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+ TAILQ_ENTRY(ccp_device) next;
+ int id;
+ /**< ccp dev id on platform */
+ struct ccp_queue cmd_q[MAX_HW_QUEUES];
+ /**< ccp queue */
+ int cmd_q_count;
+ /**< no. of ccp Queues */
+ struct rte_pci_device pci;
+ /**< ccp pci identifier */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+ /**< shared lsb mask of ccp */
+ rte_spinlock_t lsb_lock;
+ /**< protection for shared lsb region allocation */
+ int qidx;
+ /**< current queue index */
+ int hwrng_retries;
+ /**< retry counter for CCP TRNG */
+} __rte_cache_aligned;
+
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+ CCP_ENGINE_AES = 0,
+ CCP_ENGINE_XTS_AES_128,
+ CCP_ENGINE_3DES,
+ CCP_ENGINE_SHA,
+ CCP_ENGINE_RSA,
+ CCP_ENGINE_PASSTHRU,
+ CCP_ENGINE_ZLIB_DECOMPRESS,
+ CCP_ENGINE_ECC,
+ CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+ CCP_PASSTHRU_BITWISE_NOOP = 0,
+ CCP_PASSTHRU_BITWISE_AND,
+ CCP_PASSTHRU_BITWISE_OR,
+ CCP_PASSTHRU_BITWISE_XOR,
+ CCP_PASSTHRU_BITWISE_MASK,
+ CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+ CCP_PASSTHRU_BYTESWAP_32BIT,
+ CCP_PASSTHRU_BYTESWAP_256BIT,
+ CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+ phys_addr_t src_addr;
+ phys_addr_t dest_addr;
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+ int len;
+ int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } aes;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } des;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t rsvd:5;
+ uint16_t type:2;
+ } aes_xts;
+ struct {
+ uint16_t rsvd1:10;
+ uint16_t type:4;
+ uint16_t rsvd2:1;
+ } sha;
+ struct {
+ uint16_t mode:3;
+ uint16_t size:12;
+ } rsa;
+ struct {
+ uint16_t byteswap:2;
+ uint16_t bitwise:3;
+ uint16_t reflect:2;
+ uint16_t rsvd:8;
+ } pt;
+ struct {
+ uint16_t rsvd:13;
+ } zlib;
+ struct {
+ uint16_t size:10;
+ uint16_t type:2;
+ uint16_t mode:3;
+ } ecc;
+ uint16_t raw;
+};
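+
+/*
+ * Editor's sketch (hypothetical usage, not upstream code) of filling the
+ * function field for an AES-CBC-128 encrypt with the CCP_AES_* accessors
+ * from ccp_crypto.h; "desc" stands for a struct ccp_desc being built:
+ *
+ *	union ccp_function function;
+ *
+ *	function.raw = 0;
+ *	CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+ *	CCP_AES_MODE(&function) = CCP_AES_MODE_CBC;
+ *	CCP_AES_TYPE(&function) = CCP_AES_TYPE_128;
+ *	CCP_CMD_FUNCTION(desc) = function.raw;
+ */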
+
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+ uint32_t soc:1;
+ uint32_t ioc:1;
+ uint32_t rsvd1:1;
+ uint32_t init:1;
+ uint32_t eom:1;
+ uint32_t function:15;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t rsvd2:7;
+};
+
+struct dword3 {
+ uint32_t src_hi:16;
+ uint32_t src_mem:2;
+ uint32_t lsb_cxt_id:8;
+ uint32_t rsvd1:5;
+ uint32_t fixed:1;
+};
+
+union dword4 {
+ uint32_t dst_lo; /* NON-SHA */
+ uint32_t sha_len_lo; /* SHA */
+};
+
+union dword5 {
+ struct {
+ uint32_t dst_hi:16;
+ uint32_t dst_mem:2;
+ uint32_t rsvd1:13;
+ uint32_t fixed:1;
+ }
+ fields;
+ uint32_t sha_len_hi;
+};
+
+struct dword7 {
+ uint32_t key_hi:16;
+ uint32_t key_mem:2;
+ uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+ struct dword0 dw0;
+ uint32_t length;
+ uint32_t src_lo;
+ struct dword3 dw3;
+ union dword4 dw4;
+ union dword5 dw5;
+ uint32_t key_lo;
+ struct dword7 dw7;
+};
+
+/**
+ * ccp memory type
+ */
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_SB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE_LAST,
+};
+
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+ CCP_CMD_CIPHER = 0,
+ CCP_CMD_AUTH,
+ CCP_CMD_CIPHER_HASH,
+ CCP_CMD_HASH_CIPHER,
+ CCP_CMD_COMBINED,
+ CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+ return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+ return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * allocate a ccp command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of queue slots required
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 00000000..59152ca5
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+ "igb_uio",
+ "uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+ FILE *fp;
+ int i;
+ char buf[BUFSIZ];
+
+ fp = fopen(PROC_MODULES, "r");
+ if (fp == NULL)
+ return -1;
+	i = 0;
+	while (uio_module_names[i] != NULL) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		i++;
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL];
+ /* last element-separator is "." not ":" */
+ } splitaddr;
+
+ char *buf_copy = strndup(buf, bufsize);
+
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+ splitaddr.function = strchr(splitaddr.devid, '.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+ *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+ *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+ *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
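+
+/*
+ * Editor's usage sketch (hypothetical values): for a sysfs entry named
+ * "0000:03:00.2" the helper above yields domain 0x0000, bus 0x03,
+ * devid 0x00 and function 2:
+ *
+ *	uint16_t domain;
+ *	uint8_t bus, devid, function;
+ *
+ *	ccp_parse_pci_addr_format("0000:03:00.2", sizeof("0000:03:00.2"),
+ *				  &domain, &bus, &devid, &function);
+ */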
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ f = fopen(filename, "r");
+ if (f == NULL)
+ return -1;
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
+{
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+ return -1;
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0)
+ return -1;
+
+ return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *fp;
+ char buf[BUFSIZ];
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ fp = fopen(filename, "r");
+ if (fp == NULL)
+ return -1;
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ goto error;
+ if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+ &phys_addr, &end_addr, &flags) < 0)
+ goto error;
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(fp);
+ return 0;
+
+error:
+ fclose(fp);
+ return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+ DIR *dir;
+ struct dirent *e;
+ char dirname_uio[PATH_MAX];
+ unsigned int uio_num;
+ int ret = -1;
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX
+ */
+ snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+ dir = opendir(dirname_uio);
+ if (dir == NULL) {
+		/* retry with the parent directory; the layout differs
+		 * across kernel versions
+		 */
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1;
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+ }
+ closedir(dir);
+ return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 00000000..7ed3bac4
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+ struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 00000000..6984913f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+
+#define CCP_BASE_SYM_CRYPTO_CAPABILITIES \
+ { /* SHA1 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 HMAC*/ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 1, \
+ .max = 144, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 1, \
+ .max = 136, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 1, \
+ .max = 104, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 1, \
+ .max = 72, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES-CMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES ECB */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_ECB, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 65535, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ }, } \
+ }, } \
+ }
+
+#define CCP_EXTRA_SYM_CRYPTO_CAPABILITIES \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }
+
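+/*
+ * Two capability tables: the base table covers what the CCP hardware
+ * implements; the "complete" table additionally advertises MD5-HMAC,
+ * which is only available when authentication is offloaded to the CPU
+ * (ccp_auth_opt=1).
+ */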
+static const struct rte_cryptodev_capabilities ccp_crypto_cap[] = {
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap_complete[] = {
+ CCP_EXTRA_SYM_CRYPTO_CAPABILITIES,
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+ return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = ccp_crypto_cap;
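+ /* advertise the extended (MD5-HMAC) set when auth runs on the CPU */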
+ if (internals->auth_opt == 1)
+ dev_info->capabilities = ccp_crypto_cap_complete;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct ccp_qp *qp;
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+ rte_ring_free(qp->processed_pkts);
+ rte_mempool_free(qp->batch_mp);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct ccp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "ccp_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
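+/* Reuse an existing ring with the qp's name if it is large enough;
+ * otherwise create a new single-producer/single-consumer ring.
+ */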
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->size >= ring_size) {
+ CCP_LOG_INFO(
+ "Reusing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+ CCP_LOG_INFO(
+ "Unable to reuse ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+ struct ccp_qp *qp;
+ int retval = 0;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ ccp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ CCP_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->dev = dev;
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = ccp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ CCP_LOG_ERR("Failed to create unique name for ccp qp");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ CCP_LOG_ERR("Failed to create batch info ring");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = session_pool;
+
+ /* mempool for batch info */
+ qp->batch_mp = rte_mempool_create(
+ qp->name,
+ qp_conf->nb_descriptors,
+ sizeof(struct ccp_batch_info),
+ RTE_CACHE_LINE_SIZE,
+ 0, NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (qp->batch_mp == NULL)
+ goto qp_setup_cleanup;
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ dev->data->queue_pairs[qp_id] = NULL;
+ if (qp)
+ rte_free(qp);
+ return -1;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+ccp_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ void *sess_private_data;
+ struct ccp_private *internals;
+
+ if (unlikely(sess == NULL || xform == NULL)) {
+ CCP_LOG_ERR("Invalid session struct or xform");
+ return -ENOMEM;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CCP_LOG_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ internals = (struct ccp_private *)dev->data->dev_private;
+ ret = ccp_set_session_parameters(sess_private_data, xform, internals);
+ if (ret != 0) {
+ CCP_LOG_ERR("failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+static void
+ccp_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_mempool_put(sess_mp, sess_priv);
+ memset(sess_priv, 0, sizeof(struct ccp_session));
+ set_sym_session_private_data(sess, index, NULL);
+ }
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+ .dev_configure = ccp_pmd_config,
+ .dev_start = ccp_pmd_start,
+ .dev_stop = ccp_pmd_stop,
+ .dev_close = ccp_pmd_close,
+
+ .stats_get = ccp_pmd_stats_get,
+ .stats_reset = ccp_pmd_stats_reset,
+
+ .dev_infos_get = ccp_pmd_info_get,
+
+ .queue_pair_setup = ccp_pmd_qp_setup,
+ .queue_pair_release = ccp_pmd_qp_release,
+ .queue_pair_count = ccp_pmd_qp_count,
+
+ .sym_session_get_size = ccp_pmd_sym_session_get_size,
+ .sym_session_configure = ccp_pmd_sym_session_configure,
+ .sym_session_clear = ccp_pmd_sym_session_clear,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 00000000..79752f68
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+#include "ccp_crypto.h"
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/** Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS 1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+#include "ccp_dev.h"
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ uint8_t crypto_num_dev; /**< Number of working crypto devices */
+ bool auth_opt; /**< Authentication offload option */
+ struct ccp_device *last_dev; /**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+ struct rte_crypto_op *op[CCP_MAX_BURST];
+ /**< op table populated at enqueue time from the app */
+ int op_idx;
+ struct ccp_queue *cmd_q;
+ uint16_t opcnt;
+ /**< number of crypto ops in the batch */
+ int desccnt;
+ /**< number of ccp queue descriptors */
+ uint32_t head_offset;
+ /**< ccp queue head/tail offsets at enqueue time */
+ uint32_t tail_offset;
+ uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+ phys_addr_t lsb_buf_phys;
+ /**< LSB intermediate buf for passthru */
+ int lsb_buf_idx;
+ uint16_t auth_ctr;
+ /**< auth only ops batch for CPU based auth */
+} __rte_cache_aligned;
+
+/** CCP crypto queue pair */
+struct ccp_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *batch_mp;
+ /**< Mempool for batch info */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ struct ccp_batch_info *b_info;
+ /**< Store ops pulled out of queue */
+ struct rte_cryptodev *dev;
+ /**< rte crypto device to which this qp belongs */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+/** Device-specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/meson.build b/drivers/crypto/ccp/meson.build
new file mode 100644
index 00000000..e43b0059
--- /dev/null
+++ b/drivers/crypto/ccp/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
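+# the CCP PMD links against OpenSSL's libcrypto; skip the build if it
+# is not available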
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+deps += 'bus_vdev'
+deps += 'bus_pci'
+
+sources = files('rte_ccp_pmd.c',
+ 'ccp_crypto.c',
+ 'ccp_dev.c',
+ 'ccp_pci.c',
+ 'ccp_pmd_ops.c')
+
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 00000000..92d8a955
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "ccp_crypto.h"
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global static flag tracking whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
+uint8_t ccp_cryptodev_driver_id;
+
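+/* vdev init parameters: the generic cryptodev defaults plus the
+ * CCP-specific authentication offload option
+ */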
+struct ccp_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+ bool auth_opt;
+};
+
+#define CCP_CRYPTODEV_PARAM_NAME ("name")
+#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id")
+#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
+#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
+
+const char *ccp_pmd_valid_params[] = {
+ CCP_CRYPTODEV_PARAM_NAME,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+};
+
+/** ccp pmd auth option */
+enum ccp_pmd_auth_opt {
+ CCP_PMD_AUTH_OPT_CCP = 0,
+ CCP_PMD_AUTH_OPT_CPU,
+};
+
+/** parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ CCP_LOG_ERR("Argument has to be positive.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** parse name argument */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CCP_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** parse authentication operation option */
+static int
+parse_auth_opt_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct ccp_pmd_init_params *params = extra_args;
+ int i;
+
+ i = atoi(value);
+ if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
+ CCP_LOG_ERR("Invalid ccp pmd auth option. "
+ "0->auth on CCP(default), "
+ "1->auth on CPU\n");
+ return -EINVAL;
+ }
+ params->auth_opt = i;
+ return 0;
+}
+
+static int
+ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ ccp_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+ &parse_auth_opt_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+{
+ struct ccp_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session == NULL))
+ return NULL;
+
+ sess = (struct ccp_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
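+ /* session-less op: take one object for the generic session
+ * and one for the CCP private data from the qp's session pool
+ */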
+ void *_sess;
+ void *_sess_private_data = NULL;
+ struct ccp_private *internals;
+
+ if (rte_mempool_get(qp->sess_mp, &_sess))
+ return NULL;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct ccp_session *)_sess_private_data;
+
+ internals = (struct ccp_private *)qp->dev->data->dev_private;
+ if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
+ internals) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ ccp_cryptodev_driver_id,
+ _sess_private_data);
+ }
+
+ return sess;
+}
+
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_session *sess = NULL;
+ struct ccp_qp *qp = queue_pair;
+ struct ccp_queue *cmd_q;
+ struct rte_cryptodev *dev = qp->dev;
+ uint16_t i, enq_cnt = 0, slots_req = 0;
+
+ if (nb_ops == 0)
+ return 0;
+
+ if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+ return 0;
+
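+ /* Resolve each op's session first and total the CCP descriptor
+ * slots required, so one hardware queue can be allotted for the
+ * whole burst.
+ */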
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_ccp_session(qp, ops[i]);
+ if (unlikely(sess == NULL) && (i == 0)) {
+ qp->qp_stats.enqueue_err_count++;
+ return 0;
+ } else if (sess == NULL) {
+ nb_ops = i;
+ break;
+ }
+ slots_req += ccp_compute_slot_count(sess);
+ }
+
+ cmd_q = ccp_allot_queue(dev, slots_req);
+ if (unlikely(cmd_q == NULL))
+ return 0;
+
+ enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+ qp->qp_stats.enqueued_count += enq_cnt;
+ return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_qp *qp = queue_pair;
+ uint16_t nb_dequeued = 0, i;
+
+ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+ /* Free session if a session-less crypto op */
+ for (i = 0; i < nb_dequeued; i++)
+ if (unlikely(ops[i]->sess_type ==
+ RTE_CRYPTO_OP_SESSIONLESS)) {
+ rte_mempool_put(qp->sess_mp,
+ ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+ },
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+ },
+ {.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+ const char *name;
+
+ ccp_pmd_init_done = 0;
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct ccp_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct ccp_private *internals;
+ uint8_t cryptodev_cnt = 0;
+
+ if (init_params->def_p.name[0] == '\0')
+ snprintf(init_params->def_p.name,
+ sizeof(init_params->def_p.name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_create(init_params->def_p.name,
+ &vdev->device,
+ &init_params->def_p);
+ if (dev == NULL) {
+ CCP_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+ if (cryptodev_cnt == 0) {
+ CCP_LOG_ERR("failed to detect CCP crypto device");
+ goto init_error;
+ }
+
+ printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+ dev->driver_id = ccp_cryptodev_driver_id;
+
+ /* register rx/tx burst functions for data path */
+ dev->dev_ops = ccp_pmd_ops;
+ dev->enqueue_burst = ccp_pmd_enqueue_burst;
+ dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
+ internals->auth_opt = init_params->auth_opt;
+ internals->crypto_num_dev = cryptodev_cnt;
+
+ return 0;
+
+init_error:
+ CCP_LOG_ERR("driver %s: %s() failed",
+ init_params->def_p.name, __func__);
+ cryptodev_ccp_remove(vdev);
+
+ return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+ int rc = 0;
+ const char *name;
+ struct ccp_pmd_init_params init_params = {
+ .def_p = {
+ "",
+ sizeof(struct ccp_private),
+ rte_socket_id(),
+ CCP_PMD_MAX_QUEUE_PAIRS
+ },
+ .auth_opt = CCP_PMD_AUTH_OPT_CCP,
+ };
+ const char *input_args;
+
+ if (ccp_pmd_init_done) {
+ RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+ return -EFAULT;
+ }
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ ccp_pmd_parse_input_args(&init_params, input_args);
+ init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.def_p.socket_id);
+ RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+ init_params.def_p.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
+ ((init_params.auth_opt == 0) ? "CCP" : "CPU"));
+
+ rc = cryptodev_ccp_create(name, vdev, &init_params);
+ if (rc)
+ return rc;
+ ccp_pmd_init_done = 1;
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+ .probe = cryptodev_ccp_probe,
+ .remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int> "
+ "ccp_auth_opt=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
+ ccp_cryptodev_driver_id);
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index cb6c63e6..da3d8f84 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -18,13 +18,8 @@ LIB = librte_pmd_dpaa2_sec.a
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
CFLAGS += -D _GNU_SOURCE
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 9a790ddd..2a3c61c6 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -30,6 +30,9 @@
#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"
+/* Required types */
+typedef uint64_t dma_addr_t;
+
/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
@@ -56,6 +59,8 @@ enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
static uint8_t cryptodev_driver_id;
+int dpaa2_logtype_sec;
+
static inline int
build_proto_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
@@ -77,11 +82,11 @@ build_proto_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
- DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
/* save physical address of mbuf */
op->sym->aead.digest.phys_addr = mbuf->buf_iova;
- mbuf->buf_iova = (uint64_t)op;
+ mbuf->buf_iova = (size_t)op;
return 0;
}
@@ -113,12 +118,12 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -132,11 +137,11 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
"iv-len=%d data_off: 0x%x\n",
sym_op->aead.data.offset,
sym_op->aead.data.length,
- sym_op->aead.digest.length,
+ sess->digest_length,
sess->iv.length,
sym_op->m_src->data_off);
@@ -264,12 +269,12 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -297,11 +302,11 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
+ DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
"iv-len=%d data_off: 0x%x\n",
sym_op->aead.data.offset,
sym_op->aead.data.length,
- sym_op->aead.digest.length,
+ sess->digest_length,
sess->iv.length,
sym_op->m_src->data_off);
@@ -409,12 +414,12 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -428,16 +433,16 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sym_op->auth.digest.length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_SG_EXT(op_fle);
@@ -558,12 +563,12 @@ build_authenc_fd(dpaa2_sec_session *sess,
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -591,15 +596,16 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sess->digest_length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -686,13 +692,13 @@ static inline int build_auth_sg_fd(
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
/* first FLE entry used to store mbuf and session ctxt */
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
sge = fle + 3;
@@ -762,7 +768,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -772,8 +778,8 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
if (likely(bpid < MAX_BPID)) {
@@ -859,13 +865,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (!fle) {
- RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
/* first FLE entry used to store mbuf and session ctxt */
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -873,12 +879,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
flc = &priv->flc_desc[0].flc;
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* o/p fle */
DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -901,10 +908,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
}
DPAA2_SET_FLE_FIN(sge);
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
/* i/p fle */
mbuf = sym_op->m_src;
@@ -944,13 +951,14 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -976,7 +984,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -986,8 +994,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
@@ -1012,12 +1020,13 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x",
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
@@ -1025,10 +1034,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle->length = sym_op->cipher.data.length + sess->iv.length;
- PMD_TX_LOG(DEBUG,
- "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
fle++;
@@ -1049,13 +1058,14 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
- PMD_TX_LOG(DEBUG,
- "CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -1070,7 +1080,7 @@ build_sec_fd(struct rte_crypto_op *op,
PMD_INIT_FUNC_TRACE();
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
- sess = (dpaa2_sec_session *)get_session_private_data(
+ sess = (dpaa2_sec_session *)get_sym_session_private_data(
op->sym->session, cryptodev_driver_id);
else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
sess = (dpaa2_sec_session *)get_sec_session_private_data(
@@ -1095,7 +1105,7 @@ build_sec_fd(struct rte_crypto_op *op,
break;
case DPAA2_SEC_HASH_CIPHER:
default:
- RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ DPAA2_SEC_ERR("error: Unsupported session");
}
} else {
switch (sess->ctxt_type) {
@@ -1116,7 +1126,7 @@ build_sec_fd(struct rte_crypto_op *op,
break;
case DPAA2_SEC_HASH_CIPHER:
default:
- RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ DPAA2_SEC_ERR("error: Unsupported session");
}
}
return ret;
@@ -1143,7 +1153,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
return 0;
if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
+ DPAA2_SEC_ERR("sessionless crypto op not supported");
return 0;
}
/*Prepare enqueue descriptor*/
@@ -1152,14 +1162,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
qbman_eq_desc_set_response(&eqdesc, 0, 0);
qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
- if (!DPAA2_PER_LCORE_SEC_DPIO) {
- ret = dpaa2_affine_qbman_swp_sec();
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_SEC_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ swp = DPAA2_PER_LCORE_PORTAL;
while (nb_ops) {
frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
@@ -1171,8 +1181,8 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
if (ret) {
- PMD_DRV_LOG(ERR, "error: Improper packet"
- " contents for crypto operation\n");
+ DPAA2_SEC_ERR("error: Improper packet contents"
+ " for crypto operation");
goto skip_tx;
}
ops++;
@@ -1206,7 +1216,7 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
- op = (struct rte_crypto_op *)mbuf->buf_iova;
+ op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
mbuf->buf_iova = op->sym->aead.digest.phys_addr;
op->sym->aead.digest.phys_addr = 0L;
@@ -1236,8 +1246,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
- PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
- fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+ fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
/* we are using the first FLE entry to store Mbuf.
* Currently we donot know which FLE has the mbuf stored.
@@ -1248,11 +1258,10 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
if (unlikely(DPAA2_GET_FD_IVP(fd))) {
/* TODO complete it. */
- RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
+ DPAA2_SEC_ERR("error: non inline buffer");
return NULL;
}
- op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
- DPAA2_GET_FLE_ADDR((fle - 1)));
+ op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
 /* Prefetch op */
src = op->sym->m_src;
@@ -1264,19 +1273,19 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
} else
dst = src;
- PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
- (void *)dst, dst->buf_addr);
-
- PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
+ " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
+ (void *)dst,
+ dst->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
/* free the fle memory */
if (likely(rte_pktmbuf_is_contiguous(src))) {
- priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+ priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
rte_mempool_put(priv->fle_pool, (void *)(fle-1));
} else
rte_free((void *)(fle-1));
@@ -1300,14 +1309,14 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
- if (!DPAA2_PER_LCORE_SEC_DPIO) {
- ret = dpaa2_affine_qbman_swp_sec();
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_SEC_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ swp = DPAA2_PER_LCORE_PORTAL;
dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
@@ -1322,8 +1331,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
/*Issue a volatile dequeue command. */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- RTE_LOG(WARNING, PMD,
- "SEC VDQ command is not issued : QBMAN busy\n");
+ DPAA2_SEC_WARN(
+ "SEC VDQ command is not issued : QBMAN busy");
/* Portal was busy, try again */
continue;
}
@@ -1355,7 +1364,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
status = (uint8_t)qbman_result_DQ_flags(dq_storage);
if (unlikely(
(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
- PMD_RX_LOG(DEBUG, "No frame is delivered");
+ DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
continue;
}
}
@@ -1365,8 +1374,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
- RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
- fd->simple.frc);
+ DPAA2_SEC_ERR("SEC returned Error - %x",
+ fd->simple.frc);
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
} else {
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -1378,7 +1387,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
dpaa2_qp->rx_vq.rx_pkts += num_rx;
- PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
+ DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
/*Return the total number of packets received to DPAA2 app*/
return num_rx;
}
@@ -1420,11 +1429,11 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
/* If qp is already in use free ring memory and qp metadata. */
if (dev->data->queue_pairs[qp_id] != NULL) {
- PMD_DRV_LOG(INFO, "QP already setup");
+ DPAA2_SEC_INFO("QP already setup");
return 0;
}
- PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
dev, qp_id, qp_conf);
memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
@@ -1432,7 +1441,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
RTE_CACHE_LINE_SIZE);
if (!qp) {
- RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+ DPAA2_SEC_ERR("malloc failed for rx/tx queues");
return -1;
}
@@ -1442,45 +1451,25 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
sizeof(struct queue_storage_info_t),
RTE_CACHE_LINE_SIZE);
if (!qp->rx_vq.q_storage) {
- RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+ DPAA2_SEC_ERR("malloc failed for q_storage");
return -1;
}
memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
- RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+ DPAA2_SEC_ERR("Unable to allocate dequeue storage");
return -1;
}
dev->data->queue_pairs[qp_id] = qp;
cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
- cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+ cfg.user_ctx = (size_t)(&qp->rx_vq);
retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
qp_id, &cfg);
return retcode;
}
-/** Start queue pair */
-static int
-dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
-/** Stop queue pair */
-static int
-dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
@@ -1492,7 +1481,7 @@ dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
/** Returns the size of the aesni gcm session structure */
static unsigned int
-dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
PMD_INIT_FUNC_TRACE();
@@ -1517,7 +1506,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1528,7 +1517,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -1;
}
@@ -1536,7 +1525,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
memcpy(session->cipher_key.data, xform->cipher.key.data,
xform->cipher.key.length);
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -1571,11 +1560,11 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_ZUC_EEA3:
case RTE_CRYPTO_CIPHER_NULL:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
xform->cipher.algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
xform->cipher.algo);
goto error_out;
}
@@ -1586,7 +1575,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
&cipherdata, NULL, session->iv.length,
session->dir);
if (bufsize < 0) {
- RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ DPAA2_SEC_ERR("Crypto: Descriptor build failed");
goto error_out;
}
flc->dhr = 0;
@@ -1595,16 +1584,15 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
- i, priv->flc_desc[0].desc[i]);
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1621,7 +1609,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
{
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
@@ -1633,7 +1621,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1643,7 +1631,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("Unable to allocate memory for auth key");
rte_free(priv);
return -1;
}
@@ -1651,7 +1639,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, xform->auth.key.data,
xform->auth.key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -1703,12 +1691,12 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- xform->auth.algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un",
+ xform->auth.algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- xform->auth.algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ xform->auth.algo);
goto error_out;
}
session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
@@ -1717,18 +1705,22 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1, 0, &authdata, !session->dir,
session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
- i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
return 0;
@@ -1747,7 +1739,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo aeaddata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
@@ -1765,7 +1757,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1775,7 +1767,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for aead key\n");
+ DPAA2_SEC_ERR("No Memory for aead key");
rte_free(priv);
return -1;
}
@@ -1786,7 +1778,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_key.length = aead_xform->key.length;
ctxt->auth_only_len = aead_xform->aad_length;
- aeaddata.key = (uint64_t)session->aead_key.data;
+ aeaddata.key = (size_t)session->aead_key.data;
aeaddata.keylen = session->aead_key.length;
aeaddata.key_enc_flags = 0;
aeaddata.key_type = RTA_DATA_IMM;
@@ -1798,12 +1790,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
- aead_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
+ aead_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
- aead_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
goto error_out;
}
session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
@@ -1816,7 +1808,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[1], 1);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
goto error_out;
}
if (priv->flc_desc[0].desc[1] & 1) {
@@ -1838,16 +1830,21 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
priv->flc_desc[0].desc, 1, 0,
&aeaddata, session->iv.length,
session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
+
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n",
i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1867,7 +1864,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_cipher_xform *cipher_xform;
@@ -1899,7 +1896,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1909,7 +1906,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -1;
}
@@ -1917,7 +1914,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
return -1;
@@ -1928,7 +1925,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -1980,15 +1977,15 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
goto error_out;
}
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -2014,12 +2011,12 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
goto error_out;
}
session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -2033,7 +2030,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[2], 2);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
goto error_out;
}
if (priv->flc_desc[0].desc[2] & 1) {
@@ -2059,21 +2056,25 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
ctxt->auth_only_len,
session->digest_length,
session->dir);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
} else {
- RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
+ DPAA2_SEC_ERR("Hash before cipher not supported");
goto error_out;
}
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
i, priv->flc_desc[0].desc[i]);
return 0;
@@ -2094,7 +2095,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct\n");
+ DPAA2_SEC_ERR("Invalid session struct");
return -1;
}
@@ -2130,7 +2131,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa2_sec_aead_init(dev, xform, session);
} else {
- RTE_LOG(ERR, PMD, "Invalid crypto type\n");
+ DPAA2_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
@@ -2150,7 +2151,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
struct ipsec_encap_pdb encap_pdb;
struct ipsec_decap_pdb decap_pdb;
struct alginfo authdata, cipherdata;
- unsigned int bufsize;
+ int bufsize;
struct sec_flow_context *flc;
PMD_INIT_FUNC_TRACE();
@@ -2168,7 +2169,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT");
+ DPAA2_SEC_ERR("No memory for priv CTXT");
return -ENOMEM;
}
@@ -2180,7 +2181,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -ENOMEM;
}
@@ -2191,7 +2192,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
return -ENOMEM;
@@ -2202,7 +2203,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -2253,15 +2254,15 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
goto out;
}
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -2289,12 +2290,12 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
goto out;
}
@@ -2340,15 +2341,21 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
1, 0, &decap_pdb, &cipherdata, &authdata);
} else
goto out;
+
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto out;
+ }
+
flc->word1_sdl = (uint8_t)bufsize;
/* Enable the stashing control bit */
DPAA2_SET_FLC_RSC(flc);
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq) | 0x14);
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
/* Set EWS bit i.e. enable write-safe */
@@ -2379,8 +2386,7 @@ dpaa2_sec_security_session_create(void *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
@@ -2395,9 +2401,7 @@ dpaa2_sec_security_session_create(void *dev,
return -EINVAL;
}
if (ret != 0) {
- PMD_DRV_LOG(ERR,
- "DPAA2 PMD: failed to configure session parameters");
-
+ DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
@@ -2432,7 +2436,7 @@ dpaa2_sec_security_session_destroy(void *dev __rte_unused,
}
static int
-dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -2441,22 +2445,19 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
- "session parameters");
-
+ DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -2464,12 +2465,12 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-dpaa2_sec_session_clear(struct rte_cryptodev *dev,
+dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
PMD_INIT_FUNC_TRACE();
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
if (sess_priv) {
@@ -2478,7 +2479,7 @@ dpaa2_sec_session_clear(struct rte_cryptodev *dev,
rte_free(s->auth_key.data);
memset(sess, 0, sizeof(dpaa2_sec_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -2511,14 +2512,13 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
- priv->hw_id);
+ DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
+ priv->hw_id);
goto get_attr_failure;
}
ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
if (ret) {
- PMD_INIT_LOG(ERR,
- "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
+ DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
goto get_attr_failure;
}
for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
@@ -2526,14 +2526,14 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
&rx_attr);
dpaa2_q->fqid = rx_attr.fqid;
- PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
+ DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
}
for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
dpaa2_q = &qp[i]->tx_vq;
dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
&tx_attr);
dpaa2_q->fqid = tx_attr.fqid;
- PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
+ DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
}
return 0;
@@ -2553,15 +2553,14 @@ dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
+ DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
priv->hw_id);
return;
}
ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
if (ret < 0) {
- PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
- ret);
+ DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
return;
}
}
@@ -2585,8 +2584,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev)
/*Close the device at underlying layer*/
ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
- " error code %d\n", ret);
+ DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
return -1;
}
@@ -2608,7 +2606,8 @@ dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa2_sec_capabilities;
- info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
info->driver_id = cryptodev_driver_id;
}
}
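Reporting max_nb_sessions as 0 shifts session-pool sizing to the application: 0 means the device imposes no limit, so the application picks its own pool size. A sketch of the consumer side (dev_id and APP_DEFAULT_NB_SESSIONS are hypothetical):

#include <rte_cryptodev.h>

#define APP_DEFAULT_NB_SESSIONS 2048 /* application's own choice */

static uint32_t app_nb_sessions(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	/* 0 means "no device limit"; fall back to the app default. */
	return info.sym.max_nb_sessions ? info.sym.max_nb_sessions :
		APP_DEFAULT_NB_SESSIONS;
}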
@@ -2626,12 +2625,12 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (stats == NULL) {
- PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ DPAA2_SEC_ERR("Invalid stats ptr NULL");
return;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
continue;
}
@@ -2644,16 +2643,16 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
&counters);
if (ret) {
- PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
+ DPAA2_SEC_ERR("SEC counters failed");
} else {
- PMD_DRV_LOG(INFO, "dpseci hw stats:"
- "\n\tNumber of Requests Dequeued = %lu"
- "\n\tNumber of Outbound Encrypt Requests = %lu"
- "\n\tNumber of Inbound Decrypt Requests = %lu"
- "\n\tNumber of Outbound Bytes Encrypted = %lu"
- "\n\tNumber of Outbound Bytes Protected = %lu"
- "\n\tNumber of Inbound Bytes Decrypted = %lu"
- "\n\tNumber of Inbound Bytes Validated = %lu",
+ DPAA2_SEC_INFO("dpseci hardware stats:"
+ "\n\tNum of Requests Dequeued = %" PRIu64
+ "\n\tNum of Outbound Encrypt Requests = %" PRIu64
+ "\n\tNum of Inbound Decrypt Requests = %" PRIu64
+ "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
+ "\n\tNum of Outbound Bytes Protected = %" PRIu64
+ "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
+ "\n\tNum of Inbound Bytes Validated = %" PRIu64,
counters.dequeued_requests,
counters.ob_enc_requests,
counters.ib_dec_requests,
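The switch from %lu to PRIu64 in the stats message is a 32-bit portability fix: the counters are uint64_t, and %lu only matches that type on LP64 targets. A minimal illustration:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t dequeued_requests = 42;

	/* %lu would mismatch uint64_t on 32-bit; PRIu64 always fits. */
	printf("Num of Requests Dequeued = %" PRIu64 "\n",
	       dequeued_requests);
	return 0;
}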
@@ -2675,7 +2674,7 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
continue;
}
qp[i]->tx_vq.rx_pkts = 0;
@@ -2697,12 +2696,10 @@ static struct rte_cryptodev_ops crypto_ops = {
.stats_reset = dpaa2_sec_stats_reset,
.queue_pair_setup = dpaa2_sec_queue_pair_setup,
.queue_pair_release = dpaa2_sec_queue_pair_release,
- .queue_pair_start = dpaa2_sec_queue_pair_start,
- .queue_pair_stop = dpaa2_sec_queue_pair_stop,
.queue_pair_count = dpaa2_sec_queue_pair_count,
- .session_get_size = dpaa2_sec_session_get_size,
- .session_configure = dpaa2_sec_session_configure,
- .session_clear = dpaa2_sec_session_clear,
+ .sym_session_get_size = dpaa2_sec_sym_session_get_size,
+ .sym_session_configure = dpaa2_sec_sym_session_configure,
+ .sym_session_clear = dpaa2_sec_sym_session_clear,
};
static const struct rte_security_capability *
@@ -2729,8 +2726,8 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
rte_mempool_free(internals->fle_pool);
- PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
- dev->data->name, rte_socket_id());
+ DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
return 0;
}
@@ -2751,7 +2748,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
if (dpaa2_dev == NULL) {
- PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
+ DPAA2_SEC_ERR("DPAA2 SEC device not found");
return -1;
}
hw_id = dpaa2_dev->object_id;
@@ -2765,10 +2762,13 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_SECURITY |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
internals = cryptodev->data->dev_private;
- internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
/*
* For secondary processes, we don't initialise any further as primary
@@ -2776,7 +2776,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ DPAA2_SEC_DEBUG("Device already init by primary process");
return 0;
}
@@ -2794,21 +2794,21 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
sizeof(struct fsl_mc_io), 0);
if (!dpseci) {
- PMD_INIT_LOG(ERR,
- "Error in allocating the memory for dpsec object");
+ DPAA2_SEC_ERR(
+ "Error in allocating the memory for dpsec object");
return -1;
}
dpseci->regs = rte_mcp_ptr_list[0];
retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
if (retcode != 0) {
- PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
- retcode);
+ DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
+ retcode);
goto init_error;
}
retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
if (retcode != 0) {
- PMD_INIT_LOG(ERR,
+ DPAA2_SEC_ERR(
"Cannot get dpsec device attributed: Error = %x",
retcode);
goto init_error;
@@ -2828,15 +2828,15 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
if (!internals->fle_pool) {
- RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
goto init_error;
}
- PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
return 0;
init_error:
- PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+ DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
/* dpaa2_sec_uninit(crypto_dev_name); */
return -EFAULT;
@@ -2866,7 +2866,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
if (cryptodev->data->dev_private == NULL)
rte_panic("Cannot allocate memzone for private "
- "device data");
+ "device data");
}
dpaa2_dev->cryptodev = cryptodev;
@@ -2919,5 +2919,13 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
static struct cryptodev_driver dpaa2_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
+ rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+
+RTE_INIT(dpaa2_sec_init_log)
+{
+ /* Register the crypto PMD log type */
+ dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
+ if (dpaa2_logtype_sec >= 0)
+ rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
+}
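With the log type registered dynamically, per-driver verbosity becomes a runtime decision rather than a rebuild with the old RTE_LIBRTE_DPAA2_SEC_DEBUG_* config options; it can be raised from the EAL command line (e.g. --log-level=pmd.crypto.dpaa2,8) or from application code, as in this sketch:

#include <rte_log.h>

/* Raise dpaa2_sec logging to DEBUG at runtime. rte_log_register()
 * returns the existing id when the name is already registered. */
static void app_enable_dpaa2_sec_debug(void)
{
	int logtype = rte_log_register("pmd.crypto.dpaa2");

	if (logtype >= 0)
		rte_log_set_level(logtype, RTE_LOG_DEBUG);
}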
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
index 23251141..8a990442 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -8,37 +8,35 @@
#ifndef _DPAA2_SEC_LOGS_H_
#define _DPAA2_SEC_LOGS_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+extern int dpaa2_logtype_sec;
+
+#define DPAA2_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_sec, "dpaa2_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA2_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_sec, "dpaa2_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_DEBUG(">>")
+
+#define DPAA2_SEC_INFO(fmt, args...) \
+ DPAA2_SEC_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_ERR(fmt, args...) \
+ DPAA2_SEC_LOG(ERR, fmt, ## args)
+#define DPAA2_SEC_WARN(fmt, args...) \
+ DPAA2_SEC_LOG(WARNING, fmt, ## args)
+
+/* DP logs, compiled out when the build-time log level is below the requested one */
+#define DPAA2_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_SEC_DP_DEBUG(fmt, args...) \
+ DPAA2_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_SEC_DP_INFO(fmt, args...) \
+ DPAA2_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_DP_WARN(fmt, args...) \
+ DPAA2_SEC_DP_LOG(WARNING, fmt, ## args)
+
#endif /* _DPAA2_SEC_LOGS_H_ */
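The split in this header matters for performance: the DPAA2_SEC_* macros are always compiled in and filtered at runtime through the dynamic log level, while the DPAA2_SEC_DP_* variants go through RTE_LOG_DP and are compiled out entirely when the build-time data-path level is below the requested one, keeping the hot enqueue/dequeue path cost-free by default. Illustrative uses, assuming this header is included (qp_id and nb_frames are placeholders):

static void log_examples(uint16_t qp_id, unsigned int nb_frames)
{
	/* Control path: always compiled in, filtered at runtime. */
	DPAA2_SEC_ERR("queue pair %u setup failed", qp_id);

	/* Data path: dropped at compile time in default builds; note
	 * the explicit \n, since the DP macros do not append one. */
	DPAA2_SEC_DP_DEBUG("enqueued %u frames\n", nb_frames);
}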
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index e8ac95ba..d015be1e 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -23,8 +23,6 @@ struct dpaa2_sec_dev_private {
uint16_t token; /**< Token required by DPxxx objects */
unsigned int max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned int max_nb_sessions;
- /**< Max number of sessions supported by device */
};
struct dpaa2_sec_qp {
@@ -185,9 +183,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 1,
.max = 16,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -206,9 +204,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -227,9 +225,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -248,9 +246,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 32,
- .max = 32,
- .increment = 0
+ .min = 1,
+ .max = 32,
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -269,9 +267,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 1,
.max = 48,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -290,9 +288,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
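Relaxing digest_size to min = 1 with increment = 1 lets the PMD advertise truncated MACs, e.g. the 12-byte HMAC-SHA256-96 digest common in IPsec, which the old exact-size-only capability rejected. A sketch of an auth transform that now falls inside the advertised range (the key material is illustrative):

#include <rte_crypto_sym.h>

static uint8_t hmac_key[32]; /* illustrative key material */

static struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
		.key = { .data = hmac_key, .length = sizeof(hmac_key) },
		.digest_length = 12, /* truncated; now within [1, 32] */
	},
};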
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
new file mode 100644
index 00000000..01afc587
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['security', 'mempool_dpaa2']
+sources = files('dpaa2_sec_dpseci.c',
+ 'mc/dpseci.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'hw')
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
index fe2c5932..9be44704 100644
--- a/drivers/crypto/dpaa_sec/Makefile
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -12,13 +12,8 @@ LIB = librte_pmd_dpaa_sec.a
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -D _GNU_SOURCE
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 18681cf3..f571050b 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP
+ * Copyright 2017-2018 NXP
*
*/
@@ -39,6 +39,8 @@
enum rta_sec_era rta_sec_era;
+int dpaa_logtype_sec;
+
static uint8_t cryptodev_driver_id;
static __thread struct rte_crypto_op **dpaa_sec_ops;
@@ -53,7 +55,7 @@ dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
if (!ctx->fd_status) {
ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
- PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
+ DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
@@ -69,7 +71,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
if (!ctx || retval) {
- PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
+ DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
return NULL;
}
/*
@@ -84,7 +86,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
ctx->ctx_pool = ses->ctx_pool;
- ctx->vtop_offset = (uint64_t) ctx
+ ctx->vtop_offset = (size_t) ctx
- rte_mempool_virt2iova(ctx);
return ctx;
@@ -93,43 +95,18 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- uint64_t vaddr_64, paddr;
- int i;
-
- vaddr_64 = (uint64_t)vaddr;
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (vaddr_64 >= memseg[i].addr_64 &&
- vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
- paddr = memseg[i].iova +
- (vaddr_64 - memseg[i].addr_64);
-
- return (rte_iova_t)paddr;
- }
- }
- return (rte_iova_t)(NULL);
-}
+ const struct rte_memseg *ms;
-/* virtual address conversin when mempool support is available for ctx */
-static inline phys_addr_t
-dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
-{
- return (uint64_t)vaddr - ctx->vtop_offset;
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+ return (size_t)NULL;
}
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- int i;
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].iova &&
- (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
- return (void *)(memseg[i].addr_64 +
- (paddr - memseg[i].iova));
- }
- return NULL;
+ return rte_mem_iova2virt(paddr);
}
static void
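The rewritten dpaa_mem_vtop()/dpaa_mem_ptov() drop the open-coded walk over rte_eal_get_physmem_layout(), which the 18.05 dynamic-memory rework superseded, in favour of the EAL lookup helpers. A hedged sketch of the resulting virtual/IOVA round trip for an EAL-managed buffer:

#include <rte_memory.h>
#include <rte_common.h>

/* Returns 0 if vaddr survives a virt -> iova -> virt round trip. */
static int check_round_trip(void *vaddr)
{
	const struct rte_memseg *ms = rte_mem_virt2memseg(vaddr, NULL);
	rte_iova_t iova;

	if (ms == NULL)
		return -1; /* not EAL-managed memory */
	iova = ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
	return rte_mem_iova2virt(iova) == vaddr ? 0 : -1;
}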
@@ -137,8 +114,8 @@ ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
struct qman_fq *fq,
const struct qm_mr_entry *msg)
{
- RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
- fq->fqid, msg->ern.rc, msg->ern.seqnum);
+ DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
+ fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* initialize the queue with dest chan as caam chan so that
@@ -166,11 +143,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
fq_in->cb.ern = ern_sec_fq_handler;
- PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+ DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
ret = qman_init_fq(fq_in, flags, &fq_opts);
if (unlikely(ret != 0))
- PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
+ DPAA_SEC_ERR("qman_init_fq failed %d", ret);
return ret;
}
@@ -229,7 +206,7 @@ dpaa_sec_init_tx(struct qman_fq *fq)
ret = qman_create_fq(0, flags, fq);
if (unlikely(ret)) {
- PMD_INIT_LOG(ERR, "qman_create_fq failed");
+ DPAA_SEC_ERR("qman_create_fq failed");
return ret;
}
@@ -244,7 +221,7 @@ dpaa_sec_init_tx(struct qman_fq *fq)
ret = qman_init_fq(fq, 0, &opts);
if (unlikely(ret)) {
- PMD_INIT_LOG(ERR, "unable to init caam source fq!");
+ DPAA_SEC_ERR("unable to init caam source fq!");
return ret;
}
@@ -336,7 +313,7 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
+ DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
}
}
@@ -365,7 +342,7 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
alginfo_c->algmode = OP_ALG_AAI_CTR;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
+ DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
}
}
@@ -378,7 +355,7 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
alginfo->algmode = OP_ALG_AAI_GCM;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
+ DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
}
}
@@ -388,7 +365,7 @@ static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
- uint32_t shared_desc_len = 0;
+ int32_t shared_desc_len = 0;
struct sec_cdb *cdb = &ses->cdb;
int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
@@ -402,11 +379,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ DPAA_SEC_ERR("not supported cipher alg");
return -ENOTSUP;
}
- alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
@@ -420,11 +397,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else if (is_auth_only(ses)) {
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported auth alg\n");
+ DPAA_SEC_ERR("not supported auth alg");
return -ENOTSUP;
}
- alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
@@ -436,10 +413,10 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else if (is_aead(ses)) {
caam_aead_alg(ses, &alginfo);
if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported aead alg\n");
+ DPAA_SEC_ERR("not supported aead alg");
return -ENOTSUP;
}
- alginfo.key = (uint64_t)ses->aead_key.data;
+ alginfo.key = (size_t)ses->aead_key.data;
alginfo.keylen = ses->aead_key.length;
alginfo.key_enc_flags = 0;
alginfo.key_type = RTA_DATA_IMM;
@@ -459,22 +436,22 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ DPAA_SEC_ERR("not supported cipher alg");
return -ENOTSUP;
}
- alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported auth alg\n");
+ DPAA_SEC_ERR("not supported auth alg");
return -ENOTSUP;
}
- alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
@@ -487,21 +464,21 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
&cdb->sh_desc[2], 2);
if (err < 0) {
- PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
return err;
}
if (cdb->sh_desc[2] & 1)
alginfo_c.key_type = RTA_DATA_IMM;
else {
- alginfo_c.key = (uint64_t)dpaa_mem_vtop(
- (void *)alginfo_c.key);
+ alginfo_c.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_c.key);
alginfo_c.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
alginfo_a.key_type = RTA_DATA_IMM;
else {
- alginfo_a.key = (uint64_t)dpaa_mem_vtop(
- (void *)alginfo_a.key);
+ alginfo_a.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_a.key);
alginfo_a.key_type = RTA_DATA_PTR;
}
cdb->sh_desc[0] = 0;
@@ -530,6 +507,12 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
ses->digest_length, ses->dir);
}
}
+
+ if (shared_desc_len < 0) {
+ DPAA_SEC_ERR("error in preparing command block");
+ return shared_desc_len;
+ }
+
cdb->sh_hdr.hi.field.idlen = shared_desc_len;
cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
@@ -543,12 +526,25 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
struct qman_fq *fq;
unsigned int pkts = 0;
- int ret;
+ int num_rx_bufs, ret;
struct qm_dqrr_entry *dq;
+ uint32_t vdqcr_flags = 0;
fq = &qp->outq;
- ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
- DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+ /*
+ * For requests of fewer than four buffers, ask for the exact count
+ * by setting the QM_VDQCR_EXACT flag. Otherwise leave the flag
+ * unset; hardware may then return up to two more buffers than
+ * requested, so request two fewer to compensate.
+ */
+ if (nb_ops < 4) {
+ vdqcr_flags = QM_VDQCR_EXACT;
+ num_rx_bufs = nb_ops;
+ } else {
+ num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
+ }
+ ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
if (ret)
return 0;
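Restating the comment above as code: below four buffers the driver asks for the exact count; at four or more, hardware may deliver up to two extra frames, so two are shaved off the request and the request is capped at the device maximum. A sketch of the computation in isolation, assuming the driver headers for the two macros (the helper name is hypothetical):

static int vdq_request_size(int nb_ops, uint32_t *vdqcr_flags)
{
	if (nb_ops < 4) {
		*vdqcr_flags = QM_VDQCR_EXACT;	/* exactly nb_ops frames */
		return nb_ops;
	}
	*vdqcr_flags = 0;			/* HW may add up to 2 frames */
	return nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
		DPAA_MAX_DEQUEUE_NUM_FRAMES - 2 : nb_ops - 2;
}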
@@ -585,7 +581,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
if (!ctx->fd_status) {
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
- printf("\nSEC return err: 0x%x", ctx->fd_status);
+ DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
ops[pkts++] = op;
@@ -616,8 +612,8 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
extra_segs = 2;
if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
- MAX_SG_ENTRIES);
+ DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
return NULL;
}
ctx = dpaa_sec_alloc_ctx(ses);
@@ -640,7 +636,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = sym->auth.data.length;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = in_sg + 1;
@@ -664,7 +660,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
- start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+ start_addr = dpaa_mem_vtop(old_digest);
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
in_sg->length += ses->digest_length;
@@ -718,7 +714,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
if (is_decode(ses)) {
/* need to extend the input to a compound frame */
sg->extension = 1;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
sg->length = sym->auth.data.length + ses->digest_length;
sg->final = 1;
cpu_to_hw_sg(sg);
@@ -732,7 +728,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
cpu_to_hw_sg(sg);
/* let's check digest by hw */
- start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+ start_addr = dpaa_mem_vtop(old_digest);
sg++;
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
@@ -769,8 +765,8 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
- MAX_SG_ENTRIES);
+ DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
return NULL;
}
@@ -785,7 +781,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
out_sg = &cf->sg[0];
out_sg->extension = 1;
out_sg->length = sym->cipher.data.length;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -814,7 +810,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->length = sym->cipher.data.length + ses->iv.length;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* IV */
@@ -881,7 +877,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->extension = 1;
sg->final = 1;
sg->length = sym->cipher.data.length + ses->iv.length;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(sg);
sg = &cf->sg[2];
@@ -922,7 +918,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
req_segs++;
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
+ DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
@@ -947,7 +943,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -991,7 +987,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
@@ -1028,7 +1024,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1066,7 +1062,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
@@ -1111,7 +1107,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1125,7 +1121,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -1170,7 +1166,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
+ DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
@@ -1194,7 +1190,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1236,7 +1232,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
@@ -1266,7 +1262,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1303,7 +1299,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
@@ -1333,7 +1329,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1347,7 +1343,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
@@ -1422,7 +1418,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
struct rte_crypto_op *op;
struct dpaa_sec_job *cf;
dpaa_sec_session *ses;
- struct dpaa_sec_op_ctx *ctx;
uint32_t auth_only_len;
struct qman_fq *inq[DPAA_SEC_BURST];
@@ -1434,7 +1429,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
switch (op->sess_type) {
case RTE_CRYPTO_OP_WITH_SESSION:
ses = (dpaa_sec_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
break;
@@ -1444,15 +1439,15 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
op->sym->sec_session);
break;
default:
- PMD_TX_LOG(ERR,
+ DPAA_SEC_DP_ERR(
"sessionless crypto op not supported");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
}
if (unlikely(!ses->qp || ses->qp != qp)) {
- PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
- ses->qp, qp);
+ DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
+ ses->qp, qp);
if (dpaa_sec_attach_sess_q(qp, ses)) {
frames_to_send = loop;
nb_ops = loop;
@@ -1475,7 +1470,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
} else if (is_proto_ipsec(ses)) {
cf = build_proto(op, ses);
} else {
- PMD_TX_LOG(ERR, "not supported sec op");
+ DPAA_SEC_DP_ERR(
"unsupported sec op");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
@@ -1491,7 +1486,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
} else if (is_auth_cipher(ses)) {
cf = build_cipher_auth_sg(op, ses);
} else {
- PMD_TX_LOG(ERR, "not supported sec op");
+ DPAA_SEC_DP_ERR("not supported ops");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
@@ -1507,8 +1502,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
inq[loop] = ses->inq;
fd->opaque_addr = 0;
fd->cmd = 0;
- ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
- qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
+ qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
fd->_format1 = qm_fd_compound;
fd->length29 = 2 * sizeof(struct qm_sg_entry);
/* Auth_only_len is set as 0 in descriptor and it is
@@ -1547,7 +1541,7 @@ dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
dpaa_qp->rx_pkts += num_rx;
dpaa_qp->rx_errs += nb_ops - num_rx;
- PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
+ DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
return num_rx;
}
@@ -1562,11 +1556,11 @@ dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
+ DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
- PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
return -EINVAL;
}
@@ -1588,12 +1582,11 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
struct dpaa_sec_dev_private *internals;
struct dpaa_sec_qp *qp = NULL;
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
- dev, qp_id, qp_conf);
+ DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
- PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
return -EINVAL;
}
@@ -1605,26 +1598,6 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return 0;
}
-/** Start queue pair */
-static int
-dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
-/** Stop queue pair */
-static int
-dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- PMD_INIT_FUNC_TRACE();
-
- return 0;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
@@ -1636,7 +1609,7 @@ dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
/** Returns the size of session structure */
static unsigned int
-dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
PMD_INIT_FUNC_TRACE();
@@ -1654,7 +1627,7 @@ dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
+ DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
session->cipher_key.length = xform->cipher.key.length;
@@ -1676,7 +1649,7 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for auth key\n");
+ DPAA_SEC_ERR("No Memory for auth key");
return -ENOMEM;
}
session->auth_key.length = xform->auth.key.length;
@@ -1702,7 +1675,7 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for aead key\n");
+ DPAA_SEC_ERR("No Memory for aead key\n");
return -ENOMEM;
}
session->aead_key.length = xform->aead.key.length;
@@ -1727,7 +1700,7 @@ dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
return &qi->inq[i];
}
}
- PMD_DRV_LOG(ERR, "All ses session in use %x", qi->max_nb_sessions);
+ DPAA_SEC_WARN("All ses session in use %x", qi->max_nb_sessions);
return NULL;
}
@@ -1756,47 +1729,25 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
sess->qp = qp;
ret = dpaa_sec_prep_cdb(sess);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+ DPAA_SEC_ERR("Unable to prepare sec cdb");
return -1;
}
-
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_SEC_ERR("Failure in affining portal");
+ return ret;
+ }
+ }
ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
qman_fq_fqid(&qp->outq));
if (ret)
- PMD_DRV_LOG(ERR, "Unable to init sec queue");
+ DPAA_SEC_ERR("Unable to init sec queue");
return ret;
}
static int
-dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
- uint16_t qp_id __rte_unused,
- void *ses __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-static int
-dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
- uint16_t qp_id __rte_unused,
- void *ses)
-{
- dpaa_sec_session *sess = ses;
- struct dpaa_sec_dev_private *qi = dev->data->dev_private;
-
- PMD_INIT_FUNC_TRACE();
-
- if (sess->inq)
- dpaa_sec_detach_rxq(qi, sess->inq);
- sess->inq = NULL;
-
- sess->qp = NULL;
-
- return 0;
-}
-
-static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
{
@@ -1806,7 +1757,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct\n");
+ DPAA_SEC_ERR("invalid session struct");
return -EINVAL;
}
@@ -1831,7 +1782,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_cipher_init(dev, xform, session);
dpaa_sec_auth_init(dev, xform->next, session);
} else {
- PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
}
@@ -1842,7 +1793,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_auth_init(dev, xform, session);
dpaa_sec_cipher_init(dev, xform->next, session);
} else {
- PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
}
@@ -1852,13 +1803,13 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_aead_init(dev, xform, session);
} else {
- PMD_DRV_LOG(ERR, "Invalid crypto type");
+ DPAA_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
- PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ DPAA_SEC_ERR("unable to attach sec queue");
goto err1;
}
@@ -1873,7 +1824,7 @@ err1:
}
static int
-dpaa_sec_session_configure(struct rte_cryptodev *dev,
+dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -1884,22 +1835,20 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
- "session parameters");
+ DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
@@ -1908,12 +1857,12 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-dpaa_sec_session_clear(struct rte_cryptodev *dev,
+dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
struct dpaa_sec_dev_private *qi = dev->data->dev_private;
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
PMD_INIT_FUNC_TRACE();
@@ -1927,7 +1876,7 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev,
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
memset(s, 0, sizeof(dpaa_sec_session));
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -1958,7 +1907,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
@@ -1968,7 +1917,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
return -ENOMEM;
}
@@ -2013,11 +1962,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
auth_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
auth_xform->algo);
goto out;
}
@@ -2037,11 +1986,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
cipher_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
cipher_xform->algo);
goto out;
}
@@ -2086,7 +2035,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
- PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ DPAA_SEC_ERR("unable to attach sec queue");
goto out;
}
@@ -2110,8 +2059,7 @@ dpaa_sec_security_session_create(void *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
@@ -2126,9 +2074,7 @@ dpaa_sec_security_session_create(void *dev,
return -EINVAL;
}
if (ret != 0) {
- PMD_DRV_LOG(ERR,
- "DPAA2 PMD: failed to configure session parameters");
-
+ DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
@@ -2163,11 +2109,32 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused,
static int
-dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+dpaa_sec_dev_configure(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config __rte_unused)
{
+
+ char str[20];
+ struct dpaa_sec_dev_private *internals;
+
PMD_INIT_FUNC_TRACE();
+ internals = dev->data->dev_private;
+ sprintf(str, "ctx_pool_%d", dev->data->dev_id);
+ if (!internals->ctx_pool) {
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ DPAA_SEC_ERR("%s create failed\n", str);
+ return -ENOMEM;
+ }
+ } else
+ DPAA_SEC_INFO("mempool already created for dev_id : %d",
+ dev->data->dev_id);
+
return 0;
}
@@ -2185,9 +2152,19 @@ dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
}
static int
-dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
+dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
+ struct dpaa_sec_dev_private *internals;
+
PMD_INIT_FUNC_TRACE();
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_mempool_free(internals->ctx_pool);
+ internals->ctx_pool = NULL;
+
return 0;
}
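
With the two hunks above, the context mempool follows the configure/close lifecycle rather than init/uninit: dev_configure creates the pool on first use, dev_close frees it and resets the pointer, and a later uninit can free unconditionally because rte_mempool_free(NULL) is a no-op. (The 20-byte name buffer is sufficient for a uint8_t dev_id, though snprintf would be the defensive choice over sprintf.) The lifecycle reduced to a sketch, with my_priv and the pool sizing as illustrative stand-ins:

#include <errno.h>
#include <stdio.h>
#include <rte_mempool.h>

struct my_priv { struct rte_mempool *ctx_pool; };

static int
my_dev_configure(struct my_priv *p, int dev_id)
{
	char name[32];

	/* snprintf() cannot overflow the name buffer. */
	snprintf(name, sizeof(name), "ctx_pool_%d", dev_id);
	if (p->ctx_pool == NULL) {
		p->ctx_pool = rte_mempool_create(name, 32000, 128, 512, 0,
				NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
		if (p->ctx_pool == NULL)
			return -ENOMEM;
	}
	return 0;
}

static void
my_dev_close(struct my_priv *p)
{
	rte_mempool_free(p->ctx_pool);	/* NULL-safe */
	p->ctx_pool = NULL;
}
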
@@ -2203,9 +2180,6 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa_sec_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->sym.max_nb_sessions_per_qp =
- RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
- RTE_DPAA_MAX_NB_SEC_QPS;
info->driver_id = cryptodev_driver_id;
}
}
@@ -2218,14 +2192,10 @@ static struct rte_cryptodev_ops crypto_ops = {
.dev_infos_get = dpaa_sec_dev_infos_get,
.queue_pair_setup = dpaa_sec_queue_pair_setup,
.queue_pair_release = dpaa_sec_queue_pair_release,
- .queue_pair_start = dpaa_sec_queue_pair_start,
- .queue_pair_stop = dpaa_sec_queue_pair_stop,
.queue_pair_count = dpaa_sec_queue_pair_count,
- .session_get_size = dpaa_sec_session_get_size,
- .session_configure = dpaa_sec_session_configure,
- .session_clear = dpaa_sec_session_clear,
- .qp_attach_session = dpaa_sec_qp_attach_sess,
- .qp_detach_session = dpaa_sec_qp_detach_sess,
+ .sym_session_get_size = dpaa_sec_sym_session_get_size,
+ .sym_session_configure = dpaa_sec_sym_session_configure,
+ .sym_session_clear = dpaa_sec_sym_session_clear
};
static const struct rte_security_capability *
@@ -2246,18 +2216,20 @@ struct rte_security_ops dpaa_sec_security_ops = {
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
- struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ struct dpaa_sec_dev_private *internals;
if (dev == NULL)
return -ENODEV;
+ internals = dev->data->dev_private;
rte_free(dev->security_ctx);
+ /* In case close has been called, internals->ctx_pool would be NULL */
rte_mempool_free(internals->ctx_pool);
rte_free(internals);
- PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
- dev->data->name, rte_socket_id());
+ DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
return 0;
}
@@ -2270,7 +2242,6 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
struct dpaa_sec_qp *qp;
uint32_t i, flags;
int ret;
- char str[20];
PMD_INIT_FUNC_TRACE();
@@ -2283,7 +2254,11 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
RTE_CRYPTODEV_FF_HW_ACCELERATED |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_SECURITY |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
internals = cryptodev->data->dev_private;
internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
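
The single RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER flag was split in 18.08 into per-layout flags (IN_PLACE_SGL plus the four OOP_{SGL,LB}_IN_{SGL,LB}_OUT combinations), so dpaa_sec now advertises exactly which in-place and out-of-place segmented-buffer layouts it handles. An application can test for one of them before building chained mbufs; a small sketch, with supports_oop_sgl as an illustrative helper:

#include <rte_cryptodev.h>

/* Returns nonzero when the device can scatter/gather out of place. */
static int
supports_oop_sgl(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	return (info.feature_flags &
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT) != 0;
}
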
@@ -2295,7 +2270,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ DPAA_SEC_WARN("Device already init by primary process");
return 0;
}
@@ -2314,7 +2289,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
qp = &internals->qps[i];
ret = dpaa_sec_init_tx(&qp->outq);
if (ret) {
- PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
+ DPAA_SEC_ERR("config tx of queue pair %d", i);
goto init_error;
}
}
@@ -2325,28 +2300,16 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
/* create rx qman fq for sessions*/
ret = qman_create_fq(0, flags, &internals->inq[i]);
if (unlikely(ret != 0)) {
- PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
+ DPAA_SEC_ERR("sec qman_create_fq failed");
goto init_error;
}
}
- sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
- internals->ctx_pool = rte_mempool_create((const char *)str,
- CTX_POOL_NUM_BUFS,
- CTX_POOL_BUF_SIZE,
- CTX_POOL_CACHE_SIZE, 0,
- NULL, NULL, NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (!internals->ctx_pool) {
- RTE_LOG(ERR, PMD, "%s create failed\n", str);
- goto init_error;
- }
-
- PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
return 0;
init_error:
- PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+ DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
dpaa_sec_uninit(cryptodev);
return -EFAULT;
@@ -2445,5 +2408,12 @@ static struct rte_dpaa_driver rte_dpaa_sec_driver = {
static struct cryptodev_driver dpaa_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
cryptodev_driver_id);
+
+RTE_INIT(dpaa_sec_init_log)
+{
+ dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
+ if (dpaa_logtype_sec >= 0)
+ rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
+}
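
The dpaa_sec changes above track the DPDK 18.08 cryptodev rework that split symmetric and asymmetric session handling: the session_* ops and get/set_session_private_data() helpers become sym_session_* and get/set_sym_session_private_data(). A session object keeps one private-data slot per driver id, so a clear callback wipes only its own slot and returns the object to its mempool. A minimal sketch of that pattern, using the real 18.08 helpers but an illustrative my_session type:

#include <string.h>
#include <rte_cryptodev_pmd.h>
#include <rte_mempool.h>

/* Illustrative private session type for the sketch. */
struct my_session { uint8_t key[32]; };

static void
my_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	/* Fetch this driver's private-data slot from the session. */
	void *sess_priv = get_sym_session_private_data(sess, index);

	if (sess_priv) {
		/* Wipe key material before recycling the object. */
		memset(sess_priv, 0, sizeof(struct my_session));
		struct rte_mempool *mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(mp, sess_priv);
	}
}
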
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index f45b36cb..ac6c00a6 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -7,6 +7,9 @@
#ifndef _DPAA_SEC_H_
#define _DPAA_SEC_H_
+#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec
+/**< NXP DPAA - SEC PMD device name */
+
#define NUM_POOL_CHANNELS 4
#define DPAA_SEC_BURST 7
#define DPAA_SEC_ALG_UNSUPPORT (-1)
@@ -23,6 +26,7 @@
#define CTX_POOL_NUM_BUFS 32000
#define CTX_POOL_BUF_SIZE sizeof(struct dpaa_sec_op_ctx)
#define CTX_POOL_CACHE_SIZE 512
+#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 2048
#define DIR_ENC 1
#define DIR_DEC 0
@@ -133,7 +137,7 @@ struct dpaa_sec_qp {
int tx_errs;
};
-#define RTE_DPAA_MAX_NB_SEC_QPS 1
+#define RTE_DPAA_MAX_NB_SEC_QPS 8
#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
@@ -182,10 +186,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 1,
.max = 16,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -202,10 +207,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -222,10 +228,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -242,10 +249,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 1,
.max = 32,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -262,10 +270,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 1,
.max = 48,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -282,10 +291,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
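
The capability-table change above widens the advertised digest sizes from one fixed value to a full range (min 1, increment 1), so truncated HMAC digests now pass capability checks, and adds the iv_size field the 18.08 auth capability expects. On the application side, a requested digest length can be validated against what a device reports roughly as follows (dev_id and the SHA1-HMAC choice are illustrative):

#include <rte_cryptodev.h>

static int
check_sha1_hmac_digest(uint8_t dev_id, uint16_t key_size, uint16_t digest_size)
{
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.algo.auth = RTE_CRYPTO_AUTH_SHA1_HMAC,
	};
	const struct rte_cryptodev_symmetric_capability *cap;

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return -1; /* algorithm not supported at all */

	/* Returns 0 when the sizes fall within the advertised ranges. */
	return rte_cryptodev_sym_capability_check_auth(cap, key_size,
			digest_size, 0 /* iv_size */);
}
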
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
index 992a79f5..fb895a8b 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_log.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -1,44 +1,43 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright NXP 2017.
+ * Copyright 2017-2018 NXP
*
*/
#ifndef _DPAA_SEC_LOG_H_
#define _DPAA_SEC_LOG_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+extern int dpaa_logtype_sec;
+
+#define DPAA_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_sec, "dpaa_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_sec, "dpaa_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA_SEC_DEBUG(" >>")
+
+#define DPAA_SEC_INFO(fmt, args...) \
+ DPAA_SEC_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_ERR(fmt, args...) \
+ DPAA_SEC_LOG(ERR, fmt, ## args)
+#define DPAA_SEC_WARN(fmt, args...) \
+ DPAA_SEC_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA_SEC_DP_DEBUG(fmt, args...) \
+ DPAA_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA_SEC_DP_INFO(fmt, args...) \
+ DPAA_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_DP_WARN(fmt, args...) \
+ DPAA_SEC_DP_LOG(WARNING, fmt, ## args)
+#define DPAA_SEC_DP_ERR(fmt, args...) \
+ DPAA_SEC_DP_LOG(ERR, fmt, ## args)
#endif /* _DPAA_SEC_LOG_H_ */
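
The rewritten log header replaces the compile-time RTE_LIBRTE_DPAA_SEC_DEBUG_* switches with a dynamically registered log type, so control-path verbosity becomes a runtime decision (for example via the EAL --log-level option), while the DPAA_SEC_DP_* data-path macros still compile out below the static RTE_LOG_DP level. The registration-plus-macro pattern, reduced to a self-contained sketch (my_logtype and the "pmd.crypto.mydrv" name are illustrative):

#include <rte_common.h>
#include <rte_log.h>

static int my_logtype;

RTE_INIT(my_init_log)
{
	/* Registers a named log type; its level can be tuned at run time. */
	my_logtype = rte_log_register("pmd.crypto.mydrv");
	if (my_logtype >= 0)
		rte_log_set_level(my_logtype, RTE_LOG_NOTICE);
}

#define MY_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, my_logtype, "mydrv: " fmt "\n", ##args)

static void
example(void)
{
	MY_LOG(INFO, "initialised with %d queue pairs", 8);
}
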
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
new file mode 100644
index 00000000..8a570984
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa', 'security']
+sources = files('dpaa_sec.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('../dpaa2_sec/')
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 356621d4..239a1cf4 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <rte_common.h>
@@ -79,18 +79,20 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
break;
case KASUMI_OP_NOT_SUPPORTED:
default:
- KASUMI_LOG_ERR("Unsupported operation chain order parameter");
+ KASUMI_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
if (cipher_xform) {
/* Only KASUMI F8 supported */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
+ KASUMI_LOG(ERR, "Unsupported cipher algorithm ");
return -ENOTSUP;
+ }
sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
- KASUMI_LOG_ERR("Wrong IV length");
+ KASUMI_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
@@ -101,11 +103,13 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
if (auth_xform) {
/* Only KASUMI F9 supported */
- if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
+ KASUMI_LOG(ERR, "Unsupported authentication");
return -ENOTSUP;
+ }
if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
- KASUMI_LOG_ERR("Wrong digest length");
+ KASUMI_LOG(ERR, "Wrong digest length");
return -EINVAL;
}
@@ -131,7 +135,7 @@ kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
sess = (struct kasumi_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -153,8 +157,8 @@ kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -213,7 +217,7 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
if (op->sym->m_dst == NULL) {
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("bit-level in-place not supported\n");
+ KASUMI_LOG(ERR, "bit-level in-place not supported");
return 0;
}
dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
@@ -244,7 +248,7 @@ process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- KASUMI_LOG_ERR("offset");
+ KASUMI_LOG(ERR, "Invalid Offset");
break;
}
@@ -320,7 +324,7 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(session, 0, sizeof(struct kasumi_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
@@ -409,9 +413,9 @@ kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
(curr_c_op->sym->m_dst != NULL &&
!rte_pktmbuf_is_contiguous(
curr_c_op->sym->m_dst))) {
- KASUMI_LOG_ERR("PMD supports only contiguous mbufs, "
+ KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, "
"op (%p) provides noncontiguous mbuf as "
- "source/destination buffer.\n", curr_c_op);
+ "source/destination buffer.", curr_c_op);
curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
break;
}
@@ -531,7 +535,7 @@ cryptodev_kasumi_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- KASUMI_LOG_ERR("failed to create cryptodev vdev");
+ KASUMI_LOG(ERR, "failed to create cryptodev vdev");
goto init_error;
}
@@ -555,11 +559,10 @@ cryptodev_kasumi_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed",
+ KASUMI_LOG(ERR, "driver %s: failed",
init_params->name);
cryptodev_kasumi_remove(vdev);
@@ -573,8 +576,7 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct kasumi_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -617,7 +619,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, cryptodev_kasumi_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
+ cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(kasumi_init_log)
+{
+ kasumi_logtype_driver = rte_log_register("pmd.crypto.kasumi");
+}
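
kasumi's enqueue path keeps rejecting segmented mbufs because the underlying KASUMI library operates on flat buffers; the guard checks rte_pktmbuf_is_contiguous() on the source and, when present, the destination. The same check factored into a helper, as a sketch (op_mbufs_contiguous is an illustrative name):

#include <rte_crypto.h>
#include <rte_mbuf.h>

/* Returns 1 when both buffers of a symmetric op are single-segment. */
static int
op_mbufs_contiguous(const struct rte_crypto_op *op)
{
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src))
		return 0;
	if (op->sym->m_dst != NULL &&
			!rte_pktmbuf_is_contiguous(op->sym->m_dst))
		return 0;
	return 1;
}
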
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
index a388dbb6..9e4bf1b5 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <string.h>
@@ -126,7 +126,8 @@ kasumi_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = kasumi_pmd_capabilities;
}
@@ -171,13 +172,13 @@ kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) == ring_size) {
- KASUMI_LOG_INFO("Reusing existing ring %s"
+ KASUMI_LOG(INFO, "Reusing existing ring %s"
" for processed packets",
qp->name);
return r;
}
- KASUMI_LOG_ERR("Unable to reuse existing ring %s"
+ KASUMI_LOG(ERR, "Unable to reuse existing ring %s"
" for processed packets",
qp->name);
return NULL;
@@ -228,22 +229,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
kasumi_pmd_qp_count(struct rte_cryptodev *dev)
@@ -253,14 +238,14 @@ kasumi_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the KASUMI session structure */
static unsigned
-kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+kasumi_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct kasumi_session);
}
/** Configure a KASUMI session from a crypto xform chain */
static int
-kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+kasumi_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -269,26 +254,26 @@ kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- KASUMI_LOG_ERR("invalid session struct");
+ KASUMI_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ KASUMI_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = kasumi_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- KASUMI_LOG_ERR("failed configure session parameters");
+ KASUMI_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -296,17 +281,17 @@ kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-kasumi_pmd_session_clear(struct rte_cryptodev *dev,
+kasumi_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct kasumi_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -324,13 +309,11 @@ struct rte_cryptodev_ops kasumi_pmd_ops = {
.queue_pair_setup = kasumi_pmd_qp_setup,
.queue_pair_release = kasumi_pmd_qp_release,
- .queue_pair_start = kasumi_pmd_qp_start,
- .queue_pair_stop = kasumi_pmd_qp_stop,
.queue_pair_count = kasumi_pmd_qp_count,
- .session_get_size = kasumi_pmd_session_get_size,
- .session_configure = kasumi_pmd_session_configure,
- .session_clear = kasumi_pmd_session_clear
+ .sym_session_get_size = kasumi_pmd_sym_session_get_size,
+ .sym_session_configure = kasumi_pmd_sym_session_configure,
+ .sym_session_clear = kasumi_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
index a397bee6..488777ca 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
@@ -10,25 +10,13 @@
#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
/**< KASUMI PMD device name */
-#define KASUMI_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
- __func__, __LINE__, ## args)
+/** KASUMI PMD LOGTYPE DRIVER */
+int kasumi_logtype_driver;
-#ifdef RTE_LIBRTE_KASUMI_DEBUG
-#define KASUMI_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
- __func__, __LINE__, ## args)
-
-#define KASUMI_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define KASUMI_LOG_INFO(fmt, args...)
-#define KASUMI_LOG_DBG(fmt, args...)
-#endif
+#define KASUMI_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
#define KASUMI_DIGEST_LENGTH 4
@@ -36,8 +24,6 @@
struct kasumi_private {
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** KASUMI buffer queue pair */
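
One caveat in the header above: `int kasumi_logtype_driver;` is a tentative definition placed in a header, so every translation unit that includes it emits the symbol and the link relies on C common-symbol merging. The stricter layout, sketched below under that assumption, is an extern declaration in the header and a single definition beside the RTE_INIT registration:

#include <rte_common.h>
#include <rte_log.h>

/* rte_kasumi_pmd_private.h -- declaration only: */
extern int kasumi_logtype_driver;

/* rte_kasumi_pmd.c -- the one definition, next to its registration: */
int kasumi_logtype_driver;

RTE_INIT(kasumi_init_log)
{
	kasumi_logtype_driver = rte_log_register("pmd.crypto.kasumi");
}
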
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index 17041ad8..d64ca418 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['qat', 'null', 'openssl']
+drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam',
+ 'null', 'openssl', 'qat', 'virtio']
+
std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/crypto/mrvl/Makefile b/drivers/crypto/mrvl/Makefile
deleted file mode 100644
index bc5c2270..00000000
--- a/drivers/crypto/mrvl/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Marvell International Ltd.
-# Copyright(c) 2017 Semihalf.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(MAKECMDGOALS),config)
-ifeq ($(LIBMUSDK_PATH),)
-$(error "Please define LIBMUSDK_PATH environment variable")
-endif
-endif
-endif
-
-# library name
-LIB = librte_pmd_mrvl_crypto.a
-
-# build flags
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -I$(LIBMUSDK_PATH)/include
-CFLAGS += -DMVCONF_TYPES_PUBLIC
-CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
-
-# library version
-LIBABIVER := 1
-
-# versioning export map
-EXPORT_MAP := rte_pmd_mrvl_version.map
-
-# external library dependencies
-LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
-LDLIBS += -lrte_bus_vdev
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd_ops.c
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/mrvl/rte_mrvl_compat.h b/drivers/crypto/mrvl/rte_mrvl_compat.h
deleted file mode 100644
index 22cd1840..00000000
--- a/drivers/crypto/mrvl/rte_mrvl_compat.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MRVL_COMPAT_H_
-#define _RTE_MRVL_COMPAT_H_
-
-/* Unluckily, container_of is defined by both DPDK and MUSDK,
- * we'll declare only one version.
- *
- * Note that it is not used in this PMD anyway.
- */
-#ifdef container_of
-#undef container_of
-#endif
-#include "env/mv_autogen_comp_flags.h"
-#include "drivers/mv_sam.h"
-#include "drivers/mv_sam_cio.h"
-#include "drivers/mv_sam_session.h"
-
-#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/drivers/crypto/mvsam/Makefile b/drivers/crypto/mvsam/Makefile
new file mode 100644
index 00000000..c3dc72c1
--- /dev/null
+++ b/drivers/crypto/mvsam/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvsam_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvsam_version.map
+
+# external library dependencies
+LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/mvsam/meson.build b/drivers/crypto/mvsam/meson.build
new file mode 100644
index 00000000..3c8ea3cf
--- /dev/null
+++ b/drivers/crypto/mvsam/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c')
+
+deps += ['bus_vdev']
diff --git a/drivers/crypto/mvsam/rte_mrvl_compat.h b/drivers/crypto/mvsam/rte_mrvl_compat.h
new file mode 100644
index 00000000..4ab28d39
--- /dev/null
+++ b/drivers/crypto/mvsam/rte_mrvl_compat.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_COMPAT_H_
+#define _RTE_MRVL_COMPAT_H_
+
+/* Unluckily, container_of is defined by both DPDK and MUSDK,
+ * we'll declare only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#include "env/mv_autogen_comp_flags.h"
+#include "drivers/mv_sam.h"
+#include "drivers/mv_sam_cio.h"
+#include "drivers/mv_sam_session.h"
+
+#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c
index 31f3fe58..73eff757 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <rte_common.h>
@@ -44,8 +16,23 @@
#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
+
static uint8_t cryptodev_driver_id;
+struct mrvl_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params common;
+ uint32_t max_nb_sessions;
+};
+
+const char *mrvl_pmd_valid_params[] = {
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ MRVL_PMD_MAX_NB_SESS_ARG
+};
+
/**
* Flag if particular crypto algorithm is supported by PMD/MUSDK.
*
@@ -460,7 +447,7 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
return -EINVAL;
}
- sess = (struct mrvl_crypto_session *)get_session_private_data(
+ sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
op->sym->session, cryptodev_driver_id);
if (unlikely(sess == NULL)) {
MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
@@ -719,14 +706,15 @@ mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
static int
cryptodev_mrvl_crypto_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_cryptodev_pmd_init_params *init_params)
+ struct mrvl_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct mrvl_crypto_private *internals;
struct sam_init_params sam_params;
int ret;
- dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->common);
if (dev == NULL) {
MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -746,7 +734,7 @@ cryptodev_mrvl_crypto_create(const char *name,
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
- internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
/*
@@ -768,12 +756,99 @@ cryptodev_mrvl_crypto_create(const char *name,
init_error:
MRVL_CRYPTO_LOG_ERR(
- "driver %s: %s failed", init_params->name, __func__);
+ "driver %s: %s failed", init_params->common.name, __func__);
cryptodev_mrvl_crypto_uninit(vdev);
return -EFAULT;
}
+/** Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse name */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
+mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ mrvl_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ /* Common VDEV parameters */
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->common.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ &parse_integer_arg,
+ &params->common.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ &parse_name_arg,
+ &params->common);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ MRVL_PMD_MAX_NB_SESS_ARG,
+ &parse_integer_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
/**
* Initialize the crypto device.
*
@@ -783,7 +858,18 @@ init_error:
static int
cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
{
- struct rte_cryptodev_pmd_init_params init_params = { };
+ struct mrvl_pmd_init_params init_params = {
+ .common = {
+ .name = "",
+ .private_data_size =
+ sizeof(struct mrvl_crypto_private),
+ .max_nb_queue_pairs =
+ sam_get_num_inst() * SAM_HW_RING_NUM,
+ .socket_id = rte_socket_id()
+ },
+ .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
+ };
+
const char *name, *args;
int ret;
@@ -792,13 +878,7 @@ cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
return -EINVAL;
args = rte_vdev_device_args(vdev);
- init_params.private_data_size = sizeof(struct mrvl_crypto_private);
- init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM;
- init_params.max_nb_sessions =
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
- init_params.socket_id = rte_socket_id();
-
- ret = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ ret = mrvl_pmd_parse_input_args(&init_params, args);
if (ret) {
RTE_LOG(ERR, PMD,
"Failed to parse initialisation arguments[%s]\n",
@@ -853,5 +933,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
cryptodev_driver_id);
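
mvsam now parses its extra max_nb_sessions device argument itself instead of going through the generic rte_cryptodev_pmd_parse_input_args(), since that helper lost its max_nb_sessions field in 18.08. Note that the last rte_kvargs_process() call above passes `params` (the whole struct) rather than `&params->max_nb_sessions` to the integer handler, so the parsed value lands at the start of the struct; the sketch below passes the field address. A self-contained version of the same kvargs flow (the key name and helper names are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_kvargs.h>

#define MAX_NB_SESS_ARG "max_nb_sessions"

static int
parse_uint_arg(const char *key __rte_unused, const char *value, void *extra)
{
	int v = atoi(value);

	if (v < 0)
		return -EINVAL;
	*(uint32_t *)extra = v;
	return 0;
}

static int
parse_args(const char *args, uint32_t *max_nb_sessions)
{
	static const char * const valid[] = { MAX_NB_SESS_ARG, NULL };
	struct rte_kvargs *kvlist;
	int ret;

	/* Reject any key not in the whitelist, then run the handler
	 * once per occurrence of the matching key. */
	kvlist = rte_kvargs_parse(args, valid);
	if (kvlist == NULL)
		return -EINVAL;

	ret = rte_kvargs_process(kvlist, MAX_NB_SESS_ARG,
			parse_uint_arg, max_nb_sessions);
	rte_kvargs_free(kvlist);
	return ret;
}
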
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
index a1de33ae..c045562c 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <string.h>
@@ -623,32 +595,6 @@ mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return -1;
}
-/** Start queue pair (PMD ops callback) - not supported.
- *
- * @param dev Pointer to the device structure.
- * @param qp_id ID of the Queue Pair.
- * @returns -ENOTSUP. Always.
- */
-static int
-mrvl_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair (PMD ops callback) - not supported.
- *
- * @param dev Pointer to the device structure.
- * @param qp_id ID of the Queue Pair.
- * @returns -ENOTSUP. Always.
- */
-static int
-mrvl_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs (PMD ops callback).
*
* @param dev Pointer to the device structure.
@@ -666,7 +612,7 @@ mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev)
* @returns Size of Marvell crypto session.
*/
static unsigned
-mrvl_crypto_pmd_session_get_size(__rte_unused struct rte_cryptodev *dev)
+mrvl_crypto_pmd_sym_session_get_size(__rte_unused struct rte_cryptodev *dev)
{
return sizeof(struct mrvl_crypto_session);
}
@@ -679,7 +625,7 @@ mrvl_crypto_pmd_session_get_size(__rte_unused struct rte_cryptodev *dev)
* @returns 0 upon success, negative value otherwise.
*/
static int
-mrvl_crypto_pmd_session_configure(__rte_unused struct rte_cryptodev *dev,
+mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mp)
@@ -707,7 +653,7 @@ mrvl_crypto_pmd_session_configure(__rte_unused struct rte_cryptodev *dev,
return ret;
}
- set_session_private_data(sess, dev->driver_id, sess_private_data);
+ set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
if (sam_session_create(&mrvl_sess->sam_sess_params,
@@ -726,12 +672,12 @@ mrvl_crypto_pmd_session_configure(__rte_unused struct rte_cryptodev *dev,
* @returns 0. Always.
*/
static void
-mrvl_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
@@ -745,7 +691,7 @@ mrvl_crypto_pmd_session_clear(struct rte_cryptodev *dev,
memset(sess, 0, sizeof(struct mrvl_crypto_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -766,13 +712,11 @@ static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = {
.queue_pair_setup = mrvl_crypto_pmd_qp_setup,
.queue_pair_release = mrvl_crypto_pmd_qp_release,
- .queue_pair_start = mrvl_crypto_pmd_qp_start,
- .queue_pair_stop = mrvl_crypto_pmd_qp_stop,
.queue_pair_count = mrvl_crypto_pmd_qp_count,
- .session_get_size = mrvl_crypto_pmd_session_get_size,
- .session_configure = mrvl_crypto_pmd_session_configure,
- .session_clear = mrvl_crypto_pmd_session_clear
+ .sym_session_get_size = mrvl_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = mrvl_crypto_pmd_sym_session_configure,
+ .sym_session_clear = mrvl_crypto_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops;
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
index 923faaf9..c16d95b4 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#ifndef _RTE_MRVL_PMD_PRIVATE_H_
@@ -37,7 +9,7 @@
#include "rte_mrvl_compat.h"
-#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
/**< Marvell PMD device name */
#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
diff --git a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
index a7530317..a7530317 100644
--- a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map
+++ b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index dc8e5776..6e29a21a 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -78,7 +78,7 @@ get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(sym_op->session != NULL))
sess = (struct null_crypto_session *)
- get_session_private_data(
+ get_sym_session_private_data(
sym_op->session, cryptodev_driver_id);
} else {
void *_sess = NULL;
@@ -99,8 +99,8 @@ get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(sym_op->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
return sess;
@@ -161,10 +161,9 @@ cryptodev_null_create(const char *name,
{
struct rte_cryptodev *dev;
struct null_crypto_private *internals;
-
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ NULL_LOG(ERR, "failed to create cryptodev vdev");
return -EFAULT;
}
@@ -177,12 +176,11 @@ cryptodev_null_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL;
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
}
@@ -195,8 +193,7 @@ cryptodev_null_probe(struct rte_vdev_device *dev)
"",
sizeof(struct null_crypto_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name, *args;
int retval;
@@ -209,8 +206,9 @@ cryptodev_null_probe(struct rte_vdev_device *dev)
retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
if (retval) {
- RTE_LOG(ERR, PMD,
- "Failed to parse initialisation arguments[%s]\n", args);
+ NULL_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]",
+ args);
return -EINVAL;
}
@@ -245,7 +243,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(null_init_log)
+{
+ null_logtype_driver = rte_log_register("pmd.crypto.null");
+}
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index f8e5f61f..bb2b6e14 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -121,7 +121,8 @@ null_crypto_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = null_crypto_pmd_capabilities;
}
@@ -163,15 +164,15 @@ null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- NULL_CRYPTO_LOG_INFO(
- "Reusing existing ring %s for processed packets",
- qp->name);
+ NULL_LOG(INFO,
+ "Reusing existing ring %s for "
+ " processed packets", qp->name);
return r;
}
- NULL_CRYPTO_LOG_INFO(
- "Unable to reuse existing ring %s for processed packets",
- qp->name);
+ NULL_LOG(INFO,
+ "Unable to reuse existing ring %s for "
+ " processed packets", qp->name);
return NULL;
}
@@ -190,7 +191,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
int retval;
if (qp_id >= internals->max_nb_qpairs) {
- NULL_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum "
+ NULL_LOG(ERR, "Invalid qp_id %u, greater than maximum "
"number of queue pairs supported (%u).",
qp_id, internals->max_nb_qpairs);
return (-EINVAL);
@@ -204,7 +205,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
RTE_CACHE_LINE_SIZE, socket_id);
if (qp == NULL) {
- NULL_CRYPTO_LOG_ERR("Failed to allocate queue pair memory");
+ NULL_LOG(ERR, "Failed to allocate queue pair memory");
return (-ENOMEM);
}
@@ -213,15 +214,16 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
if (retval) {
- NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ NULL_LOG(ERR, "Failed to create unique name for null "
"crypto device");
+
goto qp_setup_cleanup;
}
qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
qp_conf->nb_descriptors, socket_id);
if (qp->processed_pkts == NULL) {
- NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ NULL_LOG(ERR, "Failed to create unique name for null "
"crypto device");
goto qp_setup_cleanup;
}
@@ -239,22 +241,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-null_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-null_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
@@ -264,14 +250,14 @@ null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the NULL crypto session structure */
static unsigned
-null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+null_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct null_crypto_session);
}
/** Configure a null crypto session from a crypto xform chain */
static int
-null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+null_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mp)
@@ -280,26 +266,26 @@ null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- NULL_CRYPTO_LOG_ERR("invalid session struct");
+ NULL_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mp, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ NULL_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = null_crypto_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- NULL_CRYPTO_LOG_ERR("failed configure session parameters");
+ NULL_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mp, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -307,17 +293,17 @@ null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-null_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct null_crypto_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -335,13 +321,11 @@ struct rte_cryptodev_ops pmd_ops = {
.queue_pair_setup = null_crypto_pmd_qp_setup,
.queue_pair_release = null_crypto_pmd_qp_release,
- .queue_pair_start = null_crypto_pmd_qp_start,
- .queue_pair_stop = null_crypto_pmd_qp_stop,
.queue_pair_count = null_crypto_pmd_qp_count,
- .session_get_size = null_crypto_pmd_session_get_size,
- .session_configure = null_crypto_pmd_session_configure,
- .session_clear = null_crypto_pmd_session_clear
+ .sym_session_get_size = null_crypto_pmd_sym_session_get_size,
+ .sym_session_configure = null_crypto_pmd_sym_session_configure,
+ .sym_session_clear = null_crypto_pmd_sym_session_clear
};
struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;
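
Several PMDs in this patch (kasumi and null above, among others) now report sym.max_nb_sessions = 0, which in 18.08 means the device imposes no session limit of its own: sessions live in application-provided mempools. An application probing for that convention might do the following (print_session_limit is an illustrative helper):

#include <stdio.h>
#include <rte_cryptodev.h>

static void
print_session_limit(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	if (info.sym.max_nb_sessions == 0)
		printf("dev %u: sessions limited only by mempool size\n",
				(unsigned)dev_id);
	else
		printf("dev %u: at most %u sessions\n",
				(unsigned)dev_id, info.sym.max_nb_sessions);
}
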
diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h
index 0fd13362..d5905afd 100644
--- a/drivers/crypto/null/null_crypto_pmd_private.h
+++ b/drivers/crypto/null/null_crypto_pmd_private.h
@@ -8,31 +8,17 @@
#define CRYPTODEV_NAME_NULL_PMD crypto_null
/**< Null crypto PMD device name */
-#define NULL_CRYPTO_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
- __func__, __LINE__, ## args)
+int null_logtype_driver;
-#ifdef RTE_LIBRTE_NULL_CRYPTO_DEBUG
-#define NULL_CRYPTO_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
- __func__, __LINE__, ## args)
-
-#define NULL_CRYPTO_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define NULL_CRYPTO_LOG_INFO(fmt, args...)
-#define NULL_CRYPTO_LOG_DBG(fmt, args...)
-#endif
+#define NULL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, null_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
/** private data structure for each NULL crypto device */
struct null_crypto_private {
unsigned max_nb_qpairs; /**< Max number of queue pairs */
- unsigned max_nb_sessions; /**< Max number of sessions */
};
/** NULL crypto queue pair */
diff --git a/drivers/crypto/openssl/compat.h b/drivers/crypto/openssl/compat.h
new file mode 100644
index 00000000..45f9a33d
--- /dev/null
+++ b/drivers/crypto/openssl/compat.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+
+#define set_rsa_params(rsa, p, q, ret) \
+ do {rsa->p = p; rsa->q = q; ret = 0; } while (0)
+
+#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
+ do { \
+ rsa->dmp1 = dmp1; \
+ rsa->dmq1 = dmq1; \
+ rsa->iqmp = iqmp; \
+ ret = 0; \
+ } while (0)
+
+#define set_rsa_keys(rsa, n, e, d, ret) \
+ do { \
+ rsa->n = n; rsa->e = e; rsa->d = d; ret = 0; \
+ } while (0)
+
+#define set_dh_params(dh, p, g, ret) \
+ do { \
+ dh->p = p; \
+ dh->q = NULL; \
+ dh->g = g; \
+ ret = 0; \
+ } while (0)
+
+#define set_dh_priv_key(dh, priv_key, ret) \
+ do { dh->priv_key = priv_key; ret = 0; } while (0)
+
+#define set_dsa_params(dsa, p, q, g, ret) \
+ do { dsa->p = p; dsa->q = q; dsa->g = g; ret = 0; } while (0)
+
+#define get_dh_pub_key(dh, pub_key) \
+ (pub_key = dh->pub_key)
+
+#define get_dh_priv_key(dh, priv_key) \
+ (priv_key = dh->priv_key)
+
+#define set_dsa_sign(sign, r, s) \
+ do { sign->r = r; sign->s = s; } while (0)
+
+#define get_dsa_sign(sign, r, s) \
+ do { r = sign->r; s = sign->s; } while (0)
+
+#define set_dsa_keys(dsa, pub, priv, ret) \
+ do { dsa->pub_key = pub; dsa->priv_key = priv; ret = 0; } while (0)
+
+#define set_dsa_pub_key(dsa, pub_key) \
+ (dsa->pub_key = pub_key)
+
+#define get_dsa_priv_key(dsa, priv_key) \
+ (priv_key = dsa->priv_key)
+
+#else
+
+#define set_rsa_params(rsa, p, q, ret) \
+ (ret = !RSA_set0_factors(rsa, p, q))
+
+#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
+ (ret = !RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp))
+
+/* n, e must be non-null, d can be NULL */
+#define set_rsa_keys(rsa, n, e, d, ret) \
+ (ret = !RSA_set0_key(rsa, n, e, d))
+
+#define set_dh_params(dh, p, g, ret) \
+ (ret = !DH_set0_pqg(dh, p, NULL, g))
+
+#define set_dh_priv_key(dh, priv_key, ret) \
+ (ret = !DH_set0_key(dh, NULL, priv_key))
+
+#define get_dh_pub_key(dh, pub_key) \
+ (DH_get0_key(dh, &pub_key, NULL))
+
+#define get_dh_priv_key(dh, priv_key) \
+ (DH_get0_key(dh, NULL, &priv_key))
+
+#define set_dsa_params(dsa, p, q, g, ret) \
+ (ret = !DSA_set0_pqg(dsa, p, q, g))
+
+#define set_dsa_priv_key(dsa, priv_key) \
+ (DSA_set0_key(dsa, NULL, priv_key))
+
+#define set_dsa_sign(sign, r, s) \
+ (DSA_SIG_set0(sign, r, s))
+
+#define get_dsa_sign(sign, r, s) \
+ (DSA_SIG_get0(sign, &r, &s))
+
+#define set_dsa_keys(dsa, pub, priv, ret) \
+ (ret = !DSA_set0_key(dsa, pub, priv))
+
+#define set_dsa_pub_key(dsa, pub_key) \
+ (DSA_set0_key(dsa, pub_key, NULL))
+
+#define get_dsa_priv_key(dsa, priv_key) \
+ (DSA_get0_key(dsa, NULL, &priv_key))
+
+#endif /* version < 10100000 */
+
+#endif /* __RTA_COMPAT_H__ */
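For reference, a minimal usage sketch of the shim above (hypothetical helper, not part of this patch): the same set_rsa_keys() call compiles against OpenSSL 1.0.x, where the macro assigns the struct members directly, and against 1.1.x, where it expands to RSA_set0_key().

	#include <openssl/rsa.h>
	#include "compat.h"

	/* Hypothetical sketch: build a public-only RSA key via the shim.
	 * n_buf/e_buf are assumed big-endian buffers from the caller.
	 */
	static RSA *
	make_rsa_pub_key(const unsigned char *n_buf, int n_len,
			const unsigned char *e_buf, int e_len)
	{
		int ret = 0;
		RSA *rsa = RSA_new();
		BIGNUM *n = BN_bin2bn(n_buf, n_len, NULL);
		BIGNUM *e = BN_bin2bn(e_buf, e_len, NULL);

		if (rsa == NULL || n == NULL || e == NULL)
			goto err;
		/* d may be NULL for a public-only key (see comment above) */
		set_rsa_keys(rsa, n, e, NULL, ret);
		if (ret == 0)
			return rsa;
	err:
		RSA_free(rsa);
		BN_free(n);
		BN_free(e);
		return NULL;
	}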
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 0d9bedc0..7d263aba 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -14,6 +14,7 @@
#include <openssl/evp.h>
#include "rte_openssl_pmd_private.h"
+#include "compat.h"
#define DES_BLOCK_SIZE 8
@@ -119,7 +120,7 @@ get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede)
memcpy(key_ede + 16, key, 8);
break;
default:
- OPENSSL_LOG_ERR("Unsupported key size");
+ OPENSSL_LOG(ERR, "Unsupported key size");
res = -EINVAL;
}
@@ -137,6 +138,9 @@ get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
switch (sess_algo) {
case RTE_CRYPTO_CIPHER_3DES_CBC:
switch (keylen) {
+ case 8:
+ *algo = EVP_des_cbc();
+ break;
case 16:
*algo = EVP_des_ede_cbc();
break;
@@ -677,7 +681,7 @@ openssl_set_session_parameters(struct openssl_session *sess,
ret = openssl_set_session_cipher_parameters(
sess, cipher_xform);
if (ret != 0) {
- OPENSSL_LOG_ERR(
+ OPENSSL_LOG(ERR,
"Invalid/unsupported cipher parameters");
return ret;
}
@@ -686,7 +690,7 @@ openssl_set_session_parameters(struct openssl_session *sess,
if (auth_xform) {
ret = openssl_set_session_auth_parameters(sess, auth_xform);
if (ret != 0) {
- OPENSSL_LOG_ERR(
+ OPENSSL_LOG(ERR,
"Invalid/unsupported auth parameters");
return ret;
}
@@ -695,7 +699,7 @@ openssl_set_session_parameters(struct openssl_session *sess,
if (aead_xform) {
ret = openssl_set_session_aead_parameters(sess, aead_xform);
if (ret != 0) {
- OPENSSL_LOG_ERR(
+ OPENSSL_LOG(ERR,
"Invalid/unsupported AEAD parameters");
return ret;
}
@@ -727,19 +731,36 @@ openssl_reset_session(struct openssl_session *sess)
}
/** Provide session for operation */
-static struct openssl_session *
+static void *
get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
{
struct openssl_session *sess = NULL;
+ struct openssl_asym_session *asym_sess = NULL;
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
- /* get existing session */
- if (likely(op->sym->session != NULL))
- sess = (struct openssl_session *)
- get_session_private_data(
- op->sym->session,
- cryptodev_driver_id);
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL))
+ sess = (struct openssl_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
+ if (likely(op->asym->session != NULL))
+ asym_sess = (struct openssl_asym_session *)
+ get_asym_session_private_data(
+ op->asym->session,
+ cryptodev_driver_id);
+ if (asym_sess == NULL)
+ op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return asym_sess;
+ }
} else {
+ /* sessionless asymmetric not supported */
+ if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
+ return NULL;
+
/* provide internal session */
void *_sess = NULL;
void *_sess_private_data = NULL;
@@ -759,8 +780,8 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (sess == NULL)
@@ -884,7 +905,7 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
return 0;
process_cipher_encrypt_err:
- OPENSSL_LOG_ERR("Process openssl cipher encrypt failed");
+ OPENSSL_LOG(ERR, "Process openssl cipher encrypt failed");
return -EINVAL;
}
@@ -908,7 +929,7 @@ process_openssl_cipher_bpi_encrypt(uint8_t *src, uint8_t *dst,
return 0;
process_cipher_encrypt_err:
- OPENSSL_LOG_ERR("Process openssl cipher bpi encrypt failed");
+ OPENSSL_LOG(ERR, "Process openssl cipher bpi encrypt failed");
return -EINVAL;
}
/** Process standard openssl cipher decryption */
@@ -932,7 +953,7 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
return 0;
process_cipher_decrypt_err:
- OPENSSL_LOG_ERR("Process openssl cipher decrypt failed");
+ OPENSSL_LOG(ERR, "Process openssl cipher decrypt failed");
return -EINVAL;
}
@@ -989,7 +1010,7 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
return 0;
process_cipher_des3ctr_err:
- OPENSSL_LOG_ERR("Process openssl cipher des 3 ede ctr failed");
+ OPENSSL_LOG(ERR, "Process openssl cipher des 3 ede ctr failed");
return -EINVAL;
}
@@ -1027,7 +1048,7 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
return 0;
process_auth_encryption_gcm_err:
- OPENSSL_LOG_ERR("Process openssl auth encryption gcm failed");
+ OPENSSL_LOG(ERR, "Process openssl auth encryption gcm failed");
return -EINVAL;
}
@@ -1068,7 +1089,7 @@ process_openssl_auth_encryption_ccm(struct rte_mbuf *mbuf_src, int offset,
return 0;
process_auth_encryption_ccm_err:
- OPENSSL_LOG_ERR("Process openssl auth encryption ccm failed");
+ OPENSSL_LOG(ERR, "Process openssl auth encryption ccm failed");
return -EINVAL;
}
@@ -1106,7 +1127,7 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
return 0;
process_auth_decryption_gcm_err:
- OPENSSL_LOG_ERR("Process openssl auth decryption gcm failed");
+ OPENSSL_LOG(ERR, "Process openssl auth decryption gcm failed");
return -EINVAL;
}
@@ -1145,7 +1166,7 @@ process_openssl_auth_decryption_ccm(struct rte_mbuf *mbuf_src, int offset,
return 0;
process_auth_decryption_ccm_err:
- OPENSSL_LOG_ERR("Process openssl auth decryption ccm failed");
+ OPENSSL_LOG(ERR, "Process openssl auth decryption ccm failed");
return -EINVAL;
}
@@ -1198,7 +1219,7 @@ process_auth_final:
return 0;
process_auth_err:
- OPENSSL_LOG_ERR("Process openssl auth failed");
+ OPENSSL_LOG(ERR, "Process openssl auth failed");
return -EINVAL;
}
@@ -1251,7 +1272,7 @@ process_auth_final:
return 0;
process_auth_err:
- OPENSSL_LOG_ERR("Process openssl auth failed");
+ OPENSSL_LOG(ERR, "Process openssl auth failed");
return -EINVAL;
}
@@ -1525,6 +1546,433 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
+/* process dsa sign operation */
+static int
+process_openssl_dsa_sign_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+ DSA *dsa = sess->u.s.dsa;
+ DSA_SIG *sign = NULL;
+
+ sign = DSA_do_sign(op->message.data,
+ op->message.length,
+ dsa);
+
+ if (sign == NULL) {
+ OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__);
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ } else {
+ const BIGNUM *r = NULL, *s = NULL;
+ get_dsa_sign(sign, r, s);
+
+ op->r.length = BN_bn2bin(r, op->r.data);
+ op->s.length = BN_bn2bin(s, op->s.data);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ DSA_SIG_free(sign);
+
+ return 0;
+}
+
+/* process dsa verify operation */
+static int
+process_openssl_dsa_verify_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+ DSA *dsa = sess->u.s.dsa;
+ int ret;
+ DSA_SIG *sign = DSA_SIG_new();
+ BIGNUM *r = NULL, *s = NULL;
+ BIGNUM *pub_key = NULL;
+
+ if (sign == NULL) {
+ OPENSSL_LOG(ERR, " %s:%d\n", __func__, __LINE__);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ r = BN_bin2bn(op->r.data,
+ op->r.length,
+ r);
+ s = BN_bin2bn(op->s.data,
+ op->s.length,
+ s);
+ pub_key = BN_bin2bn(op->y.data,
+ op->y.length,
+ pub_key);
+ if (!r || !s || !pub_key) {
+ if (r)
+ BN_free(r);
+ if (s)
+ BN_free(s);
+ if (pub_key)
+ BN_free(pub_key);
+
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dsa_sign(sign, r, s);
+ set_dsa_pub_key(dsa, pub_key);
+
+ ret = DSA_do_verify(op->message.data,
+ op->message.length,
+ sign,
+ dsa);
+
+ if (ret != 1)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ DSA_SIG_free(sign);
+
+ return 0;
+}
+
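A caller-side sketch of the fields the verify path above consumes; the helper and buffer names are hypothetical, not part of this patch:

	#include <rte_crypto.h>
	#include <rte_crypto_asym.h>

	/* Hypothetical sketch: populate a DSA verify op for this PMD. */
	static void
	fill_dsa_verify_op(struct rte_crypto_op *cop,
			uint8_t *msg, size_t msg_len,
			uint8_t *r, size_t r_len,
			uint8_t *s, size_t s_len,
			uint8_t *pub_y, size_t y_len)
	{
		struct rte_crypto_dsa_op_param *dsa = &cop->asym->dsa;

		dsa->op_type = RTE_CRYPTO_ASYM_OP_VERIFY;
		dsa->message.data = msg;	/* message that was signed */
		dsa->message.length = msg_len;
		dsa->r.data = r;		/* signature component r */
		dsa->r.length = r_len;
		dsa->s.data = s;		/* signature component s */
		dsa->s.length = s_len;
		dsa->y.data = pub_y;		/* signer's public key y */
		dsa->y.length = y_len;
	}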
+/* process dh operation */
+static int
+process_openssl_dh_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_dh_op_param *op = &cop->asym->dh;
+ DH *dh_key = sess->u.dh.dh_key;
+ BIGNUM *priv_key = NULL;
+ int ret = 0;
+
+ if (sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)) {
+ /* compute shared secret using peer public key
+ * and current private key
+ * shared secret = peer_key ^ priv_key mod p
+ */
+ BIGNUM *peer_key = NULL;
+
+ /* copy private key and peer key and compute shared secret */
+ peer_key = BN_bin2bn(op->pub_key.data,
+ op->pub_key.length,
+ peer_key);
+ if (peer_key == NULL) {
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ priv_key = BN_bin2bn(op->priv_key.data,
+ op->priv_key.length,
+ priv_key);
+ if (priv_key == NULL) {
+ BN_free(peer_key);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dh_priv_key(dh_key, priv_key, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to set private key\n");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(peer_key);
+ BN_free(priv_key);
+ return 0;
+ }
+
+ ret = DH_compute_key(
+ op->shared_secret.data,
+ peer_key, dh_key);
+ if (ret < 0) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(peer_key);
+ /* the private key is already loaded into dh;
+ * do not free it directly here,
+ * DH_free() will release it later.
+ */
+ return 0;
+ }
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->shared_secret.length = ret;
+ BN_free(peer_key);
+ return 0;
+ }
+
+ /*
+ * the remaining options are public and/or private key generation.
+ *
+ * if the user provides a private key,
+ * load it into the DH context first
+ */
+ if ((sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) &&
+ !(sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE))) {
+ /* generate public key using user-provided private key
+ * pub_key = g ^ priv_key mod p
+ */
+
+ /* load private key into DH */
+ priv_key = BN_bin2bn(op->priv_key.data,
+ op->priv_key.length,
+ priv_key);
+ if (priv_key == NULL) {
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+ set_dh_priv_key(dh_key, priv_key, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to set private key\n");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ BN_free(priv_key);
+ return 0;
+ }
+ }
+
+ /* generate the public/private key pair.
+ *
+ * if the private key is already set, only the public key
+ * is generated.
+ *
+ * otherwise a random private key is generated and the
+ * internal private key is updated.
+ */
+ if (!DH_generate_key(dh_key)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return 0;
+ }
+
+ if (sess->u.dh.key_op & (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) {
+ const BIGNUM *pub_key = NULL;
+
+ OPENSSL_LOG(DEBUG, "%s:%d update public key\n",
+ __func__, __LINE__);
+
+ /* get the generated keys */
+ get_dh_pub_key(dh_key, pub_key);
+
+ /* output public key */
+ op->pub_key.length = BN_bn2bin(pub_key,
+ op->pub_key.data);
+ }
+
+ if (sess->u.dh.key_op &
+ (1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE)) {
+ const BIGNUM *priv_key = NULL;
+
+ OPENSSL_LOG(DEBUG, "%s:%d updated priv key\n",
+ __func__, __LINE__);
+
+ /* get the generated keys */
+ get_dh_priv_key(dh_key, priv_key);
+
+ /* provide generated private key back to user */
+ op->priv_key.length = BN_bn2bin(priv_key,
+ op->priv_key.data);
+ }
+
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ return 0;
+}
+
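A minimal sketch of a shared-secret request as consumed above, assuming a session created with RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE; helper and buffer names are hypothetical, not part of this patch:

	#include <rte_crypto.h>
	#include <rte_crypto_asym.h>

	/* Hypothetical sketch: fill a DH shared-secret op.
	 * secret_buf must be large enough for the prime size;
	 * shared_secret.length is written back by the PMD.
	 */
	static void
	fill_dh_shared_secret_op(struct rte_crypto_op *cop,
			uint8_t *peer_pub, size_t peer_pub_len,
			uint8_t *our_priv, size_t our_priv_len,
			uint8_t *secret_buf)
	{
		struct rte_crypto_dh_op_param *dh = &cop->asym->dh;

		dh->pub_key.data = peer_pub;	/* peer's public key */
		dh->pub_key.length = peer_pub_len;
		dh->priv_key.data = our_priv;	/* local private key */
		dh->priv_key.length = our_priv_len;
		dh->shared_secret.data = secret_buf;
	}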
+/* process modinv operation */
+static int
+process_openssl_modinv_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ BIGNUM *base = BN_CTX_get(sess->u.m.ctx);
+ BIGNUM *res = BN_CTX_get(sess->u.m.ctx);
+
+ if (unlikely(base == NULL || res == NULL)) {
+ if (base)
+ BN_free(base);
+ if (res)
+ BN_free(res);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+ op->modinv.base.length, base);
+
+ if (BN_mod_inverse(res, base, sess->u.m.modulus, sess->u.m.ctx)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return 0;
+}
+
+/* process modexp operation */
+static int
+process_openssl_modexp_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ BIGNUM *base = BN_CTX_get(sess->u.e.ctx);
+ BIGNUM *res = BN_CTX_get(sess->u.e.ctx);
+
+ if (unlikely(base == NULL || res == NULL)) {
+ if (base)
+ BN_free(base);
+ if (res)
+ BN_free(res);
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return -1;
+ }
+
+ base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+ op->modinv.base.length, base);
+
+ if (BN_mod_exp(res, base, sess->u.e.exp,
+ sess->u.e.mod, sess->u.e.ctx)) {
+ op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ return 0;
+}
+
+/* process rsa operations */
+static int
+process_openssl_rsa_op(struct rte_crypto_op *cop,
+ struct openssl_asym_session *sess)
+{
+ int ret = 0;
+ struct rte_crypto_asym_op *op = cop->asym;
+ RSA *rsa = sess->u.r.rsa;
+ uint32_t pad = (op->rsa.pad);
+
+ switch (pad) {
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT0:
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT1:
+ case RTE_CRYPTO_RSA_PKCS1_V1_5_BT2:
+ pad = RSA_PKCS1_PADDING;
+ break;
+ case RTE_CRYPTO_RSA_PADDING_NONE:
+ pad = RSA_NO_PADDING;
+ break;
+ default:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ OPENSSL_LOG(ERR,
+ "rsa pad type not supported %d\n", pad);
+ return 0;
+ }
+
+ switch (op->rsa.op_type) {
+ case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+ ret = RSA_public_encrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.message.data,
+ rsa,
+ pad);
+
+ if (ret > 0)
+ op->rsa.message.length = ret;
+ OPENSSL_LOG(DEBUG,
+ "length of encrypted text %d\n", ret);
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_DECRYPT:
+ ret = RSA_private_decrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.message.data,
+ rsa,
+ pad);
+ if (ret > 0)
+ op->rsa.message.length = ret;
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_SIGN:
+ ret = RSA_private_encrypt(op->rsa.message.length,
+ op->rsa.message.data,
+ op->rsa.sign.data,
+ rsa,
+ pad);
+ if (ret > 0)
+ op->rsa.sign.length = ret;
+ break;
+
+ case RTE_CRYPTO_ASYM_OP_VERIFY:
+ ret = RSA_public_decrypt(op->rsa.sign.length,
+ op->rsa.sign.data,
+ op->rsa.sign.data,
+ rsa,
+ pad);
+
+ OPENSSL_LOG(DEBUG,
+ "Length of public_decrypt %d "
+ "length of message %zd\n",
+ ret, op->rsa.message.length);
+
+ if (memcmp(op->rsa.sign.data, op->rsa.message.data,
+ op->rsa.message.length)) {
+ OPENSSL_LOG(ERR,
+ "RSA sign Verification failed");
+ return -1;
+ }
+ break;
+
+ default:
+ /* allow ops with invalid args to be pushed to
+ * completion queue
+ */
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+
+ if (ret < 0)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ return 0;
+}
+
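A minimal sketch of an RSA sign request matching the switch above; helper and buffer names are hypothetical, not part of this patch:

	#include <rte_crypto.h>
	#include <rte_crypto_asym.h>

	/* Hypothetical sketch: populate an RSA sign op.
	 * sig_buf must hold a modulus-sized signature;
	 * sign.length is set from the RSA_private_encrypt() return value.
	 */
	static void
	fill_rsa_sign_op(struct rte_crypto_op *cop,
			uint8_t *msg, size_t msg_len, uint8_t *sig_buf)
	{
		struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

		rsa->op_type = RTE_CRYPTO_ASYM_OP_SIGN;
		rsa->pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT1; /* maps to RSA_PKCS1_PADDING */
		rsa->message.data = msg;	/* data to be signed */
		rsa->message.length = msg_len;
		rsa->sign.data = sig_buf;
	}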
+static int
+process_asym_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_asym_session *sess)
+{
+ int retval = 0;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ retval = process_openssl_rsa_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ retval = process_openssl_modexp_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ retval = process_openssl_modinv_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ retval = process_openssl_dh_op(op, sess);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ if (op->asym->dsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN)
+ retval = process_openssl_dsa_sign_op(op, sess);
+ else if (op->asym->dsa.op_type ==
+ RTE_CRYPTO_ASYM_OP_VERIFY)
+ retval =
+ process_openssl_dsa_verify_op(op, sess);
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+ if (!retval) {
+ /* op processed so push to completion queue as processed */
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ if (retval)
+ /* return error if failed to put in completion queue */
+ retval = -1;
+ }
+
+ return retval;
+}
+
/** Process crypto operation for mbuf */
static int
process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
@@ -1569,7 +2017,7 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
openssl_reset_session(sess);
memset(sess, 0, sizeof(struct openssl_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -1597,7 +2045,7 @@ static uint16_t
openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct openssl_session *sess;
+ void *sess;
struct openssl_qp *qp = queue_pair;
int i, retval;
@@ -1606,7 +2054,12 @@ openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
if (unlikely(sess == NULL))
goto enqueue_err;
- retval = process_op(qp, ops[i], sess);
+ if (ops[i]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ retval = process_op(qp, ops[i],
+ (struct openssl_session *) sess);
+ else
+ retval = process_asym_op(qp, ops[i],
+ (struct openssl_asym_session *) sess);
if (unlikely(retval < 0))
goto enqueue_err;
}
@@ -1646,7 +2099,7 @@ cryptodev_openssl_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- OPENSSL_LOG_ERR("failed to create cryptodev vdev");
+ OPENSSL_LOG(ERR, "failed to create cryptodev vdev");
goto init_error;
}
@@ -1660,18 +2113,19 @@ cryptodev_openssl_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
RTE_CRYPTODEV_FF_CPU_AESNI |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO;
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed",
+ OPENSSL_LOG(ERR, "driver %s: create failed",
init_params->name);
cryptodev_openssl_remove(vdev);
@@ -1686,8 +2140,7 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct openssl_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -1731,7 +2184,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
cryptodev_openssl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv, cryptodev_openssl_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv,
+ cryptodev_openssl_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(openssl_init_log)
+{
+ openssl_logtype_driver = rte_log_register("pmd.crypto.openssl");
+}
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 1cb87d59..de228439 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -9,6 +9,7 @@
#include <rte_cryptodev_pmd.h>
#include "rte_openssl_pmd_private.h"
+#include "compat.h"
static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
@@ -397,7 +398,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
.block_size = 8,
.key_size = {
- .min = 16,
+ .min = 8,
.max = 24,
.increment = 8
},
@@ -469,6 +470,105 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
+ { /* RSA */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+ .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
+ (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
+ (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
+ {
+ .modlen = {
+ /* min length is based on openssl rsa keygen */
+ .min = 30,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* modexp */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .op_types = 0,
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* modinv */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODINV,
+ .op_types = 0,
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* dh */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
+ .op_types =
+ ((1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) |
+ (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE) |
+ (1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)),
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
+ { /* dsa */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DSA,
+ .op_types =
+ ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
+ {
+ .modlen = {
+ /* value 0 symbolizes no limit on min length */
+ .min = 0,
+ /* value 0 symbolizes no limit on max length */
+ .max = 0,
+ .increment = 1
+ }, }
+ }
+ },
+ }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -547,7 +647,8 @@ openssl_pmd_info_get(struct rte_cryptodev *dev,
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = openssl_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -588,14 +689,14 @@ openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- OPENSSL_LOG_INFO(
- "Reusing existing ring %s for processed ops",
+ OPENSSL_LOG(INFO,
+ "Reusing existing ring %s for processed ops",
qp->name);
return r;
}
- OPENSSL_LOG_ERR(
- "Unable to reuse existing ring %s for processed ops",
+ OPENSSL_LOG(ERR,
+ "Unable to reuse existing ring %s for processed ops",
qp->name);
return NULL;
}
@@ -647,22 +748,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-openssl_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-openssl_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
openssl_pmd_qp_count(struct rte_cryptodev *dev)
@@ -670,16 +755,23 @@ openssl_pmd_qp_count(struct rte_cryptodev *dev)
return dev->data->nb_queue_pairs;
}
-/** Returns the size of the session structure */
+/** Returns the size of the symmetric session structure */
static unsigned
-openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct openssl_session);
}
+/** Returns the size of the asymmetric session structure */
+static unsigned
+openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_asym_session);
+}
+
/** Configure the session from a crypto xform chain */
static int
-openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -688,46 +780,460 @@ openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- OPENSSL_LOG_ERR("invalid session struct");
+ OPENSSL_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
+ OPENSSL_LOG(ERR,
"Couldn't get object from session mempool");
return -ENOMEM;
}
ret = openssl_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- OPENSSL_LOG_ERR("failed configure session parameters");
+ OPENSSL_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
}
+static int openssl_set_asym_session_parameters(
+ struct openssl_asym_session *asym_session,
+ struct rte_crypto_asym_xform *xform)
+{
+ int ret = 0;
+
+ if ((xform->xform_type != RTE_CRYPTO_ASYM_XFORM_DH) &&
+ (xform->next != NULL)) {
+ OPENSSL_LOG(ERR, "chained xfrms are not supported on %s",
+ rte_crypto_asym_xform_strings[xform->xform_type]);
+ return -1;
+ }
+
+ switch (xform->xform_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ {
+ BIGNUM *n = NULL;
+ BIGNUM *e = NULL;
+ BIGNUM *d = NULL;
+ BIGNUM *p = NULL, *q = NULL, *dmp1 = NULL;
+ BIGNUM *iqmp = NULL, *dmq1 = NULL;
+
+ /* copy xfrm data into rsa struct */
+ n = BN_bin2bn((const unsigned char *)xform->rsa.n.data,
+ xform->rsa.n.length, n);
+ e = BN_bin2bn((const unsigned char *)xform->rsa.e.data,
+ xform->rsa.e.length, e);
+
+ if (!n || !e)
+ goto err_rsa;
+
+ RSA *rsa = RSA_new();
+ if (rsa == NULL)
+ goto err_rsa;
+
+ if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_EXP) {
+ d = BN_bin2bn(
+ (const unsigned char *)xform->rsa.d.data,
+ xform->rsa.d.length,
+ d);
+ if (!d) {
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ } else {
+ p = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.p.data,
+ xform->rsa.qt.p.length,
+ p);
+ q = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.q.data,
+ xform->rsa.qt.q.length,
+ q);
+ dmp1 = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.dP.data,
+ xform->rsa.qt.dP.length,
+ dmp1);
+ dmq1 = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.dQ.data,
+ xform->rsa.qt.dQ.length,
+ dmq1);
+ iqmp = BN_bin2bn((const unsigned char *)
+ xform->rsa.qt.qInv.data,
+ xform->rsa.qt.qInv.length,
+ iqmp);
+
+ if (!p || !q || !dmp1 || !dmq1 || !iqmp) {
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ set_rsa_params(rsa, p, q, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR,
+ "failed to set rsa params\n");
+ RSA_free(rsa);
+ goto err_rsa;
+ }
+ set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR,
+ "failed to set crt params\n");
+ RSA_free(rsa);
+ /*
+ * set the already populated params to NULL
+ * as they are freed by the call to RSA_free
+ */
+ p = q = NULL;
+ goto err_rsa;
+ }
+ }
+
+ set_rsa_keys(rsa, n, e, d, ret);
+ if (ret) {
+ OPENSSL_LOG(ERR, "Failed to load rsa keys\n");
+ RSA_free(rsa);
+ return -1;
+ }
+ asym_session->u.r.rsa = rsa;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_RSA;
+ break;
+err_rsa:
+ if (n)
+ BN_free(n);
+ if (e)
+ BN_free(e);
+ if (d)
+ BN_free(d);
+ if (p)
+ BN_free(p);
+ if (q)
+ BN_free(q);
+ if (dmp1)
+ BN_free(dmp1);
+ if (dmq1)
+ BN_free(dmq1);
+ if (iqmp)
+ BN_free(iqmp);
+
+ return -1;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ {
+ struct rte_crypto_modex_xform *xfrm = &(xform->modex);
+
+ BN_CTX *ctx = BN_CTX_new();
+ if (ctx == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ return -1;
+ }
+ BN_CTX_start(ctx);
+ BIGNUM *mod = BN_CTX_get(ctx);
+ BIGNUM *exp = BN_CTX_get(ctx);
+ if (mod == NULL || exp == NULL) {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ return -1;
+ }
+
+ mod = BN_bin2bn((const unsigned char *)
+ xfrm->modulus.data,
+ xfrm->modulus.length, mod);
+ exp = BN_bin2bn((const unsigned char *)
+ xfrm->exponent.data,
+ xfrm->exponent.length, exp);
+ asym_session->u.e.ctx = ctx;
+ asym_session->u.e.mod = mod;
+ asym_session->u.e.exp = exp;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
+ break;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ {
+ struct rte_crypto_modinv_xform *xfrm = &(xform->modinv);
+
+ BN_CTX *ctx = BN_CTX_new();
+ if (ctx == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ return -1;
+ }
+ BN_CTX_start(ctx);
+ BIGNUM *mod = BN_CTX_get(ctx);
+ if (mod == NULL) {
+ BN_CTX_end(ctx);
+ BN_CTX_free(ctx);
+ return -1;
+ }
+
+ mod = BN_bin2bn((const unsigned char *)
+ xfrm->modulus.data,
+ xfrm->modulus.length,
+ mod);
+ asym_session->u.m.ctx = ctx;
+ asym_session->u.m.modulus = mod;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODINV;
+ break;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ {
+ BIGNUM *p = NULL;
+ BIGNUM *g = NULL;
+
+ p = BN_bin2bn((const unsigned char *)
+ xform->dh.p.data,
+ xform->dh.p.length,
+ p);
+ g = BN_bin2bn((const unsigned char *)
+ xform->dh.g.data,
+ xform->dh.g.length,
+ g);
+ if (!p || !g)
+ goto err_dh;
+
+ DH *dh = DH_new();
+ if (dh == NULL) {
+ OPENSSL_LOG(ERR,
+ "failed to allocate resources\n");
+ goto err_dh;
+ }
+ set_dh_params(dh, p, g, ret);
+ if (ret) {
+ DH_free(dh);
+ goto err_dh;
+ }
+
+ /*
+ * setup xform for
+ * public key generation, or
+ * DH private key generation, or both
+ * public and private key generation
+ */
+ asym_session->u.dh.key_op = (1 << xform->dh.type);
+
+ if (xform->dh.type ==
+ RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) {
+ /* check if next is pubkey */
+ if ((xform->next != NULL) &&
+ (xform->next->xform_type ==
+ RTE_CRYPTO_ASYM_XFORM_DH) &&
+ (xform->next->dh.type ==
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)
+ ) {
+ /*
+ * setup op as pub/priv key
+ * pair generation
+ */
+ asym_session->u.dh.key_op |=
+ (1 <<
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE);
+ }
+ }
+ asym_session->u.dh.dh_key = dh;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DH;
+ break;
+
+err_dh:
+ OPENSSL_LOG(ERR, " failed to set dh params\n");
+ if (p)
+ BN_free(p);
+ if (g)
+ BN_free(g);
+ return -1;
+ }
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ {
+ BIGNUM *p = NULL, *g = NULL;
+ BIGNUM *q = NULL, *priv_key = NULL;
+ BIGNUM *pub_key = BN_new();
+ BN_zero(pub_key);
+
+ p = BN_bin2bn((const unsigned char *)
+ xform->dsa.p.data,
+ xform->dsa.p.length,
+ p);
+
+ g = BN_bin2bn((const unsigned char *)
+ xform->dsa.g.data,
+ xform->dsa.g.length,
+ g);
+
+ q = BN_bin2bn((const unsigned char *)
+ xform->dsa.q.data,
+ xform->dsa.q.length,
+ q);
+ if (!p || !q || !g)
+ goto err_dsa;
+
+ priv_key = BN_bin2bn((const unsigned char *)
+ xform->dsa.x.data,
+ xform->dsa.x.length,
+ priv_key);
+ if (priv_key == NULL)
+ goto err_dsa;
+
+ DSA *dsa = DSA_new();
+ if (dsa == NULL) {
+ OPENSSL_LOG(ERR,
+ " failed to allocate resources\n");
+ goto err_dsa;
+ }
+
+ set_dsa_params(dsa, p, q, g, ret);
+ if (ret) {
+ DSA_free(dsa);
+ OPENSSL_LOG(ERR, "Failed to dsa params\n");
+ goto err_dsa;
+ }
+
+ /*
+ * openssl 1.1.0 mandates that the public key can't be
+ * NULL in the very first call, so set a dummy pub key.
+ * To keep consistency, follow the same approach for
+ * both versions.
+ */
+ /* just set a dummy public key for the very first call */
+ set_dsa_keys(dsa, pub_key, priv_key, ret);
+ if (ret) {
+ DSA_free(dsa);
+ OPENSSL_LOG(ERR, "Failed to set keys\n");
+ return -1;
+ }
+ asym_session->u.s.dsa = dsa;
+ asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DSA;
+ break;
+
+err_dsa:
+ if (p)
+ BN_free(p);
+ if (q)
+ BN_free(q);
+ if (g)
+ BN_free(g);
+ if (priv_key)
+ BN_free(priv_key);
+ if (pub_key)
+ BN_free(pub_key);
+ return -1;
+ }
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
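A minimal sketch of a mod-exp session xform as parsed above, assuming big-endian application buffers; helper and buffer names are hypothetical, not part of this patch:

	#include <rte_crypto_asym.h>

	/* Hypothetical sketch: build a MODEX xform for session creation. */
	static void
	fill_modex_xform(struct rte_crypto_asym_xform *xform,
			uint8_t *mod_buf, size_t mod_len,
			uint8_t *exp_buf, size_t exp_len)
	{
		xform->next = NULL;
		xform->xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
		xform->modex.modulus.data = mod_buf;	/* big-endian modulus */
		xform->modex.modulus.length = mod_len;
		xform->modex.exponent.data = exp_buf;	/* big-endian exponent */
		xform->modex.exponent.length = exp_len;
	}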
+/** Configure the session from a crypto xform chain */
+static int
+openssl_pmd_asym_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *asym_sess_private_data;
+ int ret;
+
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG(ERR, "invalid asymmetric session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &asym_sess_private_data)) {
+ OPENSSL_LOG(ERR,
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = openssl_set_asym_session_parameters(asym_sess_private_data,
+ xform);
+ if (ret != 0) {
+ OPENSSL_LOG(ERR, "failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, asym_sess_private_data);
+ return ret;
+ }
+
+ set_asym_session_private_data(sess, dev->driver_id,
+ asym_sess_private_data);
+
+ return 0;
+}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-openssl_pmd_session_clear(struct rte_cryptodev *dev,
+openssl_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
openssl_reset_session(sess_priv);
memset(sess_priv, 0, sizeof(struct openssl_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static void openssl_reset_asym_session(struct openssl_asym_session *sess)
+{
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ if (sess->u.r.rsa)
+ RSA_free(sess->u.r.rsa);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ if (sess->u.e.ctx) {
+ BN_CTX_end(sess->u.e.ctx);
+ BN_CTX_free(sess->u.e.ctx);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ if (sess->u.m.ctx) {
+ BN_CTX_end(sess->u.m.ctx);
+ BN_CTX_free(sess->u.m.ctx);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ if (sess->u.dh.dh_key)
+ DH_free(sess->u.dh.dh_key);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ if (sess->u.s.dsa)
+ DSA_free(sess->u.s.dsa);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Clear the memory of asymmetric session
+ * so it doesn't leave key material behind
+ */
+static void
+openssl_pmd_asym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_asym_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ openssl_reset_asym_session(sess_priv);
+ memset(sess_priv, 0, sizeof(struct openssl_asym_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_asym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -745,13 +1251,14 @@ struct rte_cryptodev_ops openssl_pmd_ops = {
.queue_pair_setup = openssl_pmd_qp_setup,
.queue_pair_release = openssl_pmd_qp_release,
- .queue_pair_start = openssl_pmd_qp_start,
- .queue_pair_stop = openssl_pmd_qp_stop,
.queue_pair_count = openssl_pmd_qp_count,
- .session_get_size = openssl_pmd_session_get_size,
- .session_configure = openssl_pmd_session_configure,
- .session_clear = openssl_pmd_session_clear
+ .sym_session_get_size = openssl_pmd_sym_session_get_size,
+ .asym_session_get_size = openssl_pmd_asym_session_get_size,
+ .sym_session_configure = openssl_pmd_sym_session_configure,
+ .asym_session_configure = openssl_pmd_asym_session_configure,
+ .sym_session_clear = openssl_pmd_sym_session_clear,
+ .asym_session_clear = openssl_pmd_asym_session_clear
};
struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops;
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index bc8dc7cd..a8f2c848 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -8,29 +8,19 @@
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/des.h>
+#include <openssl/rsa.h>
+#include <openssl/dh.h>
+#include <openssl/dsa.h>
#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
/**< Open SSL Crypto PMD device name */
-#define OPENSSL_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_OPENSSL_DEBUG
-#define OPENSSL_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
- __func__, __LINE__, ## args)
-
-#define OPENSSL_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define OPENSSL_LOG_INFO(fmt, args...)
-#define OPENSSL_LOG_DBG(fmt, args...)
-#endif
+/** OPENSSL PMD LOGTYPE DRIVER */
+int openssl_logtype_driver;
+#define OPENSSL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, openssl_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
/* Maximum length for digest (SHA-512 needs 64 bytes) */
#define DIGEST_LENGTH_MAX 64
@@ -62,8 +52,6 @@ enum openssl_auth_mode {
struct openssl_private {
unsigned int max_nb_qpairs;
/**< Max number of queue pairs */
- unsigned int max_nb_sessions;
- /**< Max number of sessions */
};
/** OPENSSL crypto queue pair */
@@ -157,6 +145,31 @@ struct openssl_session {
} __rte_cache_aligned;
+/** OPENSSL crypto private asymmetric session structure */
+struct openssl_asym_session {
+ enum rte_crypto_asym_xform_type xfrm_type;
+ union {
+ struct rsa {
+ RSA *rsa;
+ } r;
+ struct exp {
+ BIGNUM *exp;
+ BIGNUM *mod;
+ BN_CTX *ctx;
+ } e;
+ struct mod {
+ BIGNUM *modulus;
+ BN_CTX *ctx;
+ } m;
+ struct dh {
+ DH *dh_key;
+ uint32_t key_op;
+ } dh;
+ struct {
+ DSA *dsa;
+ } s;
+ } u;
+} __rte_cache_aligned;
/** Set and validate OPENSSL crypto session parameters */
extern int
openssl_set_session_parameters(struct openssl_session *sess,
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
deleted file mode 100644
index 260912dc..00000000
--- a/drivers/crypto/qat/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015 Intel Corporation
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_pmd_qat.a
-
-# library version
-LIBABIVER := 1
-
-# build flags
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -O3
-
-# external library include paths
-CFLAGS += -I$(SRCDIR)/qat_adf
-LDLIBS += -lcrypto
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_cryptodev
-LDLIBS += -lrte_pci -lrte_bus_pci
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_adf/qat_algs_build_desc.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c
-
-# export include files
-SYMLINK-y-include +=
-
-# versioning export map
-EXPORT_MAP := rte_pmd_qat_version.map
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/README b/drivers/crypto/qat/README
new file mode 100644
index 00000000..444ae605
--- /dev/null
+++ b/drivers/crypto/qat/README
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+The Makefile for the crypto QAT PMD is in the common/qat directory.
+The QAT driver is built from there, as only one library is built for the
+whole QAT PCI device; that library includes all the services (crypto, compression)
+that are enabled on the device.
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index 7b904630..9cc98d2c 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -1,14 +1,18 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2017-2018 Intel Corporation
+# this does not build the QAT driver; instead, that is done in the compression
+# driver, which comes later. Here we just add our source files to the list
+build = false
dep = dependency('libcrypto', required: false)
-if not dep.found()
- build = false
+qat_includes += include_directories('.')
+qat_deps += 'cryptodev'
+if dep.found()
+ # Add our source files to the list
+ qat_sources += files('qat_sym_pmd.c',
+ 'qat_sym.c',
+ 'qat_sym_session.c')
+ qat_ext_deps += dep
+ pkgconfig_extra_libs += '-lcrypto'
+ qat_cflags += '-DBUILD_QAT_SYM'
endif
-sources = files('qat_crypto.c', 'qat_qp.c',
- 'qat_adf/qat_algs_build_desc.c',
- 'rte_qat_cryptodev.c')
-includes += include_directories('qat_adf')
-deps += ['bus_pci']
-ext_deps += dep
-pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
deleted file mode 100644
index 802ba95d..00000000
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_ALGS_H_
-#define _ICP_QAT_ALGS_H_
-#include <rte_memory.h>
-#include <rte_crypto.h>
-#include "icp_qat_hw.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-#include "../qat_crypto.h"
-
-/*
- * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
- * Integrity Key (IK)
- */
-#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
-
-#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
-
-/* 3DES key sizes */
-#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
-#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
-
-#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_NO_CONVERT, \
- ICP_QAT_HW_CIPHER_ENCRYPT)
-
-#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_KEY_CONVERT, \
- ICP_QAT_HW_CIPHER_DECRYPT)
-
-struct qat_alg_buf {
- uint32_t len;
- uint32_t resrvd;
- uint64_t addr;
-} __rte_packed;
-
-enum qat_crypto_proto_flag {
- QAT_CRYPTO_PROTO_FLAG_NONE = 0,
- QAT_CRYPTO_PROTO_FLAG_CCM = 1,
- QAT_CRYPTO_PROTO_FLAG_GCM = 2,
- QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
- QAT_CRYPTO_PROTO_FLAG_ZUC = 4
-};
-
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER 16
-
-struct qat_alg_buf_list {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
- struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-struct qat_crypto_op_cookie {
- struct qat_alg_buf_list qat_sgl_list_src;
- struct qat_alg_buf_list qat_sgl_list_dst;
- rte_iova_t qat_sgl_src_phys_addr;
- rte_iova_t qat_sgl_dst_phys_addr;
-};
-
-/* Common content descriptor */
-struct qat_alg_cd {
- struct icp_qat_hw_cipher_algo_blk cipher;
- struct icp_qat_hw_auth_algo_blk hash;
-} __rte_packed __rte_cache_aligned;
-
-struct qat_session {
- enum icp_qat_fw_la_cmd_id qat_cmd;
- enum icp_qat_hw_cipher_algo qat_cipher_alg;
- enum icp_qat_hw_cipher_dir qat_dir;
- enum icp_qat_hw_cipher_mode qat_mode;
- enum icp_qat_hw_auth_algo qat_hash_alg;
- enum icp_qat_hw_auth_op auth_op;
- void *bpi_ctx;
- struct qat_alg_cd cd;
- uint8_t *cd_cur_ptr;
- rte_iova_t cd_paddr;
- struct icp_qat_fw_la_bulk_req fw_req;
- uint8_t aad_len;
- struct qat_crypto_instance *inst;
- struct {
- uint16_t offset;
- uint16_t length;
- } cipher_iv;
- struct {
- uint16_t offset;
- uint16_t length;
- } auth_iv;
- uint16_t digest_length;
- rte_spinlock_t lock; /* protects this struct */
- enum qat_device_gen min_qat_dev_gen;
-};
-
-int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
-
-int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
- uint8_t *enckey,
- uint32_t enckeylen);
-
-int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
- uint8_t *authkey,
- uint32_t authkeylen,
- uint32_t aad_length,
- uint32_t digestsize,
- unsigned int operation);
-
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags);
-
-int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_aes_docsisbpi_key(int key_len,
- enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
-int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-#endif
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
deleted file mode 100644
index 4afe159d..00000000
--- a/drivers/crypto/qat/qat_crypto.c
+++ /dev/null
@@ -1,1696 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <strings.h>
-#include <string.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <stdarg.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_tailq.h>
-#include <rte_malloc.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_string_fns.h>
-#include <rte_spinlock.h>
-#include <rte_hexdump.h>
-#include <rte_crypto_sym.h>
-#include <rte_byteorder.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-
-#include <openssl/evp.h>
-
-#include "qat_logs.h"
-#include "qat_algs.h"
-#include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
-
-#define BYTE_LENGTH 8
-/* bpi is only used for partial blocks of DES and AES
- * so AES block len can be assumed as max len for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-static int
-qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
- struct qat_pmd_private *internals) {
- int i = 0;
- const struct rte_cryptodev_capabilities *capability;
-
- while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
- RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
- continue;
-
- if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
- continue;
-
- if (capability->sym.cipher.algo == algo)
- return 1;
- }
- return 0;
-}
-
-static int
-qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
- struct qat_pmd_private *internals) {
- int i = 0;
- const struct rte_cryptodev_capabilities *capability;
-
- while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
- RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
- continue;
-
- if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
- continue;
-
- if (capability->sym.auth.algo == algo)
- return 1;
- }
- return 0;
-}
-
-/** Encrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt the IV, then XOR this with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_encrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_encrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
- return -EINVAL;
-}
-
-/** Decrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to do CFB encryption, same result, more performant
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_decrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_decrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
- return -EINVAL;
-}
-
-/** Creates a context in either AES or DES in ECB mode
- * Depends on openssl libcrypto
- */
-static int
-bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
- enum rte_crypto_cipher_operation direction __rte_unused,
- uint8_t *key, void **ctx)
-{
- const EVP_CIPHER *algo = NULL;
- int ret;
- *ctx = EVP_CIPHER_CTX_new();
-
- if (*ctx == NULL) {
- ret = -ENOMEM;
- goto ctx_init_err;
- }
-
- if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
- algo = EVP_des_ecb();
- else
- algo = EVP_aes_128_ecb();
-
- /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
- if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
- ret = -EINVAL;
- goto ctx_init_err;
- }
-
- return 0;
-
-ctx_init_err:
- if (*ctx != NULL)
- EVP_CIPHER_CTX_free(*ctx);
- return ret;
-}
-
-/** Frees a context previously created
- * Depends on openssl libcrypto
- */
-static void
-bpi_cipher_ctx_free(void *bpi_ctx)
-{
- if (bpi_ctx != NULL)
- EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
-}
-
-static inline uint32_t
-adf_modulo(uint32_t data, uint32_t shift);
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
-
-void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- struct rte_cryptodev_sym_session *sess)
-{
- PMD_INIT_FUNC_TRACE();
- uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
- struct qat_session *s = (struct qat_session *)sess_priv;
-
- if (sess_priv) {
- if (s->bpi_ctx)
- bpi_cipher_ctx_free(s->bpi_ctx);
- memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
- struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
- rte_mempool_put(sess_mp, sess_priv);
- }
-}
-
-static int
-qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
-{
- /* Cipher Only */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
- return ICP_QAT_FW_LA_CMD_CIPHER;
-
- /* Authentication Only */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
- return ICP_QAT_FW_LA_CMD_AUTH;
-
- /* AEAD */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- /* AES-GCM and AES-CCM works with different direction
- * GCM first encrypts and generate hash where AES-CCM
- * first generate hash and encrypts. Similar relation
- * applies to decryption.
- */
- if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
- if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- else
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- else
- if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- else
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- }
-
- if (xform->next == NULL)
- return -1;
-
- /* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
-
- /* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
-
- return -1;
-}
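
To make the mapping concrete, here is an illustrative xform using the DPDK cryptodev types; the result comments simply restate the branches above:

```c
#include <rte_cryptodev.h>

/* Illustrative only: AES-GCM encrypt is cipher-then-hash, because GCM
 * encrypts before generating the tag. */
struct rte_crypto_sym_xform gcm_enc = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.next = NULL,
	.aead = {
		.op   = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
	},
};
/* qat_get_cmd_id(&gcm_enc) -> ICP_QAT_FW_LA_CMD_CIPHER_HASH;
 * the same xform with AES_CCM, or with GCM and OP_DECRYPT, resolves
 * to ICP_QAT_FW_LA_CMD_HASH_CIPHER instead. */
```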
-
-static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
-{
- do {
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return &xform->auth;
-
- xform = xform->next;
- } while (xform);
-
- return NULL;
-}
-
-static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
-{
- do {
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return &xform->cipher;
-
- xform = xform->next;
- } while (xform);
-
- return NULL;
-}
-
-int
-qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct qat_pmd_private *internals = dev->data->dev_private;
- struct rte_crypto_cipher_xform *cipher_xform = NULL;
- int ret;
-
- /* Get cipher xform from crypto xform chain */
- cipher_xform = qat_get_cipher_xform(xform);
-
- session->cipher_iv.offset = cipher_xform->iv.offset;
- session->cipher_iv.length = cipher_xform->iv.length;
-
- switch (cipher_xform->algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
- case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
- if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_NULL:
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_KASUMI_F8:
- if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- if (qat_alg_validate_3des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_DES_CBC:
- if (qat_alg_validate_des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- if (qat_alg_validate_3des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
- case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
- ret = bpi_cipher_ctx_init(
- cipher_xform->algo,
- cipher_xform->op,
- cipher_xform->key.data,
- &session->bpi_ctx);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
- goto error_out;
- }
- if (qat_alg_validate_des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
- ret = bpi_cipher_ctx_init(
- cipher_xform->algo,
- cipher_xform->op,
- cipher_xform->key.data,
- &session->bpi_ctx);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
- goto error_out;
- }
- if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_ZUC_EEA3:
- if (!qat_is_cipher_alg_supported(
- cipher_xform->algo, internals)) {
- PMD_DRV_LOG(ERR, "%s not supported on this device",
- rte_crypto_cipher_algorithm_strings
- [cipher_xform->algo]);
- ret = -ENOTSUP;
- goto error_out;
- }
- if (qat_alg_validate_zuc_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_ECB:
- case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_F8:
- case RTE_CRYPTO_CIPHER_AES_XTS:
- case RTE_CRYPTO_CIPHER_ARC4:
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
- cipher_xform->algo);
- ret = -ENOTSUP;
- goto error_out;
- default:
- PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
- ret = -EINVAL;
- goto error_out;
- }
-
- if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
- else
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- cipher_xform->key.data,
- cipher_xform->key.length)) {
- ret = -EINVAL;
- goto error_out;
- }
-
- return 0;
-
-error_out:
- if (session->bpi_ctx) {
- bpi_cipher_ctx_free(session->bpi_ctx);
- session->bpi_ctx = NULL;
- }
- return ret;
-}
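
For context, a hedged caller-side sketch of the kind of xform this function consumes, here AES-128-CBC. The key bytes are placeholders, and IV_OFFSET follows the common DPDK convention of storing the IV in the op's private area; neither comes from this file:

```c
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Assumed layout: IV stored right after the sym op in the crypto op */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		sizeof(struct rte_crypto_sym_op))

static uint8_t aes128_key[16];	/* 16-byte key selects AES-128 in the validator */

struct rte_crypto_sym_xform cbc_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = NULL,
	.cipher = {
		.op   = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
		.key  = { .data = aes128_key, .length = sizeof(aes128_key) },
		.iv   = { .offset = IV_OFFSET, .length = 16 },
	},
};
```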
-
-int
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct rte_cryptodev_sym_session *sess,
- struct rte_mempool *mempool)
-{
- void *sess_private_data;
- int ret;
-
- if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
- return -ENOMEM;
- }
-
- ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
- "session parameters");
-
- /* Return session to mempool */
- rte_mempool_put(mempool, sess_private_data);
- return ret;
- }
-
- set_session_private_data(sess, dev->driver_id,
- sess_private_data);
-
- return 0;
-}
-
-int
-qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private)
-{
- struct qat_session *session = session_private;
- int ret;
-
- int qat_cmd_id;
- PMD_INIT_FUNC_TRACE();
-
- /* Set context descriptor physical address */
- session->cd_paddr = rte_mempool_virt2iova(session) +
- offsetof(struct qat_session, cd);
-
- session->min_qat_dev_gen = QAT_GEN1;
-
- /* Get requested QAT command id */
- qat_cmd_id = qat_get_cmd_id(xform);
- if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
- PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
- return -ENOTSUP;
- }
- session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
- switch (session->qat_cmd) {
- case ICP_QAT_FW_LA_CMD_CIPHER:
- ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
- if (ret < 0)
- return ret;
- break;
- case ICP_QAT_FW_LA_CMD_AUTH:
- ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
- if (ret < 0)
- return ret;
- break;
- case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- ret = qat_crypto_sym_configure_session_aead(xform,
- session);
- if (ret < 0)
- return ret;
- } else {
- ret = qat_crypto_sym_configure_session_cipher(dev,
- xform, session);
- if (ret < 0)
- return ret;
- ret = qat_crypto_sym_configure_session_auth(dev,
- xform, session);
- if (ret < 0)
- return ret;
- }
- break;
- case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- ret = qat_crypto_sym_configure_session_aead(xform,
- session);
- if (ret < 0)
- return ret;
- } else {
- ret = qat_crypto_sym_configure_session_auth(dev,
- xform, session);
- if (ret < 0)
- return ret;
- ret = qat_crypto_sym_configure_session_cipher(dev,
- xform, session);
- if (ret < 0)
- return ret;
- }
- break;
- case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
- case ICP_QAT_FW_LA_CMD_TRNG_TEST:
- case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_MGF1:
- case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
- case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
- case ICP_QAT_FW_LA_CMD_DELIMITER:
- PMD_DRV_LOG(ERR, "Unsupported Service %u",
- session->qat_cmd);
- return -ENOTSUP;
- default:
- PMD_DRV_LOG(ERR, "Unsupported Service %u",
- session->qat_cmd);
- return -ENOTSUP;
- }
-
- return 0;
-}
-
-int
-qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct rte_crypto_auth_xform *auth_xform = NULL;
- struct qat_pmd_private *internals = dev->data->dev_private;
- auth_xform = qat_get_auth_xform(xform);
- uint8_t *key_data = auth_xform->key.data;
- uint8_t key_length = auth_xform->key.length;
-
- switch (auth_xform->algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
- break;
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
- break;
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
- break;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
- break;
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
- break;
- case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
- break;
- case RTE_CRYPTO_AUTH_AES_GMAC:
- if (qat_alg_validate_aes_key(auth_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
-
- break;
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
- break;
- case RTE_CRYPTO_AUTH_MD5_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
- break;
- case RTE_CRYPTO_AUTH_NULL:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
- break;
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
- break;
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
- PMD_DRV_LOG(ERR, "%s not supported on this device",
- rte_crypto_auth_algorithm_strings
- [auth_xform->algo]);
- return -ENOTSUP;
- }
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
- break;
- case RTE_CRYPTO_AUTH_SHA1:
- case RTE_CRYPTO_AUTH_SHA256:
- case RTE_CRYPTO_AUTH_SHA512:
- case RTE_CRYPTO_AUTH_SHA224:
- case RTE_CRYPTO_AUTH_SHA384:
- case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CMAC:
- case RTE_CRYPTO_AUTH_AES_CBC_MAC:
- PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
- auth_xform->algo);
- return -ENOTSUP;
- default:
- PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
- auth_xform->algo);
- return -EINVAL;
- }
-
- session->auth_iv.offset = auth_xform->iv.offset;
- session->auth_iv.length = auth_xform->iv.length;
-
- if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
- session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
- /*
- * The cipher desc content must be created first,
- * then the authentication
- */
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- auth_xform->key.data,
- auth_xform->key.length))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
- } else {
- session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
- /*
- * The authentication desc content must be created first,
- * then the cipher
- */
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- auth_xform->key.data,
- auth_xform->key.length))
- return -EINVAL;
- }
- /* Restore to authentication only */
- session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
- } else {
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
- }
-
- session->digest_length = auth_xform->digest_length;
- return 0;
-}
-
-int
-qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct rte_crypto_aead_xform *aead_xform = &xform->aead;
- enum rte_crypto_auth_operation crypto_operation;
-
- /*
- * Store AEAD IV parameters as cipher IV,
- * to avoid unnecessary memory usage
- */
- session->cipher_iv.offset = xform->aead.iv.offset;
- session->cipher_iv.length = xform->aead.iv.length;
-
- switch (aead_xform->algo) {
- case RTE_CRYPTO_AEAD_AES_GCM:
- if (qat_alg_validate_aes_key(aead_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- break;
- case RTE_CRYPTO_AEAD_AES_CCM:
- if (qat_alg_validate_aes_key(aead_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
- break;
- default:
- PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
- aead_xform->algo);
- return -EINVAL;
- }
-
- if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
- aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
- (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
- aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
- /*
- * The cipher desc content must be created first,
- * then the authentication
- */
-
- crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
- RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- aead_xform->key.data,
- aead_xform->key.length))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- aead_xform->key.data,
- aead_xform->key.length,
- aead_xform->aad_length,
- aead_xform->digest_length,
- crypto_operation))
- return -EINVAL;
- } else {
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
- /*
- * The authentication desc content must be created first,
- * then the cipher
- */
-
- crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
- RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- aead_xform->key.data,
- aead_xform->key.length,
- aead_xform->aad_length,
- aead_xform->digest_length,
- crypto_operation))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- aead_xform->key.data,
- aead_xform->key.length))
- return -EINVAL;
- }
-
- session->digest_length = aead_xform->digest_length;
- return 0;
-}
-
-unsigned qat_crypto_sym_get_session_private_size(
- struct rte_cryptodev *dev __rte_unused)
-{
- return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
-}
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
- /* Decrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = last_block - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
- last_block_len);
-#endif
- bpi_cipher_decrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
- last_block_len);
-#endif
- }
-
- return sym_op->cipher.data.length - last_block_len;
-}
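
A worked example of the split this function computes, with illustrative numbers for DES DOCSIS BPI (8-byte blocks):

```c
#include <stdint.h>

/* Illustrative arithmetic only, not driver code. */
static uint32_t bpi_split_demo(void)
{
	uint32_t data_len = 100;	/* sym_op->cipher.data.length */
	int block_len = 8;		/* DES block size */
	uint8_t last_block_len = data_len % block_len;	/* 4-byte runt */

	/* 96 bytes go to the device; the 4-byte runt was CFB-decrypted
	 * in software above, using the previous ciphertext block (or
	 * the op IV for a sub-block message) as the CFB IV. */
	return data_len - last_block_len;	/* 96 */
}
```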
-
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len > 0 &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
- /* Encrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset;
-
- last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = dst - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src before post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before post-process:",
- dst, last_block_len);
-#endif
- bpi_cipher_encrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src after post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after post-process:", dst,
- last_block_len);
-#endif
- }
- return sym_op->cipher.data.length - last_block_len;
-}
-
-static inline void
-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
- WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, q->tail);
- q->nb_pending_requests = 0;
- q->csr_tail = q->tail;
-}
-
-uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- register struct qat_queue *queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_ops_sent = 0;
- register struct rte_crypto_op **cur_op = ops;
- register int ret;
- uint16_t nb_ops_possible = nb_ops;
- register uint8_t *base_addr;
- register uint32_t tail;
- int overflow;
-
- if (unlikely(nb_ops == 0))
- return 0;
-
- /* read params used a lot in main loop into registers */
- queue = &(tmp_qp->tx_q);
- base_addr = (uint8_t *)queue->base_addr;
- tail = queue->tail;
-
- /* Find how many can actually fit on the ring */
- tmp_qp->inflights16 += nb_ops;
- overflow = tmp_qp->inflights16 - queue->max_inflights;
- if (overflow > 0) {
- tmp_qp->inflights16 -= overflow;
- nb_ops_possible = nb_ops - overflow;
- if (nb_ops_possible == 0)
- return 0;
- }
-
- while (nb_ops_sent != nb_ops_possible) {
- ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
- tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
- if (ret != 0) {
- tmp_qp->stats.enqueue_err_count++;
- /*
- * This message cannot be enqueued,
- * decrease the inflight count by the ops that were not sent
- */
- tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- }
-
- tail = adf_modulo(tail + queue->msg_size, queue->modulo);
- nb_ops_sent++;
- cur_op++;
- }
-kick_tail:
- queue->tail = tail;
- tmp_qp->stats.enqueued_count += nb_ops_sent;
- queue->nb_pending_requests += nb_ops_sent;
- if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
- queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
- txq_write_tail(tmp_qp, queue);
- }
- return nb_ops_sent;
-}
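
The inflight accounting at the top of the burst is easiest to follow with numbers (illustrative values, not driver constants):

```c
#include <stdint.h>

/* Illustrative: 120 requests outstanding on a 128-deep ring,
 * the caller submits a burst of 16; only 8 fit. */
static uint16_t clamp_burst_demo(void)
{
	uint16_t inflights16 = 120, nb_ops = 16, max_inflights = 128;
	uint16_t nb_ops_possible = nb_ops;
	int overflow;

	inflights16 += nb_ops;			/* 136: optimistic add */
	overflow = inflights16 - max_inflights;	/* 8 too many */
	if (overflow > 0) {
		inflights16 -= overflow;	/* clamp back to 128 */
		nb_ops_possible = nb_ops - overflow;	/* 8 */
	}
	return nb_ops_possible;
}
```

The tail CSR write is then coalesced: it only happens once enough requests have accumulated, or when the ring is nearly idle, amortising the MMIO cost across bursts.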
-
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
-{
- uint32_t old_head, new_head;
- uint32_t max_head;
-
- old_head = q->csr_head;
- new_head = q->head;
- max_head = qp->nb_descriptors * q->msg_size;
-
- /* write out free descriptors */
- void *cur_desc = (uint8_t *)q->base_addr + old_head;
-
- if (new_head < old_head) {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
- memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
- } else {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
- }
- q->nb_processed_responses = 0;
- q->csr_head = new_head;
-
- /* write current head to CSR */
- WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, new_head);
-}
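
The two-memset wrap case deserves a concrete example (illustrative sizes):

```c
/* Illustrative: a 1024-byte response ring whose head has wrapped.
 *   old csr_head = 960, new head = 128, max_head = 1024
 * new_head < old_head, so the consumed slots are blanked in two steps:
 *   memset(base + 960, EMPTY,  64)   covers [960, 1024)
 *   memset(base,       EMPTY, 128)   covers [0, 128)
 * before the new head value is written back to the CSR. */
```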
-
-uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct qat_queue *rx_queue, *tx_queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- uint32_t msg_counter = 0;
- struct rte_crypto_op *rx_op;
- struct icp_qat_fw_comn_resp *resp_msg;
- uint32_t head;
-
- rx_queue = &(tmp_qp->rx_q);
- tx_queue = &(tmp_qp->tx_q);
- head = rx_queue->head;
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
-
- while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_ops) {
- rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque_data);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
- sizeof(struct icp_qat_fw_comn_resp));
-#endif
- if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
- ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
- resp_msg->comn_hdr.comn_status)) {
- rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- struct qat_session *sess = (struct qat_session *)
- get_session_private_data(
- rx_op->sym->session,
- cryptodev_qat_driver_id);
-
- if (sess->bpi_ctx)
- qat_bpicipher_postprocess(sess, rx_op);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- }
-
- head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
- *ops = rx_op;
- ops++;
- msg_counter++;
- }
- if (msg_counter > 0) {
- rx_queue->head = head;
- tmp_qp->stats.dequeued_count += msg_counter;
- rx_queue->nb_processed_responses += msg_counter;
- tmp_qp->inflights16 -= msg_counter;
-
- if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
- rxq_free_desc(tmp_qp, rx_queue);
- }
- /* also check if tail needs to be advanced */
- if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
- tx_queue->tail != tx_queue->csr_tail) {
- txq_write_tail(tmp_qp, tx_queue);
- }
- return msg_counter;
-}
-
-static inline int
-qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
- struct qat_alg_buf_list *list, uint32_t data_len)
-{
- int nr = 1;
-
- uint32_t buf_len = rte_pktmbuf_iova(buf) -
- buff_start + rte_pktmbuf_data_len(buf);
-
- list->bufers[0].addr = buff_start;
- list->bufers[0].resrvd = 0;
- list->bufers[0].len = buf_len;
-
- if (data_len <= buf_len) {
- list->num_bufs = nr;
- list->bufers[0].len = data_len;
- return 0;
- }
-
- buf = buf->next;
- while (buf) {
- if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
- PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
- " entry(%u)",
- QAT_SGL_MAX_NUMBER);
- return -EINVAL;
- }
-
- list->bufers[nr].len = rte_pktmbuf_data_len(buf);
- list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_iova(buf);
-
- buf_len += list->bufers[nr].len;
- buf = buf->next;
-
- if (buf_len > data_len) {
- list->bufers[nr].len -=
- buf_len - data_len;
- buf = NULL;
- }
- ++nr;
- }
- list->num_bufs = nr;
-
- return 0;
-}
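
How the trim at the end of the walk behaves, assuming buff_start equals the first segment's IOVA and three chained 64-byte mbuf segments carry data_len = 150:

```c
/* Illustrative walk, restating the loop above:
 *   entry 0: 64 bytes                        (running total  64)
 *   entry 1: 64 bytes                        (running total 128)
 *   entry 2: 64, trimmed by 192 - 150 = 42 to 22 bytes; walk stops
 * num_bufs = 3 and the SGL describes exactly 150 bytes. */
```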
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op,
- struct icp_qat_fw_la_bulk_req *qat_req)
-{
- /* copy IV into request if it fits */
- if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset),
- iv_length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- rte_crypto_op_ctophys_offset(op,
- iv_offset);
- }
-}
-
-/** Setting the IV for CCM is a special case: the 0th byte is set to q-1,
- * where q is the padding of the nonce in a 16-byte block
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
- rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
- *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
- q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
- if (aad_len_field_sz)
- rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
-}
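
A worked example of the q byte, assuming ICP_QAT_HW_CCM_NQ_CONST is 15 and the nonce offset is 1 (which the q-1 comment above implies):

```c
/* Illustrative: a 13-byte CCM nonce n.
 *   q     = 15 - n = 2   bytes left in the block for the length field
 *   IV[0] = q - 1  = 1   the CCM flags byte of the counter block
 * A 7-byte nonce gives q = 8 and IV[0] = 7, allowing message lengths
 * up to 2^64 - 1 bytes. */
```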
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
-{
- int ret = 0;
- struct qat_session *ctx;
- struct icp_qat_fw_la_cipher_req_params *cipher_param;
- struct icp_qat_fw_la_auth_req_params *auth_param;
- register struct icp_qat_fw_la_bulk_req *qat_req;
- uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
- uint32_t cipher_len = 0, cipher_ofs = 0;
- uint32_t auth_len = 0, auth_ofs = 0;
- uint32_t min_ofs = 0;
- uint64_t src_buf_start = 0, dst_buf_start = 0;
- uint8_t do_sgl = 0;
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
- "operation requests, op (%p) is not a "
- "symmetric operation.", op);
- return -EINVAL;
- }
-#endif
- if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests, op (%p) is sessionless.", op);
- return -EINVAL;
- }
-
- ctx = (struct qat_session *)get_session_private_data(
- op->sym->session, cryptodev_qat_driver_id);
-
- if (unlikely(ctx == NULL)) {
- PMD_DRV_LOG(ERR, "Session was not created for this device");
- return -EINVAL;
- }
-
- if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
- PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- return -EINVAL;
- }
-
- qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
- rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
- cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
-
- if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- /* AES-GCM or AES-CCM */
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
- (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
- && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
- && ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
- do_aead = 1;
- } else {
- do_auth = 1;
- do_cipher = 1;
- }
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
- do_auth = 1;
- do_cipher = 0;
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- do_auth = 0;
- do_cipher = 1;
- }
-
- if (do_cipher) {
-
- if (ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
- ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
- ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-
- if (unlikely(
- (cipher_param->cipher_length % BYTE_LENGTH != 0)
- || (cipher_param->cipher_offset
- % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
- "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- cipher_len = op->sym->cipher.data.length >> 3;
- cipher_ofs = op->sym->cipher.data.offset >> 3;
-
- } else if (ctx->bpi_ctx) {
- /* DOCSIS - only send complete blocks to the device.
- * Process any partial block using CFB mode.
- * Even with 0 complete blocks, still send to the device
- * so the op reaches the rx queue for post-processing and dequeuing
- */
- cipher_len = qat_bpicipher_preprocess(ctx, op);
- cipher_ofs = op->sym->cipher.data.offset;
- } else {
- cipher_len = op->sym->cipher.data.length;
- cipher_ofs = op->sym->cipher.data.offset;
- }
-
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
- min_ofs = cipher_ofs;
- }
-
- if (do_auth) {
-
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
- if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
- || (auth_param->auth_len % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
- "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- auth_ofs = op->sym->auth.data.offset >> 3;
- auth_len = op->sym->auth.data.length >> 3;
-
- auth_param->u1.aad_adr =
- rte_crypto_op_ctophys_offset(op,
- ctx->auth_iv.offset);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /* AES-GMAC */
- set_cipher_iv(ctx->auth_iv.length,
- ctx->auth_iv.offset,
- cipher_param, op, qat_req);
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
- auth_param->u1.aad_adr = 0;
- auth_param->u2.aad_sz = 0;
-
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->auth_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-
- }
- } else {
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
- }
- min_ofs = auth_ofs;
-
- if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
- auth_param->auth_res_addr =
- op->sym->auth.digest.phys_addr;
-
- }
-
- if (do_aead) {
- /*
- * This address may be used for setting the AAD physical pointer
- * to the IV offset from the op
- */
- rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
- if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->cipher_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- }
-
- set_cipher_iv(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
- /* In case of AES-CCM this may point to user selected memory
- * or the IV offset in the crypto_op
- */
- uint8_t *aad_data = op->sym->aead.aad.data;
- /* This is the true AAD length; it does not include the 18 bytes
- * of preceding data
- */
- uint8_t aad_ccm_real_len = 0;
-
- uint8_t aad_len_field_sz = 0;
- uint32_t msg_len_be =
- rte_bswap32(op->sym->aead.data.length);
-
- if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
- aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
- aad_ccm_real_len = ctx->aad_len -
- ICP_QAT_HW_CCM_AAD_B0_LEN -
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
- } else {
- /*
- * aad_len is not greater than 18, so there is no actual AAD
- * data; use the IV after the op for the B0 block
- */
- aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
- aad_phys_addr_aead =
- rte_crypto_op_ctophys_offset(op,
- ctx->cipher_iv.offset);
- }
-
- uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-
- aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
- ctx->digest_length, q);
-
- if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET
- + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
- (uint8_t *)&msg_len_be,
- ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
- } else {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- (uint8_t *)&msg_len_be
- + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
- - q), q);
- }
-
- if (aad_len_field_sz > 0) {
- *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
- = rte_bswap16(aad_ccm_real_len);
-
- if ((aad_ccm_real_len + aad_len_field_sz)
- % ICP_QAT_HW_CCM_AAD_B0_LEN) {
- uint8_t pad_len = 0;
- uint8_t pad_idx = 0;
-
- pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
- ((aad_ccm_real_len + aad_len_field_sz) %
- ICP_QAT_HW_CCM_AAD_B0_LEN);
- pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
- aad_ccm_real_len + aad_len_field_sz;
- memset(&aad_data[pad_idx],
- 0, pad_len);
- }
-
- }
-
- set_cipher_iv_ccm(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, q,
- aad_len_field_sz);
-
- }
-
- cipher_len = op->sym->aead.data.length;
- cipher_ofs = op->sym->aead.data.offset;
- auth_len = op->sym->aead.data.length;
- auth_ofs = op->sym->aead.data.offset;
-
- auth_param->u1.aad_adr = aad_phys_addr_aead;
- auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- min_ofs = op->sym->aead.data.offset;
- }
-
- if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
- do_sgl = 1;
-
- /* adjust for chain case */
- if (do_cipher && do_auth)
- min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
- if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
- min_ofs = 0;
-
- if (unlikely(op->sym->m_dst != NULL)) {
- /* Out-of-place operation (OOP)
- * Don't align DMA start. DMA the minimum data-set
- * so as not to overwrite data in dest buffer
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
- dst_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-
- } else {
- /* In-place operation
- * Start DMA at nearest aligned address below min_ofs
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
- & QAT_64_BTYE_ALIGN_MASK;
-
- if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
- rte_pktmbuf_headroom(op->sym->m_src))
- > src_buf_start)) {
- /* alignment has pushed addr ahead of start of mbuf
- * so revert and take the performance hit
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src,
- min_ofs);
- }
- dst_buf_start = src_buf_start;
- }
-
- if (do_cipher || do_aead) {
- cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, cipher_ofs) - src_buf_start;
- cipher_param->cipher_length = cipher_len;
- } else {
- cipher_param->cipher_offset = 0;
- cipher_param->cipher_length = 0;
- }
-
- if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, auth_ofs) - src_buf_start;
- auth_param->auth_len = auth_len;
- } else {
- auth_param->auth_off = 0;
- auth_param->auth_len = 0;
- }
-
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- > (auth_param->auth_off + auth_param->auth_len) ?
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- : (auth_param->auth_off + auth_param->auth_len);
-
- if (do_sgl) {
-
- ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
- QAT_COMN_PTR_TYPE_SGL);
- ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &qat_op_cookie->qat_sgl_list_src,
- qat_req->comn_mid.src_length);
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
- return ret;
- }
-
- if (likely(op->sym->m_dst == NULL))
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
- else {
- ret = qat_sgl_fill_array(op->sym->m_dst,
- dst_buf_start,
- &qat_op_cookie->qat_sgl_list_dst,
- qat_req->comn_mid.dst_length);
-
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot "
- "fill sgl array");
- return ret;
- }
-
- qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
- qat_req->comn_mid.dest_data_addr =
- qat_op_cookie->qat_sgl_dst_phys_addr;
- }
- } else {
- qat_req->comn_mid.src_data_addr = src_buf_start;
- qat_req->comn_mid.dest_data_addr = dst_buf_start;
- }
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "qat_req:", qat_req,
- sizeof(struct icp_qat_fw_la_bulk_req));
- rte_hexdump(stdout, "src_data:",
- rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
- rte_pktmbuf_data_len(op->sym->m_src));
- if (do_cipher) {
- uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->cipher_iv.offset);
- rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
- ctx->cipher_iv.length);
- }
-
- if (do_auth) {
- if (ctx->auth_iv.length) {
- uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->auth_iv.offset);
- rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
- ctx->auth_iv.length);
- }
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
- ctx->digest_length);
- }
-
- if (do_aead) {
- rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
- ctx->digest_length);
- rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
- ctx->aad_len);
- }
-#endif
- return 0;
-}
-
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
-{
- uint32_t div = data >> shift;
- uint32_t mult = div << shift;
-
- return data - mult;
-}
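
adf_modulo() is a branch-free data % (1 << shift); queue->modulo stores the shift, so ring offsets wrap with two shifts and a subtract:

```c
#include <stdint.h>

/* Illustrative: 1024-byte ring (modulo shift 10), 64-byte messages. */
static uint32_t ring_wrap_demo(void)
{
	uint32_t data = 960 + 64;	/* advance past the last slot: 1024 */
	uint32_t div = data >> 10;	/* 1 */
	uint32_t mult = div << 10;	/* 1024 */
	return data - mult;		/* 0: tail wrapped to ring start */
}
```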
-
-int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
- __rte_unused struct rte_cryptodev_config *config)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
-int qat_dev_close(struct rte_cryptodev *dev)
-{
- int i, ret;
-
- PMD_INIT_FUNC_TRACE();
-
- for (i = 0; i < dev->data->nb_queue_pairs; i++) {
- ret = qat_crypto_sym_qp_release(dev, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-void qat_dev_info_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info)
-{
- struct qat_pmd_private *internals = dev->data->dev_private;
-
- PMD_INIT_FUNC_TRACE();
- if (info != NULL) {
- info->max_nb_queue_pairs =
- ADF_NUM_SYM_QPS_PER_BUNDLE *
- ADF_NUM_BUNDLES_PER_DEV;
- info->feature_flags = dev->feature_flags;
- info->capabilities = internals->qat_dev_capabilities;
- info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->driver_id = cryptodev_qat_driver_id;
- info->pci_dev = RTE_DEV_TO_PCI(dev->device);
- }
-}
-
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats)
-{
- int i;
- struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-
- PMD_INIT_FUNC_TRACE();
- if (stats == NULL) {
- PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
- return;
- }
- for (i = 0; i < dev->data->nb_queue_pairs; i++) {
- if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
- continue;
- }
-
- stats->enqueued_count += qp[i]->stats.enqueued_count;
- stats->dequeued_count += qp[i]->stats.dequeued_count;
- stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
- stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
- }
-}
-
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
-{
- int i;
- struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-
- PMD_INIT_FUNC_TRACE();
- for (i = 0; i < dev->data->nb_queue_pairs; i++)
- memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
- PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
-}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
deleted file mode 100644
index c182af2e..00000000
--- a/drivers/crypto/qat/qat_crypto.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2016 Intel Corporation
- */
-
-#ifndef _QAT_CRYPTO_H_
-#define _QAT_CRYPTO_H_
-
-#include <rte_cryptodev_pmd.h>
-#include <rte_memzone.h>
-
-#include "qat_crypto_capabilities.h"
-
-#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
-/**< Intel QAT Symmetric Crypto PMD device name */
-
-/*
- * This macro rounds up a number to be a multiple of
- * the alignment when the alignment is a power of 2
- */
-#define ALIGN_POW2_ROUNDUP(num, align) \
- (((num) + (align) - 1) & ~((align) - 1))
-#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
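
Two small worked examples for these masks (illustrative values):

```c
/* ALIGN_POW2_ROUNDUP(13, 8) = (13 + 7) & ~7 = 16
 * 0x1007 & QAT_64_BTYE_ALIGN_MASK = 0x1007 & ~0x3f = 0x1000,
 * i.e. the mask rounds a DMA start address down to the nearest
 * 64-byte boundary, as used by the in-place path of the request
 * builder. */
```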
-
-#define QAT_CSR_HEAD_WRITE_THRESH 32U
-/* number of requests to accumulate before writing head CSR */
-#define QAT_CSR_TAIL_WRITE_THRESH 32U
-/* number of requests to accumulate before writing tail CSR */
-#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
-/* number of inflights below which no tail write coalescing should occur */
-
-struct qat_session;
-
-enum qat_device_gen {
- QAT_GEN1 = 1,
- QAT_GEN2,
-};
-
-/**
- * Structure associated with each queue.
- */
-struct qat_queue {
- char memz_name[RTE_MEMZONE_NAMESIZE];
- void *base_addr; /* Base address */
- rte_iova_t base_phys_addr; /* Queue physical address */
- uint32_t head; /* Shadow copy of the head */
- uint32_t tail; /* Shadow copy of the tail */
- uint32_t modulo;
- uint32_t msg_size;
- uint16_t max_inflights;
- uint32_t queue_size;
- uint8_t hw_bundle_number;
- uint8_t hw_queue_number;
- /* HW queue aka ring offset on bundle */
- uint32_t csr_head; /* last written head value */
- uint32_t csr_tail; /* last written tail value */
- uint16_t nb_processed_responses;
- /* number of responses processed since last CSR head write */
- uint16_t nb_pending_requests;
- /* number of requests pending since last CSR tail write */
-};
-
-struct qat_qp {
- void *mmap_bar_addr;
- uint16_t inflights16;
- struct qat_queue tx_q;
- struct qat_queue rx_q;
- struct rte_cryptodev_stats stats;
- struct rte_mempool *op_cookie_pool;
- void **op_cookies;
- uint32_t nb_descriptors;
- enum qat_device_gen qat_dev_gen;
-} __rte_cache_aligned;
-
-/** private data structure for each QAT device */
-struct qat_pmd_private {
- unsigned max_nb_queue_pairs;
- /**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
- enum qat_device_gen qat_dev_gen;
- /**< QAT device generation */
- const struct rte_cryptodev_capabilities *qat_dev_capabilities;
-};
-
-extern uint8_t cryptodev_qat_driver_id;
-
-int qat_dev_config(struct rte_cryptodev *dev,
- struct rte_cryptodev_config *config);
-int qat_dev_start(struct rte_cryptodev *dev);
-void qat_dev_stop(struct rte_cryptodev *dev);
-int qat_dev_close(struct rte_cryptodev *dev);
-void qat_dev_info_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info);
-
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats);
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
-
-int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *rx_conf, int socket_id,
- struct rte_mempool *session_pool);
-int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
- uint16_t queue_pair_id);
-
-int
-qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
- unsigned nb_objs, unsigned obj_cache_size, int socket_id);
-
-extern unsigned
-qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
-
-extern int
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct rte_cryptodev_sym_session *sess,
- struct rte_mempool *mempool);
-
-
-int
-qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private);
-
-int
-qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-int
-qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-int
-qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-
-extern void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- struct rte_cryptodev_sym_session *session);
-
-extern uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-extern uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h
deleted file mode 100644
index 565089a4..00000000
--- a/drivers/crypto/qat/qat_logs.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef _QAT_LOGS_H_
-#define _QAT_LOGS_H_
-
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
- "PMD: %s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX_FREE
-#define PMD_TX_FREE_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
-
-#endif /* _QAT_LOGS_H_ */
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
deleted file mode 100644
index 87b9ce0b..00000000
--- a/drivers/crypto/qat/qat_qp.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_memzone.h>
-#include <rte_cryptodev_pmd.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-#include <rte_atomic.h>
-#include <rte_prefetch.h>
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_algs.h"
-#include "adf_transport_access_macros.h"
-
-#define ADF_MAX_SYM_DESC 4096
-#define ADF_MIN_SYM_DESC 128
-#define ADF_SYM_TX_RING_DESC_SIZE 128
-#define ADF_SYM_RX_RING_DESC_SIZE 32
-#define ADF_SYM_TX_QUEUE_STARTOFF 2
-/* Offset from bundle start to 1st Sym Tx queue */
-#define ADF_SYM_RX_QUEUE_STARTOFF 10
-#define ADF_ARB_REG_SLOT 0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
-
-#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
- (ADF_ARB_REG_SLOT * index), value)
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
- uint32_t queue_size_bytes);
-static int qat_tx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
- int socket_id);
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
- int socket_id);
-static void qat_queue_delete(struct qat_queue *queue);
-static int qat_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
- int socket_id);
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
- uint32_t *queue_size_for_csr);
-static void adf_configure_queues(struct qat_qp *queue);
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
-
-static const struct rte_memzone *
-queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
- int socket_id)
-{
- const struct rte_memzone *mz;
- unsigned memzone_flags = 0;
- const struct rte_memseg *ms;
-
- PMD_INIT_FUNC_TRACE();
- mz = rte_memzone_lookup(queue_name);
- if (mz != 0) {
- if (((size_t)queue_size <= mz->len) &&
- ((socket_id == SOCKET_ID_ANY) ||
- (socket_id == mz->socket_id))) {
- PMD_DRV_LOG(DEBUG, "re-use memzone already "
- "allocated for %s", queue_name);
- return mz;
- }
-
- PMD_DRV_LOG(ERR, "Incompatible memzone already "
- "allocated %s, size %u, socket %d. "
- "Requested size %u, socket %u",
- queue_name, (uint32_t)mz->len,
- mz->socket_id, queue_size, socket_id);
- return NULL;
- }
-
- PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
- queue_name, queue_size, socket_id);
- ms = rte_eal_get_physmem_layout();
- switch (ms[0].hugepage_sz) {
- case(RTE_PGSIZE_2M):
- memzone_flags = RTE_MEMZONE_2MB;
- break;
- case(RTE_PGSIZE_1G):
- memzone_flags = RTE_MEMZONE_1GB;
- break;
- case(RTE_PGSIZE_16M):
- memzone_flags = RTE_MEMZONE_16MB;
- break;
- case(RTE_PGSIZE_16G):
- memzone_flags = RTE_MEMZONE_16GB;
- break;
- default:
- memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
- }
- return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
- memzone_flags, queue_size);
-}
-
-int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id, struct rte_mempool *session_pool __rte_unused)
-{
- struct qat_qp *qp;
- struct rte_pci_device *pci_dev;
- int ret;
- char op_cookie_pool_name[RTE_RING_NAMESIZE];
- uint32_t i;
-
- PMD_INIT_FUNC_TRACE();
-
- /* If qp is already in use free ring memory and qp metadata. */
- if (dev->data->queue_pairs[queue_pair_id] != NULL) {
- ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
- if (ret < 0)
- return ret;
- }
-
- if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
- (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
- PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
- qp_conf->nb_descriptors);
- return -EINVAL;
- }
-
- pci_dev = RTE_DEV_TO_PCI(dev->device);
-
- if (pci_dev->mem_resource[0].addr == NULL) {
- PMD_DRV_LOG(ERR, "Could not find VF config space "
- "(UIO driver attached?).");
- return -EINVAL;
- }
-
- if (queue_pair_id >=
- (ADF_NUM_SYM_QPS_PER_BUNDLE *
- ADF_NUM_BUNDLES_PER_DEV)) {
- PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
- queue_pair_id);
- return -EINVAL;
- }
- /* Allocate the queue pair data structure. */
- qp = rte_zmalloc("qat PMD qp metadata",
- sizeof(*qp), RTE_CACHE_LINE_SIZE);
- if (qp == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
- return -ENOMEM;
- }
- qp->nb_descriptors = qp_conf->nb_descriptors;
- qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
- qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
- RTE_CACHE_LINE_SIZE);
- if (qp->op_cookies == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
- rte_free(qp);
- return -ENOMEM;
- }
-
- qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
- qp->inflights16 = 0;
-
- if (qat_tx_queue_create(dev, &(qp->tx_q),
- queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
- PMD_INIT_LOG(ERR, "Tx queue create failed "
- "queue_pair_id=%u", queue_pair_id);
- goto create_err;
- }
-
- if (qat_rx_queue_create(dev, &(qp->rx_q),
- queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
- PMD_DRV_LOG(ERR, "Rx queue create failed "
- "queue_pair_id=%hu", queue_pair_id);
- qat_queue_delete(&(qp->tx_q));
- goto create_err;
- }
-
- adf_configure_queues(qp);
- adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
- snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
- pci_dev->driver->driver.name, dev->data->dev_id,
- queue_pair_id);
-
- qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
- if (qp->op_cookie_pool == NULL)
- qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
- qp->nb_descriptors,
- sizeof(struct qat_crypto_op_cookie), 64, 0,
- NULL, NULL, NULL, NULL, socket_id,
- 0);
- if (!qp->op_cookie_pool) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
- " op mempool");
- goto create_err;
- }
-
- for (i = 0; i < qp->nb_descriptors; i++) {
- if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
- goto create_err;
- }
-
- struct qat_crypto_op_cookie *sql_cookie =
- qp->op_cookies[i];
-
- sql_cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_crypto_op_cookie,
- qat_sgl_list_src);
-
- sql_cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_crypto_op_cookie,
- qat_sgl_list_dst);
- }
-
- struct qat_pmd_private *internals
- = dev->data->dev_private;
- qp->qat_dev_gen = internals->qat_dev_gen;
-
- dev->data->queue_pairs[queue_pair_id] = qp;
- return 0;
-
-create_err:
- if (qp->op_cookie_pool)
- rte_mempool_free(qp->op_cookie_pool);
- rte_free(qp->op_cookies);
- rte_free(qp);
- return -EFAULT;
-}
-
-int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
-{
- struct qat_qp *qp =
- (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
- uint32_t i;
-
- PMD_INIT_FUNC_TRACE();
- if (qp == NULL) {
- PMD_DRV_LOG(DEBUG, "qp already freed");
- return 0;
- }
-
- /* Don't free memory if there are still responses to be processed */
- if (qp->inflights16 == 0) {
- qat_queue_delete(&(qp->tx_q));
- qat_queue_delete(&(qp->rx_q));
- } else {
- return -EAGAIN;
- }
-
- adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
-
- for (i = 0; i < qp->nb_descriptors; i++)
- rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
-
- if (qp->op_cookie_pool)
- rte_mempool_free(qp->op_cookie_pool);
-
- rte_free(qp->op_cookies);
- rte_free(qp);
- dev->data->queue_pairs[queue_pair_id] = NULL;
- return 0;
-}
-
-static int qat_tx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t qp_id,
- uint32_t nb_desc, int socket_id)
-{
- PMD_INIT_FUNC_TRACE();
- queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
- queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
- ADF_SYM_TX_QUEUE_STARTOFF;
- PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
- nb_desc, qp_id, queue->hw_bundle_number,
- queue->hw_queue_number);
-
- return qat_queue_create(dev, queue, nb_desc,
- ADF_SYM_TX_RING_DESC_SIZE, socket_id);
-}
-
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
- int socket_id)
-{
- PMD_INIT_FUNC_TRACE();
- queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
- queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
- ADF_SYM_RX_QUEUE_STARTOFF;
-
- PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
- nb_desc, qp_id, queue->hw_bundle_number,
- queue->hw_queue_number);
- return qat_queue_create(dev, queue, nb_desc,
- ADF_SYM_RX_RING_DESC_SIZE, socket_id);
-}
-
-static void qat_queue_delete(struct qat_queue *queue)
-{
- const struct rte_memzone *mz;
- int status = 0;
-
- if (queue == NULL) {
- PMD_DRV_LOG(DEBUG, "Invalid queue");
- return;
- }
- mz = rte_memzone_lookup(queue->memz_name);
- if (mz != NULL) {
- /* Write an unused pattern to the queue memory. */
- memset(queue->base_addr, 0x7F, queue->queue_size);
- status = rte_memzone_free(mz);
- if (status != 0)
- PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
- status, queue->memz_name);
- } else {
- PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
- queue->memz_name);
- }
-}
-
-static int
-qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
- uint32_t nb_desc, uint8_t desc_size, int socket_id)
-{
- uint64_t queue_base;
- void *io_addr;
- const struct rte_memzone *qp_mz;
- uint32_t queue_size_bytes = nb_desc*desc_size;
- struct rte_pci_device *pci_dev;
-
- PMD_INIT_FUNC_TRACE();
- if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
- PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
- return -EINVAL;
- }
-
- pci_dev = RTE_DEV_TO_PCI(dev->device);
-
- /*
- * Allocate a memzone for the queue - create a unique name.
- */
- snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
- pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
- queue->hw_bundle_number, queue->hw_queue_number);
- qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
- socket_id);
- if (qp_mz == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
- return -ENOMEM;
- }
-
- queue->base_addr = (char *)qp_mz->addr;
- queue->base_phys_addr = qp_mz->iova;
- if (qat_qp_check_queue_alignment(queue->base_phys_addr,
- queue_size_bytes)) {
- PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
- " 0x%"PRIx64"\n",
- queue->base_phys_addr);
- return -EFAULT;
- }
-
- if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
- != 0) {
- PMD_DRV_LOG(ERR, "Invalid num inflights");
- return -EINVAL;
- }
-
- queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
- ADF_BYTES_TO_MSG_SIZE(desc_size));
- queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
- PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
- " msg_size %u, max_inflights %u modulo %u",
- queue->queue_size, queue_size_bytes,
- nb_desc, desc_size, queue->max_inflights,
- queue->modulo);
-
- if (queue->max_inflights < 2) {
- PMD_DRV_LOG(ERR, "Invalid num inflights");
- return -EINVAL;
- }
- queue->head = 0;
- queue->tail = 0;
- queue->msg_size = desc_size;
-
- /*
- * Write an unused pattern to the queue memory.
- */
- memset(queue->base_addr, 0x7F, queue_size_bytes);
-
- queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
- queue->queue_size);
-
- io_addr = pci_dev->mem_resource[0].addr;
-
- WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_base);
- return 0;
-}
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
- uint32_t queue_size_bytes)
-{
- PMD_INIT_FUNC_TRACE();
- if (((queue_size_bytes - 1) & phys_addr) != 0)
- return -EINVAL;
- return 0;
-}
-
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
- uint32_t *p_queue_size_for_csr)
-{
- uint8_t i = ADF_MIN_RING_SIZE;
-
- PMD_INIT_FUNC_TRACE();
- for (; i <= ADF_MAX_RING_SIZE; i++)
- if ((msg_size * msg_num) ==
- (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
- *p_queue_size_for_csr = i;
- return 0;
- }
- PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
- return -EINVAL;
-}
-
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
-{
- uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
- (ADF_ARB_REG_SLOT *
- txq->hw_bundle_number);
- uint32_t value;
-
- PMD_INIT_FUNC_TRACE();
- value = ADF_CSR_RD(base_addr, arb_csr_offset);
- value |= (0x01 << txq->hw_queue_number);
- ADF_CSR_WR(base_addr, arb_csr_offset, value);
-}
-
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
-{
- uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
- (ADF_ARB_REG_SLOT *
- txq->hw_bundle_number);
- uint32_t value;
-
- PMD_INIT_FUNC_TRACE();
- value = ADF_CSR_RD(base_addr, arb_csr_offset);
- value ^= (0x01 << txq->hw_queue_number);
- ADF_CSR_WR(base_addr, arb_csr_offset, value);
-}
-
-static void adf_configure_queues(struct qat_qp *qp)
-{
- uint32_t queue_config;
- struct qat_queue *queue = &qp->tx_q;
-
- PMD_INIT_FUNC_TRACE();
- queue_config = BUILD_RING_CONFIG(queue->queue_size);
-
- WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_config);
-
- queue = &qp->rx_q;
- queue_config =
- BUILD_RESP_RING_CONFIG(queue->queue_size,
- ADF_RING_NEAR_WATERMARK_512,
- ADF_RING_NEAR_WATERMARK_0);
-
- WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_config);
-}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
new file mode 100644
index 00000000..10cdf2e1
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym.c
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+
+/** Decrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to perform CFB decryption: same result, better performance
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_decrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+ return -EINVAL;
+}
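+
+/*
+ * Why ECB+XOR gives single-block CFB (an illustrative note, not driver
+ * logic): the CFB keystream for block i is O_i = E(K, C_{i-1}), with
+ * C_0 = IV, so
+ *	encrypt: C_i = P_i ^ O_i
+ *	decrypt: P_i = C_i ^ O_i
+ * Both directions need only the block cipher's encrypt primitive. E.g.
+ * for a 5-byte runt with AES, one ECB encryption of the 16-byte IV yields
+ * the keystream, and its first 5 bytes are XORed with the 5 data bytes,
+ * exactly as the loop above does.
+ */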
+
+
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+ dst, last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset),
+ iv_length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ iv_offset);
+ }
+}
+
+/** Setting the IV for CCM is a special case: byte 0 is set to q-1,
+ * where q is the size of the length field that pads the nonce out
+ * to the 16-byte block
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
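+
+/*
+ * Worked example (a sketch; assumes ICP_QAT_HW_CCM_NQ_CONST is 15, as in
+ * the QAT headers): for a 13-byte CCM nonce the caller passes
+ * q = 15 - 13 = 2, so byte 0 of the counter block becomes
+ * q - ICP_QAT_HW_CCM_NONCE_OFFSET = 2 - 1 = 1, matching the [q-1] flags
+ * octet required by NIST SP 800-38C, with the nonce in bytes 1-13.
+ */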
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen)
+{
+ int ret = 0;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint8_t do_sgl = 0;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *)op_cookie;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ return -EINVAL;
+ }
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
+ " requests, op (%p) is sessionless.", op);
+ return -EINVAL;
+ }
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+
+ if (unlikely(ctx == NULL)) {
+ QAT_DP_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
+ QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ do_aead = 1;
+ } else {
+ do_auth = 1;
+ do_cipher = 1;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
+
+ if (do_cipher) {
+
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+
+ if (unlikely(
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to the device.
+			 * Process any partial block using CFB mode.
+			 * Even with 0 complete blocks, still send the op to
+			 * the device so it reaches the rx queue for
+			 * post-processing and dequeuing.
+			 */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+ min_ofs = cipher_ofs;
+ }
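+
+	/*
+	 * Illustrative arithmetic for the wireless algorithms above:
+	 * SNOW3G/KASUMI/ZUC lengths and offsets arrive in bits, so e.g.
+	 * cipher.data.length = 1024 and cipher.data.offset = 64 become
+	 * cipher_len = 1024 >> 3 = 128 bytes and cipher_ofs = 64 >> 3 = 8
+	 * bytes before being handed to the hardware.
+	 */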
+
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ if (unlikely(
+ (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /* AES-GMAC */
+ set_cipher_iv(ctx->auth_iv.length,
+ ctx->auth_iv.offset,
+ cipher_param, op, qat_req);
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->auth_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ }
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ }
+ min_ofs = auth_ofs;
+
+ if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
+
+ }
+
+ if (do_aead) {
+		/*
+		 * This address may be used to set the AAD physical pointer
+		 * to the IV offset within the op
+		 */
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->cipher_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+			/* In case of AES-CCM this may point to user-selected
+			 * memory or the IV offset in the crypto_op
+			 */
+ uint8_t *aad_data = op->sym->aead.aad.data;
+			/* This is the true AAD length; it does not include
+			 * the 18 bytes of preceding data
+			 */
+ uint8_t aad_ccm_real_len = 0;
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+				/*
+				 * aad_len is not greater than 18, so there is
+				 * no actual AAD data; use the IV placed after
+				 * the op for the B0 block
+				 */
+ aad_data = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
+ ctx->cipher_iv.length;
+
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+ aad_len_field_sz,
+ ctx->digest_length, q);
+
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
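+
+			/*
+			 * Worked example (illustrative): for a 300-byte
+			 * message with q = 2, msg_len_be holds the big-endian
+			 * bytes 00 00 01 2C, and the else branch above copies
+			 * its last q = 2 bytes (01 2C) into the tail of the
+			 * B0 block, per the CCM length-field encoding.
+			 */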
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
+ }
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+ auth_len = op->sym->aead.data.length;
+ auth_ofs = op->sym->aead.data.offset;
+
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
+ auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+ min_ofs = op->sym->aead.data.offset;
+ }
+
+ if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ do_sgl = 1;
+
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+ if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+ min_ofs = 0;
+
+ if (unlikely(op->sym->m_dst != NULL)) {
+ /* Out-of-place operation (OOP)
+ * Don't align DMA start. DMA the minimum data-set
+ * so as not to overwrite data in dest buffer
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
+ dst_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
+
+ } else {
+ /* In-place operation
+ * Start DMA at nearest aligned address below min_ofs
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
+ & QAT_64_BTYE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
+ rte_pktmbuf_headroom(op->sym->m_src))
+ > src_buf_start)) {
+			/* alignment has pushed the address before the start
+			 * of the mbuf data, so revert to the unaligned
+			 * address and take the performance hit
+			 */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src,
+ min_ofs);
+ }
+ dst_buf_start = src_buf_start;
+ }
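+
+	/*
+	 * Alignment sketch (assumes QAT_64_BTYE_ALIGN_MASK clears the low
+	 * 6 bits): an in-place source IOVA of 0x10073 is rounded down to
+	 * 0x10040, and the 0x33-byte difference is folded into
+	 * cipher_offset/auth_off below so the device still starts
+	 * processing at the right byte.
+	 */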
+
+ if (do_cipher || do_aead) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, cipher_ofs) - src_buf_start;
+ cipher_param->cipher_length = cipher_len;
+ } else {
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
+ }
+
+ if (do_auth || do_aead) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, auth_ofs) - src_buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
+ }
+
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
+ if (do_sgl) {
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->sym->m_src,
+ (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ if (likely(op->sym->m_dst == NULL))
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ else {
+ ret = qat_sgl_fill_array(op->sym->m_dst,
+ (int64_t)(dst_buf_start -
+ rte_pktmbuf_iova(op->sym->m_dst)),
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
+ return ret;
+ }
+
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ qat_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ }
+ } else {
+ qat_req->comn_mid.src_data_addr = src_buf_start;
+ qat_req->comn_mid.dest_data_addr = dst_buf_start;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (do_cipher) {
+ uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ }
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
+ ctx->digest_length);
+ }
+
+ if (do_aead) {
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+ ctx->digest_length);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
+ ctx->aad_len);
+ }
+#endif
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
new file mode 100644
index 00000000..bc6426c3
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+
+#ifdef BUILD_QAT_SYM
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+#include "qat_logs.h"
+
+#define BYTE_LENGTH 8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be assumed as the max length for iv, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+ struct qat_sym_sgl qat_sgl_src;
+ struct qat_sym_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen);
+
+
+/** Encrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to perform CFB encryption: same result, better performance
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_encrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst after post-process:",
+ dst, last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
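+
+/*
+ * Worked example (illustrative): for a 100-byte DOCSIS payload with AES
+ * (block_len = 16), last_block_len = 100 % 16 = 4. The hardware CBC-encrypts
+ * the first 96 bytes; this function then CFB-encrypts the 4-byte runt in
+ * software, using ciphertext bytes 80-95 as the IV, and returns 96.
+ */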
+
+static inline void
+qat_sym_process_response(void **op, uint8_t *resp)
+{
+
+ struct icp_qat_fw_comn_resp *resp_msg =
+ (struct icp_qat_fw_comn_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ struct qat_sym_session *sess = (struct qat_sym_session *)
+ get_sym_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ *op = (void *)rx_op;
+}
+#else
+
+static inline void
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+}
+#endif
+#endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_sym_capabilities.h
index 37a6b7cb..eea08bc7 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_sym_capabilities.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
+ * Copyright(c) 2017-2018 Intel Corporation
*/
-#ifndef _QAT_CRYPTO_CAPABILITIES_H_
-#define _QAT_CRYPTO_CAPABILITIES_H_
+#ifndef _QAT_SYM_CAPABILITIES_H_
+#define _QAT_SYM_CAPABILITIES_H_
#define QAT_BASE_GEN1_SYM_CAPABILITIES \
{ /* SHA1 HMAC */ \
@@ -434,7 +434,7 @@
.algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
.block_size = 8, \
.key_size = { \
- .min = 16, \
+ .min = 8, \
.max = 24, \
.increment = 8 \
}, \
@@ -554,4 +554,4 @@
}, } \
}
-#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */
+#endif /* _QAT_SYM_CAPABILITIES_H_ */
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
new file mode 100644
index 00000000..96f442e8
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_logs.h"
+#include "qat_sym.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+static int qat_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+static int qat_sym_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static void qat_sym_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return;
+}
+
+static int qat_sym_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qat_sym_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[internals->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = internals->qat_dev_capabilities;
+ info->driver_id = cryptodev_qat_driver_id;
+		/* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
+ }
+}
+
+static void qat_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_sym_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void qat_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ struct qat_sym_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC);
+}
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release sym qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool __rte_unused)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+ const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_sym_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_sym_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
+ qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "sym";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
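+
+/*
+ * A note on the cookie address arithmetic above (illustrative):
+ * rte_mempool_virt2iova(cookie) gives the IO address of the cookie object
+ * itself; adding offsetof(struct qat_sym_op_cookie, qat_sgl_src) yields
+ * the IO address of the embedded SGL table, which the device can DMA
+ * directly. E.g. a cookie at IOVA 0x20000 with qat_sgl_src as the first
+ * member (offset 0) gives qat_sgl_src_phys_addr = 0x20000.
+ */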
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_sym_dev_config,
+ .dev_start = qat_sym_dev_start,
+ .dev_stop = qat_sym_dev_stop,
+ .dev_close = qat_sym_dev_close,
+ .dev_infos_get = qat_sym_dev_info_get,
+
+ .stats_get = qat_sym_stats_get,
+ .stats_reset = qat_sym_stats_reset,
+ .queue_pair_setup = qat_sym_qp_setup,
+ .queue_pair_release = qat_sym_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = qat_sym_session_get_private_size,
+ .sym_session_configure = qat_sym_session_configure,
+ .sym_session_clear = qat_sym_session_clear
+};
+
+static uint16_t
+qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+/* An rte_driver is needed to register both the device and the driver with
+ * cryptodev.
+ * The QAT PCI device's actual rte_driver can't be used, as its name
+ * represents the whole PCI device with all of its services. Think of this as
+ * a holder for a name for the crypto part of the PCI device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+ .name = qat_sym_drv_name,
+ .alias = qat_sym_drv_name
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_sym_dev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_sym_dev_private *internals;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "sym");
+ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+ /* Populate subset device to use in cryptodev device creation */
+ qat_pci_dev->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_pci_dev->sym_rte_dev.numa_node =
+ qat_pci_dev->pci_dev->device.numa_node;
+ qat_pci_dev->sym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_pci_dev->sym_rte_dev), &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ qat_pci_dev->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = cryptodev_qat_driver_id;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+ qat_pci_dev->sym_dev = internals;
+
+ internals->sym_dev_id = cryptodev->data->dev_id;
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
+ break;
+ case QAT_GEN2:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ break;
+ default:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN2",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->sym_dev_id);
+ return 0;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->sym_dev == NULL)
+ return 0;
+
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_dev->sym_rte_dev.name = NULL;
+ qat_pci_dev->sym_dev = NULL;
+
+ return 0;
+}
+
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_sym_driver,
+ cryptodev_qat_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
new file mode 100644
index 00000000..d3432854
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_pmd.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_PMD_H_
+#define _QAT_SYM_PMD_H_
+
+#ifdef BUILD_QAT_SYM
+
+#include <rte_cryptodev.h>
+
+#include "qat_sym_capabilities.h"
+#include "qat_device.h"
+
+/**< Intel(R) QAT Symmetric Crypto PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+
+extern uint8_t cryptodev_qat_driver_id;
+
+/** Private data structure for a QAT device.
+ * This QAT device offers only the symmetric crypto service;
+ * there can be one of these on each qat_pci_device (VF).
+ * In future there may also be private data structures for other services.
+ */
+struct qat_sym_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ uint8_t sym_dev_id;
+ /**< Device instance for this rte_cryptodev */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+ /* QAT device symmetric crypto capabilities */
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_sym_session.c
index 26f854c2..1d58220a 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,50 +1,12 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
+#include <openssl/sha.h> /* Needed to calculate pre-compute values */
+#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+#include <openssl/evp.h> /* Needed for bpi runt block processing */
+
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
@@ -53,13 +15,714 @@
#include <rte_malloc.h>
#include <rte_crypto_sym.h>
-#include "../qat_logs.h"
+#include "qat_logs.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
-#include <openssl/sha.h> /* Needed to calculate pre-compute values */
-#include <openssl/aes.h> /* Needed to calculate pre-compute values */
-#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+/** Frees a previously created context
+ * Depends on openssl libcrypto
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+ if (bpi_ctx != NULL)
+ EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
+
+/** Creates an AES or DES context in ECB mode
+ * Depends on openssl libcrypto
+ */
+static int
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ enum rte_crypto_cipher_operation direction __rte_unused,
+ uint8_t *key, void **ctx)
+{
+ const EVP_CIPHER *algo = NULL;
+ int ret;
+ *ctx = EVP_CIPHER_CTX_new();
+
+ if (*ctx == NULL) {
+ ret = -ENOMEM;
+ goto ctx_init_err;
+ }
+
+ if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+ algo = EVP_des_ecb();
+ else
+ algo = EVP_aes_128_ecb();
+
+	/* IV will be ECB-encrypted whether the direction is encrypt or decrypt */
+ if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+ ret = -EINVAL;
+ goto ctx_init_err;
+ }
+
+ return 0;
+
+ctx_init_err:
+ if (*ctx != NULL)
+ EVP_CIPHER_CTX_free(*ctx);
+ return ret;
+}
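+
+/*
+ * Illustrative use (a sketch, not driver code; the zeroed 16-byte key is
+ * hypothetical):
+ *
+ *	void *bpi_ctx = NULL;
+ *	uint8_t key[16] = { 0 };
+ *
+ *	if (bpi_cipher_ctx_init(RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ *			RTE_CRYPTO_CIPHER_OP_ENCRYPT, key, &bpi_ctx) == 0) {
+ *		... use bpi_ctx for BPI runt-block processing ...
+ *		bpi_cipher_ctx_free(bpi_ctx);
+ *	}
+ */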
+
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (capability->sym.cipher.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (capability->sym.auth.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+ if (sess_priv) {
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_sym_session_get_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_AUTH;
+
+ /* AEAD */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		/* AES-GCM and AES-CCM work in different orders:
+		 * GCM first encrypts and then generates the hash, whereas
+		 * AES-CCM first generates the hash and then encrypts.
+		 * A similar relation applies to decryption.
+		 */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ }
+
+ if (xform->next == NULL)
+ return -1;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+ return -1;
+}
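+
+/*
+ * Example mappings (illustrative): a CIPHER xform chained to an AUTH
+ * xform maps to ICP_QAT_FW_LA_CMD_CIPHER_HASH; an AES-GCM AEAD xform
+ * with op == ENCRYPT also maps to CIPHER_HASH, while AES-CCM encrypt
+ * maps to HASH_CIPHER because CCM MACs the plaintext before encrypting.
+ */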
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ int ret;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid KASUMI cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create DES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create AES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ ret = -ENOTSUP;
+ goto error_out;
+ }
+ if (qat_sym_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid ZUC cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ ret = -ENOTSUP;
+ goto error_out;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ else
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length)) {
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ return 0;
+
+error_out:
+ if (session->bpi_ctx) {
+ bpi_cipher_ctx_free(session->bpi_ctx);
+ session->bpi_ctx = NULL;
+ }
+ return ret;
+}
-#include "qat_algs.h"
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ QAT_LOG(ERR,
+ "Crypto QAT PMD: failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_sym_session *session = session_private;
+ int ret;
+ int qat_cmd_id;
+
+ /* Set context descriptor physical address */
+ session->cd_paddr = rte_mempool_virt2iova(session) +
+ offsetof(struct qat_sym_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ QAT_LOG(ERR, "Unsupported xform chain requested");
+ return -ENOTSUP;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ ret = qat_sym_session_configure_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_AUTH:
+ ret = qat_sym_session_configure_auth(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+ case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+ case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_MGF1:
+ case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_DELIMITER:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ uint8_t *key_data = auth_xform->key.data;
+ uint8_t key_length = auth_xform->key.length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ if (qat_sym_validate_aes_key(auth_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ return -ENOTSUP;
+ }
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
+ auth_xform->algo);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -EINVAL;
+ }
+
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
+ if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+			/*
+			 * The cipher desc content must be created first,
+			 * then the authentication desc content
+			 */
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ } else {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+			/*
+			 * The authentication desc content must be created
+			 * first, then the cipher desc content
+			 */
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+ }
+		/* Restore to authentication only */
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+ } else {
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ }
+
+ session->digest_length = auth_xform->digest_length;
+ return 0;
+}
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
+
+ /*
+ * Store AEAD IV parameters as cipher IV,
+ * to avoid unnecessary memory usage
+ */
+ session->cipher_iv.offset = xform->aead.iv.offset;
+ session->cipher_iv.length = xform->aead.iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
+ default:
+		QAT_LOG(ERR, "Crypto: Undefined AEAD algo %u specified",
+ aead_xform->algo);
+ return -EINVAL;
+ }
+
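+	/*
+	 * GCM authenticates the ciphertext while CCM authenticates the
+	 * plaintext, so GCM-encrypt and CCM-decrypt run the cipher slice
+	 * first and the opposite pairings run the hash first.
+	 */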
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+	/*
+	 * The cipher descriptor content must be created first,
+	 * then the authentication descriptor.
+	 */
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+ } else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+	/*
+	 * The authentication descriptor content must be created first,
+	 * then the cipher descriptor.
+	 */
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+ }
+
+ session->digest_length = aead_xform->digest_length;
+ return 0;
+}
+
+unsigned int qat_sym_session_get_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
+}
/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
@@ -74,7 +737,7 @@ int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
case ICP_QAT_HW_CIPHER_ALGO_AES256:
return ICP_QAT_HW_AES_BLK_SZ;
default:
- PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+ QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
return -EFAULT;
};
return -EFAULT;
@@ -121,18 +784,18 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_NULL:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -158,7 +821,7 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
/* return maximum digest size in this case */
return ICP_QAT_HW_SHA512_STATE1_SZ;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -186,7 +849,7 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
/* return maximum block size in this case */
return SHA512_CBLOCK;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -270,7 +933,6 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
uint64_t *hash_state_out_be64;
int i;
- PMD_INIT_FUNC_TRACE();
digest_size = qat_hash_get_digest_size(hash_alg);
if (digest_size <= 0)
return -EFAULT;
@@ -319,7 +981,7 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
return -EFAULT;
break;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
return -EFAULT;
}
@@ -329,7 +991,7 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
-static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
const uint8_t *auth_key,
uint16_t auth_keylen,
uint8_t *p_state_buf,
@@ -340,7 +1002,6 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
int i;
- PMD_INIT_FUNC_TRACE();
if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
static uint8_t qat_aes_xcbc_key_seed[
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
@@ -360,7 +1021,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
if (in == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ QAT_LOG(ERR, "Failed to alloc memory");
return -ENOMEM;
}
@@ -395,7 +1056,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_GALOIS_H_SZ, 16);
if (in == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ QAT_LOG(ERR, "Failed to alloc memory");
return -ENOMEM;
}
@@ -420,7 +1081,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
memset(opad, 0, block_size);
if (auth_keylen > (unsigned int)block_size) {
- PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
+ QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
return -EFAULT;
}
rte_memcpy(ipad, auth_key, auth_keylen);
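+	/*
+	 * Standard HMAC precompute: the key is XOR-ed with the ipad/opad
+	 * constants, partially hashed, and the intermediate digests are
+	 * stored as the initial hash state.
+	 */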
@@ -437,7 +1098,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
memset(ipad, 0, block_size);
memset(opad, 0, block_size);
- PMD_DRV_LOG(ERR, "ipad precompute failed");
+ QAT_LOG(ERR, "ipad precompute failed");
return -EFAULT;
}
@@ -449,7 +1110,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
memset(ipad, 0, block_size);
memset(opad, 0, block_size);
- PMD_DRV_LOG(ERR, "opad precompute failed");
+ QAT_LOG(ERR, "opad precompute failed");
return -EFAULT;
}
@@ -459,10 +1120,10 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
return 0;
}
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags)
+static void
+qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_sym_proto_flag proto_flags)
{
- PMD_INIT_FUNC_TRACE();
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
@@ -508,11 +1169,11 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
* and set its protocol flag in both cipher and auth part of content
* descriptor building function
*/
-static enum qat_crypto_proto_flag
+static enum qat_sym_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
switch (proto) {
@@ -527,7 +1188,7 @@ qat_get_crypto_proto_flag(uint16_t flags)
return qat_proto_flag;
}
-int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
+int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
uint8_t *cipherkey,
uint32_t cipherkeylen)
{
@@ -539,13 +1200,12 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
uint32_t total_key_size;
uint16_t cipher_offset, cd_size;
uint32_t wordIndex = 0;
uint32_t *temp_key = NULL;
- PMD_INIT_FUNC_TRACE();
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
@@ -570,7 +1230,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
ICP_QAT_FW_SLICE_DRAM_WR);
cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
+ QAT_LOG(ERR, "Invalid param, must be a cipher command.");
return -EFAULT;
}
@@ -631,7 +1291,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
header->service_cmd_id = cdesc->qat_cmd;
- qat_alg_init_common_hdr(header, qat_proto_flag);
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
cipher->cipher_config.val =
@@ -662,11 +1322,19 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
if (total_key_size > cipherkeylen) {
uint32_t padding_size = total_key_size-cipherkeylen;
if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
- && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
/* K3 not provided so use K1 = K3*/
memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
- else
+ } else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
+			/* K2 and K3 not provided so use K1 = K2 = K3 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey,
+ cipherkeylen);
+ memcpy(cdesc->cd_cur_ptr+cipherkeylen,
+ cipherkey, cipherkeylen);
+ } else
memset(cdesc->cd_cur_ptr, 0, padding_size);
+
cdesc->cd_cur_ptr += padding_size;
}
cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
@@ -675,7 +1343,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
return 0;
}
-int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
uint32_t aad_length,
@@ -699,11 +1367,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint32_t *aad_len = NULL;
uint32_t wordIndex = 0;
uint32_t *pTempKey;
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
- PMD_INIT_FUNC_TRACE();
-
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
@@ -721,7 +1387,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
ICP_QAT_FW_SLICE_DRAM_WR);
cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
+ QAT_LOG(ERR, "Invalid param, must be a hash command.");
return -EFAULT;
}
@@ -764,51 +1430,51 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
*/
switch (cdesc->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA224:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA384:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
&state2_size)) {
- PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
+ QAT_LOG(ERR, "(XCBC)precompute failed");
return -EFAULT;
}
break;
@@ -816,10 +1482,10 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
- if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
+ if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
&state2_size)) {
- PMD_DRV_LOG(ERR, "(GCM)precompute failed");
+ QAT_LOG(ERR, "(GCM)precompute failed");
return -EFAULT;
}
/*
@@ -876,10 +1542,10 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
authkey, authkeylen, cdesc->cd_cur_ptr,
&state1_size)) {
- PMD_DRV_LOG(ERR, "(MD5)precompute failed");
+ QAT_LOG(ERR, "(MD5)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
@@ -898,14 +1564,13 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
if (aad_length > 0) {
aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
auth_param->u2.aad_sz =
- RTE_ALIGN_CEIL(aad_length,
- ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+ RTE_ALIGN_CEIL(aad_length,
+ ICP_QAT_HW_CCM_AAD_ALIGNMENT);
} else {
auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
}
-
cdesc->aad_len = aad_length;
hash->auth_counter.counter = 0;
@@ -935,12 +1600,12 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
return -EFAULT;
}
/* Request template setup */
- qat_alg_init_common_hdr(header, qat_proto_flag);
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
header->service_cmd_id = cdesc->qat_cmd;
/* Auth CD config setup */
@@ -966,7 +1631,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
return 0;
}
-int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_AES_128_KEY_SZ:
@@ -984,7 +1649,7 @@ int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_aes_docsisbpi_key(int key_len,
+int qat_sym_validate_aes_docsisbpi_key(int key_len,
enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
@@ -997,7 +1662,7 @@ int qat_alg_validate_aes_docsisbpi_key(int key_len,
return 0;
}
-int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
@@ -1009,7 +1674,7 @@ int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_KASUMI_KEY_SZ:
@@ -1021,7 +1686,7 @@ int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_DES_KEY_SZ:
@@ -1033,11 +1698,12 @@ int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case QAT_3DES_KEY_SZ_OPT1:
case QAT_3DES_KEY_SZ_OPT2:
+ case QAT_3DES_KEY_SZ_OPT3:
*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
break;
default:
@@ -1046,7 +1712,7 @@ int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
new file mode 100644
index 00000000..e8f51e5b
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _QAT_SYM_SESSION_H_
+#define _QAT_SYM_SESSION_H_
+
+#include <rte_crypto.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+/*
+ * Key Modifier (KM) value XOR-ed with the Integrity Key (IK) when the
+ * KASUMI algorithm is used in F9 mode.
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
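+
+/*
+ * For F9, each 32-bit word of the integrity key is XOR-ed with the
+ * modifier above when the hash state is set up (see the KASUMI_F9 case
+ * in qat_sym_session_aead_create_cd_auth()).
+ */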
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+#define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */
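+/*
+ * Short keys are padded when the content descriptor is built: with
+ * OPT2 the missing K3 is a copy of K1, and with OPT3 both K2 and K3
+ * are copies of K1 (see qat_sym_session_aead_create_cd_cipher()).
+ */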
+
+
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+enum qat_sym_proto_flag {
+ QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+ QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+ QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+ QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+ QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/* Common content descriptor */
+struct qat_sym_cd {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_session {
+ enum icp_qat_fw_la_cmd_id qat_cmd;
+ enum icp_qat_hw_cipher_algo qat_cipher_alg;
+ enum icp_qat_hw_cipher_dir qat_dir;
+ enum icp_qat_hw_cipher_mode qat_mode;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
+ void *bpi_ctx;
+ struct qat_sym_cd cd;
+ uint8_t *cd_cur_ptr;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
+ struct qat_crypto_instance *inst;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } cipher_iv;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } auth_iv;
+ uint16_t digest_length;
+ rte_spinlock_t lock; /* protects this struct */
+ enum qat_device_gen min_qat_dev_gen;
+};
+
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool);
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cd,
+ uint8_t *enckey,
+ uint32_t enckeylen);
+
+int
+qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t aad_length,
+ uint32_t digestsize,
+ unsigned int operation);
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *session);
+
+unsigned int
+qat_sym_session_get_private_size(struct rte_cryptodev *dev);
+
+void
+qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_sym_proto_flag proto_flags);
+int
+qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int
+qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+
+#endif /* _QAT_SYM_SESSION_H_ */
diff --git a/drivers/crypto/qat/rte_pmd_qat_version.map b/drivers/crypto/qat/rte_pmd_qat_version.map
deleted file mode 100644
index bbaf1c85..00000000
--- a/drivers/crypto/qat/rte_pmd_qat_version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_2.2 {
- local: *;
-};
\ No newline at end of file
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
deleted file mode 100644
index bf837401..00000000
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <rte_cryptodev_pmd.h>
-
-#include "qat_crypto.h"
-#include "qat_logs.h"
-
-uint8_t cryptodev_qat_driver_id;
-
-static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = {
- QAT_BASE_GEN1_SYM_CAPABILITIES,
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = {
- QAT_BASE_GEN1_SYM_CAPABILITIES,
- QAT_EXTRA_GEN2_SYM_CAPABILITIES,
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
- /* Device related operations */
- .dev_configure = qat_dev_config,
- .dev_start = qat_dev_start,
- .dev_stop = qat_dev_stop,
- .dev_close = qat_dev_close,
- .dev_infos_get = qat_dev_info_get,
-
- .stats_get = qat_crypto_sym_stats_get,
- .stats_reset = qat_crypto_sym_stats_reset,
- .queue_pair_setup = qat_crypto_sym_qp_setup,
- .queue_pair_release = qat_crypto_sym_qp_release,
- .queue_pair_start = NULL,
- .queue_pair_stop = NULL,
- .queue_pair_count = NULL,
-
- /* Crypto related operations */
- .session_get_size = qat_crypto_sym_get_session_private_size,
- .session_configure = qat_crypto_sym_configure_session,
- .session_clear = qat_crypto_sym_clear_session
-};
-
-/*
- * The set of PCI devices this driver supports
- */
-
-static const struct rte_pci_id pci_id_qat_map[] = {
- {
- RTE_PCI_DEVICE(0x8086, 0x0443),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x37c9),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x19e3),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x6f55),
- },
- {.device_id = 0},
-};
-
-static int
-crypto_qat_create(const char *name, struct rte_pci_device *pci_dev,
- struct rte_cryptodev_pmd_init_params *init_params)
-{
- struct rte_cryptodev *cryptodev;
- struct qat_pmd_private *internals;
-
- PMD_INIT_FUNC_TRACE();
-
- cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
- init_params);
- if (cryptodev == NULL)
- return -ENODEV;
-
- cryptodev->driver_id = cryptodev_qat_driver_id;
- cryptodev->dev_ops = &crypto_qat_ops;
-
- cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
- cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
-
- cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
- RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
-
- internals = cryptodev->data->dev_private;
- internals->max_nb_sessions = init_params->max_nb_sessions;
- switch (pci_dev->id.device_id) {
- case 0x0443:
- internals->qat_dev_gen = QAT_GEN1;
- internals->qat_dev_capabilities = qat_gen1_capabilities;
- break;
- case 0x37c9:
- case 0x19e3:
- case 0x6f55:
- internals->qat_dev_gen = QAT_GEN2;
- internals->qat_dev_capabilities = qat_gen2_capabilities;
- break;
- default:
- PMD_DRV_LOG(ERR,
- "Invalid dev_id, can't determine capabilities");
- break;
- }
-
- /*
- * For secondary processes, we don't initialise any further as primary
- * has already done this work. Only check we don't need a different
- * RX function
- */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_DRV_LOG(DEBUG, "Device already initialised by primary process");
- return 0;
- }
-
- return 0;
-}
-
-static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
-{
- struct rte_cryptodev_pmd_init_params init_params = {
- .name = "",
- .socket_id = rte_socket_id(),
- .private_data_size = sizeof(struct qat_pmd_private),
- .max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS
- };
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-
- PMD_DRV_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
- pci_dev->addr.bus,
- pci_dev->addr.devid,
- pci_dev->addr.function);
-
- rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
-
- return crypto_qat_create(name, pci_dev, &init_params);
-}
-
-static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev)
-{
- struct rte_cryptodev *cryptodev;
- char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_pci_device_name(&pci_dev->addr, cryptodev_name,
- sizeof(cryptodev_name));
-
- cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
- if (cryptodev == NULL)
- return -ENODEV;
-
- /* free crypto device */
- return rte_cryptodev_pmd_destroy(cryptodev);
-}
-
-static struct rte_pci_driver rte_qat_pmd = {
- .id_table = pci_id_qat_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = crypto_qat_pci_probe,
- .remove = crypto_qat_pci_remove
-};
-
-static struct cryptodev_driver qat_crypto_drv;
-
-RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
-RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd,
- cryptodev_qat_driver_id);
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 140c8b41..6e4919c4 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -9,6 +9,8 @@
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
+int scheduler_logtype_driver;
+
/** update the scheduler pmd's capability with attaching device's
* capability.
* For each device to be attached, the scheduler's capability should be
@@ -91,8 +93,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
uint32_t nb_caps = 0, i;
- if (sched_ctx->capabilities)
+ if (sched_ctx->capabilities) {
rte_free(sched_ctx->capabilities);
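+		/* clear the stale pointer so it cannot be freed twice */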
+ sched_ctx->capabilities = NULL;
+ }
for (i = 0; i < sched_ctx->nb_slaves; i++) {
struct rte_cryptodev_info dev_info;
@@ -166,30 +170,30 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
uint32_t i;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
sched_ctx = dev->data->dev_private;
if (sched_ctx->nb_slaves >=
RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
- CS_LOG_ERR("Too many slaves attached");
+ CR_SCHED_LOG(ERR, "Too many slaves attached");
return -ENOMEM;
}
for (i = 0; i < sched_ctx->nb_slaves; i++)
if (sched_ctx->slaves[i].dev_id == slave_id) {
- CS_LOG_ERR("Slave already added");
+ CR_SCHED_LOG(ERR, "Slave already added");
return -ENOTSUP;
}
@@ -206,7 +210,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
slave->driver_id = 0;
sched_ctx->nb_slaves--;
- CS_LOG_ERR("capabilities update failed");
+ CR_SCHED_LOG(ERR, "capabilities update failed");
return -ENOTSUP;
}
@@ -225,17 +229,17 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
uint32_t i, slave_pos;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -245,12 +249,12 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
break;
if (slave_pos == sched_ctx->nb_slaves) {
- CS_LOG_ERR("Cannot find slave");
+ CR_SCHED_LOG(ERR, "Cannot find slave");
return -ENOTSUP;
}
if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
- CS_LOG_ERR("Failed to detach slave");
+ CR_SCHED_LOG(ERR, "Failed to detach slave");
return -ENOTSUP;
}
@@ -263,7 +267,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
sched_ctx->nb_slaves--;
if (update_scheduler_capability(sched_ctx) < 0) {
- CS_LOG_ERR("capabilities update failed");
+ CR_SCHED_LOG(ERR, "capabilities update failed");
return -ENOTSUP;
}
@@ -282,17 +286,17 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -305,33 +309,33 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
case CDEV_SCHED_MODE_ROUNDROBIN:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
roundrobin_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
pkt_size_based_distr_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_FAILOVER:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
failover_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_MULTICORE:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
multicore_scheduler) < 0) {
- CS_LOG_ERR("Failed to load scheduler");
+ CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
default:
- CS_LOG_ERR("Not yet supported");
+ CR_SCHED_LOG(ERR, "Not yet supported");
return -ENOTSUP;
}
@@ -345,12 +349,12 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -367,17 +371,17 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -395,12 +399,12 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -417,25 +421,25 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
sched_ctx = dev->data->dev_private;
if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", scheduler->name,
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", scheduler->name,
RTE_CRYPTODEV_NAME_MAX_LEN);
return -EINVAL;
}
@@ -444,8 +448,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
if (strlen(scheduler->description) >
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid description %s, should be less than "
- "%u bytes.\n", scheduler->description,
+ CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
+ "%u bytes.", scheduler->description,
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
return -EINVAL;
}
@@ -462,14 +466,16 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
sched_ctx->ops.option_set = scheduler->ops->option_set;
sched_ctx->ops.option_get = scheduler->ops->option_get;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
if (sched_ctx->ops.create_private_ctx) {
int ret = (*sched_ctx->ops.create_private_ctx)(dev);
if (ret < 0) {
- CS_LOG_ERR("Unable to create scheduler private "
+ CR_SCHED_LOG(ERR, "Unable to create scheduler private "
"context");
return ret;
}
@@ -488,12 +494,12 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
uint32_t nb_slaves = 0;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -521,17 +527,17 @@ rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
option_type >= CDEV_SCHED_OPTION_COUNT) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (!option) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (dev->data->dev_started) {
- CS_LOG_ERR("Illegal operation");
+ CR_SCHED_LOG(ERR, "Illegal operation");
return -EBUSY;
}
@@ -551,17 +557,17 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
struct scheduler_ctx *sched_ctx;
if (!dev) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
if (!option) {
- CS_LOG_ERR("Invalid option parameter");
+ CR_SCHED_LOG(ERR, "Invalid option parameter");
return -EINVAL;
}
if (dev->driver_id != cryptodev_driver_id) {
- CS_LOG_ERR("Operation not supported");
+ CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -571,3 +577,8 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
return (*sched_ctx->ops.option_get)(dev, option_type, option);
}
+
+RTE_INIT(scheduler_init_log)
+{
+ scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler");
+}
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 01e7646c..3faea409 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -30,7 +30,7 @@ extern "C" {
#endif
/** Maximum number of multi-core worker cores */
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64)
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1)
/** Round-robin scheduling mode string */
#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
@@ -76,6 +76,7 @@ enum rte_cryptodev_schedule_option_type {
/**
* Threshold option structure
*/
+#define RTE_CRYPTODEV_SCHEDULER_PARAM_THRES "threshold"
struct rte_cryptodev_scheduler_threshold_option {
uint32_t threshold; /**< Threshold for packet-size mode */
};
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index 005b1638..ddfb5b81 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -139,7 +139,7 @@ scheduler_start(struct rte_cryptodev *dev)
uint16_t i;
if (sched_ctx->nb_slaves < 2) {
- CS_LOG_ERR("Number of slaves shall no less than 2");
+		CR_SCHED_LOG(ERR, "Number of slaves shall be no less than 2");
return -ENOMEM;
}
@@ -182,7 +182,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
rte_socket_id());
if (!fo_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
return -ENOMEM;
}
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index b2ce44ce..d410e69d 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -21,8 +21,8 @@ struct mc_scheduler_ctx {
uint32_t num_workers; /**< Number of workers polling */
uint32_t stop_signal;
- struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
- struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+ struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
};
struct mc_scheduler_qp_ctx {
@@ -178,7 +178,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev)
}
}
if (worker_idx == -1) {
- CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id);
+		CR_SCHED_LOG(ERR, "worker on core %u: cannot find worker index!",
+			core_id);
return -1;
}
@@ -313,7 +314,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0,
rte_socket_id());
if (!mc_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
return -ENOMEM;
}
@@ -328,16 +329,18 @@ static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
- struct mc_scheduler_ctx *mc_ctx;
+ struct mc_scheduler_ctx *mc_ctx = NULL;
uint16_t i;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
rte_socket_id());
if (!mc_ctx) {
- CS_LOG_ERR("failed allocate memory");
+		CR_SCHED_LOG(ERR, "failed to allocate memory");
return -ENOMEM;
}
@@ -345,25 +348,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
for (i = 0; i < sched_ctx->nb_wc; i++) {
char r_name[16];
- snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
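+		/*
+		 * Reuse a ring of the same name if one already exists
+		 * (e.g. left over from a previous instance) rather than
+		 * failing to create it.
+		 */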
+ mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_enq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
- snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_deq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CR_SCHED_LOG(ERR, "Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
}
sched_ctx->private_ctx = (void *)mc_ctx;
return 0;
+
+exit:
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ rte_ring_free(mc_ctx->sched_enq_ring[i]);
+ rte_ring_free(mc_ctx->sched_deq_ring[i]);
+ }
+ rte_free(mc_ctx);
+
+ return -1;
}
struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 96bf0161..74129b66 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -258,7 +258,7 @@ scheduler_start(struct rte_cryptodev *dev)
/* for packet size based scheduler, nb_slaves have to >= 2 */
if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
- CS_LOG_ERR("not enough slaves to start");
+ CR_SCHED_LOG(ERR, "not enough slaves to start");
return -1;
}
@@ -302,7 +302,7 @@ scheduler_stop(struct rte_cryptodev *dev)
if (ps_qp_ctx->primary_slave.nb_inflight_cops +
ps_qp_ctx->secondary_slave.nb_inflight_cops) {
- CS_LOG_ERR("Some crypto ops left in slave queue");
+ CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue");
return -1;
}
}
@@ -319,7 +319,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
rte_socket_id());
if (!ps_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+		CR_SCHED_LOG(ERR, "failed to allocate memory for private queue pair");
return -ENOMEM;
}
@@ -334,13 +334,15 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct psd_scheduler_ctx *psd_ctx;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
rte_socket_id());
if (!psd_ctx) {
- CS_LOG_ERR("failed allocate memory");
+		CR_SCHED_LOG(ERR, "failed to allocate memory");
return -ENOMEM;
}
@@ -360,14 +362,14 @@ scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
if ((enum rte_cryptodev_schedule_option_type)option_type !=
CDEV_SCHED_OPTION_THRESHOLD) {
- CS_LOG_ERR("Option not supported");
+ CR_SCHED_LOG(ERR, "Option not supported");
return -EINVAL;
}
threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
option)->threshold;
if (!rte_is_power_of_2(threshold)) {
- CS_LOG_ERR("Threshold is not power of 2");
+ CR_SCHED_LOG(ERR, "Threshold is not power of 2");
return -EINVAL;
}
@@ -386,7 +388,7 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
if ((enum rte_cryptodev_schedule_option_type)option_type !=
CDEV_SCHED_OPTION_THRESHOLD) {
- CS_LOG_ERR("Option not supported");
+ CR_SCHED_LOG(ERR, "Option not supported");
return -EINVAL;
}
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 51a85fa6..a9221a94 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -9,6 +9,7 @@
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_reorder.h>
+#include <rte_string_fns.h>
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
@@ -19,8 +20,10 @@ struct scheduler_init_params {
struct rte_cryptodev_pmd_init_params def_p;
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
+ char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
uint32_t enable_ordering;
- uint64_t wcmask;
+ uint16_t wc_pool[RTE_MAX_LCORE];
+ uint16_t nb_wc;
char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
@@ -28,9 +31,9 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_NAME ("name")
#define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
#define RTE_CRYPTODEV_VDEV_MODE ("mode")
+#define RTE_CRYPTODEV_VDEV_MODE_PARAM ("mode_param")
#define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
-#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
@@ -39,9 +42,9 @@ const char *scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
RTE_CRYPTODEV_VDEV_SLAVE,
RTE_CRYPTODEV_VDEV_MODE,
+ RTE_CRYPTODEV_VDEV_MODE_PARAM,
RTE_CRYPTODEV_VDEV_ORDERING,
RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
RTE_CRYPTODEV_VDEV_SOCKET_ID,
RTE_CRYPTODEV_VDEV_COREMASK,
RTE_CRYPTODEV_VDEV_CORELIST
@@ -68,6 +71,8 @@ const struct scheduler_parse_map scheduler_ordering_map[] = {
{"disable", 0}
};
+#define CDEV_SCHED_MODE_PARAM_SEP_CHAR ':'
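+/*
+ * A mode_param vdev argument is parsed as "<name>:<value>", e.g.
+ * mode_param=threshold:64 (illustrative value; the packet-size
+ * distribution threshold must be a power of two).
+ */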
+
static int
cryptodev_scheduler_create(const char *name,
struct rte_vdev_device *vdev,
@@ -81,15 +86,11 @@ cryptodev_scheduler_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device,
&init_params->def_p);
if (dev == NULL) {
- CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
+ CR_SCHED_LOG(ERR, "driver %s: failed to create cryptodev vdev",
name);
return -EFAULT;
}
- if (init_params->wcmask != 0)
- RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
- init_params->wcmask);
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
@@ -100,20 +101,26 @@ cryptodev_scheduler_create(const char *name,
if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
uint16_t i;
- sched_ctx->nb_wc = 0;
+ sched_ctx->nb_wc = init_params->nb_wc;
- for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
- if (init_params->wcmask & (1ULL << i)) {
- sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
- RTE_LOG(INFO, PMD,
- " Worker core[%u]=%u added\n",
- sched_ctx->nb_wc-1, i);
- }
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+ CR_SCHED_LOG(INFO, " Worker core[%u]=%u added",
+ i, sched_ctx->wc_pool[i]);
}
}
if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
init_params->mode < CDEV_SCHED_MODE_COUNT) {
+ union {
+ struct rte_cryptodev_scheduler_threshold_option
+ threshold_option;
+ } option;
+ enum rte_cryptodev_schedule_option_type option_type;
+ char param_name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char param_val[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0};
+ char *s, *end;
+
ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
init_params->mode);
if (ret < 0) {
@@ -125,10 +132,52 @@ cryptodev_scheduler_create(const char *name,
if (scheduler_mode_map[i].val != sched_ctx->mode)
continue;
- RTE_LOG(INFO, PMD, " Scheduling mode = %s\n",
+ CR_SCHED_LOG(INFO, " Scheduling mode = %s",
scheduler_mode_map[i].name);
break;
}
+
+ if (strlen(init_params->mode_param_str) > 0) {
+ s = strchr(init_params->mode_param_str,
+ CDEV_SCHED_MODE_PARAM_SEP_CHAR);
+ if (s == NULL) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ strlcpy(param_name, init_params->mode_param_str,
+ s - init_params->mode_param_str + 1);
+ s++;
+ strlcpy(param_val, s,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ switch (init_params->mode) {
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (strcmp(param_name,
+ RTE_CRYPTODEV_SCHEDULER_PARAM_THRES)
+ != 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+ option_type = CDEV_SCHED_OPTION_THRESHOLD;
+
+ option.threshold_option.threshold =
+ strtoul(param_val, &end, 0);
+ break;
+ default:
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+ if (sched_ctx->ops.option_set(dev, option_type,
+ (void *)&option) < 0) {
+ CR_SCHED_LOG(ERR, "Invalid mode param");
+ return -EINVAL;
+ }
+
+			CR_SCHED_LOG(INFO, " Sched mode param (%s = %s)",
+ param_name, param_val);
+ }
}
sched_ctx->reordering_enabled = init_params->enable_ordering;
@@ -138,7 +187,7 @@ cryptodev_scheduler_create(const char *name,
sched_ctx->reordering_enabled)
continue;
- RTE_LOG(INFO, PMD, " Packet ordering = %s\n",
+ CR_SCHED_LOG(INFO, " Packet ordering = %s",
scheduler_ordering_map[i].name);
break;
@@ -153,7 +202,7 @@ cryptodev_scheduler_create(const char *name,
if (!sched_ctx->init_slave_names[
sched_ctx->nb_init_slaves]) {
- CS_LOG_ERR("driver %s: Insufficient memory",
+ CR_SCHED_LOG(ERR, "driver %s: Insufficient memory",
name);
return -ENOMEM;
}
@@ -175,8 +224,8 @@ cryptodev_scheduler_create(const char *name,
0, SOCKET_ID_ANY);
if (!sched_ctx->capabilities) {
- RTE_LOG(ERR, PMD, "Not enough memory for capability "
- "information\n");
+ CR_SCHED_LOG(ERR, "Not enough memory for capability "
+ "information");
return -ENOMEM;
}
@@ -220,7 +269,7 @@ parse_integer_arg(const char *key __rte_unused,
*i = atoi(value);
if (*i < 0) {
- CS_LOG_ERR("Argument has to be positive.\n");
+ CR_SCHED_LOG(ERR, "Argument has to be positive.");
return -EINVAL;
}
@@ -232,9 +281,47 @@ static int
parse_coremask_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
+ int i, j, val;
+ uint16_t idx = 0;
+ char c;
struct scheduler_init_params *params = extra_args;
- params->wcmask = strtoull(value, NULL, 16);
+ params->nb_wc = 0;
+
+ if (value == NULL)
+ return -1;
+	/* Remove leading and trailing blank characters.
+	 * Remove the 0x/0X prefix if present.
+	 */
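+	/*
+	 * Each hex digit is scanned from the least-significant end and
+	 * maps to four lcore indices; e.g. "0x6" (illustrative) selects
+	 * worker cores 1 and 2.
+	 */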
+ while (isblank(*value))
+ value++;
+ if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+ value += 2;
+ i = strlen(value);
+ while ((i > 0) && isblank(value[i - 1]))
+ i--;
+
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = value[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+
+ for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+ if ((1 << j) & val)
+ params->wc_pool[params->nb_wc++] = idx;
+ }
+ }
return 0;
}
@@ -246,7 +333,7 @@ parse_corelist_arg(const char *key __rte_unused,
{
struct scheduler_init_params *params = extra_args;
- params->wcmask = 0ULL;
+ params->nb_wc = 0;
const char *token = value;
@@ -254,7 +341,11 @@ parse_corelist_arg(const char *key __rte_unused,
char *rval;
unsigned int core = strtoul(token, &rval, 10);
- params->wcmask |= 1ULL << core;
+		if (core >= RTE_MAX_LCORE) {
+			CR_SCHED_LOG(ERR, "Invalid worker core %u, should be smaller "
+				"than %u.", core, RTE_MAX_LCORE);
+			return -EINVAL;
+		}
+		params->wc_pool[params->nb_wc++] = (uint16_t)core;
token = (const char *)rval;
if (token[0] == '\0')
break;
@@ -272,8 +363,8 @@ parse_name_arg(const char *key __rte_unused,
struct rte_cryptodev_pmd_init_params *params = extra_args;
if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- CS_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", value,
+ CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
+ "%u bytes.", value,
RTE_CRYPTODEV_NAME_MAX_LEN - 1);
return -EINVAL;
}
@@ -291,7 +382,7 @@ parse_slave_arg(const char *key __rte_unused,
struct scheduler_init_params *param = extra_args;
if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
- CS_LOG_ERR("Too many slaves.\n");
+ CR_SCHED_LOG(ERR, "Too many slaves.");
return -ENOMEM;
}
@@ -312,12 +403,13 @@ parse_mode_arg(const char *key __rte_unused,
if (strcmp(value, scheduler_mode_map[i].name) == 0) {
param->mode = (enum rte_cryptodev_scheduler_mode)
scheduler_mode_map[i].val;
+
break;
}
}
if (i == RTE_DIM(scheduler_mode_map)) {
- CS_LOG_ERR("Unrecognized input.\n");
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
return -EINVAL;
}
@@ -325,6 +417,18 @@ parse_mode_arg(const char *key __rte_unused,
}
static int
+parse_mode_param_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ strlcpy(param->mode_param_str, value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
parse_ordering_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
@@ -340,7 +444,7 @@ parse_ordering_arg(const char *key __rte_unused,
}
if (i == RTE_DIM(scheduler_ordering_map)) {
- CS_LOG_ERR("Unrecognized input.\n");
+ CR_SCHED_LOG(ERR, "Unrecognized input.");
return -EINVAL;
}
@@ -370,13 +474,6 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
- ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
- &parse_integer_arg,
- &params->def_p.max_nb_sessions);
- if (ret < 0)
- goto free_kvlist;
-
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
&parse_integer_arg,
&params->def_p.socket_id);
@@ -411,6 +508,11 @@ scheduler_parse_init_params(struct scheduler_init_params *params,
if (ret < 0)
goto free_kvlist;
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE_PARAM,
+ &parse_mode_param_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
&parse_ordering_arg, params);
if (ret < 0)
@@ -430,8 +532,7 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct scheduler_ctx),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
},
.nb_slaves = 0,
.mode = CDEV_SCHED_MODE_NOT_SET,
@@ -464,9 +565,8 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
cryptodev_scheduler_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int> "
"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
- cryptodev_scheduler_pmd_drv,
+ cryptodev_scheduler_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 680c2afb..778071ca 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -27,7 +27,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
int status;
if (!slave_dev) {
- CS_LOG_ERR("Failed to locate slave dev %s",
+ CR_SCHED_LOG(ERR, "Failed to locate slave dev %s",
dev_name);
return -EINVAL;
}
@@ -36,16 +36,17 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
scheduler_id, slave_dev->data->dev_id);
if (status < 0) {
- CS_LOG_ERR("Failed to attach slave cryptodev %u",
+ CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u",
slave_dev->data->dev_id);
return status;
}
- CS_LOG_INFO("Scheduler %s attached slave %s\n",
+ CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s",
dev->data->name,
sched_ctx->init_slave_names[i]);
rte_free(sched_ctx->init_slave_names[i]);
+ sched_ctx->init_slave_names[i] = NULL;
sched_ctx->nb_init_slaves -= 1;
}
@@ -101,7 +102,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
dev->data->dev_id, qp_id) < 0) {
- CS_LOG_ERR("failed to create unique reorder buffer "
+ CR_SCHED_LOG(ERR, "failed to create unique reorder buffer"
"name");
return -ENOMEM;
}
@@ -110,7 +111,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
buff_size, rte_socket_id(),
RING_F_SP_ENQ | RING_F_SC_DEQ);
if (!qp_ctx->order_ring) {
- CS_LOG_ERR("failed to create order ring");
+ CR_SCHED_LOG(ERR, "failed to create order ring");
return -ENOMEM;
}
} else {
@@ -144,18 +145,18 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
ret = update_order_ring(dev, i);
if (ret < 0) {
- CS_LOG_ERR("Failed to update reorder buffer");
+ CR_SCHED_LOG(ERR, "Failed to update reorder buffer");
return ret;
}
}
if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
- CS_LOG_ERR("Scheduler mode is not set");
+ CR_SCHED_LOG(ERR, "Scheduler mode is not set");
return -1;
}
if (!sched_ctx->nb_slaves) {
- CS_LOG_ERR("No slave in the scheduler");
+ CR_SCHED_LOG(ERR, "No slave in the scheduler");
return -1;
}
@@ -165,7 +166,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
- CS_LOG_ERR("Failed to attach slave");
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
return -ENOTSUP;
}
}
@@ -173,7 +174,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
- CS_LOG_ERR("Scheduler start failed");
+ CR_SCHED_LOG(ERR, "Scheduler start failed");
return -1;
}
@@ -185,7 +186,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev)
ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
if (ret < 0) {
- CS_LOG_ERR("Failed to start slave dev %u",
+ CR_SCHED_LOG(ERR, "Failed to start slave dev %u",
slave_dev_id);
return ret;
}
@@ -261,11 +262,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
}
}
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
- if (sched_ctx->capabilities)
+ if (sched_ctx->capabilities) {
rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
return 0;
}
@@ -316,8 +321,9 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info *dev_info)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
- uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
- UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
+ uint32_t max_nb_sess = 0;
+ uint16_t headroom_sz = 0;
+ uint16_t tailroom_sz = 0;
uint32_t i;
if (!dev_info)
@@ -333,17 +339,32 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
struct rte_cryptodev_info slave_info;
rte_cryptodev_info_get(slave_dev_id, &slave_info);
- max_nb_sessions = slave_info.sym.max_nb_sessions <
- max_nb_sessions ?
- slave_info.sym.max_nb_sessions :
- max_nb_sessions;
+ uint32_t dev_max_sess = slave_info.sym.max_nb_sessions;
+ if (dev_max_sess != 0) {
+ if (max_nb_sess == 0 || dev_max_sess < max_nb_sess)
+ max_nb_sess = slave_info.sym.max_nb_sessions;
+ }
+
+ /* Get the max headroom requirement among slave PMDs */
+ headroom_sz = slave_info.min_mbuf_headroom_req >
+ headroom_sz ?
+ slave_info.min_mbuf_headroom_req :
+ headroom_sz;
+
+ /* Get the max tailroom requirement among slave PMDs */
+ tailroom_sz = slave_info.min_mbuf_tailroom_req >
+ tailroom_sz ?
+ slave_info.min_mbuf_tailroom_req :
+ tailroom_sz;
}
dev_info->driver_id = dev->driver_id;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = sched_ctx->capabilities;
dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = max_nb_sessions;
+ dev_info->min_mbuf_headroom_req = headroom_sz;
+ dev_info->min_mbuf_tailroom_req = tailroom_sz;
+ dev_info->sym.max_nb_sessions = max_nb_sess;
}
/** Release queue pair */
@@ -381,7 +402,7 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
"CRYTO_SCHE PMD %u QP %u",
dev->data->dev_id, qp_id) < 0) {
- CS_LOG_ERR("Failed to create unique queue pair name");
+ CR_SCHED_LOG(ERR, "Failed to create unique queue pair name");
return -EFAULT;
}
@@ -419,14 +440,14 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
*/
ret = scheduler_attach_init_slave(dev);
if (ret < 0) {
- CS_LOG_ERR("Failed to attach slave");
+ CR_SCHED_LOG(ERR, "Failed to attach slave");
scheduler_pmd_qp_release(dev, qp_id);
return ret;
}
if (*sched_ctx->ops.config_queue_pair) {
if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
- CS_LOG_ERR("Unable to configure queue pair");
+ CR_SCHED_LOG(ERR, "Unable to configure queue pair");
return -1;
}
}
@@ -434,22 +455,6 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
return 0;
}
-/** Start queue pair */
-static int
-scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
scheduler_pmd_qp_count(struct rte_cryptodev *dev)
@@ -458,7 +463,7 @@ scheduler_pmd_qp_count(struct rte_cryptodev *dev)
}
static uint32_t
-scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint8_t i = 0;
@@ -468,7 +473,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
for (i = 0; i < sched_ctx->nb_slaves; i++) {
uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id];
- uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+ uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
if (max_priv_sess_size < priv_sess_size)
max_priv_sess_size = priv_sess_size;
@@ -478,7 +483,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
}
static int
-scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -493,7 +498,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev,
ret = rte_cryptodev_sym_session_init(slave->dev_id, sess,
xform, mempool);
if (ret < 0) {
- CS_LOG_ERR("unabled to config sym session");
+ CR_SCHED_LOG(ERR, "unable to config sym session");
return ret;
}
}
@@ -503,7 +508,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-scheduler_pmd_session_clear(struct rte_cryptodev *dev,
+scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
@@ -530,13 +535,11 @@ struct rte_cryptodev_ops scheduler_pmd_ops = {
.queue_pair_setup = scheduler_pmd_qp_setup,
.queue_pair_release = scheduler_pmd_qp_release,
- .queue_pair_start = scheduler_pmd_qp_start,
- .queue_pair_stop = scheduler_pmd_qp_stop,
.queue_pair_count = scheduler_pmd_qp_count,
- .session_get_size = scheduler_pmd_session_get_size,
- .session_configure = scheduler_pmd_session_configure,
- .session_clear = scheduler_pmd_session_clear,
+ .sym_session_get_size = scheduler_pmd_sym_session_get_size,
+ .sym_session_configure = scheduler_pmd_sym_session_configure,
+ .sym_session_clear = scheduler_pmd_sym_session_clear,
};
struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
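
The dev_info hunk above derives the scheduler's advertised limits from its slaves: the smallest non-zero session cap wins, and the largest head/tailroom requirement wins. A standalone restatement of that rule (an illustration, not driver code):

    #include <stdint.h>

    struct slave_caps {
            uint32_t max_nb_sessions;   /* 0 == unlimited */
            uint16_t headroom, tailroom;
    };

    static void
    aggregate(const struct slave_caps *s, unsigned int n,
                    uint32_t *sess, uint16_t *head, uint16_t *tail)
    {
            unsigned int i;

            *sess = 0;
            *head = *tail = 0;
            for (i = 0; i < n; i++) {
                    /* smallest non-zero session limit among slaves */
                    if (s[i].max_nb_sessions &&
                            (*sess == 0 || s[i].max_nb_sessions < *sess))
                            *sess = s[i].max_nb_sessions;
                    /* largest mbuf head/tailroom requirement */
                    if (s[i].headroom > *head)
                            *head = s[i].headroom;
                    if (s[i].tailroom > *tail)
                            *tail = s[i].tailroom;
            }
    }
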
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index dd7ca5a4..d5e602a2 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -12,25 +12,11 @@
#define PER_SLAVE_BUFF_SIZE (256)
-#define CS_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
-#define CS_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-
-#define CS_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define CS_LOG_INFO(fmt, args...)
-#define CS_LOG_DBG(fmt, args...)
-#endif
+extern int scheduler_logtype_driver;
+
+#define CR_SCHED_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
+ "%s() line %u: "fmt "\n", __func__, __LINE__, ##args)
struct scheduler_slave {
uint8_t dev_id;
@@ -60,7 +46,7 @@ struct scheduler_ctx {
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
- uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ uint16_t wc_pool[RTE_MAX_LCORE];
uint16_t nb_wc;
char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
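
The CR_SCHED_LOG macro above assumes scheduler_logtype_driver is registered once at startup in one of the scheduler's .c files. A sketch of the constructor that would pair with it, mirroring the snow3g registration later in this patch (the logtype name is an assumption):

    RTE_INIT(scheduler_init_log)
    {
            scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler");
    }
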
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index c6e03e21..c7082a64 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -175,7 +175,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
rte_socket_id());
if (!rr_qp_ctx) {
- CS_LOG_ERR("failed allocate memory for private queue pair");
+ CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair");
return -ENOMEM;
}
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 27af69f2..a17536b7 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <rte_common.h>
@@ -79,7 +79,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
break;
case SNOW3G_OP_NOT_SUPPORTED:
default:
- SNOW3G_LOG_ERR("Unsupported operation chain order parameter");
+ SNOW3G_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
@@ -89,7 +89,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
return -ENOTSUP;
if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) {
- SNOW3G_LOG_ERR("Wrong IV length");
+ SNOW3G_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
@@ -105,14 +105,14 @@ snow3g_set_session_parameters(struct snow3g_session *sess,
return -ENOTSUP;
if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) {
- SNOW3G_LOG_ERR("Wrong digest length");
+ SNOW3G_LOG(ERR, "Wrong digest length");
return -EINVAL;
}
sess->auth_op = auth_xform->auth.op;
if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) {
- SNOW3G_LOG_ERR("Wrong IV length");
+ SNOW3G_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
sess->auth_iv_offset = auth_xform->auth.iv.offset;
@@ -137,7 +137,7 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
sess = (struct snow3g_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -159,8 +159,8 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -216,7 +216,7 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
if (op->sym->m_dst == NULL) {
op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("bit-level in-place not supported\n");
+ SNOW3G_LOG(ERR, "bit-level in-place not supported\n");
return 0;
}
dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
@@ -246,7 +246,7 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- SNOW3G_LOG_ERR("Offset");
+ SNOW3G_LOG(ERR, "Offset");
break;
}
@@ -295,7 +295,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
(ops[i]->sym->m_dst != NULL &&
!rte_pktmbuf_is_contiguous(
ops[i]->sym->m_dst))) {
- SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
+ SNOW3G_LOG(ERR, "PMD supports only contiguous mbufs, "
"op (%p) provides noncontiguous mbuf as "
"source/destination buffer.\n", ops[i]);
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -339,7 +339,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(session, 0, sizeof(struct snow3g_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
@@ -537,7 +537,7 @@ cryptodev_snow3g_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- SNOW3G_LOG_ERR("failed to create cryptodev vdev");
+ SNOW3G_LOG(ERR, "failed to create cryptodev vdev");
goto init_error;
}
@@ -555,11 +555,10 @@ cryptodev_snow3g_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed",
+ SNOW3G_LOG(ERR, "driver %s: cryptodev_snow3g_create failed",
init_params->name);
cryptodev_snow3g_remove(vdev);
@@ -573,8 +572,7 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct snow3g_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -617,7 +615,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, cryptodev_snow3g_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
+ cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id);
+
+RTE_INIT(snow3g_init_log)
+{
+ snow3g_logtype_driver = rte_log_register("pmd.crypto.snow3g");
+}
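
With the compile-time RTE_LIBRTE_SNOW3G_DEBUG switch gone, verbosity becomes a runtime decision. A minimal sketch, assuming rte_log_register() returns the id of an already-registered name; the EAL command line reaches the same logtype via --log-level with the name registered above:

    #include <rte_log.h>

    static void
    snow3g_debug_on(void)
    {
            int lt = rte_log_register("pmd.crypto.snow3g");

            if (lt >= 0)
                    rte_log_set_level(lt, RTE_LOG_DEBUG);
    }
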
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index f60b4759..cfbc9522 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <string.h>
@@ -130,7 +130,8 @@ snow3g_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = snow3g_pmd_capabilities;
}
@@ -172,13 +173,13 @@ snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- SNOW3G_LOG_INFO("Reusing existing ring %s"
+ SNOW3G_LOG(INFO, "Reusing existing ring %s"
" for processed packets",
qp->name);
return r;
}
- SNOW3G_LOG_ERR("Unable to reuse existing ring %s"
+ SNOW3G_LOG(ERR, "Unable to reuse existing ring %s"
" for processed packets",
qp->name);
return NULL;
@@ -230,22 +231,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-snow3g_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-snow3g_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
snow3g_pmd_qp_count(struct rte_cryptodev *dev)
@@ -255,14 +240,14 @@ snow3g_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the SNOW 3G session structure */
static unsigned
-snow3g_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+snow3g_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct snow3g_session);
}
/** Configure a SNOW 3G session from a crypto xform chain */
static int
-snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+snow3g_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -271,26 +256,26 @@ snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- SNOW3G_LOG_ERR("invalid session struct");
+ SNOW3G_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
+ SNOW3G_LOG(ERR,
"Couldn't get object from session mempool");
return -ENOMEM;
}
ret = snow3g_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- SNOW3G_LOG_ERR("failed configure session parameters");
+ SNOW3G_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -298,17 +283,17 @@ snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-snow3g_pmd_session_clear(struct rte_cryptodev *dev,
+snow3g_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct snow3g_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -326,13 +311,11 @@ struct rte_cryptodev_ops snow3g_pmd_ops = {
.queue_pair_setup = snow3g_pmd_qp_setup,
.queue_pair_release = snow3g_pmd_qp_release,
- .queue_pair_start = snow3g_pmd_qp_start,
- .queue_pair_stop = snow3g_pmd_qp_stop,
.queue_pair_count = snow3g_pmd_qp_count,
- .session_get_size = snow3g_pmd_session_get_size,
- .session_configure = snow3g_pmd_session_configure,
- .session_clear = snow3g_pmd_session_clear
+ .sym_session_get_size = snow3g_pmd_sym_session_get_size,
+ .sym_session_configure = snow3g_pmd_sym_session_configure,
+ .sym_session_clear = snow3g_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
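
Since sym.max_nb_sessions == 0 now means "no device limit", callers need a small adaptation when sizing session pools. A sketch of application-side handling (app_default is a hypothetical fallback chosen by the application):

    #include <rte_cryptodev.h>

    static uint32_t
    session_limit(uint8_t dev_id, uint32_t app_default)
    {
            struct rte_cryptodev_info info;

            rte_cryptodev_info_get(dev_id, &info);
            /* 0 means the device imposes no limit of its own */
            return info.sym.max_nb_sessions ?
                    info.sym.max_nb_sessions : app_default;
    }
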
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
index eea900e0..b7807b62 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#ifndef _RTE_SNOW3G_PMD_PRIVATE_H_
@@ -10,25 +10,13 @@
#define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g
/**< SNOW 3G PMD device name */
-#define SNOW3G_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_SNOW3G_DEBUG
-#define SNOW3G_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
- __func__, __LINE__, ## args)
-
-#define SNOW3G_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define SNOW3G_LOG_INFO(fmt, args...)
-#define SNOW3G_LOG_DBG(fmt, args...)
-#endif
+/** SNOW 3G PMD LOGTYPE DRIVER */
+int snow3g_logtype_driver;
+
+#define SNOW3G_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, snow3g_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
#define SNOW3G_DIGEST_LENGTH 4
@@ -36,8 +24,6 @@
struct snow3g_private {
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** SNOW 3G buffer queue pair */
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 00000000..be7b828f
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_virtio_crypto.a
+
+#
+# include virtio_crypto.h
+#
+CFLAGS += -I$(RTE_SDK)/lib/librte_vhost
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_virtio_crypto_version.map
+
+LIBABIVER := 1
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtqueue.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_cryptodev.c
+
+# this lib depends upon:
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
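
These SRCS-y entries are gated on the legacy build config; the matching switch lives in config/common_base elsewhere in this release (the default value shown here is an assumption):

    CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=y
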
diff --git a/drivers/crypto/virtio/meson.build b/drivers/crypto/virtio/meson.build
new file mode 100644
index 00000000..b15b3f9f
--- /dev/null
+++ b/drivers/crypto/virtio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+includes += include_directories('../../../lib/librte_vhost')
+deps += 'bus_pci'
+name = 'virtio_crypto'
+sources = files('virtio_cryptodev.c', 'virtio_pci.c',
+ 'virtio_rxtx.c', 'virtqueue.c')
diff --git a/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.h b/drivers/crypto/virtio/virtio_crypto_algs.h
new file mode 100644
index 00000000..4c44af37
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_ALGS_H_
+#define _VIRTIO_CRYPTO_ALGS_H_
+
+#include <rte_memory.h>
+
+#include "virtio_crypto.h"
+
+struct virtio_crypto_session {
+ uint64_t session_id;
+
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } iv;
+
+ struct {
+ uint32_t length;
+ phys_addr_t phys_addr;
+ } aad;
+
+ struct virtio_crypto_op_ctrl_req ctrl;
+};
+
+#endif /* _VIRTIO_CRYPTO_ALGS_H_ */
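
The iv.offset/length pair stored in this session structure is relative to the enclosing crypto op, not to the packet data. A sketch of how such an offset is typically dereferenced at enqueue time with the stock rte_crypto helper (the function name is illustrative):

    #include <rte_crypto.h>
    #include "virtio_crypto_algs.h"

    static inline uint8_t *
    session_iv_ptr(struct rte_crypto_op *op,
                    const struct virtio_crypto_session *s)
    {
            /* IV lives in the op's private area at the recorded offset */
            return rte_crypto_op_ctod_offset(op, uint8_t *, s->iv.offset);
    }
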
diff --git a/drivers/crypto/virtio/virtio_crypto_capabilities.h b/drivers/crypto/virtio/virtio_crypto_capabilities.h
new file mode 100644
index 00000000..03c30dee
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_capabilities.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_
+#define _VIRTIO_CRYPTO_CAPABILITIES_H_
+
+#define VIRTIO_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 20, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */
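
An application can probe the limits advertised by this table through the standard capability API instead of parsing it directly. A hedged sketch; a return of 0 from the check helper means the requested sizes fit:

    #include <rte_cryptodev.h>

    static int
    supports_aes_cbc(uint8_t dev_id, uint16_t key_len)
    {
            const struct rte_cryptodev_symmetric_capability *cap;
            struct rte_crypto_sym_capability_idx idx = {
                    .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
                    .algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
            };

            cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
            return cap != NULL &&
                    rte_cryptodev_sym_capability_check_cipher(cap, key_len,
                            16 /* IV bytes, per the table above */) == 0;
    }
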
diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
new file mode 100644
index 00000000..568b5a40
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -0,0 +1,1505 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_eal.h>
+
+#include "virtio_cryptodev.h"
+#include "virtqueue.h"
+#include "virtio_crypto_algs.h"
+#include "virtio_crypto_capabilities.h"
+
+int virtio_crypto_logtype_init;
+int virtio_crypto_logtype_session;
+int virtio_crypto_logtype_rx;
+int virtio_crypto_logtype_tx;
+int virtio_crypto_logtype_driver;
+
+static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
+static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info);
+static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
+static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool);
+static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
+static unsigned int virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev);
+static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess);
+static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_mempool *mp);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
+ { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
+ VIRTIO_CRYPTO_PCI_DEVICEID) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
+ VIRTIO_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t cryptodev_virtio_driver_id;
+
+#define NUM_ENTRY_SYM_CREATE_SESSION 4
+
+static int
+virtio_crypto_send_command(struct virtqueue *vq,
+ struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
+ uint8_t *auth_key, struct virtio_crypto_session *session)
+{
+ uint8_t idx = 0;
+ uint8_t needed = 1;
+ uint32_t head = 0;
+ uint32_t len_cipher_key = 0;
+ uint32_t len_auth_key = 0;
+ uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
+ uint32_t len_total = 0;
+ uint32_t input_offset = 0;
+ void *virt_addr_started = NULL;
+ phys_addr_t phys_addr_started;
+ struct vring_desc *desc;
+ uint32_t desc_offset;
+ struct virtio_crypto_session_input *input;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
+ return -EINVAL;
+ }
+ /* cipher-only is supported; auth_key is NULL in that case, but a cipher key is always required */
+ if (!cipher_key) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
+ return -EINVAL;
+ }
+
+ head = vq->vq_desc_head_idx;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
+ head, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
+ return -ENOSPC;
+ }
+
+ /* calculate the length of cipher key */
+ if (cipher_key) {
+ switch (ctrl->u.sym_create_session.op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.cipher
+ .para.keylen;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.chain
+ .para.cipher_param.keylen;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
+ return -EINVAL;
+ }
+ }
+
+ /* calculate the length of auth key */
+ if (auth_key) {
+ len_auth_key =
+ ctrl->u.sym_create_session.u.chain.para.u.mac_param
+ .auth_key_len;
+ }
+
+ /*
+ * malloc memory to store indirect vring_desc entries, including
+ * ctrl request, cipher key, auth key, session input and desc vring
+ */
+ desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
+ + len_session_input;
+ virt_addr_started = rte_malloc(NULL,
+ desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (virt_addr_started == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
+ return -ENOSPC;
+ }
+ phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
+
+ /* address to store indirect vring desc entries */
+ desc = (struct vring_desc *)
+ ((uint8_t *)virt_addr_started + desc_offset);
+
+ /* ctrl req part */
+ memcpy(virt_addr_started, ctrl, len_ctrl_req);
+ desc[idx].addr = phys_addr_started;
+ desc[idx].len = len_ctrl_req;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_ctrl_req;
+ input_offset += len_ctrl_req;
+
+ /* cipher key part */
+ if (len_cipher_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ cipher_key, len_cipher_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_cipher_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_cipher_key;
+ input_offset += len_cipher_key;
+ }
+
+ /* auth key part */
+ if (len_auth_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ auth_key, len_auth_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_auth_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_auth_key;
+ input_offset += len_auth_key;
+ }
+
+ /* input part */
+ input = (struct virtio_crypto_session_input *)
+ ((uint8_t *)virt_addr_started + input_offset);
+ input->status = VIRTIO_CRYPTO_ERR;
+ input->session_id = ~0ULL;
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_session_input;
+ desc[idx].flags = VRING_DESC_F_WRITE;
+ idx++;
+
+ /* use a single desc entry */
+ vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
+ vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ /* get the result */
+ if (input->status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
+ "status=%u, session_id=%" PRIu64 "",
+ input->status, input->session_id);
+ rte_free(virt_addr_started);
+ ret = -1;
+ } else {
+ session->session_id = input->session_id;
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
+ "session_id=%" PRIu64 "", input->session_id);
+ rte_free(virt_addr_started);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void
+virtio_crypto_queue_release(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq) {
+ hw = vq->hw;
+ /* Select and deactivate the queue */
+ VTPCI_OPS(hw)->del_queue(hw, vq);
+
+ rte_memzone_free(vq->mz);
+ rte_mempool_free(vq->mpool);
+ rte_free(vq);
+ }
+}
+
+#define MPOOL_MAX_NAME_SZ 32
+
+int
+virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq)
+{
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ char mpool_name[MPOOL_MAX_NAME_SZ];
+ const struct rte_memzone *mz;
+ unsigned int vq_size, size;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = NULL;
+ uint32_t i = 0;
+ uint32_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
+
+ /*
+ * Read the virtqueue size from the Queue Size field
+ * Always power of 2 and if 0 virtqueue does not exist
+ */
+ vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ if (vq_size == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
+ return -EINVAL;
+ }
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
+
+ if (!rte_is_power_of_2(vq_size)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
+ return -EINVAL;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
+ dev->data->dev_id, vtpci_queue_idx);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_dataqueue%d_mpool",
+ dev->data->dev_id, vtpci_queue_idx);
+ } else if (queue_type == VTCRYPTO_CTRLQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
+ dev->data->dev_id);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_controlqueue_mpool",
+ dev->data->dev_id);
+ }
+ size = RTE_ALIGN_CEIL(sizeof(*vq) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
+ return -ENOMEM;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ /* pre-allocate a mempool and use it in the data plane to
+ * improve performance
+ */
+ vq->mpool = rte_mempool_lookup(mpool_name);
+ if (vq->mpool == NULL)
+ vq->mpool = rte_mempool_create(mpool_name,
+ vq_size,
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!vq->mpool) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
+ "Cannot create mempool");
+ goto mpool_create_err;
+ }
+ for (i = 0; i < vq_size; i++) {
+ vq->vq_descx[i].cookie =
+ rte_zmalloc("crypto PMD op cookie pointer",
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE);
+ if (vq->vq_descx[i].cookie == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
+ "alloc mem for cookie");
+ goto cookie_alloc_err;
+ }
+ }
+ }
+
+ vq->hw = hw;
+ vq->dev_id = dev->data->dev_id;
+ vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_nentries = vq_size;
+
+ /*
+ * Using part of the vring entries is permitted, but the maximum
+ * is vq_size
+ */
+ if (nb_desc == 0 || nb_desc > vq_size)
+ nb_desc = vq_size;
+ vq->vq_free_cnt = nb_desc;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
+ (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
+ size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
+ goto mz_reserve_err;
+ }
+ }
+
+ /*
+ * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
+ * and only accepts 32 bit page frame number.
+ * Check if the allocated physical memory exceeds 16TB.
+ */
+ if ((mz->phys_addr + vq->vq_ring_size - 1)
+ >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
+ "above 16TB!");
+ goto vring_addr_err;
+ }
+
+ memset(mz->addr, 0, mz->len);
+ vq->mz = mz;
+ vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_virt_mem = mz->addr;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
+ (uint64_t)mz->phys_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
+ (uint64_t)(uintptr_t)mz->addr);
+
+ *pvq = vq;
+
+ return 0;
+
+vring_addr_err:
+ rte_memzone_free(mz);
+mz_reserve_err:
+cookie_alloc_err:
+ rte_mempool_free(vq->mpool);
+ if (i != 0) {
+ for (j = 0; j < i; j++)
+ rte_free(vq->vq_descx[j].cookie);
+ }
+mpool_create_err:
+ rte_free(vq);
+ return -ENOMEM;
+}
+
+static int
+virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
+{
+ int ret;
+ struct virtqueue *vq;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ /* if virtio device has started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
+ 0, SOCKET_ID_ANY, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
+ return ret;
+ }
+
+ hw->cvq = vq;
+
+ return 0;
+}
+
+static void
+virtio_crypto_free_queues(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* control queue release */
+ virtio_crypto_queue_release(hw->cvq);
+
+ /* data queue release */
+ for (i = 0; i < hw->max_dataqueues; i++)
+ virtio_crypto_queue_release(dev->data->queue_pairs[i]);
+}
+
+static int
+virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
+ /* Device related operations */
+ .dev_configure = virtio_crypto_dev_configure,
+ .dev_start = virtio_crypto_dev_start,
+ .dev_stop = virtio_crypto_dev_stop,
+ .dev_close = virtio_crypto_dev_close,
+ .dev_infos_get = virtio_crypto_dev_info_get,
+
+ .stats_get = virtio_crypto_dev_stats_get,
+ .stats_reset = virtio_crypto_dev_stats_reset,
+
+ .queue_pair_setup = virtio_crypto_qp_setup,
+ .queue_pair_release = virtio_crypto_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = virtio_crypto_sym_get_session_private_size,
+ .sym_session_configure = virtio_crypto_sym_configure_session,
+ .sym_session_clear = virtio_crypto_sym_clear_session
+};
+
+static void
+virtio_crypto_update_stats(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (stats == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
+ return;
+ }
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ const struct virtqueue *data_queue
+ = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ stats->enqueued_count += data_queue->packets_sent_total;
+ stats->enqueue_err_count += data_queue->packets_sent_failed;
+
+ stats->dequeued_count += data_queue->packets_received_total;
+ stats->dequeue_err_count
+ += data_queue->packets_received_failed;
+ }
+}
+
+static void
+virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ virtio_crypto_update_stats(dev, stats);
+}
+
+static void
+virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ struct virtqueue *data_queue = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ data_queue->packets_sent_total = 0;
+ data_queue->packets_sent_failed = 0;
+
+ data_queue->packets_received_total = 0;
+ data_queue->packets_received_failed = 0;
+ }
+}
+
+static int
+virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool __rte_unused)
+{
+ int ret;
+ struct virtqueue *vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* if virtio dev is started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
+ qp_conf->nb_descriptors, socket_id, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "virtio crypto data queue initialization failed\n");
+ return ret;
+ }
+
+ dev->data->queue_pairs[queue_pair_id] = vq;
+
+ return 0;
+}
+
+static int
+virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct virtqueue *vq
+ = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
+ return 0;
+ }
+
+ virtio_crypto_queue_release(vq);
+ return 0;
+}
+
+static int
+virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
+{
+ uint64_t host_features;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prepare guest_features: feature that driver wants to support */
+ VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
+ req_features);
+
+ /* Read device(host) feature bits */
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
+ host_features);
+
+ /*
+ * Negotiate features: Subset of device feature bits are written back
+ * guest feature bits.
+ */
+ hw->guest_features = req_features;
+ hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
+ host_features);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
+ hw->guest_features);
+
+ if (hw->modern) {
+ if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+ vtpci_cryptodev_set_status(hw,
+ VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ if (!(vtpci_cryptodev_get_status(hw) &
+ VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
+ "status!");
+ return -1;
+ }
+ }
+
+ hw->req_guest_features = req_features;
+
+ return 0;
+}
+
+/* reset device and renegotiate features if needed */
+static int
+virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
+ uint64_t req_features)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+ struct virtio_crypto_config local_config;
+ struct virtio_crypto_config *config = &local_config;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Reset the device although not necessary at startup */
+ vtpci_cryptodev_reset(hw);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+ /* Tell the host we've known how to drive the device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (virtio_negotiate_features(hw, req_features) < 0)
+ return -1;
+
+ /* Get status of the device */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, status),
+ &config->status, sizeof(config->status));
+ if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
+ "not ready");
+ return -1;
+ }
+
+ /* Get number of data queues */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, max_dataqueues),
+ &config->max_dataqueues,
+ sizeof(config->max_dataqueues));
+ hw->max_dataqueues = config->max_dataqueues;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
+ hw->max_dataqueues);
+
+ return 0;
+}
+
+/*
+ * This function is based on probe() function
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *cryptodev;
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+ init_params);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ cryptodev->driver_id = cryptodev_virtio_driver_id;
+ cryptodev->dev_ops = &virtio_crypto_dev_ops;
+
+ cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ hw = cryptodev->data->dev_private;
+ hw->dev_id = cryptodev->data->dev_id;
+ hw->virtio_dev_capabilities = virtio_capabilities;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
+ cryptodev->data->dev_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ /* pci device init */
+ if (vtpci_cryptodev_init(pci_dev, hw))
+ return -1;
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return -EPERM;
+
+ if (cryptodev->data->dev_started) {
+ virtio_crypto_dev_stop(cryptodev);
+ virtio_crypto_dev_close(cryptodev);
+ }
+
+ cryptodev->dev_ops = NULL;
+ cryptodev->enqueue_burst = NULL;
+ cryptodev->dequeue_burst = NULL;
+
+ /* release control queue */
+ virtio_crypto_queue_release(hw->cvq);
+
+ rte_free(cryptodev->data);
+ cryptodev->data = NULL;
+
+ VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ /* setup control queue
+ * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
+ * config->max_dataqueues is the control queue
+ */
+ if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
+ return -1;
+ }
+ virtio_crypto_ctrlq_start(cryptodev);
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_stop(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
+
+ vtpci_cryptodev_reset(hw);
+
+ virtio_crypto_dev_free_mbufs(dev);
+ virtio_crypto_free_queues(dev);
+
+ dev->data->dev_started = 0;
+}
+
+static int
+virtio_crypto_dev_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (dev->data->dev_started)
+ return 0;
+
+ /* Do final configuration before queue engine starts */
+ virtio_crypto_dataq_start(dev);
+ vtpci_cryptodev_reinit_complete(hw);
+
+ dev->data->dev_started = 1;
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
+{
+ uint32_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
+ "and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)
+ dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
+ i, dev->data->queue_pairs[i]);
+
+ virtqueue_detatch_unused(dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
+ "unused buf", i);
+ VIRTQUEUE_DUMP(
+ (struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+static unsigned int
+virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
+}
+
+static int
+virtio_crypto_check_sym_session_paras(
+ struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(dev == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
+ return -1;
+ }
+ if (unlikely(dev->data == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
+ return -1;
+ }
+ hw = dev->data->dev_private;
+ if (unlikely(hw == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
+ return -1;
+ }
+ if (unlikely(hw->cvq == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_clear_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
+ return -1;
+ }
+
+ return virtio_crypto_check_sym_session_paras(dev);
+}
+
+#define NUM_ENTRY_SYM_CLEAR_SESSION 2
+
+static void
+virtio_crypto_sym_clear_session(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *vq;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct vring_desc *desc;
+ uint8_t *status;
+ uint8_t needed = 1;
+ uint32_t head;
+ uint8_t *malloc_virt_addr;
+ uint64_t malloc_phys_addr;
+ uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+ uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
+ return;
+
+ hw = dev->data->dev_private;
+ vq = hw->cvq;
+ session = (struct virtio_crypto_session *)get_sym_session_private_data(
+ sess, cryptodev_virtio_driver_id);
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
+ return;
+ }
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
+ "vq = %p", vq->vq_desc_head_idx, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "vq->vq_free_cnt = %d is less than %d, "
+ "not enough", vq->vq_free_cnt, needed);
+ return;
+ }
+
+ /*
+ * malloc memory to store information of ctrl request op,
+ * returned status and desc vring
+ */
+ malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+ + NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (malloc_virt_addr == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
+ return;
+ }
+ malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
+
+ /* assign ctrl request op part */
+ ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+ ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
+ /* default data virtqueue is 0 */
+ ctrl->header.queue_id = 0;
+ ctrl->u.destroy_session.session_id = session->session_id;
+
+ /* status part */
+ status = &(((struct virtio_crypto_inhdr *)
+ ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
+ *status = VIRTIO_CRYPTO_ERR;
+
+ /* indirect desc vring part */
+ desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
+ + desc_offset);
+
+ /* ctrl request part */
+ desc[0].addr = malloc_phys_addr;
+ desc[0].len = len_op_ctrl_req;
+ desc[0].flags = VRING_DESC_F_NEXT;
+ desc[0].next = 1;
+
+ /* status part */
+ desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
+ desc[1].len = len_inhdr;
+ desc[1].flags = VRING_DESC_F_WRITE;
+
+ /* use only a single desc entry */
+ head = vq->vq_desc_head_idx;
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
+ vq->vq_ring.desc[head].len
+ = NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc);
+ vq->vq_free_cnt -= needed;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+ while (vq->vq_ring.desc[desc_idx].flags
+ & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ if (*status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
+ "status=%"PRIu32", session_id=%"PRIu64"",
+ *status, session->session_id);
+ rte_free(malloc_virt_addr);
+ return;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
+ session->session_id);
+
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
+ set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
+ rte_mempool_put(sess_mp, session);
+ rte_free(malloc_virt_addr);
+}
+
+static struct rte_crypto_cipher_xform *
+virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_auth_xform *
+virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+/** Get xform chain order */
+static int
+virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return -1;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_AUTH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
+
+ return -1;
+}
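+
+/*
+ * Example for virtio_crypto_get_chain_order() above (editorial
+ * sketch, not driver code):
+ *
+ *	struct rte_crypto_sym_xform auth = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *	};
+ *	struct rte_crypto_sym_xform cipher = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.next = &auth,
+ *	};
+ *
+ * virtio_crypto_get_chain_order(&cipher) returns
+ * VIRTIO_CRYPTO_CMD_CIPHER_HASH, i.e. cipher-then-hash chaining.
+ */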
+
+static int
+virtio_crypto_sym_pad_cipher_param(
+ struct virtio_crypto_cipher_session_para *para,
+ struct rte_crypto_cipher_xform *cipher_xform)
+{
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
+ "Cipher alg %u", cipher_xform->algo);
+ return -1;
+ }
+
+ para->keylen = cipher_xform->key.length;
+ switch (cipher_xform->op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ para->op = VIRTIO_CRYPTO_OP_DECRYPT;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
+ "parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_auth_param(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_auth_xform *auth_xform)
+{
+ uint32_t *algo;
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+
+ switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
+ algo = &(para->u.hash_param.algo);
+ break;
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
+ algo = &(para->u.mac_param.algo);
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
+ "specified",
+ ctrl->u.sym_create_session.u.chain.para.hash_mode);
+ return -1;
+ }
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_op_ctrl_req(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_sym_xform *xform, bool is_chained,
+ uint8_t **cipher_key_data, uint8_t **auth_key_data,
+ struct virtio_crypto_session *session)
+{
+ int ret;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = virtio_crypto_get_cipher_xform(xform);
+ if (cipher_xform) {
+ if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "cipher IV size cannot be longer than %u",
+ VIRTIO_CRYPTO_MAX_IV_SIZE);
+ return -1;
+ }
+ if (is_chained)
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.chain.para
+ .cipher_param, cipher_xform);
+ else
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.cipher.para,
+ cipher_xform);
+
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "pad cipher parameter failed");
+ return -1;
+ }
+
+ *cipher_key_data = cipher_xform->key.data;
+
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+ }
+
+ /* Get auth xform from crypto xform chain */
+ auth_xform = virtio_crypto_get_auth_xform(xform);
+ if (auth_xform) {
+ /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+ if (auth_xform->key.length) {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
+ para->u.mac_param.auth_key_len =
+ (uint32_t)auth_xform->key.length;
+ para->u.mac_param.hash_result_len =
+ auth_xform->digest_length;
+
+ *auth_key_data = auth_xform->key.data;
+ } else {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
+ para->u.hash_param.hash_result_len =
+ auth_xform->digest_length;
+ }
+
+ ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
+ "failed");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_configure_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sym_sess,
+ struct rte_mempool *mempool)
+{
+ if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
+ unlikely(mempool == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
+ return -1;
+ }
+
+ if (virtio_crypto_check_sym_session_paras(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_configure_session(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ struct virtio_crypto_session crypto_sess;
+ void *session_private = &crypto_sess;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl_req;
+ enum virtio_crypto_cmd_id cmd_id;
+ uint8_t *cipher_key_data = NULL;
+ uint8_t *auth_key_data = NULL;
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *control_vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
+ sess, mempool);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
+ return ret;
+ }
+
+ if (rte_mempool_get(mempool, &session_private)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ session = (struct virtio_crypto_session *)session_private;
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ ctrl_req = &session->ctrl;
+ ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
+ /* FIXME: support multiqueue */
+ ctrl_req->header.queue_id = 0;
+
+ hw = dev->data->dev_private;
+ control_vq = hw->cvq;
+
+ cmd_id = virtio_crypto_get_chain_order(xform);
+ if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+ if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+ switch (cmd_id) {
+ case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
+ case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
+ xform, true, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ case VIRTIO_CRYPTO_CMD_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
+ false, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, NULL, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Unsupported operation chain order parameter");
+ goto error_out;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ session_private);
+
+ return 0;
+
+error_out:
+ return -1;
+}
+
+static void
+virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (info != NULL) {
+ info->driver_id = cryptodev_virtio_driver_id;
+ info->feature_flags = dev->feature_flags;
+ info->max_nb_queue_pairs = hw->max_dataqueues;
+ /* No limit of number of sessions */
+ info->sym.max_nb_sessions = 0;
+ info->capabilities = hw->virtio_dev_capabilities;
+ }
+}
+
+static int
+crypto_virtio_pci_probe(
+ struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct virtio_crypto_hw)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return crypto_virtio_create(name, pci_dev, &init_params);
+}
+
+static int
+crypto_virtio_pci_remove(
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return virtio_crypto_dev_uninit(cryptodev);
+}
+
+static struct rte_pci_driver rte_virtio_crypto_driver = {
+ .id_table = pci_id_virtio_crypto_map,
+ .drv_flags = 0,
+ .probe = crypto_virtio_pci_probe,
+ .remove = crypto_virtio_pci_remove
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
+ rte_virtio_crypto_driver.driver,
+ cryptodev_virtio_driver_id);
+
+RTE_INIT(virtio_crypto_init_log)
+{
+ virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
+ if (virtio_crypto_logtype_init >= 0)
+ rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_session =
+ rte_log_register("pmd.crypto.virtio.session");
+ if (virtio_crypto_logtype_session >= 0)
+ rte_log_set_level(virtio_crypto_logtype_session,
+ RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
+ if (virtio_crypto_logtype_rx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
+ if (virtio_crypto_logtype_tx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_driver =
+ rte_log_register("pmd.crypto.virtio.driver");
+ if (virtio_crypto_logtype_driver >= 0)
+ rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h
new file mode 100644
index 00000000..0fd7b722
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_cryptodev.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTODEV_H_
+#define _VIRTIO_CRYPTODEV_H_
+
+#include "virtio_crypto.h"
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+
+/* Features desired/implemented by this driver. */
+#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1)
+
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
+
+#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
+
+#define VIRTIO_CRYPTO_MAX_IV_SIZE 16
+
+extern uint8_t cryptodev_virtio_driver_id;
+
+enum virtio_crypto_cmd_id {
+ VIRTIO_CRYPTO_CMD_CIPHER = 0,
+ VIRTIO_CRYPTO_CMD_AUTH = 1,
+ VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2,
+ VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3
+};
+
+struct virtio_crypto_op_cookie {
+ struct virtio_crypto_op_data_req data_req;
+ struct virtio_crypto_inhdr inhdr;
+ struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+ uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE];
+};
+
+/*
+ * Control queue function prototype
+ */
+void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev);
+
+/*
+ * Data queue function prototype
+ */
+void virtio_crypto_dataq_start(struct rte_cryptodev *dev);
+
+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq);
+
+void virtio_crypto_queue_release(struct virtqueue *vq);
+
+uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/drivers/crypto/virtio/virtio_logs.h b/drivers/crypto/virtio/virtio_logs.h
new file mode 100644
index 00000000..26a286cf
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_logs.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int virtio_crypto_logtype_init;
+
+#define VIRTIO_CRYPTO_INIT_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \
+ "INIT: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_session;
+
+#define VIRTIO_CRYPTO_SESSION_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_session, \
+ "SESSION: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_rx;
+
+#define VIRTIO_CRYPTO_RX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_rx, \
+ "RX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_RX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_tx;
+
+#define VIRTIO_CRYPTO_TX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_tx, \
+ "TX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_TX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_driver;
+
+#define VIRTIO_CRYPTO_DRV_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_driver, \
+ "DRIVER: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/drivers/crypto/virtio/virtio_pci.c b/drivers/crypto/virtio/virtio_pci.c
new file mode 100644
index 00000000..832c465b
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_pci.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ #include <dirent.h>
+ #include <fcntl.h>
+#endif
+
+#include <rte_io.h>
+#include <rte_bus.h>
+
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+/*
+ * The following macros are derived from linux/pci_regs.h; we
+ * can't simply include that header here, as there is no such
+ * file on non-Linux platforms.
+ */
+#define PCI_CAPABILITY_LIST 0x34
+#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(hw) \
+ (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
+
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+static inline int
+check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+ /* The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bit, and
+ * only accepts a 32-bit page frame number.
+ * Check whether the allocated physical memory exceeds 16TB.
+ */
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline void
+io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
+}
+
+static void
+modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ uint8_t *p;
+ uint8_t old_gen, new_gen;
+
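+ /*
+ * Editorial note: if the device bumps config_generation while the
+ * bytes are being copied, re-read so the caller sees a consistent
+ * snapshot of multi-byte config fields.
+ */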
+ do {
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+static void
+modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ const uint8_t *p = src;
+
+ for (i = 0; i < length; i++)
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+static uint64_t
+modern_get_features(struct virtio_crypto_hw *hw)
+{
+ uint32_t features_lo, features_hi;
+
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+ return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+static void
+modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
+{
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
+
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
+}
+
+static uint8_t
+modern_get_status(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(&hw->common_cfg->device_status);
+}
+
+static void
+modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ rte_write8(status, &hw->common_cfg->device_status);
+}
+
+static void
+modern_reset(struct virtio_crypto_hw *hw)
+{
+ modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ modern_get_status(hw);
+}
+
+static uint8_t
+modern_get_isr(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(hw->isr);
+}
+
+static uint16_t
+modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
+{
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
+ uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+static uint16_t
+modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
+}
+
+static int
+modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ uint64_t desc_addr, avail_addr, used_addr;
+ uint16_t notify_off;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
+ desc_addr = vq->vq_ring_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
+ vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+
+ rte_write16(1, &hw->common_cfg->queue_enable);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t aval_addr: %" PRIx64, avail_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
+ vq->notify_addr, notify_off);
+
+ return 0;
+}
+
+static void
+modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
+static void
+modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
+ struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
+}
+
+const struct virtio_pci_ops virtio_crypto_modern_ops = {
+ .read_dev_cfg = modern_read_dev_config,
+ .write_dev_cfg = modern_write_dev_config,
+ .reset = modern_reset,
+ .get_status = modern_get_status,
+ .set_status = modern_set_status,
+ .get_features = modern_get_features,
+ .set_features = modern_set_features,
+ .get_isr = modern_get_isr,
+ .set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
+ .get_queue_num = modern_get_queue_num,
+ .setup_queue = modern_setup_queue,
+ .del_queue = modern_del_queue,
+ .notify_queue = modern_notify_queue,
+};
+
+void
+vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+}
+
+void
+vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+}
+
+uint64_t
+vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features)
+{
+ uint64_t features;
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+ VTPCI_OPS(hw)->set_features(hw, features);
+
+ return features;
+}
+
+void
+vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
+{
+ VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ /* flush status write */
+ VTPCI_OPS(hw)->get_status(hw);
+}
+
+void
+vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
+{
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+void
+vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status |= VTPCI_OPS(hw)->get_status(hw);
+
+ VTPCI_OPS(hw)->set_status(hw, status);
+}
+
+uint8_t
+vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_status(hw);
+}
+
+uint8_t
+vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_isr(hw);
+}
+
+static void *
+get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
+{
+ uint8_t bar = cap->bar;
+ uint32_t length = cap->length;
+ uint32_t offset = cap->offset;
+ uint8_t *base;
+
+ if (bar >= PCI_MAX_RESOURCE) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > dev->mem_resource[bar].len) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "invalid cap: overflows bar space: %u > %" PRIu64,
+ offset + length, dev->mem_resource[bar].len);
+ return NULL;
+ }
+
+ base = dev->mem_resource[bar].addr;
+ if (base == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
+ return NULL;
+ }
+
+ return base + offset;
+}
+
+#define PCI_MSIX_ENABLE 0x8000
+
+static int
+virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ if (rte_pci_map_device(dev)) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
+ return -1;
+ }
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
+ return -1;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ /* Transitional devices would also have this capability,
+ * that's why we also check if msix is enabled.
+ * 1st byte is cap ID; 2nd byte is the position of next
+ * cap; next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = VIRTIO_MSIX_ENABLED;
+ else
+ hw->use_msix = VIRTIO_MSIX_DISABLED;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] skipping non VNDR cap id: %02x",
+ pos, cap.cap_vndr);
+ goto next;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
+ pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
+ hw->notify_base = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cfg_addr(dev, &cap);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->dev_cfg == NULL || hw->isr == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
+ return -1;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
+ hw->notify_base, hw->notify_off_multiplier);
+
+ return 0;
+}
+
+/*
+ * Return 0 on success, when a modern virtio crypto device has been
+ * found and mapped.
+ * Return -1 if the PCI device cannot be mapped or no modern virtio
+ * capabilities are found; virtio crypto has no legacy mode to fall
+ * back to.
+ */
+int
+vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ /*
+ * Try to read the virtio PCI capabilities, which exist only on
+ * modern PCI devices.
+ */
+ if (virtio_read_caps(dev, hw) == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
+ virtio_hw_internal[hw->dev_id].vtpci_ops =
+ &virtio_crypto_modern_ops;
+ hw->modern = 1;
+ return 0;
+ }
+
+ /*
+ * virtio crypto conforms to virtio 1.0 and doesn't support
+ * legacy mode
+ */
+ return -1;
+}
diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h
new file mode 100644
index 00000000..604ec366
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_pci.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+
+#include "virtio_crypto.h"
+
+struct virtqueue;
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4
+#define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO)
+ */
+/* Only if MSIX is enabled: */
+
+/* configuration change vector (16, RW) */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* vector for selected VQ notifications */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
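+/* Editorial example: with 4KB pages, PAGE_SIZE / 16 = 4096 / 16 = 256. */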
+
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint8_t bar; /* Where to find it. */
+ uint8_t padding[3]; /* Pad to full dword. */
+ uint32_t offset; /* Offset within bar. */
+ uint32_t length; /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device. */
+ uint32_t device_feature_select; /* read-write */
+ uint32_t device_feature; /* read-only */
+ uint32_t guest_feature_select; /* read-write */
+ uint32_t guest_feature; /* read-write */
+ uint16_t msix_config; /* read-write */
+ uint16_t num_queues; /* read-only */
+ uint8_t device_status; /* read-write */
+ uint8_t config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ uint16_t queue_select; /* read-write */
+ uint16_t queue_size; /* read-write, power of 2. */
+ uint16_t queue_msix_vector; /* read-write */
+ uint16_t queue_enable; /* read-write */
+ uint16_t queue_notify_off; /* read-only */
+ uint32_t queue_desc_lo; /* read-write */
+ uint32_t queue_desc_hi; /* read-write */
+ uint32_t queue_avail_lo; /* read-write */
+ uint32_t queue_avail_hi; /* read-write */
+ uint32_t queue_used_lo; /* read-write */
+ uint32_t queue_used_hi; /* read-write */
+};
+
+struct virtio_crypto_hw;
+
+struct virtio_pci_ops {
+ void (*read_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int len);
+ void (*write_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int len);
+ void (*reset)(struct virtio_crypto_hw *hw);
+
+ uint8_t (*get_status)(struct virtio_crypto_hw *hw);
+ void (*set_status)(struct virtio_crypto_hw *hw, uint8_t status);
+
+ uint64_t (*get_features)(struct virtio_crypto_hw *hw);
+ void (*set_features)(struct virtio_crypto_hw *hw, uint64_t features);
+
+ uint8_t (*get_isr)(struct virtio_crypto_hw *hw);
+
+ uint16_t (*set_config_irq)(struct virtio_crypto_hw *hw, uint16_t vec);
+
+ uint16_t (*set_queue_irq)(struct virtio_crypto_hw *hw,
+ struct virtqueue *vq, uint16_t vec);
+
+ uint16_t (*get_queue_num)(struct virtio_crypto_hw *hw,
+ uint16_t queue_id);
+ int (*setup_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*del_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*notify_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+};
+
+struct virtio_crypto_hw {
+ /* control queue */
+ struct virtqueue *cvq;
+ uint16_t dev_id;
+ uint16_t max_dataqueues;
+ uint64_t req_guest_features;
+ uint64_t guest_features;
+ uint8_t use_msix;
+ uint8_t modern;
+ uint32_t notify_off_multiplier;
+ uint8_t *isr;
+ uint16_t *notify_base;
+ struct virtio_pci_common_cfg *common_cfg;
+ struct virtio_crypto_config *dev_cfg;
+ const struct rte_cryptodev_capabilities *virtio_dev_capabilities;
+};
+
+/*
+ * While virtio_crypto_hw is stored in shared memory, this structure stores
+ * some infos that may vary in the multiple process model locally.
+ * For example, the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+ const struct virtio_pci_ops *vtpci_ops;
+ struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->dev_id].vtpci_ops)
+#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->dev_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+enum virtio_msix_status {
+ VIRTIO_MSIX_NONE = 0,
+ VIRTIO_MSIX_DISABLED = 1,
+ VIRTIO_MSIX_ENABLED = 2
+};
+
+static inline int
+vtpci_with_feature(struct virtio_crypto_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
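+
+/*
+ * Example (editorial): vtpci_with_feature(hw, VIRTIO_F_VERSION_1)
+ * tests whether the virtio 1.0 feature bit was negotiated.
+ */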
+
+/*
+ * Function declaration from virtio_pci.c
+ */
+int vtpci_cryptodev_init(struct rte_pci_device *dev,
+ struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_reset(struct virtio_crypto_hw *hw);
+
+void vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw);
+
+uint8_t vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status);
+
+uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features);
+
+void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length);
+
+void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length);
+
+uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw);
+
+#endif /* _VIRTIO_PCI_H_ */
diff --git a/drivers/crypto/virtio/virtio_ring.h b/drivers/crypto/virtio/virtio_ring.h
new file mode 100644
index 00000000..ee306745
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_ring.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_RING_H_
+#define _VIRTIO_RING_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
+struct vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ volatile uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ * __u16 used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline size_t
+vring_size(unsigned int num, unsigned long align)
+{
+ size_t size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return size;
+}
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+}
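+
+/*
+ * Illustrative use of vring_size()/vring_init() above (editorial
+ * sketch, not driver code; the memzone name and the size of 256
+ * entries are assumptions):
+ *
+ *	size_t sz = vring_size(256, VIRTIO_PCI_VRING_ALIGN);
+ *	const struct rte_memzone *mz = rte_memzone_reserve_aligned(
+ *			"example_vring", sz, SOCKET_ID_ANY, 0,
+ *			VIRTIO_PCI_VRING_ALIGN);
+ *	struct vring vr;
+ *
+ *	memset(mz->addr, 0, sz);
+ *	vring_init(&vr, 256, (uint8_t *)mz->addr, VIRTIO_PCI_VRING_ALIGN);
+ */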
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other side, if we have
+ * just incremented the index from old to new_idx, should we trigger an
+ * event?
+ */
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
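+
+/*
+ * Worked example (editorial): with old = 4, new_idx = 6 and
+ * event_idx = 5, (uint16_t)(6 - 5 - 1) == 0 is less than
+ * (uint16_t)(6 - 4) == 2, so an event is due. The uint16_t
+ * arithmetic keeps the test correct across index wraparound.
+ */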
+
+#endif /* _VIRTIO_RING_H_ */
diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
new file mode 100644
index 00000000..e32a1ecd
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <rte_cryptodev_pmd.h>
+
+#include "virtqueue.h"
+#include "virtio_cryptodev.h"
+#include "virtio_crypto_algs.h"
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp, *dp_tail;
+ struct vq_desc_extra *dxp;
+ uint16_t desc_idx_last = desc_idx;
+
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ desc_idx_last = dp->next;
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ }
+ dxp->ndescs = 0;
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+ * newly freed chain. If the virtqueue was completely used, then
+ * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq,
+ struct rte_crypto_op **rx_pkts, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_crypto_op *cop;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+ struct virtio_crypto_inhdr *inhdr;
+ struct virtio_crypto_op_cookie *op_cookie;
+
+ /* Caller does the check */
+ for (i = 0; i < num ; i++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+ cop = (struct rte_crypto_op *)
+ vq->vq_descx[desc_idx].crypto_op;
+ if (unlikely(cop == NULL)) {
+ VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
+ "mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ op_cookie = (struct virtio_crypto_op_cookie *)
+ vq->vq_descx[desc_idx].cookie;
+ inhdr = &(op_cookie->inhdr);
+ switch (inhdr->status) {
+ case VIRTIO_CRYPTO_OK:
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case VIRTIO_CRYPTO_ERR:
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_NOTSUPP:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ vq->packets_received_failed++;
+ break;
+ default:
+ break;
+ }
+
+ vq->packets_received_total++;
+
+ rx_pkts[i] = cop;
+ rte_mempool_put(vq->mpool, op_cookie);
+
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].crypto_op = NULL;
+ }
+
+ return i;
+}
+
+static int
+virtqueue_crypto_sym_pkt_header_arrange(
+ struct rte_crypto_op *cop,
+ struct virtio_crypto_op_data_req *data,
+ struct virtio_crypto_session *session)
+{
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_op_data_req *req_data = data;
+ struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
+ struct virtio_crypto_sym_create_session_req *sym_sess_req =
+ &ctrl->u.sym_create_session;
+ struct virtio_crypto_alg_chain_session_para *chain_para =
+ &sym_sess_req->u.chain.para;
+ struct virtio_crypto_cipher_session_para *cipher_para;
+
+ req_data->header.session_id = session->session_id;
+
+ switch (sym_sess_req->op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+
+ cipher_para = &sym_sess_req->u.cipher.para;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.cipher.para.iv_len
+ = session->iv.length;
+
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ req_data->u.sym_req.u.cipher.para.src_data_len;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ req_data->u.sym_req.op_type =
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ cipher_para = &chain_para->cipher_param;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
+ req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;
+
+ req_data->u.sym_req.u.chain.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.chain.para.dst_data_len =
+ req_data->u.sym_req.u.chain.para.src_data_len;
+ req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
+ sym_op->cipher.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_cipher =
+ sym_op->cipher.data.length;
+ req_data->u.sym_req.u.chain.para.hash_start_src_offset =
+ sym_op->auth.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_hash =
+ sym_op->auth.data.length;
+ req_data->u.sym_req.u.chain.para.aad_len =
+ chain_para->aad_len;
+
+ if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.hash_param.hash_result_len;
+ if (chain_para->hash_mode ==
+ VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.mac_param.hash_result_len;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
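+/*
+ * Editorial note: the offsets computed below mirror the layout of
+ * struct virtio_crypto_op_cookie (virtio_cryptodev.h):
+ *
+ *	0                            virtio_crypto_op_data_req
+ *	req_data_len                 virtio_crypto_inhdr (status)
+ *	indirect_vring_addr_offset   desc[NUM_ENTRY_VIRTIO_CRYPTO_OP]
+ *	indirect_iv_addr_offset      iv[VIRTIO_CRYPTO_MAX_IV_SIZE]
+ *
+ * A single VRING_DESC_F_INDIRECT slot in the data queue points at the
+ * embedded desc[] table, which in turn chains the request, IV, source
+ * and destination data, digest and status buffers.
+ */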
+static int
+virtqueue_crypto_sym_enqueue_xmit(
+ struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ uint16_t idx = 0;
+ uint16_t num_entry;
+ uint16_t needed = 1;
+ uint16_t head_idx;
+ struct vq_desc_extra *dxp;
+ struct vring_desc *start_dp;
+ struct vring_desc *desc;
+ uint64_t indirect_op_data_req_phys_addr;
+ uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
+ uint32_t indirect_vring_addr_offset = req_data_len +
+ sizeof(struct virtio_crypto_inhdr);
+ uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset +
+ sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_session *session =
+ (struct virtio_crypto_session *)get_sym_session_private_data(
+ cop->sym->session, cryptodev_virtio_driver_id);
+ struct virtio_crypto_op_data_req *op_data_req;
+ uint32_t hash_result_len = 0;
+ struct virtio_crypto_op_cookie *crypto_op_cookie;
+ struct virtio_crypto_alg_chain_session_para *para;
+
+ if (unlikely(sym_op->m_src->nb_segs != 1))
+ return -EMSGSIZE;
+ if (unlikely(txvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(txvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ head_idx = txvq->vq_desc_head_idx;
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
+ if (unlikely(session == NULL))
+ return -EFAULT;
+
+ dxp = &txvq->vq_descx[head_idx];
+
+ if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
+ return -EFAULT;
+ }
+ crypto_op_cookie = dxp->cookie;
+ indirect_op_data_req_phys_addr =
+ rte_mempool_virt2iova(crypto_op_cookie);
+ op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
+
+ if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))
+ return -EFAULT;
+
+ /* status is initialized to VIRTIO_CRYPTO_ERR */
+ ((struct virtio_crypto_inhdr *)
+ ((uint8_t *)op_data_req + req_data_len))->status =
+ VIRTIO_CRYPTO_ERR;
+
+ /* point to indirect vring entry */
+ desc = (struct vring_desc *)
+ ((uint8_t *)op_data_req + indirect_vring_addr_offset);
+ for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
+ desc[idx].next = idx + 1;
+ desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;
+
+ idx = 0;
+
+ /* indirect vring: first part, virtio_crypto_op_data_req */
+ desc[idx].addr = indirect_op_data_req_phys_addr;
+ desc[idx].len = req_data_len;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: iv of cipher */
+ if (session->iv.length) {
+ if (cop->phys_addr)
+ desc[idx].addr = cop->phys_addr + session->iv.offset;
+ else {
+ rte_memcpy(crypto_op_cookie->iv,
+ rte_crypto_op_ctod_offset(cop,
+ uint8_t *, session->iv.offset),
+ session->iv.length);
+ desc[idx].addr = indirect_op_data_req_phys_addr +
+ indirect_iv_addr_offset;
+ }
+
+ desc[idx].len = session->iv.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: additional auth data */
+ if (session->aad.length) {
+ desc[idx].addr = session->aad.phys_addr;
+ desc[idx].len = session->aad.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: src data */
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: dst data */
+ if (sym_op->m_dst) {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ } else {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ }
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+
+ /* indirect vring: digest result */
+ para = &(session->ctrl.u.sym_create_session.u.chain.para);
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ hash_result_len = para->u.hash_param.hash_result_len;
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ hash_result_len = para->u.mac_param.hash_result_len;
+ if (hash_result_len > 0) {
+ desc[idx].addr = sym_op->auth.digest.phys_addr;
+ desc[idx].len = hash_result_len;
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: last part, status returned */
+ desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
+ desc[idx].len = sizeof(struct virtio_crypto_inhdr);
+ desc[idx++].flags = VRING_DESC_F_WRITE;
+
+ num_entry = idx;
+
+ /* save the infos to use when receiving packets */
+ dxp->crypto_op = (void *)cop;
+ dxp->ndescs = needed;
+
+ /* use a single buffer */
+ start_dp = txvq->vq_ring.desc;
+ start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
+ indirect_vring_addr_offset;
+ start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
+ start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;
+
+ idx = start_dp[head_idx].next;
+ txvq->vq_desc_head_idx = idx;
+ if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ txvq->vq_desc_tail_idx = idx;
+ txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+ vq_update_avail_ring(txvq, head_idx);
+
+ return 0;
+}
+
+static int
+virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ int ret;
+
+ switch (cop->type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
+ break;
+ default:
+ VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
+ cop->type);
+ ret = -EFAULT;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+virtio_crypto_vring_start(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw = vq->hw;
+ int i, size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* Chain all the descriptors in the ring with an END */
+ for (i = 0; i < size - 1; i++)
+ vr->desc[i].next = (uint16_t)(i + 1);
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+ /*
+ * Disable device(host) interrupting guest
+ */
+ virtqueue_disable_intr(vq);
+
+ /*
+ * Set guest physical address of the virtqueue
+ * in VIRTIO_PCI_QUEUE_PFN config register of device
+ * to share with the backend
+ */
+ if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (hw->cvq) {
+ virtio_crypto_vring_start(hw->cvq);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+ }
+}
+
+void
+virtio_crypto_dataq_start(struct rte_cryptodev *dev)
+{
+ /*
+ * Start data vrings
+ * - Setup vring structure for data queues
+ */
+ uint16_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Start data vring. */
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ virtio_crypto_vring_start(dev->data->queue_pairs[i]);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+/* dequeue burst is capped at the data queue vring size (1024) */
+#define VIRTIO_MBUF_BURST_SZ 1024
+
+uint16_t
+virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ uint16_t nb_used, num, nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(txvq);
+
+ virtio_rmb();
+
+ num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+ num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
+ ? num : VIRTIO_MBUF_BURST_SZ);
+
+ if (num == 0)
+ return 0;
+
+ nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
+ VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq;
+ uint16_t nb_tx;
+ int error;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+ if (unlikely(tx_queue == NULL)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
+ return 0;
+ }
+ txvq = tx_queue;
+
+ VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
+ /* nb_segs is always 1 in the virtio crypto case */
+ int need = txm->nb_segs - txvq->vq_free_cnt;
+
+ /*
+ * A positive value indicates there are not enough free vring
+ * descriptors
+ */
+ if (unlikely(need > 0)) {
+ /*
+ * try again because the receive process may have
+ * freed some space
+ */
+ need = txm->nb_segs - txvq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
+ "descriptors to transmit");
+ break;
+ }
+ }
+
+ txvq->packets_sent_total++;
+
+ /* Enqueue Packet buffers */
+ error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
+ if (unlikely(error)) {
+ if (error == -ENOSPC)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue: no free descriptors");
+ else if (error == -EMSGSIZE)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue: multi-segment mbuf or "
+ "too few free descriptors");
+ else
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue error: %d", error);
+ txvq->packets_sent_failed++;
+ break;
+ }
+ }
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(txvq);
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
diff --git a/drivers/crypto/virtio/virtqueue.c b/drivers/crypto/virtio/virtqueue.c
new file mode 100644
index 00000000..fd8be581
--- /dev/null
+++ b/drivers/crypto/virtio/virtqueue.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_crypto.h>
+#include <rte_malloc.h>
+
+#include "virtqueue.h"
+
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ /*
+ * Set VRING_AVAIL_F_NO_INTERRUPT to ask the host not to
+ * interrupt when it consumes packets.
+ * Note: this is only a hint; the host may interrupt anyway.
+ */
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+void
+virtqueue_detatch_unused(struct virtqueue *vq)
+{
+ struct rte_crypto_op *cop = NULL;
+
+ int idx;
+
+ if (vq != NULL)
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ cop = vq->vq_descx[idx].crypto_op;
+ if (cop) {
+ if (cop->sym->m_src)
+ rte_pktmbuf_free(cop->sym->m_src);
+ if (cop->sym->m_dst)
+ rte_pktmbuf_free(cop->sym->m_dst);
+ rte_crypto_op_free(cop);
+ vq->vq_descx[idx].crypto_op = NULL;
+ }
+ }
+}
diff --git a/drivers/crypto/virtio/virtqueue.h b/drivers/crypto/virtio/virtqueue.h
new file mode 100644
index 00000000..bf10c657
--- /dev/null
+++ b/drivers/crypto/virtio/virtqueue.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+#include "virtio_logs.h"
+#include "virtio_crypto.h"
+
+struct rte_mbuf;
+
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ */
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 };
+
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+struct vq_desc_extra {
+ void *crypto_op;
+ void *cookie;
+ uint16_t ndescs;
+};
+
+struct virtqueue {
+ /** virtio_crypto_hw structure pointer. */
+ struct virtio_crypto_hw *hw;
+ /** memzone backing the vring. */
+ const struct rte_memzone *mz;
+ /** mempool from which headers and requests are allocated. */
+ struct rte_mempool *mpool;
+ uint8_t dev_id; /**< Device identifier. */
+ uint16_t vq_queue_index; /**< PCI queue index */
+
+ void *vq_ring_virt_mem; /**< linear address of vring */
+ unsigned int vq_ring_size;
+ phys_addr_t vq_ring_mem; /**< physical address of vring */
+
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_nentries; /**< number of descriptors in the vring */
+
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_avail_idx;
+
+ /* Statistics */
+ uint64_t packets_sent_total;
+ uint64_t packets_sent_failed;
+ uint64_t packets_received_total;
+ uint64_t packets_received_failed;
+
+ uint16_t *notify_addr;
+
+ struct vq_desc_extra vq_descx[0];
+};
+
+/**
+ * Tell the backend not to interrupt us.
+ */
+void virtqueue_disable_intr(struct virtqueue *vq);
+
+/**
+ * Free the crypto ops and mbufs still attached to unused descriptors.
+ */
+void virtqueue_detatch_unused(struct virtqueue *vq);
+
+static inline int
+virtqueue_full(const struct virtqueue *vq)
+{
+ return vq->vq_free_cnt == 0;
+}
+
+#define VIRTQUEUE_NUSED(vq) \
+ ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+
+static inline void
+vq_update_avail_idx(struct virtqueue *vq)
+{
+ virtio_wmb();
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+ if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ vq->vq_avail_idx++;
+}
+
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+}
+
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+ /*
+ * Ensure updated avail->idx is visible to host.
+ * For virtio on IA, the notification is through an I/O port operation,
+ * which is itself a serializing instruction.
+ */
+ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+}
+
+/**
+ * Dump virtqueue internal structures, for debug purpose only.
+ */
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ VIRTIO_CRYPTO_INIT_LOG_DBG(\
+ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+
+#endif /* _VIRTQUEUE_H_ */
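
The VIRTQUEUE_NUSED() macro above depends on both indices being free-running uint16_t counters: the subtraction is taken modulo 2^16, so the pending-entry count stays correct even after used->idx wraps past 65535. A self-contained check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint16_t used_idx = 5;		/* host wrapped: ..., 65535, 0, ..., 5 */
	uint16_t cons_idx = 65534;	/* guest still consuming pre-wrap */

	/* (5 - 65534) mod 2^16 == 7 entries pending */
	assert((uint16_t)(used_idx - cons_idx) == 7);
	return 0;
}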
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index ad65b80c..313f4590 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <rte_common.h>
@@ -11,8 +11,7 @@
#include <rte_cpuflags.h>
#include "rte_zuc_pmd_private.h"
-
-#define ZUC_MAX_BURST 8
+#define ZUC_MAX_BURST 4
#define BYTE_LEN 8
static uint8_t cryptodev_driver_id;
@@ -78,7 +77,7 @@ zuc_set_session_parameters(struct zuc_session *sess,
break;
case ZUC_OP_NOT_SUPPORTED:
default:
- ZUC_LOG_ERR("Unsupported operation chain order parameter");
+ ZUC_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
@@ -88,7 +87,7 @@ zuc_set_session_parameters(struct zuc_session *sess,
return -ENOTSUP;
if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) {
- ZUC_LOG_ERR("Wrong IV length");
+ ZUC_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
@@ -104,14 +103,14 @@ zuc_set_session_parameters(struct zuc_session *sess,
return -ENOTSUP;
if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) {
- ZUC_LOG_ERR("Wrong digest length");
+ ZUC_LOG(ERR, "Wrong digest length");
return -EINVAL;
}
sess->auth_op = auth_xform->auth.op;
if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) {
- ZUC_LOG_ERR("Wrong IV length");
+ ZUC_LOG(ERR, "Wrong IV length");
return -EINVAL;
}
sess->auth_iv_offset = auth_xform->auth.iv.offset;
@@ -135,7 +134,7 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
- sess = (struct zuc_session *)get_session_private_data(
+ sess = (struct zuc_session *)get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -157,8 +156,8 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -168,10 +167,10 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
return sess;
}
-/** Encrypt/decrypt mbufs with same cipher key. */
+/** Encrypt/decrypt mbufs. */
static uint8_t
process_zuc_cipher_op(struct rte_crypto_op **ops,
- struct zuc_session *session,
+ struct zuc_session **sessions,
uint8_t num_ops)
{
unsigned i;
@@ -180,22 +179,25 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
uint8_t *iv[ZUC_MAX_BURST];
uint32_t num_bytes[ZUC_MAX_BURST];
uint8_t *cipher_keys[ZUC_MAX_BURST];
+ struct zuc_session *sess;
for (i = 0; i < num_ops; i++) {
if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
|| ((ops[i]->sym->cipher.data.offset
% BYTE_LEN) != 0)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("Data Length or offset");
+ ZUC_LOG(ERR, "Data Length or offset");
break;
}
+ sess = sessions[i];
+
#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
(ops[i]->sym->m_dst != NULL &&
!rte_pktmbuf_is_contiguous(
ops[i]->sym->m_dst))) {
- ZUC_LOG_ERR("PMD supports only contiguous mbufs, "
+ ZUC_LOG(ERR, "PMD supports only contiguous mbufs, "
"op (%p) provides noncontiguous mbuf as "
"source/destination buffer.\n", ops[i]);
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
@@ -211,10 +213,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
+ sess->cipher_iv_offset);
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
- cipher_keys[i] = session->pKey_cipher;
+ cipher_keys[i] = sess->pKey_cipher;
processed_ops++;
}
@@ -225,10 +227,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
return processed_ops;
}
-/** Generate/verify hash from mbufs with same hash key. */
+/** Generate/verify hash from mbufs. */
static int
process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session *session,
+ struct zuc_session **sessions,
uint8_t num_ops)
{
unsigned i;
@@ -237,26 +239,29 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
uint32_t *dst;
uint32_t length_in_bits;
uint8_t *iv;
+ struct zuc_session *sess;
for (i = 0; i < num_ops; i++) {
/* Data must be byte aligned */
if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- ZUC_LOG_ERR("Offset");
+ ZUC_LOG(ERR, "Offset");
break;
}
+ sess = sessions[i];
+
length_in_bits = ops[i]->sym->auth.data.length;
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->auth_iv_offset);
+ sess->auth_iv_offset);
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint32_t *)qp->temp_digest;
- sso_zuc_eia3_1_buffer(session->pKey_hash,
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
iv, src,
length_in_bits, dst);
/* Verify digest. */
@@ -266,7 +271,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
} else {
dst = (uint32_t *)ops[i]->sym->auth.digest.data;
- sso_zuc_eia3_1_buffer(session->pKey_hash,
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
iv, src,
length_in_bits, dst);
}
@@ -276,33 +281,34 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
return processed_ops;
}
-/** Process a batch of crypto ops which shares the same session. */
+/** Process a batch of crypto ops which shares the same operation type. */
static int
-process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
+process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
+ struct zuc_session **sessions,
struct zuc_qp *qp, uint8_t num_ops,
uint16_t *accumulated_enqueued_ops)
{
unsigned i;
unsigned enqueued_ops, processed_ops;
- switch (session->op) {
+ switch (op_type) {
case ZUC_OP_ONLY_CIPHER:
processed_ops = process_zuc_cipher_op(ops,
- session, num_ops);
+ sessions, num_ops);
break;
case ZUC_OP_ONLY_AUTH:
- processed_ops = process_zuc_hash_op(qp, ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
num_ops);
break;
case ZUC_OP_CIPHER_AUTH:
- processed_ops = process_zuc_cipher_op(ops, session,
+ processed_ops = process_zuc_cipher_op(ops, sessions,
num_ops);
- process_zuc_hash_op(qp, ops, session, processed_ops);
+ process_zuc_hash_op(qp, ops, sessions, processed_ops);
break;
case ZUC_OP_AUTH_CIPHER:
- processed_ops = process_zuc_hash_op(qp, ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
num_ops);
- process_zuc_cipher_op(ops, session, processed_ops);
+ process_zuc_cipher_op(ops, sessions, processed_ops);
break;
default:
/* Operation not supported. */
@@ -318,10 +324,10 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct zuc_session));
+ memset(sessions[i], 0, sizeof(struct zuc_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
- rte_mempool_put(qp->sess_mp, session);
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sessions[i]);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -342,7 +348,10 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
struct rte_crypto_op *curr_c_op;
- struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
+ struct zuc_session *curr_sess;
+ struct zuc_session *sessions[ZUC_MAX_BURST];
+ enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
+ enum zuc_operation curr_zuc_op;
struct zuc_qp *qp = queue_pair;
unsigned i;
uint8_t burst_size = 0;
@@ -352,61 +361,70 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
curr_c_op = ops[i];
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
curr_sess = zuc_get_session(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {
+ if (unlikely(curr_sess == NULL)) {
curr_c_op->status =
RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
break;
}
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
+ curr_zuc_op = curr_sess->op;
+
+ /*
+ * Batch ops that share the same operation type
+ * (cipher only, auth only...).
+ */
+ if (burst_size == 0) {
+ prev_zuc_op = curr_zuc_op;
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
+ } else if (curr_zuc_op == prev_zuc_op) {
+ c_ops[burst_size] = curr_c_op;
+ sessions[burst_size] = curr_sess;
+ burst_size++;
/*
* When there are enough ops to process in a batch,
* process them, and start a new batch.
*/
if (burst_size == ZUC_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ processed_ops = process_ops(c_ops, curr_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
if (processed_ops < burst_size) {
burst_size = 0;
break;
}
burst_size = 0;
- prev_sess = NULL;
}
} else {
/*
- * Different session, process the ops
- * of the previous session.
+ * Different operation type, process the ops
+ * of the previous type.
*/
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
if (processed_ops < burst_size) {
burst_size = 0;
break;
}
burst_size = 0;
- prev_sess = curr_sess;
+ prev_zuc_op = curr_zuc_op;
- c_ops[burst_size++] = curr_c_op;
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
}
}
if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ /* Process the crypto ops of the last operation type. */
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
}
qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
@@ -442,7 +460,7 @@ cryptodev_zuc_create(const char *name,
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- ZUC_LOG_ERR("failed to create cryptodev vdev");
+ ZUC_LOG(ERR, "failed to create cryptodev vdev");
goto init_error;
}
@@ -460,11 +478,10 @@ cryptodev_zuc_create(const char *name,
internals = dev->data->dev_private;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed",
+ ZUC_LOG(ERR, "driver %s: failed",
init_params->name);
cryptodev_zuc_remove(vdev);
@@ -478,8 +495,7 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct zuc_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name;
const char *input_args;
@@ -522,7 +538,11 @@ static struct cryptodev_driver zuc_crypto_drv;
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(zuc_init_log)
+{
+ zuc_logtype_driver = rte_log_register("pmd.crypto.zuc");
+}
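
The reworked zuc_pmd_enqueue_burst() above batches consecutive ops that share an operation type and flushes the batch when the type changes or when ZUC_MAX_BURST is reached. A distilled, self-contained form of that accumulate-and-flush pattern; op_type() and process() are illustrative stand-ins for the session lookup and process_ops(), not driver API:

#include <stdint.h>

#define MAX_BURST 4

enum op_kind { OP_CIPHER, OP_AUTH };

/* stand-in for reading the op type out of the session */
static enum op_kind op_type(int op) { return (op & 1) ? OP_AUTH : OP_CIPHER; }

/* stand-in for process_ops(); pretends every op succeeded */
static uint16_t
process(const int *batch, enum op_kind kind, uint16_t n)
{
	(void)batch; (void)kind;
	return n;
}

static uint16_t
batch_by_type(const int *ops, uint16_t nb_ops)
{
	int batch[MAX_BURST];
	enum op_kind prev = OP_CIPHER, cur;
	uint16_t i, n = 0, done = 0;

	for (i = 0; i < nb_ops; i++) {
		cur = op_type(ops[i]);
		if (n != 0 && cur != prev) {	/* type changed: flush */
			done += process(batch, prev, n);
			n = 0;
		}
		prev = cur;
		batch[n++] = ops[i];
		if (n == MAX_BURST) {		/* batch full: flush */
			done += process(batch, prev, n);
			n = 0;
		}
	}
	if (n != 0)				/* trailing partial batch */
		done += process(batch, prev, n);
	return done;
}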
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
index 8abac898..6da39654 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <string.h>
@@ -130,7 +130,8 @@ zuc_pmd_info_get(struct rte_cryptodev *dev,
if (dev_info != NULL) {
dev_info->driver_id = dev->driver_id;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = zuc_pmd_capabilities;
}
@@ -172,13 +173,13 @@ zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
r = rte_ring_lookup(qp->name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- ZUC_LOG_INFO("Reusing existing ring %s"
+ ZUC_LOG(INFO, "Reusing existing ring %s"
" for processed packets",
qp->name);
return r;
}
- ZUC_LOG_ERR("Unable to reuse existing ring %s"
+ ZUC_LOG(ERR, "Unable to reuse existing ring %s"
" for processed packets",
qp->name);
return NULL;
@@ -230,22 +231,6 @@ qp_setup_cleanup:
return -1;
}
-/** Start queue pair */
-static int
-zuc_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-zuc_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
/** Return the number of allocated queue pairs */
static uint32_t
zuc_pmd_qp_count(struct rte_cryptodev *dev)
@@ -255,14 +240,14 @@ zuc_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the ZUC session structure */
static unsigned
-zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+zuc_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct zuc_session);
}
/** Configure a ZUC session from a crypto xform chain */
static int
-zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+zuc_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -271,26 +256,27 @@ zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
int ret;
if (unlikely(sess == NULL)) {
- ZUC_LOG_ERR("invalid session struct");
+ ZUC_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
+ ZUC_LOG(ERR,
"Couldn't get object from session mempool");
+
return -ENOMEM;
}
ret = zuc_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- ZUC_LOG_ERR("failed configure session parameters");
+ ZUC_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -298,17 +284,17 @@ zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-zuc_pmd_session_clear(struct rte_cryptodev *dev,
+zuc_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct zuc_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -326,13 +312,11 @@ struct rte_cryptodev_ops zuc_pmd_ops = {
.queue_pair_setup = zuc_pmd_qp_setup,
.queue_pair_release = zuc_pmd_qp_release,
- .queue_pair_start = zuc_pmd_qp_start,
- .queue_pair_stop = zuc_pmd_qp_stop,
.queue_pair_count = zuc_pmd_qp_count,
- .session_get_size = zuc_pmd_session_get_size,
- .session_configure = zuc_pmd_session_configure,
- .session_clear = zuc_pmd_session_clear
+ .sym_session_get_size = zuc_pmd_sym_session_get_size,
+ .sym_session_configure = zuc_pmd_sym_session_configure,
+ .sym_session_clear = zuc_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
index b83c4a04..5e5906dd 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_private.h
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#ifndef _RTE_ZUC_PMD_PRIVATE_H_
@@ -10,25 +10,12 @@
#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
/**< ZUC PMD device name */
-#define ZUC_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_ZUC_DEBUG
-#define ZUC_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
- __func__, __LINE__, ## args)
-
-#define ZUC_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
- __func__, __LINE__, ## args)
-#else
-#define ZUC_LOG_INFO(fmt, args...)
-#define ZUC_LOG_DBG(fmt, args...)
-#endif
+/** ZUC PMD driver logtype */
+int zuc_logtype_driver;
+#define ZUC_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, zuc_logtype_driver, \
+ "%s()... line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
#define ZUC_IV_KEY_LENGTH 16
#define ZUC_DIGEST_LENGTH 4
@@ -37,8 +24,6 @@
struct zuc_private {
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** ZUC buffer queue pair */
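
The ZUC_LOG() rewrite above moves the PMD from static RTE_LOG()-based macros to a runtime-registered logtype, so verbosity becomes tunable per component through the EAL --log-level option. The same pattern in isolation, using only rte_log APIs (driver name is illustrative):

#include <rte_common.h>
#include <rte_log.h>

static int my_logtype;

/* mirrors the ZUC_LOG() definition above */
#define MY_LOG(level, fmt, ...) \
	rte_log(RTE_LOG_ ## level, my_logtype, \
		"mydrv: " fmt "\n", ## __VA_ARGS__)

RTE_INIT(my_driver_init_log)
{
	/* register once at startup; default to NOTICE verbosity */
	my_logtype = rte_log_register("pmd.crypto.mydrv");
	if (my_logtype >= 0)
		rte_log_set_level(my_logtype, RTE_LOG_NOTICE);
}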
diff --git a/drivers/event/Makefile b/drivers/event/Makefile
index c3d89a15..f301d8dc 100644
--- a/drivers/event/Makefile
+++ b/drivers/event/Makefile
@@ -7,8 +7,12 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += dpaa2
+endif
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += opdl
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/event/dpaa/dpaa_eventdev.c b/drivers/event/dpaa/dpaa_eventdev.c
index 00068015..5443ef56 100644
--- a/drivers/event/dpaa/dpaa_eventdev.c
+++ b/drivers/event/dpaa/dpaa_eventdev.c
@@ -29,7 +29,7 @@
#include <rte_event_eth_rx_adapter.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
-#include <rte_cycles_64.h>
+#include <rte_cycles.h>
#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
@@ -571,7 +571,7 @@ dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
return 0;
}
-static const struct rte_eventdev_ops dpaa_eventdev_ops = {
+static struct rte_eventdev_ops dpaa_eventdev_ops = {
.dev_infos_get = dpaa_event_dev_info_get,
.dev_configure = dpaa_event_dev_configure,
.dev_start = dpaa_event_dev_start,
diff --git a/drivers/event/dpaa/dpaa_eventdev.h b/drivers/event/dpaa/dpaa_eventdev.h
index 918fe35c..583e46ca 100644
--- a/drivers/event/dpaa/dpaa_eventdev.h
+++ b/drivers/event/dpaa/dpaa_eventdev.h
@@ -26,7 +26,7 @@
#define DPAA_EVENT_MAX_QUEUE_FLOWS 2048
#define DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS 8
#define DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS 0
-#define DPAA_EVENT_MAX_EVENT_PORT RTE_MAX_LCORE
+#define DPAA_EVENT_MAX_EVENT_PORT RTE_MIN(RTE_MAX_LCORE, INT8_MAX)
#define DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH 8
#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS 100UL
#define DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID ((uint64_t)-1)
diff --git a/drivers/event/dpaa/meson.build b/drivers/event/dpaa/meson.build
new file mode 100644
index 00000000..0914f858
--- /dev/null
+++ b/drivers/event/dpaa/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['pmd_dpaa']
+sources = files('dpaa_eventdev.c')
+
+allow_experimental_apis = true
diff --git a/drivers/event/dpaa2/Makefile b/drivers/event/dpaa2/Makefile
index b26862cd..5e1a6320 100644
--- a/drivers/event/dpaa2/Makefile
+++ b/drivers/event/dpaa2/Makefile
@@ -18,7 +18,8 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
-LDLIBS += -lrte_eal -lrte_eventdev -lrte_bus_fslmc -lrte_pmd_dpaa2
+LDLIBS += -lrte_eal -lrte_eventdev
+LDLIBS += -lrte_bus_fslmc -lrte_mempool_dpaa2 -lrte_pmd_dpaa2
LDLIBS += -lrte_bus_vdev
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
@@ -28,6 +29,9 @@ EXPORT_MAP := rte_pmd_dpaa2_event_version.map
LIBABIVER := 1
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
#
# all source are stored in SRCS-y
#
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index c3e6fbff..ea1e5cc6 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -72,7 +72,7 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
return 0;
}
}
@@ -87,10 +87,10 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
const struct rte_event *event = &ev[num_tx + loop];
if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
- fqid = evq_info->dpci->queue[
+ fqid = evq_info->dpci->rx_queue[
DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
else
- fqid = evq_info->dpci->queue[
+ fqid = evq_info->dpci->rx_queue[
DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
/* Prepare enqueue descriptor */
@@ -122,11 +122,12 @@ dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
if (!loop)
return num_tx;
frames_to_send = loop;
- DPAA2_EVENTDEV_ERR("Unable to allocate memory");
+ DPAA2_EVENTDEV_ERR(
+ "Unable to allocate event object");
goto send_partial;
}
rte_memcpy(ev_temp, event, sizeof(struct rte_event));
- DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
DPAA2_SET_FD_LEN((&fd_arr[loop]),
sizeof(struct rte_event));
}
@@ -153,26 +154,12 @@ dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
struct epoll_event epoll_ev;
- int ret, i = 0;
qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
QBMAN_SWP_INTERRUPT_DQRI);
-RETRY:
- ret = epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
+ epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
&epoll_ev, 1, timeout_ticks);
- if (ret < 1) {
- /* sometimes due to some spurious interrupts epoll_wait fails
- * with errno EINTR. so here we are retrying epoll_wait in such
- * case to avoid the problem.
- */
- if (errno == EINTR) {
- DPAA2_EVENTDEV_DEBUG("epoll_wait fails\n");
- if (i++ > 10)
- DPAA2_EVENTDEV_DEBUG("Dequeue burst Failed\n");
- goto RETRY;
- }
- }
}
static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
@@ -182,7 +169,7 @@ static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
struct rte_event *ev)
{
struct rte_event *ev_temp =
- (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
RTE_SET_USED(rxq);
@@ -199,7 +186,7 @@ static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
struct rte_event *ev)
{
struct rte_event *ev_temp =
- (struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+ (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
RTE_SET_USED(swp);
@@ -227,7 +214,7 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- DPAA2_EVENTDEV_ERR("Failure in affining portal\n");
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
return 0;
}
}
@@ -258,12 +245,12 @@ dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
qbman_swp_prefetch_dqrr_next(swp);
fd = qbman_result_DQ_fd(dq);
- rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
+ rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
if (rxq) {
rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
} else {
qbman_swp_dqrr_consume(swp, dq);
- DPAA2_EVENTDEV_ERR("Null Return VQ received\n");
+ DPAA2_EVENTDEV_ERR("Null Return VQ received");
return 0;
}
@@ -335,7 +322,7 @@ dpaa2_eventdev_configure(const struct rte_eventdev *dev)
priv->event_dev_cfg = conf->event_dev_cfg;
DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
- dev->data->dev_id);
+ dev->data->dev_id);
return 0;
}
@@ -473,7 +460,6 @@ dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
0, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id);
- evq_info->link = 0;
}
return (int)nb_unlinks;
@@ -494,23 +480,20 @@ dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
for (i = 0; i < nb_links; i++) {
evq_info = &priv->evq_info[queues[i]];
- if (evq_info->link)
- continue;
ret = dpio_add_static_dequeue_channel(
dpaa2_portal->dpio_dev->dpio,
CMD_PRI_LOW, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id, &channel_index);
if (ret < 0) {
- DPAA2_EVENTDEV_ERR("Static dequeue cfg failed with ret: %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
goto err;
}
qbman_swp_push_set(dpaa2_portal->dpio_dev->sw_portal,
channel_index, 1);
evq_info->dpcon->channel_index = channel_index;
- evq_info->link = 1;
}
RTE_SET_USED(priorities);
@@ -524,7 +507,6 @@ err:
dpio_remove_static_dequeue_channel(dpaa2_portal->dpio_dev->dpio,
0, dpaa2_portal->dpio_dev->token,
evq_info->dpcon->dpcon_id);
- evq_info->link = 0;
}
return ret;
}
@@ -587,8 +569,8 @@ dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_attach(eth_dev, i,
dpcon_id, queue_conf);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
goto fail;
}
}
@@ -620,7 +602,8 @@ dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
dpcon_id, queue_conf);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_attach failed: ret: %d\n", ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
return ret;
}
return 0;
@@ -639,8 +622,8 @@ dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
ret = dpaa2_eth_eventq_detach(eth_dev, i);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret %d\n",
- ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
return ret;
}
}
@@ -662,7 +645,8 @@ dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
if (ret) {
- DPAA2_EVENTDEV_ERR("dpaa2_eth_eventq_detach failed: ret: %d\n", ret);
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
return ret;
}
@@ -693,7 +677,7 @@ dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
return 0;
}
-static const struct rte_eventdev_ops dpaa2_eventdev_ops = {
+static struct rte_eventdev_ops dpaa2_eventdev_ops = {
.dev_infos_get = dpaa2_eventdev_info_get,
.dev_configure = dpaa2_eventdev_configure,
.dev_start = dpaa2_eventdev_start,
@@ -730,20 +714,21 @@ dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;
- dpci_dev->queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
dpaa2_eventdev_process_parallel;
- dpci_dev->queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
+ dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
dpaa2_eventdev_process_atomic;
for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
- rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
+ rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
ret = dpci_set_rx_queue(&dpci_dev->dpci,
CMD_PRI_LOW,
dpci_dev->token, i,
&rx_queue_cfg);
if (ret) {
DPAA2_EVENTDEV_ERR(
- "set_rx_q failed with err code: %d", ret);
+ "DPCI Rx queue setup failed: err(%d)",
+ ret);
return ret;
}
}
@@ -763,7 +748,7 @@ dpaa2_eventdev_create(const char *name)
sizeof(struct dpaa2_eventdev),
rte_socket_id());
if (eventdev == NULL) {
- DPAA2_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
+ DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
goto fail;
}
@@ -798,7 +783,7 @@ dpaa2_eventdev_create(const char *name)
ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
if (ret) {
DPAA2_EVENTDEV_ERR(
- "dpci setup failed with err code: %d", ret);
+ "DPCI setup failed: err(%d)", ret);
return ret;
}
priv->max_event_queues++;
@@ -836,3 +821,10 @@ static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
};
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
+
+RTE_INIT(dpaa2_eventdev_init_log)
+{
+ dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
+ if (dpaa2_logtype_event >= 0)
+ rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
+}
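
Several hunks above replace direct (uint64_t) casts of pointers with (size_t) casts when writing and reading frame-descriptor addresses. The motivation is 32-bit builds: casting a 32-bit pointer straight to a 64-bit integer (or back) draws a pointer/integer size-mismatch warning, while widening through uintptr_t/size_t first is well defined on both ILP32 and LP64. A minimal illustration (helper names are hypothetical):

#include <stdint.h>

static inline uint64_t
ptr_to_fd_addr(void *p)
{
	return (uint64_t)(uintptr_t)p;	/* clean on 32- and 64-bit */
}

static inline void *
fd_addr_to_ptr(uint64_t addr)
{
	return (void *)(uintptr_t)addr;	/* reverse direction */
}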
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.h b/drivers/event/dpaa2/dpaa2_eventdev.h
index 91c8f2a3..229f66af 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev.h
@@ -63,7 +63,6 @@ struct evq_info_t {
struct dpaa2_dpci_dev *dpci;
/* Configuration provided by the user */
uint32_t event_queue_cfg;
- uint8_t link;
};
struct dpaa2_eventdev {
diff --git a/drivers/event/dpaa2/dpaa2_eventdev_logs.h b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
index 7d250c3f..a2c2060c 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev_logs.h
+++ b/drivers/event/dpaa2/dpaa2_eventdev_logs.h
@@ -9,13 +9,15 @@
extern int dpaa2_logtype_event;
#define DPAA2_EVENTDEV_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "%s(): " fmt "\n", \
- __func__, ##args)
-
-#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_LOG(DEBUG, " >>")
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_event, "dpaa2_event: " \
+ fmt "\n", ##args)
#define DPAA2_EVENTDEV_DEBUG(fmt, args...) \
- DPAA2_EVENTDEV_LOG(DEBUG, fmt, ## args)
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_event, "dpaa2_event: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define EVENTDEV_INIT_FUNC_TRACE() DPAA2_EVENTDEV_DEBUG(" >>")
+
#define DPAA2_EVENTDEV_INFO(fmt, args...) \
DPAA2_EVENTDEV_LOG(INFO, fmt, ## args)
#define DPAA2_EVENTDEV_ERR(fmt, args...) \
diff --git a/drivers/event/dpaa2/dpaa2_hw_dpcon.c b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
index f2377b98..d64e588a 100644
--- a/drivers/event/dpaa2/dpaa2_hw_dpcon.c
+++ b/drivers/event/dpaa2/dpaa2_hw_dpcon.c
@@ -20,11 +20,11 @@
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
-#include <fslmc_logs.h>
#include <rte_fslmc.h>
#include <mc/fsl_dpcon.h>
#include <portal/dpaa2_hw_pvt.h>
#include "dpaa2_eventdev.h"
+#include "dpaa2_eventdev_logs.h"
TAILQ_HEAD(dpcon_dev_list, dpaa2_dpcon_dev);
static struct dpcon_dev_list dpcon_dev_list
@@ -42,7 +42,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
/* Allocate DPAA2 dpcon handle */
dpcon_node = rte_malloc(NULL, sizeof(struct dpaa2_dpcon_dev), 0);
if (!dpcon_node) {
- PMD_DRV_LOG(ERR, "Memory allocation failed for DPCON Device");
+ DPAA2_EVENTDEV_ERR(
+ "Memory allocation failed for dpcon device");
return -1;
}
@@ -51,8 +52,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
ret = dpcon_open(&dpcon_node->dpcon,
CMD_PRI_LOW, dpcon_id, &dpcon_node->token);
if (ret) {
- PMD_DRV_LOG(ERR, "Resource alloc failure with err code: %d",
- ret);
+ DPAA2_EVENTDEV_ERR("Unable to open dpcon device: err(%d)",
+ ret);
rte_free(dpcon_node);
return -1;
}
@@ -61,8 +62,8 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
ret = dpcon_get_attributes(&dpcon_node->dpcon,
CMD_PRI_LOW, dpcon_node->token, &attr);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "Reading device failed with err code: %d",
- ret);
+ DPAA2_EVENTDEV_ERR("dpcon attribute fetch failed: err(%d)",
+ ret);
rte_free(dpcon_node);
return -1;
}
@@ -75,8 +76,6 @@ rte_dpaa2_create_dpcon_device(int dev_fd __rte_unused,
TAILQ_INSERT_TAIL(&dpcon_dev_list, dpcon_node, next);
- RTE_LOG(DEBUG, PMD, "DPAA2: Added [dpcon.%d]\n", dpcon_id);
-
return 0;
}
diff --git a/drivers/event/dpaa2/meson.build b/drivers/event/dpaa2/meson.build
new file mode 100644
index 00000000..de7a4615
--- /dev/null
+++ b/drivers/event/dpaa2/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['bus_vdev', 'pmd_dpaa2']
+sources = files('dpaa2_hw_dpcon.c',
+ 'dpaa2_eventdev.c')
+
+allow_experimental_apis = true
diff --git a/drivers/event/meson.build b/drivers/event/meson.build
index d7bc4854..e9511993 100644
--- a/drivers/event/meson.build
+++ b/drivers/event/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['skeleton', 'sw', 'octeontx']
+drivers = ['dpaa', 'dpaa2', 'octeontx', 'skeleton', 'sw']
std_deps = ['eventdev', 'kvargs']
config_flag_fmt = 'RTE_LIBRTE_@0@_EVENTDEV_PMD'
driver_name_fmt = 'rte_pmd_@0@_event'
diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index 0e49efd8..90ad2217 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -10,10 +10,12 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_octeontx_ssovf.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
+CFLAGS += -DALLOW_EXPERIMENTAL_API
-LDLIBS += -lrte_eal -lrte_eventdev -lrte_mempool_octeontx -lrte_pmd_octeontx
+LDLIBS += -lrte_eal -lrte_eventdev -lrte_common_octeontx -lrte_pmd_octeontx
LDLIBS += -lrte_bus_pci -lrte_mempool -lrte_mbuf -lrte_kvargs
LDLIBS += -lrte_bus_vdev
@@ -27,18 +29,26 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev_selftest.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += timvf_probe.c
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
+CFLAGS_timvf_worker.o += -fno-prefetch-loop-arrays
ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
CFLAGS_ssovf_worker.o += -Ofast
+CFLAGS_timvf_worker.o += -Ofast
else
CFLAGS_ssovf_worker.o += -O3 -ffast-math
+CFLAGS_timvf_worker.o += -O3 -ffast-math
endif
else
CFLAGS_ssovf_worker.o += -Ofast
+CFLAGS_timvf_worker.o += -Ofast
endif
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/octeontx/meson.build b/drivers/event/octeontx/meson.build
index 358fc9fc..04185533 100644
--- a/drivers/event/octeontx/meson.build
+++ b/drivers/event/octeontx/meson.build
@@ -3,7 +3,12 @@
sources = files('ssovf_worker.c',
'ssovf_evdev.c',
- 'ssovf_evdev_selftest.c'
+ 'ssovf_evdev_selftest.c',
+ 'ssovf_probe.c',
+ 'timvf_worker.c',
+ 'timvf_evdev.c',
+ 'timvf_probe.c'
)
-deps += ['mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
+allow_experimental_apis = true
+deps += ['common_octeontx', 'mempool_octeontx', 'bus_vdev', 'pmd_octeontx']
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index a1086077..16a3a04b 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -18,12 +18,12 @@
#include <rte_bus_vdev.h>
#include "ssovf_evdev.h"
+#include "timvf_evdev.h"
int otx_logtype_ssovf;
+static uint8_t timvf_enable_stats;
-RTE_INIT(otx_ssovf_init_log);
-static void
-otx_ssovf_init_log(void)
+RTE_INIT(otx_ssovf_init_log)
{
otx_logtype_ssovf = rte_log_register("pmd.event.octeontx");
if (otx_logtype_ssovf >= 0)
@@ -49,7 +49,7 @@ ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
hdr.vfid = 0;
memset(info, 0, len);
- return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
+ return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}
struct ssovf_mbox_getwork_wait {
@@ -69,7 +69,7 @@ ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
hdr.vfid = 0;
tmo_set.wait_ns = timeout_ns;
- ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
if (ret)
ssovf_log_err("Failed to set getwork timeout(%d)", ret);
@@ -99,7 +99,7 @@ ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
grp.affinity = 0xff;
grp.priority = prio / 32; /* Normalize to 0 to 7 */
- ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
if (ret)
ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);
@@ -125,7 +125,7 @@ ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
memset(&ns2iter, 0, len);
ns2iter.wait_ns = ns;
- ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
+ ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
if (ret < 0 || (ret != len)) {
ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
return -EIO;
@@ -276,7 +276,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
return -ENOMEM;
}
- ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
+ ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
if (ws->base == NULL) {
rte_free(ws);
ssovf_log_err("Failed to get hws base addr port=%d", port_id);
@@ -290,7 +290,7 @@ ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
ws->port = port_id;
for (q = 0; q < edev->nb_event_queues; q++) {
- ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
+ ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
if (ws->grps[q] == NULL) {
rte_free(ws);
ssovf_log_err("Failed to get grp%d base addr", q);
@@ -474,14 +474,9 @@ static int
ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev)
{
- int ret;
- const struct octeontx_nic *nic = eth_dev->data->dev_private;
RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
- ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
- if (ret)
- return 0;
- octeontx_pki_port_start(nic->port_id);
return 0;
}
@@ -490,14 +485,9 @@ static int
ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev)
{
- int ret;
- const struct octeontx_nic *nic = eth_dev->data->dev_private;
RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
- ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
- if (ret)
- return 0;
- octeontx_pki_port_stop(nic->port_id);
return 0;
}
@@ -529,9 +519,9 @@ ssovf_start(struct rte_eventdev *dev)
for (i = 0; i < edev->nb_event_queues; i++) {
/* Consume all the events through HWS0 */
- ssows_flush_events(dev->data->ports[0], i);
+ ssows_flush_events(dev->data->ports[0], i, NULL, NULL);
- base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
base += SSO_VHGRP_QCTL;
ssovf_write64(1, base); /* Enable SSO group */
}
@@ -541,6 +531,16 @@ ssovf_start(struct rte_eventdev *dev)
}
static void
+ssows_handle_event(void *arg, struct rte_event event)
+{
+ struct rte_eventdev *dev = arg;
+
+ if (dev->dev_ops->dev_stop_flush != NULL)
+ dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
+ dev->data->dev_stop_flush_arg);
+}
+
+static void
ssovf_stop(struct rte_eventdev *dev)
{
struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
@@ -557,9 +557,10 @@ ssovf_stop(struct rte_eventdev *dev)
for (i = 0; i < edev->nb_event_queues; i++) {
/* Consume all the events through HWS0 */
- ssows_flush_events(dev->data->ports[0], i);
+ ssows_flush_events(dev->data->ports[0], i,
+ ssows_handle_event, dev);
- base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
base += SSO_VHGRP_QCTL;
ssovf_write64(0, base); /* Disable SSO group */
}
@@ -590,8 +591,16 @@ ssovf_selftest(const char *key __rte_unused, const char *value,
return 0;
}
+static int
+ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops)
+{
+ return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
+ timvf_enable_stats);
+}
+
/* Initialize and register event driver with DPDK Application */
-static const struct rte_eventdev_ops ssovf_ops = {
+static struct rte_eventdev_ops ssovf_ops = {
.dev_infos_get = ssovf_info_get,
.dev_configure = ssovf_configure,
.queue_def_conf = ssovf_queue_def_conf,
@@ -610,6 +619,8 @@ static const struct rte_eventdev_ops ssovf_ops = {
.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+ .timer_adapter_caps_get = ssovf_timvf_caps_get,
+
.dev_selftest = test_eventdev_octeontx,
.dump = ssovf_dump,
@@ -621,7 +632,7 @@ static const struct rte_eventdev_ops ssovf_ops = {
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
- struct octeontx_ssovf_info oinfo;
+ struct ssovf_info oinfo;
struct ssovf_mbox_dev_info info;
struct ssovf_evdev *edev;
struct rte_eventdev *eventdev;
@@ -633,6 +644,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
static const char *const args[] = {
SSOVF_SELFTEST_ARG,
+ TIMVF_ENABLE_STATS_ARG,
NULL
};
@@ -660,6 +672,15 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
rte_kvargs_free(kvlist);
return ret;
}
+
+ ret = rte_kvargs_process(kvlist,
+ TIMVF_ENABLE_STATS_ARG,
+ ssovf_selftest, &timvf_enable_stats);
+ if (ret != 0) {
+ ssovf_log_err("%s: Error in timvf stats", name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
}
rte_kvargs_free(kvlist);
@@ -679,7 +700,7 @@ ssovf_vdev_probe(struct rte_vdev_device *vdev)
return 0;
}
- ret = octeontx_ssovf_info(&oinfo);
+ ret = ssovf_info(&oinfo);
if (ret) {
ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
goto error;
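
The new ssows_handle_event() above is what lets ssovf_stop() hand each drained event to an application-supplied dev_stop_flush callback instead of silently discarding it. The application side, mirroring the selftest added further below (the registration API is experimental in this release; names here are illustrative):

#include <rte_common.h>
#include <rte_eventdev.h>

static void
count_flushed(uint8_t dev_id, struct rte_event ev, void *arg)
{
	unsigned int *flushed = arg;

	RTE_SET_USED(dev_id);
	RTE_SET_USED(ev);
	(*flushed)++;
}

static void
stop_with_flush(uint8_t dev_id, unsigned int *flushed)
{
	/* events still queued in the device are passed to
	 * count_flushed() while rte_event_dev_stop() drains them */
	rte_event_dev_stop_flush_callback_register(dev_id,
			count_flushed, flushed);
	rte_event_dev_stop(dev_id);
}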
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index d1825b4f..18293e96 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -119,6 +119,16 @@ do { \
} while (0)
#endif
+struct ssovf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_ssovfs; /* Total sso groups available in domain */
+ uint8_t total_ssowvfs; /* Total sso hws available in domain */
+};
+
+enum ssovf_type {
+ OCTEONTX_SSO_GROUP, /* SSO group vf */
+ OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
+};
struct ssovf_evdev {
uint8_t max_event_queues;
@@ -164,8 +174,13 @@ uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
uint64_t timeout_ticks);
uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
uint16_t nb_events, uint64_t timeout_ticks);
-void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+
+typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg);
void ssows_reset(struct ssows *ws);
+int ssovf_info(struct ssovf_info *info);
+void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
int test_eventdev_octeontx(void);
#endif /* __SSOVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/ssovf_evdev_selftest.c b/drivers/event/octeontx/ssovf_evdev_selftest.c
index 5e012a95..239362fc 100644
--- a/drivers/event/octeontx/ssovf_evdev_selftest.c
+++ b/drivers/event/octeontx/ssovf_evdev_selftest.c
@@ -688,6 +688,40 @@ test_multi_queue_enq_multi_port_deq(void)
nr_ports, 0xff /* invalid */);
}
+static void
+flush(uint8_t dev_id, struct rte_event event, void *arg)
+{
+ unsigned int *count = arg;
+
+ RTE_SET_USED(dev_id);
+ if (event.event_type == RTE_EVENT_TYPE_CPU)
+ *count = *count + 1;
+}
+
+static int
+test_dev_stop_flush(void)
+{
+ unsigned int total_events = MAX_EVENTS, count = 0;
+ int ret;
+
+ ret = generate_random_events(total_events);
+ if (ret)
+ return -1;
+
+ ret = rte_event_dev_stop_flush_callback_register(evdev, flush, &count);
+ if (ret)
+ return -2;
+ rte_event_dev_stop(evdev);
+ ret = rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL);
+ if (ret)
+ return -3;
+ RTE_TEST_ASSERT_EQUAL(total_events, count,
+ "count mismatch total_events=%d count=%d",
+ total_events, count);
+ return 0;
+}
+
static int
validate_queue_to_port_single_link(uint32_t index, uint8_t port,
struct rte_event *ev)
@@ -1415,6 +1449,8 @@ test_eventdev_octeontx(void)
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_multi_queue_enq_single_port_deq);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
+ test_dev_stop_flush);
+ OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_multi_queue_enq_multi_port_deq);
OCTEONTX_TEST_RUN(eventdev_setup, eventdev_teardown,
test_queue_to_port_single_link);
diff --git a/drivers/mempool/octeontx/octeontx_ssovf.c b/drivers/event/octeontx/ssovf_probe.c
index 97b24066..b3db596d 100644
--- a/drivers/mempool/octeontx/octeontx_ssovf.c
+++ b/drivers/event/octeontx/ssovf_probe.c
@@ -10,7 +10,7 @@
#include <rte_bus_pci.h>
#include "octeontx_mbox.h"
-#include "octeontx_pool_logs.h"
+#include "ssovf_evdev.h"
#define PCI_VENDOR_ID_CAVIUM 0x177D
#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
@@ -52,7 +52,7 @@ static struct ssodev sdev;
/* Interface functions */
int
-octeontx_ssovf_info(struct octeontx_ssovf_info *info)
+ssovf_info(struct ssovf_info *info)
{
uint8_t i;
uint16_t domain;
@@ -97,7 +97,7 @@ octeontx_ssovf_info(struct octeontx_ssovf_info *info)
}
void*
-octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar)
+ssovf_bar(enum ssovf_type type, uint8_t id, uint8_t bar)
{
if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
type > OCTEONTX_SSO_HWS)
@@ -142,6 +142,7 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
uint16_t vfid;
struct ssowvf_res *res;
struct ssowvf_identify *id;
+ uint8_t *ram_mbox_base;
RTE_SET_USED(pci_drv);
@@ -180,6 +181,14 @@ ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
res->domain = id->domain;
sdev.total_ssowvfs++;
+ if (vfid == 0) {
+ ram_mbox_base = ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
+ if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base)) {
+ mbox_log_err("Invalid Failed to set ram mbox base");
+ return -EINVAL;
+ }
+ }
+
rte_wmb();
mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
res->vfid, sdev.total_ssowvfs);
@@ -213,6 +222,7 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
uint16_t vfid;
uint8_t *idreg;
struct ssovf_res *res;
+ uint8_t *reg;
RTE_SET_USED(pci_drv);
@@ -246,6 +256,15 @@ ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
res->domain = val & 0xffff;
sdev.total_ssovfs++;
+ if (vfid == 0) {
+ reg = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
+ reg += SSO_VHGRP_PF_MBOX(1);
+ if (octeontx_mbox_set_reg(reg)) {
+ mbox_log_err("Invalid Failed to set mbox_reg");
+ return -EINVAL;
+ }
+ }
+
rte_wmb();
mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
res->vfid, sdev.total_ssovfs);
diff --git a/drivers/event/octeontx/ssovf_worker.c b/drivers/event/octeontx/ssovf_worker.c
index 753c1e9f..fffa9024 100644
--- a/drivers/event/octeontx/ssovf_worker.c
+++ b/drivers/event/octeontx/ssovf_worker.c
@@ -198,13 +198,15 @@ ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
}
void
-ssows_flush_events(struct ssows *ws, uint8_t queue_id)
+ssows_flush_events(struct ssows *ws, uint8_t queue_id,
+ ssows_handle_event_t fn, void *arg)
{
uint32_t reg_off;
- uint64_t aq_cnt = 1;
- uint64_t cq_ds_cnt = 1;
- uint64_t enable, get_work0, get_work1;
- uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+ struct rte_event ev;
+ uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
+ uint64_t get_work0, get_work1;
+ uint64_t sched_type_queue;
+ uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
enable = ssovf_read64(base + SSO_VHGRP_QCTL);
if (!enable)
@@ -219,11 +221,23 @@ ssows_flush_events(struct ssows *ws, uint8_t queue_id)
cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
/* Extract cq and ds count */
cq_ds_cnt &= 0x1FFF1FFF0000;
+
ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
- }
- RTE_SET_USED(get_work0);
- RTE_SET_USED(get_work1);
+ sched_type_queue = (get_work0 >> 32) & 0xfff;
+ ws->cur_tt = sched_type_queue & 0x3;
+ ws->cur_grp = sched_type_queue >> 2;
+ sched_type_queue = sched_type_queue << 38;
+ ev.event = sched_type_queue | (get_work0 & 0xffffffff);
+ if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
+ ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
+ (ev.event >> 20) & 0x7F);
+ else
+ ev.u64 = get_work1;
+
+ if (fn != NULL && ev.u64 != 0)
+ fn(arg, ev);
+ }
}
void
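
ssows_flush_events() now reconstructs a full rte_event from the two get_work registers before invoking the flush callback. The decoding it performs, annotated as a standalone helper (field layout as used by the code above; a sketch, not driver API):

#include <stdint.h>

static inline uint64_t
decode_get_work0(uint64_t get_work0, uint8_t *tt, uint16_t *grp)
{
	/* bits [43:32]: 2-bit schedule type plus SSO group */
	uint64_t sched_type_queue = (get_work0 >> 32) & 0xfff;

	*tt = sched_type_queue & 0x3;	/* current tag type */
	*grp = sched_type_queue >> 2;	/* current group */

	/* repack into the rte_event word: type+queue at bit 38,
	 * tag/flow id from the low 32 bits */
	return (sched_type_queue << 38) | (get_work0 & 0xffffffff);
}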
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index d55018a9..7c7306b5 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -28,11 +28,11 @@ ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
{
struct rte_mbuf *mbuf;
octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
- rte_prefetch_non_temporal(wqe);
/* Get mbuf from wqe */
mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
OCTTX_PACKET_WQE_SKIP);
+ rte_prefetch_non_temporal(mbuf);
mbuf->packet_type =
ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
new file mode 100644
index 00000000..abbc9a77
--- /dev/null
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "timvf_evdev.h"
+
+int otx_logtype_timvf;
+
+RTE_INIT(otx_timvf_init_log)
+{
+ otx_logtype_timvf = rte_log_register("pmd.event.octeontx.timer");
+ if (otx_logtype_timvf >= 0)
+ rte_log_set_level(otx_logtype_timvf, RTE_LOG_NOTICE);
+}
+
+struct __rte_packed timvf_mbox_dev_info {
+ uint64_t ring_active[4];
+ uint64_t clk_freq;
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+static int
+timvf_mbox_dev_info_get(struct timvf_mbox_dev_info *info)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct timvf_mbox_dev_info);
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_GET_DEV_INFO;
+ hdr.vfid = 0; /* TIM DEV is always 0. TIM RING ID changes. */
+
+ memset(info, 0, len);
+ return octeontx_mbox_send(&hdr, NULL, 0, info, len);
+}
+
+static void
+timvf_ring_info_get(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer_adapter_info *adptr_info)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ adptr_info->max_tmo_ns = timr->max_tout;
+ adptr_info->min_resolution_ns = timr->tck_nsec;
+ rte_memcpy(&adptr_info->conf, &adptr->data->conf,
+ sizeof(struct rte_event_timer_adapter_conf));
+}
+
+static int
+timvf_ring_conf_set(struct timvf_ctrl_reg *rctl, uint8_t ring_id)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct timvf_ctrl_reg);
+ int ret;
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_SET_RING_INFO;
+ hdr.vfid = ring_id;
+
+ ret = octeontx_mbox_send(&hdr, rctl, len, NULL, 0);
+ if (ret < 0 || hdr.res_code != MBOX_RET_SUCCESS)
+ return -EACCES;
+ return 0;
+}
+
+static int
+timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+
+ hdr.coproc = TIM_COPROC;
+ hdr.msg = TIM_RING_START_CYC_GET;
+ hdr.vfid = ring_id;
+ *now = 0;
+ return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
+}
+
+static int
+optimize_bucket_parameters(struct timvf_ring *timr)
+{
+ uint32_t hbkts;
+ uint32_t lbkts;
+ uint64_t tck_nsec;
+
+ hbkts = rte_align32pow2(timr->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);
+
+ if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+ hbkts = 0;
+
+ lbkts = rte_align32prevpow2(timr->nb_bkts);
+ tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);
+
+	if ((tck_nsec < 1000 || lbkts > TIM_MAX_BUCKETS))
+ lbkts = 0;
+
+ if (!hbkts && !lbkts)
+ return 0;
+
+ if (!hbkts) {
+ timr->nb_bkts = lbkts;
+ goto end;
+ } else if (!lbkts) {
+ timr->nb_bkts = hbkts;
+ goto end;
+ }
+
+ timr->nb_bkts = (hbkts - timr->nb_bkts) <
+ (timr->nb_bkts - lbkts) ? hbkts : lbkts;
+end:
+ timr->get_target_bkt = bkt_and;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
+ (timr->nb_bkts - 1)), 10);
+ return 1;
+}
+
+static int
+timvf_ring_start(const struct rte_event_timer_adapter *adptr)
+{
+ int ret;
+ uint8_t use_fpa = 0;
+ uint64_t interval;
+ uintptr_t pool;
+ struct timvf_ctrl_reg rctrl;
+ struct timvf_mbox_dev_info dinfo;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_mbox_dev_info_get(&dinfo);
+ if (ret < 0 || ret != sizeof(struct timvf_mbox_dev_info))
+ return -EINVAL;
+
+ /* Calculate the interval cycles according to clock source. */
+ switch (timr->clk_src) {
+ case TIM_CLK_SRC_SCLK:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ case TIM_CLK_SRC_GPIO:
+		/* GPIO is an external clock, so no interval is derived
+		 * from tck_nsec.
+		 */
+ interval = 0;
+ break;
+ case TIM_CLK_SRC_GTI:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ case TIM_CLK_SRC_PTP:
+ interval = NSEC2CLK(timr->tck_nsec, dinfo.clk_freq);
+ break;
+ default:
+ timvf_log_err("Unsupported clock source configured %d",
+ timr->clk_src);
+ return -EINVAL;
+ }
+
+ if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+ use_fpa = 1;
+
+	/* CTRL0 register. */
+ rctrl.rctrl0 = interval;
+
+	/* CTRL1 register. */
+ rctrl.rctrl1 = (uint64_t)(timr->clk_src) << 51 |
+ 1ull << 48 /* LOCK_EN (Enable hw bucket lock mechanism) */ |
+ 1ull << 47 /* ENA */ |
+ 1ull << 44 /* ENA_LDWB */ |
+ (timr->nb_bkts - 1);
+
+ rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
+
+ if (use_fpa) {
+ pool = (uintptr_t)((struct rte_mempool *)
+ timr->chunk_pool)->pool_id;
+ ret = octeontx_fpa_bufpool_gaura(pool);
+ if (ret < 0) {
+ timvf_log_dbg("Unable to get gaura id");
+ ret = -ENOMEM;
+ goto error;
+ }
+ timvf_write64((uint64_t)ret,
+ (uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+ } else {
+ rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
+ }
+
+ timvf_write64((uintptr_t)timr->bkt,
+ (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+ timvf_set_chunk_refill(timr, use_fpa);
+ if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
+ ret = -EACCES;
+ goto error;
+ }
+
+ if (timvf_get_start_cyc(&timr->ring_start_cyc,
+ timr->tim_ring_id) < 0) {
+ ret = -EACCES;
+ goto error;
+ }
+ timr->tck_int = NSEC2CLK(timr->tck_nsec, rte_get_timer_hz());
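+	/* Precompute the reciprocal so fast-path code can divide
+	 * by tck_int cheaply.
+	 */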
+ timr->fast_div = rte_reciprocal_value_u64(timr->tck_int);
+	timvf_log_info("nb_bkts %d min_ns %"PRIu64" min_cyc %"PRIu64
+			" maxtmo %"PRIu64,
+ timr->nb_bkts, timr->tck_nsec, interval,
+ timr->max_tout);
+
+ return 0;
+error:
+ rte_free(timr->bkt);
+ rte_mempool_free(timr->chunk_pool);
+ return ret;
+}
+
+static int
+timvf_ring_stop(const struct rte_event_timer_adapter *adptr)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct timvf_ctrl_reg rctrl = {0};
+ rctrl.rctrl0 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL0);
+ rctrl.rctrl1 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL1);
+ rctrl.rctrl1 &= ~(1ull << 47); /* Disable */
+ rctrl.rctrl2 = timvf_read64((uint8_t *)timr->vbar0 + TIM_VRING_CTL2);
+
+ if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id))
+ return -EACCES;
+ return 0;
+}
+
+static int
+timvf_ring_create(struct rte_event_timer_adapter *adptr)
+{
+ char pool_name[25];
+ int ret;
+ uint64_t nb_timers;
+ struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
+ struct timvf_ring *timr;
+ struct timvf_info tinfo;
+ const char *mempool_ops;
+ unsigned int mp_flags = 0;
+
+ if (timvf_info(&tinfo) < 0)
+ return -ENODEV;
+
+ if (adptr->data->id >= tinfo.total_timvfs)
+ return -ENODEV;
+
+ timr = rte_zmalloc("octeontx_timvf_priv",
+ sizeof(struct timvf_ring), 0);
+ if (timr == NULL)
+ return -ENOMEM;
+
+ adptr->data->adapter_priv = timr;
+ /* Check config parameters. */
+ if ((rcfg->clk_src != RTE_EVENT_TIMER_ADAPTER_CPU_CLK) &&
+ (!rcfg->timer_tick_ns ||
+ rcfg->timer_tick_ns < TIM_MIN_INTERVAL)) {
+		timvf_log_err("Timer tick interval too low");
+ goto cfg_err;
+ }
+
+ timr->clk_src = (int) rcfg->clk_src;
+ timr->tim_ring_id = adptr->data->id;
+ timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
+ timr->max_tout = rcfg->max_tmo_ns;
+ timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
+ timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
+ timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
+ nb_timers = rcfg->nb_timers;
+ timr->get_target_bkt = bkt_mod;
+
+ timr->nb_chunks = nb_timers / nb_chunk_slots;
+
+ /* Try to optimize the bucket parameters. */
+ if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
+ && !rte_is_power_of_2(timr->nb_bkts)) {
+ if (optimize_bucket_parameters(timr)) {
+ timvf_log_info("Optimized configured values");
+ timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts);
+ timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
+ } else
+			timvf_log_info("Failed to optimize configured values");
+ }
+
+ if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
+ mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+ timvf_log_info("Using single producer mode");
+ }
+
+ timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
+ (timr->nb_bkts) * sizeof(struct tim_mem_bucket),
+ 0);
+ if (timr->bkt == NULL)
+ goto mem_err;
+
+ snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
+ timr->tim_ring_id);
+ timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
+ timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
+ mp_flags);
+
+ if (!timr->chunk_pool) {
+ rte_free(timr->bkt);
+ timvf_log_err("Unable to create chunkpool.");
+ return -ENOMEM;
+ }
+
+ mempool_ops = rte_mbuf_best_mempool_ops();
+ ret = rte_mempool_set_ops_byname(timr->chunk_pool,
+ mempool_ops, NULL);
+
+ if (ret != 0) {
+ timvf_log_err("Unable to set chunkpool ops.");
+ goto mem_err;
+ }
+
+ ret = rte_mempool_populate_default(timr->chunk_pool);
+ if (ret < 0) {
+		timvf_log_err("Unable to populate chunkpool.");
+ goto mem_err;
+ }
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT);
+ timvf_write64(0, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_INT_W1S);
+ timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1C);
+ timvf_write64(0x7, (uint8_t *)timr->vbar0 + TIM_VF_NRSPERR_ENA_W1S);
+
+ return 0;
+mem_err:
+ rte_free(timr);
+ return -ENOMEM;
+cfg_err:
+ rte_free(timr);
+ return -EINVAL;
+}
+
+static int
+timvf_ring_free(struct rte_event_timer_adapter *adptr)
+{
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ rte_mempool_free(timr->chunk_pool);
+ rte_free(timr->bkt);
+ rte_free(adptr->data->adapter_priv);
+ return 0;
+}
+
+static int
+timvf_stats_get(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ struct timvf_ring *timr = adapter->data->adapter_priv;
+ uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
+
+ stats->evtim_exp_count = timr->tim_arm_cnt;
+ stats->ev_enq_count = timr->tim_arm_cnt;
+ stats->adapter_tick_count = rte_reciprocal_divide_u64(bkt_cyc,
+ &timr->fast_div);
+ return 0;
+}
+
+static int
+timvf_stats_reset(const struct rte_event_timer_adapter *adapter)
+{
+ struct timvf_ring *timr = adapter->data->adapter_priv;
+
+ timr->tim_arm_cnt = 0;
+ return 0;
+}
+
+static struct rte_event_timer_adapter_ops timvf_ops = {
+ .init = timvf_ring_create,
+ .uninit = timvf_ring_free,
+ .start = timvf_ring_start,
+ .stop = timvf_ring_stop,
+ .get_info = timvf_ring_info_get,
+};
+
+int
+timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
+ uint8_t enable_stats)
+{
+ RTE_SET_USED(dev);
+
+ if (enable_stats) {
+ timvf_ops.stats_get = timvf_stats_get;
+ timvf_ops.stats_reset = timvf_stats_reset;
+ }
+
+ if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
+ timvf_ops.arm_burst = enable_stats ?
+ timvf_timer_arm_burst_sp_stats :
+ timvf_timer_arm_burst_sp;
+ else
+ timvf_ops.arm_burst = enable_stats ?
+ timvf_timer_arm_burst_mp_stats :
+ timvf_timer_arm_burst_mp;
+
+ timvf_ops.arm_tmo_tick_burst = enable_stats ?
+ timvf_timer_arm_tmo_brst_stats :
+ timvf_timer_arm_tmo_brst;
+ timvf_ops.cancel_burst = timvf_timer_cancel_burst;
+ *caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
+ *ops = &timvf_ops;
+ return 0;
+}
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
new file mode 100644
index 00000000..0185593f
--- /dev/null
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef __TIMVF_EVDEV_H__
+#define __TIMVF_EVDEV_H__
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_eventdev.h>
+#include <rte_event_timer_adapter.h>
+#include <rte_event_timer_adapter_pmd.h>
+#include <rte_io.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_mempool.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_prefetch.h>
+#include <rte_reciprocal.h>
+
+#include <octeontx_mbox.h>
+#include <octeontx_fpavf.h>
+
+#define timvf_log(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
+ "[%s] %s() " fmt "\n", \
+ RTE_STR(event_timer_octeontx), __func__, ## args)
+
+#define timvf_log_info(fmt, ...) timvf_log(INFO, fmt, ##__VA_ARGS__)
+#define timvf_log_dbg(fmt, ...) timvf_log(DEBUG, fmt, ##__VA_ARGS__)
+#define timvf_log_err(fmt, ...) timvf_log(ERR, fmt, ##__VA_ARGS__)
+#define timvf_func_trace timvf_log_dbg
+
+#define TIM_COPROC (8)
+#define TIM_GET_DEV_INFO (1)
+#define TIM_GET_RING_INFO (2)
+#define TIM_SET_RING_INFO (3)
+#define TIM_RING_START_CYC_GET (4)
+
+#define TIM_MAX_RINGS (64)
+#define TIM_DEV_PER_NODE (1)
+#define TIM_VF_PER_DEV (64)
+#define TIM_RING_PER_DEV (TIM_VF_PER_DEV)
+#define TIM_RING_NODE_SHIFT (6)
+#define TIM_RING_MASK ((TIM_RING_PER_DEV) - 1)
+#define TIM_RING_INVALID (-1)
+
+#define TIM_MIN_INTERVAL (1E3)
+#define TIM_MAX_INTERVAL ((1ull << 32) - 1)
+#define TIM_MAX_BUCKETS (1ull << 20)
+#define TIM_CHUNK_SIZE (4096)
+#define TIM_MAX_CHUNKS_PER_BUCKET (1ull << 32)
+
+#define TIMVF_MAX_BURST (8)
+
+/* TIM VF Control/Status registers (CSRs): */
+/* VF_BAR0: */
+#define TIM_VF_NRSPERR_INT (0x0)
+#define TIM_VF_NRSPERR_INT_W1S (0x8)
+#define TIM_VF_NRSPERR_ENA_W1C (0x10)
+#define TIM_VF_NRSPERR_ENA_W1S (0x18)
+#define TIM_VRING_FR_RN_CYCLES (0x20)
+#define TIM_VRING_FR_RN_GPIOS (0x28)
+#define TIM_VRING_FR_RN_GTI (0x30)
+#define TIM_VRING_FR_RN_PTP (0x38)
+#define TIM_VRING_CTL0 (0x40)
+#define TIM_VRING_CTL1 (0x50)
+#define TIM_VRING_CTL2 (0x60)
+#define TIM_VRING_BASE (0x100)
+#define TIM_VRING_AURA (0x108)
+#define TIM_VRING_REL (0x110)
+
+#define TIM_CTL1_W0_S_BUCKET 20
+#define TIM_CTL1_W0_M_BUCKET ((1ull << (40 - 20)) - 1)
+
+#define TIM_BUCKET_W1_S_NUM_ENTRIES (0) /*Shift*/
+#define TIM_BUCKET_W1_M_NUM_ENTRIES ((1ull << (32 - 0)) - 1)
+#define TIM_BUCKET_W1_S_SBT (32)
+#define TIM_BUCKET_W1_M_SBT ((1ull << (33 - 32)) - 1)
+#define TIM_BUCKET_W1_S_HBT (33)
+#define TIM_BUCKET_W1_M_HBT ((1ull << (34 - 33)) - 1)
+#define TIM_BUCKET_W1_S_BSK (34)
+#define TIM_BUCKET_W1_M_BSK ((1ull << (35 - 34)) - 1)
+#define TIM_BUCKET_W1_S_LOCK (40)
+#define TIM_BUCKET_W1_M_LOCK ((1ull << (48 - 40)) - 1)
+#define TIM_BUCKET_W1_S_CHUNK_REMAINDER (48)
+#define TIM_BUCKET_W1_M_CHUNK_REMAINDER ((1ull << (64 - 48)) - 1)
+
+#define TIM_BUCKET_SEMA \
+ (TIM_BUCKET_CHUNK_REMAIN)
+
+#define TIM_BUCKET_CHUNK_REMAIN \
+ (TIM_BUCKET_W1_M_CHUNK_REMAINDER << TIM_BUCKET_W1_S_CHUNK_REMAINDER)
+
+#define TIM_BUCKET_LOCK \
+ (TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK)
+
+#define TIM_BUCKET_SEMA_WLOCK \
+ (TIM_BUCKET_CHUNK_REMAIN | (1ull << TIM_BUCKET_W1_S_LOCK))
+
+#define NSEC_PER_SEC 1E9
+#define NSEC2CLK(__ns, __freq) (((__ns) * (__freq)) / NSEC_PER_SEC)
+#define CLK2NSEC(__clk, __freq) (((__clk) * NSEC_PER_SEC) / (__freq))
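+/*
+ * Example: with __freq = 1 GHz and __ns = 1000,
+ * NSEC2CLK(1000, 1E9) = (1000 * 1E9) / 1E9 = 1000 clock cycles.
+ */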
+
+#define timvf_read64 rte_read64_relaxed
+#define timvf_write64 rte_write64_relaxed
+
+#define TIMVF_ENABLE_STATS_ARG ("timvf_stats")
+
+extern int otx_logtype_timvf;
+static const uint16_t nb_chunk_slots = (TIM_CHUNK_SIZE / 16) - 1;
+
+struct timvf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_timvfs; /* Total timvf available in domain */
+};
+
+enum timvf_clk_src {
+ TIM_CLK_SRC_SCLK = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ TIM_CLK_SRC_GPIO = RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
+ TIM_CLK_SRC_GTI = RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
+ TIM_CLK_SRC_PTP = RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+};
+
+/* TIM_MEM_BUCKET */
+struct tim_mem_bucket {
+ uint64_t first_chunk;
+ union {
+ uint64_t w1;
+ struct {
+ uint32_t nb_entry;
+ uint8_t sbt:1;
+ uint8_t hbt:1;
+ uint8_t bsk:1;
+ uint8_t rsvd:5;
+ uint8_t lock;
+ int16_t chunk_remainder;
+ };
+ };
+ uint64_t current_chunk;
+ uint64_t pad;
+} __rte_packed __rte_aligned(8);
+
+struct tim_mem_entry {
+ uint64_t w0;
+ uint64_t wqe;
+} __rte_packed;
+
+struct timvf_ctrl_reg {
+ uint64_t rctrl0;
+ uint64_t rctrl1;
+ uint64_t rctrl2;
+ uint8_t use_pmu;
+} __rte_packed;
+
+struct timvf_ring;
+
+typedef uint32_t (*bkt_id)(const uint32_t bkt_tcks, const uint32_t nb_bkts);
+typedef struct tim_mem_entry * (*refill_chunk)(
+ struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr);
+
+struct timvf_ring {
+ bkt_id get_target_bkt;
+ refill_chunk refill_chunk;
+ struct rte_reciprocal_u64 fast_div;
+ uint64_t ring_start_cyc;
+ uint32_t nb_bkts;
+ struct tim_mem_bucket *bkt;
+ void *chunk_pool;
+ uint64_t tck_int;
+ volatile uint64_t tim_arm_cnt;
+ uint64_t tck_nsec;
+ void *vbar0;
+ void *bkt_pos;
+ uint64_t max_tout;
+ uint64_t nb_chunks;
+ enum timvf_clk_src clk_src;
+ uint16_t tim_ring_id;
+} __rte_cache_aligned;
+
+static __rte_always_inline uint32_t
+bkt_mod(const uint32_t rel_bkt, const uint32_t nb_bkts)
+{
+ return rel_bkt % nb_bkts;
+}
+
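+/* Valid only when nb_bkts is a power of two; e.g. for nb_bkts = 1024,
+ * (rel_bkt & 1023) == (rel_bkt % 1024).
+ */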
+static __rte_always_inline uint32_t
+bkt_and(uint32_t rel_bkt, uint32_t nb_bkts)
+{
+ return rel_bkt & (nb_bkts - 1);
+}
+
+int timvf_info(struct timvf_info *tinfo);
+void *timvf_bar(uint8_t id, uint8_t bar);
+int timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
+ uint32_t *caps, const struct rte_event_timer_adapter_ops **ops,
+ uint8_t enable_stats);
+uint16_t timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_sp_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_burst_mp_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers);
+uint16_t timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers);
+uint16_t timvf_timer_arm_tmo_brst_stats(
+ const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers);
+void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);
+
+#endif /* __TIMVF_EVDEV_H__ */
diff --git a/drivers/event/octeontx/timvf_probe.c b/drivers/event/octeontx/timvf_probe.c
new file mode 100644
index 00000000..08dbd2be
--- /dev/null
+++ b/drivers/event/octeontx/timvf_probe.c
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include <octeontx_mbox.h>
+
+#include "ssovf_evdev.h"
+#include "timvf_evdev.h"
+
+#ifndef PCI_VENDOR_ID_CAVIUM
+#define PCI_VENDOR_ID_CAVIUM (0x177D)
+#endif
+
+#define PCI_DEVICE_ID_OCTEONTX_TIM_VF (0xA051)
+#define TIM_MAX_RINGS (64)
+
+struct timvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct timdev {
+ uint8_t total_timvfs;
+ struct timvf_res rings[TIM_MAX_RINGS];
+};
+
+static struct timdev tdev;
+
+int
+timvf_info(struct timvf_info *tinfo)
+{
+ int i;
+ struct ssovf_info info;
+
+ if (tinfo == NULL)
+ return -EINVAL;
+
+ if (!tdev.total_timvfs)
+ return -ENODEV;
+
+ if (ssovf_info(&info) < 0)
+ return -EINVAL;
+
+ for (i = 0; i < tdev.total_timvfs; i++) {
+ if (info.domain != tdev.rings[i].domain) {
+ timvf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, tdev.rings[i].vfid,
+ info.domain, tdev.rings[i].domain,
+ tdev.rings[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ tinfo->total_timvfs = tdev.total_timvfs;
+ tinfo->domain = info.domain;
+ return 0;
+}
+
+void*
+timvf_bar(uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return NULL;
+
+	if (id >= tdev.total_timvfs)
+ return NULL;
+
+ switch (bar) {
+ case 0:
+ return tdev.rings[id].bar0;
+ case 4:
+ return tdev.rings[id].bar4;
+ default:
+ return NULL;
+ }
+}
+
+static int
+timvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ struct timvf_res *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ timvf_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ val = rte_read64((uint8_t *)pci_dev->mem_resource[0].addr +
+ 0x100 /* TIM_VRINGX_BASE */);
+ vfid = (val >> 23) & 0xff;
+ if (vfid >= TIM_MAX_RINGS) {
+ timvf_log_err("Invalid vfid(%d/%d)", vfid, TIM_MAX_RINGS);
+ return -EINVAL;
+ }
+
+ res = &tdev.rings[tdev.total_timvfs];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = (val >> 7) & 0xffff;
+ tdev.total_timvfs++;
+ rte_wmb();
+
+ timvf_log_dbg("Domain=%d VFid=%d bar0 %p total_timvfs=%d", res->domain,
+ res->vfid, pci_dev->mem_resource[0].addr,
+ tdev.total_timvfs);
+ return 0;
+}
+
+
+static const struct rte_pci_id pci_timvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_TIM_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_timvf = {
+ .id_table = pci_timvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = timvf_probe,
+ .remove = NULL,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_timvf, pci_timvf);
diff --git a/drivers/event/octeontx/timvf_worker.c b/drivers/event/octeontx/timvf_worker.c
new file mode 100644
index 00000000..50790e19
--- /dev/null
+++ b/drivers/event/octeontx/timvf_worker.c
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "timvf_worker.h"
+
+static inline int
+timvf_timer_reg_checks(const struct timvf_ring * const timr,
+ struct rte_event_timer * const tim)
+{
+ if (unlikely(tim->state)) {
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ rte_errno = EALREADY;
+ goto fail;
+ }
+
+ if (unlikely(!tim->timeout_ticks ||
+ tim->timeout_ticks >= timr->nb_bkts)) {
+ tim->state = tim->timeout_ticks ? RTE_EVENT_TIMER_ERROR_TOOLATE
+ : RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ rte_errno = EINVAL;
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return -EINVAL;
+}
+
+static inline void
+timvf_format_event(const struct rte_event_timer * const tim,
+ struct tim_mem_entry * const entry)
+{
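+	/*
+	 * w0 appears to merge the event's queue/sched bits (47:38 of
+	 * ev.event, shifted down by 6) with its low 36 bits, matching
+	 * the TIM entry format expected by hardware.
+	 */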
+ entry->w0 = (tim->ev.event & 0xFFC000000000) >> 6 |
+ (tim->ev.event & 0xFFFFFFFFF);
+ entry->wqe = tim->ev.u64;
+}
+
+uint16_t
+timvf_timer_cancel_burst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ RTE_SET_USED(adptr);
+ int ret;
+ uint16_t index;
+
+ for (index = 0; index < nb_timers; index++) {
+ if (tim[index]->state == RTE_EVENT_TIMER_CANCELED) {
+ rte_errno = EALREADY;
+ break;
+ }
+
+ if (tim[index]->state != RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EINVAL;
+ break;
+ }
+ ret = timvf_rem_entry(tim[index]);
+ if (ret) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_sp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t index;
+ struct tim_mem_entry entry;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ for (index = 0; index < nb_timers; index++) {
+ if (timvf_timer_reg_checks(timr, tim[index]))
+ break;
+
+ timvf_format_event(tim[index], &entry);
+ ret = timvf_add_entry_sp(timr, tim[index]->timeout_ticks,
+ tim[index], &entry);
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_sp_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ uint16_t ret;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_timer_arm_burst_sp(adptr, tim, nb_timers);
+ timr->tim_arm_cnt += ret;
+
+ return ret;
+}
+
+uint16_t
+timvf_timer_arm_burst_mp(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t index;
+ struct tim_mem_entry entry;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ for (index = 0; index < nb_timers; index++) {
+ if (timvf_timer_reg_checks(timr, tim[index]))
+ break;
+ timvf_format_event(tim[index], &entry);
+ ret = timvf_add_entry_mp(timr, tim[index]->timeout_ticks,
+ tim[index], &entry);
+ if (unlikely(ret)) {
+ rte_errno = -ret;
+ break;
+ }
+ }
+
+ return index;
+}
+
+uint16_t
+timvf_timer_arm_burst_mp_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint16_t nb_timers)
+{
+ uint16_t ret;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ ret = timvf_timer_arm_burst_mp(adptr, tim, nb_timers);
+ timr->tim_arm_cnt += ret;
+
+ return ret;
+}
+
+uint16_t
+timvf_timer_arm_tmo_brst(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers)
+{
+ int ret;
+ uint16_t set_timers = 0;
+ uint16_t idx;
+ uint16_t arr_idx = 0;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct tim_mem_entry entry[TIMVF_MAX_BURST] __rte_cache_aligned;
+
+ if (unlikely(!timeout_tick || timeout_tick >= timr->nb_bkts)) {
+ const enum rte_event_timer_state state = timeout_tick ?
+ RTE_EVENT_TIMER_ERROR_TOOLATE :
+ RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ for (idx = 0; idx < nb_timers; idx++)
+ tim[idx]->state = state;
+ rte_errno = EINVAL;
+ return 0;
+ }
+
+ while (arr_idx < nb_timers) {
+ for (idx = 0; idx < TIMVF_MAX_BURST && (arr_idx < nb_timers);
+ idx++, arr_idx++) {
+ timvf_format_event(tim[arr_idx], &entry[idx]);
+ }
+ ret = timvf_add_entry_brst(timr, timeout_tick, &tim[set_timers],
+ entry, idx);
+ set_timers += ret;
+ if (ret != idx)
+ break;
+ }
+
+ return set_timers;
+}
+
+
+uint16_t
+timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
+ struct rte_event_timer **tim, const uint64_t timeout_tick,
+ const uint16_t nb_timers)
+{
+ uint16_t set_timers;
+ struct timvf_ring *timr = adptr->data->adapter_priv;
+
+ set_timers = timvf_timer_arm_tmo_brst(adptr, tim, timeout_tick,
+ nb_timers);
+ timr->tim_arm_cnt += set_timers;
+
+ return set_timers;
+}
+
+void
+timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
+{
+ if (use_fpa)
+ timr->refill_chunk = timvf_refill_chunk_fpa;
+ else
+ timr->refill_chunk = timvf_refill_chunk_generic;
+}
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
new file mode 100644
index 00000000..dede1a4a
--- /dev/null
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -0,0 +1,443 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <rte_common.h>
+#include <rte_branch_prediction.h>
+
+#include "timvf_evdev.h"
+
+static inline int16_t
+timr_bkt_fetch_rem(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_CHUNK_REMAINDER) &
+ TIM_BUCKET_W1_M_CHUNK_REMAINDER;
+}
+
+static inline int16_t
+timr_bkt_get_rem(struct tim_mem_bucket *bktp)
+{
+ return __atomic_load_n(&bktp->chunk_remainder,
+ __ATOMIC_ACQUIRE);
+}
+
+static inline void
+timr_bkt_set_rem(struct tim_mem_bucket *bktp, uint16_t v)
+{
+ __atomic_store_n(&bktp->chunk_remainder, v,
+ __ATOMIC_RELEASE);
+}
+
+static inline void
+timr_bkt_sub_rem(struct tim_mem_bucket *bktp, uint16_t v)
+{
+ __atomic_fetch_sub(&bktp->chunk_remainder, v,
+ __ATOMIC_RELEASE);
+}
+
+static inline uint8_t
+timr_bkt_get_sbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT;
+}
+
+static inline uint64_t
+timr_bkt_set_sbt(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT;
+ return __atomic_fetch_or(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_clr_sbt(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_SBT << TIM_BUCKET_W1_S_SBT);
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint8_t
+timr_bkt_get_shbt(uint64_t w1)
+{
+ return ((w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT) |
+ ((w1 >> TIM_BUCKET_W1_S_SBT) & TIM_BUCKET_W1_M_SBT);
+}
+
+static inline uint8_t
+timr_bkt_get_hbt(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_HBT) & TIM_BUCKET_W1_M_HBT;
+}
+
+static inline uint8_t
+timr_bkt_get_bsk(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_BSK) & TIM_BUCKET_W1_M_BSK;
+}
+
+static inline uint64_t
+timr_bkt_clr_bsk(struct tim_mem_bucket *bktp)
+{
+	/* Clear everything except lock. */
+ const uint64_t v = TIM_BUCKET_W1_M_LOCK << TIM_BUCKET_W1_S_LOCK;
+ return __atomic_fetch_and(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_fetch_sema_lock(struct tim_mem_bucket *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA_WLOCK,
+ __ATOMIC_ACQ_REL);
+}
+
+static inline uint64_t
+timr_bkt_fetch_sema(struct tim_mem_bucket *bktp)
+{
+ return __atomic_fetch_add(&bktp->w1, TIM_BUCKET_SEMA,
+ __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+timr_bkt_inc_lock(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = 1ull << TIM_BUCKET_W1_S_LOCK;
+ return __atomic_fetch_add(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline void
+timr_bkt_dec_lock(struct tim_mem_bucket *bktp)
+{
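+	/* Adding 0xff to the 8-bit lock count wraps modulo 256,
+	 * i.e. it decrements the lock.
+	 */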
+ __atomic_add_fetch(&bktp->lock, 0xff, __ATOMIC_ACQ_REL);
+}
+
+static inline uint32_t
+timr_bkt_get_nent(uint64_t w1)
+{
+ return (w1 >> TIM_BUCKET_W1_S_NUM_ENTRIES) &
+ TIM_BUCKET_W1_M_NUM_ENTRIES;
+}
+
+static inline void
+timr_bkt_inc_nent(struct tim_mem_bucket *bktp)
+{
+ __atomic_add_fetch(&bktp->nb_entry, 1, __ATOMIC_RELAXED);
+}
+
+static inline void
+timr_bkt_add_nent(struct tim_mem_bucket *bktp, uint32_t v)
+{
+ __atomic_add_fetch(&bktp->nb_entry, v, __ATOMIC_RELAXED);
+}
+
+static inline uint64_t
+timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
+{
+ const uint64_t v = ~(TIM_BUCKET_W1_M_NUM_ENTRIES <<
+ TIM_BUCKET_W1_S_NUM_ENTRIES);
+ return __atomic_and_fetch(&bktp->w1, v, __ATOMIC_ACQ_REL);
+}
+
+static inline struct tim_mem_entry *
+timr_clr_bkt(struct timvf_ring * const timr, struct tim_mem_bucket * const bkt)
+{
+ struct tim_mem_entry *chunk;
+ struct tim_mem_entry *pnext;
+ chunk = ((struct tim_mem_entry *)(uintptr_t)bkt->first_chunk);
+ chunk = (struct tim_mem_entry *)(uintptr_t)(chunk + nb_chunk_slots)->w0;
+
+ while (chunk) {
+ pnext = (struct tim_mem_entry *)(uintptr_t)
+ ((chunk + nb_chunk_slots)->w0);
+ rte_mempool_put(timr->chunk_pool, chunk);
+ chunk = pnext;
+ }
+ return (struct tim_mem_entry *)(uintptr_t)bkt->first_chunk;
+}
+
+static inline int
+timvf_rem_entry(struct rte_event_timer *tim)
+{
+ uint64_t lock_sema;
+ struct tim_mem_entry *entry;
+ struct tim_mem_bucket *bkt;
+ if (tim->impl_opaque[1] == 0 ||
+ tim->impl_opaque[0] == 0)
+ return -ENOENT;
+
+ entry = (struct tim_mem_entry *)(uintptr_t)tim->impl_opaque[0];
+ if (entry->wqe != tim->ev.u64) {
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return -ENOENT;
+ }
+ bkt = (struct tim_mem_bucket *)(uintptr_t)tim->impl_opaque[1];
+ lock_sema = timr_bkt_inc_lock(bkt);
+ if (timr_bkt_get_shbt(lock_sema)
+ || !timr_bkt_get_nent(lock_sema)) {
+ timr_bkt_dec_lock(bkt);
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return -ENOENT;
+ }
+
+ entry->w0 = entry->wqe = 0;
+ timr_bkt_dec_lock(bkt);
+
+ tim->state = RTE_EVENT_TIMER_CANCELED;
+ tim->impl_opaque[1] = tim->impl_opaque[0] = 0;
+ return 0;
+}
+
+static inline struct tim_mem_entry *
+timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr)
+{
+ struct tim_mem_entry *chunk;
+
+ if (bkt->nb_entry || !bkt->first_chunk) {
+ if (unlikely(rte_mempool_get(timr->chunk_pool,
+ (void **)&chunk))) {
+ return NULL;
+ }
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+ bkt->current_chunk) +
+ nb_chunk_slots) =
+ (uintptr_t) chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t) chunk;
+ }
+ } else {
+ chunk = timr_clr_bkt(timr, bkt);
+ bkt->first_chunk = (uintptr_t)chunk;
+ }
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+
+ return chunk;
+}
+
+static inline struct tim_mem_entry *
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
+ struct timvf_ring * const timr)
+{
+ struct tim_mem_entry *chunk;
+
+ if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
+ return NULL;
+
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+ if (bkt->nb_entry) {
+ *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+ bkt->current_chunk) +
+ nb_chunk_slots) =
+ (uintptr_t) chunk;
+ } else {
+ bkt->first_chunk = (uintptr_t) chunk;
+ }
+
+ return chunk;
+}
+
+static inline struct tim_mem_bucket *
+timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
+{
+ const uint64_t bkt_cyc = rte_rdtsc() - timr->ring_start_cyc;
+ const uint32_t bucket = rte_reciprocal_divide_u64(bkt_cyc,
+ &timr->fast_div) + rel_bkt;
+ const uint32_t tbkt_id = timr->get_target_bkt(bucket,
+ timr->nb_bkts);
+ return &timr->bkt[tbkt_id];
+}
+
+/* Single producer functions. */
+static inline int
+timvf_add_entry_sp(struct timvf_ring * const timr, const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct tim_mem_entry * const pent)
+{
+ int16_t rem;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+__retry:
+	/* Get the bucket semaphore. */
+ lock_sema = timr_bkt_fetch_sema(bkt);
+ /* Bucket related checks. */
+ if (unlikely(timr_bkt_get_hbt(lock_sema)))
+ goto __retry;
+
+ /* Insert the work. */
+ rem = timr_bkt_fetch_rem(lock_sema);
+
+ if (!rem) {
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_set_rem(bkt, 0);
+ tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ bkt->current_chunk = (uintptr_t) chunk;
+ timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += nb_chunk_slots - rem;
+ }
+ /* Copy work entry. */
+ *chunk = *pent;
+ timr_bkt_inc_nent(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+ return 0;
+}
+
+/* Multi producer functions. */
+static inline int
+timvf_add_entry_mp(struct timvf_ring * const timr, const uint32_t rel_bkt,
+ struct rte_event_timer * const tim,
+ const struct tim_mem_entry * const pent)
+{
+ int16_t rem;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+__retry:
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+	/* Get the bucket semaphore and perform bucket-related checks. */
+ lock_sema = timr_bkt_fetch_sema_lock(bkt);
+ if (unlikely(timr_bkt_get_shbt(lock_sema))) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ rem = timr_bkt_fetch_rem(lock_sema);
+
+ if (rem < 0) {
+		/* Move on to a different bucket. */
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ } else if (!rem) {
+		/* Only one thread can be here. */
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_set_rem(bkt, 0);
+ timr_bkt_dec_lock(bkt);
+ tim->impl_opaque[0] = tim->impl_opaque[1] = 0;
+ tim->state = RTE_EVENT_TIMER_ERROR;
+ return -ENOMEM;
+ }
+ bkt->current_chunk = (uintptr_t) chunk;
+ timr_bkt_set_rem(bkt, nb_chunk_slots - 1);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += nb_chunk_slots - rem;
+ }
+ /* Copy work entry. */
+ *chunk = *pent;
+ timr_bkt_inc_nent(bkt);
+ timr_bkt_dec_lock(bkt);
+
+ tim->impl_opaque[0] = (uintptr_t)chunk;
+ tim->impl_opaque[1] = (uintptr_t)bkt;
+ tim->state = RTE_EVENT_TIMER_ARMED;
+ return 0;
+}
+
+static inline uint16_t
+timvf_cpy_wrk(uint16_t index, uint16_t cpy_lmt,
+ struct tim_mem_entry *chunk,
+ struct rte_event_timer ** const tim,
+ const struct tim_mem_entry * const ents,
+ const struct tim_mem_bucket * const bkt)
+{
+ for (; index < cpy_lmt; index++) {
+ *chunk = *(ents + index);
+ tim[index]->impl_opaque[0] = (uintptr_t)chunk++;
+ tim[index]->impl_opaque[1] = (uintptr_t)bkt;
+ tim[index]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ return index;
+}
+
+/* Burst mode functions */
+static inline int
+timvf_add_entry_brst(struct timvf_ring * const timr, const uint16_t rel_bkt,
+ struct rte_event_timer ** const tim,
+ const struct tim_mem_entry *ents,
+ const uint16_t nb_timers)
+{
+ int16_t rem;
+ int16_t crem;
+ uint8_t lock_cnt;
+ uint16_t index = 0;
+ uint16_t chunk_remainder;
+ uint64_t lock_sema;
+ struct tim_mem_bucket *bkt;
+ struct tim_mem_entry *chunk;
+
+__retry:
+ bkt = timvf_get_target_bucket(timr, rel_bkt);
+
+ /* Only one thread beyond this. */
+ lock_sema = timr_bkt_inc_lock(bkt);
+ lock_cnt = (uint8_t)
+ ((lock_sema >> TIM_BUCKET_W1_S_LOCK) & TIM_BUCKET_W1_M_LOCK);
+
+ if (lock_cnt) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ /* Bucket related checks. */
+ if (unlikely(timr_bkt_get_hbt(lock_sema))) {
+ timr_bkt_dec_lock(bkt);
+ goto __retry;
+ }
+
+ chunk_remainder = timr_bkt_fetch_rem(lock_sema);
+ rem = chunk_remainder - nb_timers;
+ if (rem < 0) {
+ crem = nb_chunk_slots - chunk_remainder;
+ if (chunk_remainder && crem) {
+ chunk = ((struct tim_mem_entry *)
+ (uintptr_t)bkt->current_chunk) + crem;
+
+ index = timvf_cpy_wrk(index, chunk_remainder,
+ chunk, tim, ents, bkt);
+ timr_bkt_sub_rem(bkt, chunk_remainder);
+ timr_bkt_add_nent(bkt, chunk_remainder);
+ }
+ rem = nb_timers - chunk_remainder;
+ ents = ents + chunk_remainder;
+
+ chunk = timr->refill_chunk(bkt, timr);
+ if (unlikely(chunk == NULL)) {
+ timr_bkt_dec_lock(bkt);
+ rte_errno = ENOMEM;
+ tim[index]->state = RTE_EVENT_TIMER_ERROR;
+ return crem;
+ }
+ *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+ bkt->current_chunk = (uintptr_t) chunk;
+
+ index = timvf_cpy_wrk(index, nb_timers, chunk, tim, ents, bkt);
+ timr_bkt_set_rem(bkt, nb_chunk_slots - rem);
+ timr_bkt_add_nent(bkt, rem);
+ } else {
+ chunk = (struct tim_mem_entry *)(uintptr_t)bkt->current_chunk;
+ chunk += (nb_chunk_slots - chunk_remainder);
+
+ index = timvf_cpy_wrk(index, nb_timers,
+ chunk, tim, ents, bkt);
+ timr_bkt_sub_rem(bkt, nb_timers);
+ timr_bkt_add_nent(bkt, nb_timers);
+ }
+
+ timr_bkt_dec_lock(bkt);
+ return nb_timers;
+}
diff --git a/drivers/event/opdl/opdl_evdev.c b/drivers/event/opdl/opdl_evdev.c
index 77083691..a4f0bc8b 100644
--- a/drivers/event/opdl/opdl_evdev.c
+++ b/drivers/event/opdl/opdl_evdev.c
@@ -607,7 +607,7 @@ set_do_test(const char *key __rte_unused, const char *value, void *opaque)
static int
opdl_probe(struct rte_vdev_device *vdev)
{
- static const struct rte_eventdev_ops evdev_opdl_ops = {
+ static struct rte_eventdev_ops evdev_opdl_ops = {
.dev_configure = opdl_dev_configure,
.dev_infos_get = opdl_info_get,
.dev_close = opdl_close,
@@ -753,10 +753,7 @@ static struct rte_vdev_driver evdev_opdl_pmd_drv = {
.remove = opdl_remove
};
-RTE_INIT(opdl_init_log);
-
-static void
-opdl_init_log(void)
+RTE_INIT(opdl_init_log)
{
opdl_logtype_driver = rte_log_register("pmd.event.opdl.driver");
if (opdl_logtype_driver >= 0)
diff --git a/drivers/event/opdl/opdl_evdev_init.c b/drivers/event/opdl/opdl_evdev_init.c
index 1454de53..582ad698 100644
--- a/drivers/event/opdl/opdl_evdev_init.c
+++ b/drivers/event/opdl/opdl_evdev_init.c
@@ -733,6 +733,9 @@ initialise_all_other_ports(struct rte_eventdev *dev)
queue->ports[queue->nb_ports] = port;
port->instance_id = queue->nb_ports;
queue->nb_ports++;
+ opdl_stage_set_queue_id(stage_inst,
+ port->queue_id);
+
} else if (queue->q_pos == OPDL_Q_POS_END) {
/* tx port */
diff --git a/drivers/event/opdl/opdl_ring.c b/drivers/event/opdl/opdl_ring.c
index eca7712b..8aca481c 100644
--- a/drivers/event/opdl/opdl_ring.c
+++ b/drivers/event/opdl/opdl_ring.c
@@ -25,7 +25,10 @@
#define OPDL_NAME_SIZE 64
-#define OPDL_EVENT_MASK (0xFFFF0000000FFFFFULL)
+#define OPDL_EVENT_MASK (0x00000000000FFFFFULL)
+#define OPDL_FLOWID_MASK (0xFFFFF)
+#define OPDL_OPA_MASK (0xFF)
+#define OPDL_OPA_OFFSET (0x38)
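+/*
+ * Per the masks above, the 64-bit event word keeps the flow id in
+ * bits 19:0 and the OPA id (compared against the stage queue id) in
+ * bits 63:56.
+ */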
int opdl_logtype_driver;
@@ -86,7 +89,6 @@ struct opdl_stage {
*/
uint32_t available_seq;
uint32_t head; /* Current head for single-thread operation */
- uint32_t shadow_head; /* Shadow head for single-thread operation */
uint32_t nb_instance; /* Number of instances */
uint32_t instance_id; /* ID of this stage instance */
uint16_t num_claimed; /* Number of slots claimed */
@@ -102,6 +104,9 @@ struct opdl_stage {
/* For managing disclaims in multi-threaded processing stages */
struct claim_manager pending_disclaims[RTE_MAX_LCORE]
__rte_cache_aligned;
+ uint32_t shadow_head; /* Shadow head for single-thread operation */
+ uint32_t queue_id; /* ID of Queue which is assigned to this stage */
+ uint32_t pos; /* Atomic scan position */
} __rte_cache_aligned;
/* Context for opdl_ring */
@@ -494,6 +499,9 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
uint32_t num_entries, uint32_t *seq, bool block, bool atomic)
{
uint32_t i = 0, j = 0, offset;
+ uint32_t opa_id = 0;
+ uint32_t flow_id = 0;
+ uint64_t event = 0;
void *get_slots;
struct rte_event *ev;
RTE_SET_USED(seq);
@@ -520,7 +528,17 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
for (j = 0; j < num_entries; j++) {
ev = (struct rte_event *)get_slot(t, s->head+j);
- if ((ev->flow_id%s->nb_instance) == s->instance_id) {
+
+ event = __atomic_load_n(&(ev->event),
+ __ATOMIC_ACQUIRE);
+
+ opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
+ flow_id = OPDL_FLOWID_MASK & event;
+
+ if (opa_id >= s->queue_id)
+ continue;
+
+ if ((flow_id % s->nb_instance) == s->instance_id) {
memcpy(entries_offset, ev, t->slot_size);
entries_offset += t->slot_size;
i++;
@@ -531,6 +549,7 @@ opdl_stage_claim_singlethread(struct opdl_stage *s, void *entries,
s->head += num_entries;
s->num_claimed = num_entries;
s->num_event = i;
+ s->pos = 0;
/* automatically disclaim entries if number of rte_events is zero */
if (unlikely(i == 0))
@@ -953,21 +972,26 @@ opdl_ring_get_slot(const struct opdl_ring *t, uint32_t index)
}
bool
-opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
uint32_t index, bool atomic)
{
- uint32_t i = 0, j = 0, offset;
+ uint32_t i = 0, offset;
struct opdl_ring *t = s->t;
struct rte_event *ev_orig = NULL;
bool ev_updated = false;
- uint64_t ev_temp = 0;
+ uint64_t ev_temp = 0;
+ uint64_t ev_update = 0;
+
+ uint32_t opa_id = 0;
+ uint32_t flow_id = 0;
+ uint64_t event = 0;
if (index > s->num_event) {
PMD_DRV_LOG(ERR, "index is overflow");
return ev_updated;
}
- ev_temp = ev->event&OPDL_EVENT_MASK;
+ ev_temp = ev->event & OPDL_EVENT_MASK;
if (!atomic) {
offset = opdl_first_entry_id(s->seq, s->nb_instance,
@@ -984,27 +1008,39 @@ opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
}
} else {
- for (i = 0; i < s->num_claimed; i++) {
+ for (i = s->pos; i < s->num_claimed; i++) {
ev_orig = (struct rte_event *)
get_slot(t, s->shadow_head+i);
- if ((ev_orig->flow_id%s->nb_instance) ==
- s->instance_id) {
-
- if (j == index) {
- if ((ev_orig->event&OPDL_EVENT_MASK) !=
- ev_temp) {
- ev_orig->event = ev->event;
- ev_updated = true;
- }
- if (ev_orig->u64 != ev->u64) {
- ev_orig->u64 = ev->u64;
- ev_updated = true;
- }
-
- break;
+ event = __atomic_load_n(&(ev_orig->event),
+ __ATOMIC_ACQUIRE);
+
+ opa_id = OPDL_OPA_MASK & (event >> OPDL_OPA_OFFSET);
+ flow_id = OPDL_FLOWID_MASK & event;
+
+ if (opa_id >= s->queue_id)
+ continue;
+
+ if ((flow_id % s->nb_instance) == s->instance_id) {
+ ev_update = s->queue_id;
+ ev_update = (ev_update << OPDL_OPA_OFFSET)
+ | ev->event;
+
+ s->pos = i + 1;
+
+ if ((event & OPDL_EVENT_MASK) !=
+ ev_temp) {
+ __atomic_store_n(&(ev_orig->event),
+ ev_update,
+ __ATOMIC_RELEASE);
+ ev_updated = true;
}
- j++;
+ if (ev_orig->u64 != ev->u64) {
+ ev_orig->u64 = ev->u64;
+ ev_updated = true;
+ }
+
+ break;
}
}
@@ -1049,11 +1085,7 @@ check_deps(struct opdl_ring *t, struct opdl_stage *deps[],
return -EINVAL;
}
}
- if (num_deps > t->num_stages) {
- PMD_DRV_LOG(ERR, "num_deps (%u) > number stages (%u)",
- num_deps, t->num_stages);
- return -EINVAL;
- }
+
return 0;
}
@@ -1154,6 +1186,13 @@ opdl_stage_get_opdl_ring(const struct opdl_stage *s)
}
void
+opdl_stage_set_queue_id(struct opdl_stage *s,
+ uint32_t queue_id)
+{
+ s->queue_id = queue_id;
+}
+
+void
opdl_ring_dump(const struct opdl_ring *t, FILE *f)
{
uint32_t i;
diff --git a/drivers/event/opdl/opdl_ring.h b/drivers/event/opdl/opdl_ring.h
index 9e8c33e6..751a59db 100644
--- a/drivers/event/opdl/opdl_ring.h
+++ b/drivers/event/opdl/opdl_ring.h
@@ -518,6 +518,20 @@ opdl_stage_find_num_available(struct opdl_stage *s, uint32_t num_entries);
struct opdl_stage *
opdl_stage_create(struct opdl_ring *t, bool threadsafe);
+
+/**
+ * Set the internal queue id for each stage instance.
+ *
+ * @param s
+ * The pointer of stage instance.
+ *
+ * @param queue_id
+ * The value of internal queue id.
+ */
+void
+opdl_stage_set_queue_id(struct opdl_stage *s,
+ uint32_t queue_id);
+
/**
* Prints information on opdl_ring instance and all its stages
*
@@ -590,7 +604,7 @@ opdl_ring_set_stage_threadsafe(struct opdl_stage *s, bool threadsafe);
*/
bool
-opdl_ring_cas_slot(const struct opdl_stage *s, const struct rte_event *ev,
+opdl_ring_cas_slot(struct opdl_stage *s, const struct rte_event *ev,
uint32_t index, bool atomic);
#ifdef __cplusplus
diff --git a/drivers/event/skeleton/skeleton_eventdev.c b/drivers/event/skeleton/skeleton_eventdev.c
index 7f467568..c889220e 100644
--- a/drivers/event/skeleton/skeleton_eventdev.c
+++ b/drivers/event/skeleton/skeleton_eventdev.c
@@ -319,7 +319,7 @@ skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
/* Initialize and register event driver with DPDK Application */
-static const struct rte_eventdev_ops skeleton_eventdev_ops = {
+static struct rte_eventdev_ops skeleton_eventdev_ops = {
.dev_infos_get = skeleton_eventdev_info_get,
.dev_configure = skeleton_eventdev_configure,
.dev_start = skeleton_eventdev_start,
diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 6672fd8e..a6bb9138 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -361,9 +361,99 @@ sw_init_qid_iqs(struct sw_evdev *sw)
}
}
+static int
+sw_qids_empty(struct sw_evdev *sw)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < sw->qid_count; i++) {
+ for (j = 0; j < SW_IQS_MAX; j++) {
+ if (iq_count(&sw->qids[i].iq[j]))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int
+sw_ports_empty(struct sw_evdev *sw)
+{
+ unsigned int i;
+
+ for (i = 0; i < sw->port_count; i++) {
+ if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) ||
+ rte_event_ring_count(sw->ports[i].cq_worker_ring))
+ return 0;
+ }
+
+ return 1;
+}
+
static void
-sw_clean_qid_iqs(struct sw_evdev *sw)
+sw_drain_ports(struct rte_eventdev *dev)
{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ eventdev_stop_flush_t flush;
+ unsigned int i;
+ uint8_t dev_id;
+ void *arg;
+
+ flush = dev->dev_ops->dev_stop_flush;
+ dev_id = dev->data->dev_id;
+ arg = dev->data->dev_stop_flush_arg;
+
+ for (i = 0; i < sw->port_count; i++) {
+ struct rte_event ev;
+
+ while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) {
+ if (flush)
+ flush(dev_id, ev, arg);
+
+ ev.op = RTE_EVENT_OP_RELEASE;
+ rte_event_enqueue_burst(dev_id, i, &ev, 1);
+ }
+ }
+}
+
+static void
+sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ eventdev_stop_flush_t flush;
+ uint8_t dev_id;
+ void *arg;
+
+ flush = dev->dev_ops->dev_stop_flush;
+ dev_id = dev->data->dev_id;
+ arg = dev->data->dev_stop_flush_arg;
+
+ while (iq_count(iq) > 0) {
+ struct rte_event ev;
+
+ iq_dequeue_burst(sw, iq, &ev, 1);
+
+ if (flush)
+ flush(dev_id, ev, arg);
+ }
+}
+
+static void
+sw_drain_queues(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ unsigned int i, j;
+
+ for (i = 0; i < sw->qid_count; i++) {
+ for (j = 0; j < SW_IQS_MAX; j++)
+ sw_drain_queue(dev, &sw->qids[i].iq[j]);
+ }
+}
+
+static void
+sw_clean_qid_iqs(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
int i, j;
/* Release the IQ memory of all configured qids */
@@ -464,6 +554,33 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
return 0;
}
+static int
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
+ uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(flags);
+ *caps = 0;
+
+ /* Use default SW ops */
+ *ops = NULL;
+
+ return 0;
+}
+
+static int
+sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cdev);
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
+ return 0;
+}
+
static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
@@ -702,10 +819,30 @@ static void
sw_stop(struct rte_eventdev *dev)
{
struct sw_evdev *sw = sw_pmd_priv(dev);
- sw_clean_qid_iqs(sw);
+ int32_t runstate;
+
+ /* Stop the scheduler if it's running */
+ runstate = rte_service_runstate_get(sw->service_id);
+ if (runstate == 1)
+ rte_service_runstate_set(sw->service_id, 0);
+
+ while (rte_service_may_be_active(sw->service_id))
+ rte_pause();
+
+ /* Flush all events out of the device */
+ while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) {
+ sw_event_schedule(dev);
+ sw_drain_ports(dev);
+ sw_drain_queues(dev);
+ }
+
+ sw_clean_qid_iqs(dev);
sw_xstats_uninit(sw);
sw->started = 0;
rte_smp_wmb();
+
+ if (runstate == 1)
+ rte_service_runstate_set(sw->service_id, 1);
}
static int
@@ -772,7 +909,7 @@ static int32_t sw_sched_service_func(void *args)
static int
sw_probe(struct rte_vdev_device *vdev)
{
- static const struct rte_eventdev_ops evdev_sw_ops = {
+ static struct rte_eventdev_ops evdev_sw_ops = {
.dev_configure = sw_dev_configure,
.dev_infos_get = sw_info_get,
.dev_close = sw_close,
@@ -791,6 +928,10 @@ sw_probe(struct rte_vdev_device *vdev)
.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
+ .timer_adapter_caps_get = sw_timer_adapter_caps_get,
+
+ .crypto_adapter_caps_get = sw_crypto_adapter_caps_get,
+
.xstats_get = sw_xstats_get,
.xstats_get_names = sw_xstats_get_names,
.xstats_get_by_name = sw_xstats_get_by_name,
@@ -933,9 +1074,7 @@ RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
/* declared extern in header, for access from other .c files */
int eventdev_sw_log_level;
-RTE_INIT(evdev_sw_init_log);
-static void
-evdev_sw_init_log(void)
+RTE_INIT(evdev_sw_init_log)
{
eventdev_sw_log_level = rte_log_register("pmd.event.sw");
if (eventdev_sw_log_level >= 0)
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index 3106eb33..e3a41e02 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -508,7 +508,7 @@ sw_event_schedule(struct rte_eventdev *dev)
uint32_t i;
sw->sched_called++;
- if (!sw->started)
+ if (unlikely(!sw->started))
return;
do {
@@ -532,8 +532,7 @@ sw_event_schedule(struct rte_eventdev *dev)
} while (in_pkts > 4 &&
(int)in_pkts_this_iteration < sched_quanta);
- out_pkts = 0;
- out_pkts += sw_schedule_qid_to_cq(sw);
+ out_pkts = sw_schedule_qid_to_cq(sw);
out_pkts_total += out_pkts;
in_pkts_total += in_pkts_this_iteration;
@@ -541,6 +540,12 @@ sw_event_schedule(struct rte_eventdev *dev)
break;
} while ((int)out_pkts_total < sched_quanta);
+ sw->stats.tx_pkts += out_pkts_total;
+ sw->stats.rx_pkts += in_pkts_total;
+
+ sw->sched_no_iq_enqueues += (in_pkts_total == 0);
+ sw->sched_no_cq_enqueues += (out_pkts_total == 0);
+
/* push all the internal buffered QEs in port->cq_ring to the
* worker cores: aka, do the ring transfers batched.
*/
@@ -552,10 +557,4 @@ sw_event_schedule(struct rte_eventdev *dev)
sw->ports[i].cq_buf_count = 0;
}
- sw->stats.tx_pkts += out_pkts_total;
- sw->stats.rx_pkts += in_pkts_total;
-
- sw->sched_no_iq_enqueues += (in_pkts_total == 0);
- sw->sched_no_cq_enqueues += (out_pkts_total == 0);
-
}
diff --git a/drivers/event/sw/sw_evdev_selftest.c b/drivers/event/sw/sw_evdev_selftest.c
index 78d30e07..c40912db 100644
--- a/drivers/event/sw/sw_evdev_selftest.c
+++ b/drivers/event/sw/sw_evdev_selftest.c
@@ -28,6 +28,7 @@
#define MAX_PORTS 16
#define MAX_QIDS 16
#define NUM_PACKETS (1<<18)
+#define DEQUEUE_DEPTH 128
static int evdev;
@@ -147,7 +148,7 @@ init(struct test *t, int nb_queues, int nb_ports)
.nb_event_ports = nb_ports,
.nb_event_queue_flows = 1024,
.nb_events_limit = 4096,
- .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_dequeue_depth = DEQUEUE_DEPTH,
.nb_event_port_enqueue_depth = 128,
};
int ret;
@@ -2807,6 +2808,78 @@ err:
return -1;
}
+static void
+flush(uint8_t dev_id __rte_unused, struct rte_event event, void *arg)
+{
+ *((uint8_t *) arg) += (event.u64 == 0xCA11BACC) ? 1 : 0;
+}
+
+static int
+dev_stop_flush(struct test *t) /* test to check we can properly flush events */
+{
+ const struct rte_event new_ev = {
+ .op = RTE_EVENT_OP_NEW,
+ .u64 = 0xCA11BACC,
+ .queue_id = 0
+ };
+ struct rte_event ev = new_ev;
+ uint8_t count = 0;
+ int i;
+
+ if (init(t, 1, 1) < 0 ||
+ create_ports(t, 1) < 0 ||
+ create_atomic_qids(t, 1) < 0) {
+ printf("%d: Error initializing device\n", __LINE__);
+ return -1;
+ }
+
+ /* Link the queue so *_start() doesn't error out */
+ if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1) {
+ printf("%d: Error linking queue to port\n", __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_start(evdev) < 0) {
+ printf("%d: Error with start call\n", __LINE__);
+ goto err;
+ }
+
+ for (i = 0; i < DEQUEUE_DEPTH + 1; i++) {
+ if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) {
+ printf("%d: Error enqueuing events\n", __LINE__);
+ goto err;
+ }
+ }
+
+ /* Schedule the events from the port to the IQ. At least one event
+ * should be remaining in the queue.
+ */
+ rte_service_run_iter_on_app_lcore(t->service_id, 1);
+
+ if (rte_event_dev_stop_flush_callback_register(evdev, flush, &count)) {
+ printf("%d: Error installing the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ cleanup(t);
+
+ if (count == 0) {
+ printf("%d: Error executing the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ if (rte_event_dev_stop_flush_callback_register(evdev, NULL, NULL)) {
+ printf("%d: Error uninstalling the flush callback\n", __LINE__);
+ goto err;
+ }
+
+ return 0;
+err:
+ rte_event_dev_dump(evdev, stdout);
+ cleanup(t);
+ return -1;
+}
+
static int
worker_loopback_worker_fn(void *arg)
{
@@ -3211,6 +3284,12 @@ test_sw_eventdev(void)
printf("ERROR - Head-of-line-blocking test FAILED.\n");
goto test_fail;
}
+ printf("*** Running Stop Flush test...\n");
+ ret = dev_stop_flush(t);
+ if (ret != 0) {
+ printf("ERROR - Stop Flush test FAILED.\n");
+ goto test_fail;
+ }
if (rte_lcore_count() >= 3) {
printf("*** Running Worker loopback test...\n");
ret = worker_loopback(t, 0);
diff --git a/drivers/event/sw/sw_evdev_worker.c b/drivers/event/sw/sw_evdev_worker.c
index 67151f77..063b919c 100644
--- a/drivers/event/sw/sw_evdev_worker.c
+++ b/drivers/event/sw/sw_evdev_worker.c
@@ -77,8 +77,10 @@ sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
rte_atomic32_add(&sw->inflights, credit_update_quanta);
p->inflight_credits += (credit_update_quanta);
- if (p->inflight_credits < new)
- return 0;
+ /* If there are fewer inflight credits than new events, limit
+ * the number of enqueued events.
+ */
+ num = (p->inflight_credits < new) ? p->inflight_credits : new;
}
for (i = 0; i < num; i++) {
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
index aae2cb10..28c2e836 100644
--- a/drivers/mempool/Makefile
+++ b/drivers/mempool/Makefile
@@ -3,8 +3,13 @@
include $(RTE_SDK)/mk/rte.vars.mk
+DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += bucket
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
+endif
DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring
DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack
DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx
diff --git a/drivers/mempool/bucket/Makefile b/drivers/mempool/bucket/Makefile
new file mode 100644
index 00000000..7364916b
--- /dev/null
+++ b/drivers/mempool/bucket/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_bucket.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+
+EXPORT_MAP := rte_mempool_bucket_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += rte_mempool_bucket.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/bucket/meson.build b/drivers/mempool/bucket/meson.build
new file mode 100644
index 00000000..618d7912
--- /dev/null
+++ b/drivers/mempool/bucket/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Copyright (c) 2017-2018 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+
+sources = files('rte_mempool_bucket.c')
diff --git a/drivers/mempool/bucket/rte_mempool_bucket.c b/drivers/mempool/bucket/rte_mempool_bucket.c
new file mode 100644
index 00000000..78d2b9d0
--- /dev/null
+++ b/drivers/mempool/bucket/rte_mempool_bucket.c
@@ -0,0 +1,628 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+
+/*
+ * The general idea of the bucket mempool driver is as follows.
+ * We keep track of physically contiguous groups (buckets) of objects
+ * of a certain size. Every such group has a counter that is
+ * incremented every time an object from that group is enqueued.
+ * Until the bucket is full, no objects from it are eligible for allocation.
+ * If a request is made to dequeue a multiple of the bucket size, it is
+ * satisfied by returning whole buckets instead of separate objects.
+ */
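+/*
+ * Illustrative usage sketch (not part of the driver): an application
+ * would select these ops on an empty mempool, assuming they are
+ * registered under the name "bucket". The pool name and sizes below
+ * are hypothetical.
+ *
+ *	struct rte_mempool *mp;
+ *
+ *	mp = rte_mempool_create_empty("bucket_pool", 8192, 2048,
+ *			0, 0, rte_socket_id(), 0);
+ *	if (mp != NULL &&
+ *	    rte_mempool_set_ops_byname(mp, "bucket", NULL) == 0)
+ *		rte_mempool_populate_default(mp);
+ */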
+
+
+struct bucket_header {
+ unsigned int lcore_id;
+ uint8_t fill_cnt;
+};
+
+struct bucket_stack {
+ unsigned int top;
+ unsigned int limit;
+ void *objects[];
+};
+
+struct bucket_data {
+ unsigned int header_size;
+ unsigned int total_elt_size;
+ unsigned int obj_per_bucket;
+ unsigned int bucket_stack_thresh;
+ uintptr_t bucket_page_mask;
+ struct rte_ring *shared_bucket_ring;
+ struct bucket_stack *buckets[RTE_MAX_LCORE];
+ /*
+ * Multi-producer single-consumer ring to hold objects that are
+ * returned to the mempool on a different lcore than the one they
+ * were initially dequeued on.
+ */
+ struct rte_ring *adoption_buffer_rings[RTE_MAX_LCORE];
+ struct rte_ring *shared_orphan_ring;
+ struct rte_mempool *pool;
+ unsigned int bucket_mem_size;
+};
+
+static struct bucket_stack *
+bucket_stack_create(const struct rte_mempool *mp, unsigned int n_elts)
+{
+ struct bucket_stack *stack;
+
+ stack = rte_zmalloc_socket("bucket_stack",
+ sizeof(struct bucket_stack) +
+ n_elts * sizeof(void *),
+ RTE_CACHE_LINE_SIZE,
+ mp->socket_id);
+ if (stack == NULL)
+ return NULL;
+ stack->limit = n_elts;
+ stack->top = 0;
+
+ return stack;
+}
+
+static void
+bucket_stack_push(struct bucket_stack *stack, void *obj)
+{
+ RTE_ASSERT(stack->top < stack->limit);
+ stack->objects[stack->top++] = obj;
+}
+
+static void *
+bucket_stack_pop_unsafe(struct bucket_stack *stack)
+{
+ RTE_ASSERT(stack->top > 0);
+ return stack->objects[--stack->top];
+}
+
+static void *
+bucket_stack_pop(struct bucket_stack *stack)
+{
+ if (stack->top == 0)
+ return NULL;
+ return bucket_stack_pop_unsafe(stack);
+}
+
+static int
+bucket_enqueue_single(struct bucket_data *bd, void *obj)
+{
+ int rc = 0;
+ uintptr_t addr = (uintptr_t)obj;
+ struct bucket_header *hdr;
+ unsigned int lcore_id = rte_lcore_id();
+
+ addr &= bd->bucket_page_mask;
+ hdr = (struct bucket_header *)addr;
+
+ if (likely(hdr->lcore_id == lcore_id)) {
+ if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ /* The stack is big enough to hold all buckets */
+ bucket_stack_push(bd->buckets[lcore_id], hdr);
+ }
+ } else if (hdr->lcore_id != LCORE_ID_ANY) {
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[hdr->lcore_id];
+
+ rc = rte_ring_enqueue(adopt_ring, obj);
+ /* The ring is big enough to hold all objects */
+ RTE_ASSERT(rc == 0);
+ } else if (hdr->fill_cnt < bd->obj_per_bucket - 1) {
+ hdr->fill_cnt++;
+ } else {
+ hdr->fill_cnt = 0;
+ rc = rte_ring_enqueue(bd->shared_bucket_ring, hdr);
+ /* The ring is big enough to hold all buckets */
+ RTE_ASSERT(rc == 0);
+ }
+
+ return rc;
+}
+
+static int
+bucket_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ struct bucket_stack *local_stack = bd->buckets[rte_lcore_id()];
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < n; i++) {
+ rc = bucket_enqueue_single(bd, obj_table[i]);
+ RTE_ASSERT(rc == 0);
+ }
+ if (local_stack->top > bd->bucket_stack_thresh) {
+ rte_ring_enqueue_bulk(bd->shared_bucket_ring,
+ &local_stack->objects
+ [bd->bucket_stack_thresh],
+ local_stack->top -
+ bd->bucket_stack_thresh,
+ NULL);
+ local_stack->top = bd->bucket_stack_thresh;
+ }
+ return rc;
+}
+
+static void **
+bucket_fill_obj_table(const struct bucket_data *bd, void **pstart,
+ void **obj_table, unsigned int n)
+{
+ unsigned int i;
+ uint8_t *objptr = *pstart;
+
+ for (objptr += bd->header_size, i = 0; i < n;
+ i++, objptr += bd->total_elt_size)
+ *obj_table++ = objptr;
+ *pstart = objptr;
+ return obj_table;
+}
+
+static int
+bucket_dequeue_orphans(struct bucket_data *bd, void **obj_table,
+ unsigned int n_orphans)
+{
+ unsigned int i;
+ int rc;
+ uint8_t *objptr;
+
+ rc = rte_ring_dequeue_bulk(bd->shared_orphan_ring, obj_table,
+ n_orphans, NULL);
+ if (unlikely(rc != (int)n_orphans)) {
+ struct bucket_header *hdr;
+
+ objptr = bucket_stack_pop(bd->buckets[rte_lcore_id()]);
+ hdr = (struct bucket_header *)objptr;
+
+ if (objptr == NULL) {
+ rc = rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&objptr);
+ if (rc != 0) {
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr = (struct bucket_header *)objptr;
+ hdr->lcore_id = rte_lcore_id();
+ }
+ hdr->fill_cnt = 0;
+ bucket_fill_obj_table(bd, (void **)&objptr, obj_table,
+ n_orphans);
+ for (i = n_orphans; i < bd->obj_per_bucket; i++,
+ objptr += bd->total_elt_size) {
+ rc = rte_ring_enqueue(bd->shared_orphan_ring,
+ objptr);
+ if (rc != 0) {
+ RTE_ASSERT(0);
+ rte_errno = -rc;
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+bucket_dequeue_buckets(struct bucket_data *bd, void **obj_table,
+ unsigned int n_buckets)
+{
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n_buckets, cur_stack->top);
+ void **obj_table_base = obj_table;
+
+ n_buckets -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ void *obj = bucket_stack_pop_unsafe(cur_stack);
+
+ obj_table = bucket_fill_obj_table(bd, &obj, obj_table,
+ bd->obj_per_bucket);
+ }
+ while (n_buckets-- > 0) {
+ struct bucket_header *hdr;
+
+ if (unlikely(rte_ring_dequeue(bd->shared_bucket_ring,
+ (void **)&hdr) != 0)) {
+ /*
+ * Return the already-dequeued buffers
+ * back to the mempool
+ */
+ bucket_enqueue(bd->pool, obj_table_base,
+ obj_table - obj_table_base);
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ hdr->lcore_id = rte_lcore_id();
+ obj_table = bucket_fill_obj_table(bd, (void **)&hdr,
+ obj_table,
+ bd->obj_per_bucket);
+ }
+
+ return 0;
+}
+
+static int
+bucket_adopt_orphans(struct bucket_data *bd)
+{
+ int rc = 0;
+ struct rte_ring *adopt_ring =
+ bd->adoption_buffer_rings[rte_lcore_id()];
+
+ if (unlikely(!rte_ring_empty(adopt_ring))) {
+ void *orphan;
+
+ while (rte_ring_sc_dequeue(adopt_ring, &orphan) == 0) {
+ rc = bucket_enqueue_single(bd, orphan);
+ RTE_ASSERT(rc == 0);
+ }
+ }
+ return rc;
+}
+
+static int
+bucket_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int n_buckets = n / bd->obj_per_bucket;
+ unsigned int n_orphans = n - n_buckets * bd->obj_per_bucket;
+ int rc = 0;
+
+ bucket_adopt_orphans(bd);
+
+ if (unlikely(n_orphans > 0)) {
+ rc = bucket_dequeue_orphans(bd, obj_table +
+ (n_buckets * bd->obj_per_bucket),
+ n_orphans);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (likely(n_buckets > 0)) {
+ rc = bucket_dequeue_buckets(bd, obj_table, n_buckets);
+ if (unlikely(rc != 0) && n_orphans > 0) {
+ rte_ring_enqueue_bulk(bd->shared_orphan_ring,
+ obj_table + (n_buckets *
+ bd->obj_per_bucket),
+ n_orphans, NULL);
+ }
+ }
+
+ return rc;
+}
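+
+/*
+ * Worked example (illustrative): with obj_per_bucket == 31, a request
+ * for n == 100 objects splits into n_buckets == 3 whole buckets
+ * (93 objects) plus n_orphans == 7 loose objects served from the
+ * orphan ring or from a freshly broken-up bucket.
+ */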
+
+static int
+bucket_dequeue_contig_blocks(struct rte_mempool *mp, void **first_obj_table,
+ unsigned int n)
+{
+ struct bucket_data *bd = mp->pool_data;
+ const uint32_t header_size = bd->header_size;
+ struct bucket_stack *cur_stack = bd->buckets[rte_lcore_id()];
+ unsigned int n_buckets_from_stack = RTE_MIN(n, cur_stack->top);
+ struct bucket_header *hdr;
+ void **first_objp = first_obj_table;
+
+ bucket_adopt_orphans(bd);
+
+ n -= n_buckets_from_stack;
+ while (n_buckets_from_stack-- > 0) {
+ hdr = bucket_stack_pop_unsafe(cur_stack);
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ if (n > 0) {
+ if (unlikely(rte_ring_dequeue_bulk(bd->shared_bucket_ring,
+ first_objp, n, NULL) != n)) {
+ /* Return the already dequeued buckets */
+ while (first_objp-- != first_obj_table) {
+ bucket_stack_push(cur_stack,
+ (uint8_t *)*first_objp -
+ header_size);
+ }
+ rte_errno = ENOBUFS;
+ return -rte_errno;
+ }
+ while (n-- > 0) {
+ hdr = (struct bucket_header *)*first_objp;
+ hdr->lcore_id = rte_lcore_id();
+ *first_objp++ = (uint8_t *)hdr + header_size;
+ }
+ }
+
+ return 0;
+}
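+
+/*
+ * Caller-side sketch for the contiguous-block dequeue above
+ * (illustrative only; error handling omitted):
+ *
+ * struct rte_mempool_info info;
+ * void *blocks[2];
+ *
+ * rte_mempool_ops_get_info(mp, &info);
+ * if (rte_mempool_get_contig_blocks(mp, blocks, 2) == 0) {
+ * ... each blocks[i] now points at info.contig_block_size
+ * (== obj_per_bucket) physically contiguous objects ...
+ * }
+ */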
+
+static void
+count_underfilled_buckets(struct rte_mempool *mp,
+ void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ __rte_unused unsigned int mem_idx)
+{
+ unsigned int *pcount = opaque;
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz =
+ (unsigned int)(~bd->bucket_page_mask + 1);
+ uintptr_t align;
+ uint8_t *iter;
+
+ align = (uintptr_t)RTE_PTR_ALIGN_CEIL(memhdr->addr, bucket_page_sz) -
+ (uintptr_t)memhdr->addr;
+
+ for (iter = (uint8_t *)memhdr->addr + align;
+ iter < (uint8_t *)memhdr->addr + memhdr->len;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+
+ *pcount += hdr->fill_cnt;
+ }
+}
+
+static unsigned int
+bucket_get_count(const struct rte_mempool *mp)
+{
+ const struct bucket_data *bd = mp->pool_data;
+ unsigned int count =
+ bd->obj_per_bucket * rte_ring_count(bd->shared_bucket_ring) +
+ rte_ring_count(bd->shared_orphan_ring);
+ unsigned int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ count += bd->obj_per_bucket * bd->buckets[i]->top +
+ rte_ring_count(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_mempool_mem_iter((struct rte_mempool *)(uintptr_t)mp,
+ count_underfilled_buckets, &count);
+
+ return count;
+}
+
+static int
+bucket_alloc(struct rte_mempool *mp)
+{
+ int rg_flags = 0;
+ int rc = 0;
+ char rg_name[RTE_RING_NAMESIZE];
+ struct bucket_data *bd;
+ unsigned int i;
+ unsigned int bucket_header_size;
+
+ bd = rte_zmalloc_socket("bucket_pool", sizeof(*bd),
+ RTE_CACHE_LINE_SIZE, mp->socket_id);
+ if (bd == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_data;
+ }
+ bd->pool = mp;
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ bucket_header_size = sizeof(struct bucket_header);
+ else
+ bucket_header_size = RTE_CACHE_LINE_SIZE;
+ RTE_BUILD_BUG_ON(sizeof(struct bucket_header) > RTE_CACHE_LINE_SIZE);
+ bd->header_size = mp->header_size + bucket_header_size;
+ bd->total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size;
+ bd->bucket_mem_size = RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB * 1024;
+ bd->obj_per_bucket = (bd->bucket_mem_size - bucket_header_size) /
+ bd->total_elt_size;
+ bd->bucket_page_mask = ~(rte_align64pow2(bd->bucket_mem_size) - 1);
+ /* eventually this should be a tunable parameter */
+ bd->bucket_stack_thresh = (mp->size / bd->obj_per_bucket) * 4 / 3;
+
+ if (mp->flags & MEMPOOL_F_SP_PUT)
+ rg_flags |= RING_F_SP_ENQ;
+ if (mp->flags & MEMPOOL_F_SC_GET)
+ rg_flags |= RING_F_SC_DEQ;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
+ bd->buckets[i] =
+ bucket_stack_create(mp, mp->size / bd->obj_per_bucket);
+ if (bd->buckets[i] == NULL) {
+ rc = -ENOMEM;
+ goto no_mem_for_stacks;
+ }
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".a%u", mp->name, i);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto no_mem_for_stacks;
+ }
+ bd->adoption_buffer_rings[i] =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id,
+ rg_flags | RING_F_SC_DEQ);
+ if (bd->adoption_buffer_rings[i] == NULL) {
+ rc = -rte_errno;
+ goto no_mem_for_stacks;
+ }
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".0", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_orphan_ring;
+ }
+ bd->shared_orphan_ring =
+ rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
+ mp->socket_id, rg_flags);
+ if (bd->shared_orphan_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_orphan_ring;
+ }
+
+ rc = snprintf(rg_name, sizeof(rg_name),
+ RTE_MEMPOOL_MZ_FORMAT ".1", mp->name);
+ if (rc < 0 || rc >= (int)sizeof(rg_name)) {
+ rc = -ENAMETOOLONG;
+ goto invalid_shared_bucket_ring;
+ }
+ bd->shared_bucket_ring =
+ rte_ring_create(rg_name,
+ rte_align32pow2((mp->size + 1) /
+ bd->obj_per_bucket),
+ mp->socket_id, rg_flags);
+ if (bd->shared_bucket_ring == NULL) {
+ rc = -rte_errno;
+ goto cannot_create_shared_bucket_ring;
+ }
+
+ mp->pool_data = bd;
+
+ return 0;
+
+cannot_create_shared_bucket_ring:
+invalid_shared_bucket_ring:
+ rte_ring_free(bd->shared_orphan_ring);
+cannot_create_shared_orphan_ring:
+invalid_shared_orphan_ring:
+no_mem_for_stacks:
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+ rte_free(bd);
+no_mem_for_data:
+ rte_errno = -rc;
+ return rc;
+}
+
+static void
+bucket_free(struct rte_mempool *mp)
+{
+ unsigned int i;
+ struct bucket_data *bd = mp->pool_data;
+
+ if (bd == NULL)
+ return;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ rte_free(bd->buckets[i]);
+ rte_ring_free(bd->adoption_buffer_rings[i]);
+ }
+
+ rte_ring_free(bd->shared_orphan_ring);
+ rte_ring_free(bd->shared_bucket_ring);
+
+ rte_free(bd);
+}
+
+static ssize_t
+bucket_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
+ __rte_unused uint32_t pg_shift, size_t *min_total_elt_size,
+ size_t *align)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ *align = bucket_page_sz;
+ *min_total_elt_size = bucket_page_sz;
+ /*
+ * Each bucket occupies its own block aligned to
+ * bucket_page_sz, so the required amount of memory is
+ * a multiple of bucket_page_sz.
+ * We also need extra space for a bucket header.
+ */
+ return ((obj_num + bd->obj_per_bucket - 1) /
+ bd->obj_per_bucket) * bucket_page_sz;
+}
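+
+/*
+ * Worked example (illustrative, assuming the default 64 KiB bucket set
+ * via RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB): with a cache-line-sized
+ * bucket header (64 bytes) and total_elt_size == 2112,
+ * obj_per_bucket == (65536 - 64) / 2112 == 31; for obj_num == 1000 the
+ * function returns ceil(1000 / 31) * 65536 == 33 * 65536 bytes.
+ */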
+
+static int
+bucket_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct bucket_data *bd = mp->pool_data;
+ unsigned int bucket_page_sz;
+ unsigned int bucket_header_sz;
+ unsigned int n_objs;
+ uintptr_t align;
+ uint8_t *iter;
+ int rc;
+
+ if (bd == NULL)
+ return -EINVAL;
+
+ bucket_page_sz = rte_align32pow2(bd->bucket_mem_size);
+ align = RTE_PTR_ALIGN_CEIL((uintptr_t)vaddr, bucket_page_sz) -
+ (uintptr_t)vaddr;
+
+ bucket_header_sz = bd->header_size - mp->header_size;
+ if (iova != RTE_BAD_IOVA)
+ iova += align + bucket_header_sz;
+
+ for (iter = (uint8_t *)vaddr + align, n_objs = 0;
+ iter < (uint8_t *)vaddr + len && n_objs < max_objs;
+ iter += bucket_page_sz) {
+ struct bucket_header *hdr = (struct bucket_header *)iter;
+ unsigned int chunk_len = bd->bucket_mem_size;
+
+ if ((size_t)(iter - (uint8_t *)vaddr) + chunk_len > len)
+ chunk_len = len - (iter - (uint8_t *)vaddr);
+ if (chunk_len <= bucket_header_sz)
+ break;
+ chunk_len -= bucket_header_sz;
+
+ hdr->fill_cnt = 0;
+ hdr->lcore_id = LCORE_ID_ANY;
+ rc = rte_mempool_op_populate_default(mp,
+ RTE_MIN(bd->obj_per_bucket,
+ max_objs - n_objs),
+ iter + bucket_header_sz,
+ iova, chunk_len,
+ obj_cb, obj_cb_arg);
+ if (rc < 0)
+ return rc;
+ n_objs += rc;
+ if (iova != RTE_BAD_IOVA)
+ iova += bucket_page_sz;
+ }
+
+ return n_objs;
+}
+
+static int
+bucket_get_info(const struct rte_mempool *mp, struct rte_mempool_info *info)
+{
+ struct bucket_data *bd = mp->pool_data;
+
+ info->contig_block_size = bd->obj_per_bucket;
+ return 0;
+}
+
+
+static const struct rte_mempool_ops ops_bucket = {
+ .name = "bucket",
+ .alloc = bucket_alloc,
+ .free = bucket_free,
+ .enqueue = bucket_enqueue,
+ .dequeue = bucket_dequeue,
+ .get_count = bucket_get_count,
+ .calc_mem_size = bucket_calc_mem_size,
+ .populate = bucket_populate,
+ .get_info = bucket_get_info,
+ .dequeue_contig_blocks = bucket_dequeue_contig_blocks,
+};
+
+
+MEMPOOL_REGISTER_OPS(ops_bucket);
diff --git a/drivers/mempool/bucket/rte_mempool_bucket_version.map b/drivers/mempool/bucket/rte_mempool_bucket_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/mempool/bucket/rte_mempool_bucket_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile
index 4c0d7aaa..da8da1e9 100644
--- a/drivers/mempool/dpaa/Makefile
+++ b/drivers/mempool/dpaa/Makefile
@@ -22,6 +22,9 @@ EXPORT_MAP := rte_mempool_dpaa_version.map
# Library version
LIBABIVER := 1
+# depends on dpaa bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c
diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c
index fb3b6ba0..10c536bf 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.c
+++ b/drivers/mempool/dpaa/dpaa_mempool.c
@@ -27,6 +27,13 @@
#include <dpaa_mempool.h>
+/* List of all the memseg information maintained locally by the dpaa driver.
+ * It is used to speed up PA-to-VA searches until a better mechanism
+ * (algorithm) is available.
+ */
+struct dpaa_memseg_list rte_dpaa_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa_memsegs);
+
struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS];
static int
@@ -115,7 +122,8 @@ dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr)
struct bm_buffer buf;
int ret;
- DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid);
+ DPAA_MEMPOOL_DEBUG("Free 0x%" PRIx64 " to bpid: %d",
+ addr, bp_info->bpid);
bm_buffer_set64(&buf, addr);
retry:
@@ -154,8 +162,7 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
if (unlikely(!bp_info->ptov_off)) {
/* buffers are from single mem segment */
if (bp_info->flags & DPAA_MPOOL_SINGLE_SEGMENT) {
- bp_info->ptov_off
- = (uint64_t)obj_table[i] - phy;
+ bp_info->ptov_off = (size_t)obj_table[i] - phy;
rte_dpaa_bpid_info[bp_info->bpid].ptov_off
= bp_info->ptov_off;
}
@@ -264,10 +271,9 @@ dpaa_mbuf_get_count(const struct rte_mempool *mp)
}
static int
-dpaa_register_memory_area(const struct rte_mempool *mp,
- char *vaddr __rte_unused,
- rte_iova_t paddr __rte_unused,
- size_t len)
+dpaa_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
struct dpaa_bp_info *bp_info;
unsigned int total_elt_sz;
@@ -282,14 +288,40 @@ dpaa_register_memory_area(const struct rte_mempool *mp,
bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- DPAA_MEMPOOL_DEBUG("Req size %lu vs Available %u\n",
- len, total_elt_sz * mp->size);
+ DPAA_MEMPOOL_DEBUG("Req size %" PRIx64 " vs Available %u\n",
+ (uint64_t)len, total_elt_sz * mp->size);
/* Detect pool area has sufficient space for elements in this memzone */
if (len >= total_elt_sz * mp->size)
bp_info->flags |= DPAA_MPOOL_SINGLE_SEGMENT;
+ struct dpaa_memseg *ms;
+
+ /* For each memory chunk pinned to the mempool, an entry is added to
+ * a linked list of the contained memsegs, which is searched when
+ * PA-to-VA conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa_memseg), 0);
+ if (!ms) {
+ DPAA_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA_MEMPOOL_WARN("Fast physical-to-virtual address translation will not be available.");
+ /* If the element is not added, lookups for it simply fail and
+ * the logic falls back to the traditional DPDK memseg traversal
+ * code, so this is not a blocking error; an error message is
+ * logged nonetheless.
+ */
+ return 0;
+ }
- return 0;
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertion is generally faster than tail insertion, as the
+ * most recently pinned buffers are looked up first.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
}
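+
+/*
+ * Hypothetical lookup sketch (illustrative only; the actual consumer of
+ * rte_dpaa_memsegs lives in the dpaa bus code): a PA-to-VA search walks
+ * the list and stops at the first entry covering the address.
+ *
+ * struct dpaa_memseg *ms;
+ *
+ * TAILQ_FOREACH(ms, &rte_dpaa_memsegs, next) {
+ * if (paddr >= ms->iova && paddr < ms->iova + ms->len)
+ * return (char *)ms->vaddr + (paddr - ms->iova);
+ * }
+ */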
struct rte_mempool_ops dpaa_mpool_ops = {
@@ -299,7 +331,7 @@ struct rte_mempool_ops dpaa_mpool_ops = {
.enqueue = dpaa_mbuf_free_bulk,
.dequeue = dpaa_mbuf_alloc_bulk,
.get_count = dpaa_mbuf_get_count,
- .register_memory_area = dpaa_register_memory_area,
+ .populate = dpaa_populate,
};
MEMPOOL_REGISTER_OPS(dpaa_mpool_ops);
diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h
index 9435dd2f..092f326c 100644
--- a/drivers/mempool/dpaa/dpaa_mempool.h
+++ b/drivers/mempool/dpaa/dpaa_mempool.h
@@ -46,7 +46,7 @@ static inline void *
DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
{
if (bp_info->ptov_off)
- return ((void *)(addr + bp_info->ptov_off));
+ return ((void *) (size_t)(addr + bp_info->ptov_off));
return rte_dpaa_mem_ptov(addr);
}
diff --git a/drivers/mempool/dpaa/meson.build b/drivers/mempool/dpaa/meson.build
new file mode 100644
index 00000000..9163b3db
--- /dev/null
+++ b/drivers/mempool/dpaa/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa']
+sources = files('dpaa_mempool.c')
+
+# depends on dpaa bus which uses experimental API
+allow_experimental_apis = true
diff --git a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
index d05f274d..60bf50b2 100644
--- a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
+++ b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map
@@ -2,6 +2,7 @@ DPDK_17.11 {
global:
rte_dpaa_bpid_info;
+ rte_dpaa_memsegs;
local: *;
};
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
index efaac96e..9e4c87d7 100644
--- a/drivers/mempool/dpaa2/Makefile
+++ b/drivers/mempool/dpaa2/Makefile
@@ -9,14 +9,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_mempool_dpaa2.a
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
-
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
@@ -27,6 +21,9 @@ EXPORT_MAP := rte_mempool_dpaa2_version.map
# Library version
LIBABIVER := 1
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
@@ -34,4 +31,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
+SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL)-include := rte_dpaa2_mempool.h
+
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 2bd62e88..7d0435f5 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -21,16 +21,28 @@
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
+#include "rte_dpaa2_mempool.h"
#include <fslmc_logs.h>
#include <mc/fsl_dpbp.h>
#include <portal/dpaa2_hw_pvt.h>
#include <portal/dpaa2_hw_dpio.h>
#include "dpaa2_hw_mempool.h"
+#include "dpaa2_hw_mempool_logs.h"
struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
static struct dpaa2_bp_list *h_bp_list;
+/* List of all the memseg information maintained locally by the dpaa2 driver.
+ * It is used to speed up PA-to-VA searches until a better mechanism
+ * (algorithm) is available.
+ */
+struct dpaa2_memseg_list rte_dpaa2_memsegs
+ = TAILQ_HEAD_INITIALIZER(rte_dpaa2_memsegs);
+
+/* Dynamic logging identifier for mempool */
+int dpaa2_logtype_mempool;
+
static int
rte_hw_mbuf_create_pool(struct rte_mempool *mp)
{
@@ -44,30 +56,30 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
avail_dpbp = dpaa2_alloc_dpbp_dev();
if (!avail_dpbp) {
- PMD_DRV_LOG(ERR, "DPAA2 resources not available");
+ DPAA2_MEMPOOL_ERR("DPAA2 pool not available!");
return -ENOENT;
}
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_MEMPOOL_ERR("Failure in affining portal");
goto err1;
}
}
ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Resource enable failure with"
- " err code: %d\n", ret);
+ DPAA2_MEMPOOL_ERR("Resource enable failure with err code: %d",
+ ret);
goto err1;
}
ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
avail_dpbp->token, &dpbp_attr);
if (ret != 0) {
- PMD_INIT_LOG(ERR, "Resource read failure with"
- " err code: %d\n", ret);
+ DPAA2_MEMPOOL_ERR("Resource read failure with err code: %d",
+ ret);
goto err2;
}
@@ -75,7 +87,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
sizeof(struct dpaa2_bp_info),
RTE_CACHE_LINE_SIZE);
if (!bp_info) {
- PMD_INIT_LOG(ERR, "No heap memory available for bp_info");
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
ret = -ENOMEM;
goto err2;
}
@@ -84,7 +96,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
bp_list = rte_malloc(NULL, sizeof(struct dpaa2_bp_list),
RTE_CACHE_LINE_SIZE);
if (!bp_list) {
- PMD_INIT_LOG(ERR, "No heap memory available");
+ DPAA2_MEMPOOL_ERR("Unable to allocate buffer pool memory");
ret = -ENOMEM;
goto err3;
}
@@ -112,7 +124,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp)
sizeof(struct dpaa2_bp_info));
mp->pool_data = (void *)bp_info;
- PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
+ DPAA2_MEMPOOL_DEBUG("BP List created for bpid =%d", dpbp_attr.bpid);
h_bp_list = bp_list;
return 0;
@@ -134,7 +146,7 @@ rte_hw_mbuf_free_pool(struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp->pool_data) {
- PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool");
+ DPAA2_MEMPOOL_ERR("Not a valid dpaa2 buffer pool");
return;
}
@@ -180,7 +192,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
return;
}
}
@@ -233,6 +245,35 @@ aligned:
}
}
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+ return -ENOMEM;
+ }
+
+ return bp_info->bpid;
+}
+
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr)
+{
+ struct dpaa2_bp_info *bp_info;
+
+ bp_info = mempool_to_bpinfo(mp);
+ if (!(bp_info->bp_list)) {
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
+ return NULL;
+ }
+
+ return (struct rte_mbuf *)((uint8_t *)buf_addr -
+ bp_info->meta_data_size);
+}
+
int
rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
void **obj_table, unsigned int count)
@@ -242,7 +283,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#endif
struct qbman_swp *swp;
uint16_t bpid;
- uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+ size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
int i, ret;
unsigned int n = 0;
struct dpaa2_bp_info *bp_info;
@@ -250,7 +291,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
return -ENOENT;
}
@@ -259,7 +300,7 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate IO portal\n");
+ DPAA2_MEMPOOL_ERR("Failed to allocate IO portal");
return ret;
}
}
@@ -270,18 +311,18 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
* then the remainder.
*/
if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
- ret = qbman_swp_acquire(swp, bpid, bufs,
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
DPAA2_MBUF_MAX_ACQ_REL);
} else {
- ret = qbman_swp_acquire(swp, bpid, bufs,
+ ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
count - n);
}
/* In case of less than requested number of buffers available
* in pool, qbman_swp_acquire returns 0
*/
if (ret <= 0) {
- PMD_TX_LOG(ERR, "Buffer acquire failed with"
- " err code: %d", ret);
+ DPAA2_MEMPOOL_ERR("Buffer acquire failed with"
+ " err code: %d", ret);
/* The API expects the exact number of requested bufs */
/* Releasing all buffers allocated */
rte_dpaa2_mbuf_release(pool, obj_table, bpid,
@@ -290,10 +331,11 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i]; i++) {
- DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+ DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
obj_table[n] = (struct rte_mbuf *)
(bufs[i] - bp_info->meta_data_size);
- PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
+ DPAA2_MEMPOOL_DP_DEBUG(
+ "Acquired %p address %p from BMAN\n",
(void *)bufs[i], (void *)obj_table[n]);
n++;
}
@@ -301,8 +343,8 @@ rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
alloc += n;
- PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
- alloc, count, n);
+ DPAA2_MEMPOOL_DP_DEBUG("Total = %d , req = %d done = %d\n",
+ alloc, count, n);
#endif
return 0;
}
@@ -315,7 +357,7 @@ rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
bp_info = mempool_to_bpinfo(pool);
if (!(bp_info->bp_list)) {
- RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+ DPAA2_MEMPOOL_ERR("DPAA2 buffer pool not configured");
return -ENOENT;
}
rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
@@ -333,7 +375,7 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
struct dpaa2_dpbp_dev *dpbp_node;
if (!mp || !mp->pool_data) {
- RTE_LOG(ERR, PMD, "Invalid mempool provided\n");
+ DPAA2_MEMPOOL_ERR("Invalid mempool provided");
return 0;
}
@@ -343,16 +385,51 @@ rte_hw_mbuf_get_count(const struct rte_mempool *mp)
ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
dpbp_node->token, &num_of_bufs);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)\n",
- ret);
+ DPAA2_MEMPOOL_ERR("Unable to obtain free buf count (err=%d)",
+ ret);
return 0;
}
- RTE_LOG(DEBUG, PMD, "Free bufs = %u\n", num_of_bufs);
+ DPAA2_MEMPOOL_DP_DEBUG("Free bufs = %u\n", num_of_bufs);
return num_of_bufs;
}
+static int
+dpaa2_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t paddr, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ struct dpaa2_memseg *ms;
+
+ /* For each memory chunk pinned to the mempool, an entry is added to
+ * a linked list of the contained memsegs, which is searched when
+ * PA-to-VA conversion is required.
+ */
+ ms = rte_zmalloc(NULL, sizeof(struct dpaa2_memseg), 0);
+ if (!ms) {
+ DPAA2_MEMPOOL_ERR("Unable to allocate internal memory.");
+ DPAA2_MEMPOOL_WARN("Fast physical-to-virtual address translation will not be available.");
+ /* If the element is not added, lookups for it simply fail and
+ * the logic falls back to the traditional DPDK memseg traversal
+ * code, so this is not a blocking error; an error message is
+ * logged nonetheless.
+ */
+ return 0;
+ }
+
+ ms->vaddr = vaddr;
+ ms->iova = paddr;
+ ms->len = len;
+ /* Head insertion is generally faster than tail insertion, as the
+ * most recently pinned buffers are looked up first.
+ */
+ TAILQ_INSERT_HEAD(&rte_dpaa2_memsegs, ms, next);
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, paddr, len,
+ obj_cb, obj_cb_arg);
+}
+
struct rte_mempool_ops dpaa2_mpool_ops = {
.name = DPAA2_MEMPOOL_OPS_NAME,
.alloc = rte_hw_mbuf_create_pool,
@@ -360,6 +437,14 @@ struct rte_mempool_ops dpaa2_mpool_ops = {
.enqueue = rte_hw_mbuf_free_bulk,
.dequeue = rte_dpaa2_mbuf_alloc_bulk,
.get_count = rte_hw_mbuf_get_count,
+ .populate = dpaa2_populate,
};
MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
+
+RTE_INIT(dpaa2_mempool_init_log)
+{
+ dpaa2_logtype_mempool = rte_log_register("mempool.dpaa2");
+ if (dpaa2_logtype_mempool >= 0)
+ rte_log_set_level(dpaa2_logtype_mempool, RTE_LOG_NOTICE);
+}
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
new file mode 100644
index 00000000..c79b3d1c
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool_logs.h
@@ -0,0 +1,38 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef _DPAA2_HW_MEMPOOL_LOGS_H_
+#define _DPAA2_HW_MEMPOOL_LOGS_H_
+
+extern int dpaa2_logtype_mempool;
+
+#define DPAA2_MEMPOOL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: " fmt "\n", ##args)
+
+/* Debug logs include function names */
+#define DPAA2_MEMPOOL_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_mempool, \
+ "mempool/dpaa2: %s(): " fmt "\n", __func__, ##args)
+
+#define DPAA2_MEMPOOL_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_ERR(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(ERR, fmt, ## args)
+#define DPAA2_MEMPOOL_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_LOG(WARNING, fmt, ## args)
+
+/* Datapath logs, compiled out if their level is below the current log level */
+#define DPAA2_MEMPOOL_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_MEMPOOL_DP_DEBUG(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_INFO(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_MEMPOOL_DP_WARN(fmt, args...) \
+ DPAA2_MEMPOOL_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_HW_MEMPOOL_LOGS_H_ */
diff --git a/drivers/mempool/dpaa2/meson.build b/drivers/mempool/dpaa2/meson.build
new file mode 100644
index 00000000..90bab606
--- /dev/null
+++ b/drivers/mempool/dpaa2/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_fslmc']
+sources = files('dpaa2_hw_mempool.c')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
diff --git a/drivers/mempool/dpaa2/rte_dpaa2_mempool.h b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
new file mode 100644
index 00000000..4a22b7c4
--- /dev/null
+++ b/drivers/mempool/dpaa2/rte_dpaa2_mempool.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_DPAA2_MEMPOOL_H__
+#define __RTE_DPAA2_MEMPOOL_H__
+
+/**
+ * @file
+ *
+ * NXP-specific mempool-related functions.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mempool.h>
+
+/**
+ * Get BPID corresponding to the packet pool
+ *
+ * @param mp
+ * memory pool
+ *
+ * @return
+ * BPID of the buffer pool
+ */
+uint16_t
+rte_dpaa2_mbuf_pool_bpid(struct rte_mempool *mp);
+
+/**
+ * Get MBUF from the corresponding 'buf_addr'
+ *
+ * @param mp
+ * memory pool
+ * @param buf_addr
+ * The 'buf_addr' of the mbuf. This is the start buffer address
+ * of the packet buffer (mbuf).
+ *
+ * @return
+ * - MBUF pointer for success
+ * - NULL in case of error
+ */
+struct rte_mbuf *
+rte_dpaa2_mbuf_from_buf_addr(struct rte_mempool *mp, void *buf_addr);
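+
+/*
+ * Usage sketch (illustrative only; assumes "mp" is a DPAA2-backed packet
+ * pool and "buf" is a buffer address obtained from hardware):
+ *
+ * struct rte_mbuf *m = rte_dpaa2_mbuf_from_buf_addr(mp, buf);
+ *
+ * if (m != NULL)
+ * rte_pktmbuf_free(m);
+ */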
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_DPAA2_MEMPOOL_H__ */
diff --git a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
index a8aa685c..b9d996a6 100644
--- a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
+++ b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -3,6 +3,15 @@ DPDK_17.05 {
rte_dpaa2_bpid_info;
rte_dpaa2_mbuf_alloc_bulk;
+ rte_dpaa2_memsegs;
local: *;
};
+
+DPDK_18.05 {
+ global:
+
+ rte_dpaa2_mbuf_from_buf_addr;
+ rte_dpaa2_mbuf_pool_bpid;
+
+} DPDK_17.05;
diff --git a/drivers/mempool/meson.build b/drivers/mempool/meson.build
index 59918560..4527d980 100644
--- a/drivers/mempool/meson.build
+++ b/drivers/mempool/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['ring', 'stack', 'octeontx']
+drivers = ['bucket', 'dpaa', 'dpaa2', 'octeontx', 'ring', 'stack']
std_deps = ['mempool']
config_flag_fmt = 'RTE_LIBRTE_@0@_MEMPOOL'
driver_name_fmt = 'rte_mempool_@0@'
diff --git a/drivers/mempool/octeontx/Makefile b/drivers/mempool/octeontx/Makefile
index dfc373e6..a3e1dce8 100644
--- a/drivers/mempool/octeontx/Makefile
+++ b/drivers/mempool/octeontx/Makefile
@@ -10,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_mempool_octeontx.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
EXPORT_MAP := rte_mempool_octeontx_version.map
LIBABIVER := 1
@@ -17,8 +18,6 @@ LIBABIVER := 1
#
# all source are stored in SRCS-y
#
-SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_ssovf.c
-SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_mbox.c
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c
SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c
@@ -36,6 +35,6 @@ CFLAGS_rte_mempool_octeontx.o += -Ofast
endif
LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
-LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_pci -lrte_common_octeontx
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/octeontx/meson.build b/drivers/mempool/octeontx/meson.build
index 1e894a56..3baaf7db 100644
--- a/drivers/mempool/octeontx/meson.build
+++ b/drivers/mempool/octeontx/meson.build
@@ -1,10 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Cavium, Inc
-sources = files('octeontx_ssovf.c',
- 'octeontx_mbox.c',
- 'octeontx_fpavf.c',
+sources = files('octeontx_fpavf.c',
'rte_mempool_octeontx.c'
)
-deps += ['mbuf', 'bus_pci']
+deps += ['mbuf', 'bus_pci', 'common_octeontx']
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index 61c72c7c..4cf387e8 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -108,17 +108,11 @@ static struct octeontx_fpadev fpadev;
int octeontx_logtype_fpavf;
int octeontx_logtype_fpavf_mbox;
-RTE_INIT(otx_pool_init_log);
-static void
-otx_pool_init_log(void)
+RTE_INIT(otx_pool_init_log)
{
octeontx_logtype_fpavf = rte_log_register("pmd.mempool.octeontx");
if (octeontx_logtype_fpavf >= 0)
rte_log_set_level(octeontx_logtype_fpavf, RTE_LOG_NOTICE);
-
- octeontx_logtype_fpavf_mbox = rte_log_register("pmd.mempool.octeontx.mbox");
- if (octeontx_logtype_fpavf_mbox >= 0)
- rte_log_set_level(octeontx_logtype_fpavf_mbox, RTE_LOG_NOTICE);
}
/* lock is taken by caller */
@@ -247,13 +241,13 @@ octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
POOL_ENA;
- cfg.aid = 0;
+ cfg.aid = FPA_AURA_IDX(gpool);
cfg.pool_cfg = reg;
cfg.pool_stack_base = phys_addr;
cfg.pool_stack_end = phys_addr + memsz;
cfg.aura_cfg = (1 << 9);
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
@@ -298,7 +292,7 @@ octeontx_fpapf_pool_destroy(unsigned int gpool_index)
cfg.pool_stack_end = 0;
cfg.aura_cfg = 0;
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
@@ -331,15 +325,16 @@ octeontx_fpapf_aura_attach(unsigned int gpool_index)
hdr.vfid = gpool_index;
hdr.res_code = 0;
memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
- cfg.aid = gpool_index; /* gpool is guara */
+ cfg.aid = FPA_AURA_IDX(gpool_index);
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ ret = octeontx_mbox_send(&hdr, &cfg,
sizeof(struct octeontx_mbox_fpa_cfg),
&resp, sizeof(resp));
if (ret < 0) {
fpavf_log_err("Could not attach fpa ");
fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
- gpool_index, gpool_index, ret, hdr.res_code);
+ FPA_AURA_IDX(gpool_index), gpool_index, ret,
+ hdr.res_code);
ret = -EACCES;
goto err;
}
@@ -359,14 +354,15 @@ octeontx_fpapf_aura_detach(unsigned int gpool_index)
goto err;
}
- cfg.aid = gpool_index; /* gpool is gaura */
+ cfg.aid = FPA_AURA_IDX(gpool_index);
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_DETACHAURA;
hdr.vfid = gpool_index;
- ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+ ret = octeontx_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
if (ret < 0) {
fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
- gpool_index, ret, hdr.res_code);
+ FPA_AURA_IDX(gpool_index), ret,
+ hdr.res_code);
ret = -EINVAL;
}
@@ -410,7 +406,7 @@ octeontx_fpapf_start_count(uint16_t gpool_index)
hdr.coproc = FPA_COPROC;
hdr.msg = FPA_START_COUNT;
hdr.vfid = gpool_index;
- ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ ret = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (ret < 0) {
fpavf_log_err("Could not start buffer counting for ");
fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
@@ -473,6 +469,7 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
{
uint64_t cnt, limit, avail;
uint8_t gpool;
+ uint16_t gaura;
uintptr_t pool_bar;
if (unlikely(!octeontx_fpa_handle_valid(handle)))
@@ -480,14 +477,16 @@ octeontx_fpa_bufpool_free_count(uintptr_t handle)
/* get the gpool */
gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
/* Get pool bar address from handle */
pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
limit = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
avail = fpavf_read64((void *)((uintptr_t)pool_bar +
FPA_VF_VHPOOL_AVAILABLE(gpool)));
@@ -500,6 +499,7 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
unsigned int buf_offset, int node_id)
{
unsigned int gpool;
+ unsigned int gaura;
uintptr_t gpool_handle;
uintptr_t pool_bar;
int res;
@@ -549,16 +549,18 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
goto error_pool_destroy;
}
+ gaura = FPA_AURA_IDX(gpool);
+
/* Release lock */
rte_spinlock_unlock(&fpadev.lock);
/* populate AURA registers */
fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
octeontx_fpapf_start_count(gpool);
@@ -585,6 +587,7 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
uint64_t sz;
uint64_t cnt, avail;
uint8_t gpool;
+ uint16_t gaura;
uintptr_t pool_bar;
int ret;
@@ -598,13 +601,15 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* get the pool */
gpool = octeontx_fpa_bufpool_gpool(handle);
+ /* get the aura */
+ gaura = octeontx_fpa_bufpool_gaura(handle);
/* Get pool bar address from handle */
pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
/* Check for no outstanding buffers */
cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT(gpool)));
+ FPA_VF_VHAURA_CNT(gaura)));
if (cnt) {
fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
return -EBUSY;
@@ -617,9 +622,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Prepare to empty the entire POOL */
fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
/* Empty the pool */
/* Invalidate the POOL */
@@ -631,11 +636,11 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Yank a buffer from the pool */
node = (void *)(uintptr_t)
fpavf_read64((void *)
- (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+ (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gaura)));
if (node == NULL) {
fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
- gpool, avail);
+ gaura, avail);
break;
}
@@ -669,9 +674,9 @@ octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
/* Deactivate the AURA */
fpavf_write64(0, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ FPA_VF_VHAURA_CNT_LIMIT(gaura)));
fpavf_write64(0, (void *)((uintptr_t)pool_bar +
- FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+ FPA_VF_VHAURA_CNT_THRESHOLD(gaura)));
ret = octeontx_fpapf_aura_detach(gpool);
if (ret) {
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index b76f40e7..b00be137 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -14,6 +14,7 @@
#define FPA_VF_MAX 32
#define FPA_GPOOL_MASK (FPA_VF_MAX-1)
+#define FPA_GAURA_SHIFT 4
/* FPA VF register offsets */
#define FPA_VF_INT(x) (0x200ULL | ((x) << 22))
@@ -36,6 +37,7 @@
#define FPA_VF_FREE_ADDRS_S(x, y, z) \
((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
+#define FPA_AURA_IDX(gpool) ((gpool) << FPA_GAURA_SHIFT)
/* FPA VF register offsets from VF_BAR4, size 2 MByte */
#define FPA_VF_MSIX_VEC_ADDR 0x00000
#define FPA_VF_MSIX_VEC_CTL 0x00008
@@ -102,4 +104,11 @@ octeontx_fpa_bufpool_gpool(uintptr_t handle)
{
return (uint8_t)handle & FPA_GPOOL_MASK;
}
+
+static __rte_always_inline uint16_t
+octeontx_fpa_bufpool_gaura(uintptr_t handle)
+{
+ return octeontx_fpa_bufpool_gpool(handle) << FPA_GAURA_SHIFT;
+}
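+
+/*
+ * Illustrative mapping (given FPA_GAURA_SHIFT == 4 above): each gpool
+ * owns a block of 16 aura indexes, so gpool 0 maps to gaura 0,
+ * gpool 1 to gaura 16, gpool 3 to gaura 48, and so on.
+ */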
+
#endif /* __OCTEONTX_FPAVF_H__ */
diff --git a/drivers/mempool/octeontx/octeontx_mbox.h b/drivers/mempool/octeontx/octeontx_mbox.h
deleted file mode 100644
index 1b056071..00000000
--- a/drivers/mempool/octeontx/octeontx_mbox.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Cavium, Inc
- */
-
-#ifndef __OCTEONTX_MBOX_H__
-#define __OCTEONTX_MBOX_H__
-
-#include <rte_common.h>
-
-#define SSOW_BAR4_LEN (64 * 1024)
-#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
-
-struct octeontx_ssovf_info {
- uint16_t domain; /* Domain id */
- uint8_t total_ssovfs; /* Total sso groups available in domain */
- uint8_t total_ssowvfs;/* Total sso hws available in domain */
-};
-
-enum octeontx_ssovf_type {
- OCTEONTX_SSO_GROUP, /* SSO group vf */
- OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
-};
-
-struct octeontx_mbox_hdr {
- uint16_t vfid; /* VF index or pf resource index local to the domain */
- uint8_t coproc; /* Coprocessor id */
- uint8_t msg; /* Message id */
- uint8_t res_code; /* Functional layer response code */
-};
-
-int octeontx_ssovf_info(struct octeontx_ssovf_info *info);
-void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar);
-int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr,
- void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
-
-#endif /* __OCTEONTX_MBOX_H__ */
diff --git a/drivers/mempool/octeontx/octeontx_pool_logs.h b/drivers/mempool/octeontx/octeontx_pool_logs.h
index 95865192..7b4e1b38 100644
--- a/drivers/mempool/octeontx/octeontx_pool_logs.h
+++ b/drivers/mempool/octeontx/octeontx_pool_logs.h
@@ -11,21 +11,12 @@
rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf,\
"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
-#define MBOX_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf_mbox,\
- "%s() line %u: " fmt "\n", __func__, __LINE__, ## args)
-
#define fpavf_log_info(fmt, ...) FPAVF_LOG(INFO, fmt, ##__VA_ARGS__)
#define fpavf_log_dbg(fmt, ...) FPAVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
#define fpavf_log_err(fmt, ...) FPAVF_LOG(ERR, fmt, ##__VA_ARGS__)
#define fpavf_func_trace fpavf_log_dbg
-#define mbox_log_info(fmt, ...) MBOX_LOG(INFO, fmt, ##__VA_ARGS__)
-#define mbox_log_dbg(fmt, ...) MBOX_LOG(DEBUG, fmt, ##__VA_ARGS__)
-#define mbox_log_err(fmt, ...) MBOX_LOG(ERR, fmt, ##__VA_ARGS__)
-#define mbox_func_trace mbox_log_dbg
extern int octeontx_logtype_fpavf;
-extern int octeontx_logtype_fpavf_mbox;
#endif /* __OCTEONTX_POOL_LOGS_H__*/
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index d143d05c..ab94dfe9 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -126,28 +126,66 @@ octeontx_fpavf_get_count(const struct rte_mempool *mp)
return octeontx_fpa_bufpool_free_count(pool);
}
-static int
-octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags)
+static ssize_t
+octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
{
- RTE_SET_USED(mp);
- *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
- MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
- return 0;
+ ssize_t mem_size;
+
+ /*
+ * Simply need space for one more object to be able to
+ * fulfil alignment requirements.
+ */
+ mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num + 1,
+ pg_shift,
+ min_chunk_size, align);
+ if (mem_size >= 0) {
+ /*
+ * Memory area which contains objects must be physically
+ * contiguous.
+ */
+ *min_chunk_size = mem_size;
+ }
+
+ return mem_size;
}
static int
-octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
- char *vaddr, rte_iova_t paddr, size_t len)
+octeontx_fpavf_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
- RTE_SET_USED(paddr);
+ size_t total_elt_sz;
+ size_t off;
uint8_t gpool;
uintptr_t pool_bar;
+ int ret;
+
+ if (iova == RTE_BAD_IOVA)
+ return -EINVAL;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ /* align object start address to a multiple of total_elt_sz */
+ off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
+
+ if (len < off)
+ return -EINVAL;
+
+ vaddr = (char *)vaddr + off;
+ iova += off;
+ len -= off;
gpool = octeontx_fpa_bufpool_gpool(mp->pool_id);
pool_bar = mp->pool_id & ~(uint64_t)FPA_GPOOL_MASK;
- return octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+ ret = octeontx_fpavf_pool_set_range(pool_bar, len, vaddr, gpool);
+ if (ret < 0)
+ return ret;
+
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova, len,
+ obj_cb, obj_cb_arg);
}
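+
+/*
+ * Worked example (illustrative): with total_elt_sz == 2176 and
+ * vaddr % total_elt_sz == 512, off == 1664, so the first 1664 bytes of
+ * the chunk are skipped and population starts on a total_elt_sz
+ * boundary. The extra object reserved in octeontx_fpavf_calc_mem_size()
+ * guarantees the requested number of objects still fits after this
+ * shift.
+ */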
static struct rte_mempool_ops octeontx_fpavf_ops = {
@@ -157,8 +195,8 @@ static struct rte_mempool_ops octeontx_fpavf_ops = {
.enqueue = octeontx_fpavf_enqueue,
.dequeue = octeontx_fpavf_dequeue,
.get_count = octeontx_fpavf_get_count,
- .get_capabilities = octeontx_fpavf_get_capabilities,
- .register_memory_area = octeontx_fpavf_register_memory_area,
+ .calc_mem_size = octeontx_fpavf_calc_mem_size,
+ .populate = octeontx_fpavf_populate,
};
MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
index fe8cdeca..a7530317 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
@@ -1,9 +1,3 @@
DPDK_17.11 {
- global:
-
- octeontx_ssovf_info;
- octeontx_ssovf_bar;
- octeontx_ssovf_mbox_send;
-
local: *;
};
diff --git a/drivers/meson.build b/drivers/meson.build
index b41a0f18..f94e2fe6 100644
--- a/drivers/meson.build
+++ b/drivers/meson.build
@@ -2,12 +2,19 @@
# Copyright(c) 2017 Intel Corporation
# Defines the order in which the drivers are built.
-driver_classes = ['bus',
- 'mempool', # depends on bus.
- 'net', # depends on bus and mempool.
- 'crypto', # depenss on bus, mempool (net in future).
- 'event'] # depends on bus, mempool and net.
-
+driver_classes = ['common',
+ 'bus',
+ 'mempool', # depends on common and bus.
+ 'net', # depends on common, bus and mempool.
+ 'crypto', # depends on common, bus and mempool (net in future).
+ 'compress', # depends on common, bus, mempool.
+ 'event', # depends on common, bus, mempool and net.
+ 'raw'] # depends on common, bus, mempool, net and event.
+
+default_cflags = machine_args
+if cc.has_argument('-Wno-format-truncation')
+ default_cflags += '-Wno-format-truncation'
+endif
foreach class:driver_classes
drivers = []
std_deps = []
@@ -28,7 +35,7 @@ foreach class:driver_classes
allow_experimental_apis = false
sources = []
objs = []
- cflags = machine_args
+ cflags = default_cflags
includes = [include_directories(drv_path)]
# set up internal deps. Drivers can append/override as necessary
deps = std_deps
@@ -55,6 +62,10 @@ foreach class:driver_classes
shared_objs = []
static_objs = []
foreach d:deps
+ if not is_variable('shared_rte_' + d)
+ error('Missing dependency ' + d +
+ ' for driver ' + lib_name)
+ endif
shared_objs += [get_variable('shared_rte_' + d)]
static_objs += [get_variable('static_rte_' + d)]
endforeach
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e1127326..664398de 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -12,11 +12,16 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
+DIRS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe
DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
+endif
DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
@@ -27,7 +32,8 @@ DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
-DIRS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl
+DIRS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mvpp2
+DIRS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += netvsc
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
@@ -53,9 +59,12 @@ endif # $(CONFIG_RTE_LIBRTE_SCHED)
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
+DIRS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifc
+endif
endif # $(CONFIG_RTE_LIBRTE_VHOST)
-ifeq ($(CONFIG_RTE_LIBRTE_MRVL_PMD),y)
+ifeq ($(CONFIG_RTE_LIBRTE_MVPP2_PMD),y)
ifeq ($(CONFIG_RTE_LIBRTE_CFGFILE),n)
$(error "RTE_LIBRTE_CFGFILE must be enabled in configuration!")
endif
diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile
index bb37d67a..39a1e0d2 100644
--- a/drivers/net/af_packet/Makefile
+++ b/drivers/net/af_packet/Makefile
@@ -1,35 +1,8 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014 John W. Linville <linville@redhat.com>
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014 John W. Linville <linville@redhat.com>
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 57eccfd0..eb3cce3a 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -94,9 +94,15 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_autoneg = ETH_LINK_FIXED,
};
+static int af_packet_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, af_packet_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static uint16_t
eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
@@ -299,6 +305,7 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -393,8 +400,8 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
if (data_size > buf_size) {
- RTE_LOG(ERR, PMD,
- "%s: %d bytes will not fit in mbuf (%d bytes)\n",
+ PMD_LOG(ERR,
+ "%s: %d bytes will not fit in mbuf (%d bytes)",
dev->device->name, data_size, buf_size);
return -ENOMEM;
}
@@ -515,7 +522,7 @@ open_packet_iface(const char *key __rte_unused,
/* Open an AF_PACKET socket... */
*sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (*sockfd == -1) {
- RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n");
+ PMD_LOG(ERR, "Could not open AF_PACKET socket");
return -1;
}
@@ -561,28 +568,20 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
break;
}
if (pair == NULL) {
- RTE_LOG(ERR, PMD,
- "%s: no interface specified for AF_PACKET ethdev\n",
+ PMD_LOG(ERR,
+ "%s: no interface specified for AF_PACKET ethdev",
name);
- goto error_early;
+ return -1;
}
- RTE_LOG(INFO, PMD,
- "%s: creating AF_PACKET-backed ethdev on numa socket %u\n",
+ PMD_LOG(INFO,
+ "%s: creating AF_PACKET-backed ethdev on numa socket %u",
name, numa_node);
- /*
- * now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto error_early;
-
*internals = rte_zmalloc_socket(name, sizeof(**internals),
0, numa_node);
if (*internals == NULL)
- goto error_early;
+ return -1;
for (q = 0; q < nb_queues; q++) {
(*internals)->rx_queue[q].map = MAP_FAILED;
@@ -601,27 +600,27 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
memcpy(ifr.ifr_name, pair->value, ifnamelen);
ifr.ifr_name[ifnamelen] = '\0';
} else {
- RTE_LOG(ERR, PMD,
- "%s: I/F name too long (%s)\n",
+ PMD_LOG(ERR,
+ "%s: I/F name too long (%s)",
name, pair->value);
- goto error_early;
+ return -1;
}
if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
- RTE_LOG(ERR, PMD,
- "%s: ioctl failed (SIOCGIFINDEX)\n",
+ PMD_LOG(ERR,
+ "%s: ioctl failed (SIOCGIFINDEX)",
name);
- goto error_early;
+ return -1;
}
(*internals)->if_name = strdup(pair->value);
if ((*internals)->if_name == NULL)
- goto error_early;
+ return -1;
(*internals)->if_index = ifr.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
- RTE_LOG(ERR, PMD,
- "%s: ioctl failed (SIOCGIFHWADDR)\n",
+ PMD_LOG(ERR,
+ "%s: ioctl failed (SIOCGIFHWADDR)",
name);
- goto error_early;
+ return -1;
}
memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);
@@ -642,8 +641,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
/* Open an AF_PACKET socket for this queue... */
qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (qsockfd == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not open AF_PACKET socket\n",
+ PMD_LOG(ERR,
+ "%s: could not open AF_PACKET socket",
name);
return -1;
}
@@ -652,9 +651,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
&tpver, sizeof(tpver));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not set PACKET_VERSION on AF_PACKET "
- "socket for %s\n", name, pair->value);
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_VERSION on AF_PACKET socket for %s",
+ name, pair->value);
goto error;
}
@@ -662,9 +661,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
&discard, sizeof(discard));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not set PACKET_LOSS on "
- "AF_PACKET socket for %s\n", name, pair->value);
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_LOSS on AF_PACKET socket for %s",
+ name, pair->value);
goto error;
}
@@ -672,10 +671,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
&qdisc_bypass, sizeof(qdisc_bypass));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not set PACKET_QDISC_BYPASS "
- "on AF_PACKET socket for %s\n", name,
- pair->value);
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s",
+ name, pair->value);
goto error;
}
#else
@@ -684,17 +682,17 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not set PACKET_RX_RING on AF_PACKET "
- "socket for %s\n", name, pair->value);
+ PMD_LOG(ERR,
+ "%s: could not set PACKET_RX_RING on AF_PACKET socket for %s",
+ name, pair->value);
goto error;
}
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
+ PMD_LOG(ERR,
"%s: could not set PACKET_TX_RING on AF_PACKET "
- "socket for %s\n", name, pair->value);
+ "socket for %s", name, pair->value);
goto error;
}
@@ -705,8 +703,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
qsockfd, 0);
if (rx_queue->map == MAP_FAILED) {
- RTE_LOG(ERR, PMD,
- "%s: call to mmap failed on AF_PACKET socket for %s\n",
+ PMD_LOG(ERR,
+ "%s: call to mmap failed on AF_PACKET socket for %s",
name, pair->value);
goto error;
}
@@ -742,8 +740,8 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
- "%s: could not bind AF_PACKET socket to %s\n",
+ PMD_LOG(ERR,
+ "%s: could not bind AF_PACKET socket to %s",
name, pair->value);
goto error;
}
@@ -752,9 +750,9 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
&fanout_arg, sizeof(fanout_arg));
if (rc == -1) {
- RTE_LOG(ERR, PMD,
+ PMD_LOG(ERR,
"%s: could not set PACKET_FANOUT on AF_PACKET socket "
- "for %s\n", name, pair->value);
+ "for %s", name, pair->value);
goto error;
}
#endif
@@ -775,14 +773,13 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
(*internals)->nb_queues = nb_queues;
- rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+ data = (*eth_dev)->data;
data->dev_private = *internals;
data->nb_rx_queues = (uint16_t)nb_queues;
data->nb_tx_queues = (uint16_t)nb_queues;
data->dev_link = pmd_link;
data->mac_addrs = &(*internals)->eth_addr;
- (*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
return 0;
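
The shadow copy of rte_eth_dev_data is gone: rather than rte_zmalloc()ing a private struct, memcpy()ing the ethdev-owned data into it and swapping the pointer, the driver now fills in (*eth_dev)->data in place. The ethdev data array is allocated in memory shared across processes, so a per-process copy would be invisible to secondary processes, which the new secondary probe path below relies on; it also lets the error_early cleanup path be deleted. In outline:

    /* before (sketch): per-process shadow copy */
    data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
    rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
    (*eth_dev)->data = data;

    /* after: populate the shared, ethdev-owned data directly */
    data = (*eth_dev)->data;
    data->dev_private = *internals;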
@@ -802,8 +799,6 @@ error:
}
free((*internals)->if_name);
rte_free(*internals);
-error_early:
- rte_free(data);
return -1;
}
@@ -837,8 +832,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
qpairs = atoi(pair->value);
if (qpairs < 1 ||
qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
- RTE_LOG(ERR, PMD,
- "%s: invalid qpairs value\n",
+ PMD_LOG(ERR,
+ "%s: invalid qpairs value",
name);
return -1;
}
@@ -847,8 +842,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
blocksize = atoi(pair->value);
if (!blocksize) {
- RTE_LOG(ERR, PMD,
- "%s: invalid blocksize value\n",
+ PMD_LOG(ERR,
+ "%s: invalid blocksize value",
name);
return -1;
}
@@ -857,8 +852,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
framesize = atoi(pair->value);
if (!framesize) {
- RTE_LOG(ERR, PMD,
- "%s: invalid framesize value\n",
+ PMD_LOG(ERR,
+ "%s: invalid framesize value",
name);
return -1;
}
@@ -867,8 +862,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
framecount = atoi(pair->value);
if (!framecount) {
- RTE_LOG(ERR, PMD,
- "%s: invalid framecount value\n",
+ PMD_LOG(ERR,
+ "%s: invalid framecount value",
name);
return -1;
}
@@ -877,8 +872,8 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
if (strstr(pair->key, ETH_AF_PACKET_QDISC_BYPASS_ARG) != NULL) {
qdisc_bypass = atoi(pair->value);
if (qdisc_bypass > 1) {
- RTE_LOG(ERR, PMD,
- "%s: invalid bypass value\n",
+ PMD_LOG(ERR,
+ "%s: invalid bypass value",
name);
return -1;
}
@@ -887,24 +882,24 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
}
if (framesize > blocksize) {
- RTE_LOG(ERR, PMD,
- "%s: AF_PACKET MMAP frame size exceeds block size!\n",
+ PMD_LOG(ERR,
+ "%s: AF_PACKET MMAP frame size exceeds block size!",
name);
return -1;
}
blockcount = framecount / (blocksize / framesize);
if (!blockcount) {
- RTE_LOG(ERR, PMD,
- "%s: invalid AF_PACKET MMAP parameters\n", name);
+ PMD_LOG(ERR,
+ "%s: invalid AF_PACKET MMAP parameters", name);
return -1;
}
- RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name);
- RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize);
- RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount);
- RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
- RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);
+ PMD_LOG(INFO, "%s: AF_PACKET MMAP parameters:", name);
+ PMD_LOG(INFO, "%s:\tblock size %d", name, blocksize);
+ PMD_LOG(INFO, "%s:\tblock count %d", name, blockcount);
+ PMD_LOG(INFO, "%s:\tframe size %d", name, framesize);
+ PMD_LOG(INFO, "%s:\tframe count %d", name, framecount);
if (rte_pmd_init_internals(dev, *sockfd, qpairs,
blocksize, blockcount,
@@ -917,6 +912,7 @@ rte_eth_from_packet(struct rte_vdev_device *dev,
eth_dev->rx_pkt_burst = eth_af_packet_rx;
eth_dev->tx_pkt_burst = eth_af_packet_tx;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -926,9 +922,24 @@ rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
int ret = 0;
struct rte_kvargs *kvlist;
int sockfd = -1;
+ struct rte_eth_dev *eth_dev;
+ const char *name = rte_vdev_device_name(dev);
+
+ PMD_LOG(INFO, "Initializing pmd_af_packet for %s", name);
- RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n",
- rte_vdev_device_name(dev));
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL) {
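
The new branch is the standard vdev secondary-process probe: when a secondary process probes a name the primary already created (and passes no arguments of its own), the driver attaches to the shared port data via rte_eth_dev_attach_secondary() instead of re-creating sockets, then fills in the per-process pointers (dev_ops, device) and finishes probing. From the application side of the secondary the port is then resolvable by name; a sketch, with "net_af_packet0" as a purely hypothetical device name:

    uint16_t port_id;

    /* in the secondary process, after rte_eal_init() */
    if (rte_eth_dev_get_port_by_name("net_af_packet0", &port_id) == 0)
        printf("attached to shared port %u\n", port_id);

As the TODO in the hunk notes, Rx/Tx queue state is not yet requested from the primary, so the datapath is not usable from the secondary at this point.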
@@ -966,8 +977,8 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
struct pmd_internals *internals;
unsigned q;
- RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
- rte_socket_id());
+ PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u",
+ rte_socket_id());
if (dev == NULL)
return -1;
@@ -985,7 +996,6 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
free(internals->if_name);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
@@ -1006,3 +1016,10 @@ RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
"framesz=<int> "
"framecnt=<int> "
"qdisc_bypass=<0|1>");
+
+RTE_INIT(af_packet_init_log)
+{
+ af_packet_logtype = rte_log_register("pmd.net.packet");
+ if (af_packet_logtype >= 0)
+ rte_log_set_level(af_packet_logtype, RTE_LOG_NOTICE);
+}
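
The constructor registers the logtype as "pmd.net.packet" with a default level of NOTICE. Dynamic logtypes can then be tuned per component without rebuilding, either through the EAL --log-level option naming the component (exact syntax per this release's EAL documentation) or programmatically:

    /* raise only this driver's verbosity at run time */
    rte_log_set_level(af_packet_logtype, RTE_LOG_DEBUG);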
diff --git a/drivers/net/ark/Makefile b/drivers/net/ark/Makefile
index f1433bd2..2e232be8 100644
--- a/drivers/net/ark/Makefile
+++ b/drivers/net/ark/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright (c) 2015-2017 Atomic Rules LLC
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2015-2018 Atomic Rules LLC
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c
index 929dc7d1..eea388a1 100644
--- a/drivers/net/ark/ark_ddm.c
+++ b/drivers/net/ark/ark_ddm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h
index f67ad012..b37d1e09 100644
--- a/drivers/net/ark/ark_ddm.h
+++ b/drivers/net/ark/ark_ddm.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_DDM_H_
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
index ff87c20e..552ca01a 100644
--- a/drivers/net/ark/ark_ethdev.c
+++ b/drivers/net/ark/ark_ethdev.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
@@ -69,7 +40,7 @@ static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
static int eth_ark_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
-static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+static int eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
@@ -390,6 +361,7 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
if (p == 0) {
/* First port is already allocated by DPDK */
eth_dev = ark->eth_dev;
+ rte_eth_dev_probing_finish(eth_dev);
continue;
}
@@ -422,6 +394,8 @@ eth_ark_dev_init(struct rte_eth_dev *dev)
ark->user_data[eth_dev->data->port_id] =
ark->user_ext.dev_init(dev, ark->a_bar, p);
}
+
+ rte_eth_dev_probing_finish(eth_dev);
}
return ret;
@@ -771,7 +745,6 @@ eth_ark_dev_info_get(struct rte_eth_dev *dev,
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_50G |
ETH_LINK_SPEED_100G);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
}
static int
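
This removal tracks an ethdev API change in this release: rte_eth_dev_info no longer exposes a PCI-specific pci_dev pointer; the generic device handle filled in by the ethdev layer replaces it. A sketch of the application-side migration (field names as understood for 18.08):

    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);
    /* previously: dev_info.pci_dev->addr, etc. */
    if (dev_info.device != NULL)
        printf("port %u on device %s\n", port_id, dev_info.device->name);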
@@ -887,16 +860,19 @@ eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
ark->user_data[dev->data->port_id]);
}
-static void
+static int
eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
struct ark_adapter *ark =
(struct ark_adapter *)dev->data->dev_private;
- if (ark->user_ext.mac_addr_set)
+ if (ark->user_ext.mac_addr_set) {
ark->user_ext.mac_addr_set(dev, mac_addr,
ark->user_data[dev->data->port_id]);
+ return 0;
+ }
+ return -ENOTSUP;
}
static int
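
Another ethdev API change in this release: the mac_addr_set device op now returns int so that failures propagate to rte_eth_dev_default_mac_addr_set(); ark uses this to report -ENOTSUP when no user extension hook is installed. The op signature as understood for 18.08:

    /* assumed typedef shape from rte_ethdev_core.h */
    typedef int (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
                                      struct ether_addr *mac_addr);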
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
index 987d085e..16f0d11e 100644
--- a/drivers/net/ark/ark_ethdev_rx.c
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_ethdev_rx.h b/drivers/net/ark/ark_ethdev_rx.h
index 14678711..0fdd29b1 100644
--- a/drivers/net/ark/ark_ethdev_rx.h
+++ b/drivers/net/ark/ark_ethdev_rx.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_ETHDEV_RX_H_
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
index 4ef55d10..57188c24 100644
--- a/drivers/net/ark/ark_ethdev_tx.c
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_ethdev_tx.h b/drivers/net/ark/ark_ethdev_tx.h
index 657f895a..e448ce22 100644
--- a/drivers/net/ark/ark_ethdev_tx.h
+++ b/drivers/net/ark/ark_ethdev_tx.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_ETHDEV_TX_H_
diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h
index 031cfddc..f5af2153 100644
--- a/drivers/net/ark/ark_ext.h
+++ b/drivers/net/ark/ark_ext.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_EXT_H_
diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h
index 41eb260e..f820091d 100644
--- a/drivers/net/ark/ark_global.h
+++ b/drivers/net/ark/ark_global.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_GLOBAL_H_
diff --git a/drivers/net/ark/ark_logs.h b/drivers/net/ark/ark_logs.h
index 8aff2963..b90e9f0a 100644
--- a/drivers/net/ark/ark_logs.h
+++ b/drivers/net/ark/ark_logs.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_DEBUG_H_
diff --git a/drivers/net/ark/ark_mpu.c b/drivers/net/ark/ark_mpu.c
index d4ba6dc7..21f840f3 100644
--- a/drivers/net/ark/ark_mpu.c
+++ b/drivers/net/ark/ark_mpu.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_mpu.h b/drivers/net/ark/ark_mpu.h
index f6f6c808..92c3e67c 100644
--- a/drivers/net/ark/ark_mpu.h
+++ b/drivers/net/ark/ark_mpu.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_MPU_H_
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
index 2cadaab8..c21003a0 100644
--- a/drivers/net/ark/ark_pktchkr.c
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <getopt.h>
diff --git a/drivers/net/ark/ark_pktchkr.h b/drivers/net/ark/ark_pktchkr.h
index f4025dd6..a50f428b 100644
--- a/drivers/net/ark/ark_pktchkr.h
+++ b/drivers/net/ark/ark_pktchkr.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_PKTCHKR_H_
diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/net/ark/ark_pktdir.c
index eb47dedb..1f2c8182 100644
--- a/drivers/net/ark/ark_pktdir.c
+++ b/drivers/net/ark/ark_pktdir.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <stdint.h>
diff --git a/drivers/net/ark/ark_pktdir.h b/drivers/net/ark/ark_pktdir.h
index e13fe821..314e6dea 100644
--- a/drivers/net/ark/ark_pktdir.h
+++ b/drivers/net/ark/ark_pktdir.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_PKTDIR_H_
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
index d3c3dee1..2a2b428e 100644
--- a/drivers/net/ark/ark_pktgen.c
+++ b/drivers/net/ark/ark_pktgen.c
@@ -1,34 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <getopt.h>
diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/net/ark/ark_pktgen.h
index bf5a241b..0e5f76aa 100644
--- a/drivers/net/ark/ark_pktgen.h
+++ b/drivers/net/ark/ark_pktgen.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_PKTGEN_H_
diff --git a/drivers/net/ark/ark_rqp.c b/drivers/net/ark/ark_rqp.c
index 41c497b0..bf1af4d6 100644
--- a/drivers/net/ark/ark_rqp.c
+++ b/drivers/net/ark/ark_rqp.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_rqp.h b/drivers/net/ark/ark_rqp.h
index 0c380071..6c804606 100644
--- a/drivers/net/ark/ark_rqp.h
+++ b/drivers/net/ark/ark_rqp.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_RQP_H_
diff --git a/drivers/net/ark/ark_udm.c b/drivers/net/ark/ark_udm.c
index 7a429ac7..03f1922c 100644
--- a/drivers/net/ark/ark_udm.c
+++ b/drivers/net/ark/ark_udm.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#include <unistd.h>
diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h
index 915343fe..5846c825 100644
--- a/drivers/net/ark/ark_udm.h
+++ b/drivers/net/ark/ark_udm.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015-2017 Atomic Rules LLC
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Atomic Rules LLC
*/
#ifndef _ARK_UDM_H_
diff --git a/drivers/net/ark/meson.build b/drivers/net/ark/meson.build
new file mode 100644
index 00000000..99151bba
--- /dev/null
+++ b/drivers/net/ark/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('ark_ddm.c',
+ 'ark_ethdev.c',
+ 'ark_ethdev_rx.c',
+ 'ark_ethdev_tx.c',
+ 'ark_mpu.c',
+ 'ark_pktchkr.c',
+ 'ark_pktdir.c',
+ 'ark_pktgen.c',
+ 'ark_rqp.c',
+ 'ark_udm.c')
diff --git a/drivers/net/avf/avf_ethdev.c b/drivers/net/avf/avf_ethdev.c
index 4df66170..3a2baaf2 100644
--- a/drivers/net/avf/avf_ethdev.c
+++ b/drivers/net/avf/avf_ethdev.c
@@ -65,7 +65,7 @@ static int avf_dev_rss_hash_update(struct rte_eth_dev *dev,
static int avf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
+static int avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int avf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id);
@@ -339,17 +339,18 @@ static int avf_config_rx_queues_irqs(struct rte_eth_dev *dev,
AVF_WRITE_FLUSH(hw);
/* map all queues to the same interrupt */
for (i = 0; i < dev->data->nb_rx_queues; i++)
- vf->rxq_map[0] |= 1 << i;
+ vf->rxq_map[vf->msix_base] |= 1 << i;
} else {
if (!rte_intr_allow_others(intr_handle)) {
vf->nb_msix = 1;
vf->msix_base = AVF_MISC_VEC_ID;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- vf->rxq_map[0] |= 1 << i;
+ vf->rxq_map[vf->msix_base] |= 1 << i;
intr_handle->intr_vec[i] = AVF_MISC_VEC_ID;
}
PMD_DRV_LOG(DEBUG,
- "vector 0 are mapping to all Rx queues");
+ "vector %u are mapping to all Rx queues",
+ vf->msix_base);
} else {
/* If Rx interrupt is required, and we can use
* multiple interrupts, then the vec is from 1
@@ -474,7 +475,7 @@ avf_dev_stop(struct rte_eth_dev *dev)
{
struct avf_adapter *adapter =
AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = dev->intr_handle;
int ret, i;
@@ -507,7 +508,6 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = AVF_BUF_SIZE_MIN;
@@ -518,27 +518,41 @@ avf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = AVF_NUM_MACADDR_MAX;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = AVF_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = AVF_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = AVF_DEFAULT_TX_RS_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -608,7 +622,7 @@ avf_dev_link_update(struct rte_eth_dev *dev,
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
new_link.link_status = vf->link_up ? ETH_LINK_UP :
ETH_LINK_DOWN;
- new_link.link_autoneg = !!(dev->data->dev_conf.link_speeds &
+ new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
if (rte_atomic64_cmpset((uint64_t *)&dev->data->dev_link,
@@ -759,7 +773,7 @@ avf_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
err = avf_enable_vlan_strip(adapter);
else
err = avf_disable_vlan_strip(adapter);
@@ -926,7 +940,7 @@ avf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
-static void
+static int
avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
@@ -940,11 +954,11 @@ avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
perm_addr = (struct ether_addr *)hw->mac.perm_addr;
if (is_same_ether_addr(mac_addr, old_addr))
- return;
+ return 0;
/* If the MAC address is configured by host, skip the setting */
if (is_valid_assigned_ether_addr(perm_addr))
- return;
+ return -EPERM;
ret = avf_add_del_eth_addr(adapter, old_addr, FALSE);
if (ret)
@@ -968,7 +982,11 @@ avf_dev_set_default_mac_addr(struct rte_eth_dev *dev,
mac_addr->addr_bytes[4],
mac_addr->addr_bytes[5]);
+ if (ret)
+ return -EIO;
+
ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+ return 0;
}
static int
@@ -1339,9 +1357,7 @@ static struct rte_pci_driver rte_avf_pmd = {
RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
-RTE_INIT(avf_init_log);
-static void
-avf_init_log(void)
+RTE_INIT(avf_init_log)
{
avf_logtype_init = rte_log_register("pmd.net.avf.init");
if (avf_logtype_init >= 0)
@@ -1365,8 +1381,8 @@ avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
return AVF_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
- mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
- alignment, RTE_PGSIZE_2M);
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
return AVF_ERR_NO_MEMORY;
diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c
index d276d975..e03a136f 100644
--- a/drivers/net/avf/avf_rxtx.c
+++ b/drivers/net/avf/avf_rxtx.c
@@ -109,7 +109,7 @@ check_rx_vec_allow(struct avf_rx_queue *rxq)
static inline bool
check_tx_vec_allow(struct avf_tx_queue *txq)
{
- if ((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS &&
+ if (!(txq->offloads & AVF_NO_VECTOR_FLAGS) &&
txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST &&
txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) {
PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
@@ -435,9 +435,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t i, base, bsf, tc_mapping;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
nb_desc > AVF_MAX_RING_DESC ||
nb_desc < AVF_MIN_RING_DESC) {
@@ -474,7 +477,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->free_thresh = tx_free_thresh;
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
/* Allocate software ring */
@@ -1831,7 +1834,7 @@ avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->free_thresh;
qinfo->conf.tx_rs_thresh = txq->rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
diff --git a/drivers/net/avf/avf_rxtx.h b/drivers/net/avf/avf_rxtx.h
index d1701cd6..297d0776 100644
--- a/drivers/net/avf/avf_rxtx.h
+++ b/drivers/net/avf/avf_rxtx.h
@@ -22,8 +22,12 @@
#define AVF_VPMD_DESCS_PER_LOOP 4
#define AVF_VPMD_TX_MAX_FREE_BUF 64
-#define AVF_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
+#define AVF_NO_VECTOR_FLAGS ( \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM)
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
@@ -125,7 +129,7 @@ struct avf_tx_queue {
uint16_t port_id;
uint16_t queue_id;
- uint32_t txq_flags;
+ uint64_t offloads;
uint16_t next_dd; /* next to set RS, for VPMD */
uint16_t next_rs; /* next to check DD, for VPMD */
diff --git a/drivers/net/avp/Makefile b/drivers/net/avp/Makefile
index c29ecf45..c9db667f 100644
--- a/drivers/net/avp/Makefile
+++ b/drivers/net/avp/Makefile
@@ -1,34 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2013-2017, Wind River Systems, Inc. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Wind River Systems nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2013-2017, Wind River Systems, Inc.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index dba99120..761f6c1c 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (c) 2013-2017, Wind River Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1) Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2) Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3) Neither the name of Wind River Systems nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
*/
#include <stdint.h>
@@ -411,7 +383,7 @@ avp_dev_translate_address(struct rte_eth_dev *eth_dev,
(host_phys_addr < (map->phys_addr + map->length))) {
/* address is within this segment */
offset += (host_phys_addr - map->phys_addr);
- addr = RTE_PTR_ADD(addr, offset);
+ addr = RTE_PTR_ADD(addr, (uintptr_t)offset);
PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
host_phys_addr, addr);
@@ -1076,19 +1048,8 @@ static int
eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- struct rte_eth_dev *eth_dev;
- int ret;
-
- eth_dev = rte_eth_dev_pci_allocate(pci_dev,
- sizeof(struct avp_adapter));
- if (eth_dev == NULL)
- return -ENOMEM;
-
- ret = eth_avp_dev_init(eth_dev);
- if (ret)
- rte_eth_dev_pci_release(eth_dev);
-
- return ret;
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
+ eth_avp_dev_init);
}
static int
@@ -2074,12 +2035,6 @@ avp_dev_start(struct rte_eth_dev *eth_dev)
goto unlock;
}
- /* disable features that we do not support */
- eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
- eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
- eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
- eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;
-
/* update link state */
ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
if (ret < 0) {
@@ -2206,7 +2161,6 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->max_rx_queues = avp->max_rx_queues;
dev_info->max_tx_queues = avp->max_tx_queues;
dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
@@ -2216,16 +2170,19 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
}
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+ uint64_t offloads = dev_conf->rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -2235,12 +2192,12 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
@@ -2314,9 +2271,7 @@ avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
-RTE_INIT(avp_init_log);
-static void
-avp_init_log(void)
+RTE_INIT(avp_init_log)
{
avp_logtype_driver = rte_log_register("pmd.net.avp.driver");
if (avp_logtype_driver >= 0)
diff --git a/drivers/net/avp/avp_logs.h b/drivers/net/avp/avp_logs.h
index e29394d5..6e297c7a 100644
--- a/drivers/net/avp/avp_logs.h
+++ b/drivers/net/avp/avp_logs.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (c) 2013-2015, Wind River Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1) Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2) Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3) Neither the name of Wind River Systems nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
*/
#ifndef _AVP_LOGS_H_
diff --git a/drivers/net/avp/meson.build b/drivers/net/avp/meson.build
new file mode 100644
index 00000000..6076c31b
--- /dev/null
+++ b/drivers/net/avp/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('avp_ethdev.c')
+install_headers('rte_avp_common.h', 'rte_avp_fifo.h')
diff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h
index 81dfe5ea..aa95159c 100644
--- a/drivers/net/avp/rte_avp_common.h
+++ b/drivers/net/avp/rte_avp_common.h
@@ -1,57 +1,6 @@
-/*-
- * This file is provided under a dual BSD/LGPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GNU LESSER GENERAL PUBLIC LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * Contact Information:
- * Wind River Systems, Inc.
- *
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1)
+ * Copyright(c) 2010-2013 Intel Corporation.
+ * Copyright(c) 2014-2017 Wind River Systems, Inc.
*/
#ifndef _RTE_AVP_COMMON_H_
diff --git a/drivers/net/avp/rte_avp_fifo.h b/drivers/net/avp/rte_avp_fifo.h
index 803eb80a..c1658da6 100644
--- a/drivers/net/avp/rte_avp_fifo.h
+++ b/drivers/net/avp/rte_avp_fifo.h
@@ -1,57 +1,6 @@
-/*-
- * This file is provided under a dual BSD/LGPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GNU LESSER GENERAL PUBLIC LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * Copyright(c) 2014 Wind River Systems, Inc. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2.1 of the GNU Lesser General Public License
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * Contact Information:
- * Wind River Systems, Inc.
- *
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
+/* SPDX-License-Identifier: (BSD-3-Clause OR LGPL-2.1)
+ * Copyright(c) 2010-2013 Intel Corporation.
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
*/
#ifndef _RTE_AVP_FIFO_H_
diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
new file mode 100644
index 00000000..72215aed
--- /dev/null
+++ b/drivers/net/axgbe/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_axgbe.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_axgbe_version.map
+
+LIBABIVER := 1
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_ethdev
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_X86),y)
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
new file mode 100644
index 00000000..d25d54ca
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -0,0 +1,1710 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef __AXGBE_COMMON_H__
+#define __AXGBE_COMMON_H__
+
+#include "axgbe_logs.h"
+
+#include <stdbool.h>
+#include <limits.h>
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <pthread.h>
+
+#include <rte_byteorder.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_hexdump.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_branch_prediction.h>
+#include <rte_eal.h>
+#include <rte_memzone.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev_pci.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+
+#define BIT(nr) (1 << (nr))
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#define AXGBE_HZ 250
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_AXIAWRCR 0x301c
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define EDMA_TX_CONTROL 0x3040
+#define EDMA_RX_CONTROL 0x3044
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_RDC_INDEX 24
+#define DMA_AXIAWCR_RDC_WIDTH 4
+#define DMA_AXIAWCR_RDD_INDEX 28
+#define DMA_AXIAWCR_RDD_WIDTH 2
+#define DMA_AXIAWRCR_TDWC_INDEX 0
+#define DMA_AXIAWRCR_TDWC_WIDTH 4
+#define DMA_AXIAWRCR_TDWD_INDEX 4
+#define DMA_AXIAWRCR_TDWD_WIDTH 4
+#define DMA_AXIAWRCR_RDRC_INDEX 8
+#define DMA_AXIAWRCR_RDRC_WIDTH 4
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_INTM_INDEX 12
+#define DMA_MR_INTM_WIDTH 2
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_WR_OSR_INDEX 24
+#define DMA_SBMR_WR_OSR_WIDTH 6
+#define DMA_SBMR_RD_OSR_INDEX 16
+#define DMA_SBMR_RD_OSR_WIDTH 6
+#define DMA_SBMR_AAL_INDEX 12
+#define DMA_SBMR_AAL_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_BLEN_256_INDEX 7
+#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_BLEN_32_INDEX 4
+#define DMA_SBMR_BLEN_32_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
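+/* Illustrative sketch (not part of the driver): every field above is
+ * described by an _INDEX (bit offset) and _WIDTH (bit count) pair. The
+ * driver's own accessor macros are outside this excerpt; the hypothetical
+ * helpers below only show how such pairs are consumed.
+ */
+static inline uint32_t
+field_get(uint32_t reg, unsigned int index, unsigned int width)
+{
+	/* Shift the field down to bit 0 and mask off the rest. */
+	return (reg >> index) & ((1U << width) - 1);
+}
+
+static inline uint32_t
+field_set(uint32_t reg, unsigned int index, unsigned int width,
+	  uint32_t val)
+{
+	/* Clear the field, then merge in the new value. */
+	uint32_t mask = ((1U << width) - 1) << index;
+
+	return (reg & ~mask) | ((val << index) & mask);
+}
+
+/* e.g. the software-reset bit of DMA_MR:
+ * field_get(dma_mr, DMA_MR_SWR_INDEX, DMA_MR_SWR_WIDTH)
+ */
+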
+/* DMA register values */
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
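+
+/* Illustrative sketch (the helper is hypothetical, not a driver symbol):
+ * per the comment above, channel n's copy of any per-channel register
+ * sits at a fixed stride from the base.
+ */
+static inline unsigned int
+dma_ch_reg_offset(unsigned int channel, unsigned int reg)
+{
+	return DMA_CH_BASE + channel * DMA_CH_INC + reg;
+}
+
+/* e.g. channel 2's status register:
+ * dma_ch_reg_offset(2, DMA_CH_SR) == 0x3100 + 2 * 0x80 + 0x60 == 0x3260
+ */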
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 14
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 15
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_MDIOSCAR 0x0200
+#define MAC_MDIOSCCDR 0x0204
+#define MAC_MDIOISR 0x0214
+#define MAC_MDIOIER 0x0218
+#define MAC_MDIOCL22R 0x0220
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+#define MAC_TSCR 0x0d00
+#define MAC_SSIR 0x0d04
+#define MAC_STSR 0x0d08
+#define MAC_STNR 0x0d0c
+#define MAC_STSUR 0x0d10
+#define MAC_STNUR 0x0d14
+#define MAC_TSAR 0x0d18
+#define MAC_TSSR 0x0d20
+#define MAC_TXSNR 0x0d30
+#define MAC_TXSSR 0x0d34
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADDR64_INDEX 14
+#define MAC_HWF1R_ADDR64_WIDTH 2
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_NUMTC_INDEX 21
+#define MAC_HWF1R_NUMTC_WIDTH 3
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_IER_TSIE_INDEX 12
+#define MAC_IER_TSIE_WIDTH 1
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_SMI_INDEX 1
+#define MAC_ISR_SMI_WIDTH 1
+#define MAC_ISR_LSI_INDEX 0
+#define MAC_ISR_LSI_WIDTH 1
+#define MAC_ISR_LS_INDEX 24
+#define MAC_ISR_LS_WIDTH 2
+#define MAC_ISR_TSIS_INDEX 12
+#define MAC_ISR_TSIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
+#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
+#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
+#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
+#define MAC_MDIOSCAR_DA_INDEX 21
+#define MAC_MDIOSCAR_DA_WIDTH 5
+#define MAC_MDIOSCAR_PA_INDEX 16
+#define MAC_MDIOSCAR_PA_WIDTH 5
+#define MAC_MDIOSCAR_RA_INDEX 0
+#define MAC_MDIOSCAR_RA_WIDTH 16
+#define MAC_MDIOSCAR_REG_INDEX 0
+#define MAC_MDIOSCAR_REG_WIDTH 21
+#define MAC_MDIOSCCDR_BUSY_INDEX 22
+#define MAC_MDIOSCCDR_BUSY_WIDTH 1
+#define MAC_MDIOSCCDR_CMD_INDEX 16
+#define MAC_MDIOSCCDR_CMD_WIDTH 2
+#define MAC_MDIOSCCDR_CR_INDEX 19
+#define MAC_MDIOSCCDR_CR_WIDTH 3
+#define MAC_MDIOSCCDR_DATA_INDEX 0
+#define MAC_MDIOSCCDR_DATA_WIDTH 16
+#define MAC_MDIOSCCDR_SADDR_INDEX 18
+#define MAC_MDIOSCCDR_SADDR_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HPF_INDEX 10
+#define MAC_PFR_HPF_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PFR_VTFE_INDEX 16
+#define MAC_PFR_VTFE_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_PFCE_INDEX 8
+#define MAC_RFCR_PFCE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RFCR_UP_INDEX 1
+#define MAC_RFCR_UP_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
+#define MAC_SSIR_SNSINC_INDEX 8
+#define MAC_SSIR_SNSINC_WIDTH 8
+#define MAC_SSIR_SSINC_INDEX 16
+#define MAC_SSIR_SSINC_WIDTH 8
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_TSCR_AV8021ASMEN_INDEX 28
+#define MAC_TSCR_AV8021ASMEN_WIDTH 1
+#define MAC_TSCR_SNAPTYPSEL_INDEX 16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
+#define MAC_TSCR_TSADDREG_INDEX 5
+#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSCFUPDT_INDEX 1
+#define MAC_TSCR_TSCFUPDT_WIDTH 1
+#define MAC_TSCR_TSCTRLSSR_INDEX 9
+#define MAC_TSCR_TSCTRLSSR_WIDTH 1
+#define MAC_TSCR_TSENA_INDEX 0
+#define MAC_TSCR_TSENA_WIDTH 1
+#define MAC_TSCR_TSENALL_INDEX 8
+#define MAC_TSCR_TSENALL_WIDTH 1
+#define MAC_TSCR_TSEVNTENA_INDEX 14
+#define MAC_TSCR_TSEVNTENA_WIDTH 1
+#define MAC_TSCR_TSINIT_INDEX 2
+#define MAC_TSCR_TSINIT_WIDTH 1
+#define MAC_TSCR_TSIPENA_INDEX 11
+#define MAC_TSCR_TSIPENA_WIDTH 1
+#define MAC_TSCR_TSIPV4ENA_INDEX 13
+#define MAC_TSCR_TSIPV4ENA_WIDTH 1
+#define MAC_TSCR_TSIPV6ENA_INDEX 12
+#define MAC_TSCR_TSIPV6ENA_WIDTH 1
+#define MAC_TSCR_TSMSTRENA_INDEX 15
+#define MAC_TSCR_TSMSTRENA_WIDTH 1
+#define MAC_TSCR_TSVER2ENA_INDEX 10
+#define MAC_TSCR_TSVER2ENA_WIDTH 1
+#define MAC_TSCR_TXTSSTSM_INDEX 24
+#define MAC_TSCR_TXTSSTSM_WIDTH 1
+#define MAC_TSSR_TXTSC_INDEX 15
+#define MAC_TSSR_TXTSC_WIDTH 1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_VLANHTR_VLHT_INDEX 0
+#define MAC_VLANHTR_VLHT_WIDTH 16
+#define MAC_VLANIR_VLTI_INDEX 20
+#define MAC_VLANIR_VLTI_WIDTH 1
+#define MAC_VLANIR_CSVL_INDEX 19
+#define MAC_VLANIR_CSVL_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_ETV_INDEX 16
+#define MAC_VLANTR_ETV_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_VL_INDEX 0
+#define MAC_VLANTR_VL_WIDTH 16
+#define MAC_VLANTR_VTHM_INDEX 25
+#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_VTIM_INDEX 17
+#define MAC_VLANTR_VTIM_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+#define MTL_TCPM_INC 4
+#define MTL_TCPM_TC_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
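+
+/* Illustrative sketch (helpers are hypothetical, not driver symbols):
+ * MTL queue registers use the same base + stride scheme as the DMA
+ * channels, and MTL_RQDCM packs MTL_RQDCM_Q_PER_REG queue-to-DMA-channel
+ * mappings per 32-bit register, assuming the usual 8 bits per queue.
+ */
+static inline unsigned int
+mtl_q_reg_offset(unsigned int queue, unsigned int reg)
+{
+	return MTL_Q_BASE + queue * MTL_Q_INC + reg;
+}
+
+static inline unsigned int
+mtl_rqdcm_reg(unsigned int queue)
+{
+	/* Which MTL_RQDCM register holds this queue's mapping. */
+	return MTL_RQDCM0R + (queue / MTL_RQDCM_Q_PER_REG) * MTL_RQDCM_INC;
+}
+
+static inline unsigned int
+mtl_rqdcm_shift(unsigned int queue)
+{
+	/* Bit offset of this queue's mapping within that register. */
+	return (queue % MTL_RQDCM_Q_PER_REG) * 8;
+}
+
+/* e.g. queue 5 maps through register 0x1034, bits 8..15. */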
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_INDEX 16
+#define MTL_Q_RQDR_PRXQ_WIDTH 14
+#define MTL_Q_RQDR_RXQSTS_INDEX 4
+#define MTL_Q_RQDR_RXQSTS_WIDTH 2
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQDR_TRCSTS_INDEX 1
+#define MTL_Q_TQDR_TRCSTS_WIDTH 2
+#define MTL_Q_TQDR_TXQSTS_INDEX 4
+#define MTL_Q_TQDR_TXQSTS_WIDTH 1
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent traffic class has registers that
+ * are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+#define MTL_TC_QWR_QW_INDEX 0
+#define MTL_TC_QWR_QW_WIDTH 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* PCS register offsets */
+#define PCS_V1_WINDOW_SELECT 0x03fc
+#define PCS_V2_WINDOW_DEF 0x9060
+#define PCS_V2_WINDOW_SELECT 0x9064
+
+/* PCS register entry bit positions and sizes */
+#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
+#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
+#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
+
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1 0x002c
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX 11
+#define SIR0_KR_RT_1_RESET_WIDTH 1
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
+#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
+/* MAC Control register offsets */
+#define XP_PROP_0 0x0000
+#define XP_PROP_1 0x0004
+#define XP_PROP_2 0x0008
+#define XP_PROP_3 0x000c
+#define XP_PROP_4 0x0010
+#define XP_PROP_5 0x0014
+#define XP_MAC_ADDR_LO 0x0020
+#define XP_MAC_ADDR_HI 0x0024
+#define XP_ECC_ISR 0x0030
+#define XP_ECC_IER 0x0034
+#define XP_ECC_CNT0 0x003c
+#define XP_ECC_CNT1 0x0040
+#define XP_DRIVER_INT_REQ 0x0060
+#define XP_DRIVER_INT_RO 0x0064
+#define XP_DRIVER_SCRATCH_0 0x0068
+#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_EN 0x0078
+#define XP_I2C_MUTEX 0x0080
+#define XP_MDIO_MUTEX 0x0084
+
+/* MAC Control register entry bit positions and sizes */
+#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
+#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
+#define XP_DRIVER_INT_RO_STATUS_INDEX 0
+#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
+#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
+#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
+#define XP_ECC_CNT0_RX_DED_INDEX 24
+#define XP_ECC_CNT0_RX_DED_WIDTH 8
+#define XP_ECC_CNT0_RX_SEC_INDEX 16
+#define XP_ECC_CNT0_RX_SEC_WIDTH 8
+#define XP_ECC_CNT0_TX_DED_INDEX 8
+#define XP_ECC_CNT0_TX_DED_WIDTH 8
+#define XP_ECC_CNT0_TX_SEC_INDEX 0
+#define XP_ECC_CNT0_TX_SEC_WIDTH 8
+#define XP_ECC_CNT1_DESC_DED_INDEX 8
+#define XP_ECC_CNT1_DESC_DED_WIDTH 8
+#define XP_ECC_CNT1_DESC_SEC_INDEX 0
+#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
+#define XP_ECC_IER_DESC_DED_INDEX 0
+#define XP_ECC_IER_DESC_DED_WIDTH 1
+#define XP_ECC_IER_DESC_SEC_INDEX 1
+#define XP_ECC_IER_DESC_SEC_WIDTH 1
+#define XP_ECC_IER_RX_DED_INDEX 2
+#define XP_ECC_IER_RX_DED_WIDTH 1
+#define XP_ECC_IER_RX_SEC_INDEX 3
+#define XP_ECC_IER_RX_SEC_WIDTH 1
+#define XP_ECC_IER_TX_DED_INDEX 4
+#define XP_ECC_IER_TX_DED_WIDTH 1
+#define XP_ECC_IER_TX_SEC_INDEX 5
+#define XP_ECC_IER_TX_SEC_WIDTH 1
+#define XP_ECC_ISR_DESC_DED_INDEX 0
+#define XP_ECC_ISR_DESC_DED_WIDTH 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 1
+#define XP_ECC_ISR_DESC_SEC_WIDTH 1
+#define XP_ECC_ISR_RX_DED_INDEX 2
+#define XP_ECC_ISR_RX_DED_WIDTH 1
+#define XP_ECC_ISR_RX_SEC_INDEX 3
+#define XP_ECC_ISR_RX_SEC_WIDTH 1
+#define XP_ECC_ISR_TX_DED_INDEX 4
+#define XP_ECC_ISR_TX_DED_WIDTH 1
+#define XP_ECC_ISR_TX_SEC_INDEX 5
+#define XP_ECC_ISR_TX_SEC_WIDTH 1
+#define XP_I2C_MUTEX_BUSY_INDEX 31
+#define XP_I2C_MUTEX_BUSY_WIDTH 1
+#define XP_I2C_MUTEX_ID_INDEX 29
+#define XP_I2C_MUTEX_ID_WIDTH 2
+#define XP_I2C_MUTEX_ACTIVE_INDEX 0
+#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
+#define XP_MAC_ADDR_HI_VALID_INDEX 31
+#define XP_MAC_ADDR_HI_VALID_WIDTH 1
+#define XP_PROP_0_CONN_TYPE_INDEX 28
+#define XP_PROP_0_CONN_TYPE_WIDTH 3
+#define XP_PROP_0_MDIO_ADDR_INDEX 16
+#define XP_PROP_0_MDIO_ADDR_WIDTH 5
+#define XP_PROP_0_PORT_ID_INDEX 0
+#define XP_PROP_0_PORT_ID_WIDTH 8
+#define XP_PROP_0_PORT_MODE_INDEX 8
+#define XP_PROP_0_PORT_MODE_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 23
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_1_MAX_RX_DMA_INDEX 24
+#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
+#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
+#define XP_PROP_1_MAX_TX_DMA_INDEX 16
+#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
+#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
+#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
+#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
+#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_3_GPIO_MASK_INDEX 28
+#define XP_PROP_3_GPIO_MASK_WIDTH 4
+#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
+#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
+#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
+#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
+#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
+#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
+#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
+#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
+#define XP_PROP_3_GPIO_ADDR_INDEX 8
+#define XP_PROP_3_GPIO_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_INDEX 0
+#define XP_PROP_3_MDIO_RESET_WIDTH 2
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
+#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
+#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
+#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
+#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
+#define XP_PROP_4_MUX_CHAN_INDEX 4
+#define XP_PROP_4_MUX_CHAN_WIDTH 3
+#define XP_PROP_4_REDRV_ADDR_INDEX 16
+#define XP_PROP_4_REDRV_ADDR_WIDTH 7
+#define XP_PROP_4_REDRV_IF_INDEX 23
+#define XP_PROP_4_REDRV_IF_WIDTH 1
+#define XP_PROP_4_REDRV_LANE_INDEX 24
+#define XP_PROP_4_REDRV_LANE_WIDTH 3
+#define XP_PROP_4_REDRV_MODEL_INDEX 28
+#define XP_PROP_4_REDRV_MODEL_WIDTH 3
+#define XP_PROP_4_REDRV_PRESENT_INDEX 31
+#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
+
+/* I2C Control register offsets */
+#define IC_CON 0x0000
+#define IC_TAR 0x0004
+#define IC_DATA_CMD 0x0010
+#define IC_INTR_STAT 0x002c
+#define IC_INTR_MASK 0x0030
+#define IC_RAW_INTR_STAT 0x0034
+#define IC_CLR_INTR 0x0040
+#define IC_CLR_TX_ABRT 0x0054
+#define IC_CLR_STOP_DET 0x0060
+#define IC_ENABLE 0x006c
+#define IC_TXFLR 0x0074
+#define IC_RXFLR 0x0078
+#define IC_TX_ABRT_SOURCE 0x0080
+#define IC_ENABLE_STATUS 0x009c
+#define IC_COMP_PARAM_1 0x00f4
+
+/* I2C Control register entry bit positions and sizes */
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
+#define IC_CON_MASTER_MODE_INDEX 0
+#define IC_CON_MASTER_MODE_WIDTH 1
+#define IC_CON_RESTART_EN_INDEX 5
+#define IC_CON_RESTART_EN_WIDTH 1
+#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
+#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
+#define IC_CON_SLAVE_DISABLE_INDEX 6
+#define IC_CON_SLAVE_DISABLE_WIDTH 1
+#define IC_CON_SPEED_INDEX 1
+#define IC_CON_SPEED_WIDTH 2
+#define IC_DATA_CMD_CMD_INDEX 8
+#define IC_DATA_CMD_CMD_WIDTH 1
+#define IC_DATA_CMD_STOP_INDEX 9
+#define IC_DATA_CMD_STOP_WIDTH 1
+#define IC_ENABLE_ABORT_INDEX 1
+#define IC_ENABLE_ABORT_WIDTH 1
+#define IC_ENABLE_EN_INDEX 0
+#define IC_ENABLE_EN_WIDTH 1
+#define IC_ENABLE_STATUS_EN_INDEX 0
+#define IC_ENABLE_STATUS_EN_WIDTH 1
+#define IC_INTR_MASK_TX_EMPTY_INDEX 4
+#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
+#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
+#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
+#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
+#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
+#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
+#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
+
+/* I2C Control register value */
+#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
+#define IC_TX_ABRT_ARB_LOST 0x1000
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
+#define RX_NORMAL_DESC3_CDA_INDEX 27
+#define RX_NORMAL_DESC3_CDA_WIDTH 1
+#define RX_NORMAL_DESC3_CTXT_INDEX 30
+#define RX_NORMAL_DESC3_CTXT_WIDTH 1
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
+#define RX_CONTEXT_DESC3_TSA_INDEX 4
+#define RX_CONTEXT_DESC3_TSA_WIDTH 1
+#define RX_CONTEXT_DESC3_TSD_INDEX 6
+#define RX_CONTEXT_DESC3_TSD_WIDTH 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_TTSE_INDEX 30
+#define TX_NORMAL_DESC2_TTSE_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL 0x00ab
+#endif
+
+#ifndef MDIO_PCS_DIG_CTRL
+#define MDIO_PCS_DIG_CTRL 0x8000
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_AN_ADVERTISE
+#define MDIO_VEND2_AN_ADVERTISE 0x0004
+#endif
+
+#ifndef MDIO_VEND2_AN_LP_ABILITY
+#define MDIO_VEND2_AN_LP_ABILITY 0x0005
+#endif
+
+#ifndef MDIO_VEND2_AN_CTRL
+#define MDIO_VEND2_AN_CTRL 0x8001
+#endif
+
+#ifndef MDIO_VEND2_AN_STAT
+#define MDIO_VEND2_AN_STAT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
+#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_RESTART
+#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS6
+#define MDIO_VEND2_CTRL1_SS6 BIT(6)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS13
+#define MDIO_VEND2_CTRL1_SS13 BIT(13)
+#endif
+
+/* MDIO mask values */
+#define AXGBE_AN_CL73_INT_CMPLT BIT(0)
+#define AXGBE_AN_CL73_INC_LINK BIT(1)
+#define AXGBE_AN_CL73_PG_RCV BIT(2)
+#define AXGBE_AN_CL73_INT_MASK 0x07
+
+#define AXGBE_XNP_MCF_NULL_MESSAGE 0x001
+#define AXGBE_XNP_ACK_PROCESSED BIT(12)
+#define AXGBE_XNP_MP_FORMATTED BIT(13)
+#define AXGBE_XNP_NP_EXCHANGE BIT(15)
+
+#define AXGBE_KR_TRAINING_START BIT(0)
+#define AXGBE_KR_TRAINING_ENABLE BIT(1)
+
+#define AXGBE_PCS_CL37_BP BIT(12)
+
+#define AXGBE_AN_CL37_INT_CMPLT BIT(0)
+#define AXGBE_AN_CL37_INT_MASK 0x01
+
+#define AXGBE_AN_CL37_HD_MASK 0x40
+#define AXGBE_AN_CL37_FD_MASK 0x20
+
+#define AXGBE_AN_CL37_PCS_MODE_MASK 0x06
+#define AXGBE_AN_CL37_PCS_MODE_BASEX 0x00
+#define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04
+#define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08
+
+#define AXGBE_PMA_CDR_TRACK_EN_MASK 0x01
+#define AXGBE_PMA_CDR_TRACK_EN_OFF 0x00
+#define AXGBE_PMA_CDR_TRACK_EN_ON 0x01
+
+/* Generic Linux-compatibility helpers */
+#define __iomem /* no-op: the Linux I/O-space annotation is not needed */
+
+#define rmb() rte_rmb() /* DPDK-provided read memory barrier */
+#define wmb() rte_wmb() /* DPDK-provided write memory barrier */
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef unsigned long long dma_addr_t;
+
+static inline uint32_t low32_value(uint64_t addr)
+{
+	return (addr) & 0xffffffff;
+}
+
+static inline uint32_t high32_value(uint64_t addr)
+{
+	return (addr >> 32) & 0xffffffff;
+}
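+
+/* These helpers split a 64-bit DMA address for programming paired HI/LO
+ * 32-bit registers, e.g. the descriptor ring base registers
+ * DMA_CH_TDLR_HI/DMA_CH_TDLR_LO written during ring setup below.
+ */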
+
+/*END*/
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((rte_le_to_cpu_32((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\
+ (_var) |= rte_cpu_to_le_32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
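+
+/* Usage sketch (illustrative only, not part of the driver): reading and
+ * updating a 2-bit field at bit position 4 of a register image:
+ *
+ *	u32 reg = 0x30;
+ *	u32 v = GET_BITS(reg, 4, 2);	// v == 0x3
+ *	SET_BITS(reg, 4, 2, 0x1);	// reg == 0x10
+ *
+ * The _LE variants behave the same but convert the variable from/to
+ * little-endian, for descriptor words shared with the hardware.
+ */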
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define AXGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define AXGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define AXGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define AXGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
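+
+/* For example (illustrative), with the field definitions above,
+ *
+ *	AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, TFE)
+ *
+ * token-pastes into
+ *
+ *	GET_BITS(reg_val, MAC_Q0TFCR_TFE_INDEX, MAC_Q0TFCR_TFE_WIDTH)
+ *
+ * so a single prefix/field pair selects both the bit position and width.
+ */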
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define AXGMAC_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+#define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+#define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
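+
+/* Sketch (illustrative): AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, 1) is a
+ * read-modify-write: it reads MAC_RCR, updates only the field named by
+ * MAC_RCR_JE_INDEX/_WIDTH via SET_BITS() and writes the register back,
+ * leaving all other fields untouched.
+ */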
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+#define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ rte_write32((_val), (uint8_t *)((_pdata)->xgmac_regs) +\
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+#define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define AXGMAC_DMA_IOREAD(_channel, _reg) \
+ rte_read32((uint8_t *)((_channel)->dma_regs) + (_reg))
+
+#define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_channel)->dma_regs) + (_reg))
+
+#define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XPCS32_IOWRITE(_pdata, _off, _val) \
+ rte_write32(_val, \
+ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS32_IOREAD(_pdata, _off) \
+ rte_read32((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS16_IOWRITE(_pdata, _off, _val) \
+ rte_write16(_val, \
+ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS16_IOREAD(_pdata, _off) \
+ rte_read16((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->sir1_regs) + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->sir1_regs) + (_reg))
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of MAC Control registers.
+ */
+#define XP_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XP_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XP_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+#define XP_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XP_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XP_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XP_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of I2C Control registers.
+ */
+#define XI2C_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XI2C_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XI2C_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), (_mmd), (_reg)); \
+ mmd_val &= ~(_mask); \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), (_mmd), (_reg), (mmd_val)); \
+} while (0)
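+
+/* Example (illustrative, mask names from this header): enabling KR
+ * training with mask-only semantics
+ *
+ *	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ *			 AXGBE_KR_TRAINING_ENABLE, AXGBE_KR_TRAINING_ENABLE);
+ *
+ * reads the register, clears the mask bits and ORs in the new value; no
+ * INDEX/WIDTH shifting is involved.
+ */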
+
+/*
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a, b) ((long)((b) - (a)) < 0)
+#define time_before(a, b) time_after(b, a)
+
+#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)
+#define time_before_eq(a, b) time_after_eq(b, a)
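+
+/* These comparisons stay correct across counter wrap-around: e.g.
+ * time_after(1UL, ULONG_MAX) evaluates ((long)(ULONG_MAX - 1UL) < 0),
+ * which is true, so a timestamp just past the wrap compares as later.
+ */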
+
+/* Bitmap support APIs */
+static inline int axgbe_test_bit(int nr, volatile unsigned long *addr)
+{
+ int res;
+
+ rte_mb();
+ res = ((*addr) & (1UL << nr)) != 0;
+ rte_mb();
+ return res;
+}
+
+static inline void axgbe_set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+static inline void axgbe_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+static inline int axgbe_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << nr);
+
+ return __sync_fetch_and_and(addr, ~mask) & mask;
+}
+
+static inline unsigned long msecs_to_timer_cycles(unsigned int m)
+{
+	/* Multiply before dividing so sub-second values do not truncate
+	 * to zero cycles.
+	 */
+	return (rte_get_timer_hz() * m) / 1000;
+}
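+
+/* Usage sketch: a one-second deadline in timer cycles would be
+ *
+ *	uint64_t deadline = rte_get_timer_cycles() +
+ *			    msecs_to_timer_cycles(1000);
+ *
+ * checked with time_before()/time_after(), as the MDIO polling loops in
+ * axgbe_dev.c do with rte_get_timer_hz() directly.
+ */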
+
+#endif /* __AXGBE_COMMON_H__ */
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
new file mode 100644
index 00000000..707f1ee9
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -0,0 +1,1103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+#include "axgbe_rxtx.h"
+
+static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
+{
+ return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_HLEN;
+}
+
+/* query busy bit */
+static int mdio_complete(struct axgbe_port *pdata)
+{
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
+ return 1;
+
+ return 0;
+}
+
+static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ int reg, u16 val)
+{
+ unsigned int mdio_sca, mdio_sccd;
+ uint64_t timeout;
+
+ mdio_sca = 0;
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (mdio_complete(pdata))
+ return 0;
+ }
+
+	PMD_DRV_LOG(ERR, "MDIO write operation timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca, mdio_sccd;
+ uint64_t timeout;
+
+ mdio_sca = 0;
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (mdio_complete(pdata))
+ goto success;
+ }
+
+	PMD_DRV_LOG(ERR, "MDIO read operation timed out\n");
+ return -ETIMEDOUT;
+
+success:
+ return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
+}
+
+static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
+ enum axgbe_mdio_mode mode)
+{
+ unsigned int reg_val = 0;
+
+ switch (mode) {
+ case AXGBE_MDIO_MODE_CL22:
+ if (port > AXGMAC_MAX_C22_PORT)
+ return -EINVAL;
+ reg_val |= (1 << port);
+ break;
+ case AXGBE_MDIO_MODE_CL45:
+ break;
+ default:
+ return -EINVAL;
+ }
+ AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
+
+ return 0;
+}
+
+static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
+ int prtad __rte_unused, int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and reading 16 bits of data.
+ */
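+	/* Worked example (illustrative, assuming a window mask of 0xfff):
+	 * for MMD 1, register 0x0096 the C45 address is 0x10096; shifted
+	 * left it becomes 0x2012c, giving index 0x20000 (window select)
+	 * and offset xpcs_window + 0x12c.
+	 */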
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ pthread_mutex_lock(&pdata->xpcs_mutex);
+
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+
+ pthread_mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
+
+static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
+ int prtad __rte_unused,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address, index, offset;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and writing 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ pthread_mutex_lock(&pdata->xpcs_mutex);
+
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+
+ pthread_mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
+ int mmd_reg)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case AXGBE_XPCS_ACCESS_V1:
+ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ return -1;
+ case AXGBE_XPCS_ACCESS_V2:
+ default:
+ return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+ }
+}
+
+static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case AXGBE_XPCS_ACCESS_V1:
+ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ return;
+ case AXGBE_XPCS_ACCESS_V2:
+ default:
+ return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
+ }
+}
+
+static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
+{
+ unsigned int ss;
+
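+	/* MAC_TCR.SS speed-select encoding, per the cases below:
+	 * 0x00 = 10G, 0x02 = 2.5G, 0x03 = 1G.
+	 */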
+ switch (speed) {
+ case SPEED_1000:
+ ss = 0x03;
+ break;
+ case SPEED_2500:
+ ss = 0x02;
+ break;
+ case SPEED_10000:
+ ss = 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
+
+ return 0;
+}
+
+static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = RTE_MIN(pdata->tx_q_count,
+ max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = AXGMAC_IOREAD(pdata, reg);
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ unsigned int ehfc = 0;
+
+		/* Enable hardware flow control if thresholds are established */
+ if (pdata->rx_rfd[i])
+ ehfc = 1;
+
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+ }
+
+ /* Set MAC flow control */
+ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = RTE_MIN(pdata->tx_q_count,
+ max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = AXGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
+{
+ if (pdata->tx_pause)
+ axgbe_enable_tx_flow_control(pdata);
+ else
+ axgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
+{
+ if (pdata->rx_pause)
+ axgbe_enable_rx_flow_control(pdata);
+ else
+ axgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void axgbe_config_flow_control(struct axgbe_port *pdata)
+{
+ axgbe_config_tx_flow_control(pdata);
+ axgbe_config_rx_flow_control(pdata);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
+}
+
+static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
+ unsigned int queue,
+ unsigned int q_fifo_size)
+{
+ unsigned int frame_fifo_size;
+ unsigned int rfa, rfd;
+
+ frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));
+
+ /* This path deals with just maximum frame sizes which are
+ * limited to a jumbo frame of 9,000 (plus headers, etc.)
+ * so we can never exceed the maximum allowable RFA/RFD
+ * values.
+ */
+ if (q_fifo_size <= 2048) {
+		/* Set rx_rfd to zero to signal no flow control */
+ pdata->rx_rfa[queue] = 0;
+ pdata->rx_rfd[queue] = 0;
+ return;
+ }
+
+ if (q_fifo_size <= 4096) {
+ /* Between 2048 and 4096 */
+ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
+ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= frame_fifo_size) {
+ /* Between 4096 and max-frame */
+ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
+ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= (frame_fifo_size * 3)) {
+ /* Between max-frame and 3 max-frames,
+ * trigger if we get just over a frame of data and
+ * resume when we have just under half a frame left.
+ */
+ rfa = q_fifo_size - frame_fifo_size;
+ rfd = rfa + (frame_fifo_size / 2);
+ } else {
+ /* Above 3 max-frames - trigger when just over
+ * 2 frames of space available
+ */
+ rfa = frame_fifo_size * 2;
+ rfa += AXGMAC_FLOW_CONTROL_UNIT;
+ rfd = rfa + frame_fifo_size;
+ }
+
+ pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
+ pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
+}
+
+static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
+{
+ unsigned int q_fifo_size;
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
+
+ axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
+ }
+}
+
+static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
+ pdata->rx_rfa[i]);
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
+ pdata->rx_rfd[i]);
+ }
+}
+
+static int __axgbe_exit(struct axgbe_port *pdata)
+{
+ unsigned int count = 2000;
+
+ /* Issue a software reset */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ rte_delay_us(10);
+
+	/* Poll until the software reset completes */
+ while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ rte_delay_us(500);
+
+ if (!count)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int axgbe_exit(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* To guard against possible incorrectly generated interrupts,
+ * issue the software reset twice.
+ */
+ ret = __axgbe_exit(pdata);
+ if (ret)
+ return ret;
+
+ return __axgbe_exit(pdata);
+}
+
+static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
+{
+ unsigned int i, count;
+
+ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+	/* Poll until the per-queue flush completes */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ rte_delay_us(500);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void axgbe_config_dma_bus(struct axgbe_port *pdata)
+{
+ /* Set enhanced addressing mode */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+	/* Outstanding read/write requests */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
+
+ /* Set the System Bus mode */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
+}
+
+static void axgbe_config_dma_cache(struct axgbe_port *pdata)
+{
+ unsigned int arcache, awcache, arwcache;
+
+ arcache = 0;
+ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
+ AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
+ AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+
+ arwcache = 0;
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
+ AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
+}
+
+static void axgbe_config_edma_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
+ AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
+}
+
+static int axgbe_config_osp_mode(struct axgbe_port *pdata)
+{
+ /* Force DMA to operate on second packet before closing descriptors
+ * of first packet
+ */
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int axgbe_config_pblx8(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+ }
+ return 0;
+}
+
+static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+
+ rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
+ ~(AXGBE_RX_BUF_ALIGN - 1);
+
+ if (rxq->buf_size > pdata->rx_buf_size)
+ pdata->rx_buf_size = rxq->buf_size;
+
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
+ rxq->buf_size);
+ }
+}
+
+static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ return -EBUSY;
+
+ AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ return 0;
+
+ rte_delay_us(1500);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key;
+ int ret;
+
+ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+ if (!rss_conf->rss_key)
+ key = (unsigned int *)&pdata->rss_key;
+ else
+		key = (unsigned int *)rss_conf->rss_key;
+
+ while (key_regs--) {
+ ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = axgbe_write_rss_reg(pdata,
+ AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axgbe_enable_rss(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* Program the hash key */
+ ret = axgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = axgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static void axgbe_rss_options(struct axgbe_port *pdata)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ uint64_t rss_hf;
+
+ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ rss_hf = rss_conf->rss_hf;
+
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+}
+
+static int axgbe_config_rss(struct axgbe_port *pdata)
+{
+ uint32_t i;
+
+ if (pdata->rss_enable) {
+ /* Initialize RSS hash key and lookup table */
+ uint32_t *key = (uint32_t *)pdata->rss_key;
+
+ for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
+ *key++ = (uint32_t)rte_rand();
+ for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
+ AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->eth_dev->data->nb_rx_queues);
+ axgbe_rss_options(pdata);
+ if (axgbe_enable_rss(pdata)) {
+ PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
+ return -1;
+ }
+ } else {
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+ }
+
+ return 0;
+}
+
+static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+		/* Configure the summary interrupts:
+		 * NIE - Normal Interrupt Summary Enable (left disabled,
+		 *       since Rx/Tx completions are handled by polling)
+		 * AIE - Abnormal Interrupt Summary Enable
+		 * FBEE - Fatal Bus Error Enable
+		 */
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+		/* Rx interrupts:
+		 * RBUE - Receive Buffer Unavailable Enable (left disabled,
+		 *        since the polled Rx path refills its own buffers)
+		 */
+		AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static void wrapper_tx_desc_init(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ txq->cur = 0;
+ txq->dirty = 0;
+ /* Update the total number of Tx descriptors */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
+ /* Update the starting address of descriptor ring */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
+ high32_value(txq->ring_phys_addr));
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
+ low32_value(txq->ring_phys_addr));
+ }
+}
+
+static int wrapper_rx_desc_init(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ struct rte_mbuf *mbuf;
+ volatile union axgbe_rx_desc *desc;
+ unsigned int i, j;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+
+ /* Initialize software ring entries */
+ rxq->mbuf_alloc = 0;
+ rxq->cur = 0;
+ rxq->dirty = 0;
+ desc = AXGBE_GET_DESC_PT(rxq, 0);
+
+ for (j = 0; j < rxq->nb_desc; j++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
+ (unsigned int)rxq->queue_id, j);
+ axgbe_dev_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->sw_ring[j] = mbuf;
+			/* Populate the mbuf fields */
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ desc->read.baddr =
+ rte_cpu_to_le_64(
+ rte_mbuf_data_iova_default(mbuf));
+ rte_wmb();
+ AXGMAC_SET_BITS_LE(desc->read.desc3,
+ RX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+ rxq->mbuf_alloc++;
+ desc++;
+ }
+ /* Update the total number of Rx descriptors */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
+ rxq->nb_desc - 1);
+ /* Update the starting address of descriptor ring */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
+ high32_value(rxq->ring_phys_addr));
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
+ low32_value(rxq->ring_phys_addr));
+ /* Update the Rx Descriptor Tail Pointer */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (rxq->nb_desc - 1) *
+ sizeof(union axgbe_rx_desc)));
+ }
+ return 0;
+}
+
+static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+ }
+
+ /* Set Rx to strict priority algorithm */
+ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+/* Distribute the Rx fifo size across the Rx queues */
+static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo, i;
+
+ fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+ pdata->hw_feat.rx_fifo_size);
+ q_fifo_size = fifo_size / pdata->rx_q_count;
+
+	/* Calculate the fifo setting by dividing the queue's fifo size
+	 * by the fifo allocation increment (a programmed value of 0
+	 * represents one increment, so decrement the result by 1).
+	 */
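+	/* Worked example (illustrative, assuming AXGMAC_FIFO_UNIT is 256):
+	 * a 229376-byte fifo split across 8 Rx queues gives a q_fifo_size
+	 * of 28672, so p_fifo = 28672 / 256 - 1 = 111 is programmed as RQS.
+	 */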
+ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
+ pdata->fifo = p_fifo;
+
+	/* Calculate and configure the flow control thresholds */
+ axgbe_calculate_flow_control_threshold(pdata);
+ axgbe_config_flow_control_threshold(pdata);
+}
+
+static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo, i;
+
+ fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+ pdata->hw_feat.tx_fifo_size);
+ q_fifo_size = fifo_size / pdata->tx_q_count;
+
+	/* Calculate the fifo setting by dividing the queue's fifo size
+	 * by the fifo allocation increment (a programmed value of 0
+	 * represents one increment, so decrement the result by 1).
+	 */
+ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
+}
+
+static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
+{
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int i, j, reg, reg_val;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+		for (j = 0; j < qptc; j++)
+			AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
+						Q2TCMAP, i);
+		if (i < qptc_extra)
+			AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
+						Q2TCMAP, i);
+ }
+
+ if (pdata->rss_enable) {
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
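+		/* Each MTL_RQDCM*R register maps MTL_RQDCM_Q_PER_REG queues,
+		 * one byte per queue; writing 0x80 into a queue's byte lane
+		 * (assumed here to be the dynamic-mapping enable bit) selects
+		 * dynamic DMA channel mapping for that queue.
+		 */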
+ for (i = 0; i < pdata->rx_q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) &&
+ (i != pdata->rx_q_count))
+ continue;
+
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+ }
+}
+
+static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+ }
+}
+
+static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
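+	/* E.g. for 00:11:22:33:44:55 this yields mac_addr_hi = 0x5544 and
+	 * mac_addr_lo = 0x33221100, i.e. the address is packed lowest byte
+	 * first.
+	 */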
+
+ AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
+static void axgbe_config_mac_address(struct axgbe_port *pdata)
+{
+ axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
+}
+
+static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void axgbe_config_mac_speed(struct axgbe_port *pdata)
+{
+ axgbe_set_speed(pdata, pdata->phy_speed);
+}
+
+static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
+{
+ if (pdata->rx_csum_enable)
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+ else
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+}
+
+static int axgbe_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* Flush Tx queues */
+ ret = axgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+ /* Initialize DMA related features */
+ axgbe_config_dma_bus(pdata);
+ axgbe_config_dma_cache(pdata);
+ axgbe_config_edma_control(pdata);
+ axgbe_config_osp_mode(pdata);
+ axgbe_config_pblx8(pdata);
+ axgbe_config_tx_pbl_val(pdata);
+ axgbe_config_rx_pbl_val(pdata);
+ axgbe_config_rx_buffer_size(pdata);
+ axgbe_config_rss(pdata);
+ wrapper_tx_desc_init(pdata);
+ ret = wrapper_rx_desc_init(pdata);
+ if (ret)
+ return ret;
+ axgbe_enable_dma_interrupts(pdata);
+
+ /* Initialize MTL related features */
+ axgbe_config_mtl_mode(pdata);
+ axgbe_config_queue_mapping(pdata);
+ axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ axgbe_config_tx_fifo_size(pdata);
+ axgbe_config_rx_fifo_size(pdata);
+
+ axgbe_enable_mtl_interrupts(pdata);
+
+ /* Initialize MAC related features */
+ axgbe_config_mac_address(pdata);
+ axgbe_config_jumbo_enable(pdata);
+ axgbe_config_flow_control(pdata);
+ axgbe_config_mac_speed(pdata);
+ axgbe_config_checksum_offload(pdata);
+
+ return 0;
+}
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
+{
+ hw_if->exit = axgbe_exit;
+ hw_if->config_flow_control = axgbe_config_flow_control;
+
+ hw_if->init = axgbe_init;
+
+ hw_if->read_mmd_regs = axgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = axgbe_write_mmd_regs;
+
+ hw_if->set_speed = axgbe_set_speed;
+
+ hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
+ /* For FLOW ctrl */
+ hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
+}
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
new file mode 100644
index 00000000..9ae9f063
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -0,0 +1,770 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_rxtx.h"
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int axgbe_dev_configure(struct rte_eth_dev *dev);
+static int axgbe_dev_start(struct rte_eth_dev *dev);
+static void axgbe_dev_stop(struct rte_eth_dev *dev);
+static void axgbe_dev_interrupt_handler(void *param);
+static void axgbe_dev_close(struct rte_eth_dev *dev);
+static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+
+/* The set of PCI devices this driver supports */
+#define AMD_PCI_VENDOR_ID 0x1022
+#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
+#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459
+
+int axgbe_logtype_init;
+int axgbe_logtype_driver;
+
+static const struct rte_pci_id pci_id_axgbe_map[] = {
+ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
+ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
+ { .vendor_id = 0, },
+};
+
+static struct axgbe_version_data axgbe_v2a = {
+ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 229376,
+ .rx_max_fifo_size = 229376,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .an_cdr_workaround = 1,
+};
+
+static struct axgbe_version_data axgbe_v2b = {
+ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .an_cdr_workaround = 1,
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = AXGBE_MAX_RING_DESC,
+ .nb_min = AXGBE_MIN_RING_DESC,
+ .nb_align = 8,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = AXGBE_MAX_RING_DESC,
+ .nb_min = AXGBE_MIN_RING_DESC,
+ .nb_align = 8,
+};
+
+static const struct eth_dev_ops axgbe_eth_dev_ops = {
+ .dev_configure = axgbe_dev_configure,
+ .dev_start = axgbe_dev_start,
+ .dev_stop = axgbe_dev_stop,
+ .dev_close = axgbe_dev_close,
+ .promiscuous_enable = axgbe_dev_promiscuous_enable,
+ .promiscuous_disable = axgbe_dev_promiscuous_disable,
+ .allmulticast_enable = axgbe_dev_allmulticast_enable,
+ .allmulticast_disable = axgbe_dev_allmulticast_disable,
+ .link_update = axgbe_dev_link_update,
+ .stats_get = axgbe_dev_stats_get,
+ .stats_reset = axgbe_dev_stats_reset,
+ .dev_infos_get = axgbe_dev_info_get,
+ .rx_queue_setup = axgbe_dev_rx_queue_setup,
+ .rx_queue_release = axgbe_dev_rx_queue_release,
+ .tx_queue_setup = axgbe_dev_tx_queue_setup,
+ .tx_queue_release = axgbe_dev_tx_queue_release,
+};
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ return pdata->phy_if.phy_reset(pdata);
+}
+
+/*
+ * Interrupt handler triggered by the NIC for handling
+ * device-specific interrupts.
+ *
+ * @param param
+ *  The address of the parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+axgbe_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int dma_isr, dma_ch_isr;
+
+ pdata->phy_if.an_isr(pdata);
+	/* DMA-related interrupts */
+ dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
+ if (dma_isr) {
+ if (dma_isr & 1) {
+ dma_ch_isr =
+ AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR);
+ AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR, dma_ch_isr);
+ }
+ }
+	/* Re-enable interrupts; they are disabled after generation */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+axgbe_dev_configure(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ /* Checksum offload to hardware */
+ pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CHECKSUM;
+ return 0;
+}
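+
+/* Illustrative (hypothetical) application-side sketch, not part of the
+ * driver: requesting the Rx checksum offload that axgbe_dev_configure()
+ * latches above. The port_id variable and queue counts are placeholders.
+ *
+ *	struct rte_eth_conf conf;
+ *
+ *	memset(&conf, 0, sizeof(conf));
+ *	conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ *	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
+ */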
+
+static int
+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ pdata->rss_enable = 1;
+ else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ pdata->rss_enable = 0;
+ else
+ return -1;
+ return 0;
+}
+
+static int
+axgbe_dev_start(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+ int ret;
+
+ /* Multiqueue RSS */
+ ret = axgbe_dev_rx_mq_config(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+ return ret;
+ }
+ ret = axgbe_phy_reset(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "phy reset failed\n");
+ return ret;
+ }
+ ret = pdata->hw_if.init(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dev_init failed\n");
+ return ret;
+ }
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+	/* PHY start */
+ pdata->phy_if.phy_start(pdata);
+ axgbe_dev_enable_tx(dev);
+ axgbe_dev_enable_rx(dev);
+
+ axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+ return 0;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+axgbe_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+ if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
+ return;
+
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_dev_disable_tx(dev);
+ axgbe_dev_disable_rx(dev);
+
+ pdata->phy_if.phy_stop(pdata);
+ pdata->hw_if.exit(pdata);
+ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+}
+
+/* Clear all resources like TX/RX queues. */
+static void
+axgbe_dev_close(struct rte_eth_dev *dev)
+{
+ axgbe_dev_clear_queues(dev);
+}
+
+static void
+axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
+}
+
+static void
+axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
+}
+
+static void
+axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
+}
+
+static void
+axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ struct rte_eth_link link;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ rte_delay_ms(800);
+
+ pdata->phy_if.phy_status(pdata);
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ link.link_duplex = pdata->phy.duplex;
+ link.link_status = pdata->phy_link;
+ link.link_speed = pdata->phy_speed;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == -1)
+ PMD_DRV_LOG(ERR, "No change in link status\n");
+
+ return ret;
+}
+
+static int
+axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ stats->q_ipackets[i] = rxq->pkts;
+ stats->ipackets += rxq->pkts;
+ stats->q_ibytes[i] = rxq->bytes;
+ stats->ibytes += rxq->bytes;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ stats->q_opackets[i] = txq->pkts;
+ stats->opackets += txq->pkts;
+ stats->q_obytes[i] = txq->bytes;
+ stats->obytes += txq->bytes;
+ }
+
+ return 0;
+}
+
+static void
+axgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->pkts = 0;
+ rxq->bytes = 0;
+ rxq->errors = 0;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ txq->pkts = 0;
+ txq->bytes = 0;
+ txq->errors = 0;
+ }
+}
+
+static void
+axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ dev_info->max_rx_queues = pdata->rx_ring_count;
+ dev_info->max_tx_queues = pdata->tx_ring_count;
+ dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
+ dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
+ dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC;
+
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (pdata->hw_feat.rss) {
+ dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
+ dev_info->reta_size = pdata->hw_feat.hash_table_size;
+ dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
+ }
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = AXGBE_RX_FREE_THRESH,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = AXGBE_TX_FREE_THRESH,
+ };
+}
+
+static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
+ MAC_HWF1R, ADVTHWORD);
+ hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
+ hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
+ hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
+ hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
+ AUXSNAPNUM);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+
+ /* Translate the fifo sizes into actual numbers */
+ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
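+
+	/* Worked example (assumed encoding, per the shifts above): an
+	 * encoded fifo size of 7 translates to 1 << (7 + 7) = 16384 bytes.
+	 */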
+}
+
+static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
+{
+ axgbe_init_function_ptrs_dev(&pdata->hw_if);
+ axgbe_init_function_ptrs_phy(&pdata->phy_if);
+ axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
+ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
+}
+
+static void axgbe_set_counts(struct axgbe_port *pdata)
+{
+ /* Set all the function pointers */
+ axgbe_init_all_fptrs(pdata);
+
+ /* Populate the hardware features */
+ axgbe_get_all_hw_features(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_channel_count)
+ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
+ if (!pdata->rx_max_channel_count)
+ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
+
+ if (!pdata->tx_max_q_count)
+ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
+ if (!pdata->rx_max_q_count)
+ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
+
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+ * number of Rx queues or maximum allowed
+ */
+ pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
+ pdata->tx_max_channel_count);
+ pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
+ pdata->tx_max_q_count);
+
+ pdata->tx_q_count = pdata->tx_ring_count;
+
+ pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
+ pdata->rx_max_channel_count);
+
+ pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
+ pdata->rx_max_q_count);
+}
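+
+/* Worked example (hypothetical register values): if the hardware reports
+ * tx_ch_cnt = 4 and tx_q_cnt = 4 (after the zero-based increments in
+ * axgbe_get_all_hw_features()) and XP_PROP_1 limits tx_max_channel_count
+ * and tx_max_q_count to 4, then tx_ring_count = RTE_MIN(RTE_MIN(4, 4), 4)
+ * = 4 and tx_q_count = 4.
+ */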
+
+static void axgbe_default_config(struct axgbe_port *pdata)
+{
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_32;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_ENABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_32;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->power_down = 0;
+}
+
+/*
+ * Initialise the ethdev: map registers, read the MAC address and set up
+ * the PHY. It returns 0 on success.
+ */
+static int
+eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, mac_lo, mac_hi;
+ int ret;
+
+ eth_dev->dev_ops = &axgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+ /* initial state */
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ pdata->eth_dev = eth_dev;
+
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pdata->pci_dev = pci_dev;
+
+ pdata->xgmac_regs =
+ (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+ pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_MAC_PROP_OFFSET);
+ pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_I2C_CTRL_OFFSET);
+ pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+
+	/* Version-specific driver data */
+ if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
+ pdata->vdata = &axgbe_v2a;
+ else
+ pdata->vdata = &axgbe_v2b;
+
+ /* Configure the PCS indirect addressing support */
+ reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ PMD_INIT_LOG(DEBUG,
+ "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
+ pdata->xpcs_window_size, pdata->xpcs_window_mask);
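+	/* Worked example (hypothetical register value): an OFFSET field of
+	 * 0x30 gives xpcs_window = 0x30 << 6 = 0xc00, while a SIZE field of
+	 * 3 gives xpcs_window_size = 1 << (3 + 7) = 1024 and
+	 * xpcs_window_mask = 0x3ff.
+	 */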
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+ /* Retrieve the MAC address */
+ mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
+ mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
+ pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
+ pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
+ pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
+ pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
+ pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
+ pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
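+
+	/* Worked example (hypothetical register values): mac_lo = 0xddccbbaa
+	 * and mac_hi = 0x0000ffee yield the MAC address aa:bb:cc:dd:ee:ff.
+	 */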
+
+ eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
+ ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR,
+ "Failed to alloc %u bytes needed to store MAC addr tbl",
+ ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
+ eth_random_addr(pdata->mac_addr.addr_bytes);
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+ /* Clock settings */
+ pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
+ pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+ pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = AXGBE_DMA_OS_ARCACHE;
+ pdata->awcache = AXGBE_DMA_OS_AWCACHE;
+
+ /* Set the maximum channels and queues */
+ reg = XP_IOREAD(pdata, XP_PROP_1);
+ pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
+ pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
+ pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
+ pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
+
+ /* Set the hardware channel and queue counts */
+ axgbe_set_counts(pdata);
+
+ /* Set the maximum fifo amounts */
+ reg = XP_IOREAD(pdata, XP_PROP_2);
+ pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size *= 16384;
+ pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+ pdata->vdata->tx_max_fifo_size);
+ pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size *= 16384;
+ pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+ pdata->vdata->rx_max_fifo_size);
+ /* Issue software reset to DMA */
+ ret = pdata->hw_if.exit(pdata);
+ if (ret)
+ PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
+
+ /* Set default configuration data */
+ axgbe_default_config(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_fifo_size)
+ pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+ if (!pdata->rx_max_fifo_size)
+ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+ pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
+ pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
+ pthread_mutex_init(&pdata->xpcs_mutex, NULL);
+ pthread_mutex_init(&pdata->i2c_mutex, NULL);
+ pthread_mutex_init(&pdata->an_mutex, NULL);
+ pthread_mutex_init(&pdata->phy_mutex, NULL);
+
+ ret = pdata->phy_if.phy_init(pdata);
+ if (ret) {
+ rte_free(eth_dev->data->mac_addrs);
+ return ret;
+ }
+
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ axgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ return 0;
+}
+
+static int
+eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+	/* Free MAC addresses */
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ axgbe_dev_clear_queues(eth_dev);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ axgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ return 0;
+}
+
+static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct axgbe_port), eth_axgbe_dev_init);
+}
+
+static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_axgbe_pmd = {
+ .id_table = pci_id_axgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_axgbe_pci_probe,
+ .remove = eth_axgbe_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(axgbe_init_log)
+{
+ axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
+ if (axgbe_logtype_init >= 0)
+ rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
+ axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
+ if (axgbe_logtype_driver >= 0)
+ rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
new file mode 100644
index 00000000..b1cd2980
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef RTE_ETH_AXGBE_H_
+#define RTE_ETH_AXGBE_H_
+
+#include <rte_mempool.h>
+#include <rte_lcore.h>
+#include "axgbe_common.h"
+
+#define IRQ 0xff
+#define VLAN_HLEN 4
+
+#define AXGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define AXGBE_RX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define AXGBE_RX_MIN_BUF_SIZE (ETHER_MAX_LEN + VLAN_HLEN)
+#define AXGBE_MAX_MAC_ADDRS 1
+
+#define AXGBE_RX_BUF_ALIGN 64
+
+#define AXGBE_MAX_DMA_CHANNELS 16
+#define AXGBE_MAX_QUEUES 16
+#define AXGBE_PRIORITY_QUEUES 8
+#define AXGBE_DMA_STOP_TIMEOUT 1
+
+/* DMA cache settings - Outer shareable, write-back, write-allocate */
+#define AXGBE_DMA_OS_AXDOMAIN 0x2
+#define AXGBE_DMA_OS_ARCACHE 0xb
+#define AXGBE_DMA_OS_AWCACHE 0xf
+
+/* DMA cache settings - System, no caches used */
+#define AXGBE_DMA_SYS_AXDOMAIN 0x3
+#define AXGBE_DMA_SYS_ARCACHE 0x0
+#define AXGBE_DMA_SYS_AWCACHE 0x0
+
+/* DMA channel interrupt modes */
+#define AXGBE_IRQ_MODE_EDGE 0
+#define AXGBE_IRQ_MODE_LEVEL 1
+
+#define AXGBE_DMA_INTERRUPT_MASK 0x31c7
+
+#define AXGMAC_MIN_PACKET 60
+#define AXGMAC_STD_PACKET_MTU 1500
+#define AXGMAC_MAX_STD_PACKET 1518
+#define AXGMAC_JUMBO_PACKET_MTU 9000
+#define AXGMAC_MAX_JUMBO_PACKET 9018
+/* Inter-frame gap + preamble */
+#define AXGMAC_ETH_PREAMBLE (12 + 8)
+
+#define AXGMAC_PFC_DATA_LEN 46
+#define AXGMAC_PFC_DELAYS 14000
+
+/* PCI BAR mapping */
+#define AXGBE_AXGMAC_BAR 0
+#define AXGBE_XPCS_BAR 1
+#define AXGBE_MAC_PROP_OFFSET 0x1d000
+#define AXGBE_I2C_CTRL_OFFSET 0x1e000
+
+/* PCI clock frequencies */
+#define AXGBE_V2_DMA_CLOCK_FREQ 500000000
+#define AXGBE_V2_PTP_CLOCK_FREQ 125000000
+
+#define AXGMAC_FIFO_MIN_ALLOC 2048
+#define AXGMAC_FIFO_UNIT 256
+#define AXGMAC_FIFO_ALIGN(_x) \
+	(((_x) + AXGMAC_FIFO_UNIT - 1) & ~(AXGMAC_FIFO_UNIT - 1))
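+/* Worked example: AXGMAC_FIFO_ALIGN(1000) = (1000 + 255) & ~255 = 1024,
+ * rounding up to the next AXGMAC_FIFO_UNIT boundary.
+ */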
+#define AXGMAC_FIFO_FC_OFF 2048
+#define AXGMAC_FIFO_FC_MIN 4096
+
+#define AXGBE_TC_MIN_QUANTUM 10
+
+/* Flow control queue count */
+#define AXGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* Flow control threshold units */
+#define AXGMAC_FLOW_CONTROL_UNIT 512
+#define AXGMAC_FLOW_CONTROL_ALIGN(_x) \
+ (((_x) + AXGMAC_FLOW_CONTROL_UNIT - 1) & \
+ ~(AXGMAC_FLOW_CONTROL_UNIT - 1))
+#define AXGMAC_FLOW_CONTROL_VALUE(_x) \
+ (((_x) < 1024) ? 0 : ((_x) / AXGMAC_FLOW_CONTROL_UNIT) - 2)
+#define AXGMAC_FLOW_CONTROL_MAX 33280
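+
+/* Worked example: AXGMAC_FLOW_CONTROL_VALUE(4096) = 4096 / 512 - 2 = 6,
+ * while any threshold below 1024 bytes encodes to 0.
+ */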
+
+/* Maximum MAC address hash table size (256 bits = 8 32-bit words) */
+#define AXGBE_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define AXGBE_RSS_OFFLOAD ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define AXGBE_RSS_HASH_KEY_SIZE 40
+#define AXGBE_RSS_MAX_TABLE_SIZE 256
+#define AXGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define AXGBE_RSS_HASH_KEY_TYPE 1
+
+/* Auto-negotiation */
+#define AXGBE_AN_MS_TIMEOUT 500
+#define AXGBE_LINK_TIMEOUT 5
+
+#define AXGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define AXGBE_SGMII_AN_LINK_SPEED_100 0x04
+#define AXGBE_SGMII_AN_LINK_SPEED_1000 0x08
+#define AXGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define AXGBE_ECC_LIMIT 60
+
+/* MDIO port types */
+#define AXGMAC_MAX_C22_PORT 3
+
+/* Helper macro for descriptor handling
+ * Always use AXGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be AND-ed with the
+ * ring's descriptor count minus one (the count must be a power of
+ * two) to index the proper descriptor data.
+ */
+#define AXGBE_GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
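+
+/* Illustrative (hypothetical) usage sketch: with rdesc_count = 512 (a
+ * power of two), a free-running index of 513 wraps to entry 1:
+ *
+ *	rdata = AXGBE_GET_DESC_DATA(ring, 513);	 == &ring->rdata[1]
+ *
+ * The ring variable is a placeholder for the Rx/Tx ring structures
+ * defined in the queue handling code.
+ */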
+
+struct axgbe_port;
+
+enum axgbe_state {
+ AXGBE_DOWN,
+ AXGBE_LINK_INIT,
+ AXGBE_LINK_ERR,
+ AXGBE_STOPPED,
+};
+
+enum axgbe_int {
+ AXGMAC_INT_DMA_CH_SR_TI,
+ AXGMAC_INT_DMA_CH_SR_TPS,
+ AXGMAC_INT_DMA_CH_SR_TBU,
+ AXGMAC_INT_DMA_CH_SR_RI,
+ AXGMAC_INT_DMA_CH_SR_RBU,
+ AXGMAC_INT_DMA_CH_SR_RPS,
+ AXGMAC_INT_DMA_CH_SR_TI_RI,
+ AXGMAC_INT_DMA_CH_SR_FBE,
+ AXGMAC_INT_DMA_ALL,
+};
+
+enum axgbe_int_state {
+ AXGMAC_INT_STATE_SAVE,
+ AXGMAC_INT_STATE_RESTORE,
+};
+
+enum axgbe_ecc_sec {
+ AXGBE_ECC_SEC_TX,
+ AXGBE_ECC_SEC_RX,
+ AXGBE_ECC_SEC_DESC,
+};
+
+enum axgbe_speed {
+ AXGBE_SPEED_1000 = 0,
+ AXGBE_SPEED_2500,
+ AXGBE_SPEED_10000,
+ AXGBE_SPEEDS,
+};
+
+enum axgbe_xpcs_access {
+ AXGBE_XPCS_ACCESS_V1 = 0,
+ AXGBE_XPCS_ACCESS_V2,
+};
+
+enum axgbe_an_mode {
+ AXGBE_AN_MODE_CL73 = 0,
+ AXGBE_AN_MODE_CL73_REDRV,
+ AXGBE_AN_MODE_CL37,
+ AXGBE_AN_MODE_CL37_SGMII,
+ AXGBE_AN_MODE_NONE,
+};
+
+enum axgbe_an {
+ AXGBE_AN_READY = 0,
+ AXGBE_AN_PAGE_RECEIVED,
+ AXGBE_AN_INCOMPAT_LINK,
+ AXGBE_AN_COMPLETE,
+ AXGBE_AN_NO_LINK,
+ AXGBE_AN_ERROR,
+};
+
+enum axgbe_rx {
+ AXGBE_RX_BPA = 0,
+ AXGBE_RX_XNP,
+ AXGBE_RX_COMPLETE,
+ AXGBE_RX_ERROR,
+};
+
+enum axgbe_mode {
+ AXGBE_MODE_KX_1000 = 0,
+ AXGBE_MODE_KX_2500,
+ AXGBE_MODE_KR,
+ AXGBE_MODE_X,
+ AXGBE_MODE_SGMII_100,
+ AXGBE_MODE_SGMII_1000,
+ AXGBE_MODE_SFI,
+ AXGBE_MODE_UNKNOWN,
+};
+
+enum axgbe_speedset {
+ AXGBE_SPEEDSET_1000_10000 = 0,
+ AXGBE_SPEEDSET_2500_10000,
+};
+
+enum axgbe_mdio_mode {
+ AXGBE_MDIO_MODE_NONE = 0,
+ AXGBE_MDIO_MODE_CL22,
+ AXGBE_MDIO_MODE_CL45,
+};
+
+struct axgbe_phy {
+ uint32_t supported;
+ uint32_t advertising;
+ uint32_t lp_advertising;
+
+ int address;
+
+ int autoneg;
+ int speed;
+ int duplex;
+
+ int link;
+
+ int pause_autoneg;
+ int tx_pause;
+ int rx_pause;
+};
+
+enum axgbe_i2c_cmd {
+ AXGBE_I2C_CMD_READ = 0,
+ AXGBE_I2C_CMD_WRITE,
+};
+
+struct axgbe_i2c_op {
+ enum axgbe_i2c_cmd cmd;
+
+ unsigned int target;
+
+ uint8_t *buf;
+ unsigned int len;
+};
+
+struct axgbe_i2c_op_state {
+ struct axgbe_i2c_op *op;
+
+ unsigned int tx_len;
+ unsigned char *tx_buf;
+
+ unsigned int rx_len;
+ unsigned char *rx_buf;
+
+ unsigned int tx_abort_source;
+
+ int ret;
+};
+
+struct axgbe_i2c {
+ unsigned int started;
+ unsigned int max_speed_mode;
+ unsigned int rx_fifo_size;
+ unsigned int tx_fifo_size;
+
+ struct axgbe_i2c_op_state op_state;
+};
+
+struct axgbe_hw_if {
+ void (*config_flow_control)(struct axgbe_port *);
+ int (*config_rx_mode)(struct axgbe_port *);
+
+ int (*init)(struct axgbe_port *);
+
+ int (*read_mmd_regs)(struct axgbe_port *, int, int);
+ void (*write_mmd_regs)(struct axgbe_port *, int, int, int);
+ int (*set_speed)(struct axgbe_port *, int);
+
+ int (*set_ext_mii_mode)(struct axgbe_port *, unsigned int,
+ enum axgbe_mdio_mode);
+ int (*read_ext_mii_regs)(struct axgbe_port *, int, int);
+ int (*write_ext_mii_regs)(struct axgbe_port *, int, int, uint16_t);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct axgbe_port *);
+ int (*config_rx_flow_control)(struct axgbe_port *);
+
+ int (*exit)(struct axgbe_port *);
+};
+
+/* This structure represents implementation specific routines for an
+ * implementation of a PHY. All routines are required unless noted below.
+ * Optional routines:
+ * kr_training_pre, kr_training_post
+ */
+struct axgbe_phy_impl_if {
+ /* Perform Setup/teardown actions */
+ int (*init)(struct axgbe_port *);
+ void (*exit)(struct axgbe_port *);
+
+ /* Perform start/stop specific actions */
+ int (*reset)(struct axgbe_port *);
+ int (*start)(struct axgbe_port *);
+ void (*stop)(struct axgbe_port *);
+
+ /* Return the link status */
+ int (*link_status)(struct axgbe_port *, int *);
+
+ /* Indicate if a particular speed is valid */
+ int (*valid_speed)(struct axgbe_port *, int);
+
+ /* Check if the specified mode can/should be used */
+ bool (*use_mode)(struct axgbe_port *, enum axgbe_mode);
+ /* Switch the PHY into various modes */
+ void (*set_mode)(struct axgbe_port *, enum axgbe_mode);
+ /* Retrieve mode needed for a specific speed */
+ enum axgbe_mode (*get_mode)(struct axgbe_port *, int);
+ /* Retrieve new/next mode when trying to auto-negotiate */
+ enum axgbe_mode (*switch_mode)(struct axgbe_port *);
+ /* Retrieve current mode */
+ enum axgbe_mode (*cur_mode)(struct axgbe_port *);
+
+ /* Retrieve current auto-negotiation mode */
+ enum axgbe_an_mode (*an_mode)(struct axgbe_port *);
+
+ /* Configure auto-negotiation settings */
+ int (*an_config)(struct axgbe_port *);
+
+ /* Set/override auto-negotiation advertisement settings */
+ unsigned int (*an_advertising)(struct axgbe_port *port);
+
+ /* Process results of auto-negotiation */
+ enum axgbe_mode (*an_outcome)(struct axgbe_port *);
+
+ /* Pre/Post auto-negotiation support */
+ void (*an_pre)(struct axgbe_port *port);
+ void (*an_post)(struct axgbe_port *port);
+
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct axgbe_port *);
+ void (*kr_training_post)(struct axgbe_port *);
+};
+
+struct axgbe_phy_if {
+ /* For PHY setup/teardown */
+ int (*phy_init)(struct axgbe_port *);
+ void (*phy_exit)(struct axgbe_port *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct axgbe_port *);
+ int (*phy_start)(struct axgbe_port *);
+ void (*phy_stop)(struct axgbe_port *);
+
+ /* For PHY support while device is up */
+ void (*phy_status)(struct axgbe_port *);
+ int (*phy_config_aneg)(struct axgbe_port *);
+
+ /* For PHY settings validation */
+ int (*phy_valid_speed)(struct axgbe_port *, int);
+ /* For single interrupt support */
+ void (*an_isr)(struct axgbe_port *);
+ /* PHY implementation specific services */
+ struct axgbe_phy_impl_if phy_impl;
+};
+
+struct axgbe_i2c_if {
+ /* For initial I2C setup */
+ int (*i2c_init)(struct axgbe_port *);
+
+ /* For I2C support when setting device up/down */
+ int (*i2c_start)(struct axgbe_port *);
+ void (*i2c_stop)(struct axgbe_port *);
+
+ /* For performing I2C operations */
+ int (*i2c_xfer)(struct axgbe_port *, struct axgbe_i2c_op *);
+};
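+
+/* Illustrative (hypothetical) usage sketch of the I2C interface above:
+ * read one byte from a target device. The target address 0x50 and the
+ * caller context (pdata, ret) are placeholders; the real callers live in
+ * the PHY implementation code.
+ *
+ *	struct axgbe_i2c_op op;
+ *	uint8_t buf[1];
+ *
+ *	op.cmd = AXGBE_I2C_CMD_READ;
+ *	op.target = 0x50;
+ *	op.buf = buf;
+ *	op.len = sizeof(buf);
+ *	ret = pdata->i2c_if.i2c_xfer(pdata, &op);
+ */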
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct axgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advance Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct axgbe_version_data {
+ void (*init_function_ptrs_phy_impl)(struct axgbe_phy_if *);
+ enum axgbe_xpcs_access xpcs_access;
+ unsigned int mmc_64bit;
+ unsigned int tx_max_fifo_size;
+ unsigned int rx_max_fifo_size;
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
+ unsigned int an_cdr_workaround;
+};
+
+/*
+ * Structure to store private data for each port.
+ */
+struct axgbe_port {
+	/* Ethdev to which the port belongs */
+	struct rte_eth_dev *eth_dev;
+	/* PCI device info */
+ const struct rte_pci_device *pci_dev;
+ /* Version related data */
+ struct axgbe_version_data *vdata;
+
+ /* AXGMAC/XPCS related mmio registers */
+ void *xgmac_regs; /* AXGMAC CSRs */
+ void *xpcs_regs; /* XPCS MMD registers */
+ void *xprop_regs; /* AXGBE property registers */
+ void *xi2c_regs; /* AXGBE I2C CSRs */
+
+ bool cdr_track_early;
+	/* XPCS indirect addressing window settings */
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+
+ /* Flags representing axgbe_state */
+ unsigned long dev_state;
+
+ struct axgbe_hw_if hw_if;
+ struct axgbe_phy_if phy_if;
+ struct axgbe_i2c_if i2c_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+
+ unsigned int tx_max_channel_count;
+ unsigned int rx_max_channel_count;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ unsigned int tx_max_q_count;
+ unsigned int rx_max_q_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+ unsigned int tx_max_fifo_size;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+ unsigned int rx_max_fifo_size;
+ unsigned int rx_buf_size;
+
+ /* Device clocks */
+ unsigned long sysclk_rate;
+ unsigned long ptpclk_rate;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+ /* Current PHY settings */
+ int phy_link;
+ int phy_speed;
+
+ pthread_mutex_t xpcs_mutex;
+ pthread_mutex_t i2c_mutex;
+ pthread_mutex_t an_mutex;
+ pthread_mutex_t phy_mutex;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+ unsigned int rx_rfa[AXGBE_MAX_QUEUES];
+ unsigned int rx_rfd[AXGBE_MAX_QUEUES];
+ unsigned int fifo;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[AXGBE_RSS_HASH_KEY_SIZE];
+ uint32_t rss_table[AXGBE_RSS_MAX_TABLE_SIZE];
+ uint32_t rss_options;
+ int rss_enable;
+
+ /* Hardware features of the device */
+ struct axgbe_hw_features hw_feat;
+
+ struct ether_addr mac_addr;
+
+	/* Software Tx/Rx queue structure pointers */
+ void **rx_queues;
+ void **tx_queues;
+
+ /* MDIO/PHY related settings */
+ unsigned int phy_started;
+ void *phy_data;
+ struct axgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+ volatile int mdio_completion;
+
+ unsigned int kr_redrv;
+
+	/* Auto-negotiation state machine support */
+ unsigned int an_int;
+ unsigned int an_status;
+ enum axgbe_an an_result;
+ enum axgbe_an an_state;
+ enum axgbe_rx kr_state;
+ enum axgbe_rx kx_state;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
+ enum axgbe_an_mode an_mode;
+
+ /* I2C support */
+ struct axgbe_i2c i2c;
+ volatile int i2c_complete;
+
+	/* CRC stripping by hardware for Rx packets */
+	int crc_strip_enable;
+	/* Rx checksum offload to hardware */
+ uint32_t rx_csum_enable;
+};
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if);
+
+#endif /* RTE_ETH_AXGBE_H_ */
diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c
new file mode 100644
index 00000000..204ec367
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_i2c.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+
+#define AXGBE_ABORT_COUNT 500
+#define AXGBE_DISABLE_COUNT 1000
+
+#define AXGBE_STD_SPEED 1
+
+#define AXGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
+#define AXGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
+#define AXGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
+#define AXGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
+#define AXGBE_DEFAULT_INT_MASK (AXGBE_INTR_RX_FULL | \
+ AXGBE_INTR_TX_EMPTY | \
+ AXGBE_INTR_TX_ABRT | \
+ AXGBE_INTR_STOP_DET)
+
+#define AXGBE_I2C_READ BIT(8)
+#define AXGBE_I2C_STOP BIT(9)
+
+static int axgbe_i2c_abort(struct axgbe_port *pdata)
+{
+ unsigned int wait = AXGBE_ABORT_COUNT;
+
+ /* Must be enabled to recognize the abort request */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
+
+ /* Issue the abort */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
+
+ while (wait--) {
+ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
+ return 0;
+ rte_delay_us(500);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_i2c_set_enable(struct axgbe_port *pdata, bool enable)
+{
+ unsigned int wait = AXGBE_DISABLE_COUNT;
+ unsigned int mode = enable ? 1 : 0;
+
+ while (wait--) {
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
+ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
+ return 0;
+
+ rte_delay_us(100);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_i2c_disable(struct axgbe_port *pdata)
+{
+ unsigned int ret;
+
+ ret = axgbe_i2c_set_enable(pdata, false);
+ if (ret) {
+ /* Disable failed, try an abort */
+ ret = axgbe_i2c_abort(pdata);
+ if (ret)
+ return ret;
+
+ /* Abort succeeded, try to disable again */
+ ret = axgbe_i2c_set_enable(pdata, false);
+ }
+
+ return ret;
+}
+
+static int axgbe_i2c_enable(struct axgbe_port *pdata)
+{
+ return axgbe_i2c_set_enable(pdata, true);
+}
+
+static void axgbe_i2c_clear_all_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOREAD(pdata, IC_CLR_INTR);
+}
+
+static void axgbe_i2c_disable_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
+}
+
+static void axgbe_i2c_enable_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, AXGBE_DEFAULT_INT_MASK);
+}
+
+static void axgbe_i2c_write(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int tx_slots;
+ unsigned int cmd;
+
+ /* Configured to never receive Rx overflows, so fill up Tx fifo */
+ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
+ while (tx_slots && state->tx_len) {
+ if (state->op->cmd == AXGBE_I2C_CMD_READ)
+ cmd = AXGBE_I2C_READ;
+ else
+ cmd = *state->tx_buf++;
+
+ if (state->tx_len == 1)
+ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
+
+ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
+
+ tx_slots--;
+ state->tx_len--;
+ }
+
+ /* No more Tx operations, so ignore TX_EMPTY and return */
+ if (!state->tx_len)
+ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
+}
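+
+/* The IC_DATA_CMD writes above follow the usual DesignWare-style layout
+ * assumed by the macros at the top of this file: bits 7:0 carry the data
+ * byte, bit 8 (AXGBE_I2C_READ) marks a read command and bit 9
+ * (AXGBE_I2C_STOP) requests a STOP condition after the byte.
+ */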
+
+static void axgbe_i2c_read(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int rx_slots;
+
+ /* Anything to be read? */
+ if (state->op->cmd != AXGBE_I2C_CMD_READ)
+ return;
+
+ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
+ while (rx_slots && state->rx_len) {
+ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
+ state->rx_len--;
+ rx_slots--;
+ }
+}
+
+static void axgbe_i2c_clear_isr_interrupts(struct axgbe_port *pdata,
+ unsigned int isr)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+
+ if (isr & AXGBE_INTR_TX_ABRT) {
+ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
+ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
+ }
+
+ if (isr & AXGBE_INTR_STOP_DET)
+ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+}
+
+static int axgbe_i2c_isr(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int isr;
+
+ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+
+ axgbe_i2c_clear_isr_interrupts(pdata, isr);
+
+ if (isr & AXGBE_INTR_TX_ABRT) {
+ axgbe_i2c_disable_interrupts(pdata);
+
+ state->ret = -EIO;
+ goto out;
+ }
+
+ /* Check for data in the Rx fifo */
+ axgbe_i2c_read(pdata);
+
+ /* Fill up the Tx fifo next */
+ axgbe_i2c_write(pdata);
+
+out:
+ /* Complete on an error or STOP condition */
+ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+ return 1;
+
+ return 0;
+}
+
+static void axgbe_i2c_set_mode(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_CON);
+ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
+ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
+ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
+ XI2C_SET_BITS(reg, IC_CON, SPEED, AXGBE_STD_SPEED);
+ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
+ XI2C_IOWRITE(pdata, IC_CON, reg);
+}
+
+static void axgbe_i2c_get_features(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c *i2c = &pdata->i2c;
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
+ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ MAX_SPEED_MODE);
+ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ RX_BUFFER_DEPTH);
+ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ TX_BUFFER_DEPTH);
+}
+
+static void axgbe_i2c_set_target(struct axgbe_port *pdata, unsigned int addr)
+{
+ XI2C_IOWRITE(pdata, IC_TAR, addr);
+}
+
+static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ int ret;
+ uint64_t timeout;
+
+ pthread_mutex_lock(&pdata->i2c_mutex);
+ ret = axgbe_i2c_disable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+		goto unlock;
+ }
+
+ axgbe_i2c_set_target(pdata, op->target);
+
+ memset(state, 0, sizeof(*state));
+ state->op = op;
+ state->tx_len = op->len;
+ state->tx_buf = (unsigned char *)op->buf;
+ state->rx_len = op->len;
+ state->rx_buf = (unsigned char *)op->buf;
+
+ axgbe_i2c_clear_all_interrupts(pdata);
+ ret = axgbe_i2c_enable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to enable i2c master\n");
+		goto unlock;
+ }
+
+ /* Enabling the interrupts will cause the TX FIFO empty interrupt to
+ * fire and begin to process the command via the ISR.
+ */
+ axgbe_i2c_enable_interrupts(pdata);
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (XI2C_IOREAD(pdata, IC_RAW_INTR_STAT)) {
+ if (axgbe_i2c_isr(pdata))
+ goto success;
+ }
+ }
+
+ PMD_DRV_LOG(ERR, "i2c operation timed out\n");
+ axgbe_i2c_disable_interrupts(pdata);
+ axgbe_i2c_disable(pdata);
+ ret = -ETIMEDOUT;
+ goto unlock;
+
+success:
+ ret = state->ret;
+ if (ret) {
+ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
+ ret = -ENOTCONN;
+ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
+ ret = -EAGAIN;
+ }
+
+unlock:
+ pthread_mutex_unlock(&pdata->i2c_mutex);
+ return ret;
+}
+
+static void axgbe_i2c_stop(struct axgbe_port *pdata)
+{
+ if (!pdata->i2c.started)
+ return;
+
+ pdata->i2c.started = 0;
+ axgbe_i2c_disable_interrupts(pdata);
+ axgbe_i2c_disable(pdata);
+ axgbe_i2c_clear_all_interrupts(pdata);
+}
+
+static int axgbe_i2c_start(struct axgbe_port *pdata)
+{
+ if (pdata->i2c.started)
+ return 0;
+
+ pdata->i2c.started = 1;
+
+ return 0;
+}
+
+static int axgbe_i2c_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ axgbe_i2c_disable_interrupts(pdata);
+
+ ret = axgbe_i2c_disable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+ return ret;
+ }
+
+ axgbe_i2c_get_features(pdata);
+
+ axgbe_i2c_set_mode(pdata);
+
+ axgbe_i2c_clear_all_interrupts(pdata);
+
+ return 0;
+}
+
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if)
+{
+ i2c_if->i2c_init = axgbe_i2c_init;
+ i2c_if->i2c_start = axgbe_i2c_start;
+ i2c_if->i2c_stop = axgbe_i2c_stop;
+ i2c_if->i2c_xfer = axgbe_i2c_xfer;
+}
diff --git a/drivers/net/axgbe/axgbe_logs.h b/drivers/net/axgbe/axgbe_logs.h
new file mode 100644
index 00000000..d1487017
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_logs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_LOGS_H_
+#define _AXGBE_LOGS_H_
+
+#include <stdio.h>
+
+extern int axgbe_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, axgbe_logtype_init, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#ifdef RTE_LIBRTE_AXGBE_PMD_DEBUG
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+extern int axgbe_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
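+
+/* Example expansion: PMD_DRV_LOG(ERR, "phy reset failed\n") becomes
+ * rte_log(RTE_LOG_ERR, axgbe_logtype_driver, "%s(): phy reset failed\n",
+ * __func__).
+ */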
+
+#endif /* _AXGBE_LOGS_H_ */
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
new file mode 100644
index 00000000..2721e5cc
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -0,0 +1,1066 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+static void axgbe_an37_clear_interrupts(struct axgbe_port *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ reg &= ~AXGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+}
+
+static void axgbe_an37_disable_interrupts(struct axgbe_port *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~AXGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg &= ~AXGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
+}
+
+static void axgbe_an73_clear_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+}
+
+static void axgbe_an73_disable_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+}
+
+static void axgbe_an73_enable_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
+ AXGBE_AN_CL73_INT_MASK);
+}
+
+static void axgbe_an_enable_interrupts(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_enable_interrupts(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+		PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_clear_interrupts_all(struct axgbe_port *pdata)
+{
+ axgbe_an73_clear_interrupts(pdata);
+ axgbe_an37_clear_interrupts(pdata);
+}
+
+static void axgbe_an73_enable_kr_training(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+ reg |= AXGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void axgbe_an73_disable_kr_training(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+ reg &= ~AXGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void axgbe_kr_mode(struct axgbe_port *pdata)
+{
+ /* Enable KR training */
+ axgbe_an73_enable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR);
+}
+
+static void axgbe_kx_2500_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_2500);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_2500);
+}
+
+static void axgbe_kx_1000_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_1000);
+}
+
+static void axgbe_sfi_mode(struct axgbe_port *pdata)
+{
+ /* If a KR re-driver is present, change to KR mode instead */
+ if (pdata->kr_redrv)
+ return axgbe_kr_mode(pdata);
+
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SFI);
+}
+
+static void axgbe_x_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_X);
+}
+
+static void axgbe_sgmii_1000_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_1000);
+}
+
+static void axgbe_sgmii_100_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_100);
+}
+
+static enum axgbe_mode axgbe_cur_mode(struct axgbe_port *pdata)
+{
+ return pdata->phy_if.phy_impl.cur_mode(pdata);
+}
+
+static bool axgbe_in_kr_mode(struct axgbe_port *pdata)
+{
+ return axgbe_cur_mode(pdata) == AXGBE_MODE_KR;
+}
+
+static void axgbe_change_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ axgbe_kx_1000_mode(pdata);
+ break;
+ case AXGBE_MODE_KX_2500:
+ axgbe_kx_2500_mode(pdata);
+ break;
+ case AXGBE_MODE_KR:
+ axgbe_kr_mode(pdata);
+ break;
+ case AXGBE_MODE_SGMII_100:
+ axgbe_sgmii_100_mode(pdata);
+ break;
+ case AXGBE_MODE_SGMII_1000:
+ axgbe_sgmii_1000_mode(pdata);
+ break;
+ case AXGBE_MODE_X:
+ axgbe_x_mode(pdata);
+ break;
+ case AXGBE_MODE_SFI:
+ axgbe_sfi_mode(pdata);
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n", mode);
+ }
+}
+
+static void axgbe_switch_mode(struct axgbe_port *pdata)
+{
+ axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
+}
+
+static void axgbe_set_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ if (mode == axgbe_cur_mode(pdata))
+ return;
+
+ axgbe_change_mode(pdata, mode);
+}
+
+static bool axgbe_use_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ return pdata->phy_if.phy_impl.use_mode(pdata, mode);
+}
+
+static void axgbe_an37_set(struct axgbe_port *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
+ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (enable)
+ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (restart)
+ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+}
+
+static void axgbe_an37_disable(struct axgbe_port *pdata)
+{
+ axgbe_an37_set(pdata, false, false);
+ axgbe_an37_disable_interrupts(pdata);
+}
+
+static void axgbe_an73_set(struct axgbe_port *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ reg |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ reg |= MDIO_AN_CTRL1_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void axgbe_an73_restart(struct axgbe_port *pdata)
+{
+ axgbe_an73_enable_interrupts(pdata);
+ axgbe_an73_set(pdata, true, true);
+}
+
+static void axgbe_an73_disable(struct axgbe_port *pdata)
+{
+ axgbe_an73_set(pdata, false, false);
+ axgbe_an73_disable_interrupts(pdata);
+ pdata->an_start = 0;
+}
+
+static void axgbe_an_restart(struct axgbe_port *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_pre)
+ pdata->phy_if.phy_impl.an_pre(pdata);
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_restart(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_disable(struct axgbe_port *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_disable(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_disable_all(struct axgbe_port *pdata)
+{
+ axgbe_an73_disable(pdata);
+ axgbe_an37_disable(pdata);
+}
+
+static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg, reg;
+
+ *state = AXGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!axgbe_in_kr_mode(pdata))
+ return AXGBE_AN_PAGE_RECEIVED;
+
+ /* Enable/Disable FEC */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ reg |= pdata->fec_ability;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (reg & AXGBE_KR_TRAINING_ENABLE) {
+ if (pdata->phy_if.phy_impl.kr_training_pre)
+ pdata->phy_if.phy_impl.kr_training_pre(pdata);
+
+ reg |= AXGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ reg);
+
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
+ }
+
+ return AXGBE_AN_PAGE_RECEIVED;
+}
+
+static enum axgbe_an axgbe_an73_tx_xnp(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ u16 msg;
+
+ *state = AXGBE_RX_XNP;
+
+ msg = AXGBE_XNP_MCF_NULL_MESSAGE;
+ msg |= AXGBE_XNP_MP_FORMATTED;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return AXGBE_AN_PAGE_RECEIVED;
+}
+
+static enum axgbe_an axgbe_an73_rx_bpa(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+ /* Check for a supported mode, otherwise restart in a different one */
+ link_support = axgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+ if (!(reg & link_support))
+ return AXGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
+ ? axgbe_an73_tx_xnp(pdata, state)
+ : axgbe_an73_tx_training(pdata, state);
+}
+
+static enum axgbe_an axgbe_an73_rx_xnp(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
+ ? axgbe_an73_tx_xnp(pdata, state)
+ : axgbe_an73_tx_training(pdata, state);
+}
+
+static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata)
+{
+ enum axgbe_rx *state;
+ unsigned long an_timeout;
+ enum axgbe_an ret;
+ unsigned long ticks;
+
+ if (!pdata->an_start) {
+ pdata->an_start = rte_get_timer_cycles();
+ } else {
+ an_timeout = pdata->an_start +
+ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT);
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+
+ pdata->an_start = rte_get_timer_cycles();
+ }
+ }
+
+ state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state
+ : &pdata->kx_state;
+
+ switch (*state) {
+ case AXGBE_RX_BPA:
+ ret = axgbe_an73_rx_bpa(pdata, state);
+ break;
+ case AXGBE_RX_XNP:
+ ret = axgbe_an73_rx_xnp(pdata, state);
+ break;
+ default:
+ ret = AXGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata)
+{
+ /* Be sure we aren't looping trying to negotiate */
+ if (axgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = AXGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+ !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ return AXGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != AXGBE_RX_BPA)
+ return AXGBE_AN_NO_LINK;
+ } else {
+ pdata->kx_state = AXGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+ return AXGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != AXGBE_RX_BPA)
+ return AXGBE_AN_NO_LINK;
+ }
+
+ axgbe_an_disable(pdata);
+ axgbe_switch_mode(pdata);
+ axgbe_an_restart(pdata);
+
+ return AXGBE_AN_INCOMPAT_LINK;
+}
+
+static void axgbe_an73_state_machine(struct axgbe_port *pdata)
+{
+ enum axgbe_an cur_state = pdata->an_state;
+
+ if (!pdata->an_int)
+ return;
+
+next_int:
+ if (pdata->an_int & AXGBE_AN_CL73_PG_RCV) {
+ pdata->an_state = AXGBE_AN_PAGE_RECEIVED;
+ pdata->an_int &= ~AXGBE_AN_CL73_PG_RCV;
+ } else if (pdata->an_int & AXGBE_AN_CL73_INC_LINK) {
+ pdata->an_state = AXGBE_AN_INCOMPAT_LINK;
+ pdata->an_int &= ~AXGBE_AN_CL73_INC_LINK;
+ } else if (pdata->an_int & AXGBE_AN_CL73_INT_CMPLT) {
+ pdata->an_state = AXGBE_AN_COMPLETE;
+ pdata->an_int &= ~AXGBE_AN_CL73_INT_CMPLT;
+ } else {
+ pdata->an_state = AXGBE_AN_ERROR;
+ }
+
+again:
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case AXGBE_AN_READY:
+ pdata->an_supported = 0;
+ break;
+ case AXGBE_AN_PAGE_RECEIVED:
+ pdata->an_state = axgbe_an73_page_received(pdata);
+ pdata->an_supported++;
+ break;
+ case AXGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+ pdata->an_state = axgbe_an73_incompat_link(pdata);
+ break;
+ case AXGBE_AN_COMPLETE:
+ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ break;
+ case AXGBE_AN_NO_LINK:
+ break;
+ default:
+ pdata->an_state = AXGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == AXGBE_AN_NO_LINK) {
+ pdata->an_int = 0;
+ axgbe_an73_clear_interrupts(pdata);
+ pdata->eth_dev->data->dev_link.link_status =
+ ETH_LINK_DOWN;
+ } else if (pdata->an_state == AXGBE_AN_ERROR) {
+ PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
+ cur_state);
+ pdata->an_int = 0;
+ axgbe_an73_clear_interrupts(pdata);
+ }
+
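+	/* AXGBE_AN_COMPLETE and the states that follow it in the enum
+	 * (NO_LINK, ERROR) all end the current AN cycle: latch the
+	 * result and reset for the next round.
+	 */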
+ if (pdata->an_state >= AXGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = AXGBE_AN_READY;
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+ pdata->an_start = 0;
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+ }
+
+ if (cur_state != pdata->an_state)
+ goto again;
+
+ if (pdata->an_int)
+ goto next_int;
+
+ axgbe_an73_enable_interrupts(pdata);
+}
+
+static void axgbe_an73_isr(struct axgbe_port *pdata)
+{
+ /* Disable AN interrupts */
+ axgbe_an73_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
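+		/* (The interrupt register is assumed write-zero-to-clear,
+		 * hence the complement of the captured bits.)
+		 */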
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
+ pthread_mutex_lock(&pdata->an_mutex);
+ axgbe_an73_state_machine(pdata);
+ pthread_mutex_unlock(&pdata->an_mutex);
+ } else {
+ /* Enable AN interrupts */
+ axgbe_an73_enable_interrupts(pdata);
+ }
+}
+
+static void axgbe_an_isr(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_isr(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "AN_MODE_37 not supported\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_combined_isr(struct axgbe_port *pdata)
+{
+ axgbe_an_isr(pdata);
+}
+
+static void axgbe_an73_init(struct axgbe_port *pdata)
+{
+ unsigned int advertising, reg;
+
+ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
+
+ /* Set up Advertisement register 3 first */
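+	/* Bits 15:14 carry the clause 73 FEC ability/request pair (F0/F1) */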
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (advertising & ADVERTISED_10000baseR_FEC)
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+ /* Set up Advertisement register 2 next */
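+	/* Technology ability bits: 0x80 = 10GBASE-KR, 0x20 = 1000BASE-KX
+	 * (also used here when advertising 2500BASE-X).
+	 */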
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (advertising & ADVERTISED_10000baseKR_Full)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ if ((advertising & ADVERTISED_1000baseKX_Full) ||
+ (advertising & ADVERTISED_2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+ /* Set up Advertisement register 1 last */
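+	/* Bits 11:10 are the clause 73 pause capability bits (C1:C0) */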
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (advertising & ADVERTISED_Pause)
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+ if (advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ reg &= ~AXGBE_XNP_NP_EXCHANGE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+}
+
+static void axgbe_an_init(struct axgbe_port *pdata)
+{
+ /* Set up advertisement registers based on current settings */
+ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_init(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_adjust_link(struct axgbe_port *pdata)
+{
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) {
+ pdata->hw_if.config_tx_flow_control(pdata);
+ pdata->tx_pause = pdata->phy.tx_pause;
+ }
+
+ if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) {
+ pdata->hw_if.config_rx_flow_control(pdata);
+ pdata->rx_pause = pdata->phy.rx_pause;
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed)
+ pdata->phy_speed = pdata->phy.speed;
+ if (pdata->phy_link != pdata->phy.link)
+ pdata->phy_link = pdata->phy.link;
+ } else if (pdata->phy_link) {
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+}
+
+static int axgbe_phy_config_fixed(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+
+ /* Disable auto-negotiation */
+ axgbe_an_disable(pdata);
+
+ /* Set specified mode for specified speed */
+ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ case AXGBE_MODE_KX_2500:
+ case AXGBE_MODE_KR:
+ case AXGBE_MODE_SGMII_100:
+ case AXGBE_MODE_SGMII_1000:
+ case AXGBE_MODE_X:
+ case AXGBE_MODE_SFI:
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate duplex mode */
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ axgbe_set_mode(pdata, mode);
+
+ return 0;
+}
+
+static int __axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+ int ret;
+
+ axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = rte_get_timer_cycles();
+
+ ret = pdata->phy_if.phy_impl.an_config(pdata);
+ if (ret)
+ return ret;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ ret = axgbe_phy_config_fixed(pdata);
+ if (ret || !pdata->kr_redrv)
+ return ret;
+ }
+
+ /* Disable auto-negotiation interrupt */
+ rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+ /* Start auto-negotiation in a supported mode */
+ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KR);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KX_2500);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KX_1000);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SFI);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_X);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100);
+ } else {
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+ return -EINVAL;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+ axgbe_an_disable_all(pdata);
+
+	/* Clear any auto-negotiation interrupts */
+ axgbe_an_clear_interrupts_all(pdata);
+
+ pdata->an_result = AXGBE_AN_READY;
+ pdata->an_state = AXGBE_AN_READY;
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+ axgbe_an_init(pdata);
+ axgbe_an_restart(pdata);
+
+ return 0;
+}
+
+static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+ int ret;
+
+ pthread_mutex_lock(&pdata->an_mutex);
+
+ ret = __axgbe_phy_config_aneg(pdata);
+ if (ret)
+ axgbe_set_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+ else
+ axgbe_clear_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+
+ pthread_mutex_unlock(&pdata->an_mutex);
+
+ return ret;
+}
+
+static bool axgbe_phy_aneg_done(struct axgbe_port *pdata)
+{
+ return pdata->an_result == AXGBE_AN_COMPLETE;
+}
+
+static void axgbe_check_link_timeout(struct axgbe_port *pdata)
+{
+ unsigned long link_timeout;
+ unsigned long ticks;
+
+ link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT *
+ 2 * rte_get_timer_hz());
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, link_timeout))
+ axgbe_phy_config_aneg(pdata);
+}
+
+static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata)
+{
+ return pdata->phy_if.phy_impl.an_outcome(pdata);
+}
+
+static void axgbe_phy_status_result(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+
+ pdata->phy.lp_advertising = 0;
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ mode = axgbe_cur_mode(pdata);
+ else
+ mode = axgbe_phy_status_aneg(pdata);
+
+ switch (mode) {
+ case AXGBE_MODE_SGMII_100:
+ pdata->phy.speed = SPEED_100;
+ break;
+ case AXGBE_MODE_X:
+ case AXGBE_MODE_KX_1000:
+ case AXGBE_MODE_SGMII_1000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+ case AXGBE_MODE_KX_2500:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ case AXGBE_MODE_KR:
+ case AXGBE_MODE_SFI:
+ pdata->phy.speed = SPEED_10000;
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ pdata->phy.duplex = DUPLEX_FULL;
+
+ axgbe_set_mode(pdata, mode);
+}
+
+static void axgbe_phy_status(struct axgbe_port *pdata)
+{
+ unsigned int link_aneg;
+ int an_restart;
+
+ if (axgbe_test_bit(AXGBE_LINK_ERR, &pdata->dev_state)) {
+ pdata->phy.link = 0;
+ goto adjust_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+ &an_restart);
+ if (an_restart) {
+ axgbe_phy_config_aneg(pdata);
+ return;
+ }
+
+ if (pdata->phy.link) {
+ if (link_aneg && !axgbe_phy_aneg_done(pdata)) {
+ axgbe_check_link_timeout(pdata);
+ return;
+ }
+ axgbe_phy_status_result(pdata);
+ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state))
+ axgbe_clear_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+ } else {
+ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) {
+ axgbe_check_link_timeout(pdata);
+
+ if (link_aneg)
+ return;
+ }
+ axgbe_phy_status_result(pdata);
+ }
+
+adjust_link:
+ axgbe_phy_adjust_link(pdata);
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+ if (!pdata->phy_started)
+ return;
+ /* Indicate the PHY is down */
+ pdata->phy_started = 0;
+ /* Disable auto-negotiation */
+ axgbe_an_disable_all(pdata);
+ pdata->phy_if.phy_impl.stop(pdata);
+ pdata->phy.link = 0;
+ axgbe_phy_adjust_link(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.start(pdata);
+ if (ret)
+ return ret;
+ /* Set initial mode - call the mode setting routines
+	 * directly to ensure we are properly configured
+ */
+ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+ axgbe_kr_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+ axgbe_kx_2500_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+ axgbe_kx_1000_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+ axgbe_sfi_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+ axgbe_x_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+ axgbe_sgmii_1000_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+ axgbe_sgmii_100_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_stop;
+ }
+ /* Indicate the PHY is up and running */
+ pdata->phy_started = 1;
+ axgbe_an_init(pdata);
+ axgbe_an_enable_interrupts(pdata);
+ return axgbe_phy_config_aneg(pdata);
+
+err_stop:
+ pdata->phy_if.phy_impl.stop(pdata);
+
+ return ret;
+}
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.reset(pdata);
+ if (ret)
+ return ret;
+
+ /* Disable auto-negotiation for now */
+ axgbe_an_disable_all(pdata);
+
+ /* Clear auto-negotiation interrupts */
+ axgbe_an_clear_interrupts_all(pdata);
+
+ return 0;
+}
+
+static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata)
+{
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+ return SPEED_2500;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_100baseT_Full)
+ return SPEED_100;
+
+ return SPEED_UNKNOWN;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ /* Check for FEC support */
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+
+	/* Set up the PHY (including supported features) */
+ ret = pdata->phy_if.phy_impl.init(pdata);
+ if (ret)
+ return ret;
+ pdata->phy.advertising = pdata->phy.supported;
+
+ pdata->phy.address = 0;
+
+ if (pdata->phy.advertising & ADVERTISED_Autoneg) {
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ } else {
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.speed = axgbe_phy_best_advertised_speed(pdata);
+ pdata->phy.duplex = DUPLEX_FULL;
+ }
+
+ pdata->phy.link = 0;
+
+ pdata->phy.pause_autoneg = pdata->pause_autoneg;
+ pdata->phy.tx_pause = pdata->tx_pause;
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pdata->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+ }
+
+ if (pdata->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+ return 0;
+}
+
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = axgbe_phy_init;
+ phy_if->phy_reset = axgbe_phy_reset;
+ phy_if->phy_start = axgbe_phy_start;
+ phy_if->phy_stop = axgbe_phy_stop;
+ phy_if->phy_status = axgbe_phy_status;
+ phy_if->phy_config_aneg = axgbe_phy_config_aneg;
+ phy_if->an_isr = axgbe_an_combined_isr;
+}
diff --git a/drivers/net/axgbe/axgbe_phy.h b/drivers/net/axgbe/axgbe_phy.h
new file mode 100644
index 00000000..77ee20a3
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_phy.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef __AXGBE_PHY_H__
+#define __AXGBE_PHY_H__
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_10000 10000
+
+
+/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
+ * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
+ */
+#define MII_ADDR_C45 (1 << 30)
+
+/* Basic mode status register. */
+#define BMSR_LSTATUS 0x0004 /* Link status */
+
+/* Status register 1. */
+#define MDIO_STAT1_LSTATUS BMSR_LSTATUS
+
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */
+#define MII_MMD_DATA 0x0e /* MMD Access Data Register */
+#define MII_ESTATUS 0x0f /* Extended Status */
+#define MII_DCOUNTER 0x12 /* Disconnect counter */
+#define MII_FCSCOUNTER 0x13 /* False carrier counter */
+#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
+#define MII_RERRCOUNTER 0x15 /* Receive error counter */
+#define MII_SREVISION 0x16 /* Silicon revision */
+#define MII_RESV1 0x17 /* Reserved... */
+#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */
+#define MII_PHYADDR 0x19 /* PHY address */
+#define MII_RESV2 0x1a /* Reserved... */
+#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */
+#define MII_NCONFIG 0x1c /* Network interface config */
+
+/* Basic mode control register. */
+#define BMCR_RESV 0x003f /* Unused... */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+#define BMCR_CTST 0x0080 /* Collision test */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
+#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define BMCR_PDOWN 0x0800 /* Enable low power state */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
+#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
+#define BMCR_RESET 0x8000 /* Reset to default state */
+#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
+
+
+/* MDIO Manageable Devices (MMDs). */
+#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment
+ * Physical Medium Dependent
+ */
+#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */
+#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */
+#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */
+#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
+#define MDIO_MMD_TC 6 /* Transmission Convergence */
+#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
+#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
+#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
+
+/* Generic MDIO registers. */
+#define MDIO_CTRL1 MII_BMCR
+#define MDIO_STAT1 MII_BMSR
+#define MDIO_DEVID1 MII_PHYSID1
+#define MDIO_DEVID2 MII_PHYSID2
+#define MDIO_SPEED 4 /* Speed ability */
+#define MDIO_DEVS1 5 /* Devices in package */
+#define MDIO_DEVS2 6
+#define MDIO_CTRL2 7 /* 10G control 2 */
+#define MDIO_STAT2 8 /* 10G status 2 */
+#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */
+#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */
+#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */
+#define MDIO_PKGID1 14 /* Package identifier */
+#define MDIO_PKGID2 15
+#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
+#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
+#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */
+#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */
+#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
+#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
+#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
+
+/* Media-dependent registers. */
+#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
+#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
+ * Lanes B-D are numbered 134-136.
+ */
+#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
+#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
+#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
+#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+
+/* Control register 1. */
+/* Enable extended speed selection */
+#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100)
+/* All speed selection bits */
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
+#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX
+#define MDIO_CTRL1_LPOWER BMCR_PDOWN
+#define MDIO_CTRL1_RESET BMCR_RESET
+#define MDIO_PMA_CTRL1_LOOPBACK 0x0001
+#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000
+#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100
+#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
+#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
+#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
+#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */
+
+/* PMA 10GBASE-R FEC ability register. */
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+
+
+/* Autoneg related */
+#define ADVERTISED_Autoneg (1 << 6)
+#define SUPPORTED_Autoneg (1 << 6)
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
+
+#define SUPPORTED_Backplane (1 << 16)
+#define SUPPORTED_TP (1 << 7)
+
+#define ADVERTISED_10000baseR_FEC (1 << 20)
+
+#define SUPPORTED_10000baseR_FEC (1 << 20)
+
+#define SUPPORTED_FIBRE (1 << 10)
+
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#define ADVERTISED_1000baseT_Full (1 << 5)
+#define ADVERTISED_100baseT_Full (1 << 3)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_FIBRE (1 << 10)
+#define ADVERTISED_Backplane (1 << 16)
+
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#define SUPPORTED_2500baseX_Full (1 << 15)
+#define SUPPORTED_100baseT_Full		(1 << 3)
+#define SUPPORTED_1000baseT_Full (1 << 5)
+#define SUPPORTED_10000baseT_Full (1 << 12)
+
+
+#define SPEED_UNKNOWN -1
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF 0x00
+#define DUPLEX_FULL 0x01
+#define DUPLEX_UNKNOWN 0xff
+
+#endif /* __AXGBE_PHY_H__ */
diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
new file mode 100644
index 00000000..973177f6
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_phy_impl.c
@@ -0,0 +1,2191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+#define AXGBE_PHY_PORT_SPEED_100 BIT(0)
+#define AXGBE_PHY_PORT_SPEED_1000 BIT(1)
+#define AXGBE_PHY_PORT_SPEED_2500 BIT(2)
+#define AXGBE_PHY_PORT_SPEED_10000 BIT(3)
+
+#define AXGBE_MUTEX_RELEASE 0x80000000
+
+#define AXGBE_SFP_DIRECT 7
+
+/* I2C target addresses */
+#define AXGBE_SFP_SERIAL_ID_ADDRESS 0x50
+#define AXGBE_SFP_DIAG_INFO_ADDRESS 0x51
+#define AXGBE_SFP_PHY_ADDRESS 0x56
+#define AXGBE_GPIO_ADDRESS_PCA9555 0x20
+
+/* SFP sideband signal indicators */
+#define AXGBE_GPIO_NO_TX_FAULT BIT(0)
+#define AXGBE_GPIO_NO_RATE_SELECT BIT(1)
+#define AXGBE_GPIO_NO_MOD_ABSENT BIT(2)
+#define AXGBE_GPIO_NO_RX_LOS BIT(3)
+
+/* Rate-change complete wait/retry count */
+#define AXGBE_RATECHANGE_COUNT 500
+
+/* CDR delay values for KR support (in usec) */
+#define AXGBE_CDR_DELAY_INIT 10000
+#define AXGBE_CDR_DELAY_INC 10000
+#define AXGBE_CDR_DELAY_MAX 100000
+
+enum axgbe_port_mode {
+ AXGBE_PORT_MODE_RSVD = 0,
+ AXGBE_PORT_MODE_BACKPLANE,
+ AXGBE_PORT_MODE_BACKPLANE_2500,
+ AXGBE_PORT_MODE_1000BASE_T,
+ AXGBE_PORT_MODE_1000BASE_X,
+ AXGBE_PORT_MODE_NBASE_T,
+ AXGBE_PORT_MODE_10GBASE_T,
+ AXGBE_PORT_MODE_10GBASE_R,
+ AXGBE_PORT_MODE_SFP,
+ AXGBE_PORT_MODE_MAX,
+};
+
+enum axgbe_conn_type {
+ AXGBE_CONN_TYPE_NONE = 0,
+ AXGBE_CONN_TYPE_SFP,
+ AXGBE_CONN_TYPE_MDIO,
+ AXGBE_CONN_TYPE_RSVD1,
+ AXGBE_CONN_TYPE_BACKPLANE,
+ AXGBE_CONN_TYPE_MAX,
+};
+
+/* SFP/SFP+ related definitions */
+enum axgbe_sfp_comm {
+ AXGBE_SFP_COMM_DIRECT = 0,
+ AXGBE_SFP_COMM_PCA9545,
+};
+
+enum axgbe_sfp_cable {
+ AXGBE_SFP_CABLE_UNKNOWN = 0,
+ AXGBE_SFP_CABLE_ACTIVE,
+ AXGBE_SFP_CABLE_PASSIVE,
+};
+
+enum axgbe_sfp_base {
+ AXGBE_SFP_BASE_UNKNOWN = 0,
+ AXGBE_SFP_BASE_1000_T,
+ AXGBE_SFP_BASE_1000_SX,
+ AXGBE_SFP_BASE_1000_LX,
+ AXGBE_SFP_BASE_1000_CX,
+ AXGBE_SFP_BASE_10000_SR,
+ AXGBE_SFP_BASE_10000_LR,
+ AXGBE_SFP_BASE_10000_LRM,
+ AXGBE_SFP_BASE_10000_ER,
+ AXGBE_SFP_BASE_10000_CR,
+};
+
+enum axgbe_sfp_speed {
+ AXGBE_SFP_SPEED_UNKNOWN = 0,
+ AXGBE_SFP_SPEED_100_1000,
+ AXGBE_SFP_SPEED_1000,
+ AXGBE_SFP_SPEED_10000,
+};
+
+/* SFP Serial ID Base ID values relative to an offset of 0 */
+#define AXGBE_SFP_BASE_ID 0
+#define AXGBE_SFP_ID_SFP 0x03
+
+#define AXGBE_SFP_BASE_EXT_ID 1
+#define AXGBE_SFP_EXT_ID_SFP 0x04
+
+#define AXGBE_SFP_BASE_10GBE_CC 3
+#define AXGBE_SFP_BASE_10GBE_CC_SR BIT(4)
+#define AXGBE_SFP_BASE_10GBE_CC_LR BIT(5)
+#define AXGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
+#define AXGBE_SFP_BASE_10GBE_CC_ER BIT(7)
+
+#define AXGBE_SFP_BASE_1GBE_CC 6
+#define AXGBE_SFP_BASE_1GBE_CC_SX BIT(0)
+#define AXGBE_SFP_BASE_1GBE_CC_LX BIT(1)
+#define AXGBE_SFP_BASE_1GBE_CC_CX BIT(2)
+#define AXGBE_SFP_BASE_1GBE_CC_T BIT(3)
+
+#define AXGBE_SFP_BASE_CABLE 8
+#define AXGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
+#define AXGBE_SFP_BASE_CABLE_ACTIVE BIT(3)
+
+#define AXGBE_SFP_BASE_BR 12
+#define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a
+#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d
+#define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64
+#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68
+
+#define AXGBE_SFP_BASE_CU_CABLE_LEN 18
+
+#define AXGBE_SFP_BASE_VENDOR_NAME 20
+#define AXGBE_SFP_BASE_VENDOR_NAME_LEN 16
+#define AXGBE_SFP_BASE_VENDOR_PN 40
+#define AXGBE_SFP_BASE_VENDOR_PN_LEN 16
+#define AXGBE_SFP_BASE_VENDOR_REV 56
+#define AXGBE_SFP_BASE_VENDOR_REV_LEN 4
+
+#define AXGBE_SFP_BASE_CC 63
+
+/* SFP Serial ID Extended ID values relative to an offset of 64 */
+#define AXGBE_SFP_BASE_VENDOR_SN 4
+#define AXGBE_SFP_BASE_VENDOR_SN_LEN 16
+
+#define AXGBE_SFP_EXTD_DIAG 28
+#define AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
+
+#define AXGBE_SFP_EXTD_SFF_8472 30
+
+#define AXGBE_SFP_EXTD_CC 31
+
+struct axgbe_sfp_eeprom {
+ u8 base[64];
+ u8 extd[32];
+ u8 vendor[32];
+};
+
+#define AXGBE_BEL_FUSE_VENDOR "BEL-FUSE"
+#define AXGBE_BEL_FUSE_PARTNO "1GBT-SFP06"
+
+struct axgbe_sfp_ascii {
+ union {
+ char vendor[AXGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
+ char partno[AXGBE_SFP_BASE_VENDOR_PN_LEN + 1];
+ char rev[AXGBE_SFP_BASE_VENDOR_REV_LEN + 1];
+ char serno[AXGBE_SFP_BASE_VENDOR_SN_LEN + 1];
+ } u;
+};
+
+/* MDIO PHY reset types */
+enum axgbe_mdio_reset {
+ AXGBE_MDIO_RESET_NONE = 0,
+ AXGBE_MDIO_RESET_I2C_GPIO,
+ AXGBE_MDIO_RESET_INT_GPIO,
+ AXGBE_MDIO_RESET_MAX,
+};
+
+/* Re-driver related definitions */
+enum axgbe_phy_redrv_if {
+ AXGBE_PHY_REDRV_IF_MDIO = 0,
+ AXGBE_PHY_REDRV_IF_I2C,
+ AXGBE_PHY_REDRV_IF_MAX,
+};
+
+enum axgbe_phy_redrv_model {
+ AXGBE_PHY_REDRV_MODEL_4223 = 0,
+ AXGBE_PHY_REDRV_MODEL_4227,
+ AXGBE_PHY_REDRV_MODEL_MAX,
+};
+
+enum axgbe_phy_redrv_mode {
+ AXGBE_PHY_REDRV_MODE_CX = 5,
+ AXGBE_PHY_REDRV_MODE_SR = 9,
+};
+
+#define AXGBE_PHY_REDRV_MODE_REG 0x12b0
+
+/* PHY related configuration information */
+struct axgbe_phy_data {
+ enum axgbe_port_mode port_mode;
+
+ unsigned int port_id;
+
+ unsigned int port_speeds;
+
+ enum axgbe_conn_type conn_type;
+
+ enum axgbe_mode cur_mode;
+ enum axgbe_mode start_mode;
+
+ unsigned int rrc_count;
+
+ unsigned int mdio_addr;
+
+ unsigned int comm_owned;
+
+ /* SFP Support */
+ enum axgbe_sfp_comm sfp_comm;
+ unsigned int sfp_mux_address;
+ unsigned int sfp_mux_channel;
+
+ unsigned int sfp_gpio_address;
+ unsigned int sfp_gpio_mask;
+ unsigned int sfp_gpio_rx_los;
+ unsigned int sfp_gpio_tx_fault;
+ unsigned int sfp_gpio_mod_absent;
+ unsigned int sfp_gpio_rate_select;
+
+ unsigned int sfp_rx_los;
+ unsigned int sfp_tx_fault;
+ unsigned int sfp_mod_absent;
+ unsigned int sfp_diags;
+ unsigned int sfp_changed;
+ unsigned int sfp_phy_avail;
+ unsigned int sfp_cable_len;
+ enum axgbe_sfp_base sfp_base;
+ enum axgbe_sfp_cable sfp_cable;
+ enum axgbe_sfp_speed sfp_speed;
+ struct axgbe_sfp_eeprom sfp_eeprom;
+
+ /* External PHY support */
+ enum axgbe_mdio_mode phydev_mode;
+ enum axgbe_mdio_reset mdio_reset;
+ unsigned int mdio_reset_addr;
+ unsigned int mdio_reset_gpio;
+
+ /* Re-driver support */
+ unsigned int redrv;
+ unsigned int redrv_if;
+ unsigned int redrv_addr;
+ unsigned int redrv_lane;
+ unsigned int redrv_model;
+
+ /* KR AN support */
+ unsigned int phy_cdr_notrack;
+ unsigned int phy_cdr_delay;
+};
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata);
+
+static int axgbe_phy_i2c_xfer(struct axgbe_port *pdata,
+ struct axgbe_i2c_op *i2c_op)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Be sure we own the bus */
+ if (!phy_data->comm_owned)
+ return -EIO;
+
+ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+}
+
+static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg,
+ unsigned int val)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint16_t *redrv_val;
+ u8 redrv_data[5], csum;
+ unsigned int i, retry;
+ int ret;
+
+ /* High byte of register contains read/write indicator */
+ redrv_data[0] = ((reg >> 8) & 0xff) << 1;
+ redrv_data[1] = reg & 0xff;
+ redrv_val = (uint16_t *)&redrv_data[2];
+ *redrv_val = rte_cpu_to_be_16(val);
+
+ /* Calculate 1 byte checksum */
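+	/* (8-bit sum with end-around carry, stored inverted) */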
+ csum = 0;
+ for (i = 0; i < 4; i++) {
+ csum += redrv_data[i];
+ if (redrv_data[i] > csum)
+ csum++;
+ }
+ redrv_data[4] = ~csum;
+
+ retry = 1;
+again1:
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = sizeof(redrv_data);
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = 1;
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+ }
+
+ if (redrv_data[0] != 0xff) {
+ PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
+ void *reg, unsigned int reg_len,
+ void *val, unsigned int val_len)
+{
+ struct axgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
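+	/* A single retry is allowed when the transfer fails with -EAGAIN */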
+again1:
+ /* Set the specified register to read */
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = reg_len;
+ i2c_op.buf = reg;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+	/* Read the specified register */
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+}
+
+static int axgbe_phy_sfp_put_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint8_t mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select no mux channels */
+ mux_channel = 0;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static int axgbe_phy_sfp_get_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select desired mux channel */
+ mux_channel = 1 << phy_data->sfp_mux_channel;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->comm_owned = 0;
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+}
+
+static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ uint64_t timeout;
+ unsigned int mutex_id;
+
+ if (phy_data->comm_owned)
+ return 0;
+
+	/* The I2C and MDIO/GPIO buses are multiplexed between multiple
+	 * devices, so the driver needs to take the software mutex and
+	 * then the hardware mutexes before it can use the buses.
+ */
+ pthread_mutex_lock(&pdata->phy_mutex);
+
+ /* Clear the mutexes */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, AXGBE_MUTEX_RELEASE);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, AXGBE_MUTEX_RELEASE);
+
+ /* Mutex formats are the same for I2C and MDIO/GPIO */
+ mutex_id = 0;
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
+
+ timeout = rte_get_timer_cycles() + (rte_get_timer_hz() * 5);
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ /* Must be all zeroes in order to obtain the mutex */
+ if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
+ XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
+ rte_delay_us(100);
+ continue;
+ }
+
+ /* Obtain the mutex */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
+
+ phy_data->comm_owned = 1;
+ return 0;
+ }
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+
+ PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
+
+ return -ETIMEDOUT;
+}
+
+static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->sfp_mod_absent) {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising = pdata->phy.supported;
+ }
+
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+ pdata->phy.advertising &= ~ADVERTISED_TP;
+ pdata->phy.advertising &= ~ADVERTISED_FIBRE;
+ pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
+ break;
+ case AXGBE_SFP_BASE_10000_SR:
+ case AXGBE_SFP_BASE_10000_LR:
+ case AXGBE_SFP_BASE_10000_LRM:
+ case AXGBE_SFP_BASE_10000_ER:
+ case AXGBE_SFP_BASE_10000_CR:
+ default:
+ pdata->phy.speed = SPEED_10000;
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ break;
+ }
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_CX:
+ case AXGBE_SFP_BASE_10000_CR:
+ pdata->phy.advertising |= ADVERTISED_TP;
+ break;
+ default:
+ pdata->phy.advertising |= ADVERTISED_FIBRE;
+ }
+
+ switch (phy_data->sfp_speed) {
+ case AXGBE_SFP_SPEED_100_1000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case AXGBE_SFP_SPEED_1000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case AXGBE_SFP_SPEED_10000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ break;
+ default:
+ /* Choose the fastest supported speed */
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ }
+}
+
+static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom,
+ enum axgbe_sfp_speed sfp_speed)
+{
+ u8 *sfp_base, min, max;
+
+ sfp_base = sfp_eeprom->base;
+
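+	/* Base ID byte 12 is the nominal bit rate in units of 100 Mb/s
+	 * (per SFF-8472), so 0x0a-0x0d brackets 1 Gb/s and 0x64-0x68
+	 * brackets 10 Gb/s.
+	 */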
+ switch (sfp_speed) {
+ case AXGBE_SFP_SPEED_1000:
+ min = AXGBE_SFP_BASE_BR_1GBE_MIN;
+ max = AXGBE_SFP_BASE_BR_1GBE_MAX;
+ break;
+ case AXGBE_SFP_SPEED_10000:
+ min = AXGBE_SFP_BASE_BR_10GBE_MIN;
+ max = AXGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return false;
+ }
+
+ return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) &&
+ (sfp_base[AXGBE_SFP_BASE_BR] <= max));
+}
+
+static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!phy_data->sfp_changed)
+ return;
+
+ phy_data->sfp_phy_avail = 0;
+
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return;
+}
+
+static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+
+ if (memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_NAME],
+ AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR)))
+ return false;
+
+ if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN],
+ AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) {
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
+ return true;
+ }
+
+ return false;
+}
+
+static bool axgbe_phy_sfp_parse_quirks(struct axgbe_port *pdata)
+{
+ if (axgbe_phy_belfuse_parse_quirks(pdata))
+ return true;
+
+ return false;
+}
+
+static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ uint8_t *sfp_base;
+
+ sfp_base = sfp_eeprom->base;
+
+ if (sfp_base[AXGBE_SFP_BASE_ID] != AXGBE_SFP_ID_SFP)
+ return;
+
+ if (sfp_base[AXGBE_SFP_BASE_EXT_ID] != AXGBE_SFP_EXT_ID_SFP)
+ return;
+
+ if (axgbe_phy_sfp_parse_quirks(pdata))
+ return;
+
+ /* Assume ACTIVE cable unless told it is PASSIVE */
+ if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN];
+ } else {
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
+ }
+
+ /* Determine the type of SFP */
+ if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] &
+ AXGBE_SFP_BASE_10GBE_CC_LRM)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LRM;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_ER)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_ER;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_SX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_LX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_LX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_CX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_T;
+ else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) &&
+ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR;
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_100_1000;
+ break;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
+ break;
+ case AXGBE_SFP_BASE_10000_SR:
+ case AXGBE_SFP_BASE_10000_LR:
+ case AXGBE_SFP_BASE_10000_LRM:
+ case AXGBE_SFP_BASE_10000_ER:
+ case AXGBE_SFP_BASE_10000_CR:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_10000;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool axgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf,
+ unsigned int len)
+{
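+	/* SFF-8472 check code: the low 8 bits of the byte sum over the
+	 * region must equal the stored CC byte. For example, for bytes
+	 * {0x03, 0x04, 0xf9} the sum is 0x100, so cc_in must be 0x00.
+	 */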
+ uint8_t cc;
+
+ for (cc = 0; len; buf++, len--)
+ cc += *buf;
+
+	return cc == cc_in;
+}
+
+static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom sfp_eeprom;
+ uint8_t eeprom_addr;
+ int ret;
+
+ ret = axgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error setting SFP MUX\n");
+ return ret;
+ }
+
+ /* Read the SFP serial ID eeprom */
+ eeprom_addr = 0;
+ ret = axgbe_phy_i2c_read(pdata, AXGBE_SFP_SERIAL_ID_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ &sfp_eeprom, sizeof(sfp_eeprom));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM\n");
+ goto put;
+ }
+
+ /* Validate the contents read */
+ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[AXGBE_SFP_BASE_CC],
+ sfp_eeprom.base,
+ sizeof(sfp_eeprom.base) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[AXGBE_SFP_EXTD_CC],
+ sfp_eeprom.extd,
+ sizeof(sfp_eeprom.extd) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ /* Check for an added or changed SFP */
+ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
+ phy_data->sfp_changed = 1;
+ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
+
+ if (sfp_eeprom.extd[AXGBE_SFP_EXTD_SFF_8472]) {
+ uint8_t diag_type;
+ diag_type = sfp_eeprom.extd[AXGBE_SFP_EXTD_DIAG];
+
+ if (!(diag_type & AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
+ phy_data->sfp_diags = 1;
+ }
+ } else {
+ phy_data->sfp_changed = 0;
+ }
+
+put:
+ axgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static void axgbe_phy_sfp_signals(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int gpio_input;
+ u8 gpio_reg, gpio_ports[2];
+ int ret;
+
+ /* Read the input port registers */
+ gpio_reg = 0;
+ ret = axgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address,
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs\n");
+ return;
+ }
+
+ gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
+
+ if (phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_MOD_ABSENT) {
+ /* No GPIO, just assume the module is present for now */
+ phy_data->sfp_mod_absent = 0;
+ } else {
+ if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
+ phy_data->sfp_mod_absent = 0;
+ }
+
+ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_RX_LOS) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
+ phy_data->sfp_rx_los = 1;
+
+ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_TX_FAULT) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
+ phy_data->sfp_tx_fault = 1;
+}
+
+static void axgbe_phy_sfp_mod_absent(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_phy_avail = 0;
+ memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
+}
+
+static void axgbe_phy_sfp_reset(struct axgbe_phy_data *phy_data)
+{
+ phy_data->sfp_rx_los = 0;
+ phy_data->sfp_tx_fault = 0;
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_diags = 0;
+ phy_data->sfp_base = AXGBE_SFP_BASE_UNKNOWN;
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_UNKNOWN;
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_UNKNOWN;
+}
+
+static void axgbe_phy_sfp_detect(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Reset the SFP signals and info */
+ axgbe_phy_sfp_reset(phy_data);
+
+ ret = axgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ /* Read the SFP signals and check for module presence */
+ axgbe_phy_sfp_signals(pdata);
+ if (phy_data->sfp_mod_absent) {
+ axgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ ret = axgbe_phy_sfp_read_eeprom(pdata);
+ if (ret) {
+ /* Treat any error as if there isn't an SFP plugged in */
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ axgbe_phy_sfp_parse_eeprom(pdata);
+ axgbe_phy_sfp_external_phy(pdata);
+
+put:
+ axgbe_phy_sfp_phy_settings(pdata);
+ axgbe_phy_put_comm_ownership(pdata);
+}
+
+static void axgbe_phy_phydev_flowctrl(struct axgbe_port *pdata)
+{
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+}
+
+static enum axgbe_mode axgbe_phy_an73_redrv_outcome(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ axgbe_phy_phydev_flowctrl(pdata);
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ mode = AXGBE_MODE_KR;
+ break;
+ default:
+ mode = AXGBE_MODE_SFI;
+ break;
+ }
+ } else if (ad_reg & 0x20) {
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ mode = AXGBE_MODE_KX_1000;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ mode = AXGBE_MODE_X;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ mode = AXGBE_MODE_SGMII_1000;
+ break;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ default:
+ mode = AXGBE_MODE_X;
+ break;
+ }
+ break;
+ default:
+ mode = AXGBE_MODE_SGMII_1000;
+ break;
+ }
+ } else {
+ mode = AXGBE_MODE_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an73_outcome(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
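+		/* Clause 73 pause resolution: symmetric pause on both
+		 * sides enables pause in both directions; otherwise
+		 * asymmetric pause applies in a single direction only.
+		 */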
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80)
+ mode = AXGBE_MODE_KR;
+ else if (ad_reg & 0x20)
+ mode = AXGBE_MODE_KX_1000;
+ else
+ mode = AXGBE_MODE_UNKNOWN;
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an_outcome(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ return axgbe_phy_an73_outcome(pdata);
+ case AXGBE_AN_MODE_CL73_REDRV:
+ return axgbe_phy_an73_redrv_outcome(pdata);
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int advertising;
+
+ /* Without a re-driver, just return current advertising */
+ if (!phy_data->redrv)
+ return pdata->phy.advertising;
+
+ /* With the KR re-driver we need to advertise a single speed */
+ advertising = pdata->phy.advertising;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
+ advertising &= ~ADVERTISED_10000baseKR_Full;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported\n");
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+
+ return advertising;
+}
+
+static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
+{
+	/* Dummy implementation since there is no case to support
+	 * external PHY devices registered through kernel APIs.
+	 */
+	return 0;
+}
+
+static enum axgbe_an_mode axgbe_phy_an_sfp_mode(struct axgbe_phy_data *phy_data)
+{
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ return AXGBE_AN_MODE_CL37;
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* A KR re-driver will always require CL73 AN */
+ if (phy_data->redrv)
+ return AXGBE_AN_MODE_CL73_REDRV;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ return AXGBE_AN_MODE_CL37;
+ case AXGBE_PORT_MODE_NBASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_an_sfp_mode(phy_data);
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static int axgbe_phy_set_redrv_mode_mdio(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ u16 redrv_reg, redrv_val;
+
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+ redrv_val = (u16)mode;
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
+}
+
+static int axgbe_phy_set_redrv_mode_i2c(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int redrv_reg;
+ int ret;
+
+ /* Calculate the register to write */
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+
+ ret = axgbe_phy_redrv_write(pdata, redrv_reg, mode);
+
+ return ret;
+}
+
+static void axgbe_phy_set_redrv_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_phy_redrv_mode mode;
+ int ret;
+
+ if (!phy_data->redrv)
+ return;
+
+ mode = AXGBE_PHY_REDRV_MODE_CX;
+ if ((phy_data->port_mode == AXGBE_PORT_MODE_SFP) &&
+ (phy_data->sfp_base != AXGBE_SFP_BASE_1000_CX) &&
+ (phy_data->sfp_base != AXGBE_SFP_BASE_10000_CR))
+ mode = AXGBE_PHY_REDRV_MODE_SR;
+
+ ret = axgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ if (phy_data->redrv_if)
+ axgbe_phy_set_redrv_mode_i2c(pdata, mode);
+ else
+ axgbe_phy_set_redrv_mode_mdio(pdata, mode);
+
+ axgbe_phy_put_comm_ownership(pdata);
+}
+
+static void axgbe_phy_start_ratechange(struct axgbe_port *pdata)
+{
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+}
+
+static void axgbe_phy_complete_ratechange(struct axgbe_port *pdata)
+{
+ unsigned int wait;
+
+ /* Wait for command to complete */
+ wait = AXGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+
+ rte_delay_us(1500);
+ }
+}
+
+static void axgbe_phy_rrc(struct axgbe_port *pdata)
+{
+ unsigned int s0;
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* Receiver Reset Cycle */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ axgbe_phy_complete_ratechange(pdata);
+}
+
+static void axgbe_phy_power_off(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+}
+
+static void axgbe_phy_sfi_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ axgbe_phy_set_redrv_mode(pdata);
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* 10G/SFI */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
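+	/* For passive cables the firmware sub-command is chosen by cable
+	 * length bucket: <= 1 m, <= 3 m, or longer.
+	 */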
+ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_PASSIVE) {
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+ } else {
+ if (phy_data->sfp_cable_len <= 1)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+ else if (phy_data->sfp_cable_len <= 3)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+ else
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+ }
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_SFI;
+}
+
+static void axgbe_phy_kr_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ axgbe_phy_set_redrv_mode(pdata);
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* 10G/KR */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_KR;
+}
+
+static enum axgbe_mode axgbe_phy_cur_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ return phy_data->cur_mode;
+}
+
+static enum axgbe_mode axgbe_phy_switch_baset_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* No switching if not 10GBase-T */
+ if (phy_data->port_mode != AXGBE_PORT_MODE_10GBASE_T)
+ return axgbe_phy_cur_mode(pdata);
+
+ switch (axgbe_phy_cur_mode(pdata)) {
+ case AXGBE_MODE_SGMII_100:
+ case AXGBE_MODE_SGMII_1000:
+ return AXGBE_MODE_KR;
+ case AXGBE_MODE_KR:
+ default:
+ return AXGBE_MODE_SGMII_1000;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_switch_bp_2500_mode(struct axgbe_port *pdata
+ __rte_unused)
+{
+ return AXGBE_MODE_KX_2500;
+}
+
+static enum axgbe_mode axgbe_phy_switch_bp_mode(struct axgbe_port *pdata)
+{
+ /* If we are in KR switch to KX, and vice-versa */
+ switch (axgbe_phy_cur_mode(pdata)) {
+ case AXGBE_MODE_KX_1000:
+ return AXGBE_MODE_KR;
+ case AXGBE_MODE_KR:
+ default:
+ return AXGBE_MODE_KX_1000;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_switch_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_switch_bp_mode(pdata);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_switch_bp_2500_mode(pdata);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_switch_baset_mode(pdata);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ case AXGBE_PORT_MODE_SFP:
+ /* No switching, so just return current mode */
+ return axgbe_phy_cur_mode(pdata);
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_basex_mode(struct axgbe_phy_data *phy_data
+ __rte_unused,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return AXGBE_MODE_X;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_baset_mode(struct axgbe_phy_data *phy_data
+ __rte_unused,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return AXGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ return AXGBE_MODE_SGMII_1000;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_sfp_mode(struct axgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return AXGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
+ return AXGBE_MODE_SGMII_1000;
+ else
+ return AXGBE_MODE_X;
+ case SPEED_10000:
+ case SPEED_UNKNOWN:
+ return AXGBE_MODE_SFI;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return AXGBE_MODE_KX_2500;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return AXGBE_MODE_KX_1000;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_mode(struct axgbe_port *pdata,
+ int speed)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_get_bp_mode(speed);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_get_bp_2500_mode(speed);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_get_baset_mode(phy_data, speed);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return axgbe_phy_get_basex_mode(phy_data, speed);
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_get_sfp_mode(phy_data, speed);
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static void axgbe_phy_set_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KR:
+ axgbe_phy_kr_mode(pdata);
+ break;
+ case AXGBE_MODE_SFI:
+ axgbe_phy_sfi_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool axgbe_phy_check_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode, u32 advert)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & advert)
+ return true;
+ } else {
+ enum axgbe_mode cur_mode;
+
+ cur_mode = axgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return true;
+ }
+
+ return false;
+}
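
axgbe_phy_check_mode applies two different gates: with auto-negotiation enabled, a mode is usable when the matching link mode is advertised; with it disabled, only when the configured fixed speed resolves to exactly that mode. A short usage sketch of those semantics:

    /* Sketch: under autoneg, KR is usable only if 10G full duplex is
     * advertised; otherwise only if the fixed speed maps back to KR.
     */
    bool kr_ok = axgbe_phy_check_mode(pdata, AXGBE_MODE_KR,
                                      ADVERTISED_10000baseT_Full);
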
+
+static bool axgbe_phy_use_basex_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_X:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_baset_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_SGMII_100:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case AXGBE_MODE_SGMII_1000:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_sfp_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (mode) {
+ case AXGBE_MODE_X:
+ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_SGMII_100:
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case AXGBE_MODE_SGMII_1000:
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_SFI:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_bp_2500_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_2500:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_2500baseX_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_bp_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseKX_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseKR_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_use_bp_mode(pdata, mode);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_use_bp_2500_mode(pdata, mode);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_use_baset_mode(pdata, mode);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return axgbe_phy_use_basex_mode(pdata, mode);
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_use_sfp_mode(pdata, mode);
+ default:
+ return false;
+ }
+}
+
+static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ *an_restart = 0;
+
+ if (phy_data->port_mode == AXGBE_PORT_MODE_SFP) {
+ /* Check SFP signals */
+ axgbe_phy_sfp_detect(pdata);
+
+ if (phy_data->sfp_changed) {
+ *an_restart = 1;
+ return 0;
+ }
+
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ return 0;
+ }
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS)
+ return 1;
+
+ /* No link, attempt a receiver reset cycle */
+ if (phy_data->rrc_count++) {
+ phy_data->rrc_count = 0;
+ axgbe_phy_rrc(pdata);
+ }
+
+ return 0;
+}
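
MDIO_STAT1 latches link-down events, so a single read may report a stale fault; the back-to-back reads above first clear the latch and then sample the live state. A hedged sketch of the same pattern as a standalone helper (the function name is illustrative):

    /* Sketch of the latched-low read: the first read clears a latched
     * link-down event, the second returns the current link state.
     */
    static int axgbe_link_is_up(struct axgbe_port *pdata)
    {
            unsigned int reg;

            (void)XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); /* clear latch */
            reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); /* live state */
            return !!(reg & MDIO_STAT1_LSTATUS);
    }
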
+
+static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+
+ phy_data->sfp_gpio_address = AXGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
+
+ phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
+
+ phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RX_LOS);
+ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_TX_FAULT);
+ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_MOD_ABS);
+ phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RATE_SELECT);
+}
+
+static void axgbe_phy_sfp_comm_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg, mux_addr_hi, mux_addr_lo;
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+
+ mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
+ mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
+ if (mux_addr_lo == AXGBE_SFP_DIRECT)
+ return;
+
+ phy_data->sfp_comm = AXGBE_SFP_COMM_PCA9545;
+ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
+ phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
+}
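
The I2C mux address is assembled from two property fields, with MUX_ADDR_LO doubling as the direct-attach sentinel. As a worked example with illustrative field values: MUX_ADDR_HI = 0x1c and MUX_ADDR_LO = 0x2 give (0x1c << 2) + 0x2 = 0x72, a typical PCA9545 bus address, while MUX_ADDR_LO == AXGBE_SFP_DIRECT means the SFP EEPROM sits directly on the bus with no mux in between.
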
+
+static void axgbe_phy_sfp_setup(struct axgbe_port *pdata)
+{
+ axgbe_phy_sfp_comm_setup(pdata);
+ axgbe_phy_sfp_gpio_setup(pdata);
+}
+
+static bool axgbe_phy_redrv_error(struct axgbe_phy_data *phy_data)
+{
+ if (!phy_data->redrv)
+ return false;
+
+ if (phy_data->redrv_if >= AXGBE_PHY_REDRV_IF_MAX)
+ return true;
+
+ switch (phy_data->redrv_model) {
+ case AXGBE_PHY_REDRV_MODEL_4223:
+ if (phy_data->redrv_lane > 3)
+ return true;
+ break;
+ case AXGBE_PHY_REDRV_MODEL_4227:
+ if (phy_data->redrv_lane > 1)
+ return true;
+ break;
+ default:
+ return true;
+ }
+
+ return false;
+}
+
+static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ if (phy_data->conn_type != AXGBE_CONN_TYPE_MDIO)
+ return 0;
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+ phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
+ switch (phy_data->mdio_reset) {
+ case AXGBE_MDIO_RESET_NONE:
+ case AXGBE_MDIO_RESET_I2C_GPIO:
+ case AXGBE_MDIO_RESET_INT_GPIO:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n",
+ phy_data->mdio_reset);
+ return -EINVAL;
+ }
+ if (phy_data->mdio_reset == AXGBE_MDIO_RESET_I2C_GPIO) {
+ phy_data->mdio_reset_addr = AXGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_ADDR);
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_GPIO);
+ } else if (phy_data->mdio_reset == AXGBE_MDIO_RESET_INT_GPIO) {
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_INT_GPIO);
+ }
+
+ return 0;
+}
+
+static bool axgbe_phy_port_mode_mismatch(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_NBASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool axgbe_phy_conn_type_mismatch(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_BACKPLANE)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_MDIO)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_SFP)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool axgbe_phy_port_enabled(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
+ return false;
+ if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
+ return false;
+
+ return true;
+}
+
+static void axgbe_phy_cdr_track(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->vdata->an_cdr_workaround)
+ return;
+
+ if (!phy_data->phy_cdr_notrack)
+ return;
+
+ rte_delay_us(phy_data->phy_cdr_delay + 400);
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ AXGBE_PMA_CDR_TRACK_EN_MASK,
+ AXGBE_PMA_CDR_TRACK_EN_ON);
+
+ phy_data->phy_cdr_notrack = 0;
+}
+
+static void axgbe_phy_cdr_notrack(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->vdata->an_cdr_workaround)
+ return;
+
+ if (phy_data->phy_cdr_notrack)
+ return;
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ AXGBE_PMA_CDR_TRACK_EN_MASK,
+ AXGBE_PMA_CDR_TRACK_EN_OFF);
+
+ axgbe_phy_rrc(pdata);
+
+ phy_data->phy_cdr_notrack = 1;
+}
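
The CDR workaround is designed to bracket clause-73 auto-negotiation in KR mode: tracking is switched off (with a receiver reset) before AN runs and re-enabled afterwards, with the settle delay growing on each failed attempt. A sketch of the intended hook ordering around one AN cycle; the actual call sites live in the AN state machine:

    axgbe_phy_an_pre(pdata);   /* KR + CL73: disable CDR tracking, issue RRC */
    /* ... auto-negotiation runs ... */
    axgbe_phy_an_post(pdata);  /* re-enable tracking; widen delay on failure */
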
+
+static void axgbe_phy_kr_training_post(struct axgbe_port *pdata)
+{
+ if (!pdata->cdr_track_early)
+ axgbe_phy_cdr_track(pdata);
+}
+
+static void axgbe_phy_kr_training_pre(struct axgbe_port *pdata)
+{
+ if (pdata->cdr_track_early)
+ axgbe_phy_cdr_track(pdata);
+}
+
+static void axgbe_phy_an_post(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != AXGBE_MODE_KR)
+ break;
+
+ axgbe_phy_cdr_track(pdata);
+
+ switch (pdata->an_result) {
+ case AXGBE_AN_READY:
+ case AXGBE_AN_COMPLETE:
+ break;
+ default:
+ if (phy_data->phy_cdr_delay < AXGBE_CDR_DELAY_MAX)
+ phy_data->phy_cdr_delay += AXGBE_CDR_DELAY_INC;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_an_pre(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != AXGBE_MODE_KR)
+ break;
+
+ axgbe_phy_cdr_notrack(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Reset SFP data */
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+
+ /* Reset CDR support */
+ axgbe_phy_cdr_track(pdata);
+
+ /* Power off the PHY */
+ axgbe_phy_power_off(pdata);
+
+ /* Stop the I2C controller */
+ pdata->i2c_if.i2c_stop(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Start the I2C controller */
+ ret = pdata->i2c_if.i2c_start(pdata);
+ if (ret)
+ return ret;
+
+ /* Start in highest supported mode */
+ axgbe_phy_set_mode(pdata, phy_data->start_mode);
+
+ /* Reset CDR support */
+ axgbe_phy_cdr_track(pdata);
+
+ /* After starting the I2C controller, we can check for an SFP */
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_SFP:
+ axgbe_phy_sfp_detect(pdata);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_mode cur_mode;
+
+ /* Reset by power cycling the PHY */
+ cur_mode = phy_data->cur_mode;
+ axgbe_phy_power_off(pdata);
+ /* On the very first reset cur_mode is still AXGBE_MODE_UNKNOWN, so the restore below is a no-op */
+ axgbe_phy_set_mode(pdata, cur_mode);
+ return 0;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data;
+ unsigned int reg;
+ int ret;
+
+ /* Check if enabled */
+ if (!axgbe_phy_port_enabled(pdata)) {
+ PMD_DRV_LOG(ERR, "device is not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the I2C controller */
+ ret = pdata->i2c_if.i2c_init(pdata);
+ if (ret)
+ return ret;
+
+ phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0);
+ if (!phy_data) {
+ PMD_DRV_LOG(ERR, "phy_data allocation failed\n");
+ return -ENOMEM;
+ }
+ pdata->phy_data = phy_data;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
+ phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
+ phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
+ phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+ phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
+ phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
+ phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
+ phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
+ phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
+
+ /* Validate the connection requested */
+ if (axgbe_phy_conn_type_mismatch(pdata)) {
+ PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->conn_type);
+ return -EINVAL;
+ }
+
+ /* Validate the mode requested */
+ if (axgbe_phy_port_mode_mismatch(pdata)) {
+ PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->port_speeds);
+ return -EINVAL;
+ }
+
+ /* Check for and validate MDIO reset support */
+ ret = axgbe_phy_mdio_reset_setup(pdata);
+ if (ret)
+ return ret;
+
+ /* Validate the re-driver information */
+ if (axgbe_phy_redrv_error(phy_data)) {
+ PMD_DRV_LOG(ERR, "phy re-driver settings error\n");
+ return -EINVAL;
+ }
+ pdata->kr_redrv = phy_data->redrv;
+
+ /* Indicate current mode is unknown */
+ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+
+ /* Initialize supported features */
+ pdata->phy.supported = 0;
+
+ switch (phy_data->port_mode) {
+ /* Backplane support */
+ case AXGBE_PORT_MODE_BACKPLANE:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = AXGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_2500;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* MDIO 1GBase-T support */
+ case AXGBE_PORT_MODE_1000BASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO Base-X support */
+ case AXGBE_PORT_MODE_1000BASE_X:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_X;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO NBase-T support */
+ case AXGBE_PORT_MODE_NBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) {
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_2500;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-T support */
+ case AXGBE_PORT_MODE_10GBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* 10GBase-R support */
+ case AXGBE_PORT_MODE_10GBASE_R:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = AXGBE_MODE_SFI;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* SFP support */
+ case AXGBE_PORT_MODE_SFP:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SFI;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+
+ axgbe_phy_sfp_setup(pdata);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((phy_data->conn_type & AXGBE_CONN_TYPE_MDIO) &&
+ (phy_data->phydev_mode != AXGBE_MDIO_MODE_NONE)) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
+ phy_data->phydev_mode);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)\n",
+ phy_data->mdio_addr, phy_data->phydev_mode);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_data->redrv && !phy_data->redrv_if) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+ AXGBE_MDIO_MODE_CL22);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)\n",
+ phy_data->redrv_addr);
+ return -EINVAL;
+ }
+ }
+
+ phy_data->phy_cdr_delay = AXGBE_CDR_DELAY_INIT;
+ return 0;
+}
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if)
+{
+ struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = axgbe_phy_init;
+ phy_impl->reset = axgbe_phy_reset;
+ phy_impl->start = axgbe_phy_start;
+ phy_impl->stop = axgbe_phy_stop;
+ phy_impl->link_status = axgbe_phy_link_status;
+ phy_impl->use_mode = axgbe_phy_use_mode;
+ phy_impl->set_mode = axgbe_phy_set_mode;
+ phy_impl->get_mode = axgbe_phy_get_mode;
+ phy_impl->switch_mode = axgbe_phy_switch_mode;
+ phy_impl->cur_mode = axgbe_phy_cur_mode;
+ phy_impl->an_mode = axgbe_phy_an_mode;
+ phy_impl->an_config = axgbe_phy_an_config;
+ phy_impl->an_advertising = axgbe_phy_an_advertising;
+ phy_impl->an_outcome = axgbe_phy_an_outcome;
+
+ phy_impl->an_pre = axgbe_phy_an_pre;
+ phy_impl->an_post = axgbe_phy_an_post;
+
+ phy_impl->kr_training_pre = axgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = axgbe_phy_kr_training_post;
+}
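
Publishing the v2 implementation as a function-pointer table keeps the common PHY layer hardware-agnostic: it only ever dispatches through phy_impl. A minimal dispatch sketch, assuming pdata->phy_if holds the populated axgbe_phy_if instance:

    /* Sketch: the generic layer calls through the ops table, never into
     * the axgbe_phy_* v2 functions directly.
     */
    struct axgbe_phy_impl_if *impl = &pdata->phy_if.phy_impl;
    int an_restart, link = 0;

    if (impl->init(pdata) == 0 && impl->start(pdata) == 0)
            link = impl->link_status(pdata, &an_restart);
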
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
new file mode 100644
index 00000000..c5fd5f41
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_rxtx.h"
+#include "axgbe_phy.h"
+
+#include <rte_time.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+static void
+axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (rx_queue) {
+ sw_ring = rx_queue->sw_ring;
+ if (sw_ring) {
+ for (i = 0; i < rx_queue->nb_desc; i++) {
+ if (sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(rx_queue);
+ }
+}
+
+void axgbe_dev_rx_queue_release(void *rxq)
+{
+ axgbe_rx_queue_release(rxq);
+}
+
+int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint32_t size;
+ const struct rte_memzone *dma;
+ struct axgbe_rx_queue *rxq;
+ uint32_t rx_desc = nb_desc;
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ /*
+ * Validate the Rx descriptor count: it must be a power of 2
+ * and must not exceed the hardware-supported maximum.
+ */
+ if ((!rte_is_power_of_2(rx_desc)) ||
+ rx_desc > pdata->rx_desc_count)
+ return -EINVAL;
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue",
+ sizeof(struct axgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+ return -ENOMEM;
+ }
+
+ rxq->cur = 0;
+ rxq->dirty = 0;
+ rxq->pdata = pdata;
+ rxq->mb_pool = mp;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->nb_desc = rx_desc;
+ rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * rxq->queue_id));
+ rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
+ DMA_CH_RDTR_LO);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+ /* CRC stripping in AXGBE is a per-port setting, not per-queue */
+ pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
+ rxq->free_thresh = rx_conf->rx_free_thresh ?
+ rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
+ if (rxq->free_thresh > rxq->nb_desc)
+ rxq->free_thresh = rxq->nb_desc >> 3;
+
+ /* Allocate RX ring hardware descriptors */
+ size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
+ dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
+ socket_id);
+ if (!dma) {
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
+ axgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
+ rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
+ memset((void *)rxq->desc, 0, size);
+ /* Allocate software ring */
+ size = rxq->nb_desc * sizeof(struct rte_mbuf *);
+ rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
+ axgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ dev->data->rx_queues[queue_idx] = rxq;
+ if (!pdata->rx_queues)
+ pdata->rx_queues = dev->data->rx_queues;
+
+ return 0;
+}
+
+static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
+ * wait forever though...
+ */
+ rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+
+ while (time_before(rte_get_timer_cycles(), rx_timeout)) {
+ rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
+ if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
+ (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), rx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ axgbe_prepare_rx_stop(pdata, i);
+ }
+ /* Disable each Rx queue */
+ AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ /* Disable Rx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
+ }
+}
+
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+ unsigned int reg_val = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ /* Enable Rx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
+ }
+
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count; i++)
+ reg_val |= (0x02 << (i << 1));
+ AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ /* Frames are forwarded to the application after CRC stripping */
+ if (pdata->crc_strip_enable) {
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ }
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
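
MAC_RQC0R carries a 2-bit enable field per Rx queue, and the loop shifts the value 0x02 (enabled for generic/DCB traffic) into each queue's slot. As a worked example, rx_q_count = 4 yields (0x02 << 0) | (0x02 << 2) | (0x02 << 4) | (0x02 << 6) = 0xAA.
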
+
+/* Rx burst function with one-to-one descriptor refresh */
+uint16_t
+axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint16_t nb_rx = 0;
+ struct axgbe_rx_queue *rxq = rx_queue;
+ volatile union axgbe_rx_desc *desc;
+ uint64_t old_dirty = rxq->dirty;
+ struct rte_mbuf *mbuf, *tmbuf;
+ unsigned int err;
+ uint32_t error_status;
+ uint16_t idx, pidx, pkt_len;
+
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+ while (nb_rx < nb_pkts) {
+ if (unlikely(idx == rxq->nb_desc))
+ idx = 0;
+
+ desc = &rxq->desc[idx];
+
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+ break;
+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!tmbuf)) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u\n",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ rte_eth_devices[
+ rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ pidx = idx + 1;
+ if (unlikely(pidx == rxq->nb_desc))
+ pidx = 0;
+
+ rte_prefetch0(rxq->sw_ring[pidx]);
+ if ((pidx & 0x3) == 0) {
+ rte_prefetch0(&rxq->desc[pidx]);
+ rte_prefetch0(&rxq->sw_ring[pidx]);
+ }
+
+ mbuf = rxq->sw_ring[idx];
+ /* Check for errors; free the mbuf on non-checksum errors */
+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ES);
+ error_status = 0;
+ if (unlikely(err)) {
+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+ if ((error_status != AXGBE_L3_CSUM_ERR) &&
+ (error_status != AXGBE_L4_CSUM_ERR)) {
+ rxq->errors++;
+ rte_pktmbuf_free(mbuf);
+ goto err_set;
+ }
+ }
+ if (rxq->pdata->rx_csum_enable) {
+ mbuf->ol_flags = 0;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else if (
+ unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+ /* Get the RSS hash */
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
+ PL) - rxq->crc_len;
+ /* Populate the mbuf */
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ mbuf->pkt_len = pkt_len;
+ mbuf->data_len = pkt_len;
+ rxq->bytes += pkt_len;
+ rx_pkts[nb_rx++] = mbuf;
+err_set:
+ rxq->cur++;
+ rxq->sw_ring[idx++] = tmbuf;
+ desc->read.baddr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+ memset((void *)(&desc->read.desc2), 0, 8);
+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+ rxq->dirty++;
+ }
+ rxq->pkts += nb_rx;
+ if (rxq->dirty != old_dirty) {
+ rte_wmb();
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (idx * sizeof(union axgbe_rx_desc))));
+ }
+
+ return nb_rx;
+}
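
In this receive loop, cur advances over descriptors handed to the application while dirty counts refilled slots, and the tail register is written only when a refill actually happened. A hedged sketch of draining this path from the application side (port and queue ids are illustrative):

    struct rte_mbuf *pkts[32];
    uint16_t i, n;

    n = rte_eth_rx_burst(0 /* port */, 0 /* queue */, pkts, 32);
    for (i = 0; i < n; i++)
            rte_pktmbuf_free(pkts[i]);      /* real processing goes here */
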
+
+/* Tx APIs */
+static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (tx_queue) {
+ sw_ring = tx_queue->sw_ring;
+ if (sw_ring) {
+ for (i = 0; i < tx_queue->nb_desc; i++) {
+ if (sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(tx_queue);
+ }
+}
+
+void axgbe_dev_tx_queue_release(void *txq)
+{
+ axgbe_tx_queue_release(txq);
+}
+
+int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint32_t tx_desc;
+ struct axgbe_port *pdata;
+ struct axgbe_tx_queue *txq;
+ unsigned int tsize;
+ const struct rte_memzone *tz;
+
+ tx_desc = nb_desc;
+ pdata = (struct axgbe_port *)dev->data->dev_private;
+
+ /*
+ * Validate the Tx descriptor count: it must be a power of 2,
+ * at least AXGBE_MIN_RING_DESC, and must not exceed the
+ * hardware-supported maximum.
+ */
+ if ((!rte_is_power_of_2(tx_desc)) ||
+ tx_desc > pdata->tx_desc_count ||
+ tx_desc < AXGBE_MIN_RING_DESC)
+ return -EINVAL;
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (!txq)
+ return -ENOMEM;
+ txq->pdata = pdata;
+
+ txq->nb_desc = tx_desc;
+ txq->free_thresh = tx_conf->tx_free_thresh ?
+ tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
+ if (txq->free_thresh > txq->nb_desc)
+ txq->free_thresh = (txq->nb_desc >> 1);
+ txq->free_batch_cnt = txq->free_thresh;
+
+ /* The vector Tx path requires the queue size to be a multiple of the free threshold */
+ if (txq->nb_desc % txq->free_thresh != 0)
+ txq->vector_disable = 1;
+
+ if (tx_conf->offloads != 0)
+ txq->vector_disable = 1;
+
+ /* Allocate TX ring hardware descriptors */
+ tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tsize, AXGBE_DESC_ALIGN, socket_id);
+ if (!tz) {
+ axgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ memset(tz->addr, 0, tsize);
+ txq->ring_phys_addr = (uint64_t)tz->phys_addr;
+ txq->desc = tz->addr;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * txq->queue_id));
+ txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
+ DMA_CH_TDTR_LO);
+ txq->cur = 0;
+ txq->dirty = 0;
+ txq->nb_desc_free = txq->nb_desc;
+ /* Allocate software ring */
+ tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
+ txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->sw_ring) {
+ axgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+ if (!pdata->tx_queues)
+ pdata->tx_queues = dev->data->tx_queues;
+
+ if (txq->vector_disable)
+ dev->tx_pkt_burst = &axgbe_xmit_pkts;
+ else
+#ifdef RTE_ARCH_X86
+ dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
+#else
+ dev->tx_pkt_burst = &axgbe_xmit_pkts;
+#endif
+
+ return 0;
+}
+
+static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
+ * wait forever though...
+ */
+ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
+ tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
+ if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
+ (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), tx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Tx queue %u to empty\n",
+ queue);
+}
+
+static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
+ return axgbe_txq_prepare_tx_stop(pdata, queue);
+
+ /* Calculate the status register to read and the position within */
+ if (queue < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
+ tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), tx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ queue);
+}
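
For queues at or above DMA_DSRX_FIRST_QUEUE, the Tx process state lives in one of the DSR1+ registers, DMA_DSRX_QPR queues per register. As a worked example under values this register map is commonly built with (DMA_DSRX_FIRST_QUEUE = 3 and DMA_DSRX_QPR = 4, assumed here rather than quoted from this patch): queue 9 gives tx_qidx = 6, so tx_dsr = DMA_DSR1 + (6 / 4) * DMA_DSRX_INC and tx_pos = (6 % 4) * DMA_DSR_Q_WIDTH + DMA_DSRX_TPS_START.
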
+
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
+{
+ struct axgbe_tx_queue *txq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ /* Prepare for stopping DMA channel */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ txq = dev->data->tx_queues[i];
+ axgbe_prepare_tx_stop(pdata, i);
+ }
+ /* Disable MAC Tx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ 0);
+ /* Disable each Tx DMA channel */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
+ }
+}
+
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
+{
+ struct axgbe_tx_queue *txq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Enable Tx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
+ }
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+ /* Enable MAC Tx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+/* Free mbufs whose transmission the hardware has confirmed */
+static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
+ while (txq->cur != txq->dirty) {
+ if (unlikely(idx == txq->nb_desc))
+ idx = 0;
+ desc = &txq->desc[idx];
+ /* Check for ownership */
+ if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+ return;
+ memset((void *)&desc->desc2, 0, 8);
+ /* Free mbuf */
+ rte_pktmbuf_free(txq->sw_ring[idx]);
+ txq->sw_ring[idx++] = NULL;
+ txq->dirty++;
+ }
+}
+
+/* Tx descriptor setup, assuming each mbuf is linear (single
+ * segment) and therefore occupies exactly one descriptor.
+ */
+static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
+ struct rte_mbuf *mbuf)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+ uint64_t mask;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(mbuf);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ mbuf->pkt_len);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ mbuf->pkt_len);
+ /* Mark it as First and Last Descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+ /* configure h/w Offload */
+ mask = mbuf->ol_flags & PKT_TX_L4_MASK;
+ if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+ rte_wmb();
+
+ /* Set OWN bit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = mbuf;
+ /* Update current index*/
+ txq->cur++;
+ /* Update stats */
+ txq->bytes += mbuf->pkt_len;
+
+ return 0;
+}
+
+/* Scalar Tx burst function registered with the ethdev layer */
+uint16_t
+axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ struct axgbe_tx_queue *txq;
+ uint16_t nb_desc_free;
+ uint16_t nb_pkt_sent = 0;
+ uint16_t idx;
+ uint32_t tail_addr;
+ struct rte_mbuf *mbuf;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+
+ if (unlikely(nb_desc_free <= txq->free_thresh)) {
+ axgbe_xmit_cleanup(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free == 0))
+ return 0;
+ }
+ nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
+ while (nb_pkts--) {
+ mbuf = *tx_pkts++;
+ if (axgbe_xmit_hw(txq, mbuf))
+ goto out;
+ nb_pkt_sent++;
+ }
+out:
+ /* Sync read and write */
+ rte_mb();
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ tail_addr = low32_value(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update the tail register with the next descriptor address to kick the Tx DMA channel */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+ txq->pkts += nb_pkt_sent;
+ return nb_pkt_sent;
+}
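
The tail kick converts the post-increment ring position back into the low 32 bits of a descriptor's physical address. As a worked example with an illustrative ring base of 0x100000000 and 16-byte descriptors: cur wrapping to slot 5 writes low32(0x100000000 + 5 * 16) = 0x50 to DMA_CH_TDTR_LO, telling the DMA engine to process everything up to, but not including, that descriptor.
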
+
+void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint8_t i;
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq) {
+ axgbe_rx_queue_release(rxq);
+ dev->data->rx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ if (txq) {
+ axgbe_tx_queue_release(txq);
+ dev->data->tx_queues[i] = NULL;
+ }
+ }
+}
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
new file mode 100644
index 00000000..917da58c
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_RXTX_H_
+#define _AXGBE_RXTX_H_
+
+/* To suppress compiler warnings related to descriptor casting */
+#ifdef RTE_TOOLCHAIN_GCC
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+#ifdef RTE_TOOLCHAIN_CLANG
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+/* Descriptor related defines */
+#define AXGBE_MAX_RING_DESC 4096 /* must be a power of 2 */
+#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3)
+#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1)
+#define AXGBE_MIN_RING_DESC 32
+#define RTE_AXGBE_DESCS_PER_LOOP 4
+#define RTE_AXGBE_MAX_RX_BURST 32
+
+#define AXGBE_RX_FREE_THRESH 32
+#define AXGBE_TX_FREE_THRESH 32
+
+#define AXGBE_DESC_ALIGN 128
+#define AXGBE_DESC_OWN 0x80000000
+#define AXGBE_ERR_STATUS 0x000f0000
+#define AXGBE_L3_CSUM_ERR 0x00050000
+#define AXGBE_L4_CSUM_ERR 0x00060000
+
+#include "axgbe_common.h"
+
+#define AXGBE_GET_DESC_PT(_queue, _idx) \
+ (((_queue)->desc) + \
+ ((_idx) & ((_queue)->nb_desc - 1)))
+
+#define AXGBE_GET_DESC_IDX(_queue, _idx) \
+ ((_idx) & ((_queue)->nb_desc - 1))
+
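Because nb_desc is validated as a power of 2 at queue setup, the wrap reduces to a single AND with nb_desc - 1, which in turn lets cur and dirty run as free 64-bit counters. A small worked sketch:

    /* Sketch: nb_desc = 256, so the mask is 255; a free-running counter
     * such as cur = 300 maps to ring slot 300 & 255 = 44.
     */
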
+/* Rx desc format */
+union axgbe_rx_desc {
+ struct {
+ uint64_t baddr;
+ uint32_t desc2;
+ uint32_t desc3;
+ } read;
+ struct {
+ uint32_t desc0;
+ uint32_t desc1;
+ uint32_t desc2;
+ uint32_t desc3;
+ } write;
+};
+
+struct axgbe_rx_queue {
+ /* mbuf pool for Rx buffers */
+ struct rte_mempool *mb_pool;
+ /* H/w Rx buffer size configured in DMA */
+ unsigned int buf_size;
+ /* CRC h/w offload */
+ uint16_t crc_len;
+ /* address of s/w rx buffers */
+ struct rte_mbuf **sw_ring;
+ /* Port private data */
+ struct axgbe_port *pdata;
+ /* Number of Rx descriptors in queue */
+ uint16_t nb_desc;
+ /* Maximum number of free Rx descriptors to hold before refilling */
+ uint16_t free_thresh;
+ /* Index of descriptor to check for packet availability */
+ uint64_t cur;
+ /* Index of descriptor to check for buffer reallocation */
+ uint64_t dirty;
+ /* Hardware Rx descriptor ring */
+ volatile union axgbe_rx_desc *desc;
+ /* Ring physical address */
+ uint64_t ring_phys_addr;
+ /* DMA channel register address */
+ void *dma_regs;
+ /* DMA channel tail register address */
+ volatile uint32_t *dma_tail_reg;
+ /* DPDK queue index */
+ uint16_t queue_id;
+ /* DPDK port id */
+ uint16_t port_id;
+ /* queue stats */
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t errors;
+ /* Number of mbufs allocated from the pool */
+ uint64_t mbuf_alloc;
+
+} __rte_cache_aligned;
+
+/* Tx descriptor format */
+struct axgbe_tx_desc {
+ phys_addr_t baddr;
+ uint32_t desc2;
+ uint32_t desc3;
+};
+
+struct axgbe_tx_queue {
+ /* Port private data reference */
+ struct axgbe_port *pdata;
+ /* Number of Tx descriptors in the queue */
+ uint16_t nb_desc;
+ /* Start freeing Tx buffers when fewer than this many
+ * descriptors are free
+ */
+ uint16_t free_thresh;
+ /* Available descriptors for Tx processing */
+ uint16_t nb_desc_free;
+ /* Batch of mbufs/descriptors to release */
+ uint16_t free_batch_cnt;
+ /* Flag to disable the vector path */
+ uint16_t vector_disable;
+ /* Index of descriptor to be used for the current transfer */
+ uint64_t cur;
+ /* Index of descriptor to check for transfer completion */
+ uint64_t dirty;
+ /* Virtual address of the ring */
+ volatile struct axgbe_tx_desc *desc;
+ /* Physical address of the ring */
+ uint64_t ring_phys_addr;
+ /* DMA channel register space */
+ void *dma_regs;
+ /* DMA tail register address of the ring */
+ volatile uint32_t *dma_tail_reg;
+ /* Tx queue index/id */
+ uint16_t queue_id;
+ /* Tx mbufs mapped to descriptors, freed once transmission
+ * is confirmed
+ */
+ struct rte_mbuf **sw_ring;
+ /* DPDK port id */
+ uint16_t port_id;
+ /* queue stats */
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t errors;
+
+} __rte_cache_aligned;
+
+/* Queue-related APIs */
+
+/*
+ * RX/TX function prototypes
+ */
+
+
+void axgbe_dev_tx_queue_release(void *txq);
+int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
+int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+
+void axgbe_dev_rx_queue_release(void *rxq);
+int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
+int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+#endif /* _AXGBE_RXTX_H_ */
diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
new file mode 100644
index 00000000..9be70371
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_rxtx.h"
+#include "axgbe_phy.h"
+
+#include <rte_time.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+/* Precomputed control flags avoid shifting during every descriptor preparation */
+#define TX_DESC_CTRL_FLAGS 0xb000000000000000
+#define TX_FREE_BULK 8
+#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1)
+
+static inline void
+axgbe_vec_tx(volatile struct axgbe_tx_desc *desc,
+ struct rte_mbuf *mbuf)
+{
+ __m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 |
+ TX_DESC_CTRL_FLAGS | mbuf->data_len,
+ mbuf->buf_iova
+ + mbuf->data_off);
+ _mm_store_si128((__m128i *)desc, descriptor);
+}
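
TX_DESC_CTRL_FLAGS preloads the control bits of the high descriptor word so a single 128-bit store completes the descriptor. Reading the constant against the TX_NORMAL_DESC3 layout used in the scalar path (bit positions inferred from the AXGMAC_SET_BITS_LE calls there, so treat this as a reading aid rather than a register spec): the top nibble 0xb = 1011 sets OWN (bit 63), FD (bit 61), and LD (bit 60) while leaving CTXT (bit 62) clear; the packet length lands in the FL field via the << 32, and data_len fills the buffer-length field in the low word. Checksum bits are absent because the vector path is only selected when no Tx offloads are configured.
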
+
+static void
+axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ int idx, i;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt
+ - 1);
+ desc = &txq->desc[idx];
+ if (desc->desc3 & AXGBE_DESC_OWN)
+ return;
+ /* memset avoided for desc ctrl fields since in vec_tx path
+ * all 128 bits are populated
+ */
+ for (i = 0; i < txq->free_batch_cnt; i++, idx--)
+ rte_pktmbuf_free_seg(txq->sw_ring[idx]);
+
+
+ txq->dirty += txq->free_batch_cnt;
+ txq->nb_desc_free += txq->free_batch_cnt;
+}
+
+uint16_t
+axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ struct axgbe_tx_queue *txq;
+ uint16_t idx, nb_commit, loop, i;
+ uint32_t tail_addr;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+ if (txq->nb_desc_free < txq->free_thresh) {
+ axgbe_xmit_cleanup_vec(txq);
+ if (unlikely(txq->nb_desc_free == 0))
+ return 0;
+ }
+ nb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts);
+ nb_commit = nb_pkts;
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ loop = txq->nb_desc - idx;
+ if (nb_commit >= loop) {
+ for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) {
+ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
+ txq->sw_ring[idx] = *tx_pkts;
+ }
+ nb_commit -= loop;
+ idx = 0;
+ }
+ for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) {
+ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
+ txq->sw_ring[idx] = *tx_pkts;
+ }
+ txq->cur += nb_pkts;
+ tail_addr = (uint32_t)(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+ /* Update the tail register with the next descriptor address to kick the Tx DMA channel */
+ rte_write32(tail_addr, (void *)txq->dma_tail_reg);
+ txq->pkts += nb_pkts;
+ txq->nb_desc_free -= nb_pkts;
+
+ return nb_pkts;
+}
diff --git a/drivers/net/axgbe/meson.build b/drivers/net/axgbe/meson.build
new file mode 100644
index 00000000..548ffff7
--- /dev/null
+++ b/drivers/net/axgbe/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+sources = files('axgbe_ethdev.c',
+ 'axgbe_dev.c',
+ 'axgbe_mdio.c',
+ 'axgbe_phy_impl.c',
+ 'axgbe_i2c.c',
+ 'axgbe_rxtx.c')
+
+cflags += '-Wno-cast-qual'
+
+if arch_subdir == 'x86'
+ sources += files('axgbe_rxtx_vec_sse.c')
+endif
diff --git a/drivers/net/axgbe/rte_pmd_axgbe_version.map b/drivers/net/axgbe/rte_pmd_axgbe_version.map
new file mode 100644
index 00000000..b26efa67
--- /dev/null
+++ b/drivers/net/axgbe/rte_pmd_axgbe_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/net/bnx2x/LICENSE.bnx2x_pmd b/drivers/net/bnx2x/LICENSE.bnx2x_pmd
deleted file mode 100644
index 96c7c1e1..00000000
--- a/drivers/net/bnx2x/LICENSE.bnx2x_pmd
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Broadcom Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 90ff8b1e..55d1ad6e 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -1,3 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2014 - 2018 Cavium Inc.
+# All rights reserved.
+# www.cavium.com
include $(RTE_SDK)/mk/rte.vars.mk
#
@@ -17,10 +21,6 @@ EXPORT_MAP := rte_pmd_bnx2x_version.map
LIBABIVER := 1
-ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
-CFLAGS += -wd188 #188: enumerated type mixed with another type
-endif
-
#
# all source are stored in SRCS-y
#
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index fb02d0f3..4904eaf3 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -1,4 +1,4 @@
-/*-
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
@@ -6,11 +6,9 @@
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#define BNX2X_DRIVER_VERSION "1.78.18"
@@ -31,7 +29,7 @@
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
#define BNX2X_PMD_VERSION_MINOR 0
-#define BNX2X_PMD_VERSION_REVISION 5
+#define BNX2X_PMD_VERSION_REVISION 6
#define BNX2X_PMD_VERSION_PATCH 1
static inline const char *
@@ -125,7 +123,6 @@ int bnx2x_nic_load(struct bnx2x_softc *sc);
static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc);
static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
uint8_t storm, uint16_t index, uint8_t op,
uint8_t update);
@@ -170,16 +167,16 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
dma->sc = sc;
if (IS_PF(sc))
- sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
+ snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
rte_get_timer_cycles());
else
- sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
+ snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
rte_get_timer_cycles());
/* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */
- z = rte_memzone_reserve_aligned(mz_name, (uint64_t) (size),
+ z = rte_memzone_reserve_aligned(mz_name, (uint64_t)size,
SOCKET_ID_ANY,
- 0, align);
+ RTE_MEMZONE_IOVA_CONTIG, align);
if (z == NULL) {
PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
return -ENOMEM;
@@ -1969,9 +1966,6 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
PMD_DRV_LOG(DEBUG, "Starting NIC unload...");
- /* stop the periodic callout */
- bnx2x_periodic_stop(sc);
-
/* mark driver as unloaded in shmem2 */
if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
@@ -4490,6 +4484,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
+ PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
@@ -4506,7 +4502,7 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
}
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
- le16toh(fp->fp_hc_idx), IGU_INT_DISABLE, 1);
+ le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
}
/*
@@ -6997,16 +6993,6 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc)
}
}
-static void bnx2x_periodic_start(struct bnx2x_softc *sc)
-{
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
-}
-
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc)
-{
- atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
-}
-
static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
{
int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc);
@@ -7041,10 +7027,6 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
bnx2x_link_report(sc);
}
- if (!CHIP_REV_IS_SLOW(sc)) {
- bnx2x_periodic_start(sc);
- }
-
sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
return rc;
}
@@ -8285,16 +8267,6 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
}
-
-/*
- * Enable internal target-read (in case we are probed after PF
- * FLR). Must be done prior to any BAR read access. Only for
- * 57712 and up
- */
- if (!CHIP_IS_E1x(sc)) {
- REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
- 1);
- }
}
/* get the nvram size */
@@ -9671,7 +9643,17 @@ int bnx2x_attach(struct bnx2x_softc *sc)
bnx2x_init_rte(sc);
if (IS_PF(sc)) {
-/* get device info and set params */
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!CHIP_IS_E1x(sc)) {
+ REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
+ 1);
+ DELAY(200000);
+ }
+
+ /* get device info and set params */
if (bnx2x_get_device_info(sc) != 0) {
PMD_DRV_LOG(NOTICE, "getting device info");
return -ENXIO;
@@ -9680,7 +9662,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
/* get phy settings from shmem and 'and' against admin settings */
bnx2x_get_phy_info(sc);
} else {
-/* Left mac of VF unfilled, PF should set it for VF */
+	/* Leave the VF MAC unfilled; the PF is expected to set it for the VF */
memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 17075d38..0f6024fb 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1,4 +1,4 @@
-/*-
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
@@ -6,11 +6,9 @@
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __BNX2X_H__
@@ -1930,6 +1928,7 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc);
int bnx2x_complete_sp(struct bnx2x_softc *sc);
int bnx2x_set_storm_rx_mode(struct bnx2x_softc *sc);
void bnx2x_periodic_callout(struct bnx2x_softc *sc);
+void bnx2x_periodic_stop(void *param);
int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count);
void bnx2x_vf_close(struct bnx2x_softc *sc);
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 483d5a17..575271a8 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bnx2x.h"
@@ -13,6 +10,7 @@
#include <rte_dev.h>
#include <rte_ethdev_pci.h>
+#include <rte_alarm.h>
int bnx2x_logtype_init;
int bnx2x_logtype_driver;
@@ -81,26 +79,31 @@ static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};
-static void
+static int
bnx2x_link_update(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
+ struct rte_eth_link link;
PMD_INIT_FUNC_TRACE();
+
bnx2x_link_status_update(sc);
+ memset(&link, 0, sizeof(link));
mb();
- dev->data->dev_link.link_speed = sc->link_vars.line_speed;
+ link.link_speed = sc->link_vars.line_speed;
switch (sc->link_vars.duplex) {
case DUPLEX_FULL:
- dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case DUPLEX_HALF:
- dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
break;
}
- dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- dev->data->dev_link.link_status = sc->link_vars.link_up;
+ link.link_status = sc->link_vars.link_up;
+
+ return rte_eth_linkstatus_set(dev, &link);
}
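
Aside: rte_eth_linkstatus_set() comes from rte_ethdev_driver.h and atomically publishes the new state into dev->data->dev_link, returning 0 when the link status changed and -1 when it did not, which is why the hand-rolled old/new comparison further below can be dropped. A minimal usage sketch under those assumptions:

#include <string.h>
#include <rte_ethdev_driver.h>

static int publish_link(struct rte_eth_dev *dev, uint32_t speed_mbps, int up)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_speed = speed_mbps;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;
	link.link_status = up ? ETH_LINK_UP : ETH_LINK_DOWN;

	/* 0: status changed, -1: unchanged */
	return rte_eth_linkstatus_set(dev, &link);
}
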
static void
@@ -109,8 +112,6 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
uint32_t link_status;
- PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
-
bnx2x_intr_legacy(sc, 0);
if (sc->periodic_flags & PERIODIC_GO)
@@ -128,10 +129,41 @@ bnx2x_interrupt_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct bnx2x_softc *sc = dev->data->dev_private;
+ PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
+
bnx2x_interrupt_action(dev);
rte_intr_enable(&sc->pci_dev->intr_handle);
}
+static void bnx2x_periodic_start(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
+
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
+ bnx2x_interrupt_action(dev);
+ if (IS_PF(sc)) {
+ ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+ bnx2x_periodic_start, (void *)dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to start periodic"
+ " timer rc %d", ret);
+ assert(false && "Unable to start periodic timer");
+ }
+ }
+}
+
+void bnx2x_periodic_stop(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
+
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
+
+ rte_eal_alarm_cancel(bnx2x_periodic_start, (void *)dev);
+}
+
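
Aside: together these two helpers build a software periodic timer out of EAL's one-shot alarms: the callback re-arms itself on every tick (PF only), and stopping means flipping the flag and cancelling any pending alarm for this callback/argument pair. A stripped-down sketch of the pattern, assuming only the public rte_alarm API:

#include <rte_alarm.h>

#define PERIOD_US 1000000	/* 1 s, matching BNX2X_SP_TIMER_PERIOD */

static void periodic_cb(void *arg)
{
	/* ... slow-path work on arg ... */

	/* alarms are one-shot: re-arm for the next tick */
	rte_eal_alarm_set(PERIOD_US, periodic_cb, arg);
}

static void periodic_begin(void *arg)
{
	rte_eal_alarm_set(PERIOD_US, periodic_cb, arg);
}

static void periodic_end(void *arg)
{
	/* cancels every pending alarm registered with (cb, arg) */
	rte_eal_alarm_cancel(periodic_cb, arg);
}
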
/*
* Devops - helper functions can be called from user application
*/
@@ -140,11 +172,13 @@ static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.jumbo_frame)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
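
Aside: this hunk is the offload-API migration: the old rxmode bit-field rxmode.jumbo_frame becomes a bit in the rxmode->offloads bitmap, matching the DEV_RX_OFFLOAD_JUMBO_FRAME capability the driver starts advertising further down. A minimal check using only the public ethdev structures:

#include <rte_ethdev.h>

static int jumbo_requested(const struct rte_eth_dev *dev)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

	return (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0;
}
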
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
@@ -185,6 +219,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ /* start the periodic callout */
+ if (sc->periodic_flags & PERIODIC_STOP)
+ bnx2x_periodic_start(dev);
+
ret = bnx2x_init(sc);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
@@ -225,6 +263,9 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
bnx2x_interrupt_handler, (void *)dev);
}
+ /* stop the periodic callout */
+ bnx2x_periodic_stop(dev);
+
ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
if (ret) {
PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
@@ -307,20 +348,16 @@ bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete
{
PMD_INIT_FUNC_TRACE();
- int old_link_status = dev->data->dev_link.link_status;
-
- bnx2x_link_update(dev);
-
- return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
+ return bnx2x_link_update(dev);
}
static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
- int old_link_status = dev->data->dev_link.link_status;
struct bnx2x_softc *sc = dev->data->dev_private;
+ int ret = 0;
- bnx2x_link_update(dev);
+ ret = bnx2x_link_update(dev);
bnx2x_check_bull(sc);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
@@ -329,7 +366,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_comple
dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
- return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
+ return ret;
}
static int
@@ -447,13 +484,13 @@ static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
}
static int
@@ -583,6 +620,17 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
return ret;
}
+ /* schedule periodic poll for slowpath link events */
+ if (IS_PF(sc)) {
+ ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
+ bnx2x_periodic_start, (void *)eth_dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to start periodic"
+ " timer rc %d", ret);
+ return -EINVAL;
+ }
+ }
+
eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
@@ -597,18 +645,20 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
if (IS_VF(sc)) {
rte_spinlock_init(&sc->vf2pf_lock);
- if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
- &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
- RTE_CACHE_LINE_SIZE) != 0)
- return -ENOMEM;
+ ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
+ &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
+ RTE_CACHE_LINE_SIZE);
+ if (ret)
+ goto out;
sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
sc->vf2pf_mbox_mapping.vaddr;
- if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
- &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
- RTE_CACHE_LINE_SIZE) != 0)
- return -ENOMEM;
+ ret = bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
+ &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
+ RTE_CACHE_LINE_SIZE);
+ if (ret)
+ goto out;
sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
sc->pf2vf_bulletin_mapping.vaddr;
@@ -616,10 +666,14 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
sc->max_rx_queues);
if (ret)
- return ret;
+ goto out;
}
return 0;
+
+out:
+ bnx2x_periodic_stop(eth_dev);
+ return ret;
}
static int
@@ -642,24 +696,14 @@ static struct rte_pci_driver rte_bnx2xvf_pmd;
static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
struct rte_pci_device *pci_dev)
{
- struct rte_eth_dev *eth_dev;
- int ret;
-
- eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct bnx2x_softc));
- if (!eth_dev)
- return -ENOMEM;
-
if (pci_drv == &rte_bnx2x_pmd)
- ret = eth_bnx2x_dev_init(eth_dev);
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct bnx2x_softc), eth_bnx2x_dev_init);
else if (pci_drv == &rte_bnx2xvf_pmd)
- ret = eth_bnx2xvf_dev_init(eth_dev);
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct bnx2x_softc), eth_bnx2xvf_dev_init);
else
- ret = -EINVAL;
-
- if (ret)
- rte_eth_dev_pci_release(eth_dev);
-
- return ret;
+ return -EINVAL;
}
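
Aside: rte_eth_dev_pci_generic_probe() (rte_ethdev_pci.h) allocates the ethdev with a private data area of the given size, invokes the supplied init callback, and releases the device on failure, which is exactly the boilerplate the hand-rolled version duplicated. A sketch; struct my_private and my_dev_init are hypothetical stand-ins:

#include <rte_ethdev_pci.h>

struct my_private { int dummy; };	/* hypothetical per-device state */

static int my_dev_init(struct rte_eth_dev *eth_dev)
{
	(void)eth_dev;
	return 0;
}

static int my_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct my_private), my_dev_init);
}
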
static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
@@ -691,14 +735,12 @@ RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio-pci");
-RTE_INIT(bnx2x_init_log);
-static void
-bnx2x_init_log(void)
+RTE_INIT(bnx2x_init_log)
{
- bnx2x_logtype_init = rte_log_register("pmd.bnx2x.init");
+ bnx2x_logtype_init = rte_log_register("pmd.net.bnx2x.init");
if (bnx2x_logtype_init >= 0)
rte_log_set_level(bnx2x_logtype_init, RTE_LOG_NOTICE);
- bnx2x_logtype_driver = rte_log_register("pmd.bnx2x.driver");
+ bnx2x_logtype_driver = rte_log_register("pmd.net.bnx2x.driver");
if (bnx2x_logtype_driver >= 0)
rte_log_set_level(bnx2x_logtype_driver, RTE_LOG_NOTICE);
}
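
Aside: in this release the RTE_INIT() macro declares the constructor and opens its body in one step, which is why the separate prototype plus static definition could be collapsed; the log names also move under the pmd.net.* namespace. A minimal sketch of registering a dynamic log type in the new style (the name "pmd.net.sketch" is illustrative):

#include <rte_log.h>

static int sketch_logtype;

RTE_INIT(sketch_register_log)
{
	sketch_logtype = rte_log_register("pmd.net.sketch");
	if (sketch_logtype >= 0)
		rte_log_set_level(sketch_logtype, RTE_LOG_NOTICE);
}
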
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.h b/drivers/net/bnx2x/bnx2x_ethdev.h
index 37cac158..807ba178 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.h
+++ b/drivers/net/bnx2x/bnx2x_ethdev.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef PMD_BNX2X_ETHDEV_H
@@ -58,7 +55,6 @@
#define wmb() rte_wmb()
#define rmb() rte_rmb()
-
#define MAX_QUEUES sysconf(_SC_NPROCESSORS_CONF)
#define BNX2X_MIN_RX_BUF_SIZE 1024
@@ -72,6 +68,8 @@
/* Maximum number of Rx packets to process at a time */
#define BNX2X_RX_BUDGET 0xffffffff
+#define BNX2X_SP_TIMER_PERIOD US_PER_S /* 1 second */
+
#endif
/* MAC address operations */
diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h
index 08c1b764..9e232a9b 100644
--- a/drivers/net/bnx2x/bnx2x_logs.h
+++ b/drivers/net/bnx2x/bnx2x_logs.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef _PMD_LOGS_H_
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index a0d4ac92..d9a4127d 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bnx2x.h"
@@ -26,7 +23,8 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0, BNX2X_PAGE_SIZE);
+ return rte_memzone_reserve_aligned(z_name, ring_size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, BNX2X_PAGE_SIZE);
}
static void
@@ -140,7 +138,8 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
rxq->sw_ring[idx] = mbuf;
- rxq->rx_ring[idx] = mbuf->buf_iova;
+ rxq->rx_ring[idx] =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
}
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
@@ -400,7 +399,8 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mb = rxq->sw_ring[bd_cons];
rxq->sw_ring[bd_cons] = new_mb;
- rxq->rx_ring[bd_prod] = new_mb->buf_iova;
+ rxq->rx_ring[bd_prod] =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
rx_pref = NEXT_RX_BD(bd_cons) & MAX_RX_BD(rxq);
rte_prefetch0(rxq->sw_ring[rx_pref]);
@@ -409,7 +409,7 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rte_prefetch0(&rxq->sw_ring[rx_pref]);
}
- rx_mb->data_off = pad;
+ rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
rx_mb->nb_segs = 1;
rx_mb->next = NULL;
rx_mb->pkt_len = rx_mb->data_len = len;
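
Aside: both Rx fixes in this file concern the mbuf headroom: the descriptor is now programmed with the default data address (buffer start plus RTE_PKTMBUF_HEADROOM, converted to little-endian for the NIC), and data_off is set to the matching CPU-side offset, so hardware and software agree on where packet data begins. The address computation in isolation:

#include <rte_mbuf.h>
#include <rte_byteorder.h>

/* Equivalent to rte_cpu_to_le_64(mb->buf_iova + RTE_PKTMBUF_HEADROOM). */
static inline uint64_t rx_bd_addr(struct rte_mbuf *mb)
{
	return rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
}
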
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index 9600e0f1..6ad4928c 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef _BNX2X_RXTX_H_
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index b9b85963..edc86ccc 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1,16 +1,14 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bnx2x.h"
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 3396de31..635412bd 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -1,16 +1,14 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef BNX2X_STATS_H
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index 3c08f2a2..50099d46 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bnx2x.h"
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h
index d7cc11be..cc6fef95 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/bnx2x/bnx2x_vfpf.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- *
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef BNX2X_VFPF_H
diff --git a/drivers/net/bnx2x/ecore_fw_defs.h b/drivers/net/bnx2x/ecore_fw_defs.h
index ab490efa..5984acd9 100644
--- a/drivers/net/bnx2x/ecore_fw_defs.h
+++ b/drivers/net/bnx2x/ecore_fw_defs.h
@@ -1,15 +1,13 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
- * Copyright (c) 2014-2015 QLogic Corporation.
+ * Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef ECORE_FW_DEFS_H
diff --git a/drivers/net/bnx2x/ecore_hsi.h b/drivers/net/bnx2x/ecore_hsi.h
index 5cce6647..57085ebb 100644
--- a/drivers/net/bnx2x/ecore_hsi.h
+++ b/drivers/net/bnx2x/ecore_hsi.h
@@ -1,15 +1,13 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
- * Copyright (c) 2014-2015 QLogic Corporation.
+ * Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef ECORE_HSI_H
diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h
index 4576c565..f2de07e5 100644
--- a/drivers/net/bnx2x/ecore_init.h
+++ b/drivers/net/bnx2x/ecore_init.h
@@ -1,16 +1,14 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef ECORE_INIT_H
diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h
index b6f98324..2b003afb 100644
--- a/drivers/net/bnx2x/ecore_init_ops.h
+++ b/drivers/net/bnx2x/ecore_init_ops.h
@@ -1,16 +1,14 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef ECORE_INIT_OPS_H
diff --git a/drivers/net/bnx2x/ecore_mfw_req.h b/drivers/net/bnx2x/ecore_mfw_req.h
index 57529097..fe945048 100644
--- a/drivers/net/bnx2x/ecore_mfw_req.h
+++ b/drivers/net/bnx2x/ecore_mfw_req.h
@@ -1,15 +1,13 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
- * Copyright (c) 2014-2015 QLogic Corporation.
+ * Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef ECORE_MFW_REQ_H
diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h
index d8203b45..ae8a93bb 100644
--- a/drivers/net/bnx2x/ecore_reg.h
+++ b/drivers/net/bnx2x/ecore_reg.h
@@ -1,15 +1,13 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
- * Copyright (c) 2014-2015 QLogic Corporation.
+ * Copyright (c) 2014-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef ECORE_REG_H
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index a75a7fa4..0c8685c6 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -1,16 +1,14 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bnx2x.h"
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index ff40413c..6b65a496 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -1,16 +1,14 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef ECORE_SP_H
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index 9d0f3136..b63fd23e 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -1,16 +1,14 @@
-/*
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bnx2x.h"
@@ -4143,9 +4141,9 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params,
elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1);
}
-static void elink_warpcore_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint32_t serdes_net_if;
@@ -4222,7 +4220,7 @@ static void elink_warpcore_config_init(struct elink_phy *phy,
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
if (vars->line_speed != ELINK_SPEED_20000) {
PMD_DRV_LOG(DEBUG, "Speed not supported yet");
- return;
+ return 0;
}
PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS");
elink_warpcore_set_20G_DXGXS(sc, phy, lane);
@@ -4242,13 +4240,15 @@ static void elink_warpcore_config_init(struct elink_phy *phy,
PMD_DRV_LOG(DEBUG,
"Unsupported Serdes Net Interface 0x%x",
serdes_net_if);
- return;
+ return 0;
}
}
/* Take lane out of reset after configuration is finished */
elink_warpcore_reset_lane(sc, phy, 0);
PMD_DRV_LOG(DEBUG, "Exit config init");
+
+ return 0;
}
static void elink_warpcore_link_reset(struct elink_phy *phy,
@@ -5226,9 +5226,9 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_link_settings_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_link_settings_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
@@ -5299,9 +5299,9 @@ static elink_status_t elink_link_settings_status(struct elink_phy *phy,
return rc;
}
-static elink_status_t elink_warpcore_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t lane;
@@ -5520,9 +5520,9 @@ static void elink_set_preemphasis(struct elink_phy *phy,
}
}
-static void elink_xgxs_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) ||
(params->loopback_mode == ELINK_LOOPBACK_XGXS));
@@ -5567,6 +5567,8 @@ static void elink_xgxs_config_init(struct elink_phy *phy,
elink_initialize_sgmii_process(phy, params, vars);
}
+
+ return 0;
}
static elink_status_t elink_prepare_xgxs(struct elink_phy *phy,
@@ -5751,8 +5753,8 @@ static void elink_link_int_ack(struct elink_params *params,
}
}
-static elink_status_t elink_format_ver(uint32_t num, uint8_t * str,
- uint16_t * len)
+static uint8_t elink_format_ver(uint32_t num, uint8_t * str,
+ uint16_t * len)
{
uint8_t *str_ptr = str;
uint32_t mask = 0xf0000000;
@@ -5790,8 +5792,8 @@ static elink_status_t elink_format_ver(uint32_t num, uint8_t * str,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
- uint8_t * str, uint16_t * len)
+static uint8_t elink_null_format_ver(__rte_unused uint32_t spirom_ver,
+ uint8_t * str, uint16_t * len)
{
str[0] = '\0';
(*len)--;
@@ -6802,9 +6804,9 @@ static void elink_8073_specific_func(struct elink_phy *phy,
}
}
-static elink_status_t elink_8073_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_8073_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint16_t val = 0, tmp1;
@@ -7097,9 +7099,9 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy,
/******************************************************************/
/* BNX2X8705 PHY SECTION */
/******************************************************************/
-static elink_status_t elink_8705_config_init(struct elink_phy *phy,
- struct elink_params *params,
- __rte_unused struct elink_vars
+static uint8_t elink_8705_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars
*vars)
{
struct bnx2x_softc *sc = params->sc;
@@ -8403,9 +8405,9 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_8706_read_status(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_8706_read_status(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
return elink_8706_8726_read_status(phy, params, vars);
}
@@ -8477,9 +8479,9 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy,
return link_up;
}
-static elink_status_t elink_8726_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_8726_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726");
@@ -8684,9 +8686,9 @@ static void elink_8727_config_speed(struct elink_phy *phy,
}
}
-static elink_status_t elink_8727_config_init(struct elink_phy *phy,
- struct elink_params *params,
- __rte_unused struct elink_vars
+static uint8_t elink_8727_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ __rte_unused struct elink_vars
*vars)
{
uint32_t tx_en_mode;
@@ -9291,7 +9293,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
return ELINK_STATUS_OK;
}
-static elink_status_t elink_8481_config_init(struct elink_phy *phy,
+static uint8_t elink_8481_config_init(struct elink_phy *phy,
struct elink_params *params,
struct elink_vars *vars)
{
@@ -9442,8 +9444,8 @@ static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc,
return reset_gpios;
}
-static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
- struct elink_params *params)
+static void elink_84833_hw_reset_phy(struct elink_phy *phy,
+ struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
uint8_t reset_gpios;
@@ -9471,8 +9473,6 @@ static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW);
DELAY(10);
PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios);
-
- return ELINK_STATUS_OK;
}
static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
@@ -9513,9 +9513,9 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
}
#define PHY84833_CONSTANT_LATENCY 1193
-static elink_status_t elink_848x3_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_848x3_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port, initialize = 1;
@@ -9819,7 +9819,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
return link_up;
}
-static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
+static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str,
uint16_t * len)
{
elink_status_t status = ELINK_STATUS_OK;
@@ -10146,9 +10146,9 @@ static void elink_54618se_specific_func(struct elink_phy *phy,
}
}
-static elink_status_t elink_54618se_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_54618se_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
uint8_t port;
@@ -10542,9 +10542,9 @@ static void elink_7101_config_loopback(struct elink_phy *phy,
MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
}
-static elink_status_t elink_7101_config_init(struct elink_phy *phy,
- struct elink_params *params,
- struct elink_vars *vars)
+static uint8_t elink_7101_config_init(struct elink_phy *phy,
+ struct elink_params *params,
+ struct elink_vars *vars)
{
uint16_t fw_ver1, fw_ver2, val;
struct bnx2x_softc *sc = params->sc;
@@ -10614,8 +10614,8 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy,
return link_up;
}
-static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
- uint16_t * len)
+static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str,
+ uint16_t * len)
{
if (*len < 5)
return ELINK_STATUS_ERROR;
@@ -10680,14 +10680,14 @@ static const struct elink_phy phy_null = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) NULL,
- .read_status = (read_status_t) NULL,
- .link_reset = (link_reset_t) NULL,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = NULL,
+ .read_status = NULL,
+ .link_reset = NULL,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_serdes = {
@@ -10714,14 +10714,14 @@ static const struct elink_phy phy_serdes = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_xgxs_config_init,
- .read_status = (read_status_t) elink_link_settings_status,
- .link_reset = (link_reset_t) elink_int_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_xgxs_config_init,
+ .read_status = elink_link_settings_status,
+ .link_reset = elink_int_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_xgxs = {
@@ -10749,14 +10749,14 @@ static const struct elink_phy phy_xgxs = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_xgxs_config_init,
- .read_status = (read_status_t) elink_link_settings_status,
- .link_reset = (link_reset_t) elink_int_link_reset,
- .config_loopback = (config_loopback_t) elink_set_xgxs_loopback,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) elink_xgxs_specific_func
+ .config_init = elink_xgxs_config_init,
+ .read_status = elink_link_settings_status,
+ .link_reset = elink_int_link_reset,
+ .config_loopback = elink_set_xgxs_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = elink_xgxs_specific_func
};
static const struct elink_phy phy_warpcore = {
@@ -10785,14 +10785,14 @@ static const struct elink_phy phy_warpcore = {
.speed_cap_mask = 0,
/* req_duplex = */ 0,
/* rsrv = */ 0,
- .config_init = (config_init_t) elink_warpcore_config_init,
- .read_status = (read_status_t) elink_warpcore_read_status,
- .link_reset = (link_reset_t) elink_warpcore_link_reset,
- .config_loopback = (config_loopback_t) elink_set_warpcore_loopback,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) elink_warpcore_hw_reset,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_warpcore_config_init,
+ .read_status = elink_warpcore_read_status,
+ .link_reset = elink_warpcore_link_reset,
+ .config_loopback = elink_set_warpcore_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = elink_warpcore_hw_reset,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_7101 = {
@@ -10814,14 +10814,14 @@ static const struct elink_phy phy_7101 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_7101_config_init,
- .read_status = (read_status_t) elink_7101_read_status,
- .link_reset = (link_reset_t) elink_common_ext_link_reset,
- .config_loopback = (config_loopback_t) elink_7101_config_loopback,
- .format_fw_ver = (format_fw_ver_t) elink_7101_format_ver,
- .hw_reset = (hw_reset_t) elink_7101_hw_reset,
- .set_link_led = (set_link_led_t) elink_7101_set_link_led,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_7101_config_init,
+ .read_status = elink_7101_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = elink_7101_config_loopback,
+ .format_fw_ver = elink_7101_format_ver,
+ .hw_reset = elink_7101_hw_reset,
+ .set_link_led = elink_7101_set_link_led,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_8073 = {
@@ -10845,14 +10845,14 @@ static const struct elink_phy phy_8073 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8073_config_init,
- .read_status = (read_status_t) elink_8073_read_status,
- .link_reset = (link_reset_t) elink_8073_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) elink_8073_specific_func
+ .config_init = elink_8073_config_init,
+ .read_status = elink_8073_read_status,
+ .link_reset = elink_8073_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = elink_8073_specific_func
};
static const struct elink_phy phy_8705 = {
@@ -10873,14 +10873,14 @@ static const struct elink_phy phy_8705 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8705_config_init,
- .read_status = (read_status_t) elink_8705_read_status,
- .link_reset = (link_reset_t) elink_common_ext_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_null_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_8705_config_init,
+ .read_status = elink_8705_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_null_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_8706 = {
@@ -10902,14 +10902,14 @@ static const struct elink_phy phy_8706 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8706_config_init,
- .read_status = (read_status_t) elink_8706_read_status,
- .link_reset = (link_reset_t) elink_common_ext_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_8706_config_init,
+ .read_status = elink_8706_read_status,
+ .link_reset = elink_common_ext_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_8726 = {
@@ -10932,14 +10932,14 @@ static const struct elink_phy phy_8726 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8726_config_init,
- .read_status = (read_status_t) elink_8726_read_status,
- .link_reset = (link_reset_t) elink_8726_link_reset,
- .config_loopback = (config_loopback_t) elink_8726_config_loopback,
- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) NULL,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_8726_config_init,
+ .read_status = elink_8726_read_status,
+ .link_reset = elink_8726_link_reset,
+ .config_loopback = elink_8726_config_loopback,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = NULL,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_8727 = {
@@ -10961,14 +10961,14 @@ static const struct elink_phy phy_8727 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8727_config_init,
- .read_status = (read_status_t) elink_8727_read_status,
- .link_reset = (link_reset_t) elink_8727_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_format_ver,
- .hw_reset = (hw_reset_t) elink_8727_hw_reset,
- .set_link_led = (set_link_led_t) elink_8727_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_8727_specific_func
+ .config_init = elink_8727_config_init,
+ .read_status = elink_8727_read_status,
+ .link_reset = elink_8727_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_format_ver,
+ .hw_reset = elink_8727_hw_reset,
+ .set_link_led = elink_8727_set_link_led,
+ .phy_specific_func = elink_8727_specific_func
};
static const struct elink_phy phy_8481 = {
@@ -10996,14 +10996,14 @@ static const struct elink_phy phy_8481 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_8481_config_init,
- .read_status = (read_status_t) elink_848xx_read_status,
- .link_reset = (link_reset_t) elink_8481_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
- .hw_reset = (hw_reset_t) elink_8481_hw_reset,
- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t) NULL
+ .config_init = elink_8481_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_8481_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_8481_hw_reset,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = NULL
};
static const struct elink_phy phy_84823 = {
@@ -11031,14 +11031,14 @@ static const struct elink_phy phy_84823 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_848x3_config_init,
- .read_status = (read_status_t) elink_848xx_read_status,
- .link_reset = (link_reset_t) elink_848x3_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = NULL,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
};
static const struct elink_phy phy_84833 = {
@@ -11065,14 +11065,14 @@ static const struct elink_phy phy_84833 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_848x3_config_init,
- .read_status = (read_status_t) elink_848xx_read_status,
- .link_reset = (link_reset_t) elink_848x3_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
- .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_84833_hw_reset_phy,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
};
static const struct elink_phy phy_84834 = {
@@ -11098,14 +11098,14 @@ static const struct elink_phy phy_84834 = {
.speed_cap_mask = 0,
.req_duplex = 0,
.rsrv = 0,
- .config_init = (config_init_t) elink_848x3_config_init,
- .read_status = (read_status_t) elink_848xx_read_status,
- .link_reset = (link_reset_t) elink_848x3_link_reset,
- .config_loopback = (config_loopback_t) NULL,
- .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver,
- .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy,
- .set_link_led = (set_link_led_t) elink_848xx_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func
+ .config_init = elink_848x3_config_init,
+ .read_status = elink_848xx_read_status,
+ .link_reset = elink_848x3_link_reset,
+ .config_loopback = NULL,
+ .format_fw_ver = elink_848xx_format_ver,
+ .hw_reset = elink_84833_hw_reset_phy,
+ .set_link_led = elink_848xx_set_link_led,
+ .phy_specific_func = elink_848xx_specific_func
};
static const struct elink_phy phy_54618se = {
@@ -11131,14 +11131,14 @@ static const struct elink_phy phy_54618se = {
.speed_cap_mask = 0,
/* req_duplex = */ 0,
/* rsrv = */ 0,
- .config_init = (config_init_t) elink_54618se_config_init,
- .read_status = (read_status_t) elink_54618se_read_status,
- .link_reset = (link_reset_t) elink_54618se_link_reset,
- .config_loopback = (config_loopback_t) elink_54618se_config_loopback,
- .format_fw_ver = (format_fw_ver_t) NULL,
- .hw_reset = (hw_reset_t) NULL,
- .set_link_led = (set_link_led_t) elink_5461x_set_link_led,
- .phy_specific_func = (phy_specific_func_t) elink_54618se_specific_func
+ .config_init = elink_54618se_config_init,
+ .read_status = elink_54618se_read_status,
+ .link_reset = elink_54618se_link_reset,
+ .config_loopback = elink_54618se_config_loopback,
+ .format_fw_ver = NULL,
+ .hw_reset = NULL,
+ .set_link_led = elink_5461x_set_link_led,
+ .phy_specific_func = elink_54618se_specific_func
};
/*****************************************************************/
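
Aside: the phy table edits above are all one mechanical change: each callback now carries the exact signature of its slot's typedef, so the (config_init_t)-style force-casts disappear. Calling a function through a pointer of an incompatible type is undefined behavior in C, which is what those casts were hiding. A self-contained sketch with stand-in types (not the driver's own):

#include <stdint.h>

struct phy;	/* stand-ins for elink_phy / elink_params / elink_vars */
struct params;
struct vars;

typedef uint8_t (*config_init_t)(struct phy *, struct params *, struct vars *);

/* Return type and parameters match the typedef: no cast, well-defined call. */
static uint8_t xgxs_config_init(struct phy *phy, struct params *params,
				struct vars *vars)
{
	(void)phy; (void)params; (void)vars;
	return 0;
}

static const config_init_t init_cb = xgxs_config_init;

int main(void)
{
	return init_cb((struct phy *)0, (struct params *)0, (struct vars *)0);
}
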
@@ -12919,7 +12919,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
- ((next_page & 0xe0) == 0x2))));
+ ((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
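
Aside: the 0x2 to 0x20 change just above is a genuine logic fix, not cleanup: 0xe0 masks bits 7:5, so the masked value can only be 0x00, 0x20, 0x40, ..., 0xe0, and the old comparison against 0x2 could never be true. A two-line check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t next_page = 0x25;			/* example: bit 5 set */

	assert(((next_page & 0xe0) == 0x02) == 0);	/* old test: never true */
	assert((next_page & 0xe0) == 0x20);		/* fixed test matches */
	return 0;
}
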
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index 9401b7cd..40000c24 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -1,16 +1,14 @@
-/*
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2007-2013 Broadcom Corporation.
*
* Eric Davis <edavis@broadcom.com>
* David Christensen <davidch@broadcom.com>
* Gary Zambrano <zambrano@broadcom.com>
*
* Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
+ * Copyright (c) 2015-2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef ELINK_H
diff --git a/drivers/net/bnx2x/meson.build b/drivers/net/bnx2x/meson.build
new file mode 100644
index 00000000..e3c68886
--- /dev/null
+++ b/drivers/net/bnx2x/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+dep = cc.find_library('z', required: false)
+build = dep.found()
+ext_deps += dep
+cflags += '-DZLIB_CONST'
+sources = files('bnx2x.c',
+ 'bnx2x_ethdev.c',
+ 'bnx2x_rxtx.c',
+ 'bnx2x_stats.c',
+ 'bnx2x_vfpf.c',
+ 'ecore_sp.c',
+ 'elink.c')
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index 2aa04411..8be3cb0e 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -1,35 +1,8 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2014 6WIND S.A.
-# Copyright(c) Broadcom Limited.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright(c) 2014 6WIND S.A.
+# Copyright(c) Broadcom Limited.
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -56,6 +29,7 @@ EXPORT_MAP := rte_pmd_bnxt_version.map
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c
@@ -65,6 +39,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_util.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c
#
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index b5a0badf..db5c4eb0 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_H_
@@ -50,7 +22,24 @@
#define BNXT_MAX_MTU 9500
#define VLAN_TAG_SIZE 4
+#define BNXT_VF_RSV_NUM_RSS_CTX 1
+#define BNXT_VF_RSV_NUM_L2_CTX 4
+/* TODO: For now, do not support VMDq/RFS on VFs. */
+#define BNXT_VF_RSV_NUM_VNIC 1
#define BNXT_MAX_LED 4
+#define BNXT_NUM_VLANS 2
+#define BNXT_MIN_RING_DESC 16
+#define BNXT_MAX_TX_RING_DESC 4096
+#define BNXT_MAX_RX_RING_DESC 8192
+#define BNXT_DB_SIZE 0x80
+
+#define BNXT_INT_LAT_TMR_MIN 75
+#define BNXT_INT_LAT_TMR_MAX 150
+#define BNXT_NUM_CMPL_AGGR_INT 36
+#define BNXT_CMPL_AGGR_DMA_TMR 37
+#define BNXT_NUM_CMPL_DMA_AGGR 36
+#define BNXT_CMPL_AGGR_DMA_TMR_DURING_INT 50
+#define BNXT_NUM_CMPL_DMA_AGGR_DURING_INT 12
struct bnxt_led_info {
uint8_t led_id;
@@ -125,6 +114,7 @@ struct bnxt_child_vf_info {
struct bnxt_pf_info {
#define BNXT_FIRST_PF_FID 1
#define BNXT_MAX_VFS(bp) (bp->pf.max_vfs)
+#define BNXT_TOTAL_VFS(bp) ((bp)->pf.total_vfs)
#define BNXT_FIRST_VF_FID 128
#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp)
#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
@@ -132,6 +122,9 @@ struct bnxt_pf_info {
uint16_t first_vf_id;
uint16_t active_vfs;
uint16_t max_vfs;
+ uint16_t total_vfs; /* Total VFs possible.
+ * Not necessarily enabled.
+ */
uint32_t func_cfg_flags;
void *vf_req_buf;
rte_iova_t vf_req_buf_dma_addr;
@@ -229,6 +222,16 @@ struct bnxt_ptp_cfg {
uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS];
};
+struct bnxt_coal {
+ uint16_t num_cmpl_aggr_int;
+ uint16_t num_cmpl_dma_aggr;
+ uint16_t num_cmpl_dma_aggr_during_int;
+ uint16_t int_lat_tmr_max;
+ uint16_t int_lat_tmr_min;
+ uint16_t cmpl_aggr_dma_tmr;
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+};
+
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt {
void *bar0;
@@ -236,6 +239,7 @@ struct bnxt {
struct rte_eth_dev *eth_dev;
struct rte_eth_rss_conf rss_conf;
struct rte_pci_device *pdev;
+ void *doorbell_base;
uint32_t flags;
#define BNXT_FLAG_REGISTERED (1 << 0)
@@ -246,6 +250,7 @@ struct bnxt {
#define BNXT_FLAG_UPDATE_HASH (1 << 5)
#define BNXT_FLAG_PTP_SUPPORTED (1 << 6)
#define BNXT_FLAG_MULTI_HOST (1 << 7)
+#define BNXT_FLAG_NEW_RM (1 << 30)
#define BNXT_FLAG_INIT_DONE (1 << 31)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
@@ -300,6 +305,7 @@ struct bnxt {
struct bnxt_link_info link_info;
struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT];
+ uint8_t tx_cosq_id;
uint16_t fw_fid;
uint8_t dflt_mac_addr[ETHER_ADDR_LEN];
@@ -321,19 +327,19 @@ struct bnxt {
uint16_t vxlan_fw_dst_port_id;
uint16_t geneve_fw_dst_port_id;
uint32_t fw_ver;
- rte_atomic64_t rx_mbuf_alloc_fail;
+ uint32_t hwrm_spec_code;
struct bnxt_led_info leds[BNXT_MAX_LED];
uint8_t num_leds;
struct bnxt_ptp_cfg *ptp_cfg;
+ uint16_t vf_resv_strategy;
};
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
-#define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG 0x6
-
bool is_bnxt_supported(struct rte_eth_dev *dev);
+bool bnxt_stratus_device(struct bnxt *bp);
extern const struct rte_flow_ops bnxt_flow_ops;
extern int bnxt_logtype_driver;
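[Note on the bnxt.h hunks above] The new struct bnxt_coal groups the HWRM interrupt-coalescing parameters, and the BNXT_* defines added alongside it supply the firmware defaults. A minimal sketch of seeding the structure from those defaults — the helper name bnxt_init_coal_defaults is hypothetical; the struct and constants come from the header changes above:

    #include "bnxt.h"   /* struct bnxt_coal and the BNXT_* defaults above */

    /* Hypothetical helper: fill a coalescing config with header defaults. */
    static void bnxt_init_coal_defaults(struct bnxt_coal *coal)
    {
    	coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
    	coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
    	coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
    	coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
    	coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
    	coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
    	coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
    }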
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 737bb060..ff20b6fd 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <rte_malloc.h>
@@ -55,6 +27,7 @@ void bnxt_handle_async_event(struct bnxt *bp,
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
+ /* FALLTHROUGH */
bnxt_link_update_op(bp->eth_dev, 1);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
@@ -159,69 +132,31 @@ reject:
return;
}
-/* For the default completion ring only */
-int bnxt_alloc_def_cp_ring(struct bnxt *bp)
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp)
{
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
- int rc;
-
- rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
- 0, HWRM_NA_SIGNATURE,
- HWRM_NA_SIGNATURE);
- if (rc)
- goto err_out;
- cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- if (BNXT_PF(bp))
- rc = bnxt_hwrm_func_cfg_def_cp(bp);
- else
- rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
-
-err_out:
- return rc;
-}
+ bool evt = 0;
-void bnxt_free_def_cp_ring(struct bnxt *bp)
-{
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
-
- if (cpr == NULL)
- return;
+ if (bp == NULL || cmp == NULL) {
+ PMD_DRV_LOG(ERR, "invalid NULL argument\n");
+ return evt;
+ }
- bnxt_free_ring(cpr->cp_ring_struct);
- cpr->cp_ring_struct = NULL;
- rte_free(cpr->cp_ring_struct);
- rte_free(cpr);
- bp->def_cp_ring = NULL;
-}
+ switch (CMP_TYPE(cmp)) {
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ /* Handle any async event */
+ bnxt_handle_async_event(bp, cmp);
+ evt = 1;
+ break;
+ case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+ /* Handle HWRM forwarded responses */
+ bnxt_handle_fwd_req(bp, cmp);
+ evt = 1;
+ break;
+ default:
+ /* Ignore any other events */
+ PMD_DRV_LOG(INFO, "Ignoring %02x completion\n", CMP_TYPE(cmp));
+ break;
+ }
-/* For the default completion ring only */
-int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id)
-{
- struct bnxt_cp_ring_info *cpr;
- struct bnxt_ring *ring;
-
- cpr = rte_zmalloc_socket("cpr",
- sizeof(struct bnxt_cp_ring_info),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (cpr == NULL)
- return -ENOMEM;
- bp->def_cp_ring = cpr;
-
- ring = rte_zmalloc_socket("bnxt_cp_ring_struct",
- sizeof(struct bnxt_ring),
- RTE_CACHE_LINE_SIZE, socket_id);
- if (ring == NULL)
- return -ENOMEM;
- cpr->cp_ring_struct = ring;
- ring->bd = (void *)cpr->cp_desc_ring;
- ring->bd_dma = cpr->cp_desc_mapping;
- ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE);
- ring->ring_mask = ring->ring_size - 1;
- ring->vmem_size = 0;
- ring->vmem = NULL;
-
- return 0;
+ return evt;
}
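[Note] The new bnxt_event_hwrm_resp_handler() folds the removed default-completion-ring helpers into a single dispatcher keyed on CMP_TYPE(). A hedged sketch of how a caller might drain a completion ring through it — the loop shape is illustrative only; CMP_VALID, RING_CMP and NEXT_RAW_CMP are assumed to come from bnxt_cpr.h, and doorbell writes are elided:

    /* Illustrative only: hand every valid completion to the new handler. */
    static void poll_cmpl_ring_sketch(struct bnxt *bp,
    				  struct bnxt_cp_ring_info *cpr)
    {
    	uint32_t raw_cons = cpr->cp_raw_cons;

    	while (1) {
    		uint32_t cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
    		struct cmpl_base *cmp = &cpr->cp_desc_ring[cons];

    		if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
    			break;		/* ring drained */
    		if (!bnxt_event_hwrm_resp_handler(bp, cmp))
    			break;		/* not an async/forwarded event */
    		raw_cons = NEXT_RAW_CMP(raw_cons);
    	}
    	cpr->cp_raw_cons = raw_cons;
    }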
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index ce2b0cb8..c7af5698 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_CPR_H_
@@ -50,12 +22,20 @@
#define ADV_RAW_CMP(idx, n) ((idx) + (n))
#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask)
+#define RING_CMPL(ring_mask, idx) ((idx) & (ring_mask))
#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
#define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? !(val) : (val))
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
+#define NEXT_CMPL(cpr, idx, v, inc) do { \
+ (idx) += (inc); \
+ if (unlikely((idx) == (cpr)->cp_ring_struct->ring_size)) { \
+ (v) = !(v); \
+ (idx) = 0; \
+ } \
+} while (0)
#define B_CP_DB_REARM(cpr, raw_cons) \
rte_write32((DB_CP_REARM_FLAGS | \
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
@@ -78,6 +58,10 @@
rte_write32((DB_CP_FLAGS | \
RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
((cpr)->cp_doorbell))
+#define B_CP_DB(cpr, raw_cons, ring_mask) \
+ rte_write32((DB_CP_FLAGS | \
+ RING_CMPL((ring_mask), raw_cons)), \
+ ((cpr)->cp_doorbell))
struct bnxt_ring;
struct bnxt_cp_ring_info {
@@ -100,12 +84,9 @@ struct bnxt_cp_ring_info {
#define RX_CMP_L2_ERRORS \
(RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR)
-
struct bnxt;
-int bnxt_alloc_def_cp_ring(struct bnxt *bp);
-void bnxt_free_def_cp_ring(struct bnxt *bp);
-int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id);
void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp);
void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp);
+int bnxt_event_hwrm_resp_handler(struct bnxt *bp, struct cmpl_base *cmp);
#endif
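[Note] NEXT_CMPL() above advances a raw index by inc and flips the caller's valid phase v whenever the index wraps; this is the standard way to track ownership on completion rings whose descriptors carry a toggling valid bit. A small self-contained demonstration of the wrap/flip behaviour, in plain C and independent of the driver structs:

    #include <stdio.h>
    #include <stdbool.h>

    /* Same shape as NEXT_CMPL, minus the driver types: advance idx,
     * flip the expected valid phase on wrap-around. */
    #define NEXT_IDX(ring_size, idx, v, inc) do { \
    	(idx) += (inc);                        \
    	if ((idx) == (ring_size)) {            \
    		(v) = !(v);                    \
    		(idx) = 0;                     \
    	}                                      \
    } while (0)

    int main(void)
    {
    	unsigned int idx = 0;
    	bool valid = true;

    	for (int i = 0; i < 10; i++) {
    		printf("idx=%u expect valid=%d\n", idx, valid);
    		NEXT_IDX(4, idx, valid, 1);   /* ring of 4 entries */
    	}
    	return 0;   /* the phase flips each time idx wraps from 3 to 0 */
    }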
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 21c46f83..cc7e4391 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -54,15 +26,17 @@
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
+#include "bnxt_util.h"
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
- "Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+ "Broadcom NetXtreme driver " DRV_MODULE_NAME "\n";
int bnxt_logtype_driver;
#define PCI_VENDOR_ID_BROADCOM 0x14E4
-#define BROADCOM_DEV_ID_STRATUS_NIC_VF 0x1609
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF1 0x1606
+#define BROADCOM_DEV_ID_STRATUS_NIC_VF2 0x1609
#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
@@ -97,10 +71,16 @@ int bnxt_logtype_driver;
#define BROADCOM_DEV_ID_57407_MF 0x16ea
#define BROADCOM_DEV_ID_57414_MF 0x16ec
#define BROADCOM_DEV_ID_57416_MF 0x16ee
+#define BROADCOM_DEV_ID_58802 0xd802
+#define BROADCOM_DEV_ID_58804 0xd804
+#define BROADCOM_DEV_ID_58808 0x16f0
+#define BROADCOM_DEV_ID_58802_VF 0xd800
static const struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
- BROADCOM_DEV_ID_STRATUS_NIC_VF) },
+ BROADCOM_DEV_ID_STRATUS_NIC_VF1) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM,
+ BROADCOM_DEV_ID_STRATUS_NIC_VF2) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
@@ -135,6 +115,10 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -146,8 +130,33 @@ static const struct rte_pci_id bnxt_pci_id_map[] = {
ETH_RSS_NONFRAG_IPV6_TCP | \
ETH_RSS_NONFRAG_IPV6_UDP)
+#define BNXT_DEV_TX_OFFLOAD_SUPPORT (DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO | \
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define BNXT_DEV_RX_OFFLOAD_SUPPORT (DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP | \
+ DEV_RX_OFFLOAD_KEEP_CRC | \
+ DEV_RX_OFFLOAD_TCP_LRO)
+
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
+static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu);
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
/***********************/
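[Note] The BNXT_DEV_TX/RX_OFFLOAD_SUPPORT masks above replace the per-feature rxmode bitfields and txq_flags removed throughout this diff, following the DPDK 18.08 offload API. An application-side sketch of validating a requested offload set against the advertised capabilities — port_id and the requested masks are assumptions:

    #include <rte_ethdev.h>

    /* Sketch: accept a config only if every requested offload is advertised. */
    static int check_offloads(uint16_t port_id, uint64_t rx_req, uint64_t tx_req)
    {
    	struct rte_eth_dev_info info;

    	rte_eth_dev_info_get(port_id, &info);
    	if ((rx_req & info.rx_offload_capa) != rx_req)
    		return -1;   /* some requested Rx offload is unsupported */
    	if ((tx_req & info.tx_offload_capa) != tx_req)
    		return -1;   /* some requested Tx offload is unsupported */
    	return 0;
    }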
@@ -164,23 +173,12 @@ static void bnxt_free_mem(struct bnxt *bp)
bnxt_free_stats(bp);
bnxt_free_tx_rings(bp);
bnxt_free_rx_rings(bp);
- bnxt_free_def_cp_ring(bp);
}
static int bnxt_alloc_mem(struct bnxt *bp)
{
int rc;
- /* Default completion ring */
- rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY);
- if (rc)
- goto alloc_mem_err;
-
- rc = bnxt_alloc_rings(bp, 0, NULL, NULL,
- bp->def_cp_ring, "def_cp");
- if (rc)
- goto alloc_mem_err;
-
rc = bnxt_alloc_vnic_mem(bp);
if (rc)
goto alloc_mem_err;
@@ -202,23 +200,26 @@ alloc_mem_err:
static int bnxt_init_chip(struct bnxt *bp)
{
- unsigned int i;
+ struct bnxt_rx_queue *rxq;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
uint32_t queue_id, base = BNXT_MISC_VEC_ID;
uint32_t vec = BNXT_MISC_VEC_ID;
+ unsigned int i, j;
int rc;
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
if (bp->eth_dev->data->mtu > ETHER_MTU) {
- bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ bp->eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags |= BNXT_FLAG_JUMBO;
} else {
- bp->eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags &= ~BNXT_FLAG_JUMBO;
}
@@ -248,7 +249,19 @@ static int bnxt_init_chip(struct bnxt *bp)
/* VNIC configuration */
for (i = 0; i < bp->nr_vnics; i++) {
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+ vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+ if (!vnic->fw_grp_ids) {
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %d bytes for group ids\n",
+ size);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ memset(vnic->fw_grp_ids, -1, size);
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
@@ -257,12 +270,15 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "HWRM vnic %d ctx alloc failure rc: %x\n",
- i, rc);
- goto err_out;
+ /* Alloc RSS context only if RSS mode is enabled */
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic %d ctx alloc failure rc: %x\n",
+ i, rc);
+ goto err_out;
+ }
}
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
@@ -280,6 +296,13 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
+ for (j = 0; j < bp->rx_nr_rings; j++) {
+ rxq = bp->eth_dev->data->rx_queues[j];
+
+ if (rxq->rx_deferred_start)
+ rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+ }
+
rc = bnxt_vnic_rss_configure(bp, vnic);
if (rc) {
PMD_DRV_LOG(ERR,
@@ -289,7 +312,8 @@ static int bnxt_init_chip(struct bnxt *bp)
bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
- if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ if (bp->eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_TCP_LRO)
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
else
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
@@ -389,10 +413,6 @@ static int bnxt_init_nic(struct bnxt *bp)
bnxt_init_vnics(bp);
bnxt_init_filters(bp);
- rc = bnxt_init_chip(bp);
- if (rc)
- return rc;
-
return 0;
}
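[Note] This hunk drops the eager bnxt_init_chip() call: bnxt_init_nic() now only seeds ring-group, VNIC and filter bookkeeping, and the chip itself is brought up from bnxt_dev_start_op() (see that hunk further down). The resulting lifecycle, as inferred from the hunks in this diff — illustrative comments, not driver code:

    /* Inferred ordering after this patch:
     *
     *   bnxt_dev_init()          probe: BARs, HWRM channel, bnxt_init_nic()
     *   bnxt_dev_configure_op()  check/reserve VF resources (NEW_RM path)
     *   bnxt_dev_start_op()      bnxt_init_chip(): rings, VNICs, RSS, intr
     *   bnxt_dev_stop_op()       free Tx/Rx mbufs, shut down the NIC
     *   bnxt_dev_close_op()      free memory, then bnxt_dev_uninit()
     */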
@@ -407,8 +427,6 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
uint16_t max_vnics, i, j, vpool, vrxq;
unsigned int max_rx_rings;
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-
/* MAC Specifics */
dev_info->max_mac_addrs = bp->max_l2_ctx;
dev_info->max_hash_mac_addrs = 0;
@@ -416,13 +434,11 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* PF/VF specifics */
if (BNXT_PF(bp))
dev_info->max_vfs = bp->pdev->max_vfs;
- max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
- RTE_MIN(bp->max_rsscos_ctx,
- bp->max_stat_ctx)));
+ max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
- dev_info->reta_size = bp->max_rsscos_ctx;
+ dev_info->reta_size = HW_HASH_INDEX_SIZE;
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
@@ -430,21 +446,12 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
dev_info->min_rx_bufsize = 1;
dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN
+ VLAN_TAG_SIZE;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO |
- DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+ dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
+ if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+ dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
/* *INDENT-OFF* */
dev_info->default_rxconf = (struct rte_eth_rxconf) {
@@ -454,7 +461,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
.wthresh = 0,
},
.rx_free_thresh = 32,
- .rx_drop_en = 0,
+ /* If no descriptors available, pkts are dropped by default */
+ .rx_drop_en = 1,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -465,12 +473,14 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
},
.tx_free_thresh = 32,
.tx_rs_thresh = 32,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
};
eth_dev->data->dev_conf.intr_conf.lsc = 1;
eth_dev->data->dev_conf.intr_conf.rxq = 1;
+ dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
+ dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC;
+ dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC;
+ dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC;
/* *INDENT-ON* */
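[Note] With rx_desc_lim/tx_desc_lim now populated (16..8192 Rx and 16..4096 Tx descriptors, per the bnxt.h defines earlier in this diff), applications can clamp their ring sizes through the standard ethdev helper. A hedged usage sketch; the requested sizes are arbitrary:

    #include <rte_ethdev.h>

    static int size_rings(uint16_t port_id)
    {
    	uint16_t nb_rxd = 1024, nb_txd = 1024;   /* requested sizes */

    	/* Clamps both values into the PMD's [nb_min, nb_max] range. */
    	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
    }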
@@ -510,18 +520,45 @@ found:
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+ int rc;
bp->rx_queues = (void *)eth_dev->data->rx_queues;
bp->tx_queues = (void *)eth_dev->data->tx_queues;
+ bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+ bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+
+ if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
+ rc = bnxt_hwrm_check_vf_rings(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM insufficient resources\n");
+ return -ENOSPC;
+ }
+
+ rc = bnxt_hwrm_func_reserve_vf_resc(bp, false);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
+ return -ENOSPC;
+ }
+ } else {
+ /* legacy driver needs to get updated values */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
+ return rc;
+ }
+ }
/* Inherit new configurations */
if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
- eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 >
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_cp_rings ||
eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
bp->max_stat_ctx ||
- (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
+ (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps ||
+ (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) &&
+ bp->max_vnics < eth_dev->data->nb_rx_queues)) {
PMD_DRV_LOG(ERR,
"Insufficient resources to support requested config\n");
PMD_DRV_LOG(ERR,
@@ -529,21 +566,22 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
eth_dev->data->nb_tx_queues,
eth_dev->data->nb_rx_queues);
PMD_DRV_LOG(ERR,
- "Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
+ "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n",
bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
- bp->max_stat_ctx, bp->max_ring_grps);
+ bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics);
return -ENOSPC;
}
- bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
- bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
bp->tx_cp_nr_rings = bp->tx_nr_rings;
- if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
+ ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS;
+ bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+ }
return 0;
}
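[Note] The MTU derivation in the configure hunk above now subtracts two VLAN tags (VLAN_TAG_SIZE * BNXT_NUM_VLANS) instead of one, leaving room for double-tagged frames. A worked example as a standalone program; the constants mirror the DPDK/bnxt values and max_rx_pkt_len is an arbitrary jumbo setting:

    #include <stdio.h>

    #define ETHER_HDR_LEN  14
    #define ETHER_CRC_LEN   4
    #define VLAN_TAG_SIZE   4
    #define BNXT_NUM_VLANS  2   /* from bnxt.h above */

    int main(void)
    {
    	unsigned int max_rx_pkt_len = 9000;   /* example jumbo config */
    	unsigned int mtu = max_rx_pkt_len - ETHER_HDR_LEN - ETHER_CRC_LEN -
    			   VLAN_TAG_SIZE * BNXT_NUM_VLANS;

    	printf("mtu = %u\n", mtu);   /* 8974: frame minus hdr, CRC, 2 tags */
    	return 0;
    }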
@@ -571,6 +609,7 @@ static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
int vlan_mask = 0;
int rc;
@@ -581,15 +620,15 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
}
bp->dev_stopped = 0;
- rc = bnxt_init_nic(bp);
+ rc = bnxt_init_chip(bp);
if (rc)
goto error;
bnxt_link_update_op(eth_dev, 1);
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
vlan_mask |= ETH_VLAN_FILTER_MASK;
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
vlan_mask |= ETH_VLAN_STRIP_MASK;
rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask);
if (rc)
@@ -635,13 +674,15 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ bp->flags &= ~BNXT_FLAG_INIT_DONE;
if (bp->eth_dev->data->dev_started) {
/* TBD: STOP HW queues DMA */
eth_dev->data->dev_link.link_status = 0;
}
bnxt_set_hwrm_link_config(bp, false);
bnxt_hwrm_port_clr_stats(bp);
- bp->flags &= ~BNXT_FLAG_INIT_DONE;
+ bnxt_free_tx_mbufs(bp);
+ bnxt_free_rx_mbufs(bp);
bnxt_shutdown_nic(bp);
bp->dev_stopped = 1;
}
@@ -653,8 +694,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
if (bp->dev_stopped == 0)
bnxt_dev_stop_op(eth_dev);
- bnxt_free_tx_mbufs(bp);
- bnxt_free_rx_mbufs(bp);
bnxt_free_mem(bp);
if (eth_dev->data->mac_addrs != NULL) {
rte_free(eth_dev->data->mac_addrs);
@@ -664,6 +703,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
rte_free(bp->grp_info);
bp->grp_info = NULL;
}
+
+ bnxt_dev_uninit(eth_dev);
}
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
@@ -771,6 +812,11 @@ out:
new.link_speed != eth_dev->data->dev_link.link_speed) {
memcpy(&eth_dev->data->dev_link, &new,
sizeof(struct rte_eth_link));
+
+ _rte_eth_dev_callback_process(eth_dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+
bnxt_print_link_info(eth_dev);
}
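[Note] The added _rte_eth_dev_callback_process() call means link-state changes now reach application LSC callbacks instead of only being logged. A standard registration sketch on the application side; the callback body is an assumption:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int on_link_change(uint16_t port_id, enum rte_eth_event_type type,
    			  void *param, void *ret_param)
    {
    	(void)type; (void)param; (void)ret_param;
    	printf("port %u: link state changed\n", port_id);
    	return 0;
    }

    /* Register for the LSC events the PMD now raises. */
    static void register_lsc(uint16_t port_id)
    {
    	rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
    				      on_link_change, NULL);
    }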
@@ -1282,9 +1328,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
struct bnxt_vnic_info *vnic;
unsigned int i;
int rc = 0;
- uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
- HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
- uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+ uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
/* Cycle through all VNICs */
for (i = 0; i < bp->nr_vnics; i++) {
@@ -1331,8 +1377,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
memcpy(new_filter->l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
- new_filter->l2_ovlan = vlan_id;
- new_filter->l2_ovlan_mask = 0xF000;
+ new_filter->l2_ivlan = vlan_id;
+ new_filter->l2_ivlan_mask = 0xF000;
new_filter->enables |= en;
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
@@ -1366,30 +1412,31 @@ static int
bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
unsigned int i;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_filter) {
+ if (!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
/* Remove any VLAN filters programmed */
for (i = 0; i < 4095; i++)
bnxt_del_vlan_filter(bp, i);
}
PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
- dev->data->dev_conf.rxmode.hw_vlan_filter);
+ !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER));
}
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
vnic->vlan_strip = true;
else
vnic->vlan_strip = false;
bnxt_hwrm_vnic_cfg(bp, vnic);
}
PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
- dev->data->dev_conf.rxmode.hw_vlan_strip);
+ !!(rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP));
}
if (mask & ETH_VLAN_EXTEND_MASK)
@@ -1398,7 +1445,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
return 0;
}
-static void
+static int
bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
@@ -1408,7 +1455,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
int rc;
if (BNXT_VF(bp))
- return;
+ return -EPERM;
memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
@@ -1418,7 +1465,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
continue;
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
- break;
+ return rc;
memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
@@ -1427,10 +1474,12 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
if (rc)
- break;
+ return rc;
filter->mac_index = 0;
PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
}
+
+ return 0;
}
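[Note] Changing bnxt_set_default_mac_addr_op() from void to int matches the ethdev API update that lets rte_eth_dev_default_mac_addr_set() propagate PMD failures — here -EPERM on a VF and HWRM errors mid-loop. Caller-side sketch; the address value is an assumption:

    #include <rte_ethdev.h>

    static int set_mac(uint16_t port_id, struct ether_addr *addr)
    {
    	int rc = rte_eth_dev_default_mac_addr_set(port_id, addr);

    	if (rc != 0)	/* e.g. -EPERM when the port is a bnxt VF */
    		return rc;
    	return 0;
    }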
static int
@@ -1515,7 +1564,6 @@ bnxt_txq_info_get_op(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = 0;
- qinfo->conf.txq_flags = txq->txq_flags;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
@@ -1540,9 +1588,11 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
if (new_mtu > ETHER_MTU) {
bp->flags |= BNXT_FLAG_JUMBO;
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ bp->eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
} else {
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ bp->eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
bp->flags &= ~BNXT_FLAG_JUMBO;
}
@@ -1554,6 +1604,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint16_t size = 0;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
@@ -1561,9 +1612,14 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
if (rc)
break;
- rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
- if (rc)
- return rc;
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ if (size < new_mtu) {
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ return rc;
+ }
}
return rc;
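[Note] The reworked MTU loop above only reprograms VNIC placement when the mbuf data room cannot hold the new MTU. The size computation mirrors this sketch; how the mempool was created is outside the hunk and assumed:

    #include <rte_mbuf.h>

    /* True if a pool's mbufs can hold an entire frame of the given MTU. */
    static int mbuf_fits_mtu(struct rte_mempool *mb_pool, uint16_t new_mtu)
    {
    	uint16_t size = rte_pktmbuf_data_room_size(mb_pool) -
    			RTE_PKTMBUF_HEADROOM;

    	return size >= new_mtu;
    }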
@@ -2358,7 +2414,8 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
}
static struct bnxt_filter_info *
-bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
+bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf,
+ struct bnxt_vnic_info **mvnic)
{
struct bnxt_filter_info *mf = NULL;
int i;
@@ -2396,8 +2453,11 @@ bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
!memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
sizeof(nf->dst_ipaddr)) &&
!memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
- sizeof(nf->dst_ipaddr_mask)))
+ sizeof(nf->dst_ipaddr_mask))) {
+ if (mvnic)
+ *mvnic = vnic;
return mf;
+ }
}
}
return NULL;
@@ -2411,7 +2471,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
struct bnxt_filter_info *filter, *match;
- struct bnxt_vnic_info *vnic;
+ struct bnxt_vnic_info *vnic, *mvnic;
int ret = 0, i;
if (filter_op == RTE_ETH_FILTER_NOP)
@@ -2436,11 +2496,31 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
goto free_filter;
filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
- match = bnxt_match_fdir(bp, filter);
+ if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+ vnic = STAILQ_FIRST(&bp->ff_pool[0]);
+ else
+ vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+
+ match = bnxt_match_fdir(bp, filter, &mvnic);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- PMD_DRV_LOG(ERR, "Flow already exists.\n");
- ret = -EEXIST;
- goto free_filter;
+ if (match->dst_id == vnic->fw_vnic_id) {
+ PMD_DRV_LOG(ERR, "Flow already exists.\n");
+ ret = -EEXIST;
+ goto free_filter;
+ } else {
+ match->dst_id = vnic->fw_vnic_id;
+ ret = bnxt_hwrm_set_ntuple_filter(bp,
+ match->dst_id,
+ match);
+ STAILQ_REMOVE(&mvnic->filter, match,
+ bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&vnic->filter, match, next);
+ PMD_DRV_LOG(ERR,
+ "Filter with matching pattern exist\n");
+ PMD_DRV_LOG(ERR,
+ "Updated it to new destination q\n");
+ goto free_filter;
+ }
}
if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
PMD_DRV_LOG(ERR, "Flow does not exist.\n");
@@ -2448,12 +2528,6 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
goto free_filter;
}
- if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
- vnic = STAILQ_FIRST(&bp->ff_pool[0]);
- else
- vnic =
- STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
-
if (filter_op == RTE_ETH_FILTER_ADD) {
ret = bnxt_hwrm_set_ntuple_filter(bp,
filter->dst_id,
@@ -2489,7 +2563,6 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_UPDATE:
case RTE_ETH_FILTER_STATS:
case RTE_ETH_FILTER_INFO:
- /* FALLTHROUGH */
PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
break;
default:
@@ -2876,6 +2949,7 @@ static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
case BNX_DIR_TYPE_KONG_PATCH:
case BNX_DIR_TYPE_BONO_FW:
case BNX_DIR_TYPE_BONO_PATCH:
+ /* FALLTHROUGH */
return true;
}
@@ -2894,6 +2968,7 @@ static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
case BNX_DIR_TYPE_ISCSI_BOOT:
case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+ /* FALLTHROUGH */
return true;
}
@@ -3032,7 +3107,20 @@ static bool bnxt_vf_pciid(uint16_t id)
id == BROADCOM_DEV_ID_5731X_VF ||
id == BROADCOM_DEV_ID_5741X_VF ||
id == BROADCOM_DEV_ID_57414_VF ||
- id == BROADCOM_DEV_ID_STRATUS_NIC_VF)
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 ||
+ id == BROADCOM_DEV_ID_58802_VF)
+ return true;
+ return false;
+}
+
+bool bnxt_stratus_device(struct bnxt *bp)
+{
+ uint16_t id = bp->pdev->id.device_id;
+
+ if (id == BROADCOM_DEV_ID_STRATUS_NIC ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 ||
+ id == BROADCOM_DEV_ID_STRATUS_NIC_VF2)
return true;
return false;
}
@@ -3060,18 +3148,29 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
rc = -ENOMEM;
goto init_err_release;
}
+
+ if (!pci_dev->mem_resource[2].addr) {
+ PMD_DRV_LOG(ERR,
+ "Cannot find PCI device BAR 2 address, aborting\n");
+ rc = -ENODEV;
+ goto init_err_release;
+ } else {
+ bp->doorbell_base = (void *)pci_dev->mem_resource[2].addr;
+ }
+
return 0;
init_err_release:
if (bp->bar0)
bp->bar0 = NULL;
+ if (bp->doorbell_base)
+ bp->doorbell_base = NULL;
init_err_disable:
return rc;
}
-static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
#define ALLOW_FUNC(x) \
{ \
@@ -3098,7 +3197,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
bp = eth_dev->data->dev_private;
- rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
bp->dev_stopped = 1;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -3115,12 +3213,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
}
skip_init:
eth_dev->dev_ops = &bnxt_dev_ops;
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return 0;
eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
- if (BNXT_PF(bp) && pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
+ if (pci_dev->id.device_id != BROADCOM_DEV_ID_NS2) {
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x-%s", pci_dev->addr.domain,
pci_dev->addr.bus, pci_dev->addr.devid,
@@ -3131,9 +3229,10 @@ skip_init:
sizeof(struct rx_port_stats) + 512);
if (!mz) {
mz = rte_memzone_reserve(mz_name, total_alloc_len,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY);
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
if (mz == NULL)
return -ENOMEM;
}
@@ -3165,10 +3264,12 @@ skip_init:
total_alloc_len = RTE_CACHE_LINE_ROUNDUP(
sizeof(struct tx_port_stats) + 512);
if (!mz) {
- mz = rte_memzone_reserve(mz_name, total_alloc_len,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY);
+ mz = rte_memzone_reserve(mz_name,
+ total_alloc_len,
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
if (mz == NULL)
return -ENOMEM;
}
@@ -3236,7 +3337,7 @@ skip_init:
goto error_free;
}
- if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ if (bnxt_check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
PMD_DRV_LOG(ERR,
"Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
@@ -3344,17 +3445,13 @@ skip_init:
if (rc)
goto error_free_int;
- rc = bnxt_alloc_def_cp_ring(bp);
- if (rc)
- goto error_free_int;
-
bnxt_enable_int(bp);
+ bnxt_init_nic(bp);
return 0;
error_free_int:
bnxt_disable_int(bp);
- bnxt_free_def_cp_ring(bp);
bnxt_hwrm_func_buf_unrgtr(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
@@ -3365,13 +3462,15 @@ error:
}
static int
-bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
+bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
+{
struct bnxt *bp = eth_dev->data->dev_private;
int rc;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
+ PMD_DRV_LOG(DEBUG, "Calling Device uninit\n");
bnxt_disable_int(bp);
bnxt_free_int(bp);
bnxt_free_mem(bp);
@@ -3385,8 +3484,17 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
}
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bnxt_free_hwrm_resources(bp);
- rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
- rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
+
+ if (bp->tx_mem_zone) {
+ rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
+ bp->tx_mem_zone = NULL;
+ }
+
+ if (bp->rx_mem_zone) {
+ rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
+ bp->rx_mem_zone = NULL;
+ }
+
if (bp->dev_stopped == 0)
bnxt_dev_close_op(eth_dev);
if (bp->pf.vf_info)
@@ -3432,13 +3540,11 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}
-RTE_INIT(bnxt_init_log);
-static void
-bnxt_init_log(void)
+RTE_INIT(bnxt_init_log)
{
bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
if (bnxt_logtype_driver >= 0)
- rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_INFO);
}
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
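[Note] RTE_INIT() became a function-definition macro in DPDK 18.05, so the separate prototype/definition pair collapses into the block form used above, and this patch also raises the default driver log level from NOTICE to INFO. The minimal pattern, for reference — the log name pmd.example.driver is a placeholder:

    #include <rte_log.h>

    static int example_logtype;

    RTE_INIT(example_init_log)
    {
    	example_logtype = rte_log_register("pmd.example.driver");
    	if (example_logtype >= 0)
    		rte_log_set_level(example_logtype, RTE_LOG_INFO);
    }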
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 032e8eed..1038941e 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -1,38 +1,11 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <sys/queue.h>
+#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
@@ -96,9 +69,9 @@ void bnxt_init_filters(struct bnxt *bp)
STAILQ_INIT(&bp->free_filter_list);
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
- filter->fw_l2_filter_id = -1;
- filter->fw_em_filter_id = -1;
- filter->fw_ntuple_filter_id = -1;
+ filter->fw_l2_filter_id = UINT64_MAX;
+ filter->fw_em_filter_id = UINT64_MAX;
+ filter->fw_ntuple_filter_id = UINT64_MAX;
STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
}
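[Note] Initializing the firmware filter IDs to UINT64_MAX rather than the less obvious -1 assignment makes the "unallocated" sentinel explicit; the free-path hunks below compare against the same value. A tiny illustration of the convention:

    #include <stdint.h>

    /* A filter is considered live only once firmware has assigned it an ID. */
    static int filter_is_live(uint64_t fw_filter_id)
    {
    	return fw_filter_id != UINT64_MAX;
    }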
@@ -144,21 +117,42 @@ void bnxt_free_filter_mem(struct bnxt *bp)
max_filters = bp->max_l2_ctx;
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
- if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
- PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
+ if (filter->fw_l2_filter_id != ((uint64_t)-1) &&
+ filter->filter_type == HWRM_CFA_L2_FILTER) {
+ PMD_DRV_LOG(ERR, "L2 filter is not free\n");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
PMD_DRV_LOG(ERR,
- "HWRM filter cannot be freed rc = %d\n",
- rc);
+ "Cannot free L2 filter: %d\n",
+ rc);
}
filter->fw_l2_filter_id = UINT64_MAX;
+
+ if (filter->fw_ntuple_filter_id != ((uint64_t)-1) &&
+ filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+ PMD_DRV_LOG(ERR, "NTUPLE filter is not free\n");
+ /* Call HWRM to try to free filter again */
+ rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ if (rc)
+ PMD_DRV_LOG(ERR,
+ "Cannot free NTUPLE filter: %d\n",
+ rc);
+ }
+ filter->fw_ntuple_filter_id = UINT64_MAX;
}
STAILQ_INIT(&bp->free_filter_list);
rte_free(bp->filter_info);
bp->filter_info = NULL;
+
+ for (i = 0; i < bp->pf.max_vfs; i++) {
+ STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
+ rte_free(filter);
+ STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
+ bnxt_filter_info, next);
+ }
+ }
}
int bnxt_alloc_filter_mem(struct bnxt *bp)
@@ -199,1046 +193,3 @@ void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
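[Note] From here the diff removes the entire rte_flow parsing/validation machinery from bnxt_filter.c; in the 18.08 tree this logic appears to be relocated to a dedicated flow source file rather than dropped (not shown in this excerpt). For orientation, the generic rte_flow entry points the removed code backed, using only the stable 18.08 application API:

    #include <rte_flow.h>

    /* Validate-then-create, the usual rte_flow call sequence. */
    static struct rte_flow *create_flow(uint16_t port_id,
    				    const struct rte_flow_attr *attr,
    				    const struct rte_flow_item pattern[],
    				    const struct rte_flow_action actions[])
    {
    	struct rte_flow_error err;

    	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
    		return NULL;   /* err.message describes the rejection */
    	return rte_flow_create(port_id, attr, pattern, actions, &err);
    }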
-
-static int
-bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- if (!pattern) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_NUM,
- NULL, "NULL pattern.");
- return -rte_errno;
- }
-
- if (!actions) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_NUM,
- NULL, "NULL action.");
- return -rte_errno;
- }
-
- if (!attr) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR,
- NULL, "NULL attribute.");
- return -rte_errno;
- }
-
- return 0;
-}
-
-static const struct rte_flow_item *
-nxt_non_void_pattern(const struct rte_flow_item *cur)
-{
- while (1) {
- if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
- return cur;
- cur++;
- }
-}
-
-static const struct rte_flow_action *
-nxt_non_void_action(const struct rte_flow_action *cur)
-{
- while (1) {
- if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
- return cur;
- cur++;
- }
-}
-
-int check_zero_bytes(const uint8_t *bytes, int len)
-{
- int i;
- for (i = 0; i < len; i++)
- if (bytes[i] != 0x00)
- return 0;
- return 1;
-}
-
-static int
-bnxt_filter_type_check(const struct rte_flow_item pattern[],
- struct rte_flow_error *error __rte_unused)
-{
- const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
- int use_ntuple = 1;
-
- while (item->type != RTE_FLOW_ITEM_TYPE_END) {
- switch (item->type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- use_ntuple = 1;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- use_ntuple = 0;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- case RTE_FLOW_ITEM_TYPE_IPV6:
- case RTE_FLOW_ITEM_TYPE_TCP:
- case RTE_FLOW_ITEM_TYPE_UDP:
- /* FALLTHROUGH */
- /* need ntuple match, reset exact match */
- if (!use_ntuple) {
- PMD_DRV_LOG(ERR,
- "VLAN flow cannot use NTUPLE filter\n");
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Cannot use VLAN with NTUPLE");
- return -rte_errno;
- }
- use_ntuple |= 1;
- break;
- default:
- PMD_DRV_LOG(ERR, "Unknown Flow type");
- use_ntuple |= 1;
- }
- item++;
- }
- return use_ntuple;
-}
-
-static int
-bnxt_validate_and_parse_flow_type(struct bnxt *bp,
- const struct rte_flow_item pattern[],
- struct rte_flow_error *error,
- struct bnxt_filter_info *filter)
-{
- const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
- const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
- const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
- const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
- const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
- const struct rte_flow_item_udp *udp_spec, *udp_mask;
- const struct rte_flow_item_eth *eth_spec, *eth_mask;
- const struct rte_flow_item_nvgre *nvgre_spec;
- const struct rte_flow_item_nvgre *nvgre_mask;
- const struct rte_flow_item_vxlan *vxlan_spec;
- const struct rte_flow_item_vxlan *vxlan_mask;
- uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
- uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
- const struct rte_flow_item_vf *vf_spec;
- uint32_t tenant_id_be = 0;
- bool vni_masked = 0;
- bool tni_masked = 0;
- uint32_t vf = 0;
- int use_ntuple;
- uint32_t en = 0;
- int dflt_vnic;
-
- use_ntuple = bnxt_filter_type_check(pattern, error);
- PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
- if (use_ntuple < 0)
- return use_ntuple;
-
- filter->filter_type = use_ntuple ?
- HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
-
- while (item->type != RTE_FLOW_ITEM_TYPE_END) {
- if (item->last) {
- /* last or range is NOT supported as match criteria */
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "No support for range");
- return -rte_errno;
- }
- if (!item->spec || !item->mask) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "spec/mask is NULL");
- return -rte_errno;
- }
- switch (item->type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
-
- /* Source MAC address mask cannot be partially set.
- * Should be All 0's or all 1's.
- * Destination MAC address mask must not be partially
- * set. Should be all 1's or all 0's.
- */
- if ((!is_zero_ether_addr(&eth_mask->src) &&
- !is_broadcast_ether_addr(&eth_mask->src)) ||
- (!is_zero_ether_addr(&eth_mask->dst) &&
- !is_broadcast_ether_addr(&eth_mask->dst))) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "MAC_addr mask not valid");
- return -rte_errno;
- }
-
- /* Mask is not allowed. Only exact matches are */
- if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "ethertype mask not valid");
- return -rte_errno;
- }
-
- if (is_broadcast_ether_addr(&eth_mask->dst)) {
- rte_memcpy(filter->dst_macaddr,
- &eth_spec->dst, 6);
- en |= use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
- EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
- }
- if (is_broadcast_ether_addr(&eth_mask->src)) {
- rte_memcpy(filter->src_macaddr,
- &eth_spec->src, 6);
- en |= use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
- EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
- } /*
- * else {
- * RTE_LOG(ERR, PMD, "Handle this condition\n");
- * }
- */
- if (eth_spec->type) {
- filter->ethertype =
- rte_be_to_cpu_16(eth_spec->type);
- en |= use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
- EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
- }
-
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
- if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
- /* Only the VLAN ID can be matched. */
- filter->l2_ovlan =
- rte_be_to_cpu_16(vlan_spec->tci &
- 0xFFF);
- en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
- } else {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VLAN mask is invalid");
- return -rte_errno;
- }
-
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- /* If mask is not involved, we could use EM filters. */
- ipv4_spec =
- (const struct rte_flow_item_ipv4 *)item->spec;
- ipv4_mask =
- (const struct rte_flow_item_ipv4 *)item->mask;
- /* Only IP DST and SRC fields are maskable. */
- if (ipv4_mask->hdr.version_ihl ||
- ipv4_mask->hdr.type_of_service ||
- ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.time_to_live ||
- ipv4_mask->hdr.next_proto_id ||
- ipv4_mask->hdr.hdr_checksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv4 mask.");
- return -rte_errno;
- }
- filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
- filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
- EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
- if (ipv4_mask->hdr.src_addr) {
- filter->src_ipaddr_mask[0] =
- ipv4_mask->hdr.src_addr;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
- }
- if (ipv4_mask->hdr.dst_addr) {
- filter->dst_ipaddr_mask[0] =
- ipv4_mask->hdr.dst_addr;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
- }
- filter->ip_addr_type = use_ntuple ?
- HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
- HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
- if (ipv4_spec->hdr.next_proto_id) {
- filter->ip_protocol =
- ipv4_spec->hdr.next_proto_id;
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- ipv6_spec =
- (const struct rte_flow_item_ipv6 *)item->spec;
- ipv6_mask =
- (const struct rte_flow_item_ipv6 *)item->mask;
-
- /* Only IP DST and SRC fields are maskable. */
- if (ipv6_mask->hdr.vtc_flow ||
- ipv6_mask->hdr.payload_len ||
- ipv6_mask->hdr.proto ||
- ipv6_mask->hdr.hop_limits) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid IPv6 mask.");
- return -rte_errno;
- }
-
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
- EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
- rte_memcpy(filter->src_ipaddr,
- ipv6_spec->hdr.src_addr, 16);
- rte_memcpy(filter->dst_ipaddr,
- ipv6_spec->hdr.dst_addr, 16);
- if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
- rte_memcpy(filter->src_ipaddr_mask,
- ipv6_mask->hdr.src_addr, 16);
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
- }
- if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
- rte_memcpy(filter->dst_ipaddr_mask,
- ipv6_mask->hdr.dst_addr, 16);
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
- }
- filter->ip_addr_type = use_ntuple ?
- NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
- EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
-
- /* Check TCP mask. Only DST & SRC ports are maskable */
- if (tcp_mask->hdr.sent_seq ||
- tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags ||
- tcp_mask->hdr.rx_win ||
- tcp_mask->hdr.cksum ||
- tcp_mask->hdr.tcp_urp) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid TCP mask");
- return -rte_errno;
- }
- filter->src_port = tcp_spec->hdr.src_port;
- filter->dst_port = tcp_spec->hdr.dst_port;
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
- EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
- if (tcp_mask->hdr.dst_port) {
- filter->dst_port_mask = tcp_mask->hdr.dst_port;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
- }
- if (tcp_mask->hdr.src_port) {
- filter->src_port_mask = tcp_mask->hdr.src_port;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
-
- if (udp_mask->hdr.dgram_len ||
- udp_mask->hdr.dgram_cksum) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid UDP mask");
- return -rte_errno;
- }
-
- filter->src_port = udp_spec->hdr.src_port;
- filter->dst_port = udp_spec->hdr.dst_port;
- if (use_ntuple)
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
- else
- en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
- EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
-
- if (udp_mask->hdr.dst_port) {
- filter->dst_port_mask = udp_mask->hdr.dst_port;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
- }
- if (udp_mask->hdr.src_port) {
- filter->src_port_mask = udp_mask->hdr.src_port;
- en |= !use_ntuple ? 0 :
- NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan_spec =
- (const struct rte_flow_item_vxlan *)item->spec;
- vxlan_mask =
- (const struct rte_flow_item_vxlan *)item->mask;
- /* Check if VXLAN item is used to describe protocol.
- * If yes, both spec and mask should be NULL.
- * If no, both spec and mask shouldn't be NULL.
- */
- if ((!vxlan_spec && vxlan_mask) ||
- (vxlan_spec && !vxlan_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid VXLAN item");
- return -rte_errno;
- }
-
- if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
- vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
- vxlan_spec->flags != 0x8) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid VXLAN item");
- return -rte_errno;
- }
-
- /* Check if VNI is masked. */
- if (vxlan_spec && vxlan_mask) {
- vni_masked =
- !!memcmp(vxlan_mask->vni, vni_mask,
- RTE_DIM(vni_mask));
- if (vni_masked) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid VNI mask");
- return -rte_errno;
- }
-
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- vxlan_spec->vni, 3);
- filter->vni =
- rte_be_to_cpu_32(tenant_id_be);
- filter->tunnel_type =
- CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- nvgre_spec =
- (const struct rte_flow_item_nvgre *)item->spec;
- nvgre_mask =
- (const struct rte_flow_item_nvgre *)item->mask;
- /* Check if NVGRE item is used to describe protocol.
- * If yes, both spec and mask should be NULL.
- * If no, both spec and mask shouldn't be NULL.
- */
- if ((!nvgre_spec && nvgre_mask) ||
- (nvgre_spec && !nvgre_mask)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid NVGRE item");
- return -rte_errno;
- }
-
- if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
- nvgre_spec->protocol != 0x6558) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid NVGRE item");
- return -rte_errno;
- }
-
- if (nvgre_spec && nvgre_mask) {
- tni_masked =
- !!memcmp(nvgre_mask->tni, tni_mask,
- RTE_DIM(tni_mask));
- if (tni_masked) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Invalid TNI mask");
- return -rte_errno;
- }
- rte_memcpy(((uint8_t *)&tenant_id_be + 1),
- nvgre_spec->tni, 3);
- filter->vni =
- rte_be_to_cpu_32(tenant_id_be);
- filter->tunnel_type =
- CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
- }
- break;
- case RTE_FLOW_ITEM_TYPE_VF:
- vf_spec = (const struct rte_flow_item_vf *)item->spec;
- vf = vf_spec->id;
- if (!BNXT_PF(bp)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Configuring on a VF!");
- return -rte_errno;
- }
-
- if (vf >= bp->pdev->max_vfs) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Incorrect VF id!");
- return -rte_errno;
- }
-
- filter->mirror_vnic_id =
- dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
- if (dflt_vnic < 0) {
- /* This simply indicates there's no driver
- * loaded. This is not an error.
- */
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "Unable to get default VNIC for VF");
- return -rte_errno;
- }
- filter->mirror_vnic_id = dflt_vnic;
- en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
- break;
- default:
- break;
- }
- item++;
- }
- filter->enables = en;
-
- return 0;
-}
-
-/* Parse attributes */
-static int
-bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
-{
- /* Must be input direction */
- if (!attr->ingress) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- attr, "Only support ingress.");
- return -rte_errno;
- }
-
- /* Not supported */
- if (attr->egress) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- attr, "No support for egress.");
- return -rte_errno;
- }
-
- /* Not supported */
- if (attr->priority) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- attr, "No support for priority.");
- return -rte_errno;
- }
-
- /* Not supported */
- if (attr->group) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- attr, "No support for group.");
- return -rte_errno;
- }
-
- return 0;
-}
-
-struct bnxt_filter_info *
-bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
- struct bnxt_vnic_info *vnic)
-{
- struct bnxt_filter_info *filter1, *f0;
- struct bnxt_vnic_info *vnic0;
- int rc;
-
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- f0 = STAILQ_FIRST(&vnic0->filter);
-
- //This flow has same DST MAC as the port/l2 filter.
- if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
- return f0;
-
- //This flow needs DST MAC which is not same as port/l2
- PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
- filter1 = bnxt_get_unused_filter(bp);
- if (filter1 == NULL)
- return NULL;
- filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
- filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
- L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
- memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
- memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
- rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
- filter1);
- if (rc) {
- bnxt_free_filter(bp, filter1);
- return NULL;
- }
- return filter1;
-}
-
-static int
-bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error,
- struct bnxt_filter_info *filter)
-{
- const struct rte_flow_action *act = nxt_non_void_action(actions);
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- const struct rte_flow_action_queue *act_q;
- const struct rte_flow_action_vf *act_vf;
- struct bnxt_vnic_info *vnic, *vnic0;
- struct bnxt_filter_info *filter1;
- uint32_t vf = 0;
- int dflt_vnic;
- int rc;
-
- if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Cannot create flow on RSS queues");
- rc = -rte_errno;
- goto ret;
- }
-
- rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
- if (rc != 0)
- goto ret;
-
- rc = bnxt_flow_parse_attr(attr, error);
- if (rc != 0)
- goto ret;
- //Since we support ingress attribute only - right now.
- filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
-
- switch (act->type) {
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- /* Allow this flow. Redirect to a VNIC. */
- act_q = (const struct rte_flow_action_queue *)act->conf;
- if (act_q->index >= bp->rx_nr_rings) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid queue ID.");
- rc = -rte_errno;
- goto ret;
- }
- PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
-
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
- if (vnic == NULL) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "No matching VNIC for queue ID.");
- rc = -rte_errno;
- goto ret;
- }
- filter->dst_id = vnic->fw_vnic_id;
- filter1 = bnxt_get_l2_filter(bp, filter, vnic);
- if (filter1 == NULL) {
- rc = -ENOSPC;
- goto ret;
- }
- filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- PMD_DRV_LOG(DEBUG, "VNIC found\n");
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
- if (filter1 == NULL) {
- rc = -ENOSPC;
- goto ret;
- }
- filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
- filter->flags =
- HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
- else
- filter->flags =
- HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
- break;
- case RTE_FLOW_ACTION_TYPE_COUNT:
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
- if (filter1 == NULL) {
- rc = -ENOSPC;
- goto ret;
- }
- filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
- break;
- case RTE_FLOW_ACTION_TYPE_VF:
- act_vf = (const struct rte_flow_action_vf *)act->conf;
- vf = act_vf->id;
- if (!BNXT_PF(bp)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "Configuring on a VF!");
- rc = -rte_errno;
- goto ret;
- }
-
- if (vf >= bp->pdev->max_vfs) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "Incorrect VF id!");
- rc = -rte_errno;
- goto ret;
- }
-
- filter->mirror_vnic_id =
- dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
- if (dflt_vnic < 0) {
- /* This simply indicates there's no driver loaded.
- * This is not an error.
- */
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "Unable to get default VNIC for VF");
- rc = -rte_errno;
- goto ret;
- }
- filter->mirror_vnic_id = dflt_vnic;
- filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
-
- vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
- filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
- if (filter1 == NULL) {
- rc = -ENOSPC;
- goto ret;
- }
- filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- break;
-
- default:
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "Invalid action.");
- rc = -rte_errno;
- goto ret;
- }
-
- if (filter1) {
- bnxt_free_filter(bp, filter1);
- filter1->fw_l2_filter_id = -1;
- }
-
- act = nxt_non_void_action(++act);
- if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act, "Invalid action.");
- rc = -rte_errno;
- goto ret;
- }
-ret:
- return rc;
-}
-
-static int
-bnxt_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- struct bnxt_filter_info *filter;
- int ret = 0;
-
- ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
- if (ret != 0)
- return ret;
-
- filter = bnxt_get_unused_filter(bp);
- if (filter == NULL) {
- PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
- return -ENOMEM;
- }
-
- ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
- error, filter);
- /* No need to hold on to this filter if we are just validating flow */
- filter->fw_l2_filter_id = -1;
- bnxt_free_filter(bp, filter);
-
- return ret;
-}
-
-static int
-bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
-{
- struct bnxt_filter_info *mf;
- struct rte_flow *flow;
- int i;
-
- for (i = bp->nr_vnics - 1; i >= 0; i--) {
- struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
-
- STAILQ_FOREACH(flow, &vnic->flow_list, next) {
- mf = flow->filter;
-
- if (mf->filter_type == nf->filter_type &&
- mf->flags == nf->flags &&
- mf->src_port == nf->src_port &&
- mf->src_port_mask == nf->src_port_mask &&
- mf->dst_port == nf->dst_port &&
- mf->dst_port_mask == nf->dst_port_mask &&
- mf->ip_protocol == nf->ip_protocol &&
- mf->ip_addr_type == nf->ip_addr_type &&
- mf->ethertype == nf->ethertype &&
- mf->vni == nf->vni &&
- mf->tunnel_type == nf->tunnel_type &&
- mf->l2_ovlan == nf->l2_ovlan &&
- mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
- mf->l2_ivlan == nf->l2_ivlan &&
- mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
- !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
- !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
- ETHER_ADDR_LEN) &&
- !memcmp(mf->src_macaddr, nf->src_macaddr,
- ETHER_ADDR_LEN) &&
- !memcmp(mf->dst_macaddr, nf->dst_macaddr,
- ETHER_ADDR_LEN) &&
- !memcmp(mf->src_ipaddr, nf->src_ipaddr,
- sizeof(nf->src_ipaddr)) &&
- !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
- sizeof(nf->src_ipaddr_mask)) &&
- !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
- sizeof(nf->dst_ipaddr)) &&
- !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
- sizeof(nf->dst_ipaddr_mask))) {
- if (mf->dst_id == nf->dst_id)
- return -EEXIST;
- /* Same Flow, Different queue
- * Clear the old ntuple filter
- */
- if (nf->filter_type == HWRM_CFA_EM_FILTER)
- bnxt_hwrm_clear_em_filter(bp, mf);
- if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
- bnxt_hwrm_clear_ntuple_filter(bp, mf);
- /* Free the old filter, update flow
- * with new filter
- */
- bnxt_free_filter(bp, mf);
- flow->filter = nf;
- return -EXDEV;
- }
- }
- }
- return 0;
-}
-
-static struct rte_flow *
-bnxt_flow_create(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- struct bnxt_filter_info *filter;
- struct bnxt_vnic_info *vnic = NULL;
- bool update_flow = false;
- struct rte_flow *flow;
- unsigned int i;
- int ret = 0;
-
- flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
- if (!flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to allocate memory");
- return flow;
- }
-
- ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "Not a validate flow.\n");
- goto free_flow;
- }
-
- filter = bnxt_get_unused_filter(bp);
- if (filter == NULL) {
- PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
- goto free_flow;
- }
-
- ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
- error, filter);
- if (ret != 0)
- goto free_filter;
-
- ret = bnxt_match_filter(bp, filter);
- if (ret == -EEXIST) {
- PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
- /* Clear the filter that was created as part of
- * validate_and_parse_flow() above
- */
- bnxt_hwrm_clear_l2_filter(bp, filter);
- goto free_filter;
- } else if (ret == -EXDEV) {
- PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
- PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
- update_flow = true;
- }
-
- if (filter->filter_type == HWRM_CFA_EM_FILTER) {
- filter->enables |=
- HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
- ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
- }
- if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
- filter->enables |=
- HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
- ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
- }
-
- for (i = 0; i < bp->nr_vnics; i++) {
- vnic = &bp->vnic_info[i];
- if (filter->dst_id == vnic->fw_vnic_id)
- break;
- }
-
- if (!ret) {
- flow->filter = filter;
- flow->vnic = vnic;
- if (update_flow) {
- ret = -EXDEV;
- goto free_flow;
- }
- PMD_DRV_LOG(ERR, "Successfully created flow.\n");
- STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
- return flow;
- }
-free_filter:
- bnxt_free_filter(bp, filter);
-free_flow:
- if (ret == -EEXIST)
- rte_flow_error_set(error, ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Matching Flow exists.");
- else if (ret == -EXDEV)
- rte_flow_error_set(error, ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Flow with pattern exists, updating destination queue");
- else
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to create flow.");
- rte_free(flow);
- flow = NULL;
- return flow;
-}
-
-static int
-bnxt_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- struct bnxt_filter_info *filter = flow->filter;
- struct bnxt_vnic_info *vnic = flow->vnic;
- int ret = 0;
-
- ret = bnxt_match_filter(bp, filter);
- if (ret == 0)
- PMD_DRV_LOG(ERR, "Could not find matching flow\n");
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
- ret = bnxt_hwrm_clear_em_filter(bp, filter);
- if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
- ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-
- bnxt_hwrm_clear_l2_filter(bp, filter);
- if (!ret) {
- STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
- rte_free(flow);
- } else {
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to destroy flow.");
- }
-
- return ret;
-}
-
-static int
-bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
-{
- struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- struct bnxt_vnic_info *vnic;
- struct rte_flow *flow;
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < bp->nr_vnics; i++) {
- vnic = &bp->vnic_info[i];
- STAILQ_FOREACH(flow, &vnic->flow_list, next) {
- struct bnxt_filter_info *filter = flow->filter;
-
- if (filter->filter_type == HWRM_CFA_EM_FILTER)
- ret = bnxt_hwrm_clear_em_filter(bp, filter);
- if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
- ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-
- if (ret) {
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL,
- "Failed to flush flow in HW.");
- return -rte_errno;
- }
-
- STAILQ_REMOVE(&vnic->flow_list, flow,
- rte_flow, next);
- rte_free(flow);
- }
- }
-
- return ret;
-}
-
-const struct rte_flow_ops bnxt_flow_ops = {
- .validate = bnxt_flow_validate,
- .create = bnxt_flow_create,
- .destroy = bnxt_flow_destroy,
- .flush = bnxt_flow_flush,
-};
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index a3c702df..a1ecfb19 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_FILTER_H_
@@ -97,7 +69,6 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
-int check_zero_bytes(const uint8_t *bytes, int len);
#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
new file mode 100644
index 00000000..ac765674
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -0,0 +1,1171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "bnxt.h"
+#include "bnxt_filter.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_vnic.h"
+#include "bnxt_util.h"
+#include "hsi_struct_def_dpdk.h"
+
+static int
+bnxt_flow_args_validate(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL,
+ "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL,
+ "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL,
+ "NULL attribute.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static const struct rte_flow_item *
+bnxt_flow_non_void_item(const struct rte_flow_item *cur)
+{
+ while (1) {
+ if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return cur;
+ cur++;
+ }
+}
+
+static const struct rte_flow_action *
+bnxt_flow_non_void_action(const struct rte_flow_action *cur)
+{
+ while (1) {
+ if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
+ return cur;
+ cur++;
+ }
+}
+
+static int
+bnxt_filter_type_check(const struct rte_flow_item pattern[],
+ struct rte_flow_error *error __rte_unused)
+{
+ const struct rte_flow_item *item =
+ bnxt_flow_non_void_item(pattern);
+ int use_ntuple = 1;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ use_ntuple = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ use_ntuple = 0;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* FALLTHROUGH */
+			/* These item types require an ntuple filter and
+			 * cannot follow a VLAN (exact-match) item.
+			 */
+ if (!use_ntuple) {
+ PMD_DRV_LOG(ERR,
+ "VLAN flow cannot use NTUPLE filter\n");
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Cannot use VLAN with NTUPLE");
+ return -rte_errno;
+ }
+ use_ntuple |= 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown Flow type\n");
+ use_ntuple |= 1;
+ }
+ item++;
+ }
+ return use_ntuple;
+}
+
+static int
+bnxt_validate_and_parse_flow_type(struct bnxt *bp,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct bnxt_filter_info *filter)
+{
+ const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+ uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t tenant_id_be = 0;
+ bool vni_masked = 0;
+ bool tni_masked = 0;
+ uint32_t vf = 0;
+ int use_ntuple;
+ uint32_t en = 0;
+ uint32_t en_ethertype;
+ int dflt_vnic;
+
+ use_ntuple = bnxt_filter_type_check(pattern, error);
+ PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
+ if (use_ntuple < 0)
+ return use_ntuple;
+
+ filter->filter_type = use_ntuple ?
+ HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
+ en_ethertype = use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
+ EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (item->last) {
+			/* "last" (range matching) is not supported as a match criterion */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "No support for range");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "spec/mask is NULL");
+ return -rte_errno;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+			/* The source and destination MAC address masks must
+			 * not be partially set: each must be either all 0's
+			 * or all 1's.
+			 */
+ if ((!is_zero_ether_addr(&eth_mask->src) &&
+ !is_broadcast_ether_addr(&eth_mask->src)) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MAC_addr mask not valid");
+ return -rte_errno;
+ }
+
+			/* A partial ethertype mask is not allowed; only exact matches are supported */
+ if (eth_mask->type &&
+ eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "ethertype mask not valid");
+ return -rte_errno;
+ }
+
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ rte_memcpy(filter->dst_macaddr,
+ &eth_spec->dst, 6);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
+ EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
+ }
+
+ if (is_broadcast_ether_addr(&eth_mask->src)) {
+ rte_memcpy(filter->src_macaddr,
+ &eth_spec->src, 6);
+ en |= use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
+ EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
+			} /* An all-zero source MAC mask means the source
+			   * address is not used for matching.
+			   */
+ if (eth_mask->type) {
+ filter->ethertype =
+ rte_be_to_cpu_16(eth_spec->type);
+ en |= en_ethertype;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
+ if (en & en_ethertype) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN TPID matching is not"
+ " supported");
+ return -rte_errno;
+ }
+ if (vlan_mask->tci &&
+ vlan_mask->tci == RTE_BE16(0x0fff)) {
+ /* Only the VLAN ID can be matched. */
+ filter->l2_ovlan =
+ rte_be_to_cpu_16(vlan_spec->tci &
+ RTE_BE16(0x0fff));
+ en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+ } else {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN mask is invalid");
+ return -rte_errno;
+ }
+ if (vlan_mask->inner_type &&
+ vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "inner ethertype mask not"
+ " valid");
+ return -rte_errno;
+ }
+ if (vlan_mask->inner_type) {
+ filter->ethertype =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+ en |= en_ethertype;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ /* If mask is not involved, we could use EM filters. */
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
+ /* Only IP DST and SRC fields are maskable. */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
+ filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+ EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+
+ if (ipv4_mask->hdr.src_addr) {
+ filter->src_ipaddr_mask[0] =
+ ipv4_mask->hdr.src_addr;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ }
+
+ if (ipv4_mask->hdr.dst_addr) {
+ filter->dst_ipaddr_mask[0] =
+ ipv4_mask->hdr.dst_addr;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ }
+
+ filter->ip_addr_type = use_ntuple ?
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+
+ if (ipv4_spec->hdr.next_proto_id) {
+ filter->ip_protocol =
+ ipv4_spec->hdr.next_proto_id;
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+
+ /* Only IP DST and SRC fields are maskable. */
+ if (ipv6_mask->hdr.vtc_flow ||
+ ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.proto ||
+ ipv6_mask->hdr.hop_limits) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask.");
+ return -rte_errno;
+ }
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
+ EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
+
+ rte_memcpy(filter->src_ipaddr,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->dst_ipaddr,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
+ 16)) {
+ rte_memcpy(filter->src_ipaddr_mask,
+ ipv6_mask->hdr.src_addr, 16);
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+ }
+
+ if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
+ 16)) {
+ rte_memcpy(filter->dst_ipaddr_mask,
+ ipv6_mask->hdr.dst_addr, 16);
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+ }
+
+ filter->ip_addr_type = use_ntuple ?
+ NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
+ EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ /* Check TCP mask. Only DST & SRC ports are maskable */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+ EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+
+ if (tcp_mask->hdr.dst_port) {
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ }
+
+ if (tcp_mask->hdr.src_port) {
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ filter->src_port = udp_spec->hdr.src_port;
+ filter->dst_port = udp_spec->hdr.dst_port;
+
+ if (use_ntuple)
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+ else
+ en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
+ EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
+
+ if (udp_mask->hdr.dst_port) {
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+ }
+
+ if (udp_mask->hdr.src_port) {
+ filter->src_port_mask = udp_mask->hdr.src_port;
+ en |= !use_ntuple ? 0 :
+ NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+			/* Check whether the VXLAN item only describes the
+			 * protocol. If so, both spec and mask should be NULL;
+			 * otherwise, both must be non-NULL.
+			 */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
+ vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
+ vxlan_spec->flags != 0x8) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_spec && vxlan_mask) {
+ vni_masked =
+ !!memcmp(vxlan_mask->vni, vni_mask,
+ RTE_DIM(vni_mask));
+ if (vni_masked) {
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->vni =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ nvgre_spec = item->spec;
+ nvgre_mask = item->mask;
+			/* Check whether the NVGRE item only describes the
+			 * protocol. If so, both spec and mask should be NULL;
+			 * otherwise, both must be non-NULL.
+			 */
+ if ((!nvgre_spec && nvgre_mask) ||
+ (nvgre_spec && !nvgre_mask)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
+ nvgre_spec->protocol != 0x6558) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+
+ if (nvgre_spec && nvgre_mask) {
+ tni_masked =
+ !!memcmp(nvgre_mask->tni, tni_mask,
+ RTE_DIM(tni_mask));
+ if (tni_masked) {
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TNI mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ nvgre_spec->tni, 3);
+ filter->vni =
+ rte_be_to_cpu_32(tenant_id_be);
+ filter->tunnel_type =
+ CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = item->spec;
+ vf = vf_spec->id;
+
+ if (!BNXT_PF(bp)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Configuring on a VF!");
+ return -rte_errno;
+ }
+
+ if (vf >= bp->pdev->max_vfs) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Incorrect VF id!");
+ return -rte_errno;
+ }
+
+ if (!attr->transfer) {
+ rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic without"
+ " affecting it (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
+
+ filter->mirror_vnic_id =
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+				/* A negative value means no driver is loaded
+				 * on the VF, so there is no default VNIC to
+				 * mirror to; the rule is rejected rather than
+				 * treated as a fatal driver error.
+				 */
+ rte_flow_error_set
+ (error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unable to get default VNIC for VF");
+ return -rte_errno;
+ }
+
+ filter->mirror_vnic_id = dflt_vnic;
+ en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
+ break;
+ default:
+ break;
+ }
+ item++;
+ }
+ filter->enables = en;
+
+ return 0;
+}
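
Note: because of the spec/mask check at the top of the parsing loop, every
non-VOID pattern item must carry both a spec and a mask, and each field's
mask must be all ones or all zeroes. A minimal sketch of a pattern array
this parser accepts (it is classified as an ntuple filter; the address and
port values are hypothetical, using the DPDK 18.08 rte_flow API):

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Match IPv4 dst 192.168.1.1 + UDP dst port 4789 (hypothetical). */
    static const struct rte_flow_item_ipv4 ip_spec = {
            .hdr.dst_addr = RTE_BE32(0xc0a80101),
    };
    static const struct rte_flow_item_ipv4 ip_mask = {
            .hdr.dst_addr = RTE_BE32(0xffffffff), /* exact match, DST only */
    };
    static const struct rte_flow_item_udp udp_spec = {
            .hdr.dst_port = RTE_BE16(4789),
    };
    static const struct rte_flow_item_udp udp_mask = {
            .hdr.dst_port = RTE_BE16(0xffff),
    };
    static const struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_IPV4,
              .spec = &ip_spec, .mask = &ip_mask },
            { .type = RTE_FLOW_ITEM_TYPE_UDP,
              .spec = &udp_spec, .mask = &udp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
    };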
+
+/* Parse attributes */
+static int
+bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr,
+ "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr,
+ "No support for egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr,
+ "No support for priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr,
+ "No support for group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
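
With only ingress supported, the attribute struct an application passes is
simply (a sketch; group, priority and egress must remain zero):

    static const struct rte_flow_attr attr = { .ingress = 1 };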
+
+struct bnxt_filter_info *
+bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
+ struct bnxt_vnic_info *vnic)
+{
+ struct bnxt_filter_info *filter1, *f0;
+ struct bnxt_vnic_info *vnic0;
+ int rc;
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ f0 = STAILQ_FIRST(&vnic0->filter);
+
+	/* This flow has the same DST MAC as the port/L2 filter. */
+ if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
+ return f0;
+
+	/* This flow needs a DST MAC that differs from the port/L2 filter */
+ PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
+ filter1 = bnxt_get_unused_filter(bp);
+ if (filter1 == NULL)
+ return NULL;
+
+ filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
+ filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
+ L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
+ memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
+ memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
+ rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
+ filter1);
+ if (rc) {
+ bnxt_free_filter(bp, filter1);
+ return NULL;
+ }
+ return filter1;
+}
+
+static int
+bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct bnxt_filter_info *filter)
+{
+ const struct rte_flow_action *act =
+ bnxt_flow_non_void_action(actions);
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ struct bnxt_vnic_info *vnic, *vnic0;
+ struct bnxt_filter_info *filter1;
+ uint32_t vf = 0;
+ int dflt_vnic;
+ int rc;
+
+ if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Cannot create flow on RSS queues");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ rc =
+ bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
+ if (rc != 0)
+ goto ret;
+
+ rc = bnxt_flow_parse_attr(attr, error);
+ if (rc != 0)
+ goto ret;
+
+	/* Only the ingress attribute is supported for now. */
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
+
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ /* Allow this flow. Redirect to a VNIC. */
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ if (act_q->index >= bp->rx_nr_rings) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid queue ID.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
+ if (vnic == NULL) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "No matching VNIC for queue ID.");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->dst_id = vnic->fw_vnic_id;
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ PMD_DRV_LOG(DEBUG, "VNIC found\n");
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ filter->flags =
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
+ else
+ filter->flags =
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VF:
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ vf = act_vf->id;
+
+ if (!BNXT_PF(bp)) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Configuring on a VF!");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ if (vf >= bp->pdev->max_vfs) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Incorrect VF id!");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->mirror_vnic_id =
+ dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
+ if (dflt_vnic < 0) {
+			/* A negative value means no driver is loaded on the
+			 * VF, so there is no default VNIC; reject the rule
+			 * rather than treating it as a fatal driver error.
+			 */
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Unable to get default VNIC for VF");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ filter->mirror_vnic_id = dflt_vnic;
+ filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
+
+ vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+ filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+ if (filter1 == NULL) {
+ rc = -ENOSPC;
+ goto ret;
+ }
+
+ filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+ break;
+
+ default:
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid action.");
+ rc = -rte_errno;
+ goto ret;
+ }
+
+ if (filter1) {
+ bnxt_free_filter(bp, filter1);
+ filter1->fw_l2_filter_id = -1;
+ }
+
+ act = bnxt_flow_non_void_action(++act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "Invalid action.");
+ rc = -rte_errno;
+ goto ret;
+ }
+ret:
+ return rc;
+}
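
The action parser above consumes exactly one non-VOID action and then
requires RTE_FLOW_ACTION_TYPE_END. A queue action array it accepts could
look like this (the queue index is hypothetical and must be below the
port's rx_nr_rings):

    static const struct rte_flow_action_queue queue = { .index = 1 };
    static const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
    };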
+
+static int
+bnxt_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter;
+ int ret = 0;
+
+ ret = bnxt_flow_args_validate(attr, pattern, actions, error);
+ if (ret != 0)
+ return ret;
+
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
+ return -ENOMEM;
+ }
+
+ ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+ error, filter);
+ /* No need to hold on to this filter if we are just validating flow */
+ filter->fw_l2_filter_id = UINT64_MAX;
+ bnxt_free_filter(bp, filter);
+
+ return ret;
+}
+
+static int
+bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
+{
+ struct bnxt_filter_info *mf;
+ struct rte_flow *flow;
+ int i;
+
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ mf = flow->filter;
+
+ if (mf->filter_type == nf->filter_type &&
+ mf->flags == nf->flags &&
+ mf->src_port == nf->src_port &&
+ mf->src_port_mask == nf->src_port_mask &&
+ mf->dst_port == nf->dst_port &&
+ mf->dst_port_mask == nf->dst_port_mask &&
+ mf->ip_protocol == nf->ip_protocol &&
+ mf->ip_addr_type == nf->ip_addr_type &&
+ mf->ethertype == nf->ethertype &&
+ mf->vni == nf->vni &&
+ mf->tunnel_type == nf->tunnel_type &&
+ mf->l2_ovlan == nf->l2_ovlan &&
+ mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
+ mf->l2_ivlan == nf->l2_ivlan &&
+ mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
+ !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+ !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_macaddr, nf->src_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->dst_macaddr, nf->dst_macaddr,
+ ETHER_ADDR_LEN) &&
+ !memcmp(mf->src_ipaddr, nf->src_ipaddr,
+ sizeof(nf->src_ipaddr)) &&
+ !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
+ sizeof(nf->src_ipaddr_mask)) &&
+ !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
+ sizeof(nf->dst_ipaddr)) &&
+ !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
+ sizeof(nf->dst_ipaddr_mask))) {
+ if (mf->dst_id == nf->dst_id)
+ return -EEXIST;
+				/* Same flow, different queue: clear the old
+				 * ntuple filter and reuse the matching L2
+				 * filter ID for the new filter.
+				 */
+ nf->fw_l2_filter_id = mf->fw_l2_filter_id;
+ if (nf->filter_type == HWRM_CFA_EM_FILTER)
+ bnxt_hwrm_clear_em_filter(bp, mf);
+ if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ bnxt_hwrm_clear_ntuple_filter(bp, mf);
+ /* Free the old filter, update flow
+ * with new filter
+ */
+ bnxt_free_filter(bp, mf);
+ flow->filter = nf;
+ return -EXDEV;
+ }
+ }
+ }
+ return 0;
+}
+
+static struct rte_flow *
+bnxt_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter;
+ struct bnxt_vnic_info *vnic = NULL;
+ bool update_flow = false;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret = 0;
+
+ flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = bnxt_flow_args_validate(attr, pattern, actions, error);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Not a validate flow.\n");
+ goto free_flow;
+ }
+
+ filter = bnxt_get_unused_filter(bp);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
+ goto free_flow;
+ }
+
+ ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
+ error, filter);
+ if (ret != 0)
+ goto free_filter;
+
+ ret = bnxt_match_filter(bp, filter);
+ if (ret == -EEXIST) {
+ PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
+ /* Clear the filter that was created as part of
+ * validate_and_parse_flow() above
+ */
+ bnxt_hwrm_clear_l2_filter(bp, filter);
+ goto free_filter;
+ } else if (ret == -EXDEV) {
+ PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
+ PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
+ update_flow = true;
+ }
+
+ if (filter->filter_type == HWRM_CFA_EM_FILTER) {
+ filter->enables |=
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
+ }
+
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
+ filter->enables |=
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
+ }
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ if (filter->dst_id == vnic->fw_vnic_id)
+ break;
+ }
+
+ if (!ret) {
+ flow->filter = filter;
+ flow->vnic = vnic;
+ if (update_flow) {
+ ret = -EXDEV;
+ goto free_flow;
+ }
+ PMD_DRV_LOG(ERR, "Successfully created flow.\n");
+ STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+ return flow;
+ }
+free_filter:
+ bnxt_free_filter(bp, filter);
+free_flow:
+ if (ret == -EEXIST)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Matching Flow exists.");
+ else if (ret == -EXDEV)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Flow with pattern exists, updating destination queue");
+ else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ flow = NULL;
+ return flow;
+}
+
+static int
+bnxt_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_filter_info *filter = flow->filter;
+ struct bnxt_vnic_info *vnic = flow->vnic;
+ int ret = 0;
+
+ ret = bnxt_match_filter(bp, filter);
+ if (ret == 0)
+ PMD_DRV_LOG(ERR, "Could not find matching flow\n");
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ ret = bnxt_hwrm_clear_em_filter(bp, filter);
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ else
+ ret = bnxt_hwrm_clear_l2_filter(bp, filter);
+ if (!ret) {
+ STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
+ rte_free(flow);
+ } else {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+ }
+
+ return ret;
+}
+
+static int
+bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_vnic_info *vnic;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < bp->nr_vnics; i++) {
+ vnic = &bp->vnic_info[i];
+ STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+ struct bnxt_filter_info *filter = flow->filter;
+
+ if (filter->filter_type == HWRM_CFA_EM_FILTER)
+ ret = bnxt_hwrm_clear_em_filter(bp, filter);
+ if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+
+ if (ret) {
+ rte_flow_error_set
+ (error,
+ -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Failed to flush flow in HW.");
+ return -rte_errno;
+ }
+
+ STAILQ_REMOVE(&vnic->flow_list, flow,
+ rte_flow, next);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+const struct rte_flow_ops bnxt_flow_ops = {
+ .validate = bnxt_flow_validate,
+ .create = bnxt_flow_create,
+ .destroy = bnxt_flow_destroy,
+ .flush = bnxt_flow_flush,
+};
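
These callbacks are reached through the generic rte_flow API once the
application has a configured bnxt port. A minimal end-to-end sketch (port
id, queue index and UDP port are hypothetical; per the check in
bnxt_validate_and_parse_flow(), the port must not be configured for RSS):

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    static int redirect_udp_4789(uint16_t port_id) /* hypothetical helper */
    {
            struct rte_flow_error err;
            struct rte_flow *flow;
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_udp spec = { .hdr.dst_port = RTE_BE16(4789) };
            struct rte_flow_item_udp mask = { .hdr.dst_port = RTE_BE16(0xffff) };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_UDP,
                      .spec = &spec, .mask = &mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 1 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            /* Dry run first; on failure err.message is set by the PMD. */
            if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
                    return -1;

            flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
            return flow != NULL ? 0 : -1;
    }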
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index b7843afe..c682488a 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <unistd.h>
@@ -55,6 +27,8 @@
#include <rte_io.h>
#define HWRM_CMD_TIMEOUT 10000
+#define HWRM_SPEC_CODE_1_8_3 0x10803
+#define HWRM_VERSION_1_9_1 0x10901
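
The HWRM spec code packs major/minor/update into a single integer, using
the same layout bnxt_hwrm_ver_get() builds below ((maj << 16) |
(min << 8) | upd), so 0x10803 reads as 1.8.3 and 0x10901 as 1.9.1. For
instance (illustrative only):

    assert(((1 << 16) | (8 << 8) | 3) == 0x10803); /* spec code 1.8.3 */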
struct bnxt_plcmodes_cfg {
uint32_t flags;
@@ -115,7 +89,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
short_input.req_type = rte_cpu_to_le_16(req->req_type);
short_input.signature = rte_cpu_to_le_16(
- HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
+ HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
short_input.size = rte_cpu_to_le_16(msg_len);
short_input.req_addr =
rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
@@ -192,10 +166,26 @@ err_ret:
req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
+#define HWRM_CHECK_RESULT_SILENT() do {\
+ if (rc) { \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return rc; \
+ } \
+ if (resp->error_code) { \
+ rc = rte_le_to_cpu_16(resp->error_code); \
+ rte_spinlock_unlock(&bp->hwrm_lock); \
+ return rc; \
+ } \
+} while (0)
+
#define HWRM_CHECK_RESULT() do {\
if (rc) { \
PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
if (resp->error_code) { \
@@ -214,6 +204,10 @@ err_ret:
PMD_DRV_LOG(ERR, "error %d\n", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
} while (0)
@@ -248,6 +242,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t mask = 0;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ return rc;
+
HWRM_PREP(req, CFA_L2_SET_RX_MASK);
req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@@ -344,7 +341,7 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- filter->fw_l2_filter_id = -1;
+ filter->fw_l2_filter_id = UINT64_MAX;
return 0;
}
@@ -399,13 +396,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
req.l2_ovlan = filter->l2_ovlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
- req.l2_ovlan = filter->l2_ivlan;
+ req.l2_ivlan = filter->l2_ivlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
- req.l2_ovlan_mask = filter->l2_ivlan_mask;
+ req.l2_ivlan_mask = filter->l2_ivlan_mask;
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
req.src_id = rte_cpu_to_le_32(filter->src_id);
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -436,16 +433,18 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
HWRM_PREP(req, PORT_MAC_CFG);
if (ptp->rx_filter)
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
else
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ flags |=
+ HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
if (ptp->tx_tstamp_en)
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+ flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
else
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+ flags |=
+ HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
req.flags = rte_cpu_to_le_32(flags);
- req.enables =
- rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req.enables = rte_cpu_to_le_32
+ (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -473,7 +472,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
HWRM_CHECK_RESULT();
- if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS))
+ if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
return 0;
ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
@@ -505,7 +504,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
return 0;
}
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {.req_type = 0 };
@@ -527,6 +526,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (BNXT_PF(bp)) {
bp->pf.port_id = resp->port_id;
bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
+ bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
new_max_vfs = bp->pdev->max_vfs;
if (new_max_vfs != bp->pf.max_vfs) {
if (bp->pf.vf_info)
@@ -595,6 +595,20 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
return rc;
}
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+ int rc;
+
+ rc = __bnxt_hwrm_func_qcaps(bp);
+ if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+ rc = bnxt_hwrm_func_resc_qcaps(bp);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_NEW_RM;
+ }
+
+ return rc;
+}
+
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
int rc = 0;
@@ -631,10 +645,19 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
if (BNXT_PF(bp)) {
req.enables |= rte_cpu_to_le_32(
- HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
RTE_MIN(sizeof(req.vf_req_fwd),
sizeof(bp->pf.vf_req_fwd)));
+
+		/*
+		 * The PF can sniff HWRM API calls issued by a VF. This may
+		 * have been set up by the Linux driver and inherited by the
+		 * DPDK PF driver. Clear the HWRM sniffer list in firmware
+		 * because the DPDK PF driver does not support it.
+		 */
+ req.flags =
+ rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
}
req.async_event_fwd[0] |=
@@ -655,6 +678,105 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
return rc;
}
+int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
+{
+ if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
+ return 0;
+
+ return bnxt_hwrm_func_reserve_vf_resc(bp, true);
+}
+
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
+{
+ int rc;
+ uint32_t flags = 0;
+ uint32_t enables;
+ struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_vf_cfg_input req = {0};
+
+ HWRM_PREP(req, FUNC_VF_CFG);
+
+ req.enables = rte_cpu_to_le_32
+ (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
+
+ req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
+ req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
+ AGG_RING_MULTIPLIER);
+ req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+ req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
+ bp->tx_nr_rings);
+ req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+ req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
+ if (bp->vf_resv_strategy ==
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
+ enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+ req.enables |= rte_cpu_to_le_32(enables);
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
+ req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
+ req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+ }
+
+ if (test)
+ flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
+ HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
+
+ req.flags = rte_cpu_to_le_32(flags);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ if (test)
+ HWRM_CHECK_RESULT_SILENT();
+ else
+ HWRM_CHECK_RESULT();
+
+ HWRM_UNLOCK();
+ return rc;
+}
+
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+ int rc;
+ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_resource_qcaps_input req = {0};
+
+ HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (BNXT_VF(bp)) {
+ bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+ bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+ bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+ bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+ bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+ bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+ }
+ bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
+ if (bp->vf_resv_strategy >
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
+ bp->vf_resv_strategy =
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
+
+ HWRM_UNLOCK();
+ return rc;
+}
+
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
int rc = 0;
@@ -678,11 +800,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_CHECK_RESULT();
PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
- resp->hwrm_intf_maj, resp->hwrm_intf_min,
- resp->hwrm_intf_upd,
- resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
- bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
- (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
+ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
+ resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
+ resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
+ bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
+ (resp->hwrm_fw_min_8b << 16) |
+ (resp->hwrm_fw_bld_8b << 8) |
+ resp->hwrm_fw_rsvd_8b;
PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
@@ -690,11 +814,12 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
my_version |= HWRM_VERSION_MINOR << 8;
my_version |= HWRM_VERSION_UPDATE;
- fw_version = resp->hwrm_intf_maj << 16;
- fw_version |= resp->hwrm_intf_min << 8;
- fw_version |= resp->hwrm_intf_upd;
+ fw_version = resp->hwrm_intf_maj_8b << 16;
+ fw_version |= resp->hwrm_intf_min_8b << 8;
+ fw_version |= resp->hwrm_intf_upd_8b;
+ bp->hwrm_spec_code = fw_version;
- if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
+ if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
@@ -750,7 +875,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
if ((dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
- HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
+ HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
PMD_DRV_LOG(DEBUG, "Short command supported\n");
rte_free(bp->hwrm_short_cmd_req_addr);
@@ -919,9 +1044,15 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
int rc = 0;
struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int i;
HWRM_PREP(req, QUEUE_QPORTCFG);
+ req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
+ /* HWRM Version >= 1.9.1 */
+ if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
+ req.drv_qmap_cap =
+ HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT();
@@ -941,6 +1072,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
HWRM_UNLOCK();
+ if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
+ bp->tx_cosq_id = bp->cos_queue[0].id;
+ } else {
+ /* iterate and find the COSq profile to use for Tx */
+ for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+ if (bp->cos_queue[i].profile ==
+ HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+ bp->tx_cosq_id = bp->cos_queue[i].id;
+ break;
+ }
+ }
+ }
+ PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+
return rc;
}
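Firmware 1.9.1 and newer reports a service profile per CoS queue, and the driver now picks the first lossy profile for Tx instead of hard-coding queue 0. A minimal sketch of the selection, assuming cos_queue[] was already filled from the QPORTCFG response (demo_pick_tx_cosq is an illustrative name):

    /* Sketch: choose the Tx CoS queue id; old firmware falls back to queue 0. */
    static uint16_t demo_pick_tx_cosq(struct bnxt *bp)
    {
        int i;

        if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1)
            return bp->cos_queue[0].id;

        for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++)
            if (bp->cos_queue[i].profile == HWRM_QUEUE_SERVICE_PROFILE_LOSSY)
                return bp->cos_queue[i].id;

        return bp->cos_queue[0].id; /* no lossy profile found */
    }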
@@ -964,7 +1109,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
- req.queue_id = bp->cos_queue[0].id;
+ req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
/* FALLTHROUGH */
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
@@ -1182,8 +1327,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* map ring groups to this vnic */
PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
- for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
+ for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+
vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
@@ -1193,7 +1339,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_PREP(req, VNIC_ALLOC);
if (vnic->func_default)
- req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
+ req.flags =
+ rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT();
@@ -1214,7 +1361,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
HWRM_PREP(req, VNIC_PLCMODES_QCFG);
- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -1242,7 +1389,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
HWRM_PREP(req, VNIC_PLCMODES_CFG);
- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
req.flags = rte_cpu_to_le_32(pmode->flags);
req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
@@ -1451,6 +1598,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
HWRM_PREP(req, VNIC_RSS_CFG);
req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
+ req.hash_mode_flags = vnic->hash_mode;
req.ring_grp_tbl_addr =
rte_cpu_to_le_64(vnic->rss_table_dma_addr);
@@ -1474,6 +1622,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t size;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
HWRM_PREP(req, VNIC_PLCMODES_CFG);
req.flags = rte_cpu_to_le_32(
@@ -1486,7 +1639,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
size -= RTE_PKTMBUF_HEADROOM;
req.jumbo_thresh = rte_cpu_to_le_16(size);
- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -1517,12 +1670,12 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
- req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
req.max_agg_segs = rte_cpu_to_le_16(5);
req.max_aggs =
rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
req.min_agg_len = rte_cpu_to_le_32(512);
}
+ req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -1607,10 +1760,9 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
- stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
- stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
-
- stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
+ stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
+ stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
+ stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
HWRM_UNLOCK();
@@ -1732,8 +1884,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
return rc;
}
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
- unsigned int idx __rte_unused)
+static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
@@ -1745,17 +1896,52 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->cp_raw_cons = 0;
}
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
+{
+ struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->rx_desc_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_desc_ring));
+ memset(rxr->rx_buf_ring, 0,
+ rxr->rx_ring_struct->ring_size *
+ sizeof(*rxr->rx_buf_ring));
+ rxr->rx_prod = 0;
+ }
+ ring = rxr->ag_ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ memset(rxr->ag_buf_ring, 0,
+ rxr->ag_ring_struct->ring_size *
+ sizeof(*rxr->ag_buf_ring));
+ rxr->ag_prod = 0;
+ bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
+ }
+ if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
+ bnxt_free_cp_ring(bp, cpr);
+
+ bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
+}
+
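Pulling the Rx teardown into bnxt_free_hwrm_rx_ring() means one queue's firmware rings can be released and rebuilt without disturbing the others, which is what the per-queue start/stop paths in bnxt_rxq.c rely on. A minimal restart sketch, pairing it with bnxt_alloc_hwrm_rx_ring() from bnxt_ring.c (demo_restart_rx_queue is illustrative):

    /* Sketch: recycle the HWRM rings of a single Rx queue. */
    static int demo_restart_rx_queue(struct bnxt *bp, int queue_index)
    {
        bnxt_free_hwrm_rx_ring(bp, queue_index);         /* free FW rings */
        return bnxt_alloc_hwrm_rx_ring(bp, queue_index); /* re-allocate them */
    }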
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
unsigned int i;
- int rc = 0;
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
struct bnxt_tx_queue *txq = bp->tx_queues[i];
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
- unsigned int idx = bp->rx_cp_nr_rings + i + 1;
if (ring->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_hwrm_ring_free(bp, ring,
@@ -1771,60 +1957,15 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
txr->tx_cons = 0;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_free_cp_ring(bp, cpr, idx);
- cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
-
- for (i = 0; i < bp->rx_cp_nr_rings; i++) {
- struct bnxt_rx_queue *rxq = bp->rx_queues[i];
- struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
- struct bnxt_ring *ring = rxr->rx_ring_struct;
- struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
- unsigned int idx = i + 1;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_hwrm_ring_free(bp, ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
- memset(rxr->rx_desc_ring, 0,
- rxr->rx_ring_struct->ring_size *
- sizeof(*rxr->rx_desc_ring));
- memset(rxr->rx_buf_ring, 0,
- rxr->rx_ring_struct->ring_size *
- sizeof(*rxr->rx_buf_ring));
- rxr->rx_prod = 0;
- }
- ring = rxr->ag_ring_struct;
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_hwrm_ring_free(bp, ring,
- HWRM_RING_FREE_INPUT_RING_TYPE_RX);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- memset(rxr->ag_buf_ring, 0,
- rxr->ag_ring_struct->ring_size *
- sizeof(*rxr->ag_buf_ring));
- rxr->ag_prod = 0;
- bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
- }
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_free_cp_ring(bp, cpr, idx);
- bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
+ bnxt_free_cp_ring(bp, cpr);
cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
}
}
- /* Default completion ring */
- {
- struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ for (i = 0; i < bp->rx_cp_nr_rings; i++)
+ bnxt_free_hwrm_rx_ring(bp, i);
- if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
- bnxt_free_cp_ring(bp, cpr, 0);
- cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
- }
- }
-
- return rc;
+ return 0;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
@@ -1887,6 +2028,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
else
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
//if (rc)
//break;
}
@@ -1974,6 +2116,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
bnxt_hwrm_vnic_free(bp, vnic);
+
+ rte_free(vnic->fw_grp_ids);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
@@ -1992,6 +2136,7 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
switch (conf_link_speed) {
case ETH_LINK_SPEED_10M_HD:
case ETH_LINK_SPEED_100M_HD:
+ /* FALLTHROUGH */
return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
}
return hw_link_duplex;
@@ -2012,6 +2157,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
case ETH_LINK_SPEED_100M:
case ETH_LINK_SPEED_100M_HD:
+ /* FALLTHROUGH */
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
break;
@@ -2176,6 +2322,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
switch (hw_link_duplex) {
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
+ /* FALLTHROUGH */
eth_link_duplex = ETH_LINK_FULL_DUPLEX;
break;
case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
@@ -2305,6 +2452,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
+ /* FALLTHROUGH */
bp->port_partition_type = resp->port_partition_type;
break;
default:
@@ -2362,7 +2510,8 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
@@ -2399,9 +2548,11 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
- ETHER_CRC_LEN + VLAN_TAG_SIZE);
+ ETHER_CRC_LEN + VLAN_TAG_SIZE *
+ BNXT_NUM_VLANS);
req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
(num_vfs + 1));
req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
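The MRU now budgets for two stacked VLAN tags instead of one. Assuming the usual values (ETHER_HDR_LEN 14, ETHER_CRC_LEN 4, VLAN_TAG_SIZE 4, BNXT_NUM_VLANS 2), a 1500-byte MTU gives 1500 + 14 + 4 + 2 * 4 = 1526 bytes. A minimal sketch of the derivation (demo_mru is illustrative):

    /* Sketch: MRU derived from MTU plus L2 header, CRC and QinQ tags. */
    static uint16_t demo_mru(uint16_t mtu)
    {
        return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
               VLAN_TAG_SIZE * BNXT_NUM_VLANS; /* 1500 -> 1526 */
    }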
@@ -2766,9 +2917,9 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_page_size = rte_cpu_to_le_16(
page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
- req.req_buf_page_addr[0] =
+ req.req_buf_page_addr0 =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
- if (req.req_buf_page_addr[0] == 0) {
+ if (req.req_buf_page_addr0 == 0) {
PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
@@ -2915,6 +3066,18 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
return rc;
}
+int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
+{
+ int rc;
+
+ if (BNXT_PF(bp))
+ rc = bnxt_hwrm_func_cfg_def_cp(bp);
+ else
+ rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
+
+ return rc;
+}
+
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
void *encaped, size_t ec_size)
{
@@ -3029,9 +3192,6 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp)
struct bnxt_pf_info *pf = &bp->pf;
int rc;
- if (!(bp->flags & BNXT_FLAG_PORT_STATS))
- return 0;
-
HWRM_PREP(req, PORT_QSTATS);
req.port_id = rte_cpu_to_le_16(pf->port_id);
@@ -3052,7 +3212,9 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
struct bnxt_pf_info *pf = &bp->pf;
int rc;
- if (!(bp->flags & BNXT_FLAG_PORT_STATS))
+ /* Not allowed on NS2 device, NPAR, MultiHost, VF */
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
+ BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
return 0;
HWRM_PREP(req, PORT_CLR_STATS);
@@ -3199,13 +3361,12 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
-
if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);
rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -3237,12 +3398,13 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
req.offset = rte_cpu_to_le_32(offset);
req.len = rte_cpu_to_le_32(length);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
if (rc == 0)
memcpy(data, buf, length);
rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
return rc;
}
@@ -3273,14 +3435,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
rte_iova_t dma_handle;
uint8_t *buf;
- HWRM_PREP(req, NVM_WRITE);
-
- req.dir_type = rte_cpu_to_le_16(dir_type);
- req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
- req.dir_ext = rte_cpu_to_le_16(dir_ext);
- req.dir_attr = rte_cpu_to_le_16(dir_attr);
- req.dir_data_length = rte_cpu_to_le_32(data_len);
-
buf = rte_malloc("nvm_write", data_len, 0);
rte_mem_lock_page(buf);
if (!buf)
@@ -3293,14 +3447,22 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
return -ENOMEM;
}
memcpy(buf, data, data_len);
+
+ HWRM_PREP(req, NVM_WRITE);
+
+ req.dir_type = rte_cpu_to_le_16(dir_type);
+ req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+ req.dir_ext = rte_cpu_to_le_16(dir_ext);
+ req.dir_attr = rte_cpu_to_le_16(dir_attr);
+ req.dir_data_length = rte_cpu_to_le_32(data_len);
req.host_src_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rte_free(buf);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- rte_free(buf);
return rc;
}
@@ -3588,8 +3750,8 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- filter->fw_em_filter_id = -1;
- filter->fw_l2_filter_id = -1;
+ filter->fw_em_filter_id = UINT64_MAX;
+ filter->fw_l2_filter_id = UINT64_MAX;
return 0;
}
@@ -3700,7 +3862,7 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- filter->fw_ntuple_filter_id = -1;
+ filter->fw_ntuple_filter_id = UINT64_MAX;
return 0;
}
@@ -3732,3 +3894,54 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
}
return 0;
}
+
+static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+{
+ uint16_t flags;
+
+ req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
+
+ /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
+ req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
+
+ /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
+ req->num_cmpl_dma_aggr_during_int =
+ rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
+
+ req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
+
+ /* min timer set to 1/2 of interrupt timer */
+ req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
+
+ /* buf timer set to 1/4 of interrupt timer */
+ req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
+
+ req->cmpl_aggr_dma_tmr_during_int =
+ rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
+
+ flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
+ req->flags = rte_cpu_to_le_16(flags);
+}
+
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
+ struct bnxt_coal *coal, uint16_t ring_id)
+{
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
+ bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* Set ring coalesce parameters only for Stratus 100G NIC */
+ if (!bnxt_stratus_device(bp))
+ return 0;
+
+ HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ bnxt_hwrm_set_coal_params(coal, &req);
+ req.ring_id = rte_cpu_to_le_16(ring_id);
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+ return 0;
+}
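The coalescing comments encode a simple ratio: the minimum interrupt-latency timer is half the maximum, and the DMA buffer timer is a quarter of it. A minimal sketch deriving a bnxt_coal from one target latency in microseconds, on the assumption that the BNXT_* defaults follow the same ratio (demo_fill_coal is illustrative):

    /* Sketch: derive the coalescing timers from a single max latency. */
    static void demo_fill_coal(struct bnxt_coal *coal, uint16_t max_lat)
    {
        coal->int_lat_tmr_max = max_lat;
        coal->int_lat_tmr_min = max_lat / 2;   /* 1/2 of interrupt timer */
        coal->cmpl_aggr_dma_tmr = max_lat / 4; /* 1/4 of interrupt timer */
    }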
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index f11e72a3..379aac6e 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_HWRM_H_
@@ -54,6 +26,12 @@ struct bnxt_cp_ring_info;
#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
(1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32))
+#define HWRM_QUEUE_SERVICE_PROFILE_LOSSY \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY
+
+#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC \
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -88,6 +66,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp);
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+int bnxt_hwrm_set_async_event_cr(struct bnxt *bp);
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
struct bnxt_ring *ring,
uint32_t ring_type, uint32_t map_index,
@@ -131,10 +110,13 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_free_all_hwrm_resources(struct bnxt *bp);
void bnxt_free_hwrm_resources(struct bnxt *bp);
+void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index);
int bnxt_alloc_hwrm_resources(struct bnxt *bp);
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
int bnxt_hwrm_func_qcfg(struct bnxt *bp);
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test);
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp);
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs);
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf,
@@ -189,4 +171,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
int bnxt_hwrm_ptp_cfg(struct bnxt *bp);
int bnxt_vnic_rss_configure(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
+ struct bnxt_coal *coal, uint16_t ring_id);
+int bnxt_hwrm_check_vf_rings(struct bnxt *bp);
#endif
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 8ab98693..7ef7023e 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -68,30 +40,10 @@ static void bnxt_int_handler(void *param)
if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
break;
- switch (CMP_TYPE(cmp)) {
- case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
- /* Handle any async event */
- bnxt_handle_async_event(bp, cmp);
- break;
- case CMPL_BASE_TYPE_HWRM_FWD_REQ:
- /* Handle HWRM forwarded responses */
- bnxt_handle_fwd_req(bp, cmp);
- break;
- default:
- /* Ignore any other events */
- if (cmp->type & rte_cpu_to_le_16(0x01)) {
- if (!CMP_VALID(cmp, raw_cons,
- cpr->cp_ring_struct))
- goto no_more;
- }
- PMD_DRV_LOG(INFO,
- "Ignoring %02x completion\n", CMP_TYPE(cmp));
- break;
- }
+ bnxt_event_hwrm_resp_handler(bp, cmp);
raw_cons = NEXT_RAW_CMP(raw_cons);
-
};
-no_more:
+
cpr->cp_raw_cons = raw_cons;
B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
}
@@ -127,7 +79,9 @@ void bnxt_enable_int(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
- B_CP_DB_ARM(cpr);
+ /* Only the default completion ring */
+ if (cpr != NULL && cpr->cp_doorbell != NULL)
+ B_CP_DB_ARM(cpr);
}
int bnxt_setup_int(struct bnxt *bp)
diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h
index 4d2f7af9..75ba2135 100644
--- a/drivers/net/bnxt/bnxt_irq.h
+++ b/drivers/net/bnxt/bnxt_irq.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_IRQ_H_
diff --git a/drivers/net/bnxt/bnxt_nvm_defs.h b/drivers/net/bnxt/bnxt_nvm_defs.h
index c5ccc9bc..ea9d4a9d 100644
--- a/drivers/net/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/bnxt/bnxt_nvm_defs.h
@@ -1,11 +1,6 @@
-/* Broadcom NetXtreme-C/E network driver.
- *
- * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_NVM_DEFS_H_
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 8fb89721..fcbd6bc6 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <rte_bitmap.h>
@@ -52,11 +24,14 @@
void bnxt_free_ring(struct bnxt_ring *ring)
{
+ if (!ring)
+ return;
+
if (ring->vmem_size && *ring->vmem) {
memset((char *)*ring->vmem, 0, ring->vmem_size);
*ring->vmem = NULL;
}
- rte_memzone_free((const struct rte_memzone *)ring->mem_zone);
+ ring->mem_zone = NULL;
}
/*
@@ -89,22 +64,26 @@ int bnxt_init_ring_grps(struct bnxt *bp)
* rx bd ring - Only non-zero length if rx_ring_info is not NULL
*/
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
- struct bnxt_tx_ring_info *tx_ring_info,
- struct bnxt_rx_ring_info *rx_ring_info,
+ struct bnxt_tx_queue *txq,
+ struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
const char *suffix)
{
struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct;
+ struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL;
+ struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL;
struct bnxt_ring *tx_ring;
struct bnxt_ring *rx_ring;
struct rte_pci_device *pdev = bp->pdev;
+ uint64_t rx_offloads = bp->eth_dev->data->dev_conf.rxmode.offloads;
const struct rte_memzone *mz = NULL;
char mz_name[RTE_MEMZONE_NAMESIZE];
rte_iova_t mz_phys_addr;
int sz;
int stats_len = (tx_ring_info || rx_ring_info) ?
- RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0;
+ RTE_CACHE_LINE_ROUNDUP(sizeof(struct hwrm_stat_ctx_query_output) -
+ sizeof(struct hwrm_resp_hdr)) : 0;
int cp_vmem_start = stats_len;
int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size);
@@ -155,7 +134,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
sizeof(struct bnxt_tpa_info)) : 0;
int total_alloc_len = tpa_info_start;
- if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
total_alloc_len += tpa_info_len;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
@@ -166,10 +145,11 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
mz = rte_memzone_lookup(mz_name);
if (!mz) {
mz = rte_memzone_reserve_aligned(mz_name, total_alloc_len,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY,
- getpagesize());
+ SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG,
+ getpagesize());
if (mz == NULL)
return -ENOMEM;
}
@@ -191,6 +171,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
}
if (tx_ring_info) {
+ txq->mz = mz;
tx_ring = tx_ring_info->tx_ring_struct;
tx_ring->bd = ((char *)mz->addr + tx_ring_start);
@@ -210,6 +191,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
}
if (rx_ring_info) {
+ rxq->mz = mz;
rx_ring = rx_ring_info->rx_ring_struct;
rx_ring->bd = ((char *)mz->addr + rx_ring_start);
@@ -252,7 +234,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
ag_bitmap_start, ag_bitmap_len);
/* TPA info */
- if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
rx_ring_info->tpa_info =
((struct bnxt_tpa_info *)((char *)mz->addr +
tpa_info_start));
@@ -276,6 +258,116 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
return 0;
}
+static void bnxt_init_dflt_coal(struct bnxt_coal *coal)
+{
+ /* Tick values in microseconds.
+ * 1 coal_buf x bufs_per_record = 1 completion record.
+ */
+ coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
+ /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
+ coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
+ /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
+ coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
+ coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
+ /* min timer set to 1/2 of interrupt timer */
+ coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
+ /* buf timer set to 1/4 of interrupt timer */
+ coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
+ coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
+}
+
+int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index)
+{
+ struct rte_pci_device *pci_dev = bp->pdev;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ struct bnxt_ring *ring = rxr->rx_ring_struct;
+ unsigned int map_idx = queue_index + bp->rx_cp_nr_rings;
+ int rc = 0;
+
+ bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id;
+
+ /* Rx cmpl */
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ queue_index, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
+ if (rc)
+ goto err_out;
+
+ cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ queue_index * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id;
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+
+ if (!queue_index) {
+ /*
+ * In order to save completion resources, use the first
+ * completion ring from PF or VF as the default completion ring
+ * for async event and HWRM forward response handling.
+ */
+ bp->def_cp_ring = cpr;
+ rc = bnxt_hwrm_set_async_event_cr(bp);
+ if (rc)
+ goto err_out;
+ }
+ /* Rx ring */
+ rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ queue_index, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+
+ rxr->rx_prod = 0;
+ rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ queue_index * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
+ ring = rxr->ag_ring_struct;
+ /* Agg ring */
+ if (!ring)
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
+
+ rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ map_idx, HWRM_NA_SIGNATURE,
+ cp_ring->fw_ring_id);
+ if (rc)
+ goto err_out;
+
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
+ rxr->ag_prod = 0;
+ rxr->ag_doorbell = (char *)pci_dev->mem_resource[2].addr +
+ map_idx * BNXT_DB_SIZE;
+ bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id;
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
+
+ if (bp->eth_dev->data->rx_queue_state[queue_index] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ if (bnxt_init_one_rx_ring(rxq)) {
+ RTE_LOG(ERR, PMD,
+ "bnxt_init_one_rx_ring failed!\n");
+ bnxt_rx_queue_release_op(rxq);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+ }
+ rxq->index = queue_index;
+ PMD_DRV_LOG(INFO,
+ "queue %d, rx_deferred_start %d, state %d!\n",
+ queue_index, rxq->rx_deferred_start,
+ bp->eth_dev->data->rx_queue_state[queue_index]);
+
+err_out:
+ return rc;
+}
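Doorbells sit in PCI BAR 2 at fixed BNXT_DB_SIZE strides: queue N's completion and Rx rings use slot N, and its aggregation ring lands past all rx/cp slots at map_idx = N + rx_cp_nr_rings, matching the ring_grp layout described below. A minimal address sketch (demo_db_addr is illustrative):

    /* Sketch: doorbell address of ring slot 'idx' in BAR 2. */
    static char *demo_db_addr(struct rte_pci_device *pdev, unsigned int idx)
    {
        return (char *)pdev->mem_resource[2].addr + idx * BNXT_DB_SIZE;
    }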
/* ring_grp usage:
* [0] = default completion ring
* [1 -> +rx_cp_nr_rings] = rx_cp, rx rings
@@ -283,43 +375,61 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
*/
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
- struct rte_pci_device *pci_dev = bp->pdev;
+ struct bnxt_coal coal;
unsigned int i;
int rc = 0;
+ bnxt_init_dflt_coal(&coal);
+
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
struct bnxt_ring *ring = rxr->rx_ring_struct;
- unsigned int idx = i + 1;
- unsigned int map_idx = idx + bp->rx_cp_nr_rings;
+ unsigned int map_idx = i + bp->rx_cp_nr_rings;
bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
/* Rx cmpl */
- rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
- idx, HWRM_NA_SIGNATURE,
- HWRM_NA_SIGNATURE);
+ rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
+ i, HWRM_NA_SIGNATURE,
+ HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
- cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
- idx * 0x80;
+ cpr->cp_doorbell = (char *)bp->doorbell_base + i * 0x80;
bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
+
+ if (!i) {
+ /*
+ * In order to save completion resources, use the first
+ * completion ring from PF or VF as the default
+ * completion ring for async events and HWRM
+ * forward response handling.
+ */
+ bp->def_cp_ring = cpr;
+ rc = bnxt_hwrm_set_async_event_cr(bp);
+ if (rc)
+ goto err_out;
+ }
/* Rx ring */
- rc = bnxt_hwrm_ring_alloc(bp, ring,
- HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
- idx, cpr->hw_stats_ctx_id,
- cp_ring->fw_ring_id);
+ rc = bnxt_hwrm_ring_alloc(bp, ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
+ i, cpr->hw_stats_ctx_id,
+ cp_ring->fw_ring_id);
if (rc)
goto err_out;
rxr->rx_prod = 0;
- rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
- idx * 0x80;
+ rxr->rx_doorbell = (char *)bp->doorbell_base + i * 0x80;
bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
@@ -338,9 +448,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
goto err_out;
PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
rxr->ag_prod = 0;
- rxr->ag_doorbell =
- (char *)pci_dev->mem_resource[2].addr +
- map_idx * 0x80;
+ rxr->ag_doorbell = (char *)bp->doorbell_base + map_idx * 0x80;
bp->grp_info[i].ag_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
@@ -353,7 +461,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
}
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
- rxq->index = idx;
+ rxq->index = i;
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
@@ -362,7 +470,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct bnxt_ring *ring = txr->tx_ring_struct;
- unsigned int idx = i + 1 + bp->rx_cp_nr_rings;
+ unsigned int idx = i + bp->rx_cp_nr_rings;
/* Tx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
@@ -372,8 +480,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
- cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
- idx * 0x80;
+ cpr->cp_doorbell = (char *)bp->doorbell_base + idx * 0x80;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
/* Tx ring */
@@ -384,9 +491,9 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
- txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr +
- idx * 0x80;
+ txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80;
txq->index = idx;
+ bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id);
}
err_out:
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index ebf7228e..1446d784 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_RING_H_
@@ -56,6 +28,7 @@
#define BNXT_TPA_MAX 64
#define AGG_RING_SIZE_FACTOR 2
+#define AGG_RING_MULTIPLIER 2
/* These assume 4k pages */
#define MAX_RX_DESC_CNT (8 * 1024)
@@ -93,10 +66,11 @@ struct bnxt_cp_ring_info;
void bnxt_free_ring(struct bnxt_ring *ring);
int bnxt_init_ring_grps(struct bnxt *bp);
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
- struct bnxt_tx_ring_info *tx_ring_info,
- struct bnxt_rx_ring_info *rx_ring_info,
+ struct bnxt_tx_queue *txq,
+ struct bnxt_rx_queue *rxq,
struct bnxt_cp_ring_info *cp_ring_info,
const char *suffix);
+int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index);
int bnxt_alloc_hwrm_rings(struct bnxt *bp);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index d49f3546..832fc9ec 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -51,10 +23,8 @@
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
- struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-
- if (cpr->hw_stats)
- cpr->hw_stats = NULL;
+ if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
+ rxq->cp_ring->hw_stats = NULL;
}
int bnxt_mq_rx_configure(struct bnxt *bp)
@@ -107,6 +77,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_RSS:
case ETH_MQ_RX_VMDQ_ONLY:
+ /* FALLTHROUGH */
/* ETH_8/64_POOLs */
pools = conf->nb_queue_pools;
/* For each pool, allocate MACVLAN CFA rule & VNIC */
@@ -228,16 +199,19 @@ err_out:
return rc;
}
-static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
struct bnxt_sw_rx_bd *sw_ring;
struct bnxt_tpa_info *tpa_info;
uint16_t i;
+ rte_spinlock_lock(&rxq->lock);
+
if (rxq) {
sw_ring = rxq->rx_ring->rx_buf_ring;
if (sw_ring) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
+ for (i = 0;
+ i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
@@ -247,7 +221,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
/* Free up mbufs in Agg ring */
sw_ring = rxq->rx_ring->ag_buf_ring;
if (sw_ring) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
+ for (i = 0;
+ i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
if (sw_ring[i].mbuf) {
rte_pktmbuf_free_seg(sw_ring[i].mbuf);
sw_ring[i].mbuf = NULL;
@@ -266,6 +241,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
}
}
}
+
+ rte_spinlock_unlock(&rxq->lock);
}
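rxq->lock serialises queue teardown against the fast path: bnxt_rx_queue_release_mbufs() takes the lock outright, while bnxt_recv_pkts() in bnxt_rxr.c only trylocks and bails out with zero packets if teardown holds it. A minimal sketch of the fast-path side (demo_rx_burst is illustrative):

    /* Sketch: the burst path backs off instead of blocking on a stopping queue. */
    static uint16_t demo_rx_burst(struct bnxt_rx_queue *rxq)
    {
        uint16_t nb_rx = 0;

        if (!rte_spinlock_trylock(&rxq->lock))
            return 0; /* queue is being stopped; drop out immediately */
        /* ... pull completions and fill rx_pkts[] here ... */
        rte_spinlock_unlock(&rxq->lock);
        return nb_rx;
    }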
void bnxt_free_rx_mbufs(struct bnxt *bp)
@@ -295,6 +272,8 @@ void bnxt_rx_queue_release_op(void *rx_queue)
bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
bnxt_free_rxq_stats(rxq);
+ rte_memzone_free(rxq->mz);
+ rxq->mz = NULL;
rte_free(rxq);
}
@@ -308,14 +287,16 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
struct rte_mempool *mp)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
struct bnxt_rx_queue *rxq;
int rc = 0;
+ uint8_t queue_state;
if (queue_idx >= bp->max_rx_rings) {
PMD_DRV_LOG(ERR,
"Cannot create Rx ring %d. Only %d rings available\n",
queue_idx, bp->max_rx_rings);
- return -ENOSPC;
+ return -EINVAL;
}
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
@@ -350,12 +331,12 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->queue_id = queue_idx;
rxq->port_id = eth_dev->data->port_id;
- rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ?
+ ETHER_CRC_LEN : 0;
eth_dev->data->rx_queues[queue_idx] = rxq;
/* Allocate RX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
+ if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
"rxr")) {
PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
@@ -363,7 +344,13 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rc = -ENOMEM;
goto out;
}
+ rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
+ RTE_ETH_QUEUE_STATE_STARTED;
+ eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+ rte_spinlock_init(&rxq->lock);
out:
return rc;
}
@@ -412,6 +399,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
+ int rc = 0;
if (rxq == NULL) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
@@ -419,28 +407,47 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- rxq->rx_deferred_start = false;
+
+ bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
+ bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
+
if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
return 0;
- PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
- vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+
+ PMD_DRV_LOG(DEBUG,
+ "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+
vnic->fw_grp_ids[rx_queue_id] =
- bp->grp_info[rx_queue_id + 1].fw_grp_id;
- return bnxt_vnic_rss_configure(bp, vnic);
+ bp->grp_info[rx_queue_id].fw_grp_id;
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
- return 0;
+ if (rc == 0)
+ rxq->rx_deferred_start = false;
+
+ return rc;
}
int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
- struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
struct bnxt_vnic_info *vnic = NULL;
+ struct bnxt_rx_queue *rxq = NULL;
+ int rc = 0;
+
+ /* Rx CQ 0 also works as Default CQ for async notifications */
+ if (!rx_queue_id) {
+ PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ rxq = bp->rx_queues[rx_queue_id];
if (rxq == NULL) {
PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
@@ -454,7 +461,11 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
vnic = rxq->vnic;
vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
- return bnxt_vnic_rss_configure(bp, vnic);
+ rc = bnxt_vnic_rss_configure(bp, vnic);
}
- return 0;
+
+ if (rc == 0)
+ bnxt_rx_queue_release_mbufs(rxq);
+
+ return rc;
}
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index c7acaa75..e5d6001d 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_RQX_H_
@@ -38,6 +10,9 @@ struct bnxt;
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
struct bnxt_rx_queue {
+ rte_spinlock_t lock; /* Synchronize between rx_queue_stop
+ * and fast path
+ */
struct rte_mempool *mb_pool; /* mbuf pool for RX ring */
struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */
struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */
@@ -60,6 +35,8 @@ struct bnxt_rx_queue {
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
+ rte_atomic64_t rx_mbuf_alloc_fail;
+ const struct rte_memzone *mz;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
@@ -80,4 +57,5 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index aae9a635..c7bc8848 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -69,13 +41,14 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return -ENOMEM;
}
rx_buf->mbuf = mbuf;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
return 0;
}
@@ -90,7 +63,7 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
if (!mbuf) {
- rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return -ENOMEM;
}
@@ -101,8 +74,9 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
rx_buf->mbuf = mbuf;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ rxbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
return 0;
}
@@ -123,7 +97,7 @@ static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
prod_bd = &rxr->rx_desc_ring[prod];
- prod_bd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+ prod_bd->address = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxr->rx_prod = prod;
}
@@ -143,7 +117,7 @@ static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
prod_bd = &rxr->ag_desc_ring[prod];
cons_bd = &rxr->ag_desc_ring[cons];
- prod_bd->addr = cons_bd->addr;
+ prod_bd->address = cons_bd->address;
}
#endif
@@ -327,7 +301,7 @@ static inline struct rte_mbuf *bnxt_tpa_end(
struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
RTE_ASSERT(new_data != NULL);
if (!new_data) {
- rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return NULL;
}
tpa_info->mbuf = new_data;
@@ -490,11 +464,15 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
else
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
else
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
@@ -560,9 +538,12 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t prod = rxr->rx_prod;
uint16_t ag_prod = rxr->ag_prod;
int rc = 0;
+ bool evt = false;
- /* If Rx Q was stopped return */
- if (rxq->rx_deferred_start)
+ /* If the Rx queue was stopped, return; RxQ0 cannot be stopped. */
+ if (unlikely(((rxq->rx_deferred_start ||
+ !rte_spinlock_trylock(&rxq->lock)) &&
+ rxq->queue_id)))
return 0;
/* Handle RX burst request */
@@ -584,25 +565,37 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
nb_rx_pkts++;
if (rc == -EBUSY) /* partial completion */
break;
+ } else {
+ evt =
+ bnxt_event_hwrm_resp_handler(rxq->bp,
+ (struct cmpl_base *)rxcmp);
}
+
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (nb_rx_pkts == nb_pkts)
+ if (nb_rx_pkts == nb_pkts || evt)
break;
+ /* Post some Rx buffers early in case of larger burst processing */
+ if (nb_rx_pkts == BNXT_RX_POST_THRESH)
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
}
cpr->cp_raw_cons = raw_cons;
- if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
+ if (!nb_rx_pkts && !evt) {
/*
* For PMD, there is no need to keep on pushing to REARM
* the doorbell if there are no new completions
*/
- return nb_rx_pkts;
+ goto done;
}
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+ if (prod != rxr->rx_prod)
+ B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
+
/* Ring the AGG ring DB */
- B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+ if (ag_prod != rxr->ag_prod)
+ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod);
+
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
/* Attempt to alloc Rx buf in case of a previous allocation failure. */
if (rc == -ENOMEM) {
@@ -627,16 +620,22 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
}
+done:
+ rte_spinlock_unlock(&rxq->lock);
+
return nb_rx_pkts;
}
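
The loop above also posts Rx buffers mid-burst: once BNXT_RX_POST_THRESH (32) completions have been handled, the Rx doorbell is rung so the NIC can start refilling while the rest of the burst is still being processed. A self-contained toy of that pattern, with ring_doorbell() standing in for the driver's B_RX_DB() macro:

#include <stdio.h>

#define BNXT_RX_POST_THRESH 32	/* value from bnxt_rxr.h below */

static void ring_doorbell(const char *why) { printf("doorbell: %s\n", why); }

/* Toy of the early-post pattern: the counter mirrors nb_rx_pkts in
 * bnxt_recv_pkts(); a mid-burst doorbell fires at the threshold and a
 * final one covers the remainder. */
int main(void)
{
	unsigned int nb_rx_pkts = 0, nb_pkts = 80;

	while (nb_rx_pkts < nb_pkts) {
		nb_rx_pkts++;				/* one completion handled */
		if (nb_rx_pkts == BNXT_RX_POST_THRESH)
			ring_doorbell("early refill");	/* mid-burst post */
	}
	if (nb_rx_pkts)
		ring_doorbell("end of burst");
	return 0;
}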
void bnxt_free_rx_rings(struct bnxt *bp)
{
int i;
+ struct bnxt_rx_queue *rxq;
- for (i = 0; i < (int)bp->rx_nr_rings; i++) {
- struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+ if (!bp->rx_queues)
+ return;
+ for (i = 0; i < (int)bp->rx_nr_rings; i++) {
+ rxq = bp->rx_queues[i];
if (!rxq)
continue;
@@ -755,7 +754,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
if (rxq->rx_buf_use_size <= size)
size = rxq->rx_buf_use_size;
- type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
+ type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD;
rxr = rxq->rx_ring;
ring = rxr->rx_ring_struct;
@@ -795,7 +794,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->tpa_info[i].mbuf =
__bnxt_alloc_rx_data(rxq->mb_pool);
if (!rxr->tpa_info[i].mbuf) {
- rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
+ rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
return -ENOMEM;
}
}
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index f3ed49bd..3815a219 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_RXR_H_
@@ -52,22 +24,38 @@
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
((hdr_info) & 0x1ff)
-#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC)
+#define RX_CMP_L4_CS_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \
+ RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)
-#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR)
+#define RX_CMP_L4_CS_ERR_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \
+ RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR)
#define RX_CMP_L4_CS_OK(rxcmp1) \
(((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \
!((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS))
-#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR)
+#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \
+ !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS)
-#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC)
+#define RX_CMP_IP_CS_ERR_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \
+ RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR)
+
+#define RX_CMP_IP_CS_BITS \
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \
+ RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)
#define RX_CMP_IP_CS_OK(rxcmp1) \
(((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \
!((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS))
+#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \
+ !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS)
+
+#define BNXT_RX_POST_THRESH 32
+
enum pkt_hash_types {
PKT_HASH_TYPE_NONE, /* Undefined type */
PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index bd93cc83..a5d3c866 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -172,8 +144,8 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
tx_mcast_pkts)},
{"tx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
tx_bcast_pkts)},
- {"tx_err_pkts", offsetof(struct hwrm_func_qstats_output,
- tx_err_pkts)},
+ {"tx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
+ tx_discard_pkts)},
{"tx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
tx_drop_pkts)},
{"tx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
@@ -188,8 +160,8 @@ static const struct bnxt_xstats_name_off bnxt_func_stats_strings[] = {
rx_mcast_pkts)},
{"rx_bcast_pkts", offsetof(struct hwrm_func_qstats_output,
rx_bcast_pkts)},
- {"rx_err_pkts", offsetof(struct hwrm_func_qstats_output,
- rx_err_pkts)},
+ {"rx_discard_pkts", offsetof(struct hwrm_func_qstats_output,
+ rx_discard_pkts)},
{"rx_drop_pkts", offsetof(struct hwrm_func_qstats_output,
rx_drop_pkts)},
{"rx_ucast_bytes", offsetof(struct hwrm_func_qstats_output,
@@ -238,7 +210,7 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
memset(bnxt_stats, 0, sizeof(*bnxt_stats));
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
- return 0;
+ return -1;
}
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
@@ -249,6 +221,8 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_stats, 1);
if (unlikely(rc))
return rc;
+ bnxt_stats->rx_nombuf +=
+ rte_atomic64_read(&rxq->rx_mbuf_alloc_fail);
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
@@ -263,13 +237,13 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats);
if (unlikely(rc))
return rc;
- bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail);
return rc;
}
void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ unsigned int i;
if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
@@ -277,7 +251,11 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
}
bnxt_clear_all_hwrm_stat_ctxs(bp);
- rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ struct bnxt_rx_queue *rxq = bp->rx_queues[i];
+
+ rte_atomic64_clear(&rxq->rx_mbuf_alloc_fail);
+ }
}
int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
@@ -288,11 +266,6 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
unsigned int count, i;
uint64_t tx_drop_pkts;
- if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
- PMD_DRV_LOG(ERR, "xstats not supported for VF\n");
- return 0;
- }
-
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_func_qstats_tx_drop(bp, 0xffff, &tx_drop_pkts);
@@ -305,6 +278,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
count = 0;
for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)rx_stats +
bnxt_rx_stats_strings[i].offset));
@@ -313,6 +287,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)tx_stats +
bnxt_tx_stats_strings[i].offset));
@@ -320,6 +295,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
}
/* The Tx drop pkts aka the Anti spoof counter */
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
count++;
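
With the id field now populated for every entry, an application can pair values with the names reported by the ethdev layer. A minimal consumer sketch using the standard rte_ethdev xstats API (application-side code, not part of this patch):

#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Fetch the name table and the values, then pair them through the id
 * field that the driver now fills in. */
static void print_xstats(uint16_t port_id)
{
	int n = rte_eth_xstats_get(port_id, NULL, 0);	/* query required count */

	if (n <= 0)
		return;

	struct rte_eth_xstat_name names[n];
	struct rte_eth_xstat vals[n];

	if (rte_eth_xstats_get_names(port_id, names, n) != n ||
	    rte_eth_xstats_get(port_id, vals, n) != n)
		return;
	for (int i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n",
		       names[vals[i].id].name, vals[i].value);
}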
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
index c1c83d57..b0f135a5 100644
--- a/drivers/net/bnxt/bnxt_stats.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_STATS_H_
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 53524346..b9b975e4 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -47,10 +19,8 @@
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq)
{
- struct bnxt_cp_ring_info *cpr = txq->cp_ring;
-
- if (cpr->hw_stats)
- cpr->hw_stats = NULL;
+ if (txq && txq->cp_ring && txq->cp_ring->hw_stats)
+ txq->cp_ring->hw_stats = NULL;
}
static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
@@ -58,6 +28,9 @@ static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq)
struct bnxt_sw_tx_bd *sw_ring;
uint16_t i;
+ if (!txq)
+ return;
+
sw_ring = txq->tx_ring->tx_buf_ring;
if (sw_ring) {
for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) {
@@ -93,6 +66,8 @@ void bnxt_tx_queue_release_op(void *tx_queue)
bnxt_free_ring(txq->cp_ring->cp_ring_struct);
bnxt_free_txq_stats(txq);
+ rte_memzone_free(txq->mz);
+ txq->mz = NULL;
rte_free(txq);
}
@@ -112,7 +87,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
PMD_DRV_LOG(ERR,
"Cannot create Tx ring %d. Only %d rings available\n",
queue_idx, bp->max_tx_rings);
- return -ENOSPC;
+ return -EINVAL;
}
if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
@@ -147,7 +122,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq->port_id = eth_dev->data->port_id;
/* Allocate TX ring hardware descriptors */
- if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
+ if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring,
"txr")) {
PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index e27c34fa..f2c712a7 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_TXQ_H_
@@ -50,9 +22,9 @@ struct bnxt_tx_queue {
uint8_t pthresh; /* Prefetch threshold register */
uint8_t hthresh; /* Host threshold register */
uint8_t wthresh; /* Write-back threshold reg */
- uint32_t txq_flags; /* Holds flags for this TXq */
uint32_t ctx_curr; /* Hardware context states */
uint8_t tx_deferred_start; /* not in global dev start */
+ uint8_t cmpl_next; /* Next BD to trigger a compl */
struct bnxt *bp;
int index;
@@ -61,6 +33,7 @@ struct bnxt_tx_queue {
unsigned int cp_nr_rings;
struct bnxt_cp_ring_info *cp_ring;
+ const struct rte_memzone *mz;
};
void bnxt_free_txq_stats(struct bnxt_tx_queue *txq);
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 2c81a37c..67bb35e0 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -142,7 +114,9 @@ static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
}
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
- struct bnxt_tx_queue *txq)
+ struct bnxt_tx_queue *txq,
+ uint16_t *coal_pkts,
+ uint16_t *cmpl_next)
{
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct tx_bd_long *txbd;
@@ -161,7 +135,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
+ PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
+ PKT_TX_TUNNEL_GENEVE))
long_bd = true;
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
@@ -174,14 +150,21 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
return -ENOMEM;
txbd = &txr->tx_desc_ring[txr->tx_prod];
- txbd->opaque = txr->tx_prod;
+ txbd->opaque = *coal_pkts;
txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
+ txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
+ if (!*cmpl_next) {
+ txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
+ } else {
+ *coal_pkts = 0;
+ *cmpl_next = false;
+ }
txbd->len = tx_pkt->data_len;
- if (txbd->len >= 2014)
+ if (tx_pkt->pkt_len >= 2014)
txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
else
- txbd->flags_type |= lhint_arr[txbd->len >> 9];
- txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf));
+ txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
+ txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));
if (long_bd) {
txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
@@ -222,16 +205,46 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_UDP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
+ PKT_TX_IIP_UDP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
+ PKT_TX_IIP_TCP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
+ PKT_TX_OIP_UDP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
+ PKT_TX_OIP_TCP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */
@@ -242,11 +255,23 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
+ PKT_TX_TCP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
+ PKT_TX_UDP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
+ PKT_TX_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
+ PKT_TX_OUTER_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
txbd1->mss = 0;
@@ -262,14 +287,15 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
txbd = &txr->tx_desc_ring[txr->tx_prod];
- txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg));
- txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
+ txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
+ txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
txbd->len = m_seg->data_len;
m_seg = m_seg->next;
}
txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
+ txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
@@ -306,35 +332,44 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
struct bnxt_cp_ring_info *cpr = txq->cp_ring;
uint32_t raw_cons = cpr->cp_raw_cons;
uint32_t cons;
- int nb_tx_pkts = 0;
+ uint32_t nb_tx_pkts = 0;
struct tx_cmpl *txcmp;
+ struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
+ struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
+ uint32_t ring_mask = cp_ring_struct->ring_mask;
+ uint32_t opaque = 0;
- if ((txq->tx_ring->tx_ring_struct->ring_size -
- (bnxt_tx_avail(txq->tx_ring))) >
- txq->tx_free_thresh) {
- while (1) {
- cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
- txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
-
- if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct))
- break;
- cpr->valid = FLIP_VALID(cons,
- cpr->cp_ring_struct->ring_mask,
- cpr->valid);
-
- if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
- nb_tx_pkts++;
- else
- RTE_LOG_DP(DEBUG, PMD,
- "Unhandled CMP type %02x\n",
- CMP_TYPE(txcmp));
- raw_cons = NEXT_RAW_CMP(raw_cons);
- }
- if (nb_tx_pkts)
- bnxt_tx_cmp(txq, nb_tx_pkts);
+ if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) &
+ txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh)
+ return 0;
+
+ do {
+ cons = RING_CMPL(ring_mask, raw_cons);
+ txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
+ rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
+ ring_mask]);
+
+ if (!CMPL_VALID(txcmp, cpr->valid))
+ break;
+ opaque = rte_cpu_to_le_32(txcmp->opaque);
+ NEXT_CMPL(cpr, cons, cpr->valid, 1);
+ rte_prefetch0(&cp_desc_ring[cons]);
+
+ if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
+ nb_tx_pkts += opaque;
+ else
+ RTE_LOG_DP(ERR, PMD,
+ "Unhandled CMP type %02x\n",
+ CMP_TYPE(txcmp));
+ raw_cons = cons;
+ } while (nb_tx_pkts < ring_mask);
+
+ if (nb_tx_pkts) {
+ bnxt_tx_cmp(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ B_CP_DB(cpr, cpr->cp_raw_cons, ring_mask);
}
+
return nb_tx_pkts;
}
@@ -343,8 +378,8 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct bnxt_tx_queue *txq = tx_queue;
uint16_t nb_tx_pkts = 0;
- uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2;
- uint16_t last_db_mask = 0;
+ uint16_t coal_pkts = 0;
+ uint16_t cmpl_next = txq->cmpl_next;
/* Handle TX completions */
bnxt_handle_tx_cp(txq);
@@ -354,16 +389,25 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
return 0;
}
+
+ txq->cmpl_next = 0;
/* Handle TX burst request */
for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
- if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
+ int rc;
+
+ /* Request a completion on first and last packet */
+ cmpl_next |= (nb_pkts == nb_tx_pkts + 1);
+ coal_pkts++;
+ rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
+ &coal_pkts, &cmpl_next);
+
+ if (unlikely(rc)) {
+ /* Request a completion in next cycle */
+ txq->cmpl_next = 1;
break;
- } else if ((nb_tx_pkts & db_mask) != last_db_mask) {
- B_TX_DB(txq->tx_ring->tx_doorbell,
- txq->tx_ring->tx_prod);
- last_db_mask = nb_tx_pkts & db_mask;
}
}
+
if (nb_tx_pkts)
B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);
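
Instead of ringing the Tx doorbell every quarter ring, the rewritten path requests a single coalesced completion per burst (TX_BD_SHORT_FLAGS_COAL_NOW on every descriptor, TX_BD_LONG_FLAGS_NO_CMPL on the intermediate ones) and stores the running packet count in the descriptor's opaque field; bnxt_handle_tx_cp() then reclaims by summing opaque values rather than counting one completion per packet. A toy model of that accounting:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Each completion's opaque field carries how many packets it covers,
 * so the reclaim loop adds opaque values instead of incrementing once
 * per completion. */
int main(void)
{
	uint32_t opaque_per_cmpl[] = { 32, 16, 5 };	/* e.g. a 53-packet burst */
	uint32_t nb_tx_pkts = 0;
	unsigned int i;

	for (i = 0; i < 3; i++)
		nb_tx_pkts += opaque_per_cmpl[i];
	printf("reclaimed %" PRIu32 " packets from 3 completions\n",
	       nb_tx_pkts);
	return 0;
}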
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index d88b15ab..7f3c7cdb 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_TXR_H_
@@ -73,10 +45,20 @@ int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_util.c b/drivers/net/bnxt/bnxt_util.c
new file mode 100644
index 00000000..7d334271
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_util.c
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#include <inttypes.h>
+
+#include "bnxt_util.h"
+
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ if (bytes[i] != 0x00)
+ return 0;
+ return 1;
+}
diff --git a/drivers/net/bnxt/bnxt_util.h b/drivers/net/bnxt/bnxt_util.h
new file mode 100644
index 00000000..2378833c
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_util.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _BNXT_UTIL_H_
+#define _BNXT_UTIL_H_
+
+int bnxt_check_zero_bytes(const uint8_t *bytes, int len);
+
+#endif /* _BNXT_UTIL_H_ */
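
bnxt_check_zero_bytes() returns 1 only when every byte in the buffer is zero. A hypothetical caller sketch (mac_is_unset() is illustrative and not part of the patch):

#include <stdint.h>
#include "bnxt_util.h"

/* An all-zero MAC address is treated as unset. */
static int mac_is_unset(const uint8_t mac[6])
{
	return bnxt_check_zero_bytes(mac, 6);
}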
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index d4aeb4ca..c0577cd7 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
@@ -67,7 +39,7 @@ void bnxt_init_vnics(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
uint16_t max_vnics;
- int i, j;
+ int i;
max_vnics = bp->max_vnics;
STAILQ_INIT(&bp->free_vnic_list);
@@ -77,9 +49,8 @@ void bnxt_init_vnics(struct bnxt *bp)
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
-
- for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
- vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
+ vnic->hash_mode =
+ HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
STAILQ_INIT(&vnic->filter);
@@ -185,10 +156,10 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
mz = rte_memzone_lookup(mz_name);
if (!mz) {
mz = rte_memzone_reserve(mz_name,
- entry_length * max_vnics,
- SOCKET_ID_ANY,
- RTE_MEMZONE_2MB |
- RTE_MEMZONE_SIZE_HINT_ONLY);
+ entry_length * max_vnics, SOCKET_ID_ANY,
+ RTE_MEMZONE_2MB |
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_IOVA_CONTIG);
if (!mz)
return -ENOMEM;
}
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index d8d35c7d..9029f78c 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Broadcom Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _BNXT_VNIC_H_
@@ -43,16 +15,13 @@ struct bnxt_vnic_info {
uint16_t fw_vnic_id; /* returned by Chimp during alloc */
uint16_t rss_rule;
-#define MAX_NUM_TRAFFIC_CLASSES 8
-#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
-#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
- MAX_NUM_TRAFFIC_CLASSES)
uint16_t start_grp_id;
uint16_t end_grp_id;
- uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
+ uint16_t *fw_grp_ids;
uint16_t dflt_ring_grp;
uint16_t mru;
uint16_t hash_type;
+ uint8_t hash_mode;
rte_iova_t rss_table_dma_addr;
uint16_t *rss_table;
rte_iova_t rss_hash_key_dma_addr;
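
Since fw_grp_ids is now a bare pointer, each VNIC's ring-group table has to be sized and initialized at runtime instead of by the removed MAX_QUEUES_PER_VNIC bound. A hedged sketch of the allocation this implies (the patch's actual call site lives elsewhere; the helper name and the max_ring_grps sizing are illustrative):

#include <errno.h>
#include <rte_malloc.h>
#include "bnxt_vnic.h"	/* struct bnxt_vnic_info; HWRM_NA_SIGNATURE via hsi */

/* Illustrative helper, not the patch's code: size the table from the
 * device's ring-group limit and mark every slot as unallocated. */
static int bnxt_vnic_grp_alloc_sketch(struct bnxt_vnic_info *vnic,
				      uint16_t max_ring_grps)
{
	uint16_t i;

	vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids",
				       max_ring_grps * sizeof(uint16_t), 0);
	if (vnic->fw_grp_ids == NULL)
		return -ENOMEM;
	for (i = 0; i < max_ring_grps; i++)
		vnic->fw_grp_ids[i] = (uint16_t)HWRM_NA_SIGNATURE;
	return 0;
}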
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 1e9c39f4..f5c7b422 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -1,254 +1,1266 @@
-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2014-2018 Broadcom Limited
+ * All rights reserved.
*
- * Copyright(c) 2001-2017 Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * DO NOT MODIFY!!! This file is automatically generated.
*/
-#ifndef _HSI_STRUCT_DEF_DPDK_
-#define _HSI_STRUCT_DEF_DPDK_
-/* HSI and HWRM Specification 1.8.2 */
-#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 8
-#define HWRM_VERSION_UPDATE 2
+#ifndef _HSI_STRUCT_DEF_DPDK_H_
+#define _HSI_STRUCT_DEF_DPDK_H_
-#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */
+/* This is the HWRM command header. */
+/* hwrm_cmd_hdr (size:128b/16B) */
+struct hwrm_cmd_hdr {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
-#define HWRM_VERSION_STR "1.8.2.0"
-/*
- * Following is the signature for HWRM message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
- */
-#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
-#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
-#define HW_HASH_KEY_SIZE 40
-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
-#define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1
-#define HWRM_ROCE_SP_HSI_VERSION_MINOR 8
-#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 2
+/* This is the HWRM response header. */
+/* hwrm_resp_hdr (size:64b/8B) */
+struct hwrm_resp_hdr {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+} __attribute__((packed));
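
Taken together, the two headers define the request/response contract: the driver picks a seq_id, points resp_addr at a DMA-able response buffer, and matches the echoed seq_id when the response lands. A sketch of filling the request header (hwrm_prep_sketch() and next_seq are illustrative; the all-ones cmpl_ring convention is the one documented on the parallel input structure below):

#include <stdint.h>
#include <string.h>

static uint16_t next_seq;

/* Sketch only: populate the fields per the comments above. */
static void hwrm_prep_sketch(struct hwrm_cmd_hdr *req, uint16_t req_type,
			     uint64_t resp_buf_pa)
{
	memset(req, 0, sizeof(*req));
	req->req_type = req_type;
	req->cmpl_ring = (uint16_t)-1;	/* all-ones: no completion ring */
	req->seq_id = next_seq++;	/* echoed back in hwrm_resp_hdr */
	req->target_id = 0xFFFF;	/* 0xFFFF addresses the HWRM itself */
	req->resp_addr = resp_buf_pa;	/* DMA address of the response buffer */
}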
/*
- * Request types
+ * TLV encapsulated message. Use the TLV type field of the
+ * TLV to determine the type of message encapsulated.
*/
-#define HWRM_VER_GET (UINT32_C(0x0))
-#define HWRM_FUNC_BUF_UNRGTR (UINT32_C(0xe))
-#define HWRM_FUNC_VF_CFG (UINT32_C(0xf))
- /* Reserved for future use */
-#define RESERVED1 (UINT32_C(0x10))
-#define HWRM_FUNC_RESET (UINT32_C(0x11))
-#define HWRM_FUNC_GETFID (UINT32_C(0x12))
-#define HWRM_FUNC_VF_ALLOC (UINT32_C(0x13))
-#define HWRM_FUNC_VF_FREE (UINT32_C(0x14))
-#define HWRM_FUNC_QCAPS (UINT32_C(0x15))
-#define HWRM_FUNC_QCFG (UINT32_C(0x16))
-#define HWRM_FUNC_CFG (UINT32_C(0x17))
-#define HWRM_FUNC_QSTATS (UINT32_C(0x18))
-#define HWRM_FUNC_CLR_STATS (UINT32_C(0x19))
-#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a))
-#define HWRM_FUNC_VF_RESC_FREE (UINT32_C(0x1b))
-#define HWRM_FUNC_VF_VNIC_IDS_QUERY (UINT32_C(0x1c))
-#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d))
-#define HWRM_FUNC_DRV_QVER (UINT32_C(0x1e))
-#define HWRM_FUNC_BUF_RGTR (UINT32_C(0x1f))
-#define HWRM_PORT_PHY_CFG (UINT32_C(0x20))
-#define HWRM_PORT_MAC_CFG (UINT32_C(0x21))
-#define HWRM_PORT_QSTATS (UINT32_C(0x23))
-#define HWRM_PORT_LPBK_QSTATS (UINT32_C(0x24))
-#define HWRM_PORT_CLR_STATS (UINT32_C(0x25))
-#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27))
-#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28))
-#define HWRM_PORT_MAC_PTP_QCFG (UINT32_C(0x29))
-#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a))
-#define HWRM_PORT_LED_CFG (UINT32_C(0x2d))
-#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e))
-#define HWRM_PORT_LED_QCAPS (UINT32_C(0x2f))
-#define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30))
-#define HWRM_QUEUE_QCFG (UINT32_C(0x31))
-#define HWRM_QUEUE_CFG (UINT32_C(0x32))
-#define HWRM_FUNC_VLAN_CFG (UINT32_C(0x33))
-#define HWRM_FUNC_VLAN_QCFG (UINT32_C(0x34))
-#define HWRM_QUEUE_PFCENABLE_QCFG (UINT32_C(0x35))
-#define HWRM_QUEUE_PFCENABLE_CFG (UINT32_C(0x36))
-#define HWRM_QUEUE_PRI2COS_QCFG (UINT32_C(0x37))
-#define HWRM_QUEUE_PRI2COS_CFG (UINT32_C(0x38))
-#define HWRM_QUEUE_COS2BW_QCFG (UINT32_C(0x39))
-#define HWRM_QUEUE_COS2BW_CFG (UINT32_C(0x3a))
-#define HWRM_VNIC_ALLOC (UINT32_C(0x40))
-#define HWRM_VNIC_ALLOC (UINT32_C(0x40))
-#define HWRM_VNIC_FREE (UINT32_C(0x41))
-#define HWRM_VNIC_CFG (UINT32_C(0x42))
-#define HWRM_VNIC_QCFG (UINT32_C(0x43))
-#define HWRM_VNIC_TPA_CFG (UINT32_C(0x44))
-#define HWRM_VNIC_RSS_CFG (UINT32_C(0x46))
-#define HWRM_VNIC_RSS_QCFG (UINT32_C(0x47))
-#define HWRM_VNIC_PLCMODES_CFG (UINT32_C(0x48))
-#define HWRM_VNIC_PLCMODES_QCFG (UINT32_C(0x49))
-#define HWRM_VNIC_QCAPS (UINT32_C(0x4a))
-#define HWRM_RING_ALLOC (UINT32_C(0x50))
-#define HWRM_RING_FREE (UINT32_C(0x51))
-#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS (UINT32_C(0x52))
-#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS (UINT32_C(0x53))
-#define HWRM_RING_RESET (UINT32_C(0x5e))
-#define HWRM_RING_GRP_ALLOC (UINT32_C(0x60))
-#define HWRM_RING_GRP_FREE (UINT32_C(0x61))
-#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70))
-#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (UINT32_C(0x71))
-#define HWRM_CFA_L2_FILTER_ALLOC (UINT32_C(0x90))
-#define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91))
-#define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92))
-#define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93))
- /* Reserved for future use */
-#define HWRM_CFA_VLAN_ANTISPOOF_CFG (UINT32_C(0x94))
-#define HWRM_CFA_TUNNEL_FILTER_ALLOC (UINT32_C(0x95))
-#define HWRM_CFA_TUNNEL_FILTER_FREE (UINT32_C(0x96))
-#define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99))
-#define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a))
-#define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b))
-#define HWRM_CFA_EM_FLOW_ALLOC (UINT32_C(0x9c))
-#define HWRM_CFA_EM_FLOW_FREE (UINT32_C(0x9d))
-#define HWRM_CFA_EM_FLOW_CFG (UINT32_C(0x9e))
-#define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0))
-#define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1))
-#define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2))
-#define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0))
-#define HWRM_STAT_CTX_FREE (UINT32_C(0xb1))
-#define HWRM_STAT_CTX_QUERY (UINT32_C(0xb2))
-#define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3))
-#define HWRM_FW_RESET (UINT32_C(0xc0))
-#define HWRM_FW_QSTATUS (UINT32_C(0xc1))
-#define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0))
-#define HWRM_REJECT_FWD_RESP (UINT32_C(0xd1))
-#define HWRM_FWD_RESP (UINT32_C(0xd2))
-#define HWRM_FWD_ASYNC_EVENT_CMPL (UINT32_C(0xd3))
-#define HWRM_TEMP_MONITOR_QUERY (UINT32_C(0xe0))
-#define HWRM_WOL_FILTER_ALLOC (UINT32_C(0xf0))
-#define HWRM_WOL_FILTER_FREE (UINT32_C(0xf1))
-#define HWRM_WOL_FILTER_QCFG (UINT32_C(0xf2))
-#define HWRM_WOL_REASON_QCFG (UINT32_C(0xf3))
-#define HWRM_DBG_DUMP (UINT32_C(0xff14))
-#define HWRM_NVM_VALIDATE_OPTION (UINT32_C(0xffef))
-#define HWRM_NVM_FLUSH (UINT32_C(0xfff0))
-#define HWRM_NVM_GET_VARIABLE (UINT32_C(0xfff1))
-#define HWRM_NVM_SET_VARIABLE (UINT32_C(0xfff2))
-#define HWRM_NVM_INSTALL_UPDATE (UINT32_C(0xfff3))
-#define HWRM_NVM_MODIFY (UINT32_C(0xfff4))
-#define HWRM_NVM_VERIFY_UPDATE (UINT32_C(0xfff5))
-#define HWRM_NVM_GET_DEV_INFO (UINT32_C(0xfff6))
-#define HWRM_NVM_ERASE_DIR_ENTRY (UINT32_C(0xfff7))
-#define HWRM_NVM_MOD_DIR_ENTRY (UINT32_C(0xfff8))
-#define HWRM_NVM_FIND_DIR_ENTRY (UINT32_C(0xfff9))
-#define HWRM_NVM_GET_DIR_ENTRIES (UINT32_C(0xfffa))
-#define HWRM_NVM_GET_DIR_INFO (UINT32_C(0xfffb))
-#define HWRM_NVM_RAW_DUMP (UINT32_C(0xfffc))
-#define HWRM_NVM_READ (UINT32_C(0xfffd))
-#define HWRM_NVM_WRITE (UINT32_C(0xfffe))
-#define HWRM_NVM_RAW_WRITE_BLK (UINT32_C(0xffff))
+#define CMD_DISCR_TLV_ENCAP UINT32_C(0x8000)
+#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
+
+
+/* HWRM request message */
+#define TLV_TYPE_HWRM_REQUEST UINT32_C(0x1)
+/* HWRM response message */
+#define TLV_TYPE_HWRM_RESPONSE UINT32_C(0x2)
+/* RoCE slow path command */
+#define TLV_TYPE_ROCE_SP_COMMAND UINT32_C(0x3)
+/* Engine CKV - The device's serial number. */
+#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER UINT32_C(0x8001)
+/* Engine CKV - Per-function random nonce data. */
+#define TLV_TYPE_ENGINE_CKV_NONCE UINT32_C(0x8002)
+/* Engine CKV - Initialization vector. */
+#define TLV_TYPE_ENGINE_CKV_IV UINT32_C(0x8003)
+/* Engine CKV - Authentication tag. */
+#define TLV_TYPE_ENGINE_CKV_AUTH_TAG UINT32_C(0x8004)
+/* Engine CKV - The encrypted data. */
+#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT UINT32_C(0x8005)
+/* Engine CKV - Supported algorithms. */
+#define TLV_TYPE_ENGINE_CKV_ALGORITHMS UINT32_C(0x8006)
+/* Engine CKV - The EC curve name and ECC public key information. */
+#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY UINT32_C(0x8007)
+/* Engine CKV - The ECDSA signature. */
+#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE UINT32_C(0x8008)
+#define TLV_TYPE_LAST \
+ TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
+
+
+/* tlv (size:64b/8B) */
+struct tlv {
+ /*
+ * The command discriminator is used to differentiate between various
+ * types of HWRM messages. This includes legacy HWRM and RoCE slowpath
+ * command messages as well as newer TLV encapsulated HWRM commands.
+ *
+ * For TLV encapsulated messages this field must be 0x8000.
+ */
+ uint16_t cmd_discr;
+ uint8_t reserved_8b;
+ uint8_t flags;
+ /*
+ * Indicates the presence of additional TLV encapsulated data
+ * follows this TLV.
+ */
+ #define TLV_FLAGS_MORE UINT32_C(0x1)
+ /* Last TLV in a sequence of TLVs. */
+ #define TLV_FLAGS_MORE_LAST UINT32_C(0x0)
+ /* More TLVs follow this TLV. */
+ #define TLV_FLAGS_MORE_NOT_LAST UINT32_C(0x1)
+ /*
+ * When an HWRM receiver detects a TLV type that it does not
+ * support with the TLV required flag set, the receiver must
+ * reject the HWRM message with an error code indicating an
+ * unsupported TLV type.
+ */
+ #define TLV_FLAGS_REQUIRED UINT32_C(0x2)
+ /* No */
+ #define TLV_FLAGS_REQUIRED_NO (UINT32_C(0x0) << 1)
+ /* Yes */
+ #define TLV_FLAGS_REQUIRED_YES (UINT32_C(0x1) << 1)
+ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
+ /*
+ * This field defines the TLV type value which is divided into
+ * two ranges to differentiate between global and local TLV types.
+ * Global TLV types must be unique across all defined TLV types.
+ * Local TLV types are valid only for extensions to a given
+ * HWRM message and may be repeated across different HWRM message
+ * types. There is a direct correlation of each HWRM message type
+ * to a single global TLV type value.
+ *
+ * Global TLV range: `0 - (63k-1)`
+ *
+ * Local TLV range: `63k - (64k-1)`
+ */
+ uint16_t tlv_type;
+ /*
+ * Length of the message data encapsulated by this TLV in bytes.
+ * This length does not include the size of the TLV header itself
+ * and it must be an integer multiple of 8B.
+ */
+ uint16_t length;
+} __attribute__((packed));
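
A sketch of filling that header for a single, final TLV-encapsulated HWRM request, using the constants defined above (payload_len is assumed to already be padded to a multiple of 8 bytes, as the length field requires):

/* Sketch only: one TLV, no continuation. */
static void tlv_fill_sketch(struct tlv *t, uint16_t payload_len)
{
	t->cmd_discr = CMD_DISCR_TLV_ENCAP;	/* 0x8000 marks TLV framing */
	t->reserved_8b = 0;
	t->flags = TLV_FLAGS_MORE_LAST;		/* no further TLVs follow */
	t->tlv_type = TLV_TYPE_HWRM_REQUEST;
	t->length = payload_len;		/* excludes this 8B header */
}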
+
+/* Input */
+/* input (size:128b/16B) */
+struct input {
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+ /*
+ * This value indicates what completion ring the request will
+ * be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t cmpl_ring;
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+ /*
+ * Target ID of this command.
+ *
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* Output */
+/* output (size:64b/8B) */
+struct output {
+ /*
+ * Pass/Fail or error type
+ *
+ * Note: receiver to verify the in parameters, and fail the call
+ * with an error when appropriate
+ */
+ uint16_t error_code;
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+ /* This field provides the original sequence number of the command. */
+ uint16_t seq_id;
+ /*
+ * This field is the length of the response in bytes. The
+ * last byte of the response is a valid flag that will read
+ * as '1' when the command has been completely written to
+ * memory.
+ */
+ uint16_t resp_len;
+} __attribute__((packed));
+
+/* Short Command Structure */
+/* hwrm_short_input (size:128b/16B) */
+struct hwrm_short_input {
+ /*
+ * This field indicates the type of request in the request buffer.
+ * The format for the rest of the command (request) is determined
+ * by this field.
+ */
+ uint16_t req_type;
+ /*
+ * This field indicates a signature that is used to identify the short
+ * form of the command listed here. This field shall be set to
+ * 17185 (0x4321).
+ */
+ uint16_t signature;
+ /* Signature indicating this is a short form of HWRM command */
+ #define HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD UINT32_C(0x4321)
+ #define HWRM_SHORT_INPUT_SIGNATURE_LAST \
+ HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD
+ /* Reserved for future use. */
+ uint16_t unused_0;
+ /* This value indicates the length of the request. */
+ uint16_t size;
+ /*
+ * This is the host address where the request was written.
+ * This area must be 16B aligned.
+ */
+ uint64_t req_addr;
+} __attribute__((packed));
/*
- * Note: The Host Software Interface (HSI) and Hardware Resource Manager (HWRM)
- * specification describes the data structures used in Ethernet packet or RDMA
- * message data transfers as well as an abstract interface for managing Ethernet
- * NIC hardware resources.
- */
-/* Ethernet Data path Host Structures */
-/*
- * Description: The following three sections document the host structures used
- * between device and software drivers for communicating Ethernet packets.
+ * Command numbering
+ * # NOTE - definitions already in hwrm_req_type, in hwrm_types.yaml
+ * # So only structure definition is provided here.
*/
-/* BD Ring Structures */
+/* cmd_nums (size:64b/8B) */
+struct cmd_nums {
+ /*
+ * This version of the specification defines the commands listed in
+ * the table below. The following are general implementation
+ * requirements for these commands:
+ *
+ * # All commands listed below that are marked neither
+ * reserved nor experimental shall be implemented by the HWRM.
+ * # A HWRM client compliant to this specification should not use
+ * commands outside of the list below.
+ * # A HWRM client compliant to this specification should not use
+ * command numbers marked reserved below.
+ * # A command marked experimental below may not be implemented
+ * by the HWRM.
+ * # A command marked experimental may change in the
+ * future version of the HWRM specification.
+ * # A command not listed below may be implemented by the HWRM.
+ * The behavior of commands that are not listed below is outside
+ * the scope of this specification.
+ */
+ uint16_t req_type;
+ #define HWRM_VER_GET UINT32_C(0x0)
+ #define HWRM_FUNC_BUF_UNRGTR UINT32_C(0xe)
+ #define HWRM_FUNC_VF_CFG UINT32_C(0xf)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED1 UINT32_C(0x10)
+ #define HWRM_FUNC_RESET UINT32_C(0x11)
+ #define HWRM_FUNC_GETFID UINT32_C(0x12)
+ #define HWRM_FUNC_VF_ALLOC UINT32_C(0x13)
+ #define HWRM_FUNC_VF_FREE UINT32_C(0x14)
+ #define HWRM_FUNC_QCAPS UINT32_C(0x15)
+ #define HWRM_FUNC_QCFG UINT32_C(0x16)
+ #define HWRM_FUNC_CFG UINT32_C(0x17)
+ #define HWRM_FUNC_QSTATS UINT32_C(0x18)
+ #define HWRM_FUNC_CLR_STATS UINT32_C(0x19)
+ #define HWRM_FUNC_DRV_UNRGTR UINT32_C(0x1a)
+ #define HWRM_FUNC_VF_RESC_FREE UINT32_C(0x1b)
+ #define HWRM_FUNC_VF_VNIC_IDS_QUERY UINT32_C(0x1c)
+ #define HWRM_FUNC_DRV_RGTR UINT32_C(0x1d)
+ #define HWRM_FUNC_DRV_QVER UINT32_C(0x1e)
+ #define HWRM_FUNC_BUF_RGTR UINT32_C(0x1f)
+ #define HWRM_PORT_PHY_CFG UINT32_C(0x20)
+ #define HWRM_PORT_MAC_CFG UINT32_C(0x21)
+ /* Experimental */
+ #define HWRM_PORT_TS_QUERY UINT32_C(0x22)
+ #define HWRM_PORT_QSTATS UINT32_C(0x23)
+ #define HWRM_PORT_LPBK_QSTATS UINT32_C(0x24)
+ /* Experimental */
+ #define HWRM_PORT_CLR_STATS UINT32_C(0x25)
+ /* Experimental */
+ #define HWRM_PORT_LPBK_CLR_STATS UINT32_C(0x26)
+ #define HWRM_PORT_PHY_QCFG UINT32_C(0x27)
+ #define HWRM_PORT_MAC_QCFG UINT32_C(0x28)
+ /* Experimental */
+ #define HWRM_PORT_MAC_PTP_QCFG UINT32_C(0x29)
+ #define HWRM_PORT_PHY_QCAPS UINT32_C(0x2a)
+ #define HWRM_PORT_PHY_I2C_WRITE UINT32_C(0x2b)
+ #define HWRM_PORT_PHY_I2C_READ UINT32_C(0x2c)
+ #define HWRM_PORT_LED_CFG UINT32_C(0x2d)
+ #define HWRM_PORT_LED_QCFG UINT32_C(0x2e)
+ #define HWRM_PORT_LED_QCAPS UINT32_C(0x2f)
+ #define HWRM_QUEUE_QPORTCFG UINT32_C(0x30)
+ #define HWRM_QUEUE_QCFG UINT32_C(0x31)
+ #define HWRM_QUEUE_CFG UINT32_C(0x32)
+ #define HWRM_FUNC_VLAN_CFG UINT32_C(0x33)
+ #define HWRM_FUNC_VLAN_QCFG UINT32_C(0x34)
+ #define HWRM_QUEUE_PFCENABLE_QCFG UINT32_C(0x35)
+ #define HWRM_QUEUE_PFCENABLE_CFG UINT32_C(0x36)
+ #define HWRM_QUEUE_PRI2COS_QCFG UINT32_C(0x37)
+ #define HWRM_QUEUE_PRI2COS_CFG UINT32_C(0x38)
+ #define HWRM_QUEUE_COS2BW_QCFG UINT32_C(0x39)
+ #define HWRM_QUEUE_COS2BW_CFG UINT32_C(0x3a)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP_QCAPS UINT32_C(0x3b)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP2PRI_QCFG UINT32_C(0x3c)
+ /* Experimental */
+ #define HWRM_QUEUE_DSCP2PRI_CFG UINT32_C(0x3d)
+ #define HWRM_VNIC_ALLOC UINT32_C(0x40)
+ #define HWRM_VNIC_FREE UINT32_C(0x41)
+ #define HWRM_VNIC_CFG UINT32_C(0x42)
+ #define HWRM_VNIC_QCFG UINT32_C(0x43)
+ #define HWRM_VNIC_TPA_CFG UINT32_C(0x44)
+ /* Experimental */
+ #define HWRM_VNIC_TPA_QCFG UINT32_C(0x45)
+ #define HWRM_VNIC_RSS_CFG UINT32_C(0x46)
+ #define HWRM_VNIC_RSS_QCFG UINT32_C(0x47)
+ #define HWRM_VNIC_PLCMODES_CFG UINT32_C(0x48)
+ #define HWRM_VNIC_PLCMODES_QCFG UINT32_C(0x49)
+ #define HWRM_VNIC_QCAPS UINT32_C(0x4a)
+ #define HWRM_RING_ALLOC UINT32_C(0x50)
+ #define HWRM_RING_FREE UINT32_C(0x51)
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS UINT32_C(0x52)
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS UINT32_C(0x53)
+ #define HWRM_RING_RESET UINT32_C(0x5e)
+ #define HWRM_RING_GRP_ALLOC UINT32_C(0x60)
+ #define HWRM_RING_GRP_FREE UINT32_C(0x61)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED5 UINT32_C(0x64)
+ /* Reserved for future use. */
+ #define HWRM_RESERVED6 UINT32_C(0x65)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC UINT32_C(0x70)
+ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE UINT32_C(0x71)
+ #define HWRM_CFA_L2_FILTER_ALLOC UINT32_C(0x90)
+ #define HWRM_CFA_L2_FILTER_FREE UINT32_C(0x91)
+ #define HWRM_CFA_L2_FILTER_CFG UINT32_C(0x92)
+ #define HWRM_CFA_L2_SET_RX_MASK UINT32_C(0x93)
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG UINT32_C(0x94)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC UINT32_C(0x95)
+ #define HWRM_CFA_TUNNEL_FILTER_FREE UINT32_C(0x96)
+ /* Experimental */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC UINT32_C(0x97)
+ /* Experimental */
+ #define HWRM_CFA_ENCAP_RECORD_FREE UINT32_C(0x98)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC UINT32_C(0x99)
+ #define HWRM_CFA_NTUPLE_FILTER_FREE UINT32_C(0x9a)
+ #define HWRM_CFA_NTUPLE_FILTER_CFG UINT32_C(0x9b)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_ALLOC UINT32_C(0x9c)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_FREE UINT32_C(0x9d)
+ /* Experimental */
+ #define HWRM_CFA_EM_FLOW_CFG UINT32_C(0x9e)
+ #define HWRM_TUNNEL_DST_PORT_QUERY UINT32_C(0xa0)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa1)
+ #define HWRM_TUNNEL_DST_PORT_FREE UINT32_C(0xa2)
+ #define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0)
+ #define HWRM_STAT_CTX_FREE UINT32_C(0xb1)
+ #define HWRM_STAT_CTX_QUERY UINT32_C(0xb2)
+ #define HWRM_STAT_CTX_CLR_STATS UINT32_C(0xb3)
+ #define HWRM_PORT_QSTATS_EXT UINT32_C(0xb4)
+ #define HWRM_FW_RESET UINT32_C(0xc0)
+ #define HWRM_FW_QSTATUS UINT32_C(0xc1)
+ /* Experimental */
+ #define HWRM_FW_SET_TIME UINT32_C(0xc8)
+ /* Experimental */
+ #define HWRM_FW_GET_TIME UINT32_C(0xc9)
+ /* Experimental */
+ #define HWRM_FW_SET_STRUCTURED_DATA UINT32_C(0xca)
+ /* Experimental */
+ #define HWRM_FW_GET_STRUCTURED_DATA UINT32_C(0xcb)
+ /* Experimental */
+ #define HWRM_FW_IPC_MAILBOX UINT32_C(0xcc)
+ #define HWRM_EXEC_FWD_RESP UINT32_C(0xd0)
+ #define HWRM_REJECT_FWD_RESP UINT32_C(0xd1)
+ #define HWRM_FWD_RESP UINT32_C(0xd2)
+ #define HWRM_FWD_ASYNC_EVENT_CMPL UINT32_C(0xd3)
+ #define HWRM_OEM_CMD UINT32_C(0xd4)
+ #define HWRM_TEMP_MONITOR_QUERY UINT32_C(0xe0)
+ #define HWRM_WOL_FILTER_ALLOC UINT32_C(0xf0)
+ #define HWRM_WOL_FILTER_FREE UINT32_C(0xf1)
+ #define HWRM_WOL_FILTER_QCFG UINT32_C(0xf2)
+ #define HWRM_WOL_REASON_QCFG UINT32_C(0xf3)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_ALLOC UINT32_C(0xf5)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_FREE UINT32_C(0xf6)
+ /* Experimental */
+ #define HWRM_CFA_METER_PROFILE_CFG UINT32_C(0xf7)
+ /* Experimental */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC UINT32_C(0xf8)
+ /* Experimental */
+ #define HWRM_CFA_METER_INSTANCE_FREE UINT32_C(0xf9)
+ /* Experimental */
+ #define HWRM_CFA_VFR_ALLOC UINT32_C(0xfd)
+ /* Experimental */
+ #define HWRM_CFA_VFR_FREE UINT32_C(0xfe)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_ALLOC UINT32_C(0x100)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_FREE UINT32_C(0x101)
+ /* Experimental */
+ #define HWRM_CFA_VF_PAIR_INFO UINT32_C(0x102)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_ALLOC UINT32_C(0x103)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_FREE UINT32_C(0x104)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_FLUSH UINT32_C(0x105)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_STATS UINT32_C(0x106)
+ /* Experimental */
+ #define HWRM_CFA_FLOW_INFO UINT32_C(0x107)
+ /* Experimental */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC UINT32_C(0x108)
+ /* Experimental */
+ #define HWRM_CFA_DECAP_FILTER_FREE UINT32_C(0x109)
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG UINT32_C(0x10a)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC UINT32_C(0x10b)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE UINT32_C(0x10c)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_ALLOC UINT32_C(0x10d)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_FREE UINT32_C(0x10e)
+ /* Experimental */
+ #define HWRM_CFA_PAIR_INFO UINT32_C(0x10f)
+ /* Experimental */
+ #define HWRM_FW_IPC_MSG UINT32_C(0x110)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO UINT32_C(0x111)
+ /* Engine CKV - Ping the device and SRT firmware to get the public key. */
+ #define HWRM_ENGINE_CKV_HELLO UINT32_C(0x12d)
+ /* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */
+ #define HWRM_ENGINE_CKV_STATUS UINT32_C(0x12e)
+ /* Engine CKV - Add a new CKEK used to encrypt keys. */
+ #define HWRM_ENGINE_CKV_CKEK_ADD UINT32_C(0x12f)
+ /* Engine CKV - Delete a previously added CKEK. */
+ #define HWRM_ENGINE_CKV_CKEK_DELETE UINT32_C(0x130)
+ /* Engine CKV - Add a new key to the key vault. */
+ #define HWRM_ENGINE_CKV_KEY_ADD UINT32_C(0x131)
+ /* Engine CKV - Delete a key from the key vault. */
+ #define HWRM_ENGINE_CKV_KEY_DELETE UINT32_C(0x132)
+ /* Engine CKV - Delete all keys from the key vault. */
+ #define HWRM_ENGINE_CKV_FLUSH UINT32_C(0x133)
+ /* Engine CKV - Get random data. */
+ #define HWRM_ENGINE_CKV_RNG_GET UINT32_C(0x134)
+ /* Engine CKV - Generate and encrypt a new AES key. */
+ #define HWRM_ENGINE_CKV_KEY_GEN UINT32_C(0x135)
+ /* Engine - Query the available queue groups configuration. */
+ #define HWRM_ENGINE_QG_CONFIG_QUERY UINT32_C(0x13c)
+ /* Engine - Query the queue groups assigned to a function. */
+ #define HWRM_ENGINE_QG_QUERY UINT32_C(0x13d)
+ /* Engine - Query the available queue group meter profile configuration. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY UINT32_C(0x13e)
+ /* Engine - Query the configuration of a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY UINT32_C(0x13f)
+ /* Engine - Allocate a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC UINT32_C(0x140)
+ /* Engine - Free a queue group meter profile. */
+ #define HWRM_ENGINE_QG_METER_PROFILE_FREE UINT32_C(0x141)
+ /* Engine - Query the meters assigned to a queue group. */
+ #define HWRM_ENGINE_QG_METER_QUERY UINT32_C(0x142)
+ /* Engine - Bind a queue group meter profile to a queue group. */
+ #define HWRM_ENGINE_QG_METER_BIND UINT32_C(0x143)
+ /* Engine - Unbind a queue group meter profile from a queue group. */
+ #define HWRM_ENGINE_QG_METER_UNBIND UINT32_C(0x144)
+ /* Engine - Bind a queue group to a function. */
+ #define HWRM_ENGINE_QG_FUNC_BIND UINT32_C(0x145)
+ /* Engine - Query the scheduling group configuration. */
+ #define HWRM_ENGINE_SG_CONFIG_QUERY UINT32_C(0x146)
+ /* Engine - Query the queue groups assigned to a scheduling group. */
+ #define HWRM_ENGINE_SG_QUERY UINT32_C(0x147)
+ /* Engine - Query the configuration of a scheduling group's meter profiles. */
+ #define HWRM_ENGINE_SG_METER_QUERY UINT32_C(0x148)
+ /* Engine - Configure a scheduling group's meter profiles. */
+ #define HWRM_ENGINE_SG_METER_CONFIG UINT32_C(0x149)
+ /* Engine - Bind a queue group to a scheduling group. */
+ #define HWRM_ENGINE_SG_QG_BIND UINT32_C(0x14a)
+ /* Engine - Unbind a queue group from its scheduling group. */
+ #define HWRM_ENGINE_QG_SG_UNBIND UINT32_C(0x14b)
+ /* Engine - Query the Engine configuration. */
+ #define HWRM_ENGINE_CONFIG_QUERY UINT32_C(0x154)
+ /* Engine - Configure the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_CONFIG UINT32_C(0x155)
+ /* Engine - Clear the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_CLEAR UINT32_C(0x156)
+ /* Engine - Query the statistics accumulator for an Engine. */
+ #define HWRM_ENGINE_STATS_QUERY UINT32_C(0x157)
+ /* Engine - Allocate an Engine RQ. */
+ #define HWRM_ENGINE_RQ_ALLOC UINT32_C(0x15e)
+ /* Engine - Free an Engine RQ. */
+ #define HWRM_ENGINE_RQ_FREE UINT32_C(0x15f)
+ /* Engine - Allocate an Engine CQ. */
+ #define HWRM_ENGINE_CQ_ALLOC UINT32_C(0x160)
+ /* Engine - Free an Engine CQ. */
+ #define HWRM_ENGINE_CQ_FREE UINT32_C(0x161)
+ /* Engine - Allocate an NQ. */
+ #define HWRM_ENGINE_NQ_ALLOC UINT32_C(0x162)
+ /* Engine - Free an NQ. */
+ #define HWRM_ENGINE_NQ_FREE UINT32_C(0x163)
+ /* Engine - Set the on-die RQE credit update location. */
+ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS UINT32_C(0x164)
+ /* Experimental */
+ #define HWRM_FUNC_RESOURCE_QCAPS UINT32_C(0x190)
+ /* Experimental */
+ #define HWRM_FUNC_VF_RESOURCE_CFG UINT32_C(0x191)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS UINT32_C(0x192)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_CFG UINT32_C(0x193)
+ /* Experimental */
+ #define HWRM_FUNC_BACKING_STORE_QCFG UINT32_C(0x194)
+ /* Experimental */
+ #define HWRM_SELFTEST_QLIST UINT32_C(0x200)
+ /* Experimental */
+ #define HWRM_SELFTEST_EXEC UINT32_C(0x201)
+ /* Experimental */
+ #define HWRM_SELFTEST_IRQ UINT32_C(0x202)
+ /* Experimental */
+ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA UINT32_C(0x203)
+ /* Experimental */
+ #define HWRM_PCIE_QSTATS UINT32_C(0x204)
+ /* Experimental */
+ #define HWRM_DBG_READ_DIRECT UINT32_C(0xff10)
+ /* Experimental */
+ #define HWRM_DBG_READ_INDIRECT UINT32_C(0xff11)
+ /* Experimental */
+ #define HWRM_DBG_WRITE_DIRECT UINT32_C(0xff12)
+ /* Experimental */
+ #define HWRM_DBG_WRITE_INDIRECT UINT32_C(0xff13)
+ #define HWRM_DBG_DUMP UINT32_C(0xff14)
+ /* Experimental */
+ #define HWRM_DBG_ERASE_NVM UINT32_C(0xff15)
+ /* Experimental */
+ #define HWRM_DBG_CFG UINT32_C(0xff16)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_LIST UINT32_C(0xff17)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_INITIATE UINT32_C(0xff18)
+ /* Experimental */
+ #define HWRM_DBG_COREDUMP_RETRIEVE UINT32_C(0xff19)
+ #define HWRM_DBG_I2C_CMD UINT32_C(0xff1b)
+ /* Experimental */
+ #define HWRM_NVM_FACTORY_DEFAULTS UINT32_C(0xffee)
+ #define HWRM_NVM_VALIDATE_OPTION UINT32_C(0xffef)
+ #define HWRM_NVM_FLUSH UINT32_C(0xfff0)
+ #define HWRM_NVM_GET_VARIABLE UINT32_C(0xfff1)
+ #define HWRM_NVM_SET_VARIABLE UINT32_C(0xfff2)
+ #define HWRM_NVM_INSTALL_UPDATE UINT32_C(0xfff3)
+ #define HWRM_NVM_MODIFY UINT32_C(0xfff4)
+ #define HWRM_NVM_VERIFY_UPDATE UINT32_C(0xfff5)
+ #define HWRM_NVM_GET_DEV_INFO UINT32_C(0xfff6)
+ #define HWRM_NVM_ERASE_DIR_ENTRY UINT32_C(0xfff7)
+ #define HWRM_NVM_MOD_DIR_ENTRY UINT32_C(0xfff8)
+ #define HWRM_NVM_FIND_DIR_ENTRY UINT32_C(0xfff9)
+ #define HWRM_NVM_GET_DIR_ENTRIES UINT32_C(0xfffa)
+ #define HWRM_NVM_GET_DIR_INFO UINT32_C(0xfffb)
+ #define HWRM_NVM_RAW_DUMP UINT32_C(0xfffc)
+ #define HWRM_NVM_READ UINT32_C(0xfffd)
+ #define HWRM_NVM_WRITE UINT32_C(0xfffe)
+ #define HWRM_NVM_RAW_WRITE_BLK UINT32_C(0xffff)
+ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
+ uint16_t unused_0[3];
+} __attribute__((packed));
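
A minimal sketch of the client-side requirement above: stay within the
listed, non-reserved, non-experimental command numbers. The helper name and
the "hsi_struct_def_dpdk.h" include path are assumptions, and only a
handful of the listed commands are shown.

#include <stdbool.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

static bool hwrm_req_type_known(uint16_t req_type)
{
	switch (req_type) {
	case HWRM_VER_GET:
	case HWRM_FUNC_RESET:
	case HWRM_FUNC_QCAPS:
	case HWRM_RING_ALLOC:
	case HWRM_STAT_CTX_ALLOC:
	case HWRM_NVM_READ:
		return true;	/* ...extend with the remaining commands */
	default:
		return false;	/* reserved, experimental or out of spec */
	}
}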
+
+/* Return Codes */
+/* ret_codes (size:64b/8B) */
+struct ret_codes {
+ uint16_t error_code;
+ /* Request was successfully executed by the HWRM. */
+ #define HWRM_ERR_CODE_SUCCESS UINT32_C(0x0)
+ /* The HWRM failed to execute the request. */
+ #define HWRM_ERR_CODE_FAIL UINT32_C(0x1)
+ /*
+ * The request contains invalid argument(s) or input
+ * parameters.
+ */
+ #define HWRM_ERR_CODE_INVALID_PARAMS UINT32_C(0x2)
+ /*
+ * The requester is not allowed to access the requested
+ * resource. This error code shall be provided in a
+ * response to a request to query or modify an existing
+ * resource that is not accessible by the requester.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED UINT32_C(0x3)
+ /*
+ * The HWRM is unable to allocate the requested resource.
+ * This code only applies to requests for HWRM resource
+ * allocations.
+ */
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR UINT32_C(0x4)
+ /*
+ * Invalid combination of flags is specified in the
+ * request.
+ */
+ #define HWRM_ERR_CODE_INVALID_FLAGS UINT32_C(0x5)
+ /*
+ * Invalid combination of enables fields is specified in
+ * the request.
+ */
+ #define HWRM_ERR_CODE_INVALID_ENABLES UINT32_C(0x6)
+ /*
+ * Request contains a required TLV that is not supported by
+ * the installed version of firmware.
+ */
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV UINT32_C(0x7)
+ /*
+ * No firmware buffer available to accept the request. Driver
+ * should retry the request.
+ */
+ #define HWRM_ERR_CODE_NO_BUFFER UINT32_C(0x8)
+ /*
+ * Generic HWRM execution error that represents an
+ * internal error.
+ */
+ #define HWRM_ERR_CODE_HWRM_ERROR UINT32_C(0xf)
+ /* Unknown error */
+ #define HWRM_ERR_CODE_UNKNOWN_ERR UINT32_C(0xfffe)
+ /* Unsupported or invalid command */
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED UINT32_C(0xffff)
+ #define HWRM_ERR_CODE_LAST \
+ HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ uint16_t unused_0[3];
+} __attribute__((packed));
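
The HWRM_ERR_CODE_NO_BUFFER description above says the driver should retry;
a hedged sketch of that policy follows. The send_hwrm_request() helper,
assumed to return the error_code, is not part of this header.

#include <stdint.h>
#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

extern uint16_t send_hwrm_request(void *req, uint32_t len);	/* assumed */

static int hwrm_send_with_retry(void *req, uint32_t len, int max_tries)
{
	uint16_t rc = HWRM_ERR_CODE_FAIL;

	while (max_tries-- > 0) {
		rc = send_hwrm_request(req, len);
		if (rc != HWRM_ERR_CODE_NO_BUFFER)
			break;	/* only NO_BUFFER is worth retrying */
	}
	return rc == HWRM_ERR_CODE_SUCCESS ? 0 : -1;
}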
+
+/* Output */
+/* hwrm_err_output (size:128b/16B) */
+struct hwrm_err_output {
+ /*
+ * Pass/Fail or error type
+ *
+	 * Note: the receiver is to verify the input parameters and fail
+	 * the call with an error when appropriate.
+ */
+ uint16_t error_code;
+ /* This field returns the type of original request. */
+ uint16_t req_type;
+ /* This field provides original sequence number of the command. */
+ uint16_t seq_id;
+ /*
+ * This field is the length of the response in bytes. The
+ * last byte of the response is a valid flag that will read
+ * as '1' when the command has been completely written to
+ * memory.
+ */
+ uint16_t resp_len;
+ /* debug info for this error response. */
+ uint32_t opaque_0;
+ /* debug info for this error response. */
+ uint16_t opaque_1;
+ /*
+ * In the case of an error response, command specific error
+ * code is returned in this field.
+ */
+ uint8_t cmd_err;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
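
A sketch of decoding this layout when error_code is non-zero; the struct
and error codes come from this header, while the logging shape is purely
illustrative.

#include <stdio.h>
#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

static void hwrm_log_err(const struct hwrm_err_output *resp)
{
	if (resp->error_code == HWRM_ERR_CODE_SUCCESS)
		return;
	/* cmd_err carries a command-specific code on top of error_code */
	fprintf(stderr, "req 0x%x seq %u failed: err 0x%x cmd_err 0x%x\n",
		(unsigned)resp->req_type, (unsigned)resp->seq_id,
		(unsigned)resp->error_code, (unsigned)resp->cmd_err);
}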
/*
- * Description: This structure is used to inform the NIC of a location for and
- * an aggregation buffer that will be used for packet data that is received. An
- * aggregation buffer creates a different kind of completion operation for a
- * packet where a variable number of BDs may be used to place the packet in the
- * host. RX Rings that have aggregation buffers are known as aggregation rings
- * and must contain only aggregation buffers.
+ * The following is the signature for an HWRM message field that indicates
+ * not applicable (all F's). Cast it to the size of the field if needed.
*/
-/* Short TX BD (16 bytes) */
+#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
+/* The longest request: hwrm_func_buf_rgtr */
+#define HWRM_MAX_REQ_LEN 128
+/* The longest response: hwrm_selftest_qlist */
+#define HWRM_MAX_RESP_LEN 280
+/* 7 bit indirection table index. */
+#define HW_HASH_INDEX_SIZE 0x80
+#define HW_HASH_KEY_SIZE 40
+/* valid key for HWRM response */
+#define HWRM_RESP_VALID_KEY 1
+#define HWRM_VERSION_MAJOR 1
+#define HWRM_VERSION_MINOR 9
+#define HWRM_VERSION_UPDATE 2
+/* non-zero means beta version */
+#define HWRM_VERSION_RSVD 9
+#define HWRM_VERSION_STR "1.9.2.9"
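
A minimal sketch of waiting for a response using HWRM_RESP_VALID_KEY: the
device writes the valid byte last, so the driver polls it before trusting
the rest of the buffer. A real driver would also add a read barrier and a
delay between polls; both are omitted here.

#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

static int hwrm_wait_for_valid(volatile struct hwrm_err_output *resp,
			       unsigned long max_polls)
{
	while (max_polls-- > 0) {
		if (resp->valid == HWRM_RESP_VALID_KEY)
			return 0;	/* response completely written */
	}
	return -1;	/* device never completed the response */
}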
+
+/****************
+ * hwrm_ver_get *
+ ****************/
+
+
+/* hwrm_ver_get_input (size:192b/24B) */
+struct hwrm_ver_get_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification.
+ */
+ uint8_t hwrm_intf_maj;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification.
+ * This can be due to addition or removal of functionality.
+ * HWRM interface specifications with the same major version
+ * but different minor versions are compatible.
+ */
+ uint8_t hwrm_intf_min;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface update version is used to reflect minor
+ * changes or bug fixes to a released HWRM interface
+ * specification.
+ */
+ uint8_t hwrm_intf_upd;
+ uint8_t unused_0[5];
+} __attribute__((packed));
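
A sketch of populating this request. Byte-order conversion is omitted for
brevity, the DMA address is assumed to come from the caller's allocator,
and HWRM_NA_SIGNATURE marks the completion ring as unused.

#include <stdint.h>
#include <string.h>
#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

static void hwrm_prep_ver_get(struct hwrm_ver_get_input *req,
			      uint16_t seq, uint64_t resp_buf_pa)
{
	memset(req, 0, sizeof(*req));
	req->req_type = HWRM_VER_GET;
	req->cmpl_ring = (uint16_t)HWRM_NA_SIGNATURE;	/* no cmpl ring */
	req->seq_id = seq;
	req->target_id = 0xffff;	/* 0xFFFF addresses the HWRM itself */
	req->resp_addr = resp_buf_pa;
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
}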
+
+/* hwrm_ver_get_output (size:1408b/176B) */
+struct hwrm_ver_get_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification.
+ * A HWRM implementation that is compliant with this
+	 * specification shall provide a value of 1 in this field.
+ */
+ uint8_t hwrm_intf_maj_8b;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification.
+ * This can be due to addition or removal of functionality.
+ * HWRM interface specifications with the same major version
+ * but different minor versions are compatible.
+ * A HWRM implementation that is compliant with this
+	 * specification shall provide a value of 2 in this field.
+ */
+ uint8_t hwrm_intf_min_8b;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface update version is used to reflect minor
+ * changes or bug fixes to a released HWRM interface
+ * specification.
+ * A HWRM implementation that is compliant with this
+	 * specification shall provide a value of 2 in this field.
+ */
+ uint8_t hwrm_intf_upd_8b;
+ uint8_t hwrm_intf_rsvd_8b;
+ /*
+ * This field represents the major version of HWRM firmware.
+ * A change in firmware major version represents a major
+ * firmware release.
+ */
+ uint8_t hwrm_fw_maj_8b;
+ /*
+ * This field represents the minor version of HWRM firmware.
+ * A change in firmware minor version represents significant
+ * firmware functionality changes.
+ */
+ uint8_t hwrm_fw_min_8b;
+ /*
+ * This field represents the build version of HWRM firmware.
+ * A change in firmware build version represents bug fixes
+ * to a released firmware.
+ */
+ uint8_t hwrm_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version of the
+ * HWRM firmware.
+ */
+ uint8_t hwrm_fw_rsvd_8b;
+ /*
+ * This field represents the major version of mgmt firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t mgmt_fw_maj_8b;
+ /*
+ * This field represents the minor version of mgmt firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t mgmt_fw_min_8b;
+ /*
+ * This field represents the build version of mgmt firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t mgmt_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t mgmt_fw_rsvd_8b;
+ /*
+ * This field represents the major version of network
+ * control firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t netctrl_fw_maj_8b;
+ /*
+ * This field represents the minor version of network
+ * control firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t netctrl_fw_min_8b;
+ /*
+ * This field represents the build version of network
+ * control firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t netctrl_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t netctrl_fw_rsvd_8b;
+ /*
+ * This field is used to indicate device's capabilities and
+ * configurations.
+ */
+ uint32_t dev_caps_cfg;
+ /*
+ * If set to 1, then secure firmware update behavior
+ * is supported.
+ * If set to 0, then secure firmware update behavior is
+ * not supported.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then firmware based DCBX agent is supported.
+ * If set to 0, then firmware based DCBX agent capability
+ * is not supported on this device.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, then HWRM short command format is supported.
+ * If set to 0, then HWRM short command format is not supported.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \
+ UINT32_C(0x4)
+ /*
+ * If set to 1, then HWRM short command format is required.
+ * If set to 0, then HWRM short command format is not required.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED \
+ UINT32_C(0x8)
+ /*
+ * This field represents the major version of RoCE firmware.
+ * A change in major version represents a major release.
+ */
+ uint8_t roce_fw_maj_8b;
+ /*
+ * This field represents the minor version of RoCE firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint8_t roce_fw_min_8b;
+ /*
+ * This field represents the build version of RoCE firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint8_t roce_fw_bld_8b;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint8_t roce_fw_rsvd_8b;
+ /*
+ * This field represents the name of HWRM FW (ASCII chars
+ * with NULL at the end).
+ */
+ char hwrm_fw_name[16];
+ /*
+ * This field represents the name of mgmt FW (ASCII chars
+ * with NULL at the end).
+ */
+ char mgmt_fw_name[16];
+ /*
+ * This field represents the name of network control
+ * firmware (ASCII chars with NULL at the end).
+ */
+ char netctrl_fw_name[16];
+ /*
+ * This field is reserved for future use.
+ * The responder should set it to 0.
+ * The requester should ignore this field.
+ */
+ uint8_t reserved2[16];
+ /*
+ * This field represents the name of RoCE FW (ASCII chars
+ * with NULL at the end).
+ */
+ char roce_fw_name[16];
+ /* This field returns the chip number. */
+ uint16_t chip_num;
+ /* This field returns the revision of chip. */
+ uint8_t chip_rev;
+ /* This field returns the chip metal number. */
+ uint8_t chip_metal;
+ /* This field returns the bond id of the chip. */
+ uint8_t chip_bond_id;
+ /* This value indicates the type of platform used for chip implementation. */
+ uint8_t chip_platform_type;
+ /* ASIC */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0)
+ /* FPGA platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
+ /* Palladium platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_LAST \
+ HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM
+ /*
+ * This field returns the maximum value of request window that
+ * is supported by the HWRM. The request window is mapped
+ * into device address space using MMIO.
+ */
+ uint16_t max_req_win_len;
+ /*
+ * This field returns the maximum value of response buffer in
+ * bytes.
+ */
+ uint16_t max_resp_len;
+ /*
+ * This field returns the default request timeout value in
+ * milliseconds.
+ */
+ uint16_t def_req_timeout;
+ /*
+	 * This field indicates whether any subsystem is not fully
+	 * initialized.
+ */
+ uint8_t flags;
+ /*
+ * If set to 1, device is not ready.
+ * If set to 0, device is ready to accept all HWRM commands.
+ */
+ #define HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY UINT32_C(0x1)
+ /*
+ * If set to 1, external version present.
+ * If set to 0, external version not present.
+ */
+ #define HWRM_VER_GET_OUTPUT_FLAGS_EXT_VER_AVAIL UINT32_C(0x2)
+ uint8_t unused_0[2];
+ /*
+ * For backward compatibility this field must be set to 1.
+ * Older drivers might look for this field to be 1 before
+ * processing the message.
+ */
+ uint8_t always_1;
+ /*
+ * This field represents the major version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification. A HWRM implementation that is
+	 * compliant with this specification shall provide a value of 1
+ * in this field.
+ */
+ uint16_t hwrm_intf_major;
+ /*
+ * This field represents the minor version of HWRM interface
+ * specification supported by the HWRM implementation.
+ * A change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification. This can be due to addition or
+ * removal of functionality. HWRM interface specifications
+ * with the same major version but different minor versions are
+ * compatible. A HWRM implementation that is compliant with
+	 * this specification shall provide a value of 2 in this field.
+ */
+ uint16_t hwrm_intf_minor;
+ /*
+ * This field represents the update version of HWRM interface
+ * specification supported by the HWRM implementation. The
+ * interface update version is used to reflect minor changes or
+ * bug fixes to a released HWRM interface specification.
+ * A HWRM implementation that is compliant with this
+	 * specification shall provide a value of 2 in this field.
+ */
+ uint16_t hwrm_intf_build;
+ /*
+ * This field represents the patch version of HWRM interface
+ * specification supported by the HWRM implementation.
+ */
+ uint16_t hwrm_intf_patch;
+ /*
+ * This field represents the major version of HWRM firmware.
+ * A change in firmware major version represents a major
+ * firmware release.
+ */
+ uint16_t hwrm_fw_major;
+ /*
+ * This field represents the minor version of HWRM firmware.
+ * A change in firmware minor version represents significant
+ * firmware functionality changes.
+ */
+ uint16_t hwrm_fw_minor;
+ /*
+ * This field represents the build version of HWRM firmware.
+ * A change in firmware build version represents bug fixes to
+ * a released firmware.
+ */
+ uint16_t hwrm_fw_build;
+ /*
+ * This field is a reserved field.
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major,minor,update) version
+ * of the HWRM firmware.
+ */
+ uint16_t hwrm_fw_patch;
+ /*
+ * This field represents the major version of mgmt firmware.
+ * A change in major version represents a major release.
+ */
+ uint16_t mgmt_fw_major;
+ /*
+	 * This field represents the minor version of mgmt firmware.
+	 * A change in minor version represents significant
+	 * functionality changes.
+ */
+ uint16_t mgmt_fw_minor;
+ /*
+ * This field represents the build version of mgmt firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint16_t mgmt_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version.
+ */
+ uint16_t mgmt_fw_patch;
+ /*
+ * This field represents the major version of network control
+ * firmware. A change in major version represents
+ * a major release.
+ */
+ uint16_t netctrl_fw_major;
+ /*
+ * This field represents the minor version of network control
+ * firmware. A change in minor version represents significant
+ * functionality changes.
+ */
+ uint16_t netctrl_fw_minor;
+ /*
+ * This field represents the build version of network control
+ * firmware. A change in update version represents bug fixes.
+ */
+ uint16_t netctrl_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint16_t netctrl_fw_patch;
+ /*
+ * This field represents the major version of RoCE firmware.
+ * A change in major version represents a major release.
+ */
+ uint16_t roce_fw_major;
+ /*
+ * This field represents the minor version of RoCE firmware.
+ * A change in minor version represents significant
+ * functionality changes.
+ */
+ uint16_t roce_fw_minor;
+ /*
+ * This field represents the build version of RoCE firmware.
+ * A change in update version represents bug fixes.
+ */
+ uint16_t roce_fw_build;
+ /*
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
+ */
+ uint16_t roce_fw_patch;
+ /*
+ * This field returns the maximum extended request length acceptable
+ * by the device which allows requests greater than mailbox size when
+ * used with the short cmd request format.
+ */
+ uint16_t max_ext_req_len;
+ uint8_t unused_1[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
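
A sketch of consuming the fields above, including the short command format
capability bits in dev_caps_cfg; the printout is illustrative only.

#include <stdio.h>
#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

static void hwrm_show_ver(const struct hwrm_ver_get_output *out)
{
	printf("HWRM interface %u.%u.%u, firmware %u.%u.%u\n",
	       (unsigned)out->hwrm_intf_major, (unsigned)out->hwrm_intf_minor,
	       (unsigned)out->hwrm_intf_build, (unsigned)out->hwrm_fw_major,
	       (unsigned)out->hwrm_fw_minor, (unsigned)out->hwrm_fw_build);
	if (out->dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)
		printf("short command format is required\n");
	else if (out->dev_caps_cfg &
		 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED)
		printf("short command format is supported\n");
}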
+
+/* bd_base (size:64b/8B) */
+struct bd_base {
+ uint8_t type;
+ /* This value identifies the type of buffer descriptor. */
+ #define BD_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define BD_BASE_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define BD_BASE_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ /*
+	 * Indicates that this BD is 16B long and is an empty
+ * TX BD. Not valid for use by the driver.
+ */
+ #define BD_BASE_TYPE_TX_BD_EMPTY UINT32_C(0x1)
+ /*
+ * Indicates that this BD is 16B long and is an RX Producer
+	 * (i.e. empty) buffer descriptor.
+ */
+ #define BD_BASE_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ /*
+ * Indicates that this BD is 16B long and is an RX
+ * Producer Buffer BD.
+ */
+ #define BD_BASE_TYPE_RX_PROD_BFR UINT32_C(0x5)
+ /*
+ * Indicates that this BD is 16B long and is an
+ * RX Producer Assembly Buffer Descriptor.
+ */
+ #define BD_BASE_TYPE_RX_PROD_AGG UINT32_C(0x6)
+ /*
+ * Indicates that this BD is 32B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define BD_BASE_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define BD_BASE_TYPE_LAST BD_BASE_TYPE_TX_BD_LONG
+ uint8_t unused_1[7];
+} __attribute__((packed));
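
Every BD variant above shares this leading 6-bit type field, so a ring
consumer can dispatch on the type before casting to the full layout; a
minimal sketch, with the helper name assumed.

#include <stdint.h>
#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

static uint8_t bd_type(const struct bd_base *bd)
{
	return (bd->type & BD_BASE_TYPE_MASK) >> BD_BASE_TYPE_SFT;
}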
+
+/* tx_bd_short (size:128b/16B) */
struct tx_bd_short {
- uint16_t flags_type;
/*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
*/
+ uint16_t flags_type;
/* This value identifies the type of buffer descriptor. */
- #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
- #define TX_BD_SHORT_TYPE_SFT 0
+ #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_SHORT_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ #define TX_BD_SHORT_TYPE_LAST TX_BD_SHORT_TYPE_TX_BD_SHORT
/*
- * Indicates that this BD is 16B long and is
- * used for normal L2 packet transmission.
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
*/
- #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0)
+ #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_SHORT_FLAGS_SFT 6
/*
* If set to 1, the packet ends with the data in the buffer
- * pointed to by this descriptor. This flag must be valid on
- * every BD.
+ * pointed to by this descriptor. This flag must be
+ * valid on every BD.
*/
- #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
+ #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
/*
* If set to 1, the device will not generate a completion for
+	 * this transmit packet unless there is an error in its
- * processing. If this bit is set to 0, then the packet will be
- * completed normally. This bit must be valid only on the first
- * BD of a packet.
+ * processing.
+	 * If this bit is set to 0, then the packet will be completed
+	 * normally.
+ *
+ * This bit must be valid only on the first BD of a packet.
*/
- #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
+ #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
/*
* This value indicates how many 16B BD locations are consumed
- * in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
- * 3 indicates either 3 short BDs or 1 long BD and one short BD
- * in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
- * only on the first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
- #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
- /*
- * This value is a hint for the length of the entire packet. It
- * is used by the chip to optimize internal processing. The
- * packet will be dropped if the hint is too short. This field
- * is valid only on the first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
- #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
+ * in the ring by this packet.
+ * A value of 1 indicates that this BD is the only BD (and that
+	 * it is a short BD). A value
+ * of 3 indicates either 3 short BDs or 1 long BD and one short
+ * BD in the packet. A value of 0 indicates
+ * that there are 32 BD locations in the packet (the maximum).
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet.
+ * It is used by the chip to optimize internal processing.
+ *
+ * The packet will be dropped if the hint is too short.
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
/* indicates packet length < 512B */
- #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
/* indicates 512 <= packet length < 1KB */
- #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
/* indicates 1KB <= packet length < 2KB */
- #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
/* indicates packet length >= 2KB */
- #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
#define TX_BD_SHORT_FLAGS_LHINT_LAST \
TX_BD_SHORT_FLAGS_LHINT_GTE2K
/*
@@ -256,961 +1268,1111 @@ struct tx_bd_short {
* Index after the buffer associated with this descriptor has
* been transferred via DMA to NIC memory from host memory. An
* interrupt may or may not be generated according to the state
- * of the interrupt avoidance mechanisms. If this bit is set to
- * 0, then the Consumer Index is only updated as soon as one of
- * the host interrupt coalescing conditions has been met. This
- * bit must be valid on the first BD of a packet.
+ * of the interrupt avoidance mechanisms. If this bit
+ * is set to 0, then the Consumer Index is only updated as soon
+ * as one of the host interrupt coalescing conditions has been met.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
+ #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
/*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * This is the length of the host physical buffer this BD describes
+ * in bytes.
+ *
+ * This field must be valid on all BDs of a packet.
*/
- #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_BD_SHORT_FLAGS_SFT 6
- uint16_t len;
+ uint16_t len;
/*
- * This is the length of the host physical buffer this BD
- * describes in bytes. This field must be valid on all BDs of a
- * packet.
- */
- uint32_t opaque;
- /*
- * The opaque data field is pass through to the completion and
- * can be used for any data that the driver wants to associate
- * with the transmit BD. This field must be valid on the first
- * BD of a packet.
+	 * The opaque data field is passed through to the completion and can
+	 * be
+ * used for any data that the driver wants to associate with the
+ * transmit BD.
+ *
+ * This field must be valid on the first BD of a packet.
*/
- uint64_t addr;
+ uint32_t opaque;
/*
- * This is the host physical address for the portion of the
- * packet described by this TX BD. This value must be valid on
- * all BDs of a packet.
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD.
+ *
+ * This value must be valid on all BDs of a packet.
*/
+ uint64_t address;
} __attribute__((packed));
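
A hypothetical single-buffer transmit using this layout: the whole packet
fits in one short BD, so bd_cnt is 1 and packet_end is set. The length hint
assumes a packet under 512B, and byte-order handling is omitted.

#include <stdint.h>
#include <string.h>
#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

static void fill_tx_bd_short(struct tx_bd_short *bd, uint64_t pkt_pa,
			     uint16_t pkt_len, uint32_t sw_cookie)
{
	memset(bd, 0, sizeof(*bd));
	bd->flags_type = (uint16_t)(TX_BD_SHORT_TYPE_TX_BD_SHORT |
				    TX_BD_SHORT_FLAGS_PACKET_END |
				    (1 << TX_BD_SHORT_FLAGS_BD_CNT_SFT) |
				    TX_BD_SHORT_FLAGS_LHINT_LT512);
	bd->len = pkt_len;
	bd->opaque = sw_cookie;	/* echoed back in the completion */
	bd->address = pkt_pa;
}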
-/* Long TX BD (32 bytes split to 2 16-byte struct) */
+/* tx_bd_long (size:128b/16B) */
struct tx_bd_long {
- uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
/*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+	 * This value indicates the type of buffer descriptor.
*/
- /* This value identifies the type of buffer descriptor. */
- #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
- #define TX_BD_LONG_TYPE_SFT 0
+ #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_LONG_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 32B long and is used for
+ * normal L2 packet transmission.
+ */
+ #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define TX_BD_LONG_TYPE_LAST TX_BD_LONG_TYPE_TX_BD_LONG
/*
- * Indicates that this BD is 32B long and is
- * used for normal L2 packet transmission.
+ * All bits in this field must be valid on the first BD of a packet.
+ * Only the packet_end bit must be valid for the remaining BDs
+ * of a packet.
*/
- #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10)
+ #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_LONG_FLAGS_SFT 6
/*
* If set to 1, the packet ends with the data in the buffer
- * pointed to by this descriptor. This flag must be valid on
- * every BD.
+ * pointed to by this descriptor. This flag must be
+ * valid on every BD.
*/
- #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
+ #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
/*
* If set to 1, the device will not generate a completion for
	 * this transmit packet unless there is an error in its
- * processing. If this bit is set to 0, then the packet will be
- * completed normally. This bit must be valid only on the first
- * BD of a packet.
+ * processing.
+	 * If this bit is set to 0, then the packet will be completed
+	 * normally.
+ *
+ * This bit must be valid only on the first BD of a packet.
*/
- #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
+ #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
/*
* This value indicates how many 16B BD locations are consumed
- * in the ring by this packet. A value of 1 indicates that this
- * BD is the only BD (and that the it is a short BD). A value of
- * 3 indicates either 3 short BDs or 1 long BD and one short BD
- * in the packet. A value of 0 indicates that there are 32 BD
- * locations in the packet (the maximum). This field is valid
- * only on the first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
- #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
- /*
- * This value is a hint for the length of the entire packet. It
- * is used by the chip to optimize internal processing. The
- * packet will be dropped if the hint is too short. This field
- * is valid only on the first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
- #define TX_BD_LONG_FLAGS_LHINT_SFT 13
+ * in the ring by this packet.
+ * A value of 1 indicates that this BD is the only BD (and that
+	 * it is a short BD). A value
+ * of 3 indicates either 3 short BDs or 1 long BD and one short
+ * BD in the packet. A value of 0 indicates
+ * that there are 32 BD locations in the packet (the maximum).
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet.
+ * It is used by the chip to optimize internal processing.
+ *
+ * The packet will be dropped if the hint is too short.
+ *
+ * This field is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_LONG_FLAGS_LHINT_SFT 13
/* indicates packet length < 512B */
- #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
/* indicates 512 <= packet length < 1KB */
- #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
/* indicates 1KB <= packet length < 2KB */
- #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
/* indicates packet length >= 2KB */
- #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_LONG_FLAGS_LHINT_LAST \
- TX_BD_LONG_FLAGS_LHINT_GTE2K
+ #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
/*
* If set to 1, the device immediately updates the Send Consumer
* Index after the buffer associated with this descriptor has
* been transferred via DMA to NIC memory from host memory. An
* interrupt may or may not be generated according to the state
- * of the interrupt avoidance mechanisms. If this bit is set to
- * 0, then the Consumer Index is only updated as soon as one of
- * the host interrupt coalescing conditions has been met. This
- * bit must be valid on the first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
- /*
- * All bits in this field must be valid on the first BD of a
- * packet. Only the packet_end bit must be valid for the
- * remaining BDs of a packet.
+ * of the interrupt avoidance mechanisms. If this bit
+ * is set to 0, then the Consumer Index is only updated as soon
+ * as one of the host interrupt coalescing conditions has been met.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_BD_LONG_FLAGS_SFT 6
- uint16_t len;
+ #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
/*
- * This is the length of the host physical buffer this BD
- * describes in bytes. This field must be valid on all BDs of a
- * packet.
+ * This is the length of the host physical buffer this BD describes
+ * in bytes.
+ *
+ * This field must be valid on all BDs of a packet.
*/
- uint32_t opaque;
+ uint16_t len;
/*
- * The opaque data field is pass through to the completion and
- * can be used for any data that the driver wants to associate
- * with the transmit BD. This field must be valid on the first
- * BD of a packet.
+	 * The opaque data field is passed through to the completion and can
+	 * be
+ * used for any data that the driver wants to associate with the
+ * transmit BD.
+ *
+ * This field must be valid on the first BD of a packet.
*/
- uint64_t addr;
+ uint32_t opaque;
/*
- * This is the host physical address for the portion of the
- * packet described by this TX BD. This value must be valid on
- * all BDs of a packet.
+ * This is the host physical address for the portion of the packet
+ * described by this TX BD.
+ *
+ * This value must be valid on all BDs of a packet.
*/
+ uint64_t address;
} __attribute__((packed));
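
Since bd_cnt counts 16B ring locations, a 32B long BD together with its
tx_bd_long_hi half consumes two locations even for a single-buffer packet.
A sketch of filling the first half, byte-order handling omitted.

#include <stdint.h>
#include <string.h>
#include "hsi_struct_def_dpdk.h"	/* assumed location of this header */

static void fill_tx_bd_long(struct tx_bd_long *bd, uint64_t pkt_pa,
			    uint16_t pkt_len, uint32_t sw_cookie)
{
	memset(bd, 0, sizeof(*bd));
	bd->flags_type = (uint16_t)(TX_BD_LONG_TYPE_TX_BD_LONG |
				    TX_BD_LONG_FLAGS_PACKET_END |
				    (2 << TX_BD_LONG_FLAGS_BD_CNT_SFT));
	bd->len = pkt_len;
	bd->opaque = sw_cookie;
	bd->address = pkt_pa;
}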
-/* last 16 bytes of Long TX BD */
+/* tx_bd_long_hi (size:128b/16B) */
struct tx_bd_long_hi {
- uint16_t lflags;
/*
- * All bits in this field must be valid on the first BD of a
- * packet. Their value on other BDs of the packet will be
- * ignored.
+ * All bits in this field must be valid on the first BD of a packet.
+ * Their value on other BDs of the packet will be ignored.
*/
+ uint16_t lflags;
/*
	 * If set to 1, the controller replaces the TCP/UDP checksum
	 * fields of normal TCP/UDP packets, or the inner TCP/UDP
* checksum field of the encapsulated TCP/UDP packets with the
- * hardware calculated TCP/UDP checksum for the packet
- * associated with this descriptor. The flag is ignored if the
- * LSO flag is set. This bit must be valid on the first BD of a
- * packet.
+ * hardware calculated TCP/UDP checksum for the packet associated
+ * with this descriptor. The flag is ignored if the LSO flag is set.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+ #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
/*
- * If set to 1, the controller replaces the IP checksum of the
+ * If set to 1, the controller replaces the IP checksum of the
* normal packets, or the inner IP checksum of the encapsulated
* packets with the hardware calculated IP checksum for the
- * packet associated with this descriptor. This bit must be
- * valid on the first BD of a packet.
+ * packet associated with this descriptor.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
/*
* If set to 1, the controller will not append an Ethernet CRC
- * to the end of the frame. This bit must be valid on the first
- * BD of a packet. Packet must be 64B or longer when this flag
- * is set. It is not useful to use this bit with any form of TX
- * offload such as CSO or LSO. The intent is that the packet
- * from the host already has a valid Ethernet CRC on the packet.
+ * to the end of the frame.
+ *
+ * This bit must be valid on the first BD of a packet.
+ *
+ * Packet must be 64B or longer when this flag is set. It is not
+ * useful to use this bit with any form of TX offload such as
+ * CSO or LSO. The intent is that the packet from the host already
+ * has a valid Ethernet CRC on the packet.
*/
- #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
+ #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
/*
- * If set to 1, the device will record the time at which the
- * packet was actually transmitted at the TX MAC. This bit must
- * be valid on the first BD of a packet.
+ * If set to 1, the device will record the time at which the packet
+ * was actually transmitted at the TX MAC.
+ *
+ * This bit must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
+ #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
/*
* If set to 1, The controller replaces the tunnel IP checksum
* field with hardware calculated IP checksum for the IP header
- * of the packet associated with this descriptor. For outer UDP
- * checksum, global outer UDP checksum TE_NIC register needs to
- * be enabled. If the global outer UDP checksum TE_NIC register
- * bit is set, outer UDP checksum will be calculated for the
- * following cases: 1. Packets with tcp_udp_chksum flag set to
- * offload checksum for inner packet AND the inner packet is
- * TCP/UDP. If the inner packet is ICMP for example (non-
- * TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
- * checksum will not be calculated. 2. Packets with lso flag set
- * which implies inner TCP checksum calculation as part of LSO
- * operation.
- */
- #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
- /*
- * If set to 1, the device will treat this packet with LSO(Large
+ * of the packet associated with this descriptor.
+ *
+ * For outer UDP checksum, global outer UDP checksum TE_NIC register
+ * needs to be enabled. If the global outer UDP checksum TE_NIC register
+ * bit is set, outer UDP checksum will be calculated for the following
+ * cases:
+ * 1. Packets with tcp_udp_chksum flag set to offload checksum for inner
+ * packet AND the inner packet is TCP/UDP. If the inner packet is ICMP for
+ * example (non-TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
+ * checksum will not be calculated.
+ * 2. Packets with lso flag set which implies inner TCP checksum calculation
+ * as part of LSO operation.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * If set to 1, the device will treat this packet with LSO(Large
* Send Offload) processing for both normal or encapsulated
- * packets, which is a form of TCP segmentation. When this bit
+ * packets, which is a form of TCP segmentation. When this bit
* is 1, the hdr_size and mss fields must be valid. The driver
- * doesn't need to set t_ip_chksum, ip_chksum, and
- * tcp_udp_chksum flags since the controller will replace the
- * appropriate checksum fields for segmented packets. When this
- * bit is 1, the hdr_size and mss fields must be valid.
+ * doesn't need to set t_ip_chksum, ip_chksum, and tcp_udp_chksum
+ * flags since the controller will replace the appropriate
+ * checksum fields for segmented packets.
*/
- #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+ #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
/*
* If set to zero when LSO is '1', then the IPID will be treated
* as a 16b number and will be wrapped if it exceeds a value of
- * 0xffff. If set to one when LSO is '1', then the IPID will be
- * treated as a 15b number and will be wrapped if it exceeds a
- * value 0f 0x7fff.
+ * 0xffff.
+ *
+ * If set to one when LSO is '1', then the IPID will be treated
+	 * as a 15b number and will be wrapped if it exceeds a value of
+ * 0x7fff.
*/
- #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
+ #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
/*
* If set to zero when LSO is '1', then the IPID of the tunnel
- * IP header will not be modified during LSO operations. If set
- * to one when LSO is '1', then the IPID of the tunnel IP header
- * will be incremented for each subsequent segment of an LSO
- * operation. The flag is ignored if the LSO packet is a normal
- * (non-tunneled) TCP packet.
+ * IP header will not be modified during LSO operations.
+ *
+ * If set to one when LSO is '1', then the IPID of the tunnel
+ * IP header will be incremented for each subsequent segment of an
+ * LSO operation.
+ *
+ * The flag is ignored if the LSO packet is a normal (non-tunneled)
+ * TCP packet.
*/
- #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
+ #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
/*
* If set to '1', then the RoCE ICRC will be appended to the
- * packet. Packet must be a valid RoCE format packet.
+ * packet. Packet must be a valid RoCE format packet.
*/
- #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
/*
* If set to '1', then the FCoE CRC will be appended to the
- * packet. Packet must be a valid FCoE format packet.
+ * packet. Packet must be a valid FCoE format packet.
*/
- #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
- uint16_t hdr_size;
+ #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ uint16_t hdr_size;
/*
* When LSO is '1', this field must contain the offset of the
- * TCP payload from the beginning of the packet in as 16b words.
- * In case of encapsulated/tunneling packet, this field contains
- * the offset of the inner TCP payload from beginning of the
- * packet as 16-bit words. This value must be valid on the first
- * BD of a packet.
- */
- #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
- #define TX_BD_LONG_HDR_SIZE_SFT 0
- uint32_t mss;
- /*
- * This is the MSS value that will be used to do the LSO
- * processing. The value is the length in bytes of the TCP
- * payload for each segment generated by the LSO operation. This
- * value must be valid on the first BD of a packet.
- */
- #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
- #define TX_BD_LONG_MSS_SFT 0
- uint16_t unused_2;
- uint16_t cfa_action;
- /*
- * This value selects a CFA action to perform on the packet. Set
- * this value to zero if no CFA action is desired. This value
- * must be valid on the first BD of a packet.
- */
- uint32_t cfa_meta;
- /*
- * This value is action meta-data that defines CFA edit
- * operations that are done in addition to any action editing.
- */
+ * TCP payload from the beginning of the packet in as
+ * 16b words. In case of encapsulated/tunneling packet, this field
+ * contains the offset of the inner TCP payload from beginning of the
+ * packet as 16-bit words.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
+ #define TX_BD_LONG_HDR_SIZE_SFT 0
+ uint32_t mss;
+ /*
+ * This is the MSS value that will be used to do the LSO processing.
+ * The value is the length in bytes of the TCP payload for each
+ * segment generated by the LSO operation.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
+ #define TX_BD_LONG_MSS_SFT 0
+ uint16_t unused2;
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+ /*
+ * This value is action meta-data that defines CFA edit operations
+ * that are done in addition to any action editing.
+ */
+ uint32_t cfa_meta;
	/* When key=1, this is the VLAN tag VID value. */
- #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
- #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
+ #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
	/* When key=1, this is the VLAN tag DE value. */
- #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
+ #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
	/* When key=1, this is the VLAN tag PRI value. */
- #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
- #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
	/* When key=1, this is the VLAN tag TPID select value. */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
- #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
/* 0x88a8 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
/* 0x8100 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
/* 0x9100 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
/* 0x9200 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
/* 0x9300 */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
/* Value programmed in CFA VLANTPID register. */
- #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
#define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
/* When key=1, This is the VLAN tag TPID select value. */
- #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
- #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
+ #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
/*
- * This field identifies the type of edit to be performed on the
- * packet. This value must be valid on the first BD of a packet.
+ * This field identifies the type of edit to be performed
+ * on the packet.
+ *
+ * This value must be valid on the first BD of a packet.
*/
- #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
- #define TX_BD_LONG_CFA_META_KEY_SFT 28
+ #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+ #define TX_BD_LONG_CFA_META_KEY_SFT 28
/* No editing */
- #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+ #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
/*
- * - meta[17:16] - TPID select value (0 =
- * 0x8100). - meta[15:12] - PRI/DE value. -
- * meta[11:0] - VID value.
+ * - meta[17:16] - TPID select value (0 = 0x8100).
+ * - meta[15:12] - PRI/DE value.
+ * - meta[11:0] - VID value.
*/
- #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
+ #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
#define TX_BD_LONG_CFA_META_KEY_LAST \
TX_BD_LONG_CFA_META_KEY_VLAN_TAG
} __attribute__((packed));
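
To make the encodings above concrete, here is a minimal sketch of how a driver might program the LSO fields and compose a VLAN-tag cfa_meta word. It assumes the enclosing structure is struct tx_bd_long_hi (the bnxt HSI name for the second 16B of the long TX BD); the function name and parameters are illustrative, and little-endian conversion is omitted.

static void
example_fill_lso_vlan(struct tx_bd_long_hi *txbd1, uint16_t hdr_len_bytes,
		      uint16_t mss, uint16_t vid, uint8_t pri)
{
	/* hdr_size counts 16-bit words: byte offset of the TCP payload / 2. */
	txbd1->hdr_size = (hdr_len_bytes / 2) & TX_BD_LONG_HDR_SIZE_MASK;
	txbd1->mss = mss & TX_BD_LONG_MSS_MASK;
	/* Zero selects no CFA action rule. */
	txbd1->cfa_action = 0;
	/* key=VLAN_TAG: insert an 0x8100 tag with the given VID/PRI. */
	txbd1->cfa_meta = (((uint32_t)vid << TX_BD_LONG_CFA_META_VLAN_VID_SFT) &
			   TX_BD_LONG_CFA_META_VLAN_VID_MASK) |
			  (((uint32_t)pri << TX_BD_LONG_CFA_META_VLAN_PRI_SFT) &
			   TX_BD_LONG_CFA_META_VLAN_PRI_MASK) |
			  TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 |
			  TX_BD_LONG_CFA_META_KEY_VLAN_TAG;
}
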
-/* RX Producer Packet BD (16 bytes) */
+/* tx_bd_empty (size:128b/16B) */
+struct tx_bd_empty {
+ /* This value identifies the type of buffer descriptor. */
+ uint8_t type;
+ #define TX_BD_EMPTY_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_EMPTY_TYPE_SFT 0
+ /*
+	 * Indicates that this BD is 16B long and is an empty
+ * TX BD. Not valid for use by the driver.
+ */
+ #define TX_BD_EMPTY_TYPE_TX_BD_EMPTY UINT32_C(0x1)
+ #define TX_BD_EMPTY_TYPE_LAST TX_BD_EMPTY_TYPE_TX_BD_EMPTY
+ uint8_t unused_1[3];
+ uint8_t unused_2;
+ uint8_t unused_3[3];
+ uint8_t unused_4[8];
+} __attribute__((packed));
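
The (size:128b/16B) annotations lend themselves to a compile-time check; a short sketch assuming a C11 compiler:

/* Verify the packed layout really is 16 bytes, per size:128b/16B. */
_Static_assert(sizeof(struct tx_bd_empty) == 16, "tx_bd_empty must be 16B");
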
+
+/* rx_prod_pkt_bd (size:128b/16B) */
struct rx_prod_pkt_bd {
- uint16_t flags_type;
/* This value identifies the type of buffer descriptor. */
- #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
- #define RX_PROD_PKT_BD_TYPE_SFT 0
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_PKT_BD_TYPE_SFT 0
/*
- * Indicates that this BD is 16B long and is an
- * RX Producer (ie. empty) buffer descriptor.
+ * Indicates that this BD is 16B long and is an RX Producer
+	 * (i.e. empty) buffer descriptor.
*/
- #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
+ #define RX_PROD_PKT_BD_TYPE_LAST \
+ RX_PROD_PKT_BD_TYPE_RX_PROD_PKT
+ #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_PKT_BD_FLAGS_SFT 6
/*
* If set to 1, the packet will be placed at the address plus
- * 2B. The 2 Bytes of padding will be written as zero.
- */
- /*
- * This is intended to be used when the host buffer is cache-
- * line aligned to produce packets that are easy to parse in
- * host memory while still allowing writes to be cache line
- * aligned.
+ * 2B. The 2 Bytes of padding will be written as zero.
*/
- #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
+ #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
/*
* If set to 1, the packet write will be padded out to the
* nearest cache-line with zero value padding.
*/
+ #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+ /*
+ * This value is the number of additional buffers in the ring that
+	 * describe the buffer space to be consumed for this packet.
+ * If the value is zero, then the packet must fit within the
+ * space described by this BD. If this value is 1 or more, it
+ * indicates how many additional "buffer" BDs are in the ring
+ * immediately following this BD to be used for the same
+ * network packet.
+ *
+ * Even if the packet to be placed does not need all the
+ * additional buffers, they will be consumed anyway.
+ */
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
+ #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
/*
- * If receive buffers start/end on cache-line boundaries, this
- * feature will ensure that all data writes on the PCI bus
- * start/end on cache line boundaries.
+ * This is the length in Bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
*/
- #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+ uint16_t len;
/*
- * This value is the number of additional buffers in the ring
- * that describe the buffer space to be consumed for the this
- * packet. If the value is zero, then the packet must fit within
- * the space described by this BD. If this value is 1 or more,
- * it indicates how many additional "buffer" BDs are in the ring
- * immediately following this BD to be used for the same network
- * packet. Even if the packet to be placed does not need all the
- * additional buffers, they will be consumed anyway.
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive buffer set.
+ */
+ uint32_t opaque;
+ /*
+ * This is the host physical address where data for the packet may
+	 * be placed in host memory.
+ */
+ uint64_t address;
+} __attribute__((packed));
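
A minimal sketch of posting one RX producer packet BD from the fields above, requesting the 2-byte SOP padding and no trailing buffer BDs; names are illustrative and little-endian conversion is omitted:

static void
example_post_rx_bd(struct rx_prod_pkt_bd *rxbd, uint64_t dma_addr,
		   uint16_t buf_len, uint32_t sw_cookie)
{
	/* Type 0x4 plus the 2B start-of-packet padding flag. */
	rxbd->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT |
			   RX_PROD_PKT_BD_FLAGS_SOP_PAD;
	rxbd->len = buf_len;       /* host buffer length in bytes */
	rxbd->opaque = sw_cookie;  /* echoed back in the RX completion */
	rxbd->address = dma_addr;  /* host physical buffer address */
}
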
+
+/* rx_prod_bfr_bd (size:128b/16B) */
+struct rx_prod_bfr_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_BFR_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_BFR_BD_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is an RX
+ * Producer Buffer BD.
*/
- #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
- #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
- #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_PROD_PKT_BD_FLAGS_SFT 6
- uint16_t len;
+ #define RX_PROD_BFR_BD_TYPE_RX_PROD_BFR UINT32_C(0x5)
+ #define RX_PROD_BFR_BD_TYPE_LAST RX_PROD_BFR_BD_TYPE_RX_PROD_BFR
+ #define RX_PROD_BFR_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_BFR_BD_FLAGS_SFT 6
/*
* This is the length in Bytes of the host physical buffer where
* data for the packet may be placed in host memory.
*/
+ uint16_t len;
+ /* This field is not used. */
+ uint32_t opaque;
/*
- * While this is a Byte resolution value, it is often
- * advantageous to ensure that the buffers provided end on a
- * host cache line.
+ * This is the host physical address where data for the packet may
+	 * be placed in host memory.
*/
- uint32_t opaque;
+ uint64_t address;
+} __attribute__((packed));
+
+/* rx_prod_agg_bd (size:128b/16B) */
+struct rx_prod_agg_bd {
+ /* This value identifies the type of buffer descriptor. */
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define RX_PROD_AGG_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_AGG_BD_TYPE_SFT 0
/*
- * The opaque data field is pass through to the completion and
- * can be used for any data that the driver wants to associate
- * with this receive buffer set.
+ * Indicates that this BD is 16B long and is an
+ * RX Producer Assembly Buffer Descriptor.
+ */
+ #define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG UINT32_C(0x6)
+ #define RX_PROD_AGG_BD_TYPE_LAST \
+ RX_PROD_AGG_BD_TYPE_RX_PROD_AGG
+ #define RX_PROD_AGG_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_AGG_BD_FLAGS_SFT 6
+ /*
+ * If set to 1, the packet write will be padded out to the
+ * nearest cache-line with zero value padding.
+ */
+ #define RX_PROD_AGG_BD_FLAGS_EOP_PAD UINT32_C(0x40)
+ /*
+ * This is the length in Bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
*/
- uint64_t addr;
+ uint16_t len;
/*
- * This is the host physical address where data for the packet
- * may by placed in host memory.
+	 * The opaque data field is passed through to the completion and can be
+ * used for any data that the driver wants to associate with this
+ * receive assembly buffer.
*/
+ uint32_t opaque;
/*
- * While this is a Byte resolution value, it is often
- * advantageous to ensure that the buffers provide start on a
- * host cache line.
+ * This is the host physical address where data for the packet may
+	 * be placed in host memory.
*/
+ uint64_t address;
} __attribute__((packed));
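
An analogous sketch for an aggregation BD, as posted on the aggregation ring to receive jumbo/TPA data that overflows the packet BD (names illustrative):

static void
example_post_agg_bd(struct rx_prod_agg_bd *agg, uint64_t dma_addr,
		    uint16_t buf_len, uint32_t sw_cookie)
{
	agg->flags_type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
	agg->len = buf_len;       /* host buffer length in bytes */
	agg->opaque = sw_cookie;  /* echoed back in the agg completion */
	agg->address = dma_addr;  /* host physical buffer address */
}
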
-/* Completion Ring Structures */
-/* Note: This structure is used by the HWRM to communicate HWRM Error. */
-/* Base Completion Record (16 bytes) */
+/* cmpl_base (size:128b/16B) */
struct cmpl_base {
- uint16_t type;
- /* unused is 10 b */
+ uint16_t type;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
- #define CMPL_BASE_TYPE_SFT 0
- /* TX L2 completion: Completion of TX packet. Length = 16B */
- #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0)
+ #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define CMPL_BASE_TYPE_SFT 0
/*
- * RX L2 completion: Completion of and L2 RX
- * packet. Length = 32B
+ * TX L2 completion:
+ * Completion of TX packet. Length = 16B
*/
- #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11)
+ #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0)
/*
- * RX Aggregation Buffer completion : Completion
- * of an L2 aggregation buffer in support of
- * TPA, HDS, or Jumbo packet completion. Length
- * = 16B
+ * RX L2 completion:
+	 * Completion of an L2 RX packet. Length = 32B
*/
- #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12)
+ #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11)
/*
- * RX L2 TPA Start Completion: Completion at the
- * beginning of a TPA operation. Length = 32B
+ * RX Aggregation Buffer completion :
+ * Completion of an L2 aggregation buffer in support of
+ * TPA, HDS, or Jumbo packet completion. Length = 16B
*/
- #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13)
+ #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12)
/*
- * RX L2 TPA End Completion: Completion at the
- * end of a TPA operation. Length = 32B
+ * RX L2 TPA Start Completion:
+ * Completion at the beginning of a TPA operation.
+ * Length = 32B
*/
- #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15)
+ #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13)
/*
- * Statistics Ejection Completion: Completion of
- * statistics data ejection buffer. Length = 16B
+ * RX L2 TPA End Completion:
+ * Completion at the end of a TPA operation.
+ * Length = 32B
*/
- #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a)
- /* HWRM Command Completion: Completion of an HWRM command. */
- #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20)
+ #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15)
+ /*
+ * Statistics Ejection Completion:
+ * Completion of statistics data ejection buffer.
+ * Length = 16B
+ */
+ #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a)
+ /*
+ * HWRM Command Completion:
+ * Completion of an HWRM command.
+ */
+ #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20)
/* Forwarded HWRM Request */
- #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
/* Forwarded HWRM Response */
- #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
/* HWRM Asynchronous Event Information */
- #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
/* CQ Notification */
- #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30)
+ #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30)
/* SRQ Threshold Event */
- #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32)
+ #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32)
/* DBQ Threshold Event */
- #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34)
+ #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34)
/* QP Async Notification */
- #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38)
+ #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38)
/* Function Async Notification */
- #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a)
- /* unused is 10 b */
- uint16_t info1;
+ #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a)
+ #define CMPL_BASE_TYPE_LAST CMPL_BASE_TYPE_FUNC_EVENT
/* info1 is 16 b */
- uint32_t info2;
+ uint16_t info1;
/* info2 is 32 b */
- uint32_t info3_v;
- /* info3 is 31 b */
- /*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
- */
- #define CMPL_BASE_V UINT32_C(0x1)
- /* info3 is 31 b */
- #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
- #define CMPL_BASE_INFO3_SFT 1
- uint32_t info4;
+ uint32_t info2;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint32_t info3_v;
+ #define CMPL_BASE_V UINT32_C(0x1)
+ #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
+ #define CMPL_BASE_INFO3_SFT 1
/* info4 is 32 b */
+ uint32_t info4;
} __attribute__((packed));
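
The type LSB and V-bit conventions above drive the completion-ring consumer loop. A minimal sketch, with the parity bookkeeping left to the caller (a real driver also inserts a read barrier after the valid check; names are illustrative):

/* Record length from the type LSB: even => 16B record, odd => 32B. */
static unsigned int
example_cmpl_rec_len(const struct cmpl_base *cmp)
{
	return ((cmp->type & CMPL_BASE_TYPE_MASK) & 1) ? 32 : 16;
}

static int
example_cmpl_valid(const struct cmpl_base *cmp, int expect_v)
{
	/*
	 * expect_v is 1 on even passes through the ring and 0 on odd
	 * passes; the consumer flips it each time the ring wraps.
	 */
	return !!(cmp->info3_v & CMPL_BASE_V) == expect_v;
}
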
-/* TX Completion Record (16 bytes) */
+/* tx_cmpl (size:128b/16B) */
struct tx_cmpl {
- uint16_t flags_type;
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define TX_CMPL_TYPE_SFT 0
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * TX L2 completion:
+ * Completion of TX packet. Length = 16B
*/
- #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define TX_CMPL_TYPE_SFT 0
- /* TX L2 completion: Completion of TX packet. Length = 16B */
- #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0)
+ #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0)
+ #define TX_CMPL_TYPE_LAST TX_CMPL_TYPE_TX_L2
+ #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_CMPL_FLAGS_SFT 6
/*
- * When this bit is '1', it indicates a packet that has an error
- * of some type. Type of error is indicated in error_flags.
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
*/
- #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
/*
* When this bit is '1', it indicates that the packet completed
- * was transmitted using the push acceleration data provided by
- * the driver. When this bit is '0', it indicates that the
+ * was transmitted using the push acceleration data provided
+ * by the driver. When this bit is '0', it indicates that the
	 * packet had no push acceleration data written or was executed
* as a normal packet even though push data was provided.
*/
- #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
- #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_CMPL_FLAGS_SFT 6
- uint16_t unused_0;
+ #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
/* unused1 is 16 b */
- uint32_t opaque;
+ uint16_t unused_0;
/*
- * This is a copy of the opaque field from the first TX BD of
- * this transmitted packet.
+ * This is a copy of the opaque field from the first TX BD of this
+ * transmitted packet.
*/
- uint16_t errors_v;
+ uint32_t opaque;
+ uint16_t errors_v;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define TX_CMPL_V UINT32_C(0x1)
+ #define TX_CMPL_V UINT32_C(0x1)
+ #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define TX_CMPL_ERRORS_SFT 1
/*
- * This error indicates that there was some sort of problem with
- * the BDs for the packet.
+ * This error indicates that there was some sort of problem
+ * with the BDs for the packet.
*/
- #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
- #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
/* No error */
- #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
- /* Bad Format: BDs were not formatted correctly. */
- #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
+ /*
+ * Bad Format:
+ * BDs were not formatted correctly.
+ */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
#define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT
/*
- * When this bit is '1', it indicates that the length of the
- * packet was zero. No packet was transmitted.
+ * When this bit is '1', it indicates that the length of
+ * the packet was zero. No packet was transmitted.
*/
- #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
+ #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
/*
- * When this bit is '1', it indicates that the packet was longer
- * than the programmed limit in TDI. No packet was transmitted.
+ * When this bit is '1', it indicates that the packet
+ * was longer than the programmed limit in TDI. No
+ * packet was transmitted.
*/
- #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20)
+ #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20)
/*
* When this bit is '1', it indicates that one or more of the
- * BDs associated with this packet generated a PCI error. This
- * probably means the address was not valid.
+ * BDs associated with this packet generated a PCI error.
+ * This probably means the address was not valid.
*/
- #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
+ #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
/*
* When this bit is '1', it indicates that the packet was longer
- * than indicated by the hint. No packet was transmitted.
+ * than indicated by the hint. No packet was transmitted.
*/
- #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
+ #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
/*
* When this bit is '1', it indicates that the packet was
- * dropped due to Poison TLP error on one or more of the TLPs in
- * the PXP completion.
+ * dropped due to Poison TLP error on one or more of the
+ * TLPs in the PXP completion.
*/
- #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
- #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define TX_CMPL_ERRORS_SFT 1
- uint16_t unused_1;
+ #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
/* unused2 is 16 b */
- uint32_t unused_2;
+ uint16_t unused_1;
/* unused3 is 32 b */
+ uint32_t unused_2;
} __attribute__((packed));
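
A minimal sketch of extracting the error bits from a TX completion, assuming the valid bit has already been checked (name illustrative):

/* Returns 0 on a clean transmit, else the error_flags field. */
static uint16_t
example_tx_cmpl_errors(const struct tx_cmpl *txcmp)
{
	if (!(txcmp->flags_type & TX_CMPL_FLAGS_ERROR))
		return 0;
	return (txcmp->errors_v & TX_CMPL_ERRORS_MASK) >> TX_CMPL_ERRORS_SFT;
}
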
-/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
+/* rx_pkt_cmpl (size:128b/16B) */
struct rx_pkt_cmpl {
- uint16_t flags_type;
+ uint16_t flags_type;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_PKT_CMPL_TYPE_SFT 0
+ #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PKT_CMPL_TYPE_SFT 0
/*
- * RX L2 completion: Completion of and L2 RX
- * packet. Length = 32B
+ * RX L2 completion:
+	 * Completion of an L2 RX packet. Length = 32B
*/
- #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_LAST RX_PKT_CMPL_TYPE_RX_L2
+ #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PKT_CMPL_FLAGS_SFT 6
/*
- * When this bit is '1', it indicates a packet that has an error
- * of some type. Type of error is indicated in error_flags.
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
*/
- #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
/* This field indicates how the packet was placed in the buffer. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
- /* Normal: Packet was placed using normal algorithm. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
- /* Jumbo: Packet was placed using jumbo algorithm. */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
- /*
- * Header/Data Separation: Packet was placed
- * using Header/Data separation algorithm. The
- * separation location is indicated by the itype
- * field.
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Normal:
+ * Packet was placed using normal algorithm.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
+ /*
+ * Jumbo:
+ * Packet was placed using jumbo algorithm.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
*/
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
#define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
/* This bit is '1' if the RSS field in this completion is valid. */
- #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
/* unused is 1 b */
- #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ #define RX_PKT_CMPL_FLAGS_UNUSED UINT32_C(0x800)
/*
	 * This value indicates what the inner packet type was
	 * determined to be.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
- /* Not Known: Indicates that the packet type was not known. */
- #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
/*
- * IP Packet: Indicates that the packet was an
- * IP packet, but further classification was not
- * possible.
+ * Not Known:
+ * Indicates that the packet type was not known.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN \
+ (UINT32_C(0x0) << 12)
/*
- * TCP Packet: Indicates that the packet was IP
- * and TCP. This indicates that the
- * payload_offset field is valid.
+ * IP Packet:
+ * Indicates that the packet was an IP packet, but further
+ * classification was not possible.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_IP \
+ (UINT32_C(0x1) << 12)
/*
- * UDP Packet: Indicates that the packet was IP
- * and UDP. This indicates that the
- * payload_offset field is valid.
+ * TCP Packet:
+ * Indicates that the packet was IP and TCP.
+ * This indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_TCP \
+ (UINT32_C(0x2) << 12)
/*
- * FCoE Packet: Indicates that the packet was
- * recognized as a FCoE. This also indicates
- * that the payload_offset field is valid.
+ * UDP Packet:
+ * Indicates that the packet was IP and UDP.
+ * This indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_UDP \
+ (UINT32_C(0x3) << 12)
/*
- * RoCE Packet: Indicates that the packet was
- * recognized as a RoCE. This also indicates
- * that the payload_offset field is valid.
+ * FCoE Packet:
+ * Indicates that the packet was recognized as a FCoE.
+ * This also indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE \
+ (UINT32_C(0x4) << 12)
/*
- * ICMP Packet: Indicates that the packet was
- * recognized as ICMP. This indicates that the
- * payload_offset field is valid.
+ * RoCE Packet:
+ * Indicates that the packet was recognized as a RoCE.
+ * This also indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE \
+ (UINT32_C(0x5) << 12)
/*
- * PtP packet wo/timestamp: Indicates that the
- * packet was recognized as a PtP packet.
+ * ICMP Packet:
+ * Indicates that the packet was recognized as ICMP.
+ * This indicates that the payload_offset field is valid.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (UINT32_C(0x8) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP \
+ (UINT32_C(0x7) << 12)
/*
- * PtP packet w/timestamp: Indicates that the
- * packet was recognized as a PtP packet and
- * that a timestamp was taken for the packet.
+ * PtP packet wo/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \
+ (UINT32_C(0x8) << 12)
+ /*
+ * PtP packet w/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet and that a timestamp was taken for the packet.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP \
+ (UINT32_C(0x9) << 12)
#define RX_PKT_CMPL_FLAGS_ITYPE_LAST \
RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
- #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_PKT_CMPL_FLAGS_SFT 6
- uint16_t len;
/*
* This is the length of the data for the packet stored in the
- * buffer(s) identified by the opaque value. This includes the
- * packet BD and any associated buffer BDs. This does not
- * include the length of any data places in aggregation BDs.
+	 * buffer(s) identified by the opaque value. This includes
+	 * the packet BD and any associated buffer BDs. This does not include
+	 * the length of any data placed in aggregation BDs.
*/
- uint32_t opaque;
+ uint16_t len;
/*
- * This is a copy of the opaque field from the RX BD this
- * completion corresponds to.
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
*/
- uint8_t agg_bufs_v1;
- /* unused1 is 2 b */
+ uint32_t opaque;
+ uint8_t agg_bufs_v1;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define RX_PKT_CMPL_V1 UINT32_C(0x1)
+ #define RX_PKT_CMPL_V1 UINT32_C(0x1)
/*
- * This value is the number of aggregation buffers that follow
- * this entry in the completion ring that are a part of this
- * packet. If the value is zero, then the packet is completely
- * contained in the buffer space provided for the packet in the
- * RX ring.
+ * This value is the number of aggregation buffers that follow this
+ * entry in the completion ring that are a part of this packet.
+ * If the value is zero, then the packet is completely contained
+ * in the buffer space provided for the packet in the RX ring.
*/
- #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
- #define RX_PKT_CMPL_AGG_BUFS_SFT 1
+ #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
+ #define RX_PKT_CMPL_AGG_BUFS_SFT 1
/* unused1 is 2 b */
- uint8_t rss_hash_type;
- /*
- * This is the RSS hash type for the packet. The value is packed
- * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}
- * . The value of tuple_extrac_op provides the information about
- * what fields the hash was computed on. * 0: The RSS hash was
- * computed over source IP address, destination IP address,
- * source port, and destination port of inner IP and TCP or UDP
- * headers. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 1: The RSS hash was computed over
- * source IP address and destination IP address of inner IP
- * header. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 2: The RSS hash was computed over
- * source IP address, destination IP address, source port, and
- * destination port of IP and TCP or UDP headers of outer tunnel
- * headers. Note: For non-tunneled packets, this value is not
- * applicable. * 3: The RSS hash was computed over source IP
- * address and destination IP address of IP header of outer
- * tunnel headers. Note: For non-tunneled packets, this value is
- * not applicable. Note that 4-tuples values listed above are
- * applicable for layer 4 protocols supported and enabled for
- * RSS in the hardware, HWRM firmware, and drivers. For example,
- * if RSS hash is supported and enabled for TCP traffic only,
- * then the values of tuple_extract_op corresponding to 4-tuples
- * are only valid for TCP traffic.
- */
- uint8_t payload_offset;
- /*
- * This value indicates the offset in bytes from the beginning
- * of the packet where the inner payload starts. This value is
- * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
- * indicates that header is 256B into the packet.
- */
- uint8_t unused_1;
+ #define RX_PKT_CMPL_UNUSED1_MASK UINT32_C(0xc0)
+ #define RX_PKT_CMPL_UNUSED1_SFT 6
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+	 * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}.
+ *
+	 * The value of tuple_extract_op provides the information about
+ * what fields the hash was computed on.
+ * * 0: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of inner
+ * IP and TCP or UDP headers. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 1: The RSS hash was computed over source IP address and destination
+ * IP address of inner IP header. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 2: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of
+ * IP and TCP or UDP headers of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ * * 3: The RSS hash was computed over source IP address and
+ * destination IP address of IP header of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ *
+ * Note that 4-tuples values listed above are applicable
+ * for layer 4 protocols supported and enabled for RSS in the hardware,
+ * HWRM firmware, and drivers. For example, if RSS hash is supported and
+ * enabled for TCP traffic only, then the values of tuple_extract_op
+ * corresponding to 4-tuples are only valid for TCP traffic.
+ */
+ uint8_t rss_hash_type;
+ /*
+ * This value indicates the offset in bytes from the beginning of the packet
+ * where the inner payload starts. This value is valid for TCP, UDP,
+ * FCoE, and RoCE packets.
+ *
+	 * A value of zero indicates that the header is 256B into the packet.
+ */
+ uint8_t payload_offset;
/* unused2 is 8 b */
- uint32_t rss_hash;
+ uint8_t unused1;
/*
* This value is the RSS hash value calculated for the packet
* based on the mode bits and key value in the VNIC.
*/
+ uint32_t rss_hash;
} __attribute__((packed));
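
A minimal sketch of decoding the low half of an RX completion into the two fields a driver needs first (names illustrative):

static void
example_rx_cmpl_decode(const struct rx_pkt_cmpl *rxcmp,
		       unsigned int *agg_bufs, unsigned int *itype)
{
	/* Aggregation completions that follow this entry in the ring. */
	*agg_bufs = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
		    RX_PKT_CMPL_AGG_BUFS_SFT;
	/* Inner packet type (IP/TCP/UDP/...), from flags_type[15:12]. */
	*itype = (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >>
		 RX_PKT_CMPL_FLAGS_ITYPE_SFT;
}
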
-/* last 16 bytes of RX Packet Completion Record */
+/* rx_pkt_cmpl_hi (size:128b/16B) */
struct rx_pkt_cmpl_hi {
- uint32_t flags2;
+ uint32_t flags2;
/*
* This indicates that the ip checksum was calculated for the
- * inner packet and that the ip_cs_error field indicates if
- * there was an error.
+ * inner packet and that the ip_cs_error field indicates if there
+ * was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
/*
* This indicates that the TCP, UDP or ICMP checksum was
- * calculated for the inner packet and that the l4_cs_error
- * field indicates if there was an error.
+ * calculated for the inner packet and that the l4_cs_error field
+ * indicates if there was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
/*
* This indicates that the ip checksum was calculated for the
- * tunnel header and that the t_ip_cs_error field indicates if
- * there was an error.
+ * tunnel header and that the t_ip_cs_error field indicates if there
+ * was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
/*
- * This indicates that the UDP checksum was calculated for the
- * tunnel packet and that the t_l4_cs_error field indicates if
- * there was an error.
+ * This indicates that the UDP checksum was
+ * calculated for the tunnel packet and that the t_l4_cs_error field
+ * indicates if there was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
/* This value indicates what format the metadata field is. */
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
- /* No metadata informtaion. Value is zero. */
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
- /*
- * The metadata field contains the VLAN tag and
- * TPID value. - metadata[11:0] contains the
- * vlan VID value. - metadata[12] contains the
- * vlan DE value. - metadata[15:13] contains the
- * vlan PRI value. - metadata[31:16] contains
- * the vlan TPID value.
- */
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
+	/* No metadata information. Value is zero. */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value.
+ * - metadata[11:0] contains the vlan VID value.
+ * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value.
+ * - metadata[31:16] contains the vlan TPID value.
+ */
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
/*
- * This field indicates the IP type for the inner-most IP
- * header. A value of '0' indicates IPv4. A value of '1'
- * indicates IPv6. This value is only valid if itype indicates a
- * packet with an IP header.
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ * This value is only valid if itype indicates a packet
+ * with an IP header.
*/
- #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
- uint32_t metadata;
+ #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
/*
- * This is data from the CFA block as indicated by the
- * meta_format field.
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
*/
+ uint32_t metadata;
/* When meta_format=1, this value is the VLAN VID. */
- #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
- #define RX_PKT_CMPL_METADATA_VID_SFT 0
+ #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_PKT_CMPL_METADATA_VID_SFT 0
/* When meta_format=1, this value is the VLAN DE. */
- #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
+ #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
/* When meta_format=1, this value is the VLAN PRI. */
- #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
- #define RX_PKT_CMPL_METADATA_PRI_SFT 13
+ #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_PKT_CMPL_METADATA_PRI_SFT 13
/* When meta_format=1, this value is the VLAN TPID. */
- #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
- #define RX_PKT_CMPL_METADATA_TPID_SFT 16
- uint16_t errors_v2;
+ #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_PKT_CMPL_METADATA_TPID_SFT 16
+ uint16_t errors_v2;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define RX_PKT_CMPL_V2 UINT32_C(0x1)
+ #define RX_PKT_CMPL_V2 \
+ UINT32_C(0x1)
+ #define RX_PKT_CMPL_ERRORS_MASK \
+ UINT32_C(0xfffe)
+ #define RX_PKT_CMPL_ERRORS_SFT 1
/*
* This error indicates that there was some sort of problem with
* the BDs for the packet that was found after part of the
- * packet was already placed. The packet should be treated as
+ * packet was already placed. The packet should be treated as
* invalid.
*/
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK \
+ UINT32_C(0xe)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
/* No buffer error */
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (UINT32_C(0x0) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \
+ (UINT32_C(0x0) << 1)
/*
- * Did Not Fit: Packet did not fit into packet
- * buffer provided. For regular placement, this
- * means the packet did not fit in the buffer
- * provided. For HDS and jumbo placement, this
- * means that the packet could not be placed
- * into 7 physical buffers or less.
+ * Did Not Fit:
+ * Packet did not fit into packet buffer provided.
+ * For regular placement, this means the packet did not fit
+ * in the buffer provided. For HDS and jumbo placement, this
+ * means that the packet could not be placed into 7 physical
+ * buffers or less.
*/
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \
(UINT32_C(0x1) << 1)
/*
- * Not On Chip: All BDs needed for the packet
- * were not on-chip when the packet arrived.
+ * Not On Chip:
+ * All BDs needed for the packet were not on-chip when
+ * the packet arrived.
*/
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
(UINT32_C(0x2) << 1)
- /* Bad Format: BDs were not formatted correctly. */
+ /*
+ * Bad Format:
+ * BDs were not formatted correctly.
+ */
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \
(UINT32_C(0x3) << 1)
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
- /* This indicates that there was an error in the IP header checksum. */
- #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
/*
- * This indicates that there was an error in the TCP, UDP or
- * ICMP checksum.
+ * This indicates that there was an error in the IP header
+ * checksum.
*/
- #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20)
+ #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR \
+ UINT32_C(0x10)
/*
- * This indicates that there was an error in the tunnel IP
- * header checksum.
+ * This indicates that there was an error in the TCP, UDP
+ * or ICMP checksum.
*/
- #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR UINT32_C(0x40)
+ #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR \
+ UINT32_C(0x20)
/*
- * This indicates that there was an error in the tunnel UDP
- * checksum.
+ * This indicates that there was an error in the tunnel
+ * IP header checksum.
*/
- #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
+ #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR \
+ UINT32_C(0x40)
+ /*
+ * This indicates that there was an error in the tunnel
+ * UDP checksum.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR \
+ UINT32_C(0x80)
/*
* This indicates that there was a CRC error on either an FCoE
- * or RoCE packet. The itype indicates the packet type.
+ * or RoCE packet. The itype indicates the packet type.
*/
- #define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100)
+ #define RX_PKT_CMPL_ERRORS_CRC_ERROR \
+ UINT32_C(0x100)
/*
- * This indicates that there was an error in the tunnel portion
- * of the packet when this field is non-zero.
+ * This indicates that there was an error in the tunnel
+	 * portion of the packet when this field is non-zero.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00)
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK \
+ UINT32_C(0xe00)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
/*
- * No additional error occurred on the tunnel
- * portion of the packet of the packet does not
- * have a tunnel.
+ * No additional error occurred on the tunnel portion
+	 * of the packet or the packet does not have a tunnel.
*/
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR \
+ (UINT32_C(0x0) << 9)
/*
- * Indicates that IP header version does not
- * match expectation from L2 Ethertype for IPv4
- * and IPv6 in the tunnel header.
+ * Indicates that IP header version does not match
+ * expectation from L2 Ethertype for IPv4 and IPv6
+ * in the tunnel header.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \
(UINT32_C(0x1) << 9)
/*
- * Indicates that header length is out of range
- * in the tunnel header. Valid for IPv4.
+ * Indicates that header length is out of range in the
+	 * tunnel header. Valid for IPv4.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \
(UINT32_C(0x2) << 9)
/*
- * Indicates that the physical packet is shorter
- * than that claimed by the PPPoE header length
- * for a tunnel PPPoE packet.
+ * Indicates that the physical packet is shorter than that
+ * claimed by the PPPoE header length for a tunnel PPPoE
+ * packet.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \
(UINT32_C(0x3) << 9)
/*
- * Indicates that physical packet is shorter
- * than that claimed by the tunnel l3 header
- * length. Valid for IPv4, or IPv6 tunnel packet
- * packets.
+ * Indicates that physical packet is shorter than that claimed
+	 * by the tunnel l3 header length. Valid for IPv4 or IPv6
+	 * tunnel packets.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \
(UINT32_C(0x4) << 9)
/*
- * Indicates that the physical packet is shorter
- * than that claimed by the tunnel UDP header
- * length for a tunnel UDP packet that is not
- * fragmented.
+ * Indicates that the physical packet is shorter than that
+ * claimed by the tunnel UDP header length for a tunnel
+ * UDP packet that is not fragmented.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \
(UINT32_C(0x5) << 9)
/*
- * indicates that the IPv4 TTL or IPv6 hop limit
- * check have failed (e.g. TTL = 0) in the
- * tunnel header. Valid for IPv4, and IPv6.
+	 * Indicates that the IPv4 TTL or IPv6 hop limit check
+	 * has failed (e.g. TTL = 0) in the tunnel header. Valid
+	 * for IPv4 and IPv6.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \
(UINT32_C(0x6) << 9)
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
/*
- * This indicates that there was an error in the inner portion
- * of the packet when this field is non-zero.
+ * This indicates that there was an error in the inner
+	 * portion of the packet when this field is non-zero.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000)
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK \
+ UINT32_C(0xf000)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
/*
- * No additional error occurred on the tunnel
- * portion of the packet of the packet does not
- * have a tunnel.
+ * No additional error occurred on the tunnel portion
+	 * of the packet or the packet does not have a tunnel.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR \
+ (UINT32_C(0x0) << 12)
/*
- * Indicates that IP header version does not
- * match expectation from L2 Ethertype for IPv4
- * and IPv6 or that option other than VFT was
- * parsed on FCoE packet.
+ * Indicates that IP header version does not match
+	 * expectation from L2 Ethertype for IPv4 and IPv6 or that
+	 * an option other than VFT was parsed on an FCoE packet.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \
(UINT32_C(0x1) << 12)
/*
- * indicates that header length is out of range.
- * Valid for IPv4 and RoCE
+	 * Indicates that header length is out of range. Valid for
+	 * IPv4 and RoCE.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \
(UINT32_C(0x2) << 12)
/*
- * indicates that the IPv4 TTL or IPv6 hop limit
- * check have failed (e.g. TTL = 0). Valid for
- * IPv4, and IPv6
+	 * Indicates that the IPv4 TTL or IPv6 hop limit check
+	 * has failed (e.g. TTL = 0). Valid for IPv4 and IPv6.
*/
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL \
+ (UINT32_C(0x3) << 12)
/*
- * Indicates that physical packet is shorter
- * than that claimed by the l3 header length.
- * Valid for IPv4, IPv6 packet or RoCE packets.
+ * Indicates that physical packet is shorter than that
+ * claimed by the l3 header length. Valid for IPv4,
+ * IPv6 packet or RoCE packets.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \
(UINT32_C(0x4) << 12)
/*
- * Indicates that the physical packet is shorter
- * than that claimed by the UDP header length
- * for a UDP packet that is not fragmented.
+ * Indicates that the physical packet is shorter than that
+ * claimed by the UDP header length for a UDP packet that is
+ * not fragmented.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \
(UINT32_C(0x5) << 12)
/*
- * Indicates that TCP header length > IP
- * payload. Valid for TCP packets only.
+ * Indicates that TCP header length > IP payload. Valid for
+ * TCP packets only.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \
(UINT32_C(0x6) << 12)
@@ -1218,1626 +2380,3238 @@ struct rx_pkt_cmpl_hi {
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
(UINT32_C(0x7) << 12)
/*
- * Indicates that TCP option headers result in a
- * TCP header size that does not match data
- * offset in TCP header. Valid for TCP.
+ * Indicates that TCP option headers result in a TCP header
+ * size that does not match data offset in TCP header. Valid
+ * for TCP.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
(UINT32_C(0x8) << 12)
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
- #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define RX_PKT_CMPL_ERRORS_SFT 1
- uint16_t cfa_code;
/*
- * This field identifies the CFA action rule that was used for
- * this packet.
+ * This field identifies the CFA action rule that was used for this
+ * packet.
*/
- uint32_t reorder;
+ uint16_t cfa_code;
+ uint32_t reorder;
/*
- * This value holds the reordering sequence number for the
- * packet. If the reordering sequence is not valid, then this
- * value is zero. The reordering domain for the packet is in the
- * bottom 8 to 10b of the rss_hash value. The bottom 20b of this
- * value contain the ordering domain value for the packet.
+ * This value holds the reordering sequence number for the packet.
+ * If the reordering sequence is not valid, then this value is zero.
+ * The reordering domain for the packet is in the bottom 8 to 10b of
+ * the rss_hash value. The bottom 20b of this value contain the
+ * ordering domain value for the packet.
*/
- #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
- #define RX_PKT_CMPL_REORDER_SFT 0
+ #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
+ #define RX_PKT_CMPL_REORDER_SFT 0
} __attribute__((packed));
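
A minimal sketch of recovering the stripped VLAN tag from the metadata word when meta_format reports VLAN (name illustrative):

/* Returns 0 and fills vid/tpid when meta_format=1 (VLAN), else -1. */
static int
example_rx_vlan(const struct rx_pkt_cmpl_hi *rxcmp1,
		uint16_t *vid, uint16_t *tpid)
{
	if ((rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) !=
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
		return -1;
	*vid = rxcmp1->metadata & RX_PKT_CMPL_METADATA_VID_MASK;
	*tpid = (rxcmp1->metadata & RX_PKT_CMPL_METADATA_TPID_MASK) >>
		RX_PKT_CMPL_METADATA_TPID_SFT;
	return 0;
}
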
-/* RX L2 TPA Start Completion Record (32 bytes split to 2 16-byte struct) */
+/* rx_tpa_start_cmpl (size:128b/16B) */
struct rx_tpa_start_cmpl {
- uint16_t flags_type;
- /*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
- */
- #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_TPA_START_CMPL_TYPE_SFT 0
- /*
- * RX L2 TPA Start Completion: Completion at the
- * beginning of a TPA operation. Length = 32B
- */
- #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_START_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA Start Completion:
+ * Completion at the beginning of a TPA operation.
+ * Length = 32B
+ */
+ #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
+ #define RX_TPA_START_CMPL_TYPE_LAST \
+ RX_TPA_START_CMPL_TYPE_RX_TPA_START
+ #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_START_CMPL_FLAGS_SFT 6
/* This bit will always be '0' for TPA start completions. */
- #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
/* This field indicates how the packet was placed in the buffer. */
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
/*
- * Jumbo: TPA Packet was placed using jumbo
- * algorithm. This means that the first buffer
- * will be filled with data before moving to
- * aggregation buffers. Each aggregation buffer
- * will be filled before moving to the next
- * aggregation buffer.
+ * Jumbo:
+ * TPA Packet was placed using jumbo algorithm. This means
+ * that the first buffer will be filled with data before
+ * moving to aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next aggregation
+ * buffer.
*/
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO \
+ (UINT32_C(0x1) << 7)
/*
- * Header/Data Separation: Packet was placed
- * using Header/Data separation algorithm. The
- * separation location is indicated by the itype
- * field.
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
*/
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS \
+ (UINT32_C(0x2) << 7)
/*
- * GRO/Jumbo: Packet will be placed using
- * GRO/Jumbo where the first packet is filled
- * with data. Subsequent packets will be placed
- * such that any one packet does not span two
- * aggregation buffers unless it starts at the
- * beginning of an aggregation buffer.
+ * GRO/Jumbo:
+ * Packet will be placed using GRO/Jumbo where the first
+ * packet is filled with data. Subsequent packets will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
*/
#define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
(UINT32_C(0x5) << 7)
/*
- * GRO/Header-Data Separation: Packet will be
- * placed using GRO/HDS where the header is in
- * the first packet. Payload of each packet will
- * be placed such that any one packet does not
- * span two aggregation buffers unless it starts
- * at the beginning of an aggregation buffer.
+ * GRO/Header-Data Separation:
+ * Packet will be placed using GRO/HDS where the header
+ * is in the first packet.
+ * Payload of each packet will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
*/
- #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS \
+ (UINT32_C(0x6) << 7)
#define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \
RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS
/* This bit is '1' if the RSS field in this completion is valid. */
- #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
/* unused is 1 b */
- #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
/*
	 * This value indicates what the inner packet type was
	 * determined to be.
*/
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
- /* TCP Packet: Indicates that the packet was IP and TCP. */
- #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
+ /*
+ * TCP Packet:
+ * Indicates that the packet was IP and TCP.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP \
+ (UINT32_C(0x2) << 12)
#define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \
RX_TPA_START_CMPL_FLAGS_ITYPE_TCP
- #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_TPA_START_CMPL_FLAGS_SFT 6
- uint16_t len;
/*
* This value indicates the amount of packet data written to the
* buffer the opaque field in this completion corresponds to.
*/
- uint32_t opaque;
- /*
- * This is a copy of the opaque field from the RX BD this
- * completion corresponds to.
- */
- uint8_t v1;
- /* unused1 is 7 b */
- /*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
- */
- #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
- /* unused1 is 7 b */
- uint8_t rss_hash_type;
- /*
- * This is the RSS hash type for the packet. The value is packed
- * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}
- * . The value of tuple_extrac_op provides the information about
- * what fields the hash was computed on. * 0: The RSS hash was
- * computed over source IP address, destination IP address,
- * source port, and destination port of inner IP and TCP or UDP
- * headers. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 1: The RSS hash was computed over
- * source IP address and destination IP address of inner IP
- * header. Note: For non-tunneled packets, the packet headers
- * are considered inner packet headers for the RSS hash
- * computation purpose. * 2: The RSS hash was computed over
- * source IP address, destination IP address, source port, and
- * destination port of IP and TCP or UDP headers of outer tunnel
- * headers. Note: For non-tunneled packets, this value is not
- * applicable. * 3: The RSS hash was computed over source IP
- * address and destination IP address of IP header of outer
- * tunnel headers. Note: For non-tunneled packets, this value is
- * not applicable. Note that 4-tuples values listed above are
- * applicable for layer 4 protocols supported and enabled for
- * RSS in the hardware, HWRM firmware, and drivers. For example,
- * if RSS hash is supported and enabled for TCP traffic only,
- * then the values of tuple_extract_op corresponding to 4-tuples
- * are only valid for TCP traffic.
- */
- uint16_t agg_id;
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
+ */
+ uint32_t opaque;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint8_t v1;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
+ #define RX_TPA_START_CMPL_LAST RX_TPA_START_CMPL_V1
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}.
+ *
+ * The value of tuple_extract_op provides the information about
+ * what fields the hash was computed on.
+ * * 0: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of inner
+ * IP and TCP or UDP headers. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 1: The RSS hash was computed over source IP address and destination
+ * IP address of inner IP header. Note: For non-tunneled packets,
+ * the packet headers are considered inner packet headers for the RSS
+ * hash computation purpose.
+ * * 2: The RSS hash was computed over source IP address,
+ * destination IP address, source port, and destination port of
+ * IP and TCP or UDP headers of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ * * 3: The RSS hash was computed over source IP address and
+ * destination IP address of IP header of outer tunnel headers.
+ * Note: For non-tunneled packets, this value is not applicable.
+ *
+ * Note that 4-tuples values listed above are applicable
+ * for layer 4 protocols supported and enabled for RSS in the hardware,
+ * HWRM firmware, and drivers. For example, if RSS hash is supported and
+ * enabled for TCP traffic only, then the values of tuple_extract_op
+ * corresponding to 4-tuples are only valid for TCP traffic.
+ */
+ uint8_t rss_hash_type;
/*
* This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
+ * with. Use this number to correlate the TPA start completion
* with the TPA end completion.
*/
+ uint16_t agg_id;
/* unused2 is 9 b */
+ #define RX_TPA_START_CMPL_UNUSED2_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_UNUSED2_SFT 0
/*
* This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
+ * with. Use this number to correlate the TPA start completion
* with the TPA end completion.
*/
- #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
- #define RX_TPA_START_CMPL_AGG_ID_SFT 9
- uint32_t rss_hash;
+ #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
+ #define RX_TPA_START_CMPL_AGG_ID_SFT 9
/*
* This value is the RSS hash value calculated for the packet
* based on the mode bits and key value in the VNIC.
*/
+ uint32_t rss_hash;
} __attribute__((packed));
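
As a minimal sketch of how the MASK/SFT pairs above are meant to be consumed (mask first, then shift), assuming the definitions above are in scope; the helpers below are hypothetical, not part of this header, and the little-endian conversion a real driver would perform is omitted:

/* Recover the aggregation ID used to pair this TPA start completion
 * with its TPA end completion. */
static inline uint16_t
rx_tpa_start_agg_id(const struct rx_tpa_start_cmpl *cmpl)
{
	return (cmpl->agg_id & RX_TPA_START_CMPL_AGG_ID_MASK) >>
	       RX_TPA_START_CMPL_AGG_ID_SFT;
}

/* One reading of the rss_hash_type packing documented above, assuming
 * MSB-first order: bits 7:6 carry tuple_extract_op[1:0], bits 5:1 carry
 * rss_profile_id[4:0], and bit 0 carries tuple_extract_op[2]. */
static inline uint8_t
rx_tpa_start_tuple_extract_op(uint8_t rss_hash_type)
{
	return (uint8_t)(((rss_hash_type & 0x1) << 2) | (rss_hash_type >> 6));
}
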
-/* last 16 bytes of RX L2 TPA Start Completion Record */
+/* rx_tpa_start_cmpl_hi (size:128b/16B) */
struct rx_tpa_start_cmpl_hi {
- uint32_t flags2;
+ uint32_t flags2;
/*
* This indicates that the ip checksum was calculated for the
* inner packet and that the sum passed for all segments
* included in the aggregation.
*/
- #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
/*
* This indicates that the TCP, UDP or ICMP checksum was
- * calculated for the inner packet and that the sum passed for
- * all segments included in the aggregation.
+ * calculated for the inner packet and that the sum passed
+ * for all segments included in the aggregation.
*/
- #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
/*
* This indicates that the ip checksum was calculated for the
* tunnel header and that the sum passed for all segments
* included in the aggregation.
*/
- #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
/*
- * This indicates that the UDP checksum was calculated for the
- * tunnel packet and that the sum passed for all segments
- * included in the aggregation.
+ * This indicates that the UDP checksum was
+ * calculated for the tunnel packet and that the sum passed for
+ * all segments included in the aggregation.
*/
- #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
/* This value indicates what format the metadata field is. */
#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
- /* No metadata informtaion. Value is zero. */
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
- /*
- * The metadata field contains the VLAN tag and
- * TPID value. - metadata[11:0] contains the
- * vlan VID value. - metadata[12] contains the
- * vlan DE value. - metadata[15:13] contains the
- * vlan PRI value. - metadata[31:16] contains
- * the vlan TPID value.
- */
- #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Value is zero. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE \
+ (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and TPID value.
+ * - metadata[11:0] contains the vlan VID value.
+ * - metadata[12] contains the vlan DE value.
+ * - metadata[15:13] contains the vlan PRI value.
+ * - metadata[31:16] contains the vlan TPID value.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN \
+ (UINT32_C(0x1) << 4)
#define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \
RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN
/*
- * This field indicates the IP type for the inner-most IP
- * header. A value of '0' indicates IPv4. A value of '1'
- * indicates IPv6.
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
*/
- #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
- uint32_t metadata;
+ #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
/*
- * This is data from the CFA block as indicated by the
- * meta_format field.
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
*/
+ uint32_t metadata;
/* When meta_format=1, this value is the VLAN VID. */
- #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
- #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
+ #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
/* When meta_format=1, this value is the VLAN DE. */
- #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
+ #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
/* When meta_format=1, this value is the VLAN PRI. */
#define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
- #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
+ #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
/* When meta_format=1, this value is the VLAN TPID. */
- #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
- #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
- uint16_t v2;
- /* unused4 is 15 b */
+ #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
+ uint16_t v2;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
- /* unused4 is 15 b */
- uint16_t cfa_code;
+ #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
/*
- * This field identifies the CFA action rule that was used for
- * this packet.
+ * This field identifies the CFA action rule that was used for this
+ * packet.
*/
- uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
+ uint16_t cfa_code;
/*
- * This is the size in bytes of the inner most L4 header. This
- * can be subtracted from the payload_offset to determine the
- * start of the inner most L4 header.
+ * This is the size in bytes of the inner most L4 header.
+ * This can be subtracted from the payload_offset to determine
+ * the start of the inner most L4 header.
*/
+ uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
/*
- * This is the offset from the beginning of the packet in bytes
- * for the outer L3 header. If there is no outer L3 header, then
- * this value is zero.
+ * This is the offset from the beginning of the packet in bytes for
+ * the outer L3 header. If there is no outer L3 header, then this
+ * value is zero.
*/
- #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
- #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
/*
- * This is the offset from the beginning of the packet in bytes
- * for the inner most L2 header.
+ * This is the offset from the beginning of the packet in bytes for
+ * the inner most L2 header.
*/
- #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
- #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
/*
- * This is the offset from the beginning of the packet in bytes
- * for the inner most L3 header.
+ * This is the offset from the beginning of the packet in bytes for
+ * the inner most L3 header.
*/
- #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
- #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
/*
- * This is the size in bytes of the inner most L4 header. This
- * can be subtracted from the payload_offset to determine the
- * start of the inner most L4 header.
+ * This is the size in bytes of the inner most L4 header.
+ * This can be subtracted from the payload_offset to determine
+ * the start of the inner most L4 header.
*/
- #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
- #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
} __attribute__((packed));
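
A short sketch of how the meta_format and metadata fields above combine, again assuming the surrounding definitions are in scope; the helper name is hypothetical and byte-swapping is omitted:

/* Fill *vid with the VLAN VID when flags2 reports VLAN metadata
 * (meta_format == 1); return -1 when no VLAN tag is present. */
static inline int
rx_tpa_start_vlan_vid(const struct rx_tpa_start_cmpl_hi *hi, uint16_t *vid)
{
	if ((hi->flags2 & RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK) !=
	    RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)
		return -1;
	*vid = (uint16_t)(hi->metadata & RX_TPA_START_CMPL_METADATA_VID_MASK);
	return 0;
}
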
-/* RX TPA End Completion Record (32 bytes split to 2 16-byte struct) */
+/* rx_tpa_end_cmpl (size:128b/16B) */
struct rx_tpa_end_cmpl {
- uint16_t flags_type;
+ uint16_t flags_type;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_TPA_END_CMPL_TYPE_SFT 0
+ #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_END_CMPL_TYPE_SFT 0
/*
- * RX L2 TPA End Completion: Completion at the
- * end of a TPA operation. Length = 32B
+ * RX L2 TPA End Completion:
+ * Completion at the end of a TPA operation.
+ * Length = 32B
*/
- #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ #define RX_TPA_END_CMPL_TYPE_LAST \
+ RX_TPA_END_CMPL_TYPE_RX_TPA_END
+ #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_END_CMPL_FLAGS_SFT 6
/*
- * When this bit is '1', it indicates a packet that has an error
- * of some type. Type of error is indicated in error_flags.
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * error_flags.
*/
- #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
/* This field indicates how the packet was placed in the buffer. */
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo:
+ * TPA Packet was placed using jumbo algorithm. This means
+ * that the first buffer will be filled with data before
+ * moving to aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next aggregation
+ * buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO \
+ (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation:
+ * Packet was placed using Header/Data separation algorithm.
+ * The separation location is indicated by the itype field.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS \
+ (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo:
+ * Packet will be placed using GRO/Jumbo where the first
+ * packet is filled with data. Subsequent packets will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
+ (UINT32_C(0x5) << 7)
/*
- * Jumbo: TPA Packet was placed using jumbo
- * algorithm. This means that the first buffer
- * will be filled with data before moving to
- * aggregation buffers. Each aggregation buffer
- * will be filled before moving to the next
- * aggregation buffer.
+ * GRO/Header-Data Separation:
+ * Packet will be placed using GRO/HDS where the header
+ * is in the first packet.
+ * Payload of each packet will be
+ * placed such that any one packet does not span two
+ * aggregation buffers unless it starts at the beginning of
+ * an aggregation buffer.
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS \
+ (UINT32_C(0x6) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* unused is 2 b */
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
/*
- * Header/Data Separation: Packet was placed
- * using Header/Data separation algorithm. The
- * separation location is indicated by the itype
- * field.
+ * This value indicates what the inner packet determined for the
+ * packet was.
+ * - 2 TCP Packet
+ * Indicates that the packet was IP and TCP. This indicates
+ * that the ip_cs field is valid and that the tcp_udp_cs
+ * field is valid and contains the TCP checksum.
+ * This also indicates that the payload_offset field is valid.
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
/*
- * GRO/Jumbo: Packet will be placed using
- * GRO/Jumbo where the first packet is filled
- * with data. Subsequent packets will be placed
- * such that any one packet does not span two
- * aggregation buffers unless it starts at the
- * beginning of an aggregation buffer.
+ * This value is zero for TPA End completions.
+ * There is no data in the buffer that corresponds to the opaque
+ * value in this completion.
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO (UINT32_C(0x5) << 7)
+ uint16_t len;
/*
- * GRO/Header-Data Separation: Packet will be
- * placed using GRO/HDS where the header is in
- * the first packet. Payload of each packet will
- * be placed such that any one packet does not
- * span two aggregation buffers unless it starts
- * at the beginning of an aggregation buffer.
+ * This is a copy of the opaque field from the RX BD this completion
+ * corresponds to.
*/
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
- #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
- RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
- /* unused is 2 b */
- #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
- #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
+ uint32_t opaque;
/*
- * This value indicates what the inner packet determined for the
- * packet was. - 2 TCP Packet Indicates that the packet was IP
- * and TCP. This indicates that the ip_cs field is valid and
- * that the tcp_udp_cs field is valid and contains the TCP
- * checksum. This also indicates that the payload_offset field
- * is valid.
- */
- #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
- #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_TPA_END_CMPL_FLAGS_SFT 6
- uint16_t len;
- /*
- * This value is zero for TPA End completions. There is no data
- * in the buffer that corresponds to the opaque value in this
- * completion.
- */
- uint32_t opaque;
- /*
- * This is a copy of the opaque field from the RX BD this
- * completion corresponds to.
- */
- uint8_t agg_bufs_v1;
- /* unused1 is 1 b */
- /*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
- */
- #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
- /*
- * This value is the number of aggregation buffers that follow
- * this entry in the completion ring that are a part of this
- * aggregation packet. If the value is zero, then the packet is
- * completely contained in the buffer space provided in the
- * aggregation start completion.
- */
- #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
- #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
- /* unused1 is 1 b */
- uint8_t tpa_segs;
- /* This value is the number of segments in the TPA operation. */
- uint8_t payload_offset;
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ uint8_t agg_bufs_v1;
/*
- * This value indicates the offset in bytes from the beginning
- * of the packet where the inner payload starts. This value is
- * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
- * indicates an offset of 256 bytes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t agg_id;
+ #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
/*
- * This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
- * with the TPA end completion.
+ * This value is the number of aggregation buffers that follow this
+ * entry in the completion ring that are a part of this aggregation
+ * packet.
+ * If the value is zero, then the packet is completely contained
+ * in the buffer space provided in the aggregation start completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
+ #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
+ /* This value is the number of segments in the TPA operation. */
+ uint8_t tpa_segs;
+ /*
+ * This value indicates the offset in bytes from the beginning of the packet
+ * where the inner payload starts. This value is valid for TCP, UDP,
+ * FCoE, and RoCE packets.
+ *
+ * A value of zero indicates an offset of 256 bytes.
*/
+ uint8_t payload_offset;
+ uint8_t agg_id;
/* unused2 is 1 b */
+ #define RX_TPA_END_CMPL_UNUSED2 UINT32_C(0x1)
/*
* This is the aggregation ID that the completion is associated
- * with. Use this number to correlate the TPA start completion
+ * with. Use this number to correlate the TPA start completion
* with the TPA end completion.
*/
- #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
- #define RX_TPA_END_CMPL_AGG_ID_SFT 1
- uint32_t tsdelta;
+ #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
+ #define RX_TPA_END_CMPL_AGG_ID_SFT 1
/*
- * For non-GRO packets, this value is the timestamp delta
- * between earliest and latest timestamp values for TPA packet.
- * If packets were not time stamped, then delta will be zero.
+ * For non-GRO packets, this value is the
+ * timestamp delta between earliest and latest timestamp values for
+ * TPA packet. If packets were not time stamped, then delta will be
+ * zero.
+ *
* For GRO packets, this field is zero except for the following
- * sub-fields. - tsdelta[31] Timestamp present indication. When
- * '0', no Timestamp option is in the packet. When '1', then a
- * Timestamp option is present in the packet.
+ * sub-fields.
+ * - tsdelta[31]
+ * Timestamp present indication. When '0', no Timestamp
+ * option is in the packet. When '1', then a Timestamp
+ * option is present in the packet.
*/
+ uint32_t tsdelta;
} __attribute__((packed));
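
The V1 bit and the agg_bufs count above share a single byte; a minimal sketch of how a ring consumer separates them (hypothetical helpers, endianness conversion omitted). The consumer tracks which phase it expects, since the NIC alternates the bit on each pass:

/* A record is valid only when its V1 bit matches the phase the
 * consumer expects for the current pass over the ring. */
static inline int
rx_tpa_end_valid(const struct rx_tpa_end_cmpl *cmpl, uint8_t expected_v1)
{
	return (cmpl->agg_bufs_v1 & RX_TPA_END_CMPL_V1) == expected_v1;
}

/* Number of aggregation buffer completions following this record;
 * zero means the packet fit in the start completion's buffer space. */
static inline unsigned int
rx_tpa_end_agg_bufs(const struct rx_tpa_end_cmpl *cmpl)
{
	return (cmpl->agg_bufs_v1 & RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
	       RX_TPA_END_CMPL_AGG_BUFS_SFT;
}
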
-/* last 16 bytes of RX TPA End Completion Record */
+/* rx_tpa_end_cmpl_hi (size:128b/16B) */
struct rx_tpa_end_cmpl_hi {
- uint32_t tpa_dup_acks;
- /* unused3 is 28 b */
/*
* This value is the number of duplicate ACKs that have been
* received as part of the TPA operation.
*/
- #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
- #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
- /* unused3 is 28 b */
- uint16_t tpa_seg_len;
- /*
- * This value is the valid when TPA completion is active. It
- * indicates the length of the longest segment of the TPA
- * operation for LRO mode and the length of the first segment in
- * GRO mode. This value may be used by GRO software to re-
- * construct the original packet stream from the TPA packet.
- * This is the length of all but the last segment for GRO. In
- * LRO mode this value may be used to indicate MSS size to the
- * stack.
- */
- uint16_t unused_3;
+ uint32_t tpa_dup_acks;
+ /*
+ * This value is the number of duplicate ACKs that have been
+ * received as part of the TPA operation.
+ */
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
+ /*
+ * This value is valid when TPA completion is active. It
+ * indicates the length of the longest segment of the TPA operation
+ * for LRO mode and the length of the first segment in GRO mode.
+ *
+ * This value may be used by GRO software to reconstruct the original
+ * packet stream from the TPA packet. This is the length of all
+ * but the last segment for GRO. In LRO mode this value may be used
+ * to indicate MSS size to the stack.
+ */
+ uint16_t tpa_seg_len;
/* unused4 is 16 b */
- uint16_t errors_v2;
+ uint16_t unused3;
+ uint16_t errors_v2;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define RX_TPA_END_CMPL_ERRORS_SFT 1
/*
* This error indicates that there was some sort of problem with
* the BDs for the packet that was found after part of the
- * packet was already placed. The packet should be treated as
+ * packet was already placed. The packet should be treated as
* invalid.
*/
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
- #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
/*
- * This error occurs when there is a fatal HW
- * problem in the chip only. It indicates that
- * there were not BDs on chip but that there was
- * adequate reservation. provided by the TPA
- * block.
+ * This error occurs when there is a fatal HW problem in
+ * the chip only. It indicates that there were no
+ * BDs on chip even though adequate reservation was
+ * provided by the TPA block.
*/
#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
(UINT32_C(0x2) << 1)
/*
- * This error occurs when TPA block was not
- * configured to reserve adequate BDs for TPA
- * operations on this RX ring. All data for the
- * TPA operation was not placed. This error can
- * also be generated when the number of segments
- * is not programmed correctly in TPA and the 33
- * total aggregation buffers allowed for the TPA
+ * This error occurs when TPA block was not configured to
+ * reserve adequate BDs for TPA operations on this RX
+ * ring. All data for the TPA operation was not placed.
+ *
+ * This error can also be generated when the number of
+ * segments is not programmed correctly in TPA and the
+ * 33 total aggregation buffers allowed for the TPA
* operation has been exceeded.
*/
#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \
(UINT32_C(0x4) << 1)
#define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \
RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR
- #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define RX_TPA_END_CMPL_ERRORS_SFT 1
- uint16_t unused_4;
/* unused5 is 16 b */
- uint32_t start_opaque;
+ uint16_t unused_4;
/*
* This is the opaque value that was completed for the TPA start
* completion that corresponds to this TPA end completion.
*/
+ uint32_t start_opaque;
} __attribute__((packed));
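
A sketch of the error check implied by the errors_v2 layout above (hypothetical helper, assuming the definitions above):

/* The packet must be treated as invalid when any buffer error is set;
 * the specific cause sits in the BUFFER_ERROR sub-field. */
static inline int
rx_tpa_end_buffer_error(const struct rx_tpa_end_cmpl_hi *hi)
{
	return (hi->errors_v2 &
		RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK) != 0;
}
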
-/* HWRM Forwarded Request (16 bytes) */
+/* rx_abuf_cmpl (size:128b/16B) */
+struct rx_abuf_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define RX_ABUF_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_ABUF_CMPL_TYPE_SFT 0
+ /*
+ * RX Aggregation Buffer completion:
+ * Completion of an L2 aggregation buffer in support of
+ * TPA, HDS, or Jumbo packet completion. Length = 16B
+ */
+ #define RX_ABUF_CMPL_TYPE_RX_AGG UINT32_C(0x12)
+ #define RX_ABUF_CMPL_TYPE_LAST RX_ABUF_CMPL_TYPE_RX_AGG
+ /*
+ * This is the length of the data for the packet stored in this
+ * aggregation buffer identified by the opaque value. This does not
+ * include the length of any
+ * data placed in other aggregation BDs or in the packet or buffer
+ * BDs. This length does not include any space added due to
+ * hdr_offset register during HDS placement mode.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this aggregation
+ * buffer corresponds to.
+ */
+ uint32_t opaque;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define RX_ABUF_CMPL_V UINT32_C(0x1)
+ /* unused3 is 32 b */
+ uint32_t unused_2;
+} __attribute__((packed));
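
Every completion type above repeats the same length convention in its type comment; a one-line sketch of it (hypothetical helper):

/* Even type values are 16B records, odd values are 32B records, so a
 * ring consumer can derive its stride directly from the type field. */
static inline unsigned int
cmpl_record_len(uint16_t type_field)
{
	return (type_field & 0x1) ? 32 : 16;
}
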
+
+/* eject_cmpl (size:128b/16B) */
+struct eject_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define EJECT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define EJECT_CMPL_TYPE_SFT 0
+ /*
+ * Statistics Ejection Completion:
+ * Completion of statistics data ejection buffer.
+ * Length = 16B
+ */
+ #define EJECT_CMPL_TYPE_STAT_EJECT UINT32_C(0x1a)
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ /*
+ * This is the length of the statistics data stored in this
+ * buffer.
+ */
+ uint16_t len;
+ /*
+ * This is a copy of the opaque field from the RX BD this ejection
+ * buffer corresponds to.
+ */
+ uint32_t opaque;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define EJECT_CMPL_V UINT32_C(0x1)
+ /* unused3 is 32 b */
+ uint32_t unused_2;
+} __attribute__((packed));
+
+/* hwrm_cmpl (size:128b/16B) */
+struct hwrm_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_CMPL_TYPE_SFT 0
+ /*
+ * HWRM Command Completion:
+ * Completion of an HWRM command.
+ */
+ #define HWRM_CMPL_TYPE_HWRM_DONE UINT32_C(0x20)
+ #define HWRM_CMPL_TYPE_LAST HWRM_CMPL_TYPE_HWRM_DONE
+ /* This is the sequence_id of the HWRM command that has completed. */
+ uint16_t sequence_id;
+ /* unused2 is 32 b */
+ uint32_t unused_1;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_CMPL_V UINT32_C(0x1)
+ /* unused4 is 32 b */
+ uint32_t unused_3;
+} __attribute__((packed));
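
A sketch of how the sequence_id above closes the loop on a command (hypothetical helper; a real driver would also byte-swap the fields and check the V bit first):

/* Match an HWRM done completion back to the request that carried the
 * given sequence number. */
static inline int
hwrm_cmpl_matches(const struct hwrm_cmpl *cmpl, uint16_t seq)
{
	return (cmpl->type & HWRM_CMPL_TYPE_MASK) ==
	       HWRM_CMPL_TYPE_HWRM_DONE &&
	       cmpl->sequence_id == seq;
}
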
+
+/* hwrm_fwd_req_cmpl (size:128b/16B) */
struct hwrm_fwd_req_cmpl {
- uint16_t req_len_type;
- /* Length of forwarded request in bytes. */
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ uint16_t req_len_type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define HWRM_FWD_INPUT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define HWRM_FWD_INPUT_CMPL_TYPE_SFT 0
+ #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
/* Forwarded HWRM Request */
- #define HWRM_FWD_INPUT_CMPL_TYPE_HWRM_FWD_INPUT UINT32_C(0x22)
+ #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
+ #define HWRM_FWD_REQ_CMPL_TYPE_LAST \
+ HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
/* Length of forwarded request in bytes. */
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
- uint16_t source_id;
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
/*
- * Source ID of this request. Typically used in forwarding
- * requests and responses. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
- * HWRM
+ * Source ID of this request.
+ * Typically used in forwarding requests and responses.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
*/
- uint32_t unused_0;
+ uint16_t source_id;
/* unused1 is 32 b */
- uint32_t req_buf_addr_v[2];
+ uint32_t unused0;
/* Address of forwarded request. */
+ uint32_t req_buf_addr_v[2];
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define HWRM_FWD_INPUT_CMPL_V UINT32_C(0x1)
+ #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1)
/* Address of forwarded request. */
- #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
- #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
+ #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
} __attribute__((packed));
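
One plausible reading of req_buf_addr_v above, assuming the 64-bit buffer address simply shares bit 0 of its low word with the V bit; this interpretation is not confirmed by the header itself, so the helper is strictly illustrative:

/* Rebuild the forwarded-request buffer address from the two 32-bit
 * words, masking out the valid bit carried in bit 0 of the low word. */
static inline uint64_t
hwrm_fwd_req_buf_addr(const struct hwrm_fwd_req_cmpl *cmpl)
{
	uint64_t lo = cmpl->req_buf_addr_v[0] &
		      HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK;

	return ((uint64_t)cmpl->req_buf_addr_v[1] << 32) | lo;
}
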
-/* HWRM Asynchronous Event Completion Record (16 bytes) */
+/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+struct hwrm_fwd_resp_cmpl {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_FWD_RESP_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_FWD_RESP_CMPL_TYPE_SFT 0
+ /* Forwarded HWRM Response */
+ #define HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
+ #define HWRM_FWD_RESP_CMPL_TYPE_LAST \
+ HWRM_FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
+ /*
+ * Source ID of this response.
+ * Typically used in forwarding requests and responses.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t source_id;
+ /* Length of forwarded response in bytes. */
+ uint16_t resp_len;
+ /* unused2 is 16 b */
+ uint16_t unused_1;
+ /* Address of forwarded response. */
+ uint32_t resp_buf_addr_v[2];
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_FWD_RESP_CMPL_V UINT32_C(0x1)
+ /* Address of forwarded response. */
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_MASK UINT32_C(0xfffffffe)
+ #define HWRM_FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl (size:128b/16B) */
struct hwrm_async_event_cmpl {
- uint16_t type;
- /* unused1 is 10 b */
+ uint16_t type;
/*
- * This field indicates the exact type of the completion. By
- * convention, the LSB identifies the length of the record in
- * 16B units. Even values indicate 16B records. Odd values
- * indicate 32B records.
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
*/
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
/* HWRM Asynchronous Event Information */
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
- /* unused1 is 10 b */
- uint16_t event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
/* Identifiers of events. */
+ uint16_t event_id;
/* Link status changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE UINT32_C(0x0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ UINT32_C(0x0)
/* Link MTU changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \
+ UINT32_C(0x1)
/* Link speed changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \
+ UINT32_C(0x2)
/* DCB Configuration changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \
+ UINT32_C(0x3)
/* Port connection not allowed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED UINT32_C(0x4)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ UINT32_C(0x4)
/* Link speed configuration was not allowed */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
UINT32_C(0x5)
/* Link speed configuration change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ UINT32_C(0x6)
/* Port PHY configuration change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE UINT32_C(0x7)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE \
+ UINT32_C(0x7)
/* Function driver unloaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \
+ UINT32_C(0x10)
/* Function driver loaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD UINT32_C(0x11)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \
+ UINT32_C(0x11)
/* Function FLR related processing has completed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT UINT32_C(0x12)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT \
+ UINT32_C(0x12)
/* PF driver unloaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD UINT32_C(0x20)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ UINT32_C(0x20)
/* PF driver loaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD UINT32_C(0x21)
- /* VF Function Level Reset (FLR) */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR UINT32_C(0x30)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \
+ UINT32_C(0x21)
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR \
+ UINT32_C(0x30)
/* VF MAC Address Change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE UINT32_C(0x31)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \
+ UINT32_C(0x31)
/* PF-VF communication channel status change. */
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
UINT32_C(0x32)
/* VF Configuration Change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ UINT32_C(0x33)
/* LLFC/PFC Configuration Change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE UINT32_C(0x34)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \
+ UINT32_C(0x34)
+ /* Default VNIC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE \
+ UINT32_C(0x35)
/* HWRM Error */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR UINT32_C(0xff)
- uint32_t event_data2;
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
/* Event specific data */
- uint8_t opaque_v;
- /* opaque is 7 b */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This value is written by the NIC such that it will be
- * different for each pass through the completion queue. The
- * even passes will write 1. The odd passes will write 0.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
/* opaque is 7 b */
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- uint8_t timestamp_lo;
- /* 8-lsb timestamp from POR (100-msec resolution) */
- uint16_t timestamp_hi;
- /* 16-lsb timestamp from POR (100-msec resolution) */
- uint32_t event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
/* Event specific data */
+ uint32_t event_data1;
} __attribute__((packed));
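
A sketch of an event dispatcher over the event_id values above (hypothetical; only two of the IDs are handled here, the rest follow the same pattern):

static inline void
hwrm_async_event_dispatch(const struct hwrm_async_event_cmpl *ev)
{
	switch (ev->event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		/* refresh cached link state and notify the application */
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		/* firmware-level error; typically logged and escalated */
		break;
	default:
		break;
	}
}
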
-/* hwrm_ver_get */
-/*
- * Description: This function is called by a driver to determine the HWRM
- * interface version supported by the HWRM firmware, the version of HWRM
- * firmware implementation, the name of HWRM firmware, the versions of other
- * embedded firmwares, and the names of other embedded firmwares, etc. Any
- * interface or firmware version with major = 0, minor = 0, and update = 0 shall
- * be considered an invalid version.
- */
-/* Input (24 bytes) */
-struct hwrm_ver_get_input {
- uint16_t req_type;
+/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_status_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link status changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE \
+ UINT32_C(0x0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint16_t cmpl_ring;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates link status change */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE \
+ UINT32_C(0x1)
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * If this bit is set to 0, then it indicates that the link
+ * was up and it went down.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN \
+ UINT32_C(0x0)
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * If this bit is set to 1, then it indicates that the link
+ * was down and it went up.
*/
- uint64_t resp_addr;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP \
+ UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+ /* Indicates the physical port this link status change occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0xe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT \
+ 1
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 4
+ /* Indicates the physical function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0xff00000)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT \
+ 20
+} __attribute__((packed));
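
A sketch of decoding event_data1 of a link status change per the masks above (hypothetical helper): bit 0 carries the new state (1 = up) and bits 19:4 the port ID.

static inline void
decode_link_status_change(uint32_t event_data1, int *link_up,
			  uint16_t *port_id)
{
	*link_up = !!(event_data1 &
	  HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE);
	*port_id = (uint16_t)((event_data1 &
	  HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK)
	  >> HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT);
}
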
+
+/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_mtu_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link MTU changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE \
+ UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_intf_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* The new MTU of the link in bytes. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed changed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of HWRM interface
- * specification supported by the driver HWRM implementation.
- * The interface major version is intended to change only when
- * non backward compatible changes are made to the HWRM
- * interface specification.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_intf_min;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
/*
- * This field represents the minor version of HWRM interface
- * specification supported by the driver HWRM implementation. A
- * change in interface minor version is used to reflect
- * significant backward compatible modification to HWRM
- * interface specification. This can be due to addition or
- * removal of functionality. HWRM interface specifications with
- * the same major version but different minor versions are
- * compatible.
+ * When this bit is '1', the link was forced to the
+ * force_link_speed value.
*/
- uint8_t hwrm_intf_upd;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE \
+ UINT32_C(0x1)
+ /* The new link speed in 100 Mbps units. */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK \
+ UINT32_C(0xfffe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT \
+ 1
+ /* 100Mb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB \
+ (UINT32_C(0x1) << 1)
+ /* 1Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB \
+ (UINT32_C(0xa) << 1)
+ /* 2Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB \
+ (UINT32_C(0x14) << 1)
+ /* 2.5Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB \
+ (UINT32_C(0x19) << 1)
+ /* 10Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB \
+ (UINT32_C(0x64) << 1)
+ /* 20Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB \
+ (UINT32_C(0xc8) << 1)
+ /* 25Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB \
+ (UINT32_C(0xfa) << 1)
+ /* 40Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB \
+ (UINT32_C(0x190) << 1)
+ /* 50Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB \
+ (UINT32_C(0x1f4) << 1)
+ /* 100Gb link speed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB \
+ (UINT32_C(0x3e8) << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 16
+} __attribute__((packed));
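
A sketch of converting the new-speed sub-field above into plain Mbps (hypothetical helper):

/* The new speed is reported in 100 Mbps units in bits 15:1 of
 * event_data1; e.g. the 25GB encoding (0xfa = 250 units) yields 25000. */
static inline uint32_t
link_speed_change_mbps(uint32_t event_data1)
{
	uint32_t units = (event_data1 &
	  HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK)
	  >> HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT;

	return units * 100;
}
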
+
+/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_dcb_config_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* DCB Configuration changed */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ /* ETS configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS \
+ UINT32_C(0x1)
+ /* PFC configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC \
+ UINT32_C(0x2)
+ /* APP configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP \
+ UINT32_C(0x4)
+ uint8_t opaque_v;
/*
- * This field represents the update version of HWRM interface
- * specification supported by the driver HWRM implementation.
- * The interface update version is used to reflect minor changes
- * or bug fixes to a released HWRM interface specification.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
+ /* Priority recommended for RoCE traffic */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT \
+ 16
+ /* none is 255 */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE \
+ (UINT32_C(0xff) << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
+ /* Priority recommended for L2 traffic */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT \
+ 24
+ /* none is 255 */
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE \
+ (UINT32_C(0xff) << 24)
+ #define HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_conn_not_allowed {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Port connection not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ UINT32_C(0x4)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t unused_0[5];
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \
+ 0
+ /*
+ * This value indicates the current port level enforcement policy
+ * for the optics module when there is an optical module mismatch
+ * and the port is not connected.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT \
+ 16
+ /* No enforcement */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE \
+ (UINT32_C(0x0) << 16)
+ /* Disable Transmit side Laser. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX \
+ (UINT32_C(0x1) << 16)
+ /* Raise a warning message. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG \
+ (UINT32_C(0x2) << 16)
+ /* Power down the module. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN \
+ (UINT32_C(0x3) << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
} __attribute__((packed));
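The opaque_v comment above describes the usual completion-ring validity scheme: the V bit the hardware writes flips on every pass through the queue, so a consumer compares it against the value it currently expects and toggles that expectation at each wrap. A hedged consumer-side sketch (the wrap bookkeeping is assumed to live in the caller):

#include <stdbool.h>
#include <stdint.h>

/* True when this completion was written on the pass the consumer is
 * expecting; the caller flips expected_v each time its read index
 * wraps around the completion queue. */
static inline bool
port_conn_cmpl_valid(const struct hwrm_async_event_cmpl_port_conn_not_allowed *c,
		     bool expected_v)
{
	bool v = (c->opaque_v &
		  HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V) != 0;

	return v == expected_v;
}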
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
- uint16_t error_code;
+/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed configuration was not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
+ UINT32_C(0x5)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Link speed configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_intf_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
/*
- * This field represents the major version of HWRM interface
- * specification supported by the HWRM implementation. The
- * interface major version is intended to change only when non
- * backward compatible changes are made to the HWRM interface
- * specification. A HWRM implementation that is compliant with
- * this specification shall provide value of 1 in this field.
+ * If set to 1, it indicates that the supported link speeds
+ * configuration on the port has changed.
+ * If set to 0, then there is no change in supported link speeds
+ * configuration.
*/
- uint8_t hwrm_intf_min;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE \
+ UINT32_C(0x10000)
/*
- * This field represents the minor version of HWRM interface
- * specification supported by the HWRM implementation. A change
- * in interface minor version is used to reflect significant
- * backward compatible modification to HWRM interface
- * specification. This can be due to addition or removal of
- * functionality. HWRM interface specifications with the same
- * major version but different minor versions are compatible. A
- * HWRM implementation that is compliant with this specification
- * shall provide value of 2 in this field.
+ * If set to 1, it indicates that the link speed configuration
+ * on the port has become illegal or invalid.
+ * If set to 0, then the link speed configuration on the port is
+ * legal or valid.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG \
+ UINT32_C(0x20000)
+} __attribute__((packed));
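Both event_data1 indications above are single-bit flags, so handling the event reduces to two bitwise tests. A sketch assuming the macros above; the reactions in the comments are placeholders:

#include <stdint.h>

static void
handle_link_speed_cfg_change(uint32_t event_data1)
{
	if (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE) {
		/* Supported speeds changed: re-query port capabilities. */
	}
	if (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG) {
		/* The configured speed is no longer legal on this port. */
	}
}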
+
+/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_port_phy_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Port PHY configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE \
+ UINT32_C(0x7)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_intf_upd;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 0
/*
- * This field represents the update version of HWRM interface
- * specification supported by the HWRM implementation. The
- * interface update version is used to reflect minor changes or
- * bug fixes to a released HWRM interface specification. A HWRM
- * implementation that is compliant with this specification
- * shall provide value of 2 in this field.
+ * If set to 1, it indicates that the FEC
+ * configuration on the port has changed.
+ * If set to 0, then there is no change in FEC configuration.
*/
- uint8_t hwrm_intf_rsvd;
- uint8_t hwrm_fw_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE \
+ UINT32_C(0x10000)
/*
- * This field represents the major version of HWRM firmware. A
- * change in firmware major version represents a major firmware
- * release.
+ * If set to 1, it indicates that the EEE configuration
+ * on the port has changed.
+ * If set to 0, then there is no change in EEE configuration
+ * on the port.
*/
- uint8_t hwrm_fw_min;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE \
+ UINT32_C(0x20000)
/*
- * This field represents the minor version of HWRM firmware. A
- * change in firmware minor version represents significant
- * firmware functionality changes.
+ * If set to 1, it indicates that the pause configuration
+ * on the PHY has changed.
+ * If set to 0, then there is no change in the pause
+ * configuration on the PHY.
*/
- uint8_t hwrm_fw_bld;
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE \
+ UINT32_C(0x40000)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_drvr_unload {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD \
+ UINT32_C(0x10)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the build version of HWRM firmware. A
- * change in firmware build version represents bug fixes to a
- * released firmware.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t hwrm_fw_rsvd;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_drvr_load {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD \
+ UINT32_C(0x11)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version of the HWRM
- * firmware.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t mgmt_fw_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */
+struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* Function FLR related processing has completed */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT \
+ UINT32_C(0x12)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of mgmt firmware. A
- * change in major version represents a major release.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t mgmt_fw_min;
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Function ID */
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_drvr_unload {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD \
+ UINT32_C(0x20)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the minor version of mgmt firmware. A
- * change in minor version represents significant functionality
- * changes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t mgmt_fw_bld;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+	/* Indicates the physical port this PF belongs to. */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x70000)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_drvr_load {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD \
+ UINT32_C(0x21)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the build version of mgmt firmware. A
- * change in update version represents bug fixes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t mgmt_fw_rsvd;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* PF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+	/* Indicates the physical port this PF belongs to. */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x70000)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_flr {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR UINT32_C(0x30)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t netctrl_fw_maj;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+	/* Indicates the physical function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0xff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_mac_addr_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF MAC Address Change */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE \
+ UINT32_C(0x31)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of network control
- * firmware. A change in major version represents a major
- * release.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t netctrl_fw_min;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF ID */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT \
+ 0
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* PF-VF communication channel status change. */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
+ UINT32_C(0x32)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the minor version of network control
- * firmware. A change in minor version represents significant
- * functionality changes.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t netctrl_fw_bld;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
/*
- * This field represents the build version of network control
- * firmware. A change in update version represents bug fixes.
+	 * If this bit is set to 1, then it indicates that the PF-VF
+	 * communication was lost and is now established.
+	 * If this bit is set to 0, then it indicates that the PF-VF
+	 * communication was established and is now lost.
*/
- uint8_t netctrl_fw_rsvd;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED \
+ UINT32_C(0x1)
+} __attribute__((packed));
+
+/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* VF Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE \
+ UINT32_C(0x33)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint32_t dev_caps_cfg;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
/*
- * This field is used to indicate device's capabilities and
- * configurations.
+ * Each flag provided in this field indicates a specific VF
+ * configuration change. At least one of these flags shall be set to 1
+ * when an asynchronous event completion of this type is provided
+ * by the HWRM.
*/
+ uint32_t event_data1;
/*
- * If set to 1, then secure firmware update behavior is
- * supported. If set to 0, then secure firmware update behavior
- * is not supported.
+ * If this bit is set to 1, then the value of MTU
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED \
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE \
UINT32_C(0x1)
/*
- * If set to 1, then firmware based DCBX agent is supported. If
- * set to 0, then firmware based DCBX agent capability is not
- * supported on this device.
+ * If this bit is set to 1, then the value of MRU
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED \
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE \
UINT32_C(0x2)
/*
- * If set to 1, then HWRM short command format is supported. If
- * set to 0, then HWRM short command format is not supported.
+ * If this bit is set to 1, then the value of default MAC
+ * address was changed on this VF.
+ * If set to 0, then this bit should be ignored.
*/
- #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED \
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE \
UINT32_C(0x4)
/*
- * If set to 1, then HWRM short command format is required. If
- * set to 0, then HWRM short command format is not required.
+ * If this bit is set to 1, then the value of default VLAN
+ * was changed on this VF.
+ * If set to 0, then this bit should be ignored.
*/
-	#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED \
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE \
UINT32_C(0x8)
- uint8_t roce_fw_maj;
+} __attribute__((packed));
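Since at least one of the four bits above is set per event, a driver can test each flag independently and refresh only the corresponding piece of VF state. A sketch under that assumption (the comments stand in for driver-specific refresh logic):

#include <stdint.h>

static void
handle_vf_cfg_change(uint32_t event_data1)
{
	if (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE) {
		/* Re-read the VF MTU. */
	}
	if (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE) {
		/* Re-read the VF MRU. */
	}
	if (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE) {
		/* Re-read the default MAC address. */
	}
	if (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE) {
		/* Re-read the default VLAN. */
	}
}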
+
+/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_llfc_pfc_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* unused1 is 10 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK \
+ UINT32_C(0xffc0)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* LLFC/PFC Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE \
+ UINT32_C(0x34)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field represents the major version of RoCE firmware. A
- * change in major version represents a major release.
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint8_t roce_fw_min;
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+	/* Indicates an LLFC/PFC status change. */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT \
+ 0
/*
- * This field represents the minor version of RoCE firmware. A
- * change in minor version represents significant functionality
- * changes.
+	 * If this field is set to 1, then it indicates that LLFC is
+	 * enabled.
*/
- uint8_t roce_fw_bld;
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC \
+ UINT32_C(0x1)
/*
- * This field represents the build version of RoCE firmware. A
- * change in update version represents bug fixes.
+	 * If this field is set to 2, then it indicates that PFC
+	 * is enabled.
*/
- uint8_t roce_fw_rsvd;
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST \
+ HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC
+	/* Indicates the physical port on which this LLFC/PFC change occurred. */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK \
+ UINT32_C(0x1c)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT \
+ 2
+ /* PORT ID */
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK \
+ UINT32_C(0x1fffe0)
+ #define HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT \
+ 5
+} __attribute__((packed));
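Here event_data1 packs three fields: the LLFC/PFC state in bits 1:0 (1 = LLFC enabled, 2 = PFC enabled), a 3-bit physical port index in bits 4:2, and a 16-bit port ID in bits 20:5. A decode sketch using the masks above:

#include <stdint.h>

static void
decode_llfc_pfc_change(uint32_t event_data1, uint32_t *state,
		       uint32_t *port, uint32_t *port_id)
{
	*state = (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT;
	*port = (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT;
	*port_id = (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT;
}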
+
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT \
+ 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* unused1 is 10 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK \
+ UINT32_C(0xffc0)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT \
+ 6
+ /* Identifiers of events. */
+ uint16_t event_id;
+	/* Notification of a default VNIC allocation or free. */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION \
+ UINT32_C(0x35)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
/*
- * This field is a reserved field. This field can be used to
- * represent firmware branches or customer specific releases
- * tied to a specific (major,minor,update) version
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- char hwrm_fw_name[16];
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V \
+ UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK \
+ UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates default vnic configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT \
+ 0
/*
- * This field represents the name of HWRM FW (ASCII chars with
- * NULL at the end).
+	 * If this field is set to 1, then it indicates that
+	 * a default VNIC has been allocated.
*/
- char mgmt_fw_name[16];
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC \
+ UINT32_C(0x1)
/*
- * This field represents the name of mgmt FW (ASCII chars with
- * NULL at the end).
+ * If this field is set to 2, then it indicates that
+ * a default VNIC has been freed.
*/
- char netctrl_fw_name[16];
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+	/* Indicates the physical function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK \
+ UINT32_C(0x3fc)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT \
+ 2
+	/* Indicates the virtual function this event occurred on. */
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK \
+ UINT32_C(0x3fffc00)
+ #define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT \
+ 10
+} __attribute__((packed));
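The same pattern applies here: state in bits 1:0 (1 = allocated, 2 = freed), PF ID in bits 9:2, VF ID in bits 25:10. A sketch assuming the masks above:

#include <stdint.h>

struct def_vnic_change {
	uint32_t state;	/* 1 = default VNIC allocated, 2 = freed */
	uint32_t pf_id;
	uint32_t vf_id;
};

static struct def_vnic_change
decode_default_vnic_change(uint32_t event_data1)
{
	struct def_vnic_change d;

	d.state = (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT;
	d.pf_id = (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT;
	d.vf_id = (event_data1 &
	    HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT;
	return d;
}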
+
+/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
+struct hwrm_async_event_cmpl_hwrm_error {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK \
+ UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT \
+ UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /* HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR
+ /* Event specific data */
+ uint32_t event_data2;
+ /* Severity of HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK \
+ UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+ /* Warning */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING \
+ UINT32_C(0x0)
+ /* Non-fatal Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL \
+ UINT32_C(0x1)
+ /* Fatal Error */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL \
+ UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST \
+ HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+ uint8_t opaque_v;
/*
- * This field represents the name of network control firmware
- * (ASCII chars with NULL at the end).
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
- uint32_t reserved2[4];
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Time stamp for error event */
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP \
+ UINT32_C(0x1)
+} __attribute__((packed));
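Unusually for these events, the interesting payload (severity) lives in event_data2 rather than event_data1; decoding it is a masked switch. A sketch assuming the macros above:

#include <stdint.h>

static const char *
hwrm_error_severity_str(uint32_t event_data2)
{
	switch ((event_data2 &
	    HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT) {
	case HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING:
		return "warning";
	case HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL:
		return "non-fatal";
	case HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL:
		return "fatal";
	default:
		return "unknown";
	}
}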
+
+/*******************
+ * hwrm_func_reset *
+ *******************/
+
+
+/* hwrm_func_reset_input (size:192b/24B) */
+struct hwrm_func_reset_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is reserved for future use. The responder should
- * set it to 0. The requester should ignore this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- char roce_fw_name[16];
+ uint16_t cmpl_ring;
/*
- * This field represents the name of RoCE FW (ASCII chars with
- * NULL at the end).
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t chip_num;
- /* This field returns the chip number. */
- uint8_t chip_rev;
- /* This field returns the revision of chip. */
- uint8_t chip_metal;
- /* This field returns the chip metal number. */
- uint8_t chip_bond_id;
- /* This field returns the bond id of the chip. */
- uint8_t chip_platform_type;
+ uint16_t seq_id;
/*
- * This value indicates the type of platform used for chip
- * implementation.
+ * The target ID of the command:
+	 * * 0x0-0xFFF7 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- /* ASIC */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0)
- /* FPGA platform of the chip. */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
- /* Palladium platform of the chip. */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
- uint16_t max_req_win_len;
+ uint16_t target_id;
/*
- * This field returns the maximum value of request window that
- * is supported by the HWRM. The request window is mapped into
- * device address space using MMIO.
+	 * A physical address pointer pointing to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t max_resp_len;
- /* This field returns the maximum value of response buffer in bytes. */
- uint16_t def_req_timeout;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * This field returns the default request timeout value in
- * milliseconds.
+ * This bit must be '1' for the vf_id_valid field to be
+ * configured.
*/
- uint8_t init_pending;
+ #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
/*
- * This field will indicate if any subsystems is not fully
- * initialized.
+ * The ID of the VF that this PF is trying to reset.
+ * Only the parent PF shall be allowed to reset a child VF.
+ *
+ * A parent PF driver shall use this field only when a specific child VF
+ * is requested to be reset.
+ */
+ uint16_t vf_id;
+ /* This value indicates the level of a function reset. */
+ uint8_t func_reset_level;
+ /*
+ * Reset the caller function and its children VFs (if any). If no
+ * children functions exist, then reset the caller function only.
*/
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \
+ UINT32_C(0x0)
+ /* Reset the caller function only */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \
+ UINT32_C(0x1)
/*
- * If set to 1, device is not ready. If set to 0, device is
- * ready to accept all HWRM commands.
+ * Reset all children VFs of the caller function driver if the
+ * caller is a PF driver.
+ * It is an error to specify this level by a VF driver.
+ * It is an error to specify this level by a PF driver with
+ * no children VFs.
*/
- #define HWRM_VER_GET_OUTPUT_INIT_PENDING_DEV_NOT_RDY UINT32_C(0x1)
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t valid;
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
+ UINT32_C(0x2)
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * Reset a specific VF of the caller function driver if the caller
+ * is the parent PF driver.
+ * It is an error to specify this level by a VF driver.
+ * It is an error to specify this level by a PF driver that is not
+ * the parent of the VF that is being requested to reset.
*/
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_LAST \
+ HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF
+ uint8_t unused_0;
} __attribute__((packed));
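Tying the field comments together for the RESETVF case: the enables bit must be set for vf_id to be honored, and only the parent PF may issue the request. A hedged sketch; the request header values (req_type, seq_id) are normally filled in by a driver's HWRM transport layer and are left out here:

#include <string.h>
#include <stdint.h>

static void
build_reset_vf_request(struct hwrm_func_reset_input *req,
		       uint16_t vf_id, uint64_t resp_addr)
{
	memset(req, 0, sizeof(*req));
	req->cmpl_ring = (uint16_t)-1;	/* -1: no completion-ring event */
	req->resp_addr = resp_addr;	/* 16B-aligned, zeroed response buffer */
	req->enables = HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID;
	req->vf_id = vf_id;
	req->func_reset_level =
		HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF;
}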
-/* hwrm_func_reset */
-/*
- * Description: This command resets a hardware function (PCIe function) and
- * frees any resources used by the function. This command shall be initiated by
- * the driver after an FLR has occurred to prepare the function for re-use. This
- * command may also be initiated by a driver prior to doing it's own
- * configuration. This command puts the function into the reset state. In the
- * reset state, global and port related features of the chip are not available.
- */
-/*
- * Note: This command will reset a function that has already been disabled or
- * idled. The command returns all the resources owned by the function so a new
- * driver may allocate and configure resources normally.
- */
-/* Input (24 bytes) */
-struct hwrm_func_reset_input {
- uint16_t req_type;
+/* hwrm_func_reset_output (size:128b/16B) */
+struct hwrm_func_reset_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
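Because the valid byte is written last, a driver polls it before trusting any other response field. A minimal sketch; the retry budget and the absence of a delay are simplifications:

#include <stdbool.h>
#include <stdint.h>

/* Poll the response buffer (the memory passed via resp_addr) until the
 * firmware marks the output complete; the valid field reads as '1'
 * once the whole record has reached RAM. */
static bool
hwrm_func_reset_resp_ready(const volatile struct hwrm_func_reset_output *resp,
			   unsigned int max_tries)
{
	while (max_tries--) {
		if (resp->valid)
			return true;
		/* A real driver would pause or yield between polls. */
	}
	return false;
}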
+
+/********************
+ * hwrm_func_getfid *
+ ********************/
+
+
+/* hwrm_func_getfid_input (size:192b/24B) */
+struct hwrm_func_getfid_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+	 * * 0x0-0xFFF7 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t enables;
- /* This bit must be '1' for the vf_id_valid field to be configured. */
- #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
- uint16_t vf_id;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * The ID of the VF that this PF is trying to reset. Only the
- * parent PF shall be allowed to reset a child VF. A parent PF
- * driver shall use this field only when a specific child VF is
- * requested to be reset.
+ * This bit must be '1' for the pci_id field to be
+ * configured.
*/
- uint8_t func_reset_level;
- /* This value indicates the level of a function reset. */
+ #define HWRM_FUNC_GETFID_INPUT_ENABLES_PCI_ID UINT32_C(0x1)
/*
- * Reset the caller function and its children
- * VFs (if any). If no children functions exist,
- * then reset the caller function only.
+ * This value is the PCI ID of the queried function.
+ * If ARI is enabled, then it is
+ * Bus Number (8b):Function Number(8b). Otherwise, it is
+ * Bus Number (8b):Device Number (5b):Function Number(3b).
*/
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL UINT32_C(0x0)
- /* Reset the caller function only */
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME UINT32_C(0x1)
+ uint16_t pci_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
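The pci_id layout depends on ARI: Bus(8b):Function(8b) when ARI is enabled, Bus(8b):Device(5b):Function(3b) otherwise. A sketch composing both encodings under that stated layout (the helper names are illustrative):

#include <stdint.h>

/* ARI enabled: Bus Number (8b) : Function Number (8b). */
static inline uint16_t
pci_id_ari(uint8_t bus, uint8_t func)
{
	return (uint16_t)((bus << 8) | func);
}

/* ARI disabled: Bus (8b) : Device (5b) : Function (3b). */
static inline uint16_t
pci_id_bdf(uint8_t bus, uint8_t dev, uint8_t func)
{
	return (uint16_t)((bus << 8) | ((dev & 0x1f) << 3) | (func & 0x7));
}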
+
+/* hwrm_func_getfid_output (size:128b/16B) */
+struct hwrm_func_getfid_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * Reset all children VFs of the caller function
- * driver if the caller is a PF driver. It is an
- * error to specify this level by a VF driver.
- * It is an error to specify this level by a PF
- * driver with no children VFs.
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
*/
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
- UINT32_C(0x2)
+ uint16_t fid;
+ uint8_t unused_0[5];
/*
- * Reset a specific VF of the caller function
- * driver if the caller is the parent PF driver.
- * It is an error to specify this level by a VF
- * driver. It is an error to specify this level
- * by a PF driver that is not the parent of the
- * VF that is being requested to reset.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF UINT32_C(0x3)
- uint8_t unused_0;
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_func_reset_output {
- uint16_t error_code;
+/**********************
+ * hwrm_func_vf_alloc *
+ **********************/
+
+
+/* hwrm_func_vf_alloc_input (size:192b/24B) */
+struct hwrm_func_vf_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+	 * * 0x0-0xFFF7 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+	 * A physical address pointer pointing to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the first_vf_id field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1)
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
*/
+ uint16_t first_vf_id;
+ /* The number of virtual functions requested. */
+ uint16_t num_vfs;
} __attribute__((packed));
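Requesting a block of VFs: first_vf_id is only honored when its enables bit is set; if it is left clear, the firmware presumably chooses the starting ID, which is reported back in first_vf_id of the output below. A sketch for the explicit case:

#include <string.h>
#include <stdint.h>

static void
build_vf_alloc_request(struct hwrm_func_vf_alloc_input *req,
		       uint16_t first_vf_id, uint16_t num_vfs)
{
	memset(req, 0, sizeof(*req));
	req->enables = HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID;
	req->first_vf_id = first_vf_id;	/* VF IDs are local to this PF */
	req->num_vfs = num_vfs;
}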
-/* hwrm_func_vf_cfg */
-/*
- * Description: This command allows configuration of a VF by its driver. If this
- * function is called by a PF driver, then the HWRM shall fail this command. If
- * guest VLAN and/or MAC address are provided in this command, then the HWRM
- * shall set up appropriate MAC/VLAN filters for the VF that is being
- * configured. A VF driver should set VF MTU/MRU using this command prior to
- * allocating RX VNICs or TX rings for the corresponding VF.
- */
-/* Input (32 bytes) */
-struct hwrm_func_vf_cfg_input {
- uint16_t req_type;
+/* hwrm_func_vf_alloc_output (size:128b/16B) */
+struct hwrm_func_vf_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The ID of the first VF allocated. */
+ uint16_t first_vf_id;
+ uint8_t unused_0[5];
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
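
/*
 * Illustrative sketch (not part of the generated header): filling in a
 * hwrm_func_vf_alloc_input request. HWRM fields are little-endian on
 * the wire, so DPDK's rte_cpu_to_le_*() helpers are used. The command
 * ID HWRM_FUNC_VF_ALLOC is assumed to be defined with the other request
 * types earlier in this header, and bnxt_post_hwrm() is a hypothetical
 * stand-in for however the driver actually posts the message and its
 * DMA-able response buffer to the firmware.
 */
#include <string.h>
#include <rte_byteorder.h>

extern int bnxt_post_hwrm(void *req, size_t len); /* hypothetical */

static int
example_vf_alloc(uint64_t resp_dma_addr, uint16_t first_vf, uint16_t n_vfs)
{
	struct hwrm_func_vf_alloc_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = rte_cpu_to_le_16(HWRM_FUNC_VF_ALLOC);
	req.cmpl_ring = rte_cpu_to_le_16(0xffff); /* -1: no completion ring */
	req.seq_id = rte_cpu_to_le_16(1);         /* opaque to the firmware */
	req.target_id = rte_cpu_to_le_16(0xffff); /* target the HWRM itself */
	req.resp_addr = rte_cpu_to_le_64(resp_dma_addr);

	/* first_vf_id is only honored when its enables bit is set. */
	req.enables =
		rte_cpu_to_le_32(HWRM_FUNC_VF_ALLOC_INPUT_ENABLES_FIRST_VF_ID);
	req.first_vf_id = rte_cpu_to_le_16(first_vf);
	req.num_vfs = rte_cpu_to_le_16(n_vfs);

	return bnxt_post_hwrm(&req, sizeof(req));
}
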
+
+/*********************
+ * hwrm_func_vf_free *
+ *********************/
+
+
+/* hwrm_func_vf_free_input (size:192b/24B) */
+struct hwrm_func_vf_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t enables;
- /* This bit must be '1' for the mtu field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
- /* This bit must be '1' for the guest_vlan field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2)
+ uint16_t target_id;
/*
- * This bit must be '1' for the async_event_cr field to be configured.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4)
- /* This bit must be '1' for the dflt_mac_addr field to be configured. */
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8)
- uint16_t mtu;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * The maximum transmission unit requested on the function. The HWRM
- * should make sure that the mtu of the function does not exceed the mtu
- * of the physical port that this function is associated with. In
- * addition to requesting mtu per function, it is possible to configure
- * mtu per transmit ring. By default, the mtu of each transmit ring
- * associated with a function is equal to the mtu of the function. The
- * HWRM should make sure that the mtu of each transmit ring that is
- * assigned to a function has a valid mtu.
+ * This bit must be '1' for the first_vf_id field to be
+ * configured.
*/
- uint16_t guest_vlan;
+ #define HWRM_FUNC_VF_FREE_INPUT_ENABLES_FIRST_VF_ID UINT32_C(0x1)
/*
- * The guest VLAN for the function being configured. This field's format
- * is same as 802.1Q Tag's Tag Control Information (TCI) format that
- * includes both Priority Code Point (PCP) and VLAN Identifier (VID).
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
*/
- uint16_t async_event_cr;
+ uint16_t first_vf_id;
/*
- * ID of the target completion ring for receiving asynchronous event
- * completions. If this field is not valid, then the HWRM shall use the
- * default completion ring of the function that is being configured as
- * the target completion ring for providing any asynchronous event
- * completions for that function. If this field is valid, then the HWRM
- * shall use the completion ring identified by this ID as the target
- * completion ring for providing any asynchronous event completions for
- * the function that is being configured.
+ * The number of virtual functions requested.
+ * 0xFFFF - Cleanup all children of this PF.
*/
- uint8_t dflt_mac_addr[6];
+ uint16_t num_vfs;
+} __attribute__((packed));
+
+/* hwrm_func_vf_free_output (size:128b/16B) */
+struct hwrm_func_vf_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value is the current MAC address requested by the VF driver to
- * be configured on this VF. A value of 00-00-00-00-00-00 indicates no
- * MAC address configuration is requested by the VF driver. The parent
- * PF driver may reject or overwrite this MAC address.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
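
/*
 * Illustrative sketch: releasing every child VF of a PF through
 * hwrm_func_vf_free. Per the num_vfs description above, 0xFFFF asks the
 * firmware to clean up all children of this PF; enables is left at zero
 * so first_vf_id is not consulted. The includes, the command ID
 * HWRM_FUNC_VF_FREE, and the hypothetical bnxt_post_hwrm() helper
 * follow the previous sketch.
 */
static int
example_vf_free_all(uint64_t resp_dma_addr)
{
	struct hwrm_func_vf_free_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = rte_cpu_to_le_16(HWRM_FUNC_VF_FREE);
	req.target_id = rte_cpu_to_le_16(0xffff);
	req.resp_addr = rte_cpu_to_le_64(resp_dma_addr);
	req.num_vfs = rte_cpu_to_le_16(0xffff); /* free all children */

	return bnxt_post_hwrm(&req, sizeof(req));
}
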
-/* Output (16 bytes) */
+/********************
+ * hwrm_func_vf_cfg *
+ ********************/
-struct hwrm_func_vf_cfg_output {
- uint16_t error_code;
+
+/* hwrm_func_vf_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
-} __attribute__((packed));
-
-/* hwrm_func_qcaps */
-/*
- * Description: This command returns capabilities of a function. The input FID
- * value is used to indicate what function is being queried. This allows a
- * physical function driver to query virtual functions that are children of the
- * physical function. The output FID value is needed to configure Rings and
- * MSI-X vectors so their DMA operations appear correctly on the PCI bus.
- */
-/* Input (24 bytes) */
-struct hwrm_func_qcaps_input {
- uint16_t req_type;
+ uint16_t target_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t cmpl_ring;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This bit must be '1' for the mtu field to be
+ * configured.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU \
+ UINT32_C(0x1)
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * This bit must be '1' for the guest_vlan field to be
+ * configured.
*/
- uint64_t resp_addr;
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN \
+ UINT32_C(0x2)
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This bit must be '1' for the async_event_cr field to be
+ * configured.
*/
- uint16_t fid;
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \
+ UINT32_C(0x4)
/*
- * Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * This bit must be '1' for the dflt_mac_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the num_rsscos_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_cmpl_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the num_tx_rings field to be
+ * configured.
*/
- uint16_t unused_0[3];
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the num_rx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the num_l2_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the num_vnics field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the num_stat_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the num_hw_ring_grps field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
+ UINT32_C(0x800)
+ /*
+ * The maximum transmission unit requested on the function.
+ * The HWRM should make sure that the mtu of
+ * the function does not exceed the mtu of the physical
+ * port that this function is associated with.
+ *
+ * In addition to requesting mtu per function, it is
+ * possible to configure mtu per transmit ring.
+ * By default, the mtu of each transmit ring associated
+ * with a function is equal to the mtu of the function.
+ * The HWRM should make sure that the mtu of each transmit
+ * ring that is assigned to a function has a valid mtu.
+ */
+ uint16_t mtu;
+ /*
+ * The guest VLAN for the function being configured.
+ * This field's format is the same as the 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t guest_vlan;
+ /*
+ * ID of the target completion ring for receiving asynchronous
+ * event completions. If this field is not valid, then the
+ * HWRM shall use the default completion ring of the function
+ * that is being configured as the target completion ring for
+ * providing any asynchronous event completions for that
+ * function.
+ * If this field is valid, then the HWRM shall use the
+ * completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event
+ * completions for the function that is being configured.
+ */
+ uint16_t async_event_cr;
+ /*
+ * This value is the current MAC address requested by the VF
+ * driver to be configured on this VF. A value of
+ * 00-00-00-00-00-00 indicates no MAC address configuration
+ * is requested by the VF driver.
+ * The parent PF driver may reject or overwrite this
+ * MAC address.
+ */
+ uint8_t dflt_mac_addr[6];
+ uint32_t flags;
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of TX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+ UINT32_C(0x1)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+ UINT32_C(0x2)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of CMPL rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+ UINT32_C(0x4)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RSS ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+ UINT32_C(0x8)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of ring groups) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+ UINT32_C(0x10)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of stat ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+ UINT32_C(0x20)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of VNICs) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+ UINT32_C(0x40)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of L2 ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+ UINT32_C(0x80)
+ /* The number of RSS/COS contexts requested for the VF. */
+ uint16_t num_rsscos_ctxs;
+ /* The number of completion rings requested for the VF. */
+ uint16_t num_cmpl_rings;
+ /* The number of transmit rings requested for the VF. */
+ uint16_t num_tx_rings;
+ /* The number of receive rings requested for the VF. */
+ uint16_t num_rx_rings;
+ /* The number of L2 contexts requested for the VF. */
+ uint16_t num_l2_ctxs;
+ /* The number of vnics requested for the VF. */
+ uint16_t num_vnics;
+ /* The number of statistic contexts requested for the VF. */
+ uint16_t num_stat_ctxs;
+ /* The number of HW ring groups requested for the VF. */
+ uint16_t num_hw_ring_grps;
+ uint8_t unused_0[4];
} __attribute__((packed));
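
/*
 * Illustrative sketch: using the *_ASSETS_TEST flags to ask the
 * firmware whether a set of resources could be granted without actually
 * reserving anything. Per the flag descriptions above, the firmware
 * fails the command when the assets are unavailable and reserves
 * nothing when they are. Conventions and the hypothetical
 * bnxt_post_hwrm() helper follow the earlier sketches.
 */
static int
example_vf_cfg_probe_rings(uint64_t resp_dma_addr, uint16_t tx, uint16_t rx)
{
	struct hwrm_func_vf_cfg_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = rte_cpu_to_le_16(HWRM_FUNC_VF_CFG);
	req.target_id = rte_cpu_to_le_16(0xffff);
	req.resp_addr = rte_cpu_to_le_64(resp_dma_addr);

	req.enables = rte_cpu_to_le_32(
		HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
		HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS);
	req.num_tx_rings = rte_cpu_to_le_16(tx);
	req.num_rx_rings = rte_cpu_to_le_16(rx);

	/* Test only: an error return means the rings are not available. */
	req.flags = rte_cpu_to_le_32(
		HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
		HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST);

	return bnxt_post_hwrm(&req, sizeof(req));
}
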
-/* Output (80 bytes) */
-struct hwrm_func_qcaps_output {
- uint16_t error_code;
+/* hwrm_func_vf_cfg_output (size:128b/16B) */
+struct hwrm_func_vf_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
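
/*
 * Illustrative sketch: waiting for an HWRM response. Because the
 * firmware writes the `valid` byte last (see the field description
 * above), polling it to a non-zero value and then issuing a read
 * barrier guarantees the rest of the DMA'd response is visible.
 * rte_io_rmb() is DPDK's I/O read barrier; the spin-count timeout is a
 * simplifying assumption, not the driver's real scheme.
 */
#include <rte_atomic.h>

static int
example_wait_valid(const volatile struct hwrm_func_vf_cfg_output *resp,
		   unsigned int spins)
{
	while (spins-- > 0) {
		if (resp->valid) {
			rte_io_rmb(); /* order the payload reads after valid */
			return 0;
		}
	}
	return -1; /* timed out */
}
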
+
+/*******************
+ * hwrm_func_qcaps *
+ *******************/
+
+
+/* hwrm_func_qcaps_input (size:192b/24B) */
+struct hwrm_func_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t fid;
+ uint16_t target_id;
/*
- * FID value. This value is used to identify operations on the
- * PCI bus as belonging to a particular PCI function.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/*
- * Port ID of port that this function is associated with. Valid
- * only for the PF. 0xFF... (All Fs) if this function is not
- * associated with any port. 0xFF... (All Fs) if this function
- * is called from a VF.
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint32_t flags;
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
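
/*
 * Illustrative sketch: querying the requesting function's own
 * capabilities by setting fid to all-Fs, as the field description above
 * specifies. The command ID HWRM_FUNC_QCAPS is assumed to be defined
 * with the other request types earlier in this header; other
 * conventions follow the earlier sketches.
 */
static int
example_qcaps_self(uint64_t resp_dma_addr)
{
	struct hwrm_func_qcaps_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = rte_cpu_to_le_16(HWRM_FUNC_QCAPS);
	req.target_id = rte_cpu_to_le_16(0xffff);
	req.resp_addr = rte_cpu_to_le_64(resp_dma_addr);
	req.fid = rte_cpu_to_le_16(0xffff); /* query the requesting function */

	return bnxt_post_hwrm(&req, sizeof(req));
}
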
+
+/* hwrm_func_qcaps_output (size:640b/80B) */
+struct hwrm_func_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
+ */
+ uint16_t fid;
+ /*
+ * Port ID of port that this function is associated with.
+ * Valid only for the PF.
+ * 0xFF... (All Fs) if this function is not associated with
+ * any port.
+ * 0xFF... (All Fs) if this function is called from a VF.
+ */
+ uint16_t port_id;
+ uint32_t flags;
/* If 1, then Push mode is supported on this function. */
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED \
+ UINT32_C(0x1)
/*
* If 1, then the global MSI-X auto-masking is enabled for the
* device.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
UINT32_C(0x2)
/*
- * If 1, then the Precision Time Protocol (PTP) processing is
- * supported on this function. The HWRM should enable PTP on
- * only a single Physical Function (PF) per port.
+ * If 1, then the Precision Time Protocol (PTP) processing
+ * is supported on this function.
+ * The HWRM should enable PTP on only a single Physical
+ * Function (PF) per port.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED \
+ UINT32_C(0x4)
/*
- * If 1, then RDMA over Converged Ethernet (RoCE) v1 is
- * supported on this function.
+ * If 1, then RDMA over Converged Ethernet (RoCE) v1
+ * is supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED \
+ UINT32_C(0x8)
/*
- * If 1, then RDMA over Converged Ethernet (RoCE) v2 is
- * supported on this function.
+ * If 1, then RDMA over Converged Ethernet (RoCE) v2
+ * is supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED \
+ UINT32_C(0x10)
/*
- * If 1, then control and configuration of WoL magic packet are
- * supported on this function.
+ * If 1, then control and configuration of WoL magic packet
+ * are supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED \
UINT32_C(0x20)
/*
- * If 1, then control and configuration of bitmap pattern packet
- * are supported on this function.
+ * If 1, then control and configuration of bitmap pattern
+ * packet are supported on this function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED \
+ UINT32_C(0x40)
/*
* If set to 1, then the control and configuration of rate limit
* of an allocated TX ring on the queried function is supported.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED \
+ UINT32_C(0x80)
/*
- * If 1, then control and configuration of minimum and maximum
- * bandwidths are supported on the queried function.
+ * If 1, then control and configuration of minimum and
+ * maximum bandwidths are supported on the queried function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED \
+ UINT32_C(0x100)
/*
- * If the query is for a VF, then this flag shall be ignored. If
- * this query is for a PF and this flag is set to 1, then the PF
- * has the capability to set the rate limits on the TX rings of
- * its children VFs. If this query is for a PF and this flag is
- * set to 0, then the PF does not have the capability to set the
- * rate limits on the TX rings of its children VFs.
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to set the rate limits
+ * on the TX rings of its children VFs.
+ * If this query is for a PF and this flag is set to 0, then
+ * the PF does not have the capability to set the rate limits
+ * on the TX rings of its children VFs.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED \
UINT32_C(0x200)
/*
- * If the query is for a VF, then this flag shall be ignored. If
- * this query is for a PF and this flag is set to 1, then the PF
- * has the capability to set the minimum and/or maximum
- * bandwidths for its children VFs. If this query is for a PF
- * and this flag is set to 0, then the PF does not have the
- * capability to set the minimum or maximum bandwidths for its
- * children VFs.
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to set the minimum and/or
+ * maximum bandwidths for its children VFs.
+ * If this query is for a PF and this flag is set to 0, then
+ * the PF does not have the capability to set the minimum or
+ * maximum bandwidths for its children VFs.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED \
+ UINT32_C(0x400)
/*
* Standard TX Ring mode is used for the allocation of TX ring
* and underlying scheduling resources that allow bandwidth
- * reservation and limit settings on the queried function. If
- * set to 1, then standard TX ring mode is supported on the
- * queried function. If set to 0, then standard TX ring mode is
- * not available on the queried function.
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is supported
+ * on the queried function.
+ * If set to 0, then standard TX ring mode is not available
+ * on the queried function.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_STD_TX_RING_MODE_SUPPORTED \
UINT32_C(0x800)
- uint8_t mac_address[6];
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect GENEVE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x1000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect NVGRE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x2000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect GRE tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GRE_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x4000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to detect MPLS tunnel
+ * flags.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_MPLS_TUN_FLAGS_SUPPORTED \
+ UINT32_C(0x8000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to support PCIe stats.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED \
+ UINT32_C(0x10000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to adopt the VFs belonging
+ * to another PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADOPTED_PF_SUPPORTED \
+ UINT32_C(0x20000)
+ /*
+ * If the query is for a VF, then this flag shall be ignored.
+ * If this query is for a PF and this flag is set to 1,
+ * then the PF has the capability to administer another PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED \
+ UINT32_C(0x40000)
/*
* This value is the current MAC address configured for this
- * function. A value of 00-00-00-00-00-00 indicates no MAC
- * address is currently configured.
+ * function. A value of 00-00-00-00-00-00 indicates no
+ * MAC address is currently configured.
*/
- uint16_t max_rsscos_ctx;
+ uint8_t mac_address[6];
/*
- * The maximum number of RSS/COS contexts that can be allocated
- * to the function.
+ * The maximum number of RSS/COS contexts that can be
+ * allocated to the function.
*/
- uint16_t max_cmpl_rings;
+ uint16_t max_rsscos_ctx;
/*
- * The maximum number of completion rings that can be allocated
- * to the function.
+ * The maximum number of completion rings that can be
+ * allocated to the function.
*/
- uint16_t max_tx_rings;
+ uint16_t max_cmpl_rings;
/*
- * The maximum number of transmit rings that can be allocated to
- * the function.
+ * The maximum number of transmit rings that can be
+ * allocated to the function.
*/
- uint16_t max_rx_rings;
+ uint16_t max_tx_rings;
/*
- * The maximum number of receive rings that can be allocated to
- * the function.
+ * The maximum number of receive rings that can be
+ * allocated to the function.
*/
- uint16_t max_l2_ctxs;
+ uint16_t max_rx_rings;
/*
- * The maximum number of L2 contexts that can be allocated to
- * the function.
+ * The maximum number of L2 contexts that can be
+ * allocated to the function.
*/
- uint16_t max_vnics;
+ uint16_t max_l2_ctxs;
/*
- * The maximum number of VNICs that can be allocated to the
- * function.
+ * The maximum number of VNICs that can be
+ * allocated to the function.
*/
- uint16_t first_vf_id;
+ uint16_t max_vnics;
/*
- * The identifier for the first VF enabled on a PF. This is
- * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
- * this command is called on a PF with SR-IOV disabled or on a
- * VF.
+ * The identifier for the first VF enabled on a PF. This
+ * is valid only on the PF with SR-IOV enabled.
+ * 0xFF... (All Fs) if this command is called on a PF with
+ * SR-IOV disabled or on a VF.
*/
- uint16_t max_vfs;
+ uint16_t first_vf_id;
/*
- * The maximum number of VFs that can be allocated to the
- * function. This is valid only on the PF with SR-IOV enabled.
- * 0xFF... (All Fs) if this command is called on a PF with SR-
- * IOV disabled or on a VF.
+ * The maximum number of VFs that can be
+ * allocated to the function. This is valid only on the
+ * PF with SR-IOV enabled. 0xFF... (All Fs) if this
+ * command is called on a PF with SR-IOV disabled or
+ * on a VF.
*/
- uint16_t max_stat_ctx;
+ uint16_t max_vfs;
/*
* The maximum number of statistic contexts that can be
* allocated to the function.
*/
- uint32_t max_encap_records;
+ uint16_t max_stat_ctx;
/*
* The maximum number of Encapsulation records that can be
* offloaded by this function.
*/
- uint32_t max_decap_records;
+ uint32_t max_encap_records;
/*
- * The maximum number of decapsulation records that can be
- * offloaded by this function.
+ * The maximum number of decapsulation records that can
+ * be offloaded by this function.
*/
- uint32_t max_tx_em_flows;
+ uint32_t max_decap_records;
/*
- * The maximum number of Exact Match (EM) flows that can be
+ * The maximum number of Exact Match (EM) flows that can be
* offloaded by this function on the TX side.
*/
- uint32_t max_tx_wm_flows;
+ uint32_t max_tx_em_flows;
/*
- * The maximum number of Wildcard Match (WM) flows that can be
- * offloaded by this function on the TX side.
+ * The maximum number of Wildcard Match (WM) flows that can
+ * be offloaded by this function on the TX side.
*/
- uint32_t max_rx_em_flows;
+ uint32_t max_tx_wm_flows;
/*
- * The maximum number of Exact Match (EM) flows that can be
+ * The maximum number of Exact Match (EM) flows that can be
* offloaded by this function on the RX side.
*/
- uint32_t max_rx_wm_flows;
+ uint32_t max_rx_em_flows;
/*
- * The maximum number of Wildcard Match (WM) flows that can be
- * offloaded by this function on the RX side.
+ * The maximum number of Wildcard Match (WM) flows that can
+ * be offloaded by this function on the RX side.
*/
- uint32_t max_mcast_filters;
+ uint32_t max_rx_wm_flows;
/*
- * The maximum number of multicast filters that can be supported
- * by this function on the RX side.
+ * The maximum number of multicast filters that can
+ * be supported by this function on the RX side.
*/
- uint32_t max_flow_id;
+ uint32_t max_mcast_filters;
/*
- * The maximum value of flow_id that can be supported in
- * completion records.
+ * The maximum value of flow_id that can be supported
+ * in completion records.
*/
- uint32_t max_hw_ring_grps;
+ uint32_t max_flow_id;
/*
- * The maximum number of HW ring groups that can be supported on
- * this function.
+ * The maximum number of HW ring groups that can be
+ * supported on this function.
*/
- uint16_t max_sp_tx_rings;
+ uint32_t max_hw_ring_grps;
/*
- * The maximum number of strict priority transmit rings that can
- * be allocated to the function. This number indicates the
- * maximum number of TX rings that can be assigned strict
- * priorities out of the maximum number of TX rings that can be
- * allocated (max_tx_rings) to the function.
+ * The maximum number of strict priority transmit rings
+ * that can be allocated to the function.
+ * This number indicates the maximum number of TX rings
+ * that can be assigned strict priorities out of the
+ * maximum number of TX rings that can be allocated
+ * (max_tx_rings) to the function.
*/
- uint8_t unused_0;
- uint8_t valid;
+ uint16_t max_sp_tx_rings;
+ uint8_t unused_0;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
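
/*
 * Illustrative sketch: consuming a hwrm_func_qcaps_output response once
 * its `valid` byte has been observed as 1. Multi-byte fields are
 * little-endian, hence rte_le_to_cpu_*(). The printed summary is only
 * an example of how the capability flags and resource limits might be
 * used; includes follow the earlier sketches plus <stdio.h>.
 */
#include <stdio.h>

static void
example_parse_qcaps(const struct hwrm_func_qcaps_output *resp)
{
	uint32_t flags = rte_le_to_cpu_32(resp->flags);

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED)
		printf("PTP is supported on this function\n");

	printf("max tx rings %u, max rx rings %u, max vnics %u\n",
	       rte_le_to_cpu_16(resp->max_tx_rings),
	       rte_le_to_cpu_16(resp->max_rx_rings),
	       rte_le_to_cpu_16(resp->max_vnics));
}
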
-/* hwrm_func_qcfg */
-/*
- * Description: This command returns the current configuration of a function.
- * The input FID value is used to indicate what function is being queried. This
- * allows a physical function driver to query virtual functions that are
- * children of the physical function. The output FID value is needed to
- * configure Rings and MSI-X vectors so their DMA operations appear correctly on
- * the PCI bus. This command should be called by every driver after
- * 'hwrm_func_cfg' to get the actual number of resources allocated by the HWRM.
- * The values returned by hwrm_func_qcfg are the values the driver shall use.
- * These values may be different than what was originally requested in the
- * 'hwrm_func_cfg' command.
- */
-/* Input (24 bytes) */
+/******************
+ * hwrm_func_qcfg *
+ ******************/
+
+
+/* hwrm_func_qcfg_input (size:192b/24B) */
struct hwrm_func_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t unused_0[3];
+ uint16_t fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (72 bytes) */
+/* hwrm_func_qcfg_output (size:640b/80B) */
struct hwrm_func_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint16_t fid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the
- * PCI bus as belonging to a particular PCI function.
+ * FID value. This value is used to identify operations on the PCI
+ * bus as belonging to a particular PCI function.
*/
- uint16_t port_id;
+ uint16_t fid;
/*
* Port ID of port that this function is associated with.
- * 0xFF... (All Fs) if this function is not associated with any
- * port.
+ * 0xFF... (All Fs) if this function is not associated with
+ * any port.
*/
- uint16_t vlan;
+ uint16_t port_id;
/*
- * This value is the current VLAN setting for this function. The
- * value of 0 for this field indicates no priority tagging or
- * VLAN is used. This field's format is same as 802.1Q Tag's Tag
- * Control Information (TCI) format that includes both Priority
- * Code Point (PCP) and VLAN Identifier (VID).
+ * This value is the current VLAN setting for this
+ * function. The value of 0 for this field indicates
+ * no priority tagging or VLAN is used.
+ * This field's format is the same as the 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
*/
- uint16_t flags;
+ uint16_t vlan;
+ uint16_t flags;
/*
* If 1, then magic packet based Out-Of-Box WoL is enabled on
* the port associated with this function.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
UINT32_C(0x1)
/*
- * If 1, then bitmap pattern based Out-Of-Box WoL packet is
- * enabled on the port associated with this function.
+ * If 1, then bitmap pattern based Out-Of-Box WoL packet is enabled
+ * on the port associated with this function.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED UINT32_C(0x2)
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED \
+ UINT32_C(0x2)
/*
- * If set to 1, then FW based DCBX agent is enabled and running
- * on the port associated with this function. If set to 0, then
- * DCBX agent is not running in the firmware.
+ * If set to 1, then FW based DCBX agent is enabled and running on
+ * the port associated with this function.
+ * If set to 0, then DCBX agent is not running in the firmware.
*/
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
UINT32_C(0x4)
/*
* Standard TX Ring mode is used for the allocation of TX ring
* and underlying scheduling resources that allow bandwidth
- * reservation and limit settings on the queried function. If
- * set to 1, then standard TX ring mode is enabled on the
- * queried function. If set to 0, then the standard TX ring mode
- * is disabled on the queried function. In this extended TX ring
- * resource mode, the minimum and maximum bandwidth settings are
- * not supported to allow the allocation of TX rings to span
- * multiple scheduler nodes.
- */
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is enabled
+ * on the queried function.
+ * If set to 0, then the standard TX ring mode is disabled
+ * on the queried function. In this extended TX ring resource
+ * mode, the minimum and maximum bandwidth settings are not
+ * supported to allow the allocation of TX rings to span multiple
+ * scheduler nodes.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_STD_TX_RING_MODE_ENABLED \
UINT32_C(0x8)
/*
- * If set to 1 then FW based LLDP agent is enabled and running
- * on the port associated with this function. If set to 0 then
- * the LLDP agent is not running in the firmware.
+ * If set to 1 then FW based LLDP agent is enabled and running on
+ * the port associated with this function.
+ * If set to 0 then the LLDP agent is not running in the firmware.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED UINT32_C(0x10)
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED \
+ UINT32_C(0x10)
/*
- * If set to 1, then multi-host mode is active for this
- * function. If set to 0, then multi-host mode is inactive for
- * this function or not applicable for this device.
+ * If set to 1, then multi-host mode is active for this function.
+ * If set to 0, then multi-host mode is inactive for this function
+ * or not applicable for this device.
*/
- #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST UINT32_C(0x20)
- uint8_t mac_address[6];
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST \
+ UINT32_C(0x20)
/*
* This value is the current MAC address configured for this
- * function. A value of 00-00-00-00-00-00 indicates no MAC
- * address is currently configured.
+ * function. A value of 00-00-00-00-00-00 indicates no
+ * MAC address is currently configured.
*/
- uint16_t pci_id;
+ uint8_t mac_address[6];
/*
- * This value is current PCI ID of this function. If ARI is
- * enabled, then it is Bus Number (8b):Function Number(8b).
- * Otherwise, it is Bus Number (8b):Device Number (4b):Function
- * Number(4b).
+ * This value is the current PCI ID of this
+ * function. If ARI is enabled, then it is
+ * Bus Number (8b):Function Number(8b). Otherwise, it is
+ * Bus Number (8b):Device Number (4b):Function Number(4b).
+ * If multi-host mode is active, the 4 LSBs will indicate
+ * the PF index for this function.
*/
- uint16_t alloc_rsscos_ctx;
+ uint16_t pci_id;
/*
- * The number of RSS/COS contexts currently allocated to the
- * function.
+ * The number of RSS/COS contexts currently
+ * allocated to the function.
*/
- uint16_t alloc_cmpl_rings;
+ uint16_t alloc_rsscos_ctx;
/*
- * The number of completion rings currently allocated to the
- * function. This does not include the rings allocated to any
- * children functions if any.
+ * The number of completion rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t alloc_tx_rings;
+ uint16_t alloc_cmpl_rings;
/*
- * The number of transmit rings currently allocated to the
- * function. This does not include the rings allocated to any
- * children functions if any.
+ * The number of transmit rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t alloc_rx_rings;
+ uint16_t alloc_tx_rings;
/*
- * The number of receive rings currently allocated to the
- * function. This does not include the rings allocated to any
- * children functions if any.
+ * The number of receive rings currently allocated to
+ * the function. This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t alloc_l2_ctx;
+ uint16_t alloc_rx_rings;
/* The allocated number of L2 contexts to the function. */
- uint16_t alloc_vnics;
+ uint16_t alloc_l2_ctx;
/* The allocated number of vnics to the function. */
- uint16_t mtu;
+ uint16_t alloc_vnics;
/*
- * The maximum transmission unit of the function. For rings
- * allocated on this function, this default value is used if
- * ring MTU is not specified.
+ * The maximum transmission unit of the function.
+ * For rings allocated on this function, this default
+ * value is used if ring MTU is not specified.
*/
- uint16_t mru;
+ uint16_t mtu;
/*
- * The maximum receive unit of the function. For vnics allocated
- * on this function, this default value is used if vnic MRU is
- * not specified.
+ * The maximum receive unit of the function.
+ * For vnics allocated on this function, this default
+ * value is used if vnic MRU is not specified.
*/
- uint16_t stat_ctx_id;
+ uint16_t mru;
/* The statistics context assigned to a function. */
- uint8_t port_partition_type;
+ uint16_t stat_ctx_id;
/*
- * The HWRM shall return Unknown value for this field when this
- * command is used to query VF's configuration.
+ * The HWRM shall return Unknown value for this field
+ * when this command is used to query VF's configuration.
*/
+ uint8_t port_partition_type;
/* Single physical function */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0)
/* Multiple physical functions */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
/* Network Partitioning 1.0 */
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 UINT32_C(0x2)
/* Network Partitioning 1.5 */
@@ -2845,62 +5619,64 @@ struct hwrm_func_qcfg_output {
/* Network Partitioning 2.0 */
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 UINT32_C(0x4)
/* Unknown */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN UINT32_C(0xff)
- uint8_t port_pf_cnt;
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \
+ UINT32_C(0xff)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN
/*
- * This field will indicate number of physical functions on this
- * port_partition. HWRM shall return unavail (i.e. value of 0)
- * for this field when this command is used to query VF's
- * configuration or from older firmware that doesn't support
- * this field.
+ * This field indicates the number of physical functions on this port_partition.
+ * HWRM shall return unavail (i.e. value of 0) for this field
+ * when this command is used to query VF's configuration or
+ * from older firmware that doesn't support this field.
*/
+ uint8_t port_pf_cnt;
/* number of PFs is not available */
- #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
- uint16_t dflt_vnic_id;
- /* The default VNIC ID assigned to a function that is being queried. */
- uint16_t max_mtu_configured;
- /*
- * This value specifies the MAX MTU that can be configured by
- * host drivers. This 'max_mtu_configure' can be HW max MTU or
- * OEM applications specified value. Host drivers can't
- * configure the MTU greater than this value. Host drivers
- * should read this value prior to configuring the MTU. FW will
- * fail the host request with MTU greater than
- * 'max_mtu_configured'.
- */
- uint32_t min_bw;
- /*
- * Minimum BW allocated for this function. The HWRM will
- * translate this value into byte counter and time interval used
- * for the scheduler inside the device. A value of 0 indicates
- * the minimum bandwidth is not configured.
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0)
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL
+ /*
+ * The default VNIC ID assigned to a function that is
+ * being queried.
*/
+ uint16_t dflt_vnic_id;
+ uint16_t max_mtu_configured;
+ /*
+ * Minimum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ * A value of 0 indicates the minimum bandwidth is not
+ * configured.
+ */
+ uint32_t min_bw;
/* The bandwidth value. */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES \
(UINT32_C(0x1) << 28)
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_LAST \
- FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES
+ HWRM_FUNC_QCFG_OUTPUT_MIN_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
+ /* Value is in Kb or KB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
+ /* Value is in Gb or GB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
@@ -2910,40 +5686,44 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \
- FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID
- uint32_t max_bw;
+ HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID
/*
- * Maximum BW allocated for this function. The HWRM will
- * translate this value into byte counter and time interval used
- * for the scheduler inside the device. A value of 0 indicates
- * that the maximum bandwidth is not configured.
+ * Maximum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
+ * A value of 0 indicates that the maximum bandwidth is not
+ * configured.
*/
+ uint32_t max_bw;
/* The bandwidth value. */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES \
(UINT32_C(0x1) << 28)
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_LAST \
- FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES
+ HWRM_FUNC_QCFG_OUTPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
+ /* Value is in Kb or KB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
+ /* Value is in Gb or GB (base 10). */
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
@@ -2953,566 +5733,722 @@ struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \
- FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID
- uint8_t evb_mode;
+ HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID
/*
* This value indicates the Edge virtual bridge mode for the
* domain that this function belongs to.
*/
- /* No Edge Virtual Bridging (EVB) */
- #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
- /* Virtual Ethernet Bridge (VEB) */
- #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
- /* Virtual Ethernet Port Aggregator (VEPA) */
- #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
- uint8_t unused_0;
- uint16_t alloc_vfs;
+ uint8_t evb_mode;
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA
+ uint8_t options;
+ /*
+ * This value indicates the PCIE device cache line size.
+ * The cache line size allows the DMA writes to terminate and
+ * start at the cache boundary.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT 0
+ /* Cache Line Size 64 bytes */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \
+ UINT32_C(0x0)
+ /* Cache Line Size 128 bytes */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \
+ UINT32_C(0x1)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_LAST \
+ HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128
+ /* Reserved for future. */
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_RSVD_SFT 2
+ /*
+ * The number of VFs that are allocated to the function.
+ * This is valid only on the PF with SR-IOV enabled.
+ * 0xFF... (All Fs) if this command is called on a PF with
+ * SR-IOV disabled or on a VF.
+ */
+ uint16_t alloc_vfs;
+ /*
+ * The number of allocated multicast filters for this
+ * function on the RX side.
+ */
+ uint32_t alloc_mcast_filters;
/*
- * The number of VFs that are allocated to the function. This is
- * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
- * this command is called on a PF with SR-IOV disabled or on a
- * VF.
+ * The number of allocated HW ring groups for this
+ * function.
+ */
+ uint32_t alloc_hw_ring_grps;
+ /*
+ * The number of strict priority transmit rings out of
+ * the TX rings currently allocated to the function
+ * (alloc_tx_rings).
*/
- uint32_t alloc_mcast_filters;
+ uint16_t alloc_sp_tx_rings;
/*
- * The number of allocated multicast filters for this function
- * on the RX side.
+ * The number of statistics contexts
+ * currently reserved for the function.
*/
- uint32_t alloc_hw_ring_grps;
- /* The number of allocated HW ring groups for this function. */
- uint16_t alloc_sp_tx_rings;
+ uint16_t alloc_stat_ctx;
/*
- * The number of strict priority transmit rings out of currently
- * allocated TX rings to the function (alloc_tx_rings).
+ * This field specifies how many NQs are reserved for the PF.
+ * Remaining NQs that belong to the PF are available for VFs.
+ * Once a PF has created VFs, it cannot change how many NQs are
+ * reserved for itself (since the NQs must be contiguous in HW).
*/
- uint8_t unused_1;
- uint8_t valid;
+ uint16_t alloc_msix;
+ uint8_t unused_2[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
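
The new options byte packs the PCIe cache line size into its two low
bits, with the remaining bits reserved. As a minimal sketch (assuming
this header is included and resp points at a response whose valid byte
has already been confirmed), a consumer could decode it as follows;
the helper name is hypothetical:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical decoder for the qcfg options/evb_mode bytes. */
static void
decode_qcfg_options(const struct hwrm_func_qcfg_output *resp)
{
	uint8_t linesize = (resp->options &
		HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_MASK) >>
		HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SFT;

	if (linesize == HWRM_FUNC_QCFG_OUTPUT_OPTIONS_CACHE_LINESIZE_SIZE_128)
		printf("PCIe cache line size: 128B\n");
	else
		printf("PCIe cache line size: 64B\n");

	if (resp->evb_mode == HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA)
		printf("EVB mode: VEPA\n");
}
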
-/* hwrm_func_vlan_qcfg */
-/*
- * Description: This command should be called by PF driver to get the current
- * C-TAG, S-TAG and correcponsing PCP and TPID values configured for the
- * function.
- */
-/* Input (24 bytes) */
+/***********************
+ * hwrm_func_vlan_qcfg *
+ ***********************/
+
+
+/* hwrm_func_vlan_qcfg_input (size:192b/24B) */
struct hwrm_func_vlan_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function that is being configured. If set
- * to 0xFF... (All Fs), then the configuration is for the
- * requesting function.
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
*/
- uint16_t unused_0[3];
-};
+ uint16_t fid;
+ uint8_t unused_0[6];
+} __attribute__((packed));
-/* Output (40 bytes) */
+/* hwrm_func_vlan_qcfg_output (size:320b/40B) */
struct hwrm_func_vlan_qcfg_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
- */
- uint16_t stag_vid;
+ uint8_t valid;
/* S-TAG VLAN identifier configured for the function. */
- uint8_t stag_pcp;
+ uint16_t stag_vid;
/* S-TAG PCP value configured for the function. */
- uint8_t unused_4;
- uint16_t stag_tpid;
+ uint8_t stag_pcp;
+ uint8_t unused_1;
/*
- * S-TAG TPID value configured for the function. This field is
- * specified in network byte order.
+ * S-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
*/
- uint16_t ctag_vid;
+ uint16_t stag_tpid;
/* C-TAG VLAN identifier configured for the function. */
- uint8_t ctag_pcp;
+ uint16_t ctag_vid;
/* C-TAG PCP value configured for the function. */
- uint8_t unused_5;
- uint16_t ctag_tpid;
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
/*
- * C-TAG TPID value configured for the function. This field is
- * specified in network byte order.
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
*/
- uint32_t rsvd2;
+ uint16_t ctag_tpid;
/* Future use. */
- uint32_t rsvd3;
+ uint32_t rsvd2;
/* Future use. */
- uint32_t unused_6;
-};
+ uint32_t rsvd3;
+ uint32_t unused_3;
+} __attribute__((packed));
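
Because the firmware writes the valid byte last, a consumer must see
valid == 1 (behind a read barrier) before trusting any other response
field, and the TPIDs then need a big-endian-to-host conversion. A
hedged sketch, assuming DPDK's standard rte_io_rmb(), rte_delay_us()
and rte_be_to_cpu_16() helpers and a hypothetical caller that passed a
16B-aligned, zeroed buffer as resp_addr:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_byteorder.h>

/* Hypothetical poll loop: wait for the response to be complete, then
 * return the S-TAG TPID in host byte order. */
static int
read_vlan_qcfg(volatile struct hwrm_func_vlan_qcfg_output *resp,
	       uint16_t *stag_tpid, unsigned int timeout_us)
{
	while (timeout_us--) {
		if (resp->valid == 1) {
			rte_io_rmb(); /* valid read ordered before data reads */
			*stag_tpid = rte_be_to_cpu_16(resp->stag_tpid);
			return 0;
		}
		rte_delay_us(1);
	}
	return -1; /* timed out waiting for firmware */
}
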
-/* hwrm_func_vlan_cfg */
-/*
- * Description: This command allows PF driver to configure C-TAG, S-TAG and
- * corresponding PCP and TPID values for a function.
- */
-/* Input (48 bytes) */
+/**********************
+ * hwrm_func_vlan_cfg *
+ **********************/
+
+
+/* hwrm_func_vlan_cfg_input (size:384b/48B) */
struct hwrm_func_vlan_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
- /*
- * Function ID of the function that is being configured. If set
- * to 0xFF... (All Fs), then the configuration is for the
- * requesting function.
- */
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t enables;
- /* This bit must be '1' for the stag_vid field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
- /* This bit must be '1' for the ctag_vid field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
- /* This bit must be '1' for the stag_pcp field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
- /* This bit must be '1' for the ctag_pcp field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
- /* This bit must be '1' for the stag_tpid field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
- /* This bit must be '1' for the ctag_tpid field to be configured. */
- #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
- uint16_t stag_vid;
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the stag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the ctag_vid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_VID UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the stag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_PCP UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the ctag_pcp field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_PCP UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ctag_tpid field to be
+ * configured.
+ */
+ #define HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_CTAG_TPID UINT32_C(0x20)
/* S-TAG VLAN identifier configured for the function. */
- uint8_t stag_pcp;
+ uint16_t stag_vid;
/* S-TAG PCP value configured for the function. */
- uint8_t unused_2;
- uint16_t stag_tpid;
+ uint8_t stag_pcp;
+ uint8_t unused_1;
/*
- * S-TAG TPID value configured for the function. This field is
- * specified in network byte order.
+ * S-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
*/
- uint16_t ctag_vid;
+ uint16_t stag_tpid;
/* C-TAG VLAN identifier configured for the function. */
- uint8_t ctag_pcp;
+ uint16_t ctag_vid;
/* C-TAG PCP value configured for the function. */
- uint8_t unused_3;
- uint16_t ctag_tpid;
+ uint8_t ctag_pcp;
+ uint8_t unused_2;
/*
- * C-TAG TPID value configured for the function. This field is
- * specified in network byte order.
+ * C-TAG TPID value configured for the function. This field is specified in
+ * network byte order.
*/
- uint32_t rsvd1;
+ uint16_t ctag_tpid;
/* Future use. */
- uint32_t rsvd2;
+ uint32_t rsvd1;
/* Future use. */
- uint32_t unused_4;
-};
+ uint32_t rsvd2;
+ uint8_t unused_3[4];
+} __attribute__((packed));
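
Each request field only takes effect when its matching enables bit is
set, and the TPIDs are supplied in network byte order. A sketch of
filling a request that configures just the S-TAG VID and TPID on the
requesting function; the little-endian wire conversions a production
driver applies to multi-byte HWRM fields are elided here, and the
helper name is hypothetical:

#include <string.h>
#include <stdint.h>
#include <rte_byteorder.h>

/* Hypothetical request fill: only stag_vid and stag_tpid take effect
 * because only their enables bits are set. */
static void
fill_vlan_cfg(struct hwrm_func_vlan_cfg_input *req,
	      uint16_t vid, uint16_t tpid)
{
	memset(req, 0, sizeof(*req));
	req->fid = 0xffff; /* all Fs: configure the requesting function */
	req->enables = HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_VID |
		       HWRM_FUNC_VLAN_CFG_INPUT_ENABLES_STAG_TPID;
	req->stag_vid = vid;
	req->stag_tpid = rte_cpu_to_be_16(tpid); /* network byte order */
}
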
-/* Output (16 bytes) */
+/* hwrm_func_vlan_cfg_output (size:128b/16B) */
struct hwrm_func_vlan_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
-};
+ uint8_t valid;
+} __attribute__((packed));
-/* hwrm_func_cfg */
-/*
- * Description: This command allows configuration of a PF by the corresponding
- * PF driver. This command also allows configuration of a child VF by its parent
- * PF driver. The input FID value is used to indicate what function is being
- * configured. This allows a PF driver to configure the PF owned by itself or a
- * virtual function that is a child of the PF. This command allows to reserve
- * resources for a VF by its parent PF. To reverse the process, the command
- * should be called with all enables flags cleared for resources. This will free
- * allocated resources for the VF and return them to the resource pool. If this
- * command is requested by a VF driver to configure or reserve resources, then
- * the HWRM shall fail this command. If default MAC address and/or VLAN are
- * provided in this command, then the HWRM shall set up appropriate MAC/VLAN
- * filters for the function that is being configured. If source properties
- * checks are enabled and default MAC address and/or IP address are provided in
- * this command, then the HWRM shall set appropriate source property checks
- * based on provided MAC and/or IP addresses. The parent PF driver should not
- * set MTU/MRU for a VF using this command. This is to allow MTU/MRU setting by
- * the VF driver. If the MTU or MRU for a VF is set by the PF driver, then the
- * HWRM should ignore it. A function's MTU/MRU should be set prior to allocating
- * RX VNICs or TX rings. A PF driver calls hwrm_func_cfg to allocate resources
- * for itself or its children VFs. All function drivers shall call hwrm_func_cfg
- * to reserve resources. A request to hwrm_func_cfg may not be fully granted;
- * that is, a request for resources may be larger than what can be supported by
- * the device and the HWRM will allocate the best set of resources available,
- * but that may be less than requested. If all the amounts requested could not
- * be fulfilled, the HWRM shall allocate what it could and return a status code
- * of success. A function driver should call hwrm_func_qcfg immediately after
- * hwrm_func_cfg to determine what resources were assigned to the configured
- * function. A call by a PF driver to hwrm_func_cfg to allocate resources for
- * itself shall only allocate resources for the PF driver to use, not for its
- * children VFs. Likewise, a call to hwrm_func_qcfg shall return the resources
- * available for the PF driver to use, not what is available to its children
- * VFs.
- */
-/* Input (88 bytes) */
+/*****************
+ * hwrm_func_cfg *
+ *****************/
+
+
+/* hwrm_func_cfg_input (size:704b/88B) */
struct hwrm_func_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function that is being configured. If set
- * to 0xFF... (All Fs), then the configuration is for the
- * requesting function.
+ * Function ID of the function that is being
+ * configured.
+ * If set to 0xFF... (All Fs), then the configuration is
+ * for the requesting function.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t flags;
+ uint16_t fid;
/*
- * When this bit is '1', the function is disabled with source
- * MAC address check. This is an anti-spoofing check. If this
- * flag is set, then the function shall be configured to
- * disallow transmission of frames with the source MAC address
- * that is configured for this function.
+ * This field specifies how many NQs will be reserved for the PF.
+ * Remaining NQs that belong to the PF become available for VFs.
+ * Once a PF has created VFs, it cannot change how many NQs are
+ * reserved for itself (since the NQs must be contiguous in HW).
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \
+ uint16_t num_msix;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function is disabled with
+ * source MAC address check.
+ * This is an anti-spoofing check. If this flag is set,
+ * then the function shall be configured to disallow
+ * transmission of frames with the source MAC address that
+ * is configured for this function.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE \
UINT32_C(0x1)
/*
- * When this bit is '1', the function is enabled with source MAC
- * address check. This is an anti-spoofing check. If this flag
- * is set, then the function shall be configured to allow
- * transmission of frames with the source MAC address that is
- * configured for this function.
+ * When this bit is '1', the function is enabled with
+ * source MAC address check.
+ * This is an anti-spoofing check. If this flag is set,
+ * then the function shall be configured to allow
+ * transmission of frames with the source MAC address that
+ * is configured for this function.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE \
UINT32_C(0x2)
- /* reserved */
- #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK UINT32_C(0x1fc)
- #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2
+ /* reserved. */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_MASK \
+ UINT32_C(0x1fc)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSVD_SFT 2
/*
* Standard TX Ring mode is used for the allocation of TX ring
* and underlying scheduling resources that allow bandwidth
- * reservation and limit settings on the queried function. If
- * set to 1, then standard TX ring mode is requested to be
+ * reservation and limit settings on the queried function.
+ * If set to 1, then standard TX ring mode is requested to be
* enabled on the function being configured.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE \
UINT32_C(0x200)
/*
* Standard TX Ring mode is used for the allocation of TX ring
* and underlying scheduling resources that allow bandwidth
- * reservation and limit settings on the queried function. If
- * set to 1, then the standard TX ring mode is requested to be
- * disabled on the function being configured. In this extended
- * TX ring resource mode, the minimum and maximum bandwidth
- * settings are not supported to allow the allocation of TX
- * rings to span multiple scheduler nodes.
- */
- #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \
+ * reservation and limit settings on the queried function.
+ * If set to 1, then the standard TX ring mode is requested to
+ * be disabled on the function being configured. In this extended
+ * TX ring resource mode, the minimum and maximum bandwidth settings
+ * are not supported to allow the allocation of TX rings to
+ * span multiple scheduler nodes.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE \
UINT32_C(0x400)
/*
- * If this bit is set, virtual mac address configured in this
- * command will be persistent over warm boot.
+ * If this bit is set, virtual mac address configured
+ * in this command will be persistent over warm boot.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST UINT32_C(0x800)
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST \
+ UINT32_C(0x800)
/*
- * This bit only applies to the VF. If this bit is set, the
- * statistic context counters will not be cleared when the
- * statistic context is freed or a function reset is called on
- * VF. This bit will be cleared when the PF is unloaded or a
- * function reset is called on the PF.
+ * This bit only applies to the VF. If this bit is set, the statistic
+ * context counters will not be cleared when the statistic context is freed
+ * or a function reset is called on the VF. This bit will be cleared when the PF
+ * is unloaded or a function reset is called on the PF.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \
UINT32_C(0x1000)
/*
- * This bit requests that the firmware test to see if all the
- * assets requested in this command (i.e. number of TX rings)
- * are available. The firmware will return an error if the
- * requested assets are not available. The firwmare will NOT
- * reserve the assets if they are available.
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of TX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST \
+ UINT32_C(0x2000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RX rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RX_ASSETS_TEST \
+ UINT32_C(0x4000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of CMPL rings) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST \
+ UINT32_C(0x8000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of RSS ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST \
+ UINT32_C(0x10000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of ring groups) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST \
+ UINT32_C(0x20000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of stat ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST \
+ UINT32_C(0x40000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of VNICs) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST \
+ UINT32_C(0x80000)
+ /*
+ * This bit requests that the firmware test to see if all the assets
+ * requested in this command (i.e. number of L2 ctx) are available.
+ * The firmware will return an error if the requested assets are
+ * not available. The firmware will NOT reserve the assets if they
+ * are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS_L2_CTX_ASSETS_TEST \
+ UINT32_C(0x100000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the mtu field to be
+ * configured.
*/
- #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST UINT32_C(0x2000)
- uint32_t enables;
- /* This bit must be '1' for the mtu field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
- /* This bit must be '1' for the mru field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU UINT32_C(0x2)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the mru field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MRU \
+ UINT32_C(0x2)
/*
* This bit must be '1' for the num_rsscos_ctxs field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS UINT32_C(0x4)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS \
+ UINT32_C(0x4)
/*
* This bit must be '1' for the num_cmpl_rings field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS UINT32_C(0x8)
- /* This bit must be '1' for the num_tx_rings field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x10)
- /* This bit must be '1' for the num_rx_rings field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x20)
- /* This bit must be '1' for the num_l2_ctxs field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x40)
- /* This bit must be '1' for the num_vnics field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS UINT32_C(0x80)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the num_tx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_rx_rings field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the num_l2_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the num_vnics field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS \
+ UINT32_C(0x80)
/*
* This bit must be '1' for the num_stat_ctxs field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x100)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS \
+ UINT32_C(0x100)
/*
* This bit must be '1' for the dflt_mac_addr field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x200)
- /* This bit must be '1' for the dflt_vlan field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN UINT32_C(0x400)
- /* This bit must be '1' for the dflt_ip_addr field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR UINT32_C(0x800)
- /* This bit must be '1' for the min_bw field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW UINT32_C(0x1000)
- /* This bit must be '1' for the max_bw field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW UINT32_C(0x2000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the dflt_vlan field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the dflt_ip_addr field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_IP_ADDR \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the min_bw field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MIN_BW \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the max_bw field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW \
+ UINT32_C(0x2000)
/*
* This bit must be '1' for the async_event_cr field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR \
+ UINT32_C(0x4000)
/*
* This bit must be '1' for the vlan_antispoof_mode field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE UINT32_C(0x8000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE \
+ UINT32_C(0x8000)
/*
* This bit must be '1' for the allowed_vlan_pris field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS UINT32_C(0x10000)
- /* This bit must be '1' for the evb_mode field to be configured. */
- #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE UINT32_C(0x20000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_ALLOWED_VLAN_PRIS \
+ UINT32_C(0x10000)
+ /*
+ * This bit must be '1' for the evb_mode field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE \
+ UINT32_C(0x20000)
/*
* This bit must be '1' for the num_mcast_filters field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS UINT32_C(0x40000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MCAST_FILTERS \
+ UINT32_C(0x40000)
/*
* This bit must be '1' for the num_hw_ring_grps field to be
* configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS UINT32_C(0x80000)
- uint16_t mtu;
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS \
+ UINT32_C(0x80000)
+ /*
+ * This bit must be '1' for the cache_linesize field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_CACHE_LINESIZE \
+ UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the num_msix field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX \
+ UINT32_C(0x200000)
+ /*
+ * The maximum transmission unit of the function.
+ * The HWRM should make sure that the mtu of
+ * the function does not exceed the mtu of the physical
+ * port that this function is associated with.
+ *
+ * In addition to configuring mtu per function, it is
+ * possible to configure mtu per transmit ring.
+ * By default, the mtu of each transmit ring associated
+ * with a function is equal to the mtu of the function.
+ * The HWRM should make sure that the mtu of each transmit
+ * ring that is assigned to a function has a valid mtu.
+ */
+ uint16_t mtu;
/*
- * The maximum transmission unit of the function. The HWRM
- * should make sure that the mtu of the function does not exceed
- * the mtu of the physical port that this function is associated
- * with. In addition to configuring mtu per function, it is
- * possible to configure mtu per transmit ring. By default, the
- * mtu of each transmit ring associated with a function is equal
- * to the mtu of the function. The HWRM should make sure that
- * the mtu of each transmit ring that is assigned to a function
- * has a valid mtu.
+ * The maximum receive unit of the function.
+ * The HWRM should make sure that the mru of
+ * the function does not exceed the mru of the physical
+ * port that this function is associated with.
+ *
+ * In addition to configuring mru per function, it is
+ * possible to configure mru per vnic.
+ * By default, the mru of each vnic associated
+ * with a function is equal to the mru of the function.
+ * The HWRM should make sure that the mru of each vnic
+ * that is assigned to a function has a valid mru.
*/
- uint16_t mru;
+ uint16_t mru;
/*
- * The maximum receive unit of the function. The HWRM should
- * make sure that the mru of the function does not exceed the
- * mru of the physical port that this function is associated
- * with. In addition to configuring mru per function, it is
- * possible to configure mru per vnic. By default, the mru of
- * each vnic associated with a function is equal to the mru of
- * the function. The HWRM should make sure that the mru of each
- * vnic that is assigned to a function has a valid mru.
+ * The number of RSS/COS contexts requested for the
+ * function.
*/
- uint16_t num_rsscos_ctxs;
- /* The number of RSS/COS contexts requested for the function. */
- uint16_t num_cmpl_rings;
+ uint16_t num_rsscos_ctxs;
/*
- * The number of completion rings requested for the function.
- * This does not include the rings allocated to any children
- * functions if any.
+ * The number of completion rings requested for the
+ * function. This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t num_tx_rings;
+ uint16_t num_cmpl_rings;
/*
- * The number of transmit rings requested for the function. This
- * does not include the rings allocated to any children
- * functions if any.
+ * The number of transmit rings requested for the function.
+ * This does not include the rings allocated to any
+ * children functions if any.
*/
- uint16_t num_rx_rings;
+ uint16_t num_tx_rings;
/*
- * The number of receive rings requested for the function. This
- * does not include the rings allocated to any children
- * functions if any.
+ * The number of receive rings requested for the function.
+ * This does not include the rings allocated
+ * to any children functions if any.
*/
- uint16_t num_l2_ctxs;
+ uint16_t num_rx_rings;
/* The requested number of L2 contexts for the function. */
- uint16_t num_vnics;
+ uint16_t num_l2_ctxs;
/* The requested number of vnics for the function. */
- uint16_t num_stat_ctxs;
+ uint16_t num_vnics;
/* The requested number of statistic contexts for the function. */
- uint16_t num_hw_ring_grps;
+ uint16_t num_stat_ctxs;
/*
- * The number of HW ring groups that should be reserved for this
- * function.
+ * The number of HW ring groups that should
+ * be reserved for this function.
*/
- uint8_t dflt_mac_addr[6];
+ uint16_t num_hw_ring_grps;
/* The default MAC address for the function being configured. */
- uint16_t dflt_vlan;
+ uint8_t dflt_mac_addr[6];
/*
- * The default VLAN for the function being configured. This
- * field's format is same as 802.1Q Tag's Tag Control
- * Information (TCI) format that includes both Priority Code
- * Point (PCP) and VLAN Identifier (VID).
+ * The default VLAN for the function being configured.
+ * This field's format is same as 802.1Q Tag's
+ * Tag Control Information (TCI) format that includes both
+ * Priority Code Point (PCP) and VLAN Identifier (VID).
*/
- uint32_t dflt_ip_addr[4];
+ uint16_t dflt_vlan;
/*
* The default IP address for the function being configured.
* This address is only used in enabling source property check.
*/
- uint32_t min_bw;
+ uint32_t dflt_ip_addr[4];
/*
- * Minimum BW allocated for this function. The HWRM will
- * translate this value into byte counter and time interval used
- * for the scheduler inside the device.
+ * Minimum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
*/
+ uint32_t min_bw;
/* The bandwidth value. */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
#define HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_LAST \
- FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES
+ HWRM_FUNC_CFG_INPUT_MIN_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
+ /* Value is in Kb or KB (base 10). */
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
+ /* Value is in Gb or GB (base 10). */
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
@@ -3522,39 +6458,42 @@ struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_LAST \
- FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID
- uint32_t max_bw;
+ HWRM_FUNC_CFG_INPUT_MIN_BW_BW_VALUE_UNIT_INVALID
/*
- * Maximum BW allocated for this function. The HWRM will
- * translate this value into byte counter and time interval used
- * for the scheduler inside the device.
+ * Maximum BW allocated for this function.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for the scheduler inside the device.
*/
+ uint32_t max_bw;
/* The bandwidth value. */
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK \
UINT32_C(0xfffffff)
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
#define HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_LAST \
- FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES
+ HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
+ /* Value is in Gb or GB (base 10). */
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
@@ -3564,1984 +6503,4481 @@ struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
- FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
- uint16_t async_event_cr;
+ HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
/*
* ID of the target completion ring for receiving asynchronous
- * event completions. If this field is not valid, then the HWRM
- * shall use the default completion ring of the function that is
- * being configured as the target completion ring for providing
- * any asynchronous event completions for that function. If this
- * field is valid, then the HWRM shall use the completion ring
- * identified by this ID as the target completion ring for
- * providing any asynchronous event completions for the function
- * that is being configured.
- */
- uint8_t vlan_antispoof_mode;
+ * event completions. If this field is not valid, then the
+ * HWRM shall use the default completion ring of the function
+ * that is being configured as the target completion ring for
+ * providing any asynchronous event completions for that
+ * function.
+ * If this field is valid, then the HWRM shall use the
+ * completion ring identified by this ID as the target
+ * completion ring for providing any asynchronous event
+ * completions for the function that is being configured.
+ */
+ uint16_t async_event_cr;
/* VLAN Anti-spoofing mode. */
+ uint8_t vlan_antispoof_mode;
/* No VLAN anti-spoofing checks are enabled */
- #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK UINT32_C(0x0)
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK \
+ UINT32_C(0x0)
/* Validate VLAN against the configured VLAN(s) */
#define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN \
UINT32_C(0x1)
/* Insert VLAN if it does not exist, otherwise discard */
#define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE \
UINT32_C(0x2)
- /*
- * Insert VLAN if it does not exist, override
- * VLAN if it exists
- */
- #define \
- HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \
+ /* Insert VLAN if it does not exist, override VLAN if it exists */
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN \
UINT32_C(0x3)
- uint8_t allowed_vlan_pris;
+ #define HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_LAST \
+ HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
/*
- * This bit field defines VLAN PRIs that are allowed on this
- * function. If nth bit is set, then VLAN PRI n is allowed on
+ * This bit field defines VLAN PRIs that are allowed on
* this function.
+ * If nth bit is set, then VLAN PRI n is allowed on this
+ * function.
*/
- uint8_t evb_mode;
+ uint8_t allowed_vlan_pris;
/*
* The HWRM shall allow a PF driver to change EVB mode for the
- * partition it belongs to. The HWRM shall not allow a VF driver
- * to change the EVB mode. The HWRM shall take into account the
- * switching of EVB mode from one to another and reconfigure
- * hardware resources as appropriately. The switching from VEB
- * to VEPA mode requires the disabling of the loopback traffic.
- * Additionally, source knock outs are handled differently in
- * VEB and VEPA modes.
- */
- /* No Edge Virtual Bridging (EVB) */
- #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
- /* Virtual Ethernet Bridge (VEB) */
- #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1)
- /* Virtual Ethernet Port Aggregator (VEPA) */
- #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2)
- uint8_t unused_2;
- uint16_t num_mcast_filters;
+ * partition it belongs to.
+ * The HWRM shall not allow a VF driver to change the EVB mode.
+ * The HWRM shall take into account the switching of EVB mode
+ * from one to another and reconfigure hardware resources as
+ * appropriately.
+ * The switching from VEB to VEPA mode requires
+ * the disabling of the loopback traffic. Additionally,
+ * source knock outs are handled differently in VEB and VEPA
+ * modes.
+ */
+ uint8_t evb_mode;
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ #define HWRM_FUNC_CFG_INPUT_EVB_MODE_LAST \
+ HWRM_FUNC_CFG_INPUT_EVB_MODE_VEPA
+ uint8_t options;
+ /*
+ * This value indicates the PCIE device cache line size.
+ * The cache line size allows the DMA writes to terminate and
+ * start at the cache boundary.
+ */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_MASK \
+ UINT32_C(0x3)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SFT 0
+ /* Cache Line Size 64 bytes */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_64 \
+ UINT32_C(0x0)
+ /* Cache Line Size 128 bytes */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128 \
+ UINT32_C(0x1)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_LAST \
+ HWRM_FUNC_CFG_INPUT_OPTIONS_CACHE_LINESIZE_SIZE_128
+ /* Reserved for future. */
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_FUNC_CFG_INPUT_OPTIONS_RSVD_SFT 2
/*
- * The number of multicast filters that should be reserved for
- * this function on the RX side.
+ * The number of multicast filters that should
+ * be reserved for this function on the RX side.
*/
+ uint16_t num_mcast_filters;
} __attribute__((packed));
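
min_bw and max_bw pack a 28-bit value, a scale bit (bits vs. bytes) and
a 3-bit unit into a single word, and the *_ASSETS_TEST flags ask the
firmware to verify resource availability without reserving anything. A
sketch (wire-endianness conversions again elided, helper names
hypothetical) that caps the function at 10 Gb/s and test-requests TX
rings:

#include <stdint.h>

/* Encode "10 gigabits per second" in the max_bw format. */
static uint32_t
encode_max_bw_10g(void)
{
	uint32_t bw = 0;

	bw |= 10 & HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_MASK; /* value */
	bw |= HWRM_FUNC_CFG_INPUT_MAX_BW_SCALE_BITS;   /* bits, not bytes */
	bw |= HWRM_FUNC_CFG_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA; /* base-10 G */
	return bw;
}

static void
fill_func_cfg(struct hwrm_func_cfg_input *req, uint16_t num_tx_rings)
{
	req->fid = 0xffff; /* configure the requesting function */
	req->enables = HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW |
		       HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS;
	req->max_bw = encode_max_bw_10g();
	req->num_tx_rings = num_tx_rings;
	/* Only test TX ring availability; firmware reserves nothing. */
	req->flags = HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST;
}
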
-/* Output (16 bytes) */
+/* hwrm_func_cfg_output (size:128b/16B) */
struct hwrm_func_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_func_qstats */
-/*
- * Description: This command returns statistics of a function. The input FID
- * value is used to indicate what function is being queried. This allows a
- * physical function driver to query virtual functions that are children of the
- * physical function. The HWRM shall return any unsupported counter with a value
- * of 0xFFFFFFFF for 32-bit counters and 0xFFFFFFFFFFFFFFFF for 64-bit counters.
- */
-/* Input (24 bytes) */
+/********************
+ * hwrm_func_qstats *
+ ********************/
+
+
+/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function that is being queried. 0xFF...
- * (All Fs) if the query is for the requesting function.
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t unused_0[3];
+ uint16_t fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (176 bytes) */
+/* hwrm_func_qstats_output (size:1408b/176B) */
struct hwrm_func_qstats_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint64_t tx_ucast_pkts;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* Number of transmitted unicast packets on the function. */
- uint64_t tx_mcast_pkts;
+ uint64_t tx_ucast_pkts;
/* Number of transmitted multicast packets on the function. */
- uint64_t tx_bcast_pkts;
+ uint64_t tx_mcast_pkts;
/* Number of transmitted broadcast packets on the function. */
- uint64_t tx_err_pkts;
+ uint64_t tx_bcast_pkts;
/*
* Number of transmitted packets that were discarded due to
- * internal NIC resource problems. For transmit, this can only
- * happen if TMP is configured to allow dropping in HOL blocking
- * conditions, which is not a normal configuration.
+ * internal NIC resource problems. For transmit, this
+ * can only happen if TMP is configured to allow dropping
+ * in HOL blocking conditions, which is not a normal
+ * configuration.
*/
- uint64_t tx_drop_pkts;
+ uint64_t tx_discard_pkts;
/*
* Number of dropped packets on transmit path on the function.
- * These are packets that have been marked for drop by the TE
- * CFA block or are packets that exceeded the transmit MTU limit
- * for the function.
+ * These are packets that have been marked for drop by
+ * the TE CFA block or are packets that exceeded the
+ * transmit MTU limit for the function.
*/
- uint64_t tx_ucast_bytes;
+ uint64_t tx_drop_pkts;
/* Number of transmitted bytes for unicast traffic on the function. */
- uint64_t tx_mcast_bytes;
- /*
- * Number of transmitted bytes for multicast traffic on the
- * function.
- */
- uint64_t tx_bcast_bytes;
- /*
- * Number of transmitted bytes for broadcast traffic on the
- * function.
- */
- uint64_t rx_ucast_pkts;
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic on the function. */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic on the function. */
+ uint64_t tx_bcast_bytes;
/* Number of received unicast packets on the function. */
- uint64_t rx_mcast_pkts;
+ uint64_t rx_ucast_pkts;
/* Number of received multicast packets on the function. */
- uint64_t rx_bcast_pkts;
+ uint64_t rx_mcast_pkts;
/* Number of received broadcast packets on the function. */
- uint64_t rx_err_pkts;
+ uint64_t rx_bcast_pkts;
/*
- * Number of received packets that were discarded on the
- * function due to resource limitations. This can happen for 3
- * reasons. # The BD used for the packet has a bad format. #
- * There were no BDs available in the ring for the packet. #
- * There were no BDs available on-chip for the packet.
+ * Number of received packets that were discarded on the function
+ * due to resource limitations. This can happen for 3 reasons.
+ * # The BD used for the packet has a bad format.
+ * # There were no BDs available in the ring for the packet.
+ * # There were no BDs available on-chip for the packet.
*/
- uint64_t rx_drop_pkts;
+ uint64_t rx_discard_pkts;
/*
* Number of dropped packets on received path on the function.
- * These are packets that have been marked for drop by the RE
- * CFA.
+ * These are packets that have been marked for drop by the
+ * RE CFA.
*/
- uint64_t rx_ucast_bytes;
+ uint64_t rx_drop_pkts;
/* Number of received bytes for unicast traffic on the function. */
- uint64_t rx_mcast_bytes;
+ uint64_t rx_ucast_bytes;
/* Number of received bytes for multicast traffic on the function. */
- uint64_t rx_bcast_bytes;
+ uint64_t rx_mcast_bytes;
/* Number of received bytes for broadcast traffic on the function. */
- uint64_t rx_agg_pkts;
+ uint64_t rx_bcast_bytes;
/* Number of aggregated unicast packets on the function. */
- uint64_t rx_agg_bytes;
+ uint64_t rx_agg_pkts;
/* Number of aggregated unicast bytes on the function. */
- uint64_t rx_agg_events;
+ uint64_t rx_agg_bytes;
/* Number of aggregation events on the function. */
- uint64_t rx_agg_aborts;
+ uint64_t rx_agg_events;
/* Number of aborted aggregations on the function. */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint64_t rx_agg_aborts;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
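+
+/*
+ * A minimal sketch (not part of the generated HSI definitions) of how a
+ * driver might consume the trailing `valid` byte described above: since
+ * the firmware writes that byte last, polling it until it reads as '1'
+ * tells the host the rest of the response is complete in RAM. The helper
+ * name and the spin-count timeout are hypothetical.
+ */
+#include <stdint.h>
+
+static inline int
+hwrm_wait_for_valid(const volatile uint8_t *valid, unsigned int spins)
+{
+ while (spins--) {
+  /* Acquire load: fields before `valid` are visible once it reads 1. */
+  if (__atomic_load_n(valid, __ATOMIC_ACQUIRE) == 1)
+   return 0; /* response fully written */
+ }
+ return -1; /* hypothetical timeout: response never became valid */
+}
+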
-/* hwrm_func_clr_stats */
-/*
- * Description: This command clears statistics of a function. The input FID
- * value is used to indicate what function's statistics is being cleared. This
- * allows a physical function driver to clear statistics of virtual functions
- * that are children of the physical function.
- */
-/* Input (24 bytes) */
+/***********************
+ * hwrm_func_clr_stats *
+ ***********************/
+
+
+/* hwrm_func_clr_stats_input (size:192b/24B) */
struct hwrm_func_clr_stats_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t fid;
+ uint64_t resp_addr;
/*
- * Function ID of the function. 0xFF... (All Fs) if the query is
- * for the requesting function.
+ * Function ID of the function.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t unused_0[3];
+ uint16_t fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
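+
+/*
+ * A minimal sketch (not part of the generated HSI definitions) of filling
+ * the common input header above. The command code, sequence counter and
+ * response DMA address are parameters because their real values come from
+ * the HWRM spec and the driver's transport layer; per the field comments,
+ * target_id 0xFFFF addresses the HWRM itself and an all-Fs fid clears the
+ * requesting function's own stats. Treating cmpl_ring -1 as "no
+ * completion ring" is an assumption carried over from the older wording.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static void
+func_clr_stats_fill(struct hwrm_func_clr_stats_input *req,
+      uint16_t req_type, uint16_t seq_id, uint64_t resp_dma)
+{
+ memset(req, 0, sizeof(*req));
+ req->req_type = req_type;  /* HWRM command code */
+ req->cmpl_ring = (uint16_t)-1;  /* assumption: no completion ring */
+ req->seq_id = seq_id;   /* opaque, echoed back on completion */
+ req->target_id = UINT16_C(0xffff); /* 0xFFFF - HWRM */
+ req->resp_addr = resp_dma;  /* host buffer for the response */
+ req->fid = UINT16_C(0xffff);  /* all Fs: the requesting function */
+}
+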
-/* Output (16 bytes) */
+/* hwrm_func_clr_stats_output (size:128b/16B) */
struct hwrm_func_clr_stats_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_func_vf_resc_free *
+ **************************/
+
+
+/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+struct hwrm_func_vf_resc_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t vf_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_func_vf_vnic_ids_query */
-/* Description: This command is used to query vf vnic ids. */
-/* Input (32 bytes) */
+/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+struct hwrm_func_vf_resc_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_func_vf_vnic_ids_query *
+ *******************************/
+
+
+/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
struct hwrm_func_vf_vnic_ids_query_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t vf_id;
+ uint64_t resp_addr;
/*
- * This value is used to identify a Virtual Function (VF). The
- * scope of VF ID is local within a PF.
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t max_vnic_id_cnt;
+ uint16_t vf_id;
+ uint8_t unused_0[2];
 /* Maximum number of VNIC IDs in the VNIC ID table */
- uint64_t vnic_id_tbl_addr;
+ uint32_t max_vnic_id_cnt;
 /* This is the address of the VF VNIC ID table */
+ uint64_t vnic_id_tbl_addr;
} __attribute__((packed));
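+
+/*
+ * A minimal sketch (not part of the generated HSI definitions) of the
+ * buffer contract implied above: the caller supplies a table of
+ * max_vnic_id_cnt slots at vnic_id_tbl_addr and, per the output struct
+ * below, each VNIC ID comes back as a 32-bit number with vnic_id_cnt
+ * reporting how many slots were filled. The allocator shown is
+ * hypothetical host-side bookkeeping, not the DMA mapping a real driver
+ * would also need.
+ */
+#include <stdint.h>
+#include <stdlib.h>
+
+static uint32_t *
+vf_vnic_id_tbl_alloc(uint32_t max_vnic_id_cnt, size_t *tbl_size)
+{
+ /* One 32-bit entry per VNIC ID slot. */
+ *tbl_size = (size_t)max_vnic_id_cnt * sizeof(uint32_t);
+ return calloc(max_vnic_id_cnt, sizeof(uint32_t));
+}
+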
-/* Output (16 bytes) */
+/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
struct hwrm_func_vf_vnic_ids_query_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * Actual number of VNIC IDs
+ *
+ * Each VNIC ID is written as a 32-bit number.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint32_t vnic_id_cnt;
+ uint8_t unused_0[3];
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint32_t vnic_id_cnt;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_drv_rgtr *
+ **********************/
+
+
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
+struct hwrm_func_drv_rgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Actual number of vnic ids Each VNIC ID is written as a 32-bit
- * number.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
-} __attribute__((packed));
-
-/* hwrm_func_drv_rgtr */
-/*
- * Description: This command is used by the function driver to register its
- * information with the HWRM. A function driver shall implement this command. A
- * function driver shall use this command during the driver initialization right
- * after the HWRM version discovery and default ring resources allocation.
- */
-/* Input (80 bytes) */
-struct hwrm_func_drv_rgtr_input {
- uint16_t req_type;
+ uint16_t seq_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t cmpl_ring;
+ uint16_t target_id;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * When this bit is '1', the function driver is requesting
+ * all requests from its children VF drivers to be
+ * forwarded to itself.
+ * This flag can only be set by the PF driver.
+ * If a VF driver sets this flag, it should be ignored
+ * by the HWRM.
*/
- uint64_t resp_addr;
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * When this bit is '1', the function is requesting none of
+ * the requests from its children VF drivers to be
+ * forwarded to itself.
+ * This flag can only be set by the PF driver.
+ * If a VF driver sets this flag, it should be ignored
+ * by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
+ /*
+ * When this bit is '1', then ver_maj_8b, ver_min_8b, ver_upd_8b
+ * fields shall be ignored and ver_maj, ver_min, ver_upd
+ * and ver_patch shall be used for the driver version information.
+ * When this bit is '0', then ver_maj_8b, ver_min_8b, ver_upd_8b
+ * fields shall be used for the driver version information and
+ * ver_maj, ver_min, ver_upd and ver_patch shall be ignored.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE UINT32_C(0x4)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the os_type field to be
+ * configured.
*/
- uint32_t flags;
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the function driver is requesting all
- * requests from its children VF drivers to be forwarded to
- * itself. This flag can only be set by the PF driver. If a VF
- * driver sets this flag, it should be ignored by the HWRM.
+ * This bit must be '1' for the ver field to be
+ * configured.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the function is requesting none of the
- * requests from its children VF drivers to be forwarded to
- * itself. This flag can only be set by the PF driver. If a VF
- * driver sets this flag, it should be ignored by the HWRM.
+ * This bit must be '1' for the timestamp field to be
+ * configured.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
- uint32_t enables;
- /* This bit must be '1' for the os_type field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1)
- /* This bit must be '1' for the ver field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2)
- /* This bit must be '1' for the timestamp field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4)
- /* This bit must be '1' for the vf_req_fwd field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD UINT32_C(0x8)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP \
+ UINT32_C(0x4)
/*
- * This bit must be '1' for the async_event_fwd field to be
+ * This bit must be '1' for the vf_req_fwd field to be
* configured.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
- uint16_t os_type;
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD \
+ UINT32_C(0x8)
/*
- * This value indicates the type of OS. The values are based on
- * CIM_OperatingSystem.mof file as published by the DMTF.
+ * This bit must be '1' for the async_event_fwd field to be
+ * configured.
*/
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \
+ UINT32_C(0x10)
+ /*
+ * This value indicates the type of OS. The values are based on
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
+ uint16_t os_type;
/* Unknown */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
/* Other OS not listed below. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1)
/* MSDOS OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe)
/* Windows OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
/* Solaris OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
/* Linux OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24)
/* FreeBSD OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
/* VMware ESXi OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68)
/* Microsoft Windows 8 64-bit OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73)
/* Microsoft Windows Server 2012 R2 OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
- uint8_t ver_maj;
- /* This is the major version of the driver. */
- uint8_t ver_min;
- /* This is the minor version of the driver. */
- uint8_t ver_upd;
- /* This is the update version of the driver. */
- uint8_t unused_0;
- uint16_t unused_1;
- uint32_t timestamp;
- /*
- * This is a 32-bit timestamp provided by the driver for keep
- * alive. The timestamp is in multiples of 1ms.
- */
- uint32_t unused_2;
- uint32_t vf_req_fwd[8];
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
+ /* UEFI driver. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI UINT32_C(0x8000)
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LAST \
+ HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UEFI
+ /* This is the 8bit major version of the driver. */
+ uint8_t ver_maj_8b;
+ /* This is the 8bit minor version of the driver. */
+ uint8_t ver_min_8b;
+ /* This is the 8bit update version of the driver. */
+ uint8_t ver_upd_8b;
+ uint8_t unused_0[3];
+ /*
+ * This is a 32-bit timestamp provided by the driver for
+ * keep alive.
+ * The timestamp is in multiples of 1ms.
+ */
+ uint32_t timestamp;
+ uint8_t unused_1[4];
/*
* This is a 256-bit bit mask provided by the PF driver for
* letting the HWRM know what commands issued by the VF driver
- * to the HWRM should be forwarded to the PF driver. Nth bit
- * refers to the Nth req_type. Setting Nth bit to 1 indicates
- * that requests from the VF driver with req_type equal to N
- * shall be forwarded to the parent PF driver. This field is not
- * valid for the VF driver.
+ * to the HWRM should be forwarded to the PF driver.
+ * Nth bit refers to the Nth req_type.
+ *
+ * Setting Nth bit to 1 indicates that requests from the
+ * VF driver with req_type equal to N shall be forwarded to
+ * the parent PF driver.
+ *
+ * This field is not valid for the VF driver.
*/
- uint32_t async_event_fwd[8];
+ uint32_t vf_req_fwd[8];
/*
* This is a 256-bit bit mask provided by the function driver
- * (PF or VF driver) to indicate the list of asynchronous event
- * completions to be forwarded. Nth bit refers to the Nth
- * event_id. Setting Nth bit to 1 by the function driver shall
- * result in the HWRM forwarding asynchronous event completion
- * with event_id equal to N. If all bits are set to 0 (value of
- * 0), then the HWRM shall not forward any asynchronous event
- * completion to this function driver.
- */
+ * (PF or VF driver) to indicate the list of asynchronous event
+ * completions to be forwarded.
+ *
+ * Nth bit refers to the Nth event_id.
+ *
+ * Setting Nth bit to 1 by the function driver shall result in
+ * the HWRM forwarding asynchronous event completion with
+ * event_id equal to N.
+ *
+ * If all bits are set to 0 (value of 0), then the HWRM shall
+ * not forward any asynchronous event completion to this
+ * function driver.
+ */
+ uint32_t async_event_fwd[8];
+ /* This is the 16bit major version of the driver. */
+ uint16_t ver_maj;
+ /* This is the 16bit minor version of the driver. */
+ uint16_t ver_min;
+ /* This is the 16bit update version of the driver. */
+ uint16_t ver_upd;
+ /* This is the 16bit patch version of the driver. */
+ uint16_t ver_patch;
} __attribute__((packed));
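+
+/*
+ * A minimal sketch (not part of the generated HSI definitions) of the
+ * 256-bit mask layout described for vf_req_fwd and async_event_fwd above:
+ * bit N selects req_type (or event_id) N, i.e. word N / 32, bit N % 32 of
+ * the uint32_t[8] array. That bit 0 of word 0 is the lowest-numbered
+ * req_type follows the "Nth bit" wording but is an assumption here.
+ */
+#include <stdint.h>
+
+static inline void
+hwrm_fwd_mask_set(uint32_t mask[8], unsigned int n)
+{
+ if (n < 256) /* 8 words x 32 bits */
+  mask[n / 32] |= UINT32_C(1) << (n % 32);
+}
+
+/* Usage: hwrm_fwd_mask_set(req->vf_req_fwd, n) forwards req_type n. */
+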
-/* Output (16 bytes) */
+/* hwrm_func_drv_rgtr_output (size:128b/16B) */
struct hwrm_func_drv_rgtr_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_func_drv_unrgtr */
-/*
- * Description: This command is used by the function driver to un register with
- * the HWRM. A function driver shall implement this command. A function driver
- * shall use this command during the driver unloading.
- */
-/* Input (24 bytes) */
+/************************
+ * hwrm_func_drv_unrgtr *
+ ************************/
+
+
+/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
struct hwrm_func_drv_unrgtr_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * When this bit is '1', the function driver is notifying the
- * HWRM to prepare for the shutdown.
+ * When this bit is '1', the function driver is notifying
+ * the HWRM to prepare for the shutdown.
*/
- #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
+ #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
UINT32_C(0x1)
- uint32_t unused_0;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
struct hwrm_func_drv_unrgtr_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_buf_rgtr *
+ **********************/
+
+
+/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+struct hwrm_func_buf_rgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
-} __attribute__((packed));
-
-/* hwrm_func_buf_rgtr */
-/*
- * Description: This command is used by the PF driver to register buffers used
- * in the PF-VF communication with the HWRM. The PF driver uses this command to
- * register buffers for each PF-VF channel. A parent PF may issue this command
- * per child VF. If VF ID is not valid, then this command is used to register
- * buffers for all children VFs of the PF.
- */
-/* Input (128 bytes) */
-struct hwrm_func_buf_rgtr_input {
- uint16_t req_type;
+ uint16_t seq_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t cmpl_ring;
+ uint16_t target_id;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * This bit must be '1' for the vf_id field to be
+ * configured.
*/
- uint64_t resp_addr;
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This bit must be '1' for the err_buf_addr field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the vf_id field to be configured. */
- #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
- /* This bit must be '1' for the err_buf_addr field to be configured. */
- #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2)
- uint16_t vf_id;
+ #define HWRM_FUNC_BUF_RGTR_INPUT_ENABLES_ERR_BUF_ADDR UINT32_C(0x2)
/*
- * This value is used to identify a Virtual Function (VF). The
- * scope of VF ID is local within a PF.
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
*/
- uint16_t req_buf_num_pages;
+ uint16_t vf_id;
/*
* This field represents the number of pages used for request
* buffer(s).
*/
- uint16_t req_buf_page_size;
- /* This field represents the page size used for request buffer(s). */
+ uint16_t req_buf_num_pages;
+ /*
+ * This field represents the page size used for request
+ * buffer(s).
+ */
+ uint16_t req_buf_page_size;
/* 16 bytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_16B UINT32_C(0x4)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_16B UINT32_C(0x4)
/* 4 Kbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4K UINT32_C(0xc)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K UINT32_C(0xc)
/* 8 Kbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_8K UINT32_C(0xd)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_8K UINT32_C(0xd)
/* 64 Kbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_64K UINT32_C(0x10)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_64K UINT32_C(0x10)
/* 2 Mbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_2M UINT32_C(0x15)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_2M UINT32_C(0x15)
/* 4 Mbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_4M UINT32_C(0x16)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4M UINT32_C(0x16)
/* 1 Gbytes */
- #define HWRM_FUNC_BUF_RGTR_INPUT_INPUT_BUF_PAGE_SIZE_1G UINT32_C(0x1e)
- uint16_t req_buf_len;
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G UINT32_C(0x1e)
+ #define HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_LAST \
+ HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_1G
/* The length of the request buffer per VF in bytes. */
- uint16_t resp_buf_len;
+ uint16_t req_buf_len;
/* The length of the response buffer in bytes. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint64_t req_buf_page_addr[10];
- /* This field represents the page address of req buffer. */
- uint64_t error_buf_addr;
+ uint16_t resp_buf_len;
+ uint8_t unused_0[2];
+ /* This field represents the page address of page #0. */
+ uint64_t req_buf_page_addr0;
+ /* This field represents the page address of page #1. */
+ uint64_t req_buf_page_addr1;
+ /* This field represents the page address of page #2. */
+ uint64_t req_buf_page_addr2;
+ /* This field represents the page address of page #3. */
+ uint64_t req_buf_page_addr3;
+ /* This field represents the page address of page #4. */
+ uint64_t req_buf_page_addr4;
+ /* This field represents the page address of page #5. */
+ uint64_t req_buf_page_addr5;
+ /* This field represents the page address of page #6. */
+ uint64_t req_buf_page_addr6;
+ /* This field represents the page address of page #7. */
+ uint64_t req_buf_page_addr7;
+ /* This field represents the page address of page #8. */
+ uint64_t req_buf_page_addr8;
+ /* This field represents the page address of page #9. */
+ uint64_t req_buf_page_addr9;
+ /*
+ * This field is used to receive the error reporting from
+ * the chipset. Only applicable for PFs.
+ */
+ uint64_t error_buf_addr;
+ /*
+ * This field is used to receive the response forwarded by the
+ * HWRM.
+ */
+ uint64_t resp_buf_addr;
+} __attribute__((packed));
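+
+/*
+ * A minimal sketch (not part of the generated HSI definitions) noting a
+ * pattern in the REQ_BUF_PAGE_SIZE values above: each encoding is the
+ * base-2 logarithm of the page size (0x4 -> 16 B, 0xc -> 4 KB, 0x10 ->
+ * 64 KB, 0x1e -> 1 GB), so the byte count can be recovered with a shift.
+ * This is an observation about the constants, not documented wording.
+ */
+#include <stdint.h>
+
+static inline uint64_t
+hwrm_req_buf_page_size_bytes(uint16_t page_size_enc)
+{
+ /* e.g. HWRM_FUNC_BUF_RGTR_INPUT_REQ_BUF_PAGE_SIZE_4K (0xc) -> 4096 */
+ return UINT64_C(1) << page_size_enc;
+}
+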
+
+/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+struct hwrm_func_buf_rgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used to receive the error reporting from the
- * chipset. Only applicable for PFs.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint64_t resp_buf_addr;
- /* This field is used to receive the response forwarded by the HWRM. */
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_func_buf_rgtr_output {
- uint16_t error_code;
+/************************
+ * hwrm_func_buf_unrgtr *
+ ************************/
+
+
+/* hwrm_func_buf_unrgtr_input (size:192b/24B) */
+struct hwrm_func_buf_unrgtr_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint64_t resp_addr;
+ uint32_t enables;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This bit must be '1' for the vf_id field to be
+ * configured.
*/
+ #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
+ /*
+ * This value is used to identify a Virtual Function (VF).
+ * The scope of VF ID is local within a PF.
+ */
+ uint16_t vf_id;
+ uint8_t unused_0[2];
} __attribute__((packed));
-/* hwrm_func_buf_unrgtr */
-/*
- * Description: This command is used by the PF driver to unregister buffers used
- * in the PF-VF communication with the HWRM. The PF driver uses this command to
- * unregister buffers for PF-VF communication. A parent PF may issue this
- * command to unregister buffers for communication between the PF and a specific
- * VF. If the VF ID is not valid, then this command is used to unregister
- * buffers used for communications with all children VFs of the PF.
- */
-/* Input (24 bytes) */
-struct hwrm_func_buf_unrgtr_input {
- uint16_t req_type;
+/* hwrm_func_buf_unrgtr_output (size:128b/16B) */
+struct hwrm_func_buf_unrgtr_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_func_drv_qver *
+ **********************/
+
+
+/* hwrm_func_drv_qver_input (size:192b/24B) */
+struct hwrm_func_drv_qver_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t enables;
- /* This bit must be '1' for the vf_id field to be configured. */
- #define HWRM_FUNC_BUF_UNRGTR_INPUT_ENABLES_VF_ID UINT32_C(0x1)
- uint16_t vf_id;
+ uint64_t resp_addr;
+ /* Reserved for future use. */
+ uint32_t reserved;
/*
- * This value is used to identify a Virtual Function (VF). The
- * scope of VF ID is local within a PF.
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
*/
- uint16_t unused_0;
+ uint16_t fid;
+ uint8_t unused_0[2];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_func_buf_unrgtr_output {
- uint16_t error_code;
+/* hwrm_func_drv_qver_output (size:192b/24B) */
+struct hwrm_func_drv_qver_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This value indicates the type of OS. The values are based on
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
+ uint16_t os_type;
+ /* Unknown */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
+ /* Other OS not listed below. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_OTHER UINT32_C(0x1)
+ /* MSDOS OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_MSDOS UINT32_C(0xe)
+ /* Windows OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
+ /* Solaris OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
+ /* Linux OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LINUX UINT32_C(0x24)
+ /* FreeBSD OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
+ /* VMware ESXi OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_ESXI UINT32_C(0x68)
+ /* Microsoft Windows 8 64-bit OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN864 UINT32_C(0x73)
+ /* Microsoft Windows Server 2012 R2 OS. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
+ /* UEFI driver. */
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI UINT32_C(0x8000)
+ #define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_LAST \
+ HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UEFI
+ /* This is the 8bit major version of the driver. */
+ uint8_t ver_maj_8b;
+ /* This is the 8bit minor version of the driver. */
+ uint8_t ver_min_8b;
+ /* This is the 8bit update version of the driver. */
+ uint8_t ver_upd_8b;
+ uint8_t unused_0[2];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+ /* This is the 16bit major version of the driver. */
+ uint16_t ver_maj;
+ /* This is the 16bit minor version of the driver. */
+ uint16_t ver_min;
+ /* This is the 16bit update version of the driver. */
+ uint16_t ver_upd;
+ /* This is the 16bit patch version of the driver. */
+ uint16_t ver_patch;
+} __attribute__((packed));
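+
+/*
+ * A minimal sketch (not part of the generated HSI definitions) of reading
+ * the two version encodings above. Per the 16BIT_VER_MODE flag on
+ * hwrm_func_drv_rgtr, a driver registers either the legacy 8-bit
+ * ver_*_8b fields or the 16-bit ver_maj/ver_min/ver_upd/ver_patch set;
+ * preferring the 16-bit fields when any is non-zero is a heuristic
+ * assumed here, not something the comments mandate.
+ */
+#include <stdio.h>
+
+static void
+drv_qver_print(const struct hwrm_func_drv_qver_output *resp)
+{
+ if (resp->ver_maj || resp->ver_min || resp->ver_upd || resp->ver_patch)
+  printf("driver %u.%u.%u.%u\n", resp->ver_maj, resp->ver_min,
+         resp->ver_upd, resp->ver_patch);
+ else /* fall back to the legacy 8-bit fields */
+  printf("driver %u.%u.%u\n", resp->ver_maj_8b,
+         resp->ver_min_8b, resp->ver_upd_8b);
+}
+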
+
+/****************************
+ * hwrm_func_resource_qcaps *
+ ****************************/
+
+
+/* hwrm_func_resource_qcaps_input (size:192b/24B) */
+struct hwrm_func_resource_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * 0xFF... (All Fs) if the query is for the requesting
+ * function.
+ */
+ uint16_t fid;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_port_phy_cfg */
-/*
- * Description: This command configures the PHY device for the port. It allows
- * setting of the most generic settings for the PHY. The HWRM shall complete
- * this command as soon as PHY settings are configured. They may not be applied
- * when the command response is provided. A VF driver shall not be allowed to
- * configure PHY using this command. In a network partition mode, a PF driver
- * shall not be allowed to configure PHY using this command.
- */
-/* Input (56 bytes) */
-struct hwrm_port_phy_cfg_input {
- uint16_t req_type;
+/* hwrm_func_resource_qcaps_output (size:448b/56B) */
+struct hwrm_func_resource_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Maximum guaranteed number of VFs supported by PF. Not applicable
+ * for VFs.
+ */
+ uint16_t max_vfs;
+ /* Maximum guaranteed number of MSI-X vectors supported by function */
+ uint16_t max_msix;
+ /*
+ * Hint of strategy to be used by PF driver to reserve resources for
+ * its VF
+ */
+ uint16_t vf_reservation_strategy;
+ /* The PF driver should evenly divide its remaining resources among all VFs. */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL \
+ UINT32_C(0x0)
+ /* The PF driver should only reserve minimal resources for each VF. */
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL \
+ UINT32_C(0x1)
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The PF driver should not reserve any resources for each VF until
+ * the VF interface is brought up.
*/
- uint16_t cmpl_ring;
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_LAST \
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
+ /* Minimum guaranteed number of RSS/COS contexts */
+ uint16_t min_rsscos_ctx;
+ /* Maximum non-guaranteed number of RSS/COS contexts */
+ uint16_t max_rsscos_ctx;
+ /* Minimum guaranteed number of completion rings */
+ uint16_t min_cmpl_rings;
+ /* Maximum non-guaranteed number of completion rings */
+ uint16_t max_cmpl_rings;
+ /* Minimum guaranteed number of transmit rings */
+ uint16_t min_tx_rings;
+ /* Maximum non-guaranteed number of transmit rings */
+ uint16_t max_tx_rings;
+ /* Minimum guaranteed number of receive rings */
+ uint16_t min_rx_rings;
+ /* Maximum non-guaranteed number of receive rings */
+ uint16_t max_rx_rings;
+ /* Minimum guaranteed number of L2 contexts */
+ uint16_t min_l2_ctxs;
+ /* Maximum non-guaranteed number of L2 contexts */
+ uint16_t max_l2_ctxs;
+ /* Minimum guaranteed number of VNICs */
+ uint16_t min_vnics;
+ /* Maximum non-guaranteed number of VNICs */
+ uint16_t max_vnics;
+ /* Minimum guaranteed number of statistic contexts */
+ uint16_t min_stat_ctx;
+ /* Maximum non-guaranteed number of statistic contexts */
+ uint16_t max_stat_ctx;
+ /* Minimum guaranteed number of ring groups */
+ uint16_t min_hw_ring_grps;
+ /* Maximum non-guaranteed number of ring groups */
+ uint16_t max_hw_ring_grps;
+ /*
+ * Maximum number of inputs into the transmit scheduler for this function.
+ * The number of TX rings assigned to the function cannot exceed this value.
+ */
+ uint16_t max_tx_scheduler_inputs;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
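+
+/*
+ * A minimal sketch (not part of the generated HSI definitions) of how the
+ * min/max pairs above are typically consumed: min_* is the guaranteed
+ * floor and max_* the best-effort ceiling, so a requested count gets
+ * clamped into [min, max]. The helper name and the clamping policy are
+ * illustrative assumptions.
+ */
+#include <stdint.h>
+
+static inline uint16_t
+resc_clamp(uint16_t want, uint16_t min_guaranteed, uint16_t max_avail)
+{
+ if (want < min_guaranteed)
+  return min_guaranteed; /* never below the guaranteed floor */
+ if (want > max_avail)
+  return max_avail; /* best-effort ceiling */
+ return want;
+}
+
+/* e.g. tx = resc_clamp(n_txq, caps->min_tx_rings, caps->max_tx_rings); */
+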
+
+/*****************************
+ * hwrm_func_vf_resource_cfg *
+ *****************************/
+
+
+/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
+struct hwrm_func_vf_resource_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t flags;
+ uint16_t target_id;
/*
- * When this bit is set to '1', the PHY for the port shall be
- * reset. # If this bit is set to 1, then the HWRM shall reset
- * the PHY after applying PHY configuration changes specified in
- * this command. # In order to guarantee that PHY configuration
- * changes specified in this command take effect, the HWRM
- * client should set this flag to 1. # If this bit is not set to
- * 1, then the HWRM may reset the PHY depending on the current
- * PHY configuration and settings specified in this command.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
- /* deprecated bit. Do not use!!! */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED UINT32_C(0x2)
+ uint64_t resp_addr;
+ /* VF ID that is being configured by PF */
+ uint16_t vf_id;
+ /* Maximum guaranteed number of MSI-X vectors for the function */
+ uint16_t max_msix;
+ /* Minimum guaranteed number of RSS/COS contexts */
+ uint16_t min_rsscos_ctx;
+ /* Maximum non-guaranteed number of RSS/COS contexts */
+ uint16_t max_rsscos_ctx;
+ /* Minimum guaranteed number of completion rings */
+ uint16_t min_cmpl_rings;
+ /* Maximum non-guaranteed number of completion rings */
+ uint16_t max_cmpl_rings;
+ /* Minimum guaranteed number of transmit rings */
+ uint16_t min_tx_rings;
+ /* Maximum non-guaranteed number of transmit rings */
+ uint16_t max_tx_rings;
+ /* Minimum guaranteed number of receive rings */
+ uint16_t min_rx_rings;
+ /* Maximum non-guaranteed number of receive rings */
+ uint16_t max_rx_rings;
+ /* Minimum guaranteed number of L2 contexts */
+ uint16_t min_l2_ctxs;
+ /* Maximum non-guaranteed number of L2 contexts */
+ uint16_t max_l2_ctxs;
+ /* Minimum guaranteed number of VNICs */
+ uint16_t min_vnics;
+ /* Maximum non-guaranteed number of VNICs */
+ uint16_t max_vnics;
+ /* Minimum guaranteed number of statistic contexts */
+ uint16_t min_stat_ctx;
+ /* Maximum non-guaranteed number of statistic contexts */
+ uint16_t max_stat_ctx;
+ /* Minimum guaranteed number of ring groups */
+ uint16_t min_hw_ring_grps;
+ /* Maximum non-guaranteed number of ring groups */
+ uint16_t max_hw_ring_grps;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+struct hwrm_func_vf_resource_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Reserved number of RSS/COS contexts */
+ uint16_t reserved_rsscos_ctx;
+ /* Reserved number of completion rings */
+ uint16_t reserved_cmpl_rings;
+ /* Reserved number of transmit rings */
+ uint16_t reserved_tx_rings;
+ /* Reserved number of receive rings */
+ uint16_t reserved_rx_rings;
+ /* Reserved number of L2 contexts */
+ uint16_t reserved_l2_ctxs;
+ /* Reserved number of VNICs */
+ uint16_t reserved_vnics;
+ /* Reserved number of statistic contexts */
+ uint16_t reserved_stat_ctx;
+ /* Reserved number of ring groups */
+ uint16_t reserved_hw_ring_grps;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************************
+ * hwrm_func_backing_store_qcaps *
+ *********************************/
+
+
+/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * When this bit is set to '1', the link shall be forced to the
- * force_link_speed value. When this bit is set to '1', the HWRM
- * client should not enable any of the auto negotiation related
- * fields represented by auto_XXX fields in this command. When
- * this bit is set to '1' and the HWRM client has enabled a
- * auto_XXX field in this command, then the HWRM shall ignore
- * the enabled auto_XXX field. When this bit is set to zero, the
- * link shall be allowed to autoneg.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4)
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to the host buffer into which the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_func_backing_store_qcaps_output (size:512b/64B) */
+struct hwrm_func_backing_store_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Maximum number of QP context entries supported for this function. */
+ uint32_t qp_max_entries;
+ /*
+	 * Minimum number of QP context entries that need to be reserved
+ * for QP1 for the PF and its VFs. PF drivers must allocate at least
+ * this many QP context entries, even if RoCE will not be used.
+ */
+ uint16_t qp_min_qp1_entries;
+ /* Maximum number of QP context entries that can be used for L2. */
+ uint16_t qp_max_l2_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t qp_entry_size;
+ /* Maximum number of SRQ context entries that can be used for L2. */
+ uint16_t srq_max_l2_entries;
+ /* Maximum number of SRQ context entries supported for this function. */
+ uint32_t srq_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t srq_entry_size;
+ /* Maximum number of CQ context entries that can be used for L2. */
+ uint16_t cq_max_l2_entries;
+ /* Maximum number of CQ context entries supported for this function. */
+ uint32_t cq_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t cq_entry_size;
+ /* Maximum number of VNIC context entries supported for this function. */
+ uint16_t vnic_max_vnic_entries;
+ /* Maximum number of Ring table context entries supported for this function. */
+ uint16_t vnic_max_ring_table_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t vnic_entry_size;
+ /* Maximum number of statistic context entries supported for this function. */
+ uint32_t stat_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t stat_entry_size;
+ /* Maximum number of TQM context entries supported per ring. */
+ uint16_t tqm_max_entries_per_ring;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t tqm_entry_size;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t mrav_entry_size;
+ /* Maximum number of MR/AV context entries supported for this function. */
+ uint32_t mrav_max_entries;
+ /* Maximum number of Timer context entries supported for this function. */
+ uint32_t tim_max_entries;
+ /* Number of bytes that must be allocated for each context entry. */
+ uint16_t tim_entry_size;
+ uint8_t unused_0;
+ /*
+	 * This field is used in Output records to indicate that the output
+	 * has been completely written to RAM. A value of '1' confirms that
+	 * the entire output has been written. When writing a command
+	 * completion or response to an internal processor, the order of
+	 * writes must ensure that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
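
/*
 * Editorial sketch (not part of the upstream patch): the sizing arithmetic
 * implied by the qcaps fields above. A driver reserves entries * entry_size
 * bytes per context type, rounded up to its backing-store page size; the
 * QP1 entries must be included even when RoCE is unused.
 */
static uint64_t qp_backing_store_bytes(const struct hwrm_func_backing_store_qcaps_output *resp,
				       uint32_t num_qps, uint64_t pg_size)
{
	uint64_t bytes = (uint64_t)(num_qps + resp->qp_min_qp1_entries) *
			 resp->qp_entry_size;

	/* Round up to a whole number of backing-store pages. */
	return (bytes + pg_size - 1) / pg_size * pg_size;
}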
+
+/*******************************
+ * hwrm_func_backing_store_cfg *
+ *******************************/
+
+
+/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+struct hwrm_func_backing_store_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to the host buffer into which the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When set, the firmware only uses on-chip resources and does not
+ * expect any backing store to be provided by the host driver. This
+ * mode provides minimal L2 functionality (e.g. limited L2 resources,
+ * no RoCE).
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_PREBOOT_MODE \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the qp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the srq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the vnic fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stat fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the tqm_sp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the tqm_ring0 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING0 \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tqm_ring1 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING1 \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the tqm_ring2 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING2 \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the tqm_ring3 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING3 \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the tqm_ring4 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING4 \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the tqm_ring5 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING5 \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the tqm_ring6 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING6 \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the tqm_ring7 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING7 \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the mrav fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the tim fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM \
+ UINT32_C(0x8000)
+ /* QPC page size and level. */
+ uint8_t qpc_pg_size_qpc_lvl;
+ /* QPC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2
+ /* QPC page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_1G
+ /* SRQ page size and level. */
+ uint8_t srq_pg_size_srq_lvl;
+ /* SRQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2
+ /* SRQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_1G
+ /* CQ page size and level. */
+ uint8_t cq_pg_size_cq_lvl;
+ /* CQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2
+ /* CQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_PG_SIZE_PG_1G
+ /* VNIC page size and level. */
+ uint8_t vnic_pg_size_vnic_lvl;
+ /* VNIC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2
+ /* VNIC page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_PG_SIZE_PG_1G
+ /* Stat page size and level. */
+ uint8_t stat_pg_size_stat_lvl;
+ /* Stat PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2
+ /* Stat page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_PG_SIZE_PG_1G
+ /* TQM slow path page size and level. */
+ uint8_t tqm_sp_pg_size_tqm_sp_lvl;
+ /* TQM slow path PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2
+ /* TQM slow path page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_PG_SIZE_PG_1G
+ /* TQM ring 0 page size and level. */
+ uint8_t tqm_ring0_pg_size_tqm_ring0_lvl;
+ /* TQM ring 0 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2
+ /* TQM ring 0 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_PG_SIZE_PG_1G
+ /* TQM ring 1 page size and level. */
+ uint8_t tqm_ring1_pg_size_tqm_ring1_lvl;
+ /* TQM ring 1 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2
+ /* TQM ring 1 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_PG_SIZE_PG_1G
+ /* TQM ring 2 page size and level. */
+ uint8_t tqm_ring2_pg_size_tqm_ring2_lvl;
+ /* TQM ring 2 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2
+ /* TQM ring 2 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_PG_SIZE_PG_1G
+ /* TQM ring 3 page size and level. */
+ uint8_t tqm_ring3_pg_size_tqm_ring3_lvl;
+ /* TQM ring 3 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2
+ /* TQM ring 3 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_PG_SIZE_PG_1G
+ /* TQM ring 4 page size and level. */
+ uint8_t tqm_ring4_pg_size_tqm_ring4_lvl;
+ /* TQM ring 4 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2
+ /* TQM ring 4 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_PG_SIZE_PG_1G
+ /* TQM ring 5 page size and level. */
+ uint8_t tqm_ring5_pg_size_tqm_ring5_lvl;
+ /* TQM ring 5 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2
+ /* TQM ring 5 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_PG_SIZE_PG_1G
+ /* TQM ring 6 page size and level. */
+ uint8_t tqm_ring6_pg_size_tqm_ring6_lvl;
+ /* TQM ring 6 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2
+ /* TQM ring 6 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_PG_SIZE_PG_1G
+ /* TQM ring 7 page size and level. */
+ uint8_t tqm_ring7_pg_size_tqm_ring7_lvl;
+ /* TQM ring 7 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2
+ /* TQM ring 7 page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_PG_SIZE_PG_1G
+ /* MR/AV page size and level. */
+ uint8_t mrav_pg_size_mrav_lvl;
+ /* MR/AV PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2
+ /* MR/AV page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_PG_SIZE_PG_1G
+ /* Timer page size and level. */
+ uint8_t tim_pg_size_tim_lvl;
+ /* Timer PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2
+ /* Timer page size. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_PG_SIZE_PG_1G
+ /* QP page directory. */
+ uint64_t qpc_page_dir;
+ /* SRQ page directory. */
+ uint64_t srq_page_dir;
+ /* CQ page directory. */
+ uint64_t cq_page_dir;
+ /* VNIC page directory. */
+ uint64_t vnic_page_dir;
+ /* Stat page directory. */
+ uint64_t stat_page_dir;
+	/* TQM slow path page directory. */
+ uint64_t tqm_sp_page_dir;
+ /* TQM ring 0 page directory. */
+ uint64_t tqm_ring0_page_dir;
+ /* TQM ring 1 page directory. */
+ uint64_t tqm_ring1_page_dir;
+ /* TQM ring 2 page directory. */
+ uint64_t tqm_ring2_page_dir;
+ /* TQM ring 3 page directory. */
+ uint64_t tqm_ring3_page_dir;
+ /* TQM ring 4 page directory. */
+ uint64_t tqm_ring4_page_dir;
+ /* TQM ring 5 page directory. */
+ uint64_t tqm_ring5_page_dir;
+ /* TQM ring 6 page directory. */
+ uint64_t tqm_ring6_page_dir;
+ /* TQM ring 7 page directory. */
+ uint64_t tqm_ring7_page_dir;
+ /* MR/AV page directory. */
+ uint64_t mrav_page_dir;
+ /* Timer page directory. */
+ uint64_t tim_page_dir;
+ /* Number of QPs. */
+ uint32_t qp_num_entries;
+ /* Number of SRQs. */
+ uint32_t srq_num_entries;
+ /* Number of CQs. */
+ uint32_t cq_num_entries;
+ /* Number of Stats. */
+ uint32_t stat_num_entries;
+	/* Number of TQM slow path entries. */
+ uint32_t tqm_sp_num_entries;
+ /* Number of TQM ring 0 entries. */
+ uint32_t tqm_ring0_num_entries;
+ /* Number of TQM ring 1 entries. */
+ uint32_t tqm_ring1_num_entries;
+ /* Number of TQM ring 2 entries. */
+ uint32_t tqm_ring2_num_entries;
+ /* Number of TQM ring 3 entries. */
+ uint32_t tqm_ring3_num_entries;
+ /* Number of TQM ring 4 entries. */
+ uint32_t tqm_ring4_num_entries;
+ /* Number of TQM ring 5 entries. */
+ uint32_t tqm_ring5_num_entries;
+ /* Number of TQM ring 6 entries. */
+ uint32_t tqm_ring6_num_entries;
+ /* Number of TQM ring 7 entries. */
+ uint32_t tqm_ring7_num_entries;
+ /* Number of MR/AV entries. */
+ uint32_t mrav_num_entries;
+ /* Number of Timer entries. */
+ uint32_t tim_num_entries;
+ /* Number of entries to reserve for QP1 */
+ uint16_t qp_num_qp1_entries;
+ /* Number of entries to reserve for L2 */
+ uint16_t qp_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t qp_entry_size;
+ /* Number of entries to reserve for L2 */
+ uint16_t srq_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t srq_entry_size;
+ /* Number of entries to reserve for L2 */
+ uint16_t cq_num_l2_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t cq_entry_size;
+ /* Number of entries to reserve for VNIC entries */
+ uint16_t vnic_num_vnic_entries;
+ /* Number of entries to reserve for Ring table entries */
+ uint16_t vnic_num_ring_table_entries;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t vnic_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t stat_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t tqm_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t mrav_entry_size;
+ /* Number of bytes that have been allocated for each context entry. */
+ uint16_t tim_entry_size;
+} __attribute__((packed));
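
/*
 * Editorial sketch (not part of the upstream patch): each *_pg_size_*_lvl
 * byte packs the PBL indirection level into bits [3:0] and the page-size
 * code into bits [7:4], so the defines above combine with a bitwise OR and
 * unpack with the corresponding MASK/SFT pairs.
 */
static inline uint8_t qpc_pg_lvl_pack(void)
{
	/* 1-level PBL with 4KB backing-store pages for the QP context. */
	return HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 |
	       HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_PG_4K;
}

static inline void qpc_pg_lvl_unpack(uint8_t v, uint8_t *lvl, uint8_t *pg)
{
	*lvl = v & HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_MASK;
	*pg = (v & HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_MASK) >>
	      HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_PG_SIZE_SFT;
}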
+
+/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+	 * This field is used in Output records to indicate that the output
+	 * has been completely written to RAM. A value of '1' confirms that
+	 * the entire output has been written. When writing a command
+	 * completion or response to an internal processor, the order of
+	 * writes must ensure that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
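
/*
 * Editorial sketch (not part of the upstream patch): the 'valid' byte is
 * documented above as the last byte firmware writes, so a driver can poll
 * it and then order the remaining reads behind it. rte_io_rmb() is DPDK's
 * I/O read barrier; -EIO and -ETIMEDOUT come from <errno.h>.
 */
static int backing_store_cfg_poll(volatile struct hwrm_func_backing_store_cfg_output *resp,
				  unsigned int spins)
{
	while (spins--) {
		if (resp->valid == 1) {
			rte_io_rmb(); /* read 'valid' before the payload */
			return resp->error_code ? -EIO : 0;
		}
	}
	return -ETIMEDOUT;
}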
+
+/********************************
+ * hwrm_func_backing_store_qcfg *
+ ********************************/
+
+
+/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */
+struct hwrm_func_backing_store_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to the host buffer into which the
+	 * command's response data will be written. This can be either a host
+	 * physical address (HPA) or a guest physical address (GPA) and must
+	 * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_func_backing_store_qcfg_output (size:1920b/240B) */
+struct hwrm_func_backing_store_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When set, the firmware only uses on-chip resources and does not
+ * expect any backing store to be provided by the host driver. This
+ * mode provides minimal L2 functionality (e.g. limited L2 resources,
+ * no RoCE).
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_FLAGS_PREBOOT_MODE \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+ /*
+ * This bit must be '1' for the qp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_QP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the srq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_SRQ \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cq fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_CQ \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the vnic fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_VNIC \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the stat fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_STAT \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the tqm_sp fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_SP \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the tqm_ring0 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING0 \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tqm_ring1 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING1 \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the tqm_ring2 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING2 \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the tqm_ring3 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING3 \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the tqm_ring4 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING4 \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the tqm_ring5 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING5 \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the tqm_ring6 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING6 \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the tqm_ring7 fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TQM_RING7 \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the mrav fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_MRAV \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the tim fields to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_UNUSED_0_TIM \
+ UINT32_C(0x8000)
+ /* QPC page size and level. */
+ uint8_t qpc_pg_size_qpc_lvl;
+ /* QPC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2
+ /* QPC page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_PG_SIZE_PG_1G
+ /* SRQ page size and level. */
+ uint8_t srq_pg_size_srq_lvl;
+ /* SRQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2
+ /* SRQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_PG_SIZE_PG_1G
+ /* CQ page size and level. */
+ uint8_t cq_pg_size_cq_lvl;
+ /* CQ PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2
+ /* CQ page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_PG_SIZE_PG_1G
+ /* VNIC page size and level. */
+ uint8_t vnic_pg_size_vnic_lvl;
+ /* VNIC PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2
+ /* VNIC page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_PG_SIZE_PG_1G
+ /* Stat page size and level. */
+ uint8_t stat_pg_size_stat_lvl;
+ /* Stat PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2
+ /* Stat page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_PG_SIZE_PG_1G
+ /* TQM slow path page size and level. */
+ uint8_t tqm_sp_pg_size_tqm_sp_lvl;
+ /* TQM slow path PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2
+ /* TQM slow path page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_PG_SIZE_PG_1G
+ /* TQM ring 0 page size and level. */
+ uint8_t tqm_ring0_pg_size_tqm_ring0_lvl;
+ /* TQM ring 0 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2
+ /* TQM ring 0 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_PG_SIZE_PG_1G
+ /* TQM ring 1 page size and level. */
+ uint8_t tqm_ring1_pg_size_tqm_ring1_lvl;
+ /* TQM ring 1 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2
+ /* TQM ring 1 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_PG_SIZE_PG_1G
+ /* TQM ring 2 page size and level. */
+ uint8_t tqm_ring2_pg_size_tqm_ring2_lvl;
+ /* TQM ring 2 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2
+ /* TQM ring 2 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_PG_SIZE_PG_1G
+ /* TQM ring 3 page size and level. */
+ uint8_t tqm_ring3_pg_size_tqm_ring3_lvl;
+ /* TQM ring 3 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2
+ /* TQM ring 3 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_PG_SIZE_PG_1G
+ /* TQM ring 4 page size and level. */
+ uint8_t tqm_ring4_pg_size_tqm_ring4_lvl;
+ /* TQM ring 4 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2
+ /* TQM ring 4 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_PG_SIZE_PG_1G
+ /* TQM ring 5 page size and level. */
+ uint8_t tqm_ring5_pg_size_tqm_ring5_lvl;
+ /* TQM ring 5 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2
+ /* TQM ring 5 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_PG_SIZE_PG_1G
+ /* TQM ring 6 page size and level. */
+ uint8_t tqm_ring6_pg_size_tqm_ring6_lvl;
+ /* TQM ring 6 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2
+ /* TQM ring 6 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_PG_SIZE_PG_1G
+ /* TQM ring 7 page size and level. */
+ uint8_t tqm_ring7_pg_size_tqm_ring7_lvl;
+ /* TQM ring 7 PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2
+ /* TQM ring 7 page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_PG_SIZE_PG_1G
+ /* MR/AV page size and level. */
+ uint8_t mrav_pg_size_mrav_lvl;
+ /* MR/AV PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2
+ /* MR/AV page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_PG_SIZE_PG_1G
+ /* Timer page size and level. */
+ uint8_t tim_pg_size_tim_lvl;
+ /* Timer PBL indirect levels. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_MASK \
+ UINT32_C(0xf)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_0 \
+ UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 \
+ UINT32_C(0x1)
+ /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 \
+ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2
+ /* Timer page size. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_4K \
+ (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8K \
+ (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_64K \
+ (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_2M \
+ (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_8M \
+ (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G \
+ (UINT32_C(0x5) << 4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_LAST \
+ HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_1G
+ /* QP page directory. */
+ uint64_t qpc_page_dir;
+ /* SRQ page directory. */
+ uint64_t srq_page_dir;
+ /* CQ page directory. */
+ uint64_t cq_page_dir;
+ /* VNIC page directory. */
+ uint64_t vnic_page_dir;
+ /* Stat page directory. */
+ uint64_t stat_page_dir;
+ /* TQM slowpath page directory. */
+ uint64_t tqm_sp_page_dir;
+ /* TQM ring 0 page directory. */
+ uint64_t tqm_ring0_page_dir;
+ /* TQM ring 1 page directory. */
+ uint64_t tqm_ring1_page_dir;
+ /* TQM ring 2 page directory. */
+ uint64_t tqm_ring2_page_dir;
+ /* TQM ring 3 page directory. */
+ uint64_t tqm_ring3_page_dir;
+ /* TQM ring 4 page directory. */
+ uint64_t tqm_ring4_page_dir;
+ /* TQM ring 5 page directory. */
+ uint64_t tqm_ring5_page_dir;
+ /* TQM ring 6 page directory. */
+ uint64_t tqm_ring6_page_dir;
+ /* TQM ring 7 page directory. */
+ uint64_t tqm_ring7_page_dir;
+ /* MR/AV page directory. */
+ uint64_t mrav_page_dir;
+ /* Timer page directory. */
+ uint64_t tim_page_dir;
+ /* Number of entries to reserve for QP1. */
+ uint16_t qp_num_qp1_entries;
+ /* Number of entries to reserve for L2. */
+ uint16_t qp_num_l2_entries;
+ /* Number of QPs. */
+ uint32_t qp_num_entries;
+ /* Number of SRQs. */
+ uint32_t srq_num_entries;
+ /* Number of entries to reserve for L2. */
+ uint16_t srq_num_l2_entries;
+ /* Number of entries to reserve for L2. */
+ uint16_t cq_num_l2_entries;
+ /* Number of CQs. */
+ uint32_t cq_num_entries;
+ /* Number of entries to reserve for VNIC entries. */
+ uint16_t vnic_num_vnic_entries;
+ /* Number of entries to reserve for Ring table entries. */
+ uint16_t vnic_num_ring_table_entries;
+ /* Number of Stats. */
+ uint32_t stat_num_entries;
+ /* Number of TQM slowpath entries. */
+ uint32_t tqm_sp_num_entries;
+ /* Number of TQM ring 0 entries. */
+ uint32_t tqm_ring0_num_entries;
+ /* Number of TQM ring 1 entries. */
+ uint32_t tqm_ring1_num_entries;
+ /* Number of TQM ring 2 entries. */
+ uint32_t tqm_ring2_num_entries;
+ /* Number of TQM ring 3 entries. */
+ uint32_t tqm_ring3_num_entries;
+ /* Number of TQM ring 4 entries. */
+ uint32_t tqm_ring4_num_entries;
+ /* Number of TQM ring 5 entries. */
+ uint32_t tqm_ring5_num_entries;
+ /* Number of TQM ring 6 entries. */
+ uint32_t tqm_ring6_num_entries;
+ /* Number of TQM ring 7 entries. */
+ uint32_t tqm_ring7_num_entries;
+ /* Number of MR/AV entries. */
+ uint32_t mrav_num_entries;
+ /* Number of Timer entries. */
+ uint32_t tim_num_entries;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
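
Each *_pg_size_*_lvl byte above packs two 4-bit fields: the PBL indirection
level in bits 3:0 and the backing-store page size in bits 7:4. The LVL_*
values are unshifted while the PG_SIZE_PG_* values are pre-shifted, so
masking alone is enough to compare against either set. A minimal decoding
sketch, assuming the surrounding response struct is
hwrm_func_backing_store_qcfg_output as the macro prefixes indicate (the
helper name and 'resp' pointer are illustrative, not part of this header):

	static inline void decode_tim_ctx(
		const struct hwrm_func_backing_store_qcfg_output *resp)
	{
		uint8_t val = resp->tim_pg_size_tim_lvl;
		/* Indirection level lives in bits 3:0 (SFT is 0). */
		uint8_t lvl = val &
			HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_MASK;
		/* Page size lives in bits 7:4; PG_* values are pre-shifted. */
		uint8_t pg = val &
			HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_MASK;

		if (lvl == HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 &&
		    pg == HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_PG_SIZE_PG_2M) {
			/* Timer context: 2MB pages behind one PBL level. */
		}
	}
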
+
+/*********************
+ * hwrm_port_phy_cfg *
+ *********************/
+
+
+/* hwrm_port_phy_cfg_input (size:448b/56B) */
+struct hwrm_port_phy_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is set to '1', the PHY for the port shall
+ * be reset.
+ *
+ * # If this bit is set to 1, then the HWRM shall reset the
+ * PHY after applying PHY configuration changes specified
+ * in this command.
+ * # In order to guarantee that PHY configuration changes
+ * specified in this command take effect, the HWRM
+ * client should set this flag to 1.
+ * # If this bit is not set to 1, then the HWRM may reset
+ * the PHY depending on the current PHY configuration and
+ * settings specified in this command.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY \
+ UINT32_C(0x1)
+ /* Deprecated bit. Do not use. */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', the link shall be forced to
+ * the force_link_speed value.
+ *
+ * When this bit is set to '1', the HWRM client should
+ * not enable any of the auto-negotiation related
+ * fields represented by auto_XXX fields in this command.
+ * When this bit is set to '1' and the HWRM client has
+ * enabled an auto_XXX field in this command, then the
+ * HWRM shall ignore the enabled auto_XXX field.
+ *
+ * When this bit is set to zero, the link
+ * shall be allowed to autoneg.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE \
+ UINT32_C(0x4)
/*
* When this bit is set to '1', the auto-negotiation process
* shall be restarted on the link.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG \
+ UINT32_C(0x8)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
- * is requested to be enabled on this link. If EEE is not
- * supported on this port, then this flag shall be ignored by
- * the HWRM.
+ * When this bit is set to '1', Energy Efficient Ethernet
+ * (EEE) is requested to be enabled on this link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE \
+ UINT32_C(0x10)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE)
- * is requested to be disabled on this link. If EEE is not
- * supported on this port, then this flag shall be ignored by
- * the HWRM.
+ * When this bit is set to '1', Energy Efficient Ethernet
+ * (EEE) is requested to be disabled on this link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE \
+ UINT32_C(0x20)
/*
- * When this bit is set to '1' and EEE is enabled on this link,
- * then TX LPI is requested to be enabled on the link. If EEE is
- * not supported on this port, then this flag shall be ignored
- * by the HWRM. If EEE is disabled on this port, then this flag
+ * When this bit is set to '1' and EEE is enabled on this
+ * link, then TX LPI is requested to be enabled on the link.
+ * If EEE is not supported on this port, then this flag
* shall be ignored by the HWRM.
+ * If EEE is disabled on this port, then this flag shall be
+ * ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE \
+ UINT32_C(0x40)
/*
- * When this bit is set to '1' and EEE is enabled on this link,
- * then TX LPI is requested to be disabled on the link. If EEE
- * is not supported on this port, then this flag shall be
- * ignored by the HWRM. If EEE is disabled on this port, then
- * this flag shall be ignored by the HWRM.
+ * When this bit is set to '1' and EEE is enabled on this
+ * link, then TX LPI is requested to be disabled on the link.
+ * If EEE is not supported on this port, then this flag
+ * shall be ignored by the HWRM.
+ * If EEE is disabled on this port, then this flag shall be
+ * ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE \
+ UINT32_C(0x80)
/*
- * When set to 1, then the HWRM shall enable FEC
- * autonegotitation on this port if supported. When set to 0,
- * then this flag shall be ignored. If FEC autonegotiation is
- * not supported, then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall enable FEC autonegotiation
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC autonegotiation is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE \
+ UINT32_C(0x100)
/*
- * When set to 1, then the HWRM shall disable FEC
- * autonegotiation on this port if supported. When set to 0,
- * then this flag shall be ignored. If FEC autonegotiation is
- * not supported, then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall disable FEC autonegotiation
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC autonegotiation is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE \
UINT32_C(0x200)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
- * Code) on this port if supported. When set to 0, then this
- * flag shall be ignored. If FEC CLAUSE 74 is not supported,
- * then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire Code)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE \
UINT32_C(0x400)
/*
- * When set to 1, then the HWRM shall disable FEC CLAUSE 74
- * (Fire Code) on this port if supported. When set to 0, then
- * this flag shall be ignored. If FEC CLAUSE 74 is not
- * supported, then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire Code)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE \
UINT32_C(0x800)
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
- * Solomon) on this port if supported. When set to 0, then this
- * flag shall be ignored. If FEC CLAUSE 91 is not supported,
- * then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed Solomon)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE \
UINT32_C(0x1000)
/*
- * When set to 1, then the HWRM shall disable FEC CLAUSE 91
- * (Reed Solomon) on this port if supported. When set to 0, then
- * this flag shall be ignored. If FEC CLAUSE 91 is not
- * supported, then the HWRM shall ignore this flag.
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 91 (Reed Solomon)
+ * on this port if supported.
+ * When set to 0, then this flag shall be ignored.
+ * If FEC CLAUSE 91 is not supported, then the HWRM shall ignore this
+ * flag.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE \
UINT32_C(0x2000)
/*
- * When this bit is set to '1', the link shall be forced to be
- * taken down. # When this bit is set to '1", all other command
- * input settings related to the link speed shall be ignored.
- * Once the link state is forced down, it can be explicitly
- * cleared from that state by setting this flag to '0'. # If
- * this flag is set to '0', then the link shall be cleared from
- * forced down state if the link is in forced down state. There
- * may be conditions (e.g. out-of-band or sideband configuration
- * changes for the link) outside the scope of the HWRM
- * implementation that may clear forced down link state.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN UINT32_C(0x4000)
- uint32_t enables;
- /* This bit must be '1' for the auto_mode field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
- /* This bit must be '1' for the auto_duplex field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2)
- /* This bit must be '1' for the auto_pause field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4)
+ * When this bit is set to '1', the link shall be forced to
+ * be taken down.
+ *
+ * # When this bit is set to '1', all other
+ * command input settings related to the link speed shall
+ * be ignored.
+ * Once the link state is forced down, it can be
+ * explicitly cleared from that state by setting this flag
+ * to '0'.
+ * # If this flag is set to '0', then the link shall be
+ * cleared from forced down state if the link is in forced
+ * down state.
+ * There may be conditions (e.g. out-of-band or sideband
+ * configuration changes for the link) outside the scope
+ * of the HWRM implementation that may clear forced down
+ * link state.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN \
+ UINT32_C(0x4000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the auto_mode field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the auto_duplex field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the auto_pause field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE \
+ UINT32_C(0x4)
/*
* This bit must be '1' for the auto_link_speed field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED \
+ UINT32_C(0x8)
/*
* This bit must be '1' for the auto_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
UINT32_C(0x10)
- /* This bit must be '1' for the wirespeed field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIOUTPUTEED UINT32_C(0x20)
- /* This bit must be '1' for the lpbk field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40)
- /* This bit must be '1' for the preemphasis field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS UINT32_C(0x80)
- /* This bit must be '1' for the force_pause field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the wirespeed field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the lpbk field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the preemphasis field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the force_pause field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE \
+ UINT32_C(0x100)
/*
* This bit must be '1' for the eee_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
UINT32_C(0x200)
- /* This bit must be '1' for the tx_lpi_timer field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400)
- uint16_t port_id;
+ /*
+ * This bit must be '1' for the tx_lpi_timer field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER \
+ UINT32_C(0x400)
/* Port ID of port that is to be configured. */
- uint16_t force_link_speed;
+ uint16_t port_id;
/*
- * This is the speed that will be used if the force bit is '1'.
- * If unsupported speed is selected, an error will be generated.
+ * This is the speed that will be used if the force
+ * bit is '1'. If an unsupported speed is selected, an error
+ * will be generated.
*/
+ uint16_t force_link_speed;
/* 100Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
 /* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
- uint8_t auto_mode;
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB
/*
- * This value is used to identify what autoneg mode is used when
- * the link speed is not being forced.
+ * This value is used to identify what autoneg mode is
+ * used when the link speed is not being forced.
*/
- /*
- * Disable autoneg or autoneg disabled. No
- * speeds are selected.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ uint8_t auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0)
/* Select all possible speeds for autoneg mode. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for
- * autoneg mode. This mode has been DEPRECATED.
- * An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below
- * that speed for autoneg. This mode has been
- * DEPRECATED. An HWRM client should not use
- * this mode.
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
- * Select the speeds based on the corresponding
- * link speed mask value that is provided.
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
- uint8_t auto_duplex;
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK
/*
- * This is the duplex setting that will be used if the
- * autoneg_mode is "one_speed" or "one_or_below".
+ * This is the duplex setting that will be used if the autoneg_mode
+ * is "one_speed" or "one_or_below".
*/
+ uint8_t auto_duplex;
/* Half Duplex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0)
/* Full duplex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1)
 /* Both Half and Full duplex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
- uint8_t auto_pause;
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH
/*
- * This value is used to configure the pause that will be used
- * for autonegotiation. Add text on the usage of auto_pause and
- * force_pause.
+ * This value is used to configure the pause that will be
+ * used for autonegotiation.
*/
+ uint8_t auto_pause;
/*
- * When this bit is '1', Generation of tx pause messages has
- * been requested. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * has been requested. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX \
+ UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages has been
- * requested. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * has been requested. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX \
+ UINT32_C(0x2)
/*
- * When set to 1, the advertisement of pause is enabled. # When
- * the auto_mode is not set to none and this flag is set to 1,
- * then the auto_pause bits on this port are being advertised
- * and autoneg pause results are being interpreted. # When the
- * auto_mode is not set to none and this flag is set to 0, the
- * pause is forced as indicated in force_pause, and also
- * advertised as auto_pause bits, but the autoneg results are
- * not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is
- * set to 1, auto_pause bits should be ignored and should be set
- * to 0.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
- uint8_t unused_0;
- uint16_t auto_link_speed;
+ * When set to 1, the advertisement of pause is enabled.
+ *
+ * # When the auto_mode is not set to none and this flag is
+ * set to 1, then the auto_pause bits on this port are being
+ * advertised and autoneg pause results are being interpreted.
+ * # When the auto_mode is not set to none and this
+ * flag is set to 0, the pause is forced as indicated in
+ * force_pause, and also advertised as auto_pause bits, but
+ * the autoneg results are not interpreted since the pause
+ * configuration is being forced.
+ * # When the auto_mode is set to none and this flag is set to
+ * 1, auto_pause bits should be ignored and should be set to 0.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+ UINT32_C(0x4)
+ uint8_t unused_0;
/*
- * This is the speed that will be used if the autoneg_mode is
- * "one_speed" or "one_or_below". If an unsupported speed is
- * selected, an error will be generated.
+ * This is the speed that will be used if the autoneg_mode
+ * is "one_speed" or "one_or_below". If an unsupported speed
+ * is selected, an error will be generated.
*/
+ uint16_t auto_link_speed;
/* 100Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
 /* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
- uint16_t auto_link_speed_mask;
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB
/*
* This is a mask of link speeds that will be used if
- * autoneg_mode is "mask". If unsupported speed is enabled an
- * error will be generated.
+ * autoneg_mode is "mask". If unsupported speed is enabled
+ * an error will be generated.
*/
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ uint16_t auto_link_speed_mask;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
+ /* 100Mb link speed (Full-duplex) */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
+ /* 1Gb link speed (Half-duplex) */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
UINT32_C(0x10)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \
+ UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
UINT32_C(0x2000)
- uint8_t wirespeed;
/* This value controls the wirespeed feature. */
+ uint8_t wirespeed;
/* Wirespeed feature is disabled. */
- #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_OFF UINT32_C(0x0)
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF UINT32_C(0x0)
/* Wirespeed feature is enabled. */
- #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_ON UINT32_C(0x1)
- uint8_t lpbk;
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON
/* This value controls the loopback setting for the PHY. */
- /* No loopback is selected. Normal operation. */
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
/*
- * The HW will be configured with local loopback
- * such that host data is sent back to the host
- * without modification.
+ * The HW will be configured with remote loopback such that
+ * port logic will send received packets back out the
+ * transmitter.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
/*
- * The HW will be configured with remote
- * loopback such that port logic will send
- * packets back out the transmitter that are
- * received.
+ * The HW will be configured with external loopback such that
+ * host data is sent on the transmitter and, based on the external
+ * loopback connection, the data will be received without modification.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
- uint8_t force_pause;
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL UINT32_C(0x3)
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LAST \
+ HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL
/*
- * This value is used to configure the pause that will be used
- * for force mode.
+ * This value is used to configure the pause that will be
+ * used for force mode.
*/
+ uint8_t force_pause;
/*
- * When this bit is '1', Generation of tx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
- uint8_t unused_1;
- uint32_t preemphasis;
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+ uint8_t unused_1;
/*
- * This value controls the pre-emphasis to be used for the link.
- * Driver should not set this value (use enable.preemphasis = 0)
- * unless driver is sure of setting. Normally HWRM FW will
- * determine proper pre-emphasis.
+ * This value controls the pre-emphasis to be used for the
+ * link. The driver should not set this value (use
+ * enables.preemphasis = 0) unless it is sure of the setting.
+ * Normally the HWRM FW will determine the proper pre-emphasis.
*/
- uint16_t eee_link_speed_mask;
+ uint32_t preemphasis;
/*
- * Setting for link speed mask that is used to advertise speeds
- * during autonegotiation when EEE is enabled. This field is
- * valid only when EEE is enabled. The speeds specified in this
- * field shall be a subset of speeds specified in
- * auto_link_speed_mask. If EEE is enabled,then at least one
- * speed shall be provided in this mask.
+ * Setting for link speed mask that is used to
+ * advertise speeds during autonegotiation when EEE is enabled.
+ * This field is valid only when EEE is enabled.
+ * The speeds specified in this field shall be a subset of
+ * speeds specified in auto_link_speed_mask.
+ * If EEE is enabled, then at least one speed shall be provided
+ * in this mask.
*/
+ uint16_t eee_link_speed_mask;
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40)
- uint8_t unused_2;
- uint8_t unused_3;
- uint32_t tx_lpi_timer;
- uint32_t unused_4;
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ uint8_t unused_2[2];
/*
- * Reuested setting of TX LPI timer in microseconds. This field
- * is valid only when EEE is enabled and TX LPI is enabled.
+ * Requested setting of TX LPI timer in microseconds.
+ * This field is valid only when EEE is enabled and TX LPI is
+ * enabled.
*/
+ uint32_t tx_lpi_timer;
#define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
- #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
+ uint32_t unused_3;
} __attribute__((packed));
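
The input above follows the usual HWRM flags/enables pattern: each auto_*
field only takes effect when its enables bit is set, while forced-speed
operation is driven by FLAGS_FORCE together with force_link_speed. A
minimal sketch of filling a request that forces a 25Gb link, assuming
DPDK's rte_cpu_to_le_16/rte_cpu_to_le_32 byte-order helpers; the message
header (req_type, cmpl_ring, seq_id, resp_addr) is left for the driver's
HWRM send path, and the function name is illustrative:

	#include <string.h>
	#include <rte_byteorder.h>

	static void fill_phy_cfg_force_25g(struct hwrm_port_phy_cfg_input *req,
					   uint16_t port_id)
	{
		memset(req, 0, sizeof(*req));
		req->port_id = rte_cpu_to_le_16(port_id);
		/* Force the link and reset the PHY so the new setting is
		 * guaranteed to take effect (see FLAGS_RESET_PHY above). */
		req->flags =
			rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
					 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
		req->force_link_speed = rte_cpu_to_le_16(
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB);
	}
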
-/* Output (16 bytes) */
+/* hwrm_port_phy_cfg_output (size:128b/16B) */
struct hwrm_port_phy_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint8_t valid;
+} __attribute__((packed));
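
The 'valid' byte implements the completion contract spelled out in the
comment: firmware writes it last, so once it reads non-zero the rest of
the DMA'd response can be trusted. A minimal polling sketch under that
assumption (the send/doorbell path and a real timeout policy are
driver-specific and omitted; the helper name is illustrative):

	static int hwrm_wait_valid(
		const volatile struct hwrm_port_phy_cfg_output *resp,
		unsigned int spins)
	{
		while (spins--) {
			/* 'valid' is written last, so non-zero means the
			 * whole response record has landed in RAM. */
			if (resp->valid)
				return 0;
		}
		return -1; /* timed out */
	}
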
+
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * Command-specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
*/
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to invalid speed */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED UINT32_C(0x1)
+ /*
+ * Retry the command since the PHY is not ready.
+ * The retry count is returned in opaque_0.
+ * This is only valid for the first command; the
+ * value will not change for successive calls,
+ * but if a 0 is returned at any time then this should
+ * be treated as an unrecoverable failure.
+ *
+ * The retry interval in milliseconds is returned in opaque_1.
+ * This specifies the time that the user should wait before
+ * issuing the next port_phy_cfg command.
+ */
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_CMD_ERR_CODE_LAST \
+ HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ uint8_t unused_0[7];
} __attribute__((packed));
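
When port_phy_cfg fails, the code byte above separates a permanently bad
request from a transient PHY-not-ready condition. A dispatch sketch,
assuming the cmd_err area of the common HWRM error response has been
copied into 'err' (the opaque_0 retry count and opaque_1 interval named
in the comment live in that common response, not in this struct):

	static int phy_cfg_should_retry(
		const struct hwrm_port_phy_cfg_cmd_err *err)
	{
		switch (err->code) {
		case HWRM_PORT_PHY_CFG_CMD_ERR_CODE_RETRY:
			return 1; /* PHY not ready: wait opaque_1 ms, retry */
		case HWRM_PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED:
			/* The requested speed is not supported; retrying
			 * with the same arguments cannot succeed. */
			return 0;
		case HWRM_PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN:
		default:
			return 0;
		}
	}
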
-/* hwrm_port_phy_qcfg */
-/* Description: This command queries the PHY configuration for the port. */
-/* Input (24 bytes) */
+/**********************
+ * hwrm_port_phy_qcfg *
+ **********************/
+
+
+/* hwrm_port_phy_qcfg_input (size:192b/24B) */
struct hwrm_port_phy_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port that is to be queried. */
- uint16_t unused_0[3];
+ uint16_t port_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
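
The query side is symmetric: the input carries only port_id, and the
output defined below reports the link state plus a speed code in 100 Mbps
units, with 0xffff as the special 10 Mb encoding (compare the macro
values). A sketch of turning a response into Mbps, assuming DPDK's
rte_le_to_cpu_16 and the link/link_speed fields shown below (the helper
name is illustrative):

	#include <rte_byteorder.h>

	static uint32_t qcfg_link_speed_mbps(
		const struct hwrm_port_phy_qcfg_output *resp)
	{
		uint16_t code = rte_le_to_cpu_16(resp->link_speed);

		if (resp->link != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK)
			return 0; /* no link, or signal without link */
		if (code == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB)
			return 10; /* special encoding */
		return (uint32_t)code * 100; /* codes are in 100 Mbps units */
	}
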
-/* Output (96 bytes) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint8_t link;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* This value indicates the current link status. */
+ uint8_t link;
/* There is no link or cable detected. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0)
/* There is no link, but a cable has been detected. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1)
/* There is a link. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2)
- uint8_t unused_0;
- uint16_t link_speed;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK
+ uint8_t unused_0;
/* This value indicates the current link speed of the connection. */
+ uint16_t link_speed;
/* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64)
	/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
- uint8_t duplex_cfg;
- /* This value is indicates the duplex of the current connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB
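+	/*
+	 * Note (illustrative, not from the spec text above): apart from the
+	 * 10Mb sentinel (0xffff), the encoding is the link speed in units
+	 * of 100 Mbps, so a driver can convert it arithmetically. The
+	 * helper name below is hypothetical:
+	 *
+	 *	static uint32_t hwrm_link_speed_to_mbps(uint16_t code)
+	 *	{
+	 *		if (code == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB)
+	 *			return 10;
+	 *		return (uint32_t)code * 100;
+	 *	}
+	 */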
+ /*
+	 * This value indicates the duplex of the current
+ * configuration.
+ */
+ uint8_t duplex_cfg;
/* Half Duplex connection. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0)
/* Full duplex connection. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1)
- uint8_t pause;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL
/*
- * This value is used to indicate the current pause
- * configuration. When autoneg is enabled, this value represents
- * the autoneg results of pause configuration.
+ * This value is used to indicate the current
+ * pause configuration. When autoneg is enabled, this value
+ * represents the autoneg results of pause configuration.
*/
+ uint8_t pause;
/*
- * When this bit is '1', Generation of tx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
- uint16_t support_speeds;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
/*
- * The supported speeds for the port. This is a bit mask. For
- * each speed that is supported, the corrresponding bit will be
- * set to '1'.
+ * The supported speeds for the port. This is a bit mask.
+	 * For each speed that is supported, the corresponding
+ * bit will be set to '1'.
*/
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8)
+ uint16_t support_speeds;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \
+ UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \
+ UINT32_C(0x10)
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \
+ UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \
+ UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \
+ UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \
+ UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \
+ UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000)
- uint16_t force_link_speed;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \
+ UINT32_C(0x2000)
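+	/*
+	 * Illustrative sketch (hypothetical, not generated code): testing a
+	 * supported-speed bit before attempting to force that speed; `resp`
+	 * is an assumed pointer to this response structure.
+	 *
+	 *	uint16_t speeds = rte_le_to_cpu_16(resp->support_speeds);
+	 *
+	 *	if (speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB)
+	 *		.. the port can link at 25Gb ..
+	 */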
/*
- * Current setting of forced link speed. When the link speed is
- * not being forced, this value shall be set to 0.
+ * Current setting of forced link speed.
+ * When the link speed is not being forced, this
+ * value shall be set to 0.
*/
+ uint16_t force_link_speed;
/* 100Mb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
	/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \
+ UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \
+ UINT32_C(0x1f4)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
+ UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
- uint8_t auto_mode;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \
+ UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB
/* Current setting of auto negotiation mode. */
- /*
- * Disable autoneg or autoneg disabled. No
- * speeds are selected.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ uint8_t auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0)
/* Select all possible speeds for autoneg mode. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for
- * autoneg mode. This mode has been DEPRECATED.
- * An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below
- * that speed for autoneg. This mode has been
- * DEPRECATED. An HWRM client should not use
- * this mode.
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
- * Select the speeds based on the corresponding
- * link speed mask value that is provided.
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
- uint8_t auto_pause;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK
/*
- * Current setting of pause autonegotiation. Move autoneg_pause
- * flag here.
+	 * Current setting of pause autonegotiation.
*/
+ uint8_t auto_pause;
/*
- * When this bit is '1', Generation of tx pause messages has
- * been requested. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * has been requested. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX \
+ UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages has been
- * requested. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * has been requested. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX \
+ UINT32_C(0x2)
/*
- * When set to 1, the advertisement of pause is enabled. # When
- * the auto_mode is not set to none and this flag is set to 1,
- * then the auto_pause bits on this port are being advertised
- * and autoneg pause results are being interpreted. # When the
- * auto_mode is not set to none and this flag is set to 0, the
- * pause is forced as indicated in force_pause, and also
- * advertised as auto_pause bits, but the autoneg results are
- * not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is
- * set to 1, auto_pause bits should be ignored and should be set
- * to 0.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
- uint16_t auto_link_speed;
+ * When set to 1, the advertisement of pause is enabled.
+ *
+ * # When the auto_mode is not set to none and this flag is
+ * set to 1, then the auto_pause bits on this port are being
+ * advertised and autoneg pause results are being interpreted.
+ * # When the auto_mode is not set to none and this
+ * flag is set to 0, the pause is forced as indicated in
+ * force_pause, and also advertised as auto_pause bits, but
+ * the autoneg results are not interpreted since the pause
+ * configuration is being forced.
+ * # When the auto_mode is set to none and this flag is set to
+ * 1, auto_pause bits should be ignored and should be set to 0.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+ UINT32_C(0x4)
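+	/*
+	 * Illustrative sketch (hypothetical): deciding whether the pause
+	 * bits reflect autoneg results or a forced configuration, per the
+	 * rules above.
+	 *
+	 *	if (resp->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE &&
+	 *	    (resp->auto_pause &
+	 *	     HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE))
+	 *		.. pause was negotiated; read the auto_pause TX/RX bits ..
+	 *	else
+	 *		.. pause is forced; read force_pause instead ..
+	 */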
/*
- * Current setting for auto_link_speed. This field is only valid
- * when auto_mode is set to "one_speed" or "one_or_below".
+ * Current setting for auto_link_speed. This field is only
+ * valid when auto_mode is set to "one_speed" or "one_or_below".
*/
+ uint16_t auto_link_speed;
/* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
/* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
	/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
/* 100Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
- uint16_t auto_link_speed_mask;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \
+ UINT32_C(0xffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB
/*
* Current setting for auto_link_speed_mask that is used to
- * advertise speeds during autonegotiation. This field is only
- * valid when auto_mode is set to "mask". The speeds specified
- * in this field shall be a subset of supported speeds on this
- * port.
- */
- /* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ * advertise speeds during autonegotiation.
+ * This field is only valid when auto_mode is set to "mask".
+ * The speeds specified in this field shall be a subset of
+ * supported speeds on this port.
+ */
+ uint16_t auto_link_speed_mask;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
UINT32_C(0x10)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \
UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \
UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \
UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
UINT32_C(0x2000)
- uint8_t wirespeed;
/* Current setting for wirespeed. */
+ uint8_t wirespeed;
/* Wirespeed feature is disabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_WIOUTPUTEED_OFF UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF UINT32_C(0x0)
/* Wirespeed feature is enabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_WIOUTPUTEED_ON UINT32_C(0x1)
- uint8_t lpbk;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON
/* Current setting for loopback. */
- /* No loopback is selected. Normal operation. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
- * The HW will be configured with local loopback
- * such that host data is sent back to the host
- * without modification.
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
/*
- * The HW will be configured with remote
- * loopback such that port logic will send
- * packets back out the transmitter that are
- * received.
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
- uint8_t force_pause;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
/*
- * Current setting of forced pause. When the pause configuration
- * is not being forced, then this value shall be set to 0.
+ * The HW will be configured with external loopback such that
+	 * host data is sent on the transmitter and based on the external
+ * loopback connection the data will be received without modification.
*/
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL
/*
- * When this bit is '1', Generation of tx pause messages is
- * supported. Disabled otherwise.
+ * Current setting of forced pause.
+ * When the pause configuration is not being forced, then
+ * this value shall be set to 0.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ uint8_t force_pause;
/*
- * When this bit is '1', Reception of rx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2)
- uint8_t module_status;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1)
/*
- * This value indicates the current status of the optics module
- * on this port.
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+ /*
+ * This value indicates the current status of the optics module on
+ * this port.
*/
+ uint8_t module_status;
/* Module is inserted and accepted */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \
+ UINT32_C(0x0)
/* Module is rejected and transmit side Laser is disabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \
+ UINT32_C(0x1)
/* Module mismatch warning. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \
+ UINT32_C(0x2)
/* Module is rejected and powered down. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \
+ UINT32_C(0x3)
/* Module is not inserted. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
UINT32_C(0x4)
/* Module status is not applicable. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
UINT32_C(0xff)
- uint32_t preemphasis;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE
/* Current setting for preemphasis. */
- uint8_t phy_maj;
+ uint32_t preemphasis;
/* This field represents the major version of the PHY. */
- uint8_t phy_min;
+ uint8_t phy_maj;
/* This field represents the minor version of the PHY. */
- uint8_t phy_bld;
+ uint8_t phy_min;
/* This field represents the build version of the PHY. */
- uint8_t phy_type;
+ uint8_t phy_bld;
/* This value represents a PHY type. */
+ uint8_t phy_type;
/* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \
+ UINT32_C(0x0)
/* BASE-CR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR UINT32_C(0x1)
- /* BASE-KR4 (Deprecated) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \
+ UINT32_C(0x1)
+ /* BASE-KR4 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \
+ UINT32_C(0x2)
/* BASE-LR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \
+ UINT32_C(0x3)
/* BASE-SR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR UINT32_C(0x4)
- /* BASE-KR2 (Deprecated) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 UINT32_C(0x5)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \
+ UINT32_C(0x4)
+ /* BASE-KR2 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \
+ UINT32_C(0x5)
/* BASE-KX */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX UINT32_C(0x6)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \
+ UINT32_C(0x6)
/* BASE-KR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR UINT32_C(0x7)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \
+ UINT32_C(0x7)
/* BASE-T */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET UINT32_C(0x8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \
+ UINT32_C(0x8)
/* EEE capable BASE-T */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE UINT32_C(0x9)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \
+ UINT32_C(0x9)
/* SGMII connected external PHY */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY UINT32_C(0xa)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \
+ UINT32_C(0xa)
/* 25G_BASECR_CA_L */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L UINT32_C(0xb)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L \
+ UINT32_C(0xb)
/* 25G_BASECR_CA_S */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S UINT32_C(0xc)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S \
+ UINT32_C(0xc)
/* 25G_BASECR_CA_N */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N UINT32_C(0xd)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N \
+ UINT32_C(0xd)
/* 25G_BASESR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR UINT32_C(0xe)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR \
+ UINT32_C(0xe)
/* 100G_BASECR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 UINT32_C(0xf)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4 \
+ UINT32_C(0xf)
/* 100G_BASESR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 UINT32_C(0x10)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4 \
+ UINT32_C(0x10)
/* 100G_BASELR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 UINT32_C(0x11)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4 \
+ UINT32_C(0x11)
/* 100G_BASEER4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 UINT32_C(0x12)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4 \
+ UINT32_C(0x12)
/* 100G_BASESR10 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 UINT32_C(0x13)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10 \
+ UINT32_C(0x13)
/* 40G_BASECR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 UINT32_C(0x14)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4 \
+ UINT32_C(0x14)
/* 40G_BASESR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 UINT32_C(0x15)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4 \
+ UINT32_C(0x15)
/* 40G_BASELR4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 UINT32_C(0x16)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4 \
+ UINT32_C(0x16)
/* 40G_BASEER4 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 UINT32_C(0x17)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4 \
+ UINT32_C(0x17)
/* 40G_ACTIVE_CABLE */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \
UINT32_C(0x18)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET UINT32_C(0x19)
+ /* 1G_baseT */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET \
+ UINT32_C(0x19)
/* 1G_baseSX */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX UINT32_C(0x1a)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX \
+ UINT32_C(0x1a)
/* 1G_baseCX */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX UINT32_C(0x1b)
- uint8_t media_type;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX \
+ UINT32_C(0x1b)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX
/* This value represents a media type. */
+ uint8_t media_type;
/* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0)
/* Twisted Pair */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1)
/* Direct Attached Copper */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2)
/* Fiber */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3)
- uint8_t xcvr_pkg_type;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE
/* This value represents a transceiver type. */
+ uint8_t xcvr_pkg_type;
/* PHY and MAC are in the same package */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
UINT32_C(0x1)
/* PHY and MAC are in different packages */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
UINT32_C(0x2)
- uint8_t eee_config_phy_addr;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL
+ uint8_t eee_config_phy_addr;
+	/* This field represents the PHY address. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK \
+ UINT32_C(0x1f)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
/*
* This field represents flags related to EEE configuration.
* These EEE configuration flags are valid only when the
- * auto_mode is not set to none (in other words autonegotiation
+ * auto_mode is not set to none (in other words autonegotiation
* is enabled).
*/
- /* This field represents PHY address. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
/*
- * When set to 1, Energy Efficient Ethernet (EEE) mode is
- * enabled. Speeds for autoneg with EEE mode enabled are based
- * on eee_link_speed_mask.
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled.
+ * Speeds for autoneg with EEE mode enabled
+ * are based on eee_link_speed_mask.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
+ UINT32_C(0x20)
/*
- * This flag is valid only when eee_enabled is set to 1. # If
- * eee_enabled is set to 0, then EEE mode is disabled and this
- * flag shall be ignored. # If eee_enabled is set to 1 and this
- * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
- * is enabled and in use. # If eee_enabled is set to 1 and this
- * flag is set to 0, then Energy Efficient Ethernet (EEE) mode
- * is enabled but is currently not in use.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE UINT32_C(0x40)
+ * This flag is valid only when eee_enabled is set to 1.
+ *
+ * # If eee_enabled is set to 0, then EEE mode is disabled
+ * and this flag shall be ignored.
+ * # If eee_enabled is set to 1 and this flag is set to 1,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * and in use.
+ * # If eee_enabled is set to 1 and this flag is set to 0,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * but is currently not in use.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \
+ UINT32_C(0x40)
/*
- * This flag is valid only when eee_enabled is set to 1. # If
- * eee_enabled is set to 0, then EEE mode is disabled and this
- * flag shall be ignored. # If eee_enabled is set to 1 and this
- * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
- * is enabled and TX LPI is enabled. # If eee_enabled is set to
- * 1 and this flag is set to 0, then Energy Efficient Ethernet
- * (EEE) mode is enabled but TX LPI is disabled.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI UINT32_C(0x80)
+ * This flag is valid only when eee_enabled is set to 1.
+ *
+ * # If eee_enabled is set to 0, then EEE mode is disabled
+ * and this flag shall be ignored.
+ * # If eee_enabled is set to 1 and this flag is set to 1,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * and TX LPI is enabled.
+ * # If eee_enabled is set to 1 and this flag is set to 0,
+ * then Energy Efficient Ethernet (EEE) mode is enabled
+ * but TX LPI is disabled.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \
+ UINT32_C(0x80)
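+	/*
+	 * Illustrative sketch (hypothetical): the PHY address and the EEE
+	 * flags share this byte; the MASK/SFT pairs above separate them.
+	 *
+	 *	uint8_t phy_addr = resp->eee_config_phy_addr &
+	 *			   HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK;
+	 *	bool eee_on = resp->eee_config_phy_addr &
+	 *		      HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED;
+	 */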
/*
- * This field represents flags related to EEE configuration.
- * These EEE configuration flags are valid only when the
- * auto_mode is not set to none (in other words autonegotiation
- * is enabled).
+ * When set to 1, the parallel detection is used to determine
+ * the speed of the link partner.
+ *
+	 * Parallel detection is used when an autonegotiation capable
+	 * device is connected to a link partner that is not capable
+ * of autonegotiation.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
- uint8_t parallel_detect;
- /* Reserved field, set to 0 */
+ uint8_t parallel_detect;
/*
* When set to 1, the parallel detection is used to determine
- * the speed of the link partner. Parallel detection is used
- * when a autonegotiation capable device is connected to a link
- * parter that is not capable of autonegotiation.
+ * the speed of the link partner.
+ *
+	 * Parallel detection is used when an autonegotiation capable
+	 * device is connected to a link partner that is not capable
+ * of autonegotiation.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1)
- /* Reserved field, set to 0 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_MASK UINT32_C(0xfe)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_SFT 1
- uint16_t link_partner_adv_speeds;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1)
/*
- * The advertised speeds for the port by the link partner. Each
- * advertised speed will be set to '1'.
+ * The advertised speeds for the port by the link partner.
+ * Each advertised speed will be set to '1'.
*/
- /* 100Mb link speed (Half-duplex) */
+ uint16_t link_partner_adv_speeds;
+ /* 100Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
UINT32_C(0x2)
- /* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \
UINT32_C(0x10)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \
+	/* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \
UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \
UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \
UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \
UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \
UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \
UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
UINT32_C(0x800)
- /* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
UINT32_C(0x1000)
- /* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
UINT32_C(0x2000)
- uint8_t link_partner_adv_auto_mode;
- /*
- * The advertised autoneg for the port by the link partner. This
- * field is deprecated and should be set to 0.
- */
/*
- * Disable autoneg or autoneg disabled. No
- * speeds are selected.
+ * The advertised autoneg for the port by the link partner.
+ * This field is deprecated and should be set to 0.
*/
+ uint8_t link_partner_adv_auto_mode;
+ /* Disable autoneg or autoneg disabled. No speeds are selected. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \
UINT32_C(0x0)
/* Select all possible speeds for autoneg mode. */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for
- * autoneg mode. This mode has been DEPRECATED.
- * An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode has
+ * been DEPRECATED. An HWRM client should not use this mode.
*/
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below
- * that speed for autoneg. This mode has been
- * DEPRECATED. An HWRM client should not use
- * this mode.
+ * Select the auto_link_speed or any speed below that speed for autoneg.
+ * This mode has been DEPRECATED. An HWRM client should not use this mode.
*/
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
UINT32_C(0x3)
/*
- * Select the speeds based on the corresponding
- * link speed mask value that is provided.
+ * Select the speeds based on the corresponding link speed mask value
+ * that is provided.
*/
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \
UINT32_C(0x4)
- uint8_t link_partner_adv_pause;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
/* The advertised pause settings on the port by the link partner. */
+ uint8_t link_partner_adv_pause;
/*
- * When this bit is '1', Generation of tx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \
UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages is
- * supported. Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages
+ * is supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \
UINT32_C(0x2)
- uint16_t adv_eee_link_speed_mask;
/*
- * Current setting for link speed mask that is used to advertise
- * speeds during autonegotiation when EEE is enabled. This field
- * is valid only when eee_enabled flags is set to 1. The speeds
- * specified in this field shall be a subset of speeds specified
- * in auto_link_speed_mask.
+ * Current setting for link speed mask that is used to
+ * advertise speeds during autonegotiation when EEE is enabled.
+	 * This field is valid only when the eee_enabled flag is set to 1.
+ * The speeds specified in this field shall be a subset of
+ * speeds specified in auto_link_speed_mask.
*/
+ uint16_t adv_eee_link_speed_mask;
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
UINT32_C(0x8)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
UINT32_C(0x10)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \
UINT32_C(0x40)
- uint16_t link_partner_adv_eee_link_speed_mask;
/*
- * Current setting for link speed mask that is advertised by the
- * link partner when EEE is enabled. This field is valid only
- * when eee_enabled flags is set to 1.
+ * Current setting for link speed mask that is advertised by
+ * the link partner when EEE is enabled.
+	 * This field is valid only when the eee_enabled flag is set to 1.
*/
+ uint16_t link_partner_adv_eee_link_speed_mask;
/* Reserved */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
UINT32_C(0x1)
- /* 100Mb link speed (Full-duplex) */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
UINT32_C(0x2)
/* Reserved */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
UINT32_C(0x4)
- /* 1Gb link speed (Full-duplex) */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
UINT32_C(0x8)
/* Reserved */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
UINT32_C(0x10)
/* Reserved */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
UINT32_C(0x20)
/* 10Gb link speed */
- #define \
- HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
UINT32_C(0x40)
- uint32_t xcvr_identifier_type_tx_lpi_timer;
- /* This value represents transceiver identifier type. */
+ uint32_t xcvr_identifier_type_tx_lpi_timer;
/*
- * Current setting of TX LPI timer in microseconds. This field
- * is valid only when_eee_enabled flag is set to 1 and
- * tx_lpi_enabled is set to 1.
+ * Current setting of TX LPI timer in microseconds.
+	 * This field is valid only when the eee_enabled flag is set to 1
+ * and tx_lpi_enabled is set to 1.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
/* This value represents transceiver identifier type. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
UINT32_C(0xff000000)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24
/* Unknown */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
(UINT32_C(0x0) << 24)
/* SFP/SFP+/SFP28 */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
(UINT32_C(0x3) << 24)
- /* QSFP */
+	/* QSFP */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
(UINT32_C(0xc) << 24)
/* QSFP+ */
@@ -5550,2637 +10986,7759 @@ struct hwrm_port_phy_qcfg_output {
/* QSFP28 */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
(UINT32_C(0x11) << 24)
- uint16_t fec_cfg;
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28
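+	/*
+	 * Illustrative sketch (hypothetical): the TX LPI timer and the
+	 * transceiver identifier are packed into this one 32-bit word;
+	 * the MASK/SFT constants above unpack them.
+	 *
+	 *	uint32_t w = rte_le_to_cpu_32(
+	 *		resp->xcvr_identifier_type_tx_lpi_timer);
+	 *	uint32_t tx_lpi_us = w &
+	 *		HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK;
+	 *	uint32_t xcvr_id = (w &
+	 *		HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK) >>
+	 *		HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT;
+	 */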
/*
- * This value represents the current configuration of Forward
- * Error Correction (FEC) on the port.
+ * This value represents the current configuration of
+ * Forward Error Correction (FEC) on the port.
*/
+ uint16_t fec_cfg;
/*
- * When set to 1, then FEC is not supported on this port. If
- * this flag is set to 1, then all other FEC configuration flags
- * shall be ignored. When set to 0, then FEC is supported as
- * indicated by other configuration flags. If no cable is
- * attached and the HWRM does not yet know the FEC capability,
- * then the HWRM shall set this flag to 1 when reporting FEC
- * capability.
+ * When set to 1, then FEC is not supported on this port. If this flag
+ * is set to 1, then all other FEC configuration flags shall be ignored.
+ * When set to 0, then FEC is supported as indicated by other
+ * configuration flags.
+ * If no cable is attached and the HWRM does not yet know the FEC
+ * capability, then the HWRM shall set this flag to 1 when reporting
+ * FEC capability.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
UINT32_C(0x1)
/*
- * When set to 1, then FEC autonegotiation is supported on this
- * port. When set to 0, then FEC autonegotiation is not
- * supported on this port.
+ * When set to 1, then FEC autonegotiation is supported on this port.
+ * When set to 0, then FEC autonegotiation is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \
UINT32_C(0x2)
/*
- * When set to 1, then FEC autonegotiation is enabled on this
- * port. When set to 0, then FEC autonegotiation is disabled if
- * supported. This flag should be ignored if FEC autonegotiation
- * is not supported on this port.
+ * When set to 1, then FEC autonegotiation is enabled on this port.
+ * When set to 0, then FEC autonegotiation is disabled if supported.
+ * This flag should be ignored if FEC autonegotiation is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
UINT32_C(0x4)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on
- * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
- * not supported on this port.
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this port.
+ * When set to 0, then FEC CLAUSE 74 (Fire Code) is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
UINT32_C(0x8)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on
- * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
- * disabled if supported. This flag should be ignored if FEC
- * CLAUSE 74 is not supported on this port.
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on this port.
+ * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if supported.
+ * This flag should be ignored if FEC CLAUSE 74 is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
UINT32_C(0x10)
/*
- * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported
- * on this port. When set to 0, then FEC CLAUSE 91 (Reed
- * Solomon) is not supported on this port.
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported on this port.
+ * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \
UINT32_C(0x20)
/*
- * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled
- * on this port. When set to 0, then FEC CLAUSE 91 (Reed
- * Solomon) is disabled if supported. This flag should be
- * ignored if FEC CLAUSE 91 is not supported on this port.
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled on this port.
+ * When set to 0, then FEC CLAUSE 91 (Reed Solomon) is disabled if supported.
+ * This flag should be ignored if FEC CLAUSE 91 is not supported on this port.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \
UINT32_C(0x40)
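+	/*
+	 * Illustrative sketch (hypothetical): FEC_NONE_SUPPORTED overrides
+	 * everything else; otherwise each *_ENABLED bit is meaningful only
+	 * when its *_SUPPORTED bit is also set.
+	 *
+	 *	uint16_t fec = rte_le_to_cpu_16(resp->fec_cfg);
+	 *
+	 *	if (!(fec & HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED) &&
+	 *	    (fec & HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED) &&
+	 *	    (fec & HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED))
+	 *		.. Fire Code FEC is configured on the port ..
+	 */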
- uint8_t duplex_state;
/*
- * This value is indicates the duplex of the current connection
- * state.
+	 * This value indicates the duplex of the current
+ * connection state.
*/
+ uint8_t duplex_state;
/* Half Duplex connection. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0)
/* Full duplex connection. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1)
- uint8_t unused_1;
- char phy_vendor_name[16];
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_LAST \
+ HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL
+ /* Option flags fields. */
+ uint8_t option_flags;
+ /* When this bit is '1', Media auto detect is enabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_MEDIA_AUTO_DETECT \
+ UINT32_C(0x1)
/*
- * Up to 16 bytes of null padded ASCII string representing PHY
- * vendor. If the string is set to null, then the vendor name is
- * not available.
+ * Up to 16 bytes of null padded ASCII string representing
+ * PHY vendor.
+ * If the string is set to null, then the vendor name is not
+ * available.
*/
- char phy_vendor_partnumber[16];
+ char phy_vendor_name[16];
/*
- * Up to 16 bytes of null padded ASCII string that identifies
- * vendor specific part number of the PHY. If the string is set
- * to null, then the vendor specific part number is not
- * available.
+ * Up to 16 bytes of null padded ASCII string that
+ * identifies vendor specific part number of the PHY.
+ * If the string is set to null, then the vendor specific
+ * part number is not available.
+ */
+ char phy_vendor_partnumber[16];
+ uint8_t unused_2[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
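+/*
+ * Illustrative sketch (hypothetical): because the firmware writes the
+ * `valid` byte last, a driver polls it before trusting the rest of the
+ * response; `bp->hwrm_cmd_resp_addr` is an assumed driver pointer.
+ *
+ *	volatile struct hwrm_port_phy_qcfg_output *resp =
+ *		bp->hwrm_cmd_resp_addr;
+ *
+ *	while (!resp->valid)
+ *		rte_delay_us(1);	.. bound with a timeout in practice ..
+ *	rte_rmb();	.. order the valid read before payload reads ..
+ */
+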
+/*********************
+ * hwrm_port_mac_cfg *
+ *********************/
+
+
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
+struct hwrm_port_mac_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to the host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+	 * In this field, there are a number of CoS mapping related flags
+ * that are used to configure CoS mappings and their corresponding
+ * priorities in the hardware.
+ * For the priorities of CoS mappings, the HWRM uses the following
+ * priority order (high to low) by default:
+	 * # vlan_pri
+ * # ip_dscp
+ * # tunnel_vlan_pri
+ * # default cos
+ *
+ * A subset of CoS mappings can be enabled.
+ * If a priority is not specified for an enabled CoS mapping, the
+ * priority will be assigned in the above order for the enabled CoS
+ * mappings. For example, if vlan_pri and ip_dscp CoS mappings are
+ * enabled and their priorities are not specified, the following
+ * priority order (high to low) will be used by the HWRM:
+ * # vlan_pri
+ * # ip_dscp
+ * # default cos
+ *
+ * vlan_pri CoS mapping together with default CoS with lower priority
+ * are enabled by default by the HWRM.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', this command will configure
+ * the MAC to match the current link state of the PHY.
+ * If the link is not established on the PHY, then this
+ * bit has no effect.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_MATCH_LINK \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_ENABLE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * requested to be enabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_ENABLE \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * enable timestamp capture capability on the receive side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * disable timestamp capture capability on the receive side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * enable timestamp capture capability on the transmit side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the HWRM is requested to
+ * disable timestamp capture capability on the transmit side
+ * of this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE \
+ UINT32_C(0x80)
+ /*
+ * When this bit is '1', the Out-Of-Box WoL is requested to
+ * be enabled on this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_ENABLE \
+ UINT32_C(0x100)
+ /*
+	 * When this bit is '1', the Out-Of-Box WoL is requested to
+ * be disabled on this port.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_OOB_WOL_DISABLE \
+ UINT32_C(0x200)
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_VLAN_PRI2COS_DISABLE \
+ UINT32_C(0x400)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_TUNNEL_PRI2COS_DISABLE \
+ UINT32_C(0x800)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * requested to be disabled.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_FLAGS_IP_DSCP2COS_DISABLE \
+ UINT32_C(0x1000)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the ipg field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_IPG \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the lpbk field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_LPBK \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the vlan_pri2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_VLAN_PRI2COS_MAP_PRI \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the tunnel_pri2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TUNNEL_PRI2COS_MAP_PRI \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the dscp2cos_map_pri field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_DSCP2COS_MAP_PRI \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the cos_field_cfg field to be
+ * configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_COS_FIELD_CFG \
+ UINT32_C(0x100)
+ /* Port ID of port that is to be configured. */
+ uint16_t port_id;
+ /*
+ * This value is used to configure the minimum IPG (inter-packet
+ * gap) that is inserted between packets sent by this port.
+ */
+ uint8_t ipg;
+ /* This value controls the loopback setting for the MAC. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_INPUT_LPBK_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_LPBK_REMOTE
+ /*
+ * This value controls the priority setting of VLAN PRI to CoS
+ * mapping based on VLAN Tags of inner packet headers of
+ * tunneled packets or packet headers of non-tunneled packets.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is specified.
+ * # When comparing priorities of mappings, a higher value
+ * indicates a higher priority.
+ * For example, with a value in the range 0-3, 0 is the
+ * lowest priority and 3 is the highest priority.
+ */
+ uint8_t vlan_pri2cos_map_pri;
+ /* Reserved field. */
+ uint8_t reserved1;
+ /*
+ * This value controls the priority setting of VLAN PRI to CoS
+ * mapping based on VLAN Tags of tunneled header.
+ * This mapping only applies when tunneled headers
+ * are present.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is specified.
+ * # When comparing priorities of mappings, a higher value
+ * indicates a higher priority.
+ * For example, with a value in the range 0-3, 0 is the
+ * lowest priority and 3 is the highest priority.
+ */
+ uint8_t tunnel_pri2cos_map_pri;
+ /*
+ * This value controls the priority setting of IP DSCP to CoS
+ * mapping based on inner IP header of tunneled packets or
+ * IP header of non-tunneled packets.
+ *
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is specified.
+ * # When comparing priorities of mappings, a higher value
+ * indicates a higher priority.
+ * For example, with a value in the range 0-3, 0 is the
+ * lowest priority and 3 is the highest priority.
+ */
+ uint8_t dscp2pri_map_pri;
+ /*
+ * This is a 16-bit bit mask that is used to request a
+ * specific configuration of time stamp capture of PTP messages
+ * on the receive side of this port.
+ * This field shall be ignored if the ptp_rx_ts_capture_enable
+ * flag is not set in this command.
+ * Otherwise, if bit 'i' is set, then the HWRM is being
+ * requested to configure the receive side of the port to
+ * capture the time stamp of every received PTP message
+ * with messageType field value set to i.
+ */
+ uint16_t rx_ts_capture_ptp_msg_type;
+ /*
+ * This is a 16-bit bit mask that is used to request a
+ * specific configuration of time stamp capture of PTP messages
+ * on the transmit side of this port.
+ * This field shall be ignored if the ptp_tx_ts_capture_enable
+ * flag is not set in this command.
+ * Otherwise, if bit 'i' is set, then the HWRM is being
+ * requested to configure the transmit side of the port to
+ * capture the time stamp of every transmitted PTP message
+ * with messageType field value set to i.
+ */
+ uint16_t tx_ts_capture_ptp_msg_type;
+ /* Configuration of CoS fields. */
+ uint8_t cos_field_cfg;
+ /* Reserved */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_RSVD1 \
+ UINT32_C(0x1)
+ /*
+ * This field is used to specify selection of VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the inner packet headers of tunneled packets or
+ * non-tunneled packets.
+ * This field is valid only if inner VLAN PRI to CoS mapping
+ * is enabled.
+ * If VLAN PRI to CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x6)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \
+ 1
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the inner packet headers.
+ * No VLAN PRI shall be selected for this configuration
+ * if only one VLAN Tag is present in the inner
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 1)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used to specify selection of tunnel VLAN
+ * PRI value based on whether one or two VLAN Tags are
+ * present in tunnel headers.
+ * This field is valid only if tunnel VLAN PRI to CoS mapping
+ * is enabled.
+ * If tunnel VLAN PRI to CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x18)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \
+ 3
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 3)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the tunnel packet headers.
+ * No tunnel VLAN PRI shall be selected for this
+ * configuration if only one VLAN Tag is present in
+ * the tunnel packet headers.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 3)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 3)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 3)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field shall be used to provide the default CoS value
+ * that has been configured on this port.
+ * This field is valid only if default CoS mapping
+ * is enabled.
+ * If default CoS mapping is not enabled, then this
+ * field shall be ignored.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_MAC_CFG_INPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \
+ 5
+ uint8_t unused_0[3];
+} __attribute__((packed));
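+
+/*
+ * Illustrative sketch (not part of the HSI definition): one way a driver
+ * could fill hwrm_port_mac_cfg_input to enable RX PTP timestamp capture.
+ * The helper name, the zero-initialization convention, and the omission
+ * of endianness conversion are assumptions for brevity.
+ */
+static inline void
+example_port_mac_cfg_ptp_rx(struct hwrm_port_mac_cfg_input *req,
+                            uint16_t port_id, uint16_t ptp_msg_type_mask)
+{
+    *req = (struct hwrm_port_mac_cfg_input){ 0 };
+    req->port_id = port_id;
+    /* Request RX timestamp capture on this port. */
+    req->flags = HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+    /* Mark rx_ts_capture_ptp_msg_type as a configured field. */
+    req->enables =
+        HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE;
+    /* Bit 'i' selects PTP messages whose messageType equals i. */
+    req->rx_ts_capture_ptp_msg_type = ptp_msg_type_mask;
+}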
+
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be received on the port.
+ * This value does not include the number of bytes used by
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mru;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be transmitted on the port.
+ * This value does not include the number of bytes used by
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mtu;
+ /* Current configuration of the IPG value. */
+ uint8_t ipg;
+ /* Current value of the loopback setting. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_MAC_CFG_OUTPUT_LPBK_REMOTE
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
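+
+/*
+ * Illustrative sketch (not part of the HSI definition): since firmware
+ * writes the 'valid' byte last, a caller can poll it to detect when the
+ * DMA'd response is complete. The spin budget and the simple volatile
+ * access used here are assumptions; a real driver adds barriers/delays.
+ */
+static inline int
+example_wait_resp_valid(volatile struct hwrm_port_mac_cfg_output *resp,
+                        unsigned int spins)
+{
+    while (spins--) {
+        if (resp->valid)
+            return 0;   /* response completely written */
+    }
+    return -1;          /* timed out waiting for firmware */
+}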
+
+/**********************
+ * hwrm_port_mac_qcfg *
+ **********************/
+
+
+/* hwrm_port_mac_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_mac_qcfg_output (size:192b/24B) */
+struct hwrm_port_mac_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be received on the port.
+ * This value does not include the number of bytes used by the
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mru;
+ /*
+ * This is the configured maximum length of Ethernet packet
+ * payload that is allowed to be transmitted on the port.
+ * This value does not include the number of bytes used by the
+ * Ethernet header and trailer (CRC).
+ */
+ uint16_t mtu;
+ /*
+ * The minimum IPG (inter-packet gap) that is
+ * inserted between packets sent by this port.
+ */
+ uint8_t ipg;
+ /* The loopback setting for the MAC. */
+ uint8_t lpbk;
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
+ /*
+ * The HW will be configured with local loopback such that
+ * host data is sent back to the host without modification.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
+ /*
+ * The HW will be configured with remote loopback such that
+ * port logic will send packets back out the transmitter that
+ * are received.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_REMOTE
+ /*
+ * Priority setting for VLAN PRI to CoS mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is used.
+ * # When comparing priorities of mappings, a higher value
+ * indicates a higher priority.
+ * For example, with a value in the range 0-3, 0 is the
+ * lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t vlan_pri2cos_map_pri;
+ /*
+ * This field contains a number of CoS mapping related flags
+ * that indicate the configured CoS mappings.
+ */
+ uint8_t flags;
+ /*
+ * When this bit is set to '1', the inner VLAN PRI to CoS mapping
+ * is enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_VLAN_PRI2COS_ENABLE \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', tunnel VLAN PRI field to
+ * CoS mapping is enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_TUNNEL_PRI2COS_ENABLE \
+ UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', the IP DSCP to CoS mapping is
+ * enabled.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_IP_DSCP2COS_ENABLE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the Out-Of-Box WoL is enabled on this
+ * port.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_OOB_WOL_ENABLE \
+ UINT32_C(0x8)
+ /* When this bit is '1', PTP is enabled for RX on this port. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x10)
+ /* When this bit is '1', PTP is enabled for TX on this port. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE \
+ UINT32_C(0x20)
+ /*
+ * Priority setting for tunnel VLAN PRI to CoS mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is used.
+ * # When comparing priorities of mappings, a higher value
+ * indicates a higher priority.
+ * For example, with a value in the range 0-3, 0 is the
+ * lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t tunnel_pri2cos_map_pri;
+ /*
+ * Priority setting for DSCP to PRI mapping.
+ * # Each XXX_pri variable shall have a unique priority value
+ * when it is used.
+ * # When comparing priorities of mappings, a higher value
+ * indicates a higher priority.
+ * For example, with a value in the range 0-3, 0 is the
+ * lowest priority and 3 is the highest priority.
+ * # If the corresponding CoS mapping is not enabled, then this
+ * field should be ignored.
+ * # This value indicates the normalized priority value retained
+ * in the HWRM.
+ */
+ uint8_t dscp2pri_map_pri;
+ /*
+ * This is a 16-bit bit mask that represents the
+ * current configuration of time stamp capture of PTP messages
+ * on the receive side of this port.
+ * If bit 'i' is set, then the receive side of the port
+ * is configured to capture the time stamp of every
+ * received PTP message with messageType field value set
+ * to i.
+ * If all bits are set to 0 (i.e. the field value is 0),
+ * then the receive side of the port is not configured
+ * to capture timestamp for PTP messages.
+ * If all bits are set to 1, then the receive side of the
+ * port is configured to capture timestamp for all PTP
+ * messages.
+ */
+ uint16_t rx_ts_capture_ptp_msg_type;
+ /*
+ * This is a 16-bit bit mask that represents the
+ * current configuration of time stamp capture of PTP messages
+ * on the transmit side of this port.
+ * If bit 'i' is set, then the transmit side of the port
+ * is configured to capture the time stamp of every
+ * transmitted PTP message with messageType field value set
+ * to i.
+ * If all bits are set to 0 (i.e. the field value is 0),
+ * then the transmit side of the port is not configured
+ * to capture timestamp for PTP messages.
+ * If all bits are set to 1, then the transmit side of the
+ * port is configured to capture timestamp for all PTP
+ * messages.
*/
- uint32_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t unused_5;
- uint8_t valid;
+ uint16_t tx_ts_capture_ptp_msg_type;
+ /* Configuration of CoS fields. */
+ uint8_t cos_field_cfg;
+ /* Reserved */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_RSVD \
+ UINT32_C(0x1)
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used for selecting VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the inner packet headers of tunneled packets or
+ * non-tunneled packets.
*/
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x6)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT \
+ 1
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 1)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the inner packet headers.
+ * No VLAN PRI is selected for this configuration
+ * if only one VLAN Tag is present in the inner
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 1)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the inner packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 1)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used for selecting tunnel VLAN PRI value
+ * based on whether one or two VLAN Tags are present in
+ * the tunnel headers of tunneled packets. This selection
+ * does not apply to non-tunneled packets.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK \
+ UINT32_C(0x18)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT \
+ 3
+ /*
+ * Select inner VLAN PRI when 1 or 2 VLAN Tags are
+ * present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (UINT32_C(0x0) << 3)
+ /*
+ * Select outer VLAN Tag PRI when 2 VLAN Tags are
+ * present in the tunnel packet headers.
+ * No VLAN PRI is selected for this configuration
+ * if only one VLAN Tag is present in the tunnel
+ * packet headers.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (UINT32_C(0x1) << 3)
+ /*
+ * Select outermost VLAN PRI when 1 or 2 VLAN Tags
+ * are present in the tunnel packet headers
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (UINT32_C(0x2) << 3)
+ /* Unspecified */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (UINT32_C(0x3) << 3)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ /*
+ * This field is used to provide the default CoS value that
+ * has been configured on this port.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_MASK \
+ UINT32_C(0xe0)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_DEFAULT_COS_SFT \
+ 5
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
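+
+/*
+ * Illustrative sketch (not part of the HSI definition): decoding the
+ * packed cos_field_cfg byte of hwrm_port_mac_qcfg_output with the
+ * MASK/SFT pairs above. The helper name is hypothetical.
+ */
+static inline uint8_t
+example_qcfg_vlan_pri_sel(const struct hwrm_port_mac_qcfg_output *resp)
+{
+    /* Isolate bits 2:1, then shift down to the selector value (0-3). */
+    return (resp->cos_field_cfg &
+            HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_MASK) >>
+            HWRM_PORT_MAC_QCFG_OUTPUT_COS_FIELD_CFG_VLAN_PRI_SEL_SFT;
+}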
-/* hwrm_port_qstats */
-/* Description: This function returns per port Ethernet statistics. */
-/* Input (40 bytes) */
+/**************************
+ * hwrm_port_mac_ptp_qcfg *
+ **************************/
+
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field contains a number of PTP related flags
+ * that indicate the configured PTP capabilities.
+ */
+ uint8_t flags;
+ /*
+ * When this bit is set to '1', the PTP related registers are
+ * directly accessible by the host.
+ */
+ #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the PTP information is accessible
+ * via HWRM commands.
+ */
+ #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_HWRM_ACCESS \
+ UINT32_C(0x2)
+ uint8_t unused_0[3];
+ /* Offset of the PTP register for the lower 32 bits of timestamp for RX. */
+ uint32_t rx_ts_reg_off_lower;
+ /* Offset of the PTP register for the upper 32 bits of timestamp for RX. */
+ uint32_t rx_ts_reg_off_upper;
+ /* Offset of the PTP register for the sequence ID for RX. */
+ uint32_t rx_ts_reg_off_seq_id;
+ /* Offset of the first PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_0;
+ /* Offset of the second PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_1;
+ /* Offset of the third PTP source ID for RX. */
+ uint32_t rx_ts_reg_off_src_id_2;
+ /* Offset of the domain ID for RX. */
+ uint32_t rx_ts_reg_off_domain_id;
+ /* Offset of the PTP FIFO register for RX. */
+ uint32_t rx_ts_reg_off_fifo;
+ /* Offset of the PTP advance FIFO register for RX. */
+ uint32_t rx_ts_reg_off_fifo_adv;
+ /* PTP timestamp granularity for RX. */
+ uint32_t rx_ts_reg_off_granularity;
+ /* Offset of the PTP register for the lower 32 bits of timestamp for TX. */
+ uint32_t tx_ts_reg_off_lower;
+ /* Offset of the PTP register for the upper 32 bits of timestamp for TX. */
+ uint32_t tx_ts_reg_off_upper;
+ /* Offset of the PTP register for the sequence ID for TX. */
+ uint32_t tx_ts_reg_off_seq_id;
+ /* Offset of the PTP FIFO register for TX. */
+ uint32_t tx_ts_reg_off_fifo;
+ /* PTP timestamp granularity for TX. */
+ uint32_t tx_ts_reg_off_granularity;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
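+
+/*
+ * Illustrative sketch (not part of the HSI definition): with the
+ * DIRECT_ACCESS flag set, the *_reg_off_* fields are register offsets
+ * the host can read itself. The 'bar' base pointer is a hypothetical
+ * stand-in for the driver's mapped register space, and a real driver
+ * would guard against rollover between the two 32-bit reads.
+ */
+static inline uint64_t
+example_read_rx_ts(const volatile uint8_t *bar,
+                   const struct hwrm_port_mac_ptp_qcfg_output *resp)
+{
+    uint32_t lo = *(const volatile uint32_t *)
+                  (bar + resp->rx_ts_reg_off_lower);
+    uint32_t hi = *(const volatile uint32_t *)
+                  (bar + resp->rx_ts_reg_off_upper);
+    /* Combine the two halves into one 64-bit timestamp. */
+    return ((uint64_t)hi << 32) | lo;
+}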
+
+/********************
+ * hwrm_port_qstats *
+ ********************/
+
+
+/* hwrm_port_qstats_input (size:320b/40B) */
struct hwrm_port_qstats_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port that is being queried. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2[3];
- uint8_t unused_3;
- uint64_t tx_stat_host_addr;
- /* This is the host address where Tx port statistics will be stored */
- uint64_t rx_stat_host_addr;
- /* This is the host address where Rx port statistics will be stored */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+ /*
+ * This is the host address where
+ * Tx port statistics will be stored
+ */
+ uint64_t tx_stat_host_addr;
+ /*
+ * This is the host address where
+ * Rx port statistics will be stored
+ */
+ uint64_t rx_stat_host_addr;
} __attribute__((packed));
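+
+/*
+ * Illustrative sketch (not part of the HSI definition): pointing the
+ * qstats command at two DMA-able buffers. The IOVA parameters are
+ * hypothetical; a real driver passes bus addresses from its DMA
+ * allocator and converts fields to little-endian.
+ */
+static inline void
+example_port_qstats_req(struct hwrm_port_qstats_input *req,
+                        uint16_t port_id, uint64_t tx_stats_iova,
+                        uint64_t rx_stats_iova)
+{
+    *req = (struct hwrm_port_qstats_input){ 0 };
+    req->port_id = port_id;
+    /* Firmware DMA-writes the Tx/Rx statistics blocks here. */
+    req->tx_stat_host_addr = tx_stats_iova;
+    req->rx_stat_host_addr = rx_stats_iova;
+}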
-/* Output (16 bytes) */
+/* hwrm_port_qstats_output (size:128b/16B) */
struct hwrm_port_qstats_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The size of TX port statistics block in bytes. */
+ uint16_t tx_stat_size;
+ /* The size of RX port statistics block in bytes. */
+ uint16_t rx_stat_size;
+ uint8_t unused_0[3];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/************************
+ * hwrm_port_qstats_ext *
+ ************************/
+
+
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t tx_stat_size;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ /*
+ * The size of TX port extended
+ * statistics block in bytes.
+ */
+ uint16_t tx_stat_size;
+ /*
+ * The size of RX port extended
+ * statistics block in bytes.
+ */
+ uint16_t rx_stat_size;
+ uint8_t unused_0[2];
+ /*
+ * This is the host address where
+ * Tx port statistics will be stored
+ */
+ uint64_t tx_stat_host_addr;
+ /*
+ * This is the host address where
+ * Rx port statistics will be stored
+ */
+ uint64_t rx_stat_host_addr;
+} __attribute__((packed));
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* The size of TX port statistics block in bytes. */
- uint16_t rx_stat_size;
+ uint16_t tx_stat_size;
/* The size of RX port statistics block in bytes. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t rx_stat_size;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_port_clr_stats */
-/*
- * Description: This function clears per port statistics. The HWRM shall not
- * allow a VF driver to clear port statistics. The HWRM shall not allow a PF
- * driver to clear port statistics in a partitioning mode. The HWRM may allow a
- * PF driver to clear port statistics in the non-partitioning mode.
- */
-/* Input (24 bytes) */
+/*************************
+ * hwrm_port_lpbk_qstats *
+ *************************/
+
+
+/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+struct hwrm_port_lpbk_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+struct hwrm_port_lpbk_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of transmitted unicast frames */
+ uint64_t lpbk_ucast_frames;
+ /* Number of transmitted multicast frames */
+ uint64_t lpbk_mcast_frames;
+ /* Number of transmitted broadcast frames */
+ uint64_t lpbk_bcast_frames;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t lpbk_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t lpbk_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t lpbk_bcast_bytes;
+ /* Total Tx Drops for loopback traffic reported by STATS block */
+ uint64_t tx_stat_discard;
+ /* Total Tx Error Drops for loopback traffic reported by STATS block */
+ uint64_t tx_stat_error;
+ /* Total Rx Drops for loopback traffic reported by STATS block */
+ uint64_t rx_stat_discard;
+ /* Total Rx Error Drops for loopback traffic reported by STATS block */
+ uint64_t rx_stat_error;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
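+
+/*
+ * Illustrative sketch (not part of the HSI definition): aggregating the
+ * loopback counters above into a single frame total. The helper name is
+ * hypothetical.
+ */
+static inline uint64_t
+example_lpbk_total_frames(const struct hwrm_port_lpbk_qstats_output *resp)
+{
+    return resp->lpbk_ucast_frames + resp->lpbk_mcast_frames +
+           resp->lpbk_bcast_frames;
+}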
+
+/***********************
+ * hwrm_port_clr_stats *
+ ***********************/
+
+
+/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port that is being queried. */
- uint16_t unused_0[3];
+ uint16_t port_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_port_clr_stats_output (size:128b/16B) */
struct hwrm_port_clr_stats_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_port_lpbk_clr_stats *
+ ****************************/
+
+
+/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
} __attribute__((packed));
-/* hwrm_port_led_cfg */
-/*
- * Description: This function is used to configure LEDs on a given port. Each
- * port has individual set of LEDs associated with it. These LEDs are used for
- * speed/link configuration as well as activity indicator configuration. Up to
- * three LEDs can be configured, one for activity and two for speeds.
- */
-/* Input (64 bytes) */
+/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+struct hwrm_port_lpbk_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_port_ts_query *
+ **********************/
+
+
+/* hwrm_port_ts_query_input (size:192b/24B) */
+struct hwrm_port_ts_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_LAST \
+ HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Timestamp value of PTP message captured. */
+ uint64_t ptp_msg_ts;
+ /* Sequence ID of the PTP message captured. */
+ uint16_t ptp_msg_seqid;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
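+
+/*
+ * Illustrative sketch (not part of the HSI definition): requesting the
+ * most recently captured TX-side PTP timestamp for a port. PATH is a
+ * one-bit field, so TX (0) and RX (1) are mutually exclusive. The
+ * helper name is hypothetical.
+ */
+static inline void
+example_ts_query_tx(struct hwrm_port_ts_query_input *req, uint16_t port_id)
+{
+    *req = (struct hwrm_port_ts_query_input){ 0 };
+    req->flags = HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
+    req->port_id = port_id;
+}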
+
+/***********************
+ * hwrm_port_phy_qcaps *
+ ***********************/
+
+
+/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+struct hwrm_port_phy_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+struct hwrm_port_phy_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* PHY capability flags */
+ uint8_t flags;
+ /*
+ * If set to 1, then this field indicates that the
+ * link is capable of supporting EEE.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then this field indicates that the
+ * PHY is capable of supporting external loopback.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EXTERNAL_LPBK_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * Reserved field. The HWRM shall set this field to 0.
+ * An HWRM client shall ignore this field.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_MASK \
+ UINT32_C(0xfc)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_RSVD1_SFT 2
+ /* Number of front panel ports for this device. */
+ uint8_t port_cnt;
+ /* Not supported or unknown */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_UNKNOWN UINT32_C(0x0)
+ /* single port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_1 UINT32_C(0x1)
+ /* 2-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_2 UINT32_C(0x2)
+ /* 3-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_3 UINT32_C(0x3)
+ /* 4-port device */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4 UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_LAST \
+ HWRM_PORT_PHY_QCAPS_OUTPUT_PORT_CNT_4
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * as forced speeds on this link.
+ * For each speed that can be forced on this link, the
+ * corresponding mask bit shall be set to '1'.
+ */
+ uint16_t supported_speeds_force_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for autonegotiation on this link.
+ * For each speed that can be autonegotiated on this link, the
+ * corresponding mask bit shall be set to '1'.
+ */
+ uint16_t supported_speeds_auto_mode;
+ /* 100Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100MB \
+ UINT32_C(0x2)
+ /* 1Gb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_1GB \
+ UINT32_C(0x8)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2GB \
+ UINT32_C(0x10)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10GB \
+ UINT32_C(0x40)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_20GB \
+ UINT32_C(0x80)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_25GB \
+ UINT32_C(0x100)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_40GB \
+ UINT32_C(0x200)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_50GB \
+ UINT32_C(0x400)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_100GB \
+ UINT32_C(0x800)
+ /* 10Mb link speed (Half-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD \
+ UINT32_C(0x1000)
+ /* 10Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_AUTO_MODE_10MB \
+ UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for EEE on this link.
+ * For each speed that can be autonegotiated when EEE is enabled
+ * on this link, the corresponding mask bit shall be set to '1'.
+ * This field is only valid when eee_supported is set to '1'.
+ */
+ uint16_t supported_speeds_eee_mode;
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 \
+ UINT32_C(0x1)
+ /* 100Mb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_100MB \
+ UINT32_C(0x2)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 \
+ UINT32_C(0x4)
+ /* 1Gb link speed (Full-duplex) */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_1GB \
+ UINT32_C(0x8)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 \
+ UINT32_C(0x10)
+ /* Reserved */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 \
+ UINT32_C(0x20)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_EEE_MODE_10GB \
+ UINT32_C(0x40)
+ uint32_t tx_lpi_timer_low;
+ /*
+ * The lowest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_SFT 0
+ /*
+ * Reserved field. The HWRM shall set this field to 0.
+ * An HWRM client shall ignore this field.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_RSVD2_SFT 24
+ uint32_t valid_tx_lpi_timer_high;
+ /*
+ * The highest value of TX LPI timer that can be set on this link
+ * when EEE is enabled. This value is in microseconds.
+ * This field is valid only when eee_supported is set to '1'.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_SFT 0
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_VALID_SFT 24
+} __attribute__((packed));
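+
+/*
+ * Illustrative sketch (not part of the HSI definition): testing a
+ * forced-speed capability bit and unpacking valid_tx_lpi_timer_high,
+ * which carries the 24-bit upper LPI timer bound in its low bits and
+ * the 8-bit 'valid' marker in its top byte. Helper names are
+ * hypothetical.
+ */
+static inline int
+example_supports_forced_25g(const struct hwrm_port_phy_qcaps_output *resp)
+{
+    return !!(resp->supported_speeds_force_mode &
+              HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS_FORCE_MODE_25GB);
+}
+
+static inline uint32_t
+example_tx_lpi_timer_high(const struct hwrm_port_phy_qcaps_output *resp)
+{
+    return resp->valid_tx_lpi_timer_high &
+           HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK;
+}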
+
+/***************************
+ * hwrm_port_phy_i2c_write *
+ ***************************/
+
+
+/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
+struct hwrm_port_phy_i2c_write_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the page_offset field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_I2C_WRITE_INPUT_ENABLES_PAGE_OFFSET \
+ UINT32_C(0x1)
+ /* Port ID of port. */
+ uint16_t port_id;
+ /* 8-bit I2C slave address. */
+ uint8_t i2c_slave_addr;
+ uint8_t unused_0;
+ /* The page number that is being accessed over I2C. */
+ uint16_t page_number;
+ /* Offset within the page that is being accessed over I2C. */
+ uint16_t page_offset;
+ /*
+ * Length of data to write, in bytes starting at the offset
+ * specified above. If the offset is not specified, then
+ * the data shall be written from the beginning of the page.
+ */
+ uint8_t data_length;
+ uint8_t unused_1[7];
+ /* Up to 64B of data. */
+ uint32_t data[16];
+} __attribute__((packed));
+
+/* hwrm_port_phy_i2c_write_output (size:128b/16B) */
+struct hwrm_port_phy_i2c_write_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_port_phy_i2c_read *
+ **************************/
+
+
+/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+struct hwrm_port_phy_i2c_read_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the page_offset field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET \
+ UINT32_C(0x1)
+ /* Port ID of port. */
+ uint16_t port_id;
+ /* 8-bit I2C slave address. */
+ uint8_t i2c_slave_addr;
+ uint8_t unused_0;
+ /* The page number that is being accessed over I2C. */
+ uint16_t page_number;
+ /* Offset within the page that is being accessed over I2C. */
+ uint16_t page_offset;
+ /*
+	 * Length of data to read, in bytes, starting at the offset
+ * specified above. If the offset is not specified, then
+ * the data shall be read from the beginning of the page.
+ */
+ uint8_t data_length;
+ uint8_t unused_1[7];
+} __attribute__((packed));
+
+/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+struct hwrm_port_phy_i2c_read_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Up to 64B of data. */
+ uint32_t data[16];
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
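
The valid byte documented above is the synchronization point between firmware and driver: the response is DMA'd into the buffer at resp_addr, and because the firmware writes valid last, no other field may be trusted until it reads as 1. A minimal polling sketch, assuming a caller-chosen spin budget (the real driver's timeout and barrier discipline will differ):

#include <stdbool.h>
#include <stdint.h>

/* Spin on the valid byte of a DMA'd HWRM response. Firmware writes
 * this byte last, so once it reads 1 the preceding fields are stable. */
static bool
hwrm_resp_ready(const struct hwrm_port_phy_i2c_read_output *resp,
		unsigned long max_spins)
{
	while (max_spins--) {
		/* volatile read: the device updates this memory behind
		 * the compiler's back */
		if (*(volatile const uint8_t *)&resp->valid == 1)
			return true;	/* a read barrier belongs here */
	}
	return false;
}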
+
+/*********************
+ * hwrm_port_led_cfg *
+ *********************/
+
+
+/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the led0_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the led0_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the led0_color field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the led0_id field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID UINT32_C(0x1)
- /* This bit must be '1' for the led0_state field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE UINT32_C(0x2)
- /* This bit must be '1' for the led0_color field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR \
+ UINT32_C(0x4)
/*
* This bit must be '1' for the led0_blink_on field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON UINT32_C(0x8)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON \
+ UINT32_C(0x8)
/*
* This bit must be '1' for the led0_blink_off field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF UINT32_C(0x10)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF \
+ UINT32_C(0x10)
/*
* This bit must be '1' for the led0_group_id field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID UINT32_C(0x20)
- /* This bit must be '1' for the led1_id field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID UINT32_C(0x40)
- /* This bit must be '1' for the led1_state field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE UINT32_C(0x80)
- /* This bit must be '1' for the led1_color field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR UINT32_C(0x100)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_GROUP_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the led1_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_ID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the led1_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_STATE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the led1_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_COLOR \
+ UINT32_C(0x100)
/*
* This bit must be '1' for the led1_blink_on field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON UINT32_C(0x200)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_ON \
+ UINT32_C(0x200)
/*
* This bit must be '1' for the led1_blink_off field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF UINT32_C(0x400)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_BLINK_OFF \
+ UINT32_C(0x400)
/*
* This bit must be '1' for the led1_group_id field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID UINT32_C(0x800)
- /* This bit must be '1' for the led2_id field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID UINT32_C(0x1000)
- /* This bit must be '1' for the led2_state field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE UINT32_C(0x2000)
- /* This bit must be '1' for the led2_color field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR UINT32_C(0x4000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED1_GROUP_ID \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the led2_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_ID \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the led2_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_STATE \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the led2_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_COLOR \
+ UINT32_C(0x4000)
/*
* This bit must be '1' for the led2_blink_on field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON UINT32_C(0x8000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_ON \
+ UINT32_C(0x8000)
/*
* This bit must be '1' for the led2_blink_off field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF UINT32_C(0x10000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_BLINK_OFF \
+ UINT32_C(0x10000)
/*
* This bit must be '1' for the led2_group_id field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID UINT32_C(0x20000)
- /* This bit must be '1' for the led3_id field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID UINT32_C(0x40000)
- /* This bit must be '1' for the led3_state field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE UINT32_C(0x80000)
- /* This bit must be '1' for the led3_color field to be configured. */
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR UINT32_C(0x100000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED2_GROUP_ID \
+ UINT32_C(0x20000)
+ /*
+ * This bit must be '1' for the led3_id field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_ID \
+ UINT32_C(0x40000)
+ /*
+ * This bit must be '1' for the led3_state field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_STATE \
+ UINT32_C(0x80000)
+ /*
+ * This bit must be '1' for the led3_color field to be
+ * configured.
+ */
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_COLOR \
+ UINT32_C(0x100000)
/*
* This bit must be '1' for the led3_blink_on field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON UINT32_C(0x200000)
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_ON \
+ UINT32_C(0x200000)
/*
* This bit must be '1' for the led3_blink_off field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_BLINK_OFF \
UINT32_C(0x400000)
/*
* This bit must be '1' for the led3_group_id field to be
* configured.
*/
- #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID UINT32_C(0x800000)
- uint16_t port_id;
+ #define HWRM_PORT_LED_CFG_INPUT_ENABLES_LED3_GROUP_ID \
+ UINT32_C(0x800000)
/* Port ID of port whose LEDs are configured. */
- uint8_t num_leds;
+ uint16_t port_id;
/*
- * The number of LEDs that are being configured. Up to 4 LEDs
- * can be configured with this command.
+ * The number of LEDs that are being configured.
+ * Up to 4 LEDs can be configured with this command.
*/
- uint8_t rsvd;
+ uint8_t num_leds;
/* Reserved field. */
- uint8_t led0_id;
+ uint8_t rsvd;
/* An identifier for the LED #0. */
- uint8_t led0_state;
+ uint8_t led0_id;
/* The requested state of the LED #0. */
+ uint8_t led0_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led0_color;
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT
/* The requested color of LED #0. */
+ uint8_t led0_color;
/* Default */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_0;
- uint16_t led0_blink_on;
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREENAMBER
+ uint8_t unused_0;
/*
- * If the LED #0 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led0_blink_off;
+ uint16_t led0_blink_on;
/*
- * If the LED #0 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led0_group_id;
+ uint16_t led0_blink_off;
/*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 shall not be grouped and shall
- * be treated as an individual resource. For all other non-zero
- * values of this field, LED #0 shall be grouped together with
- * the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #0 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t rsvd0;
+ uint8_t led0_group_id;
/* Reserved field. */
- uint8_t led1_id;
+ uint8_t rsvd0;
/* An identifier for the LED #1. */
- uint8_t led1_state;
+ uint8_t led1_id;
/* The requested state of the LED #1. */
+ uint8_t led1_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led1_color;
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED1_STATE_BLINKALT
/* The requested color of LED #1. */
+ uint8_t led1_color;
/* Default */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_1;
- uint16_t led1_blink_on;
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED1_COLOR_GREENAMBER
+ uint8_t unused_1;
/*
- * If the LED #1 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led1_blink_off;
+ uint16_t led1_blink_on;
/*
- * If the LED #1 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led1_group_id;
+ uint16_t led1_blink_off;
/*
- * An identifier for the group of LEDs that LED #1 belongs to.
- * If set to 0, then the LED #1 shall not be grouped and shall
- * be treated as an individual resource. For all other non-zero
- * values of this field, LED #1 shall be grouped together with
- * the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #1 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t rsvd1;
+ uint8_t led1_group_id;
/* Reserved field. */
- uint8_t led2_id;
+ uint8_t rsvd1;
/* An identifier for the LED #2. */
- uint8_t led2_state;
+ uint8_t led2_id;
/* The requested state of the LED #2. */
+ uint8_t led2_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led2_color;
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED2_STATE_BLINKALT
/* The requested color of LED #2. */
+ uint8_t led2_color;
/* Default */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_2;
- uint16_t led2_blink_on;
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED2_COLOR_GREENAMBER
+ uint8_t unused_2;
/*
- * If the LED #2 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led2_blink_off;
+ uint16_t led2_blink_on;
/*
- * If the LED #2 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led2_group_id;
+ uint16_t led2_blink_off;
/*
- * An identifier for the group of LEDs that LED #2 belongs to.
- * If set to 0, then the LED #2 shall not be grouped and shall
- * be treated as an individual resource. For all other non-zero
- * values of this field, LED #2 shall be grouped together with
- * the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #2 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t rsvd2;
+ uint8_t led2_group_id;
/* Reserved field. */
- uint8_t led3_id;
+ uint8_t rsvd2;
/* An identifier for the LED #3. */
- uint8_t led3_state;
+ uint8_t led3_id;
/* The requested state of the LED #3. */
+ uint8_t led3_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led3_color;
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_STATE_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED3_STATE_BLINKALT
/* The requested color of LED #3. */
+ uint8_t led3_color;
/* Default */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_3;
- uint16_t led3_blink_on;
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_LAST \
+ HWRM_PORT_LED_CFG_INPUT_LED3_COLOR_GREENAMBER
+ uint8_t unused_3;
/*
- * If the LED #3 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led3_blink_off;
+ uint16_t led3_blink_on;
/*
- * If the LED #3 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led3_group_id;
+ uint16_t led3_blink_off;
/*
- * An identifier for the group of LEDs that LED #3 belongs to.
- * If set to 0, then the LED #3 shall not be grouped and shall
- * be treated as an individual resource. For all other non-zero
- * values of this field, LED #3 shall be grouped together with
- * the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 shall not be grouped and
+ * shall be treated as an individual resource.
+ * For all other non-zero values of this field, LED #3 shall
+ * be grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t rsvd3;
+ uint8_t led3_group_id;
/* Reserved field. */
+ uint8_t rsvd3;
} __attribute__((packed));
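
The enables bitmap at the top of this request gates every optional field: firmware applies only the fields whose bits are set, which is how one command can update any subset of the four LEDs. A sketch that blinks LED #0 green at 500 ms on / 500 ms off, reusing the hypothetical hwrm_send() transport from the earlier I2C sketch (HWRM_PORT_LED_CFG is assumed to be defined elsewhere in this header):

#include <stdint.h>
#include <string.h>

int hwrm_send(void *req, size_t req_len);	/* hypothetical transport */

static int
port_led_blink_green(uint16_t port_id, uint8_t led_id)
{
	struct hwrm_port_led_cfg_input req;

	memset(&req, 0, sizeof(req));
	req.req_type = HWRM_PORT_LED_CFG;	/* command ID (assumed) */
	req.target_id = 0xFFFF;
	req.port_id = port_id;
	req.num_leds = 1;			/* configuring LED #0 only */
	req.led0_id = led_id;
	req.led0_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINK;
	req.led0_color = HWRM_PORT_LED_CFG_INPUT_LED0_COLOR_GREEN;
	req.led0_blink_on = 500;		/* ms on per blink cycle */
	req.led0_blink_off = 500;		/* ms off per blink cycle */
	/* Firmware ignores any led0_* field without its enables bit. */
	req.enables = HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_ID |
		      HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_STATE |
		      HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_COLOR |
		      HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_ON |
		      HWRM_PORT_LED_CFG_INPUT_ENABLES_LED0_BLINK_OFF;

	return hwrm_send(&req, sizeof(req));
}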
-/* Output (16 bytes) */
+/* hwrm_port_led_cfg_output (size:128b/16B) */
struct hwrm_port_led_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_port_led_qcfg */
-/*
- * Description: This function is used to query configuration of LEDs on a given
- * port. Each port has individual set of LEDs associated with it. These LEDs are
- * used for speed/link configuration as well as activity indicator
- * configuration. Up to three LEDs can be configured, one for activity and two
- * for speeds.
- */
-/* Input (24 bytes) */
+/**********************
+ * hwrm_port_led_qcfg *
+ **********************/
+
+
+/* hwrm_port_led_qcfg_input (size:192b/24B) */
struct hwrm_port_led_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port whose LED configuration is being queried. */
- uint16_t unused_0[3];
+ uint16_t port_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (56 bytes) */
+/* hwrm_port_led_qcfg_output (size:448b/56B) */
struct hwrm_port_led_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint8_t num_leds;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * The number of LEDs that are configured on this port. Up to 4
- * LEDs can be returned in the response.
+ * The number of LEDs that are configured on this port.
+ * Up to 4 LEDs can be returned in the response.
*/
- uint8_t led0_id;
+ uint8_t num_leds;
/* An identifier for the LED #0. */
- uint8_t led0_type;
+ uint8_t led0_id;
/* The type of LED #0. */
+ uint8_t led0_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
- uint8_t led0_state;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_TYPE_INVALID
/* The current state of the LED #0. */
+ uint8_t led0_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led0_color;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_BLINKALT
/* The color of LED #0. */
+ uint8_t led0_color;
/* Default */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_0;
- uint16_t led0_blink_on;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED0_COLOR_GREENAMBER
+ uint8_t unused_0;
/*
- * If the LED #0 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led0_blink_off;
+ uint16_t led0_blink_on;
/*
- * If the LED #0 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #0 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led0_group_id;
+ uint16_t led0_blink_off;
/*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 is not grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 is not grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t led1_id;
+ uint8_t led0_group_id;
/* An identifier for the LED #1. */
- uint8_t led1_type;
+ uint8_t led1_id;
/* The type of LED #1. */
+ uint8_t led1_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
- uint8_t led1_state;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_TYPE_INVALID
/* The current state of the LED #1. */
+ uint8_t led1_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led1_color;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_STATE_BLINKALT
/* The color of LED #1. */
+ uint8_t led1_color;
/* Default */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_1;
- uint16_t led1_blink_on;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED1_COLOR_GREENAMBER
+ uint8_t unused_1;
/*
- * If the LED #1 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led1_blink_off;
+ uint16_t led1_blink_on;
/*
- * If the LED #1 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #1 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led1_group_id;
+ uint16_t led1_blink_off;
/*
- * An identifier for the group of LEDs that LED #1 belongs to.
- * If set to 0, then the LED #1 is not grouped. For all other
- * non-zero values of this field, LED #1 is grouped together
- * with the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+ * If set to 0, then the LED #1 is not grouped.
+ * For all other non-zero values of this field, LED #1 is
+ * grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t led2_id;
+ uint8_t led1_group_id;
/* An identifier for the LED #2. */
- uint8_t led2_type;
+ uint8_t led2_id;
/* The type of LED #2. */
+ uint8_t led2_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
- uint8_t led2_state;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_TYPE_INVALID
/* The current state of the LED #2. */
+ uint8_t led2_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led2_color;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_STATE_BLINKALT
/* The color of LED #2. */
+ uint8_t led2_color;
/* Default */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_2;
- uint16_t led2_blink_on;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED2_COLOR_GREENAMBER
+ uint8_t unused_2;
/*
- * If the LED #2 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led2_blink_off;
+ uint16_t led2_blink_on;
/*
- * If the LED #2 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #2 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led2_group_id;
+ uint16_t led2_blink_off;
/*
- * An identifier for the group of LEDs that LED #2 belongs to.
- * If set to 0, then the LED #2 is not grouped. For all other
- * non-zero values of this field, LED #2 is grouped together
- * with the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #2 belongs
+ * to.
+ * If set to 0, then the LED #2 is not grouped.
+ * For all other non-zero values of this field, LED #2 is
+ * grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t led3_id;
+ uint8_t led2_group_id;
/* An identifier for the LED #3. */
- uint8_t led3_type;
+ uint8_t led3_id;
/* The type of LED #3. */
+ uint8_t led3_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
- uint8_t led3_state;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_TYPE_INVALID
/* The current state of the LED #3. */
+ uint8_t led3_state;
/* Default state of the LED */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_DEFAULT UINT32_C(0x0)
/* Off */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_OFF UINT32_C(0x1)
/* On */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_ON UINT32_C(0x2)
/* Blink */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINK UINT32_C(0x3)
/* Blink Alternately */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
- uint8_t led3_color;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT UINT32_C(0x4)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_STATE_BLINKALT
/* The color of LED #3. */
+ uint8_t led3_color;
/* Default */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_DEFAULT UINT32_C(0x0)
/* Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_AMBER UINT32_C(0x1)
/* Green */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREEN UINT32_C(0x2)
/* Green or Amber */
- #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
- uint8_t unused_3;
- uint16_t led3_blink_on;
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER UINT32_C(0x3)
+ #define HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_LAST \
+ HWRM_PORT_LED_QCFG_OUTPUT_LED3_COLOR_GREENAMBER
+ uint8_t unused_3;
/*
- * If the LED #3 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED on
- * between cycles.
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED on between cycles.
*/
- uint16_t led3_blink_off;
+ uint16_t led3_blink_on;
/*
- * If the LED #3 state is "blink" or "blinkalt", then this field
- * represents the requested time in milliseconds to keep LED off
- * between cycles.
+ * If the LED #3 state is "blink" or "blinkalt", then
+ * this field represents the requested time in milliseconds
+ * to keep LED off between cycles.
*/
- uint8_t led3_group_id;
+ uint16_t led3_blink_off;
/*
- * An identifier for the group of LEDs that LED #3 belongs to.
- * If set to 0, then the LED #3 is not grouped. For all other
- * non-zero values of this field, LED #3 is grouped together
- * with the LEDs with the same group ID value.
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+ * If set to 0, then the LED #3 is not grouped.
+ * For all other non-zero values of this field, LED #3 is
+ * grouped together with the LEDs with the same group ID
+ * value.
*/
- uint8_t unused_4;
- uint16_t unused_5;
- uint8_t unused_6;
- uint8_t unused_7;
- uint8_t unused_8;
- uint8_t valid;
+ uint8_t led3_group_id;
+ uint8_t unused_4[6];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
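
Note that the query response lays the four LEDs out as flat, individually named field groups rather than an array, so callers reference each LED by name. A small sketch that dumps LED #0 once the response's valid byte has been confirmed:

#include <stdio.h>

static void
dump_led0(const struct hwrm_port_led_qcfg_output *resp)
{
	if (resp->num_leds == 0) {
		printf("no LEDs configured on this port\n");
		return;
	}
	printf("LED0: id=%u type=%u state=%u color=%u blink=%u/%u ms\n",
	       (unsigned)resp->led0_id, (unsigned)resp->led0_type,
	       (unsigned)resp->led0_state, (unsigned)resp->led0_color,
	       (unsigned)resp->led0_blink_on,
	       (unsigned)resp->led0_blink_off);
}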
-/* hwrm_port_led_qcaps */
-/*
- * Description: This function is used to query capabilities of LEDs on a given
- * port. Each port has individual set of LEDs associated with it. These LEDs are
- * used for speed/link configuration as well as activity indicator
- * configuration.
- */
-/* Input (24 bytes) */
+/***********************
+ * hwrm_port_led_qcaps *
+ ***********************/
+
+
+/* hwrm_port_led_qcaps_input (size:192b/24B) */
struct hwrm_port_led_qcaps_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t port_id;
+ uint64_t resp_addr;
/* Port ID of port whose LED configuration is being queried. */
- uint16_t unused_0[3];
+ uint16_t port_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
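
The output struct that follows reports per-LED state_caps and color_caps bitmaps, which a driver is expected to test before issuing hwrm_port_led_cfg. A sketch for LED #0, using the capability defines declared below:

#include <stdbool.h>

static bool
led0_can_blink_green(const struct hwrm_port_led_qcaps_output *caps)
{
	return (caps->led0_state_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED) &&
	       (caps->led0_state_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED) &&
	       (caps->led0_color_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED);
}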
-/* Output (48 bytes) */
+/* hwrm_port_led_qcaps_output (size:384b/48B) */
struct hwrm_port_led_qcaps_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint8_t num_leds;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * The number of LEDs that are configured on this port. Up to 4
- * LEDs can be returned in the response.
+ * The number of LEDs that are configured on this port.
+ * Up to 4 LEDs can be returned in the response.
*/
- uint8_t unused_0[3];
+ uint8_t num_leds;
/* Reserved for future use. */
- uint8_t led0_id;
+ uint8_t unused[3];
/* An identifier for the LED #0. */
- uint8_t led0_type;
+ uint8_t led0_id;
/* The type of LED #0. */
+ uint8_t led0_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
- uint8_t led0_group_id;
- /*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
- */
- uint8_t unused_1;
- uint16_t led0_state_caps;
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED0_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #0 belongs
+ * to.
+ * If set to 0, then the LED #0 cannot be grouped.
+ * For all other non-zero values of this field, LED #0 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led0_group_id;
+ uint8_t unused_0;
/* The states supported by LED #0. */
+ uint16_t led0_state_caps;
/*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
/*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_OFF_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_ON_SUPPORTED \
UINT32_C(0x4)
/*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_SUPPORTED \
UINT32_C(0x8)
/*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED \
UINT32_C(0x10)
- uint16_t led0_color_caps;
/* The colors supported by LED #0. */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint16_t led0_color_caps;
+	/* Reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
/*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_AMBER_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED0_COLOR_CAPS_GREEN_SUPPORTED \
UINT32_C(0x4)
- uint8_t led1_id;
/* An identifier for the LED #1. */
- uint8_t led1_type;
+ uint8_t led1_id;
/* The type of LED #1. */
+ uint8_t led1_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
- uint8_t led1_group_id;
- /*
- * An identifier for the group of LEDs that LED #1 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
- */
- uint8_t unused_2;
- uint16_t led1_state_caps;
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED1_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #1 belongs
+ * to.
+	 * If set to 0, then the LED #1 cannot be grouped.
+	 * For all other non-zero values of this field, LED #1 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led1_group_id;
+ uint8_t unused_1;
/* The states supported by LED #1. */
+ uint16_t led1_state_caps;
/*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
/*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_OFF_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_ON_SUPPORTED \
UINT32_C(0x4)
/*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED \
UINT32_C(0x8)
/*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED \
UINT32_C(0x10)
- uint16_t led1_color_caps;
/* The colors supported by LED #1. */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint16_t led1_color_caps;
+	/* Reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
/*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_AMBER_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED1_COLOR_CAPS_GREEN_SUPPORTED \
UINT32_C(0x4)
- uint8_t led2_id;
/* An identifier for the LED #2. */
- uint8_t led2_type;
+ uint8_t led2_id;
/* The type of LED #2. */
+ uint8_t led2_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
- uint8_t led2_group_id;
- /*
- * An identifier for the group of LEDs that LED #0 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
- */
- uint8_t unused_3;
- uint16_t led2_state_caps;
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED2_TYPE_INVALID
+ /*
+	 * An identifier for the group of LEDs that LED #2 belongs
+	 * to.
+	 * If set to 0, then the LED #2 cannot be grouped.
+	 * For all other non-zero values of this field, LED #2 is
+	 * grouped together with the LEDs with the same group ID
+	 * value.
+ */
+ uint8_t led2_group_id;
+ uint8_t unused_2;
/* The states supported by LED #2. */
+ uint16_t led2_state_caps;
/*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
/*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_OFF_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_ON_SUPPORTED \
UINT32_C(0x4)
/*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_SUPPORTED \
UINT32_C(0x8)
/*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED \
UINT32_C(0x10)
- uint16_t led2_color_caps;
/* The colors supported by LED #2. */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint16_t led2_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
/*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_AMBER_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED2_COLOR_CAPS_GREEN_SUPPORTED \
UINT32_C(0x4)
- uint8_t led3_id;
/* An identifier for the LED #3. */
- uint8_t led3_type;
+ uint8_t led3_id;
/* The type of LED #3. */
+ uint8_t led3_type;
/* Speed LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_SPEED UINT32_C(0x0)
/* Activity LED */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_ACTIVITY UINT32_C(0x1)
/* Invalid */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
- uint8_t led3_group_id;
- /*
- * An identifier for the group of LEDs that LED #3 belongs to.
- * If set to 0, then the LED #0 cannot be grouped. For all other
- * non-zero values of this field, LED #0 is grouped together
- * with the LEDs with the same group ID value.
- */
- uint8_t unused_4;
- uint16_t led3_state_caps;
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID UINT32_C(0xff)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_LAST \
+ HWRM_PORT_LED_QCAPS_OUTPUT_LED3_TYPE_INVALID
+ /*
+ * An identifier for the group of LEDs that LED #3 belongs
+ * to.
+	 * If set to 0, then the LED #3 cannot be grouped.
+	 * For all other non-zero values of this field, LED #3 is
+ * grouped together with the LEDs with the same group ID
+ * value.
+ */
+ uint8_t led3_group_id;
+ uint8_t unused_3;
/* The states supported by LED #3. */
+ uint16_t led3_state_caps;
/*
- * If set to 1, this LED is enabled. If set to 0, this LED is
- * disabled.
+ * If set to 1, this LED is enabled.
+ * If set to 0, this LED is disabled.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED UINT32_C(0x1)
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ENABLED \
+ UINT32_C(0x1)
/*
- * If set to 1, off state is supported on this LED. If set to 0,
- * off state is not supported on this LED.
+ * If set to 1, off state is supported on this LED.
+ * If set to 0, off state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_OFF_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, on state is supported on this LED. If set to 0,
- * on state is not supported on this LED.
+ * If set to 1, on state is supported on this LED.
+ * If set to 0, on state is not supported on this LED.
*/
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_ON_SUPPORTED \
UINT32_C(0x4)
/*
- * If set to 1, blink state is supported on this LED. If set to
- * 0, blink state is not supported on this LED.
+ * If set to 1, blink state is supported on this LED.
+ * If set to 0, blink state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_SUPPORTED \
UINT32_C(0x8)
/*
- * If set to 1, blink_alt state is supported on this LED. If set
- * to 0, blink_alt state is not supported on this LED.
+ * If set to 1, blink_alt state is supported on this LED.
+ * If set to 0, blink_alt state is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED \
UINT32_C(0x10)
- uint16_t led3_color_caps;
/* The colors supported by LED #3. */
- /* reserved */
- #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD UINT32_C(0x1)
+ uint16_t led3_color_caps;
+ /* reserved. */
+ #define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_RSVD \
+ UINT32_C(0x1)
/*
- * If set to 1, Amber color is supported on this LED. If set to
- * 0, Amber color is not supported on this LED.
+ * If set to 1, Amber color is supported on this LED.
+ * If set to 0, Amber color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_AMBER_SUPPORTED \
UINT32_C(0x2)
/*
- * If set to 1, Green color is supported on this LED. If set to
- * 0, Green color is not supported on this LED.
+ * If set to 1, Green color is supported on this LED.
+ * If set to 0, Green color is not supported on this LED.
*/
#define HWRM_PORT_LED_QCAPS_OUTPUT_LED3_COLOR_CAPS_GREEN_SUPPORTED \
UINT32_C(0x4)
- uint8_t unused_5;
- uint8_t unused_6;
- uint8_t unused_7;
- uint8_t valid;
+ uint8_t unused_4[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
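The valid-byte convention described above lends itself to a short consumer-side sketch. This is illustrative only and not part of the diff: the struct name hwrm_port_led_qcaps_output is inferred from the macro prefix, and led1_can_blink is our own helper name. A real driver would poll valid in a loop with the appropriate barrier; a single read is enough to show the idea.

#include <stdbool.h>
#include <stdint.h>

static bool led1_can_blink(const struct hwrm_port_led_qcaps_output *resp)
{
	/* The valid byte is written last by the firmware, so check it
	 * before trusting any other field of the response. */
	if (resp->valid != 1)
		return false;

	return (resp->led1_state_caps &
		HWRM_PORT_LED_QCAPS_OUTPUT_LED1_STATE_CAPS_BLINK_SUPPORTED) != 0;
}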
-/* hwrm_queue_qportcfg */
-/*
- * Description: This function is called by a driver to query queue configuration
- * of a port. # The HWRM shall at least advertise one queue with lossy service
- * profile. # The driver shall use this command to query queue ids before
- * configuring or using any queues. # If a service profile is not set for a
- * queue, then the driver shall not use that queue without configuring a service
- * profile for it. # If the driver is not allowed to configure service profiles,
- * then the driver shall only use queues for which service profiles are pre-
- * configured.
- */
-/* Input (24 bytes) */
+/***********************
+ * hwrm_queue_qportcfg *
+ ***********************/
+
+
+/* hwrm_queue_qportcfg_input (size:192b/24B) */
struct hwrm_queue_qportcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
#define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
- QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
- uint16_t port_id;
+ HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
/*
* Port ID of port for which the queue configuration is being
- * queried. This field is only required when sent by IPC.
+ * queried. This field is only required when sent by IPC.
*/
- uint16_t unused_0;
+ uint16_t port_id;
+ /*
+	 * The driver sets this capability when it can use
+	 * queue_idx_service_profile to map the queues to the application.
+ */
+ uint8_t drv_qmap_cap;
+ /* disabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_DISABLED UINT32_C(0x0)
+ /* enabled */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_LAST \
+ HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED
+ uint8_t unused_0;
} __attribute__((packed));
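To make the input layout concrete, here is a hedged sketch of filling the request; build_qportcfg_req is a hypothetical helper, and the standard header fields (req_type, cmpl_ring, seq_id, resp_addr) are assumed to be filled by the HWRM transport before sending.

#include <stdint.h>
#include <string.h>

static void build_qportcfg_req(struct hwrm_queue_qportcfg_input *req,
			       uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	/* Query the TX path. */
	req->flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
	/* Only required when the command is sent by IPC. */
	req->port_id = port_id;
	/* Advertise that the driver can use queue_idx_service_profile. */
	req->drv_qmap_cap = HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
}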
-/* Output (32 bytes) */
+/* hwrm_queue_qportcfg_output (size:256b/32B) */
struct hwrm_queue_qportcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint8_t max_configurable_queues;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
* The maximum number of queues that can be configured on this
- * port. Valid values range from 1 through 8.
+ * port.
+ * Valid values range from 1 through 8.
*/
- uint8_t max_configurable_lossless_queues;
+ uint8_t max_configurable_queues;
/*
* The maximum number of lossless queues that can be configured
- * on this port. Valid values range from 0 through 8.
+ * on this port.
+ * Valid values range from 0 through 8.
*/
- uint8_t queue_cfg_allowed;
+ uint8_t max_configurable_lossless_queues;
/*
* Bitmask indicating which queues can be configured by the
- * hwrm_queue_cfg command. Each bit represents a specific queue
- * where bit 0 represents queue 0 and bit 7 represents queue 7.
+ * hwrm_queue_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
* # A value of 0 indicates that the queue is not configurable
- * by the hwrm_queue_cfg command. # A value of 1 indicates that
- * the queue is configurable. # A hwrm_queue_cfg command shall
- * return error when trying to configure a queue not
- * configurable.
+ * by the hwrm_queue_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+	 * # A hwrm_queue_cfg command shall return an error when trying to
+	 * configure a queue that is not configurable.
*/
- uint8_t queue_cfg_info;
+ uint8_t queue_cfg_allowed;
/* Information about queue configuration. */
- /*
- * If this flag is set to '1', then the queues are configured
- * asymmetrically on TX and RX sides. If this flag is set to
- * '0', then the queues are configured symmetrically on TX and
- * RX sides. For symmetric configuration, the queue
- * configuration including queue ids and service profiles on the
- * TX side is the same as the corresponding queue configuration
- * on the RX side.
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG UINT32_C(0x1)
- uint8_t queue_pfcenable_cfg_allowed;
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queues are
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then the queues are
+ * configured symmetrically on TX and RX sides. For
+ * symmetric configuration, the queue configuration
+ * including queue ids and service profiles on the
+ * TX side is the same as the corresponding queue
+ * configuration on the RX side.
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
/*
* Bitmask indicating which queues can be configured by the
- * hwrm_queue_pfcenable_cfg command. Each bit represents a
- * specific priority where bit 0 represents priority 0 and bit 7
- * represents priority 7. # A value of 0 indicates that the
- * priority is not configurable by the hwrm_queue_pfcenable_cfg
- * command. # A value of 1 indicates that the priority is
- * configurable. # A hwrm_queue_pfcenable_cfg command shall
- * return error when trying to configure a priority that is not
- * configurable.
- */
- uint8_t queue_pri2cos_cfg_allowed;
+ * hwrm_queue_pfcenable_cfg command.
+ *
+ * Each bit represents a specific priority where bit 0 represents
+ * priority 0 and bit 7 represents priority 7.
+ * # A value of 0 indicates that the priority is not configurable by
+ * the hwrm_queue_pfcenable_cfg command.
+ * # A value of 1 indicates that the priority is configurable.
+	 * # A hwrm_queue_pfcenable_cfg command shall return an error when
+ * trying to configure a priority that is not configurable.
+ */
+ uint8_t queue_pfcenable_cfg_allowed;
/*
* Bitmask indicating which queues can be configured by the
- * hwrm_queue_pri2cos_cfg command. Each bit represents a
- * specific queue where bit 0 represents queue 0 and bit 7
- * represents queue 7. # A value of 0 indicates that the queue
- * is not configurable by the hwrm_queue_pri2cos_cfg command. #
- * A value of 1 indicates that the queue is configurable. # A
- * hwrm_queue_pri2cos_cfg command shall return error when trying
- * to configure a queue that is not configurable.
+ * hwrm_queue_pri2cos_cfg command.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_pri2cos_cfg command.
+ * # A value of 1 indicates that the queue is configurable.
+	 * # A hwrm_queue_pri2cos_cfg command shall return an error when
+ * trying to configure a queue that is not configurable.
*/
- uint8_t queue_cos2bw_cfg_allowed;
+ uint8_t queue_pri2cos_cfg_allowed;
/*
* Bitmask indicating which queues can be configured by the
- * hwrm_queue_pri2cos_cfg command. Each bit represents a
- * specific queue where bit 0 represents queue 0 and bit 7
- * represents queue 7. # A value of 0 indicates that the queue
- * is not configurable by the hwrm_queue_pri2cos_cfg command. #
- * A value of 1 indicates that the queue is configurable. # A
- * hwrm_queue_pri2cos_cfg command shall return error when trying
- * to configure a queue not configurable.
- */
- uint8_t queue_id0;
- /*
- * ID of CoS Queue 0. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+	 * hwrm_queue_cos2bw_cfg command.
+	 *
+	 * Each bit represents a specific queue where bit 0 represents
+	 * queue 0 and bit 7 represents queue 7.
+	 * # A value of 0 indicates that the queue is not configurable
+	 * by the hwrm_queue_cos2bw_cfg command.
+	 * # A value of 1 indicates that the queue is configurable.
+	 * # A hwrm_queue_cos2bw_cfg command shall return an error when
+	 * trying to configure a queue that is not configurable.
+ */
+ uint8_t queue_cos2bw_cfg_allowed;
+ /*
+ * ID of CoS Queue 0.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id0_service_profile;
+ uint8_t queue_id0;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id0_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id1;
- /*
- * ID of CoS Queue 1. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 1.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id1_service_profile;
+ uint8_t queue_id1;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id1_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id2;
- /*
- * ID of CoS Queue 2. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 2.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id2_service_profile;
+ uint8_t queue_id2;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id2_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id3;
- /*
- * ID of CoS Queue 3. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 3.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id3_service_profile;
+ uint8_t queue_id3;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id3_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id4;
- /*
- * ID of CoS Queue 4. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 4.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id4_service_profile;
+ uint8_t queue_id4;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id4_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id5;
- /*
- * ID of CoS Queue 5. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 5.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id5_service_profile;
+ uint8_t queue_id5;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id5_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id6;
- /*
- * ID of CoS Queue 6. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 6.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id6_service_profile;
+ uint8_t queue_id6;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id6_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t queue_id7;
- /*
- * ID of CoS Queue 7. FF - Invalid id # This ID can be used on
- * any subsequent call to an hwrm command that takes a queue id.
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ /*
+ * ID of CoS Queue 7.
+ * FF - Invalid id
+ *
+ * # This ID can be used on any subsequent call to an hwrm command
+ * that takes a queue id.
* # IDs must always be queried by this command before any use
- * by the driver or software. # Any driver or software should
- * not make any assumptions about queue IDs. # A value of 0xff
- * indicates that the queue is not available. # Available queues
- * may not be in sequential order.
+ * by the driver or software.
+ * # Any driver or software should not make any assumptions about
+ * queue IDs.
+ * # A value of 0xff indicates that the queue is not available.
+ * # Available queues may not be in sequential order.
*/
- uint8_t queue_id7_service_profile;
+ uint8_t queue_id7;
/* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
+ uint8_t queue_id7_service_profile;
+ /* Lossy (best-effort) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
UINT32_C(0x0)
- /* Lossless */
+ /* Lossless (legacy) */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
UINT32_C(0x1)
- /*
- * Set to 0xFF... (All Fs) if there is no
- * service profile specified
- */
+ /* Lossless RoCE */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE \
+ UINT32_C(0x1)
+ /* Lossy RoCE CNP */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP \
+ UINT32_C(0x2)
+ /* Lossless NIC */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC \
+ UINT32_C(0x3)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
#define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
UINT32_C(0xff)
- uint8_t valid;
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
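Since queue_id0 through queue_id7_service_profile are consecutive uint8_t members of a packed struct, the eight (id, service profile) pairs can be walked with a stride of two. A sketch under that layout assumption; count_lossless_queues is our name, not a driver API:

#include <stdint.h>

static int count_lossless_queues(const struct hwrm_queue_qportcfg_output *resp)
{
	const uint8_t *pair = &resp->queue_id0;
	int i, lossless = 0;

	for (i = 0; i < 8; i++, pair += 2) {
		if (pair[0] == 0xff)	/* queue not available */
			continue;
		if (pair[1] ==
		    HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
			lossless++;
	}
	return lossless;
}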
-/*********************
- * hwrm_port_mac_cfg *
- *********************/
+/*******************
+ * hwrm_queue_qcfg *
+ *******************/
-/* hwrm_port_mac_cfg_input (size:320b/40B) */
-struct hwrm_port_mac_cfg_input {
+/* hwrm_queue_qcfg_input (size:192b/24B) */
+struct hwrm_queue_qcfg_input {
+ /* The HWRM command request type. */
uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
uint64_t resp_addr;
uint32_t flags;
- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
- uint32_t enables;
- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
- uint16_t port_id;
- uint8_t ipg;
- uint8_t lpbk;
- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
- uint8_t vlan_pri2cos_map_pri;
- uint8_t reserved1;
- uint8_t tunnel_pri2cos_map_pri;
- uint8_t dscp2pri_map_pri;
- uint16_t rx_ts_capture_ptp_msg_type;
- uint16_t tx_ts_capture_ptp_msg_type;
- uint8_t cos_field_cfg;
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
- (0x1UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 1)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
- PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
- (0x1UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 3)
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
- PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- uint8_t unused_0[3];
-};
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_QCFG_INPUT_FLAGS_PATH_RX
+ /* Queue ID of the queue. */
+ uint32_t queue_id;
+} __attribute__((packed));
+
+/* hwrm_queue_qcfg_output (size:128b/16B) */
+struct hwrm_queue_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+	 * This value is the estimated packet length used in the
+	 * TX arbiter.
+ */
+ uint32_t queue_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_QCFG_OUTPUT_SERVICE_PROFILE_UNKNOWN
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the queue is
+ * configured asymmetrically on TX and RX sides.
+ * If this flag is set to '0', then this queue is
+ * configured symmetrically on TX and RX sides.
+ */
+ #define HWRM_QUEUE_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
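A brief decoding sketch for this response, assuming the header above is included; print_queue_qcfg is illustrative only:

#include <stdio.h>

static void print_queue_qcfg(const struct hwrm_queue_qcfg_output *resp)
{
	int asym = !!(resp->queue_cfg_info &
		      HWRM_QUEUE_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG);

	printf("queue_len=%u service_profile=0x%02x (%s TX/RX)\n",
	       (unsigned int)resp->queue_len,
	       (unsigned int)resp->service_profile,
	       asym ? "asymmetric" : "symmetric");
}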
+/******************
+ * hwrm_queue_cfg *
+ ******************/
-/* hwrm_port_mac_cfg_output (size:128b/16B) */
-struct hwrm_port_mac_cfg_output {
+
+/* hwrm_queue_cfg_input (size:320b/40B) */
+struct hwrm_queue_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+	 * Enumeration denoting the RX, TX, or both directions applicable to
+	 * the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_SFT 0
+ /* tx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ /* Bi-directional (Symmetrically applicable to TX and RX paths) */
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2)
+ #define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dflt_len field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_DFLT_LEN UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the service_profile field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE UINT32_C(0x2)
+ /* Queue ID of queue that is to be configured by this function. */
+ uint32_t queue_id;
+ /*
+	 * This value is the estimated packet length used in the
+	 * TX arbiter.
+ * Set to 0xFF... (All Fs) to not adjust this value.
+ */
+ uint32_t dflt_len;
+ /* This value is applicable to CoS queues only. */
+ uint8_t service_profile;
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSY UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS UINT32_C(0x1)
+ /* Set to 0xFF... (All Fs) if there is no service profile specified */
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN UINT32_C(0xff)
+ #define HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LAST \
+ HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_UNKNOWN
+ uint8_t unused_0[7];
+} __attribute__((packed));
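The enables bitmask gates which optional fields the firmware honours: a request that sets service_profile must also set the matching enable bit. A hedged sketch (build_queue_cfg_req is a hypothetical helper; the standard header fields are left to the transport):

#include <stdint.h>
#include <string.h>

static void build_queue_cfg_req(struct hwrm_queue_cfg_input *req,
				uint32_t queue_id)
{
	memset(req, 0, sizeof(*req));
	/* Apply the change symmetrically to both TX and RX. */
	req->flags = HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_BIDIR;
	/* Only fields whose enable bit is set are honoured. */
	req->enables = HWRM_QUEUE_CFG_INPUT_ENABLES_SERVICE_PROFILE;
	req->queue_id = queue_id;
	/* dflt_len stays 0 and is ignored: ENABLES_DFLT_LEN is not set. */
	req->service_profile = HWRM_QUEUE_CFG_INPUT_SERVICE_PROFILE_LOSSLESS;
}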
+
+/* hwrm_queue_cfg_output (size:128b/16B) */
+struct hwrm_queue_cfg_output {
+ /* The specific error status for the command. */
uint16_t error_code;
+ /* The HWRM command request type. */
uint16_t req_type;
+ /* The sequence ID from the original command. */
uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint16_t mru;
- uint16_t mtu;
- uint8_t ipg;
- uint8_t lpbk;
- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
- uint8_t unused_0;
- uint8_t valid;
-};
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+/*****************************
+ * hwrm_queue_pfcenable_qcfg *
+ *****************************/
-/**********************
- * hwrm_port_mac_qcfg *
- **********************/
+/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+	 * Port ID of the port for which the PFC configuration is being
+	 * queried. The HWRM needs to check whether this function is
+	 * allowed to configure PFC on this port.
+ */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
-/* hwrm_port_mac_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_qcfg_input {
+/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /* If set to 1, then PFC is enabled on PRI 0. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI0_PFC_ENABLED \
+ UINT32_C(0x1)
+ /* If set to 1, then PFC is enabled on PRI 1. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI1_PFC_ENABLED \
+ UINT32_C(0x2)
+ /* If set to 1, then PFC is enabled on PRI 2. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI2_PFC_ENABLED \
+ UINT32_C(0x4)
+ /* If set to 1, then PFC is enabled on PRI 3. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI3_PFC_ENABLED \
+ UINT32_C(0x8)
+ /* If set to 1, then PFC is enabled on PRI 4. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI4_PFC_ENABLED \
+ UINT32_C(0x10)
+ /* If set to 1, then PFC is enabled on PRI 5. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI5_PFC_ENABLED \
+ UINT32_C(0x20)
+ /* If set to 1, then PFC is enabled on PRI 6. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI6_PFC_ENABLED \
+ UINT32_C(0x40)
+ /* If set to 1, then PFC is enabled on PRI 7. */
+ #define HWRM_QUEUE_PFCENABLE_QCFG_OUTPUT_FLAGS_PRI7_PFC_ENABLED \
+ UINT32_C(0x80)
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
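Because the PRI0..PRI7 flags occupy bits 0 through 7 in order, the response can be decoded with a shift instead of naming each macro. Illustrative sketch only:

#include <stdint.h>
#include <stdio.h>

static void print_pfc_state(const struct hwrm_queue_pfcenable_qcfg_output *resp)
{
	int pri;

	for (pri = 0; pri < 8; pri++)
		printf("PRI %d: PFC %s\n", pri,
		       (resp->flags & (UINT32_C(1) << pri)) ? "on" : "off");
}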
+
+/****************************
+ * hwrm_queue_pfcenable_cfg *
+ ****************************/
+
+
+/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+struct hwrm_queue_pfcenable_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
uint64_t resp_addr;
+ uint32_t flags;
+ /* If set to 1, then PFC is requested to be enabled on PRI 0. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI0_PFC_ENABLED \
+ UINT32_C(0x1)
+ /* If set to 1, then PFC is requested to be enabled on PRI 1. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI1_PFC_ENABLED \
+ UINT32_C(0x2)
+ /* If set to 1, then PFC is requested to be enabled on PRI 2. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI2_PFC_ENABLED \
+ UINT32_C(0x4)
+ /* If set to 1, then PFC is requested to be enabled on PRI 3. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_ENABLED \
+ UINT32_C(0x8)
+ /* If set to 1, then PFC is requested to be enabled on PRI 4. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_ENABLED \
+ UINT32_C(0x10)
+ /* If set to 1, then PFC is requested to be enabled on PRI 5. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI5_PFC_ENABLED \
+ UINT32_C(0x20)
+ /* If set to 1, then PFC is requested to be enabled on PRI 6. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI6_PFC_ENABLED \
+ UINT32_C(0x40)
+ /* If set to 1, then PFC is requested to be enabled on PRI 7. */
+ #define HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI7_PFC_ENABLED \
+ UINT32_C(0x80)
+ /*
+	 * Port ID of the port for which PFC is being configured.
+	 * The HWRM needs to check whether this function is allowed
+	 * to configure PFC on this port.
+ */
uint16_t port_id;
- uint8_t unused_0[6];
-};
+ uint8_t unused_0[2];
+} __attribute__((packed));
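Configuration mirrors the query layout: set one flag bit per priority that should have PFC on and leave the rest clear. A sketch with a hypothetical helper name:

#include <stdint.h>
#include <string.h>

static void build_pfcenable_req(struct hwrm_queue_pfcenable_cfg_input *req,
				uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	/* Request PFC on priorities 3 and 4; clear bits request PFC off. */
	req->flags = HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI3_PFC_ENABLED |
		     HWRM_QUEUE_PFCENABLE_CFG_INPUT_FLAGS_PRI4_PFC_ENABLED;
	req->port_id = port_id;
}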
+
+/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+struct hwrm_queue_pfcenable_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+/***************************
+ * hwrm_queue_pri2cos_qcfg *
+ ***************************/
-/* hwrm_port_mac_qcfg_output (size:192b/24B) */
-struct hwrm_port_mac_qcfg_output {
+
+/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX
+ /*
+ * When this bit is set to '0', the query is
+ * for VLAN PRI field in tunnel headers.
+ * When this bit is set to '1', the query is
+ * for VLAN PRI field in inner packet headers.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN UINT32_C(0x2)
+ /*
+	 * Port ID of the port for which the table is being queried.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ uint8_t unused_0[3];
+} __attribute__((packed));
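Combining the two query flags, a request for the inner-VLAN PRI mapping on the RX path might look like the following; build_pri2cos_qcfg_req is a hypothetical helper and the standard header fields are again left to the transport:

#include <stdint.h>
#include <string.h>

static void build_pri2cos_qcfg_req(struct hwrm_queue_pri2cos_qcfg_input *req,
				   uint8_t port_id)
{
	memset(req, 0, sizeof(*req));
	/* RX path, with IVLAN set to query inner packet headers. */
	req->flags = HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX |
		     HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
	req->port_id = port_id;
}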
+
+/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+struct hwrm_queue_pri2cos_qcfg_output {
+ /* The specific error status for the command. */
uint16_t error_code;
+ /* The HWRM command request type. */
uint16_t req_type;
+ /* The sequence ID from the original command. */
uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint16_t mru;
- uint16_t mtu;
- uint8_t ipg;
- uint8_t lpbk;
- #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL
- #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL
- #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL
- #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE
- uint8_t vlan_pri2cos_map_pri;
- uint8_t flags;
- #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL
- #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL
- #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL
- #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL
- #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
- #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL
- uint8_t tunnel_pri2cos_map_pri;
- uint8_t dscp2pri_map_pri;
- uint16_t rx_ts_capture_ptp_msg_type;
- uint16_t tx_ts_capture_ptp_msg_type;
- uint8_t cos_field_cfg;
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 1)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
- (0x1UL << 1)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 1)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 1)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
- PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
- (0x0UL << 3)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
- (0x1UL << 3)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
- (0x2UL << 3)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
- (0x3UL << 3)
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
- PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
- #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5
- uint8_t valid;
-};
+ /*
+ * CoS Queue assigned to priority 0. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri0_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 1. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri1_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 2. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri2_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 3. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri3_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 4. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri4_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 5. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri5_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 6. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri6_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 7. This value can only
+ * be changed before traffic has started.
+ * A value of 0xff indicates that no CoS queue is assigned to the
+ * specified priority.
+ */
+ uint8_t pri7_cos_queue_id;
+ /* Information about queue configuration. */
+ uint8_t queue_cfg_info;
+ /*
+ * If this flag is set to '1', then the PRI to CoS
+ * configuration is asymmetric on TX and RX sides.
+ * If this flag is set to '0', then PRI to CoS configuration
+ * is symmetric on TX and RX sides.
+ */
+ #define HWRM_QUEUE_PRI2COS_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t unused_0[6];
+ /*
+ * This field is used in output records to indicate that the output
+ * has been completely written to RAM. Software should read this
+ * field as '1' to confirm that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that this
+ * field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
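+
+/*
+ * Illustrative sketch (not part of this header): consuming the
+ * response above. `resp` is a hypothetical pointer to the completed
+ * hwrm_queue_pri2cos_qcfg_output; a real driver polls with a timeout
+ * and converts multi-byte fields from little endian, both omitted.
+ *
+ *   while (resp->valid != 1)
+ *           ;   (wait until firmware has written the whole record)
+ *   if (resp->pri0_cos_queue_id != 0xff)
+ *           cos_of_pri0 = resp->pri0_cos_queue_id;
+ *   if (resp->queue_cfg_info &
+ *       HWRM_QUEUE_PRI2COS_QCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG)
+ *           (TX and RX mappings differ; query each path separately)
+ */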
+
+/**************************
+ * hwrm_queue_pri2cos_cfg *
+ **************************/
+
+
+/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+struct hwrm_queue_pri2cos_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF9-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to the host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * Enumeration denoting whether the RX path, the TX path, or both
+ * apply to the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_SFT 0
+ /* tx path */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ /* Bi-directional (Symmetrically applicable to TX and RX paths) */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR UINT32_C(0x2)
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR
+ /*
+ * When this bit is set to '0', the mapping is requested
+ * for the VLAN PRI field in tunnel headers.
+ * When this bit is set to '1', the mapping is requested
+ * for the VLAN PRI field in inner packet headers.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_IVLAN UINT32_C(0x4)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the pri0_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the pri1_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI1_COS_QUEUE_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the pri2_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI2_COS_QUEUE_ID \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the pri3_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI3_COS_QUEUE_ID \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the pri4_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI4_COS_QUEUE_ID \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the pri5_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI5_COS_QUEUE_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the pri6_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI6_COS_QUEUE_ID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the pri7_cos_queue_id field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI7_COS_QUEUE_ID \
+ UINT32_C(0x80)
+ /*
+ * Port ID of the port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure the pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ /*
+ * CoS Queue assigned to priority 0. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri0_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 1. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri1_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 2. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri2_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 3. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri3_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 4. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri4_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 5. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri5_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 6. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri6_cos_queue_id;
+ /*
+ * CoS Queue assigned to priority 7. This value can only
+ * be changed before traffic has started.
+ */
+ uint8_t pri7_cos_queue_id;
+ uint8_t unused_0[7];
+} __attribute__((packed));
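+
+/*
+ * Illustrative sketch (not part of this header): programming a single
+ * priority with HWRM_QUEUE_PRI2COS_CFG. Only fields whose `enables`
+ * bit is set are applied; `req` and the chosen values are assumptions
+ * for illustration only.
+ *
+ *   struct hwrm_queue_pri2cos_cfg_input req = { 0 };
+ *
+ *   req.flags = rte_cpu_to_le_32(
+ *           HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR);
+ *   req.enables = rte_cpu_to_le_32(
+ *           HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID);
+ *   req.port_id = 0;              (hypothetical port)
+ *   req.pri0_cos_queue_id = 4;    (map priority 0 to CoS queue 4)
+ */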
+
+/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+struct hwrm_queue_pri2cos_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in output records to indicate that the output
+ * has been completely written to RAM. Software should read this
+ * field as '1' to confirm that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that this
+ * field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
/**************************
- * hwrm_port_mac_ptp_qcfg *
+ * hwrm_queue_cos2bw_qcfg *
**************************/
-/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
-struct hwrm_port_mac_ptp_qcfg_input {
+/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+struct hwrm_queue_cos2bw_qcfg_input {
+ /* The HWRM command request type. */
uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF9-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
uint16_t target_id;
+ /*
+ * A physical address pointer to the host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
uint64_t resp_addr;
+ /*
+ * Port ID of the port for which the table is being queried.
+ * The HWRM needs to check whether this function is allowed
+ * to query the TC BW assignment on this port.
+ */
uint16_t port_id;
- uint8_t unused_0[6];
-};
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+struct hwrm_queue_cos2bw_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* ID of CoS Queue 0. */
+ uint8_t queue_id0;
+ uint8_t unused_0;
+ uint16_t unused_1;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id0_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id0_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id0_bw_weight;
+ /* ID of CoS Queue 1. */
+ uint8_t queue_id1;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id1_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id1_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id1_bw_weight;
+ /* ID of CoS Queue 2. */
+ uint8_t queue_id2;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id2_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id2_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id2_bw_weight;
+ /* ID of CoS Queue 3. */
+ uint8_t queue_id3;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id3_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id3_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id3_bw_weight;
+ /* ID of CoS Queue 4. */
+ uint8_t queue_id4;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id4_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id4_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id4_bw_weight;
+ /* ID of CoS Queue 5. */
+ uint8_t queue_id5;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id5_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id5_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id5_bw_weight;
+ /* ID of CoS Queue 6. */
+ uint8_t queue_id6;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id6_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id6_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id6_bw_weight;
+ /* ID of CoS Queue 7. */
+ uint8_t queue_id7;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id7_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when the
+ * tsa_assign is 0 - Strict Priority (SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id7_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id7_bw_weight;
+ uint8_t unused_2[4];
+ /*
+ * This field is used in output records to indicate that the output
+ * has been completely written to RAM. Software should read this
+ * field as '1' to confirm that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that this
+ * field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
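+
+/*
+ * Illustrative sketch (not part of this header): decoding one of the
+ * bandwidth words above, here queue_id0_min_bw from a completed
+ * hwrm_queue_cos2bw_qcfg_output. `resp` is a hypothetical response
+ * pointer; rte_le_to_cpu_32() converts the little-endian wire value.
+ *
+ *   uint32_t bw = rte_le_to_cpu_32(resp->queue_id0_min_bw);
+ *   uint32_t value = (bw &
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK) >>
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT;
+ *   bool in_bytes = (bw &
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE) ==
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES;
+ *   uint32_t unit = bw &
+ *       HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK;
+ *
+ * `value` is then expressed in `unit` (kilo/mega/giga or 1/100th of a
+ * percent) of bits or bytes according to the scale bit.
+ */
+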
+/*************************
+ * hwrm_queue_cos2bw_cfg *
+ *************************/
-/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
-struct hwrm_port_mac_ptp_qcfg_output {
+
+/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+struct hwrm_queue_cos2bw_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF9-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to the host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint32_t enables;
+ /*
+ * If this bit is set to 1, then all queue_id0 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID \
+ UINT32_C(0x1)
+ /*
+ * If this bit is set to 1, then all queue_id1 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID1_VALID \
+ UINT32_C(0x2)
+ /*
+ * If this bit is set to 1, then all queue_id2 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID2_VALID \
+ UINT32_C(0x4)
+ /*
+ * If this bit is set to 1, then all queue_id3 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID3_VALID \
+ UINT32_C(0x8)
+ /*
+ * If this bit is set to 1, then all queue_id4 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID4_VALID \
+ UINT32_C(0x10)
+ /*
+ * If this bit is set to 1, then all queue_id5 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID5_VALID \
+ UINT32_C(0x20)
+ /*
+ * If this bit is set to 1, then all queue_id6 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID6_VALID \
+ UINT32_C(0x40)
+ /*
+ * If this bit is set to 1, then all queue_id7 related
+ * parameters in this command are valid.
+ */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID7_VALID \
+ UINT32_C(0x80)
+ /*
+ * Port ID of the port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure the TC BW assignment on this port.
+ */
+ uint16_t port_id;
+ /* ID of CoS Queue 0. */
+ uint8_t queue_id0;
+ uint8_t unused_0;
+ /*
+ * Minimum BW allocated to CoS Queue.
+ * The HWRM will translate this value into a byte counter and a
+ * time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id0_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id0_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when
+ * tsa_assign is 0 (Strict Priority, SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id0_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id0_bw_weight;
+ /* ID of CoS Queue 1. */
+ uint8_t queue_id1;
+ /*
+ * Minimum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id1_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id1_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when
+ * tsa_assign is 0 (Strict Priority, SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id1_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id1_bw_weight;
+ /* ID of CoS Queue 2. */
+ uint8_t queue_id2;
+ /*
+ * Minimum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id2_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id2_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when
+ * tsa_assign is 0 (Strict Priority, SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id2_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id2_bw_weight;
+ /* ID of CoS Queue 3. */
+ uint8_t queue_id3;
+ /*
+ * Minimum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id3_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id3_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when
+ * tsa_assign is 0 (Strict Priority, SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id3_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id3_bw_weight;
+ /* ID of CoS Queue 4. */
+ uint8_t queue_id4;
+ /*
+ * Minimum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id4_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id4_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when
+ * tsa_assign is 0 (Strict Priority, SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id4_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id4_bw_weight;
+ /* ID of CoS Queue 5. */
+ uint8_t queue_id5;
+ /*
+ * Minimum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id5_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id5_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when
+ * tsa_assign is 0 (Strict Priority, SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id5_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id5_bw_weight;
+ /* ID of CoS Queue 6. */
+ uint8_t queue_id6;
+ /*
+ * Minimum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id6_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id6_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when
+ * tsa_assign is 0 (Strict Priority, SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id6_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id6_bw_weight;
+ /* ID of CoS Queue 7. */
+ uint8_t queue_id7;
+ /*
+ * Minimum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_min_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+ /*
+ * Maximum BW allocated to the CoS queue.
+ * The HWRM will translate this value into a byte counter and
+ * a time interval used for this COS inside the device.
+ */
+ uint32_t queue_id7_max_bw;
+ /* The bandwidth value. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_SCALE_BYTES
+ /* bw_value_unit is 3 bits wide. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST \
+ HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+ /* Transmission Selection Algorithm (TSA) for CoS Queue. */
+ uint8_t queue_id7_tsa_assign;
+ /* Strict Priority */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_SP \
+ UINT32_C(0x0)
+ /* Enhanced Transmission Selection */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_ETS \
+ UINT32_C(0x1)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST \
+ UINT32_C(0x2)
+ /* reserved. */
+ #define HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST \
+ UINT32_C(0xff)
+ /*
+ * Priority level for strict priority. Valid only when
+ * tsa_assign is 0 (Strict Priority, SP).
+ * 0..7 - Valid values.
+ * 8..255 - Reserved.
+ */
+ uint8_t queue_id7_pri_lvl;
+ /*
+ * Weight used to allocate remaining BW for this COS after
+ * servicing guaranteed bandwidths for all COS.
+ */
+ uint8_t queue_id7_bw_weight;
+ uint8_t unused_1[5];
+} __attribute__((packed));
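The min_bw/max_bw words above pack three fields into one 32-bit value: a 28-bit
bandwidth value in bits 0-27, a one-bit scale in bit 28, and a three-bit unit in
bits 29-31. A minimal sketch of composing such a word from the macros above,
here granting CoS queue 0 a guaranteed 25.00% of total bandwidth; the helper
name and the example value are illustrative, not part of the driver:

    static uint32_t
    cos2bw_min_bw_pct(uint32_t hundredths_of_a_percent)
    {
            uint32_t bw;

            /* 28-bit bandwidth value, placed at BW_VALUE_SFT (0). */
            bw = (hundredths_of_a_percent <<
                  HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_SFT) &
                 HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_MASK;
            /* Unit: 1/100th of a percent of total bandwidth. */
            bw |= HWRM_QUEUE_COS2BW_CFG_INPUT_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100;
            return bw;
    }

    /* HWRM fields are little-endian on the wire, e.g.:
     * req.queue_id0_min_bw = rte_cpu_to_le_32(cos2bw_min_bw_pct(2500));
     */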
+
+/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+struct hwrm_queue_cos2bw_cfg_output {
+ /* The specific error status for the command. */
uint16_t error_code;
+ /* The HWRM command request type. */
uint16_t req_type;
+ /* The sequence ID from the original command. */
uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint8_t flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
- uint8_t unused_0[3];
- uint32_t rx_ts_reg_off_lower;
- uint32_t rx_ts_reg_off_upper;
- uint32_t rx_ts_reg_off_seq_id;
- uint32_t rx_ts_reg_off_src_id_0;
- uint32_t rx_ts_reg_off_src_id_1;
- uint32_t rx_ts_reg_off_src_id_2;
- uint32_t rx_ts_reg_off_domain_id;
- uint32_t rx_ts_reg_off_fifo;
- uint32_t rx_ts_reg_off_fifo_adv;
- uint32_t rx_ts_reg_off_granularity;
- uint32_t tx_ts_reg_off_lower;
- uint32_t tx_ts_reg_off_upper;
- uint32_t tx_ts_reg_off_seq_id;
- uint32_t tx_ts_reg_off_fifo;
- uint32_t tx_ts_reg_off_granularity;
- uint8_t unused_1[7];
- uint8_t valid;
-};
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
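The `valid` byte is the completion sentinel for the whole response: the comment
above requires firmware to write it last, so a driver must observe it as '1'
before trusting any other field. A minimal polling sketch, assuming DPDK's
rte_io_rmb() as the read barrier; the helper is illustrative, not the bnxt
PMD's actual poll loop:

    static int
    cos2bw_resp_done(volatile struct hwrm_queue_cos2bw_cfg_output *resp)
    {
            if (resp->valid != 1)
                    return 0;       /* firmware is still writing */
            /* Keep reads of the remaining fields from being reordered
             * ahead of the valid check. */
            rte_io_rmb();
            return 1;
    }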
+/*************************
+ * hwrm_queue_dscp_qcaps *
+ *************************/
-/* hwrm_vnic_alloc */
-/*
- * Description: This VNIC is a resource in the RX side of the chip that is used
- * to represent a virtual host "interface". # At the time of VNIC allocation or
- * configuration, the function can specify whether it wants the requested VNIC
- * to be the default VNIC for the function or not. # If a function requests
- * allocation of a VNIC for the first time and a VNIC is successfully allocated
- * by the HWRM, then the HWRM shall make the allocated VNIC as the default VNIC
- * for that function. # The default VNIC shall be used for the default action
- * for a partition or function. # For each VNIC allocated on a function, a
- * mapping on the RX side to map the allocated VNIC to source virtual interface
- * shall be performed by the HWRM. This should be hidden to the function driver
- * requesting the VNIC allocation. This enables broadcast/multicast replication
- * with source knockout. # If multicast replication with source knockout is
- * enabled, then the internal VNIC to SVIF mapping data structures shall be
- * programmed at the time of VNIC allocation.
- */
-/* Input (24 bytes) */
-struct hwrm_vnic_alloc_input {
- uint16_t req_type;
+
+/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+struct hwrm_queue_dscp_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
/*
- * When this bit is '1', this VNIC is requested to be the
- * default VNIC for this function.
+ * Port ID of the port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
*/
- #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
- uint32_t unused_0;
+ uint8_t port_id;
+ uint8_t unused_0[7];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_vnic_alloc_output {
- uint16_t error_code;
+/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+struct hwrm_queue_dscp_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The number of bits provided by the hardware for the DSCP value. */
+ uint8_t num_dscp_bits;
+ uint8_t unused_0;
+ /* Max number of DSCP-MASK-PRI entries supported. */
+ uint16_t max_entries;
+ uint8_t unused_1[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
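The two capability fields bound the DSCP remapping table: num_dscp_bits gives
how many DSCP bits the hardware matches on, and max_entries caps the
DSCP-MASK-PRI table size. A small sketch of decoding a completed response; the
helper is illustrative, and the byte swap assumes the usual little-endian HWRM
wire format:

    static void
    dscp_qcaps_limits(const struct hwrm_queue_dscp_qcaps_output *resp,
                      uint32_t *dscp_values, uint16_t *entries)
    {
            /* e.g. 6 DSCP bits -> 64 distinct codepoints */
            *dscp_values = UINT32_C(1) << resp->num_dscp_bits;
            /* multi-byte HWRM fields are little-endian on the wire */
            *entries = rte_le_to_cpu_16(resp->max_entries);
    }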
+
+/****************************
+ * hwrm_queue_dscp2pri_qcfg *
+ ****************************/
+
+
+/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t vnic_id;
- /* Logical vnic ID */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address to which the 24-bit DSCP-MASK-PRI
+ * tuple(s) will be copied.
+ */
+ uint64_t dest_data_addr;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * Port ID of the port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
*/
+ uint8_t port_id;
+ uint8_t unused_0;
+ /* Size of the buffer pointed to by dest_data_addr. */
+ uint16_t dest_data_buffer_size;
+ uint8_t unused_1[4];
} __attribute__((packed));
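Since each DSCP-MASK-PRI tuple is 24 bits, the destination buffer needs three
bytes per entry; sizing it from the max_entries value reported by
hwrm_queue_dscp_qcaps keeps the query from truncating. A sketch, assuming
max_entries was obtained from a prior qcaps call and req is a zeroed request:

    /* Three bytes per 24-bit DSCP-MASK-PRI tuple. */
    req.dest_data_buffer_size = rte_cpu_to_le_16(max_entries * 3);
    /* req.dest_data_addr must point at a physically contiguous
     * buffer of at least that size, e.g. from rte_memzone_reserve(). */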
-/* hwrm_vnic_free */
-/*
- * Description: Free a VNIC resource. Idle any resources associated with the
- * VNIC as well as the VNIC. Reset and release all resources associated with the
- * VNIC.
- */
-/* Input (24 bytes) */
-struct hwrm_vnic_free_input {
- uint16_t req_type;
+/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * A count of the number of DSCP-MASK-PRI tuple(s) pointed to
+ * by the dest_data_addr.
*/
- uint16_t cmpl_ring;
+ uint16_t entry_cnt;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This is the default PRI to which uninitialized DSCP values
+ * are mapped.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint8_t default_pri;
+ uint8_t unused_0[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_queue_dscp2pri_cfg *
+ ***************************/
+
+
+/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint64_t resp_addr;
+ uint16_t cmpl_ring;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address from which the 24-bit DSCP-MASK-PRI
+ * tuple(s) will be copied.
+ */
+ uint64_t src_data_addr;
+ uint32_t flags;
+ /* use_hw_default_pri is 1 bit wide. */
+ #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_FLAGS_USE_HW_DEFAULT_PRI \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the default_pri field to be
+ * configured.
+ */
+ #define HWRM_QUEUE_DSCP2PRI_CFG_INPUT_ENABLES_DEFAULT_PRI \
+ UINT32_C(0x1)
+ /*
+ * Port ID of the port for which the table is being configured.
+ * The HWRM needs to check whether this function is allowed
+ * to configure pri2cos mapping on this port.
+ */
+ uint8_t port_id;
+ /*
+ * This is the default PRI to which uninitialized DSCP values
+ * will be mapped.
+ */
+ uint8_t default_pri;
+ /*
+ * A count of the number of DSCP-MASK-PRI tuple(s) in the data pointed
+ * to by src_data_addr.
*/
- uint32_t vnic_id;
+ uint16_t entry_cnt;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
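The header describes the entries only as 24-bit DSCP-MASK-PRI tuples and
defines no C layout for them. A plausible sketch, assuming one packed byte each
for the DSCP value, the match mask, and the target PRI; the struct name and
field order are assumptions, not taken from this file:

    struct dscp2pri_tuple {
            uint8_t dscp;   /* DSCP codepoint to match */
            uint8_t mask;   /* which bits of dscp participate in the match */
            uint8_t pri;    /* priority the match maps to */
    } __attribute__((packed));

    /* Map DSCP 46 (expedited forwarding) exactly onto PRI 5. An array of
     * such tuples is what src_data_addr points at, with entry_cnt set to
     * the number of tuples. */
    struct dscp2pri_tuple ef = { .dscp = 46, .mask = 0x3f, .pri = 5 };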
+
+/*******************
+ * hwrm_vnic_alloc *
+ *******************/
+
+
+/* hwrm_vnic_alloc_input (size:192b/24B) */
+struct hwrm_vnic_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /*
+ * When this bit is '1', this VNIC is requested to
+ * be the default VNIC for this function.
+ */
+ #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_alloc_output (size:128b/16B) */
+struct hwrm_vnic_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* Logical vnic ID */
- uint32_t unused_0;
+ uint32_t vnic_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_vnic_free_output {
- uint16_t error_code;
+/******************
+ * hwrm_vnic_free *
+ ******************/
+
+
+/* hwrm_vnic_free_input (size:192b/24B) */
+struct hwrm_vnic_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint32_t vnic_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_free_output (size:128b/16B) */
+struct hwrm_vnic_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_cfg */
-/* Description: Configure the RX VNIC structure. */
-/* Input (40 bytes) */
+/*****************
+ * hwrm_vnic_cfg *
+ *****************/
+
+
+/* hwrm_vnic_cfg_input (size:320b/40B) */
struct hwrm_vnic_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC is requested to be the default
- * VNIC for the function.
+ * When this bit is '1', the VNIC is requested to
+ * be the default VNIC for the function.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC is being configured to strip
- * VLAN in the RX path. If set to '0', then VLAN stripping is
- * disabled on this VNIC.
+ * When this bit is '1', the VNIC is being configured to
+ * strip VLAN in the RX path.
+ * If set to '0', then VLAN stripping is disabled on
+ * this VNIC.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is being configured to buffer
- * receive packets in the hardware until the host posts new
- * receive buffers. If set to '0', then bd_stall is being
- * configured to be disabled on this VNIC.
+ * When this bit is '1', the VNIC is being configured to
+ * buffer receive packets in the hardware until the host
+ * posts new receive buffers.
+ * If set to '0', then bd_stall is being configured to be
+ * disabled on this VNIC.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC is being configured to receive
- * both RoCE and non-RoCE traffic. If set to '0', then this VNIC
- * is not configured to be operating in dual VNIC mode.
+ * When this bit is '1', the VNIC is being configured to
+ * receive both RoCE and non-RoCE traffic.
+ * If set to '0', then this VNIC is not configured to be
+ * operating in dual VNIC mode.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
+ UINT32_C(0x8)
/*
- * When this flag is set to '1', the VNIC is requested to be
- * configured to receive only RoCE traffic. If this flag is set
- * to '0', then this flag shall be ignored by the HWRM. If
- * roce_dual_vnic_mode flag is set to '1', then the HWRM client
- * shall not set this flag to '1'.
+ * When this flag is set to '1', the VNIC is requested to
+ * be configured to receive only RoCE traffic.
+ * If this flag is set to '0', then this flag shall be
+ * ignored by the HWRM.
+ * If the roce_dual_vnic_mode flag or the
+ * roce_mirroring_capable_vnic_mode flag is set to '1',
+ * then the HWRM client shall not set this flag to '1'.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
+ UINT32_C(0x10)
/*
* When a VNIC uses one destination ring group for certain
- * application (e.g. Receive Flow Steering) where exact match is
- * used to direct packets to a VNIC with one destination ring
- * group only, there is no need to configure RSS indirection
- * table for that VNIC as only one destination ring group is
- * used. This flag is used to enable a mode where RSS is enabled
- * in the VNIC using a RSS context for computing RSS hash but
- * the RSS indirection table is not configured using
- * hwrm_vnic_rss_cfg. If this mode is enabled, then the driver
- * should not program RSS indirection table for the RSS context
- * that is used for computing RSS hash only.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
- /*
- * When this bit is '1', the VNIC is being configured to receive
- * both RoCE and non-RoCE traffic, but forward only the RoCE
- * traffic further. Also, RoCE traffic can be mirrored to L2
- * driver.
+ * application (e.g. Receive Flow Steering) where
+ * exact match is used to direct packets to a VNIC with one
+ * destination ring group only, there is no need to configure
+ * RSS indirection table for that VNIC as only one destination
+ * ring group is used.
+ *
+ * This flag is used to enable a mode where
+ * RSS is enabled in the VNIC using a RSS context
+ * for computing RSS hash but the RSS indirection table is
+ * not configured using hwrm_vnic_rss_cfg.
+ *
+ * If this mode is enabled, then the driver should not program
+ * RSS indirection table for the RSS context that is used for
+ * computing RSS hash only.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the VNIC is being configured to
+ * receive both RoCE and non-RoCE traffic, but forward only the
+ * RoCE traffic further. Also, RoCE traffic can be mirrored to
+ * L2 driver.
*/
#define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
- UINT32_C(0x40)
- uint32_t enables;
+ UINT32_C(0x40)
+ uint32_t enables;
/*
* This bit must be '1' for the dflt_ring_grp field to be
* configured.
*/
- #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1)
- /* This bit must be '1' for the rss_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2)
- /* This bit must be '1' for the cos_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4)
- /* This bit must be '1' for the lb_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8)
- /* This bit must be '1' for the mru field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10)
- uint16_t vnic_id;
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the rss_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cos_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the lb_rule field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the mru field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the default_rx_ring_id field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the default_cmpl_ring_id field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID \
+ UINT32_C(0x40)
/* Logical vnic ID */
- uint16_t dflt_ring_grp;
+ uint16_t vnic_id;
/*
- * Default Completion ring for the VNIC. This ring will be
- * chosen if packet does not match any RSS rules and if there is
- * no COS rule.
+ * Default Completion ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules and if
+ * there is no COS rule.
*/
- uint16_t rss_rule;
+ uint16_t dflt_ring_grp;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
* there is no RSS rule.
*/
- uint16_t cos_rule;
+ uint16_t rss_rule;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
* there is no COS rule.
*/
- uint16_t lb_rule;
+ uint16_t cos_rule;
/*
- * RSS ID for load balancing rule/table structure. 0xFF... (All
- * Fs) if there is no LB rule.
+ * RSS ID for load balancing rule/table structure.
+ * 0xFF... (All Fs) if there is no LB rule.
*/
- uint16_t mru;
+ uint16_t lb_rule;
/*
- * The maximum receive unit of the vnic. Each vnic is associated
- * with a function. The vnic mru value overwrites the mru
- * setting of the associated function. The HWRM shall make sure
- * that vnic mru does not exceed the mru of the port the
- * function is associated with.
+ * The maximum receive unit of the vnic.
+ * Each vnic is associated with a function.
+ * The vnic mru value overwrites the mru setting of the
+ * associated function.
+ * The HWRM shall make sure that vnic mru does not exceed
+ * the mru of the port the function is associated with.
*/
- uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_vnic_cfg_output {
- uint16_t error_code;
+ uint16_t mru;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * Default Rx ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules.
+ * The aggregation ring associated with the Rx ring is
+ * implied based on the Rx ring specified when the
+ * aggregation ring was allocated.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t default_rx_ring_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * Default completion ring for the VNIC. This ring will
+ * be chosen if packet does not match any RSS rules.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t default_cmpl_ring_id;
+} __attribute__((packed));
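+
+/*
+ * The `enables` bitmap above follows the general HWRM convention: a
+ * request field is only consumed by firmware when its matching enables
+ * bit is set.  A minimal sketch, assuming the same hypothetical
+ * hwrm_send_message() helper as above (endianness conversion omitted):
+ *
+ *	struct hwrm_vnic_cfg_input req = { 0 };
+ *
+ *	req.vnic_id = vnic_id;
+ *	// Only mru and dflt_ring_grp are honored here, because only
+ *	// their enables bits are set; rss_rule etc. stay ignored.
+ *	req.enables = HWRM_VNIC_CFG_INPUT_ENABLES_MRU |
+ *		      HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
+ *	req.mru = 1514;
+ *	req.dflt_ring_grp = ring_grp_id;
+ */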
+
+/* hwrm_vnic_cfg_output (size:128b/16B) */
+struct hwrm_vnic_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_qcfg */
-/*
- * Description: Query the RX VNIC structure. This function can be used by a PF
- * driver to query its own VNIC resource or VNIC resource of its child VF. This
- * function can also be used by a VF driver to query its own VNIC resource.
- */
-/* Input (32 bytes) */
+/******************
+ * hwrm_vnic_qcfg *
+ ******************/
+
+
+/* hwrm_vnic_qcfg_input (size:256b/32B) */
struct hwrm_vnic_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the vf_id_valid field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the vf_id_valid field to be configured. */
- #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
- uint32_t vnic_id;
+ #define HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
/* Logical vnic ID */
- uint16_t vf_id;
+ uint32_t vnic_id;
/* ID of Virtual Function whose VNIC resource is being queried. */
- uint16_t unused_0[3];
+ uint16_t vf_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (32 bytes) */
+/* hwrm_vnic_qcfg_output (size:256b/32B) */
struct hwrm_vnic_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint16_t dflt_ring_grp;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* Default Completion ring for the VNIC. */
- uint16_t rss_rule;
+ uint16_t dflt_ring_grp;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
* there is no RSS rule.
*/
- uint16_t cos_rule;
+ uint16_t rss_rule;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
* there is no COS rule.
*/
- uint16_t lb_rule;
+ uint16_t cos_rule;
/*
- * RSS ID for load balancing rule/table structure. 0xFF... (All
- * Fs) if there is no LB rule.
+ * RSS ID for load balancing rule/table structure.
+ * 0xFF... (All Fs) if there is no LB rule.
*/
- uint16_t mru;
+ uint16_t lb_rule;
/* The maximum receive unit of the vnic. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t flags;
+ uint16_t mru;
+ uint8_t unused_0[2];
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC is the default VNIC for the
- * function.
+ * When this bit is '1', the VNIC is the default VNIC for
+ * the function.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC is configured to strip VLAN in
- * the RX path. If set to '0', then VLAN stripping is disabled
- * on this VNIC.
+ * When this bit is '1', the VNIC is configured to
+ * strip VLAN in the RX path.
+ * If set to '0', then VLAN stripping is disabled on
+ * this VNIC.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is configured to buffer
- * receive packets in the hardware until the host posts new
- * receive buffers. If set to '0', then bd_stall is disabled on
+ * When this bit is '1', the VNIC is configured to
+ * buffer receive packets in the hardware until the host
+ * posts new receive buffers.
+ * If set to '0', then bd_stall is disabled on
* this VNIC.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC is configured to receive both
- * RoCE and non-RoCE traffic. If set to '0', then this VNIC is
- * not configured to operate in dual VNIC mode.
+ * When this bit is '1', the VNIC is configured to
+ * receive both RoCE and non-RoCE traffic.
+ * If set to '0', then this VNIC is not configured to
+ * operate in dual VNIC mode.
*/
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE \
+ UINT32_C(0x8)
/*
* When this flag is set to '1', the VNIC is configured to
- * receive only RoCE traffic. When this flag is set to '0', the
- * VNIC is not configured to receive only RoCE traffic. If
- * roce_dual_vnic_mode flag and this flag both are set to '1',
- * then it is an invalid configuration of the VNIC. The HWRM
- * should not allow that type of mis-configuration by HWRM
- * clients.
- */
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ * receive only RoCE traffic.
+ * When this flag is set to '0', the VNIC is not configured
+ * to receive only RoCE traffic.
+ * If roce_dual_vnic_mode flag and this flag both are set
+ * to '1', then it is an invalid configuration of the
+ * VNIC. The HWRM should not allow that type of
+ * mis-configuration by HWRM clients.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE \
+ UINT32_C(0x10)
/*
* When a VNIC uses one destination ring group for certain
- * application (e.g. Receive Flow Steering) where exact match is
- * used to direct packets to a VNIC with one destination ring
- * group only, there is no need to configure RSS indirection
- * table for that VNIC as only one destination ring group is
- * used. When this bit is set to '1', then the VNIC is enabled
- * in a mode where RSS is enabled in the VNIC using a RSS
- * context for computing RSS hash but the RSS indirection table
- * is not configured.
- */
- #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
+ * application (e.g. Receive Flow Steering) where
+ * exact match is used to direct packets to a VNIC with one
+ * destination ring group only, there is no need to configure
+ * RSS indirection table for that VNIC as only one destination
+ * ring group is used.
+ *
+ * When this bit is set to '1', then the VNIC is enabled in a
+ * mode where RSS is enabled in the VNIC using a RSS context
+ * for computing RSS hash but the RSS indirection table is
+ * not configured.
+ */
+ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE \
+ UINT32_C(0x20)
/*
- * When this bit is '1', the VNIC is configured to receive both
- * RoCE and non-RoCE traffic, but forward only RoCE traffic
- * further. Also RoCE traffic can be mirrored to L2 driver.
+ * When this bit is '1', the VNIC is configured to
+ * receive both RoCE and non-RoCE traffic, but forward only
+ * RoCE traffic further. Also RoCE traffic can be mirrored to
+ * L2 driver.
*/
#define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \
- UINT32_C(0x40)
- uint32_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t unused_5;
- uint8_t valid;
+ UINT32_C(0x40)
+ uint8_t unused_1[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
+/*******************
+ * hwrm_vnic_qcaps *
+ *******************/
+
-/* hwrm_vnic_tpa_cfg */
-/* Description: This function is used to enable/configure TPA on the VNIC. */
-/* Input (40 bytes) */
+/* hwrm_vnic_qcaps_input (size:192b/24B) */
+struct hwrm_vnic_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_vnic_qcaps_output (size:192b/24B) */
+struct hwrm_vnic_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The maximum receive unit that is settable on a vnic. */
+ uint16_t mru;
+ uint8_t unused_0[2];
+ uint32_t flags;
+ /* Unused. */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_UNUSED \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the capability of stripping VLAN in
+ * the RX path is supported on VNIC(s).
+ * If set to '0', then VLAN stripping capability is
+ * not supported on VNIC(s).
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the capability to buffer receive
+ * packets in the hardware until the host posts new receive buffers
+ * is supported on VNIC(s).
+ * If set to '0', then bd_stall capability is not supported
+ * on VNIC(s).
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_BD_STALL_CAP \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the capability to
+ * receive both RoCE and non-RoCE traffic on VNIC(s) is
+ * supported.
+ * If set to '0', then the capability to receive
+ * both RoCE and non-RoCE traffic on VNIC(s) is
+ * not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_DUAL_VNIC_CAP \
+ UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', the capability to configure
+ * a VNIC to receive only RoCE traffic is supported.
+ * When this flag is set to '0', the VNIC capability to
+ * configure to receive only RoCE traffic is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_ONLY_VNIC_CAP \
+ UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', a VNIC can be enabled in a mode
+ * where an RSS context is used for RSS hash computation without
+ * configuring the RSS indirection table.
+ * When this bit is set to '0', a VNIC cannot be configured in
+ * such a mode.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_DFLT_CR_CAP \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1', the capability to
+ * mirror the RoCE traffic is supported.
+ * If set to '0', then the capability to mirror the
+ * RoCE traffic is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1', the outermost RSS hashing capability
+ * is supported. If set to '0', then the outermost RSS hashing
+ * capability is not supported.
+ */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP \
+ UINT32_C(0x80)
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
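+
+/*
+ * A driver would typically issue HWRM_VNIC_QCAPS once at startup and
+ * cache the capability bits above.  Minimal sketch, assuming `resp` is
+ * a completed struct hwrm_vnic_qcaps_output:
+ *
+ *	int vlan_strip_cap = !!(resp.flags &
+ *		HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VLAN_STRIP_CAP);
+ *	int outermost_rss_cap = !!(resp.flags &
+ *		HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP);
+ */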
+
+/*********************
+ * hwrm_vnic_tpa_cfg *
+ *********************/
+
+
+/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
struct hwrm_vnic_tpa_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) of non-tunneled TCP
- * packets.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) of
+ * non-tunneled TCP packets.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) of tunneled TCP packets.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) of
+ * tunneled TCP packets.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) according to Windows
- * Receive Segment Coalescing (RSC) rules.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Windows Receive Segment Coalescing (RSC) rules.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) according to Linux
- * Generic Receive Offload (GRO) rules.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Linux Generic Receive Offload (GRO) rules.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO \
+ UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) for TCP packets with IP
- * ECN set to non-zero.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for TCP
+ * packets with IP ECN set to non-zero.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN \
+ UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC shall be configured to perform
- * transparent packet aggregation (TPA) for GRE tunneled TCP
- * packets only if all packets have the same GRE sequence.
+ * When this bit is '1', the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * GRE tunneled TCP packets only if all packets have the
+ * same GRE sequence.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
UINT32_C(0x20)
/*
- * When this bit is '1' and the GRO mode is enabled, the VNIC
- * shall be configured to perform transparent packet aggregation
- * (TPA) for TCP/IPv4 packets with consecutively increasing
- * IPIDs. In other words, the last packet that is being
- * aggregated to an already existing aggregation context shall
- * have IPID 1 more than the IPID of the last packet that was
- * aggregated in that aggregation context.
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP/IPv4 packets with consecutively increasing IPIDs.
+ * In other words, the last packet that is being
+ * aggregated to an already existing aggregation context
+ * shall have IPID 1 more than the IPID of the last packet
+ * that was aggregated in that aggregation context.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK \
+ UINT32_C(0x40)
/*
- * When this bit is '1' and the GRO mode is enabled, the VNIC
- * shall be configured to perform transparent packet aggregation
- * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit
- * (IPv6) value.
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC shall be configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
+ * value.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80)
- uint32_t enables;
- /* This bit must be '1' for the max_agg_segs field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
- /* This bit must be '1' for the max_aggs field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK \
+ UINT32_C(0x80)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the max_agg_segs field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the max_aggs field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
/*
* This bit must be '1' for the max_agg_timer field to be
* configured.
*/
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
- /* This bit must be '1' for the min_agg_len field to be configured. */
- #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
- uint16_t vnic_id;
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the min_agg_len field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
/* Logical vnic ID */
- uint16_t max_agg_segs;
+ uint16_t vnic_id;
/*
- * This is the maximum number of TCP segments that can be
- * aggregated (unit is Log2). Max value is 31.
+ * This is the maximum number of TCP segments that can
+ * be aggregated (unit is Log2). Max value is 31.
*/
+ uint16_t max_agg_segs;
/* 1 segment */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
/* 2 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
/* 4 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
/* 8 segments */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
/* Any segment size larger than this is not valid */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
- uint16_t max_aggs;
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_LAST \
+ HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX
/*
* This is the maximum number of aggregations this VNIC is
- * allowed (unit is Log2). Max value is 7
+ * allowed (unit is Log2). Max value is 7.
*/
+ uint16_t max_aggs;
/* 1 aggregation */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
/* 2 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
/* 4 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
/* 8 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
/* 16 aggregations */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
/* Any aggregation size larger than this is not valid */
- #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t max_agg_timer;
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_LAST \
+ HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX
+ uint8_t unused_0[2];
/*
- * This is the maximum amount of time allowed for an aggregation
- * context to complete after it was initiated.
+ * This is the maximum amount of time allowed for
+ * an aggregation context to complete after it was initiated.
*/
- uint32_t min_agg_len;
+ uint32_t max_agg_timer;
/*
* This is the minimum amount of payload length required to
* start an aggregation context.
*/
+ uint32_t min_agg_len;
} __attribute__((packed));
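+
+/*
+ * max_agg_segs and max_aggs above are log2-encoded, e.g.
+ * HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 (0x3) means 2^3 = 8 segments.
+ * A minimal helper sketch (not part of this header; the round-up
+ * policy is an assumption) mapping a segment count to the encoding:
+ *
+ * static uint16_t tpa_encode_max_agg_segs(unsigned int segs)
+ * {
+ *	uint16_t enc = 0;
+ *
+ *	// Round up to the next power of two, capped at the max encoding.
+ *	while ((1u << enc) < segs &&
+ *	       enc < HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX)
+ *		enc++;
+ *	return enc;
+ * }
+ */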
-/* Output (16 bytes) */
+/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
struct hwrm_vnic_tpa_cfg_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_vnic_tpa_qcfg *
+ **********************/
+
+
+/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_tpa_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Logical vnic ID */
+ uint16_t vnic_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
+struct hwrm_vnic_tpa_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) of
+ * non-tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_TPA \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) of
+ * tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_ENCAP_TPA \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Windows Receive Segment Coalescing (RSC) rules.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_RSC_WND_UPDATE \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) according
+ * to Linux Generic Receive Offload (GRO) rules.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for TCP
+ * packets with IP ECN set to non-zero.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_ECN \
+ UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * GRE tunneled TCP packets only if all packets have the
+ * same GRE sequence.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP/IPv4 packets with consecutively increasing IPIDs.
+ * In other words, the last packet that is being
+ * aggregated to an already existing aggregation context
+ * shall have IPID 1 more than the IPID of the last packet
+ * that was aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_IPID_CHECK \
+ UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled,
+ * the VNIC is configured to
+ * perform transparent packet aggregation (TPA) for
+ * TCP packets with the same TTL (IPv4) or Hop limit (IPv6)
+ * value.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_FLAGS_GRO_TTL_CHECK \
+ UINT32_C(0x80)
+ /*
+ * This is the maximum number of TCP segments that can
+ * be aggregated (unit is Log2). Max value is 31.
+ */
+ uint16_t max_agg_segs;
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_LAST \
+ HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGG_SEGS_MAX
+ /*
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7.
+ */
+ uint16_t max_aggs;
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_LAST \
+ HWRM_VNIC_TPA_QCFG_OUTPUT_MAX_AGGS_MAX
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This is the maximum amount of time allowed for
+ * an aggregation context to complete after it was initiated.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint32_t max_agg_timer;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
*/
+ uint32_t min_agg_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_rss_cfg */
-/* Description: This function is used to enable RSS configuration. */
-/* Input (48 bytes) */
+/*********************
+ * hwrm_vnic_rss_cfg *
+ *********************/
+
+
+/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
struct hwrm_vnic_rss_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv6 addresses of IPv6
+ * packets.
*/
- uint32_t hash_type;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source and destination IPv4 addresses of IPv4 packets.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and source/destination
- * ports of TCP/IPv4 packets.
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ /* VNIC ID of VNIC associated with RSS table being configured. */
+ uint16_t vnic_id;
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and source/destination
- * ports of UDP/IPv4 packets.
+ * Specifies which VNIC ring table pair to configure.
+ * Valid values range from 0 to 7.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ uint8_t ring_table_pair_index;
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source and destination IPv4 addresses of IPv6 packets.
+ * When this bit is '1', it indicates that the current RSS
+ * hash mode setting configured in the device is used.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and source/destination
- * ports of TCP/IPv6 packets.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and source/destination
- * ports of UDP/IPv6 packets.
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
- uint32_t unused_0;
- uint64_t ring_grp_tbl_addr;
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
/* This is the address for rss ring group table */
- uint64_t hash_key_tbl_addr;
+ uint64_t ring_grp_tbl_addr;
/* This is the address for rss hash key table */
- uint16_t rss_ctx_idx;
+ uint64_t hash_key_tbl_addr;
/* Index to the rss indirection table. */
- uint16_t unused_1[3];
+ uint16_t rss_ctx_idx;
+ uint8_t unused_1[6];
} __attribute__((packed));
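+
+/*
+ * Minimal sketch of populating hwrm_vnic_rss_cfg_input (not part of
+ * this header).  ring_tbl_iova and key_iova stand for the IOVAs of the
+ * DMA-able ring-group table and 40-byte hash-key table the driver
+ * allocated elsewhere; both names are hypothetical.
+ *
+ *	struct hwrm_vnic_rss_cfg_input req = { 0 };
+ *
+ *	// Hash over L3 and L4 for both IPv4 and IPv6 flows.
+ *	req.hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
+ *			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
+ *			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 |
+ *			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
+ *			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 |
+ *			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
+ *	req.vnic_id = vnic_id;
+ *	req.ring_grp_tbl_addr = ring_tbl_iova;
+ *	req.hash_key_tbl_addr = key_iova;
+ *	req.rss_ctx_idx = rss_ctx;
+ */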
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
struct hwrm_vnic_rss_cfg_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_vnic_rss_qcfg *
+ **********************/
+
+
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Index to the rss indirection table. */
+ uint16_t rss_ctx_idx;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_vnic_plcmodes_cfg */
-/*
- * Description: This function can be used to set placement mode configuration of
- * the VNIC.
- */
-/* Input (40 bytes) */
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv4
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv4 addresses and
+ * source/destination ports of UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source and destination IPv4 addresses of IPv6
+ * packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of TCP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed
+ * over source/destination IPv6 addresses and
+ * source/destination ports of UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ uint8_t unused_0[4];
+ /* This is the value of the RSS hash key. */
+ uint32_t hash_key[10];
+ /* Flags to specify different RSS hash modes. */
+ uint8_t hash_mode_flags;
+ /*
+ * When this bit is '1', it indicates that the current RSS
+ * hash mode setting configured in the device is used.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_DEFAULT \
+ UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 4 tuples {l3.src, l3.dest,
+ * l4.src, l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_4 \
+ UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over innermost 2 tuples {l3.src, l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_INNERMOST_2 \
+ UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 4 tuples {t_l3.src, t_l3.dest,
+ * t_l4.src, t_l4.dest} for tunnel packets. For non-tunnel
+ * packets, the RSS hash is computed over the normal
+ * src/dest l3 and src/dest l4 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_4 \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', it indicates requesting support of
+ * RSS hashing over outermost 2 tuples {t_l3.src, t_l3.dest} for
+ * tunnel packets. For non-tunnel packets, the RSS hash is
+ * computed over the normal src/dest l3 headers.
+ */
+ #define HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_MODE_FLAGS_OUTERMOST_2 \
+ UINT32_C(0x10)
+ uint8_t unused_1[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
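
/*
 * Editorial sketch: decoding the hash_type bit-field from a completed
 * response; the helper name is hypothetical.
 */
static int hwrm_rss_hashes_tcp4(const struct hwrm_vnic_rss_qcfg_output *resp)
{
	uint32_t hash = rte_le_to_cpu_32(resp->hash_type);

	return !!(hash & HWRM_VNIC_RSS_QCFG_OUTPUT_HASH_TYPE_TCP_IPV4);
}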
+
+/**************************
+ * hwrm_vnic_plcmodes_cfg *
+ **************************/
+
+
+/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC shall be configured to use regular
- * placement algorithm. By default, the regular placement algorithm
- * shall be enabled on the VNIC.
+ * When this bit is '1', the VNIC shall be configured to
+ * use regular placement algorithm.
+ * By default, the regular placement algorithm shall be
+ * enabled on the VNIC.
*/
#define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT \
UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC shall be configured use the jumbo
- * placement algorithm.
+ * When this bit is '1', the VNIC shall be configured
+ * use the jumbo placement algorithm.
*/
#define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT \
UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC shall be configured to enable Header-
- * Data split for IPv4 packets according to the following rules: # If
- * the packet is identified as TCP/IPv4, then the packet is split at the
- * beginning of the TCP payload. # If the packet is identified as
- * UDP/IPv4, then the packet is split at the beginning of UDP payload. #
- * If the packet is identified as non-TCP and non-UDP IPv4 packet, then
- * the packet is split at the beginning of the upper layer protocol
- * header carried in the IPv4 packet.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv4 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv4, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv4, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv4 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv4
+ * packet.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 UINT32_C(0x4)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC shall be configured to enable Header-
- * Data split for IPv6 packets according to the following rules: # If
- * the packet is identified as TCP/IPv6, then the packet is split at the
- * beginning of the TCP payload. # If the packet is identified as
- * UDP/IPv6, then the packet is split at the beginning of UDP payload. #
- * If the packet is identified as non-TCP and non-UDP IPv6 packet, then
- * the packet is split at the beginning of the upper layer protocol
- * header carried in the IPv6 packet.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for IPv6 packets according
+ * to the following rules:
+ * # If the packet is identified as TCP/IPv6, then the
+ * packet is split at the beginning of the TCP payload.
+ * # If the packet is identified as UDP/IPv6, then the
+ * packet is split at the beginning of UDP payload.
+ * # If the packet is identified as non-TCP and non-UDP
+ * IPv6 packet, then the packet is split at the beginning
+ * of the upper layer protocol header carried in the IPv6
+ * packet.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 UINT32_C(0x8)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6 \
+ UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC shall be configured to enable Header-
- * Data split for FCoE packets at the beginning of FC payload.
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for FCoE packets at the
+ * beginning of FC payload.
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE UINT32_C(0x10)
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_FCOE \
+ UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC shall be configured to enable Header-
- * Data split for RoCE packets at the beginning of RoCE payload (after
- * BTH/GRH headers).
+ * When this bit is '1', the VNIC shall be configured
+ * to enable Header-Data split for RoCE packets at the
+ * beginning of RoCE payload (after BTH/GRH headers).
*/
- #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE UINT32_C(0x20)
- uint32_t enables;
+ #define HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_ROCE \
+ UINT32_C(0x20)
+ uint32_t enables;
/*
* This bit must be '1' for the jumbo_thresh_valid field to be
* configured.
@@ -8188,7 +18746,8 @@ struct hwrm_vnic_plcmodes_cfg_input {
#define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID \
UINT32_C(0x1)
/*
- * This bit must be '1' for the hds_offset_valid field to be configured.
+ * This bit must be '1' for the hds_offset_valid field to be
+ * configured.
*/
#define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID \
UINT32_C(0x2)
@@ -8198,523 +18757,568 @@ struct hwrm_vnic_plcmodes_cfg_input {
*/
#define HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID \
UINT32_C(0x4)
- uint32_t vnic_id;
/* Logical vnic ID */
- uint16_t jumbo_thresh;
+ uint32_t vnic_id;
/*
- * When jumbo placement algorithm is enabled, this value is used to
- * determine the threshold for jumbo placement. Packets with length
- * larger than this value will be placed according to the jumbo
- * placement algorithm.
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
*/
- uint16_t hds_offset;
+ uint16_t jumbo_thresh;
/*
- * This value is used to determine the offset into packet buffer where
- * the split data (payload) will be placed according to one of of HDS
- * placement algorithm. The lengths of packet buffers provided for split
- * data shall be larger than this value.
+ * This value is used to determine the offset into the
+ * packet buffer where the split data (payload) will be
+ * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
*/
- uint16_t hds_threshold;
+ uint16_t hds_offset;
/*
- * When one of the HDS placement algorithm is enabled, this value is
- * used to determine the threshold for HDS placement. Packets with
- * length larger than this value will be placed according to the HDS
- * placement algorithm. This value shall be in multiple of 4 bytes.
+ * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be a multiple of 4 bytes.
*/
- uint16_t unused_0[3];
+ uint16_t hds_threshold;
+ uint8_t unused_0[6];
} __attribute__((packed));
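
/*
 * Editorial sketch: enabling the jumbo placement algorithm with a 9 KB
 * threshold. Per the field comments above, jumbo_thresh is honored
 * only when its enables bit is set; the helper name is hypothetical
 * and the header fields are assumed to be filled separately.
 */
static void hwrm_prep_plcmodes_jumbo(struct hwrm_vnic_plcmodes_cfg_input *req,
				     uint32_t vnic_id)
{
	req->flags = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
	req->vnic_id = rte_cpu_to_le_32(vnic_id);
	req->jumbo_thresh = rte_cpu_to_le_16(9216);	/* split above 9216 B */
}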
-/* Output (16 bytes) */
+/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
struct hwrm_vnic_plcmodes_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_plcmodes_qcfg */
-/*
- * Description: This function can be used to query placement mode configuration
- * of the VNIC.
- */
-/* Input (24 bytes) */
+/***************************
+ * hwrm_vnic_plcmodes_qcfg *
+ ***************************/
+
+
+/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */
struct hwrm_vnic_plcmodes_qcfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t vnic_id;
+ uint64_t resp_addr;
/* Logical vnic ID */
- uint32_t unused_0;
+ uint32_t vnic_id;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* Output (24 bytes) */
+/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */
struct hwrm_vnic_plcmodes_qcfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
- uint32_t flags;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint32_t flags;
/*
- * When this bit is '1', the VNIC is configured to use regular placement
- * algorithm.
+ * When this bit is '1', the VNIC is configured to
+ * use regular placement algorithm.
*/
#define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_REGULAR_PLACEMENT \
UINT32_C(0x1)
/*
- * When this bit is '1', the VNIC is configured to use the jumbo
- * placement algorithm.
+ * When this bit is '1', the VNIC is configured to
+ * use the jumbo placement algorithm.
*/
#define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_JUMBO_PLACEMENT \
UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is configured to enable Header-Data
- * split for IPv4 packets.
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for IPv4 packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 UINT32_C(0x4)
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV4 \
+ UINT32_C(0x4)
/*
- * When this bit is '1', the VNIC is configured to enable Header-Data
- * split for IPv6 packets.
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for IPv6 packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 UINT32_C(0x8)
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_IPV6 \
+ UINT32_C(0x8)
/*
- * When this bit is '1', the VNIC is configured to enable Header-Data
- * split for FCoE packets.
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for FCoE packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE UINT32_C(0x10)
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_FCOE \
+ UINT32_C(0x10)
/*
- * When this bit is '1', the VNIC is configured to enable Header-Data
- * split for RoCE packets.
+ * When this bit is '1', the VNIC is configured
+ * to enable Header-Data split for RoCE packets.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE UINT32_C(0x20)
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_HDS_ROCE \
+ UINT32_C(0x20)
/*
- * When this bit is '1', the VNIC is configured to be the default VNIC
- * of the requesting function.
+ * When this bit is '1', the VNIC is configured
+ * to be the default VNIC of the requesting function.
*/
- #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC UINT32_C(0x40)
- uint16_t jumbo_thresh;
+ #define HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC \
+ UINT32_C(0x40)
/*
- * When jumbo placement algorithm is enabled, this value is used to
- * determine the threshold for jumbo placement. Packets with length
- * larger than this value will be placed according to the jumbo
- * placement algorithm.
+ * When jumbo placement algorithm is enabled, this value
+ * is used to determine the threshold for jumbo placement.
+ * Packets with length larger than this value will be
+ * placed according to the jumbo placement algorithm.
*/
- uint16_t hds_offset;
+ uint16_t jumbo_thresh;
/*
- * This value is used to determine the offset into packet buffer where
- * the split data (payload) will be placed according to one of of HDS
- * placement algorithm. The lengths of packet buffers provided for split
- * data shall be larger than this value.
+ * This value is used to determine the offset into the
+ * packet buffer where the split data (payload) will be
+ * placed according to one of the HDS placement algorithms.
+ *
+ * The lengths of packet buffers provided for split data
+ * shall be larger than this value.
*/
- uint16_t hds_threshold;
+ uint16_t hds_offset;
/*
- * When one of the HDS placement algorithm is enabled, this value is
- * used to determine the threshold for HDS placement. Packets with
- * length larger than this value will be placed according to the HDS
- * placement algorithm. This value shall be in multiple of 4 bytes.
+ * When one of the HDS placement algorithms is enabled, this
+ * value is used to determine the threshold for HDS
+ * placement.
+ * Packets with length larger than this value will be
+ * placed according to the HDS placement algorithm.
+ * This value shall be a multiple of 4 bytes.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t valid;
+ uint16_t hds_threshold;
+ uint8_t unused_0[5];
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
-/* Description: This function is used to allocate COS/Load Balance context. */
-/* Input (16 bytes) */
+/**********************************
+ * hwrm_vnic_rss_cos_lb_ctx_alloc *
+ **********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint16_t rss_cos_lb_ctx_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* rss_cos_lb_ctx_id is 16 b */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t valid;
+ uint16_t rss_cos_lb_ctx_id;
+ uint8_t unused_0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Description: This function can be used to free COS/Load Balance context. */
-/* Input (24 bytes) */
+/*********************************
+ * hwrm_vnic_rss_cos_lb_ctx_free *
+ *********************************/
+
+
+/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t rss_cos_lb_ctx_id;
+ uint64_t resp_addr;
/* rss_cos_lb_ctx_id is 16 b */
- uint16_t unused_0[3];
+ uint16_t rss_cos_lb_ctx_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
struct hwrm_vnic_rss_cos_lb_ctx_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
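
/*
 * Editorial sketch of the lifetime implied by the alloc/free pair
 * above: the context ID returned by _alloc is later handed back
 * verbatim to _free. hwrm_send_message() and the request/response
 * buffers are hypothetical stand-ins for the driver's HWRM transport.
 */
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *ao = resp_buf;
struct hwrm_vnic_rss_cos_lb_ctx_free_input fr = { 0 };

hwrm_send_message(&alloc_req, sizeof(alloc_req));	/* allocate context */
fr.rss_cos_lb_ctx_id = ao->rss_cos_lb_ctx_id;		/* already little-endian */
/* ... context used for RSS/CoS configuration ... */
hwrm_send_message(&fr, sizeof(fr));			/* free it when done */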
-/* hwrm_ring_alloc */
-/*
- * Description: This command allocates and does basic preparation for a ring.
- */
-/* Input (80 bytes) */
+/*******************
+ * hwrm_ring_alloc *
+ *******************/
+
+
+/* hwrm_ring_alloc_input (size:640b/80B) */
struct hwrm_ring_alloc_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t enables;
- /* This bit must be '1' for the Reserved1 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1)
- /* This bit must be '1' for the ring_arb_cfg field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG UINT32_C(0x2)
- /* This bit must be '1' for the Reserved3 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4)
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the ring_arb_cfg field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG \
+ UINT32_C(0x2)
/*
* This bit must be '1' for the stat_ctx_id_valid field to be
* configured.
*/
- #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID UINT32_C(0x8)
- /* This bit must be '1' for the Reserved4 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10)
- /* This bit must be '1' for the max_bw_valid field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20)
- uint8_t ring_type;
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the max_bw_valid field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the rx_ring_id field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the nq_ring_id field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the rx_buf_size field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID \
+ UINT32_C(0x100)
/* Ring Type. */
- /* L2 Completion Ring (CR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2)
- /* RoCE Notification Completion Ring (ROCE_CR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
- uint8_t unused_0;
- uint16_t unused_1;
- uint64_t page_tbl_addr;
- /* This value is a pointer to the page table for the Ring. */
- uint32_t fbo;
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ /* RX Aggregation Ring */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4)
+ /* Notification Queue */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ UINT32_C(0x5)
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_LAST \
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ
+ uint8_t unused_0[3];
+ /*
+ * This value is a pointer to the page table for the
+ * Ring.
+ */
+ uint64_t page_tbl_addr;
/* First Byte Offset of the first entry in the first page. */
- uint8_t page_size;
- /*
- * Actual page size in 2^page_size. The supported range is
- * increments in powers of 2 from 16 bytes to 1GB. - 4 = 16 B
- * Page size is 16 B. - 12 = 4 KB Page size is 4 KB. - 13 = 8 KB
- * Page size is 8 KB. - 16 = 64 KB Page size is 64 KB. - 21 = 2
- * MB Page size is 2 MB. - 22 = 4 MB Page size is 4 MB. - 30 = 1
- * GB Page size is 1 GB.
+ uint32_t fbo;
+ /*
+ * Actual page size in 2^page_size. The supported range is increments
+ * in powers of 2 from 16 bytes to 1GB.
+ * - 4 = 16 B
+ * Page size is 16 B.
+ * - 12 = 4 KB
+ * Page size is 4 KB.
+ * - 13 = 8 KB
+ * Page size is 8 KB.
+ * - 16 = 64 KB
+ * Page size is 64 KB.
+ * - 21 = 2 MB
+ * Page size is 2 MB.
+ * - 22 = 4 MB
+ * Page size is 4 MB.
+ * - 30 = 1 GB
+ * Page size is 1 GB.
+ */
+ uint8_t page_size;
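	/*
	 * (Editorial example) a 4 KB ring page is thus encoded as
	 * page_size = 12, since 1 << 12 == 4096; a 2 MB page as
	 * page_size = 21, since 1 << 21 == 2097152.
	 */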
+ /*
+ * This value indicates the depth of page table.
+ * For this version of the specification, any value other than
+ * 0 or 1 shall be considered invalid.
+ * When the page_tbl_depth = 0, then it is treated as a
+ * special case with the following.
+ * 1. FBO and page size fields are not valid.
+ * 2. page_tbl_addr is the physical address of the first
+ * element of the ring.
+ */
+ uint8_t page_tbl_depth;
+ uint8_t unused_1[2];
+ /*
+ * Number of 16B units in the ring. Minimum size for
+ * a ring is 16 16B entries.
+ */
+ uint32_t length;
+ /*
+ * Logical ring number for the ring to be allocated.
+ * This value determines the position in the doorbell
+ * area where the update to the ring will be made.
+ *
+ * For completion rings, this value is also the MSI-X
+ * vector number for the function the completion ring is
+ * associated with.
*/
- uint8_t page_tbl_depth;
+ uint16_t logical_id;
/*
- * This value indicates the depth of page table. For this
- * version of the specification, value other than 0 or 1 shall
- * be considered as an invalid value. When the page_tbl_depth =
- * 0, then it is treated as a special case with the following.
- * 1. FBO and page size fields are not valid. 2. page_tbl_addr
- * is the physical address of the first element of the ring.
+ * This field is used only when ring_type is a TX ring.
+ * This value indicates what completion ring the TX ring
+ * is associated with.
*/
- uint8_t unused_2;
- uint8_t unused_3;
- uint32_t length;
+ uint16_t cmpl_ring_id;
/*
- * Number of 16B units in the ring. Minimum size for a ring is
- * 16 16B entries.
+ * This field is used only when ring_type is a TX ring.
+ * This value indicates what CoS queue the TX ring
+ * is associated with.
*/
- uint16_t logical_id;
+ uint16_t queue_id;
/*
- * Logical ring number for the ring to be allocated. This value
- * determines the position in the doorbell area where the update
- * to the ring will be made. For completion rings, this value is
- * also the MSI-X vector number for the function the completion
- * ring is associated with.
+ * When allocating a Rx ring or Rx aggregation ring, this field
+ * specifies the size of the buffer descriptors posted to the ring.
*/
- uint16_t cmpl_ring_id;
+ uint16_t rx_buf_size;
/*
- * This field is used only when ring_type is a TX ring. This
- * value indicates what completion ring the TX ring is
- * associated with.
+ * When allocating an Rx aggregation ring, this field
+ * specifies the associated Rx ring ID.
*/
- uint16_t queue_id;
+ uint16_t rx_ring_id;
/*
- * This field is used only when ring_type is a TX ring. This
- * value indicates what CoS queue the TX ring is associated
- * with.
+ * When allocating a completion ring, this field
+ * specifies the associated NQ ring ID.
*/
- uint8_t unused_4;
- uint8_t unused_5;
- uint32_t reserved1;
- /* This field is reserved for the future use. It shall be set to 0. */
- uint16_t ring_arb_cfg;
+ uint16_t nq_ring_id;
/*
- * This field is used only when ring_type is a TX ring. This
- * field is used to configure arbitration related parameters for
- * a TX ring.
+ * This field is used only when ring_type is a TX ring.
+ * This field is used to configure arbitration related
+ * parameters for a TX ring.
*/
+ uint16_t ring_arb_cfg;
/* Arbitration policy used for the ring. */
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK UINT32_C(0xf)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \
+ UINT32_C(0xf)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0
/*
- * Use strict priority for the TX ring. Priority
- * value is specified in arb_policy_param
+ * Use strict priority for the TX ring.
+ * Priority value is specified in arb_policy_param
*/
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \
- (UINT32_C(0x1) << 0)
+ UINT32_C(0x1)
/*
- * Use weighted fair queue arbitration for the
- * TX ring. Weight is specified in
- * arb_policy_param
+ * Use weighted fair queue arbitration for the TX ring.
+ * Weight is specified in arb_policy_param
*/
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \
- (UINT32_C(0x2) << 0)
+ UINT32_C(0x2)
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \
- RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ
+ HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ
/* Reserved field. */
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK UINT32_C(0xf0)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4
- /*
- * Arbitration policy specific parameter. # For strict priority
- * arbitration policy, this field represents a priority value.
- * If set to 0, then the priority is not specified and the HWRM
- * is allowed to select any priority for this TX ring. # For
- * weighted fair queue arbitration policy, this field represents
- * a weight value. If set to 0, then the weight is not specified
- * and the HWRM is allowed to select any weight for this TX
- * ring.
- */
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK \
+ UINT32_C(0xf0)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4
+ /*
+ * Arbitration policy specific parameter.
+ * # For strict priority arbitration policy, this field
+ * represents a priority value. If set to 0, then the priority
+ * is not specified and the HWRM is allowed to select
+ * any priority for this TX ring.
+ * # For weighted fair queue arbitration policy, this field
+ * represents a weight value. If set to 0, then the weight
+ * is not specified and the HWRM is allowed to select
+ * any weight for this TX ring.
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
UINT32_C(0xff00)
- #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- uint8_t unused_6;
- uint8_t unused_7;
- uint32_t reserved3;
- /* This field is reserved for the future use. It shall be set to 0. */
- uint32_t stat_ctx_id;
- /*
- * This field is used only when ring_type is a TX ring. This
- * input indicates what statistics context this ring should be
- * associated with.
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
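	/*
	 * (Editorial example, HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ prefix
	 * elided) a strict-priority TX ring at priority 3 would set
	 * ring_arb_cfg = ARB_POLICY_SP | (3 << ARB_POLICY_PARAM_SFT),
	 * i.e. policy 0x1 in the low nibble and 0x3 in bits 8-15.
	 */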
+ uint16_t unused_3;
+ /*
+ * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved3;
+ /*
+ * This field is used only when ring_type is a TX ring.
+ * This input indicates what statistics context this ring
+ * should be associated with.
*/
- uint32_t reserved4;
- /* This field is reserved for the future use. It shall be set to 0. */
- uint32_t max_bw;
+ uint32_t stat_ctx_id;
/*
- * This field is used only when ring_type is a TX ring to
- * specify maximum BW allocated to the TX ring. The HWRM will
- * translate this value into byte counter and time interval used
- * for this ring inside the device.
+ * This field is reserved for future use.
+ * It shall be set to 0.
*/
+ uint32_t reserved4;
+ /*
+ * This field is used only when ring_type is a TX ring
+ * to specify maximum BW allocated to the TX ring.
+ * The HWRM will translate this value into byte counter and
+ * time interval used for this ring inside the device.
+ */
+ uint32_t max_bw;
/* The bandwidth value. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK UINT32_C(0xfffffff)
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0
- /* The granularity of the value (bits or bytes). */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE UINT32_C(0x10000000)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE \
+ UINT32_C(0x10000000)
/* Value is in bits. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS (UINT32_C(0x0) << 28)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
/* Value is in bytes. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES (UINT32_C(0x1) << 28)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
#define HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_LAST \
- RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES
+ HWRM_RING_ALLOC_INPUT_MAX_BW_SCALE_BYTES
/* bw_value_unit is 3 b */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
UINT32_C(0xe0000000)
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
- /* Value is in Mb or MB (base 10). */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MEGA \
(UINT32_C(0x0) << 29)
- /* Value is in Kb or KB (base 10). */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_KILO \
(UINT32_C(0x2) << 29)
/* Value is in bits or bytes. */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_BASE \
(UINT32_C(0x4) << 29)
- /* Value is in Gb or GB (base 10). */
- #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_GIGA \
(UINT32_C(0x6) << 29)
/* Value is in 1/100th of a percentage of total bandwidth. */
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
@@ -8723,628 +19327,932 @@ struct hwrm_ring_alloc_input {
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
(UINT32_C(0x7) << 29)
#define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
- RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
- uint8_t int_mode;
+ HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
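	/*
	 * (Editorial example, HWRM_RING_ALLOC_INPUT_MAX_BW_ prefix elided)
	 * a 100 Mbit/s cap would be written as
	 * max_bw = 100 | SCALE_BITS | BW_VALUE_UNIT_MEGA:
	 * the raw value in BW_VALUE_MASK, bit granularity, mega units.
	 */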
/*
* This field is used only when ring_type is a Completion ring.
- * This value indicates what interrupt mode should be used on
- * this completion ring. Note: In the legacy interrupt mode, no
- * more than 16 completion rings are allowed.
+ * This value indicates what interrupt mode should be used
+ * on this completion ring.
+ * Note: In the legacy interrupt mode, no more than 16
+ * completion rings are allowed.
*/
+ uint8_t int_mode;
/* Legacy INTA */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0)
/* Reserved */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1)
/* MSI-X */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2)
/* No Interrupt - Polled mode */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3)
- uint8_t unused_8[3];
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3)
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_LAST \
+ HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
+ uint8_t unused_4[3];
} __attribute__((packed));
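
/*
 * Editorial sketch: the minimum a TX ring allocation needs per the
 * field comments above. The helper name is hypothetical, and the
 * common header fields are assumed to be filled separately.
 */
static void hwrm_prep_tx_ring_alloc(struct hwrm_ring_alloc_input *req,
				    uint64_t ring_pa, uint32_t nb_16b_units,
				    uint16_t idx, uint16_t cp_ring_id,
				    uint16_t cos_queue_id)
{
	req->ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
	req->page_tbl_depth = 0;	/* contiguous ring: no FBO/page_size */
	req->page_tbl_addr = rte_cpu_to_le_64(ring_pa);	/* first ring element */
	req->length = rte_cpu_to_le_32(nb_16b_units);	/* 16B units, >= 16 */
	req->logical_id = rte_cpu_to_le_16(idx);	/* doorbell position */
	req->cmpl_ring_id = rte_cpu_to_le_16(cp_ring_id);
	req->queue_id = rte_cpu_to_le_16(cos_queue_id);	/* TX CoS queue */
}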
-/* Output (16 bytes) */
+/* hwrm_ring_alloc_output (size:128b/16B) */
struct hwrm_ring_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint16_t ring_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * Physical number of ring allocated. This value shall be unique
- * for a ring type.
+ * Physical number of ring allocated.
+ * This value shall be unique for a ring type.
*/
- uint16_t logical_ring_id;
+ uint16_t ring_id;
/* Logical number of ring allocated. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t logical_ring_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_ring_free */
-/*
- * Description: This command is used to free a ring and associated resources.
- * With QoS and DCBx agents, it is possible the traffic classes will be moved
- * from one CoS queue to another. When this occurs, the driver shall call
- * 'hwrm_ring_free' to free the allocated rings and then call 'hwrm_ring_alloc'
- * to re-allocate each ring and assign it to a new CoS queue. hwrm_ring_free
- * shall be called on a ring only after it has been idle for 500ms or more and
- * no frames have been posted to the ring during this time. All frames queued
- * for transmission shall be completed and at least 500ms time elapsed from the
- * last completion before calling this command.
- */
-/* Input (24 bytes) */
+/******************
+ * hwrm_ring_free *
+ ******************/
+
+
+/* hwrm_ring_free_input (size:192b/24B) */
struct hwrm_ring_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint8_t ring_type;
+ uint64_t resp_addr;
/* Ring Type. */
- /* L2 Completion Ring (CR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
- /* TX Ring (TR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
- /* RX Ring (RR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
- /* RoCE Notification Completion Ring (ROCE_CR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
- uint8_t unused_0;
- uint16_t ring_id;
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ /* RX Aggregation Ring */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG UINT32_C(0x4)
+ /* Notification Queue */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_NQ UINT32_C(0x5)
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_LAST \
+ HWRM_RING_FREE_INPUT_RING_TYPE_NQ
+ uint8_t unused_0;
/* Physical number of ring allocated. */
- uint32_t unused_1;
+ uint16_t ring_id;
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_ring_free_output (size:128b/16B) */
struct hwrm_ring_free_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************************
+ * hwrm_ring_cmpl_ring_qaggint_params *
+ **************************************/
+
+
+/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /* Physical number of the completion ring. */
+ uint16_t ring_id;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_ring_grp_alloc */
-/*
- * Description: This API allocates and does basic preparation for a ring group.
- */
-/* Input (24 bytes) */
-struct hwrm_ring_grp_alloc_input {
- uint16_t req_type;
+/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+struct hwrm_ring_cmpl_ring_qaggint_params_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint16_t flags;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * When this bit is set to '1', the interrupt max
+ * timer is reset whenever a completion is received.
*/
- uint16_t cmpl_ring;
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_TIMER_RESET \
+ UINT32_C(0x1)
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * When this bit is set to '1', ring idle mode
+ * aggregation will be enabled.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS_OUTPUT_FLAGS_RING_IDLE \
+ UINT32_C(0x2)
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * Number of completions to aggregate before DMA
+ * during the normal mode.
*/
- uint64_t resp_addr;
+ uint16_t num_cmpl_dma_aggr;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * Number of completions to aggregate before DMA
+ * during the interrupt mode.
*/
- uint16_t cr;
- /* This value identifies the CR associated with the ring group. */
- uint16_t rr;
- /* This value identifies the main RR associated with the ring group. */
- uint16_t ar;
+ uint16_t num_cmpl_dma_aggr_during_int;
/*
- * This value identifies the aggregation RR associated with the
- * ring group. If this value is 0xFF... (All Fs), then no
- * Aggregation ring will be set.
+	 * Timer in units of 80 nsec used to aggregate completions before
+ * DMA during the normal mode (not in interrupt mode).
*/
- uint16_t sc;
+ uint16_t cmpl_aggr_dma_tmr;
/*
- * This value identifies the statistics context associated with
- * the ring group.
+	 * Timer in units of 80 nsec used to aggregate completions before
+ * DMA during the interrupt mode.
*/
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+	/* Minimum time (in units of 80 nsec) between two interrupts. */
+ uint16_t int_lat_tmr_min;
+ /*
+	 * Maximum wait time (in units of 80 nsec) spent aggregating
+ * completions before signaling the interrupt after the
+ * interrupt is enabled.
+ */
+ uint16_t int_lat_tmr_max;
+ /*
+ * Minimum number of completions aggregated before signaling
+ * an interrupt.
+ */
+ uint16_t num_cmpl_aggr_int;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_ring_grp_alloc_output {
- uint16_t error_code;
+/*****************************************
+ * hwrm_ring_cmpl_ring_cfg_aggint_params *
+ *****************************************/
+
+
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+	/* Physical number of the completion ring. */
+ uint16_t ring_id;
+ uint16_t flags;
+ /*
+ * When this bit is set to '1', interrupt latency max
+ * timer is reset whenever a completion is received.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET \
+ UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', ring idle mode
+ * aggregation will be enabled.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE \
+ UINT32_C(0x2)
+ /*
+ * Set this flag to 1 when configuring parameters on a
+ * notification queue. Set this flag to 0 when configuring
+ * parameters on a completion queue.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_IS_NQ \
+ UINT32_C(0x4)
+ /*
+ * Number of completions to aggregate before DMA
+ * during the normal mode.
+ */
+ uint16_t num_cmpl_dma_aggr;
+ /*
+ * Number of completions to aggregate before DMA
+ * during the interrupt mode.
+ */
+ uint16_t num_cmpl_dma_aggr_during_int;
+ /*
+	 * Timer in units of 80 nsec used to aggregate completions before
+ * DMA during the normal mode (not in interrupt mode).
+ */
+ uint16_t cmpl_aggr_dma_tmr;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+	 * Timer in units of 80 nsec used to aggregate completions before
+ * DMA during the interrupt mode.
*/
- uint32_t ring_group_id;
+ uint16_t cmpl_aggr_dma_tmr_during_int;
+	/* Minimum time (in units of 80 nsec) between two interrupts. */
+ uint16_t int_lat_tmr_min;
/*
- * This is the ring group ID value. Use this value to program
- * the default ring group for the VNIC or as table entries in an
- * RSS/COS context.
+	 * Maximum wait time (in units of 80 nsec) spent aggregating
+	 * completions before signaling the interrupt after the
+ * interrupt is enabled.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t int_lat_tmr_max;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * Minimum number of completions aggregated before signaling
+ * an interrupt.
*/
+ uint16_t num_cmpl_aggr_int;
+ /*
+ * Bitfield that indicates which parameters are to be applied. Only
+ * required when configuring devices with notification queues, and
+ * used in that case to set certain parameters on completion queues
+ * and others on notification queues.
+ */
+ uint16_t enables;
+ /*
+ * This bit must be '1' for the num_cmpl_dma_aggr field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the cmpl_aggr_dma_tmr field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the int_lat_tmr_min field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the int_lat_tmr_max field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the num_cmpl_aggr_int field to be
+ * configured.
+ */
+ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_AGGR_INT \
+ UINT32_C(0x20)
+ uint8_t unused_0[4];
} __attribute__((packed));
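
All of the timer fields above are expressed in 80 nsec ticks, so converting from microseconds is a multiply by 12.5 (1 usec = 1000 nsec = 12.5 ticks). A hedged sketch of populating the configure request with hypothetical coalescing targets; the common header fields (req_type, seq_id, resp_addr) and the mailbox transport are left to the surrounding driver and not shown:

#include <stdint.h>
#include <string.h>

/* 1 usec = 1000 nsec = 12.5 ticks of 80 nsec; e.g. 4 usec -> 50 ticks. */
static uint16_t hwrm_usec_to_80ns_ticks(uint32_t usec)
{
	return (uint16_t)((usec * 1000U) / 80U);
}

/* Hypothetical policy: DMA after 16 completions or 4 usec, whichever comes
 * first, and keep interrupts between 1 and 8 usec apart. */
static void fill_aggint_cfg(struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req,
			    uint16_t ring_id)
{
	memset(req, 0, sizeof(*req));
	req->ring_id = ring_id;
	req->num_cmpl_dma_aggr = 16;
	req->cmpl_aggr_dma_tmr = hwrm_usec_to_80ns_ticks(4);
	req->int_lat_tmr_min = hwrm_usec_to_80ns_ticks(1);
	req->int_lat_tmr_max = hwrm_usec_to_80ns_ticks(8);
	req->flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
	/* Only fields whose 'enables' bit is set are applied. */
	req->enables =
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MIN |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_INT_LAT_TMR_MAX;
}
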
-/* hwrm_ring_grp_free */
-/*
- * Description: This API frees a ring group and associated resources. # If a
- * ring in the ring group is reset or free, then the associated rings in the
- * ring group shall also be reset/free using hwrm_ring_free. # A function driver
- * shall always use hwrm_ring_grp_free after freeing all rings in a group. # As
- * a part of executing this command, the HWRM shall reset all associated ring
- * group resources.
- */
-/* Input (24 bytes) */
-struct hwrm_ring_grp_free_input {
- uint16_t req_type;
+/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************
+ * hwrm_ring_reset *
+ *******************/
+
+
+/* hwrm_ring_reset_input (size:192b/24B) */
+struct hwrm_ring_reset_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t ring_group_id;
- /* This is the ring group ID value. */
- uint32_t unused_0;
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Ring Type. */
+ uint8_t ring_type;
+ /* L2 Completion Ring (CR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_L2_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_RX UINT32_C(0x2)
+ /* RoCE Notification Completion Ring (ROCE_CR) */
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
+ #define HWRM_RING_RESET_INPUT_RING_TYPE_LAST \
+ HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL
+ uint8_t unused_0;
+ /* Physical number of the ring. */
+ uint16_t ring_id;
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_ring_grp_free_output {
- uint16_t error_code;
+/* hwrm_ring_reset_output (size:128b/16B) */
+struct hwrm_ring_reset_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
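
Resetting a ring is then just a matter of selecting the ring type and the physical ring ID. A short sketch for an RX ring, under the same assumptions as the previous example (common header fields and transport elided):

#include <string.h>

static void fill_ring_reset(struct hwrm_ring_reset_input *req, uint16_t rx_ring_id)
{
	memset(req, 0, sizeof(*req));
	req->ring_type = HWRM_RING_RESET_INPUT_RING_TYPE_RX;	/* RX Ring (RR) */
	req->ring_id = rx_ring_id;	/* physical ring number */
}
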
+
+/***********************
+ * hwrm_ring_grp_alloc *
+ ***********************/
+
+
+/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+struct hwrm_ring_grp_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value identifies the CR associated with the ring
+ * group.
+ */
+ uint16_t cr;
+ /*
+ * This value identifies the main RR associated with the ring
+ * group.
+ */
+ uint16_t rr;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This value identifies the aggregation RR associated with
+ * the ring group. If this value is 0xFF... (All Fs), then no
+ * Aggregation ring will be set.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t ar;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This value identifies the statistics context associated
+ * with the ring group.
*/
+ uint16_t sc;
} __attribute__((packed));
-/* hwrm_cfa_l2_filter_alloc */
-/*
- * Description: An L2 filter is a filter resource that is used to identify a
- * vnic or ring for a packet based on layer 2 fields. Layer 2 fields for
- * encapsulated packets include both outer L2 header and/or inner l2 header of
- * encapsulated packet. The L2 filter resource covers the following OS specific
- * L2 filters. Linux/FreeBSD (per function): # Broadcast enable/disable # List
- * of individual multicast filters # All multicast enable/disable filter #
- * Unicast filters # Promiscuous mode VMware: # Broadcast enable/disable (per
- * physical function) # All multicast enable/disable (per function) # Unicast
- * filters per ring or vnic # Promiscuous mode per PF Windows: # Broadcast
- * enable/disable (per physical function) # List of individual multicast filters
- * (Driver needs to advertise the maximum number of filters supported) # All
- * multicast enable/disable per physical function # Unicast filters per vnic #
- * Promiscuous mode per PF Implementation notes on the use of VNIC in this
- * command: # By default, these filters belong to default vnic for the function.
- * # Once these filters are set up, only destination VNIC can be modified. # If
- * the destination VNIC is not specified in this command, then the HWRM shall
- * only create an l2 context id. HWRM Implementation notes for multicast
- * filters: # The hwrm_filter_alloc command can be used to set up multicast
- * filters (perfect match or partial match). Each individual function driver can
- * set up multicast filters independently. # The HWRM needs to keep track of
- * multicast filters set up by function drivers and maintain multicast group
- * replication records to enable a subset of functions to receive traffic for a
- * specific multicast address. # When a specific multicast filter cannot be set,
- * the HWRM shall return an error. In this error case, the driver should fall
- * back to using one general filter (rather than specific) for all multicast
- * traffic. # When the SR-IOV is enabled, the HWRM needs to additionally track
- * source knockout per multicast group record. Examples of setting unicast
- * filters: For a unicast MAC based filter, one can use a combination of the
- * fields and masks provided in this command to set up the filter. Below are
- * some examples: # MAC + no VLAN filter: This filter is used to identify
- * traffic that does not contain any VLAN tags and matches destination (or
- * source) MAC address. This filter can be set up by setting only l2_addr field
- * to be a valid field. All other fields are not valid. The following value is
- * set for l2_addr. l2_addr = MAC # MAC + Any VLAN filter: This filter is used
- * to identify traffic that carries single VLAN tag and matches (destination or
- * source) MAC address. This filter can be set up by setting only l2_addr and
- * l2_ovlan_mask fields to be valid fields. All other fields are not valid. The
- * following values are set for those two valid fields. l2_addr = MAC,
- * l2_ovlan_mask = 0xFFFF # MAC + no VLAN or VLAN ID=0: This filter is used to
- * identify untagged traffic that does not contain any VLAN tags or a VLAN tag
- * with VLAN ID = 0 and matches destination (or source) MAC address. This filter
- * can be set up by setting only l2_addr and l2_ovlan fields to be valid fields.
- * All other fields are not valid. The following value are set for l2_addr and
- * l2_ovlan. l2_addr = MAC, l2_ovlan = 0x0 # MAC + no VLAN or any VLAN: This
- * filter is used to identify traffic that contains zero or 1 VLAN tag and
- * matches destination (or source) MAC address. This filter can be set up by
- * setting only l2_addr, l2_ovlan, and l2_mask fields to be valid fields. All
- * other fields are not valid. The following value are set for l2_addr,
- * l2_ovlan, and l2_mask fields. l2_addr = MAC, l2_ovlan = 0x0, l2_ovlan_mask =
- * 0xFFFF # MAC + VLAN ID filter: This filter can be set up by setting only
- * l2_addr, l2_ovlan, and l2_ovlan_mask fields to be valid fields. All other
- * fields are not valid. The following values are set for those three valid
- * fields. l2_addr = MAC, l2_ovlan = VLAN ID, l2_ovlan_mask = 0xF000
- */
-/* Input (96 bytes) */
+/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+struct hwrm_ring_grp_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the ring group ID value. Use this value to program
+ * the default ring group for the VNIC or as table entries
+ * in an RSS/COS context.
+ */
+ uint32_t ring_group_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
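
A ring group binds a completion ring (`cr`), a main receive ring (`rr`), an optional aggregation ring (`ar`), and a statistics context (`sc`); passing all Fs in `ar` skips the aggregation ring. A sketch with hypothetical resource IDs:

#include <string.h>

static void fill_ring_grp_alloc(struct hwrm_ring_grp_alloc_input *req,
				uint16_t cr_id, uint16_t rr_id,
				uint16_t stat_ctx_id)
{
	memset(req, 0, sizeof(*req));
	req->cr = cr_id;	/* completion ring of the group */
	req->rr = rr_id;	/* main receive ring */
	req->ar = 0xFFFF;	/* all Fs: no aggregation ring */
	req->sc = stat_ctx_id;	/* statistics context */
}
/* On success, hwrm_ring_grp_alloc_output.ring_group_id is what gets
 * programmed as the VNIC default ring group or into RSS/COS tables. */
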
+
+/**********************
+ * hwrm_ring_grp_free *
+ **********************/
+
+
+/* hwrm_ring_grp_free_input (size:192b/24B) */
+struct hwrm_ring_grp_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This is the ring group ID value. */
+ uint32_t ring_group_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_ring_grp_free_output (size:128b/16B) */
+struct hwrm_ring_grp_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_cfa_l2_filter_alloc *
+ ****************************/
+
+
+/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
- CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
+ /* Setting of this flag indicates the applicability to the loopback path. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x2)
/*
- * Setting of this flag indicates the applicability to the
- * loopback path.
+	 * Setting of this flag indicates a drop action. If this flag is not
+	 * set, then an accept action should be assumed.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \
+ UINT32_C(0x4)
/*
- * Setting of this flag indicates drop action. If this flag is
- * not set, then it should be considered accept action.
+	 * If this flag is set, all t_l2_* fields are invalid
+	 * and should not be specified; the l2_* fields then
+	 * refer to fields of the outermost L2 header.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x4)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \
+ UINT32_C(0x8)
+ uint32_t enables;
/*
- * If this flag is set, all t_l2_* fields are invalid and they
- * should not be specified. If this flag is set, then l2_*
- * fields refer to fields of outermost L2 header.
+ * This bit must be '1' for the l2_addr field to be
+ * configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
- uint32_t enables;
- /* This bit must be '1' for the l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
- /* This bit must be '1' for the l2_addr_mask field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the l2_addr_mask field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
UINT32_C(0x2)
- /* This bit must be '1' for the l2_ovlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the l2_ovlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \
+ UINT32_C(0x4)
/*
* This bit must be '1' for the l2_ovlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
UINT32_C(0x8)
- /* This bit must be '1' for the l2_ivlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the l2_ivlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \
+ UINT32_C(0x10)
/*
* This bit must be '1' for the l2_ivlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
UINT32_C(0x20)
- /* This bit must be '1' for the t_l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the t_l2_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \
+ UINT32_C(0x40)
/*
* This bit must be '1' for the t_l2_addr_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
UINT32_C(0x80)
- /* This bit must be '1' for the t_l2_ovlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
+ /*
+ * This bit must be '1' for the t_l2_ovlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
UINT32_C(0x100)
/*
* This bit must be '1' for the t_l2_ovlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
UINT32_C(0x200)
- /* This bit must be '1' for the t_l2_ivlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
+ /*
+ * This bit must be '1' for the t_l2_ivlan field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
UINT32_C(0x400)
/*
* This bit must be '1' for the t_l2_ivlan_mask field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
UINT32_C(0x800)
- /* This bit must be '1' for the src_type field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000)
- /* This bit must be '1' for the src_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
- /* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ /*
+ * This bit must be '1' for the src_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the src_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
UINT32_C(0x4000)
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x8000)
/*
* This bit must be '1' for the mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
UINT32_C(0x10000)
- uint8_t l2_addr[6];
/*
* This value sets the match value for the L2 MAC address.
- * Destination MAC address for RX path. Source MAC address for
- * TX path.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t l2_addr_mask[6];
+ uint8_t l2_addr[6];
+ uint8_t unused_0[2];
/*
- * This value sets the mask value for the L2 address. A value of
- * 0 will mask the corresponding bit from compare.
+ * This value sets the mask value for the L2 address.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint16_t l2_ovlan;
+ uint8_t l2_addr_mask[6];
/* This value sets VLAN ID value for outer VLAN. */
- uint16_t l2_ovlan_mask;
+ uint16_t l2_ovlan;
/*
- * This value sets the mask value for the ovlan id. A value of 0
- * will mask the corresponding bit from compare.
+ * This value sets the mask value for the ovlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint16_t l2_ivlan;
+ uint16_t l2_ovlan_mask;
/* This value sets VLAN ID value for inner VLAN. */
- uint16_t l2_ivlan_mask;
+ uint16_t l2_ivlan;
/*
- * This value sets the mask value for the ivlan id. A value of 0
- * will mask the corresponding bit from compare.
+ * This value sets the mask value for the ivlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t t_l2_addr[6];
+ uint16_t l2_ivlan_mask;
+ uint8_t unused_1[2];
/*
- * This value sets the match value for the tunnel L2 MAC
- * address. Destination MAC address for RX path. Source MAC
- * address for TX path.
+ * This value sets the match value for the tunnel
+ * L2 MAC address.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
*/
- uint8_t unused_4;
- uint8_t unused_5;
- uint8_t t_l2_addr_mask[6];
+ uint8_t t_l2_addr[6];
+ uint8_t unused_2[2];
/*
- * This value sets the mask value for the tunnel L2 address. A
- * value of 0 will mask the corresponding bit from compare.
+ * This value sets the mask value for the tunnel L2
+ * address.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint16_t t_l2_ovlan;
+ uint8_t t_l2_addr_mask[6];
/* This value sets VLAN ID value for tunnel outer VLAN. */
- uint16_t t_l2_ovlan_mask;
+ uint16_t t_l2_ovlan;
/*
- * This value sets the mask value for the tunnel ovlan id. A
- * value of 0 will mask the corresponding bit from compare.
+ * This value sets the mask value for the tunnel ovlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint16_t t_l2_ivlan;
+ uint16_t t_l2_ovlan_mask;
/* This value sets VLAN ID value for tunnel inner VLAN. */
- uint16_t t_l2_ivlan_mask;
+ uint16_t t_l2_ivlan;
/*
- * This value sets the mask value for the tunnel ivlan id. A
- * value of 0 will mask the corresponding bit from compare.
+ * This value sets the mask value for the tunnel ivlan id.
+ * A value of 0 will mask the corresponding bit from
+ * compare.
*/
- uint8_t src_type;
+ uint16_t t_l2_ivlan_mask;
/* This value identifies the type of source of the packet. */
+ uint8_t src_type;
/* Network port */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0)
/* Physical function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1)
/* Virtual function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2)
/* Virtual NIC of a function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3)
/* Embedded processor for CFA management */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4)
/* Embedded processor for OOB management */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5)
/* Embedded processor for RoCE */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6)
/* Embedded processor for network proxy functions */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7)
- uint8_t unused_6;
- uint32_t src_id;
- /*
- * This value is the id of the source. For a network port, it
- * represents port_id. For a physical function, it represents
- * fid. For a virtual function, it represents vf_id. For a vnic,
- * it represents vnic_id. For embedded processors, this id is
- * not valid. Notes: 1. The function ID is implied if it src_id
- * is not provided for a src_type that is either
- */
- uint8_t tunnel_type;
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG
+ uint8_t unused_3;
+ /*
+ * This value is the id of the source.
+ * For a network port, it represents port_id.
+ * For a physical function, it represents fid.
+ * For a virtual function, it represents vf_id.
+ * For a vnic, it represents vnic_id.
+ * For embedded processors, this id is not valid.
+ *
+ * Notes:
+	 * 1. The function ID is implied if the src_id is
+ * not provided for a src_type that is either
+ */
+ uint32_t src_id;
/* Tunnel Type. */
+ uint8_t tunnel_type;
/* Non-tunnel */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
UINT32_C(0x1)
- /*
- * Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
- */
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
UINT32_C(0x2)
- /*
- * Generic Routing Encapsulation (GRE) inside
- * Ethernet payload
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
/* IP in IP */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
- /*
- * Generic Routing Encapsulation (GRE) inside IP
- * datagram payload
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
- /*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
- */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
UINT32_C(0x9)
/* Any tunneled traffic */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
UINT32_C(0xff)
- uint8_t unused_7;
- uint16_t dst_id;
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_4;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
*/
- uint16_t mirror_vnic_id;
- /* Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint8_t pri_hint;
+ uint16_t dst_id;
/*
- * This hint is provided to help in placing the filter in the
- * filter table.
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
+ uint16_t mirror_vnic_id;
+ /*
+ * This hint is provided to help in placing
+ * the filter in the filter table.
+ */
+ uint8_t pri_hint;
/* No preference */
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
UINT32_C(0x0)
@@ -9355,576 +20263,1554 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
UINT32_C(0x2)
/* As high as possible */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX UINT32_C(0x3)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \
+ UINT32_C(0x3)
/* As low as possible */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN UINT32_C(0x4)
- uint8_t unused_8;
- uint32_t unused_9;
- uint64_t l2_filter_id_hint;
- /*
- * This is the ID of the filter that goes along with the
- * pri_hint. This field is valid only for the following values.
- * 1 - Above the given filter 2 - Below the given filter
- */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \
+ UINT32_C(0x4)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_LAST \
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN
+ uint8_t unused_5;
+ uint32_t unused_6;
+ /*
+ * This is the ID of the filter that goes along with
+ * the pri_hint.
+ *
+ * This field is valid only for the following values.
+ * 1 - Above the given filter
+ * 2 - Below the given filter
+ */
+ uint64_t l2_filter_id_hint;
} __attribute__((packed));
-/* Output (24 bytes) */
+/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_l2_filter_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint64_t l2_filter_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
- uint32_t flow_id;
+ uint64_t l2_filter_id;
/*
- * This is the ID of the flow associated with this filter. This
- * value shall be used to match and associate the flow
- * identifier returned in completion records. A value of
- * 0xFFFFFFFF shall indicate no flow id.
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint32_t flow_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
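
Because a field is only honored when its matching `enables` bit is set, a typical exact-match unicast RX filter needs just `l2_addr`, its mask, and a destination. A hedged sketch with a hypothetical destination VNIC; the returned `l2_filter_id` is the handle for later cfg/free calls, and a `flow_id` of 0xFFFFFFFF in the response means no flow is associated:

#include <stdint.h>
#include <string.h>

static void fill_l2_filter_alloc(struct hwrm_cfa_l2_filter_alloc_input *req,
				 const uint8_t mac[6], uint16_t dst_vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	req->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
		       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK |
		       HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	memcpy(req->l2_addr, mac, 6);
	memset(req->l2_addr_mask, 0xFF, 6);	/* exact match on all 48 bits */
	req->dst_id = dst_vnic_id;	/* RX path: logical VNIC ID */
}
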
-/* hwrm_cfa_l2_filter_free */
-/*
- * Description: Free a L2 filter. The HWRM shall free all associated filter
- * resources with the L2 filter.
- */
-/* Input (24 bytes) */
+/***************************
+ * hwrm_cfa_l2_filter_free *
+ ***************************/
+
+
+/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
struct hwrm_cfa_l2_filter_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t l2_filter_id;
+ uint64_t resp_addr;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
+ uint64_t l2_filter_id;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_cfa_l2_filter_cfg */
-/* Description: Change the configuration of an existing L2 filter */
-/* Input (40 bytes) */
+/**************************
+ * hwrm_cfa_l2_filter_cfg *
+ **************************/
+
+
+/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
struct hwrm_cfa_l2_filter_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
- CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
+ HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
/*
- * Setting of this flag indicates drop action. If this flag is
- * not set, then it should be considered accept action.
+	 * Setting of this flag indicates a drop action. If this flag is not
+	 * set, then an accept action should be assumed.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2)
- uint32_t enables;
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID UINT32_C(0x1)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x1)
/*
* This bit must be '1' for the new_mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
UINT32_C(0x2)
- uint64_t l2_filter_id;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
*/
- uint32_t dst_id;
+ uint32_t dst_id;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
- uint32_t new_mirror_vnic_id;
- /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
+ uint32_t new_mirror_vnic_id;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_l2_filter_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
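
The cfg command follows the same enables discipline: only the flagged fields of an existing filter change. A sketch that re-points a previously allocated filter at a new destination VNIC, assuming the `l2_filter_id` saved from the alloc response:

#include <stdint.h>
#include <string.h>

static void fill_l2_filter_cfg(struct hwrm_cfa_l2_filter_cfg_input *req,
			       uint64_t l2_filter_id, uint32_t new_dst_vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX;
	req->enables = HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID;
	req->l2_filter_id = l2_filter_id;	/* handle from the alloc response */
	req->dst_id = new_dst_vnic_id;		/* new destination VNIC (RX path) */
}
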
-/* hwrm_cfa_l2_set_rx_mask */
-/* Description: This command will set rx mask of the function. */
-/* Input (56 bytes) */
+/***************************
+ * hwrm_cfa_l2_set_rx_mask *
+ ***************************/
+
+
+/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
struct hwrm_cfa_l2_set_rx_mask_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer pointing to a host buffer into which
+	 * the command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t vnic_id;
+ uint64_t resp_addr;
/* VNIC ID */
- uint32_t mask;
- /* Reserved for future use. */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1)
+ uint32_t vnic_id;
+ uint32_t mask;
/*
* When this bit is '1', the function is requested to accept
* multi-cast packets specified by the multicast addr table.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST \
+ UINT32_C(0x2)
/*
- * When this bit is '1', the function is requested to accept all
- * multi-cast packets.
+ * When this bit is '1', the function is requested to accept
+ * all multi-cast packets.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST \
+ UINT32_C(0x4)
/*
* When this bit is '1', the function is requested to accept
* broadcast packets.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8)
- /*
- * When this bit is '1', the function is requested to be put in
- * the promiscuous mode. The HWRM should accept any function to
- * set up promiscuous mode. The HWRM shall follow the semantics
- * below for the promiscuous mode support. # When partitioning
- * is not enabled on a port (i.e. single PF on the port), then
- * the PF shall be allowed to be in the promiscuous mode. When
- * the PF is in the promiscuous mode, then it shall receive all
- * host bound traffic on that port. # When partitioning is
- * enabled on a port (i.e. multiple PFs per port) and a PF on
- * that port is in the promiscuous mode, then the PF receives
- * all traffic within that partition as identified by a unique
- * identifier for the PF (e.g. S-Tag). If a unique outer VLAN
- * for the PF is specified, then the setting of promiscuous mode
- * on that PF shall result in the PF receiving all host bound
- * traffic with matching outer VLAN. # A VF shall can be set in
- * the promiscuous mode. In the promiscuous mode, the VF does
- * not receive any traffic unless a unique outer VLAN for the VF
- * is specified. If a unique outer VLAN for the VF is specified,
- * then the setting of promiscuous mode on that VF shall result
- * in the VF receiving all host bound traffic with the matching
- * outer VLAN. # The HWRM shall allow the setting of promiscuous
- * mode on a function independently from the promiscuous mode
- * settings on other functions.
- */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
- /*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for the outermost
- * Layer 2 destination MAC address field.
- */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20)
- /*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for the VLAN-
- * tagged packets that match the TPID and VID fields of VLAN
- * tags in the VLAN tag table specified in this command.
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST \
+ UINT32_C(0x8)
+ /*
+ * When this bit is '1', the function is requested to be
+ * put in the promiscuous mode.
+ *
+ * The HWRM should accept any function to set up
+ * promiscuous mode.
+ *
+ * The HWRM shall follow the semantics below for the
+ * promiscuous mode support.
+ * # When partitioning is not enabled on a port
+ * (i.e. single PF on the port), then the PF shall
+ * be allowed to be in the promiscuous mode. When the
+ * PF is in the promiscuous mode, then it shall
+ * receive all host bound traffic on that port.
+ * # When partitioning is enabled on a port
+ * (i.e. multiple PFs per port) and a PF on that
+ * port is in the promiscuous mode, then the PF
+ * receives all traffic within that partition as
+ * identified by a unique identifier for the
+ * PF (e.g. S-Tag). If a unique outer VLAN
+ * for the PF is specified, then the setting of
+ * promiscuous mode on that PF shall result in the
+ * PF receiving all host bound traffic with matching
+ * outer VLAN.
+	 * # A VF can be set in the promiscuous mode.
+ * In the promiscuous mode, the VF does not receive any
+ * traffic unless a unique outer VLAN for the
+ * VF is specified. If a unique outer VLAN
+ * for the VF is specified, then the setting of
+ * promiscuous mode on that VF shall result in the
+ * VF receiving all host bound traffic with the
+ * matching outer VLAN.
+ * # The HWRM shall allow the setting of promiscuous
+ * mode on a function independently from the
+ * promiscuous mode settings on other functions.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS \
+ UINT32_C(0x10)
+ /*
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for the outermost Layer 2 destination MAC
+ * address field.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY UINT32_C(0x40)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST \
+ UINT32_C(0x20)
/*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for non-VLAN
- * tagged packets and VLAN-tagged packets that match the TPID
- * and VID fields of VLAN tags in the VLAN tag table specified
- * in this command.
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for the VLAN-tagged packets that match the
+ * TPID and VID fields of VLAN tags in the VLAN tag
+ * table specified in this command.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN UINT32_C(0x80)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY \
+ UINT32_C(0x40)
/*
- * If this flag is set, the corresponding RX filters shall be
- * set up to cover multicast/broadcast filters for non-VLAN
- * tagged packets and VLAN-tagged packets matching any VLAN tag.
- * If this flag is set, then the HWRM shall ignore VLAN tags
- * specified in vlan_tag_tbl. If none of vlanonly, vlan_nonvlan,
- * and anyvlan_nonvlan flags is set, then the HWRM shall ignore
- * VLAN tags specified in vlan_tag_tbl. The HWRM client shall
- * set at most one flag out of vlanonly, vlan_nonvlan, and
- * anyvlan_nonvlan.
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for non-VLAN tagged packets and VLAN-tagged
+ * packets that match the TPID and VID fields of VLAN
+ * tags in the VLAN tag table specified in this command.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN \
+ UINT32_C(0x80)
+ /*
+ * If this flag is set, the corresponding RX
+ * filters shall be set up to cover multicast/broadcast
+ * filters for non-VLAN tagged packets and VLAN-tagged
+ * packets matching any VLAN tag.
+ *
+ * If this flag is set, then the HWRM shall ignore
+ * VLAN tags specified in vlan_tag_tbl.
+ *
+ * If none of vlanonly, vlan_nonvlan, and anyvlan_nonvlan
+ * flags is set, then the HWRM shall ignore
+ * VLAN tags specified in vlan_tag_tbl.
+ *
+ * The HWRM client shall set at most one flag out of
+ * vlanonly, vlan_nonvlan, and anyvlan_nonvlan.
+ */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN \
UINT32_C(0x100)
- uint64_t mc_tbl_addr;
/* This is the address for mcast address tbl. */
- uint32_t num_mc_entries;
+ uint64_t mc_tbl_addr;
/*
* This value indicates how many entries in mc_tbl are valid.
* Each entry is 6 bytes.
*/
- uint32_t unused_0;
- uint64_t vlan_tag_tbl_addr;
+ uint32_t num_mc_entries;
+ uint8_t unused_0[4];
/*
- * This is the address for VLAN tag table. Each VLAN entry in
- * the table is 4 bytes of a VLAN tag including TPID, PCP, DEI,
- * and VID fields in network byte order.
+ * This is the address for VLAN tag table.
+ * Each VLAN entry in the table is 4 bytes of a VLAN tag
+ * including TPID, PCP, DEI, and VID fields in network byte
+ * order.
*/
- uint32_t num_vlan_tags;
+ uint64_t vlan_tag_tbl_addr;
/*
* This value indicates how many entries in vlan_tag_tbl are
* valid. Each entry is 4 bytes.
*/
- uint32_t unused_1;
+ uint32_t num_vlan_tags;
+ uint8_t unused_1[4];
} __attribute__((packed));
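
Taken together, the fields above are typically filled as in this hedged sketch; it is not the driver's actual code, the request buffer is assumed zeroed, and the little-endian conversions and send path a real driver performs are omitted:

    #include <stdint.h>

    /*
     * Accept broadcast plus an explicit multicast list on a VNIC.
     * mc_table_dma is the DMA address of num_mc consecutive 6-byte
     * MAC address entries in host memory.
     */
    static void fill_rx_mask_req(struct hwrm_cfa_l2_set_rx_mask_input *req,
                                 uint32_t vnic_id, uint64_t mc_table_dma,
                                 uint32_t num_mc)
    {
        req->vnic_id = vnic_id;
        req->mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                    HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        req->mc_tbl_addr = mc_table_dma;   /* 6 bytes per entry */
        req->num_mc_entries = num_mc;
        /*
         * At most one of VLANONLY, VLAN_NONVLAN and ANYVLAN_NONVLAN
         * may be set; none is set here, so vlan_tag_tbl is ignored.
         */
    }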
-/* Output (16 bytes) */
+/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
struct hwrm_cfa_l2_set_rx_mask_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+ /*
+	 * Command-specific error codes that go to
+	 * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* Unable to complete operation due to conflict with Ntuple Filter */
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \
+ UINT32_C(0x1)
+ #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST \
+ HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
+ uint8_t unused_0[7];
+} __attribute__((packed));
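
A small hedged sketch of decoding this command-specific error; how the cmd_err byte is extracted from the common HWRM error response is driver-specific and not shown:

    static const char *rx_mask_cmd_err_str(uint8_t code)
    {
        switch (code) {
        case HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN:
            return "unknown error";
        case HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR:
            return "conflict with an existing ntuple filter";
        default:
            return "unrecognized cmd_err code";
        }
    }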
+
+/*******************************
+ * hwrm_cfa_vlan_antispoof_cfg *
+ *******************************/
+
+
+/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */
+struct hwrm_cfa_vlan_antispoof_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * Function ID of the function that is being configured.
+ * Only valid for a VF FID configured by the PF.
*/
+ uint16_t fid;
+ uint8_t unused_0[2];
+ /* Number of VLAN entries in the vlan_tag_mask_tbl. */
+ uint32_t num_vlan_entries;
+ /*
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN
+ * antispoof table. Each table entry contains the 16-bit TPID
+ * (0x8100 or 0x88a8 only), 16-bit VLAN ID, and a 16-bit mask,
+ * all in network order to match hwrm_cfa_l2_set_rx_mask.
+ * For an individual VLAN entry, the mask value should be 0xfff
+ * for the 12-bit VLAN ID.
+ */
+ uint64_t vlan_tag_mask_tbl_addr;
} __attribute__((packed));
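
Going by the entry layout described above (16-bit TPID, 16-bit VLAN ID and 16-bit mask, all in network byte order), one table entry might be built as in this hedged sketch; the struct name and helper are illustrative only:

    #include <stdint.h>
    #include <arpa/inet.h> /* htons(); any 16-bit big-endian helper works */

    struct vlan_antispoof_entry {
        uint16_t tpid; /* 0x8100 or 0x88a8 only */
        uint16_t vid;
        uint16_t mask; /* 0xfff to match the full 12-bit VLAN ID */
    };

    static void set_antispoof_entry(struct vlan_antispoof_entry *e,
                                    uint16_t vid)
    {
        e->tpid = htons(0x8100);
        e->vid  = htons(vid & 0xfff);
        e->mask = htons(0xfff);
    }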
-/* Command specific Error Codes (8 bytes) */
-struct hwrm_cfa_l2_set_rx_mask_cmd_err {
- uint8_t code;
+/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */
+struct hwrm_cfa_vlan_antispoof_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * command specific error codes that goes to the cmd_err field
- * in Common HWRM Error Response.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- /* Unknown error */
- #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_vlan_antispoof_qcfg *
+ ********************************/
+
+
+/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */
+struct hwrm_cfa_vlan_antispoof_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Function ID of the function that is being queried.
+ * Only valid for a VF FID queried by the PF.
+ */
+ uint16_t fid;
+ uint8_t unused_0[2];
/*
- * Unable to complete operation due to conflict
- * with Ntuple Filter
+ * Maximum number of VLAN entries the firmware is allowed to DMA
+ * to vlan_tag_mask_tbl.
*/
- #define \
- HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \
- UINT32_C(0x1)
- uint8_t unused_0[7];
+ uint32_t max_vlan_entries;
+ /*
+ * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN
+	 * antispoof table to which firmware will DMA. Each table
+ * entry will contain the 16-bit TPID (0x8100 or 0x88a8 only),
+ * 16-bit VLAN ID, and a 16-bit mask, all in network order to
+ * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry,
+ * the mask value should be 0xfff for the 12-bit VLAN ID.
+ */
+ uint64_t vlan_tag_mask_tbl_addr;
} __attribute__((packed));
-/* hwrm_cfa_vlan_antispoof_cfg */
-/* Description: Configures vlan anti-spoof filters for VF. */
-/* Input (32 bytes) */
-struct hwrm_cfa_vlan_antispoof_cfg_input {
- uint16_t req_type;
+/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */
+struct hwrm_cfa_vlan_antispoof_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of valid entries DMAd by firmware to vlan_tag_mask_tbl. */
+ uint32_t num_vlan_entries;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_tunnel_filter_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+struct hwrm_cfa_tunnel_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+	/*
+	 * Setting this flag indicates that the filter applies to the
+	 * loopback path.
+	 */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ uint32_t enables;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
*/
- uint64_t resp_addr;
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ UINT32_C(0x1)
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * This bit must be '1' for the l2_addr field to be
+ * configured.
*/
- uint16_t fid;
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \
+ UINT32_C(0x2)
/*
- * Function ID of the function that is being configured. Only valid for
- * a VF FID configured by the PF.
+ * This bit must be '1' for the l2_ivlan field to be
+ * configured.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t num_vlan_entries;
- /* Number of VLAN entries in the vlan_tag_mask_tbl. */
- uint64_t vlan_tag_mask_tbl_addr;
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the l3_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the l3_addr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L3_ADDR_TYPE \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the t_l3_addr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR_TYPE \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the t_l3_addr field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_T_L3_ADDR \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the vni field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dst_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_DST_VNIC_ID \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x400)
+ /*
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
+ */
+ uint64_t l2_filter_id;
/*
- * The vlan_tag_mask_tbl_addr is the DMA address of the VLAN antispoof
- * table. Each table entry contains the 16-bit TPID (0x8100 or 0x88a8
- * only), 16-bit VLAN ID, and a 16-bit mask, all in network order to
- * match hwrm_cfa_l2_set_rx_mask. For an individual VLAN entry, the mask
- * value should be 0xfff for the 12-bit VLAN ID.
+ * This value sets the match value for the inner L2
+ * MAC address.
+ * Destination MAC address for RX path.
+ * Source MAC address for TX path.
*/
-};
+ uint8_t l2_addr[6];
+ /*
+	 * This value sets the VLAN ID value for the inner VLAN.
+	 * Only 12 bits of the VLAN ID are used in setting the filter.
+ */
+ uint16_t l2_ivlan;
+ /*
+	 * The value of the inner destination IP address to be used in filtering.
+	 * For IPv4, the first four bytes represent the IP address.
+ */
+ uint32_t l3_addr[4];
+ /*
+	 * The value of the tunnel destination IP address to be used in filtering.
+	 * For IPv4, the first four bytes represent the IP address.
+ */
+ uint32_t t_l3_addr[4];
+ /*
+ * This value indicates the type of inner IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t l3_addr_type;
+ /*
+ * This value indicates the type of tunnel IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
+ */
+ uint8_t t_l3_addr_type;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /*
+ * tunnel_flags allows the user to indicate the tunnel tag detection
+ * for the tunnel type specified in tunnel_type.
+ */
+ uint8_t tunnel_flags;
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to match the geneve OAM packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates if
+	 * we need to detect the checksum present bit in the gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we need
+ * to match mpls packet with explicit IPV4/IPV6 null header.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR \
+ UINT32_C(0x1)
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to detect the critical option bit set in the oam packet.
+ * If the tunnel_type is nvgre or gre, then this bit indicates
+ * if we need to match nvgre packets with key present bit set in
+ * gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we
+ * need to match mpls packet with S bit from inner/second label.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 \
+ UINT32_C(0x2)
+ /*
+ * If the tunnel_type is geneve, then this bit indicates if we
+ * need to match geneve packet with extended header bit set in
+ * geneve header.
+ * If the tunnel_type is nvgre or gre, then this bit indicates
+ * if we need to match nvgre packets with sequence number
+ * present bit set in gre header.
+ * If the tunnel_type is mpls, then this bit indicates if we
+ * need to match mpls packet with S bit from out/first label.
+ */
+ #define HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 \
+ UINT32_C(0x4)
+ /*
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+	 * Only the lower 24 bits of the VNI field are used
+ * in setting up the filter.
+ */
+ uint32_t vni;
+ /* Logical VNIC ID of the destination VNIC. */
+ uint32_t dst_vnic_id;
+ /*
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t mirror_vnic_id;
+} __attribute__((packed));
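
Only fields whose enables bit is set participate in the match. A hedged sketch of requesting a VXLAN filter on the inner destination MAC plus VNI; not the driver's actual code, endianness conversions omitted, and 'req' assumed zeroed:

    #include <stdint.h>
    #include <string.h>

    static void fill_vxlan_tunnel_filter(
            struct hwrm_cfa_tunnel_filter_alloc_input *req,
            uint64_t l2_filter_id, const uint8_t mac[6], uint32_t vni)
    {
        req->enables =
            HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
            HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
            HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
            HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_ENABLES_VNI;
        req->l2_filter_id = l2_filter_id;
        memcpy(req->l2_addr, mac, 6);
        req->tunnel_type =
            HWRM_CFA_TUNNEL_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
        req->vni = vni & 0xffffff; /* only the low 24 bits are used */
    }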
-/* Output (16 bytes) */
-struct hwrm_cfa_vlan_antispoof_cfg_output {
- uint16_t error_code;
+/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t tunnel_filter_id;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint32_t flow_id;
+ uint8_t unused_0[3];
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint8_t valid;
+} __attribute__((packed));
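
Per the comment above, 0xFFFFFFFF is the "no flow id" sentinel; a hedged check:

    #include <stdint.h>

    /* Nonzero when the filter carries a flow id usable for matching
     * against the flow identifier in RX completion records. */
    static int tunnel_filter_has_flow_id(
            const struct hwrm_cfa_tunnel_filter_alloc_output *resp)
    {
        return resp->flow_id != 0xFFFFFFFFu;
    }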
+
+/*******************************
+ * hwrm_cfa_tunnel_filter_free *
+ *******************************/
+
+
+/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_tunnel_filter_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
-};
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint64_t tunnel_filter_id;
+} __attribute__((packed));
-/* hwrm_cfa_ntuple_filter_alloc */
-/*
- * Description: This is a ntuple filter that uses fields from L4/L3 header and
- * optionally fields from L2. The ntuple filters apply to receive traffic only.
- * All L2/L3/L4 header fields are specified in network byte order. These filters
- * can be used for Receive Flow Steering (RFS). # For ethertype value, only
- * 0x0800 (IPv4) and 0x86dd (IPv6) shall be supported for ntuple filters. # If a
- * field specified in this command is not enabled as a valid field, then that
- * field shall not be used in matching packet header fields against this filter.
- */
-/* Input (128 bytes) */
-struct hwrm_cfa_ntuple_filter_alloc_input {
- uint16_t req_type;
+/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_tunnel_filter_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************************
+ * hwrm_cfa_redirect_tunnel_type_alloc *
+ ***************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+	/* The destination function ID to which the traffic is redirected. */
+ uint16_t dest_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+	/* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ /* Tunnel alloc flags. */
+ uint8_t flags;
+	/*
+	 * Setting this flag requests that an existing redirect tunnel for
+	 * this tunnel type be modified to use the new destination function ID.
+	 */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST \
+ UINT32_C(0x1)
+ uint8_t unused_0[4];
+} __attribute__((packed));
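
A hedged sketch of filling this request to redirect all VXLAN traffic to a destination function, setting MODIFY_DST so an existing redirect is updated rather than rejected; the helper is illustrative only and endianness conversions are omitted:

    #include <stdint.h>

    static void fill_redirect_vxlan(
            struct hwrm_cfa_redirect_tunnel_type_alloc_input *req,
            uint16_t dest_fid)
    {
        req->dest_fid = dest_fid;
        req->tunnel_type =
            HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
        req->flags =
            HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC_INPUT_FLAGS_MODIFY_DST;
    }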
+
+/* hwrm_cfa_redirect_tunnel_type_alloc_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************************
+ * hwrm_cfa_redirect_tunnel_type_free *
+ **************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+	/* The destination function ID to which the traffic is redirected. */
+ uint16_t dest_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+	/* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************************
+ * hwrm_cfa_redirect_tunnel_type_info *
+ **************************************/
+
+
+/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */
+struct hwrm_cfa_redirect_tunnel_type_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* The source function id. */
+ uint16_t src_fid;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Lable Switching (MPLS) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+	/* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[5];
+} __attribute__((packed));
+
+/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */
+struct hwrm_cfa_redirect_tunnel_type_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+	/* The destination function ID to which the traffic is redirected. */
+ uint16_t dest_fid;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
+struct hwrm_vxlan_ipv4_hdr {
+ /* IPv4 version and header length. */
+ uint8_t ver_hlen;
+ /* IPv4 header length */
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK UINT32_C(0xf)
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
+ /* Version */
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK UINT32_C(0xf0)
+ #define HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
+ /* IPv4 type of service. */
+ uint8_t tos;
+ /* IPv4 identification. */
+ uint16_t ip_id;
+ /* IPv4 flags and offset. */
+ uint16_t flags_frag_offset;
+ /* IPv4 TTL. */
+ uint8_t ttl;
+ /* IPv4 protocol. */
+ uint8_t protocol;
+ /* IPv4 source address. */
+ uint32_t src_ip_addr;
+ /* IPv4 destination address. */
+ uint32_t dest_ip_addr;
+} __attribute__((packed));
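
The mask/shift macros above compose the familiar 0x45 first byte of a plain IPv4 header. A hedged sketch:

    #include <stdint.h>

    /* Pack version 4 and a 5-word (20-byte) header length into
     * ver_hlen: HEADER_LENGTH occupies bits 0-3, VERSION bits 4-7. */
    static uint8_t vxlan_ipv4_ver_hlen(void)
    {
        uint8_t v = 0;

        v |= (5 << HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT) &
             HWRM_VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK;
        v |= (4 << HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT) &
             HWRM_VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK;
        return v; /* 0x45, as in a plain IPv4 header */
    }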
+
+/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
+struct hwrm_vxlan_ipv6_hdr {
+ /* IPv6 version, traffic class and flow label. */
+ uint32_t ver_tc_flow_label;
+ /* IPv6 version shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT \
+ UINT32_C(0x1c)
+ /* IPv6 version mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK \
+ UINT32_C(0xf0000000)
+ /* IPv6 TC shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT \
+ UINT32_C(0x14)
+ /* IPv6 TC mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK \
+ UINT32_C(0xff00000)
+ /* IPv6 flow label shift */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT \
+ UINT32_C(0x0)
+ /* IPv6 flow label mask */
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK \
+ UINT32_C(0xfffff)
+ #define HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST \
+ HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
+ /* IPv6 payload length. */
+ uint16_t payload_len;
+ /* IPv6 next header. */
+ uint8_t next_hdr;
+ /* IPv6 TTL. */
+ uint8_t ttl;
+ /* IPv6 source address. */
+ uint32_t src_ip_addr[4];
+ /* IPv6 destination address. */
+ uint32_t dest_ip_addr[4];
+} __attribute__((packed));
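
Note that the shift macros here are spelled in hex (0x1c is 28, 0x14 is 20). A hedged sketch of composing the combined word:

    #include <stdint.h>

    /* Pack version 6, traffic class and flow label into
     * ver_tc_flow_label using the mask/shift macros above. */
    static uint32_t vxlan_ipv6_vtf(uint8_t tc, uint32_t flow_label)
    {
        uint32_t v = 0;

        v |= (UINT32_C(6) << HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT) &
             HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK;
        v |= ((uint32_t)tc << HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT) &
             HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK;
        v |= flow_label &
             HWRM_VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK;
        return v;
    }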
+
+/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
+struct hwrm_cfa_encap_data_vxlan {
+ /* Source MAC address. */
+ uint8_t src_mac_addr[6];
+ /* reserved. */
+ uint16_t unused_0;
+ /* Destination MAC address. */
+ uint8_t dst_mac_addr[6];
+ /* Number of VLAN tags. */
+ uint8_t num_vlan_tags;
+ /* reserved. */
+ uint8_t unused_1;
+ /* Outer VLAN TPID. */
+ uint16_t ovlan_tpid;
+ /* Outer VLAN TCI. */
+ uint16_t ovlan_tci;
+ /* Inner VLAN TPID. */
+ uint16_t ivlan_tpid;
+ /* Inner VLAN TCI. */
+ uint16_t ivlan_tci;
+ /* L3 header fields. */
+ uint32_t l3[10];
+ /* IP version mask. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_MASK UINT32_C(0xf)
+ /* IP version 4. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 UINT32_C(0x4)
+ /* IP version 6. */
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 UINT32_C(0x6)
+ #define HWRM_CFA_ENCAP_DATA_VXLAN_L3_LAST \
+ HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
+ /* UDP source port. */
+ uint16_t src_port;
+ /* UDP destination port. */
+ uint16_t dst_port;
+ /* VXLAN Network Identifier. */
+ uint32_t vni;
+} __attribute__((packed));
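
The l3[10] words (40 bytes) hold either the 16-byte IPv4 header or the 40-byte IPv6 header defined above; the version nibble of the first byte is what the L3_VER macros test. A hedged sketch for the IPv4 case, with an illustrative helper name:

    #include <string.h>

    /* Place an IPv4 outer header into the l3[] words of the encap
     * data; the remaining bytes are cleared.  The version nibble in
     * ver_hlen doubles as the L3_VER_IPV4 selector matched by
     * HWRM_CFA_ENCAP_DATA_VXLAN_L3_VER_MASK. */
    static void encap_set_ipv4(struct hwrm_cfa_encap_data_vxlan *encap,
                               const struct hwrm_vxlan_ipv4_hdr *ip4)
    {
        memset(encap->l3, 0, sizeof(encap->l3));
        memcpy(encap->l3, ip4, sizeof(*ip4)); /* 16 of the 40 bytes */
    }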
+
+/*******************************
+ * hwrm_cfa_encap_record_alloc *
+ *******************************/
+
+
+/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+struct hwrm_cfa_encap_record_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+	/*
+	 * Setting this flag indicates that the encap record applies to the
+	 * loopback path.
+	 */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_FLAGS_LOOPBACK \
+ UINT32_C(0x1)
+ /* Encapsulation Type. */
+ uint8_t encap_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_NVGRE \
+ UINT32_C(0x2)
+	/* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_GENEVE \
+ UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* VLAN */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_VLAN \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE \
+ UINT32_C(0x8)
+ #define HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_LAST \
+ HWRM_CFA_ENCAP_RECORD_ALLOC_INPUT_ENCAP_TYPE_IPGRE
+ uint8_t unused_0[3];
+ /* This value is encap data used for the given encap type. */
+ uint32_t encap_data[20];
+} __attribute__((packed));
+
+/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t encap_record_id;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_encap_record_free *
+ ******************************/
+
+
+/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+struct hwrm_cfa_encap_record_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t flags;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t encap_record_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+struct hwrm_cfa_encap_record_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_ntuple_filter_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_ntuple_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * Setting of this flag indicates the applicability to the
- * loopback path.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+	/*
+	 * Setting this flag indicates that the filter applies to the
+	 * loopback path.
+	 */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
UINT32_C(0x1)
/*
- * Setting of this flag indicates drop action. If this flag is
- * not set, then it should be considered accept action.
+	 * Setting this flag selects the drop action. If this flag is not set,
+	 * the accept action is assumed.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x2)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP \
+ UINT32_C(0x2)
/*
- * Setting of this flag indicates that a meter is expected to be
- * attached to this flow. This hint can be used when choosing
- * the action record format required for the flow.
+ * Setting of this flag indicates that a meter is expected to be attached
+ * to this flow. This hint can be used when choosing the action record
+ * format required for the flow.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER UINT32_C(0x4)
- uint32_t enables;
- /* This bit must be '1' for the l2_filter_id field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER \
+ UINT32_C(0x4)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
UINT32_C(0x1)
- /* This bit must be '1' for the ethertype field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
UINT32_C(0x2)
- /* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
UINT32_C(0x4)
- /* This bit must be '1' for the src_macaddr field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
UINT32_C(0x8)
- /* This bit must be '1' for the ipaddr_type field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
UINT32_C(0x10)
- /* This bit must be '1' for the src_ipaddr field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
UINT32_C(0x20)
/*
* This bit must be '1' for the src_ipaddr_mask field to be
@@ -9932,8 +21818,11 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \
UINT32_C(0x40)
- /* This bit must be '1' for the dst_ipaddr field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
UINT32_C(0x80)
/*
* This bit must be '1' for the dst_ipaddr_mask field to be
@@ -9941,29 +21830,41 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \
UINT32_C(0x100)
- /* This bit must be '1' for the ip_protocol field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
UINT32_C(0x200)
- /* This bit must be '1' for the src_port field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
UINT32_C(0x400)
/*
* This bit must be '1' for the src_port_mask field to be
* configured.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \
UINT32_C(0x800)
- /* This bit must be '1' for the dst_port field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
UINT32_C(0x1000)
/*
* This bit must be '1' for the dst_port_mask field to be
* configured.
*/
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \
UINT32_C(0x2000)
- /* This bit must be '1' for the pri_hint field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \
+ /*
+ * This bit must be '1' for the pri_hint field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \
UINT32_C(0x4000)
/*
* This bit must be '1' for the ntuple_filter_id field to be
@@ -9971,8 +21872,11 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \
UINT32_C(0x8000)
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
UINT32_C(0x10000)
/*
* This bit must be '1' for the mirror_vnic_id field to be
@@ -9980,26 +21884,31 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
UINT32_C(0x20000)
- /* This bit must be '1' for the dst_macaddr field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
UINT32_C(0x40000)
- uint64_t l2_filter_id;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
- uint8_t src_macaddr[6];
+ uint64_t l2_filter_id;
/*
- * This value indicates the source MAC address in the Ethernet
- * header.
+ * This value indicates the source MAC address in
+ * the Ethernet header.
*/
- uint16_t ethertype;
+ uint8_t src_macaddr[6];
/* This value indicates the ethertype in the Ethernet header. */
- uint8_t ip_addr_type;
+ uint16_t ethertype;
/*
- * This value indicates the type of IP address. 4 - IPv4 6 -
- * IPv6 All others are invalid.
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
*/
+ uint8_t ip_addr_type;
/* invalid */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
UINT32_C(0x0)
@@ -10009,11 +21918,15 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
/* IPv6 */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
UINT32_C(0x6)
- uint8_t ip_protocol;
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header. Applies to UDP and
- * TCP traffic. 6 - TCP 17 - UDP
+ * The value of the protocol field in the IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
*/
+ uint8_t ip_protocol;
/* invalid */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
UINT32_C(0x0)
@@ -10023,283 +21936,284 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
/* UDP */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
UINT32_C(0x11)
- uint16_t dst_id;
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
+ */
+ uint16_t dst_id;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
*/
- uint16_t mirror_vnic_id;
- /* Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint8_t tunnel_type;
+ uint16_t mirror_vnic_id;
/*
- * This value indicates the tunnel type for this filter. If this
- * field is not specified, then the filter shall apply to both
- * non-tunneled and tunneled packets. If this field conflicts
- * with the tunnel_type specified in the l2_filter_id, then the
- * HWRM shall return an error for this command.
+ * This value indicates the tunnel type for this filter.
+ * If this field is not specified, then the filter shall
+ * apply to both non-tunneled and tunneled packets.
+ * If this field conflicts with the tunnel_type specified
+ * in the l2_filter_id, then the HWRM shall return an
+ * error for this command.
*/
+ uint8_t tunnel_type;
/* Non-tunnel */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
+ /* Virtual eXtensible Local Area Network (VXLAN) */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
UINT32_C(0x1)
- /*
- * Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
- */
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
UINT32_C(0x2)
- /*
- * Generic Routing Encapsulation (GRE) inside
- * Ethernet payload
- */
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
UINT32_C(0x3)
/* IP in IP */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
+ /* Generic Network Virtualization Encapsulation (Geneve) */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
+ /* Multi-Protocol Label Switching (MPLS) */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
- /*
- * Generic Routing Encapsulation (GRE) inside IP
- * datagram payload
- */
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
/* Any tunneled traffic */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
UINT32_C(0xff)
- uint8_t pri_hint;
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
/*
- * This hint is provided to help in placing the filter in the
- * filter table.
+ * This hint is provided to help in placing
+ * the filter in the filter table.
*/
+ uint8_t pri_hint;
/* No preference */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
UINT32_C(0x0)
/* Above the given filter */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE UINT32_C(0x1)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE \
+ UINT32_C(0x1)
/* Below the given filter */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW UINT32_C(0x2)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW \
+ UINT32_C(0x2)
/* As high as possible */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \
UINT32_C(0x3)
/* As low as possible */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST UINT32_C(0x4)
- uint32_t src_ipaddr[4];
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST \
+ UINT32_C(0x4)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST
/*
- * The value of source IP address to be used in filtering. For
- * IPv4, first four bytes represent the IP address.
+ * The value of source IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
*/
- uint32_t src_ipaddr_mask[4];
+ uint32_t src_ipaddr[4];
/*
- * The value of source IP address mask to be used in filtering.
+ * The value of source IP address mask to be used in
+ * filtering.
* For IPv4, first four bytes represent the IP address mask.
*/
- uint32_t dst_ipaddr[4];
+ uint32_t src_ipaddr_mask[4];
/*
* The value of destination IP address to be used in filtering.
* For IPv4, first four bytes represent the IP address.
*/
- uint32_t dst_ipaddr_mask[4];
+ uint32_t dst_ipaddr[4];
/*
* The value of destination IP address mask to be used in
- * filtering. For IPv4, first four bytes represent the IP
- * address mask.
+ * filtering.
+ * For IPv4, first four bytes represent the IP address mask.
*/
- uint16_t src_port;
+ uint32_t dst_ipaddr_mask[4];
/*
- * The value of source port to be used in filtering. Applies to
- * UDP and TCP traffic.
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
*/
- uint16_t src_port_mask;
+ uint16_t src_port;
/*
* The value of source port mask to be used in filtering.
* Applies to UDP and TCP traffic.
*/
- uint16_t dst_port;
+ uint16_t src_port_mask;
/*
* The value of destination port to be used in filtering.
* Applies to UDP and TCP traffic.
*/
- uint16_t dst_port_mask;
+ uint16_t dst_port;
/*
- * The value of destination port mask to be used in filtering.
+ * The value of destination port mask to be used in
+ * filtering.
* Applies to UDP and TCP traffic.
*/
- uint64_t ntuple_filter_id_hint;
- /* This is the ID of the filter that goes along with the pri_hint. */
+ uint16_t dst_port_mask;
+ /*
+ * This is the ID of the filter that goes along with
+ * the pri_hint.
+ */
+ uint64_t ntuple_filter_id_hint;
} __attribute__((packed));
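/*
 * Usage sketch (not part of this header): building an ntuple filter
 * allocation request that matches UDP traffic to one destination port.
 * Every field that is filled in must have its enables bit set, and on
 * the wire the multi-byte fields are little-endian, so a real driver
 * applies rte_cpu_to_le_*() conversions. hwrm_send_message() is a
 * hypothetical transport helper; assumes <string.h> for memset().
 */
static int example_ntuple_udp_dst_port(uint64_t l2_filter_id, uint16_t port)
{
	struct hwrm_cfa_ntuple_filter_alloc_input req;

	memset(&req, 0, sizeof(req));
	req.l2_filter_id = l2_filter_id;
	req.ip_addr_type = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req.ip_protocol = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP;
	req.dst_port = port;
	req.enables =
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT;
	return hwrm_send_message(&req, sizeof(req)); /* hypothetical */
}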
-/* Output (24 bytes) */
+/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint64_t ntuple_filter_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* This value is an opaque id into CFA data structures. */
- uint32_t flow_id;
+ uint64_t ntuple_filter_id;
/*
- * This is the ID of the flow associated with this filter. This
- * value shall be used to match and associate the flow
- * identifier returned in completion records. A value of
- * 0xFFFFFFFF shall indicate no flow id.
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint32_t flow_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
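/*
 * Usage sketch (not part of this header): per the comment on `valid`
 * above, the HWRM writes that byte last, so a driver polls it before
 * reading the rest of the response. Assumes <errno.h>, <rte_atomic.h>
 * and <rte_cycles.h>; the iteration count is illustrative only.
 */
static int
example_poll_response(volatile struct hwrm_cfa_ntuple_filter_alloc_output *resp)
{
	int i;

	for (i = 0; i < 1000; i++) {
		if (resp->valid == 1) {
			rte_rmb(); /* read payload only after valid is seen */
			return resp->error_code; /* 0 means success */
		}
		rte_delay_us(1);
	}
	return -ETIMEDOUT;
}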
-/* Command specific Error Codes (8 bytes) */
+/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
- uint8_t code;
/*
- * command specific error codes that goes to the cmd_err field
- * in Common HWRM Error Response.
+ * Command-specific error code that goes to
+ * the cmd_err field in the Common HWRM Error Response.
*/
+ uint8_t code;
/* Unknown error */
- #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
- /*
- * Unable to complete operation due to conflict
- * with Rx Mask VLAN
- */
- #define \
- HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \
- UINT32_C(0x1)
- uint8_t unused_0[7];
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* Unable to complete operation due to conflict with Rx Mask VLAN */
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \
+ UINT32_C(0x1)
+ #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST \
+ HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
+ uint8_t unused_0[7];
} __attribute__((packed));
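/*
 * Usage sketch (not part of this header): decoding the command-specific
 * error code carried in the cmd_err field of the common error response.
 */
static const char *
example_ntuple_cmd_err_str(uint8_t code)
{
	switch (code) {
	case HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR:
		return "conflict with Rx Mask VLAN";
	case HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN:
	default:
		return "unknown error";
	}
}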
-/* hwrm_cfa_ntuple_filter_free */
-/* Description: Free an ntuple filter */
-/* Input (24 bytes) */
+/*******************************
+ * hwrm_cfa_ntuple_filter_free *
+ *******************************/
+
+
+/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
struct hwrm_cfa_ntuple_filter_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t ntuple_filter_id;
+ uint64_t resp_addr;
/* This value is an opaque id into CFA data structures. */
+ uint64_t ntuple_filter_id;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_cfa_ntuple_filter_cfg */
-/*
- * Description: Configure an ntuple filter with a new destination VNIC and/or
- * meter.
- */
-/* Input (48 bytes) */
+/******************************
+ * hwrm_cfa_ntuple_filter_cfg *
+ ******************************/
+
+
+/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_ntuple_filter_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t enables;
- /* This bit must be '1' for the new_dst_id field to be configured. */
- #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the new_dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \
UINT32_C(0x1)
/*
* This bit must be '1' for the new_mirror_vnic_id field to be
@@ -10308,2209 +22222,5990 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
UINT32_C(0x2)
/*
- * This bit must be '1' for the new_meter_instance_id field to
- * be configured.
+ * This bit must be '1' for the new_meter_instance_id field to be
+ * configured.
*/
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
UINT32_C(0x4)
- uint32_t unused_0;
- uint64_t ntuple_filter_id;
+ uint8_t unused_0[4];
/* This value is an opaque id into CFA data structures. */
- uint32_t new_dst_id;
+ uint64_t ntuple_filter_id;
/*
- * If set, this value shall represent the new Logical VNIC ID of
- * the destination VNIC for the RX path and new network port id
- * of the destination port for the TX path.
+ * If set, this value shall represent the new
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and new network port id of the destination port for
+ * the TX path.
*/
- uint32_t new_mirror_vnic_id;
- /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint16_t new_meter_instance_id;
+ uint32_t new_dst_id;
/*
- * New meter to attach to the flow. Specifying the invalid
- * instance ID is used to remove any existing meter from the
- * flow.
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t new_mirror_vnic_id;
+ /*
+ * New meter to attach to the flow. Specifying the
+ * invalid instance ID is used to remove any existing
+ * meter from the flow.
*/
+ uint16_t new_meter_instance_id;
/*
- * A value of 0xfff is considered invalid and
- * implies the instance is not configured.
+ * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
UINT32_C(0xffff)
- uint16_t unused_1[3];
+ #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[6];
} __attribute__((packed));
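/*
 * Usage sketch (not part of this header): detaching a meter from an
 * existing ntuple filter. Writing the INVALID instance ID removes any
 * attached meter, and only fields with their enables bit set are
 * applied. Assumes <string.h> for memset().
 */
static void
example_ntuple_remove_meter(struct hwrm_cfa_ntuple_filter_cfg_input *req,
			    uint64_t ntuple_filter_id)
{
	memset(req, 0, sizeof(*req));
	req->ntuple_filter_id = ntuple_filter_id;
	req->new_meter_instance_id =
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID;
	req->enables =
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID;
}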
-/* Output (16 bytes) */
+/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
struct hwrm_cfa_ntuple_filter_cfg_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_cfa_em_flow_alloc */
-/*
- * Description: This is a generic Exact Match (EM) flow that uses fields from
- * L4/L3/L2 headers. The EM flows apply to transmit and receive traffic. All
- * L2/L3/L4 header fields are specified in network byte order. For each EM flow,
- * there is an associated set of actions specified. For tunneled packets, all
- * L2/L3/L4 fields specified are fields of inner headers unless otherwise
- * specified. # If a field specified in this command is not enabled as a valid
- * field, then that field shall not be used in matching packet header fields
- * against this EM flow entry.
- */
-/* Input (112 bytes) */
+/**************************
+ * hwrm_cfa_em_flow_alloc *
+ **************************/
+
+
+/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */
struct hwrm_cfa_em_flow_alloc_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t flags;
+ uint64_t resp_addr;
+ uint32_t flags;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
* TX and RX paths of the chip.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
/* tx path */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 0)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \
- CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX
- /*
- * Setting of this flag indicates enabling of a byte counter for
- * a given flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2)
- /*
- * Setting of this flag indicates enabling of a packet counter
- * for a given flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4)
- /*
- * Setting of this flag indicates de-capsulation action for the
- * given flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8)
- /*
- * Setting of this flag indicates encapsulation action for the
- * given flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10)
- /*
- * Setting of this flag indicates drop action. If this flag is
- * not set, then it should be considered accept action.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20)
- /*
- * Setting of this flag indicates that a meter is expected to be
- * attached to this flow. This hint can be used when choosing
- * the action record format required for the flow.
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40)
- uint32_t enables;
- /* This bit must be '1' for the l2_filter_id field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID UINT32_C(0x1)
- /* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x2)
- /* This bit must be '1' for the tunnel_id field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID UINT32_C(0x4)
- /* This bit must be '1' for the src_macaddr field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR UINT32_C(0x8)
- /* This bit must be '1' for the dst_macaddr field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR UINT32_C(0x10)
- /* This bit must be '1' for the ovlan_vid field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID UINT32_C(0x20)
- /* This bit must be '1' for the ivlan_vid field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID UINT32_C(0x40)
- /* This bit must be '1' for the ethertype field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE UINT32_C(0x80)
- /* This bit must be '1' for the src_ipaddr field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR UINT32_C(0x100)
- /* This bit must be '1' for the dst_ipaddr field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR UINT32_C(0x200)
- /* This bit must be '1' for the ipaddr_type field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE UINT32_C(0x400)
- /* This bit must be '1' for the ip_protocol field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL UINT32_C(0x800)
- /* This bit must be '1' for the src_port field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT UINT32_C(0x1000)
- /* This bit must be '1' for the dst_port field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT UINT32_C(0x2000)
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x4000)
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX
+ /*
+ * Setting of this flag indicates enabling of a byte counter for a given
+ * flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates enabling of a packet counter for a given
+ * flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4)
+ /* Setting of this flag indicates de-capsulation action for the given flow. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8)
+ /* Setting of this flag indicates encapsulation action for the given flow. */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10)
+ /*
+ * Setting of this flag indicates a drop action. If this flag is not set,
+ * then it should be considered an accept action.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20)
+ /*
+ * Setting of this flag indicates that a meter is expected to be attached
+ * to this flow. This hint can be used when choosing the action record
+ * format required for the flow.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the l2_filter_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the tunnel_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x4000)
/*
* This bit must be '1' for the mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
UINT32_C(0x8000)
/*
* This bit must be '1' for the encap_record_id field to be
* configured.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \
UINT32_C(0x10000)
/*
* This bit must be '1' for the meter_instance_id field to be
* configured.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \
UINT32_C(0x20000)
- uint64_t l2_filter_id;
/*
- * This value identifies a set of CFA data structures used for
- * an L2 context.
+ * This value identifies a set of CFA data structures used for an L2
+ * context.
*/
- uint8_t tunnel_type;
+ uint64_t l2_filter_id;
/* Tunnel Type. */
+ uint8_t tunnel_type;
/* Non-tunnel */
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
UINT32_C(0x0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
- /*
- * Network Virtualization Generic Routing
- * Encapsulation (NVGRE)
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2)
- /*
- * Generic Routing Encapsulation (GRE) inside
- * Ethernet payload
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
/* IP in IP */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
- /* Multi-Protocol Lable Switching (MPLS) */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
- /*
- * Generic Routing Encapsulation (GRE) inside IP
- * datagram payload
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
- /*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
- */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
/* Any tunneled traffic */
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
UINT32_C(0xff)
- uint8_t unused_0;
- uint16_t unused_1;
- uint32_t tunnel_id;
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0[3];
/*
- * Tunnel identifier. Virtual Network Identifier (VNI). Only
- * valid with tunnel_types VXLAN, NVGRE, and Geneve. Only lower
- * 24-bits of VNI field are used in setting up the filter.
+ * Tunnel identifier.
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+ * Only lower 24-bits of VNI field are used
+ * in setting up the filter.
*/
- uint8_t src_macaddr[6];
+ uint32_t tunnel_id;
/*
- * This value indicates the source MAC address in the Ethernet
- * header.
+ * This value indicates the source MAC address in
+ * the Ethernet header.
*/
- uint16_t meter_instance_id;
+ uint8_t src_macaddr[6];
/* The meter instance to attach to the flow. */
+ uint16_t meter_instance_id;
/*
- * A value of 0xfff is considered invalid and
- * implies the instance is not configured.
+ * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \
UINT32_C(0xffff)
- uint8_t dst_macaddr[6];
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID
/*
- * This value indicates the destination MAC address in the
- * Ethernet header.
+ * This value indicates the destination MAC address in
+ * the Ethernet header.
*/
- uint16_t ovlan_vid;
+ uint8_t dst_macaddr[6];
/*
- * This value indicates the VLAN ID of the outer VLAN tag in the
- * Ethernet header.
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the Ethernet header.
*/
- uint16_t ivlan_vid;
+ uint16_t ovlan_vid;
/*
- * This value indicates the VLAN ID of the inner VLAN tag in the
- * Ethernet header.
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the Ethernet header.
*/
- uint16_t ethertype;
+ uint16_t ivlan_vid;
/* This value indicates the ethertype in the Ethernet header. */
- uint8_t ip_addr_type;
+ uint16_t ethertype;
/*
- * This value indicates the type of IP address. 4 - IPv4 6 -
- * IPv6 All others are invalid.
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
*/
+ uint8_t ip_addr_type;
/* invalid */
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0)
/* IPv4 */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 UINT32_C(0x4)
/* IPv6 */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
- uint8_t ip_protocol;
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header. Applies to UDP and
- * TCP traffic. 6 - TCP 17 - UDP
+ * The value of the protocol field in the IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
*/
+ uint8_t ip_protocol;
/* invalid */
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0)
/* TCP */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP UINT32_C(0x6)
/* UDP */
- #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11)
- uint8_t unused_2;
- uint8_t unused_3;
- uint32_t src_ipaddr[4];
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP UINT32_C(0x11)
+ #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP
+ uint8_t unused_1[2];
/*
- * The value of source IP address to be used in filtering. For
- * IPv4, first four bytes represent the IP address.
+ * The value of source IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
*/
- uint32_t dst_ipaddr[4];
+ uint32_t src_ipaddr[4];
/*
- * big_endian = True The value of destination IP address to be
- * used in filtering. For IPv4, first four bytes represent the
- * IP address.
+ * This value is in network byte order (big-endian).
+ * The value of destination IP address to be used in filtering.
+ * For IPv4, first four bytes represent the IP address.
*/
- uint16_t src_port;
+ uint32_t dst_ipaddr[4];
/*
- * The value of source port to be used in filtering. Applies to
- * UDP and TCP traffic.
+ * The value of source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
*/
- uint16_t dst_port;
+ uint16_t src_port;
/*
* The value of destination port to be used in filtering.
* Applies to UDP and TCP traffic.
*/
- uint16_t dst_id;
+ uint16_t dst_port;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
*/
- uint16_t mirror_vnic_id;
- /* Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint32_t encap_record_id;
+ uint16_t dst_id;
+ /*
+ * Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint16_t mirror_vnic_id;
/* Logical ID of the encapsulation record. */
- uint32_t unused_4;
+ uint32_t encap_record_id;
+ uint8_t unused_2[4];
} __attribute__((packed));
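/*
 * Usage sketch (not part of this header): an exact-match flow keyed on
 * a VXLAN VNI. Only the lower 24 bits of tunnel_id are used in the
 * filter, so the VNI is masked before assignment. Assumes <string.h>.
 */
static void
example_em_flow_vxlan(struct hwrm_cfa_em_flow_alloc_input *req,
		      uint64_t l2_filter_id, uint32_t vni)
{
	memset(req, 0, sizeof(*req));
	req->l2_filter_id = l2_filter_id;
	req->tunnel_type = HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req->tunnel_id = vni & 0xFFFFFF; /* lower 24 bits only */
	req->enables = HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
		       HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID;
}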
-/* Output (24 bytes) */
+/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */
struct hwrm_cfa_em_flow_alloc_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint64_t em_filter_id;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/* This value is an opaque id into CFA data structures. */
- uint32_t flow_id;
+ uint64_t em_filter_id;
/*
- * This is the ID of the flow associated with this filter. This
- * value shall be used to match and associate the flow
- * identifier returned in completion records. A value of
- * 0xFFFFFFFF shall indicate no flow id.
+ * This is the ID of the flow associated with this
+ * filter.
+ * This value shall be used to match and associate the
+ * flow identifier returned in completion records.
+ * A value of 0xFFFFFFFF shall indicate no flow id.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint32_t flow_id;
+ uint8_t unused_0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
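/*
 * Usage sketch (not part of this header): flow_id uses 0xFFFFFFFF as a
 * "no flow id" sentinel, so callers should test for it before matching
 * completion records against the returned value.
 */
static int
example_em_flow_has_id(const struct hwrm_cfa_em_flow_alloc_output *resp)
{
	return resp->flow_id != UINT32_C(0xFFFFFFFF);
}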
-/* hwrm_cfa_em_flow_free */
-/* Description: Free an EM flow table entry */
-/* Input (24 bytes) */
+/*************************
+ * hwrm_cfa_em_flow_free *
+ *************************/
+
+
+/* hwrm_cfa_em_flow_free_input (size:192b/24B) */
struct hwrm_cfa_em_flow_free_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t em_filter_id;
+ uint64_t resp_addr;
/* This value is an opaque id into CFA data structures. */
+ uint64_t em_filter_id;
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_em_flow_free_output (size:128b/16B) */
struct hwrm_cfa_em_flow_free_output {
- uint16_t error_code;
- /*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
- */
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
- /*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
- */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* hwrm_cfa_em_flow_cfg */
-/*
- * Description: Configure an EM flow with a new destination VNIC and/or meter.
- */
-/* Input (48 bytes) */
+/************************
+ * hwrm_cfa_em_flow_cfg *
+ ************************/
+
+
+/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */
struct hwrm_cfa_em_flow_cfg_input {
- uint16_t req_type;
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the new_dst_id field to be
+ * configured.
*/
- uint32_t enables;
- /* This bit must be '1' for the new_dst_id field to be configured. */
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID UINT32_C(0x1)
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID \
+ UINT32_C(0x1)
/*
* This bit must be '1' for the new_mirror_vnic_id field to be
* configured.
*/
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
UINT32_C(0x2)
/*
- * This bit must be '1' for the new_meter_instance_id field to
- * be configured.
+ * This bit must be '1' for the new_meter_instance_id field to be
+ * configured.
*/
- #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
UINT32_C(0x4)
- uint32_t unused_0;
- uint64_t em_filter_id;
+ uint8_t unused_0[4];
/* This value is an opaque id into CFA data structures. */
- uint32_t new_dst_id;
+ uint64_t em_filter_id;
/*
- * If set, this value shall represent the new Logical VNIC ID of
- * the destination VNIC for the RX path and network port id of
- * the destination port for the TX path.
+ * If set, this value shall represent the new
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path and network port id of the destination port for
+ * the TX path.
*/
- uint32_t new_mirror_vnic_id;
- /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint16_t new_meter_instance_id;
+ uint32_t new_dst_id;
/*
- * New meter to attach to the flow. Specifying the invalid
- * instance ID is used to remove any existing meter from the
- * flow.
+ * New Logical VNIC ID of the VNIC where traffic is
+ * mirrored.
+ */
+ uint32_t new_mirror_vnic_id;
+ /*
+ * New meter to attach to the flow. Specifying the
+ * invalid instance ID is used to remove any existing
+ * meter from the flow.
*/
+ uint16_t new_meter_instance_id;
/*
- * A value of 0xfff is considered invalid and
- * implies the instance is not configured.
+ * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
#define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
UINT32_C(0xffff)
- uint16_t unused_1[3];
+ #define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[6];
} __attribute__((packed));
-/* Output (16 bytes) */
+/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */
struct hwrm_cfa_em_flow_cfg_output {
- uint16_t error_code;
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_meter_profile_alloc *
+ ********************************/
+
+
+/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */
+struct hwrm_cfa_meter_profile_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint64_t resp_addr;
+ uint8_t flags;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
*/
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_FLAGS_PATH_RX
+ /* The meter algorithm type. */
+ uint8_t meter_type;
+ /* RFC 2697 (srTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2697 \
+ UINT32_C(0x0)
+ /* RFC 2698 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC2698 \
+ UINT32_C(0x1)
+ /* RFC 4115 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115 \
+ UINT32_C(0x2)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_METER_TYPE_RFC4115
+ /*
+ * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved1;
+ /*
+ * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved2;
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t commit_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t commit_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t excess_peak_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t excess_peak_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
} __attribute__((packed));
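
Each of the four rate/burst words above packs a 28-bit magnitude, a one-bit scale and a 3-bit unit code into a single uint32_t. A hedged sketch of composing such a word from the COMMIT_RATE macros just defined; the helper name and the example values are ours, not part of the interface:

#include <stdint.h>

/* Hypothetical helper: build a commit_rate word from a 28-bit value,
 * one of the SCALE_* flags and one of the BW_VALUE_UNIT_* codes. */
static uint32_t hwrm_commit_rate_word(uint32_t value, uint32_t scale, uint32_t unit)
{
	return (value & HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_MASK) |
	       (scale & HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE) |
	       (unit & HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK);
}

A 100 Mbit/s committed rate would then be
hwrm_commit_rate_word(100,
	HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_SCALE_BITS,
	HWRM_CFA_METER_PROFILE_ALLOC_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA);
the same pattern applies to commit_burst, excess_peak_rate and excess_peak_burst through their own macro families.
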
-/* hwrm_tunnel_dst_port_query */
-/*
- * Description: This function is called by a driver to query tunnel type
- * specific destination port configuration.
- */
-/* Input (24 bytes) */
-struct hwrm_tunnel_dst_port_query_input {
- uint16_t req_type;
+/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * A value of 0xffff is considered invalid and implies the
+ * profile is not configured.
*/
- uint16_t cmpl_ring;
+ #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_0[5];
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint8_t valid;
+} __attribute__((packed));
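
Note that a failed allocation can surface through the ID itself rather than error_code, so callers should treat the INVALID sentinel as "no profile". A minimal, hypothetical check:

/* Hypothetical: returns nonzero when the response carries a usable
 * meter profile id. */
static int hwrm_meter_profile_ok(const struct hwrm_cfa_meter_profile_alloc_output *resp)
{
	return resp->meter_profile_id !=
	       HWRM_CFA_METER_PROFILE_ALLOC_OUTPUT_METER_PROFILE_ID_INVALID;
}
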
+
+/*******************************
+ * hwrm_cfa_meter_profile_free *
+ *******************************/
+
+
+/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */
+struct hwrm_cfa_meter_profile_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint64_t resp_addr;
+ uint16_t cmpl_ring;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint8_t tunnel_type;
- /* Tunnel Type. */
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX \
UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_FREE_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
/*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
+ * A value of 0xffff is considered invalid and implies the
+ * profile is not configured.
*/
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
- UINT32_C(0x9)
- uint8_t unused_0[7];
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_FREE_INPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_1[4];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_tunnel_dst_port_query_output {
- uint16_t error_code;
+/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_meter_profile_cfg *
+ ******************************/
+
+
+/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */
+struct hwrm_cfa_meter_profile_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t tunnel_dst_port_id;
+ uint16_t cmpl_ring;
/*
- * This field represents the identifier of L4 destination port
- * used for the given tunnel type. This field is valid for
- * specific tunnel types that use layer 4 (e.g. UDP) transports
- * for tunneling.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t tunnel_dst_port_val;
+ uint16_t seq_id;
/*
- * This field represents the value of L4 destination port
- * identified by tunnel_dst_port_id. This field is valid for
- * specific tunnel types that use layer 4 (e.g. UDP) transports
- * for tunneling. This field is in network byte order. A value
- * of 0 means that the destination port is not configured.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t flags;
+ /*
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_FLAGS_PATH_RX
+ /* The meter algorithm type. */
+ uint8_t meter_type;
+ /* RFC 2697 (srTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2697 \
+ UINT32_C(0x0)
+ /* RFC 2698 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC2698 \
+ UINT32_C(0x1)
+ /* RFC 4115 (trTCM) */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115 \
+ UINT32_C(0x2)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_TYPE_RFC4115
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A value of 0xffff is considered invalid and implies the
+ * profile is not configured.
*/
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_METER_PROFILE_ID_INVALID
+ /*
+ * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint32_t reserved;
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t commit_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t commit_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_BURST_BW_VALUE_UNIT_INVALID
+ /* A meter rate specified in bytes-per-second. */
+ uint32_t excess_peak_rate;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_RATE_BW_VALUE_UNIT_INVALID
+ /* A meter burst size specified in bytes. */
+ uint32_t excess_peak_burst;
+ /* The bandwidth value. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_SFT \
+ 0
+ /* The granularity of the value (bits or bytes). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE \
+ UINT32_C(0x10000000)
+ /* Value is in bits. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BITS \
+ (UINT32_C(0x0) << 28)
+ /* Value is in bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES \
+ (UINT32_C(0x1) << 28)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_SCALE_BYTES
+ /* bw_value_unit is 3 bits */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT \
+ 29
+ /* Value is in Mb or MB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA \
+ (UINT32_C(0x0) << 29)
+ /* Value is in Kb or KB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO \
+ (UINT32_C(0x2) << 29)
+ /* Value is in bits or bytes. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE \
+ (UINT32_C(0x4) << 29)
+ /* Value is in Gb or GB (base 10). */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA \
+ (UINT32_C(0x6) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST \
+ HWRM_CFA_METER_PROFILE_CFG_INPUT_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID
} __attribute__((packed));
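
The _MASK/_SFT pairs above follow the usual convention: mask the word, then shift right by the _SFT amount. As a sketch (helper names are ours), decoding a commit_rate word back into its unit code and magnitude:

#include <stdint.h>

/* Hypothetical decoders for a hwrm_cfa_meter_profile_cfg_input
 * commit_rate word, using the masks and shifts defined above. */
static uint32_t hwrm_commit_rate_unit(uint32_t word)
{
	return (word & HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_MASK) >>
	       HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_UNIT_SFT;
}

static uint32_t hwrm_commit_rate_value(uint32_t word)
{
	return word & HWRM_CFA_METER_PROFILE_CFG_INPUT_COMMIT_RATE_BW_VALUE_MASK;
}
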
-/* hwrm_tunnel_dst_port_alloc */
-/*
- * Description: This function is called by a driver to allocate l4 destination
- * port for a specific tunnel type. The destination port value is provided in
- * the input. If the HWRM supports only one global destination port for a tunnel
- * type, then the HWRM shall keep track of its usage as described below. # The
- * first caller that allocates a destination port shall always succeed and the
- * HWRM shall save the destination port configuration for that tunnel type and
- * increment the usage count to 1. # Subsequent callers allocating the same
- * destination port for that tunnel type shall succeed and the HWRM shall
- * increment the usage count for that port for each subsequent caller that
- * succeeds. # Any subsequent caller trying to allocate a different destination
- * port for that tunnel type shall fail until the usage count for the original
- * destination port goes to zero. # A caller that frees a port will cause the
- * usage count for that port to decrement.
- */
-/* Input (24 bytes) */
-struct hwrm_tunnel_dst_port_alloc_input {
- uint16_t req_type;
+/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) */
+struct hwrm_cfa_meter_profile_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************************
+ * hwrm_cfa_meter_instance_alloc *
+ *********************************/
+
+
+/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */
+struct hwrm_cfa_meter_instance_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint8_t tunnel_type;
- /* Tunnel Type. */
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
- UINT32_C(0x5)
+ uint64_t resp_addr;
+ uint8_t flags;
/*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
*/
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
- UINT32_C(0x9)
- uint8_t unused_0;
- uint16_t tunnel_dst_port_val;
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH \
+ UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter profile in CFA. */
+ uint16_t meter_profile_id;
/*
- * This field represents the value of L4 destination port used
- * for the given tunnel type. This field is valid for specific
- * tunnel types that use layer 4 (e.g. UDP) transports for
- * tunneling. This field is in network byte order. A value of 0
- * shall fail the command.
+ * A value of 0xffff is considered invalid and implies the
+ * profile is not configured.
*/
- uint32_t unused_1;
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_METER_PROFILE_ID_INVALID
+ uint8_t unused_1[4];
} __attribute__((packed));
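
An instance allocation carries only the path flag and the profile to bind, so the request body reduces to two assignments. A hedged sketch (zeroing covers the unused_* fields; req_type, seq_id and resp_addr are assumed to be filled by a transport layer not shown here):

#include <stdint.h>
#include <string.h>

/* Hypothetical request fill: a new RX-path meter instance bound to
 * an existing profile. */
static void hwrm_fill_meter_instance_alloc(
	struct hwrm_cfa_meter_instance_alloc_input *req,
	uint16_t meter_profile_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_CFA_METER_INSTANCE_ALLOC_INPUT_FLAGS_PATH_RX;
	req->meter_profile_id = meter_profile_id;
}
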
-/* Output (16 bytes) */
-struct hwrm_tunnel_dst_port_alloc_output {
- uint16_t error_code;
+/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */
+struct hwrm_cfa_meter_instance_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value identifies a meter instance in CFA. */
+ uint16_t meter_instance_id;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_ALLOC_OUTPUT_METER_INSTANCE_ID_INVALID
+ uint8_t unused_0[5];
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t tunnel_dst_port_id;
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************************
+ * hwrm_cfa_meter_instance_free *
+ ********************************/
+
+
+/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */
+struct hwrm_cfa_meter_instance_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Identifier of a tunnel L4 destination port value. Only
- * applies to tunnel types that has l4 destination port
- * parameters.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
- uint8_t valid;
+ uint64_t resp_addr;
+ uint8_t flags;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * Enumeration denoting the RX, TX type of the resource.
+ * This enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
*/
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_TX \
+ UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX \
+ UINT32_C(0x1)
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_LAST \
+ HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX
+ uint8_t unused_0;
+ /* This value identifies a meter instance in CFA. */
+ uint16_t meter_instance_id;
+ /*
+ * A value of 0xffff is considered invalid and implies the
+ * instance is not configured.
+ */
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID \
+ UINT32_C(0xffff)
+ #define HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_LAST \
+ HWRM_CFA_METER_INSTANCE_FREE_INPUT_METER_INSTANCE_ID_INVALID
+ uint8_t unused_1[4];
} __attribute__((packed));
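
The free request mirrors the allocation: the same path flag plus the instance ID being released. A sketch under the same assumptions as the allocation example (header fields left to a transport layer):

#include <stdint.h>
#include <string.h>

/* Hypothetical request fill: release an RX-path meter instance. */
static void hwrm_fill_meter_instance_free(
	struct hwrm_cfa_meter_instance_free_input *req,
	uint16_t meter_instance_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = HWRM_CFA_METER_INSTANCE_FREE_INPUT_FLAGS_PATH_RX;
	req->meter_instance_id = meter_instance_id;
}
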
-/* hwrm_tunnel_dst_port_free */
-/*
- * Description: This function is called by a driver to free l4 destination port
- * for a specific tunnel type.
- */
-/* Input (24 bytes) */
-struct hwrm_tunnel_dst_port_free_input {
- uint16_t req_type;
+/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */
+struct hwrm_cfa_meter_instance_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*******************************
+ * hwrm_cfa_decap_filter_alloc *
+ *******************************/
+
+
+/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+struct hwrm_cfa_decap_filter_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint8_t tunnel_type;
- /* Tunnel Type. */
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ uint16_t target_id;
/*
- * IPV4 over virtual eXtensible Local Area
- * Network (IPV4oVXLAN)
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* ovs_tunnel is 1 bit */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_FLAGS_OVS_TUNNEL \
+ UINT32_C(0x1)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the tunnel_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
+ UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the tunnel_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID \
+ UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the src_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \
+ UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the dst_macaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \
+ UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_OVLAN_VID \
+ UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IVLAN_VID \
+ UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the t_ovlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_OVLAN_VID \
+ UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the t_ivlan_vid field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_T_IVLAN_VID \
+ UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the ethertype field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \
+ UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the src_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \
+ UINT32_C(0x200)
+ /*
+ * This bit must be '1' for the dst_ipaddr field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \
+ UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the ipaddr_type field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \
+ UINT32_C(0x800)
+ /*
+ * This bit must be '1' for the ip_protocol field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \
+ UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the src_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \
+ UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the dst_port field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \
+ UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the dst_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
+ UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+ UINT32_C(0x10000)
+ /*
+ * Tunnel identifier.
+ * Virtual Network Identifier (VNI). Only valid with
+ * tunnel_types VXLAN, NVGRE, and Geneve.
+ * Only the lower 24 bits of the VNI field are used
+ * in setting up the filter.
+ */
+ uint32_t tunnel_id;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Non-tunnel */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+ UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Network Virtualization Generic Routing Encapsulation (NVGRE) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
+ UINT32_C(0x2)
+ /* Generic Routing Encapsulation (GRE) inside Ethernet payload */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
+ UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
+ UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
+ UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
+ UINT32_C(0x7)
+ /* Generic Routing Encapsulation (GRE) inside IP datagram payload */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
+ UINT32_C(0x8)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
UINT32_C(0x9)
- uint8_t unused_0;
- uint16_t tunnel_dst_port_id;
+ /* Any tunneled traffic */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+ UINT32_C(0xff)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL
+ uint8_t unused_0;
+ uint16_t unused_1;
/*
- * Identifier of a tunnel L4 destination port value. Only
- * applies to tunnel types that has l4 destination port
- * parameters.
+ * This value indicates the source MAC address in
+ * the Ethernet header.
*/
- uint32_t unused_1;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_tunnel_dst_port_free_output {
- uint16_t error_code;
+ uint8_t src_macaddr[6];
+ uint8_t unused_2[2];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This value indicates the destination MAC address in
+ * the Ethernet header.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t dst_macaddr[6];
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the Ethernet header.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t ovlan_vid;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the Ethernet header.
*/
-} __attribute__((packed));
-
-/* hwrm_stat_ctx_alloc */
-/*
- * Description: This command allocates and does basic preparation for a stat
- * context.
- */
-/* Input (32 bytes) */
-struct hwrm_stat_ctx_alloc_input {
- uint16_t req_type;
+ uint16_t ivlan_vid;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This value indicates the VLAN ID of the outer VLAN tag
+ * in the tunnel Ethernet header.
*/
- uint16_t cmpl_ring;
+ uint16_t t_ovlan_vid;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This value indicates the VLAN ID of the inner VLAN tag
+ * in the tunnel Ethernet header.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t t_ivlan_vid;
+ /* This value indicates the ethertype in the Ethernet header. */
+ uint16_t ethertype;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * This value indicates the type of IP address.
+ * 4 - IPv4
+ * 6 - IPv6
+ * All others are invalid.
*/
- uint64_t resp_addr;
+ uint8_t ip_addr_type;
+ /* invalid */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \
+ UINT32_C(0x0)
+ /* IPv4 */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \
+ UINT32_C(0x4)
+ /* IPv6 */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \
+ UINT32_C(0x6)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The value of the protocol field in the IP header.
+ * Applies to UDP and TCP traffic.
+ * 6 - TCP
+ * 17 - UDP
*/
- uint64_t stats_dma_addr;
- /* This is the address for statistic block. */
- uint32_t update_period_ms;
+ uint8_t ip_protocol;
+ /* invalid */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \
+ UINT32_C(0x0)
+ /* TCP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \
+ UINT32_C(0x6)
+ /* UDP */
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \
+ UINT32_C(0x11)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_LAST \
+ HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP
+ uint16_t unused_3;
+ uint32_t unused_4;
/*
- * The statistic block update period in ms. e.g. 250ms, 500ms,
- * 750ms, 1000ms. If update_period_ms is 0, then the stats
- * update shall be never done and the DMA address shall not be
- * used. In this case, the stat block can only be read by
- * hwrm_stat_ctx_query command.
+ * The value of the source IP address to be used in filtering.
+ * For IPv4, the first four bytes represent the IP address.
*/
- uint8_t stat_ctx_flags;
+ uint32_t src_ipaddr[4];
/*
- * This field is used to specify statistics context specific
- * configuration flags.
+ * The value of the destination IP address to be used in filtering.
+ * For IPv4, the first four bytes represent the IP address.
*/
+ uint32_t dst_ipaddr[4];
/*
- * When this bit is set to '1', the statistics context shall be
- * allocated for RoCE traffic only. In this case, traffic other
- * than offloaded RoCE traffic shall not be included in this
- * statistic context. When this bit is set to '0', the
- * statistics context shall be used for the network traffic
- * other than offloaded RoCE traffic.
+ * The value of the source port to be used in filtering.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t src_port;
+ /*
+ * The value of the destination port to be used in filtering.
+ * Applies to UDP and TCP traffic.
*/
- #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
- uint8_t unused_0[3];
+ uint16_t dst_port;
+ /*
+ * If set, this value shall represent the
+ * Logical VNIC ID of the destination VNIC for the RX
+ * path.
+ */
+ uint16_t dst_id;
+ /*
+ * If set, this value shall represent the L2 context that matches the L2
+ * information of the decap filter.
+ */
+ uint16_t l2_ctxt_ref_id;
} __attribute__((packed));
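
Since every match field is gated by an enables bit, a request sets only the bits for the fields it actually fills. A hedged sketch for a VXLAN decap filter keyed on the VNI and steered to a destination VNIC; byte-order conversion is deliberately omitted here and would follow the driver's conventions:

#include <stdint.h>
#include <string.h>

/* Hypothetical request fill: match VXLAN packets by VNI and redirect
 * them to dst_vnic_id. Only the lower 24 bits of the VNI are used,
 * per the tunnel_id comment above. */
static void hwrm_fill_decap_vxlan(struct hwrm_cfa_decap_filter_alloc_input *req,
				  uint32_t vni, uint16_t dst_vnic_id)
{
	memset(req, 0, sizeof(*req));
	req->enables = HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE |
		       HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_ID |
		       HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req->tunnel_type = HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req->tunnel_id = vni & UINT32_C(0xffffff);
	req->dst_id = dst_vnic_id;
}
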
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_alloc_output {
- uint16_t error_code;
+/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t decap_filter_id;
+ uint8_t unused_0[3];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_cfa_decap_filter_free *
+ ******************************/
+
+
+/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+struct hwrm_cfa_decap_filter_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t stat_ctx_id;
- /* This is the statistics context ID value. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This value is an opaque id into CFA data structures. */
+ uint32_t decap_filter_id;
+ uint8_t unused_0[4];
} __attribute__((packed));
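
The opaque decap_filter_id returned by the allocation is the only state needed to free the filter later. A minimal, hypothetical pairing of the two messages:

#include <string.h>

/* Hypothetical: carry the opaque id from the alloc response into the
 * matching free request. */
static void hwrm_fill_decap_free(
	struct hwrm_cfa_decap_filter_free_input *req,
	const struct hwrm_cfa_decap_filter_alloc_output *resp)
{
	memset(req, 0, sizeof(*req));
	req->decap_filter_id = resp->decap_filter_id;
}
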
-/* hwrm_stat_ctx_free */
-/* Description: This command is used to free a stat context. */
-/* Input (24 bytes) */
-struct hwrm_stat_ctx_free_input {
- uint16_t req_type;
+/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+struct hwrm_cfa_decap_filter_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+struct hwrm_cfa_flow_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t stat_ctx_id;
- /* ID of the statistics context that is being queried. */
- uint32_t unused_0;
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint16_t flags;
+ /* tunnel is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_TUNNEL UINT32_C(0x1)
+ /* num_vlan is 2 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_MASK UINT32_C(0x6)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_SFT 1
+ /* no tags */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_NONE \
+ (UINT32_C(0x0) << 1)
+ /* 1 tag */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE \
+ (UINT32_C(0x1) << 1)
+ /* 2 tags */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO \
+ (UINT32_C(0x2) << 1)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_LAST \
+ HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_TWO
+ /* Enumeration denoting the Flow Type. */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_MASK UINT32_C(0x38)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_SFT 3
+ /* L2 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_L2 \
+ (UINT32_C(0x0) << 3)
+ /* IPV4 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4 \
+ (UINT32_C(0x1) << 3)
+ /* IPV6 flow */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6 \
+ (UINT32_C(0x2) << 3)
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_LAST \
+ HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV6
+ /*
+ * Tx Flow: vf fid.
+ * Rx Flow: pf fid.
+ */
+ uint16_t src_fid;
+ /* Tunnel handle, valid when the tunnel flag is set. */
+ uint32_t tunnel_handle;
+ uint16_t action_flags;
+ /* fwd is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_FWD \
+ UINT32_C(0x1)
+ /* recycle is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_RECYCLE \
+ UINT32_C(0x2)
+ /*
+ * Setting of this flag indicates drop action. If this flag is not set,
+ * then it should be considered accept action.
+ */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_DROP \
+ UINT32_C(0x4)
+ /* meter is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_METER \
+ UINT32_C(0x8)
+ /* tunnel is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TUNNEL \
+ UINT32_C(0x10)
+ /* nat_src is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_SRC \
+ UINT32_C(0x20)
+ /* nat_dest is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_DEST \
+ UINT32_C(0x40)
+ /* nat_ipv4_address is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_NAT_IPV4_ADDRESS \
+ UINT32_C(0x80)
+ /* l2_header_rewrite is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_L2_HEADER_REWRITE \
+ UINT32_C(0x100)
+ /* ttl_decrement is 1 b */
+ #define HWRM_CFA_FLOW_ALLOC_INPUT_ACTION_FLAGS_TTL_DECREMENT \
+ UINT32_C(0x200)
+ /*
+ * Tx Flow: pf or vf fid.
+ * Rx Flow: vf fid.
+ */
+ uint16_t dst_fid;
+ /* VLAN tpid, valid when push_vlan flag is set. */
+ uint16_t l2_rewrite_vlan_tpid;
+ /* VLAN tci, valid when push_vlan flag is set. */
+ uint16_t l2_rewrite_vlan_tci;
+ /* Meter id, valid when meter flag is set. */
+ uint16_t act_meter_id;
+ /* Flow with the same l2 context tcam key. */
+ uint16_t ref_flow_handle;
+ /* This value sets the match value for the ethertype. */
+ uint16_t ethertype;
+ /* valid when num tags is 1 or 2. */
+ uint16_t outer_vlan_tci;
+ /* This value sets the match value for the Destination MAC address. */
+ uint16_t dmac[3];
+ /* valid when num tags is 2. */
+ uint16_t inner_vlan_tci;
+ /* This value sets the match value for the Source MAC address. */
+ uint16_t smac[3];
+ /* The bit length of destination IP address mask. */
+ uint8_t ip_dst_mask_len;
+ /* The bit length of source IP address mask. */
+ uint8_t ip_src_mask_len;
+ /* The value of destination IPv4/IPv6 address. */
+ uint32_t ip_dst[4];
+ /* The source IPv4/IPv6 address. */
+ uint32_t ip_src[4];
+ /*
+ * The value of source port.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_src_port;
+ /*
+ * The value of source port mask.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_src_port_mask;
+ /*
+ * The value of destination port.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_dst_port;
+ /*
+ * The value of destination port mask.
+ * Applies to UDP and TCP traffic.
+ */
+ uint16_t l4_dst_port_mask;
+ /*
+ * NAT IPv4/6 address based on address type flag.
+ * 0 values are ignored.
+ */
+ uint32_t nat_ip_address[4];
+ /* L2 header re-write Destination MAC address. */
+ uint16_t l2_rewrite_dmac[3];
+ /*
+ * The NAT source/destination port based on direction flag.
+ * Applies to UDP and TCP traffic.
+ * 0 values are ignored.
+ */
+ uint16_t nat_port;
+ /* L2 header re-write Source MAC address. */
+ uint16_t l2_rewrite_smac[3];
+ /* The value of ip protocol. */
+ uint8_t ip_proto;
+ uint8_t unused_0;
} __attribute__((packed));
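Note that the num_vlan and flowtype macros above are pre-shifted, so the flags word is built by plain OR, and dmac[]/smac[] pack a 6-byte MAC into three consecutive 16-bit words. A hedged sketch of filling the match fields for an IPv4 flow with one VLAN tag (helper name is hypothetical; byte-order conversion of the fields is omitted here):

#include <stdint.h>
#include <string.h>

/* Fill the L2/L3 match portion of a flow-allocation request. */
static void cfa_flow_fill_match(struct hwrm_cfa_flow_alloc_input *req,
				const uint8_t mac[6], uint16_t ethertype)
{
	req->flags = (uint16_t)(HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_NUM_VLAN_ONE |
				HWRM_CFA_FLOW_ALLOC_INPUT_FLAGS_FLOWTYPE_IPV4);
	req->ethertype = ethertype;
	/* dmac[] is three consecutive 16-bit words covering 6 bytes. */
	memcpy(req->dmac, mac, 6);
}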
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_free_output {
- uint16_t error_code;
+/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
+struct hwrm_cfa_flow_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ uint8_t unused_0[5];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_flow_free *
+ **********************/
+
+
+/* hwrm_cfa_flow_free_input (size:192b/24B) */
+struct hwrm_cfa_flow_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t stat_ctx_id;
- /* This is the statistics context ID value. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t valid;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ uint8_t unused_0[6];
} __attribute__((packed));
-/* hwrm_stat_ctx_query */
-/* Description: This command returns statistics of a context. */
-/* Input (24 bytes) */
-struct hwrm_stat_ctx_query_input {
- uint16_t req_type;
+/* hwrm_cfa_flow_free_output (size:256b/32B) */
+struct hwrm_cfa_flow_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* packet is 64 b */
+ uint64_t packet;
+ /* byte is 64 b */
+ uint64_t byte;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_flow_info *
+ **********************/
+
+
+/* hwrm_cfa_flow_info_input (size:192b/24B) */
+struct hwrm_cfa_flow_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t stat_ctx_id;
- /* ID of the statistics context that is being queried. */
- uint32_t unused_0;
+ uint64_t resp_addr;
+ /* Flow record index. */
+ uint16_t flow_handle;
+ /* Max flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK \
+ UINT32_C(0xfff)
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_SFT 0
+ /* CNP flow handle */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT \
+ UINT32_C(0x1000)
+ /* Direction rx = 1 */
+ #define HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX \
+ UINT32_C(0x8000)
+ uint8_t unused_0[6];
} __attribute__((packed));
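The 16-bit flow_handle is itself a small bit-field: the low 12 bits index the flow, bit 12 marks a CNP counter, and bit 15 selects the RX direction. A sketch of composing it from the masks above (hypothetical helper):

#include <stdbool.h>
#include <stdint.h>

/* Encode a flow-info handle from a flow index and direction. */
static uint16_t cfa_flow_info_handle(uint16_t index, bool rx)
{
	uint16_t handle = (uint16_t)(index &
		HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_MAX_MASK);

	if (rx)
		handle |= (uint16_t)
			HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX;
	return handle;
}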
-/* Output (176 bytes) */
-struct hwrm_stat_ctx_query_output {
- uint16_t error_code;
+/* hwrm_cfa_flow_info_output (size:448b/56B) */
+struct hwrm_cfa_flow_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* flags is 8 b */
+ uint8_t flags;
+ /* profile is 8 b */
+ uint8_t profile;
+ /* src_fid is 16 b */
+ uint16_t src_fid;
+ /* dst_fid is 16 b */
+ uint16_t dst_fid;
+ /* l2_ctxt_id is 16 b */
+ uint16_t l2_ctxt_id;
+ /* em_info is 64 b */
+ uint64_t em_info;
+ /* tcam_info is 64 b */
+ uint64_t tcam_info;
+ /* vfp_tcam_info is 64 b */
+ uint64_t vfp_tcam_info;
+ /* ar_id is 16 b */
+ uint16_t ar_id;
+ /* flow_handle is 16 b */
+ uint16_t flow_handle;
+ /* tunnel_handle is 32 b */
+ uint32_t tunnel_handle;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_flush *
+ ***********************/
+
+
+/* hwrm_cfa_flow_flush_input (size:192b/24B) */
+struct hwrm_cfa_flow_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t tx_ucast_pkts;
- /* Number of transmitted unicast packets */
- uint64_t tx_mcast_pkts;
- /* Number of transmitted multicast packets */
- uint64_t tx_bcast_pkts;
- /* Number of transmitted broadcast packets */
- uint64_t tx_err_pkts;
- /* Number of transmitted packets with error */
- uint64_t tx_drop_pkts;
- /* Number of dropped packets on transmit path */
- uint64_t tx_ucast_bytes;
- /* Number of transmitted bytes for unicast traffic */
- uint64_t tx_mcast_bytes;
- /* Number of transmitted bytes for multicast traffic */
- uint64_t tx_bcast_bytes;
- /* Number of transmitted bytes for broadcast traffic */
- uint64_t rx_ucast_pkts;
- /* Number of received unicast packets */
- uint64_t rx_mcast_pkts;
- /* Number of received multicast packets */
- uint64_t rx_bcast_pkts;
- /* Number of received broadcast packets */
- uint64_t rx_err_pkts;
- /* Number of received packets with error */
- uint64_t rx_drop_pkts;
- /* Number of dropped packets on received path */
- uint64_t rx_ucast_bytes;
- /* Number of received bytes for unicast traffic */
- uint64_t rx_mcast_bytes;
- /* Number of received bytes for multicast traffic */
- uint64_t rx_bcast_bytes;
- /* Number of received bytes for broadcast traffic */
- uint64_t rx_agg_pkts;
- /* Number of aggregated unicast packets */
- uint64_t rx_agg_bytes;
- /* Number of aggregated unicast bytes */
- uint64_t rx_agg_events;
- /* Number of aggregation events */
- uint64_t rx_agg_aborts;
- /* Number of aborted aggregations */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* hwrm_stat_ctx_clr_stats */
-/* Description: This command clears statistics of a context. */
-/* Input (24 bytes) */
-struct hwrm_stat_ctx_clr_stats_input {
- uint16_t req_type;
+/* hwrm_cfa_flow_flush_output (size:128b/16B) */
+struct hwrm_cfa_flow_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_flow_stats *
+ ***********************/
+
+
+/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+struct hwrm_cfa_flow_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t stat_ctx_id;
- /* ID of the statistics context that is being queried. */
- uint32_t unused_0;
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Number of flow handles in this request. */
+ uint16_t num_flows;
+ /* Flow handle. */
+ uint16_t flow_handle_0;
+ /* Flow handle. */
+ uint16_t flow_handle_1;
+ /* Flow handle. */
+ uint16_t flow_handle_2;
+ /* Flow handle. */
+ uint16_t flow_handle_3;
+ /* Flow handle. */
+ uint16_t flow_handle_4;
+ /* Flow handle. */
+ uint16_t flow_handle_5;
+ /* Flow handle. */
+ uint16_t flow_handle_6;
+ /* Flow handle. */
+ uint16_t flow_handle_7;
+ /* Flow handle. */
+ uint16_t flow_handle_8;
+ /* Flow handle. */
+ uint16_t flow_handle_9;
+ uint8_t unused_0[2];
} __attribute__((packed));
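The stats request carries ten fixed handle slots rather than a variable-length array, and in the response packet_N/byte_N line up with flow_handle_N. A sketch of loading the slots, assuming a zeroed request; the memcpy relies on the ten uint16_t slots being contiguous in this packed layout:

#include <stdint.h>
#include <string.h>

/* Load up to ten flow handles into the fixed request slots. */
static void cfa_flow_stats_fill(struct hwrm_cfa_flow_stats_input *req,
				const uint16_t *handles, uint16_t n)
{
	if (n > 10)
		n = 10;
	req->num_flows = n;
	memcpy(&req->flow_handle_0, handles, n * sizeof(uint16_t));
}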
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_clr_stats_output {
- uint16_t error_code;
+/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+struct hwrm_cfa_flow_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* packet_0 is 64 b */
+ uint64_t packet_0;
+ /* packet_1 is 64 b */
+ uint64_t packet_1;
+ /* packet_2 is 64 b */
+ uint64_t packet_2;
+ /* packet_3 is 64 b */
+ uint64_t packet_3;
+ /* packet_4 is 64 b */
+ uint64_t packet_4;
+ /* packet_5 is 64 b */
+ uint64_t packet_5;
+ /* packet_6 is 64 b */
+ uint64_t packet_6;
+ /* packet_7 is 64 b */
+ uint64_t packet_7;
+ /* packet_8 is 64 b */
+ uint64_t packet_8;
+ /* packet_9 is 64 b */
+ uint64_t packet_9;
+ /* byte_0 is 64 b */
+ uint64_t byte_0;
+ /* byte_1 is 64 b */
+ uint64_t byte_1;
+ /* byte_2 is 64 b */
+ uint64_t byte_2;
+ /* byte_3 is 64 b */
+ uint64_t byte_3;
+ /* byte_4 is 64 b */
+ uint64_t byte_4;
+ /* byte_5 is 64 b */
+ uint64_t byte_5;
+ /* byte_6 is 64 b */
+ uint64_t byte_6;
+ /* byte_7 is 64 b */
+ uint64_t byte_7;
+ /* byte_8 is 64 b */
+ uint64_t byte_8;
+ /* byte_9 is 64 b */
+ uint64_t byte_9;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_cfa_vf_pair_alloc *
+ **************************/
+
+
+/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vf_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS - 1). */
+ uint16_t vf_a_id;
+ /* Logical VF number (range: 0 -> MAX_VFS - 1). */
+ uint16_t vf_b_id;
+ uint8_t unused_0[4];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
} __attribute__((packed));
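pair_name is a fixed 32-byte field described only as a "32 byte string"; assuming firmware expects NUL termination (the comment does not say), a conservative copy zeroes the field first so short names never carry residual bytes:

#include <string.h>

/* Copy a name into a fixed 32-byte HWRM string field. */
static void cfa_set_pair_name(char dst[32], const char *name)
{
	memset(dst, 0, 32);
	strncpy(dst, name, 32 - 1);
}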
-/* hwrm_exec_fwd_resp */
-/*
- * Description: This command is used to send an encapsulated request to the
- * HWRM. This command instructs the HWRM to execute the request and forward the
- * response of the encapsulated request to the location specified in the
- * original request that is encapsulated. The target id of this command shall be
- * set to 0xFFFF (HWRM). The response location in this command shall be used to
- * acknowledge the receipt of the encapsulated request and forwarding of the
- * response.
- */
-/* Input (128 bytes) */
-struct hwrm_exec_fwd_resp_input {
- uint16_t req_type;
+/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vf_pair_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t cmpl_ring;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_cfa_vf_pair_free *
+ *************************/
+
+
+/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */
+struct hwrm_cfa_vf_pair_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t resp_addr;
+ uint16_t seq_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint32_t encap_request[26];
+ uint16_t target_id;
/*
- * This is an encapsulated request. This request should be
- * executed by the HWRM and the response should be provided in
- * the response buffer inside the encapsulated request.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t encap_resp_target_id;
+ uint64_t resp_addr;
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */
+struct hwrm_cfa_vf_pair_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * This value indicates the target id of the response to the
- * encapsulated request. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
- * HWRM
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t unused_0[3];
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_exec_fwd_resp_output {
- uint16_t error_code;
+/*************************
+ * hwrm_cfa_vf_pair_info *
+ *************************/
+
+
+/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */
+struct hwrm_cfa_vf_pair_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, look up by name; otherwise look up by index. */
+ #define HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* vf pair table index. */
+ uint16_t vf_pair_index;
+ uint8_t unused_0[2];
+ /* VF Pair name (32 byte string). */
+ char vf_pair_name[32];
} __attribute__((packed));
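One input structure serves both lookup modes, switched by the LOOKUP_TYPE flag. A sketch, assuming a zeroed request; treating a NULL name as "look up by index" is an illustrative convention here, not a firmware rule:

#include <stddef.h>
#include <string.h>

/* Prepare a VF-pair info query by name or by table index. */
static void cfa_vf_pair_info_prep(struct hwrm_cfa_vf_pair_info_input *req,
				  const char *name, uint16_t index)
{
	if (name != NULL) {
		req->flags |= HWRM_CFA_VF_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE;
		strncpy(req->vf_pair_name, name,
			sizeof(req->vf_pair_name) - 1);
	} else {
		req->vf_pair_index = index;
	}
}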
-/* hwrm_reject_fwd_resp */
-/*
- * Description: This command is used to send an encapsulated request to the
- * HWRM. This command instructs the HWRM to reject the request and forward the
- * error response of the encapsulated request to the location specified in the
- * original request that is encapsulated. The target id of this command shall be
- * set to 0xFFFF (HWRM). The response location in this command shall be used to
- * acknowledge the receipt of the encapsulated request and forwarding of the
- * response.
- */
-/* Input (128 bytes) */
-struct hwrm_reject_fwd_resp_input {
- uint16_t req_type;
+/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */
+struct hwrm_cfa_vf_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* vf pair table index. */
+ uint16_t next_vf_pair_index;
+ /* vf pair member a's vf_fid. */
+ uint16_t vf_a_fid;
+ /* vf pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* vf pair member b's vf_fid. */
+ uint16_t vf_b_fid;
+ /* vf pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* vf pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_VF_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ uint8_t unused_0[5];
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_cfa_pair_alloc *
+ ***********************/
+
+
+/* hwrm_cfa_pair_alloc_input (size:576b/72B) */
+struct hwrm_cfa_pair_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t encap_request[26];
+ uint64_t resp_addr;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair, 5-rep2fn_mod, 6-rep2fn_modall). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ /* Modify existing rep2fn pair and move pair to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MOD UINT32_C(0x5)
+ /* Modify existing rep2fn pairs paired with same PF and move pairs to new PF. */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL UINT32_C(0x6)
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN_MODALL
+ uint8_t unused_0;
+ /* Logical VF number (range: 0 -> MAX_VFS - 1). */
+ uint16_t vf_a_id;
+ /* Logical Host (0xff-local host). */
+ uint8_t host_b_id;
+ /* Logical PF (0xff-PF for command channel). */
+ uint8_t pf_b_id;
+ /* Logical VF number (range: 0 -> MAX_VFS - 1). */
+ uint16_t vf_b_id;
+ /* Loopback port (0xff-internal loopback), valid for mode-3. */
+ uint8_t port_id;
+ /* Priority used for encap of loopback packets, valid for mode-3. */
+ uint8_t pri;
+ /* New PF for rep2fn modify, valid for mode 5. */
+ uint16_t new_pf_fid;
+ uint32_t enables;
/*
- * This is an encapsulated request. This request should be
- * rejected by the HWRM and the error response should be
- * provided in the response buffer inside the encapsulated
- * request.
+ * This bit must be '1' for the q_ab field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the q_ba field to be
+ * configured.
*/
- uint16_t encap_resp_target_id;
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID UINT32_C(0x2)
/*
- * This value indicates the target id of the response to the
- * encapsulated request. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
- * HWRM
+ * This bit must be '1' for the fc_ab field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the fc_ba field to be
+ * configured.
+ */
+ #define HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID UINT32_C(0x8)
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
+ /*
+ * The q_ab value specifies the logical index of the TX/RX CoS
+ * queue to be assigned for traffic in the A to B direction of
+ * the interface pair. The default value is 0.
+ */
+ uint8_t q_ab;
+ /*
+ * The q_ba value specifies the logical index of the TX/RX CoS
+ * queue to be assigned for traffic in the B to A direction of
+ * the interface pair. The default value is 1.
*/
- uint16_t unused_0[3];
+ uint8_t q_ba;
+ /*
+ * Specifies whether RX ring flow control is disabled (0) or enabled
+ * (1) in the A to B direction. The default value is 0, meaning that
+ * packets will be dropped when the B-side RX rings are full.
+ */
+ uint8_t fc_ab;
+ /*
+ * Specifies whether RX ring flow control is disabled (0) or enabled
+ * (1) in the B to A direction. The default value is 1, meaning that
+ * the RX CoS queue will be flow controlled when the A-side RX rings
+ * are full.
+ */
+ uint8_t fc_ba;
+ uint8_t unused_1[4];
} __attribute__((packed));
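The q_ab/q_ba and fc_ab/fc_ba overrides are only honored when the matching `enables` bit is set; fields left disabled keep the documented defaults. A sketch of a rep2fn allocation that overrides just the A-to-B queue (zeroed request and helper name are assumptions):

#include <stdint.h>

/* Request a rep2fn pair for `vf_id`, overriding only the A->B CoS
 * queue; other optional fields keep firmware defaults because their
 * `enables` bits stay clear. */
static void cfa_pair_alloc_prep(struct hwrm_cfa_pair_alloc_input *req,
				uint16_t vf_id, uint8_t q_ab)
{
	req->pair_mode = HWRM_CFA_PAIR_ALLOC_INPUT_PAIR_MODE_REP2FN;
	req->vf_a_id = vf_id;
	req->enables = HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID;
	req->q_ab = q_ab;
}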
-/* Output (16 bytes) */
-struct hwrm_reject_fwd_resp_output {
- uint16_t error_code;
+/* hwrm_cfa_pair_alloc_output (size:192b/24B) */
+struct hwrm_cfa_pair_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Only valid for modes 1 and 2. */
+ uint16_t rx_cfa_code_a;
+ /* Only valid for modes 1 and 2. */
+ uint16_t tx_cfa_action_a;
+ /* Only valid for mode 2. */
+ uint16_t rx_cfa_code_b;
+ /* Only valid for mode 2. */
+ uint16_t tx_cfa_action_b;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_pair_free *
+ **********************/
+
+
+/* hwrm_cfa_pair_free_input (size:384b/48B) */
+struct hwrm_cfa_pair_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* VF Pair name (32 byte string). */
+ char pair_name[32];
} __attribute__((packed));
-/* hwrm_nvm_get_dir_entries */
-/* Input (24 bytes) */
-struct hwrm_nvm_get_dir_entries_input {
- uint16_t req_type;
- uint16_t cmpl_ring;
- uint16_t seq_id;
- uint16_t target_id;
- uint64_t resp_addr;
- uint64_t host_dest_addr;
+/* hwrm_cfa_pair_free_output (size:128b/16B) */
+struct hwrm_cfa_pair_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_nvm_get_dir_entries_output {
- uint16_t error_code;
- uint16_t req_type;
- uint16_t seq_id;
- uint16_t resp_len;
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
-} __attribute__((packed));
+/**********************
+ * hwrm_cfa_pair_info *
+ **********************/
-/* hwrm_nvm_erase_dir_entry */
-/* Input (24 bytes) */
-struct hwrm_nvm_erase_dir_entry_input {
- uint16_t req_type;
- uint16_t cmpl_ring;
- uint16_t seq_id;
- uint16_t target_id;
- uint64_t resp_addr;
- uint16_t dir_idx;
- uint16_t unused_0[3];
-};
-
-/* Output (16 bytes) */
-struct hwrm_nvm_erase_dir_entry_output {
- uint16_t error_code;
- uint16_t req_type;
- uint16_t seq_id;
- uint16_t resp_len;
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
-};
-
-/* hwrm_nvm_get_dir_info */
-/* Input (16 bytes) */
-struct hwrm_nvm_get_dir_info_input {
- uint16_t req_type;
- uint16_t cmpl_ring;
- uint16_t seq_id;
- uint16_t target_id;
- uint64_t resp_addr;
+/* hwrm_cfa_pair_info_input (size:448b/56B) */
+struct hwrm_cfa_pair_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t flags;
+ /* If this flag is set, look up by name; otherwise look up by index. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_TYPE UINT32_C(0x1)
+ /* If this flag is set, look up by PF id and VF id. */
+ #define HWRM_CFA_PAIR_INFO_INPUT_FLAGS_LOOKUP_REPRE UINT32_C(0x2)
+ /* Pair table index. */
+ uint16_t pair_index;
+ /* Pair pf index. */
+ uint8_t pair_pfid;
+ /* Pair vf index. */
+ uint8_t pair_vfid;
+ /* Pair name (32 byte string). */
+ char pair_name[32];
} __attribute__((packed));
-/* Output (24 bytes) */
-struct hwrm_nvm_get_dir_info_output {
- uint16_t error_code;
+/* hwrm_cfa_pair_info_output (size:576b/72B) */
+struct hwrm_cfa_pair_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Pair table index. */
+ uint16_t next_pair_index;
+ /* Pair member a's fid. */
+ uint16_t a_fid;
+ /* Logical host number. */
+ uint8_t host_a_index;
+ /* Logical PF number. */
+ uint8_t pf_a_index;
+ /* Pair member a's Linux logical VF number. */
+ uint16_t vf_a_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_a;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_a;
+ /* Pair member b's fid. */
+ uint16_t b_fid;
+ /* Logical host number. */
+ uint8_t host_b_index;
+ /* Logical PF number. */
+ uint8_t pf_b_index;
+ /* Pair member b's Linux logical VF number. */
+ uint16_t vf_b_index;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code_b;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action_b;
+ /* Pair mode (0-vf2fn, 1-rep2fn, 2-rep2rep, 3-proxy, 4-pfpair). */
+ uint8_t pair_mode;
+ /* Pair between VF on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_VF2FN UINT32_C(0x0)
+ /* Pair between REP on local host with PF or VF on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2FN UINT32_C(0x1)
+ /* Pair between REP on local host with REP on specified host. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_REP2REP UINT32_C(0x2)
+ /* Pair for the proxy interface. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PROXY UINT32_C(0x3)
+ /* Pair for the PF interface. */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR UINT32_C(0x4)
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_LAST \
+ HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_MODE_PFPAIR
+ /* Pair state. */
+ uint8_t pair_state;
+ /* Pair has been allocated */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ALLOCATED UINT32_C(0x1)
+ /* Both pair members are active */
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE UINT32_C(0x2)
+ #define HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_LAST \
+ HWRM_CFA_PAIR_INFO_OUTPUT_PAIR_STATE_ACTIVE
+ /* Pair name (32 byte string). */
+ char pair_name[32];
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_cfa_vfr_alloc *
+ **********************/
+
+
+/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+struct hwrm_cfa_vfr_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t entries;
- /* Number of directory entries in the directory. */
- uint32_t entry_length;
- /* Size of each directory entry, in bytes. */
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /* Logical VF number (range: 0 -> MAX_VFS - 1). */
+ uint16_t vf_id;
+ /*
+ * This field is reserved for future use.
+ * It shall be set to 0.
+ */
+ uint16_t reserved;
+ uint8_t unused_0[4];
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
} __attribute__((packed));
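Allocating a VF representor needs only the logical VF number and a name; the reserved word must stay zero. A sketch under the same zeroed-request assumption:

#include <stdint.h>
#include <string.h>

/* Prepare a VF-representor allocation for logical VF `vf_id`. */
static void cfa_vfr_alloc_prep(struct hwrm_cfa_vfr_alloc_input *req,
			       uint16_t vf_id, const char *name)
{
	req->vf_id = vf_id;
	req->reserved = 0;	/* shall be 0 per the field comment */
	strncpy(req->vfr_name, name, sizeof(req->vfr_name) - 1);
}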
-/* hwrm_nvm_write */
-/*
- * Note: Write to the allocated NVRAM of an item referenced by an existing
- * directory entry.
- */
-/* Input (48 bytes) */
-struct hwrm_nvm_write_input {
- uint16_t req_type;
+/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+struct hwrm_cfa_vfr_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Rx CFA code. */
+ uint16_t rx_cfa_code;
+ /* Tx CFA action. */
+ uint16_t tx_cfa_action;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*********************
+ * hwrm_cfa_vfr_free *
+ *********************/
+
+
+/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+struct hwrm_cfa_vfr_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t cmpl_ring;
+ uint16_t cmpl_ring;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint16_t seq_id;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint64_t resp_addr;
+ uint16_t target_id;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint64_t host_src_addr;
- /* 64-bit Host Source Address. This is where the source data is. */
- uint16_t dir_type;
+ uint64_t resp_addr;
+ /* VF Representor name (32 byte string). */
+ char vfr_name[32];
+} __attribute__((packed));
+
+/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+struct hwrm_cfa_vfr_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************************
+ * hwrm_tunnel_dst_port_query *
+ ******************************/
+
+
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * The Directory Entry Type (valid values are defined in the
- * bnxnvm_directory_type enum defined in the file
- * bnxnvm_defs.h).
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t dir_ordinal;
+ uint16_t cmpl_ring;
/*
- * Directory ordinal. The 0-based instance of the combined
- * Directory Entry Type and Extension.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint16_t dir_ext;
+ uint16_t seq_id;
/*
- * The Directory Entry Extension flags (see BNX_DIR_EXT_* in the
- * file bnxnvm_defs.h).
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t dir_attr;
+ uint16_t target_id;
/*
- * Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the
- * file bnxnvm_defs.h).
+ * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint32_t dir_data_length;
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * Length of data to write, in bytes. May be less than or equal
- * to the allocated size for the directory entry. The data
- * length stored in the directory entry will be updated to
- * reflect this value once the write is complete.
+ * This field represents the identifier of L4 destination port
+ * used for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
*/
- uint16_t option;
- /* Option. */
- uint16_t flags;
+ uint16_t tunnel_dst_port_id;
/*
- * When this bit is '1', the original active image will not be
- * removed. TBD: what purpose is this?
+ * This field represents the value of L4 destination port
+ * identified by tunnel_dst_port_id. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ * This field is in network byte order.
+ *
+ * A value of 0 means that the destination port is not
+ * configured.
*/
- #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG UINT32_C(0x1)
- uint32_t dir_item_length;
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_0[3];
/*
- * The requested length of the allocated NVM for the item, in
- * bytes. This value may be greater than or equal to the
- * specified data length (dir_data_length). If this value is
- * less than the specified data length, it will be ignored. The
- * response will contain the actual allocated item length, which
- * may be greater than the requested item length. The purpose
- * for allocating more than the required number of bytes for an
- * item's data is to pre-allocate extra storage (padding) to
- * accommodate the potential future growth of an item (e.g.
- * upgraded firmware with a size increase, log growth, expanded
- * configuration data).
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint32_t unused_0;
+ uint8_t valid;
} __attribute__((packed));
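+
+/*
+ * Illustrative sketch: tunnel_dst_port_val above is carried in network
+ * byte order, so a host-order consumer converts it on read;
+ * rte_be_to_cpu_16() (rte_byteorder.h) is assumed here. A result of 0
+ * means no port is configured for the queried tunnel type.
+ */
+static inline uint16_t
+hwrm_tunnel_dst_port_host_order(
+	const struct hwrm_tunnel_dst_port_query_output *resp)
+{
+	return rte_be_to_cpu_16(resp->tunnel_dst_port_val);
+}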
-/* Output (16 bytes) */
-struct hwrm_nvm_write_output {
- uint16_t error_code;
+/******************************
+ * hwrm_tunnel_dst_port_alloc *
+ ******************************/
+
+
+/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint32_t dir_item_length;
+ uint16_t seq_id;
/*
- * Length of the allocated NVM for the item, in bytes. The value
- * may be greater than or equal to the specified data length or
- * the requested item length. The actual item length used when
- * creating a new directory entry will be a multiple of an NVM
- * block size.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t dir_idx;
- /* The directory index of the created or modified item. */
- uint8_t unused_0;
- uint8_t valid;
+ uint16_t target_id;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0;
+ /*
+ * This field represents the value of L4 destination port used
+ * for the given tunnel type. This field is valid for
+ * specific tunnel types that use layer 4 (e.g. UDP)
+ * transports for tunneling.
+ *
+ * This field is in network byte order.
+ *
+ * A value of 0 shall fail the command.
+ */
+ uint16_t tunnel_dst_port_val;
+ uint8_t unused_1[4];
} __attribute__((packed));
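+
+/*
+ * Illustrative sketch: filling the alloc request for the IANA VXLAN
+ * UDP port. tunnel_dst_port_val is carried in network byte order and a
+ * value of 0 fails the command; rte_cpu_to_be_16() (rte_byteorder.h)
+ * and memset() (string.h) are assumed for illustration.
+ */
+static inline void
+hwrm_tunnel_vxlan_alloc_fill(struct hwrm_tunnel_dst_port_alloc_input *req)
+{
+	memset(req, 0, sizeof(*req));
+	req->tunnel_type =
+		HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
+	/* 4789 is the IANA-assigned VXLAN port; must be non-zero. */
+	req->tunnel_dst_port_val = rte_cpu_to_be_16(4789);
+}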
-/* hwrm_nvm_read */
-/*
- * Note: Read the contents of an NVRAM item as referenced (indexed) by an
- * existing directory entry.
- */
-/* Input (40 bytes) */
-struct hwrm_nvm_read_input {
- uint16_t req_type;
+/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel
+ * types that have L4 destination port parameters.
*/
- uint16_t cmpl_ring;
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_0[5];
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_tunnel_dst_port_free *
+ *****************************/
+
+
+/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+struct hwrm_tunnel_dst_port_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint64_t resp_addr;
+ uint16_t cmpl_ring;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- uint64_t host_dest_addr;
+ uint16_t seq_id;
/*
- * 64-bit Host Destination Address. This is the host address
- * where the data will be written to.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t dir_idx;
- /* The 0-based index of the directory entry. */
- uint8_t unused_0;
- uint8_t unused_1;
- uint32_t offset;
- /* The NVRAM byte-offset to read from. */
- uint32_t len;
- /* The length of the data to be read, in bytes. */
- uint32_t unused_2;
+ uint16_t target_id;
+ /*
+ * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Tunnel Type. */
+ uint8_t tunnel_type;
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN \
+ UINT32_C(0x1)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE \
+ UINT32_C(0x5)
+ /* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+ UINT32_C(0x9)
+ /* Enhanced Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 \
+ UINT32_C(0xa)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST \
+ HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1
+ uint8_t unused_0;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only applies to tunnel
+ * types that have L4 destination port parameters.
+ */
+ uint16_t tunnel_dst_port_id;
+ uint8_t unused_1[4];
} __attribute__((packed));
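+
+/*
+ * Illustrative sketch: the free request identifies the port by the
+ * tunnel_dst_port_id returned in hwrm_tunnel_dst_port_alloc_output,
+ * not by the port value itself.
+ */
+static inline void
+hwrm_tunnel_vxlan_free_fill(struct hwrm_tunnel_dst_port_free_input *req,
+			    uint16_t port_id)
+{
+	memset(req, 0, sizeof(*req));
+	req->tunnel_type = HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN;
+	req->tunnel_dst_port_id = port_id;	/* from the alloc response */
+}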
-/* Output (16 bytes) */
-struct hwrm_nvm_read_output {
- uint16_t error_code;
+/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+struct hwrm_tunnel_dst_port_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_1[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/* ctx_hw_stats (size:1280b/160B) */
+struct ctx_hw_stats {
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of discarded packets on received path */
+ uint64_t rx_discard_pkts;
+ /* Number of dropped packets on received path */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of discarded packets on transmit path */
+ uint64_t tx_discard_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of TPA packets */
+ uint64_t tpa_pkts;
+ /* Number of TPA bytes */
+ uint64_t tpa_bytes;
+ /* Number of TPA events */
+ uint64_t tpa_events;
+ /* Number of TPA aborts */
+ uint64_t tpa_aborts;
+} __attribute__((packed));
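+
+/*
+ * Illustrative sketch, not generated code: once firmware is DMAing this
+ * block periodically (see hwrm_stat_ctx_alloc below), aggregates can be
+ * derived straight from host memory without issuing a query.
+ */
+static inline uint64_t
+ctx_hw_stats_rx_total_pkts(const struct ctx_hw_stats *stats)
+{
+	/* All received packets across unicast/multicast/broadcast. */
+	return stats->rx_ucast_pkts + stats->rx_mcast_pkts +
+	       stats->rx_bcast_pkts;
+}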
+
+/***********************
+ * hwrm_stat_ctx_alloc *
+ ***********************/
+
+
+/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
+struct hwrm_stat_ctx_alloc_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
- uint8_t valid;
+ uint16_t cmpl_ring;
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
-} __attribute__((packed));
-
-/* Hardware Resource Manager Specification */
-/* Description: This structure is used to specify port description. */
-/*
- * Note: The Hardware Resource Manager (HWRM) manages various hardware resources
- * inside the chip. The HWRM is implemented in firmware, and runs on embedded
- * processors inside the chip. This firmware service is vital part of the chip.
- * The chip can not be used by a driver or HWRM client without the HWRM.
- */
-/* Input (16 bytes) */
-struct input {
- uint16_t req_type;
+ uint16_t seq_id;
/*
- * This value indicates what type of request this is. The format
- * for the rest of the command is determined by this field.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint16_t cmpl_ring;
+ uint16_t target_id;
/*
- * This value indicates the what completion ring the request
- * will be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
- * valid CR ring_id value for this function.
+ * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- uint16_t seq_id;
- /* This value indicates the command sequence number. */
- uint16_t target_id;
+ uint64_t resp_addr;
+ /* This is the host address of the statistics block. */
+ uint64_t stats_dma_addr;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function
- * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
- * - HWRM
+ * The statistic block update period in ms.
+ * e.g. 250ms, 500ms, 750ms, 1000ms.
+ * If update_period_ms is 0, then the stats update
+ * shall never be done and the DMA address shall not be used.
+ * In this case, the stat block can only be read by the
+ * hwrm_stat_ctx_query command.
*/
- uint64_t resp_addr;
+ uint32_t update_period_ms;
/*
- * This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
- * and must be cleared to zero before the request is made.
+ * This field is used to specify statistics context specific
+ * configuration flags.
*/
+ uint8_t stat_ctx_flags;
+ /*
+ * When this bit is set to '1', the statistics context shall be
+ * allocated for RoCE traffic only. In this case, traffic other
+ * than offloaded RoCE traffic shall not be included in this
+ * statistic context.
+ * When this bit is set to '0', the statistics context shall be
+ * used for the network traffic other than offloaded RoCE traffic.
+ */
+ #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
+ uint8_t unused_0[3];
} __attribute__((packed));
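+
+/*
+ * Illustrative sketch: allocating a statistics context that firmware
+ * DMAs into `stats` every 500ms. An update_period_ms of 0 would
+ * disable the periodic DMA, leaving hwrm_stat_ctx_query as the only
+ * way to read the block. rte_mem_virt2iova() (rte_memory.h) is an
+ * assumption for obtaining a DMA-able address.
+ */
+static inline void
+hwrm_stat_ctx_alloc_fill(struct hwrm_stat_ctx_alloc_input *req,
+			 struct ctx_hw_stats *stats)
+{
+	memset(req, 0, sizeof(*req));
+	req->stats_dma_addr = rte_mem_virt2iova(stats);
+	req->update_period_ms = 500;	/* 0 disables periodic updates */
+}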
-/* Output (8 bytes) */
-struct output {
- uint16_t error_code;
+/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+struct hwrm_stat_ctx_alloc_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/**********************
+ * hwrm_stat_ctx_free *
+ **********************/
+
+
+/* hwrm_stat_ctx_free_input (size:192b/24B) */
+struct hwrm_stat_ctx_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* ID of the statistics context that is being freed. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* Short Command Structure (16 bytes) */
-struct hwrm_short_input {
- uint16_t req_type;
+/* hwrm_stat_ctx_free_output (size:128b/16B) */
+struct hwrm_stat_ctx_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* This is the statistics context ID value. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[3];
/*
- * This field indicates the type of request in the request
- * buffer. The format for the rest of the command (request) is
- * determined by this field.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t signature;
+ uint8_t valid;
+} __attribute__((packed));
+
+/***********************
+ * hwrm_stat_ctx_query *
+ ***********************/
+
+
+/* hwrm_stat_ctx_query_input (size:192b/24B) */
+struct hwrm_stat_ctx_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field indicates a signature that is used to identify
- * short form of the command listed here. This field shall be
- * set to 17185 (0x4321).
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- /* Signature indicating this is a short form of HWRM command */
- #define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD UINT32_C(0x4321)
- uint16_t unused_0;
- /* Reserved for future use. */
- uint16_t size;
- /* This value indicates the length of the request. */
- uint64_t req_addr;
- /*
- * This is the host address where the request was written. This
- * area must be 16B aligned.
- */
-} __attribute__((packed));
-
-#define HWRM_GET_HWRM_ERROR_CODE(arg) \
- { \
- typeof(arg) x = (arg); \
- ((x) == 0xf ? "HWRM_ERROR" : \
- ((x) == 0xffff ? "CMD_NOT_SUPPORTED" : \
- ((x) == 0xfffe ? "UNKNOWN_ERR" : \
- ((x) == 0x4 ? "RESOURCE_ALLOC_ERROR" : \
- ((x) == 0x5 ? "INVALID_FLAGS" : \
- ((x) == 0x6 ? "INVALID_ENABLES" : \
- ((x) == 0x0 ? "SUCCESS" : \
- ((x) == 0x1 ? "FAIL" : \
- ((x) == 0x2 ? "INVALID_PARAMS" : \
- ((x) == 0x3 ? "RESOURCE_ACCESS_DENIED" : \
- "Unknown error_code")))))))))) \
- }
-
-/* Return Codes (8 bytes) */
-struct ret_codes {
- uint16_t error_code;
- /* These are numbers assigned to return/error codes. */
- /* Request was successfully executed by the HWRM. */
- #define HWRM_ERR_CODE_SUCCESS (UINT32_C(0x0))
- /* THe HWRM failed to execute the request. */
- #define HWRM_ERR_CODE_FAIL (UINT32_C(0x1))
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* ID of the statistics context that is being queried. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
+} __attribute__((packed));
+
+/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+struct hwrm_stat_ctx_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of transmitted unicast packets */
+ uint64_t tx_ucast_pkts;
+ /* Number of transmitted multicast packets */
+ uint64_t tx_mcast_pkts;
+ /* Number of transmitted broadcast packets */
+ uint64_t tx_bcast_pkts;
+ /* Number of transmitted packets with error */
+ uint64_t tx_err_pkts;
+ /* Number of dropped packets on transmit path */
+ uint64_t tx_drop_pkts;
+ /* Number of transmitted bytes for unicast traffic */
+ uint64_t tx_ucast_bytes;
+ /* Number of transmitted bytes for multicast traffic */
+ uint64_t tx_mcast_bytes;
+ /* Number of transmitted bytes for broadcast traffic */
+ uint64_t tx_bcast_bytes;
+ /* Number of received unicast packets */
+ uint64_t rx_ucast_pkts;
+ /* Number of received multicast packets */
+ uint64_t rx_mcast_pkts;
+ /* Number of received broadcast packets */
+ uint64_t rx_bcast_pkts;
+ /* Number of received packets with error */
+ uint64_t rx_err_pkts;
+ /* Number of dropped packets on received path */
+ uint64_t rx_drop_pkts;
+ /* Number of received bytes for unicast traffic */
+ uint64_t rx_ucast_bytes;
+ /* Number of received bytes for multicast traffic */
+ uint64_t rx_mcast_bytes;
+ /* Number of received bytes for broadcast traffic */
+ uint64_t rx_bcast_bytes;
+ /* Number of aggregated unicast packets */
+ uint64_t rx_agg_pkts;
+ /* Number of aggregated unicast bytes */
+ uint64_t rx_agg_bytes;
+ /* Number of aggregation events */
+ uint64_t rx_agg_events;
+ /* Number of aborted aggregations */
+ uint64_t rx_agg_aborts;
+ uint8_t unused_0[7];
/*
- * The request contains invalid argument(s) or
- * input parameters.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- #define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2))
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_stat_ctx_clr_stats *
+ ***************************/
+
+
+/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+struct hwrm_stat_ctx_clr_stats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * The requester is not allowed to access the
- * requested resource. This error code shall be
- * provided in a response to a request to query
- * or modify an existing resource that is not
- * accessible by the requester.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3))
+ uint16_t cmpl_ring;
/*
- * The HWRM is unable to allocate the requested
- * resource. This code only applies to requests
- * for HWRM resource allocations.
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
*/
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR (UINT32_C(0x4))
- /* Invalid combination of flags is specified in the request. */
- #define HWRM_ERR_CODE_INVALID_FLAGS (UINT32_C(0x5))
+ uint16_t seq_id;
/*
- * Invalid combination of enables fields is
- * specified in the request.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- #define HWRM_ERR_CODE_INVALID_ENABLES (UINT32_C(0x6))
+ uint16_t target_id;
/*
- * Generic HWRM execution error that represents
- * an internal error.
+ * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
*/
- #define HWRM_ERR_CODE_HWRM_ERROR (UINT32_C(0xf))
- /* Unknown error */
- #define HWRM_ERR_CODE_UNKNOWN_ERR (UINT32_C(0xfffe))
- /* Unsupported or invalid command */
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED (UINT32_C(0xffff))
- uint16_t unused_0[3];
+ uint64_t resp_addr;
+ /* ID of the statistics context whose counters are being cleared. */
+ uint32_t stat_ctx_id;
+ uint8_t unused_0[4];
} __attribute__((packed));
-/* Output (16 bytes) */
-struct hwrm_err_output {
- uint16_t error_code;
+/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+struct hwrm_stat_ctx_clr_stats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
/*
- * Pass/Fail or error type Note: receiver to verify the in
- * parameters, and fail the call with an error when appropriate
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
- uint16_t req_type;
- /* This field returns the type of original request. */
- uint16_t seq_id;
- /* This field provides original sequence number of the command. */
- uint16_t resp_len;
+ uint8_t valid;
+} __attribute__((packed));
+
+/********************
+ * hwrm_pcie_qstats *
+ ********************/
+
+
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
/*
- * This field is the length of the response in bytes. The last
- * byte of the response is a valid flag that will read as '1'
- * when the command has been completely written to memory.
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
*/
- uint32_t opaque_0;
- /* debug info for this error response. */
- uint16_t opaque_1;
- /* debug info for this error response. */
- uint8_t cmd_err;
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
/*
- * In the case of an error response, command specific error code
- * is returned in this field.
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
*/
- uint8_t valid;
+ uint16_t target_id;
+ /*
+ * A physical address pointer to the host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * The size of the PCIe statistics block in bytes.
+ * Firmware will DMA the PCIe statistics to the host and
+ * report the size DMAed in this field of the response.
+ */
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[6];
+ /*
+ * This is the host address where
+ * PCIe statistics will be stored
+ */
+ uint64_t pcie_stat_host_addr;
+} __attribute__((packed));
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* The size of the PCIe statistics block in bytes. */
+ uint16_t pcie_stat_size;
+ uint8_t unused_0[5];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been completely
- * written. When writing a command completion or response to an
- * internal processor, the order of writes has to be such that
- * this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
*/
+ uint8_t valid;
} __attribute__((packed));
-/* Port Tx Statistics Formats (408 bytes) */
+/* Port Tx Statistics Formats */
+/* tx_port_stats (size:3264b/408B) */
struct tx_port_stats {
- uint64_t tx_64b_frames;
/* Total Number of 64 Bytes frames transmitted */
- uint64_t tx_65b_127b_frames;
+ uint64_t tx_64b_frames;
/* Total Number of 65-127 Bytes frames transmitted */
- uint64_t tx_128b_255b_frames;
+ uint64_t tx_65b_127b_frames;
/* Total Number of 128-255 Bytes frames transmitted */
- uint64_t tx_256b_511b_frames;
+ uint64_t tx_128b_255b_frames;
/* Total Number of 256-511 Bytes frames transmitted */
- uint64_t tx_512b_1023b_frames;
+ uint64_t tx_256b_511b_frames;
/* Total Number of 512-1023 Bytes frames transmitted */
- uint64_t tx_1024b_1518_frames;
+ uint64_t tx_512b_1023b_frames;
/* Total Number of 1024-1518 Bytes frames transmitted */
- uint64_t tx_good_vlan_frames;
+ uint64_t tx_1024b_1518_frames;
/*
- * Total Number of each good VLAN (exludes FCS errors) frame
- * transmitted which is 1519 to 1522 bytes in length inclusive
- * (excluding framing bits but including FCS bytes).
+ * Total Number of each good VLAN (excludes FCS errors)
+ * frame transmitted which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
*/
- uint64_t tx_1519b_2047_frames;
+ uint64_t tx_good_vlan_frames;
/* Total Number of 1519-2047 Bytes frames transmitted */
- uint64_t tx_2048b_4095b_frames;
+ uint64_t tx_1519b_2047_frames;
/* Total Number of 2048-4095 Bytes frames transmitted */
- uint64_t tx_4096b_9216b_frames;
+ uint64_t tx_2048b_4095b_frames;
/* Total Number of 4096-9216 Bytes frames transmitted */
- uint64_t tx_9217b_16383b_frames;
+ uint64_t tx_4096b_9216b_frames;
/* Total Number of 9217-16383 Bytes frames transmitted */
- uint64_t tx_good_frames;
+ uint64_t tx_9217b_16383b_frames;
/* Total Number of good frames transmitted */
- uint64_t tx_total_frames;
+ uint64_t tx_good_frames;
/* Total Number of frames transmitted */
- uint64_t tx_ucast_frames;
+ uint64_t tx_total_frames;
/* Total number of unicast frames transmitted */
- uint64_t tx_mcast_frames;
+ uint64_t tx_ucast_frames;
/* Total number of multicast frames transmitted */
- uint64_t tx_bcast_frames;
+ uint64_t tx_mcast_frames;
/* Total number of broadcast frames transmitted */
- uint64_t tx_pause_frames;
+ uint64_t tx_bcast_frames;
/* Total number of PAUSE control frames transmitted */
- uint64_t tx_pfc_frames;
- /* Total number of PFC/per-priority PAUSE control frames transmitted */
- uint64_t tx_jabber_frames;
+ uint64_t tx_pause_frames;
+ /*
+ * Total number of PFC/per-priority PAUSE
+ * control frames transmitted
+ */
+ uint64_t tx_pfc_frames;
/* Total number of jabber frames transmitted */
- uint64_t tx_fcs_err_frames;
+ uint64_t tx_jabber_frames;
/* Total number of frames transmitted with FCS error */
- uint64_t tx_control_frames;
+ uint64_t tx_fcs_err_frames;
/* Total number of control frames transmitted */
- uint64_t tx_oversz_frames;
+ uint64_t tx_control_frames;
/* Total number of over-sized frames transmitted */
- uint64_t tx_single_dfrl_frames;
+ uint64_t tx_oversz_frames;
/* Total number of frames with single deferral */
- uint64_t tx_multi_dfrl_frames;
+ uint64_t tx_single_dfrl_frames;
/* Total number of frames with multiple deferrals */
- uint64_t tx_single_coll_frames;
+ uint64_t tx_multi_dfrl_frames;
/* Total number of frames with single collision */
- uint64_t tx_multi_coll_frames;
+ uint64_t tx_single_coll_frames;
/* Total number of frames with multiple collisions */
- uint64_t tx_late_coll_frames;
+ uint64_t tx_multi_coll_frames;
/* Total number of frames with late collisions */
- uint64_t tx_excessive_coll_frames;
+ uint64_t tx_late_coll_frames;
/* Total number of frames with excessive collisions */
- uint64_t tx_frag_frames;
+ uint64_t tx_excessive_coll_frames;
/* Total number of fragmented frames transmitted */
- uint64_t tx_err;
+ uint64_t tx_frag_frames;
/* Total number of transmit errors */
- uint64_t tx_tagged_frames;
+ uint64_t tx_err;
/* Total number of single VLAN tagged frames transmitted */
- uint64_t tx_dbl_tagged_frames;
+ uint64_t tx_tagged_frames;
/* Total number of double VLAN tagged frames transmitted */
- uint64_t tx_runt_frames;
+ uint64_t tx_dbl_tagged_frames;
/* Total number of runt frames transmitted */
- uint64_t tx_fifo_underruns;
+ uint64_t tx_runt_frames;
/* Total number of TX FIFO under runs */
- uint64_t tx_pfc_ena_frames_pri0;
+ uint64_t tx_fifo_underruns;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 0
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 0 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri1;
+ uint64_t tx_pfc_ena_frames_pri0;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 1
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 1 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri2;
+ uint64_t tx_pfc_ena_frames_pri1;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 2
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 2 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri3;
+ uint64_t tx_pfc_ena_frames_pri2;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 3
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 3 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri4;
+ uint64_t tx_pfc_ena_frames_pri3;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 4
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 4 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri5;
+ uint64_t tx_pfc_ena_frames_pri4;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 5
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 5 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri6;
+ uint64_t tx_pfc_ena_frames_pri5;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 6
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 6 transmitted
*/
- uint64_t tx_pfc_ena_frames_pri7;
+ uint64_t tx_pfc_ena_frames_pri6;
/*
- * Total number of PFC frames with PFC enabled bit for Pri 7
- * transmitted
+ * Total number of PFC frames with PFC enabled bit for
+ * Pri 7 transmitted
*/
- uint64_t tx_eee_lpi_events;
+ uint64_t tx_pfc_ena_frames_pri7;
/* Total number of EEE LPI Events on TX */
- uint64_t tx_eee_lpi_duration;
+ uint64_t tx_eee_lpi_events;
/* EEE LPI Duration Counter on TX */
- uint64_t tx_llfc_logical_msgs;
+ uint64_t tx_eee_lpi_duration;
/*
- * Total number of Link Level Flow Control (LLFC) messages
+ * Total number of Link Level Flow Control (LLFC) messages
* transmitted
*/
- uint64_t tx_hcfc_msgs;
+ uint64_t tx_llfc_logical_msgs;
/* Total number of HCFC messages transmitted */
- uint64_t tx_total_collisions;
+ uint64_t tx_hcfc_msgs;
/* Total number of TX collisions */
- uint64_t tx_bytes;
+ uint64_t tx_total_collisions;
/* Total number of transmitted bytes */
- uint64_t tx_xthol_frames;
+ uint64_t tx_bytes;
/* Total number of end-to-end HOL frames */
- uint64_t tx_stat_discard;
+ uint64_t tx_xthol_frames;
/* Total Tx Drops per Port reported by STATS block */
- uint64_t tx_stat_error;
+ uint64_t tx_stat_discard;
/* Total Tx Error Drops per Port reported by STATS block */
+ uint64_t tx_stat_error;
} __attribute__((packed));
-/* Port Rx Statistics Formats (528 bytes) */
+/* Port Rx Statistics Formats */
+/* rx_port_stats (size:4224b/528B) */
struct rx_port_stats {
- uint64_t rx_64b_frames;
/* Total Number of 64 Bytes frames received */
- uint64_t rx_65b_127b_frames;
+ uint64_t rx_64b_frames;
/* Total Number of 65-127 Bytes frames received */
- uint64_t rx_128b_255b_frames;
+ uint64_t rx_65b_127b_frames;
/* Total Number of 128-255 Bytes frames received */
- uint64_t rx_256b_511b_frames;
+ uint64_t rx_128b_255b_frames;
/* Total Number of 256-511 Bytes frames received */
- uint64_t rx_512b_1023b_frames;
+ uint64_t rx_256b_511b_frames;
/* Total Number of 512-1023 Bytes frames received */
- uint64_t rx_1024b_1518_frames;
+ uint64_t rx_512b_1023b_frames;
/* Total Number of 1024-1518 Bytes frames received */
- uint64_t rx_good_vlan_frames;
+ uint64_t rx_1024b_1518_frames;
/*
- * Total Number of each good VLAN (exludes FCS errors) frame
- * received which is 1519 to 1522 bytes in length inclusive
- * (excluding framing bits but including FCS bytes).
+ * Total Number of each good VLAN (excludes FCS errors)
+ * frame received which is 1519 to 1522 bytes in length
+ * inclusive (excluding framing bits but including FCS bytes).
*/
- uint64_t rx_1519b_2047b_frames;
+ uint64_t rx_good_vlan_frames;
/* Total Number of 1519-2047 Bytes frames received */
- uint64_t rx_2048b_4095b_frames;
+ uint64_t rx_1519b_2047b_frames;
/* Total Number of 2048-4095 Bytes frames received */
- uint64_t rx_4096b_9216b_frames;
+ uint64_t rx_2048b_4095b_frames;
/* Total Number of 4096-9216 Bytes frames received */
- uint64_t rx_9217b_16383b_frames;
+ uint64_t rx_4096b_9216b_frames;
/* Total Number of 9217-16383 Bytes frames received */
- uint64_t rx_total_frames;
+ uint64_t rx_9217b_16383b_frames;
/* Total number of frames received */
- uint64_t rx_ucast_frames;
+ uint64_t rx_total_frames;
/* Total number of unicast frames received */
- uint64_t rx_mcast_frames;
+ uint64_t rx_ucast_frames;
/* Total number of multicast frames received */
- uint64_t rx_bcast_frames;
+ uint64_t rx_mcast_frames;
/* Total number of broadcast frames received */
- uint64_t rx_fcs_err_frames;
+ uint64_t rx_bcast_frames;
/* Total number of received frames with FCS error */
- uint64_t rx_ctrl_frames;
+ uint64_t rx_fcs_err_frames;
/* Total number of control frames received */
- uint64_t rx_pause_frames;
+ uint64_t rx_ctrl_frames;
/* Total number of PAUSE frames received */
- uint64_t rx_pfc_frames;
+ uint64_t rx_pause_frames;
/* Total number of PFC frames received */
- uint64_t rx_unsupported_opcode_frames;
- /* Total number of frames received with an unsupported opcode */
- uint64_t rx_unsupported_da_pausepfc_frames;
+ uint64_t rx_pfc_frames;
/*
- * Total number of frames received with an unsupported DA for
- * pause and PFC
+ * Total number of frames received with an unsupported
+ * opcode
*/
- uint64_t rx_wrong_sa_frames;
+ uint64_t rx_unsupported_opcode_frames;
+ /*
+ * Total number of frames received with an unsupported
+ * DA for pause and PFC
+ */
+ uint64_t rx_unsupported_da_pausepfc_frames;
/* Total number of frames received with an unsupported SA */
- uint64_t rx_align_err_frames;
+ uint64_t rx_wrong_sa_frames;
/* Total number of received packets with alignment error */
- uint64_t rx_oor_len_frames;
+ uint64_t rx_align_err_frames;
/* Total number of received frames with out-of-range length */
- uint64_t rx_code_err_frames;
+ uint64_t rx_oor_len_frames;
/* Total number of received frames with error termination */
- uint64_t rx_false_carrier_frames;
+ uint64_t rx_code_err_frames;
/*
* Total number of received frames with a false carrier is
- * detected during idle, as defined by RX_ER samples active and
- * RXD is 0xE. The event is reported along with the statistics
- * generated on the next received frame. Only one false carrier
- * condition can be detected and logged between frames. Carrier
- * event, valid for 10M/100M speed modes only.
- */
- uint64_t rx_ovrsz_frames;
+ * detected during idle, as defined by RX_ER samples active
+ * and RXD is 0xE. The event is reported along with the
+ * statistics generated on the next received frame. Only
+ * one false carrier condition can be detected and logged
+ * between frames.
+ *
+ * Carrier event, valid for 10M/100M speed modes only.
+ */
+ uint64_t rx_false_carrier_frames;
/* Total number of over-sized frames received */
- uint64_t rx_jbr_frames;
+ uint64_t rx_ovrsz_frames;
/* Total number of jabber packets received */
- uint64_t rx_mtu_err_frames;
+ uint64_t rx_jbr_frames;
/* Total number of received frames with MTU error */
- uint64_t rx_match_crc_frames;
+ uint64_t rx_mtu_err_frames;
/* Total number of received frames with CRC match */
- uint64_t rx_promiscuous_frames;
+ uint64_t rx_match_crc_frames;
/* Total number of frames received promiscuously */
- uint64_t rx_tagged_frames;
- /* Total number of received frames with one or two VLAN tags */
- uint64_t rx_double_tagged_frames;
+ uint64_t rx_promiscuous_frames;
+ /*
+ * Total number of received frames with one or two VLAN
+ * tags
+ */
+ uint64_t rx_tagged_frames;
/* Total number of received frames with two VLAN tags */
- uint64_t rx_trunc_frames;
+ uint64_t rx_double_tagged_frames;
/* Total number of truncated frames received */
- uint64_t rx_good_frames;
- /* Total number of good frames (without errors) received */
- uint64_t rx_pfc_xon2xoff_frames_pri0;
+ uint64_t rx_trunc_frames;
+ /* Total number of good frames (without errors) received */
+ uint64_t rx_good_frames;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 0
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 0
*/
- uint64_t rx_pfc_xon2xoff_frames_pri1;
+ uint64_t rx_pfc_xon2xoff_frames_pri0;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 1
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 1
*/
- uint64_t rx_pfc_xon2xoff_frames_pri2;
+ uint64_t rx_pfc_xon2xoff_frames_pri1;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 2
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 2
*/
- uint64_t rx_pfc_xon2xoff_frames_pri3;
+ uint64_t rx_pfc_xon2xoff_frames_pri2;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 3
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 3
*/
- uint64_t rx_pfc_xon2xoff_frames_pri4;
+ uint64_t rx_pfc_xon2xoff_frames_pri3;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 4
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 4
*/
- uint64_t rx_pfc_xon2xoff_frames_pri5;
+ uint64_t rx_pfc_xon2xoff_frames_pri4;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 5
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 5
*/
- uint64_t rx_pfc_xon2xoff_frames_pri6;
+ uint64_t rx_pfc_xon2xoff_frames_pri5;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 6
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 6
*/
- uint64_t rx_pfc_xon2xoff_frames_pri7;
+ uint64_t rx_pfc_xon2xoff_frames_pri6;
/*
- * Total number of received PFC frames with transition from XON
- * to XOFF on Pri 7
+ * Total number of received PFC frames with transition from
+ * XON to XOFF on Pri 7
*/
- uint64_t rx_pfc_ena_frames_pri0;
+ uint64_t rx_pfc_xon2xoff_frames_pri7;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 0
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 0
*/
- uint64_t rx_pfc_ena_frames_pri1;
+ uint64_t rx_pfc_ena_frames_pri0;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 1
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 1
*/
- uint64_t rx_pfc_ena_frames_pri2;
+ uint64_t rx_pfc_ena_frames_pri1;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 2
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 2
*/
- uint64_t rx_pfc_ena_frames_pri3;
+ uint64_t rx_pfc_ena_frames_pri2;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 3
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 3
*/
- uint64_t rx_pfc_ena_frames_pri4;
+ uint64_t rx_pfc_ena_frames_pri3;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 4
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 4
*/
- uint64_t rx_pfc_ena_frames_pri5;
+ uint64_t rx_pfc_ena_frames_pri4;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 5
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 5
*/
- uint64_t rx_pfc_ena_frames_pri6;
+ uint64_t rx_pfc_ena_frames_pri5;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 6
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 6
*/
- uint64_t rx_pfc_ena_frames_pri7;
+ uint64_t rx_pfc_ena_frames_pri6;
/*
- * Total number of received PFC frames with PFC enabled bit for
- * Pri 7
+ * Total number of received PFC frames with PFC enabled
+ * bit for Pri 7
*/
- uint64_t rx_sch_crc_err_frames;
+ uint64_t rx_pfc_ena_frames_pri7;
/* Total Number of frames received with SCH CRC error */
- uint64_t rx_undrsz_frames;
+ uint64_t rx_sch_crc_err_frames;
/* Total Number of under-sized frames received */
- uint64_t rx_frag_frames;
+ uint64_t rx_undrsz_frames;
/* Total Number of fragmented frames received */
- uint64_t rx_eee_lpi_events;
+ uint64_t rx_frag_frames;
/* Total number of RX EEE LPI Events */
- uint64_t rx_eee_lpi_duration;
+ uint64_t rx_eee_lpi_events;
/* EEE LPI Duration Counter on RX */
- uint64_t rx_llfc_physical_msgs;
+ uint64_t rx_eee_lpi_duration;
/*
- * Total number of physical type Link Level Flow Control (LLFC)
- * messages received
+ * Total number of physical type Link Level Flow Control
+ * (LLFC) messages received
*/
- uint64_t rx_llfc_logical_msgs;
+ uint64_t rx_llfc_physical_msgs;
/*
- * Total number of logical type Link Level Flow Control (LLFC)
- * messages received
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received
*/
- uint64_t rx_llfc_msgs_with_crc_err;
+ uint64_t rx_llfc_logical_msgs;
/*
- * Total number of logical type Link Level Flow Control (LLFC)
- * messages received with CRC error
+ * Total number of logical type Link Level Flow Control
+ * (LLFC) messages received with CRC error
*/
- uint64_t rx_hcfc_msgs;
+ uint64_t rx_llfc_msgs_with_crc_err;
/* Total number of HCFC messages received */
- uint64_t rx_hcfc_msgs_with_crc_err;
+ uint64_t rx_hcfc_msgs;
/* Total number of HCFC messages received with CRC error */
- uint64_t rx_bytes;
+ uint64_t rx_hcfc_msgs_with_crc_err;
/* Total number of received bytes */
- uint64_t rx_runt_bytes;
+ uint64_t rx_bytes;
/* Total number of bytes received in runt frames */
- uint64_t rx_runt_frames;
+ uint64_t rx_runt_bytes;
/* Total number of runt frames received */
- uint64_t rx_stat_discard;
+ uint64_t rx_runt_frames;
/* Total Rx Discards per Port reported by STATS block */
- uint64_t rx_stat_err;
- /* Total Rx Error Drops per Port reported by STATS block */
+ uint64_t rx_stat_discard;
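+ /* Total Rx Error Drops per Port reported by STATS block */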
+ uint64_t rx_stat_err;
} __attribute__((packed));
-/* Periodic Statistics Context DMA to host (160 bytes) */
-/*
- * per-context HW statistics -- chip view
- */
+/* Port Rx Statistics extended Formats */
+/* rx_port_stats_ext (size:320b/40B) */
+struct rx_port_stats_ext {
+ /* Number of times link state changed to down */
+ uint64_t link_down_events;
+ /* Number of times idle rings with the pause bit set were found */
+ uint64_t continuous_pause_events;
+ /* Number of times paused rings resumed activity */
+ uint64_t resume_pause_events;
+ /* Number of times the RoCE CoS queue PFC was disabled to avoid a pause flood/burst */
+ uint64_t continuous_roce_pause_events;
+ /* Number of times the RoCE CoS queue PFC was re-enabled */
+ uint64_t resume_roce_pause_events;
+} __attribute__((packed));
+
+/* PCIe Statistics Formats */
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ /* Number of physical layer receiver errors */
+ uint64_t pcie_pl_signal_integrity;
+ /* Number of DLLP CRC errors detected by Data Link Layer */
+ uint64_t pcie_dl_signal_integrity;
+ /*
+ * Number of TLP LCRC and sequence number errors detected
+ * by Data Link Layer
+ */
+ uint64_t pcie_tl_signal_integrity;
+ /* Number of times LTSSM entered Recovery state */
+ uint64_t pcie_link_integrity;
+ /* Number of TLP bytes that have been transmitted */
+ uint64_t pcie_tx_traffic_rate;
+ /* Number of TLP bytes that have been received */
+ uint64_t pcie_rx_traffic_rate;
+ /* Number of DLLP bytes that have been transmitted */
+ uint64_t pcie_tx_dllp_statistics;
+ /* Number of DLLP bytes that have been received */
+ uint64_t pcie_rx_dllp_statistics;
+ /*
+ * Time spent in each phase of Gen3
+ * equalization
+ */
+ uint64_t pcie_equalization_time;
+ /* Records the last 16 transitions of the LTSSM */
+ uint32_t pcie_ltssm_histogram[4];
+ /*
+ * Records the last 8 reasons why the LTSSM transitioned
+ * to Recovery
+ */
+ uint64_t pcie_recovery_histogram;
+} __attribute__((packed));
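+
+/*
+ * Illustrative sketch: the PCIe statistics are not returned inline in
+ * hwrm_pcie_qstats_output; the request names a host buffer and its
+ * size, and firmware DMAs the block there. rte_mem_virt2iova()
+ * (rte_memory.h) is assumed, as above.
+ */
+static inline void
+hwrm_pcie_qstats_fill(struct hwrm_pcie_qstats_input *req,
+		      struct pcie_ctx_hw_stats *buf)
+{
+	memset(req, 0, sizeof(*req));
+	req->pcie_stat_size = sizeof(*buf);	/* bytes firmware may DMA */
+	req->pcie_stat_host_addr = rte_mem_virt2iova(buf);
+}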
+
+/**********************
+ * hwrm_exec_fwd_resp *
+ **********************/
+
+
+/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
+struct hwrm_exec_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is an encapsulated request. This request should
+ * be executed by the HWRM and the response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
+ */
+ uint32_t encap_request[26];
+ /*
+ * This value indicates the target id of the response to
+ * the encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_exec_fwd_resp_output (size:128b/16B) */
+struct hwrm_exec_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
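+
+ /*
+ * Illustrative usage sketch (an assumption, not part of the generated
+ * header): because firmware writes the `valid` byte last, a driver can
+ * poll it to detect when the whole response has landed in host memory.
+ * The volatile read below is an assumption about the caller's
+ * environment; a real driver would also issue whatever read barrier its
+ * platform requires before consuming the remaining fields.
+ */
+ static inline int
+ hwrm_resp_ready(const struct hwrm_exec_fwd_resp_output *resp)
+ {
+ 	/* Re-read `valid` on every call; firmware sets it to '1' last. */
+ 	return *(const volatile uint8_t *)&resp->valid == 1;
+ }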
+
+/************************
+ * hwrm_reject_fwd_resp *
+ ************************/
+
+
+/* hwrm_reject_fwd_resp_input (size:1024b/128B) */
+struct hwrm_reject_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is an encapsulated request. This request should
+ * be rejected by the HWRM and the error response should be
+ * provided in the response buffer inside the encapsulated
+ * request.
+ */
+ uint32_t encap_request[26];
+ /*
+ * This value indicates the target id of the response to
+ * the encapsulated request.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_reject_fwd_resp_output (size:128b/16B) */
+struct hwrm_reject_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_fwd_resp *
+ *****************/
+
+
+/* hwrm_fwd_resp_input (size:1024b/128B) */
+struct hwrm_fwd_resp_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value indicates the target id of the encapsulated
+ * response.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - HWRM
+ */
+ uint16_t encap_resp_target_id;
+ /*
+ * This value indicates the completion ring the encapsulated
+ * response will be optionally completed on. If the value is
+ * -1, then no CR completion shall be generated for the
+ * encapsulated response. Any other value must be a
+ * valid CR ring_id value. If a valid encap_resp_cmpl_ring
+ * is provided, then a CR completion shall be generated for
+ * the encapsulated response.
+ */
+ uint16_t encap_resp_cmpl_ring;
+ /* This field indicates the length of the encapsulated response. */
+ uint16_t encap_resp_len;
+ uint8_t unused_0;
+ uint8_t unused_1;
+ /*
+ * This is the host address where the encapsulated response
+ * will be written.
+ * This area must be 16B aligned and must be cleared to zero
+ * before the original request is made.
+ */
+ uint64_t encap_resp_addr;
+ /* This is an encapsulated response. */
+ uint32_t encap_resp[24];
+} __attribute__((packed));
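+
+ /*
+ * Illustrative sketch (an assumption, not upstream code): preparing the
+ * forwarding-specific fields of hwrm_fwd_resp_input. Per the comments
+ * above, encap_resp_addr must point to a 16-byte-aligned buffer that is
+ * cleared to zero before the original request is made, and
+ * encap_resp_cmpl_ring may be set to -1 to suppress the completion-ring
+ * entry. The buffer parameters here are hypothetical; memset() from
+ * <string.h> is assumed to be available.
+ */
+ static inline void
+ hwrm_fwd_resp_prep(struct hwrm_fwd_resp_input *req, uint16_t target_id,
+ 		   void *encap_va, uint64_t encap_pa, uint16_t encap_len)
+ {
+ 	memset(encap_va, 0, encap_len);           /* must be cleared to zero */
+ 	req->encap_resp_target_id = target_id;
+ 	req->encap_resp_cmpl_ring = (uint16_t)-1; /* no CR completion */
+ 	req->encap_resp_len = encap_len;
+ 	req->encap_resp_addr = encap_pa;          /* 16B-aligned host address */
+ }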
+
+/* hwrm_fwd_resp_output (size:128b/16B) */
+struct hwrm_fwd_resp_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_fwd_async_event_cmpl *
+ *****************************/
+
+
+/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */
+struct hwrm_fwd_async_event_cmpl_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This value indicates the target id of the encapsulated
+ * asynchronous event.
+ * 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors
+ * 0xFFFF - Broadcast to all children VFs (only applicable when
+ * a PF is the requester)
+ */
+ uint16_t encap_async_event_target_id;
+ uint8_t unused_0[6];
+ /* This is an encapsulated asynchronous event completion. */
+ uint32_t encap_async_event_cmpl[4];
+} __attribute__((packed));
+
+/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */
+struct hwrm_fwd_async_event_cmpl_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_raw_write_blk *
+ **************************/
+
+
+/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */
+struct hwrm_nvm_raw_write_blk_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is the location of the source data to be written.
+ */
+ uint64_t host_src_addr;
+ /*
+ * 32-bit Destination Address.
+ * This is the NVRAM byte-offset where the source data will be written.
+ */
+ uint32_t dest_addr;
+ /* Length of data to be written, in bytes. */
+ uint32_t len;
+} __attribute__((packed));
+
+/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
+struct hwrm_nvm_raw_write_blk_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*****************
+ * hwrm_nvm_read *
+ *****************/
+
+
+/* hwrm_nvm_read_input (size:320b/40B) */
+struct hwrm_nvm_read_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the data will be written to.
+ */
+ uint64_t host_dest_addr;
+ /* The 0-based index of the directory entry. */
+ uint16_t dir_idx;
+ uint8_t unused_0[2];
+ /* The NVRAM byte-offset to read from. */
+ uint32_t offset;
+ /* The length of the data to be read, in bytes. */
+ uint32_t len;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_nvm_read_output (size:128b/16B) */
+struct hwrm_nvm_read_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
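+
+ /*
+ * Illustrative sketch (an assumption, not upstream code): filling in the
+ * command-specific fields of hwrm_nvm_read_input to read `len` bytes at
+ * byte offset `offset` of directory entry `dir_idx` into a host buffer.
+ * Header setup (req_type, seq_id, resp_addr, ...) and sending the request
+ * are left to the caller's HWRM transport.
+ */
+ static inline void
+ hwrm_nvm_read_prep(struct hwrm_nvm_read_input *req, uint64_t host_buf_pa,
+ 		   uint16_t dir_idx, uint32_t offset, uint32_t len)
+ {
+ 	req->host_dest_addr = host_buf_pa; /* where firmware writes the data */
+ 	req->dir_idx = dir_idx;            /* 0-based directory entry index */
+ 	req->offset = offset;              /* NVRAM byte offset to read from */
+ 	req->len = len;                    /* number of bytes to read */
+ }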
+
+/*********************
+ * hwrm_nvm_raw_dump *
+ *********************/
+
+
+/* hwrm_nvm_raw_dump_input (size:256b/32B) */
+struct hwrm_nvm_raw_dump_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the data will be written to.
+ */
+ uint64_t host_dest_addr;
+ /* 32-bit NVRAM byte-offset to read from. */
+ uint32_t offset;
+ /* Total length of NVRAM contents to be read, in bytes. */
+ uint32_t len;
+} __attribute__((packed));
+
+/* hwrm_nvm_raw_dump_output (size:128b/16B) */
+struct hwrm_nvm_raw_dump_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_nvm_get_dir_entries *
+ ****************************/
+
+
+/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */
+struct hwrm_nvm_get_dir_entries_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Destination Address.
+ * This is the host address where the directory will be written.
+ */
+ uint64_t host_dest_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */
+struct hwrm_nvm_get_dir_entries_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_dir_info *
+ *************************/
+
+
+/* hwrm_nvm_get_dir_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dir_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dir_info_output (size:192b/24B) */
+struct hwrm_nvm_get_dir_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Number of directory entries in the directory. */
+ uint32_t entries;
+ /* Size of each directory entry, in bytes. */
+ uint32_t entry_length;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/******************
+ * hwrm_nvm_write *
+ ******************/
+
+
+/* hwrm_nvm_write_input (size:384b/48B) */
+struct hwrm_nvm_write_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is where the source data is.
+ */
+ uint64_t host_src_addr;
+ /* The Directory Entry Type (valid values are defined in the bnxnvm_directory_type enum defined in the file bnxnvm_defs.h). */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The 0-based instance of the combined Directory Entry Type and Extension.
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags (see BNX_DIR_EXT_* in the file bnxnvm_defs.h). */
+ uint16_t dir_ext;
+ /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the file bnxnvm_defs.h). */
+ uint16_t dir_attr;
+ /*
+ * Length of data to write, in bytes. May be less than or equal to the allocated size for the directory entry.
+ * The data length stored in the directory entry will be updated to reflect this value once the write is complete.
+ */
+ uint32_t dir_data_length;
+ /* Option. */
+ uint16_t option;
+ uint16_t flags;
+ /*
+ * When this bit is '1', the original active image
+ * will not be removed. TBD: what purpose is this?
+ */
+ #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG \
+ UINT32_C(0x1)
+ /*
+ * The requested length of the allocated NVM for the item, in bytes. This value may be greater than or equal to the specified data length (dir_data_length).
+ * If this value is less than the specified data length, it will be ignored.
+ * The response will contain the actual allocated item length, which may be greater than the requested item length.
+ * The purpose for allocating more than the required number of bytes for an item's data is to pre-allocate extra storage (padding) to accommodate
+ * the potential future growth of an item (e.g. upgraded firmware with a size increase, log growth, expanded configuration data).
+ */
+ uint32_t dir_item_length;
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_nvm_write_output (size:128b/16B) */
+struct hwrm_nvm_write_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Length of the allocated NVM for the item, in bytes. The value may be greater than or equal to the specified data length or the requested item length.
+ * The actual item length used when creating a new directory entry will be a multiple of an NVM block size.
+ */
+ uint32_t dir_item_length;
+ /* The directory index of the created or modified item. */
+ uint16_t dir_idx;
+ uint8_t unused_0;
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_write_cmd_err (size:64b/8B) */
+struct hwrm_nvm_write_cmd_err {
+ /*
+ * Command-specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to fragmentation */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1)
+ /* nvm is completely full. */
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2)
+ #define HWRM_NVM_WRITE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE
+ uint8_t unused_0[7];
+} __attribute__((packed));
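+
+ /*
+ * Illustrative sketch (an assumption, not upstream code): mapping the
+ * command-specific error codes above to strings for logging.
+ */
+ static inline const char *
+ hwrm_nvm_write_err_str(uint8_t code)
+ {
+ 	switch (code) {
+ 	case HWRM_NVM_WRITE_CMD_ERR_CODE_UNKNOWN:
+ 		return "unknown error";
+ 	case HWRM_NVM_WRITE_CMD_ERR_CODE_FRAG_ERR:
+ 		return "NVM too fragmented";
+ 	case HWRM_NVM_WRITE_CMD_ERR_CODE_NO_SPACE:
+ 		return "NVM full";
+ 	default:
+ 		return "unrecognized error code";
+ 	}
+ }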
+
+/*******************
+ * hwrm_nvm_modify *
+ *******************/
+
+
+/* hwrm_nvm_modify_input (size:320b/40B) */
+struct hwrm_nvm_modify_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host Source Address.
+ * This is where the modified data is.
+ */
+ uint64_t host_src_addr;
+ /* 16-bit directory entry index. */
+ uint16_t dir_idx;
+ uint8_t unused_0[2];
+ /* 32-bit NVRAM byte-offset to modify content from. */
+ uint32_t offset;
+ /*
+ * Length of data to be modified, in bytes. The length shall
+ * be non-zero.
+ */
+ uint32_t len;
+ uint8_t unused_1[4];
+} __attribute__((packed));
+
+/* hwrm_nvm_modify_output (size:128b/16B) */
+struct hwrm_nvm_modify_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_nvm_find_dir_entry *
+ ***************************/
+
+
+/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dir_idx_valid field to be
+ * configured.
+ */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_ENABLES_DIR_IDX_VALID \
+ UINT32_C(0x1)
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ /* Directory Entry (Image) Type */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The instance of this Directory Type
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags. */
+ uint16_t dir_ext;
+ /* This value indicates the search option using dir_ordinal. */
+ uint8_t opt_ordinal;
+ /* This value indicates the search option using dir_ordinal. */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_MASK UINT32_C(0x3)
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_SFT 0
+ /* Equal to specified ordinal value. */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_EQ UINT32_C(0x0)
+ /* Greater than or equal to specified ordinal value */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE UINT32_C(0x1)
+ /* Greater than specified ordinal value */
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT UINT32_C(0x2)
+ #define HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_LAST \
+ HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GT
+ uint8_t unused_0[3];
+} __attribute__((packed));
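+
+ /*
+ * Illustrative sketch (an assumption, not upstream code): searching for a
+ * directory entry by type rather than by index. Starting at ordinal 0 with
+ * the greater-or-equal search option matches the first entry of the given
+ * type; dir_idx is unused, so the DIR_IDX_VALID enable bit stays clear.
+ */
+ static inline void
+ hwrm_nvm_find_prep(struct hwrm_nvm_find_dir_entry_input *req,
+ 		   uint16_t dir_type)
+ {
+ 	req->enables = 0;        /* search by type/ordinal, not by dir_idx */
+ 	req->dir_type = dir_type;
+ 	req->dir_ordinal = 0;
+ 	req->dir_ext = 0;
+ 	req->opt_ordinal = HWRM_NVM_FIND_DIR_ENTRY_INPUT_OPT_ORDINAL_GE;
+ }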
+
+/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */
+struct hwrm_nvm_find_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Allocated NVRAM for this directory entry, in bytes. */
+ uint32_t dir_item_length;
+ /* Size of the stored data for this directory entry, in bytes. */
+ uint32_t dir_data_length;
+ /*
+ * Firmware version.
+ * Only valid if the directory entry is for embedded firmware stored in APE_BIN Format.
+ */
+ uint32_t fw_ver;
+ /* Directory ordinal. */
+ uint16_t dir_ordinal;
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/****************************
+ * hwrm_nvm_erase_dir_entry *
+ ****************************/
+
+
+/* hwrm_nvm_erase_dir_entry_input (size:192b/24B) */
+struct hwrm_nvm_erase_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ uint8_t unused_0[6];
+} __attribute__((packed));
+
+/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_erase_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_dev_info *
+ *************************/
+
+
+/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+struct hwrm_nvm_get_dev_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
+struct hwrm_nvm_get_dev_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Manufacturer ID. */
+ uint16_t manufacturer_id;
+ /* Device ID. */
+ uint16_t device_id;
+ /* Sector size of the NVRAM device. */
+ uint32_t sector_size;
+ /* Total size, in bytes, of the NVRAM device. */
+ uint32_t nvram_size;
+ uint32_t reserved_size;
+ /* Available size that can be used, in bytes. Available size is the NVRAM size minus the used and reserved sizes. */
+ uint32_t available_size;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_mod_dir_entry *
+ **************************/
+
+
+/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */
+struct hwrm_nvm_mod_dir_entry_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the checksum field to be
+ * configured.
+ */
+ #define HWRM_NVM_MOD_DIR_ENTRY_INPUT_ENABLES_CHECKSUM UINT32_C(0x1)
+ /* Directory Entry Index */
+ uint16_t dir_idx;
+ /*
+ * Directory ordinal.
+ * The (0-based) instance of this Directory Type.
+ */
+ uint16_t dir_ordinal;
+ /* The Directory Entry Extension flags (see BNX_DIR_EXT_* for extension flag definitions). */
+ uint16_t dir_ext;
+ /* Directory Entry Attribute flags (see BNX_DIR_ATTR_* for attribute flag definitions). */
+ uint16_t dir_attr;
+ /*
+ * If valid, then this field updates the checksum
+ * value of the content in the directory entry.
+ */
+ uint32_t checksum;
+} __attribute__((packed));
+
+/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */
+struct hwrm_nvm_mod_dir_entry_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/**************************
+ * hwrm_nvm_verify_update *
+ **************************/
+
+
+/* hwrm_nvm_verify_update_input (size:192b/24B) */
+struct hwrm_nvm_verify_update_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Directory Entry Type, to be verified. */
+ uint16_t dir_type;
+ /*
+ * Directory ordinal.
+ * The instance of the Directory Type to be verified.
+ */
+ uint16_t dir_ordinal;
+ /*
+ * The Directory Entry Extension flags.
+ * The "UPDATE" extension flag must be set in this value.
+ * A corresponding directory entry with the same type and ordinal values but *without*
+ * the "UPDATE" extension flag must also exist. The other flags of the extension must
+ * be identical between the active and update entries.
+ */
+ uint16_t dir_ext;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_nvm_verify_update_output (size:128b/16B) */
+struct hwrm_nvm_verify_update_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/***************************
+ * hwrm_nvm_install_update *
+ ***************************/
+
+
+/* hwrm_nvm_install_update_input (size:192b/24B) */
+struct hwrm_nvm_install_update_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * Installation type. If a value of 3 through 0xffff is used,
+ * only packaged items with that type value will be installed, and
+ * conditional installation directives for those packaged items
+ * will be overridden (i.e. 'create' or 'replace' will be treated
+ * as 'install').
+ */
+ uint32_t install_type;
+ /*
+ * Perform a normal package installation. Conditional installation
+ * directives (e.g. 'create' and 'replace') of packaged items
+ * will be followed.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL UINT32_C(0x0)
+ /*
+ * Install all packaged items regardless of installation directive
+ * (i.e. treat all packaged items as though they have an installation
+ * directive of 'install').
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL \
+ UINT32_C(0xffffffff)
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_LAST \
+ HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_ALL
+ uint16_t flags;
+ /* If set to 1, then securely erase all unused locations in persistent storage. */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ERASE_UNUSED_SPACE \
+ UINT32_C(0x1)
+ /*
+ * If set to 1, then unspecified images (images not in the package file) will be safely deleted.
+ * When combined with erase_unused_space, unspecified images will be securely erased.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_REMOVE_UNUSED_PKG \
+ UINT32_C(0x2)
+ /*
+ * If set to 1, FW will defragment the NVM if defragmentation is required for the update.
+ * Allow additional time for this command to complete if this bit is set to 1.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG \
+ UINT32_C(0x4)
+ uint8_t unused_0[2];
+} __attribute__((packed));
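+
+ /*
+ * Illustrative sketch (an assumption, not upstream code): requesting a
+ * normal package installation while permitting the firmware to defragment
+ * the NVM if the update requires it. Flags are OR-ed together from the
+ * definitions above.
+ */
+ static inline void
+ hwrm_nvm_install_prep(struct hwrm_nvm_install_update_input *req)
+ {
+ 	req->install_type =
+ 		HWRM_NVM_INSTALL_UPDATE_INPUT_INSTALL_TYPE_NORMAL;
+ 	req->flags = HWRM_NVM_INSTALL_UPDATE_INPUT_FLAGS_ALLOWED_TO_DEFRAG;
+ }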
+
+/* hwrm_nvm_install_update_output (size:192b/24B) */
+struct hwrm_nvm_install_update_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Bit-mask of successfully installed items.
+ * Bit-0 corresponds to the first packaged item, Bit-1 to the second item, and so on.
+ * A value of 0 indicates that no items were successfully installed.
+ */
+ uint64_t installed_items;
+ /* Overall result of the package installation (8 bits). */
+ uint8_t result;
+ /* There was no problem with the package installation. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS UINT32_C(0x0)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESULT_SUCCESS
+ /* Identifies the packaged item, if any, that caused a problem (8 bits). */
+ uint8_t problem_item;
+ /* There was no problem with any packaged items. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_NONE \
+ UINT32_C(0x0)
+ /* There was a problem with the NVM package itself. */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE \
+ UINT32_C(0xff)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_PROBLEM_ITEM_PACKAGE
+ /* Type of reset required for the update to take effect (8 bits). */
+ uint8_t reset_required;
+ /*
+ * No reset is required for installed/updated firmware or
+ * microcode to take effect.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_NONE \
+ UINT32_C(0x0)
+ /*
+ * A PCIe reset (e.g. system reboot) is
+ * required for newly installed/updated firmware or
+ * microcode to take effect.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_PCI \
+ UINT32_C(0x1)
+ /*
+ * A controller power reset (e.g. system power-cycle) is
+ * required for newly installed/updated firmware or
+ * microcode to take effect. Some newly installed/updated
+ * firmware or microcode may still take effect upon the
+ * next PCIe reset.
+ */
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER \
+ UINT32_C(0x2)
+ #define HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_LAST \
+ HWRM_NVM_INSTALL_UPDATE_OUTPUT_RESET_REQUIRED_POWER
+ uint8_t unused_0[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
+struct hwrm_nvm_install_update_cmd_err {
+ /*
+ * Command-specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Unable to complete operation due to fragmentation */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR UINT32_C(0x1)
+ /* nvm is completely full. */
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE UINT32_C(0x2)
+ #define HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/******************
+ * hwrm_nvm_flush *
+ ******************/
+
+
+/* hwrm_nvm_flush_input (size:128b/16B) */
+struct hwrm_nvm_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} __attribute__((packed));
+
+/* hwrm_nvm_flush_output (size:128b/16B) */
+struct hwrm_nvm_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_flush_cmd_err (size:64b/8B) */
+struct hwrm_nvm_flush_cmd_err {
+ /*
+ * Command-specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* flush could not be performed */
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL UINT32_C(0x1)
+ #define HWRM_NVM_FLUSH_CMD_ERR_CODE_LAST \
+ HWRM_NVM_FLUSH_CMD_ERR_CODE_FAIL
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_get_variable *
+ *************************/
+
+
+/* hwrm_nvm_get_variable_input (size:320b/40B) */
+struct hwrm_nvm_get_variable_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where
+ * the NVM variable will be stored
+ */
+ uint64_t dest_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_GET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires that index0 is valid, a value of 2
+ * requires that index0 and index1 are valid, and so forth.
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimension */
+ uint16_t index_0;
+ /* index for the 2nd dimension */
+ uint16_t index_1;
+ /* index for the 3rd dimension */
+ uint16_t index_2;
+ /* index for the 4th dimension */
+ uint16_t index_3;
+ uint8_t flags;
+ /*
+ * When this bit is set to 1, the factory default value will be returned;
+ * when 0, the operational value is returned.
+ */
+ #define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT \
+ UINT32_C(0x1)
+ uint8_t unused_0;
+} __attribute__((packed));
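+
+ /*
+ * Illustrative sketch (an assumption, not upstream code): reading a
+ * two-dimensional NVM configuration variable. Per the `dimensions` comment
+ * above, dimensions = 2 means index_0 and index_1 are used and index_2 and
+ * index_3 are ignored. Treating the two dimensions as a port and function
+ * pair is purely hypothetical; option_num and the bit length come from the
+ * caller's NVM configuration definitions.
+ */
+ static inline void
+ hwrm_nvm_get_var_prep(struct hwrm_nvm_get_variable_input *req,
+ 		      uint64_t dest_pa, uint16_t option_num,
+ 		      uint16_t len_bits, uint16_t port, uint16_t func)
+ {
+ 	req->dest_data_addr = dest_pa;
+ 	req->option_num = option_num;
+ 	req->data_len = len_bits;  /* note: the length is given in bits */
+ 	req->dimensions = 2;       /* index_0 and index_1 are valid */
+ 	req->index_0 = port;       /* hypothetical first dimension */
+ 	req->index_1 = func;       /* hypothetical second dimension */
+ 	req->flags = 0;            /* operational, not factory default */
+ }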
+
+/* hwrm_nvm_get_variable_output (size:128b/16B) */
+struct hwrm_nvm_get_variable_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* size of data of the actual variable retrieved in bits */
+ uint16_t data_len;
+ /*
+ * option_num is the option number for the data retrieved. It is possible that,
+ * in the future, the option number returned will differ from the one requested. This
+ * condition could occur if an option is deprecated and a new option id is defined
+ * with similar characteristics, but has a slightly different definition. This
+ * also makes it convenient for the caller to identify the variable result with
+ * the option id from the response.
+ */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_LAST \
+ HWRM_NVM_GET_VARIABLE_OUTPUT_OPTION_NUM_RSVD_FFFF
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_get_variable_cmd_err {
+ /*
+ * Command-specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* variable does not exist */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
+ /* configuration is corrupted and the variable cannot be saved */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
+ /* length specified is too small */
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT UINT32_C(0x3)
+ #define HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*************************
+ * hwrm_nvm_set_variable *
+ *************************/
+
+
+/* hwrm_nvm_set_variable_input (size:320b/40B) */
+struct hwrm_nvm_set_variable_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where
+ * the NVM variable will be copied from
+ */
+ uint64_t src_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_0 UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_SET_VARIABLE_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires that index0 is valid, a value of 2
+ * requires that index0 and index1 are valid, and so forth.
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimension */
+ uint16_t index_0;
+ /* index for the 2nd dimension */
+ uint16_t index_1;
+ /* index for the 3rd dimension */
+ uint16_t index_2;
+ /* index for the 4th dimension */
+ uint16_t index_3;
+ uint8_t flags;
+ /* When this bit is 1, flush the internal cache after this write operation (see the hwrm_nvm_flush command). */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH \
+ UINT32_C(0x1)
+ /* encryption method */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK \
+ UINT32_C(0xe)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT 1
+ /* No encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_NONE \
+ (UINT32_C(0x0) << 1)
+ /* one-way encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1 \
+ (UINT32_C(0x1) << 1)
+ /* symmetric AES256 encryption. */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 \
+ (UINT32_C(0x2) << 1)
+ /* SHA1 digest appended to plaintext contents, for authentication */
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH \
+ (UINT32_C(0x3) << 1)
+ #define HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_LAST \
+ HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH
+ uint8_t unused_0;
+} __attribute__((packed));
+
+/* hwrm_nvm_set_variable_output (size:128b/16B) */
+struct hwrm_nvm_set_variable_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */
+struct hwrm_nvm_set_variable_cmd_err {
+ /*
+ * Command-specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* variable does not exist */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST UINT32_C(0x1)
+ /* configuration is corrupted and the variable cannot be saved */
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR UINT32_C(0x2)
+ #define HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_LAST \
+ HWRM_NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
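A note on the flags layout in hwrm_nvm_set_variable_input above: ENCRYPT_MODE is a three-bit field at bit offset 1, so the mode values are already pre-shifted by ENCRYPT_MODE_SFT and are OR'ed into the byte, never shifted again. A minimal composition/decoding sketch using only the macros from the header (variable names are illustrative):

	/* Request AES256 encryption of the variable and an immediate
	 * flush of the NVM write cache.
	 */
	uint8_t flags = HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_AES256 |
			HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_FORCE_FLUSH;

	/* Recover the raw mode value (0..3): mask the field, shift it down. */
	uint8_t mode = (flags & HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_MASK)
			>> HWRM_NVM_SET_VARIABLE_INPUT_FLAGS_ENCRYPT_MODE_SFT;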
+/****************************
+ * hwrm_nvm_validate_option *
+ ****************************/
+
+
+/* hwrm_nvm_validate_option_input (size:320b/40B) */
+struct hwrm_nvm_validate_option_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address from which
+ * the NVM variable will be copied.
+ */
+ uint64_t src_data_addr;
+ /* size of data in bits */
+ uint16_t data_len;
+ /* nvm cfg option number */
+ uint16_t option_num;
+ /* reserved. */
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_0 \
+ UINT32_C(0x0)
+ /* reserved. */
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF \
+ UINT32_C(0xffff)
+ #define HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_LAST \
+ HWRM_NVM_VALIDATE_OPTION_INPUT_OPTION_NUM_RSVD_FFFF
+ /*
+ * Number of dimensions for this nvm configuration variable.
+ * This value indicates how many of the indexN values to use.
+ * A value of 0 means that none of the indexN values are valid.
+ * A value of 1 requires that index0 is valid, a value of 2
+ * requires that index0 and index1 are valid, and so forth.
+ */
+ uint16_t dimensions;
+ /* index for the 1st dimension */
+ uint16_t index_0;
+ /* index for the 2nd dimension */
+ uint16_t index_1;
+ /* index for the 3rd dimension */
+ uint16_t index_2;
+ /* index for the 4th dimension */
+ uint16_t index_3;
+ uint8_t unused_0[2];
+} __attribute__((packed));
+
+/* hwrm_nvm_validate_option_output (size:128b/16B) */
+struct hwrm_nvm_validate_option_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t result;
+ /* indicates that the value provided for the option is not matching with the saved data. */
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_NOT_MATCH UINT32_C(0x0)
+ /* indicates that the value provided for the option is matching the saved data. */
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH UINT32_C(0x1)
+ #define HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_LAST \
+ HWRM_NVM_VALIDATE_OPTION_OUTPUT_RESULT_MATCH
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */
+struct hwrm_nvm_validate_option_cmd_err {
+ /*
+ * Command-specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ #define HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST \
+ HWRM_NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/*****************************
+ * hwrm_nvm_factory_defaults *
+ *****************************/
+
+
+/* hwrm_nvm_factory_defaults_input (size:192b/24B) */
+struct hwrm_nvm_factory_defaults_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFE - Reserved for internal processors
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer to a host buffer into which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* mode is 8 bits */
+ uint8_t mode;
+ /* Restore factory default settings (mode value 0). */
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_RESTORE UINT32_C(0x0)
+ /* Create factory default settings (mode value 1). */
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE UINT32_C(0x1)
+ #define HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_INPUT_MODE_CREATE
+ uint8_t unused_0[7];
+} __attribute__((packed));
+
+/* hwrm_nvm_factory_defaults_output (size:128b/16B) */
+struct hwrm_nvm_factory_defaults_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t result;
+ /* factory defaults created successfully. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_OK \
+ UINT32_C(0x0)
+ /* factory defaults restored successfully. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_RESTORE_OK \
+ UINT32_C(0x1)
+ /* factory defaults already created. */
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY \
+ UINT32_C(0x2)
+ #define HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_OUTPUT_RESULT_CREATE_ALREADY
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */
+struct hwrm_nvm_factory_defaults_cmd_err {
+ /*
+ * Command-specific error codes that go to
+ * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN \
+ UINT32_C(0x0)
+ /* valid configuration not present to create defaults */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG \
+ UINT32_C(0x1)
+ /* No saved configuration present to restore, restore failed */
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG \
+ UINT32_C(0x2)
+ #define HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST \
+ HWRM_NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG
+ uint8_t unused_0[7];
+} __attribute__((packed));
-struct ctx_hw_stats64 {
- uint64_t rx_ucast_pkts;
- uint64_t rx_mcast_pkts;
- uint64_t rx_bcast_pkts;
- uint64_t rx_drop_pkts;
- uint64_t rx_discard_pkts;
- uint64_t rx_ucast_bytes;
- uint64_t rx_mcast_bytes;
- uint64_t rx_bcast_bytes;
-
- uint64_t tx_ucast_pkts;
- uint64_t tx_mcast_pkts;
- uint64_t tx_bcast_pkts;
- uint64_t tx_drop_pkts;
- uint64_t tx_discard_pkts;
- uint64_t tx_ucast_bytes;
- uint64_t tx_mcast_bytes;
- uint64_t tx_bcast_bytes;
-
- uint64_t tpa_pkts;
- uint64_t tpa_bytes;
- uint64_t tpa_events;
- uint64_t tpa_aborts;
-} __attribute__((packed));
-
-#endif /* _HSI_STRUCT_DEF_DPDK_ */
+#endif /* _HSI_STRUCT_DEF_DPDK_H_ */
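Every HWRM command in this header shares the same envelope: req_type/cmpl_ring/seq_id/target_id/resp_addr on input, and error_code/req_type/seq_id/resp_len plus a trailing valid byte on output, with the comments guaranteeing that firmware writes valid last. A hedged sketch of how a caller can rely on that ordering; the helper name and the timeout handling are assumptions for illustration, not part of the driver:

	#include <errno.h>
	#include <rte_atomic.h>
	#include <rte_cycles.h>

	/* Spin until firmware marks the DMA'd response complete.  Because
	 * 'valid' is documented to be written last, reading 1 here means
	 * every other field of the response struct is already coherent.
	 */
	static int
	hwrm_wait_resp_valid(volatile struct hwrm_nvm_set_variable_output *resp,
			     unsigned int timeout_us)
	{
		while (timeout_us-- > 0) {
			if (resp->valid == 1) {
				/* order the 'valid' load before payload loads */
				rte_io_rmb();
				return 0;
			}
			rte_delay_us(1);
		}
		return -ETIMEDOUT;
	}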
diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
new file mode 100644
index 00000000..e130f271
--- /dev/null
+++ b/drivers/net/bnxt/meson.build
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+install_headers('rte_pmd_bnxt.h')
+version = 2
+sources = files('bnxt_cpr.c',
+ 'bnxt_ethdev.c',
+ 'bnxt_filter.c',
+ 'bnxt_flow.c',
+ 'bnxt_hwrm.c',
+ 'bnxt_irq.c',
+ 'bnxt_ring.c',
+ 'bnxt_rxq.c',
+ 'bnxt_rxr.c',
+ 'bnxt_stats.c',
+ 'bnxt_txq.c',
+ 'bnxt_txr.c',
+ 'bnxt_util.c',
+ 'bnxt_vnic.c',
+ 'rte_pmd_bnxt.c')
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index cae95f8f..c298de83 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Broadcom
+ * All rights reserved.
*/
#include <inttypes.h>
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
index cd7227ac..68fbe34d 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.h
+++ b/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Broadcom Limited.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Broadcom Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Broadcom
+ * All rights reserved.
*/
#ifndef _PMD_BNXT_H_
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 4a6633ed..acad16a1 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -27,6 +27,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_pmd.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_args.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_8023ad.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_flow.c
#
# Export include files
diff --git a/drivers/net/bonding/meson.build b/drivers/net/bonding/meson.build
index b90abc6d..602d2880 100644
--- a/drivers/net/bonding/meson.build
+++ b/drivers/net/bonding/meson.build
@@ -2,7 +2,8 @@
# Copyright(c) 2017 Intel Corporation
name = 'bond' #, james bond :-)
-sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c',
+version = 2
+sources = files('rte_eth_bond_api.c', 'rte_eth_bond_pmd.c', 'rte_eth_bond_flow.c',
'rte_eth_bond_args.c', 'rte_eth_bond_8023ad.c', 'rte_eth_bond_alb.c')
deps += 'sched' # needed for rte_bitmap.h
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index c452318f..f8cea4b6 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -16,9 +16,12 @@
static void bond_mode_8023ad_ext_periodic_cb(void *arg);
#ifdef RTE_LIBRTE_BOND_DEBUG_8023AD
-#define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \
- bond_dbg_get_time_diff_ms(), slave_id, \
- __func__, ##__VA_ARGS__)
+
+#define MODE4_DEBUG(fmt, ...) \
+ rte_log(RTE_LOG_DEBUG, bond_logtype, \
+ "%6u [Port %u: %s] " fmt, \
+ bond_dbg_get_time_diff_ms(), slave_id, \
+ __func__, ##__VA_ARGS__)
static uint64_t start_time;
@@ -77,44 +80,46 @@ bond_print_lacp(struct lacpdu *l)
if (p_len && p_state[p_len-1] == ' ')
p_state[p_len-1] = '\0';
- RTE_LOG(DEBUG, PMD, "LACP: {\n"\
- " subtype= %02X\n"\
- " ver_num=%02X\n"\
- " actor={ tlv=%02X, len=%02X\n"\
- " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
- " state={ %s }\n"\
- " }\n"\
- " partner={ tlv=%02X, len=%02X\n"\
- " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"\
- " state={ %s }\n"\
- " }\n"\
- " collector={info=%02X, length=%02X, max_delay=%04X\n, " \
- "type_term=%02X, terminator_length = %02X}\n",\
- l->subtype,\
- l->version_number,\
- l->actor.tlv_type_info,\
- l->actor.info_length,\
- l->actor.port_params.system_priority,\
- a_address,\
- l->actor.port_params.key,\
- l->actor.port_params.port_priority,\
- l->actor.port_params.port_number,\
- a_state,\
- l->partner.tlv_type_info,\
- l->partner.info_length,\
- l->partner.port_params.system_priority,\
- p_address,\
- l->partner.port_params.key,\
- l->partner.port_params.port_priority,\
- l->partner.port_params.port_number,\
- p_state,\
- l->tlv_type_collector_info,\
- l->collector_info_length,\
- l->collector_max_delay,\
- l->tlv_type_terminator,\
- l->terminator_length);
+ RTE_BOND_LOG(DEBUG,
+ "LACP: {\n"
+ " subtype= %02X\n"
+ " ver_num=%02X\n"
+ " actor={ tlv=%02X, len=%02X\n"
+ " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
+ " state={ %s }\n"
+ " }\n"
+ " partner={ tlv=%02X, len=%02X\n"
+ " pri=%04X, system=%s, key=%04X, p_pri=%04X p_num=%04X\n"
+ " state={ %s }\n"
+ " }\n"
+ " collector={info=%02X, length=%02X, max_delay=%04X\n, "
+ "type_term=%02X, terminator_length = %02X }",
+ l->subtype,
+ l->version_number,
+ l->actor.tlv_type_info,
+ l->actor.info_length,
+ l->actor.port_params.system_priority,
+ a_address,
+ l->actor.port_params.key,
+ l->actor.port_params.port_priority,
+ l->actor.port_params.port_number,
+ a_state,
+ l->partner.tlv_type_info,
+ l->partner.info_length,
+ l->partner.port_params.system_priority,
+ p_address,
+ l->partner.port_params.key,
+ l->partner.port_params.port_priority,
+ l->partner.port_params.port_number,
+ p_state,
+ l->tlv_type_collector_info,
+ l->collector_info_length,
+ l->collector_max_delay,
+ l->tlv_type_terminator,
+ l->terminator_length);
}
+
#define BOND_PRINT_LACP(lacpdu) bond_print_lacp(lacpdu)
#else
#define BOND_PRINT_LACP(lacpdu) do { } while (0)
@@ -200,31 +205,34 @@ show_warnings(uint16_t slave_id)
rte_get_tsc_hz() / 1000);
if (warnings & WRN_RX_QUEUE_FULL) {
- RTE_LOG(DEBUG, PMD,
- "Slave %u: failed to enqueue LACP packet into RX ring.\n"
- "Receive and transmit functions must be invoked on bonded\n"
- "interface at least 10 times per second or LACP will not\n"
- "work correctly\n", slave_id);
+ RTE_BOND_LOG(DEBUG,
+ "Slave %u: failed to enqueue LACP packet into RX ring.\n"
+ "Receive and transmit functions must be invoked on bonded"
+ "interface at least 10 times per second or LACP will notwork correctly",
+ slave_id);
}
if (warnings & WRN_TX_QUEUE_FULL) {
- RTE_LOG(DEBUG, PMD,
- "Slave %u: failed to enqueue LACP packet into TX ring.\n"
- "Receive and transmit functions must be invoked on bonded\n"
- "interface at least 10 times per second or LACP will not\n"
- "work correctly\n", slave_id);
+ RTE_BOND_LOG(DEBUG,
+ "Slave %u: failed to enqueue LACP packet into TX ring.\n"
+ "Receive and transmit functions must be invoked on bonded"
+ "interface at least 10 times per second or LACP will not work correctly",
+ slave_id);
}
if (warnings & WRN_RX_MARKER_TO_FAST)
- RTE_LOG(INFO, PMD, "Slave %u: marker to early - ignoring.\n", slave_id);
+ RTE_BOND_LOG(INFO, "Slave %u: marker to early - ignoring.",
+ slave_id);
if (warnings & WRN_UNKNOWN_SLOW_TYPE) {
- RTE_LOG(INFO, PMD,
- "Slave %u: ignoring unknown slow protocol frame type", slave_id);
+ RTE_BOND_LOG(INFO,
+ "Slave %u: ignoring unknown slow protocol frame type",
+ slave_id);
}
if (warnings & WRN_UNKNOWN_MARKER_TYPE)
- RTE_LOG(INFO, PMD, "Slave %u: ignoring unknown marker type", slave_id);
+ RTE_BOND_LOG(INFO, "Slave %u: ignoring unknown marker type",
+ slave_id);
if (warnings & WRN_NOT_LACP_CAPABLE)
MODE4_DEBUG("Port %u is not LACP capable!\n", slave_id);
@@ -507,8 +515,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
ACTOR_STATE_SET(port, DISTRIBUTING);
SM_FLAG_SET(port, NTT);
MODE4_DEBUG("COLLECTING -> DISTRIBUTING\n");
- RTE_LOG(INFO, PMD,
- "Bond %u: slave id %u distributing started.\n",
+ RTE_BOND_LOG(INFO,
+ "Bond %u: slave id %u distributing started.",
internals->port_id, slave_id);
}
} else {
@@ -518,8 +526,8 @@ mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
ACTOR_STATE_CLR(port, DISTRIBUTING);
SM_FLAG_SET(port, NTT);
MODE4_DEBUG("DISTRIBUTING -> COLLECTING\n");
- RTE_LOG(INFO, PMD,
- "Bond %u: slave id %u distributing stopped.\n",
+ RTE_BOND_LOG(INFO,
+ "Bond %u: slave id %u distributing stopped.",
internals->port_id, slave_id);
}
}
@@ -557,7 +565,7 @@ tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
lacp_pkt = rte_pktmbuf_alloc(port->mbuf_pool);
if (lacp_pkt == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate LACP packet from pool\n");
+ RTE_BOND_LOG(ERR, "Failed to allocate LACP packet from pool");
return;
}
@@ -1337,7 +1345,7 @@ bond_8023ad_setup_validate(uint16_t port_id,
conf->tx_period_ms == 0 ||
conf->rx_marker_period_ms == 0 ||
conf->update_timeout_ms == 0) {
- RTE_LOG(ERR, PMD, "given mode 4 configuration is invalid\n");
+ RTE_BOND_LOG(ERR, "given mode 4 configuration is invalid");
return -EINVAL;
}
}
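As the warnings above state, mode-4 bonds piggyback LACP processing on the data path: rx/tx burst on the bonded port must run at least every ~100 ms (10 times per second) even when idle, unless dedicated queues are enabled. A minimal sketch of such a loop, assuming the bond port and queue 0 are already configured and started:

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	/* Keep LACP alive on a mode-4 bond: rx/tx burst are where the
	 * control frames are exchanged, so this must run continuously.
	 */
	static void
	bond_lacp_poll_loop(uint16_t bond_port)
	{
		struct rte_mbuf *pkts[32];

		for (;;) {
			uint16_t nb = rte_eth_rx_burst(bond_port, 0, pkts, 32);
			/* Echo received packets back out; a real app
			 * would process them instead. */
			uint16_t sent = rte_eth_tx_burst(bond_port, 0, pkts, nb);
			while (sent < nb)
				rte_pktmbuf_free(pkts[sent++]);
		}
	}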
diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c
index 3f9945b3..c3891c7e 100644
--- a/drivers/net/bonding/rte_eth_bond_alb.c
+++ b/drivers/net/bonding/rte_eth_bond_alb.c
@@ -60,8 +60,8 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev)
0, data_size, socket_id);
if (internals->mode6.mempool == NULL) {
- RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n",
- bond_dev->device->name);
+ RTE_BOND_LOG(ERR, "%s: Failed to initialize ALB mempool.\n",
+ bond_dev->device->name);
goto mempool_alloc_error;
}
}
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index f854b737..8bc04cfd 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -194,7 +194,8 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
uint16_t first;
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
- if (bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter == 0)
+ if ((bonded_eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER) == 0)
return 0;
internals = bonded_eth_dev->data->dev_private;
@@ -211,9 +212,12 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
for (i = 0, mask = 1;
i < RTE_BITMAP_SLAB_BIT_SIZE;
i ++, mask <<= 1) {
- if (unlikely(slab & mask))
+ if (unlikely(slab & mask)) {
+ uint16_t vlan_id = pos + i;
+
res = rte_eth_dev_vlan_filter(slave_port_id,
- (uint16_t)pos, 1);
+ vlan_id, 1);
+ }
}
found = rte_bitmap_scan(internals->vlan_filter_bmp,
&pos, &slab);
@@ -223,6 +227,49 @@ slave_vlan_filter_set(uint16_t bonded_port_id, uint16_t slave_port_id)
}
static int
+slave_rte_flow_prepare(uint16_t slave_id, struct bond_dev_private *internals)
+{
+ struct rte_flow *flow;
+ struct rte_flow_error ferror;
+ uint16_t slave_port_id = internals->slaves[slave_id].port_id;
+
+ if (internals->flow_isolated_valid != 0) {
+ rte_eth_dev_stop(slave_port_id);
+ if (rte_flow_isolate(slave_port_id, internals->flow_isolated,
+ &ferror)) {
+ RTE_BOND_LOG(ERR, "rte_flow_isolate failed for slave"
+ " %d: %s", slave_id, ferror.message ?
+ ferror.message : "(no stated reason)");
+ return -1;
+ }
+ }
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ flow->flows[slave_id] = rte_flow_create(slave_port_id,
+ &flow->fd->attr,
+ flow->fd->items,
+ flow->fd->actions,
+ &ferror);
+ if (flow->flows[slave_id] == NULL) {
+ RTE_BOND_LOG(ERR, "Cannot create flow for slave"
+ " %d: %s", slave_id,
+ ferror.message ? ferror.message :
+ "(no stated reason)");
+ /* Destroy successful bond flows from the slave */
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ if (flow->flows[slave_id] != NULL) {
+ rte_flow_destroy(slave_port_id,
+ flow->flows[slave_id],
+ &ferror);
+ flow->flows[slave_id] = NULL;
+ }
+ }
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static int
__eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
@@ -284,6 +331,8 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
/* Take the first dev's offload capabilities */
internals->rx_offload_capa = dev_info.rx_offload_capa;
internals->tx_offload_capa = dev_info.tx_offload_capa;
+ internals->rx_queue_offload_capa = dev_info.rx_queue_offload_capa;
+ internals->tx_queue_offload_capa = dev_info.tx_queue_offload_capa;
internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;
/* Inherit first slave's max rx packet size */
@@ -292,16 +341,10 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
} else {
internals->rx_offload_capa &= dev_info.rx_offload_capa;
internals->tx_offload_capa &= dev_info.tx_offload_capa;
+ internals->rx_queue_offload_capa &= dev_info.rx_queue_offload_capa;
+ internals->tx_queue_offload_capa &= dev_info.tx_queue_offload_capa;
internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
- if (link_properties_valid(bonded_eth_dev,
- &slave_eth_dev->data->dev_link) != 0) {
- RTE_BOND_LOG(ERR, "Invalid link properties for slave %d"
- " in bonding mode %d", slave_port_id,
- internals->mode);
- return -1;
- }
-
/* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
* the power of 2, the lower one is GCD
*/
@@ -316,6 +359,19 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
internals->flow_type_rss_offloads;
+ if (slave_rte_flow_prepare(internals->slave_count, internals) != 0) {
+ RTE_BOND_LOG(ERR, "Failed to prepare new slave flows: port=%d",
+ slave_port_id);
+ return -1;
+ }
+
+ /* Add additional MAC addresses to the slave */
+ if (slave_add_mac_addresses(bonded_eth_dev, slave_port_id) != 0) {
+ RTE_BOND_LOG(ERR, "Failed to add mac address(es) to slave %hu",
+ slave_port_id);
+ return -1;
+ }
+
internals->slave_count++;
if (bonded_eth_dev->data->dev_started) {
@@ -330,7 +386,7 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
/* Add slave details to bonded device */
slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
- /* Update all slave devices MACs*/
+ /* Update all slave devices MACs */
mac_address_slaves_update(bonded_eth_dev);
/* Register link status change callback with bonded device pointer as
@@ -348,11 +404,6 @@ __eth_bond_slave_add_lock_free(uint16_t bonded_port_id, uint16_t slave_port_id)
!internals->user_defined_primary_port)
bond_ethdev_primary_set(internals,
slave_port_id);
-
- if (find_slave_by_id(internals->active_slaves,
- internals->active_slave_count,
- slave_port_id) == internals->active_slave_count)
- activate_slave(bonded_eth_dev, slave_port_id);
}
}
@@ -393,6 +444,8 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
struct rte_eth_dev *slave_eth_dev;
+ struct rte_flow_error flow_error;
+ struct rte_flow *flow;
int i, slave_idx;
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
@@ -432,6 +485,21 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
rte_eth_dev_default_mac_addr_set(slave_port_id,
&(internals->slaves[slave_idx].persisted_mac_addr));
+ /* remove additional MAC addresses from the slave */
+ slave_remove_mac_addresses(bonded_eth_dev, slave_port_id);
+
+ /*
+ * Remove bond device flows from slave device.
+ * Note: don't restore flow isolate mode.
+ */
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ if (flow->flows[slave_idx] != NULL) {
+ rte_flow_destroy(slave_port_id, flow->flows[slave_idx],
+ &flow_error);
+ flow->flows[slave_idx] = NULL;
+ }
+ }
+
slave_eth_dev = &rte_eth_devices[slave_port_id];
slave_remove(internals, slave_eth_dev);
slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
@@ -458,6 +526,8 @@ __eth_bond_slave_remove_lock_free(uint16_t bonded_port_id,
if (internals->slave_count == 0) {
internals->rx_offload_capa = 0;
internals->tx_offload_capa = 0;
+ internals->rx_queue_offload_capa = 0;
+ internals->tx_queue_offload_capa = 0;
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
internals->reta_size = 0;
internals->candidate_max_rx_pktlen = 0;
@@ -643,9 +713,21 @@ rte_eth_bond_mac_address_reset(uint16_t bonded_port_id)
internals->user_defined_mac = 0;
if (internals->slave_count > 0) {
+ int slave_port;
+ /* Look up the primary slave's index by its port number:
+ * slave_add() stores slaves in insertion order (slave_count),
+ * not ordered by the primary port.
+ */
+ for (slave_port = 0; slave_port < internals->slave_count;
+ slave_port++) {
+ if (internals->slaves[slave_port].port_id ==
+ internals->primary_port)
+ break;
+ }
+
/* Set MAC Address of Bonded Device */
if (mac_address_set(bonded_eth_dev,
- &internals->slaves[internals->primary_port].persisted_mac_addr)
+ &internals->slaves[slave_port].persisted_mac_addr)
!= 0) {
RTE_BOND_LOG(ERR, "Failed to set MAC address on bonded device");
return -1;
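The loop above makes rte_eth_bond_mac_address_reset() restore the persisted MAC of the actual primary slave rather than whatever slave happens to sit at index primary_port. From the application side the pair of calls looks like this (a sketch; the locally administered address is a placeholder):

	#include <stdint.h>
	#include <rte_ether.h>
	#include <rte_eth_bond.h>

	static int
	bond_mac_override_then_restore(uint16_t bond_port)
	{
		/* Locally administered unicast MAC, illustrative only. */
		struct ether_addr mac = {
			.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
		};

		if (rte_eth_bond_mac_address_set(bond_port, &mac) != 0)
			return -1;
		/* ... traffic runs with the user-defined MAC ... */
		return rte_eth_bond_mac_address_reset(bond_port);
	}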
diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
index 27d3101b..b60fde6a 100644
--- a/drivers/net/bonding/rte_eth_bond_args.c
+++ b/drivers/net/bonding/rte_eth_bond_args.c
@@ -32,7 +32,7 @@ find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
struct rte_pci_addr *eth_pci_addr;
unsigned i;
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
pci_dev = RTE_ETH_DEV_TO_PCI(&rte_eth_devices[i]);
eth_pci_addr = &pci_dev->addr;
@@ -50,7 +50,7 @@ find_port_id_by_dev_name(const char *name)
{
unsigned i;
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
if (rte_eth_devices[i].data == NULL)
continue;
@@ -92,7 +92,7 @@ parse_port_id(const char *port_str)
if (pci_bus->parse(port_str, &dev_addr) == 0) {
dev = pci_bus->find_device(NULL, bond_pci_addr_cmp, &dev_addr);
if (dev == NULL) {
- RTE_LOG(ERR, PMD, "unable to find PCI device\n");
+ RTE_BOND_LOG(ERR, "unable to find PCI device");
return -1;
}
port_id = find_port_id_by_pci_addr(&dev_addr);
@@ -134,7 +134,8 @@ bond_ethdev_parse_slave_port_kvarg(const char *key,
if (strcmp(key, PMD_BOND_SLAVE_PORT_KVARG) == 0) {
int port_id = parse_port_id(value);
if (port_id < 0) {
- RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified", value);
+ RTE_BOND_LOG(ERR, "Invalid slave port value (%s) specified",
+ value);
return -1;
} else
slave_ports->slaves[slave_ports->slave_count++] =
@@ -244,7 +245,7 @@ bond_ethdev_parse_primary_slave_port_id_kvarg(const char *key __rte_unused,
if (primary_slave_port_id < 0)
return -1;
- *(uint8_t *)extra_args = (uint8_t)primary_slave_port_id;
+ *(uint16_t *)extra_args = (uint16_t)primary_slave_port_id;
return 0;
}
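The last hunk widens the store through extra_args from uint8_t to uint16_t: port IDs are 16-bit, so a one-byte store would truncate IDs above 255 and leave the caller's high byte stale. The handler contract in sketch form (names are illustrative):

	#include <stdint.h>
	#include <stdlib.h>
	#include <rte_common.h>
	#include <rte_kvargs.h>

	/* rte_kvargs_process() invokes this once per matching key; the
	 * handler must write through extra_args with exactly the width
	 * of the variable the caller passed in -- here a 16-bit port ID.
	 */
	static int
	parse_port_kvarg(const char *key __rte_unused, const char *value,
			 void *extra_args)
	{
		long id = strtol(value, NULL, 10);

		if (id < 0 || id > UINT16_MAX)
			return -1;
		*(uint16_t *)extra_args = (uint16_t)id;
		return 0;
	}

A caller would then pass the address of a uint16_t as the opaque argument to rte_kvargs_process(), matching the width the handler writes.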
diff --git a/drivers/net/bonding/rte_eth_bond_flow.c b/drivers/net/bonding/rte_eth_bond_flow.c
new file mode 100644
index 00000000..31e4bcae
--- /dev/null
+++ b/drivers/net/bonding/rte_eth_bond_flow.c
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <sys/queue.h>
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_flow.h>
+
+#include "rte_eth_bond_private.h"
+
+static struct rte_flow *
+bond_flow_alloc(int numa_node, const struct rte_flow_attr *attr,
+ const struct rte_flow_item *items,
+ const struct rte_flow_action *actions)
+{
+ struct rte_flow *flow;
+ size_t fdsz;
+
+ fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
+ flow = rte_zmalloc_socket(NULL, sizeof(struct rte_flow) + fdsz,
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (unlikely(flow == NULL)) {
+ RTE_BOND_LOG(ERR, "Could not allocate new flow");
+ return NULL;
+ }
+ flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
+ if (unlikely(rte_flow_copy(flow->fd, fdsz, attr, items, actions) !=
+ fdsz)) {
+ RTE_BOND_LOG(ERR, "Failed to copy flow description");
+ rte_free(flow);
+ return NULL;
+ }
+ return flow;
+}
+
+static void
+bond_flow_release(struct rte_flow **flow)
+{
+ rte_free(*flow);
+ *flow = NULL;
+}
+
+static int
+bond_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_validate(internals->slaves[i].port_id, attr,
+ patterns, actions, err);
+ if (ret) {
+ RTE_BOND_LOG(ERR, "Operation rte_flow_validate failed"
+ " for slave %d with error %d", i, ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static struct rte_flow *
+bond_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow *flow;
+ int i;
+
+ flow = bond_flow_alloc(dev->data->numa_node, attr, patterns, actions);
+ if (unlikely(flow == NULL)) {
+ rte_flow_error_set(err, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOMEM));
+ return NULL;
+ }
+ for (i = 0; i < internals->slave_count; i++) {
+ flow->flows[i] = rte_flow_create(internals->slaves[i].port_id,
+ attr, patterns, actions, err);
+ if (unlikely(flow->flows[i] == NULL)) {
+ RTE_BOND_LOG(ERR, "Failed to create flow on slave %d",
+ i);
+ goto err;
+ }
+ }
+ TAILQ_INSERT_TAIL(&internals->flow_list, flow, next);
+ return flow;
+err:
+ /* Destroy all slaves flows. */
+ for (i = 0; i < internals->slave_count; i++) {
+ if (flow->flows[i] != NULL)
+ rte_flow_destroy(internals->slaves[i].port_id,
+ flow->flows[i], err);
+ }
+ bond_flow_release(&flow);
+ return NULL;
+}
+
+static int
+bond_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ int lret;
+
+ if (unlikely(flow->flows[i] == NULL))
+ continue;
+ lret = rte_flow_destroy(internals->slaves[i].port_id,
+ flow->flows[i], err);
+ if (unlikely(lret != 0)) {
+ RTE_BOND_LOG(ERR, "Failed to destroy flow on slave %d:"
+ " %d", i, lret);
+ ret = lret;
+ }
+ }
+ TAILQ_REMOVE(&internals->flow_list, flow, next);
+ bond_flow_release(&flow);
+ return ret;
+}
+
+static int
+bond_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow *flow;
+ void *tmp;
+ int ret = 0;
+ int lret;
+
+ /* Destroy all bond flows from its slaves instead of flushing them to
+ * keep the LACP flow or any other external flows.
+ */
+ TAILQ_FOREACH_SAFE(flow, &internals->flow_list, next, tmp) {
+ lret = bond_flow_destroy(dev, flow, err);
+ if (unlikely(lret != 0))
+ ret = lret;
+ }
+ if (unlikely(ret != 0))
+ RTE_BOND_LOG(ERR, "Failed to flush flow in all slaves");
+ return ret;
+}
+
+static int
+bond_flow_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ struct rte_flow_query_count *count,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ struct rte_flow_query_count slave_count;
+ int i;
+ int ret;
+
+ count->bytes = 0;
+ count->hits = 0;
+ rte_memcpy(&slave_count, count, sizeof(slave_count));
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_query(internals->slaves[i].port_id,
+ flow->flows[i], action,
+ &slave_count, err);
+ if (unlikely(ret != 0)) {
+ RTE_BOND_LOG(ERR, "Failed to query flow on"
+ " slave %d: %d", i, ret);
+ return ret;
+ }
+ count->bytes += slave_count.bytes;
+ count->hits += slave_count.hits;
+ slave_count.bytes = 0;
+ slave_count.hits = 0;
+ }
+ return 0;
+}
+
+static int
+bond_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *arg,
+ struct rte_flow_error *err)
+{
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ return bond_flow_query_count(dev, flow, action, arg, err);
+ default:
+ return rte_flow_error_set(err, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, arg,
+ rte_strerror(ENOTSUP));
+ }
+}
+
+static int
+bond_flow_isolate(struct rte_eth_dev *dev, int set,
+ struct rte_flow_error *err)
+{
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+ int ret;
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_flow_isolate(internals->slaves[i].port_id, set, err);
+ if (unlikely(ret != 0)) {
+ RTE_BOND_LOG(ERR, "Operation rte_flow_isolate failed"
+ " for slave %d with error %d", i, ret);
+ internals->flow_isolated_valid = 0;
+ return ret;
+ }
+ }
+ internals->flow_isolated = set;
+ internals->flow_isolated_valid = 1;
+ return 0;
+}
+
+const struct rte_flow_ops bond_flow_ops = {
+ .validate = bond_flow_validate,
+ .create = bond_flow_create,
+ .destroy = bond_flow_destroy,
+ .flush = bond_flow_flush,
+ .query = bond_flow_query,
+ .isolate = bond_flow_isolate,
+};
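With bond_flow_ops wired up, an application can program flow rules through the bond port and the PMD replicates them to every slave (and replays them when slaves are added, via slave_rte_flow_prepare()). A minimal sketch, assuming the bond port is configured and has at least queue_id + 1 Rx queues:

	#include <stdint.h>
	#include <rte_flow.h>

	static struct rte_flow *
	bond_steer_all_to_queue(uint16_t bond_port, uint16_t queue_id,
				struct rte_flow_error *err)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action_queue queue = { .index = queue_id };
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};

		/* Validate first: fails if any slave cannot honour the rule. */
		if (rte_flow_validate(bond_port, &attr, pattern, actions, err))
			return NULL;
		return rte_flow_create(bond_port, &attr, pattern, actions, err);
	}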
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index c34c3251..58f7377c 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -17,6 +17,7 @@
#include <rte_bus_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
+#include <rte_string_fns.h>
#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
@@ -24,6 +25,7 @@
#define REORDER_PERIOD_MS 10
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
+#define BOND_MAX_MAC_ADDRS 16
#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
@@ -570,34 +572,21 @@ update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
}
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
-#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
- RTE_LOG(DEBUG, PMD, \
- "%s " \
- "port:%d " \
- "SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
- "SrcIP:%s " \
- "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X " \
- "DstIP:%s " \
- "%s " \
- "%d\n", \
- info, \
- port, \
- eth_h->s_addr.addr_bytes[0], \
- eth_h->s_addr.addr_bytes[1], \
- eth_h->s_addr.addr_bytes[2], \
- eth_h->s_addr.addr_bytes[3], \
- eth_h->s_addr.addr_bytes[4], \
- eth_h->s_addr.addr_bytes[5], \
- src_ip, \
- eth_h->d_addr.addr_bytes[0], \
- eth_h->d_addr.addr_bytes[1], \
- eth_h->d_addr.addr_bytes[2], \
- eth_h->d_addr.addr_bytes[3], \
- eth_h->d_addr.addr_bytes[4], \
- eth_h->d_addr.addr_bytes[5], \
- dst_ip, \
- arp_op, \
- ++burstnumber)
+#define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
+ rte_log(RTE_LOG_DEBUG, bond_logtype, \
+ "%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
+ "DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
+ info, \
+ port, \
+ eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
+ eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
+ eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
+ src_ip, \
+ eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
+ eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
+ eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
+ dst_ip, \
+ arp_op, ++burstnumber)
#endif
static void
@@ -617,7 +606,7 @@ mode6_debug(const char __attribute__((unused)) *info, struct ether_hdr *eth_h,
uint16_t offset = get_vlan_offset(eth_h, &ether_type);
#ifdef RTE_LIBRTE_BOND_DEBUG_ALB
- snprintf(buf, 16, "%s", info);
+ strlcpy(buf, info, 16);
#endif
if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
@@ -1138,7 +1127,8 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* Allocate new packet to send ARP update on current slave */
upd_pkt = rte_pktmbuf_alloc(internals->mode6.mempool);
if (upd_pkt == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate ARP packet from pool\n");
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate ARP packet from pool");
continue;
}
pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr)
@@ -1560,12 +1550,12 @@ mac_address_get(struct rte_eth_dev *eth_dev, struct ether_addr *dst_mac_addr)
struct ether_addr *mac_addr;
if (eth_dev == NULL) {
- RTE_LOG(ERR, PMD, "%s: NULL pointer eth_dev specified\n", __func__);
+ RTE_BOND_LOG(ERR, "NULL pointer eth_dev specified");
return -1;
}
if (dst_mac_addr == NULL) {
- RTE_LOG(ERR, PMD, "%s: NULL pointer MAC specified\n", __func__);
+ RTE_BOND_LOG(ERR, "NULL pointer MAC specified");
return -1;
}
@@ -1599,6 +1589,61 @@ mac_address_set(struct rte_eth_dev *eth_dev, struct ether_addr *new_mac_addr)
return 0;
}
+static const struct ether_addr null_mac_addr;
+
+/*
+ * Add additional MAC addresses to the slave
+ */
+int
+slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id)
+{
+ int i, ret;
+ struct ether_addr *mac_addr;
+
+ for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
+ mac_addr = &bonded_eth_dev->data->mac_addrs[i];
+ if (is_same_ether_addr(mac_addr, &null_mac_addr))
+ break;
+
+ ret = rte_eth_dev_mac_addr_add(slave_port_id, mac_addr, 0);
+ if (ret < 0) {
+ /* rollback */
+ for (i--; i > 0; i--)
+ rte_eth_dev_mac_addr_remove(slave_port_id,
+ &bonded_eth_dev->data->mac_addrs[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Remove additional MAC addresses from the slave
+ */
+int
+slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id)
+{
+ int i, rc, ret;
+ struct ether_addr *mac_addr;
+
+ rc = 0;
+ for (i = 1; i < BOND_MAX_MAC_ADDRS; i++) {
+ mac_addr = &bonded_eth_dev->data->mac_addrs[i];
+ if (is_same_ether_addr(mac_addr, &null_mac_addr))
+ break;
+
+ ret = rte_eth_dev_mac_addr_remove(slave_port_id, mac_addr);
+ /* save only the first error */
+ if (ret < 0 && rc == 0)
+ rc = ret;
+ }
+
+ return rc;
+}
+
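These helpers let mac_addr_add on the bond port fan out to all slaves, with rollback on failure (see bond_ethdev_mac_addr_add further down). From the application side it is the standard ethdev call; a sketch with a placeholder address:

	#include <stdint.h>
	#include <rte_ethdev.h>
	#include <rte_ether.h>

	/* Accept unicast traffic for a second, locally administered MAC
	 * on the bond; the bonding PMD mirrors it to every slave.
	 */
	static int
	bond_add_secondary_mac(uint16_t bond_port)
	{
		struct ether_addr extra = {
			.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 }
		};

		/* pool index 0: no VMDq pool targeting. */
		return rte_eth_dev_mac_addr_add(bond_port, &extra, 0);
	}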
int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
{
@@ -1686,9 +1731,9 @@ bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode)
if (internals->mode4.dedicated_queues.enabled == 0) {
eth_dev->rx_pkt_burst = bond_ethdev_rx_burst_8023ad;
eth_dev->tx_pkt_burst = bond_ethdev_tx_burst_8023ad;
- RTE_LOG(WARNING, PMD,
+ RTE_BOND_LOG(WARNING,
"Using mode 4, it is necessary to do TX burst "
- "and RX burst at least every 100ms.\n");
+ "and RX burst at least every 100ms.");
} else {
/* Use flow director's optimization */
eth_dev->rx_pkt_burst =
@@ -1818,8 +1863,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
}
- slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
- bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
+ if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ slave_eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ else
+ slave_eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_FILTER;
nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
@@ -1831,12 +1881,20 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
}
+ errval = rte_eth_dev_set_mtu(slave_eth_dev->data->port_id,
+ bonded_eth_dev->data->mtu);
+ if (errval != 0 && errval != -ENOTSUP) {
+ RTE_BOND_LOG(ERR, "rte_eth_dev_set_mtu: port %u, err (%d)",
+ slave_eth_dev->data->port_id, errval);
+ return errval;
+ }
+
/* Configure device */
errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
nb_rx_queues, nb_tx_queues,
&(slave_eth_dev->data->dev_conf));
if (errval != 0) {
- RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u , err (%d)",
+ RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)",
slave_eth_dev->data->port_id, errval);
return errval;
}
@@ -1918,10 +1976,10 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
&internals->reta_conf[0],
internals->slaves[i].reta_size);
if (errval != 0) {
- RTE_LOG(WARNING, PMD,
- "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
- " RSS Configuration for bonding may be inconsistent.\n",
- slave_eth_dev->data->port_id, errval);
+ RTE_BOND_LOG(WARNING,
+ "rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
+ " RSS Configuration for bonding may be inconsistent.",
+ slave_eth_dev->data->port_id, errval);
}
break;
}
@@ -1950,10 +2008,19 @@ slave_remove(struct bond_dev_private *internals,
slave_eth_dev->data->port_id)
break;
- if (i < (internals->slave_count - 1))
+ if (i < (internals->slave_count - 1)) {
+ struct rte_flow *flow;
+
memmove(&internals->slaves[i], &internals->slaves[i + 1],
sizeof(internals->slaves[0]) *
(internals->slave_count - i - 1));
+ TAILQ_FOREACH(flow, &internals->flow_list, next) {
+ memmove(&flow->flows[i], &flow->flows[i + 1],
+ sizeof(flow->flows[0]) *
+ (internals->slave_count - i - 1));
+ flow->flows[internals->slave_count - 1] = NULL;
+ }
+ }
internals->slave_count--;
@@ -2026,7 +2093,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
if (internals->slave_count == 0) {
RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices");
- return -1;
+ goto out_err;
}
if (internals->user_defined_mac == 0) {
@@ -2037,19 +2104,15 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
new_mac_addr = &internals->slaves[i].persisted_mac_addr;
if (new_mac_addr == NULL)
- return -1;
+ goto out_err;
if (mac_address_set(eth_dev, new_mac_addr) != 0) {
RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address",
eth_dev->data->port_id);
- return -1;
+ goto out_err;
}
}
- /* Update all slave devices MACs*/
- if (mac_address_slaves_update(eth_dev) != 0)
- return -1;
-
/* If bonded device is configure in promiscuous mode then re-apply config */
if (internals->promiscuous_en)
bond_ethdev_promiscuous_enable(eth_dev);
@@ -2073,7 +2136,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
"bonded port (%d) failed to reconfigure slave device (%d)",
eth_dev->data->port_id,
internals->slaves[i].port_id);
- return -1;
+ goto out_err;
}
/* We will need to poll for link status if any slave doesn't
* support interrupts
@@ -2081,6 +2144,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
if (internals->slaves[i].link_status_poll_enabled)
internals->link_status_polling_enabled = 1;
}
+
/* start polling if needed */
if (internals->link_status_polling_enabled) {
rte_eal_alarm_set(
@@ -2089,6 +2153,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
(void *)&rte_eth_devices[internals->port_id]);
}
+ /* Update all slave devices MACs */
+ if (mac_address_slaves_update(eth_dev) != 0)
+ goto out_err;
+
if (internals->user_defined_primary_port)
bond_ethdev_primary_set(internals, internals->primary_port);
@@ -2100,6 +2168,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
bond_tlb_enable(internals);
return 0;
+
+out_err:
+ eth_dev->data->dev_started = 0;
+ return -1;
}
static void
@@ -2157,7 +2229,6 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- internals->active_slave_count = 0;
internals->link_status_polling_enabled = 0;
for (i = 0; i < internals->slave_count; i++)
internals->slaves[i].last_link_status = 0;
@@ -2172,20 +2243,22 @@ bond_ethdev_close(struct rte_eth_dev *dev)
struct bond_dev_private *internals = dev->data->dev_private;
uint8_t bond_port_id = internals->port_id;
int skipped = 0;
+ struct rte_flow_error ferror;
- RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->device->name);
+ RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
while (internals->slave_count != skipped) {
uint16_t port_id = internals->slaves[skipped].port_id;
rte_eth_dev_stop(port_id);
if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to remove port %d from bonded device "
- "%s\n", port_id, dev->device->name);
+ RTE_BOND_LOG(ERR,
+ "Failed to remove port %d from bonded device %s",
+ port_id, dev->device->name);
skipped++;
}
}
+ bond_flow_ops.flush(dev, &ferror);
bond_ethdev_free_queues(dev);
rte_bitmap_reset(internals->vlan_filter_bmp);
}
@@ -2201,7 +2274,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
uint16_t max_nb_rx_queues = UINT16_MAX;
uint16_t max_nb_tx_queues = UINT16_MAX;
- dev_info->max_mac_addrs = 1;
+ dev_info->max_mac_addrs = BOND_MAX_MAC_ADDRS;
dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
internals->candidate_max_rx_pktlen :
@@ -2244,6 +2317,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->rx_offload_capa = internals->rx_offload_capa;
dev_info->tx_offload_capa = internals->tx_offload_capa;
+ dev_info->rx_queue_offload_capa = internals->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = internals->tx_queue_offload_capa;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
dev_info->reta_size = internals->reta_size;
@@ -2269,9 +2344,9 @@ bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
if (res == ENOTSUP)
- RTE_LOG(WARNING, PMD,
- "Setting VLAN filter on slave port %u not supported.\n",
- port_id);
+ RTE_BOND_LOG(WARNING,
+ "Setting VLAN filter on slave port %u not supported.",
+ port_id);
}
rte_spinlock_unlock(&internals->lock);
@@ -2633,6 +2708,11 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
if (!valid_slave)
return rc;
+ /* Synchronize lsc callback parallel calls either by real link event
+ * from the slaves PMDs or by the bonding PMD itself.
+ */
+ rte_spinlock_lock(&internals->lsc_lock);
+
/* Search for port in active port list */
active_pos = find_slave_by_id(internals->active_slaves,
internals->active_slave_count, port_id);
@@ -2640,7 +2720,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
rte_eth_link_get_nowait(port_id, &link);
if (link.link_status) {
if (active_pos < internals->active_slave_count)
- return rc;
+ goto link_update;
/* if no active slave ports then set this port to be primary port */
if (internals->active_slave_count < 1) {
@@ -2652,6 +2732,17 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
mac_address_slaves_update(bonded_eth_dev);
}
+ /* check link state properties if bonded link is up */
+ if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+ if (link_properties_valid(bonded_eth_dev, &link) != 0)
+ RTE_BOND_LOG(ERR, "Invalid link properties "
+ "for slave %d in bonding mode %d",
+ port_id, internals->mode);
+ } else {
+ /* inherit slave link properties */
+ link_properties_set(bonded_eth_dev, &link);
+ }
+
activate_slave(bonded_eth_dev, port_id);
/* If user has defined the primary port then default to using it */
@@ -2660,7 +2751,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
bond_ethdev_primary_set(internals, port_id);
} else {
if (active_pos == internals->active_slave_count)
- return rc;
+ goto link_update;
/* Remove from active slave list */
deactivate_slave(bonded_eth_dev, port_id);
@@ -2679,6 +2770,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
}
}
+link_update:
/**
* Update bonded device link properties after any change to active
* slaves
@@ -2713,7 +2805,10 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
NULL);
}
}
- return 0;
+
+ rte_spinlock_unlock(&internals->lsc_lock);
+
+ return rc;
}
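The new lsc_lock serializes bond_ethdev_lsc_event_callback() against concurrent link events arriving from several slaves. Applications observe the aggregated state by registering their own LSC callback on the bond port; a sketch of the 18.08 callback shape:

	#include <stdio.h>
	#include <rte_common.h>
	#include <rte_ethdev.h>

	static int
	on_bond_link_change(uint16_t port_id, enum rte_eth_event_type event,
			    void *cb_arg, void *ret_param)
	{
		struct rte_eth_link link;

		RTE_SET_USED(event);
		RTE_SET_USED(cb_arg);
		RTE_SET_USED(ret_param);
		rte_eth_link_get_nowait(port_id, &link);
		printf("bond port %u link %s\n", port_id,
		       link.link_status ? "up" : "down");
		return 0;
	}

	/* Registration, typically right after rte_eth_dev_configure():
	 * rte_eth_dev_callback_register(bond_port, RTE_ETH_EVENT_INTR_LSC,
	 *                               on_bond_link_change, NULL);
	 */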
static int
@@ -2851,11 +2946,88 @@ bond_ethdev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
-static void
+static int
bond_ethdev_mac_address_set(struct rte_eth_dev *dev, struct ether_addr *addr)
{
- if (mac_address_set(dev, addr))
+ if (mac_address_set(dev, addr)) {
RTE_BOND_LOG(ERR, "Failed to update MAC address");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type type, enum rte_filter_op op, void *arg)
+{
+ if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &bond_flow_ops;
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static int
+bond_ethdev_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ __rte_unused uint32_t index, uint32_t vmdq)
+{
+ struct rte_eth_dev *slave_eth_dev;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int ret, i;
+
+ rte_spinlock_lock(&internals->lock);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
+ if (*slave_eth_dev->dev_ops->mac_addr_add == NULL ||
+ *slave_eth_dev->dev_ops->mac_addr_remove == NULL) {
+ ret = -ENOTSUP;
+ goto end;
+ }
+ }
+
+ for (i = 0; i < internals->slave_count; i++) {
+ ret = rte_eth_dev_mac_addr_add(internals->slaves[i].port_id,
+ mac_addr, vmdq);
+ if (ret < 0) {
+ /* rollback */
+ for (i--; i >= 0; i--)
+ rte_eth_dev_mac_addr_remove(
+ internals->slaves[i].port_id, mac_addr);
+ goto end;
+ }
+ }
+
+ ret = 0;
+end:
+ rte_spinlock_unlock(&internals->lock);
+ return ret;
+}
+
+static void
+bond_ethdev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev *slave_eth_dev;
+ struct bond_dev_private *internals = dev->data->dev_private;
+ int i;
+
+ rte_spinlock_lock(&internals->lock);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ slave_eth_dev = &rte_eth_devices[internals->slaves[i].port_id];
+ if (*slave_eth_dev->dev_ops->mac_addr_remove == NULL)
+ goto end;
+ }
+
+ struct ether_addr *mac_addr = &dev->data->mac_addrs[index];
+
+ for (i = 0; i < internals->slave_count; i++)
+ rte_eth_dev_mac_addr_remove(internals->slaves[i].port_id,
+ mac_addr);
+
+end:
+ rte_spinlock_unlock(&internals->lock);
}
const struct eth_dev_ops default_dev_ops = {
@@ -2879,7 +3051,10 @@ const struct eth_dev_ops default_dev_ops = {
.rss_hash_update = bond_ethdev_rss_hash_update,
.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get,
.mtu_set = bond_ethdev_mtu_set,
- .mac_addr_set = bond_ethdev_mac_address_set
+ .mac_addr_set = bond_ethdev_mac_address_set,
+ .mac_addr_add = bond_ethdev_mac_addr_add,
+ .mac_addr_remove = bond_ethdev_mac_addr_remove,
+ .filter_ctrl = bond_filter_ctrl
};
static int
@@ -2906,10 +3081,13 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
eth_dev->data->nb_rx_queues = (uint16_t)1;
eth_dev->data->nb_tx_queues = (uint16_t)1;
- eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
- socket_id);
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN *
+ BOND_MAX_MAC_ADDRS, 0, socket_id);
if (eth_dev->data->mac_addrs == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate %u bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * BOND_MAX_MAC_ADDRS);
goto err;
}
@@ -2917,6 +3095,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
rte_spinlock_init(&internals->lock);
+ rte_spinlock_init(&internals->lsc_lock);
internals->port_id = eth_dev->data->port_id;
internals->mode = BONDING_MODE_INVALID;
@@ -2936,6 +3115,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->active_slave_count = 0;
internals->rx_offload_capa = 0;
internals->tx_offload_capa = 0;
+ internals->rx_queue_offload_capa = 0;
+ internals->tx_queue_offload_capa = 0;
internals->candidate_max_rx_pktlen = 0;
internals->max_rx_pktlen = 0;
@@ -2945,10 +3126,13 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
memset(internals->slaves, 0, sizeof(internals->slaves));
+ TAILQ_INIT(&internals->flow_list);
+ internals->flow_isolated_valid = 0;
+
/* Set mode 4 default configuration */
bond_mode_8023ad_setup(eth_dev, NULL);
if (bond_ethdev_mode_set(eth_dev, mode)) {
- RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
+ RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
eth_dev->data->port_id, mode);
goto err;
}
@@ -2959,7 +3143,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
RTE_CACHE_LINE_SIZE);
if (internals->vlan_filter_bmpmem == NULL) {
RTE_BOND_LOG(ERR,
- "Failed to allocate vlan bitmap for bonded device %u\n",
+ "Failed to allocate vlan bitmap for bonded device %u",
eth_dev->data->port_id);
goto err;
}
@@ -2968,7 +3152,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
if (internals->vlan_filter_bmp == NULL) {
RTE_BOND_LOG(ERR,
- "Failed to init vlan bitmap for bonded device %u\n",
+ "Failed to init vlan bitmap for bonded device %u",
eth_dev->data->port_id);
rte_free(internals->vlan_filter_bmpmem);
goto err;
@@ -2994,12 +3178,27 @@ bond_probe(struct rte_vdev_device *dev)
uint8_t bonding_mode, socket_id/*, agg_mode*/;
int arg_count, port_id;
uint8_t agg_mode;
+ struct rte_eth_dev *eth_dev;
if (!dev)
return -EINVAL;
name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
+ RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ RTE_BOND_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &default_dev_ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
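The secondary-process branch above attaches to a bonded device already
created by the primary rather than allocating a new one; the vdev is passed
without arguments (for example --vdev=net_bonding0), which is what the
argument-length check selects on. A hedged application-side sketch:

    /* requires <rte_eal.h>; queue setup must still come from the
     * primary, as the TODO above notes */
    if (rte_eal_process_type() == RTE_PROC_SECONDARY)
        printf("secondary process: bonded ports are attached, not created\n");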
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
pmd_bond_init_valid_arguments);
@@ -3011,13 +3210,13 @@ bond_probe(struct rte_vdev_device *dev)
if (rte_kvargs_process(kvlist, PMD_BOND_MODE_KVARG,
&bond_ethdev_parse_slave_mode_kvarg,
&bonding_mode) != 0) {
- RTE_LOG(ERR, EAL, "Invalid mode for bonded device %s\n",
+ RTE_BOND_LOG(ERR, "Invalid mode for bonded device %s",
name);
goto parse_error;
}
} else {
- RTE_LOG(ERR, EAL, "Mode must be specified only once for bonded "
- "device %s\n", name);
+ RTE_BOND_LOG(ERR, "Mode must be specified only once for bonded "
+ "device %s", name);
goto parse_error;
}
@@ -3027,13 +3226,13 @@ bond_probe(struct rte_vdev_device *dev)
if (rte_kvargs_process(kvlist, PMD_BOND_SOCKET_ID_KVARG,
&bond_ethdev_parse_socket_id_kvarg, &socket_id)
!= 0) {
- RTE_LOG(ERR, EAL, "Invalid socket Id specified for "
- "bonded device %s\n", name);
+ RTE_BOND_LOG(ERR, "Invalid socket Id specified for "
+ "bonded device %s", name);
goto parse_error;
}
} else if (arg_count > 1) {
- RTE_LOG(ERR, EAL, "Socket Id can be specified only once for "
- "bonded device %s\n", name);
+ RTE_BOND_LOG(ERR, "Socket Id can be specified only once for "
+ "bonded device %s", name);
goto parse_error;
} else {
socket_id = rte_socket_id();
@@ -3044,21 +3243,22 @@ bond_probe(struct rte_vdev_device *dev)
/* Create link bonding eth device */
port_id = bond_alloc(dev, bonding_mode);
if (port_id < 0) {
- RTE_LOG(ERR, EAL, "Failed to create socket %s in mode %u on "
- "socket %u.\n", name, bonding_mode, socket_id);
+ RTE_BOND_LOG(ERR, "Failed to create socket %s in mode %u on "
+ "socket %u.", name, bonding_mode, socket_id);
goto parse_error;
}
internals = rte_eth_devices[port_id].data->dev_private;
internals->kvlist = kvlist;
+ rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
if (rte_kvargs_process(kvlist,
PMD_BOND_AGG_MODE_KVARG,
&bond_ethdev_parse_slave_agg_mode_kvarg,
&agg_mode) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to parse agg selection mode for bonded device %s\n",
+ RTE_BOND_LOG(ERR,
+ "Failed to parse agg selection mode for bonded device %s",
name);
goto parse_error;
}
@@ -3070,8 +3270,8 @@ bond_probe(struct rte_vdev_device *dev)
rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE);
}
- RTE_LOG(INFO, EAL, "Create bonded device %s on port %d in mode %u on "
- "socket %u.\n", name, port_id, bonding_mode, socket_id);
+ RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
+ "socket %u.", name, port_id, bonding_mode, socket_id);
return 0;
parse_error:
@@ -3091,7 +3291,7 @@ bond_remove(struct rte_vdev_device *dev)
return -EINVAL;
name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
+ RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
/* now free all data allocation - for eth_dev structure,
* dummy pci driver and internal (private) data
@@ -3118,6 +3318,10 @@ bond_remove(struct rte_vdev_device *dev)
eth_dev->tx_pkt_burst = NULL;
internals = eth_dev->data->dev_private;
+ /* Try to release the mempool used in mode6. If the bonded
+ * device is not in mode6, freeing a NULL pointer is not a problem.
+ */
+ rte_mempool_free(internals->mode6.mempool);
rte_bitmap_free(internals->vlan_filter_bmp);
rte_free(internals->vlan_filter_bmpmem);
rte_free(eth_dev->data->dev_private);
@@ -3178,23 +3382,23 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
struct ether_addr bond_mac;
if (rte_kvargs_process(kvlist, PMD_BOND_MAC_ADDR_KVARG,
- &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
- RTE_LOG(INFO, EAL, "Invalid mac address for bonded device %s\n",
- name);
+ &bond_ethdev_parse_bond_mac_addr_kvarg, &bond_mac) < 0) {
+ RTE_BOND_LOG(INFO, "Invalid mac address for bonded device %s",
+ name);
return -1;
}
/* Set MAC address */
if (rte_eth_bond_mac_address_set(port_id, &bond_mac) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set mac address on bonded device %s\n",
- name);
+ RTE_BOND_LOG(ERR,
+ "Failed to set mac address on bonded device %s",
+ name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(ERR, EAL,
- "MAC address can be specified only once for bonded device %s\n",
- name);
+ RTE_BOND_LOG(ERR,
+ "MAC address can be specified only once for bonded device %s",
+ name);
return -1;
}
@@ -3204,40 +3408,40 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint8_t xmit_policy;
if (rte_kvargs_process(kvlist, PMD_BOND_XMIT_POLICY_KVARG,
- &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
- 0) {
- RTE_LOG(INFO, EAL,
- "Invalid xmit policy specified for bonded device %s\n",
- name);
+ &bond_ethdev_parse_balance_xmit_policy_kvarg, &xmit_policy) !=
+ 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid xmit policy specified for bonded device %s",
+ name);
return -1;
}
/* Set balance mode transmit policy*/
if (rte_eth_bond_xmit_policy_set(port_id, xmit_policy) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set balance xmit policy on bonded device %s\n",
- name);
+ RTE_BOND_LOG(ERR,
+ "Failed to set balance xmit policy on bonded device %s",
+ name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(ERR, EAL,
- "Transmit policy can be specified only once for bonded device"
- " %s\n", name);
+ RTE_BOND_LOG(ERR,
+ "Transmit policy can be specified only once for bonded device %s",
+ name);
return -1;
}
if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
if (rte_kvargs_process(kvlist,
- PMD_BOND_AGG_MODE_KVARG,
- &bond_ethdev_parse_slave_agg_mode_kvarg,
- &agg_mode) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to parse agg selection mode for bonded device %s\n",
- name);
+ PMD_BOND_AGG_MODE_KVARG,
+ &bond_ethdev_parse_slave_agg_mode_kvarg,
+ &agg_mode) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to parse agg selection mode for bonded device %s",
+ name);
}
if (internals->mode == BONDING_MODE_8023AD)
- rte_eth_bond_8023ad_agg_selection_set(port_id,
- agg_mode);
+ rte_eth_bond_8023ad_agg_selection_set(port_id,
+ agg_mode);
}
/* Parse/add slave ports to bonded device */
@@ -3248,23 +3452,23 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
memset(&slave_ports, 0, sizeof(slave_ports));
if (rte_kvargs_process(kvlist, PMD_BOND_SLAVE_PORT_KVARG,
- &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to parse slave ports for bonded device %s\n",
- name);
+ &bond_ethdev_parse_slave_port_kvarg, &slave_ports) != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to parse slave ports for bonded device %s",
+ name);
return -1;
}
for (i = 0; i < slave_ports.slave_count; i++) {
if (rte_eth_bond_slave_add(port_id, slave_ports.slaves[i]) != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to add port %d as slave to bonded device %s\n",
- slave_ports.slaves[i], name);
+ RTE_BOND_LOG(ERR,
+ "Failed to add port %d as slave to bonded device %s",
+ slave_ports.slaves[i], name);
}
}
} else {
- RTE_LOG(INFO, EAL, "No slaves specified for bonded device %s\n", name);
+ RTE_BOND_LOG(INFO, "No slaves specified for bonded device %s", name);
return -1;
}
@@ -3274,27 +3478,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint16_t primary_slave_port_id;
if (rte_kvargs_process(kvlist,
- PMD_BOND_PRIMARY_SLAVE_KVARG,
- &bond_ethdev_parse_primary_slave_port_id_kvarg,
- &primary_slave_port_id) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid primary slave port id specified for bonded device"
- " %s\n", name);
+ PMD_BOND_PRIMARY_SLAVE_KVARG,
+ &bond_ethdev_parse_primary_slave_port_id_kvarg,
+ &primary_slave_port_id) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid primary slave port id specified for bonded device %s",
+ name);
return -1;
}
/* Set balance mode transmit policy*/
if (rte_eth_bond_primary_set(port_id, primary_slave_port_id)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set primary slave port %d on bonded device %s\n",
- primary_slave_port_id, name);
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set primary slave port %d on bonded device %s",
+ primary_slave_port_id, name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Primary slave can be specified only once for bonded device"
- " %s\n", name);
+ RTE_BOND_LOG(INFO,
+ "Primary slave can be specified only once for bonded device %s",
+ name);
return -1;
}
@@ -3304,26 +3508,26 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint32_t lsc_poll_interval_ms;
if (rte_kvargs_process(kvlist,
- PMD_BOND_LSC_POLL_PERIOD_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &lsc_poll_interval_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid lsc polling interval value specified for bonded"
- " device %s\n", name);
+ PMD_BOND_LSC_POLL_PERIOD_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &lsc_poll_interval_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid lsc polling interval value specified for bonded"
+ " device %s", name);
return -1;
}
if (rte_eth_bond_link_monitoring_set(port_id, lsc_poll_interval_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set lsc monitor polling interval (%u ms) on"
- " bonded device %s\n", lsc_poll_interval_ms, name);
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set lsc monitor polling interval (%u ms) on bonded device %s",
+ lsc_poll_interval_ms, name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "LSC polling interval can be specified only once for bonded"
- " device %s\n", name);
+ RTE_BOND_LOG(INFO,
+ "LSC polling interval can be specified only once for bonded"
+ " device %s", name);
return -1;
}
@@ -3333,27 +3537,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint32_t link_up_delay_ms;
if (rte_kvargs_process(kvlist,
- PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &link_up_delay_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid link up propagation delay value specified for"
- " bonded device %s\n", name);
+ PMD_BOND_LINK_UP_PROP_DELAY_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &link_up_delay_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid link up propagation delay value specified for"
+ " bonded device %s", name);
return -1;
}
/* Set balance mode transmit policy*/
if (rte_eth_bond_link_up_prop_delay_set(port_id, link_up_delay_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set link up propagation delay (%u ms) on bonded"
- " device %s\n", link_up_delay_ms, name);
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set link up propagation delay (%u ms) on bonded"
+ " device %s", link_up_delay_ms, name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Link up propagation delay can be specified only once for"
- " bonded device %s\n", name);
+ RTE_BOND_LOG(INFO,
+ "Link up propagation delay can be specified only once for"
+ " bonded device %s", name);
return -1;
}
@@ -3363,27 +3567,27 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
uint32_t link_down_delay_ms;
if (rte_kvargs_process(kvlist,
- PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
- &bond_ethdev_parse_time_ms_kvarg,
- &link_down_delay_ms) < 0) {
- RTE_LOG(INFO, EAL,
- "Invalid link down propagation delay value specified for"
- " bonded device %s\n", name);
+ PMD_BOND_LINK_DOWN_PROP_DELAY_KVARG,
+ &bond_ethdev_parse_time_ms_kvarg,
+ &link_down_delay_ms) < 0) {
+ RTE_BOND_LOG(INFO,
+ "Invalid link down propagation delay value specified for"
+ " bonded device %s", name);
return -1;
}
/* Set balance mode transmit policy*/
if (rte_eth_bond_link_down_prop_delay_set(port_id, link_down_delay_ms)
- != 0) {
- RTE_LOG(ERR, EAL,
- "Failed to set link down propagation delay (%u ms) on"
- " bonded device %s\n", link_down_delay_ms, name);
+ != 0) {
+ RTE_BOND_LOG(ERR,
+ "Failed to set link down propagation delay (%u ms) on bonded device %s",
+ link_down_delay_ms, name);
return -1;
}
} else if (arg_count > 1) {
- RTE_LOG(INFO, EAL,
- "Link down propagation delay can be specified only once for"
- " bonded device %s\n", name);
+ RTE_BOND_LOG(INFO,
+ "Link down propagation delay can be specified only once for bonded device %s",
+ name);
return -1;
}
@@ -3409,3 +3613,12 @@ RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
"lsc_poll_period_ms=<int> "
"up_delay=<int> "
"down_delay=<int>");
+
+int bond_logtype;
+
+RTE_INIT(bond_init_log)
+{
+ bond_logtype = rte_log_register("pmd.net.bon");
+ if (bond_logtype >= 0)
+ rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
+}
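Once registered, the bonding log verbosity can be tuned at runtime without
recompiling, either via the EAL --log-level option or programmatically.
A sketch, assuming the logtype name registered above:

    /* requires <rte_log.h>; rte_log_register() returns the existing id
     * when the name is already registered */
    int lt = rte_log_register("pmd.net.bond");
    if (lt >= 0)
        rte_log_set_level(lt, RTE_LOG_DEBUG);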
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 92e15f8c..43e0e448 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -5,9 +5,12 @@
#ifndef _RTE_ETH_BOND_PRIVATE_H_
#define _RTE_ETH_BOND_PRIVATE_H_
+#include <sys/queue.h>
+
#include <rte_ethdev_driver.h>
#include <rte_spinlock.h>
#include <rte_bitmap.h>
+#include <rte_flow_driver.h>
#include "rte_eth_bond.h"
#include "rte_eth_bond_8023ad_private.h"
@@ -28,8 +31,11 @@
#define PMD_BOND_XMIT_POLICY_LAYER23_KVARG ("l23")
#define PMD_BOND_XMIT_POLICY_LAYER34_KVARG ("l34")
+extern int bond_logtype;
+
#define RTE_BOND_LOG(lvl, msg, ...) \
- RTE_LOG(lvl, PMD, "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__)
+ rte_log(RTE_LOG_ ## lvl, bond_logtype, \
+ "%s(%d) - " msg "\n", __func__, __LINE__, ##__VA_ARGS__)
#define BONDING_MODE_INVALID 0xFF
@@ -37,6 +43,8 @@ extern const char *pmd_bond_init_valid_arguments[];
extern struct rte_vdev_driver pmd_bond_drv;
+extern const struct rte_flow_ops bond_flow_ops;
+
/** Port Queue Mapping Structure */
struct bond_rx_queue {
uint16_t queue_id;
@@ -80,6 +88,14 @@ struct bond_slave_details {
uint16_t reta_size;
};
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next;
+ /* Slaves flows */
+ struct rte_flow *flows[RTE_MAX_ETHPORTS];
+ /* Flow description for synchronization */
+ struct rte_flow_desc *fd;
+};
+
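With this structure, a flow created on the bonded port is queued on
flow_list and re-created on each slave, with the per-slave handles kept in
flows[]. A minimal sketch of creating such a flow from an application,
assuming `bond_port_id` is a started bonded port (the pattern and action
below are placeholders):

    /* requires <rte_flow.h> */
    struct rte_flow_attr attr = { .ingress = 1 };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_DROP },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error error;
    struct rte_flow *flow = rte_flow_create(bond_port_id, &attr,
                                            pattern, actions, &error);
    if (flow == NULL)
        printf("flow create failed: %s\n",
               error.message ? error.message : "unspecified");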
typedef void (*burst_xmit_hash_t)(struct rte_mbuf **buf, uint16_t nb_pkts,
uint8_t slave_count, uint16_t *slaves);
@@ -89,6 +105,7 @@ struct bond_dev_private {
uint8_t mode; /**< Link Bonding Mode */
rte_spinlock_t lock;
+ rte_spinlock_t lsc_lock;
uint16_t primary_port; /**< Primary Slave Port */
uint16_t current_primary_port; /**< Primary Slave Port */
@@ -128,8 +145,17 @@ struct bond_dev_private {
/**< TLB active slaves send order */
struct mode_alb_private mode6;
- uint32_t rx_offload_capa; /** Rx offload capability */
- uint32_t tx_offload_capa; /** Tx offload capability */
+ uint64_t rx_offload_capa; /** Rx offload capability */
+ uint64_t tx_offload_capa; /** Tx offload capability */
+ uint64_t rx_queue_offload_capa; /** per queue Rx offload capability */
+ uint64_t tx_queue_offload_capa; /** per queue Tx offload capability */
+
+ /**< List of the configured flows */
+ TAILQ_HEAD(sub_flows, rte_flow) flow_list;
+
+ /**< Flow isolation state */
+ int flow_isolated;
+ int flow_isolated_valid;
/** Bit mask of RSS offloads, the bit offset also means flow type */
uint64_t flow_type_rss_offloads;
@@ -205,6 +231,14 @@ int
mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev);
int
+slave_add_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id);
+
+int
+slave_remove_mac_addresses(struct rte_eth_dev *bonded_eth_dev,
+ uint16_t slave_port_id);
+
+int
bond_ethdev_mode_set(struct rte_eth_dev *eth_dev, int mode);
int
diff --git a/drivers/net/bonding/rte_pmd_bond_version.map b/drivers/net/bonding/rte_pmd_bond_version.map
index ec3374b0..03ddb44e 100644
--- a/drivers/net/bonding/rte_pmd_bond_version.map
+++ b/drivers/net/bonding/rte_pmd_bond_version.map
@@ -1,6 +1,7 @@
DPDK_2.0 {
global:
+ rte_eth_bond_8023ad_slave_info;
rte_eth_bond_active_slaves_get;
rte_eth_bond_create;
rte_eth_bond_link_monitoring_set;
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index 65df1425..5d66c4b3 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014-2015 Chelsio Communications.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Chelsio Communications nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014-2018 Chelsio Communications.
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -45,12 +18,6 @@ EXPORT_MAP := rte_pmd_cxgbe_version.map
LIBABIVER := 1
-ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
-#
-# CFLAGS for icc
-#
-CFLAGS_BASE_DRIVER = -wd188
-else
#
# CFLAGS for gcc/clang
#
@@ -59,9 +26,7 @@ ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS += -Wno-deprecated
endif
endif
-CFLAGS_BASE_DRIVER =
-endif
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
@@ -80,8 +45,14 @@ VPATH += $(SRCDIR)/base
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index f2057af1..e98dd218 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
/* This file should not be included directly. Include common.h instead. */
@@ -39,12 +11,16 @@
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_io.h>
+#include <rte_rwlock.h>
+#include <rte_ethdev.h>
#include "cxgbe_compat.h"
#include "t4_regs_values.h"
+#include "cxgbe_ofld.h"
enum {
MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */
+ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
};
struct adapter;
@@ -68,6 +44,7 @@ struct port_info {
u8 port_type; /* firmware port type */
u8 mod_type; /* firmware module type */
u8 port_id; /* physical port ID */
+ u8 pidx; /* port index for this PF */
u8 tx_chan; /* associated channel */
u8 n_rx_qsets; /* # of rx qsets */
@@ -77,6 +54,7 @@ struct port_info {
u16 *rss; /* rss table */
u8 rss_mode; /* rss mode */
u16 rss_size; /* size of VI's RSS table slice */
+ u64 rss_hf; /* RSS Hash Function */
};
/* Enable or disable autonegotiation. If this is set to enable,
@@ -196,6 +174,7 @@ struct sge_eth_rxq { /* a SW Ethernet Rx queue */
* scenario where a packet needs 32 bytes.
*/
#define ETH_COALESCE_PKT_NUM 15
+#define ETH_COALESCE_VF_PKT_NUM 7
#define ETH_COALESCE_PKT_PER_DESC 2
struct tx_eth_coal_desc {
@@ -225,6 +204,10 @@ struct eth_coalesce {
unsigned int len;
unsigned int flits;
unsigned int max;
+ __u8 ethmacdst[ETHER_ADDR_LEN];
+ __u8 ethmacsrc[ETHER_ADDR_LEN];
+ __be16 ethtype;
+ __be16 vlantci;
};
struct sge_txq {
@@ -247,6 +230,7 @@ struct sge_txq {
unsigned int equeidx; /* last sent credit request */
unsigned int last_pidx; /* last pidx recorded by tx monitor */
unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */
+ unsigned int abs_id;
int db_disabled; /* doorbell state */
unsigned short db_pidx; /* doorbell producer index */
@@ -267,16 +251,27 @@ struct sge_eth_tx_stats { /* Ethernet tx queue statistics */
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
struct sge_txq q;
struct rte_eth_dev *eth_dev; /* port that this queue belongs to */
+ struct rte_eth_dev_data *data;
struct sge_eth_tx_stats stats; /* queue statistics */
rte_spinlock_t txq_lock;
unsigned int flags; /* flags for state of the queue */
} __rte_cache_aligned;
+struct sge_ctrl_txq { /* State for an SGE control Tx queue */
+ struct sge_txq q; /* txq */
+ struct adapter *adapter; /* adapter associated with this queue */
+ rte_spinlock_t ctrlq_lock; /* control queue lock */
+ u8 full; /* the Tx ring is full */
+ u64 txp; /* number of transmits */
+ struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
+} __rte_cache_aligned;
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_rspq fw_evtq __rte_cache_aligned;
+ struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
u16 max_ethqsets; /* # of available Ethernet queue sets */
u32 stat_len; /* length of status page at ring end */
@@ -308,7 +303,7 @@ struct adapter {
struct rte_pci_device *pdev; /* associated rte pci device */
struct rte_eth_dev *eth_dev; /* first port's rte eth device */
struct adapter_params params; /* adapter parameters */
- struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */
+ struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */
struct sge sge; /* associated SGE */
/* support for single-threading access to adapter mailbox registers */
@@ -325,8 +320,76 @@ struct adapter {
unsigned int vpd_flag;
int use_unpacked_mode; /* unpacked rx mode state */
+ rte_spinlock_t win0_lock;
+
+ unsigned int clipt_start; /* CLIP table start */
+ unsigned int clipt_end; /* CLIP table end */
+ struct clip_tbl *clipt; /* CLIP table */
+
+ struct tid_info tids; /* Info used to access TID related tables */
};
+/**
+ * t4_os_rwlock_init - initialize rwlock
+ * @lock: the rwlock
+ */
+static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
+{
+ rte_rwlock_init(lock);
+}
+
+/**
+ * t4_os_write_lock - get a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_lock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_lock(lock);
+}
+
+/**
+ * t4_os_write_unlock - unlock a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_unlock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_unlock(lock);
+}
+
+/**
+ * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct port_info associated with a rte_eth_dev
+ */
+static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
+{
+ return (struct port_info *)dev->data->dev_private;
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
+{
+ return adap->port[idx];
+}
+
+/**
+ * ethdev2adap - return the adapter structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct adapter associated with a rte_eth_dev
+ */
+static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
+{
+ return ethdev2pinfo(dev)->adapter;
+}
+
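These helpers replace the old direct `&adap->port[idx]` arithmetic now that
the per-port structures are pointers. A hypothetical usage sketch inside a
driver callback:

    static void example_dump_port(const struct rte_eth_dev *dev)
    {
        struct port_info *pi = ethdev2pinfo(dev);  /* per-port private data */
        struct adapter *adap = ethdev2adap(dev);   /* owning adapter */

        printf("port index %u of %u\n",
               (unsigned int)pi->pidx, (unsigned int)adap->params.nports);
    }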
#define CXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
@@ -602,7 +665,7 @@ static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
u8 hw_addr[])
{
- struct port_info *pi = &adapter->port[port_idx];
+ struct port_info *pi = adap2pinfo(adapter, port_idx);
ether_addr_copy((struct ether_addr *)hw_addr,
&pi->eth_dev->data->mac_addrs[0]);
@@ -688,15 +751,35 @@ static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
}
/**
- * adap2pinfo - return the port_info of a port
- * @adap: the adapter
- * @idx: the port index
+ * t4_init_completion - initialize completion
+ * @c: the completion context
+ */
+static inline void t4_init_completion(struct t4_completion *c)
+{
+ c->done = 0;
+ t4_os_lock_init(&c->lock);
+}
+
+/**
+ * t4_complete - set completion as done
+ * @c: the completion context
+ */
+static inline void t4_complete(struct t4_completion *c)
+{
+ t4_os_lock(&c->lock);
+ c->done = 1;
+ t4_os_unlock(&c->lock);
+}
+
+/**
+ * cxgbe_port_viid - get the VI id of a port
+ * @dev: the device for the port
*
- * Return the port_info structure for the port of the given index.
+ * Return the VI id of the given port.
*/
-static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
{
- return &adap->port[idx];
+ return ethdev2pinfo(dev)->viid;
}
void *t4_alloc_mem(size_t size);
@@ -713,12 +796,17 @@ void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
uint16_t nb_pkts);
+int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_sge_init(struct adapter *adap);
+int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct rte_eth_dev *eth_dev, uint16_t queue_id,
unsigned int iqid, int socket_id);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
struct rte_eth_dev *eth_dev, int intr_idx,
struct sge_fl *fl, rspq_handler_t handler,
@@ -735,6 +823,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
unsigned int cnt);
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
unsigned int budget, unsigned int *work_done);
-int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);
#endif /* __T4_ADAPTER_H__ */
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 1eda57d0..157201da 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __CHELSIO_COMMON_H
@@ -36,6 +8,7 @@
#include "cxgbe_compat.h"
#include "t4_hw.h"
+#include "t4vf_hw.h"
#include "t4_chip_type.h"
#include "t4fw_interface.h"
@@ -45,6 +18,9 @@ extern "C" {
#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K
+#define T4_MEMORY_WRITE 0
+#define T4_MEMORY_READ 1
+
enum {
MAX_NPORTS = 4, /* max # of ports */
};
@@ -62,18 +38,20 @@ enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
-enum {
+enum cc_pause {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
-enum {
- FEC_RS = 1 << 0,
- FEC_BASER_RS = 1 << 1,
- FEC_RESERVED = 1 << 2,
+enum cc_fec {
+ FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */
+ FEC_RS = 1 << 1, /* Reed-Solomon */
+ FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */
};
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_frames; /* all good frames */
@@ -178,6 +156,9 @@ struct tp_params {
int vnic_shift;
int port_shift;
int protocol_shift;
+ int ethertype_shift;
+
+ u64 hash_filter_mask;
};
struct vpd_params {
@@ -209,12 +190,59 @@ struct arch_specific_params {
u16 mps_tcam_size;
};
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+ unsigned int mode; /* RSS mode */
+ union {
+ struct {
+ uint synmapen:1; /* SYN Map Enable */
+ uint syn4tupenipv6:1; /* en 4-tuple IPv6 SYNs hash */
+ uint syn2tupenipv6:1; /* en 2-tuple IPv6 SYNs hash */
+ uint syn4tupenipv4:1; /* en 4-tuple IPv4 SYNs hash */
+ uint syn2tupenipv4:1; /* en 2-tuple IPv4 SYNs hash */
+ uint ofdmapen:1; /* Offload Map Enable */
+ uint tnlmapen:1; /* Tunnel Map Enable */
+ uint tnlalllookup:1; /* Tunnel All Lookup */
+ uint hashtoeplitz:1; /* use Toeplitz hash */
+ } basicvirtual;
+ } u;
+};
+
+/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int neq; /* N egress Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
struct devlog_params devlog;
+ struct rss_params rss;
+ struct pf_resources pfres;
+ struct vf_resources vfres;
enum pcie_memwin drv_memwin;
unsigned int sf_size; /* serial flash size in bytes */
@@ -235,23 +263,46 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
+ unsigned char hash_filter;
+
enum chip_type chip; /* chip code */
struct arch_specific_params arch; /* chip specific params */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ u8 fw_caps_support; /* 32-bit Port Capabilities */
+};
+
+/* Firmware Port Capabilities types.
+ */
+typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */
+typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */
+
+enum fw_caps {
+ FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */
+ FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */
+ FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */
};
struct link_config {
- unsigned short supported; /* link capabilities */
- unsigned short advertising; /* advertised capabilities */
- unsigned int requested_speed; /* speed user has requested */
- unsigned int speed; /* actual link speed */
- unsigned char requested_fc; /* flow control user has requested */
- unsigned char fc; /* actual link flow control */
- unsigned char requested_fec; /* Forward Error Correction user */
- unsigned char fec; /* has requested and actual FEC */
- unsigned char autoneg; /* autonegotiating? */
- unsigned char link_ok; /* link up? */
+ fw_port_cap32_t pcaps; /* link capabilities */
+ fw_port_cap32_t acaps; /* advertised capabilities */
+
+ u32 requested_speed; /* speed (Mb/s) user has requested */
+ u32 speed; /* actual link speed (Mb/s) */
+
+ enum cc_pause requested_fc; /* flow control user has requested */
+ enum cc_pause fc; /* actual link flow control */
+
+ enum cc_fec auto_fec; /* Forward Error Correction
+ * "automatic" (IEEE 802.3)
+ */
+ enum cc_fec requested_fec; /* Forward Error Correction requested */
+ enum cc_fec fec; /* Forward Error Correction actual */
+
+ unsigned char autoneg; /* autonegotiating? */
+
+ unsigned char link_ok; /* link up? */
+ unsigned char link_down_rc; /* link down reason */
};
#include "adapter.h"
@@ -269,9 +320,19 @@ static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
delay, NULL);
}
+static inline int is_pf4(struct adapter *adap)
+{
+ return adap->pf == 4;
+}
+
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
+static inline int is_hashfilter(const struct adapter *adap)
+{
+ return adap->params.hash_filter;
+}
+
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
@@ -285,9 +346,12 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4vf_fw_reset(struct adapter *adap);
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
int t4_fl_pkt_align(struct adapter *adap);
+int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2);
+int t4vf_get_vfres(struct adapter *adap);
int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size,
enum chip_type chip_compat);
@@ -297,6 +361,13 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
+int t4vf_query_params(struct adapter *adap, unsigned int nparams,
+ const u32 *params, u32 *vals);
+int t4vf_get_dev_params(struct adapter *adap);
+int t4vf_get_vpd_params(struct adapter *adap);
+int t4vf_get_rss_glb_config(struct adapter *adap);
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
@@ -331,6 +402,8 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
@@ -379,6 +452,21 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
+
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals, unsigned int nregs,
unsigned int start_idx);
@@ -387,6 +475,7 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int nregs, unsigned int start_idx);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_flash_cfg_addr(struct adapter *adapter);
@@ -394,22 +483,34 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx);
unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
struct port_stats *stats,
struct port_stats *offset);
void t4_clr_port_stats(struct adapter *adap, int idx);
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps);
void t4_reset_link_config(struct adapter *adap, int idx);
int t4_get_version_info(struct adapter *adapter);
void t4_dump_version_info(struct adapter *adapter);
int t4_get_flash_params(struct adapter *adapter);
int t4_get_chip_type(struct adapter *adap, int ver);
int t4_prep_adapter(struct adapter *adapter);
+int t4vf_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+int t4vf_port_init(struct adapter *adap);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
unsigned int flags, unsigned int defq);
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq);
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw);
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx);
+void t4_read_rss_key(struct adapter *adap, u32 *key);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
@@ -421,8 +522,20 @@ int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
unsigned int t4_get_regs_len(struct adapter *adap);
+unsigned int t4vf_get_pf_from_vf(struct adapter *adap);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t4_seeprom_wp(struct adapter *adapter, int enable);
+int t4_memory_rw_addr(struct adapter *adap, int win,
+ u32 addr, u32 len, void *hbuf, int dir);
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir);
+static inline int t4_memory_rw(struct adapter *adap, int win,
+ int mtype, u32 maddr, u32 len,
+ void *hbuf, int dir)
+{
+ return t4_memory_rw_mtype(adap, win, mtype, maddr, len, hbuf, dir);
+}
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h
index cd7a9282..c0c5d0b2 100644
--- a/drivers/net/cxgbe/base/t4_chip_type.h
+++ b/drivers/net/cxgbe/base/t4_chip_type.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_CHIP_TYPE_H__
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 56f38c83..31762c9c 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <netinet/in.h>
@@ -55,9 +27,6 @@
#include "t4_regs_values.h"
#include "t4fw_interface.h"
-static void init_link_config(struct link_config *lc, unsigned int pcaps,
- unsigned int acaps);
-
/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
@@ -2167,6 +2136,91 @@ int t4_seeprom_wp(struct adapter *adapter, int enable)
}
/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of the first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST |
+ (rw ? F_FW_CMD_READ :
+ F_FW_CMD_WRITE) |
+ V_FW_LDST_CMD_ADDRSPACE(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ if (rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+ }
+}
+
+/**
+ * t4_read_rss_key - read the global RSS key
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ *
+ * Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
+}
+
+/**
+ * t4_write_rss_key - program one of the RSS keys
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ * @idx: which RSS key to write
+ *
+ * Writes one of the RSS keys with the given 320-bit value. If @idx is
+ * 0..15 the corresponding entry in the RSS key table is written,
+ * otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
+{
+ u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
+ u8 rss_key_addr_cnt = 16;
+
+ /* T6 and later: KeyMode 3 (per-VF and per-VF scramble) allows
+ * access to key addresses 16-63 by using KeyWrAddrX as
+ * index[5:4] (the upper 2 bits) into the key table
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDRX(idx >> 4) |
+ V_T6_VFWRADDR(idx) | F_KEYWREN);
+ else
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDR(idx) | F_KEYWREN);
+ }
+}
+
+/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
* @mbox: mbox to use for the FW command
@@ -2257,7 +2311,11 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
* Send this portion of the RSS table update to the firmware;
* bail out on any errors.
*/
- ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+ if (is_pf4(adapter))
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
+ NULL);
+ else
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
if (ret)
return ret;
}
@@ -2287,7 +2345,44 @@ int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
- return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adapter))
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_read_config_vi_rss - read the configured per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: where to place the configured flags
+ * @defq: where to place the id of the default RSS queue for the VI.
+ *
+ * Read configured VI-specific RSS properties.
+ */
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq)
+{
+ struct fw_rss_vi_config_cmd c;
+ unsigned int result;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
+ if (!ret) {
+ result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
+ if (defq)
+ *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
+ if (flags)
+ *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
+ }
+
+ return ret;
}
/**
@@ -2385,6 +2480,46 @@ int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
return 0;
}
+/**
+ * t4_get_pfres - retrieve PF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a physical
+ * function. The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get PF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PFVF_CMD_PFN(adapter->pf) |
+ V_FW_PFVF_CMD_VFN(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract PF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ return 0;
+}
+
/* serial flash and firmware constants and flash config file constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -2670,14 +2805,142 @@ void t4_dump_version_info(struct adapter *adapter)
G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
}
-#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
- FW_PORT_CAP_ANEG)
+#define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
+ FW_PORT_CAP32_ANEG)
+/**
+ * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
+ * @caps16: a 16-bit Port Capabilities value
+ *
+ * Returns the equivalent 32-bit Port Capabilities value.
+ */
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
+{
+ fw_port_cap32_t caps32 = 0;
+
+#define CAP16_TO_CAP32(__cap) \
+ do { \
+ if (caps16 & FW_PORT_CAP_##__cap) \
+ caps32 |= FW_PORT_CAP32_##__cap; \
+ } while (0)
+
+ CAP16_TO_CAP32(SPEED_100M);
+ CAP16_TO_CAP32(SPEED_1G);
+ CAP16_TO_CAP32(SPEED_25G);
+ CAP16_TO_CAP32(SPEED_10G);
+ CAP16_TO_CAP32(SPEED_40G);
+ CAP16_TO_CAP32(SPEED_100G);
+ CAP16_TO_CAP32(FC_RX);
+ CAP16_TO_CAP32(FC_TX);
+ CAP16_TO_CAP32(ANEG);
+ CAP16_TO_CAP32(MDIX);
+ CAP16_TO_CAP32(MDIAUTO);
+ CAP16_TO_CAP32(FEC_RS);
+ CAP16_TO_CAP32(FEC_BASER_RS);
+ CAP16_TO_CAP32(802_3_PAUSE);
+ CAP16_TO_CAP32(802_3_ASM_DIR);
+
+#undef CAP16_TO_CAP32
+
+ return caps32;
+}
+
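As a concrete instance of the mapping (bit names as defined in
t4fw_interface.h):

    /* fwcaps16_to_caps32(FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
     *     == (FW_PORT_CAP32_SPEED_10G | FW_PORT_CAP32_ANEG)
     */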
+/**
+ * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ * @caps32: a 32-bit Port Capabilities value
+ *
+ * Returns the equivalent 16-bit Port Capabilities value. Note that
+ * not all 32-bit Port Capabilities can be represented in the 16-bit
+ * Port Capabilities, so some fields/values may be lost in the translation.
+ */
+static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+ fw_port_cap16_t caps16 = 0;
+
+#define CAP32_TO_CAP16(__cap) \
+ do { \
+ if (caps32 & FW_PORT_CAP32_##__cap) \
+ caps16 |= FW_PORT_CAP_##__cap; \
+ } while (0)
+
+ CAP32_TO_CAP16(SPEED_100M);
+ CAP32_TO_CAP16(SPEED_1G);
+ CAP32_TO_CAP16(SPEED_10G);
+ CAP32_TO_CAP16(SPEED_25G);
+ CAP32_TO_CAP16(SPEED_40G);
+ CAP32_TO_CAP16(SPEED_100G);
+ CAP32_TO_CAP16(FC_RX);
+ CAP32_TO_CAP16(FC_TX);
+ CAP32_TO_CAP16(802_3_PAUSE);
+ CAP32_TO_CAP16(802_3_ASM_DIR);
+ CAP32_TO_CAP16(ANEG);
+ CAP32_TO_CAP16(MDIX);
+ CAP32_TO_CAP16(MDIAUTO);
+ CAP32_TO_CAP16(FEC_RS);
+ CAP32_TO_CAP16(FEC_BASER_RS);
+
+#undef CAP32_TO_CAP16
+
+ return caps16;
+}
+
+/* Translate Firmware Pause specification to Common Code */
+static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
+{
+ enum cc_pause cc_pause = 0;
+
+ if (fw_pause & FW_PORT_CAP32_FC_RX)
+ cc_pause |= PAUSE_RX;
+ if (fw_pause & FW_PORT_CAP32_FC_TX)
+ cc_pause |= PAUSE_TX;
+
+ return cc_pause;
+}
+
+/* Translate Common Code Pause Frame specification into Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
+{
+ fw_port_cap32_t fw_pause = 0;
+
+ if (cc_pause & PAUSE_RX)
+ fw_pause |= FW_PORT_CAP32_FC_RX;
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_FC_TX;
+
+ return fw_pause;
+}
+
+/* Translate Firmware Forward Error Correction specification to Common Code */
+static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
+{
+ enum cc_fec cc_fec = 0;
+
+ if (fw_fec & FW_PORT_CAP32_FEC_RS)
+ cc_fec |= FEC_RS;
+ if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
+ cc_fec |= FEC_BASER_RS;
+
+ return cc_fec;
+}
+
+/* Translate Common Code Forward Error Correction specification to Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
+{
+ fw_port_cap32_t fw_fec = 0;
+
+ if (cc_fec & FEC_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_RS;
+ if (cc_fec & FEC_BASER_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
+
+ return fw_fec;
+}
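/* Illustrative sketch (not part of the patch): over the bits they model,
 * the pause and FEC translators above are inverses of one another:
 *
 *	fwcap_to_cc_pause(cc_to_fwcap_pause(PAUSE_RX | PAUSE_TX))
 *		== (PAUSE_RX | PAUSE_TX)
 *	cc_to_fwcap_fec(fwcap_to_cc_fec(FW_PORT_CAP32_FEC_RS))
 *		== FW_PORT_CAP32_FEC_RS
 */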
/**
* t4_link_l1cfg - apply link configuration to MAC/PHY
- * @phy: the PHY to setup
- * @mac: the MAC to setup
- * @lc: the requested link configuration
+ * @adap: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
*
* Set up a port's MAC and PHY according to a desired link configuration.
* - If the PHY can auto-negotiate, first decide what to advertise, then
@@ -2689,48 +2952,60 @@ void t4_dump_version_info(struct adapter *adapter)
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
- struct fw_port_cmd c;
- unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
- unsigned int fc, fec;
+ unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
+ struct fw_port_cmd cmd;
lc->link_ok = 0;
- fc = 0;
- if (lc->requested_fc & PAUSE_RX)
- fc |= FW_PORT_CAP_FC_RX;
- if (lc->requested_fc & PAUSE_TX)
- fc |= FW_PORT_CAP_FC_TX;
-
- fec = 0;
- if (lc->requested_fec & FEC_RS)
- fec |= FW_PORT_CAP_FEC_RS;
- if (lc->requested_fec & FEC_BASER_RS)
- fec |= FW_PORT_CAP_FEC_BASER_RS;
- if (lc->requested_fec & FEC_RESERVED)
- fec |= FW_PORT_CAP_FEC_RESERVED;
- memset(&c, 0, sizeof(c));
- c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
- V_FW_PORT_CMD_PORTID(port));
- c.action_to_len16 =
- cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
-
- if (!(lc->supported & FW_PORT_CAP_ANEG)) {
- c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
- fc | fec);
+ fw_fc = cc_to_fwcap_pause(lc->requested_fc);
+
+ /* Convert Common Code Forward Error Control settings into the
+ * Firmware's API. If the current Requested FEC has "Automatic"
+ * (IEEE 802.3) specified, then we use whatever the Firmware
+	 * sent us as part of its IEEE 802.3-based interpretation of
+ * the Transceiver Module EPROM FEC parameters. Otherwise we
+ * use whatever is in the current Requested FEC settings.
+ */
+ if (lc->requested_fec & FEC_AUTO)
+ cc_fec = lc->auto_fec;
+ else
+ cc_fec = lc->requested_fec;
+ fw_fec = cc_to_fwcap_fec(cc_fec);
+
+ /* Figure out what our Requested Port Capabilities are going to be.
+ */
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
- lc->fec = lc->requested_fec;
+ lc->fec = cc_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
- fec | mdi);
+ rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
- lc->fec = lc->requested_fec;
+ lc->fec = cc_fec;
} else {
- c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi);
+ rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ /* And send that on to the Firmware ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_PORT_CMD_PORTID(port));
+ cmd.action_to_len16 =
+ cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_L1_CFG :
+ FW_PORT_ACTION_L1_CFG32) |
+ FW_LEN16(cmd));
+
+ if (fw_caps == FW_CAPS16)
+ cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
+ else
+ cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
+
+ return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
}
/**
@@ -3823,12 +4098,17 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
- V_FW_VI_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
+ V_FW_VI_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -3874,7 +4154,11 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
- return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+ if (is_pf4(adap))
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
+ sleep_ok);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -3921,7 +4205,10 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
V_FW_VI_MAC_CMD_IDX(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret == 0) {
ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
@@ -3955,7 +4242,10 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
V_FW_VI_ENABLE_CMD_EEN(tx_en) |
V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
FW_LEN16(c));
- return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap))
+ return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
}
/**
@@ -3996,15 +4286,20 @@ int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
- V_FW_IQ_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
V_FW_IQ_CMD_IQSTOP(!start) |
FW_LEN16(c));
c.iqid = cpu_to_be16(iqid);
c.fl0id = cpu_to_be16(fl0id);
c.fl1id = cpu_to_be16(fl1id);
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap)) {
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ } else {
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+ }
}
/**
@@ -4028,14 +4323,19 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
- V_FW_IQ_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
c.iqid = cpu_to_be16(iqid);
c.fl0id = cpu_to_be16(fl0id);
c.fl1id = cpu_to_be16(fl1id);
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -4055,11 +4355,203 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
- V_FW_EQ_ETH_CMD_PFN(pf) |
- V_FW_EQ_ETH_CMD_VFN(vf));
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+		c.op_to_vfn |= cpu_to_be32(V_FW_EQ_ETH_CMD_PFN(pf) |
+					   V_FW_EQ_ETH_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_link_down_rc_str - return a string for a Link Down Reason Code
+ * @link_down_rc: Link Down Reason Code
+ *
+ * Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+
+ if (link_down_rc >= ARRAY_SIZE(reason))
+ return "Bad Reason Code";
+
+ return reason[link_down_rc];
+}
+
+/* Return the highest speed set in the port capabilities, in Mb/s. */
+static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
+{
+#define TEST_SPEED_RETURN(__caps_speed, __speed) \
+ do { \
+ if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
+ return __speed; \
+ } while (0)
+
+ TEST_SPEED_RETURN(100G, 100000);
+ TEST_SPEED_RETURN(50G, 50000);
+ TEST_SPEED_RETURN(40G, 40000);
+ TEST_SPEED_RETURN(25G, 25000);
+ TEST_SPEED_RETURN(10G, 10000);
+ TEST_SPEED_RETURN(1G, 1000);
+ TEST_SPEED_RETURN(100M, 100);
+
+#undef TEST_SPEED_RETURN
+
+ return 0;
+}
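/* Illustrative sketch (not part of the patch): the TEST_SPEED_RETURN()
 * checks run from fastest to slowest, so a capability word advertising
 * several speeds decodes to the highest one:
 *
 *	fwcap_to_speed(FW_PORT_CAP32_SPEED_1G | FW_PORT_CAP32_SPEED_10G |
 *		       FW_PORT_CAP32_SPEED_25G) == 25000
 */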
+
+/**
+ * t4_handle_get_port_info - process a FW reply message
+ * @pi: the port info
+ * @rpl: start of the FW message
+ *
+ * Processes a GET_PORT_INFO FW reply message.
+ */
+static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+ const struct fw_port_cmd *cmd = (const void *)rpl;
+ int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
+ fw_port_cap32_t pcaps, acaps, linkattr;
+ struct link_config *lc = &pi->link_cfg;
+ struct adapter *adapter = pi->adapter;
+ enum fw_port_module_type mod_type;
+ enum fw_port_type port_type;
+ unsigned int speed, fc, fec;
+ int link_ok, linkdnrc;
+
+ /* Extract the various fields from the Port Information message.
+ */
+ switch (action) {
+ case FW_PORT_ACTION_GET_PORT_INFO: {
+ u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
+
+ link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
+ pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
+ acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
+
+ /* Unfortunately the format of the Link Status in the old
+ * 16-bit Port Information message isn't the same as the
+ * 16-bit Port Capabilities bitfield used everywhere else ...
+ */
+ linkattr = 0;
+ if (lstatus & F_FW_PORT_CMD_RXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_RX;
+ if (lstatus & F_FW_PORT_CMD_TXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_TX;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ linkattr |= FW_PORT_CAP32_SPEED_100M;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ linkattr |= FW_PORT_CAP32_SPEED_1G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ linkattr |= FW_PORT_CAP32_SPEED_10G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
+ linkattr |= FW_PORT_CAP32_SPEED_25G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ linkattr |= FW_PORT_CAP32_SPEED_40G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
+ linkattr |= FW_PORT_CAP32_SPEED_100G;
+
+ break;
+ }
+
+ case FW_PORT_ACTION_GET_PORT_INFO32: {
+ u32 lstatus32 =
+ be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
+
+ link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
+ pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd->u.info32.acaps32);
+ linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
+ break;
+ }
+
+ default:
+ dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
+ be32_to_cpu(cmd->action_to_len16));
+ return;
+ }
+
+ fec = fwcap_to_cc_fec(acaps);
+
+ fc = fwcap_to_cc_pause(linkattr);
+ speed = fwcap_to_speed(linkattr);
+
+ if (mod_type != pi->mod_type) {
+ lc->auto_fec = fec;
+ pi->port_type = port_type;
+ pi->mod_type = mod_type;
+ t4_os_portmod_changed(adapter, pi->pidx);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc || fec != lc->fec) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ lc->link_down_rc = linkdnrc;
+			dev_warn(adapter, "Port %d link down, reason: %s\n",
+ pi->tx_chan, t4_link_down_rc_str(linkdnrc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->fec = fec;
+ lc->pcaps = pcaps;
+ lc->acaps = acaps & ADVERT_MASK;
+
+ if (lc->acaps & FW_PORT_CAP32_ANEG) {
+ lc->autoneg = AUTONEG_ENABLE;
+ } else {
+			/* When Autoneg is disabled, the user needs to
+			 * set a single speed.
+ * Similar to cxgb4_ethtool.c: set_link_ksettings
+ */
+ lc->acaps = 0;
+ lc->requested_speed = fwcap_to_speed(acaps);
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+ }
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(pf) |
+ V_FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4084,67 +4576,21 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
unsigned int action =
G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
- if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ if (opcode == FW_PORT_CMD &&
+ (action == FW_PORT_ACTION_GET_PORT_INFO ||
+ action == FW_PORT_ACTION_GET_PORT_INFO32)) {
/* link/module state change message */
- unsigned int speed = 0, fc = 0, i;
int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
struct port_info *pi = NULL;
- struct link_config *lc;
- u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
- u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
-
- if (stat & F_FW_PORT_CMD_RXPAUSE)
- fc |= PAUSE_RX;
- if (stat & F_FW_PORT_CMD_TXPAUSE)
- fc |= PAUSE_TX;
- if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
- speed = ETH_SPEED_NUM_100M;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
- speed = ETH_SPEED_NUM_1G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
- speed = ETH_SPEED_NUM_10G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
- speed = ETH_SPEED_NUM_25G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
- speed = ETH_SPEED_NUM_40G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
- speed = ETH_SPEED_NUM_100G;
+ int i;
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
if (pi->tx_chan == chan)
break;
}
- lc = &pi->link_cfg;
- if (mod != pi->mod_type) {
- pi->mod_type = mod;
- t4_os_portmod_changed(adap, i);
- }
- if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc) { /* something changed */
- if (!link_ok && lc->link_ok) {
- static const char * const reason[] = {
- "Link Down",
- "Remote Fault",
- "Auto-negotiation Failure",
- "Reserved",
- "Insufficient Airflow",
- "Unable To Determine Reason",
- "No RX Signal Detected",
- "Reserved",
- };
- unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);
-
- dev_warn(adap, "Port %d link down, reason: %s\n",
- chan, reason[rc]);
- }
- lc->link_ok = link_ok;
- lc->speed = speed;
- lc->fc = fc;
- lc->supported = be16_to_cpu(p->u.info.pcap);
- }
+ t4_handle_get_port_info(pi, rpl);
} else {
dev_warn(adap, "Unknown firmware reply %d\n", opcode);
return -EINVAL;
@@ -4173,12 +4619,10 @@ void t4_reset_link_config(struct adapter *adap, int idx)
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void init_link_config(struct link_config *lc, unsigned int pcaps,
- unsigned int acaps)
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps)
{
- unsigned int fec;
-
- lc->supported = pcaps;
+ lc->pcaps = pcaps;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = 0;
@@ -4188,21 +4632,16 @@ static void init_link_config(struct link_config *lc, unsigned int pcaps,
* For Forward Error Control, we default to whatever the Firmware
* tells us the Link is currently advertising.
*/
- fec = 0;
- if (acaps & FW_PORT_CAP_FEC_RS)
- fec |= FEC_RS;
- if (acaps & FW_PORT_CAP_FEC_BASER_RS)
- fec |= FEC_BASER_RS;
- if (acaps & FW_PORT_CAP_FEC_RESERVED)
- fec |= FEC_RESERVED;
- lc->requested_fec = fec;
- lc->fec = fec;
-
- if (lc->supported & FW_PORT_CAP_ANEG) {
- lc->advertising = lc->supported & ADVERT_MASK;
+ lc->auto_fec = fwcap_to_cc_fec(acaps);
+ lc->requested_fec = FEC_AUTO;
+ lc->fec = lc->auto_fec;
+
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
+ lc->acaps = lc->pcaps & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
} else {
- lc->advertising = 0;
+ lc->acaps = 0;
lc->autoneg = AUTONEG_DISABLE;
}
}
@@ -4242,9 +4681,8 @@ struct flash_desc {
int t4_get_flash_params(struct adapter *adapter)
{
/*
- * Table for non-Numonix supported flash parts. Numonix parts are left
- * to the preexisting well-tested code. All flash parts have 64KB
- * sectors.
+	 * Table for non-standard supported Flash parts. Note that all Flash
+ * parts must have 64KB sectors.
*/
static struct flash_desc supported_flash[] = {
{ 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
@@ -4253,7 +4691,7 @@ int t4_get_flash_params(struct adapter *adapter)
int ret;
u32 flashid = 0;
unsigned int part, manufacturer;
- unsigned int density, size;
+ unsigned int density, size = 0;
/**
* Issue a Read ID Command to the Flash part. We decode supported
@@ -4268,6 +4706,9 @@ int t4_get_flash_params(struct adapter *adapter)
if (ret < 0)
return ret;
+ /**
+ * Check to see if it's one of our non-standard supported Flash parts.
+ */
for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
if (supported_flash[part].vendor_and_model_id == flashid) {
adapter->params.sf_size =
@@ -4278,6 +4719,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
}
+ /**
+	 * Decode Flash part size. The code below looks repetitive with
+	 * common encodings, but that's not guaranteed in the JEDEC
+	 * specification for the Read JEDEC ID command. The only thing that
+	 * the JEDEC specification guarantees is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
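	/* Illustrative decode with a hypothetical ID: flashid = 0x0018ba20
	 * would yield manufacturer = 0x20 (Micron) and density = 0x18, which
	 * the Micron table below maps to size = 1 << 24 (16MB).
	 */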
manufacturer = flashid & 0xff;
switch (manufacturer) {
case 0x20: { /* Micron/Numonix */
@@ -4314,21 +4764,81 @@ int t4_get_flash_params(struct adapter *adapter)
case 0x22:
size = 1 << 28; /* 256MB */
break;
- default:
- dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
+ break;
+ }
- adapter->params.sf_size = size;
- adapter->params.sf_nsec = size / SF_SEC_SIZE;
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /**
+ * This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x17:
+ size = 1 << 26; /* 64MB */
+ break;
+ }
break;
}
- default:
- dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
- return -EINVAL;
+
+ case 0xc2: { /* Macronix */
+ /**
+ * This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
}
+ case 0xef: { /* Winbond */
+ /**
+ * This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adapter,
+ "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /**
+ * Store decoded Flash size and fall through into vetting code.
+ */
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
+
found:
/*
* We should reject adapters with FLASHes which are too small. So, emit
@@ -4633,6 +5143,8 @@ int t4_init_tp_params(struct adapter *adap)
adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
F_PROTOCOL);
+ adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
+ F_ETHERTYPE);
/*
* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
@@ -4641,6 +5153,11 @@ int t4_init_tp_params(struct adapter *adap)
if ((adap->params.tp.ingress_config & F_VNIC) == 0)
adap->params.tp.vnic_shift = -1;
+ v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask = v;
+ v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask |= ((u64)v << 32);
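	/* The two 32-bit reads above pack low-word-first into the 64-bit
	 * mask; e.g. (hypothetical values) 0x11111111 from LE_3 and
	 * 0x22222222 from LE_4 give hash_filter_mask = 0x2222222211111111.
	 */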
+
return 0;
}
@@ -4723,47 +5240,305 @@ int t4_init_rss_mode(struct adapter *adap, int mbox)
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
- u8 addr[6];
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ struct fw_port_cmd cmd;
int ret, i, j = 0;
- struct fw_port_cmd c;
+ int mdio_addr;
+ u32 action;
+ u8 addr[6];
- memset(&c, 0, sizeof(c));
+ memset(&cmd, 0, sizeof(cmd));
for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
unsigned int rss_size = 0;
- struct port_info *p = adap2pinfo(adap, i);
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_READ |
- V_FW_PORT_CMD_PORTID(j));
- c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(
- FW_PORT_ACTION_GET_PORT_INFO) |
- FW_LEN16(c));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ /* If we haven't yet determined whether we're talking to
+ * Firmware which knows the new 32-bit Port Capabilities, it's
+ * time to find out now. This will also tell new Firmware to
+ * send us Port Status Updates using the new 32-bit Port
+ * Capabilities version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val, caps;
+
+ caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32;
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(caps));
+ val = 1;
+ ret = t4_set_params(adap, mbox, pf, vf, 1, &param,
+ &val);
+ fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16;
+ adap->params.fw_caps_support = fw_caps;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(j));
+ action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32;
+ cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
+ FW_LEN16(cmd));
+ ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
if (ret)
return ret;
+ /* Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus =
+ be32_to_cpu(cmd.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1;
+ pcaps = be16_to_cpu(cmd.u.info.pcap);
+ acaps = be16_to_cpu(cmd.u.info.acap);
+ pcaps = fwcaps16_to_caps32(pcaps);
+ acaps = fwcaps16_to_caps32(acaps);
+ } else {
+ u32 lstatus32 =
+ be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1;
+ pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd.u.info32.acaps32);
+ }
+
ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
if (ret < 0)
return ret;
- p->viid = ret;
- p->tx_chan = j;
- p->rss_size = rss_size;
+ pi->viid = ret;
+ pi->tx_chan = j;
+ pi->rss_size = rss_size;
t4_os_set_hw_addr(adap, i, addr);
- ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
- p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
- G_FW_PORT_CMD_MDIOADDR(ret) : -1;
- p->port_type = G_FW_PORT_CMD_PTYPE(ret);
- p->mod_type = FW_PORT_MOD_TYPE_NA;
+ pi->port_type = port_type;
+ pi->mdio_addr = mdio_addr;
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
- init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap),
- be16_to_cpu(c.u.info.acap));
+ init_link_config(&pi->link_cfg, pcaps, acaps);
j++;
}
return 0;
}
+
+/**
+ * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @addr: address within adapter memory
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address and host buffer must be aligned on 32-bit
+ * boundaries; the length may be arbitrary.
+ *
+ * NOTES:
+ * 1. The memory is transferred as a raw byte sequence from/to the
+ * firmware's memory. If this memory contains data structures which
+ * contain multi-byte integers, it's the caller's responsibility to
+ * perform appropriate byte order conversions.
+ *
+ * 2. It is the Caller's responsibility to ensure that no other code
+ * uses the specified PCI-E Memory Window while this routine is
+ * using it. This is typically done via the use of OS-specific
+ * locks, etc.
+ */
+int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 pos, offset, resid;
+ u32 win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
+
+ /* Argument sanity checks ...*/
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
+ return -EINVAL;
+ buf = (u32 *)hbuf;
+
+ /* It's convenient to be able to handle lengths which aren't a
+	 * multiple of 32 bits because we often end up transferring files to
+ * the firmware. So we'll handle that by normalizing the length here
+ * and then handling any residual transfer at the end.
+ */
+ resid = len & 0x3;
+ len -= resid;
+
+ /* Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ win));
+ mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
+ mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
+
+ win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
+
+ /* Calculate our initial PCI-E Memory Window Position and Offset into
+ * that Window.
+ */
+ pos = addr & ~(mem_aperture - 1);
+ offset = addr - pos;
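	/* E.g. (hypothetical values) with a 64KB aperture
	 * (mem_aperture == 0x10000), addr = 0x12345 gives
	 * pos = 0x10000 and offset = 0x2345.
	 */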
+
+ /* Set up initial PCI-E Memory Window to cover the start of our
+ * transfer. (Read it back to ensure that changes propagate before we
+ * attempt to use the new value.)
+ */
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
+ pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
+
+ /* Transfer data to/from the adapter as long as there's an integral
+ * number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+	 * Little-Endian "swizzle." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzels.
+	 * swizzles.
+ while (len > 0) {
+ if (dir == T4_MEMORY_READ)
+ *buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+ else
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(*buf++));
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+
+ /* If we've reached the end of our current window aperture,
+		 * move the PCI-E Memory Window on to the next. Note that
+		 * doing this here, even once "len" has reached 0, sets up
+ * the PCI-E Memory Window for a possible final residual
+ * transfer below ...
+ */
+ if (offset == mem_aperture) {
+ pos += mem_aperture;
+ offset = 0;
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win), pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win));
+ }
+ }
+
+ /* If the original transfer had a length which wasn't a multiple of
+	 * 32 bits, now's where we need to finish off the transfer of the
+ * residual amount. The PCI-E Memory Window has already been moved
+ * above (if necessary) to cover this final transfer.
+ */
+ if (resid) {
+ union {
+ u32 word;
+ char byte[4];
+ } last;
+ unsigned char *bp;
+ int i;
+
+ if (dir == T4_MEMORY_READ) {
+ last.word = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+			for (bp = (unsigned char *)buf, i = 0; i < resid; i++)
+ bp[i] = last.byte[i];
+ } else {
+ last.word = *buf;
+ for (i = resid; i < 4; i++)
+ last.byte[i] = 0;
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(last.word));
+ }
+ }
+
+ return 0;
+}
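/* Illustrative sketch of the swizzle reasoning above (not part of the
 * patch): if adapter memory holds the bytes 0x11 0x22 0x33 0x44 at
 * increasing addresses, the window read returns 0x44332211. Converting
 * with le32_to_cpu() stores the bytes back in adapter order on either
 * host endianness:
 *
 *	LE host: le32_to_cpu(0x44332211) == 0x44332211 -> 11 22 33 44
 *	BE host: le32_to_cpu(0x44332211) == 0x11223344 -> 11 22 33 44
 */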
+
+/**
+ * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @maddr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
+ * provides an (memory type, address within memory type) interface.
+ */
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 mtype_offset;
+ u32 edc_size, mc_size;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ */
+ edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1) {
+ mtype_offset = (mtype * (edc_size * 1024 * 1024));
+ } else {
+ mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
+ A_MA_EXT_MEMORY0_BAR));
+ mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ return t4_memory_rw_addr(adap, win,
+ mtype_offset + maddr, len,
+ hbuf, dir);
+}
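/* Illustrative offset arithmetic for the table above (hypothetical sizes,
 * not part of the patch): with a 256MB EDC (edc_size == 256) and a 1GB MC0
 * (mc_size == 1024):
 *
 *	MEM_EDC0: mtype_offset = 0
 *	MEM_EDC1: mtype_offset = 1 * 256MB
 *	MEM_MC0:  mtype_offset = 2 * 256MB
 *	MEM_MC1:  mtype_offset = (2 * 256 + 1024) * 1MB = 1536MB
 */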
diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h
index 07498841..e77563df 100644
--- a/drivers/net/cxgbe/base/t4_hw.h
+++ b/drivers/net/cxgbe/base/t4_hw.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_HW_H
@@ -70,6 +42,10 @@ enum {
SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE,
};
+enum {
+ TCB_SIZE = 128, /* TCB size */
+};
+
struct sge_qstat { /* data written to SGE queue status entries */
__be32 qid;
__be16 cidx;
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 6acd749a..5d433c91 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -1,40 +1,21 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef T4_MSG_H
#define T4_MSG_H
enum {
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TID_RELEASE = 0x1A,
+ CPL_ACT_OPEN_RPL = 0x25,
+ CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_SET_TCB_RPL = 0x3A,
+ CPL_ACT_OPEN_REQ6 = 0x83,
CPL_SGE_EGR_UPDATE = 0xA5,
CPL_FW4_MSG = 0xC0,
CPL_FW6_MSG = 0xE0,
@@ -42,6 +23,20 @@ enum {
CPL_TX_PKT_XT = 0xEE,
};
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_FULL = 3,
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+};
+
enum { /* TX_PKT_XT checksum types */
TX_CSUM_TCPIP = 8,
TX_CSUM_UDPIP = 9,
@@ -53,6 +48,24 @@ union opcode_tid {
__u8 opcode;
};
+#define S_CPL_OPCODE 24
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24 bits */
+#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd))))
+
+/* partitioning of TID fields that also carry a queue id */
+#define S_TID_TID 0
+#define M_TID_TID 0x3fff
+#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
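/* Illustrative usage (not part of the patch): for a CPL reply built with
 * MK_OPCODE_TID(CPL_SET_TCB_RPL, tid), GET_TID() recovers the 24-bit tid
 * and G_TID_TID() strips any queue id carried in its upper bits:
 *
 *	unsigned int tid = GET_TID(rpl);	// 24-bit tid field
 *	unsigned int ftid = G_TID_TID(tid);	// low 14 bits
 */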
+
struct rss_header {
__u8 opcode;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -94,6 +107,169 @@ struct work_request_hdr {
#define WR_HDR_SIZE 0
#endif
+#define S_COOKIE 5
+#define M_COOKIE 0x7
+#define V_COOKIE(x) ((x) << S_COOKIE)
+#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
+
+/* option 0 fields */
+#define S_TX_CHAN 2
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+
+#define S_DELACK 5
+#define V_DELACK(x) ((x) << S_DELACK)
+
+#define S_NON_OFFLOAD 7
+#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD)
+#define F_NON_OFFLOAD V_NON_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+
+#define S_SMAC_SEL 28
+#define V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL)
+
+#define S_TCAM_BYPASS 48
+#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+
+#define S_RSS_QUEUE_VALID 10
+#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID)
+#define F_RSS_QUEUE_VALID V_RSS_QUEUE_VALID(1U)
+
+#define S_CONG_CNTRL 14
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+
+#define S_RX_CHANNEL 26
+#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
+#define F_RX_CHANNEL V_RX_CHANNEL(1U)
+
+#define S_CCTRL_ECN 27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+
+#define S_T5_OPT_2_VALID 31
+#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
+#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)
+
+struct cpl_t6_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+struct cpl_t6_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_FILTER_TUPLE 24
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+
+struct cpl_act_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define S_AOPEN_STATUS 0
+#define M_AOPEN_STATUS 0xFF
+#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
+
+#define S_AOPEN_ATID 8
+#define M_AOPEN_ATID 0xFFFFFF
+#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define S_WORD 0
+#define V_WORD(x) ((x) << S_WORD)
+
+/* cpl_get_tcb.reply_ctrl fields */
+#define S_QUEUENO 0
+#define V_QUEUENO(x) ((x) << S_QUEUENO)
+
+#define S_REPLY_CHAN 14
+#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
+
+#define S_NO_REPLY 15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __u8 cookie;
+ __u8 status;
+ __be64 oldval;
+};
+
+/* cpl_abort_req status command code
+ */
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
struct cpl_tx_data {
union opcode_tid ot;
__be32 len;
@@ -299,7 +475,13 @@ struct cpl_fw6_msg {
__be64 data[4];
};
+/* ULP_TX opcodes */
+enum {
+ ULP_TX_PKT = 4
+};
+
enum {
+ ULP_TX_SC_NOOP = 0x80,
ULP_TX_SC_IMM = 0x81,
ULP_TX_SC_DSGL = 0x82,
ULP_TX_SC_ISGL = 0x83
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
index 1230e738..5f5cbe04 100644
--- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_PCI_ID_TBL_H__
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 1100e16f..6f872edc 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#define MYPF_BASE 0x1b000
@@ -77,6 +49,7 @@
#define SGE_BASE_ADDR 0x1000
#define A_SGE_PF_KDOORBELL 0x0
+#define A_SGE_VF_KDOORBELL 0x0
#define S_QID 15
#define M_QID 0x1ffffU
@@ -103,6 +76,9 @@
#define A_SGE_PF_GTS 0x4
+#define T4VF_SGE_BASE_ADDR 0x0000
+#define A_SGE_VF_GTS 0x4
+
#define S_INGRESSQID 16
#define M_INGRESSQID 0xffffU
#define V_INGRESSQID(x) ((x) << S_INGRESSQID)
@@ -191,6 +167,8 @@
#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014
+
#define S_ERR_CPL_EXCEED_IQE_SIZE 22
#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U)
@@ -280,6 +258,11 @@
#define A_SGE_CONM_CTRL 0x1094
+#define S_T6_EGRTHRESHOLDPACKING 16
+#define M_T6_EGRTHRESHOLDPACKING 0xffU
+#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \
+ M_T6_EGRTHRESHOLDPACKING)
+
#define S_EGRTHRESHOLD 8
#define M_EGRTHRESHOLD 0x3fU
#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
@@ -370,6 +353,7 @@
#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)
#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
#define A_SGE_CONTROL2 0x1124
@@ -443,6 +427,8 @@
/* registers for module CIM */
#define CIM_BASE_ADDR 0x7b00
+#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0
+
#define A_CIM_PF_MAILBOX_DATA 0x240
#define A_CIM_PF_MAILBOX_CTRL 0x280
@@ -462,6 +448,8 @@
#define V_UPCRST(x) ((x) << S_UPCRST)
#define F_UPCRST V_UPCRST(1U)
+#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16
+
/* registers for module TP */
#define A_TP_OUT_CONFIG 0x7d04
@@ -470,6 +458,7 @@
#define F_CRXPKTENC V_CRXPKTENC(1U)
#define TP_BASE_ADDR 0x7d00
+#define A_TP_CMM_TCB_BASE 0x7d10
#define A_TP_TIMER_RESOLUTION 0x7d90
@@ -503,9 +492,34 @@
#define V_MTUVALUE(x) ((x) << S_MTUVALUE)
#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE)
+#define A_TP_RSS_CONFIG_VRT 0x7e00
+
+#define S_KEYMODE 6
+#define M_KEYMODE 0x3U
+#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE)
+
+#define S_KEYWRADDR 0
+#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR)
+
+#define S_KEYWREN 4
+#define V_KEYWREN(x) ((x) << S_KEYWREN)
+#define F_KEYWREN V_KEYWREN(1U)
+
+#define S_KEYWRADDRX 30
+#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX)
+
+#define S_KEYEXTEND 26
+#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND)
+#define F_KEYEXTEND V_KEYEXTEND(1U)
+
+#define S_T6_VFWRADDR 8
+#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR)
+
#define A_TP_PIO_ADDR 0x7e40
#define A_TP_PIO_DATA 0x7e44
+#define A_TP_RSS_SECRET_KEY0 0x40
+
#define A_TP_VLAN_PRI_MAP 0x140
#define S_FRAGMENTATION 9
@@ -558,8 +572,27 @@
#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR)
#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U)
+#define S_RM_OVLAN 9
+#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN)
+
+/* registers for module MA */
+#define A_MA_EDRAM0_BAR 0x77c0
+
+#define S_EDRAM0_SIZE 0
+#define M_EDRAM0_SIZE 0xfffU
+#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
+#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+
+#define A_MA_EXT_MEMORY0_BAR 0x77c8
+
+#define S_EXT_MEM0_SIZE 0
+#define M_EXT_MEM0_SIZE 0xfffU
+#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
+#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+
/* registers for module MPS */
#define MPS_BASE_ADDR 0x9000
+#define T4VF_MPS_BASE_ADDR 0x0100
#define S_REPLICATE 11
#define V_REPLICATE(x) ((x) << S_REPLICATE)
@@ -766,6 +799,69 @@
#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
+#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
+#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_L 0x90
+#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98
+#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_L 0xa0
+#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8
+#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0
+#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0
+#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0
+#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
+#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+
+#define A_MPS_PORT0_RX_IVLAN 0x3011c
+
+#define S_IVLAN_ETYPE 0
+#define M_IVLAN_ETYPE 0xffffU
+#define V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE)
+
+#define MPS_PORT_RX_IVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_IVLAN(idx) \
+ (A_MPS_PORT0_RX_IVLAN + (idx) * MPS_PORT_RX_IVLAN_STRIDE)
+
+#define A_MPS_PORT0_RX_OVLAN0 0x30120
+
+#define S_OVLAN_MASK 16
+#define M_OVLAN_MASK 0xffffU
+#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK)
+
+#define S_OVLAN_ETYPE 0
+#define M_OVLAN_ETYPE 0xffffU
+#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE)
+
+#define MPS_PORT_RX_OVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_OVLAN_BASE(idx) \
+(A_MPS_PORT0_RX_OVLAN0 + (idx) * MPS_PORT_RX_OVLAN_STRIDE)
+#define MPS_PORT_RX_OVLAN_REG(idx, reg) (MPS_PORT_RX_OVLAN_BASE(idx) + (reg))
+
+#define A_RX_OVLAN0 0x0
+#define A_RX_OVLAN1 0x4
+#define A_RX_OVLAN2 0x8
+
+#define A_MPS_PORT0_RX_CTL 0x30100
+
+#define S_OVLAN_EN0 0
+#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0)
+#define F_OVLAN_EN0 V_OVLAN_EN0(1)
+
+#define S_OVLAN_EN1 1
+#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1)
+#define F_OVLAN_EN1 V_OVLAN_EN1(1)
+
+#define S_OVLAN_EN2 2
+#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2)
+#define F_OVLAN_EN2 V_OVLAN_EN2(1)
+
+#define S_IVLAN_EN 4
+#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN)
+#define F_IVLAN_EN V_IVLAN_EN(1)
+
+#define MPS_PORT_RX_CTL_STRIDE 0x4000
+#define MPS_PORT_RX_CTL(idx) \
+ (A_MPS_PORT0_RX_CTL + (idx) * MPS_PORT_RX_CTL_STRIDE)
+
/* registers for module ULP_RX */
#define ULP_RX_BASE_ADDR 0x19150
@@ -823,6 +919,7 @@
#define F_PFCIM V_PFCIM(1U)
#define A_PL_WHOAMI 0x19400
+#define A_PL_VF_WHOAMI 0x0
#define A_PL_RST 0x19428
@@ -837,8 +934,21 @@
#define F_PIORSTMODE V_PIORSTMODE(1U)
#define A_PL_REV 0x1943c
+#define A_PL_VF_REV 0x4
#define S_REV 0
#define M_REV 0xfU
#define V_REV(x) ((x) << S_REV)
#define G_REV(x) (((x) >> S_REV) & M_REV)
+
+/* registers for module LE */
+#define A_LE_DB_CONFIG 0x19c04
+
+#define S_HASHEN 20
+#define V_HASHEN(x) ((x) << S_HASHEN)
+#define F_HASHEN V_HASHEN(1U)
+
+#define A_LE_DB_TID_HASHBASE 0x19df8
+
+#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
+#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0
diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h
index 9085ff6d..a9414d20 100644
--- a/drivers/net/cxgbe/base/t4_regs_values.h
+++ b/drivers/net/cxgbe/base/t4_regs_values.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_REGS_VALUES_H__
diff --git a/drivers/net/cxgbe/base/t4_tcb.h b/drivers/net/cxgbe/base/t4_tcb.h
new file mode 100644
index 00000000..25435f9f
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4_tcb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _T4_TCB_DEFS_H
+#define _T4_TCB_DEFS_H
+
+/* 105:96 */
+#define W_TCB_RSS_INFO 3
+#define S_TCB_RSS_INFO 0
+#define M_TCB_RSS_INFO 0x3ffULL
+#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO)
+
+/* 191:160 */
+#define W_TCB_TIMESTAMP 5
+#define S_TCB_TIMESTAMP 0
+#define M_TCB_TIMESTAMP 0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+/* 223:192 */
+#define S_TCB_T_RTT_TS_RECENT_AGE 0
+#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+#endif /* _T4_TCB_DEFS_H */
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 6ca4f318..e80b58a3 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _T4FW_INTERFACE_H_
@@ -82,8 +54,13 @@ enum fw_memtype {
********************************/
enum fw_wr_opcodes {
+ FW_FILTER_WR = 0x02,
+ FW_ULPTX_WR = 0x04,
+ FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_PKTS_WR = 0x09,
+ FW_ETH_TX_PKT_VM_WR = 0x11,
+ FW_ETH_TX_PKTS_VM_WR = 0x12,
FW_ETH_TX_PKTS2_WR = 0x78,
};
@@ -102,6 +79,11 @@ struct fw_wr_hdr {
#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER
+ */
+#define S_FW_WR_ATOMIC 23
+#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC)
+
/* work request immediate data length (hi)
*/
#define S_FW_WR_IMMDLEN 0
@@ -118,6 +100,11 @@ struct fw_wr_hdr {
#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U)
+/* flow context identifier (lo)
+ */
+#define S_FW_WR_FLOWID 8
+#define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID)
+
/* length in units of 16-bytes (lo)
*/
#define S_FW_WR_LEN16 0
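All of these fields use the same S_/M_/V_/G_ accessor pattern: V_ packs a value into its bit position, G_ extracts it back out, and single-bit flags additionally get an F_ constant. A hedged sketch of composing the two header words of a work request (flowid and len16 are illustrative variables; V_FW_WR_LEN16 is assumed to accompany the S_ definition shown):

    u32 hi = V_FW_WR_OP(FW_ETH_TX_PKT_WR) | V_FW_WR_ATOMIC(0); /* opcode, no CPL_BARRIER */
    u32 lo = V_FW_WR_FLOWID(flowid) | V_FW_WR_LEN16(len16);    /* flow id + length/16 */
    unsigned int op = G_FW_WR_OP(hi);                          /* recover the opcode */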
@@ -146,6 +133,173 @@ struct fw_eth_tx_pkts_wr {
__u8 type;
};
+struct fw_eth_tx_pkt_vm_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3[2];
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+struct fw_eth_tx_pkts_vm_wr {
+ __be32 op_pkd;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 r4;
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+
+#define S_FW_FILTER_WR_IQ 0
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+
+#define S_FW_FILTER_WR_DROP 24
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+
+#define S_FW_FILTER_WR_PORT 9
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+
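The macro names encode which packed struct field they belong to; for instance, everything from DEL_FILTER down to L2TIX lands in fw_filter_wr.del_filter_to_l2tix. A minimal sketch of building that word for a drop-and-count filter (field values are illustrative):

    struct fw_filter_wr wr;

    memset(&wr, 0, sizeof(wr));
    wr.del_filter_to_l2tix =
        cpu_to_be32(V_FW_FILTER_WR_DROP(1) |    /* drop packets on hit */
                    V_FW_FILTER_WR_HITCNTS(1) | /* keep hit counters */
                    V_FW_FILTER_WR_TXCHAN(0) |
                    V_FW_FILTER_WR_L2TIX(0));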
/******************************************************************************
* C O M M A N D s
*********************/
@@ -171,24 +325,34 @@ struct fw_eth_tx_pkts_wr {
#define FW_CMD_HELLO_RETRIES 3
enum fw_cmd_opcodes {
+ FW_LDST_CMD = 0x01,
FW_RESET_CMD = 0x03,
FW_HELLO_CMD = 0x04,
FW_BYE_CMD = 0x05,
FW_INITIALIZE_CMD = 0x06,
FW_CAPS_CONFIG_CMD = 0x07,
FW_PARAMS_CMD = 0x08,
+ FW_PFVF_CMD = 0x09,
FW_IQ_CMD = 0x10,
FW_EQ_ETH_CMD = 0x12,
+ FW_EQ_CTRL_CMD = 0x13,
FW_VI_CMD = 0x14,
FW_VI_MAC_CMD = 0x15,
FW_VI_RXMODE_CMD = 0x16,
FW_VI_ENABLE_CMD = 0x17,
+ FW_VI_STATS_CMD = 0x1a,
FW_PORT_CMD = 0x1b,
FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_CLIP_CMD = 0x28,
FW_DEBUG_CMD = 0x81,
};
+enum fw_cmd_cap {
+ FW_CMD_CAP_PORT = 0x04,
+};
+
/*
* Generic command header flit0
*/
@@ -238,6 +402,94 @@ struct fw_cmd_hdr {
#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16)
+/* address spaces
+ */
+enum fw_ldst_addrspc {
+ FW_LDST_ADDRSPC_TP_PIO = 0x0010,
+};
+
+struct fw_ldst_cmd {
+ __be32 op_to_addrspace;
+ __be32 cycles_to_len16;
+ union fw_ldst {
+ struct fw_ldst_addrval {
+ __be32 addr;
+ __be32 val;
+ } addrval;
+ struct fw_ldst_idctxt {
+ __be32 physid;
+ __be32 msg_ctxtflush;
+ __be32 ctxt_data7;
+ __be32 ctxt_data6;
+ __be32 ctxt_data5;
+ __be32 ctxt_data4;
+ __be32 ctxt_data3;
+ __be32 ctxt_data2;
+ __be32 ctxt_data1;
+ __be32 ctxt_data0;
+ } idctxt;
+ struct fw_ldst_mdio {
+ __be16 paddr_mmd;
+ __be16 raddr;
+ __be16 vctl;
+ __be16 rval;
+ } mdio;
+ struct fw_ldst_mps {
+ __be16 fid_ctl;
+ __be16 rplcpf_pkd;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ __be32 atrb;
+ __be16 vlan[16];
+ } mps;
+ struct fw_ldst_func {
+ __u8 access_ctl;
+ __u8 mod_index;
+ __be16 ctl_id;
+ __be32 offset;
+ __be64 data0;
+ __be64 data1;
+ } func;
+ struct fw_ldst_pcie {
+ __u8 ctrl_to_fn;
+ __u8 bnum;
+ __u8 r;
+ __u8 ext_r;
+ __u8 select_naccess;
+ __u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
+ struct fw_ldst_i2c_deprecated {
+ __u8 pid_pkd;
+ __u8 base;
+ __u8 boffset;
+ __u8 data;
+ __be32 r9;
+ } i2c_deprecated;
+ struct fw_ldst_i2c {
+ __u8 pid;
+ __u8 did;
+ __u8 boffset;
+ __u8 blen;
+ __be32 r9;
+ __u8 data[48];
+ } i2c;
+ struct fw_ldst_le {
+ __be32 index;
+ __be32 r9;
+ __u8 val[33];
+ __u8 r11[7];
+ } le;
+ } u;
+};
+
+#define S_FW_LDST_CMD_ADDRSPACE 0
+#define M_FW_LDST_CMD_ADDRSPACE 0xff
+#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+
struct fw_reset_cmd {
__be32 op_to_write;
__be32 retval_len16;
@@ -386,6 +638,7 @@ struct fw_caps_config_cmd {
enum fw_params_mnem {
FW_PARAMS_MNEM_DEV = 1, /* device params */
FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_REG = 3, /* limited register access */
FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
};
@@ -395,6 +648,12 @@ enum fw_params_mnem {
enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
+ * allocated by the device's
+ * Lookup Engine
+ */
+ FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */
+ FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
};
@@ -402,7 +661,12 @@ enum fw_params_param_dev {
* physical and virtual function parameters
*/
enum fw_params_param_pfvf {
- FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
+ FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+ FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+ FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+ FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+ FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
+ FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A
};
/*
@@ -443,6 +707,10 @@ enum fw_params_param_dmaq {
#define G_FW_PARAMS_PARAM_YZ(x) \
(((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ)
+#define S_FW_PARAMS_PARAM_XYZ 0
+#define M_FW_PARAMS_PARAM_XYZ 0xffffff
+#define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ)
+
struct fw_params_cmd {
__be32 op_to_vfn;
__be32 retval_len16;
@@ -464,6 +732,74 @@ struct fw_params_cmd {
#define G_FW_PARAMS_CMD_VFN(x) \
(((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN)
+struct fw_pfvf_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ __be32 niqflint_niq;
+ __be32 type_to_neq;
+ __be32 tc_to_nexactf;
+ __be32 r_caps_to_nethctrl;
+ __be16 nricq;
+ __be16 nriqp;
+ __be32 r4;
+};
+
+#define S_FW_PFVF_CMD_PFN 8
+#define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN)
+
+#define S_FW_PFVF_CMD_VFN 0
+#define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN)
+
+#define S_FW_PFVF_CMD_NIQFLINT 20
+#define M_FW_PFVF_CMD_NIQFLINT 0xfff
+#define G_FW_PFVF_CMD_NIQFLINT(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT)
+
+#define S_FW_PFVF_CMD_NIQ 0
+#define M_FW_PFVF_CMD_NIQ 0xfffff
+#define G_FW_PFVF_CMD_NIQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ)
+
+#define S_FW_PFVF_CMD_PMASK 20
+#define M_FW_PFVF_CMD_PMASK 0xf
+#define G_FW_PFVF_CMD_PMASK(x) \
+ (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK)
+
+#define S_FW_PFVF_CMD_NEQ 0
+#define M_FW_PFVF_CMD_NEQ 0xfffff
+#define G_FW_PFVF_CMD_NEQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ)
+
+#define S_FW_PFVF_CMD_TC 24
+#define M_FW_PFVF_CMD_TC 0xff
+#define G_FW_PFVF_CMD_TC(x) \
+ (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC)
+
+#define S_FW_PFVF_CMD_NVI 16
+#define M_FW_PFVF_CMD_NVI 0xff
+#define G_FW_PFVF_CMD_NVI(x) \
+ (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI)
+
+#define S_FW_PFVF_CMD_NEXACTF 0
+#define M_FW_PFVF_CMD_NEXACTF 0xffff
+#define G_FW_PFVF_CMD_NEXACTF(x) \
+ (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF)
+
+#define S_FW_PFVF_CMD_R_CAPS 24
+#define M_FW_PFVF_CMD_R_CAPS 0xff
+#define G_FW_PFVF_CMD_R_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS)
+
+#define S_FW_PFVF_CMD_WX_CAPS 16
+#define M_FW_PFVF_CMD_WX_CAPS 0xff
+#define G_FW_PFVF_CMD_WX_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS)
+
+#define S_FW_PFVF_CMD_NETHCTRL 0
+#define M_FW_PFVF_CMD_NETHCTRL 0xffff
+#define G_FW_PFVF_CMD_NETHCTRL(x) \
+ (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL)
+
/*
* ingress queue type; the first 1K ingress queues can have associated 0,
* 1 or 2 free lists and an interrupt, all other ingress queues lack these
@@ -473,6 +809,11 @@ enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
};
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_NIC = 1,
+ FW_IQ_IQTYPE_OFLD,
+};
+
struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
@@ -606,6 +947,9 @@ struct fw_iq_cmd {
(((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)
+#define S_FW_IQ_CMD_IQTYPE 24
+#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE)
+
#define S_FW_IQ_CMD_FL0CNGCHMAP 20
#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)
@@ -724,6 +1068,11 @@ struct fw_eq_eth_cmd {
#define G_FW_EQ_ETH_CMD_EQID(x) \
(((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
+#define S_FW_EQ_ETH_CMD_PHYSEQID 0
+#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff
+#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID)
+
#define S_FW_EQ_ETH_CMD_FETCHRO 22
#define M_FW_EQ_ETH_CMD_FETCHRO 0x1
#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO)
@@ -786,6 +1135,75 @@ struct fw_eq_eth_cmd {
#define G_FW_EQ_ETH_CMD_VIID(x) \
(((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
+struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define S_FW_EQ_CTRL_CMD_PFN 8
+#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN)
+
+#define S_FW_EQ_CTRL_CMD_VFN 0
+#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN)
+
+#define S_FW_EQ_CTRL_CMD_ALLOC 31
+#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC)
+#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U)
+
+#define S_FW_EQ_CTRL_CMD_FREE 30
+#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE)
+#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTART 28
+#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART)
+#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U)
+
+#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
+
+#define S_FW_EQ_CTRL_CMD_EQID 0
+#define M_FW_EQ_CTRL_CMD_EQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID)
+#define G_FW_EQ_CTRL_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)
+
+#define S_FW_EQ_CTRL_CMD_PHYSEQID 0
+#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID)
+#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)
+
+#define S_FW_EQ_CTRL_CMD_FETCHRO 22
+#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO)
+#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20
+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_CTRL_CMD_PCIECHN 16
+#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN)
+
+#define S_FW_EQ_CTRL_CMD_IQID 0
+#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID)
+
+#define S_FW_EQ_CTRL_CMD_FBMIN 23
+#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN)
+
+#define S_FW_EQ_CTRL_CMD_FBMAX 20
+#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_CTRL_CMD_EQSIZE 0
+#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE)
+
enum fw_vi_func {
FW_VI_FUNC_ETH,
};
@@ -988,6 +1406,9 @@ struct fw_vi_enable_cmd {
(((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO)
#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U)
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS 16
+
/* VI PF stats offset definitions */
#define VI_PF_NUM_STATS 17
enum fw_vi_stats_pf_index {
@@ -1065,7 +1486,16 @@ struct fw_vi_stats_cmd {
} u;
};
-/* port capabilities bitmap */
+#define S_FW_VI_STATS_CMD_VIID 0
+#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID)
+
+#define S_FW_VI_STATS_CMD_NSTATS 12
+#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS)
+
+#define S_FW_VI_STATS_CMD_IX 0
+#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX)
+
+/* old 16-bit port capabilities bitmap */
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
FW_PORT_CAP_SPEED_1G = 0x0002,
@@ -1100,9 +1530,45 @@ enum fw_port_mdi {
#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI)
#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI)
+/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */
+#define FW_PORT_CAP32_SPEED_100M 0x00000001UL
+#define FW_PORT_CAP32_SPEED_1G 0x00000002UL
+#define FW_PORT_CAP32_SPEED_10G 0x00000004UL
+#define FW_PORT_CAP32_SPEED_25G 0x00000008UL
+#define FW_PORT_CAP32_SPEED_40G 0x00000010UL
+#define FW_PORT_CAP32_SPEED_50G 0x00000020UL
+#define FW_PORT_CAP32_SPEED_100G 0x00000040UL
+#define FW_PORT_CAP32_FC_RX 0x00010000UL
+#define FW_PORT_CAP32_FC_TX 0x00020000UL
+#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL
+#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL
+#define FW_PORT_CAP32_ANEG 0x00100000UL
+#define FW_PORT_CAP32_MDIX 0x00200000UL
+#define FW_PORT_CAP32_MDIAUTO 0x00400000UL
+#define FW_PORT_CAP32_FEC_RS 0x00800000UL
+#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL
+
+#define S_FW_PORT_CAP32_SPEED 0
+#define M_FW_PORT_CAP32_SPEED 0xfff
+#define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED)
+#define G_FW_PORT_CAP32_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED)
+
+enum fw_port_mdi32 {
+ FW_PORT_CAP32_MDI_AUTO,
+};
+
+#define S_FW_PORT_CAP32_MDI 21
+#define M_FW_PORT_CAP32_MDI 3
+#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI)
+#define G_FW_PORT_CAP32_MDI(x) \
+ (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI)
+
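Decoding the 32-bit capabilities is a matter of bit tests plus the G_ field extractors. A sketch, assuming caps was read out of a big-endian pcaps32/acaps32 field and max_speed is a local in Mbps:

    fw_port_cap32_t caps = be32_to_cpu(pcaps32_be); /* hypothetical source */
    unsigned int max_speed = 0;

    if (caps & FW_PORT_CAP32_SPEED_100G)
        max_speed = 100000;
    else if (caps & FW_PORT_CAP32_SPEED_25G)
        max_speed = 25000;
    unsigned int mdi = G_FW_PORT_CAP32_MDI(caps);   /* MDI sub-field */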
enum fw_port_action {
FW_PORT_ACTION_L1_CFG = 0x0001,
FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+ FW_PORT_ACTION_L1_CFG32 = 0x0009,
+ FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a,
};
struct fw_port_cmd {
@@ -1191,6 +1657,18 @@ struct fw_port_cmd {
__be64 r12;
} control;
} dcb;
+ struct fw_port_l1cfg32 {
+ __be32 rcap32;
+ __be32 r;
+ } l1cfg32;
+ struct fw_port_info32 {
+ __be32 lstatus32_to_cbllen32;
+ __be32 auxlinfo32_mtu32;
+ __be32 linkattr32;
+ __be32 pcaps32;
+ __be32 acaps32;
+ __be32 lpacaps32;
+ } info32;
} u;
};
@@ -1264,6 +1742,36 @@ struct fw_port_cmd {
#define G_FW_PORT_CMD_MODTYPE(x) \
(((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE)
+#define S_FW_PORT_CMD_LSTATUS32 31
+#define M_FW_PORT_CMD_LSTATUS32 0x1
+#define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32)
+#define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U)
+
+#define S_FW_PORT_CMD_LINKDNRC32 28
+#define M_FW_PORT_CMD_LINKDNRC32 0x7
+#define G_FW_PORT_CMD_LINKDNRC32(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32)
+
+#define S_FW_PORT_CMD_MDIOCAP32 26
+#define M_FW_PORT_CMD_MDIOCAP32 0x1
+#define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32)
+#define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR32 21
+#define M_FW_PORT_CMD_MDIOADDR32 0x1f
+#define G_FW_PORT_CMD_MDIOADDR32(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32)
+
+#define S_FW_PORT_CMD_PORTTYPE32 13
+#define M_FW_PORT_CMD_PORTTYPE32 0xff
+#define G_FW_PORT_CMD_PORTTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32)
+
+#define S_FW_PORT_CMD_MODTYPE32 8
+#define M_FW_PORT_CMD_MODTYPE32 0x1f
+#define G_FW_PORT_CMD_MODTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32)
+
/*
* These are configured into the VPD and hence tools that generate
* VPD may use this enumeration.
@@ -1532,6 +2040,83 @@ struct fw_rss_ind_tbl_cmd {
#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \
(((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2)
+struct fw_rss_glb_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_rss_glb_config {
+ struct fw_rss_glb_config_manual {
+ __be32 mode_pkd;
+ __be32 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_glb_config_basicvirtual {
+ __be32 mode_keymode;
+ __be32 synmapen_to_hashtoeplitz;
+ __be64 r8;
+ __be64 r9;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28
+#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf
+#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8
+#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3
+#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \
+ V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0
+#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
+#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \
+ V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U)
+
struct fw_rss_vi_config_cmd {
__be32 op_to_viid;
__be32 retval_len16;
@@ -1611,6 +2196,22 @@ struct fw_rss_vi_config_cmd {
(((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN)
#define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U)
+struct fw_clip_cmd {
+ __be32 op_to_write;
+ __be32 alloc_to_len16;
+ __be64 ip_hi;
+ __be64 ip_lo;
+ __be32 r4[2];
+};
+
+#define S_FW_CLIP_CMD_ALLOC 31
+#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
+#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+
+#define S_FW_CLIP_CMD_FREE 30
+#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
+#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+
/******************************************************************************
* D E B U G C O M M A N D s
******************************************************/
diff --git a/drivers/net/cxgbe/base/t4vf_hw.c b/drivers/net/cxgbe/base/t4vf_hw.c
new file mode 100644
index 00000000..d96456bb
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4vf_hw.c
@@ -0,0 +1,880 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+
+#include "common.h"
+#include "t4_regs.h"
+
+/**
+ * t4vf_wait_dev_ready - wait until reads of registers work
+ *
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's). Return an error if it doesn't
+ * become ready ...
+ */
+static int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+ const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI;
+ const u32 notready1 = 0xffffffff;
+ const u32 notready2 = 0xeeeeeeee;
+ u32 val;
+
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ msleep(500);
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ val);
+ return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command. If @rpl is not %NULL it is used to store
+ * the FW's reply to the command. The command and its optional reply
+ * are of the same length. FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff; otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+
+ u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ struct mbox_entry entry;
+ unsigned int delay_idx;
+ u32 v, mbox_data;
+ const __be64 *p;
+ int i, ret;
+ int ms;
+
+ /* In T6, mailbox size is changed to 128 bytes to avoid
+ * invalidating the entire prefetch buffer.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ mbox_data = T4VF_MBDATA_BASE_ADDR;
+ else
+ mbox_data = T6VF_MBDATA_BASE_ADDR;
+
+ /*
+ * Commands must be multiples of 16 bytes in length and may not be
+ * larger than the size of the Mailbox Data register array.
+ */
+ if ((size % 16) != 0 ||
+ size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+ return -EINVAL;
+
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ if (i > (2 * FW_CMD_MAX_TIMEOUT)) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = -EBUSY;
+ return ret;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adapter->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+
+ /*
+ * Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+ for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+
+ if (v != X_MBOWNER_PL) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ return ret;
+ }
+
+ /*
+ * Write the command array into the Mailbox Data register array and
+ * transfer ownership of the mailbox to the firmware.
+ */
+ for (i = 0, p = cmd; i < size; i += 8)
+ t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+
+ t4_read_reg(adapter, mbox_data); /* flush write */
+ t4_write_reg(adapter, mbox_ctl,
+ F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adapter, mbox_ctl); /* flush write */
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Spin waiting for firmware to acknowledge processing our command.
+ */
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+
+ /*
+ * If we're the owner, see if this is the reply we wanted.
+ */
+ v = t4_read_reg(adapter, mbox_ctl);
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ /*
+ * If the Message Valid bit isn't on, revoke ownership
+ * of the mailbox and continue waiting for our reply.
+ */
+ if ((v & F_MBMSGVALID) == 0) {
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ /*
+ * We now have our reply. Extract the command return
+ * value, copy the reply back to our caller's buffer
+ * (if specified) and revoke ownership of the mailbox.
+ * We return the (negated) firmware command return
+ * code (this depends on FW_SUCCESS == 0). (Again we
+ * avoid clogging the log with FW_VI_STATS_CMD
+ * reply results.)
+ */
+
+ /*
+ * Retrieve the command reply and release the mailbox.
+ */
+ get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data);
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+
+ /* return value in high-order host-endian word */
+ v = be64_to_cpu(cmd_rpl[0]);
+
+ if (rpl) {
+ /* request bit in high-order BE word */
+ WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ & F_FW_CMD_REQUEST) == 0);
+ memcpy(rpl, cmd_rpl, size);
+ }
+ return -((int)G_FW_CMD_RETVAL(v));
+ }
+ }
+
+ /*
+ * We timed out. Return the error ...
+ */
+ dev_err(adapter, "command %#x timed out\n",
+ *(const u8 *)cmd);
+ dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+ ret = -ETIMEDOUT;
+ return ret;
+}
+
+/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+ F_FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd)));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+ u32 pl_vf_rev;
+ int ret, ver;
+
+ ret = t4vf_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Default port and clock for debugging in case we can't reach
+ * firmware.
+ */
+ adapter->params.nports = 1;
+ adapter->params.vfres.pmask = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+ * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
+ * ADAPTER (VERSION << 4 | REVISION)
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_query_params - query FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Reads the values of firmware or device parameters. Up to 7 parameters
+ * can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
+{
+ struct fw_params_cmd cmd, rpl;
+ struct fw_params_param *p;
+ unsigned int i;
+ size_t len16;
+ int ret;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+ p->mnem = cpu_to_be32(*params++);
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0)
+ for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+ *vals++ = be32_to_cpu(p->val);
+ return ret;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters. The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+ struct vpd_params *vpd_params = &adapter->params.vpd;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ vpd_params->cclk = vals[0];
+ dev_debug(adapter, "%s: vpd_params->cclk = %u\n",
+ __func__, vpd_params->cclk);
+ return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves the firmware and TP microcode versions.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ adapter->params.fw_vers = vals[0];
+ adapter->params.tp_vers = vals[1];
+
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+ return 0;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters. Up to 7 parameters
+ * can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals)
+{
+ struct fw_params_param *p;
+ struct fw_params_cmd cmd;
+ unsigned int i;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+ p->mnem = cpu_to_be32(*params++);
+ p->val = cpu_to_be32(*vals++);
+ }
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_fl_pkt_align - return the fl packet alignment
+ * @adapter: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for these, and hence the alignment for
+ * the next packet offset is the maximum of the two.
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control,
+ u32 sge_control2)
+{
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adapter->params.chip)) {
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
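A worked example, assuming X_INGPADBOUNDARY_SHIFT is 5 (its T4/T5 value): an INGPADBOUNDARY field of 1 yields 1 << (1 + 5) = 64 bytes, an INGPACKBOUNDARY of X_INGPACKBOUNDARY_16B yields 16 bytes, so the free-list packet alignment comes out as max(64, 16) = 64 bytes:

    /* illustrative register values, not read from real hardware */
    int fl_align = t4vf_fl_pkt_align(adapter, sge_control, sge_control2);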
+
+unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
+ return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami));
+}
+
+/**
+ * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ * @adapter: the adapter
+ *
+ * Retrieves global RSS mode and parameters with which we have to live
+ * and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+ struct rss_params *rss = &adapter->params.rss;
+ struct fw_rss_glb_config_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute an RSS Global Configuration read command to retrieve
+ * our RSS configuration.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Translate the big-endian RSS Global Configuration into our
+ * cpu-endian format based on the RSS mode. We also do first level
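A hedged sketch of a TP_PIO read through this command, reusing the generic V_FW_CMD_OP/F_FW_CMD_* flags used elsewhere in this header (tp_pio_addr is an illustrative register address):

    struct fw_ldst_cmd c;

    memset(&c, 0, sizeof(c));
    c.op_to_addrspace =
        cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
                    F_FW_CMD_READ |
                    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_TP_PIO));
    c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
    c.u.addrval.addr = cpu_to_be32(tp_pio_addr);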
+ * filtering at this point to weed out modes which don't support
+ * VF Drivers ...
+ */
+ rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE
+ (be32_to_cpu(rpl.u.manual.mode_pkd));
+ switch (rss->mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu
+ (rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+ rss->u.basicvirtual.synmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ rss->u.basicvirtual.syn4tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn2tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn4tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ rss->u.basicvirtual.syn2tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+ rss->u.basicvirtual.ofdmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+ rss->u.basicvirtual.tnlmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ rss->u.basicvirtual.tnlalllookup =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+ rss->u.basicvirtual.hashtoeplitz =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+ /* we need at least Tunnel Map Enable to be set */
+ if (!rss->u.basicvirtual.tnlmapen)
+ return -EINVAL;
+ break;
+ }
+
+ default:
+ /* all unknown/unsupported RSS modes result in an error */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_get_vfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a virtual
+ * function. The results are stored in @adapter->params.vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get VF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract VF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+ vfres->niq = G_FW_PFVF_CMD_NIQ(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ vfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ vfres->pmask = G_FW_PFVF_CMD_PMASK(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ vfres->tc = G_FW_PFVF_CMD_TC(word);
+ vfres->nvi = G_FW_PFVF_CMD_NVI(word);
+ vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
+ vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
+ vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats_fw - collect "port" statistics via Firmware
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface via Firmware
+ * commands.
+ */
+static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ unsigned int rem = VI_VF_NUM_STATS;
+ struct fw_vi_stats_vf fwstats;
+ __be64 *fwsp = (__be64 *)&fwstats;
+
+ /*
+ * Grab the Virtual Interface statistics a chunk at a time via mailbox
+ * commands. We could use a Work Request and get all of them at once
+ * but that's an asynchronous interface which is awkward to use.
+ */
+ while (rem) {
+ unsigned int ix = VI_VF_NUM_STATS - rem;
+ unsigned int nstats = min(6U, rem);
+ struct fw_vi_stats_cmd cmd, rpl;
+ size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+ sizeof(struct fw_vi_stats_ctl));
+ size_t len16 = DIV_ROUND_UP(len, 16);
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) |
+ V_FW_VI_STATS_CMD_VIID(pi->viid) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ cmd.u.ctl.nstats_ix =
+ cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) |
+ V_FW_VI_STATS_CMD_NSTATS(nstats));
+ ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+ rem -= nstats;
+ fwsp += nstats;
+ }
+
+ /*
+ * Translate firmware statistics into host native statistics.
+ */
+ p->tx_octets = be64_to_cpu(fwstats.tx_bcast_bytes) +
+ be64_to_cpu(fwstats.tx_mcast_bytes) +
+ be64_to_cpu(fwstats.tx_ucast_bytes);
+ p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+ p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+ p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+ p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames);
+
+ p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+ p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+ p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+ p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames);
+
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats - collect "port" statistics
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface.
+ */
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ /*
+ * If this is not the first Virtual Interface for our Virtual
+ * Function, we need to use Firmware commands to retrieve its
+ * MPS statistics.
+ */
+ if (pidx != 0)
+ t4vf_get_port_stats_fw(adapter, pidx, p);
+
+ /*
+ * But for the first VI, we can grab its statistics via the MPS
+ * register mapped into the VF register space.
+ */
+#define GET_STAT(name) \
+ t4_read_reg64(adapter, \
+ T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L)
+ p->tx_octets = GET_STAT(TX_VF_BCAST_BYTES) +
+ GET_STAT(TX_VF_MCAST_BYTES) +
+ GET_STAT(TX_VF_UCAST_BYTES);
+ p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
+ p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
+ p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
+ p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES);
+
+ p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
+ p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
+ p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
+
+ p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES);
+#undef GET_STAT
+}
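A minimal usage sketch (pidx and the log line are illustrative; port index 0 reads the MPS registers directly, others go through the mailbox):

    struct port_stats ps;

    t4vf_get_port_stats(adapter, pidx, &ps);
    dev_info(adapter, "tx octets %llu, rx ucast frames %llu\n",
             (unsigned long long)ps.tx_octets,
             (unsigned long long)ps.rx_ucast_frames);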
+
+static int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+ struct fw_vi_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute a VI command to allocate Virtual Interface and return its
+ * VIID.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE |
+ F_FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ F_FW_VI_CMD_ALLOC);
+ cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id);
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+ return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid));
+}
+
+int t4vf_port_init(struct adapter *adapter)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd port_cmd, port_rpl;
+ struct fw_vi_cmd vi_cmd, vi_rpl;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ int mdio_addr;
+ int ret, i;
+
+ for_each_port(adapter, i) {
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ /*
+ * If we haven't yet determined if we're talking to Firmware
+ * which knows the new 32-bit Port Caps, it's time to find
+ * out now. This will also tell new Firmware to send us Port
+ * Status Updates using the new 32-bit Port Capabilities
+ * version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val;
+
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X
+ (FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
+ val = 1;
+ ret = t4vf_set_params(adapter, 1, &param, &val);
+ fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
+ adapter->params.fw_caps_support = fw_caps;
+ }
+
+ ret = t4vf_alloc_vi(adapter, p->port_id);
+ if (ret < 0) {
+ dev_err(adapter, "cannot allocate VI for port %d:"
+ " err=%d\n", p->port_id, ret);
+ return ret;
+ }
+ p->viid = ret;
+
+ /*
+ * Execute a VI Read command to get our Virtual Interface
+ * information like MAC address, etc.
+ */
+ memset(&vi_cmd, 0, sizeof(vi_cmd));
+ vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+ vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid));
+ ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ p->rss_size = G_FW_VI_CMD_RSSSIZE
+ (be16_to_cpu(vi_rpl.norss_rsssize));
+ t4_os_set_hw_addr(adapter, i, vi_rpl.mac);
+
+ /*
+ * If we don't have read access to our port information, we're
+ * done now. Else, execute a PORT Read command to get it ...
+ */
+ if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+ return 0;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32
+ (V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(p->port_id));
+ port_cmd.action_to_len16 = cpu_to_be32
+ (V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32) |
+ FW_LEN16(port_cmd));
+ ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ /*
+ * Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus = be32_to_cpu
+ (port_rpl.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) :
+ -1);
+ pcaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.pcap));
+ acaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.acap));
+ } else {
+ u32 lstatus32 = be32_to_cpu
+ (port_rpl.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1);
+ pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
+ acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
+ }
+
+ p->port_type = port_type;
+ p->mdio_addr = mdio_addr;
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
+ init_link_config(&p->link_cfg, pcaps, acaps);
+ }
+ return 0;
+}
diff --git a/drivers/net/cxgbe/base/t4vf_hw.h b/drivers/net/cxgbe/base/t4vf_hw.h
new file mode 100644
index 00000000..55e436e7
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4vf_hw.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4VF_HW_H
+#define __T4VF_HW_H
+
+#define T4VF_PL_BASE_ADDR 0x0200
+#define T4VF_CIM_BASE_ADDR 0x0300
+#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T6VF_MBDATA_BASE_ADDR 0x0280
+
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES NUM_CIM_PF_MAILBOX_DATA_INSTANCES
+#endif /* __T4VF_HW_H */
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
new file mode 100644
index 00000000..5e4dc527
--- /dev/null
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "common.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a CLIP entry in HW for the associated IPv4/IPv6 address
+ */
+static int clip6_get_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * Delete the CLIP entry in HW for the associated IPv4/IPv6 address
+ */
+static int clip6_release_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * cxgbe_clip_release - Release associated CLIP entry
+ * @ce: clip entry to release
+ *
+ * Releases a reference and, when the count drops to zero, frees the CLIP entry from the CLIP table
+ */
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
+{
+ int ret;
+
+ t4_os_lock(&ce->lock);
+ if (rte_atomic32_dec_and_test(&ce->refcnt)) {
+ ret = clip6_release_mbox(dev, ce->addr);
+ if (ret)
+ dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
+ }
+ t4_os_unlock(&ce->lock);
+}
+
+/**
+ * find_or_alloc_clipe - Find/Allocate a free CLIP entry
+ * @c: CLIP table
+ * @lip: IPv4/IPv6 address to compare/add
+ * Returns pointer to the IPv4/IPv6 entry found/created
+ *
+ * Finds/Allocates a CLIP entry to be used for a filter rule.
+ */
+static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
+ const u32 *lip)
+{
+ struct clip_entry *end, *e;
+ struct clip_entry *first_free = NULL;
+ unsigned int clipt_size = c->clipt_size;
+
+ for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
+ if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (memcmp(lip, e->addr, sizeof(e->addr)) == 0)
+ goto exists;
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto exists;
+ }
+
+ return NULL;
+
+exists:
+ return e;
+}
+
+static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
+ u32 *lip, u8 v6)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct clip_tbl *ctbl = adap->clipt;
+ struct clip_entry *ce;
+ int ret = 0;
+
+ if (!ctbl)
+ return NULL;
+
+ t4_os_write_lock(&ctbl->lock);
+ ce = find_or_alloc_clipe(ctbl, lip);
+ if (ce) {
+ t4_os_lock(&ce->lock);
+ if (!rte_atomic32_read(&ce->refcnt)) {
+ rte_memcpy(ce->addr, lip, sizeof(ce->addr));
+ if (v6) {
+ ce->type = FILTER_TYPE_IPV6;
+ rte_atomic32_set(&ce->refcnt, 1);
+ ret = clip6_get_mbox(dev, lip);
+ if (ret)
+ dev_debug(adap,
+ "CLIP FW ADD CMD failed: %d",
+ ret);
+ } else {
+ ce->type = FILTER_TYPE_IPV4;
+ }
+ } else {
+ rte_atomic32_inc(&ce->refcnt);
+ }
+ t4_os_unlock(&ce->lock);
+ }
+ t4_os_write_unlock(&ctbl->lock);
+
+ return ret ? NULL : ce;
+}
+
+/**
+ * cxgbe_clip_alloc - Allocate an IPv6 CLIP entry
+ * @dev: rte_eth_dev pointer
+ * @lip: IPV6 address to add
+ * Returns pointer to the CLIP entry created
+ *
+ * Allocates an IPv6 CLIP entry to be used for a filter rule.
+ */
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip)
+{
+ return t4_clip_alloc(dev, lip, FILTER_TYPE_IPV6);
+}
+
+/**
+ * Initialize CLIP Table
+ */
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end)
+{
+ unsigned int clipt_size;
+ struct clip_tbl *ctbl;
+ unsigned int i;
+
+ if (clipt_start >= clipt_end)
+ return NULL;
+
+ clipt_size = clipt_end - clipt_start + 1;
+
+ ctbl = t4_os_alloc(sizeof(*ctbl) +
+ clipt_size * sizeof(struct clip_entry));
+ if (!ctbl)
+ return NULL;
+
+ ctbl->clipt_start = clipt_start;
+ ctbl->clipt_size = clipt_size;
+
+ t4_os_rwlock_init(&ctbl->lock);
+
+ for (i = 0; i < ctbl->clipt_size; i++) {
+ t4_os_lock_init(&ctbl->cl_list[i].lock);
+ rte_atomic32_set(&ctbl->cl_list[i].refcnt, 0);
+ }
+
+ return ctbl;
+}
+
+/**
+ * Cleanup CLIP Table
+ */
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+ if (adap->clipt)
+ t4_os_free(adap->clipt);
+}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
new file mode 100644
index 00000000..737ccc69
--- /dev/null
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_CLIP_H_
+#define _CXGBE_CLIP_H_
+
+/*
+ * State for the corresponding entry of the HW CLIP table.
+ */
+struct clip_entry {
+ enum filter_type type; /* entry type */
+ u32 addr[4]; /* IPv4 or IPv6 address */
+ rte_spinlock_t lock; /* entry lock */
+ rte_atomic32_t refcnt; /* entry reference count */
+};
+
+struct clip_tbl {
+ unsigned int clipt_start; /* start index of CLIP table */
+ unsigned int clipt_size; /* size of CLIP table */
+ rte_rwlock_t lock; /* table rw lock */
+ struct clip_entry cl_list[0]; /* MUST BE LAST */
+};
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end);
+void t4_cleanup_clip_tbl(struct adapter *adap);
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip);
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce);
+#endif /* _CXGBE_CLIP_H_ */
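Taken together, this is a reference-counted table keyed by IP address. A hedged lifecycle sketch, assuming clipt_start/clipt_end come from the CLIP_START/CLIP_END FW parameters queried at probe time:

    adap->clipt = t4_init_clip_tbl(clipt_start, clipt_end);

    struct clip_entry *ce = cxgbe_clip_alloc(dev, (u32 *)&ipv6_addr);
    if (ce) {
        /* ... reference ce in a filter rule ... */
        cxgbe_clip_release(dev, ce); /* drop the ref when the rule goes away */
    }

    t4_cleanup_clip_tbl(adap);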
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index f9891545..5e6f5c98 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _CXGBE_H_
@@ -46,20 +18,51 @@
#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
+/* Max poll time is 100 * 100msec = 10 sec */
+#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */
+#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
+
+#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
+#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
+#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
+
+bool force_linkup(struct adapter *adap);
int cxgbe_probe(struct adapter *adapter);
+int cxgbevf_probe(struct adapter *adapter);
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
+int cxgbe_set_link_status(struct port_info *pi, bool status);
int cxgbe_up(struct adapter *adap);
int cxgbe_down(struct port_info *pi);
void cxgbe_close(struct adapter *adapter);
void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats);
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);
void cxgbe_stats_reset(struct port_info *pi);
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c);
int link_start(struct port_info *pi);
void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
unsigned int cnt, unsigned int size, unsigned int iqe_size);
int setup_sge_fwevtq(struct adapter *adapter);
+int setup_sge_ctrl_txq(struct adapter *adapter);
void cfg_queues(struct rte_eth_dev *eth_dev);
int cfg_queue_count(struct rte_eth_dev *eth_dev);
+int init_rss(struct adapter *adap);
int setup_rss(struct port_info *pi);
void cxgbe_enable_rx_queues(struct port_info *pi);
+void print_port_info(struct adapter *adap);
+void print_adapter_info(struct adapter *adap);
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key);
+void configure_max_ethqsets(struct adapter *adapter);
#endif /* _CXGBE_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index 03bba9fe..5d47c5f3 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _CXGBE_COMPAT_H_
@@ -226,15 +198,6 @@ static inline int cxgbe_fls(int x)
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
-/**
- * cxgbe_ffs - find first bit set
- * @x: the word to search
- */
-static inline int cxgbe_ffs(int x)
-{
- return x ? __builtin_ffs(x) : 0;
-}
-
static inline unsigned long ilog2(unsigned long n)
{
unsigned int e = 0;
@@ -278,4 +241,16 @@ static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr)
rte_write32_relaxed(val, addr);
}
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)( \
+{ \
+ typeof(x) quot = (x) / (denom); \
+ typeof(x) rem = (x) % (denom); \
+ (quot * (numer)) + ((rem * (numer)) / (denom)); \
+} \
+)
+
#endif /* _CXGBE_COMPAT_H_ */
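
A worked example of why mult_frac() is preferred over the naive (x * numer) / denom, assuming 32-bit unsigned arithmetic; init_hash_filter() later in this patch uses it as mult_frac(nftids, user_filter_perc, 100):

/* Naive form overflows: 3000000000u * 3 wraps around in 32 bits,
 * yielding garbage after the division.
 */
unsigned int x = 3000000000u;

/* mult_frac: quot = 750000000, rem = 0
 *   -> 750000000 * 3 + (0 * 3) / 4 = 2250000000, exact and in range.
 */
unsigned int scaled = mult_frac(x, 3, 4);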
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 5cd260f4..4dcad7a2 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -63,6 +35,8 @@
#include <rte_dev.h>
#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+#include "cxgbe_flow.h"
/*
* Macros needed to support the PCI Device ID Table ...
@@ -85,8 +59,21 @@
*/
#include "t4_pci_id_tbl.h"
-static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT |\
+ DEV_TX_OFFLOAD_IPV4_CKSUM |\
+ DEV_TX_OFFLOAD_UDP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_TSO)
+
+#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP |\
+ DEV_RX_OFFLOAD_CRC_STRIP |\
+ DEV_RX_OFFLOAD_IPV4_CKSUM |\
+ DEV_RX_OFFLOAD_JUMBO_FRAME |\
+ DEV_RX_OFFLOAD_UDP_CKSUM |\
+ DEV_RX_OFFLOAD_TCP_CKSUM)
+
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
uint16_t pkts_sent, pkts_remain;
@@ -119,8 +106,8 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return total_sent;
}
-static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
unsigned int work_done;
@@ -135,8 +122,8 @@ static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return work_done;
}
-static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
- struct rte_eth_dev_info *device_info)
+void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -148,8 +135,6 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
.nb_align = 1,
};
- device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
device_info->max_rx_queues = max_queues;
@@ -159,25 +144,22 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->max_vfs = adapter->params.arch.vfcount;
device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
- device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ device_info->rx_queue_offload_capa = 0UL;
+ device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
- device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ device_info->tx_queue_offload_capa = 0UL;
+ device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
device_info->reta_size = pi->rss_size;
+ device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
+ device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
device_info->rx_desc_lim = cxgbe_desc_lim;
device_info->tx_desc_lim = cxgbe_desc_lim;
cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}
-static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -186,7 +168,7 @@ static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1, -1, 1, -1, false);
}
-static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -195,7 +177,7 @@ static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
0, -1, 1, -1, false);
}
-static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -206,7 +188,7 @@ static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
-1, 1, 1, -1, false);
}
-static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -217,28 +199,91 @@ static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
-1, 0, 1, -1, false);
}
-static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
- __rte_unused int wait_to_complete)
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
- struct rte_eth_link *old_link = &eth_dev->data->dev_link;
- unsigned int work_done, budget = 4;
+ struct rte_eth_link new_link = { 0 };
+ unsigned int i, work_done, budget = 32;
+ u8 old_link = pi->link_cfg.link_ok;
+
+ for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* Exit if link status changed or always forced up */
+ if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
+ break;
+
+ if (!wait_to_complete)
+ break;
+
+ rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
+ }
+
+ new_link.link_status = force_linkup(adapter) ?
+ ETH_LINK_UP : pi->link_cfg.link_ok;
+ new_link.link_autoneg = pi->link_cfg.autoneg;
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_speed = pi->link_cfg.speed;
+
+ return rte_eth_linkstatus_set(eth_dev, &new_link);
+}
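
An application-side sketch (hypothetical, using the standard ethdev API): with wait_to_complete set, the loop above may poll firmware link events every CXGBE_LINK_STATUS_POLL_MS for up to CXGBE_LINK_STATUS_POLL_CNT iterations, i.e. up to 10 seconds:

struct rte_eth_link link;

/* rte_eth_link_get() passes wait_to_complete = 1 down to the PMD. */
rte_eth_link_get(port_id, &link);
printf("link %s at %u Mbps\n",
       link.link_status == ETH_LINK_UP ? "up" : "down",
       link.link_speed);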
+
+/**
+ * Set device link up.
+ */
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+ /* Flush all link events */
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
- if (old_link->link_status == pi->link_cfg.link_ok)
- return -1; /* link not changed */
- eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;
+ /* If link already up, nothing to do */
+ if (pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, true);
+ if (ret)
+ return ret;
- /* link has changed */
+ cxgbe_dev_link_update(dev, 1);
return 0;
}
-static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+/**
+ * Set device link down.
+ */
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+
+ /* Flush all link events */
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* If link already down, nothing to do */
+ if (!pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, false);
+ if (ret)
+ return ret;
+
+ cxgbe_dev_link_update(dev, 0);
+ return 0;
+}
+
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -254,9 +299,11 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
/* set to jumbo mode if needed */
if (new_mtu > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
-1, -1, true);
@@ -266,21 +313,13 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
return err;
}
-static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id);
-static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id);
-static void cxgbe_dev_tx_queue_release(void *q);
-static void cxgbe_dev_rx_queue_release(void *q);
-
/*
* Stop device.
*/
-static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
- int i, dev_down = 0;
CXGBE_FUNC_TRACE();
@@ -294,28 +333,12 @@ static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
* have been disabled
*/
t4_sge_eth_clear_queues(pi);
-
- /* See if all ports are down */
- for_each_port(adapter, i) {
- pi = adap2pinfo(adapter, i);
- /*
- * Skip first port of the adapter since it will be closed
- * by DPDK
- */
- if (i == 0)
- continue;
- dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
- }
-
- /* If rest of the ports are stopped, then free up resources */
- if (dev_down == (adapter->params.nports - 1))
- cxgbe_close(adapter);
}
/* Start the device.
* It returns 0 on success.
*/
-static int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -367,7 +390,7 @@ out:
/*
* Stop device: disable rx and tx functions to allow for reconfiguring.
*/
-static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -386,19 +409,35 @@ static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
t4_sge_eth_clear_queues(pi);
}
-static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
+ uint64_t configured_offloads;
int err;
CXGBE_FUNC_TRACE();
+ configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+
+ /* The KEEP_CRC offload flag is not supported by the PMD.
+ * This block can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed.
+ */
+ if (rte_eth_dev_must_keep_crc(configured_offloads)) {
+ dev_info(adapter, "can't disable hw crc strip\n");
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_CRC_STRIP;
+ }
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = setup_sge_fwevtq(adapter);
if (err)
return err;
adapter->flags |= FW_QUEUE_BOUND;
+ if (is_pf4(adapter)) {
+ err = setup_sge_ctrl_txq(adapter);
+ if (err)
+ return err;
+ }
}
err = cfg_queue_count(eth_dev);
@@ -408,8 +447,7 @@ static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
return 0;
}
-static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id)
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
int ret;
struct sge_eth_txq *txq = (struct sge_eth_txq *)
@@ -424,8 +462,7 @@ static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id)
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
int ret;
struct sge_eth_txq *txq = (struct sge_eth_txq *)
@@ -440,10 +477,10 @@ static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx, uint16_t nb_desc,
- unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -452,8 +489,6 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
int err = 0;
unsigned int temp_nb_desc;
- RTE_SET_USED(tx_conf);
-
dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
__func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
socket_id, pi->first_qset);
@@ -488,13 +523,12 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
s->fw_evtq.cntxt_id, socket_id);
- dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
- __func__, txq->q.cntxt_id, err);
-
+ dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
+ __func__, txq->q.cntxt_id, txq->q.abs_id, err);
return err;
}
-static void cxgbe_dev_tx_queue_release(void *q)
+void cxgbe_dev_tx_queue_release(void *q)
{
struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
@@ -510,8 +544,7 @@ static void cxgbe_dev_tx_queue_release(void *q)
}
}
-static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t rx_queue_id)
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
int ret;
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -530,8 +563,7 @@ static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
- uint16_t rx_queue_id)
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
int ret;
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -549,11 +581,11 @@ static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx, uint16_t nb_desc,
- unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mp)
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -565,8 +597,6 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info dev_info;
unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
- RTE_SET_USED(rx_conf);
-
dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
__func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
socket_id, mp);
@@ -613,21 +643,25 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
/* Set to jumbo mode if necessary */
if (pkt_len > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, t4_ethrx_handler,
- t4_get_tp_ch_map(adapter, pi->tx_chan), mp,
+ is_pf4(adapter) ?
+ t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
queue_idx, socket_id);
- dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
- __func__, err, pi->port_id, rxq->rspq.cntxt_id);
+ dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
+ __func__, err, pi->port_id, rxq->rspq.cntxt_id,
+ rxq->rspq.abs_id);
return err;
}
-static void cxgbe_dev_rx_queue_release(void *q)
+void cxgbe_dev_rx_queue_release(void *q)
{
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
struct sge_rspq *rq = &rxq->rspq;
@@ -750,7 +784,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct adapter *adapter = pi->adapter;
struct link_config *lc = &pi->link_cfg;
- if (lc->supported & FW_PORT_CAP_ANEG) {
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
if (fc_conf->autoneg)
lc->requested_fc |= PAUSE_AUTONEG;
else
@@ -773,7 +807,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
&pi->link_cfg);
}
-static const uint32_t *
+const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
static const uint32_t ptypes[] = {
@@ -787,6 +821,88 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
+/* Update RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int err;
+
+ err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
+ if (err)
+ return err;
+
+ pi->rss_hf = rss_conf->rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = cpu_to_be32(key[i]);
+
+ t4_write_rss_key(adapter, mod_key, -1);
+ }
+
+ return 0;
+}
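
The hardware stores the 320-bit key with the word order reversed relative to the byte array the ethdev API passes in, hence the index-mirroring copy loop above. A hedged usage sketch (rss_key_bytes is a caller-supplied 40-byte array):

struct rte_eth_rss_conf conf = {
	.rss_key = rss_key_bytes,	/* 10 x u32 = 40 bytes */
	.rss_key_len = 40,		/* CXGBE_DEFAULT_RSS_KEY_LEN */
	.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
};

/* Dispatches to cxgbe_dev_rss_hash_update() above. */
rte_eth_dev_rss_hash_update(port_id, &conf);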
+
+/* Get RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ u64 rss_hf = 0;
+ u64 flags = 0;
+ int err;
+
+ err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ &flags, NULL);
+
+ if (err)
+ return err;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
+ rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
+
+ rss_conf->rss_hf = rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ t4_read_rss_key(adapter, key);
+
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = be32_to_cpu(key[i]);
+
+ memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+ }
+
+ return 0;
+}
+
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
RTE_SET_USED(dev);
@@ -956,6 +1072,23 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
return 0;
}
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
+ pi->xact_addr_filt, (u8 *)addr, true, true);
+ if (ret < 0) {
+ dev_err(adapter, "failed to set mac addr; err = %d\n",
+ ret);
+ return ret;
+ }
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_start = cxgbe_dev_start,
.dev_stop = cxgbe_dev_stop,
@@ -968,6 +1101,8 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_infos_get = cxgbe_dev_info_get,
.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
.link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
.mtu_set = cxgbe_dev_mtu_set,
.tx_queue_setup = cxgbe_dev_tx_queue_setup,
.tx_queue_start = cxgbe_dev_tx_queue_start,
@@ -977,6 +1112,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.rx_queue_start = cxgbe_dev_rx_queue_start,
.rx_queue_stop = cxgbe_dev_rx_queue_stop,
.rx_queue_release = cxgbe_dev_rx_queue_release,
+ .filter_ctrl = cxgbe_dev_filter_ctrl,
.stats_get = cxgbe_dev_stats_get,
.stats_reset = cxgbe_dev_stats_reset,
.flow_ctrl_get = cxgbe_flow_ctrl_get,
@@ -985,6 +1121,9 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.get_eeprom = cxgbe_get_eeprom,
.set_eeprom = cxgbe_set_eeprom,
.get_reg = cxgbe_get_regs,
+ .rss_hash_update = cxgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
};
/*
@@ -1004,14 +1143,34 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &cxgbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- /* for secondary processes, we don't initialise any further as primary
- * has already done this work.
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
*/
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
return 0;
-
- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ }
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
@@ -1043,6 +1202,16 @@ out_free_adapter:
return err;
}
+static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ /* Free up other ports and all resources */
+ cxgbe_close(adap);
+ return 0;
+}
+
static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
@@ -1052,7 +1221,7 @@ static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}
static struct rte_pci_driver rte_cxgbe_pmd = {
@@ -1065,3 +1234,6 @@ static struct rte_pci_driver rte_cxgbe_pmd = {
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
+ CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
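
Both devargs registered above are passed per device on the EAL command line; a hypothetical invocation (the PCI address is illustrative):

	-w 02:00.4,keep_ovlan=1,force_link_up=1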
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
new file mode 100644
index 00000000..7f0d3800
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -0,0 +1,1252 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include <rte_net.h>
+#include "common.h"
+#include "t4_tcb.h"
+#include "t4_regs.h"
+#include "cxgbe_filter.h"
+#include "clip_tbl.h"
+
+/**
+ * Initialize Hash Filters
+ */
+int init_hash_filter(struct adapter *adap)
+{
+ unsigned int n_user_filters;
+ unsigned int user_filter_perc;
+ int ret;
+ u32 params[7], val[7];
+
+#define FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ return ret;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
+ user_filter_perc = 100;
+ n_user_filters = mult_frac(adap->tids.nftids,
+ user_filter_perc,
+ 100);
+
+ adap->tids.nftids = n_user_filters;
+ adap->params.hash_filter = 1;
+ return 0;
+}
+
+/**
+ * Validate whether the requested filter specification can be set by
+ * checking that the requested features have been enabled
+ */
+int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
+{
+ u32 fconf;
+
+ /*
+ * Check for unconfigured fields being used.
+ */
+ fconf = adapter->params.tp.vlan_pri_map;
+
+#define S(_field) \
+ (fs->val._field || fs->mask._field)
+#define U(_mask, _field) \
+ (!(fconf & (_mask)) && S(_field))
+
+ if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
+ return -EOPNOTSUPP;
+
+#undef S
+#undef U
+
+ /*
+ * If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /*
+ * Don't allow various trivially obvious bogus out-of-range
+ * values ...
+ */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+ return 0;
+}
+
+/**
+ * Get the queue to which the traffic must be steered.
+ */
+static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int iq;
+
+ /*
+ * If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ iq = 0;
+ } else {
+ /*
+ * If the iq id is greater than the number of qsets,
+ * then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->n_rx_qsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
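
A worked example of the mapping above (all values hypothetical):

/* dirsteer = 1, fs->iq = 2, pi->first_qset = 8
 *   -> iq = adapter->sge.ethrxq[10].rspq.abs_id
 * dirsteer = 1, fs->iq = 100 with pi->n_rx_qsets = 16
 *   -> iq = 100, taken as an absolute queue ID unchanged.
 */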
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * Send CPL_SET_TCB_FIELD message
+ */
+static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
+ u16 word, u64 mask, u64 val, int no_reply)
+{
+ struct rte_mbuf *mbuf;
+ struct cpl_set_tcb_field *req;
+ struct sge_ctrl_txq *ctrlq;
+
+ ctrlq = &adapter->sge.ctrlq[0];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ WARN_ON(!mbuf);
+
+ mbuf->data_len = sizeof(*req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ memset(req, 0, sizeof(*req));
+ INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
+ req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
+ V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
+ V_NO_REPLY(no_reply));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+
+ t4_mgmt_tx(ctrlq, mbuf);
+}
+
+/**
+ * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
+ */
+static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
+ struct cpl_set_tcb_field *req,
+ unsigned int word,
+ u64 mask, u64 val, u8 cookie,
+ int no_reply)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
+ req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
+ V_QUEUENO(0));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Check if an entry is already filled.
+ */
+bool is_filter_set(struct tid_info *t, int fidx, int family)
+{
+ bool result = FALSE;
+ int i, max;
+
+ /* IPv6 requires four slots and IPv4 requires only one slot.
+ * Ensure there are enough slots available.
+ */
+ max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
+
+ t4_os_lock(&t->ftid_lock);
+ for (i = fidx; i <= max; i++) {
+ if (rte_bitmap_get(t->ftid_bmap, i)) {
+ result = TRUE;
+ break;
+ }
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return result;
+}
+
+/**
+ * Allocate an available free entry
+ */
+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
+{
+ struct tid_info *t = &adap->tids;
+ int pos;
+ int size = t->nftids;
+
+ t4_os_lock(&t->ftid_lock);
+ if (family == FILTER_TYPE_IPV6)
+ pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
+ else
+ pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
+ t4_os_unlock(&t->ftid_lock);
+
+ return pos < size ? pos : -1;
+}
+
+/**
+ * Construct hash filter ntuple.
+ */
+static u64 hash_filter_ntuple(const struct filter_entry *f)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+ u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
+
+ if (tp->port_shift >= 0)
+ ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0) {
+ if (!f->fs.val.proto)
+ ntuple |= (u64)tcp_proto << tp->protocol_shift;
+ else
+ ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
+ }
+
+ if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
+ ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
+
+ if (ntuple != tp->hash_filter_mask)
+ return 0;
+
+ return ntuple;
+}
+
+/**
+ * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_req) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ abort_req->rsvd0 = cpu_to_be32(0);
+ abort_req->rsvd1 = 0;
+ abort_req->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_rpl) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ abort_rpl->rsvd0 = cpu_to_be32(0);
+ abort_rpl->rsvd1 = 0;
+ abort_rpl->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_rpl + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Delete the specified hash filter.
+ */
+static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
+ unsigned int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ if (filter_id > adapter->tids.ntids)
+ return -E2BIG;
+
+ f = lookup_tid(t, filter_id);
+ if (!f) {
+ dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ unsigned int wrlen;
+ struct rte_mbuf *mbuf;
+ struct work_request_hdr *wr;
+ struct ulptx_idata *aligner;
+ struct cpl_set_tcb_field *req;
+ struct cpl_abort_req *abort_req;
+ struct cpl_abort_rpl *abort_rpl;
+
+ f->ctx = ctx;
+ f->pending = 1;
+
+ wrlen = cxgbe_roundup(sizeof(*wr) +
+ (sizeof(*req) + sizeof(*aligner)) +
+ sizeof(*abort_req) + sizeof(*abort_rpl),
+ 16);
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ dev_err(adapter, "%s: could not allocate mbuf\n",
+ __func__);
+ goto out_err;
+ }
+
+ mbuf->data_len = wrlen;
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ INIT_ULPTX_WR(req, wrlen, 0, 0);
+ wr = (struct work_request_hdr *)req;
+ wr++;
+ req = (struct cpl_set_tcb_field *)wr;
+ mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
+ V_TCB_RSS_INFO(M_TCB_RSS_INFO),
+ V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
+ 0, 1);
+ aligner = (struct ulptx_idata *)(req + 1);
+ abort_req = (struct cpl_abort_req *)(aligner + 1);
+ mk_abort_req_ulp(abort_req, f->tid);
+ abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
+ mk_abort_rpl_ulp(abort_rpl, f->tid);
+ t4_mgmt_tx(ctrlq, mbuf);
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
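
For reference, a sketch of the composite work request built above: three CPLs packed into one ULP_TX payload, padded to a 16-byte multiple (the code derives every size with sizeof; the layout below is only illustrative):

/*
 * wrlen = cxgbe_roundup(sizeof(work_request_hdr)    WR header
 *                     + sizeof(cpl_set_tcb_field)   restore RSS_INFO
 *                     + sizeof(ulptx_idata)         16-byte aligner
 *                     + sizeof(cpl_abort_req)       tear down the TCB
 *                     + sizeof(cpl_abort_rpl),      pre-baked reply
 *                       16);
 */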
+
+/**
+ * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
+ */
+static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req6 *req = NULL;
+ u64 local_lo, local_hi, peer_lo, peer_hi;
+ u32 *lip = (u32 *)f->fs.val.lip;
+ u32 *fip = (u32 *)f->fs.val.fip;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ local_hi = ((u64)lip[1]) << 32 | lip[0];
+ local_lo = ((u64)lip[3]) << 32 | lip[2];
+ peer_hi = ((u64)fip[1]) << 32 | fip[0];
+ peer_lo = ((u64)fip[3]) << 32 | fip[2];
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip_hi = local_hi;
+ req->local_ip_lo = local_lo;
+ req->peer_ip_hi = peer_hi;
+ req->peer_ip_lo = peer_lo;
+ req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
+ */
+static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req *req = NULL;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
+ f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
+ req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
+ f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+ req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Set the specified hash filter.
+ */
+static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct rte_mbuf *mbuf;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int iq;
+ int atid, size;
+ int ret = 0;
+
+ ret = validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+
+ ctrlq = &adapter->sge.ctrlq[pi->port_id];
+
+ f = t4_os_alloc(sizeof(*f));
+ if (!f)
+ goto out_err;
+
+ f->fs = *fs;
+ f->ctx = ctx;
+ f->dev = dev;
+ f->fs.iq = iq;
+
+ atid = cxgbe_alloc_atid(t, f);
+ if (atid < 0)
+ goto out_err;
+
+ if (f->fs.type) {
+ /* IPv6 hash filter */
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_atid;
+
+ size = sizeof(struct cpl_t6_act_open_req6);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_clip;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req6(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ } else {
+ /* IPv4 hash filter */
+ size = sizeof(struct cpl_t6_act_open_req);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_atid;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ }
+
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+free_clip:
+ cxgbe_clip_release(f->dev, f->clipt);
+free_atid:
+ cxgbe_free_atid(t, atid);
+
+out_err:
+ t4_os_free(f);
+ return ret;
+}
+
+/**
+ * Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct filter_entry *f)
+{
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ /*
+ * The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/**
+ * t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
+/**
+ * Create FW work request to delete the filter at a specified index
+ */
+static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+}
+
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ memset(fwr, 0, sizeof(*fwr));
+
+ /*
+ * Construct the work request to set the filter.
+ */
+ fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio));
+ fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
+ fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
+ ));
+ fwr->maci_to_matchtypem =
+ cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = cpu_to_be16(f->fs.val.lport);
+ fwr->lpm = cpu_to_be16(f->fs.mask.lport);
+ fwr->fp = cpu_to_be16(f->fs.val.fport);
+ fwr->fpm = cpu_to_be16(f->fs.mask.fport);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+out:
+ return ret;
+}
+
+/**
+ * Set the corresponding entry in the bitmap. 4 slots are
+ * marked for IPv6, whereas only 1 slot is marked for IPv4.
+ */
+static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
+{
+ t4_os_lock(&t->ftid_lock);
+ if (rte_bitmap_get(t->ftid_bmap, fidx)) {
+ t4_os_unlock(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == FILTER_TYPE_IPV4) {
+ rte_bitmap_set(t->ftid_bmap, fidx);
+ } else {
+ rte_bitmap_set(t->ftid_bmap, fidx);
+ rte_bitmap_set(t->ftid_bmap, fidx + 1);
+ rte_bitmap_set(t->ftid_bmap, fidx + 2);
+ rte_bitmap_set(t->ftid_bmap, fidx + 3);
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return 0;
+}
+
+/**
+ * Clear the corresponding entry in the bitmap. 4 slots are
+ * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
+ */
+static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+ t4_os_lock(&t->ftid_lock);
+ if (family == FILTER_TYPE_IPV4) {
+ rte_bitmap_clear(t->ftid_bmap, fidx);
+ } else {
+ rte_bitmap_clear(t->ftid_bmap, fidx);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 1);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 2);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 3);
+ }
+ t4_os_unlock(&t->ftid_lock);
+}
+
+/**
+ * Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_del_hash_filter(dev, filter_id, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+ if (!ret) {
+ dev_warn(adapter, "%s: could not find filter entry: %u\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure the filter ID is aligned on a 2-slot boundary for T6,
+ * and on a 4-slot boundary for cards below T6.
+ */
+ if (fs->type) {
+ if (chip_ver < CHELSIO_T6)
+ filter_id &= ~(0x3);
+ else
+ filter_id &= ~(0x1);
+ }
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgbe_clear_ftid(&adapter->tids,
+ f->tid - adapter->tids.ftid_base,
+ f->fs.type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ return del_filter_wr(dev, filter_id);
+ }
+
+ /*
+ * If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+
+ return 0;
+}
+
+/**
+ * Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int fidx, iq, fid_bit = 0;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ uint8_t bitoff[16] = {0};
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_set_hash_filter(dev, fs, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure filter id is aligned on the 4 slot boundary for IPv6
+ * maskfull filters.
+ */
+ if (fs->type)
+ filter_id &= ~(0x3);
+
+ ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+ if (ret)
+ return -EBUSY;
+
+ iq = get_filter_steerq(dev, fs);
+
+ /*
+ * IPv6 filters occupy four slots and must be aligned on four-slot
+ * boundaries for T5. On T6, IPv6 filters occupy two slots and
+ * must be aligned on two-slot boundaries.
+ *
+ * IPv4 filters only occupy a single slot and have no alignment
+ * requirements but writing a new IPv4 filter into the middle
+ * of an existing IPv6 filter requires clearing the old IPv6
+ * filter.
+ */
+ if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
+ /*
+ * For T6, if our IPv4 filter isn't being written to a
+ * multiple of two filter index and there's an IPv6
+ * filter at the multiple of 2 base slot, then we need
+ * to delete that IPv6 filter ...
+ * For adapters below T6, an IPv6 filter occupies 4 entries.
+ */
+ if (chip_ver < CHELSIO_T6)
+ fidx = filter_id & ~0x3;
+ else
+ fidx = filter_id & ~0x1;
+
+ if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid)
+ return -EBUSY;
+ }
+ } else { /* IPv6 */
+ unsigned int max_filter_id;
+
+ if (chip_ver < CHELSIO_T6) {
+ /*
+ * Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3)
+ return -EINVAL;
+
+ max_filter_id = filter_id + 4;
+ } else {
+ /*
+ * For T6, with CLIP enabled, an IPv6 filter occupies
+ * 2 entries.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+
+ max_filter_id = filter_id + 2;
+ }
+
+ /*
+ * Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid)
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * Check to make sure that the provided filter index is not
+ * already in use by someone else
+ */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = adapter->tids.ftid_base + filter_id;
+ fid_bit = filter_id;
+ ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
+ if (ret)
+ return ret;
+
+ /*
+ * Check to make sure the filter requested is writable ...
+ */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgbe_clear_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ return ret;
+ }
+
+ /*
+ * Allocate a CLIP table entry only if we have a non-zero IPv6 address
+ */
+ if (chip_ver > CHELSIO_T5 && fs->type &&
+ memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_tid;
+ }
+
+ /*
+ * Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
+ /*
+ * Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(dev, filter_id);
+ if (ret) {
+ fid_bit = f->tid - adapter->tids.ftid_base;
+ goto free_tid;
+ }
+
+ return ret;
+
+free_tid:
+ cxgbe_clear_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ clear_filter(f);
+ return ret;
+}
+
+/**
+ * Handle a Hash filter write reply.
+ */
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+ unsigned int ftid = G_TID_TID(G_AOPEN_ATID
+ (be32_to_cpu(rpl->atid_status)));
+ unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+
+ f = lookup_atid(t, ftid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %u\n",
+ __func__, ftid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ switch (status) {
+ case CPL_ERR_NONE: {
+ f->tid = tid;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+
+ cxgbe_insert_tid(t, f, f->tid, 0);
+ cxgbe_free_atid(t, ftid);
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ if (f->fs.hitcnts)
+ set_tcb_field(adap, tid,
+ W_TCB_TIMESTAMP,
+ V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
+ V_TCB_T_RTT_TS_RECENT_AGE
+ (M_TCB_T_RTT_TS_RECENT_AGE),
+ V_TCB_TIMESTAMP(0ULL) |
+ V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
+ 1);
+ break;
+ }
+ default:
+ dev_warn(adap, "%s: filter creation failed with status = %u\n",
+ __func__, status);
+
+ if (ctx) {
+ if (status == CPL_ERR_TCAM_FULL)
+ ctx->result = -EAGAIN;
+ else
+ ctx->result = -EINVAL;
+ }
+
+ cxgbe_free_atid(t, ftid);
+ t4_os_free(f);
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+}
+
+/**
+ * Handle a LE-TCAM filter write/deletion reply.
+ */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ struct filter_entry *f = NULL;
+ unsigned int tid = GET_TID(rpl);
+ int idx, max_fidx = adap->tids.nftids;
+
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = G_COOKIE(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /*
+ * Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ } else if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /*
+ * Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(f);
+ if (ctx)
+ ctx->result = 0;
+ } else {
+ /*
+ * Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_warn(adap, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+ }
+}
+
+/*
+ * Retrieve the packet count for the specified filter.
+ */
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte)
+{
+ struct filter_entry *f;
+ unsigned int tcb_base, tcbaddr;
+ int ret;
+
+ tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
+ if (is_hashfilter(adapter) && hash) {
+ if (fidx < adapter->tids.ntids) {
+ f = adapter->tids.tid_tab[fidx];
+ if (!f)
+ return -EINVAL;
+
+ if (is_t5(adapter->params.chip)) {
+ *c = 0;
+ return 0;
+ }
+ tcbaddr = tcb_base + (fidx * TCB_SIZE);
+ goto get_count;
+ } else {
+ return -ERANGE;
+ }
+ } else {
+ if (fidx >= adapter->tids.nftids)
+ return -ERANGE;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+ tcbaddr = tcb_base + f->tid * TCB_SIZE;
+ }
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+get_count:
+ if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
+ /*
+ * On T5 and T6, the Filter Packet Hit Count is maintained as a
+ * 32-bit Big Endian value in the TCB field {timestamp}. But
+ * instead of the filter hit count showing up at offset 20
+ * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows up
+ * at offset 24. Wacky.
+ */
+ if (get_byte) {
+ unsigned int word_offset = 4;
+ __be64 be64_byte_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be64_byte_count),
+ &be64_byte_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = be64_to_cpu(be64_byte_count);
+ } else {
+ unsigned int word_offset = 6;
+ __be32 be32_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be32_count), &be32_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = (u64)be32_to_cpu(be32_count);
+ }
+ }
+ return 0;
+}
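
To make the offset arithmetic above concrete: both counters live in the filter's TCB in adapter memory. A sketch of the addresses the code computes (not part of the patch; tcb_base is read from A_TP_CMM_TCB_BASE as shown above):

	unsigned int tcbaddr, byte_count_addr, hit_count_addr;

	/* TCB of a LE-TCAM filter in adapter memory */
	tcbaddr = tcb_base + f->tid * TCB_SIZE;

	/* 64-bit Big Endian byte count: words 4-5, i.e. byte offset 16 */
	byte_count_addr = tcbaddr + 4 * sizeof(__be32);

	/* 32-bit Big Endian packet hit count: word 6, i.e. byte offset 24,
	 * not the offset 20 that W_TCB_TIMESTAMP == 5 would suggest
	 */
	hit_count_addr = tcbaddr + 6 * sizeof(__be32);
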
+
+/**
+ * Handle a Hash filter delete reply.
+ */
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+
+ f = lookup_tid(t, tid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %u\n",
+ __func__, tid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ f->valid = 0;
+
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ cxgbe_remove_tid(t, 0, tid, 0);
+ t4_os_free(f);
+
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+}
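
For reference, a minimal sketch of how a caller might drive this completion-based API; it mirrors the pattern used by __cxgbe_flow_create() in cxgbe_flow.c further down in this patch. The helper name example_install_filter() is hypothetical; cxgbe_set_filter(), t4_init_completion() and cxgbe_poll_for_completion() are all introduced by this patch:

	static int example_install_filter(struct rte_eth_dev *dev,
					  unsigned int fidx,
					  struct ch_filter_specification *fs)
	{
		struct adapter *adap = ethdev2adap(dev);
		struct filter_ctx ctx;
		int err;

		t4_init_completion(&ctx.completion);

		/* Post the filter work request to hardware */
		err = cxgbe_set_filter(dev, fidx, fs, &ctx);
		if (err)
			return err;

		/* Wait for the firmware reply, which is delivered through
		 * filter_rpl()/hash_filter_rpl() on the FW event queue
		 */
		err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
						CXGBE_FLOW_POLL_US,
						CXGBE_FLOW_POLL_CNT,
						&ctx.completion);
		if (err)
			return err;	/* -ETIMEDOUT */

		return ctx.result;	/* 0 on success, else hardware error */
	}
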
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
new file mode 100644
index 00000000..af8fa752
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_FILTER_H_
+#define _CXGBE_FILTER_H_
+
+#include "t4_msg.h"
+/*
+ * Defined bit widths of the user-definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/*
+ * Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid; however, while it may be easy to
+ * understand their meaning for some fields (e.g. an IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ */
+struct ch_filter_tuple {
+ /*
+ * Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /*
+ * Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+
+ /* reservations for future additions */
+ uint8_t rsvd[12];
+};
+
+/*
+ * Filter specification
+ */
+struct ch_filter_specification {
+ /* Administrative fields for filter. */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /*
+ * Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+ uint32_t cap:1; /* 0 => LE-TCAM, 1 => Hash */
+
+ /*
+ * Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t eport:2; /* egress port to switch packet out */
+
+ /* Filter rule value/mask pairs. */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
+
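
As an illustration of how the (value, mask) tuples and the administrative fields combine, a hypothetical specification that passes TCP traffic with local (destination) port 80 to ingress queue 0 could be built as follows (a sketch only, using the enums defined just below; IPPROTO_TCP from the usual system headers):

	struct ch_filter_specification fs = { 0 };

	fs.type = FILTER_TYPE_IPV4;	/* IPv4 rule */
	fs.val.proto = IPPROTO_TCP;	/* match the IP protocol exactly */
	fs.mask.proto = 0xff;
	fs.val.lport = 80;		/* local (destination) port 80 */
	fs.mask.lport = 0xffff;
	fs.action = FILTER_PASS;	/* pass to host ... */
	fs.dirsteer = 1;		/* ... steered to a specific iq */
	fs.iq = 0;			/* ingress queue 0 of the port */
	fs.hitcnts = 1;			/* maintain hit counters in the TCB */
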
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum filter_type {
+ FILTER_TYPE_IPV4 = 0,
+ FILTER_TYPE_IPV6,
+};
+
+struct t4_completion {
+ unsigned int done; /* completion done (0 - No, 1 - Yes) */
+ rte_spinlock_t lock; /* completion lock */
+};
+
+/*
+ * Filter operation context to allow callers to wait for
+ * an asynchronous completion.
+ */
+struct filter_ctx {
+ struct t4_completion completion; /* completion rendezvous */
+ int result; /* result of operation */
+ u32 tid; /* to store tid of hash filter */
+};
+
+/*
+ * Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command.
+ */
+struct filter_entry {
+ /*
+ * Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+ u32 pending:1; /* filter action is pending FW reply */
+ struct filter_ctx *ctx; /* caller's completion hook */
+ struct clip_entry *clipt; /* CLIP Table entry for IPv6 */
+ struct rte_eth_dev *dev; /* Port's rte eth device */
+ void *private; /* For use by apps using filter_entry */
+
+ /* This will store the actual tid */
+ u32 tid;
+
+ /*
+ * The filter itself.
+ */
+ struct ch_filter_specification fs;
+};
+
+#define FILTER_ID_MAX (~0U)
+
+struct tid_info;
+struct adapter;
+
+/**
+ * Find first clear bit in the bitmap.
+ */
+static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap,
+ unsigned int size)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < size; idx++)
+ if (!rte_bitmap_get(bmap, idx))
+ break;
+
+ return idx;
+}
+
+/**
+ * Find a free region of 'num' consecutive entries.
+ */
+static inline unsigned int
+cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size,
+ unsigned int num)
+{
+ unsigned int idx, j, free = 0;
+
+ if (num > size)
+ return size;
+
+ for (idx = 0; idx < size; idx += num) {
+ for (j = 0; j < num; j++) {
+ if (!rte_bitmap_get(bmap, idx + j)) {
+ free++;
+ } else {
+ free = 0;
+ break;
+ }
+ }
+
+ /* Found a free region */
+ if (free == num)
+ break;
+
+ /* Reached the end and still no region found */
+ if ((idx + num) > size) {
+ idx = size;
+ break;
+ }
+ }
+
+ return idx;
+}
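
For example, reserving the four consecutive slots that a pre-T6 IPv6 filter needs could look like the following sketch (a return value equal to 'size' means no region was found; marking the slots as used afterwards remains the caller's job, as the filter code does via the ftid helpers):

	unsigned int pos;

	pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, t->nftids, 4);
	if (pos == t->nftids)
		return -ENOMEM;		/* no 4-slot region left */

	/* slots pos .. pos + 3 are free; caller marks them used in ftid_bmap */
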
+
+bool is_filter_set(struct tid_info *, int fidx, int family);
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct filter_entry *f);
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx);
+int writable_filter(struct filter_entry *f);
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family);
+int init_hash_filter(struct adapter *adap);
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl);
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl);
+int validate_filter(struct adapter *adap, struct ch_filter_specification *fs);
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte);
+#endif /* _CXGBE_FILTER_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
new file mode 100644
index 00000000..01c945f1
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -0,0 +1,845 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include "common.h"
+#include "cxgbe_flow.h"
+
+#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
+do { \
+ if (!((fs)->val.elem || (fs)->mask.elem)) { \
+ (fs)->val.elem = (__v); \
+ (fs)->mask.elem = (__m); \
+ } else { \
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
+ NULL, "a filter can be specified" \
+ " only once"); \
+ } \
+} while (0)
+
+#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
+do { \
+ memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
+ memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
+} while (0)
+
+#define CXGBE_FILL_FS(v, m, elem) \
+ __CXGBE_FILL_FS(v, m, fs, elem, e)
+
+#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
+ __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
+
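
To make the macro contract explicit: CXGBE_FILL_FS() records a (value, mask) pair at most once per element and rejects a second write, so CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto) expands roughly to the following (fs and e being the enclosing parser's parameters):

	if (!(fs->val.proto || fs->mask.proto)) {
		fs->val.proto = IPPROTO_TCP;
		fs->mask.proto = 0xff;
	} else {
		return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "a filter can be specified only once");
	}
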
+static int
+cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
+{
+ /* rte_flow specification does not allow it. */
+ if (!i->spec && (i->mask || i->last))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last or mask given without spec");
+ /*
+ * We don't support it.
+ * Although we could support 'last' values that are all 0's or
+ * last == spec, that would not provide the user with any additional
+ * functionality and would only increase complexity for us.
+ */
+ if (i->last)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last is not supported by chelsio pmd");
+ return 0;
+}
+
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ struct tp_params *tp = &adap->params.tp;
+ u64 hash_filter_mask = tp->hash_filter_mask;
+ u64 ntuple_mask = 0;
+
+ fs->cap = 0;
+
+ if (!is_hashfilter(adap))
+ return;
+
+ if (fs->type) {
+ uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff};
+ uint8_t bitoff[16] = {0};
+
+ if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, biton, sizeof(biton)))
+ return;
+ } else {
+ uint32_t biton = 0xffffffff;
+ uint32_t bitoff = 0x0U;
+
+ if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, &biton, sizeof(biton)))
+ return;
+ }
+
+ if (!fs->val.lport || fs->mask.lport != 0xffff)
+ return;
+ if (!fs->val.fport || fs->mask.fport != 0xffff)
+ return;
+
+ if (tp->protocol_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+ if (tp->ethertype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+ if (tp->port_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+
+ if (ntuple_mask != hash_filter_mask)
+ return;
+
+ fs->cap = 1; /* use hash region */
+}
+
+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_phy_port *val = item->spec;
+ const struct rte_flow_item_phy_port *umask = item->mask;
+ const struct rte_flow_item_phy_port *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+
+ if (val->index > 0x7)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "port index upto 0x7 is supported");
+
+ CXGBE_FILL_FS(val->index, mask->index, iport);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_udp *val = item->spec;
+ const struct rte_flow_item_udp *umask = item->mask;
+ const struct rte_flow_item_udp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
+
+ if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "udp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_tcp *val = item->spec;
+ const struct rte_flow_item_tcp *umask = item->mask;
+ const struct rte_flow_item_tcp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
+
+ if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
+ mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
+ mask->hdr.tcp_urp)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tcp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv4 *val = item->spec;
+ const struct rte_flow_item_ipv4 *umask = item->mask;
+ const struct rte_flow_item_ipv4 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
+
+ if (mask->hdr.time_to_live || mask->hdr.type_of_service)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "ttl/tos are not supported");
+
+ fs->type = FILTER_TYPE_IPV4;
+ CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
+ if (!val)
+ return 0; /* ipv4 wild card */
+
+ CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv6 *val = item->spec;
+ const struct rte_flow_item_ipv6 *umask = item->mask;
+ const struct rte_flow_item_ipv6 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
+
+ if (mask->hdr.vtc_flow ||
+ mask->hdr.payload_len || mask->hdr.hop_limits)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tc/flow/hop are not supported");
+
+ fs->type = FILTER_TYPE_IPV6;
+ CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
+ if (!val)
+ return 0; /* ipv6 wild card */
+
+ CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
+ struct rte_flow_error *e)
+{
+ if (attr->egress)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "attribute:<egress> is"
+ " not supported !");
+ if (attr->group > 0)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "group parameter is"
+ " not supported.");
+
+ flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
+
+ return 0;
+}
+
+static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+
+ if (rxq >= pi->n_rx_qsets)
+ return -EINVAL;
+ return 0;
+}
+
+static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct ch_filter_specification fs = f->fs;
+
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "invalid flow index %d.\n", fidx);
+ return -EINVAL;
+ }
+ if (!is_filter_set(&adap->tids, fidx, fs.type)) {
+ dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
+ struct adapter *adap, unsigned int fidx)
+{
+ if (is_filter_set(&adap->tids, fidx, fs->type)) {
+ dev_err(adap, "filter index: %d is busy.\n", fidx);
+ return -EBUSY;
+ }
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "filter index (%u) >= max(%u)\n",
+ fidx, adap->tids.nftids);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
+{
+ if (flow->fs.cap)
+ return 0; /* Hash filters */
+ return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
+ cxgbe_validate_fidxonadd(&flow->fs,
+ ethdev2adap(flow->dev), fidx);
+}
+
+static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(flow->dev);
+
+ /* For tcam get the next available slot, if default value specified */
+ if (flow->fidx == FILTER_ID_MAX) {
+ int idx;
+
+ idx = cxgbe_alloc_ftid(adap, fs->type);
+ if (idx < 0) {
+ dev_err(adap, "unable to get a filter index in tcam\n");
+ return -ENOMEM;
+ }
+ *fidx = (unsigned int)idx;
+ } else {
+ *fidx = flow->fidx;
+ }
+
+ return 0;
+}
+
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_action_phy_port *port;
+
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ port = (const struct rte_flow_action_phy_port *)a->conf;
+ fs->eport = port->index;
+ break;
+ default:
+ /* We are not supposed to come here */
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Action not supported");
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_actions(struct rte_flow *flow,
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ const struct rte_flow_action_queue *q;
+ const struct rte_flow_action *a;
+ char abit = 0;
+ int ret;
+
+ for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ q = (const struct rte_flow_action_queue *)a->conf;
+ if (!q)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "specify rx queue index");
+ if (check_rxq(flow->dev, q->index))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "Invalid rx queue");
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_PASS;
+ fs->dirsteer = 1;
+ fs->iq = q->index;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ fs->hitcnts = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ /* We allow multiple switch actions, but switch is
+ * not compatible with either queue or drop
+ */
+ if (abit++ && fs->action != FILTER_SWITCH)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "overlapping action specified");
+ ret = ch_rte_parse_atype_switch(a, fs, e);
+ if (ret)
+ return ret;
+ fs->action = FILTER_SWITCH;
+ break;
+ default:
+ /* Not supported action : return error */
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ a, "Action not supported");
+ }
+ }
+
+ return 0;
+}
+
+struct chrte_fparse parseitem[] = {
+ [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
+ .fptr = ch_rte_parsetype_port,
+ .dmask = &(const struct rte_flow_item_phy_port){
+ .index = 0x7,
+ }
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .fptr = ch_rte_parsetype_ipv4,
+ .dmask = &rte_flow_item_ipv4_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .fptr = ch_rte_parsetype_ipv6,
+ .dmask = &rte_flow_item_ipv6_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .fptr = ch_rte_parsetype_udp,
+ .dmask = &rte_flow_item_udp_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .fptr = ch_rte_parsetype_tcp,
+ .dmask = &rte_flow_item_tcp_mask,
+ },
+};
+
+static int
+cxgbe_rtef_parse_items(struct rte_flow *flow,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ const struct rte_flow_item *i;
+ char repeat[ARRAY_SIZE(parseitem)] = {0};
+
+ for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
+ struct chrte_fparse *idx = &flow->item_parser[i->type];
+ int ret;
+
+ if (i->type >= ARRAY_SIZE(parseitem))
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "Item not supported");
+
+ switch (i->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ continue;
+ default:
+ /* check if item is repeated */
+ if (repeat[i->type])
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "parse items cannot be repeated (except void)");
+ repeat[i->type] = 1;
+
+ /* validate the item */
+ ret = cxgbe_validate_item(i, e);
+ if (ret)
+ return ret;
+
+ if (!idx || !idx->fptr) {
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "Item not supported");
+ } else {
+ ret = idx->fptr(idx->dmask, i, &flow->fs, e);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ cxgbe_fill_filter_region(adap, &flow->fs);
+
+ return 0;
+}
+
+static int
+cxgbe_flow_parse(struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ int ret;
+
+ /* parse user request into ch_filter_specification */
+ ret = cxgbe_rtef_parse_attr(flow, attr, e);
+ if (ret)
+ return ret;
+ ret = cxgbe_rtef_parse_items(flow, item, e);
+ if (ret)
+ return ret;
+ return cxgbe_rtef_parse_actions(flow, action, e);
+}
+
+static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(dev);
+ struct tid_info *t = &adap->tids;
+ struct filter_ctx ctx;
+ unsigned int fidx;
+ int err;
+
+ if (cxgbe_get_fidx(flow, &fidx))
+ return -ENOMEM;
+ if (cxgbe_verify_fidx(flow, fidx, 0))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ /* go create the filter */
+ err = cxgbe_set_filter(dev, fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while creating filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter set operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while creating the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ if (fs->cap) { /* to destroy the filter */
+ flow->fidx = ctx.tid;
+ flow->f = lookup_tid(t, ctx.tid);
+ } else {
+ flow->fidx = fidx;
+ flow->f = &adap->tids.ftid_tab[fidx];
+ }
+
+ return 0;
+}
+
+static struct rte_flow *
+cxgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct rte_flow *flow;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow) {
+ rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to allocate memory for"
+ " filter_entry");
+ return NULL;
+ }
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ if (cxgbe_flow_parse(flow, attr, item, action, e)) {
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ /* go, interact with cxgbe_filter */
+ ret = __cxgbe_flow_create(dev, flow);
+ if (ret) {
+ rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to create flow rule");
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ flow->f->private = flow; /* Will be used during flush */
+
+ return flow;
+}
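
From an application's point of view this entry point is reached through the generic rte_flow API. A hypothetical caller dropping all ingress TCP traffic on port 0 could look like this (assuming <rte_flow.h>, <stdio.h> and a configured port; items carrying no spec act as wildcards, as handled by the parsers above):

	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	flow = rte_flow_create(0 /* port_id */, &attr, pattern, actions, &err);
	if (!flow)
		printf("flow creation failed: %s\n",
		       err.message ? err.message : "(no message)");
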
+
+static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct filter_entry *f = flow->f;
+ struct ch_filter_specification *fs;
+ struct filter_ctx ctx;
+ int err;
+
+ fs = &f->fs;
+ if (cxgbe_verify_fidx(flow, flow->fidx, 1))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while deleting filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter delete operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while deleting the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *e)
+{
+ int ret;
+
+ ret = __cxgbe_flow_destroy(dev, flow);
+ if (ret)
+ return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ flow, "error destroying filter.");
+ t4_os_free(flow);
+ return 0;
+}
+
+static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
+ u64 *byte_count)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ struct ch_filter_specification fs = flow->f->fs;
+ unsigned int fidx = flow->fidx;
+ int ret = 0;
+
+ ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
+ if (ret)
+ return ret;
+ return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
+}
+
+static int
+cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *data,
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification fs;
+ struct rte_flow_query_count *c;
+ struct filter_entry *f;
+ int ret;
+
+ RTE_SET_USED(dev);
+
+ f = flow->f;
+ fs = f->fs;
+
+ if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "only count supported for query");
+
+ /*
+ * This is a valid operation, since we are allowed to do Chelsio-
+ * specific operations on the rte side of our code, but not vice-versa.
+ *
+ * So, fs can be queried/modified here, BUT rte_flow_query_count
+ * cannot be worked on by the lower layer, since we want to keep it
+ * rte_flow agnostic.
+ */
+ if (!fs.hitcnts)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ &fs, "filter hit counters were not"
+ " enabled during filter creation");
+
+ c = (struct rte_flow_query_count *)data;
+ ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
+ if (ret)
+ return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
+ f, "cxgbe pmd failed to"
+ " perform query");
+
+ /* Query was successful */
+ c->bytes_set = 1;
+ c->hits_set = 1;
+
+ return 0; /* success / partial_success */
+}
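
The counters are then retrieved with rte_flow_query() and a COUNT action, e.g. (a sketch, assuming <inttypes.h>; the flow must have been created with RTE_FLOW_ACTION_TYPE_COUNT so that fs.hitcnts was set):

	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
	struct rte_flow_query_count qc = { 0 };
	struct rte_flow_error err;

	if (!rte_flow_query(0 /* port_id */, flow, &count, &qc, &err) &&
	    qc.hits_set && qc.bytes_set)
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
		       qc.hits, qc.bytes);
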
+
+static int
+cxgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct rte_flow *flow;
+ unsigned int fidx;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow)
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Unable to allocate memory for filter_entry");
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ ret = cxgbe_flow_parse(flow, attr, item, action, e);
+ if (ret) {
+ t4_os_free(flow);
+ return ret;
+ }
+
+ if (validate_filter(adap, &flow->fs)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "validation failed. Check f/w config file.");
+ }
+
+ if (cxgbe_get_fidx(flow, &fidx)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no memory in tcam.");
+ }
+
+ if (cxgbe_verify_fidx(flow, fidx, 0)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "validation failed");
+ }
+
+ t4_os_free(flow);
+ return 0;
+}
+
+/*
+ * @ret : == 0 filter destroyed successfully
+ * < 0 error destroying filter
+ * == 1 filter not active / not found
+ */
+static int
+cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
+ struct rte_flow_error *e)
+{
+ if (f && (f->valid || f->pending) &&
+ f->dev == dev && /* Only if user has asked for this port */
+ f->private) /* We (rte_flow) created this filter */
+ return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
+ e);
+ return 1;
+}
+
+static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ unsigned int i;
+ int ret = 0;
+
+ if (adap->tids.ftid_tab) {
+ struct filter_entry *f = &adap->tids.ftid_tab[0];
+
+ for (i = 0; i < adap->tids.nftids; i++, f++) {
+ ret = cxgbe_check_n_destroy(f, dev, e);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ if (is_hashfilter(adap) && adap->tids.tid_tab) {
+ struct filter_entry *f;
+
+ for (i = adap->tids.hash_base; i < adap->tids.ntids; i++) {
+ f = (struct filter_entry *)adap->tids.tid_tab[i];
+
+ ret = cxgbe_check_n_destroy(f, dev, e);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+out:
+ return ret >= 0 ? 0 : ret;
+}
+
+static const struct rte_flow_ops cxgbe_flow_ops = {
+ .validate = cxgbe_flow_validate,
+ .create = cxgbe_flow_create,
+ .destroy = cxgbe_flow_destroy,
+ .flush = cxgbe_flow_flush,
+ .query = cxgbe_flow_query,
+ .isolate = NULL,
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ RTE_SET_USED(dev);
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &cxgbe_flow_ops;
+ break;
+ default:
+ ret = -ENOTSUP;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/net/cxgbe/cxgbe_flow.h b/drivers/net/cxgbe/cxgbe_flow.h
new file mode 100644
index 00000000..0f750474
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_flow.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#ifndef _CXGBE_FLOW_H_
+#define _CXGBE_FLOW_H_
+
+#include <rte_flow_driver.h>
+#include "cxgbe_filter.h"
+#include "cxgbe.h"
+
+#define CXGBE_FLOW_POLL_US 10
+#define CXGBE_FLOW_POLL_CNT 10
+
+struct chrte_fparse {
+ int (*fptr)(const void *mask, /* currently supported mask */
+ const struct rte_flow_item *item, /* user input */
+ struct ch_filter_specification *fs, /* where to parse */
+ struct rte_flow_error *e);
+ const void *dmask; /* Specify what is supported by chelsio by default*/
+};
+
+struct rte_flow {
+ struct filter_entry *f;
+ struct ch_filter_specification fs; /* temp, to create filter */
+ struct chrte_fparse *item_parser;
+ /*
+ * filter_entry doesn't store the user priority.
+ * After filter creation, this holds the flow
+ * index (fidx) for both hash and TCAM filters.
+ */
+ unsigned int fidx;
+ struct rte_eth_dev *dev;
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+#endif /* _CXGBE_FLOW_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 28db6c06..c3938e8d 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -57,14 +29,31 @@
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
-#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
+#include <rte_kvargs.h>
#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a chunk of memory. The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+ return rte_zmalloc(NULL, size, 0);
+}
+
+/**
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+ rte_free(addr);
+}
/*
* Response queue handler for the FW event queue.
@@ -98,6 +87,18 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_fw6_msg *msg = (const void *)rsp;
t4_handle_fw_rpl(q->adapter, msg->data);
+ } else if (opcode == CPL_ABORT_RPL_RSS) {
+ const struct cpl_abort_rpl_rss *p = (const void *)rsp;
+
+ hash_del_filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+ filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_ACT_OPEN_RPL) {
+ const struct cpl_act_open_rpl *p = (const void *)rsp;
+
+ hash_filter_rpl(q->adapter, p);
} else {
dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
opcode);
@@ -106,6 +107,79 @@ out:
return 0;
}
+/**
+ * Set up SGE control queues to pass control information.
+ */
+int setup_sge_ctrl_txq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err = 0, i = 0;
+
+ for_each_port(adapter, i) {
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+ q->q.size = 1024;
+ err = t4_sge_alloc_ctrl_txq(adapter, q,
+ adapter->eth_dev, i,
+ s->fw_evtq.cntxt_id,
+ rte_socket_id());
+ if (err) {
+ dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
+ err);
+ goto out;
+ }
+ snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
+ q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
+ RTE_CACHE_LINE_SIZE,
+ RTE_MBUF_PRIV_ALIGN,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ SOCKET_ID_ANY);
+ if (!q->mb_pool) {
+ dev_err(adapter, "Can't create ctrl pool for port: %d",
+ i);
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ return 0;
+out:
+ t4_free_sge_resources(adapter);
+ return err;
+}
+
+/**
+ * cxgbe_poll_for_completion: Poll rxq for completion
+ * @q: rxq to poll
+ * @us: microseconds to delay
+ * @cnt: number of times to poll
+ * @c: completion to check for 'done' status
+ *
+ * Polls the rxq for replies until the completion is marked done or
+ * the poll count expires.
+ */
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c)
+{
+ unsigned int i;
+ unsigned int work_done, budget = 4;
+
+ if (!c)
+ return -EINVAL;
+
+ for (i = 0; i < cnt; i++) {
+ cxgbe_poll(q, NULL, budget, &work_done);
+ t4_os_lock(&c->lock);
+ if (c->done) {
+ t4_os_unlock(&c->lock);
+ return 0;
+ }
+ t4_os_unlock(&c->lock);
+ udelay(us);
+ }
+ return -ETIMEDOUT;
+}
+
int setup_sge_fwevtq(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
@@ -197,17 +271,186 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
return 0;
}
+/**
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgbe_alloc_atid(struct tid_info *t, void *data)
+{
+ int atid = -1;
+
+ t4_os_lock(&t->atid_lock);
+ if (t->afree) {
+ union aopen_entry *p = t->afree;
+
+ atid = p - t->atid_tab;
+ t->afree = p->next;
+ p->data = data;
+ t->atids_in_use++;
+ }
+ t4_os_unlock(&t->atid_lock);
+ return atid;
+}
+
+/**
+ * Release an active-open TID.
+ */
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
+{
+ union aopen_entry *p = &t->atid_tab[atid];
+
+ t4_os_lock(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ t4_os_unlock(&t->atid_lock);
+}
+
+/**
+ * Populate a TID_RELEASE WR. Caller must properly size the skb.
+ */
+static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
+ INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
+}
+
+/**
+ * Release a TID and inform the hardware. If we are unable to allocate
+ * the release message, the hardware notification is skipped.
+ */
+void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+ unsigned short family)
+{
+ struct rte_mbuf *mbuf;
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ WARN_ON(tid >= t->ntids);
+
+ if (t->tid_tab[tid]) {
+ t->tid_tab[tid] = NULL;
+ rte_atomic32_dec(&t->conns_in_use);
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->tids_in_use);
+ }
+ }
+
+ mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
+ if (mbuf) {
+ mbuf->data_len = sizeof(struct cpl_tid_release);
+ mbuf->pkt_len = mbuf->data_len;
+ mk_tid_release(mbuf, tid);
+ t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
+ }
+}
+
+/**
+ * Insert a TID.
+ */
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family)
+{
+ t->tid_tab[tid] = data;
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->tids_in_use);
+ }
+
+ rte_atomic32_inc(&t->conns_in_use);
+}
+
+/**
+ * Free TID tables.
+ */
+static void tid_free(struct tid_info *t)
+{
+ if (t->tid_tab) {
+ if (t->ftid_bmap)
+ rte_bitmap_free(t->ftid_bmap);
+
+ if (t->ftid_bmap_array)
+ t4_os_free(t->ftid_bmap_array);
+
+ t4_os_free(t->tid_tab);
+ }
+
+ memset(t, 0, sizeof(struct tid_info));
+}
+
+/**
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+ size_t size;
+ unsigned int ftid_bmap_size;
+ unsigned int natids = t->natids;
+ unsigned int max_ftids = t->nftids;
+
+ ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ max_ftids * sizeof(*t->ftid_tab) +
+ natids * sizeof(*t->atid_tab);
+
+ t->tid_tab = t4_os_alloc(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+ t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
+ t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
+ if (!t->ftid_bmap_array) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ t4_os_lock_init(&t->atid_lock);
+ t4_os_lock_init(&t->ftid_lock);
+
+ t->afree = NULL;
+ t->atids_in_use = 0;
+ rte_atomic32_init(&t->tids_in_use);
+ rte_atomic32_set(&t->tids_in_use, 0);
+ rte_atomic32_init(&t->conns_in_use);
+ rte_atomic32_set(&t->conns_in_use, 0);
+
+ /* Setup the free list for atid_tab and clear the stid bitmap. */
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+
+ t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
+ ftid_bmap_size);
+ if (!t->ftid_bmap) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static inline bool is_x_1g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0;
+ return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
{
unsigned int speeds, high_speeds;
- speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(lc->supported));
- high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+ speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
+ high_speeds = speeds &
+ ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
return high_speeds != 0;
}
@@ -270,7 +513,7 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
* We default up to # of cores queues per 1G/10G port.
*/
if (nb_ports)
- q_per_port = (MAX_ETH_QSETS -
+ q_per_port = (s->max_ethqsets -
(adap->params.nports - nb_ports)) /
nb_ports;
@@ -294,8 +537,6 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
qidx += pi->n_rx_qsets;
}
- s->max_ethqsets = qidx;
-
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
@@ -345,14 +586,17 @@ static void setup_memwin(struct adapter *adap)
MEMWIN_NIC));
}
-static int init_rss(struct adapter *adap)
+int init_rss(struct adapter *adap)
{
unsigned int i;
- int err;
- err = t4_init_rss_mode(adap, adap->mbox);
- if (err)
- return err;
+ if (is_pf4(adap)) {
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
+ }
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -360,6 +604,8 @@ static int init_rss(struct adapter *adap)
pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
if (!pi->rss)
return -ENOMEM;
+
+ pi->rss_hf = CXGBE_RSS_HF_ALL;
}
return 0;
}
@@ -367,7 +613,7 @@ static int init_rss(struct adapter *adap)
/**
* Dump basic information about the adapter.
*/
-static void print_adapter_info(struct adapter *adap)
+void print_adapter_info(struct adapter *adap)
{
/**
* Hardware/Firmware/etc. Version/Revision IDs.
@@ -375,27 +621,29 @@ static void print_adapter_info(struct adapter *adap)
t4_dump_version_info(adap);
}
-static void print_port_info(struct adapter *adap)
+void print_port_info(struct adapter *adap)
{
int i;
char buf[80];
struct rte_pci_addr *loc = &adap->pdev->addr;
for_each_port(adap, i) {
- const struct port_info *pi = &adap->port[i];
+ const struct port_info *pi = adap2pinfo(adap, i);
char *bufp = buf;
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
bufp += sprintf(bufp, "100M/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
bufp += sprintf(bufp, "1G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
bufp += sprintf(bufp, "10G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
bufp += sprintf(bufp, "25G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
bufp += sprintf(bufp, "40G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
+ bufp += sprintf(bufp, "50G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
@@ -412,6 +660,84 @@ static void print_port_info(struct adapter *adap)
}
}
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static void configure_vlan_types(struct adapter *adapter)
+{
+ struct rte_pci_device *pdev = adapter->pdev;
+ int i;
+
+ for_each_port(adapter, i) {
+ /* OVLAN Type 0x88a8 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x88a8));
+ /* OVLAN Type 0x9100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x9100));
+ /* OVLAN Type 0x8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x8100));
+
+ /* IVLAN 0X8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
+ V_IVLAN_ETYPE(M_IVLAN_ETYPE),
+ V_IVLAN_ETYPE(0x8100));
+
+ t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_OVLAN_EN2 | F_IVLAN_EN,
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_OVLAN_EN2 | F_IVLAN_EN);
+ }
+
+ if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
+ t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
+ V_RM_OVLAN(1), V_RM_OVLAN(0));
+}
+
static void configure_pcie_ext_tag(struct adapter *adapter)
{
u16 v;
@@ -442,6 +768,40 @@ static void configure_pcie_ext_tag(struct adapter *adapter)
}
}
+/* Figure out how many Queue Sets we can support */
+void configure_max_ethqsets(struct adapter *adapter)
+{
+ unsigned int ethqsets;
+
+ /*
+ * We need to reserve an Ingress Queue for the Asynchronous Firmware
+ * Event Queue.
+ *
+ * For each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ */
+ if (is_pf4(adapter)) {
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ ethqsets = pfres->niqflint - 1;
+ if (pfres->neq < ethqsets * 2)
+ ethqsets = pfres->neq / 2;
+ } else {
+ struct vf_resources *vfres = &adapter->params.vfres;
+
+ ethqsets = vfres->niqflint - 1;
+ if (vfres->nethctrl != ethqsets)
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ if (vfres->neq < ethqsets * 2)
+ ethqsets = vfres->neq / 2;
+ }
+
+ if (ethqsets > MAX_ETH_QSETS)
+ ethqsets = MAX_ETH_QSETS;
+ adapter->sge.max_ethqsets = ethqsets;
+}
+
/*
* Tweak configuration based on system architecture, etc. Most of these have
* defaults assigned to them by Firmware Configuration Files (if we're using
@@ -580,8 +940,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* This will allow the firmware to optimize aspects of the hardware
* configuration which will result in improved performance.
*/
- caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER |
- FW_CAPS_CONFIG_NIC_ETHOFLD));
+ caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
caps_cmd.toecaps = 0;
caps_cmd.iscsicaps = 0;
caps_cmd.rdmacaps = 0;
@@ -648,6 +1007,7 @@ bye:
static int adap_init0(struct adapter *adap)
{
+ struct fw_caps_config_cmd caps_cmd;
int ret = 0;
u32 v, port_vec;
enum dev_state state;
@@ -723,6 +1083,17 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning info\n");
+ goto bye;
+ }
+
/* Find out what ports are available to us. */
v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
@@ -764,6 +1135,50 @@ static int adap_init0(struct adapter *adap)
V_FW_PARAMS_PARAM_Y(0) | \
V_FW_PARAMS_PARAM_Z(0))
+ params[0] = FW_PARAM_PFVF(FILTER_START);
+ params[1] = FW_PARAM_PFVF(FILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ftid_base = val[0];
+ adap->tids.nftids = val[1] - val[0] + 1;
+
+ params[0] = FW_PARAM_PFVF(CLIP_START);
+ params[1] = FW_PARAM_PFVF(CLIP_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->clipt_start = val[0];
+ adap->clipt_end = val[1];
+
+ /*
+ * Get device capabilities so we can determine what resources we need
+ * to manage.
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ if (ret < 0)
+ goto bye;
+
+ if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
+ is_t6(adap->params.chip)) {
+ if (init_hash_filter(adap) < 0)
+ goto bye;
+ }
+
+ /* query tid-related parameters */
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
/* If we're running on newer firmware, let it know that we're
* prepared to deal with encapsulated CPL messages. Older
* firmware won't understand this and we'll just get
@@ -828,6 +1243,8 @@ static int adap_init0(struct adapter *adap)
t4_init_sge_params(adap);
t4_init_tp_params(adap);
configure_pcie_ext_tag(adap);
+ configure_vlan_types(adap);
+ configure_max_ethqsets(adap);
adap->params.drv_memwin = MEMWIN_NIC;
adap->flags |= FW_OK;
@@ -860,7 +1277,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
};
- const struct port_info *pi = &adap->port[port_id];
+ const struct port_info *pi = adap2pinfo(adap, port_id);
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
@@ -881,6 +1298,18 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
pi->port_id, pi->mod_type);
}
+inline bool force_linkup(struct adapter *adap)
+{
+ struct rte_pci_device *pdev = adap->pdev;
+
+ if (is_pf4(adap))
+ return false; /* force_linkup not required for pf driver*/
+ if (!cxgbe_get_devargs(pdev->device.devargs,
+ CXGBE_DEVARG_FORCE_LINK_UP))
+ return false;
+ return true;
+}
+
/**
* link_start - enable a port
* @dev: the port to enable
@@ -912,7 +1341,7 @@ int link_start(struct port_info *pi)
ret = 0;
}
}
- if (ret == 0)
+ if (ret == 0 && is_pf4(adapter))
ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
&pi->link_cfg);
if (ret == 0) {
@@ -926,18 +1355,80 @@ int link_start(struct port_info *pi)
ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
true, true, false);
}
+
+ if (ret == 0 && force_linkup(adapter))
+ pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
return ret;
}
/**
- * cxgb4_write_rss - write the RSS table for a given port
+ * cxgbe_write_rss_conf - flash the RSS configuration for a given port
+ * @pi: the port
+ * @rss_hf: Hash configuration to apply
+ */
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
+{
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+ u64 flags = 0;
+ u16 rss;
+ int err;
+
+ /* Should never be called before setting up sge eth rx queues */
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ dev_err(adapter, "%s: No RXQs available on port %d\n",
+ __func__, pi->port_id);
+ return -EINVAL;
+ }
+
+ /* Don't allow unsupported hash functions */
+ if (rss_hf & ~CXGBE_RSS_HF_ALL)
+ return -EINVAL;
+
+ if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rss = rxq[0].rspq.abs_id;
+
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ flags, rss);
+ return err;
+}
+
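The rss_hf-to-flags translation above is a pure bit mapping from DPDK hash-function bits to firmware VI configuration flags. A minimal standalone sketch of the same pattern; the constants below are hypothetical stand-ins, not the real ETH_RSS_* or F_FW_RSS_VI_CONFIG_* values:

#include <stdint.h>
#include <stdio.h>

#define HF_IPV4      (1ULL << 0)  /* stand-in hash-function bits */
#define HF_TCP_IPV4  (1ULL << 1)
#define HF_UDP_IPV4  (1ULL << 2)

#define FLAG_IP4_2TUPLE  0x1u     /* stand-in firmware flags */
#define FLAG_IP4_4TUPLE  0x2u
#define FLAG_UDP_EN      0x4u

static unsigned int hf_to_flags(uint64_t rss_hf)
{
	unsigned int flags = 0;

	if (rss_hf & HF_IPV4)
		flags |= FLAG_IP4_2TUPLE;
	if (rss_hf & HF_TCP_IPV4)
		flags |= FLAG_IP4_4TUPLE;
	if (rss_hf & HF_UDP_IPV4)
		flags |= FLAG_IP4_4TUPLE | FLAG_UDP_EN;
	return flags;
}

int main(void)
{
	/* IPv4 + UDP/IPv4 hashing: 2-tuple, 4-tuple and UDP enable set */
	printf("flags=%#x\n", hf_to_flags(HF_IPV4 | HF_UDP_IPV4));
	return 0;
}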
+/**
+ * cxgbe_write_rss - write the RSS table for a given port
* @pi: the port
* @queues: array of queue indices for RSS
*
* Sets up the portion of the HW RSS table for the port's VI to distribute
* packets to the Rx queues in @queues.
*/
-int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
@@ -958,20 +1449,6 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
- /*
- * If Tunnel All Lookup isn't specified in the global RSS
- * Configuration, then we need to specify a default Ingress
- * Queue for any ingress packets which aren't hashed. We'll
- * use our first ingress queue ...
- */
- if (!err)
- err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
- F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_UDPEN,
- rss[0]);
rte_free(rss);
return err;
}
@@ -1001,7 +1478,11 @@ int setup_rss(struct port_info *pi)
for (j = 0; j < pi->rss_size; j++)
pi->rss[j] = j % pi->n_rx_qsets;
- err = cxgb4_write_rss(pi, pi->rss);
+ err = cxgbe_write_rss(pi, pi->rss);
+ if (err)
+ return err;
+
+ err = cxgbe_write_rss_conf(pi, pi->rss_hf);
if (err)
return err;
pi->flags |= PORT_RSS_DONE;
@@ -1016,7 +1497,8 @@ int setup_rss(struct port_info *pi)
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
/* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
+ t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
V_SEINTARM(q->intr_params) |
V_INGRESSQID(q->cntxt_id));
}
@@ -1051,7 +1533,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
#define FW_CAPS_TO_SPEED(__fw_name) \
do { \
- if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
SET_SPEED(__fw_name); \
} while (0)
@@ -1106,6 +1588,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
case FW_PORT_TYPE_CR4_QSFP:
FW_CAPS_TO_SPEED(SPEED_25G);
FW_CAPS_TO_SPEED(SPEED_40G);
+ FW_CAPS_TO_SPEED(SPEED_50G);
FW_CAPS_TO_SPEED(SPEED_100G);
break;
@@ -1128,14 +1611,38 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
{
*speed_caps = 0;
- fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.supported,
+ fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
speed_caps);
- if (!(pi->link_cfg.supported & FW_PORT_CAP_ANEG))
+ if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
*speed_caps |= ETH_LINK_SPEED_FIXED;
}
/**
+ * cxgbe_set_link_status - Set device link up or down.
+ * @pi: Underlying port's info
+ * @status: 0 - down, 1 - up
+ *
+ * Set the device link up or down.
+ */
+int cxgbe_set_link_status(struct port_info *pi, bool status)
+{
+ struct adapter *adapter = pi->adapter;
+ int err = 0;
+
+ err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
+ if (err) {
+ dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
+ return err;
+ }
+
+ if (!status)
+ t4_reset_link_config(adapter, pi->pidx);
+
+ return 0;
+}
+
+/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
*
@@ -1147,7 +1654,8 @@ int cxgbe_up(struct adapter *adap)
{
enable_rx(adap, &adap->sge.fw_evtq);
t4_sge_tx_monitor_start(adap);
- t4_intr_enable(adap);
+ if (is_pf4(adap))
+ t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
/* TODO: deadman watchdog ?? */
@@ -1159,17 +1667,7 @@ int cxgbe_up(struct adapter *adap)
*/
int cxgbe_down(struct port_info *pi)
{
- struct adapter *adapter = pi->adapter;
- int err = 0;
-
- err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
- if (err) {
- dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
- return err;
- }
-
- t4_reset_link_config(adapter, pi->port_id);
- return 0;
+ return cxgbe_set_link_status(pi, false);
}
/*
@@ -1181,7 +1679,10 @@ void cxgbe_close(struct adapter *adapter)
int i;
if (adapter->flags & FULL_INIT_DONE) {
- t4_intr_disable(adapter);
+ if (is_pf4(adapter))
+ t4_intr_disable(adapter);
+ tid_free(&adapter->tids);
+ t4_cleanup_clip_tbl(adapter);
t4_sge_tx_monitor_stop(adapter);
t4_free_sge_resources(adapter);
for_each_port(adapter, i) {
@@ -1190,11 +1691,16 @@ void cxgbe_close(struct adapter *adapter)
t4_free_vi(adapter, adapter->mbox,
adapter->pf, 0, pi->viid);
rte_free(pi->eth_dev->data->mac_addrs);
+ /* Skip first port since it'll be freed by DPDK stack */
+ if (i) {
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
}
adapter->flags &= ~FULL_INIT_DONE;
}
- if (adapter->flags & FW_OK)
+ if (is_pf4(adapter) && (adapter->flags & FW_OK))
t4_fw_bye(adapter, adapter->mbox);
}
@@ -1220,6 +1726,7 @@ int cxgbe_probe(struct adapter *adapter)
t4_os_lock_init(&adapter->mbox_lock);
TAILQ_INIT(&adapter->mbox_list);
+ t4_os_lock_init(&adapter->win0_lock);
err = t4_prep_adapter(adapter);
if (err)
@@ -1265,21 +1772,16 @@ int cxgbe_probe(struct adapter *adapter)
}
for_each_port(adapter, i) {
- char name[RTE_ETH_NAME_MAX_LEN];
- struct rte_eth_dev_data *data = NULL;
const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
- pi = &adapter->port[i];
- pi->adapter = adapter;
- pi->xact_addr_filt = -1;
- pi->port_id = i;
-
- snprintf(name, sizeof(name), "cxgbe%d",
- adapter->eth_dev->data->port_id + i);
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
if (i == 0) {
/* First port is already allocated by DPDK */
- pi->eth_dev = adapter->eth_dev;
+ eth_dev = adapter->eth_dev;
goto allocate_mac;
}
@@ -1289,21 +1791,26 @@ int cxgbe_probe(struct adapter *adapter)
*/
/* reserve an ethdev entry */
- pi->eth_dev = rte_eth_dev_allocate(name);
- if (!pi->eth_dev)
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
goto out_free;
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (!data)
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
goto out_free;
- data->port_id = adapter->eth_dev->data->port_id + i;
-
- pi->eth_dev->data = data;
-
allocate_mac:
+ pi = (struct port_info *)eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = i;
+ pi->pidx = i;
+
pi->eth_dev->device = &adapter->pdev->device;
- pi->eth_dev->data->dev_private = pi;
pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
@@ -1318,6 +1825,11 @@ allocate_mac:
err = -1;
goto out_free;
}
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
}
if (adapter->flags & FW_OK) {
@@ -1334,6 +1846,35 @@ allocate_mac:
print_adapter_info(adapter);
print_port_info(adapter);
+ adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+ adapter->clipt_end);
+ if (!adapter->clipt) {
+ /* We tolerate a lack of clip_table, giving up some
+ * functionality
+ */
+ dev_warn(adapter, "could not allocate CLIP. Continuing\n");
+ }
+
+ if (tid_init(&adapter->tids) < 0) {
+ /* Disable filtering support */
+ dev_warn(adapter, "could not allocate TID table, "
+ "filter support disabled. Continuing\n");
+ }
+
+ if (is_hashfilter(adapter)) {
+ if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
+ u32 hash_base, hash_reg;
+
+ hash_reg = A_LE_DB_TID_HASHBASE;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base / 4;
+ }
+ } else {
+ /* Disable hash filtering support */
+ dev_warn(adapter,
+ "Maskless filter support disabled. Continuing\n");
+ }
+
err = init_rss(adapter);
if (err)
goto out_free;
@@ -1349,8 +1890,11 @@ out_free:
/* Skip first port since it'll be de-allocated by DPDK */
if (i == 0)
continue;
- if (pi->eth_dev->data)
- rte_free(pi->eth_dev->data);
+ if (pi->eth_dev) {
+ if (pi->eth_dev->data->dev_private)
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
}
if (adapter->flags & FW_OK)
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
new file mode 100644
index 00000000..50931ed0
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_OFLD_H_
+#define _CXGBE_OFLD_H_
+
+#include <rte_bitmap.h>
+
+#include "cxgbe_filter.h"
+
+#define INIT_TP_WR(w, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_TP_WR) | \
+ V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = cpu_to_be32( \
+ V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \
+ INIT_TP_WR(w, tid); \
+ OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | \
+ V_FW_WR_ATOMIC(atomic)); \
+ (w)->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
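Both macros above express the work-request length in 16-byte units via DIV_ROUND_UP. The arithmetic in isolation, with DIV_ROUND_UP defined the usual round-up way:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* a 40-byte work request occupies 3 sixteen-byte LEN16 units */
	printf("%d\n", DIV_ROUND_UP(40, 16));
	return 0;
}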
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+union aopen_entry {
+ void *data;
+ union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc of filter TID.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ void **tid_tab;
+ unsigned int ntids;
+ struct filter_entry *ftid_tab; /* Normal filters */
+ union aopen_entry *atid_tab;
+ struct rte_bitmap *ftid_bmap;
+ uint8_t *ftid_bmap_array;
+ unsigned int nftids, natids;
+ unsigned int ftid_base, hash_base;
+
+ union aopen_entry *afree;
+ unsigned int atids_in_use;
+
+ /* TIDs in the TCAM */
+ rte_atomic32_t tids_in_use;
+ /* TIDs in the HASH */
+ rte_atomic32_t hash_tids_in_use;
+ rte_atomic32_t conns_in_use;
+
+ rte_spinlock_t atid_lock __rte_cache_aligned;
+ rte_spinlock_t ftid_lock;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+ return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+ return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
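The atid table above doubles as its own free list: unused entries chain through the 'next' member of the union that otherwise stores user data, and the atid is simply the entry's index. A minimal standalone sketch of that idea, simplified and without the spinlock the driver takes:

#include <stddef.h>
#include <stdio.h>

union entry {
	void *data;
	union entry *next;
};

#define NATIDS 4

static union entry tab[NATIDS];
static union entry *afree;

static void init_atids(void)
{
	int i;

	for (i = 0; i < NATIDS - 1; i++)
		tab[i].next = &tab[i + 1];
	tab[NATIDS - 1].next = NULL;
	afree = &tab[0];
}

static int alloc_atid(void *data)
{
	union entry *e = afree;

	if (!e)
		return -1;
	afree = e->next;
	e->data = data;
	return (int)(e - tab);  /* index into the table == atid */
}

static void free_atid(int atid)
{
	tab[atid].next = afree;  /* push back onto the free list */
	afree = &tab[atid];
}

int main(void)
{
	int x = 42, atid;

	init_atids();
	atid = alloc_atid(&x);
	printf("atid=%d data=%d\n", atid, *(int *)tab[atid].data);
	free_atid(atid);
	return 0;
}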
+int cxgbe_alloc_atid(struct tid_info *t, void *data);
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid);
+void cxgbe_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
+ unsigned short family);
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family);
+
+#endif /* _CXGBE_OFLD_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_pfvf.h b/drivers/net/cxgbe/cxgbe_pfvf.h
new file mode 100644
index 00000000..8d0a105a
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_pfvf.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_PFVF_H_
+#define _CXGBE_PFVF_H_
+
+void cxgbe_dev_rx_queue_release(void *q);
+void cxgbe_dev_tx_queue_release(void *q);
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info);
+void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev);
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr);
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete);
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev);
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev);
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev);
+#endif /* _CXGBE_PFVF_H_ */
diff --git a/drivers/net/cxgbe/cxgbevf_ethdev.c b/drivers/net/cxgbe/cxgbevf_ethdev.c
new file mode 100644
index 00000000..3b32ca9d
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbevf_ethdev.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x8
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { .vendor_id = 0, } \
+ }
+
+/*
+ * ... and the PCI ID Table itself ...
+ */
+#include "t4_pci_id_tbl.h"
+
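The BEGIN/ENTRY/END defines above drive an X-macro expansion inside t4_pci_id_tbl.h: the header re-invokes CH_PCI_ID_TABLE_ENTRY for every supported device ID between the begin and end definitions. Schematically it reduces to a sentinel-terminated array; a sketch with hypothetical device IDs:

#include <stdint.h>
#include <stdio.h>

struct pci_id {
	uint16_t vendor_id;
	uint16_t device_id;
};

#define VENDOR_CHELSIO 0x1425
#define ID_ENTRY(devid) { VENDOR_CHELSIO, (devid) },

static const struct pci_id tbl[] = {
	ID_ENTRY(0x0001)  /* hypothetical device IDs */
	ID_ENTRY(0x0002)
	{ 0, 0 }          /* sentinel, like .vendor_id = 0 above */
};

int main(void)
{
	printf("%zu entries including sentinel\n",
	       sizeof(tbl) / sizeof(tbl[0]));
	return 0;
}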
+/*
+ * Get port statistics.
+ */
+static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *eth_stats)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct port_stats ps;
+ unsigned int i;
+
+ cxgbevf_stats_get(pi, &ps);
+
+ /* RX Stats */
+ eth_stats->ierrors = ps.rx_len_err;
+
+ /* TX Stats */
+ eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
+ ps.tx_ucast_frames;
+ eth_stats->obytes = ps.tx_octets;
+ eth_stats->oerrors = ps.tx_drop;
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ eth_stats->q_ipackets[i] = rxq->stats.pkts;
+ eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ eth_stats->q_opackets[i] = txq->stats.pkts;
+ eth_stats->q_obytes[i] = txq->stats.tx_bytes;
+ eth_stats->q_errors[i] = txq->stats.mapping_err;
+ }
+ return 0;
+}
+
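This callback is what services rte_eth_stats_get() for a cxgbevf port: per-port counters come from the firmware, per-queue counters from the SGE queue structures. An application-side fragment, assuming EAL is already initialized and port_id names a started port:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void print_basic_stats(uint16_t port_id)
{
	struct rte_eth_stats stats;

	if (rte_eth_stats_get(port_id, &stats) != 0)
		return;
	printf("port %u: rx %" PRIu64 " pkts / %" PRIu64 " bytes, "
	       "tx %" PRIu64 " pkts, ierrors %" PRIu64 "\n",
	       port_id, stats.ipackets, stats.ibytes,
	       stats.opackets, stats.ierrors);
}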
+static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
+ .dev_start = cxgbe_dev_start,
+ .dev_stop = cxgbe_dev_stop,
+ .dev_close = cxgbe_dev_close,
+ .promiscuous_enable = cxgbe_dev_promiscuous_enable,
+ .promiscuous_disable = cxgbe_dev_promiscuous_disable,
+ .allmulticast_enable = cxgbe_dev_allmulticast_enable,
+ .allmulticast_disable = cxgbe_dev_allmulticast_disable,
+ .dev_configure = cxgbe_dev_configure,
+ .dev_infos_get = cxgbe_dev_info_get,
+ .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
+ .link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
+ .mtu_set = cxgbe_dev_mtu_set,
+ .tx_queue_setup = cxgbe_dev_tx_queue_setup,
+ .tx_queue_start = cxgbe_dev_tx_queue_start,
+ .tx_queue_stop = cxgbe_dev_tx_queue_stop,
+ .tx_queue_release = cxgbe_dev_tx_queue_release,
+ .rx_queue_setup = cxgbe_dev_rx_queue_setup,
+ .rx_queue_start = cxgbe_dev_rx_queue_start,
+ .rx_queue_stop = cxgbe_dev_rx_queue_stop,
+ .rx_queue_release = cxgbe_dev_rx_queue_release,
+ .stats_get = cxgbevf_dev_stats_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
+};
+
+/*
+ * Initialize driver
+ * It returns 0 on success.
+ */
+static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct adapter *adapter = NULL;
+ int err = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ eth_dev->dev_ops = &cxgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "cxgbevfadapter%d",
+ eth_dev->data->port_id);
+ adapter = rte_zmalloc(name, sizeof(*adapter), 0);
+ if (!adapter)
+ return -1;
+
+ adapter->use_unpacked_mode = 1;
+ adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ if (!adapter->regs) {
+ dev_err(adapter, "%s: cannot map device registers\n", __func__);
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->pdev = pci_dev;
+ adapter->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ err = cxgbevf_probe(adapter);
+ if (err) {
+ dev_err(adapter, "%s: cxgbevf probe failed with err %d\n",
+ __func__, err);
+ goto out_free_adapter;
+ }
+
+ return 0;
+
+out_free_adapter:
+ rte_free(adapter);
+ return err;
+}
+
+static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
+ eth_cxgbevf_dev_init);
+}
+
+static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_cxgbevf_pmd = {
+ .id_table = cxgb4vf_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_cxgbevf_pci_probe,
+ .remove = eth_cxgbevf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
new file mode 100644
index 00000000..4214d031
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbevf_main.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgbe.h"
+
+/*
+ * Figure out how many Ports and Queue Sets we can support. This depends on
+ * knowing our Virtual Function Resources and may be called a second time if
+ * we fall back from MSI-X to MSI Interrupt Mode.
+ */
+static void size_nports_qsets(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ unsigned int pmask_nports;
+
+ /*
+ * The number of "ports" which we support is equal to the number of
+ * Virtual Interfaces with which we've been provisioned.
+ */
+ adapter->params.nports = vfres->nvi;
+ if (adapter->params.nports > MAX_NPORTS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
+ " allowed virtual interfaces\n", MAX_NPORTS,
+ adapter->params.nports);
+ adapter->params.nports = MAX_NPORTS;
+ }
+
+ /*
+ * We may have been provisioned with more VIs than the number of
+ * ports we're allowed to access (our Port Access Rights Mask).
+ * This is obviously a configuration conflict but we don't want to
+ * do anything silly just because of that.
+ */
+ pmask_nports = hweight32(adapter->params.vfres.pmask);
+ if (pmask_nports < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+ " virtual interfaces; limited by Port Access Rights"
+ " mask %#x\n", pmask_nports, adapter->params.nports,
+ adapter->params.vfres.pmask);
+ adapter->params.nports = pmask_nports;
+ }
+
+ configure_max_ethqsets(adapter);
+ if (adapter->sge.max_ethqsets < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d available"
+ " virtual interfaces (too few Queue Sets)\n",
+ adapter->sge.max_ethqsets, adapter->params.nports);
+ adapter->params.nports = adapter->sge.max_ethqsets;
+ }
+}
+
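The clamp above boils down to min(VIs provisioned, MAX_NPORTS, bits set in the port-access mask). A standalone model of that computation; __builtin_popcount stands in for the driver's hweight32():

#include <stdio.h>

static unsigned int usable_ports(unsigned int nvi, unsigned int pmask,
				 unsigned int max_nports)
{
	unsigned int nports = nvi;
	unsigned int pmask_nports = (unsigned int)__builtin_popcount(pmask);

	if (nports > max_nports)
		nports = max_nports;
	if (pmask_nports < nports)
		nports = pmask_nports;
	return nports;
}

int main(void)
{
	/* 4 VIs provisioned but only 2 ports in the access mask 0x5 */
	printf("%u\n", usable_ports(4, 0x5, 8));  /* prints 2 */
	return 0;
}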
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats)
+{
+ t4vf_get_port_stats(pi->adapter, pi->pidx, stats);
+}
+
+static int adap_init0vf(struct adapter *adapter)
+{
+ u32 param, val = 0;
+ int err;
+
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Grab basic operational parameters. These will predominantly have
+ * been set up by the Physical Function Driver or will be hard coded
+ * into the adapter. We just have to live with them ... Note that
+ * we _must_ get our VPD parameters before our SGE parameters because
+ * we need to know the adapter's core clock from the VPD in order to
+ * properly decode the SGE Timer Values.
+ */
+ err = t4vf_get_dev_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " device parameters: err=%d\n", err);
+ return err;
+ }
+
+ err = t4vf_get_vpd_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " VPD parameters: err=%d\n", err);
+ return err;
+ }
+
+ adapter->pf = t4vf_get_pf_from_vf(adapter);
+ err = t4vf_sge_init(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "error in sge init\n");
+ return err;
+ }
+
+ err = t4vf_get_rss_glb_config(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " RSS parameters: err=%d\n", err);
+ return err;
+ }
+ if (adapter->params.rss.mode !=
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+ " mode %d\n", adapter->params.rss.mode);
+ return -EINVAL;
+ }
+
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+ val = 1;
+ t4vf_set_params(adapter, 1, &param, &val);
+
+ /*
+ * Grab our Virtual Interface resource allocation, extract the
+ * features that we're interested in and do a bit of sanity testing on
+ * what we discover.
+ */
+ err = t4vf_get_vfres(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to get virtual interface"
+ " resources: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Check for various parameter sanity issues.
+ */
+ if (adapter->params.vfres.pmask == 0) {
+ dev_err(adapter->pdev_dev, "no port access configured\n"
+ "usable!\n");
+ return -EINVAL;
+ }
+ if (adapter->params.vfres.nvi == 0) {
+ dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+ "usable!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize nports and max_ethqsets now that we have our Virtual
+ * Function Resources.
+ */
+ size_nports_qsets(adapter);
+ adapter->flags |= FW_OK;
+ return 0;
+}
+
+int cxgbevf_probe(struct adapter *adapter)
+{
+ struct port_info *pi;
+ unsigned int pmask;
+ int err = 0;
+ int i;
+
+ t4_os_lock_init(&adapter->mbox_lock);
+ TAILQ_INIT(&adapter->mbox_list);
+ err = t4vf_prep_adapter(adapter);
+ if (err)
+ return err;
+
+ if (!is_t4(adapter->params.chip)) {
+ adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ if (!adapter->bar2) {
+ dev_err(adapter, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ return err;
+ }
+ }
+
+ err = adap_init0vf(adapter);
+ if (err) {
+ dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ pmask = adapter->params.vfres.pmask;
+ for_each_port(adapter, i) {
+ const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int port_id;
+
+ if (pmask == 0)
+ break;
+ port_id = ffs(pmask) - 1;
+ pmask &= ~(1 << port_id);
+
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
+
+ if (i == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = adapter->eth_dev;
+ goto allocate_mac;
+ }
+
+ /*
+ * now do all data allocation - for eth_dev structure,
+ * and internal (private) data for the remaining ports
+ */
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
+ goto out_free;
+
+allocate_mac:
+ pi = (struct port_info *)eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = port_id;
+ pi->pidx = i;
+
+ pi->eth_dev->device = &adapter->pdev->device;
+ pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
+ pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
+ pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
+ pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
+ ETHER_ADDR_LEN, 0);
+ if (!pi->eth_dev->data->mac_addrs) {
+ dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+ }
+
+ if (adapter->flags & FW_OK) {
+ err = t4vf_port_init(adapter);
+ if (err) {
+ dev_err(adapter, "%s: t4_port_init failed with err %d\n",
+ __func__, err);
+ goto out_free;
+ }
+ }
+
+ cfg_queues(adapter->eth_dev);
+ print_adapter_info(adapter);
+ print_port_info(adapter);
+
+ err = init_rss(adapter);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
+ /* Skip first port since it'll be de-allocated by DPDK */
+ if (i == 0)
+ continue;
+ if (pi->eth_dev) {
+ if (pi->eth_dev->data->dev_private)
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+ }
+ return -err;
+}
diff --git a/drivers/net/cxgbe/meson.build b/drivers/net/cxgbe/meson.build
new file mode 100644
index 00000000..7c69a34b
--- /dev/null
+++ b/drivers/net/cxgbe/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('cxgbe_ethdev.c',
+ 'cxgbe_main.c',
+ 'cxgbevf_ethdev.c',
+ 'cxgbevf_main.c',
+ 'sge.c',
+ 'cxgbe_filter.c',
+ 'cxgbe_flow.c',
+ 'clip_tbl.c',
+ 'base/t4_hw.c',
+ 'base/t4vf_hw.c')
+includes += include_directories('base')
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 3d5aa596..4ea40d19 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -83,6 +55,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
#define MAX_IMM_TX_PKT_LEN 256
/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+/*
* Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
* per mbuf buffer). We currently only support two sizes for 1500- and
* 9000-byte MTUs. We could easily support more but there doesn't seem to be
@@ -337,7 +314,11 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
* mechanism.
*/
if (unlikely(!q->bar2_addr)) {
- t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_KDOORBELL;
+
+ t4_write_reg_relaxed(adap, reg,
val | V_QID(q->cntxt_id));
} else {
writel_relaxed(val | V_QID(q->bar2_qid),
@@ -385,7 +366,8 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
struct rte_mbuf *buf_bulk[n];
int ret, i;
struct rte_pktmbuf_pool_private *mbp_priv;
- u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
+ u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
@@ -570,12 +552,16 @@ static inline int is_eth_imm(const struct rte_mbuf *m)
/**
* calc_tx_flits - calculate the number of flits for a packet Tx WR
* @m: the packet
+ * @adap: adapter structure pointer
*
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
+static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
+ struct adapter *adap)
{
+ size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
+ sizeof(struct fw_eth_tx_pkt_vm_wr);
unsigned int flits;
int hdrlen;
@@ -600,11 +586,10 @@ static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
*/
flits = sgl_len(m->nb_segs);
if (m->tso_segsz)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
+ flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
else
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+ flits += (wr_size +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
return flits;
}
@@ -848,14 +833,20 @@ static void tx_timer_cb(void *data)
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
struct sge_eth_txq *txq)
{
- u32 wr_mid;
- struct sge_txq *q = &txq->q;
+ struct fw_eth_tx_pkts_vm_wr *vmwr;
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
struct fw_eth_tx_pkts_wr *wr;
+ struct sge_txq *q = &txq->q;
unsigned int ndesc;
+ u32 wr_mid;
/* fill the pkts WR header */
wr = (void *)&q->desc[q->pidx];
wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ vmwr = (void *)&q->desc[q->pidx];
wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
ndesc = flits_to_desc(q->coalesce.flits);
@@ -863,12 +854,18 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
wr->plen = cpu_to_be16(q->coalesce.len);
wr->npkt = q->coalesce.idx;
wr->r3 = 0;
- wr->type = q->coalesce.type;
+ if (is_pf4(adap)) {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ wr->type = q->coalesce.type;
+ } else {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
+ vmwr->r4 = 0;
+ memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
+ fw_hdr_copy_len);
+ }
/* zero out coalesce structure members */
- q->coalesce.idx = 0;
- q->coalesce.flits = 0;
- q->coalesce.len = 0;
+ memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));
txq_advance(q, ndesc);
txq->stats.coal_wr++;
@@ -896,13 +893,27 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
unsigned int *nflits,
struct adapter *adap)
{
+ struct fw_eth_tx_pkts_vm_wr *wr;
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
struct sge_txq *q = &txq->q;
unsigned int flits, ndesc;
unsigned char type = 0;
- int credits;
+ int credits, wr_size;
/* use coal WR type 1 when no frags are present */
type = (mbuf->nb_segs == 1) ? 1 : 0;
+ if (!is_pf4(adap)) {
+ if (!type)
+ return 0;
+
+ if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *),
+ fw_hdr_copy_len))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ }
if (unlikely(type != q->coalesce.type && q->coalesce.idx))
ship_tx_pkt_coalesce_wr(adap, txq);
@@ -948,16 +959,21 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
new:
/* start a new pkts WR, the WR header is not filled below */
- flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64);
+ wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
+ sizeof(struct fw_eth_tx_pkts_vm_wr);
+ flits += wr_size / sizeof(__be64);
ndesc = flits_to_desc(q->coalesce.flits + flits);
credits = txq_avail(q) - ndesc;
if (unlikely(credits < 0 || wraps_around(q, ndesc)))
return 0;
- q->coalesce.flits += 2;
+ q->coalesce.flits += wr_size / sizeof(__be64);
q->coalesce.type = type;
q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
- 2 * sizeof(__be64);
+ q->coalesce.flits * sizeof(__be64);
+ if (!is_pf4(adap))
+ memcpy((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
return 1;
}
@@ -987,6 +1003,8 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
struct cpl_tx_pkt_core *cpl;
struct tx_sw_desc *sd;
unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+ unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
+ ETH_COALESCE_VF_PKT_NUM;
#ifdef RTE_LIBRTE_CXGBE_TPUT
RTE_SET_USED(nb_pkts);
@@ -1030,9 +1048,12 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
- cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) |
- V_TXPKT_PF(adap->pf));
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
cpl->pack = htons(0);
cpl->len = htons(len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1061,7 +1082,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
sd->coalesce.idx = (idx & 1) + 1;
/* send the coaelsced work request if max reached */
- if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM
+ if (++q->coalesce.idx == max_coal_pkt_num
#ifndef RTE_LIBRTE_CXGBE_TPUT
|| q->coalesce.idx >= nb_pkts
#endif
@@ -1085,6 +1106,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
struct adapter *adap;
struct rte_mbuf *m = mbuf;
struct fw_eth_tx_pkt_wr *wr;
+ struct fw_eth_tx_pkt_vm_wr *vmwr;
struct cpl_tx_pkt_core *cpl;
struct tx_sw_desc *d;
dma_addr_t addr[m->nb_segs];
@@ -1095,7 +1117,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
u32 wr_mid;
u64 cntrl, *end;
bool v6;
- u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
/* Reject xmit if queue is stopped */
if (unlikely(txq->flags & EQ_STOPPED))
@@ -1115,7 +1137,7 @@ out_free:
(unlikely(m->pkt_len > max_pkt_len)))
goto out_free;
- pi = (struct port_info *)txq->eth_dev->data->dev_private;
+ pi = (struct port_info *)txq->data->dev_private;
adap = pi->adapter;
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
@@ -1141,7 +1163,7 @@ out_free:
if (txq->q.coalesce.idx)
ship_tx_pkt_coalesce_wr(adap, txq);
- flits = calc_tx_flits(m);
+ flits = calc_tx_flits(m, adap);
ndesc = flits_to_desc(flits);
credits = txq_avail(&txq->q) - ndesc;
@@ -1163,31 +1185,55 @@ out_free:
}
wr = (void *)&txq->q.desc[txq->q.pidx];
+ vmwr = (void *)&txq->q.desc[txq->q.pidx];
wr->equiq_to_len16 = htonl(wr_mid);
- wr->r3 = rte_cpu_to_be_64(0);
- end = (u64 *)wr + flits;
+ if (is_pf4(adap)) {
+ wr->r3 = rte_cpu_to_be_64(0);
+ end = (u64 *)wr + flits;
+ } else {
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
+
+ vmwr->r3[0] = rte_cpu_to_be_32(0);
+ vmwr->r3[1] = rte_cpu_to_be_32(0);
+ memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
+ fw_hdr_copy_len);
+ end = (u64 *)vmwr + flits;
+ }
len = 0;
len += sizeof(*cpl);
/* Coalescing skipped and we send through normal path */
if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
- wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
V_FW_WR_IMMDLEN(len));
- cpl = (void *)(wr + 1);
+ if (is_pf4(adap))
+ cpl = (void *)(wr + 1);
+ else
+ cpl = (void *)(vmwr + 1);
if (m->ol_flags & PKT_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, m) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
}
} else {
- lso = (void *)(wr + 1);
+ if (is_pf4(adap))
+ lso = (void *)(wr + 1);
+ else
+ lso = (void *)(vmwr + 1);
v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
len += sizeof(*lso);
- wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
V_FW_WR_IMMDLEN(len));
lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
@@ -1221,9 +1267,14 @@ out_free:
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
- cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) |
- V_TXPKT_PF(adap->pf));
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
+ V_TXPKT_PF(0));
+
cpl->pack = htons(0);
cpl->len = htons(m->pkt_len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1254,6 +1305,126 @@ out_free:
}
/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
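The reclaim arithmetic above handles the case where the hardware consumer index has wrapped past the software one, so a negative delta means "went around the ring once". The same computation in isolation, with a hypothetical ring size:

#include <stdio.h>

static int reclaimed(int hw_cidx, int sw_cidx, int size)
{
	int reclaim = hw_cidx - sw_cidx;

	if (reclaim < 0)  /* hardware index wrapped around */
		reclaim += size;
	return reclaim;
}

int main(void)
{
	printf("%d\n", reclaimed(2, 60, 64));  /* wrapped: 6 descriptors */
	return 0;
}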
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @mbuf: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct rte_mbuf *mbuf)
+{
+ return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * inline_tx_mbuf: inline a packet's data into TX descriptors
+ * @q: the TX queue where the packet will be inlined
+ * @from: pointer to data portion of packet
+ * @to: pointer after cpl where data has to be inlined
+ * @len: length of data to inline
+ *
+ * Inline a packet's contents directly to TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
+ int len)
+{
+ int left = RTE_PTR_DIFF(q->stat, *to);
+
+ if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
+ rte_memcpy(*to, from, len);
+ *to = RTE_PTR_ADD(*to, len);
+ } else {
+ rte_memcpy(*to, from, left);
+ from = RTE_PTR_ADD(from, left);
+ left = len - left;
+ rte_memcpy((void *)q->desc, from, left);
+ *to = RTE_PTR_ADD((void *)q->desc, left);
+ }
+}
+
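Most of inline_tx_mbuf() is the wrap-around: the writable region ends at the ring's status page (q->stat), and anything past it continues from the ring base. A standalone model of that copy, with memcpy standing in for rte_memcpy and plain byte buffers for the descriptor ring:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void inline_copy(uint8_t *ring_base, uint8_t *ring_end,
			uint8_t **to, const uint8_t *from, int len)
{
	int left = (int)(ring_end - *to);

	if (len <= left) {
		memcpy(*to, from, len);
		*to += len;
	} else {
		memcpy(*to, from, left);                 /* up to the end */
		memcpy(ring_base, from + left, len - left); /* wrap to base */
		*to = ring_base + (len - left);
	}
}

int main(void)
{
	uint8_t ring[8] = {0};
	uint8_t *to = &ring[6];
	const uint8_t src[4] = {1, 2, 3, 4};

	inline_copy(ring, ring + sizeof(ring), &to, src, sizeof(src));
	/* bytes 1,2 land at ring[6..7]; bytes 3,4 wrap to ring[0..1] */
	printf("%d %d %d %d\n", ring[6], ring[7], ring[0], ring[1]);
	return 0;
}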
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @mbuf: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+ caddr_t dst;
+
+ if (unlikely(!is_imm(mbuf))) {
+ WARN_ON(1);
+ rte_pktmbuf_free(mbuf);
+ return -1;
+ }
+
+ reclaim_completed_tx_imm(&q->q);
+ ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+ t4_os_lock(&q->ctrlq_lock);
+
+ q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+ if (unlikely(q->full)) {
+ t4_os_unlock(&q->ctrlq_lock);
+ return -1;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ dst = (void *)wr;
+ inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+ &dst, mbuf->data_len);
+
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < 64))
+ wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+ q->txp++;
+
+ ring_tx_db(q->adapter, &q->q);
+ t4_os_unlock(&q->ctrlq_lock);
+
+ rte_pktmbuf_free(mbuf);
+ return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ return ctrl_xmit(q, mbuf);
+}
+
+/**
* alloc_ring - allocate resources for an SGE descriptor ring
* @dev: the PCI device's core device
* @nelem: the number of descriptors
@@ -1299,7 +1470,8 @@ static void *alloc_ring(size_t nelem, size_t elem_size,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
+ tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, 4096);
if (!tz)
return NULL;
@@ -1468,6 +1640,7 @@ static int process_responses(struct sge_rspq *q, int budget,
rsp_type = G_RSPD_TYPE(rc->u.type_gen);
if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
+ struct sge *s = &q->adapter->sge;
unsigned int stat_pidx;
int stat_pidx_diff;
@@ -1550,10 +1723,12 @@ static int process_responses(struct sge_rspq *q, int budget,
}
if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN;
+ pkt->ol_flags |= PKT_RX_VLAN |
+ PKT_RX_VLAN_STRIPPED;
pkt->vlan_tci = ntohs(cpl->vlan);
}
+ rte_pktmbuf_adj(pkt, s->pktshift);
rxq->stats.pkts++;
rxq->stats.rx_bytes += pkt->pkt_len;
rx_pkts[budget - budget_left] = pkt;
@@ -1612,7 +1787,11 @@ int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
if (unlikely(!q->bar2_addr)) {
- t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
+ u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_GTS;
+
+ t4_write_reg(q->adapter, reg,
val | V_INGRESSQID((u32)q->cntxt_id));
} else {
writel(val | V_INGRESSQID(q->bar2_qid),
@@ -1689,6 +1868,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
unsigned int nb_refill;
+ u8 pciechan;
/* Size needs to be multiple of 16, including status entry. */
iq->size = cxgbe_roundup(iq->size, 16);
@@ -1706,8 +1886,23 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE | F_FW_CMD_EXEC |
- V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
+ V_FW_IQ_CMD_VFN(0));
+ if (cong >= 0)
+ c.iqns_to_fl0congen =
+ htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ V_FW_IQ_CMD_IQTYPE(cong ?
+ FW_IQ_IQTYPE_NIC :
+ FW_IQ_IQTYPE_OFLD) |
+ F_FW_IQ_CMD_IQRO);
+ } else {
+ pciechan = pi->port_id;
+ }
+
c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
(sizeof(c) / 16));
c.type_to_iqandstindex =
@@ -1719,16 +1914,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize =
- htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :
- pi->tx_chan) |
+ htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
- if (cong >= 0)
- c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
- F_FW_IQ_CMD_IQRO);
if (fl) {
struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
@@ -1768,7 +1959,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
0 : F_FW_IQ_CMD_FL0PACKEN) |
F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
F_FW_IQ_CMD_FL0PADEN);
- if (cong >= 0)
+ if (is_pf4(adap) && cong >= 0)
c.iqns_to_fl0congen |=
htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
F_FW_IQ_CMD_FL0CONGCIF |
@@ -1789,7 +1980,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.fl0addr = cpu_to_be64(fl->addr);
}
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret)
goto err;
@@ -1806,7 +2000,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->stat = (void *)&iq->desc[iq->size * 8];
iq->eth_dev = eth_dev;
iq->handler = hnd;
- iq->port_id = pi->port_id;
+ iq->port_id = pi->pidx;
iq->mb_pool = mp;
/* set offset to -1 to distinguish ingress queues without FL */
@@ -1846,7 +2040,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* a lot easier to fix in one place ... For now we do something very
* simple (and hopefully less wrong).
*/
- if (!is_t4(adap->params.chip) && cong >= 0) {
+ if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
u32 param, val;
int i;
@@ -1893,9 +2087,11 @@ err:
return ret;
}
-static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
+ unsigned int abs_id)
{
q->cntxt_id = id;
+ q->abs_id = abs_id;
q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
&q->bar2_qid);
q->cidx = 0;
@@ -1943,6 +2139,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ u8 pciechan;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -1961,16 +2158,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE | F_FW_CMD_EXEC |
- V_FW_EQ_ETH_CMD_PFN(adap->pf) |
- V_FW_EQ_ETH_CMD_VFN(0));
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
+ V_FW_EQ_ETH_CMD_VFN(0));
+ } else {
+ pciechan = pi->port_id;
+ }
+
c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
V_FW_EQ_ETH_CMD_VIID(pi->viid));
c.fetchszm_to_iqid =
htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
c.dcaen_to_eqsize =
htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
@@ -1978,7 +2181,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
V_FW_EQ_ETH_CMD_EQSIZE(nentries));
c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret) {
rte_free(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -1986,7 +2192,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));
+ init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
+ G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
txq->stats.tso = 0;
txq->stats.pkts = 0;
txq->stats.tx_cso = 0;
@@ -1997,10 +2204,69 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->stats.mapping_err = 0;
txq->flags |= EQ_STOPPED;
txq->eth_dev = eth_dev;
+ txq->data = eth_dev->data;
t4_os_lock_init(&txq->txq_lock);
return 0;
}
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->device->driver->name, "ctrl_tx_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ 0, &txq->q.phys_addr,
+ NULL, 0, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+ V_FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+ F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+ c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret) {
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
+ G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd)));
+ txq->adapter = adap;
+ txq->full = 0;
+ return 0;
+}
+
static void free_txq(struct sge_txq *q)
{
q->cntxt_id = 0;
@@ -2095,7 +2361,7 @@ void t4_sge_tx_monitor_stop(struct adapter *adap)
*/
void t4_free_sge_resources(struct adapter *adap)
{
- int i;
+ unsigned int i;
struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
@@ -2112,6 +2378,18 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ reclaim_completed_tx_imm(&cq->q);
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+ cq->q.cntxt_id);
+ free_txq(&cq->q);
+ }
+ }
+
if (adap->sge.fw_evtq.desc)
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
}
@@ -2280,3 +2558,182 @@ int t4_sge_init(struct adapter *adap)
return 0;
}
+
+int t4vf_sge_init(struct adapter *adap)
+{
+ struct sge_params *sge_params = &adap->params.sge;
+ u32 sge_ingress_queues_per_page;
+ u32 sge_egress_queues_per_page;
+ u32 sge_control, sge_control2;
+ u32 fl_small_pg, fl_large_pg;
+ u32 sge_ingress_rx_threshold;
+ u32 sge_timer_value_0_and_1;
+ u32 sge_timer_value_2_and_3;
+ u32 sge_timer_value_4_and_5;
+ u32 sge_congestion_control;
+ struct sge *s = &adap->sge;
+ unsigned int s_hps, s_qpp;
+ u32 sge_host_page_size;
+ u32 params[7], vals[7];
+ int v;
+
+ /* query basic params from fw */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+ params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
+ params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
+ params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+ v = t4vf_query_params(adap, 7, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+
+ sge_control = vals[0];
+ sge_host_page_size = vals[1];
+ fl_small_pg = vals[2];
+ fl_large_pg = vals[3];
+ sge_timer_value_0_and_1 = vals[4];
+ sge_timer_value_2_and_3 = vals[5];
+ sge_timer_value_4_and_5 = vals[6];
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver.
+ */
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+
+ if ((sge_control & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /* Grab the ingress packing boundary from SGE_CONTROL2. */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+ v = t4vf_query_params(adap, 1, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_err(adapter, "Unable to get SGE Control2; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_control2 = vals[0];
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ sge_ingress_rx_threshold = vals[0];
+ sge_congestion_control = vals[1];
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_warn(adap, "Unable to get VF SGE Queues/Page; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_egress_queues_per_page = vals[0];
+ sge_ingress_queues_per_page = vals[1];
+
+ /*
+ * We need the Queues/Page for our VF. This is based on the
+ * PF from which we're instantiated and is indexed in the
+ * register we just read.
+ */
+ s_hps = (S_HOSTPAGESIZEPF0 +
+ (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
+ sge_params->hps =
+ ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
+ sge_params->eq_qpp =
+ ((sge_egress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ sge_params->iq_qpp =
+ ((sge_ingress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+
+ /*
+ * Now translate the queried parameters into our internal forms.
+ */
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+ s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
+ ? 128 : 64);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ G_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
+
+ /*
+ * Save RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(sge_timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(sge_timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(sge_timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(sge_timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(sge_timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(sge_timer_value_4_and_5));
+ s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);
+ return 0;
+}
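To see the arithmetic t4vf_sge_init performs in isolation, here is a minimal
standalone C sketch, assuming made-up register values; the helper names and
constants below are illustrative, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_HYP 4096u /* hypothetical host page size */

/* The FL "large page" buffer must be 0 or a power of two. */
static int fl_large_pg_valid(uint32_t fl_large_pg)
{
	return (fl_large_pg & (fl_large_pg - 1)) == 0;
}

/* The SGE egress congestion threshold is in units of 2 Free List
 * pointers, hence the "* 2 + 1" applied to the extracted field. */
static uint32_t fl_starve_threshold(uint32_t egr_threshold_field)
{
	return egr_threshold_field * 2 + 1;
}

int main(void)
{
	uint32_t fl_small_pg = PAGE_SIZE_HYP; /* must equal the page size */
	uint32_t fl_large_pg = 65536;         /* power of two: valid */

	printf("small ok: %d, large ok: %d\n",
	       fl_small_pg == PAGE_SIZE_HYP, fl_large_pg_valid(fl_large_pg));
	printf("starve threshold for field 8: %u\n", fl_starve_threshold(8));
	return 0;
}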
diff --git a/drivers/net/dpaa/Makefile b/drivers/net/dpaa/Makefile
index 9c2a5ea8..d7a0a50c 100644
--- a/drivers/net/dpaa/Makefile
+++ b/drivers/net/dpaa/Makefile
@@ -27,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa_version.map
LIBABIVER := 1
+# depends on dpaa bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# Interfaces with DPDK
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 9b69ef45..7a950ac0 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -45,14 +45,43 @@
#include <fsl_bman.h>
#include <fsl_fman.h>
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MT_LOCKFREE |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
-/* At present we only allow up to 4 push mode queues - as each of this queue
- * need dedicated portal and we are short of portals.
+static int default_q; /* use default queue - FMC is not executed */
+/* At present we only allow up to 4 push mode queues by default - as each of
+ * these queues needs a dedicated portal and we are short of portals.
*/
-#define DPAA_MAX_PUSH_MODE_QUEUE 4
+#define DPAA_MAX_PUSH_MODE_QUEUE 8
+#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4
-static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE;
static int dpaa_push_queue_idx; /* Next queue index to be used in push mode */
@@ -95,6 +124,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
static struct rte_dpaa_driver rte_dpaa_pmd;
+static void
+dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
+
static inline void
dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
{
@@ -122,9 +154,11 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
return -EINVAL;
if (frame_size > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -134,13 +168,32 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
static int
-dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
+dpaa_eth_dev_configure(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint64_t tx_offloads = eth_conf->txmode.offloads;
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ /* Rx offloads validation */
+ if (dev_rx_offloads_nodis & ~rx_offloads) {
+ DPAA_PMD_WARN(
+ "Rx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ rx_offloads, dev_rx_offloads_nodis);
+ }
+
+ /* Tx offloads validation */
+ if (dev_tx_offloads_nodis & ~tx_offloads) {
+ DPAA_PMD_WARN(
+ "Tx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ tx_offloads, dev_tx_offloads_nodis);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
DPAA_MAX_RX_PKT_LEN) {
fman_if_set_maxfrm(dpaa_intf->fif,
@@ -256,14 +309,12 @@ static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
dev_info->speed_capa = (ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_10G);
- dev_info->rx_offload_capa =
- (DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- dev_info->tx_offload_capa =
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ dev_info->rx_offload_capa = dev_rx_offloads_sup |
+ dev_rx_offloads_nodis;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup |
+ dev_tx_offloads_nodis;
+ dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
+ dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
}
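The sup/nodis split above means the advertised capability set is always the
union of both masks, while configure only warns when a non-disableable
offload was not requested (it stays enabled regardless). A self-contained
sketch of that pattern, with hypothetical flag values standing in for the
DEV_RX_OFFLOAD_* bits:

#include <stdint.h>
#include <stdio.h>

#define OFFLOAD_CKSUM   (1ULL << 0) /* cannot be disabled on this HW */
#define OFFLOAD_SCATTER (1ULL << 1) /* cannot be disabled on this HW */
#define OFFLOAD_JUMBO   (1ULL << 2) /* optional */

static const uint64_t offloads_sup   = OFFLOAD_JUMBO;
static const uint64_t offloads_nodis = OFFLOAD_CKSUM | OFFLOAD_SCATTER;

int main(void)
{
	/* What dev_info would report as the capability set. */
	uint64_t capa = offloads_sup | offloads_nodis;
	/* The application asked only for jumbo frames. */
	uint64_t requested = OFFLOAD_JUMBO;

	printf("capa = 0x%llx\n", (unsigned long long)capa);
	if (offloads_nodis & ~requested)
		printf("warn: offloads 0x%llx enabled regardless of request\n",
		       (unsigned long long)(offloads_nodis & ~requested));
	return 0;
}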
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
@@ -275,9 +326,9 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (dpaa_intf->fif->mac_type == fman_mac_1g)
- link->link_speed = 1000;
+ link->link_speed = ETH_SPEED_NUM_1G;
else if (dpaa_intf->fif->mac_type == fman_mac_10g)
- link->link_speed = 10000;
+ link->link_speed = ETH_SPEED_NUM_10G;
else
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, dpaa_intf->fif->mac_type);
@@ -316,12 +367,12 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings);
uint64_t values[sizeof(struct dpaa_if_stats) / 8];
- if (xstats == NULL)
- return 0;
-
if (n < num)
return num;
+ if (xstats == NULL)
+ return 0;
+
fman_if_stats_get_all(dpaa_intf->fif, values,
sizeof(struct dpaa_if_stats) / 8);
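The reordered checks above implement the usual ethdev xstats contract: when
the caller's array is too small the function returns the required count, so
callers can size their buffers in two passes. A sketch of that calling
convention; get_xstats here is a stand-in following the same rules, not the
real rte_eth_xstats_get:

#include <stdio.h>
#include <stdlib.h>

struct xstat { unsigned int id; unsigned long long value; };

/* If n is smaller than the number of counters, return that number;
 * if the output array is NULL, return 0; otherwise fill and return
 * the count - mirroring the order of checks in the driver. */
static int get_xstats(struct xstat *out, unsigned int n)
{
	const unsigned int num = 8; /* hypothetical counter count */
	unsigned int i;

	if (n < num)
		return (int)num;
	if (out == NULL)
		return 0;
	for (i = 0; i < num; i++) {
		out[i].id = i;
		out[i].value = i * 100ULL;
	}
	return (int)num;
}

int main(void)
{
	int need = get_xstats(NULL, 0); /* first pass: size query */
	struct xstat *buf = malloc(sizeof(*buf) * need);
	int got = get_xstats(buf, (unsigned int)need); /* second pass */

	printf("fetched %d of %d xstats\n", got, need);
	free(buf);
	return 0;
}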
@@ -335,10 +386,13 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
static int
dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned int limit)
+ unsigned int limit)
{
unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings);
+ if (limit < stat_cnt)
+ return stat_cnt;
+
if (xstats_names != NULL)
for (i = 0; i < stat_cnt; i++)
snprintf(xstats_names[i].name,
@@ -366,7 +420,7 @@ dpaa_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
return 0;
fman_if_stats_get_all(dpaa_intf->fif, values_copy,
- sizeof(struct dpaa_if_stats));
+ sizeof(struct dpaa_if_stats) / 8);
for (i = 0; i < stat_cnt; i++)
values[i] =
@@ -463,7 +517,15 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
PMD_INIT_FUNC_TRACE();
- DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx);
+ if (queue_idx >= dev->data->nb_rx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_rx_queues);
+ return -rte_errno;
+ }
+
+ DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, rxq->fqid);
if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
struct fman_if_ic_params icp;
@@ -527,9 +589,11 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
}
ret = qman_init_fq(rxq, flags, &opts);
- if (ret)
- DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
- " ret: %d", rxq->fqid, ret);
+ if (ret) {
+ DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
+ return ret;
+ }
rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
rxq->is_static = true;
@@ -553,7 +617,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
-int __rte_experimental
+int
dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
int eth_rx_queue_id,
u16 ch_id,
@@ -604,8 +668,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
ret = qman_init_fq(rxq, flags, &opts);
if (ret) {
- DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
- rxq->fqid, ret);
+ DPAA_PMD_ERR("Ev-Channel/Q association failed. fqid 0x%x "
+ "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
return ret;
}
@@ -616,7 +680,7 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
return ret;
}
-int __rte_experimental
+int
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id)
{
@@ -662,7 +726,15 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
PMD_INIT_FUNC_TRACE();
- DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx);
+ if (queue_idx >= dev->data->nb_tx_queues) {
+ rte_errno = EOVERFLOW;
+ DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
+ (void *)dev, queue_idx, dev->data->nb_tx_queues);
+ return -rte_errno;
+ }
+
+ DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
+ queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
return 0;
}
@@ -813,7 +885,7 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
fman_if_clear_mac_addr(dpaa_intf->fif, index);
}
-static void
+static int
dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
@@ -825,6 +897,8 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
if (ret)
RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+
+ return ret;
}
static struct eth_dev_ops dpaa_devops = {
@@ -882,7 +956,7 @@ is_dpaa_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &rte_dpaa_pmd);
}
-int __rte_experimental
+int
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
{
struct rte_eth_dev *dev;
@@ -953,15 +1027,15 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
ret = qman_reserve_fqid(fqid);
if (ret) {
- DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d",
+ DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
fqid, ret);
return -EINVAL;
}
- DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid);
+ DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
if (ret) {
- DPAA_PMD_ERR("create rx fqid %d failed with ret: %d",
+ DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
fqid, ret);
return ret;
}
@@ -977,7 +1051,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
&cgr_opts);
if (ret) {
DPAA_PMD_WARN(
- "rx taildrop init fail on rx fqid %d (ret=%d)",
+ "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
fqid, ret);
goto without_cgr;
}
@@ -988,7 +1062,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
without_cgr:
ret = qman_init_fq(fq, flags, &opts);
if (ret)
- DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret);
+ DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
return ret;
}
@@ -1016,10 +1090,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
/* no tx-confirmation */
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
- DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid);
+ DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
if (ret)
- DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret);
+ DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
return ret;
}
@@ -1090,25 +1164,20 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
dpaa_intf->cfg = cfg;
/* Initialize Rx FQ's */
- if (getenv("DPAA_NUM_RX_QUEUES"))
- num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
- else
+ if (default_q) {
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
-
- /* if push mode queues to be enabled. Currenly we are allowing only
- * one queue per thread.
- */
- if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
- dpaa_push_mode_max_queue =
- atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
- if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
- dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ } else {
+ if (getenv("DPAA_NUM_RX_QUEUES"))
+ num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
+ else
+ num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
}
- /* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
+
+ /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
* queues.
*/
- if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) {
+ if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
DPAA_PMD_ERR("Invalid number of RX queues\n");
return -EINVAL;
}
@@ -1141,8 +1210,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
}
for (loop = 0; loop < num_rx_fqs; loop++) {
- fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
- DPAA_PCD_FQID_MULTIPLIER + loop;
+ if (default_q)
+ fqid = cfg->rx_def;
+ else
+ fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+ DPAA_PCD_FQID_MULTIPLIER + loop;
if (dpaa_intf->cgr_rx)
dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
@@ -1317,6 +1389,9 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
if (!eth_dev)
return -ENOMEM;
+ eth_dev->device = &dpaa_dev->device;
+ eth_dev->dev_ops = &dpaa_devops;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -1335,6 +1410,26 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
return ret;
}
+ if (access("/tmp/fmc.bin", F_OK) == -1) {
+ RTE_LOG(INFO, PMD,
+ "* FMC not configured.Enabling default mode\n");
+ default_q = 1;
+ }
+
+ /* disabling the default push mode for LS1043 */
+ if (dpaa_svr_family == SVR_LS1043A_FAMILY)
+ dpaa_push_mode_max_queue = 0;
+
+ /* Check if push mode queues are to be enabled. Currently we allow
+ * only one queue per thread.
+ */
+ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+ dpaa_push_mode_max_queue =
+ atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+ if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+ dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ }
+
is_global_init = 1;
}
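The DPAA_PUSH_QUEUES_NUMBER handling above is a plain getenv/atoi override
clamped to a compile-time maximum. A minimal sketch of the same pattern,
with the environment variable name kept and the limits hypothetical:

#include <stdio.h>
#include <stdlib.h>

#define MAX_PUSH_MODE_QUEUE     8 /* hard upper bound */
#define DEFAULT_PUSH_MODE_QUEUE 4 /* used when no override is given */

int main(void)
{
	int push_mode_max_queue = DEFAULT_PUSH_MODE_QUEUE;
	const char *env = getenv("DPAA_PUSH_QUEUES_NUMBER");

	if (env) {
		push_mode_max_queue = atoi(env);
		if (push_mode_max_queue > MAX_PUSH_MODE_QUEUE)
			push_mode_max_queue = MAX_PUSH_MODE_QUEUE;
	}
	printf("push mode queues: %d\n", push_mode_max_queue);
	return 0;
}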
@@ -1366,8 +1461,10 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
/* Invoke PMD device initialization function */
diag = dpaa_dev_init(eth_dev);
- if (diag == 0)
+ if (diag == 0) {
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
+ }
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(eth_dev->data->dev_private);
diff --git a/drivers/net/dpaa/dpaa_ethdev.h b/drivers/net/dpaa/dpaa_ethdev.h
index c051ae32..c79b9f86 100644
--- a/drivers/net/dpaa/dpaa_ethdev.h
+++ b/drivers/net/dpaa/dpaa_ethdev.h
@@ -51,6 +51,10 @@
/*Maximum number of slots available in TX ring*/
#define DPAA_TX_BURST_SIZE 7
+/* Optimal burst size for RX and TX as default */
+#define DPAA_DEF_RX_BURST_SIZE 7
+#define DPAA_DEF_TX_BURST_SIZE DPAA_TX_BURST_SIZE
+
#ifndef VLAN_TAG_SIZE
#define VLAN_TAG_SIZE 4 /** < Vlan Header Length */
#endif
@@ -74,14 +78,10 @@
#define DPAA_DEBUG_FQ_TX_ERROR 1
#define DPAA_RSS_OFFLOAD_ALL ( \
- ETH_RSS_FRAG_IPV4 | \
- ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP | \
- ETH_RSS_NONFRAG_IPV4_SCTP | \
- ETH_RSS_FRAG_IPV6 | \
- ETH_RSS_NONFRAG_IPV6_TCP | \
- ETH_RSS_NONFRAG_IPV6_UDP | \
- ETH_RSS_NONFRAG_IPV6_SCTP)
+ ETH_RSS_IP | \
+ ETH_RSS_UDP | \
+ ETH_RSS_TCP | \
+ ETH_RSS_SCTP)
#define DPAA_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
@@ -160,12 +160,14 @@ struct dpaa_if_stats {
uint64_t tund; /**<Tx Undersized */
};
-int __rte_experimental dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
- int eth_rx_queue_id,
+int
+dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
u16 ch_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
-int __rte_experimental dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+int
+dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id);
enum qman_cb_dqrr_result
diff --git a/drivers/net/dpaa/dpaa_rxtx.c b/drivers/net/dpaa/dpaa_rxtx.c
index 0dea8e79..168b77e4 100644
--- a/drivers/net/dpaa/dpaa_rxtx.c
+++ b/drivers/net/dpaa/dpaa_rxtx.c
@@ -59,7 +59,7 @@
} while (0)
#if (defined RTE_LIBRTE_DPAA_DEBUG_DRIVER)
-void dpaa_display_frame(const struct qm_fd *fd)
+static void dpaa_display_frame(const struct qm_fd *fd)
{
int ii;
char *ptr;
@@ -90,11 +90,10 @@ static inline void dpaa_slow_parsing(struct rte_mbuf *m __rte_unused,
/*TBD:XXX: to be implemented*/
}
-static inline void dpaa_eth_packet_info(struct rte_mbuf *m,
- uint64_t fd_virt_addr)
+static inline void dpaa_eth_packet_info(struct rte_mbuf *m, void *fd_virt_addr)
{
struct annotations_t *annot = GET_ANNOTATIONS(fd_virt_addr);
- uint64_t prs = *((uint64_t *)(&annot->parse)) & DPAA_PARSE_MASK;
+ uint64_t prs = *((uintptr_t *)(&annot->parse)) & DPAA_PARSE_MASK;
DPAA_DP_LOG(DEBUG, " Parsing mbuf: %p with annotations: %p", m, annot);
@@ -351,7 +350,7 @@ dpaa_eth_sg_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
prev_seg = cur_seg;
}
- dpaa_eth_packet_info(first_seg, (uint64_t)vaddr);
+ dpaa_eth_packet_info(first_seg, vaddr);
rte_pktmbuf_free_seg(temp);
return first_seg;
@@ -394,7 +393,7 @@ dpaa_eth_fd_to_mbuf(const struct qm_fd *fd, uint32_t ifid)
mbuf->ol_flags = 0;
mbuf->next = NULL;
rte_mbuf_refcnt_set(mbuf, 1);
- dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+ dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
return mbuf;
}
@@ -432,7 +431,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
}
fd = &dqrr[i]->fd;
- dpaa_intf = fq[i]->dpaa_intf;
+ dpaa_intf = fq[0]->dpaa_intf;
format = (fd->opaque & DPAA_FD_FORMAT_MASK) >>
DPAA_FD_FORMAT_SHIFT;
@@ -455,7 +454,7 @@ dpaa_rx_cb(struct qman_fq **fq, struct qm_dqrr_entry **dqrr,
mbuf->ol_flags = 0;
mbuf->next = NULL;
rte_mbuf_refcnt_set(mbuf, 1);
- dpaa_eth_packet_info(mbuf, (uint64_t)mbuf->buf_addr);
+ dpaa_eth_packet_info(mbuf, mbuf->buf_addr);
}
}
@@ -561,7 +560,8 @@ uint16_t dpaa_eth_queue_rx(void *q,
struct qman_fq *fq = q;
struct qm_dqrr_entry *dq;
uint32_t num_rx = 0, ifid = ((struct dpaa_if *)fq->dpaa_intf)->ifid;
- int ret;
+ int num_rx_bufs, ret;
+ uint32_t vdqcr_flags = 0;
if (likely(fq->is_static))
return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
@@ -574,8 +574,19 @@ uint16_t dpaa_eth_queue_rx(void *q,
}
}
- ret = qman_set_vdq(fq, (nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
- DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_bufs);
+ /* For requests of fewer than four buffers, we provide the exact number
+ * of buffers by setting the QM_VDQCR_EXACT flag. Otherwise the flag is
+ * not set, in which case the dequeue can return up to two buffers more
+ * than requested, so we request two fewer in that case.
+ */
+ if (nb_bufs < 4) {
+ vdqcr_flags = QM_VDQCR_EXACT;
+ num_rx_bufs = nb_bufs;
+ } else {
+ num_rx_bufs = nb_bufs > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_bufs - 2);
+ }
+ ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
if (ret)
return 0;
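The request-size adjustment above compensates for the volatile dequeue
returning up to two frames more than asked when QM_VDQCR_EXACT is not set.
A standalone sketch of just that sizing decision; MAX_DEQUEUE and FLAG_EXACT
are hypothetical stand-ins for DPAA_MAX_DEQUEUE_NUM_FRAMES and
QM_VDQCR_EXACT:

#include <stdio.h>

#define MAX_DEQUEUE 63  /* hypothetical hardware burst limit */
#define FLAG_EXACT  0x1 /* stand-in for QM_VDQCR_EXACT */

/* Return the frame count to request and set *flags, mirroring the
 * logic in dpaa_eth_queue_rx: small requests are exact; larger ones
 * ask for two fewer since the dequeue may overshoot by two. */
static int vdq_request(int nb_bufs, unsigned int *flags)
{
	*flags = 0;
	if (nb_bufs < 4) {
		*flags = FLAG_EXACT;
		return nb_bufs;
	}
	return nb_bufs > MAX_DEQUEUE ? MAX_DEQUEUE - 2 : nb_bufs - 2;
}

int main(void)
{
	int sizes[] = { 1, 3, 4, 32, 100 };
	unsigned int i, flags;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int req = vdq_request(sizes[i], &flags);

		printf("nb=%d -> request=%d exact=%d\n",
		       sizes[i], req, (flags & FLAG_EXACT) ? 1 : 0);
	}
	return 0;
}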
@@ -593,7 +604,7 @@ uint16_t dpaa_eth_queue_rx(void *q,
static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
{
int ret;
- uint64_t buf = 0;
+ size_t buf = 0;
struct bm_buffer bufs;
ret = bman_acquire(bp_info->bp, &bufs, 1, 0);
@@ -602,10 +613,10 @@ static void *dpaa_get_pktbuf(struct dpaa_bp_info *bp_info)
return (void *)buf;
}
- DPAA_DP_LOG(DEBUG, "got buffer 0x%lx from pool %d",
+ DPAA_DP_LOG(DEBUG, "got buffer 0x%" PRIx64 " from pool %d",
(uint64_t)bufs.addr, bufs.bpid);
- buf = (uint64_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
+ buf = (size_t)DPAA_MEMPOOL_PTOV(bp_info, bufs.addr)
- bp_info->meta_data_size;
if (!buf)
goto out;
@@ -826,6 +837,8 @@ tx_on_external_pool(struct qman_fq *txq, struct rte_mbuf *mbuf,
}
DPAA_MBUF_TO_CONTIG_FD(dmable_mbuf, fd_arr, dpaa_intf->bp_info->bpid);
+ if (mbuf->ol_flags & DPAA_TX_CKSUM_OFFLOAD_MASK)
+ dpaa_unsegmented_checksum(mbuf, fd_arr);
return 0;
}
diff --git a/drivers/net/dpaa/meson.build b/drivers/net/dpaa/meson.build
new file mode 100644
index 00000000..62dec7b0
--- /dev/null
+++ b/drivers/net/dpaa/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+deps += ['mempool_dpaa']
+
+sources = files('dpaa_ethdev.c',
+ 'dpaa_rxtx.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa.h')
diff --git a/drivers/net/dpaa/rte_pmd_dpaa.h b/drivers/net/dpaa/rte_pmd_dpaa.h
index 38405ec0..37eea9b0 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa.h
+++ b/drivers/net/dpaa/rte_pmd_dpaa.h
@@ -18,9 +18,6 @@
#include <rte_ethdev_driver.h>
/**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
* Enable/Disable TX loopback
*
* @param port
@@ -33,7 +30,7 @@
* - (-ENODEV) if *port* invalid.
* - (-EINVAL) if bad parameter.
*/
-int __rte_experimental
+int
rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on);
#endif /* _PMD_DPAA_H_ */
diff --git a/drivers/net/dpaa/rte_pmd_dpaa_version.map b/drivers/net/dpaa/rte_pmd_dpaa_version.map
index 3b937b10..8cb4500b 100644
--- a/drivers/net/dpaa/rte_pmd_dpaa_version.map
+++ b/drivers/net/dpaa/rte_pmd_dpaa_version.map
@@ -3,12 +3,10 @@ DPDK_17.11 {
local: *;
};
-EXPERIMENTAL {
+DPDK_18.08 {
global:
dpaa_eth_eventq_attach;
dpaa_eth_eventq_detach;
rte_pmd_dpaa_set_tx_loopback;
-
- local: *;
} DPDK_17.11;
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index 5a93a0b9..9b0b1433 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -10,14 +10,8 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_dpaa2.a
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
-
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
@@ -25,7 +19,6 @@ CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
-CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
# versioning export map
@@ -34,6 +27,9 @@ EXPORT_MAP := rte_pmd_dpaa2_version.map
# library version
LIBABIVER := 1
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index b93376de..713a41bf 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -17,7 +17,7 @@
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <fslmc_logs.h>
+#include <dpaa2_pmd_logs.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
@@ -42,7 +42,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
p_params = rte_malloc(
NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
- PMD_INIT_LOG(ERR, "Memory unavailable");
+ DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
return -ENOMEM;
}
memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
@@ -50,8 +50,8 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
if (ret) {
- PMD_INIT_LOG(ERR, "given rss_hf (%lx) not supported",
- req_dist_set);
+ DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
+ req_dist_set);
rte_free(p_params);
return ret;
}
@@ -61,7 +61,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
if (ret) {
- PMD_INIT_LOG(ERR, "Unable to prepare extract parameters");
+ DPAA2_PMD_ERR("Unable to prepare extract parameters");
rte_free(p_params);
return ret;
}
@@ -70,7 +70,7 @@ dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
&tc_cfg);
rte_free(p_params);
if (ret) {
- PMD_INIT_LOG(ERR,
+ DPAA2_PMD_ERR(
"Setting distribution for Rx failed with err: %d",
ret);
return ret;
@@ -93,7 +93,7 @@ int dpaa2_remove_flow_dist(
p_params = rte_malloc(
NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
if (!p_params) {
- PMD_INIT_LOG(ERR, "Memory unavailable");
+ DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
return -ENOMEM;
}
memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
@@ -105,7 +105,7 @@ int dpaa2_remove_flow_dist(
ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
if (ret) {
- PMD_INIT_LOG(ERR, "Unable to prepare extract parameters");
+ DPAA2_PMD_ERR("Unable to prepare extract parameters");
rte_free(p_params);
return ret;
}
@@ -114,8 +114,8 @@ int dpaa2_remove_flow_dist(
&tc_cfg);
rte_free(p_params);
if (ret)
- PMD_INIT_LOG(ERR,
- "Setting distribution for Rx failed with err:%d",
+ DPAA2_PMD_ERR(
+ "Setting distribution for Rx failed with err: %d",
ret);
return ret;
}
@@ -256,7 +256,7 @@ dpaa2_distset_to_dpkg_profile_cfg(
break;
default:
- PMD_INIT_LOG(WARNING,
+ DPAA2_PMD_WARN(
"Unsupported flow dist option %x",
dist_field);
return -EINVAL;
@@ -307,7 +307,7 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_RX, &layout);
if (retcode) {
- PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n",
+ DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
retcode);
return retcode;
}
@@ -322,9 +322,9 @@ dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
if (retcode != 0) {
- PMD_INIT_LOG(ERR, "Error in attaching the buffer pool list"
- " bpid = %d Error code = %d\n",
- bpool_cfg.pools[0].dpbp_id, retcode);
+ DPAA2_PMD_ERR("Error configuring buffer pool on interface."
+ " bpid = %d error code = %d",
+ bpool_cfg.pools[0].dpbp_id, retcode);
return retcode;
}
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 09a11d65..c5047367 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -18,7 +18,7 @@
#include <rte_dev.h>
#include <rte_fslmc.h>
-#include <fslmc_logs.h>
+#include "dpaa2_pmd_logs.h"
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
@@ -27,6 +27,36 @@
#include "dpaa2_ethdev.h"
#include <fsl_qbman_debug.h>
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_MT_LOCKFREE |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
struct rte_dpaa2_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint8_t page_id; /* dpni statistics page id */
@@ -57,57 +87,7 @@ static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev);
static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev);
static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &dev->data->dev_link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
+int dpaa2_logtype_pmd;
static int
dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
@@ -119,7 +99,7 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return -1;
}
@@ -131,8 +111,8 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->token, vlan_id);
if (ret < 0)
- PMD_DRV_LOG(ERR, "ret = %d Unable to add/rem vlan %d hwid =%d",
- ret, vlan_id, priv->hw_id);
+ DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d",
+ ret, vlan_id, priv->hw_id);
return ret;
}
@@ -149,25 +129,25 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_FILTER_MASK) {
/* VLAN Filter not available */
if (!priv->max_vlan_filters) {
- RTE_LOG(INFO, PMD, "VLAN filter not available\n");
+ DPAA2_PMD_INFO("VLAN filter not available");
goto next_mask;
}
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
priv->token, true);
else
ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
- ret);
+ DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret);
}
next_mask:
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
- RTE_LOG(INFO, PMD,
- "VLAN extend offload not supported\n");
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
+ DPAA2_PMD_INFO("VLAN extend offload not supported");
}
return 0;
@@ -187,10 +167,10 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info))
- RTE_LOG(WARNING, PMD, "\tmc_get_soc_version failed\n");
+ DPAA2_PMD_WARN("\tmc_get_soc_version failed");
if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info))
- RTE_LOG(WARNING, PMD, "\tmc_get_version failed\n");
+ DPAA2_PMD_WARN("\tmc_get_version failed");
ret = snprintf(fw_version, fw_size,
"%x-%d.%d.%d",
@@ -220,20 +200,18 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->rx_offload_capa = dev_rx_offloads_sup |
+ dev_rx_offloads_nodis;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup |
+ dev_tx_offloads_nodis;
dev_info->speed_capa = ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
ETH_LINK_SPEED_10G;
+
+ dev_info->max_hash_mac_addrs = 0;
+ dev_info->max_vfs = 0;
+ dev_info->max_vmdq_pools = ETH_16_POOLS;
+ dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
}
static int
@@ -253,7 +231,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
RTE_CACHE_LINE_SIZE);
if (!mc_q) {
- PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
+ DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues");
return -1;
}
@@ -320,18 +298,39 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = priv->hw;
struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
- int rx_ip_csum_offload = false;
+ uint64_t rx_offloads = eth_conf->rxmode.offloads;
+ uint64_t tx_offloads = eth_conf->txmode.offloads;
+ int rx_l3_csum_offload = false;
+ int rx_l4_csum_offload = false;
+ int tx_l3_csum_offload = false;
+ int tx_l4_csum_offload = false;
int ret;
PMD_INIT_FUNC_TRACE();
- if (eth_conf->rxmode.jumbo_frame == 1) {
+ /* Rx offloads validation */
+ if (dev_rx_offloads_nodis & ~rx_offloads) {
+ DPAA2_PMD_WARN(
+ "Rx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ rx_offloads, dev_rx_offloads_nodis);
+ }
+
+ /* Tx offloads validation */
+ if (dev_tx_offloads_nodis & ~tx_offloads) {
+ DPAA2_PMD_WARN(
+ "Tx offloads non configurable - requested 0x%" PRIx64
+ " ignored 0x%" PRIx64,
+ tx_offloads, dev_tx_offloads_nodis);
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
priv->token, eth_conf->rxmode.max_rx_pkt_len);
if (ret) {
- PMD_INIT_LOG(ERR,
- "unable to set mtu. check config\n");
+ DPAA2_PMD_ERR(
+ "Unable to set mtu. check config");
return ret;
}
} else {
@@ -343,40 +342,52 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
ret = dpaa2_setup_flow_dist(dev,
eth_conf->rx_adv_conf.rss_conf.rss_hf);
if (ret) {
- PMD_INIT_LOG(ERR, "unable to set flow distribution."
- "please check queue config\n");
+ DPAA2_PMD_ERR("Unable to set flow distribution."
+ "Check queue config");
return ret;
}
}
- if (eth_conf->rxmode.hw_ip_checksum)
- rx_ip_csum_offload = true;
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ rx_l3_csum_offload = true;
+
+ if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
+ (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
+ rx_l4_csum_offload = true;
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
+ DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
+ DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
return ret;
}
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
+ DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret);
+ DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
return ret;
}
+ if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+ tx_l3_csum_offload = true;
+
+ if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+ (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+ (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+ tx_l4_csum_offload = true;
+
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_TX_L3_CSUM, true);
+ DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
+ DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
return ret;
}
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
- DPNI_OFF_TX_L4_CSUM, true);
+ DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret);
+ DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
return ret;
}
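The configure path above collapses the per-protocol checksum requests into
the two knobs the hardware exposes, one for L3 and one for L4. In outline,
with hypothetical bit values in place of the DEV_TX_OFFLOAD_* flags:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TX_IPV4_CKSUM (1ULL << 0)
#define TX_UDP_CKSUM  (1ULL << 1)
#define TX_TCP_CKSUM  (1ULL << 2)
#define TX_SCTP_CKSUM (1ULL << 3)

int main(void)
{
	uint64_t tx_offloads = TX_IPV4_CKSUM | TX_TCP_CKSUM;
	bool l3 = tx_offloads & TX_IPV4_CKSUM;
	bool l4 = tx_offloads & (TX_UDP_CKSUM | TX_TCP_CKSUM | TX_SCTP_CKSUM);

	/* The driver would pass l3/l4 on to dpni_set_offload() here. */
	printf("l3 csum: %d, l4 csum: %d\n", l3, l4);
	return 0;
}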
@@ -390,14 +401,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
DPNI_FLCTYPE_HASH, true);
if (ret) {
- PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n",
- ret);
+ DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret);
return ret;
}
}
- if (eth_conf->rxmode.hw_vlan_filter)
- dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+ dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
/* update the current status */
dpaa2_dev_link_update(dev, 0);
@@ -427,8 +436,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
- dev, rx_queue_id, mb_pool, rx_conf);
+ DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
+ dev, rx_queue_id, mb_pool, rx_conf);
if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
bpid = mempool_to_bpid(mb_pool);
@@ -445,7 +454,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
memset(&cfg, 0, sizeof(struct dpni_queue));
options = options | DPNI_QUEUE_OPT_USER_CTX;
- cfg.user_context = (uint64_t)(dpaa2_q);
+ cfg.user_context = (size_t)(dpaa2_q);
/* If ls2088 or rev2 device, enable the stashing */
@@ -467,7 +476,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
dpaa2_q->tc_index, flow_id, options, &cfg);
if (ret) {
- PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
+ DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret);
return -1;
}
@@ -479,14 +488,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
taildrop.threshold = CONG_THRESHOLD_RX_Q;
taildrop.units = DPNI_CONGESTION_UNIT_BYTES;
taildrop.oal = CONG_RX_OAL;
- PMD_DRV_LOG(DEBUG, "Enabling Early Drop on queue = %d",
- rx_queue_id);
+ DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d",
+ rx_queue_id);
ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token,
DPNI_CP_QUEUE, DPNI_QUEUE_RX,
dpaa2_q->tc_index, flow_id, &taildrop);
if (ret) {
- PMD_INIT_LOG(ERR, "Error in setting the rx flow"
- " err : = %d\n", ret);
+ DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)",
+ ret);
return -1;
}
}
@@ -529,9 +538,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
tc_id, flow_id, options, &tx_flow_cfg);
if (ret) {
- PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
- "tc_id=%d, flow =%d ErrorCode = %x\n",
- tc_id, flow_id, -ret);
+ DPAA2_PMD_ERR("Error in setting the tx flow: "
+ "tc_id=%d, flow=%d err=%d",
+ tc_id, flow_id, ret);
return -1;
}
@@ -543,8 +552,8 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
priv->token,
DPNI_CONF_DISABLE);
if (ret) {
- PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
- " ErrorCode = %x", ret);
+ DPAA2_PMD_ERR("Error in set tx conf mode settings: "
+ "err=%d", ret);
return -1;
}
}
@@ -560,7 +569,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
*/
cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
cong_notif_cfg.message_ctx = 0;
- cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
+ cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
cong_notif_cfg.notification_mode =
DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
@@ -573,9 +582,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
tc_id,
&cong_notif_cfg);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Error in setting tx congestion notification: = %d",
- -ret);
+ DPAA2_PMD_ERR(
+ "Error in setting tx congestion notification: "
+ "err=%d", ret);
return -ret;
}
}
@@ -610,7 +619,7 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_PMD_ERR("Failure in affining portal");
return -EINVAL;
}
}
@@ -620,8 +629,8 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
frame_cnt = qbman_fq_state_frame_count(&state);
- RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
- rx_queue_id, frame_cnt);
+ DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u",
+ rx_queue_id, frame_cnt);
}
return frame_cnt;
}
@@ -670,14 +679,14 @@ dpaa2_interrupt_handler(void *param)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token,
irq_index, &status);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD, "Can't get irq status (err %d)", ret);
+ DPAA2_PMD_ERR("Can't get irq status (err %d)", ret);
clear = 0xffffffff;
goto out;
}
@@ -693,7 +702,7 @@ out:
ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token,
irq_index, clear);
if (unlikely(ret))
- RTE_LOG(ERR, PMD, "Can't clear irq status (err %d)", ret);
+ DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret);
}
static int
@@ -710,16 +719,16 @@ dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable)
err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token,
irq_index, mask);
if (err < 0) {
- PMD_INIT_LOG(ERR, "Error: dpni_set_irq_mask():%d (%s)", err,
- strerror(-err));
+ DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err,
+ strerror(-err));
return err;
}
err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token,
irq_index, enable);
if (err < 0)
- PMD_INIT_LOG(ERR, "Error: dpni_set_irq_enable():%d (%s)", err,
- strerror(-err));
+ DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err,
+ strerror(-err));
return err;
}
@@ -747,8 +756,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
- ret, priv->hw_id);
+ DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
+ priv->hw_id, ret);
return ret;
}
@@ -758,7 +767,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &qdid);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
+ DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
return ret;
}
priv->qdid = qdid;
@@ -769,8 +778,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
DPNI_QUEUE_RX, dpaa2_q->tc_index,
dpaa2_q->flow_id, &cfg, &qid);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to get flow "
- "information Error code = %d\n", ret);
+ DPAA2_PMD_ERR("Error in getting flow information: "
+ "err=%d", ret);
return ret;
}
dpaa2_q->fqid = qid.fqid;
@@ -785,8 +794,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
priv->token, &err_cfg);
if (ret) {
- PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
- "code = %d\n", ret);
+ DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d",
+ ret);
return ret;
}
@@ -845,14 +854,14 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
- ret, priv->hw_id);
+ DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev",
+ ret, priv->hw_id);
return;
}
/* clear the recorded link status */
memset(&link, 0, sizeof(link));
- dpaa2_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
}
static void
@@ -878,13 +887,12 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
/* Clean the device first */
ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
- " error code %d\n", ret);
+ DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret);
return;
}
memset(&link, 0, sizeof(link));
- dpaa2_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
}
static void
@@ -898,17 +906,17 @@ dpaa2_dev_promiscuous_enable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to enable U promisc mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret);
ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to enable M promisc mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret);
}
static void
@@ -922,21 +930,20 @@ dpaa2_dev_promiscuous_disable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to disable U promisc mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret);
if (dev->data->all_multicast == 0) {
ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW,
priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD,
- "Unable to disable M promisc mode %d\n",
- ret);
+ DPAA2_PMD_ERR("Unable to disable M promisc mode %d",
+ ret);
}
}
@@ -951,13 +958,13 @@ dpaa2_dev_allmulticast_enable(
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to enable multicast mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret);
}
static void
@@ -970,7 +977,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
@@ -980,7 +987,7 @@ dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev)
ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
if (ret < 0)
- RTE_LOG(ERR, PMD, "Unable to disable multicast mode %d\n", ret);
+ DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret);
}
static int
@@ -995,7 +1002,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return -EINVAL;
}
@@ -1004,9 +1011,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return -EINVAL;
if (frame_size > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -1016,10 +1025,10 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
frame_size);
if (ret) {
- PMD_DRV_LOG(ERR, "setting the max frame length failed");
+ DPAA2_PMD_ERR("Setting the max frame length failed");
return -1;
}
- PMD_DRV_LOG(INFO, "MTU is configured %d for the device", mtu);
+ DPAA2_PMD_INFO("MTU configured for the device: %d", mtu);
return 0;
}
@@ -1036,15 +1045,15 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return -1;
}
ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW,
priv->token, addr->addr_bytes);
if (ret)
- RTE_LOG(ERR, PMD,
- "error: Adding the MAC ADDR failed: err = %d\n", ret);
+ DPAA2_PMD_ERR(
+ "error: Adding the MAC ADDR failed: err = %d", ret);
return 0;
}
@@ -1063,18 +1072,18 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev,
macaddr = &data->mac_addrs[index];
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW,
priv->token, macaddr->addr_bytes);
if (ret)
- RTE_LOG(ERR, PMD,
- "error: Removing the MAC ADDR failed: err = %d\n", ret);
+ DPAA2_PMD_ERR(
+ "error: Removing the MAC ADDR failed: err = %d", ret);
}
-static void
+static int
dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
@@ -1085,17 +1094,20 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
- return;
+ DPAA2_PMD_ERR("dpni is NULL");
+ return -EINVAL;
}
ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW,
priv->token, addr->addr_bytes);
if (ret)
- RTE_LOG(ERR, PMD,
- "error: Setting the MAC ADDR failed %d\n", ret);
+ DPAA2_PMD_ERR(
+ "error: Setting the MAC ADDR failed %d", ret);
+
+ return ret;
}
+
static
int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats)
@@ -1111,12 +1123,12 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
if (!dpni) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return -EINVAL;
}
if (!stats) {
- RTE_LOG(ERR, PMD, "stats is NULL\n");
+ DPAA2_PMD_ERR("stats is NULL");
return -EINVAL;
}
@@ -1155,7 +1167,7 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev,
return 0;
err:
- RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
+ DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
return retcode;
};
@@ -1169,12 +1181,12 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
union dpni_statistics value[3] = {};
unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings);
- if (xstats == NULL)
- return 0;
-
if (n < num)
return num;
+ if (xstats == NULL)
+ return 0;
+
/* Get Counters from page_0*/
retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
0, 0, &value[0]);
@@ -1200,17 +1212,20 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
}
return i;
err:
- RTE_LOG(ERR, PMD, "Error in obtaining extended stats (%d)\n", retcode);
+ DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode);
return retcode;
}
static int
dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned int limit)
+ unsigned int limit)
{
unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
+ if (limit < stat_cnt)
+ return stat_cnt;
+
if (xstats_names != NULL)
for (i = 0; i < stat_cnt; i++)
snprintf(xstats_names[i].name,
@@ -1269,7 +1284,7 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
for (i = 0; i < n; i++) {
if (ids[i] >= stat_cnt) {
- PMD_INIT_LOG(ERR, "id value isn't valid");
+ DPAA2_PMD_ERR("xstats id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@@ -1294,7 +1309,7 @@ dpaa2_xstats_get_names_by_id(
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- PMD_INIT_LOG(ERR, "id value isn't valid");
+ DPAA2_PMD_ERR("xstats id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
@@ -1312,7 +1327,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return;
}
@@ -1323,7 +1338,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
return;
error:
- RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
+ DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode);
return;
};
@@ -1335,24 +1350,17 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
int ret;
struct dpaa2_dev_priv *priv = dev->data->dev_private;
struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
struct dpni_link_state state = {0};
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return 0;
}
- memset(&old, 0, sizeof(old));
- dpaa2_dev_atomic_read_link_status(dev, &old);
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
- return -1;
- }
-
- if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
- RTE_LOG(DEBUG, PMD, "No change in status\n");
+ DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
return -1;
}
@@ -1365,13 +1373,14 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
else
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- dpaa2_dev_atomic_write_link_status(dev, &link);
-
- if (link.link_status)
- PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == -1)
+ DPAA2_PMD_DEBUG("No change in status");
else
- PMD_DRV_LOG(INFO, "Port %d Link is Down", dev->data->port_id);
- return 0;
+ DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
+ link.link_status ? "Up" : "Down");
+
+ return ret;
}
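rte_eth_linkstatus_set replaces the driver-local cmpset helpers deleted
earlier in this file; it atomically swaps the 64-bit link word and reports
whether anything changed (-1 when the new state equals the old one, as the
rewritten dpaa2_dev_link_update above relies on). A rough model of that
behavior using C11 atomics; the struct layout is illustrative, the real
rte_eth_link packs status, speed and duplex into 8 bytes:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative 8-byte link word, mimicking rte_eth_link's packing. */
struct link_word { uint32_t speed; uint16_t duplex; uint16_t status; };

static _Atomic uint64_t dev_link; /* stand-in for dev->data->dev_link */

/* Return 0 if the stored link changed, -1 if it was already equal. */
static int linkstatus_set(const struct link_word *new_link)
{
	uint64_t newv, oldv;

	memcpy(&newv, new_link, sizeof(newv));
	oldv = atomic_exchange(&dev_link, newv);
	return oldv == newv ? -1 : 0;
}

int main(void)
{
	struct link_word up = { .speed = 10000, .duplex = 1, .status = 1 };

	printf("first set: %d\n", linkstatus_set(&up));  /* 0: changed */
	printf("second set: %d\n", linkstatus_set(&up)); /* -1: unchanged */
	return 0;
}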
/**
@@ -1391,7 +1400,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
dpni = (struct fsl_mc_io *)priv->hw;
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "DPNI is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return ret;
}
@@ -1399,7 +1408,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en);
if (ret) {
/* Unable to obtain dpni status; Not continuing */
- PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
+ DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
return -EINVAL;
}
@@ -1407,13 +1416,13 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
if (!en) {
ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_DRV_LOG(ERR, "Interface Link UP failed (%d)", ret);
+ DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret);
return -EINVAL;
}
}
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
+ DPAA2_PMD_ERR("Unable to get link state (%d)", ret);
return -1;
}
@@ -1422,10 +1431,9 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev)
dev->data->dev_link.link_status = state.up;
if (state.up)
- PMD_DRV_LOG(INFO, "Port %d Link is set as UP",
- dev->data->port_id);
+ DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id);
else
- PMD_DRV_LOG(INFO, "Port %d Link is DOWN", dev->data->port_id);
+ DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id);
return ret;
}
@@ -1448,7 +1456,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
dpni = (struct fsl_mc_io *)priv->hw;
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "Device has not yet been configured\n");
+ DPAA2_PMD_ERR("Device has not yet been configured");
return ret;
}
@@ -1461,12 +1469,12 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
do {
ret = dpni_disable(dpni, 0, priv->token);
if (ret) {
- PMD_DRV_LOG(ERR, "dpni disable failed (%d)", ret);
+ DPAA2_PMD_ERR("dpni disable failed (%d)", ret);
return ret;
}
ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled);
if (ret) {
- PMD_DRV_LOG(ERR, "dpni_is_enabled failed (%d)", ret);
+ DPAA2_PMD_ERR("dpni enable check failed (%d)", ret);
return ret;
}
if (dpni_enabled)
@@ -1475,12 +1483,12 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
} while (dpni_enabled && --retries);
if (!retries) {
- PMD_DRV_LOG(WARNING, "Retry count exceeded disabling DPNI\n");
+ DPAA2_PMD_WARN("Retry count exceeded disabling dpni");
/* todo- we may have to manually cleanup queues.
*/
} else {
- PMD_DRV_LOG(INFO, "Port %d Link DOWN successful",
- dev->data->port_id);
+ DPAA2_PMD_INFO("Port %d Link DOWN successful",
+ dev->data->port_id);
}
dev->data->dev_link.link_status = 0;
@@ -1502,13 +1510,13 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
dpni = (struct fsl_mc_io *)priv->hw;
if (dpni == NULL || fc_conf == NULL) {
- RTE_LOG(ERR, PMD, "device not configured\n");
+ DPAA2_PMD_ERR("device not configured");
return ret;
}
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret) {
- RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d\n", ret);
+ DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret);
return ret;
}
@@ -1558,7 +1566,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
dpni = (struct fsl_mc_io *)priv->hw;
if (dpni == NULL) {
- RTE_LOG(ERR, PMD, "dpni is NULL\n");
+ DPAA2_PMD_ERR("dpni is NULL");
return ret;
}
@@ -1568,7 +1576,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
*/
ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
if (ret) {
- RTE_LOG(ERR, PMD, "Unable to get link state (err=%d)\n", ret);
+ DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret);
return -1;
}
@@ -1613,16 +1621,15 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
break;
default:
- RTE_LOG(ERR, PMD, "Incorrect Flow control flag (%d)\n",
- fc_conf->mode);
+ DPAA2_PMD_ERR("Incorrect Flow control flag (%d)",
+ fc_conf->mode);
return -1;
}
ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg);
if (ret)
- RTE_LOG(ERR, PMD,
- "Unable to set Link configuration (err=%d)\n",
- ret);
+ DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)",
+ ret);
/* Enable link */
dpaa2_dev_set_link_up(dev);
@@ -1643,13 +1650,13 @@ dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev,
if (rss_conf->rss_hf) {
ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf);
if (ret) {
- PMD_INIT_LOG(ERR, "unable to set flow dist");
+ DPAA2_PMD_ERR("Unable to set flow dist");
return ret;
}
} else {
ret = dpaa2_remove_flow_dist(dev, 0);
if (ret) {
- PMD_INIT_LOG(ERR, "unable to remove flow dist");
+ DPAA2_PMD_ERR("Unable to remove flow dist");
return ret;
}
}
@@ -1702,12 +1709,12 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
}
options |= DPNI_QUEUE_OPT_USER_CTX;
- cfg.user_context = (uint64_t)(dpaa2_ethq);
+ cfg.user_context = (size_t)(dpaa2_ethq);
ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
dpaa2_ethq->tc_index, flow_id, options, &cfg);
if (ret) {
- RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+ DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
return ret;
}
@@ -1734,7 +1741,7 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
dpaa2_ethq->tc_index, flow_id, options, &cfg);
if (ret)
- RTE_LOG(ERR, PMD, "Error in dpni_set_queue: ret: %d\n", ret);
+ DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret);
return ret;
}
@@ -1801,15 +1808,15 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
if (!dpni_dev) {
- PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
+ DPAA2_PMD_ERR("Memory allocation failed for dpni device");
return -1;
}
dpni_dev->regs = rte_mcp_ptr_list[0];
ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Failure in opening dpni@%d with err code %d\n",
+ DPAA2_PMD_ERR(
+ "Failure in opening dpni@%d with err code %d",
hw_id, ret);
rte_free(dpni_dev);
return -1;
@@ -1818,16 +1825,15 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* Clean the device first */
ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Failure cleaning dpni@%d with err code %d\n",
- hw_id, ret);
+ DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d",
+ hw_id, ret);
goto init_err;
}
ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Failure in get dpni@%d attribute, err code %d\n",
+ DPAA2_PMD_ERR(
+ "Failure in get dpni@%d attribute, err code %d",
hw_id, ret);
goto init_err;
}
@@ -1843,8 +1849,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* Using number of TX queues as number of TX TCs */
priv->nb_tx_queues = attr.num_tx_tcs;
- PMD_DRV_LOG(DEBUG, "RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
- priv->num_rx_tc, priv->nb_rx_queues, priv->nb_tx_queues);
+ DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
+ priv->num_rx_tc, priv->nb_rx_queues,
+ priv->nb_tx_queues);
priv->hw = dpni_dev;
priv->hw_id = hw_id;
@@ -1856,7 +1863,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
/* Allocate memory for hardware structure for queues */
ret = dpaa2_alloc_rx_tx_queues(eth_dev);
if (ret) {
- PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
+ DPAA2_PMD_ERR("Queue allocation Failed");
goto init_err;
}
@@ -1864,9 +1871,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = rte_zmalloc("dpni",
ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR,
+ DPAA2_PMD_ERR(
"Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * attr.mac_filter_entries);
+ ETHER_ADDR_LEN * attr.mac_filter_entries);
ret = -ENOMEM;
goto init_err;
}
@@ -1875,7 +1882,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
priv->token,
(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
if (ret) {
- PMD_INIT_LOG(ERR, "DPNI get mac address failed:Err Code = %d\n",
+ DPAA2_PMD_ERR("DPNI get mac address failed: Err Code = %d",
ret);
goto init_err;
}
@@ -1887,8 +1894,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX, &layout);
if (ret) {
- PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer layout",
- ret);
+ DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret);
goto init_err;
}
@@ -1899,7 +1905,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
DPNI_QUEUE_TX_CONFIRM, &layout);
if (ret) {
- PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer layout",
+ DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout",
ret);
goto init_err;
}
@@ -1908,7 +1914,6 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
- rte_fslmc_vfio_dmamap();
RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
return 0;
@@ -1931,7 +1936,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
if (!dpni) {
- PMD_INIT_LOG(WARNING, "Already closed or not started");
+ DPAA2_PMD_WARN("Already closed or not started");
return -1;
}
@@ -1958,8 +1963,8 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
/* Close the device at underlying layer*/
ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Failure closing dpni device with err code %d\n",
+ DPAA2_PMD_ERR(
+ "Failure closing dpni device with err code %d",
ret);
}
@@ -1971,7 +1976,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
- RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
+ DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
return 0;
}
@@ -1991,8 +1996,8 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
sizeof(struct dpaa2_dev_priv),
RTE_CACHE_LINE_SIZE);
if (eth_dev->data->dev_private == NULL) {
- PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
- " private port data\n");
+ DPAA2_PMD_CRIT(
+ "Unable to allocate memory for private data");
rte_eth_dev_release_port(eth_dev);
return -ENOMEM;
}
@@ -2013,8 +2018,10 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
/* Invoke PMD device initialization function */
diag = dpaa2_dev_init(eth_dev);
- if (diag == 0)
+ if (diag == 0) {
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
+ }
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(eth_dev->data->dev_private);
@@ -2045,3 +2052,10 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
};
RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
+
+RTE_INIT(dpaa2_pmd_init_log)
+{
+ dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
+ if (dpaa2_logtype_pmd >= 0)
+ rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
+}
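
The hunk above replaces the legacy static PMD log macros with a dynamically registered logtype. As a minimal sketch of the pattern (everything except the rte_* calls is illustrative, not driver code): a constructor registers a named logtype before main() runs, and users can then retune it at runtime with the EAL option --log-level=pmd.net.mydev:debug.

    #include <rte_common.h>
    #include <rte_log.h>

    static int my_pmd_logtype;

    #define MY_PMD_ERR(fmt, args...) \
        rte_log(RTE_LOG_ERR, my_pmd_logtype, "my_pmd: " fmt "\n", ##args)

    /* Runs as a constructor before main(), so the logtype already
     * exists when the EAL parses the --log-level arguments.
     */
    RTE_INIT(my_pmd_init_log)
    {
        my_pmd_logtype = rte_log_register("pmd.net.mydev");
        if (my_pmd_logtype >= 0)
            rte_log_set_level(my_pmd_logtype, RTE_LOG_NOTICE);
    }
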
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
index ba0856f3..bd69f523 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.h
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -50,6 +50,12 @@
/* Disable RX tail drop, default is enable */
#define DPAA2_RX_TAILDROP_OFF 0x04
+#define DPAA2_RSS_OFFLOAD_ALL ( \
+ ETH_RSS_IP | \
+ ETH_RSS_UDP | \
+ ETH_RSS_TCP | \
+ ETH_RSS_SCTP)
+
/* LX2 FRC Parsed values (Little Endian) */
#define DPAA2_PKT_TYPE_ETHER 0x0060
#define DPAA2_PKT_TYPE_IPV4 0x0000
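
DPAA2_RSS_OFFLOAD_ALL collects the hash types the PMD can distribute on. A hypothetical helper (not part of the driver) shows how such a mask is typically consumed when validating a caller's rss_hf request:

    #include <stdint.h>
    #include <errno.h>

    /* Hypothetical: reject any requested hash type outside the
     * supported mask (e.g. DPAA2_RSS_OFFLOAD_ALL above).
     */
    static int
    validate_rss_hf(uint64_t requested, uint64_t supported)
    {
        if (requested & ~supported)
            return -EINVAL;
        return 0;
    }
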
diff --git a/drivers/net/dpaa2/dpaa2_pmd_logs.h b/drivers/net/dpaa2/dpaa2_pmd_logs.h
new file mode 100644
index 00000000..c04babdb
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_pmd_logs.h
@@ -0,0 +1,41 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef _DPAA2_PMD_LOGS_H_
+#define _DPAA2_PMD_LOGS_H_
+
+extern int dpaa2_logtype_pmd;
+
+#define DPAA2_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_pmd, "dpaa2_net: " \
+ fmt "\n", ##args)
+
+#define DPAA2_PMD_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_DEBUG(">>")
+
+#define DPAA2_PMD_CRIT(fmt, args...) \
+ DPAA2_PMD_LOG(CRIT, fmt, ## args)
+#define DPAA2_PMD_INFO(fmt, args...) \
+ DPAA2_PMD_LOG(INFO, fmt, ## args)
+#define DPAA2_PMD_ERR(fmt, args...) \
+ DPAA2_PMD_LOG(ERR, fmt, ## args)
+#define DPAA2_PMD_WARN(fmt, args...) \
+ DPAA2_PMD_LOG(WARNING, fmt, ## args)
+
+/* DP logs, compiled out at build time when the message level is below the configured datapath log level */
+#define DPAA2_PMD_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_PMD_DP_DEBUG(fmt, args...) \
+ DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_PMD_DP_INFO(fmt, args...) \
+ DPAA2_PMD_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_PMD_DP_WARN(fmt, args...) \
+ DPAA2_PMD_DP_LOG(WARNING, fmt, ## args)
+
+#endif /* _DPAA2_PMD_LOGS_H_ */
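
The DP (datapath) variants above build on RTE_LOG_DP(), which expands to nothing when the message level is below the RTE_LOG_DP_LEVEL build-time setting, so hot-path logging costs zero cycles in release builds. A minimal illustration:

    #include <rte_log.h>

    static inline void
    rx_trace(unsigned int n)
    {
        /* Compiled out entirely unless RTE_LOG_DP_LEVEL admits DEBUG. */
        RTE_LOG_DP(DEBUG, PMD, "received %u frames\n", n);
    }
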
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 183293c1..ef109a62 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -16,13 +16,12 @@
#include <rte_dev.h>
#include <rte_fslmc.h>
-#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
-#include <dpaa2_eventdev.h>
+#include "dpaa2_pmd_logs.h"
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
@@ -37,7 +36,7 @@
static inline void __attribute__((hot))
dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
{
- PMD_RX_LOG(DEBUG, "frc = 0x%x ", frc);
+ DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc);
m->packet_type = RTE_PTYPE_UNKNOWN;
switch (frc) {
@@ -104,13 +103,12 @@ dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
}
static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
{
uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
- struct dpaa2_annot_hdr *annotation =
- (struct dpaa2_annot_hdr *)hw_annot_addr;
- PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
+ DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t",
+ annotation->word4);
if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
pkt_type = RTE_PTYPE_L2_ETHER_ARP;
goto parse_done;
@@ -167,12 +165,13 @@ parse_done:
}
static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
struct dpaa2_annot_hdr *annotation =
(struct dpaa2_annot_hdr *)hw_annot_addr;
- PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
+ DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t",
+ annotation->word4);
/* Check offloads first */
if (BIT_ISSET_AT_POS(annotation->word3,
@@ -203,29 +202,27 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
RTE_PTYPE_L4_UDP;
default:
- PMD_RX_LOG(DEBUG, "Slow parse the parsing results\n");
break;
}
- return dpaa2_dev_rx_parse_slow(hw_annot_addr);
+ return dpaa2_dev_rx_parse_slow(annotation);
}
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
struct qbman_sge *sgt, *sge;
- dma_addr_t sg_addr;
+ size_t sg_addr, fd_addr;
int i = 0;
- uint64_t fd_addr;
struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
- fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
/* Get Scatter gather table address */
sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
sge = &sgt[i++];
- sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
+ sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
/* First Scatter gather entry */
first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
@@ -243,14 +240,14 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
DPAA2_GET_FD_FRC_PARSE_SUM(fd));
else
first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
- (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
- + DPAA2_FD_PTA_SIZE);
+ (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE));
rte_mbuf_refcnt_set(first_seg, 1);
cur_seg = first_seg;
while (!DPAA2_SG_IS_FINAL(sge)) {
sge = &sgt[i++];
- sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
+ sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
DPAA2_GET_FLE_ADDR(sge));
next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
@@ -299,11 +296,11 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
else
mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
- (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
- + DPAA2_FD_PTA_SIZE);
+ (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE));
- PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
- "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
+ DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
mbuf, mbuf->buf_addr, mbuf->data_off,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
@@ -320,15 +317,9 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
struct qbman_sge *sgt, *sge = NULL;
int i;
- if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
- int ret = rte_vlan_insert(&mbuf);
- if (ret)
- return ret;
- }
-
temp = rte_pktmbuf_alloc(mbuf->pool);
if (temp == NULL) {
- PMD_TX_LOG(ERR, "No memory to allocate S/G table");
+ DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
return -ENOMEM;
}
@@ -340,7 +331,7 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
/*Set Scatter gather table and Scatter gather entries*/
sgt = (struct qbman_sge *)(
- (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ DPAA2_GET_FD_OFFSET(fd));
for (i = 0; i < mbuf->nb_segs; i++) {
@@ -392,17 +383,10 @@ static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid)
{
- if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
- if (rte_vlan_insert(&mbuf)) {
- rte_pktmbuf_free(mbuf);
- return;
- }
- }
-
DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
- PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
- "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
+ DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n",
mbuf, mbuf->buf_addr, mbuf->data_off,
DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
@@ -431,15 +415,9 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
struct rte_mbuf *m;
void *mb = NULL;
- if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
- int ret = rte_vlan_insert(&mbuf);
- if (ret)
- return ret;
- }
-
if (rte_dpaa2_mbuf_alloc_bulk(
rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
- PMD_TX_LOG(WARNING, "Unable to allocated DPAA2 buffer");
+ DPAA2_PMD_DP_DEBUG("Unable to allocate DPAA2 buffer\n");
return -1;
}
m = (struct rte_mbuf *)mb;
@@ -455,19 +433,26 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
- PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
- (void *)mbuf, mbuf->buf_addr);
-
- PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
- DPAA2_GET_FD_ADDR(fd),
+ DPAA2_PMD_DP_DEBUG(
+ "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
+ " meta: %d, off: %d, len: %d\n",
+ (void *)mbuf,
+ mbuf->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
DPAA2_GET_FD_BPID(fd),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
DPAA2_GET_FD_OFFSET(fd),
DPAA2_GET_FD_LEN(fd));
- return 0;
+ return 0;
}
+/* This function assumes that the caller keeps the same value of nb_pkts
 * across calls per queue; if that is not the case, it is better to use the
 * non-prefetch version of the Rx call.
 * It returns the packets requested in the previous call, without honoring
 * the current nb_pkts or the space available in bufs.
 */
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
@@ -475,7 +460,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
struct qbman_result *dq_storage, *dq_storage1 = NULL;
uint32_t fqid = dpaa2_q->fqid;
- int ret, num_rx = 0;
+ int ret, num_rx = 0, pull_size;
uint8_t pending, status;
struct qbman_swp *swp;
const struct qbman_fd *fd, *next_fd;
@@ -483,48 +468,51 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
struct rte_eth_dev *dev = dpaa2_q->dev;
- if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
- ret = dpaa2_affine_qbman_swp();
+ if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
+ ret = dpaa2_affine_qbman_ethrx_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_PMD_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_PORTAL;
+ swp = DPAA2_PER_LCORE_ETHRX_PORTAL;
+ pull_size = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_pkts;
if (unlikely(!q_storage->active_dqs)) {
q_storage->toggle = 0;
dq_storage = q_storage->dq_storage[q_storage->toggle];
- q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_pkts;
+ q_storage->last_num_pkts = pull_size;
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_numframes(&pulldesc,
q_storage->last_num_pkts);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
- (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
- if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
while (!qbman_check_command_complete(
- get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ get_swp_active_dqs(
+ DPAA2_PER_LCORE_ETHRX_DPIO->index)))
;
- clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+ clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
}
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- PMD_RX_LOG(WARNING, "VDQ command is not issued."
- "QBMAN is busy\n");
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
+ " QBMAN is busy (1)\n");
/* Portal was busy, try again */
continue;
}
break;
}
q_storage->active_dqs = dq_storage;
- q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
- set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index,
+ dq_storage);
}
dq_storage = q_storage->active_dqs;
- rte_prefetch0((void *)((uint64_t)(dq_storage)));
- rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
+ rte_prefetch0((void *)(size_t)(dq_storage));
+ rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
* prefetching done on DQRR entries
@@ -532,10 +520,10 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
qbman_pull_desc_clear(&pulldesc);
- qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+ qbman_pull_desc_set_numframes(&pulldesc, pull_size);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
- (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
/* Check if the previous issued command is completed.
* Also seems like the SWP is shared between the Ethernet Driver
@@ -554,7 +542,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
*/
while (!qbman_check_new_result(dq_storage))
;
- rte_prefetch0((void *)((uint64_t)(dq_storage + 2)));
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
/* Check whether Last Pull command is Expired and
* setting Condition for Loop termination
*/
@@ -569,7 +557,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
next_fd = qbman_result_DQ_fd(dq_storage + 1);
/* Prefetch Annotation address for the parse results */
- rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd)
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
+ DPAA2_FD_PTA_SIZE + 16));
if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
@@ -578,31 +566,31 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
bufs[num_rx] = eth_fd_to_mbuf(fd);
bufs[num_rx]->port = dev->data->port_id;
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
rte_vlan_strip(bufs[num_rx]);
dq_storage++;
num_rx++;
} while (pending);
- if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+ if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) {
while (!qbman_check_command_complete(
- get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
+ get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)))
;
- clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+ clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index);
}
/* issue a volatile dequeue command for next pull */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- PMD_RX_LOG(WARNING, "VDQ command is not issued."
- "QBMAN is busy\n");
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued."
+ " QBMAN is busy (2)\n");
continue;
}
break;
}
q_storage->active_dqs = dq_storage1;
- q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
- set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1);
dpaa2_q->rx_pkts += num_rx;
@@ -616,7 +604,7 @@ dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
struct dpaa2_queue *rxq,
struct rte_event *ev)
{
- rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
DPAA2_FD_PTA_SIZE + 16));
ev->flow_id = rxq->ev.flow_id;
@@ -641,7 +629,7 @@ dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
{
uint8_t dqrr_index;
- rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
DPAA2_FD_PTA_SIZE + 16));
ev->flow_id = rxq->ev.flow_id;
@@ -686,13 +674,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_PMD_ERR("Failure in affining portal");
return 0;
}
}
swp = DPAA2_PER_LCORE_PORTAL;
- PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);
+ DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid);
/*Prepare enqueue descriptor*/
qbman_eq_desc_clear(&eqdesc);
@@ -726,7 +714,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
fd_arr[loop].simple.frc = 0;
DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
- DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
+ DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
if (likely(RTE_MBUF_DIRECT(*bufs))) {
mp = (*bufs)->pool;
/* Check the basic scenario and set
@@ -736,8 +724,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
priv->bp_list->dpaa2_ops_index &&
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
- if (unlikely((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT)) {
+ if (unlikely(((*bufs)->ol_flags
+ & PKT_TX_VLAN_PKT) ||
+ (dev->data->dev_conf.txmode.offloads
+ & DEV_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
@@ -753,19 +743,26 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
/* Not a hw_pkt pool allocated frame */
if (unlikely(!mp || !priv->bp_list)) {
- PMD_TX_LOG(ERR, "err: no bpool attached");
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
goto send_n_return;
}
+ if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+ (dev->data->dev_conf.txmode.offloads
+ & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ int ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_n_return;
+ }
if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
- PMD_TX_LOG(ERR, "non hw offload bufffer ");
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
/* alloc should be from the default buffer pool
* attached to this interface
*/
bpid = priv->bp_list->buf_pool.bpid;
if (unlikely((*bufs)->nb_segs > 1)) {
- PMD_TX_LOG(ERR, "S/G support not added"
+ DPAA2_PMD_ERR("S/G support not added"
" for non hw offload buffer");
goto send_n_return;
}
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
index 69cf119c..9f228169 100644
--- a/drivers/net/dpaa2/mc/dpni.c
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -198,7 +198,7 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
- for (i = 0; i < DPNI_MAX_DPBP; i++) {
+ for (i = 0; i < cmd_params->num_dpbp; i++) {
cmd_params->pool[i].dpbp_id =
cpu_to_le16(cfg->pools[i].dpbp_id);
cmd_params->pool[i].priority_mask =
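
The one-line dpni.c change is a bounds fix: the loop previously walked all DPNI_MAX_DPBP slots even when the caller supplied fewer pools, copying uninitialized entries. Sketched in isolation (types and names are illustrative):

    #include <stdint.h>

    #define MAX_POOLS 8 /* stands in for DPNI_MAX_DPBP */

    struct pools_cfg {
        uint8_t num;                  /* entries actually filled in */
        uint16_t pool_id[MAX_POOLS];
    };

    static void
    copy_pools(struct pools_cfg *dst, const struct pools_cfg *src)
    {
        uint8_t i;

        dst->num = src->num;
        for (i = 0; i < dst->num; i++)   /* was: i < MAX_POOLS */
            dst->pool_id[i] = src->pool_id[i];
    }
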
diff --git a/drivers/net/dpaa2/meson.build b/drivers/net/dpaa2/meson.build
new file mode 100644
index 00000000..213f0d72
--- /dev/null
+++ b/drivers/net/dpaa2/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['mempool_dpaa2']
+sources = files('base/dpaa2_hw_dpni.c',
+ 'dpaa2_ethdev.c',
+ 'dpaa2_rxtx.c',
+ 'mc/dpkg.c',
+ 'mc/dpni.c')
+
+includes += include_directories('base', 'mc')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ba81a1f4..9c87e883 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -22,7 +22,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 177 -diag-disable 181
+CFLAGS_BASE_DRIVER += -diag-disable 869 -diag-disable 2259
else
#
# CFLAGS for gcc/clang
@@ -61,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_logs.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c
SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c
diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c
index 15c7dd84..da1a9a70 100644
--- a/drivers/net/e1000/base/e1000_82575.c
+++ b/drivers/net/e1000/base/e1000_82575.c
@@ -312,6 +312,9 @@ STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
break;
+ case BCM54616_E_PHY_ID:
+ phy->type = e1000_phy_none;
+ break;
default:
ret_val = -E1000_ERR_PHY;
goto out;
@@ -1607,6 +1610,8 @@ STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
case e1000_phy_82580:
ret_val = e1000_copper_link_setup_82577(hw);
break;
+ case e1000_phy_none:
+ break;
default:
ret_val = -E1000_ERR_PHY;
break;
diff --git a/drivers/net/e1000/base/e1000_defines.h b/drivers/net/e1000/base/e1000_defines.h
index dbc2bbbe..e2101c17 100644
--- a/drivers/net/e1000/base/e1000_defines.h
+++ b/drivers/net/e1000/base/e1000_defines.h
@@ -1274,6 +1274,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I350_I_PHY_ID 0x015403B0
#define I210_I_PHY_ID 0x01410C00
#define IGP04E1000_E_PHY_ID 0x02A80391
+#define BCM54616_E_PHY_ID 0x03625D10
#define M88_VENDOR 0x0141
/* M88E1000 Specific Registers */
diff --git a/drivers/net/e1000/base/e1000_phy.h b/drivers/net/e1000/base/e1000_phy.h
index 3e45a9ef..2cd0e14b 100644
--- a/drivers/net/e1000/base/e1000_phy.h
+++ b/drivers/net/e1000/base/e1000_phy.h
@@ -330,4 +330,12 @@ struct sfp_e1000_flags {
#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00
#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100
+/* EEPROM byte offsets */
+#define IGB_SFF_8472_SWAP 0x5C
+#define IGB_SFF_8472_COMP 0x5E
+
+/* Bitmasks */
+#define IGB_SFF_ADDRESSING_MODE 0x4
+#define IGB_SFF_8472_UNSUP 0x00
+
#endif
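
These EEPROM offsets and bitmasks back the new get_module_info/get_module_eeprom ops added to igb further down. A hypothetical helper shows the usual decision they drive: whether a plugged module exposes the extended SFF-8472 (A2h) diagnostics page or only the basic SFF-8079 data.

    #include <stdint.h>

    /* Hypothetical helper, not driver code: comp is the byte read at
     * IGB_SFF_8472_COMP and swap the byte at IGB_SFF_8472_SWAP.
     */
    static int
    module_has_sff8472(uint8_t comp, uint8_t swap)
    {
        if (comp == IGB_SFF_8472_UNSUP)
            return 0;   /* module advertises no A2h page */
        if (swap & IGB_SFF_ADDRESSING_MODE)
            return 0;   /* paged addressing not handled here */
        return 1;
    }
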
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 23b089c8..902001f3 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -4,6 +4,10 @@
#ifndef _E1000_ETHDEV_H_
#define _E1000_ETHDEV_H_
+
+#include <stdint.h>
+
+#include <rte_flow.h>
#include <rte_time.h>
#include <rte_pci.h>
@@ -27,6 +31,7 @@
#define E1000_CTRL_EXT_EXTEND_VLAN (1<<26) /* EXTENDED VLAN */
#define IGB_VFTA_SIZE 128
+#define IGB_HKEY_MAX_INDEX 10
#define IGB_MAX_RX_QUEUE_NUM 8
#define IGB_MAX_RX_QUEUE_NUM_82576 16
@@ -229,8 +234,8 @@ struct igb_ethertype_filter {
};
struct igb_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+ uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};
@@ -357,6 +362,9 @@ void eth_igb_rx_queue_release(void *rxq);
void igb_dev_clear_queues(struct rte_eth_dev *dev);
void igb_dev_free_queues(struct rte_eth_dev *dev);
+uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
@@ -370,6 +378,9 @@ int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
+uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -417,6 +428,8 @@ void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
+
/*
* RX/TX EM function prototypes
*/
@@ -426,6 +439,9 @@ void eth_em_rx_queue_release(void *rxq);
void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);
+uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
@@ -439,6 +455,9 @@ int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -487,6 +506,10 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
struct rte_eth_flex_filter *filter,
bool add);
+int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int igb_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int igb_config_rss_filter(struct rte_eth_dev *dev,
struct igb_rte_flow_rss_conf *conf,
bool add);
diff --git a/drivers/net/e1000/e1000_logs.c b/drivers/net/e1000/e1000_logs.c
new file mode 100644
index 00000000..22173939
--- /dev/null
+++ b/drivers/net/e1000/e1000_logs.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "e1000_logs.h"
+
+/* declared as extern in e1000_logs.h */
+int e1000_logtype_init;
+int e1000_logtype_driver;
+
+/* avoids double registering of logs if EM and IGB drivers are in use */
+static int e1000_log_initialized;
+
+void
+e1000_igb_init_log(void)
+{
+ if (!e1000_log_initialized) {
+ e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
+ if (e1000_logtype_init >= 0)
+ rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
+ e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
+ if (e1000_logtype_driver >= 0)
+ rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+ e1000_log_initialized = 1;
+ }
+}
diff --git a/drivers/net/e1000/e1000_logs.h b/drivers/net/e1000/e1000_logs.h
index 50348e9e..69d3d311 100644
--- a/drivers/net/e1000/e1000_logs.h
+++ b/drivers/net/e1000/e1000_logs.h
@@ -5,6 +5,8 @@
#ifndef _E1000_LOGS_H_
#define _E1000_LOGS_H_
+#include <rte_log.h>
+
extern int e1000_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, e1000_logtype_init, \
@@ -41,4 +43,8 @@ extern int e1000_logtype_driver;
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+/* log init function shared by e1000 and igb drivers */
+void e1000_igb_init_log(void);
+
#endif /* _E1000_LOGS_H_ */
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 242375ff..053e855b 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -11,7 +11,6 @@
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
-#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -20,7 +19,6 @@
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -94,6 +92,8 @@ static int em_get_rx_buffer_size(struct e1000_hw *hw);
static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr);
static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
@@ -105,9 +105,6 @@ static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
-int e1000_logtype_init;
-int e1000_logtype_driver;
-
/*
* The set of PCI devices this driver supports
*/
@@ -190,6 +187,7 @@ static const struct eth_dev_ops eth_em_ops = {
.dev_led_off = eth_em_led_off,
.flow_ctrl_get = eth_em_flow_ctrl_get,
.flow_ctrl_set = eth_em_flow_ctrl_set,
+ .mac_addr_set = eth_em_default_mac_addr_set,
.mac_addr_add = eth_em_rar_set,
.mac_addr_remove = eth_em_rar_clear,
.set_mc_addr_list = eth_em_set_mc_addr_list,
@@ -197,57 +195,6 @@ static const struct eth_dev_ops eth_em_ops = {
.txq_info_get = em_txq_info_get,
};
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
/**
* eth_em_dev_is_ich8 - Check for ICH8 device
@@ -506,6 +453,7 @@ eth_em_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
PMD_INIT_FUNC_TRACE();
return 0;
@@ -802,7 +750,7 @@ eth_em_stop(struct rte_eth_dev *dev)
/* clear the recorded link status */
memset(&link, 0, sizeof(link));
- rte_em_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
@@ -1069,9 +1017,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu
return 0;
}
-static uint32_t
-em_get_max_pktlen(const struct e1000_hw *hw)
+uint32_t
+em_get_max_pktlen(struct rte_eth_dev *dev)
{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
@@ -1100,20 +1050,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
- dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
+ dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
/*
* Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1135,6 +1074,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
+ dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
@@ -1152,6 +1098,12 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G;
+
+ /* Preferred queue parameters */
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_txportconf.ring_size = 256;
+ dev_info->default_rxportconf.ring_size = 256;
}
/* return 0 means link status changed, -1 means not changed */
@@ -1160,7 +1112,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
int link_check, count;
link_check = 0;
@@ -1195,8 +1147,6 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
}
memset(&link, 0, sizeof(link));
- rte_em_dev_atomic_read_link_status(dev, &link);
- old = link;
/* Now we check if a transition has happened */
if (link_check && (link.link_status == ETH_LINK_DOWN)) {
@@ -1210,19 +1160,13 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
- link.link_speed = 0;
+ link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_status = ETH_LINK_DOWN;
link.link_autoneg = ETH_LINK_FIXED;
}
- rte_em_dev_atomic_write_link_status(dev, &link);
-
- /* not changed */
- if (old.link_status == link.link_status)
- return -1;
- /* changed */
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
/*
@@ -1460,15 +1404,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
static int
eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
if(mask & ETH_VLAN_STRIP_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
em_vlan_hw_strip_enable(dev);
else
em_vlan_hw_strip_disable(dev);
}
if(mask & ETH_VLAN_FILTER_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
em_vlan_hw_filter_enable(dev);
else
em_vlan_hw_filter_disable(dev);
@@ -1631,8 +1578,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
if (ret < 0)
return 0;
- memset(&link, 0, sizeof(link));
- rte_em_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
+
if (link.link_status) {
PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
dev->data->port_id, link.link_speed,
@@ -1810,6 +1757,15 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
}
static int
+eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
+{
+ eth_em_rar_clear(dev, 0);
+
+ return eth_em_rar_set(dev, (void *)addr, 0, 0);
+}
+
+static int
eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct rte_eth_dev_info dev_info;
@@ -1835,10 +1791,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl &= ~E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -1864,14 +1822,8 @@ RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(e1000_init_log);
-static void
-e1000_init_log(void)
+/* see e1000_logs.c */
+RTE_INIT(igb_init_log)
{
- e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
- if (e1000_logtype_init >= 0)
- rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
- e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
- if (e1000_logtype_driver >= 0)
- rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+ e1000_igb_init_log();
}
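
A recurring change in this release, visible above and in the dpaa2 and igb hunks: drivers drop their hand-rolled rte_atomic64_cmpset() link-status helpers in favor of the ethdev-layer rte_eth_linkstatus_set()/rte_eth_linkstatus_get(), which update the shared link struct atomically and report whether the status changed. A minimal sketch, assuming only the 18.08 ethdev driver API (the function and its values are illustrative):

    #include <string.h>
    #include <rte_ethdev_driver.h>

    static int
    my_link_update(struct rte_eth_dev *dev, int up)
    {
        struct rte_eth_link link;

        memset(&link, 0, sizeof(link));
        link.link_status = up ? ETH_LINK_UP : ETH_LINK_DOWN;
        link.link_speed = up ? ETH_SPEED_NUM_1G : ETH_SPEED_NUM_NONE;
        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        /* Returns 0 if the recorded status changed, -1 if unchanged:
         * the exact contract the link_update ethdev op expects.
         */
        return rte_eth_linkstatus_set(dev, &link);
    }
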
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 02fae100..7d2ac4eb 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -85,6 +85,7 @@ struct em_rx_queue {
struct em_rx_entry *sw_ring; /**< address of RX software ring. */
struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+ uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */
uint16_t nb_rx_desc; /**< number of RX descriptors. */
uint16_t rx_tail; /**< current value of RDT register. */
uint16_t nb_rx_hold; /**< number of held free RX desc. */
@@ -163,6 +164,7 @@ struct em_tx_queue {
uint8_t wthresh; /**< Write-back threshold register. */
struct em_ctx_info ctx_cache;
/**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
};
#if 1
@@ -1151,6 +1153,36 @@ em_reset_tx_queue(struct em_tx_queue *txq)
memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
}
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ RTE_SET_USED(dev);
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_queue_offload_capa;
+
+ /*
+ * As only one Tx queue can be used, make the per-queue offload
+ * capability identical to the per-port offload capability for
+ * convenience.
+ */
+ tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+ return tx_queue_offload_capa;
+}
+
int
eth_em_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1163,9 +1195,12 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
struct e1000_hw *hw;
uint32_t tsize;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -1269,6 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
em_reset_tx_queue(txq);
dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = offloads;
return 0;
}
@@ -1313,6 +1349,44 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
rxq->pkt_last_seg = NULL;
}
+uint64_t
+em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+ uint32_t max_rx_pktlen;
+
+ max_rx_pktlen = em_get_max_pktlen(dev);
+
+ rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER;
+ if (max_rx_pktlen > ETHER_MAX_LEN)
+ rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_queue_offload_capa;
+
+ /*
+ * As only one Rx queue can be used, make the per-queue offload
+ * capability identical to the per-port offload capability for
+ * convenience.
+ */
+ rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
+
+ return rx_queue_offload_capa;
+}
+
int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1325,9 +1399,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
struct em_rx_queue *rxq;
struct e1000_hw *hw;
uint32_t rsize;
+ uint64_t offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -1382,8 +1459,10 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1395,6 +1474,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
dev->data->rx_queues[queue_idx] = rxq;
em_reset_rx_queue(rxq);
+ rxq->offloads = offloads;
return 0;
}
@@ -1646,6 +1726,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
{
struct e1000_hw *hw;
struct em_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode;
uint32_t rctl;
uint32_t rfctl;
uint32_t rxcsum;
@@ -1654,6 +1735,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Make sure receives are disabled while setting
@@ -1713,9 +1795,10 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- rxq->crc_len =
- (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1745,7 +1828,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
* to avoid splitting packets that don't fit into
* one buffer.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
rctl_bsize < ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1755,7 +1838,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
}
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1768,7 +1851,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= E1000_RXCSUM_IPOFL;
else
rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1780,24 +1863,24 @@ eth_em_rx_init(struct rte_eth_dev *dev)
if ((hw->mac.type == e1000_ich9lan ||
hw->mac.type == e1000_pch2lan ||
hw->mac.type == e1000_ich10lan) &&
- dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
}
if (hw->mac.type == e1000_pch2lan) {
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
}
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc)
- rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
- else
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+ else
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
@@ -1814,7 +1897,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
/*
* Configure support of jumbo frames, if any.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;
@@ -1894,6 +1977,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->scattered_rx = dev->data->scattered_rx;
qinfo->nb_desc = rxq->nb_rx_desc;
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -1911,4 +1995,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.offloads = txq->offloads;
}
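
The em_rxtx.c additions follow the per-port/per-queue offloads API adopted in this release: capabilities are reported split between port and queue level, and at queue setup the effective set is the union of the queue's own flags and the port-wide mode flags. A sketch of that union (the helper name is illustrative; the fields are the ones used above):

    #include <stdint.h>
    #include <rte_ethdev.h>

    static uint64_t
    effective_rx_offloads(struct rte_eth_dev *dev,
                          const struct rte_eth_rxconf *rx_conf)
    {
        /* Queue-level flags combined with the port-wide rxmode flags,
         * exactly as eth_em_rx_queue_setup() does above.
         */
        return rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
    }
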
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 3c5138de..64dfe683 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -20,7 +20,6 @@
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
-#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -42,8 +41,6 @@
#define IGB_DEFAULT_TX_HTHRESH 1
#define IGB_DEFAULT_TX_WTHRESH ((hw->mac.type == e1000_82576) ? 1 : 16)
-#define IGB_HKEY_MAX_INDEX 10
-
/* Bit shift and mask */
#define IGB_4_BIT_WIDTH (CHAR_BIT / 2)
#define IGB_4_BIT_MASK RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
@@ -146,7 +143,7 @@ static int eth_igb_rar_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
-static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr);
static void igbvf_intr_disable(struct e1000_hw *hw);
@@ -171,7 +168,7 @@ static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
+static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr);
static int igbvf_get_reg_length(struct rte_eth_dev *dev);
static int igbvf_get_regs(struct rte_eth_dev *dev,
@@ -224,6 +221,10 @@ static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
struct ether_addr *mc_addr_set,
uint32_t nb_mc_addr);
@@ -403,6 +404,8 @@ static const struct eth_dev_ops eth_igb_ops = {
.get_eeprom_length = eth_igb_get_eeprom_length,
.get_eeprom = eth_igb_get_eeprom,
.set_eeprom = eth_igb_set_eeprom,
+ .get_module_info = eth_igb_get_module_info,
+ .get_module_eeprom = eth_igb_get_module_eeprom,
.timesync_adjust_time = igb_timesync_adjust_time,
.timesync_read_time = igb_timesync_read_time,
.timesync_write_time = igb_timesync_write_time,
@@ -432,6 +435,9 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.rx_queue_setup = eth_igb_rx_queue_setup,
.rx_queue_release = eth_igb_rx_queue_release,
+ .rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .rx_descriptor_status = eth_igb_rx_descriptor_status,
+ .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
.set_mc_addr_list = eth_igb_set_mc_addr_list,
@@ -522,57 +528,6 @@ static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
#define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
sizeof(rte_igbvf_stats_strings[0]))
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
static inline void
igb_intr_enable(struct rte_eth_dev *dev)
@@ -1559,7 +1514,7 @@ eth_igb_stop(struct rte_eth_dev *dev)
/* clear the recorded link status */
memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
@@ -1635,7 +1590,7 @@ eth_igb_close(struct rte_eth_dev *dev)
}
memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -2196,22 +2151,15 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
switch (hw->mac.type) {
case e1000_82575:
@@ -2274,6 +2222,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2282,7 +2231,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.hthresh = IGB_DEFAULT_TX_HTHRESH,
.wthresh = IGB_DEFAULT_TX_WTHRESH,
},
- .txq_flags = 0,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -2325,14 +2274,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -2353,6 +2297,13 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
break;
}
+ dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
+
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = IGB_DEFAULT_RX_PTHRESH,
@@ -2361,6 +2312,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2369,7 +2321,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.hthresh = IGB_DEFAULT_TX_HTHRESH,
.wthresh = IGB_DEFAULT_TX_WTHRESH,
},
- .txq_flags = 0,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -2382,7 +2334,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
int link_check, count;
link_check = 0;
@@ -2423,8 +2375,6 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
}
memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_read_link_status(dev, &link);
- old = link;
/* Now we check if a transition has happened */
if (link_check) {
@@ -2443,14 +2393,8 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
link.link_status = ETH_LINK_DOWN;
link.link_autoneg = ETH_LINK_FIXED;
}
- rte_igb_dev_atomic_write_link_status(dev, &link);
- /* not changed */
- if (old.link_status == link.link_status)
- return -1;
-
- /* changed */
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
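
The hunks above replace the driver-local rte_igb_dev_atomic_read/write_link_status() helpers (deleted earlier in this patch) with the generic rte_eth_linkstatus_set()/rte_eth_linkstatus_get() from the ethdev layer. A minimal sketch of the set-side pattern, assuming the link struct packs into 64 bits; the stand-in below compares the whole word, whereas the real helper in rte_ethdev_driver.h compares only link_status:

#include <stdint.h>

struct link64 { uint64_t val64; };   /* stand-in for struct rte_eth_link */

static int
linkstatus_set_sketch(struct link64 *dev_link, const struct link64 *new_link)
{
	/* Atomically publish the new link word in one shot... */
	uint64_t orig = __atomic_exchange_n(&dev_link->val64, new_link->val64,
					    __ATOMIC_SEQ_CST);

	/* ...and report the transition: 0 if the state changed, -1 if it
	 * did not, which is the value eth_igb_link_update() now returns. */
	return orig == new_link->val64 ? -1 : 0;
}
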
/*
@@ -2704,7 +2648,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
E1000_WRITE_REG(hw, E1000_RLPML,
dev->data->dev_conf.rxmode.max_rx_pkt_len +
VLAN_TAG_SIZE);
@@ -2723,7 +2667,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
E1000_WRITE_REG(hw, E1000_RLPML,
dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * VLAN_TAG_SIZE);
@@ -2732,22 +2676,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
static int
eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
+ struct rte_eth_rxmode *rxmode;
+
+ rxmode = &dev->data->dev_conf.rxmode;
if(mask & ETH_VLAN_STRIP_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
igb_vlan_hw_strip_enable(dev);
else
igb_vlan_hw_strip_disable(dev);
}
if(mask & ETH_VLAN_FILTER_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
igb_vlan_hw_filter_enable(dev);
else
igb_vlan_hw_filter_disable(dev);
}
if(mask & ETH_VLAN_EXTEND_MASK){
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
igb_vlan_hw_extend_enable(dev);
else
igb_vlan_hw_extend_disable(dev);
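
All of these checks now key off rxmode.offloads rather than the removed hw_vlan_strip/hw_vlan_filter/hw_vlan_extend bit fields. An application-side sketch of requesting the same behaviour under the new API (the port id and queue counts are illustrative):

#include <string.h>
#include <rte_ethdev.h>

static int
configure_port_sketch(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* Request VLAN stripping and filtering through the offload
	 * bitmap instead of the removed rxmode bit fields. */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
			       DEV_RX_OFFLOAD_VLAN_FILTER;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
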
@@ -2887,8 +2834,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
if (ret < 0)
return 0;
- memset(&link, 0, sizeof(link));
- rte_igb_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
if (link.link_status) {
PMD_INIT_LOG(INFO,
" Port %d: Link Up - speed %u Mbps - %s",
@@ -3146,13 +3092,14 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
e1000_rar_set(hw, addr, index);
}
-static void
+static int
eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
eth_igb_rar_clear(dev, 0);
-
eth_igb_rar_set(dev, (void *)addr, 0, 0);
+
+ return 0;
}
/*
* Virtual Function operations
@@ -3250,14 +3197,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
@@ -3504,7 +3451,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return 0;
}
-static void
+static int
igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct e1000_hw *hw =
@@ -3512,6 +3459,7 @@ igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
/* index is not used by rar_set() */
hw->mac.ops.rar_set(hw, (void *)addr, 0);
+ return 0;
}
@@ -4499,10 +4447,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl &= ~E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
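
The jumbo switch above rests on simple frame-size arithmetic: frame_size is the requested MTU plus link-layer overhead (Ethernet header and CRC; some drivers also reserve VLAN tag room). A worked illustration with a hypothetical MTU:

#include <stdio.h>

#define ETHER_HDR_LEN 14
#define ETHER_CRC_LEN 4
#define ETHER_MAX_LEN 1518

int
main(void)
{
	unsigned int mtu = 1500;  /* hypothetical MTU request */
	unsigned int frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	/* 1500 + 14 + 4 = 1518 == ETHER_MAX_LEN: still a standard frame;
	 * one byte more and DEV_RX_OFFLOAD_JUMBO_FRAME gets set. */
	printf("frame %u -> %s\n", frame_size,
	       frame_size > ETHER_MAX_LEN ? "jumbo" : "standard");
	return 0;
}
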
@@ -5384,6 +5334,86 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev,
}
static int
+eth_igb_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ uint32_t status = 0;
+ uint16_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ if (hw->phy.media_type == e1000_media_type_copper ||
+ hw->phy.media_type == e1000_media_type_unknown)
+ return -EOPNOTSUPP;
+
+ /* Check whether we support SFF-8472 or not */
+ status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
+ if (status)
+ return -EIO;
+
+ /* Read the SFF-8472 addressing mode */

+ status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
+ if (status)
+ return -EIO;
+
+ /* addressing mode is not supported */
+ if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
+ /* We have an SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have an SFP which supports a revision of SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ uint32_t status = 0;
+ uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
+ u16 first_word, last_word;
+ int i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ first_word = info->offset >> 1;
+ last_word = (info->offset + info->length - 1) >> 1;
+
+ /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
+ for (i = 0; i < last_word - first_word + 1; i++) {
+ status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
+ &dataword[i]);
+ if (status) {
+ /* Error occurred while reading module */
+ return -EIO;
+ }
+
+ dataword[i] = rte_be_to_cpu_16(dataword[i]);
+ }
+
+ memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
+
+ return 0;
+}
+
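
The read loop above works in 16-bit words: first_word and last_word bracket the requested byte range, each word is fetched over I2C and byte-swapped, and the trailing memcpy() discards the odd leading byte when the offset is unaligned. A worked illustration with hypothetical request values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t offset = 3, length = 6;  /* hypothetical EEPROM request */
	uint16_t first_word = offset >> 1;                /* 3 / 2 = 1 */
	uint16_t last_word = (offset + length - 1) >> 1;  /* 8 / 2 = 4 */

	/* Words 1..4 (8 bytes) are read; memcpy() then skips the
	 * (offset & 1) == 1 leading byte to return exactly 6 bytes. */
	printf("words %u..%u, skip %u byte(s)\n",
	       first_word, last_word, offset & 1);
	return 0;
}
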
+static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
@@ -5631,7 +5661,7 @@ igb_rss_filter_restore(struct rte_eth_dev *dev)
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
}
@@ -5654,3 +5684,9 @@ RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci
RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
+
+/* see e1000_logs.c */
+RTE_INIT(e1000_init_log)
+{
+ e1000_igb_init_log();
+}
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index a1427596..07385291 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -175,7 +175,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
/**
* Only support src & dst addresses, protocol,
* others should be masked.
@@ -198,7 +198,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
filter->proto_mask = ipv4_mask->hdr.next_proto_id;
- ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
filter->dst_ip = ipv4_spec->hdr.dst_addr;
filter->src_ip = ipv4_spec->hdr.src_addr;
filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -228,7 +228,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
/* get the TCP/UDP/SCTP info */
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
if (item->spec && item->mask) {
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
/**
* Only support src & dst ports, tcp flags,
@@ -263,14 +263,14 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
}
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
if (item->spec && item->mask) {
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -289,14 +289,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
}
} else {
if (item->spec && item->mask) {
- sctp_mask = (const struct rte_flow_item_sctp *)
- item->mask;
+ sctp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -380,6 +379,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -533,8 +541,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Mask bits of source MAC address must be full of 0.
* Mask bits of destination MAC address must be full
@@ -625,6 +633,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -848,8 +864,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
tcp_mask->hdr.src_port ||
tcp_mask->hdr.dst_port ||
@@ -924,6 +940,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
@@ -1065,8 +1090,8 @@ item_loop:
return -rte_errno;
}
- raw_spec = (const struct rte_flow_item_raw *)item->spec;
- raw_mask = (const struct rte_flow_item_raw *)item->mask;
+ raw_spec = item->spec;
+ raw_mask = item->mask;
if (!raw_mask->length ||
!raw_mask->relative) {
@@ -1212,6 +1237,15 @@ item_loop:
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_flex_filter));
rte_flow_error_set(error, EINVAL,
@@ -1293,7 +1327,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
rss = (const struct rte_flow_action_rss *)act->conf;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@@ -1301,7 +1335,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1311,14 +1345,26 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
}
}
- if (rss->rss_conf)
- rss_conf->rss_conf = *rss->rss_conf;
- else
- rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;
-
- for (n = 0; n < rss->num; ++n)
- rss_conf->queue[n] = rss->queue[n];
- rss_conf->num = rss->num;
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (igb_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
/* check if the next not void item is END */
index++;
@@ -1350,6 +1396,15 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
@@ -1519,9 +1574,8 @@ igb_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&rss_filter_ptr->filter_info,
- &rss_conf,
- sizeof(struct igb_rte_flow_rss_conf));
+ igb_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
TAILQ_INSERT_TAIL(&igb_filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
@@ -1758,7 +1812,7 @@ igb_clear_rss_filter(struct rte_eth_dev *dev)
struct e1000_filter_info *filter =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter->rss_info.num)
+ if (filter->rss_info.conf.queue_num)
igb_config_rss_filter(dev, &filter->rss_info, FALSE);
}
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 2f371672..b955068a 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -107,6 +107,7 @@ struct igb_rx_queue {
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
uint32_t flags; /**< RX flags. */
+ uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
};
/**
@@ -180,6 +181,7 @@ struct igb_tx_queue {
/**< Start context position for transmit queue. */
struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
/**< Hardware context history.*/
+ uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
};
#if 1
@@ -1447,6 +1449,33 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
igb_reset_tx_queue_stat(txq);
}
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+
+ RTE_SET_USED(dev);
+ tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ return tx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t tx_queue_offload_capa;
+
+ tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+ return tx_queue_offload_capa;
+}
+
int
eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1458,6 +1487,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
struct igb_tx_queue *txq;
struct e1000_hw *hw;
uint32_t size;
+ uint64_t offloads;
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1542,6 +1574,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
dev->tx_pkt_burst = eth_igb_xmit_pkts;
dev->tx_pkt_prepare = &eth_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
+ txq->offloads = offloads;
return 0;
}
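
Queue setup now folds the per-port request into the per-queue one (offloads = tx_conf->offloads | txmode.offloads), mirroring how eth_igb_infos_get() reports the port capability as the port-specific set plus the queue set. A sketch of that layering rule, assuming the generic ethdev validation model rather than igb-specific code:

#include <stdint.h>

static int
check_tx_offloads(uint64_t port_req, uint64_t queue_req,
		  uint64_t port_capa, uint64_t queue_capa)
{
	uint64_t requested = port_req | queue_req;

	/* Per-queue requests must fit the per-queue capability... */
	if (queue_req & ~queue_capa)
		return -1;
	/* ...and everything requested must be advertised somewhere. */
	if (requested & ~(port_capa | queue_capa))
		return -1;
	return 0;
}
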
@@ -1593,6 +1626,46 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq)
rxq->pkt_last_seg = NULL;
}
+uint64_t
+igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ uint64_t rx_offload_capa;
+
+ RTE_SET_USED(dev);
+ rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ return rx_offload_capa;
+}
+
+uint64_t
+igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_queue_offload_capa;
+
+ switch (hw->mac.type) {
+ case e1000_vfadapt_i350:
+ /*
+ * Only one Rx queue can be used on the VF, so make the per-queue
+ * offload capability identical to the per-port capability for
+ * convenience.
+ */
+ rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
+ break;
+ default:
+ rx_queue_offload_capa = 0;
+ }
+ return rx_queue_offload_capa;
+}
+
int
eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1605,6 +1678,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
struct igb_rx_queue *rxq;
struct e1000_hw *hw;
unsigned int size;
+ uint64_t offloads;
+
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1630,6 +1706,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (rxq == NULL)
return -ENOMEM;
+ rxq->offloads = offloads;
rxq->mb_pool = mp;
rxq->nb_rx_desc = nb_desc;
rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1644,8 +1721,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
- ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2227,6 +2306,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
+ struct rte_eth_rxmode *rxmode;
struct e1000_hw *hw;
struct igb_rx_queue *rxq;
uint32_t rctl;
@@ -2247,10 +2327,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rctl = E1000_READ_REG(hw, E1000_RCTL);
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+ rxmode = &dev->data->dev_conf.rxmode;
+
/*
* Configure support of jumbo frames, if any.
*/
- if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
rctl |= E1000_RCTL_LPE;
/*
@@ -2292,9 +2374,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure
*/
- rxq->crc_len =
- (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
bus_addr = rxq->rx_ring_phys_addr;
E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2362,7 +2445,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2406,19 +2489,27 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxcsum |= E1000_RXCSUM_PCSD;
/* Enable both L3/L4 rx checksum offload */
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
- rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
- E1000_RXCSUM_CRCOFL);
+ if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+ rxcsum |= E1000_RXCSUM_IPOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_IPOFL;
+ if (rxmode->offloads &
+ (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+ rxcsum |= E1000_RXCSUM_TUOFL;
else
- rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
- E1000_RXCSUM_CRCOFL);
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxcsum |= E1000_RXCSUM_CRCOFL;
+ else
+ rxcsum &= ~E1000_RXCSUM_CRCOFL;
+
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
- if (dev->data->dev_conf.rxmode.hw_strip_crc) {
- rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+ rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
- /* set STRCRC bit in all queues */
+ /* clear STRCRC bit in all queues */
if (hw->mac.type == e1000_i350 ||
hw->mac.type == e1000_i210 ||
hw->mac.type == e1000_i211 ||
@@ -2427,14 +2518,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
uint32_t dvmolr = E1000_READ_REG(hw,
E1000_DVMOLR(rxq->reg_idx));
- dvmolr |= E1000_DVMOLR_STRCRC;
+ dvmolr &= ~E1000_DVMOLR_STRCRC;
E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
}
}
} else {
- rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+ rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
- /* clear STRCRC bit in all queues */
+ /* set STRCRC bit in all queues */
if (hw->mac.type == e1000_i350 ||
hw->mac.type == e1000_i210 ||
hw->mac.type == e1000_i211 ||
@@ -2443,7 +2534,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
uint32_t dvmolr = E1000_READ_REG(hw,
E1000_DVMOLR(rxq->reg_idx));
- dvmolr &= ~E1000_DVMOLR_STRCRC;
+ dvmolr |= E1000_DVMOLR_STRCRC;
E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
}
}
@@ -2654,7 +2745,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
}
- if (dev->data->dev_conf.rxmode.enable_scatter) {
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2741,6 +2832,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -2756,6 +2848,41 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_thresh.pthresh = txq->pthresh;
qinfo->conf.tx_thresh.hthresh = txq->hthresh;
qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+ qinfo->conf.offloads = txq->offloads;
+}
+
+int
+igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+igb_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
}
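
igb_rss_conf_init() deep-copies the flow API RSS action into driver-owned storage, and igb_action_rss_same() deep-compares it; the plain memcmp() of the whole igb_rte_flow_rss_conf that it replaces (see the hunk below) would now compare the key/queue pointers rather than their contents. A self-contained illustration with a simplified stand-in struct:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct rss_like {            /* simplified stand-in for the flow action */
	uint32_t queue_num;
	const uint16_t *queue;   /* pointer: memcmp() compares addresses */
};

int
main(void)
{
	uint16_t q1[] = { 0, 1 }, q2[] = { 0, 1 };
	struct rss_like a = { 2, q1 }, b = { 2, q2 };

	/* Same logical config, different pointers: a shallow memcmp()
	 * fails, while a deep compare in the style of
	 * igb_action_rss_same() succeeds. */
	printf("memcmp: %d, deep: %d\n",
	       memcmp(&a, &b, sizeof(a)) == 0,
	       a.queue_num == b.queue_num &&
	       !memcmp(a.queue, b.queue, sizeof(*a.queue) * a.queue_num));
	return 0;
}
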
int
@@ -2764,7 +2891,12 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
{
uint32_t shift;
uint16_t i, j;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2772,8 +2904,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!add) {
- if (memcmp(conf, &filter_info->rss_info,
- sizeof(struct igb_rte_flow_rss_conf)) == 0) {
+ if (igb_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
igb_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct igb_rte_flow_rss_conf));
@@ -2782,7 +2914,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table. */
@@ -2794,9 +2926,9 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
} reta;
uint8_t q_idx;
- q_idx = conf->queue[j];
- if (j == conf->num)
+ if (j == conf->conf.queue_num)
j = 0;
+ q_idx = conf->conf.queue[j];
reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
if ((i & 3) == 3)
E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
@@ -2813,8 +2945,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
rss_conf.rss_key = rss_intel_key; /* Default hash key */
igb_hw_rss_hash_set(hw, &rss_conf);
- rte_memcpy(&filter_info->rss_info,
- conf, sizeof(struct igb_rte_flow_rss_conf));
+ if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
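
The reordering in the RETA-fill hunk above is a fix as well as a rename: the old code read conf->queue[j] before the j == queue_num wrap check, so on the wrap iteration it read one element past the end of the queue array. The round-robin fill, sketched with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t queues[] = { 0, 1, 2 };  /* hypothetical RSS queue list */
	unsigned int queue_num = 3, j = 0;
	uint8_t reta[128];                /* 128-entry redirection table */

	for (unsigned int i = 0; i < sizeof(reta); i++, j++) {
		if (j == queue_num)       /* wrap BEFORE indexing */
			j = 0;
		reta[i] = (uint8_t)queues[j];
	}
	printf("reta[0..5]: %u %u %u %u %u %u\n",
	       reta[0], reta[1], reta[2], reta[3], reta[4], reta[5]);
	return 0;
}
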
diff --git a/drivers/net/e1000/meson.build b/drivers/net/e1000/meson.build
index 3a1bf5af..cf456995 100644
--- a/drivers/net/e1000/meson.build
+++ b/drivers/net/e1000/meson.build
@@ -5,6 +5,7 @@ subdir('base')
objs = [base_objs]
sources = files(
+ 'e1000_logs.c',
'em_ethdev.c',
'em_rxtx.c',
'igb_ethdev.c',
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
index f9bfe053..ff9ce315 100644
--- a/drivers/net/ena/Makefile
+++ b/drivers/net/ena/Makefile
@@ -43,6 +43,9 @@ INCLUDES :=-I$(SRCDIR) -I$(SRCDIR)/base/ena_defs -I$(SRCDIR)/base
EXPORT_MAP := rte_pmd_ena_version.map
LIBABIVER := 1
+# rte_fbarray is not yet part of stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
VPATH += $(SRCDIR)/base
#
# all source are stored in SRCS-y
@@ -55,5 +58,6 @@ CFLAGS += $(INCLUDES)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_timer
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 38a05877..4abf1a28 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -37,11 +37,19 @@
/*****************************************************************************/
/* Timeout in micro-sec */
-#define ADMIN_CMD_TIMEOUT_US (1000000)
+#define ADMIN_CMD_TIMEOUT_US (3000000)
-#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32
+#ifdef ENA_EXTENDED_STATS
+
+#define ENA_HISTOGRAM_ACTIVE_MASK_OFFSET 0xF08
+#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF)
+#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16)
+
+#endif /* ENA_EXTENDED_STATS */
+
#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
| (ENA_COMMON_SPEC_VERSION_MINOR))
@@ -62,7 +70,9 @@
#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
-static int ena_alloc_cnt;
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
+#define ENA_POLL_MS 5
/*****************************************************************************/
/*****************************************************************************/
@@ -86,6 +96,11 @@ struct ena_comp_ctx {
bool occupied;
};
+struct ena_com_stats_ctx {
+ struct ena_admin_aq_get_stats_cmd get_cmd;
+ struct ena_admin_acq_get_stats_resp get_resp;
+};
+
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
struct ena_common_mem_addr *ena_addr,
dma_addr_t addr)
@@ -95,50 +110,49 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return ENA_COM_INVAL;
}
- ena_addr->mem_addr_low = (u32)addr;
- ena_addr->mem_addr_high =
- ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 32)) >> 32);
+ ena_addr->mem_addr_low = lower_32_bits(addr);
+ ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
return 0;
}
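
The rewrite above swaps an open-coded GENMASK shift for lower_32_bits()/upper_32_bits(), with the high half truncated to u16 to match the device's descriptor field. A worked illustration (the address is hypothetical):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t addr = 0x123456789ULL;          /* hypothetical DMA address */
	uint32_t low = (uint32_t)addr;           /* lower_32_bits -> 0x23456789 */
	uint16_t high = (uint16_t)(addr >> 32);  /* upper_32_bits as u16 -> 0x1 */

	printf("low=0x%08x high=0x%04x\n", low, high);
	return 0;
}
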
static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
- ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
- ADMIN_SQ_SIZE(queue->q_depth),
- queue->sq.entries,
- queue->sq.dma_addr,
- queue->sq.mem_handle);
+ struct ena_com_admin_sq *sq = &queue->sq;
+ u16 size = ADMIN_SQ_SIZE(queue->q_depth);
- if (!queue->sq.entries) {
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
+ sq->mem_handle);
+
+ if (!sq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- queue->sq.head = 0;
- queue->sq.tail = 0;
- queue->sq.phase = 1;
+ sq->head = 0;
+ sq->tail = 0;
+ sq->phase = 1;
- queue->sq.db_addr = NULL;
+ sq->db_addr = NULL;
return 0;
}
static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
- ENA_MEM_ALLOC_COHERENT(queue->q_dmadev,
- ADMIN_CQ_SIZE(queue->q_depth),
- queue->cq.entries,
- queue->cq.dma_addr,
- queue->cq.mem_handle);
+ struct ena_com_admin_cq *cq = &queue->cq;
+ u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+
+ ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
+ cq->mem_handle);
- if (!queue->cq.entries) {
+ if (!cq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- queue->cq.head = 0;
- queue->cq.phase = 1;
+ cq->head = 0;
+ cq->phase = 1;
return 0;
}
@@ -146,44 +160,44 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
struct ena_aenq_handlers *aenq_handlers)
{
+ struct ena_com_aenq *aenq = &dev->aenq;
u32 addr_low, addr_high, aenq_caps;
+ u16 size;
dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
- ENA_MEM_ALLOC_COHERENT(dev->dmadev,
- ADMIN_AENQ_SIZE(dev->aenq.q_depth),
- dev->aenq.entries,
- dev->aenq.dma_addr,
- dev->aenq.mem_handle);
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+ ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
+ aenq->entries,
+ aenq->dma_addr,
+ aenq->mem_handle);
- if (!dev->aenq.entries) {
+ if (!aenq->entries) {
ena_trc_err("memory allocation failed");
return ENA_COM_NO_MEM;
}
- dev->aenq.head = dev->aenq.q_depth;
- dev->aenq.phase = 1;
+ aenq->head = aenq->q_depth;
+ aenq->phase = 1;
- addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(dev->aenq.dma_addr);
- addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(dev->aenq.dma_addr);
+ addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
+ addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
aenq_caps = 0;
aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
- ENA_REG_WRITE32(aenq_caps, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_CAPS_OFF);
-
- if (unlikely(!aenq_handlers))
+ if (unlikely(!aenq_handlers)) {
ena_trc_err("aenq handlers pointer is NULL\n");
+ return ENA_COM_INVAL;
+ }
- dev->aenq.aenq_handlers = aenq_handlers;
+ aenq->aenq_handlers = aenq_handlers;
return 0;
}
@@ -217,12 +231,11 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
return &queue->comp_ctx[command_id];
}
-static struct ena_comp_ctx *
-__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size_in_bytes,
- struct ena_admin_acq_entry *comp,
- size_t comp_size_in_bytes)
+static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
{
struct ena_comp_ctx *comp_ctx;
u16 tail_masked, cmd_id;
@@ -234,12 +247,9 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
tail_masked = admin_queue->sq.tail & queue_size_mask;
/* In case of queue FULL */
- cnt = admin_queue->sq.tail - admin_queue->sq.head;
+ cnt = ATOMIC32_READ(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- ena_trc_dbg("admin queue is FULL (tail %d head %d depth: %d)\n",
- admin_queue->sq.tail,
- admin_queue->sq.head,
- admin_queue->q_depth);
+ ena_trc_dbg("admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(ENA_COM_NO_SPACE);
}
@@ -253,6 +263,8 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
+ if (unlikely(!comp_ctx))
+ return ERR_PTR(ENA_COM_INVAL);
comp_ctx->status = ENA_CMD_SUBMITTED;
comp_ctx->comp_size = (u32)comp_size_in_bytes;
@@ -272,7 +284,8 @@ __ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
admin_queue->sq.phase = !admin_queue->sq.phase;
- ENA_REG_WRITE32(admin_queue->sq.tail, admin_queue->sq.db_addr);
+ ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
+ admin_queue->sq.db_addr);
return comp_ctx;
}
@@ -298,12 +311,11 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
return 0;
}
-static struct ena_comp_ctx *
-ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_aq_entry *cmd,
- size_t cmd_size_in_bytes,
- struct ena_admin_acq_entry *comp,
- size_t comp_size_in_bytes)
+static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_aq_entry *cmd,
+ size_t cmd_size_in_bytes,
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
{
unsigned long flags = 0;
struct ena_comp_ctx *comp_ctx;
@@ -317,7 +329,7 @@ ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
cmd_size_in_bytes,
comp,
comp_size_in_bytes);
- if (unlikely(IS_ERR(comp_ctx)))
+ if (IS_ERR(comp_ctx))
admin_queue->running_state = false;
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -331,9 +343,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size_t size;
int dev_node = 0;
- ENA_TOUCH(ctx);
-
- memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
@@ -347,23 +357,26 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size,
io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr,
+ io_sq->desc_addr.mem_handle,
ctx->numa_node,
dev_node);
- if (!io_sq->desc_addr.virt_addr)
+ if (!io_sq->desc_addr.virt_addr) {
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
size,
io_sq->desc_addr.virt_addr,
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle);
+ }
} else {
ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
size,
io_sq->desc_addr.virt_addr,
ctx->numa_node,
dev_node);
- if (!io_sq->desc_addr.virt_addr)
+ if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
ENA_MEM_ALLOC(ena_dev->dmadev, size);
+ }
}
if (!io_sq->desc_addr.virt_addr) {
@@ -385,8 +398,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size_t size;
int prev_node = 0;
- ENA_TOUCH(ctx);
- memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
/* Use the basic completion descriptor for Rx */
io_cq->cdesc_entry_size_in_bytes =
@@ -397,17 +409,19 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
- size,
- io_cq->cdesc_addr.virt_addr,
- io_cq->cdesc_addr.phys_addr,
- ctx->numa_node,
- prev_node);
- if (!io_cq->cdesc_addr.virt_addr)
+ size,
+ io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr,
+ io_cq->cdesc_addr.mem_handle,
+ ctx->numa_node,
+ prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
size,
io_cq->cdesc_addr.virt_addr,
io_cq->cdesc_addr.phys_addr,
io_cq->cdesc_addr.mem_handle);
+ }
if (!io_cq->cdesc_addr.virt_addr) {
ena_trc_err("memory allocation failed");
@@ -420,9 +434,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
return 0;
}
-static void
-ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
- struct ena_admin_acq_entry *cqe)
+static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
+ struct ena_admin_acq_entry *cqe)
{
struct ena_comp_ctx *comp_ctx;
u16 cmd_id;
@@ -447,8 +460,7 @@ ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}
-static void
-ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
+static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
struct ena_admin_acq_entry *cqe = NULL;
u16 comp_num = 0;
@@ -499,7 +511,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return ENA_COM_NO_MEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE:
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
case ENA_ADMIN_BAD_OPCODE:
case ENA_ADMIN_MALFORMED_REQUEST:
case ENA_ADMIN_ILLEGAL_PARAMETER:
@@ -510,20 +522,24 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
return 0;
}
-static int
-ena_com_wait_and_process_admin_cq_polling(
- struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
unsigned long flags = 0;
- u64 start_time;
+ unsigned long timeout;
int ret;
- start_time = ENA_GET_SYSTEM_USECS();
+ timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
+
+ while (1) {
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+
+ if (comp_ctx->status != ENA_CMD_SUBMITTED)
+ break;
- while (comp_ctx->status == ENA_CMD_SUBMITTED) {
- if ((ENA_GET_SYSTEM_USECS() - start_time) >
- ADMIN_CMD_TIMEOUT_US) {
+ if (ENA_TIME_EXPIRE(timeout)) {
ena_trc_err("Wait for completion (polling) timeout\n");
/* ENA didn't have any completion */
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
@@ -535,9 +551,7 @@ ena_com_wait_and_process_admin_cq_polling(
goto err;
}
- ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ENA_MSLEEP(ENA_POLL_MS);
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -549,8 +563,8 @@ ena_com_wait_and_process_admin_cq_polling(
goto err;
}
- ENA_ASSERT(comp_ctx->status == ENA_CMD_COMPLETED,
- "Invalid comp status %d\n", comp_ctx->status);
+ ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
+ "Invalid comp status %d\n", comp_ctx->status);
ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
@@ -558,16 +572,14 @@ err:
return ret;
}
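
The polling rework above inverts the loop: handle completions first, break on any state change, and only then test a precomputed deadline and sleep ENA_POLL_MS between iterations. A generic sketch of that check-then-sleep shape (the helper names below are illustrative, not the real ENA_* platform macros):

#include <stdbool.h>
#include <time.h>
#include <unistd.h>

static int
poll_until(bool (*done)(void *), void *arg, unsigned int timeout_ms,
	   unsigned int poll_ms)
{
	time_t deadline = time(NULL) + (timeout_ms + 999) / 1000;

	for (;;) {
		if (done(arg))          /* check first... */
			return 0;
		if (time(NULL) > deadline)
			return -1;      /* ...then give up on timeout... */
		usleep(poll_ms * 1000); /* ...else sleep and retry. */
	}
}
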
-static int
-ena_com_wait_and_process_admin_cq_interrupts(
- struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
unsigned long flags = 0;
- int ret = 0;
+ int ret;
ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
- ADMIN_CMD_TIMEOUT_US);
+ admin_queue->completion_timeout);
/* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors
@@ -607,16 +619,18 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp;
- u32 mmio_read_reg, ret;
+ u32 mmio_read_reg, ret, i;
unsigned long flags = 0;
- int i;
+ u32 timeout = mmio_read->reg_read_to;
ENA_MIGHT_SLEEP();
+ if (timeout == 0)
+ timeout = ENA_REG_READ_TIMEOUT;
+
/* If readless is disabled, perform regular read */
if (!mmio_read->readless_supported)
- return ENA_REG_READ32((unsigned char *)ena_dev->reg_bar +
- offset);
+ return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);
ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
mmio_read->seq_num++;
@@ -632,17 +646,16 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
*/
wmb();
- ENA_REG_WRITE32(mmio_read_reg, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_REG_READ_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
- for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
+ for (i = 0; i < timeout; i++) {
if (read_resp->req_id == mmio_read->seq_num)
break;
ENA_UDELAY(1);
}
- if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
+ if (unlikely(i == timeout)) {
ena_trc_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num,
offset,
@@ -653,7 +666,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (read_resp->reg_off != offset) {
- ena_trc_err("reading failed for wrong offset value");
+ ena_trc_err("Read failure: wrong offset provided");
ret = ENA_MMIO_READ_TIMEOUT;
} else {
ret = read_resp->reg_val;
@@ -671,9 +684,8 @@ err:
* It is expected that the IRQ called ena_com_handle_admin_completion
* to mark the completions.
*/
-static int
-ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
- struct ena_com_admin_queue *admin_queue)
+static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
{
if (admin_queue->polling)
return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
@@ -692,7 +704,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
direction = ENA_ADMIN_SQ_DIRECTION_TX;
@@ -706,12 +718,11 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
destroy_cmd.sq.sq_idx = io_sq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&destroy_cmd,
- sizeof(destroy_cmd),
- (struct ena_admin_acq_entry *)&destroy_resp,
- sizeof(destroy_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
ena_trc_err("failed to destroy io sq error: %d\n", ret);
@@ -747,18 +758,20 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
io_sq->desc_addr.phys_addr,
io_sq->desc_addr.mem_handle);
else
- ENA_MEM_FREE(ena_dev->dmadev,
- io_sq->desc_addr.virt_addr);
+ ENA_MEM_FREE(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
io_sq->desc_addr.virt_addr = NULL;
}
}
-static int wait_for_reset_state(struct ena_com_dev *ena_dev,
- u32 timeout, u16 exp_state)
+static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ u16 exp_state)
{
u32 val, i;
+ /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
+ timeout = (timeout * 100) / ENA_POLL_MS;
+
for (i = 0; i < timeout; i++) {
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
@@ -771,16 +784,14 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev,
exp_state)
return 0;
- /* The resolution of the timeout is 100ms */
- ENA_MSLEEP(100);
+ ENA_MSLEEP(ENA_POLL_MS);
}
return ENA_COM_TIMER_EXPIRED;
}
-static bool
-ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
- enum ena_admin_aq_feature_id feature_id)
+static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_feature_id feature_id)
{
u32 feature_mask = 1 << feature_id;
@@ -802,14 +813,9 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_cmd get_cmd;
int ret;
- if (!ena_dev) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
-
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
- ena_trc_info("Feature %d isn't supported\n", feature_id);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n", feature_id);
+ return ENA_COM_UNSUPPORTED;
}
memset(&get_cmd, 0x0, sizeof(get_cmd));
@@ -945,10 +951,10 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
sizeof(struct ena_admin_rss_ind_table_entry);
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- tbl_size,
- rss->rss_ind_tbl,
- rss->rss_ind_tbl_dma_addr,
- rss->rss_ind_tbl_mem_handle);
+ tbl_size,
+ rss->rss_ind_tbl,
+ rss->rss_ind_tbl_dma_addr,
+ rss->rss_ind_tbl_mem_handle);
if (unlikely(!rss->rss_ind_tbl))
goto mem_err1;
@@ -1005,7 +1011,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
u8 direction;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
@@ -1041,12 +1047,11 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
}
}
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&create_cmd,
- sizeof(create_cmd),
- (struct ena_admin_acq_entry *)&cmd_completion,
- sizeof(cmd_completion));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
if (unlikely(ret)) {
ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
return ret;
@@ -1133,9 +1138,8 @@ static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
return 0;
}
-static void
-ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
- u16 intr_delay_resolution)
+static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
+ u16 intr_delay_resolution)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
unsigned int i;
@@ -1165,13 +1169,18 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
size_t comp_size)
{
struct ena_comp_ctx *comp_ctx;
- int ret = 0;
+ int ret;
comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
comp, comp_size);
- if (unlikely(IS_ERR(comp_ctx))) {
- ena_trc_err("Failed to submit command [%ld]\n",
- PTR_ERR(comp_ctx));
+ if (IS_ERR(comp_ctx)) {
+ if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
+ ena_trc_dbg("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+ else
+ ena_trc_err("Failed to submit command [%ld]\n",
+ PTR_ERR(comp_ctx));
+
return PTR_ERR(comp_ctx);
}
@@ -1195,7 +1204,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_create_cq_resp_desc cmd_completion;
int ret;
- memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
+ memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
@@ -1215,12 +1224,11 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
return ret;
}
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&create_cmd,
- sizeof(create_cmd),
- (struct ena_admin_acq_entry *)&cmd_completion,
- sizeof(cmd_completion));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&create_cmd,
+ sizeof(create_cmd),
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
if (unlikely(ret)) {
ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
return ret;
@@ -1290,7 +1298,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
- ENA_MSLEEP(20);
+ ENA_MSLEEP(ENA_POLL_MS);
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
}
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -1304,17 +1312,16 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
int ret;
- memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
+ memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
destroy_cmd.cq_idx = io_cq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)&destroy_cmd,
- sizeof(destroy_cmd),
- (struct ena_admin_acq_entry *)&destroy_resp,
- sizeof(destroy_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)&destroy_cmd,
+ sizeof(destroy_cmd),
+ (struct ena_admin_acq_entry *)&destroy_resp,
+ sizeof(destroy_resp));
if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);
@@ -1341,13 +1348,12 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
u16 depth = ena_dev->aenq.q_depth;
- ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n");
+ ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
/* Init head_db to mark that all entries in the queue
* are initially available
*/
- ENA_REG_WRITE32(depth, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AENQ_HEAD_DB_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
@@ -1356,12 +1362,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
struct ena_admin_get_feat_resp get_resp;
- int ret = 0;
-
- if (unlikely(!ena_dev)) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
if (ret) {
@@ -1373,7 +1374,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
ena_trc_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
get_resp.u.aenq.supported_groups,
groups_flag);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -1476,41 +1477,42 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
+ u16 size;
- if (!admin_queue)
- return;
-
+ ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
if (admin_queue->comp_ctx)
ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
admin_queue->comp_ctx = NULL;
-
- if (admin_queue->sq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_SQ_SIZE(admin_queue->q_depth),
- admin_queue->sq.entries,
- admin_queue->sq.dma_addr,
- admin_queue->sq.mem_handle);
- admin_queue->sq.entries = NULL;
-
- if (admin_queue->cq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_CQ_SIZE(admin_queue->q_depth),
- admin_queue->cq.entries,
- admin_queue->cq.dma_addr,
- admin_queue->cq.mem_handle);
- admin_queue->cq.entries = NULL;
-
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
+ sq->dma_addr, sq->mem_handle);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
+ cq->dma_addr, cq->mem_handle);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
if (ena_dev->aenq.entries)
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- ADMIN_AENQ_SIZE(ena_dev->aenq.q_depth),
- ena_dev->aenq.entries,
- ena_dev->aenq.dma_addr,
- ena_dev->aenq.mem_handle);
- ena_dev->aenq.entries = NULL;
+ ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
+ aenq->dma_addr, aenq->mem_handle);
+ aenq->entries = NULL;
}
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
+ u32 mask_value = 0;
+
+ if (polling)
+ mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+ ENA_REG_WRITE32(ena_dev->bus, mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
ena_dev->admin_queue.polling = polling;
}
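For reference, a compilable sketch of the mask-register toggle this function now performs; reg_write32() and INTR_MASK_BIT are illustrative stand-ins for ENA_REG_WRITE32 and ENA_REGS_ADMIN_INTR_MASK, not the real API:

#include <stdint.h>
#include <stdbool.h>

#define INTR_MASK_BIT 0x1u	/* stand-in for the real mask value */

static void reg_write32(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;	/* a real driver would add an I/O write barrier */
}

static void set_polling_mode(volatile uint32_t *intr_mask_reg, bool polling)
{
	/* Non-zero silences the admin interrupt; zero re-arms it. */
	reg_write32(intr_mask_reg, polling ? INTR_MASK_BIT : 0);
}

int main(void)
{
	uint32_t fake_reg = 0;

	set_polling_mode(&fake_reg, true);
	return fake_reg == INTR_MASK_BIT ? 0 : 1;
}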
@@ -1536,8 +1538,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
return 0;
}
-void
-ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
+void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1548,10 +1549,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
- ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_LO_OFF);
- ENA_REG_WRITE32(0x0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
sizeof(*mmio_read->read_resp),
@@ -1570,10 +1569,8 @@ void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_MMIO_RESP_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}
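The paired low/high writes above are the standard way to program a 64-bit DMA address through two 32-bit registers; a standalone sketch of what ENA_DMA_ADDR_TO_UINT32_LOW/HIGH presumably reduce to:

#include <stdint.h>
#include <stdio.h>

static uint32_t dma_addr_low(uint64_t addr)  { return (uint32_t)addr; }
static uint32_t dma_addr_high(uint64_t addr) { return (uint32_t)(addr >> 32); }

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;

	/* Each half goes to its own 32-bit MMIO register. */
	printf("low=0x%08x high=0x%08x\n", dma_addr_low(dma), dma_addr_high(dma));
	return 0;
}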
int ena_com_admin_init(struct ena_com_dev *ena_dev,
@@ -1619,24 +1616,20 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
if (ret)
goto error;
- admin_queue->sq.db_addr = (u32 __iomem *)
- ((unsigned char *)ena_dev->reg_bar + ENA_REGS_AQ_DB_OFF);
+ admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ ENA_REGS_AQ_DB_OFF);
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
- ENA_REG_WRITE32(addr_low, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_BASE_LO_OFF);
- ENA_REG_WRITE32(addr_high, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_BASE_HI_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
aq_caps = 0;
aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
@@ -1650,10 +1643,8 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
- ENA_REG_WRITE32(aq_caps, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_AQ_CAPS_OFF);
- ENA_REG_WRITE32(acq_caps, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_ACQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
if (ret)
goto error;
@@ -1672,7 +1663,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
{
struct ena_com_io_sq *io_sq;
struct ena_com_io_cq *io_cq;
- int ret = 0;
+ int ret;
if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
@@ -1683,8 +1674,8 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
io_sq = &ena_dev->io_sq_queues[ctx->qid];
io_cq = &ena_dev->io_cq_queues[ctx->qid];
- memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
- memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
+ memset(io_sq, 0x0, sizeof(*io_sq));
+ memset(io_cq, 0x0, sizeof(*io_cq));
/* Init CQ */
io_cq->q_depth = ctx->queue_size;
@@ -1794,6 +1785,19 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
sizeof(get_resp.u.offload));
+ /* Driver hints isn't a mandatory admin command, so in case the
+ * command isn't supported, set driver hints to 0
+ */
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
+
+ if (!rc)
+ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+ sizeof(get_resp.u.hw_hints));
+ else if (rc == ENA_COM_UNSUPPORTED)
+ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
return 0;
}
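The HW-hints block above is a good example of the optional-feature pattern: ENA_COM_UNSUPPORTED degrades to defaults, anything else propagates. A minimal sketch under assumed names (get_feature() and E_UNSUPPORTED are stand-ins, not the real ENA API):

#include <string.h>
#include <stdio.h>

#define E_UNSUPPORTED (-95)	/* stand-in for ENA_COM_UNSUPPORTED */

struct hints { unsigned admin_timeout_ms; };

/* Stub device query; a real driver would issue an admin command here. */
static int get_feature(struct hints *out)
{
	(void)out;
	return E_UNSUPPORTED;
}

static int load_optional_hints(struct hints *hints)
{
	int rc = get_feature(hints);

	if (rc == 0)
		return 0;				/* device provided hints */
	if (rc == E_UNSUPPORTED) {
		memset(hints, 0, sizeof(*hints));	/* fall back to defaults */
		return 0;
	}
	return rc;					/* real failure */
}

int main(void)
{
	struct hints h;
	int rc = load_optional_hints(&h);

	printf("rc=%d timeout=%u\n", rc, h.admin_timeout_ms);
	return 0;
}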
@@ -1826,6 +1830,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
ena_aenq_handler handler_cb;
+ unsigned long long timestamp;
u16 masked_head, processed = 0;
u8 phase;
@@ -1837,11 +1842,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* Go over all the events */
while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
phase) {
+ timestamp = (unsigned long long)aenq_common->timestamp_low |
+ ((unsigned long long)aenq_common->timestamp_high << 32);
+ ENA_TOUCH(timestamp); /* In case debug is disabled */
ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group,
aenq_common->syndrom,
- (unsigned long long)aenq_common->timestamp_low +
- ((u64)aenq_common->timestamp_high << 32));
+ timestamp);
/* Handle specific event */
handler_cb = ena_com_get_specific_aenq_cb(dev,
@@ -1869,11 +1876,11 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- ENA_REG_WRITE32((u32)aenq->head, (unsigned char *)dev->reg_bar
- + ENA_REGS_AENQ_HEAD_DB_OFF);
+ ENA_REG_WRITE32(dev->bus, (u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
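Two mechanics from this handler in one standalone sketch: the 64-bit event timestamp reassembled from its two 32-bit halves, and the head doorbell written only after a full barrier so the device never sees the head advance before the descriptor reads complete (__sync_synchronize() stands in for mb()):

#include <stdint.h>
#include <stdio.h>

static uint64_t assemble_ts(uint32_t low, uint32_t high)
{
	return (uint64_t)low | ((uint64_t)high << 32);
}

static void write_doorbell(volatile uint32_t *db_reg, uint32_t head)
{
	__sync_synchronize();	/* order descriptor reads before the write */
	*db_reg = head;		/* device may now reuse the consumed entries */
}

int main(void)
{
	uint32_t db = 0;

	printf("ts=0x%016llx\n",
	       (unsigned long long)assemble_ts(0x89abcdefu, 0x01234567u));
	write_doorbell(&db, 16);
	printf("doorbell=%u\n", db);
	return 0;
}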
-int ena_com_dev_reset(struct ena_com_dev *ena_dev)
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason)
{
u32 stat, timeout, cap, reset_val;
int rc;
@@ -1901,8 +1908,9 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
/* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
- ENA_REG_WRITE32(reset_val, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_DEV_CTL_OFF);
+ reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
+ ENA_REGS_DEV_CTL_RESET_REASON_MASK;
+ ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */
ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
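The reset value is built by shifting the reason into its register field and masking it so it cannot spill into neighbouring bits; a generic sketch of that packing (the shift and mask values here are invented, not the real ENA_REGS_DEV_CTL layout):

#include <stdint.h>
#include <stdio.h>

#define RESET_MASK	0x00000001u	/* illustrative bit positions */
#define REASON_SHIFT	28
#define REASON_MASK	0xf0000000u

static uint32_t build_reset_val(uint32_t reason)
{
	uint32_t val = RESET_MASK;

	/* Shift into place, then mask so the value stays in its field. */
	val |= (reason << REASON_SHIFT) & REASON_MASK;
	return val;
}

int main(void)
{
	printf("reset_val = 0x%08x\n", build_reset_val(0x3));
	return 0;
}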
@@ -1915,29 +1923,32 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
}
/* reset done */
- ENA_REG_WRITE32(0, (unsigned char *)ena_dev->reg_bar
- + ENA_REGS_DEV_CTL_OFF);
+ ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
rc = wait_for_reset_state(ena_dev, timeout, 0);
if (rc != 0) {
ena_trc_err("Reset indication didn't turn off\n");
return rc;
}
+ timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
+ ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
+ if (timeout)
+ /* the resolution of timeout reg is 100ms */
+ ena_dev->admin_queue.completion_timeout = timeout * 100000;
+ else
+ ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
+
return 0;
}
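The new timeout handling converts the capability field, reported in 100 ms units, into the microseconds the admin queue works in, with a compile-time fallback when the field reads zero. A tiny sketch with an assumed fallback value (the real ADMIN_CMD_TIMEOUT_US lives elsewhere in ena_com.c):

#include <stdint.h>
#include <stdio.h>

#define ADMIN_CMD_TIMEOUT_US 3000000u	/* assumed fallback, not the real value */

static uint32_t admin_timeout_us(uint32_t timeout_field)
{
	/* Field is in 100 ms units; 1 unit = 100000 us. */
	return timeout_field ? timeout_field * 100000u : ADMIN_CMD_TIMEOUT_US;
}

int main(void)
{
	printf("field=5 -> %u us\n", admin_timeout_us(5));	/* 500000 us */
	printf("field=0 -> %u us\n", admin_timeout_us(0));	/* fallback  */
	return 0;
}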
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
- struct ena_admin_aq_get_stats_cmd *get_cmd,
- struct ena_admin_acq_get_stats_resp *get_resp,
+ struct ena_com_stats_ctx *ctx,
enum ena_admin_get_stats_type type)
{
+ struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
+ struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
struct ena_com_admin_queue *admin_queue;
- int ret = 0;
-
- if (!ena_dev) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
admin_queue = &ena_dev->admin_queue;
@@ -1945,12 +1956,11 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
get_cmd->aq_common_descriptor.flags = 0;
get_cmd->type = type;
- ret = ena_com_execute_admin_command(
- admin_queue,
- (struct ena_admin_aq_entry *)get_cmd,
- sizeof(*get_cmd),
- (struct ena_admin_acq_entry *)get_resp,
- sizeof(*get_resp));
+ ret = ena_com_execute_admin_command(admin_queue,
+ (struct ena_admin_aq_entry *)get_cmd,
+ sizeof(*get_cmd),
+ (struct ena_admin_acq_entry *)get_resp,
+ sizeof(*get_resp));
if (unlikely(ret))
ena_trc_err("Failed to get stats. error: %d\n", ret);
@@ -1961,78 +1971,28 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
struct ena_admin_basic_stats *stats)
{
- int ret = 0;
- struct ena_admin_aq_get_stats_cmd get_cmd;
- struct ena_admin_acq_get_stats_resp get_resp;
+ struct ena_com_stats_ctx ctx;
+ int ret;
- memset(&get_cmd, 0x0, sizeof(get_cmd));
- ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
- ENA_ADMIN_GET_STATS_TYPE_BASIC);
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
if (likely(ret == 0))
- memcpy(stats, &get_resp.basic_stats,
- sizeof(get_resp.basic_stats));
+ memcpy(stats, &ctx.get_resp.basic_stats,
+ sizeof(ctx.get_resp.basic_stats));
return ret;
}
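The refactor replaces separate cmd/resp arguments with a single ena_com_stats_ctx, so one memset covers both halves and callers pass one pointer. A compilable sketch of the idea with stand-in types:

#include <string.h>
#include <stdio.h>

struct get_cmd  { int type; };
struct get_resp { unsigned long rx_pkts; };

/* Bundled context, mirroring ena_com_stats_ctx. */
struct stats_ctx {
	struct get_cmd  cmd;
	struct get_resp resp;
};

static int exec_stats(struct stats_ctx *ctx, int type)
{
	ctx->cmd.type = type;
	ctx->resp.rx_pkts = 42;	/* pretend the device answered */
	return 0;
}

int main(void)
{
	struct stats_ctx ctx;

	memset(&ctx, 0, sizeof(ctx));	/* one memset zeroes cmd and resp */
	if (exec_stats(&ctx, 0) == 0)
		printf("rx_pkts=%lu\n", ctx.resp.rx_pkts);
	return 0;
}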
-int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
- u32 len)
-{
- int ret = 0;
- struct ena_admin_aq_get_stats_cmd get_cmd;
- struct ena_admin_acq_get_stats_resp get_resp;
- ena_mem_handle_t mem_handle = 0;
- void *virt_addr;
- dma_addr_t phys_addr;
-
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, len,
- virt_addr, phys_addr, mem_handle);
- if (!virt_addr) {
- ret = ENA_COM_NO_MEM;
- goto done;
- }
- memset(&get_cmd, 0x0, sizeof(get_cmd));
- ret = ena_com_mem_addr_set(ena_dev,
- &get_cmd.u.control_buffer.address,
- phys_addr);
- if (unlikely(ret)) {
- ena_trc_err("memory address set failed\n");
- return ret;
- }
- get_cmd.u.control_buffer.length = len;
-
- get_cmd.device_id = ena_dev->stats_func;
- get_cmd.queue_idx = ena_dev->stats_queue;
-
- ret = ena_get_dev_stats(ena_dev, &get_cmd, &get_resp,
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED);
- if (ret < 0)
- goto free_ext_stats_mem;
-
- ret = snprintf(buff, len, "%s", (char *)virt_addr);
-
-free_ext_stats_mem:
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev, len, virt_addr, phys_addr,
- mem_handle);
-done:
- return ret;
-}
-
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
- int ret = 0;
-
- if (unlikely(!ena_dev)) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
+ int ret;
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
- ena_trc_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2049,11 +2009,10 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);
- return ENA_COM_INVAL;
- }
- return 0;
+
+ return ret;
}
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
@@ -2066,7 +2025,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
if (unlikely(ret)) {
ena_trc_err("Failed to get offload capabilities %d\n", ret);
- return ENA_COM_INVAL;
+ return ret;
}
memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
@@ -2085,9 +2044,9 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_FUNCTION)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_FUNCTION);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return ENA_COM_UNSUPPORTED;
}
/* Validate hash function is supported */
@@ -2099,7 +2058,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
ena_trc_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
memset(&cmd, 0x0, sizeof(cmd));
@@ -2158,7 +2117,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
ena_trc_err("Flow hash function %d isn't supported\n", func);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
switch (func) {
@@ -2207,7 +2166,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- rss->hash_func = (enum ena_admin_hash_functions)get_resp.u.flow_hash_func.selected_func;
+ rss->hash_func = get_resp.u.flow_hash_func.selected_func;
if (func)
*func = rss->hash_func;
@@ -2242,17 +2201,20 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
int ret;
if (!ena_com_check_supported_feature_id(ena_dev,
ENA_ADMIN_RSS_HASH_INPUT)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_HASH_INPUT);
- return ENA_COM_PERMISSION;
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
+ return ENA_COM_UNSUPPORTED;
}
+ memset(&cmd, 0x0, sizeof(cmd));
+
cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
cmd.aq_common_descriptor.flags =
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
@@ -2268,20 +2230,17 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ena_trc_err("memory address set failed\n");
return ret;
}
- cmd.control_buffer.length =
- sizeof(struct ena_admin_feature_rss_hash_control);
+ cmd.control_buffer.length = sizeof(*hash_ctrl);
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set hash input. error: %d\n", ret);
- return ENA_COM_INVAL;
- }
- return 0;
+ return ret;
}
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
@@ -2293,7 +2252,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
int rc, i;
/* Get the supported hash input */
- rc = ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
if (unlikely(rc))
return rc;
@@ -2322,7 +2281,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
- hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+ hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
@@ -2332,7 +2291,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
ena_trc_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
- return ENA_COM_PERMISSION;
+ return ENA_COM_UNSUPPORTED;
}
}
@@ -2340,7 +2299,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
/* In case of failure, restore the old hash ctrl */
if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
return rc;
}
@@ -2377,7 +2336,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* In case of failure, restore the old hash ctrl */
if (unlikely(rc))
- ena_com_get_hash_ctrl(ena_dev, (enum ena_admin_flow_hash_proto)0, NULL);
+ ena_com_get_hash_ctrl(ena_dev, 0, NULL);
return 0;
}
@@ -2404,14 +2363,13 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
struct ena_rss *rss = &ena_dev->rss;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
- int ret = 0;
+ int ret;
- if (!ena_com_check_supported_feature_id(
- ena_dev,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
- return ENA_COM_PERMISSION;
+ if (!ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ return ENA_COM_UNSUPPORTED;
}
ret = ena_com_ind_tbl_convert_to_device(ena_dev);
@@ -2446,12 +2404,10 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
(struct ena_admin_acq_entry *)&resp,
sizeof(resp));
- if (unlikely(ret)) {
+ if (unlikely(ret))
ena_trc_err("Failed to set indirect table. error: %d\n", ret);
- return ENA_COM_INVAL;
- }
- return 0;
+ return ret;
}
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
@@ -2538,17 +2494,18 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
}
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
- u32 debug_area_size) {
+ u32 debug_area_size)
+{
struct ena_host_attribute *host_attr = &ena_dev->host_attr;
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- debug_area_size,
- host_attr->debug_area_virt_addr,
- host_attr->debug_area_dma_addr,
- host_attr->debug_area_dma_handle);
- if (unlikely(!host_attr->debug_area_virt_addr)) {
- host_attr->debug_area_size = 0;
- return ENA_COM_NO_MEM;
+ ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
+ debug_area_size,
+ host_attr->debug_area_virt_addr,
+ host_attr->debug_area_dma_addr,
+ host_attr->debug_area_dma_handle);
+ if (unlikely(!host_attr->debug_area_virt_addr)) {
+ host_attr->debug_area_size = 0;
+ return ENA_COM_NO_MEM;
}
host_attr->debug_area_size = debug_area_size;
@@ -2590,6 +2547,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
+
int ret;
/* Host attribute config is called before ena_com_get_dev_attr_feat
@@ -2635,14 +2593,12 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
- return ena_com_check_supported_feature_id(
- ena_dev,
- ENA_ADMIN_INTERRUPT_MODERATION);
+ return ena_com_check_supported_feature_id(ena_dev,
+ ENA_ADMIN_INTERRUPT_MODERATION);
}
-int
-ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
- u32 tx_coalesce_usecs)
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
{
if (!ena_dev->intr_delay_resolution) {
ena_trc_err("Illegal interrupt delay granularity value\n");
@@ -2655,9 +2611,8 @@ ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
return 0;
}
-int
-ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
- u32 rx_coalesce_usecs)
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
{
if (!ena_dev->intr_delay_resolution) {
ena_trc_err("Illegal interrupt delay granularity value\n");
@@ -2690,9 +2645,9 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
if (rc) {
- if (rc == ENA_COM_PERMISSION) {
- ena_trc_info("Feature %d isn't supported\n",
- ENA_ADMIN_INTERRUPT_MODERATION);
+ if (rc == ENA_COM_UNSUPPORTED) {
+ ena_trc_dbg("Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0;
} else {
ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
@@ -2719,8 +2674,7 @@ err:
return rc;
}
-void
-ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
@@ -2763,14 +2717,12 @@ ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
ENA_INTR_HIGHEST_BYTES;
}
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
return ena_dev->intr_moder_tx_interval;
}
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
@@ -2794,7 +2746,10 @@ void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
intr_moder_tbl[level].intr_moder_interval /=
ena_dev->intr_delay_resolution;
intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
- intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
+
+ /* use hardcoded value until ethtool supports bytecount parameter */
+ if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
+ intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}
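The guard treats ENA_INTR_BYTE_COUNT_NOT_SUPPORTED (0xFFFFFF, defined further down in ena_com.h) as a "leave the stored value alone" sentinel until tooling can supply real byte counts. Sketch of the pattern:

#include <stdio.h>

#define BYTE_COUNT_NOT_SUPPORTED 0xffffffu

static void update_bytes(unsigned *stored, unsigned requested)
{
	if (requested != BYTE_COUNT_NOT_SUPPORTED)
		*stored = requested;	/* only apply real values */
}

int main(void)
{
	unsigned bytes = 4096;

	update_bytes(&bytes, BYTE_COUNT_NOT_SUPPORTED);	/* keeps 4096 */
	update_bytes(&bytes, 8192);			/* applies 8192 */
	printf("bytes=%u\n", bytes);
	return 0;
}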
void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h
index e5345926..f58cd86a 100644
--- a/drivers/net/ena/base/ena_com.h
+++ b/drivers/net/ena/base/ena_com.h
@@ -35,15 +35,7 @@
#define ENA_COM
#include "ena_plat.h"
-#include "ena_common_defs.h"
-#include "ena_admin_defs.h"
-#include "ena_eth_io_defs.h"
-#include "ena_regs_defs.h"
-#if defined(__linux__) && !defined(__KERNEL__)
-#include <rte_lcore.h>
-#include <rte_spinlock.h>
-#define __iomem
-#endif
+#include "ena_includes.h"
#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need two queues for each IO (one for Tx and one for Rx) */
@@ -89,6 +81,11 @@
#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
+#define ENA_INTR_MODER_LEVEL_STRIDE 1
+#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
+
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
+
enum ena_intr_moder_level {
ENA_INTR_MODER_LOWEST = 0,
ENA_INTR_MODER_LOW,
@@ -120,8 +117,8 @@ struct ena_com_rx_buf_info {
};
struct ena_com_io_desc_addr {
- u8 __iomem *pbuf_dev_addr; /* LLQ address */
- u8 *virt_addr;
+ u8 __iomem *pbuf_dev_addr; /* LLQ address */
+ u8 *virt_addr;
dma_addr_t phys_addr;
ena_mem_handle_t mem_handle;
};
@@ -130,13 +127,12 @@ struct ena_com_tx_meta {
u16 mss;
u16 l3_hdr_len;
u16 l3_hdr_offset;
- u16 l3_outer_hdr_len; /* In words */
- u16 l3_outer_hdr_offset;
u16 l4_hdr_len; /* In words */
};
struct ena_com_io_cq {
struct ena_com_io_desc_addr cdesc_addr;
+ void *bus;
/* Interrupt unmask register */
u32 __iomem *unmask_reg;
@@ -174,6 +170,7 @@ struct ena_com_io_cq {
struct ena_com_io_sq {
struct ena_com_io_desc_addr desc_addr;
+ void *bus;
u32 __iomem *db_addr;
u8 __iomem *header_addr;
@@ -228,8 +225,11 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue {
void *q_dmadev;
+ void *bus;
ena_spinlock_t q_lock; /* spinlock for the admin queue */
+
struct ena_comp_ctx *comp_ctx;
+ u32 completion_timeout;
u16 q_depth;
struct ena_com_admin_cq cq;
struct ena_com_admin_sq sq;
@@ -266,6 +266,7 @@ struct ena_com_mmio_read {
struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
dma_addr_t read_resp_dma_addr;
ena_mem_handle_t read_resp_mem_handle;
+ u32 reg_read_to; /* in us */
u16 seq_num;
bool readless_supported;
/* spin lock to ensure a single outstanding read */
@@ -316,6 +317,7 @@ struct ena_com_dev {
u8 __iomem *reg_bar;
void __iomem *mem_bar;
void *dmadev;
+ void *bus;
enum ena_admin_placement_policy_type tx_mem_queue_type;
u32 tx_max_header_size;
@@ -340,6 +342,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_device_attr_feature_desc dev_attr;
struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload;
+ struct ena_admin_ena_hw_hints hw_hints;
};
struct ena_com_create_io_ctx {
@@ -379,7 +382,7 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
* @ena_dev: ENA communication layer struct
- * @realess_supported: readless mode (enable/disable)
+ * @readless_supported: readless mode (enable/disable)
*/
void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev,
bool readless_supported);
@@ -421,14 +424,16 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
/* ena_com_dev_reset - Perform an FLR (function-level reset) on the device.
* @ena_dev: ENA communication layer struct
+ * @reset_reason: Specifies the trigger for the reset in case of an error.
*
* @return - 0 on success, negative value on failure.
*/
-int ena_com_dev_reset(struct ena_com_dev *ena_dev);
+int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct
- * ena_com_create_io_ctx - create context structure
+ * @ctx - create context structure
*
* Create the submission and the completion queues.
*
@@ -437,8 +442,9 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev);
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
struct ena_com_create_io_ctx *ctx);
-/* ena_com_admin_destroy - Destroy IO queue with the queue id - qid.
+/* ena_com_destroy_io_queue - Destroy IO queue with the queue id - qid.
* @ena_dev: ENA communication layer struct
+ * @qid - the caller's virtual queue id.
*/
void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid);
@@ -581,9 +587,8 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag);
*
* @return: 0 on Success and negative value otherwise.
*/
-int
-ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx);
+int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx);
/* ena_com_get_dev_basic_stats - Get device basic statistics
* @ena_dev: ENA communication layer struct
@@ -608,9 +613,8 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu);
*
* @return: 0 on Success and negative value otherwise.
*/
-int
-ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
- struct ena_admin_feature_offload_desc *offload);
+int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ struct ena_admin_feature_offload_desc *offload);
/* ena_com_rss_init - Init RSS
* @ena_dev: ENA communication layer struct
@@ -765,8 +769,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
*
* Retrieve the RSS indirection table from the device.
*
- * @note: If the caller called ena_com_indirect_table_fill_entry but didn't
- * flash it to the device, the new configuration will be lost.
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
+ * it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
@@ -874,8 +878,7 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
* moderation table back to the default parameters.
* @ena_dev: ENA communication layer struct
*/
-void
-ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
+void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
* non-adaptive interval in Tx direction.
@@ -884,9 +887,8 @@ ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
*
* @return - 0 on success, negative value on failure.
*/
-int
-ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
- u32 tx_coalesce_usecs);
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs);
/* ena_com_update_nonadaptive_moderation_interval_rx - Update the
* non-adaptive interval in Rx direction.
@@ -895,9 +897,8 @@ ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
*
* @return - 0 on success, negative value on failure.
*/
-int
-ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
- u32 rx_coalesce_usecs);
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs);
/* ena_com_get_nonadaptive_moderation_interval_tx - Retrieve the
* non-adaptive interval in Tx direction.
@@ -905,8 +906,7 @@ ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
*
* @return - interval in usec
*/
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
+unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
/* ena_com_get_nonadaptive_moderation_interval_rx - Retrieve the
* non-adaptive interval in Rx direction.
@@ -914,8 +914,7 @@ ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev);
*
* @return - interval in usec
*/
-unsigned int
-ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
+unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
* moderation table.
@@ -940,20 +939,17 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
enum ena_intr_moder_level level,
struct ena_intr_moder_entry *entry);
-static inline bool
-ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
+static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
{
return ena_dev->adaptive_coalescing;
}
-static inline void
-ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
+static inline void ena_com_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
ena_dev->adaptive_coalescing = true;
}
-static inline void
-ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
+static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
ena_dev->adaptive_coalescing = false;
}
@@ -966,12 +962,11 @@ ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_dev)
* @moder_tbl_idx: Current table level as input update new level as return
* value.
*/
-static inline void
-ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
- unsigned int pkts,
- unsigned int bytes,
- unsigned int *smoothed_interval,
- unsigned int *moder_tbl_idx)
+static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
+ unsigned int pkts,
+ unsigned int bytes,
+ unsigned int *smoothed_interval,
+ unsigned int *moder_tbl_idx)
{
enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
struct ena_intr_moder_entry *curr_moder_entry;
@@ -1001,17 +996,20 @@ ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
if ((pkts > curr_moder_entry->pkts_per_interval) ||
(bytes > curr_moder_entry->bytes_per_interval))
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
} else {
- pred_moder_entry = &intr_moder_tbl[curr_moder_idx - 1];
+ pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
if ((pkts <= pred_moder_entry->pkts_per_interval) ||
(bytes <= pred_moder_entry->bytes_per_interval))
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx - 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
else if ((pkts > curr_moder_entry->pkts_per_interval) ||
(bytes > curr_moder_entry->bytes_per_interval)) {
if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
- new_moder_idx = (enum ena_intr_moder_level)(curr_moder_idx + 1);
+ new_moder_idx =
+ (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
}
}
new_moder_entry = &intr_moder_tbl[new_moder_idx];
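ENA_INTR_MODER_LEVEL_STRIDE just names the one-level step the walk was already taking. A self-contained sketch of the walk itself: step up a level when traffic exceeds the current bucket, step down when it falls at or below the previous one (table values here are made up):

#include <stdio.h>

enum level { LOWEST, LOW, MID, HIGH, HIGHEST, LEVELS };

struct entry { unsigned pkts; };

static const struct entry tbl[LEVELS] = {
	{ 64 }, { 128 }, { 256 }, { 512 }, { 1024 }
};

static enum level next_level(enum level cur, unsigned pkts)
{
	if (cur != LOWEST && pkts <= tbl[cur - 1].pkts)
		return cur - 1;	/* traffic fell to the previous bucket */
	if (cur != HIGHEST && pkts > tbl[cur].pkts)
		return cur + 1;	/* traffic exceeded the current bucket */
	return cur;
}

int main(void)
{
	printf("MID + 400 pkts/interval -> level %d\n", next_level(MID, 400));
	return 0;
}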
@@ -1044,18 +1042,12 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
intr_reg->intr_control |=
(tx_delay_interval << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
- & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
+ & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
if (unmask)
intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
-int ena_com_get_dev_extended_stats(struct ena_com_dev *ena_dev, char *buff,
- u32 len);
-
-int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev,
- u32 funct_queue);
-
#if defined(__cplusplus)
}
#endif /* __cplusplus */
diff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
index 7a031d90..04d4e9a5 100644
--- a/drivers/net/ena/base/ena_defs/ena_admin_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h
@@ -34,174 +34,140 @@
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
-/* admin commands opcodes */
enum ena_admin_aq_opcode {
- /* create submission queue */
- ENA_ADMIN_CREATE_SQ = 1,
+ ENA_ADMIN_CREATE_SQ = 1,
- /* destroy submission queue */
- ENA_ADMIN_DESTROY_SQ = 2,
+ ENA_ADMIN_DESTROY_SQ = 2,
- /* create completion queue */
- ENA_ADMIN_CREATE_CQ = 3,
+ ENA_ADMIN_CREATE_CQ = 3,
- /* destroy completion queue */
- ENA_ADMIN_DESTROY_CQ = 4,
+ ENA_ADMIN_DESTROY_CQ = 4,
- /* get capabilities of particular feature */
- ENA_ADMIN_GET_FEATURE = 8,
+ ENA_ADMIN_GET_FEATURE = 8,
- /* get capabilities of particular feature */
- ENA_ADMIN_SET_FEATURE = 9,
+ ENA_ADMIN_SET_FEATURE = 9,
- /* get statistics */
- ENA_ADMIN_GET_STATS = 11,
+ ENA_ADMIN_GET_STATS = 11,
};
-/* admin command completion status codes */
enum ena_admin_aq_completion_status {
- /* Request completed successfully */
- ENA_ADMIN_SUCCESS = 0,
+ ENA_ADMIN_SUCCESS = 0,
- /* no resources to satisfy request */
- ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
+ ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE = 1,
- /* Bad opcode in request descriptor */
- ENA_ADMIN_BAD_OPCODE = 2,
+ ENA_ADMIN_BAD_OPCODE = 2,
- /* Unsupported opcode in request descriptor */
- ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
+ ENA_ADMIN_UNSUPPORTED_OPCODE = 3,
- /* Wrong request format */
- ENA_ADMIN_MALFORMED_REQUEST = 4,
+ ENA_ADMIN_MALFORMED_REQUEST = 4,
- /* One of parameters is not valid. Provided in ACQ entry
- * extended_status
- */
- ENA_ADMIN_ILLEGAL_PARAMETER = 5,
+ /* Additional status is provided in ACQ entry extended_status */
+ ENA_ADMIN_ILLEGAL_PARAMETER = 5,
- /* unexpected error */
- ENA_ADMIN_UNKNOWN_ERROR = 6,
+ ENA_ADMIN_UNKNOWN_ERROR = 6,
};
-/* get/set feature subcommands opcodes */
enum ena_admin_aq_feature_id {
- /* list of all supported attributes/capabilities in the ENA */
- ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+ ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
+
+ ENA_ADMIN_MAX_QUEUES_NUM = 2,
- /* max number of supported queues per for every queues type */
- ENA_ADMIN_MAX_QUEUES_NUM = 2,
+ ENA_ADMIN_HW_HINTS = 3,
- /* Receive Side Scaling (RSS) function */
- ENA_ADMIN_RSS_HASH_FUNCTION = 10,
+ ENA_ADMIN_RSS_HASH_FUNCTION = 10,
- /* stateless TCP/UDP/IP offload capabilities. */
- ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
- /* Multiple tuples flow table configuration */
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
- /* max MTU, current MTU */
- ENA_ADMIN_MTU = 14,
+ ENA_ADMIN_MTU = 14,
- /* Receive Side Scaling (RSS) hash input */
- ENA_ADMIN_RSS_HASH_INPUT = 18,
+ ENA_ADMIN_RSS_HASH_INPUT = 18,
- /* interrupt moderation parameters */
- ENA_ADMIN_INTERRUPT_MODERATION = 20,
+ ENA_ADMIN_INTERRUPT_MODERATION = 20,
- /* AENQ configuration */
- ENA_ADMIN_AENQ_CONFIG = 26,
+ ENA_ADMIN_AENQ_CONFIG = 26,
- /* Link configuration */
- ENA_ADMIN_LINK_CONFIG = 27,
+ ENA_ADMIN_LINK_CONFIG = 27,
- /* Host attributes configuration */
- ENA_ADMIN_HOST_ATTR_CONFIG = 28,
+ ENA_ADMIN_HOST_ATTR_CONFIG = 28,
- /* Number of valid opcodes */
- ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
-/* descriptors and headers placement */
enum ena_admin_placement_policy_type {
- /* descriptors and headers are in OS memory */
- ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
- /* descriptors and headers in device memory (a.k.a Low Latency
+ /* descriptors and headers are in device memory (a.k.a Low Latency
* Queue)
*/
- ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
+ ENA_ADMIN_PLACEMENT_POLICY_DEV = 3,
};
-/* link speeds */
enum ena_admin_link_types {
- ENA_ADMIN_LINK_SPEED_1G = 0x1,
+ ENA_ADMIN_LINK_SPEED_1G = 0x1,
- ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
+ ENA_ADMIN_LINK_SPEED_2_HALF_G = 0x2,
- ENA_ADMIN_LINK_SPEED_5G = 0x4,
+ ENA_ADMIN_LINK_SPEED_5G = 0x4,
- ENA_ADMIN_LINK_SPEED_10G = 0x8,
+ ENA_ADMIN_LINK_SPEED_10G = 0x8,
- ENA_ADMIN_LINK_SPEED_25G = 0x10,
+ ENA_ADMIN_LINK_SPEED_25G = 0x10,
- ENA_ADMIN_LINK_SPEED_40G = 0x20,
+ ENA_ADMIN_LINK_SPEED_40G = 0x20,
- ENA_ADMIN_LINK_SPEED_50G = 0x40,
+ ENA_ADMIN_LINK_SPEED_50G = 0x40,
- ENA_ADMIN_LINK_SPEED_100G = 0x80,
+ ENA_ADMIN_LINK_SPEED_100G = 0x80,
- ENA_ADMIN_LINK_SPEED_200G = 0x100,
+ ENA_ADMIN_LINK_SPEED_200G = 0x100,
- ENA_ADMIN_LINK_SPEED_400G = 0x200,
+ ENA_ADMIN_LINK_SPEED_400G = 0x200,
};
-/* completion queue update policy */
enum ena_admin_completion_policy_type {
- /* cqe for each sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
+ /* completion queue entry for each sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC = 0,
- /* cqe upon request in sq descriptor */
- ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
+ /* completion queue entry upon request in sq descriptor */
+ ENA_ADMIN_COMPLETION_POLICY_DESC_ON_DEMAND = 1,
/* current queue head pointer is updated in OS memory upon sq
* descriptor request
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD_ON_DEMAND = 2,
/* current queue head pointer is updated in OS memory for each sq
* descriptor
*/
- ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
+ ENA_ADMIN_COMPLETION_POLICY_HEAD = 3,
};
-/* type of get statistics command */
+/* basic stats return ena_admin_basic_stats while extended stats return a
+ * buffer (string format) with additional statistics per queue and per
+ * device id
+ */
enum ena_admin_get_stats_type {
- /* Basic statistics */
- ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
+ ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
- /* Extended statistics */
- ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
};
-/* scope of get statistics command */
enum ena_admin_get_stats_scope {
- ENA_ADMIN_SPECIFIC_QUEUE = 0,
+ ENA_ADMIN_SPECIFIC_QUEUE = 0,
- ENA_ADMIN_ETH_TRAFFIC = 1,
+ ENA_ADMIN_ETH_TRAFFIC = 1,
};
-/* ENA Admin Queue (AQ) common descriptor */
struct ena_admin_aq_common_desc {
- /* word 0 : */
- /* command identificator to associate it with the completion
- * 11:0 : command_id
+ /* 11:0 : command_id
* 15:12 : reserved12
*/
uint16_t command_id;
- /* as appears in ena_aq_opcode */
+ /* as appears in ena_admin_aq_opcode */
uint8_t opcode;
/* 0 : phase
@@ -214,24 +180,17 @@ struct ena_admin_aq_common_desc {
uint8_t flags;
};
-/* used in ena_aq_entry. Can point directly to control data, or to a page
- * list chunk. Used also at the end of indirect mode page list chunks, for
- * chaining.
+/* used in ena_admin_aq_entry. Can point directly to control data, or to a
+ * page list chunk. Used also at the end of indirect mode page list chunks,
+ * for chaining.
*/
struct ena_admin_ctrl_buff_info {
- /* word 0 : indicates length of the buffer pointed by
- * control_buffer_address.
- */
uint32_t length;
- /* words 1:2 : points to control buffer (direct or indirect) */
struct ena_common_mem_addr address;
};
-/* submission queue full identification */
struct ena_admin_sq {
- /* word 0 : */
- /* queue id */
uint16_t sq_idx;
/* 4:0 : reserved
@@ -242,36 +201,25 @@ struct ena_admin_sq {
uint8_t reserved1;
};
-/* AQ entry format */
struct ena_admin_aq_entry {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : */
union {
- /* command specific inline data */
uint32_t inline_data_w1[3];
- /* words 1:3 : points to control buffer (direct or
- * indirect, chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
} u;
- /* command specific inline data */
uint32_t inline_data_w4[12];
};
-/* ENA Admin Completion Queue (ACQ) common descriptor */
struct ena_admin_acq_common_desc {
- /* word 0 : */
/* command identifier to associate it with the aq descriptor
* 11:0 : command_id
* 15:12 : reserved12
*/
uint16_t command;
- /* status of request execution */
uint8_t status;
/* 0 : phase
@@ -279,33 +227,21 @@ struct ena_admin_acq_common_desc {
*/
uint8_t flags;
- /* word 1 : */
- /* provides additional info */
uint16_t extended_status;
- /* submission queue head index, serves as a hint what AQ entries can
- * be revoked
- */
+ /* serves as a hint what AQ entries can be revoked */
uint16_t sq_head_indx;
};
-/* ACQ entry format */
struct ena_admin_acq_entry {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_descriptor;
- /* response type specific data */
uint32_t response_specific_data[14];
};
-/* ENA AQ Create Submission Queue command. Placed in control buffer pointed
- * by AQ entry
- */
struct ena_admin_aq_create_sq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
/* 4:0 : reserved0_w1
* 7:5 : sq_direction - 0x1 - Tx, 0x2 - Rx
*/
@@ -337,7 +273,6 @@ struct ena_admin_aq_create_sq_cmd {
*/
uint8_t sq_caps_3;
- /* word 2 : */
/* associated completion queue id. This CQ must be created prior to
* SQ creation
*/
@@ -346,85 +281,62 @@ struct ena_admin_aq_create_sq_cmd {
/* submission queue depth in entries */
uint16_t sq_depth;
- /* words 3:4 : SQ physical base address in OS memory. This field
- * should not be used for Low Latency queues. Has to be page
- * aligned.
+ /* SQ physical base address in OS memory. This field should not be
+ * used for Low Latency queues. Has to be page aligned.
*/
struct ena_common_mem_addr sq_ba;
- /* words 5:6 : specifies queue head writeback location in OS
- * memory. Valid if completion_policy is set to
- * completion_policy_head_on_demand or completion_policy_head. Has
- * to be cache aligned
+ /* specifies queue head writeback location in OS memory. Valid if
+ * completion_policy is set to completion_policy_head_on_demand or
+ * completion_policy_head. Has to be cache aligned
*/
struct ena_common_mem_addr sq_head_writeback;
- /* word 7 : reserved word */
uint32_t reserved0_w7;
- /* word 8 : reserved word */
uint32_t reserved0_w8;
};
-/* submission queue direction */
enum ena_admin_sq_direction {
- ENA_ADMIN_SQ_DIRECTION_TX = 1,
+ ENA_ADMIN_SQ_DIRECTION_TX = 1,
- ENA_ADMIN_SQ_DIRECTION_RX = 2,
+ ENA_ADMIN_SQ_DIRECTION_RX = 2,
};
-/* ENA Response for Create SQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_create_sq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* word 2 : */
- /* sq identifier */
uint16_t sq_idx;
uint16_t reserved;
- /* word 3 : queue doorbell address as an offset to PCIe MMIO REG BAR */
+ /* queue doorbell address as an offset to PCIe MMIO REG BAR */
uint32_t sq_doorbell_offset;
- /* word 4 : low latency queue ring base address as an offset to
- * PCIe MMIO LLQ_MEM BAR
+ /* low latency queue ring base address as an offset to PCIe MMIO
+ * LLQ_MEM BAR
*/
uint32_t llq_descriptors_offset;
- /* word 5 : low latency queue headers' memory as an offset to PCIe
- * MMIO LLQ_MEM BAR
+ /* low latency queue headers' memory as an offset to PCIe MMIO
+ * LLQ_MEM BAR
*/
uint32_t llq_headers_offset;
};
-/* ENA AQ Destroy Submission Queue command. Placed in control buffer
- * pointed by AQ entry
- */
struct ena_admin_aq_destroy_sq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1 : */
struct ena_admin_sq sq;
};
-/* ENA Response for Destroy SQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_destroy_sq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
};
-/* ENA AQ Create Completion Queue command */
struct ena_admin_aq_create_cq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
/* 4:0 : reserved5
* 5 : interrupt_mode_enabled - if set, cq operates
* in interrupt mode, otherwise - polling
@@ -441,62 +353,39 @@ struct ena_admin_aq_create_cq_cmd {
/* completion queue depth in # of entries. must be power of 2 */
uint16_t cq_depth;
- /* word 2 : msix vector assigned to this cq */
+ /* msix vector assigned to this cq */
uint32_t msix_vector;
- /* words 3:4 : cq physical base address in OS memory. CQ must be
- * physically contiguous
+ /* cq physical base address in OS memory. CQ must be physically
+ * contiguous
*/
struct ena_common_mem_addr cq_ba;
};
-/* ENA Response for Create CQ Command. Appears in ACQ entry as response
- * specific data
- */
struct ena_admin_acq_create_cq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* word 2 : */
- /* cq identifier */
uint16_t cq_idx;
- /* actual cq depth in # of entries */
+ /* actual cq depth in number of entries */
uint16_t cq_actual_depth;
- /* word 3 : cpu numa node address as an offset to PCIe MMIO REG BAR */
uint32_t numa_node_register_offset;
- /* word 4 : completion head doorbell address as an offset to PCIe
- * MMIO REG BAR
- */
uint32_t cq_head_db_register_offset;
- /* word 5 : interrupt unmask register address as an offset into
- * PCIe MMIO REG BAR
- */
uint32_t cq_interrupt_unmask_register_offset;
};
-/* ENA AQ Destroy Completion Queue command. Placed in control buffer
- * pointed by AQ entry
- */
struct ena_admin_aq_destroy_cq_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* word 1 : */
- /* associated queue id. */
uint16_t cq_idx;
uint16_t reserved1;
};
-/* ENA Response for Destroy CQ Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_destroy_cq_resp_desc {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
};
@@ -504,21 +393,15 @@ struct ena_admin_acq_destroy_cq_resp_desc {
* buffer pointed by AQ entry
*/
struct ena_admin_aq_get_stats_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : */
union {
/* command specific inline data */
uint32_t inline_data_w1[3];
- /* words 1:3 : points to control buffer (direct or
- * indirect, chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
} u;
- /* word 4 : */
/* stats type as defined in enum ena_admin_get_stats_type */
uint8_t type;
@@ -527,7 +410,6 @@ struct ena_admin_aq_get_stats_cmd {
uint16_t reserved3;
- /* word 5 : */
/* queue id. used when scope is specific_queue */
uint16_t queue_idx;
@@ -539,89 +421,60 @@ struct ena_admin_aq_get_stats_cmd {
/* Basic Statistics Command. */
struct ena_admin_basic_stats {
- /* word 0 : */
uint32_t tx_bytes_low;
- /* word 1 : */
uint32_t tx_bytes_high;
- /* word 2 : */
uint32_t tx_pkts_low;
- /* word 3 : */
uint32_t tx_pkts_high;
- /* word 4 : */
uint32_t rx_bytes_low;
- /* word 5 : */
uint32_t rx_bytes_high;
- /* word 6 : */
uint32_t rx_pkts_low;
- /* word 7 : */
uint32_t rx_pkts_high;
- /* word 8 : */
uint32_t rx_drops_low;
- /* word 9 : */
uint32_t rx_drops_high;
};
-/* ENA Response for Get Statistics Command. Appears in ACQ entry as
- * response_specific_data
- */
struct ena_admin_acq_get_stats_resp {
- /* words 0:1 : Common Admin Queue completion descriptor */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:11 : */
struct ena_admin_basic_stats basic_stats;
};
-/* ENA Get/Set Feature common descriptor. Appears as inline word in
- * ena_aq_entry
- */
struct ena_admin_get_set_feature_common_desc {
- /* word 0 : */
/* 1:0 : select - 0x1 - current value; 0x3 - default
* value
* 7:3 : reserved3
*/
uint8_t flags;
- /* as appears in ena_feature_id */
+ /* as appears in ena_admin_aq_feature_id */
uint8_t feature_id;
- /* reserved16 */
uint16_t reserved16;
};
-/* ENA Device Attributes Feature descriptor. */
struct ena_admin_device_attr_feature_desc {
- /* word 0 : implementation id */
uint32_t impl_id;
- /* word 1 : device version */
uint32_t device_version;
- /* word 2 : bit map of which bits are supported value of 1
- * indicated that this feature is supported and can perform SET/GET
- * for it
- */
+ /* bitmap of ena_admin_aq_feature_id */
uint32_t supported_features;
- /* word 3 : */
uint32_t reserved3;
- /* word 4 : Indicates how many bits are used physical address
- * access.
- */
+ /* Indicates how many bits are used for physical address access. */
uint32_t phys_addr_width;
- /* word 5 : Indicates how many bits are used virtual address access. */
+ /* Indicates how many bits are used for virtual address access. */
uint32_t virt_addr_width;
/* unicast MAC address (in Network byte order) */
@@ -629,36 +482,27 @@ struct ena_admin_device_attr_feature_desc {
uint8_t reserved7[2];
- /* word 8 : Max supported MTU value */
uint32_t max_mtu;
};
-/* ENA Max Queues Feature descriptor. */
struct ena_admin_queue_feature_desc {
- /* word 0 : Max number of submission queues (including LLQs) */
+ /* including LLQs */
uint32_t max_sq_num;
- /* word 1 : Max submission queue depth */
uint32_t max_sq_depth;
- /* word 2 : Max number of completion queues */
uint32_t max_cq_num;
- /* word 3 : Max completion queue depth */
uint32_t max_cq_depth;
- /* word 4 : Max number of LLQ submission queues */
uint32_t max_llq_num;
- /* word 5 : Max submission queue depth of LLQ */
uint32_t max_llq_depth;
- /* word 6 : Max header size */
uint32_t max_header_size;
- /* word 7 : */
- /* Maximum Descriptors number, including meta descriptors, allowed
- * for a single Tx packet
+ /* Maximum number of descriptors, including meta descriptor, allowed for
+ * a single Tx packet
*/
uint16_t max_packet_tx_descs;
@@ -666,86 +510,69 @@ struct ena_admin_queue_feature_desc {
uint16_t max_packet_rx_descs;
};
-/* ENA MTU Set Feature descriptor. */
struct ena_admin_set_feature_mtu_desc {
- /* word 0 : mtu payload size (exclude L2) */
+ /* exclude L2 */
uint32_t mtu;
};
-/* ENA host attributes Set Feature descriptor. */
struct ena_admin_set_feature_host_attr_desc {
- /* words 0:1 : host OS info base address in OS memory. host info is
- * 4KB of physically contiguous
+ /* host OS info base address in OS memory. host info is 4KB of
+ * physically contiguous
*/
struct ena_common_mem_addr os_info_ba;
- /* words 2:3 : host debug area base address in OS memory. debug
- * area must be physically contiguous
+ /* host debug area base address in OS memory. debug area must be
+ * physically contiguous
*/
struct ena_common_mem_addr debug_ba;
- /* word 4 : debug area size */
+ /* debug area size */
uint32_t debug_area_size;
};
-/* ENA Interrupt Moderation Get Feature descriptor. */
struct ena_admin_feature_intr_moder_desc {
- /* word 0 : */
/* interrupt delay granularity in usec */
uint16_t intr_delay_resolution;
uint16_t reserved;
};
-/* ENA Link Get Feature descriptor. */
struct ena_admin_get_feature_link_desc {
- /* word 0 : Link speed in Mb */
+ /* Link speed in Mb */
uint32_t speed;
- /* word 1 : supported speeds (bit field of enum ena_admin_link
- * types)
- */
+ /* bit field of enum ena_admin_link types */
uint32_t supported;
- /* word 2 : */
- /* 0 : autoneg - auto negotiation
+ /* 0 : autoneg
* 1 : duplex - Full Duplex
* 31:2 : reserved2
*/
uint32_t flags;
};
-/* ENA AENQ Feature descriptor. */
struct ena_admin_feature_aenq_desc {
- /* word 0 : bitmask for AENQ groups the device can report */
+ /* bitmask for AENQ groups the device can report */
uint32_t supported_groups;
- /* word 1 : bitmask for AENQ groups to report */
+ /* bitmask for AENQ groups to report */
uint32_t enabled_groups;
};
-/* ENA Stateless Offload Feature descriptor. */
struct ena_admin_feature_offload_desc {
- /* word 0 : */
- /* Trasmit side stateless offload
- * 0 : TX_L3_csum_ipv4 - IPv4 checksum
- * 1 : TX_L4_ipv4_csum_part - TCP/UDP over IPv4
- * checksum, the checksum field should be initialized
- * with pseudo header checksum
- * 2 : TX_L4_ipv4_csum_full - TCP/UDP over IPv4
- * checksum
- * 3 : TX_L4_ipv6_csum_part - TCP/UDP over IPv6
- * checksum, the checksum field should be initialized
- * with pseudo header checksum
- * 4 : TX_L4_ipv6_csum_full - TCP/UDP over IPv6
- * checksum
- * 5 : tso_ipv4 - TCP/IPv4 Segmentation Offloading
- * 6 : tso_ipv6 - TCP/IPv6 Segmentation Offloading
- * 7 : tso_ecn - TCP Segmentation with ECN
+ /* 0 : TX_L3_csum_ipv4
+ * 1 : TX_L4_ipv4_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 2 : TX_L4_ipv4_csum_full
+ * 3 : TX_L4_ipv6_csum_part - The checksum field
+ * should be initialized with pseudo header checksum
+ * 4 : TX_L4_ipv6_csum_full
+ * 5 : tso_ipv4
+ * 6 : tso_ipv6
+ * 7 : tso_ecn
*/
uint32_t tx;
- /* word 1 : */
/* Receive side supported stateless offload
* 0 : RX_L3_csum_ipv4 - IPv4 checksum
* 1 : RX_L4_ipv4_csum - TCP/UDP/IPv4 checksum
@@ -754,118 +581,94 @@ struct ena_admin_feature_offload_desc {
*/
uint32_t rx_supported;
- /* word 2 : */
- /* Receive side enabled stateless offload */
uint32_t rx_enabled;
};
-/* hash functions */
enum ena_admin_hash_functions {
- /* Toeplitz hash */
- ENA_ADMIN_TOEPLITZ = 1,
+ ENA_ADMIN_TOEPLITZ = 1,
- /* CRC32 hash */
- ENA_ADMIN_CRC32 = 2,
+ ENA_ADMIN_CRC32 = 2,
};
-/* ENA RSS flow hash control buffer structure */
struct ena_admin_feature_rss_flow_hash_control {
- /* word 0 : number of valid keys */
uint32_t keys_num;
- /* word 1 : */
uint32_t reserved;
- /* Toeplitz keys */
uint32_t key[10];
};
-/* ENA RSS Flow Hash Function */
struct ena_admin_feature_rss_flow_hash_function {
- /* word 0 : */
- /* supported hash functions
- * 7:0 : funcs - supported hash functions (bitmask
- * accroding to ena_admin_hash_functions)
- */
+ /* 7:0 : funcs - bitmask of ena_admin_hash_functions */
uint32_t supported_func;
- /* word 1 : */
- /* selected hash func
- * 7:0 : selected_func - selected hash function
- * (bitmask accroding to ena_admin_hash_functions)
+ /* 7:0 : selected_func - bitmask of
+ * ena_admin_hash_functions
*/
uint32_t selected_func;
- /* word 2 : initial value */
+ /* initial value */
uint32_t init_val;
};
/* RSS flow hash protocols */
enum ena_admin_flow_hash_proto {
- /* tcp/ipv4 */
- ENA_ADMIN_RSS_TCP4 = 0,
+ ENA_ADMIN_RSS_TCP4 = 0,
- /* udp/ipv4 */
- ENA_ADMIN_RSS_UDP4 = 1,
+ ENA_ADMIN_RSS_UDP4 = 1,
- /* tcp/ipv6 */
- ENA_ADMIN_RSS_TCP6 = 2,
+ ENA_ADMIN_RSS_TCP6 = 2,
- /* udp/ipv6 */
- ENA_ADMIN_RSS_UDP6 = 3,
+ ENA_ADMIN_RSS_UDP6 = 3,
- /* ipv4 not tcp/udp */
- ENA_ADMIN_RSS_IP4 = 4,
+ ENA_ADMIN_RSS_IP4 = 4,
- /* ipv6 not tcp/udp */
- ENA_ADMIN_RSS_IP6 = 5,
+ ENA_ADMIN_RSS_IP6 = 5,
- /* fragmented ipv4 */
- ENA_ADMIN_RSS_IP4_FRAG = 6,
+ ENA_ADMIN_RSS_IP4_FRAG = 6,
- /* not ipv4/6 */
- ENA_ADMIN_RSS_NOT_IP = 7,
+ ENA_ADMIN_RSS_NOT_IP = 7,
- /* max number of protocols */
- ENA_ADMIN_RSS_PROTO_NUM = 16,
+ /* TCPv6 with extension header */
+ ENA_ADMIN_RSS_TCP6_EX = 8,
+
+ /* IPv6 with extension header */
+ ENA_ADMIN_RSS_IP6_EX = 9,
+
+ ENA_ADMIN_RSS_PROTO_NUM = 16,
};
/* RSS flow hash fields */
enum ena_admin_flow_hash_fields {
/* Ethernet Dest Addr */
- ENA_ADMIN_RSS_L2_DA = 0,
+ ENA_ADMIN_RSS_L2_DA = BIT(0),
/* Ethernet Src Addr */
- ENA_ADMIN_RSS_L2_SA = 1,
+ ENA_ADMIN_RSS_L2_SA = BIT(1),
/* ipv4/6 Dest Addr */
- ENA_ADMIN_RSS_L3_DA = 2,
+ ENA_ADMIN_RSS_L3_DA = BIT(2),
/* ipv4/6 Src Addr */
- ENA_ADMIN_RSS_L3_SA = 5,
+ ENA_ADMIN_RSS_L3_SA = BIT(3),
/* tcp/udp Dest Port */
- ENA_ADMIN_RSS_L4_DP = 6,
+ ENA_ADMIN_RSS_L4_DP = BIT(4),
/* tcp/udp Src Port */
- ENA_ADMIN_RSS_L4_SP = 7,
+ ENA_ADMIN_RSS_L4_SP = BIT(5),
};
-/* hash input fields for flow protocol */
struct ena_admin_proto_input {
- /* word 0 : */
/* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */
uint16_t fields;
uint16_t reserved2;
};
-/* ENA RSS hash control buffer structure */
struct ena_admin_feature_rss_hash_control {
- /* supported input fields */
struct ena_admin_proto_input supported_fields[ENA_ADMIN_RSS_PROTO_NUM];
- /* selected input fields */
struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM];
struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM];
@@ -873,11 +676,9 @@ struct ena_admin_feature_rss_hash_control {
struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM];
};
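
Because the field enumerators are now BIT() values rather than indices,
a per-protocol hash input is a plain OR of fields. A hypothetical
helper selecting the classic 4-tuple for TCP/IPv4:

	static void
	ena_hash_tcp4_on_4tuple(struct ena_admin_feature_rss_hash_control *hc)
	{
		/* hash TCP/IPv4 flows on src/dst address and src/dst port */
		hc->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
			ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
			ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP;
	}
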
-/* ENA RSS flow hash input */
struct ena_admin_feature_rss_flow_hash_input {
- /* word 0 : */
/* supported hash input sorting
- * 1 : L3_sort - support swap L3 addresses if DA
+ * 1 : L3_sort - support swap L3 addresses if DA is
* smaller than SA
 * 2 : L4_sort - support swap L4 ports if DP is
 * smaller than SP
@@ -893,46 +694,37 @@ struct ena_admin_feature_rss_flow_hash_input {
uint16_t enabled_input_sort;
};
-/* Operating system type */
enum ena_admin_os_type {
- /* Linux OS */
- ENA_ADMIN_OS_LINUX = 1,
+ ENA_ADMIN_OS_LINUX = 1,
- /* Windows OS */
- ENA_ADMIN_OS_WIN = 2,
+ ENA_ADMIN_OS_WIN = 2,
- /* DPDK OS */
- ENA_ADMIN_OS_DPDK = 3,
+ ENA_ADMIN_OS_DPDK = 3,
- /* FreeBSD OS */
- ENA_ADMIN_OS_FREEBSD = 4,
+ ENA_ADMIN_OS_FREEBSD = 4,
- /* PXE OS */
- ENA_ADMIN_OS_IPXE = 5,
+ ENA_ADMIN_OS_IPXE = 5,
};
-/* host info */
struct ena_admin_host_info {
- /* word 0 : OS type defined in enum ena_os_type */
+ /* defined in enum ena_admin_os_type */
uint32_t os_type;
/* os distribution string format */
uint8_t os_dist_str[128];
- /* word 33 : OS distribution numeric format */
+ /* OS distribution numeric format */
uint32_t os_dist;
/* kernel version string format */
uint8_t kernel_ver_str[32];
- /* word 42 : Kernel version numeric format */
+ /* Kernel version numeric format */
uint32_t kernel_ver;
- /* word 43 : */
- /* driver version
- * 7:0 : major - major
- * 15:8 : minor - minor
- * 23:16 : sub_minor - sub minor
+ /* 7:0 : major
+ * 15:8 : minor
+ * 23:16 : sub_minor
*/
uint32_t driver_version;
@@ -940,220 +732,200 @@ struct ena_admin_host_info {
uint32_t supported_network_features[4];
};
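
With the 8-bit packing above, driver version 18.8.0 encodes as
0x00000812. A sketch using the setters defined at the bottom of this
file (it must be placed after them, since they only OR bits into a
zeroed field):

	static void
	ena_fill_driver_version(struct ena_admin_host_info *hi)
	{
		hi->driver_version = 0;			  /* setters do not clear */
		set_ena_admin_host_info_major(hi, 18);	  /* bits 7:0   */
		set_ena_admin_host_info_minor(hi, 8);	  /* bits 15:8  */
		set_ena_admin_host_info_sub_minor(hi, 0); /* bits 23:16 */
	}
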
-/* ENA RSS indirection table entry */
struct ena_admin_rss_ind_table_entry {
- /* word 0 : */
- /* cq identifier */
uint16_t cq_idx;
uint16_t reserved;
};
-/* ENA RSS indirection table */
struct ena_admin_feature_rss_ind_table {
- /* word 0 : */
/* min supported table size (2^min_size) */
uint16_t min_size;
/* max supported table size (2^max_size) */
uint16_t max_size;
- /* word 1 : */
/* table size (2^size) */
uint16_t size;
uint16_t reserved;
- /* word 2 : index of the inline entry. 0xFFFFFFFF means invalid */
+ /* index of the inline entry. 0xFFFFFFFF means invalid */
uint32_t inline_index;
- /* words 3 : used for updating single entry, ignored when setting
- * the entire table through the control buffer.
+	/* used for updating a single entry, ignored when setting the entire
+ * table through the control buffer.
*/
struct ena_admin_rss_ind_table_entry inline_entry;
};
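
The table sizes are log2-encoded, so the entry count follows directly.
A small sketch:

	/* Number of entries described by a response; the device accepts
	 * sizes between 1 << min_size and 1 << max_size. */
	static inline uint32_t
	ena_ind_table_entries(const struct ena_admin_feature_rss_ind_table *p)
	{
		return 1U << p->size;	/* e.g. size == 7 -> 128 entries */
	}
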
-/* ENA Get Feature command */
+/* When hint value is 0, driver should use its own predefined value */
+struct ena_admin_ena_hw_hints {
+ /* value in ms */
+ uint16_t mmio_read_timeout;
+
+ /* value in ms */
+ uint16_t driver_watchdog_timeout;
+
+	/* Per packet tx completion timeout. Value in ms */
+ uint16_t missing_tx_completion_timeout;
+
+ uint16_t missed_tx_completion_count_threshold_to_reset;
+
+ /* value in ms */
+ uint16_t admin_completion_tx_timeout;
+
+ uint16_t netdev_wd_timeout;
+
+ uint16_t max_tx_sgl_size;
+
+ uint16_t max_rx_sgl_size;
+
+ uint16_t reserved[8];
+};
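
A sketch of the zero-means-default convention stated above; the
fallback constant is hypothetical, not part of this file:

	#define ENA_DRV_MMIO_READ_TIMEOUT_MS 100 /* hypothetical driver default */

	static inline uint16_t
	ena_mmio_read_timeout(const struct ena_admin_ena_hw_hints *hints)
	{
		/* a hint of 0 means "use the driver's own value" */
		return hints->mmio_read_timeout ? hints->mmio_read_timeout
						: ENA_DRV_MMIO_READ_TIMEOUT_MS;
	}
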
+
struct ena_admin_get_feat_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : points to control buffer (direct or indirect,
- * chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
- /* words 4 : */
struct ena_admin_get_set_feature_common_desc feat_common;
- /* words 5:15 : */
- union {
- /* raw words */
- uint32_t raw[11];
- } u;
+ uint32_t raw[11];
};
-/* ENA Get Feature command response */
struct ena_admin_get_feat_resp {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:15 : */
union {
- /* raw words */
uint32_t raw[14];
- /* words 2:10 : Get Device Attributes */
struct ena_admin_device_attr_feature_desc dev_attr;
- /* words 2:5 : Max queues num */
struct ena_admin_queue_feature_desc max_queue;
- /* words 2:3 : AENQ configuration */
struct ena_admin_feature_aenq_desc aenq;
- /* words 2:4 : Get Link configuration */
struct ena_admin_get_feature_link_desc link;
- /* words 2:4 : offload configuration */
struct ena_admin_feature_offload_desc offload;
- /* words 2:4 : rss flow hash function */
struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
- /* words 2 : rss flow hash input */
struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
- /* words 2:3 : rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
- /* words 2 : interrupt moderation configuration */
struct ena_admin_feature_intr_moder_desc intr_moderation;
+
+ struct ena_admin_ena_hw_hints hw_hints;
} u;
};
-/* ENA Set Feature command */
struct ena_admin_set_feat_cmd {
- /* words 0 : */
struct ena_admin_aq_common_desc aq_common_descriptor;
- /* words 1:3 : points to control buffer (direct or indirect,
- * chained if needed)
- */
struct ena_admin_ctrl_buff_info control_buffer;
- /* words 4 : */
struct ena_admin_get_set_feature_common_desc feat_common;
- /* words 5:15 : */
union {
- /* raw words */
uint32_t raw[11];
- /* words 5 : mtu size */
+ /* mtu size */
struct ena_admin_set_feature_mtu_desc mtu;
- /* words 5:7 : host attributes */
+ /* host attributes */
struct ena_admin_set_feature_host_attr_desc host_attr;
- /* words 5:6 : AENQ configuration */
+ /* AENQ configuration */
struct ena_admin_feature_aenq_desc aenq;
- /* words 5:7 : rss flow hash function */
+ /* rss flow hash function */
struct ena_admin_feature_rss_flow_hash_function flow_hash_func;
- /* words 5 : rss flow hash input */
+ /* rss flow hash input */
struct ena_admin_feature_rss_flow_hash_input flow_hash_input;
- /* words 5:6 : rss indirection table */
+ /* rss indirection table */
struct ena_admin_feature_rss_ind_table ind_table;
} u;
};
-/* ENA Set Feature command response */
struct ena_admin_set_feat_resp {
- /* words 0:1 : */
struct ena_admin_acq_common_desc acq_common_desc;
- /* words 2:15 : */
union {
- /* raw words */
uint32_t raw[14];
} u;
};
-/* ENA Asynchronous Event Notification Queue descriptor. */
struct ena_admin_aenq_common_desc {
- /* word 0 : */
uint16_t group;
uint16_t syndrom;
- /* word 1 : */
/* 0 : phase */
uint8_t flags;
uint8_t reserved1[3];
- /* word 2 : Timestamp LSB */
uint32_t timestamp_low;
- /* word 3 : Timestamp MSB */
uint32_t timestamp_high;
};
/* asynchronous event notification groups */
enum ena_admin_aenq_group {
- /* Link State Change */
- ENA_ADMIN_LINK_CHANGE = 0,
+ ENA_ADMIN_LINK_CHANGE = 0,
- ENA_ADMIN_FATAL_ERROR = 1,
+ ENA_ADMIN_FATAL_ERROR = 1,
- ENA_ADMIN_WARNING = 2,
+ ENA_ADMIN_WARNING = 2,
- ENA_ADMIN_NOTIFICATION = 3,
+ ENA_ADMIN_NOTIFICATION = 3,
- ENA_ADMIN_KEEP_ALIVE = 4,
+ ENA_ADMIN_KEEP_ALIVE = 4,
- ENA_ADMIN_AENQ_GROUPS_NUM = 5,
+ ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-/* syndorm of AENQ notification group */
enum ena_admin_aenq_notification_syndrom {
- ENA_ADMIN_SUSPEND = 0,
+ ENA_ADMIN_SUSPEND = 0,
+
+ ENA_ADMIN_RESUME = 1,
- ENA_ADMIN_RESUME = 1,
+ ENA_ADMIN_UPDATE_HINTS = 2,
};
-/* ENA Asynchronous Event Notification generic descriptor. */
struct ena_admin_aenq_entry {
- /* words 0:3 : */
struct ena_admin_aenq_common_desc aenq_common_desc;
/* command specific inline data */
uint32_t inline_data_w4[12];
};
-/* ENA Asynchronous Event Notification Queue Link Change descriptor. */
struct ena_admin_aenq_link_change_desc {
- /* words 0:3 : */
struct ena_admin_aenq_common_desc aenq_common_desc;
- /* word 4 : */
/* 0 : link_status */
uint32_t flags;
};
-/* ENA MMIO Readless response interface */
+struct ena_admin_aenq_keep_alive_desc {
+ struct ena_admin_aenq_common_desc aenq_common_desc;
+
+ uint32_t rx_drops_low;
+
+ uint32_t rx_drops_high;
+};
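
The keep-alive counters, like the timestamp in the common descriptor
above, are split into low/high words; recombining them is a one-liner:

	static inline uint64_t
	ena_keep_alive_rx_drops(const struct ena_admin_aenq_keep_alive_desc *d)
	{
		return ((uint64_t)d->rx_drops_high << 32) | d->rx_drops_low;
	}
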
+
struct ena_admin_ena_mmio_req_read_less_resp {
- /* word 0 : */
- /* request id */
uint16_t req_id;
- /* register offset */
uint16_t reg_off;
- /* word 1 : value is valid when poll is cleared */
+ /* value is valid when poll is cleared */
uint32_t reg_val;
};
@@ -1220,8 +992,7 @@ struct ena_admin_ena_mmio_req_read_less_resp {
/* feature_rss_flow_hash_function */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK GENMASK(7, 0)
-#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \
- GENMASK(7, 0)
+#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK GENMASK(7, 0)
/* feature_rss_flow_hash_input */
#define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1
@@ -1247,653 +1018,392 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0)
#if !defined(ENA_DEFS_LINUX_MAINLINE)
-static inline uint16_t
-get_ena_admin_aq_common_desc_command_id(
- const struct ena_admin_aq_common_desc *p)
+static inline uint16_t get_ena_admin_aq_common_desc_command_id(const struct ena_admin_aq_common_desc *p)
{
return p->command_id & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline void
-set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p,
- uint16_t val)
+static inline void set_ena_admin_aq_common_desc_command_id(struct ena_admin_aq_common_desc *p, uint16_t val)
{
p->command_id |= val & ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_phase(const struct ena_admin_aq_common_desc *p)
{
return p->flags & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_phase(struct ena_admin_aq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
}
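
Note that all of these setters OR the new bits in without clearing the
field first, so the expected idiom is to start from a zeroed
descriptor. Hypothetical usage (memset from <string.h>):

	struct ena_admin_aq_common_desc desc;

	memset(&desc, 0, sizeof(desc));	/* setters never clear stale bits */
	set_ena_admin_aq_common_desc_command_id(&desc, 42);
	set_ena_admin_aq_common_desc_phase(&desc, 1);
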
-static inline uint8_t
-get_ena_admin_aq_common_desc_ctrl_data(
- const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data(const struct ena_admin_aq_common_desc *p)
{
- return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >>
- ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT;
}
-static inline void
-set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_ctrl_data(struct ena_admin_aq_common_desc *p, uint8_t val)
{
- p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT)
- & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_common_desc_ctrl_data_indirect(
- const struct ena_admin_aq_common_desc *p)
+static inline uint8_t get_ena_admin_aq_common_desc_ctrl_data_indirect(const struct ena_admin_aq_common_desc *p)
{
- return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK)
- >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
+ return (p->flags & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK) >> ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT;
}
-static inline void
-set_ena_admin_aq_common_desc_ctrl_data_indirect(
- struct ena_admin_aq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aq_common_desc_ctrl_data_indirect(struct ena_admin_aq_common_desc *p, uint8_t val)
{
- p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT)
- & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
+ p->flags |= (val << ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT) & ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
}
-static inline uint8_t
-get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
+static inline uint8_t get_ena_admin_sq_sq_direction(const struct ena_admin_sq *p)
{
- return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK)
- >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
+ return (p->sq_identity & ENA_ADMIN_SQ_SQ_DIRECTION_MASK) >> ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT;
}
-static inline void
-set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
+static inline void set_ena_admin_sq_sq_direction(struct ena_admin_sq *p, uint8_t val)
{
- p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
- ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
+ p->sq_identity |= (val << ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) & ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
}
-static inline uint16_t
-get_ena_admin_acq_common_desc_command_id(
- const struct ena_admin_acq_common_desc *p)
+static inline uint16_t get_ena_admin_acq_common_desc_command_id(const struct ena_admin_acq_common_desc *p)
{
return p->command & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline void
-set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p,
- uint16_t val)
+static inline void set_ena_admin_acq_common_desc_command_id(struct ena_admin_acq_common_desc *p, uint16_t val)
{
p->command |= val & ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
}
-static inline uint8_t
-get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
+static inline uint8_t get_ena_admin_acq_common_desc_phase(const struct ena_admin_acq_common_desc *p)
{
return p->flags & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_acq_common_desc_phase(struct ena_admin_acq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_sq_direction(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_sq_direction(const struct ena_admin_aq_create_sq_cmd *p)
{
- return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK)
- >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
+ return (p->sq_identity & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_sq_direction(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_sq_direction(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_identity |= (val <<
- ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT)
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
+ p->sq_identity |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_placement_policy(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_placement_policy(const struct ena_admin_aq_create_sq_cmd *p)
{
return p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_placement_policy(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_placement_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
p->sq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_completion_policy(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_completion_policy(const struct ena_admin_aq_create_sq_cmd *p)
{
- return (p->sq_caps_2
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK)
- >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
+ return (p->sq_caps_2 & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK) >> ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_completion_policy(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_completion_policy(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_caps_2 |=
- (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT)
- & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
+ p->sq_caps_2 |= (val << ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) & ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
- const struct ena_admin_aq_create_sq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_sq_cmd_is_physically_contiguous(const struct ena_admin_aq_create_sq_cmd *p)
{
- return p->sq_caps_3 &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+ return p->sq_caps_3 & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
}
-static inline void
-set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(
- struct ena_admin_aq_create_sq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_sq_cmd_is_physically_contiguous(struct ena_admin_aq_create_sq_cmd *p, uint8_t val)
{
- p->sq_caps_3 |= val &
- ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
+ p->sq_caps_3 |= val & ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
- const struct ena_admin_aq_create_cq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(const struct ena_admin_aq_create_cq_cmd *p)
{
- return (p->cq_caps_1 &
- ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK)
- >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
+ return (p->cq_caps_1 & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK) >> ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT;
}
-static inline void
-set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(
- struct ena_admin_aq_create_cq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_cq_cmd_interrupt_mode_enabled(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
{
- p->cq_caps_1 |=
- (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT)
- & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
+ p->cq_caps_1 |= (val << ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT) & ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
}
-static inline uint8_t
-get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
- const struct ena_admin_aq_create_cq_cmd *p)
+static inline uint8_t get_ena_admin_aq_create_cq_cmd_cq_entry_size_words(const struct ena_admin_aq_create_cq_cmd *p)
{
- return p->cq_caps_2
- & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ return p->cq_caps_2 & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
}
-static inline void
-set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(
- struct ena_admin_aq_create_cq_cmd *p,
- uint8_t val)
+static inline void set_ena_admin_aq_create_cq_cmd_cq_entry_size_words(struct ena_admin_aq_create_cq_cmd *p, uint8_t val)
{
- p->cq_caps_2 |=
- val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
+ p->cq_caps_2 |= val & ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
}
-static inline uint8_t
-get_ena_admin_get_set_feature_common_desc_select(
- const struct ena_admin_get_set_feature_common_desc *p)
+static inline uint8_t get_ena_admin_get_set_feature_common_desc_select(const struct ena_admin_get_set_feature_common_desc *p)
{
return p->flags & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
}
-static inline void
-set_ena_admin_get_set_feature_common_desc_select(
- struct ena_admin_get_set_feature_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_get_set_feature_common_desc_select(struct ena_admin_get_set_feature_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_GET_SET_FEATURE_COMMON_DESC_SELECT_MASK;
}
-static inline uint32_t
-get_ena_admin_get_feature_link_desc_autoneg(
- const struct ena_admin_get_feature_link_desc *p)
+static inline uint32_t get_ena_admin_get_feature_link_desc_autoneg(const struct ena_admin_get_feature_link_desc *p)
{
return p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
}
-static inline void
-set_ena_admin_get_feature_link_desc_autoneg(
- struct ena_admin_get_feature_link_desc *p,
- uint32_t val)
+static inline void set_ena_admin_get_feature_link_desc_autoneg(struct ena_admin_get_feature_link_desc *p, uint32_t val)
{
p->flags |= val & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK;
}
-static inline uint32_t
-get_ena_admin_get_feature_link_desc_duplex(
- const struct ena_admin_get_feature_link_desc *p)
+static inline uint32_t get_ena_admin_get_feature_link_desc_duplex(const struct ena_admin_get_feature_link_desc *p)
{
- return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK)
- >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
+ return (p->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK) >> ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT;
}
-static inline void
-set_ena_admin_get_feature_link_desc_duplex(
- struct ena_admin_get_feature_link_desc *p,
- uint32_t val)
+static inline void set_ena_admin_get_feature_link_desc_duplex(struct ena_admin_get_feature_link_desc *p, uint32_t val)
{
- p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT)
- & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
+ p->flags |= (val << ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_SHIFT) & ENA_ADMIN_GET_FEATURE_LINK_DESC_DUPLEX_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
{
return p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
p->tx |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L3_CSUM_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv4_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_FULL_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_part(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_TX_L4_ipv6_csum_full(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val <<
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_FULL_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv4(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ipv6(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ipv6(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ipv6(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ipv6(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_tso_ecn(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_tso_ecn(const struct ena_admin_feature_offload_desc *p)
{
- return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
+ return (p->tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_tso_ecn(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_tso_ecn(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
+ p->tx |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(const struct ena_admin_feature_offload_desc *p)
{
- return p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+ return p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L3_csum_ipv4(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
+ p->rx_supported |= val & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L3_CSUM_IPV4_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv4_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_L4_ipv6_csum(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_offload_desc_RX_hash(
- const struct ena_admin_feature_offload_desc *p)
+static inline uint32_t get_ena_admin_feature_offload_desc_RX_hash(const struct ena_admin_feature_offload_desc *p)
{
- return (p->rx_supported &
- ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK)
- >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
+ return (p->rx_supported & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) >> ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT;
}
-static inline void
-set_ena_admin_feature_offload_desc_RX_hash(
- struct ena_admin_feature_offload_desc *p,
- uint32_t val)
+static inline void set_ena_admin_feature_offload_desc_RX_hash(struct ena_admin_feature_offload_desc *p, uint32_t val)
{
- p->rx_supported |=
- (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT)
- & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
+ p->rx_supported |= (val << ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_SHIFT) & ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_rss_flow_hash_function_funcs(
- const struct ena_admin_feature_rss_flow_hash_function *p)
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_funcs(const struct ena_admin_feature_rss_flow_hash_function *p)
{
- return p->supported_func &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+ return p->supported_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_function_funcs(
- struct ena_admin_feature_rss_flow_hash_function *p,
- uint32_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_function_funcs(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
{
- p->supported_func |=
- val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
+ p->supported_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_FUNCS_MASK;
}
-static inline uint32_t
-get_ena_admin_feature_rss_flow_hash_function_selected_func(
- const struct ena_admin_feature_rss_flow_hash_function *p)
+static inline uint32_t get_ena_admin_feature_rss_flow_hash_function_selected_func(const struct ena_admin_feature_rss_flow_hash_function *p)
{
- return p->selected_func &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+ return p->selected_func & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_function_selected_func(
- struct ena_admin_feature_rss_flow_hash_function *p,
- uint32_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_function_selected_func(struct ena_admin_feature_rss_flow_hash_function *p, uint32_t val)
{
- p->selected_func |=
- val &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
+ p->selected_func |= val & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_L3_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->supported_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_L3_sort(
- struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->supported_input_sort |=
- (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_L4_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->supported_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
+ return (p->supported_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_L4_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->supported_input_sort |=
- (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
+ p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->enabled_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L3_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->enabled_input_sort |=
- (val <<
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L3_SORT_MASK;
}
-static inline uint16_t
-get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
- const struct ena_admin_feature_rss_flow_hash_input *p)
+static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(const struct ena_admin_feature_rss_flow_hash_input *p)
{
- return (p->enabled_input_sort &
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK)
- >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
+ return (p->enabled_input_sort & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT;
}
-static inline void
-set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(
- struct ena_admin_feature_rss_flow_hash_input *p,
- uint16_t val)
+static inline void set_ena_admin_feature_rss_flow_hash_input_enable_L4_sort(struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val)
{
- p->enabled_input_sort |=
- (val <<
- ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT)
- & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
+ p->enabled_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_SHIFT) & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_ENABLE_L4_SORT_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_major(const struct ena_admin_host_info *p)
{
return p->driver_version & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
}
-static inline void
-set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_major(struct ena_admin_host_info *p, uint32_t val)
{
p->driver_version |= val & ENA_ADMIN_HOST_INFO_MAJOR_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_minor(const struct ena_admin_host_info *p)
{
- return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK)
- >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_MINOR_SHIFT;
}
-static inline void
-set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_minor(struct ena_admin_host_info *p, uint32_t val)
{
- p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT)
- & ENA_ADMIN_HOST_INFO_MINOR_MASK;
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_MINOR_MASK;
}
-static inline uint32_t
-get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
+static inline uint32_t get_ena_admin_host_info_sub_minor(const struct ena_admin_host_info *p)
{
- return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK)
- >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
+ return (p->driver_version & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK) >> ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT;
}
-static inline void
-set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
+static inline void set_ena_admin_host_info_sub_minor(struct ena_admin_host_info *p, uint32_t val)
{
- p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT)
- & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
+ p->driver_version |= (val << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) & ENA_ADMIN_HOST_INFO_SUB_MINOR_MASK;
}
-static inline uint8_t
-get_ena_admin_aenq_common_desc_phase(
- const struct ena_admin_aenq_common_desc *p)
+static inline uint8_t get_ena_admin_aenq_common_desc_phase(const struct ena_admin_aenq_common_desc *p)
{
return p->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
}
-static inline void
-set_ena_admin_aenq_common_desc_phase(
- struct ena_admin_aenq_common_desc *p,
- uint8_t val)
+static inline void set_ena_admin_aenq_common_desc_phase(struct ena_admin_aenq_common_desc *p, uint8_t val)
{
p->flags |= val & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK;
}
-static inline uint32_t
-get_ena_admin_aenq_link_change_desc_link_status(
- const struct ena_admin_aenq_link_change_desc *p)
+static inline uint32_t get_ena_admin_aenq_link_change_desc_link_status(const struct ena_admin_aenq_link_change_desc *p)
{
return p->flags & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
}
-static inline void
-set_ena_admin_aenq_link_change_desc_link_status(
- struct ena_admin_aenq_link_change_desc *p,
- uint32_t val)
+static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_admin_aenq_link_change_desc *p, uint32_t val)
{
p->flags |= val & ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
}
diff --git a/drivers/net/ena/base/ena_defs/ena_common_defs.h b/drivers/net/ena/base/ena_defs/ena_common_defs.h
index 95e0f389..072e6c1f 100644
--- a/drivers/net/ena/base/ena_defs/ena_common_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_common_defs.h
@@ -34,17 +34,13 @@
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
-/* spec version */
-#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* spec version major */
-#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* spec version minor */
+#define ENA_COMMON_SPEC_VERSION_MAJOR 0 /* */
+#define ENA_COMMON_SPEC_VERSION_MINOR 10 /* */
/* ENA operates with 48-bit memory addresses. ena_mem_addr_t */
struct ena_common_mem_addr {
- /* word 0 : low 32 bit of the memory address */
uint32_t mem_addr_low;
- /* word 1 : */
- /* high 16 bits of the memory address */
uint16_t mem_addr_high;
/* MBZ */
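
A sketch of splitting a 48-bit DMA address across the two words above
(the trailing MBZ field is elided by this hunk):

	static inline void
	ena_set_mem_addr(struct ena_common_mem_addr *a, uint64_t addr)
	{
		a->mem_addr_low = (uint32_t)addr;		/* bits 31:0  */
		a->mem_addr_high = (uint16_t)(addr >> 32);	/* bits 47:32 */
	}
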
diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
index 6bc3d6a7..4cf0b205 100644
--- a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h
@@ -34,35 +34,30 @@
#ifndef _ENA_ETH_IO_H_
#define _ENA_ETH_IO_H_
-/* Layer 3 protocol index */
enum ena_eth_io_l3_proto_index {
- ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L3_PROTO_UNKNOWN = 0,
- ENA_ETH_IO_L3_PROTO_IPV4 = 8,
+ ENA_ETH_IO_L3_PROTO_IPV4 = 8,
- ENA_ETH_IO_L3_PROTO_IPV6 = 11,
+ ENA_ETH_IO_L3_PROTO_IPV6 = 11,
- ENA_ETH_IO_L3_PROTO_FCOE = 21,
+ ENA_ETH_IO_L3_PROTO_FCOE = 21,
- ENA_ETH_IO_L3_PROTO_ROCE = 22,
+ ENA_ETH_IO_L3_PROTO_ROCE = 22,
};
-/* Layer 4 protocol index */
enum ena_eth_io_l4_proto_index {
- ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
+ ENA_ETH_IO_L4_PROTO_UNKNOWN = 0,
- ENA_ETH_IO_L4_PROTO_TCP = 12,
+ ENA_ETH_IO_L4_PROTO_TCP = 12,
- ENA_ETH_IO_L4_PROTO_UDP = 13,
+ ENA_ETH_IO_L4_PROTO_UDP = 13,
- ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
+ ENA_ETH_IO_L4_PROTO_ROUTEABLE_ROCE = 23,
};
-/* ENA IO Queue Tx descriptor */
struct ena_eth_io_tx_desc {
- /* word 0 : */
- /* length, request id and control flags
- * 15:0 : length - Buffer length in bytes, must
+ /* 15:0 : length - Buffer length in bytes, must
 * include any packet trailers that the ENA is supposed
 * to update, like End-to-End CRC, Authentication GMAC,
 * etc. This length must not include the
@@ -85,9 +80,7 @@ struct ena_eth_io_tx_desc {
*/
uint32_t len_ctrl;
- /* word 1 : */
- /* ethernet control
- * 3:0 : l3_proto_idx - L3 protocol. This field
+	/* 3:0 : l3_proto_idx - L3 protocol. This field is
 * required when l3_csum_en, l3_csum or tso_en are set.
 * 4 : DF - IPv4 DF, must be 0 if the packet is IPv4 and
 * the DF flag of the IPv4 header is 0. Otherwise must
@@ -119,10 +112,8 @@ struct ena_eth_io_tx_desc {
*/
uint32_t meta_ctrl;
- /* word 2 : Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 3 : */
/* address high and header size
* 15:0 : addr_hi - Buffer Pointer[47:32]
* 23:16 : reserved16_w2
@@ -141,20 +132,16 @@ struct ena_eth_io_tx_desc {
uint32_t buff_addr_hi_hdr_sz;
};
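
A sketch of placing a 48-bit buffer pointer per the addr_hi layout
above; only bits 15:0 of buff_addr_hi_hdr_sz carry the address, so the
header-size bits must be preserved:

	static inline void
	ena_tx_desc_set_addr(struct ena_eth_io_tx_desc *d, uint64_t addr)
	{
		d->buff_addr_lo = (uint32_t)addr;	/* Buffer Pointer[31:0] */
		d->buff_addr_hi_hdr_sz =
			(d->buff_addr_hi_hdr_sz & ~0xffffU) |	/* keep hdr size */
			(uint32_t)((addr >> 32) & 0xffff); /* Buffer Pointer[47:32] */
	}
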
-/* ENA IO Queue Tx Meta descriptor */
struct ena_eth_io_tx_meta_desc {
- /* word 0 : */
- /* length, request id and control flags
- * 9:0 : req_id_lo - Request ID[9:0]
+ /* 9:0 : req_id_lo - Request ID[9:0]
* 11:10 : reserved10 - MBZ
* 12 : reserved12 - MBZ
* 13 : reserved13 - MBZ
* 14 : ext_valid - if set, offset fields in Word2
- * are valid Also MSS High in Word 0 and Outer L3
- * Offset High in WORD 0 and bits [31:24] in Word 3
- * 15 : word3_valid - If set Crypto Info[23:0] of
- * Word 3 is valid
- * 19:16 : mss_hi_ptp
+	 * are valid. Also MSS High in Word 0 and bits [31:24]
+ * in Word 3
+ * 15 : reserved15
+ * 19:16 : mss_hi
* 20 : eth_meta_type - 0: Tx Metadata Descriptor, 1:
* Extended Metadata Descriptor
* 21 : meta_store - Store extended metadata in queue
@@ -175,19 +162,13 @@ struct ena_eth_io_tx_meta_desc {
*/
uint32_t len_ctrl;
- /* word 1 : */
- /* word 1
- * 5:0 : req_id_hi
+ /* 5:0 : req_id_hi
* 31:6 : reserved6 - MBZ
*/
uint32_t word1;
- /* word 2 : */
- /* word 2
- * 7:0 : l3_hdr_len - the header length L3 IP header.
- * 15:8 : l3_hdr_off - the offset of the first byte
- * in the L3 header from the beginning of the to-be
- * transmitted packet.
+ /* 7:0 : l3_hdr_len
+ * 15:8 : l3_hdr_off
* 21:16 : l4_hdr_len_in_words - counts the L4 header
 * length in words. There is an explicit assumption
 * that the L4 header appears right after the L3 header and
@@ -196,13 +177,10 @@ struct ena_eth_io_tx_meta_desc {
*/
uint32_t word2;
- /* word 3 : */
uint32_t reserved;
};
-/* ENA IO Queue Tx completions descriptor */
struct ena_eth_io_tx_cdesc {
- /* word 0 : */
/* Request ID[15:0] */
uint16_t req_id;
@@ -214,24 +192,19 @@ struct ena_eth_io_tx_cdesc {
*/
uint8_t flags;
- /* word 1 : */
uint16_t sub_qid;
- /* indicates location of submission queue head */
uint16_t sq_head_idx;
};
-/* ENA IO Queue Rx descriptor */
struct ena_eth_io_rx_desc {
- /* word 0 : */
/* In bytes. 0 means 64KB */
uint16_t length;
/* MBZ */
uint8_t reserved2;
- /* control flags
- * 0 : phase
+ /* 0 : phase
* 1 : reserved1 - MBZ
* 2 : first - Indicates first descriptor in
* transaction
@@ -242,32 +215,27 @@ struct ena_eth_io_rx_desc {
*/
uint8_t ctrl;
- /* word 1 : */
uint16_t req_id;
/* MBZ */
uint16_t reserved6;
- /* word 2 : Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 3 : */
- /* Buffer Address bits[47:16] */
uint16_t buff_addr_hi;
/* MBZ */
uint16_t reserved16_w3;
};
-/* ENA IO Queue Rx Completion Base Descriptor (4-word format). Note: all
- * ethernet parsing information are valid only when last=1
+/* 4-word format. Note: all Ethernet parsing information is valid only when
+ * last=1
*/
struct ena_eth_io_rx_cdesc_base {
- /* word 0 : */
- /* 4:0 : l3_proto_idx - L3 protocol index
- * 6:5 : src_vlan_cnt - Source VLAN count
+ /* 4:0 : l3_proto_idx
+ * 6:5 : src_vlan_cnt
* 7 : reserved7 - MBZ
- * 12:8 : l4_proto_idx - L4 protocol index
+ * 12:8 : l4_proto_idx
 * 13 : l3_csum_err - when set, either an L3
 * checksum error was detected or the controller didn't
 * validate the checksum. This bit is valid only when
@@ -292,56 +260,43 @@ struct ena_eth_io_rx_cdesc_base {
*/
uint32_t status;
- /* word 1 : */
uint16_t length;
uint16_t req_id;
- /* word 2 : 32-bit hash result */
+ /* 32-bit hash result */
uint32_t hash;
- /* word 3 : */
- /* submission queue number */
uint16_t sub_qid;
uint16_t reserved;
};
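
A sketch of pulling the low status bits out by hand; real consumers
would use generated accessors, and the 0x1f mask simply mirrors the
"4:0 : l3_proto_idx" layout documented above:

	static inline uint8_t
	ena_rx_cdesc_l3_proto(const struct ena_eth_io_rx_cdesc_base *c)
	{
		return (uint8_t)(c->status & 0x1f);	/* 4:0 : l3_proto_idx */
	}
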
-/* ENA IO Queue Rx Completion Descriptor (8-word format) */
+/* 8-word format */
struct ena_eth_io_rx_cdesc_ext {
- /* words 0:3 : Rx Completion Extended */
struct ena_eth_io_rx_cdesc_base base;
- /* word 4 : Completed Buffer address bits[31:0] */
uint32_t buff_addr_lo;
- /* word 5 : */
- /* the buffer address used bits[47:32] */
uint16_t buff_addr_hi;
uint16_t reserved16;
- /* word 6 : Reserved */
uint32_t reserved_w6;
- /* word 7 : Reserved */
uint32_t reserved_w7;
};
-/* ENA Interrupt Unmask Register */
struct ena_eth_io_intr_reg {
- /* word 0 : */
- /* 14:0 : rx_intr_delay - rx interrupt delay value
- * 29:15 : tx_intr_delay - tx interrupt delay value
- * 30 : intr_unmask - if set, unmasks interrupt
+ /* 14:0 : rx_intr_delay
+ * 29:15 : tx_intr_delay
+ * 30 : intr_unmask
* 31 : reserved
*/
uint32_t intr_control;
};
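
A sketch composing the interrupt control word by hand from the bit
layout above (the delay units are device-defined and not specified in
this file):

	static inline uint32_t
	ena_intr_reg(uint32_t rx_delay, uint32_t tx_delay, int unmask)
	{
		return (rx_delay & 0x7fff) |		/* 14:0  : rx_intr_delay */
		       ((tx_delay & 0x7fff) << 15) |	/* 29:15 : tx_intr_delay */
		       ((uint32_t)!!unmask << 30);	/* 30    : intr_unmask   */
	}
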
-/* ENA NUMA Node configuration register */
struct ena_eth_io_numa_node_cfg_reg {
- /* word 0 : */
/* 7:0 : numa
* 30:8 : reserved
* 31 : enabled
@@ -388,10 +343,8 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0)
#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14
#define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14)
-#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15
-#define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK BIT(15)
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT 16
-#define ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK GENMASK(19, 16)
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT 16
+#define ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK GENMASK(19, 16)
#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT 20
#define ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK BIT(20)
#define ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT 21
@@ -463,803 +416,544 @@ struct ena_eth_io_numa_node_cfg_reg {
#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31)
#if !defined(ENA_DEFS_LINUX_MAINLINE)
-static inline uint32_t get_ena_eth_io_tx_desc_length(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_length(const struct ena_eth_io_tx_desc *p)
{
return p->len_ctrl & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
}
-static inline void set_ena_eth_io_tx_desc_length(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_length(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->len_ctrl |= val & ENA_ETH_IO_TX_DESC_LENGTH_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_hi(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK)
- >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_req_id_hi(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_req_id_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT)
- & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_meta_desc(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK)
- >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_DESC_META_DESC_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_meta_desc(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_meta_desc(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT)
- & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_DESC_META_DESC_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_phase(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_phase(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK)
- >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_DESC_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_phase(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_phase(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT)
- & ENA_ETH_IO_TX_DESC_PHASE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_DESC_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_first(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_first(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK)
- >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_first(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_first(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_TX_DESC_FIRST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_DESC_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_last(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_last(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK)
- >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_LAST_MASK) >> ENA_ETH_IO_TX_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_last(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_last(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT)
- & ENA_ETH_IO_TX_DESC_LAST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_DESC_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_comp_req(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_comp_req(const struct ena_eth_io_tx_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_comp_req(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_comp_req(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->len_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l3_proto_idx(const struct ena_eth_io_tx_desc *p)
{
return p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
}
-static inline void set_ena_eth_io_tx_desc_l3_proto_idx(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l3_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->meta_ctrl |= val & ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_DF(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_DF(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK)
- >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_DF_MASK) >> ENA_ETH_IO_TX_DESC_DF_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_DF(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_DF(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_DF_SHIFT)
- & ENA_ETH_IO_TX_DESC_DF_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_DF_SHIFT) & ENA_ETH_IO_TX_DESC_DF_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_tso_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_tso_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TSO_EN_MASK) >> ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_tso_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_tso_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) & ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_proto_idx(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_proto_idx(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_proto_idx(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l3_csum_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l3_csum_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l3_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_en(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_csum_en(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_csum_en(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_ethernet_fcs_dis(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK)
- >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK) >> ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_ethernet_fcs_dis(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT)
- & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_SHIFT) & ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_l4_csum_partial(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK)
- >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK) >> ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_l4_csum_partial(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_l4_csum_partial(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |=
- (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT)
- & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo(const struct ena_eth_io_tx_desc *p)
{
- return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK)
- >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
+ return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK) >> ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_req_id_lo(
- struct ena_eth_io_tx_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_desc_req_id_lo(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT)
- & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
+ p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_addr_hi(const struct ena_eth_io_tx_desc *p)
{
return p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
}
-static inline void set_ena_eth_io_tx_desc_addr_hi(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_addr_hi(struct ena_eth_io_tx_desc *p, uint32_t val)
{
p->buff_addr_hi_hdr_sz |= val & ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_desc_header_length(
- const struct ena_eth_io_tx_desc *p)
+static inline uint32_t get_ena_eth_io_tx_desc_header_length(const struct ena_eth_io_tx_desc *p)
{
- return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK)
- >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
+ return (p->buff_addr_hi_hdr_sz & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK) >> ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT;
}
-static inline void set_ena_eth_io_tx_desc_header_length(
- struct ena_eth_io_tx_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_desc_header_length(struct ena_eth_io_tx_desc *p, uint32_t val)
{
- p->buff_addr_hi_hdr_sz |=
- (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT)
- & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
+ p->buff_addr_hi_hdr_sz |= (val << ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) & ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_lo(const struct ena_eth_io_tx_meta_desc *p)
{
return p->len_ctrl & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_req_id_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(
- const struct ena_eth_io_tx_meta_desc *p)
-{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK)
- >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
-}
-
-static inline void set_ena_eth_io_tx_meta_desc_ext_valid(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
-{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
-}
-
-static inline uint32_t get_ena_eth_io_tx_meta_desc_word3_valid(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK)
- >> ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK) >> ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_word3_valid(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_ext_valid(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK)
- >> ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_mss_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK) >> ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK) >> ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_meta_store(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_meta_store(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK)
- >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK) >> ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_meta_desc(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_meta_desc(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_phase(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK)
- >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_PHASE_MASK) >> ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_phase(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_phase(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_first(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_first(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK)
- >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_FIRST_MASK) >> ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_first(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_first(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_last(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_last(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK)
- >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_LAST_MASK) >> ENA_ETH_IO_TX_META_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_last(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_last(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
+ return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_comp_req(
- struct ena_eth_io_tx_meta_desc *p, uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_comp_req(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
+ p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_req_id_hi(const struct ena_eth_io_tx_meta_desc *p)
{
return p->word1 & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_req_id_hi(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word1 |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_HI_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_len(const struct ena_eth_io_tx_meta_desc *p)
{
return p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
}
-static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_len(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
p->word2 |= val & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l3_hdr_off(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK)
- >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK) >> ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l3_hdr_off(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) & ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK)
- >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK) >> ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_l4_hdr_len_in_words(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
}
-static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(
- const struct ena_eth_io_tx_meta_desc *p)
+static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_lo(const struct ena_eth_io_tx_meta_desc *p)
{
- return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK)
- >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
+ return (p->word2 & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK) >> ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT;
}
-static inline void set_ena_eth_io_tx_meta_desc_mss_lo(
- struct ena_eth_io_tx_meta_desc *p,
- uint32_t val)
+static inline void set_ena_eth_io_tx_meta_desc_mss_lo(struct ena_eth_io_tx_meta_desc *p, uint32_t val)
{
- p->word2 |=
- (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT)
- & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
+ p->word2 |= (val << ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
}
-static inline uint8_t get_ena_eth_io_tx_cdesc_phase(
- const struct ena_eth_io_tx_cdesc *p)
+static inline uint8_t get_ena_eth_io_tx_cdesc_phase(const struct ena_eth_io_tx_cdesc *p)
{
return p->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}
-static inline void set_ena_eth_io_tx_cdesc_phase(
- struct ena_eth_io_tx_cdesc *p,
- uint8_t val)
+static inline void set_ena_eth_io_tx_cdesc_phase(struct ena_eth_io_tx_cdesc *p, uint8_t val)
{
p->flags |= val & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_phase(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_phase(const struct ena_eth_io_rx_desc *p)
{
return p->ctrl & ENA_ETH_IO_RX_DESC_PHASE_MASK;
}
-static inline void set_ena_eth_io_rx_desc_phase(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_phase(struct ena_eth_io_rx_desc *p, uint8_t val)
{
p->ctrl |= val & ENA_ETH_IO_RX_DESC_PHASE_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_first(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_first(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK)
- >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_FIRST_MASK) >> ENA_ETH_IO_RX_DESC_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_first(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_first(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT)
- & ENA_ETH_IO_RX_DESC_FIRST_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_FIRST_SHIFT) & ENA_ETH_IO_RX_DESC_FIRST_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_last(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_last(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK)
- >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_LAST_MASK) >> ENA_ETH_IO_RX_DESC_LAST_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_last(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_last(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT)
- & ENA_ETH_IO_RX_DESC_LAST_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_LAST_SHIFT) & ENA_ETH_IO_RX_DESC_LAST_MASK;
}
-static inline uint8_t get_ena_eth_io_rx_desc_comp_req(
- const struct ena_eth_io_rx_desc *p)
+static inline uint8_t get_ena_eth_io_rx_desc_comp_req(const struct ena_eth_io_rx_desc *p)
{
- return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK)
- >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
+ return (p->ctrl & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK) >> ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT;
}
-static inline void set_ena_eth_io_rx_desc_comp_req(
- struct ena_eth_io_rx_desc *p,
- uint8_t val)
+static inline void set_ena_eth_io_rx_desc_comp_req(struct ena_eth_io_rx_desc *p, uint8_t val)
{
- p->ctrl |=
- (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT)
- & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ p->ctrl |= (val << ENA_ETH_IO_RX_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
return p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(
- struct ena_eth_io_rx_cdesc_base *p,
- uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
p->status |= val & ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_src_vlan_cnt(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(
- struct ena_eth_io_rx_cdesc_base *p,
- uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |=
- (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_phase(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_phase(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_first(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_first(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_first(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_last(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_last(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_last(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK;
}
-static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(
- const struct ena_eth_io_rx_cdesc_base *p)
+static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer(const struct ena_eth_io_rx_cdesc_base *p)
{
- return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK)
- >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
+ return (p->status & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT;
}
-static inline void set_ena_eth_io_rx_cdesc_base_buffer(
- struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
+static inline void set_ena_eth_io_rx_cdesc_base_buffer(struct ena_eth_io_rx_cdesc_base *p, uint32_t val)
{
- p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT)
- & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
+ p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay(const struct ena_eth_io_intr_reg *p)
{
return p->intr_control & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
}
-static inline void set_ena_eth_io_intr_reg_rx_intr_delay(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_rx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
{
p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay(const struct ena_eth_io_intr_reg *p)
{
- return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK)
- >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK) >> ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT;
}
-static inline void set_ena_eth_io_intr_reg_tx_intr_delay(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_tx_intr_delay(struct ena_eth_io_intr_reg *p, uint32_t val)
{
- p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT)
- & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK;
}
-static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(
- const struct ena_eth_io_intr_reg *p)
+static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask(const struct ena_eth_io_intr_reg *p)
{
- return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK)
- >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
+ return (p->intr_control & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK) >> ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT;
}
-static inline void set_ena_eth_io_intr_reg_intr_unmask(
- struct ena_eth_io_intr_reg *p, uint32_t val)
+static inline void set_ena_eth_io_intr_reg_intr_unmask(struct ena_eth_io_intr_reg *p, uint32_t val)
{
- p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT)
- & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
+ p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
}
-static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(
- const struct ena_eth_io_numa_node_cfg_reg *p)
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa(const struct ena_eth_io_numa_node_cfg_reg *p)
{
return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
}
-static inline void set_ena_eth_io_numa_node_cfg_reg_numa(
- struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+static inline void set_ena_eth_io_numa_node_cfg_reg_numa(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
{
p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK;
}
-static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(
- const struct ena_eth_io_numa_node_cfg_reg *p)
+static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled(const struct ena_eth_io_numa_node_cfg_reg *p)
{
- return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK)
- >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
+ return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT;
}
-static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(
- struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
+static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val)
{
- p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT)
- & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
+ p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
}
#endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */
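
Every accessor in this header is the same mask-and-shift idiom; the reflow above only joins each expression onto a single line. A minimal sketch of the pattern, with an illustrative field name, shift, and mask that are stand-ins rather than real ENA definitions:

    #include <stdint.h>

    #define EXAMPLE_FIELD_SHIFT 4
    #define EXAMPLE_FIELD_MASK  0x00000ff0	/* bits 4..11 */

    /* get: mask the word first, then shift the field down to bit 0 */
    static inline uint32_t example_get_field(uint32_t word)
    {
    	return (word & EXAMPLE_FIELD_MASK) >> EXAMPLE_FIELD_SHIFT;
    }

    /* set: shift the value up, clamp it to the field, OR it in; like the
     * ENA setters above this does not clear stale bits first, presumably
     * because descriptors are zeroed before being filled
     */
    static inline uint32_t example_set_field(uint32_t word, uint32_t val)
    {
    	return word | ((val << EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK);
    }
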
diff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h
index 3d252096..e87bcfd8 100644
--- a/drivers/net/ena/base/ena_defs/ena_gen_info.h
+++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h
@@ -31,5 +31,5 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define ENA_GEN_DATE "Sun Jun 5 10:24:39 IDT 2016"
-#define ENA_GEN_COMMIT "17146ed"
+#define ENA_GEN_DATE "Sun Oct 23 12:27:32 IDT 2016"
+#define ENA_GEN_COMMIT "79d82fa"
diff --git a/drivers/net/ena/base/ena_defs/ena_includes.h b/drivers/net/ena/base/ena_defs/ena_includes.h
index a86c876f..30a920a8 100644
--- a/drivers/net/ena/base/ena_defs/ena_includes.h
+++ b/drivers/net/ena/base/ena_defs/ena_includes.h
@@ -35,5 +35,3 @@
#include "ena_regs_defs.h"
#include "ena_admin_defs.h"
#include "ena_eth_io_defs.h"
-#include "ena_efa_admin_defs.h"
-#include "ena_efa_io_defs.h"
diff --git a/drivers/net/ena/base/ena_defs/ena_regs_defs.h b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
index d0241278..b0870f25 100644
--- a/drivers/net/ena/base/ena_defs/ena_regs_defs.h
+++ b/drivers/net/ena/base/ena_defs/ena_regs_defs.h
@@ -34,6 +34,38 @@
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
+enum ena_regs_reset_reason_types {
+ ENA_REGS_RESET_NORMAL = 0,
+
+ ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
+
+ ENA_REGS_RESET_ADMIN_TO = 2,
+
+ ENA_REGS_RESET_MISS_TX_CMPL = 3,
+
+ ENA_REGS_RESET_INV_RX_REQ_ID = 4,
+
+ ENA_REGS_RESET_INV_TX_REQ_ID = 5,
+
+ ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
+
+ ENA_REGS_RESET_INIT_ERR = 7,
+
+ ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
+
+ ENA_REGS_RESET_OS_TRIGGER = 9,
+
+ ENA_REGS_RESET_OS_NETDEV_WD = 10,
+
+ ENA_REGS_RESET_SHUTDOWN = 11,
+
+ ENA_REGS_RESET_USER_TRIGGER = 12,
+
+ ENA_REGS_RESET_GENERIC = 13,
+
+ ENA_REGS_RESET_MISS_INTERRUPT = 14,
+};
+
/* ena_registers offsets */
#define ENA_REGS_VERSION_OFF 0x0
#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
@@ -80,6 +112,8 @@
#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
+#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */
#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
@@ -104,6 +138,8 @@
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
+#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
+#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */
#define ENA_REGS_DEV_STS_READY_MASK 0x1
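
The reset-reason plumbing pairs the enum above with the new DEV_CTL field: the reason code occupies the top nibble of dev_ctl. A plausible encoding helper, sketched rather than taken from this patch:

    #include <stdint.h>

    #define DEV_CTL_RESET_REASON_SHIFT 28		/* as defined above */
    #define DEV_CTL_RESET_REASON_MASK  0xf0000000

    /* Fold a reason (e.g. ENA_REGS_RESET_INV_TX_REQ_ID == 5) into dev_ctl
     * before requesting a reset, so the device can record why the driver
     * asked for it.
     */
    static inline uint32_t encode_reset_reason(uint32_t dev_ctl, uint32_t reason)
    {
    	dev_ctl &= ~DEV_CTL_RESET_REASON_MASK;
    	dev_ctl |= (reason << DEV_CTL_RESET_REASON_SHIFT) &
    		   DEV_CTL_RESET_REASON_MASK;
    	return dev_ctl;
    }
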
diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c
index 290a5666..4c4989a3 100644
--- a/drivers/net/ena/base/ena_eth_com.c
+++ b/drivers/net/ena/base/ena_eth_com.c
@@ -43,11 +43,10 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
head_masked = io_cq->head & (io_cq->q_depth - 1);
expected_phase = io_cq->phase;
- cdesc = (struct ena_eth_io_rx_cdesc_base *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
@@ -74,7 +73,7 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
offset = tail_masked * io_sq->desc_entry_size;
- return (unsigned char *)io_sq->desc_addr.virt_addr + offset;
+ return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
@@ -86,8 +85,8 @@ static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
return;
- memcpy_toio((unsigned char *)io_sq->desc_addr.pbuf_dev_addr + offset,
- (unsigned char *)io_sq->desc_addr.virt_addr + offset,
+ memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
+ io_sq->desc_addr.virt_addr + offset,
io_sq->desc_entry_size);
}
@@ -125,11 +124,11 @@ static inline struct ena_eth_io_rx_cdesc_base *
{
idx &= (io_cq->q_depth - 1);
return (struct ena_eth_io_rx_cdesc_base *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr +
- idx * io_cq->cdesc_entry_size_in_bytes);
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ idx * io_cq->cdesc_entry_size_in_bytes);
}
-static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
u16 *first_cdesc_idx)
{
struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -143,7 +142,7 @@ static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
@@ -183,9 +182,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
return false;
}
-static inline void ena_com_create_and_store_tx_meta_desc(
- struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -203,8 +201,8 @@ static inline void ena_com_create_and_store_tx_meta_desc(
ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
/* bits 10-13 of the mss */
meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
- ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) &
- ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK;
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
+ ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
@@ -237,11 +235,11 @@ static inline void ena_com_create_and_store_tx_meta_desc(
static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
- ena_rx_ctx->l3_proto = (enum ena_eth_io_l3_proto_index)(cdesc->status &
- ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK);
- ena_rx_ctx->l4_proto = (enum ena_eth_io_l4_proto_index)
- ((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT);
+ ena_rx_ctx->l3_proto = cdesc->status &
+ ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
+ ena_rx_ctx->l4_proto =
+ (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
ena_rx_ctx->l3_csum_err =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
@@ -280,8 +278,8 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
bool have_meta;
u64 addr_hi;
- ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX,
- "wrong Q type");
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX,
+ "wrong Q type");
/* num_bufs +1 for potential meta desc */
if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
@@ -410,8 +408,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 nb_hw_desc;
u16 i;
- ENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
- "wrong Q type");
+ ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
if (nb_hw_desc == 0) {
@@ -455,8 +453,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
{
struct ena_eth_io_rx_desc *desc;
- ENA_ASSERT(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX,
- "wrong Q type");
+ ENA_WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
+ "wrong Q type");
if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
return ENA_COM_NO_SPACE;
@@ -475,8 +473,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->buff_addr_lo = (u32)ena_buf->paddr;
desc->buff_addr_hi =
- ((ena_buf->paddr &
- GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
+ ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
ena_com_sq_update_tail(io_sq);
@@ -493,20 +490,37 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
expected_phase = io_cq->phase;
cdesc = (struct ena_eth_io_tx_cdesc *)
- ((unsigned char *)io_cq->cdesc_addr.virt_addr
- + (masked_head * io_cq->cdesc_entry_size_in_bytes));
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
/* When the current completion descriptor phase isn't the same as the
	 * expected, it means that the device still hasn't updated
* this completion.
*/
- cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
if (cdesc_phase != expected_phase)
return ENA_COM_TRY_AGAIN;
+ if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
+ ena_trc_err("Invalid req id %d\n", cdesc->req_id);
+ return ENA_COM_INVAL;
+ }
+
ena_com_cq_inc_head(io_cq);
- *req_id = cdesc->req_id;
+ *req_id = READ_ONCE(cdesc->req_id);
return 0;
}
+
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
+{
+ struct ena_eth_io_rx_cdesc_base *cdesc;
+
+ cdesc = ena_com_get_next_rx_cdesc(io_cq);
+	return cdesc == NULL;
+}
+
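
The READ_ONCE additions in this file all serve the phase-bit handshake: the device flips a phase bit in each completion descriptor on every queue wrap, and the driver compares it against the phase it expects to decide whether an entry is new. Reduced to a sketch, with an illustrative descriptor layout:

    #include <stdbool.h>
    #include <stdint.h>

    struct cdesc { uint8_t flags; };			/* stand-in layout */
    #define PHASE_MASK 0x1
    #define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))

    /* Poll a completion entry: the volatile load forces a fresh read each
     * iteration, so the compiler cannot cache a stale value and mistake
     * an old entry for a new completion.
     */
    static bool cdesc_is_ready(struct cdesc *c, uint8_t expected_phase)
    {
    	return (READ_ONCE(c->flags) & PHASE_MASK) == expected_phase;
    }
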
diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h
index 71a880c0..56ea4ae6 100644
--- a/drivers/net/ena/base/ena_eth_com.h
+++ b/drivers/net/ena/base/ena_eth_com.h
@@ -92,10 +92,12 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
+bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
+
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
struct ena_eth_io_intr_reg *intr_reg)
{
- ENA_REG_WRITE32(intr_reg->intr_control, io_cq->unmask_reg);
+ ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
@@ -118,7 +120,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
- ENA_REG_WRITE32(tail, io_sq->db_addr);
+ ENA_REG_WRITE32(io_sq->bus, tail, io_sq->db_addr);
return 0;
}
@@ -135,7 +137,7 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
if (io_cq->cq_head_db_reg && need_update) {
ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
io_cq->qid, head);
- ENA_REG_WRITE32(head, io_cq->cq_head_db_reg);
+ ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
io_cq->last_head_update = head;
}
@@ -153,7 +155,7 @@ static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK)
| ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK;
- ENA_REG_WRITE32(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
+ ENA_REG_WRITE32(io_cq->bus, numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg);
}
static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem)
diff --git a/drivers/net/ena/base/ena_plat.h b/drivers/net/ena/base/ena_plat.h
index b5b64545..f829936b 100644
--- a/drivers/net/ena/base/ena_plat.h
+++ b/drivers/net/ena/base/ena_plat.h
@@ -43,7 +43,11 @@
#include "ena_plat_dpdk.h"
#endif
#elif defined(__FreeBSD__)
+#if defined(_KERNEL)
+#include "ena_plat_fbsd.h"
+#else
#include "ena_plat_dpdk.h"
+#endif
#elif defined(_WIN32)
#include "ena_plat_windows.h"
#else
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 8cba319e..900ba1a6 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -73,10 +73,10 @@ typedef uint64_t dma_addr_t;
#define ENA_COM_INVAL -EINVAL
#define ENA_COM_NO_SPACE -ENOSPC
#define ENA_COM_NO_DEVICE -ENODEV
-#define ENA_COM_PERMISSION -EPERM
#define ENA_COM_TIMER_EXPIRED -ETIME
#define ENA_COM_FAULT -EFAULT
#define ENA_COM_TRY_AGAIN -EAGAIN
+#define ENA_COM_UNSUPPORTED -EOPNOTSUPP
#define ____cacheline_aligned __rte_cache_aligned
@@ -116,11 +116,13 @@ typedef uint64_t dma_addr_t;
#define ENA_MIN16(x, y) RTE_MIN((x), (y))
#define ENA_MIN8(x, y) RTE_MIN((x), (y))
+#define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
#define U64_C(x) x ## ULL
#define BIT(nr) (1UL << (nr))
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
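
The old GENMASK_ULL computed (1ULL << ((h) - (l) + 1)) - 1, which is undefined behaviour for a full-width mask: h = 63, l = 0 asks for a 64-bit shift. The replacement never shifts by more than 63 bits. A quick self-check of the new form:

    #include <inttypes.h>
    #include <stdio.h>

    #define BITS_PER_LONG_LONG (__SIZEOF_LONG_LONG__ * 8)
    #define GENMASK_ULL(h, l) (((~0ULL) - (1ULL << (l)) + 1) & \
    		(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))

    int main(void)
    {
    	/* full 64-bit mask: the old formula would shift 1ULL by 64 here */
    	printf("%016" PRIx64 "\n", (uint64_t)GENMASK_ULL(63, 0));
    	/* high dword, as used for buff_addr_hi in ena_eth_com.c above */
    	printf("%016" PRIx64 "\n", (uint64_t)GENMASK_ULL(63, 32));
    	return 0;
    }

This prints ffffffffffffffff and ffffffff00000000.
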
#ifdef RTE_LIBRTE_ENA_COM_DEBUG
#define ena_trc_dbg(format, arg...) \
@@ -138,6 +140,15 @@ typedef uint64_t dma_addr_t;
#define ena_trc_err(format, arg...) do { } while (0)
#endif /* RTE_LIBRTE_ENA_COM_DEBUG */
+#define ENA_WARN(cond, format, arg...) \
+do { \
+ if (unlikely(cond)) { \
+ ena_trc_err( \
+ "Warn failed on %s:%s:%d:" format, \
+ __FILE__, __func__, __LINE__, ##arg); \
+ } \
+} while (0)
+
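
ENA_WARN replaces ENA_ASSERT with inverted polarity: it logs when the condition is true, which is why the queue-direction checks in ena_eth_com.c flip from == to != above. A standalone sketch, with ena_trc_err() stubbed out for illustration:

    #include <stdio.h>

    #define ena_trc_err(format, arg...) fprintf(stderr, format, ##arg)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    #define ENA_WARN(cond, format, arg...) \
    do { \
    	if (unlikely(cond)) { \
    		ena_trc_err("Warn failed on %s:%s:%d:" format, \
    			    __FILE__, __func__, __LINE__, ##arg); \
    	} \
    } while (0)

    enum { DIR_TX = 1, DIR_RX = 2 };

    int main(void)
    {
    	int direction = DIR_RX;

    	/* a TX-path check on an RX queue: the predicate is true, so it logs */
    	ENA_WARN(direction != DIR_TX, "wrong Q type\n");
    	return 0;
    }
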
/* Spinlock related methods */
#define ena_spinlock_t rte_spinlock_t
#define ENA_SPINLOCK_INIT(spinlock) rte_spinlock_init(&spinlock)
@@ -177,10 +188,21 @@ typedef uint64_t dma_addr_t;
#define ENA_WAIT_EVENT_SIGNAL(waitevent) pthread_cond_signal(&waitevent.cond)
/* pthread condition doesn't need to be rearmed after usage */
#define ENA_WAIT_EVENT_CLEAR(...)
+#define ENA_WAIT_EVENT_DESTROY(waitqueue) ((void)(waitqueue))
#define ena_wait_event_t ena_wait_queue_t
#define ENA_MIGHT_SLEEP()
+#define ENA_TIME_EXPIRE(timeout) ((timeout) < rte_get_timer_cycles())
+#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
+ (timeout_us * rte_get_timer_hz() / 1000000 + rte_get_timer_cycles())
+
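
The two timeout macros turn a microsecond budget into an absolute deadline in timer cycles once, so every expiry check is a single comparison. The arithmetic, with the DPDK timer calls replaced by stand-ins for readability (rte_get_timer_hz() and rte_get_timer_cycles() are the real sources):

    #include <stdint.h>

    static uint64_t timer_hz(void)     { return 2400000000ULL; /* e.g. 2.4 GHz */ }
    static uint64_t timer_cycles(void) { return 0; /* would read the counter */ }

    /* mirror of ENA_GET_SYSTEM_TIMEOUT: scale us to cycles, anchor at now */
    static uint64_t get_system_timeout(uint64_t timeout_us)
    {
    	return timeout_us * timer_hz() / 1000000 + timer_cycles();
    }

    /* mirror of ENA_TIME_EXPIRE */
    static int time_expired(uint64_t deadline)
    {
    	return deadline < timer_cycles();
    }

Multiplying before dividing keeps sub-microsecond precision; it could overflow 64 bits for enormous timeouts, but the timeouts used here appear to be far below that range.
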
+/*
+ * Each rte_memzone should have a unique name.
+ * To satisfy this, count the number of allocations and append the count
+ * to the name.
+ */
+extern uint32_t ena_alloc_cnt;
+
#define ENA_MEM_ALLOC_COHERENT(dmadev, size, virt, phys, handle) \
do { \
const struct rte_memzone *mz; \
@@ -188,47 +210,57 @@ typedef uint64_t dma_addr_t;
ENA_TOUCH(dmadev); ENA_TOUCH(handle); \
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->iova; \
+ mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, \
+ RTE_MEMZONE_IOVA_CONTIG); \
handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
} while (0)
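
Both coherent-allocation macros now check the memzone pointer before touching it; the previous code passed a potentially NULL mz straight to memset(). The shape of the fix, reduced to plain C with illustrative stand-ins for the memzone type and reserve call:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct zone { void *addr; uint64_t iova; };

    struct zone *reserve_zone(size_t size);	/* stand-in; may return NULL */

    static void alloc_coherent(size_t size, void **virt, uint64_t *phys)
    {
    	struct zone *mz = reserve_zone(size);

    	if (mz == NULL) {	/* the old macro dereferenced mz here */
    		*virt = NULL;
    		*phys = 0;
    		return;
    	}
    	memset(mz->addr, 0, size);
    	*virt = mz->addr;
    	*phys = mz->iova;
    }
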
#define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \
({ ENA_TOUCH(size); ENA_TOUCH(phys); \
ENA_TOUCH(dmadev); \
rte_memzone_free(handle); })
-#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, node, dev_node) \
+#define ENA_MEM_ALLOC_COHERENT_NODE( \
+ dmadev, size, virt, phys, mem_handle, node, dev_node) \
do { \
const struct rte_memzone *mz; \
char z_name[RTE_MEMZONE_NAMESIZE]; \
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
snprintf(z_name, sizeof(z_name), \
"ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
- phys = mz->iova; \
+ mz = rte_memzone_reserve(z_name, size, node, \
+ RTE_MEMZONE_IOVA_CONTIG); \
+ mem_handle = mz; \
+ if (mz == NULL) { \
+ virt = NULL; \
+ phys = 0; \
+ } else { \
+ memset(mz->addr, 0, size); \
+ virt = mz->addr; \
+ phys = mz->iova; \
+ } \
} while (0)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \
do { \
- const struct rte_memzone *mz; \
- char z_name[RTE_MEMZONE_NAMESIZE]; \
ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \
- snprintf(z_name, sizeof(z_name), \
- "ena_alloc_%d", ena_alloc_cnt++); \
- mz = rte_memzone_reserve(z_name, size, node, 0); \
- memset(mz->addr, 0, size); \
- virt = mz->addr; \
+ virt = rte_zmalloc_socket(NULL, size, 0, node); \
} while (0)
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })
-#define ENA_REG_WRITE32(value, reg) rte_write32_relaxed((value), (reg))
-#define ENA_REG_READ32(reg) rte_read32_relaxed((reg))
+#define ENA_REG_WRITE32(bus, value, reg) \
+ ({ (void)(bus); rte_write32_relaxed((value), (reg)); })
+#define ENA_REG_READ32(bus, reg) \
+ ({ (void)(bus); rte_read32_relaxed((reg)); })
#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr)
@@ -244,4 +276,11 @@ typedef uint64_t dma_addr_t;
#define PTR_ERR(error) ((long)(void *)error)
#define might_sleep()
+#define lower_32_bits(x) ((uint32_t)(x))
+#define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
+
+#ifndef READ_ONCE
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
+#endif
+
#endif /* DPDK_ENA_COM_ENA_PLAT_DPDK_H_ */
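
Two of the helpers added above are worth a note: upper_32_bits() uses two 16-bit shifts so the expression stays defined even when its argument is only 32 bits wide (a single >> 32 would be undefined behaviour), and READ_ONCE() forces a fresh load through a volatile-qualified pointer. A small demonstration:

    #include <inttypes.h>
    #include <stdio.h>

    #define lower_32_bits(x) ((uint32_t)(x))
    #define upper_32_bits(x) ((uint32_t)(((x) >> 16) >> 16))
    #define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))

    int main(void)
    {
    	uint64_t addr = 0x12345678abcdef00ULL;

    	/* prints hi=12345678 lo=abcdef00 */
    	printf("hi=%" PRIx32 " lo=%" PRIx32 "\n",
    	       upper_32_bits(addr), lower_32_bits(addr));

    	/* volatile access: the compiler may not cache or merge it,
    	 * which matters when polling device-written memory
    	 */
    	uint64_t snapshot = READ_ONCE(addr);
    	(void)snapshot;
    	return 0;
    }
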
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 34b2a8d7..c255dc6d 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -54,7 +54,7 @@
#include <ena_eth_io_defs.h>
#define DRV_MODULE_VER_MAJOR 1
-#define DRV_MODULE_VER_MINOR 0
+#define DRV_MODULE_VER_MINOR 1
#define DRV_MODULE_VER_SUBMINOR 0
#define ENA_IO_TXQ_IDX(q) (2 * (q))
@@ -85,6 +85,9 @@
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#define ENA_MAX_RING_DESC ENA_DEFAULT_RING_SIZE
+#define ENA_MIN_RING_DESC 128
+
enum ethtool_stringset {
ETH_SS_TEST = 0,
ETH_SS_STATS,
@@ -114,6 +117,12 @@ struct ena_stats {
#define ENA_STAT_GLOBAL_ENTRY(stat) \
ENA_STAT_ENTRY(stat, dev)
+/*
+ * Each rte_memzone should have a unique name.
+ * To satisfy this, count the number of allocations and append the count
+ * to the name.
+ */
+uint32_t ena_alloc_cnt;
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(io_suspend),
@@ -195,8 +204,11 @@ static const struct rte_pci_id pci_id_ena_map[] = {
{ .device_id = 0 },
};
+static struct ena_aenq_handlers aenq_handlers;
+
static int ena_device_init(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx);
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state);
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -215,7 +227,9 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count);
static void ena_init_rings(struct ena_adapter *adapter);
static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int ena_start(struct rte_eth_dev *dev);
+static void ena_stop(struct rte_eth_dev *dev);
static void ena_close(struct rte_eth_dev *dev);
+static int ena_dev_reset(struct rte_eth_dev *dev);
static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
@@ -238,10 +252,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads);
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads);
+static void ena_interrupt_handler_rte(void *cb_arg);
+static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
@@ -249,12 +261,14 @@ static const struct eth_dev_ops ena_dev_ops = {
.rx_queue_setup = ena_rx_queue_setup,
.tx_queue_setup = ena_tx_queue_setup,
.dev_start = ena_start,
+ .dev_stop = ena_stop,
.link_update = ena_link_update,
.stats_get = ena_stats_get,
.mtu_set = ena_mtu_set,
.rx_queue_release = ena_rx_queue_release,
.tx_queue_release = ena_tx_queue_release,
.dev_close = ena_close,
+ .dev_reset = ena_dev_reset,
.reta_update = ena_rss_reta_update,
.reta_query = ena_rss_reta_query,
};
@@ -264,11 +278,15 @@ static const struct eth_dev_ops ena_dev_ops = {
static inline int ena_cpu_to_node(int cpu)
{
struct rte_config *config = rte_eal_get_configuration();
+ struct rte_fbarray *arr = &config->mem_config->memzones;
+ const struct rte_memzone *mz;
+
+ if (unlikely(cpu >= RTE_MAX_MEMZONE))
+ return NUMA_NO_NODE;
- if (likely(cpu < RTE_MAX_MEMZONE))
- return config->mem_config->memzone[cpu].socket_id;
+ mz = rte_fbarray_get(arr, cpu);
- return NUMA_NO_NODE;
+ return mz->socket_id;
}
static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
@@ -346,9 +364,6 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
ena_meta->mss = mbuf->tso_segsz;
ena_meta->l3_hdr_len = mbuf->l3_len;
ena_meta->l3_hdr_offset = mbuf->l2_len;
- /* this param needed only for TSO */
- ena_meta->l3_outer_hdr_len = 0;
- ena_meta->l3_outer_hdr_offset = 0;
ena_tx_ctx->meta_valid = true;
} else {
@@ -356,6 +371,40 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
}
}
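+/*
+ * A req_id outside the ring indicates corrupted descriptor state; rather
+ * than index out of bounds, the adapter is flagged for a device reset.
+ */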
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
+{
+ if (likely(req_id < rx_ring->ring_size))
+ return 0;
+
+ RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);
+
+ rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ rx_ring->adapter->trigger_reset = true;
+
+ return -EFAULT;
+}
+
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+ struct ena_tx_buffer *tx_info = NULL;
+
+ if (likely(req_id < tx_ring->ring_size)) {
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->mbuf))
+ return 0;
+ }
+
+ if (tx_info)
+ RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
+ else
+ RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
+
+ /* Trigger device reset */
+ tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+ tx_ring->adapter->trigger_reset = true;
+ return -EFAULT;
+}
+
static void ena_config_host_info(struct ena_com_dev *ena_dev)
{
struct ena_admin_host_info *host_info;
@@ -387,9 +436,12 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
- RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
- if (rc != -EPERM)
- goto err;
+ if (rc == -ENA_COM_UNSUPPORTED)
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ else
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+ goto err;
}
return;
@@ -440,9 +492,12 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(&adapter->ena_dev);
if (rc) {
- RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
- if (rc != -EPERM)
- goto err;
+ if (rc == -ENA_COM_UNSUPPORTED)
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ else
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+ goto err;
}
return;
@@ -455,12 +510,76 @@ static void ena_close(struct rte_eth_dev *dev)
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
- adapter->state = ENA_ADAPTER_STATE_STOPPED;
+ ena_stop(dev);
+ adapter->state = ENA_ADAPTER_STATE_CLOSED;
ena_rx_queue_release_all(dev);
ena_tx_queue_release_all(dev);
}
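+/*
+ * Full recovery path: the RX mempools are saved first so the queues can be
+ * re-created against the same pools once the admin queue and the device
+ * itself have been torn down and re-initialized.
+ */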
+static int
+ena_dev_reset(struct rte_eth_dev *dev)
+{
+ struct rte_mempool *mb_pool_rx[ENA_MAX_NUM_QUEUES];
+ struct rte_eth_dev *eth_dev;
+ struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
+ struct ena_com_dev *ena_dev;
+ struct ena_com_dev_get_features_ctx get_feat_ctx;
+ struct ena_adapter *adapter;
+ int nb_queues;
+ int rc, i;
+ bool wd_state;
+
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
+ ena_dev = &adapter->ena_dev;
+ eth_dev = adapter->rte_dev;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+ nb_queues = eth_dev->data->nb_rx_queues;
+
+ ena_com_set_admin_running_state(ena_dev, false);
+
+ rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+ if (rc)
+ RTE_LOG(ERR, PMD, "Device reset failed\n");
+
+ for (i = 0; i < nb_queues; i++)
+ mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
+
+ ena_rx_queue_release_all(eth_dev);
+ ena_tx_queue_release_all(eth_dev);
+
+ rte_intr_disable(intr_handle);
+
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+
+ rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
+ if (rc) {
+ PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
+ return rc;
+ }
+ adapter->wd_state = wd_state;
+
+ rte_intr_enable(intr_handle);
+ ena_com_set_admin_polling_mode(ena_dev, false);
+ ena_com_admin_aenq_enable(ena_dev);
+
+ for (i = 0; i < nb_queues; ++i)
+ ena_rx_queue_setup(eth_dev, i, adapter->rx_ring_size, 0, NULL,
+ mb_pool_rx[i]);
+
+ for (i = 0; i < nb_queues; ++i)
+ ena_tx_queue_setup(eth_dev, i, adapter->tx_ring_size, 0, NULL);
+
+ adapter->trigger_reset = false;
+
+ return 0;
+}
+
static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size)
@@ -468,7 +587,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
- int ret, i;
+ int rc, i;
u16 entry_value;
int conf_idx;
int idx;
@@ -480,8 +599,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
RTE_LOG(WARNING, PMD,
"indirection table %d is bigger than supported (%d)\n",
reta_size, ENA_RX_RSS_TABLE_SIZE);
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
for (i = 0 ; i < reta_size ; i++) {
@@ -493,29 +611,28 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
entry_value =
ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
- ret = ena_com_indirect_table_fill_entry(ena_dev,
- i,
- entry_value);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+ i,
+ entry_value);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD,
"Cannot fill indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return rc;
}
}
}
- ret = ena_com_indirect_table_set(ena_dev);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ rc = ena_com_indirect_table_set(ena_dev);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return rc;
}
RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
__func__, reta_size, adapter->rte_dev->data->port_id);
-err:
- return ret;
+
+ return 0;
}
/* Query redirection table. */
@@ -526,7 +643,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
- int ret;
+ int rc;
int i;
u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
int reta_conf_idx;
@@ -536,11 +653,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
(reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
return -EINVAL;
- ret = ena_com_indirect_table_get(ena_dev, indirect_table);
- if (unlikely(ret && (ret != ENA_COM_PERMISSION))) {
+ rc = ena_com_indirect_table_get(ena_dev, indirect_table);
+ if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
RTE_LOG(ERR, PMD, "cannot get indirect table\n");
- ret = -ENOTSUP;
- goto err;
+ return -ENOTSUP;
}
for (i = 0 ; i < reta_size ; i++) {
@@ -550,8 +666,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
reta_conf[reta_conf_idx].reta[reta_idx] =
ENA_IO_RXQ_IDX_REV(indirect_table[i]);
}
-err:
- return ret;
+
+ return 0;
}
static int ena_rss_init_default(struct ena_adapter *adapter)
@@ -571,7 +687,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = i % nb_rx_queues;
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(ERR, PMD, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -579,19 +695,19 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(INFO, PMD, "Cannot fill hash function\n");
goto err_fill_indir;
}
rc = ena_com_set_default_hash_ctrl(ena_dev);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(INFO, PMD, "Cannot fill hash control\n");
goto err_fill_indir;
}
rc = ena_com_indirect_table_set(ena_dev);
- if (unlikely(rc && (rc != ENA_COM_PERMISSION))) {
+ if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
goto err_fill_indir;
}
@@ -650,6 +766,10 @@ static void ena_rx_queue_release(void *queue)
rte_free(ring->rx_buffer_info);
ring->rx_buffer_info = NULL;
+ if (ring->empty_rx_reqs)
+ rte_free(ring->empty_rx_reqs);
+ ring->empty_rx_reqs = NULL;
+
ring->configured = 0;
RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
@@ -723,9 +843,12 @@ static int ena_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct rte_eth_link *link = &dev->data->dev_link;
+ struct ena_adapter *adapter;
+
+ adapter = (struct ena_adapter *)(dev->data->dev_private);
- link->link_status = 1;
- link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link->link_speed = ETH_SPEED_NUM_NONE;
link->link_duplex = ETH_LINK_FULL_DUPLEX;
return 0;
@@ -737,13 +860,18 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
struct ena_ring *queues = NULL;
+ int nb_queues;
int i = 0;
int rc = 0;
- queues = (ring_type == ENA_RING_TYPE_RX) ?
- adapter->rx_ring : adapter->tx_ring;
-
- for (i = 0; i < adapter->num_queues; i++) {
+ if (ring_type == ENA_RING_TYPE_RX) {
+ queues = adapter->rx_ring;
+ nb_queues = dev->data->nb_rx_queues;
+ } else {
+ queues = adapter->tx_ring;
+ nb_queues = dev->data->nb_tx_queues;
+ }
+ for (i = 0; i < nb_queues; i++) {
if (queues[i].configured) {
if (ring_type == ENA_RING_TYPE_RX) {
ena_assert_msg(
@@ -761,7 +889,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR,
"failed to restart queue %d type(%d)",
i, ring_type);
- return -1;
+ return rc;
}
}
}
@@ -785,9 +913,11 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
{
uint32_t max_frame_len = ena_get_mtu_conf(adapter);
- if (max_frame_len > adapter->max_mtu) {
- PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
- return -1;
+ if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+ PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
+ "max mtu: %d, min mtu: %d\n",
+ max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+ return ENA_COM_UNSUPPORTED;
}
return 0;
@@ -795,6 +925,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
static int
ena_calc_queue_size(struct ena_com_dev *ena_dev,
+ u16 *max_tx_sgl_size,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
@@ -812,11 +943,14 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
if (!rte_is_power_of_2(queue_size))
queue_size = rte_align32pow2(queue_size >> 1);
- if (queue_size == 0) {
+ if (unlikely(queue_size == 0)) {
PMD_INIT_LOG(ERR, "Invalid queue size");
return -EFAULT;
}
+ *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+ get_feat_ctx->max_queues.max_packet_tx_descs);
+
return queue_size;
}
@@ -881,12 +1015,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- if (mtu > ena_get_mtu_conf(adapter)) {
+ if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
RTE_LOG(ERR, PMD,
- "Given MTU (%d) exceeds maximum MTU supported (%d)\n",
- mtu, ena_get_mtu_conf(adapter));
- rc = -EINVAL;
- goto err;
+ "Invalid MTU setting. new_mtu: %d "
+ "max mtu: %d min mtu: %d\n",
+ mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+ return -EINVAL;
}
rc = ena_com_set_dev_mtu(ena_dev, mtu);
@@ -895,7 +1029,6 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
else
RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);
-err:
return rc;
}
@@ -903,14 +1036,9 @@ static int ena_start(struct rte_eth_dev *dev)
{
struct ena_adapter *adapter =
(struct ena_adapter *)(dev->data->dev_private);
+ uint64_t ticks;
int rc = 0;
- if (!(adapter->state == ENA_ADAPTER_STATE_CONFIG ||
- adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "API violation");
- return -1;
- }
-
rc = ena_check_valid_conf(adapter);
if (rc)
return rc;
@@ -924,7 +1052,7 @@ static int ena_start(struct rte_eth_dev *dev)
return rc;
if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
- ETH_MQ_RX_RSS_FLAG) {
+ ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
rc = ena_rss_init_default(adapter);
if (rc)
return rc;
@@ -932,11 +1060,28 @@ static int ena_start(struct rte_eth_dev *dev)
ena_stats_restart(dev);
+ adapter->timestamp_wd = rte_get_timer_cycles();
+ adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
+
+ ticks = rte_get_timer_hz();
+ rte_timer_reset(&adapter->timer_wd, ticks, PERIODICAL, rte_lcore_id(),
+ ena_timer_wd_callback, adapter);
+
adapter->state = ENA_ADAPTER_STATE_RUNNING;
return 0;
}
+static void ena_stop(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ rte_timer_stop_sync(&adapter->timer_wd);
+
+ adapter->state = ENA_ADAPTER_STATE_STOPPED;
+}
+
static int ena_queue_restart(struct ena_ring *ring)
{
int rc, bufs_num;
@@ -954,7 +1099,7 @@ static int ena_queue_restart(struct ena_ring *ring)
rc = ena_populate_rx_queue(ring, bufs_num);
if (rc != bufs_num) {
PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
- return (-1);
+ return ENA_COM_FAULT;
}
return 0;
@@ -984,12 +1129,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(CRIT, PMD,
"API violation. Queue %d is already configured\n",
queue_idx);
- return -1;
+ return ENA_COM_FAULT;
}
if (!rte_is_power_of_2(nb_desc)) {
RTE_LOG(ERR, PMD,
- "Unsupported size of RX queue: %d is not a power of 2.",
+ "Unsupported size of TX queue: %d is not a power of 2.",
nb_desc);
return -EINVAL;
}
@@ -1001,12 +1146,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (tx_conf->txq_flags == ETH_TXQ_FLAGS_IGNORE &&
- !ena_are_tx_queue_offloads_allowed(adapter, tx_conf->offloads)) {
- RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
- return -EINVAL;
- }
-
ena_qid = ENA_IO_TXQ_IDX(queue_idx);
ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1021,6 +1160,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(ERR, PMD,
"failed to create io TX queue #%d (qid:%d) rc: %d\n",
queue_idx, ena_qid, rc);
+ return rc;
}
txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1032,10 +1172,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(ERR, PMD,
"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
queue_idx, rc);
- ena_com_destroy_io_queue(ena_dev, ena_qid);
- goto err;
+ goto err_destroy_io_queue;
}
+ ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node);
+
txq->port_id = dev->data->port_id;
txq->next_to_clean = 0;
txq->next_to_use = 0;
@@ -1047,7 +1188,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!txq->tx_buffer_info) {
RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_destroy_io_queue;
}
txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
@@ -1055,18 +1197,29 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!txq->empty_tx_reqs) {
RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n");
- rte_free(txq->tx_buffer_info);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_free;
}
+
for (i = 0; i < txq->ring_size; i++)
txq->empty_tx_reqs[i] = i;
- txq->offloads = tx_conf->offloads;
+ if (tx_conf != NULL) {
+ txq->offloads =
+ tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ }
/* Store pointer to this queue in upper layer */
txq->configured = 1;
dev->data->tx_queues[queue_idx] = txq;
-err:
+
+ return 0;
+
+err_free:
+ rte_free(txq->tx_buffer_info);
+
+err_destroy_io_queue:
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
return rc;
}
@@ -1074,7 +1227,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
__rte_unused unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct ena_com_create_io_ctx ctx =
@@ -1085,7 +1238,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
(struct ena_adapter *)(dev->data->dev_private);
struct ena_ring *rxq = NULL;
uint16_t ena_qid = 0;
- int rc = 0;
+ int i, rc = 0;
struct ena_com_dev *ena_dev = &adapter->ena_dev;
rxq = &adapter->rx_ring[queue_idx];
@@ -1093,12 +1246,12 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_LOG(CRIT, PMD,
"API violation. Queue %d is already configured\n",
queue_idx);
- return -1;
+ return ENA_COM_FAULT;
}
if (!rte_is_power_of_2(nb_desc)) {
RTE_LOG(ERR, PMD,
- "Unsupported size of TX queue: %d is not a power of 2.",
+ "Unsupported size of RX queue: %d is not a power of 2.",
nb_desc);
return -EINVAL;
}
@@ -1110,11 +1263,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (!ena_are_rx_queue_offloads_allowed(adapter, rx_conf->offloads)) {
- RTE_LOG(ERR, PMD, "Unsupported queue offloads\n");
- return -EINVAL;
- }
-
ena_qid = ENA_IO_RXQ_IDX(queue_idx);
ctx.qid = ena_qid;
@@ -1125,9 +1273,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
ctx.numa_node = ena_cpu_to_node(queue_idx);
rc = ena_com_create_io_queue(ena_dev, &ctx);
- if (rc)
+ if (rc) {
RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
queue_idx, rc);
+ return rc;
+ }
rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
@@ -1140,6 +1290,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
queue_idx, rc);
ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return rc;
}
rxq->port_id = dev->data->port_id;
@@ -1153,9 +1304,24 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
RTE_CACHE_LINE_SIZE);
if (!rxq->rx_buffer_info) {
RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n");
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
return -ENOMEM;
}
+ rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
+ sizeof(uint16_t) * nb_desc,
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->empty_rx_reqs) {
+ RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
+ rte_free(rxq->rx_buffer_info);
+ rxq->rx_buffer_info = NULL;
+ ena_com_destroy_io_queue(ena_dev, ena_qid);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nb_desc; i++)
+ rxq->empty_rx_reqs[i] = i;
+
/* Store pointer to this queue in upper layer */
rxq->configured = 1;
dev->data->rx_queues[queue_idx] = rxq;
@@ -1170,7 +1336,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
uint16_t ring_size = rxq->ring_size;
uint16_t ring_mask = ring_size - 1;
uint16_t next_to_use = rxq->next_to_use;
- uint16_t in_use;
+ uint16_t in_use, req_id;
struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
if (unlikely(!count))
@@ -1198,12 +1364,18 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
struct ena_com_buf ebuf;
rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+
+ req_id = rxq->empty_rx_reqs[next_to_use_masked];
+ rc = validate_rx_req_id(rxq, req_id);
+ if (unlikely(rc < 0))
+ break;
+
/* prepare physical address for DMA transaction */
ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* pass resource to device */
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
- &ebuf, next_to_use_masked);
+ &ebuf, req_id);
if (unlikely(rc)) {
rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
count - i);
@@ -1213,9 +1385,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
next_to_use++;
}
+ if (unlikely(i < count))
+ RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
+ "buffers (from %d)\n", rxq->id, i, count);
+
 /* When we submitted free resources to the device... */
- if (i > 0) {
- /* ...let HW know that it can fill buffers with data */
+ if (likely(i > 0)) {
+ /* ...let HW know that it can fill buffers with data
+ *
+ * Add a memory barrier to make sure the descriptors were
+ * written before issuing the doorbell
+ */
rte_wmb();
ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
@@ -1226,8 +1406,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
}
static int ena_device_init(struct ena_com_dev *ena_dev,
- struct ena_com_dev_get_features_ctx *get_feat_ctx)
+ struct ena_com_dev_get_features_ctx *get_feat_ctx,
+ bool *wd_state)
{
+ uint32_t aenq_groups;
int rc;
bool readless_supported;
@@ -1247,7 +1429,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
ena_com_set_mmio_read_mode(ena_dev, readless_supported);
/* reset device */
- rc = ena_com_dev_reset(ena_dev);
+ rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) {
RTE_LOG(ERR, PMD, "cannot reset device\n");
goto err_mmio_read_less;
@@ -1263,7 +1445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
ena_dev->dma_addr_bits = ena_com_get_dma_width(ena_dev);
/* ENA device administration layer init */
- rc = ena_com_admin_init(ena_dev, NULL, true);
+ rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
if (rc) {
RTE_LOG(ERR, PMD,
"cannot initialize ena admin queue with device\n");
@@ -1286,6 +1468,21 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
goto err_admin_init;
}
+ aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
+ BIT(ENA_ADMIN_NOTIFICATION) |
+ BIT(ENA_ADMIN_KEEP_ALIVE) |
+ BIT(ENA_ADMIN_FATAL_ERROR) |
+ BIT(ENA_ADMIN_WARNING);
+
+ aenq_groups &= get_feat_ctx->aenq.supported_groups;
+ rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "Cannot configure aenq groups rc: %d\n", rc);
+ goto err_admin_init;
+ }
+
+ *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
return 0;
err_admin_init:
@@ -1297,16 +1494,89 @@ err_mmio_read_less:
return rc;
}
+static void ena_interrupt_handler_rte(void *cb_arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)cb_arg;
+ struct ena_com_dev *ena_dev = &adapter->ena_dev;
+
+ ena_com_admin_q_comp_intr_handler(ena_dev);
+ if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
+ ena_com_aenq_intr_handler(ena_dev, adapter);
+}
+
+static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+{
+ if (!adapter->wd_state)
+ return;
+
+ if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ return;
+
+ if (unlikely((rte_get_timer_cycles() - adapter->timestamp_wd) >=
+ adapter->keep_alive_timeout)) {
+ RTE_LOG(ERR, PMD, "Keep alive timeout\n");
+ adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
+ adapter->trigger_reset = true;
+ }
+}
+
+/* Check if admin queue is enabled */
+static void check_for_admin_com_state(struct ena_adapter *adapter)
+{
+ if (unlikely(!ena_com_get_admin_running_state(&adapter->ena_dev))) {
+ RTE_LOG(ERR, PMD, "ENA admin queue is not in running state!\n");
+ adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
+ adapter->trigger_reset = true;
+ }
+}
+
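+/*
+ * Watchdog timer callback: if either check below requests a reset, the
+ * application is notified via RTE_ETH_EVENT_INTR_RESET and is expected to
+ * invoke dev_reset.
+ */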
+static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
+ void *arg)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)arg;
+ struct rte_eth_dev *dev = adapter->rte_dev;
+
+ check_for_missing_keep_alive(adapter);
+ check_for_admin_com_state(adapter);
+
+ if (unlikely(adapter->trigger_reset)) {
+ RTE_LOG(ERR, PMD, "Trigger reset is on\n");
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
+}
+
+static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ int io_sq_num, io_cq_num, io_queue_num;
+
+ io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+ io_cq_num = get_feat_ctx->max_queues.max_cq_num;
+
+ io_queue_num = RTE_MIN(io_sq_num, io_cq_num);
+
+ if (unlikely(io_queue_num == 0)) {
+ RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
+ return -EFAULT;
+ }
+
+ return io_queue_num;
+}
+
static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct ena_adapter *adapter =
(struct ena_adapter *)(eth_dev->data->dev_private);
struct ena_com_dev *ena_dev = &adapter->ena_dev;
struct ena_com_dev_get_features_ctx get_feat_ctx;
int queue_size, rc;
+ u16 tx_sgl_size = 0;
static int adapters_found;
+ bool wd_state;
memset(adapter, 0, sizeof(struct ena_adapter));
ena_dev = &adapter->ena_dev;
@@ -1330,19 +1600,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
pci_dev->addr.devid,
pci_dev->addr.function);
+ intr_handle = &pci_dev->intr_handle;
+
adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
- /* Present ENA_MEM_BAR indicates available LLQ mode.
- * Use corresponding policy
- */
- if (adapter->dev_mem_base)
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
- else if (adapter->regs)
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
- else
+ if (!adapter->regs) {
PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
ENA_REGS_BAR);
+ return -ENXIO;
+ }
ena_dev->reg_bar = adapter->regs;
ena_dev->dmadev = adapter->pdev;
@@ -1353,36 +1620,28 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
adapter->id_number);
/* device specific initialization routine */
- rc = ena_device_init(ena_dev, &get_feat_ctx);
+ rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
if (rc) {
PMD_INIT_LOG(CRIT, "Failed to init ENA device");
- return -1;
- }
-
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- if (get_feat_ctx.max_queues.max_llq_num == 0) {
- PMD_INIT_LOG(ERR,
- "Trying to use LLQ but llq_num is 0.\n"
- "Fall back into regular queues.");
- ena_dev->tx_mem_queue_type =
- ENA_ADMIN_PLACEMENT_POLICY_HOST;
- adapter->num_queues =
- get_feat_ctx.max_queues.max_sq_num;
- } else {
- adapter->num_queues =
- get_feat_ctx.max_queues.max_llq_num;
- }
- } else {
- adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
+ goto err;
}
+ adapter->wd_state = wd_state;
- queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
- if ((queue_size <= 0) || (adapter->num_queues <= 0))
- return -EFAULT;
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ adapter->num_queues = ena_calc_io_queue_num(ena_dev,
+ &get_feat_ctx);
+
+ queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
+ if (queue_size <= 0 || adapter->num_queues <= 0) {
+ rc = -EFAULT;
+ goto err_device_destroy;
+ }
adapter->tx_ring_size = queue_size;
adapter->rx_ring_size = queue_size;
+ adapter->max_tx_sgl_size = tx_sgl_size;
+
/* prepare ring structures */
ena_init_rings(adapter);
@@ -1405,58 +1664,77 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
RTE_CACHE_LINE_SIZE);
if (!adapter->drv_stats) {
RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_delete_debug_area;
}
+ rte_intr_callback_register(intr_handle,
+ ena_interrupt_handler_rte,
+ adapter);
+ rte_intr_enable(intr_handle);
+ ena_com_set_admin_polling_mode(ena_dev, false);
+ ena_com_admin_aenq_enable(ena_dev);
+
+ if (adapters_found == 0)
+ rte_timer_subsystem_init();
+ rte_timer_init(&adapter->timer_wd);
+
adapters_found++;
adapter->state = ENA_ADAPTER_STATE_INIT;
return 0;
+
+err_delete_debug_area:
+ ena_com_delete_debug_area(ena_dev);
+
+err_device_destroy:
+ ena_com_delete_host_info(ena_dev);
+ ena_com_admin_destroy(ena_dev);
+
+err:
+ return rc;
}
-static int ena_dev_configure(struct rte_eth_dev *dev)
+static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ena_adapter *adapter =
- (struct ena_adapter *)(dev->data->dev_private);
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ (struct ena_adapter *)(eth_dev->data->dev_private);
- if ((tx_offloads & adapter->tx_supported_offloads) != tx_offloads) {
- RTE_LOG(ERR, PMD, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- tx_offloads, adapter->tx_supported_offloads);
- return -ENOTSUP;
- }
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
- if ((rx_offloads & adapter->rx_supported_offloads) != rx_offloads) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- rx_offloads, adapter->rx_supported_offloads);
- return -ENOTSUP;
- }
+ if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
+ ena_close(eth_dev);
- if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
- adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
- adapter->state);
- return -1;
- }
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
- switch (adapter->state) {
- case ENA_ADAPTER_STATE_INIT:
- case ENA_ADAPTER_STATE_STOPPED:
- adapter->state = ENA_ADAPTER_STATE_CONFIG;
- break;
- case ENA_ADAPTER_STATE_CONFIG:
- RTE_LOG(WARNING, PMD,
- "Ivalid driver state while trying to configure device\n");
- break;
- default:
- break;
- }
+ rte_free(adapter->drv_stats);
+ adapter->drv_stats = NULL;
+
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ena_interrupt_handler_rte,
+ adapter);
+
+ adapter->state = ENA_ADAPTER_STATE_FREE;
+
+ return 0;
+}
- adapter->tx_selected_offloads = tx_offloads;
- adapter->rx_selected_offloads = rx_offloads;
+static int ena_dev_configure(struct rte_eth_dev *dev)
+{
+ struct ena_adapter *adapter =
+ (struct ena_adapter *)(dev->data->dev_private);
+
+ adapter->state = ENA_ADAPTER_STATE_CONFIG;
+
+ adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
+ adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
return 0;
}
@@ -1473,6 +1751,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
ring->id = i;
ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
+ ring->sgl_size = adapter->max_tx_sgl_size;
}
for (i = 0; i < adapter->num_queues; i++) {
@@ -1485,32 +1764,6 @@ static void ena_init_rings(struct ena_adapter *adapter)
}
}
-static bool ena_are_tx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads)
-{
- uint64_t port_offloads = adapter->tx_selected_offloads;
-
- /* Check if port supports all requested offloads.
- * True if all offloads selected for queue are set for port.
- */
- if ((offloads & port_offloads) != offloads)
- return false;
- return true;
-}
-
-static bool ena_are_rx_queue_offloads_allowed(struct ena_adapter *adapter,
- uint64_t offloads)
-{
- uint64_t port_offloads = adapter->rx_selected_offloads;
-
- /* Check if port supports all requested offloads.
- * True if all offloads selected for queue are set for port.
- */
- if ((offloads & port_offloads) != offloads)
- return false;
- return true;
-}
-
static void ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
@@ -1527,8 +1780,6 @@ static void ena_infos_get(struct rte_eth_dev *dev,
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
@@ -1581,6 +1832,16 @@ static void ena_infos_get(struct rte_eth_dev *dev,
adapter->tx_supported_offloads = tx_feat;
adapter->rx_supported_offloads = rx_feat;
+
+ dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+ dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+
+ dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+ dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+ dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+ feat.max_queues.max_packet_tx_descs);
+ dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+ feat.max_queues.max_packet_tx_descs);
}
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1591,6 +1852,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
unsigned int ring_mask = ring_size - 1;
uint16_t next_to_clean = rx_ring->next_to_clean;
uint16_t desc_in_use = 0;
+ uint16_t req_id;
unsigned int recv_idx = 0;
struct rte_mbuf *mbuf = NULL;
struct rte_mbuf *mbuf_head = NULL;
@@ -1624,6 +1886,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
&ena_rx_ctx);
if (unlikely(rc)) {
RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+ rx_ring->adapter->trigger_reset = true;
return 0;
}
@@ -1631,12 +1894,17 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
while (segments < ena_rx_ctx.descs) {
- mbuf = rx_buff_info[next_to_clean & ring_mask];
+ req_id = ena_rx_ctx.ena_bufs[segments].req_id;
+ rc = validate_rx_req_id(rx_ring, req_id);
+ if (unlikely(rc))
+ break;
+
+ mbuf = rx_buff_info[req_id];
mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->refcnt = 1;
mbuf->next = NULL;
- if (segments == 0) {
+ if (unlikely(segments == 0)) {
mbuf->nb_segs = ena_rx_ctx.descs;
mbuf->port = rx_ring->port_id;
mbuf->pkt_len = 0;
@@ -1648,6 +1916,8 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
mbuf_head->pkt_len += mbuf->data_len;
mbuf_prev = mbuf;
+ rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
+ req_id;
segments++;
next_to_clean++;
}
@@ -1741,6 +2011,46 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
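+/*
+ * Apply timing hints delivered by the device (via an AENQ notification) to
+ * the admin completion timeout, the MMIO read timeout and the keep-alive
+ * watchdog.
+ */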
+static void ena_update_hints(struct ena_adapter *adapter,
+ struct ena_admin_ena_hw_hints *hints)
+{
+ if (hints->admin_completion_tx_timeout)
+ adapter->ena_dev.admin_queue.completion_timeout =
+ hints->admin_completion_tx_timeout * 1000;
+
+ if (hints->mmio_read_timeout)
+ /* convert to usec */
+ adapter->ena_dev.mmio_read.reg_read_to =
+ hints->mmio_read_timeout * 1000;
+
+ if (hints->driver_watchdog_timeout) {
+ if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
+ else
+ /* Convert msecs to ticks */
+ adapter->keep_alive_timeout =
+ (hints->driver_watchdog_timeout *
+ rte_get_timer_hz()) / 1000;
+ }
+}
+
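+/*
+ * The device accepts fewer than sgl_size descriptors per packet; mbuf
+ * chains that reach that limit are linearized into a single contiguous
+ * buffer before transmit.
+ */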
+static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
+ struct rte_mbuf *mbuf)
+{
+ int num_segments, rc;
+
+ num_segments = mbuf->nb_segs;
+
+ if (likely(num_segments < tx_ring->sgl_size))
+ return 0;
+
+ rc = rte_pktmbuf_linearize(mbuf);
+ if (unlikely(rc))
+ RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+
+ return rc;
+}
+
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1771,6 +2081,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
mbuf = tx_pkts[sent_idx];
+ rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+ if (unlikely(rc))
+ break;
+
req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
tx_info = &tx_ring->tx_buffer_info[req_id];
tx_info->mbuf = mbuf;
@@ -1848,6 +2162,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Clear complete packets */
while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
+ rc = validate_tx_req_id(tx_ring, req_id);
+ if (rc)
+ break;
+
/* Get Tx info & store how many descs were processed */
tx_info = &tx_ring->tx_buffer_info[req_id];
total_tx_descs += tx_info->tx_descs;
@@ -1875,6 +2193,9 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return sent_idx;
}
+/*********************************************************************
+ * PMD configuration
+ *********************************************************************/
static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
@@ -1884,12 +2205,13 @@ static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ena_dev_uninit);
}
static struct rte_pci_driver rte_ena_pmd = {
.id_table = pci_id_ena_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_WC_ACTIVATE,
.probe = eth_ena_pci_probe,
.remove = eth_ena_pci_remove,
};
@@ -1898,9 +2220,7 @@ RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(ena_init_log);
-static void
-ena_init_log(void)
+RTE_INIT(ena_init_log)
{
ena_logtype_init = rte_log_register("pmd.net.ena.init");
if (ena_logtype_init >= 0)
@@ -1909,3 +2229,75 @@ ena_init_log(void)
if (ena_logtype_driver >= 0)
rte_log_set_level(ena_logtype_driver, RTE_LOG_NOTICE);
}
+
+/******************************************************************************
+ ******************************** AENQ Handlers *******************************
+ *****************************************************************************/
+static void ena_update_on_link_change(void *adapter_data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct rte_eth_dev *eth_dev;
+ struct ena_adapter *adapter;
+ struct ena_admin_aenq_link_change_desc *aenq_link_desc;
+ uint32_t status;
+
+ adapter = (struct ena_adapter *)adapter_data;
+ aenq_link_desc = (struct ena_admin_aenq_link_change_desc *)aenq_e;
+ eth_dev = adapter->rte_dev;
+
+ status = get_ena_admin_aenq_link_change_desc_link_status(aenq_link_desc);
+ adapter->link_status = status;
+
+ ena_link_update(eth_dev, 0);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
+static void ena_notification(void *data,
+ struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_admin_ena_hw_hints *hints;
+
+ if (aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION)
+ RTE_LOG(WARNING, PMD, "Invalid group(%x) expected %x\n",
+ aenq_e->aenq_common_desc.group,
+ ENA_ADMIN_NOTIFICATION);
+
+ switch (aenq_e->aenq_common_desc.syndrom) {
+ case ENA_ADMIN_UPDATE_HINTS:
+ hints = (struct ena_admin_ena_hw_hints *)
+ (&aenq_e->inline_data_w4);
+ ena_update_hints(adapter, hints);
+ break;
+ default:
+ RTE_LOG(ERR, PMD, "Invalid aenq notification link state %d\n",
+ aenq_e->aenq_common_desc.syndrom);
+ }
+}
+
+static void ena_keep_alive(void *adapter_data,
+ __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ adapter->timestamp_wd = rte_get_timer_cycles();
+}
+
+/**
+ * This handler will be called for an unknown event group or for events with
+ * unimplemented handlers.
+ */
+static void unimplemented_aenq_handler(__rte_unused void *data,
+ __rte_unused struct ena_admin_aenq_entry *aenq_e)
+{
+ RTE_LOG(ERR, PMD, "Unknown event was received or event with "
+ "unimplemented handler\n");
+}
+
+static struct ena_aenq_handlers aenq_handlers = {
+ .handlers = {
+ [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
+ [ENA_ADMIN_NOTIFICATION] = ena_notification,
+ [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive
+ },
+ .unimplemented_handler = unimplemented_aenq_handler
+};
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 394d05e0..2dc8129e 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -34,8 +34,10 @@
#ifndef _ENA_ETHDEV_H_
#define _ENA_ETHDEV_H_
+#include <rte_cycles.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
+#include <rte_timer.h>
#include "ena_com.h"
@@ -48,8 +50,13 @@
#define ENA_NAME_MAX_LEN 20
#define ENA_PKT_MAX_BUFS 17
+#define ENA_MIN_MTU 128
+
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
+#define ENA_WD_TIMEOUT_SEC 3
+#define ENA_DEVICE_KALIVE_TIMEOUT (ENA_WD_TIMEOUT_SEC * rte_get_timer_hz())
+
struct ena_adapter;
enum ena_ring_type {
@@ -70,8 +77,12 @@ struct ena_ring {
enum ena_ring_type type;
enum ena_admin_placement_policy_type tx_mem_queue_type;
- /* Holds the empty requests for TX OOO completions */
- uint16_t *empty_tx_reqs;
+ /* Holds the empty requests for TX/RX OOO completions */
+ union {
+ uint16_t *empty_tx_reqs;
+ uint16_t *empty_rx_reqs;
+ };
+
union {
 struct ena_tx_buffer *tx_buffer_info; /* context of tx packet */
 struct rte_mbuf **rx_buffer_info; /* context of rx packet */
@@ -92,14 +103,16 @@ struct ena_ring {
int configured;
struct ena_adapter *adapter;
uint64_t offloads;
+ u16 sgl_size;
} __rte_cache_aligned;
enum ena_adapter_state {
ENA_ADAPTER_STATE_FREE = 0,
ENA_ADAPTER_STATE_INIT = 1,
- ENA_ADAPTER_STATE_RUNNING = 2,
+ ENA_ADAPTER_STATE_RUNNING = 2,
ENA_ADAPTER_STATE_STOPPED = 3,
ENA_ADAPTER_STATE_CONFIG = 4,
+ ENA_ADAPTER_STATE_CLOSED = 5,
};
struct ena_driver_stats {
@@ -157,6 +170,7 @@ struct ena_adapter {
/* TX */
struct ena_ring tx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
int tx_ring_size;
+ u16 max_tx_sgl_size;
/* RX */
struct ena_ring rx_ring[ENA_MAX_NUM_QUEUES] __rte_cache_aligned;
@@ -180,6 +194,18 @@ struct ena_adapter {
uint64_t tx_selected_offloads;
uint64_t rx_supported_offloads;
uint64_t rx_selected_offloads;
+
+ bool link_status;
+
+ enum ena_regs_reset_reason_types reset_reason;
+
+ struct rte_timer timer_wd;
+ uint64_t timestamp_wd;
+ uint64_t keep_alive_timeout;
+
+ bool trigger_reset;
+
+ bool wd_state;
};
#endif /* _ENA_ETHDEV_H_ */
diff --git a/drivers/net/ena/meson.build b/drivers/net/ena/meson.build
new file mode 100644
index 00000000..091ca6e3
--- /dev/null
+++ b/drivers/net/ena/meson.build
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('ena_ethdev.c',
+ 'base/ena_com.c',
+ 'base/ena_eth_com.c')
+
+deps += ['timer']
+
+includes += include_directories('base', 'base/ena_defs')
diff --git a/drivers/net/enic/base/cq_desc.h b/drivers/net/enic/base/cq_desc.h
index 7e138027..ae8847c6 100644
--- a/drivers/net/enic/base/cq_desc.h
+++ b/drivers/net/enic/base/cq_desc.h
@@ -38,6 +38,7 @@ struct cq_desc {
#define CQ_DESC_TYPE_MASK ((1 << CQ_DESC_TYPE_BITS) - 1)
#define CQ_DESC_COLOR_MASK 1
#define CQ_DESC_COLOR_SHIFT 7
+#define CQ_DESC_COLOR_MASK_NOSHIFT 0x80
#define CQ_DESC_Q_NUM_BITS 10
#define CQ_DESC_Q_NUM_MASK ((1 << CQ_DESC_Q_NUM_BITS) - 1)
#define CQ_DESC_COMP_NDX_BITS 12
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 05b595eb..16e8814a 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -10,6 +10,7 @@
#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
+#include "vnic_nic.h"
#include "vnic_stats.h"
@@ -484,7 +485,7 @@ int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
 * Return true in filter_tags if supported
*/
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_tags)
+ u8 *filter_actions)
{
u64 args[4];
int err;
@@ -492,14 +493,10 @@ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
err = vnic_dev_advanced_filters_cap(vdev, args, 4);
- /* determine if filter tags are available */
- if (err)
- *filter_tags = 0;
- if ((args[2] == FILTER_CAP_MODE_V1) &&
- (args[3] & FILTER_ACTION_FILTER_ID_FLAG))
- *filter_tags = 1;
- else
- *filter_tags = 0;
+ /* determine supported filter actions */
+ *filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
+ if (args[2] == FILTER_CAP_MODE_V1)
+ *filter_actions = args[3];
if (err || ((args[0] == 1) && (args[1] == 0))) {
/* Adv filter Command not supported or adv filters available but
@@ -531,6 +528,22 @@ parse_max_level:
return 0;
}
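+/*
+ * Probe CMD_NIC_CFG capabilities: *cfg_chk is set when the checked variant
+ * (CMD_NIC_CFG_CHK) is available, *weak when the device reports
+ * CMD_NIC_CFG_CAPF_UDP_WEAK ("Bodega-style" UDP RSS).
+ */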
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak)
+{
+ u64 a0 = CMD_NIC_CFG, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ *cfg_chk = false;
+ *weak = false;
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err == 0 && a0 != 0 && a1 != 0) {
+ *cfg_chk = true;
+ *weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
+ }
+}
+
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
@@ -587,17 +600,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
u64 a0, a1;
int wait = 1000;
- static u32 instance;
- char name[NAME_MAX];
- if (!vdev->stats) {
- snprintf((char *)name, sizeof(name),
- "vnic_stats-%u", instance++);
- vdev->stats = vdev->alloc_consistent(vdev->priv,
- sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name);
- if (!vdev->stats)
- return -ENOMEM;
- }
+ if (!vdev->stats)
+ return -ENOMEM;
*stats = vdev->stats;
a0 = vdev->stats_pa;
@@ -922,6 +927,18 @@ u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
return vdev->intr_coal_timer_info.max_usec;
}
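+/*
+ * Allocate the stats DMA buffer once up front; vnic_dev_stats_dump() above
+ * now only reports -ENOMEM when this was never called.
+ */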
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
+{
+ char name[NAME_MAX];
+ static u32 instance;
+
+ snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
+ vdev->stats = vdev->alloc_consistent(vdev->priv,
+ sizeof(struct vnic_stats),
+ &vdev->stats_pa, (u8 *)name);
+ return vdev->stats == NULL ? -ENOMEM : 0;
+}
+
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
@@ -1044,3 +1061,36 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
return ret;
}
+
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+{
+ u64 a0 = overlay;
+ u64 a1 = config;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
+}
+
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+ u16 vxlan_udp_port_number)
+{
+ u64 a1 = vxlan_udp_port_number;
+ u64 a0 = overlay;
+ int wait = 1000;
+
+ return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
+}
+
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
+{
+ u64 a0 = VIC_FEATURE_VXLAN;
+ u64 a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+ /* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
+ return ret == 0 &&
+ (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
+ (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
+}
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 8c099206..270a47bd 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -6,6 +6,8 @@
#ifndef _VNIC_DEV_H_
#define _VNIC_DEV_H_
+#include <stdbool.h>
+
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -108,7 +110,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd);
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
- u8 *filter_tags);
+ u8 *filter_actions);
+void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
+ bool *weak);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -165,6 +169,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
+int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
int vnic_dev_get_size(void);
int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
@@ -177,10 +182,9 @@ int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_v2 *data, struct filter_action_v2 *action_v2);
-#ifdef ENIC_VXLAN
-int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev,
u8 overlay, u8 config);
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
u16 vxlan_udp_port_number);
-#endif
+int vnic_dev_capable_vxlan(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index 6b95bc48..a22d8a76 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -138,9 +138,27 @@ enum vnic_devcmd_cmd {
/* del VLAN id in (u16)a0 */
CMD_VLAN_DEL = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 15),
- /* nic_cfg in (u32)a0 */
+ /*
+ * nic_cfg in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
CMD_NIC_CFG = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+ /*
+ * nic_cfg_chk (same as nic_cfg, but may return error)
+ * in (u32)a0
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if a1 is valid
+ * (u64) a1= (NIC_CFG bits supported) | (flags << 32)
+ * (flags are CMD_NIC_CFG_CAPF_xxx)
+ */
+ CMD_NIC_CFG_CHK = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 16),
+
/* union vnic_rss_key in mem: (u64)a0=paddr, (u16)a1=len */
CMD_RSS_KEY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 17),
@@ -600,10 +618,14 @@ enum filter_cap_mode {
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
+/* flags for CMD_NIC_CFG */
+#define CMD_NIC_CFG_CAPF_UDP_WEAK (1ULL << 0) /* Bodega-style UDP RSS */
+
/* flags for CMD_PACKET_FILTER */
#define CMD_PFILTER_DIRECTED 0x01
#define CMD_PFILTER_MULTICAST 0x02
@@ -840,7 +862,9 @@ struct filter_action {
#define FILTER_ACTION_RQ_STEERING_FLAG (1 << 0)
#define FILTER_ACTION_FILTER_ID_FLAG (1 << 1)
+#define FILTER_ACTION_DROP_FLAG (1 << 2)
#define FILTER_ACTION_V2_ALL (FILTER_ACTION_RQ_STEERING_FLAG \
+ | FILTER_ACTION_DROP_FLAG \
| FILTER_ACTION_FILTER_ID_FLAG)
/* Version 2 of filter action must be a strict extension of struct filter_action
@@ -1078,6 +1102,18 @@ typedef enum {
} vic_feature_t;
/*
+ * These flags are used in args[1] of devcmd CMD_GET_SUPP_FEATURE_VER
+ * to indicate the host driver about the VxLAN and Multi WQ features
+ * supported
+ */
+#define FEATURE_VXLAN_IPV6_INNER (1 << 0)
+#define FEATURE_VXLAN_IPV6_OUTER (1 << 1)
+#define FEATURE_VXLAN_MULTI_WQ (1 << 2)
+
+#define FEATURE_VXLAN_IPV6 (FEATURE_VXLAN_IPV6_INNER | \
+ FEATURE_VXLAN_IPV6_OUTER)
+
+/*
* CMD_CONFIG_GRPINTR subcommands
*/
typedef enum {
diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h
index 26918335..901f3b46 100644
--- a/drivers/net/enic/base/vnic_enet.h
+++ b/drivers/net/enic/base/vnic_enet.h
@@ -52,6 +52,9 @@ struct vnic_enet_config {
#define VENETF_VXLAN 0x10000 /* VxLAN offload */
#define VENETF_NVGRE 0x20000 /* NVGRE offload */
#define VENETF_GRPINTR 0x40000 /* group interrupt */
+#define VENETF_NICSWITCH 0x80000 /* NICSWITCH enabled */
+#define VENETF_RSSHASH_UDPIPV4 0x100000 /* Hash on UDP + IPv4 fields */
+#define VENETF_RSSHASH_UDPIPV6 0x200000 /* Hash on UDP + IPv6 fields */
#define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */
#define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */
diff --git a/drivers/net/enic/base/vnic_nic.h b/drivers/net/enic/base/vnic_nic.h
index a753b3a5..16040852 100644
--- a/drivers/net/enic/base/vnic_nic.h
+++ b/drivers/net/enic/base/vnic_nic.h
@@ -27,12 +27,14 @@
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV4 (1 << 0)
#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD1 (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_RSVD2 (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_UDP_IPV6 (1 << 7)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index d774bb0d..d8e67f74 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -6,6 +6,7 @@
#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_
+#include <stdbool.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
@@ -51,6 +52,8 @@ struct vnic_rq {
struct vnic_dev *vdev;
struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
+ struct rte_mbuf **free_mbufs; /* reserve of free mbufs */
+ int num_free_mbufs;
struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
unsigned int mbuf_next_idx; /* next mb to consume */
void *os_buf_head;
@@ -69,6 +72,7 @@ struct vnic_rq {
struct rte_mbuf *pkt_last_seg;
unsigned int max_mbufs_per_pkt;
uint16_t tot_nb_desc;
+ bool need_initial_post;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c
index d61c4c6e..c9bf3572 100644
--- a/drivers/net/enic/base/vnic_wq.c
+++ b/drivers/net/enic/base/vnic_wq.c
@@ -32,8 +32,8 @@ static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
unsigned int count = wq->ring.desc_count;
/* Allocate the mbuf ring */
- wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs",
- sizeof(struct vnic_wq_buf) * count,
+ wq->bufs = (struct rte_mbuf **)rte_zmalloc_socket("wq->bufs",
+ sizeof(struct rte_mbuf *) * count,
RTE_CACHE_LINE_SIZE, wq->socket_id);
wq->head_idx = 0;
wq->tail_idx = 0;
@@ -113,6 +113,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
vnic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
+ wq->cq_pend = 0;
wq->last_completed_index = 0;
}
@@ -145,9 +146,9 @@ int vnic_wq_disable(struct vnic_wq *wq)
}
void vnic_wq_clean(struct vnic_wq *wq,
- void (*buf_clean)(struct vnic_wq_buf *buf))
+ void (*buf_clean)(struct rte_mbuf **buf))
{
- struct vnic_wq_buf *buf;
+ struct rte_mbuf **buf;
unsigned int to_clean = wq->tail_idx;
buf = &wq->bufs[to_clean];
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index 7c069c06..236cf696 100644
--- a/drivers/net/enic/base/vnic_wq.h
+++ b/drivers/net/enic/base/vnic_wq.h
@@ -36,23 +36,20 @@ struct vnic_wq_ctrl {
u32 pad9;
};
-/* 16 bytes */
-struct vnic_wq_buf {
- struct rte_mempool *pool;
- void *mb;
-};
-
struct vnic_wq {
unsigned int index;
+ uint64_t tx_offload_notsup_mask;
struct vnic_dev *vdev;
struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
- struct vnic_wq_buf *bufs;
+ struct rte_mbuf **bufs;
unsigned int head_idx;
+ unsigned int cq_pend;
unsigned int tail_idx;
unsigned int socket_id;
const struct rte_memzone *cqmsg_rz;
uint16_t last_completed_index;
+ uint64_t offloads;
};
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
@@ -163,5 +160,5 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq,
- void (*buf_clean)(struct vnic_wq_buf *buf));
+ void (*buf_clean)(struct rte_mbuf **buf));
#endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c083985e..7c27bd51 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -17,6 +17,7 @@
#include "vnic_rss.h"
#include "enic_res.h"
#include "cq_enet_desc.h"
+#include <stdbool.h>
#include <sys/queue.h>
#include <rte_spinlock.h>
@@ -49,6 +50,18 @@
#define ENICPMD_FDIR_MAX 64
+/* HW default VXLAN port */
+#define ENIC_DEFAULT_VXLAN_PORT 4789
+
+/*
+ * Interrupt 0: LSC and errors
+ * Interrupt 1: rx queue 0
+ * Interrupt 2: rx queue 1
+ * ...
+ */
+#define ENICPMD_LSC_INTR_OFFSET 0
+#define ENICPMD_RXQ_INTR_OFFSET 1
+
struct enic_fdir_node {
struct rte_eth_fdir_filter filter;
u16 fltr_id;
@@ -92,6 +105,7 @@ struct enic {
struct vnic_dev *vdev;
unsigned int port_id;
+ bool overlay_offload;
struct rte_eth_dev *rte_dev;
struct enic_fdir fdir;
char bdf_name[ENICPMD_BDF_LENGTH];
@@ -109,7 +123,13 @@ struct enic {
u16 max_mtu;
u8 adv_filters;
u32 flow_filter_mode;
- u8 filter_tags;
+ u8 filter_actions; /* HW supported actions */
+ bool vxlan;
+ bool disable_overlay; /* devargs disable_overlay=1 */
+ bool nic_cfg_chk; /* NIC_CFG_CHK available */
+ bool udp_rss_weak; /* Bodega style UDP RSS */
+ uint8_t ig_vlan_rewrite_mode; /* devargs ig-vlan-rewrite */
+ uint16_t vxlan_port; /* current vxlan port pushed to NIC */
unsigned int flags;
unsigned int priv_flags;
@@ -126,9 +146,9 @@ struct enic {
struct vnic_cq *cq;
unsigned int cq_count; /* equals rq_count + wq_count */
- /* interrupt resource */
- struct vnic_intr intr;
- unsigned int intr_count;
+ /* interrupt vectors (len = conf_intr_count) */
+ struct vnic_intr *intr;
+ unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */
/* software counters */
struct enic_soft_stats soft_stats;
@@ -146,8 +166,34 @@ struct enic {
LIST_HEAD(enic_flows, rte_flow) flows;
rte_spinlock_t flows_lock;
+
+ /* RSS */
+ uint16_t reta_size;
+ uint8_t hash_key_size;
+ uint64_t flow_type_rss_offloads; /* 0 indicates RSS not supported */
+ /*
+ * Keep a copy of current RSS config for queries, as we cannot retrieve
+ * it from the NIC.
+ */
+ uint8_t rss_hash_type; /* NIC_CFG_RSS_HASH_TYPE flags */
+ uint8_t rss_enable;
+ uint64_t rss_hf; /* ETH_RSS flags */
+ union vnic_rss_key rss_key;
+ union vnic_rss_cpu rss_cpu;
+
+ uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
+ uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_offload_mask; /* PKT_TX flags accepted */
};
+/* Compute ethdev's max packet size from MTU */
+static inline uint32_t enic_mtu_to_max_rx_pktlen(uint32_t mtu)
+{
+ /* ethdev max size includes eth and crc whereas NIC MTU does not */
+ return mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+}
+
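+/*
+ * For example, with the standard 1500-byte MTU this yields
+ * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1518 bytes,
+ * the classic maximum Ethernet frame size.
+ */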
/* Get the CQ index from a Start of Packet(SOP) RQ index */
static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
{
@@ -220,53 +266,61 @@ enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
return idx;
}
-extern void enic_fdir_stats_get(struct enic *enic,
- struct rte_eth_fdir_stats *stats);
-extern int enic_fdir_add_fltr(struct enic *enic,
- struct rte_eth_fdir_filter *params);
-extern int enic_fdir_del_fltr(struct enic *enic,
- struct rte_eth_fdir_filter *params);
-extern void enic_free_wq(void *txq);
-extern int enic_alloc_intr_resources(struct enic *enic);
-extern int enic_setup_finish(struct enic *enic);
-extern int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
- unsigned int socket_id, uint16_t nb_desc);
-extern void enic_start_wq(struct enic *enic, uint16_t queue_idx);
-extern int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
-extern void enic_start_rq(struct enic *enic, uint16_t queue_idx);
-extern int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
-extern void enic_free_rq(void *rxq);
-extern int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
- unsigned int socket_id, struct rte_mempool *mp,
- uint16_t nb_desc, uint16_t free_thresh);
-extern int enic_set_rss_nic_cfg(struct enic *enic);
-extern int enic_set_vnic_res(struct enic *enic);
-extern int enic_enable(struct enic *enic);
-extern int enic_disable(struct enic *enic);
-extern void enic_remove(struct enic *enic);
-extern int enic_get_link_status(struct enic *enic);
-extern int enic_dev_stats_get(struct enic *enic,
- struct rte_eth_stats *r_stats);
-extern void enic_dev_stats_clear(struct enic *enic);
-extern void enic_add_packet_filter(struct enic *enic);
+void enic_fdir_stats_get(struct enic *enic,
+ struct rte_eth_fdir_stats *stats);
+int enic_fdir_add_fltr(struct enic *enic,
+ struct rte_eth_fdir_filter *params);
+int enic_fdir_del_fltr(struct enic *enic,
+ struct rte_eth_fdir_filter *params);
+void enic_free_wq(void *txq);
+int enic_alloc_intr_resources(struct enic *enic);
+int enic_setup_finish(struct enic *enic);
+int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, uint16_t nb_desc);
+void enic_start_wq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
+void enic_start_rq(struct enic *enic, uint16_t queue_idx);
+int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
+void enic_free_rq(void *rxq);
+int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
+ unsigned int socket_id, struct rte_mempool *mp,
+ uint16_t nb_desc, uint16_t free_thresh);
+int enic_set_vnic_res(struct enic *enic);
+int enic_init_rss_nic_cfg(struct enic *enic);
+int enic_set_rss_conf(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf);
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
+int enic_set_vlan_strip(struct enic *enic);
+int enic_enable(struct enic *enic);
+int enic_disable(struct enic *enic);
+void enic_remove(struct enic *enic);
+int enic_get_link_status(struct enic *enic);
+int enic_dev_stats_get(struct enic *enic,
+ struct rte_eth_stats *r_stats);
+void enic_dev_stats_clear(struct enic *enic);
+void enic_add_packet_filter(struct enic *enic);
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
-void enic_del_mac_address(struct enic *enic, int mac_index);
-extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
-extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
- struct rte_mbuf *tx_pkt, unsigned short len,
- uint8_t sop, uint8_t eop, uint8_t cq_entry,
- uint16_t ol_flags, uint16_t vlan_tag);
-
-extern void enic_post_wq_index(struct vnic_wq *wq);
-extern int enic_probe(struct enic *enic);
-extern int enic_clsf_init(struct enic *enic);
-extern void enic_clsf_destroy(struct enic *enic);
+int enic_del_mac_address(struct enic *enic, int mac_index);
+unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
+void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
+ struct rte_mbuf *tx_pkt, unsigned short len,
+ uint8_t sop, uint8_t eop, uint8_t cq_entry,
+ uint16_t ol_flags, uint16_t vlan_tag);
+
+void enic_post_wq_index(struct vnic_wq *wq);
+int enic_probe(struct enic *enic);
+int enic_clsf_init(struct enic *enic);
+void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t enic_dummy_recv_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 3ef1d083..9d95201e 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -111,7 +111,6 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
struct rte_eth_fdir_masks *masks)
{
struct filter_generic_1 *gp = &fltr->u.generic_1;
- int i;
fltr->type = FILTER_DPDK_1;
memset(gp, 0, sizeof(*gp));
@@ -273,18 +272,14 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
ipv6_mask.proto = masks->ipv6_mask.proto;
ipv6_val.proto = input->flow.ipv6_flow.proto;
}
- for (i = 0; i < 4; i++) {
- *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
- masks->ipv6_mask.src_ip[i];
- *(uint32_t *)&ipv6_val.src_addr[i * 4] =
- input->flow.ipv6_flow.src_ip[i];
- }
- for (i = 0; i < 4; i++) {
- *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
- masks->ipv6_mask.src_ip[i];
- *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
- input->flow.ipv6_flow.dst_ip[i];
- }
+ memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
+ sizeof(ipv6_mask.src_addr));
+ memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
+ sizeof(ipv6_val.src_addr));
+ memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
+ sizeof(ipv6_mask.dst_addr));
+ memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
+ sizeof(ipv6_val.dst_addr));
if (input->flow.ipv6_flow.tc) {
ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h
index c0af1ed2..ceb1b096 100644
--- a/drivers/net/enic/enic_compat.h
+++ b/drivers/net/enic/enic_compat.h
@@ -56,6 +56,11 @@
#define dev_debug(x, args...) dev_printk(DEBUG, args)
extern int enicpmd_logtype_flow;
+extern int enicpmd_logtype_init;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
+ "%s" fmt "\n", __func__, ##args)
#define __le16 u16
#define __le32 u32
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index d84714ef..b3d57771 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -11,6 +11,7 @@
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
#include <rte_string_fns.h>
#include "vnic_intr.h"
@@ -23,10 +24,6 @@
int enicpmd_logtype_init;
int enicpmd_logtype_flow;
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
- "%s" fmt "\n", __func__, ##args)
-
#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
/*
@@ -39,9 +36,10 @@ static const struct rte_pci_id pci_id_enic_map[] = {
{.vendor_id = 0, /* sentinel */},
};
-RTE_INIT(enicpmd_init_log);
-static void
-enicpmd_init_log(void)
+#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
+#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
+
+RTE_INIT(enicpmd_init_log)
{
enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
if (enicpmd_logtype_init >= 0)
@@ -181,17 +179,21 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ struct vnic_wq *wq;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
RTE_ASSERT(queue_idx < enic->conf_wq_count);
- eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+ wq = &enic->wq[queue_idx];
+ wq->offloads = tx_conf->offloads |
+ eth_dev->data->dev_conf.txmode.offloads;
+ eth_dev->data->tx_queues[queue_idx] = (void *)wq;
ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
if (ret) {
@@ -318,52 +320,40 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
return enicpmd_dev_setup_intr(enic);
}
-static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
- uint16_t vlan_id, int on)
-{
- struct enic *enic = pmd_priv(eth_dev);
- int err;
-
- ENICPMD_FUNC_TRACE();
- if (on)
- err = enic_add_vlan(enic, vlan_id);
- else
- err = enic_del_vlan(enic, vlan_id);
- return err;
-}
-
static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct enic *enic = pmd_priv(eth_dev);
+ uint64_t offloads;
ENICPMD_FUNC_TRACE();
+ offloads = eth_dev->data->dev_conf.rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
enic->ig_vlan_strip_en = 1;
else
enic->ig_vlan_strip_en = 0;
}
- enic_set_rss_nic_cfg(enic);
-
- if (mask & ETH_VLAN_FILTER_MASK) {
+ if ((mask & ETH_VLAN_FILTER_MASK) &&
+ (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
dev_warning(enic,
"Configuration of VLAN filter is not supported\n");
}
- if (mask & ETH_VLAN_EXTEND_MASK) {
+ if ((mask & ETH_VLAN_EXTEND_MASK) &&
+ (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
dev_warning(enic,
"Configuration of extended VLAN is not supported\n");
}
- return 0;
+ return enic_set_vlan_strip(enic);
}
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
int ret;
+ int mask;
struct enic *enic = pmd_priv(eth_dev);
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -378,9 +368,21 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CHECKSUM);
- ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
-
- return ret;
+ /* All vlan offload masks to apply the current settings */
+ mask = ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = enicpmd_vlan_offload_set(eth_dev, mask);
+ if (ret) {
+ dev_err(enic, "Failed to configure VLAN offloads\n");
+ return ret;
+ }
+ /*
+ * Initialize RSS with the default reta and key. If the user key is
+ * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the
+ * default key.
+ */
+ return enic_init_rss_nic_cfg(enic);
}
/* Start the device.
@@ -410,10 +412,9 @@ static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
ENICPMD_FUNC_TRACE();
enic_disable(enic);
+
memset(&link, 0, sizeof(link));
- rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
- *(uint64_t *)&eth_dev->data->dev_link,
- *(uint64_t *)&link);
+ rte_eth_linkstatus_set(eth_dev, &link);
}
/*
@@ -459,27 +460,52 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
device_info->min_rx_bufsize = ENIC_MIN_MTU;
- device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
+ /* "Max" mtu is not a typo. HW receives packet sizes up to the
+ * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
+ * a hint to the driver to size receive buffers accordingly so that
+ * larger-than-vnic-mtu packets get truncated. For DPDK, we let
+ * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+ * ignoring vNIC mtu.
+ */
+ device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
- device_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- device_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ device_info->rx_offload_capa = enic->rx_offload_capa;
+ device_info->tx_offload_capa = enic->tx_offload_capa;
+ device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
+ device_info->reta_size = enic->reta_size;
+ device_info->hash_key_size = enic->hash_key_size;
+ device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
+ device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = enic->config.rq_desc_count,
+ .nb_min = ENIC_MIN_RQ_DESCS,
+ .nb_align = ENIC_ALIGN_DESCS,
+ };
+ device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = enic->config.wq_desc_count,
+ .nb_min = ENIC_MIN_WQ_DESCS,
+ .nb_align = ENIC_ALIGN_DESCS,
+ .nb_seg_max = ENIC_TX_XMIT_MAX,
+ .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
+ };
+ device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
+ .burst_size = ENIC_DEFAULT_RX_BURST,
+ .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
+ ENIC_DEFAULT_RX_RING_SIZE),
+ .nb_queues = ENIC_DEFAULT_RX_RINGS,
+ };
+ device_info->default_txportconf = (struct rte_eth_dev_portconf) {
+ .burst_size = ENIC_DEFAULT_TX_BURST,
+ .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
+ ENIC_DEFAULT_TX_RING_SIZE),
+ .nb_queues = ENIC_DEFAULT_TX_RINGS,
+ };
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
@@ -496,7 +522,8 @@ static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_UNKNOWN
};
- if (dev->rx_pkt_burst == enic_recv_pkts)
+ if (dev->rx_pkt_burst == enic_recv_pkts ||
+ dev->rx_pkt_burst == enic_noscatter_recv_pkts)
return ptypes;
return NULL;
}
@@ -571,7 +598,24 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
return;
ENICPMD_FUNC_TRACE();
- enic_del_mac_address(enic, index);
+ if (enic_del_mac_address(enic, index))
+ dev_err(enic, "del mac addr failed\n");
+}
+
+static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
+ struct ether_addr *addr)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
+ ENICPMD_FUNC_TRACE();
+ ret = enic_del_mac_address(enic, 0);
+ if (ret)
+ return ret;
+ return enic_set_mac_address(enic, addr->addr_bytes);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
@@ -582,6 +626,242 @@ static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
return enic_set_mtu(enic, mtu);
}
+static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64
+ *reta_conf,
+ uint16_t reta_size)
+{
+ struct enic *enic = pmd_priv(dev);
+ uint16_t i, idx, shift;
+
+ ENICPMD_FUNC_TRACE();
+ if (reta_size != ENIC_RSS_RETA_SIZE) {
+ dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
+ reta_size, ENIC_RSS_RETA_SIZE);
+ return -EINVAL;
+ }
+
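+	/*
+	 * reta_conf[] groups entries RTE_RETA_GROUP_SIZE (64) at a time;
+	 * e.g. table entry 70 is reta_conf[1].reta[6] with mask bit
+	 * (1ULL << 6). The NIC-side table packs 4 entries per word,
+	 * hence cpu[i / 4].b[i % 4].
+	 */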
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
+ enic->rss_cpu.cpu[i / 4].b[i % 4]);
+ }
+
+ return 0;
+}
+
+static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64
+ *reta_conf,
+ uint16_t reta_size)
+{
+ struct enic *enic = pmd_priv(dev);
+ union vnic_rss_cpu rss_cpu;
+ uint16_t i, idx, shift;
+
+ ENICPMD_FUNC_TRACE();
+ if (reta_size != ENIC_RSS_RETA_SIZE) {
+ dev_err(enic, "reta_update: wrong reta_size. given=%u"
+ " expected=%u\n",
+ reta_size, ENIC_RSS_RETA_SIZE);
+ return -EINVAL;
+ }
+ /*
+ * Start with the current reta and modify it per reta_conf, as we
+ * need to push the entire reta even if we only modify one entry.
+ */
+ rss_cpu = enic->rss_cpu;
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (reta_conf[idx].mask & (1ULL << shift))
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(
+ reta_conf[idx].reta[shift]);
+ }
+ return enic_set_rss_reta(enic, &rss_cpu);
+}
+
+static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ return enic_set_rss_conf(enic, rss_conf);
+}
+
+static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct enic *enic = pmd_priv(dev);
+
+ ENICPMD_FUNC_TRACE();
+ if (rss_conf == NULL)
+ return -EINVAL;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
+ " expected=%u+\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ rss_conf->rss_hf = enic->rss_hf;
+ if (rss_conf->rss_key != NULL) {
+ int i;
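+		/*
+		 * union vnic_rss_key stores the 40-byte key as four padded
+		 * 10-byte subkeys, so byte i of the flat key lives at
+		 * key[i / 10].b[i % 10].
+		 */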
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
+ rss_conf->rss_key[i] =
+ enic->rss_key.key[i / 10].b[i % 10];
+ }
+ rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ return 0;
+}
+
+static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct enic *enic = pmd_priv(dev);
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ struct rte_eth_rxconf *conf;
+ uint16_t sop_queue_idx;
+ uint16_t data_queue_idx;
+
+ ENICPMD_FUNC_TRACE();
+ sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+ data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+ rq_sop = &enic->rq[sop_queue_idx];
+ rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
+ qinfo->mp = rq_sop->mp;
+ qinfo->scattered_rx = rq_sop->data_queue_enable;
+ qinfo->nb_desc = rq_sop->ring.desc_count;
+ if (qinfo->scattered_rx)
+ qinfo->nb_desc += rq_data->ring.desc_count;
+ conf = &qinfo->conf;
+ memset(conf, 0, sizeof(*conf));
+ conf->rx_free_thresh = rq_sop->rx_free_thresh;
+ conf->rx_drop_en = 1;
+ /*
+ * Except VLAN stripping (port setting), all the checksum offloads
+ * are always enabled.
+ */
+ conf->offloads = enic->rx_offload_capa;
+ if (!enic->ig_vlan_strip_en)
+ conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ /* rx_thresh and other fields are not applicable for enic */
+}
+
+static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct enic *enic = pmd_priv(dev);
+ struct vnic_wq *wq = &enic->wq[tx_queue_id];
+
+ ENICPMD_FUNC_TRACE();
+ qinfo->nb_desc = wq->ring.desc_count;
+ memset(&qinfo->conf, 0, sizeof(qinfo->conf));
+ qinfo->conf.offloads = wq->offloads;
+ /* tx_thresh, and all the other fields are not applicable for enic */
+}
+
+static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+ return 0;
+}
+
+static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+
+ ENICPMD_FUNC_TRACE();
+ vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+ return 0;
+}
+
+static int udp_tunnel_common_check(struct enic *enic,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
+ return -ENOTSUP;
+ if (!enic->overlay_offload) {
+ PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
+ "supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int update_vxlan_port(struct enic *enic, uint16_t port)
+{
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ port)) {
+ PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
+ enic->vxlan_port = port;
+ return 0;
+}
+
+static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * The NIC has 1 configurable VXLAN port number. "Adding" a new port
+ * number replaces it.
+ */
+ if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
+ PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, tnl->udp_port);
+}
+
+static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * Clear the previously set port number and restore the
+ * hardware default port number. Some drivers disable VXLAN
+ * offloads when there are no configured port numbers. But
+ * enic does not do that as VXLAN is part of overlay offload,
+ * which is tied to inner RSS and TSO.
+ */
+ if (tnl->udp_port != enic->vxlan_port) {
+ PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
+}
+
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_configure = enicpmd_dev_configure,
.dev_start = enicpmd_dev_start,
@@ -600,7 +880,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_infos_get = enicpmd_dev_info_get,
.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
.mtu_set = enicpmd_mtu_set,
- .vlan_filter_set = enicpmd_vlan_filter_set,
+ .vlan_filter_set = NULL,
.vlan_tpid_set = NULL,
.vlan_offload_set = enicpmd_vlan_offload_set,
.vlan_strip_queue_set = NULL,
@@ -614,6 +894,10 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.rx_descriptor_done = NULL,
.tx_queue_setup = enicpmd_dev_tx_queue_setup,
.tx_queue_release = enicpmd_dev_tx_queue_release,
+ .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
+ .rxq_info_get = enicpmd_dev_rxq_info_get,
+ .txq_info_get = enicpmd_dev_txq_info_get,
.dev_led_on = NULL,
.dev_led_off = NULL,
.flow_ctrl_get = NULL,
@@ -621,9 +905,97 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.priority_flow_ctrl_set = NULL,
.mac_addr_add = enicpmd_add_mac_addr,
.mac_addr_remove = enicpmd_remove_mac_addr,
+ .mac_addr_set = enicpmd_set_mac_addr,
.filter_ctrl = enicpmd_dev_filter_ctrl,
+ .reta_query = enicpmd_dev_rss_reta_query,
+ .reta_update = enicpmd_dev_rss_reta_update,
+ .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
+ .rss_hash_update = enicpmd_dev_rss_hash_update,
+ .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
};
+static int enic_parse_disable_overlay(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "0") == 0) {
+ enic->disable_overlay = false;
+ } else if (strcmp(value, "1") == 0) {
+ enic->disable_overlay = true;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
+ ": expected=0|1 given=%s\n", value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
+ const char *value,
+ void *opaque)
+{
+ struct enic *enic;
+
+ enic = (struct enic *)opaque;
+ if (strcmp(value, "trunk") == 0) {
+ /* Trunk mode: always tag */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
+ } else if (strcmp(value, "untag") == 0) {
+ /* Untag default VLAN mode: untag if VLAN = default VLAN */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "priority") == 0) {
+ /*
+ * Priority-tag default VLAN mode: priority tag (VLAN header
+ * with ID=0) if VLAN = default
+ */
+ enic->ig_vlan_rewrite_mode =
+ IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
+ } else if (strcmp(value, "pass") == 0) {
+ /* Pass through mode: do not touch tags */
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ } else {
+ dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
+ ": expected=trunk|untag|priority|pass given=%s\n",
+ value);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int enic_check_devargs(struct rte_eth_dev *dev)
+{
+ static const char *const valid_keys[] = {
+ ENIC_DEVARG_DISABLE_OVERLAY,
+ ENIC_DEVARG_IG_VLAN_REWRITE,
+ NULL};
+ struct enic *enic = pmd_priv(dev);
+ struct rte_kvargs *kvlist;
+
+ ENICPMD_FUNC_TRACE();
+
+ enic->disable_overlay = false;
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
+ if (!dev->device->devargs)
+ return 0;
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
+ if (!kvlist)
+ return -EINVAL;
+ if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
+ enic_parse_disable_overlay, enic) < 0 ||
+ rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
+ enic_parse_ig_vlan_rewrite, enic) < 0) {
+ rte_kvargs_free(kvlist);
+ return -EINVAL;
+ }
+ rte_kvargs_free(kvlist);
+ return 0;
+}
+
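+/*
+ * Both devargs are given as part of the PCI device argument string,
+ * for example (PCI address illustrative):
+ * -w 0000:0b:00.0,disable-overlay=1,ig-vlan-rewrite=untag
+ */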
struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
* It returns 0 on success.
@@ -633,6 +1005,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pdev;
struct rte_pci_addr *addr;
struct enic *enic = pmd_priv(eth_dev);
+ int err;
ENICPMD_FUNC_TRACE();
@@ -651,6 +1024,9 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
addr->domain, addr->bus, addr->devid, addr->function);
+ err = enic_check_devargs(eth_dev);
+ if (err)
+ return err;
return enic_probe(enic);
}
@@ -676,3 +1052,6 @@ static struct rte_pci_driver rte_enic_pmd = {
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_enic,
+ ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
+ ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index 28923b0e..0cf04aef 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -3,6 +3,7 @@
*/
#include <errno.h>
+#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
@@ -273,21 +274,33 @@ static const enum rte_flow_action_type enic_supported_actions_v1[] = {
};
/** Supported actions for newer NICs */
-static const enum rte_flow_action_type enic_supported_actions_v2[] = {
+static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
RTE_FLOW_ACTION_TYPE_QUEUE,
RTE_FLOW_ACTION_TYPE_MARK,
RTE_FLOW_ACTION_TYPE_FLAG,
RTE_FLOW_ACTION_TYPE_END,
};
+static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
[FILTER_ACTION_RQ_STEERING_FLAG] = {
.actions = enic_supported_actions_v1,
.copy_fn = enic_copy_action_v1,
},
- [FILTER_ACTION_V2_ALL] = {
- .actions = enic_supported_actions_v2,
+ [FILTER_ACTION_FILTER_ID_FLAG] = {
+ .actions = enic_supported_actions_v2_id,
+ .copy_fn = enic_copy_action_v2,
+ },
+ [FILTER_ACTION_DROP_FLAG] = {
+ .actions = enic_supported_actions_v2_drop,
.copy_fn = enic_copy_action_v2,
},
};
@@ -544,16 +557,21 @@ enic_copy_item_vlan_v2(const struct rte_flow_item *item,
if (!spec)
return 0;
- /* Don't support filtering in tpid */
- if (mask) {
- if (mask->tpid != 0)
- return ENOTSUP;
- } else {
+ if (!mask)
mask = &rte_flow_item_vlan_mask;
- RTE_ASSERT(mask->tpid == 0);
- }
if (*inner_ofst == 0) {
+ struct ether_hdr *eth_mask =
+ (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
+ struct ether_hdr *eth_val =
+ (void *)gp->layer[FILTER_GENERIC_1_L2].val;
+
+ /* Outer TPID cannot be matched */
+ if (eth_mask->ether_type)
+ return ENOTSUP;
+ eth_mask->ether_type = mask->inner_type;
+ eth_val->ether_type = spec->inner_type;
+
/* Outer header. Use the vlan mask/val fields */
gp->mask_vlan = mask->tci;
gp->val_vlan = spec->tci;
@@ -952,6 +970,9 @@ static int
enic_copy_action_v1(const struct rte_flow_action actions[],
struct filter_action_v2 *enic_action)
{
+ enum { FATE = 1, };
+ uint32_t overlap = 0;
+
FLOW_TRACE();
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -963,6 +984,10 @@ enic_copy_action_v1(const struct rte_flow_action actions[],
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
+
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
enic_action->rq_idx =
enic_rte_rq_idx_to_sop_idx(queue->index);
break;
@@ -972,6 +997,8 @@ enic_copy_action_v1(const struct rte_flow_action actions[],
break;
}
}
+ if (!(overlap & FATE))
+ return ENOTSUP;
enic_action->type = FILTER_ACTION_RQ_STEERING;
return 0;
}
@@ -989,6 +1016,9 @@ static int
enic_copy_action_v2(const struct rte_flow_action actions[],
struct filter_action_v2 *enic_action)
{
+ enum { FATE = 1, MARK = 2, };
+ uint32_t overlap = 0;
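+	/*
+	 * 'overlap' tracks which action classes have been seen: at most
+	 * one fate action (QUEUE or DROP) and one mark action (MARK or
+	 * FLAG) are allowed per flow.
+	 */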
+
FLOW_TRACE();
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
@@ -997,6 +1027,10 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
actions->conf;
+
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
enic_action->rq_idx =
enic_rte_rq_idx_to_sop_idx(queue->index);
enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
@@ -1007,6 +1041,9 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
(const struct rte_flow_action_mark *)
actions->conf;
+ if (overlap & MARK)
+ return ENOTSUP;
+ overlap |= MARK;
/* ENIC_MAGIC_FILTER_ID is reserved and is the highest
* in the range of allowed mark ids.
*/
@@ -1017,10 +1054,20 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
break;
}
case RTE_FLOW_ACTION_TYPE_FLAG: {
+ if (overlap & MARK)
+ return ENOTSUP;
+ overlap |= MARK;
enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
break;
}
+ case RTE_FLOW_ACTION_TYPE_DROP: {
+ if (overlap & FATE)
+ return ENOTSUP;
+ overlap |= FATE;
+ enic_action->flags |= FILTER_ACTION_DROP_FLAG;
+ break;
+ }
case RTE_FLOW_ACTION_TYPE_VOID:
continue;
default:
@@ -1028,6 +1075,8 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
break;
}
}
+ if (!(overlap & FATE))
+ return ENOTSUP;
enic_action->type = FILTER_ACTION_V2;
return 0;
}
@@ -1059,10 +1108,14 @@ enic_get_filter_cap(struct enic *enic)
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
- static const struct enic_action_cap *ea;
-
- if (enic->filter_tags)
- ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
+ const struct enic_action_cap *ea;
+ uint8_t actions;
+
+ actions = enic->filter_actions;
+ if (actions & FILTER_ACTION_DROP_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
+ else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
+ ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
else
ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
return ea;
@@ -1268,6 +1321,12 @@ enic_flow_parse(struct rte_eth_dev *dev,
NULL,
"egress is not supported");
return -rte_errno;
+ } else if (attrs->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ return -rte_errno;
} else if (!attrs->ingress) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index ec9d343f..fd940c58 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -69,12 +69,12 @@ enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq)
}
}
-static void enic_free_wq_buf(struct vnic_wq_buf *buf)
+static void enic_free_wq_buf(struct rte_mbuf **buf)
{
- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb;
+ struct rte_mbuf *mbuf = *buf;
rte_pktmbuf_free_seg(mbuf);
- buf->mb = NULL;
+ *buf = NULL;
}
static void enic_log_q_error(struct enic *enic)
@@ -162,13 +162,12 @@ int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
return 0;
}
-void enic_del_mac_address(struct enic *enic, int mac_index)
+int enic_del_mac_address(struct enic *enic, int mac_index)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
- if (vnic_dev_del_addr(enic->vdev, mac_addr))
- dev_err(enic, "del mac addr failed\n");
+ return vnic_dev_del_addr(enic->vdev, mac_addr);
}
int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
@@ -200,10 +199,15 @@ void enic_init_vnic_resources(struct enic *enic)
{
unsigned int error_interrupt_enable = 1;
unsigned int error_interrupt_offset = 0;
+ unsigned int rxq_interrupt_enable = 0;
+ unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
unsigned int index = 0;
unsigned int cq_idx;
struct vnic_rq *data_rq;
+ if (enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ rxq_interrupt_enable = 1;
+
for (index = 0; index < enic->rq_count; index++) {
cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
@@ -225,11 +229,13 @@ void enic_init_vnic_resources(struct enic *enic)
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
- 0 /* interrupt_enable */,
+ rxq_interrupt_enable,
1 /* cq_entry_enable */,
0 /* cq_message_enable */,
- 0 /* interrupt offset */,
+ rxq_interrupt_offset,
0 /* cq_message_addr */);
+ if (rxq_interrupt_enable)
+ rxq_interrupt_offset++;
}
for (index = 0; index < enic->wq_count; index++) {
@@ -237,6 +243,9 @@ void enic_init_vnic_resources(struct enic *enic)
enic_cq_wq(enic, index),
error_interrupt_enable,
error_interrupt_offset);
+ /* Compute unsupported ol flags for enic_prep_pkts() */
+ enic->wq[index].tx_offload_notsup_mask =
+ PKT_TX_OFFLOAD_MASK ^ enic->tx_offload_mask;
cq_idx = enic_cq_wq(enic, index);
vnic_cq_init(&enic->cq[cq_idx],
@@ -252,10 +261,12 @@ void enic_init_vnic_resources(struct enic *enic)
(u64)enic->wq[index].cqmsg_rz->iova);
}
- vnic_intr_init(&enic->intr,
- enic->config.intr_timer_usec,
- enic->config.intr_timer_type,
- /*mask_on_assertion*/1);
+ for (index = 0; index < enic->intr_count; index++) {
+ vnic_intr_init(&enic->intr[index],
+ enic->config.intr_timer_usec,
+ enic->config.intr_timer_type,
+ /*mask_on_assertion*/1);
+ }
}
@@ -266,6 +277,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
struct rq_enet_desc *rqd = rq->ring.descs;
unsigned i;
dma_addr_t dma_addr;
+ uint32_t max_rx_pkt_len;
+ uint16_t rq_buf_len;
if (!rq->in_use)
return 0;
@@ -273,6 +286,18 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
rq->ring.desc_count);
+ /*
+ * If *not* using scatter and the mbuf size is greater than the
+ * requested max packet size (max_rx_pkt_len), then reduce the
+ * posted buffer size to max_rx_pkt_len. HW still receives packets
+ * larger than max_rx_pkt_len, but they will be truncated, which we
+ * drop in the rx handler. Not ideal, but better than returning
+ * large packets when the user is not expecting them.
+ */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
+ if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
+ rq_buf_len = max_rx_pkt_len;
for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
mb = rte_mbuf_raw_alloc(rq->mp);
if (mb == NULL) {
@@ -287,9 +312,29 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
rq_enet_desc_enc(rqd, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
: RQ_ENET_TYPE_NOT_SOP),
- mb->buf_len - RTE_PKTMBUF_HEADROOM);
+ rq_buf_len);
rq->mbuf_ring[i] = mb;
}
+ /*
+ * Do not post the buffers to the NIC until we enable the RQ via
+ * enic_start_rq().
+ */
+ rq->need_initial_post = true;
+ /* Initialize fetch index while RQ is disabled */
+ iowrite32(0, &rq->ctrl->fetch_index);
+ return 0;
+}
+
+/*
+ * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
+ * allocated the buffers and filled the RQ descriptor ring. Just need to push
+ * the post index to the NIC.
+ */
+static void
+enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
+{
+ if (!rq->in_use || !rq->need_initial_post)
+ return;
/* make sure all prior writes are complete before doing the PIO write */
rte_rmb();
@@ -302,11 +347,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
iowrite32(rq->posted_index, &rq->ctrl->posted_index);
- iowrite32(0, &rq->ctrl->fetch_index);
rte_rmb();
-
- return 0;
-
+ rq->need_initial_post = false;
}
static void *
@@ -319,8 +361,8 @@ enic_alloc_consistent(void *priv, size_t size,
struct enic *enic = (struct enic *)priv;
struct enic_memzone_entry *mze;
- rz = rte_memzone_reserve_aligned((const char *)name,
- size, SOCKET_ID_ANY, 0, ENIC_ALIGN);
+ rz = rte_memzone_reserve_aligned((const char *)name, size,
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
if (!rz) {
pr_err("%s : Failed to allocate memory requested for %s\n",
__func__, name);
@@ -379,16 +421,14 @@ enic_free_consistent(void *priv,
int enic_link_update(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
- int ret;
- int link_status = 0;
+ struct rte_eth_link link;
- link_status = enic_get_link_status(enic);
- ret = (link_status == enic->link_status);
- enic->link_status = link_status;
- eth_dev->data->dev_link.link_status = link_status;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- return ret;
+ memset(&link, 0, sizeof(link));
+ link.link_status = enic_get_link_status(enic);
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = vnic_dev_port_speed(enic->vdev);
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
static void
@@ -397,13 +437,98 @@ enic_intr_handler(void *arg)
struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
struct enic *enic = pmd_priv(dev);
- vnic_intr_return_all_credits(&enic->intr);
+ vnic_intr_return_all_credits(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
enic_link_update(enic);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
}
+static int enic_rxq_intr_init(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+ uint32_t rxq_intr_count, i;
+ int err;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ if (!enic->rte_dev->data->dev_conf.intr_conf.rxq)
+ return 0;
+ /*
+ * Rx queue interrupts only work when we have MSI-X interrupts,
+ * one per queue. Sharing one interrupt is technically
+ * possible with VIC, but it is not worth the complications it brings.
+ */
+ if (!rte_intr_cap_multiple(intr_handle)) {
+ dev_err(enic, "Rx queue interrupts require MSI-X interrupts"
+ " (vfio-pci driver)\n");
+ return -ENOTSUP;
+ }
+ rxq_intr_count = enic->intr_count - ENICPMD_RXQ_INTR_OFFSET;
+ err = rte_intr_efd_enable(intr_handle, rxq_intr_count);
+ if (err) {
+ dev_err(enic, "Failed to enable event fds for Rx queue"
+ " interrupts\n");
+ return err;
+ }
+ intr_handle->intr_vec = rte_zmalloc("enic_intr_vec",
+ rxq_intr_count * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ dev_err(enic, "Failed to allocate intr_vec\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < rxq_intr_count; i++)
+ intr_handle->intr_vec[i] = i + ENICPMD_RXQ_INTR_OFFSET;
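+	/*
+	 * e.g. with two Rx queues this yields intr_vec = {1, 2}; vector 0
+	 * stays reserved for the LSC/error interrupt.
+	 */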
+ return 0;
+}
+
+static void enic_rxq_intr_deinit(struct enic *enic)
+{
+ struct rte_intr_handle *intr_handle;
+
+ intr_handle = enic->rte_dev->intr_handle;
+ rte_intr_efd_disable(intr_handle);
+ if (intr_handle->intr_vec != NULL) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
+ }
+}
+
+static void enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
+{
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ unsigned int i;
+
+ /*
+ * Fill WQ descriptor fields that never change. Every descriptor is
+ * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
+ * descriptors (i.e. request one completion update every 32 packets).
+ */
+ wq = &enic->wq[queue_idx];
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ for (i = 0; i < wq->ring.desc_count; i++, desc++) {
+ desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
+ if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
+ desc->header_length_flags |=
+ (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
+ }
+}
+
+static void pick_rx_handler(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev;
+
+ /* Use the non-scatter, simplified RX handler if possible. */
+ eth_dev = enic->rte_dev;
+ if (enic->rq_count > 0 && enic->rq[0].data_queue_enable == 0) {
+ PMD_INIT_LOG(DEBUG, " use the non-scatter Rx handler");
+ eth_dev->rx_pkt_burst = &enic_noscatter_recv_pkts;
+ } else {
+ PMD_INIT_LOG(DEBUG, " use the normal Rx handler");
+ eth_dev->rx_pkt_burst = &enic_recv_pkts;
+ }
+}
+
int enic_enable(struct enic *enic)
{
unsigned int index;
@@ -420,6 +545,9 @@ int enic_enable(struct enic *enic)
if (eth_dev->data->dev_conf.intr_conf.lsc)
vnic_dev_notify_set(enic->vdev, 0);
+ err = enic_rxq_intr_init(enic);
+ if (err)
+ return err;
if (enic_clsf_init(enic))
dev_warning(enic, "Init of hash table for clsf failed. "\
"Flow director feature will not work\n");
@@ -443,6 +571,22 @@ int enic_enable(struct enic *enic)
}
}
+ /*
+ * Use the simple TX handler if possible. All offloads must be
+ * disabled.
+ */
+ if (eth_dev->data->dev_conf.txmode.offloads == 0) {
+ PMD_INIT_LOG(DEBUG, " use the simple tx handler");
+ eth_dev->tx_pkt_burst = &enic_simple_xmit_pkts;
+ for (index = 0; index < enic->wq_count; index++)
+ enic_prep_wq_for_simple_tx(enic, index);
+ } else {
+ PMD_INIT_LOG(DEBUG, " use the default tx handler");
+ eth_dev->tx_pkt_burst = &enic_xmit_pkts;
+ }
+
+ pick_rx_handler(enic);
+
for (index = 0; index < enic->wq_count; index++)
enic_start_wq(enic, index);
for (index = 0; index < enic->rq_count; index++)
@@ -457,7 +601,8 @@ int enic_enable(struct enic *enic)
enic_intr_handler, (void *)enic->rte_dev);
rte_intr_enable(&(enic->pdev->intr_handle));
- vnic_intr_unmask(&enic->intr);
+ /* Unmask LSC interrupt */
+ vnic_intr_unmask(&enic->intr[ENICPMD_LSC_INTR_OFFSET]);
return 0;
}
@@ -465,17 +610,21 @@ int enic_enable(struct enic *enic)
int enic_alloc_intr_resources(struct enic *enic)
{
int err;
+ unsigned int i;
dev_info(enic, "vNIC resources used: "\
"wq %d rq %d cq %d intr %d\n",
enic->wq_count, enic_vnic_rq_count(enic),
enic->cq_count, enic->intr_count);
- err = vnic_intr_alloc(enic->vdev, &enic->intr, 0);
- if (err)
- enic_free_vnic_resources(enic);
-
- return err;
+ for (i = 0; i < enic->intr_count; i++) {
+ err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
+ if (err) {
+ enic_free_vnic_resources(enic);
+ return err;
+ }
+ }
+ return 0;
}
void enic_free_rq(void *rxq)
@@ -490,6 +639,19 @@ void enic_free_rq(void *rxq)
enic = vnic_dev_priv(rq_sop->vdev);
rq_data = &enic->rq[rq_sop->data_queue_idx];
+ if (rq_sop->free_mbufs) {
+ struct rte_mbuf **mb;
+ int i;
+
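+		/*
+		 * Only the last num_free_mbufs entries of the reserve still
+		 * hold mbufs; free those before releasing the array itself.
+		 */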
+ mb = rq_sop->free_mbufs;
+ for (i = ENIC_RX_BURST_MAX - rq_sop->num_free_mbufs;
+ i < ENIC_RX_BURST_MAX; i++)
+ rte_pktmbuf_free(mb[i]);
+ rte_free(rq_sop->free_mbufs);
+ rq_sop->free_mbufs = NULL;
+ rq_sop->num_free_mbufs = 0;
+ }
+
enic_rxmbuf_queue_release(enic, rq_sop);
if (rq_data->in_use)
enic_rxmbuf_queue_release(enic, rq_data);
@@ -539,10 +701,13 @@ void enic_start_rq(struct enic *enic, uint16_t queue_idx)
rq_data = &enic->rq[rq_sop->data_queue_idx];
struct rte_eth_dev *eth_dev = enic->rte_dev;
- if (rq_data->in_use)
+ if (rq_data->in_use) {
vnic_rq_enable(rq_data);
+ enic_initial_post_rx(enic, rq_data);
+ }
rte_mb();
vnic_rq_enable(rq_sop);
+ enic_initial_post_rx(enic, rq_sop);
eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
}
@@ -581,7 +746,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
- uint16_t mtu = enic->rte_dev->data->mtu;
+ uint32_t max_rx_pkt_len;
rq_sop->is_sop = 1;
rq_sop->data_queue_idx = data_queue_idx;
@@ -599,22 +764,42 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
+ /* max_rx_pkt_len includes the ethernet header and CRC. */
+ max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (enic->rte_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
- /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
- mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
- (mbuf_size - 1)) / mbuf_size;
+ /* ceil((max pkt len)/mbuf_size) */
+ mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
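+		/* e.g. 9000-byte frames in 2048-byte buffers need
+		 * ceil(9000 / 2048) = 5 mbufs per packet
+		 */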
} else {
dev_info(enic, "Scatter rx mode disabled\n");
mbufs_per_pkt = 1;
+ if (max_rx_pkt_len > mbuf_size) {
+ dev_warning(enic, "The maximum Rx packet size (%u) is"
+ " larger than the mbuf size (%u), and"
+ " scatter is disabled. Larger packets will"
+ " be truncated.\n",
+ max_rx_pkt_len, mbuf_size);
+ }
}
if (mbufs_per_pkt > 1) {
dev_info(enic, "Rq %u Scatter rx mode in use\n", queue_idx);
rq_sop->data_queue_enable = 1;
rq_data->in_use = 1;
+ /*
+ * HW does not directly support rxmode.max_rx_pkt_len. HW always
+ * receives packet sizes up to the "max" MTU.
+ * If not using scatter, we can achieve the effect of dropping
+ * larger packets by reducing the size of posted buffers.
+ * See enic_alloc_rx_queue_mbufs().
+ */
+ if (max_rx_pkt_len <
+ enic_mtu_to_max_rx_pktlen(enic->max_mtu)) {
+ dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
+ " when scatter rx mode is in use.\n");
+ }
} else {
dev_info(enic, "Rq %u Scatter rx mode not being used\n",
queue_idx);
@@ -623,20 +808,20 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
}
/* number of descriptors have to be a multiple of 32 */
- nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
- nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+ nb_sop_desc = (nb_desc / mbufs_per_pkt) & ENIC_ALIGN_DESCS_MASK;
+ nb_data_desc = (nb_desc - nb_sop_desc) & ENIC_ALIGN_DESCS_MASK;
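+	/* e.g. nb_desc = 512, mbufs_per_pkt = 5, with 32-descriptor
+	 * alignment: sop = (512 / 5) & ~31 = 96,
+	 * data = (512 - 96) & ~31 = 416
+	 */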
rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
if (mbufs_per_pkt > 1) {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = ((enic->config.rq_desc_count /
- (mbufs_per_pkt - 1)) & ~0x1F);
+ (mbufs_per_pkt - 1)) & ENIC_ALIGN_DESCS_MASK);
min_data = min_sop * (mbufs_per_pkt - 1);
max_data = enic->config.rq_desc_count;
} else {
- min_sop = 64;
+ min_sop = ENIC_RX_BURST_MAX;
max_sop = enic->config.rq_desc_count;
min_data = 0;
max_data = 0;
@@ -654,8 +839,9 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
nb_data_desc = max_data;
}
if (mbufs_per_pkt > 1) {
- dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
- mtu, mbuf_size, min_sop + min_data,
+ dev_info(enic, "For max packet size %u and mbuf size %u valid"
+ " rx descriptor range is %u to %u\n",
+ max_rx_pkt_len, mbuf_size, min_sop + min_data,
max_sop + max_data);
}
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -706,10 +892,21 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
goto err_free_sop_mbuf;
}
+ rq_sop->free_mbufs = (struct rte_mbuf **)
+ rte_zmalloc_socket("rq->free_mbufs",
+ sizeof(struct rte_mbuf *) *
+ ENIC_RX_BURST_MAX,
+ RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+ if (rq_sop->free_mbufs == NULL)
+ goto err_free_data_mbuf;
+ rq_sop->num_free_mbufs = 0;
+
rq_sop->tot_nb_desc = nb_desc; /* squirrel away for MTU update function */
return 0;
+err_free_data_mbuf:
+ rte_free(rq_data->mbuf_ring);
err_free_sop_mbuf:
rte_free(rq_sop->mbuf_ring);
err_free_cq:
@@ -749,25 +946,15 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
static int instance;
wq->socket_id = socket_id;
- if (nb_desc) {
- if (nb_desc > enic->config.wq_desc_count) {
- dev_warning(enic,
- "WQ %d - number of tx desc in cmd line (%d)"\
- "is greater than that in the UCSM/CIMC adapter"\
- "policy. Applying the value in the adapter "\
- "policy (%d)\n",
- queue_idx, nb_desc, enic->config.wq_desc_count);
- } else if (nb_desc != enic->config.wq_desc_count) {
- enic->config.wq_desc_count = nb_desc;
- dev_info(enic,
- "TX Queues - effective number of descs:%d\n",
- nb_desc);
- }
- }
+ /*
+ * rte_eth_tx_queue_setup() checks min, max, and alignment. So just
+ * print an info message for diagnostics.
+ */
+ dev_info(enic, "TX Queues - effective number of descs:%d\n", nb_desc);
/* Allocate queue resources */
err = vnic_wq_alloc(enic->vdev, &enic->wq[queue_idx], queue_idx,
- enic->config.wq_desc_count,
+ nb_desc,
sizeof(struct wq_enet_desc));
if (err) {
dev_err(enic, "error in allocation of wq\n");
@@ -775,7 +962,7 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
}
err = vnic_cq_alloc(enic->vdev, &enic->cq[cq_index], cq_index,
- socket_id, enic->config.wq_desc_count,
+ socket_id, nb_desc,
sizeof(struct cq_enet_wq_desc));
if (err) {
vnic_wq_free(wq);
@@ -788,9 +975,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
instance++);
wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name,
- sizeof(uint32_t),
- SOCKET_ID_ANY, 0,
- ENIC_ALIGN);
+ sizeof(uint32_t), SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, ENIC_ALIGN);
if (!wq->cqmsg_rz)
return -ENOMEM;
@@ -802,8 +988,11 @@ int enic_disable(struct enic *enic)
unsigned int i;
int err;
- vnic_intr_mask(&enic->intr);
- (void)vnic_intr_masked(&enic->intr); /* flush write */
+ for (i = 0; i < enic->intr_count; i++) {
+ vnic_intr_mask(&enic->intr[i]);
+ (void)vnic_intr_masked(&enic->intr[i]); /* flush write */
+ }
+ enic_rxq_intr_deinit(enic);
rte_intr_disable(&enic->pdev->intr_handle);
rte_intr_callback_unregister(&enic->pdev->intr_handle,
enic_intr_handler,
@@ -846,7 +1035,8 @@ int enic_disable(struct enic *enic)
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_clean(&enic->cq[i]);
- vnic_intr_clean(&enic->intr);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_clean(&enic->intr[i]);
return 0;
}
@@ -879,9 +1069,10 @@ static int enic_dev_wait(struct vnic_dev *vdev,
static int enic_dev_open(struct enic *enic)
{
int err;
+ int flags = CMD_OPENF_IG_DESCCACHE;
err = enic_dev_wait(enic->vdev, vnic_dev_open,
- vnic_dev_open_done, 0);
+ vnic_dev_open_done, flags);
if (err)
dev_err(enic_get_dev(enic),
"vNIC device open failed, err %d\n", err);
@@ -889,44 +1080,42 @@ static int enic_dev_open(struct enic *enic)
return err;
}
-static int enic_set_rsskey(struct enic *enic)
+static int enic_set_rsskey(struct enic *enic, uint8_t *user_key)
{
dma_addr_t rss_key_buf_pa;
union vnic_rss_key *rss_key_buf_va = NULL;
- static union vnic_rss_key rss_key = {
- .key = {
- [0] = {.b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}},
- [1] = {.b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101}},
- [2] = {.b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115}},
- [3] = {.b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108}},
- }
- };
- int err;
+ int err, i;
u8 name[NAME_MAX];
+ RTE_ASSERT(user_key != NULL);
snprintf((char *)name, NAME_MAX, "rss_key-%s", enic->bdf_name);
rss_key_buf_va = enic_alloc_consistent(enic, sizeof(union vnic_rss_key),
&rss_key_buf_pa, name);
if (!rss_key_buf_va)
return -ENOMEM;
- rte_memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
+ for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++)
+ rss_key_buf_va->key[i / 10].b[i % 10] = user_key[i];
err = enic_set_rss_key(enic,
rss_key_buf_pa,
sizeof(union vnic_rss_key));
+ /* Save for later queries */
+ if (!err) {
+ rte_memcpy(&enic->rss_key, rss_key_buf_va,
+ sizeof(union vnic_rss_key));
+ }
enic_free_consistent(enic, sizeof(union vnic_rss_key),
rss_key_buf_va, rss_key_buf_pa);
return err;
}
-static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
+int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu)
{
dma_addr_t rss_cpu_buf_pa;
union vnic_rss_cpu *rss_cpu_buf_va = NULL;
- int i;
int err;
u8 name[NAME_MAX];
@@ -936,9 +1125,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
if (!rss_cpu_buf_va)
return -ENOMEM;
- for (i = 0; i < (1 << rss_hash_bits); i++)
- (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
- enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
+ rte_memcpy(rss_cpu_buf_va, rss_cpu, sizeof(union vnic_rss_cpu));
err = enic_set_rss_cpu(enic,
rss_cpu_buf_pa,
@@ -947,6 +1134,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
enic_free_consistent(enic, sizeof(union vnic_rss_cpu),
rss_cpu_buf_va, rss_cpu_buf_pa);
+ /* Save for later queries */
+ if (!err)
+ rte_memcpy(&enic->rss_cpu, rss_cpu, sizeof(union vnic_rss_cpu));
return err;
}
@@ -956,8 +1146,6 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
const u8 tso_ipid_split_en = 0;
int err;
- /* Enable VLAN tag stripping */
-
err = enic_set_nic_cfg(enic,
rss_default_cpu, rss_hash_type,
rss_hash_bits, rss_base_cpu,
@@ -967,47 +1155,50 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
return err;
}
-int enic_set_rss_nic_cfg(struct enic *enic)
+/* Initialize RSS with defaults, called from dev_configure */
+int enic_init_rss_nic_cfg(struct enic *enic)
{
- const u8 rss_default_cpu = 0;
- const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
- NIC_CFG_RSS_HASH_TYPE_IPV6 |
- NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
- const u8 rss_hash_bits = 7;
- const u8 rss_base_cpu = 0;
- u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
-
- if (rss_enable) {
- if (!enic_set_rsskey(enic)) {
- if (enic_set_rsscpu(enic, rss_hash_bits)) {
- rss_enable = 0;
- dev_warning(enic, "RSS disabled, "\
- "Failed to set RSS cpu indirection table.");
- }
- } else {
- rss_enable = 0;
- dev_warning(enic,
- "RSS disabled, Failed to set RSS key.\n");
+ static uint8_t default_rss_key[] = {
+ 85, 67, 83, 97, 119, 101, 115, 111, 109, 101,
+ 80, 65, 76, 79, 117, 110, 105, 113, 117, 101,
+ 76, 73, 78, 85, 88, 114, 111, 99, 107, 115,
+ 69, 78, 73, 67, 105, 115, 99, 111, 111, 108,
+ };
+ struct rte_eth_rss_conf rss_conf;
+ union vnic_rss_cpu rss_cpu;
+ int ret, i;
+
+ rss_conf = enic->rte_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ /*
+ * If the key is being set for the first time and the user gives us
+ * none, push the default key to the NIC.
+ */
+ if (rss_conf.rss_key == NULL) {
+ rss_conf.rss_key = default_rss_key;
+ rss_conf.rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
+ }
+ ret = enic_set_rss_conf(enic, &rss_conf);
+ if (ret) {
+ dev_err(enic, "Failed to configure RSS\n");
+ return ret;
+ }
+ if (enic->rss_enable) {
+ /* If enabling RSS, use the default reta */
+ for (i = 0; i < ENIC_RSS_RETA_SIZE; i++) {
+ rss_cpu.cpu[i / 4].b[i % 4] =
+ enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
}
+ ret = enic_set_rss_reta(enic, &rss_cpu);
+ if (ret)
+ dev_err(enic, "Failed to set RSS indirection table\n");
}
-
- return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
- rss_hash_bits, rss_base_cpu, rss_enable);
+ return ret;
}
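
A hedged sketch of the default RETA fill above, assuming the vnic_rss_cpu layout of 4-byte entry groups and that enic_rte_rq_idx_to_sop_idx() maps eth_dev RQ n to the even "start of packet" vNIC RQ 2*n, as this driver does elsewhere:

#include <stdint.h>
#include <stdio.h>

#define RETA_SIZE 128 /* 1 << ENIC_RSS_HASH_BITS (7) */

/* Simplified stand-in for union vnic_rss_cpu (assumed layout). */
struct cpu_group { uint8_t b[4]; uint8_t pad[4]; };
struct rss_cpu_layout { struct cpu_group cpu[RETA_SIZE / 4]; };

static uint8_t rq_to_sop_idx(unsigned int rq) { return (uint8_t)(rq * 2); }

int main(void)
{
	struct rss_cpu_layout reta;
	unsigned int i, rq_count = 4;

	/* Spread RETA entries round-robin over the configured RQs. */
	for (i = 0; i < RETA_SIZE; i++)
		reta.cpu[i / 4].b[i % 4] = rq_to_sop_idx(i % rq_count);
	printf("entry 5 -> vNIC RQ %u\n", reta.cpu[5 / 4].b[5 % 4]); /* 2 */
	return 0;
}
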
int enic_setup_finish(struct enic *enic)
{
- int ret;
-
enic_init_soft_stats(enic);
- ret = enic_set_rss_nic_cfg(enic);
- if (ret) {
- dev_err(enic, "Failed to config nic, aborting.\n");
- return -1;
- }
-
/* Default conf */
vnic_dev_packet_filter(enic->vdev,
1 /* directed */,
@@ -1022,6 +1213,115 @@ int enic_setup_finish(struct enic *enic)
return 0;
}
+static int enic_rss_conf_valid(struct enic *enic,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ /* RSS is disabled per VIC settings. Ignore rss_conf. */
+ if (enic->flow_type_rss_offloads == 0)
+ return 0;
+ if (rss_conf->rss_key != NULL &&
+ rss_conf->rss_key_len != ENIC_RSS_HASH_KEY_SIZE) {
+ dev_err(enic, "Given rss_key is %d bytes, it must be %d\n",
+ rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
+ return -EINVAL;
+ }
+ if (rss_conf->rss_hf != 0 &&
+ (rss_conf->rss_hf & enic->flow_type_rss_offloads) == 0) {
+ dev_err(enic, "Given rss_hf contains none of the supported"
+ " types\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Set hash type and key according to rss_conf */
+int enic_set_rss_conf(struct enic *enic, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *eth_dev;
+ uint64_t rss_hf;
+ u8 rss_hash_type;
+ u8 rss_enable;
+ int ret;
+
+ RTE_ASSERT(rss_conf != NULL);
+ ret = enic_rss_conf_valid(enic, rss_conf);
+ if (ret) {
+ dev_err(enic, "RSS configuration (rss_conf) is invalid\n");
+ return ret;
+ }
+
+ eth_dev = enic->rte_dev;
+ rss_hash_type = 0;
+ rss_hf = rss_conf->rss_hf & enic->flow_type_rss_offloads;
+ if (enic->rq_count > 1 &&
+ (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) &&
+ rss_hf != 0) {
+ rss_enable = 1;
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV4;
+ if (enic->udp_rss_weak) {
+ /*
+ * 'TCP' is not a typo. The "weak" version of
+ * UDP RSS requires both the TCP and UDP bits
+ * be set. It does enable TCP RSS as well.
+ */
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV4;
+ }
+ }
+ if (rss_hf & (ETH_RSS_IPV6 | ETH_RSS_IPV6_EX |
+ ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX)) {
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP_IPV6;
+ if (enic->udp_rss_weak)
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
+ }
+ } else {
+ rss_enable = 0;
+ rss_hf = 0;
+ }
+
+ /* Set the hash key if provided */
+ if (rss_enable && rss_conf->rss_key) {
+ ret = enic_set_rsskey(enic, rss_conf->rss_key);
+ if (ret) {
+ dev_err(enic, "Failed to set RSS key\n");
+ return ret;
+ }
+ }
+
+ ret = enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ rss_enable);
+ if (!ret) {
+ enic->rss_hf = rss_hf;
+ enic->rss_hash_type = rss_hash_type;
+ enic->rss_enable = rss_enable;
+ } else {
+ dev_err(enic, "Failed to update RSS configurations."
+ " hash=0x%x\n", rss_hash_type);
+ }
+ return ret;
+}
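
A condensed sketch of the rss_hf-to-hash-type translation implemented above, isolating the "weak" UDP quirk; the flag constants are illustrative stand-ins for the ETH_RSS_* and NIC_CFG_RSS_HASH_TYPE_* values:

#include <stdint.h>
#include <stdio.h>

#define RSS_HF_IPV4 (1ULL << 0)
#define RSS_HF_TCP4 (1ULL << 1)
#define RSS_HF_UDP4 (1ULL << 2)

#define HT_IPV4     0x01
#define HT_TCP_IPV4 0x02
#define HT_UDP_IPV4 0x04

static uint8_t hash_type_from_rss_hf(uint64_t rss_hf, int udp_rss_weak)
{
	uint8_t ht = 0;

	if (rss_hf & RSS_HF_IPV4)
		ht |= HT_IPV4;
	if (rss_hf & RSS_HF_TCP4)
		ht |= HT_TCP_IPV4;
	if (rss_hf & RSS_HF_UDP4) {
		ht |= HT_UDP_IPV4;
		/* Weak UDP RSS needs the TCP bit too (and enables it). */
		if (udp_rss_weak)
			ht |= HT_TCP_IPV4;
	}
	return ht;
}

int main(void)
{
	printf("0x%x\n", hash_type_from_rss_hf(RSS_HF_UDP4, 1)); /* 0x6 */
	return 0;
}
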
+
+int enic_set_vlan_strip(struct enic *enic)
+{
+ /*
+ * Unfortunately, VLAN strip on/off and RSS on/off are configured
+ * together. So, re-do niccfg, preserving the current RSS settings.
+ */
+ return enic_set_niccfg(enic, ENIC_RSS_DEFAULT_CPU, enic->rss_hash_type,
+ ENIC_RSS_HASH_BITS, ENIC_RSS_BASE_CPU,
+ enic->rss_enable);
+}
+
void enic_add_packet_filter(struct enic *enic)
{
/* Args -> directed, multicast, broadcast, promisc, allmulti */
@@ -1043,6 +1343,7 @@ static void enic_dev_deinit(struct enic *enic)
rte_free(eth_dev->data->mac_addrs);
rte_free(enic->cq);
+ rte_free(enic->intr);
rte_free(enic->rq);
rte_free(enic->wq);
}
@@ -1052,12 +1353,16 @@ int enic_set_vnic_res(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
int rc = 0;
- unsigned int required_rq, required_wq, required_cq;
+ unsigned int required_rq, required_wq, required_cq, required_intr;
/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
required_rq = eth_dev->data->nb_rx_queues * 2;
required_wq = eth_dev->data->nb_tx_queues;
required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+ required_intr = 1; /* 1 for LSC even if intr_conf.lsc is 0 */
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ required_intr += eth_dev->data->nb_rx_queues;
+ }
if (enic->conf_rq_count < required_rq) {
dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
@@ -1076,11 +1381,18 @@ int enic_set_vnic_res(struct enic *enic)
required_cq, enic->conf_cq_count);
rc = -EINVAL;
}
+ if (enic->conf_intr_count < required_intr) {
+ dev_err(dev, "Not enough Interrupts to support Rx queue"
+ " interrupts. Required:%u, Configured:%u\n",
+ required_intr, enic->conf_intr_count);
+ rc = -EINVAL;
+ }
if (rc == 0) {
enic->rq_count = eth_dev->data->nb_rx_queues;
enic->wq_count = eth_dev->data->nb_tx_queues;
enic->cq_count = enic->rq_count + enic->wq_count;
+ enic->intr_count = required_intr;
}
return rc;
@@ -1176,20 +1488,26 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
"MTU (%u) is greater than value configured in NIC (%u)\n",
new_mtu, config_mtu);
- /* The easy case is when scatter is disabled. However if the MTU
- * becomes greater than the mbuf data size, packet drops will ensue.
+ /* Update the MTU and maximum packet length */
+ eth_dev->data->mtu = new_mtu;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
+ enic_mtu_to_max_rx_pktlen(new_mtu);
+
+ /*
+ * If the device has not started (enic_enable), nothing to do.
+ * Later, enic_enable() will set up RQs reflecting the new maximum
+ * packet length.
*/
- if (!(enic->rte_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_SCATTER)) {
- eth_dev->data->mtu = new_mtu;
+ if (!eth_dev->data->dev_started)
goto set_mtu_done;
- }
- /* Rx scatter is enabled so reconfigure RQ's on the fly. The point is to
- * change Rx scatter mode if necessary for better performance. I.e. if
- * MTU was greater than the mbuf size and now it's less, scatter Rx
- * doesn't have to be used and vice versa.
- */
+ /*
+ * The device has started, re-do RQs on the fly. In the process, we
+ * pick up the new maximum packet length.
+ *
+ * Some applications rely on the ability to change MTU without stopping
+ * the device. So keep this behavior for now.
+ */
rte_spinlock_lock(&enic->mtu_lock);
/* Stop traffic on all RQs */
@@ -1214,12 +1532,12 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
/* now it is safe to reconfigure the RQs */
- /* update the mtu */
- eth_dev->data->mtu = new_mtu;
/* free and reallocate RQs with the new MTU */
for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (!rq->in_use)
+ continue;
enic_free_rq(rq);
rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
@@ -1240,7 +1558,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
/* put back the real receive function */
rte_mb();
- eth_dev->rx_pkt_burst = enic_recv_pkts;
+ pick_rx_handler(enic);
rte_mb();
/* restart Rx traffic */
@@ -1282,6 +1600,8 @@ static int enic_dev_init(struct enic *enic)
/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
enic->conf_cq_count, 8);
+ enic->intr = rte_zmalloc("enic_vnic_intr", sizeof(struct vnic_intr) *
+ enic->conf_intr_count, 8);
enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
enic->conf_rq_count, 8);
enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
@@ -1290,6 +1610,10 @@ static int enic_dev_init(struct enic *enic)
dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
return -1;
}
+ if (enic->conf_intr_count > 0 && enic->intr == NULL) {
+ dev_err(enic, "failed to allocate vnic_intr, aborting.\n");
+ return -1;
+ }
if (enic->conf_rq_count > 0 && enic->rq == NULL) {
dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
return -1;
@@ -1319,6 +1643,38 @@ static int enic_dev_init(struct enic *enic)
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+ enic->overlay_offload = false;
+ if (!enic->disable_overlay && enic->vxlan &&
+ /* 'VXLAN feature' enables VXLAN, NVGRE, and GENEVE. */
+ vnic_dev_overlay_offload_ctrl(enic->vdev,
+ OVERLAY_FEATURE_VXLAN,
+ OVERLAY_OFFLOAD_ENABLE) == 0) {
+ enic->tx_offload_capa |=
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+ /*
+ * Do not add PKT_TX_OUTER_{IPV4,IPV6} as they are not
+ * 'offload' flags (i.e. not part of PKT_TX_OFFLOAD_MASK).
+ */
+ enic->tx_offload_mask |=
+ PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_MASK;
+ enic->overlay_offload = true;
+ enic->vxlan_port = ENIC_DEFAULT_VXLAN_PORT;
+ dev_info(enic, "Overlay offload is enabled\n");
+ /*
+ * Reset the vxlan port to the default, as the NIC firmware
+ * does not reset it automatically and keeps the old setting.
+ */
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ ENIC_DEFAULT_VXLAN_PORT)) {
+ dev_err(enic, "failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -1351,6 +1707,15 @@ int enic_probe(struct enic *enic)
enic_alloc_consistent,
enic_free_consistent);
+ /*
+ * Allocate the consistent memory for stats upfront so both primary and
+ * secondary processes can dump stats.
+ */
+ err = vnic_dev_alloc_stats_mem(enic->vdev);
+ if (err) {
+ dev_err(enic, "Failed to allocate cmd memory, aborting\n");
+ goto err_out_unregister;
+ }
/* Issue device open to get device in known state */
err = enic_dev_open(enic);
if (err) {
@@ -1359,8 +1724,10 @@ int enic_probe(struct enic *enic)
}
/* Set ingress vlan rewrite mode before vnic initialization */
+ dev_debug(enic, "Set ig_vlan_rewrite_mode=%u\n",
+ enic->ig_vlan_rewrite_mode);
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PASS_THRU);
+ enic->ig_vlan_rewrite_mode);
if (err) {
dev_err(enic,
"Failed to set ingress vlan rewrite mode, aborting.\n");
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index c99d6183..8d493ffe 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -76,19 +76,26 @@ int enic_get_vnic_config(struct enic *enic)
? "" : "not "));
err = vnic_dev_capable_filter_mode(enic->vdev, &enic->flow_filter_mode,
- &enic->filter_tags);
+ &enic->filter_actions);
if (err) {
dev_err(enic_get_dev(enic),
"Error getting filter modes, %d\n", err);
return err;
}
+ vnic_dev_capable_udp_rss_weak(enic->vdev, &enic->nic_cfg_chk,
+ &enic->udp_rss_weak);
- dev_info(enic, "Flow api filter mode: %s, Filter tagging %savailable\n",
+ dev_info(enic, "Flow api filter mode: %s Actions: %s%s%s\n",
((enic->flow_filter_mode == FILTER_DPDK_1) ? "DPDK" :
((enic->flow_filter_mode == FILTER_USNIC_IP) ? "USNIC" :
((enic->flow_filter_mode == FILTER_IPV4_5TUPLE) ? "5TUPLE" :
"NONE"))),
- ((enic->filter_tags) ? "" : "not "));
+ ((enic->filter_actions & FILTER_ACTION_RQ_STEERING_FLAG) ?
+ "steer " : ""),
+ ((enic->filter_actions & FILTER_ACTION_FILTER_ID_FLAG) ?
+ "tag " : ""),
+ ((enic->filter_actions & FILTER_ACTION_DROP_FLAG) ?
+ "drop " : ""));
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
@@ -117,7 +124,10 @@ int enic_get_vnic_config(struct enic *enic)
"loopback tag 0x%04x\n",
ENIC_SETTING(enic, TXCSUM) ? "yes" : "no",
ENIC_SETTING(enic, RXCSUM) ? "yes" : "no",
- ENIC_SETTING(enic, RSS) ? "yes" : "no",
+ ENIC_SETTING(enic, RSS) ?
+ (ENIC_SETTING(enic, RSSHASH_UDPIPV4) ? "+UDP" :
+ ((enic->udp_rss_weak ? "+udp" :
+ "yes"))) : "no",
c->intr_mode == VENET_INTR_MODE_INTX ? "INTx" :
c->intr_mode == VENET_INTR_MODE_MSI ? "MSI" :
c->intr_mode == VENET_INTR_MODE_ANY ? "any" :
@@ -128,6 +138,74 @@ int enic_get_vnic_config(struct enic *enic)
c->intr_timer_usec,
c->loop_tag);
+ /* RSS settings from vNIC */
+ enic->reta_size = ENIC_RSS_RETA_SIZE;
+ enic->hash_key_size = ENIC_RSS_HASH_KEY_SIZE;
+ enic->flow_type_rss_offloads = 0;
+ if (ENIC_SETTING(enic, RSSHASH_IPV4))
+ /*
+ * IPV4 hash type handles both non-frag and frag packet types.
+ * TCP/UDP is controlled via a separate flag below.
+ */
+ enic->flow_type_rss_offloads |= ETH_RSS_IPV4 |
+ ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER;
+ if (ENIC_SETTING(enic, RSSHASH_TCPIPV4))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (ENIC_SETTING(enic, RSSHASH_IPV6))
+ /*
+ * The VIC adapter can perform RSS on IPv6 packets with and
+ * without extension headers. An IPv6 "fragment" is an IPv6
+ * packet with the fragment extension header.
+ */
+ enic->flow_type_rss_offloads |= ETH_RSS_IPV6 |
+ ETH_RSS_IPV6_EX | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER;
+ if (ENIC_SETTING(enic, RSSHASH_TCPIPV6))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_TCP_EX;
+ if (enic->udp_rss_weak)
+ enic->flow_type_rss_offloads |=
+ ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+ if (ENIC_SETTING(enic, RSSHASH_UDPIPV4))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (ENIC_SETTING(enic, RSSHASH_UDPIPV6))
+ enic->flow_type_rss_offloads |= ETH_RSS_NONFRAG_IPV6_UDP |
+ ETH_RSS_IPV6_UDP_EX;
+
+ /* Zero offloads if RSS is not enabled */
+ if (!ENIC_SETTING(enic, RSS))
+ enic->flow_type_rss_offloads = 0;
+
+ enic->vxlan = ENIC_SETTING(enic, VXLAN) &&
+ vnic_dev_capable_vxlan(enic->vdev);
+ /*
+ * Default hardware capabilities. enic_dev_init() may add additional
+ * flags if it enables overlay offloads.
+ */
+ enic->tx_queue_offload_capa = 0;
+ enic->tx_offload_capa =
+ enic->tx_queue_offload_capa |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ enic->rx_offload_capa =
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ enic->tx_offload_mask =
+ PKT_TX_VLAN_PKT |
+ PKT_TX_IP_CKSUM |
+ PKT_TX_L4_MASK |
+ PKT_TX_TCP_SEG;
+
return 0;
}
@@ -161,6 +239,7 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en,
u8 ig_vlan_strip_en)
{
+ enum vnic_devcmd_cmd cmd;
u64 a0, a1;
u32 nic_cfg;
int wait = 1000;
@@ -171,8 +250,8 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
a0 = nic_cfg;
a1 = 0;
-
- return vnic_dev_cmd(enic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
+ cmd = enic->nic_cfg_chk ? CMD_NIC_CFG_CHK : CMD_NIC_CFG;
+ return vnic_dev_cmd(enic->vdev, cmd, &a0, &a1, wait);
}
int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len)
@@ -202,7 +281,8 @@ void enic_free_vnic_resources(struct enic *enic)
vnic_rq_free(&enic->rq[i]);
for (i = 0; i < enic->cq_count; i++)
vnic_cq_free(&enic->cq[i]);
- vnic_intr_free(&enic->intr);
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_free(&enic->intr[i]);
}
void enic_get_res_counts(struct enic *enic)
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index cf3a6fde..3786bc0e 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -16,6 +16,13 @@
#define ENIC_MIN_RQ_DESCS 64
#define ENIC_MAX_RQ_DESCS 4096
+/* A descriptor ring has a multiple of 32 descriptors */
+#define ENIC_ALIGN_DESCS 32
+#define ENIC_ALIGN_DESCS_MASK ~(ENIC_ALIGN_DESCS - 1)
+
+/* Request a completion index every 32 buffers (roughly packets) */
+#define ENIC_WQ_CQ_THRESH 32
+
#define ENIC_MIN_MTU 68
/* Does not include (possible) inserted VLAN tag and FCS */
@@ -30,6 +37,21 @@
#define ENIC_NON_TSO_MAX_DESC 16
#define ENIC_DEFAULT_RX_FREE_THRESH 32
#define ENIC_TX_XMIT_MAX 64
+#define ENIC_RX_BURST_MAX 64
+
+/* Defaults for dev_info.default_{rx,tx}portconf */
+#define ENIC_DEFAULT_RX_BURST 32
+#define ENIC_DEFAULT_RX_RINGS 1
+#define ENIC_DEFAULT_RX_RING_SIZE 512
+#define ENIC_DEFAULT_TX_BURST 32
+#define ENIC_DEFAULT_TX_RINGS 1
+#define ENIC_DEFAULT_TX_RING_SIZE 512
+
+#define ENIC_RSS_DEFAULT_CPU 0
+#define ENIC_RSS_BASE_CPU 0
+#define ENIC_RSS_HASH_BITS 7
+#define ENIC_RSS_RETA_SIZE (1 << ENIC_RSS_HASH_BITS)
+#define ENIC_RSS_HASH_KEY_SIZE 40
#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 2fe5a3fa..7129e121 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -15,15 +15,6 @@
#include <rte_ip.h>
#include <rte_tcp.h>
-#define ENIC_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
-
-#define ENIC_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ ENIC_TX_OFFLOAD_MASK)
-
#define RTE_PMD_USE_PREFETCH
#ifdef RTE_PMD_USE_PREFETCH
@@ -130,30 +121,103 @@ enic_cq_rx_check_err(struct cq_desc *cqd)
/* Lookup table to translate RX CQ flags to mbuf flags. */
static inline uint32_t
-enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd, uint8_t tnl)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
uint8_t cqrd_flags = cqrd->flags;
+ /*
+ * Odd-numbered entries are for tunnel packets. All packet type info
+ * applies to the inner packet, and there is no info on the outer
+ * packet. The outer flags in these entries exist only to avoid
+ * changing enic_cq_rx_to_pkt_flags(). They are cleared from the mbuf
+ * afterwards.
+ *
+ * Also, as there is no tunnel type info (VXLAN, NVGRE, or GENEVE), set
+ * RTE_PTYPE_TUNNEL_GRENAT.
+ */
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
[0x00] = RTE_PTYPE_UNKNOWN,
+ [0x01] = RTE_PTYPE_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER,
[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x21] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x23] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x25] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x61] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x63] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x65] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x11] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x13] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x15] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
- [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
- [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x51] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x53] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x55] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
- return cq_type_table[cqrd_flags];
+ return cq_type_table[cqrd_flags + tnl];
}
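
The index arithmetic can be checked in isolation. A sketch follows, with bit values inferred from the table indices above (UDP=0x02, TCP=0x04, IPv6=0x10, IPv4=0x20, frag=0x40 by assumption): the masked CQ flags are always even, so adding tnl (0 or 1) selects the odd, tunnel variant of the same entry.

#include <stdint.h>
#include <stdio.h>

/* Bit values inferred from the table indices above (assumption). */
#define F_UDP  0x02
#define F_TCP  0x04
#define F_IPV6 0x10
#define F_IPV4 0x20
#define F_FRAG 0x40
#define F_MASK (F_UDP | F_TCP | F_IPV6 | F_IPV4 | F_FRAG)

static unsigned int cq_type_index(uint8_t cq_flags, uint8_t tnl)
{
	/* Masked flags are even; +tnl lands on the odd tunnel entry. */
	return (cq_flags & F_MASK) + tnl;
}

int main(void)
{
	printf("0x%02x\n", cq_type_index(F_IPV4 | F_TCP, 0)); /* 0x24 */
	printf("0x%02x\n", cq_type_index(F_IPV4 | F_TCP, 1)); /* 0x25 */
	return 0;
}
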
static inline void
@@ -200,10 +264,18 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
uint32_t l4_flags;
l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
- if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
- pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
- else if (mbuf->packet_type & RTE_PTYPE_L3_IPV4)
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ /*
+ * When overlay offload is enabled, the NIC may
+ * set ipv4_csum_ok=1 even if the inner packet is IPv6.
+ * So, explicitly check for IPv4 before checking
+ * ipv4_csum_ok.
+ */
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ }
if (l4_flags == RTE_PTYPE_L4_UDP ||
l4_flags == RTE_PTYPE_L4_TCP) {
@@ -238,13 +310,14 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
struct vnic_rq *rq;
struct enic *enic = vnic_dev_priv(sop_rq->vdev);
uint16_t cq_idx;
- uint16_t rq_idx;
+ uint16_t rq_idx, max_rx;
uint16_t rq_num;
struct rte_mbuf *nmb, *rxmb;
uint16_t nb_rx = 0;
struct vnic_cq *cq;
volatile struct cq_desc *cqd_ptr;
uint8_t color;
+ uint8_t tnl;
uint16_t seg_length;
struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
@@ -252,19 +325,23 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */
cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+ color = cq->last_color;
data_rq = &enic->rq[sop_rq->data_queue_idx];
- while (nb_rx < nb_pkts) {
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, cq->ring.desc_count - cq_idx);
+
+ while (max_rx) {
volatile struct rq_enet_desc *rqd_ptr;
struct cq_desc cqd;
uint8_t packet_error;
uint16_t ciflags;
+ max_rx--;
+
/* Check for pkts available */
- color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
- & CQ_DESC_COLOR_MASK;
- if (color == cq->last_color)
+ if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
break;
/* Get the cq descriptor and extract rq info from it */
@@ -288,13 +365,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* Get the mbuf to return and replace with one just allocated */
rxmb = rq->mbuf_ring[rq_idx];
rq->mbuf_ring[rq_idx] = nmb;
-
- /* Increment cqd, rqd, mbuf_table index */
cq_idx++;
- if (unlikely(cq_idx == cq->ring.desc_count)) {
- cq_idx = 0;
- cq->last_color = cq->last_color ? 0 : 1;
- }
/* Prefetch next mbuf & desc while processing current one */
cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
@@ -336,10 +407,22 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
continue;
}
+ /*
+ * When overlay offload is enabled, CQ.fcoe indicates the
+ * packet is tunnelled.
+ */
+ tnl = enic->overlay_offload &&
+ (ciflags & CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
/* cq rx flags are only valid if eop bit is set */
- first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+ first_seg->packet_type =
+ enic_cq_rx_flags_to_pkt_type(&cqd, tnl);
enic_cq_rx_to_pkt_flags(&cqd, first_seg);
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ first_seg->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
if (unlikely(packet_error)) {
rte_pktmbuf_free(first_seg);
rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
@@ -354,6 +437,10 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* store the mbuf address into the next entry of the array */
rx_pkts[nb_rx++] = first_seg;
}
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
sop_rq->pkt_first_seg = first_seg;
sop_rq->pkt_last_seg = last_seg;
@@ -387,9 +474,123 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
}
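
A toy model of the completion-queue color scheme driving that loop, under the assumption that CQ_DESC_COLOR_MASK_NOSHIFT isolates a single color bit: the NIC flips the color it writes on each pass over the ring, so software never has to clear consumed descriptors; an entry is new when its color differs from the expected one, and the expectation is XOR-toggled when the software index wraps.

#include <stdint.h>

#define COLOR_MASK_NOSHIFT 0x80 /* illustrative single color bit */
#define RING_SIZE 4

struct toy_cq {
	uint8_t type_color[RING_SIZE]; /* written by the "NIC" */
	uint16_t to_clean;             /* software consume index */
	uint8_t last_color;            /* color of the previous pass */
};

/* Consume one entry if the NIC has produced it; returns 1 on success. */
static int toy_cq_poll(struct toy_cq *cq)
{
	if ((cq->type_color[cq->to_clean] & COLOR_MASK_NOSHIFT) ==
	    cq->last_color)
		return 0; /* same color as last pass: nothing new */
	if (++cq->to_clean == RING_SIZE) {
		cq->to_clean = 0;
		cq->last_color ^= COLOR_MASK_NOSHIFT; /* expect flipped color */
	}
	return 1;
}
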
+uint16_t
+enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_mbuf *mb, **rx, **rxmb;
+ uint16_t cq_idx, nb_rx, max_rx;
+ struct cq_enet_rq_desc *cqd;
+ struct rq_enet_desc *rqd;
+ unsigned int port_id;
+ struct vnic_cq *cq;
+ struct vnic_rq *rq;
+ struct enic *enic;
+ uint8_t color;
+ bool overlay;
+ bool tnl;
+
+ rq = rx_queue;
+ enic = vnic_dev_priv(rq->vdev);
+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ cq_idx = cq->to_clean;
+
+ /*
+ * Fill up the reserve of free mbufs. Below, we restock the receive
+ * ring with these mbufs to avoid allocation failures.
+ */
+ if (rq->num_free_mbufs == 0) {
+ if (rte_mempool_get_bulk(rq->mp, (void **)rq->free_mbufs,
+ ENIC_RX_BURST_MAX))
+ return 0;
+ rq->num_free_mbufs = ENIC_RX_BURST_MAX;
+ }
+
+ /* Receive until the end of the ring, at most. */
+ max_rx = RTE_MIN(nb_pkts, rq->num_free_mbufs);
+ max_rx = RTE_MIN(max_rx, cq->ring.desc_count - cq_idx);
+
+ cqd = (struct cq_enet_rq_desc *)(cq->ring.descs) + cq_idx;
+ color = cq->last_color;
+ rxmb = rq->mbuf_ring + cq_idx;
+ port_id = enic->port_id;
+ overlay = enic->overlay_offload;
+
+ rx = rx_pkts;
+ while (max_rx) {
+ max_rx--;
+ if ((cqd->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+ break;
+ if (unlikely(cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED)) {
+ rte_pktmbuf_free(*rxmb++);
+ rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
+ cqd++;
+ continue;
+ }
+
+ mb = *rxmb++;
+ /* prefetch mbuf data for caller */
+ rte_packet_prefetch(RTE_PTR_ADD(mb->buf_addr,
+ RTE_PKTMBUF_HEADROOM));
+ mb->data_len = cqd->bytes_written_flags &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+ mb->pkt_len = mb->data_len;
+ mb->port = port_id;
+ tnl = overlay && (cqd->completed_index_flags &
+ CQ_ENET_RQ_DESC_FLAGS_FCOE) != 0;
+ mb->packet_type =
+ enic_cq_rx_flags_to_pkt_type((struct cq_desc *)cqd,
+ tnl);
+ enic_cq_rx_to_pkt_flags((struct cq_desc *)cqd, mb);
+ /* Wipe the outer types set by enic_cq_rx_flags_to_pkt_type() */
+ if (tnl) {
+ mb->packet_type &= ~(RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+ }
+ cqd++;
+ *rx++ = mb;
+ }
+ /* Number of descriptors visited */
+ nb_rx = cqd - (struct cq_enet_rq_desc *)(cq->ring.descs) - cq_idx;
+ if (nb_rx == 0)
+ return 0;
+ rqd = ((struct rq_enet_desc *)rq->ring.descs) + cq_idx;
+ rxmb = rq->mbuf_ring + cq_idx;
+ cq_idx += nb_rx;
+ rq->rx_nb_hold += nb_rx;
+ if (unlikely(cq_idx == cq->ring.desc_count)) {
+ cq_idx = 0;
+ cq->last_color ^= CQ_DESC_COLOR_MASK_NOSHIFT;
+ }
+ cq->to_clean = cq_idx;
+
+ memcpy(rxmb, rq->free_mbufs + ENIC_RX_BURST_MAX - rq->num_free_mbufs,
+ sizeof(struct rte_mbuf *) * nb_rx);
+ rq->num_free_mbufs -= nb_rx;
+ while (nb_rx) {
+ nb_rx--;
+ mb = *rxmb++;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ rqd->address = mb->buf_iova + RTE_PKTMBUF_HEADROOM;
+ rqd++;
+ }
+ if (rq->rx_nb_hold > rq->rx_free_thresh) {
+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
+ rq->posted_index,
+ rq->rx_nb_hold);
+ rq->rx_nb_hold = 0;
+ rte_wmb();
+ iowrite32_relaxed(rq->posted_index,
+ &rq->ctrl->posted_index);
+ }
+
+ return rx - rx_pkts;
+}
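
The "reserve and restock" allocation pattern at the top of this function, sketched against the real mempool API: mbufs are taken in fixed bursts with rte_mempool_get_bulk(), so a transient pool shortage fails one whole burst cleanly instead of leaving the ring partially refilled.

#include <rte_mbuf.h>
#include <rte_mempool.h>

#define BURST 64 /* mirrors ENIC_RX_BURST_MAX */

struct rx_reserve {
	struct rte_mbuf *free_mbufs[BURST];
	unsigned int num_free;
};

/* Top up the reserve; returns -1 if the pool cannot supply a burst. */
static int reserve_refill(struct rx_reserve *r, struct rte_mempool *mp)
{
	if (r->num_free == 0) {
		if (rte_mempool_get_bulk(mp, (void **)r->free_mbufs, BURST))
			return -1; /* receive nothing this call */
		r->num_free = BURST;
	}
	return 0;
}
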
+
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
- struct vnic_wq_buf *buf;
+ struct rte_mbuf *buf;
struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
unsigned int nb_to_free, nb_free = 0, i;
struct rte_mempool *pool;
@@ -399,13 +600,10 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+ 1;
tail_idx = wq->tail_idx;
- buf = &wq->bufs[tail_idx];
- pool = ((struct rte_mbuf *)buf->mb)->pool;
+ pool = wq->bufs[tail_idx]->pool;
for (i = 0; i < nb_to_free; i++) {
- buf = &wq->bufs[tail_idx];
- m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
- buf->mb = NULL;
-
+ buf = wq->bufs[tail_idx];
+ m = rte_pktmbuf_prefree_seg(buf);
if (unlikely(m == NULL)) {
tail_idx = enic_ring_incr(desc_count, tail_idx);
continue;
@@ -443,9 +641,10 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
return 0;
}
-uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
+ struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
int32_t ret;
uint16_t i;
uint64_t ol_flags;
@@ -453,9 +652,13 @@ uint16_t enic_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
for (i = 0; i != nb_pkts; i++) {
m = tx_pkts[i];
+ if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ rte_errno = EINVAL;
+ return i;
+ }
ol_flags = m->ol_flags;
- if (ol_flags & ENIC_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ if (ol_flags & wq->tx_offload_notsup_mask) {
+ rte_errno = ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
@@ -489,12 +692,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint64_t ol_flags_mask;
unsigned int wq_desc_avail;
int head_idx;
- struct vnic_wq_buf *buf;
unsigned int desc_count;
struct wq_enet_desc *descs, *desc_p, desc_tmp;
uint16_t mss;
uint8_t vlan_tag_insert;
- uint8_t eop;
+ uint8_t eop, cq;
uint64_t bus_addr;
uint8_t offload_mode;
uint16_t header_len;
@@ -558,6 +760,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
mss = tx_pkt->tso_segsz;
+ /* For tunnel, need the size of outer+inner headers */
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ header_len += tx_pkt->outer_l2_len +
+ tx_pkt->outer_l3_len;
+ }
}
if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
@@ -572,15 +779,18 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
break;
}
}
-
-
+ wq->cq_pend++;
+ cq = 0;
+ if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+ cq = 1;
+ wq->cq_pend = 0;
+ }
wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
- offload_mode, eop, eop, 0, vlan_tag_insert,
+ offload_mode, eop, cq, 0, vlan_tag_insert,
vlan_id, 0);
*desc_p = desc_tmp;
- buf = &wq->bufs[head_idx];
- buf->mb = (void *)tx_pkt;
+ wq->bufs[head_idx] = tx_pkt;
head_idx = enic_ring_incr(desc_count, head_idx);
wq_desc_avail--;
@@ -589,20 +799,26 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_pkt->next) {
data_len = tx_pkt->data_len;
- if (tx_pkt->next == NULL)
+ wq->cq_pend++;
+ cq = 0;
+ if (tx_pkt->next == NULL) {
eop = 1;
+ if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+ cq = 1;
+ wq->cq_pend = 0;
+ }
+ }
desc_p = descs + head_idx;
bus_addr = (dma_addr_t)(tx_pkt->buf_iova
+ tx_pkt->data_off);
wq_enet_desc_enc((struct wq_enet_desc *)
&desc_tmp, bus_addr, data_len,
- mss, 0, offload_mode, eop, eop,
+ mss, 0, offload_mode, eop, cq,
0, vlan_tag_insert, vlan_id,
0);
*desc_p = desc_tmp;
- buf = &wq->bufs[head_idx];
- buf->mb = (void *)tx_pkt;
+ wq->bufs[head_idx] = tx_pkt;
head_idx = enic_ring_incr(desc_count, head_idx);
wq_desc_avail--;
}
@@ -618,4 +834,81 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return index;
}
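
A sketch of the completion-batching rule introduced in this function: the cq bit in a descriptor asks the NIC to post a completion, and it is now set only on an end-of-packet descriptor once at least ENIC_WQ_CQ_THRESH (32) buffers have accumulated, rather than on every EOP (struct and constant simplified here).

#define WQ_CQ_THRESH 32 /* mirrors ENIC_WQ_CQ_THRESH */

struct toy_wq { unsigned int cq_pend; }; /* buffers since last completion */

/* Returns the cq flag to encode into a descriptor with the given eop. */
static unsigned int want_completion(struct toy_wq *wq, int eop)
{
	wq->cq_pend++;
	if (eop && wq->cq_pend >= WQ_CQ_THRESH) {
		wq->cq_pend = 0;
		return 1; /* ask the NIC for a completion here */
	}
	return 0;
}
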
+static void enqueue_simple_pkts(struct rte_mbuf **pkts,
+ struct wq_enet_desc *desc,
+ uint16_t n,
+ struct enic *enic)
+{
+ struct rte_mbuf *p;
+
+ while (n) {
+ n--;
+ p = *pkts++;
+ desc->address = p->buf_iova + p->data_off;
+ desc->length = p->pkt_len;
+ /*
+ * The app should not send oversized packets.
+ * tx_pkt_prepare includes a check as well. But some
+ * apps ignore the device max size and tx_pkt_prepare.
+ * Oversized packets cause WQ errors and the NIC ends
+ * up disabling the whole WQ. So truncate packets.
+ */
+ if (unlikely(p->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+ desc->length = ENIC_TX_MAX_PKT_SIZE;
+ rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+ }
+ desc++;
+ }
+}
+
+uint16_t enic_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ unsigned int head_idx, desc_count;
+ struct wq_enet_desc *desc;
+ struct vnic_wq *wq;
+ struct enic *enic;
+ uint16_t rem, n;
+
+ wq = (struct vnic_wq *)tx_queue;
+ enic = vnic_dev_priv(wq->vdev);
+ enic_cleanup_wq(enic, wq);
+ /* Will enqueue this many packets in this call */
+ nb_pkts = RTE_MIN(nb_pkts, wq->ring.desc_avail);
+ if (nb_pkts == 0)
+ return 0;
+
+ head_idx = wq->head_idx;
+ desc_count = wq->ring.desc_count;
+
+ /* Descriptors until the end of the ring */
+ n = desc_count - head_idx;
+ n = RTE_MIN(nb_pkts, n);
+ /* Save mbuf pointers to free later */
+ memcpy(wq->bufs + head_idx, tx_pkts, sizeof(struct rte_mbuf *) * n);
+
+ /* Enqueue until the ring end */
+ rem = nb_pkts - n;
+ desc = ((struct wq_enet_desc *)wq->ring.descs) + head_idx;
+ enqueue_simple_pkts(tx_pkts, desc, n, enic);
+
+ /* Wrap to the start of the ring */
+ if (rem) {
+ tx_pkts += n;
+ memcpy(wq->bufs, tx_pkts, sizeof(struct rte_mbuf *) * rem);
+ desc = (struct wq_enet_desc *)wq->ring.descs;
+ enqueue_simple_pkts(tx_pkts, desc, rem, enic);
+ }
+ rte_wmb();
+
+ /* Update head_idx and desc_avail */
+ wq->ring.desc_avail -= nb_pkts;
+ head_idx += nb_pkts;
+ if (head_idx >= desc_count)
+ head_idx -= desc_count;
+ wq->head_idx = head_idx;
+ iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
+ return nb_pkts;
+}
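
The two-step wrap-around enqueue generalizes to any array-backed ring; a minimal sketch of the split used above for both the mbuf-pointer memcpy and the descriptor fill:

#include <string.h>

/*
 * Copy n object pointers into a ring starting at head, wrapping once
 * at the ring end, mirroring the two memcpy calls above.
 */
static void ring_put(void **ring, unsigned int ring_size,
		     unsigned int head, void * const *objs, unsigned int n)
{
	unsigned int first = ring_size - head;

	if (first > n)
		first = n;
	memcpy(ring + head, objs, first * sizeof(*objs));
	if (n > first) /* remainder wraps to the start of the ring */
		memcpy(ring, objs + first, (n - first) * sizeof(*objs));
}
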
diff --git a/drivers/net/enic/meson.build b/drivers/net/enic/meson.build
new file mode 100644
index 00000000..bfd4e237
--- /dev/null
+++ b/drivers/net/enic/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cisco Systems, Inc.
+
+sources = files(
+ 'base/vnic_cq.c',
+ 'base/vnic_dev.c',
+ 'base/vnic_intr.c',
+ 'base/vnic_rq.c',
+ 'base/vnic_rss.c',
+ 'base/vnic_wq.c',
+ 'enic_clsf.c',
+ 'enic_ethdev.c',
+ 'enic_flow.c',
+ 'enic_main.c',
+ 'enic_res.c',
+ 'enic_rxtx.c',
+ )
+deps += ['hash']
+includes += include_directories('base')
diff --git a/drivers/net/failsafe/Makefile b/drivers/net/failsafe/Makefile
index bd2f0198..81802d09 100644
--- a/drivers/net/failsafe/Makefile
+++ b/drivers/net/failsafe/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright 2017 6WIND S.A.
-# Copyright 2017 Mellanox.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 6WIND S.A.
+# Copyright 2017 Mellanox Technologies, Ltd
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index c499bfb9..657919f9 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <rte_alarm.h>
@@ -13,6 +13,8 @@
#include "failsafe_private.h"
+int failsafe_logtype;
+
const char pmd_failsafe_driver_name[] = FAILSAFE_DRIVER_NAME;
static const struct rte_eth_link eth_link = {
.link_speed = ETH_SPEED_NUM_10G,
@@ -202,16 +204,25 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
}
snprintf(priv->my_owner.name, sizeof(priv->my_owner.name),
FAILSAFE_OWNER_NAME);
+ DEBUG("Failsafe port %u owner info: %s_%016"PRIX64, dev->data->port_id,
+ priv->my_owner.name, priv->my_owner.id);
+ ret = rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback,
+ dev);
+ if (ret) {
+ ERROR("Failed to register NEW callback");
+ goto free_args;
+ }
ret = failsafe_eal_init(dev);
if (ret)
- goto free_args;
+ goto unregister_new_callback;
ret = fs_mutex_init(priv);
if (ret)
- goto free_args;
+ goto unregister_new_callback;
ret = failsafe_hotplug_alarm_install(dev);
if (ret) {
ERROR("Could not set up plug-in event detection");
- goto free_args;
+ goto unregister_new_callback;
}
mac = &dev->data->mac_addrs[0];
if (mac_from_arg) {
@@ -224,7 +235,7 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
mac);
if (ret) {
ERROR("Failed to set default MAC address");
- goto free_args;
+ goto cancel_alarm;
}
}
} else {
@@ -257,7 +268,13 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
.fd = -1,
.type = RTE_INTR_HANDLE_EXT,
};
+ rte_eth_dev_probing_finish(dev);
return 0;
+cancel_alarm:
+ failsafe_hotplug_alarm_cancel(dev);
+unregister_new_callback:
+ rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback, dev);
free_args:
failsafe_args_free(dev);
free_subs:
@@ -277,6 +294,8 @@ fs_rte_eth_free(const char *name)
dev = rte_eth_dev_allocated(name);
if (dev == NULL)
return -ENODEV;
+ rte_eth_dev_callback_unregister(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
+ failsafe_eth_new_event_callback, dev);
ret = failsafe_eal_uninit(dev);
if (ret)
ERROR("Error while uninitializing sub-EAL");
@@ -294,10 +313,26 @@ static int
rte_pmd_failsafe_probe(struct rte_vdev_device *vdev)
{
const char *name;
+ struct rte_eth_dev *eth_dev;
name = rte_vdev_device_name(vdev);
INFO("Initializing " FAILSAFE_DRIVER_NAME " for %s",
name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(vdev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ ERROR("Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &failsafe_ops;
+ eth_dev->device = &vdev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
return fs_eth_dev_create(vdev);
}
@@ -318,3 +353,10 @@ static struct rte_vdev_driver failsafe_drv = {
RTE_PMD_REGISTER_VDEV(net_failsafe, failsafe_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_failsafe, PMD_FAILSAFE_PARAM_STRING);
+
+RTE_INIT(failsafe_init_log)
+{
+ failsafe_logtype = rte_log_register("pmd.net.failsafe");
+ if (failsafe_logtype >= 0)
+ rte_log_set_level(failsafe_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
index 366dbea1..626883ce 100644
--- a/drivers/net/failsafe/failsafe_args.c
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <fcntl.h>
@@ -14,6 +14,7 @@
#include <rte_devargs.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
+#include <rte_string_fns.h>
#include "failsafe_private.h"
@@ -62,7 +63,7 @@ fs_parse_device(struct sub_device *sdev, char *args)
d = &sdev->devargs;
DEBUG("%s", args);
- ret = rte_eal_devargs_parse(args, d);
+ ret = rte_devargs_parse(d, args);
if (ret) {
DEBUG("devargs parsing failed with code %d", ret);
return ret;
@@ -340,7 +341,7 @@ fs_remove_sub_devices_definition(char params[DEVARGS_MAXLEN])
a = b + 1;
}
out:
- snprintf(params, DEVARGS_MAXLEN, "%s", buffer);
+ strlcpy(params, buffer, DEVARGS_MAXLEN);
return 0;
}
@@ -392,7 +393,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
ret = 0;
priv->subs_tx = FAILSAFE_MAX_ETHPORTS;
/* default parameters */
- n = snprintf(mut_params, sizeof(mut_params), "%s", params);
+ n = strlcpy(mut_params, params, sizeof(mut_params));
if (n >= sizeof(mut_params)) {
ERROR("Parameter string too long (>=%zu)",
sizeof(mut_params));
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
index c3d67312..ce1633f1 100644
--- a/drivers/net/failsafe/failsafe_eal.c
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <rte_malloc.h>
@@ -18,8 +18,9 @@ fs_ethdev_portid_get(const char *name, uint16_t *port_id)
return -EINVAL;
}
len = strlen(name);
- RTE_ETH_FOREACH_DEV(pid) {
- if (!strncmp(name, rte_eth_devices[pid].device->name, len)) {
+ for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
+ if (rte_eth_dev_is_valid_port(pid) &&
+ !strncmp(name, rte_eth_devices[pid].device->name, len)) {
*port_id = pid;
return 0;
}
@@ -41,6 +42,8 @@ fs_bus_init(struct rte_eth_dev *dev)
continue;
da = &sdev->devargs;
if (fs_ethdev_portid_get(da->name, &pid) != 0) {
+ struct rte_eth_dev_owner pid_owner;
+
ret = rte_eal_hotplug_add(da->bus->name,
da->name,
da->args);
@@ -55,12 +58,26 @@ fs_bus_init(struct rte_eth_dev *dev)
ERROR("sub_device %d init went wrong", i);
return -ENODEV;
}
+ /*
+ * The NEW callback tried to take ownership; check
+ * whether it succeeded or not.
+ */
+ rte_eth_dev_owner_get(pid, &pid_owner);
+ if (pid_owner.id != PRIV(dev)->my_owner.id) {
+ INFO("sub_device %d owner(%s_%016"PRIX64") is not my,"
+ " owner(%s_%016"PRIX64"), will try again later",
+ i, pid_owner.name, pid_owner.id,
+ PRIV(dev)->my_owner.name,
+ PRIV(dev)->my_owner.id);
+ continue;
+ }
} else {
+ /* The sub-device port was found. */
char devstr[DEVARGS_MAXLEN] = "";
struct rte_devargs *probed_da =
rte_eth_devices[pid].device->devargs;
- /* Take control of device probed by EAL options. */
+ /* Take control of probed device. */
free(da->args);
memset(da, 0, sizeof(*da));
if (probed_da != NULL)
@@ -69,7 +86,7 @@ fs_bus_init(struct rte_eth_dev *dev)
else
snprintf(devstr, sizeof(devstr), "%s",
rte_eth_devices[pid].device->name);
- ret = rte_eal_devargs_parse(devstr, da);
+ ret = rte_devargs_parse(da, devstr);
if (ret) {
ERROR("Probed devargs parsing failed with code"
" %d", ret);
@@ -77,28 +94,28 @@ fs_bus_init(struct rte_eth_dev *dev)
}
INFO("Taking control of a probed sub device"
" %d named %s", i, da->name);
- }
- ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner);
- if (ret < 0) {
- INFO("sub_device %d owner set failed (%s),"
- " will try again later", i, strerror(-ret));
- continue;
- } else if (strncmp(rte_eth_devices[pid].device->name, da->name,
- strlen(da->name)) != 0) {
- /*
- * The device probably was removed and its port id was
- * reallocated before ownership set.
- */
- rte_eth_dev_owner_unset(pid, PRIV(dev)->my_owner.id);
- INFO("sub_device %d was probably removed before taking"
- " ownership, will try again later", i);
- continue;
+ ret = rte_eth_dev_owner_set(pid, &PRIV(dev)->my_owner);
+ if (ret < 0) {
+ INFO("sub_device %d owner set failed (%s), "
+ "will try again later", i, strerror(-ret));
+ continue;
+ } else if (strncmp(rte_eth_devices[pid].device->name,
+ da->name, strlen(da->name)) != 0) {
+ /*
+ * The device probably was removed and its port
+ * id was reallocated before ownership set.
+ */
+ rte_eth_dev_owner_unset(pid,
+ PRIV(dev)->my_owner.id);
+ INFO("sub_device %d was removed before taking"
+ " ownership, will try again later", i);
+ continue;
+ }
}
ETH(sdev) = &rte_eth_devices[pid];
SUB_ID(sdev) = i;
sdev->fs_dev = dev;
sdev->dev = ETH(sdev)->device;
- ETH(sdev)->state = RTE_ETH_DEV_DEFERRED;
sdev->state = DEV_PROBED;
}
return 0;
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 2c0bf936..5b5cb3b4 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <unistd.h>
@@ -260,6 +260,7 @@ fs_dev_remove(struct sub_device *sdev)
sdev->state = DEV_ACTIVE;
/* fallthrough */
case DEV_ACTIVE:
+ failsafe_eth_dev_unregister_callbacks(sdev);
rte_eth_dev_close(PORT_ID(sdev));
sdev->state = DEV_PROBED;
/* fallthrough */
@@ -321,6 +322,35 @@ fs_rxtx_clean(struct sub_device *sdev)
}
void
+failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev)
+{
+ int ret;
+
+ if (sdev == NULL)
+ return;
+ if (sdev->rmv_callback) {
+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_RMV,
+ failsafe_eth_rmv_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to unregister RMV callback for sub_device"
+ " %d", SUB_ID(sdev));
+ sdev->rmv_callback = 0;
+ }
+ if (sdev->lsc_callback) {
+ ret = rte_eth_dev_callback_unregister(PORT_ID(sdev),
+ RTE_ETH_EVENT_INTR_LSC,
+ failsafe_eth_lsc_event_callback,
+ sdev);
+ if (ret)
+ WARN("Failed to unregister LSC callback for sub_device"
+ " %d", SUB_ID(sdev));
+ sdev->lsc_callback = 0;
+ }
+}
+
+void
failsafe_dev_remove(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
@@ -463,3 +493,26 @@ failsafe_eth_lsc_event_callback(uint16_t port_id __rte_unused,
else
return 0;
}
+
+/* Take sub-device ownership before it becomes exposed to the application. */
+int
+failsafe_eth_new_event_callback(uint16_t port_id,
+ enum rte_eth_event_type event __rte_unused,
+ void *cb_arg, void *out __rte_unused)
+{
+ struct rte_eth_dev *fs_dev = cb_arg;
+ struct sub_device *sdev;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ uint8_t i;
+
+ FOREACH_SUBDEV_STATE(sdev, i, fs_dev, DEV_PARSED) {
+ if (sdev->state >= DEV_PROBED)
+ continue;
+ if (strcmp(sdev->devargs.name, dev->device->name) != 0)
+ continue;
+ rte_eth_dev_owner_set(port_id, &PRIV(fs_dev)->my_owner);
+ /* The actual owner will be checked after the port probing. */
+ break;
+ }
+ return 0;
+}
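
A minimal sketch of the same RTE_ETH_EVENT_NEW hook outside this PMD, using the public ethdev API of this release: a callback registered for RTE_ETH_ALL fires while the new port is still probing, before rte_eth_dev_probing_finish() exposes it, which is the race-free window for claiming ownership. Error handling is trimmed for brevity.

#include <rte_ethdev.h>

static int
on_new_port(uint16_t port_id, enum rte_eth_event_type event __rte_unused,
	    void *cb_arg, void *ret_param __rte_unused)
{
	struct rte_eth_dev_owner *owner = cb_arg;

	/* Claim the port before applications can start using it. */
	rte_eth_dev_owner_set(port_id, owner);
	return 0;
}

static int
watch_new_ports(struct rte_eth_dev_owner *owner)
{
	return rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
					     on_new_port, owner);
}
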
diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
index ec8c909b..bfe42fce 100644
--- a/drivers/net/failsafe/failsafe_flow.c
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <sys/queue.h>
@@ -174,7 +174,7 @@ fs_flow_flush(struct rte_eth_dev *dev,
static int
fs_flow_query(struct rte_eth_dev *dev,
struct rte_flow *flow,
- enum rte_flow_action_type type,
+ const struct rte_flow_action *action,
void *arg,
struct rte_flow_error *error)
{
@@ -185,7 +185,7 @@ fs_flow_query(struct rte_eth_dev *dev,
if (sdev != NULL) {
int ret = rte_flow_query(PORT_ID(sdev),
flow->flows[SUB_ID(sdev)],
- type, arg, error);
+ action, arg, error);
if ((ret = fs_err(sdev, ret))) {
fs_unlock(dev, 0);
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index 6b7f9c1a..fc6ec37f 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 Mellanox Technologies, Ltd.
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
/**
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 057e435c..24e91c93 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <stdbool.h>
@@ -13,6 +13,7 @@
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_cycles.h>
+#include <rte_ethdev.h>
#include "failsafe_private.h"
@@ -81,30 +82,22 @@ static struct rte_eth_dev_info default_infos = {
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM,
- .flow_type_rss_offloads = 0x0,
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO,
+ .flow_type_rss_offloads =
+ ETH_RSS_IP |
+ ETH_RSS_UDP |
+ ETH_RSS_TCP,
};
static int
fs_dev_configure(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
- uint64_t supp_tx_offloads;
- uint64_t tx_offloads;
uint8_t i;
int ret;
fs_lock(dev, 0);
- supp_tx_offloads = PRIV(dev)->infos.tx_offload_capa;
- tx_offloads = dev->data->dev_conf.txmode.offloads;
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- rte_errno = ENOTSUP;
- ERROR("Some Tx offloads are not supported, "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, supp_tx_offloads);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
FOREACH_SUBDEV(sdev, i, dev) {
int rmv_interrupt = 0;
int lsc_interrupt = 0;
@@ -145,7 +138,7 @@ fs_dev_configure(struct rte_eth_dev *dev)
fs_unlock(dev, 0);
return ret;
}
- if (rmv_interrupt) {
+ if (rmv_interrupt && sdev->rmv_callback == 0) {
ret = rte_eth_dev_callback_register(PORT_ID(sdev),
RTE_ETH_EVENT_INTR_RMV,
failsafe_eth_rmv_event_callback,
@@ -153,9 +146,11 @@ fs_dev_configure(struct rte_eth_dev *dev)
if (ret)
WARN("Failed to register RMV callback for sub_device %d",
SUB_ID(sdev));
+ else
+ sdev->rmv_callback = 1;
}
dev->data->dev_conf.intr_conf.rmv = 0;
- if (lsc_interrupt) {
+ if (lsc_interrupt && sdev->lsc_callback == 0) {
ret = rte_eth_dev_callback_register(PORT_ID(sdev),
RTE_ETH_EVENT_INTR_LSC,
failsafe_eth_lsc_event_callback,
@@ -163,6 +158,8 @@ fs_dev_configure(struct rte_eth_dev *dev)
if (ret)
WARN("Failed to register LSC callback for sub_device %d",
SUB_ID(sdev));
+ else
+ sdev->lsc_callback = 1;
}
dev->data->dev_conf.intr_conf.lsc = lsc_enabled;
sdev->state = DEV_ACTIVE;
@@ -289,6 +286,7 @@ fs_dev_close(struct rte_eth_dev *dev)
PRIV(dev)->state = DEV_ACTIVE - 1;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
DEBUG("Closing sub_device %d", i);
+ failsafe_eth_dev_unregister_callbacks(sdev);
rte_eth_dev_close(PORT_ID(sdev));
sdev->state = DEV_ACTIVE - 1;
}
@@ -296,25 +294,6 @@ fs_dev_close(struct rte_eth_dev *dev)
fs_unlock(dev, 0);
}
-static bool
-fs_rxq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads;
- uint64_t queue_supp_offloads;
- uint64_t port_supp_offloads;
-
- port_offloads = dev->data->dev_conf.rxmode.offloads;
- queue_supp_offloads = PRIV(dev)->infos.rx_queue_offload_capa;
- port_supp_offloads = PRIV(dev)->infos.rx_offload_capa;
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
static void
fs_rx_queue_release(void *queue)
{
@@ -367,19 +346,6 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
fs_rx_queue_release(rxq);
dev->data->rx_queues[rx_queue_id] = NULL;
}
- /* Verify application offloads are valid for our port and queue. */
- if (fs_rxq_offloads_valid(dev, rx_conf->offloads) == false) {
- rte_errno = ENOTSUP;
- ERROR("Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- PRIV(dev)->infos.rx_offload_capa |
- PRIV(dev)->infos.rx_queue_offload_capa);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
rxq = rte_zmalloc(NULL,
sizeof(*rxq) +
sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -498,25 +464,6 @@ unlock:
return rc;
}
-static bool
-fs_txq_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads;
- uint64_t queue_supp_offloads;
- uint64_t port_supp_offloads;
-
- port_offloads = dev->data->dev_conf.txmode.offloads;
- queue_supp_offloads = PRIV(dev)->infos.tx_queue_offload_capa;
- port_supp_offloads = PRIV(dev)->infos.tx_offload_capa;
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
static void
fs_tx_queue_release(void *queue)
{
@@ -556,24 +503,6 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
fs_tx_queue_release(txq);
dev->data->tx_queues[tx_queue_id] = NULL;
}
- /*
- * Don't verify queue offloads for applications which
- * use the old API.
- */
- if (tx_conf != NULL &&
- (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- fs_txq_offloads_valid(dev, tx_conf->offloads) == false) {
- rte_errno = ENOTSUP;
- ERROR("Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- PRIV(dev)->infos.tx_offload_capa |
- PRIV(dev)->infos.tx_queue_offload_capa);
- fs_unlock(dev, 0);
- return -rte_errno;
- }
txq = rte_zmalloc("ethdev TX queue",
sizeof(*txq) +
sizeof(rte_atomic64_t) * PRIV(dev)->subs_tail,
@@ -804,26 +733,29 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
} else {
uint64_t rx_offload_capa;
uint64_t rxq_offload_capa;
+ uint64_t rss_hf_offload_capa;
rx_offload_capa = default_infos.rx_offload_capa;
rxq_offload_capa = default_infos.rx_queue_offload_capa;
+ rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
rte_eth_dev_info_get(PORT_ID(sdev),
&PRIV(dev)->infos);
rx_offload_capa &= PRIV(dev)->infos.rx_offload_capa;
rxq_offload_capa &=
PRIV(dev)->infos.rx_queue_offload_capa;
+ rss_hf_offload_capa &=
+ PRIV(dev)->infos.flow_type_rss_offloads;
}
sdev = TX_SUBDEV(dev);
rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
+ PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
PRIV(dev)->infos.tx_offload_capa &=
default_infos.tx_offload_capa;
PRIV(dev)->infos.tx_queue_offload_capa &=
default_infos.tx_queue_offload_capa;
- PRIV(dev)->infos.flow_type_rss_offloads &=
- default_infos.flow_type_rss_offloads;
}
rte_memcpy(infos, &PRIV(dev)->infos, sizeof(*infos));
}
@@ -997,16 +929,52 @@ fs_mac_addr_add(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct sub_device *sdev;
uint8_t i;
+ int ret;
fs_lock(dev, 0);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_default_mac_addr_set(PORT_ID(sdev), mac_addr);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_mac_addr_set failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ fs_unlock(dev, 0);
+
+ return 0;
+}
+
+static int
+fs_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_rss_hash_update(PORT_ID(sdev), rss_conf);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Operation rte_eth_dev_rss_hash_update"
+ " failed for sub_device %d with error %d",
+ i, ret);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
fs_unlock(dev, 0);
+
+ return 0;
}
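
fs_rss_hash_update() and the reworked fs_mac_addr_set() follow the same fan-out shape: apply the operation to each active sub-device, unwrap the result through fs_err(), and stop at the first failure while the device lock is held. A compilable sketch of just that control flow, using hypothetical stubs rather than the driver's helpers:

    #include <stdio.h>

    #define NB_SUBS 3

    /* Hypothetical per-sub-device operation; returns 0 on success. */
    static int op_on_sub(int sub)
    {
        return sub < NB_SUBS ? 0 : -1;
    }

    /* Apply the operation to every sub-device, stop at the first failure. */
    static int fan_out(void)
    {
        int i, ret;

        for (i = 0; i < NB_SUBS; i++) {
            ret = op_on_sub(i);
            if (ret) {
                fprintf(stderr, "operation failed for sub_device %d: %d\n",
                        i, ret);
                return ret; /* the driver unlocks here before returning */
            }
        }
        return 0;
    }

    int main(void)
    {
        return fan_out();
    }
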
static int
@@ -1068,5 +1036,6 @@ const struct eth_dev_ops failsafe_ops = {
.mac_addr_remove = fs_mac_addr_remove,
.mac_addr_add = fs_mac_addr_add,
.mac_addr_set = fs_mac_addr_set,
+ .rss_hash_update = fs_rss_hash_update,
.filter_ctrl = fs_filter_ctrl,
};
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
index 2d16ba4c..886af861 100644
--- a/drivers/net/failsafe/failsafe_private.h
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
@@ -119,6 +119,10 @@ struct sub_device {
volatile unsigned int remove:1;
/* flow isolation state */
int flow_isolated:1;
+ /* RMV callback registration state */
+ unsigned int rmv_callback:1;
+ /* LSC callback registration state */
+ unsigned int lsc_callback:1;
};
struct fs_priv {
@@ -211,6 +215,7 @@ int failsafe_eal_uninit(struct rte_eth_dev *dev);
/* ETH_DEV */
int failsafe_eth_dev_state_sync(struct rte_eth_dev *dev);
+void failsafe_eth_dev_unregister_callbacks(struct sub_device *sdev);
void failsafe_dev_remove(struct rte_eth_dev *dev);
void failsafe_stats_increment(struct rte_eth_stats *to,
struct rte_eth_stats *from);
@@ -220,6 +225,9 @@ int failsafe_eth_rmv_event_callback(uint16_t port_id,
int failsafe_eth_lsc_event_callback(uint16_t port_id,
enum rte_eth_event_type event,
void *cb_arg, void *out);
+int failsafe_eth_new_event_callback(uint16_t port_id,
+ enum rte_eth_event_type event,
+ void *cb_arg, void *out);
/* GLOBALS */
@@ -326,8 +334,12 @@ extern int mac_from_arg;
#define FS_THREADID_FMT "lu"
#endif
-#define LOG__(level, m, ...) \
- RTE_LOG(level, PMD, "net_failsafe: " m "%c", __VA_ARGS__)
+extern int failsafe_logtype;
+
+#define LOG__(l, m, ...) \
+ rte_log(RTE_LOG_ ## l, failsafe_logtype, \
+ "net_failsafe: " m "%c", __VA_ARGS__)
+
#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
#define DEBUG(...) LOG_(DEBUG, __VA_ARGS__)
#define INFO(...) LOG_(INFO, __VA_ARGS__)
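
The new LOG__ switches net_failsafe to a dynamically registered logtype but keeps the existing trailing-newline trick: LOG_ appends '\n' through the variadic list so the "%c" pasted onto the format string always has an argument, even for messages with no other parameters. A standalone demonstration of just that macro mechanism, minus the rte_log plumbing:

    #include <stdio.h>

    /* Same shape as the driver's macros, with fprintf standing in. */
    #define LOG__(m, ...) fprintf(stderr, "net_failsafe: " m "%c", __VA_ARGS__)
    #define LOG_(...) LOG__(__VA_ARGS__, '\n')

    int main(void)
    {
        LOG_("link is %s", "up"); /* '\n' arrives as the final %c argument */
        LOG_("device started");   /* also works with no other arguments */
        return 0;
    }
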
diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c
index 363cf7ba..7bd0f963 100644
--- a/drivers/net/failsafe/failsafe_rxtx.c
+++ b/drivers/net/failsafe/failsafe_rxtx.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <rte_atomic.h>
diff --git a/drivers/net/failsafe/meson.build b/drivers/net/failsafe/meson.build
new file mode 100644
index 00000000..a249ff4a
--- /dev/null
+++ b/drivers/net/failsafe/meson.build
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+cflags += '-std=gnu99'
+cflags += '-D_DEFAULT_SOURCE'
+cflags += '-D_XOPEN_SOURCE=700'
+cflags += '-pedantic'
+if host_machine.system() == 'linux'
+ cflags += '-DLINUX'
+else
+ cflags += '-DBSD'
+endif
+
+allow_experimental_apis = true
+
+sources = files('failsafe_args.c',
+ 'failsafe.c',
+ 'failsafe_eal.c',
+ 'failsafe_ether.c',
+ 'failsafe_flow.c',
+ 'failsafe_intr.c',
+ 'failsafe_ops.c',
+ 'failsafe_rxtx.c')
diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile
index b059a700..d657dff8 100644
--- a/drivers/net/fm10k/Makefile
+++ b/drivers/net/fm10k/Makefile
@@ -19,7 +19,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 30dad3e2..dc814855 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -106,9 +106,6 @@
#define FM10K_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define FM10K_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
-#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
struct fm10k_macvlan_filter_info {
uint16_t vlan_num; /* Total VLAN number */
uint16_t mac_num; /* Total mac number */
@@ -180,6 +177,7 @@ struct fm10k_rx_queue {
uint8_t drop_en;
uint8_t rx_deferred_start; /* don't start this queue in dev start. */
uint16_t rx_ftag_en; /* indicates FTAG RX supported */
+ uint64_t offloads; /* offloads of DEV_RX_OFFLOAD_* */
};
/*
@@ -211,7 +209,7 @@ struct fm10k_tx_queue {
uint16_t next_rs; /* Next pos to set RS flag */
uint16_t next_dd; /* Next pos to check DD flag */
volatile uint32_t *tail_ptr;
- uint32_t txq_flags; /* Holds flags for this TXq */
+ uint64_t offloads; /* Offloads of DEV_TX_OFFLOAD_* */
uint16_t nb_desc;
uint16_t port_id;
uint8_t tx_deferred_start; /** don't start this queue in dev start. */
@@ -328,6 +326,13 @@ uint16_t fm10k_recv_scattered_pkts(void *rx_queue,
int
fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+
+int
+fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+
uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 94237610..541a49b7 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -60,6 +60,13 @@ static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
@@ -444,8 +451,12 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
+ /* The KEEP_CRC offload flag is not supported by the PMD.
+ * This block can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed.
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
+
 /* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
@@ -454,6 +465,8 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
return ret;
}
+ dev->data->scattered_rx = 0;
+
return 0;
}
@@ -756,7 +769,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
/* It adds dual VLAN length for supporting dual VLAN */
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
- dev->data->dev_conf.rxmode.enable_scatter) {
+ rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t reg;
dev->data->scattered_rx = 1;
reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
@@ -797,52 +810,50 @@ static int
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int err = -1;
+ int err;
uint32_t reg;
struct fm10k_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
- err = rx_queue_reset(rxq);
- if (err == -ENOMEM) {
- PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
- return err;
- } else if (err == -EINVAL) {
- PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
- " %d", err);
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled on real
- * hardware, but BEFORE the queue is enabled when using the
- * emulation platform. Do it in both places for now and remove
- * this comment and the following two register writes when the
- * emulation platform is no longer being used.
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- /* Set PF ownership flag for PF devices */
- reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
- if (hw->mac.type == fm10k_mac_pf)
- reg |= FM10K_RXQCTL_PF;
- reg |= FM10K_RXQCTL_ENABLE;
- /* enable RX queue */
- FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
- FM10K_WRITE_FLUSH(hw);
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- }
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -852,14 +863,12 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- /* Disable RX queue */
- rx_queue_disable(hw, rx_queue_id);
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
- /* Free mbuf and clean HW ring */
- rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -871,28 +880,23 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
- int err = 0;
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
+ q->ops->reset(q);
- q->ops->reset(q);
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
- /* reset head and tail pointers */
- FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
-
- /* enable TX queue */
- FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
- FM10K_TXDCTL_ENABLE | txdctl);
- FM10K_WRITE_FLUSH(hw);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- err = -1;
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -902,11 +906,9 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- tx_queue_disable(hw, tx_queue_id);
- tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1233,13 +1235,11 @@ fm10k_link_update(struct rte_eth_dev *dev,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
- * leave the speed undefined since there is no 50Gbps Ethernet.
- */
- dev->data->dev_link.link_speed = 0;
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
dev->data->dev_link.link_status =
dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
return 0;
}
@@ -1377,7 +1377,6 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = pdev;
dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
dev_info->max_rx_queues = hw->mac.max_queues;
@@ -1389,17 +1388,12 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
dev_info->vmdq_queue_base = 0;
dev_info->max_vmdq_pools = ETH_32_POOLS;
dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
dev_info->reta_size = FM10K_MAX_RSS_INDICES;
@@ -1412,6 +1406,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
},
.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1422,7 +1417,7 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
},
.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
- .txq_flags = FM10K_SIMPLE_TX_FLAG,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -1571,19 +1566,22 @@ static int
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP))
PMD_INIT_LOG(ERR, "VLAN stripping is "
"always on in fm10k");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
PMD_INIT_LOG(ERR, "VLAN QinQ is not "
"supported in fm10k");
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER))
PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
}
@@ -1781,6 +1779,27 @@ mempool_element_size_valid(struct rte_mempool *mp)
return 1;
}
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+}
+
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT);
+}
+
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
@@ -1791,9 +1810,12 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/* make sure the mempool element size can account for alignment. */
if (!mempool_element_size_valid(mp)) {
PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
@@ -1838,6 +1860,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->queue_id = queue_id;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+ q->offloads = offloads;
if (handle_rxconf(q, conf))
return -EINVAL;
@@ -1947,6 +1970,24 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
return 0;
}
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO);
+}
+
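
With the txq_flags scheme gone, fm10k now reports capabilities split into a per-queue set and a port-wide set (the port capability mask includes the queue one), and each queue's effective offloads are the OR of its rte_eth_rxconf/txconf offloads with the port-level configuration, as the queue-setup hunks compute. A small sketch of that combination and a validity check, with hypothetical flag values:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical offload flags. */
    #define OFFLOAD_SCATTER (1ULL << 0) /* per-queue capable */
    #define OFFLOAD_VLAN    (1ULL << 1) /* port-wide only */

    int main(void)
    {
        uint64_t queue_capa = OFFLOAD_SCATTER;
        uint64_t port_capa  = OFFLOAD_VLAN | queue_capa; /* includes queue capa */
        uint64_t port_conf  = OFFLOAD_VLAN;    /* chosen at dev_configure time */
        uint64_t queue_conf = OFFLOAD_SCATTER; /* chosen at queue_setup time */

        /* Effective per-queue offloads, as the queue-setup hunks compute. */
        uint64_t offloads = queue_conf | port_conf;

        printf("effective 0x%llx valid: %s\n", (unsigned long long)offloads,
               (offloads & ~port_capa) ? "no" : "yes");
        return 0;
    }
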
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
@@ -1955,9 +1996,12 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_tx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/* make sure a valid number of descriptors have been requested */
if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
FM10K_MULT_TX_DESC, nb_desc)) {
@@ -1994,7 +2038,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
q->nb_desc = nb_desc;
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
- q->txq_flags = conf->txq_flags;
+ q->offloads = offloads;
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
@@ -2784,6 +2828,8 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = {
.tx_queue_setup = fm10k_tx_queue_setup,
.tx_queue_release = fm10k_tx_queue_release,
.rx_descriptor_done = fm10k_dev_rx_descriptor_done,
+ .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
+ .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
.rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
.reta_update = fm10k_reta_update,
@@ -2860,7 +2906,7 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
uint16_t tx_ftag_en = 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- /* primary process has set the ftag flag and txq_flags */
+ /* primary process has set the ftag flag and offloads */
txq = dev->data->tx_queues[0];
if (fm10k_tx_vec_condition_check(txq)) {
dev->tx_pkt_burst = fm10k_xmit_pkts;
@@ -3237,9 +3283,7 @@ RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(fm10k_init_log);
-static void
-fm10k_init_log(void)
+RTE_INIT(fm10k_init_log)
{
fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
if (fm10k_logtype_init >= 0)
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 9320748c..4a5b46ec 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -389,6 +389,84 @@ fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
return ret;
}
+int
+fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile union fm10k_rx_desc *rxdp;
+ struct fm10k_rx_queue *rxq = rx_queue;
+ uint16_t nb_hold, trigger_last;
+ uint16_t desc;
+ int ret;
+
+ if (unlikely(offset >= rxq->nb_desc)) {
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
+ return 0;
+ }
+
+ if (rxq->next_trigger < rxq->alloc_thresh)
+ trigger_last = rxq->next_trigger +
+ rxq->nb_desc - rxq->alloc_thresh;
+ else
+ trigger_last = rxq->next_trigger - rxq->alloc_thresh;
+
+ if (rxq->next_dd < trigger_last)
+ nb_hold = rxq->next_dd + rxq->nb_desc - trigger_last;
+ else
+ nb_hold = rxq->next_dd - trigger_last;
+
+ if (offset >= rxq->nb_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->next_dd + offset;
+ if (desc >= rxq->nb_desc)
+ desc -= rxq->nb_desc;
+
+ rxdp = &rxq->hw_ring[desc];
+
+ ret = !!(rxdp->w.status &
+ rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
+
+ return ret;
+}
+
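
fm10k_dev_rx_descriptor_status() has to measure distances on a circular ring, so each subtraction is written with an explicit wrap: when the first index has already wrapped past the second, nb_desc is added back. A standalone sketch of that modular distance, with a hypothetical ring size:

    #include <stdint.h>
    #include <stdio.h>

    #define NB_DESC 512u /* hypothetical ring size */

    /* Distance from 'from' to 'to' on a ring of NB_DESC descriptors. */
    static uint16_t ring_dist(uint16_t from, uint16_t to)
    {
        return (to >= from) ? (uint16_t)(to - from)
                            : (uint16_t)(to + NB_DESC - from);
    }

    int main(void)
    {
        /* Mirrors the nb_hold computation: slots between the last refill
         * trigger and the next descriptor expected to complete. */
        uint16_t trigger_last = 500, next_dd = 4;

        printf("held descriptors: %u\n", ring_dist(trigger_last, next_dd));
        return 0;
    }
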
+int
+fm10k_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ volatile struct fm10k_tx_desc *txdp;
+ struct fm10k_tx_queue *txq = tx_queue;
+ uint16_t desc;
+ uint16_t next_rs = txq->nb_desc;
+ struct fifo rs_tracker = txq->rs_tracker;
+ struct fifo *r = &rs_tracker;
+
+ if (unlikely(offset >= txq->nb_desc))
+ return -EINVAL;
+
+ desc = txq->next_free + offset;
+ /* go to next desc that has the RS bit */
+ desc = (desc / txq->rs_thresh + 1) *
+ txq->rs_thresh - 1;
+
+ if (desc >= txq->nb_desc) {
+ desc -= txq->nb_desc;
+ if (desc >= txq->nb_desc)
+ desc -= txq->nb_desc;
+ }
+
+ r->head = r->list;
+ for ( ; r->head != r->endp; ) {
+ if (*r->head >= desc && *r->head < next_rs)
+ next_rs = *r->head;
+ ++r->head;
+ }
+
+ txdp = &txq->hw_ring[next_rs];
+ if (txdp->flags & FM10K_TXD_FLAG_DONE)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
/*
* Free multiple TX mbuf at a time if they are in the same pool
*
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 498a1781..005fda63 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -210,7 +210,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
 /* without rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_extend != 0)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
return -1;
#endif
@@ -219,7 +219,7 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
return -1;
/* no header split support */
- if (rxmode->header_split == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
return 0;
@@ -695,7 +695,7 @@ int __attribute__((cold))
fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
{
/* Vector TX can't offload any features yet */
- if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) != FM10K_SIMPLE_TX_FLAG)
+ if (txq->offloads != 0)
return -1;
if (txq->tx_ftag_en)
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 5663f5b1..3f869a8d 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -11,6 +11,8 @@ LIB = librte_pmd_i40e.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF
CFLAGS += -DX722_A0_SUPPORT
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
LDLIBS += -lrte_bus_pci
@@ -24,7 +26,7 @@ LIBABIVER := 2
# to disable warnings
#
ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
-CFLAGS_BASE_DRIVER = -wd593 -wd188
+CFLAGS_BASE_DRIVER = -diag-disable 593
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
CFLAGS_BASE_DRIVER += -Wno-sign-compare
CFLAGS_BASE_DRIVER += -Wno-unused-value
@@ -85,6 +87,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_vf_representor.c
ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2)
CC_AVX2_SUPPORT=1
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index a482ab90..df66e76a 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -90,7 +90,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
@@ -113,7 +113,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -140,7 +140,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -168,7 +168,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -291,7 +291,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -535,7 +535,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1274,14 +1274,14 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1692,7 +1692,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -3059,7 +3059,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3196,7 +3196,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3219,7 +3219,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
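
All of the i40e_register.h changes are the same one-character fix: the ...ENABLE/...VALID masks sit in bit 31, and I40E_MASK expands to a plain left shift, so shifting the signed constant 0x1 by 31 is undefined behaviour in C (and where it happens to "work", the negative result sign-extends when widened). Using 0x1u keeps the shift well-defined. A compilable illustration, where MASK is only a stand-in for the driver's I40E_MASK:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the driver's I40E_MASK(mask, shift). */
    #define MASK(m, s) ((m) << (s))

    int main(void)
    {
        /* 0x1 is a signed int: shifting it into bit 31 is undefined
         * behaviour, and on common implementations the negative result
         * sign-extends when widened to 64 bits. 0x1u stays defined. */
        uint64_t bad  = MASK(0x1, 31);  /* often 0xffffffff80000000 */
        uint64_t good = MASK(0x1u, 31); /* always 0x0000000080000000 */

        printf("0x%016llx vs 0x%016llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }
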
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 508b4171..85a6a867 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -11,6 +11,7 @@
#include <inttypes.h>
#include <assert.h>
+#include <rte_common.h>
#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
@@ -41,6 +42,8 @@
#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb"
#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list"
+#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
+#define ETH_I40E_QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
#define I40E_CLEAR_PXE_WAIT_MS 200
@@ -213,7 +216,7 @@
/* Bit mask of Extended Tag enable/disable */
#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)
-static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev);
static int i40e_dev_configure(struct rte_eth_dev *dev);
static int i40e_dev_start(struct rte_eth_dev *dev);
@@ -369,7 +372,12 @@ static int i40e_get_eeprom_length(struct rte_eth_dev *dev);
static int i40e_get_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
-static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
@@ -395,6 +403,13 @@ static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
int i40e_logtype_init;
int i40e_logtype_driver;
+static const char *const valid_keys[] = {
+ ETH_I40E_FLOATING_VEB_ARG,
+ ETH_I40E_FLOATING_VEB_LIST_ARG,
+ ETH_I40E_SUPPORT_MULTI_DRIVER,
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG,
+ NULL};
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -489,6 +504,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.get_reg = i40e_get_regs,
.get_eeprom_length = i40e_get_eeprom_length,
.get_eeprom = i40e_get_eeprom,
+ .get_module_info = i40e_get_module_info,
+ .get_module_eeprom = i40e_get_module_eeprom,
.mac_addr_set = i40e_set_default_mac_addr,
.mtu_set = i40e_dev_mtu_set,
.tm_ops_get = i40e_tm_ops_get,
@@ -607,16 +624,74 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
sizeof(rte_i40e_txq_prio_strings[0]))
-static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+static int
+eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct i40e_adapter), eth_i40e_dev_init);
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ }
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct i40e_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_i40e_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ /* probe VF representor ports */
+ struct rte_eth_dev *pf_ethdev = rte_eth_dev_allocated(
+ pci_dev->device.name);
+
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct i40e_vf_representor representor = {
+ .vf_id = eth_da.representor_ports[i],
+ .switch_domain_id = I40E_DEV_PRIVATE_TO_PF(
+ pf_ethdev->data->dev_private)->switch_domain_id,
+ .adapter = I40E_DEV_PRIVATE_TO_ADAPTER(
+ pf_ethdev->data->dev_private)
+ };
+
+ /* representor port name: net_<pci bdf>_representor_<vf id> */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name, eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct i40e_vf_representor), NULL, NULL,
+ i40e_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create i40e vf "
+ "representor %s.", name);
+ }
+
+ return 0;
}
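
Each VF representor gets its own rte_eth_dev whose name is derived from the PF's PCI device name plus the VF id, using the net_%s_representor_%d format shown above. A trivial standalone sketch of that naming (the PCI address and VF ids below are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        char name[64]; /* stands in for RTE_ETH_NAME_MAX_LEN */
        const char *pci_name = "0000:81:00.0"; /* hypothetical PF address */
        const int vf_ids[] = { 0, 2 };         /* hypothetical representors */
        unsigned int i;

        for (i = 0; i < sizeof(vf_ids) / sizeof(vf_ids[0]); i++) {
            snprintf(name, sizeof(name), "net_%s_representor_%d",
                     pci_name, vf_ids[i]);
            printf("%s\n", name); /* e.g. net_0000:81:00.0_representor_0 */
        }
        return 0;
    }
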
static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, i40e_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_i40e_dev_uninit);
}
static struct rte_pci_driver rte_i40e_pmd = {
@@ -627,41 +702,21 @@ static struct rte_pci_driver rte_i40e_pmd = {
.remove = eth_i40e_pci_remove,
};
-static inline int
-rte_i40e_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-static inline int
-rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static inline void
-i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+i40e_write_global_rx_ctl(struct i40e_hw *hw, uint32_t reg_addr,
+ uint32_t reg_val)
{
+ uint32_t ori_reg_val;
+ struct rte_eth_dev *dev;
+
+ ori_reg_val = i40e_read_rx_ctl(hw, reg_addr);
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
i40e_write_rx_ctl(hw, reg_addr, reg_val);
- PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified "
- "with value 0x%08x",
- reg_addr, reg_val);
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
}
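
i40e_write_global_rx_ctl() (and the new i40e_aq_debug_write_global_register() further down) now read the register back before writing, so a warning is emitted only when a global register actually changes value, which matters once several driver instances share the NIC. A minimal sketch of that read-compare-write pattern against a hypothetical register file:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regfile[16]; /* hypothetical stand-in for NIC registers */

    static void write_global_reg(uint32_t addr, uint32_t val)
    {
        uint32_t ori = regfile[addr]; /* read the current value back first */

        regfile[addr] = val;
        if (ori != val) /* warn only when the register actually changed */
            fprintf(stderr,
                    "changed global register [0x%08x] original: 0x%08x, new: 0x%08x\n",
                    addr, ori, val);
    }

    int main(void)
    {
        write_global_reg(9, 0x9420); /* logged: value changed */
        write_global_reg(9, 0x9420); /* silent: same value rewritten */
        return 0;
    }
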
RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
@@ -688,7 +743,6 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
*/
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
- i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER);
}
static inline void i40e_config_automask(struct i40e_pf *pf)
@@ -807,7 +861,7 @@ config_vf_floating_veb(struct rte_devargs *devargs,
if (devargs == NULL)
return;
- kvlist = rte_kvargs_parse(devargs->args, NULL);
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (kvlist == NULL)
return;
@@ -848,7 +902,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
if (devargs == NULL)
return 0;
- kvlist = rte_kvargs_parse(devargs->args, NULL);
+ kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (kvlist == NULL)
return 0;
@@ -1055,8 +1109,6 @@ i40e_init_queue_region_conf(struct rte_eth_dev *dev)
memset(info, 0, sizeof(struct i40e_queue_regions));
}
-#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver"
-
static int
i40e_parse_multi_drv_handler(__rte_unused const char *key,
const char *value,
@@ -1088,9 +1140,8 @@ static int
i40e_support_multi_driver(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- static const char *const valid_keys[] = {
- ETH_I40E_SUPPORT_MULTI_DRIVER, NULL};
struct rte_kvargs *kvlist;
+ int kvargs_count;
/* Enable global configuration by default */
pf->support_multi_driver = false;
@@ -1102,7 +1153,13 @@ i40e_support_multi_driver(struct rte_eth_dev *dev)
if (!kvlist)
return -EINVAL;
- if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1)
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
"the first invalid or last valid one is used !",
ETH_I40E_SUPPORT_MULTI_DRIVER);
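
Passing valid_keys to rte_kvargs_parse() makes the PMD reject unknown devargs keys up front, and the new kvargs_count check lets a missing support-multi-driver key fall through silently instead of being treated as an error. A self-contained sketch of the same whitelist idea, with a hypothetical one-pair parser rather than the rte_kvargs API:

    #include <stdio.h>
    #include <string.h>

    static const char *const valid_keys[] = { "support-multi-driver", NULL };

    /* Hypothetical single "key=value" parser: whitelisted keys only. */
    static int parse_arg(const char *arg, char *val, size_t len)
    {
        const char *eq = strchr(arg, '=');
        unsigned int i;

        if (eq == NULL)
            return -1;
        for (i = 0; valid_keys[i] != NULL; i++) {
            size_t klen = strlen(valid_keys[i]);

            if ((size_t)(eq - arg) == klen &&
                strncmp(arg, valid_keys[i], klen) == 0) {
                snprintf(val, len, "%s", eq + 1);
                return 0;
            }
        }
        return -1; /* unknown key is rejected up front */
    }

    int main(void)
    {
        char val[16];

        if (parse_arg("support-multi-driver=1", val, sizeof(val)) == 0)
            printf("support-multi-driver: %s\n", val);
        if (parse_arg("bogus-key=1", val, sizeof(val)) != 0)
            printf("bogus-key rejected\n");
        return 0;
    }
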
@@ -1118,7 +1175,34 @@ i40e_support_multi_driver(struct rte_eth_dev *dev)
}
static int
-eth_i40e_dev_init(struct rte_eth_dev *dev)
+i40e_aq_debug_write_global_register(struct i40e_hw *hw,
+ uint32_t reg_addr, uint64_t reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ uint64_t ori_reg_val;
+ struct rte_eth_dev *dev;
+ int ret;
+
+ ret = i40e_aq_debug_read_register(hw, reg_addr, &ori_reg_val, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from 0x%08x",
+ reg_addr);
+ return -EIO;
+ }
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+
+ if (ori_reg_val != reg_val)
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%"PRIx64", after: 0x%"PRIx64,
+ dev->device->name, reg_addr, ori_reg_val, reg_val);
+
+ return i40e_aq_debug_write_register(hw, reg_addr, reg_val, cmd_details);
+}
+
+static int
+eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev;
struct rte_intr_handle *intr_handle;
@@ -1170,6 +1254,13 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->bus.func = pci_dev->addr.function;
hw->adapter_stopped = 0;
+ /*
+ * Switch Tag value should not be identical to either the First Tag
+ * or Second Tag values. So set something other than common Ethertype
+ * for internal switching.
+ */
+ hw->switch_tag = 0xffff;
+
/* Check if need to support multi-driver */
i40e_support_multi_driver(dev);
@@ -1224,7 +1315,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialise the L3_MAP register */
if (!pf->support_multi_driver) {
- ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GLQF_L3_MAP(40),
0x00000028, NULL);
if (ret)
PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d",
@@ -1232,7 +1324,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG,
"Global register 0x%08x is changed with 0x28",
I40E_GLQF_L3_MAP(40));
- i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER);
}
/* Need the special FW version to support floating VEB */
@@ -1519,7 +1610,6 @@ void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000);
I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000);
- i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD);
}
static int
@@ -1533,6 +1623,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
struct rte_flow *p_flow;
int ret;
uint8_t aq_fail = 0;
+ int retries = 0;
PMD_INIT_FUNC_TRACE();
@@ -1544,6 +1635,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
pci_dev = RTE_ETH_DEV_TO_PCI(dev);
intr_handle = &pci_dev->intr_handle;
+ ret = rte_eth_switch_domain_free(pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1574,9 +1669,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
- /* register callback func to eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40e_dev_interrupt_handler, dev);
+ /* unregister callback func to eal lib */
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ i40e_msec_delay(500);
+ } while (retries++ < 5);
i40e_rm_ethtype_filter_list(pf);
i40e_rm_tunnel_filter_list(pf);
@@ -1746,8 +1852,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
/* Write first RX queue to Link list register as the head element */
if (vsi->type != I40E_VSI_SRIOV) {
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1,
- pf->support_multi_driver);
+ i40e_calc_itr_interval(1, pf->support_multi_driver);
if (msix_vect == I40E_MISC_VEC_ID) {
I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0,
@@ -1943,27 +2048,40 @@ i40e_phy_conf_link(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
enum i40e_aq_phy_type cnt;
+ uint8_t avail_speed;
uint32_t phy_type_mask = 0;
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_LOW_POWER;
- const uint8_t advt = I40E_LINK_SPEED_40GB |
- I40E_LINK_SPEED_25GB |
- I40E_LINK_SPEED_10GB |
- I40E_LINK_SPEED_1GB |
- I40E_LINK_SPEED_100MB;
int ret = -ENOTSUP;
+ /* To get phy capabilities of available speeds. */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get PHY capabilities: %d\n",
+ status);
+ return ret;
+ }
+ avail_speed = phy_ab.link_speed;
+ /* To get the current phy config. */
status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab,
NULL);
- if (status)
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to get the current PHY config: %d\n",
+ status);
return ret;
+ }
- /* If link already up, no need to set up again */
- if (is_up && phy_ab.phy_type != 0)
+ /* If the link needs to go up, autoneg is enabled and the speed is
+ * already valid, no need to set it up again.
+ */
+ if (is_up && phy_ab.phy_type != 0 &&
+ abilities & I40E_AQ_PHY_AN_ENABLED &&
+ phy_ab.link_speed != 0)
return I40E_SUCCESS;
memset(&phy_conf, 0, sizeof(phy_conf));
@@ -1972,18 +2090,20 @@ i40e_phy_conf_link(struct i40e_hw *hw,
abilities &= ~mask;
abilities |= phy_ab.abilities & mask;
- /* update ablities and speed */
- if (abilities & I40E_AQ_PHY_AN_ENABLED)
- phy_conf.link_speed = advt;
- else
- phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
-
phy_conf.abilities = abilities;
+ /* If the link needs to go up but the forced speed is not supported,
+ * warn the user and configure the default available speeds.
+ */
+ if (is_up && !(force_speed & avail_speed)) {
+ PMD_DRV_LOG(WARNING, "Invalid speed setting, set to default!\n");
+ phy_conf.link_speed = avail_speed;
+ } else {
+ phy_conf.link_speed = is_up ? force_speed : avail_speed;
+ }
-
- /* To enable link, phy_type mask needs to include each type */
- for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ /* PHY type mask needs to include each type except PHY type extension */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_25GBASE_KR; cnt++)
phy_type_mask |= 1 << cnt;
/* use get_phy_abilities_resp value for the rest */
@@ -2016,11 +2136,18 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *conf = &dev->data->dev_conf;
+ if (conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ conf->link_speeds = ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_20G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_100M;
+ }
speed = i40e_parse_link_speeds(conf->link_speeds);
- abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- abilities |= I40E_AQ_PHY_LINK_ENABLED;
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK |
+ I40E_AQ_PHY_AN_ENABLED |
+ I40E_AQ_PHY_LINK_ENABLED;
return i40e_phy_conf_link(hw, abilities, speed, true);
}
@@ -2137,13 +2264,6 @@ i40e_dev_start(struct rte_eth_dev *dev)
}
/* Apply link configure */
- if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
- ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
- ETH_LINK_SPEED_40G)) {
- PMD_DRV_LOG(ERR, "Invalid link setting");
- goto err_up;
- }
ret = i40e_apply_link_speed(dev);
if (I40E_SUCCESS != ret) {
PMD_DRV_LOG(ERR, "Fail to apply link setting");
@@ -2286,6 +2406,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
i40e_pf_disable_irq0(hw);
rte_intr_disable(intr_handle);
+ i40e_fdir_teardown(pf);
+
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
@@ -2297,7 +2419,6 @@ i40e_dev_close(struct rte_eth_dev *dev)
pf->vmdq = NULL;
/* release all the existing VSIs and VEBs */
- i40e_fdir_teardown(pf);
i40e_vsi_release(pf->main_vsi);
/* shutdown the adminq */
@@ -2339,7 +2460,7 @@ i40e_dev_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- ret = eth_i40e_dev_init(dev);
+ ret = eth_i40e_dev_init(dev, NULL);
return ret;
}
@@ -2437,84 +2558,143 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
return i40e_phy_conf_link(hw, abilities, speed, false);
}
-int
-i40e_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete)
+static __rte_always_inline void
+update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
+{
+/* Link status registers and values */
+#define I40E_PRTMAC_LINKSTA 0x001E2420
+#define I40E_REG_LINK_UP 0x40000080
+#define I40E_PRTMAC_MACC 0x001E24E0
+#define I40E_REG_MACC_25GB 0x00020000
+#define I40E_REG_SPEED_MASK 0x38000000
+#define I40E_REG_SPEED_100MB 0x00000000
+#define I40E_REG_SPEED_1GB 0x08000000
+#define I40E_REG_SPEED_10GB 0x10000000
+#define I40E_REG_SPEED_20GB 0x20000000
+#define I40E_REG_SPEED_25_40GB 0x18000000
+ uint32_t link_speed;
+ uint32_t reg_val;
+
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA);
+ link_speed = reg_val & I40E_REG_SPEED_MASK;
+ reg_val &= I40E_REG_LINK_UP;
+ link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0;
+
+ if (unlikely(link->link_status == 0))
+ return;
+
+ /* Parse the link status */
+ switch (link_speed) {
+ case I40E_REG_SPEED_100MB:
+ link->link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_REG_SPEED_1GB:
+ link->link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_REG_SPEED_10GB:
+ link->link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_REG_SPEED_20GB:
+ link->link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_REG_SPEED_25_40GB:
+ reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC);
+
+ if (reg_val & I40E_REG_MACC_25GB)
+ link->link_speed = ETH_SPEED_NUM_25G;
+ else
+ link->link_speed = ETH_SPEED_NUM_40G;
+
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed);
+ break;
+ }
+}
+
+static __rte_always_inline void
+update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
+ bool enable_lse, int wait_to_complete)
{
-#define CHECK_INTERVAL 100 /* 100ms */
-#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
+ uint32_t rep_cnt = MAX_REPEAT_TIME;
struct i40e_link_status link_status;
- struct rte_eth_link link, old;
int status;
- unsigned rep_cnt = MAX_REPEAT_TIME;
- bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
- memset(&link, 0, sizeof(link));
- memset(&old, 0, sizeof(old));
memset(&link_status, 0, sizeof(link_status));
- rte_i40e_dev_atomic_read_link_status(dev, &old);
do {
+ memset(&link_status, 0, sizeof(link_status));
+
/* Get link status information from hardware */
status = i40e_aq_get_link_info(hw, enable_lse,
&link_status, NULL);
- if (status != I40E_SUCCESS) {
- link.link_speed = ETH_SPEED_NUM_100M;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ if (unlikely(status != I40E_SUCCESS)) {
+ link->link_speed = ETH_SPEED_NUM_100M;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
PMD_DRV_LOG(ERR, "Failed to get link info");
- goto out;
+ return;
}
- link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
- if (!wait_to_complete || link.link_status)
+ link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
+ if (!wait_to_complete || link->link_status)
break;
rte_delay_ms(CHECK_INTERVAL);
} while (--rep_cnt);
- if (!link.link_status)
- goto out;
-
- /* i40e uses full duplex only */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
-
/* Parse the link status */
switch (link_status.link_speed) {
case I40E_LINK_SPEED_100MB:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = ETH_SPEED_NUM_100M;
break;
case I40E_LINK_SPEED_1GB:
- link.link_speed = ETH_SPEED_NUM_1G;
+ link->link_speed = ETH_SPEED_NUM_1G;
break;
case I40E_LINK_SPEED_10GB:
- link.link_speed = ETH_SPEED_NUM_10G;
+ link->link_speed = ETH_SPEED_NUM_10G;
break;
case I40E_LINK_SPEED_20GB:
- link.link_speed = ETH_SPEED_NUM_20G;
+ link->link_speed = ETH_SPEED_NUM_20G;
break;
case I40E_LINK_SPEED_25GB:
- link.link_speed = ETH_SPEED_NUM_25G;
+ link->link_speed = ETH_SPEED_NUM_25G;
break;
case I40E_LINK_SPEED_40GB:
- link.link_speed = ETH_SPEED_NUM_40G;
+ link->link_speed = ETH_SPEED_NUM_40G;
break;
default:
- link.link_speed = ETH_SPEED_NUM_100M;
+ link->link_speed = ETH_SPEED_NUM_100M;
break;
}
+}
+int
+i40e_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_link link;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
+ int ret;
+
+ memset(&link, 0, sizeof(link));
+
+ /* i40e uses full duplex only */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
-out:
- rte_i40e_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
+ if (!wait_to_complete && !enable_lse)
+ update_link_reg(hw, &link);
+ else
+ update_link_aq(hw, &link, enable_lse, wait_to_complete);
+ ret = rte_eth_linkstatus_set(dev, &link);
i40e_notify_all_vfs_link_status(dev);
- return 0;
+ return ret;
}
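
When no wait is requested and LSC interrupts are off, update_link_reg() avoids the admin queue entirely and decodes link state straight from the PRTMAC_LINKSTA register: one bit for up/down and a 3-bit field for speed, with the 25G/40G ambiguity resolved via PRTMAC_MACC. A standalone sketch of that mask-and-switch decoding (the constants mirror the ones defined above but are used here only illustratively):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative copies of the register encodings defined above. */
    #define REG_LINK_UP    0x40000080u
    #define REG_SPEED_MASK 0x38000000u
    #define REG_SPEED_1GB  0x08000000u
    #define REG_SPEED_10GB 0x10000000u

    static unsigned int decode_speed_mbps(uint32_t linksta)
    {
        switch (linksta & REG_SPEED_MASK) {
        case REG_SPEED_1GB:
            return 1000;
        case REG_SPEED_10GB:
            return 10000;
        default:
            return 0; /* encodings not handled in this sketch */
        }
    }

    int main(void)
    {
        uint32_t reg = REG_LINK_UP | REG_SPEED_10GB; /* hypothetical sample */

        printf("link %s, speed %u Mbps\n",
               (reg & REG_LINK_UP) == REG_LINK_UP ? "up" : "down",
               decode_speed_mbps(reg));
        return 0;
    }
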
/* Get all the statistics of a VSI */
@@ -3169,13 +3349,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vsi *vsi = pf->main_vsi;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
dev_info->max_vfs = pci_dev->max_vfs;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3183,7 +3363,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3196,7 +3382,13 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ dev_info->tx_queue_offload_capa;
+ dev_info->dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
@@ -3210,6 +3402,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3220,8 +3413,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -3248,15 +3440,42 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues += dev_info->vmdq_queue_num;
}
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* For XL710 */
dev_info->speed_capa = ETH_LINK_SPEED_40G;
- else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+ dev_info->default_rxportconf.nb_queues = 2;
+ dev_info->default_txportconf.nb_queues = 2;
+ if (dev->data->nb_rx_queues == 1)
+ dev_info->default_rxportconf.ring_size = 2048;
+ else
+ dev_info->default_rxportconf.ring_size = 1024;
+ if (dev->data->nb_tx_queues == 1)
+ dev_info->default_txportconf.ring_size = 1024;
+ else
+ dev_info->default_txportconf.ring_size = 512;
+
+ } else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) {
/* For XXV710 */
dev_info->speed_capa = ETH_LINK_SPEED_25G;
- else
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
/* For X710 */
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_10G) {
+ dev_info->default_rxportconf.ring_size = 512;
+ dev_info->default_txportconf.ring_size = 256;
+ } else {
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
+ }
+ }
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
}
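
A minimal sketch (not part of the patch) of how an application could consume
these new preferred defaults through the public ethdev API; pick_rx_ring_size()
and the 1024 fallback are assumptions for illustration:

#include <rte_ethdev.h>

/* Use the PMD's preferred Rx ring size unless the user chose one. */
static uint16_t
pick_rx_ring_size(uint16_t port_id, uint16_t user_value)
{
	struct rte_eth_dev_info dev_info;

	if (user_value != 0)
		return user_value;
	rte_eth_dev_info_get(port_id, &dev_info);
	/* ring_size == 0 means the driver states no preference. */
	return dev_info.default_rxportconf.ring_size != 0 ?
	       dev_info.default_rxportconf.ring_size : 1024;
}
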
static int
@@ -3307,7 +3526,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev,
return 0;
}
- ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_SWT_L2TAGCTRL(reg_id),
reg_w, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
@@ -3329,7 +3549,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
int ret = 0;
if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3367,7 +3588,6 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
/* If NVM API < 1.7, keep the register setting */
ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type,
tpid, qinq);
- i40e_global_cfg_warning(I40E_WARNING_TPID);
return ret;
}
@@ -3377,9 +3597,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
i40e_vsi_config_vlan_filter(vsi, TRUE);
else
i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3387,14 +3609,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40e_vsi_config_vlan_stripping(vsi, TRUE);
else
i40e_vsi_config_vlan_stripping(vsi, FALSE);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
i40e_vsi_config_double_vlan(vsi, TRUE);
/* Set global registers with default ethertype. */
i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3611,7 +3833,6 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW,
pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS]
<< I40E_KILOSHIFT);
- i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL);
} else {
PMD_DRV_LOG(ERR,
"Water marker configuration is not supported.");
@@ -3641,6 +3862,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_mac_filter_info mac_filter;
struct i40e_vsi *vsi;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int ret;
/* If VMDQ not enabled or configured, return */
@@ -3659,7 +3881,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
else
mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -4010,8 +4232,8 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
return I40E_ERR_PARAM;
snprintf(z_name, sizeof(z_name), "i40e_dma_%"PRIu64, rte_rand());
- mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
- alignment, RTE_PGSIZE_2M);
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG, alignment, RTE_PGSIZE_2M);
if (!mz)
return I40E_ERR_NO_MEMORY;
@@ -4147,7 +4369,6 @@ i40e_get_cap(struct i40e_hw *hw)
}
#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4
-#define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf"
static int i40e_pf_parse_vf_queue_number_handler(const char *key,
const char *value,
@@ -4181,9 +4402,9 @@ static int i40e_pf_parse_vf_queue_number_handler(const char *key,
static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
{
- static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL};
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_kvargs *kvlist;
+ int kvargs_count;
/* set default queue number per VF as 4 */
pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
@@ -4195,12 +4416,18 @@ static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev)
if (kvlist == NULL)
return -(EINVAL);
- if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1)
+ kvargs_count = rte_kvargs_count(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG);
+ if (!kvargs_count) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (kvargs_count > 1)
PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only "
"the first invalid or last valid one is used !",
- QUEUE_NUM_PER_VF_ARG);
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG);
- rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG,
+ rte_kvargs_process(kvlist, ETH_I40E_QUEUE_NUM_PER_VF_ARG,
i40e_pf_parse_vf_queue_number_handler, pf);
rte_kvargs_free(kvlist);
@@ -5669,6 +5896,12 @@ i40e_pf_setup(struct i40e_pf *pf)
PMD_DRV_LOG(ERR, "Could not get switch config, err %d", ret);
return ret;
}
+
+ ret = rte_eth_switch_domain_alloc(&pf->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING,
+ "failed to allocate switch domain for device %d", ret);
+
if (pf->flags & I40E_FLAG_FDIR) {
/* make queue allocated first, let FDIR use queue pair 0*/
ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
@@ -7372,6 +7605,7 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7415,13 +7649,14 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7431,6 +7666,7 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7459,10 +7695,13 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* For MPLSoGRE */
memset(&filter_replace, 0,
@@ -7485,13 +7724,14 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7501,6 +7741,7 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7536,10 +7777,13 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* for GTP-U */
memset(&filter_replace, 0,
@@ -7568,13 +7812,14 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -7584,6 +7829,7 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
enum i40e_status_code status = I40E_SUCCESS;
if (pf->support_multi_driver) {
@@ -7611,10 +7857,13 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
&filter_replace_buf);
if (status < 0)
return status;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* for GTP-U */
memset(&filter_replace, 0,
@@ -7636,13 +7885,14 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf)
status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!status) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!status && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return status;
}
@@ -8194,14 +8444,14 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
}
if (reg != val) {
- ret = i40e_aq_debug_write_register(hw, I40E_GL_PRS_FVBM(2),
+ ret = i40e_aq_debug_write_global_register(hw,
+ I40E_GL_PRS_FVBM(2),
reg, NULL);
if (ret != 0)
return ret;
PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed "
"with value 0x%08x",
I40E_GL_PRS_FVBM(2), reg);
- i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN);
} else {
ret = 0;
}
@@ -8467,7 +8717,6 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
I40E_GLQF_HSYM(j),
reg);
}
- i40e_global_cfg_warning(I40E_WARNING_HSYM);
}
}
@@ -8493,7 +8742,6 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
goto out;
i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
- i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
out:
I40E_WRITE_FLUSH(hw);
@@ -9086,12 +9334,17 @@ void
i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
+ struct rte_eth_dev *dev;
- PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
- if (reg != val)
- i40e_write_global_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
- (uint32_t)i40e_read_rx_ctl(hw, addr));
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev;
+ if (reg != val) {
+ i40e_write_rx_ctl(hw, addr, val);
+ PMD_DRV_LOG(WARNING,
+ "i40e device %s changed global register [0x%08x]."
+ " original: 0x%08x, new: 0x%08x",
+ dev->device->name, addr, reg,
+ (uint32_t)i40e_read_rx_ctl(hw, addr));
+ }
}
static void
@@ -9165,12 +9418,6 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
pf->hash_input_set[pctype] = input_set;
pf->fdir.input_set[pctype] = input_set;
}
-
- if (!pf->support_multi_driver) {
- i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
- i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
- i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
- }
}
int
@@ -9236,7 +9483,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
- i40e_global_cfg_warning(I40E_WARNING_HASH_INSET);
for (i = 0; i < num; i++)
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
@@ -9245,7 +9491,6 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
0);
- i40e_global_cfg_warning(I40E_WARNING_HASH_MSK);
I40E_WRITE_FLUSH(hw);
pf->hash_input_set[pctype] = input_set;
@@ -9326,7 +9571,6 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
i40e_check_write_global_reg(hw,
I40E_GLQF_FD_MSK(i, pctype),
0);
- i40e_global_cfg_warning(I40E_WARNING_FD_MSK);
} else {
PMD_DRV_LOG(ERR, "FDIR bit mask is not supported.");
}
@@ -9809,6 +10053,60 @@ i40e_pctype_to_flowtype(const struct i40e_adapter *adapter,
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+/*
+ * GL_SWR_PM_UP_THR:
+ * The value is not affected by the link speed; it is set according to the
+ * total number of ports for a better pipe-monitor configuration.
+ */
+static bool
+i40e_get_swr_pm_cfg(struct i40e_hw *hw, uint32_t *value)
+{
+#define I40E_GL_SWR_PM_EF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE
+
+#define I40E_GL_SWR_PM_SF_DEVICE(dev) \
+ .device_id = (dev), \
+ .val = I40E_GL_SWR_PM_UP_THR_SF_VALUE
+
+ static const struct {
+ uint16_t device_id;
+ uint32_t val;
+ } swr_pm_table[] = {
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_KX_C) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T) },
+ { I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_10G_BASE_T4) },
+
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_KX_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_QSFP_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_20G_KR2_A) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_B) },
+ { I40E_GL_SWR_PM_SF_DEVICE(I40E_DEV_ID_25G_SFP28) },
+ };
+ uint32_t i;
+
+ if (value == NULL) {
+ PMD_DRV_LOG(ERR, "value is NULL");
+ return false;
+ }
+
+ for (i = 0; i < RTE_DIM(swr_pm_table); i++) {
+ if (hw->device_id == swr_pm_table[i].device_id) {
+ *value = swr_pm_table[i].val;
+
+ PMD_DRV_LOG(DEBUG, "Device 0x%x with GL_SWR_PM_UP_THR "
+ "value - 0x%08x",
+ hw->device_id, *value);
+ return true;
+ }
+ }
+
+ return false;
+}
+
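For clarity, each helper macro above expands to a pair of designated
initializers, so an entry such as

	{ I40E_GL_SWR_PM_EF_DEVICE(I40E_DEV_ID_SFP_XL710) }

is equivalent to

	{ .device_id = (I40E_DEV_ID_SFP_XL710),
	  .val = I40E_GL_SWR_PM_UP_THR_EF_VALUE }

which keeps the EF/SF grouping of devices visible in the table.
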
static int
i40e_dev_sync_phy_type(struct i40e_hw *hw)
{
@@ -9873,13 +10171,16 @@ i40e_configure_registers(struct i40e_hw *hw)
}
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
- I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_SF_VALUE;
- else /* For X710 */
- reg_table[i].val =
- I40E_GL_SWR_PM_UP_THR_EF_VALUE;
+ uint32_t cfg_val;
+
+ if (!i40e_get_swr_pm_cfg(hw, &cfg_val)) {
+ PMD_DRV_LOG(DEBUG, "Device 0x%x skips "
+ "GL_SWR_PM_UP_THR value fixup",
+ hw->device_id);
+ continue;
+ }
+
+ reg_table[i].val = cfg_val;
}
ret = i40e_aq_debug_read_register(hw, reg_table[i].addr,
@@ -10321,9 +10622,8 @@ i40e_start_timecounters(struct rte_eth_dev *dev)
uint32_t tsync_inc_h;
/* Get current link speed. */
- memset(&link, 0, sizeof(link));
i40e_dev_link_update(dev, 1);
- rte_i40e_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
case ETH_SPEED_NUM_40G:
@@ -11249,8 +11549,148 @@ static int i40e_get_eeprom(struct rte_eth_dev *dev,
return 0;
}
-static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr)
+static int i40e_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t sff8472_comp = 0;
+ uint32_t sff8472_swap = 0;
+ uint32_t sff8636_rev = 0;
+ i40e_status status;
+ uint32_t type = 0;
+
+ /* Check if firmware supports reading module EEPROM. */
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
+ PMD_DRV_LOG(ERR,
+ "Module EEPROM memory read not supported. "
+ "Please update the NVM image.\n");
+ return -EINVAL;
+ }
+
+ status = i40e_update_link_info(hw);
+ if (status)
+ return -EIO;
+
+ if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) {
+ PMD_DRV_LOG(ERR,
+ "Cannot read module EEPROM memory. "
+ "No module connected.\n");
+ return -EINVAL;
+ }
+
+ type = hw->phy.link_info.module_type[0];
+
+ switch (type) {
+ case I40E_MODULE_TYPE_SFP:
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_COMP,
+ &sff8472_comp, NULL);
+ if (status)
+ return -EIO;
+
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ I40E_I2C_EEPROM_DEV_ADDR,
+ I40E_MODULE_SFF_8472_SWAP,
+ &sff8472_swap, NULL);
+ if (status)
+ return -EIO;
+
+ /* Check if the module requires address swap to access
+ * the other EEPROM memory page.
+ */
+ if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) {
+ PMD_DRV_LOG(WARNING,
+ "Module address swap to access "
+ "page 0xA2 is not supported.\n");
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else if (sff8472_comp == 0x00) {
+ /* Module is not SFF-8472 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP_PLUS:
+ /* Read from memory page 0. */
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ 0,
+ I40E_MODULE_REVISION_ADDR,
+ &sff8636_rev, NULL);
+ if (status)
+ return -EIO;
+ /* Determine revision compliance byte */
+ if (sff8636_rev > 0x02) {
+ /* Module is SFF-8636 compliant */
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ } else {
+ modinfo->type = RTE_ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ }
+ break;
+ case I40E_MODULE_TYPE_QSFP28:
+ modinfo->type = RTE_ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Module type unrecognized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int i40e_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ bool is_sfp = false;
+ i40e_status status;
+ uint8_t *data = info->data;
+ uint32_t value = 0;
+ uint32_t i;
+
+ if (!info || !info->length || !data)
+ return -EINVAL;
+
+ if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP)
+ is_sfp = true;
+
+ for (i = 0; i < info->length; i++) {
+ u32 offset = i + info->offset;
+ u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0;
+
+ /* Check if we need to access the other memory page */
+ if (is_sfp) {
+ if (offset >= RTE_ETH_MODULE_SFF_8079_LEN) {
+ offset -= RTE_ETH_MODULE_SFF_8079_LEN;
+ addr = I40E_I2C_EEPROM_DEV_ADDR2;
+ }
+ } else {
+ while (offset >= RTE_ETH_MODULE_SFF_8436_LEN) {
+ /* Compute memory page number and offset. */
+ offset -= RTE_ETH_MODULE_SFF_8436_LEN / 2;
+ addr++;
+ }
+ }
+ status = i40e_aq_get_phy_register(hw,
+ I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
+ addr, offset, &value, NULL);
+ if (status)
+ return -EIO;
+ data[i] = (uint8_t)value;
+ }
+ return 0;
+}
+
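A sketch (not from the patch) of the offset-to-page mapping performed in the
loop above, pulled out as a standalone helper; map_module_offset() is a
hypothetical name, and the 0xA0/0xA2 literals are assumed to mirror
I40E_I2C_EEPROM_DEV_ADDR and I40E_I2C_EEPROM_DEV_ADDR2:

static void
map_module_offset(bool is_sfp, uint32_t flat, uint32_t *addr, uint32_t *off)
{
	if (is_sfp) {
		/* SFF-8472: bytes past the first 256-byte page live at a
		 * second I2C device address (0xA2). */
		*addr = flat >= 256 ? 0xA2 : 0xA0;
		*off = flat >= 256 ? flat - 256 : flat;
	} else {
		/* SFF-8436/8636: fold the flat offset into successive
		 * 128-byte upper pages, bumping the page number. */
		*addr = 0;
		*off = flat;
		while (*off >= 256) {
			*off -= 128;
			(*addr)++;
		}
	}
}
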
+static int i40e_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -11261,7 +11701,7 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
- return;
+ return -EINVAL;
}
TAILQ_FOREACH(f, &vsi->mac_list, next) {
@@ -11271,25 +11711,31 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
if (f == NULL) {
PMD_DRV_LOG(ERR, "Failed to find filter for default mac");
- return;
+ return -EIO;
}
mac_filter = f->mac_info;
ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to delete mac filter");
- return;
+ return -EIO;
}
memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN);
ret = i40e_vsi_add_mac(vsi, &mac_filter);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to add mac filter");
- return;
+ return -EIO;
}
memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN);
- i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
- mac_addr->addr_bytes, NULL);
+ ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
+ mac_addr->addr_bytes, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to change mac");
+ return -EIO;
+ }
+
+ return 0;
}
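
Since the op now returns an int, failures propagate through the generic API.
A usage sketch (not part of the patch; the locally administered address is an
arbitrary example):

static int
set_port_mac(uint16_t port_id)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	/* Surfaces -EINVAL/-EIO from the PMD instead of failing silently. */
	return rte_eth_dev_default_mac_addr_set(port_id, &addr);
}
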
static int
@@ -11312,9 +11758,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
@@ -11413,7 +11861,7 @@ i40e_rss_filter_restore(struct i40e_pf *pf)
{
struct i40e_rte_flow_rss_conf *conf =
&pf->rss_info;
- if (conf->num)
+ if (conf->conf.queue_num)
i40e_config_rss_filter(pf, conf, TRUE);
}
@@ -11456,7 +11904,8 @@ i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index)
static int
i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t pkg_size, uint32_t proto_num,
- struct rte_pmd_i40e_proto_info *proto)
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
uint32_t pctype_num;
@@ -11469,6 +11918,12 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t i, j, n;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&pctype_num, sizeof(pctype_num),
RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM);
@@ -11531,8 +11986,13 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
i40e_find_customized_pctype(pf,
I40E_CUSTOMIZED_GTPU);
if (new_pctype) {
- new_pctype->pctype = pctype_value;
- new_pctype->valid = true;
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ new_pctype->pctype = pctype_value;
+ new_pctype->valid = true;
+ } else {
+ new_pctype->pctype = I40E_FILTER_PCTYPE_INVALID;
+ new_pctype->valid = false;
+ }
}
}
@@ -11542,8 +12002,9 @@ i40e_update_customized_pctype(struct rte_eth_dev *dev, uint8_t *pkg,
static int
i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size, uint32_t proto_num,
- struct rte_pmd_i40e_proto_info *proto)
+ uint32_t pkg_size, uint32_t proto_num,
+ struct rte_pmd_i40e_proto_info *proto,
+ enum rte_pmd_i40e_package_op op)
{
struct rte_pmd_i40e_ptype_mapping *ptype_mapping;
uint16_t port_id = dev->data->port_id;
@@ -11556,6 +12017,17 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
bool in_tunnel;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return -1;
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ rte_pmd_i40e_ptype_mapping_reset(port_id);
+ return 0;
+ }
+
/* get information about new ptype num */
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&ptype_num, sizeof(ptype_num),
@@ -11705,7 +12177,8 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_GRENAT;
in_tunnel = true;
- } else if (!strncasecmp(name, "L2TPV2CTL", 9)) {
+ } else if (!strncasecmp(name, "L2TPV2CTL", 9) ||
+ !strncasecmp(name, "L2TPV2", 6)) {
ptype_mapping[i].sw_ptype |=
RTE_PTYPE_TUNNEL_L2TP;
in_tunnel = true;
@@ -11728,7 +12201,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg,
void
i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size)
+ uint32_t pkg_size, enum rte_pmd_i40e_package_op op)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
uint32_t proto_num;
@@ -11737,6 +12210,12 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
uint32_t i;
int ret;
+ if (op != RTE_PMD_I40E_PKG_OP_WR_ADD &&
+ op != RTE_PMD_I40E_PKG_OP_WR_DEL) {
+ PMD_DRV_LOG(ERR, "Unsupported operation.");
+ return;
+ }
+
/* get information about protocol number */
ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size,
(uint8_t *)&proto_num, sizeof(proto_num),
@@ -11770,20 +12249,23 @@ i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
/* Check if GTP is supported. */
for (i = 0; i < proto_num; i++) {
if (!strncmp(proto[i].name, "GTP", 3)) {
- pf->gtp_support = true;
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD)
+ pf->gtp_support = true;
+ else
+ pf->gtp_support = false;
break;
}
}
/* Update customized pctype info */
ret = i40e_update_customized_pctype(dev, pkg, pkg_size,
- proto_num, proto);
+ proto_num, proto, op);
if (ret)
PMD_DRV_LOG(INFO, "No pctype is updated.");
/* Update customized ptype info */
ret = i40e_update_customized_ptype(dev, pkg, pkg_size,
- proto_num, proto);
+ proto_num, proto, op);
if (ret)
PMD_DRV_LOG(INFO, "No ptype is updated.");
@@ -11840,6 +12322,7 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_eth_dev *dev = ((struct i40e_adapter *)hw->back)->eth_dev;
if (pf->support_multi_driver) {
PMD_DRV_LOG(ERR, "Replace cloud filter is not supported.");
@@ -11876,10 +12359,14 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
&filter_replace_buf);
if (ret != I40E_SUCCESS)
return ret;
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud l1 type is changed from 0x%x to 0x%x",
- filter_replace.old_filter_type,
- filter_replace.new_filter_type);
+
+ if (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type)
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud l1 type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
+ filter_replace.old_filter_type,
+ filter_replace.new_filter_type);
/* Apply the second L2 cloud filter */
memset(&filter_replace, 0,
@@ -11901,29 +12388,68 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
&filter_replace_buf);
- if (!ret) {
- i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER);
- PMD_DRV_LOG(DEBUG, "Global configuration modification: "
- "cloud filter type is changed from 0x%x to 0x%x",
+ if (!ret && (filter_replace.old_filter_type !=
+ filter_replace.new_filter_type))
+ PMD_DRV_LOG(WARNING, "i40e device %s changed cloud filter type."
+ " original: 0x%x, new: 0x%x",
+ dev->device->name,
filter_replace.old_filter_type,
filter_replace.new_filter_type);
- }
+
return ret;
}
int
+i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
+
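A sketch (not part of the patch) of how a flow-creation path might combine
the two helpers above; apply_rss_action() is a hypothetical wrapper:

static int
apply_rss_action(struct i40e_pf *pf, const struct rte_flow_action_rss *act)
{
	struct i40e_rte_flow_rss_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* Fails if the key or queue list exceeds the embedded arrays. */
	if (i40e_rss_conf_init(&conf, act))
		return -EINVAL;
	/* Skip reprogramming when the same action is already active. */
	if (i40e_action_rss_same(&pf->rss_info.conf, &conf.conf))
		return 0;
	return i40e_config_rss_filter(pf, &conf, TRUE);
}
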
+int
i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint32_t i, lut = 0;
uint16_t j, num;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
if (!add) {
- if (memcmp(conf, rss_info,
- sizeof(struct i40e_rte_flow_rss_conf)) == 0) {
+ if (i40e_action_rss_same(&rss_info->conf, &conf->conf)) {
i40e_pf_disable_rss(pf);
memset(rss_info, 0,
sizeof(struct i40e_rte_flow_rss_conf));
@@ -11932,7 +12458,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
return -EINVAL;
}
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
return -EINVAL;
/* If both VMDQ and RSS enabled, not all of PF queues are configured.
@@ -11943,7 +12469,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
else
num = pf->dev_data->nb_rx_queues;
- num = RTE_MIN(num, conf->num);
+ num = RTE_MIN(num, conf->conf.queue_num);
PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured",
num);
@@ -11956,7 +12482,7 @@ i40e_config_rss_filter(struct i40e_pf *pf,
for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) {
if (j == num)
j = 0;
- lut = (lut << 8) | (conf->queue[j] & ((0x1 <<
+ lut = (lut << 8) | (conf->conf.queue[j] & ((0x1 <<
hw->func_caps.rss_table_entry_width) - 1));
if ((i & 3) == 3)
I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut);
@@ -11981,15 +12507,13 @@ i40e_config_rss_filter(struct i40e_pf *pf,
i40e_hw_rss_hash_set(pf, &rss_conf);
- rte_memcpy(rss_info,
- conf, sizeof(struct i40e_rte_flow_rss_conf));
+ if (i40e_rss_conf_init(rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
-RTE_INIT(i40e_init_log);
-static void
-i40e_init_log(void)
+RTE_INIT(i40e_init_log)
{
i40e_logtype_init = rte_log_register("pmd.net.i40e.init");
if (i40e_logtype_init >= 0)
@@ -12000,5 +12524,7 @@ i40e_init_log(void)
}
RTE_PMD_REGISTER_PARAM_STRING(net_i40e,
- QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
+ ETH_I40E_FLOATING_VEB_ARG "=1"
+ ETH_I40E_FLOATING_VEB_LIST_ARG "=<string>"
+ ETH_I40E_QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16"
ETH_I40E_SUPPORT_MULTI_DRIVER "=1");
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 99efb670..3fffe5a5 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -5,12 +5,18 @@
#ifndef _I40E_ETHDEV_H_
#define _I40E_ETHDEV_H_
+#include <stdint.h>
+
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
#include <rte_hash.h>
+#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tm_driver.h>
+#include "rte_pmd_i40e.h"
+
+#include "base/i40e_register.h"
#define I40E_VLAN_TAG_SIZE 4
@@ -22,6 +28,7 @@
#define I40E_NUM_DESC_ALIGN 32
#define I40E_BUF_SIZE_MIN 1024
#define I40E_FRAME_SIZE_MAX 9728
+#define I40E_TSO_FRAME_SIZE_MAX 262144
#define I40E_QUEUE_BASE_ADDR_UNIT 128
/* number of VSIs and queue default setting */
#define I40E_MAX_QP_NUM_PER_VF 16
@@ -80,11 +87,19 @@
#define I40E_WRITE_GLB_REG(hw, reg, value) \
do { \
+ uint32_t ori_val; \
+ struct rte_eth_dev *dev; \
+ ori_val = I40E_READ_REG((hw), (reg)); \
+ dev = ((struct i40e_adapter *)hw->back)->eth_dev; \
I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \
(reg)), (value)); \
- PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " \
- "with value 0x%08x", \
- (reg), (value)); \
+ if (ori_val != value) \
+ PMD_DRV_LOG(WARNING, \
+ "i40e device %s changed global " \
+ "register [0x%08x]. original: 0x%08x, " \
+ "new: 0x%08x ", \
+ (dev->device->name), (reg), \
+ (ori_val), (value)); \
} while (0)
/* index flex payload per layer */
@@ -170,7 +185,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_ITR_INDEX_NONE 3
#define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
#define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
-#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 8160 /* 8160 us */
+#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
/* Special FW support this floating VEB feature */
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
@@ -877,9 +892,11 @@ struct i40e_customized_pctype {
};
struct i40e_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
uint16_t queue_region_conf; /**< Queue region config flag */
- uint16_t num; /**< Number of entries in queue[]. */
+ uint8_t key[(I40E_VFQF_HKEY_MAX_INDEX > I40E_PFQF_HKEY_MAX_INDEX ?
+ I40E_VFQF_HKEY_MAX_INDEX : I40E_PFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t)]; /* Hash key. */
uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */
};
@@ -956,6 +973,8 @@ struct i40e_pf {
bool gtp_support; /* 1 - support GTP-C and GTP-U */
/* customer customized pctype */
struct i40e_customized_pctype customized_pctype[I40E_CUSTOMIZED_MAX];
+ /* Switch Domain Id */
+ uint16_t switch_domain_id;
};
enum pending_msg {
@@ -1005,6 +1024,9 @@ struct i40e_vf {
uint16_t promisc_flags; /* Promiscuous setting */
uint32_t vlan[I40E_VFTA_SIZE]; /* VLAN bit map */
+ struct ether_addr mc_addrs[I40E_NUM_MACADDR_MAX]; /* Multicast addrs */
+ uint16_t mc_addrs_num; /* Multicast mac addresses number */
+
/* Event from pf */
bool dev_closed;
bool link_up;
@@ -1058,6 +1080,20 @@ struct i40e_adapter {
uint64_t pctypes_mask;
};
+/**
+ * Structure to store private data for each VF representor instance
+ */
+struct i40e_vf_representor {
+ uint16_t switch_domain_id;
+ /**< Switch domain ID */
+ uint16_t vf_id;
+ /**< Virtual Function ID */
+ struct i40e_adapter *adapter;
+ /**< Private data store of associated physical function */
+ struct i40e_eth_stats stats_offset;
+ /**< Zero-point of VF statistics */
+};
+
extern const struct rte_flow_ops i40e_flow_ops;
union i40e_filter_t {
@@ -1079,22 +1115,6 @@ struct i40e_valid_pattern {
parse_filter_t parse_filter;
};
-enum I40E_WARNING_IDX {
- I40E_WARNING_DIS_FLX_PLD,
- I40E_WARNING_ENA_FLX_PLD,
- I40E_WARNING_QINQ_PARSER,
- I40E_WARNING_QINQ_CLOUD_FILTER,
- I40E_WARNING_TPID,
- I40E_WARNING_FLOW_CTL,
- I40E_WARNING_GRE_KEY_LEN,
- I40E_WARNING_QF_CTL,
- I40E_WARNING_HASH_INSET,
- I40E_WARNING_HSYM,
- I40E_WARNING_HASH_MSK,
- I40E_WARNING_FD_MSK,
- I40E_WARNING_RPL_CLD_FILTER,
-};
-
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
int i40e_vsi_release(struct i40e_vsi *vsi);
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf,
@@ -1206,7 +1226,8 @@ void i40e_tm_conf_uninit(struct rte_eth_dev *dev);
struct i40e_customized_pctype*
i40e_find_customized_pctype(struct i40e_pf *pf, uint8_t index);
void i40e_update_customized_info(struct rte_eth_dev *dev, uint8_t *pkg,
- uint32_t pkg_size);
+ uint32_t pkg_size,
+ enum rte_pmd_i40e_package_op op);
int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb);
int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev,
struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on);
@@ -1214,8 +1235,14 @@ void i40e_init_queue_region_conf(struct rte_eth_dev *dev);
void i40e_flex_payload_reg_set_default(struct i40e_hw *hw);
int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len);
int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size);
+int i40e_rss_conf_init(struct i40e_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int i40e_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int i40e_config_rss_filter(struct i40e_pf *pf,
struct i40e_rte_flow_rss_conf *conf, bool add);
+int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev);
#define I40E_DEV_TO_PCI(eth_dev) \
RTE_DEV_TO_PCI((eth_dev)->device)
@@ -1292,50 +1319,23 @@ i40e_align_floor(int n)
}
static inline uint16_t
-i40e_calc_itr_interval(int16_t interval, bool is_pf, bool is_multi_drv)
+i40e_calc_itr_interval(bool is_pf, bool is_multi_drv)
{
- if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) {
- if (is_multi_drv) {
- interval = I40E_QUEUE_ITR_INTERVAL_MAX;
- } else {
- if (is_pf)
- interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
- else
- interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
- }
+ uint16_t interval = 0;
+
+ if (is_multi_drv) {
+ interval = I40E_QUEUE_ITR_INTERVAL_MAX;
+ } else {
+ if (is_pf)
+ interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
+ else
+ interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT;
}
/* Convert to hardware count, as writing each 1 represents 2 us */
return interval / 2;
}
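
Worked through, the fixed defaults give: PF and VF both use
I40E_QUEUE_ITR_INTERVAL_DEFAULT = 32 us, returned as a hardware count of
32 / 2 = 16, while the multi-driver case uses I40E_QUEUE_ITR_INTERVAL_MAX =
8160 us, i.e. a count of 8160 / 2 = 4080.
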
-static inline void
-i40e_global_cfg_warning(enum I40E_WARNING_IDX idx)
-{
- const char *warning;
- static const char *const warning_list[] = {
- [I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload",
- [I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload",
- [I40E_WARNING_QINQ_PARSER] = "support QinQ parser",
- [I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter",
- [I40E_WARNING_TPID] = "support TPID configuration",
- [I40E_WARNING_FLOW_CTL] = "configure water marker",
- [I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting",
- [I40E_WARNING_QF_CTL] = "support hash function setting",
- [I40E_WARNING_HASH_INSET] = "configure hash input set",
- [I40E_WARNING_HSYM] = "set symmetric hash",
- [I40E_WARNING_HASH_MSK] = "configure hash mask",
- [I40E_WARNING_FD_MSK] = "configure fdir mask",
- [I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter",
- };
-
- warning = warning_list[idx];
-
- RTE_LOG(WARNING, PMD,
- "Global register is changed during %s\n",
- warning);
-}
-
#define I40E_VALID_FLOW(flow_type) \
((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \
(flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index fd003fe0..001c301b 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -44,6 +44,8 @@
#define I40EVF_BUSY_WAIT_COUNT 50
#define MAX_RESET_WAIT_CNT 20
+#define I40EVF_ALARM_INTERVAL 50000 /* us */
+
struct i40evf_arq_msg_info {
enum virtchnl_ops ops;
enum i40e_status_code result;
@@ -120,7 +122,7 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
-static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+static int i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
@@ -130,6 +132,14 @@ static void i40evf_handle_pf_event(struct rte_eth_dev *dev,
uint8_t *msg,
uint16_t msglen);
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr, bool add);
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
/* Default hash key buffer for RSS */
static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
@@ -195,6 +205,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.txq_info_get = i40e_txq_info_get,
.mac_addr_add = i40evf_add_mac_addr,
.mac_addr_remove = i40evf_del_mac_addr,
+ .set_mc_addr_list = i40evf_set_mc_addr_list,
.reta_update = i40evf_dev_rss_reta_update,
.reta_query = i40evf_dev_rss_reta_query,
.rss_hash_update = i40evf_dev_rss_hash_update,
@@ -1036,20 +1047,6 @@ static const struct rte_pci_id pci_id_i40evf_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
-static inline int
-i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
/* Disable IRQ0 */
static inline void
i40evf_disable_irq0(struct i40e_hw *hw)
@@ -1138,7 +1135,7 @@ i40evf_init_vf(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
vf->dev_data = dev->data;
@@ -1375,7 +1372,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40evf_dev_interrupt_handler(void *param)
+i40evf_dev_alarm_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1404,6 +1401,8 @@ i40evf_dev_interrupt_handler(void *param)
done:
i40evf_enable_irq0(hw);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, dev);
}
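
The skeleton of this self-rearming pattern, reduced to its essentials
(poll_handler is a hypothetical name; assumes <rte_alarm.h> is included):

static void
poll_handler(void *param)
{
	struct rte_eth_dev *dev = param;

	/* ... drain admin-queue events for dev here ... */

	/* Re-arm: each invocation schedules the next, replacing the
	 * interrupt callback that was previously registered. */
	rte_eal_alarm_set(I40EVF_ALARM_INTERVAL, poll_handler, dev);
}
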
static int
@@ -1447,12 +1446,8 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
return -1;
}
- /* register callback func to eal lib */
- rte_intr_callback_register(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)eth_dev);
-
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_eal_alarm_set(I40EVF_ALARM_INTERVAL,
+ i40evf_dev_alarm_handler, eth_dev);
/* configure and enable device interrupt */
i40evf_enable_irq0(hw);
@@ -1541,7 +1536,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
/* For non-DPDK PF drivers, VF has no ability to disable HW
* CRC strip, and is implicitly enabled by the PF.
*/
- if (!conf->rxmode.hw_strip_crc) {
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
(vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1575,7 +1570,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
/* Vlan stripping setting */
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
- if (dev_conf->rxmode.hw_vlan_strip)
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
i40evf_enable_vlan_strip(dev);
else
i40evf_disable_vlan_strip(dev);
@@ -1588,37 +1583,35 @@ static int
i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = 0;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail register. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- I40EVF_WRITE_FLUSH(hw);
+ rte_wmb();
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ I40EVF_WRITE_FLUSH(hw);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
- else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -1627,45 +1620,39 @@ i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct i40e_rx_queue *rxq;
int err;
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ rxq = dev->data->rx_queues[rx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
-
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
static int
i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = 0;
+ int err;
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
-
- /* Ready to switch the queue on */
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
-
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /* Ready to switch the queue on */
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
@@ -1674,22 +1661,19 @@ i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
struct i40e_tx_queue *txq;
int err;
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
-
- err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ txq = dev->data->tx_queues[tx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
@@ -1732,7 +1716,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
/**
* Check if the jumbo frame and maximum packet length are set correctly
*/
- if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1752,7 +1736,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
}
}
- if (dev_data->dev_conf.rxmode.enable_scatter ||
+ if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
(rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
dev_data->scattered_rx = 1;
}
@@ -1841,7 +1825,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
- i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0);
+ i40e_calc_itr_interval(0, 0);
uint16_t msix_intr;
msix_intr = intr_handle->intr_vec[queue_id];
@@ -1864,8 +1848,6 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&pci_dev->intr_handle);
-
return 0;
}
@@ -2012,23 +1994,18 @@ i40evf_dev_start(struct rte_eth_dev *dev)
/* Set all mac addrs */
i40evf_add_del_all_mac_addr(dev, TRUE);
+ /* Set all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ TRUE);
if (i40evf_start_queues(dev) != 0) {
PMD_DRV_LOG(ERR, "enable queues failed");
goto err_mac;
}
- /* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
- * is mapped to VFIO vector 0 in i40evf_dev_init( ).
- * If previous VFIO interrupt mapping set in i40evf_dev_init( ) is
- * not cleared, it will fail when rte_intr_enable( ) tries to map Rx
- * queue interrupt to other VFIO vectors.
- * So clear uio/vfio intr/evevnfd first to avoid failure.
- */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
- rte_intr_disable(intr_handle);
+ /* only enable interrupt in rx interrupt mode */
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
rte_intr_enable(intr_handle);
- }
i40evf_enable_queues_intr(dev);
@@ -2036,6 +2013,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
err_mac:
i40evf_add_del_all_mac_addr(dev, FALSE);
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
err_queue:
return -1;
}
@@ -2046,9 +2025,13 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
+ if (dev->data->dev_conf.intr_conf.rxq != 0)
+ rte_intr_disable(intr_handle);
+
if (hw->adapter_stopped == 1)
return;
i40evf_stop_queues(dev);
@@ -2063,6 +2046,9 @@ i40evf_dev_stop(struct rte_eth_dev *dev)
}
/* remove all mac addrs */
i40evf_add_del_all_mac_addr(dev, FALSE);
+ /* remove all multicast addresses */
+ i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
hw->adapter_stopped = 1;
}
@@ -2078,6 +2064,7 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
* while Linux driver does not
*/
+ memset(&new_link, 0, sizeof(new_link));
/* Linux driver PF host */
switch (vf->link_speed) {
case I40E_LINK_SPEED_100MB:
@@ -2107,11 +2094,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
new_link.link_status = vf->link_up ? ETH_LINK_UP :
ETH_LINK_DOWN;
new_link.link_autoneg =
- dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED;
-
- i40evf_dev_atomic_write_link_status(dev, &new_link);
+ !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED);
- return 0;
+ return rte_eth_linkstatus_set(dev, &new_link);
}
static void
@@ -2179,8 +2164,6 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- memset(dev_info, 0, sizeof(*dev_info));
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2189,6 +2172,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_queue_offload_capa = 0;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2196,7 +2180,13 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+
+ dev_info->tx_queue_offload_capa = 0;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2209,7 +2199,8 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GRE_TNL_TSO |
DEV_TX_OFFLOAD_IPIP_TNL_TSO |
- DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -2219,6 +2210,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2229,8 +2221,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -2276,19 +2267,20 @@ static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ rte_eal_alarm_cancel(i40evf_dev_alarm_handler, dev);
i40evf_dev_stop(dev);
i40e_dev_free_queues(dev);
+ /*
+ * Disable promiscuous mode before resetting the VF. This is a
+ * workaround when interoperating with the kernel PF driver and is
+ * not the normal path.
+ */
+ i40evf_dev_promiscuous_disable(dev);
+ i40evf_dev_allmulticast_disable(dev);
+
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
- /* disable uio intr before callback unregister */
- rte_intr_disable(intr_handle);
-
- /* unregister callback func from eal lib */
- rte_intr_callback_unregister(intr_handle,
- i40evf_dev_interrupt_handler, dev);
i40evf_disable_irq0(hw);
}
@@ -2649,16 +2641,17 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
}
if (frame_size > ETHER_MAX_LEN)
- dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ dev_data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+ dev_data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return ret;
}
-static void
+static int
i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr)
{
@@ -2667,15 +2660,99 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
if (!is_valid_assigned_ether_addr(mac_addr)) {
PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
- return;
+ return -EINVAL;
}
if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
- return;
+ return -EPERM;
i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr);
- i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+ if (i40evf_add_mac_addr(dev, mac_addr, 0, 0) != 0)
+ return -EIO;
ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr);
+ return 0;
+}
+
+static int
+i40evf_add_del_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num, bool add)
+{
+ struct virtchnl_ether_addr_list *list;
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ uint8_t cmd_buffer[sizeof(struct virtchnl_ether_addr_list) +
+ (I40E_NUM_MACADDR_MAX * sizeof(struct virtchnl_ether_addr))];
+ uint32_t i;
+ int err;
+ struct vf_cmd_info args;
+
+ if (mc_addrs == NULL || mc_addrs_num == 0)
+ return 0;
+
+ if (mc_addrs_num > I40E_NUM_MACADDR_MAX)
+ return -EINVAL;
+
+ list = (struct virtchnl_ether_addr_list *)cmd_buffer;
+ list->vsi_id = vf->vsi_res->vsi_id;
+ list->num_elements = mc_addrs_num;
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!I40E_IS_MULTICAST(mc_addrs[i].addr_bytes)) {
+ PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
+ mc_addrs[i].addr_bytes[0],
+ mc_addrs[i].addr_bytes[1],
+ mc_addrs[i].addr_bytes[2],
+ mc_addrs[i].addr_bytes[3],
+ mc_addrs[i].addr_bytes[4],
+ mc_addrs[i].addr_bytes[5]);
+ return -EINVAL;
+ }
+
+ memcpy(list->list[i].addr, mc_addrs[i].addr_bytes,
+ sizeof(list->list[i].addr));
+ }
+
+ args.ops = add ? VIRTCHNL_OP_ADD_ETH_ADDR : VIRTCHNL_OP_DEL_ETH_ADDR;
+ args.in_args = cmd_buffer;
+ args.in_args_size = sizeof(struct virtchnl_ether_addr_list) +
+ i * sizeof(struct virtchnl_ether_addr);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = I40E_AQ_BUF_SZ;
+ err = i40evf_execute_vf_cmd(dev, &args);
+ if (err) {
+ PMD_DRV_LOG(ERR, "fail to execute command %s",
+ add ? "OP_ADD_ETH_ADDR" : "OP_DEL_ETH_ADDR");
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+i40evf_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ int err;
+
+ /* flush previous addresses */
+ err = i40evf_add_del_mc_addr_list(dev, vf->mc_addrs, vf->mc_addrs_num,
+ FALSE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = 0;
+
+ /* add new ones */
+ err = i40evf_add_del_mc_addr_list(dev, mc_addrs, mc_addrs_num,
+ TRUE);
+ if (err)
+ return err;
+
+ vf->mc_addrs_num = mc_addrs_num;
+ memcpy(vf->mc_addrs, mc_addrs, mc_addrs_num * sizeof(*mc_addrs));
+
+ return 0;
}
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index b83a0cff..d41601a1 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -525,8 +525,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf,
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
(num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
- i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
}
for (i = 0; i < num; i++) {
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 16c47cf7..c67b264d 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -10,6 +10,7 @@
#include <unistd.h>
#include <stdarg.h>
+#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_log.h>
@@ -53,6 +54,7 @@ static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
struct rte_flow_error *error,
struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_fdir_filter_conf *filter);
@@ -1939,7 +1941,8 @@ static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ int qinq = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
uint64_t reg_r = 0;
uint16_t reg_id;
uint16_t tpid;
@@ -2259,8 +2262,7 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
(raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
(layer_idx * I40E_MAX_FLXPLD_FIED);
- I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
- i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+ I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
}
/* Set flex pit */
@@ -2400,7 +2402,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
break;
}
- if (cus_pctype)
+ if (cus_pctype && cus_pctype->valid)
return cus_pctype->pctype;
return I40E_FILTER_PCTYPE_INVALID;
@@ -2419,6 +2421,7 @@ i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
*/
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
struct rte_flow_error *error,
struct i40e_fdir_filter_conf *filter)
@@ -2490,16 +2493,22 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
"Invalid MAC_addr mask.");
return -rte_errno;
}
+ }
+ if (eth_spec && eth_mask && eth_mask->type) {
+ enum rte_flow_item_type next = (item + 1)->type;
- if ((eth_mask->type & UINT16_MAX) ==
- UINT16_MAX) {
- input_set |= I40E_INSET_LAST_ETHER_TYPE;
- filter->input.flow.l2_flow.ether_type =
- eth_spec->type;
+ if (eth_mask->type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid type mask.");
+ return -rte_errno;
}
ether_type = rte_be_to_cpu_16(eth_spec->type);
- if (ether_type == ETHER_TYPE_IPv4 ||
+
+ if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
+ ether_type == ETHER_TYPE_IPv4 ||
ether_type == ETHER_TYPE_IPv6 ||
ether_type == ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
@@ -2509,6 +2518,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
"Unsupported ether_type.");
return -rte_errno;
}
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ eth_spec->type;
}
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
@@ -2518,6 +2530,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
+
+ RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
if (vlan_spec && vlan_mask) {
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -2526,6 +2540,33 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
vlan_spec->tci;
}
}
+ if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
+ if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid inner_type"
+ " mask.");
+ return -rte_errno;
+ }
+
+ ether_type =
+ rte_be_to_cpu_16(vlan_spec->inner_type);
+
+ if (ether_type == ETHER_TYPE_IPv4 ||
+ ether_type == ETHER_TYPE_IPv6 ||
+ ether_type == ETHER_TYPE_ARP ||
+ ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported inner_type.");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_LAST_ETHER_TYPE;
+ filter->input.flow.l2_flow.ether_type =
+ vlan_spec->inner_type;
+ }
pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
layer_idx = I40E_FLXPLD_L2_IDX;
@@ -2918,6 +2959,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_VF:
vf_spec = item->spec;
+ if (!attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Matching VF traffic"
+ " without affecting it"
+ " (transfer attribute)"
+ " is unsupported");
+ return -rte_errno;
+ }
filter->input.flow_ext.is_vf = 1;
filter->input.flow_ext.dst_id = vf_spec->id;
if (filter->input.flow_ext.is_vf &&
@@ -3080,7 +3131,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
&filter->fdir_filter;
int ret;
- ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
+ fdir_filter);
if (ret)
return ret;
@@ -3284,7 +3336,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -3514,7 +3567,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_VLAN:
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -4022,7 +4076,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
vlan_spec = item->spec;
vlan_mask = item->mask;
- if (!(vlan_spec && vlan_mask)) {
+ if (!(vlan_spec && vlan_mask) ||
+ vlan_mask->inner_type) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
@@ -4150,7 +4205,8 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
info->region[0].user_priority[0] =
- (vlan_spec->tci >> 13) & 0x7;
+ (rte_be_to_cpu_16(
+ vlan_spec->tci) >> 13) & 0x7;
info->region[0].user_priority_num = 1;
info->queue_region_number = 1;
*action_flag = 0;
@@ -4169,6 +4225,19 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
return 0;
}
+/**
+ * Parse the RSS queue indices, total queue number and hash functions
+ * from the configuration. If the configuration is meant for queue
+ * regions, set the queue_region_conf flag to TRUE, otherwise to FALSE.
+ * For queue region configuration, also parse the hardware flowtype and
+ * user_priority from the configuration and check their validity: the
+ * queue region size must be one of 1, 2, 4, 8, 16, 32 or 64; the
+ * hw_flowtype or PCTYPE index may be at most 63; the user priority
+ * index may be at most 7. In addition, the queue indices must form a
+ * continuous sequence, and the queue region indices must be a subset
+ * of the RSS queue indices configured for this port.
+ */
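+/*
+ * For example, an RSS action listing queues 8..15 (contiguous, a
+ * power-of-two count, and within the port's RSS queue set) is a valid
+ * region of size 8, while a list such as 8, 9, 11 is rejected as
+ * non-contiguous.
+ */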
static int
i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
const struct rte_flow_action *actions,
@@ -4205,7 +4274,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (action_flag) {
for (n = 0; n < 64; n++) {
- if (rss->rss_conf->rss_hf & (hf_bit << n)) {
+ if (rss->types & (hf_bit << n)) {
conf_info->region[0].hw_flowtype[0] = n;
conf_info->region[0].flowtype_num = 1;
conf_info->queue_region_number = 1;
@@ -4214,41 +4283,73 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
}
}
+ /**
+	 * Check the queue region parameters: the region's queue
+	 * indices must form a continuous sequence and must be a
+	 * subset of the RSS queue indices configured for this port.
+ */
+ if (conf_info->queue_region_number) {
+ for (i = 0; i < rss->queue_num; i++) {
+ for (j = 0; j < rss_info->conf.queue_num; j++) {
+ if (rss->queue[i] == rss_info->conf.queue[j])
+ break;
+ }
+ if (j == rss_info->conf.queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+ }
+
+ for (i = 0; i < rss->queue_num - 1; i++) {
+ if (rss->queue[i + 1] != rss->queue[i] + 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+ }
+ }
+
+ /* Parse queue region related parameters from configuration */
for (n = 0; n < conf_info->queue_region_number; n++) {
if (conf_info->region[n].user_priority_num ||
conf_info->region[n].flowtype_num) {
- if (!((rte_is_power_of_2(rss->num)) &&
- rss->num <= 64)) {
- PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
- "total number of queues do not exceed the VSI allocation");
+ if (!((rte_is_power_of_2(rss->queue_num)) &&
+ rss->queue_num <= 64)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues do not exceed the VSI allocation");
return -rte_errno;
}
if (conf_info->region[n].user_priority[n] >=
I40E_MAX_USER_PRIORITY) {
- PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the user priority max index is 7");
return -rte_errno;
}
if (conf_info->region[n].hw_flowtype[n] >=
I40E_FILTER_PCTYPE_MAX) {
- PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
- return -rte_errno;
- }
-
- if (rss_info->num < rss->num ||
- rss_info->queue[0] < rss->queue[0] ||
- (rss->queue[0] + rss->num >
- rss_info->num + rss_info->queue[0])) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
- "no valid queues");
+ "the hw_flowtype or PCTYPE max index is 63");
return -rte_errno;
}
for (i = 0; i < info->queue_region_number; i++) {
- if (info->region[i].queue_num == rss->num &&
+ if (info->region[i].queue_num ==
+ rss->queue_num &&
info->region[i].queue_start_index ==
rss->queue[0])
break;
@@ -4256,12 +4357,15 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
if (i == info->queue_region_number) {
if (i > I40E_REGION_MAX_INDEX) {
- PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "the queue region max index is 7");
return -rte_errno;
}
info->region[i].queue_num =
- rss->num;
+ rss->queue_num;
info->region[i].queue_start_index =
rss->queue[0];
info->region[i].region_id =
@@ -4301,10 +4405,13 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
rss_config->queue_region_conf = TRUE;
}
+ /**
+	 * Return now if this flow is only used for queue region configuration.
+ */
if (rss_config->queue_region_conf)
return 0;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@@ -4312,7 +4419,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -4321,15 +4428,29 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_config->rss_conf = *rss->rss_conf;
- else
- rss_config->rss_conf.rss_hf =
- pf->adapter->flow_types_mask;
- for (n = 0; n < rss->num; ++n)
- rss_config->queue[n] = rss->queue[n];
- rss_config->num = rss->num;
+ /* Parse RSS related parameters from configuration */
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key too large");
+ if (rss->queue_num > RTE_DIM(rss_config->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (i40e_rss_conf_init(rss_config, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
index++;
/* check if the next not void action is END */
@@ -4385,14 +4506,15 @@ i40e_config_rss_filter_set(struct rte_eth_dev *dev,
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
if (conf->queue_region_conf) {
- i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
conf->queue_region_conf = 0;
} else {
- i40e_config_rss_filter(pf, conf, 1);
+ ret = i40e_config_rss_filter(pf, conf, 1);
}
- return 0;
+ return ret;
}
static int
@@ -4545,6 +4667,8 @@ i40e_flow_create(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_HASH:
ret = i40e_config_rss_filter_set(dev,
&cons_filter.rss_conf);
+ if (ret)
+ goto free_flow;
flow->rule = &pf->rss_info;
break;
default:
@@ -4846,7 +4970,7 @@ i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
- if (rss_info->num)
+ if (rss_info->conf.queue_num)
ret = i40e_config_rss_filter(pf, rss_info, FALSE);
return ret;
}
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a6..2a28ee34 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -40,9 +40,6 @@
/* Base address of the HW descriptor ring should be 128B aligned. */
#define I40E_RING_BASE_ALIGN 128
-#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
#ifdef RTE_LIBRTE_IEEE1588
@@ -1240,7 +1237,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
for (i = 0; i < txq->tx_rs_thresh; i++)
rte_prefetch0((txep + i)->mbuf);
- if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
rte_mempool_put(txep->mbuf->pool, txep->mbuf);
txep->mbuf = NULL;
@@ -1442,13 +1439,15 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
/* Check for m->nb_segs to not exceed the limits. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
- if (m->nb_segs > I40E_TX_MAX_SEG ||
- m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
+ m->pkt_len > I40E_FRAME_SIZE_MAX) {
rte_errno = -EINVAL;
return i;
}
- } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
- (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+ } else if (m->nb_segs > I40E_TX_MAX_SEG ||
+ m->tso_segsz < I40E_MIN_TSO_MSS ||
+ m->tso_segsz > I40E_MAX_TSO_MSS ||
+ m->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
/* MSS outside the range (256B - 9674B) are considered
* malicious
*/
@@ -1461,6 +1460,12 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
return i;
}
+		/* Check that the packet meets the minimum Tx length. */
+ if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
@@ -1527,38 +1532,36 @@ int
i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = -1;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail regieter. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ rte_wmb();
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	/* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- } else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
@@ -1568,24 +1571,21 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- /*
- * rx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+	 * rx_queue_id is the queue id the application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1593,28 +1593,27 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
int
i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = -1;
+ int err;
struct i40e_tx_queue *txq;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- /*
- * tx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ /*
+	 * tx_queue_id is the queue id the application refers to, while
+	 * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
@@ -1624,26 +1623,23 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
+ txq = dev->data->tx_queues[tx_queue_id];
- /*
- * tx_queue_id is queue id application refers to, while
- * txq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
-
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+	 * tx_queue_id is the queue id the application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
@@ -1692,6 +1688,75 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
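+/* Return 1 if no queue other than idx has been set up yet, 0 otherwise. */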
+static int
+i40e_dev_first_queue(uint16_t idx, void **queues, int num)
+{
+ uint16_t i;
+
+ for (i = 0; i < num; i++) {
+ if (i != idx && queues[i])
+ return 0;
+ }
+
+ return 1;
+}
+
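+/*
+ * Rx queue setup while the port is already started: the new queue must
+ * be compatible with the Rx burst mode already chosen for the port.
+ */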
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_rx_queue *rxq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ uint16_t buf_size =
+ (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
+ int use_scattered_rx =
+ ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+
+ if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_queue(rxq->queue_id,
+ dev->data->rx_queues,
+ dev->data->nb_rx_queues)) {
+ /**
+		 * If this is the first queue to be set up,
+		 * set all flags to their defaults and call
+ * i40e_set_rx_function.
+ */
+ ad->rx_bulk_alloc_allowed = true;
+ ad->rx_vec_allowed = true;
+ dev->data->scattered_rx = use_scattered_rx;
+ if (use_def_burst_func)
+ ad->rx_bulk_alloc_allowed = false;
+ i40e_set_rx_function(dev);
+ return 0;
+ }
+
+ /* check bulk alloc conflict */
+ if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+ PMD_DRV_LOG(ERR, "Can't use default burst.");
+ return -EINVAL;
+ }
+	/* check scattered Rx conflict */
+ if (!dev->data->scattered_rx && use_scattered_rx) {
+ PMD_DRV_LOG(ERR, "Scattered rx is required.");
+ return -EINVAL;
+ }
+ /* check vector conflict */
+ if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+ PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int
i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1712,6 +1777,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len, i;
uint16_t reg_idx, base, bsf, tc_mapping;
int q_offset, use_def_burst_func = 1;
+ uint64_t offloads;
+
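+	/* Per-queue Rx offloads are combined with the port-level offloads. */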
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,11 +1828,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
/* Allocate the maximum number of RX ring hardware descriptors. */
len = I40E_MAX_RING_DESC;
@@ -1808,25 +1879,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
i40e_reset_rx_queue(rxq);
rxq->q_set = TRUE;
- dev->data->rx_queues[queue_idx] = rxq;
-
- use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
- if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
- } else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
- "not enabled on port=%d, queue=%d.",
- rxq->port_id, rxq->queue_id);
- ad->rx_bulk_alloc_allowed = false;
- }
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -1841,6 +1893,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->dcb_tc = i;
}
+ if (dev->data->dev_started) {
+ if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
+ i40e_dev_rx_queue_release(rxq);
+ return -EINVAL;
+ }
+ } else {
+ use_def_burst_func =
+ check_rx_burst_bulk_alloc_preconditions(rxq);
+ if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+ } else {
+ PMD_INIT_LOG(DEBUG,
+ "Rx Burst Bulk Alloc Preconditions are "
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.",
+ rxq->port_id, rxq->queue_id);
+ ad->rx_bulk_alloc_allowed = false;
+ }
+ }
+
+ dev->data->rx_queues[queue_idx] = rxq;
return 0;
}
@@ -1972,6 +2052,52 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
return RTE_ETH_TX_DESC_FULL;
}
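+/*
+ * Tx queue setup while the port is already started: the new queue must
+ * be compatible with the Tx function already chosen for the port.
+ */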
+static int
+i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
+ struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (i40e_tx_queue_init(txq) != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to do TX queue initialization");
+ return -EINVAL;
+ }
+
+ if (i40e_dev_first_queue(txq->queue_id,
+ dev->data->tx_queues,
+ dev->data->nb_tx_queues)) {
+ /**
+		 * If this is the first queue to be set up,
+ * set all flags and call
+ * i40e_set_tx_function.
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ i40e_set_tx_function(dev);
+ return 0;
+ }
+
+ /* check vector conflict */
+ if (ad->tx_vec_allowed) {
+ if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ ||
+ i40e_txq_vec_setup(txq)) {
+ PMD_DRV_LOG(ERR, "Failed vector tx setup.");
+ return -EINVAL;
+ }
+ }
+ /* check simple tx conflict */
+ if (ad->tx_simple_allowed) {
+ if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 ||
+ txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) {
+ PMD_DRV_LOG(ERR, "No-simple tx is required.");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -1989,6 +2115,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t reg_idx, i, base, bsf, tc_mapping;
int q_offset;
+ uint64_t offloads;
+
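+	/* Per-queue Tx offloads are combined with the port-level offloads. */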
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -2123,7 +2252,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->queue_id = queue_idx;
txq->reg_idx = reg_idx;
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->vsi = vsi;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
@@ -2144,10 +2273,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
i40e_reset_tx_queue(txq);
txq->q_set = TRUE;
- dev->data->tx_queues[queue_idx] = txq;
-
- /* Use a simple TX queue without offloads or multi segs if possible */
- i40e_set_tx_function_flag(dev, txq);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -2162,6 +2287,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->dcb_tc = i;
}
+ if (dev->data->dev_started) {
+ if (i40e_dev_tx_queue_setup_runtime(dev, txq)) {
+ i40e_dev_tx_queue_release(txq);
+ return -EINVAL;
+ }
+ } else {
+ /**
+		 * Use a simple Tx queue (no offloads,
+		 * no multi-segment packets) if possible.
+ */
+ i40e_set_tx_function_flag(dev, txq);
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+
return 0;
}
@@ -2189,8 +2328,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id)
if (mz)
return mz;
- mz = rte_memzone_reserve_aligned(name, len,
- socket_id, 0, I40E_RING_BASE_ALIGN);
+ mz = rte_memzone_reserve_aligned(name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN);
return mz;
}
@@ -2469,7 +2608,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
- if (data->dev_conf.rxmode.jumbo_frame == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2886,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -2765,8 +2905,8 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+ qinfo->conf.offloads = txq->offloads;
}
void __attribute__((cold))
@@ -2889,19 +3029,24 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- /* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
- && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
- if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
- PMD_INIT_LOG(DEBUG, "Vector tx"
- " can be enabled on this txq.");
-
- } else {
- ad->tx_vec_allowed = false;
- }
- } else {
- ad->tx_simple_allowed = false;
- }
+	/* Use a simple Tx queue if possible (only mbuf fast free is allowed) */
+ ad->tx_simple_allowed =
+ (txq->offloads ==
+ (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
+ ad->tx_vec_allowed = (ad->tx_simple_allowed &&
+ txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
+
+ if (ad->tx_vec_allowed)
+ PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else if (ad->tx_simple_allowed)
+ PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else
+ PMD_INIT_LOG(DEBUG,
+ "Neither simple nor vector Tx enabled on Tx queue %u\n",
+ txq->queue_id);
}
void __attribute__((cold))
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 34cd7923..3fc619af 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -30,6 +30,8 @@
#define I40E_TX_MAX_SEG UINT8_MAX
#define I40E_TX_MAX_MTU_SEG 8
+#define I40E_TX_MIN_PKT_LEN 17
+
#undef container_of
#define container_of(ptr, type, member) ({ \
typeof(((type *)0)->member)(*__mptr) = (ptr); \
@@ -107,6 +109,7 @@ struct i40e_rx_queue {
bool rx_deferred_start; /**< don't start this queue in dev start */
uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
uint8_t dcb_tc; /**< Traffic class of rx queue */
+ uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
};
struct i40e_tx_entry {
@@ -141,13 +144,13 @@ struct i40e_tx_queue {
uint16_t port_id; /**< Device port identifier. */
uint16_t queue_id; /**< TX queue index. */
uint16_t reg_idx;
- uint32_t txq_flags;
struct i40e_vsi *vsi; /**< the VSI this queue belongs to */
uint16_t tx_next_dd;
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
bool tx_deferred_start; /**< don't start this queue in dev start */
uint8_t dcb_tc; /**< Traffic class of tx queue */
+	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
};
/** Offload features */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
index dbcb61f3..23179b3b 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_avx2.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c
@@ -188,7 +188,7 @@ _recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
*/
- while (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
i40e_rxq_rearm(rxq);
/* Before we start moving massive data around, check to see if
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 3ffedcb9..63cb1774 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -202,11 +202,11 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
/* - no csum error report support
* - no header split support
*/
- if (rxmode->header_split == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
return -1;
/* no QinQ support */
- if (rxmode->hw_vlan_extend == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
return -1;
return 0;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index e549d1e8..83572ef8 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * Copyright(c) 2016, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation.
+ * Copyright(c) 2016-2018, Linaro Limited.
*/
#include <stdint.h>
diff --git a/drivers/net/i40e/i40e_vf_representor.c b/drivers/net/i40e/i40e_vf_representor.c
new file mode 100644
index 00000000..f9f13161
--- /dev/null
+++ b/drivers/net/i40e/i40e_vf_representor.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_type.h"
+#include "base/virtchnl.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "rte_pmd_i40e.h"
+
+static int
+i40e_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return i40e_dev_link_update(representor->adapter->eth_dev,
+ wait_to_complete);
+}
+
+static void
+i40e_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ /* get dev info for the vdev */
+ dev_info->device = ethdev->device;
+
+ dev_info->max_rx_queues = ethdev->data->nb_rx_queues;
+ dev_info->max_tx_queues = ethdev->data->nb_tx_queues;
+
+ dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
+ dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
+ dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
+ sizeof(uint32_t);
+ dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
+ dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
+ dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_QINQ_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_thresh = {
+ .pthresh = I40E_DEFAULT_RX_PTHRESH,
+ .hthresh = I40E_DEFAULT_RX_HTHRESH,
+ .wthresh = I40E_DEFAULT_RX_WTHRESH,
+ },
+ .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_thresh = {
+ .pthresh = I40E_DEFAULT_TX_PTHRESH,
+ .hthresh = I40E_DEFAULT_TX_HTHRESH,
+ .wthresh = I40E_DEFAULT_TX_WTHRESH,
+ },
+ .tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
+ .tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
+ .offloads = 0,
+ };
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = I40E_MAX_RING_DESC,
+ .nb_min = I40E_MIN_RING_DESC,
+ .nb_align = I40E_ALIGN_RING_DESC,
+ };
+
+ dev_info->switch_info.name =
+ representor->adapter->eth_dev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int
+i40e_vf_representor_dev_configure(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int
+i40e_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void
+i40e_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+i40e_vf_representor_rx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int
+i40e_vf_representor_tx_queue_setup(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
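+/* Subtract the offset snapshot from a 48-bit HW counter, handling wrap. */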
+static void
+i40evf_stat_update_48(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = *stat - *offset;
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_48_BIT_WIDTH)) - *offset);
+
+ *stat &= I40E_48_BIT_MASK;
+}
+
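+/* Subtract the offset snapshot from a 32-bit HW counter, handling wrap. */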
+static void
+i40evf_stat_update_32(uint64_t *offset,
+ uint64_t *stat)
+{
+ if (*stat >= *offset)
+ *stat = (uint64_t)(*stat - *offset);
+ else
+ *stat = (uint64_t)((*stat +
+ ((uint64_t)1 << I40E_32_BIT_WIDTH)) - *offset);
+}
+
+static int
+rte_pmd_i40e_get_vf_native_stats(uint16_t port,
+ uint16_t vf_id,
+ struct i40e_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+ memcpy(stats, &vsi->eth_stats, sizeof(vsi->eth_stats));
+
+ return 0;
+}
+
+static int
+i40e_vf_representor_stats_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_stats *stats)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ struct i40e_eth_stats native_stats;
+ int ret;
+
+ ret = rte_pmd_i40e_get_vf_native_stats(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &native_stats);
+ if (ret == 0) {
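+		/* Convert absolute HW counters into deltas since the last reset. */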
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_bytes,
+ &native_stats.rx_bytes);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_unicast,
+ &native_stats.rx_unicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_multicast,
+ &native_stats.rx_multicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.rx_broadcast,
+ &native_stats.rx_broadcast);
+ i40evf_stat_update_32(
+ &representor->stats_offset.rx_discards,
+ &native_stats.rx_discards);
+ i40evf_stat_update_32(
+ &representor->stats_offset.rx_unknown_protocol,
+ &native_stats.rx_unknown_protocol);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_bytes,
+ &native_stats.tx_bytes);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_unicast,
+ &native_stats.tx_unicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_multicast,
+ &native_stats.tx_multicast);
+ i40evf_stat_update_48(
+ &representor->stats_offset.tx_broadcast,
+ &native_stats.tx_broadcast);
+ i40evf_stat_update_32(
+ &representor->stats_offset.tx_errors,
+ &native_stats.tx_errors);
+ i40evf_stat_update_32(
+ &representor->stats_offset.tx_discards,
+ &native_stats.tx_discards);
+
+ stats->ipackets = native_stats.rx_unicast +
+ native_stats.rx_multicast +
+ native_stats.rx_broadcast;
+ stats->opackets = native_stats.tx_unicast +
+ native_stats.tx_multicast +
+ native_stats.tx_broadcast;
+ stats->ibytes = native_stats.rx_bytes;
+ stats->obytes = native_stats.tx_bytes;
+ stats->ierrors = native_stats.rx_discards;
+ stats->oerrors = native_stats.tx_errors + native_stats.tx_discards;
+ }
+ return ret;
+}
+
+static void
+i40e_vf_representor_stats_reset(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_get_vf_native_stats(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &representor->stats_offset);
+}
+
+static void
+i40e_vf_representor_promiscuous_enable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_unicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 1);
+}
+
+static void
+i40e_vf_representor_promiscuous_disable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_unicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 0);
+}
+
+static void
+i40e_vf_representor_allmulticast_enable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_multicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 1);
+}
+
+static void
+i40e_vf_representor_allmulticast_disable(struct rte_eth_dev *ethdev)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_multicast_promisc(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, 0);
+}
+
+static void
+i40e_vf_representor_mac_addr_remove(struct rte_eth_dev *ethdev, uint32_t index)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_remove_vf_mac_addr(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, &ethdev->data->mac_addrs[index]);
+}
+
+static int
+i40e_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_i40e_set_vf_mac_addr(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static int
+i40e_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_i40e_set_vf_vlan_filter(
+ representor->adapter->eth_dev->data->port_id,
+ vlan_id, vf_mask, on);
+}
+
+static int
+i40e_vf_representor_vlan_offload_set(struct rte_eth_dev *ethdev, int mask)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+ struct rte_eth_dev *pdev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ uint32_t vfid;
+
+ pdev = representor->adapter->eth_dev;
+ vfid = representor->vf_id;
+
+ if (!is_i40e_supported(pdev)) {
+ PMD_DRV_LOG(ERR, "Invalid PF dev.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(pdev->data->dev_private);
+
+ if (vfid >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vf = &pf->vfs[vfid];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* Enable or disable VLAN filtering offload */
+ if (ethdev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ return i40e_vsi_config_vlan_filter(vsi, TRUE);
+ else
+ return i40e_vsi_config_vlan_filter(vsi, FALSE);
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ /* Enable or disable VLAN stripping offload */
+ if (ethdev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)
+ return i40e_vsi_config_vlan_stripping(vsi, TRUE);
+ else
+ return i40e_vsi_config_vlan_stripping(vsi, FALSE);
+ }
+
+ return -EINVAL;
+}
+
+static void
+i40e_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_i40e_set_vf_vlan_stripq(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, on);
+}
+
+static int
+i40e_vf_representor_vlan_pvid_set(struct rte_eth_dev *ethdev, uint16_t vlan_id,
+ __rte_unused int on)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_i40e_set_vf_vlan_insert(
+ representor->adapter->eth_dev->data->port_id,
+ representor->vf_id, vlan_id);
+}
+
+struct eth_dev_ops i40e_representor_dev_ops = {
+ .dev_infos_get = i40e_vf_representor_dev_infos_get,
+
+ .dev_start = i40e_vf_representor_dev_start,
+ .dev_configure = i40e_vf_representor_dev_configure,
+ .dev_stop = i40e_vf_representor_dev_stop,
+
+ .rx_queue_setup = i40e_vf_representor_rx_queue_setup,
+ .tx_queue_setup = i40e_vf_representor_tx_queue_setup,
+
+ .link_update = i40e_vf_representor_link_update,
+
+ .stats_get = i40e_vf_representor_stats_get,
+ .stats_reset = i40e_vf_representor_stats_reset,
+
+ .promiscuous_enable = i40e_vf_representor_promiscuous_enable,
+ .promiscuous_disable = i40e_vf_representor_promiscuous_disable,
+
+ .allmulticast_enable = i40e_vf_representor_allmulticast_enable,
+ .allmulticast_disable = i40e_vf_representor_allmulticast_disable,
+
+ .mac_addr_remove = i40e_vf_representor_mac_addr_remove,
+ .mac_addr_set = i40e_vf_representor_mac_addr_set,
+
+ .vlan_filter_set = i40e_vf_representor_vlan_filter_set,
+ .vlan_offload_set = i40e_vf_representor_vlan_offload_set,
+ .vlan_strip_queue_set = i40e_vf_representor_vlan_strip_queue_set,
+ .vlan_pvid_set = i40e_vf_representor_vlan_pvid_set
+
+};
+
+static uint16_t
+i40e_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+i40e_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct i40e_vf_representor *representor = ethdev->data->dev_private;
+
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct rte_eth_link *link;
+
+ representor->vf_id =
+ ((struct i40e_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct i40e_vf_representor *)init_params)->switch_domain_id;
+ representor->adapter =
+ ((struct i40e_vf_representor *)init_params)->adapter;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(
+ representor->adapter->eth_dev->data->dev_private);
+
+ if (representor->vf_id >= pf->vf_num)
+ return -ENODEV;
+
+	/* The representor shares the same driver as its PF device. */
+ ethdev->device->driver = representor->adapter->eth_dev->device->driver;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &i40e_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = i40e_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = i40e_vf_representor_tx_burst;
+
+ vf = &pf->vfs[representor->vf_id];
+
+ if (!vf->vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -ENODEV;
+ }
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+	/* Set the number of queues allocated to the VF. */
+ ethdev->data->nb_rx_queues = vf->vsi->nb_qps;
+ ethdev->data->nb_tx_queues = vf->vsi->nb_qps;
+
+ ethdev->data->mac_addrs = &vf->mac_addr;
+
+ /* Link state. Inherited from PF */
+ link = &representor->adapter->eth_dev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+i40e_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build
index 8764b0e5..d783f362 100644
--- a/drivers/net/i40e/meson.build
+++ b/drivers/net/i40e/meson.build
@@ -1,10 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
+
cflags += ['-DPF_DRIVER',
'-DVF_DRIVER',
'-DINTEGRATED_VF',
- '-DX722_A0_SUPPORT']
+ '-DX722_A0_SUPPORT',
+ '-DALLOW_EXPERIMENTAL_API']
subdir('base')
objs = [base_objs]
@@ -17,10 +20,12 @@ sources = files(
'i40e_fdir.c',
'i40e_flow.c',
'i40e_tm.c',
+ 'i40e_vf_representor.c',
'rte_pmd_i40e.c'
)
deps += ['hash']
+includes += include_directories('base')
if arch_subdir == 'x86'
dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1)
@@ -36,11 +41,10 @@ if arch_subdir == 'x86'
'i40e_rxtx_vec_avx2.c',
dependencies: [static_rte_ethdev,
static_rte_kvargs, static_rte_hash],
- c_args: '-mavx2')
+ include_directories: includes,
+ c_args: [cflags, '-mavx2'])
objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c')
endif
endif
-includes += include_directories('base')
-
install_headers('rte_pmd_i40e.h')
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index dae59e6d..bba62b1c 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -570,6 +570,49 @@ rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
return 0;
}
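+/* All-zeros address used to reset a VF's default MAC address. */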
+static const struct ether_addr null_mac_addr;
+
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (is_same_ether_addr(mac_addr, &vf->mac_addr))
+ /* Reset the mac with NULL address */
+ ether_addr_copy(&null_mac_addr, &vf->mac_addr);
+
+ /* Remove the mac */
+ i40e_vsi_delete_mac(vsi, mac_addr);
+
+ return 0;
+}
+
/* Set vlan strip on/off for specific VF from host */
int
rte_pmd_i40e_set_vf_vlan_stripq(uint16_t port, uint16_t vf_id, uint8_t on)
@@ -1530,6 +1573,11 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
return 1;
}
}
+ /* profile with group id 0xff is compatible with any other profile */
+ if ((pinfo->track_id & group_mask) == group_mask) {
+ rte_free(buff);
+ return 0;
+ }
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
if ((p->track_id & group_mask) == 0) {
@@ -1540,6 +1588,8 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec)
}
for (i = 0; i < p_list->p_count; i++) {
p = &p_list->p_info[i];
+ if ((p->track_id & group_mask) == group_mask)
+ continue;
if ((pinfo->track_id & group_mask) !=
(p->track_id & group_mask)) {
PMD_DRV_LOG(INFO, "Profile of different group exists.");
@@ -1603,8 +1653,6 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
return -EINVAL;
}
- i40e_update_customized_info(dev, buff, size);
-
/* Find metadata segment */
metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
pkg_hdr);
@@ -1661,6 +1709,7 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
PMD_DRV_LOG(ERR, "Profile of group 0 already exists.");
else if (is_exist == 3)
PMD_DRV_LOG(ERR, "Profile of different group already exists");
+ i40e_update_customized_info(dev, buff, size, op);
rte_free(profile_info_sec);
return -EEXIST;
}
@@ -1708,6 +1757,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff,
}
}
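+	/* Refresh the cached customized info to reflect the package write. */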
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD ||
+ op == RTE_PMD_I40E_PKG_OP_WR_DEL)
+ i40e_update_customized_info(dev, buff, size, op);
+
rte_free(profile_info_sec);
return status;
}
@@ -3071,6 +3124,7 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
{
struct rte_eth_dev *dev;
struct i40e_hw *hw;
+ struct i40e_pf *pf;
uint64_t inset_reg;
uint32_t mask_reg[2];
int i;
@@ -3086,10 +3140,12 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
return -EINVAL;
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- /* Clear mask first */
- for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
+ if (pf->support_multi_driver) {
+ PMD_DRV_LOG(ERR, "Input set configuration is not supported.");
+ return -ENOTSUP;
+ }
inset_reg = inset->inset;
for (i = 0; i < 2; i++)
@@ -3098,14 +3154,15 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
switch (inset_type) {
case INSET_HASH:
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
- (uint32_t)(inset_reg & UINT32_MAX));
- i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
- (uint32_t)((inset_reg >>
- I40E_32_BIT_WIDTH) & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
+ (uint32_t)(inset_reg & UINT32_MAX));
+ i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype),
+ (uint32_t)((inset_reg >>
+ I40E_32_BIT_WIDTH) & UINT32_MAX));
for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_HASH_MSK(i, pctype),
+ mask_reg[i]);
break;
case INSET_FDIR:
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
@@ -3114,8 +3171,9 @@ rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype,
(uint32_t)((inset_reg >>
I40E_32_BIT_WIDTH) & UINT32_MAX));
for (i = 0; i < 2; i++)
- i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
- mask_reg[i]);
+ i40e_check_write_global_reg(hw,
+ I40E_GLQF_FD_MSK(i, pctype),
+ mask_reg[i]);
break;
case INSET_FDIR_FLX:
i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype),
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
index d248adb1..be4a6024 100644
--- a/drivers/net/i40e/rte_pmd_i40e.h
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -456,6 +456,24 @@ int rte_pmd_i40e_set_vf_mac_addr(uint16_t port, uint16_t vf_id,
struct ether_addr *mac_addr);
/**
+ * Remove the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf_id* or *mac_addr* is invalid.
+ */
+int
+rte_pmd_i40e_remove_vf_mac_addr(uint16_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+/**
* Enable/Disable vf vlan strip for all queues in a pool
*
* @param port
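
A short usage sketch for the rte_pmd_i40e_remove_vf_mac_addr() API declared
above; the port id, VF id and MAC value below are assumptions chosen for
illustration only:

#include <rte_ether.h>
#include <rte_pmd_i40e.h>

static int
example_remove_vf_mac(void)
{
	uint16_t port_id = 0;	/* assumed i40e PF port */
	uint16_t vf_id = 1;	/* assumed VF index */
	struct ether_addr mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	/* returns 0, -ENODEV or -EINVAL as documented above */
	return rte_pmd_i40e_remove_vf_mac_addr(port_id, vf_id, &mac);
}
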
diff --git a/drivers/net/ifc/Makefile b/drivers/net/ifc/Makefile
new file mode 100644
index 00000000..39b36ae5
--- /dev/null
+++ b/drivers/net/ifc/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ifc.a
+
+LDLIBS += -lpthread
+LDLIBS += -lrte_eal -lrte_pci -lrte_vhost -lrte_bus_pci
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+#
+# Add extra flags for base driver source files to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+
+VPATH += $(SRCDIR)/base
+
+EXPORT_MAP := rte_pmd_ifc_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifcvf_vdpa.c
+SRCS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += ifcvf.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ifc/base/ifcvf.c b/drivers/net/ifc/base/ifcvf.c
new file mode 100644
index 00000000..4b22d9ed
--- /dev/null
+++ b/drivers/net/ifc/base/ifcvf.c
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "ifcvf.h"
+#include "ifcvf_osdep.h"
+
+STATIC void *
+get_cap_addr(struct ifcvf_hw *hw, struct ifcvf_pci_cap *cap)
+{
+ u8 bar = cap->bar;
+ u32 length = cap->length;
+ u32 offset = cap->offset;
+
+ if (bar > IFCVF_PCI_MAX_RESOURCE - 1) {
+ DEBUGOUT("invalid bar: %u\n", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ DEBUGOUT("offset(%u) + length(%u) overflows\n",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > hw->mem_resource[cap->bar].len) {
+ DEBUGOUT("offset(%u) + length(%u) overflows bar length(%u)",
+ offset, length, (u32)hw->mem_resource[cap->bar].len);
+ return NULL;
+ }
+
+ return hw->mem_resource[bar].addr + offset;
+}
+
+int
+ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev)
+{
+ int ret;
+ u8 pos;
+ struct ifcvf_pci_cap cap;
+
+ ret = PCI_READ_CONFIG_BYTE(dev, &pos, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ DEBUGOUT("failed to read pci capability list\n");
+ return -1;
+ }
+
+ while (pos) {
+ ret = PCI_READ_CONFIG_RANGE(dev, (u32 *)&cap,
+ sizeof(cap), pos);
+ if (ret < 0) {
+ DEBUGOUT("failed to read cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR)
+ goto next;
+
+ DEBUGOUT("cfg type: %u, bar: %u, offset: %u, "
+ "len: %u\n", cap.cfg_type, cap.bar,
+ cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case IFCVF_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cap_addr(hw, &cap);
+ break;
+ case IFCVF_PCI_CAP_NOTIFY_CFG:
+ PCI_READ_CONFIG_DWORD(dev, &hw->notify_off_multiplier,
+ pos + sizeof(cap));
+ hw->notify_base = get_cap_addr(hw, &cap);
+ hw->notify_region = cap.bar;
+ break;
+ case IFCVF_PCI_CAP_ISR_CFG:
+ hw->isr = get_cap_addr(hw, &cap);
+ break;
+ case IFCVF_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cap_addr(hw, &cap);
+ break;
+ }
+next:
+ pos = cap.cap_next;
+ }
+
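+ /* the LM (live migration) config region is exposed through BAR4 */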
+ hw->lm_cfg = hw->mem_resource[4].addr;
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->isr == NULL || hw->dev_cfg == NULL) {
+ DEBUGOUT("capability incomplete\n");
+ return -1;
+ }
+
+ DEBUGOUT("capability mapping:\ncommon cfg: %p\n"
+ "notify base: %p\nisr cfg: %p\ndevice cfg: %p\n"
+ "multiplier: %u\n",
+ hw->common_cfg, hw->notify_base,
+ hw->isr, hw->dev_cfg,
+ hw->notify_off_multiplier);
+
+ return 0;
+}
+
+STATIC u8
+ifcvf_get_status(struct ifcvf_hw *hw)
+{
+ return IFCVF_READ_REG8(&hw->common_cfg->device_status);
+}
+
+STATIC void
+ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
+{
+ IFCVF_WRITE_REG8(status, &hw->common_cfg->device_status);
+}
+
+STATIC void
+ifcvf_reset(struct ifcvf_hw *hw)
+{
+ ifcvf_set_status(hw, 0);
+
+ /* flush status write */
+ while (ifcvf_get_status(hw))
+ msec_delay(1);
+}
+
+STATIC void
+ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
+{
+ if (status != 0)
+ status |= ifcvf_get_status(hw);
+
+ ifcvf_set_status(hw, status);
+ ifcvf_get_status(hw);
+}
+
+u64
+ifcvf_get_features(struct ifcvf_hw *hw)
+{
+ u32 features_lo, features_hi;
+ struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG32(0, &cfg->device_feature_select);
+ features_lo = IFCVF_READ_REG32(&cfg->device_feature);
+
+ IFCVF_WRITE_REG32(1, &cfg->device_feature_select);
+ features_hi = IFCVF_READ_REG32(&cfg->device_feature);
+
+ return ((u64)features_hi << 32) | features_lo;
+}
+
+STATIC void
+ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+{
+ struct ifcvf_pci_common_cfg *cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG32(0, &cfg->guest_feature_select);
+ IFCVF_WRITE_REG32(features & ((1ULL << 32) - 1), &cfg->guest_feature);
+
+ IFCVF_WRITE_REG32(1, &cfg->guest_feature_select);
+ IFCVF_WRITE_REG32(features >> 32, &cfg->guest_feature);
+}
+
+STATIC int
+ifcvf_config_features(struct ifcvf_hw *hw)
+{
+ u64 host_features;
+
+ host_features = ifcvf_get_features(hw);
+ hw->req_features &= host_features;
+
+ ifcvf_set_features(hw, hw->req_features);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_FEATURES_OK);
+
+ if (!(ifcvf_get_status(hw) & IFCVF_CONFIG_STATUS_FEATURES_OK)) {
+ DEBUGOUT("failed to set FEATURES_OK status\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+STATIC void
+io_write64_twopart(u64 val, u32 *lo, u32 *hi)
+{
+ IFCVF_WRITE_REG32(val & ((1ULL << 32) - 1), lo);
+ IFCVF_WRITE_REG32(val >> 32, hi);
+}
+
+STATIC int
+ifcvf_hw_enable(struct ifcvf_hw *hw)
+{
+ struct ifcvf_pci_common_cfg *cfg;
+ u8 *lm_cfg;
+ u32 i;
+ u16 notify_off;
+
+ cfg = hw->common_cfg;
+ lm_cfg = hw->lm_cfg;
+
+ IFCVF_WRITE_REG16(0, &cfg->msix_config);
+ if (IFCVF_READ_REG16(&cfg->msix_config) == IFCVF_MSI_NO_VECTOR) {
+ DEBUGOUT("msix vec alloc failed for device config\n");
+ return -1;
+ }
+
+ for (i = 0; i < hw->nr_vring; i++) {
+ IFCVF_WRITE_REG16(i, &cfg->queue_select);
+ io_write64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ io_write64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ io_write64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+ IFCVF_WRITE_REG16(hw->vring[i].size, &cfg->queue_size);
+
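+ /* seed packed last_avail/last_used indexes in the LM region */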
+ *(u32 *)(lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+ (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4) =
+ (u32)hw->vring[i].last_avail_idx |
+ ((u32)hw->vring[i].last_used_idx << 16);
+
+ IFCVF_WRITE_REG16(i + 1, &cfg->queue_msix_vector);
+ if (IFCVF_READ_REG16(&cfg->queue_msix_vector) ==
+ IFCVF_MSI_NO_VECTOR) {
+ DEBUGOUT("queue %u, msix vec alloc failed\n",
+ i);
+ return -1;
+ }
+
+ notify_off = IFCVF_READ_REG16(&cfg->queue_notify_off);
+ hw->notify_addr[i] = (void *)((u8 *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+ IFCVF_WRITE_REG16(1, &cfg->queue_enable);
+ }
+
+ return 0;
+}
+
+STATIC void
+ifcvf_hw_disable(struct ifcvf_hw *hw)
+{
+ u32 i;
+ struct ifcvf_pci_common_cfg *cfg;
+ u32 ring_state;
+
+ cfg = hw->common_cfg;
+
+ IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->msix_config);
+ for (i = 0; i < hw->nr_vring; i++) {
+ IFCVF_WRITE_REG16(i, &cfg->queue_select);
+ IFCVF_WRITE_REG16(0, &cfg->queue_enable);
+ IFCVF_WRITE_REG16(IFCVF_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+ ring_state = *(u32 *)(hw->lm_cfg + IFCVF_LM_RING_STATE_OFFSET +
+ (i / 2) * IFCVF_LM_CFG_SIZE + (i % 2) * 4);
+ hw->vring[i].last_avail_idx = (u16)ring_state;
+ hw->vring[i].last_used_idx = (u16)(ring_state >> 16);
+ }
+}
+
+int
+ifcvf_start_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_reset(hw);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_ACK);
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER);
+
+ if (ifcvf_config_features(hw) < 0)
+ return -1;
+
+ if (ifcvf_hw_enable(hw) < 0)
+ return -1;
+
+ ifcvf_add_status(hw, IFCVF_CONFIG_STATUS_DRIVER_OK);
+ return 0;
+}
+
+void
+ifcvf_stop_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_hw_disable(hw);
+ ifcvf_reset(hw);
+}
+
+void
+ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
+{
+ IFCVF_WRITE_REG16(qid, hw->notify_addr[qid]);
+}
+
+u8
+ifcvf_get_notify_region(struct ifcvf_hw *hw)
+{
+ return hw->notify_region;
+}
+
+u64
+ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid)
+{
+ return (u8 *)hw->notify_addr[qid] -
+ (u8 *)hw->mem_resource[hw->notify_region].addr;
+}
diff --git a/drivers/net/ifc/base/ifcvf.h b/drivers/net/ifc/base/ifcvf.h
new file mode 100644
index 00000000..badacb61
--- /dev/null
+++ b/drivers/net/ifc/base/ifcvf.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IFCVF_H_
+#define _IFCVF_H_
+
+#include "ifcvf_osdep.h"
+
+#define IFCVF_VENDOR_ID 0x1AF4
+#define IFCVF_DEVICE_ID 0x1041
+#define IFCVF_SUBSYS_VENDOR_ID 0x8086
+#define IFCVF_SUBSYS_DEVICE_ID 0x001A
+
+#define IFCVF_MAX_QUEUES 1
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* Common configuration */
+#define IFCVF_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define IFCVF_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define IFCVF_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define IFCVF_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define IFCVF_PCI_CAP_PCI_CFG 5
+
+#define IFCVF_CONFIG_STATUS_RESET 0x00
+#define IFCVF_CONFIG_STATUS_ACK 0x01
+#define IFCVF_CONFIG_STATUS_DRIVER 0x02
+#define IFCVF_CONFIG_STATUS_DRIVER_OK 0x04
+#define IFCVF_CONFIG_STATUS_FEATURES_OK 0x08
+#define IFCVF_CONFIG_STATUS_FAILED 0x80
+
+#define IFCVF_MSI_NO_VECTOR 0xffff
+#define IFCVF_PCI_MAX_RESOURCE 6
+
+#define IFCVF_LM_CFG_SIZE 0x40
+#define IFCVF_LM_RING_STATE_OFFSET 0x20
+
+#define IFCVF_LM_LOGGING_CTRL 0x0
+
+#define IFCVF_LM_BASE_ADDR_LOW 0x10
+#define IFCVF_LM_BASE_ADDR_HIGH 0x14
+#define IFCVF_LM_END_ADDR_LOW 0x18
+#define IFCVF_LM_END_ADDR_HIGH 0x1c
+
+#define IFCVF_LM_DISABLE 0x0
+#define IFCVF_LM_ENABLE_VF 0x1
+#define IFCVF_LM_ENABLE_PF 0x3
+
+#define IFCVF_32_BIT_MASK 0xffffffff
+
+
+struct ifcvf_pci_cap {
+ u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ u8 cap_next; /* Generic PCI field: next ptr. */
+ u8 cap_len; /* Generic PCI field: capability length */
+ u8 cfg_type; /* Identifies the structure. */
+ u8 bar; /* Where to find it. */
+ u8 padding[3]; /* Pad to full dword. */
+ u32 offset; /* Offset within bar. */
+ u32 length; /* Length of the structure, in bytes. */
+};
+
+struct ifcvf_pci_notify_cap {
+ struct ifcvf_pci_cap cap;
+ u32 notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+struct ifcvf_pci_common_cfg {
+ /* About the whole device. */
+ u32 device_feature_select;
+ u32 device_feature;
+ u32 guest_feature_select;
+ u32 guest_feature;
+ u16 msix_config;
+ u16 num_queues;
+ u8 device_status;
+ u8 config_generation;
+
+ /* About a specific virtqueue. */
+ u16 queue_select;
+ u16 queue_size;
+ u16 queue_msix_vector;
+ u16 queue_enable;
+ u16 queue_notify_off;
+ u32 queue_desc_lo;
+ u32 queue_desc_hi;
+ u32 queue_avail_lo;
+ u32 queue_avail_hi;
+ u32 queue_used_lo;
+ u32 queue_used_hi;
+};
+
+struct ifcvf_net_config {
+ u8 mac[6];
+ u16 status;
+ u16 max_virtqueue_pairs;
+} __attribute__((packed));
+
+struct ifcvf_pci_mem_resource {
+ u64 phys_addr; /**< Physical address, 0 if no resource. */
+ u64 len; /**< Length of the resource. */
+ u8 *addr; /**< Virtual address, NULL when not mapped. */
+};
+
+struct vring_info {
+ u64 desc;
+ u64 avail;
+ u64 used;
+ u16 size;
+ u16 last_avail_idx;
+ u16 last_used_idx;
+};
+
+struct ifcvf_hw {
+ u64 req_features;
+ u8 notify_region;
+ u32 notify_off_multiplier;
+ struct ifcvf_pci_common_cfg *common_cfg;
+ struct ifcvf_net_config *dev_cfg;
+ u8 *isr;
+ u16 *notify_base;
+ u16 *notify_addr[IFCVF_MAX_QUEUES * 2];
+ u8 *lm_cfg;
+ struct vring_info vring[IFCVF_MAX_QUEUES * 2];
+ u8 nr_vring;
+ struct ifcvf_pci_mem_resource mem_resource[IFCVF_PCI_MAX_RESOURCE];
+};
+
+int
+ifcvf_init_hw(struct ifcvf_hw *hw, PCI_DEV *dev);
+
+u64
+ifcvf_get_features(struct ifcvf_hw *hw);
+
+int
+ifcvf_start_hw(struct ifcvf_hw *hw);
+
+void
+ifcvf_stop_hw(struct ifcvf_hw *hw);
+
+void
+ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
+
+u8
+ifcvf_get_notify_region(struct ifcvf_hw *hw);
+
+u64
+ifcvf_get_queue_notify_off(struct ifcvf_hw *hw, int qid);
+
+#endif /* _IFCVF_H_ */
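
Per the virtio 1.0 notification capability, a queue's doorbell address is
notify_base plus queue_notify_off scaled by notify_off_multiplier. A sketch
of the arithmetic the base code uses when filling hw->notify_addr[]:

/* sketch: locate a queue's doorbell within the notify capability region */
static u16 *
example_queue_doorbell(struct ifcvf_hw *hw, u16 notify_off)
{
	return (u16 *)((u8 *)hw->notify_base +
			(u32)notify_off * hw->notify_off_multiplier);
}
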
diff --git a/drivers/net/ifc/base/ifcvf_osdep.h b/drivers/net/ifc/base/ifcvf_osdep.h
new file mode 100644
index 00000000..cf151ef5
--- /dev/null
+++ b/drivers/net/ifc/base/ifcvf_osdep.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _IFCVF_OSDEP_H_
+#define _IFCVF_OSDEP_H_
+
+#include <stdint.h>
+#include <linux/pci_regs.h>
+
+#include <rte_cycles.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#define DEBUGOUT(S, args...) RTE_LOG(DEBUG, PMD, S, ##args)
+#define STATIC static
+
+#define msec_delay rte_delay_ms
+
+#define IFCVF_READ_REG8(reg) rte_read8(reg)
+#define IFCVF_WRITE_REG8(val, reg) rte_write8((val), (reg))
+#define IFCVF_READ_REG16(reg) rte_read16(reg)
+#define IFCVF_WRITE_REG16(val, reg) rte_write16((val), (reg))
+#define IFCVF_READ_REG32(reg) rte_read32(reg)
+#define IFCVF_WRITE_REG32(val, reg) rte_write32((val), (reg))
+
+typedef struct rte_pci_device PCI_DEV;
+
+#define PCI_READ_CONFIG_BYTE(dev, val, where) \
+ rte_pci_read_config(dev, val, 1, where)
+
+#define PCI_READ_CONFIG_DWORD(dev, val, where) \
+ rte_pci_read_config(dev, val, 4, where)
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef int16_t s16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef int64_t s64;
+typedef uint64_t u64;
+
+static inline int
+PCI_READ_CONFIG_RANGE(PCI_DEV *dev, uint32_t *val, int size, int where)
+{
+ return rte_pci_read_config(dev, val, size, where);
+}
+
+#endif /* _IFCVF_OSDEP_H_ */
diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c
new file mode 100644
index 00000000..88d81403
--- /dev/null
+++ b/drivers/net/ifc/ifcvf_vdpa.c
@@ -0,0 +1,793 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_bus_pci.h>
+#include <rte_vhost.h>
+#include <rte_vdpa.h>
+#include <rte_vfio.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "base/ifcvf.h"
+
+#define DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+static int ifcvf_vdpa_logtype;
+
+struct ifcvf_internal {
+ struct rte_vdpa_dev_addr dev_addr;
+ struct rte_pci_device *pdev;
+ struct ifcvf_hw hw;
+ int vfio_container_fd;
+ int vfio_group_fd;
+ int vfio_dev_fd;
+ pthread_t tid; /* thread for notify relay */
+ int epfd;
+ int vid;
+ int did;
+ uint16_t max_queues;
+ uint64_t features;
+ rte_atomic32_t started;
+ rte_atomic32_t dev_attached;
+ rte_atomic32_t running;
+ rte_spinlock_t lock;
+};
+
+struct internal_list {
+ TAILQ_ENTRY(internal_list) next;
+ struct ifcvf_internal *internal;
+};
+
+TAILQ_HEAD(internal_list_head, internal_list);
+static struct internal_list_head internal_list =
+ TAILQ_HEAD_INITIALIZER(internal_list);
+
+static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static struct internal_list *
+find_internal_resource_by_did(int did)
+{
+ int found = 0;
+ struct internal_list *list;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ if (did == list->internal->did) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static struct internal_list *
+find_internal_resource_by_dev(struct rte_pci_device *pdev)
+{
+ int found = 0;
+ struct internal_list *list;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ if (pdev == list->internal->pdev) {
+ found = 1;
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ if (!found)
+ return NULL;
+
+ return list;
+}
+
+static int
+ifcvf_vfio_setup(struct ifcvf_internal *internal)
+{
+ struct rte_pci_device *dev = internal->pdev;
+ char devname[RTE_DEV_NAME_MAX_LEN] = {0};
+ int iommu_group_num;
+ int ret = 0;
+ int i;
+
+ internal->vfio_dev_fd = -1;
+ internal->vfio_group_fd = -1;
+ internal->vfio_container_fd = -1;
+
+ rte_pci_device_name(&dev->addr, devname, RTE_DEV_NAME_MAX_LEN);
+ rte_vfio_get_group_num(rte_pci_get_sysfs_path(), devname,
+ &iommu_group_num);
+
+ internal->vfio_container_fd = rte_vfio_container_create();
+ if (internal->vfio_container_fd < 0)
+ return -1;
+
+ internal->vfio_group_fd = rte_vfio_container_group_bind(
+ internal->vfio_container_fd, iommu_group_num);
+ if (internal->vfio_group_fd < 0)
+ goto err;
+
+ if (rte_pci_map_device(dev))
+ goto err;
+
+ internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;
+
+ for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
+ i++) {
+ internal->hw.mem_resource[i].addr =
+ internal->pdev->mem_resource[i].addr;
+ internal->hw.mem_resource[i].phys_addr =
+ internal->pdev->mem_resource[i].phys_addr;
+ internal->hw.mem_resource[i].len =
+ internal->pdev->mem_resource[i].len;
+ }
+ ret = ifcvf_init_hw(&internal->hw, internal->pdev);
+
+ return ret;
+
+err:
+ rte_vfio_container_destroy(internal->vfio_container_fd);
+ return -1;
+}
+
+static int
+ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
+{
+ uint32_t i;
+ int ret;
+ struct rte_vhost_memory *mem = NULL;
+ int vfio_container_fd;
+
+ ret = rte_vhost_get_mem_table(internal->vid, &mem);
+ if (ret < 0) {
+ DRV_LOG(ERR, "failed to get VM memory layout.");
+ goto exit;
+ }
+
+ vfio_container_fd = internal->vfio_container_fd;
+
+ for (i = 0; i < mem->nregions; i++) {
+ struct rte_vhost_mem_region *reg;
+
+ reg = &mem->regions[i];
+ DRV_LOG(INFO, "%s, region %u: HVA 0x%" PRIx64 ", "
+ "GPA 0x%" PRIx64 ", size 0x%" PRIx64 ".",
+ do_map ? "DMA map" : "DMA unmap", i,
+ reg->host_user_addr, reg->guest_phys_addr, reg->size);
+
+ if (do_map) {
+ ret = rte_vfio_container_dma_map(vfio_container_fd,
+ reg->host_user_addr, reg->guest_phys_addr,
+ reg->size);
+ if (ret < 0) {
+ DRV_LOG(ERR, "DMA map failed.");
+ goto exit;
+ }
+ } else {
+ ret = rte_vfio_container_dma_unmap(vfio_container_fd,
+ reg->host_user_addr, reg->guest_phys_addr,
+ reg->size);
+ if (ret < 0) {
+ DRV_LOG(ERR, "DMA unmap failed.");
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ if (mem)
+ free(mem);
+ return ret;
+}
+
+static uint64_t
+qva_to_gpa(int vid, uint64_t qva)
+{
+ struct rte_vhost_memory *mem = NULL;
+ struct rte_vhost_mem_region *reg;
+ uint32_t i;
+ uint64_t gpa = 0;
+
+ if (rte_vhost_get_mem_table(vid, &mem) < 0)
+ goto exit;
+
+ for (i = 0; i < mem->nregions; i++) {
+ reg = &mem->regions[i];
+
+ if (qva >= reg->host_user_addr &&
+ qva < reg->host_user_addr + reg->size) {
+ gpa = qva - reg->host_user_addr + reg->guest_phys_addr;
+ break;
+ }
+ }
+
+exit:
+ if (mem)
+ free(mem);
+ return gpa;
+}
+
+static int
+vdpa_ifcvf_start(struct ifcvf_internal *internal)
+{
+ struct ifcvf_hw *hw = &internal->hw;
+ int i, nr_vring;
+ int vid;
+ struct rte_vhost_vring vq;
+ uint64_t gpa;
+
+ vid = internal->vid;
+ nr_vring = rte_vhost_get_vring_num(vid);
+ rte_vhost_get_negotiated_features(vid, &hw->req_features);
+
+ for (i = 0; i < nr_vring; i++) {
+ rte_vhost_get_vhost_vring(vid, i, &vq);
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
+ return -1;
+ }
+ hw->vring[i].desc = gpa;
+
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for available ring.");
+ return -1;
+ }
+ hw->vring[i].avail = gpa;
+
+ gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+ if (gpa == 0) {
+ DRV_LOG(ERR, "Fail to get GPA for used ring.");
+ return -1;
+ }
+ hw->vring[i].used = gpa;
+
+ hw->vring[i].size = vq.size;
+ rte_vhost_get_vring_base(vid, i, &hw->vring[i].last_avail_idx,
+ &hw->vring[i].last_used_idx);
+ }
+ hw->nr_vring = i;
+
+ return ifcvf_start_hw(&internal->hw);
+}
+
+static void
+vdpa_ifcvf_stop(struct ifcvf_internal *internal)
+{
+ struct ifcvf_hw *hw = &internal->hw;
+ uint32_t i;
+ int vid;
+
+ vid = internal->vid;
+ ifcvf_stop_hw(hw);
+
+ for (i = 0; i < hw->nr_vring; i++)
+ rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
+ hw->vring[i].last_used_idx);
+}
+
+#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
+ sizeof(int) * (IFCVF_MAX_QUEUES * 2 + 1))
+static int
+vdpa_enable_vfio_intr(struct ifcvf_internal *internal)
+{
+ int ret;
+ uint32_t i, nr_vring;
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+ int *fd_ptr;
+ struct rte_vhost_vring vring;
+
+ nr_vring = rte_vhost_get_vring_num(internal->vid);
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = nr_vring + 1;
+ irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
+ VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+ fd_ptr = (int *)&irq_set->data;
+ fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;
+
+ for (i = 0; i < nr_vring; i++) {
+ rte_vhost_get_vhost_vring(internal->vid, i, &vring);
+ fd_ptr[RTE_INTR_VEC_RXTX_OFFSET + i] = vring.callfd;
+ }
+
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DRV_LOG(ERR, "Error enabling MSI-X interrupts: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+vdpa_disable_vfio_intr(struct ifcvf_internal *internal)
+{
+ int ret;
+ char irq_set_buf[MSIX_IRQ_SET_BUF_LEN];
+ struct vfio_irq_set *irq_set;
+
+ irq_set = (struct vfio_irq_set *)irq_set_buf;
+ irq_set->argsz = sizeof(irq_set_buf);
+ irq_set->count = 0;
+ irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
+ irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
+ irq_set->start = 0;
+
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
+ if (ret) {
+ DRV_LOG(ERR, "Error disabling MSI-X interrupts: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static void *
+notify_relay(void *arg)
+{
+ int i, kickfd, epfd, nfds = 0;
+ uint32_t qid, q_num;
+ struct epoll_event events[IFCVF_MAX_QUEUES * 2];
+ struct epoll_event ev;
+ uint64_t buf;
+ int nbytes;
+ struct rte_vhost_vring vring;
+ struct ifcvf_internal *internal = (struct ifcvf_internal *)arg;
+ struct ifcvf_hw *hw = &internal->hw;
+
+ q_num = rte_vhost_get_vring_num(internal->vid);
+
+ epfd = epoll_create(IFCVF_MAX_QUEUES * 2);
+ if (epfd < 0) {
+ DRV_LOG(ERR, "failed to create epoll instance.");
+ return NULL;
+ }
+ internal->epfd = epfd;
+
+ for (qid = 0; qid < q_num; qid++) {
+ ev.events = EPOLLIN | EPOLLPRI;
+ rte_vhost_get_vhost_vring(internal->vid, qid, &vring);
+ ev.data.u64 = qid | (uint64_t)vring.kickfd << 32;
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, vring.kickfd, &ev) < 0) {
+ DRV_LOG(ERR, "epoll add error: %s", strerror(errno));
+ return NULL;
+ }
+ }
+
+ for (;;) {
+ nfds = epoll_wait(epfd, events, q_num, -1);
+ if (nfds < 0) {
+ if (errno == EINTR)
+ continue;
+ DRV_LOG(ERR, "epoll_wait return fail\n");
+ return NULL;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ qid = events[i].data.u32;
+ kickfd = (uint32_t)(events[i].data.u64 >> 32);
+ do {
+ nbytes = read(kickfd, &buf, 8);
+ if (nbytes < 0) {
+ if (errno == EINTR ||
+ errno == EWOULDBLOCK ||
+ errno == EAGAIN)
+ continue;
+ DRV_LOG(INFO, "Error reading "
+ "kickfd: %s",
+ strerror(errno));
+ }
+ break;
+ } while (1);
+
+ ifcvf_notify_queue(hw, qid);
+ }
+ }
+
+ return NULL;
+}
+
+static int
+setup_notify_relay(struct ifcvf_internal *internal)
+{
+ int ret;
+
+ ret = pthread_create(&internal->tid, NULL, notify_relay,
+ (void *)internal);
+ if (ret) {
+ DRV_LOG(ERR, "failed to create notify relay pthread.");
+ return -1;
+ }
+ return 0;
+}
+
+static int
+unset_notify_relay(struct ifcvf_internal *internal)
+{
+ void *status;
+
+ if (internal->tid) {
+ pthread_cancel(internal->tid);
+ pthread_join(internal->tid, &status);
+ }
+ internal->tid = 0;
+
+ if (internal->epfd >= 0)
+ close(internal->epfd);
+ internal->epfd = -1;
+
+ return 0;
+}
+
+static int
+update_datapath(struct ifcvf_internal *internal)
+{
+ int ret;
+
+ rte_spinlock_lock(&internal->lock);
+
+ if (!rte_atomic32_read(&internal->running) &&
+ (rte_atomic32_read(&internal->started) &&
+ rte_atomic32_read(&internal->dev_attached))) {
+ ret = ifcvf_dma_map(internal, 1);
+ if (ret)
+ goto err;
+
+ ret = vdpa_enable_vfio_intr(internal);
+ if (ret)
+ goto err;
+
+ ret = setup_notify_relay(internal);
+ if (ret)
+ goto err;
+
+ ret = vdpa_ifcvf_start(internal);
+ if (ret)
+ goto err;
+
+ rte_atomic32_set(&internal->running, 1);
+ } else if (rte_atomic32_read(&internal->running) &&
+ (!rte_atomic32_read(&internal->started) ||
+ !rte_atomic32_read(&internal->dev_attached))) {
+ vdpa_ifcvf_stop(internal);
+
+ ret = unset_notify_relay(internal);
+ if (ret)
+ goto err;
+
+ ret = vdpa_disable_vfio_intr(internal);
+ if (ret)
+ goto err;
+
+ ret = ifcvf_dma_map(internal, 0);
+ if (ret)
+ goto err;
+
+ rte_atomic32_set(&internal->running, 0);
+ }
+
+ rte_spinlock_unlock(&internal->lock);
+ return 0;
+err:
+ rte_spinlock_unlock(&internal->lock);
+ return ret;
+}
+
+static int
+ifcvf_dev_config(int vid)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+ internal->vid = vid;
+ rte_atomic32_set(&internal->dev_attached, 1);
+ update_datapath(internal);
+
+ return 0;
+}
+
+static int
+ifcvf_dev_close(int vid)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+ rte_atomic32_set(&internal->dev_attached, 0);
+ update_datapath(internal);
+
+ return 0;
+}
+
+static int
+ifcvf_get_vfio_group_fd(int vid)
+{
+ int did;
+ struct internal_list *list;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ return list->internal->vfio_group_fd;
+}
+
+static int
+ifcvf_get_vfio_device_fd(int vid)
+{
+ int did;
+ struct internal_list *list;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ return list->internal->vfio_dev_fd;
+}
+
+static int
+ifcvf_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+{
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ int ret;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+
+ reg.index = ifcvf_get_notify_region(&internal->hw);
+ ret = ioctl(internal->vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+ if (ret) {
+ DRV_LOG(ERR, "Get not get device region info: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ *offset = ifcvf_get_queue_notify_off(&internal->hw, qid) + reg.offset;
+ *size = 0x1000;
+
+ return 0;
+}
+
+static int
+ifcvf_get_queue_num(int did, uint32_t *queue_num)
+{
+ struct internal_list *list;
+
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ *queue_num = list->internal->max_queues;
+
+ return 0;
+}
+
+static int
+ifcvf_get_vdpa_features(int did, uint64_t *features)
+{
+ struct internal_list *list;
+
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ *features = list->internal->features;
+
+ return 0;
+}
+
+#define VDPA_SUPPORTED_PROTOCOL_FEATURES \
+ (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK | \
+ 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
+ 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
+ 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
+ 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)
+static int
+ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
+{
+ *features = VDPA_SUPPORTED_PROTOCOL_FEATURES;
+ return 0;
+}
+
+struct rte_vdpa_dev_ops ifcvf_ops = {
+ .get_queue_num = ifcvf_get_queue_num,
+ .get_features = ifcvf_get_vdpa_features,
+ .get_protocol_features = ifcvf_get_protocol_features,
+ .dev_conf = ifcvf_dev_config,
+ .dev_close = ifcvf_dev_close,
+ .set_vring_state = NULL,
+ .set_features = NULL,
+ .migration_done = NULL,
+ .get_vfio_group_fd = ifcvf_get_vfio_group_fd,
+ .get_vfio_device_fd = ifcvf_get_vfio_device_fd,
+ .get_notify_area = ifcvf_get_notify_area,
+};
+
+static int
+ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ uint64_t features;
+ struct ifcvf_internal *internal = NULL;
+ struct internal_list *list = NULL;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ list = rte_zmalloc("ifcvf", sizeof(*list), 0);
+ if (list == NULL)
+ goto error;
+
+ internal = rte_zmalloc("ifcvf", sizeof(*internal), 0);
+ if (internal == NULL)
+ goto error;
+
+ internal->pdev = pci_dev;
+ rte_spinlock_init(&internal->lock);
+ if (ifcvf_vfio_setup(internal) < 0)
+ return -1;
+
+ internal->max_queues = IFCVF_MAX_QUEUES;
+ features = ifcvf_get_features(&internal->hw);
+ internal->features = (features &
+ ~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+
+ internal->dev_addr.pci_addr = pci_dev->addr;
+ internal->dev_addr.type = PCI_ADDR;
+ list->internal = internal;
+
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_INSERT_TAIL(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+
+ internal->did = rte_vdpa_register_device(&internal->dev_addr,
+ &ifcvf_ops);
+ if (internal->did < 0)
+ goto error;
+
+ rte_atomic32_set(&internal->started, 1);
+ update_datapath(internal);
+
+ return 0;
+
+error:
+ rte_free(list);
+ rte_free(internal);
+ return -1;
+}
+
+static int
+ifcvf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct ifcvf_internal *internal;
+ struct internal_list *list;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ list = find_internal_resource_by_dev(pci_dev);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device: %s", pci_dev->name);
+ return -1;
+ }
+
+ internal = list->internal;
+ rte_atomic32_set(&internal->started, 0);
+ update_datapath(internal);
+
+ rte_pci_unmap_device(internal->pdev);
+ rte_vfio_container_destroy(internal->vfio_container_fd);
+ rte_vdpa_unregister_device(internal->did);
+
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_REMOVE(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+
+ rte_free(list);
+ rte_free(internal);
+
+ return 0;
+}
+
+/*
+ * IFCVF has the same vendor ID and device ID as the virtio net PCI
+ * device, but is distinguished by its own subsystem vendor and device IDs.
+ */
+static const struct rte_pci_id pci_id_ifcvf_map[] = {
+ { .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = IFCVF_VENDOR_ID,
+ .device_id = IFCVF_DEVICE_ID,
+ .subsystem_vendor_id = IFCVF_SUBSYS_VENDOR_ID,
+ .subsystem_device_id = IFCVF_SUBSYS_DEVICE_ID,
+ },
+
+ { .vendor_id = 0, /* sentinel */
+ },
+};
+
+static struct rte_pci_driver rte_ifcvf_vdpa = {
+ .id_table = pci_id_ifcvf_map,
+ .drv_flags = 0,
+ .probe = ifcvf_pci_probe,
+ .remove = ifcvf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_ifcvf, rte_ifcvf_vdpa);
+RTE_PMD_REGISTER_PCI_TABLE(net_ifcvf, pci_id_ifcvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ifcvf, "* vfio-pci");
+
+RTE_INIT(ifcvf_vdpa_init_log)
+{
+ ifcvf_vdpa_logtype = rte_log_register("pmd.net.ifcvf_vdpa");
+ if (ifcvf_vdpa_logtype >= 0)
+ rte_log_set_level(ifcvf_vdpa_logtype, RTE_LOG_NOTICE);
+}
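
The notify relay thread above converts guest kicks (vhost eventfds) into
MMIO doorbell writes. A stripped-down sketch of a single relay step,
assuming <unistd.h> and <errno.h> are included and the kickfd/qid pair was
registered with epoll as in notify_relay():

/* one relay step: drain the kick eventfd, then ring the HW doorbell */
static void
example_relay_one_kick(struct ifcvf_hw *hw, int kickfd, u16 qid)
{
	uint64_t cnt;

	/* an eventfd read returns an 8-byte counter; retry on EINTR */
	while (read(kickfd, &cnt, sizeof(cnt)) < 0 && errno == EINTR)
		;
	ifcvf_notify_queue(hw, qid); /* MMIO write of qid to the doorbell */
}
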
diff --git a/drivers/net/ifc/meson.build b/drivers/net/ifc/meson.build
new file mode 100644
index 00000000..72df070a
--- /dev/null
+++ b/drivers/net/ifc/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VHOST')
+allow_experimental_apis = true
+sources = files('ifcvf_vdpa.c', 'base/ifcvf.c')
+includes += include_directories('base')
+deps += 'vhost'
diff --git a/drivers/net/ifc/rte_pmd_ifc_version.map b/drivers/net/ifc/rte_pmd_ifc_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/net/ifc/rte_pmd_ifc_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index d0804fc5..7b6af353 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -20,9 +20,10 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
-CFLAGS_ixgbe_rxtx.o += -wd3656
+CFLAGS_ixgbe_rxtx.o += -diag-disable 3656
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
@@ -103,6 +104,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ipsec.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf_representor.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 44832585..26b19273 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -20,7 +20,6 @@
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
@@ -60,9 +59,6 @@
*/
#define IXGBE_FC_LO 0x40
-/* Default minimum inter-interrupt interval for EITR configuration */
-#define IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT 0x79E
-
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
@@ -101,8 +97,6 @@
#define IXGBE_QUEUE_STAT_COUNTERS (sizeof(hw_stats->qprc) / sizeof(hw_stats->qprc[0]))
-#define IXGBE_HKEY_MAX_INDEX 10
-
/* Additional timesync values. */
#define NSEC_PER_SEC 1000000000L
#define IXGBE_INCVAL_10GB 0x66666666
@@ -118,7 +112,6 @@
#define IXGBE_VT_CTL_POOLING_MODE_MASK 0x00030000
#define IXGBE_VT_CTL_POOLING_MODE_ETAG 0x00010000
-#define DEFAULT_ETAG_ETYPE 0x893f
#define IXGBE_ETAG_ETYPE 0x00005084
#define IXGBE_ETAG_ETYPE_MASK 0x0000ffff
#define IXGBE_ETAG_ETYPE_VALID 0x80000000
@@ -133,7 +126,7 @@
#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
-static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
@@ -196,6 +189,9 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
uint16_t queue, bool on);
static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
int on);
+static void ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev,
+ int mask);
+static int ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
@@ -228,7 +224,7 @@ static void ixgbe_dev_interrupt_delayed_handler(void *param);
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
+static int ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
static bool is_device_supported(struct rte_eth_dev *dev,
@@ -244,8 +240,8 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static int ixgbevf_dev_reset(struct rte_eth_dev *dev);
-static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
+static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -253,6 +249,7 @@ static int ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
uint16_t queue, int on);
+static int ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
@@ -286,7 +283,7 @@ static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
-static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+static int ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
@@ -328,6 +325,11 @@ static int ixgbe_get_eeprom(struct rte_eth_dev *dev,
static int ixgbe_set_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom);
+static int ixgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+static int ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+
static int ixgbevf_get_reg_length(struct rte_eth_dev *dev);
static int ixgbevf_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs);
@@ -565,6 +567,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.get_eeprom_length = ixgbe_get_eeprom_length,
.get_eeprom = ixgbe_get_eeprom,
.set_eeprom = ixgbe_set_eeprom,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
.get_dcb_info = ixgbe_dev_get_dcb_info,
.timesync_adjust_time = ixgbe_timesync_adjust_time,
.timesync_read_time = ixgbe_timesync_read_time,
@@ -787,58 +791,6 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
#define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
sizeof(rte_ixgbevf_stats_strings[0]))
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
/*
* This function is the same as ixgbe_is_sfp() in base/ixgbe.h.
*/
@@ -1096,7 +1048,7 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
* It returns 0 on success.
*/
static int
-eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
+eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1339,6 +1291,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
+ int retries = 0;
+ int ret;
PMD_INIT_FUNC_TRACE();
@@ -1359,8 +1313,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
- rte_intr_callback_unregister(intr_handle,
- ixgbe_dev_interrupt_handler, eth_dev);
+
+ do {
+ ret = rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
+ if (ret >= 0) {
+ break;
+ } else if (ret != -EAGAIN) {
+ PMD_INIT_LOG(ERR,
+ "intr callback unregister failed: %d",
+ ret);
+ return ret;
+ }
+ rte_delay_ms(100);
+ } while (retries++ < (10 + IXGBE_LINK_UP_TIME));
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
@@ -1522,7 +1488,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
}
l2_tn_info->e_tag_en = FALSE;
l2_tn_info->e_tag_fwd_en = FALSE;
- l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+ l2_tn_info->e_tag_ether_type = ETHER_TYPE_ETAG;
return 0;
}
@@ -1641,7 +1607,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
ixgbevf_dev_stats_reset(eth_dev);
/* Disable the interrupts for VF */
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(eth_dev);
hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
diag = hw->mac.ops.reset_hw(hw);
@@ -1710,7 +1676,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
rte_intr_callback_register(intr_handle,
ixgbevf_dev_interrupt_handler, eth_dev);
rte_intr_enable(intr_handle);
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(eth_dev);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
@@ -1743,7 +1709,7 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = NULL;
/* Disable the interrupts for VF */
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(eth_dev);
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -1755,16 +1721,81 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
-{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+static int
+eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *pf_ethdev;
+ struct rte_eth_devargs eth_da;
+ int i, retval;
+
+ if (pci_dev->device.devargs) {
+ retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
+ &eth_da);
+ if (retval)
+ return retval;
+ } else
+ memset(&eth_da, 0, sizeof(eth_da));
+
+ retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
+ sizeof(struct ixgbe_adapter),
+ eth_dev_pci_specific_init, pci_dev,
+ eth_ixgbe_dev_init, NULL);
+
+ if (retval || eth_da.nb_representor_ports < 1)
+ return retval;
+
+ pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (pf_ethdev == NULL)
+ return -ENODEV;
+
+ /* probe VF representor ports */
+ for (i = 0; i < eth_da.nb_representor_ports; i++) {
+ struct ixgbe_vf_info *vfinfo;
+ struct ixgbe_vf_representor representor;
+
+ vfinfo = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ pf_ethdev->data->dev_private);
+ if (vfinfo == NULL) {
+ PMD_DRV_LOG(ERR,
+ "no virtual functions supported by PF");
+ break;
+ }
+
+ representor.vf_id = eth_da.representor_ports[i];
+ representor.switch_domain_id = vfinfo->switch_domain_id;
+ representor.pf_ethdev = pf_ethdev;
+
+ /* representor port name: net_<pci bdf>_representor_<vf id> */
+ snprintf(name, sizeof(name), "net_%s_representor_%d",
+ pci_dev->device.name,
+ eth_da.representor_ports[i]);
+
+ retval = rte_eth_dev_create(&pci_dev->device, name,
+ sizeof(struct ixgbe_vf_representor), NULL, NULL,
+ ixgbe_vf_representor_init, &representor);
+
+ if (retval)
+ PMD_DRV_LOG(ERR, "failed to create ixgbe vf "
+ "representor %s.", name);
+ }
+
+ return 0;
}
static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
+ struct rte_eth_dev *ethdev;
+
+ ethdev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (!ethdev)
+ return -ENODEV;
+
+ if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
+ return rte_eth_dev_destroy(ethdev, ixgbe_vf_representor_uninit);
+ else
+ return rte_eth_dev_destroy(ethdev, eth_ixgbe_dev_uninit);
}
static struct rte_pci_driver rte_ixgbe_pmd = {
@@ -1947,10 +1978,13 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
rxq = dev->data->rx_queues[queue];
- if (on)
+ if (on) {
rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- else
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ } else {
rxq->vlan_flags = PKT_RX_VLAN;
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
}
static void
@@ -2001,64 +2035,6 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue)
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
-void
-ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ctrl;
- uint16_t i;
- struct ixgbe_rx_queue *rxq;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl &= ~IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- } else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
- /* record those setting for HW strip per queue */
- ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
- }
- }
-}
-
-void
-ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev)
-{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t ctrl;
- uint16_t i;
- struct ixgbe_rx_queue *rxq;
-
- PMD_INIT_FUNC_TRACE();
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- ctrl |= IXGBE_VLNCTRL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- } else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxq = dev->data->rx_queues[i];
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
-
- /* record those setting for HW strip per queue */
- ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
- }
- }
-}
-
static void
ixgbe_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
@@ -2114,25 +2090,93 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
*/
}
-static int
-ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+void
+ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint32_t ctrl;
+ uint16_t i;
+ struct ixgbe_rx_queue *rxq;
+ bool on;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl |= IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ } else {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ ctrl &= ~IXGBE_VLNCTRL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+ }
+ } else {
+ /*
+ * On other 10G NICs, VLAN stripping can be set up
+ * per queue via the RXDCTL register
+ */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ ctrl |= IXGBE_RXDCTL_VME;
+ on = TRUE;
+ } else {
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ on = FALSE;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
+
+ /* record the per-queue HW strip setting */
+ ixgbe_vlan_hw_strip_bitmap_set(dev, i, on);
+ }
+ }
+}
+
+static void
+ixgbe_config_vlan_strip_on_all_queues(struct rte_eth_dev *dev, int mask)
{
+ uint16_t i;
+ struct rte_eth_rxmode *rxmode;
+ struct ixgbe_rx_queue *rxq;
+
if (mask & ETH_VLAN_STRIP_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- ixgbe_vlan_hw_strip_enable_all(dev);
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
else
- ixgbe_vlan_hw_strip_disable_all(dev);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
+ }
+}
+
+static int
+ixgbe_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ rxmode = &dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ ixgbe_vlan_hw_strip_config(dev);
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
ixgbe_vlan_hw_filter_enable(dev);
else
ixgbe_vlan_hw_filter_disable(dev);
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
ixgbe_vlan_hw_extend_enable(dev);
else
ixgbe_vlan_hw_extend_disable(dev);
@@ -2141,6 +2185,16 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
return 0;
}
+static int
+ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbe_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
static void
ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
@@ -2289,11 +2343,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
const struct rte_eth_dcb_rx_conf *conf;
- if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
- PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
- IXGBE_DCB_NB_QUEUES);
- return -EINVAL;
- }
conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
@@ -2307,11 +2356,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
const struct rte_eth_dcb_tx_conf *conf;
- if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
- PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
- IXGBE_DCB_NB_QUEUES);
- return -EINVAL;
- }
conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
if (!(conf->nb_tcs == ETH_4_TCS ||
conf->nb_tcs == ETH_8_TCS)) {
@@ -2480,6 +2524,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
+ uint32_t allowed_speeds = 0;
int mask = 0;
int status;
uint16_t vf, idx;
@@ -2561,7 +2606,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- err = ixgbe_vlan_offload_set(dev, mask);
+ err = ixgbe_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
goto error;
@@ -2628,9 +2673,21 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
if (err)
goto error;
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_5G |
+ ETH_LINK_SPEED_10G;
+ break;
+ default:
+ allowed_speeds = ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G;
+ }
+
link_speeds = &dev->data->dev_conf.link_speeds;
- if (*link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G |
- ETH_LINK_SPEED_10G)) {
+ if (*link_speeds & ~allowed_speeds) {
PMD_INIT_LOG(ERR, "Invalid link setting");
goto error;
}
@@ -2656,6 +2713,10 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
} else {
if (*link_speeds & ETH_LINK_SPEED_10G)
speed |= IXGBE_LINK_SPEED_10GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_5G)
+ speed |= IXGBE_LINK_SPEED_5GB_FULL;
+ if (*link_speeds & ETH_LINK_SPEED_2_5G)
+ speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
if (*link_speeds & ETH_LINK_SPEED_1G)
speed |= IXGBE_LINK_SPEED_1GB_FULL;
if (*link_speeds & ETH_LINK_SPEED_100M)
@@ -2666,6 +2727,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
if (err)
goto error;
+ ixgbe_dev_link_update(dev, 0);
+
skip_link_setup:
if (rte_intr_allow_others(intr_handle)) {
@@ -2757,7 +2820,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
@@ -2881,7 +2944,7 @@ ixgbe_dev_reset(struct rte_eth_dev *dev)
if (ret)
return ret;
- ret = eth_ixgbe_dev_init(dev);
+ ret = eth_ixgbe_dev_init(dev, NULL);
return ret;
}
@@ -3057,9 +3120,18 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
}
/* Flow Director Stats registers */
- hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
- hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
-
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+ hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+ hw_stats->fdirustat_add += IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) & 0xFFFF;
+ hw_stats->fdirustat_remove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRUSTAT) >> 16) & 0xFFFF;
+ hw_stats->fdirfstat_fadd += IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) & 0xFFFF;
+ hw_stats->fdirfstat_fremove += (IXGBE_READ_REG(hw,
+ IXGBE_FDIRFSTAT) >> 16) & 0xFFFF;
+ }
/* MACsec Stats registers */
macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
macsec_stats->out_pkts_encrypted +=
@@ -3625,7 +3697,6 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
@@ -3647,54 +3718,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
dev_info->vmdq_queue_num = dev_info->max_rx_queues;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
-
- /*
- * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
- * mode.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540) &&
- !RTE_ETH_DEV_SRIOV(dev).active)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
-
- if (hw->mac.type == ixgbe_mac_82599EB ||
- hw->mac.type == ixgbe_mac_X540)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
-
- if (hw->mac.type == ixgbe_mac_X550 ||
- hw->mac.type == ixgbe_mac_X550EM_x ||
- hw->mac.type == ixgbe_mac_X550EM_a)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-
-#ifdef RTE_LIBRTE_SECURITY
- if (dev->security_ctx) {
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
- }
-#endif
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3704,6 +3732,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3714,8 +3743,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -3736,6 +3764,14 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
dev_info->speed_capa |= ETH_LINK_SPEED_5G;
}
+
+ /* Driver-preferred Rx/Tx parameters */
+ dev_info->default_rxportconf.burst_size = 32;
+ dev_info->default_txportconf.burst_size = 32;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_rxportconf.ring_size = 256;
+ dev_info->default_txportconf.ring_size = 256;
}
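/*
 * Illustrative sketch (not part of this patch): how an application can
 * consume the driver-preferred parameters advertised above through the
 * standard ethdev query. The fallback value of 512 is an assumption for
 * the example only.
 */
static uint16_t
example_pick_rx_ring_size(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Fall back to a local default when the PMD reports 0. */
	return dev_info.default_rxportconf.ring_size != 0 ?
		dev_info.default_rxportconf.ring_size : 512;
}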
static const uint32_t *
@@ -3784,7 +3820,6 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
@@ -3796,17 +3831,11 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
dev_info->max_vmdq_pools = ETH_64_POOLS;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_SCTP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_queue_offload_capa = ixgbe_get_rx_queue_offloads(dev);
+ dev_info->rx_offload_capa = (ixgbe_get_rx_port_offloads(dev) |
+ dev_info->rx_queue_offload_capa);
+ dev_info->tx_queue_offload_capa = ixgbe_get_tx_queue_offloads(dev);
+ dev_info->tx_offload_capa = ixgbe_get_tx_port_offloads(dev);
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -3816,6 +3845,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
},
.rx_free_thresh = IXGBE_DEFAULT_RX_FREE_THRESH,
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3826,8 +3856,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
},
.tx_free_thresh = IXGBE_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IXGBE_DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
+ .offloads = 0,
};
dev_info->rx_desc_lim = rx_desc_lim;
@@ -3863,7 +3892,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
* before the link status is correct
*/
- if (mac->type == ixgbe_mac_82599_vf) {
+ if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
int i;
for (i = 0; i < 5; i++) {
@@ -3941,12 +3970,12 @@ out:
}
/* return 0 if the link status changed, -1 if it did not change */
-static int
+int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
int wait_to_complete, int vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -3956,12 +3985,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
int wait = 1;
bool autoneg = false;
+ memset(&link, 0, sizeof(link));
link.link_status = ETH_LINK_DOWN;
- link.link_speed = 0;
+ link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_autoneg = ETH_LINK_AUTONEG;
- memset(&old, 0, sizeof(old));
- rte_ixgbe_dev_atomic_read_link_status(dev, &old);
hw->mac.get_link_status = true;
@@ -3985,19 +4013,14 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
if (diag != 0) {
link.link_speed = ETH_SPEED_NUM_100M;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
if (link_up == 0) {
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
+
intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -4029,12 +4052,8 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
link.link_speed = ETH_SPEED_NUM_10G;
break;
}
- rte_ixgbe_dev_atomic_write_link_status(dev, &link);
-
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
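/*
 * Illustrative sketch (not part of this patch) of the ethdev helper
 * semantics relied on above: rte_eth_linkstatus_set() swaps the new link
 * state into dev->data->dev_link atomically and reports whether anything
 * changed, which is why the hand-rolled read of "old" could be dropped.
 * The body below only approximates the library; the real helper uses a
 * 64-bit atomic exchange.
 */
static int
example_linkstatus_set(struct rte_eth_dev *dev,
		       const struct rte_eth_link *new_link)
{
	struct rte_eth_link old_link = dev->data->dev_link;

	dev->data->dev_link = *new_link;

	/* 0 means the link status changed, -1 means it did not. */
	return (old_link.link_status == new_link->link_status) ? -1 : 0;
}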
static int
@@ -4233,8 +4252,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
+
if (link.link_status) {
PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
(int)(dev->data->port_id),
@@ -4269,7 +4288,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int64_t timeout;
- struct rte_eth_link link;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -4286,9 +4304,10 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
}
if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+ struct rte_eth_link link;
+
/* get the link status before the link update, for later comparison */
- memset(&link, 0, sizeof(link));
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
ixgbe_dev_link_update(dev, 0);
@@ -4853,14 +4872,15 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
ixgbe_clear_rar(hw, index);
}
-static void
+static int
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
ixgbe_remove_rar(dev, 0);
-
ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
+
+ return 0;
}
static bool
@@ -4909,10 +4929,12 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if (frame_size > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
} else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
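/*
 * Worked example (not part of this patch), assuming the usual ethdev
 * constants ETHER_HDR_LEN = 14, ETHER_CRC_LEN = 4, ETHER_MAX_LEN = 1518
 * and frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN: the jumbo path
 * above is taken exactly when mtu > 1500, e.g.
 *
 *   mtu = 1500 -> frame_size = 1518 -> standard frame path
 *   mtu = 9000 -> frame_size = 9018 -> JUMBO_FRAME offload + JUMBOEN set
 */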
@@ -4932,19 +4954,32 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
* Virtual Function operations
*/
static void
-ixgbevf_intr_disable(struct ixgbe_hw *hw)
+ixgbevf_intr_disable(struct rte_eth_dev *dev)
{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
PMD_INIT_FUNC_TRACE();
/* Clear interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear mask value. */
+ intr->mask = 0;
}
static void
-ixgbevf_intr_enable(struct ixgbe_hw *hw)
+ixgbevf_intr_enable(struct rte_eth_dev *dev)
{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
PMD_INIT_FUNC_TRACE();
/* VF enable interrupt autoclean */
@@ -4953,6 +4988,9 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
IXGBE_WRITE_FLUSH(hw);
+
+ /* Save IXGBE_VTEIMS value to mask. */
+ intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
}
static int
@@ -4970,14 +5008,14 @@ ixgbevf_dev_configure(struct rte_eth_dev *dev)
* Keep the persistent behavior the same as Host PF
*/
#ifndef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC
- if (!conf->rxmode.hw_strip_crc) {
+ if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 1;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
#else
- if (conf->rxmode.hw_strip_crc) {
+ if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
- conf->rxmode.hw_strip_crc = 0;
+ conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
}
#endif
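/*
 * Illustrative sketch (not part of this patch) of the transitional CRC
 * semantics used above. In this release both DEV_RX_OFFLOAD_CRC_STRIP and
 * DEV_RX_OFFLOAD_KEEP_CRC exist, and rte_eth_dev_must_keep_crc() derives
 * the effective behaviour from the offload flags; the body below is an
 * assumption for illustration, not the library source.
 */
static int
example_must_keep_crc(uint64_t rx_offloads)
{
	/* CRC_STRIP explicitly requested: the NIC strips the CRC. */
	if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
		return 0;

	/* Otherwise (including KEEP_CRC) the CRC stays on the packet. */
	return 1;
}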
@@ -5030,7 +5068,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
/* Set HW strip */
mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
- err = ixgbevf_vlan_offload_set(dev, mask);
+ err = ixgbevf_vlan_offload_config(dev, mask);
if (err) {
PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
ixgbe_dev_clear_queues(dev);
@@ -5039,6 +5077,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
ixgbevf_dev_rxtx_start(dev);
+ ixgbevf_dev_link_update(dev, 0);
+
/* check and configure queue intr-vector mapping */
if (rte_intr_cap_multiple(intr_handle) &&
dev->data->dev_conf.intr_conf.rxq) {
@@ -5074,7 +5114,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
rte_intr_enable(intr_handle);
/* Re-enable interrupt for VF */
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(dev);
return 0;
}
@@ -5088,7 +5128,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(dev);
hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
@@ -5226,24 +5266,34 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
}
static int
-ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+ixgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
{
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_rx_queue *rxq;
uint16_t i;
int on = 0;
/* VF function only supports the HW VLAN strip feature; others are not supported */
if (mask & ETH_VLAN_STRIP_MASK) {
- on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
-
- for (i = 0; i < hw->mac.max_rx_queues; i++)
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
ixgbevf_vlan_strip_queue_set(dev, i, on);
+ }
}
return 0;
}
+static int
+ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ ixgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ ixgbevf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
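/*
 * Minimal usage sketch (not part of this patch): with the per-queue
 * offload model above, an application toggles VF VLAN stripping at
 * runtime through the generic ethdev call, which lands in
 * ixgbevf_vlan_offload_set().
 */
static int
example_enable_vf_vlan_strip(uint16_t port_id)
{
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;

	return rte_eth_dev_set_vlan_offload(port_id,
					    mask | ETH_VLAN_STRIP_OFFLOAD);
}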
+
int
ixgbe_vt_check(struct ixgbe_hw *hw)
{
@@ -5580,17 +5630,17 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- uint32_t mask;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t vec = IXGBE_MISC_VEC_ID;
- mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
if (rte_intr_allow_others(intr_handle))
vec = IXGBE_RX_VEC_START;
- mask |= (1 << vec);
+ intr->mask |= (1 << vec);
RTE_SET_USED(queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
rte_intr_enable(intr_handle);
@@ -5600,19 +5650,19 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- uint32_t mask;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vec = IXGBE_MISC_VEC_ID;
- mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
if (rte_intr_allow_others(intr_handle))
vec = IXGBE_RX_VEC_START;
- mask &= ~(1 << vec);
+ intr->mask &= ~(1 << vec);
RTE_SET_USED(queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
return 0;
}
@@ -5778,6 +5828,13 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
if (vector_idx < base + intr_handle->nb_efd - 1)
vector_idx++;
}
+
+	/* As the RX queue settings above show, all queues use vector 0.
+ * Set only the ITR value of IXGBE_MISC_VEC_ID.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
+ IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | IXGBE_EITR_CNT_WDIS);
}
/**
@@ -5799,8 +5856,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
/* won't configure msix register if no mapping is done
* between intr vector and event fd
+	 * but if MSI-X has been enabled already, we need to configure
+ * auto clean, auto mask and throttling.
*/
- if (!rte_intr_dp_is_en(intr_handle))
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ if (!rte_intr_dp_is_en(intr_handle) &&
+ !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
return;
if (rte_intr_allow_others(intr_handle))
@@ -5824,30 +5885,34 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
/* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
- for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
- queue_id++) {
- /* by default, 1:1 mapping */
- ixgbe_set_ivar_map(hw, 0, queue_id, vec);
- intr_handle->intr_vec[queue_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
- }
+ if (rte_intr_dp_is_en(intr_handle)) {
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- IXGBE_MISC_VEC_ID);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
- break;
- default:
- break;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar_map(hw, -1,
+ IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ IXGBE_MISC_VEC_ID);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+ break;
+ default:
+ break;
+ }
}
IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
- IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+ IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | IXGBE_EITR_CNT_WDIS);
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
@@ -5863,6 +5928,7 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
@@ -5884,14 +5950,14 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
bcnrc_val = 0;
}
+ rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
- if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
- IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
+ (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
IXGBE_MMW_SIZE_JUMBO_FRAME);
else
@@ -5983,12 +6049,14 @@ ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
}
}
-static void
+static int
ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
+
+ return 0;
}
int
@@ -6238,7 +6306,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!rx_conf->enable_scatter &&
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -6812,9 +6880,8 @@ ixgbe_start_timecounters(struct rte_eth_dev *dev)
uint32_t shift = 0;
/* Get current link speed. */
- memset(&link, 0, sizeof(link));
ixgbe_dev_link_update(dev, 1);
- rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
switch (link.link_speed) {
case ETH_SPEED_NUM_100M:
@@ -7166,6 +7233,78 @@ ixgbe_set_eeprom(struct rte_eth_dev *dev,
return eeprom->ops.write_buffer(hw, first, length, data);
}
+static int
+ixgbe_get_module_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t status;
+ uint8_t sff8472_rev, addr_mode;
+ bool page_swap = false;
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0)
+ return -EIO;
+
+	/* Check whether the addressing mode is supported */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0)
+ return -EIO;
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ PMD_DRV_LOG(ERR,
+ "Address change required to access page 0xA2, "
+ "but not supported. Please report the module "
+ "type to the driver maintainers.");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+		/* We have an SFP, but it does not support SFF-8472 */
+ modinfo->type = RTE_ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+ } else {
+		/* We have an SFP which supports a revision of SFF-8472. */
+ modinfo->type = RTE_ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_get_module_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t status = IXGBE_ERR_PHY_ADDR_INVALID;
+ uint8_t databyte = 0xFF;
+ uint8_t *data = info->data;
+ uint32_t i = 0;
+
+ if (info->length == 0)
+ return -EINVAL;
+
+ for (i = info->offset; i < info->offset + info->length; i++) {
+ if (i < RTE_ETH_MODULE_SFF_8079_LEN)
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ else
+ status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
+
+ if (status != 0)
+ return -EIO;
+
+ data[i - info->offset] = databyte;
+ }
+
+ return 0;
+}
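/*
 * Usage sketch (not part of this patch): driving the two new callbacks
 * through the generic ethdev module-EEPROM API added in DPDK 18.05.
 * "buf"/"buf_len" are assumptions for the example.
 */
static int
example_dump_module_eeprom(uint16_t port_id, uint8_t *buf, uint32_t buf_len)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info info;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;

	memset(&info, 0, sizeof(info));
	info.offset = 0;
	info.length = RTE_MIN(modinfo.eeprom_len, buf_len);
	info.data = buf;

	return rte_eth_dev_get_module_eeprom(port_id, &info);
}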
+
uint16_t
ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
switch (mac_type) {
@@ -8169,7 +8308,7 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- ixgbevf_intr_disable(hw);
+ ixgbevf_intr_disable(dev);
/* read the clear-on-read NIC registers here */
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -8186,7 +8325,6 @@ ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
static int
ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -8195,7 +8333,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
intr->flags &= ~IXGBE_FLAG_MAILBOX;
}
- ixgbevf_intr_enable(hw);
+ ixgbevf_intr_enable(dev);
return 0;
}
@@ -8334,7 +8472,7 @@ ixgbe_rss_filter_restore(struct rte_eth_dev *dev)
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev,
&filter_info->rss_info, TRUE);
}
@@ -8446,9 +8584,7 @@ RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio-pci");
-RTE_INIT(ixgbe_init_log);
-static void
-ixgbe_init_log(void)
+RTE_INIT(ixgbe_init_log)
{
ixgbe_logtype_init = rte_log_register("pmd.net.ixgbe.init");
if (ixgbe_logtype_init >= 0)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index c56d6524..d0b93968 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -4,6 +4,9 @@
#ifndef _IXGBE_ETHDEV_H_
#define _IXGBE_ETHDEV_H_
+
+#include <stdint.h>
+
#include "base/ixgbe_type.h"
#include "base/ixgbe_dcb.h"
#include "base/ixgbe_dcb_82599.h"
@@ -12,6 +15,7 @@
#ifdef RTE_LIBRTE_SECURITY
#include "ixgbe_ipsec.h"
#endif
+#include <rte_flow.h>
#include <rte_time.h>
#include <rte_hash.h>
#include <rte_pci.h>
@@ -39,6 +43,7 @@
#define IXGBE_EXTENDED_VLAN (uint32_t)(1 << 26) /* EXTENDED VLAN ENABLE */
#define IXGBE_VFTA_SIZE 128
#define IXGBE_VLAN_TAG_SIZE 4
+#define IXGBE_HKEY_MAX_INDEX 10
#define IXGBE_MAX_RX_QUEUE_NUM 128
#define IXGBE_MAX_INTR_QUEUE_NUM 15
#define IXGBE_VMDQ_DCB_NB_QUEUES IXGBE_MAX_RX_QUEUE_NUM
@@ -57,6 +62,7 @@
(((us) * 1000 / IXGBE_EITR_INTERVAL_UNIT_NS << IXGBE_EITR_ITR_INT_SHIFT) & \
IXGBE_EITR_ITR_INT_MASK)
+#define IXGBE_QUEUE_ITR_INTERVAL_DEFAULT 500 /* 500us */
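/*
 * Worked example (not part of this patch), assuming the existing ixgbe
 * constants IXGBE_EITR_INTERVAL_UNIT_NS = 2048, IXGBE_EITR_ITR_INT_SHIFT
 * = 3 and IXGBE_EITR_ITR_INT_MASK = 0xFF8:
 *
 *   IXGBE_EITR_INTERVAL_US(500)
 *     = ((500 * 1000 / 2048) << 3) & 0xFF8
 *     = (244 << 3) & 0xFF8
 *     = 0x7A0
 *
 * i.e. the 500us default programs roughly 244 units of 2.048us each into
 * the EITR interval field.
 */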
/* Loopback operation modes */
/* 82599 specific loopback operation types */
@@ -94,6 +100,11 @@
#define IXGBE_5TUPLE_MAX_PRI 7
#define IXGBE_5TUPLE_MIN_PRI 1
+/* VXLAN tunnel type bit | 7 bits of zeros | 8 bits of zeros */
+#define IXGBE_FDIR_VXLAN_TUNNEL_TYPE 0x8000
+/* NVGRE tunnel type bit | 7 bits of zeros | 8 bits of zeros */
+#define IXGBE_FDIR_NVGRE_TUNNEL_TYPE 0x0
+
#define IXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
@@ -196,8 +207,8 @@ struct ixgbe_hw_fdir_info {
};
struct ixgbe_rte_flow_rss_conf {
- struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in queue[]. */
+ struct rte_flow_action_rss conf; /**< RSS parameters. */
+	uint8_t key[IXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /**< Hash key. */
uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
};
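/*
 * Illustrative rte_flow RSS action (not part of this patch) satisfying
 * the constraints this struct encodes: default hash function, level 0, a
 * 40-byte key and a bounded queue list. Key bytes and queue ids are
 * assumptions for the example only.
 */
static const uint16_t example_rss_queues[] = { 0, 1 };
static const uint8_t example_rss_key[40] = { 0x6d, 0x5a };

static const struct rte_flow_action_rss example_rss = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,
	.types = ETH_RSS_IP,
	.key_len = sizeof(example_rss_key),
	.key = example_rss_key,
	.queue_num = RTE_DIM(example_rss_queues),
	.queue = example_rss_queues,
};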
@@ -253,6 +264,7 @@ struct ixgbe_vf_info {
uint16_t vlan_count;
uint8_t spoofchk_enabled;
uint8_t api_version;
+ uint16_t switch_domain_id;
};
/*
@@ -480,6 +492,15 @@ struct ixgbe_adapter {
struct ixgbe_tm_conf tm_conf;
};
+struct ixgbe_vf_representor {
+ uint16_t vf_id;
+ uint16_t switch_domain_id;
+ struct rte_eth_dev *pf_ethdev;
+};
+
+int ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
+int ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev);
+
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
@@ -652,6 +673,10 @@ int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
+int
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf);
+
/*
* misc function prototypes
*/
@@ -659,9 +684,7 @@ void ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev);
void ixgbe_vlan_hw_filter_disable(struct rte_eth_dev *dev);
-void ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev);
-
-void ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev);
+void ixgbe_vlan_hw_strip_config(struct rte_eth_dev *dev);
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev);
@@ -698,6 +721,10 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev);
void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev);
int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t tx_rate);
+int ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in);
+int ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
int ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add);
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index d5e51797..e559f0fa 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -394,9 +394,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = info->mask.mac_addr_byte_mask;
- fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
- & IXGBE_FDIRIP6M_INNER_MAC;
+ fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+ mac_mask = info->mask.mac_addr_byte_mask &
+ (IXGBE_FDIRIP6M_INNER_MAC >>
+ IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
+ fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
+ IXGBE_FDIRIP6M_INNER_MAC);
switch (info->mask.tunnel_type_mask) {
case 0:
@@ -771,10 +774,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
input->formatted.inner_mac,
fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
sizeof(input->formatted.inner_mac));
- input->formatted.tunnel_type =
- fdir_filter->input.flow.tunnel_flow.tunnel_type;
+ if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_VXLAN)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+ else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+ RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ input->formatted.tunnel_type =
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+ else
+			PMD_DRV_LOG(ERR, "invalid tunnel type argument.");
+
input->formatted.tni_vni =
- fdir_filter->input.flow.tunnel_flow.tunnel_id;
+ fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
}
return 0;
@@ -1001,8 +1013,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
} else {
/* tunnel mode */
- if (input->formatted.tunnel_type !=
- RTE_FDIR_TUNNEL_TYPE_NVGRE)
+ if (input->formatted.tunnel_type)
tunnel_type = 0x80000000;
tunnel_type |= addr_high;
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
@@ -1010,6 +1021,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
input->formatted.tni_vni);
}
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
}
/* record vlan (little-endian) and flex_bytes(big-endian) */
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index dcbfb38b..1adf1b80 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -264,8 +264,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
/* Skip Ethernet */
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
@@ -298,8 +298,8 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
@@ -346,7 +346,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
/**
* Only support src & dst addresses, protocol,
* others should be masked.
@@ -368,7 +368,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
filter->proto_mask = ipv4_mask->hdr.next_proto_id;
- ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
filter->dst_ip = ipv4_spec->hdr.dst_addr;
filter->src_ip = ipv4_spec->hdr.src_addr;
filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -413,7 +413,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
/**
* Only support src & dst ports, tcp flags,
@@ -447,12 +447,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -471,11 +471,11 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
- sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -494,7 +494,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = sctp_mask->hdr.dst_port;
filter->src_port_mask = sctp_mask->hdr.src_port;
- sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
} else {
@@ -557,6 +557,15 @@ action:
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
@@ -699,8 +708,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Mask bits of source MAC address must be full of 0.
* Mask bits of destination MAC address must be full
@@ -787,6 +796,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
}
/* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -1000,8 +1017,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
tcp_mask->hdr.src_port ||
tcp_mask->hdr.dst_port ||
@@ -1078,6 +1095,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
@@ -1198,8 +1224,8 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
- e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+ e_tag_spec = item->spec;
+ e_tag_mask = item->mask;
/* Only care about GRP and E cid base. */
if (e_tag_mask->epcp_edei_in_ecid_b ||
@@ -1250,6 +1276,15 @@ cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
}
/* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
if (attr->priority) {
memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
@@ -1354,6 +1389,15 @@ ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
}
/* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ /* not supported */
if (attr->priority) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -1447,12 +1491,9 @@ static inline uint8_t signature_match(const struct rte_flow_item pattern[])
break;
if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
- spec =
- (const struct rte_flow_item_fuzzy *)item->spec;
- last =
- (const struct rte_flow_item_fuzzy *)item->last;
- mask =
- (const struct rte_flow_item_fuzzy *)item->mask;
+ spec = item->spec;
+ last = item->last;
+ mask = item->mask;
if (!spec || !mask)
return 0;
@@ -1632,7 +1673,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_spec = item->spec;
/* Get the dst MAC. */
for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -1645,7 +1686,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->mask) {
rule->b_mask = TRUE;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_mask = item->mask;
/* Ether type should be masked. */
if (eth_mask->type ||
@@ -1698,7 +1739,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
} else {
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -1725,8 +1767,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
@@ -1772,8 +1814,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- ipv4_mask =
- (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.type_of_service ||
ipv4_mask->hdr.total_length ||
@@ -1793,8 +1834,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- ipv4_spec =
- (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
rule->ixgbe_fdir.formatted.dst_ip[0] =
ipv4_spec->hdr.dst_addr;
rule->ixgbe_fdir.formatted.src_ip[0] =
@@ -1844,8 +1884,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
rule->b_mask = TRUE;
- ipv6_mask =
- (const struct rte_flow_item_ipv6 *)item->mask;
+ ipv6_mask = item->mask;
if (ipv6_mask->hdr.vtc_flow ||
ipv6_mask->hdr.payload_len ||
ipv6_mask->hdr.proto ||
@@ -1885,8 +1924,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- ipv6_spec =
- (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_spec = item->spec;
rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
ipv6_spec->hdr.src_addr, 16);
rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
@@ -1938,7 +1976,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
if (tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
@@ -1957,7 +1995,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
tcp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2003,7 +2041,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2017,7 +2055,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
udp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2068,8 +2106,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
rule->b_mask = TRUE;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
if (sctp_mask->hdr.tag ||
sctp_mask->hdr.cksum) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2083,8 +2120,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
if (item->spec) {
rule->b_spec = TRUE;
- sctp_spec =
- (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
sctp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
@@ -2092,8 +2128,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
}
/* in other cases, even the SCTP port is not supported */
} else {
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
if (sctp_mask &&
(sctp_mask->hdr.src_port ||
sctp_mask->hdr.dst_port ||
@@ -2136,7 +2171,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- raw_mask = (const struct rte_flow_item_raw *)item->mask;
+ raw_mask = item->mask;
/* check mask */
if (raw_mask->relative != 0x1 ||
@@ -2152,7 +2187,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
return -rte_errno;
}
- raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_spec = item->spec;
/* check spec */
if (raw_spec->relative != 0 ||
@@ -2403,7 +2438,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Get the VxLAN info */
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_VXLAN;
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
/* Only care about VNI, others should be masked. */
if (!item->mask) {
@@ -2425,8 +2460,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
- vxlan_mask =
- (const struct rte_flow_item_vxlan *)item->mask;
+ vxlan_mask = item->mask;
if (vxlan_mask->flags) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2452,20 +2486,17 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- vxlan_spec = (const struct rte_flow_item_vxlan *)
- item->spec;
+ vxlan_spec = item->spec;
rte_memcpy(((uint8_t *)
- &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ &rule->ixgbe_fdir.formatted.tni_vni),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
- rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
- rule->ixgbe_fdir.formatted.tni_vni);
}
}
/* Get the NVGRE info */
if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_NVGRE;
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
/**
* Only care about flags0, flags1, protocol and TNI,
@@ -2490,8 +2521,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
- nvgre_mask =
- (const struct rte_flow_item_nvgre *)item->mask;
+ nvgre_mask = item->mask;
if (nvgre_mask->flow_id) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
@@ -2534,8 +2564,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- nvgre_spec =
- (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_spec = item->spec;
if (nvgre_spec->c_k_s_rsvd0_ver !=
rte_cpu_to_be_16(0x2000) &&
nvgre_mask->c_k_s_rsvd0_ver) {
@@ -2557,7 +2586,6 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
/* tni is a 24-bits bit field */
rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
- rule->ixgbe_fdir.formatted.tni_vni <<= 8;
}
}
@@ -2591,7 +2619,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
return -rte_errno;
}
rule->b_mask = TRUE;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_mask = item->mask;
/* Ether type should be masked. */
if (eth_mask->type) {
@@ -2632,7 +2660,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
if (item->spec) {
rule->b_spec = TRUE;
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_spec = item->spec;
/* Get the dst MAC. */
for (j = 0; j < ETHER_ADDR_LEN; j++) {
@@ -2671,8 +2699,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
return -rte_errno;
}
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
@@ -2775,7 +2803,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
rss = (const struct rte_flow_action_rss *)act->conf;
- if (!rss || !rss->num) {
+ if (!rss || !rss->queue_num) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act,
@@ -2783,7 +2811,7 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
- for (n = 0; n < rss->num; n++) {
+ for (n = 0; n < rss->queue_num; n++) {
if (rss->queue[n] >= dev->data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
@@ -2792,14 +2820,27 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
}
- if (rss->rss_conf)
- rss_conf->rss_conf = *rss->rss_conf;
- else
- rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
- for (n = 0; n < rss->num; ++n)
- rss_conf->queue[n] = rss->queue[n];
- rss_conf->num = rss->num;
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (ixgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
@@ -2830,6 +2871,15 @@ ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
@@ -2848,7 +2898,7 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
@@ -3167,9 +3217,8 @@ ixgbe_flow_create(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- rte_memcpy(&rss_filter_ptr->filter_info,
- &rss_conf,
- sizeof(struct ixgbe_rte_flow_rss_conf));
+ ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
TAILQ_INSERT_TAIL(&filter_rss_list,
rss_filter_ptr, entries);
flow->rule = rss_filter_ptr;
diff --git a/drivers/net/ixgbe/ixgbe_ipsec.c b/drivers/net/ixgbe/ixgbe_ipsec.c
index 176ec0fd..08405f1e 100644
--- a/drivers/net/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ixgbe/ixgbe_ipsec.c
@@ -598,13 +598,18 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t reg;
+ uint64_t rx_offloads;
+ uint64_t tx_offloads;
+
+ rx_offloads = dev->data->dev_conf.rxmode.offloads;
+ tx_offloads = dev->data->dev_conf.txmode.offloads;
/* sanity checks */
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
return -1;
}
- if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
+ if (rte_eth_dev_must_keep_crc(rx_offloads)) {
PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
return -1;
}
@@ -624,7 +629,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
reg |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SECURITY) {
+ if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
if (reg != 0) {
@@ -632,7 +637,7 @@ ixgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
return -1;
}
}
- if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_SECURITY) {
+ if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL,
IXGBE_SECTXCTRL_STORE_FORWARD);
reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index ea997371..4b833ffa 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -90,6 +90,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
+ rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+
memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
@@ -122,11 +124,10 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
{
struct ixgbe_vf_info **vfinfo;
uint16_t vf_num;
+ int ret;
PMD_INIT_FUNC_TRACE();
- vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
-
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
@@ -136,6 +137,14 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
if (vf_num == 0)
return;
+ vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+ if (*vfinfo == NULL)
+ return;
+
+ ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+ if (ret)
+ PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
rte_free(*vfinfo);
*vfinfo = NULL;
}
@@ -329,10 +338,7 @@ set_rx_mode(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
- ixgbe_vlan_hw_strip_enable_all(dev);
- else
- ixgbe_vlan_hw_strip_disable_all(dev);
+ ixgbe_vlan_hw_strip_config(dev);
}
static inline void
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 6c582b4b..f82b74a9 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1420,7 +1420,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
/*
* Check if VLAN present only.
* Do not check whether L3/L4 rx checksum done by NIC or not,
- * That can be found from rte_eth_rxmode.hw_ip_checksum flag
+	 * that can be found from the rte_eth_rxmode.offloads flags
*/
pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
@@ -2379,7 +2379,7 @@ void __attribute__((cold))
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
- if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS) &&
+ if ((txq->offloads == 0) &&
#ifdef RTE_LIBRTE_SECURITY
!(txq->using_ipsec) &&
#endif
@@ -2398,9 +2398,8 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
PMD_INIT_LOG(DEBUG,
- " - txq_flags = %lx " "[IXGBE_SIMPLE_FLAGS=%lx]",
- (unsigned long)txq->txq_flags,
- (unsigned long)IXGBE_SIMPLE_FLAGS);
+ " - offloads = 0x%" PRIx64,
+ txq->offloads);
PMD_INIT_LOG(DEBUG,
" - tx_rs_thresh = %lu " "[RTE_PMD_IXGBE_TX_MAX_BURST=%lu]",
(unsigned long)txq->tx_rs_thresh,
@@ -2410,6 +2409,45 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
}
}
+uint64_t
+ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+uint64_t
+ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t tx_offload_capa;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+ return tx_offload_capa;
+}
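/*
 * Minimal sketch (not part of this patch): an application is expected to
 * check tx_offload_capa before requesting a port-level Tx offload in
 * rte_eth_conf, e.g. for TSO.
 */
static int
example_request_tso(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO))
		return -ENOTSUP;

	conf->txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
	return 0;
}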
+
int __attribute__((cold))
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2421,10 +2459,13 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
struct ixgbe_tx_queue *txq;
struct ixgbe_hw *hw;
uint16_t tx_rs_thresh, tx_free_thresh;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/*
* Validate number of transmit descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -2550,7 +2591,7 @@ ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->ops = &def_txq_ops;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_SECURITY
@@ -2769,6 +2810,82 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
#endif
}
+static int
+ixgbe_is_vf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+uint64_t
+ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ return offloads;
+}
+
+uint64_t
+ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
+{
+ uint64_t offloads;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_SCATTER;
+
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+
+ if (ixgbe_is_vf(dev) == 0)
+ offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
+
+ /*
+ * RSC is only supported by 82599 and x540 PF devices in a non-SR-IOV
+ * mode.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540) &&
+ !RTE_ETH_DEV_SRIOV(dev).active)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_X550EM_x ||
+ hw->mac.type == ixgbe_mac_X550EM_a)
+ offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+#ifdef RTE_LIBRTE_SECURITY
+ if (dev->security_ctx)
+ offloads |= DEV_RX_OFFLOAD_SECURITY;
+#endif
+
+ return offloads;
+}
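/*
 * Usage sketch (not part of this patch): under the new offload model,
 * port-wide Rx offloads go into rte_eth_conf.rxmode.offloads at configure
 * time, while queue-level ones (here only VLAN strip on non-82598 MACs)
 * may also be passed per queue. The ring size of 256 and queue id 0 are
 * assumptions for the example.
 */
static int
example_rx_queue_with_vlan_strip(uint16_t port_id, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	rxconf = dev_info.default_rxconf;

	/* Per-queue offloads must stay within rx_queue_offload_capa. */
	if (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP)
		rxconf.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;

	return rte_eth_rx_queue_setup(port_id, 0, 256,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mp);
}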
+
int __attribute__((cold))
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -2783,10 +2900,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t len;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/*
* Validate number of receive descriptors.
* It must not exceed hardware maximum, and must be multiple
@@ -2816,10 +2936,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
- 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+ rxq->offloads = offloads;
/*
* The packet type in RX descriptor is different for different NICs.
@@ -4574,7 +4697,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
rsc_capable = true;
- if (!rsc_capable && rx_conf->enable_lro) {
+ if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
"support it");
return -EINVAL;
@@ -4582,7 +4705,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (!rx_conf->hw_strip_crc && rx_conf->enable_lro) {
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads) &&
+ (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
* According to chapter 4.6.7.2.1 of the Spec Rev.
* 3.0 RSC configuration requires HW CRC stripping being
@@ -4596,7 +4720,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
/* RFCTL configuration */
rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
- if ((rsc_capable) && (rx_conf->enable_lro))
+ if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
/*
* Since NFS packet coalescing is not supported, clear
* RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
@@ -4609,7 +4733,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
- if (!rx_conf->enable_lro)
+ if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
return 0;
/* Set RDRXCTL.RSCACKC bit */
@@ -4666,7 +4790,8 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
* at most 500us latency for a single RSC aggregation.
*/
eitr &= ~IXGBE_EITR_ITR_INT_MASK;
- eitr |= IXGBE_EITR_INTERVAL_US(500) | IXGBE_EITR_CNT_WDIS;
+ eitr |= IXGBE_EITR_INTERVAL_US(IXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
+ eitr |= IXGBE_EITR_CNT_WDIS;
IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rxq->reg_idx), srrctl);
IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxq->reg_idx), rscctl);
@@ -4729,15 +4854,15 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Configure CRC stripping, if any.
*/
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rx_conf->hw_strip_crc)
- hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
- else
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+ else
+ hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->jumbo_frame == 1) {
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
@@ -4757,6 +4882,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -4765,7 +4895,8 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
* Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = rx_conf->hw_strip_crc ? 0 : ETHER_CRC_LEN;
+ rxq->crc_len = rte_eth_dev_must_keep_crc(rx_conf->offloads) ?
+ ETHER_CRC_LEN : 0;
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
@@ -4779,28 +4910,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), 0);
/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
- /*
- * Configure Header Split
- */
- if (rx_conf->header_split) {
- if (hw->mac.type == ixgbe_mac_82599EB) {
- /* Must setup the PSRTYPE register */
- uint32_t psrtype;
-
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(rxq->reg_idx), psrtype);
- }
- srrctl = ((rx_conf->split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
-#endif
- srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
@@ -4826,9 +4936,11 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
}
- if (rx_conf->enable_scatter)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
dev->data->scattered_rx = 1;
/*
@@ -4843,7 +4955,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
- if (rx_conf->hw_ip_checksum)
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
rxcsum |= IXGBE_RXCSUM_IPPCSE;
else
rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
@@ -4853,10 +4965,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rx_conf->hw_strip_crc)
- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
- else
+ if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+ else
+ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
@@ -5064,34 +5176,30 @@ ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- /* Allocate buffers for descriptor rings */
- if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
- PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
- rx_queue_id);
- return -1;
- }
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl |= IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxq = dev->data->rx_queues[rx_queue_id];
- /* Wait until RX Enable ready */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d",
- rx_queue_id);
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
+ /* Allocate buffers for descriptor rings */
+ if (ixgbe_alloc_rx_queue_mbufs(rxq) != 0) {
+ PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
+ rx_queue_id);
return -1;
+ }
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+
+ /* Wait until RX Enable ready */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_RDH(rxq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
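The per-PMD queue id bounds check could be dropped because rte_eth_dev_rx_queue_start() in the ethdev layer already rejects out-of-range ids before the driver callback runs; roughly (a simplified assumption about that layer, with a hypothetical helper name):

/* Hypothetical helper mirroring the ethdev-layer guard. */
static int
rx_queue_id_is_valid(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	return rx_queue_id < dev->data->nb_rx_queues;
}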
@@ -5112,30 +5220,26 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
+ rxq = dev->data->rx_queues[rx_queue_id];
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- rxdctl &= ~IXGBE_RXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl);
- /* Wait until RX Enable bit clear */
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
- } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
- rx_queue_id);
+ /* Wait until RX Enable bit clear */
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
+ } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
- rte_delay_us(RTE_IXGBE_WAIT_100_US);
+ rte_delay_us(RTE_IXGBE_WAIT_100_US);
- ixgbe_rx_queue_release_mbufs(rxq);
- ixgbe_reset_rx_queue(adapter, rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- } else
- return -1;
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(adapter, rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -5155,30 +5259,27 @@ ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
- txdctl |= IXGBE_TXDCTL_ENABLE;
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
+ txq = dev->data->tx_queues[tx_queue_id];
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl);
- /* Wait until TX Enable ready */
- if (hw->mac.type == ixgbe_mac_82599EB) {
- poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
- do {
- rte_delay_ms(1);
- txdctl = IXGBE_READ_REG(hw,
- IXGBE_TXDCTL(txq->reg_idx));
- } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
- if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not enable "
- "Tx Queue %d", tx_queue_id);
- }
- rte_wmb();
- IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- return -1;
+ /* Wait until TX Enable ready */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS;
+ do {
+ rte_delay_ms(1);
+ txdctl = IXGBE_READ_REG(hw,
+ IXGBE_TXDCTL(txq->reg_idx));
+ } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!poll_ms)
+ PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d",
+ tx_queue_id);
+ }
+ rte_wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_TDH(txq->reg_idx), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TDT(txq->reg_idx), 0);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
@@ -5198,9 +5299,6 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
PMD_INIT_FUNC_TRACE();
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id >= dev->data->nb_tx_queues)
- return -1;
-
txq = dev->data->tx_queues[tx_queue_id];
/* Wait until TX queue is empty */
@@ -5214,8 +5312,9 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IXGBE_TDT(txq->reg_idx));
} while (--poll_ms && (txtdh != txtdt));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Tx Queue %d is not empty "
- "when stopping.", tx_queue_id);
+ PMD_INIT_LOG(ERR,
+ "Tx Queue %d is not empty when stopping.",
+ tx_queue_id);
}
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx));
@@ -5231,8 +5330,8 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
IXGBE_TXDCTL(txq->reg_idx));
} while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable "
- "Tx Queue %d", tx_queue_id);
+ PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
+ tx_queue_id);
}
if (txq->ops != NULL) {
@@ -5259,6 +5358,7 @@ ixgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
}
void
@@ -5277,7 +5377,7 @@ ixgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
@@ -5289,6 +5389,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
struct ixgbe_rx_queue *rxq;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
@@ -5328,6 +5429,11 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
ixgbevf_rlpml_set_vf(hw,
(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ /*
+ * Assume no header split and no VLAN strip support
+ * on any Rx queue first.
+ */
+ rxmode->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
/* Setup RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
@@ -5351,18 +5457,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
/* Configure the SRRCTL register */
-#ifdef RTE_HEADER_SPLIT_ENABLE
- /*
- * Configure Header Split
- */
- if (dev->data->dev_conf.rxmode.header_split) {
- srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- } else
-#endif
- srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
/* Set if packets are dropped when no descriptors available */
if (rxq->drop_en)
@@ -5387,24 +5482,18 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
- if (dev->data->dev_conf.rxmode.enable_scatter ||
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
- (dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ (rxmode->max_rx_pkt_len +
2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
}
- }
-#ifdef RTE_HEADER_SPLIT_ENABLE
- if (dev->data->dev_conf.rxmode.header_split)
- /* Must setup the PSRTYPE register */
- psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR;
-#endif
+ if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ }
/* Set RQPL for VF RSS according to max Rx queue */
psrtype |= (dev->data->nb_rx_queues >> 1) <<
@@ -5522,6 +5611,40 @@ ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
}
int
+ixgbe_rss_conf_init(struct ixgbe_rte_flow_rss_conf *out,
+ const struct rte_flow_action_rss *in)
+{
+ if (in->key_len > RTE_DIM(out->key) ||
+ in->queue_num > RTE_DIM(out->queue))
+ return -EINVAL;
+ out->conf = (struct rte_flow_action_rss){
+ .func = in->func,
+ .level = in->level,
+ .types = in->types,
+ .key_len = in->key_len,
+ .queue_num = in->queue_num,
+ .key = memcpy(out->key, in->key, in->key_len),
+ .queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num),
+ };
+ return 0;
+}
+
+int
+ixgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+ return (comp->func == with->func &&
+ comp->level == with->level &&
+ comp->types == with->types &&
+ comp->key_len == with->key_len &&
+ comp->queue_num == with->queue_num &&
+ !memcmp(comp->key, with->key, with->key_len) &&
+ !memcmp(comp->queue, with->queue,
+ sizeof(*with->queue) * with->queue_num));
+}
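The field-wise comparison above matters because a memcmp() over the whole structure, as the old code did, compares the .key and .queue pointer values rather than the bytes they reference. An illustrative snippet with hypothetical buffers:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint8_t key_a[40], key_b[40];	/* same contents (zeros) */
static const uint16_t queues[2] = { 0, 1 };

static const struct rte_flow_action_rss a = {
	.types = ETH_RSS_IP, .key_len = sizeof(key_a), .queue_num = 2,
	.key = key_a, .queue = queues,
};
static const struct rte_flow_action_rss b = {
	.types = ETH_RSS_IP, .key_len = sizeof(key_b), .queue_num = 2,
	.key = key_b, .queue = queues,
};
/* memcmp(&a, &b, sizeof(a)) != 0 because the key pointers differ, yet
 * ixgbe_action_rss_same(&a, &b) correctly reports them as equal. */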
+
+int
ixgbe_config_rss_filter(struct rte_eth_dev *dev,
struct ixgbe_rte_flow_rss_conf *conf, bool add)
{
@@ -5531,7 +5654,12 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
uint16_t j;
uint16_t sp_reta_size;
uint32_t reta_reg;
- struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+ struct rte_eth_rss_conf rss_conf = {
+ .rss_key = conf->conf.key_len ?
+ (void *)(uintptr_t)conf->conf.key : NULL,
+ .rss_key_len = conf->conf.key_len,
+ .rss_hf = conf->conf.types,
+ };
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
@@ -5541,8 +5669,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
sp_reta_size = ixgbe_reta_size_get(hw->mac.type);
if (!add) {
- if (memcmp(conf, &filter_info->rss_info,
- sizeof(struct ixgbe_rte_flow_rss_conf)) == 0) {
+ if (ixgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
ixgbe_rss_disable(dev);
memset(&filter_info->rss_info, 0,
sizeof(struct ixgbe_rte_flow_rss_conf));
@@ -5551,7 +5679,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
- if (filter_info->rss_info.num)
+ if (filter_info->rss_info.conf.queue_num)
return -EINVAL;
/* Fill in redirection table
* The byte-swap is needed because NIC registers are in
@@ -5561,9 +5689,9 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
for (i = 0, j = 0; i < sp_reta_size; i++, j++) {
reta_reg = ixgbe_reta_reg_get(hw->mac.type, i);
- if (j == conf->num)
+ if (j == conf->conf.queue_num)
j = 0;
- reta = (reta << 8) | conf->queue[j];
+ reta = (reta << 8) | conf->conf.queue[j];
if ((i & 3) == 3)
IXGBE_WRITE_REG(hw, reta_reg,
rte_bswap32(reta));
@@ -5580,8 +5708,8 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
rss_conf.rss_key = rss_intel_key; /* Default hash key */
ixgbe_hw_rss_hash_set(hw, &rss_conf);
- rte_memcpy(&filter_info->rss_info,
- conf, sizeof(struct ixgbe_rte_flow_rss_conf));
+ if (ixgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 69c718bc..39378f75 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -129,6 +129,7 @@ struct ixgbe_rx_queue {
uint8_t rx_deferred_start; /**< not in global dev start. */
/** flags to set in mbuf when a vlan is detected. */
uint64_t vlan_flags;
+ uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* */
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** hold packets to return to application */
@@ -221,7 +222,7 @@ struct ixgbe_tx_queue {
uint8_t pthresh; /**< Prefetch threshold register. */
uint8_t hthresh; /**< Host threshold register. */
uint8_t wthresh; /**< Write-back threshold reg. */
- uint32_t txq_flags; /**< Holds flags for this TXq */
+ uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
uint32_t ctx_curr; /**< Hardware context states. */
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
@@ -240,20 +241,6 @@ struct ixgbe_txq_ops {
};
/*
- * The "simple" TX queue functions require that the following
- * flags are set when the TX queue is configured:
- * - ETH_TXQ_FLAGS_NOMULTSEGS
- * - ETH_TXQ_FLAGS_NOVLANOFFL
- * - ETH_TXQ_FLAGS_NOXSUMSCTP
- * - ETH_TXQ_FLAGS_NOXSUMUDP
- * - ETH_TXQ_FLAGS_NOXSUMTCP
- * and that the RS bit threshold (tx_rs_thresh) is at least equal to
- * RTE_PMD_IXGBE_TX_MAX_BURST.
- */
-#define IXGBE_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
-/*
* Populate descriptors with the following info:
* 1.) buffer_addr = phys_addr + headroom
* 2.) cmd_type_len = DCMD_DTYP_FLAGS | pkt_len
@@ -305,6 +292,11 @@ extern const uint32_t ptype_table_tn[IXGBE_PACKET_TYPE_TN_MAX];
uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
-
#endif /* RTE_IXGBE_INC_VECTOR */
+
+uint64_t ixgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
+uint64_t ixgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);
+
#endif /* _IXGBE_RXTX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 414840a2..a97c2718 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -278,17 +278,12 @@ static inline int
ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
- /* no header split support */
- if (rxmode->header_split == 1)
- return -1;
-
return 0;
#else
RTE_SET_USED(dev);
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e0f9998f..edb13835 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -515,7 +515,7 @@ ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
/* no csum error report support */
- if (rxmode->hw_ip_checksum == 1)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
return -1;
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
diff --git a/drivers/net/ixgbe/ixgbe_vf_representor.c b/drivers/net/ixgbe/ixgbe_vf_representor.c
new file mode 100644
index 00000000..db516d99
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_vf_representor.c
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_rxtx.h"
+#include "rte_pmd_ixgbe.h"
+
+
+static int
+ixgbe_vf_representor_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return ixgbe_dev_link_update_share(representor->pf_ethdev,
+ wait_to_complete, 0);
+}
+
+static int
+ixgbe_vf_representor_mac_addr_set(struct rte_eth_dev *ethdev,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ return rte_pmd_ixgbe_set_vf_mac_addr(
+ representor->pf_ethdev->data->port_id,
+ representor->vf_id, mac_addr);
+}
+
+static void
+ixgbe_vf_representor_dev_infos_get(struct rte_eth_dev *ethdev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(
+ representor->pf_ethdev->data->dev_private);
+
+ dev_info->device = representor->pf_ethdev->device;
+
+ dev_info->min_rx_bufsize = 1024;
+ /**< Minimum size of RX buffer. */
+ dev_info->max_rx_pktlen = 9728;
+ /**< Maximum configurable length of RX pkt. */
+ dev_info->max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ /**< Maximum number of RX queues. */
+ dev_info->max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+ /**< Maximum number of TX queues. */
+
+ dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+ /**< Maximum number of MAC addresses. */
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ /**< Device RX offload capabilities. */
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
+ /**< Device TX offload capabilities. */
+
+ dev_info->speed_capa =
+ representor->pf_ethdev->data->dev_link.link_speed;
+ /**< Supported speeds bitmap (ETH_LINK_SPEED_). */
+
+ dev_info->switch_info.name =
+ representor->pf_ethdev->device->name;
+ dev_info->switch_info.domain_id = representor->switch_domain_id;
+ dev_info->switch_info.port_id = representor->vf_id;
+}
+
+static int ixgbe_vf_representor_dev_configure(
+ __rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_rx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t rx_queue_id,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ __rte_unused struct rte_mempool *mb_pool)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_tx_queue_setup(
+ __rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t tx_queue_id,
+ __rte_unused uint16_t nb_tx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ return 0;
+}
+
+static int ixgbe_vf_representor_dev_start(__rte_unused struct rte_eth_dev *dev)
+{
+ return 0;
+}
+
+static void ixgbe_vf_representor_dev_stop(__rte_unused struct rte_eth_dev *dev)
+{
+}
+
+static int
+ixgbe_vf_representor_vlan_filter_set(struct rte_eth_dev *ethdev,
+ uint16_t vlan_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+ uint64_t vf_mask = 1ULL << representor->vf_id;
+
+ return rte_pmd_ixgbe_set_vf_vlan_filter(
+ representor->pf_ethdev->data->port_id, vlan_id, vf_mask, on);
+}
+
+static void
+ixgbe_vf_representor_vlan_strip_queue_set(struct rte_eth_dev *ethdev,
+ __rte_unused uint16_t rx_queue_id, int on)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ rte_pmd_ixgbe_set_vf_vlan_stripq(representor->pf_ethdev->data->port_id,
+ representor->vf_id, on);
+}
+
+struct eth_dev_ops ixgbe_vf_representor_dev_ops = {
+ .dev_infos_get = ixgbe_vf_representor_dev_infos_get,
+
+ .dev_start = ixgbe_vf_representor_dev_start,
+ .dev_configure = ixgbe_vf_representor_dev_configure,
+ .dev_stop = ixgbe_vf_representor_dev_stop,
+
+ .rx_queue_setup = ixgbe_vf_representor_rx_queue_setup,
+ .tx_queue_setup = ixgbe_vf_representor_tx_queue_setup,
+
+ .link_update = ixgbe_vf_representor_link_update,
+
+ .vlan_filter_set = ixgbe_vf_representor_vlan_filter_set,
+ .vlan_strip_queue_set = ixgbe_vf_representor_vlan_strip_queue_set,
+
+ .mac_addr_set = ixgbe_vf_representor_mac_addr_set,
+};
+
+static uint16_t
+ixgbe_vf_representor_rx_burst(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+ixgbe_vf_representor_tx_burst(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts, __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int
+ixgbe_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params)
+{
+ struct ixgbe_vf_representor *representor = ethdev->data->dev_private;
+
+ struct ixgbe_vf_info *vf_data;
+ struct rte_pci_device *pci_dev;
+ struct rte_eth_link *link;
+
+ if (!representor)
+ return -ENOMEM;
+
+ representor->vf_id =
+ ((struct ixgbe_vf_representor *)init_params)->vf_id;
+ representor->switch_domain_id =
+ ((struct ixgbe_vf_representor *)init_params)->switch_domain_id;
+ representor->pf_ethdev =
+ ((struct ixgbe_vf_representor *)init_params)->pf_ethdev;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(representor->pf_ethdev);
+
+ if (representor->vf_id >= pci_dev->max_vfs)
+ return -ENODEV;
+
+ ethdev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+
+ /* Set representor device ops */
+ ethdev->dev_ops = &ixgbe_vf_representor_dev_ops;
+
+ /* No data-path, but need stub Rx/Tx functions to avoid crash
+ * when testing with the likes of testpmd.
+ */
+ ethdev->rx_pkt_burst = ixgbe_vf_representor_rx_burst;
+ ethdev->tx_pkt_burst = ixgbe_vf_representor_tx_burst;
+
+ /* Set the number of queues allocated to the VF */
+ ethdev->data->nb_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+ ethdev->data->nb_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ /* Reference VF mac address from PF data structure */
+ vf_data = *IXGBE_DEV_PRIVATE_TO_P_VFDATA(
+ representor->pf_ethdev->data->dev_private);
+
+ ethdev->data->mac_addrs = (struct ether_addr *)
+ vf_data[representor->vf_id].vf_mac_addresses;
+
+ /* Link state. Inherited from PF */
+ link = &representor->pf_ethdev->data->dev_link;
+
+ ethdev->data->dev_link.link_speed = link->link_speed;
+ ethdev->data->dev_link.link_duplex = link->link_duplex;
+ ethdev->data->dev_link.link_status = link->link_status;
+ ethdev->data->dev_link.link_autoneg = link->link_autoneg;
+
+ return 0;
+}
+
+int
+ixgbe_vf_representor_uninit(struct rte_eth_dev *ethdev __rte_unused)
+{
+ return 0;
+}
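A small usage sketch (not part of this file): the switch_info that the dev_infos_get callback above fills in lets an application place a representor within its switch domain; this assumes the 18.08 rte_eth_dev_info layout.

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_switch_info(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	printf("port %u: switch %s, domain %u, switch port %u\n", port_id,
	       info.switch_info.name ? info.switch_info.name : "?",
	       info.switch_info.domain_id, info.switch_info.port_id);
}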
diff --git a/drivers/net/ixgbe/meson.build b/drivers/net/ixgbe/meson.build
index 60af0bae..02d5ef5e 100644
--- a/drivers/net/ixgbe/meson.build
+++ b/drivers/net/ixgbe/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
+
cflags += ['-DRTE_LIBRTE_IXGBE_BYPASS']
subdir('base')
@@ -17,6 +19,7 @@ sources = files(
'ixgbe_pf.c',
'ixgbe_rxtx.c',
'ixgbe_tm.c',
+ 'ixgbe_vf_representor.c',
'rte_pmd_ixgbe.c'
)
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
index d8ca8ca3..3a874f9a 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.c
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -5,6 +5,7 @@
#include <rte_ethdev_driver.h>
#include "base/ixgbe_api.h"
+#include "base/ixgbe_x550.h"
#include "ixgbe_ethdev.h"
#include "rte_pmd_ixgbe.h"
@@ -880,6 +881,34 @@ rte_pmd_ixgbe_set_tc_bw_alloc(uint16_t port,
return 0;
}
+int __rte_experimental
+rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t fctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+
+ /* If 'enable' is nonzero set the SBP bit, else clear it */
+ if (enable)
+ fctrl |= IXGBE_FCTRL_SBP;
+ else
+ fctrl &= ~(IXGBE_FCTRL_SBP);
+
+ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ return 0;
+}
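Usage is a single call per port; a minimal sketch using the signature above (the port id is illustrative):

#include <rte_pmd_ixgbe.h>

/* Set FCTRL.SBP on port 0 so frames that fail CRC etc. are delivered,
 * e.g. for diagnostics; returns 0 on success, -ENODEV or -ENOTSUP. */
static int
enable_store_bad_packets(void)
{
	return rte_pmd_ixgbe_upd_fctrl_sbp(0, 1);
}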
+
#ifdef RTE_LIBRTE_IXGBE_BYPASS
int
rte_pmd_ixgbe_bypass_init(uint16_t port_id)
@@ -1012,3 +1041,204 @@ rte_pmd_ixgbe_bypass_wd_reset(uint16_t port_id)
return ixgbe_bypass_wd_reset(dev);
}
#endif
+
+/**
+ * rte_pmd_ixgbe_acquire_swfw - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed.
+ */
+STATIC s32 rte_pmd_ixgbe_acquire_swfw(struct ixgbe_hw *hw, u32 mask)
+{
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status = IXGBE_SUCCESS;
+
+ while (--retries) {
+ status = ixgbe_acquire_swfw_semaphore(hw, mask);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Get SWFW sem failed, Status = %d\n",
+ status);
+ return status;
+ }
+ status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_SUCCESS)
+ return IXGBE_SUCCESS;
+
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ PMD_DRV_LOG(ERR, "Get PHY token failed, Status = %d\n",
+ status);
+
+ ixgbe_release_swfw_semaphore(hw, mask);
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ PMD_DRV_LOG(ERR,
+ "Retry get PHY token failed, Status=%d\n",
+ status);
+ return status;
+ }
+ }
+ PMD_DRV_LOG(ERR, "swfw acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
+ return status;
+}
+
+/**
+ * rte_pmd_ixgbe_release_swfw - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts back the shared PHY token as needed.
+ */
+STATIC void rte_pmd_ixgbe_release_swfw(struct ixgbe_hw *hw, u32 mask)
+{
+ ixgbe_put_phy_token(hw);
+ ixgbe_release_swfw_semaphore(hw, mask);
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_lock(uint16_t port)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ if (hw->bus.lan_id)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ if (rte_pmd_ixgbe_acquire_swfw(hw, swfw_mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ return IXGBE_SUCCESS;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlock(uint16_t port)
+{
+ struct rte_eth_dev *dev;
+ struct ixgbe_hw *hw;
+ u32 swfw_mask;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ if (hw->bus.lan_id)
+ swfw_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+ rte_pmd_ixgbe_release_swfw(hw, swfw_mask);
+
+ return IXGBE_SUCCESS;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ u32 i, data, command;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* Setup and write the read command */
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete.
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+ if (command & IXGBE_MSCA_MDI_COMMAND)
+ return IXGBE_ERR_PHY;
+
+ /* Read operation is complete. Get the data from MSRWD */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)data;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data)
+{
+ struct ixgbe_hw *hw;
+ u32 i, command;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+ dev = &rte_eth_devices[port];
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!hw)
+ return -ENOTSUP;
+
+ /* Put the data in the MDI single read and write data register */
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+ /* Setup and write the write command */
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (dev_type << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the access completed.
+ * The MDI Command bit will clear when the operation is
+ * complete.
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ usec_delay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if (!(command & IXGBE_MSCA_MDI_COMMAND))
+ break;
+ }
+ if (command & IXGBE_MSCA_MDI_COMMAND) {
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+ return 0;
+}
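The unlocked read/write helpers assume the caller serializes MDIO access, so a typical caller brackets them with the lock APIs added above; a hedged usage sketch (the register address and device type are whatever the target PHY requires):

#include <rte_pmd_ixgbe.h>

static int
read_phy_reg(uint16_t port, uint32_t reg, uint32_t dev_type, uint16_t *val)
{
	int ret = rte_pmd_ixgbe_mdio_lock(port);

	if (ret != 0)
		return ret;
	ret = rte_pmd_ixgbe_mdio_unlocked_read(port, reg, dev_type, val);
	rte_pmd_ixgbe_mdio_unlock(port);
	return ret;
}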
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index 11a9f334..72a941f9 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -573,6 +573,77 @@ int rte_pmd_ixgbe_bypass_wd_timeout_show(uint16_t port, uint32_t *wd_timeout);
*/
int rte_pmd_ixgbe_bypass_wd_reset(uint16_t port);
+/**
+ * Acquire swfw semaphore lock for MDIO access
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_SWFW_SYNC) If sw/fw semaphore acquisition failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_lock(uint16_t port);
+
+/**
+ * Release swfw semaphore lock used for MDIO access
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlock(uint16_t port);
+
+/**
+ * Read a PHY register using MDIO without taking the MDIO lock.
+ * The lock must be acquired separately before calling this
+ * API.
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32 bit PHY Register
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Pointer for reading PHY register data
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_PHY) If PHY read command failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_read(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t *phy_data);
+
+/**
+ * Write data to a PHY register using MDIO without taking the MDIO lock.
+ * The lock must be acquired separately before calling this
+ * API.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param reg_addr
+ * 32 bit PHY Register
+ * @param dev_type
+ * Used to define device base address
+ * @param phy_data
+ * Data to write to PHY register
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port* invalid.
+ * - (IXGBE_ERR_PHY) If PHY write command failed
+ */
+int __rte_experimental
+rte_pmd_ixgbe_mdio_unlocked_write(uint16_t port, uint32_t reg_addr,
+ uint32_t dev_type, uint16_t phy_data);
/**
* Response sent back to ixgbe driver from user app after callback
@@ -637,4 +708,17 @@ enum {
((x) > RTE_PMD_IXGBE_BYPASS_TMT_OFF && \
(x) < RTE_PMD_IXGBE_BYPASS_TMT_NUM))
+/**
+ * Enable or disable reception of all packets, including bad ones, by
+ * setting or clearing the SBP (store bad packets) bit in FCTRL.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param enable
+ * 0 to clear and nonzero to set the 'SBP' bit in the FCTRL register
+ * so that the port receives all packets
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int __rte_experimental
+rte_pmd_ixgbe_upd_fctrl_sbp(uint16_t port, int enable);
#endif /* _PMD_IXGBE_H_ */
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index bf776742..c814f96d 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -52,3 +52,13 @@ DPDK_17.08 {
rte_pmd_ixgbe_bypass_wd_timeout_show;
rte_pmd_ixgbe_bypass_wd_timeout_store;
} DPDK_17.05;
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_ixgbe_mdio_lock;
+ rte_pmd_ixgbe_mdio_unlock;
+ rte_pmd_ixgbe_mdio_unlocked_read;
+ rte_pmd_ixgbe_mdio_unlocked_write;
+ rte_pmd_ixgbe_upd_fctrl_sbp;
+};
diff --git a/drivers/net/kni/Makefile b/drivers/net/kni/Makefile
index 01eaef05..562e8d2d 100644
--- a/drivers/net/kni/Makefile
+++ b/drivers/net/kni/Makefile
@@ -10,6 +10,7 @@ LIB = librte_pmd_kni.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lpthread
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_kni
diff --git a/drivers/net/kni/meson.build b/drivers/net/kni/meson.build
new file mode 100644
index 00000000..0f784c6d
--- /dev/null
+++ b/drivers/net/kni/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# this driver can be built if-and-only-if KNI library is buildable
+build = dpdk_conf.has('RTE_LIBRTE_KNI')
+allow_experimental_apis = true
+sources = files('rte_eth_kni.c')
+deps += 'kni'
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
index dc4e65f5..085bb845 100644
--- a/drivers/net/kni/rte_eth_kni.c
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -61,10 +61,15 @@ static const struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG,
+ .link_autoneg = ETH_LINK_FIXED,
};
static int is_kni_initialized;
+static int eth_kni_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_kni_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
static uint16_t
eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -126,8 +131,8 @@ eth_kni_start(struct rte_eth_dev *dev)
internals->kni = rte_kni_alloc(mb_pool, &conf, NULL);
if (internals->kni == NULL) {
- RTE_LOG(ERR, PMD,
- "Fail to create kni interface for port: %d\n",
+ PMD_LOG(ERR,
+ "Fail to create kni interface for port: %d",
port_id);
return -1;
}
@@ -149,11 +154,12 @@ eth_kni_dev_start(struct rte_eth_dev *dev)
}
if (internals->no_request_thread == 0) {
- ret = pthread_create(&internals->thread, NULL,
+ ret = rte_ctrl_thread_create(&internals->thread,
+ "kni_handle_req", NULL,
kni_handle_request, internals);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Fail to create kni request thread\n");
+ PMD_LOG(ERR,
+ "Fail to create kni request thread");
return -1;
}
}
@@ -174,11 +180,11 @@ eth_kni_dev_stop(struct rte_eth_dev *dev)
ret = pthread_cancel(internals->thread);
if (ret)
- RTE_LOG(ERR, PMD, "Can't cancel the thread\n");
+ PMD_LOG(ERR, "Can't cancel the thread");
ret = pthread_join(internals->thread, NULL);
if (ret)
- RTE_LOG(ERR, PMD, "Can't join the thread\n");
+ PMD_LOG(ERR, "Can't join the thread");
internals->stop_thread = 0;
}
@@ -201,7 +207,7 @@ eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -337,25 +343,17 @@ eth_kni_create(struct rte_vdev_device *vdev,
struct pmd_internals *internals;
struct rte_eth_dev_data *data;
struct rte_eth_dev *eth_dev;
- const char *name;
- RTE_LOG(INFO, PMD, "Creating kni ethdev on numa socket %u\n",
+ PMD_LOG(INFO, "Creating kni ethdev on numa socket %u",
numa_node);
- name = rte_vdev_device_name(vdev);
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- return NULL;
-
/* reserve an ethdev entry */
eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals));
- if (eth_dev == NULL) {
- rte_free(data);
+ if (!eth_dev)
return NULL;
- }
internals = eth_dev->data->dev_private;
- rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data = eth_dev->data;
data->nb_rx_queues = 1;
data->nb_tx_queues = 1;
data->dev_link = pmd_link;
@@ -363,7 +361,6 @@ eth_kni_create(struct rte_vdev_device *vdev,
eth_random_addr(internals->eth_addr.addr_bytes);
- eth_dev->data = data;
eth_dev->dev_ops = &eth_kni_ops;
internals->no_request_thread = args->no_request_thread;
@@ -412,7 +409,21 @@ eth_kni_probe(struct rte_vdev_device *vdev)
name = rte_vdev_device_name(vdev);
params = rte_vdev_device_args(vdev);
- RTE_LOG(INFO, PMD, "Initializing eth_kni for %s\n", name);
+ PMD_LOG(INFO, "Initializing eth_kni for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &eth_kni_ops;
+ eth_dev->device = &vdev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
ret = eth_kni_kvargs_process(&args, params);
if (ret < 0)
@@ -429,6 +440,7 @@ eth_kni_probe(struct rte_vdev_device *vdev)
eth_dev->rx_pkt_burst = eth_kni_rx;
eth_dev->tx_pkt_burst = eth_kni_tx;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
kni_uninit:
@@ -446,7 +458,7 @@ eth_kni_remove(struct rte_vdev_device *vdev)
const char *name;
name = rte_vdev_device_name(vdev);
- RTE_LOG(INFO, PMD, "Un-Initializing eth_kni for %s\n", name);
+ PMD_LOG(INFO, "Un-Initializing eth_kni for %s", name);
/* find the ethdev entry */
eth_dev = rte_eth_dev_allocated(name);
@@ -459,7 +471,6 @@ eth_kni_remove(struct rte_vdev_device *vdev)
rte_kni_release(internals->kni);
rte_free(internals);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
@@ -477,3 +488,10 @@ static struct rte_vdev_driver eth_kni_drv = {
RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");
+
+RTE_INIT(eth_kni_init_log)
+{
+ eth_kni_logtype = rte_log_register("pmd.net.kni");
+ if (eth_kni_logtype >= 0)
+ rte_log_set_level(eth_kni_logtype, RTE_LOG_NOTICE);
+}
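Once the 'pmd.net.kni' log type is registered, its verbosity can be raised at runtime instead of at build time; a sketch, relying on rte_log_register() returning the existing id when the name is already known (an assumption about the rte_log API):

#include <rte_log.h>

static void
kni_pmd_enable_debug_logs(void)
{
	int type = rte_log_register("pmd.net.kni");

	if (type >= 0)
		rte_log_set_level(type, RTE_LOG_DEBUG);
}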
diff --git a/drivers/net/liquidio/Makefile b/drivers/net/liquidio/Makefile
index fc5f18ad..f1092851 100644
--- a/drivers/net/liquidio/Makefile
+++ b/drivers/net/liquidio/Makefile
@@ -15,7 +15,7 @@ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
-EXPORT_MAP := rte_pmd_lio_version.map
+EXPORT_MAP := rte_pmd_liquidio_version.map
LIBABIVER := 1
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index e1a20cd6..93e89007 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -373,8 +373,6 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
struct lio_device *lio_dev = LIO_DEV(eth_dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- devinfo->pci_dev = pci_dev;
-
switch (pci_dev->id.subsystem_device_id) {
/* CN23xx 10G cards */
case PCI_SUBSYS_DEV_ID_CN2350_210:
@@ -478,9 +476,11 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
}
if (frame_len > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
eth_dev->data->mtu = mtu;
@@ -904,32 +904,6 @@ lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
return 0;
}
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param eth_dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &eth_dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static uint64_t
lio_hweight64(uint64_t w)
{
@@ -949,23 +923,19 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
int wait_to_complete __rte_unused)
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
/* Initialize */
+ memset(&link, 0, sizeof(link));
link.link_status = ETH_LINK_DOWN;
link.link_speed = ETH_SPEED_NUM_NONE;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
link.link_autoneg = ETH_LINK_AUTONEG;
- memset(&old, 0, sizeof(old));
/* Return what we found */
if (lio_dev->linfo.link.s.link_up == 0) {
/* Interface is down */
- if (lio_dev_atomic_write_link_status(eth_dev, &link))
- return -1;
- if (link.link_status == old.link_status)
- return -1;
- return 0;
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
link.link_status = ETH_LINK_UP; /* Interface is up */
@@ -982,13 +952,7 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
link.link_duplex = ETH_LINK_HALF_DUPLEX;
}
- if (lio_dev_atomic_write_link_status(eth_dev, &link))
- return -1;
-
- if (link.link_status == old.link_status)
- return -1;
-
- return 0;
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
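rte_eth_linkstatus_set() replaces the open-coded cmpset loop deleted above; roughly, and hedged as an assumption about the 18.08 driver-only API, it atomically swaps the 64-bit link word and reports whether the link status changed:

#include <rte_atomic.h>
#include <rte_ethdev_driver.h>

/* Simplified sketch of the helper's behavior, not its exact source. */
static int
linkstatus_set_sketch(struct rte_eth_dev *dev,
		      const struct rte_eth_link *new_link)
{
	volatile uint64_t *dst = (volatile uint64_t *)&dev->data->dev_link;
	union { uint64_t val64; struct rte_eth_link link; } orig;

	orig.val64 = rte_atomic64_exchange(dst, *(const uint64_t *)new_link);
	return orig.link.link_status == new_link->link_status ? -1 : 0;
}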
/**
@@ -1441,6 +1405,11 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
/* Configure RSS if device configured with multiple RX queues. */
lio_dev_mq_rx_configure(eth_dev);
+ /* Before updating the link info,
+ * linfo.link.link_status64 must be set to 0.
+ */
+ lio_dev->linfo.link.link_status64 = 0;
+
/* start polling for lsc */
ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
lio_sync_link_state_check,
@@ -2146,19 +2115,8 @@ static int
lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- struct rte_eth_dev *eth_dev;
- int ret;
-
- eth_dev = rte_eth_dev_pci_allocate(pci_dev,
- sizeof(struct lio_device));
- if (eth_dev == NULL)
- return -ENOMEM;
-
- ret = lio_eth_dev_init(eth_dev);
- if (ret)
- rte_eth_dev_pci_release(eth_dev);
-
- return ret;
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
+ lio_eth_dev_init);
}
static int
@@ -2185,9 +2143,7 @@ RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
-RTE_INIT(lio_init_log);
-static void
-lio_init_log(void)
+RTE_INIT(lio_init_log)
{
lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
if (lio_logtype_init >= 0)
diff --git a/drivers/net/liquidio/meson.build b/drivers/net/liquidio/meson.build
new file mode 100644
index 00000000..9ae48e21
--- /dev/null
+++ b/drivers/net/liquidio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('base/lio_23xx_vf.c',
+ 'base/lio_mbox.c',
+ 'lio_ethdev.c',
+ 'lio_rxtx.c')
+includes += include_directories('base')
diff --git a/drivers/net/liquidio/rte_pmd_lio_version.map b/drivers/net/liquidio/rte_pmd_liquidio_version.map
index 8591cc0b..8591cc0b 100644
--- a/drivers/net/liquidio/rte_pmd_lio_version.map
+++ b/drivers/net/liquidio/rte_pmd_liquidio_version.map
diff --git a/drivers/net/meson.build b/drivers/net/meson.build
index 704cbe3c..9c28ed4d 100644
--- a/drivers/net/meson.build
+++ b/drivers/net/meson.build
@@ -1,10 +1,33 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['af_packet', 'bonding',
- 'e1000', 'fm10k', 'i40e', 'ixgbe',
+drivers = ['af_packet',
+ 'ark',
+ 'avp',
+ 'axgbe', 'bonding',
+ 'bnx2x',
+ 'bnxt',
+ 'cxgbe',
+ 'dpaa', 'dpaa2',
+ 'e1000',
+ 'ena',
+ 'enic',
+ 'failsafe',
+ 'fm10k', 'i40e',
+ 'ifc',
+ 'ixgbe',
+ 'kni',
+ 'liquidio',
+ 'mvpp2',
+ 'netvsc',
+ 'nfp',
'null', 'octeontx', 'pcap', 'ring',
- 'sfc', 'thunderx']
+ 'sfc',
+ 'softnic',
+ 'szedata2',
+ 'thunderx',
+ 'vhost',
+ 'virtio']
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
std_deps += ['bus_pci'] # very many PMDs depend on PCI, so make std
std_deps += ['bus_vdev'] # same with vdev bus
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index cc800493..92e93225 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2012 6WIND S.A.
-# Copyright 2012 Mellanox
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2012 Mellanox Technologies, Ltd
include $(RTE_SDK)/mk/rte.vars.mk
@@ -64,6 +37,7 @@ CFLAGS += -D_BSD_SOURCE
CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
CFLAGS += -DMLX4_GLUE='"$(LIB_GLUE)"'
CFLAGS += -DMLX4_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
@@ -95,10 +69,6 @@ else
CFLAGS += -DNDEBUG -UPEDANTIC
endif
-ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE
-CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE)
-endif
-
include $(RTE_SDK)/mk/rte.lib.mk
# Generate and clean-up mlx4_autoconf.h.
@@ -115,6 +85,11 @@ mlx4_autoconf.h.new: FORCE
mlx4_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q : > '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX4_WQE_LSO_SEG \
+ infiniband/mlx4dv.h \
+ type 'struct mlx4_wqe_lso_seg' \
+ $(AUTOCONF_OUTPUT)
# Create mlx4_autoconf.h or update it in case it differs from the new one.
@@ -132,10 +107,15 @@ ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS),y)
$(LIB): $(LIB_GLUE)
+ifeq ($(LINK_USING_CC),1)
+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
+else
+GLUE_LDFLAGS := $(LDFLAGS)
+endif
$(LIB_GLUE): mlx4_glue.o
- $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \
+ $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
-Wl,-h,$(LIB_GLUE) \
- -s -shared -o $@ $< -libverbs -lmlx4
+ -shared -o $@ $< -libverbs -lmlx4
mlx4_glue.o: mlx4_autoconf.h
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index ee93dafe..defc0d4b 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2012 6WIND S.A.
- * Copyright 2012 Mellanox
+ * Copyright 2012 Mellanox Technologies, Ltd
*/
/**
@@ -44,9 +44,15 @@
#include "mlx4.h"
#include "mlx4_glue.h"
#include "mlx4_flow.h"
+#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
+struct mlx4_dev_list mlx4_mem_event_cb_list =
+ LIST_HEAD_INITIALIZER(mlx4_mem_event_cb_list);
+
+rte_rwlock_t mlx4_mem_event_rwlock = RTE_RWLOCK_INITIALIZER;
+
/** Configuration structure for device arguments. */
struct mlx4_conf {
struct {
@@ -61,6 +67,8 @@ const char *pmd_mlx4_init_params[] = {
NULL,
};
+static void mlx4_dev_stop(struct rte_eth_dev *dev);
+
/**
* DPDK callback for Ethernet device configuration.
*
@@ -123,6 +131,9 @@ mlx4_dev_start(struct rte_eth_dev *dev)
(void *)dev, strerror(-ret));
goto err;
}
+#ifndef NDEBUG
+ mlx4_mr_dump_dev(dev);
+#endif
ret = mlx4_rxq_intr_enable(priv);
if (ret) {
ERROR("%p: interrupt handler installation failed",
@@ -143,8 +154,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
dev->rx_pkt_burst = mlx4_rx_burst;
return 0;
err:
- /* Rollback. */
- priv->started = 0;
+ mlx4_dev_stop(dev);
return ret;
}
@@ -194,10 +204,12 @@ mlx4_dev_close(struct rte_eth_dev *dev)
dev->tx_pkt_burst = mlx4_tx_burst_removed;
rte_wmb();
mlx4_flow_clean(priv);
+ mlx4_rss_deinit(priv);
for (i = 0; i != dev->data->nb_rx_queues; ++i)
mlx4_rx_queue_release(dev->data->rx_queues[i]);
for (i = 0; i != dev->data->nb_tx_queues; ++i)
mlx4_tx_queue_release(dev->data->tx_queues[i]);
+ mlx4_mr_release(dev);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(mlx4_glue->dealloc_pd(priv->pd));
@@ -385,6 +397,99 @@ free_kvlist:
return ret;
}
+/**
+ * Interpret RSS capabilities reported by device.
+ *
+ * This function returns the set of usable Verbs RSS hash fields, kernel
+ * quirks taken into account.
+ *
+ * @param ctx
+ * Verbs context.
+ * @param pd
+ * Verbs protection domain.
+ * @param device_attr_ex
+ * Extended device attributes to interpret.
+ *
+ * @return
+ * Usable RSS hash fields mask in Verbs format.
+ */
+static uint64_t
+mlx4_hw_rss_sup(struct ibv_context *ctx, struct ibv_pd *pd,
+ struct ibv_device_attr_ex *device_attr_ex)
+{
+ uint64_t hw_rss_sup = device_attr_ex->rss_caps.rx_hash_fields_mask;
+ struct ibv_cq *cq = NULL;
+ struct ibv_wq *wq = NULL;
+ struct ibv_rwq_ind_table *ind = NULL;
+ struct ibv_qp *qp = NULL;
+
+ if (!hw_rss_sup) {
+ WARN("no RSS capabilities reported; disabling support for UDP"
+ " RSS and inner VXLAN RSS");
+ return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
+ }
+ if (!(hw_rss_sup & IBV_RX_HASH_INNER))
+ return hw_rss_sup;
+ /*
+ * Although reported as supported, missing code in some Linux
+ * versions (v4.15, v4.16) prevents the creation of hash QPs with
+ * inner capability.
+ *
+ * There is no choice but to attempt to instantiate a temporary RSS
+ * context in order to confirm its support.
+ */
+ cq = mlx4_glue->create_cq(ctx, 1, NULL, NULL, 0);
+ wq = cq ? mlx4_glue->create_wq
+ (ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = pd,
+ .cq = cq,
+ }) : NULL;
+ ind = wq ? mlx4_glue->create_rwq_ind_table
+ (ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = 0,
+ .ind_tbl = &wq,
+ .comp_mask = 0,
+ }) : NULL;
+ qp = ind ? mlx4_glue->create_qp_ex
+ (ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .comp_mask =
+ (IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_RX_HASH |
+ IBV_QP_INIT_ATTR_IND_TABLE),
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .pd = pd,
+ .rwq_ind_tbl = ind,
+ .rx_hash_conf = {
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .rx_hash_key = mlx4_rss_hash_key_default,
+ .rx_hash_fields_mask = hw_rss_sup,
+ },
+ }) : NULL;
+ if (!qp) {
+ WARN("disabling unusable inner RSS capability due to kernel"
+ " quirk");
+ hw_rss_sup &= ~IBV_RX_HASH_INNER;
+ } else {
+ claim_zero(mlx4_glue->destroy_qp(qp));
+ }
+ if (ind)
+ claim_zero(mlx4_glue->destroy_rwq_ind_table(ind));
+ if (wq)
+ claim_zero(mlx4_glue->destroy_wq(wq));
+ if (cq)
+ claim_zero(mlx4_glue->destroy_cq(cq));
+ return hw_rss_sup;
+}
+
static struct rte_pci_driver mlx4_driver;
/**
@@ -470,14 +575,14 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_dev = list[i];
DEBUG("device opened");
if (mlx4_glue->query_device(attr_ctx, &device_attr)) {
- rte_errno = ENODEV;
+ err = ENODEV;
goto error;
}
INFO("%u port(s) detected", device_attr.phys_port_cnt);
conf.ports.present |= (UINT64_C(1) << device_attr.phys_port_cnt) - 1;
if (mlx4_args(pci_dev->device.devargs, &conf)) {
ERROR("failed to process device arguments");
- rte_errno = EINVAL;
+ err = EINVAL;
goto error;
}
/* Use all ports when none are defined */
@@ -485,7 +590,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
conf.ports.enabled = conf.ports.present;
/* Retrieve extended device attributes. */
if (mlx4_glue->query_device_ex(attr_ctx, NULL, &device_attr_ex)) {
- rte_errno = ENODEV;
+ err = ENODEV;
goto error;
}
assert(device_attr.max_sge >= MLX4_MAX_SGE);
@@ -504,18 +609,18 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("using port %u", port);
ctx = mlx4_glue->open_device(ibv_dev);
if (ctx == NULL) {
- rte_errno = ENODEV;
+ err = ENODEV;
goto port_error;
}
/* Check port status. */
err = mlx4_glue->query_port(ctx, port, &port_attr);
if (err) {
- rte_errno = err;
- ERROR("port query failed: %s", strerror(rte_errno));
+ err = ENODEV;
+ ERROR("port query failed: %s", strerror(err));
goto port_error;
}
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
- rte_errno = ENOTSUP;
+ err = ENOTSUP;
ERROR("port %d is not configured in Ethernet mode",
port);
goto port_error;
@@ -525,15 +630,16 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
port, mlx4_glue->port_state_str(port_attr.state),
port_attr.state);
/* Make asynchronous FD non-blocking to handle interrupts. */
- if (mlx4_fd_set_non_blocking(ctx->async_fd) < 0) {
+ err = mlx4_fd_set_non_blocking(ctx->async_fd);
+ if (err) {
ERROR("cannot make asynchronous FD non-blocking: %s",
- strerror(rte_errno));
+ strerror(err));
goto port_error;
}
/* Allocate protection domain. */
pd = mlx4_glue->alloc_pd(ctx);
if (pd == NULL) {
- rte_errno = ENOMEM;
+ err = ENOMEM;
ERROR("PD allocation failure");
goto port_error;
}
@@ -542,7 +648,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
sizeof(*priv),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- rte_errno = ENOMEM;
+ err = ENOMEM;
ERROR("priv allocation failure");
goto port_error;
}
@@ -562,26 +668,32 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
(device_attr.vendor_part_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO);
DEBUG("L2 tunnel checksum offloads are %ssupported",
- (priv->hw_csum_l2tun ? "" : "not "));
- priv->hw_rss_sup = device_attr_ex.rss_caps.rx_hash_fields_mask;
- if (!priv->hw_rss_sup) {
- WARN("no RSS capabilities reported; disabling support"
- " for UDP RSS and inner VXLAN RSS");
- /* Fake support for all possible RSS hash fields. */
- priv->hw_rss_sup = ~UINT64_C(0);
- priv->hw_rss_sup = mlx4_conv_rss_hf(priv, -1);
- /* Filter out known unsupported fields. */
- priv->hw_rss_sup &=
- ~(uint64_t)(IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP |
- IBV_RX_HASH_INNER);
- }
+ priv->hw_csum_l2tun ? "" : "not ");
+ priv->hw_rss_sup = mlx4_hw_rss_sup(priv->ctx, priv->pd,
+ &device_attr_ex);
DEBUG("supported RSS hash fields mask: %016" PRIx64,
priv->hw_rss_sup);
+ priv->hw_rss_max_qps =
+ device_attr_ex.rss_caps.max_rwq_indirection_table_size;
+ DEBUG("MAX RSS queues %d", priv->hw_rss_max_qps);
+ priv->hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_SCATTER_FCS);
+ DEBUG("FCS stripping toggling is %ssupported",
+ priv->hw_fcs_strip ? "" : "not ");
+ priv->tso =
+ ((device_attr_ex.tso_caps.max_tso > 0) &&
+ (device_attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (priv->tso)
+ priv->tso_max_payload_sz =
+ device_attr_ex.tso_caps.max_tso;
+ DEBUG("TSO is %ssupported",
+ priv->tso ? "" : "not ");
/* Configure the first MAC address by default. */
- if (mlx4_get_mac(priv, &mac.addr_bytes)) {
+ err = mlx4_get_mac(priv, &mac.addr_bytes);
+ if (err) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
- " (rte_errno: %s)", strerror(rte_errno));
+ " (error: %s)", strerror(err));
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
@@ -614,8 +726,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev = rte_eth_dev_allocate(name);
}
if (eth_dev == NULL) {
+ err = ENOMEM;
ERROR("can not allocate rte ethdev");
- rte_errno = ENOMEM;
goto port_error;
}
eth_dev->data->dev_private = priv;
@@ -649,6 +761,24 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
/* Update link status once if waiting for LSC. */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
mlx4_link_update(eth_dev, 0);
+ /*
+ * Once the device is added to the memory event callback
+ * list, its global MR cache table cannot be expanded on
+ * the fly because of the deadlock risk. If it overflows,
+ * lookups fall back to a linear search of the MR list,
+ * which is slow.
+ */
+ err = mlx4_mr_btree_init(&priv->mr.cache,
+ MLX4_MR_BTREE_CACHE_N * 2,
+ eth_dev->device->numa_node);
+ if (err) {
+ /* rte_errno is already set. */
+ goto port_error;
+ }
+ /* Add device to memory callback list. */
+ rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
+ LIST_INSERT_HEAD(&mlx4_mem_event_cb_list, priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
+ rte_eth_dev_probing_finish(eth_dev);
continue;
port_error:
rte_free(priv);
@@ -660,8 +790,6 @@ port_error:
rte_eth_dev_release_port(eth_dev);
break;
}
- if (i == device_attr.phys_port_cnt)
- return 0;
/*
* XXX if something went wrong in the loop above, there is a resource
* leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
@@ -673,8 +801,9 @@ error:
claim_zero(mlx4_glue->close_device(attr_ctx));
if (list)
mlx4_glue->free_device_list(list);
- assert(rte_errno >= 0);
- return -rte_errno;
+ if (err)
+ rte_errno = err;
+ return -err;
}
static const struct rte_pci_id mlx4_pci_id_map[] = {
@@ -708,11 +837,53 @@ static struct rte_pci_driver mlx4_driver = {
#ifdef RTE_LIBRTE_MLX4_DLOPEN_DEPS
/**
+ * Suffix RTE_EAL_PMD_PATH with "-glue".
+ *
+ * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * suffixing its last component.
+ *
+ * @param[out] buf
+ * Output buffer, should be large enough, otherwise NULL is returned.
+ * @param size
+ * Size of @p buf.
+ *
+ * @return
+ * Pointer to @p buf, or NULL in case the suffix cannot be appended.
+ */
+static char *
+mlx4_glue_path(char *buf, size_t size)
+{
+ static const char *const bad[] = { "/", ".", "..", NULL };
+ const char *path = RTE_EAL_PMD_PATH;
+ size_t len = strlen(path);
+ size_t off;
+ int i;
+
+ while (len && path[len - 1] == '/')
+ --len;
+ for (off = len; off && path[off - 1] != '/'; --off)
+ ;
+ for (i = 0; bad[i]; ++i)
+ if (!strncmp(path + off, bad[i], (int)(len - off)))
+ goto error;
+ i = snprintf(buf, size, "%.*s-glue", (int)len, path);
+ if (i == -1 || (size_t)i >= size)
+ goto error;
+ return buf;
+error:
+ ERROR("unable to append \"-glue\" to last component of"
+ " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
+ " please re-configure DPDK");
+ return NULL;
+}
+
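For illustration, a standalone sketch of the suffixing rule implemented above; the input path below is a made-up example, not an actual RTE_EAL_PMD_PATH value:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *path = "/usr/lib/dpdk/pmds//"; /* hypothetical input */
	size_t len = strlen(path);
	char buf[64];

	/* Drop trailing slashes, then suffix the last component. */
	while (len && path[len - 1] == '/')
		--len;
	snprintf(buf, sizeof(buf), "%.*s-glue", (int)len, path);
	puts(buf); /* prints "/usr/lib/dpdk/pmds-glue" */
	return 0;
}
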
+/**
* Initialization routine for run-time dependency on rdma-core.
*/
static int
mlx4_glue_init(void)
{
+ char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
const char *path[] = {
/*
* A basic security check is necessary before trusting
@@ -720,7 +891,13 @@ mlx4_glue_init(void)
*/
(geteuid() == getuid() && getegid() == getgid() ?
getenv("MLX4_GLUE_PATH") : NULL),
- RTE_EAL_PMD_PATH,
+ /*
+ * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
+ * variant, otherwise let dlopen() look up libraries on its
+ * own.
+ */
+ (*RTE_EAL_PMD_PATH ?
+ mlx4_glue_path(glue_path, sizeof(glue_path)) : ""),
};
unsigned int i = 0;
void *handle = NULL;
@@ -790,9 +967,7 @@ glue_error:
/**
* Driver initialization routine.
*/
-RTE_INIT(rte_mlx4_pmd_init);
-static void
-rte_mlx4_pmd_init(void)
+RTE_INIT(rte_mlx4_pmd_init)
{
/*
* MLX4_DEVICE_FATAL_CLEANUP tells ibv_destroy functions we
@@ -828,6 +1003,8 @@ rte_mlx4_pmd_init(void)
}
mlx4_glue->fork_init();
rte_pci_register(&mlx4_driver);
+ rte_mem_event_callback_register("MLX4_MEM_EVENT_CB",
+ mlx4_mr_mem_event_cb, NULL);
}
RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 19c8a223..e6fb934f 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2012 6WIND S.A.
- * Copyright 2012 Mellanox
+ * Copyright 2012 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX4_H_
@@ -23,7 +23,9 @@
#include <rte_ether.h>
#include <rte_interrupts.h>
#include <rte_mempool.h>
-#include <rte_spinlock.h>
+#include <rte_rwlock.h>
+
+#include "mlx4_mr.h"
#ifndef IBV_RX_HASH_INNER
/** This is not necessarily defined by supported RDMA core versions. */
@@ -42,20 +44,12 @@
/** Fixed RSS hash key size in bytes. Cannot be modified. */
#define MLX4_RSS_HASH_KEY_SIZE 40
-/**
- * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
- * from which buffers are to be transmitted will have to be mapped by this
- * driver to their own Memory Region (MR). This is a slow operation.
- *
- * This value is always 1 for RX queues.
- */
-#ifndef MLX4_PMD_TX_MP_CACHE
-#define MLX4_PMD_TX_MP_CACHE 8
-#endif
-
/** Interrupt alarm timeout value in microseconds. */
#define MLX4_INTR_ALARM_TIMEOUT 100000
+/* Maximum packet headers size (L2+L3+L4) for TSO. */
+#define MLX4_MAX_TSO_HEADER 192
+
/** Port parameter. */
#define MLX4_PMD_PORT_KVARG "port"
@@ -78,20 +72,12 @@ struct rxq;
struct txq;
struct rte_flow;
-/** Memory region descriptor. */
-struct mlx4_mr {
- LIST_ENTRY(mlx4_mr) next; /**< Next entry in list. */
- uintptr_t start; /**< Base address for memory region. */
- uintptr_t end; /**< End address for memory region. */
- uint32_t lkey; /**< L_Key extracted from @p mr. */
- uint32_t refcnt; /**< Reference count for this object. */
- struct priv *priv; /**< Back pointer to private data. */
- struct ibv_mr *mr; /**< Memory region associated with @p mp. */
- struct rte_mempool *mp; /**< Target memory pool (mempool). */
-};
+LIST_HEAD(mlx4_dev_list, priv);
+LIST_HEAD(mlx4_mr_list, mlx4_mr);
/** Private data structure. */
struct priv {
+ LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
struct rte_eth_dev *dev; /**< Ethernet device. */
struct ibv_context *ctx; /**< Verbs context. */
struct ibv_device_attr device_attr; /**< Device properties. */
@@ -103,15 +89,25 @@ struct priv {
uint32_t vf:1; /**< This is a VF device. */
uint32_t intr_alarm:1; /**< An interrupt alarm is scheduled. */
uint32_t isolated:1; /**< Toggle isolated mode. */
+ uint32_t rss_init:1; /**< Common RSS context is initialized. */
uint32_t hw_csum:1; /**< Checksum offload is supported. */
uint32_t hw_csum_l2tun:1; /**< Checksum support for L2 tunnels. */
+ uint32_t hw_fcs_strip:1; /**< FCS stripping toggling is supported. */
+ uint32_t tso:1; /**< Transmit segmentation offload is supported. */
+ uint32_t tso_max_payload_sz; /**< Max supported TSO payload size. */
+ uint32_t hw_rss_max_qps; /**< Max Rx Queues supported by RSS. */
uint64_t hw_rss_sup; /**< Supported RSS hash fields (Verbs format). */
struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
+ struct {
+ uint32_t dev_gen; /* Generation number to flush local caches. */
+ rte_rwlock_t rwlock; /* MR Lock. */
+ struct mlx4_mr_btree cache; /* Global MR cache table. */
+ struct mlx4_mr_list mr_list; /* Registered MR list. */
+ struct mlx4_mr_list mr_free_list; /* Freed MR list. */
+ } mr;
LIST_HEAD(, mlx4_rss) rss; /**< Shared targets for Rx flow rules. */
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
- LIST_HEAD(, mlx4_mr) mr; /**< Registered memory regions. */
- rte_spinlock_t mr_lock; /**< Lock for @p mr access. */
struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
/**< Configured MAC addresses. Unused entries are zeroed. */
};
@@ -131,7 +127,7 @@ void mlx4_allmulticast_disable(struct rte_eth_dev *dev);
void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq);
-void mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
+int mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
int mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void mlx4_stats_reset(struct rte_eth_dev *dev);
@@ -154,11 +150,4 @@ void mlx4_rxq_intr_disable(struct priv *priv);
int mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
int mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
-/* mlx4_mr.c */
-
-struct mlx4_mr *mlx4_mr_get(struct priv *priv, struct rte_mempool *mp);
-void mlx4_mr_put(struct mlx4_mr *mr);
-uint32_t mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp,
- uint32_t i);
-
#endif /* RTE_PMD_MLX4_H_ */
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 3bc69273..30deb3ef 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -39,6 +39,7 @@
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_pci.h>
+#include <rte_string_fns.h>
#include "mlx4.h"
#include "mlx4_flow.h"
@@ -120,7 +121,7 @@ try_dev_id:
goto try_dev_id;
dev_port_prev = dev_port;
if (dev_port == (priv->port - 1u))
- snprintf(match, sizeof(match), "%s", name);
+ strlcpy(match, name, sizeof(match));
}
closedir(dir);
if (match[0] == '\0') {
@@ -132,167 +133,6 @@ try_dev_id:
}
/**
- * Read from sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[out] buf
- * Data output buffer.
- * @param size
- * Buffer size.
- *
- * @return
- * Number of bytes read on success, negative errno value otherwise and
- * rte_errno is set.
- */
-static int
-mlx4_sysfs_read(const struct priv *priv, const char *entry,
- char *buf, size_t size)
-{
- char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
-
- ret = mlx4_get_ifname(priv, &ifname);
- if (ret)
- return ret;
-
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "rb");
- if (file == NULL) {
- rte_errno = errno;
- return -rte_errno;
- }
- ret = fread(buf, 1, size, file);
- if ((size_t)ret < size && ferror(file)) {
- rte_errno = EIO;
- ret = -rte_errno;
- } else {
- ret = size;
- }
- fclose(file);
- return ret;
-}
-
-/**
- * Write to sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[in] buf
- * Data buffer.
- * @param size
- * Buffer size.
- *
- * @return
- * Number of bytes written on success, negative errno value otherwise and
- * rte_errno is set.
- */
-static int
-mlx4_sysfs_write(const struct priv *priv, const char *entry,
- char *buf, size_t size)
-{
- char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
-
- ret = mlx4_get_ifname(priv, &ifname);
- if (ret)
- return ret;
-
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "wb");
- if (file == NULL) {
- rte_errno = errno;
- return -rte_errno;
- }
- ret = fwrite(buf, 1, size, file);
- if ((size_t)ret < size || ferror(file)) {
- rte_errno = EIO;
- ret = -rte_errno;
- } else {
- ret = size;
- }
- fclose(file);
- return ret;
-}
-
-/**
- * Get unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param[out] value
- * Value output buffer.
- *
- * @return
- * 0 on success, negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
-{
- int ret;
- unsigned long value_ret;
- char value_str[32];
-
- ret = mlx4_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret < 0) {
- DEBUG("cannot read %s value from sysfs: %s",
- name, strerror(rte_errno));
- return ret;
- }
- value_str[ret] = '\0';
- errno = 0;
- value_ret = strtoul(value_str, NULL, 0);
- if (errno) {
- rte_errno = errno;
- DEBUG("invalid %s value `%s': %s", name, value_str,
- strerror(rte_errno));
- return -rte_errno;
- }
- *value = value_ret;
- return 0;
-}
-
-/**
- * Set unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param value
- * Value to set.
- *
- * @return
- * 0 on success, negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx4_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
-{
- int ret;
- MKSTR(value_str, "%lu", value);
-
- ret = mlx4_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret < 0) {
- DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
- name, value_str, value, strerror(rte_errno));
- return ret;
- }
- return 0;
-}
-
-/**
* Perform ifreq ioctl() on associated Ethernet device.
*
* @param[in] priv
@@ -361,12 +201,12 @@ mlx4_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
int
mlx4_mtu_get(struct priv *priv, uint16_t *mtu)
{
- unsigned long ulong_mtu = 0;
- int ret = mlx4_get_sysfs_ulong(priv, "mtu", &ulong_mtu);
+ struct ifreq request;
+ int ret = mlx4_ifreq(priv, SIOCGIFMTU, &request);
if (ret)
return ret;
- *mtu = ulong_mtu;
+ *mtu = request.ifr_mtu;
return 0;
}
@@ -385,20 +225,13 @@ int
mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
- uint16_t new_mtu;
- int ret = mlx4_set_sysfs_ulong(priv, "mtu", mtu);
+ struct ifreq request = { .ifr_mtu = mtu, };
+ int ret = mlx4_ifreq(priv, SIOCSIFMTU, &request);
if (ret)
return ret;
- ret = mlx4_mtu_get(priv, &new_mtu);
- if (ret)
- return ret;
- if (new_mtu == mtu) {
- priv->mtu = mtu;
- return 0;
- }
- rte_errno = EINVAL;
- return -rte_errno;
+ priv->mtu = mtu;
+ return 0;
}
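
The sysfs helpers removed above are replaced by plain ifreq ioctls throughout this file. A self-contained sketch of the SIOCGIFMTU call now underlying mlx4_mtu_get(); the interface name is hypothetical:

#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq req;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	strncpy(req.ifr_name, "eth0", IFNAMSIZ - 1); /* hypothetical */
	if (ioctl(fd, SIOCGIFMTU, &req) == 0)
		printf("mtu=%d\n", req.ifr_mtu);
	close(fd);
	return 0;
}
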
/**
@@ -417,14 +250,14 @@ mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
static int
mlx4_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
{
- unsigned long tmp = 0;
- int ret = mlx4_get_sysfs_ulong(priv, "flags", &tmp);
+ struct ifreq request;
+ int ret = mlx4_ifreq(priv, SIOCGIFFLAGS, &request);
if (ret)
return ret;
- tmp &= keep;
- tmp |= (flags & (~keep));
- return mlx4_set_sysfs_ulong(priv, "flags", tmp);
+ request.ifr_flags &= keep;
+ request.ifr_flags |= flags & ~keep;
+ return mlx4_ifreq(priv, SIOCSIFFLAGS, &request);
}
/**
@@ -701,11 +534,14 @@ mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
* Pointer to Ethernet device structure.
* @param mac_addr
* MAC address to register.
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
*/
-void
+int
mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
- mlx4_mac_addr_add(dev, mac_addr, 0, 0);
+ return mlx4_mac_addr_add(dev, mac_addr, 0, 0);
}
/**
@@ -723,7 +559,6 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
info->max_rx_pktlen = 65536;
@@ -752,6 +587,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
ETH_LINK_SPEED_20G |
ETH_LINK_SPEED_40G |
ETH_LINK_SPEED_56G;
+ info->flow_type_rss_offloads = mlx4_conv_rss_types(priv, 0, 1);
}
/**
@@ -878,7 +714,7 @@ mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
}
link_speed = ethtool_cmd_speed(&edata);
if (link_speed == -1)
- dev_link.link_speed = 0;
+ dev_link.link_speed = ETH_SPEED_NUM_NONE;
else
dev_link.link_speed = link_speed;
dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 2d55bfe0..b40e7e5c 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -76,69 +76,94 @@ struct mlx4_drop {
};
/**
- * Convert DPDK RSS hash fields to their Verbs equivalent.
+ * Convert supported RSS hash field types between DPDK and Verbs formats.
*
- * This function returns the supported (default) set when @p rss_hf has
- * special value (uint64_t)-1.
+ * This function returns the supported (default) set when @p types has
+ * special value 0.
*
* @param priv
* Pointer to private structure.
- * @param rss_hf
- * Hash fields in DPDK format (see struct rte_eth_rss_conf).
+ * @param types
+ * Depending on @p verbs_to_dpdk, hash types in either DPDK (see struct
+ * rte_eth_rss_conf) or Verbs format.
+ * @param verbs_to_dpdk
+ * A zero value converts @p types from DPDK to Verbs; a nonzero value
+ * performs the reverse operation.
*
* @return
- * A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
- * otherwise and rte_errno is set.
+ * Converted RSS hash fields on success, (uint64_t)-1 otherwise and
+ * rte_errno is set.
*/
uint64_t
-mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf)
+mlx4_conv_rss_types(struct priv *priv, uint64_t types, int verbs_to_dpdk)
{
- enum { IPV4, IPV6, TCP, UDP, };
- const uint64_t in[] = {
- [IPV4] = (ETH_RSS_IPV4 |
- ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV4_OTHER),
- [IPV6] = (ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_IPV6_EX |
- ETH_RSS_IPV6_TCP_EX |
- ETH_RSS_IPV6_UDP_EX),
- [TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
- ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX),
- [UDP] = (ETH_RSS_NONFRAG_IPV4_UDP |
- ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX),
+ enum {
+ INNER,
+ IPV4, IPV4_1, IPV4_2, IPV6, IPV6_1, IPV6_2, IPV6_3,
+ TCP, UDP,
+ IPV4_TCP, IPV4_UDP, IPV6_TCP, IPV6_TCP_1, IPV6_UDP, IPV6_UDP_1,
};
- const uint64_t out[RTE_DIM(in)] = {
- [IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
- [IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
- [TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
- [UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
+ enum {
+ VERBS_IPV4 = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
+ VERBS_IPV6 = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
+ VERBS_TCP = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
+ VERBS_UDP = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
};
+ static const uint64_t dpdk[] = {
+ [INNER] = 0,
+ [IPV4] = ETH_RSS_IPV4,
+ [IPV4_1] = ETH_RSS_FRAG_IPV4,
+ [IPV4_2] = ETH_RSS_NONFRAG_IPV4_OTHER,
+ [IPV6] = ETH_RSS_IPV6,
+ [IPV6_1] = ETH_RSS_FRAG_IPV6,
+ [IPV6_2] = ETH_RSS_NONFRAG_IPV6_OTHER,
+ [IPV6_3] = ETH_RSS_IPV6_EX,
+ [TCP] = 0,
+ [UDP] = 0,
+ [IPV4_TCP] = ETH_RSS_NONFRAG_IPV4_TCP,
+ [IPV4_UDP] = ETH_RSS_NONFRAG_IPV4_UDP,
+ [IPV6_TCP] = ETH_RSS_NONFRAG_IPV6_TCP,
+ [IPV6_TCP_1] = ETH_RSS_IPV6_TCP_EX,
+ [IPV6_UDP] = ETH_RSS_NONFRAG_IPV6_UDP,
+ [IPV6_UDP_1] = ETH_RSS_IPV6_UDP_EX,
+ };
+ static const uint64_t verbs[RTE_DIM(dpdk)] = {
+ [INNER] = IBV_RX_HASH_INNER,
+ [IPV4] = VERBS_IPV4,
+ [IPV4_1] = VERBS_IPV4,
+ [IPV4_2] = VERBS_IPV4,
+ [IPV6] = VERBS_IPV6,
+ [IPV6_1] = VERBS_IPV6,
+ [IPV6_2] = VERBS_IPV6,
+ [IPV6_3] = VERBS_IPV6,
+ [TCP] = VERBS_TCP,
+ [UDP] = VERBS_UDP,
+ [IPV4_TCP] = VERBS_IPV4 | VERBS_TCP,
+ [IPV4_UDP] = VERBS_IPV4 | VERBS_UDP,
+ [IPV6_TCP] = VERBS_IPV6 | VERBS_TCP,
+ [IPV6_TCP_1] = VERBS_IPV6 | VERBS_TCP,
+ [IPV6_UDP] = VERBS_IPV6 | VERBS_UDP,
+ [IPV6_UDP_1] = VERBS_IPV6 | VERBS_UDP,
+ };
+ const uint64_t *in = verbs_to_dpdk ? verbs : dpdk;
+ const uint64_t *out = verbs_to_dpdk ? dpdk : verbs;
uint64_t seen = 0;
uint64_t conv = 0;
unsigned int i;
- for (i = 0; i != RTE_DIM(in); ++i)
- if (rss_hf & in[i]) {
- seen |= rss_hf & in[i];
+ if (!types) {
+ if (!verbs_to_dpdk)
+ return priv->hw_rss_sup;
+ types = priv->hw_rss_sup;
+ }
+ for (i = 0; i != RTE_DIM(dpdk); ++i)
+ if (in[i] && (types & in[i]) == in[i]) {
+ seen |= types & in[i];
conv |= out[i];
}
- if ((conv & priv->hw_rss_sup) == conv) {
- if (rss_hf == (uint64_t)-1) {
- /* Include inner RSS by default if supported. */
- conv |= priv->hw_rss_sup & IBV_RX_HASH_INNER;
- return conv;
- }
- if (!(rss_hf & ~seen))
- return conv;
- }
+ if ((verbs_to_dpdk || (conv & priv->hw_rss_sup) == conv) &&
+ !(types & ~seen))
+ return conv;
rte_errno = ENOTSUP;
return (uint64_t)-1;
}
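
A toy standalone model of the bidirectional, table-driven mapping used above. The bit values are made up; only the structure (two parallel tables indexed by the same enum, swapped according to the direction flag) mirrors mlx4_conv_rss_types():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { F_IPV4, F_TCP, F_IPV4_TCP, F_NUM };

static const uint64_t fmt_a[F_NUM] = { 0x1, 0x2, 0x1 | 0x2 };    /* "DPDK" */
static const uint64_t fmt_b[F_NUM] = { 0x10, 0x20, 0x10 | 0x20 }; /* "Verbs" */

static uint64_t
conv(uint64_t types, int b_to_a)
{
	const uint64_t *in = b_to_a ? fmt_b : fmt_a;
	const uint64_t *out = b_to_a ? fmt_a : fmt_b;
	uint64_t ret = 0;
	unsigned int i;

	for (i = 0; i != F_NUM; ++i)
		if (in[i] && (types & in[i]) == in[i])
			ret |= out[i];
	return ret;
}

int
main(void)
{
	/* 0x3 ("IPv4 + TCP") maps to 0x30 and back again. */
	printf("0x%" PRIx64 "\n", conv(0x3, 0));  /* 0x30 */
	printf("0x%" PRIx64 "\n", conv(0x30, 1)); /* 0x3 */
	return 0;
}
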
@@ -362,6 +387,9 @@ error:
* Additional mlx4-specific constraints on supported fields:
*
* - No support for partial masks.
+ * - Due to a HW/FW limitation, flow rule priority is not taken into account
+ * when matching UDP destination ports; doing so is therefore only supported
+ * at the highest priority level (0).
*
* @param[in, out] flow
* Flow rule handle to update.
@@ -393,6 +421,11 @@ mlx4_flow_merge_udp(struct rte_flow *flow,
msg = "mlx4 does not support matching partial UDP fields";
goto error;
}
+ if (mask && mask->hdr.dst_port && flow->priority) {
+ msg = "combining UDP destination port matching with a nonzero"
+ " priority level is not supported";
+ goto error;
+ }
if (!flow->ibv_attr)
return 0;
++flow->ibv_attr->num_of_specs;
@@ -637,6 +670,7 @@ mlx4_flow_prepare(struct priv *priv,
struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
struct rte_flow *flow = &temp;
const char *msg = NULL;
+ int overlap;
if (attr->group)
return rte_flow_error_set
@@ -651,12 +685,18 @@ mlx4_flow_prepare(struct priv *priv,
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
NULL, "egress is not supported");
+ if (attr->transfer)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
if (!attr->ingress)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
NULL, "only ingress is supported");
fill:
+ overlap = 0;
proc = mlx4_flow_proc_item_list;
+ flow->priority = attr->priority;
/* Go over pattern. */
for (item = pattern; item->type; ++item) {
const struct mlx4_flow_proc_item *next = NULL;
@@ -702,14 +742,24 @@ fill:
}
/* Go over actions list. */
for (action = actions; action->type; ++action) {
+ /* This one may appear anywhere multiple times. */
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+ /* Fate-deciding actions may appear exactly once. */
+ if (overlap) {
+ msg = "cannot combine several fate-deciding actions,"
+ " choose between DROP, QUEUE or RSS";
+ goto exit_action_not_supported;
+ }
+ overlap = 1;
switch (action->type) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
- const struct rte_eth_rss_conf *rss_conf;
+ const uint8_t *rss_key;
+ uint32_t rss_key_len;
+ uint64_t fields;
unsigned int i;
- case RTE_FLOW_ACTION_TYPE_VOID:
- continue;
case RTE_FLOW_ACTION_TYPE_DROP:
flow->drop = 1;
break;
@@ -736,54 +786,68 @@ fill:
break;
rss = action->conf;
/* Default RSS configuration if none is provided. */
- rss_conf =
- rss->rss_conf ?
- rss->rss_conf :
- &(struct rte_eth_rss_conf){
- .rss_key = mlx4_rss_hash_key_default,
- .rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
- .rss_hf = -1,
- };
+ if (rss->key_len) {
+ rss_key = rss->key;
+ rss_key_len = rss->key_len;
+ } else {
+ rss_key = mlx4_rss_hash_key_default;
+ rss_key_len = MLX4_RSS_HASH_KEY_SIZE;
+ }
/* Sanity checks. */
- for (i = 0; i < rss->num; ++i)
+ for (i = 0; i < rss->queue_num; ++i)
if (rss->queue[i] >=
priv->dev->data->nb_rx_queues)
break;
- if (i != rss->num) {
+ if (i != rss->queue_num) {
msg = "queue index target beyond number of"
" configured Rx queues";
goto exit_action_not_supported;
}
- if (!rte_is_power_of_2(rss->num)) {
+ if (!rte_is_power_of_2(rss->queue_num)) {
msg = "for RSS, mlx4 requires the number of"
" queues to be a power of two";
goto exit_action_not_supported;
}
- if (rss_conf->rss_key_len !=
- sizeof(flow->rss->key)) {
+ if (rss_key_len != sizeof(flow->rss->key)) {
msg = "mlx4 supports exactly one RSS hash key"
" length: "
MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
goto exit_action_not_supported;
}
- for (i = 1; i < rss->num; ++i)
+ for (i = 1; i < rss->queue_num; ++i)
if (rss->queue[i] - rss->queue[i - 1] != 1)
break;
- if (i != rss->num) {
+ if (i != rss->queue_num) {
msg = "mlx4 requires RSS contexts to use"
" consecutive queue indices only";
goto exit_action_not_supported;
}
- if (rss->queue[0] % rss->num) {
+ if (rss->queue[0] % rss->queue_num) {
msg = "mlx4 requires the first queue of a RSS"
" context to be aligned on a multiple"
" of the context size";
goto exit_action_not_supported;
}
+ if (rss->func &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
+ msg = "the only supported RSS hash function"
+ " is Toeplitz";
+ goto exit_action_not_supported;
+ }
+ if (rss->level) {
+ msg = "a nonzero RSS encapsulation level is"
+ " not supported";
+ goto exit_action_not_supported;
+ }
+ rte_errno = 0;
+ fields = mlx4_conv_rss_types(priv, rss->types, 0);
+ if (fields == (uint64_t)-1 && rte_errno) {
+ msg = "unsupported RSS hash type requested";
+ goto exit_action_not_supported;
+ }
flow->rss = mlx4_rss_get
- (priv,
- mlx4_conv_rss_hf(priv, rss_conf->rss_hf),
- rss_conf->rss_key, rss->num, rss->queue);
+ (priv, fields, rss_key, rss->queue_num,
+ rss->queue);
if (!flow->rss) {
msg = "either invalid parameters or not enough"
" resources for additional multi-queue"
@@ -795,10 +859,9 @@ fill:
goto exit_action_not_supported;
}
}
- if (!flow->rss && !flow->drop)
- return rte_flow_error_set
- (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "no valid action");
+ /* When fate is unknown, drop traffic. */
+ if (!overlap)
+ flow->drop = 1;
/* Validation ends here. */
if (!addr) {
if (flow->rss)
@@ -820,11 +883,14 @@ fill:
},
};
- if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
+ if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec))) {
+ if (temp.rss)
+ mlx4_rss_put(temp.rss);
return rte_flow_error_set
(error, -rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"flow rule handle allocation failure");
+ }
/* Most fields will be updated by second pass. */
*flow = (struct rte_flow){
.ibv_attr = temp.ibv_attr,
@@ -1264,14 +1330,20 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
*/
uint32_t queues =
rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
- alignas(struct rte_flow_action_rss) uint8_t rss_conf_data
- [offsetof(struct rte_flow_action_rss, queue) +
- sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues];
- struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data;
+ uint16_t queue[queues];
+ struct rte_flow_action_rss action_rss = {
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = 0,
+ .key_len = MLX4_RSS_HASH_KEY_SIZE,
+ .queue_num = queues,
+ .key = mlx4_rss_hash_key_default,
+ .queue = queue,
+ };
struct rte_flow_action actions[] = {
{
.type = RTE_FLOW_ACTION_TYPE_RSS,
- .conf = rss_conf,
+ .conf = &action_rss,
},
{
.type = RTE_FLOW_ACTION_TYPE_END,
@@ -1293,12 +1365,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
if (!queues)
goto error;
/* Prepare default RSS configuration. */
- *rss_conf = (struct rte_flow_action_rss){
- .rss_conf = NULL, /* Rely on default fallback settings. */
- .num = queues,
- };
for (i = 0; i != queues; ++i)
- rss_conf->queue[i] = i;
+ queue[i] = i;
/*
* Set up VLAN item if filtering is enabled and at least one VLAN
* filter is configured.
@@ -1357,7 +1425,7 @@ next_vlan:
if (j != sizeof(mac->addr_bytes))
continue;
if (flow->rss->queues != queues ||
- memcmp(flow->rss->queue_id, rss_conf->queue,
+ memcmp(flow->rss->queue_id, action_rss.queue,
queues * sizeof(flow->rss->queue_id[0])))
continue;
break;
@@ -1397,7 +1465,7 @@ next_vlan:
if (flow && flow->internal) {
assert(flow->rss);
if (flow->rss->queues != queues ||
- memcmp(flow->rss->queue_id, rss_conf->queue,
+ memcmp(flow->rss->queue_id, action_rss.queue,
queues * sizeof(flow->rss->queue_id[0])))
flow = NULL;
}
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
index 00188a65..2917ebe9 100644
--- a/drivers/net/mlx4/mlx4_flow.h
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX4_FLOW_H_
@@ -42,12 +42,14 @@ struct rte_flow {
uint32_t promisc:1; /**< This rule matches everything. */
uint32_t allmulti:1; /**< This rule matches all multicast traffic. */
uint32_t drop:1; /**< This rule drops packets. */
+ uint32_t priority; /**< Flow rule priority. */
struct mlx4_rss *rss; /**< Rx target. */
};
/* mlx4_flow.c */
-uint64_t mlx4_conv_rss_hf(struct priv *priv, uint64_t rss_hf);
+uint64_t mlx4_conv_rss_types(struct priv *priv, uint64_t types,
+ int verbs_to_dpdk);
int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
diff --git a/drivers/net/mlx4/mlx4_glue.c b/drivers/net/mlx4/mlx4_glue.c
index 3b79d320..67b3bfac 100644
--- a/drivers/net/mlx4/mlx4_glue.c
+++ b/drivers/net/mlx4/mlx4_glue.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
#include <stddef.h>
diff --git a/drivers/net/mlx4/mlx4_glue.h b/drivers/net/mlx4/mlx4_glue.h
index 368f906b..668ca867 100644
--- a/drivers/net/mlx4/mlx4_glue.h
+++ b/drivers/net/mlx4/mlx4_glue.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
#ifndef MLX4_GLUE_H_
diff --git a/drivers/net/mlx4/mlx4_intr.c b/drivers/net/mlx4/mlx4_intr.c
index 2141992e..eeb982a0 100644
--- a/drivers/net/mlx4/mlx4_intr.c
+++ b/drivers/net/mlx4/mlx4_intr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index 9a1e4de3..d23d3c61 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -30,237 +30,1152 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_mempool.h>
-#include <rte_spinlock.h>
+#include <rte_rwlock.h>
#include "mlx4_glue.h"
+#include "mlx4_mr.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
-struct mlx4_check_mempool_data {
+struct mr_find_contig_memsegs_data {
+ uintptr_t addr;
+ uintptr_t start;
+ uintptr_t end;
+ const struct rte_memseg_list *msl;
+};
+
+struct mr_update_mp_data {
+ struct rte_eth_dev *dev;
+ struct mlx4_mr_ctrl *mr_ctrl;
int ret;
- char *start;
- char *end;
};
/**
- * Called by mlx4_check_mempool() when iterating the memory chunks.
- *
- * @param[in] mp
- * Pointer to memory pool (unused).
- * @param[in, out] data
- * Pointer to shared buffer with mlx4_check_mempool().
- * @param[in] memhdr
- * Pointer to mempool chunk header.
- * @param mem_idx
- * Mempool element index (unused).
+ * Expand B-tree table to a given size. Can't be called while holding
+ * memory_hotplug_lock or priv->mr.rwlock due to rte_realloc().
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries for expansion.
+ *
+ * @return
+ * 0 on success, -1 on failure.
*/
-static void
-mlx4_check_mempool_cb(struct rte_mempool *mp, void *opaque,
- struct rte_mempool_memhdr *memhdr,
- unsigned int mem_idx)
+static int
+mr_btree_expand(struct mlx4_mr_btree *bt, int n)
+{
+ void *mem;
+ int ret = 0;
+
+ if (n <= bt->size)
+ return ret;
+ /*
+ * The downside of using rte_realloc() directly is that SOCKET_ID_ANY
+ * is used internally if there's no room to expand in place. As this is
+ * a rare case on a very slow path, it is acceptable. cache_bh[] is
+ * initially given practically enough space, and once expanded, further
+ * expansion should never be needed again.
+ */
+ mem = rte_realloc(bt->table, n * sizeof(struct mlx4_mr_cache), 0);
+ if (mem == NULL) {
+ /* Not an error, B-tree search will be skipped. */
+ WARN("failed to expand MR B-tree (%p) table", (void *)bt);
+ ret = -1;
+ } else {
+ DEBUG("expanded MR B-tree table (size=%u)", n);
+ bt->table = mem;
+ bt->size = n;
+ }
+ return ret;
+}
+
+/**
+ * Look up an LKey in the given B-tree lookup table, store the last index and
+ * return the matched LKey.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param[out] idx
+ * Pointer to index. Even on search failure, this returns the index where
+ * the search stopped, so it can be used when inserting a new entry.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mr_btree_lookup(struct mlx4_mr_btree *bt, uint16_t *idx, uintptr_t addr)
+{
+ struct mlx4_mr_cache *lkp_tbl;
+ uint16_t n;
+ uint16_t base = 0;
+
+ assert(bt != NULL);
+ lkp_tbl = *bt->table;
+ n = bt->len;
+ /* First entry must be NULL for comparison. */
+ assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
+ /* Binary search. */
+ do {
+ register uint16_t delta = n >> 1;
+
+ if (addr < lkp_tbl[base + delta].start) {
+ n = delta;
+ } else {
+ base += delta;
+ n -= delta;
+ }
+ } while (n > 1);
+ assert(addr >= lkp_tbl[base].start);
+ *idx = base;
+ if (addr < lkp_tbl[base].end)
+ return lkp_tbl[base].lkey;
+ /* Not found. */
+ return UINT32_MAX;
+}
+
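A minimal standalone model of the lookup above, with made-up ranges, showing why the sentinel entry at index 0 lets the binary search run without explicit bounds checks:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct cache { uintptr_t start, end; uint32_t lkey; };

static uint32_t
lookup(const struct cache *tbl, uint16_t len, uintptr_t addr)
{
	uint16_t n = len;
	uint16_t base = 0;

	do {
		uint16_t delta = n >> 1;

		if (addr < tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	} while (n > 1);
	/* The search lands on the last entry whose start <= addr. */
	return addr < tbl[base].end ? tbl[base].lkey : UINT32_MAX;
}

int
main(void)
{
	const struct cache tbl[] = {
		{ 0, 0, UINT32_MAX }, /* sentinel, as in mlx4_mr_btree_init() */
		{ 0x1000, 0x3000, 0x11 },
		{ 0x8000, 0x9000, 0x22 },
	};

	assert(lookup(tbl, 3, 0x2fff) == 0x11);
	assert(lookup(tbl, 3, 0x3000) == UINT32_MAX); /* gap between MRs */
	assert(lookup(tbl, 3, 0x8000) == 0x22);
	puts("ok");
	return 0;
}
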
+/**
+ * Insert an entry to B-tree lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param entry
+ * Pointer to new entry to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_insert(struct mlx4_mr_btree *bt, struct mlx4_mr_cache *entry)
{
- struct mlx4_check_mempool_data *data = opaque;
+ struct mlx4_mr_cache *lkp_tbl;
+ uint16_t idx = 0;
+ size_t shift;
- (void)mp;
- (void)mem_idx;
- /* It already failed, skip the next chunks. */
- if (data->ret != 0)
+ assert(bt != NULL);
+ assert(bt->len <= bt->size);
+ assert(bt->len > 0);
+ lkp_tbl = *bt->table;
+ /* Find out the slot for insertion. */
+ if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
+ DEBUG("abort insertion to B-tree(%p): already exist at"
+ " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ /* Already exists, return. */
+ return 0;
+ }
+ /* If table is full, return error. */
+ if (unlikely(bt->len == bt->size)) {
+ bt->overflow = 1;
+ return -1;
+ }
+ /* Insert entry. */
+ ++idx;
+ shift = (bt->len - idx) * sizeof(struct mlx4_mr_cache);
+ if (shift)
+ memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
+ lkp_tbl[idx] = *entry;
+ bt->len++;
+ DEBUG("inserted B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ return 0;
+}
+
+/**
+ * Initialize B-tree and allocate memory for lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries to allocate.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket)
+{
+ if (bt == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ memset(bt, 0, sizeof(*bt));
+ bt->table = rte_calloc_socket("B-tree table",
+ n, sizeof(struct mlx4_mr_cache),
+ 0, socket);
+ if (bt->table == NULL) {
+ rte_errno = ENOMEM;
+ ERROR("failed to allocate memory for btree cache on socket %d",
+ socket);
+ return -rte_errno;
+ }
+ bt->size = n;
+ /* First entry must be NULL for binary search. */
+ (*bt->table)[bt->len++] = (struct mlx4_mr_cache) {
+ .lkey = UINT32_MAX,
+ };
+ DEBUG("initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ return 0;
+}
+
+/**
+ * Free B-tree resources.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx4_mr_btree_free(struct mlx4_mr_btree *bt)
+{
+ if (bt == NULL)
return;
- /* It is the first chunk. */
- if (data->start == NULL && data->end == NULL) {
- data->start = memhdr->addr;
- data->end = data->start + memhdr->len;
+ DEBUG("freeing B-tree %p with table %p", (void *)bt, (void *)bt->table);
+ rte_free(bt->table);
+ memset(bt, 0, sizeof(*bt));
+}
+
+#ifndef NDEBUG
+/**
+ * Dump all the entries in a B-tree.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx4_mr_btree_dump(struct mlx4_mr_btree *bt)
+{
+ int idx;
+ struct mlx4_mr_cache *lkp_tbl;
+
+ if (bt == NULL)
return;
+ lkp_tbl = *bt->table;
+ for (idx = 0; idx < bt->len; ++idx) {
+ struct mlx4_mr_cache *entry = &lkp_tbl[idx];
+
+ DEBUG("B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
}
- if (data->end == memhdr->addr) {
- data->end += memhdr->len;
- return;
+}
+#endif
+
+/**
+ * Find virtually contiguous memory chunk in a given MR.
+ *
+ * @param dev
+ * Pointer to MR structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If not found, this will not be
+ * updated.
+ * @param start_idx
+ * Start index of the memseg bitmap.
+ *
+ * @return
+ * Next index to go on lookup.
+ */
+static int
+mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
+ int base_idx)
+{
+ uintptr_t start = 0;
+ uintptr_t end = 0;
+ uint32_t idx = 0;
+
+ for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+
+ msl = mr->msl;
+ ms = rte_fbarray_get(&msl->memseg_arr,
+ mr->ms_base_idx + idx);
+ assert(msl->page_sz == ms->hugepage_sz);
+ if (!start)
+ start = ms->addr_64;
+ end = ms->addr_64 + ms->hugepage_sz;
+ } else if (start) {
+ /* Passed the end of a fragment. */
+ break;
+ }
}
- if (data->start == (char *)memhdr->addr + memhdr->len) {
- data->start -= memhdr->len;
- return;
+ if (start) {
+ /* Found one chunk. */
+ entry->start = start;
+ entry->end = end;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
}
- /* Error, mempool is not virtually contiguous. */
- data->ret = -1;
+ return idx;
}
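
A standalone model of the scan above, with a plain int array standing in for rte_bitmap, returning each run of set bits as a [start, end) chunk plus the index to resume from:

#include <stdio.h>

static int
next_chunk(const int *bmp, int n, int base, int *start, int *end)
{
	int idx;

	*start = -1;
	*end = -1;
	for (idx = base; idx < n; ++idx) {
		if (bmp[idx]) {
			if (*start < 0)
				*start = idx;
			*end = idx + 1;
		} else if (*start >= 0) {
			break; /* passed the end of a fragment */
		}
	}
	return idx; /* next index to go on lookup */
}

int
main(void)
{
	const int bmp[] = { 0, 1, 1, 0, 1 };
	int n = 0;
	int s, e;

	while (n < 5) {
		n = next_chunk(bmp, 5, n, &s, &e);
		if (s >= 0)
			printf("[%d, %d)\n", s, e); /* [1, 3) then [4, 5) */
	}
	return 0;
}
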
/**
- * Check if a mempool can be used: it must be virtually contiguous.
+ * Insert an MR into the global B-tree cache. It may fail when memory is low;
+ * the entry will then have to be found by mr_lookup_dev_list() in
+ * mlx4_mr_create() on a cache miss.
*
- * @param[in] mp
- * Pointer to memory pool.
- * @param[out] start
- * Pointer to the start address of the mempool virtual memory area.
- * @param[out] end
- * Pointer to the end address of the mempool virtual memory area.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr
+ * Pointer to MR to insert.
*
* @return
- * 0 on success (mempool is virtually contiguous), -1 on error.
+ * 0 on success, -1 on failure.
*/
static int
-mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start, uintptr_t *end)
+mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_check_mempool_data data;
+ struct priv *priv = dev->data->dev_private;
+ unsigned int n;
- memset(&data, 0, sizeof(data));
- rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data);
- *start = (uintptr_t)data.start;
- *end = (uintptr_t)data.end;
- return data.ret;
+ DEBUG("port %u inserting MR(%p) to global cache",
+ dev->data->port_id, (void *)mr);
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache entry = { 0, };
+
+ /* Find a contiguous chunk and advance the index. */
+ n = mr_find_next_chunk(mr, &entry, n);
+ if (!entry.end)
+ break;
+ if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
+ /*
+ * Overflowed, but the global table cannot be expanded
+ * because of deadlock.
+ */
+ return -1;
+ }
+ }
+ return 0;
}
/**
- * Obtain a memory region from a memory pool.
+ * Look up address in the original global MR list.
*
- * If a matching memory region already exists, it is returned with its
- * reference count incremented, otherwise a new one is registered.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
*
- * @param priv
- * Pointer to private structure.
- * @param mp
- * Pointer to memory pool.
+ * @return
+ * Found MR on match, NULL otherwise.
+ */
+static struct mlx4_mr *
+mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr;
+
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (addr >= ret.start && addr < ret.end) {
+ /* Found. */
+ *entry = ret;
+ return mr;
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Look up address on device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
*
* @return
- * Memory region pointer, NULL in case of error and rte_errno is set.
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-struct mlx4_mr *
-mlx4_mr_get(struct priv *priv, struct rte_mempool *mp)
+static uint32_t
+mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
+ struct priv *priv = dev->data->dev_private;
+ uint16_t idx;
+ uint32_t lkey = UINT32_MAX;
struct mlx4_mr *mr;
- if (mlx4_check_mempool(mp, &start, &end) != 0) {
- rte_errno = EINVAL;
- ERROR("mempool %p: not virtually contiguous",
- (void *)mp);
- return NULL;
+ /*
+ * If the global cache has overflowed because the B-tree table could not
+ * be expanded, it can't contain all the existing MRs. The address then
+ * has to be found by traversing the original MR list instead, which is
+ * a very slow path. Otherwise, the global cache is all-inclusive.
+ */
+ if (!unlikely(priv->mr.cache.overflow)) {
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX)
+ *entry = (*priv->mr.cache.table)[idx];
+ } else {
+ /* Falling back to the slowest path. */
+ mr = mr_lookup_dev_list(dev, entry, addr);
+ if (mr != NULL)
+ lkey = entry->lkey;
}
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- /* Round start and end to page boundary if found in memory segments. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
-
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
+ assert(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
+ return lkey;
+}
+
+/**
+ * Free MR resources. The MR lock must not be held to avoid a deadlock, since
+ * rte_free() can raise a memory free event and the callback function would
+ * spin on the lock.
+ *
+ * @param mr
+ * Pointer to MR to free.
+ */
+static void
+mr_free(struct mlx4_mr *mr)
+{
+ if (mr == NULL)
+ return;
+ DEBUG("freeing MR(%p):", (void *)mr);
+ if (mr->ibv_mr != NULL)
+ claim_zero(mlx4_glue->dereg_mr(mr->ibv_mr));
+ if (mr->ms_bmp != NULL)
+ rte_bitmap_free(mr->ms_bmp);
+ rte_free(mr);
+}
+
+/**
+ * Release resources of detached MRs having no online entry.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next;
+ struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
+
+ /*
+ * MRs can't be freed while holding the lock because rte_free() could
+ * invoke the memory free callback function, which would deadlock.
+ */
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach the whole free list and release it after unlocking. */
+ free_list = priv->mr.mr_free_list;
+ LIST_INIT(&priv->mr.mr_free_list);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Release resources. */
+ mr_next = LIST_FIRST(&free_list);
+ while (mr_next != NULL) {
+ struct mlx4_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ mr_free(mr);
}
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- rte_spinlock_lock(&priv->mr_lock);
- LIST_FOREACH(mr, &priv->mr, next)
- if (mp == mr->mp && start >= mr->start && end <= mr->end)
- break;
- if (mr) {
- ++mr->refcnt;
- goto release;
+}
+
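A generic, runnable sketch of the detach-then-free pattern used by the collector above: take the whole free list under the lock, reset it, unlock, then release entries where re-entrant callbacks can safely take the lock again. The list and lock types below are plain POSIX stand-ins for the DPDK equivalents:

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

struct node { LIST_ENTRY(node) link; };
LIST_HEAD(node_list, node);

static struct node_list free_list = LIST_HEAD_INITIALIZER(free_list);
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static void
collect(void)
{
	struct node_list tmp = LIST_HEAD_INITIALIZER(tmp);
	struct node *n;

	pthread_rwlock_wrlock(&lock);
	tmp = free_list;      /* detach the whole list */
	LIST_INIT(&free_list);
	pthread_rwlock_unlock(&lock);
	/* Release outside the lock; free() here stands in for rte_free(),
	 * whose memory event callbacks may take 'lock' again. */
	n = LIST_FIRST(&tmp);
	while (n != NULL) {
		struct node *next = LIST_NEXT(n, link);

		free(n);
		n = next;
	}
}

int
main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	if (n != NULL)
		LIST_INSERT_HEAD(&free_list, n, link);
	collect();
	return 0;
}
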
+/* Called during rte_memseg_contig_walk() by mlx4_mr_create(). */
+static int
+mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct mr_find_contig_memsegs_data *data = arg;
+
+ if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
+ return 0;
+ /* Found, save it and stop walking. */
+ data->start = ms->addr_64;
+ data->end = ms->addr_64 + len;
+ data->msl = msl;
+ return 1;
+}
+
+/**
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * Register the entire virtually contiguous memory chunk around the address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ * created. If creation fails, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
+ */
+static uint32_t
+mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
+ uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+ struct mlx4_mr *mr = NULL;
+ size_t len;
+ uint32_t ms_n;
+ uint32_t bmp_size;
+ void *bmp_mem;
+ int ms_idx_shift = -1;
+ unsigned int n;
+ struct mr_find_contig_memsegs_data data = {
+ .addr = addr,
+ };
+ struct mr_find_contig_memsegs_data data_re;
+
+ DEBUG("port %u creating a MR using address (%p)",
+ dev->data->port_id, (void *)addr);
+ /*
+ * Release detached MRs, if any. This can't be done while holding either
+ * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have
+ * been detached by the memory free event but could not be released
+ * inside the callback without deadlocking. As a result, releasing these
+ * resources is opportunistic.
+ */
+ mlx4_mr_garbage_collect(dev);
+ /*
+ * Find a contiguous virtual address chunk in use, to which the given
+ * address belongs, in order to register the maximum range. In the best
+ * case, where mempools are not dynamically recreated and '--socket-mem'
+ * is specified as an EAL option, there is likely to be only one MR
+ * (LKey) per socket and per hugepage size even though system memory is
+ * highly fragmented.
+ */
+ if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
+ WARN("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_nolock;
}
- mr = rte_malloc(__func__, sizeof(*mr), 0);
- if (!mr) {
+alloc_resources:
+ /* Addresses must be page-aligned. */
+ assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ msl = data.msl;
+ ms = rte_mem_virt2memseg((void *)data.start, msl);
+ len = data.end - data.start;
+ assert(msl->page_sz == ms->hugepage_sz);
+ /* Number of memsegs in the range. */
+ ms_n = len / msl->page_sz;
+ DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ dev->data->port_id, (void *)addr,
+ data.start, data.end, msl->page_sz, ms_n);
+ /* Size of memory for bitmap. */
+ bmp_size = rte_bitmap_get_memory_footprint(ms_n);
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE) +
+ bmp_size,
+ RTE_CACHE_LINE_SIZE, msl->socket_id);
+ if (mr == NULL) {
+ WARN("port %u unable to allocate memory for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
rte_errno = ENOMEM;
- goto release;
+ goto err_nolock;
}
- *mr = (struct mlx4_mr){
- .start = start,
- .end = end,
- .refcnt = 1,
- .priv = priv,
- .mr = mlx4_glue->reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE),
- .mp = mp,
- };
- if (mr->mr) {
- mr->lkey = mr->mr->lkey;
- LIST_INSERT_HEAD(&priv->mr, mr, next);
- } else {
- rte_free(mr);
- mr = NULL;
- rte_errno = errno ? errno : EINVAL;
+ mr->msl = msl;
+ /*
+ * Save the index of the first memseg and initialize memseg bitmap. To
+ * see if a memseg of ms_idx in the memseg-list is still valid, check:
+ * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
+ */
+ mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
+ mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
+ if (mr->ms_bmp == NULL) {
+ WARN("port %u unable to initialize bitamp for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_nolock;
+ }
+ /*
+ * Recheck whether the extended contiguous chunk is still valid. Because
+ * memory_hotplug_lock can't be held across memory-related calls in a
+ * critical path, the resource allocation above can't be done under the
+ * lock. If the memory has changed by this point, try again with just a
+ * single page. If not, go on with the big chunk atomically from here.
+ */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ data_re = data;
+ if (len > msl->page_sz &&
+ !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
+ WARN("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_memlock;
+ }
+ if (data.start != data_re.start || data.end != data_re.end) {
+ /*
+ * The extended contiguous chunk has been changed. Try again
+ * with single memseg instead.
+ */
+ data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
+ data.end = data.start + msl->page_sz;
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ mr_free(mr);
+ goto alloc_resources;
}
-release:
- rte_spinlock_unlock(&priv->mr_lock);
- return mr;
+ assert(data.msl == data_re.msl);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /*
+ * Check that the address is really missing. If another thread already
+ * created one, or it is not found due to overflow, abort and return.
+ */
+ if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
+ /*
+ * Insert into the global cache table. This may fail when memory
+ * is low, in which case the entry will have to be searched for
+ * here again.
+ */
+ mr_btree_insert(&priv->mr.cache, entry);
+ DEBUG("port %u found MR for %p on final lookup, abort",
+ dev->data->port_id, (void *)addr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ /*
+ * Must be unlocked before calling rte_free() because
+ * mlx4_mr_mem_event_free_cb() can be called inside.
+ */
+ mr_free(mr);
+ return entry->lkey;
+ }
+ /*
+ * Trim start and end addresses for verbs MR. Set bits for registering
+ * memsegs but exclude already registered ones. Bitmap can be
+ * fragmented.
+ */
+ for (n = 0; n < ms_n; ++n) {
+ uintptr_t start;
+ struct mlx4_mr_cache ret = { 0, };
+
+ start = data_re.start + n * msl->page_sz;
+ /* Exclude memsegs already registered by other MRs. */
+ if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
+ /*
+ * Start from the first unregistered memseg in the
+ * extended range.
+ */
+ if (ms_idx_shift == -1) {
+ mr->ms_base_idx += n;
+ data.start = start;
+ ms_idx_shift = n;
+ }
+ data.end = start + msl->page_sz;
+ rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
+ ++mr->ms_n;
+ }
+ }
+ len = data.end - data.start;
+ mr->ms_bmp_n = len / msl->page_sz;
+ assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ /*
+ * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
+ * called with holding the memory lock because it doesn't use
+ * mlx4_alloc_buf_extern() which eventually calls rte_malloc_socket()
+ * through mlx4_alloc_verbs_buf().
+ */
+ mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)data.start, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ WARN("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_mrlock;
+ }
+ assert((uintptr_t)mr->ibv_mr->addr == data.start);
+ assert(mr->ibv_mr->length == len);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DEBUG("port %u MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ /* Fill in output data. */
+ mr_lookup_dev(dev, entry, addr);
+ /* Lookup can't fail. */
+ assert(entry->lkey != UINT32_MAX);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ return entry->lkey;
+err_mrlock:
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+err_memlock:
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+err_nolock:
+ /*
+ * As this function can be called from the datapath, only a warning
+ * message is printed per error. Locks must be released before
+ * calling rte_free() because mlx4_mr_mem_event_free_cb() can be
+ * called inside.
+ */
+ mr_free(mr);
+ return UINT32_MAX;
}
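The registration loop above can be exercised in isolation: pages of the extended range are added to a bitmap only when no existing MR covers them, so a single verbs registration can describe a fragmented range. A minimal standalone sketch of that bitmap bookkeeping (page size, base address and the coverage check are illustrative stand-ins, not driver code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u
#define MAX_PAGES 64u

/* Hypothetical mr_lookup_dev() stand-in: nonzero if the page is covered. */
static int already_registered(uintptr_t addr)
{
        return (addr / PAGE_SZ) % 7 == 0; /* arbitrary pattern for the demo */
}

int main(void)
{
        uint8_t bmp[MAX_PAGES] = { 0 }; /* rte_bitmap analog */
        uintptr_t base = 0x100000, start = 0, end = 0;
        unsigned int n, ms_n = 16, ms_idx_shift = MAX_PAGES, ms_cnt = 0;

        for (n = 0; n < ms_n; ++n) {
                uintptr_t page = base + n * PAGE_SZ;

                if (already_registered(page))
                        continue; /* leave a hole: the MR gets fragmented */
                if (ms_idx_shift == MAX_PAGES) { /* first unregistered page */
                        ms_idx_shift = n;
                        start = page;
                }
                end = page + PAGE_SZ;
                bmp[n - ms_idx_shift] = 1; /* rte_bitmap_set() analog */
                ++ms_cnt;
        }
        printf("MR [%#lx, %#lx): %u pages set out of %lu covered\n",
               (unsigned long)start, (unsigned long)end, ms_cnt,
               (unsigned long)((end - start) / PAGE_SZ));
        return 0;
}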
/**
- * Release a memory region.
+ * Rebuild the global B-tree cache of the device from the original MR list.
*
- * This function decrements its reference count and destroys it after
- * reaching 0.
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mr_rebuild_dev_cache(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr;
+
+ DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
+ /* Flush cache to rebuild. */
+ priv->mr.cache.len = 1;
+ priv->mr.cache.overflow = 0;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr)
+ if (mr_insert_dev_cache(dev, mr) < 0)
+ return;
+}
+
+/**
+ * Callback for memory free event. Iterate freed memsegs and check whether each
+ * belongs to an existing MR. If so, clear the corresponding bit in that MR's
+ * bitmap; as a result the MR becomes fragmented. If an MR becomes empty, it
+ * will be freed later by mlx4_mr_garbage_collect().
*
- * Note to avoid race conditions given this function may be used from the
- * data plane, it's extremely important that each user holds its own
- * reference.
+ * The global cache must be rebuilt if there is any change, and this event has
+ * to be propagated to the dataplane threads so that they flush their local
+ * caches.
*
- * @param mr
- * Memory region to release.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
+static void
+mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_memseg_list *msl;
+ struct mlx4_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
+
+ DEBUG("port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ assert(len == RTE_ALIGN(len, msl->page_sz));
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mlx4_mr_cache entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mr_lookup_dev_list(dev, &entry, start);
+ if (mr == NULL)
+ continue;
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ assert(ms != NULL);
+ assert(msl->page_sz == ms->hugepage_sz);
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ assert(rte_bitmap_get(mr->ms_bmp, pos));
+ assert(pos < mr->ms_bmp_n);
+ DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ DEBUG("port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
+ }
+ /*
+ * The MR is fragmented or will be freed; the global cache
+ * must be rebuilt.
+ */
+ rebuild = 1;
+ }
+ if (rebuild) {
+ mr_rebuild_dev_cache(dev);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If one of
+ * the freed memsegs is seen by another core, the memseg must have
+ * been re-allocated by the allocator, which can only happen after
+ * this free call. The store incrementing the generation number
+ * below is therefore guaranteed to be visible to other cores
+ * before they see the newly allocated memory.
+ */
+ ++priv->mr.dev_gen;
+ DEBUG("broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
+ rte_smp_wmb();
+ }
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+#ifndef NDEBUG
+ if (rebuild)
+ mlx4_mr_dump_dev(dev);
+#endif
+}
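The generation-number handshake used above can be modeled with C11 atomics standing in for rte_smp_wmb() and the dev_gen/cur_gen pair. A hedged sketch of the idea, not the driver's actual synchronization primitives:

#include <stdatomic.h>
#include <stdio.h>

/* Device-wide generation number (priv->mr.dev_gen analog). */
static atomic_uint dev_gen;

/* Per-queue control block (mlx4_mr_ctrl analog). */
struct q_ctrl {
        unsigned int cur_gen;
        /* ...cached address translations would live here... */
};

/* Control path: bump the generation after changing the global cache. */
static void broadcast_flush(void)
{
        /* Release ordering plays the role of rte_smp_wmb(). */
        atomic_fetch_add_explicit(&dev_gen, 1, memory_order_release);
}

/* Datapath: flush the local cache only when the generation moved. */
static void maybe_flush(struct q_ctrl *q)
{
        unsigned int g = atomic_load_explicit(&dev_gen, memory_order_acquire);

        if (q->cur_gen != g) {
                /* memset() of the cache and B-tree reset would go here. */
                q->cur_gen = g;
                printf("local cache flushed, cur_gen=%u\n", g);
        }
}

int main(void)
{
        struct q_ctrl q = { 0 };

        maybe_flush(&q);   /* no event yet: nothing happens */
        broadcast_flush();
        maybe_flush(&q);   /* flushes exactly once */
        return 0;
}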
+
+/**
+ * Callback for memory event.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
*/
void
-mlx4_mr_put(struct mlx4_mr *mr)
+mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
{
- struct priv *priv = mr->priv;
-
- rte_spinlock_lock(&priv->mr_lock);
- assert(mr->refcnt);
- if (--mr->refcnt)
- goto release;
- LIST_REMOVE(mr, next);
- claim_zero(mlx4_glue->dereg_mr(mr->mr));
- rte_free(mr);
-release:
- rte_spinlock_unlock(&priv->mr_lock);
+ struct priv *priv;
+
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
+ /* Iterate all the existing mlx4 devices. */
+ LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
+ mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
+ rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
+ }
}
/**
- * Add memory region (MR) <-> memory pool (MP) association to txq->mp2mr[].
- * If mp2mr[] is full, remove an entry first.
+ * Look up an address in the global MR cache table. If not found, create a new
+ * MR. Insert the found/created entry into the local bottom-half cache table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param[out] entry
+ * Pointer to the returned MR cache entry, found in the global cache or newly
+ * created. If creation fails, this is not written.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct mlx4_mr_cache *entry, uintptr_t addr)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
+ uint16_t idx;
+ uint32_t lkey;
+
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in the global cache. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX) {
+ /* Found. */
+ *entry = (*priv->mr.cache.table)[idx];
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /*
+ * Update the local cache. Even if it fails, return the found
+ * entry to update the top-half cache. Next time, the entry will
+ * be found in the global cache.
+ */
+ mr_btree_insert(bt, entry);
+ return lkey;
+ }
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /* First time seeing this address? Create a new MR. */
+ lkey = mlx4_mr_create(dev, entry, addr);
+ /*
+ * Update the local cache if a new global MR was successfully created.
+ * If creation failed, there is no action to take in this datapath
+ * code: the returned LKey is invalid and will eventually make the HW
+ * fail.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
+}
+
+/**
+ * Bottom-half of the LKey search on the datapath. First search in cache_bh[];
+ * on a miss, search the global MR cache table and add the new entry to the
+ * per-queue local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx4_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ uintptr_t addr)
+{
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ /* Victim in top-half cache to replace with new entry. */
+ struct mlx4_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ /*
+ * On a miss in the local lookup table, search the global cache;
+ * the local cache_bh[] is updated inside if possible, and so is
+ * the top-half cache entry.
+ */
+ lkey = mlx4_mr_lookup_dev(dev, mr_ctrl, repl, addr);
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
+ }
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX4_MR_CACHE_N;
+ return lkey;
+}
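The two-level scheme this function completes — a tiny per-queue array checked first, backed by a binary-searched table — can be sketched standalone. Addresses, table contents and the FIFO victim policy shown here are illustrative only, and the real top half also scans its whole array linearly before falling back:

#include <stdint.h>
#include <stdio.h>

#define TOP_N 4 /* MLX4_MR_CACHE_N analog */

struct range { uintptr_t start, end; uint32_t lkey; };

/* Bottom half: a sorted table searched by binary search (B-tree analog). */
static const struct range bh[] = {
        { 0x1000, 0x2000, 0x11 },
        { 0x5000, 0x9000, 0x22 },
};

static const struct range *bh_lookup(uintptr_t addr)
{
        int lo = 0, hi = (int)(sizeof(bh) / sizeof(bh[0])) - 1;

        while (lo <= hi) {
                int mid = (lo + hi) / 2;

                if (addr < bh[mid].start)
                        hi = mid - 1;
                else if (addr >= bh[mid].end)
                        lo = mid + 1;
                else
                        return &bh[mid];
        }
        return NULL;
}

/* Top half: tiny per-queue array replaced in FIFO order. */
static struct { struct range cache[TOP_N]; uint16_t mru, head; } top;

static uint32_t addr2lkey(uintptr_t addr)
{
        const struct range *e;
        const struct range *r = &top.cache[top.mru];

        if (addr >= r->start && addr < r->end) /* last-hit entry first */
                return r->lkey;
        e = bh_lookup(addr); /* slower bottom half on miss */
        if (e == NULL)
                return UINT32_MAX;
        top.cache[top.head] = *e; /* replace the oldest victim */
        top.mru = top.head;       /* it becomes the last-hit entry */
        top.head = (top.head + 1) % TOP_N;
        return e->lkey;
}

int main(void)
{
        printf("0x%x\n", addr2lkey(0x1234)); /* bottom-half hit, cached */
        printf("0x%x\n", addr2lkey(0x1234)); /* top-half (MRU) hit */
        printf("0x%x\n", addr2lkey(0xdead)); /* no MR: UINT32_MAX */
        return 0;
}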
+
+/**
+ * Bottom-half of LKey search on Rx.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct priv *priv = rxq->priv;
+
+ DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx.
*
* @param txq
* Pointer to Tx queue structure.
- * @param[in] mp
- * Memory pool for which a memory region lkey must be added.
- * @param[in] i
- * Index in memory pool (MP) where to add memory region (MR).
+ * @param addr
+ * Search key.
*
* @return
- * Added mr->lkey on success, (uint32_t)-1 on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
uint32_t
-mlx4_txq_add_mr(struct txq *txq, struct rte_mempool *mp, uint32_t i)
+mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq->priv;
+
+ DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
+}
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ */
+void
+mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl)
+{
+ /* Reset the most-recently-used index. */
+ mr_ctrl->mru = 0;
+ /* Reset the linear search array. */
+ mr_ctrl->head = 0;
+ memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ /* Reset the B-tree table. */
+ mr_ctrl->cache_bh.len = 1;
+ mr_ctrl->cache_bh.overflow = 0;
+ /* Update the generation number. */
+ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ DEBUG("mr_ctrl(%p): flushed, cur_gen=%d",
+ (void *)mr_ctrl, mr_ctrl->cur_gen);
+}
+
+/* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */
+static void
+mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ uint32_t lkey;
+
+ /* Stop iteration if a previous walk step failed. */
+ if (data->ret < 0)
+ return;
+ /* Register address of the chunk and update local caches. */
+ lkey = mlx4_mr_addr2mr_bh(data->dev, data->mr_ctrl,
+ (uintptr_t)memhdr->addr);
+ if (lkey == UINT32_MAX)
+ data->ret = -1;
+}
+
+/**
+ * Register all the memory chunks of a mempool.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to the mempool to register.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+int
+mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
+{
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data);
+ return data.ret;
+}
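The callback pattern used here — an opaque cursor carried through rte_mempool_mem_iter() with a sticky error flag — looks roughly like the following standalone sketch; the chunk list and failure condition are invented for illustration:

#include <stddef.h>
#include <stdio.h>

struct chunk { void *addr; size_t len; };

typedef void (*chunk_cb)(const struct chunk *c, void *opaque);

/* rte_mempool_mem_iter() stand-in over a static chunk list. */
static void mem_iter(const struct chunk *chunks, unsigned int n,
                     chunk_cb cb, void *opaque)
{
        unsigned int i;

        for (i = 0; i < n; ++i)
                cb(&chunks[i], opaque);
}

struct update_data { int ret; }; /* mr_update_mp_data analog */

static void update_cb(const struct chunk *c, void *opaque)
{
        struct update_data *d = opaque;

        if (d->ret < 0) /* sticky failure: skip the rest of the walk */
                return;
        /* mlx4_mr_addr2mr_bh() stand-in: pretend NULL addresses fail. */
        if (c->addr == NULL)
                d->ret = -1;
        else
                printf("registered chunk %p, len %zu\n", c->addr, c->len);
}

int main(void)
{
        char a[64], b[64];
        const struct chunk chunks[] = { { a, sizeof(a) }, { b, sizeof(b) } };
        struct update_data d = { .ret = 0 };

        mem_iter(chunks, 2, update_cb, &d);
        return d.ret;
}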
+
+#ifndef NDEBUG
+/**
+ * Dump all the created MRs and the global cache entries.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx4_mr_dump_dev(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
+ int mr_n = 0;
+ int chunk_n = 0;
+
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ dev->data->port_id, mr_n++,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx4_mr_cache ret = { 0, };
- /* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq, mp->name, (void *)mp);
- mr = mlx4_mr_get(txq->priv, mp);
- if (unlikely(mr == NULL)) {
- DEBUG("%p: unable to configure MR, mlx4_mr_get() failed",
- (void *)txq);
- return (uint32_t)-1;
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (!ret.end)
+ break;
+ DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
+ }
}
- if (unlikely(i == RTE_DIM(txq->mp2mr))) {
- /* Table is full, remove oldest entry. */
- DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
- (void *)txq);
- --i;
- mlx4_mr_put(txq->mp2mr[0].mr);
- memmove(&txq->mp2mr[0], &txq->mp2mr[1],
- (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
+ DEBUG("port %u dumping global cache", dev->data->port_id);
+ mlx4_mr_btree_dump(&priv->mr.cache);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+}
+#endif
+
+/**
+ * Release all the created MRs and resources. Remove the device from the memory
+ * callback list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx4_mr_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+
+ /* Remove from memory callback device list. */
+ rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
+ LIST_REMOVE(priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx4_mem_event_rwlock);
+#ifndef NDEBUG
+ mlx4_mr_dump_dev(dev);
+#endif
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach from MR list and move to free list. */
+ while (mr_next != NULL) {
+ struct mlx4_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
}
- /* Store the new entry. */
- txq->mp2mr[i].mp = mp;
- txq->mp2mr[i].mr = mr;
- txq->mp2mr[i].lkey = mr->lkey;
- DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey);
- return txq->mp2mr[i].lkey;
+ LIST_INIT(&priv->mr.mr_list);
+ /* Free global cache. */
+ mlx4_mr_btree_free(&priv->mr.cache);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Free all remaining MRs. */
+ mlx4_mr_garbage_collect(dev);
}
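A standalone sketch of the detach-then-reclaim sequence above, using the same <sys/queue.h> LIST macros: entries are first moved to a free list (in the driver, under the writer lock), then released afterwards (here immediately; in the driver by mlx4_mr_garbage_collect()):

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct mr { LIST_ENTRY(mr) entry; int id; };
LIST_HEAD(mr_head, mr);

int main(void)
{
        struct mr_head live = LIST_HEAD_INITIALIZER(live);
        struct mr_head dead = LIST_HEAD_INITIALIZER(dead);
        struct mr *m, *next;
        int i;

        for (i = 0; i < 3; ++i) {
                m = malloc(sizeof(*m));
                if (m == NULL)
                        return 1;
                m->id = i;
                LIST_INSERT_HEAD(&live, m, entry);
        }
        /* Detach everything to the free list (under the writer lock). */
        m = LIST_FIRST(&live);
        while (m != NULL) {
                next = LIST_NEXT(m, entry);
                LIST_REMOVE(m, entry);
                LIST_INSERT_HEAD(&dead, m, entry);
                m = next;
        }
        LIST_INIT(&live);
        /* Reclaim afterwards (mlx4_mr_garbage_collect() analog). */
        m = LIST_FIRST(&dead);
        while (m != NULL) {
                next = LIST_NEXT(m, entry);
                printf("freeing MR %d\n", m->id);
                free(m);
                m = next;
        }
        return 0;
}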
diff --git a/drivers/net/mlx4/mlx4_mr.h b/drivers/net/mlx4/mlx4_mr.h
new file mode 100644
index 00000000..37a365a8
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_mr.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX4_MR_H_
+#define RTE_PMD_MLX4_MR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs headers do not support -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eal_memconfig.h>
+#include <rte_ethdev.h>
+#include <rte_rwlock.h>
+#include <rte_bitmap.h>
+
+/* Size of per-queue MR cache array for linear search. */
+#define MLX4_MR_CACHE_N 8
+
+/* Size of MR cache table for binary search. */
+#define MLX4_MR_BTREE_CACHE_N 256
+
+/* Memory Region object. */
+struct mlx4_mr {
+ LIST_ENTRY(mlx4_mr) mr; /**< Pointer to the prev/next entry. */
+ struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
+ const struct rte_memseg_list *msl; /* Memseg list this MR belongs to. */
+ int ms_base_idx; /* Start index of msl->memseg_arr[]. */
+ int ms_n; /* Number of memsegs in use. */
+ uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
+ struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonging to the MR. */
+};
+
+/* Cache entry for Memory Region. */
+struct mlx4_mr_cache {
+ uintptr_t start; /* Start address of MR. */
+ uintptr_t end; /* End address of MR. */
+ uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* MR cache table for binary search. */
+struct mlx4_mr_btree {
+ uint16_t len; /* Number of entries in use. */
+ uint16_t size; /* Capacity of the table. */
+ int overflow; /* Mark failure of table expansion. */
+ struct mlx4_mr_cache (*table)[];
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx4_mr_ctrl {
+ uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
+ uint32_t cur_gen; /* Generation number saved to flush caches. */
+ uint16_t mru; /* Index of last hit entry in top-half cache. */
+ uint16_t head; /* Index of the oldest entry in top-half cache. */
+ struct mlx4_mr_cache cache[MLX4_MR_CACHE_N]; /* Cache for top-half. */
+ struct mlx4_mr_btree cache_bh; /* Cache for bottom-half. */
+} __rte_packed;
+
+extern struct mlx4_dev_list mlx4_mem_event_cb_list;
+extern rte_rwlock_t mlx4_mem_event_rwlock;
+
+/* The first entry is a NULL sentinel used for comparison, hence len - 1. */
+#define mlx4_mr_btree_len(bt) ((bt)->len - 1)
+
+int mlx4_mr_btree_init(struct mlx4_mr_btree *bt, int n, int socket);
+void mlx4_mr_btree_free(struct mlx4_mr_btree *bt);
+void mlx4_mr_btree_dump(struct mlx4_mr_btree *bt);
+void mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg);
+int mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp);
+void mlx4_mr_dump_dev(struct rte_eth_dev *dev);
+void mlx4_mr_release(struct rte_eth_dev *dev);
+
+/**
+ * Look up an LKey in the given lookup table by linear search. First check the
+ * last-hit entry; on a miss, search the entire array. If found, update the
+ * last-hit index and return the LKey.
+ *
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param[in,out] cached_idx
+ * Pointer to last-hit index.
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx4_mr_lookup_cache(struct mlx4_mr_cache *lkp_tbl, uint16_t *cached_idx,
+ uint16_t n, uintptr_t addr)
+{
+ uint16_t idx;
+
+ if (likely(addr >= lkp_tbl[*cached_idx].start &&
+ addr < lkp_tbl[*cached_idx].end))
+ return lkp_tbl[*cached_idx].lkey;
+ for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+ if (addr >= lkp_tbl[idx].start &&
+ addr < lkp_tbl[idx].end) {
+ /* Found. */
+ *cached_idx = idx;
+ return lkp_tbl[idx].lkey;
+ }
+ }
+ return UINT32_MAX;
+}
+
+#endif /* RTE_PMD_MLX4_MR_H_ */
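Usage of the linear-search helper above can be illustrated with a standalone equivalent. Table contents are invented; the contract — last-hit entry first, full scan on miss, UINT32_MAX when nothing matches — is the same:

#include <stdint.h>
#include <stdio.h>

struct entry { uintptr_t start, end; uint32_t lkey; };

/* Same contract as mlx4_mr_lookup_cache(), standalone for illustration. */
static uint32_t lookup(struct entry *tbl, uint16_t *cached, uint16_t n,
                       uintptr_t addr)
{
        uint16_t i;

        if (addr >= tbl[*cached].start && addr < tbl[*cached].end)
                return tbl[*cached].lkey;
        for (i = 0; i < n && tbl[i].start != 0; ++i) {
                if (addr >= tbl[i].start && addr < tbl[i].end) {
                        *cached = i; /* remember the hit for next time */
                        return tbl[i].lkey;
                }
        }
        return UINT32_MAX;
}

int main(void)
{
        struct entry tbl[4] = {
                { 0x1000, 0x2000, 0xaa },
                { 0x2000, 0x4000, 0xbb },
        };
        uint16_t mru = 0;

        printf("0x%x\n", lookup(tbl, &mru, 4, 0x3000)); /* scan, mru -> 1 */
        printf("0x%x\n", lookup(tbl, &mru, 4, 0x3800)); /* last-hit path */
        return 0;
}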
diff --git a/drivers/net/mlx4/mlx4_prm.h b/drivers/net/mlx4/mlx4_prm.h
index 153dda52..aef77ba0 100644
--- a/drivers/net/mlx4/mlx4_prm.h
+++ b/drivers/net/mlx4/mlx4_prm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef MLX4_PRM_H_
@@ -19,6 +19,7 @@
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include "mlx4_autoconf.h"
/* ConnectX-3 Tx queue basic block. */
#define MLX4_TXBB_SHIFT 6
@@ -40,6 +41,7 @@
/* Work queue element (WQE) flags. */
#define MLX4_WQE_CTRL_IIP_HDR_CSUM (1 << 28)
#define MLX4_WQE_CTRL_IL4_HDR_CSUM (1 << 27)
+#define MLX4_WQE_CTRL_RR (1 << 6)
/* CQE checksum flags. */
enum {
@@ -51,6 +53,7 @@ enum {
};
/* CQE status flags. */
+#define MLX4_CQE_STATUS_IPV6F (1 << 12)
#define MLX4_CQE_STATUS_IPV4 (1 << 22)
#define MLX4_CQE_STATUS_IPV4F (1 << 23)
#define MLX4_CQE_STATUS_IPV6 (1 << 24)
@@ -97,6 +100,19 @@ struct mlx4_cq {
int arm_sn; /**< Rx event counter. */
};
+#ifndef HAVE_IBV_MLX4_WQE_LSO_SEG
+/*
+ * WQE LSO segment structure.
+ * Defined here for backward compatibility with rdma-core v17 and below.
+ * A similar definition can be found in infiniband/mlx4dv.h in rdma-core
+ * v18 and above.
+ */
+struct mlx4_wqe_lso_seg {
+ rte_be32_t mss_hdr_size;
+ rte_be32_t header[];
+};
+#endif
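The mss_hdr_size field of this segment packs the MSS into the upper 16 bits and the header length into the lower 16, in big-endian order. A hedged sketch of that packing, using htonl() as a stand-in for rte_cpu_to_be_32() and example values:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h> /* htonl() as a rte_cpu_to_be_32() stand-in */

int main(void)
{
        uint32_t mss = 1448;    /* buf->tso_segsz analog */
        uint32_t hdr_size = 54; /* total L2 + L3 + L4 header length */
        uint32_t mss_hdr_size = htonl((mss << 16) | hdr_size);

        printf("mss_hdr_size = 0x%08x (big endian)\n", mss_hdr_size);
        return 0;
}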
+
/**
* Retrieve a CQE entry from a CQ.
*
diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c
index 7a036ed8..9737da2e 100644
--- a/drivers/net/mlx4/mlx4_rxq.c
+++ b/drivers/net/mlx4/mlx4_rxq.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -88,7 +88,7 @@ mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE] = {
*/
struct mlx4_rss *
mlx4_rss_get(struct priv *priv, uint64_t fields,
- uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[])
{
struct mlx4_rss *rss;
@@ -336,6 +336,14 @@ mlx4_rss_init(struct priv *priv)
unsigned int i;
int ret;
+ if (priv->rss_init)
+ return 0;
+ if (priv->dev->data->nb_rx_queues > priv->hw_rss_max_qps) {
+ ERROR("RSS does not support more than %d queues",
+ priv->hw_rss_max_qps);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
/* Prepare range for RSS contexts before creating the first WQ. */
ret = mlx4_glue->dv_set_context_attr
(priv->ctx,
@@ -418,6 +426,7 @@ wq_num_check:
}
wq_num_prev = wq_num;
}
+ priv->rss_init = 1;
return 0;
error:
ERROR("cannot initialize common RSS resources (queue %u): %s: %s",
@@ -446,6 +455,8 @@ mlx4_rss_deinit(struct priv *priv)
{
unsigned int i;
+ if (!priv->rss_init)
+ return;
for (i = 0; i != priv->dev->data->nb_rx_queues; ++i) {
struct rxq *rxq = priv->dev->data->rx_queues[i];
@@ -454,6 +465,7 @@ mlx4_rss_deinit(struct priv *priv)
mlx4_rxq_detach(rxq);
}
}
+ priv->rss_init = 0;
}
/**
@@ -482,6 +494,7 @@ mlx4_rxq_attach(struct rxq *rxq)
}
struct priv *priv = rxq->priv;
+ struct rte_eth_dev *dev = priv->dev;
const uint32_t elts_n = 1 << rxq->elts_n;
const uint32_t sges_n = 1 << rxq->sges_n;
struct rte_mbuf *(*elts)[elts_n] = rxq->elts;
@@ -491,6 +504,8 @@ mlx4_rxq_attach(struct rxq *rxq)
const char *msg;
struct ibv_cq *cq = NULL;
struct ibv_wq *wq = NULL;
+ uint32_t create_flags = 0;
+ uint32_t comp_mask = 0;
volatile struct mlx4_wqe_data_seg (*wqes)[];
unsigned int i;
int ret;
@@ -503,6 +518,11 @@ mlx4_rxq_attach(struct rxq *rxq)
msg = "CQ creation failure";
goto error;
}
+ /* By default, FCS (CRC) is stripped by hardware. */
+ if (rxq->crc_present) {
+ create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
wq = mlx4_glue->create_wq
(priv->ctx,
&(struct ibv_wq_init_attr){
@@ -511,6 +531,8 @@ mlx4_rxq_attach(struct rxq *rxq)
.max_sge = sges_n,
.pd = priv->pd,
.cq = cq,
+ .comp_mask = comp_mask,
+ .create_flags = create_flags,
});
if (!wq) {
ret = errno ? errno : EINVAL;
@@ -537,6 +559,11 @@ mlx4_rxq_attach(struct rxq *rxq)
msg = "failed to obtain device information from WQ/CQ objects";
goto error;
}
+ /* Pre-register Rx mempool. */
+ DEBUG("port %u Rx queue %u registering mp %s having %u chunks",
+ priv->dev->data->port_id, rxq->stats.idx,
+ rxq->mp->name, rxq->mp->nb_mem_chunks);
+ mlx4_mr_update_mp(dev, &rxq->mr_ctrl, rxq->mp);
wqes = (volatile struct mlx4_wqe_data_seg (*)[])
((uintptr_t)dv_rwq.buf.buf + dv_rwq.rq.offset);
for (i = 0; i != RTE_DIM(*elts); ++i) {
@@ -568,7 +595,7 @@ mlx4_rxq_attach(struct rxq *rxq)
.addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
uintptr_t)),
.byte_count = rte_cpu_to_be_32(buf->data_len),
- .lkey = rte_cpu_to_be_32(rxq->mr->lkey),
+ .lkey = mlx4_rx_mb2mr(rxq, buf),
};
(*elts)[i] = buf;
}
@@ -597,6 +624,7 @@ error:
claim_zero(mlx4_glue->destroy_wq(wq));
if (cq)
claim_zero(mlx4_glue->destroy_cq(cq));
+ --rxq->usecnt;
rte_errno = ret;
ERROR("error while attaching Rx queue %p: %s: %s",
(void *)rxq, msg, strerror(ret));
@@ -650,7 +678,9 @@ uint64_t
mlx4_get_rx_queue_offloads(struct priv *priv)
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
if (priv->hw_csum)
offloads |= DEV_RX_OFFLOAD_CHECKSUM;
@@ -676,26 +706,6 @@ mlx4_get_rx_port_offloads(struct priv *priv)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param requested
- * Per-queue offloads configuration.
- *
- * @return
- * Nonzero when configuration is valid.
- */
-static int
-mlx4_check_rx_queue_offloads(struct priv *priv, uint64_t requested)
-{
- uint64_t mandatory = priv->dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = mlx4_get_rx_port_offloads(priv);
-
- return !((mandatory ^ requested) & supported);
-}
-
-/**
* DPDK callback to configure a Rx queue.
*
* @param dev
@@ -736,20 +746,14 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
},
};
int ret;
+ uint32_t crc_present;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
- (void)conf; /* Thresholds configuration (ignored). */
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
- if (!mlx4_check_rx_queue_offloads(priv, conf->offloads)) {
- rte_errno = ENOTSUP;
- ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (mlx4_get_rx_port_offloads(priv) |
- mlx4_get_rx_queue_offloads(priv)));
- return -rte_errno;
- }
+
if (idx >= dev->data->nb_rx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -774,6 +778,23 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
" to the next power of two (%u)",
(void *)dev, idx, desc);
}
+ /* By default, FCS (CRC) is stripped by hardware. */
+ crc_present = 0;
+ if (rte_eth_dev_must_keep_crc(offloads)) {
+ if (priv->hw_fcs_strip) {
+ crc_present = 1;
+ } else {
+ WARN("%p: CRC stripping has been disabled but will still"
+ " be performed by hardware, make sure MLNX_OFED and"
+ " firmware are up to date",
+ (void *)dev);
+ }
+ }
+ DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ (void *)dev,
+ crc_present ? "disabled" : "enabled",
+ crc_present << 2);
/* Allocate and initialize Rx queue. */
mlx4_zmallocv_socket("RXQ", vec, RTE_DIM(vec), socket);
if (!rxq) {
@@ -790,9 +811,10 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts = elts,
/* Toggle Rx checksum offload if hardware supports it. */
.csum = priv->hw_csum &&
- (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & DEV_RX_OFFLOAD_CHECKSUM),
.csum_l2tun = priv->hw_csum_l2tun &&
- (conf->offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ (offloads & DEV_RX_OFFLOAD_CHECKSUM),
+ .crc_present = crc_present,
.l2tun_offload = priv->hw_csum_l2tun,
.stats = {
.idx = idx,
@@ -804,7 +826,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
(mb_len - RTE_PKTMBUF_HEADROOM)) {
;
- } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -847,11 +869,9 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1 << rxq->sges_n);
goto error;
}
- /* Use the entire Rx mempool as the memory region. */
- rxq->mr = mlx4_mr_get(priv, mp);
- if (!rxq->mr) {
- ERROR("%p: MR creation failure: %s",
- (void *)dev, strerror(rte_errno));
+ if (mlx4_mr_btree_init(&rxq->mr_ctrl.cache_bh,
+ MLX4_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
goto error;
}
if (dev->data->dev_conf.intr_conf.rxq) {
@@ -911,7 +931,6 @@ mlx4_rx_queue_release(void *dpdk_rxq)
assert(!rxq->rq_db);
if (rxq->channel)
claim_zero(mlx4_glue->destroy_comp_channel(rxq->channel));
- if (rxq->mr)
- mlx4_mr_put(rxq->mr);
+ mlx4_mr_btree_free(&rxq->mr_ctrl.cache_bh);
rte_free(rxq);
}
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index 8ca8b77c..8c88effc 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -38,10 +38,29 @@
 * DWORD (32-bit) of a TXBB.
*/
struct pv {
- volatile struct mlx4_wqe_data_seg *dseg;
+ union {
+ volatile struct mlx4_wqe_data_seg *dseg;
+ volatile uint32_t *dst;
+ };
uint32_t val;
};
+/** A helper structure for TSO packet handling. */
+struct tso_info {
+ /** Pointer to the array of saved first DWORDs (32-bit) of TXBBs. */
+ struct pv *pv;
+ /** Current entry in the pv array. */
+ int pv_counter;
+ /** Total size of the WQE including padding. */
+ uint32_t wqe_size;
+ /** Size of TSO header to prepend to each packet to send. */
+ uint16_t tso_header_size;
+ /** Total size of the TSO segment in the WQE. */
+ uint16_t wqe_tso_seg_size;
+ /** Raw WQE size in units of 16 Bytes and without padding. */
+ uint8_t fence_size;
+};
+
/** A table to translate Rx completion flags to packet type. */
uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
/*
@@ -52,49 +71,58 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
* bit[4] - MLX4_CQE_STATUS_TCP
* bit[3] - MLX4_CQE_STATUS_IPV4OPT
* bit[2] - MLX4_CQE_STATUS_IPV6
- * bit[1] - MLX4_CQE_STATUS_IPV4F
+ * bit[1] - MLX4_CQE_STATUS_IPF
* bit[0] - MLX4_CQE_STATUS_IPV4
* giving a total of up to 256 entries.
*/
+ /* L2 */
[0x00] = RTE_PTYPE_L2_ETHER,
+ /* L3 */
[0x01] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_NONFRAG,
[0x02] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG,
[0x03] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_FRAG,
- [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
- [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT,
+ [0x04] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [0x08] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_NONFRAG,
+ [0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_NONFRAG,
[0x0a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_FRAG,
+ [0x0b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
+ RTE_PTYPE_L4_FRAG,
+ /* TCP */
[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP,
- [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
[0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP,
+ [0x16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
[0x18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_TCP,
[0x19] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_TCP,
- [0x1a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
- RTE_PTYPE_L4_TCP,
+ /* UDP */
[0x21] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP,
- [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
[0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP,
+ [0x26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
[0x28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_UDP,
[0x29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
RTE_PTYPE_L4_UDP,
- [0x2a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT |
- RTE_PTYPE_L4_UDP,
/* Tunneled - L3 IPV6 */
[0x80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
[0x81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
@@ -102,65 +130,58 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
[0x84] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0x88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0x8b] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
/* Tunneled - L3 IPV6, TCP */
[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
- [0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
- [0x93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
[0x94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
+ [0x96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0x98] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
[0x99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT |
- RTE_PTYPE_INNER_L4_TCP,
- [0x9a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP,
/* Tunneled - L3 IPV6, UDP */
- [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- [0xa2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [0xa1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
RTE_PTYPE_INNER_L4_UDP,
- [0xa3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
- [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [0xa4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
- [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [0xa6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [0xa8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_UDP,
- [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ [0xa9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_UDP,
- [0xaa] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
/* Tunneled - L3 IPV4 */
[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0xc2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
@@ -168,65 +189,54 @@ uint32_t mlx4_ptype_table[0x100] __rte_cache_aligned = {
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_FRAG,
[0xc4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0xc8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0xc9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_NONFRAG,
[0xca] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_FRAG,
+ [0xcb] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_FRAG,
/* Tunneled - L3 IPV4, TCP */
- [0xd0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
- [0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
- [0xd3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
[0xd4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP,
+ [0xd6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0xd8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_TCP,
[0xd9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_TCP,
- [0xda] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_TCP,
/* Tunneled - L3 IPV4, UDP */
- [0xe0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
[0xe1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
- [0xe2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
- [0xe3] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG |
- RTE_PTYPE_INNER_L4_UDP,
[0xe4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_UDP,
+ [0xe6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
[0xe8] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
+ RTE_PTYPE_INNER_L4_UDP,
[0xe9] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
- [0xea] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_FRAG |
+ RTE_PTYPE_INNER_L3_IPV4_EXT |
RTE_PTYPE_INNER_L4_UDP,
};
@@ -263,7 +273,7 @@ mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, volatile uint32_t *start,
} while (start != (volatile uint32_t *)sq->eob);
start = (volatile uint32_t *)sq->buf;
/* Flip invalid stamping ownership. */
- stamp ^= RTE_BE32(0x1 << MLX4_SQ_OWNER_BIT);
+ stamp ^= RTE_BE32(1u << MLX4_SQ_OWNER_BIT);
sq->stamp = stamp;
if (start == end)
return size;
@@ -344,24 +354,6 @@ mlx4_txq_complete(struct txq *txq, const unsigned int elts_m,
}
/**
- * Get memory pool (MP) from mbuf. If mbuf is indirect, the pool from which
- * the cloned mbuf is allocated is returned instead.
- *
- * @param buf
- * Pointer to mbuf.
- *
- * @return
- * Memory pool where data is located for given mbuf.
- */
-static struct rte_mempool *
-mlx4_txq_mb2mp(struct rte_mbuf *buf)
-{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
-}
-
-/**
* Write Tx data segment to the SQ.
*
* @param dseg
@@ -378,7 +370,7 @@ mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
uint32_t lkey, uintptr_t addr, rte_be32_t byte_count)
{
dseg->addr = rte_cpu_to_be_64(addr);
- dseg->lkey = rte_cpu_to_be_32(lkey);
+ dseg->lkey = lkey;
#if RTE_CACHE_LINE_SIZE < 64
/*
* Need a barrier here before writing the byte_count
@@ -395,6 +387,342 @@ mlx4_fill_tx_data_seg(volatile struct mlx4_wqe_data_seg *dseg,
}
/**
+ * Obtain and calculate TSO information needed for assembling a TSO WQE.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to a structure to fill the info with.
+ *
+ * @return
+ * 0 on success, negative value upon error.
+ */
+static inline int
+mlx4_tx_burst_tso_get_params(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo)
+{
+ struct mlx4_sq *sq = &txq->msq;
+ const uint8_t tunneled = txq->priv->hw_csum_l2tun &&
+ (buf->ol_flags & PKT_TX_TUNNEL_MASK);
+
+ tinfo->tso_header_size = buf->l2_len + buf->l3_len + buf->l4_len;
+ if (tunneled)
+ tinfo->tso_header_size +=
+ buf->outer_l2_len + buf->outer_l3_len;
+ if (unlikely(buf->tso_segsz == 0 ||
+ tinfo->tso_header_size == 0 ||
+ tinfo->tso_header_size > MLX4_MAX_TSO_HEADER ||
+ tinfo->tso_header_size > buf->data_len))
+ return -EINVAL;
+ /*
+ * Calculate the WQE TSO segment size.
+ * Note:
+ * 1. An LSO segment must be padded such that the subsequent data
+ * segment is 16-byte aligned.
+ * 2. The start address of the TSO segment is always 16-byte aligned.
+ */
+ tinfo->wqe_tso_seg_size = RTE_ALIGN(sizeof(struct mlx4_wqe_lso_seg) +
+ tinfo->tso_header_size,
+ sizeof(struct mlx4_wqe_data_seg));
+ tinfo->fence_size = ((sizeof(struct mlx4_wqe_ctrl_seg) +
+ tinfo->wqe_tso_seg_size) >> MLX4_SEG_SHIFT) +
+ buf->nb_segs;
+ tinfo->wqe_size =
+ RTE_ALIGN((uint32_t)(tinfo->fence_size << MLX4_SEG_SHIFT),
+ MLX4_TXBB_SIZE);
+ /* Validate WQE size and WQE space in the send queue. */
+ if (sq->remain_size < tinfo->wqe_size ||
+ tinfo->wqe_size > MLX4_MAX_WQE_SIZE)
+ return -ENOMEM;
+ /* Init pv. */
+ tinfo->pv = (struct pv *)txq->bounce_buf;
+ tinfo->pv_counter = 0;
+ return 0;
+}
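The sizing arithmetic above can be replayed numerically. In this standalone sketch the segment sizes are hard-coded stand-ins for the driver's sizeof() values and MLX4_* constants, and the header size and segment count are examples:

#include <stdio.h>

#define ALIGN(v, a) (((v) + (a) - 1) & ~((a) - 1))
#define CTRL_SEG 16u /* sizeof(struct mlx4_wqe_ctrl_seg) stand-in */
#define LSO_SEG 4u   /* sizeof(struct mlx4_wqe_lso_seg), header excluded */
#define DSEG 16u     /* sizeof(struct mlx4_wqe_data_seg) stand-in */
#define TXBB 64u     /* MLX4_TXBB_SIZE stand-in */
#define SEG_SHIFT 4  /* MLX4_SEG_SHIFT stand-in */

int main(void)
{
        unsigned int tso_header_size = 54; /* Ethernet + IPv4 + TCP */
        unsigned int nb_segs = 3;
        /* LSO segment padded so the next data segment is 16-byte aligned. */
        unsigned int wqe_tso_seg_size = ALIGN(LSO_SEG + tso_header_size, DSEG);
        /* Raw WQE size in 16-byte units, then padded to whole TXBBs. */
        unsigned int fence_size =
                ((CTRL_SEG + wqe_tso_seg_size) >> SEG_SHIFT) + nb_segs;
        unsigned int wqe_size = ALIGN(fence_size << SEG_SHIFT, TXBB);

        printf("tso seg %u, fence %u, wqe %u\n",
               wqe_tso_seg_size, fence_size, wqe_size);
        return 0;
}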
+
+/**
+ * Fill the TSO WQE data segments with info on the buffers to transmit.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to TSO info to use.
+ * @param dseg
+ * Pointer to the first data segment in the TSO WQE.
+ * @param ctrl
+ * Pointer to the control segment in the TSO WQE.
+ *
+ * @return
+ * Pointer to the control segment of the next WQE on success, NULL otherwise.
+ */
+static inline volatile struct mlx4_wqe_ctrl_seg *
+mlx4_tx_burst_fill_tso_dsegs(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo,
+ volatile struct mlx4_wqe_data_seg *dseg,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ uint32_t lkey;
+ int nb_segs = buf->nb_segs;
+ int nb_segs_txbb;
+ struct mlx4_sq *sq = &txq->msq;
+ struct rte_mbuf *sbuf = buf;
+ struct pv *pv = tinfo->pv;
+ int *pv_counter = &tinfo->pv_counter;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl_next =
+ (volatile struct mlx4_wqe_ctrl_seg *)
+ ((volatile uint8_t *)ctrl + tinfo->wqe_size);
+ uint16_t data_len = sbuf->data_len - tinfo->tso_header_size;
+ uintptr_t data_addr = rte_pktmbuf_mtod_offset(sbuf, uintptr_t,
+ tinfo->tso_header_size);
+
+ do {
+ /* How many dseg entries do we have in the current TXBB? */
+ nb_segs_txbb = (MLX4_TXBB_SIZE -
+ ((uintptr_t)dseg & (MLX4_TXBB_SIZE - 1))) >>
+ MLX4_SEG_SHIFT;
+ switch (nb_segs_txbb) {
+#ifndef NDEBUG
+ default:
+ /* Should never happen. */
+ rte_panic("%p: Invalid number of SGEs(%d) for a TXBB",
+ (void *)txq, nb_segs_txbb);
+ /* rte_panic never returns. */
+ break;
+#endif /* NDEBUG */
+ case 4:
+ /* Memory region key for this memory pool. */
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ dseg->addr = rte_cpu_to_be_64(data_addr);
+ dseg->lkey = lkey;
+ /*
+ * This data segment starts at the beginning of a new
+ * TXBB, so we need to postpone its byte_count writing
+ * for later.
+ */
+ pv[*pv_counter].dseg = dseg;
+ /*
+ * Zero length segment is treated as inline segment
+ * with zero data.
+ */
+ pv[(*pv_counter)++].val =
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000);
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 3:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 2:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ case 1:
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
+ if (unlikely(lkey == (uint32_t)-1))
+ goto err;
+ mlx4_fill_tx_data_seg(dseg, lkey, data_addr,
+ rte_cpu_to_be_32(data_len ?
+ data_len :
+ 0x80000000));
+ if (--nb_segs == 0)
+ return ctrl_next;
+ /* Prepare next buf info */
+ sbuf = sbuf->next;
+ dseg++;
+ data_len = sbuf->data_len;
+ data_addr = rte_pktmbuf_mtod(sbuf, uintptr_t);
+ /* fallthrough */
+ }
+ /* Wrap dseg if it points at the end of the queue. */
+ if ((volatile uint8_t *)dseg >= sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ ((volatile uint8_t *)dseg - sq->size);
+ } while (true);
+err:
+ return NULL;
+}
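The dispatch above unrolls, per 64-byte TXBB, over however many 16-byte data-segment slots remain before the block boundary, postponing the first DWORD of each new TXBB. A loop-based standalone sketch of the slot computation; the addresses are fake, and the driver uses a switch with fallthrough instead of the inner loop:

#include <stdint.h>
#include <stdio.h>

#define TXBB 64u     /* MLX4_TXBB_SIZE stand-in */
#define SEG_SHIFT 4  /* 16-byte data segments */

int main(void)
{
        /* Pretend dseg pointer; only its alignment matters here. */
        uintptr_t dseg = 0x1030; /* 0x30 into a TXBB: one slot left */
        int nb_segs = 6, written = 0;

        while (nb_segs > 0) {
                /* Slots left in the current 64B TXBB, as in the driver. */
                int slots = (int)((TXBB - (dseg & (TXBB - 1))) >> SEG_SHIFT);

                while (slots-- > 0 && nb_segs > 0) {
                        /* The first slot of a TXBB would be postponed. */
                        printf("dseg at %#lx%s\n", (unsigned long)dseg,
                               (dseg & (TXBB - 1)) == 0 ?
                               " (byte_count deferred)" : "");
                        dseg += 1u << SEG_SHIFT;
                        --nb_segs;
                        ++written;
                }
                /* Wrap-around check against sq->eob would go here. */
        }
        printf("%d segments written\n", written);
        return 0;
}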
+
+/**
+ * Write the packet's L2, L3 and L4 headers into the WQE.
+ *
+ * This will be used as the header for each TSO segment that is transmitted.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tinfo
+ * Pointer to TSO info to use.
+ * @param ctrl
+ * Pointer to the control segment in the TSO WQE.
+ *
+ * @return
+ * Pointer to the first data segment of the TSO WQE.
+ */
+static inline volatile struct mlx4_wqe_data_seg *
+mlx4_tx_burst_fill_tso_hdr(struct rte_mbuf *buf,
+ struct txq *txq,
+ struct tso_info *tinfo,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ volatile struct mlx4_wqe_lso_seg *tseg =
+ (volatile struct mlx4_wqe_lso_seg *)(ctrl + 1);
+ struct mlx4_sq *sq = &txq->msq;
+ struct pv *pv = tinfo->pv;
+ int *pv_counter = &tinfo->pv_counter;
+ int remain_size = tinfo->tso_header_size;
+ char *from = rte_pktmbuf_mtod(buf, char *);
+ uint16_t txbb_avail_space;
+ /* Union to overcome volatile constraints when copying TSO header. */
+ union {
+ volatile uint8_t *vto;
+ uint8_t *to;
+ } thdr = { .vto = (volatile uint8_t *)tseg->header, };
+
+ /*
+ * TSO data always starts at offset 20 from the beginning of the TXBB
+ * (16-byte ctrl + 4-byte TSO descriptor). Since each TXBB is 64-byte
+ * aligned, we can write the first 44 TSO header bytes without worrying
+ * about TxQ wrap-around or overwriting the first 32-bit word of a
+ * TXBB.
+ */
+ txbb_avail_space = MLX4_TXBB_SIZE -
+ (sizeof(struct mlx4_wqe_ctrl_seg) +
+ sizeof(struct mlx4_wqe_lso_seg));
+ while (remain_size >= (int)(txbb_avail_space + sizeof(uint32_t))) {
+ /* Copy to end of txbb. */
+ rte_memcpy(thdr.to, from, txbb_avail_space);
+ from += txbb_avail_space;
+ thdr.to += txbb_avail_space;
+ /* New TXBB, check for TxQ wrap. */
+ if (thdr.to >= sq->eob)
+ thdr.vto = sq->buf;
+ /* New TXBB, stash the first 32 bits for later use. */
+ pv[*pv_counter].dst = (volatile uint32_t *)thdr.to;
+ pv[(*pv_counter)++].val = *(uint32_t *)from;
+ from += sizeof(uint32_t);
+ thdr.to += sizeof(uint32_t);
+ remain_size -= txbb_avail_space + sizeof(uint32_t);
+ /* Available space in a new TXBB is TXBB size - 4. */
+ txbb_avail_space = MLX4_TXBB_SIZE - sizeof(uint32_t);
+ }
+ if (remain_size > txbb_avail_space) {
+ rte_memcpy(thdr.to, from, txbb_avail_space);
+ from += txbb_avail_space;
+ thdr.to += txbb_avail_space;
+ remain_size -= txbb_avail_space;
+ /* New TXBB, check for TxQ wrap. */
+ if (thdr.to >= sq->eob)
+ thdr.vto = sq->buf;
+ pv[*pv_counter].dst = (volatile uint32_t *)thdr.to;
+ rte_memcpy(&pv[*pv_counter].val, from, remain_size);
+ (*pv_counter)++;
+ } else if (remain_size) {
+ rte_memcpy(thdr.to, from, remain_size);
+ }
+ tseg->mss_hdr_size = rte_cpu_to_be_32((buf->tso_segsz << 16) |
+ tinfo->tso_header_size);
+ /* Calculate the data segment location. */
+ return (volatile struct mlx4_wqe_data_seg *)
+ ((uintptr_t)tseg + tinfo->wqe_tso_seg_size);
+}
+
+/**
+ * Write data segments and header for TSO uni/multi segment packet.
+ *
+ * @param buf
+ * Pointer to the first packet mbuf.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param ctrl
+ * Pointer to the WQE control segment.
+ *
+ * @return
+ * Pointer to the next WQE control segment on success, NULL otherwise.
+ */
+static volatile struct mlx4_wqe_ctrl_seg *
+mlx4_tx_burst_tso(struct rte_mbuf *buf, struct txq *txq,
+ volatile struct mlx4_wqe_ctrl_seg *ctrl)
+{
+ volatile struct mlx4_wqe_data_seg *dseg;
+ volatile struct mlx4_wqe_ctrl_seg *ctrl_next;
+ struct mlx4_sq *sq = &txq->msq;
+ struct tso_info tinfo;
+ struct pv *pv;
+ int pv_counter;
+ int ret;
+
+ ret = mlx4_tx_burst_tso_get_params(buf, txq, &tinfo);
+ if (unlikely(ret))
+ goto error;
+ dseg = mlx4_tx_burst_fill_tso_hdr(buf, txq, &tinfo, ctrl);
+ if (unlikely(dseg == NULL))
+ goto error;
+ if ((uintptr_t)dseg >= (uintptr_t)sq->eob)
+ dseg = (volatile struct mlx4_wqe_data_seg *)
+ ((uintptr_t)dseg - sq->size);
+ ctrl_next = mlx4_tx_burst_fill_tso_dsegs(buf, txq, &tinfo, dseg, ctrl);
+ if (unlikely(ctrl_next == NULL))
+ goto error;
+ /* Write the first DWORD of each TXBB saved earlier. */
+ if (likely(tinfo.pv_counter)) {
+ pv = tinfo.pv;
+ pv_counter = tinfo.pv_counter;
+ /* Need a barrier here before writing the first TXBB word. */
+ rte_io_wmb();
+ do {
+ --pv_counter;
+ *pv[pv_counter].dst = pv[pv_counter].val;
+ } while (pv_counter > 0);
+ }
+ ctrl->fence_size = tinfo.fence_size;
+ sq->remain_size -= tinfo.wqe_size;
+ return ctrl_next;
+error:
+ txq->stats.odropped++;
+ return NULL;
+}
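The deferred write of each TXBB's first DWORD — stash while filling, then release in reverse order after a store barrier — can be sketched standalone. The compiler barrier below is a stand-in for rte_io_wmb(), and the block sizes and values are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCKS 3
#define BLKSZ 16

struct pv { uint32_t *dst; uint32_t val; };

/* Compiler barrier standing in for rte_io_wmb() in this sketch. */
#define io_wmb() __asm__ volatile ("" ::: "memory")

int main(void)
{
        uint32_t ring[BLOCKS * BLKSZ / 4];
        struct pv pv[BLOCKS];
        int pv_counter = 0, i;

        /* Phase 1: fill each block but stash its first DWORD. */
        for (i = 0; i < BLOCKS; ++i) {
                uint32_t *blk = &ring[i * BLKSZ / 4];

                memset(blk + 1, 0xab, BLKSZ - 4);
                pv[pv_counter].dst = blk;
                pv[pv_counter++].val = 0xcafe0000u | i;
        }
        /* Phase 2: barrier, then release the blocks in reverse order. */
        io_wmb();
        while (pv_counter > 0) {
                --pv_counter;
                *pv[pv_counter].dst = pv[pv_counter].val;
        }
        for (i = 0; i < BLOCKS; ++i)
                printf("block %d first dword 0x%08x\n",
                       i, ring[i * BLKSZ / 4]);
        return 0;
}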
+
+/**
* Write data segments of multi-segment packet.
*
* @param buf
@@ -437,7 +765,7 @@ mlx4_tx_burst_segs(struct rte_mbuf *buf, struct txq *txq,
goto txbb_tail_segs;
txbb_head_seg:
/* Memory region key (big endian) for this memory pool. */
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
@@ -449,7 +777,7 @@ txbb_head_seg:
dseg = (volatile struct mlx4_wqe_data_seg *)
sq->buf;
dseg->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(sbuf, uintptr_t));
- dseg->lkey = rte_cpu_to_be_32(lkey);
+ dseg->lkey = lkey;
/*
* This data segment starts at the beginning of a new
* TXBB, so we need to postpone its byte_count writing
@@ -469,7 +797,7 @@ txbb_tail_segs:
/* Jump to default if there are more than two segments remaining. */
switch (nb_segs) {
default:
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
@@ -485,7 +813,7 @@ txbb_tail_segs:
nb_segs--;
/* fallthrough */
case 2:
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
@@ -501,7 +829,7 @@ txbb_tail_segs:
nb_segs--;
/* fallthrough */
case 1:
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(sbuf));
+ lkey = mlx4_tx_mb2mr(txq, sbuf);
if (unlikely(lkey == (uint32_t)-1)) {
DEBUG("%p: unable to get MP <-> MR association",
(void *)txq);
@@ -587,6 +915,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t flags16[2];
} srcrb;
uint32_t lkey;
+ bool tso = txq->priv->tso && (buf->ol_flags & PKT_TX_TCP_SEG);
/* Clean up old buffer. */
if (likely(elt->buf != NULL)) {
@@ -605,13 +934,22 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
} while (tmp != NULL);
}
RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
- if (buf->nb_segs == 1) {
+ if (tso) {
+ /* Change opcode to TSO */
+ owner_opcode &= ~MLX4_OPCODE_CONFIG_CMD;
+ owner_opcode |= MLX4_OPCODE_LSO | MLX4_WQE_CTRL_RR;
+ ctrl_next = mlx4_tx_burst_tso(buf, txq, ctrl);
+ if (!ctrl_next) {
+ elt->buf = NULL;
+ break;
+ }
+ } else if (buf->nb_segs == 1) {
/* Validate WQE space in the send queue. */
if (sq->remain_size < MLX4_TXBB_SIZE) {
elt->buf = NULL;
break;
}
- lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
+ lkey = mlx4_tx_mb2mr(txq, buf);
if (unlikely(lkey == (uint32_t)-1)) {
/* MR does not exist. */
DEBUG("%p: unable to get MP <-> MR association",
@@ -639,7 +977,7 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
ctrl_next = (volatile struct mlx4_wqe_ctrl_seg *)
((volatile uint8_t *)ctrl_next - sq->size);
/* Flip HW valid ownership. */
- sq->owner_opcode ^= 0x1 << MLX4_SQ_OWNER_BIT;
+ sq->owner_opcode ^= 1u << MLX4_SQ_OWNER_BIT;
}
/*
* For raw Ethernet, the SOLICIT flag is used to indicate
@@ -746,11 +1084,13 @@ rxq_cq_to_pkt_type(volatile struct mlx4_cqe *cqe,
* bit[4] - MLX4_CQE_STATUS_TCP
* bit[3] - MLX4_CQE_STATUS_IPV4OPT
* bit[2] - MLX4_CQE_STATUS_IPV6
- * bit[1] - MLX4_CQE_STATUS_IPV4F
+ * bit[1] - MLX4_CQE_STATUS_IPF
* bit[0] - MLX4_CQE_STATUS_IPV4
* giving a total of up to 256 entries.
*/
idx |= ((status & MLX4_CQE_STATUS_PTYPE_MASK) >> 22);
+ if (status & MLX4_CQE_STATUS_IPV6)
+ idx |= ((status & MLX4_CQE_STATUS_IPV6F) >> 11);
return mlx4_ptype_table[idx];
}
@@ -934,11 +1274,14 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
goto skip;
}
pkt = seg;
+ assert(len >= (rxq->crc_present << 2));
/* Update packet information. */
pkt->packet_type =
rxq_cq_to_pkt_type(cqe, rxq->l2tun_offload);
pkt->ol_flags = PKT_RX_RSS_HASH;
pkt->hash.rss = cqe->immed_rss_invalid;
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
pkt->pkt_len = len;
if (rxq->csum | rxq->csum_l2tun) {
uint32_t flags =
@@ -963,6 +1306,9 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* changes.
*/
scat->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx4_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ scat->lkey = mlx4_rx_mb2mr(rxq, rep);
if (len > seg->data_len) {
len -= seg->data_len;
++pkt->nb_segs;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index c12bd39a..ffa8abfc 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef MLX4_RXTX_H_
@@ -25,6 +25,7 @@
#include "mlx4.h"
#include "mlx4_prm.h"
+#include "mlx4_mr.h"
/** Rx queue counters. */
struct mlx4_rxq_stats {
@@ -39,7 +40,6 @@ struct mlx4_rxq_stats {
struct rxq {
struct priv *priv; /**< Back pointer to private data. */
struct rte_mempool *mp; /**< Memory pool for allocations. */
- struct mlx4_mr *mr; /**< Memory region. */
struct ibv_cq *cq; /**< Completion queue. */
struct ibv_wq *wq; /**< Work queue. */
struct ibv_comp_channel *channel; /**< Rx completion channel. */
@@ -47,11 +47,13 @@ struct rxq {
uint16_t port_id; /**< Port ID for incoming packets. */
uint16_t sges_n; /**< Number of segments per packet (log2 value). */
uint16_t elts_n; /**< Mbuf queue size (log2 value). */
+ struct mlx4_mr_ctrl mr_ctrl; /**< MR control descriptor. */
struct rte_mbuf *(*elts)[]; /**< Rx elements. */
volatile struct mlx4_wqe_data_seg (*wqes)[]; /**< HW queue entries. */
volatile uint32_t *rq_db; /**< RQ doorbell record. */
uint32_t csum:1; /**< Enable checksum offloading. */
uint32_t csum_l2tun:1; /**< Same for L2 tunnels. */
+ uint32_t crc_present:1; /**< CRC must be subtracted. */
uint32_t l2tun_offload:1; /**< L2 tunnel offload is enabled. */
struct mlx4_cq mcq; /**< Info for directly manipulating the CQ. */
struct mlx4_rxq_stats stats; /**< Rx queue counters. */
@@ -83,12 +85,12 @@ struct txq_elt {
};
};
-/** Rx queue counters. */
+/** Tx queue counters. */
struct mlx4_txq_stats {
unsigned int idx; /**< Mapping index. */
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
- uint64_t odropped; /**< Total of packets not sent when Tx ring full. */
+ uint64_t odropped; /**< Total number of packets that failed to transmit. */
};
/** Tx queue descriptor. */
@@ -100,6 +102,7 @@ struct txq {
int elts_comp_cd; /**< Countdown for next completion. */
unsigned int elts_comp_cd_init; /**< Initial value for countdown. */
unsigned int elts_n; /**< (*elts)[] length. */
+ struct mlx4_mr_ctrl mr_ctrl; /**< MR control descriptor. */
struct txq_elt (*elts)[]; /**< Tx elements. */
struct mlx4_txq_stats stats; /**< Tx queue counters. */
uint32_t max_inline; /**< Max inline send size. */
@@ -108,11 +111,6 @@ struct txq {
uint32_t lb:1; /**< Whether packets should be looped back by eSwitch. */
uint8_t *bounce_buf;
/**< Memory used for storing the first DWORD of data TXBBs. */
- struct {
- const struct rte_mempool *mp; /**< Cached memory pool. */
- struct mlx4_mr *mr; /**< Memory region (for mp). */
- uint32_t lkey; /**< mr->lkey copy. */
- } mp2mr[MLX4_PMD_TX_MP_CACHE]; /**< MP to MR translation table. */
struct priv *priv; /**< Back pointer to private data. */
unsigned int socket; /**< CPU socket ID for allocations. */
struct ibv_cq *cq; /**< Completion queue. */
@@ -126,7 +124,7 @@ uint8_t mlx4_rss_hash_key_default[MLX4_RSS_HASH_KEY_SIZE];
int mlx4_rss_init(struct priv *priv);
void mlx4_rss_deinit(struct priv *priv);
struct mlx4_rss *mlx4_rss_get(struct priv *priv, uint64_t fields,
- uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
+ const uint8_t key[MLX4_RSS_HASH_KEY_SIZE],
uint16_t queues, const uint16_t queue_id[]);
void mlx4_rss_put(struct mlx4_rss *rss);
int mlx4_rss_attach(struct mlx4_rss *rss);
@@ -160,34 +158,70 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
const struct rte_eth_txconf *conf);
void mlx4_tx_queue_release(void *dpdk_txq);
+/* mlx4_mr.c */
+
+void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
+uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
+uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
+
/**
- * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
- * Call mlx4_txq_add_mr() if MP is not registered yet.
+ * Query LKey from a packet buffer for Rx. No need to flush local caches
+ * for Rx, as the mempool is pre-configured and static.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Address to search.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr)
+{
+ struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX4_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx4_rx_addr2mr_bh(rxq, addr);
+}
+
+#define mlx4_rx_mb2mr(rxq, mb) mlx4_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
+/**
+ * Query LKey from a packet buffer for Tx. If not found, add the mempool.
*
* @param txq
* Pointer to Tx queue structure.
- * @param[in] mp
- * Memory pool for which a memory region lkey must be returned.
+ * @param addr
+ * Address to search.
*
* @return
- * mr->lkey on success, (uint32_t)-1 on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-static inline uint32_t
-mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+static __rte_always_inline uint32_t
+mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
{
- unsigned int i;
-
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i].mp == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
- }
- if (txq->mp2mr[i].mp == mp) {
- /* MP found MP. */
- return txq->mp2mr[i].lkey;
- }
- }
- return mlx4_txq_add_mr(txq, mp, i);
+ struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Check generation bit to see if there's any change on existing MRs. */
+ if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+ mlx4_mr_flush_local_cache(mr_ctrl);
+ /* Linear search on MR cache array. */
+ lkey = mlx4_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX4_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx4_tx_addr2mr_bh(txq, addr);
}
+#define mlx4_tx_mb2mr(txq, mb) mlx4_tx_addr2mr(txq, (uintptr_t)((mb)->buf_addr))
+
#endif /* MLX4_RXTX_H_ */
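Taken together, the helpers declared in this header form a two-level lookup: a small per-queue linear cache is consulted first, the Tx path invalidates it via a device-wide generation counter, and misses fall through to a binary-search bottom half. A self-contained sketch of that layering (all names, sizes and the slow path are stand-ins for the driver's types):

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_N 8

    struct mr_entry { uintptr_t start, end; uint32_t lkey; };

    struct mr_ctrl_sketch {
        uint32_t cur_gen;        /* generation of the local cache */
        const uint32_t *dev_gen; /* device-wide generation counter */
        struct mr_entry cache[CACHE_N];
    };

    /* Slow-path stand-in; the driver does a B-tree binary search here. */
    static uint32_t lookup_bh(uintptr_t addr) { (void)addr; return UINT32_MAX; }

    static uint32_t
    addr2lkey(struct mr_ctrl_sketch *ctrl, uintptr_t addr)
    {
        unsigned int i;

        /* Flush the local cache if any MR changed device-wide. */
        if (*ctrl->dev_gen != ctrl->cur_gen) {
            for (i = 0; i < CACHE_N; ++i)
                ctrl->cache[i] = (struct mr_entry){ 0, 0, UINT32_MAX };
            ctrl->cur_gen = *ctrl->dev_gen;
        }
        /* Fast path: linear scan of the small per-queue cache. */
        for (i = 0; i < CACHE_N; ++i)
            if (addr >= ctrl->cache[i].start && addr < ctrl->cache[i].end)
                return ctrl->cache[i].lkey;
        return lookup_bh(addr); /* miss: take the bottom half */
    }

    int main(void)
    {
        uint32_t gen = 1;
        struct mr_ctrl_sketch ctrl = { .cur_gen = 1, .dev_gen = &gen };

        ctrl.cache[0] = (struct mr_entry){ 0x1000, 0x2000, 42 };
        printf("lkey = %u\n", (unsigned int)addr2lkey(&ctrl, 0x1800));
        return 0;
    }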
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 071b2d5d..9aa7440d 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
@@ -63,64 +63,6 @@ mlx4_txq_free_elts(struct txq *txq)
txq->elts_tail = txq->elts_head;
}
-struct txq_mp2mr_mbuf_check_data {
- int ret;
-};
-
-/**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like a mbuf.
- *
- * @param[in] mp
- * The mempool pointer
- * @param[in] arg
- * Context data (struct mlx4_txq_mp2mr_mbuf_check_data). Contains the
- * return value.
- * @param[in] obj
- * Object address.
- * @param index
- * Object index, unused.
- */
-static void
-mlx4_txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- uint32_t index)
-{
- struct txq_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf = obj;
-
- (void)index;
- /*
- * Check whether mbuf structure fits element size and whether mempool
- * pointer is valid.
- */
- if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
- data->ret = -1;
-}
-
-/**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a Tx queue.
- *
- * @param[in] mp
- * Memory Pool to register.
- * @param *arg
- * Pointer to Tx queue structure.
- */
-static void
-mlx4_txq_mp2mr_iter(struct rte_mempool *mp, void *arg)
-{
- struct txq *txq = arg;
- struct txq_mp2mr_mbuf_check_data data = {
- .ret = 0,
- };
-
- /* Register mempool only if the first element looks like a mbuf. */
- if (rte_mempool_obj_iter(mp, mlx4_txq_mp2mr_mbuf_check, &data) == 0 ||
- data.ret == -1)
- return;
- mlx4_txq_mp2mr(txq, mp);
-}
-
/**
* Retrieves information needed in order to directly access the Tx queue.
*
@@ -144,9 +86,9 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
uint32_t headroom_size = 2048 + (1 << dqp->sq.wqe_shift);
/* This many contiguous headroom bytes must always remain free. */
sq->remain_size = sq->size - headroom_size;
- sq->owner_opcode = MLX4_OPCODE_SEND | (0 << MLX4_SQ_OWNER_BIT);
+ sq->owner_opcode = MLX4_OPCODE_SEND | (0u << MLX4_SQ_OWNER_BIT);
sq->stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
- (0 << MLX4_SQ_OWNER_BIT));
+ (0u << MLX4_SQ_OWNER_BIT));
sq->db = dqp->sdb;
sq->doorbell_qpn = dqp->doorbell_qpn;
cq->buf = dcq->buf.buf;
@@ -174,32 +116,18 @@ mlx4_get_tx_port_offloads(struct priv *priv)
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
}
- if (priv->hw_csum_l2tun)
+ if (priv->tso)
+ offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (priv->hw_csum_l2tun) {
offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (priv->tso)
+ offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ }
return offloads;
}
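The function above now advertises TSO bits only when the capability is present, and the queue-setup hunk further below drops per-queue validation in favor of a plain union of queue-level and port-level requests (offloads = conf->offloads | dev->data->dev_conf.txmode.offloads), since ethdev validates the result centrally. A trivial sketch of composing and merging such bitmasks (flag values are placeholders):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OFF_IPV4_CKSUM (1ULL << 0) /* placeholder flag values */
    #define OFF_TCP_CKSUM  (1ULL << 1)
    #define OFF_TCP_TSO    (1ULL << 2)

    int main(void)
    {
        int tso_supported = 1; /* stand-in for priv->tso */
        uint64_t caps = OFF_IPV4_CKSUM | OFF_TCP_CKSUM;
        uint64_t port_offloads = OFF_IPV4_CKSUM;  /* txmode.offloads */
        uint64_t queue_offloads = OFF_TCP_CKSUM;  /* conf->offloads */
        uint64_t offloads;

        if (tso_supported)
            caps |= OFF_TCP_TSO; /* advertise only what exists */
        /* Effective set: per-queue request merged with port config. */
        offloads = queue_offloads | port_offloads;
        printf("caps=0x%" PRIx64 " effective=0x%" PRIx64 "\n",
               caps, offloads);
        return 0;
    }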
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param requested
- * Per-queue offloads configuration.
- *
- * @return
- * Nonzero when configuration is valid.
- */
-static int
-mlx4_check_tx_queue_offloads(struct priv *priv, uint64_t requested)
-{
- uint64_t mandatory = priv->dev->data->dev_conf.txmode.offloads;
- uint64_t supported = mlx4_get_tx_port_offloads(priv);
-
- return !((mandatory ^ requested) & supported);
-}
-
-/**
* DPDK callback to configure a Tx queue.
*
* @param dev
@@ -246,23 +174,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
},
};
int ret;
+ uint64_t offloads;
+
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
DEBUG("%p: configuring queue %u for %u descriptors",
(void *)dev, idx, desc);
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if ((conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- !mlx4_check_tx_queue_offloads(priv, conf->offloads)) {
- rte_errno = ENOTSUP;
- ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- mlx4_get_tx_port_offloads(priv));
- return -rte_errno;
- }
+
if (idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
ERROR("%p: queue index out of range (%u >= %u)",
@@ -313,11 +231,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
.elts_comp_cd_init =
RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
.csum = priv->hw_csum &&
- (conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ (offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM)),
.csum_l2tun = priv->hw_csum_l2tun &&
- (conf->offloads &
+ (offloads &
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
/* Enable Tx loopback for VF devices. */
.lb = !!priv->vf,
@@ -404,8 +322,13 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
/* Save first wqe pointer in the first element. */
(&(*txq->elts)[0])->wqe =
(volatile struct mlx4_wqe_ctrl_seg *)txq->msq.buf;
- /* Pre-register known mempools. */
- rte_mempool_walk(mlx4_txq_mp2mr_iter, txq);
+ if (mlx4_mr_btree_init(&txq->mr_ctrl.cache_bh,
+ MLX4_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ /* Save a pointer to the global generation number for memory event checks. */
+ txq->mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
DEBUG("%p: adding Tx queue %p to list", (void *)dev, (void *)txq);
dev->data->tx_queues[idx] = txq;
return 0;
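The goto-error path above relies on the DPDK convention that a failing callee sets rte_errno itself, leaving the caller to unwind only. A minimal sketch of the convention (rte_errno replaced with a plain variable so the snippet stands alone):

    #include <errno.h>
    #include <stdio.h>

    static int fake_errno; /* stand-in for DPDK's per-lcore rte_errno */

    static int
    btree_init_sketch(int fail)
    {
        if (fail) {
            fake_errno = ENOMEM; /* the callee sets the error code */
            return -1;
        }
        return 0;
    }

    static int
    queue_setup_sketch(int fail)
    {
        if (btree_init_sketch(fail))
            goto error; /* fake_errno is already set; just unwind */
        return 0;
    error:
        return -fake_errno;
    }

    int main(void)
    {
        printf("setup -> %d\n", queue_setup_sketch(1));
        return 0;
    }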
@@ -446,11 +369,6 @@ mlx4_tx_queue_release(void *dpdk_txq)
claim_zero(mlx4_glue->destroy_qp(txq->qp));
if (txq->cq)
claim_zero(mlx4_glue->destroy_cq(txq->cq));
- for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
- if (!txq->mp2mr[i].mp)
- break;
- assert(txq->mp2mr[i].mr);
- mlx4_mr_put(txq->mp2mr[i].mr);
- }
+ mlx4_mr_btree_free(&txq->mr_ctrl.cache_bh);
rte_free(txq);
}
diff --git a/drivers/net/mlx4/mlx4_utils.c b/drivers/net/mlx4/mlx4_utils.c
index d10812ec..a727d703 100644
--- a/drivers/net/mlx4/mlx4_utils.c
+++ b/drivers/net/mlx4/mlx4_utils.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
/**
diff --git a/drivers/net/mlx4/mlx4_utils.h b/drivers/net/mlx4/mlx4_utils.h
index 9fdbacad..86abb3b7 100644
--- a/drivers/net/mlx4/mlx4_utils.h
+++ b/drivers/net/mlx4/mlx4_utils.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef MLX4_UTILS_H_
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 3bc9736c..2e70dec5 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2015 6WIND S.A.
-# Copyright 2015 Mellanox.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2015 Mellanox Technologies, Ltd
include $(RTE_SDK)/mk/rte.vars.mk
@@ -35,7 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_mlx5.a
LIB_GLUE = $(LIB_GLUE_BASE).$(LIB_GLUE_VERSION)
LIB_GLUE_BASE = librte_pmd_mlx5_glue.so
-LIB_GLUE_VERSION = 18.02.0
+LIB_GLUE_VERSION = 18.05.0
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5.c
@@ -59,6 +32,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl.c
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_nl_flow.c
ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
INSTALL-$(CONFIG_RTE_LIBRTE_MLX5_PMD)-lib += $(LIB_GLUE)
@@ -82,6 +57,7 @@ LDLIBS += -ldl
else
LDLIBS += -libverbs -lmlx5
endif
+LDLIBS += -lmnl
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
@@ -92,6 +68,9 @@ CFLAGS += -Wno-error=cast-qual
EXPORT_MAP := rte_pmd_mlx5_version.map
LIBABIVER := 1
+# memseg walk is not part of the stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
# DEBUG which is usually provided on the command-line may enable
# CONFIG_RTE_LIBRTE_MLX5_DEBUG.
ifeq ($(DEBUG),1)
@@ -105,10 +84,6 @@ else
CFLAGS += -DNDEBUG -UPEDANTIC
endif
-ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE
-CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE)
-endif
-
include $(RTE_SDK)/mk/rte.lib.mk
# Generate and clean-up mlx5_autoconf.h.
@@ -125,9 +100,19 @@ mlx5_autoconf.h.new: FORCE
mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
- HAVE_IBV_DEVICE_VXLAN_SUPPORT \
+ HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_TUNNEL_SUPPORT \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IBV_DEVICE_MPLS_SUPPORT \
infiniband/verbs.h \
- enum IBV_DEVICE_VXLAN_SUPPORT \
+ enum IBV_FLOW_SPEC_MPLS \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_IBV_WQ_FLAG_RX_END_PADDING \
@@ -135,6 +120,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
enum IBV_WQ_FLAG_RX_END_PADDING \
$(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
+ HAVE_IBV_MLX5_MOD_SWP \
+ infiniband/mlx5dv.h \
+ type 'struct mlx5dv_sw_parsing_caps' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
HAVE_IBV_MLX5_MOD_MPW \
infiniband/mlx5dv.h \
enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \
@@ -162,7 +152,237 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q sh -- '$<' '$@' \
HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \
infiniband/verbs.h \
- enum IBV_FLOW_SPEC_ACTION_COUNT \
+ type 'struct ibv_counter_set_init_attr' \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NL_NLDEV \
+ rdma/rdma_netlink.h \
+ enum RDMA_NL_NLDEV \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_CMD_PORT_GET \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_CMD_PORT_GET \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_DEV_NAME \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_DEV_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_PORT_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_PORT_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX \
+ rdma/rdma_netlink.h \
+ enum RDMA_NLDEV_ATTR_NDEV_INDEX \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_SWITCH_ID \
+ linux/if_link.h \
+ enum IFLA_PHYS_SWITCH_ID \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_IFLA_PHYS_PORT_NAME \
+ linux/if_link.h \
+ enum IFLA_PHYS_PORT_NAME \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_ACT \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_ACT \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_FLAGS \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_FLAGS \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_TYPE \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_TYPE \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_ETH_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IP_PROTO \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IP_PROTO \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV4_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_IPV6_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_TCP_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_TCP_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_SRC \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_SRC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_SRC_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_DST \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_DST \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_UDP_DST_MASK \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_UDP_DST_MASK \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_ID \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_ID \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_PRIO \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_PRIO \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_ETH_TYPE \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_ACT_VLAN \
+ linux/tc_act/tc_vlan.h \
+ enum TCA_VLAN_PUSH_VLAN_PRIORITY \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_40000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_40000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseKR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseKR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseCR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseCR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseSR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseSR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_SUPPORTED_56000baseLR4_Full \
+ /usr/include/linux/ethtool.h \
+ define SUPPORTED_56000baseLR4_Full \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_STATIC_ASSERT \
+ /usr/include/assert.h \
+ define static_assert \
$(AUTOCONF_OUTPUT)
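Each stanza above runs buildtools/auto-config-h.sh once per probed symbol: the script checks whether the named enum, type or define exists in the given header and appends a matching HAVE_* definition to mlx5_autoconf.h.new. The generated macros are then consumed as ordinary compile-time feature guards, roughly as follows (HAVE_TCA_FLOWER_ACT is one of the symbols probed above):

    /* Illustration: mlx5_autoconf.h would provide the HAVE_* results. */
    #ifdef HAVE_TCA_FLOWER_ACT
    #define FLOWER_SUPPORTED 1 /* kernel headers expose TC flower */
    #else
    #define FLOWER_SUPPORTED 0 /* fall back: no TC flower offload path */
    #endif

    #include <stdio.h>

    int main(void)
    {
        printf("TC flower support compiled in: %d\n", FLOWER_SUPPORTED);
        return 0;
    }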
# Create mlx5_autoconf.h or update it in case it differs from the new one.
@@ -181,10 +401,15 @@ ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
$(LIB): $(LIB_GLUE)
+ifeq ($(LINK_USING_CC),1)
+GLUE_LDFLAGS := $(call linkerprefix,$(LDFLAGS))
+else
+GLUE_LDFLAGS := $(LDFLAGS)
+endif
$(LIB_GLUE): mlx5_glue.o
- $Q $(LD) $(LDFLAGS) $(EXTRA_LDFLAGS) \
+ $Q $(LD) $(GLUE_LDFLAGS) $(EXTRA_LDFLAGS) \
-Wl,-h,$(LIB_GLUE) \
- -s -shared -o $@ $< -libverbs -lmlx5
+ -shared -o $@ $< -libverbs -lmlx5
mlx5_glue.o: mlx5_autoconf.h
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 6c0985bd..ec63bc6e 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -13,6 +13,8 @@
#include <errno.h>
#include <net/if.h>
#include <sys/mman.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -33,6 +35,9 @@
#include <rte_config.h>
#include <rte_eal_memconfig.h>
#include <rte_kvargs.h>
+#include <rte_rwlock.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
#include "mlx5.h"
#include "mlx5_utils.h"
@@ -40,10 +45,23 @@
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
+#include "mlx5_mr.h"
/* Device parameter to enable RX completion queue compression. */
#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"
+/* Device parameter to enable Multi-Packet Rx queue. */
+#define MLX5_RX_MPRQ_EN "mprq_en"
+
+/* Device parameter to configure log 2 of the number of strides for MPRQ. */
+#define MLX5_RX_MPRQ_LOG_STRIDE_NUM "mprq_log_stride_num"
+
+/* Device parameter to limit the size of a memcpy'd packet for MPRQ. */
+#define MLX5_RX_MPRQ_MAX_MEMCPY_LEN "mprq_max_memcpy_len"
+
+/* Device parameter to set the minimum number of Rx queues to enable MPRQ. */
+#define MLX5_RXQS_MIN_MPRQ "rxqs_min_mprq"
+
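The mprq_* keys above are ordinary device arguments; assuming the usual 18.08 PCI whitelist syntax, Multi-Packet RQ could be enabled from the command line along these lines (PCI address and values hypothetical):

    testpmd -w 0000:03:00.0,mprq_en=1,mprq_log_stride_num=6,mprq_max_memcpy_len=128,rxqs_min_mprq=2 -- -i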
/* Device parameter to configure inline send. */
#define MLX5_TXQ_INLINE "txq_inline"
@@ -68,6 +86,15 @@
/* Device parameter to enable hardware Rx vector. */
#define MLX5_RX_VEC_EN "rx_vec_en"
+/* Allow L3 VXLAN flow creation. */
+#define MLX5_L3_VXLAN_EN "l3_vxlan_en"
+
+/* Activate Netlink support in VF mode. */
+#define MLX5_VF_NL_EN "vf_nl_en"
+
+/* Select port representors to instantiate. */
+#define MLX5_REPRESENTOR "representor"
+
#ifndef HAVE_IBV_MLX5_MOD_MPW
#define MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED (1 << 2)
#define MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW (1 << 3)
@@ -77,6 +104,50 @@
#define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4)
#endif
+static const char *MZ_MLX5_PMD_SHARED_DATA = "mlx5_pmd_shared_data";
+
+/* Shared memory between primary and secondary processes. */
+struct mlx5_shared_data *mlx5_shared_data;
+
+/* Spinlock for mlx5_shared_data allocation. */
+static rte_spinlock_t mlx5_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+
+/** Driver-specific log messages type. */
+int mlx5_logtype;
+
+/**
+ * Prepare shared data between the primary and secondary processes.
+ */
+static void
+mlx5_prepare_shared_data(void)
+{
+ const struct rte_memzone *mz;
+
+ rte_spinlock_lock(&mlx5_shared_data_lock);
+ if (mlx5_shared_data == NULL) {
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* Allocate shared memory. */
+ mz = rte_memzone_reserve(MZ_MLX5_PMD_SHARED_DATA,
+ sizeof(*mlx5_shared_data),
+ SOCKET_ID_ANY, 0);
+ } else {
+ /* Lookup allocated shared memory. */
+ mz = rte_memzone_lookup(MZ_MLX5_PMD_SHARED_DATA);
+ }
+ if (mz == NULL)
+ rte_panic("Cannot allocate mlx5 shared data\n");
+ mlx5_shared_data = mz->addr;
+ /* Initialize shared data. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ LIST_INIT(&mlx5_shared_data->mem_event_cb_list);
+ rte_rwlock_init(&mlx5_shared_data->mem_event_rwlock);
+ }
+ rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+ mlx5_mr_mem_event_cb, NULL);
+ }
+ rte_spinlock_unlock(&mlx5_shared_data_lock);
+}
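mlx5_prepare_shared_data() follows the standard reserve-or-lookup idiom for cross-process state: only the primary process reserves the named memzone, secondaries look it up, and both end up pointing at the same memory. A stripped-down sketch of the idiom (the struct and memzone name are hypothetical):

    #include <stdint.h>
    #include <rte_eal.h>
    #include <rte_memzone.h>

    struct shared_sketch {
        uint32_t counter;
    };

    /* Reserve-or-lookup: the primary allocates, secondaries attach by name. */
    static struct shared_sketch *
    get_shared_sketch(void)
    {
        const struct rte_memzone *mz;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
            mz = rte_memzone_reserve("sketch_shared_data",
                                     sizeof(struct shared_sketch),
                                     SOCKET_ID_ANY, 0);
        else
            mz = rte_memzone_lookup("sketch_shared_data");
        return mz == NULL ? NULL : mz->addr;
    }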
+
/**
* Retrieve integer value from environment variable.
*
@@ -108,7 +179,7 @@ mlx5_getenv_int(const char *name)
* A pointer to the callback data.
*
* @return
- * a pointer to the allocate space.
+ * The allocated buffer on success, NULL otherwise and rte_errno is set.
*/
static void *
mlx5_alloc_verbs_buf(size_t size, void *data)
@@ -130,7 +201,8 @@ mlx5_alloc_verbs_buf(size_t size, void *data)
}
assert(data != NULL);
ret = rte_malloc_socket(__func__, size, alignment, socket);
- DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret);
+ if (!ret && size)
+ rte_errno = ENOMEM;
return ret;
}
@@ -146,7 +218,6 @@ static void
mlx5_free_verbs_buf(void *ptr, void *data __rte_unused)
{
assert(data != NULL);
- DEBUG("Extern free request: %p", ptr);
rte_free(ptr);
}
@@ -165,13 +236,13 @@ mlx5_dev_close(struct rte_eth_dev *dev)
unsigned int i;
int ret;
- priv_lock(priv);
- DEBUG("%p: closing device \"%s\"",
- (void *)dev,
- ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
+ DRV_LOG(DEBUG, "port %u closing device \"%s\"",
+ dev->data->port_id,
+ ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
/* In case mlx5_dev_stop() has not been called. */
- priv_dev_interrupt_handler_uninstall(priv, dev);
- priv_dev_traffic_disable(priv, dev);
+ mlx5_dev_interrupt_handler_uninstall(dev);
+ mlx5_traffic_disable(dev);
+ mlx5_flow_flush(dev, NULL);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
@@ -179,7 +250,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_rx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->rxqs_n); ++i)
- mlx5_priv_rxq_release(priv, i);
+ mlx5_rxq_release(dev, i);
priv->rxqs_n = 0;
priv->rxqs = NULL;
}
@@ -187,10 +258,12 @@ mlx5_dev_close(struct rte_eth_dev *dev)
/* XXX race condition if mlx5_tx_burst() is still running. */
usleep(1000);
for (i = 0; (i != priv->txqs_n); ++i)
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(dev, i);
priv->txqs_n = 0;
priv->txqs = NULL;
}
+ mlx5_mprq_free_mp(dev);
+ mlx5_mr_release(dev);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(mlx5_glue->dealloc_pd(priv->pd));
@@ -202,33 +275,64 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
if (priv->primary_socket)
- priv_socket_uninit(priv);
- ret = mlx5_priv_hrxq_ibv_verify(priv);
+ mlx5_socket_uninit(dev);
+ if (priv->config.vf)
+ mlx5_nl_mac_addr_flush(dev);
+ if (priv->nl_socket_route >= 0)
+ close(priv->nl_socket_route);
+ if (priv->nl_socket_rdma >= 0)
+ close(priv->nl_socket_rdma);
+ if (priv->mnl_socket)
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ ret = mlx5_hrxq_ibv_verify(dev);
if (ret)
- WARN("%p: some Hash Rx queue still remain", (void *)priv);
- ret = mlx5_priv_ind_table_ibv_verify(priv);
+ DRV_LOG(WARNING, "port %u some hash Rx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_ind_table_ibv_verify(dev);
if (ret)
- WARN("%p: some Indirection table still remain", (void *)priv);
- ret = mlx5_priv_rxq_ibv_verify(priv);
+ DRV_LOG(WARNING, "port %u some indirection table still remain",
+ dev->data->port_id);
+ ret = mlx5_rxq_ibv_verify(dev);
if (ret)
- WARN("%p: some Verbs Rx queue still remain", (void *)priv);
- ret = mlx5_priv_rxq_verify(priv);
+ DRV_LOG(WARNING, "port %u some Verbs Rx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_rxq_verify(dev);
if (ret)
- WARN("%p: some Rx Queues still remain", (void *)priv);
- ret = mlx5_priv_txq_ibv_verify(priv);
+ DRV_LOG(WARNING, "port %u some Rx queues still remain",
+ dev->data->port_id);
+ ret = mlx5_txq_ibv_verify(dev);
if (ret)
- WARN("%p: some Verbs Tx queue still remain", (void *)priv);
- ret = mlx5_priv_txq_verify(priv);
+ DRV_LOG(WARNING, "port %u some Verbs Tx queue still remain",
+ dev->data->port_id);
+ ret = mlx5_txq_verify(dev);
if (ret)
- WARN("%p: some Tx Queues still remain", (void *)priv);
- ret = priv_flow_verify(priv);
+ DRV_LOG(WARNING, "port %u some Tx queues still remain",
+ dev->data->port_id);
+ ret = mlx5_flow_verify(dev);
if (ret)
- WARN("%p: some flows still remain", (void *)priv);
- ret = priv_mr_verify(priv);
- if (ret)
- WARN("%p: some Memory Region still remain", (void *)priv);
- priv_unlock(priv);
+ DRV_LOG(WARNING, "port %u some flows still remain",
+ dev->data->port_id);
+ if (priv->domain_id != RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ unsigned int c = 0;
+ unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
+ while (i--) {
+ struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->domain_id != priv->domain_id ||
+ &rte_eth_devices[port_id[i]] == dev)
+ continue;
+ ++c;
+ }
+ if (!c)
+ claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+ }
memset(priv, 0, sizeof(*priv));
+ priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
}
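The close path above frees the switch domain only when no sibling port still references it: the loop counts other live ports sharing priv->domain_id and frees the domain when the count reaches zero. A compact sketch of that last-owner check (the port table and domain values are stand-ins):

    #include <stdio.h>

    #define NPORTS 3
    #define DOMAIN_INVALID 0xffffu

    static unsigned int port_domain[NPORTS] = { 7, 7, DOMAIN_INVALID };

    /* Return nonzero if 'self' is the last port in its switch domain. */
    static int
    last_owner(unsigned int self)
    {
        unsigned int i, c = 0;

        for (i = 0; i < NPORTS; ++i) {
            if (i == self || port_domain[i] != port_domain[self])
                continue;
            ++c;
        }
        return c == 0;
    }

    int main(void)
    {
        printf("port 0 last owner: %d\n", last_owner(0)); /* 0: port 1 shares */
        port_domain[1] = DOMAIN_INVALID;
        printf("port 0 last owner: %d\n", last_owner(0)); /* 1: free domain */
        return 0;
    }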
const struct eth_dev_ops mlx5_dev_ops = {
@@ -260,6 +364,7 @@ const struct eth_dev_ops mlx5_dev_ops = {
.mac_addr_remove = mlx5_mac_addr_remove,
.mac_addr_add = mlx5_mac_addr_add,
.mac_addr_set = mlx5_mac_addr_set,
+ .set_mc_addr_list = mlx5_set_mc_addr_list,
.mtu_set = mlx5_dev_set_mtu,
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
@@ -294,6 +399,10 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.dev_set_link_down = mlx5_set_link_down,
.dev_set_link_up = mlx5_set_link_up,
.dev_close = mlx5_dev_close,
+ .promiscuous_enable = mlx5_promiscuous_enable,
+ .promiscuous_disable = mlx5_promiscuous_disable,
+ .allmulticast_enable = mlx5_allmulticast_enable,
+ .allmulticast_disable = mlx5_allmulticast_disable,
.link_update = mlx5_link_update,
.stats_get = mlx5_stats_get,
.stats_reset = mlx5_stats_reset,
@@ -312,6 +421,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.mac_addr_remove = mlx5_mac_addr_remove,
.mac_addr_add = mlx5_mac_addr_add,
.mac_addr_set = mlx5_mac_addr_set,
+ .set_mc_addr_list = mlx5_set_mc_addr_list,
.mtu_set = mlx5_dev_set_mtu,
.vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
.vlan_offload_set = mlx5_vlan_offload_set,
@@ -323,39 +433,6 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = {
.is_removed = mlx5_is_removed,
};
-static struct {
- struct rte_pci_addr pci_addr; /* associated PCI address */
- uint32_t ports; /* physical ports bitfield. */
-} mlx5_dev[32];
-
-/**
- * Get device index in mlx5_dev[] from PCI bus address.
- *
- * @param[in] pci_addr
- * PCI bus address to look for.
- *
- * @return
- * mlx5_dev[] index on success, -1 on failure.
- */
-static int
-mlx5_dev_idx(struct rte_pci_addr *pci_addr)
-{
- unsigned int i;
- int ret = -1;
-
- assert(pci_addr != NULL);
- for (i = 0; (i != RTE_DIM(mlx5_dev)); ++i) {
- if ((mlx5_dev[i].pci_addr.domain == pci_addr->domain) &&
- (mlx5_dev[i].pci_addr.bus == pci_addr->bus) &&
- (mlx5_dev[i].pci_addr.devid == pci_addr->devid) &&
- (mlx5_dev[i].pci_addr.function == pci_addr->function))
- return i;
- if ((mlx5_dev[i].ports == 0) && (ret == -1))
- ret = i;
- }
- return ret;
-}
-
/**
* Verify and store value for device argument.
*
@@ -367,7 +444,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
* User data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
@@ -375,14 +452,26 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
struct mlx5_dev_config *config = opaque;
unsigned long tmp;
+ /* No-op; port representors are processed in mlx5_dev_spawn(). */
+ if (!strcmp(MLX5_REPRESENTOR, key))
+ return 0;
errno = 0;
tmp = strtoul(val, NULL, 0);
if (errno) {
- WARN("%s: \"%s\" is not a valid integer", key, val);
- return errno;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "%s: \"%s\" is not a valid integer", key, val);
+ return -rte_errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
config->cqe_comp = !!tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
+ config->mprq.enabled = !!tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_LOG_STRIDE_NUM, key) == 0) {
+ config->mprq.stride_num_n = tmp;
+ } else if (strcmp(MLX5_RX_MPRQ_MAX_MEMCPY_LEN, key) == 0) {
+ config->mprq.max_memcpy_len = tmp;
+ } else if (strcmp(MLX5_RXQS_MIN_MPRQ, key) == 0) {
+ config->mprq.min_rxqs_num = tmp;
} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
config->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
@@ -397,9 +486,14 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
config->tx_vec_en = !!tmp;
} else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
config->rx_vec_en = !!tmp;
+ } else if (strcmp(MLX5_L3_VXLAN_EN, key) == 0) {
+ config->l3_vxlan_en = !!tmp;
+ } else if (strcmp(MLX5_VF_NL_EN, key) == 0) {
+ config->vf_nl_en = !!tmp;
} else {
- WARN("%s: unknown parameter", key);
- return -EINVAL;
+ DRV_LOG(WARNING, "%s: unknown parameter", key);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
return 0;
}
@@ -413,13 +507,17 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* Device arguments structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
{
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
+ MLX5_RX_MPRQ_EN,
+ MLX5_RX_MPRQ_LOG_STRIDE_NUM,
+ MLX5_RX_MPRQ_MAX_MEMCPY_LEN,
+ MLX5_RXQS_MIN_MPRQ,
MLX5_TXQ_INLINE,
MLX5_TXQS_MIN_INLINE,
MLX5_TXQ_MPW_EN,
@@ -427,6 +525,9 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
MLX5_TXQ_MAX_INLINE_LEN,
MLX5_TX_VEC_EN,
MLX5_RX_VEC_EN,
+ MLX5_L3_VXLAN_EN,
+ MLX5_VF_NL_EN,
+ MLX5_REPRESENTOR,
NULL,
};
struct rte_kvargs *kvlist;
@@ -444,9 +545,10 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
mlx5_args_check, config);
- if (ret != 0) {
+ if (ret) {
+ rte_errno = EINVAL;
rte_kvargs_free(kvlist);
- return ret;
+ return -rte_errno;
}
}
}
@@ -465,50 +567,60 @@ static struct rte_pci_driver mlx5_driver;
*/
static void *uar_base;
+static int
+find_lower_va_bound(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ void **addr = arg;
+
+ if (*addr == NULL)
+ *addr = ms->addr;
+ else
+ *addr = RTE_MIN(*addr, ms->addr);
+
+ return 0;
+}
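find_lower_va_bound() is a walker callback: rte_memseg_walk() invokes it once per memseg, a nonzero return stops the walk early, and returning 0 as above visits every segment, leaving the lowest address in *arg. For contrast, a hypothetical early-stopping walker would look like this (it would be passed to rte_memseg_walk() exactly like find_lower_va_bound() is below):

    #include <rte_common.h>
    #include <rte_memory.h>

    /* Hypothetical walker: stop once a segment above 'limit' is seen. */
    static int
    seg_above_limit(const struct rte_memseg_list *msl __rte_unused,
                    const struct rte_memseg *ms, void *arg)
    {
        void *limit = *(void **)arg;

        return ms->addr > limit; /* nonzero stops the walk */
    }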
+
/**
* Reserve UAR address space for primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_uar_init_primary(struct priv *priv)
+mlx5_uar_init_primary(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
void *addr = (void *)0;
- int i;
- const struct rte_mem_config *mcfg;
- int ret;
if (uar_base) { /* UAR address space mapped. */
priv->uar_base = uar_base;
return 0;
}
/* find out lower bound of hugepage segments */
- mcfg = rte_eal_get_configuration()->mem_config;
- for (i = 0; i < RTE_MAX_MEMSEG && mcfg->memseg[i].addr; i++) {
- if (addr)
- addr = RTE_MIN(addr, mcfg->memseg[i].addr);
- else
- addr = mcfg->memseg[i].addr;
- }
+ rte_memseg_walk(find_lower_va_bound, &addr);
+
/* keep distance to hugepages to minimize potential conflicts. */
- addr = RTE_PTR_SUB(addr, MLX5_UAR_OFFSET + MLX5_UAR_SIZE);
+ addr = RTE_PTR_SUB(addr, (uintptr_t)(MLX5_UAR_OFFSET + MLX5_UAR_SIZE));
/* anonymous mmap, no real memory consumption. */
addr = mmap(addr, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ERROR("Failed to reserve UAR address space, please adjust "
- "MLX5_UAR_SIZE or try --base-virtaddr");
- ret = ENOMEM;
- return ret;
+ DRV_LOG(ERR,
+ "port %u failed to reserve UAR address space, please"
+ " adjust MLX5_UAR_SIZE or try --base-virtaddr",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
/* Accept either same addr or a new addr returned from mmap if target
* range occupied.
*/
- INFO("Reserved UAR address space: %p", addr);
+ DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+ dev->data->port_id, addr);
priv->uar_base = addr; /* for primary and secondary UAR re-mmap. */
uar_base = addr; /* process local, don't reserve again. */
return 0;
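The reservation above uses an anonymous PROT_NONE mapping purely to claim virtual address space: no physical memory is committed, any access faults, and the range only becomes usable if something is later remapped over it. A standalone sketch of the trick:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t size = (size_t)1 << 28; /* 256 MiB of address space */
        void *addr;

        /* PROT_NONE + MAP_ANONYMOUS reserves the range without
         * consuming memory; accesses fault until it is remapped.
         */
        addr = mmap(NULL, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        printf("reserved %zu bytes at %p\n", size, addr);
        munmap(addr, size);
        return 0;
    }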
@@ -518,17 +630,17 @@ priv_uar_init_primary(struct priv *priv)
* Reserve UAR address space for secondary process, align with
* primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_uar_init_secondary(struct priv *priv)
+mlx5_uar_init_secondary(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
void *addr;
- int ret;
assert(priv->uar_base);
if (uar_base) { /* already reserved. */
@@ -539,467 +651,802 @@ priv_uar_init_secondary(struct priv *priv)
addr = mmap(priv->uar_base, MLX5_UAR_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ERROR("UAR mmap failed: %p size: %llu",
- priv->uar_base, MLX5_UAR_SIZE);
- ret = ENXIO;
- return ret;
+ DRV_LOG(ERR, "port %u UAR mmap failed: %p size: %llu",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+ rte_errno = ENXIO;
+ return -rte_errno;
}
if (priv->uar_base != addr) {
- ERROR("UAR address %p size %llu occupied, please adjust "
- "MLX5_UAR_OFFSET or try EAL parameter --base-virtaddr",
- priv->uar_base, MLX5_UAR_SIZE);
- ret = ENXIO;
- return ret;
+ DRV_LOG(ERR,
+ "port %u UAR address %p size %llu occupied, please"
+ " adjust MLX5_UAR_OFFSET or try EAL parameter"
+ " --base-virtaddr",
+ dev->data->port_id, priv->uar_base, MLX5_UAR_SIZE);
+ rte_errno = ENXIO;
+ return -rte_errno;
}
uar_base = addr; /* process local, don't reserve again */
- INFO("Reserved UAR address space: %p", addr);
+ DRV_LOG(INFO, "port %u reserved UAR address space: %p",
+ dev->data->port_id, addr);
return 0;
}
/**
- * DPDK callback to register a PCI device.
+ * Spawn an Ethernet device from Verbs information.
*
- * This function creates an Ethernet device for each port of a given
- * PCI device.
- *
- * @param[in] pci_drv
- * PCI driver structure (mlx5_driver).
- * @param[in] pci_dev
- * PCI device information.
+ * @param dpdk_dev
+ * Backing DPDK device.
+ * @param ibv_dev
+ * Verbs device.
+ * @param vf
+ * If nonzero, enable VF-specific features.
+ * @param[in] switch_info
+ * Switch properties of Ethernet device.
*
* @return
- * 0 on success, negative errno value on failure.
+ * A valid Ethernet device object on success, NULL otherwise and rte_errno
+ * is set. The following error is defined:
+ *
+ * EBUSY: device is not supposed to be spawned.
*/
-static int
-mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+static struct rte_eth_dev *
+mlx5_dev_spawn(struct rte_device *dpdk_dev,
+ struct ibv_device *ibv_dev,
+ int vf,
+ const struct mlx5_switch_info *switch_info)
{
- struct ibv_device **list;
- struct ibv_device *ibv_dev;
+ struct ibv_context *ctx;
+ struct ibv_device_attr_ex attr;
+ struct ibv_port_attr port_attr;
+ struct ibv_pd *pd = NULL;
+ struct mlx5dv_context dv_attr = { .comp_mask = 0 };
+ struct mlx5_dev_config config = {
+ .vf = !!vf,
+ .tx_vec_en = 1,
+ .rx_vec_en = 1,
+ .mpw_hdr_dseg = 0,
+ .txq_inline = MLX5_ARG_UNSET,
+ .txqs_inline = MLX5_ARG_UNSET,
+ .inline_max_packet_sz = MLX5_ARG_UNSET,
+ .vf_nl_en = 1,
+ .mprq = {
+ .enabled = 0,
+ .stride_num_n = MLX5_MPRQ_STRIDE_NUM_N,
+ .max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN,
+ .min_rxqs_num = MLX5_MPRQ_MIN_RXQS,
+ },
+ };
+ struct rte_eth_dev *eth_dev = NULL;
+ struct priv *priv = NULL;
int err = 0;
- struct ibv_context *attr_ctx = NULL;
- struct ibv_device_attr_ex device_attr;
- unsigned int sriov;
unsigned int mps;
unsigned int cqe_comp;
unsigned int tunnel_en = 0;
- int idx;
- int i;
- struct mlx5dv_context attrs_out;
+ unsigned int mpls_en = 0;
+ unsigned int swp = 0;
+ unsigned int mprq = 0;
+ unsigned int mprq_min_stride_size_n = 0;
+ unsigned int mprq_max_stride_size_n = 0;
+ unsigned int mprq_min_stride_num_n = 0;
+ unsigned int mprq_max_stride_num_n = 0;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- struct ibv_counter_set_description cs_desc;
+ struct ibv_counter_set_description cs_desc = { .counter_type = 0 };
#endif
+ struct ether_addr mac;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ int own_domain_id = 0;
+ unsigned int i;
- (void)pci_drv;
- assert(pci_drv == &mlx5_driver);
- /* Get mlx5_dev[] index. */
- idx = mlx5_dev_idx(&pci_dev->addr);
- if (idx == -1) {
- ERROR("this driver cannot support any more adapters");
- return -ENOMEM;
- }
- DEBUG("using driver device index %d", idx);
-
- /* Save PCI address. */
- mlx5_dev[idx].pci_addr = pci_dev->addr;
- list = mlx5_glue->get_device_list(&i);
- if (list == NULL) {
- assert(errno);
- if (errno == ENOSYS)
- ERROR("cannot list devices, is ib_uverbs loaded?");
- return -errno;
- }
- assert(i >= 0);
- /*
- * For each listed device, check related sysfs entry against
- * the provided PCI ID.
- */
- while (i != 0) {
- struct rte_pci_addr pci_addr;
+ /* Determine if this port representor is supposed to be spawned. */
+ if (switch_info->representor && dpdk_dev->devargs) {
+ struct rte_eth_devargs eth_da;
- --i;
- DEBUG("checking device \"%s\"", list[i]->name);
- if (mlx5_ibv_device_to_pci_addr(list[i], &pci_addr))
- continue;
- if ((pci_dev->addr.domain != pci_addr.domain) ||
- (pci_dev->addr.bus != pci_addr.bus) ||
- (pci_dev->addr.devid != pci_addr.devid) ||
- (pci_dev->addr.function != pci_addr.function))
- continue;
- sriov = ((pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
- (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
- (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
- (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
- switch (pci_dev->id.device_id) {
- case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
- tunnel_en = 1;
- break;
- case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
- tunnel_en = 1;
- break;
- default:
- break;
+ err = rte_eth_devargs_parse(dpdk_dev->devargs->args, &eth_da);
+ if (err) {
+ rte_errno = -err;
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(rte_errno));
+ return NULL;
}
- INFO("PCI information matches, using device \"%s\""
- " (SR-IOV: %s)",
- list[i]->name,
- sriov ? "true" : "false");
- attr_ctx = mlx5_glue->open_device(list[i]);
- err = errno;
- break;
- }
- if (attr_ctx == NULL) {
- mlx5_glue->free_device_list(list);
- switch (err) {
- case 0:
- ERROR("cannot access device, is mlx5_ib loaded?");
- return -ENODEV;
- case EINVAL:
- ERROR("cannot use device, are drivers up to date?");
- return -EINVAL;
+ for (i = 0; i < eth_da.nb_representor_ports; ++i)
+ if (eth_da.representor_ports[i] ==
+ (uint16_t)switch_info->port_name)
+ break;
+ if (i == eth_da.nb_representor_ports) {
+ rte_errno = EBUSY;
+ return NULL;
}
- assert(err > 0);
- return -err;
}
- ibv_dev = list[i];
-
- DEBUG("device opened");
+ /* Prepare shared data between the primary and secondary processes. */
+ mlx5_prepare_shared_data();
+ errno = 0;
+ ctx = mlx5_glue->open_device(ibv_dev);
+ if (!ctx) {
+ rte_errno = errno ? errno : ENODEV;
+ return NULL;
+ }
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
+#endif
/*
* Multi-packet send is supported by ConnectX-4 Lx PF as well
* as all ConnectX-5 devices.
*/
- mlx5_glue->dv_query_device(attr_ctx, &attrs_out);
- if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
- if (attrs_out.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
- DEBUG("Enhanced MPW is supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS;
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
+#endif
+ mlx5_glue->dv_query_device(ctx, &dv_attr);
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+ DRV_LOG(DEBUG, "enhanced MPW is supported");
mps = MLX5_MPW_ENHANCED;
} else {
- DEBUG("MPW is supported");
+ DRV_LOG(DEBUG, "MPW is supported");
mps = MLX5_MPW;
}
} else {
- DEBUG("MPW isn't supported");
+ DRV_LOG(DEBUG, "MPW isn't supported");
mps = MLX5_MPW_DISABLED;
}
+ config.mps = mps;
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+ swp = dv_attr.sw_parsing_caps.sw_parsing_offloads;
+ DRV_LOG(DEBUG, "SWP support: %u", swp);
+#endif
+ config.swp = !!swp;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+ struct mlx5dv_striding_rq_caps mprq_caps =
+ dv_attr.striding_rq_caps;
+
+ DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
+ mprq_caps.min_single_stride_log_num_of_bytes);
+ DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
+ mprq_caps.max_single_stride_log_num_of_bytes);
+ DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
+ mprq_caps.min_single_wqe_log_num_of_strides);
+ DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
+ mprq_caps.max_single_wqe_log_num_of_strides);
+ DRV_LOG(DEBUG, "\tsupported_qpts: %d",
+ mprq_caps.supported_qpts);
+ DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
+ mprq = 1;
+ mprq_min_stride_size_n =
+ mprq_caps.min_single_stride_log_num_of_bytes;
+ mprq_max_stride_size_n =
+ mprq_caps.max_single_stride_log_num_of_bytes;
+ mprq_min_stride_num_n =
+ mprq_caps.min_single_wqe_log_num_of_strides;
+ mprq_max_stride_num_n =
+ mprq_caps.max_single_wqe_log_num_of_strides;
+ config.mprq.stride_num_n = RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+ mprq_min_stride_num_n);
+ }
+#endif
if (RTE_CACHE_LINE_SIZE == 128 &&
- !(attrs_out.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
+ !(dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP))
cqe_comp = 0;
else
cqe_comp = 1;
- if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr))
+ config.cqe_comp = cqe_comp;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+ tunnel_en = ((dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
+ (dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
+ }
+ DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
+ tunnel_en ? "" : "not ");
+#else
+ DRV_LOG(WARNING,
+ "tunnel offloading disabled due to old OFED/rdma-core version");
+#endif
+ config.tunnel_en = tunnel_en;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ mpls_en = ((dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
+ (dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
+ DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
+ mpls_en ? "" : "not ");
+#else
+ DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
+ " old OFED/rdma-core version or firmware configuration");
+#endif
+ config.mpls_en = mpls_en;
+ err = mlx5_glue->query_device_ex(ctx, NULL, &attr);
+ if (err) {
+ DEBUG("ibv_query_device_ex() failed");
goto error;
- INFO("%u port(s) detected", device_attr.orig_attr.phys_port_cnt);
-
- for (i = 0; i < device_attr.orig_attr.phys_port_cnt; i++) {
- char name[RTE_ETH_NAME_MAX_LEN];
- int len;
- uint32_t port = i + 1; /* ports are indexed from one */
- uint32_t test = (1 << i);
- struct ibv_context *ctx = NULL;
- struct ibv_port_attr port_attr;
- struct ibv_pd *pd = NULL;
- struct priv *priv = NULL;
- struct rte_eth_dev *eth_dev;
- struct ibv_device_attr_ex device_attr_ex;
- struct ether_addr mac;
- uint16_t num_vfs = 0;
- struct ibv_device_attr_ex device_attr;
- struct mlx5_dev_config config = {
- .cqe_comp = cqe_comp,
- .mps = mps,
- .tunnel_en = tunnel_en,
- .tx_vec_en = 1,
- .rx_vec_en = 1,
- .mpw_hdr_dseg = 0,
- .txq_inline = MLX5_ARG_UNSET,
- .txqs_inline = MLX5_ARG_UNSET,
- .inline_max_packet_sz = MLX5_ARG_UNSET,
- };
-
- len = snprintf(name, sizeof(name), PCI_PRI_FMT,
- pci_dev->addr.domain, pci_dev->addr.bus,
- pci_dev->addr.devid, pci_dev->addr.function);
- if (device_attr.orig_attr.phys_port_cnt > 1)
- snprintf(name + len, sizeof(name), " port %u", i);
-
- mlx5_dev[idx].ports |= test;
-
- if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- eth_dev = rte_eth_dev_attach_secondary(name);
- if (eth_dev == NULL) {
- ERROR("can not attach rte ethdev");
- err = ENOMEM;
- goto error;
- }
- eth_dev->device = &pci_dev->device;
- eth_dev->dev_ops = &mlx5_dev_sec_ops;
- priv = eth_dev->data->dev_private;
- err = priv_uar_init_secondary(priv);
- if (err < 0) {
- err = -err;
- goto error;
- }
- /* Receive command fd from primary process */
- err = priv_socket_connect(priv);
- if (err < 0) {
- err = -err;
- goto error;
- }
- /* Remap UAR for Tx queues. */
- err = priv_tx_uar_remap(priv, err);
- if (err)
- goto error;
- /*
- * Ethdev pointer is still required as input since
- * the primary device is not accessible from the
- * secondary process.
- */
- eth_dev->rx_pkt_burst =
- priv_select_rx_function(priv, eth_dev);
- eth_dev->tx_pkt_burst =
- priv_select_tx_function(priv, eth_dev);
- continue;
- }
-
- DEBUG("using port %u (%08" PRIx32 ")", port, test);
-
- ctx = mlx5_glue->open_device(ibv_dev);
- if (ctx == NULL) {
- err = ENODEV;
- goto port_error;
+ }
+ if (!switch_info->representor)
+ rte_strlcpy(name, dpdk_dev->name, sizeof(name));
+ else
+ snprintf(name, sizeof(name), "%s_representor_%u",
+ dpdk_dev->name, switch_info->port_name);
+ DRV_LOG(DEBUG, "naming Ethernet device \"%s\"", name);
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (eth_dev == NULL) {
+ DRV_LOG(ERR, "can not attach rte ethdev");
+ rte_errno = ENOMEM;
+ err = rte_errno;
+ goto error;
}
-
- mlx5_glue->query_device_ex(ctx, NULL, &device_attr);
- /* Check port status. */
- err = mlx5_glue->query_port(ctx, port, &port_attr);
+ eth_dev->device = dpdk_dev;
+ eth_dev->dev_ops = &mlx5_dev_sec_ops;
+ err = mlx5_uar_init_secondary(eth_dev);
if (err) {
- ERROR("port query failed: %s", strerror(err));
- goto port_error;
+ err = rte_errno;
+ goto error;
}
-
- if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
- ERROR("port %d is not configured in Ethernet mode",
- port);
- err = EINVAL;
- goto port_error;
+ /* Receive command fd from primary process */
+ err = mlx5_socket_connect(eth_dev);
+ if (err < 0) {
+ err = rte_errno;
+ goto error;
}
-
- if (port_attr.state != IBV_PORT_ACTIVE)
- DEBUG("port %d is not active: \"%s\" (%d)",
- port, mlx5_glue->port_state_str(port_attr.state),
- port_attr.state);
-
- /* Allocate protection domain. */
- pd = mlx5_glue->alloc_pd(ctx);
- if (pd == NULL) {
- ERROR("PD allocation failure");
- err = ENOMEM;
- goto port_error;
+ /* Remap UAR for Tx queues. */
+ err = mlx5_tx_uar_remap(eth_dev, err);
+ if (err) {
+ err = rte_errno;
+ goto error;
}
-
- mlx5_dev[idx].ports |= test;
-
- /* from rte_ethdev.c */
- priv = rte_zmalloc("ethdev private structure",
- sizeof(*priv),
- RTE_CACHE_LINE_SIZE);
- if (priv == NULL) {
- ERROR("priv allocation failure");
- err = ENOMEM;
- goto port_error;
+ /*
+ * Ethdev pointer is still required as input since
+ * the primary device is not accessible from the
+ * secondary process.
+ */
+ eth_dev->rx_pkt_burst = mlx5_select_rx_function(eth_dev);
+ eth_dev->tx_pkt_burst = mlx5_select_tx_function(eth_dev);
+ claim_zero(mlx5_glue->close_device(ctx));
+ return eth_dev;
+ }
+ /* Check port status. */
+ err = mlx5_glue->query_port(ctx, 1, &port_attr);
+ if (err) {
+ DRV_LOG(ERR, "port query failed: %s", strerror(err));
+ goto error;
+ }
+ if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
+ DRV_LOG(ERR, "port is not configured in Ethernet mode");
+ err = EINVAL;
+ goto error;
+ }
+ if (port_attr.state != IBV_PORT_ACTIVE)
+ DRV_LOG(DEBUG, "port is not active: \"%s\" (%d)",
+ mlx5_glue->port_state_str(port_attr.state),
+ port_attr.state);
+ /* Allocate protection domain. */
+ pd = mlx5_glue->alloc_pd(ctx);
+ if (pd == NULL) {
+ DRV_LOG(ERR, "PD allocation failure");
+ err = ENOMEM;
+ goto error;
+ }
+ priv = rte_zmalloc("ethdev private structure",
+ sizeof(*priv),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ DRV_LOG(ERR, "priv allocation failure");
+ err = ENOMEM;
+ goto error;
+ }
+ priv->ctx = ctx;
+ strncpy(priv->ibdev_name, priv->ctx->device->name,
+ sizeof(priv->ibdev_name));
+ strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
+ sizeof(priv->ibdev_path));
+ priv->device_attr = attr;
+ priv->pd = pd;
+ priv->mtu = ETHER_MTU;
+#ifndef RTE_ARCH_64
+ /* Initialize UAR access locks for 32bit implementations. */
+ rte_spinlock_init(&priv->uar_lock_cq);
+ for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
+ rte_spinlock_init(&priv->uar_lock[i]);
+#endif
+ /* Some internal functions rely on Netlink sockets; open them now. */
+ priv->nl_socket_rdma = mlx5_nl_init(NETLINK_RDMA);
+ priv->nl_socket_route = mlx5_nl_init(NETLINK_ROUTE);
+ priv->nl_sn = 0;
+ priv->representor = !!switch_info->representor;
+ priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+ priv->representor_id =
+ switch_info->representor ? switch_info->port_name : -1;
+ /*
+ * Look for sibling devices in order to reuse their switch domain
+ * if any, otherwise allocate one.
+ */
+ i = mlx5_dev_to_port_id(dpdk_dev, NULL, 0);
+ if (i > 0) {
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dpdk_dev, port_id, i), i);
+ while (i--) {
+ const struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->domain_id ==
+ RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID)
+ continue;
+ priv->domain_id = opriv->domain_id;
+ break;
}
-
- priv->ctx = ctx;
- strncpy(priv->ibdev_path, priv->ctx->device->ibdev_path,
- sizeof(priv->ibdev_path));
- priv->device_attr = device_attr;
- priv->port = port;
- priv->pd = pd;
- priv->mtu = ETHER_MTU;
- err = mlx5_args(&config, pci_dev->device.devargs);
+ }
+ if (priv->domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) {
+ err = rte_eth_switch_domain_alloc(&priv->domain_id);
if (err) {
- ERROR("failed to process device arguments: %s",
- strerror(err));
- goto port_error;
+ err = rte_errno;
+ DRV_LOG(ERR, "unable to allocate switch domain: %s",
+ strerror(rte_errno));
+ goto error;
}
- if (mlx5_glue->query_device_ex(ctx, NULL, &device_attr_ex)) {
- ERROR("ibv_query_device_ex() failed");
- goto port_error;
- }
-
- config.hw_csum = !!(device_attr_ex.device_cap_flags_ex &
- IBV_DEVICE_RAW_IP_CSUM);
- DEBUG("checksum offloading is %ssupported",
- (config.hw_csum ? "" : "not "));
-
-#ifdef HAVE_IBV_DEVICE_VXLAN_SUPPORT
- config.hw_csum_l2tun =
- !!(exp_device_attr.exp_device_cap_flags &
- IBV_DEVICE_VXLAN_SUPPORT);
-#endif
- DEBUG("Rx L2 tunnel checksum offloads are %ssupported",
- (config.hw_csum_l2tun ? "" : "not "));
-
+ own_domain_id = 1;
+ }
+ err = mlx5_args(&config, dpdk_dev->devargs);
+ if (err) {
+ err = rte_errno;
+ DRV_LOG(ERR, "failed to process device arguments: %s",
+ strerror(rte_errno));
+ goto error;
+ }
+ config.hw_csum = !!(attr.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
+ DRV_LOG(DEBUG, "checksum offloading is %ssupported",
+ (config.hw_csum ? "" : "not "));
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- config.flow_counter_en = !!(device_attr.max_counter_sets);
- mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
- DEBUG("counter type = %d, num of cs = %ld, attributes = %d",
- cs_desc.counter_type, cs_desc.num_of_cs,
- cs_desc.attributes);
+ config.flow_counter_en = !!attr.max_counter_sets;
+ mlx5_glue->describe_counter_set(ctx, 0, &cs_desc);
+ DRV_LOG(DEBUG, "counter type = %d, num of cs = %ld, attributes = %d",
+ cs_desc.counter_type, cs_desc.num_of_cs,
+ cs_desc.attributes);
#endif
- config.ind_table_max_size =
- device_attr_ex.rss_caps.max_rwq_indirection_table_size;
- /* Remove this check once DPDK supports larger/variable
- * indirection tables. */
- if (config.ind_table_max_size >
- (unsigned int)ETH_RSS_RETA_SIZE_512)
- config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
- DEBUG("maximum RX indirection table size is %u",
- config.ind_table_max_size);
- config.hw_vlan_strip = !!(device_attr_ex.raw_packet_caps &
- IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
- DEBUG("VLAN stripping is %ssupported",
- (config.hw_vlan_strip ? "" : "not "));
-
- config.hw_fcs_strip = !!(device_attr_ex.raw_packet_caps &
- IBV_RAW_PACKET_CAP_SCATTER_FCS);
- DEBUG("FCS stripping configuration is %ssupported",
- (config.hw_fcs_strip ? "" : "not "));
-
+ config.ind_table_max_size =
+ attr.rss_caps.max_rwq_indirection_table_size;
+ /*
+ * Remove this check once DPDK supports larger/variable
+ * indirection tables.
+ */
+ if (config.ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
+ config.ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+ DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
+ config.ind_table_max_size);
+ config.hw_vlan_strip = !!(attr.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
+ DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
+ (config.hw_vlan_strip ? "" : "not "));
+ config.hw_fcs_strip = !!(attr.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_SCATTER_FCS);
+ DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
+ (config.hw_fcs_strip ? "" : "not "));
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
- config.hw_padding = !!device_attr_ex.rx_pad_end_addr_align;
+ config.hw_padding = !!attr.rx_pad_end_addr_align;
#endif
- DEBUG("hardware RX end alignment padding is %ssupported",
- (config.hw_padding ? "" : "not "));
-
- priv_get_num_vfs(priv, &num_vfs);
- config.sriov = (num_vfs || sriov);
- config.tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
- (device_attr_ex.tso_caps.supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
- if (config.tso)
- config.tso_max_payload_sz =
- device_attr_ex.tso_caps.max_tso;
- if (config.mps && !mps) {
- ERROR("multi-packet send not supported on this device"
- " (" MLX5_TXQ_MPW_EN ")");
- err = ENOTSUP;
- goto port_error;
- }
- INFO("%sMPS is %s",
- config.mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
- config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
- if (config.cqe_comp && !cqe_comp) {
- WARN("Rx CQE compression isn't supported");
- config.cqe_comp = 0;
- }
- err = priv_uar_init_primary(priv);
- if (err)
- goto port_error;
- /* Configure the first MAC address by default. */
- if (priv_get_mac(priv, &mac.addr_bytes)) {
- ERROR("cannot get MAC address, is mlx5_en loaded?"
- " (errno: %s)", strerror(errno));
- err = ENODEV;
- goto port_error;
+ DRV_LOG(DEBUG, "hardware Rx end alignment padding is %ssupported",
+ (config.hw_padding ? "" : "not "));
+ config.tso = (attr.tso_caps.max_tso > 0 &&
+ (attr.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (config.tso)
+ config.tso_max_payload_sz = attr.tso_caps.max_tso;
+ if (config.mps && !mps) {
+ DRV_LOG(ERR,
+ "multi-packet send not supported on this device"
+ " (" MLX5_TXQ_MPW_EN ")");
+ err = ENOTSUP;
+ goto error;
+ }
+ DRV_LOG(INFO, "%sMPS is %s",
+ config.mps == MLX5_MPW_ENHANCED ? "enhanced " : "",
+ config.mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ if (config.cqe_comp && !cqe_comp) {
+ DRV_LOG(WARNING, "Rx CQE compression isn't supported");
+ config.cqe_comp = 0;
+ }
+ if (config.mprq.enabled && mprq) {
+ if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
+ config.mprq.stride_num_n < mprq_min_stride_num_n) {
+ config.mprq.stride_num_n =
+ RTE_MAX(MLX5_MPRQ_STRIDE_NUM_N,
+ mprq_min_stride_num_n);
+ DRV_LOG(WARNING,
+ "the number of strides"
+ " for Multi-Packet RQ is out of range,"
+ " setting default value (%u)",
+ 1 << config.mprq.stride_num_n);
}
- INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
- priv->port,
- mac.addr_bytes[0], mac.addr_bytes[1],
- mac.addr_bytes[2], mac.addr_bytes[3],
- mac.addr_bytes[4], mac.addr_bytes[5]);
+ config.mprq.min_stride_size_n = mprq_min_stride_size_n;
+ config.mprq.max_stride_size_n = mprq_max_stride_size_n;
+ } else if (config.mprq.enabled && !mprq) {
+ DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
+ config.mprq.enabled = 0;
+ }
+ eth_dev = rte_eth_dev_allocate(name);
+ if (eth_dev == NULL) {
+ DRV_LOG(ERR, "can not allocate rte ethdev");
+ err = ENOMEM;
+ goto error;
+ }
+ if (priv->representor)
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
+ eth_dev->data->dev_private = priv;
+ priv->dev_data = eth_dev->data;
+ eth_dev->data->mac_addrs = priv->mac;
+ eth_dev->device = dpdk_dev;
+ eth_dev->device->driver = &mlx5_driver.driver;
+ err = mlx5_uar_init_primary(eth_dev);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ /* Configure the first MAC address by default. */
+ if (mlx5_get_mac(eth_dev, &mac.addr_bytes)) {
+ DRV_LOG(ERR,
+ "port %u cannot get MAC address, is mlx5_en"
+ " loaded? (errno: %s)",
+ eth_dev->data->port_id, strerror(rte_errno));
+ err = ENODEV;
+ goto error;
+ }
+ DRV_LOG(INFO,
+ "port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
+ eth_dev->data->port_id,
+ mac.addr_bytes[0], mac.addr_bytes[1],
+ mac.addr_bytes[2], mac.addr_bytes[3],
+ mac.addr_bytes[4], mac.addr_bytes[5]);
#ifndef NDEBUG
- {
- char ifname[IF_NAMESIZE];
-
- if (priv_get_ifname(priv, &ifname) == 0)
- DEBUG("port %u ifname is \"%s\"",
- priv->port, ifname);
- else
- DEBUG("port %u ifname is unknown", priv->port);
- }
+ {
+ char ifname[IF_NAMESIZE];
+
+ if (mlx5_get_ifname(eth_dev, &ifname) == 0)
+ DRV_LOG(DEBUG, "port %u ifname is \"%s\"",
+ eth_dev->data->port_id, ifname);
+ else
+ DRV_LOG(DEBUG, "port %u ifname is unknown",
+ eth_dev->data->port_id);
+ }
#endif
- /* Get actual MTU if possible. */
- priv_get_mtu(priv, &priv->mtu);
- DEBUG("port %u MTU is %u", priv->port, priv->mtu);
+ /* Get actual MTU if possible. */
+ err = mlx5_get_mtu(eth_dev, &priv->mtu);
+ if (err) {
+ err = rte_errno;
+ goto error;
+ }
+ DRV_LOG(DEBUG, "port %u MTU is %u", eth_dev->data->port_id,
+ priv->mtu);
+ /* Initialize burst functions to prevent crashes before link-up. */
+ eth_dev->rx_pkt_burst = removed_rx_burst;
+ eth_dev->tx_pkt_burst = removed_tx_burst;
+ eth_dev->dev_ops = &mlx5_dev_ops;
+ /* Register MAC address. */
+ claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
+ if (vf && config.vf_nl_en)
+ mlx5_nl_mac_addr_sync(eth_dev);
+ priv->mnl_socket = mlx5_nl_flow_socket_create();
+ if (!priv->mnl_socket) {
+ err = -rte_errno;
+ DRV_LOG(WARNING,
+ "flow rules relying on switch offloads will not be"
+ " supported: cannot open libmnl socket: %s",
+ strerror(rte_errno));
+ } else {
+ struct rte_flow_error error;
+ unsigned int ifindex = mlx5_ifindex(eth_dev);
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL) {
- ERROR("can not allocate rte ethdev");
- err = ENOMEM;
- goto port_error;
+ if (!ifindex) {
+ err = -rte_errno;
+ error.message =
+ "cannot retrieve network interface index";
+ } else {
+ err = mlx5_nl_flow_init(priv->mnl_socket, ifindex,
+ &error);
+ }
+ if (err) {
+ DRV_LOG(WARNING,
+ "flow rules relying on switch offloads will"
+ " not be supported: %s: %s",
+ error.message, strerror(rte_errno));
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ priv->mnl_socket = NULL;
}
- eth_dev->data->dev_private = priv;
- eth_dev->data->mac_addrs = priv->mac;
- eth_dev->device = &pci_dev->device;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->device->driver = &mlx5_driver.driver;
- /*
- * Initialize burst functions to prevent crashes before link-up.
- */
- eth_dev->rx_pkt_burst = removed_rx_burst;
- eth_dev->tx_pkt_burst = removed_tx_burst;
- priv->dev = eth_dev;
- eth_dev->dev_ops = &mlx5_dev_ops;
- /* Register MAC address. */
- claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
- TAILQ_INIT(&priv->flows);
- TAILQ_INIT(&priv->ctrl_flows);
-
- /* Hint libmlx5 to use PMD allocator for data plane resources */
- struct mlx5dv_ctx_allocators alctr = {
- .alloc = &mlx5_alloc_verbs_buf,
- .free = &mlx5_free_verbs_buf,
- .data = priv,
- };
- mlx5_glue->dv_set_context_attr(ctx,
- MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
- (void *)((uintptr_t)&alctr));
-
- /* Bring Ethernet device up. */
- DEBUG("forcing Ethernet interface up");
- priv_set_flags(priv, ~IFF_UP, IFF_UP);
- /* Store device configuration on private structure. */
- priv->config = config;
- continue;
-
-port_error:
- if (priv)
- rte_free(priv);
- if (pd)
- claim_zero(mlx5_glue->dealloc_pd(pd));
- if (ctx)
- claim_zero(mlx5_glue->close_device(ctx));
- break;
}
-
+ TAILQ_INIT(&priv->flows);
+ TAILQ_INIT(&priv->ctrl_flows);
+ /* Hint libmlx5 to use PMD allocator for data plane resources */
+ struct mlx5dv_ctx_allocators alctr = {
+ .alloc = &mlx5_alloc_verbs_buf,
+ .free = &mlx5_free_verbs_buf,
+ .data = priv,
+ };
+ mlx5_glue->dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS,
+ (void *)((uintptr_t)&alctr));
+ /* Bring Ethernet device up. */
+ DRV_LOG(DEBUG, "port %u forcing Ethernet interface up",
+ eth_dev->data->port_id);
+ mlx5_set_link_up(eth_dev);
/*
- * XXX if something went wrong in the loop above, there is a resource
- * leak (ctx, pd, priv, dpdk ethdev) but we can do nothing about it as
- * long as the dpdk does not provide a way to deallocate a ethdev and a
- * way to enumerate the registered ethdevs to free the previous ones.
+ * Even though the interrupt handler is not installed yet,
+	 * interrupts will still trigger on the async_fd from the
+	 * Verbs context returned by ibv_open_device().
*/
-
- /* no port found, complain */
- if (!mlx5_dev[idx].ports) {
- err = ENODEV;
+ mlx5_link_update(eth_dev, 0);
+ /* Store device configuration on private structure. */
+ priv->config = config;
+ /* Supported Verbs flow priority number detection. */
+ err = mlx5_flow_discover_priorities(eth_dev);
+ if (err < 0)
+ goto error;
+ priv->config.flow_prio = err;
+ /*
+ * Once the device is added to the list of memory event
+ * callback, its global MR cache table cannot be expanded
+ * on the fly because of deadlock. If it overflows, lookup
+ * should be done by searching MR list linearly, which is slow.
+ */
+ err = mlx5_mr_btree_init(&priv->mr.cache,
+ MLX5_MR_BTREE_CACHE_N * 2,
+ eth_dev->device->numa_node);
+ if (err) {
+ err = rte_errno;
goto error;
}
-
+ /* Add device to memory callback list. */
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
+ priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ return eth_dev;
error:
- if (attr_ctx)
- claim_zero(mlx5_glue->close_device(attr_ctx));
- if (list)
- mlx5_glue->free_device_list(list);
- assert(err >= 0);
- return -err;
+ if (priv) {
+ if (priv->nl_socket_route >= 0)
+ close(priv->nl_socket_route);
+ if (priv->nl_socket_rdma >= 0)
+ close(priv->nl_socket_rdma);
+ if (priv->mnl_socket)
+ mlx5_nl_flow_socket_destroy(priv->mnl_socket);
+ if (own_domain_id)
+ claim_zero(rte_eth_switch_domain_free(priv->domain_id));
+ rte_free(priv);
+ }
+ if (pd)
+ claim_zero(mlx5_glue->dealloc_pd(pd));
+ if (eth_dev)
+ rte_eth_dev_release_port(eth_dev);
+ if (ctx)
+ claim_zero(mlx5_glue->close_device(ctx));
+ assert(err > 0);
+ rte_errno = err;
+ return NULL;
+}
+
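+/*
+ * A minimal usage sketch of the spawn function's error convention
+ * (hypothetical variable names; see mlx5_pci_probe() below for the
+ * real call site):
+ *
+ *	eth_dev = mlx5_dev_spawn(dpdk_dev, ibv_dev, vf, &switch_info);
+ *	if (!eth_dev && rte_errno != EBUSY)
+ *		return -rte_errno; // rte_errno holds a positive errno
+ */
+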
+/** Data associated with devices to spawn. */
+struct mlx5_dev_spawn_data {
+ unsigned int ifindex; /**< Network interface index. */
+ struct mlx5_switch_info info; /**< Switch information. */
+ struct ibv_device *ibv_dev; /**< Associated IB device. */
+ struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
+};
+
+/**
+ * Comparison callback to sort device data.
+ *
+ * This is meant to be used with qsort().
+ *
+ * @param[in] a
+ *   Pointer to pointer to first data object.
+ * @param[in] b
+ *   Pointer to pointer to second data object.
+ *
+ * @return
+ * 0 if both objects are equal, less than 0 if the first argument is less
+ * than the second, greater than 0 otherwise.
+ */
+static int
+mlx5_dev_spawn_data_cmp(const void *a, const void *b)
+{
+ const struct mlx5_switch_info *si_a =
+ &((const struct mlx5_dev_spawn_data *)a)->info;
+ const struct mlx5_switch_info *si_b =
+ &((const struct mlx5_dev_spawn_data *)b)->info;
+ int ret;
+
+ /* Master device first. */
+ ret = si_b->master - si_a->master;
+ if (ret)
+ return ret;
+ /* Then representor devices. */
+ ret = si_b->representor - si_a->representor;
+ if (ret)
+ return ret;
+ /* Unidentified devices come last in no specific order. */
+ if (!si_a->representor)
+ return 0;
+ /* Order representors by name. */
+ return si_a->port_name - si_b->port_name;
+}
+
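+/*
+ * A minimal usage sketch of the comparator above, assuming a
+ * hypothetical array of spawn entries gathered from Verbs enumeration:
+ *
+ *	struct mlx5_dev_spawn_data spawn[16];
+ *	unsigned int n = 0;
+ *
+ *	// fill spawn[0..n-1], then:
+ *	qsort(spawn, n, sizeof(*spawn), mlx5_dev_spawn_data_cmp);
+ *
+ * spawn[0] then holds the master (if identified), followed by
+ * representors in ascending port_name order.
+ */
+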
+/**
+ * DPDK callback to register a PCI device.
+ *
+ * This function spawns Ethernet devices out of a given PCI device.
+ *
+ * @param[in] pci_drv
+ * PCI driver structure (mlx5_driver).
+ * @param[in] pci_dev
+ * PCI device information.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct ibv_device **ibv_list;
+ unsigned int n = 0;
+ int vf;
+ int ret;
+
+ assert(pci_drv == &mlx5_driver);
+ errno = 0;
+ ibv_list = mlx5_glue->get_device_list(&ret);
+ if (!ibv_list) {
+ rte_errno = errno ? errno : ENOSYS;
+ DRV_LOG(ERR, "cannot list devices, is ib_uverbs loaded?");
+ return -rte_errno;
+ }
+
+ struct ibv_device *ibv_match[ret + 1];
+
+ while (ret-- > 0) {
+ struct rte_pci_addr pci_addr;
+
+ DRV_LOG(DEBUG, "checking device \"%s\"", ibv_list[ret]->name);
+ if (mlx5_ibv_device_to_pci_addr(ibv_list[ret], &pci_addr))
+ continue;
+ if (pci_dev->addr.domain != pci_addr.domain ||
+ pci_dev->addr.bus != pci_addr.bus ||
+ pci_dev->addr.devid != pci_addr.devid ||
+ pci_dev->addr.function != pci_addr.function)
+ continue;
+ DRV_LOG(INFO, "PCI information matches for device \"%s\"",
+ ibv_list[ret]->name);
+ ibv_match[n++] = ibv_list[ret];
+ }
+ ibv_match[n] = NULL;
+
+ struct mlx5_dev_spawn_data list[n];
+ int nl_route = n ? mlx5_nl_init(NETLINK_ROUTE) : -1;
+ int nl_rdma = n ? mlx5_nl_init(NETLINK_RDMA) : -1;
+ unsigned int i;
+ unsigned int u;
+
+ /*
+ * The existence of several matching entries (n > 1) means port
+	 * representors have been instantiated. Neither existing Verbs calls
+	 * nor /sys entries can tell them apart; this can only be done through
+	 * Netlink calls, assuming kernel drivers are recent enough to
+	 * support them.
+ *
+ * In the event of identification failure through Netlink, try again
+ * through sysfs, then either:
+ *
+ * 1. No device matches (n == 0), complain and bail out.
+ * 2. A single IB device matches (n == 1) and is not a representor,
+ * assume no switch support.
+ * 3. Otherwise no safe assumptions can be made; complain louder and
+ * bail out.
+ */
+ for (i = 0; i != n; ++i) {
+ list[i].ibv_dev = ibv_match[i];
+ list[i].eth_dev = NULL;
+ if (nl_rdma < 0)
+ list[i].ifindex = 0;
+ else
+ list[i].ifindex = mlx5_nl_ifindex
+ (nl_rdma, list[i].ibv_dev->name);
+ if (nl_route < 0 ||
+ !list[i].ifindex ||
+ mlx5_nl_switch_info(nl_route, list[i].ifindex,
+ &list[i].info) ||
+ ((!list[i].info.representor && !list[i].info.master) &&
+ mlx5_sysfs_switch_info(list[i].ifindex, &list[i].info))) {
+ list[i].ifindex = 0;
+ memset(&list[i].info, 0, sizeof(list[i].info));
+ continue;
+ }
+ }
+ if (nl_rdma >= 0)
+ close(nl_rdma);
+ if (nl_route >= 0)
+ close(nl_route);
+ /* Count unidentified devices. */
+ for (u = 0, i = 0; i != n; ++i)
+ if (!list[i].info.master && !list[i].info.representor)
+ ++u;
+ if (u) {
+ if (n == 1 && u == 1) {
+ /* Case #2. */
+ DRV_LOG(INFO, "no switch support detected");
+ } else {
+ /* Case #3. */
+ DRV_LOG(ERR,
+ "unable to tell which of the matching devices"
+ " is the master (lack of kernel support?)");
+ n = 0;
+ }
+ }
+ /*
+	 * Sort list to probe devices in natural order for the user's
+	 * convenience
+ * (i.e. master first, then representors from lowest to highest ID).
+ */
+ if (n)
+ qsort(list, n, sizeof(*list), mlx5_dev_spawn_data_cmp);
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+ vf = 1;
+ break;
+ default:
+ vf = 0;
+ }
+ for (i = 0; i != n; ++i) {
+ uint32_t restore;
+
+ list[i].eth_dev = mlx5_dev_spawn
+ (&pci_dev->device, list[i].ibv_dev, vf, &list[i].info);
+ if (!list[i].eth_dev) {
+ if (rte_errno != EBUSY)
+ break;
+ /* Device is disabled, ignore it. */
+ continue;
+ }
+ restore = list[i].eth_dev->data->dev_flags;
+ rte_eth_copy_pci_info(list[i].eth_dev, pci_dev);
+ /* Restore non-PCI flags cleared by the above call. */
+ list[i].eth_dev->data->dev_flags |= restore;
+ rte_eth_dev_probing_finish(list[i].eth_dev);
+ }
+ mlx5_glue->free_device_list(ibv_list);
+ if (!n) {
+ DRV_LOG(WARNING,
+ "no Verbs device matches PCI device " PCI_PRI_FMT ","
+ " are kernel drivers loaded?",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ rte_errno = ENOENT;
+ ret = -rte_errno;
+ } else if (i != n) {
+ DRV_LOG(ERR,
+ "probe of PCI device " PCI_PRI_FMT " aborted after"
+ " encountering an error: %s",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function,
+ strerror(rte_errno));
+ ret = -rte_errno;
+ /* Roll back. */
+ while (i--) {
+ if (!list[i].eth_dev)
+ continue;
+ mlx5_dev_close(list[i].eth_dev);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(list[i].eth_dev->data->dev_private);
+ claim_zero(rte_eth_dev_release_port(list[i].eth_dev));
+ }
+ /* Restore original error. */
+ rte_errno = -ret;
+ } else {
+ ret = 0;
+ }
+ return ret;
}
static const struct rte_pci_id mlx5_pci_id_map[] = {
@@ -1036,6 +1483,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
},
{
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
+ },
+ {
.vendor_id = 0
}
};
@@ -1052,11 +1503,54 @@ static struct rte_pci_driver mlx5_driver = {
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
/**
+ * Suffix RTE_EAL_PMD_PATH with "-glue".
+ *
+ * This function performs a sanity check on RTE_EAL_PMD_PATH before
+ * suffixing its last component.
+ *
+ * @param[out] buf
+ *   Output buffer; should be large enough, otherwise NULL is returned.
+ * @param size
+ *   Size of @p buf.
+ *
+ * @return
+ *   Pointer to @p buf, or NULL if the suffix cannot be appended.
+ */
+static char *
+mlx5_glue_path(char *buf, size_t size)
+{
+ static const char *const bad[] = { "/", ".", "..", NULL };
+ const char *path = RTE_EAL_PMD_PATH;
+ size_t len = strlen(path);
+ size_t off;
+ int i;
+
+ while (len && path[len - 1] == '/')
+ --len;
+ for (off = len; off && path[off - 1] != '/'; --off)
+ ;
+ for (i = 0; bad[i]; ++i)
+ if (!strncmp(path + off, bad[i], (int)(len - off)))
+ goto error;
+ i = snprintf(buf, size, "%.*s-glue", (int)len, path);
+ if (i == -1 || (size_t)i >= size)
+ goto error;
+ return buf;
+error:
+ DRV_LOG(ERR,
+ "unable to append \"-glue\" to last component of"
+ " RTE_EAL_PMD_PATH (\"" RTE_EAL_PMD_PATH "\"),"
+ " please re-configure DPDK");
+ return NULL;
+}
+
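+/*
+ * For instance, assuming DPDK was built with the hypothetical value
+ * RTE_EAL_PMD_PATH == "/usr/lib/dpdk/pmds":
+ *
+ *	char buf[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
+ *
+ *	mlx5_glue_path(buf, sizeof(buf));
+ *	// buf now holds "/usr/lib/dpdk/pmds-glue"; trailing slashes
+ *	// are trimmed first, and a last component of "/", "." or ".."
+ *	// makes the function return NULL instead.
+ */
+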
+/**
* Initialization routine for run-time dependency on rdma-core.
*/
static int
mlx5_glue_init(void)
{
+ char glue_path[sizeof(RTE_EAL_PMD_PATH) - 1 + sizeof("-glue")];
const char *path[] = {
/*
* A basic security check is necessary before trusting
@@ -1064,7 +1558,13 @@ mlx5_glue_init(void)
*/
(geteuid() == getuid() && getegid() == getgid() ?
getenv("MLX5_GLUE_PATH") : NULL),
- RTE_EAL_PMD_PATH,
+ /*
+ * When RTE_EAL_PMD_PATH is set, use its glue-suffixed
+ * variant, otherwise let dlopen() look up libraries on its
+ * own.
+ */
+ (*RTE_EAL_PMD_PATH ?
+ mlx5_glue_path(glue_path, sizeof(glue_path)) : ""),
};
unsigned int i = 0;
void *handle = NULL;
@@ -1095,7 +1595,8 @@ mlx5_glue_init(void)
break;
if (sizeof(name) != (size_t)ret + 1)
continue;
- DEBUG("looking for rdma-core glue as \"%s\"", name);
+ DRV_LOG(DEBUG, "looking for rdma-core glue as \"%s\"",
+ name);
handle = dlopen(name, RTLD_LAZY);
break;
} while (1);
@@ -1107,7 +1608,7 @@ mlx5_glue_init(void)
rte_errno = EINVAL;
dlmsg = dlerror();
if (dlmsg)
- WARN("cannot load glue library: %s", dlmsg);
+ DRV_LOG(WARNING, "cannot load glue library: %s", dlmsg);
goto glue_error;
}
sym = dlsym(handle, "mlx5_glue");
@@ -1115,7 +1616,7 @@ mlx5_glue_init(void)
rte_errno = EINVAL;
dlmsg = dlerror();
if (dlmsg)
- ERROR("cannot resolve glue symbol: %s", dlmsg);
+ DRV_LOG(ERR, "cannot resolve glue symbol: %s", dlmsg);
goto glue_error;
}
mlx5_glue = *sym;
@@ -1123,9 +1624,9 @@ mlx5_glue_init(void)
glue_error:
if (handle)
dlclose(handle);
- WARN("cannot initialize PMD due to missing run-time"
- " dependency on rdma-core libraries (libibverbs,"
- " libmlx5)");
+ DRV_LOG(WARNING,
+ "cannot initialize PMD due to missing run-time dependency on"
+ " rdma-core libraries (libibverbs, libmlx5)");
return -rte_errno;
}
@@ -1134,12 +1635,17 @@ glue_error:
/**
* Driver initialization routine.
*/
-RTE_INIT(rte_mlx5_pmd_init);
-static void
-rte_mlx5_pmd_init(void)
+RTE_INIT(rte_mlx5_pmd_init)
{
- /* Build the static table for ptype conversion. */
+ /* Initialize driver log type. */
+ mlx5_logtype = rte_log_register("pmd.net.mlx5");
+ if (mlx5_logtype >= 0)
+ rte_log_set_level(mlx5_logtype, RTE_LOG_NOTICE);
+
+ /* Build the static tables for Verbs conversion. */
mlx5_set_ptype_table();
+ mlx5_set_cksum_table();
+ mlx5_set_swp_types_table();
/*
* RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
* huge pages. Calling ibv_fork_init() during init allows
@@ -1150,6 +1656,11 @@ rte_mlx5_pmd_init(void)
/* Match the size of Rx completion entry to the size of a cacheline. */
if (RTE_CACHE_LINE_SIZE == 128)
setenv("MLX5_CQE_SIZE", "128", 0);
+ /*
+ * MLX5_DEVICE_FATAL_CLEANUP tells ibv_destroy functions to
+ * cleanup all the Verbs resources even when the device was removed.
+ */
+ setenv("MLX5_DEVICE_FATAL_CLEANUP", "1", 1);
#ifdef RTE_LIBRTE_MLX5_DLOPEN_DEPS
if (mlx5_glue_init())
return;
@@ -1165,8 +1676,9 @@ rte_mlx5_pmd_init(void)
}
#endif
if (strcmp(mlx5_glue->version, MLX5_GLUE_VERSION)) {
- ERROR("rdma-core glue \"%s\" mismatch: \"%s\" is required",
- mlx5_glue->version, MLX5_GLUE_VERSION);
+ DRV_LOG(ERR,
+ "rdma-core glue \"%s\" mismatch: \"%s\" is required",
+ mlx5_glue->version, MLX5_GLUE_VERSION);
return;
}
mlx5_glue->fork_init();
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 965c19f2..a3a34cff 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_H_
@@ -26,12 +26,13 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
-#include <rte_spinlock.h>
+#include <rte_rwlock.h>
#include <rte_interrupts.h>
#include <rte_errno.h>
#include <rte_flow.h>
#include "mlx5_utils.h"
+#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
@@ -49,8 +50,27 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
};
+/** Switch information returned by mlx5_nl_switch_info(). */
+struct mlx5_switch_info {
+ uint32_t master:1; /**< Master device. */
+ uint32_t representor:1; /**< Representor device. */
+ int32_t port_name; /**< Representor port name. */
+ uint64_t switch_id; /**< Switch identifier. */
+};
+
+LIST_HEAD(mlx5_dev_list, priv);
+
+/* Shared memory between primary and secondary processes. */
+struct mlx5_shared_data {
+ struct mlx5_dev_list mem_event_cb_list;
+ rte_rwlock_t mem_event_rwlock;
+};
+
+extern struct mlx5_shared_data *mlx5_shared_data;
+
struct mlx5_xstats_ctrl {
/* Number of device stats. */
uint16_t stats_n;
@@ -75,19 +95,34 @@ TAILQ_HEAD(mlx5_flows, rte_flow);
*/
struct mlx5_dev_config {
unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
- unsigned int sriov:1; /* This is a VF or PF with VF devices. */
+ unsigned int vf:1; /* This is a VF. */
unsigned int mps:2; /* Multi-packet send supported mode. */
- unsigned int tunnel_en:1; /* Whether tunnel is supported. */
+ unsigned int tunnel_en:1;
+ /* Whether tunnel stateless offloads are supported. */
+ unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int tso:1; /* Whether TSO is supported. */
unsigned int tx_vec_en:1; /* Tx vector is enabled. */
unsigned int rx_vec_en:1; /* Rx vector is enabled. */
unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
+ unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
+ unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
+ unsigned int swp:1; /* Tx generic tunnel checksum and TSO offload. */
+ struct {
+ unsigned int enabled:1; /* Whether MPRQ is enabled. */
+ unsigned int stride_num_n; /* Number of strides. */
+ unsigned int min_stride_size_n; /* Min size of a stride. */
+ unsigned int max_stride_size_n; /* Max size of a stride. */
+ unsigned int max_memcpy_len;
+ /* Maximum packet size to memcpy Rx packets. */
+ unsigned int min_rxqs_num;
+ /* Rx queue count threshold to enable MPRQ. */
+ } mprq; /* Configurations for Multi-Packet RQ. */
+ unsigned int flow_prio; /* Number of flow priorities. */
unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int ind_table_max_size; /* Maximum indirection table size. */
int txq_inline; /* Maximum packet size for inlining. */
@@ -113,33 +148,63 @@ struct mlx5_verbs_alloc_ctx {
const void *obj; /* Pointer to the DPDK object. */
};
+LIST_HEAD(mlx5_mr_list, mlx5_mr);
+
+/* Flow drop context necessary due to Verbs API. */
+struct mlx5_drop {
+	struct mlx5_hrxq *hrxq; /* Hash Rx queue. */
+ struct mlx5_rxq_ibv *rxq; /* Verbs Rx queue. */
+};
+
+/** DPDK port to network interface index (ifindex) conversion. */
+struct mlx5_nl_flow_ptoi {
+ uint16_t port_id; /**< DPDK port ID. */
+ unsigned int ifindex; /**< Network interface index. */
+};
+
+struct mnl_socket;
+
struct priv {
- struct rte_eth_dev *dev; /* Ethernet device of master process. */
+ LIST_ENTRY(priv) mem_event_cb; /* Called by memory event callback. */
+ struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct ibv_context *ctx; /* Verbs context. */
struct ibv_device_attr_ex device_attr; /* Device properties. */
struct ibv_pd *pd; /* Protection Domain. */
+ char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
+ BITFIELD_DECLARE(mac_own, uint64_t, MLX5_MAX_MAC_ADDRESSES);
+ /* Bit-field of MAC addresses owned by the PMD. */
uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
- uint8_t port; /* Physical port number. */
- unsigned int pending_alarm:1; /* An alarm is pending. */
unsigned int isolated:1; /* Whether isolated mode is enabled. */
+ unsigned int representor:1; /* Device is a port representor. */
+ uint16_t domain_id; /* Switch domain identifier. */
+ int32_t representor_id; /* Port representor identifier. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
struct mlx5_rxq_data *(*rxqs)[]; /* RX queues. */
struct mlx5_txq_data *(*txqs)[]; /* TX queues. */
+ struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
struct rte_eth_rss_conf rss_conf; /* RSS configuration. */
struct rte_intr_handle intr_handle; /* Interrupt handler. */
unsigned int (*reta_idx)[]; /* RETA index table. */
unsigned int reta_idx_n; /* RETA index size. */
- struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
+ struct mlx5_drop drop_queue; /* Flow drop queues. */
struct mlx5_flows flows; /* RTE Flow rules. */
struct mlx5_flows ctrl_flows; /* Control flow rules. */
- LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
+ LIST_HEAD(counters, mlx5_flow_counter) flow_counters;
+ /* Flow counters. */
+ struct {
+ uint32_t dev_gen; /* Generation number to flush local caches. */
+ rte_rwlock_t rwlock; /* MR Lock. */
+ struct mlx5_mr_btree cache; /* Global MR cache table. */
+ struct mlx5_mr_list mr_list; /* Registered MR list. */
+ struct mlx5_mr_list mr_free_list; /* Freed MR list. */
+ } mr;
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
@@ -149,55 +214,25 @@ struct priv {
LIST_HEAD(ind_tables, mlx5_ind_table_ibv) ind_tbls;
uint32_t link_speed_capa; /* Link speed capabilities. */
struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
- rte_spinlock_t lock; /* Lock for control functions. */
int primary_socket; /* Unix socket for primary process. */
void *uar_base; /* Reserved address space for UAR mapping */
struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */
struct mlx5_dev_config config; /* Device configuration. */
struct mlx5_verbs_alloc_ctx verbs_alloc_ctx;
/* Context for Verbs allocator. */
+ int nl_socket_rdma; /* Netlink socket (NETLINK_RDMA). */
+ int nl_socket_route; /* Netlink socket (NETLINK_ROUTE). */
+ uint32_t nl_sn; /* Netlink message sequence number. */
+#ifndef RTE_ARCH_64
+	rte_spinlock_t uar_lock_cq; /* CQs share a common dedicated UAR. */
+ rte_spinlock_t uar_lock[MLX5_UAR_PAGE_NUM_MAX];
+ /* UAR same-page access control required in 32bit implementations. */
+#endif
+ struct mnl_socket *mnl_socket; /* Libmnl socket. */
};
-/**
- * Lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- * Pointer to private structure.
- */
-static inline void
-priv_lock(struct priv *priv)
-{
- rte_spinlock_lock(&priv->lock);
-}
-
-/**
- * Try to lock private structure to protect it from concurrent access in the
- * control path.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 1 if the lock is successfully taken; 0 otherwise.
- */
-static inline int
-priv_trylock(struct priv *priv)
-{
- return rte_spinlock_trylock(&priv->lock);
-}
-
-/**
- * Unlock private structure.
- *
- * @param priv
- * Pointer to private structure.
- */
-static inline void
-priv_unlock(struct priv *priv)
-{
- rte_spinlock_unlock(&priv->lock);
-}
+#define PORT_ID(priv) ((priv)->dev_data->port_id)
+#define ETH_DEV(priv) (&rte_eth_devices[PORT_ID(priv)])
/* mlx5.c */
@@ -205,131 +240,179 @@ int mlx5_getenv_int(const char *);
/* mlx5_ethdev.c */
-struct priv *mlx5_get_priv(struct rte_eth_dev *dev);
-int mlx5_is_secondary(void);
-int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]);
-int priv_ifreq(const struct priv *, int req, struct ifreq *);
-int priv_is_ib_cntr(const char *);
-int priv_get_cntr_sysfs(struct priv *, const char *, uint64_t *);
-int priv_get_num_vfs(struct priv *, uint16_t *);
-int priv_get_mtu(struct priv *, uint16_t *);
-int priv_set_flags(struct priv *, unsigned int, unsigned int);
-int mlx5_dev_configure(struct rte_eth_dev *);
-void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
+int mlx5_get_master_ifname(const struct rte_eth_dev *dev,
+ char (*ifname)[IF_NAMESIZE]);
+int mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE]);
+unsigned int mlx5_ifindex(const struct rte_eth_dev *dev);
+int mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
+ int master);
+int mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu);
+int mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep,
+ unsigned int flags);
+int mlx5_dev_configure(struct rte_eth_dev *dev);
+void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
-int priv_link_update(struct priv *, int);
-int priv_force_link_status_change(struct priv *, int);
-int mlx5_link_update(struct rte_eth_dev *, int);
-int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
-int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
-int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
-int mlx5_ibv_device_to_pci_addr(const struct ibv_device *,
- struct rte_pci_addr *);
-void mlx5_dev_link_status_handler(void *);
-void mlx5_dev_interrupt_handler(void *);
-void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
-void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+int mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+int mlx5_force_link_status_change(struct rte_eth_dev *dev, int status);
+int mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu);
+int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev,
+ struct rte_eth_fc_conf *fc_conf);
+int mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
+ struct rte_pci_addr *pci_addr);
+void mlx5_dev_link_status_handler(void *arg);
+void mlx5_dev_interrupt_handler(void *arg);
+void mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev);
+void mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev);
int mlx5_set_link_down(struct rte_eth_dev *dev);
int mlx5_set_link_up(struct rte_eth_dev *dev);
int mlx5_is_removed(struct rte_eth_dev *dev);
-eth_tx_burst_t priv_select_tx_function(struct priv *, struct rte_eth_dev *);
-eth_rx_burst_t priv_select_rx_function(struct priv *, struct rte_eth_dev *);
+eth_tx_burst_t mlx5_select_tx_function(struct rte_eth_dev *dev);
+eth_rx_burst_t mlx5_select_rx_function(struct rte_eth_dev *dev);
+unsigned int mlx5_dev_to_port_id(const struct rte_device *dev,
+ uint16_t *port_list,
+ unsigned int port_list_n);
+int mlx5_sysfs_switch_info(unsigned int ifindex,
+ struct mlx5_switch_info *info);
/* mlx5_mac.c */
-int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]);
-void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t);
-int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
- uint32_t);
-void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *);
+int mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN]);
+void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+int mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index, uint32_t vmdq);
+int mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
+int mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set, uint32_t nb_mc_addr);
/* mlx5_rss.c */
-int mlx5_rss_hash_update(struct rte_eth_dev *, struct rte_eth_rss_conf *);
-int mlx5_rss_hash_conf_get(struct rte_eth_dev *, struct rte_eth_rss_conf *);
-int priv_rss_reta_index_resize(struct priv *, unsigned int);
-int mlx5_dev_rss_reta_query(struct rte_eth_dev *,
- struct rte_eth_rss_reta_entry64 *, uint16_t);
-int mlx5_dev_rss_reta_update(struct rte_eth_dev *,
- struct rte_eth_rss_reta_entry64 *, uint16_t);
+int mlx5_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+int mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf);
+int mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size);
+int mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+int mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
/* mlx5_rxmode.c */
-void mlx5_promiscuous_enable(struct rte_eth_dev *);
-void mlx5_promiscuous_disable(struct rte_eth_dev *);
-void mlx5_allmulticast_enable(struct rte_eth_dev *);
-void mlx5_allmulticast_disable(struct rte_eth_dev *);
+void mlx5_promiscuous_enable(struct rte_eth_dev *dev);
+void mlx5_promiscuous_disable(struct rte_eth_dev *dev);
+void mlx5_allmulticast_enable(struct rte_eth_dev *dev);
+void mlx5_allmulticast_disable(struct rte_eth_dev *dev);
/* mlx5_stats.c */
-void priv_xstats_init(struct priv *);
-int mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
-void mlx5_stats_reset(struct rte_eth_dev *);
-int mlx5_xstats_get(struct rte_eth_dev *,
- struct rte_eth_xstat *, unsigned int);
-void mlx5_xstats_reset(struct rte_eth_dev *);
-int mlx5_xstats_get_names(struct rte_eth_dev *,
- struct rte_eth_xstat_name *, unsigned int);
+void mlx5_xstats_init(struct rte_eth_dev *dev);
+int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+void mlx5_stats_reset(struct rte_eth_dev *dev);
+int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n);
+void mlx5_xstats_reset(struct rte_eth_dev *dev);
+int mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int n);
/* mlx5_vlan.c */
-int mlx5_vlan_filter_set(struct rte_eth_dev *, uint16_t, int);
-int mlx5_vlan_offload_set(struct rte_eth_dev *, int);
-void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int);
+int mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+void mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on);
+int mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask);
/* mlx5_trigger.c */
-int mlx5_dev_start(struct rte_eth_dev *);
-void mlx5_dev_stop(struct rte_eth_dev *);
-int priv_dev_traffic_enable(struct priv *, struct rte_eth_dev *);
-int priv_dev_traffic_disable(struct priv *, struct rte_eth_dev *);
-int priv_dev_traffic_restart(struct priv *, struct rte_eth_dev *);
-int mlx5_traffic_restart(struct rte_eth_dev *);
+int mlx5_dev_start(struct rte_eth_dev *dev);
+void mlx5_dev_stop(struct rte_eth_dev *dev);
+int mlx5_traffic_enable(struct rte_eth_dev *dev);
+void mlx5_traffic_disable(struct rte_eth_dev *dev);
+int mlx5_traffic_restart(struct rte_eth_dev *dev);
/* mlx5_flow.c */
-int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
- enum rte_filter_op, void *);
-int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
- const struct rte_flow_item [],
- const struct rte_flow_action [],
- struct rte_flow_error *);
-struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
- const struct rte_flow_attr *,
- const struct rte_flow_item [],
- const struct rte_flow_action [],
- struct rte_flow_error *);
-int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
- struct rte_flow_error *);
-void priv_flow_flush(struct priv *, struct mlx5_flows *);
-int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
-int mlx5_flow_query(struct rte_eth_dev *, struct rte_flow *,
- enum rte_flow_action_type, void *,
- struct rte_flow_error *);
-int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
-int priv_flow_start(struct priv *, struct mlx5_flows *);
-void priv_flow_stop(struct priv *, struct mlx5_flows *);
-int priv_flow_verify(struct priv *);
-int mlx5_ctrl_flow_vlan(struct rte_eth_dev *, struct rte_flow_item_eth *,
- struct rte_flow_item_eth *, struct rte_flow_item_vlan *,
- struct rte_flow_item_vlan *);
-int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *,
- struct rte_flow_item_eth *);
-int priv_flow_create_drop_queue(struct priv *);
-void priv_flow_delete_drop_queue(struct priv *);
+int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
+void mlx5_flow_print(struct rte_flow *flow);
+int mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+int mlx5_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error);
+void mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+int mlx5_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *data,
+ struct rte_flow_error *error);
+int mlx5_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error);
+int mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+int mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list);
+void mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list);
+int mlx5_flow_verify(struct rte_eth_dev *dev);
+int mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask);
+int mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask);
+int mlx5_flow_create_drop_queue(struct rte_eth_dev *dev);
+void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
/* mlx5_socket.c */
-int priv_socket_init(struct priv *priv);
-int priv_socket_uninit(struct priv *priv);
-void priv_socket_handle(struct priv *priv);
-int priv_socket_connect(struct priv *priv);
-
-/* mlx5_mr.c */
-
-struct mlx5_mr *priv_mr_new(struct priv *, struct rte_mempool *);
-struct mlx5_mr *priv_mr_get(struct priv *, struct rte_mempool *);
-int priv_mr_release(struct priv *, struct mlx5_mr *);
-int priv_mr_verify(struct priv *);
+int mlx5_socket_init(struct rte_eth_dev *priv);
+void mlx5_socket_uninit(struct rte_eth_dev *priv);
+void mlx5_socket_handle(struct rte_eth_dev *priv);
+int mlx5_socket_connect(struct rte_eth_dev *priv);
+
+/* mlx5_nl.c */
+
+int mlx5_nl_init(int protocol);
+int mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index);
+int mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index);
+void mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev);
+void mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev);
+int mlx5_nl_promisc(struct rte_eth_dev *dev, int enable);
+int mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable);
+unsigned int mlx5_nl_ifindex(int nl, const char *name);
+int mlx5_nl_switch_info(int nl, unsigned int ifindex,
+ struct mlx5_switch_info *info);
+
+/* mlx5_nl_flow.c */
+
+int mlx5_nl_flow_transpose(void *buf,
+ size_t size,
+ const struct mlx5_nl_flow_ptoi *ptoi,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error);
+void mlx5_nl_flow_brand(void *buf, uint32_t handle);
+int mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error);
+int mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error);
+int mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
+ struct rte_flow_error *error);
+struct mnl_socket *mlx5_nl_flow_socket_create(void);
+void mlx5_nl_flow_socket_destroy(struct mnl_socket *nl);
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index c3334ca3..f2a16795 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_DEFS_H_
@@ -13,8 +13,13 @@
/* Reported driver name. */
#define MLX5_DRIVER_NAME "net_mlx5"
+/* Maximum number of simultaneous unicast MAC addresses. */
+#define MLX5_MAX_UC_MAC_ADDRESSES 128
+/* Maximum number of simultaneous multicast MAC addresses. */
+#define MLX5_MAX_MC_MAC_ADDRESSES 128
/* Maximum number of simultaneous MAC addresses. */
-#define MLX5_MAX_MAC_ADDRESSES 128
+#define MLX5_MAX_MAC_ADDRESSES \
+ (MLX5_MAX_UC_MAC_ADDRESSES + MLX5_MAX_MC_MAC_ADDRESSES)
/* Maximum number of simultaneous VLAN filters. */
#define MLX5_MAX_VLAN_IDS 128
@@ -32,16 +37,11 @@
*/
#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3)
-/*
- * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
- * from which buffers are to be transmitted will have to be mapped by this
- * driver to their own Memory Region (MR). This is a slow operation.
- *
- * This value is always 1 for RX queues.
- */
-#ifndef MLX5_PMD_TX_MP_CACHE
-#define MLX5_PMD_TX_MP_CACHE 8
-#endif
+/* Size of per-queue MR cache array for linear search. */
+#define MLX5_MR_CACHE_N 8
+
+/* Size of MR cache table for binary search. */
+#define MLX5_MR_BTREE_CACHE_N 256
/*
* If defined, only use software counters. The PMD will never ask the hardware
@@ -58,16 +58,17 @@
#define MLX5_MAX_XSTATS 32
/* Maximum Packet headers size (L2+L3+L4) for TSO. */
-#define MLX5_MAX_TSO_HEADER 128
+#define MLX5_MAX_TSO_HEADER 192
/* Default minimum number of Tx queues for vectorized Tx. */
#define MLX5_VPMD_MIN_TXQS 4
/* Threshold of buffer replenishment for vectorized Rx. */
-#define MLX5_VPMD_RXQ_RPLNSH_THRESH 64U
+#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
+ (RTE_MIN(MLX5_VPMD_RX_MAX_BURST, (unsigned int)(n) >> 2))
/* Maximum size of burst for vectorized Rx. */
-#define MLX5_VPMD_RX_MAX_BURST MLX5_VPMD_RXQ_RPLNSH_THRESH
+#define MLX5_VPMD_RX_MAX_BURST 64U
/*
* Maximum size of burst for vectorized Tx. This is related to the maximum size
@@ -82,17 +83,54 @@
/* Supported RSS */
#define MLX5_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
-/* Maximum number of attempts to query link status before giving up. */
-#define MLX5_MAX_LINK_QUERY_ATTEMPTS 5
+/* Timeout in seconds to get a valid link status. */
+#define MLX5_LINK_STATUS_TIMEOUT 10
/* Reserved address space for UAR mapping. */
-#define MLX5_UAR_SIZE (1ULL << 32)
+#define MLX5_UAR_SIZE (1ULL << (sizeof(uintptr_t) * 4))
/* Offset of reserved UAR address space to hugepage memory. Offset is used here
 * to minimize the possibility of an address next to the hugepage being used
 * by other code in either primary or secondary process; failing to map the
 * TX UAR would make TX packets invisible to HW.
*/
-#define MLX5_UAR_OFFSET (1ULL << 32)
+#define MLX5_UAR_OFFSET (1ULL << (sizeof(uintptr_t) * 4))
+
+/* Maximum number of UAR pages used by a port.
+ * These are the size and mask for an array of mutexes used to synchronize
+ * the access to a port's UARs on platforms that do not support 64-bit writes.
+ * On such systems a 64-bit doorbell can be issued as two consecutive 32-bit
+ * writes. Access to a UAR page (which can be shared by all threads in the
+ * process) must then be synchronized (for example, using a semaphore). Such
+ * synchronization is not required when ringing doorbells on different UAR
+ * pages.
+ * A port with 512 Tx queues uses 8 4-kByte UAR pages, which are shared
+ * among the ports.
+ */
+#define MLX5_UAR_PAGE_NUM_MAX 64
+#define MLX5_UAR_PAGE_NUM_MASK ((MLX5_UAR_PAGE_NUM_MAX) - 1)
+
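+/*
+ * A rough sketch of the synchronization these macros support on 32-bit
+ * targets (hypothetical helper, not the PMD's actual code): the 64-bit
+ * doorbell is issued as two 32-bit stores under the per-page lock.
+ *
+ *	static inline void
+ *	uar_write64_sketch(uint64_t val, volatile void *addr,
+ *			   rte_spinlock_t *lock)
+ *	{
+ *		rte_spinlock_lock(lock);
+ *		*(volatile uint32_t *)addr = (uint32_t)val;
+ *		rte_io_wmb();
+ *		*((volatile uint32_t *)addr + 1) = (uint32_t)(val >> 32);
+ *		rte_spinlock_unlock(lock);
+ *	}
+ */
+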
+/* Log 2 of the default number of strides per WQE for Multi-Packet RQ. */
+#define MLX5_MPRQ_STRIDE_NUM_N 6U
+
+/* Two-byte shift is disabled for Multi-Packet RQ. */
+#define MLX5_MPRQ_TWO_BYTE_SHIFT 0
+
+/*
+ * Maximum size of packet to be memcpy'd instead of being attached as an
+ * external buffer.
+ */
+#define MLX5_MPRQ_MEMCPY_DEFAULT_LEN 128
+
+/* Minimum number of Rx queues to enable Multi-Packet RQ. */
+#define MLX5_MPRQ_MIN_RXQS 12
+
+/* Cache size of mempool for Multi-Packet RQ. */
+#define MLX5_MPRQ_MP_CACHE_SZ 32U
+
+/* Definition of static_assert found in /usr/include/assert.h */
+#ifndef HAVE_STATIC_ASSERT
+#define static_assert _Static_assert
+#endif
#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 66650769..34c5b95e 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#define _GNU_SOURCE
#include <stddef.h>
#include <assert.h>
+#include <inttypes.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
@@ -17,14 +18,13 @@
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
-#include <sys/utsname.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
-#include <linux/version.h>
#include <fcntl.h>
#include <stdalign.h>
#include <sys/un.h>
+#include <time.h>
#include <rte_atomic.h>
#include <rte_ethdev_driver.h>
@@ -32,14 +32,41 @@
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
-#include <rte_alarm.h>
#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_rwlock.h>
#include "mlx5.h"
#include "mlx5_glue.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/* Supported speed values found in /usr/include/linux/ethtool.h */
+#ifndef HAVE_SUPPORTED_40000baseKR4_Full
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseCR4_Full
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseSR4_Full
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#endif
+#ifndef HAVE_SUPPORTED_40000baseLR4_Full
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseKR4_Full
+#define SUPPORTED_56000baseKR4_Full (1 << 27)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseCR4_Full
+#define SUPPORTED_56000baseCR4_Full (1 << 28)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseSR4_Full
+#define SUPPORTED_56000baseSR4_Full (1 << 29)
+#endif
+#ifndef HAVE_SUPPORTED_56000baseLR4_Full
+#define SUPPORTED_56000baseLR4_Full (1 << 30)
+#endif
+
/* Add defines in case the running kernel is not the same as user headers. */
#ifndef ETHTOOL_GLINKSETTINGS
struct ethtool_link_settings {
@@ -92,19 +119,21 @@ struct ethtool_link_settings {
#endif
/**
- * Get interface name from private structure.
+ * Get master interface name from private structure.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[out] ifname
* Interface name output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
+mlx5_get_master_ifname(const struct rte_eth_dev *dev,
+ char (*ifname)[IF_NAMESIZE])
{
+ struct priv *priv = dev->data->dev_private;
DIR *dir;
struct dirent *dent;
unsigned int dev_type = 0;
@@ -115,8 +144,10 @@ priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
MKSTR(path, "%s/device/net", priv->ibdev_path);
dir = opendir(path);
- if (dir == NULL)
- return -1;
+ if (dir == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
}
while ((dent = readdir(dir)) != NULL) {
char *name = dent->d_name;
@@ -162,359 +193,198 @@ try_dev_id:
if (dev_port == dev_port_prev)
goto try_dev_id;
dev_port_prev = dev_port;
- if (dev_port == (priv->port - 1u))
- snprintf(match, sizeof(match), "%s", name);
+ if (dev_port == 0)
+ strlcpy(match, name, sizeof(match));
}
closedir(dir);
- if (match[0] == '\0')
- return -1;
+ if (match[0] == '\0') {
+ rte_errno = ENOENT;
+ return -rte_errno;
+ }
strncpy(*ifname, match, sizeof(*ifname));
return 0;
}
/**
- * Check if the counter is located on ib counters file.
+ * Get interface name from private structure.
*
- * @param[in] cntr
- * Counter name.
+ * This is a port representor-aware version of mlx5_get_master_ifname().
*
- * @return
- * 1 if counter is located on ib counters file , 0 otherwise.
- */
-int
-priv_is_ib_cntr(const char *cntr)
-{
- if (!strcmp(cntr, "out_of_buffer"))
- return 1;
- return 0;
-}
-
-/**
- * Read from sysfs entry.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[out] buf
- * Data output buffer.
- * @param size
- * Buffer size.
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[out] ifname
+ * Interface name output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-priv_sysfs_read(const struct priv *priv, const char *entry,
- char *buf, size_t size)
+int
+mlx5_get_ifname(const struct rte_eth_dev *dev, char (*ifname)[IF_NAMESIZE])
{
- char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
- int err;
-
- if (priv_get_ifname(priv, &ifname))
- return -1;
-
- if (priv_is_ib_cntr(entry)) {
- MKSTR(path, "%s/ports/1/hw_counters/%s",
- priv->ibdev_path, entry);
- file = fopen(path, "rb");
- } else {
- MKSTR(path, "%s/device/net/%s/%s",
- priv->ibdev_path, ifname, entry);
- file = fopen(path, "rb");
+ struct priv *priv = dev->data->dev_private;
+ unsigned int ifindex =
+ priv->nl_socket_rdma >= 0 ?
+ mlx5_nl_ifindex(priv->nl_socket_rdma, priv->ibdev_name) : 0;
+
+ if (!ifindex) {
+ if (!priv->representor)
+ return mlx5_get_master_ifname(dev, ifname);
+ rte_errno = ENXIO;
+ return -rte_errno;
}
- if (file == NULL)
- return -1;
- ret = fread(buf, 1, size, file);
- err = errno;
- if (((size_t)ret < size) && (ferror(file)))
- ret = -1;
- else
- ret = size;
- fclose(file);
- errno = err;
- return ret;
+ if (if_indextoname(ifindex, &(*ifname)[0]))
+ return 0;
+ rte_errno = errno;
+ return -rte_errno;
}
/**
- * Write to sysfs entry.
+ * Get the interface index from device name.
*
- * @param[in] priv
- * Pointer to private structure.
- * @param[in] entry
- * Entry name relative to sysfs path.
- * @param[in] buf
- * Data buffer.
- * @param size
- * Buffer size.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * Nonzero interface index on success, zero otherwise and rte_errno is set.
*/
-static int
-priv_sysfs_write(const struct priv *priv, const char *entry,
- char *buf, size_t size)
+unsigned int
+mlx5_ifindex(const struct rte_eth_dev *dev)
{
char ifname[IF_NAMESIZE];
- FILE *file;
- int ret;
- int err;
-
- if (priv_get_ifname(priv, &ifname))
- return -1;
-
- MKSTR(path, "%s/device/net/%s/%s", priv->ibdev_path, ifname, entry);
-
- file = fopen(path, "wb");
- if (file == NULL)
- return -1;
- ret = fwrite(buf, 1, size, file);
- err = errno;
- if (((size_t)ret < size) || (ferror(file)))
- ret = -1;
- else
- ret = size;
- fclose(file);
- errno = err;
- return ret;
-}
-
-/**
- * Get unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param[out] value
- * Value output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
-{
- int ret;
- unsigned long value_ret;
- char value_str[32];
+ unsigned int ifindex;
- ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret == -1) {
- DEBUG("cannot read %s value from sysfs: %s",
- name, strerror(errno));
- return -1;
- }
- value_str[ret] = '\0';
- errno = 0;
- value_ret = strtoul(value_str, NULL, 0);
- if (errno) {
- DEBUG("invalid %s value `%s': %s", name, value_str,
- strerror(errno));
- return -1;
- }
- *value = value_ret;
- return 0;
-}
-
-/**
- * Set unsigned long sysfs property.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] name
- * Entry name relative to sysfs path.
- * @param value
- * Value to set.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-static int
-priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
-{
- int ret;
- MKSTR(value_str, "%lu", value);
-
- ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
- if (ret == -1) {
- DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
- name, value_str, value, strerror(errno));
- return -1;
- }
- return 0;
+ if (mlx5_get_ifname(dev, &ifname))
+ return 0;
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex)
+ rte_errno = errno;
+ return ifindex;
}
/**
* Perform ifreq ioctl() on associated Ethernet device.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param req
* Request number to pass to ioctl().
* @param[out] ifr
* Interface request structure output buffer.
+ * @param master
+ * When device is a port representor, perform request on master device
+ * instead.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
+mlx5_ifreq(const struct rte_eth_dev *dev, int req, struct ifreq *ifr,
+ int master)
{
int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
- int ret = -1;
+ int ret = 0;
- if (sock == -1)
- return ret;
- if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
- ret = ioctl(sock, req, ifr);
+ if (sock == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ if (master)
+ ret = mlx5_get_master_ifname(dev, &ifr->ifr_name);
+ else
+ ret = mlx5_get_ifname(dev, &ifr->ifr_name);
+ if (ret)
+ goto error;
+ ret = ioctl(sock, req, ifr);
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
close(sock);
- return ret;
-}
-
-/**
- * Return the number of active VFs for the current device.
- *
- * @param[in] priv
- * Pointer to private structure.
- * @param[out] num_vfs
- * Number of active VFs.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-int
-priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs)
-{
- /* The sysfs entry name depends on the operating system. */
- const char **name = (const char *[]){
- "device/sriov_numvfs",
- "device/mlx5_num_vfs",
- NULL,
- };
- int ret;
-
- do {
- unsigned long ulong_num_vfs;
-
- ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs);
- if (!ret)
- *num_vfs = ulong_num_vfs;
- } while (*(++name) && ret);
- return ret;
+ return 0;
+error:
+ close(sock);
+ return -rte_errno;
}
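
Outside the driver, the ifreq pattern wrapped by mlx5_ifreq() reduces to the standalone sketch below: open a throwaway datagram socket, fill ifr_name, issue the request (SIOCGIFMTU here) and propagate errno. This is an illustration under those assumptions, not driver code:

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>

static int
ifreq_get_mtu(const char *ifname, int *mtu)
{
	struct ifreq ifr;
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);

	if (sock == -1)
		return -errno;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	if (ioctl(sock, SIOCGIFMTU, &ifr) == -1) {
		int err = errno;  /* save before close() can clobber it */

		close(sock);
		return -err;
	}
	close(sock);
	*mtu = ifr.ifr_mtu;
	return 0;
}
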
/**
* Get device MTU.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] mtu
* MTU value output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_get_mtu(struct priv *priv, uint16_t *mtu)
+mlx5_get_mtu(struct rte_eth_dev *dev, uint16_t *mtu)
{
- unsigned long ulong_mtu;
+ struct ifreq request;
+ int ret = mlx5_ifreq(dev, SIOCGIFMTU, &request, 0);
- if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1)
- return -1;
- *mtu = ulong_mtu;
- return 0;
-}
-
-/**
- * Read device counter from sysfs.
- *
- * @param priv
- * Pointer to private structure.
- * @param name
- * Counter name.
- * @param[out] cntr
- * Counter output buffer.
- *
- * @return
- * 0 on success, -1 on failure and errno is set.
- */
-int
-priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr)
-{
- unsigned long ulong_ctr;
-
- if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1)
- return -1;
- *cntr = ulong_ctr;
+ if (ret)
+ return ret;
+ *mtu = request.ifr_mtu;
return 0;
}
/**
* Set device MTU.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param mtu
* MTU value to set.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_set_mtu(struct priv *priv, uint16_t mtu)
+mlx5_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- uint16_t new_mtu;
+ struct ifreq request = { .ifr_mtu = mtu, };
- if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
- priv_get_mtu(priv, &new_mtu))
- return -1;
- if (new_mtu == mtu)
- return 0;
- errno = EINVAL;
- return -1;
+ return mlx5_ifreq(dev, SIOCSIFMTU, &request, 0);
}
/**
* Set device flags.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param keep
* Bitmask for flags that must remain untouched.
* @param flags
* Bitmask for flags to modify.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
+mlx5_set_flags(struct rte_eth_dev *dev, unsigned int keep, unsigned int flags)
{
- unsigned long tmp;
+ struct ifreq request;
+ int ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &request, 0);
- if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
- return -1;
- tmp &= keep;
- tmp |= (flags & (~keep));
- return priv_set_sysfs_ulong(priv, "flags", tmp);
+ if (ret)
+ return ret;
+ request.ifr_flags &= keep;
+ request.ifr_flags |= flags & ~keep;
+ return mlx5_ifreq(dev, SIOCSIFFLAGS, &request, 0);
}
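
A worked example of the keep/flags masking above: with keep = ~IFF_UP and flags = IFF_UP, only the IFF_UP bit changes and every other interface flag is preserved.

unsigned int old = IFF_BROADCAST | IFF_MULTICAST;   /* link currently down */
unsigned int keep = ~IFF_UP;                        /* preserve all but IFF_UP */
unsigned int updated = (old & keep) | (IFF_UP & ~keep);
/* updated == IFF_BROADCAST | IFF_MULTICAST | IFF_UP -> link comes up */
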
/**
- * Ethernet device configuration.
- *
- * Prepare the driver for a given number of TX and RX queues.
+ * DPDK callback for Ethernet device configuration.
*
* @param dev
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-dev_configure(struct rte_eth_dev *dev)
+int
+mlx5_dev_configure(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
unsigned int rxqs_n = dev->data->nb_rx_queues;
@@ -524,60 +394,49 @@ dev_configure(struct rte_eth_dev *dev)
unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
- uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t supp_rx_offloads =
- (mlx5_priv_get_rx_port_offloads(priv) |
- mlx5_priv_get_rx_queue_offloads(priv));
- uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
-
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- ERROR("Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- tx_offloads, supp_tx_offloads);
- return ENOTSUP;
- }
- if ((rx_offloads & supp_rx_offloads) != rx_offloads) {
- ERROR("Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64,
- rx_offloads, supp_rx_offloads);
- return ENOTSUP;
- }
+ int ret = 0;
+
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
- rss_hash_default_key_len)) {
- /* MLX5 RSS only support 40bytes key. */
- return EINVAL;
+ MLX5_RSS_HASH_KEY_LEN)) {
+		DRV_LOG(ERR, "port %u RSS key len must be %s bytes long",
+ dev->data->port_id, RTE_STR(MLX5_RSS_HASH_KEY_LEN));
+ rte_errno = EINVAL;
+ return -rte_errno;
}
priv->rss_conf.rss_key =
rte_realloc(priv->rss_conf.rss_key,
- rss_hash_default_key_len, 0);
+ MLX5_RSS_HASH_KEY_LEN, 0);
if (!priv->rss_conf.rss_key) {
- ERROR("cannot allocate RSS hash key memory (%u)", rxqs_n);
- return ENOMEM;
+ DRV_LOG(ERR, "port %u cannot allocate RSS hash key memory (%u)",
+ dev->data->port_id, rxqs_n);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
memcpy(priv->rss_conf.rss_key,
use_app_rss_key ?
dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
rss_hash_default_key,
- rss_hash_default_key_len);
- priv->rss_conf.rss_key_len = rss_hash_default_key_len;
+ MLX5_RSS_HASH_KEY_LEN);
+ priv->rss_conf.rss_key_len = MLX5_RSS_HASH_KEY_LEN;
priv->rss_conf.rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
priv->rxqs = (void *)dev->data->rx_queues;
priv->txqs = (void *)dev->data->tx_queues;
if (txqs_n != priv->txqs_n) {
- INFO("%p: TX queues number update: %u -> %u",
- (void *)dev, priv->txqs_n, txqs_n);
+ DRV_LOG(INFO, "port %u Tx queues number update: %u -> %u",
+ dev->data->port_id, priv->txqs_n, txqs_n);
priv->txqs_n = txqs_n;
}
if (rxqs_n > priv->config.ind_table_max_size) {
- ERROR("cannot handle this many RX queues (%u)", rxqs_n);
- return EINVAL;
+ DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
+ dev->data->port_id, rxqs_n);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
if (rxqs_n == priv->rxqs_n)
return 0;
- INFO("%p: RX queues number update: %u -> %u",
- (void *)dev, priv->rxqs_n, rxqs_n);
+ DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
+ dev->data->port_id, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
/* If the requested number of RX queues is not a power of two, use the
* maximum indirection table size for better balancing.
@@ -585,8 +444,9 @@ dev_configure(struct rte_eth_dev *dev)
reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
priv->config.ind_table_max_size :
rxqs_n));
- if (priv_rss_reta_index_resize(priv, reta_idx_n))
- return ENOMEM;
+ ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
+ if (ret)
+ return ret;
/* When the number of RX queues is not a power of two, the remaining
* table entries are padded with reused WQs and hashes are not spread
* uniformly. */
@@ -599,25 +459,42 @@ dev_configure(struct rte_eth_dev *dev)
}
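
The power-of-two test above, rxqs_n & (rxqs_n - 1), is nonzero exactly when rxqs_n is not a power of two; in that case the full indirection table is used so the padding spreads more evenly. Worked through with log2above(), the round-up helper from mlx5_utils.h (values below are illustrative):

unsigned int max = 512;      /* config.ind_table_max_size, say */
unsigned int rxqs_n = 6;     /* not a power of two: 6 & 5 == 4 */
unsigned int reta_idx_n =
	1 << log2above((rxqs_n & (rxqs_n - 1)) ? max : rxqs_n);
/* rxqs_n == 6 -> reta_idx_n == 512 (entries padded with reused WQs) */
/* rxqs_n == 8 -> reta_idx_n == 8 */
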
/**
- * DPDK callback for Ethernet device configuration.
+ * Set default tuning parameters.
*
* @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success, negative errno value on failure.
+ * Pointer to Ethernet device.
+ * @param[out] info
+ * Info structure output buffer.
*/
-int
-mlx5_dev_configure(struct rte_eth_dev *dev)
+static void
+mlx5_set_default_params(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
struct priv *priv = dev->data->dev_private;
- int ret;
- priv_lock(priv);
- ret = dev_configure(dev);
- assert(ret >= 0);
- priv_unlock(priv);
- return -ret;
+ /* Minimum CPU utilization. */
+ info->default_rxportconf.ring_size = 256;
+ info->default_txportconf.ring_size = 256;
+ info->default_rxportconf.burst_size = 64;
+ info->default_txportconf.burst_size = 64;
+ if (priv->link_speed_capa & ETH_LINK_SPEED_100G) {
+ info->default_rxportconf.nb_queues = 16;
+ info->default_txportconf.nb_queues = 16;
+ if (dev->data->nb_rx_queues > 2 ||
+ dev->data->nb_tx_queues > 2) {
+			/* Maximum throughput. */
+ info->default_rxportconf.ring_size = 2048;
+ info->default_txportconf.ring_size = 2048;
+ }
+ } else {
+ info->default_rxportconf.nb_queues = 8;
+ info->default_txportconf.nb_queues = 8;
+ if (dev->data->nb_rx_queues > 2 ||
+ dev->data->nb_tx_queues > 2) {
+			/* Maximum throughput. */
+ info->default_rxportconf.ring_size = 4096;
+ info->default_txportconf.ring_size = 4096;
+ }
+ }
}
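
These hints surface to applications through rte_eth_dev_info_get() as of this release. A caller-side sketch (example_pick_ring_size() is hypothetical):

#include <rte_ethdev.h>

static uint16_t
example_pick_ring_size(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	/* Use the PMD's tuned default, fall back when none is reported. */
	return info.default_rxportconf.ring_size ?
	       info.default_rxportconf.ring_size : 512;
}
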
/**
@@ -636,9 +513,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
- info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
- priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
info->max_rx_pktlen = 65536;
@@ -653,22 +527,54 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
max = 65535;
info->max_rx_queues = max;
info->max_tx_queues = max;
- info->max_mac_addrs = RTE_DIM(priv->mac);
- info->rx_queue_offload_capa =
- mlx5_priv_get_rx_queue_offloads(priv);
- info->rx_offload_capa = (mlx5_priv_get_rx_port_offloads(priv) |
+ info->max_mac_addrs = MLX5_MAX_UC_MAC_ADDRESSES;
+ info->rx_queue_offload_capa = mlx5_get_rx_queue_offloads(dev);
+ info->rx_offload_capa = (mlx5_get_rx_port_offloads() |
info->rx_queue_offload_capa);
- info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
- if (priv_get_ifname(priv, &ifname) == 0)
+ info->tx_offload_capa = mlx5_get_tx_port_offloads(dev);
+ if (mlx5_get_ifname(dev, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
info->reta_size = priv->reta_idx_n ?
priv->reta_idx_n : config->ind_table_max_size;
- info->hash_key_size = priv->rss_conf.rss_key_len;
+ info->hash_key_size = MLX5_RSS_HASH_KEY_LEN;
info->speed_capa = priv->link_speed_capa;
info->flow_type_rss_offloads = ~MLX5_RSS_HF_MASK;
- priv_unlock(priv);
+ mlx5_set_default_params(dev, info);
+ info->switch_info.name = dev->data->name;
+ info->switch_info.domain_id = priv->domain_id;
+ info->switch_info.port_id = priv->representor_id;
+ if (priv->representor) {
+ unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[i];
+
+ i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
+ while (i--) {
+ struct priv *opriv =
+ rte_eth_devices[port_id[i]].data->dev_private;
+
+ if (!opriv ||
+ opriv->representor ||
+ opriv->domain_id != priv->domain_id)
+ continue;
+ /*
+ * Override switch name with that of the master
+ * device.
+ */
+ info->switch_info.name = opriv->dev_data->name;
+ break;
+ }
+ }
}
+/**
+ * Get supported packet types.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ *   A pointer to the supported packet types array.
+ */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
@@ -691,6 +597,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
};
if (dev->rx_pkt_burst == mlx5_rx_burst ||
+ dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
dev->rx_pkt_burst == mlx5_rx_burst_vec)
return ptypes;
return NULL;
@@ -701,11 +608,15 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
*
* @param dev
* Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
+ * @param[out] link
+ * Storage for current link status.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
{
struct priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
@@ -714,26 +625,28 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
struct ifreq ifr;
struct rte_eth_link dev_link;
int link_speed = 0;
+ int ret;
- /* priv_lock() is not taken to allow concurrent calls. */
-
- (void)wait_to_complete;
- if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&edata;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
- strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
link_speed = ethtool_cmd_speed(&edata);
if (link_speed == -1)
- dev_link.link_speed = 0;
+ dev_link.link_speed = ETH_SPEED_NUM_NONE;
else
dev_link.link_speed = link_speed;
priv->link_speed_capa = 0;
@@ -753,13 +666,13 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
- /* Link status changed. */
- dev->data->dev_link = dev_link;
- return 0;
+ if ((dev_link.link_speed && !dev_link.link_status) ||
+ (!dev_link.link_speed && dev_link.link_status)) {
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
- /* Link status is still the same. */
- return -1;
+ *link = dev_link;
+ return 0;
}
/**
@@ -767,31 +680,41 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
*
* @param dev
* Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
+ * @param[out] link
+ * Storage for current link status.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+
{
struct priv *priv = dev->data->dev_private;
struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
+ int ret;
- (void)wait_to_complete;
- if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
- WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFFLAGS, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u ioctl(SIOCGIFFLAGS) failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
ifr.ifr_data = (void *)&gcmd;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
- strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(DEBUG,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
@@ -802,10 +725,13 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
*ecmd = gcmd;
ifr.ifr_data = (void *)ecmd;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
- strerror(errno));
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(DEBUG,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
dev_link.link_speed = ecmd->speed;
sc = ecmd->link_mode_masks[0] |
@@ -849,121 +775,13 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
- /* Link status changed. */
- dev->data->dev_link = dev_link;
- return 0;
- }
- /* Link status is still the same. */
- return -1;
-}
-
-/**
- * Enable receiving and transmitting traffic.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_link_start(struct priv *priv)
-{
- struct rte_eth_dev *dev = priv->dev;
- int err;
-
- dev->tx_pkt_burst = priv_select_tx_function(priv, dev);
- dev->rx_pkt_burst = priv_select_rx_function(priv, dev);
- err = priv_dev_traffic_enable(priv, dev);
- if (err)
- ERROR("%p: error occurred while configuring control flows: %s",
- (void *)priv, strerror(err));
- err = priv_flow_start(priv, &priv->flows);
- if (err)
- ERROR("%p: error occurred while configuring flows: %s",
- (void *)priv, strerror(err));
-}
-
-/**
- * Disable receiving and transmitting traffic.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_link_stop(struct priv *priv)
-{
- struct rte_eth_dev *dev = priv->dev;
-
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
-}
-
-/**
- * Retrieve physical link information and update rx/tx_pkt_burst callbacks
- * accordingly.
- *
- * @param priv
- * Pointer to private structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
- */
-int
-priv_link_update(struct priv *priv, int wait_to_complete)
-{
- struct rte_eth_dev *dev = priv->dev;
- struct utsname utsname;
- int ver[3];
- int ret;
- struct rte_eth_link dev_link = dev->data->dev_link;
-
- if (uname(&utsname) == -1 ||
- sscanf(utsname.release, "%d.%d.%d",
- &ver[0], &ver[1], &ver[2]) != 3 ||
- KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
- ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
- else
- ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
- /* If lsc interrupt is disabled, should always be ready for traffic. */
- if (!dev->data->dev_conf.intr_conf.lsc) {
- priv_link_start(priv);
- return ret;
- }
- /* Re-select burst callbacks only if link status has been changed. */
- if (!ret && dev_link.link_status != dev->data->dev_link.link_status) {
- if (dev->data->dev_link.link_status == ETH_LINK_UP)
- priv_link_start(priv);
- else
- priv_link_stop(priv);
- }
- return ret;
-}
-
-/**
- * Querying the link status till it changes to the desired state.
- * Number of query attempts is bounded by MLX5_MAX_LINK_QUERY_ATTEMPTS.
- *
- * @param priv
- * Pointer to private structure.
- * @param status
- * Link desired status.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-priv_force_link_status_change(struct priv *priv, int status)
-{
- int try = 0;
-
- while (try < MLX5_MAX_LINK_QUERY_ATTEMPTS) {
- priv_link_update(priv, 0);
- if (priv->dev->data->dev_link.link_status == status)
- return 0;
- try++;
- sleep(1);
+ if ((dev_link.link_speed && !dev_link.link_status) ||
+ (!dev_link.link_speed && dev_link.link_status)) {
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
- return -EAGAIN;
+ *link = dev_link;
+ return 0;
}
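
The negation of link_mode_masks_nwords above is the ETHTOOL_GLINKSETTINGS handshake: the first ioctl is issued with zero words, the kernel answers with the negated word count it supports, and the second call must echo that count back with enough buffer space for the three masks. A condensed sketch (ethtool_ioctl() is a hypothetical wrapper issuing SIOCETHTOOL with ifr_data pointing at the command; 32 words per mask is an assumed upper bound):

struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
uint32_t data[(sizeof(struct ethtool_link_settings) +
	       3 * 32 * sizeof(uint32_t)) / sizeof(uint32_t)];
struct ethtool_link_settings *ecmd = (void *)data;

ethtool_ioctl(ifname, &gcmd);       /* kernel sets nwords to e.g. -3 */
gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
*ecmd = gcmd;                       /* second call fetches the masks */
ethtool_ioctl(ifname, ecmd);
/* ecmd->speed and ecmd->link_mode_masks[] are now valid. */
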
/**
@@ -972,17 +790,42 @@ priv_force_link_status_change(struct priv *priv, int status)
* @param dev
* Pointer to Ethernet device structure.
* @param wait_to_complete
- * Wait for request completion (ignored).
+ * Wait for request completion.
+ *
+ * @return
+ * 0 if link status was not updated, positive if it was, a negative errno
+ * value otherwise and rte_errno is set.
*/
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = dev->data->dev_private;
int ret;
+ struct rte_eth_link dev_link;
+ time_t start_time = time(NULL);
- priv_lock(priv);
- ret = priv_link_update(priv, wait_to_complete);
- priv_unlock(priv);
+ do {
+ ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
+ if (ret)
+ ret = mlx5_link_update_unlocked_gset(dev, &dev_link);
+ if (ret == 0)
+ break;
+ /* Handle wait to complete situation. */
+ if (wait_to_complete && ret == -EAGAIN) {
+ if (abs((int)difftime(time(NULL), start_time)) <
+ MLX5_LINK_STATUS_TIMEOUT) {
+ usleep(0);
+ continue;
+ } else {
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ } else if (ret < 0) {
+ return ret;
+ }
+ } while (wait_to_complete);
+ ret = !!memcmp(&dev->data->dev_link, &dev_link,
+ sizeof(struct rte_eth_link));
+ dev->data->dev_link = dev_link;
return ret;
}
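
From the application side this callback maps onto the two ethdev link queries; a usage sketch (port_id assumed valid):

struct rte_eth_link link;

rte_eth_link_get_nowait(port_id, &link); /* single attempt: a transient
					  * speed/status mismatch is simply
					  * not reported as a change */
rte_eth_link_get(port_id, &link);        /* wait_to_complete = 1: retries
					  * until consistent, or the driver
					  * gives up with EBUSY after
					  * MLX5_LINK_STATUS_TIMEOUT */
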
@@ -995,39 +838,33 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
* New MTU.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct priv *priv = dev->data->dev_private;
- uint16_t kern_mtu;
- int ret = 0;
+ uint16_t kern_mtu = 0;
+ int ret;
- priv_lock(priv);
- ret = priv_get_mtu(priv, &kern_mtu);
+ ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
- goto out;
+ return ret;
/* Set kernel interface MTU first. */
- ret = priv_set_mtu(priv, mtu);
+ ret = mlx5_set_mtu(dev, mtu);
if (ret)
- goto out;
- ret = priv_get_mtu(priv, &kern_mtu);
+ return ret;
+ ret = mlx5_get_mtu(dev, &kern_mtu);
if (ret)
- goto out;
+ return ret;
if (kern_mtu == mtu) {
priv->mtu = mtu;
- DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
+ DRV_LOG(DEBUG, "port %u adapter MTU set to %u",
+ dev->data->port_id, mtu);
+ return 0;
}
- priv_unlock(priv);
- return 0;
-out:
- ret = errno;
- WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
- strerror(ret));
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
+ rte_errno = EAGAIN;
+ return -rte_errno;
}
/**
@@ -1039,12 +876,11 @@ out:
* Flow control output buffer.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_GPAUSEPARAM
@@ -1052,15 +888,14 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
int ret;
ifr.ifr_data = (void *)&ethpause;
- priv_lock(priv);
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- ret = errno;
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
- " failed: %s",
- strerror(ret));
- goto out;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM) failed:"
+ " %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
-
fc_conf->autoneg = ethpause.autoneg;
if (ethpause.rx_pause && ethpause.tx_pause)
fc_conf->mode = RTE_FC_FULL;
@@ -1070,12 +905,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
fc_conf->mode = RTE_FC_TX_PAUSE;
else
fc_conf->mode = RTE_FC_NONE;
- ret = 0;
-
-out:
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
+ return 0;
}
/**
@@ -1087,12 +917,11 @@ out:
* Flow control parameters.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
- struct priv *priv = dev->data->dev_private;
struct ifreq ifr;
struct ethtool_pauseparam ethpause = {
.cmd = ETHTOOL_SPAUSEPARAM
@@ -1112,21 +941,15 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
ethpause.tx_pause = 1;
else
ethpause.tx_pause = 0;
-
- priv_lock(priv);
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
- ret = errno;
- WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
- " failed: %s",
- strerror(ret));
- goto out;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 0);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
+ " failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return ret;
}
- ret = 0;
-
-out:
- priv_unlock(priv);
- assert(ret >= 0);
- return -ret;
+ return 0;
}
/**
@@ -1138,7 +961,7 @@ out:
* PCI bus address output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
@@ -1149,8 +972,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
MKSTR(path, "%s/device/uevent", device->ibdev_path);
file = fopen(path, "rb");
- if (file == NULL)
- return -1;
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
while (fgets(line, sizeof(line), file) == line) {
size_t len = strlen(line);
int ret;
@@ -1180,46 +1005,10 @@ mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
}
/**
- * Update the link status.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * Zero if the callback process can be called immediately.
- */
-static int
-priv_link_status_update(struct priv *priv)
-{
- struct rte_eth_link *link = &priv->dev->data->dev_link;
-
- priv_link_update(priv, 0);
- if (((link->link_speed == 0) && link->link_status) ||
- ((link->link_speed != 0) && !link->link_status)) {
- /*
- * Inconsistent status. Event likely occurred before the
- * kernel netdevice exposes the new status.
- */
- if (!priv->pending_alarm) {
- priv->pending_alarm = 1;
- rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
- mlx5_dev_link_status_handler,
- priv->dev);
- }
- return 1;
- } else if (unlikely(priv->pending_alarm)) {
- /* Link interrupt occurred while alarm is already scheduled. */
- priv->pending_alarm = 0;
- rte_eal_alarm_cancel(mlx5_dev_link_status_handler, priv->dev);
- }
- return 0;
-}
-
-/**
* Device status handler.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param events
* Pointer to event flags holder.
*
@@ -1227,60 +1016,37 @@ priv_link_status_update(struct priv *priv)
* Events bitmap of callback process which can be called immediately.
*/
static uint32_t
-priv_dev_status_handler(struct priv *priv)
+mlx5_dev_status_handler(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct ibv_async_event event;
uint32_t ret = 0;
+ if (mlx5_link_update(dev, 0) == -EAGAIN) {
+ usleep(0);
+ return 0;
+ }
/* Read all message and acknowledge them. */
for (;;) {
if (mlx5_glue->get_async_event(priv->ctx, &event))
break;
if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
event.event_type == IBV_EVENT_PORT_ERR) &&
- (priv->dev->data->dev_conf.intr_conf.lsc == 1))
+ (dev->data->dev_conf.intr_conf.lsc == 1))
ret |= (1 << RTE_ETH_EVENT_INTR_LSC);
else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
- priv->dev->data->dev_conf.intr_conf.rmv == 1)
+ dev->data->dev_conf.intr_conf.rmv == 1)
ret |= (1 << RTE_ETH_EVENT_INTR_RMV);
else
- DEBUG("event type %d on port %d not handled",
- event.event_type, event.element.port_num);
+ DRV_LOG(DEBUG,
+				"port %u event type %d not handled",
+ dev->data->port_id, event.event_type);
mlx5_glue->ack_async_event(&event);
}
- if (ret & (1 << RTE_ETH_EVENT_INTR_LSC))
- if (priv_link_status_update(priv))
- ret &= ~(1 << RTE_ETH_EVENT_INTR_LSC);
return ret;
}
/**
- * Handle delayed link status event.
- *
- * @param arg
- * Registered argument.
- */
-void
-mlx5_dev_link_status_handler(void *arg)
-{
- struct rte_eth_dev *dev = arg;
- struct priv *priv = dev->data->dev_private;
- int ret;
-
- while (!priv_trylock(priv)) {
- /* Alarm is being canceled. */
- if (priv->pending_alarm == 0)
- return;
- rte_pause();
- }
- priv->pending_alarm = 0;
- ret = priv_link_status_update(priv);
- priv_unlock(priv);
- if (!ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
-}
-
-/**
* Handle interrupts from the NIC.
*
* @param[in] intr_handle
@@ -1292,12 +1058,9 @@ void
mlx5_dev_interrupt_handler(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
- struct priv *priv = dev->data->dev_private;
uint32_t events;
- priv_lock(priv);
- events = priv_dev_status_handler(priv);
- priv_unlock(priv);
+ events = mlx5_dev_status_handler(dev);
if (events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
if (events & (1 << RTE_ETH_EVENT_INTR_RMV))
@@ -1314,24 +1077,21 @@ static void
mlx5_dev_handler_socket(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
- struct priv *priv = dev->data->dev_private;
- priv_lock(priv);
- priv_socket_handle(priv);
- priv_unlock(priv);
+ mlx5_socket_handle(dev);
}
/**
* Uninstall interrupt handler.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to the rte_eth_dev structure.
+ * Pointer to Ethernet device.
*/
void
-priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_dev_interrupt_handler_uninstall(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+
if (dev->data->dev_conf.intr_conf.lsc ||
dev->data->dev_conf.intr_conf.rmv)
rte_intr_callback_unregister(&priv->intr_handle,
@@ -1339,10 +1099,6 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
if (priv->primary_socket)
rte_intr_callback_unregister(&priv->intr_handle_socket,
mlx5_dev_handler_socket, dev);
- if (priv->pending_alarm) {
- priv->pending_alarm = 0;
- rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
- }
priv->intr_handle.fd = 0;
priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
priv->intr_handle_socket.fd = 0;
@@ -1352,21 +1108,24 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
/**
* Install interrupt handler.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to the rte_eth_dev structure.
+ * Pointer to Ethernet device.
*/
void
-priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
{
- int rc, flags;
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ int flags;
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
- rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
- if (rc < 0) {
- INFO("failed to change file descriptor async event queue");
+ ret = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
+ if (ret) {
+ DRV_LOG(INFO,
+ "port %u failed to change file descriptor async event"
+ " queue",
+ dev->data->port_id);
dev->data->dev_conf.intr_conf.lsc = 0;
dev->data->dev_conf.intr_conf.rmv = 0;
}
@@ -1377,9 +1136,11 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
rte_intr_callback_register(&priv->intr_handle,
mlx5_dev_interrupt_handler, dev);
}
-
- rc = priv_socket_init(priv);
- if (!rc && priv->primary_socket) {
+ ret = mlx5_socket_init(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot initialise socket: %s",
+ dev->data->port_id, strerror(rte_errno));
+ else if (priv->primary_socket) {
priv->intr_handle_socket.fd = priv->primary_socket;
priv->intr_handle_socket.type = RTE_INTR_HANDLE_EXT;
rte_intr_callback_register(&priv->intr_handle_socket,
@@ -1388,41 +1149,18 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
}
/**
- * Change the link state (UP / DOWN).
- *
- * @param priv
- * Pointer to private data structure.
- * @param up
- * Nonzero for link up, otherwise link down.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_dev_set_link(struct priv *priv, int up)
-{
- return priv_set_flags(priv, ~IFF_UP, up ? IFF_UP : ~IFF_UP);
-}
-
-/**
* DPDK callback to bring the link DOWN.
*
* @param dev
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int err;
-
- priv_lock(priv);
- err = priv_dev_set_link(priv, 0);
- priv_unlock(priv);
- return err;
+ return mlx5_set_flags(dev, ~IFF_UP, ~IFF_UP);
}
/**
@@ -1432,63 +1170,68 @@ mlx5_set_link_down(struct rte_eth_dev *dev)
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int err;
-
- priv_lock(priv);
- err = priv_dev_set_link(priv, 1);
- priv_unlock(priv);
- return err;
+ return mlx5_set_flags(dev, ~IFF_UP, IFF_UP);
}
/**
* Configure the TX function to use.
*
- * @param priv
- * Pointer to private data structure.
* @param dev
- * Pointer to rte_eth_dev structure.
+ *   Pointer to Ethernet device structure.
*
* @return
* Pointer to selected Tx burst function.
*/
eth_tx_burst_t
-priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_select_tx_function(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
eth_tx_burst_t tx_pkt_burst = mlx5_tx_burst;
struct mlx5_dev_config *config = &priv->config;
uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
int tso = !!(tx_offloads & (DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO));
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO));
+ int swp = !!(tx_offloads & (DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
int vlan_insert = !!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT);
assert(priv != NULL);
/* Select appropriate TX function. */
- if (vlan_insert || tso)
+ if (vlan_insert || tso || swp)
return tx_pkt_burst;
if (config->mps == MLX5_MPW_ENHANCED) {
- if (priv_check_vec_tx_support(priv, dev) > 0) {
- if (priv_check_raw_vec_tx_support(priv, dev) > 0)
+ if (mlx5_check_vec_tx_support(dev) > 0) {
+ if (mlx5_check_raw_vec_tx_support(dev) > 0)
tx_pkt_burst = mlx5_tx_burst_raw_vec;
else
tx_pkt_burst = mlx5_tx_burst_vec;
- DEBUG("selected Enhanced MPW TX vectorized function");
+ DRV_LOG(DEBUG,
+ "port %u selected enhanced MPW Tx vectorized"
+ " function",
+ dev->data->port_id);
} else {
tx_pkt_burst = mlx5_tx_burst_empw;
- DEBUG("selected Enhanced MPW TX function");
+ DRV_LOG(DEBUG,
+ "port %u selected enhanced MPW Tx function",
+ dev->data->port_id);
}
} else if (config->mps && (config->txq_inline > 0)) {
tx_pkt_burst = mlx5_tx_burst_mpw_inline;
- DEBUG("selected MPW inline TX function");
+ DRV_LOG(DEBUG, "port %u selected MPW inline Tx function",
+ dev->data->port_id);
} else if (config->mps) {
tx_pkt_burst = mlx5_tx_burst_mpw;
- DEBUG("selected MPW TX function");
+ DRV_LOG(DEBUG, "port %u selected MPW Tx function",
+ dev->data->port_id);
}
return tx_pkt_burst;
}
@@ -1496,23 +1239,24 @@ priv_select_tx_function(struct priv *priv, struct rte_eth_dev *dev)
/**
* Configure the RX function to use.
*
- * @param priv
- * Pointer to private data structure.
* @param dev
- * Pointer to rte_eth_dev structure.
+ *   Pointer to Ethernet device structure.
*
* @return
* Pointer to selected Rx burst function.
*/
eth_rx_burst_t
-priv_select_rx_function(struct priv *priv, __rte_unused struct rte_eth_dev *dev)
+mlx5_select_rx_function(struct rte_eth_dev *dev)
{
eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst;
- assert(priv != NULL);
- if (priv_check_vec_rx_support(priv) > 0) {
+ assert(dev != NULL);
+ if (mlx5_check_vec_rx_support(dev) > 0) {
rx_pkt_burst = mlx5_rx_burst_vec;
- DEBUG("selected RX vectorized function");
+ DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
+ dev->data->port_id);
+ } else if (mlx5_mprq_enabled(dev)) {
+ rx_pkt_burst = mlx5_rx_burst_mprq;
}
return rx_pkt_burst;
}
@@ -1536,3 +1280,93 @@ mlx5_is_removed(struct rte_eth_dev *dev)
return 1;
return 0;
}
+
+/**
+ * Get port ID list of mlx5 instances sharing a common device.
+ *
+ * @param[in] dev
+ * Device to look for.
+ * @param[out] port_list
+ * Result buffer for collected port IDs.
+ * @param port_list_n
+ * Maximum number of entries in result buffer. If 0, @p port_list can be
+ * NULL.
+ *
+ * @return
+ * Number of matching instances regardless of the @p port_list_n
+ * parameter, 0 if none were found.
+ */
+unsigned int
+mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
+ unsigned int port_list_n)
+{
+ uint16_t id;
+ unsigned int n = 0;
+
+ RTE_ETH_FOREACH_DEV(id) {
+ struct rte_eth_dev *ldev = &rte_eth_devices[id];
+
+ if (!ldev->device ||
+ !ldev->device->driver ||
+ strcmp(ldev->device->driver->name, MLX5_DRIVER_NAME) ||
+ ldev->device != dev)
+ continue;
+ if (n < port_list_n)
+ port_list[n] = id;
+ n++;
+ }
+ return n;
+}
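
Usage mirrors the call site in mlx5_dev_infos_get() above: a first call with a zero-sized buffer returns the count, the second fills the list.

unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);

if (n) {
	uint16_t port_id[n];

	n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
	while (n--)
		printf("sibling port %u\n", port_id[n]);
}
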
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param ifindex
+ * Network interface index.
+ * @param[out] info
+ * Switch information object, populated in case of success.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_sysfs_switch_info(unsigned int ifindex, struct mlx5_switch_info *info)
+{
+ char ifname[IF_NAMESIZE];
+ FILE *file;
+ struct mlx5_switch_info data = { .master = 0, };
+ bool port_name_set = false;
+ bool port_switch_id_set = false;
+ char c;
+
+ if (!if_indextoname(ifindex, ifname)) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+
+ MKSTR(phys_port_name, "/sys/class/net/%s/phys_port_name",
+ ifname);
+ MKSTR(phys_switch_id, "/sys/class/net/%s/phys_switch_id",
+ ifname);
+
+ file = fopen(phys_port_name, "rb");
+ if (file != NULL) {
+ port_name_set =
+ fscanf(file, "%d%c", &data.port_name, &c) == 2 &&
+ c == '\n';
+ fclose(file);
+ }
+ file = fopen(phys_switch_id, "rb");
+ if (file == NULL) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ port_switch_id_set =
+ fscanf(file, "%" SCNx64 "%c", &data.switch_id, &c) == 2 &&
+ c == '\n';
+ fclose(file);
+ data.master = port_switch_id_set && !port_name_set;
+ data.representor = port_switch_id_set && port_name_set;
+ *info = data;
+ return 0;
+}
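
A hypothetical caller classifying a netdev from the two sysfs attributes parsed above ("eth0" is an example name):

struct mlx5_switch_info info;
unsigned int ifindex = if_nametoindex("eth0");

if (ifindex && mlx5_sysfs_switch_info(ifindex, &info) == 0) {
	if (info.representor)
		printf("representor %d of switch %" PRIx64 "\n",
		       info.port_name, info.switch_id);
	else if (info.master)
		printf("master device\n");
}
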
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 26002c4b..ca4625b6 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1,9 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#include <sys/queue.h>
+#include <stdalign.h>
+#include <stdint.h>
#include <string.h>
/* Verbs header. */
@@ -16,6 +18,9 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include <rte_common.h>
+#include <rte_ether.h>
+#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
@@ -27,388 +32,289 @@
#include "mlx5_prm.h"
#include "mlx5_glue.h"
-/* Define minimal priority for control plane flows. */
-#define MLX5_CTRL_FLOW_PRIORITY 4
-
-/* Internet Protocol versions. */
-#define MLX5_IPV4 4
-#define MLX5_IPV6 6
-
-#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-struct ibv_flow_spec_counter_action {
- int dummy;
-};
-#endif
-
/* Dev ops structure defined in mlx5.c */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;
-static int
-mlx5_flow_create_eth(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
-
-static int
-mlx5_flow_create_vlan(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
-
-static int
-mlx5_flow_create_ipv4(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
-
-static int
-mlx5_flow_create_ipv6(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
-
-static int
-mlx5_flow_create_udp(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
-
-static int
-mlx5_flow_create_tcp(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
-
-static int
-mlx5_flow_create_vxlan(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
-
-struct mlx5_flow_parse;
-
-static void
-mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
- unsigned int size);
-
-static int
-mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);
-
-static int
-mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser);
-
-/* Hash RX queue types. */
-enum hash_rxq_type {
- HASH_RXQ_TCPV4,
- HASH_RXQ_UDPV4,
- HASH_RXQ_IPV4,
- HASH_RXQ_TCPV6,
- HASH_RXQ_UDPV6,
- HASH_RXQ_IPV6,
- HASH_RXQ_ETH,
-};
-
-/* Initialization data for hash RX queue. */
-struct hash_rxq_init {
- uint64_t hash_fields; /* Fields that participate in the hash. */
- uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
- unsigned int flow_priority; /* Flow priority to use. */
- unsigned int ip_version; /* Internet protocol. */
+/* Pattern outer layer bits. */
+#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
+#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
+#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
+#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
+#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
+
+/* Pattern inner layer bits. */
+#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
+#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
+#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
+#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
+#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
+
+/* Pattern tunnel layer bits. */
+#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
+#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
+#define MLX5_FLOW_LAYER_GRE (1u << 14)
+#define MLX5_FLOW_LAYER_MPLS (1u << 15)
+
+/* Outer Masks. */
+#define MLX5_FLOW_LAYER_OUTER_L3 \
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+#define MLX5_FLOW_LAYER_OUTER_L4 \
+ (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
+#define MLX5_FLOW_LAYER_OUTER \
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
+ MLX5_FLOW_LAYER_OUTER_L4)
+
+/* Tunnel Masks. */
+#define MLX5_FLOW_LAYER_TUNNEL \
+ (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+ MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)
+
+/* Inner Masks. */
+#define MLX5_FLOW_LAYER_INNER_L3 \
+ (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
+#define MLX5_FLOW_LAYER_INNER_L4 \
+ (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
+#define MLX5_FLOW_LAYER_INNER \
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
+ MLX5_FLOW_LAYER_INNER_L4)
+
+/* Actions that modify the fate of matching traffic. */
+#define MLX5_FLOW_FATE_DROP (1u << 0)
+#define MLX5_FLOW_FATE_QUEUE (1u << 1)
+#define MLX5_FLOW_FATE_RSS (1u << 2)
+
+/* Modify a packet. */
+#define MLX5_FLOW_MOD_FLAG (1u << 0)
+#define MLX5_FLOW_MOD_MARK (1u << 1)
+#define MLX5_FLOW_MOD_COUNT (1u << 2)
+
+/* Possible L3 layer protocols for filtering. */
+#define MLX5_IP_PROTOCOL_TCP 6
+#define MLX5_IP_PROTOCOL_UDP 17
+#define MLX5_IP_PROTOCOL_GRE 47
+#define MLX5_IP_PROTOCOL_MPLS 147
+
+/* Priority reserved for default flows. */
+#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)
+
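
A worked example of how these bit-fields combine: a VXLAN-encapsulated pattern such as eth / ipv4 / udp / vxlan / eth accumulates the following layers value (illustrative only).

uint32_t layers = MLX5_FLOW_LAYER_OUTER_L2 |
		  MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
		  MLX5_FLOW_LAYER_OUTER_L4_UDP |
		  MLX5_FLOW_LAYER_VXLAN |
		  MLX5_FLOW_LAYER_INNER_L2;
/* (layers & MLX5_FLOW_LAYER_TUNNEL) != 0, so matching and RSS apply to
 * the inner headers from this point on. */
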
+enum mlx5_expansion {
+ MLX5_EXPANSION_ROOT,
+ MLX5_EXPANSION_ROOT_OUTER,
+ MLX5_EXPANSION_ROOT_ETH_VLAN,
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
+ MLX5_EXPANSION_OUTER_ETH,
+ MLX5_EXPANSION_OUTER_ETH_VLAN,
+ MLX5_EXPANSION_OUTER_VLAN,
+ MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV4_UDP,
+ MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_OUTER_IPV6,
+ MLX5_EXPANSION_OUTER_IPV6_UDP,
+ MLX5_EXPANSION_OUTER_IPV6_TCP,
+ MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE,
+ MLX5_EXPANSION_GRE,
+ MLX5_EXPANSION_MPLS,
+ MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_ETH_VLAN,
+ MLX5_EXPANSION_VLAN,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV4_UDP,
+ MLX5_EXPANSION_IPV4_TCP,
+ MLX5_EXPANSION_IPV6,
+ MLX5_EXPANSION_IPV6_UDP,
+ MLX5_EXPANSION_IPV6_TCP,
};
-/* Initialization data for hash RX queues. */
-const struct hash_rxq_init hash_rxq_init[] = {
- [HASH_RXQ_TCPV4] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
- IBV_RX_HASH_DST_IPV4 |
- IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
- .flow_priority = 0,
- .ip_version = MLX5_IPV4,
+/** Supported expansion of items. */
+static const struct rte_flow_expand_node mlx5_support_expansion[] = {
+ [MLX5_EXPANSION_ROOT] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_END,
},
- [HASH_RXQ_UDPV4] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
- IBV_RX_HASH_DST_IPV4 |
- IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
- .flow_priority = 0,
- .ip_version = MLX5_IPV4,
+ [MLX5_EXPANSION_ROOT_OUTER] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
+ MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_END,
},
- [HASH_RXQ_IPV4] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
- IBV_RX_HASH_DST_IPV4),
- .dpdk_rss_hf = (ETH_RSS_IPV4 |
- ETH_RSS_FRAG_IPV4),
- .flow_priority = 1,
- .ip_version = MLX5_IPV4,
+ [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
},
- [HASH_RXQ_TCPV6] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
- IBV_RX_HASH_DST_IPV6 |
- IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
- .flow_priority = 0,
- .ip_version = MLX5_IPV6,
+ [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_END,
},
- [HASH_RXQ_UDPV6] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
- IBV_RX_HASH_DST_IPV6 |
- IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP),
- .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
- .flow_priority = 0,
- .ip_version = MLX5_IPV6,
+ [MLX5_EXPANSION_OUTER_ETH] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6,
+ MLX5_EXPANSION_MPLS),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .rss_types = 0,
},
- [HASH_RXQ_IPV6] = {
- .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
- IBV_RX_HASH_DST_IPV6),
- .dpdk_rss_hf = (ETH_RSS_IPV6 |
- ETH_RSS_FRAG_IPV6),
- .flow_priority = 1,
- .ip_version = MLX5_IPV6,
+ [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .rss_types = 0,
},
- [HASH_RXQ_ETH] = {
- .hash_fields = 0,
- .dpdk_rss_hf = 0,
- .flow_priority = 2,
+ [MLX5_EXPANSION_OUTER_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
+ MLX5_EXPANSION_OUTER_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
},
-};
-
-/* Number of entries in hash_rxq_init[]. */
-const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
-
-/** Structure for holding counter stats. */
-struct mlx5_flow_counter_stats {
- uint64_t hits; /**< Number of packets matched by the rule. */
- uint64_t bytes; /**< Number of bytes matched by the rule. */
-};
-
-/** Structure for Drop queue. */
-struct mlx5_hrxq_drop {
- struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
- struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_wq *wq; /**< Verbs work queue. */
- struct ibv_cq *cq; /**< Verbs completion queue. */
-};
-
-/* Flows structures. */
-struct mlx5_flow {
- uint64_t hash_fields; /**< Fields that participate in the hash. */
- struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
- struct ibv_flow *ibv_flow; /**< Verbs flow. */
- struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
-};
-
-/* Drop flows structures. */
-struct mlx5_flow_drop {
- struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
- struct ibv_flow *ibv_flow; /**< Verbs flow. */
-};
-
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- uint32_t mark:1; /**< Set if the flow is marked. */
- uint32_t drop:1; /**< Drop queue. */
- uint16_t queues_n; /**< Number of entries in queue[]. */
- uint16_t (*queues)[]; /**< Queues indexes to use. */
- struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
- uint8_t rss_key[40]; /**< copy of the RSS key. */
- struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
- struct mlx5_flow_counter_stats counter_stats;/**<The counter stats. */
- struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
- /**< Flow with Rx queue. */
-};
-
-/** Static initializer for items. */
-#define ITEMS(...) \
- (const enum rte_flow_item_type []){ \
- __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
- }
-
-/** Structure to generate a simple graph of layers supported by the NIC. */
-struct mlx5_flow_items {
- /** List of possible actions for these items. */
- const enum rte_flow_action_type *const actions;
- /** Bit-masks corresponding to the possibilities for the item. */
- const void *mask;
- /**
- * Default bit-masks to use when item->mask is not provided. When
- * \default_mask is also NULL, the full supported bit-mask (\mask) is
- * used instead.
- */
- const void *default_mask;
- /** Bit-masks size in bytes. */
- const unsigned int mask_sz;
- /**
- * Conversion function from rte_flow to NIC specific flow.
- *
- * @param item
- * rte_flow item to convert.
- * @param default_mask
- * Default bit-masks to use when item->mask is not provided.
- * @param data
- * Internal structure to store the conversion.
- *
- * @return
- * 0 on success, negative value otherwise.
- */
- int (*convert)(const struct rte_flow_item *item,
- const void *default_mask,
- void *data);
- /** Size in bytes of the destination structure. */
- const unsigned int dst_sz;
- /** List of possible following items. */
- const enum rte_flow_item_type *const items;
-};
-
-/** Valid action for this PMD. */
-static const enum rte_flow_action_type valid_actions[] = {
- RTE_FLOW_ACTION_TYPE_DROP,
- RTE_FLOW_ACTION_TYPE_QUEUE,
- RTE_FLOW_ACTION_TYPE_MARK,
- RTE_FLOW_ACTION_TYPE_FLAG,
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- RTE_FLOW_ACTION_TYPE_COUNT,
-#endif
- RTE_FLOW_ACTION_TYPE_END,
-};
-
-/** Graph of supported items and associated actions. */
-static const struct mlx5_flow_items mlx5_flow_items[] = {
- [RTE_FLOW_ITEM_TYPE_END] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
- RTE_FLOW_ITEM_TYPE_VXLAN),
+ [MLX5_EXPANSION_OUTER_IPV4] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT
+ (MLX5_EXPANSION_OUTER_IPV4_UDP,
+ MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_GRE),
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER,
},
- [RTE_FLOW_ITEM_TYPE_ETH] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
- RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_IPV6),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_eth){
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- .type = -1,
- },
- .default_mask = &rte_flow_item_eth_mask,
- .mask_sz = sizeof(struct rte_flow_item_eth),
- .convert = mlx5_flow_create_eth,
- .dst_sz = sizeof(struct ibv_flow_spec_eth),
+ [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE),
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
},
- [RTE_FLOW_ITEM_TYPE_VLAN] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
- RTE_FLOW_ITEM_TYPE_IPV6),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_vlan){
- .tci = -1,
- },
- .default_mask = &rte_flow_item_vlan_mask,
- .mask_sz = sizeof(struct rte_flow_item_vlan),
- .convert = mlx5_flow_create_vlan,
- .dst_sz = 0,
+ [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
},
- [RTE_FLOW_ITEM_TYPE_IPV4] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_TCP),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_ipv4){
- .hdr = {
- .src_addr = -1,
- .dst_addr = -1,
- .type_of_service = -1,
- .next_proto_id = -1,
- },
- },
- .default_mask = &rte_flow_item_ipv4_mask,
- .mask_sz = sizeof(struct rte_flow_item_ipv4),
- .convert = mlx5_flow_create_ipv4,
- .dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),
+ [MLX5_EXPANSION_OUTER_IPV6] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT
+ (MLX5_EXPANSION_OUTER_IPV6_UDP,
+ MLX5_EXPANSION_OUTER_IPV6_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER,
},
- [RTE_FLOW_ITEM_TYPE_IPV6] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
- RTE_FLOW_ITEM_TYPE_TCP),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_ipv6){
- .hdr = {
- .src_addr = {
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- },
- .dst_addr = {
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff,
- },
- .vtc_flow = -1,
- .proto = -1,
- .hop_limits = -1,
- },
- },
- .default_mask = &rte_flow_item_ipv6_mask,
- .mask_sz = sizeof(struct rte_flow_item_ipv6),
- .convert = mlx5_flow_create_ipv6,
- .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
+ [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
+ MLX5_EXPANSION_VXLAN_GPE),
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
},
- [RTE_FLOW_ITEM_TYPE_UDP] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_udp){
- .hdr = {
- .src_port = -1,
- .dst_port = -1,
- },
- },
- .default_mask = &rte_flow_item_udp_mask,
- .mask_sz = sizeof(struct rte_flow_item_udp),
- .convert = mlx5_flow_create_udp,
- .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
},
- [RTE_FLOW_ITEM_TYPE_TCP] = {
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_tcp){
- .hdr = {
- .src_port = -1,
- .dst_port = -1,
- },
- },
- .default_mask = &rte_flow_item_tcp_mask,
- .mask_sz = sizeof(struct rte_flow_item_tcp),
- .convert = mlx5_flow_create_tcp,
- .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ [MLX5_EXPANSION_VXLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
},
- [RTE_FLOW_ITEM_TYPE_VXLAN] = {
- .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
- .actions = valid_actions,
- .mask = &(const struct rte_flow_item_vxlan){
- .vni = "\xff\xff\xff",
- },
- .default_mask = &rte_flow_item_vxlan_mask,
- .mask_sz = sizeof(struct rte_flow_item_vxlan),
- .convert = mlx5_flow_create_vxlan,
- .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
+ [MLX5_EXPANSION_VXLAN_GPE] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
+ MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+ },
+ [MLX5_EXPANSION_GRE] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
+ .type = RTE_FLOW_ITEM_TYPE_GRE,
+ },
+ [MLX5_EXPANSION_MPLS] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_MPLS,
+ },
+ [MLX5_EXPANSION_ETH] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ },
+ [MLX5_EXPANSION_ETH_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ },
+ [MLX5_EXPANSION_VLAN] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
+ MLX5_EXPANSION_IPV6),
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ },
+ [MLX5_EXPANSION_IPV4] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
+ MLX5_EXPANSION_IPV4_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER,
+ },
+ [MLX5_EXPANSION_IPV4_UDP] = {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
+ },
+ [MLX5_EXPANSION_IPV4_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
+ },
+ [MLX5_EXPANSION_IPV6] = {
+ .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
+ MLX5_EXPANSION_IPV6_TCP),
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER,
+ },
+ [MLX5_EXPANSION_IPV6_UDP] = {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
+ },
+ [MLX5_EXPANSION_IPV6_TCP] = {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
},
};
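For context, the expansion table above is a directed graph: each node's .next lists the item types allowed to follow it, while .rss_types gates a branch on the RSS hash types requested by the application. A hedged illustration of the intended outcome (not part of the patch itself):

/*
 * Illustration (assumed behaviour of the RSS expansion helper): the
 * pattern "eth / ipv4 / end", combined with an RSS action requesting
 * ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV4_TCP, expands into:
 *
 *   eth / ipv4 / end
 *   eth / ipv4 / udp / end
 *   eth / ipv4 / tcp / end
 *
 * so a single rte_flow rule can feed every hash Rx queue that the
 * requested RSS types require.
 */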
-/** Structure to pass to the conversion function. */
-struct mlx5_flow_parse {
- uint32_t inner; /**< Set once VXLAN is encountered. */
- uint32_t create:1;
- /**< Whether resources should remain after a validate. */
- uint32_t drop:1; /**< Target is a drop queue. */
- uint32_t mark:1; /**< Mark is present in the flow. */
- uint32_t count:1; /**< Count is present in the flow. */
- uint32_t mark_id; /**< Mark identifier. */
- uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
- uint16_t queues_n; /**< Number of entries in queue[]. */
- struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
- uint8_t rss_key[40]; /**< copy of the RSS key. */
- enum hash_rxq_type layer; /**< Last pattern layer detected. */
- struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
+/** Verbs specification and attributes of a flow rule. */
+struct mlx5_flow_verbs {
+ LIST_ENTRY(mlx5_flow_verbs) next;
+ unsigned int size; /**< Size of the attribute. */
struct {
- struct ibv_flow_attr *ibv_attr;
- /**< Pointer to Verbs attributes. */
- unsigned int offset;
- /**< Current position or total size of the attribute. */
- } queue[RTE_DIM(hash_rxq_init)];
+ struct ibv_flow_attr *attr;
+ /**< Pointer to the Verbs attributes buffer. */
+ uint8_t *specs; /**< Pointer to the specifications. */
+ };
+ struct ibv_flow *flow; /**< Verbs flow pointer. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
+ uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
+};
+
+/* Counters information. */
+struct mlx5_flow_counter {
+ LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
+ uint32_t shared:1; /**< Share counter ID with other flow rules. */
+ uint32_t ref_cnt:31; /**< Reference counter. */
+ uint32_t id; /**< Counter ID. */
+ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
+ uint64_t hits; /**< Number of packets matched by the rule. */
+ uint64_t bytes; /**< Number of bytes matched by the rule. */
+};
+
+/* Flow structure. */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ struct rte_flow_attr attributes; /**< User flow attribute. */
+ uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
+ uint32_t layers;
+ /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
+ uint32_t modifier;
+ /**< Bit-fields of present modifiers, see MLX5_FLOW_MOD_*. */
+ uint32_t fate;
+ /**< Bit-fields of present fate actions, see MLX5_FLOW_FATE_*. */
+ uint8_t l3_protocol; /**< Valid when l3_protocol_en is set. */
+ LIST_HEAD(verbs, mlx5_flow_verbs) verbs; /**< Verbs flows list. */
+ struct mlx5_flow_verbs *cur_verbs;
+ /**< Current Verbs flow structure being filled. */
+ struct mlx5_flow_counter *counter; /**< Holds Verbs flow counter. */
+ struct rte_flow_action_rss rss; /**< RSS context. */
+ uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
+ uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
+ void *nl_flow; /**< Netlink flow buffer if relevant. */
};
static const struct rte_flow_ops mlx5_flow_ops = {
@@ -416,12 +322,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {
.create = mlx5_flow_create,
.destroy = mlx5_flow_destroy,
.flush = mlx5_flow_flush,
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
- .query = mlx5_flow_query,
-#else
- .query = NULL,
-#endif
.isolate = mlx5_flow_isolate,
+ .query = mlx5_flow_query,
};
/* Convert FDIR request to Generic flow. */
@@ -436,9 +338,17 @@ struct mlx5_fdir {
struct rte_flow_item_ipv6 ipv6;
} l3;
union {
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ } l3_mask;
+ union {
struct rte_flow_item_udp udp;
struct rte_flow_item_tcp tcp;
} l4;
+ union {
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_tcp tcp;
+ } l4_mask;
struct rte_flow_action_queue queue;
};
@@ -448,778 +358,459 @@ struct ibv_spec_header {
uint16_t size;
};
-/**
- * Check support for a given item.
- *
- * @param item[in]
- * Item specification.
- * @param mask[in]
- * Bit-masks covering supported fields to compare with spec, last and mask in
- * \item.
- * @param size
- * Bit-Mask size in bytes.
- *
- * @return
- * 0 on success.
+/*
+ * Number of sub priorities.
+ * For each kind of pattern matching, i.e. L2, L3, L4, to have a correct
+ * match on the NIC (firmware dependent), L4 must have the highest priority,
+ * followed by L3 and ending with L2.
*/
-static int
-mlx5_flow_item_validate(const struct rte_flow_item *item,
- const uint8_t *mask, unsigned int size)
-{
- int ret = 0;
-
- if (!item->spec && (item->mask || item->last))
- return -1;
- if (item->spec && !item->mask) {
- unsigned int i;
- const uint8_t *spec = item->spec;
-
- for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
- }
- if (item->last && !item->mask) {
- unsigned int i;
- const uint8_t *spec = item->last;
-
- for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
- }
- if (item->mask) {
- unsigned int i;
- const uint8_t *spec = item->spec;
+#define MLX5_PRIORITY_MAP_L2 2
+#define MLX5_PRIORITY_MAP_L3 1
+#define MLX5_PRIORITY_MAP_L4 0
+#define MLX5_PRIORITY_MAP_MAX 3
+
+/* Map of Verbs to Flow priority with 8 Verbs priorities. */
+static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
+};
- for (i = 0; i < size; ++i)
- if ((spec[i] | mask[i]) != mask[i])
- return -1;
- }
- if (item->spec && item->last) {
- uint8_t spec[size];
- uint8_t last[size];
- const uint8_t *apply = mask;
- unsigned int i;
+/* Map of Verbs to Flow priority with 16 Verbs priorities. */
+static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
+ { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
+ { 9, 10, 11 }, { 12, 13, 14 },
+};
- if (item->mask)
- apply = item->mask;
- for (i = 0; i < size; ++i) {
- spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
- last[i] = ((const uint8_t *)item->last)[i] & apply[i];
- }
- ret = memcmp(spec, last, size);
- }
- return ret;
-}
+/* Tunnel information. */
+struct mlx5_flow_tunnel_info {
+ uint32_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
+ uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
+};
-/**
- * Copy the RSS configuration from the user ones, of the rss_conf is null,
- * uses the driver one.
- *
- * @param priv
- * Pointer to private structure.
- * @param parser
- * Internal parser structure.
- * @param rss_conf
- * User RSS configuration to save.
- *
- * @return
- * 0 on success, errno value on failure.
- */
-static int
-priv_flow_convert_rss_conf(struct priv *priv,
- struct mlx5_flow_parse *parser,
- const struct rte_eth_rss_conf *rss_conf)
-{
- /*
- * This function is also called at the beginning of
- * priv_flow_convert_actions() to initialize the parser with the
- * device default RSS configuration.
- */
- (void)priv;
- if (rss_conf) {
- if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
- return EINVAL;
- if (rss_conf->rss_key_len != 40)
- return EINVAL;
- if (rss_conf->rss_key_len && rss_conf->rss_key) {
- parser->rss_conf.rss_key_len = rss_conf->rss_key_len;
- memcpy(parser->rss_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
- parser->rss_conf.rss_key = parser->rss_key;
- }
- parser->rss_conf.rss_hf = rss_conf->rss_hf;
- }
- return 0;
-}
+static struct mlx5_flow_tunnel_info tunnels_info[] = {
+ {
+ .tunnel = MLX5_FLOW_LAYER_VXLAN,
+ .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
+ .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_GRE,
+ .ptype = RTE_PTYPE_TUNNEL_GRE,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
+ },
+ {
+ .tunnel = MLX5_FLOW_LAYER_MPLS,
+ .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
+ },
+};
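A hedged sketch of how such a table can be consumed; the helper below is hypothetical and not part of this patch (rte_common.h and rte_mbuf_ptype.h assumed included):

/* Hypothetical lookup: translate flow layer bits into a tunnel ptype. */
static uint32_t
flow_layers_to_tunnel_ptype(uint32_t layers)
{
        unsigned int i;

        /* First match wins: MPLS-over-UDP is listed before plain MPLS. */
        for (i = 0; i < RTE_DIM(tunnels_info); ++i)
                if ((layers & tunnels_info[i].tunnel) ==
                    tunnels_info[i].tunnel)
                        return tunnels_info[i].ptype;
        return RTE_PTYPE_UNKNOWN;
}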
/**
- * Extract attribute to the parser.
+ * Discover the maximum number of priorities available.
*
- * @param priv
- * Pointer to private structure.
- * @param[in] attr
- * Flow rule attributes.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * number of supported flow priorities on success, a negative errno
+ * value otherwise and rte_errno is set.
*/
-static int
-priv_flow_convert_attributes(struct priv *priv,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+int
+mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
- (void)priv;
- (void)parser;
- if (attr->group) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "groups are not supported");
- return -rte_errno;
- }
- if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- NULL,
- "priorities are not supported");
+ struct {
+ struct ibv_flow_attr attr;
+ struct ibv_flow_spec_eth eth;
+ struct ibv_flow_spec_action_drop drop;
+ } flow_attr = {
+ .attr = {
+ .num_of_specs = 2,
+ },
+ .eth = {
+ .type = IBV_FLOW_SPEC_ETH,
+ .size = sizeof(struct ibv_flow_spec_eth),
+ },
+ .drop = {
+ .size = sizeof(struct ibv_flow_spec_action_drop),
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ },
+ };
+ struct ibv_flow *flow;
+ struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
+ uint16_t vprio[] = { 8, 16 };
+ int i;
+ int priority = 0;
+
+ if (!drop) {
+ rte_errno = ENOTSUP;
return -rte_errno;
}
- if (attr->egress) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL,
- "egress is not supported");
- return -rte_errno;
+ for (i = 0; i != RTE_DIM(vprio); i++) {
+ flow_attr.attr.priority = vprio[i] - 1;
+ flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
+ if (!flow)
+ break;
+ claim_zero(mlx5_glue->destroy_flow(flow));
+ priority = vprio[i];
}
- if (!attr->ingress) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "only ingress is supported");
+ switch (priority) {
+ case 8:
+ priority = RTE_DIM(priority_map_3);
+ break;
+ case 16:
+ priority = RTE_DIM(priority_map_5);
+ break;
+ default:
+ rte_errno = ENOTSUP;
+ DRV_LOG(ERR,
+ "port %u verbs maximum priority: %d expected 8/16",
+ dev->data->port_id, vprio[i]);
return -rte_errno;
}
- return 0;
+ mlx5_hrxq_drop_release(dev);
+ DRV_LOG(INFO, "port %u flow maximum priority: %d",
+ dev->data->port_id, priority);
+ return priority;
}
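A plausible call-site sketch; the wiring is an assumption, although the cached field name matches the priv->config.flow_prio used below:

/* Sketch: probe once at start-up, cache the result for later mapping. */
static int
flow_prio_init(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        int ret = mlx5_flow_discover_priorities(dev);

        if (ret < 0)
                return ret; /* rte_errno already set by the probe. */
        priv->config.flow_prio = ret;
        return 0;
}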
/**
- * Extract actions request to the parser.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] actions
- * Associated actions (list terminated by the END action).
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
+ * Adjust flow priority.
*
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param flow
+ * Pointer to an rte flow.
*/
-static int
-priv_flow_convert_actions(struct priv *priv,
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+static void
+mlx5_flow_adjust_priority(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- /*
- * Add default RSS configuration necessary for Verbs to create QP even
- * if no RSS is necessary.
- */
- priv_flow_convert_rss_conf(priv, parser,
- (const struct rte_eth_rss_conf *)
- &priv->rss_conf);
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
- if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
- continue;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- parser->drop = 1;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- const struct rte_flow_action_queue *queue =
- (const struct rte_flow_action_queue *)
- actions->conf;
- uint16_t n;
- uint16_t found = 0;
-
- if (!queue || (queue->index > (priv->rxqs_n - 1)))
- goto exit_action_not_supported;
- for (n = 0; n < parser->queues_n; ++n) {
- if (parser->queues[n] == queue->index) {
- found = 1;
- break;
- }
- }
- if (parser->queues_n > 1 && !found) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "queue action not in RSS queues");
- return -rte_errno;
- }
- if (!found) {
- parser->queues_n = 1;
- parser->queues[0] = queue->index;
- }
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
- const struct rte_flow_action_rss *rss =
- (const struct rte_flow_action_rss *)
- actions->conf;
- uint16_t n;
-
- if (!rss || !rss->num) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "no valid queues");
- return -rte_errno;
- }
- if (parser->queues_n == 1) {
- uint16_t found = 0;
-
- assert(parser->queues_n);
- for (n = 0; n < rss->num; ++n) {
- if (parser->queues[0] ==
- rss->queue[n]) {
- found = 1;
- break;
- }
- }
- if (!found) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "queue action not in RSS"
- " queues");
- return -rte_errno;
- }
- }
- for (n = 0; n < rss->num; ++n) {
- if (rss->queue[n] >= priv->rxqs_n) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "queue id > number of"
- " queues");
- return -rte_errno;
- }
- }
- for (n = 0; n < rss->num; ++n)
- parser->queues[n] = rss->queue[n];
- parser->queues_n = rss->num;
- if (priv_flow_convert_rss_conf(priv, parser,
- rss->rss_conf)) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "wrong RSS configuration");
- return -rte_errno;
- }
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
- const struct rte_flow_action_mark *mark =
- (const struct rte_flow_action_mark *)
- actions->conf;
-
- if (!mark) {
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "mark must be defined");
- return -rte_errno;
- } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "mark must be between 0"
- " and 16777199");
- return -rte_errno;
- }
- parser->mark = 1;
- parser->mark_id = mark->id;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
- parser->mark = 1;
- } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
- priv->config.flow_counter_en) {
- parser->count = 1;
- } else {
- goto exit_action_not_supported;
- }
- }
- if (parser->drop && parser->mark)
- parser->mark = 0;
- if (!parser->queues_n && !parser->drop) {
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "no valid action");
- return -rte_errno;
+ struct priv *priv = dev->data->dev_private;
+ uint32_t priority = flow->attributes.priority;
+ uint32_t subpriority = flow->cur_verbs->attr->priority;
+
+ switch (priv->config.flow_prio) {
+ case RTE_DIM(priority_map_3):
+ priority = priority_map_3[priority][subpriority];
+ break;
+ case RTE_DIM(priority_map_5):
+ priority = priority_map_5[priority][subpriority];
+ break;
}
- return 0;
-exit_action_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "action not supported");
- return -rte_errno;
+ flow->cur_verbs->attr->priority = priority;
}
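A worked example of the mapping, assuming 16 Verbs priorities were discovered (so priority_map_5 is in effect):

/*
 * Example: a flow created with attr->priority == 1 whose deepest layer
 * is L3 gets subpriority MLX5_PRIORITY_MAP_L3 == 1 and is installed at
 * Verbs priority priority_map_5[1][1] == 4. Its L4 sibling would get
 * priority_map_5[1][MLX5_PRIORITY_MAP_L4] == 3, i.e. matched first,
 * as the sub-priority comment above requires.
 */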
/**
- * Validate items.
+ * Get a flow counter.
*
- * @param priv
- * Pointer to private structure.
- * @param[in] items
- * Pattern specification (list terminated by the END pattern item).
- * @param[out] error
- * Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] shared
+ * Indicate if this counter is shared with other flows.
+ * @param[in] id
+ * Counter identifier.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
+ * A pointer to the counter, NULL otherwise and rte_errno is set.
*/
-static int
-priv_flow_convert_items_validate(struct priv *priv,
- const struct rte_flow_item items[],
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+static struct mlx5_flow_counter *
+mlx5_flow_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
- const struct mlx5_flow_items *cur_item = mlx5_flow_items;
- unsigned int i;
-
- (void)priv;
- /* Initialise the offsets to start after verbs attribute. */
- for (i = 0; i != hash_rxq_init_n; ++i)
- parser->queue[i].offset = sizeof(struct ibv_flow_attr);
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- const struct mlx5_flow_items *token = NULL;
- unsigned int n;
- int err;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter *cnt;
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ if (!cnt->shared || cnt->shared != shared)
continue;
- for (i = 0;
- cur_item->items &&
- cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
- ++i) {
- if (cur_item->items[i] == items->type) {
- token = &mlx5_flow_items[items->type];
- break;
- }
- }
- if (!token)
- goto exit_item_not_supported;
- cur_item = token;
- err = mlx5_flow_item_validate(items,
- (const uint8_t *)cur_item->mask,
- cur_item->mask_sz);
- if (err)
- goto exit_item_not_supported;
- if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
- if (parser->inner) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "cannot recognize multiple"
- " VXLAN encapsulations");
- return -rte_errno;
- }
- parser->inner = IBV_FLOW_SPEC_INNER;
- }
- if (parser->drop) {
- parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
- } else {
- for (n = 0; n != hash_rxq_init_n; ++n)
- parser->queue[n].offset += cur_item->dst_sz;
- }
- }
- if (parser->drop) {
- parser->queue[HASH_RXQ_ETH].offset +=
- sizeof(struct ibv_flow_spec_action_drop);
- }
- if (parser->mark) {
- for (i = 0; i != hash_rxq_init_n; ++i)
- parser->queue[i].offset +=
- sizeof(struct ibv_flow_spec_action_tag);
+ if (cnt->id != id)
+ continue;
+ cnt->ref_cnt++;
+ return cnt;
}
- if (parser->count) {
- unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+
+ struct mlx5_flow_counter tmpl = {
+ .shared = shared,
+ .id = id,
+ .cs = mlx5_glue->create_counter_set
+ (priv->ctx,
+ &(struct ibv_counter_set_init_attr){
+ .counter_set_id = id,
+ }),
+ .hits = 0,
+ .bytes = 0,
+ };
- for (i = 0; i != hash_rxq_init_n; ++i)
- parser->queue[i].offset += size;
+ if (!tmpl.cs) {
+ rte_errno = errno;
+ return NULL;
}
- return 0;
-exit_item_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
- return -rte_errno;
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ *cnt = tmpl;
+ LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ return cnt;
+#endif
+ rte_errno = ENOTSUP;
+ return NULL;
}
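A hedged usage sketch of the counter cache (error handling trimmed; note the lookup only ever matches counters created as shared):

static int
flow_counter_example(struct rte_eth_dev *dev)
{
        /* Both calls refer to shared counter ID 5. */
        struct mlx5_flow_counter *a = mlx5_flow_counter_new(dev, 1, 5);
        struct mlx5_flow_counter *b = mlx5_flow_counter_new(dev, 1, 5);

        if (!a || !b)
                return -rte_errno; /* ENOTSUP without counter-set support. */
        /* b took the LIST_FOREACH path: same object, reference bumped. */
        mlx5_flow_counter_release(b);
        mlx5_flow_counter_release(a); /* Verbs set destroyed on last ref. */
        return 0;
}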
/**
- * Allocate memory space to store verbs flow attributes.
- *
- * @param priv
- * Pointer to private structure.
- * @param[in] priority
- * Flow priority.
- * @param[in] size
- * Amount of byte to allocate.
- * @param[out] error
- * Perform verbose error reporting if not NULL.
+ * Release a flow counter.
*
- * @return
- * A verbs flow attribute on success, NULL otherwise.
+ * @param[in] counter
+ * Pointer to the counter handle.
*/
-static struct ibv_flow_attr*
-priv_flow_convert_allocate(struct priv *priv,
- unsigned int priority,
- unsigned int size,
- struct rte_flow_error *error)
+static void
+mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
{
- struct ibv_flow_attr *ibv_attr;
-
- (void)priv;
- ibv_attr = rte_calloc(__func__, 1, size, 0);
- if (!ibv_attr) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "cannot allocate verbs spec attributes.");
- return NULL;
+ if (--counter->ref_cnt == 0) {
+ claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
+ LIST_REMOVE(counter, next);
+ rte_free(counter);
}
- ibv_attr->priority = priority;
- return ibv_attr;
}
/**
- * Finalise verbs flow attributes.
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] attributes
+ * Pointer to flow attributes.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[out] error
+ * Pointer to error structure.
*
- * @param priv
- * Pointer to private structure.
- * @param[in, out] parser
- * Internal parser structure.
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+static int
+mlx5_flow_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
{
- const unsigned int ipv4 =
- hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
- const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6;
- const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
- const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4;
- const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4;
- const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
- unsigned int i;
-
- (void)priv;
- if (parser->layer == HASH_RXQ_ETH) {
- goto fill;
- } else {
- /*
- * This layer becomes useless as the pattern define under
- * layers.
- */
- rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr);
- parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
- }
- /* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4. */
- for (i = ohmin; i != (ohmax + 1); ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
- }
- /* Remove impossible flow according to the RSS configuration. */
- if (hash_rxq_init[parser->layer].dpdk_rss_hf &
- parser->rss_conf.rss_hf) {
- /* Remove any other flow. */
- for (i = hmin; i != (hmax + 1); ++i) {
- if ((i == parser->layer) ||
- (!parser->queue[i].ibv_attr))
- continue;
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
- }
- } else if (!parser->queue[ip].ibv_attr) {
- /* no RSS possible with the current configuration. */
- parser->queues_n = 1;
- return;
- }
-fill:
- /*
- * Fill missing layers in verbs specifications, or compute the correct
- * offset to allocate the memory space for the attributes and
- * specifications.
- */
- for (i = 0; i != hash_rxq_init_n - 1; ++i) {
- union {
- struct ibv_flow_spec_ipv4_ext ipv4;
- struct ibv_flow_spec_ipv6 ipv6;
- struct ibv_flow_spec_tcp_udp udp_tcp;
- } specs;
- void *dst;
- uint16_t size;
-
- if (i == parser->layer)
- continue;
- if (parser->layer == HASH_RXQ_ETH) {
- if (hash_rxq_init[i].ip_version == MLX5_IPV4) {
- size = sizeof(struct ibv_flow_spec_ipv4_ext);
- specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){
- .type = IBV_FLOW_SPEC_IPV4_EXT,
- .size = size,
- };
- } else {
- size = sizeof(struct ibv_flow_spec_ipv6);
- specs.ipv6 = (struct ibv_flow_spec_ipv6){
- .type = IBV_FLOW_SPEC_IPV6,
- .size = size,
- };
- }
- if (parser->queue[i].ibv_attr) {
- dst = (void *)((uintptr_t)
- parser->queue[i].ibv_attr +
- parser->queue[i].offset);
- memcpy(dst, &specs, size);
- ++parser->queue[i].ibv_attr->num_of_specs;
- }
- parser->queue[i].offset += size;
- }
- if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) ||
- (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) {
- size = sizeof(struct ibv_flow_spec_tcp_udp);
- specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) {
- .type = ((i == HASH_RXQ_UDPV4 ||
- i == HASH_RXQ_UDPV6) ?
- IBV_FLOW_SPEC_UDP :
- IBV_FLOW_SPEC_TCP),
- .size = size,
- };
- if (parser->queue[i].ibv_attr) {
- dst = (void *)((uintptr_t)
- parser->queue[i].ibv_attr +
- parser->queue[i].offset);
- memcpy(dst, &specs, size);
- ++parser->queue[i].ibv_attr->num_of_specs;
- }
- parser->queue[i].offset += size;
- }
- }
+ uint32_t priority_max =
+ ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priority out of range");
+ if (attributes->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ if (!attributes->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "ingress attribute is mandatory");
+ flow->attributes = *attributes;
+ if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
+ flow->attributes.priority = priority_max;
+ return 0;
}
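For illustration, an attribute block that passes every check above (a sketch, not from the patch):

/* Ingress-only rule with the reserved (driver-chosen) priority. */
static const struct rte_flow_attr attr_example = {
        .group = 0,                      /* non-zero groups are refused */
        .priority = MLX5_FLOW_PRIO_RSVD, /* rewritten to priority_max */
        .ingress = 1,                    /* mandatory */
        .egress = 0,                     /* refused if set */
        .transfer = 0,                   /* refused if set */
};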
/**
- * Validate and convert a flow supported by the NIC.
+ * Verify the @p item specifications (spec, last, mask) are compatible with the
+ * NIC capabilities.
*
- * @param priv
- * Pointer to private structure.
- * @param[in] attr
- * Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END action).
+ * @param[in] item
+ * Item specification.
+ * @param[in] mask
+ * @p item->mask or flow default bit-masks.
+ * @param[in] nic_mask
+ * Bit-masks covering the fields supported by the NIC to compare with the
+ * user mask.
+ * @param[in] size
+ * Bit-mask size in bytes.
* @param[out] error
- * Perform verbose error reporting if not NULL.
- * @param[in, out] parser
- * Internal parser structure.
+ * Pointer to error structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert(struct priv *priv,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+mlx5_flow_item_acceptable(const struct rte_flow_item *item,
+ const uint8_t *mask,
+ const uint8_t *nic_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
{
- const struct mlx5_flow_items *cur_item = mlx5_flow_items;
unsigned int i;
- int ret;
- /* First step. Validate the attributes, items and actions. */
- *parser = (struct mlx5_flow_parse){
- .create = parser->create,
- .layer = HASH_RXQ_ETH,
- .mark_id = MLX5_FLOW_MARK_DEFAULT,
- };
- ret = priv_flow_convert_attributes(priv, attr, error, parser);
- if (ret)
- return ret;
- ret = priv_flow_convert_actions(priv, actions, error, parser);
- if (ret)
- return ret;
- ret = priv_flow_convert_items_validate(priv, items, error, parser);
- if (ret)
- return ret;
- priv_flow_convert_finalise(priv, parser);
- /*
- * Second step.
- * Allocate the memory space to store verbs specifications.
- */
- if (parser->drop) {
- unsigned int priority =
- attr->priority +
- hash_rxq_init[HASH_RXQ_ETH].flow_priority;
- unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;
-
- parser->queue[HASH_RXQ_ETH].ibv_attr =
- priv_flow_convert_allocate(priv, priority,
- offset, error);
- if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
- return ENOMEM;
- parser->queue[HASH_RXQ_ETH].offset =
- sizeof(struct ibv_flow_attr);
- } else {
- for (i = 0; i != hash_rxq_init_n; ++i) {
- unsigned int priority =
- attr->priority +
- hash_rxq_init[i].flow_priority;
- unsigned int offset;
-
- if (!(parser->rss_conf.rss_hf &
- hash_rxq_init[i].dpdk_rss_hf) &&
- (i != HASH_RXQ_ETH))
- continue;
- offset = parser->queue[i].offset;
- parser->queue[i].ibv_attr =
- priv_flow_convert_allocate(priv, priority,
- offset, error);
- if (!parser->queue[i].ibv_attr)
- goto exit_enomem;
- parser->queue[i].offset = sizeof(struct ibv_flow_attr);
- }
- }
- /* Third step. Conversion parse, fill the specifications. */
- parser->inner = 0;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
- continue;
- cur_item = &mlx5_flow_items[items->type];
- ret = cur_item->convert(items,
- (cur_item->default_mask ?
- cur_item->default_mask :
- cur_item->mask),
- parser);
- if (ret) {
- rte_flow_error_set(error, ret,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
- goto exit_free;
- }
- }
- if (parser->mark)
- mlx5_flow_create_flag_mark(parser, parser->mark_id);
- if (parser->count && parser->create) {
- mlx5_flow_create_count(priv, parser);
- if (!parser->cs)
- goto exit_count_error;
- }
- /*
- * Last step. Complete missing specification to reach the RSS
- * configuration.
- */
- if (!parser->drop) {
- priv_flow_convert_finalise(priv, parser);
- } else {
- parser->queue[HASH_RXQ_ETH].ibv_attr->priority =
- attr->priority +
- hash_rxq_init[parser->layer].flow_priority;
- }
-exit_free:
- /* Only verification is expected, all resources should be released. */
- if (!parser->create) {
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser->queue[i].ibv_attr) {
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
- }
- }
- }
- return ret;
-exit_enomem:
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser->queue[i].ibv_attr) {
- rte_free(parser->queue[i].ibv_attr);
- parser->queue[i].ibv_attr = NULL;
+ assert(nic_mask);
+ for (i = 0; i < size; ++i)
+ if ((nic_mask[i] | mask[i]) != nic_mask[i])
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "mask enables non supported"
+ " bits");
+ if (!item->spec && (item->mask || item->last))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "mask/last without a spec is not"
+ " supported");
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
+ last[i] = ((const uint8_t *)item->last)[i] & mask[i];
}
+ ret = memcmp(spec, last, size);
+ if (ret != 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "range is not supported");
}
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot allocate verbs spec attributes.");
- return ret;
-exit_count_error:
- rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "cannot create counter.");
- return rte_errno;
+ return 0;
}
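A worked example of the acceptability rule (hedged; the field choice is illustrative):

/*
 * The IPv4 nic_mask used by mlx5_flow_item_ipv4() below only covers
 * src/dst address, TOS and protocol. This user mask also enables
 * time_to_live, so for that byte (nic_mask[i] | mask[i]) != nic_mask[i]
 * and the item is rejected with "mask enables unsupported bits".
 */
static const struct rte_flow_item_ipv4 bad_mask = {
        .hdr = {
                .src_addr = RTE_BE32(0xffffffff),
                .time_to_live = 0xff, /* outside the NIC capabilities */
        },
};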
/**
- * Copy the specification created into the flow.
+ * Add a verbs item specification into @p flow.
*
- * @param parser
- * Internal parser structure.
- * @param src
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] src
* Create specification.
- * @param size
+ * @param[in] size
* Size in bytes of the specification to copy.
*/
static void
-mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
- unsigned int size)
+mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
{
- unsigned int i;
- void *dst;
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
- /* Specification must be the same l3 type or none. */
- if (parser->layer == HASH_RXQ_ETH ||
- (hash_rxq_init[parser->layer].ip_version ==
- hash_rxq_init[i].ip_version) ||
- (hash_rxq_init[i].ip_version == 0)) {
- dst = (void *)((uintptr_t)parser->queue[i].ibv_attr +
- parser->queue[i].offset);
- memcpy(dst, src, size);
- ++parser->queue[i].ibv_attr->num_of_specs;
- parser->queue[i].offset += size;
- }
+ if (verbs->specs) {
+ void *dst;
+
+ dst = (void *)(verbs->specs + verbs->size);
+ memcpy(dst, src, size);
+ ++verbs->attr->num_of_specs;
}
+ verbs->size += size;
}
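The deferred write above enables a two-pass scheme; a hedged sketch of the caller side (assumed usage, following the converters' @return contract):

/*
 * Pass 1: converters run with flow_size == 0, so they only validate
 * and return the bytes they need while verbs->specs stays NULL.
 * Pass 2: once a buffer of the accumulated size backs cur_verbs
 * (attr followed by specs), the same converters are run again and
 * mlx5_flow_spec_verbs_add() really copies the data.
 */
int size = mlx5_flow_item_eth(item, flow, 0, error); /* validate only */

if (size < 0)
        return size;
/* ... allocate "size" bytes of spec space, then convert again ... */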
/**
- * Convert Ethernet item to Verbs specification.
+ * Adjust verbs hash fields according to the @p flow information.
+ *
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] tunnel
+ * 1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ * ETH_RSS_* types.
+ * @param[in] hash_fields
+ * Item hash fields.
+ */
+static void
+mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
+ int tunnel __rte_unused,
+ uint32_t layer_types, uint64_t hash_fields)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
+ if (flow->rss.level == 2 && !tunnel)
+ hash_fields = 0;
+ else if (flow->rss.level < 2 && tunnel)
+ hash_fields = 0;
+#endif
+ if (!(flow->rss.types & layer_types))
+ hash_fields = 0;
+ flow->cur_verbs->hash_fields |= hash_fields;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with the returned size should be made.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_eth(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
- const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const unsigned int size = sizeof(struct ibv_flow_spec_eth);
struct ibv_flow_spec_eth eth = {
- .type = parser->inner | IBV_FLOW_SPEC_ETH,
- .size = eth_size,
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- /* Don't update layer for the inner pattern. */
- if (!parser->inner)
- parser->layer = HASH_RXQ_ETH;
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L2 layers already configured");
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ if (ret)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ if (size > flow_size)
+ return size;
if (spec) {
unsigned int i;
- if (!mask)
- mask = default_mask;
memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
eth.val.ether_type = spec->type;
@@ -1233,80 +824,211 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
}
eth.val.ether_type &= eth.mask.ether_type;
}
- mlx5_flow_create_copy(parser, &eth, eth_size);
- return 0;
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ mlx5_flow_spec_verbs_add(flow, &eth, size);
+ return size;
+}
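An example item this converter accepts (a sketch; the initializer style follows the nic_mask above):

/* Match one destination MAC; any source address, any EtherType. */
static const struct rte_flow_item_eth eth_spec = {
        .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
};
static const struct rte_flow_item_eth eth_mask = {
        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
static const struct rte_flow_item item_example = {
        .type = RTE_FLOW_ITEM_TYPE_ETH,
        .spec = &eth_spec,
        .mask = &eth_mask,
};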
+
+/**
+ * Update the VLAN tag in the Verbs Ethernet specification.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] eth
+ * Verbs structure containing the VLAN information to copy.
+ */
+static void
+mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
+ struct ibv_flow_spec_eth *eth)
+{
+ unsigned int i;
+ const enum ibv_flow_spec_type search = eth->type;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ struct ibv_flow_spec_eth *e =
+ (struct ibv_flow_spec_eth *)hdr;
+
+ e->val.vlan_tag = eth->val.vlan_tag;
+ e->mask.vlan_tag = eth->mask.vlan_tag;
+ e->val.ether_type = eth->val.ether_type;
+ e->mask.ether_type = eth->mask.ether_type;
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
}
/**
- * Convert VLAN item to Verbs specification.
+ * Convert the @p item into @p flow (or by updating the already present
+ * Ethernet Verbs) specification after ensuring the NIC will understand and
+ * process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with the returned size should be made.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_vlan(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
- struct ibv_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
-
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(0x0fff),
+ .inner_type = RTE_BE16(0xffff),
+ };
+ unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+ const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+ const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+
+ if (flow->layers & vlanm)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN layer already configured");
+ else if ((flow->layers & l34m) != 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L2 layer cannot follow L3/L4 layer");
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan), error);
+ if (ret)
+ return ret;
if (spec) {
- unsigned int i;
- if (!mask)
- mask = default_mask;
-
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!parser->queue[i].ibv_attr)
- continue;
-
- eth = (void *)((uintptr_t)parser->queue[i].ibv_attr +
- parser->queue[i].offset - eth_size);
- eth->val.vlan_tag = spec->tci;
- eth->mask.vlan_tag = mask->tci;
- eth->val.vlan_tag &= eth->mask.vlan_tag;
- }
+ eth.val.vlan_tag = spec->tci;
+ eth.mask.vlan_tag = mask->tci;
+ eth.val.vlan_tag &= eth.mask.vlan_tag;
+ eth.val.ether_type = spec->inner_type;
+ eth.mask.ether_type = mask->inner_type;
+ eth.val.ether_type &= eth.mask.ether_type;
}
- return 0;
+ /*
+ * From the Verbs perspective, an empty VLAN is equivalent
+ * to a packet without a VLAN layer.
+ */
+ if (!eth.mask.vlan_tag)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "VLAN cannot be empty");
+ if (!(flow->layers & l2m)) {
+ if (size <= flow_size) {
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ mlx5_flow_spec_verbs_add(flow, &eth, size);
+ }
+ } else {
+ if (flow->cur_verbs)
+ mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
+ &eth);
+ size = 0; /* Only an update is done in eth specification. */
+ }
+ flow->layers |= tunnel ?
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
+ return size;
}
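Worth noting: an "eth / vlan" pattern yields a single Verbs ETH spec. A hedged sketch of a VLAN item taking the update path:

/*
 * With the L2 layer already present, this item appends no new spec:
 * mlx5_flow_item_vlan_update() patches vlan_tag/ether_type into the
 * stored ETH spec and the converter returns 0.
 */
static const struct rte_flow_item_vlan vlan_spec = {
        .tci = RTE_BE16(0x0123),
};
static const struct rte_flow_item_vlan vlan_mask = {
        .tci = RTE_BE16(0x0fff), /* VID bits only, per the nic_mask above */
};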
/**
- * Convert IPv4 item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with the returned size should be made.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_ipv4(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
- unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ },
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
struct ibv_flow_spec_ipv4_ext ipv4 = {
- .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
- .size = ipv4_size,
+ .type = IBV_FLOW_SPEC_IPV4_EXT |
+ (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- /* Don't update layer for the inner pattern. */
- if (!parser->inner)
- parser->layer = HASH_RXQ_IPV4;
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple L3 layers not supported");
+ else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 cannot follow an L4 layer.");
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (spec) {
- if (!mask)
- mask = default_mask;
ipv4.val = (struct ibv_flow_ipv4_ext_filter){
.src_ip = spec->hdr.src_addr,
.dst_ip = spec->hdr.dst_addr,
@@ -1325,44 +1047,108 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
ipv4.val.proto &= ipv4.mask.proto;
ipv4.val.tos &= ipv4.mask.tos;
}
- mlx5_flow_create_copy(parser, &ipv4, ipv4_size);
- return 0;
+ flow->l3_protocol_en = !!ipv4.mask.proto;
+ flow->l3_protocol = ipv4.val.proto;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust
+ (flow, tunnel,
+ (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER),
+ (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
+ mlx5_flow_spec_verbs_add(flow, &ipv4, size);
+ }
+ return size;
}
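An illustration of the L3-protocol bookkeeping (sketch; netinet/in.h assumed for IPPROTO_TCP): masking next_proto_id here constrains which L4 item may follow.

/* Match IPv4 with protocol == TCP; a later UDP item would then fail
 * with "protocol filtering not compatible with UDP layer". */
static const struct rte_flow_item_ipv4 ipv4_spec = {
        .hdr = { .next_proto_id = IPPROTO_TCP },
};
static const struct rte_flow_item_ipv4 ipv4_mask = {
        .hdr = { .next_proto_id = 0xff },
};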
/**
- * Convert IPv6 item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with the returned size should be made.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_ipv6(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
- unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
struct ibv_flow_spec_ipv6 ipv6 = {
- .type = parser->inner | IBV_FLOW_SPEC_IPV6,
- .size = ipv6_size,
+ .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- /* Don't update layer for the inner pattern. */
- if (!parser->inner)
- parser->layer = HASH_RXQ_IPV6;
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple L3 layers not supported");
+ else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 cannot follow an L4 layer.");
+ /*
+ * IPv6 is not recognised by the NIC inside a GRE tunnel.
+ * Such support has to be disabled as the rule would be
+ * accepted while not actually matching. Issue reproduced with
+ * Mellanox OFED 4.3-3.0.2.1 and
+ * Mellanox OFED 4.4-1.0.0.0.
+ */
+ if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "IPv6 inside a GRE tunnel is"
+ " not recognised.");
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (spec) {
unsigned int i;
uint32_t vtc_flow_val;
uint32_t vtc_flow_mask;
- if (!mask)
- mask = default_mask;
memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
RTE_DIM(ipv6.val.src_ip));
memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
@@ -1397,44 +1183,86 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
ipv6.val.next_hdr &= ipv6.mask.next_hdr;
ipv6.val.hop_limit &= ipv6.mask.hop_limit;
}
- mlx5_flow_create_copy(parser, &ipv6, ipv6_size);
- return 0;
+ flow->l3_protocol_en = !!ipv6.mask.next_hdr;
+ flow->l3_protocol = ipv6.val.next_hdr;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust
+ (flow, tunnel,
+ (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
+ (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
+ mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+ }
+ return size;
}
/**
- * Convert UDP item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with the returned size should be made.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_udp(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
- unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
struct ibv_flow_spec_tcp_udp udp = {
- .type = parser->inner | IBV_FLOW_SPEC_UDP,
- .size = udp_size,
+ .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- /* Don't update layer for the inner pattern. */
- if (!parser->inner) {
- if (parser->layer == HASH_RXQ_IPV4)
- parser->layer = HASH_RXQ_UDPV4;
- else
- parser->layer = HASH_RXQ_UDPV6;
- }
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with UDP layer");
+ if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter"
+ " on L4");
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
if (spec) {
- if (!mask)
- mask = default_mask;
udp.val.dst_port = spec->hdr.dst_port;
udp.val.src_port = spec->hdr.src_port;
udp.mask.dst_port = mask->hdr.dst_port;
@@ -1443,44 +1271,81 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
udp.val.src_port &= udp.mask.src_port;
udp.val.dst_port &= udp.mask.dst_port;
}
- mlx5_flow_create_copy(parser, &udp, udp_size);
- return 0;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
+ (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
+ mlx5_flow_spec_verbs_add(flow, &udp, size);
+ }
+ return size;
}
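Putting the converters together, a complete pattern this path accepts (sketch), honouring the L3-before-L4 ordering enforced above:

static const struct rte_flow_item pattern_example[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};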
/**
- * Convert TCP item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with the returned size should be made.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_tcp(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
- unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
struct ibv_flow_spec_tcp_udp tcp = {
- .type = parser->inner | IBV_FLOW_SPEC_TCP,
- .size = tcp_size,
+ .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
};
+ int ret;
- /* Don't update layer for the inner pattern. */
- if (!parser->inner) {
- if (parser->layer == HASH_RXQ_IPV4)
- parser->layer = HASH_RXQ_TCPV4;
- else
- parser->layer = HASH_RXQ_TCPV6;
- }
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with TCP layer");
+ if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter on L4");
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already present");
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
if (spec) {
- if (!mask)
- mask = default_mask;
tcp.val.dst_port = spec->hdr.dst_port;
tcp.val.src_port = spec->hdr.src_port;
tcp.mask.dst_port = mask->hdr.dst_port;
@@ -1489,43 +1354,78 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
tcp.val.src_port &= tcp.mask.src_port;
tcp.val.dst_port &= tcp.mask.dst_port;
}
- mlx5_flow_create_copy(parser, &tcp, tcp_size);
- return 0;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
+ (IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
+ mlx5_flow_spec_verbs_add(flow, &tcp, size);
+ }
+ return size;
}
/**
- * Convert VXLAN item to Verbs specification.
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
*
- * @param item[in]
+ * @param[in] item
* Item specification.
- * @param default_mask[in]
- * Default bit-masks to use when item->mask is not provided.
- * @param data[in, out]
- * User structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_vxlan(const struct rte_flow_item *item,
- const void *default_mask,
- void *data)
+mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
- struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
struct ibv_flow_spec_tunnel vxlan = {
- .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
.size = size,
};
+ int ret;
union vni {
uint32_t vlan_id;
uint8_t vni[4];
- } id;
+ } id = { .vlan_id = 0, };
- id.vni[0] = 0;
- parser->inner = IBV_FLOW_SPEC_INNER;
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify that only UDPv4 is present, as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan), error);
+ if (ret < 0)
+ return ret;
if (spec) {
- if (!mask)
- mask = default_mask;
memcpy(&id.vni[1], spec->vni, 3);
vxlan.val.tunnel_id = id.vlan_id;
memcpy(&id.vni[1], mask->vni, 3);
@@ -1534,297 +1434,1571 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
}
/*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
+ * only this layer is defined in the Verbs specification, it is
+ * interpreted as a wildcard and all packets will match this
+ * rule; if it follows a full stack layer (e.g. eth / ipv4 /
+ * udp), all packets matching the layers before will also
+ * match this rule. To avoid such a situation, VNI 0 is
+ * currently refused.
+ */
+ if (!vxlan.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN tunnel must be fully defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN;
+ return size;
+}
+
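[Editor's note] The union/memcpy dance above folds the 24-bit VXLAN VNI into the 32-bit Verbs tunnel_id, leaving byte 0 zero. A standalone sketch of the packing; the VNI value is made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* 24-bit VNI as carried by the rte_flow item, network order. */
	const uint8_t vni[3] = { 0x12, 0x34, 0x56 };
	union {
		uint32_t vlan_id; /* Read back as the Verbs tunnel_id. */
		uint8_t vni[4];
	} id = { .vlan_id = 0, };

	/* Bytes 1..3 receive the VNI while byte 0 stays zero, exactly
	 * as mlx5_flow_item_vxlan() does above. The 32-bit value keeps
	 * the item's byte order; Verbs consumes it as-is. */
	memcpy(&id.vni[1], vni, 3);
	printf("tunnel_id = 0x%08x\n", (unsigned int)id.vlan_id);
	return 0;
}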
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan_gpe = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 VXLAN is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify that only UDPv4 is present, as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_gpe_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+ sizeof(struct rte_flow_item_vxlan_gpe), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan_gpe.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan_gpe.mask.tunnel_id = id.vlan_id;
+ if (spec->protocol)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol not supported");
+ /* Remove unwanted bits from values. */
+ vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
+ }
+ /*
* Tunnel id 0 is equivalent as not adding a VXLAN layer, if only this
* layer is defined in the Verbs specification it is interpreted as
* wildcard and all packets will match this rule, if it follows a full
* stack layer (ex: eth / ipv4 / udp), all packets matching the layers
- * before will also match this rule.
- * To avoid such situation, VNI 0 is currently refused.
+ * before will also match this rule. To avoid such a situation, VNI 0
+ * is currently refused.
*/
- if (!vxlan.val.tunnel_id)
- return EINVAL;
- mlx5_flow_create_copy(parser, &vxlan, size);
+ if (!vxlan_gpe.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE tunnel must be fully"
+ " defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ return size;
+}
+
+/**
+ * Update the protocol in Verbs IPv4/IPv6 spec.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] search
+ * Specification type to search in order to update the IP protocol.
+ * @param[in] protocol
+ * Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+ enum ibv_flow_spec_type search,
+ uint8_t protocol)
+{
+ unsigned int i;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ if (!attr)
+ return;
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ union {
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ } ip;
+
+ switch (search) {
+ case IBV_FLOW_SPEC_IPV4_EXT:
+ ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
+ if (!ip.ipv4->val.proto) {
+ ip.ipv4->val.proto = protocol;
+ ip.ipv4->mask.proto = 0xff;
+ }
+ break;
+ case IBV_FLOW_SPEC_IPV6:
+ ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
+ if (!ip.ipv6->val.next_hdr) {
+ ip.ipv6->val.next_hdr = protocol;
+ ip.ipv6->mask.next_hdr = 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
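[Editor's note] mlx5_flow_item_gre_ip_protocol_update() above relies on the Verbs layout where the specs sit back to back after struct ibv_flow_attr, each one starting with a header carrying its type and total size. A standalone sketch of that traversal using a simplified stand-in type (the real one lives in infiniband/verbs.h):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct ibv_spec_header. */
struct spec_header {
	uint32_t type;
	uint16_t size; /* Total size of this spec in bytes. */
};

/* Visit num specs laid out contiguously, advancing by each spec's
 * own size field, as the update helpers in this patch do. */
static void
walk_specs(void *first, unsigned int num)
{
	struct spec_header *hdr = first;
	unsigned int i;

	for (i = 0; i != num; ++i) {
		printf("spec %u: type=%u size=%u\n", i,
		       (unsigned int)hdr->type, (unsigned int)hdr->size);
		hdr = (struct spec_header *)((uint8_t *)hdr + hdr->size);
	}
}

int
main(void)
{
	/* Two dummy specs packed contiguously. */
	struct spec_header specs[2] = {
		{ .type = 1, .size = sizeof(struct spec_header) },
		{ .type = 2, .size = sizeof(struct spec_header) },
	};

	walk_specs(specs, 2);
	return 0;
}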
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * It also updates the previous L3 layer with the protocol value matching
+ * GRE.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_gre(const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+ const struct rte_flow_item_gre *spec = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_gre);
+ struct ibv_flow_spec_gre tunnel = {
+ .type = IBV_FLOW_SPEC_GRE,
+ .size = size,
+ };
+#else
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel tunnel = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+#endif
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 Layer is missing");
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_gre_mask,
+ sizeof(struct rte_flow_item_gre), error);
+ if (ret < 0)
+ return ret;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ if (spec) {
+ tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+ tunnel.val.protocol = spec->protocol;
+ tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+ tunnel.mask.protocol = mask->protocol;
+ /* Remove unwanted bits from values. */
+ tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+ tunnel.val.protocol &= tunnel.mask.protocol;
+ tunnel.val.key &= tunnel.mask.key;
+ }
+#else
+ if (spec && (spec->protocol & mask->protocol))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "without MPLS support the"
+ " specification cannot be used for"
+ " filtering");
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ if (size <= flow_size) {
+ if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
+ MLX5_IP_PROTOCOL_GRE);
+ else
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV6,
+ MLX5_IP_PROTOCOL_GRE);
+ mlx5_flow_spec_verbs_add(flow, &tunnel, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_GRE;
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *spec = item->spec;
+ const struct rte_flow_item_mpls *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+ struct ibv_flow_spec_mpls mpls = {
+ .type = IBV_FLOW_SPEC_MPLS,
+ .size = size,
+ };
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL &&
+ (flow->layers & MLX5_FLOW_LAYER_GRE) != MLX5_FLOW_LAYER_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+ memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+ /* Remove unwanted bits from values. */
+ mpls.val.label &= mpls.mask.label;
+ }
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &mpls, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_MPLS;
+ return size;
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
+}
+
+/**
+ * Convert the @p pattern into Verbs specifications after ensuring the NIC
+ * will understand and process it correctly.
+ * The conversion is performed item by item; each of them is written into
+ * @p flow if its size is less than or equal to @p flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end of @p pattern, unless an error is encountered.
+ *
+ * @param[in] pattern
+ * Flow pattern.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, some
+ * garbage may be present.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p pattern has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_items(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ int remain = flow_size;
+ size_t size = 0;
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ int ret = 0;
+
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_item_eth(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_item_udp(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_item_vxlan(pattern, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
+ remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_item_gre(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "item not supported");
+ }
+ if (ret < 0)
+ return ret;
+ if (remain > ret)
+ remain -= ret;
+ else
+ remain = 0;
+ size += ret;
+ }
+ if (!flow->layers) {
+ const struct rte_flow_item item = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ };
+
+ return mlx5_flow_item_eth(&item, flow, flow_size, error);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p action has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_drop(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ struct ibv_flow_spec_action_drop drop = {
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "drop is not compatible with"
+ " flag/mark action");
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &drop, size);
+ flow->fate |= MLX5_FLOW_FATE_DROP;
+ return size;
+}
+
+/**
+ * Convert the @p action into @p flow after ensuring the NIC will understand
+ * and process it correctly.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_action_queue(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_queue *queue = action->conf;
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (queue->index >= priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue index out of range");
+ if (!(*priv->rxqs)[queue->index])
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue is not configured");
+ if (flow->queue)
+ (*flow->queue)[0] = queue->index;
+ flow->rss.queue_num = 1;
+ flow->fate |= MLX5_FLOW_FATE_QUEUE;
+ return 0;
+}
+
+/**
+ * Ensure the @p action will be understood and used correctly by the NIC.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Pointer to flow actions array.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success; the @p flow->queue array and @p flow->rss are then filled
+ * and valid.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_rss *rss = action->conf;
+ unsigned int i;
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss->level > 2)
+#else
+ if (rss->level > 1)
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (!rss->queue_num)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ rss,
+ "no queues were provided for RSS");
+ if (rss->queue_num > priv->config.ind_table_max_size)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (rss->types & MLX5_RSS_HF_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->types,
+ "some RSS protocols are not"
+ " supported");
+ for (i = 0; i != rss->queue_num; ++i) {
+ if (rss->queue[i] >= priv->rxqs_n)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ rss,
+ "queue index out of range");
+ if (!(*priv->rxqs)[rss->queue[i]])
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i],
+ "queue is not configured");
+ }
+ if (flow->queue)
+ memcpy((*flow->queue), rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ flow->rss.queue_num = rss->queue_num;
+ memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
+ flow->rss.types = rss->types;
+ flow->rss.level = rss->level;
+ flow->fate |= MLX5_FLOW_FATE_RSS;
return 0;
}
/**
- * Convert mark/flag action to Verbs specification.
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
- * @param parser
- * Internal parser structure.
- * @param mark_id
- * Mark identifier.
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p action has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
+mlx5_flow_action_flag(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
{
unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
struct ibv_flow_spec_action_tag tag = {
.type = IBV_FLOW_SPEC_ACTION_TAG,
.size = size,
- .tag_id = mlx5_flow_mark_set(mark_id),
+ .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
};
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+ if (flow->modifier & MLX5_FLOW_MOD_FLAG)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag action already present");
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag is not compatible with drop"
+ " action");
+ if (flow->modifier & MLX5_FLOW_MOD_MARK)
+ size = 0;
+ else if (size <= flow_size && verbs)
+ mlx5_flow_spec_verbs_add(flow, &tag, size);
+ flow->modifier |= MLX5_FLOW_MOD_FLAG;
+ return size;
+}
- assert(parser->mark);
- mlx5_flow_create_copy(parser, &tag, size);
- return 0;
+/**
+ * Update verbs specification to modify the flag to mark.
+ *
+ * @param[in, out] verbs
+ * Pointer to the mlx5_flow_verbs structure.
+ * @param[in] mark_id
+ * Mark identifier to replace the flag.
+ */
+static void
+mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+{
+ struct ibv_spec_header *hdr;
+ int i;
+
+ if (!verbs)
+ return;
+ /* Update Verbs specification. */
+ hdr = (struct ibv_spec_header *)verbs->specs;
+ if (!hdr)
+ return;
+ for (i = 0; i != verbs->attr->num_of_specs; ++i) {
+ if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
+ struct ibv_flow_spec_action_tag *t =
+ (struct ibv_flow_spec_action_tag *)hdr;
+
+ t->tag_id = mlx5_flow_mark_set(mark_id);
+ }
+ hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
+ }
}
/**
- * Convert count action to Verbs specification.
+ * Convert the @p action into @p flow (or update the Flag Verbs
+ * specification already present) after ensuring the NIC will understand
+ * and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
- * @param priv
- * Pointer to private structure.
- * @param parser
- * Pointer to MLX5 flow parser structure.
+ * @return
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p action has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_mark(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_mark *mark = action->conf;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ };
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+ if (!mark)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "configuration cannot be null");
+ if (mark->id >= MLX5_FLOW_MARK_MAX)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &mark->id,
+ "mark id must in 0 <= id < "
+ RTE_STR(MLX5_FLOW_MARK_MAX));
+ if (flow->modifier & MLX5_FLOW_MOD_MARK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "mark action already present");
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "mark is not compatible with drop"
+ " action");
+ if (flow->modifier & MLX5_FLOW_MOD_FLAG) {
+ mlx5_flow_verbs_mark_update(verbs, mark->id);
+ size = 0;
+ } else if (size <= flow_size) {
+ tag.tag_id = mlx5_flow_mark_set(mark->id);
+ mlx5_flow_spec_verbs_add(flow, &tag, size);
+ }
+ flow->modifier |= MLX5_FLOW_MOD_MARK;
+ return size;
+}
+
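[Editor's note] Flag and mark share a single Verbs tag spec: whichever action is seen first emits it, and the other only rewrites (or keeps) the tag id, returning a size of 0 so nothing is emitted twice. A toy standalone model of that merge; all names here are made up, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define MOD_FLAG (1u << 0)
#define MOD_MARK (1u << 1)
#define MARK_DEFAULT 0xffffffu

static uint32_t modifier; /* Mirrors flow->modifier. */
static uint32_t tag_id;   /* Mirrors the single tag spec. */

/* Flag only emits the spec when no mark already did. */
static unsigned int
action_flag(void)
{
	unsigned int size = sizeof(tag_id);

	if (modifier & MOD_MARK)
		size = 0; /* Spec already present: emit nothing. */
	else
		tag_id = MARK_DEFAULT;
	modifier |= MOD_FLAG;
	return size;
}

/* Mark rewrites the id in place when a flag already emitted it. */
static unsigned int
action_mark(uint32_t id)
{
	unsigned int size = sizeof(tag_id);

	if (modifier & MOD_FLAG)
		size = 0; /* Update in place: emit nothing new. */
	tag_id = id;
	modifier |= MOD_MARK;
	return size;
}

int
main(void)
{
	unsigned int total = action_flag() + action_mark(42);

	printf("one spec of %u bytes, id=%u\n", total, tag_id);
	return 0;
}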
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
*
* @return
- * 0 on success, errno value on failure.
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p action has been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-mlx5_flow_create_count(struct priv *priv __rte_unused,
- struct mlx5_flow_parse *parser __rte_unused)
+mlx5_flow_action_count(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
{
+ const struct rte_flow_action_count *count = action->conf;
#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
- struct ibv_counter_set_init_attr init_attr = {0};
struct ibv_flow_spec_counter_action counter = {
.type = IBV_FLOW_SPEC_ACTION_COUNT,
.size = size,
- .counter_set_handle = 0,
};
+#endif
- init_attr.counter_set_id = 0;
- parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
- if (!parser->cs)
- return EINVAL;
- counter.counter_set_handle = parser->cs->handle;
- mlx5_flow_create_copy(parser, &counter, size);
+ if (!flow->counter) {
+ flow->counter = mlx5_flow_counter_new(dev, count->shared,
+ count->id);
+ if (!flow->counter)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "cannot get counter"
+ " context.");
+ }
+ if (!((struct priv *)dev->data->dev_private)->config.flow_counter_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flow counters are not supported.");
+ flow->modifier |= MLX5_FLOW_MOD_COUNT;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ counter.counter_set_handle = flow->counter->cs->handle;
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &counter, size);
+ return size;
#endif
return 0;
}
/**
- * Complete flow rule creation with a drop queue.
- *
- * @param priv
- * Pointer to private structure.
- * @param parser
- * Internal parser structure.
- * @param flow
- * Pointer to the rte_flow.
+ * Convert the @p actions into @p flow after ensuring the NIC will understand
+ * and process it correctly.
+ * The conversion is performed action by action; each of them is written into
+ * @p flow if its size is less than or equal to @p flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end of @p actions, unless an error is encountered.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] actions
+ * Pointer to flow actions array.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, some
+ * garbage may be present.
* @param[out] error
- * Perform verbose error reporting if not NULL.
+ * Pointer to error structure.
*
* @return
- * 0 on success, errno value on failure.
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the @p actions have been fully
+ * converted, otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-priv_flow_create_action_queue_drop(struct priv *priv,
- struct mlx5_flow_parse *parser,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+mlx5_flow_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
{
- struct ibv_flow_spec_action_drop *drop;
- unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
- int err = 0;
-
- assert(priv->pd);
- assert(priv->ctx);
- flow->drop = 1;
- drop = (void *)((uintptr_t)parser->queue[HASH_RXQ_ETH].ibv_attr +
- parser->queue[HASH_RXQ_ETH].offset);
- *drop = (struct ibv_flow_spec_action_drop){
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- .size = size,
- };
- ++parser->queue[HASH_RXQ_ETH].ibv_attr->num_of_specs;
- parser->queue[HASH_RXQ_ETH].offset += size;
- flow->frxq[HASH_RXQ_ETH].ibv_attr =
- parser->queue[HASH_RXQ_ETH].ibv_attr;
- if (parser->count)
- flow->cs = parser->cs;
- if (!priv->dev->data->dev_started)
- return 0;
- parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
- flow->frxq[HASH_RXQ_ETH].ibv_flow =
- mlx5_glue->create_flow(priv->flow_drop_queue->qp,
- flow->frxq[HASH_RXQ_ETH].ibv_attr);
- if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "flow rule creation failure");
- err = ENOMEM;
- goto error;
- }
- return 0;
-error:
- assert(flow);
- if (flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- claim_zero(mlx5_glue->destroy_flow
- (flow->frxq[HASH_RXQ_ETH].ibv_flow));
- flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
- }
- if (flow->frxq[HASH_RXQ_ETH].ibv_attr) {
- rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
- flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL;
- }
- if (flow->cs) {
- claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
- flow->cs = NULL;
- parser->cs = NULL;
- }
- return err;
+ size_t size = 0;
+ int remain = flow_size;
+ int ret = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = mlx5_flow_action_flag(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = mlx5_flow_action_mark(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_action_drop(actions, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_action_queue(dev, actions, flow, error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = mlx5_flow_action_rss(dev, actions, flow, error);
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_action_count(dev, actions, flow, remain,
+ error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
+ return ret;
+ if (remain > ret)
+ remain -= ret;
+ else
+ remain = 0;
+ size += ret;
+ }
+ if (!flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no fate action found");
+ return size;
}
/**
- * Create hash Rx queues when RSS is enabled.
+ * Validate flow rule and fill flow structure accordingly.
*
- * @param priv
- * Pointer to private structure.
- * @param parser
- * Internal parser structure.
- * @param flow
- * Pointer to the rte_flow.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] flow
+ * Pointer to flow structure.
+ * @param flow_size
+ * Size of allocated space for @p flow.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * A positive value representing the size of the flow object in bytes
+ * regardless of @p flow_size on success, a negative errno value otherwise
+ * and rte_errno is set.
*/
static int
-priv_flow_create_action_queue_rss(struct priv *priv,
- struct mlx5_flow_parse *parser,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+mlx5_flow_merge_switch(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ size_t flow_size,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
+ unsigned int n = mlx5_dev_to_port_id(dev->device, NULL, 0);
+ uint16_t port_id[!n + n];
+ struct mlx5_nl_flow_ptoi ptoi[!n + n + 1];
+ size_t off = RTE_ALIGN_CEIL(sizeof(*flow), alignof(max_align_t));
unsigned int i;
+ unsigned int own = 0;
+ int ret;
- for (i = 0; i != hash_rxq_init_n; ++i) {
- uint64_t hash_fields;
+ /* At least one port is needed when no switch domain is present. */
+ if (!n) {
+ n = 1;
+ port_id[0] = dev->data->port_id;
+ } else {
+ n = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, n), n);
+ }
+ for (i = 0; i != n; ++i) {
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(port_id[i], &dev_info);
+ if (port_id[i] == dev->data->port_id)
+ own = i;
+ ptoi[i].port_id = port_id[i];
+ ptoi[i].ifindex = dev_info.if_index;
+ }
+ /* Ensure first entry of ptoi[] is the current device. */
+ if (own) {
+ ptoi[n] = ptoi[0];
+ ptoi[0] = ptoi[own];
+ ptoi[own] = ptoi[n];
+ }
+ /* An entry with zero ifindex terminates ptoi[]. */
+ ptoi[n].port_id = 0;
+ ptoi[n].ifindex = 0;
+ if (flow_size < off)
+ flow_size = 0;
+ ret = mlx5_nl_flow_transpose((uint8_t *)flow + off,
+ flow_size ? flow_size - off : 0,
+ ptoi, attr, pattern, actions, error);
+ if (ret < 0)
+ return ret;
+ if (flow_size) {
+ *flow = (struct rte_flow){
+ .attributes = *attr,
+ .nl_flow = (uint8_t *)flow + off,
+ };
+ /*
+ * Generate a reasonably unique handle based on the address
+ * of the target buffer.
+ *
+ * This is straightforward on 32-bit systems where the flow
+ * pointer can be used directly. Otherwise, its least
+ * significant part is kept after shifting the pointer right
+ * by log2 of the previous power of two of the buffer size.
+ */
+ if (sizeof(flow) <= 4)
+ mlx5_nl_flow_brand(flow->nl_flow, (uintptr_t)flow);
+ else
+ mlx5_nl_flow_brand
+ (flow->nl_flow,
+ (uintptr_t)flow >>
+ rte_log2_u32(rte_align32prevpow2(flow_size)));
+ }
+ return off + ret;
+}
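[Editor's note] A standalone sketch of the handle branding above, with portable stand-ins for rte_align32prevpow2() and rte_log2_u32(); the buffer and its size are made up:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Largest power of two less than or equal to v (v > 0). */
static uint32_t
prevpow2(uint32_t v)
{
	while (v & (v - 1))
		v &= v - 1; /* Clear the lowest set bit. */
	return v;
}

static uint32_t
log2_u32(uint32_t v)
{
	uint32_t n = 0;

	while (v >>= 1)
		n++;
	return n;
}

int
main(void)
{
	static uint8_t buf[4096]; /* Pretend flow object. */
	uintptr_t handle;

	if (sizeof(void *) <= 4)
		handle = (uintptr_t)buf; /* 32-bit: the pointer fits. */
	else
		/* 64-bit: keep the least significant part after
		 * dropping the bits covered by the buffer size. */
		handle = (uintptr_t)buf >>
			 log2_u32(prevpow2(sizeof(buf)));
	printf("handle = 0x%" PRIxPTR "\n", handle);
	return 0;
}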
- if (!parser->queue[i].ibv_attr)
- continue;
- flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
- parser->queue[i].ibv_attr = NULL;
- hash_fields = hash_rxq_init[i].hash_fields;
- if (!priv->dev->data->dev_started)
- continue;
- flow->frxq[i].hrxq =
- mlx5_priv_hrxq_get(priv,
- parser->rss_conf.rss_key,
- parser->rss_conf.rss_key_len,
- hash_fields,
- parser->queues,
- parser->queues_n);
- if (flow->frxq[i].hrxq)
- continue;
- flow->frxq[i].hrxq =
- mlx5_priv_hrxq_new(priv,
- parser->rss_conf.rss_key,
- parser->rss_conf.rss_key_len,
- hash_fields,
- parser->queues,
- parser->queues_n);
- if (!flow->frxq[i].hrxq) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot create hash rxq");
- return ENOMEM;
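+/**
+ * Find the graph root used to expand the RSS configuration, according to
+ * the pattern and the RSS level requested by the user.
+ *
+ * @param[in] pattern
+ * Flow pattern.
+ * @param[in] rss_level
+ * RSS level requested by the user, 2 and above stand for inner RSS.
+ *
+ * @return
+ * The expansion root to pass to rte_flow_expand_rss().
+ */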
+static unsigned int
+mlx5_find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
+{
+ const struct rte_flow_item *item;
+ unsigned int has_vlan = 0;
+
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ has_vlan = 1;
+ break;
}
}
- return 0;
+ if (has_vlan)
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
+ MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
+ return rss_level < 2 ? MLX5_EXPANSION_ROOT :
+ MLX5_EXPANSION_ROOT_OUTER;
}
/**
- * Complete flow rule creation.
- *
- * @param priv
- * Pointer to private structure.
- * @param parser
- * Internal parser structure.
- * @param flow
- * Pointer to the rte_flow.
+ * Convert the @p attributes, @p pattern and @p actions into a flow for the
+ * NIC after ensuring the NIC will understand and process it correctly.
+ * The conversion is performed item/action by item/action; each of
+ * them is written into @p flow if its size is less than or equal to @p
+ * flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end, unless an error is encountered.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, some
+ * garbage may be present.
+ * @param[in] attributes
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * On success, the number of bytes consumed/necessary; if the returned value
+ * is less than or equal to @p flow_size, the flow has been fully converted
+ * and can be applied, otherwise another call with this returned memory size
+ * should be done.
+ * On error, a negative errno value is returned and rte_errno is set.
*/
static int
-priv_flow_create_action_queue(struct priv *priv,
- struct mlx5_flow_parse *parser,
- struct rte_flow *flow,
- struct rte_flow_error *error)
+mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const size_t flow_size,
+ const struct rte_flow_attr *attributes,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow local_flow = { .layers = 0, };
+ size_t size = sizeof(*flow);
+ union {
+ struct rte_flow_expand_rss buf;
+ uint8_t buffer[2048];
+ } expand_buffer;
+ struct rte_flow_expand_rss *buf = &expand_buffer.buf;
+ struct mlx5_flow_verbs *original_verbs = NULL;
+ size_t original_verbs_size = 0;
+ uint32_t original_layers = 0;
+ int expanded_pattern_idx = 0;
+ int ret;
+ uint32_t i;
+
+ if (attributes->transfer)
+ return mlx5_flow_merge_switch(dev, flow, flow_size,
+ attributes, pattern,
+ actions, error);
+ if (size > flow_size)
+ flow = &local_flow;
+ ret = mlx5_flow_attributes(dev, attributes, flow, error);
+ if (ret < 0)
+ return ret;
+ ret = mlx5_flow_actions(dev, actions, &local_flow, 0, error);
+ if (ret < 0)
+ return ret;
+ if (local_flow.rss.types) {
+ unsigned int graph_root;
+
+ graph_root = mlx5_find_graph_root(pattern,
+ local_flow.rss.level);
+ ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
+ pattern, local_flow.rss.types,
+ mlx5_support_expansion,
+ graph_root);
+ assert(ret > 0 &&
+ (unsigned int)ret < sizeof(expand_buffer.buffer));
+ } else {
+ buf->entries = 1;
+ buf->entry[0].pattern = (void *)(uintptr_t)pattern;
+ }
+ size += RTE_ALIGN_CEIL(local_flow.rss.queue_num * sizeof(uint16_t),
+ sizeof(void *));
+ if (size <= flow_size)
+ flow->queue = (void *)(flow + 1);
+ LIST_INIT(&flow->verbs);
+ flow->layers = 0;
+ flow->modifier = 0;
+ flow->fate = 0;
+ for (i = 0; i != buf->entries; ++i) {
+ size_t off = size;
+ size_t off2;
+
+ flow->layers = original_layers;
+ size += sizeof(struct ibv_flow_attr) +
+ sizeof(struct mlx5_flow_verbs);
+ off2 = size;
+ if (size < flow_size) {
+ flow->cur_verbs = (void *)((uintptr_t)flow + off);
+ flow->cur_verbs->attr = (void *)(flow->cur_verbs + 1);
+ flow->cur_verbs->specs =
+ (void *)(flow->cur_verbs->attr + 1);
+ }
+ /* First iteration convert the pattern into Verbs. */
+ if (i == 0) {
+ /* Actions don't need to be converted several times. */
+ ret = mlx5_flow_actions(dev, actions, flow,
+ (size < flow_size) ?
+ flow_size - size : 0,
+ error);
+ if (ret < 0)
+ return ret;
+ size += ret;
+ } else {
+ /*
+ * Subsequent iterations mean the pattern has already
+ * been converted and an expansion is necessary to
+ * match the user RSS request. Only the expanded items
+ * are converted; the part common with the user
+ * pattern is simply copied into the next buffer
+ * zone.
+ */
+ size += original_verbs_size;
+ if (size < flow_size) {
+ rte_memcpy(flow->cur_verbs->attr,
+ original_verbs->attr,
+ original_verbs_size +
+ sizeof(struct ibv_flow_attr));
+ flow->cur_verbs->size = original_verbs_size;
+ }
+ }
+ ret = mlx5_flow_items
+ (dev,
+ (const struct rte_flow_item *)
+ &buf->entry[i].pattern[expanded_pattern_idx],
+ flow,
+ (size < flow_size) ? flow_size - size : 0, error);
+ if (ret < 0)
+ return ret;
+ size += ret;
+ if (size <= flow_size) {
+ mlx5_flow_adjust_priority(dev, flow);
+ LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
+ }
+ /*
+ * Keep a pointer of the first verbs conversion and the layers
+ * it has encountered.
+ */
+ if (i == 0) {
+ original_verbs = flow->cur_verbs;
+ original_verbs_size = size - off2;
+ original_layers = flow->layers;
+ /*
+ * Move the index of the expanded pattern to the
+ * first item not addressed yet.
+ */
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
+ expanded_pattern_idx++;
+ } else {
+ const struct rte_flow_item *item = pattern;
+
+ for (item = pattern;
+ item->type != RTE_FLOW_ITEM_TYPE_END;
+ ++item)
+ expanded_pattern_idx++;
+ }
+ }
+ }
+ /* Restore the origin layers in the flow. */
+ flow->layers = original_layers;
+ return size;
+}
+
+/**
+ * Look up and set the tunnel ptype in the Rx queue data. Only a single
+ * tunnel ptype can be exposed; if several tunnel rules target this queue,
+ * the tunnel ptype is cleared.
+ *
+ * @param rxq_ctrl
+ * Rx queue to update.
+ */
+static void
+mlx5_flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- int err = 0;
unsigned int i;
+ uint32_t tunnel_ptype = 0;
- assert(priv->pd);
- assert(priv->ctx);
- assert(!parser->drop);
- err = priv_flow_create_action_queue_rss(priv, parser, flow, error);
- if (err)
- goto error;
- if (parser->count)
- flow->cs = parser->cs;
- if (!priv->dev->data->dev_started)
- return 0;
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].hrxq)
+ /* Look up the ptype to use. */
+ for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
+ if (!rxq_ctrl->flow_tunnels_n[i])
continue;
- flow->frxq[i].ibv_flow =
- mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
- if (!flow->frxq[i].ibv_flow) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "flow rule creation failure");
- err = ENOMEM;
- goto error;
+ if (!tunnel_ptype) {
+ tunnel_ptype = tunnels_info[i].ptype;
+ } else {
+ tunnel_ptype = 0;
+ break;
+ }
+ }
+ rxq_ctrl->rxq.tunnel = tunnel_ptype;
+}
+
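[Editor's note] The rule implemented above: a queue exposes a tunnel ptype only while exactly one tunnel kind has flows on it; as soon as tunnel kinds mix, the ptype is cleared. A toy standalone model of that selection; the ptype values are made up:

#include <stdint.h>
#include <stdio.h>

#define NUM_TUNNELS 3

/* Made-up ptypes standing in for tunnels_info[].ptype. */
static const uint32_t ptypes[NUM_TUNNELS] = { 0x100, 0x200, 0x300 };

/* Return the only active tunnel ptype, or 0 when kinds mix. */
static uint32_t
pick_ptype(const uint32_t counts[NUM_TUNNELS])
{
	uint32_t ptype = 0;
	unsigned int i;

	for (i = 0; i != NUM_TUNNELS; ++i) {
		if (!counts[i])
			continue;
		if (ptype)
			return 0; /* Mixed tunnel kinds: clear it. */
		ptype = ptypes[i];
	}
	return ptype;
}

int
main(void)
{
	const uint32_t single[NUM_TUNNELS] = { 2, 0, 0 };
	const uint32_t mixed[NUM_TUNNELS] = { 1, 1, 0 };

	printf("single kind -> 0x%x\n", pick_ptype(single));
	printf("mixed kinds -> 0x%x\n", pick_ptype(mixed));
	return 0;
}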
+/**
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to flow structure.
+ */
+static void
+mlx5_flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->modifier &
+ (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->rxq.mark = 1;
+ rxq_ctrl->flow_mark_n++;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Increase the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]++;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
+ }
+ }
+}
+
+/**
+ * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
+ * @p flow if no other flow uses it with the same kind of request.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the flow.
+ */
+static void
+mlx5_flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ const int mark = !!(flow->modifier &
+ (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK));
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int i;
+
+ assert(dev->data->dev_started);
+ for (i = 0; i != flow->rss.queue_num; ++i) {
+ int idx = (*flow->queue)[i];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of((*priv->rxqs)[idx],
+ struct mlx5_rxq_ctrl, rxq);
+
+ if (mark) {
+ rxq_ctrl->flow_mark_n--;
+ rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
+ }
+ if (tunnel) {
+ unsigned int j;
+
+ /* Decrease the counter matching the flow. */
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
+ if ((tunnels_info[j].tunnel & flow->layers) ==
+ tunnels_info[j].tunnel) {
+ rxq_ctrl->flow_tunnels_n[j]--;
+ break;
+ }
+ }
+ mlx5_flow_rxq_tunnel_ptype_update(rxq_ctrl);
}
- DEBUG("%p type %d QP %p ibv_flow %p",
- (void *)flow, i,
- (void *)flow->frxq[i].hrxq,
- (void *)flow->frxq[i].ibv_flow);
}
- for (i = 0; i != parser->queues_n; ++i) {
- struct mlx5_rxq_data *q =
- (*priv->rxqs)[parser->queues[i]];
+}
+
+/**
+ * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+static void
+mlx5_flow_rxq_flags_clear(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+ unsigned int j;
- q->mark |= parser->mark;
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i],
+ struct mlx5_rxq_ctrl, rxq);
+ rxq_ctrl->flow_mark_n = 0;
+ rxq_ctrl->rxq.mark = 0;
+ for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
+ rxq_ctrl->flow_tunnels_n[j] = 0;
+ rxq_ctrl->rxq.tunnel = 0;
}
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret = mlx5_flow_merge(dev, NULL, 0, attr, items, actions, error);
+
+ if (ret < 0)
+ return ret;
return 0;
-error:
- assert(flow);
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (flow->frxq[i].ibv_flow) {
- struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
+}
+
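[Editor's note] From the application side, the new engine is exercised through the regular rte_flow calls; validation now goes through mlx5_flow_merge() with a zero-sized flow, i.e. a dry-run conversion. A minimal sketch, assuming a started port whose Rx queue 0 exists (error handling trimmed):

#include <stdint.h>
#include <rte_flow.h>

static struct rte_flow *
create_udp_to_queue0(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Dry run first; on the mlx5 PMD this lands in
	 * mlx5_flow_merge(dev, NULL, 0, ...). */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}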
+/**
+ * Remove the flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ */
+static void
+mlx5_flow_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_verbs *verbs;
+
+ if (flow->nl_flow && priv->mnl_socket)
+ mlx5_nl_flow_destroy(priv->mnl_socket, flow->nl_flow, NULL);
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (verbs->flow) {
+ claim_zero(mlx5_glue->destroy_flow(verbs->flow));
+ verbs->flow = NULL;
+ }
+ if (verbs->hrxq) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
+ }
+ if (flow->counter) {
+ mlx5_flow_counter_release(flow->counter);
+ flow->counter = NULL;
+ }
+}
+
+/**
+ * Apply the flow.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_verbs *verbs;
+ int err;
- claim_zero(mlx5_glue->destroy_flow(ibv_flow));
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP) {
+ verbs->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!verbs->hrxq) {
+ rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot get drop hash queue");
+ goto error;
+ }
+ } else {
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_hrxq_get(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num);
+ if (!hrxq)
+ hrxq = mlx5_hrxq_new(dev, flow->key,
+ MLX5_RSS_HASH_KEY_LEN,
+ verbs->hash_fields,
+ (*flow->queue),
+ flow->rss.queue_num,
+ !!(flow->layers &
+ MLX5_FLOW_LAYER_TUNNEL));
+ if (!hrxq) {
+ rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot get hash queue");
+ goto error;
+ }
+ verbs->hrxq = hrxq;
+ }
+ verbs->flow =
+ mlx5_glue->create_flow(verbs->hrxq->qp, verbs->attr);
+ if (!verbs->flow) {
+ rte_flow_error_set(error, errno,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "hardware refuses to create flow");
+ goto error;
}
- if (flow->frxq[i].hrxq)
- mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
- if (flow->frxq[i].ibv_attr)
- rte_free(flow->frxq[i].ibv_attr);
}
- if (flow->cs) {
- claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
- flow->cs = NULL;
- parser->cs = NULL;
+ if (flow->nl_flow &&
+ priv->mnl_socket &&
+ mlx5_nl_flow_create(priv->mnl_socket, flow->nl_flow, error))
+ goto error;
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ LIST_FOREACH(verbs, &flow->verbs, next) {
+ if (verbs->hrxq) {
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ mlx5_hrxq_drop_release(dev);
+ else
+ mlx5_hrxq_release(dev, verbs->hrxq);
+ verbs->hrxq = NULL;
+ }
}
- return err;
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
- * Convert a flow.
+ * Create a flow and add it to @p list.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
* @param[in] attr
* Flow rule attributes.
- * @param[in] pattern
+ * @param[in] items
* Pattern specification (list terminated by the END pattern item).
* @param[in] actions
* Associated actions (list terminated by the END action).
@@ -1832,85 +3006,53 @@ error:
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow on success, NULL otherwise.
+ * A flow on success, NULL otherwise and rte_errno is set.
*/
static struct rte_flow *
-priv_flow_create(struct priv *priv,
- struct mlx5_flows *list,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+mlx5_flow_list_create(struct rte_eth_dev *dev,
+ struct mlx5_flows *list,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
{
- struct mlx5_flow_parse parser = { .create = 1, };
struct rte_flow *flow = NULL;
- unsigned int i;
- int err;
+ size_t size = 0;
+ int ret;
- err = priv_flow_convert(priv, attr, items, actions, error, &parser);
- if (err)
- goto exit;
- flow = rte_calloc(__func__, 1,
- sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
- 0);
+ ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+ if (ret < 0)
+ return NULL;
+ size = ret;
+ flow = rte_calloc(__func__, 1, size, 0);
if (!flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- "cannot allocate flow memory");
+ "not enough memory to create flow");
return NULL;
}
- /* Copy queues configuration. */
- flow->queues = (uint16_t (*)[])(flow + 1);
- memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t));
- flow->queues_n = parser.queues_n;
- flow->mark = parser.mark;
- /* Copy RSS configuration. */
- flow->rss_conf = parser.rss_conf;
- flow->rss_conf.rss_key = flow->rss_key;
- memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
- /* finalise the flow. */
- if (parser.drop)
- err = priv_flow_create_action_queue_drop(priv, &parser, flow,
- error);
- else
- err = priv_flow_create_action_queue(priv, &parser, flow, error);
- if (err)
- goto exit;
+ ret = mlx5_flow_merge(dev, flow, size, attr, items, actions, error);
+ if (ret < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+ assert((size_t)ret == size);
+ if (dev->data->dev_started) {
+ ret = mlx5_flow_apply(dev, flow, error);
+ if (ret < 0) {
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ if (flow) {
+ mlx5_flow_remove(dev, flow);
+ rte_free(flow);
+ }
+ rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+ }
+ }
TAILQ_INSERT_TAIL(list, flow, next);
- DEBUG("Flow created %p", (void *)flow);
+ mlx5_flow_rxq_flags_set(dev, flow);
return flow;
-exit:
- ERROR("flow creation error: %s", error->message);
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser.queue[i].ibv_attr)
- rte_free(parser.queue[i].ibv_attr);
- }
- rte_free(flow);
- return NULL;
-}
-
-/**
- * Validate a flow supported by the NIC.
- *
- * @see rte_flow_validate()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
- int ret;
- struct mlx5_flow_parse parser = { .create = 0, };
-
- priv_lock(priv);
- ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
- priv_unlock(priv);
- return ret;
}
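
A quick aside on the pattern above: mlx5_flow_merge() is called twice, first with a NULL flow and zero size to learn how many bytes the expanded flow needs, then again to fill the freshly allocated buffer. A minimal standalone sketch of this measure-then-fill idiom, with hypothetical convert()/create() names:

#include <stdlib.h>
#include <string.h>

/* Hypothetical two-pass converter: returns the bytes required and fills
 * buf only when the caller passed enough room. */
static int
convert(void *buf, size_t size, const char *src)
{
	size_t need = strlen(src) + 1;

	if (buf != NULL && size >= need)
		memcpy(buf, src, need);
	return (int)need;
}

static void *
create(const char *src)
{
	int need = convert(NULL, 0, src); /* First pass: measure only. */
	void *buf = calloc(1, need);

	if (buf == NULL)
		return NULL;
	convert(buf, need, src); /* Second pass: same code path fills it. */
	return buf;
}
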
/**
@@ -1926,379 +3068,123 @@ mlx5_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- struct rte_flow *flow;
-
- priv_lock(priv);
- flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
- error);
- priv_unlock(priv);
- return flow;
+ return mlx5_flow_list_create
+ (dev, &((struct priv *)dev->data->dev_private)->flows,
+ attr, items, actions, error);
}
/**
- * Destroy a flow.
+ * Destroy a flow in a list.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
* @param[in] flow
* Flow to destroy.
*/
static void
-priv_flow_destroy(struct priv *priv,
- struct mlx5_flows *list,
- struct rte_flow *flow)
+mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
+ struct rte_flow *flow)
{
- unsigned int i;
-
- if (flow->drop || !flow->mark)
- goto free;
- for (i = 0; i != flow->queues_n; ++i) {
- struct rte_flow *tmp;
- int mark = 0;
-
- /*
- * To remove the mark from the queue, the queue must not be
- * present in any other marked flow (RSS or not).
- */
- TAILQ_FOREACH(tmp, list, next) {
- unsigned int j;
- uint16_t *tqs = NULL;
- uint16_t tq_n = 0;
-
- if (!tmp->mark)
- continue;
- for (j = 0; j != hash_rxq_init_n; ++j) {
- if (!tmp->frxq[j].hrxq)
- continue;
- tqs = tmp->frxq[j].hrxq->ind_table->queues;
- tq_n = tmp->frxq[j].hrxq->ind_table->queues_n;
- }
- if (!tq_n)
- continue;
- for (j = 0; (j != tq_n) && !mark; j++)
- if (tqs[j] == (*flow->queues)[i])
- mark = 1;
- }
- (*priv->rxqs)[(*flow->queues)[i]]->mark = mark;
- }
-free:
- if (flow->drop) {
- if (flow->frxq[HASH_RXQ_ETH].ibv_flow)
- claim_zero(mlx5_glue->destroy_flow
- (flow->frxq[HASH_RXQ_ETH].ibv_flow));
- rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
- } else {
- for (i = 0; i != hash_rxq_init_n; ++i) {
- struct mlx5_flow *frxq = &flow->frxq[i];
-
- if (frxq->ibv_flow)
- claim_zero(mlx5_glue->destroy_flow
- (frxq->ibv_flow));
- if (frxq->hrxq)
- mlx5_priv_hrxq_release(priv, frxq->hrxq);
- if (frxq->ibv_attr)
- rte_free(frxq->ibv_attr);
- }
- }
- if (flow->cs) {
- claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
- flow->cs = NULL;
- }
+ mlx5_flow_remove(dev, flow);
TAILQ_REMOVE(list, flow, next);
- DEBUG("Flow destroyed %p", (void *)flow);
+ /*
+ * Update Rx queue flags only if the port is started; otherwise they
+ * are already clear.
+ */
+ if (dev->data->dev_started)
+ mlx5_flow_rxq_flags_trim(dev, flow);
rte_free(flow);
}
/**
* Destroy all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*/
void
-priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
while (!TAILQ_EMPTY(list)) {
struct rte_flow *flow;
flow = TAILQ_FIRST(list);
- priv_flow_destroy(priv, list, flow);
- }
-}
-
-/**
- * Create drop queue.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 0 on success.
- */
-int
-priv_flow_create_drop_queue(struct priv *priv)
-{
- struct mlx5_hrxq_drop *fdq = NULL;
-
- assert(priv->pd);
- assert(priv->ctx);
- fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
- if (!fdq) {
- WARN("cannot allocate memory for drop queue");
- goto error;
- }
- fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
- if (!fdq->cq) {
- WARN("cannot allocate CQ for drop queue");
- goto error;
- }
- fdq->wq = mlx5_glue->create_wq
- (priv->ctx,
- &(struct ibv_wq_init_attr){
- .wq_type = IBV_WQT_RQ,
- .max_wr = 1,
- .max_sge = 1,
- .pd = priv->pd,
- .cq = fdq->cq,
- });
- if (!fdq->wq) {
- WARN("cannot allocate WQ for drop queue");
- goto error;
- }
- fdq->ind_table = mlx5_glue->create_rwq_ind_table
- (priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
- .log_ind_tbl_size = 0,
- .ind_tbl = &fdq->wq,
- .comp_mask = 0,
- });
- if (!fdq->ind_table) {
- WARN("cannot allocate indirection table for drop queue");
- goto error;
+ mlx5_flow_list_destroy(dev, list, flow);
}
- fdq->qp = mlx5_glue->create_qp_ex
- (priv->ctx,
- &(struct ibv_qp_init_attr_ex){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_QP_INIT_ATTR_PD |
- IBV_QP_INIT_ATTR_IND_TABLE |
- IBV_QP_INIT_ATTR_RX_HASH,
- .rx_hash_conf = (struct ibv_rx_hash_conf){
- .rx_hash_function =
- IBV_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_hash_default_key_len,
- .rx_hash_key = rss_hash_default_key,
- .rx_hash_fields_mask = 0,
- },
- .rwq_ind_tbl = fdq->ind_table,
- .pd = priv->pd
- });
- if (!fdq->qp) {
- WARN("cannot allocate QP for drop queue");
- goto error;
- }
- priv->flow_drop_queue = fdq;
- return 0;
-error:
- if (fdq->qp)
- claim_zero(mlx5_glue->destroy_qp(fdq->qp));
- if (fdq->ind_table)
- claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
- if (fdq->wq)
- claim_zero(mlx5_glue->destroy_wq(fdq->wq));
- if (fdq->cq)
- claim_zero(mlx5_glue->destroy_cq(fdq->cq));
- if (fdq)
- rte_free(fdq);
- priv->flow_drop_queue = NULL;
- return -1;
-}
-
-/**
- * Delete drop queue.
- *
- * @param priv
- * Pointer to private structure.
- */
-void
-priv_flow_delete_drop_queue(struct priv *priv)
-{
- struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
-
- if (!fdq)
- return;
- if (fdq->qp)
- claim_zero(mlx5_glue->destroy_qp(fdq->qp));
- if (fdq->ind_table)
- claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
- if (fdq->wq)
- claim_zero(mlx5_glue->destroy_wq(fdq->wq));
- if (fdq->cq)
- claim_zero(mlx5_glue->destroy_cq(fdq->cq));
- rte_free(fdq);
- priv->flow_drop_queue = NULL;
}
/**
* Remove all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*/
void
-priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
struct rte_flow *flow;
- TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
- unsigned int i;
- struct mlx5_ind_table_ibv *ind_tbl = NULL;
-
- if (flow->drop) {
- if (!flow->frxq[HASH_RXQ_ETH].ibv_flow)
- continue;
- claim_zero(mlx5_glue->destroy_flow
- (flow->frxq[HASH_RXQ_ETH].ibv_flow));
- flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
- DEBUG("Flow %p removed", (void *)flow);
- /* Next flow. */
- continue;
- }
- /* Verify the flow has not already been cleaned. */
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].ibv_flow)
- continue;
- /*
- * Indirection table may be necessary to remove the
- * flags in the Rx queues.
- * This helps to speed-up the process by avoiding
- * another loop.
- */
- ind_tbl = flow->frxq[i].hrxq->ind_table;
- break;
- }
- if (i == hash_rxq_init_n)
- return;
- if (flow->mark) {
- assert(ind_tbl);
- for (i = 0; i != ind_tbl->queues_n; ++i)
- (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
- }
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].ibv_flow)
- continue;
- claim_zero(mlx5_glue->destroy_flow
- (flow->frxq[i].ibv_flow));
- flow->frxq[i].ibv_flow = NULL;
- mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
- flow->frxq[i].hrxq = NULL;
- }
- DEBUG("Flow %p removed", (void *)flow);
- }
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
+ mlx5_flow_remove(dev, flow);
+ mlx5_flow_rxq_flags_clear(dev);
}
/**
* Add all flows.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param list
* Pointer to a TAILQ flow list.
*
* @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_flow_start(struct priv *priv, struct mlx5_flows *list)
+mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
{
struct rte_flow *flow;
+ struct rte_flow_error error;
+ int ret = 0;
TAILQ_FOREACH(flow, list, next) {
- unsigned int i;
-
- if (flow->drop) {
- flow->frxq[HASH_RXQ_ETH].ibv_flow =
- mlx5_glue->create_flow
- (priv->flow_drop_queue->qp,
- flow->frxq[HASH_RXQ_ETH].ibv_attr);
- if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- DEBUG("Flow %p cannot be applied",
- (void *)flow);
- rte_errno = EINVAL;
- return rte_errno;
- }
- DEBUG("Flow %p applied", (void *)flow);
- /* Next flow. */
- continue;
- }
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (!flow->frxq[i].ibv_attr)
- continue;
- flow->frxq[i].hrxq =
- mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
- flow->rss_conf.rss_key_len,
- hash_rxq_init[i].hash_fields,
- (*flow->queues),
- flow->queues_n);
- if (flow->frxq[i].hrxq)
- goto flow_create;
- flow->frxq[i].hrxq =
- mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
- flow->rss_conf.rss_key_len,
- hash_rxq_init[i].hash_fields,
- (*flow->queues),
- flow->queues_n);
- if (!flow->frxq[i].hrxq) {
- DEBUG("Flow %p cannot be applied",
- (void *)flow);
- rte_errno = EINVAL;
- return rte_errno;
- }
-flow_create:
- flow->frxq[i].ibv_flow =
- mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
- if (!flow->frxq[i].ibv_flow) {
- DEBUG("Flow %p cannot be applied",
- (void *)flow);
- rte_errno = EINVAL;
- return rte_errno;
- }
- DEBUG("Flow %p applied", (void *)flow);
- }
- if (!flow->mark)
- continue;
- for (i = 0; i != flow->queues_n; ++i)
- (*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
+ ret = mlx5_flow_apply(dev, flow, &error);
+ if (ret < 0)
+ goto error;
+ mlx5_flow_rxq_flags_set(dev, flow);
}
return 0;
+error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_flow_stop(dev, list);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
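
The save/restore dance around rte_errno in the error path above recurs throughout this rework (see also mlx5_flow_apply() and mlx5_flow_list_create()): cleanup helpers can fail and overwrite rte_errno, so the original cause is stashed first. A hedged standalone sketch, with do_apply()/do_rollback() as hypothetical stand-ins:

#include <rte_errno.h>

static int do_apply(void);     /* Hypothetical; may set rte_errno. */
static void do_rollback(void); /* Hypothetical; may also set rte_errno. */

static int
apply_or_rollback(void)
{
	int ret;

	if (do_apply() == 0)
		return 0;
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do_rollback();   /* Cleanup must not lose the original cause. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
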
/**
 * Verify the flow list is empty.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return the number of flows not released.
*/
int
-priv_flow_verify(struct priv *priv)
+mlx5_flow_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow *flow;
int ret = 0;
TAILQ_FOREACH(flow, &priv->flows, next) {
- DEBUG("%p: flow %p still referenced", (void *)priv,
- (void *)flow);
+ DRV_LOG(DEBUG, "port %u flow %p still referenced",
+ dev->data->port_id, (void *)flow);
++ret;
}
return ret;
@@ -2319,7 +3205,7 @@ priv_flow_verify(struct priv *priv)
* A VLAN flow mask to apply.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
@@ -2331,7 +3217,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
const struct rte_flow_attr attr = {
.ingress = 1,
- .priority = MLX5_CTRL_FLOW_PRIORITY,
+ .priority = MLX5_FLOW_PRIO_RSVD,
};
struct rte_flow_item items[] = {
{
@@ -2351,9 +3237,20 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
.type = RTE_FLOW_ITEM_TYPE_END,
},
};
+ uint16_t queue[priv->reta_idx_n];
+ struct rte_flow_action_rss action_rss = {
+ .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ .level = 0,
+ .types = priv->rss_conf.rss_hf,
+ .key_len = priv->rss_conf.rss_key_len,
+ .queue_num = priv->reta_idx_n,
+ .key = priv->rss_conf.rss_key,
+ .queue = queue,
+ };
struct rte_flow_action actions[] = {
{
.type = RTE_FLOW_ACTION_TYPE_RSS,
+ .conf = &action_rss,
},
{
.type = RTE_FLOW_ACTION_TYPE_END,
@@ -2362,26 +3259,17 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
struct rte_flow *flow;
struct rte_flow_error error;
unsigned int i;
- union {
- struct rte_flow_action_rss rss;
- struct {
- const struct rte_eth_rss_conf *rss_conf;
- uint16_t num;
- uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
- } local;
- } action_rss;
-
- if (!priv->reta_idx_n)
- return EINVAL;
+
+ if (!priv->reta_idx_n) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
for (i = 0; i != priv->reta_idx_n; ++i)
- action_rss.local.queue[i] = (*priv->reta_idx)[i];
- action_rss.local.rss_conf = &priv->rss_conf;
- action_rss.local.num = priv->reta_idx_n;
- actions[0].conf = (const void *)&action_rss.rss;
- flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
- &error);
+ queue[i] = (*priv->reta_idx)[i];
+ flow = mlx5_flow_list_create(dev, &priv->ctrl_flows, &attr, items,
+ actions, &error);
if (!flow)
- return rte_errno;
+ return -rte_errno;
return 0;
}
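
Note how the rework replaces the old union-based action_rss with the explicit struct rte_flow_action_rss introduced by the 18.05 rte_flow API. From an application, filling the action looks roughly like this hedged example (the queue array and the struct must stay valid until rte_flow_create() returns):

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Illustrative only: hash on IP addresses and spread over queues 0..3. */
static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,   /* Hash on outermost headers. */
	.types = ETH_RSS_IP,
	.key_len = 0, /* Let the PMD choose a key. */
	.queue_num = 4,
	.key = NULL,
	.queue = rss_queues,
};
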
@@ -2396,7 +3284,7 @@ mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
* An Ethernet flow mask to apply.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_ctrl_flow(struct rte_eth_dev *dev,
@@ -2415,14 +3303,11 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
- struct rte_flow_error *error)
+ struct rte_flow_error *error __rte_unused)
{
struct priv *priv = dev->data->dev_private;
- (void)error;
- priv_lock(priv);
- priv_flow_destroy(priv, &priv->flows, flow);
- priv_unlock(priv);
+ mlx5_flow_list_destroy(dev, &priv->flows, flow);
return 0;
}
@@ -2434,152 +3319,161 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
*/
int
mlx5_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error)
+ struct rte_flow_error *error __rte_unused)
{
struct priv *priv = dev->data->dev_private;
- (void)error;
- priv_lock(priv);
- priv_flow_flush(priv, &priv->flows);
- priv_unlock(priv);
+ mlx5_flow_list_flush(dev, &priv->flows);
return 0;
}
-#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
/**
- * Query flow counter.
- *
- * @param cs
- * the counter set.
- * @param counter_value
- * returned data from the counter.
+ * Isolated mode.
*
- * @return
- * 0 on success, a errno value otherwise and rte_errno is set.
+ * @see rte_flow_isolate()
+ * @see rte_flow_ops
*/
-static int
-priv_flow_query_count(struct ibv_counter_set *cs,
- struct mlx5_flow_counter_stats *counter_stats,
- struct rte_flow_query_count *query_count,
- struct rte_flow_error *error)
+int
+mlx5_flow_isolate(struct rte_eth_dev *dev,
+ int enable,
+ struct rte_flow_error *error)
{
- uint64_t counters[2];
- struct ibv_query_counter_set_attr query_cs_attr = {
- .cs = cs,
- .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
- };
- struct ibv_counter_set_data query_out = {
- .out = counters,
- .outlen = 2 * sizeof(uint64_t),
- };
- int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
+ struct priv *priv = dev->data->dev_private;
- if (res) {
- rte_flow_error_set(error, -res,
+ if (dev->data->dev_started) {
+ rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
- "cannot read counter");
- return -res;
- }
- query_count->hits_set = 1;
- query_count->bytes_set = 1;
- query_count->hits = counters[0] - counter_stats->hits;
- query_count->bytes = counters[1] - counter_stats->bytes;
- if (query_count->reset) {
- counter_stats->hits = counters[0];
- counter_stats->bytes = counters[1];
+ "port must be stopped first");
+ return -rte_errno;
}
+ priv->isolated = !!enable;
+ if (enable)
+ dev->dev_ops = &mlx5_dev_ops_isolate;
+ else
+ dev->dev_ops = &mlx5_dev_ops;
return 0;
}
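
As the EBUSY check above enforces, isolated mode can only be toggled on a stopped port. A hedged usage sketch from the application side:

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static int
enable_isolated_mode(uint16_t port_id)
{
	struct rte_flow_error err;

	rte_eth_dev_stop(port_id); /* Returns void in this DPDK release. */
	if (rte_flow_isolate(port_id, 1, &err) < 0)
		return -rte_errno; /* err.message carries the PMD's reason. */
	return rte_eth_dev_start(port_id);
}
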
/**
- * Query a flows.
+ * Query flow counter.
*
- * @see rte_flow_query()
- * @see rte_flow_ops
+ * @param flow
+ * Pointer to the flow.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
-mlx5_flow_query(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- enum rte_flow_action_type action __rte_unused,
- void *data,
- struct rte_flow_error *error)
+static int
+mlx5_flow_query_count(struct rte_flow *flow __rte_unused,
+ void *data __rte_unused,
+ struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- int res = EINVAL;
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ if (flow->modifier & MLX5_FLOW_MOD_COUNT) {
+ struct rte_flow_query_count *qc = data;
+ uint64_t counters[2] = {0, 0};
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = flow->counter->cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int err = mlx5_glue->query_counter_set(&query_cs_attr,
+ &query_out);
- priv_lock(priv);
- if (flow->cs) {
- res = priv_flow_query_count(flow->cs,
- &flow->counter_stats,
- (struct rte_flow_query_count *)data,
- error);
- } else {
- rte_flow_error_set(error, res,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "no counter found for flow");
+ if (err)
+ return rte_flow_error_set
+ (error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ qc->hits_set = 1;
+ qc->bytes_set = 1;
+ qc->hits = counters[0] - flow->counter->hits;
+ qc->bytes = counters[1] - flow->counter->bytes;
+ if (qc->reset) {
+ flow->counter->hits = counters[0];
+ flow->counter->bytes = counters[1];
+ }
+ return 0;
}
- priv_unlock(priv);
- return -res;
-}
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "flow does not have counter");
#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "counters are not available");
+}
/**
- * Isolated mode.
+ * Query a flow.
*
- * @see rte_flow_isolate()
+ * @see rte_flow_query()
* @see rte_flow_ops
*/
int
-mlx5_flow_isolate(struct rte_eth_dev *dev,
- int enable,
- struct rte_flow_error *error)
+mlx5_flow_query(struct rte_eth_dev *dev __rte_unused,
+ struct rte_flow *flow,
+ const struct rte_flow_action *actions,
+ void *data,
+ struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ int ret = 0;
- priv_lock(priv);
- if (dev->data->dev_started) {
- rte_flow_error_set(error, EBUSY,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- "port must be stopped first");
- priv_unlock(priv);
- return -rte_errno;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = mlx5_flow_query_count(flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ if (ret < 0)
+ return ret;
}
- priv->isolated = !!enable;
- if (enable)
- priv->dev->dev_ops = &mlx5_dev_ops_isolate;
- else
- priv->dev->dev_ops = &mlx5_dev_ops;
- priv_unlock(priv);
return 0;
}
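
Since mlx5_flow_query() walks an END-terminated action array, a caller queries the COUNT action roughly as follows (hedged sketch; assumes the flow was created with a COUNT action):

#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

static int
read_flow_counters(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count qc = { .reset = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_query(port_id, flow, actions, &qc, &err) < 0)
		return -1;
	printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", qc.hits, qc.bytes);
	return 0;
}
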
/**
* Convert a flow director filter to a generic flow.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Flow director filter to add.
* @param attributes
* Generic flow parameters structure.
*
* @return
- * 0 on success, errno value on error.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_filter_convert(struct priv *priv,
+mlx5_fdir_filter_convert(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter,
struct mlx5_fdir *attributes)
{
+ struct priv *priv = dev->data->dev_private;
const struct rte_eth_fdir_input *input = &fdir_filter->input;
+ const struct rte_eth_fdir_masks *mask =
+ &dev->data->dev_conf.fdir_conf.mask;
/* Validate queue number. */
if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
- ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
- return EINVAL;
+ DRV_LOG(ERR, "port %u invalid queue number %d",
+ dev->data->port_id, fdir_filter->action.rx_queue);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
attributes->attr.ingress = 1;
attributes->items[0] = (struct rte_flow_item) {
@@ -2600,144 +3494,140 @@ priv_fdir_filter_convert(struct priv *priv,
};
break;
default:
- ERROR("invalid behavior %d", fdir_filter->action.behavior);
- return ENOTSUP;
+ DRV_LOG(ERR, "port %u invalid behavior %d",
+ dev->data->port_id,
+ fdir_filter->action.behavior);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
attributes->queue.index = fdir_filter->action.rx_queue;
+ /* Handle L3. */
switch (fdir_filter->input.flow_type) {
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
attributes->l3.ipv4.hdr = (struct ipv4_hdr){
- .src_addr = input->flow.udp4_flow.ip.src_ip,
- .dst_addr = input->flow.udp4_flow.ip.dst_ip,
- .time_to_live = input->flow.udp4_flow.ip.ttl,
- .type_of_service = input->flow.udp4_flow.ip.tos,
- .next_proto_id = input->flow.udp4_flow.ip.proto,
+ .src_addr = input->flow.ip4_flow.src_ip,
+ .dst_addr = input->flow.ip4_flow.dst_ip,
+ .time_to_live = input->flow.ip4_flow.ttl,
+ .type_of_service = input->flow.ip4_flow.tos,
+ .next_proto_id = input->flow.ip4_flow.proto,
};
- attributes->l4.udp.hdr = (struct udp_hdr){
- .src_port = input->flow.udp4_flow.src_port,
- .dst_port = input->flow.udp4_flow.dst_port,
+ attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = mask->ipv4_mask.src_ip,
+ .dst_addr = mask->ipv4_mask.dst_ip,
+ .time_to_live = mask->ipv4_mask.ttl,
+ .type_of_service = mask->ipv4_mask.tos,
+ .next_proto_id = mask->ipv4_mask.proto,
};
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.spec = &attributes->l3,
- .mask = &attributes->l3,
+ .mask = &attributes->l3_mask,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.ipv6_flow.hop_limits,
+ .proto = input->flow.ipv6_flow.proto,
+ };
+
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.ipv6_flow.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.ipv6_flow.dst_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
+ mask->ipv6_mask.src_ip,
+ RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
+ memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
+ mask->ipv6_mask.dst_ip,
+ RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ .mask = &attributes->l3_mask,
+ };
+ break;
+ default:
+ DRV_LOG(ERR, "port %u invalid flow type %d",
+ dev->data->port_id, fdir_filter->input.flow_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ /* Handle L4. */
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp4_flow.src_port,
+ .dst_port = input->flow.udp4_flow.dst_port,
+ };
+ attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_UDP,
.spec = &attributes->l4,
- .mask = &attributes->l4,
+ .mask = &attributes->l4_mask,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- attributes->l3.ipv4.hdr = (struct ipv4_hdr){
- .src_addr = input->flow.tcp4_flow.ip.src_ip,
- .dst_addr = input->flow.tcp4_flow.ip.dst_ip,
- .time_to_live = input->flow.tcp4_flow.ip.ttl,
- .type_of_service = input->flow.tcp4_flow.ip.tos,
- .next_proto_id = input->flow.tcp4_flow.ip.proto,
- };
attributes->l4.tcp.hdr = (struct tcp_hdr){
.src_port = input->flow.tcp4_flow.src_port,
.dst_port = input->flow.tcp4_flow.dst_port,
};
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV4,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
+ attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
- .mask = &attributes->l4,
- };
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- attributes->l3.ipv4.hdr = (struct ipv4_hdr){
- .src_addr = input->flow.ip4_flow.src_ip,
- .dst_addr = input->flow.ip4_flow.dst_ip,
- .time_to_live = input->flow.ip4_flow.ttl,
- .type_of_service = input->flow.ip4_flow.tos,
- .next_proto_id = input->flow.ip4_flow.proto,
- };
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV4,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
+ .mask = &attributes->l4_mask,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- attributes->l3.ipv6.hdr = (struct ipv6_hdr){
- .hop_limits = input->flow.udp6_flow.ip.hop_limits,
- .proto = input->flow.udp6_flow.ip.proto,
- };
- memcpy(attributes->l3.ipv6.hdr.src_addr,
- input->flow.udp6_flow.ip.src_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- memcpy(attributes->l3.ipv6.hdr.dst_addr,
- input->flow.udp6_flow.ip.dst_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
attributes->l4.udp.hdr = (struct udp_hdr){
.src_port = input->flow.udp6_flow.src_port,
.dst_port = input->flow.udp6_flow.dst_port,
};
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV6,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
+ attributes->l4_mask.udp.hdr = (struct udp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_UDP,
.spec = &attributes->l4,
- .mask = &attributes->l4,
+ .mask = &attributes->l4_mask,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- attributes->l3.ipv6.hdr = (struct ipv6_hdr){
- .hop_limits = input->flow.tcp6_flow.ip.hop_limits,
- .proto = input->flow.tcp6_flow.ip.proto,
- };
- memcpy(attributes->l3.ipv6.hdr.src_addr,
- input->flow.tcp6_flow.ip.src_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- memcpy(attributes->l3.ipv6.hdr.dst_addr,
- input->flow.tcp6_flow.ip.dst_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
attributes->l4.tcp.hdr = (struct tcp_hdr){
.src_port = input->flow.tcp6_flow.src_port,
.dst_port = input->flow.tcp6_flow.dst_port,
};
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV6,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
+ attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
+ .src_port = mask->src_port_mask,
+ .dst_port = mask->dst_port_mask,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
- .mask = &attributes->l4,
+ .mask = &attributes->l4_mask,
};
break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
- attributes->l3.ipv6.hdr = (struct ipv6_hdr){
- .hop_limits = input->flow.ipv6_flow.hop_limits,
- .proto = input->flow.ipv6_flow.proto,
- };
- memcpy(attributes->l3.ipv6.hdr.src_addr,
- input->flow.ipv6_flow.src_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- memcpy(attributes->l3.ipv6.hdr.dst_addr,
- input->flow.ipv6_flow.dst_ip,
- RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
- attributes->items[1] = (struct rte_flow_item){
- .type = RTE_FLOW_ITEM_TYPE_IPV6,
- .spec = &attributes->l3,
- .mask = &attributes->l3,
- };
break;
default:
- ERROR("invalid flow type%d",
- fdir_filter->input.flow_type);
- return ENOTSUP;
+ DRV_LOG(ERR, "port %u invalid flow type %d",
+ dev->data->port_id, fdir_filter->input.flow_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
return 0;
}
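
For reference, a hedged sketch of the legacy flow-director input this converter consumes, steering IPv4/UDP traffic to an RX queue (field names per the legacy rte_eth_ctrl.h structures; addresses and ports are big-endian in that API):

#include <rte_byteorder.h>
#include <rte_ethdev.h>

static const struct rte_eth_fdir_filter fdir_example = {
	.input = {
		.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
		.flow.udp4_flow = {
			.ip = {
				.src_ip = RTE_BE32(0x0a000001), /* 10.0.0.1 */
				.dst_ip = RTE_BE32(0x0a000002), /* 10.0.0.2 */
			},
			.dst_port = RTE_BE16(4789),
		},
	},
	.action = {
		.rx_queue = 3,
		.behavior = RTE_ETH_FDIR_ACCEPT,
	},
};
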
@@ -2745,18 +3635,19 @@ priv_fdir_filter_convert(struct priv *priv,
/**
* Add new flow director filter and store it in list.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Flow director filter to add.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_filter_add(struct priv *priv,
+mlx5_fdir_filter_add(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_fdir attributes = {
.attr.group = 0,
.l2_mask = {
@@ -2765,181 +3656,96 @@ priv_fdir_filter_add(struct priv *priv,
.type = 0,
},
};
- struct mlx5_flow_parse parser = {
- .layer = HASH_RXQ_ETH,
- };
struct rte_flow_error error;
struct rte_flow *flow;
int ret;
- ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
- if (ret)
- return -ret;
- ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
- attributes.actions, &error, &parser);
+ ret = mlx5_fdir_filter_convert(dev, fdir_filter, &attributes);
if (ret)
- return -ret;
- flow = priv_flow_create(priv,
- &priv->flows,
- &attributes.attr,
- attributes.items,
- attributes.actions,
- &error);
+ return ret;
+ flow = mlx5_flow_list_create(dev, &priv->flows, &attributes.attr,
+ attributes.items, attributes.actions,
+ &error);
if (flow) {
- DEBUG("FDIR created %p", (void *)flow);
+ DRV_LOG(DEBUG, "port %u FDIR created %p", dev->data->port_id,
+ (void *)flow);
return 0;
}
- return ENOTSUP;
+ return -rte_errno;
}
/**
* Delete specific filter.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Filter to be deleted.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_filter_delete(struct priv *priv,
- const struct rte_eth_fdir_filter *fdir_filter)
+mlx5_fdir_filter_delete(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_eth_fdir_filter *fdir_filter
+ __rte_unused)
{
- struct mlx5_fdir attributes = {
- .attr.group = 0,
- };
- struct mlx5_flow_parse parser = {
- .create = 1,
- .layer = HASH_RXQ_ETH,
- };
- struct rte_flow_error error;
- struct rte_flow *flow;
- unsigned int i;
- int ret;
-
- ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
- if (ret)
- return -ret;
- ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
- attributes.actions, &error, &parser);
- if (ret)
- goto exit;
- /*
- * Special case for drop action which is only set in the
- * specifications when the flow is created. In this situation the
- * drop specification is missing.
- */
- if (parser.drop) {
- struct ibv_flow_spec_action_drop *drop;
-
- drop = (void *)((uintptr_t)parser.queue[HASH_RXQ_ETH].ibv_attr +
- parser.queue[HASH_RXQ_ETH].offset);
- *drop = (struct ibv_flow_spec_action_drop){
- .type = IBV_FLOW_SPEC_ACTION_DROP,
- .size = sizeof(struct ibv_flow_spec_action_drop),
- };
- parser.queue[HASH_RXQ_ETH].ibv_attr->num_of_specs++;
- }
- TAILQ_FOREACH(flow, &priv->flows, next) {
- struct ibv_flow_attr *attr;
- struct ibv_spec_header *attr_h;
- void *spec;
- struct ibv_flow_attr *flow_attr;
- struct ibv_spec_header *flow_h;
- void *flow_spec;
- unsigned int specs_n;
-
- attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
- flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
- /* Compare first the attributes. */
- if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
- continue;
- if (attr->num_of_specs == 0)
- continue;
- spec = (void *)((uintptr_t)attr +
- sizeof(struct ibv_flow_attr));
- flow_spec = (void *)((uintptr_t)flow_attr +
- sizeof(struct ibv_flow_attr));
- specs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs);
- for (i = 0; i != specs_n; ++i) {
- attr_h = spec;
- flow_h = flow_spec;
- if (memcmp(spec, flow_spec,
- RTE_MIN(attr_h->size, flow_h->size)))
- goto wrong_flow;
- spec = (void *)((uintptr_t)spec + attr_h->size);
- flow_spec = (void *)((uintptr_t)flow_spec +
- flow_h->size);
- }
- /* At this point, the flow match. */
- break;
-wrong_flow:
- /* The flow does not match. */
- continue;
- }
- if (flow)
- priv_flow_destroy(priv, &priv->flows, flow);
-exit:
- for (i = 0; i != hash_rxq_init_n; ++i) {
- if (parser.queue[i].ibv_attr)
- rte_free(parser.queue[i].ibv_attr);
- }
- return -ret;
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
/**
* Update queue for specific filter.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param fdir_filter
* Filter to be updated.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_filter_update(struct priv *priv,
+mlx5_fdir_filter_update(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *fdir_filter)
{
int ret;
- ret = priv_fdir_filter_delete(priv, fdir_filter);
+ ret = mlx5_fdir_filter_delete(dev, fdir_filter);
if (ret)
return ret;
- ret = priv_fdir_filter_add(priv, fdir_filter);
- return ret;
+ return mlx5_fdir_filter_add(dev, fdir_filter);
}
/**
* Flush all filters.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-priv_fdir_filter_flush(struct priv *priv)
+mlx5_fdir_filter_flush(struct rte_eth_dev *dev)
{
- priv_flow_flush(priv, &priv->flows);
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->flows);
}
/**
* Get flow director information.
*
- * @param priv
- * Private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] fdir_info
* Resulting flow director information.
*/
static void
-priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
+mlx5_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
{
struct rte_eth_fdir_masks *mask =
- &priv->dev->data->dev_conf.fdir_conf.mask;
+ &dev->data->dev_conf.fdir_conf.mask;
- fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
+ fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
fdir_info->guarant_spc = 0;
rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
fdir_info->max_flexpayload = 0;
@@ -2953,54 +3759,52 @@ priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
/**
* Deal with flow director operations.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param filter_op
* Operation to perform.
* @param arg
* Pointer to operation-specific structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
+mlx5_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
+ void *arg)
{
enum rte_fdir_mode fdir_mode =
- priv->dev->data->dev_conf.fdir_conf.mode;
- int ret = 0;
+ dev->data->dev_conf.fdir_conf.mode;
if (filter_op == RTE_ETH_FILTER_NOP)
return 0;
if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
- ERROR("%p: flow director mode %d not supported",
- (void *)priv, fdir_mode);
- return EINVAL;
+ DRV_LOG(ERR, "port %u flow director mode %d not supported",
+ dev->data->port_id, fdir_mode);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
switch (filter_op) {
case RTE_ETH_FILTER_ADD:
- ret = priv_fdir_filter_add(priv, arg);
- break;
+ return mlx5_fdir_filter_add(dev, arg);
case RTE_ETH_FILTER_UPDATE:
- ret = priv_fdir_filter_update(priv, arg);
- break;
+ return mlx5_fdir_filter_update(dev, arg);
case RTE_ETH_FILTER_DELETE:
- ret = priv_fdir_filter_delete(priv, arg);
- break;
+ return mlx5_fdir_filter_delete(dev, arg);
case RTE_ETH_FILTER_FLUSH:
- priv_fdir_filter_flush(priv);
+ mlx5_fdir_filter_flush(dev);
break;
case RTE_ETH_FILTER_INFO:
- priv_fdir_info_get(priv, arg);
+ mlx5_fdir_info_get(dev, arg);
break;
default:
- DEBUG("%p: unknown operation %u", (void *)priv,
- filter_op);
- ret = EINVAL;
- break;
+ DRV_LOG(DEBUG, "port %u unknown operation %u",
+ dev->data->port_id, filter_op);
+ rte_errno = EINVAL;
+ return -rte_errno;
}
- return ret;
+ return 0;
}
/**
@@ -3016,7 +3820,7 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
* Pointer to operation-specific structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
@@ -3024,24 +3828,21 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = EINVAL;
- struct priv *priv = dev->data->dev_private;
-
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
+ if (filter_op != RTE_ETH_FILTER_GET) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
*(const void **)arg = &mlx5_flow_ops;
return 0;
case RTE_ETH_FILTER_FDIR:
- priv_lock(priv);
- ret = priv_fdir_ctrl_func(priv, filter_op, arg);
- priv_unlock(priv);
- break;
+ return mlx5_fdir_ctrl_func(dev, filter_op, arg);
default:
- ERROR("%p: filter type (%d) not supported",
- (void *)dev, filter_type);
- break;
+ DRV_LOG(ERR, "port %u filter type (%d) not supported",
+ dev->data->port_id, filter_type);
+ rte_errno = ENOTSUP;
+ return -rte_errno;
}
- return -ret;
+ return 0;
}
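
The RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET branch above is how the generic rte_flow layer discovers a PMD's flow ops; the ethdev core does roughly the equivalent of this hedged sketch:

#include <rte_ethdev.h>
#include <rte_flow_driver.h>

static const struct rte_flow_ops *
flow_ops_get(struct rte_eth_dev *dev)
{
	const struct rte_flow_ops *ops = NULL;

	if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
				      RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL; /* rte_errno set by the PMD. */
	return ops;
}
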
diff --git a/drivers/net/mlx5/mlx5_glue.c b/drivers/net/mlx5/mlx5_glue.c
index 1c4396ad..84f9492a 100644
--- a/drivers/net/mlx5/mlx5_glue.c
+++ b/drivers/net/mlx5/mlx5_glue.c
@@ -1,12 +1,19 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd.
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
#include <errno.h>
+#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
+/*
+ * Not needed by this file; included to work around the lack of off_t
+ * definition for mlx5dv.h with unpatched rdma-core versions.
+ */
+#include <sys/types.h>
+
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -17,6 +24,8 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include <rte_config.h>
+
#include "mlx5_autoconf.h"
#include "mlx5_glue.h"
@@ -287,6 +296,21 @@ mlx5_glue_dv_create_cq(struct ibv_context *context,
return mlx5dv_create_cq(context, cq_attr, mlx5_cq_attr);
}
+static struct ibv_wq *
+mlx5_glue_dv_create_wq(struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr)
+{
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ (void)context;
+ (void)wq_attr;
+ (void)mlx5_wq_attr;
+ return NULL;
+#else
+ return mlx5dv_create_wq(context, wq_attr, mlx5_wq_attr);
+#endif
+}
+
static int
mlx5_glue_dv_query_device(struct ibv_context *ctx,
struct mlx5dv_context *attrs_out)
@@ -307,6 +331,22 @@ mlx5_glue_dv_init_obj(struct mlx5dv_obj *obj, uint64_t obj_type)
return mlx5dv_init_obj(obj, obj_type);
}
+static struct ibv_qp *
+mlx5_glue_dv_create_qp(struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ return mlx5dv_create_qp(context, qp_init_attr_ex, dv_qp_init_attr);
+#else
+ (void)context;
+ (void)qp_init_attr_ex;
+ (void)dv_qp_init_attr;
+ return NULL;
+#endif
+}
+
+alignas(RTE_CACHE_LINE_SIZE)
const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
.version = MLX5_GLUE_VERSION,
.fork_init = mlx5_glue_fork_init,
@@ -347,7 +387,9 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue){
.port_state_str = mlx5_glue_port_state_str,
.cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
.dv_create_cq = mlx5_glue_dv_create_cq,
+ .dv_create_wq = mlx5_glue_dv_create_wq,
.dv_query_device = mlx5_glue_dv_query_device,
.dv_set_context_attr = mlx5_glue_dv_set_context_attr,
.dv_init_obj = mlx5_glue_dv_init_obj,
+ .dv_create_qp = mlx5_glue_dv_create_qp,
};
diff --git a/drivers/net/mlx5/mlx5_glue.h b/drivers/net/mlx5/mlx5_glue.h
index b5efee3b..e584d367 100644
--- a/drivers/net/mlx5/mlx5_glue.h
+++ b/drivers/net/mlx5/mlx5_glue.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd.
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
#ifndef MLX5_GLUE_H_
@@ -31,6 +31,14 @@ struct ibv_counter_set_init_attr;
struct ibv_query_counter_set_attr;
#endif
+#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+struct mlx5dv_qp_init_attr;
+#endif
+
+#ifndef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+struct mlx5dv_wq_init_attr;
+#endif
+
/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
struct mlx5_glue {
const char *version;
@@ -100,12 +108,20 @@ struct mlx5_glue {
(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
struct mlx5dv_cq_init_attr *mlx5_cq_attr);
+ struct ibv_wq *(*dv_create_wq)
+ (struct ibv_context *context,
+ struct ibv_wq_init_attr *wq_attr,
+ struct mlx5dv_wq_init_attr *mlx5_wq_attr);
int (*dv_query_device)(struct ibv_context *ctx_in,
struct mlx5dv_context *attrs_out);
int (*dv_set_context_attr)(struct ibv_context *ibv_ctx,
enum mlx5dv_set_ctx_attr_type type,
void *attr);
int (*dv_init_obj)(struct mlx5dv_obj *obj, uint64_t obj_type);
+ struct ibv_qp *(*dv_create_qp)
+ (struct ibv_context *context,
+ struct ibv_qp_init_attr_ex *qp_init_attr_ex,
+ struct mlx5dv_qp_init_attr *dv_qp_init_attr);
};
const struct mlx5_glue *mlx5_glue;
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index e8a8d459..12ee37f5 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -35,44 +35,52 @@
/**
* Get MAC address by querying netdevice.
*
- * @param[in] priv
- * struct priv for the requested device.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param[out] mac
* MAC address output buffer.
*
* @return
- * 0 on success, -1 on failure and errno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
+mlx5_get_mac(struct rte_eth_dev *dev, uint8_t (*mac)[ETHER_ADDR_LEN])
{
struct ifreq request;
+ int ret;
- if (priv_ifreq(priv, SIOCGIFHWADDR, &request))
- return -1;
+ ret = mlx5_ifreq(dev, SIOCGIFHWADDR, &request, 0);
+ if (ret)
+ return ret;
memcpy(mac, request.ifr_hwaddr.sa_data, ETHER_ADDR_LEN);
return 0;
}
/**
- * DPDK callback to remove a MAC address.
+ * Remove a MAC address from the internal array.
*
* @param dev
* Pointer to Ethernet device structure.
* @param index
* MAC address index.
*/
-void
-mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+static void
+mlx5_internal_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
+ struct priv *priv = dev->data->dev_private;
+ const int vf = priv->config.vf;
+
assert(index < MLX5_MAX_MAC_ADDRESSES);
+ if (is_zero_ether_addr(&dev->data->mac_addrs[index]))
+ return;
+ if (vf)
+ mlx5_nl_mac_addr_remove(dev, &dev->data->mac_addrs[index],
+ index);
memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
- if (!dev->data->promiscuous)
- mlx5_traffic_restart(dev);
}
/**
- * DPDK callback to add a MAC address.
+ * Adds a MAC address to the internal array.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -80,21 +88,23 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
* MAC address to register.
* @param index
* MAC address index.
- * @param vmdq
- * VMDq pool index to associate address with (ignored).
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
-mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
- uint32_t index, uint32_t vmdq)
+static int
+mlx5_internal_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
{
+ struct priv *priv = dev->data->dev_private;
+ const int vf = priv->config.vf;
unsigned int i;
- int ret = 0;
- (void)vmdq;
assert(index < MLX5_MAX_MAC_ADDRESSES);
+ if (is_zero_ether_addr(mac)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
/* First, make sure this address isn't already configured. */
for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
/* Skip this index, it's going to be reconfigured. */
@@ -103,12 +113,74 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac)))
continue;
/* Address already configured elsewhere, return with error. */
- return EADDRINUSE;
+ rte_errno = EADDRINUSE;
+ return -rte_errno;
+ }
+ if (vf) {
+ int ret = mlx5_nl_mac_addr_add(dev, mac, index);
+
+ if (ret)
+ return ret;
}
dev->data->mac_addrs[index] = *mac;
+ return 0;
+}
+
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+void
+mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ int ret;
+
+ if (index >= MLX5_MAX_UC_MAC_ADDRESSES)
+ return;
+ mlx5_internal_mac_addr_remove(dev, index);
+ if (!dev->data->promiscuous) {
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot restart traffic: %s",
+ dev->data->port_id, strerror(rte_errno));
+ }
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (ignored).
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index, uint32_t vmdq __rte_unused)
+{
+ int ret;
+
+ if (index >= MLX5_MAX_UC_MAC_ADDRESSES) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_internal_mac_addr_add(dev, mac, index);
+ if (ret < 0)
+ return ret;
if (!dev->data->promiscuous)
- mlx5_traffic_restart(dev);
- return ret;
+ return mlx5_traffic_restart(dev);
+ return 0;
}
/**
@@ -118,10 +190,43 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
* Pointer to Ethernet device structure.
* @param mac_addr
* MAC address to register.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-void
+int
mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
- DEBUG("%p: setting primary MAC address", (void *)dev);
- mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+ DRV_LOG(DEBUG, "port %u setting primary MAC address",
+ dev->data->port_id);
+ return mlx5_mac_addr_add(dev, mac_addr, 0, 0);
+}
+
+/**
+ * DPDK callback to set multicast addresses list.
+ *
+ * @see rte_eth_dev_set_mc_addr_list()
+ */
+int
+mlx5_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
+{
+ uint32_t i;
+ int ret;
+
+ if (nb_mc_addr >= MLX5_MAX_MC_MAC_ADDRESSES) {
+ rte_errno = ENOSPC;
+ return -rte_errno;
+ }
+ for (i = MLX5_MAX_UC_MAC_ADDRESSES; i != MLX5_MAX_MAC_ADDRESSES; ++i)
+ mlx5_internal_mac_addr_remove(dev, i);
+ i = MLX5_MAX_UC_MAC_ADDRESSES;
+ while (nb_mc_addr--) {
+ ret = mlx5_internal_mac_addr_add(dev, mc_addr_set++, i++);
+ if (ret)
+ return ret;
+ }
+ if (!dev->data->promiscuous)
+ return mlx5_traffic_restart(dev);
+ return 0;
}
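
A hedged application-side sketch of the new callback: subscribing to a single multicast group through the standard ethdev API, which lands in mlx5_set_mc_addr_list() above:

#include <rte_ethdev.h>

static int
join_all_hosts_group(uint16_t port_id)
{
	/* 01:00:5e:00:00:01 maps the IPv4 all-hosts group 224.0.0.1. */
	struct ether_addr mc = {
		.addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, &mc, 1);
}
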
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 857dfcd8..1d1bcb5f 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#ifdef PEDANTIC
@@ -13,362 +13,1174 @@
#include <rte_mempool.h>
#include <rte_malloc.h>
+#include <rte_rwlock.h>
#include "mlx5.h"
+#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_glue.h"
-struct mlx5_check_mempool_data {
+struct mr_find_contig_memsegs_data {
+ uintptr_t addr;
+ uintptr_t start;
+ uintptr_t end;
+ const struct rte_memseg_list *msl;
+};
+
+struct mr_update_mp_data {
+ struct rte_eth_dev *dev;
+ struct mlx5_mr_ctrl *mr_ctrl;
int ret;
- char *start;
- char *end;
};
-/* Called by mlx5_check_mempool() when iterating the memory chunks. */
-static void
-mlx5_check_mempool_cb(struct rte_mempool *mp,
- void *opaque, struct rte_mempool_memhdr *memhdr,
- unsigned int mem_idx)
+/**
+ * Expand B-tree table to a given size. Must not be called while holding
+ * memory_hotplug_lock or priv->mr.rwlock, because rte_realloc() is used.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries for expansion.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_expand(struct mlx5_mr_btree *bt, int n)
+{
+ void *mem;
+ int ret = 0;
+
+ if (n <= bt->size)
+ return ret;
+ /*
+ * A downside of using rte_realloc() directly is that SOCKET_ID_ANY is
+ * used internally when there is no room to expand on the original socket.
+ * As this is a rare case on a very slow path, it is acceptable.
+ * cache_bh[] is initially given enough space, so once expanded it should
+ * rarely need to grow again.
+ */
+ mem = rte_realloc(bt->table, n * sizeof(struct mlx5_mr_cache), 0);
+ if (mem == NULL) {
+ /* Not an error, B-tree search will be skipped. */
+ DRV_LOG(WARNING, "failed to expand MR B-tree (%p) table",
+ (void *)bt);
+ ret = -1;
+ } else {
+ DRV_LOG(DEBUG, "expanded MR B-tree table (size=%u)", n);
+ bt->table = mem;
+ bt->size = n;
+ }
+ return ret;
+}
+
+/**
+ * Look up LKey from given B-tree lookup table, store the last index and return
+ * searched LKey.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param[out] idx
+ * Pointer to index. Even on search failure, the index where the search
+ * stopped is returned so it can be used when inserting a new entry.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mr_btree_lookup(struct mlx5_mr_btree *bt, uint16_t *idx, uintptr_t addr)
{
- struct mlx5_check_mempool_data *data = opaque;
+ struct mlx5_mr_cache *lkp_tbl;
+ uint16_t n;
+ uint16_t base = 0;
- (void)mp;
- (void)mem_idx;
+ assert(bt != NULL);
+ lkp_tbl = *bt->table;
+ n = bt->len;
+ /* First entry must be NULL for comparison. */
+ assert(bt->len > 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
+ /* Binary search. */
+ do {
+ register uint16_t delta = n >> 1;
- /* It already failed, skip the next chunks. */
- if (data->ret != 0)
- return;
- /* It is the first chunk. */
- if (data->start == NULL && data->end == NULL) {
- data->start = memhdr->addr;
- data->end = data->start + memhdr->len;
- return;
+ if (addr < lkp_tbl[base + delta].start) {
+ n = delta;
+ } else {
+ base += delta;
+ n -= delta;
+ }
+ } while (n > 1);
+ assert(addr >= lkp_tbl[base].start);
+ *idx = base;
+ if (addr < lkp_tbl[base].end)
+ return lkp_tbl[base].lkey;
+ /* Not found. */
+ return UINT32_MAX;
+}
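
The lookup above is a branch-light binary search over entries sorted by start address, with tbl[0] acting as a { start = 0, lkey = UINT32_MAX } sentinel and a hit requiring addr to fall inside a half-open [start, end) range. A standalone analogue for illustration:

#include <stdint.h>

struct range {
	uintptr_t start;
	uintptr_t end;
	uint32_t lkey;
};

/* Assumes tbl[0] is the sentinel and entries are sorted by start. */
static uint32_t
range_lookup(const struct range *tbl, uint16_t len, uintptr_t addr)
{
	uint16_t n = len;
	uint16_t base = 0;

	while (n > 1) {
		uint16_t delta = n >> 1;

		if (addr < tbl[base + delta].start) {
			n = delta;
		} else {
			base += delta;
			n -= delta;
		}
	}
	return addr < tbl[base].end ? tbl[base].lkey : UINT32_MAX;
}
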
+
+/**
+ * Insert an entry to B-tree lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param entry
+ * Pointer to new entry to insert.
+ *
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_btree_insert(struct mlx5_mr_btree *bt, struct mlx5_mr_cache *entry)
+{
+ struct mlx5_mr_cache *lkp_tbl;
+ uint16_t idx = 0;
+ size_t shift;
+
+ assert(bt != NULL);
+ assert(bt->len <= bt->size);
+ assert(bt->len > 0);
+ lkp_tbl = *bt->table;
+ /* Find out the slot for insertion. */
+ if (mr_btree_lookup(bt, &idx, entry->start) != UINT32_MAX) {
+ DRV_LOG(DEBUG,
+ "abort insertion to B-tree(%p): already exist at"
+ " idx=%u [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ /* Already exist, return. */
+ return 0;
}
- if (data->end == memhdr->addr) {
- data->end += memhdr->len;
- return;
+ /* If table is full, return error. */
+ if (unlikely(bt->len == bt->size)) {
+ bt->overflow = 1;
+ return -1;
+ }
+ /* Insert entry. */
+ ++idx;
+ shift = (bt->len - idx) * sizeof(struct mlx5_mr_cache);
+ if (shift)
+ memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
+ lkp_tbl[idx] = *entry;
+ bt->len++;
+ DRV_LOG(DEBUG,
+ "inserted B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
+ return 0;
+}
+
+/**
+ * Initialize B-tree and allocate memory for lookup table.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ * @param n
+ * Number of entries to allocate.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
+{
+ if (bt == NULL) {
+ rte_errno = EINVAL;
+ return -rte_errno;
}
- if (data->start == (char *)memhdr->addr + memhdr->len) {
- data->start -= memhdr->len;
+ assert(!bt->table && !bt->size);
+ memset(bt, 0, sizeof(*bt));
+ bt->table = rte_calloc_socket("B-tree table",
+ n, sizeof(struct mlx5_mr_cache),
+ 0, socket);
+ if (bt->table == NULL) {
+ rte_errno = ENOMEM;
+ DEBUG("failed to allocate memory for btree cache on socket %d",
+ socket);
+ return -rte_errno;
+ }
+ bt->size = n;
+ /* First entry must be NULL for binary search. */
+ (*bt->table)[bt->len++] = (struct mlx5_mr_cache) {
+ .lkey = UINT32_MAX,
+ };
+ DEBUG("initialized B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ return 0;
+}
+
+/**
+ * Free B-tree resources.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx5_mr_btree_free(struct mlx5_mr_btree *bt)
+{
+ if (bt == NULL)
return;
+ DEBUG("freeing B-tree %p with table %p",
+ (void *)bt, (void *)bt->table);
+ rte_free(bt->table);
+ memset(bt, 0, sizeof(*bt));
+}
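
Within the PMD these helpers follow a simple lifecycle; a hedged sketch using the mlx5_mr.h internal API exactly as introduced by this patch:

#include <rte_errno.h>
#include "mlx5_mr.h"

static int
btree_lifecycle(int socket)
{
	struct mlx5_mr_btree bt = { 0 };

	if (mlx5_mr_btree_init(&bt, 256, socket) < 0)
		return -rte_errno;
	mlx5_mr_btree_dump(&bt); /* Only the sentinel entry at this point. */
	mlx5_mr_btree_free(&bt);
	return 0;
}
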
+
+/**
+ * Dump all the entries in a B-tree.
+ *
+ * @param bt
+ * Pointer to B-tree structure.
+ */
+void
+mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused)
+{
+#ifndef NDEBUG
+ int idx;
+ struct mlx5_mr_cache *lkp_tbl;
+
+ if (bt == NULL)
+ return;
+ lkp_tbl = *bt->table;
+ for (idx = 0; idx < bt->len; ++idx) {
+ struct mlx5_mr_cache *entry = &lkp_tbl[idx];
+
+ DEBUG("B-tree(%p)[%u],"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR ") lkey=0x%x",
+ (void *)bt, idx, entry->start, entry->end, entry->lkey);
}
- /* Error, mempool is not virtually contiguous. */
- data->ret = -1;
+#endif
}
/**
- * Check if a mempool can be used: it must be virtually contiguous.
+ * Find virtually contiguous memory chunk in a given MR.
*
- * @param[in] mp
- * Pointer to memory pool.
- * @param[out] start
- * Pointer to the start address of the mempool virtual memory area
- * @param[out] end
- * Pointer to the end address of the mempool virtual memory area
+ * @param mr
+ * Pointer to MR structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If not found, this will not be
+ * updated.
+ * @param base_idx
+ * Start index of the memseg bitmap.
*
* @return
- * 0 on success (mempool is virtually contiguous), -1 on error.
+ * Next index to go on lookup.
*/
-static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
- uintptr_t *end)
+static int
+mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
+ int base_idx)
{
- struct mlx5_check_mempool_data data;
+ uintptr_t start = 0;
+ uintptr_t end = 0;
+ uint32_t idx = 0;
- memset(&data, 0, sizeof(data));
- rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
- *start = (uintptr_t)data.start;
- *end = (uintptr_t)data.end;
+ for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
- return data.ret;
+ msl = mr->msl;
+ ms = rte_fbarray_get(&msl->memseg_arr,
+ mr->ms_base_idx + idx);
+ assert(msl->page_sz == ms->hugepage_sz);
+ if (!start)
+ start = ms->addr_64;
+ end = ms->addr_64 + ms->hugepage_sz;
+ } else if (start) {
+ /* Passed the end of a fragment. */
+ break;
+ }
+ }
+ if (start) {
+ /* Found one chunk. */
+ entry->start = start;
+ entry->end = end;
+ entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+ }
+ return idx;
}
/**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
+ * Insert an MR into the global B-tree cache. This may fail when memory is
+ * low; the entry will then have to be found by mr_lookup_dev_list() in
+ * mlx5_mr_create() on a cache miss.
*
- * This function should only be called by txq_mp2mr().
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr
+ * Pointer to MR to insert.
*
- * @param priv
- * Pointer to private structure.
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- * Index of the next available entry.
+ * @return
+ * 0 on success, -1 on failure.
+ */
+static int
+mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx5_mr *mr)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int n;
+
+ DRV_LOG(DEBUG, "port %u inserting MR(%p) to global cache",
+ dev->data->port_id, (void *)mr);
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache entry = { 0, };
+
+ /* Find a contiguous chunk and advance the index. */
+ n = mr_find_next_chunk(mr, &entry, n);
+ if (!entry.end)
+ break;
+ if (mr_btree_insert(&priv->mr.cache, &entry) < 0) {
+ /*
+ * Overflowed, but the global table cannot be expanded
+ * here because that could cause a deadlock.
+ */
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Look up address in the original global MR list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
*
* @return
- * mr on success, NULL on failure.
+ * Found MR on match, NULL otherwise.
*/
-struct mlx5_mr*
-priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *txq,
- struct rte_mempool *mp, unsigned int idx)
+static struct mlx5_mr *
+mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
{
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
- /* Add a new entry, register MR first. */
- DEBUG("%p: discovered new memory pool \"%s\" (%p)",
- (void *)txq_ctrl, mp->name, (void *)mp);
- mr = priv_mr_get(priv, mp);
- if (mr == NULL) {
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DEBUG("Using unregistered mempool 0x%p(%s) in secondary process,"
- " please create mempool before rte_eth_dev_start()",
- (void *)mp, mp->name);
- return NULL;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (addr >= ret.start && addr < ret.end) {
+ /* Found. */
+ *entry = ret;
+ return mr;
+ }
}
- mr = priv_mr_new(priv, mp);
- }
- if (unlikely(mr == NULL)) {
- DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.",
- (void *)txq_ctrl);
- return NULL;
- }
- if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
- /* Table is full, remove oldest entry. */
- DEBUG("%p: MR <-> MP table full, dropping oldest entry.",
- (void *)txq_ctrl);
- --idx;
- priv_mr_release(priv, txq->mp2mr[0]);
- memmove(&txq->mp2mr[0], &txq->mp2mr[1],
- (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
}
- /* Store the new entry. */
- txq_ctrl->txq.mp2mr[idx] = mr;
- DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32,
- (void *)txq_ctrl, mp->name, (void *)mp,
- txq_ctrl->txq.mp2mr[idx]->lkey);
- return mr;
+ return NULL;
}
/**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
- *
- * This function should only be called by txq_mp2mr().
+ * Look up address on device.
*
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- * Index of the next available entry.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry. If no match, this will not be updated.
+ * @param addr
+ * Search key.
*
* @return
- * mr on success, NULL on failure.
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-struct mlx5_mr*
-mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
- unsigned int idx)
+static uint32_t
+mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
{
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct priv *priv = dev->data->dev_private;
+ uint16_t idx;
+ uint32_t lkey = UINT32_MAX;
struct mlx5_mr *mr;
- priv_lock(txq_ctrl->priv);
- mr = priv_txq_mp2mr_reg(txq_ctrl->priv, txq, mp, idx);
- priv_unlock(txq_ctrl->priv);
- return mr;
+ /*
+ * If the global cache has overflowed since it failed to expand the
+ * B-tree table, it can't have all the existing MRs. Then, the address
+ * has to be searched by traversing the original MR list instead, which
+ * is very slow path. Otherwise, the global cache is all inclusive.
+ */
+ if (!unlikely(priv->mr.cache.overflow)) {
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX)
+ *entry = (*priv->mr.cache.table)[idx];
+ } else {
+ /* Falling back to the slowest path. */
+ mr = mr_lookup_dev_list(dev, entry, addr);
+ if (mr != NULL)
+ lkey = entry->lkey;
+ }
+ assert(lkey == UINT32_MAX || (addr >= entry->start &&
+ addr < entry->end));
+ return lkey;
}
-struct mlx5_mp2mr_mbuf_check_data {
- int ret;
-};
+/**
+ * Free MR resources. The MR lock must not be held to avoid a deadlock:
+ * rte_free() can raise a memory free event and the callback function would
+ * then spin on the lock.
+ *
+ * @param mr
+ * Pointer to MR to free.
+ */
+static void
+mr_free(struct mlx5_mr *mr)
+{
+ if (mr == NULL)
+ return;
+ DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
+ if (mr->ibv_mr != NULL)
+ claim_zero(mlx5_glue->dereg_mr(mr->ibv_mr));
+ if (mr->ms_bmp != NULL)
+ rte_bitmap_free(mr->ms_bmp);
+ rte_free(mr);
+}
/**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like a mbuf.
- *
- * @param[in] mp
- * The mempool pointer
- * @param[in] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
- * return value.
- * @param[in] obj
- * Object address.
- * @param index
- * Object index, unused.
+ * Release resources of detached MRs having no online entry.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
*/
static void
-txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- uint32_t index __rte_unused)
+mlx5_mr_garbage_collect(struct rte_eth_dev *dev)
{
- struct mlx5_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf = obj;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr_next;
+ struct mlx5_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
+ /* Must be called from the primary process. */
+ assert(rte_eal_process_type() == RTE_PROC_PRIMARY);
/*
- * Check whether mbuf structure fits element size and whether mempool
- * pointer is valid.
+ * MRs can't be freed while holding the lock because rte_free() could
+ * call the memory free callback function, which would be a deadlock
+ * situation.
*/
- if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
- data->ret = -1;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach the whole free list and release it after unlocking. */
+ free_list = priv->mr.mr_free_list;
+ LIST_INIT(&priv->mr.mr_free_list);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Release resources. */
+ mr_next = LIST_FIRST(&free_list);
+ while (mr_next != NULL) {
+ struct mlx5_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ mr_free(mr);
+ }
+}
+
+/* Called during rte_memseg_contig_walk() by mlx5_mr_create(). */
+static int
+mr_find_contig_memsegs_cb(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct mr_find_contig_memsegs_data *data = arg;
+
+ if (data->addr < ms->addr_64 || data->addr >= ms->addr_64 + len)
+ return 0;
+ /* Found, save it and stop walking. */
+ data->start = ms->addr_64;
+ data->end = ms->addr_64 + len;
+ data->msl = msl;
+ return 1;
}
/**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a TX queue.
+ * Create a new global Memory Region (MR) for a missing virtual address.
+ * Register the entire virtually contiguous memory chunk around the address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ *   created. If creation fails, this will not be updated.
+ * @param addr
+ * Target virtual address to register.
*
- * @param[in] mp
- * Memory Pool to register.
- * @param *arg
- * Pointer to TX queue structure.
+ * @return
+ * Searched LKey on success, UINT32_MAX on failure and rte_errno is set.
*/
-void
-mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
+static uint32_t
+mlx5_mr_create(struct rte_eth_dev *dev, struct mlx5_mr_cache *entry,
+ uintptr_t addr)
{
- struct priv *priv = (struct priv *)arg;
- struct mlx5_mp2mr_mbuf_check_data data = {
- .ret = 0,
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ const struct rte_memseg_list *msl;
+ const struct rte_memseg *ms;
+ struct mlx5_mr *mr = NULL;
+ size_t len;
+ uint32_t ms_n;
+ uint32_t bmp_size;
+ void *bmp_mem;
+ int ms_idx_shift = -1;
+ unsigned int n;
+ struct mr_find_contig_memsegs_data data = {
+ .addr = addr,
};
- struct mlx5_mr *mr;
+ struct mr_find_contig_memsegs_data data_re;
- /* Register mempool only if the first element looks like a mbuf. */
- if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
- data.ret == -1)
- return;
- mr = priv_mr_get(priv, mp);
- if (mr) {
- priv_mr_release(priv, mr);
- return;
+ DRV_LOG(DEBUG, "port %u creating a MR using address (%p)",
+ dev->data->port_id, (void *)addr);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ DRV_LOG(WARNING,
+ "port %u using address (%p) of unregistered mempool"
+ " in secondary process, please create mempool"
+ " before rte_eth_dev_start()",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EPERM;
+ goto err_nolock;
+ }
+ /*
+ * Release detached MRs if any. This can't be called while holding either
+ * memory_hotplug_lock or priv->mr.rwlock. MRs on the free list have
+ * been detached by the memory free event but could not be released
+ * inside the callback due to the deadlock risk. As a result, releasing
+ * resources is quite opportunistic.
+ */
+ mlx5_mr_garbage_collect(dev);
+ /*
+ * Find a contiguous virtual address chunk in use, to which the given
+ * address belongs, in order to register the maximum range. In the best
+ * case, where mempools are not dynamically recreated and '--socket-mem'
+ * is specified as an EAL option, it is very likely to have only one
+ * MR (LKey) per socket and per hugepage size even though the system
+ * memory is highly fragmented.
+ */
+ if (!rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data)) {
+ DRV_LOG(WARNING,
+ "port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_nolock;
+ }
+alloc_resources:
+ /* Addresses must be page-aligned. */
+ assert(rte_is_aligned((void *)data.start, data.msl->page_sz));
+ assert(rte_is_aligned((void *)data.end, data.msl->page_sz));
+ msl = data.msl;
+ ms = rte_mem_virt2memseg((void *)data.start, msl);
+ len = data.end - data.start;
+ assert(msl->page_sz == ms->hugepage_sz);
+ /* Number of memsegs in the range. */
+ ms_n = len / msl->page_sz;
+ DEBUG("port %u extending %p to [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " page_sz=0x%" PRIx64 ", ms_n=%u",
+ dev->data->port_id, (void *)addr,
+ data.start, data.end, msl->page_sz, ms_n);
+ /* Size of memory for bitmap. */
+ bmp_size = rte_bitmap_get_memory_footprint(ms_n);
+ mr = rte_zmalloc_socket(NULL,
+ RTE_ALIGN_CEIL(sizeof(*mr),
+ RTE_CACHE_LINE_SIZE) +
+ bmp_size,
+ RTE_CACHE_LINE_SIZE, msl->socket_id);
+ if (mr == NULL) {
+ DEBUG("port %u unable to allocate memory for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENOMEM;
+ goto err_nolock;
+ }
+ mr->msl = msl;
+ /*
+ * Save the index of the first memseg and initialize memseg bitmap. To
+ * see if a memseg of ms_idx in the memseg-list is still valid, check:
+ * rte_bitmap_get(mr->bmp, ms_idx - mr->ms_base_idx)
+ */
+ mr->ms_base_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
+ mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
+ if (mr->ms_bmp == NULL) {
+ DEBUG("port %u unable to initialize bitamp for a new MR of"
+ " address (%p).",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_nolock;
+ }
+ /*
+ * Recheck whether the extended contiguous chunk is still valid. Because
+ * memory_hotplug_lock can't be held across memory-related calls in a
+ * critical path, the resource allocation above couldn't be done under
+ * the lock. If the memory has changed at this point, try again with
+ * just a single page. If not, go on with the big chunk atomically from
+ * here.
+ */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ data_re = data;
+ if (len > msl->page_sz &&
+ !rte_memseg_contig_walk(mr_find_contig_memsegs_cb, &data_re)) {
+ DEBUG("port %u unable to find virtually contiguous"
+ " chunk for address (%p)."
+ " rte_memseg_contig_walk() failed.",
+ dev->data->port_id, (void *)addr);
+ rte_errno = ENXIO;
+ goto err_memlock;
+ }
+ if (data.start != data_re.start || data.end != data_re.end) {
+ /*
+ * The extended contiguous chunk has been changed. Try again
+ * with a single memseg instead.
+ */
+ data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
+ data.end = data.start + msl->page_sz;
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ mr_free(mr);
+ goto alloc_resources;
+ }
+ assert(data.msl == data_re.msl);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /*
+ * Check that the address is really missing. If another thread has
+ * already created one, or if it is not found due to overflow, abort
+ * and return.
+ */
+ if (mr_lookup_dev(dev, entry, addr) != UINT32_MAX) {
+ /*
+ * Insert into the global cache table. It may fail when memory is
+ * low; in that case this entry will have to be searched here again.
+ */
+ mr_btree_insert(&priv->mr.cache, entry);
+ DEBUG("port %u found MR for %p on final lookup, abort",
+ dev->data->port_id, (void *)addr);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ /*
+ * Must be unlocked before calling rte_free() because
+ * mlx5_mr_mem_event_free_cb() can be called inside.
+ */
+ mr_free(mr);
+ return entry->lkey;
}
- priv_mr_new(priv, mp);
+ /*
+ * Trim the start and end addresses for the verbs MR. Set bits for the
+ * memsegs to register, but exclude ones already registered. The bitmap
+ * can be fragmented.
+ */
+ for (n = 0; n < ms_n; ++n) {
+ uintptr_t start;
+ struct mlx5_mr_cache ret = { 0, };
+
+ start = data_re.start + n * msl->page_sz;
+ /* Exclude memsegs already registered by other MRs. */
+ if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
+ /*
+ * Start from the first unregistered memseg in the
+ * extended range.
+ */
+ if (ms_idx_shift == -1) {
+ mr->ms_base_idx += n;
+ data.start = start;
+ ms_idx_shift = n;
+ }
+ data.end = start + msl->page_sz;
+ rte_bitmap_set(mr->ms_bmp, n - ms_idx_shift);
+ ++mr->ms_n;
+ }
+ }
+ len = data.end - data.start;
+ mr->ms_bmp_n = len / msl->page_sz;
+ assert(ms_idx_shift + mr->ms_bmp_n <= ms_n);
+ /*
+ * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
+ * called while holding the memory lock because it doesn't use
+ * mlx5_alloc_buf_extern(), which eventually calls rte_malloc_socket()
+ * through mlx5_alloc_verbs_buf().
+ */
+ mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)data.start, len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ DEBUG("port %u fail to create a verbs MR for address (%p)",
+ dev->data->port_id, (void *)addr);
+ rte_errno = EINVAL;
+ goto err_mrlock;
+ }
+ assert((uintptr_t)mr->ibv_mr->addr == data.start);
+ assert(mr->ibv_mr->length == len);
+ LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+ DEBUG("port %u MR CREATED (%p) for %p:\n"
+ " [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+ " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+ dev->data->port_id, (void *)mr, (void *)addr,
+ data.start, data.end, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+ /* Insert to the global cache table. */
+ mr_insert_dev_cache(dev, mr);
+ /* Fill in output data. */
+ mr_lookup_dev(dev, entry, addr);
+ /* Lookup can't fail. */
+ assert(entry->lkey != UINT32_MAX);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ return entry->lkey;
+err_mrlock:
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+err_memlock:
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+err_nolock:
+ /*
+ * In case of error, as this can be called from a datapath, a warning
+ * message per error is preferred over aborting. Locks must be released
+ * before calling rte_free() because mlx5_mr_mem_event_free_cb() can be
+ * called from inside it.
+ */
+ mr_free(mr);
+ return UINT32_MAX;
}
/**
- * Register a new memory region from the mempool and store it in the memory
- * region list.
+ * Rebuild the global B-tree cache of the device from the original MR list.
*
- * @param priv
- * Pointer to private structure.
- * @param mp
- * Pointer to the memory pool to register.
- * @return
- * The memory region on success.
+ * @param dev
+ * Pointer to Ethernet device.
*/
-struct mlx5_mr*
-priv_mr_new(struct priv *priv, struct rte_mempool *mp)
+static void
+mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
+ struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
- mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
- if (!mr) {
- DEBUG("unable to configure MR, ibv_reg_mr() failed.");
- return NULL;
+ DRV_LOG(DEBUG, "port %u rebuild dev cache[]", dev->data->port_id);
+ /* Flush cache to rebuild. */
+ priv->mr.cache.len = 1;
+ priv->mr.cache.overflow = 0;
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr)
+ if (mr_insert_dev_cache(dev, mr) < 0)
+ return;
+}
+
+/**
+ * Callback for memory free events. Iterate over freed memsegs and check
+ * whether each belongs to an existing MR. If so, clear the corresponding bit
+ * in the MR's bitmap; as a result, the MR may become fragmented. If an MR
+ * becomes empty, it will be freed later by mlx5_mr_garbage_collect(). Even if
+ * this callback is called from a secondary process, the garbage collector will
+ * run in the primary process, as a secondary process can't call
+ * mlx5_mr_create().
+ *
+ * The global cache must be rebuilt if there's any change and this event has to
+ * be propagated to dataplane threads to flush the local caches.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param addr
+ * Address of freed memory.
+ * @param len
+ * Size of freed memory.
+ */
+static void
+mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_memseg_list *msl;
+ struct mlx5_mr *mr;
+ int ms_n;
+ int i;
+ int rebuild = 0;
+
+ DEBUG("port %u free callback: addr=%p, len=%zu",
+ dev->data->port_id, addr, len);
+ msl = rte_mem_virt2memseg_list(addr);
+ /* addr and len must be page-aligned. */
+ assert((uintptr_t)addr == RTE_ALIGN((uintptr_t)addr, msl->page_sz));
+ assert(len == RTE_ALIGN(len, msl->page_sz));
+ ms_n = len / msl->page_sz;
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Clear bits of freed memsegs from MR. */
+ for (i = 0; i < ms_n; ++i) {
+ const struct rte_memseg *ms;
+ struct mlx5_mr_cache entry;
+ uintptr_t start;
+ int ms_idx;
+ uint32_t pos;
+
+ /* Find MR having this memseg. */
+ start = (uintptr_t)addr + i * msl->page_sz;
+ mr = mr_lookup_dev_list(dev, &entry, start);
+ if (mr == NULL)
+ continue;
+ ms = rte_mem_virt2memseg((void *)start, msl);
+ assert(ms != NULL);
+ assert(msl->page_sz == ms->hugepage_sz);
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ pos = ms_idx - mr->ms_base_idx;
+ assert(rte_bitmap_get(mr->ms_bmp, pos));
+ assert(pos < mr->ms_bmp_n);
+ DEBUG("port %u MR(%p): clear bitmap[%u] for addr %p",
+ dev->data->port_id, (void *)mr, pos, (void *)start);
+ rte_bitmap_clear(mr->ms_bmp, pos);
+ if (--mr->ms_n == 0) {
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ DEBUG("port %u remove MR(%p) from list",
+ dev->data->port_id, (void *)mr);
+ }
+ /*
+ * The MR is fragmented or will be freed; the global cache must be
+ * rebuilt.
+ */
+ rebuild = 1;
}
- if (mlx5_check_mempool(mp, &start, &end) != 0) {
- ERROR("mempool %p: not virtually contiguous",
- (void *)mp);
- return NULL;
+ if (rebuild) {
+ mr_rebuild_dev_cache(dev);
+ /*
+ * Flush local caches by propagating invalidation across cores.
+ * rte_smp_wmb() is enough to synchronize this event. If one of the
+ * freed memsegs is seen by another core, that means the memseg has
+ * been reallocated by the allocator, which happens after this free
+ * call. Therefore, the store below (incrementing the generation
+ * number) is guaranteed to be seen by the other core before that core
+ * sees the newly allocated memory.
+ */
+ ++priv->mr.dev_gen;
+ DEBUG("broadcasting local cache flush, gen=%d",
+ priv->mr.dev_gen);
+ rte_smp_wmb();
}
- DEBUG("mempool %p area start=%p end=%p size=%zu",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- /* Save original addresses for exact MR lookup. */
- mr->start = start;
- mr->end = end;
- /* Round start and end to page boundary if found in memory segments. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
-
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+}
+
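+/*
+ * Consumer side of the generation number incremented above (a sketch under
+ * assumed names; the real datapath helpers live in the Rx/Tx headers): each
+ * queue compares its saved generation against the device one and flushes its
+ * local caches with mlx5_mr_flush_local_cache(), defined below, on mismatch.
+ *
+ *	static inline uint32_t
+ *	addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
+ *	{
+ *		struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ *
+ *		if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+ *			mlx5_mr_flush_local_cache(mr_ctrl);
+ *		return mlx5_tx_addr2mr_bh(txq, addr);
+ *	}
+ */
+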
+/**
+ * Callback for memory events. This can be called from both primary and
+ * secondary processes.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
+ */
+void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct priv *priv;
+ struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
+
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ /* Iterate all the existing mlx5 devices. */
+ LIST_FOREACH(priv, dev_list, mem_event_cb)
+ mlx5_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
}
- DEBUG("mempool %p using start=%p end=%p size=%zu for MR",
- (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- mr->mr = mlx5_glue->reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE);
- mr->mp = mp;
- mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
- rte_atomic32_inc(&mr->refcnt);
- DEBUG("%p: new Memory Region %p refcnt: %d", (void *)priv,
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- LIST_INSERT_HEAD(&priv->mr, mr, next);
- return mr;
}
/**
- * Search the memory region object in the memory region list.
+ * Look up address in the global MR cache table. If not found, create a new MR.
+ * Insert the found/created entry into the local bottom-half cache table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param[out] entry
+ * Pointer to returning MR cache entry, found in the global cache or newly
+ *   created. If creation fails, this is not written.
+ * @param addr
+ * Search key.
*
- * @param priv
- * Pointer to private structure.
- * @param mp
- * Pointer to the memory pool to register.
* @return
- * The memory region on success.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-struct mlx5_mr*
-priv_mr_get(struct priv *priv, struct rte_mempool *mp)
+static uint32_t
+mlx5_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct mlx5_mr_cache *entry, uintptr_t addr)
{
- struct mlx5_mr *mr;
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr_btree *bt = &mr_ctrl->cache_bh;
+ uint16_t idx;
+ uint32_t lkey;
- assert(mp);
- if (LIST_EMPTY(&priv->mr))
- return NULL;
- LIST_FOREACH(mr, &priv->mr, next) {
- if (mr->mp == mp) {
- rte_atomic32_inc(&mr->refcnt);
- DEBUG("Memory Region %p refcnt: %d",
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- return mr;
- }
+ /* If local cache table is full, try to double it. */
+ if (unlikely(bt->len == bt->size))
+ mr_btree_expand(bt, bt->size << 1);
+ /* Look up in the global cache. */
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ lkey = mr_btree_lookup(&priv->mr.cache, &idx, addr);
+ if (lkey != UINT32_MAX) {
+ /* Found. */
+ *entry = (*priv->mr.cache.table)[idx];
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /*
+ * Update the local cache. Even if it fails, return the found entry
+ * to update the top-half cache. Next time, this entry will be found
+ * in the global cache.
+ */
+ mr_btree_insert(bt, entry);
+ return lkey;
}
- return NULL;
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+ /* First time to see the address? Create a new MR. */
+ lkey = mlx5_mr_create(dev, entry, addr);
+ /*
+ * Update the local cache if a new global MR was successfully created.
+ * If creation failed, there's no action to take in this datapath code;
+ * as the returned LKey is invalid, this will eventually make the HW
+ * fail.
+ */
+ if (lkey != UINT32_MAX)
+ mr_btree_insert(bt, entry);
+ return lkey;
}
/**
- * Release the memory region object.
+ * Bottom-half of the LKey search on the datapath. First search in cache_bh[];
+ * on a miss, search the global MR cache table and add the new entry to the
+ * per-queue local caches.
*
- * @param mr
- * Pointer to memory region to release.
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
*
* @return
- * 0 on success, errno on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-int
-priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+static uint32_t
+mlx5_mr_addr2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ uintptr_t addr)
{
- (void)priv;
- assert(mr);
- DEBUG("Memory Region %p refcnt: %d",
- (void *)mr, rte_atomic32_read(&mr->refcnt));
- if (rte_atomic32_dec_and_test(&mr->refcnt)) {
- claim_zero(mlx5_glue->dereg_mr(mr->mr));
- LIST_REMOVE(mr, next);
- rte_free(mr);
- return 0;
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ /* Victim in top-half cache to replace with new entry. */
+ struct mlx5_mr_cache *repl = &mr_ctrl->cache[mr_ctrl->head];
+
+ /* Binary-search MR translation table. */
+ lkey = mr_btree_lookup(&mr_ctrl->cache_bh, &bh_idx, addr);
+ /* Update top-half cache. */
+ if (likely(lkey != UINT32_MAX)) {
+ *repl = (*mr_ctrl->cache_bh.table)[bh_idx];
+ } else {
+ /*
+ * If the local lookup table misses, search the global cache; the
+ * local cache_bh[] will be updated inside if possible. The top-half
+ * cache entry will also be updated.
+ */
+ lkey = mlx5_mr_lookup_dev(dev, mr_ctrl, repl, addr);
+ if (unlikely(lkey == UINT32_MAX))
+ return UINT32_MAX;
}
- return EBUSY;
+ /* Update the most recently used entry. */
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+ return lkey;
}
/**
- * Verify the flow list is empty
+ * Bottom-half of LKey search on Rx.
*
- * @param priv
- * Pointer to private structure.
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
*
- * @return the number of object not released.
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
+{
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ struct priv *priv = rxq_ctrl->priv;
+
+ DRV_LOG(DEBUG,
+ "Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ rxq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
+{
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ struct priv *priv = txq_ctrl->priv;
+
+ DRV_LOG(DEBUG,
+ "Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
+ txq_ctrl->idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
+ return mlx5_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Flush all of the local cache entries.
+ *
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ */
+void
+mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
+{
+ /* Reset the most-recently-used index. */
+ mr_ctrl->mru = 0;
+ /* Reset the linear search array. */
+ mr_ctrl->head = 0;
+ memset(mr_ctrl->cache, 0, sizeof(mr_ctrl->cache));
+ /* Reset the B-tree table. */
+ mr_ctrl->cache_bh.len = 1;
+ mr_ctrl->cache_bh.overflow = 0;
+ /* Update the generation number. */
+ mr_ctrl->cur_gen = *mr_ctrl->dev_gen_ptr;
+ DRV_LOG(DEBUG, "mr_ctrl(%p): flushed, cur_gen=%d",
+ (void *)mr_ctrl, mr_ctrl->cur_gen);
+}
+
+/* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
+static void
+mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned mem_idx __rte_unused)
+{
+ struct mr_update_mp_data *data = opaque;
+ uint32_t lkey;
+
+ /* Stop iteration if failed in the previous walk. */
+ if (data->ret < 0)
+ return;
+ /* Register address of the chunk and update local caches. */
+ lkey = mlx5_mr_addr2mr_bh(data->dev, data->mr_ctrl,
+ (uintptr_t)memhdr->addr);
+ if (lkey == UINT32_MAX)
+ data->ret = -1;
+}
+
+/**
+ * Register entire memory chunks in a Mempool.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param mp
+ * Pointer to registering Mempool.
+ *
+ * @return
+ * 0 on success, -1 on failure.
*/
int
-priv_mr_verify(struct priv *priv)
+mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp)
{
- int ret = 0;
+ struct mr_update_mp_data data = {
+ .dev = dev,
+ .mr_ctrl = mr_ctrl,
+ .ret = 0,
+ };
+
+ rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
+ return data.ret;
+}
+
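+/*
+ * Illustrative call site (a sketch; the rxq and mp names are assumptions): a
+ * queue setup path can pre-register every chunk of its mbuf pool so that the
+ * datapath never takes a miss on the first packets.
+ *
+ *	if (mlx5_mr_update_mp(dev, &rxq->mr_ctrl, mp) < 0)
+ *		return -1;
+ */
+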
+/**
+ * Dump all the created MRs and the global cache entries.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_mr_dump_dev(struct rte_eth_dev *dev __rte_unused)
+{
+#ifndef NDEBUG
+ struct priv *priv = dev->data->dev_private;
struct mlx5_mr *mr;
+ int mr_n = 0;
+ int chunk_n = 0;
- LIST_FOREACH(mr, &priv->mr, next) {
- DEBUG("%p: mr %p still referenced", (void *)priv,
- (void *)mr);
- ++ret;
+ rte_rwlock_read_lock(&priv->mr.rwlock);
+ /* Iterate all the existing MRs. */
+ LIST_FOREACH(mr, &priv->mr.mr_list, mr) {
+ unsigned int n;
+
+ DEBUG("port %u MR[%u], LKey = 0x%x, ms_n = %u, ms_bmp_n = %u",
+ dev->data->port_id, mr_n++,
+ rte_cpu_to_be_32(mr->ibv_mr->lkey),
+ mr->ms_n, mr->ms_bmp_n);
+ if (mr->ms_n == 0)
+ continue;
+ for (n = 0; n < mr->ms_bmp_n; ) {
+ struct mlx5_mr_cache ret = { 0, };
+
+ n = mr_find_next_chunk(mr, &ret, n);
+ if (!ret.end)
+ break;
+ DEBUG(" chunk[%u], [0x%" PRIxPTR ", 0x%" PRIxPTR ")",
+ chunk_n++, ret.start, ret.end);
+ }
}
- return ret;
+ DEBUG("port %u dumping global cache", dev->data->port_id);
+ mlx5_mr_btree_dump(&priv->mr.cache);
+ rte_rwlock_read_unlock(&priv->mr.rwlock);
+#endif
+}
+
+/**
+ * Release all the created MRs and resources. Remove the device from the memory
+ * event callback list.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_mr_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+
+ /* Remove from memory callback device list. */
+ rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
+ LIST_REMOVE(priv, mem_event_cb);
+ rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
+ if (rte_log_get_level(mlx5_logtype) == RTE_LOG_DEBUG)
+ mlx5_mr_dump_dev(dev);
+ rte_rwlock_write_lock(&priv->mr.rwlock);
+ /* Detach from MR list and move to free list. */
+ while (mr_next != NULL) {
+ struct mlx5_mr *mr = mr_next;
+
+ mr_next = LIST_NEXT(mr, mr);
+ LIST_REMOVE(mr, mr);
+ LIST_INSERT_HEAD(&priv->mr.mr_free_list, mr, mr);
+ }
+ LIST_INIT(&priv->mr.mr_list);
+ /* Free global cache. */
+ mlx5_mr_btree_free(&priv->mr.cache);
+ rte_rwlock_write_unlock(&priv->mr.rwlock);
+ /* Free all remaining MRs. */
+ mlx5_mr_garbage_collect(dev);
}
diff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h
new file mode 100644
index 00000000..a57003fe
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_mr.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#ifndef RTE_PMD_MLX5_MR_H_
+#define RTE_PMD_MLX5_MR_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#include <infiniband/mlx5dv.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_eal_memconfig.h>
+#include <rte_ethdev.h>
+#include <rte_rwlock.h>
+#include <rte_bitmap.h>
+
+/* Memory Region object. */
+struct mlx5_mr {
+ LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
+ struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
+ const struct rte_memseg_list *msl;
+ int ms_base_idx; /* Start index of msl->memseg_arr[]. */
+ int ms_n; /* Number of memsegs in use. */
+ uint32_t ms_bmp_n; /* Number of bits in memsegs bit-mask. */
+ struct rte_bitmap *ms_bmp; /* Bit-mask of memsegs belonged to MR. */
+};
+
+/* Cache entry for Memory Region. */
+struct mlx5_mr_cache {
+ uintptr_t start; /* Start address of MR. */
+ uintptr_t end; /* End address of MR. */
+ uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* MR Cache table for Binary search. */
+struct mlx5_mr_btree {
+ uint16_t len; /* Number of entries. */
+ uint16_t size; /* Total number of entries. */
+ int overflow; /* Mark failure of table expansion. */
+ struct mlx5_mr_cache (*table)[];
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx5_mr_ctrl {
+ uint32_t *dev_gen_ptr; /* Generation number of device to poll. */
+ uint32_t cur_gen; /* Generation number saved to flush caches. */
+ uint16_t mru; /* Index of last hit entry in top-half cache. */
+ uint16_t head; /* Index of the oldest entry in top-half cache. */
+ struct mlx5_mr_cache cache[MLX5_MR_CACHE_N]; /* Cache for top-half. */
+ struct mlx5_mr_btree cache_bh; /* Cache for bottom-half. */
+} __rte_packed;
+
+extern struct mlx5_dev_list mlx5_mem_event_cb_list;
+extern rte_rwlock_t mlx5_mem_event_rwlock;
+
+/* First entry must be NULL for comparison. */
+#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
+
+int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
+void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
+void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg);
+int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ struct rte_mempool *mp);
+void mlx5_mr_release(struct rte_eth_dev *dev);
+
+/* Debug purpose functions. */
+void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt);
+void mlx5_mr_dump_dev(struct rte_eth_dev *dev);
+
+/**
+ * Look up the LKey in the given lookup table by linear search. First look up
+ * the last-hit entry; on a miss, the entire array is searched. If found,
+ * update the last-hit index and return the LKey.
+ *
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param[in,out] cached_idx
+ * Pointer to last-hit index.
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+static __rte_always_inline uint32_t
+mlx5_mr_lookup_cache(struct mlx5_mr_cache *lkp_tbl, uint16_t *cached_idx,
+ uint16_t n, uintptr_t addr)
+{
+ uint16_t idx;
+
+ if (likely(addr >= lkp_tbl[*cached_idx].start &&
+ addr < lkp_tbl[*cached_idx].end))
+ return lkp_tbl[*cached_idx].lkey;
+ for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+ if (addr >= lkp_tbl[idx].start &&
+ addr < lkp_tbl[idx].end) {
+ /* Found. */
+ *cached_idx = idx;
+ return lkp_tbl[idx].lkey;
+ }
+ }
+ return UINT32_MAX;
+}
+
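+/*
+ * Illustrative use of the helper above (a sketch under assumed names): the
+ * fast path probes the per-queue top-half cache first and falls back to the
+ * bottom-half search only on a miss.
+ *
+ *	lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ *				    MLX5_MR_CACHE_N, addr);
+ *	if (unlikely(lkey == UINT32_MAX))
+ *		lkey = mlx5_tx_addr2mr_bh(txq, addr);
+ */
+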
+#endif /* RTE_PMD_MLX5_MR_H_ */
diff --git a/drivers/net/mlx5/mlx5_nl.c b/drivers/net/mlx5/mlx5_nl.c
new file mode 100644
index 00000000..d61826ae
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_nl.c
@@ -0,0 +1,916 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <errno.h>
+#include <linux/if_link.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <net/if.h>
+#include <rdma/rdma_netlink.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+
+#include "mlx5.h"
+#include "mlx5_utils.h"
+
+/* Size of the buffer to receive kernel messages */
+#define MLX5_NL_BUF_SIZE (32 * 1024)
+/* Send buffer size for the Netlink socket */
+#define MLX5_SEND_BUF_SIZE 32768
+/* Receive buffer size for the Netlink socket */
+#define MLX5_RECV_BUF_SIZE 32768
+
+/*
+ * Define NDA_RTA as defined in iproute2 sources.
+ *
+ * see in iproute2 sources file include/libnetlink.h
+ */
+#ifndef MLX5_NDA_RTA
+#define MLX5_NDA_RTA(r) \
+ ((struct rtattr *)(((char *)(r)) + NLMSG_ALIGN(sizeof(struct ndmsg))))
+#endif
+
+/*
+ * The following definitions are normally found in rdma/rdma_netlink.h,
+ * however they are so recent that most systems do not expose them yet.
+ */
+#ifndef HAVE_RDMA_NL_NLDEV
+#define RDMA_NL_NLDEV 5
+#endif
+#ifndef HAVE_RDMA_NLDEV_CMD_GET
+#define RDMA_NLDEV_CMD_GET 1
+#endif
+#ifndef HAVE_RDMA_NLDEV_CMD_PORT_GET
+#define RDMA_NLDEV_CMD_PORT_GET 5
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_INDEX
+#define RDMA_NLDEV_ATTR_DEV_INDEX 1
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_DEV_NAME
+#define RDMA_NLDEV_ATTR_DEV_NAME 2
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_PORT_INDEX
+#define RDMA_NLDEV_ATTR_PORT_INDEX 3
+#endif
+#ifndef HAVE_RDMA_NLDEV_ATTR_NDEV_INDEX
+#define RDMA_NLDEV_ATTR_NDEV_INDEX 50
+#endif
+
+/* These are normally found in linux/if_link.h. */
+#ifndef HAVE_IFLA_PHYS_SWITCH_ID
+#define IFLA_PHYS_SWITCH_ID 36
+#endif
+#ifndef HAVE_IFLA_PHYS_PORT_NAME
+#define IFLA_PHYS_PORT_NAME 38
+#endif
+
+/* Add/remove MAC address through Netlink */
+struct mlx5_nl_mac_addr {
+ struct ether_addr (*mac)[];
+ /**< MAC address handled by the device. */
+ int mac_n; /**< Number of addresses in the array. */
+};
+
+/** Data structure used by mlx5_nl_ifindex_cb(). */
+struct mlx5_nl_ifindex_data {
+ const char *name; /**< IB device name (in). */
+ uint32_t ibindex; /**< IB device index (out). */
+ uint32_t ifindex; /**< Network interface index (out). */
+};
+
+/**
+ * Opens a Netlink socket.
+ *
+ * @param protocol
+ * Netlink protocol (e.g. NETLINK_ROUTE, NETLINK_RDMA).
+ *
+ * @return
+ * A file descriptor on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+int
+mlx5_nl_init(int protocol)
+{
+ int fd;
+ int sndbuf_size = MLX5_SEND_BUF_SIZE;
+ int rcvbuf_size = MLX5_RECV_BUF_SIZE;
+ struct sockaddr_nl local = {
+ .nl_family = AF_NETLINK,
+ };
+ int ret;
+
+ fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol);
+ if (fd == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ ret = bind(fd, (struct sockaddr *)&local, sizeof(local));
+ if (ret == -1) {
+ rte_errno = errno;
+ goto error;
+ }
+ return fd;
+error:
+ close(fd);
+ return -rte_errno;
+}
+
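+/*
+ * Illustrative only: open a route-family Netlink socket for the MAC and
+ * device-flags requests below and close it when done (a sketch, error
+ * handling elided; rte_errno is already set on failure).
+ *
+ *	int nl_route = mlx5_nl_init(NETLINK_ROUTE);
+ *
+ *	if (nl_route < 0)
+ *		return nl_route;
+ *	...
+ *	close(nl_route);
+ */
+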
+/**
+ * Send a request message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * Netlink socket file descriptor.
+ * @param[in] nh
+ *   The Netlink message sent to the kernel.
+ * @param[in] sn
+ * Sequence number.
+ * @param[in] req
+ * Pointer to the request structure.
+ * @param[in] len
+ * Length of the request in bytes.
+ *
+ * @return
+ * The number of sent bytes on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_nl_request(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn, void *req,
+ int len)
+{
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov[2] = {
+ { .iov_base = nh, .iov_len = sizeof(*nh), },
+ { .iov_base = req, .iov_len = len, },
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = iov,
+ .msg_iovlen = 2,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = sn;
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return send_bytes;
+}
+
+/**
+ * Send a message to the kernel on the Netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * The Netlink socket file descriptor used for communication.
+ * @param[in] nh
+ *   The Netlink message sent to the kernel.
+ * @param[in] sn
+ * Sequence number.
+ *
+ * @return
+ * The number of sent bytes on success, a negative errno value otherwise and
+ * rte_errno is set.
+ */
+static int
+mlx5_nl_send(int nlsk_fd, struct nlmsghdr *nh, uint32_t sn)
+{
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov = {
+ .iov_base = nh,
+ .iov_len = nh->nlmsg_len,
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = sn;
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ return send_bytes;
+}
+
+/**
+ * Receive a message from the kernel on the Netlink socket, following
+ * mlx5_nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The Netlink socket file descriptor used for communication.
+ * @param[in] sn
+ * Sequence number.
+ * @param[in] cb
+ * The callback function to call for each Netlink message received.
+ * @param[in, out] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_recv(int nlsk_fd, uint32_t sn, int (*cb)(struct nlmsghdr *, void *arg),
+ void *arg)
+{
+ struct sockaddr_nl sa;
+ char buf[MLX5_RECV_BUF_SIZE];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ /* One message at a time */
+ .msg_iovlen = 1,
+ };
+ int multipart = 0;
+ int ret = 0;
+
+ do {
+ struct nlmsghdr *nh;
+ int recv_bytes = 0;
+
+ do {
+ recv_bytes = recvmsg(nlsk_fd, &msg, 0);
+ if (recv_bytes == -1) {
+ rte_errno = errno;
+ return -rte_errno;
+ }
+ nh = (struct nlmsghdr *)buf;
+ } while (nh->nlmsg_seq != sn);
+ for (;
+ NLMSG_OK(nh, (unsigned int)recv_bytes);
+ nh = NLMSG_NEXT(nh, recv_bytes)) {
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err_data = NLMSG_DATA(nh);
+
+ if (err_data->error < 0) {
+ rte_errno = -err_data->error;
+ return -rte_errno;
+ }
+ /* Ack message. */
+ return 0;
+ }
+ /* Multi-part msgs and their trailing DONE message. */
+ if (nh->nlmsg_flags & NLM_F_MULTI) {
+ if (nh->nlmsg_type == NLMSG_DONE)
+ return 0;
+ multipart = 1;
+ }
+ if (cb) {
+ ret = cb(nh, arg);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ } while (multipart);
+ return ret;
+}
+
+/**
+ * Parse Netlink message to retrieve the bridge MAC address.
+ *
+ * @param nh
+ * Pointer to Netlink Message Header.
+ * @param arg
+ *   PMD data registered with this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_nl_mac_addr *data = arg;
+ struct ndmsg *r = NLMSG_DATA(nh);
+ struct rtattr *attribute;
+ int len;
+
+ len = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*r));
+ for (attribute = MLX5_NDA_RTA(r);
+ RTA_OK(attribute, len);
+ attribute = RTA_NEXT(attribute, len)) {
+ if (attribute->rta_type == NDA_LLADDR) {
+ if (data->mac_n == MLX5_MAX_MAC_ADDRESSES) {
+ DRV_LOG(WARNING,
+ "not enough room to finalize the"
+ " request");
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+#ifndef NDEBUG
+ char m[18];
+
+ ether_format_addr(m, 18, RTA_DATA(attribute));
+ DRV_LOG(DEBUG, "bridge MAC address %s", m);
+#endif
+ memcpy(&(*data->mac)[data->mac_n++],
+ RTA_DATA(attribute), ETHER_ADDR_LEN);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Get bridge MAC addresses.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param[out] mac
+ *   Pointer to the array of MAC addresses to fill.
+ *   Its size should be MLX5_MAX_MAC_ADDRESSES.
+ * @param[out] mac_n
+ *   Number of entries filled in the MAC array.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_list(struct rte_eth_dev *dev, struct ether_addr (*mac)[],
+ int *mac_n)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifm;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_GETNEIGH,
+ .nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ },
+ .ifm = {
+ .ifi_family = PF_BRIDGE,
+ .ifi_index = iface_idx,
+ },
+ };
+ struct mlx5_nl_mac_addr data = {
+ .mac = mac,
+ .mac_n = 0,
+ };
+ int fd;
+ int ret;
+ uint32_t sn = priv->nl_sn++;
+
+ if (priv->nl_socket_route == -1)
+ return 0;
+ fd = priv->nl_socket_route;
+ ret = mlx5_nl_request(fd, &req.hdr, sn, &req.ifm,
+ sizeof(struct ifinfomsg));
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(fd, sn, mlx5_nl_mac_addr_cb, &data);
+ if (ret < 0)
+ goto error;
+ *mac_n = data.mac_n;
+ return 0;
+error:
+ DRV_LOG(DEBUG, "port %u cannot retrieve MAC address list %s",
+ dev->data->port_id, strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Modify the MAC address neighbour table with Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to consider.
+ * @param add
+ * 1 to add the MAC address, 0 to remove the MAC address.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_mac_addr_modify(struct rte_eth_dev *dev, struct ether_addr *mac,
+ int add)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ndmsg ndm;
+ struct rtattr rta;
+ uint8_t buffer[ETHER_ADDR_LEN];
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg)),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE |
+ NLM_F_EXCL | NLM_F_ACK,
+ .nlmsg_type = add ? RTM_NEWNEIGH : RTM_DELNEIGH,
+ },
+ .ndm = {
+ .ndm_family = PF_BRIDGE,
+ .ndm_state = NUD_NOARP | NUD_PERMANENT,
+ .ndm_ifindex = iface_idx,
+ .ndm_flags = NTF_SELF,
+ },
+ .rta = {
+ .rta_type = NDA_LLADDR,
+ .rta_len = RTA_LENGTH(ETHER_ADDR_LEN),
+ },
+ };
+ int fd;
+ int ret;
+ uint32_t sn = priv->nl_sn++;
+
+ if (priv->nl_socket_route == -1)
+ return 0;
+ fd = priv->nl_socket_route;
+ memcpy(RTA_DATA(&req.rta), mac, ETHER_ADDR_LEN);
+ req.hdr.nlmsg_len = NLMSG_ALIGN(req.hdr.nlmsg_len) +
+ RTA_ALIGN(req.rta.rta_len);
+ ret = mlx5_nl_send(fd, &req.hdr, sn);
+ if (ret < 0)
+ goto error;
+ ret = mlx5_nl_recv(fd, sn, NULL, NULL);
+ if (ret < 0)
+ goto error;
+ return 0;
+error:
+ DRV_LOG(DEBUG,
+ "port %u cannot %s MAC address %02X:%02X:%02X:%02X:%02X:%02X"
+ " %s",
+ dev->data->port_id,
+ add ? "add" : "remove",
+ mac->addr_bytes[0], mac->addr_bytes[1],
+ mac->addr_bytes[2], mac->addr_bytes[3],
+ mac->addr_bytes[4], mac->addr_bytes[5],
+ strerror(rte_errno));
+ return -rte_errno;
+}
+
+/**
+ * Add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
+ ret = mlx5_nl_mac_addr_modify(dev, mac, 1);
+ if (!ret)
+ BITFIELD_SET(priv->mac_own, index);
+ if (ret == -EEXIST)
+ return 0;
+ return ret;
+}
+
+/**
+ * Remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param mac
+ * MAC address to remove.
+ * @param index
+ * MAC address index.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_mac_addr_remove(struct rte_eth_dev *dev, struct ether_addr *mac,
+ uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ BITFIELD_RESET(priv->mac_own, index);
+ return mlx5_nl_mac_addr_modify(dev, mac, 0);
+}
+
+/**
+ * Synchronize the Netlink bridge table with the internal table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_nl_mac_addr_sync(struct rte_eth_dev *dev)
+{
+ struct ether_addr macs[MLX5_MAX_MAC_ADDRESSES];
+ int macs_n = 0;
+ int i;
+ int ret;
+
+ ret = mlx5_nl_mac_addr_list(dev, &macs, &macs_n);
+ if (ret)
+ return;
+ for (i = 0; i != macs_n; ++i) {
+ int j;
+
+ /* Verify the address is not in the array yet. */
+ for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j)
+ if (is_same_ether_addr(&macs[i],
+ &dev->data->mac_addrs[j]))
+ break;
+ if (j != MLX5_MAX_MAC_ADDRESSES)
+ continue;
+ /* Find the first entry available. */
+ for (j = 0; j != MLX5_MAX_MAC_ADDRESSES; ++j) {
+ if (is_zero_ether_addr(&dev->data->mac_addrs[j])) {
+ dev->data->mac_addrs[j] = macs[i];
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * Flush all added MAC addresses.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_nl_mac_addr_flush(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ int i;
+
+ for (i = MLX5_MAX_MAC_ADDRESSES - 1; i >= 0; --i) {
+ struct ether_addr *m = &dev->data->mac_addrs[i];
+
+ if (BITFIELD_ISSET(priv->mac_own, i))
+ mlx5_nl_mac_addr_remove(dev, m, i);
+ }
+}
+
+/**
+ * Enable promiscuous / all multicast mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param flags
+ * IFF_PROMISC for promiscuous, IFF_ALLMULTI for allmulti.
+ * @param enable
+ *   Nonzero to enable, zero to disable.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_device_flags(struct rte_eth_dev *dev, uint32_t flags, int enable)
+{
+ struct priv *priv = dev->data->dev_private;
+ unsigned int iface_idx = mlx5_ifindex(dev);
+ struct {
+ struct nlmsghdr hdr;
+ struct ifinfomsg ifi;
+ } req = {
+ .hdr = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlmsg_type = RTM_NEWLINK,
+ .nlmsg_flags = NLM_F_REQUEST,
+ },
+ .ifi = {
+ .ifi_flags = enable ? flags : 0,
+ .ifi_change = flags,
+ .ifi_index = iface_idx,
+ },
+ };
+ int fd;
+ int ret;
+
+ assert(!(flags & ~(IFF_PROMISC | IFF_ALLMULTI)));
+ if (priv->nl_socket_route < 0)
+ return 0;
+ fd = priv->nl_socket_route;
+ ret = mlx5_nl_send(fd, &req.hdr, priv->nl_sn++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Enable promiscuous mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param enable
+ *   Nonzero to enable, zero to disable.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_promisc(struct rte_eth_dev *dev, int enable)
+{
+ int ret = mlx5_nl_device_flags(dev, IFF_PROMISC, enable);
+
+ if (ret)
+ DRV_LOG(DEBUG,
+ "port %u cannot %s promisc mode: Netlink error %s",
+ dev->data->port_id, enable ? "enable" : "disable",
+ strerror(rte_errno));
+ return ret;
+}
+
+/**
+ * Enable all multicast mode through Netlink.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param enable
+ *   Nonzero to enable, zero to disable.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_allmulti(struct rte_eth_dev *dev, int enable)
+{
+ int ret = mlx5_nl_device_flags(dev, IFF_ALLMULTI, enable);
+
+ if (ret)
+ DRV_LOG(DEBUG,
+ "port %u cannot %s allmulti mode: Netlink error %s",
+ dev->data->port_id, enable ? "enable" : "disable",
+ strerror(rte_errno));
+ return ret;
+}
+
+/**
+ * Process network interface information from Netlink message.
+ *
+ * @param nh
+ * Pointer to Netlink message header.
+ * @param arg
+ * Opaque data pointer for this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_ifindex_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_nl_ifindex_data *data = arg;
+ size_t off = NLMSG_HDRLEN;
+ uint32_t ibindex = 0;
+ uint32_t ifindex = 0;
+ int found = 0;
+
+ if (nh->nlmsg_type !=
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET) &&
+ nh->nlmsg_type !=
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET))
+ goto error;
+ while (off < nh->nlmsg_len) {
+ struct nlattr *na = (void *)((uintptr_t)nh + off);
+ void *payload = (void *)((uintptr_t)na + NLA_HDRLEN);
+
+ if (na->nla_len > nh->nlmsg_len - off)
+ goto error;
+ switch (na->nla_type) {
+ case RDMA_NLDEV_ATTR_DEV_INDEX:
+ ibindex = *(uint32_t *)payload;
+ break;
+ case RDMA_NLDEV_ATTR_DEV_NAME:
+ if (!strcmp(payload, data->name))
+ found = 1;
+ break;
+ case RDMA_NLDEV_ATTR_NDEV_INDEX:
+ ifindex = *(uint32_t *)payload;
+ break;
+ default:
+ break;
+ }
+ off += NLA_ALIGN(na->nla_len);
+ }
+ if (found) {
+ data->ibindex = ibindex;
+ data->ifindex = ifindex;
+ }
+ return 0;
+error:
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Get index of network interface associated with some IB device.
+ *
+ * This is the only somewhat safe method to avoid resorting to heuristics
+ * when faced with port representors. Unfortunately it requires at least
+ * Linux 4.17.
+ *
+ * @param nl
+ * Netlink socket of the RDMA kind (NETLINK_RDMA).
+ * @param[in] name
+ * IB device name.
+ *
+ * @return
+ * A valid (nonzero) interface index on success, 0 otherwise and rte_errno
+ * is set.
+ */
+unsigned int
+mlx5_nl_ifindex(int nl, const char *name)
+{
+ static const uint32_t pindex = 1;
+ uint32_t seq = random();
+ struct mlx5_nl_ifindex_data data = {
+ .name = name,
+ .ibindex = 0, /* Determined during first pass. */
+ .ifindex = 0, /* Determined during second pass. */
+ };
+ union {
+ struct nlmsghdr nh;
+ uint8_t buf[NLMSG_HDRLEN +
+ NLA_HDRLEN + NLA_ALIGN(sizeof(data.ibindex)) +
+ NLA_HDRLEN + NLA_ALIGN(sizeof(pindex))];
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH(0),
+ .nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_GET),
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP,
+ },
+ };
+ struct nlattr *na;
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!data.ibindex)
+ goto error;
+ ++seq;
+ req.nh.nlmsg_type = RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
+ RDMA_NLDEV_CMD_PORT_GET);
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.buf) - NLMSG_HDRLEN);
+ na = (void *)((uintptr_t)req.buf + NLMSG_HDRLEN);
+ na->nla_len = NLA_HDRLEN + sizeof(data.ibindex);
+ na->nla_type = RDMA_NLDEV_ATTR_DEV_INDEX;
+ memcpy((void *)((uintptr_t)na + NLA_HDRLEN),
+ &data.ibindex, sizeof(data.ibindex));
+ na = (void *)((uintptr_t)na + NLA_ALIGN(na->nla_len));
+ na->nla_len = NLA_HDRLEN + sizeof(pindex);
+ na->nla_type = RDMA_NLDEV_ATTR_PORT_INDEX;
+ memcpy((void *)((uintptr_t)na + NLA_HDRLEN),
+ &pindex, sizeof(pindex));
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret < 0)
+ return 0;
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_ifindex_cb, &data);
+ if (ret < 0)
+ return 0;
+ if (!data.ifindex)
+ goto error;
+ return data.ifindex;
+error:
+ rte_errno = ENODEV;
+ return 0;
+}
+
+/**
+ * Process switch information from Netlink message.
+ *
+ * @param nh
+ * Pointer to Netlink message header.
+ * @param arg
+ * Opaque data pointer for this callback.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_switch_info_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct mlx5_switch_info info = {
+ .master = 0,
+ .representor = 0,
+ .port_name = 0,
+ .switch_id = 0,
+ };
+ size_t off = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ bool port_name_set = false;
+ bool switch_id_set = false;
+
+ if (nh->nlmsg_type != RTM_NEWLINK)
+ goto error;
+ while (off < nh->nlmsg_len) {
+ struct rtattr *ra = (void *)((uintptr_t)nh + off);
+ void *payload = RTA_DATA(ra);
+ char *end;
+ unsigned int i;
+
+ if (ra->rta_len > nh->nlmsg_len - off)
+ goto error;
+ switch (ra->rta_type) {
+ case IFLA_PHYS_PORT_NAME:
+ errno = 0;
+ info.port_name = strtol(payload, &end, 0);
+ if (errno ||
+ (size_t)(end - (char *)payload) != strlen(payload))
+ goto error;
+ port_name_set = true;
+ break;
+ case IFLA_PHYS_SWITCH_ID:
+ info.switch_id = 0;
+ for (i = 0; i < RTA_PAYLOAD(ra); ++i) {
+ info.switch_id <<= 8;
+ info.switch_id |= ((uint8_t *)payload)[i];
+ }
+ switch_id_set = true;
+ break;
+ }
+ off += RTA_ALIGN(ra->rta_len);
+ }
+ info.master = switch_id_set && !port_name_set;
+ info.representor = switch_id_set && port_name_set;
+ memcpy(arg, &info, sizeof(info));
+ return 0;
+error:
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
+/**
+ * Get switch information associated with network interface.
+ *
+ * @param nl
+ * Netlink socket of the ROUTE kind (NETLINK_ROUTE).
+ * @param ifindex
+ * Network interface index.
+ * @param[out] info
+ * Switch information object, populated in case of success.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_switch_info(int nl, unsigned int ifindex, struct mlx5_switch_info *info)
+{
+ uint32_t seq = random();
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ } req = {
+ .nh = {
+ .nlmsg_len = NLMSG_LENGTH(sizeof(req.info)),
+ .nlmsg_type = RTM_GETLINK,
+ .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+ },
+ .info = {
+ .ifi_family = AF_UNSPEC,
+ .ifi_index = ifindex,
+ },
+ };
+ int ret;
+
+ ret = mlx5_nl_send(nl, &req.nh, seq);
+ if (ret >= 0)
+ ret = mlx5_nl_recv(nl, seq, mlx5_nl_switch_info_cb, info);
+ return ret;
+}
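
To make the master/representor classification above concrete, here is a hedged caller sketch; the helper name and printf reporting are illustrative only:

#include <stdio.h>

/* Hedged sketch: report how an interface is classified. Assumes an
 * open NETLINK_ROUTE socket descriptor "nl_route". */
static void
example_print_switch_role(int nl_route, unsigned int ifindex)
{
        struct mlx5_switch_info info;

        if (mlx5_nl_switch_info(nl_route, ifindex, &info) < 0)
                return;
        if (info.master)
                printf("ifindex %u is a switch master\n", ifindex);
        else if (info.representor)
                printf("ifindex %u is a representor of port %d\n",
                       ifindex, (int)info.port_name);
}
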
diff --git a/drivers/net/mlx5/mlx5_nl_flow.c b/drivers/net/mlx5/mlx5_nl_flow.c
new file mode 100644
index 00000000..a1c8c340
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_nl_flow.c
@@ -0,0 +1,1248 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 6WIND S.A.
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <libmnl/libmnl.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/pkt_cls.h>
+#include <linux/pkt_sched.h>
+#include <linux/rtnetlink.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/tc_act/tc_mirred.h>
+#include <netinet/in.h>
+#include <stdalign.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+
+#include "mlx5.h"
+#include "mlx5_autoconf.h"
+
+#ifdef HAVE_TC_ACT_VLAN
+
+#include <linux/tc_act/tc_vlan.h>
+
+#else /* HAVE_TC_ACT_VLAN */
+
+#define TCA_VLAN_ACT_POP 1
+#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
+#define TCA_VLAN_PARMS 2
+#define TCA_VLAN_PUSH_VLAN_ID 3
+#define TCA_VLAN_PUSH_VLAN_PROTOCOL 4
+#define TCA_VLAN_PAD 5
+#define TCA_VLAN_PUSH_VLAN_PRIORITY 6
+
+struct tc_vlan {
+ tc_gen;
+ int v_action;
+};
+
+#endif /* HAVE_TC_ACT_VLAN */
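
For readers without kernel headers at hand, the tc_gen macro referenced by the fallback structure comes from linux/pkt_cls.h; expanded, struct tc_vlan should look roughly like this (a sketch based on the kernel definition, with a hypothetical name to avoid clashing):

/* Approximate expansion of struct tc_vlan after the tc_gen macro. */
struct tc_vlan_expanded {
        __u32 index;
        __u32 capab;
        int action;     /* generic TC action, e.g. TC_ACT_PIPE */
        int refcnt;
        int bindcnt;
        int v_action;   /* TCA_VLAN_ACT_{POP,PUSH,MODIFY} */
};
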
+
+/* Normally found in linux/netlink.h. */
+#ifndef NETLINK_CAP_ACK
+#define NETLINK_CAP_ACK 10
+#endif
+
+/* Normally found in linux/pkt_sched.h. */
+#ifndef TC_H_MIN_INGRESS
+#define TC_H_MIN_INGRESS 0xfff2u
+#endif
+
+/* Normally found in linux/pkt_cls.h. */
+#ifndef TCA_CLS_FLAGS_SKIP_SW
+#define TCA_CLS_FLAGS_SKIP_SW (1 << 1)
+#endif
+#ifndef HAVE_TCA_FLOWER_ACT
+#define TCA_FLOWER_ACT 3
+#endif
+#ifndef HAVE_TCA_FLOWER_FLAGS
+#define TCA_FLOWER_FLAGS 22
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_TYPE
+#define TCA_FLOWER_KEY_ETH_TYPE 8
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST
+#define TCA_FLOWER_KEY_ETH_DST 4
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_DST_MASK
+#define TCA_FLOWER_KEY_ETH_DST_MASK 5
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC
+#define TCA_FLOWER_KEY_ETH_SRC 6
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_ETH_SRC_MASK
+#define TCA_FLOWER_KEY_ETH_SRC_MASK 7
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IP_PROTO
+#define TCA_FLOWER_KEY_IP_PROTO 9
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC
+#define TCA_FLOWER_KEY_IPV4_SRC 10
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_SRC_MASK
+#define TCA_FLOWER_KEY_IPV4_SRC_MASK 11
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST
+#define TCA_FLOWER_KEY_IPV4_DST 12
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV4_DST_MASK
+#define TCA_FLOWER_KEY_IPV4_DST_MASK 13
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC
+#define TCA_FLOWER_KEY_IPV6_SRC 14
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_SRC_MASK
+#define TCA_FLOWER_KEY_IPV6_SRC_MASK 15
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST
+#define TCA_FLOWER_KEY_IPV6_DST 16
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_IPV6_DST_MASK
+#define TCA_FLOWER_KEY_IPV6_DST_MASK 17
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC
+#define TCA_FLOWER_KEY_TCP_SRC 18
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_SRC_MASK
+#define TCA_FLOWER_KEY_TCP_SRC_MASK 35
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST
+#define TCA_FLOWER_KEY_TCP_DST 19
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_TCP_DST_MASK
+#define TCA_FLOWER_KEY_TCP_DST_MASK 36
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC
+#define TCA_FLOWER_KEY_UDP_SRC 20
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_SRC_MASK
+#define TCA_FLOWER_KEY_UDP_SRC_MASK 37
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST
+#define TCA_FLOWER_KEY_UDP_DST 21
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_UDP_DST_MASK
+#define TCA_FLOWER_KEY_UDP_DST_MASK 38
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ID
+#define TCA_FLOWER_KEY_VLAN_ID 23
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_PRIO
+#define TCA_FLOWER_KEY_VLAN_PRIO 24
+#endif
+#ifndef HAVE_TCA_FLOWER_KEY_VLAN_ETH_TYPE
+#define TCA_FLOWER_KEY_VLAN_ETH_TYPE 25
+#endif
+
+/** Parser state definitions for mlx5_nl_flow_trans[]. */
+enum mlx5_nl_flow_trans {
+ INVALID,
+ BACK,
+ ATTR,
+ PATTERN,
+ ITEM_VOID,
+ ITEM_PORT_ID,
+ ITEM_ETH,
+ ITEM_VLAN,
+ ITEM_IPV4,
+ ITEM_IPV6,
+ ITEM_TCP,
+ ITEM_UDP,
+ ACTIONS,
+ ACTION_VOID,
+ ACTION_PORT_ID,
+ ACTION_DROP,
+ ACTION_OF_POP_VLAN,
+ ACTION_OF_PUSH_VLAN,
+ ACTION_OF_SET_VLAN_VID,
+ ACTION_OF_SET_VLAN_PCP,
+ END,
+};
+
+#define TRANS(...) (const enum mlx5_nl_flow_trans []){ __VA_ARGS__, INVALID, }
+
+#define PATTERN_COMMON \
+ ITEM_VOID, ITEM_PORT_ID, ACTIONS
+#define ACTIONS_COMMON \
+ ACTION_VOID, ACTION_OF_POP_VLAN, ACTION_OF_PUSH_VLAN, \
+ ACTION_OF_SET_VLAN_VID, ACTION_OF_SET_VLAN_PCP
+#define ACTIONS_FATE \
+ ACTION_PORT_ID, ACTION_DROP
+
+/** Parser state transitions used by mlx5_nl_flow_transpose(). */
+static const enum mlx5_nl_flow_trans *const mlx5_nl_flow_trans[] = {
+ [INVALID] = NULL,
+ [BACK] = NULL,
+ [ATTR] = TRANS(PATTERN),
+ [PATTERN] = TRANS(ITEM_ETH, PATTERN_COMMON),
+ [ITEM_VOID] = TRANS(BACK),
+ [ITEM_PORT_ID] = TRANS(BACK),
+ [ITEM_ETH] = TRANS(ITEM_IPV4, ITEM_IPV6, ITEM_VLAN, PATTERN_COMMON),
+ [ITEM_VLAN] = TRANS(ITEM_IPV4, ITEM_IPV6, PATTERN_COMMON),
+ [ITEM_IPV4] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),
+ [ITEM_IPV6] = TRANS(ITEM_TCP, ITEM_UDP, PATTERN_COMMON),
+ [ITEM_TCP] = TRANS(PATTERN_COMMON),
+ [ITEM_UDP] = TRANS(PATTERN_COMMON),
+ [ACTIONS] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_VOID] = TRANS(BACK),
+ [ACTION_PORT_ID] = TRANS(ACTION_VOID, END),
+ [ACTION_DROP] = TRANS(ACTION_VOID, END),
+ [ACTION_OF_POP_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_PUSH_VLAN] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_SET_VLAN_VID] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [ACTION_OF_SET_VLAN_PCP] = TRANS(ACTIONS_FATE, ACTIONS_COMMON),
+ [END] = NULL,
+};
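
Since this table drives the parser below, note that TRANS() simply builds INVALID-terminated candidate lists; for instance, the [PATTERN] entry is equivalent to the following expansion (illustrative):

/*
 * [PATTERN] = (const enum mlx5_nl_flow_trans []){
 *         ITEM_ETH, ITEM_VOID, ITEM_PORT_ID, ACTIONS, INVALID,
 * },
 *
 * From PATTERN the parser may thus consume an ETH item, skip VOID
 * items, handle a PORT_ID item or move on to the actions list;
 * INVALID terminates the candidates and triggers an error.
 */
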
+
+/** Empty masks for known item types. */
+static const union {
+ struct rte_flow_item_port_id port_id;
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+} mlx5_nl_flow_mask_empty;
+
+/** Supported masks for known item types. */
+static const struct {
+ struct rte_flow_item_port_id port_id;
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_vlan vlan;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_udp udp;
+} mlx5_nl_flow_mask_supported = {
+ .port_id = {
+ .id = 0xffffffff,
+ },
+ .eth = {
+ .type = RTE_BE16(0xffff),
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .vlan = {
+ /* PCP and VID only, no DEI. */
+ .tci = RTE_BE16(0xefff),
+ .inner_type = RTE_BE16(0xffff),
+ },
+ .ipv4.hdr = {
+ .next_proto_id = 0xff,
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ },
+ .ipv6.hdr = {
+ .proto = 0xff,
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .tcp.hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+ .udp.hdr = {
+ .src_port = RTE_BE16(0xffff),
+ .dst_port = RTE_BE16(0xffff),
+ },
+};
+
+/**
+ * Retrieve mask for pattern item.
+ *
+ * This function does basic sanity checks on a pattern item in order to
+ * return the most appropriate mask for it.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] mask_default
+ * Default mask for pattern item as specified by the flow API.
+ * @param[in] mask_supported
+ * Mask fields supported by the implementation.
+ * @param[in] mask_empty
+ * Empty mask to return when there is no specification.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * Either @p item->mask or one of the mask parameters on success, NULL
+ * otherwise and rte_errno is set.
+ */
+static const void *
+mlx5_nl_flow_item_mask(const struct rte_flow_item *item,
+ const void *mask_default,
+ const void *mask_supported,
+ const void *mask_empty,
+ size_t mask_size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *mask;
+ size_t i;
+
+ /* item->last and item->mask cannot exist without item->spec. */
+ if (!item->spec && (item->mask || item->last)) {
+ rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "\"mask\" or \"last\" field provided without a"
+ " corresponding \"spec\"");
+ return NULL;
+ }
+ /* No spec, no mask, no problem. */
+ if (!item->spec)
+ return mask_empty;
+ mask = item->mask ? item->mask : mask_default;
+ assert(mask);
+ /*
+ * Single-pass check to make sure that:
+ * - Mask is supported, no bits are set outside mask_supported.
+ * - Both item->spec and item->last are included in mask.
+ */
+ for (i = 0; i != mask_size; ++i) {
+ if (!mask[i])
+ continue;
+ if ((mask[i] | ((const uint8_t *)mask_supported)[i]) !=
+ ((const uint8_t *)mask_supported)[i]) {
+ rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask, "unsupported field found in \"mask\"");
+ return NULL;
+ }
+ if (item->last &&
+ (((const uint8_t *)item->spec)[i] & mask[i]) !=
+ (((const uint8_t *)item->last)[i] & mask[i])) {
+ rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST,
+ item->last,
+ "range between \"spec\" and \"last\" not"
+ " comprised in \"mask\"");
+ return NULL;
+ }
+ }
+ return mask;
+}
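
A hedged illustration of this contract for a TCP item, mirroring how the ITEM_TCP case below invokes it:

/*
 * mask.tcp = mlx5_nl_flow_item_mask
 *         (item, &rte_flow_item_tcp_mask,          // default mask
 *          &mlx5_nl_flow_mask_supported.tcp,       // supported bits
 *          &mlx5_nl_flow_mask_empty.tcp,           // no-spec fallback
 *          sizeof(mlx5_nl_flow_mask_supported.tcp), error);
 *
 * - no item->spec                 -> &mlx5_nl_flow_mask_empty.tcp
 * - spec without item->mask       -> &rte_flow_item_tcp_mask
 * - mask within supported bits    -> item->mask itself
 * - unsupported bits or bad range -> NULL, rte_errno set
 */
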
+
+/**
+ * Transpose flow rule description to rtnetlink message.
+ *
+ * This function transposes a flow rule description to a traffic control
+ * (TC) filter creation message ready to be sent over Netlink.
+ *
+ * Target interface is specified as the first entry of the @p ptoi table.
+ * Subsequent entries enable this function to resolve other DPDK port IDs
+ * found in the flow rule.
+ *
+ * @param[out] buf
+ * Output message buffer. May be NULL when @p size is 0.
+ * @param size
+ * Size of @p buf. Message may be truncated if not large enough.
+ * @param[in] ptoi
+ * DPDK port ID to network interface index translation table. This table
+ * is terminated by an entry with a zero ifindex value.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification.
+ * @param[in] actions
+ * Associated actions.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A positive value representing the exact size of the message in bytes
+ * regardless of the @p size parameter on success, a negative errno value
+ * otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_transpose(void *buf,
+ size_t size,
+ const struct mlx5_nl_flow_ptoi *ptoi,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error)
+{
+ alignas(struct nlmsghdr)
+ uint8_t buf_tmp[mnl_nlmsg_size(sizeof(struct tcmsg) + 1024)];
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *action;
+ unsigned int n;
+ uint32_t act_index_cur;
+ bool in_port_id_set;
+ bool eth_type_set;
+ bool vlan_present;
+ bool vlan_eth_type_set;
+ bool ip_proto_set;
+ struct nlattr *na_flower;
+ struct nlattr *na_flower_act;
+ struct nlattr *na_vlan_id;
+ struct nlattr *na_vlan_priority;
+ const enum mlx5_nl_flow_trans *trans;
+ const enum mlx5_nl_flow_trans *back;
+
+ if (!size)
+ goto error_nobufs;
+init:
+ item = pattern;
+ action = actions;
+ n = 0;
+ act_index_cur = 0;
+ in_port_id_set = false;
+ eth_type_set = false;
+ vlan_present = false;
+ vlan_eth_type_set = false;
+ ip_proto_set = false;
+ na_flower = NULL;
+ na_flower_act = NULL;
+ na_vlan_id = NULL;
+ na_vlan_priority = NULL;
+ trans = TRANS(ATTR);
+ back = trans;
+trans:
+ switch (trans[n++]) {
+ union {
+ const struct rte_flow_item_port_id *port_id;
+ const struct rte_flow_item_eth *eth;
+ const struct rte_flow_item_vlan *vlan;
+ const struct rte_flow_item_ipv4 *ipv4;
+ const struct rte_flow_item_ipv6 *ipv6;
+ const struct rte_flow_item_tcp *tcp;
+ const struct rte_flow_item_udp *udp;
+ } spec, mask;
+ union {
+ const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_of_push_vlan *of_push_vlan;
+ const struct rte_flow_action_of_set_vlan_vid *
+ of_set_vlan_vid;
+ const struct rte_flow_action_of_set_vlan_pcp *
+ of_set_vlan_pcp;
+ } conf;
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ struct nlattr *act_index;
+ struct nlattr *act;
+ unsigned int i;
+
+ case INVALID:
+ if (item->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "unsupported pattern item combination");
+ else if (action->type)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ action, "unsupported action combination");
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "flow rule lacks some kind of fate action");
+ case BACK:
+ trans = back;
+ n = 0;
+ goto trans;
+ case ATTR:
+ /*
+ * Supported attributes: no groups, some priorities and
+ * ingress only. Don't care about transfer as it is the
+ * caller's problem.
+ */
+ if (attr->group)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "groups are not supported");
+ if (attr->priority > 0xfffe)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "lowest priority level is 0xfffe");
+ if (!attr->ingress)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "only ingress is supported");
+ if (attr->egress)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "egress is not supported");
+ if (size < mnl_nlmsg_size(sizeof(*tcm)))
+ goto error_nobufs;
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = 0;
+ nlh->nlmsg_flags = 0;
+ nlh->nlmsg_seq = 0;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ptoi[0].ifindex;
+ /*
+ * Let kernel pick a handle by default. A predictable handle
+ * can be set by the caller on the resulting buffer through
+ * mlx5_nl_flow_brand().
+ */
+ tcm->tcm_handle = 0;
+ tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);
+ /*
+ * Priority cannot be zero to prevent the kernel from
+ * picking one automatically.
+ */
+ tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16,
+ RTE_BE16(ETH_P_ALL));
+ break;
+ case PATTERN:
+ if (!mnl_attr_put_strz_check(buf, size, TCA_KIND, "flower"))
+ goto error_nobufs;
+ na_flower = mnl_attr_nest_start_check(buf, size, TCA_OPTIONS);
+ if (!na_flower)
+ goto error_nobufs;
+ if (!mnl_attr_put_u32_check(buf, size, TCA_FLOWER_FLAGS,
+ TCA_CLS_FLAGS_SKIP_SW))
+ goto error_nobufs;
+ break;
+ case ITEM_VOID:
+ if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+ goto trans;
+ ++item;
+ break;
+ case ITEM_PORT_ID:
+ if (item->type != RTE_FLOW_ITEM_TYPE_PORT_ID)
+ goto trans;
+ mask.port_id = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_port_id_mask,
+ &mlx5_nl_flow_mask_supported.port_id,
+ &mlx5_nl_flow_mask_empty.port_id,
+ sizeof(mlx5_nl_flow_mask_supported.port_id), error);
+ if (!mask.port_id)
+ return -rte_errno;
+ if (mask.port_id == &mlx5_nl_flow_mask_empty.port_id) {
+ in_port_id_set = 1;
+ ++item;
+ break;
+ }
+ spec.port_id = item->spec;
+ if (mask.port_id->id && mask.port_id->id != 0xffffffff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.port_id,
+ "no support for partial mask on"
+ " \"id\" field");
+ if (!mask.port_id->id)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == spec.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "missing data to convert port ID to ifindex");
+ tcm = mnl_nlmsg_get_payload(buf);
+ if (in_port_id_set &&
+ ptoi[i].ifindex != (unsigned int)tcm->tcm_ifindex)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "cannot match traffic for several port IDs"
+ " through a single flow rule");
+ tcm->tcm_ifindex = ptoi[i].ifindex;
+ in_port_id_set = 1;
+ ++item;
+ break;
+ case ITEM_ETH:
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
+ goto trans;
+ mask.eth = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_eth_mask,
+ &mlx5_nl_flow_mask_supported.eth,
+ &mlx5_nl_flow_mask_empty.eth,
+ sizeof(mlx5_nl_flow_mask_supported.eth), error);
+ if (!mask.eth)
+ return -rte_errno;
+ if (mask.eth == &mlx5_nl_flow_mask_empty.eth) {
+ ++item;
+ break;
+ }
+ spec.eth = item->spec;
+ if (mask.eth->type && mask.eth->type != RTE_BE16(0xffff))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.eth,
+ "no support for partial mask on"
+ " \"type\" field");
+ if (mask.eth->type) {
+ if (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_ETH_TYPE,
+ spec.eth->type))
+ goto error_nobufs;
+ eth_type_set = 1;
+ }
+ if ((!is_zero_ether_addr(&mask.eth->dst) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_DST,
+ ETHER_ADDR_LEN,
+ spec.eth->dst.addr_bytes) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_DST_MASK,
+ ETHER_ADDR_LEN,
+ mask.eth->dst.addr_bytes))) ||
+ (!is_zero_ether_addr(&mask.eth->src) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_SRC,
+ ETHER_ADDR_LEN,
+ spec.eth->src.addr_bytes) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_ETH_SRC_MASK,
+ ETHER_ADDR_LEN,
+ mask.eth->src.addr_bytes))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_VLAN:
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN)
+ goto trans;
+ mask.vlan = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_vlan_mask,
+ &mlx5_nl_flow_mask_supported.vlan,
+ &mlx5_nl_flow_mask_empty.vlan,
+ sizeof(mlx5_nl_flow_mask_supported.vlan), error);
+ if (!mask.vlan)
+ return -rte_errno;
+ if (!eth_type_set &&
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_8021Q)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_present = 1;
+ if (mask.vlan == &mlx5_nl_flow_mask_empty.vlan) {
+ ++item;
+ break;
+ }
+ spec.vlan = item->spec;
+ if ((mask.vlan->tci & RTE_BE16(0xe000) &&
+ (mask.vlan->tci & RTE_BE16(0xe000)) != RTE_BE16(0xe000)) ||
+ (mask.vlan->tci & RTE_BE16(0x0fff) &&
+ (mask.vlan->tci & RTE_BE16(0x0fff)) != RTE_BE16(0x0fff)) ||
+ (mask.vlan->inner_type &&
+ mask.vlan->inner_type != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.vlan,
+ "no support for partial masks on"
+ " \"tci\" (PCP and VID parts) and"
+ " \"inner_type\" fields");
+ if (mask.vlan->inner_type) {
+ if (!mnl_attr_put_u16_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ spec.vlan->inner_type))
+ goto error_nobufs;
+ vlan_eth_type_set = 1;
+ }
+ if ((mask.vlan->tci & RTE_BE16(0xe000) &&
+ !mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_PRIO,
+ (rte_be_to_cpu_16(spec.vlan->tci) >> 13) & 0x7)) ||
+ (mask.vlan->tci & RTE_BE16(0x0fff) &&
+ !mnl_attr_put_u16_check
+ (buf, size, TCA_FLOWER_KEY_VLAN_ID,
+ rte_be_to_cpu_16(spec.vlan->tci & RTE_BE16(0x0fff)))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_IPV4:
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+ goto trans;
+ mask.ipv4 = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_ipv4_mask,
+ &mlx5_nl_flow_mask_supported.ipv4,
+ &mlx5_nl_flow_mask_empty.ipv4,
+ sizeof(mlx5_nl_flow_mask_supported.ipv4), error);
+ if (!mask.ipv4)
+ return -rte_errno;
+ if ((!eth_type_set || !vlan_eth_type_set) &&
+ !mnl_attr_put_u16_check(buf, size,
+ vlan_present ?
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE :
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_IP)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_eth_type_set = 1;
+ if (mask.ipv4 == &mlx5_nl_flow_mask_empty.ipv4) {
+ ++item;
+ break;
+ }
+ spec.ipv4 = item->spec;
+ if (mask.ipv4->hdr.next_proto_id &&
+ mask.ipv4->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv4,
+ "no support for partial mask on"
+ " \"hdr.next_proto_id\" field");
+ if (mask.ipv4->hdr.next_proto_id) {
+ if (!mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv4->hdr.next_proto_id))
+ goto error_nobufs;
+ ip_proto_set = 1;
+ }
+ if ((mask.ipv4->hdr.src_addr &&
+ (!mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_SRC,
+ spec.ipv4->hdr.src_addr) ||
+ !mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ mask.ipv4->hdr.src_addr))) ||
+ (mask.ipv4->hdr.dst_addr &&
+ (!mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_DST,
+ spec.ipv4->hdr.dst_addr) ||
+ !mnl_attr_put_u32_check(buf, size,
+ TCA_FLOWER_KEY_IPV4_DST_MASK,
+ mask.ipv4->hdr.dst_addr))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_IPV6:
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV6)
+ goto trans;
+ mask.ipv6 = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_ipv6_mask,
+ &mlx5_nl_flow_mask_supported.ipv6,
+ &mlx5_nl_flow_mask_empty.ipv6,
+ sizeof(mlx5_nl_flow_mask_supported.ipv6), error);
+ if (!mask.ipv6)
+ return -rte_errno;
+ if ((!eth_type_set || !vlan_eth_type_set) &&
+ !mnl_attr_put_u16_check(buf, size,
+ vlan_present ?
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE :
+ TCA_FLOWER_KEY_ETH_TYPE,
+ RTE_BE16(ETH_P_IPV6)))
+ goto error_nobufs;
+ eth_type_set = 1;
+ vlan_eth_type_set = 1;
+ if (mask.ipv6 == &mlx5_nl_flow_mask_empty.ipv6) {
+ ++item;
+ break;
+ }
+ spec.ipv6 = item->spec;
+ if (mask.ipv6->hdr.proto && mask.ipv6->hdr.proto != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv6,
+ "no support for partial mask on"
+ " \"hdr.proto\" field");
+ if (mask.ipv6->hdr.proto) {
+ if (!mnl_attr_put_u8_check
+ (buf, size, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv6->hdr.proto))
+ goto error_nobufs;
+ ip_proto_set = 1;
+ }
+ if ((!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.src_addr) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_SRC,
+ sizeof(spec.ipv6->hdr.src_addr),
+ spec.ipv6->hdr.src_addr) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(mask.ipv6->hdr.src_addr),
+ mask.ipv6->hdr.src_addr))) ||
+ (!IN6_IS_ADDR_UNSPECIFIED(mask.ipv6->hdr.dst_addr) &&
+ (!mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_DST,
+ sizeof(spec.ipv6->hdr.dst_addr),
+ spec.ipv6->hdr.dst_addr) ||
+ !mnl_attr_put_check(buf, size,
+ TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(mask.ipv6->hdr.dst_addr),
+ mask.ipv6->hdr.dst_addr))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_TCP:
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP)
+ goto trans;
+ mask.tcp = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_tcp_mask,
+ &mlx5_nl_flow_mask_supported.tcp,
+ &mlx5_nl_flow_mask_empty.tcp,
+ sizeof(mlx5_nl_flow_mask_supported.tcp), error);
+ if (!mask.tcp)
+ return -rte_errno;
+ if (!ip_proto_set &&
+ !mnl_attr_put_u8_check(buf, size,
+ TCA_FLOWER_KEY_IP_PROTO,
+ IPPROTO_TCP))
+ goto error_nobufs;
+ if (mask.tcp == &mlx5_nl_flow_mask_empty.tcp) {
+ ++item;
+ break;
+ }
+ spec.tcp = item->spec;
+ if ((mask.tcp->hdr.src_port &&
+ mask.tcp->hdr.src_port != RTE_BE16(0xffff)) ||
+ (mask.tcp->hdr.dst_port &&
+ mask.tcp->hdr.dst_port != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.tcp,
+ "no support for partial masks on"
+ " \"hdr.src_port\" and \"hdr.dst_port\""
+ " fields");
+ if ((mask.tcp->hdr.src_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_SRC,
+ spec.tcp->hdr.src_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_SRC_MASK,
+ mask.tcp->hdr.src_port))) ||
+ (mask.tcp->hdr.dst_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_DST,
+ spec.tcp->hdr.dst_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_TCP_DST_MASK,
+ mask.tcp->hdr.dst_port))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ITEM_UDP:
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP)
+ goto trans;
+ mask.udp = mlx5_nl_flow_item_mask
+ (item, &rte_flow_item_udp_mask,
+ &mlx5_nl_flow_mask_supported.udp,
+ &mlx5_nl_flow_mask_empty.udp,
+ sizeof(mlx5_nl_flow_mask_supported.udp), error);
+ if (!mask.udp)
+ return -rte_errno;
+ if (!ip_proto_set &&
+ !mnl_attr_put_u8_check(buf, size,
+ TCA_FLOWER_KEY_IP_PROTO,
+ IPPROTO_UDP))
+ goto error_nobufs;
+ if (mask.udp == &mlx5_nl_flow_mask_empty.udp) {
+ ++item;
+ break;
+ }
+ spec.udp = item->spec;
+ if ((mask.udp->hdr.src_port &&
+ mask.udp->hdr.src_port != RTE_BE16(0xffff)) ||
+ (mask.udp->hdr.dst_port &&
+ mask.udp->hdr.dst_port != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.udp,
+ "no support for partial masks on"
+ " \"hdr.src_port\" and \"hdr.dst_port\""
+ " fields");
+ if ((mask.udp->hdr.src_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_SRC,
+ spec.udp->hdr.src_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_SRC_MASK,
+ mask.udp->hdr.src_port))) ||
+ (mask.udp->hdr.dst_port &&
+ (!mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_DST,
+ spec.udp->hdr.dst_port) ||
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_FLOWER_KEY_UDP_DST_MASK,
+ mask.udp->hdr.dst_port))))
+ goto error_nobufs;
+ ++item;
+ break;
+ case ACTIONS:
+ if (item->type != RTE_FLOW_ITEM_TYPE_END)
+ goto trans;
+ assert(na_flower);
+ assert(!na_flower_act);
+ na_flower_act =
+ mnl_attr_nest_start_check(buf, size, TCA_FLOWER_ACT);
+ if (!na_flower_act)
+ goto error_nobufs;
+ act_index_cur = 1;
+ break;
+ case ACTION_VOID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
+ goto trans;
+ ++action;
+ break;
+ case ACTION_PORT_ID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_PORT_ID)
+ goto trans;
+ conf.port_id = action->conf;
+ if (conf.port_id->original)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == conf.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf.port_id,
+ "missing data to convert port ID to ifindex");
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "mirred"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_MIRRED_PARMS,
+ sizeof(struct tc_mirred),
+ &(struct tc_mirred){
+ .action = TC_ACT_STOLEN,
+ .eaction = TCA_EGRESS_REDIR,
+ .ifindex = ptoi[i].ifindex,
+ }))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ case ACTION_DROP:
+ if (action->type != RTE_FLOW_ACTION_TYPE_DROP)
+ goto trans;
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "gact"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_GACT_PARMS,
+ sizeof(struct tc_gact),
+ &(struct tc_gact){
+ .action = TC_ACT_SHOT,
+ }))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ case ACTION_OF_POP_VLAN:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_POP_VLAN)
+ goto trans;
+ conf.of_push_vlan = NULL;
+ i = TCA_VLAN_ACT_POP;
+ goto action_of_vlan;
+ case ACTION_OF_PUSH_VLAN:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN)
+ goto trans;
+ conf.of_push_vlan = action->conf;
+ i = TCA_VLAN_ACT_PUSH;
+ goto action_of_vlan;
+ case ACTION_OF_SET_VLAN_VID:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)
+ goto trans;
+ conf.of_set_vlan_vid = action->conf;
+ if (na_vlan_id)
+ goto override_na_vlan_id;
+ i = TCA_VLAN_ACT_MODIFY;
+ goto action_of_vlan;
+ case ACTION_OF_SET_VLAN_PCP:
+ if (action->type != RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)
+ goto trans;
+ conf.of_set_vlan_pcp = action->conf;
+ if (na_vlan_priority)
+ goto override_na_vlan_priority;
+ i = TCA_VLAN_ACT_MODIFY;
+ goto action_of_vlan;
+action_of_vlan:
+ act_index =
+ mnl_attr_nest_start_check(buf, size, act_index_cur++);
+ if (!act_index ||
+ !mnl_attr_put_strz_check(buf, size, TCA_ACT_KIND, "vlan"))
+ goto error_nobufs;
+ act = mnl_attr_nest_start_check(buf, size, TCA_ACT_OPTIONS);
+ if (!act)
+ goto error_nobufs;
+ if (!mnl_attr_put_check(buf, size, TCA_VLAN_PARMS,
+ sizeof(struct tc_vlan),
+ &(struct tc_vlan){
+ .action = TC_ACT_PIPE,
+ .v_action = i,
+ }))
+ goto error_nobufs;
+ if (i == TCA_VLAN_ACT_POP) {
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ ++action;
+ break;
+ }
+ if (i == TCA_VLAN_ACT_PUSH &&
+ !mnl_attr_put_u16_check(buf, size,
+ TCA_VLAN_PUSH_VLAN_PROTOCOL,
+ conf.of_push_vlan->ethertype))
+ goto error_nobufs;
+ na_vlan_id = mnl_nlmsg_get_payload_tail(buf);
+ if (!mnl_attr_put_u16_check(buf, size, TCA_VLAN_PAD, 0))
+ goto error_nobufs;
+ na_vlan_priority = mnl_nlmsg_get_payload_tail(buf);
+ if (!mnl_attr_put_u8_check(buf, size, TCA_VLAN_PAD, 0))
+ goto error_nobufs;
+ mnl_attr_nest_end(buf, act);
+ mnl_attr_nest_end(buf, act_index);
+ if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
+override_na_vlan_id:
+ na_vlan_id->nla_type = TCA_VLAN_PUSH_VLAN_ID;
+ *(uint16_t *)mnl_attr_get_payload(na_vlan_id) =
+ rte_be_to_cpu_16
+ (conf.of_set_vlan_vid->vlan_vid);
+ } else if (action->type ==
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
+override_na_vlan_priority:
+ na_vlan_priority->nla_type =
+ TCA_VLAN_PUSH_VLAN_PRIORITY;
+ *(uint8_t *)mnl_attr_get_payload(na_vlan_priority) =
+ conf.of_set_vlan_pcp->vlan_pcp;
+ }
+ ++action;
+ break;
+ case END:
+ if (item->type != RTE_FLOW_ITEM_TYPE_END ||
+ action->type != RTE_FLOW_ACTION_TYPE_END)
+ goto trans;
+ if (na_flower_act)
+ mnl_attr_nest_end(buf, na_flower_act);
+ if (na_flower)
+ mnl_attr_nest_end(buf, na_flower);
+ nlh = buf;
+ return nlh->nlmsg_len;
+ }
+ back = trans;
+ trans = mlx5_nl_flow_trans[trans[n - 1]];
+ n = 0;
+ goto trans;
+error_nobufs:
+ if (buf != buf_tmp) {
+ buf = buf_tmp;
+ size = sizeof(buf_tmp);
+ goto init;
+ }
+ return rte_flow_error_set
+ (error, ENOBUFS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "generated TC message is too large");
+}
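
The "exact size regardless of @p size" return value enables a measure-then-fill pattern; a hedged sketch follows (the helper name, alloca() use and trimmed error handling are illustrative, not part of the patch):

#include <alloca.h>
#include <stdint.h>

static int
example_transpose_and_create(struct mnl_socket *nl,
                             const struct mlx5_nl_flow_ptoi *ptoi,
                             const struct rte_flow_attr *attr,
                             const struct rte_flow_item *pattern,
                             const struct rte_flow_action *actions,
                             uint32_t handle,
                             struct rte_flow_error *error)
{
        /* First pass: buf may be NULL when size is 0; the exact
         * message size is still returned. */
        int len = mlx5_nl_flow_transpose(NULL, 0, ptoi, attr, pattern,
                                         actions, error);
        void *msg;

        if (len < 0)
                return len;
        msg = alloca(len);
        mlx5_nl_flow_transpose(msg, len, ptoi, attr, pattern, actions,
                               error);
        mlx5_nl_flow_brand(msg, handle); /* optional, see below */
        return mlx5_nl_flow_create(nl, msg, error);
}
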
+
+/**
+ * Brand rtnetlink buffer with unique handle.
+ *
+ * This handle should be unique for a given network interface to avoid
+ * collisions.
+ *
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param handle
+ * Unique 32-bit handle to use.
+ */
+void
+mlx5_nl_flow_brand(void *buf, uint32_t handle)
+{
+ struct tcmsg *tcm = mnl_nlmsg_get_payload(buf);
+
+ tcm->tcm_handle = handle;
+}
+
+/**
+ * Send Netlink message with acknowledgment.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param nlh
+ * Message to send. This function always raises the NLM_F_ACK flag before
+ * sending.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_nl_flow_nl_ack(struct mnl_socket *nl, struct nlmsghdr *nlh)
+{
+ alignas(struct nlmsghdr)
+ uint8_t ans[mnl_nlmsg_size(sizeof(struct nlmsgerr)) +
+ nlh->nlmsg_len - sizeof(*nlh)];
+ uint32_t seq = random();
+ int ret;
+
+ nlh->nlmsg_flags |= NLM_F_ACK;
+ nlh->nlmsg_seq = seq;
+ ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
+ if (ret != -1)
+ ret = mnl_socket_recvfrom(nl, ans, sizeof(ans));
+ if (ret != -1)
+ ret = mnl_cb_run
+ (ans, ret, seq, mnl_socket_get_portid(nl), NULL, NULL);
+ if (!ret)
+ return 0;
+ rte_errno = errno;
+ return -rte_errno;
+}
+
+/**
+ * Create a Netlink flow rule.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_create(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh = buf;
+
+ nlh->nlmsg_type = RTM_NEWTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ if (!mlx5_nl_flow_nl_ack(nl, nlh))
+ return 0;
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to create TC flow rule");
+}
+
+/**
+ * Destroy a Netlink flow rule.
+ *
+ * @param nl
+ * Libmnl socket to use.
+ * @param buf
+ * Flow rule buffer previously initialized by mlx5_nl_flow_transpose().
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_destroy(struct mnl_socket *nl, void *buf,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh = buf;
+
+ nlh->nlmsg_type = RTM_DELTFILTER;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ if (!mlx5_nl_flow_nl_ack(nl, nlh))
+ return 0;
+ return rte_flow_error_set
+ (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "netlink: failed to destroy TC flow rule");
+}
+
+/**
+ * Initialize ingress qdisc of a given network interface.
+ *
+ * @param nl
+ * Libmnl socket of the @p NETLINK_ROUTE kind.
+ * @param ifindex
+ * Index of network interface to initialize.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_nl_flow_init(struct mnl_socket *nl, unsigned int ifindex,
+ struct rte_flow_error *error)
+{
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ alignas(struct nlmsghdr)
+ uint8_t buf[mnl_nlmsg_size(sizeof(*tcm) + 128)];
+
+ /* Destroy existing ingress qdisc and everything attached to it. */
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = RTM_DELQDISC;
+ nlh->nlmsg_flags = NLM_F_REQUEST;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ifindex;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ tcm->tcm_parent = TC_H_INGRESS;
+ /* Ignore errors when qdisc is already absent. */
+ if (mlx5_nl_flow_nl_ack(nl, nlh) &&
+ rte_errno != EINVAL && rte_errno != ENOENT)
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "netlink: failed to remove ingress qdisc");
+ /* Create fresh ingress qdisc. */
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = RTM_NEWQDISC;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ifindex;
+ tcm->tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ tcm->tcm_parent = TC_H_INGRESS;
+ mnl_attr_put_strz_check(nlh, sizeof(buf), TCA_KIND, "ingress");
+ if (mlx5_nl_flow_nl_ack(nl, nlh))
+ return rte_flow_error_set
+ (error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "netlink: failed to create ingress qdisc");
+ return 0;
+}
+
+/**
+ * Create and configure a libmnl socket for Netlink flow rules.
+ *
+ * @return
+ * A valid libmnl socket object pointer on success, NULL otherwise and
+ * rte_errno is set.
+ */
+struct mnl_socket *
+mlx5_nl_flow_socket_create(void)
+{
+ struct mnl_socket *nl = mnl_socket_open(NETLINK_ROUTE);
+
+ if (nl) {
+ mnl_socket_setsockopt(nl, NETLINK_CAP_ACK, &(int){ 1 },
+ sizeof(int));
+ if (!mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID))
+ return nl;
+ }
+ rte_errno = errno;
+ if (nl)
+ mnl_socket_close(nl);
+ return NULL;
+}
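
Putting the helpers in this file together, a hedged lifecycle sketch (function name hypothetical, error reporting trimmed):

static int
example_nl_flow_setup(unsigned int ifindex, struct rte_flow_error *error)
{
        struct mnl_socket *nl = mlx5_nl_flow_socket_create();
        int ret;

        if (!nl)
                return -rte_errno;
        /* Replace any ingress qdisc, then attach flower rules to it. */
        ret = mlx5_nl_flow_init(nl, ifindex, error);
        /* ... mlx5_nl_flow_create()/mlx5_nl_flow_destroy() calls ... */
        mlx5_nl_flow_socket_destroy(nl);
        return ret;
}
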
+
+/**
+ * Destroy a libmnl socket.
+ */
+void
+mlx5_nl_flow_socket_destroy(struct mnl_socket *nl)
+{
+ mnl_socket_close(nl);
+}
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 9eb9c15e..0870d32f 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_PRM_H_
@@ -21,6 +21,9 @@
#include <rte_vect.h>
#include "mlx5_autoconf.h"
+/* RSS hash key size. */
+#define MLX5_RSS_HASH_KEY_LEN 40
+
/* Get CQE owner bit. */
#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK)
@@ -107,6 +110,30 @@
/* Inner L4 checksum offload (Tunneled packets only). */
#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+/* Outer L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_OUTER_TCP (0u << 5)
+
+/* Outer L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_OUTER_UDP (1u << 5)
+
+/* Outer L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV4 (0u << 4)
+
+/* Outer L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_OUTER_IPV6 (1u << 4)
+
+/* Inner L4 type is TCP. */
+#define MLX5_ETH_WQE_L4_INNER_TCP (0u << 1)
+
+/* Inner L4 type is UDP. */
+#define MLX5_ETH_WQE_L4_INNER_UDP (1u << 1)
+
+/* Inner L3 type is IPV4. */
+#define MLX5_ETH_WQE_L3_INNER_IPV4 (0u << 0)
+
+/* Inner L3 type is IPV6. */
+#define MLX5_ETH_WQE_L3_INNER_IPV6 (1u << 0)
+
/* Is flow mark valid. */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
@@ -195,13 +222,30 @@ struct mlx5_mpw {
} data;
};
+/* WQE for Multi-Packet RQ. */
+struct mlx5_wqe_mprq {
+ struct mlx5_wqe_srq_next_seg next_seg;
+ struct mlx5_wqe_data_seg dseg;
+};
+
+#define MLX5_MPRQ_LEN_MASK 0x0000ffff
+#define MLX5_MPRQ_LEN_SHIFT 0
+#define MLX5_MPRQ_STRIDE_NUM_MASK 0x3fff0000
+#define MLX5_MPRQ_STRIDE_NUM_SHIFT 16
+#define MLX5_MPRQ_FILLER_MASK 0x80000000
+#define MLX5_MPRQ_FILLER_SHIFT 31
+
+#define MLX5_MPRQ_STRIDE_SHIFT_BYTE 2
+
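
These masks carve up the 32-bit byte count reported per Multi-Packet RQ CQE; a hedged decoding sketch (the byte_cnt parameter is assumed to be already converted to CPU endianness, and the helper names are illustrative):

#include <stdint.h>

static inline uint32_t
mprq_len(uint32_t byte_cnt)
{
        return (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
}

static inline uint32_t
mprq_stride_num(uint32_t byte_cnt)
{
        return (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
               MLX5_MPRQ_STRIDE_NUM_SHIFT;
}

static inline int
mprq_is_filler(uint32_t byte_cnt)
{
        return !!(byte_cnt & MLX5_MPRQ_FILLER_MASK);
}
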
/* CQ element structure - should be equal to the cache line size */
struct mlx5_cqe {
#if (RTE_CACHE_LINE_SIZE == 128)
uint8_t padding[64];
#endif
uint8_t pkt_info;
- uint8_t rsvd0[11];
+ uint8_t rsvd0;
+ uint16_t wqe_id;
+ uint8_t rsvd3[8];
uint32_t rx_hash_res;
uint8_t rx_hash_type;
uint8_t rsvd1[11];
@@ -246,7 +290,10 @@ struct mlx5_cqe {
struct mlx5_mini_cqe8 {
union {
uint32_t rx_hash_result;
- uint32_t checksum;
+ struct {
+ uint16_t checksum;
+ uint16_t stride_idx;
+ };
struct {
uint16_t wqe_counter;
uint8_t s_wqe_opcode;
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index d06b0bee..b95778a8 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -35,35 +35,49 @@
* RSS configuration data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct priv *priv = dev->data->dev_private;
- int ret = 0;
+ unsigned int i;
+ unsigned int idx;
- priv_lock(priv);
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK) {
- ret = -EINVAL;
- goto out;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
if (rss_conf->rss_key && rss_conf->rss_key_len) {
+ if (rss_conf->rss_key_len != MLX5_RSS_HASH_KEY_LEN) {
+ DRV_LOG(ERR,
+ "port %u RSS key len must be %s Bytes long",
+ dev->data->port_id,
+ RTE_STR(MLX5_RSS_HASH_KEY_LEN));
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
priv->rss_conf.rss_key = rte_realloc(priv->rss_conf.rss_key,
rss_conf->rss_key_len, 0);
if (!priv->rss_conf.rss_key) {
- ret = -ENOMEM;
- goto out;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
memcpy(priv->rss_conf.rss_key, rss_conf->rss_key,
rss_conf->rss_key_len);
priv->rss_conf.rss_key_len = rss_conf->rss_key_len;
}
priv->rss_conf.rss_hf = rss_conf->rss_hf;
-out:
- priv_unlock(priv);
- return ret;
+ /* Enable the RSS hash in all Rx queues. */
+ for (i = 0, idx = 0; idx != priv->rxqs_n; ++i) {
+ if (!(*priv->rxqs)[i])
+ continue;
+ (*priv->rxqs)[i]->rss_hash = !!rss_conf->rss_hf &&
+ !!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS);
+ ++idx;
+ }
+ return 0;
}
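
Given the stricter key-length check above, an application-side sketch of a valid call (hedged: the key contents are placeholders, and MLX5_RSS_HASH_KEY_LEN itself is PMD-internal, shown only to emphasize the 40-byte requirement):

#include <rte_ethdev.h>

static int
example_set_rss(uint16_t port_id)
{
        static uint8_t key[40]; /* must be 40 bytes for mlx5 */
        struct rte_eth_rss_conf conf = {
                .rss_key = key,
                .rss_key_len = sizeof(key),
                .rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP,
        };

        return rte_eth_dev_rss_hash_update(port_id, &conf);
}
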
/**
@@ -75,7 +89,7 @@ out:
* RSS configuration data.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
@@ -83,9 +97,10 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
{
struct priv *priv = dev->data->dev_private;
- if (!rss_conf)
- return -EINVAL;
- priv_lock(priv);
+ if (!rss_conf) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
if (rss_conf->rss_key &&
(rss_conf->rss_key_len >= priv->rss_conf.rss_key_len)) {
memcpy(rss_conf->rss_key, priv->rss_conf.rss_key,
@@ -93,24 +108,24 @@ mlx5_rss_hash_conf_get(struct rte_eth_dev *dev,
}
rss_conf->rss_key_len = priv->rss_conf.rss_key_len;
rss_conf->rss_hf = priv->rss_conf.rss_hf;
- priv_unlock(priv);
return 0;
}
/**
* Allocate/reallocate RETA index table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param reta_size
* The size of the array to allocate.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
+mlx5_rss_reta_index_resize(struct rte_eth_dev *dev, unsigned int reta_size)
{
+ struct priv *priv = dev->data->dev_private;
void *mem;
unsigned int old_size = priv->reta_idx_n;
@@ -119,11 +134,12 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
mem = rte_realloc(priv->reta_idx,
reta_size * sizeof((*priv->reta_idx)[0]), 0);
- if (!mem)
- return ENOMEM;
+ if (!mem) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
priv->reta_idx = mem;
priv->reta_idx_n = reta_size;
-
if (old_size < reta_size)
memset(&(*priv->reta_idx)[old_size], 0,
(reta_size - old_size) *
@@ -132,28 +148,31 @@ priv_rss_reta_index_resize(struct priv *priv, unsigned int reta_size)
}
/**
- * Query RETA table.
+ * DPDK callback to get the RETA indirection table.
*
- * @param priv
- * Pointer to private structure.
- * @param[in, out] reta_conf
- * Pointer to the first RETA configuration structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RETA configuration structure array.
* @param reta_size
- * Number of entries.
+ * Size of the RETA table.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-priv_dev_rss_reta_query(struct priv *priv,
+int
+mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
- unsigned int reta_size)
+ uint16_t reta_size)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
- if (!reta_size || reta_size > priv->reta_idx_n)
- return EINVAL;
+ if (!reta_size || reta_size > priv->reta_idx_n) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
/* Fill each entry of the table even if its bit is not set. */
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
@@ -164,34 +183,36 @@ priv_dev_rss_reta_query(struct priv *priv,
}
/**
- * Update RETA table.
+ * DPDK callback to update the RETA indirection table.
*
- * @param priv
- * Pointer to private structure.
- * @param[in] reta_conf
- * Pointer to the first RETA configuration structure.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param reta_conf
+ * Pointer to RETA configuration structure array.
* @param reta_size
- * Number of entries.
+ * Size of the RETA table.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
-priv_dev_rss_reta_update(struct priv *priv,
+int
+mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
- unsigned int reta_size)
+ uint16_t reta_size)
{
+ int ret;
+ struct priv *priv = dev->data->dev_private;
unsigned int idx;
unsigned int i;
unsigned int pos;
- int ret;
- if (!reta_size)
- return EINVAL;
- ret = priv_rss_reta_index_resize(priv, reta_size);
+ if (!reta_size) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ ret = mlx5_rss_reta_index_resize(dev, reta_size);
if (ret)
return ret;
-
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
pos = i % RTE_RETA_GROUP_SIZE;
@@ -200,63 +221,9 @@ priv_dev_rss_reta_update(struct priv *priv,
assert(reta_conf[idx].reta[pos] < priv->rxqs_n);
(*priv->reta_idx)[i] = reta_conf[idx].reta[pos];
}
- return 0;
-}
-
-/**
- * DPDK callback to get the RETA indirection table.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param reta_conf
- * Pointer to RETA configuration structure array.
- * @param reta_size
- * Size of the RETA table.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_rss_reta_query(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- int ret;
- struct priv *priv = dev->data->dev_private;
-
- priv_lock(priv);
- ret = priv_dev_rss_reta_query(priv, reta_conf, reta_size);
- priv_unlock(priv);
- return -ret;
-}
-
-/**
- * DPDK callback to update the RETA indirection table.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param reta_conf
- * Pointer to RETA configuration structure array.
- * @param reta_size
- * Size of the RETA table.
- *
- * @return
- * 0 on success, negative errno value on failure.
- */
-int
-mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
-{
- int ret;
- struct priv *priv = dev->data->dev_private;
-
- priv_lock(priv);
- ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
- priv_unlock(priv);
if (dev->data->dev_started) {
mlx5_dev_stop(dev);
- mlx5_dev_start(dev);
+ return mlx5_dev_start(dev);
}
- return -ret;
+ return 0;
}
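
For completeness, a hedged application-side sketch driving the RETA update path above; note the callback restarts a running port, and the helper name is illustrative:

#include <rte_ethdev.h>
#include <stdint.h>

static int
example_spread_reta(uint16_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[1] = { { 0 } };
        unsigned int i;

        if (!nb_queues)
                return -1;
        reta_conf[0].mask = UINT64_MAX; /* update all 64 entries */
        for (i = 0; i != RTE_RETA_GROUP_SIZE; ++i)
                reta_conf[0].reta[i] = i % nb_queues;
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           RTE_RETA_GROUP_SIZE);
}
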
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index 4ffc869a..e74fdef8 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -32,8 +32,23 @@
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
dev->data->promiscuous = 1;
- mlx5_traffic_restart(dev);
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable promiscuous mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
+ if (priv->config.vf)
+ mlx5_nl_promisc(dev, 1);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot enable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -45,8 +60,16 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
dev->data->promiscuous = 0;
- mlx5_traffic_restart(dev);
+ if (priv->config.vf)
+ mlx5_nl_promisc(dev, 0);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot disable promiscuous mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -58,8 +81,23 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
dev->data->all_multicast = 1;
- mlx5_traffic_restart(dev);
+ if (priv->isolated) {
+ DRV_LOG(WARNING,
+ "port %u cannot enable allmulticast mode"
+ " in flow isolation mode",
+ dev->data->port_id);
+ return;
+ }
+ if (priv->config.vf)
+ mlx5_nl_allmulti(dev, 1);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot enable allmulicast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
/**
@@ -71,6 +109,14 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
void
mlx5_allmulticast_disable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+
dev->data->all_multicast = 0;
- mlx5_traffic_restart(dev);
+ if (priv->config.vf)
+ mlx5_nl_allmulti(dev, 0);
+ ret = mlx5_traffic_restart(dev);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot disable allmulicast mode: %s",
+ dev->data->port_id, strerror(rte_errno));
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index ff58c492..1f7bfd44 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -52,10 +52,129 @@ uint8_t rss_hash_default_key[] = {
};
/* Length of the default RSS hash key. */
-const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
+static_assert(MLX5_RSS_HASH_KEY_LEN ==
+ (unsigned int)sizeof(rss_hash_default_key),
+ "wrong RSS default key size.");
/**
- * Allocate RX queue elements.
+ * Check whether Multi-Packet RQ can be enabled for the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 1 if supported, negative errno value if not.
+ */
+inline int
+mlx5_check_mprq_support(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ if (priv->config.mprq.enabled &&
+ priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
+ return 1;
+ return -ENOTSUP;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the Rx queue.
+ *
+ * @param rxq
+ * Pointer to receive queue structure.
+ *
+ * @return
+ * 0 if disabled, otherwise enabled.
+ */
+inline int
+mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
+{
+ return rxq->strd_num_n > 0;
+}
+
+/**
+ * Check whether Multi-Packet RQ is enabled for the device.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 if disabled, otherwise enabled.
+ */
+inline int
+mlx5_mprq_enabled(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ uint16_t i;
+ uint16_t n = 0;
+
+ if (mlx5_check_mprq_support(dev) < 0)
+ return 0;
+ /* All the configured queues should be enabled. */
+ for (i = 0; i < priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (!rxq)
+ continue;
+ if (mlx5_rxq_mprq_enabled(rxq))
+ ++n;
+ }
+ /* Multi-Packet RQ can't be partially configured. */
+ assert(n == 0 || n == priv->rxqs_n);
+ return n == priv->rxqs_n;
+}
+
+/**
+ * Allocate RX queue elements for Multi-Packet RQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+rxq_alloc_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ unsigned int wqe_n = 1 << rxq->elts_n;
+ unsigned int i;
+ int err;
+
+ /* Iterate on segments. */
+ for (i = 0; i <= wqe_n; ++i) {
+ struct mlx5_mprq_buf *buf;
+
+ if (rte_mempool_get(rxq->mprq_mp, (void **)&buf) < 0) {
+ DRV_LOG(ERR, "port %u empty mbuf pool", rxq->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ if (i < wqe_n)
+ (*rxq->mprq_bufs)[i] = buf;
+ else
+ rxq->mprq_repl = buf;
+ }
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u allocated and configured %u segments",
+ rxq->port_id, rxq_ctrl->idx, wqe_n);
+ return 0;
+error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ wqe_n = i;
+ for (i = 0; (i != wqe_n); ++i) {
+ if ((*rxq->mprq_bufs)[i] != NULL)
+ rte_mempool_put(rxq->mprq_mp,
+ (*rxq->mprq_bufs)[i]);
+ (*rxq->mprq_bufs)[i] = NULL;
+ }
+ DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ rxq->port_id, rxq_ctrl->idx);
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
+}
+
+/**
+ * Allocate RX queue elements for Single-Packet RQ.
*
* @param rxq_ctrl
* Pointer to RX queue structure.
@@ -63,13 +182,13 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
-rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+static int
+rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
unsigned int i;
- int ret = 0;
+ int err;
/* Iterate on segments. */
for (i = 0; (i != elts_n); ++i) {
@@ -77,8 +196,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
if (buf == NULL) {
- ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
- ret = ENOMEM;
+ DRV_LOG(ERR, "port %u empty mbuf pool",
+ PORT_ID(rxq_ctrl->priv));
+ rte_errno = ENOMEM;
goto error;
}
/* Headroom is reserved by rte_pktmbuf_alloc(). */
@@ -97,7 +217,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
(*rxq_ctrl->rxq.elts)[i] = buf;
}
/* If Rx vector is activated. */
- if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
+ if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
int j;
@@ -118,30 +238,78 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
}
- DEBUG("%p: allocated and configured %u segments (max %u packets)",
- (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
- assert(ret == 0);
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u allocated and configured %u segments"
+ " (max %u packets)",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
return 0;
error:
+ err = rte_errno; /* Save rte_errno before cleanup. */
elts_n = i;
for (i = 0; (i != elts_n); ++i) {
if ((*rxq_ctrl->rxq.elts)[i] != NULL)
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
- assert(ret > 0);
- return ret;
+ DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
+ rte_errno = err; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
- * Free RX queue elements.
+ * Allocate RX queue elements.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ return mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq_alloc_elts_mprq(rxq_ctrl) : rxq_alloc_elts_sprq(rxq_ctrl);
+}
+
+/**
+ * Free RX queue elements for Multi-Packet RQ.
*
* @param rxq_ctrl
* Pointer to RX queue structure.
*/
static void
-rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+rxq_free_elts_mprq(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
+ uint16_t i;
+
+ DRV_LOG(DEBUG, "port %u Multi-Packet Rx queue %u freeing WRs",
+ rxq->port_id, rxq_ctrl->idx);
+ if (rxq->mprq_bufs == NULL)
+ return;
+ assert(mlx5_rxq_check_vec_support(rxq) < 0);
+ for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
+ if ((*rxq->mprq_bufs)[i] != NULL)
+ mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
+ (*rxq->mprq_bufs)[i] = NULL;
+ }
+ if (rxq->mprq_repl != NULL) {
+ mlx5_mprq_buf_free(rxq->mprq_repl);
+ rxq->mprq_repl = NULL;
+ }
+}
+
+/**
+ * Free RX queue elements for Single-Packet RQ.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
{
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
const uint16_t q_n = (1 << rxq->elts_n);
@@ -149,14 +317,15 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
uint16_t i;
- DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
+ DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
if (rxq->elts == NULL)
return;
/**
* Some mbufs in the ring belong to the application; they cannot be
* freed.
*/
- if (rxq_check_vec_support(rxq) > 0) {
+ if (mlx5_rxq_check_vec_support(rxq) > 0) {
for (i = 0; i < used; ++i)
(*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
rxq->rq_pi = rxq->rq_ci;
@@ -169,6 +338,21 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
}
/**
+ * Free RX queue elements.
+ *
+ * @param rxq_ctrl
+ * Pointer to RX queue structure.
+ */
+static void
+rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+ if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+ rxq_free_elts_mprq(rxq_ctrl);
+ else
+ rxq_free_elts_sprq(rxq_ctrl);
+}
+
+/**
* Clean up a RX queue.
*
* Destroy objects, free allocated memory and reset the structure for reuse.
@@ -179,31 +363,35 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
void
mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl)
{
- DEBUG("cleaning up %p", (void *)rxq_ctrl);
+ DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u",
+ PORT_ID(rxq_ctrl->priv), rxq_ctrl->idx);
if (rxq_ctrl->ibv)
- mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
+ mlx5_rxq_ibv_release(rxq_ctrl->ibv);
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
}
/**
* Returns the per-queue supported offloads.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* Supported Rx offloads.
*/
uint64_t
-mlx5_priv_get_rx_queue_offloads(struct priv *priv)
+mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_JUMBO_FRAME);
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
if (config->hw_fcs_strip)
- offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
+
if (config->hw_csum)
offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -217,13 +405,11 @@ mlx5_priv_get_rx_queue_offloads(struct priv *priv)
/**
* Returns the per-port supported offloads.
*
- * @param priv
- * Pointer to private structure.
* @return
* Supported Rx offloads.
*/
uint64_t
-mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
+mlx5_get_rx_port_offloads(void)
{
uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER;
@@ -231,33 +417,6 @@ mlx5_priv_get_rx_port_offloads(struct priv *priv __rte_unused)
}
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param offloads
- * Per-queue offloads configuration.
- *
- * @return
- * 1 if the configuration is valid, 0 otherwise.
- */
-static int
-priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
-{
- uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supp_offloads =
- mlx5_priv_get_rx_queue_offloads(priv);
- uint64_t port_supp_offloads = mlx5_priv_get_rx_port_offloads(priv);
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return 0;
- if (((port_offloads ^ offloads) & port_supp_offloads))
- return 0;
- return 1;
-}
-
-/**
* DPDK callback to configure an Rx queue.
* @param dev
* Pointer to Ethernet device structure.
@@ -273,7 +432,7 @@ priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
* Memory pool for buffer allocations.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -284,53 +443,40 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- int ret = 0;
- priv_lock(priv);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
- WARN("%p: increased number of descriptors in RX queue %u"
- " to the next power of two (%d)",
- (void *)dev, idx, desc);
+ DRV_LOG(WARNING,
+ "port %u increased number of descriptors in Rx queue %u"
+ " to the next power of two (%d)",
+ dev->data->port_id, idx, desc);
}
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
+ DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
if (idx >= priv->rxqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->rxqs_n);
- priv_unlock(priv);
- return -EOVERFLOW;
- }
- if (!priv_is_rx_queue_offloads_allowed(priv, conf->offloads)) {
- ret = ENOTSUP;
- ERROR("%p: Rx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (mlx5_priv_get_rx_port_offloads(priv) |
- mlx5_priv_get_rx_queue_offloads(priv)));
- goto out;
- }
- if (!mlx5_priv_rxq_releasable(priv, idx)) {
- ret = EBUSY;
- ERROR("%p: unable to release queue index %u",
- (void *)dev, idx);
- goto out;
- }
- mlx5_priv_rxq_release(priv, idx);
- rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, conf, mp);
+ DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->rxqs_n);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
+ }
+ if (!mlx5_rxq_releasable(dev, idx)) {
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ mlx5_rxq_release(dev, idx);
+ rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp);
if (!rxq_ctrl) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- ret = ENOMEM;
- goto out;
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
- DEBUG("%p: adding RX queue %p to list",
- (void *)dev, (void *)rxq_ctrl);
+ DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ dev->data->port_id, idx);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
-out:
- priv_unlock(priv);
- return -ret;
+ return 0;
}
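From the application side this setup path is reached through the standard ethdev API; a hedged usage sketch with illustrative parameters (port and queue 0, 512 descriptors):

    #include <rte_ethdev.h>
    #include <rte_errno.h>

    static int
    app_setup_rx_queue(uint16_t port_id, struct rte_mempool *mp)
    {
    	/* NULL rxconf selects the defaults the driver reports via
    	 * rte_eth_dev_info_get(). */
    	int ret = rte_eth_rx_queue_setup(port_id, 0, 512,
    					 rte_eth_dev_socket_id(port_id),
    					 NULL, mp);

    	if (ret < 0)
    		/* For mlx5, ret is -rte_errno (EOVERFLOW, EBUSY, ENOMEM...). */
    		return ret;
    	return 0;
    }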
/**
@@ -350,45 +496,48 @@ mlx5_rx_queue_release(void *dpdk_rxq)
return;
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
- priv_lock(priv);
- if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx))
- rte_panic("Rx queue %p is still used by a flow and cannot be"
- " removed\n", (void *)rxq_ctrl);
- mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx);
- priv_unlock(priv);
+ if (!mlx5_rxq_releasable(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx))
+ rte_panic("port %u Rx queue %u is still used by a flow and"
+ " cannot be removed\n",
+ PORT_ID(priv), rxq_ctrl->idx);
+ mlx5_rxq_release(ETH_DEV(priv), rxq_ctrl->rxq.stats.idx);
}
/**
* Allocate queue vector and fill epoll fd list for Rx interrupts.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_rx_intr_vec_enable(struct priv *priv)
+mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
unsigned int count = 0;
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
- if (!priv->dev->data->dev_conf.intr_conf.rxq)
+ if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
- priv_rx_intr_vec_disable(priv);
+ mlx5_rx_intr_vec_disable(dev);
intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
- ERROR("failed to allocate memory for interrupt vector,"
- " Rx interrupts will not be supported");
- return -ENOMEM;
+ DRV_LOG(ERR,
+ "port %u failed to allocate memory for interrupt"
+ " vector, Rx interrupts will not be supported",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
intr_handle->type = RTE_INTR_HANDLE_EXT;
for (i = 0; i != n; ++i) {
/* This rxq ibv must not be released in this function. */
- struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i);
+ struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i);
int fd;
int flags;
int rc;
@@ -402,27 +551,34 @@ priv_rx_intr_vec_enable(struct priv *priv)
continue;
}
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
- ERROR("too many Rx queues for interrupt vector size"
- " (%d), Rx interrupts cannot be enabled",
- RTE_MAX_RXTX_INTR_VEC_ID);
- priv_rx_intr_vec_disable(priv);
- return -1;
+ DRV_LOG(ERR,
+ "port %u too many Rx queues for interrupt"
+ " vector size (%d), Rx interrupts cannot be"
+ " enabled",
+ dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID);
+ mlx5_rx_intr_vec_disable(dev);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
fd = rxq_ibv->channel->fd;
flags = fcntl(fd, F_GETFL);
rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
- ERROR("failed to make Rx interrupt file descriptor"
- " %d non-blocking for queue index %d", fd, i);
- priv_rx_intr_vec_disable(priv);
- return -1;
+ rte_errno = errno;
+ DRV_LOG(ERR,
+ "port %u failed to make Rx interrupt file"
+ " descriptor %d non-blocking for queue index"
+ " %d",
+ dev->data->port_id, fd, i);
+ mlx5_rx_intr_vec_disable(dev);
+ return -rte_errno;
}
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
intr_handle->efds[count] = fd;
count++;
}
if (!count)
- priv_rx_intr_vec_disable(priv);
+ mlx5_rx_intr_vec_disable(dev);
else
intr_handle->nb_efd = count;
return 0;
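With the interrupt vector populated here, an application can block on Rx activity through the ethdev interrupt API. A minimal hedged sketch (port 0, queue 0; error handling trimmed):

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    static int
    wait_for_rx(void)
    {
    	struct rte_epoll_event event;
    	int n;

    	/* Register queue 0's event fd in this thread's epoll set. */
    	if (rte_eth_dev_rx_intr_ctl_q(0, 0, RTE_EPOLL_PER_THREAD,
    				      RTE_INTR_EVENT_ADD, NULL) < 0)
    		return -1;
    	if (rte_eth_dev_rx_intr_enable(0, 0) < 0) /* Arms the CQ. */
    		return -1;
    	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
    	if (n > 0)
    		rte_eth_dev_rx_intr_disable(0, 0);
    	return n;
    }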
@@ -431,18 +587,19 @@ priv_rx_intr_vec_enable(struct priv *priv)
/**
* Clean up Rx interrupts handler.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_rx_intr_vec_disable(struct priv *priv)
+mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+ struct priv *priv = dev->data->dev_private;
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
unsigned int i;
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
- if (!priv->dev->data->dev_conf.intr_conf.rxq)
+ if (!dev->data->dev_conf.intr_conf.rxq)
return;
if (!intr_handle->intr_vec)
goto free;
@@ -459,7 +616,7 @@ priv_rx_intr_vec_disable(struct priv *priv)
*/
rxq_data = (*priv->rxqs)[i];
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
+ mlx5_rxq_ibv_release(rxq_ctrl->ibv);
}
free:
rte_intr_free_epoll_fd(intr_handle);
@@ -490,7 +647,8 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
doorbell = (uint64_t)doorbell_hi << 32;
doorbell |= rxq->cqn;
rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi);
- rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg);
+ mlx5_uar_write64(rte_cpu_to_be_64(doorbell),
+ cq_db_reg, rxq->uar_lock_cq);
}
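The switch from rte_write64() to mlx5_uar_write64() matters on 32-bit targets, where a 64-bit UAR write is issued as two 32-bit stores and must be serialized; this is why mlx5_rxq_new() below wires up uar_lock_cq only under #ifndef RTE_ARCH_64. A hypothetical sketch of the helper's shape (the real macro lives elsewhere in the driver and may differ):

    #include <rte_spinlock.h>

    static inline void
    uar_write64_locked(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
    {
    #ifndef RTE_ARCH_64
    	rte_spinlock_lock(lock); /* Two 32-bit stores must not interleave. */
    	((volatile uint32_t *)addr)[0] = (uint32_t)val;
    	((volatile uint32_t *)addr)[1] = (uint32_t)(val >> 32);
    	rte_spinlock_unlock(lock);
    #else
    	*(volatile uint64_t *)addr = val;
    	(void)lock;
    #endif
    }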
/**
@@ -502,7 +660,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
* Rx queue number.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@@ -510,31 +668,25 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
- int ret = 0;
- priv_lock(priv);
rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->irq) {
struct mlx5_rxq_ibv *rxq_ibv;
- rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
+ rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
- mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
+ mlx5_rxq_ibv_release(rxq_ibv);
}
-exit:
- priv_unlock(priv);
- if (ret)
- WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
- return -ret;
+ return 0;
}
/**
@@ -546,7 +698,7 @@ exit:
* Rx queue number.
*
* @return
- * 0 on success, negative on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
@@ -557,53 +709,54 @@ mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct mlx5_rxq_ibv *rxq_ibv = NULL;
struct ibv_cq *ev_cq;
void *ev_ctx;
- int ret = 0;
+ int ret;
- priv_lock(priv);
rxq_data = (*priv->rxqs)[rx_queue_id];
if (!rxq_data) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (!rxq_ctrl->irq)
- goto exit;
- rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id);
+ return 0;
+ rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id);
if (!rxq_ibv) {
- ret = EINVAL;
- goto exit;
+ rte_errno = EINVAL;
+ return -rte_errno;
}
ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx);
if (ret || ev_cq != rxq_ibv->cq) {
- ret = EINVAL;
+ rte_errno = EINVAL;
goto exit;
}
rxq_data->cq_arm_sn++;
mlx5_glue->ack_cq_events(rxq_ibv->cq, 1);
+ return 0;
exit:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (rxq_ibv)
- mlx5_priv_rxq_ibv_release(priv, rxq_ibv);
- priv_unlock(priv);
- if (ret)
- WARN("unable to disable interrupt on rx queue %d",
- rx_queue_id);
- return -ret;
+ mlx5_rxq_ibv_release(rxq_ibv);
+ DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
+ dev->data->port_id, rx_queue_id);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
* Create the Rx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
* @return
- * The Verbs object initialised if it can be created.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ibv*
-mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
@@ -613,10 +766,16 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
struct ibv_cq_init_attr_ex ibv;
struct mlx5dv_cq_init_attr mlx5;
} cq;
- struct ibv_wq_init_attr wq;
+ struct {
+ struct ibv_wq_init_attr ibv;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ struct mlx5dv_wq_init_attr mlx5;
+#endif
+ } wq;
struct ibv_cq_ex cq_attr;
} attr;
- unsigned int cqe_n = (1 << rxq_data->elts_n) - 1;
+ unsigned int cqe_n;
+ unsigned int wqe_n = 1 << rxq_data->elts_n;
struct mlx5_rxq_ibv *tmpl;
struct mlx5dv_cq cq_info;
struct mlx5dv_rwq rwq;
@@ -624,6 +783,7 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
int ret = 0;
struct mlx5dv_obj obj;
struct mlx5_dev_config *config = &priv->config;
+ const int mprq_en = mlx5_rxq_mprq_enabled(rxq_data);
assert(rxq_data);
assert(!rxq_ctrl->ibv);
@@ -632,28 +792,26 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
rxq_ctrl->socket);
if (!tmpl) {
- ERROR("%p: cannot allocate verbs resources",
- (void *)rxq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Rx queue %u cannot allocate verbs resources",
+ dev->data->port_id, rxq_ctrl->idx);
+ rte_errno = ENOMEM;
goto error;
}
tmpl->rxq_ctrl = rxq_ctrl;
- /* Use the entire RX mempool as the memory region. */
- tmpl->mr = priv_mr_get(priv, rxq_data->mp);
- if (!tmpl->mr) {
- tmpl->mr = priv_mr_new(priv, rxq_data->mp);
- if (!tmpl->mr) {
- ERROR("%p: MR creation failure", (void *)rxq_ctrl);
- goto error;
- }
- }
if (rxq_ctrl->irq) {
tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx);
if (!tmpl->channel) {
- ERROR("%p: Comp Channel creation failure",
- (void *)rxq_ctrl);
+ DRV_LOG(ERR, "port %u: comp channel creation failure",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
goto error;
}
}
+ if (mprq_en)
+ cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ else
+ cqe_n = wqe_n - 1;
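+	/*
+	 * Example (illustrative values): with wqe_n = 8 and strd_num_n = 6,
+	 * an MPRQ queue needs 8 * 64 - 1 = 511 CQEs (one per stride) while
+	 * a regular queue needs wqe_n - 1 = 7.
+	 */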
attr.cq.ibv = (struct ibv_cq_init_attr_ex){
.cqe = cqe_n,
.channel = tmpl->channel,
@@ -665,32 +823,43 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
if (config->cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ attr.cq.mlx5.cqe_comp_res_format =
+ mprq_en ? MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+ MLX5DV_CQE_RES_FORMAT_HASH;
+#else
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
/*
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
*/
- if (rxq_check_vec_support(rxq_data) < 0)
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
attr.cq.ibv.cqe *= 2;
} else if (config->cqe_comp && rxq_data->hw_timestamp) {
- DEBUG("Rx CQE compression is disabled for HW timestamp");
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for HW"
+ " timestamp",
+ dev->data->port_id);
}
tmpl->cq = mlx5_glue->cq_ex_to_cq
(mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv,
&attr.cq.mlx5));
if (tmpl->cq == NULL) {
- ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
+ DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
goto error;
}
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.orig_attr.max_sge);
- attr.wq = (struct ibv_wq_init_attr){
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
+ attr.wq.ibv = (struct ibv_wq_init_attr){
.wq_context = NULL, /* Could be useful in the future. */
.wq_type = IBV_WQT_RQ,
/* Max number of outstanding WRs. */
- .max_wr = (1 << rxq_data->elts_n) >> rxq_data->sges_n,
+ .max_wr = wqe_n >> rxq_data->sges_n,
/* Max number of scatter/gather elements in a WR. */
.max_sge = 1 << rxq_data->sges_n,
.pd = priv->pd,
@@ -704,32 +873,54 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
};
/* By default, FCS (CRC) is stripped by hardware. */
if (rxq_data->crc_present) {
- attr.wq.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
- attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ attr.wq.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
+ attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
}
#ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING
if (config->hw_padding) {
- attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
- attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ attr.wq.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
+ attr.wq.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
+ }
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+ attr.wq.mlx5 = (struct mlx5dv_wq_init_attr){
+ .comp_mask = 0,
+ };
+ if (mprq_en) {
+ struct mlx5dv_striding_rq_init_attr *mprq_attr =
+ &attr.wq.mlx5.striding_rq_attrs;
+
+ attr.wq.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
+ *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
+ .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
+ .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
+ .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
+ };
}
+ tmpl->wq = mlx5_glue->dv_create_wq(priv->ctx, &attr.wq.ibv,
+ &attr.wq.mlx5);
+#else
+ tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq.ibv);
#endif
- tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq);
if (tmpl->wq == NULL) {
- ERROR("%p: WQ creation failure", (void *)rxq_ctrl);
+ DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
goto error;
}
/*
* Make sure the number of WRs*SGEs matches expectations since a queue
* cannot allocate more than "desc" buffers.
*/
- if (((int)attr.wq.max_wr !=
- ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) ||
- ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) {
- ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs",
- (void *)rxq_ctrl,
- ((1 << rxq_data->elts_n) >> rxq_data->sges_n),
- (1 << rxq_data->sges_n),
- attr.wq.max_wr, attr.wq.max_sge);
+ if (attr.wq.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
+ attr.wq.ibv.max_sge != (1u << rxq_data->sges_n)) {
+ DRV_LOG(ERR,
+ "port %u Rx queue %u requested %u*%u but got %u*%u"
+ " WRs*SGEs",
+ dev->data->port_id, idx,
+ wqe_n >> rxq_data->sges_n, (1 << rxq_data->sges_n),
+ attr.wq.ibv.max_wr, attr.wq.ibv.max_sge);
+ rte_errno = EINVAL;
goto error;
}
/* Change queue state to ready. */
@@ -739,8 +930,10 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
};
ret = mlx5_glue->modify_wq(tmpl->wq, &mod);
if (ret) {
- ERROR("%p: WQ state to IBV_WQS_RDY failed",
- (void *)rxq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Rx queue %u WQ state to IBV_WQS_RDY failed",
+ dev->data->port_id, idx);
+ rte_errno = ret;
goto error;
}
obj.cq.in = tmpl->cq;
@@ -748,33 +941,53 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
obj.rwq.in = tmpl->wq;
obj.rwq.out = &rwq;
ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ);
- if (ret != 0)
+ if (ret) {
+ rte_errno = ret;
goto error;
+ }
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
goto error;
}
/* Fill the rings. */
- rxq_data->wqes = (volatile struct mlx5_wqe_data_seg (*)[])
- (uintptr_t)rwq.buf;
- for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
- struct rte_mbuf *buf = (*rxq_data->elts)[i];
- volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
-
+ rxq_data->wqes = rwq.buf;
+ for (i = 0; (i != wqe_n); ++i) {
+ volatile struct mlx5_wqe_data_seg *scat;
+ uintptr_t addr;
+ uint32_t byte_count;
+
+ if (mprq_en) {
+ struct mlx5_mprq_buf *buf = (*rxq_data->mprq_bufs)[i];
+
+ scat = &((volatile struct mlx5_wqe_mprq *)
+ rxq_data->wqes)[i].dseg;
+ addr = (uintptr_t)mlx5_mprq_buf_addr(buf);
+ byte_count = (1 << rxq_data->strd_sz_n) *
+ (1 << rxq_data->strd_num_n);
+ } else {
+ struct rte_mbuf *buf = (*rxq_data->elts)[i];
+
+ scat = &((volatile struct mlx5_wqe_data_seg *)
+ rxq_data->wqes)[i];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ byte_count = DATA_LEN(buf);
+ }
/* scat->addr must be able to store a pointer. */
assert(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
- .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
- uintptr_t)),
- .byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
- .lkey = tmpl->mr->lkey,
+ .addr = rte_cpu_to_be_64(addr),
+ .byte_count = rte_cpu_to_be_32(byte_count),
+ .lkey = mlx5_rx_addr2mr(rxq_data, addr),
};
}
rxq_data->rq_db = rwq.dbrec;
rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
rxq_data->cq_ci = 0;
- rxq_data->rq_ci = 0;
+ rxq_data->consumed_strd = 0;
rxq_data->rq_pi = 0;
rxq_data->zip = (struct rxq_zip){
.ai = 0,
@@ -785,43 +998,43 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
rxq_data->cqn = cq_info.cqn;
rxq_data->cq_arm_sn = 0;
/* Update doorbell counter. */
- rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
+ rxq_data->rq_ci = wqe_n >> rxq_data->sges_n;
rte_wmb();
*rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci);
- DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl);
+ DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
+ idx, (void *)&tmpl);
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return tmpl;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (tmpl->wq)
claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
if (tmpl->cq)
claim_zero(mlx5_glue->destroy_cq(tmpl->cq));
if (tmpl->channel)
claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel));
- if (tmpl->mr)
- priv_mr_release(priv, tmpl->mr);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ rte_errno = ret; /* Restore rte_errno. */
return NULL;
}
/**
* Get an Rx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* Queue index in DPDK Rx queue array
*
* @return
* The Verbs object if it exists.
*/
-struct mlx5_rxq_ibv*
-mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
struct mlx5_rxq_ctrl *rxq_ctrl;
@@ -831,11 +1044,7 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
return NULL;
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->ibv) {
- priv_mr_get(priv, rxq_data->mp);
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
- (void *)rxq_ctrl->ibv,
- rte_atomic32_read(&rxq_ctrl->ibv->refcnt));
}
return rxq_ctrl->ibv;
}
@@ -843,28 +1052,18 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx)
/**
* Release an Rx verbs queue object.
*
- * @param priv
- * Pointer to private structure.
* @param rxq_ibv
* Verbs Rx queue object.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
{
- int ret;
-
assert(rxq_ibv);
assert(rxq_ibv->wq);
assert(rxq_ibv->cq);
- assert(rxq_ibv->mr);
- ret = priv_mr_release(priv, rxq_ibv->mr);
- if (!ret)
- rxq_ibv->mr = NULL;
- DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv,
- (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) {
rxq_free_elts(rxq_ibv->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq));
@@ -876,26 +1075,28 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
rte_free(rxq_ibv);
return 0;
}
- return EBUSY;
+ return 1;
}
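Note the changed contract: the release helpers now return 1 while a reference remains and 0 once the object is freed, instead of the old EBUSY errno. Callers drop their pointer only on 0, a pattern this patch applies in mlx5_rxq_release() further below:

    if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
    	rxq_ctrl->ibv = NULL; /* Freed; forget the reference. */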
/**
* Verify the Verbs Rx queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_rxq_ibv_verify(struct priv *priv)
+mlx5_rxq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_rxq_ibv *rxq_ibv;
LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) {
- DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv,
- (void *)rxq_ibv);
+ DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced",
+ dev->data->port_id, rxq_ibv->rxq_ctrl->idx);
++ret;
}
return ret;
@@ -904,65 +1105,279 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv)
/**
* Return true if a single reference exists on the object.
*
- * @param priv
- * Pointer to private structure.
* @param rxq_ibv
* Verbs Rx queue object.
*/
int
-mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv)
{
- (void)priv;
assert(rxq_ibv);
return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
}
/**
+ * Callback function to initialize mbufs for Multi-Packet RQ.
+ */
+static inline void
+mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
+ void *_m, unsigned int i __rte_unused)
+{
+ struct mlx5_mprq_buf *buf = _m;
+
+ memset(_m, 0, sizeof(*buf));
+ buf->mp = mp;
+ rte_atomic16_set(&buf->refcnt, 1);
+}
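Each MPRQ buffer carries its owning mempool and a 16-bit reference count, initialised to 1 above. A hedged sketch of how such a buffer is returned to the pool once the last reference drops; the driver's mlx5_mprq_buf_free() plays this role, and its actual body (not shown in this hunk) may differ:

    static void
    mprq_buf_put(struct mlx5_mprq_buf *buf)
    {
    	/* Drop one reference; free to the pool on the last one. */
    	if (rte_atomic16_dec_and_test(&buf->refcnt))
    		rte_mempool_put(buf->mp, buf);
    }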
+
+/**
+ * Free mempool of Multi-Packet RQ.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_mprq_free_mp(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mempool *mp = priv->mprq_mp;
+ unsigned int i;
+
+ if (mp == NULL)
+ return 0;
+ DRV_LOG(DEBUG, "port %u freeing mempool (%s) for Multi-Packet RQ",
+ dev->data->port_id, mp->name);
+ /*
+ * If a buffer in the pool has been externally attached to an mbuf and
+ * is still in use by the application, destroying the Rx queue can
+ * spoil the packet. It is unlikely, but it can happen if an
+ * application dynamically creates and destroys Rx queues while
+ * holding Rx packets.
+ *
+ * TODO: This is unavoidable for now because the mempool for
+ * Multi-Packet RQ isn't provided by the application but managed by
+ * the PMD.
+ */
+ if (!rte_mempool_full(mp)) {
+ DRV_LOG(ERR,
+ "port %u mempool for Multi-Packet RQ is still in use",
+ dev->data->port_id);
+ rte_errno = EBUSY;
+ return -rte_errno;
+ }
+ rte_mempool_free(mp);
+ /* Unset mempool for each Rx queue. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ rxq->mprq_mp = NULL;
+ }
+ return 0;
+}
+
+/**
+ * Allocate a mempool for Multi-Packet RQ. All configured Rx queues share the
+ * mempool. If already allocated, reuse it if there are enough elements.
+ * Otherwise, resize it.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_mprq_alloc_mp(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_mempool *mp = priv->mprq_mp;
+ char name[RTE_MEMPOOL_NAMESIZE];
+ unsigned int desc = 0;
+ unsigned int buf_len;
+ unsigned int obj_num;
+ unsigned int obj_size;
+ unsigned int strd_num_n = 0;
+ unsigned int strd_sz_n = 0;
+ unsigned int i;
+
+ if (!mlx5_mprq_enabled(dev))
+ return 0;
+ /* Count the total number of descriptors configured. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ desc += 1 << rxq->elts_n;
+ /* Get the max number of strides. */
+ if (strd_num_n < rxq->strd_num_n)
+ strd_num_n = rxq->strd_num_n;
+ /* Get the max size of a stride. */
+ if (strd_sz_n < rxq->strd_sz_n)
+ strd_sz_n = rxq->strd_sz_n;
+ }
+ assert(strd_num_n && strd_sz_n);
+ buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
+ obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
+ /*
+ * Received packets can be either memcpy'd or externally referenced.
+ * When a packet is attached to an mbuf as an external buffer, it is
+ * impossible to predict how the buffers will be queued by the
+ * application, so the needed buffers cannot be pre-allocated exactly;
+ * enough buffers must be prepared speculatively instead.
+ *
+ * In the data path, if this Mempool is depleted, PMD will try to memcpy
+ * received packets to buffers provided by application (rxq->mp) until
+ * this Mempool gets available again.
+ */
+ desc *= 4;
+ obj_num = desc + MLX5_MPRQ_MP_CACHE_SZ * priv->rxqs_n;
+ /*
+ * rte_mempool_create_empty() has a sanity check that refuses a cache
+ * size which is too large compared to the number of elements.
+ * CACHE_FLUSHTHRESH_MULTIPLIER is defined in a C file, so a constant
+ * of 2 is used here instead.
+ */
+ obj_num = RTE_MAX(obj_num, MLX5_MPRQ_MP_CACHE_SZ * 2);
+ /* Check whether a mempool is already allocated and if it can be reused. */
+ if (mp != NULL && mp->elt_size >= obj_size && mp->size >= obj_num) {
+ DRV_LOG(DEBUG, "port %u mempool %s is being reused",
+ dev->data->port_id, mp->name);
+ /* Reuse. */
+ goto exit;
+ } else if (mp != NULL) {
+ DRV_LOG(DEBUG, "port %u mempool %s should be resized, freeing it",
+ dev->data->port_id, mp->name);
+ /*
+ * If freeing fails, the mempool may still be in use and there
+ * is no choice but to keep using the existing one. On buffer
+ * underrun, packets will be memcpy'd instead of being attached
+ * as external buffers.
+ */
+ if (mlx5_mprq_free_mp(dev)) {
+ if (mp->elt_size >= obj_size)
+ goto exit;
+ else
+ return -rte_errno;
+ }
+ }
+ snprintf(name, sizeof(name), "%s-mprq", dev->device->name);
+ mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
+ 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
+ dev->device->numa_node, 0);
+ if (mp == NULL) {
+ DRV_LOG(ERR,
+ "port %u failed to allocate a mempool for"
+ " Multi-Packet RQ, count=%u, size=%u",
+ dev->data->port_id, obj_num, obj_size);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ priv->mprq_mp = mp;
+exit:
+ /* Set mempool for each Rx queue. */
+ for (i = 0; i != priv->rxqs_n; ++i) {
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
+
+ if (rxq == NULL)
+ continue;
+ rxq->mprq_mp = mp;
+ }
+ DRV_LOG(INFO, "port %u Multi-Packet RQ is configured",
+ dev->data->port_id);
+ return 0;
+}
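To make the sizing above concrete, a worked example with illustrative values that mirrors the function's locals (two Rx queues of 256 descriptors each, strd_num_n = 6, strd_sz_n = 11; none of these numbers come from the patch):

    desc     = 2 * 256;                  /* 512 descriptors in total */
    buf_len  = (1 << 6) * (1 << 11);     /* 131072 B per MPRQ buffer */
    obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
    desc    *= 4;                        /* speculative headroom: 2048 */
    obj_num  = desc + MLX5_MPRQ_MP_CACHE_SZ * 2; /* plus per-queue cache */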
+
+/**
* Create a DPDK Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
- * TX queue index.
+ * RX queue index.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
* NUMA socket on which memory must be allocated.
*
* @return
- * A DPDK queue object on success.
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
-struct mlx5_rxq_ctrl*
-mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket, const struct rte_eth_rxconf *conf,
- struct rte_mempool *mp)
+struct mlx5_rxq_ctrl *
+mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp)
{
- struct rte_eth_dev *dev = priv->dev;
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ unsigned int mprq_stride_size;
struct mlx5_dev_config *config = &priv->config;
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
*/
- const uint16_t desc_n =
+ uint16_t desc_n =
desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
+ uint64_t offloads = conf->offloads |
+ dev->data->dev_conf.rxmode.offloads;
+ const int mprq_en = mlx5_check_mprq_support(dev) > 0;
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
desc_n * sizeof(struct rte_mbuf *),
0, socket);
- if (!tmpl)
+ if (!tmpl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
+ if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
+ MLX5_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
tmpl->socket = socket;
- if (priv->dev->data->dev_conf.intr_conf.rxq)
+ if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
- /* Enable scattered packets support for this queue if necessary. */
+ /*
+ * This Rx queue can be configured as a Multi-Packet RQ if all of the
+ * following conditions are met:
+ * - MPRQ is enabled.
+ * - The number of descs is more than the number of strides.
+ * - max_rx_pkt_len plus overhead is less than the max size of a
+ * stride.
+ * Otherwise, enable Rx scatter if necessary.
+ */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ mprq_stride_size =
+ dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ sizeof(struct rte_mbuf_ext_shared_info) +
+ RTE_PKTMBUF_HEADROOM;
+ if (mprq_en &&
+ desc > (1U << config->mprq.stride_num_n) &&
+ mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
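+ /*
+  * Worked example (illustrative values only): with desc = 512,
+  * mprq.stride_num_n = 6 (64 strides per WQE) and
+  * mprq.max_stride_size_n = 11 (2048 B strides), a 1500 B
+  * max_rx_pkt_len plus headroom and shared-info overhead fits in one
+  * stride, so MPRQ is selected and desc is trimmed below to
+  * 512 >> 6 = 8 WQEs.
+  */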
+ /* TODO: Rx scatter isn't supported yet. */
+ tmpl->rxq.sges_n = 0;
+ /* Trim the number of descs needed. */
+ desc >>= config->mprq.stride_num_n;
+ tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
+ tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
+ config->mprq.min_stride_size_n);
+ tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
+ tmpl->rxq.mprq_max_memcpy_len =
+ RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
+ config->mprq.max_memcpy_len);
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u: Multi-Packet RQ is enabled"
+ " strd_num_n = %u, strd_sz_n = %u",
+ dev->data->port_id, idx,
+ tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
+ } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
- } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) {
+ } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -978,57 +1393,71 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
size = mb_len * (1 << tmpl->rxq.sges_n);
size -= RTE_PKTMBUF_HEADROOM;
if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
- ERROR("%p: too many SGEs (%u) needed to handle"
- " requested maximum packet size %u",
- (void *)dev,
- 1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ DRV_LOG(ERR,
+ "port %u too many SGEs (%u) needed to handle"
+ " requested maximum packet size %u",
+ dev->data->port_id,
+ 1 << sges_n,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ rte_errno = EOVERFLOW;
goto error;
}
} else {
- WARN("%p: the requested maximum Rx packet size (%u) is"
- " larger than a single mbuf (%u) and scattered"
- " mode has not been requested",
- (void *)dev,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- mb_len - RTE_PKTMBUF_HEADROOM);
- }
- DEBUG("%p: maximum number of segments per packet: %u",
- (void *)dev, 1 << tmpl->rxq.sges_n);
+ DRV_LOG(WARNING,
+ "port %u the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered mode has"
+ " not been requested",
+ dev->data->port_id,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
+ }
+ if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
+ DRV_LOG(WARNING,
+ "port %u MPRQ is requested but cannot be enabled"
+ " (requested: desc = %u, stride_sz = %u,"
+ " supported: min_stride_num = %u, max_stride_sz = %u).",
+ dev->data->port_id, desc, mprq_stride_size,
+ (1 << config->mprq.stride_num_n),
+ (1 << config->mprq.max_stride_size_n));
+ DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
+ dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
- ERROR("%p: number of RX queue descriptors (%u) is not a"
- " multiple of SGEs per packet (%u)",
- (void *)dev,
- desc,
- 1 << tmpl->rxq.sges_n);
+ DRV_LOG(ERR,
+ "port %u number of Rx queue descriptors (%u) is not a"
+ " multiple of SGEs per packet (%u)",
+ dev->data->port_id,
+ desc,
+ 1 << tmpl->rxq.sges_n);
+ rte_errno = EINVAL;
goto error;
}
/* Toggle RX checksum offload if hardware supports it. */
- tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM);
- tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) &&
- priv->config.hw_csum_l2tun);
- tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP);
+ tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
+ tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
/* Configure VLAN stripping. */
- tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
/* By default, FCS (CRC) is stripped by hardware. */
- if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
- tmpl->rxq.crc_present = 0;
- } else if (config->hw_fcs_strip) {
- tmpl->rxq.crc_present = 1;
- } else {
- WARN("%p: CRC stripping has been disabled but will still"
- " be performed by hardware, make sure MLNX_OFED and"
- " firmware are up to date",
- (void *)dev);
- tmpl->rxq.crc_present = 0;
- }
- DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from"
- " incoming frames to hide it",
- (void *)dev,
- tmpl->rxq.crc_present ? "disabled" : "enabled",
- tmpl->rxq.crc_present << 2);
+ tmpl->rxq.crc_present = 0;
+ if (rte_eth_dev_must_keep_crc(offloads)) {
+ if (config->hw_fcs_strip) {
+ tmpl->rxq.crc_present = 1;
+ } else {
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been disabled but will"
+ " still be performed by hardware, make sure MLNX_OFED"
+ " and firmware are up to date",
+ dev->data->port_id);
+ }
+ }
+ DRV_LOG(DEBUG,
+ "port %u CRC stripping is %s, %u bytes will be subtracted from"
+ " incoming frames to hide it",
+ dev->data->port_id,
+ tmpl->rxq.crc_present ? "disabled" : "enabled",
+ tmpl->rxq.crc_present << 2);
/* Save port ID. */
- tmpl->rxq.rss_hash = priv->rxqs_n > 1;
+ tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
+ (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
tmpl->priv = priv;
tmpl->rxq.mp = mp;
@@ -1036,9 +1465,11 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+#ifndef RTE_ARCH_64
+ tmpl->rxq.uar_lock_cq = &priv->uar_lock_cq;
+#endif
+ tmpl->idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
@@ -1049,28 +1480,26 @@ error:
/**
* Get a Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* RX queue index.
*
* @return
- * A pointer to the queue if it exists.
+ * A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_rxq_ctrl*
-mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
+struct mlx5_rxq_ctrl *
+mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
if ((*priv->rxqs)[idx]) {
rxq_ctrl = container_of((*priv->rxqs)[idx],
struct mlx5_rxq_ctrl,
rxq);
-
- mlx5_priv_rxq_ibv_get(priv, idx);
+ mlx5_rxq_ibv_get(dev, idx);
rte_atomic32_inc(&rxq_ctrl->refcnt);
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
- (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
}
return rxq_ctrl;
}
@@ -1078,59 +1507,58 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx)
/**
* Release a Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* RX queue index.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_rxq_release(struct priv *priv, uint16_t idx)
+mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
if (!(*priv->rxqs)[idx])
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
assert(rxq_ctrl->priv);
- if (rxq_ctrl->ibv) {
- int ret;
-
- ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv);
- if (!ret)
- rxq_ctrl->ibv = NULL;
- }
- DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv,
- (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt));
+ if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv))
+ rxq_ctrl->ibv = NULL;
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
+ mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
return 0;
}
- return EBUSY;
+ return 1;
}
/**
* Verify if the queue can be released.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* RX queue index.
*
* @return
- * 1 if the queue can be released.
+ * 1 if the queue can be released, 0 if it is still in use, a negative
+ * errno value otherwise and rte_errno is set.
*/
int
-mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
+mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
- if (!(*priv->rxqs)[idx])
- return -1;
+ if (!(*priv->rxqs)[idx]) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1);
}
@@ -1138,20 +1566,22 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx)
/**
* Verify the Rx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_rxq_verify(struct priv *priv)
+mlx5_rxq_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) {
- DEBUG("%p: Rx Queue %p still referenced", (void *)priv,
- (void *)rxq_ctrl);
+ DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced",
+ dev->data->port_id, rxq_ctrl->idx);
++ret;
}
return ret;
@@ -1160,20 +1590,21 @@ mlx5_priv_rxq_verify(struct priv *priv)
/**
* Create an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param queues
* Queues entering the indirection table.
* @param queues_n
* Number of queues in the array.
*
* @return
- * A new indirection table.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_ind_table_ibv*
-mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
- uint16_t queues_n)
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
@@ -1184,11 +1615,12 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) +
queues_n * sizeof(uint16_t), 0);
- if (!ind_tbl)
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq =
- mlx5_priv_rxq_get(priv, queues[i]);
+ struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
if (!rxq)
goto error;
@@ -1206,24 +1638,24 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
.ind_tbl = wq,
.comp_mask = 0,
});
- if (!ind_tbl->ind_table)
+ if (!ind_tbl->ind_table) {
+ rte_errno = errno;
goto error;
+ }
rte_atomic32_inc(&ind_tbl->refcnt);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
return ind_tbl;
error:
rte_free(ind_tbl);
- DEBUG("%p cannot create indirection table", (void *)priv);
+ DEBUG("port %u cannot create indirection table", dev->data->port_id);
return NULL;
}
/**
* Get an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param queues
* Queues entering the indirection table.
* @param queues_n
@@ -1232,10 +1664,11 @@ error:
* @return
* An indirection table if found.
*/
-struct mlx5_ind_table_ibv*
-mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
- uint16_t queues_n)
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+ uint32_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
@@ -1249,10 +1682,8 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
unsigned int i;
rte_atomic32_inc(&ind_tbl->refcnt);
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
for (i = 0; i != ind_tbl->queues_n; ++i)
- mlx5_priv_rxq_get(priv, ind_tbl->queues[i]);
+ mlx5_rxq_get(dev, ind_tbl->queues[i]);
}
return ind_tbl;
}
@@ -1260,52 +1691,53 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[],
/**
* Release an indirection table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_ind_table_ibv_release(struct priv *priv,
- struct mlx5_ind_table_ibv *ind_tbl)
+mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl)
{
unsigned int i;
- DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
- (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
claim_zero(mlx5_glue->destroy_rwq_ind_table
(ind_tbl->ind_table));
for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
+ claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (!rte_atomic32_read(&ind_tbl->refcnt)) {
LIST_REMOVE(ind_tbl, next);
rte_free(ind_tbl);
return 0;
}
- return EBUSY;
+ return 1;
}
/**
* Verify the Verbs indirection table list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_ind_table_ibv_verify(struct priv *priv)
+mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_ind_table_ibv *ind_tbl;
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
- DEBUG("%p: Verbs indirection table %p still referenced",
- (void *)priv, (void *)ind_tbl);
+ DRV_LOG(DEBUG,
+ "port %u Verbs indirection table %p still referenced",
+ dev->data->port_id, (void *)ind_tbl);
++ret;
}
return ret;
@@ -1314,8 +1746,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
/**
* Create an Rx Hash queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param rss_key
* RSS key for the Rx hash queue.
* @param rss_key_len
@@ -1329,22 +1761,60 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
* Number of queues.
*
* @return
- * An hash Rx queue on success.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_hrxq*
-mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
- uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+struct mlx5_hrxq *
+mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ int tunnel __rte_unused)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
+ int err;
queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
- if (!ind_tbl)
- ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
+ if (!rss_key_len) {
+ rss_key_len = MLX5_RSS_HASH_KEY_LEN;
+ rss_key = rss_hash_default_key;
+ }
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ qp = mlx5_glue->dv_create_qp
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_key_len ? rss_key_len :
+ MLX5_RSS_HASH_KEY_LEN,
+ .rx_hash_key = rss_key ?
+ (void *)(uintptr_t)rss_key :
+ rss_hash_default_key,
+ .rx_hash_fields_mask = hash_fields,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd,
+ },
+ &(struct mlx5dv_qp_init_attr){
+ .comp_mask = tunnel ?
+ MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS : 0,
+ .create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS,
+ });
+#else
qp = mlx5_glue->create_qp_ex
(priv->ctx,
&(struct ibv_qp_init_attr_ex){
@@ -1355,15 +1825,21 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
IBV_QP_INIT_ATTR_RX_HASH,
.rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_key_len,
- .rx_hash_key = rss_key,
+ .rx_hash_key_len = rss_key_len ? rss_key_len :
+ MLX5_RSS_HASH_KEY_LEN,
+ .rx_hash_key = rss_key ?
+ (void *)(uintptr_t)rss_key :
+ rss_hash_default_key,
.rx_hash_fields_mask = hash_fields,
},
.rwq_ind_tbl = ind_tbl->ind_table,
.pd = priv->pd,
});
- if (!qp)
+#endif
+ if (!qp) {
+ rte_errno = errno;
goto error;
+ }
hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
if (!hrxq)
goto error;
@@ -1374,21 +1850,21 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
memcpy(hrxq->rss_key, rss_key, rss_key_len);
rte_atomic32_inc(&hrxq->refcnt);
LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
return hrxq;
error:
- mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+ err = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
if (qp)
claim_zero(mlx5_glue->destroy_qp(qp));
+ rte_errno = err; /* Restore rte_errno. */
return NULL;
}
/**
* Get an Rx Hash queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param rss_conf
* RSS configuration for the Rx hash queue.
* @param queues
@@ -1400,10 +1876,13 @@ error:
* @return
* A hash Rx queue on success.
*/
-struct mlx5_hrxq*
-mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
- uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+struct mlx5_hrxq *
+mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
queues_n = hash_fields ? queues_n : 1;
@@ -1416,16 +1895,14 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
continue;
if (hrxq->hash_fields != hash_fields)
continue;
- ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+ ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
- mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+ mlx5_ind_table_ibv_release(dev, ind_tbl);
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
return hrxq;
}
return NULL;
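Callers typically pair the lookup with creation, mirroring the indirection table handling earlier in this patch; a hedged sketch using the signatures introduced here:

    static struct mlx5_hrxq *
    get_or_create_hrxq(struct rte_eth_dev *dev,
    		   const uint8_t *rss_key, uint32_t rss_key_len,
    		   uint64_t hash_fields,
    		   const uint16_t *queues, uint32_t queues_n)
    {
    	struct mlx5_hrxq *hrxq;

    	hrxq = mlx5_hrxq_get(dev, rss_key, rss_key_len, hash_fields,
    			     queues, queues_n);
    	if (!hrxq)
    		hrxq = mlx5_hrxq_new(dev, rss_key, rss_key_len, hash_fields,
    				     queues, queues_n, 0 /* no tunnel */);
    	return hrxq; /* NULL with rte_errno set on failure. */
    }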
@@ -1434,48 +1911,281 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
/**
* Release the hash Rx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param hrxq
* Pointer to Hash Rx queue to release.
*
* @return
- * 0 on success, errno value on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
+mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
- DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
- (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
+ mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
return 0;
}
- claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
- return EBUSY;
+ claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+ return 1;
}
/**
* Verify the Verbs hash Rx queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_hrxq_ibv_verify(struct priv *priv)
+mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
int ret = 0;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
- DEBUG("%p: Verbs Hash Rx queue %p still referenced",
- (void *)priv, (void *)hrxq);
+ DRV_LOG(DEBUG,
+ "port %u Verbs hash Rx queue %p still referenced",
+ dev->data->port_id, (void *)hrxq);
++ret;
}
return ret;
}
+
+/**
+ * Create a drop Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_rxq_ibv *
+mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct ibv_cq *cq;
+ struct ibv_wq *wq = NULL;
+ struct mlx5_rxq_ibv *rxq;
+
+ if (priv->drop_queue.rxq)
+ return priv->drop_queue.rxq;
+ cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
+ if (!cq) {
+ DEBUG("port %u cannot allocate CQ for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ wq = mlx5_glue->create_wq(priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
+ .pd = priv->pd,
+ .cq = cq,
+ });
+ if (!wq) {
+ DEBUG("port %u cannot allocate WQ for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ rxq = rte_calloc(__func__, 1, sizeof(*rxq), 0);
+ if (!rxq) {
+ DEBUG("port %u cannot allocate drop Rx queue memory",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ rxq->cq = cq;
+ rxq->wq = wq;
+ priv->drop_queue.rxq = rxq;
+ return rxq;
+error:
+ if (wq)
+ claim_zero(mlx5_glue->destroy_wq(wq));
+ if (cq)
+ claim_zero(mlx5_glue->destroy_cq(cq));
+ return NULL;
+}
+
+/**
+ * Release a drop Rx queue Verbs object.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_ibv *rxq = priv->drop_queue.rxq;
+
+ if (rxq->wq)
+ claim_zero(mlx5_glue->destroy_wq(rxq->wq));
+ if (rxq->cq)
+ claim_zero(mlx5_glue->destroy_cq(rxq->cq));
+ rte_free(rxq);
+ priv->drop_queue.rxq = NULL;
+}
+
+/**
+ * Create a drop indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_ind_table_ibv *
+mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_rxq_ibv *rxq;
+ struct mlx5_ind_table_ibv tmpl;
+
+ rxq = mlx5_rxq_ibv_drop_new(dev);
+ if (!rxq)
+ return NULL;
+ tmpl.ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
+ .log_ind_tbl_size = 0,
+ .ind_tbl = &rxq->wq,
+ .comp_mask = 0,
+ });
+ if (!tmpl.ind_table) {
+ DEBUG("port %u cannot allocate indirection table for drop"
+ " queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl), 0);
+ if (!ind_tbl) {
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ ind_tbl->ind_table = tmpl.ind_table;
+ return ind_tbl;
+error:
+ mlx5_rxq_ibv_drop_release(dev);
+ return NULL;
+}
+
+/**
+ * Release a drop indirection table.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
+ mlx5_rxq_ibv_drop_release(dev);
+ rte_free(ind_tbl);
+ priv->drop_queue.hrxq->ind_table = NULL;
+}
+
+/**
+ * Create a drop Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ *
+ * @return
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_hrxq *
+mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_ind_table_ibv *ind_tbl;
+ struct ibv_qp *qp;
+ struct mlx5_hrxq *hrxq;
+
+ if (priv->drop_queue.hrxq) {
+ rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
+ return priv->drop_queue.hrxq;
+ }
+ ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+ if (!ind_tbl)
+ return NULL;
+ qp = mlx5_glue->create_qp_ex(priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
+ .rx_hash_function =
+ IBV_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = 0,
+ },
+ .rwq_ind_tbl = ind_tbl->ind_table,
+ .pd = priv->pd
+ });
+ if (!qp) {
+ DEBUG("port %u cannot allocate QP for drop queue",
+ dev->data->port_id);
+ rte_errno = errno;
+ goto error;
+ }
+ hrxq = rte_calloc(__func__, 1, sizeof(*hrxq), 0);
+ if (!hrxq) {
+ DRV_LOG(WARNING,
+ "port %u cannot allocate memory for drop queue",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ goto error;
+ }
+ hrxq->ind_table = ind_tbl;
+ hrxq->qp = qp;
+ priv->drop_queue.hrxq = hrxq;
+ rte_atomic32_set(&hrxq->refcnt, 1);
+ return hrxq;
+error:
+ if (ind_tbl)
+ mlx5_ind_table_ibv_drop_release(dev);
+ return NULL;
+}
+
+/**
+ * Release a drop hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ */
+void
+mlx5_hrxq_drop_release(struct rte_eth_dev *dev)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
+
+ if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
+ claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+ mlx5_ind_table_ibv_drop_release(dev);
+ rte_free(hrxq);
+ priv->drop_queue.hrxq = NULL;
+ }
+}
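Taken together, the drop-queue helpers above form a small singleton stack: one CQ/WQ pair, a one-entry indirection table, and a hash Rx queue cached in priv->drop_queue. Note that mlx5_ind_table_ibv_drop_release() reads the table back through priv->drop_queue.hrxq, so it assumes the hash queue has already been installed there. A usage sketch from a hypothetical flow-creation context:

struct mlx5_hrxq *hrxq = mlx5_hrxq_drop_new(dev);

if (hrxq == NULL)
	return -rte_errno; /* set by the failing helper */
/* ... point the drop flow at hrxq->qp ... */
mlx5_hrxq_drop_release(dev); /* drop the reference when done */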
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index dc4ead93..2d14f8a6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <assert.h>
@@ -34,19 +34,29 @@
#include "mlx5_prm.h"
static __rte_always_inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
static __rte_always_inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, uint32_t *rss_hash);
+ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe);
static __rte_always_inline uint32_t
-rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe);
+rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe);
+
+static __rte_always_inline void
+rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
+ volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res);
+
+static __rte_always_inline void
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx);
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
+uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
+uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+
/**
* Build a table to translate Rx completion flags to packet type.
*
@@ -86,6 +96,14 @@ mlx5_set_ptype_table(void)
RTE_PTYPE_L4_TCP;
(*p)[0x06] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
+ (*p)[0x0d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x0e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x11] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
/* UDP */
(*p)[0x09] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
@@ -104,17 +122,27 @@ mlx5_set_ptype_table(void)
RTE_PTYPE_L4_TCP;
(*p)[0x86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_TCP;
+ (*p)[0x8d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x8e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x91] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
+ (*p)[0x92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP;
(*p)[0x89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
(*p)[0x8a] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_L4_UDP;
/* Tunneled - L3 */
+ (*p)[0x40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
(*p)[0x41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
(*p)[0x42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
+ (*p)[0xc0] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
(*p)[0xc1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_NONFRAG;
@@ -141,12 +169,36 @@ mlx5_set_ptype_table(void)
(*p)[0x46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x4d] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x4e] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
(*p)[0xc5] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
(*p)[0xc6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xcd] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xce] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xd1] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
+ (*p)[0xd2] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP;
/* Tunneled - UDP */
(*p)[0x49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
@@ -163,6 +215,74 @@ mlx5_set_ptype_table(void)
}
/**
+ * Build a table to translate packet to checksum type of Verbs.
+ */
+void
+mlx5_set_cksum_table(void)
+{
+ unsigned int i;
+ uint8_t v;
+
+ /*
+ * The index should have:
+ * bit[0] = PKT_TX_TCP_SEG
+ * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+ * bit[4] = PKT_TX_IP_CKSUM
+ * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[9] = tunnel
+ */
+ for (i = 0; i < RTE_DIM(mlx5_cksum_table); ++i) {
+ v = 0;
+ if (i & (1 << 9)) {
+ /* Tunneled packet. */
+ if (i & (1 << 8)) /* Outer IP. */
+ v |= MLX5_ETH_WQE_L3_CSUM;
+ if (i & (1 << 4)) /* Inner IP. */
+ v |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+ v |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ } else {
+ /* No tunnel. */
+ if (i & (1 << 4)) /* IP. */
+ v |= MLX5_ETH_WQE_L3_CSUM;
+ if (i & (3 << 2 | 1 << 0)) /* L4 or TSO. */
+ v |= MLX5_ETH_WQE_L4_CSUM;
+ }
+ mlx5_cksum_table[i] = v;
+ }
+}
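The table is indexed by a 10-bit key folded out of the mbuf Tx offload flags. A sketch of the index construction implied by the bit layout above; this mirrors, but is not, the PMD's fast-path code, and it assumes the DPDK 18.08 placement of PKT_TX_L4_MASK at bits 52:53 of ol_flags (the same shift used by mlx5_set_swp_types_table() below):

static uint16_t
cksum_table_idx(uint64_t ol_flags, int tunneled)
{
	uint16_t idx = 0;

	if (ol_flags & PKT_TX_TCP_SEG)			/* bit[0]: TSO */
		idx |= 1 << 0;
	idx |= ((ol_flags & PKT_TX_L4_MASK) >> 52) << 2; /* bit[2:3]: L4 */
	if (ol_flags & PKT_TX_IP_CKSUM)			/* bit[4]: inner IP */
		idx |= 1 << 4;
	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)		/* bit[8]: outer IP */
		idx |= 1 << 8;
	if (tunneled)					/* bit[9]: tunnel */
		idx |= 1 << 9;
	return idx; /* mlx5_cksum_table[idx] yields the WQE csum flags */
}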
+
+/**
+ * Build a table to translate packet type of mbuf to SWP type of Verbs.
+ */
+void
+mlx5_set_swp_types_table(void)
+{
+ unsigned int i;
+ uint8_t v;
+
+ /*
+ * The index should have:
+ * bit[0:1] = PKT_TX_L4_MASK
+ * bit[4] = PKT_TX_IPV6
+ * bit[8] = PKT_TX_OUTER_IPV6
+ * bit[9] = PKT_TX_OUTER_UDP
+ */
+ for (i = 0; i < RTE_DIM(mlx5_swp_types_table); ++i) {
+ v = 0;
+ if (i & (1 << 8))
+ v |= MLX5_ETH_WQE_L3_OUTER_IPV6;
+ if (i & (1 << 9))
+ v |= MLX5_ETH_WQE_L4_OUTER_UDP;
+ if (i & (1 << 4))
+ v |= MLX5_ETH_WQE_L3_INNER_IPV6;
+ if ((i & 3) == (PKT_TX_UDP_CKSUM >> 52))
+ v |= MLX5_ETH_WQE_L4_INNER_UDP;
+ mlx5_swp_types_table[i] = v;
+ }
+}
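A worked lookup grounded in the loop above: an inner UDP checksum request (bits 0:1 = PKT_TX_UDP_CKSUM >> 52 = 3) over inner IPv6 (bit 4), outer IPv6 (bit 8) and an outer UDP tunnel (bit 9) gives index 0x313, which the table fills with all four SWP bits:

uint16_t idx = 3 | (1 << 4) | (1 << 8) | (1 << 9);	/* 0x313 */
uint8_t swp = mlx5_swp_types_table[idx];
/* swp == MLX5_ETH_WQE_L3_OUTER_IPV6 | MLX5_ETH_WQE_L4_OUTER_UDP |
 *	  MLX5_ETH_WQE_L3_INNER_IPV6 | MLX5_ETH_WQE_L4_INNER_UDP */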
+
+/**
* Return the size of tailroom of WQ.
*
* @param txq
@@ -219,6 +339,60 @@ mlx5_copy_to_wq(void *dst, const void *src, size_t n,
}
/**
+ * Inline TSO headers into WQE.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+inline_tso(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+ uint32_t *length,
+ uintptr_t *addr,
+ uint16_t *pkt_inline_sz,
+ uint8_t **raw,
+ uint16_t *max_wqe,
+ uint16_t *tso_segsz,
+ uint16_t *tso_header_sz)
+{
+ uintptr_t end = (uintptr_t)(((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+ unsigned int copy_b;
+ uint8_t vlan_sz = (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
+ const uint8_t tunneled = txq->tunnel_en && (buf->ol_flags &
+ PKT_TX_TUNNEL_MASK);
+ uint16_t n_wqe;
+
+ *tso_segsz = buf->tso_segsz;
+ *tso_header_sz = buf->l2_len + vlan_sz + buf->l3_len + buf->l4_len;
+ if (unlikely(*tso_segsz == 0 || *tso_header_sz == 0)) {
+ txq->stats.oerrors++;
+ return -EINVAL;
+ }
+ if (tunneled)
+ *tso_header_sz += buf->outer_l2_len + buf->outer_l3_len;
+ /* First seg must contain all TSO headers. */
+ if (unlikely(*tso_header_sz > MLX5_MAX_TSO_HEADER) ||
+ *tso_header_sz > DATA_LEN(buf)) {
+ txq->stats.oerrors++;
+ return -EINVAL;
+ }
+ copy_b = *tso_header_sz - *pkt_inline_sz;
+ if (!copy_b || ((end - (uintptr_t)*raw) < copy_b))
+ return -EAGAIN;
+ n_wqe = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+ if (unlikely(*max_wqe < n_wqe))
+ return -EINVAL;
+ *max_wqe -= n_wqe;
+ rte_memcpy((void *)*raw, (void *)*addr, copy_b);
+ *length -= copy_b;
+ *addr += copy_b;
+ copy_b = MLX5_WQE_DS(copy_b) * MLX5_WQE_DWORD_SIZE;
+ *pkt_inline_sz += copy_b;
+ *raw += copy_b;
+ return 0;
+}
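The helper distinguishes three outcomes for its caller: 0 when the TSO headers were inlined, -EAGAIN when nothing fits (or nothing needs copying) so the burst loop should emit a NOP WQE and retry the packet, and -EINVAL for malformed TSO metadata or WQE-ring exhaustion, which aborts the burst. A condensed view of the dispatch (the full version appears in mlx5_tx_burst() below; the label is a hypothetical stand-in for the inline NOP path):

ret = inline_tso(txq, buf, &length, &addr, &pkt_inline_sz,
		 &raw, &max_wqe, &tso_segsz, &tso_header_sz);
if (ret == -EINVAL)
	break;			/* abort the burst loop */
else if (ret == -EAGAIN)
	goto nop_wqe;		/* emit a NOP WQE, retry the packet */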
+
+/**
* DPDK callback to check the status of a tx descriptor.
*
* @param tx_queue
@@ -321,6 +495,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
unsigned int segs_n = 0;
const unsigned int max_inline = txq->max_inline;
+ uint64_t addr_64;
if (unlikely(!pkts_n))
return 0;
@@ -329,13 +504,11 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
do {
- struct rte_mbuf *buf = NULL;
+ struct rte_mbuf *buf = *pkts; /* First_seg. */
uint8_t *raw;
volatile struct mlx5_wqe_v *wqe = NULL;
volatile rte_v128u32_t *dseg = NULL;
@@ -347,14 +520,15 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t tso_header_sz = 0;
uint16_t ehdr;
uint8_t cs_flags;
- uint64_t tso = 0;
+ uint8_t tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
+ uint32_t swp_offsets = 0;
+ uint8_t swp_types = 0;
uint16_t tso_segsz = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
#endif
+ int ret;
- /* first_seg */
- buf = *pkts;
segs_n = buf->nb_segs;
/*
* Make sure there is enough room to store this packet and
@@ -389,7 +563,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (pkts_n - i > 1)
rte_prefetch0(
rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
+ txq_mbuf_to_swp(txq, buf, (uint8_t *)&swp_offsets, &swp_types);
raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
@@ -415,54 +590,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
addr += pkt_inline_sz;
}
raw += MLX5_WQE_DWORD_SIZE;
- tso = txq->tso_en && (buf->ol_flags & PKT_TX_TCP_SEG);
if (tso) {
- uintptr_t end =
- (uintptr_t)(((uintptr_t)txq->wqes) +
- (1 << txq->wqe_n) * MLX5_WQE_SIZE);
- unsigned int copy_b;
- uint8_t vlan_sz =
- (buf->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
- const uint64_t is_tunneled =
- buf->ol_flags & (PKT_TX_TUNNEL_GRE |
- PKT_TX_TUNNEL_VXLAN);
-
- tso_header_sz = buf->l2_len + vlan_sz +
- buf->l3_len + buf->l4_len;
- tso_segsz = buf->tso_segsz;
- if (unlikely(tso_segsz == 0)) {
- txq->stats.oerrors++;
- break;
- }
- if (is_tunneled && txq->tunnel_en) {
- tso_header_sz += buf->outer_l2_len +
- buf->outer_l3_len;
- cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
- } else {
- cs_flags |= MLX5_ETH_WQE_L4_CSUM;
- }
- if (unlikely(tso_header_sz > MLX5_MAX_TSO_HEADER)) {
- txq->stats.oerrors++;
+ ret = inline_tso(txq, buf, &length,
+ &addr, &pkt_inline_sz,
+ &raw, &max_wqe,
+ &tso_segsz, &tso_header_sz);
+ if (ret == -EINVAL) {
break;
- }
- copy_b = tso_header_sz - pkt_inline_sz;
- /* First seg must contain all headers. */
- assert(copy_b <= length);
- if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
- uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
-
- if (unlikely(max_wqe < n))
- break;
- max_wqe -= n;
- rte_memcpy((void *)raw, (void *)addr, copy_b);
- addr += copy_b;
- length -= copy_b;
- /* Include padding for TSO header. */
- copy_b = MLX5_WQE_DS(copy_b) *
- MLX5_WQE_DWORD_SIZE;
- pkt_inline_sz += copy_b;
- raw += copy_b;
- } else {
+ } else if (ret == -EAGAIN) {
/* NOP WQE. */
wqe->ctrl = (rte_v128u32_t){
rte_cpu_to_be_32(txq->wqe_ci << 8),
@@ -507,7 +642,8 @@ pkt_inline:
if (unlikely(max_wqe < n))
break;
max_wqe -= n;
- if (tso && !inl) {
+ if (tso) {
+ assert(inl == 0);
inl = rte_cpu_to_be_32(copy_b |
MLX5_INLINE_SEG);
rte_memcpy((void *)raw,
@@ -542,8 +678,17 @@ pkt_inline:
} else if (!segs_n) {
goto next_pkt;
} else {
- raw += copy_b;
- inline_room -= copy_b;
+ /*
+ * Further inline the next segment only for
+ * non-TSO packets.
+ */
+ if (!tso) {
+ raw += copy_b;
+ inline_room -= copy_b;
+ } else {
+ inline_room = 0;
+ }
+ /* Move to the next segment. */
--segs_n;
buf = buf->next;
assert(buf);
@@ -565,12 +710,12 @@ pkt_inline:
ds = 3;
use_dseg:
/* Add the remaining packet as a simple ds. */
- addr = rte_cpu_to_be_64(addr);
+ addr_64 = rte_cpu_to_be_64(addr);
*dseg = (rte_v128u32_t){
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- addr,
- addr >> 32,
+ addr_64,
+ addr_64 >> 32,
};
++ds;
if (!segs_n)
@@ -604,12 +749,12 @@ next_seg:
total_length += length;
#endif
/* Store segment information. */
- addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
+ addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t));
*dseg = (rte_v128u32_t){
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- addr,
- addr >> 32,
+ addr_64,
+ addr_64 >> 32,
};
(*txq->elts)[++elts_head & elts_m] = buf;
if (--segs_n)
@@ -633,8 +778,9 @@ next_pkt:
0,
};
wqe->eseg = (rte_v128u32_t){
- 0,
- cs_flags | (rte_cpu_to_be_16(tso_segsz) << 16),
+ swp_offsets,
+ cs_flags | (swp_types << 8) |
+ (rte_cpu_to_be_16(tso_segsz) << 16),
0,
(ehdr << 16) | rte_cpu_to_be_16(tso_header_sz),
};
@@ -647,8 +793,8 @@ next_pkt:
0,
};
wqe->eseg = (rte_v128u32_t){
- 0,
- cs_flags,
+ swp_offsets,
+ cs_flags | (swp_types << 8),
0,
(ehdr << 16) | rte_cpu_to_be_16(pkt_inline_sz),
};
@@ -669,14 +815,13 @@ next_wqe:
/* Check whether completion threshold has been reached. */
comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
last_wqe->ctrl2 = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
} else {
txq->elts_comp = comp;
}
@@ -795,8 +940,6 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
@@ -820,7 +963,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
max_elts -= segs_n;
--pkts_n;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
assert(length);
@@ -885,14 +1028,13 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
} else {
txq->elts_comp = comp;
}
@@ -1024,8 +1166,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
do {
struct rte_mbuf *buf = *(pkts++);
uintptr_t addr;
@@ -1052,7 +1192,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
* iteration.
*/
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if packet differs. */
@@ -1182,14 +1322,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if (comp >= MLX5_TX_COMP_THRESH) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
} else {
txq->elts_comp = comp;
}
@@ -1303,6 +1442,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
unsigned int mpw_room = 0;
unsigned int inl_pad = 0;
uint32_t inl_hdr;
+ uint64_t addr_64;
struct mlx5_mpw mpw = {
.state = MLX5_MPW_STATE_CLOSED,
};
@@ -1312,15 +1452,12 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
do {
struct rte_mbuf *buf = *(pkts++);
uintptr_t addr;
- unsigned int n;
unsigned int do_inline = 0; /* Whether inline is possible. */
uint32_t length;
uint8_t cs_flags;
@@ -1330,7 +1467,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
/* Make sure there is enough room to store this packet. */
if (max_elts - j == 0)
break;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Retrieve packet information. */
length = PKT_LEN(buf);
/* Start new session if:
@@ -1382,7 +1519,7 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
(!txq->mpw_hdr_dseg ||
mpw.total_len >= MLX5_WQE_SIZE);
}
- if (do_inline) {
+ if (max_inline && do_inline) {
/* Inline packet into WQE. */
unsigned int max;
@@ -1440,16 +1577,13 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
((uintptr_t)mpw.data.raw +
inl_pad);
(*txq->elts)[elts_head++ & elts_m] = buf;
- addr = rte_pktmbuf_mtod(buf, uintptr_t);
- for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
- rte_prefetch2((void *)(addr +
- n * RTE_CACHE_LINE_SIZE));
- addr = rte_cpu_to_be_64(addr);
+ addr_64 = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
+ uintptr_t));
*dseg = (rte_v128u32_t) {
rte_cpu_to_be_32(length),
mlx5_tx_mb2mr(txq, buf),
- addr,
- addr >> 32,
+ addr_64,
+ addr_64 >> 32,
};
mpw.data.raw = (volatile void *)(dseg + 1);
mpw.total_len += (inl_pad + sizeof(*dseg));
@@ -1473,15 +1607,14 @@ txq_burst_empw(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
(1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
volatile struct mlx5_wqe *wqe = mpw.wqe;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request completion on last WQE. */
wqe->ctrl[2] = rte_cpu_to_be_32(8);
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
txq->mpw_comp = txq->wqe_ci;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
} else {
txq->elts_comp += j;
}
@@ -1541,6 +1674,8 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/**
* Translate RX completion flags to packet type.
*
+ * @param[in] rxq
+ * Pointer to RX queue structure.
* @param[in] cqe
* Pointer to CQE.
*
@@ -1550,7 +1685,7 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Packet type for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
+rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
{
uint8_t idx;
uint8_t pinfo = cqe->pkt_info;
@@ -1565,7 +1700,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
* bit[7] = outer_l3_type
*/
idx = ((pinfo & 0x3) << 6) | ((ptype & 0xfc00) >> 10);
- return mlx5_ptype_table[idx];
+ return mlx5_ptype_table[idx] | rxq->tunnel * !!(idx & (1 << 6));
}
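The added rxq->tunnel term is a branch-free merge: bit 6 of the index is set only for tunneled completions, so multiplying by !!(idx & (1 << 6)) ORs in the tunnel ptype recorded at queue setup exactly when the CQE flags a tunnel, and adds nothing otherwise. An equivalent, branching form:

uint32_t ptype = mlx5_ptype_table[idx];

if (idx & (1 << 6))	/* tunneled completion */
	ptype |= rxq->tunnel;
return ptype;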
/**
@@ -1577,8 +1712,9 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
* Pointer to RX queue.
* @param cqe
* CQE to process.
- * @param[out] rss_hash
- * Packet RSS Hash result.
+ * @param[out] mcqe
+ * Store a pointer to the mini-CQE if the CQE is compressed; otherwise the
+ * pointer is left untouched.
*
* @return
* Packet size in bytes (0 if there is none), -1 in case of completion
@@ -1586,7 +1722,7 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
*/
static inline int
mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
- uint16_t cqe_cnt, uint32_t *rss_hash)
+ uint16_t cqe_cnt, volatile struct mlx5_mini_cqe8 **mcqe)
{
struct rxq_zip *zip = &rxq->zip;
uint16_t cqe_n = cqe_cnt + 1;
@@ -1600,7 +1736,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
(uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].pkt_info);
len = rte_be_to_cpu_32((*mc)[zip->ai & 7].byte_cnt);
- *rss_hash = rte_be_to_cpu_32((*mc)[zip->ai & 7].rx_hash_result);
+ *mcqe = &(*mc)[zip->ai & 7];
if ((++zip->ai & 7) == 0) {
/* Invalidate consumed CQEs */
idx = zip->ca;
@@ -1665,7 +1801,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
/* Get packet size to return. */
len = rte_be_to_cpu_32((*mc)[0].byte_cnt);
- *rss_hash = rte_be_to_cpu_32((*mc)[0].rx_hash_result);
+ *mcqe = &(*mc)[0];
zip->ai = 1;
/* Prefetch all the entries to be invalidated */
idx = zip->ca;
@@ -1676,7 +1812,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
}
} else {
len = rte_be_to_cpu_32(cqe->byte_cnt);
- *rss_hash = rte_be_to_cpu_32(cqe->rx_hash_res);
}
/* Error while receiving packet. */
if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1688,8 +1823,6 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
/**
* Translate RX completion flags to offload flags.
*
- * @param[in] rxq
- * Pointer to RX queue structure.
* @param[in] cqe
* Pointer to CQE.
*
@@ -1697,7 +1830,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
* Offload flags (ol_flags) for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
+rxq_cq_to_ol_flags(volatile struct mlx5_cqe *cqe)
{
uint32_t ol_flags = 0;
uint16_t flags = rte_be_to_cpu_16(cqe->hdr_type_etc);
@@ -1709,18 +1842,56 @@ rxq_cq_to_ol_flags(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe)
TRANSPOSE(flags,
MLX5_CQE_RX_L4_HDR_VALID,
PKT_RX_L4_CKSUM_GOOD);
- if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
- ol_flags |=
- TRANSPOSE(flags,
- MLX5_CQE_RX_L3_HDR_VALID,
- PKT_RX_IP_CKSUM_GOOD) |
- TRANSPOSE(flags,
- MLX5_CQE_RX_L4_HDR_VALID,
- PKT_RX_L4_CKSUM_GOOD);
return ol_flags;
}
/**
+ * Fill in mbuf fields from RX completion flags.
+ * Note that pkt->ol_flags should be initialized outside of this function.
+ *
+ * @param rxq
+ * Pointer to RX queue.
+ * @param pkt
+ * mbuf to fill.
+ * @param cqe
+ * CQE to process.
+ * @param rss_hash_res
+ * Packet RSS Hash result.
+ */
+static inline void
+rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct rte_mbuf *pkt,
+ volatile struct mlx5_cqe *cqe, uint32_t rss_hash_res)
+{
+ /* Update packet information. */
+ pkt->packet_type = rxq_cq_to_pkt_type(rxq, cqe);
+ if (rss_hash_res && rxq->rss_hash) {
+ pkt->hash.rss = rss_hash_res;
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ }
+ if (rxq->mark && MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
+ pkt->ol_flags |= PKT_RX_FDIR;
+ if (cqe->sop_drop_qpn !=
+ rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
+ uint32_t mark = cqe->sop_drop_qpn;
+
+ pkt->ol_flags |= PKT_RX_FDIR_ID;
+ pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
+ }
+ }
+ if (rxq->csum)
+ pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
+ if (rxq->vlan_strip &&
+ (cqe->hdr_type_etc & rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
+ pkt->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = rte_be_to_cpu_16(cqe->vlan_info);
+ }
+ if (rxq->hw_timestamp) {
+ pkt->timestamp = rte_be_to_cpu_64(cqe->timestamp);
+ pkt->ol_flags |= PKT_RX_TIMESTAMP;
+ }
+}
+
+/**
* DPDK callback for RX.
*
* @param dpdk_rxq
@@ -1750,9 +1921,11 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
while (pkts_n) {
unsigned int idx = rq_ci & wqe_cnt;
- volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
+ volatile struct mlx5_wqe_data_seg *wqe =
+ &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[idx];
struct rte_mbuf *rep = (*rxq->elts)[idx];
- uint32_t rss_hash_res = 0;
+ volatile struct mlx5_mini_cqe8 *mcqe = NULL;
+ uint32_t rss_hash_res;
if (pkt)
NEXT(seg) = rep;
@@ -1782,8 +1955,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
if (!pkt) {
cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
- len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
- &rss_hash_res);
+ len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe);
if (!len) {
rte_mbuf_raw_free(rep);
break;
@@ -1796,40 +1968,12 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
pkt = seg;
assert(len >= (rxq->crc_present << 2));
- /* Update packet information. */
- pkt->packet_type = rxq_cq_to_pkt_type(cqe);
pkt->ol_flags = 0;
- if (rss_hash_res && rxq->rss_hash) {
- pkt->hash.rss = rss_hash_res;
- pkt->ol_flags = PKT_RX_RSS_HASH;
- }
- if (rxq->mark &&
- MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
- pkt->ol_flags |= PKT_RX_FDIR;
- if (cqe->sop_drop_qpn !=
- rte_cpu_to_be_32(MLX5_FLOW_MARK_DEFAULT)) {
- uint32_t mark = cqe->sop_drop_qpn;
-
- pkt->ol_flags |= PKT_RX_FDIR_ID;
- pkt->hash.fdir.hi =
- mlx5_flow_mark_get(mark);
- }
- }
- if (rxq->csum | rxq->csum_l2tun)
- pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
- if (rxq->vlan_strip &&
- (cqe->hdr_type_etc &
- rte_cpu_to_be_16(MLX5_CQE_VLAN_STRIPPED))) {
- pkt->ol_flags |= PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED;
- pkt->vlan_tci =
- rte_be_to_cpu_16(cqe->vlan_info);
- }
- if (rxq->hw_timestamp) {
- pkt->timestamp =
- rte_be_to_cpu_64(cqe->timestamp);
- pkt->ol_flags |= PKT_RX_TIMESTAMP;
- }
+ /* If compressed, take hash result from mini-CQE. */
+ rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
+ cqe->rx_hash_res :
+ mcqe->rx_hash_result);
+ rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
if (rxq->crc_present)
len -= ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
@@ -1845,6 +1989,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* changes.
*/
wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
if (len > DATA_LEN(seg)) {
len -= DATA_LEN(seg);
++NB_SEGS(pkt);
@@ -1882,6 +2029,246 @@ skip:
return i;
}
+void
+mlx5_mprq_buf_free_cb(void *addr __rte_unused, void *opaque)
+{
+ struct mlx5_mprq_buf *buf = opaque;
+
+ if (rte_atomic16_read(&buf->refcnt) == 1) {
+ rte_mempool_put(buf->mp, buf);
+ } else if (rte_atomic16_add_return(&buf->refcnt, -1) == 0) {
+ rte_atomic16_set(&buf->refcnt, 1);
+ rte_mempool_put(buf->mp, buf);
+ }
+}
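The callback implements a shared-ownership scheme for MPRQ chunks: the Rx queue keeps a baseline reference of 1, each stride attached to an mbuf adds one more, and the chunk returns to its mempool only when the last holder drops out, at which point the refcount is re-armed to 1 for reuse. The pairing, pulled from the burst function below (not additional PMD code):

/* attach side, in mlx5_rx_burst_mprq(): one ref per attached stride */
rte_atomic16_add_return(&buf->refcnt, 1);

/* detach side: invoked as the mbuf ext-buf free callback */
mlx5_mprq_buf_free_cb(NULL, buf);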
+
+void
+mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf)
+{
+ mlx5_mprq_buf_free_cb(NULL, buf);
+}
+
+static inline void
+mprq_buf_replace(struct mlx5_rxq_data *rxq, uint16_t rq_idx)
+{
+ struct mlx5_mprq_buf *rep = rxq->mprq_repl;
+ volatile struct mlx5_wqe_data_seg *wqe =
+ &((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
+ void *addr;
+
+ assert(rep != NULL);
+ /* Replace MPRQ buf. */
+ (*rxq->mprq_bufs)[rq_idx] = rep;
+ /* Replace WQE. */
+ addr = mlx5_mprq_buf_addr(rep);
+ wqe->addr = rte_cpu_to_be_64((uintptr_t)addr);
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wqe->lkey = mlx5_rx_addr2mr(rxq, (uintptr_t)addr);
+ /* Stash a mbuf for next replacement. */
+ if (likely(!rte_mempool_get(rxq->mprq_mp, (void **)&rep)))
+ rxq->mprq_repl = rep;
+ else
+ rxq->mprq_repl = NULL;
+}
+
+/**
+ * DPDK callback for RX with Multi-Packet RQ support.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct mlx5_rxq_data *rxq = dpdk_rxq;
+ const unsigned int strd_n = 1 << rxq->strd_num_n;
+ const unsigned int strd_sz = 1 << rxq->strd_sz_n;
+ const unsigned int strd_shift =
+ MLX5_MPRQ_STRIDE_SHIFT_BYTE * rxq->strd_shift_en;
+ const unsigned int cq_mask = (1 << rxq->cqe_n) - 1;
+ const unsigned int wq_mask = (1 << rxq->elts_n) - 1;
+ volatile struct mlx5_cqe *cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+ unsigned int i = 0;
+ uint16_t rq_ci = rxq->rq_ci;
+ uint16_t consumed_strd = rxq->consumed_strd;
+ struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+
+ while (i < pkts_n) {
+ struct rte_mbuf *pkt;
+ void *addr;
+ int ret;
+ unsigned int len;
+ uint16_t strd_cnt;
+ uint16_t strd_idx;
+ uint32_t offset;
+ uint32_t byte_cnt;
+ volatile struct mlx5_mini_cqe8 *mcqe = NULL;
+ uint32_t rss_hash_res = 0;
+
+ if (consumed_strd == strd_n) {
+ /* Replace WQE only if the buffer is still in use. */
+ if (rte_atomic16_read(&buf->refcnt) > 1) {
+ mprq_buf_replace(rxq, rq_ci & wq_mask);
+ /* Release the old buffer. */
+ mlx5_mprq_buf_free(buf);
+ } else if (unlikely(rxq->mprq_repl == NULL)) {
+ struct mlx5_mprq_buf *rep;
+
+ /*
+ * The MPRQ mempool has run out of buffers, so packets
+ * are being memcpy'd regardless of their size. Retry
+ * the allocation to get back to the normal path.
+ */
+ if (!rte_mempool_get(rxq->mprq_mp,
+ (void **)&rep))
+ rxq->mprq_repl = rep;
+ }
+ /* Advance to the next WQE. */
+ consumed_strd = 0;
+ ++rq_ci;
+ buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
+ }
+ cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+ ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
+ if (!ret)
+ break;
+ if (unlikely(ret == -1)) {
+ /* RX error, packet is likely too large. */
+ ++rxq->stats.idropped;
+ continue;
+ }
+ byte_cnt = ret;
+ strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+ MLX5_MPRQ_STRIDE_NUM_SHIFT;
+ assert(strd_cnt);
+ consumed_strd += strd_cnt;
+ if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
+ continue;
+ if (mcqe == NULL) {
+ rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
+ strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
+ } else {
+ /* mini-CQE for MPRQ doesn't have hash result. */
+ strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
+ }
+ assert(strd_idx < strd_n);
+ assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
+ /*
+ * The queue is configured to receive one packet per stride. If
+ * the MTU is adjusted through the kernel interface, however, the
+ * device may consume multiple strides without raising an error.
+ * Such a packet must be dropped because it is bigger than
+ * max_rx_pkt_len.
+ */
+ if (unlikely(strd_cnt > 1)) {
+ ++rxq->stats.idropped;
+ continue;
+ }
+ pkt = rte_pktmbuf_alloc(rxq->mp);
+ if (unlikely(pkt == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
+ assert((int)len >= (rxq->crc_present << 2));
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
+ offset = strd_idx * strd_sz + strd_shift;
+ addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);
+ /* Initialize the offload flag. */
+ pkt->ol_flags = 0;
+ /*
+ * Memcpy the packet into the target mbuf if:
+ * - the packet size is smaller than mprq_max_memcpy_len, or
+ * - the Multi-Packet RQ mempool is out of buffers.
+ */
+ if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
+ /*
+ * When memcpy'ing a packet due to buffer exhaustion, it
+ * must fit in the target mbuf.
+ */
+ if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.idropped;
+ continue;
+ }
+ rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
+ } else {
+ rte_iova_t buf_iova;
+ struct rte_mbuf_ext_shared_info *shinfo;
+ uint16_t buf_len = strd_cnt * strd_sz;
+
+ /* Increment the refcnt of the whole chunk. */
+ rte_atomic16_add_return(&buf->refcnt, 1);
+ assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
+ strd_n + 1);
+ addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
+ /*
+ * The MLX5 device itself does not use the IOVA, but it is
+ * needed in case the Rx packet is later transmitted via a
+ * different PMD.
+ */
+ buf_iova = rte_mempool_virt2iova(buf) +
+ RTE_PTR_DIFF(addr, buf);
+ shinfo = rte_pktmbuf_ext_shinfo_init_helper(addr,
+ &buf_len, mlx5_mprq_buf_free_cb, buf);
+ /*
+ * EXT_ATTACHED_MBUF will be set to pkt->ol_flags when
+ * attaching the stride to mbuf and more offload flags
+ * will be added below by calling rxq_cq_to_mbuf().
+ * Other fields will be overwritten.
+ */
+ rte_pktmbuf_attach_extbuf(pkt, addr, buf_iova, buf_len,
+ shinfo);
+ rte_pktmbuf_reset_headroom(pkt);
+ assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
+ /*
+ * Prevent potential overflow due to MTU change through
+ * kernel interface.
+ */
+ if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.idropped;
+ continue;
+ }
+ }
+ rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
+ PKT_LEN(pkt) = len;
+ DATA_LEN(pkt) = len;
+ PORT(pkt) = rxq->port_id;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += PKT_LEN(pkt);
+#endif
+ /* Return packet. */
+ *(pkts++) = pkt;
+ ++i;
+ }
+ /* Update the consumer indexes. */
+ rxq->consumed_strd = consumed_strd;
+ rte_cio_wmb();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ if (rq_ci != rxq->rq_ci) {
+ rxq->rq_ci = rq_ci;
+ rte_cio_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment packets counter. */
+ rxq->stats.ipackets += i;
+#endif
+ return i;
+}
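Packet placement within an MPRQ chunk is pure arithmetic: offset = strd_idx * strd_sz + strd_shift. As a worked example, assuming 2 KiB strides (strd_sz_n = 11) and the 2-byte shift enabled (per the strd_shift_en field comment in mlx5_rxtx.h below), the packet in stride 3 starts 6146 bytes into the stride area:

unsigned int strd_sz = 1 << 11;			/* 2048 */
unsigned int strd_shift = 2;			/* 2-byte shift enabled */
uint32_t offset = 3 * strd_sz + strd_shift;	/* 6146 */
void *addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf), offset);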
+
/**
* Dummy DPDK callback for TX.
*
@@ -1899,11 +2286,10 @@ skip:
* Number of packets successfully transmitted (<= pkts_n).
*/
uint16_t
-removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_tx_burst(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
@@ -1924,11 +2310,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* Number of packets successfully received (<= pkts_n).
*/
uint16_t
-removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_rx_burst(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_rxq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
@@ -1940,58 +2325,49 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
*/
uint16_t __attribute__((weak))
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
uint16_t __attribute__((weak))
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
uint16_t __attribute__((weak))
-mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_rxq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
- (void)priv;
- (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
- (void)priv;
- (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
- (void)rxq;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_rx_support(struct priv *priv)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
- (void)priv;
return -ENOTSUP;
}
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d7e89055..48ed2b20 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_H_
@@ -26,13 +26,19 @@
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_atomic.h>
+#include <rte_spinlock.h>
+#include <rte_io.h>
#include "mlx5_utils.h"
#include "mlx5.h"
+#include "mlx5_mr.h"
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
+/* Support tunnel matching. */
+#define MLX5_FLOW_TUNNEL 5
+
struct mlx5_rxq_stats {
unsigned int idx; /**< Mapping index. */
#ifdef MLX5_PMD_SOFT_COUNTERS
@@ -54,17 +60,6 @@ struct mlx5_txq_stats {
struct priv;
-/* Memory region queue object. */
-struct mlx5_mr {
- LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
- rte_atomic32_t refcnt; /*<< Reference counter. */
- uint32_t lkey; /*<< rte_cpu_to_be_32(mr->lkey) */
- uintptr_t start; /* Start address of MR */
- uintptr_t end; /* End address of MR */
- struct ibv_mr *mr; /*<< Memory Region. */
- struct rte_mempool *mp; /*<< Memory Pool. */
-};
-
/* Compressed CQE context. */
struct rxq_zip {
uint16_t ai; /* Array index. */
@@ -74,10 +69,19 @@ struct rxq_zip {
uint32_t cqe_cnt; /* Number of CQEs. */
};
+/* Multi-Packet RQ buffer header. */
+struct mlx5_mprq_buf {
+ struct rte_mempool *mp;
+ rte_atomic16_t refcnt; /* Atomically accessed refcnt. */
+ uint8_t pad[RTE_PKTMBUF_HEADROOM]; /* Headroom for the first packet. */
+} __rte_cache_aligned;
+
+/* Get pointer to the first stride. */
+#define mlx5_mprq_buf_addr(ptr) ((ptr) + 1)
+
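The macro relies on struct pointer arithmetic: (ptr) + 1 on a struct mlx5_mprq_buf * skips exactly the header, whose trailing pad member doubles as headroom for the first packet, so the result is the address of the first stride:

struct mlx5_mprq_buf *buf; /* taken from rxq->mprq_bufs[] */
void *first_stride = mlx5_mprq_buf_addr(buf);
/* == (uint8_t *)buf + sizeof(struct mlx5_mprq_buf) */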
/* RX queue descriptor. */
struct mlx5_rxq_data {
unsigned int csum:1; /* Enable checksum offloading. */
- unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int hw_timestamp:1; /* Enable HW timestamp. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
@@ -86,24 +90,41 @@ struct mlx5_rxq_data {
unsigned int elts_n:4; /* Log 2 of Mbufs. */
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int mark:1; /* Marked flow available on the queue. */
- unsigned int :15; /* Remaining bits. */
+ unsigned int strd_num_n:5; /* Log 2 of the number of stride. */
+ unsigned int strd_sz_n:4; /* Log 2 of stride size. */
+ unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
+ unsigned int :6; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
uint16_t rq_ci;
+ uint16_t consumed_strd; /* Number of consumed strides in WQE. */
uint16_t rq_pi;
uint16_t cq_ci;
- volatile struct mlx5_wqe_data_seg(*wqes)[];
+ struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
+ uint16_t mprq_max_memcpy_len; /* Maximum size of packet to memcpy. */
+ volatile void *wqes;
volatile struct mlx5_cqe(*cqes)[];
struct rxq_zip zip; /* Compressed context. */
- struct rte_mbuf *(*elts)[];
+ RTE_STD_C11
+ union {
+ struct rte_mbuf *(*elts)[];
+ struct mlx5_mprq_buf *(*mprq_bufs)[];
+ };
struct rte_mempool *mp;
+ struct rte_mempool *mprq_mp; /* Mempool for Multi-Packet RQ. */
+ struct mlx5_mprq_buf *mprq_repl; /* Stashed mbuf for replenish. */
struct mlx5_rxq_stats stats;
uint64_t mbuf_initializer; /* Default rearm_data for vectorized Rx. */
struct rte_mbuf fake_mbuf; /* elts padding for vectorized Rx. */
void *cq_uar; /* CQ user access region. */
uint32_t cqn; /* CQ number. */
uint8_t cq_arm_sn; /* CQ arm seq number. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t *uar_lock_cq;
+ /* CQ (UAR) access lock required for 32bit implementations */
+#endif
+ uint32_t tunnel; /* Tunnel information. */
} __rte_cache_aligned;
/* Verbs Rx queue elements. */
@@ -114,18 +135,20 @@ struct mlx5_rxq_ibv {
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_wq *wq; /* Work Queue. */
struct ibv_comp_channel *channel;
- struct mlx5_mr *mr; /* Memory Region (for mp). */
};
/* RX queue control descriptor. */
struct mlx5_rxq_ctrl {
LIST_ENTRY(mlx5_rxq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
- struct priv *priv; /* Back pointer to private data. */
struct mlx5_rxq_ibv *ibv; /* Verbs elements. */
+ struct priv *priv; /* Back pointer to private data. */
struct mlx5_rxq_data rxq; /* Data path structure. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int irq:1; /* Whether IRQ is enabled. */
+ uint16_t idx; /* Queue index. */
+ uint32_t flow_mark_n; /* Number of Mark/Flag flows using this Queue. */
+ uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
};
/* Indirection table. */
@@ -133,7 +156,7 @@ struct mlx5_ind_table_ibv {
LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
- uint16_t queues_n; /**< Number of queues in the list. */
+ uint32_t queues_n; /**< Number of queues in the list. */
uint16_t queues[]; /**< Queue list. */
};
@@ -144,7 +167,7 @@ struct mlx5_hrxq {
struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
uint64_t hash_fields; /* Verbs Hash fields. */
- uint8_t rss_key_len; /* Hash key length in bytes. */
+ uint32_t rss_key_len; /* Hash key length in bytes. */
uint8_t rss_key[]; /* Hash key. */
};
@@ -167,26 +190,31 @@ struct mlx5_txq_data {
uint16_t tso_en:1; /* When set hardware TSO is enabled. */
uint16_t tunnel_en:1;
/* When set TX offload for tunneled packets are supported. */
+ uint16_t swp_en:1; /* Whether SW parser is enabled. */
uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
- uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
uint64_t offloads; /* Offloads for Tx Queue. */
+ struct mlx5_mr_ctrl mr_ctrl; /* MR control descriptor. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register remapped. */
- struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
+#ifndef RTE_ARCH_64
+ rte_spinlock_t *uar_lock;
+ /* UAR access lock required for 32bit implementations */
+#endif
} __rte_cache_aligned;
/* Verbs Tx queue elements. */
struct mlx5_txq_ibv {
LIST_ENTRY(mlx5_txq_ibv) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
+ struct mlx5_txq_ctrl *txq_ctrl; /* Pointer to the control queue. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
};
@@ -195,112 +223,203 @@ struct mlx5_txq_ibv {
struct mlx5_txq_ctrl {
LIST_ENTRY(mlx5_txq_ctrl) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
- struct priv *priv; /* Back pointer to private data. */
unsigned int socket; /* CPU socket ID for allocations. */
unsigned int max_inline_data; /* Max inline data. */
unsigned int max_tso_header; /* Max TSO header size. */
struct mlx5_txq_ibv *ibv; /* Verbs queue object. */
+ struct priv *priv; /* Back pointer to private data. */
struct mlx5_txq_data txq; /* Data path structure. */
off_t uar_mmap_offset; /* UAR mmap offset for non-primary process. */
volatile void *bf_reg_orig; /* Blueflame register from verbs. */
+ uint16_t idx; /* Queue index. */
};
/* mlx5_rxq.c */
extern uint8_t rss_hash_default_key[];
-extern const size_t rss_hash_default_key_len;
-
-void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
-int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
- const struct rte_eth_rxconf *, struct rte_mempool *);
-void mlx5_rx_queue_release(void *);
-int priv_rx_intr_vec_enable(struct priv *priv);
-void priv_rx_intr_vec_disable(struct priv *priv);
+
+int mlx5_check_mprq_support(struct rte_eth_dev *dev);
+int mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq);
+int mlx5_mprq_enabled(struct rte_eth_dev *dev);
+int mlx5_mprq_free_mp(struct rte_eth_dev *dev);
+int mlx5_mprq_alloc_mp(struct rte_eth_dev *dev);
+void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl);
+int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+void mlx5_rx_queue_release(void *dpdk_rxq);
+int mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev);
+void mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev);
int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_new(struct priv *, uint16_t);
-struct mlx5_rxq_ibv *mlx5_priv_rxq_ibv_get(struct priv *, uint16_t);
-int mlx5_priv_rxq_ibv_release(struct priv *, struct mlx5_rxq_ibv *);
-int mlx5_priv_rxq_ibv_releasable(struct priv *, struct mlx5_rxq_ibv *);
-int mlx5_priv_rxq_ibv_verify(struct priv *);
-struct mlx5_rxq_ctrl *mlx5_priv_rxq_new(struct priv *, uint16_t,
- uint16_t, unsigned int,
- const struct rte_eth_rxconf *,
- struct rte_mempool *);
-struct mlx5_rxq_ctrl *mlx5_priv_rxq_get(struct priv *, uint16_t);
-int mlx5_priv_rxq_release(struct priv *, uint16_t);
-int mlx5_priv_rxq_releasable(struct priv *, uint16_t);
-int mlx5_priv_rxq_verify(struct priv *);
-int rxq_alloc_elts(struct mlx5_rxq_ctrl *);
-struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_new(struct priv *,
- uint16_t [],
- uint16_t);
-struct mlx5_ind_table_ibv *mlx5_priv_ind_table_ibv_get(struct priv *,
- uint16_t [],
- uint16_t);
-int mlx5_priv_ind_table_ibv_release(struct priv *, struct mlx5_ind_table_ibv *);
-int mlx5_priv_ind_table_ibv_verify(struct priv *);
-struct mlx5_hrxq *mlx5_priv_hrxq_new(struct priv *, uint8_t *, uint8_t,
- uint64_t, uint16_t [], uint16_t);
-struct mlx5_hrxq *mlx5_priv_hrxq_get(struct priv *, uint8_t *, uint8_t,
- uint64_t, uint16_t [], uint16_t);
-int mlx5_priv_hrxq_release(struct priv *, struct mlx5_hrxq *);
-int mlx5_priv_hrxq_ibv_verify(struct priv *);
-uint64_t mlx5_priv_get_rx_port_offloads(struct priv *);
-uint64_t mlx5_priv_get_rx_queue_offloads(struct priv *);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv);
+int mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv);
+struct mlx5_rxq_ibv *mlx5_rxq_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_rxq_ibv_drop_release(struct rte_eth_dev *dev);
+int mlx5_rxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_rxq_ctrl *mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_rxconf *conf,
+ struct rte_mempool *mp);
+struct mlx5_rxq_ctrl *mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_rxq_verify(struct rte_eth_dev *dev);
+int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
+int rxq_alloc_mprq_buf(struct mlx5_rxq_ctrl *rxq_ctrl);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_new(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_get(struct rte_eth_dev *dev,
+ const uint16_t *queues,
+ uint32_t queues_n);
+int mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_ibv *ind_tbl);
+int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_ind_table_ibv *mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev);
+void mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n,
+ int tunnel __rte_unused);
+struct mlx5_hrxq *mlx5_hrxq_get(struct rte_eth_dev *dev,
+ const uint8_t *rss_key, uint32_t rss_key_len,
+ uint64_t hash_fields,
+ const uint16_t *queues, uint32_t queues_n);
+int mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hxrq);
+int mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_hrxq *mlx5_hrxq_drop_new(struct rte_eth_dev *dev);
+void mlx5_hrxq_drop_release(struct rte_eth_dev *dev);
+uint64_t mlx5_get_rx_port_offloads(void);
+uint64_t mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev);
/* mlx5_txq.c */
-int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
- const struct rte_eth_txconf *);
-void mlx5_tx_queue_release(void *);
-int priv_tx_uar_remap(struct priv *priv, int fd);
-struct mlx5_txq_ibv *mlx5_priv_txq_ibv_new(struct priv *, uint16_t);
-struct mlx5_txq_ibv *mlx5_priv_txq_ibv_get(struct priv *, uint16_t);
-int mlx5_priv_txq_ibv_release(struct priv *, struct mlx5_txq_ibv *);
-int mlx5_priv_txq_ibv_releasable(struct priv *, struct mlx5_txq_ibv *);
-int mlx5_priv_txq_ibv_verify(struct priv *);
-struct mlx5_txq_ctrl *mlx5_priv_txq_new(struct priv *, uint16_t,
- uint16_t, unsigned int,
- const struct rte_eth_txconf *);
-struct mlx5_txq_ctrl *mlx5_priv_txq_get(struct priv *, uint16_t);
-int mlx5_priv_txq_release(struct priv *, uint16_t);
-int mlx5_priv_txq_releasable(struct priv *, uint16_t);
-int mlx5_priv_txq_verify(struct priv *);
-void txq_alloc_elts(struct mlx5_txq_ctrl *);
-uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
+int mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf);
+void mlx5_tx_queue_release(void *dpdk_txq);
+int mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd);
+struct mlx5_txq_ibv *mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx);
+struct mlx5_txq_ibv *mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv);
+int mlx5_txq_ibv_verify(struct rte_eth_dev *dev);
+struct mlx5_txq_ctrl *mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx,
+ uint16_t desc, unsigned int socket,
+ const struct rte_eth_txconf *conf);
+struct mlx5_txq_ctrl *mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx);
+int mlx5_txq_verify(struct rte_eth_dev *dev);
+void txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl);
+uint64_t mlx5_get_tx_port_offloads(struct rte_eth_dev *dev);
/* mlx5_rxtx.c */
extern uint32_t mlx5_ptype_table[];
+extern uint8_t mlx5_cksum_table[];
+extern uint8_t mlx5_swp_types_table[];
void mlx5_set_ptype_table(void);
-uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
-uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
-int mlx5_rx_descriptor_status(void *, uint16_t);
-int mlx5_tx_descriptor_status(void *, uint16_t);
+void mlx5_set_cksum_table(void);
+void mlx5_set_swp_types_table(void);
+uint16_t mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
+void mlx5_mprq_buf_free_cb(void *addr, void *opaque);
+void mlx5_mprq_buf_free(struct mlx5_mprq_buf *buf);
+uint16_t mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+int mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset);
/* Vectorized version of mlx5_rxtx.c */
-int priv_check_raw_vec_tx_support(struct priv *, struct rte_eth_dev *);
-int priv_check_vec_tx_support(struct priv *, struct rte_eth_dev *);
-int rxq_check_vec_support(struct mlx5_rxq_data *);
-int priv_check_vec_rx_support(struct priv *);
-uint16_t mlx5_tx_burst_raw_vec(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_tx_burst_vec(void *, struct rte_mbuf **, uint16_t);
-uint16_t mlx5_rx_burst_vec(void *, struct rte_mbuf **, uint16_t);
+int mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_check_vec_tx_support(struct rte_eth_dev *dev);
+int mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq_data);
+int mlx5_check_vec_rx_support(struct rte_eth_dev *dev);
+uint16_t mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
+uint16_t mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
+ uint16_t pkts_n);
/* mlx5_mr.c */
-void mlx5_mp2mr_iter(struct rte_mempool *, void *);
-struct mlx5_mr *priv_txq_mp2mr_reg(struct priv *priv, struct mlx5_txq_data *,
- struct rte_mempool *, unsigned int);
-struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *, struct rte_mempool *,
- unsigned int);
+void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
+uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
+uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+
+/**
+ * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
+ * 64bit architectures.
+ *
+ * @param val
+ * Value to write in CPU endian format.
+ * @param addr
+ * Address to write to.
+ * @param lock
+ * Address of the lock to use for that UAR access.
+ */
+static __rte_always_inline void
+__mlx5_uar_write64_relaxed(uint64_t val, volatile void *addr,
+ rte_spinlock_t *lock __rte_unused)
+{
+#ifdef RTE_ARCH_64
+ rte_write64_relaxed(val, addr);
+#else /* !RTE_ARCH_64 */
+ rte_spinlock_lock(lock);
+ rte_write32_relaxed(val, addr);
+ rte_io_wmb();
+ rte_write32_relaxed(val >> 32,
+ (volatile void *)((volatile char *)addr + 4));
+ rte_spinlock_unlock(lock);
+#endif
+}
+
+/**
+ * Provide safe 64bit store operation to mlx5 UAR region for both 32bit and
+ * 64bit architectures, while guaranteeing ordering with respect to the
+ * surrounding code.
+ *
+ * @param val
+ * Value to write in CPU endian format.
+ * @param addr
+ * Address to write to.
+ * @param lock
+ * Address of the lock to use for that UAR access.
+ */
+static __rte_always_inline void
+__mlx5_uar_write64(uint64_t val, volatile void *addr, rte_spinlock_t *lock)
+{
+ rte_io_wmb();
+ __mlx5_uar_write64_relaxed(val, addr, lock);
+}
+
+/* Assist macros, used instead of directly calling the functions they wrap. */
+#ifdef RTE_ARCH_64
+#define mlx5_uar_write64_relaxed(val, dst, lock) \
+ __mlx5_uar_write64_relaxed(val, dst, NULL)
+#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, NULL)
+#else
+#define mlx5_uar_write64_relaxed(val, dst, lock) \
+ __mlx5_uar_write64_relaxed(val, dst, lock)
+#define mlx5_uar_write64(val, dst, lock) __mlx5_uar_write64(val, dst, lock)
+#endif
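The 32-bit fallback above is the classic split-store pattern: two relaxed 32-bit writes under a lock, with an I/O write barrier keeping the halves ordered. A minimal standalone sketch of the same shape, with pthread_mutex_t standing in for rte_spinlock_t and a compiler barrier for rte_io_wmb() (illustrative only, not the driver code):

#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t uar_lock = PTHREAD_MUTEX_INITIALIZER;

static void
uar_write64_sketch(uint64_t val, volatile void *addr)
{
#if UINTPTR_MAX == 0xffffffffffffffffULL
	/* 64-bit target: the store is naturally single-copy atomic. */
	*(volatile uint64_t *)addr = val;
#else
	/* 32-bit target: split into two ordered halves under a lock. */
	pthread_mutex_lock(&uar_lock);
	*(volatile uint32_t *)addr = (uint32_t)val;
	__asm__ volatile ("" ::: "memory"); /* Stand-in for rte_io_wmb(). */
	*(volatile uint32_t *)((volatile char *)addr + 4) =
		(uint32_t)(val >> 32);
	pthread_mutex_unlock(&uar_lock);
#endif
}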
#ifndef NDEBUG
/**
@@ -316,7 +435,7 @@ static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
static const uint8_t magic[] = "seen";
- volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd1)] = &cqe->rsvd1;
int ret = 1;
unsigned int i;
@@ -363,9 +482,10 @@ check_cqe(volatile struct mlx5_cqe *cqe,
(syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
return 0;
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE error %u (0x%02x)"
- " syndrome 0x%02x",
- op_code, op_code, syndrome);
+ DRV_LOG(ERR,
+ "unexpected CQE error %u (0x%02x) syndrome"
+ " 0x%02x",
+ op_code, op_code, syndrome);
rte_hexdump(stderr, "MLX5 Error CQE:",
(const void *)((uintptr_t)err_cqe),
sizeof(*err_cqe));
@@ -374,8 +494,8 @@ check_cqe(volatile struct mlx5_cqe *cqe,
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
(op_code != MLX5_CQE_REQ)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected CQE opcode %u (0x%02x)",
- op_code, op_code);
+ DRV_LOG(ERR, "unexpected CQE opcode %u (0x%02x)",
+ op_code, op_code);
rte_hexdump(stderr, "MLX5 CQE:",
(const void *)((uintptr_t)cqe),
sizeof(*cqe));
@@ -435,7 +555,7 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
if (!check_cqe_seen(cqe)) {
- ERROR("unexpected error CQE, TX stopped");
+ DRV_LOG(ERR, "unexpected error CQE, Tx stopped");
rte_hexdump(stderr, "MLX5 TXQ:",
(const void *)((uintptr_t)txq->wqes),
((1 << txq->wqe_n) *
@@ -487,77 +607,65 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
}
/**
- * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
- * the cloned mbuf is allocated is returned instead.
+ * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
+ * as the mempool is pre-configured and static.
*
- * @param buf
- * Pointer to mbuf.
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Address to search.
*
* @return
- * Memory pool where data is located for given mbuf.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
-static struct rte_mempool *
-mlx5_tx_mb2mp(struct rte_mbuf *buf)
+static __rte_always_inline uint32_t
+mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
+ struct mlx5_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx5_rx_addr2mr_bh(rxq, addr);
}
+#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
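The Rx lookup above is a two-level scheme: a small per-queue linear cache checked first, with the slower bottom-half search taken only on a miss. A hedged sketch of that shape, with illustrative types and names:

#include <stdint.h>
#include <stddef.h>

#define CACHE_N 8 /* Small enough for a cheap linear scan. */

struct mr_cache_entry { uintptr_t start, end; uint32_t lkey; };

static uint32_t
addr2mr_sketch(const struct mr_cache_entry cache[CACHE_N], uintptr_t addr,
	       uint32_t (*bottom_half)(uintptr_t))
{
	size_t i;

	for (i = 0; i < CACHE_N; i++)
		if (cache[i].start <= addr && addr < cache[i].end)
			return cache[i].lkey; /* Fast-path hit. */
	/* Miss: fall back to the slower global search. */
	return bottom_half(addr);
}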
/**
- * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
- * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
- * remove an entry first.
+ * Query LKey from a packet buffer for Tx. If not found, add the mempool.
*
* @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Address to search.
*
* @return
- * mr->lkey on success, (uint32_t)-1 on failure.
+ * Searched LKey on success, UINT32_MAX on no match.
*/
static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
{
- uint16_t i = txq->mr_cache_idx;
- uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
- struct mlx5_mr *mr;
-
- assert(i < RTE_DIM(txq->mp2mr));
- if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr))
- return txq->mp2mr[i]->lkey;
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i] == NULL ||
- txq->mp2mr[i]->mr == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
- }
- if (txq->mp2mr[i]->start <= addr &&
- txq->mp2mr[i]->end > addr) {
- assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
- txq->mr_cache_idx = i;
- return txq->mp2mr[i]->lkey;
- }
- }
- mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
- /*
- * Request the reference to use in this queue, the original one is
- * kept by the control plane.
- */
- if (mr) {
- rte_atomic32_inc(&mr->refcnt);
- txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i;
- return mr->lkey;
- } else {
- struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
-
- WARN("Failed to register mempool 0x%p(%s)",
- (void *)mp, mp->name);
- }
- return (uint32_t)-1;
+ struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+ uint32_t lkey;
+
+ /* Check generation bit to see if there's any change on existing MRs. */
+ if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
+ mlx5_mr_flush_local_cache(mr_ctrl);
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_cache(mr_ctrl->cache, &mr_ctrl->mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx5_tx_addr2mr_bh(txq, addr);
}
+#define mlx5_tx_mb2mr(txq, mb) mlx5_tx_addr2mr(txq, (uintptr_t)((mb)->buf_addr))
+
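The Tx variant adds one wrinkle: before searching, it compares a per-queue generation number against a device-wide one and flushes the local cache when they differ, so MR changes made elsewhere invalidate stale entries lazily. A sketch of that invalidation idiom (names are illustrative):

#include <stdint.h>

struct local_mr_cache {
	uint32_t cur_gen;                 /* Generation this cache matches. */
	const volatile uint32_t *dev_gen; /* Bumped on every MR change. */
	/* ... cached address ranges and LKeys ... */
};

static void
maybe_flush(struct local_mr_cache *c)
{
	if (c->cur_gen != *c->dev_gen) {
		/* Drop all cached entries here, then adopt the new gen. */
		c->cur_gen = *c->dev_gen;
	}
}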
/**
* Ring TX queue doorbell and flush the update if requested.
*
@@ -579,7 +687,7 @@ mlx5_tx_dbrec_cond_wmb(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
*txq->qp_db = rte_cpu_to_be_32(txq->wqe_ci);
/* Ensure ordering between DB record and BF copy. */
rte_wmb();
- *dst = *src;
+ mlx5_uar_write64_relaxed(*src, dst, txq->uar_lock);
if (cond)
rte_wmb();
}
@@ -599,38 +707,100 @@ mlx5_tx_dbrec(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe)
}
/**
- * Convert the Checksum offloads to Verbs.
+ * Convert mbuf to Verbs SWP.
*
- * @param txq_data
+ * @param txq
* Pointer to the Tx queue.
* @param buf
* Pointer to the mbuf.
+ * @param offsets
+ * Pointer to the SWP header offsets.
+ * @param swp_types
+ * Pointer to the SWP header types.
+ */
+static __rte_always_inline void
+txq_mbuf_to_swp(struct mlx5_txq_data *txq, struct rte_mbuf *buf,
+ uint8_t *offsets, uint8_t *swp_types)
+{
+ const uint64_t vlan = buf->ol_flags & PKT_TX_VLAN_PKT;
+ const uint64_t tunnel = buf->ol_flags & PKT_TX_TUNNEL_MASK;
+ const uint64_t tso = buf->ol_flags & PKT_TX_TCP_SEG;
+ const uint64_t csum_flags = buf->ol_flags & PKT_TX_L4_MASK;
+ const uint64_t inner_ip =
+ buf->ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6);
+ const uint64_t ol_flags_mask = PKT_TX_L4_MASK | PKT_TX_IPV6 |
+ PKT_TX_OUTER_IPV6;
+ uint16_t idx;
+ uint16_t off;
+
+ if (likely(!txq->swp_en || (tunnel != PKT_TX_TUNNEL_UDP &&
+ tunnel != PKT_TX_TUNNEL_IP)))
+ return;
+ /*
+ * The index should have:
+ * bit[0:1] = PKT_TX_L4_MASK
+ * bit[4] = PKT_TX_IPV6
+ * bit[8] = PKT_TX_OUTER_IPV6
+ * bit[9] = PKT_TX_OUTER_UDP
+ */
+ idx = (buf->ol_flags & ol_flags_mask) >> 52;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ idx |= 1 << 9;
+ *swp_types = mlx5_swp_types_table[idx];
+ /*
+ * Set offsets for SW parser. Since ConnectX-5, SW parser just
+ * complements HW parser. SW parser starts to engage only if HW parser
+ * can't reach a header. For the older devices, HW parser will not kick
+ * in if any of SWP offsets is set. Therefore, all of the L3 offsets
+ * should be set regardless of HW offload.
+ */
+ off = buf->outer_l2_len + (vlan ? sizeof(struct vlan_hdr) : 0);
+ offsets[1] = off >> 1; /* Outer L3 offset. */
+ off += buf->outer_l3_len;
+ if (tunnel == PKT_TX_TUNNEL_UDP)
+ offsets[0] = off >> 1; /* Outer L4 offset. */
+ if (inner_ip) {
+ off += buf->l2_len;
+ offsets[3] = off >> 1; /* Inner L3 offset. */
+ if (csum_flags == PKT_TX_TCP_CKSUM || tso ||
+ csum_flags == PKT_TX_UDP_CKSUM) {
+ off += buf->l3_len;
+ offsets[2] = off >> 1; /* Inner L4 offset. */
+ }
+ }
+}
+
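SWP offsets are encoded in 16-bit words, which is why every offset above is shifted right by one before being stored. A tiny worked example with hypothetical header lengths:

#include <stdio.h>

int main(void)
{
	unsigned int l2 = 14, vlan = 4, l3 = 20; /* Hypothetical sizes. */
	unsigned int outer_l3 = l2 + vlan;       /* 18 bytes into frame. */
	unsigned int outer_l4 = outer_l3 + l3;   /* 38 bytes into frame. */

	/* Stored as half-words: 18 >> 1 == 9, 38 >> 1 == 19. */
	printf("SWP outer L3 = %u, outer L4 = %u\n",
	       outer_l3 >> 1, outer_l4 >> 1);
	return 0;
}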
+/**
+ * Convert the Checksum offloads to Verbs.
+ *
+ * @param buf
+ * Pointer to the mbuf.
*
* @return
- * the converted cs_flags.
+ * Converted checksum flags.
*/
static __rte_always_inline uint8_t
-txq_ol_cksum_to_cs(struct mlx5_txq_data *txq_data, struct rte_mbuf *buf)
+txq_ol_cksum_to_cs(struct rte_mbuf *buf)
{
- uint8_t cs_flags = 0;
-
- /* Should we enable HW CKSUM offload */
- if (buf->ol_flags &
- (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM |
- PKT_TX_OUTER_IP_CKSUM)) {
- if (txq_data->tunnel_en &&
- (buf->ol_flags &
- (PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN))) {
- cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
- MLX5_ETH_WQE_L4_INNER_CSUM;
- if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
- cs_flags |= MLX5_ETH_WQE_L3_CSUM;
- } else {
- cs_flags = MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- }
- }
- return cs_flags;
+ uint32_t idx;
+ uint8_t is_tunnel = !!(buf->ol_flags & PKT_TX_TUNNEL_MASK);
+ const uint64_t ol_flags_mask = PKT_TX_TCP_SEG | PKT_TX_L4_MASK |
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM;
+
+ /*
+ * The index should have:
+ * bit[0] = PKT_TX_TCP_SEG
+ * bit[2:3] = PKT_TX_UDP_CKSUM, PKT_TX_TCP_CKSUM
+ * bit[4] = PKT_TX_IP_CKSUM
+ * bit[8] = PKT_TX_OUTER_IP_CKSUM
+ * bit[9] = tunnel
+ */
+ idx = ((buf->ol_flags & ol_flags_mask) >> 50) | (is_tunnel << 9);
+ return mlx5_cksum_table[idx];
}
/**
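The rewrite above replaces a branch chain with a table lookup: the relevant offload bits are packed into a small index once, then translated with a single array load per packet. A hedged sketch of the same technique with made-up flag bits and WQE values:

#include <stdint.h>

enum { F_TSO = 1 << 0, F_L4 = 1 << 1, F_IP = 1 << 2 }; /* Hypothetical. */

static uint8_t cs_table[8]; /* Indexed by packed flag bits. */

static void
cs_table_init(void) /* Run once at startup, off the datapath. */
{
	unsigned int i;

	for (i = 0; i < 8; i++) {
		uint8_t cs = 0;

		if (i & (F_TSO | F_L4))
			cs |= 0x40; /* Made-up "L4 checksum" WQE bit. */
		if (i & F_IP)
			cs |= 0x20; /* Made-up "L3 checksum" WQE bit. */
		cs_table[i] = cs;
	}
}

static inline uint8_t
flags_to_cs(uint64_t ol_flags)
{
	return cs_table[ol_flags & 7]; /* One load per packet. */
}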
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index b66c2916..0a4aed8f 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <assert.h>
@@ -42,8 +42,6 @@
/**
* Count the number of packets having same ol_flags and calculate cs_flags.
*
- * @param txq
- * Pointer to TX queue structure.
* @param pkts
* Pointer to array of packets.
* @param pkts_n
@@ -55,8 +53,7 @@
* Number of packets having same ol_flags.
*/
static inline unsigned int
-txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
- uint16_t pkts_n, uint8_t *cs_flags)
+txq_calc_offload(struct rte_mbuf **pkts, uint16_t pkts_n, uint8_t *cs_flags)
{
unsigned int pos;
const uint64_t ol_mask =
@@ -70,7 +67,7 @@ txq_calc_offload(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
for (pos = 1; pos < pkts_n; ++pos)
if ((pkts[pos]->ol_flags ^ pkts[0]->ol_flags) & ol_mask)
break;
- *cs_flags = txq_ol_cksum_to_cs(txq, pkts[0]);
+ *cs_flags = txq_ol_cksum_to_cs(pkts[0]);
return pos;
}
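txq_calc_offload() cuts the burst at the first packet whose offload flags differ from the first packet's, so the vectorized path can apply one cs_flags value to a whole run. The idea in isolation, as a hedged sketch:

#include <stdint.h>
#include <stddef.h>

static size_t
same_flags_prefix(const uint64_t *flags, size_t n, uint64_t mask)
{
	size_t i;

	if (n == 0)
		return 0;
	for (i = 1; i < n; i++)
		if ((flags[i] ^ flags[0]) & mask)
			break; /* First differing packet ends the run. */
	return i;
}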
@@ -141,7 +138,7 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
n = txq_count_contig_single_seg(&pkts[nb_tx], n);
if (txq->offloads & MLX5_VEC_TX_CKSUM_OFFLOAD_CAP)
- n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
+ n = txq_calc_offload(&pkts[nb_tx], n, &cs_flags);
ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
nb_tx += ret;
if (!ret)
@@ -223,17 +220,14 @@ mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/**
* Check Tx queue flags are set for raw vectorized Tx.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to rte_eth_dev structure.
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
- struct rte_eth_dev *dev)
+mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev)
{
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
@@ -246,17 +240,16 @@ priv_check_raw_vec_tx_support(__rte_unused struct priv *priv,
/**
* Check a device can support vectorized TX.
*
- * @param priv
- * Pointer to private structure.
* @param dev
- * Pointer to rte_eth_dev structure.
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
if (!priv->config.tx_vec_en ||
@@ -277,11 +270,13 @@ priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ if (mlx5_mprq_enabled(ETH_DEV(ctrl->priv)))
+ return -ENOTSUP;
if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
return 1;
@@ -290,26 +285,29 @@ rxq_check_vec_support(struct mlx5_rxq_data *rxq)
/**
* Check a device can support vectorized RX.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* 1 if supported, negative errno value if not.
*/
int __attribute__((cold))
-priv_check_vec_rx_support(struct priv *priv)
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint16_t i;
if (!priv->config.rx_vec_en)
return -ENOTSUP;
+ if (mlx5_mprq_enabled(dev))
+ return -ENOTSUP;
/* All the configured queues should support. */
for (i = 0; i < priv->rxqs_n; ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
if (!rxq)
continue;
- if (rxq_check_vec_support(rxq) < 0)
+ if (mlx5_rxq_check_vec_support(rxq) < 0)
break;
}
if (i != priv->rxqs_n)
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index 44856bbf..fb884f92 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_H_
@@ -87,21 +87,26 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
const uint16_t q_mask = q_n - 1;
uint16_t elts_idx = rxq->rq_ci & q_mask;
struct rte_mbuf **elts = &(*rxq->elts)[elts_idx];
- volatile struct mlx5_wqe_data_seg *wq = &(*rxq->wqes)[elts_idx];
+ volatile struct mlx5_wqe_data_seg *wq =
+ &((volatile struct mlx5_wqe_data_seg *)rxq->wqes)[elts_idx];
unsigned int i;
- assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH);
+ assert(n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n));
assert(n <= (uint16_t)(q_n - (rxq->rq_ci - rxq->rq_pi)));
- assert(MLX5_VPMD_RXQ_RPLNSH_THRESH > MLX5_VPMD_DESCS_PER_LOOP);
+ assert(MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n) > MLX5_VPMD_DESCS_PER_LOOP);
/* Not to cross queue end. */
n = RTE_MIN(n - MLX5_VPMD_DESCS_PER_LOOP, q_n - elts_idx);
if (rte_mempool_get_bulk(rxq->mp, (void *)elts, n) < 0) {
rxq->stats.rx_nombuf += n;
return;
}
- for (i = 0; i < n; ++i)
+ for (i = 0; i < n; ++i) {
wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
RTE_PKTMBUF_HEADROOM);
+ /* If there's only one MR, no need to replace LKey in WQE. */
+ if (unlikely(mlx5_mr_btree_len(&rxq->mr_ctrl.cache_bh) > 1))
+ wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
+ }
rxq->rq_ci += n;
/* Prevent overflowing into consumed mbufs. */
elts_idx = rxq->rq_ci & q_mask;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index bbe1818e..b37b7381 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_NEON_H_
@@ -107,8 +107,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
if (unlikely(!pkts_n))
return 0;
for (n = 0; n < pkts_n; ++n) {
@@ -142,7 +140,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
break;
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Title WQEBB pointer. */
t_wqe = (uint8x16_t *)wqe;
dseg = (uint8_t *)(wqe + 1);
@@ -167,8 +165,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
vst1q_u8((void *)t_wqe, ctrl);
/* Fill ESEG in the header. */
vst1q_u16((void *)(t_wqe + 1),
- (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
- 0, 0, 0, 0 });
+ ((uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
+ 0, 0, 0, 0 }));
txq->wqe_ci = wqe_ci;
}
if (!n)
@@ -176,12 +174,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -245,8 +242,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
if (unlikely(!pkts_n))
@@ -282,11 +277,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
txq->elts_comp += pkts_n;
} else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request a completion. */
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -300,10 +294,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
vst1q_u8((void *)t_wqe, ctrl);
/* Fill ESEG in the header. */
vst1q_u8((void *)(t_wqe + 1),
- (uint8x16_t) { 0, 0, 0, 0,
- cs_flags, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0 });
+ ((uint8x16_t) { 0, 0, 0, 0,
+ cs_flags, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0 }));
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += pkts_n;
#endif
@@ -551,6 +545,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
uint64x2_t rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
if (rxq->mark) {
const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
@@ -583,14 +578,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
ptype = vshrn_n_u32(ptype_info, 10);
/* Errored packets will have RTE_PTYPE_ALL_MASK. */
ptype = vorr_u16(ptype, op_err);
- pkts[0]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 6)];
- pkts[1]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 4)];
- pkts[2]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 2)];
- pkts[3]->packet_type =
- mlx5_ptype_table[vget_lane_u8(vreinterpret_u8_u16(ptype), 0)];
+ pt_idx0 = vget_lane_u8(vreinterpret_u8_u16(ptype), 6);
+ pt_idx1 = vget_lane_u8(vreinterpret_u8_u16(ptype), 4);
+ pt_idx2 = vget_lane_u8(vreinterpret_u8_u16(ptype), 2);
+ pt_idx3 = vget_lane_u8(vreinterpret_u8_u16(ptype), 0);
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
/* Fill flags for checksum and VLAN. */
pinfo = vandq_u32(ptype_info, ptype_ol_mask);
pinfo = vreinterpretq_u32_u8(
@@ -734,7 +733,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
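Both the NEON and SSE ptype hunks use the same branchless trick: "!!bit * value" yields either 0 or value, so the tunnel type is OR-ed in without a conditional, which keeps the four-packet unrolled loop free of branches. In scalar form:

#include <stdint.h>

/* Equivalent to (idx & (1 << 6)) ? (base | tunnel) : base, branch-free. */
static inline uint32_t
merge_tunnel_ptype(uint32_t base, uint8_t idx, uint32_t tunnel)
{
	return base | (!!(idx & (1 << 6)) * tunnel);
}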
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index c088bcb5..54b3783c 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_RXTX_VEC_SSE_H_
@@ -107,8 +107,6 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
if (unlikely(!pkts_n))
return 0;
for (n = 0; n < pkts_n; ++n) {
@@ -144,7 +142,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
}
wqe = &((volatile struct mlx5_wqe64 *)
txq->wqes)[wqe_ci & wq_mask].hdr;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
+ cs_flags = txq_ol_cksum_to_cs(buf);
/* Title WQEBB pointer. */
t_wqe = (__m128i *)wqe;
dseg = (__m128i *)(wqe + 1);
@@ -177,12 +175,11 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
txq->elts_head = elts_head;
if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
@@ -244,8 +241,6 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
@@ -283,11 +278,10 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
txq->elts_comp += pkts_n;
} else {
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi++ - txq->cq_ci));
/* Request a completion. */
txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
comp_req = 8;
}
/* Fill CTRL in the header. */
@@ -542,6 +536,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
const __m128i mbuf_init =
_mm_loadl_epi64((__m128i *)&rxq->mbuf_initializer);
__m128i rearm0, rearm1, rearm2, rearm3;
+ uint8_t pt_idx0, pt_idx1, pt_idx2, pt_idx3;
/* Extract pkt_info field. */
pinfo0 = _mm_unpacklo_epi32(cqes[0], cqes[1]);
@@ -595,10 +590,18 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq, __m128i cqes[4],
/* Errored packets will have RTE_PTYPE_ALL_MASK. */
op_err = _mm_srli_epi16(op_err, 8);
ptype = _mm_or_si128(ptype, op_err);
- pkts[0]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 0)];
- pkts[1]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 2)];
- pkts[2]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 4)];
- pkts[3]->packet_type = mlx5_ptype_table[_mm_extract_epi8(ptype, 6)];
+ pt_idx0 = _mm_extract_epi8(ptype, 0);
+ pt_idx1 = _mm_extract_epi8(ptype, 2);
+ pt_idx2 = _mm_extract_epi8(ptype, 4);
+ pt_idx3 = _mm_extract_epi8(ptype, 6);
+ pkts[0]->packet_type = mlx5_ptype_table[pt_idx0] |
+ !!(pt_idx0 & (1 << 6)) * rxq->tunnel;
+ pkts[1]->packet_type = mlx5_ptype_table[pt_idx1] |
+ !!(pt_idx1 & (1 << 6)) * rxq->tunnel;
+ pkts[2]->packet_type = mlx5_ptype_table[pt_idx2] |
+ !!(pt_idx2 & (1 << 6)) * rxq->tunnel;
+ pkts[3]->packet_type = mlx5_ptype_table[pt_idx3] |
+ !!(pt_idx3 & (1 << 6)) * rxq->tunnel;
/* Fill flags for checksum and VLAN. */
pinfo = _mm_and_si128(pinfo, ptype_ol_mask);
pinfo = _mm_shuffle_epi8(cv_flag_sel, pinfo);
@@ -715,7 +718,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH)
+ if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_socket.c b/drivers/net/mlx5/mlx5_socket.c
index 61c1a4a5..a3a52291 100644
--- a/drivers/net/mlx5/mlx5_socket.c
+++ b/drivers/net/mlx5/mlx5_socket.c
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#define _GNU_SOURCE
@@ -18,92 +19,105 @@
/**
* Initialise the socket to communicate with the secondary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_socket_init(struct priv *priv)
+mlx5_socket_init(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
int ret;
int flags;
- struct stat file_stat;
/*
+ * Close the last socket that was used to communicate
+ * with the secondary process.
+ */
+ if (priv->primary_socket)
+ mlx5_socket_uninit(dev);
+ /*
* Initialise the socket to communicate with the secondary
* process.
*/
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
- WARN("secondary process not supported: %s", strerror(errno));
- return ret;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
}
priv->primary_socket = ret;
flags = fcntl(priv->primary_socket, F_GETFL, 0);
- if (flags == -1)
- goto out;
+ if (flags == -1) {
+ rte_errno = errno;
+ goto error;
+ }
ret = fcntl(priv->primary_socket, F_SETFL, flags | O_NONBLOCK);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ rte_errno = errno;
+ goto error;
+ }
snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
MLX5_DRIVER_NAME, priv->primary_socket);
- ret = stat(sun.sun_path, &file_stat);
- if (!ret)
- claim_zero(remove(sun.sun_path));
+ remove(sun.sun_path);
ret = bind(priv->primary_socket, (const struct sockaddr *)&sun,
sizeof(sun));
if (ret < 0) {
- WARN("cannot bind socket, secondary process not supported: %s",
- strerror(errno));
+ rte_errno = errno;
+ DRV_LOG(WARNING,
+ "port %u cannot bind socket, secondary process not"
+ " supported: %s",
+ dev->data->port_id, strerror(errno));
goto close;
}
ret = listen(priv->primary_socket, 0);
if (ret < 0) {
- WARN("Secondary process not supported: %s", strerror(errno));
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u secondary process not supported: %s",
+ dev->data->port_id, strerror(errno));
goto close;
}
- return ret;
+ return 0;
close:
remove(sun.sun_path);
-out:
+error:
claim_zero(close(priv->primary_socket));
priv->primary_socket = 0;
- return -(ret);
+ return -rte_errno;
}
/**
* Un-initialise the socket to communicate with the secondary process.
*
- * @param[in] priv
- * Pointer to private structure.
- *
- * @return
- * 0 on success, errno value on failure.
+ * @param[in] dev
*/
-int
-priv_socket_uninit(struct priv *priv)
+void
+mlx5_socket_uninit(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
+
MKSTR(path, "/var/tmp/%s_%d", MLX5_DRIVER_NAME, priv->primary_socket);
claim_zero(close(priv->primary_socket));
priv->primary_socket = 0;
claim_zero(remove(path));
- return 0;
}
/**
* Handle socket interrupts.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_socket_handle(struct priv *priv)
+mlx5_socket_handle(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int conn_sock;
int ret = 0;
struct cmsghdr *cmsg = NULL;
@@ -125,25 +139,30 @@ priv_socket_handle(struct priv *priv)
/* Accept the connection from the client. */
conn_sock = accept(priv->primary_socket, NULL, NULL);
if (conn_sock < 0) {
- WARN("connection failed: %s", strerror(errno));
+ DRV_LOG(WARNING, "port %u connection failed: %s",
+ dev->data->port_id, strerror(errno));
return;
}
ret = setsockopt(conn_sock, SOL_SOCKET, SO_PASSCRED, &(int){1},
sizeof(int));
if (ret < 0) {
- WARN("cannot change socket options");
- goto out;
+ ret = errno;
+ DRV_LOG(WARNING, "port %u cannot change socket options: %s",
+ dev->data->port_id, strerror(ret));
+ goto error;
}
ret = recvmsg(conn_sock, &msg, MSG_WAITALL);
if (ret < 0) {
- WARN("received an empty message: %s", strerror(errno));
- goto out;
+ ret = errno;
+ DRV_LOG(WARNING, "port %u received an empty message: %s",
+ dev->data->port_id, strerror(ret));
+ goto error;
}
/* Expect to receive credentials only. */
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
- WARN("no message");
- goto out;
+ DRV_LOG(WARNING, "port %u no message", dev->data->port_id);
+ goto error;
}
if ((cmsg->cmsg_type == SCM_CREDENTIALS) &&
(cmsg->cmsg_len >= sizeof(*cred))) {
@@ -152,14 +171,16 @@ priv_socket_handle(struct priv *priv)
}
cmsg = CMSG_NXTHDR(&msg, cmsg);
if (cmsg != NULL) {
- WARN("Message wrongly formatted");
- goto out;
+ DRV_LOG(WARNING, "port %u message wrongly formatted",
+ dev->data->port_id);
+ goto error;
}
/* Make sure all the ancillary data was received and valid. */
if ((cred == NULL) || (cred->uid != getuid()) ||
(cred->gid != getgid())) {
- WARN("wrong credentials");
- goto out;
+ DRV_LOG(WARNING, "port %u wrong credentials",
+ dev->data->port_id);
+ goto error;
}
/* Set-up the ancillary data. */
cmsg = CMSG_FIRSTHDR(&msg);
@@ -171,27 +192,29 @@ priv_socket_handle(struct priv *priv)
*fd = priv->ctx->cmd_fd;
ret = sendmsg(conn_sock, &msg, 0);
if (ret < 0)
- WARN("cannot send response");
-out:
+ DRV_LOG(WARNING, "port %u cannot send response",
+ dev->data->port_id);
+error:
close(conn_sock);
}
/**
* Connect to the primary process.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
*
* @return
- * fd on success, negative errno value on failure.
+ * fd on success, negative errno value otherwise and rte_errno is set.
*/
int
-priv_socket_connect(struct priv *priv)
+mlx5_socket_connect(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct sockaddr_un sun = {
.sun_family = AF_UNIX,
};
- int socket_fd;
+ int socket_fd = -1;
int *fd = NULL;
int ret;
struct ucred *cred;
@@ -211,57 +234,75 @@ priv_socket_connect(struct priv *priv)
ret = socket(AF_UNIX, SOCK_STREAM, 0);
if (ret < 0) {
- WARN("cannot connect to primary");
- return ret;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u cannot connect to primary",
+ dev->data->port_id);
+ goto error;
}
socket_fd = ret;
snprintf(sun.sun_path, sizeof(sun.sun_path), "/var/tmp/%s_%d",
MLX5_DRIVER_NAME, priv->primary_socket);
ret = connect(socket_fd, (const struct sockaddr *)&sun, sizeof(sun));
if (ret < 0) {
- WARN("cannot connect to primary");
- goto out;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u cannot connect to primary",
+ dev->data->port_id);
+ goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
- DEBUG("cannot get first message");
- goto out;
+ rte_errno = EINVAL;
+ DRV_LOG(DEBUG, "port %u cannot get first message",
+ dev->data->port_id);
+ goto error;
}
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_CREDENTIALS;
cmsg->cmsg_len = CMSG_LEN(sizeof(*cred));
cred = (struct ucred *)CMSG_DATA(cmsg);
if (cred == NULL) {
- DEBUG("no credentials received");
- goto out;
+ rte_errno = EINVAL;
+ DRV_LOG(DEBUG, "port %u no credentials received",
+ dev->data->port_id);
+ goto error;
}
cred->pid = getpid();
cred->uid = getuid();
cred->gid = getgid();
ret = sendmsg(socket_fd, &msg, MSG_DONTWAIT);
if (ret < 0) {
- WARN("cannot send credentials to primary: %s",
- strerror(errno));
- goto out;
+ rte_errno = errno;
+ DRV_LOG(WARNING,
+ "port %u cannot send credentials to primary: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
}
ret = recvmsg(socket_fd, &msg, MSG_WAITALL);
if (ret <= 0) {
- WARN("no message from primary: %s", strerror(errno));
- goto out;
+ rte_errno = errno;
+ DRV_LOG(WARNING, "port %u no message from primary: %s",
+ dev->data->port_id, strerror(errno));
+ goto error;
}
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
- WARN("No file descriptor received");
- goto out;
+ rte_errno = EINVAL;
+ DRV_LOG(WARNING, "port %u no file descriptor received",
+ dev->data->port_id);
+ goto error;
}
fd = (int *)CMSG_DATA(cmsg);
- if (*fd <= 0) {
- WARN("no file descriptor received: %s", strerror(errno));
- ret = *fd;
- goto out;
+ if (*fd < 0) {
+ DRV_LOG(WARNING, "port %u no file descriptor received: %s",
+ dev->data->port_id, strerror(errno));
+ rte_errno = *fd;
+ goto error;
}
ret = *fd;
-out:
close(socket_fd);
return ret;
+error:
+ if (socket_fd != -1)
+ close(socket_fd);
+ return -rte_errno;
}
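The primary/secondary handshake above is built on two ancillary-data mechanisms of AF_UNIX sockets: SO_PASSCRED/SCM_CREDENTIALS to verify the peer and SCM_RIGHTS to hand over the command file descriptor. A minimal sketch of the sending side of the fd transfer (error handling trimmed):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
send_fd(int sock, int fd)
{
	char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
	char dummy = 0; /* SCM_RIGHTS needs at least one data byte. */
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}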
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 378472a7..91f3d474 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -1,10 +1,13 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
+#include <inttypes.h>
#include <linux/sockios.h>
#include <linux/ethtool.h>
+#include <stdint.h>
+#include <stdio.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>
@@ -19,6 +22,7 @@ struct mlx5_counter_ctrl {
char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
/* Name of the counter on the device table. */
char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t ib:1; /**< Nonzero for IB counters. */
};
static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
@@ -93,6 +97,7 @@ static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
{
.dpdk_name = "rx_out_of_buffer",
.ctr_name = "out_of_buffer",
+ .ib = 1,
},
{
.dpdk_name = "tx_packets_phy",
@@ -117,39 +122,56 @@ static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
/**
* Read device counters table.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] stats
* Counters table output buffer.
*
* @return
- * 0 on success and stats is filled, negative on error.
+ * 0 on success and stats is filled, negative errno value otherwise and
+ * rte_errno is set.
*/
static int
-priv_read_dev_counters(struct priv *priv, uint64_t *stats)
+mlx5_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
struct ifreq ifr;
unsigned int stats_sz = xstats_ctrl->stats_n * sizeof(uint64_t);
unsigned char et_stat_buf[sizeof(struct ethtool_stats) + stats_sz];
struct ethtool_stats *et_stats = (struct ethtool_stats *)et_stat_buf;
+ int ret;
et_stats->cmd = ETHTOOL_GSTATS;
et_stats->n_stats = xstats_ctrl->stats_n;
ifr.ifr_data = (caddr_t)et_stats;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
- WARN("unable to read statistic values from device");
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING,
+ "port %u unable to read statistic values from device",
+ dev->data->port_id);
+ return ret;
}
for (i = 0; i != xstats_n; ++i) {
- if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name))
- priv_get_cntr_sysfs(priv,
- mlx5_counters_init[i].ctr_name,
- &stats[i]);
- else
+ if (mlx5_counters_init[i].ib) {
+ FILE *file;
+ MKSTR(path, "%s/ports/1/hw_counters/%s",
+ priv->ibdev_path,
+ mlx5_counters_init[i].ctr_name);
+
+ file = fopen(path, "rb");
+ if (file) {
+ int n = fscanf(file, "%" SCNu64, &stats[i]);
+
+ fclose(file);
+ if (n != 1)
+ stats[i] = 0;
+ }
+ } else {
stats[i] = (uint64_t)
et_stats->data[xstats_ctrl->dev_table_idx[i]];
+ }
}
return 0;
}
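Counters flagged with .ib are read from per-port sysfs files rather than through ETHTOOL_GSTATS, one decimal value per file. The read boils down to this (path is illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
read_sysfs_counter(const char *path) /* e.g. .../hw_counters/out_of_buffer */
{
	uint64_t v = 0;
	FILE *f = fopen(path, "rb");

	if (f != NULL) {
		if (fscanf(f, "%" SCNu64, &v) != 1)
			v = 0; /* Unparsable content counts as zero. */
		fclose(f);
	}
	return v;
}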
@@ -157,22 +179,26 @@ priv_read_dev_counters(struct priv *priv, uint64_t *stats)
/**
* Query the number of statistics provided by ETHTOOL.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
- * Number of statistics on success, -1 on error.
+ * Number of statistics on success, negative errno value otherwise and
+ * rte_errno is set.
*/
static int
-priv_ethtool_get_stats_n(struct priv *priv) {
+mlx5_ethtool_get_stats_n(struct rte_eth_dev *dev) {
struct ethtool_drvinfo drvinfo;
struct ifreq ifr;
+ int ret;
drvinfo.cmd = ETHTOOL_GDRVINFO;
ifr.ifr_data = (caddr_t)&drvinfo;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
- WARN("unable to query number of statistics");
- return -1;
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u unable to query number of statistics",
+ dev->data->port_id);
+ return ret;
}
return drvinfo.n_stats;
}
@@ -180,12 +206,13 @@ priv_ethtool_get_stats_n(struct priv *priv) {
/**
* Init the structures to read device counters.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*/
void
-priv_xstats_init(struct priv *priv)
+mlx5_xstats_init(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
unsigned int i;
unsigned int j;
@@ -193,12 +220,15 @@ priv_xstats_init(struct priv *priv)
struct ethtool_gstrings *strings = NULL;
unsigned int dev_stats_n;
unsigned int str_sz;
+ int ret;
- dev_stats_n = priv_ethtool_get_stats_n(priv);
- if (dev_stats_n < 1) {
- WARN("no extended statistics available");
+ ret = mlx5_ethtool_get_stats_n(dev);
+ if (ret < 0) {
+ DRV_LOG(WARNING, "port %u no extended statistics available",
+ dev->data->port_id);
return;
}
+ dev_stats_n = ret;
xstats_ctrl->stats_n = dev_stats_n;
/* Allocate memory to grab stat names and values. */
str_sz = dev_stats_n * ETH_GSTRING_LEN;
@@ -206,15 +236,18 @@ priv_xstats_init(struct priv *priv)
rte_malloc("xstats_strings",
str_sz + sizeof(struct ethtool_gstrings), 0);
if (!strings) {
- WARN("unable to allocate memory for xstats");
+ DRV_LOG(WARNING, "port %u unable to allocate memory for xstats",
+ dev->data->port_id);
return;
}
strings->cmd = ETHTOOL_GSTRINGS;
strings->string_set = ETH_SS_STATS;
strings->len = dev_stats_n;
ifr.ifr_data = (caddr_t)strings;
- if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
- WARN("unable to get statistic names");
+ ret = mlx5_ifreq(dev, SIOCETHTOOL, &ifr, 1);
+ if (ret) {
+ DRV_LOG(WARNING, "port %u unable to get statistic names",
+ dev->data->port_id);
goto free;
}
for (j = 0; j != xstats_n; ++j)
@@ -232,68 +265,67 @@ priv_xstats_init(struct priv *priv)
}
}
for (j = 0; j != xstats_n; ++j) {
- if (priv_is_ib_cntr(mlx5_counters_init[j].ctr_name))
+ if (mlx5_counters_init[j].ib)
continue;
if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
- WARN("counter \"%s\" is not recognized",
- mlx5_counters_init[j].dpdk_name);
+ DRV_LOG(WARNING,
+ "port %u counter \"%s\" is not recognized",
+ dev->data->port_id,
+ mlx5_counters_init[j].dpdk_name);
goto free;
}
}
/* Copy to base at first time. */
assert(xstats_n <= MLX5_MAX_XSTATS);
- priv_read_dev_counters(priv, xstats_ctrl->base);
+ ret = mlx5_read_dev_counters(dev, xstats_ctrl->base);
+ if (ret)
+ DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
free:
rte_free(strings);
}
/**
- * Get device extended statistics.
+ * DPDK callback to get extended device statistics.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param[out] stats
* Pointer to rte extended stats table.
+ * @param n
+ * The size of the stats table.
*
* @return
* Number of extended stats on success and stats is filled,
- * negative on error.
+ * negative on error and rte_errno is set.
*/
-static int
-priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats)
+int
+mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
+ unsigned int n)
{
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
- unsigned int n = xstats_n;
uint64_t counters[n];
- if (priv_read_dev_counters(priv, counters) < 0)
- return -1;
- for (i = 0; i != xstats_n; ++i) {
- stats[i].id = i;
- stats[i].value = (counters[i] - xstats_ctrl->base[i]);
- }
- return n;
-}
-
-/**
- * Reset device extended statistics.
- *
- * @param priv
- * Pointer to private structure.
- */
-static void
-priv_xstats_reset(struct priv *priv)
-{
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
- unsigned int i;
- unsigned int n = xstats_n;
- uint64_t counters[n];
+ if (n >= xstats_n && stats) {
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+ int ret;
- if (priv_read_dev_counters(priv, counters) < 0)
- return;
- for (i = 0; i != n; ++i)
- xstats_ctrl->base[i] = counters[i];
+ stats_n = mlx5_ethtool_get_stats_n(dev);
+ if (stats_n < 0)
+ return stats_n;
+ if (xstats_ctrl->stats_n != stats_n)
+ mlx5_xstats_init(dev);
+ ret = mlx5_read_dev_counters(dev, counters);
+ if (ret)
+ return ret;
+ for (i = 0; i != xstats_n; ++i) {
+ stats[i].id = i;
+ stats[i].value = (counters[i] - xstats_ctrl->base[i]);
+ }
+ }
+ return xstats_n;
}
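xstats get/reset share one pattern: reset snapshots the raw hardware counters into a base array, and get reports raw minus base. Because the arithmetic is unsigned, the deltas stay correct even if a counter wraps. Reduced to its essence:

#include <stdint.h>

struct xstat { uint64_t base; };

static uint64_t
xstat_get(uint64_t raw, const struct xstat *s)
{
	return raw - s->base; /* Value accumulated since last reset. */
}

static void
xstat_reset(uint64_t raw, struct xstat *s)
{
	s->base = raw; /* Snapshot; unsigned wraparound keeps deltas right. */
}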
/**
@@ -303,6 +335,10 @@ priv_xstats_reset(struct priv *priv)
* Pointer to Ethernet device structure.
* @param[out] stats
* Stats structure output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled, negative errno value otherwise and
+ * rte_errno is set.
*/
int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
@@ -312,7 +348,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
unsigned int i;
unsigned int idx;
- priv_lock(priv);
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
@@ -358,7 +393,6 @@ mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* FIXME: retrieve and add hardware counters. */
#endif
*stats = tmp;
- priv_unlock(priv);
return 0;
}
@@ -375,7 +409,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
unsigned int i;
unsigned int idx;
- priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); ++i) {
if ((*priv->rxqs)[i] == NULL)
continue;
@@ -393,45 +426,6 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
#ifndef MLX5_PMD_SOFT_COUNTERS
/* FIXME: reset hardware counters. */
#endif
- priv_unlock(priv);
-}
-
-/**
- * DPDK callback to get extended device statistics.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param[out] stats
- * Stats table output buffer.
- * @param n
- * The size of the stats table.
- *
- * @return
- * Number of xstats on success, negative on failure.
- */
-int
-mlx5_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstat *stats, unsigned int n)
-{
- struct priv *priv = dev->data->dev_private;
- int ret = xstats_n;
-
- if (n >= xstats_n && stats) {
- struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
- int stats_n;
-
- priv_lock(priv);
- stats_n = priv_ethtool_get_stats_n(priv);
- if (stats_n < 0) {
- priv_unlock(priv);
- return -1;
- }
- if (xstats_ctrl->stats_n != stats_n)
- priv_xstats_init(priv);
- ret = priv_xstats_get(priv, stats);
- priv_unlock(priv);
- }
- return ret;
}
/**
@@ -446,16 +440,27 @@ mlx5_xstats_reset(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
int stats_n;
+ unsigned int i;
+ unsigned int n = xstats_n;
+ uint64_t counters[n];
+ int ret;
- priv_lock(priv);
- stats_n = priv_ethtool_get_stats_n(priv);
- if (stats_n < 0)
- goto unlock;
+ stats_n = mlx5_ethtool_get_stats_n(dev);
+ if (stats_n < 0) {
+ DRV_LOG(ERR, "port %u cannot get stats: %s", dev->data->port_id,
+ strerror(-stats_n));
+ return;
+ }
if (xstats_ctrl->stats_n != stats_n)
- priv_xstats_init(priv);
- priv_xstats_reset(priv);
-unlock:
- priv_unlock(priv);
+ mlx5_xstats_init(dev);
+ ret = mlx5_read_dev_counters(dev, counters);
+ if (ret) {
+ DRV_LOG(ERR, "port %u cannot read device counters: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return;
+ }
+ for (i = 0; i != n; ++i)
+ xstats_ctrl->base[i] = counters[i];
}
/**
@@ -472,21 +477,18 @@ unlock:
* Number of xstats names.
*/
int
-mlx5_xstats_get_names(struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, unsigned int n)
+mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
- struct priv *priv = dev->data->dev_private;
unsigned int i;
if (n >= xstats_n && xstats_names) {
- priv_lock(priv);
for (i = 0; i != xstats_n; ++i) {
strncpy(xstats_names[i].name,
mlx5_counters_init[i].dpdk_name,
RTE_ETH_XSTATS_NAME_SIZE);
xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
}
- priv_unlock(priv);
}
return xstats_n;
}
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f5711a99..e2a9bb70 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <unistd.h>
@@ -14,83 +14,133 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/**
+ * Stop traffic on Tx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
static void
-priv_txq_stop(struct priv *priv)
+mlx5_txq_stop(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->txqs_n; ++i)
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(dev, i);
}
+/**
+ * Start traffic on Tx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
static int
-priv_txq_start(struct priv *priv)
+mlx5_txq_start(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
- int ret = 0;
+ int ret;
- /* Add memory regions to Tx queues. */
for (i = 0; i != priv->txqs_n; ++i) {
- unsigned int idx = 0;
- struct mlx5_mr *mr;
- struct mlx5_txq_ctrl *txq_ctrl = mlx5_priv_txq_get(priv, i);
+ struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- LIST_FOREACH(mr, &priv->mr, next) {
- priv_txq_mp2mr_reg(priv, &txq_ctrl->txq, mr->mp, idx++);
- if (idx == MLX5_PMD_TX_MP_CACHE)
- break;
- }
txq_alloc_elts(txq_ctrl);
- txq_ctrl->ibv = mlx5_priv_txq_ibv_new(priv, i);
+ txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
if (!txq_ctrl->ibv) {
- ret = ENOMEM;
+ rte_errno = ENOMEM;
goto error;
}
}
- ret = priv_tx_uar_remap(priv, priv->ctx->cmd_fd);
- if (ret)
+ ret = mlx5_tx_uar_remap(dev, priv->ctx->cmd_fd);
+ if (ret) {
+ /* Adjust index for rollback. */
+ i = priv->txqs_n - 1;
goto error;
- return ret;
+ }
+ return 0;
error:
- priv_txq_stop(priv);
- return ret;
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ do {
+ mlx5_txq_release(dev, i);
+ } while (i-- != 0);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
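Both queue-start error paths use the same rollback idiom: save rte_errno before the cleanup loop (release calls may clobber it), release everything set up so far, then restore it for the caller. Generically, under the assumption that the start callback sets errno on failure:

#include <errno.h>

static int
start_all(unsigned int n, int (*start_one)(unsigned int),
	  void (*stop_one)(unsigned int))
{
	unsigned int i;
	int err;

	for (i = 0; i < n; i++)
		if (start_one(i) != 0)
			goto rollback;
	return 0;
rollback:
	err = errno;         /* Save before cleanup can clobber it. */
	while (i-- != 0)
		stop_one(i); /* Release everything started so far. */
	errno = err;         /* Restore for the caller. */
	return -err;
}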
+/**
+ * Stop traffic on Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
static void
-priv_rxq_stop(struct priv *priv)
+mlx5_rxq_stop(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != priv->rxqs_n; ++i)
- mlx5_priv_rxq_release(priv, i);
+ mlx5_rxq_release(dev, i);
}
+/**
+ * Start traffic on Rx queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
static int
-priv_rxq_start(struct priv *priv)
+mlx5_rxq_start(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i;
int ret = 0;
+ /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
+ if (mlx5_mprq_alloc_mp(dev)) {
+ /* Should not release Rx queues but return immediately. */
+ return -rte_errno;
+ }
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_priv_rxq_get(priv, i);
+ struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
+ struct rte_mempool *mp;
if (!rxq_ctrl)
continue;
+ /* Pre-register Rx mempool. */
+ mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
+ DRV_LOG(DEBUG,
+ "port %u Rx queue %u registering"
+ " mp %s having %u chunks",
+ dev->data->port_id, rxq_ctrl->idx,
+ mp->name, mp->nb_mem_chunks);
+ mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
goto error;
- rxq_ctrl->ibv = mlx5_priv_rxq_ibv_new(priv, i);
- if (!rxq_ctrl->ibv) {
- ret = ENOMEM;
+ rxq_ctrl->ibv = mlx5_rxq_ibv_new(dev, i);
+ if (!rxq_ctrl->ibv)
goto error;
- }
}
- return -ret;
+ return 0;
error:
- priv_rxq_stop(priv);
- return -ret;
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ do {
+ mlx5_rxq_release(dev, i);
+ } while (i-- != 0);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -102,68 +152,62 @@ error:
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr = NULL;
- int err;
+ int ret;
- dev->data->dev_started = 1;
- priv_lock(priv);
- err = priv_flow_create_drop_queue(priv);
- if (err) {
- ERROR("%p: Drop queue allocation failed: %s",
- (void *)dev, strerror(err));
- goto error;
+ DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
+ ret = mlx5_txq_start(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ return -rte_errno;
}
- DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
- rte_mempool_walk(mlx5_mp2mr_iter, priv);
- err = priv_txq_start(priv);
- if (err) {
- ERROR("%p: TXQ allocation failed: %s",
- (void *)dev, strerror(err));
- goto error;
+ ret = mlx5_rxq_start(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
+ dev->data->port_id, strerror(rte_errno));
+ mlx5_txq_stop(dev);
+ return -rte_errno;
}
- err = priv_rxq_start(priv);
- if (err) {
- ERROR("%p: RXQ allocation failed: %s",
- (void *)dev, strerror(err));
+ dev->data->dev_started = 1;
+ ret = mlx5_rx_intr_vec_enable(dev);
+ if (ret) {
+ DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
+ dev->data->port_id);
goto error;
}
- err = priv_rx_intr_vec_enable(priv);
- if (err) {
- ERROR("%p: RX interrupt vector creation failed",
- (void *)priv);
+ mlx5_xstats_init(dev);
+ ret = mlx5_traffic_enable(dev);
+ if (ret) {
+ DRV_LOG(DEBUG, "port %u failed to set defaults flows",
+ dev->data->port_id);
goto error;
}
- priv_xstats_init(priv);
- /* Update link status and Tx/Rx callbacks for the first time. */
- memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
- INFO("Forcing port %u link to be up", dev->data->port_id);
- err = priv_force_link_status_change(priv, ETH_LINK_UP);
- if (err) {
- DEBUG("Failed to set port %u link to be up",
- dev->data->port_id);
+ ret = mlx5_flow_start(dev, &priv->flows);
+ if (ret) {
+ DRV_LOG(DEBUG, "port %u failed to set flows",
+ dev->data->port_id);
goto error;
}
- priv_dev_interrupt_handler_install(priv, dev);
- priv_unlock(priv);
+ dev->tx_pkt_burst = mlx5_select_tx_function(dev);
+ dev->rx_pkt_burst = mlx5_select_rx_function(dev);
+ mlx5_dev_interrupt_handler_install(dev);
return 0;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
/* Rollback. */
dev->data->dev_started = 0;
- for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
- priv_mr_release(priv, mr);
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
- priv_txq_stop(priv);
- priv_rxq_stop(priv);
- priv_flow_delete_drop_queue(priv);
- priv_unlock(priv);
- return err;
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
+ mlx5_txq_stop(dev);
+ mlx5_rxq_stop(dev);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
/**
@@ -178,42 +222,37 @@ void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr;
- priv_lock(priv);
dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
rte_wmb();
usleep(1000 * priv->rxqs_n);
- DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
- priv_flow_stop(priv, &priv->flows);
- priv_dev_traffic_disable(priv, dev);
- priv_rx_intr_vec_disable(priv);
- priv_dev_interrupt_handler_uninstall(priv, dev);
- priv_txq_stop(priv);
- priv_rxq_stop(priv);
- for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
- priv_mr_release(priv, mr);
- priv_flow_delete_drop_queue(priv);
- priv_unlock(priv);
+ DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
+ mlx5_flow_stop(dev, &priv->flows);
+ mlx5_traffic_disable(dev);
+ mlx5_rx_intr_vec_disable(dev);
+ mlx5_dev_interrupt_handler_uninstall(dev);
+ mlx5_txq_stop(dev);
+ mlx5_rxq_stop(dev);
}
/**
* Enable traffic flows configured by control plane
*
- * @param priv
- * Pointer to Ethernet device private data.
* @param dev
* Pointer to Ethernet device structure.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
+mlx5_traffic_enable(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct rte_flow_item_eth bcast = {
.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
};
@@ -246,8 +285,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
.type = 0,
};
- claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
- return 0;
+ ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
+ if (ret)
+ goto error;
}
if (dev->data->all_multicast) {
struct rte_flow_item_eth multicast = {
@@ -256,7 +296,9 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
.type = 0,
};
- claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
+ ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
+ if (ret)
+ goto error;
} else {
/* Add broadcast/multicast flows. */
for (i = 0; i != vlan_filter_n; ++i) {
@@ -265,9 +307,8 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
struct rte_flow_item_vlan vlan_spec = {
.tci = rte_cpu_to_be_16(vlan),
};
- struct rte_flow_item_vlan vlan_mask = {
- .tci = 0xffff,
- };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
&vlan_spec, &vlan_mask);
@@ -304,9 +345,8 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
struct rte_flow_item_vlan vlan_spec = {
.tci = rte_cpu_to_be_16(vlan),
};
- struct rte_flow_item_vlan vlan_mask = {
- .tci = 0xffff,
- };
+ struct rte_flow_item_vlan vlan_mask =
+ rte_flow_item_vlan_mask;
ret = mlx5_ctrl_flow_vlan(dev, &unicast,
&unicast_mask,
@@ -316,74 +356,49 @@ priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
goto error;
}
if (!vlan_filter_n) {
- ret = mlx5_ctrl_flow(dev, &unicast,
- &unicast_mask);
+ ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
if (ret)
goto error;
}
}
return 0;
error:
- return rte_errno;
+ ret = rte_errno; /* Save rte_errno before cleanup. */
+ mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+ rte_errno = ret; /* Restore rte_errno. */
+ return -rte_errno;
}
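/*
 * A sketch (not part of the patch) of how a broadcast control flow with a
 * VLAN match is composed, mirroring the mlx5_ctrl_flow_vlan() calls above;
 * vlan_id is an assumed example value.
 */
static int
example_bcast_vlan_flow(struct rte_eth_dev *dev, uint16_t vlan_id)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_vlan vlan_spec = {
		.tci = rte_cpu_to_be_16(vlan_id), /* TCI carries the VLAN ID. */
	};
	/* Default mask from rte_flow covers the whole TCI field. */
	struct rte_flow_item_vlan vlan_mask = rte_flow_item_vlan_mask;

	return mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
				   &vlan_spec, &vlan_mask);
}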
/**
* Disable traffic flows configured by control plane
*
- * @param priv
- * Pointer to Ethernet device private data.
* @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success.
- */
-int
-priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
-{
- (void)dev;
- priv_flow_flush(priv, &priv->ctrl_flows);
- return 0;
-}
-
-/**
- * Restart traffic flows configured by control plane
- *
- * @param priv
* Pointer to Ethernet device private data.
- * @param dev
- * Pointer to Ethernet device structure.
- *
- * @return
- * 0 on success.
*/
-int
-priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
+void
+mlx5_traffic_disable(struct rte_eth_dev *dev)
{
- if (dev->data->dev_started) {
- priv_dev_traffic_disable(priv, dev);
- priv_dev_traffic_enable(priv, dev);
- }
- return 0;
+ struct priv *priv = dev->data->dev_private;
+
+ mlx5_flow_list_flush(dev, &priv->ctrl_flows);
}
/**
* Restart traffic flows configured by control plane
*
* @param dev
- * Pointer to Ethernet device structure.
+ * Pointer to Ethernet device private data.
*
* @return
- * 0 on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
-
- priv_lock(priv);
- priv_dev_traffic_restart(priv, dev);
- priv_unlock(priv);
+ if (dev->data->dev_started) {
+ mlx5_traffic_disable(dev);
+ return mlx5_traffic_enable(dev);
+ }
return 0;
}
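/*
 * A sketch (not part of the patch) of the restart pattern the configuration
 * callbacks rely on: mutate the control-plane state, then rebuild the flows
 * only when the port is already running. update_filter_state() is a
 * hypothetical helper.
 */
static int
example_reconfigure(struct rte_eth_dev *dev)
{
	update_filter_state(dev); /* e.g. edit priv->vlan_filter[]. */
	if (dev->data->dev_started)
		return mlx5_traffic_restart(dev); /* Disable then enable. */
	return 0;
}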
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index ed1c713e..f9bc4739 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -47,7 +47,8 @@ txq_alloc_elts(struct mlx5_txq_ctrl *txq_ctrl)
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
- DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
+ DRV_LOG(DEBUG, "port %u Tx queue %u allocated and configured %u WRs",
+ PORT_ID(txq_ctrl->priv), txq_ctrl->idx, elts_n);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@@ -68,7 +69,8 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
uint16_t elts_tail = txq_ctrl->txq.elts_tail;
struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
- DEBUG("%p: freeing WRs", (void *)txq_ctrl);
+ DRV_LOG(DEBUG, "port %u Tx queue %u freeing WRs",
+ PORT_ID(txq_ctrl->priv), txq_ctrl->idx);
txq_ctrl->txq.elts_head = 0;
txq_ctrl->txq.elts_tail = 0;
txq_ctrl->txq.elts_comp = 0;
@@ -91,15 +93,16 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
/**
* Returns the per-port supported offloads.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
* Supported Tx offloads.
*/
uint64_t
-mlx5_priv_get_tx_port_offloads(struct priv *priv)
+mlx5_get_tx_port_offloads(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT);
struct mlx5_dev_config *config = &priv->config;
@@ -110,6 +113,14 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv)
DEV_TX_OFFLOAD_TCP_CKSUM);
if (config->tso)
offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (config->swp) {
+ if (config->hw_csum)
+ offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ if (config->tso)
+ offloads |= (DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO);
+ }
+
if (config->tunnel_en) {
if (config->hw_csum)
offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
@@ -121,31 +132,6 @@ mlx5_priv_get_tx_port_offloads(struct priv *priv)
}
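/*
 * A sketch (not part of the patch) of the application-facing side: the
 * offload mask built above is reported through the standard ethdev info
 * query; port_id is an assumed example value, rte_ethdev.h assumed included.
 */
static int
example_tso_supported(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info); /* Fills tx_offload_capa. */
	return !!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO);
}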
/**
- * Checks if the per-queue offload configuration is valid.
- *
- * @param priv
- * Pointer to private structure.
- * @param offloads
- * Per-queue offloads configuration.
- *
- * @return
- * 1 if the configuration is valid, 0 otherwise.
- */
-static int
-priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
-{
- uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
- uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
-
- /* There are no Tx offloads which are per queue. */
- if ((offloads & port_supp_offloads) != offloads)
- return 0;
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return 0;
- return 1;
-}
-
-/**
* DPDK callback to configure a TX queue.
*
* @param dev
@@ -160,7 +146,7 @@ priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
* Thresholds parameters.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
@@ -170,64 +156,47 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mlx5_txq_data *txq = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq, struct mlx5_txq_ctrl, txq);
- int ret = 0;
- priv_lock(priv);
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if (!!(conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) &&
- !priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
- ret = ENOTSUP;
- ERROR("%p: Tx queue offloads 0x%" PRIx64 " don't match port "
- "offloads 0x%" PRIx64 " or supported offloads 0x%" PRIx64,
- (void *)dev, conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- mlx5_priv_get_tx_port_offloads(priv));
- goto out;
- }
if (desc <= MLX5_TX_COMP_THRESH) {
- WARN("%p: number of descriptors requested for TX queue %u"
- " must be higher than MLX5_TX_COMP_THRESH, using"
- " %u instead of %u",
- (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
+ DRV_LOG(WARNING,
+ "port %u number of descriptors requested for Tx queue"
+ " %u must be higher than MLX5_TX_COMP_THRESH, using %u"
+ " instead of %u",
+ dev->data->port_id, idx, MLX5_TX_COMP_THRESH + 1, desc);
desc = MLX5_TX_COMP_THRESH + 1;
}
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
- WARN("%p: increased number of descriptors in TX queue %u"
- " to the next power of two (%d)",
- (void *)dev, idx, desc);
+ DRV_LOG(WARNING,
+ "port %u increased number of descriptors in Tx queue"
+ " %u to the next power of two (%d)",
+ dev->data->port_id, idx, desc);
}
- DEBUG("%p: configuring queue %u for %u descriptors",
- (void *)dev, idx, desc);
+ DRV_LOG(DEBUG, "port %u configuring queue %u for %u descriptors",
+ dev->data->port_id, idx, desc);
if (idx >= priv->txqs_n) {
- ERROR("%p: queue index out of range (%u >= %u)",
- (void *)dev, idx, priv->txqs_n);
- priv_unlock(priv);
- return -EOVERFLOW;
+ DRV_LOG(ERR, "port %u Tx queue index out of range (%u >= %u)",
+ dev->data->port_id, idx, priv->txqs_n);
+ rte_errno = EOVERFLOW;
+ return -rte_errno;
}
- if (!mlx5_priv_txq_releasable(priv, idx)) {
- ret = EBUSY;
- ERROR("%p: unable to release queue index %u",
- (void *)dev, idx);
- goto out;
+ if (!mlx5_txq_releasable(dev, idx)) {
+ rte_errno = EBUSY;
+ DRV_LOG(ERR, "port %u unable to release queue index %u",
+ dev->data->port_id, idx);
+ return -rte_errno;
}
- mlx5_priv_txq_release(priv, idx);
- txq_ctrl = mlx5_priv_txq_new(priv, idx, desc, socket, conf);
+ mlx5_txq_release(dev, idx);
+ txq_ctrl = mlx5_txq_new(dev, idx, desc, socket, conf);
if (!txq_ctrl) {
- ERROR("%p: unable to allocate queue index %u",
- (void *)dev, idx);
- ret = ENOMEM;
- goto out;
+ DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ dev->data->port_id, idx);
+ return -rte_errno;
}
- DEBUG("%p: adding TX queue %p to list",
- (void *)dev, (void *)txq_ctrl);
+ DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
+ dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
-out:
- priv_unlock(priv);
- return -ret;
+ return 0;
}
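/*
 * A sketch (not part of the patch) of the descriptor-count adjustment
 * performed above: enforce the completion-threshold minimum, then round up
 * to the power of two the hardware ring requires (log2above() is the driver
 * helper used in the hunk).
 */
static uint16_t
example_adjust_desc(uint16_t desc)
{
	if (desc <= MLX5_TX_COMP_THRESH)
		desc = MLX5_TX_COMP_THRESH + 1; /* Minimum usable depth. */
	if (!rte_is_power_of_2(desc))
		desc = 1 << log2above(desc); /* Next power of two. */
	return desc;
}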
/**
@@ -248,15 +217,13 @@ mlx5_tx_queue_release(void *dpdk_txq)
return;
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
priv = txq_ctrl->priv;
- priv_lock(priv);
for (i = 0; (i != priv->txqs_n); ++i)
if ((*priv->txqs)[i] == txq) {
- DEBUG("%p: removing TX queue %p from list",
- (void *)priv->dev, (void *)txq_ctrl);
- mlx5_priv_txq_release(priv, i);
+ mlx5_txq_release(ETH_DEV(priv), i);
+ DRV_LOG(DEBUG, "port %u removing Tx queue %u from list",
+ PORT_ID(priv), txq_ctrl->idx);
break;
}
- priv_unlock(priv);
}
@@ -265,17 +232,18 @@ mlx5_tx_queue_release(void *dpdk_txq)
* Both primary and secondary process do mmap to make UAR address
* aligned.
*
- * @param[in] priv
- * Pointer to private structure.
+ * @param[in] dev
+ * Pointer to Ethernet device.
* @param fd
* Verbs file descriptor to map UAR pages.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-priv_tx_uar_remap(struct priv *priv, int fd)
+mlx5_tx_uar_remap(struct rte_eth_dev *dev, int fd)
{
+ struct priv *priv = dev->data->dev_private;
unsigned int i, j;
uintptr_t pages[priv->txqs_n];
unsigned int pages_n = 0;
@@ -287,7 +255,9 @@ priv_tx_uar_remap(struct priv *priv, int fd)
struct mlx5_txq_ctrl *txq_ctrl;
int already_mapped;
size_t page_size = sysconf(_SC_PAGESIZE);
- int r;
+#ifndef RTE_ARCH_64
+ unsigned int lock_idx;
+#endif
memset(pages, 0, priv->txqs_n * sizeof(uintptr_t));
/*
@@ -300,6 +270,7 @@ priv_tx_uar_remap(struct priv *priv, int fd)
continue;
txq = (*priv->txqs)[i];
txq_ctrl = container_of(txq, struct mlx5_txq_ctrl, txq);
+ assert(txq_ctrl->idx == (uint16_t)i);
/* UAR addr form verbs used to find dup and offset in page. */
uar_va = (uintptr_t)txq_ctrl->bf_reg_orig;
off = uar_va & (page_size - 1); /* offset in page. */
@@ -313,7 +284,7 @@ priv_tx_uar_remap(struct priv *priv, int fd)
}
/* new address in reserved UAR address space. */
addr = RTE_PTR_ADD(priv->uar_base,
- uar_va & (MLX5_UAR_SIZE - 1));
+ uar_va & (uintptr_t)(MLX5_UAR_SIZE - 1));
if (!already_mapped) {
pages[pages_n++] = uar_va;
/* fixed mmap to specified address in reserved
@@ -324,10 +295,12 @@ priv_tx_uar_remap(struct priv *priv, int fd)
txq_ctrl->uar_mmap_offset);
if (ret != addr) {
/* fixed mmap have to return same address */
- ERROR("call to mmap failed on UAR for txq %d\n",
- i);
- r = ENXIO;
- return r;
+ DRV_LOG(ERR,
+ "port %u call to mmap failed on UAR"
+ " for txq %u",
+ dev->data->port_id, txq_ctrl->idx);
+ rte_errno = ENXIO;
+ return -rte_errno;
}
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY) /* save once */
@@ -335,6 +308,12 @@ priv_tx_uar_remap(struct priv *priv, int fd)
else
assert(txq_ctrl->txq.bf_reg ==
RTE_PTR_ADD((void *)addr, off));
+#ifndef RTE_ARCH_64
+ /* Assign a UAR lock according to UAR page number */
+ lock_idx = (txq_ctrl->uar_mmap_offset / page_size) &
+ MLX5_UAR_PAGE_NUM_MASK;
+ txq->uar_lock = &priv->uar_lock[lock_idx];
+#endif
}
return 0;
}
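/*
 * A sketch (not part of the patch) of the address arithmetic above: a UAR
 * virtual address is split into a page base and an in-page offset so each
 * physical page is mmap()ed once and shared between queues.
 */
static void
example_split_uar_address(uintptr_t uar_va, size_t page_size,
			  uintptr_t *page, uintptr_t *off)
{
	*off = uar_va & (page_size - 1); /* Offset within the page. */
	*page = uar_va - *off;           /* Page-aligned base address. */
}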
@@ -361,17 +340,18 @@ is_empw_burst_func(eth_tx_burst_t tx_pkt_burst)
/**
* Create the Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
 * Queue index in DPDK Tx queue array.
*
* @return
- * The Verbs object initialised if it can be created.
+ * The Verbs object initialised, NULL otherwise and rte_errno is set.
*/
-struct mlx5_txq_ibv*
-mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ibv *
+mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
struct mlx5_txq_ctrl *txq_ctrl =
container_of(txq_data, struct mlx5_txq_ctrl, txq);
@@ -388,18 +368,20 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
struct mlx5dv_cq cq_info;
struct mlx5dv_obj obj;
const int desc = 1 << txq_data->elts_n;
- eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ eth_tx_burst_t tx_pkt_burst = mlx5_select_tx_function(dev);
int ret = 0;
assert(txq_data);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
priv->verbs_alloc_ctx.obj = txq_ctrl;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
- ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
- goto error;
+ DRV_LOG(ERR,
+ "port %u MLX5_ENABLE_CQE_COMPRESSION must never be set",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return NULL;
}
memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
- /* MRs will be registered in mp2mr[] later. */
attr.cq = (struct ibv_cq_init_attr_ex){
.comp_mask = 0,
};
@@ -409,7 +391,9 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = mlx5_glue->create_cq(priv->ctx, cqe_n, NULL, NULL, 0);
if (tmpl.cq == NULL) {
- ERROR("%p: CQ creation failure", (void *)txq_ctrl);
+ DRV_LOG(ERR, "port %u Tx queue %u CQ creation failure",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
attr.init = (struct ibv_qp_init_attr_ex){
@@ -450,19 +434,24 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
}
tmpl.qp = mlx5_glue->create_qp_ex(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
- ERROR("%p: QP creation failure", (void *)txq_ctrl);
+ DRV_LOG(ERR, "port %u Tx queue %u QP creation failure",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
attr.mod = (struct ibv_qp_attr){
/* Move the QP to this state. */
.qp_state = IBV_QPS_INIT,
/* Primary port number. */
- .port_num = priv->port
+ .port_num = 1,
};
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod,
(IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
- ERROR("%p: QP state to IBV_QPS_INIT failed", (void *)txq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_INIT failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
attr.mod = (struct ibv_qp_attr){
@@ -470,19 +459,27 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
};
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTR failed", (void *)txq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_RTR failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
attr.mod.qp_state = IBV_QPS_RTS;
ret = mlx5_glue->modify_qp(tmpl.qp, &attr.mod, IBV_QP_STATE);
if (ret) {
- ERROR("%p: QP state to IBV_QPS_RTS failed", (void *)txq_ctrl);
+ DRV_LOG(ERR,
+ "port %u Tx queue %u QP state to IBV_QPS_RTS failed",
+ dev->data->port_id, idx);
+ rte_errno = errno;
goto error;
}
txq_ibv = rte_calloc_socket(__func__, 1, sizeof(struct mlx5_txq_ibv), 0,
txq_ctrl->socket);
if (!txq_ibv) {
- ERROR("%p: cannot allocate memory", (void *)txq_ctrl);
+ DRV_LOG(ERR, "port %u Tx queue %u cannot allocate memory",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
goto error;
}
obj.cq.in = tmpl.cq;
@@ -490,11 +487,16 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
obj.qp.in = tmpl.qp;
obj.qp.out = &qp;
ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
- if (ret != 0)
+ if (ret != 0) {
+ rte_errno = errno;
goto error;
+ }
if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
- ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
- "it should be set to %u", RTE_CACHE_LINE_SIZE);
+ DRV_LOG(ERR,
+ "port %u wrong MLX5_CQE_SIZE environment variable"
+ " value: it should be set to %u",
+ dev->data->port_id, RTE_CACHE_LINE_SIZE);
+ rte_errno = EINVAL;
goto error;
}
txq_data->cqe_n = log2above(cq_info.cqe_cnt);
@@ -518,38 +520,46 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx)
rte_atomic32_inc(&txq_ibv->refcnt);
if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
+ DRV_LOG(DEBUG, "port %u: uar_mmap_offset 0x%lx",
+ dev->data->port_id, txq_ctrl->uar_mmap_offset);
} else {
- ERROR("Failed to retrieve UAR info, invalid libmlx5.so version");
+ DRV_LOG(ERR,
+ "port %u failed to retrieve UAR info, invalid"
+ " libmlx5.so",
+ dev->data->port_id);
+ rte_errno = EINVAL;
goto error;
}
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
- (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
LIST_INSERT_HEAD(&priv->txqsibv, txq_ibv, next);
+ txq_ibv->txq_ctrl = txq_ctrl;
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
return txq_ibv;
error:
+ ret = rte_errno; /* Save rte_errno before cleanup. */
if (tmpl.cq)
claim_zero(mlx5_glue->destroy_cq(tmpl.cq));
if (tmpl.qp)
claim_zero(mlx5_glue->destroy_qp(tmpl.qp));
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
+ rte_errno = ret; /* Restore rte_errno. */
return NULL;
}
/**
 * Get a Tx queue Verbs object.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
 * Queue index in DPDK Tx queue array.
*
* @return
* The Verbs object if it exists.
*/
-struct mlx5_txq_ibv*
-mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ibv *
+mlx5_txq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq_ctrl;
if (idx >= priv->txqs_n)
@@ -557,33 +567,24 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
if (!(*priv->txqs)[idx])
return NULL;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (txq_ctrl->ibv) {
+ if (txq_ctrl->ibv)
rte_atomic32_inc(&txq_ctrl->ibv->refcnt);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
- (void *)txq_ctrl->ibv,
- rte_atomic32_read(&txq_ctrl->ibv->refcnt));
- }
return txq_ctrl->ibv;
}
/**
 * Release a Tx Verbs queue object.
*
- * @param priv
- * Pointer to private structure.
* @param txq_ibv
* Verbs Tx queue object.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_txq_ibv_release(struct mlx5_txq_ibv *txq_ibv)
{
- (void)priv;
assert(txq_ibv);
- DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
- (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
if (rte_atomic32_dec_and_test(&txq_ibv->refcnt)) {
claim_zero(mlx5_glue->destroy_qp(txq_ibv->qp));
claim_zero(mlx5_glue->destroy_cq(txq_ibv->cq));
@@ -591,21 +592,18 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
rte_free(txq_ibv);
return 0;
}
- return EBUSY;
+ return 1;
}
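/*
 * A sketch (not part of the patch) of the reference-counting convention
 * adopted above: release now returns 1 while other references remain and 0
 * once the object is actually freed. struct example_obj is hypothetical.
 */
struct example_obj {
	rte_atomic32_t refcnt;
};

static int
example_release(struct example_obj *obj)
{
	if (rte_atomic32_dec_and_test(&obj->refcnt)) {
		rte_free(obj); /* Last reference: destroy. */
		return 0;
	}
	return 1; /* Still referenced elsewhere. */
}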
/**
* Return true if a single reference exists on the object.
*
- * @param priv
- * Pointer to private structure.
* @param txq_ibv
* Verbs Tx queue object.
*/
int
-mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_txq_ibv_releasable(struct mlx5_txq_ibv *txq_ibv)
{
- (void)priv;
assert(txq_ibv);
return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
}
@@ -613,20 +611,22 @@ mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
/**
* Verify the Verbs Tx queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_txq_ibv_verify(struct priv *priv)
+mlx5_txq_ibv_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
int ret = 0;
struct mlx5_txq_ibv *txq_ibv;
LIST_FOREACH(txq_ibv, &priv->txqsibv, next) {
- DEBUG("%p: Verbs Tx queue %p still referenced", (void *)priv,
- (void *)txq_ibv);
+ DRV_LOG(DEBUG, "port %u Verbs Tx queue %u still referenced",
+ dev->data->port_id, txq_ibv->txq_ctrl->idx);
++ret;
}
return ret;
@@ -649,9 +649,14 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
unsigned int txq_inline;
unsigned int txqs_inline;
unsigned int inline_max_packet_sz;
- eth_tx_burst_t tx_pkt_burst = priv_select_tx_function(priv, priv->dev);
+ eth_tx_burst_t tx_pkt_burst =
+ mlx5_select_tx_function(ETH_DEV(priv));
int is_empw_func = is_empw_burst_func(tx_pkt_burst);
- int tso = !!(txq_ctrl->txq.offloads & DEV_TX_OFFLOAD_TCP_TSO);
+ int tso = !!(txq_ctrl->txq.offloads & (DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO));
txq_inline = (config->txq_inline == MLX5_ARG_UNSET) ?
0 : config->txq_inline;
@@ -685,18 +690,6 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
inline_max_packet_sz) +
(RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
- } else if (tso) {
- int inline_diff = txq_ctrl->txq.max_inline -
- max_tso_inline;
-
- /*
- * Adjust inline value as Verbs aggregates
- * tso_inline and txq_inline fields.
- */
- txq_ctrl->max_inline_data = inline_diff > 0 ?
- inline_diff *
- RTE_CACHE_LINE_SIZE :
- 0;
} else {
txq_ctrl->max_inline_data =
txq_ctrl->txq.max_inline * RTE_CACHE_LINE_SIZE;
@@ -716,9 +709,10 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
max_inline = max_inline - (max_inline %
RTE_CACHE_LINE_SIZE);
- WARN("txq inline is too large (%d) setting it to "
- "the maximum possible: %d\n",
- txq_inline, max_inline);
+ DRV_LOG(WARNING,
+ "port %u txq inline is too large (%d) setting"
+ " it to the maximum possible: %d\n",
+ PORT_ID(priv), txq_inline, max_inline);
txq_ctrl->txq.max_inline = max_inline /
RTE_CACHE_LINE_SIZE;
}
@@ -729,14 +723,18 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
max_tso_inline);
txq_ctrl->txq.tso_en = 1;
}
- txq_ctrl->txq.tunnel_en = config->tunnel_en;
+ txq_ctrl->txq.tunnel_en = config->tunnel_en | config->swp;
+ txq_ctrl->txq.swp_en = ((DEV_TX_OFFLOAD_IP_TNL_TSO |
+ DEV_TX_OFFLOAD_UDP_TNL_TSO |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) &
+ txq_ctrl->txq.offloads) && config->swp;
}
/**
* Create a DPDK Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
* @param desc
@@ -747,76 +745,75 @@ txq_set_params(struct mlx5_txq_ctrl *txq_ctrl)
* Thresholds parameters.
*
* @return
- * A DPDK queue object on success.
+ * A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
- unsigned int socket,
- const struct rte_eth_txconf *conf)
+struct mlx5_txq_ctrl *
+mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ unsigned int socket, const struct rte_eth_txconf *conf)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *tmpl;
tmpl = rte_calloc_socket("TXQ", 1,
sizeof(*tmpl) +
desc * sizeof(struct rte_mbuf *),
0, socket);
- if (!tmpl)
+ if (!tmpl) {
+ rte_errno = ENOMEM;
return NULL;
+ }
+ if (mlx5_mr_btree_init(&tmpl->txq.mr_ctrl.cache_bh,
+ MLX5_MR_BTREE_CACHE_N, socket)) {
+ /* rte_errno is already set. */
+ goto error;
+ }
+ /* Save pointer of global generation number to check memory event. */
+ tmpl->txq.mr_ctrl.dev_gen_ptr = &priv->mr.dev_gen;
assert(desc > MLX5_TX_COMP_THRESH);
- tmpl->txq.offloads = conf->offloads;
+ tmpl->txq.offloads = conf->offloads |
+ dev->data->dev_conf.txmode.offloads;
tmpl->priv = priv;
tmpl->socket = socket;
tmpl->txq.elts_n = log2above(desc);
+ tmpl->idx = idx;
txq_set_params(tmpl);
- /* MRs will be registered in mp2mr[] later. */
- DEBUG("priv->device_attr.max_qp_wr is %d",
- priv->device_attr.orig_attr.max_qp_wr);
- DEBUG("priv->device_attr.max_sge is %d",
- priv->device_attr.orig_attr.max_sge);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
+ DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
+ dev->data->port_id, priv->device_attr.orig_attr.max_sge);
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
- (void *)tmpl, rte_atomic32_read(&tmpl->refcnt));
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
+error:
+ rte_free(tmpl);
+ return NULL;
}
/**
* Get a Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
* A pointer to the queue if it exists.
*/
-struct mlx5_txq_ctrl*
-mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
+struct mlx5_txq_ctrl *
+mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *ctrl = NULL;
if ((*priv->txqs)[idx]) {
ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
txq);
- unsigned int i;
-
- mlx5_priv_txq_ibv_get(priv, idx);
- for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
- struct mlx5_mr *mr = NULL;
-
- (void)mr;
- if (ctrl->txq.mp2mr[i]) {
- mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
- assert(mr);
- }
- }
+ mlx5_txq_ibv_get(dev, idx);
rte_atomic32_inc(&ctrl->refcnt);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
- (void *)ctrl, rte_atomic32_read(&ctrl->refcnt));
}
return ctrl;
}
@@ -824,57 +821,45 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
/**
* Release a Tx queue.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
* @return
- * 0 on success, errno on failure.
+ * 1 while a reference on it exists, 0 when freed.
*/
int
-mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
+mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
- unsigned int i;
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
size_t page_size = sysconf(_SC_PAGESIZE);
if (!(*priv->txqs)[idx])
return 0;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
- (void *)txq, rte_atomic32_read(&txq->refcnt));
- if (txq->ibv) {
- int ret;
-
- ret = mlx5_priv_txq_ibv_release(priv, txq->ibv);
- if (!ret)
- txq->ibv = NULL;
- }
- for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
- if (txq->txq.mp2mr[i]) {
- priv_mr_release(priv, txq->txq.mp2mr[i]);
- txq->txq.mp2mr[i] = NULL;
- }
- }
+ if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
+ txq->ibv = NULL;
if (priv->uar_base)
munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
page_size), page_size);
if (rte_atomic32_dec_and_test(&txq->refcnt)) {
txq_free_elts(txq);
+ mlx5_mr_btree_free(&txq->txq.mr_ctrl.cache_bh);
LIST_REMOVE(txq, next);
rte_free(txq);
(*priv->txqs)[idx] = NULL;
return 0;
}
- return EBUSY;
+ return 1;
}
/**
* Verify if the queue can be released.
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
* @param idx
* TX queue index.
*
@@ -882,8 +867,9 @@ mlx5_priv_txq_release(struct priv *priv, uint16_t idx)
* 1 if the queue can be released.
*/
int
-mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
+mlx5_txq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
if (!(*priv->txqs)[idx])
@@ -895,20 +881,22 @@ mlx5_priv_txq_releasable(struct priv *priv, uint16_t idx)
/**
* Verify the Tx Queue list is empty
*
- * @param priv
- * Pointer to private structure.
+ * @param dev
+ * Pointer to Ethernet device.
*
- * @return the number of object not released.
+ * @return
+ * The number of objects not released.
*/
int
-mlx5_priv_txq_verify(struct priv *priv)
+mlx5_txq_verify(struct rte_eth_dev *dev)
{
+ struct priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq;
int ret = 0;
LIST_FOREACH(txq, &priv->txqsctrl, next) {
- DEBUG("%p: Tx Queue %p still referenced", (void *)priv,
- (void *)txq);
+ DRV_LOG(DEBUG, "port %u Tx queue %u still referenced",
+ dev->data->port_id, txq->idx);
++ret;
}
return ret;
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index e1bfb9cd..886f60e6 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#ifndef RTE_PMD_MLX5_UTILS_H_
@@ -61,14 +61,21 @@ pmd_drv_log_basename(const char *s)
return s;
}
+extern int mlx5_logtype;
+
+#define PMD_DRV_LOG___(level, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ mlx5_logtype, \
+ RTE_FMT(MLX5_DRIVER_NAME ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
/*
* When debugging is enabled (NDEBUG not defined), file, line and function
* information replace the driver name (MLX5_DRIVER_NAME) in log messages.
*/
#ifndef NDEBUG
-#define PMD_DRV_LOG___(level, ...) \
- ERRNO_SAFE(RTE_LOG(level, PMD, __VA_ARGS__))
#define PMD_DRV_LOG__(level, ...) \
PMD_DRV_LOG___(level, "%s:%u: %s(): " __VA_ARGS__)
#define PMD_DRV_LOG_(level, s, ...) \
@@ -80,9 +87,6 @@ pmd_drv_log_basename(const char *s)
__VA_ARGS__)
#else /* NDEBUG */
-
-#define PMD_DRV_LOG___(level, ...) \
- ERRNO_SAFE(RTE_LOG(level, PMD, MLX5_DRIVER_NAME ": " __VA_ARGS__))
#define PMD_DRV_LOG__(level, ...) \
PMD_DRV_LOG___(level, __VA_ARGS__)
#define PMD_DRV_LOG_(level, s, ...) \
@@ -91,18 +95,15 @@ pmd_drv_log_basename(const char *s)
#endif /* NDEBUG */
/* Generic printf()-like logging macro with automatic line feed. */
-#define PMD_DRV_LOG(level, ...) \
+#define DRV_LOG(level, ...) \
PMD_DRV_LOG_(level, \
__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
PMD_DRV_LOG_CPAREN)
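/*
 * A sketch (not part of the patch) of DRV_LOG() usage inside a function:
 * messages now go through the dynamic mlx5_logtype and follow the driver's
 * "port %u" prefix convention; the line feed is appended automatically.
 */
DRV_LOG(ERR, "port %u Rx queue %u allocation failed: %s",
	dev->data->port_id, idx, strerror(rte_errno));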
-/*
- * Like assert(), DEBUG() becomes a no-op and claim_zero() does not perform
- * any check when debugging is disabled.
- */
+/* claim_zero() does not perform any check when debugging is disabled. */
#ifndef NDEBUG
-#define DEBUG(...) PMD_DRV_LOG(DEBUG, __VA_ARGS__)
+#define DEBUG(...) DRV_LOG(DEBUG, __VA_ARGS__)
#define claim_zero(...) assert((__VA_ARGS__) == 0)
#define claim_nonzero(...) assert((__VA_ARGS__) != 0)
@@ -114,9 +115,9 @@ pmd_drv_log_basename(const char *s)
#endif /* NDEBUG */
-#define INFO(...) PMD_DRV_LOG(INFO, __VA_ARGS__)
-#define WARN(...) PMD_DRV_LOG(WARNING, __VA_ARGS__)
-#define ERROR(...) PMD_DRV_LOG(ERR, __VA_ARGS__)
+#define INFO(...) DRV_LOG(INFO, __VA_ARGS__)
+#define WARN(...) DRV_LOG(WARNING, __VA_ARGS__)
+#define ERROR(...) DRV_LOG(ERR, __VA_ARGS__)
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c
index 75c34562..c91d08be 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2015 6WIND S.A.
- * Copyright 2015 Mellanox.
+ * Copyright 2015 Mellanox Technologies, Ltd
*/
#include <stddef.h>
@@ -8,6 +8,12 @@
#include <assert.h>
#include <stdint.h>
+/*
+ * Not needed by this file; included to work around the lack of off_t
+ * definition for mlx5dv.h with unpatched rdma-core versions.
+ */
+#include <sys/types.h>
+
/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
@@ -37,26 +43,24 @@
* Toggle filter.
*
* @return
- * 0 on success, negative errno value on failure.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct priv *priv = dev->data->dev_private;
unsigned int i;
- int ret = 0;
- priv_lock(priv);
- DEBUG("%p: %s VLAN filter ID %" PRIu16,
- (void *)dev, (on ? "enable" : "disable"), vlan_id);
+ DRV_LOG(DEBUG, "port %u %s VLAN filter ID %" PRIu16,
+ dev->data->port_id, (on ? "enable" : "disable"), vlan_id);
assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
for (i = 0; (i != priv->vlan_filter_n); ++i)
if (priv->vlan_filter[i] == vlan_id)
break;
/* Check if there's room for another VLAN filter. */
if (i == RTE_DIM(priv->vlan_filter)) {
- ret = -ENOMEM;
- goto out;
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
if (i < priv->vlan_filter_n) {
assert(priv->vlan_filter_n != 0);
@@ -79,37 +83,49 @@ mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
++priv->vlan_filter_n;
}
- if (dev->data->dev_started)
- priv_dev_traffic_restart(priv, dev);
out:
- priv_unlock(priv);
- return ret;
+ if (dev->data->dev_started)
+ return mlx5_traffic_restart(dev);
+ return 0;
}
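/*
 * A sketch (not part of the patch) of the application-facing side of this
 * callback: rte_eth_dev_vlan_filter() lands here, and a started port
 * re-applies its control flows through mlx5_traffic_restart().
 */
static int
example_add_vlan(uint16_t port_id, uint16_t vlan_id)
{
	return rte_eth_dev_vlan_filter(port_id, vlan_id, 1 /* on */);
}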
/**
- * Set/reset VLAN stripping for a specific queue.
+ * Callback to set/reset VLAN stripping for a specific queue.
*
- * @param priv
- * Pointer to private structure.
- * @param idx
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue
* RX queue index.
* @param on
* Enable/disable VLAN stripping.
*/
-static void
-priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
+void
+mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
{
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[queue];
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
struct ibv_wq_attr mod;
uint16_t vlan_offloads =
(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
0;
- int err;
+ int ret;
- DEBUG("set VLAN offloads 0x%x for port %d queue %d",
- vlan_offloads, rxq->port_id, idx);
+ /* Validate hw support */
+ if (!priv->config.hw_vlan_strip) {
+ DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+ dev->data->port_id);
+ return;
+ }
+ /* Validate queue number */
+ if (queue >= priv->rxqs_n) {
+ DRV_LOG(ERR, "port %u VLAN stripping, invalid queue number %d",
+ dev->data->port_id, queue);
+ return;
+ }
+ DRV_LOG(DEBUG, "port %u set VLAN offloads 0x%x for port %uqueue %d",
+ dev->data->port_id, vlan_offloads, rxq->port_id, queue);
if (!rxq_ctrl->ibv) {
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
@@ -120,57 +136,26 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on)
.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
.flags = vlan_offloads,
};
-
- err = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
- if (err) {
- ERROR("%p: failed to modified stripping mode: %s",
- (void *)priv, strerror(err));
+ ret = mlx5_glue->modify_wq(rxq_ctrl->ibv->wq, &mod);
+ if (ret) {
+ DRV_LOG(ERR, "port %u failed to modified stripping mode: %s",
+ dev->data->port_id, strerror(rte_errno));
return;
}
-
/* Update related bits in RX queue. */
rxq->vlan_strip = !!on;
}
/**
- * Callback to set/reset VLAN stripping for a specific queue.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param queue
- * RX queue index.
- * @param on
- * Enable/disable VLAN stripping.
- */
-void
-mlx5_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
-{
- struct priv *priv = dev->data->dev_private;
-
- /* Validate hw support */
- if (!priv->config.hw_vlan_strip) {
- ERROR("VLAN stripping is not supported");
- return;
- }
-
- /* Validate queue number */
- if (queue >= priv->rxqs_n) {
- ERROR("VLAN stripping, invalid queue number %d", queue);
- return;
- }
-
- priv_lock(priv);
- priv_vlan_strip_queue_set(priv, queue, on);
- priv_unlock(priv);
-}
-
-/**
* Callback to set/reset VLAN offloads for a port.
*
* @param dev
* Pointer to Ethernet device structure.
* @param mask
* VLAN offload bit mask.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
@@ -183,16 +168,13 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask)
DEV_RX_OFFLOAD_VLAN_STRIP);
if (!priv->config.hw_vlan_strip) {
- ERROR("VLAN stripping is not supported");
+ DRV_LOG(ERR, "port %u VLAN stripping is not supported",
+ dev->data->port_id);
return 0;
}
-
/* Run on every RX queue and set/reset VLAN stripping. */
- priv_lock(priv);
for (i = 0; (i != priv->rxqs_n); i++)
- priv_vlan_strip_queue_set(priv, i, hw_vlan_strip);
- priv_unlock(priv);
+ mlx5_vlan_strip_queue_set(dev, i, hw_vlan_strip);
}
-
return 0;
}
diff --git a/drivers/net/mrvl/Makefile b/drivers/net/mrvl/Makefile
deleted file mode 100644
index f75e53c6..00000000
--- a/drivers/net/mrvl/Makefile
+++ /dev/null
@@ -1,68 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Marvell International Ltd.
-# Copyright(c) 2017 Semihalf.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(MAKECMDGOALS),config)
-ifeq ($(LIBMUSDK_PATH),)
-$(error "Please define LIBMUSDK_PATH environment variable")
-endif
-endif
-endif
-
-# library name
-LIB = librte_pmd_mrvl.a
-
-# library version
-LIBABIVER := 1
-
-# versioning export map
-EXPORT_MAP := rte_pmd_mrvl_version.map
-
-# external library dependencies
-CFLAGS += -I$(LIBMUSDK_PATH)/include
-CFLAGS += -DMVCONF_TYPES_PUBLIC
-CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -O3
-LDLIBS += -L$(LIBMUSDK_PATH)/lib
-LDLIBS += -lmusdk
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
-LDLIBS += -lrte_bus_vdev
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += mrvl_qos.c
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mvpp2/Makefile b/drivers/net/mvpp2/Makefile
new file mode 100644
index 00000000..492aef97
--- /dev/null
+++ b/drivers/net/mvpp2/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvpp2.a
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvpp2_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+LDLIBS += -L$(LIBMUSDK_PATH)/lib
+LDLIBS += -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_cfgfile
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_qos.c
+SRCS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += mrvl_flow.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mvpp2/meson.build b/drivers/net/mvpp2/meson.build
new file mode 100644
index 00000000..e1398895
--- /dev/null
+++ b/drivers/net/mvpp2/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs : [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files(
+ 'mrvl_ethdev.c',
+ 'mrvl_flow.c',
+ 'mrvl_qos.c'
+)
+
+deps += ['cfgfile']
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mvpp2/mrvl_ethdev.c
index 705c4bd8..a2d0576e 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mvpp2/mrvl_ethdev.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Semihalf nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <rte_ethdev_driver.h>
@@ -122,6 +94,8 @@ struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
+int mrvl_logtype;
+
struct mrvl_ifnames {
const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
int idx;
@@ -162,6 +136,7 @@ struct mrvl_txq {
int port_id;
uint64_t bytes_sent;
struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
+ int tx_deferred_start;
};
static int mrvl_lcore_first;
@@ -173,6 +148,32 @@ static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
struct pp2_hif *hif, unsigned int core_id,
struct mrvl_shadow_txq *sq, int qid, int force);
+#define MRVL_XSTATS_TBL_ENTRY(name) { \
+ #name, offsetof(struct pp2_ppio_statistics, name), \
+ sizeof(((struct pp2_ppio_statistics *)0)->name) \
+}
+
+/* Table with xstats data */
+static struct {
+ const char *name;
+ unsigned int offset;
+ unsigned int size;
+} mrvl_xstats_tbl[] = {
+ MRVL_XSTATS_TBL_ENTRY(rx_bytes),
+ MRVL_XSTATS_TBL_ENTRY(rx_packets),
+ MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
+ MRVL_XSTATS_TBL_ENTRY(rx_errors),
+ MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
+ MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
+ MRVL_XSTATS_TBL_ENTRY(tx_bytes),
+ MRVL_XSTATS_TBL_ENTRY(tx_packets),
+ MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
+ MRVL_XSTATS_TBL_ENTRY(tx_errors)
+};
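/*
 * A sketch (not part of the patch) of the table-driven extraction these
 * entries enable: each entry records a field's offset and width, so a
 * reader can pull the value straight out of the statistics struct, as
 * mrvl_xstats_get() does further below.
 */
static uint64_t
example_read_xstat(const struct pp2_ppio_statistics *stats, unsigned int i)
{
	const uint8_t *base = (const uint8_t *)stats;

	if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
		return *(const uint32_t *)(base + mrvl_xstats_tbl[i].offset);
	return *(const uint64_t *)(base + mrvl_xstats_tbl[i].offset);
}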
+
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
@@ -207,7 +208,7 @@ mrvl_init_hif(int core_id)
ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
return ret;
}
@@ -217,7 +218,7 @@ mrvl_init_hif(int core_id)
params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
ret = pp2_hif_init(&params, &hifs[core_id]);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
return ret;
}
@@ -236,7 +237,7 @@ mrvl_get_hif(struct mrvl_priv *priv, int core_id)
ret = mrvl_init_hif(core_id);
if (ret < 0) {
- RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
+ MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
goto out;
}
@@ -266,7 +267,7 @@ static int
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
{
if (rss_conf->rss_key)
- RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n");
+ MRVL_LOG(WARNING, "Changing hash key is not supported");
if (rss_conf->rss_hf == 0) {
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
@@ -308,34 +309,21 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
- RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
+ MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
dev->data->dev_conf.rxmode.mq_mode);
return -EINVAL;
}
- if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
- RTE_LOG(INFO, PMD,
- "L2 CRC stripping is always enabled in hw\n");
+ /* The KEEP_CRC offload flag is not supported by the PMD;
+ * the block below can be removed once DEV_RX_OFFLOAD_CRC_STRIP
+ * is removed.
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+ MRVL_LOG(INFO, "L2 CRC stripping is always enabled in hw");
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
- RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
- return -EINVAL;
- }
-
if (dev->data->dev_conf.rxmode.split_hdr_size) {
- RTE_LOG(INFO, PMD, "Split headers not supported\n");
- return -EINVAL;
- }
-
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
- RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
- return -EINVAL;
- }
-
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
- RTE_LOG(INFO, PMD, "LRO not supported\n");
+ MRVL_LOG(INFO, "Split headers not supported");
return -EINVAL;
}
@@ -348,13 +336,18 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
if (ret < 0)
return ret;
+ ret = mrvl_configure_txqs(priv, dev->data->port_id,
+ dev->data->nb_tx_queues);
+ if (ret < 0)
+ return ret;
+
priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
priv->ppio_params.maintain_stats = 1;
priv->nb_rx_queues = dev->data->nb_rx_queues;
if (dev->data->nb_rx_queues == 1 &&
dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
- RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n");
+ MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
return 0;
@@ -456,6 +449,70 @@ mrvl_dev_set_link_down(struct rte_eth_dev *dev)
}
/**
+ * DPDK callback to start tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue_id
+ * Transmit queue index.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv)
+ return -EPERM;
+
+ /* passing 1 enables given tx queue */
+ ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
+ return ret;
+ }
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to stop tx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param queue_id
+ * Transmit queue index.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret;
+
+ if (!priv->ppio)
+ return -EPERM;
+
+ /* passing 0 disables given tx queue */
+ ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
+ return ret;
+ }
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
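/*
 * A sketch (not part of the patch) of how an application exercises these
 * callbacks: request deferred start in the queue configuration, then start
 * the queue explicitly once it is ready; port_id/queue_id are example
 * values.
 */
static int
example_deferred_txq(uint16_t port_id, uint16_t queue_id, uint16_t nb_desc,
		     unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		.tx_deferred_start = 1, /* Skipped by rte_eth_dev_start(). */
	};
	int ret;

	ret = rte_eth_tx_queue_setup(port_id, queue_id, nb_desc, socket_id,
				     &txconf);
	if (ret < 0)
		return ret;
	/* ... rte_eth_dev_start(port_id) ... */
	return rte_eth_dev_tx_queue_start(port_id, queue_id);
}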
+
+/**
* DPDK callback to start the device.
*
* @param dev
@@ -469,7 +526,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
{
struct mrvl_priv *priv = dev->data->dev_private;
char match[MRVL_MATCH_LEN];
- int ret = 0, def_init_size;
+ int ret = 0, i, def_init_size;
snprintf(match, sizeof(match), "ppio-%d:%d",
priv->pp_id, priv->ppio_id);
@@ -493,7 +550,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
priv->bpool_init_size += buffs_to_add;
ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
+ MRVL_LOG(ERR, "Failed to add buffers to bpool");
}
/*
@@ -508,7 +565,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to init ppio\n");
+ MRVL_LOG(ERR, "Failed to init ppio");
return ret;
}
@@ -521,8 +578,8 @@ mrvl_dev_start(struct rte_eth_dev *dev)
if (!priv->uc_mc_flushed) {
ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Failed to flush uc/mc filter list\n");
+ MRVL_LOG(ERR,
+ "Failed to flush uc/mc filter list");
goto out;
}
priv->uc_mc_flushed = 1;
@@ -531,7 +588,7 @@ mrvl_dev_start(struct rte_eth_dev *dev)
if (!priv->vlan_flushed) {
ret = pp2_ppio_flush_vlan(priv->ppio);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
+ MRVL_LOG(ERR, "Failed to flush vlan list");
/*
* TODO
* once pp2_ppio_flush_vlan() is supported jump to out
@@ -545,20 +602,38 @@ mrvl_dev_start(struct rte_eth_dev *dev)
if (mrvl_qos_cfg) {
ret = mrvl_start_qos_mapping(priv);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
+ MRVL_LOG(ERR, "Failed to setup QoS mapping");
goto out;
}
}
ret = mrvl_dev_set_link_up(dev);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to set link up\n");
+ MRVL_LOG(ERR, "Failed to set link up");
goto out;
}
+ /* start tx queues */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct mrvl_txq *txq = dev->data->tx_queues[i];
+
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ if (!txq->tx_deferred_start)
+ continue;
+
+ /*
+ * All txqs are started by default. Stop them
+ * so that tx_deferred_start works as expected.
+ */
+ ret = mrvl_tx_queue_stop(dev, i);
+ if (ret)
+ goto out;
+ }
+
return 0;
out:
- RTE_LOG(ERR, PMD, "Failed to start device\n");
+ MRVL_LOG(ERR, "Failed to start device");
pp2_ppio_deinit(priv->ppio);
return ret;
}
@@ -574,7 +649,7 @@ mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
int i;
- RTE_LOG(INFO, PMD, "Flushing rx queues\n");
+ MRVL_LOG(INFO, "Flushing rx queues");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
int ret, num;
@@ -603,7 +678,7 @@ mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
int i, j;
struct mrvl_txq *txq;
- RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
+ MRVL_LOG(INFO, "Flushing tx shadow queues");
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = (struct mrvl_txq *)dev->data->tx_queues[i];
@@ -651,7 +726,7 @@ mrvl_flush_bpool(struct rte_eth_dev *dev)
ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
+ MRVL_LOG(ERR, "Failed to get bpool buffers number");
return;
}
@@ -682,12 +757,23 @@ mrvl_dev_stop(struct rte_eth_dev *dev)
mrvl_dev_set_link_down(dev);
mrvl_flush_rx_queues(dev);
mrvl_flush_tx_shadow_queues(dev);
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
if (priv->qos_tbl) {
pp2_cls_qos_tbl_deinit(priv->qos_tbl);
priv->qos_tbl = NULL;
}
- pp2_ppio_deinit(priv->ppio);
+ if (priv->ppio)
+ pp2_ppio_deinit(priv->ppio);
priv->ppio = NULL;
+
+ /* policer must be released after ppio deinitialization */
+ if (priv->policer) {
+ pp2_cls_plcr_deinit(priv->policer);
+ priv->policer = NULL;
+ }
}
/**
@@ -800,9 +886,12 @@ mrvl_promiscuous_enable(struct rte_eth_dev *dev)
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_set_promisc(priv->ppio, 1);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
+ MRVL_LOG(ERR, "Failed to enable promiscuous mode");
}
/**
@@ -820,9 +909,12 @@ mrvl_allmulticast_enable(struct rte_eth_dev *dev)
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
if (ret)
- RTE_LOG(ERR, PMD, "Failed enable all-multicast mode\n");
+ MRVL_LOG(ERR, "Failed enable all-multicast mode");
}
/**
@@ -842,7 +934,7 @@ mrvl_promiscuous_disable(struct rte_eth_dev *dev)
ret = pp2_ppio_set_promisc(priv->ppio, 0);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
+ MRVL_LOG(ERR, "Failed to disable promiscuous mode");
}
/**
@@ -862,7 +954,7 @@ mrvl_allmulticast_disable(struct rte_eth_dev *dev)
ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
+ MRVL_LOG(ERR, "Failed to disable all-multicast mode");
}
/**
@@ -883,12 +975,15 @@ mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
if (!priv->ppio)
return;
+ if (priv->isolated)
+ return;
+
ret = pp2_ppio_remove_mac_addr(priv->ppio,
dev->data->mac_addrs[index].addr_bytes);
if (ret) {
ether_format_addr(buf, sizeof(buf),
&dev->data->mac_addrs[index]);
- RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
+ MRVL_LOG(ERR, "Failed to remove mac %s", buf);
}
}
@@ -915,6 +1010,9 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
char buf[ETHER_ADDR_FMT_SIZE];
int ret;
+ if (priv->isolated)
+ return -ENOTSUP;
+
if (index == 0)
/* For setting index 0, mrvl_mac_addr_set() should be used.*/
return -1;
@@ -938,7 +1036,7 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
ether_format_addr(buf, sizeof(buf), mac_addr);
- RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
+ MRVL_LOG(ERR, "Failed to add mac %s", buf);
return -1;
}
@@ -952,22 +1050,30 @@ mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
* Pointer to Ethernet device structure.
* @param mac_addr
* MAC address to register.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
*/
-static void
+static int
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct mrvl_priv *priv = dev->data->dev_private;
int ret;
if (!priv->ppio)
- return;
+ return 0;
+
+ if (priv->isolated)
+ return -ENOTSUP;
ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
if (ret) {
char buf[ETHER_ADDR_FMT_SIZE];
ether_format_addr(buf, sizeof(buf), mac_addr);
- RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf);
+ MRVL_LOG(ERR, "Failed to set mac to %s", buf);
}
+
+ return ret;
}
/**
@@ -1001,8 +1107,8 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
idx = rxq->queue_id;
if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
- RTE_LOG(ERR, PMD,
- "rx queue %d stats out of range (0 - %d)\n",
+ MRVL_LOG(ERR,
+ "rx queue %d stats out of range (0 - %d)",
idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
continue;
}
@@ -1012,8 +1118,8 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
priv->rxq_map[idx].inq,
&rx_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD,
- "Failed to update rx queue %d stats\n", idx);
+ MRVL_LOG(ERR,
+ "Failed to update rx queue %d stats", idx);
break;
}
@@ -1036,16 +1142,16 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
idx = txq->queue_id;
if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
- RTE_LOG(ERR, PMD,
- "tx queue %d stats out of range (0 - %d)\n",
+ MRVL_LOG(ERR,
+ "tx queue %d stats out of range (0 - %d)",
idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
}
ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
&tx_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD,
- "Failed to update tx queue %d stats\n", idx);
+ MRVL_LOG(ERR,
+ "Failed to update tx queue %d stats", idx);
break;
}
@@ -1056,7 +1162,7 @@ mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
if (unlikely(ret)) {
- RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
+ MRVL_LOG(ERR, "Failed to update port statistics");
return ret;
}
@@ -1107,6 +1213,90 @@ mrvl_stats_reset(struct rte_eth_dev *dev)
}
/**
+ * DPDK callback to get extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param stats
+ * Pointer to xstats table.
+ * @param n
+ * Number of entries in xstats table.
+ * @return
+ * Negative value on error, number of xstats read otherwise.
+ */
+static int
+mrvl_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats, unsigned int n)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_ppio_statistics ppio_stats;
+ unsigned int i;
+
+ if (!stats)
+ return 0;
+
+ pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
+ for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
+ uint64_t val;
+
+ if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
+ val = *(uint32_t *)((uint8_t *)&ppio_stats +
+ mrvl_xstats_tbl[i].offset);
+ else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
+ val = *(uint64_t *)((uint8_t *)&ppio_stats +
+ mrvl_xstats_tbl[i].offset);
+ else
+ return -EINVAL;
+
+ stats[i].id = i;
+ stats[i].value = val;
+ }
+
+ return n;
+}
+
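mrvl_xstats_tbl itself is defined earlier in mrvl_ethdev.c, outside this hunk. For the offset/size arithmetic above to work, each entry only needs to pair a counter name with the location and width of a field in struct pp2_ppio_statistics; a plausible shape (illustrative only, the real table may differ in detail):

    struct mrvl_xstats_tbl_entry {
            const char *name;    /* counter name exposed to applications */
            unsigned int offset; /* offsetof() the field in pp2_ppio_statistics */
            unsigned int size;   /* sizeof() the field: 4 or 8 bytes */
    };

    #define MRVL_XSTATS_TBL_ENTRY(field) {                          \
            #field, offsetof(struct pp2_ppio_statistics, field),    \
            sizeof(((struct pp2_ppio_statistics *)0)->field)        \
    }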
+/**
+ * DPDK callback to reset extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+static void
+mrvl_xstats_reset(struct rte_eth_dev *dev)
+{
+ mrvl_stats_reset(dev);
+}
+
+/**
+ * DPDK callback to get extended statistics names.
+ *
+ * @param dev (unused)
+ * Pointer to Ethernet device structure.
+ * @param xstats_names
+ * Pointer to xstats names table.
+ * @param size
+ * Size of the xstats names table.
+ * @return
+ * Number of names read.
+ */
+static int
+mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size)
+{
+ unsigned int i;
+
+ if (!xstats_names)
+ return RTE_DIM(mrvl_xstats_tbl);
+
+ for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
+ snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
+ mrvl_xstats_tbl[i].name);
+
+ return size;
+}
+
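For context, a minimal sketch (not part of this patch) of how an application consumes these callbacks through the generic ethdev API; dump_xstats() is a hypothetical helper:

    #include <stdio.h>
    #include <stdlib.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static void
    dump_xstats(uint16_t port_id)
    {
            struct rte_eth_xstat_name *names;
            struct rte_eth_xstat *stats;
            int i, n;

            /* With a NULL table the call returns the number of counters. */
            n = rte_eth_xstats_get_names(port_id, NULL, 0);
            if (n <= 0)
                    return;

            names = calloc(n, sizeof(*names));
            stats = calloc(n, sizeof(*stats));
            if (names && stats &&
                rte_eth_xstats_get_names(port_id, names, n) == n &&
                rte_eth_xstats_get(port_id, stats, n) == n)
                    for (i = 0; i < n; i++)
                            printf("%s: %" PRIu64 "\n",
                                   names[stats[i].id].name, stats[i].value);

            free(names);
            free(stats);
    }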
+/**
* DPDK callback to get information about the device.
*
* @param dev
@@ -1217,9 +1407,11 @@ static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
struct rte_eth_txq_info *qinfo)
{
struct mrvl_priv *priv = dev->data->dev_private;
+ struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
qinfo->nb_desc =
priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
/**
@@ -1243,6 +1435,9 @@ mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
if (!priv->ppio)
return -EPERM;
+ if (priv->isolated)
+ return -ENOTSUP;
+
return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}
@@ -1261,8 +1456,8 @@ mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
- struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
- struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
+ struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
+ struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
int i, ret;
unsigned int core_id;
struct pp2_hif *hif;
@@ -1289,8 +1484,8 @@ mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
for (i = 0; i < num; i++) {
if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
!= cookie_addr_high) {
- RTE_LOG(ERR, PMD,
- "mbuf virtual addr high 0x%lx out of range\n",
+ MRVL_LOG(ERR,
+ "mbuf virtual addr high 0x%lx out of range",
(uint64_t)mbufs[i] >> 32);
goto out;
}
@@ -1316,42 +1511,6 @@ out:
}
/**
- * Check whether requested rx queue offloads match port offloads.
- *
- * @param
- * dev Pointer to the device.
- * @param
- * requested Bitmap of the requested offloads.
- *
- * @return
- * 1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = MRVL_RX_OFFLOADS;
- uint64_t unsupported = requested & ~supported;
- uint64_t missing = mandatory & ~requested;
-
- if (unsupported) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- requested, supported);
- return 0;
- }
-
- if (missing) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
- "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
- requested, missing);
- return 0;
- }
-
- return 1;
-}
-
-/**
* DPDK callback to configure the receive queue.
*
* @param dev
@@ -1381,15 +1540,15 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
uint32_t min_size,
max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
int ret, tc, inq;
+ uint64_t offloads;
- if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
- return -ENOTSUP;
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
/*
* Unknown TC mapping, mapping will not have a correct queue.
*/
- RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
+ MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
idx, priv->ppio_id);
return -EFAULT;
}
@@ -1397,8 +1556,8 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
MRVL_PKT_EFFEC_OFFS;
if (min_size < max_rx_pkt_len) {
- RTE_LOG(ERR, PMD,
- "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
+ MRVL_LOG(ERR,
+ "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
MRVL_PKT_EFFEC_OFFS,
max_rx_pkt_len);
@@ -1416,8 +1575,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
rxq->priv = priv;
rxq->mp = mp;
- rxq->cksum_enabled =
- dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
+ rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
rxq->queue_id = idx;
rxq->port_id = dev->data->port_id;
mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
@@ -1458,9 +1616,12 @@ mrvl_rx_queue_release(void *rxq)
if (core_id == LCORE_ID_ANY)
core_id = 0;
+ if (!q)
+ return;
+
hif = mrvl_get_hif(q->priv, core_id);
- if (!q || !hif)
+ if (!hif)
return;
tc = q->priv->rxq_map[q->queue_id].tc;
@@ -1480,42 +1641,6 @@ mrvl_rx_queue_release(void *rxq)
}
/**
- * Check whether requested tx queue offloads match port offloads.
- *
- * @param
- * dev Pointer to the device.
- * @param
- * requested Bitmap of the requested offloads.
- *
- * @return
- * 1 if requested offloads are okay, 0 otherwise.
- */
-static int
-mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
-{
- uint64_t mandatory = dev->data->dev_conf.txmode.offloads;
- uint64_t supported = MRVL_TX_OFFLOADS;
- uint64_t unsupported = requested & ~supported;
- uint64_t missing = mandatory & ~requested;
-
- if (unsupported) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- requested, supported);
- return 0;
- }
-
- if (missing) {
- RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
- "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
- requested, missing);
- return 0;
- }
-
- return 1;
-}
-
-/**
* DPDK callback to configure the transmit queue.
*
* @param dev
@@ -1527,7 +1652,7 @@ mrvl_tx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
* @param socket
* NUMA socket on which memory must be allocated.
* @param conf
- * Thresholds parameters.
+ * Tx queue configuration parameters.
*
* @return
* 0 on success, negative error value otherwise.
@@ -1540,9 +1665,6 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_txq *txq;
- if (!mrvl_tx_queue_offloads_okay(dev, conf->offloads))
- return -ENOTSUP;
-
if (dev->data->tx_queues[idx]) {
rte_free(dev->data->tx_queues[idx]);
dev->data->tx_queues[idx] = NULL;
@@ -1555,10 +1677,10 @@ mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
txq->priv = priv;
txq->queue_id = idx;
txq->port_id = dev->data->port_id;
+ txq->tx_deferred_start = conf->tx_deferred_start;
dev->data->tx_queues[idx] = txq;
priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
- priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
return 0;
}
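With tx_deferred_start recorded here and the tx_queue_start/stop callbacks added to mrvl_ops below, a TX queue can be kept inactive across device start. A minimal sketch (port_id assumed configured, error handling omitted):

    struct rte_eth_txconf txconf = {
            .tx_deferred_start = 1, /* skip this queue in rte_eth_dev_start() */
    };

    rte_eth_tx_queue_setup(port_id, 0 /* queue */, 512 /* descriptors */,
                           rte_socket_id(), &txconf);
    rte_eth_dev_start(port_id);             /* queue 0 stays stopped */
    rte_eth_dev_tx_queue_start(port_id, 0); /* start it explicitly later */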
@@ -1581,6 +1703,82 @@ mrvl_tx_queue_release(void *txq)
}
/**
+ * DPDK callback to get flow control configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param fc_conf
+ * Pointer to the flow control configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ int ret, en;
+
+ if (!priv)
+ return -EPERM;
+
+ ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to read rx pause state");
+ return ret;
+ }
+
+ fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to set flow control configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param fc_conf
+ * Pointer to the flow control configuration.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (!priv)
+ return -EPERM;
+
+ if (fc_conf->high_water ||
+ fc_conf->low_water ||
+ fc_conf->pause_time ||
+ fc_conf->mac_ctrl_frame_fwd ||
+ fc_conf->autoneg) {
+ MRVL_LOG(ERR, "Flowctrl parameter is not supported");
+
+ return -EINVAL;
+ }
+
+ if (fc_conf->mode == RTE_FC_NONE ||
+ fc_conf->mode == RTE_FC_RX_PAUSE) {
+ int ret, en;
+
+ en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
+ ret = pp2_ppio_set_rx_pause(priv->ppio, en);
+ if (ret)
+ MRVL_LOG(ERR,
+ "Failed to change flowctrl on RX side");
+
+ return ret;
+ }
+
+ return 0;
+}
+
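For context, the only configuration an application can successfully pass down to this callback: thresholds zeroed and mode limited to RTE_FC_NONE or RTE_FC_RX_PAUSE. A minimal sketch (port_id assumed valid, error handling omitted):

    struct rte_eth_fc_conf fc_conf;

    memset(&fc_conf, 0, sizeof(fc_conf)); /* non-zero thresholds => -EINVAL */
    fc_conf.mode = RTE_FC_RX_PAUSE;       /* or RTE_FC_NONE to disable */
    rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);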
+/**
* Update RSS hash configuration
*
* @param dev
@@ -1597,6 +1795,9 @@ mrvl_rss_hash_update(struct rte_eth_dev *dev,
{
struct mrvl_priv *priv = dev->data->dev_private;
+ if (priv->isolated)
+ return -ENOTSUP;
+
return mrvl_configure_rss(priv, rss_conf);
}
@@ -1633,6 +1834,39 @@ mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+/**
+ * DPDK callback to get rte_flow callbacks.
+ *
+ * @param dev
+ * Pointer to the device structure.
+ * @param filter_type
+ * Flow filter type.
+ * @param filter_op
+ * Flow filter operation.
+ * @param arg
+ * Pointer to pass the flow ops.
+ *
+ * @return
+ * 0 on success, negative error value otherwise.
+ */
+static int
+mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mrvl_flow_ops;
+ return 0;
+ default:
+ MRVL_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ return -EINVAL;
+ }
+}
+
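This callback is the hook through which the generic rte_flow layer discovers the driver's flow ops. A simplified sketch of the dispatch librte_ethdev performs (illustrative, not code from this patch; port_id, attr, pattern, actions and error are assumed to exist):

    const struct rte_flow_ops *ops = NULL;
    struct rte_eth_dev *dev = &rte_eth_devices[port_id];

    /* rte_flow_create() and friends fetch the ops table this way ... */
    if (dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
                                  RTE_ETH_FILTER_GET, &ops) == 0)
            /* ... and then dispatch through it, here mrvl_flow_ops. */
            ops->create(dev, attr, pattern, actions, error);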
static const struct eth_dev_ops mrvl_ops = {
.dev_configure = mrvl_dev_configure,
.dev_start = mrvl_dev_start,
@@ -1651,17 +1885,25 @@ static const struct eth_dev_ops mrvl_ops = {
.mtu_set = mrvl_mtu_set,
.stats_get = mrvl_stats_get,
.stats_reset = mrvl_stats_reset,
+ .xstats_get = mrvl_xstats_get,
+ .xstats_reset = mrvl_xstats_reset,
+ .xstats_get_names = mrvl_xstats_get_names,
.dev_infos_get = mrvl_dev_infos_get,
.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
.rxq_info_get = mrvl_rxq_info_get,
.txq_info_get = mrvl_txq_info_get,
.vlan_filter_set = mrvl_vlan_filter_set,
+ .tx_queue_start = mrvl_tx_queue_start,
+ .tx_queue_stop = mrvl_tx_queue_stop,
.rx_queue_setup = mrvl_rx_queue_setup,
.rx_queue_release = mrvl_rx_queue_release,
.tx_queue_setup = mrvl_tx_queue_setup,
.tx_queue_release = mrvl_tx_queue_release,
+ .flow_ctrl_get = mrvl_flow_ctrl_get,
+ .flow_ctrl_set = mrvl_flow_ctrl_set,
.rss_hash_update = mrvl_rss_hash_update,
.rss_hash_conf_get = mrvl_rss_hash_conf_get,
+ .filter_ctrl = mrvl_eth_filter_ctrl,
};
/**
@@ -1716,7 +1958,7 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
break;
default:
- RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
+ MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
break;
}
@@ -1728,7 +1970,7 @@ mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
packet_type |= RTE_PTYPE_L4_UDP;
break;
default:
- RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
+ MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
break;
}
@@ -1799,7 +2041,7 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
if (unlikely(ret < 0)) {
- RTE_LOG(ERR, PMD, "Failed to receive packets\n");
+ MRVL_LOG(ERR, "Failed to receive packets");
return 0;
}
mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
@@ -1866,15 +2108,15 @@ mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
(!rx_done && num < q->priv->bpool_init_size))) {
ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
if (ret)
- RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
+ MRVL_LOG(ERR, "Failed to fill bpool");
} else if (unlikely(num > q->priv->bpool_max_size)) {
int i;
int pkt_to_remove = num - q->priv->bpool_init_size;
struct rte_mbuf *mbuf;
struct pp2_buff_inf buff;
- RTE_LOG(DEBUG, PMD,
- "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
+ MRVL_LOG(DEBUG,
+ "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
bpool->pp2_id, q->priv->ppio->port_id,
bpool->id, pkt_to_remove, num,
q->priv->bpool_init_size);
@@ -1994,8 +2236,8 @@ mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
for (i = 0; i < nb_done; i++) {
entry = &sq->ent[sq->tail + num];
if (unlikely(!entry->buff.addr)) {
- RTE_LOG(ERR, PMD,
- "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
+ MRVL_LOG(ERR,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!",
sq->tail, (u64)entry->buff.cookie,
(u64)entry->buff.addr);
skip_bufs = 1;
@@ -2072,8 +2314,8 @@ mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
if (unlikely(nb_pkts > sq_free_size)) {
- RTE_LOG(DEBUG, PMD,
- "No room in shadow queue for %d packets! %d packets will be sent.\n",
+ MRVL_LOG(DEBUG,
+ "No room in shadow queue for %d packets! %d packets will be sent.",
nb_pkts, sq_free_size);
nb_pkts = sq_free_size;
}
@@ -2259,7 +2501,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
rte_zmalloc("mac_addrs",
ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
if (!eth_dev->data->mac_addrs) {
- RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
+ MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
ret = -ENOMEM;
goto out_free_priv;
}
@@ -2280,6 +2522,7 @@ mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
eth_dev->device = &vdev->device;
eth_dev->dev_ops = &mrvl_ops;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
out_free_mac:
rte_free(eth_dev->data->mac_addrs);
@@ -2397,9 +2640,9 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
*/
if (!mrvl_qos_cfg) {
cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
- RTE_LOG(INFO, PMD, "Parsing config file!\n");
+ MRVL_LOG(INFO, "Parsing config file!");
if (cfgnum > 1) {
- RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
+ MRVL_LOG(ERR, "Cannot handle more than one config file!");
goto out_free_kvlist;
} else if (cfgnum == 1) {
rte_kvargs_process(kvlist, MRVL_CFG_ARG,
@@ -2410,7 +2653,7 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
if (mrvl_dev_num)
goto init_devices;
- RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
+ MRVL_LOG(INFO, "Perform MUSDK initializations");
/*
* ret == -EEXIST is correct, it means DMA
* has been already initialized (by another PMD).
@@ -2420,13 +2663,13 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
if (ret != -EEXIST)
goto out_free_kvlist;
else
- RTE_LOG(INFO, PMD,
- "DMA memory has been already initialized by a different driver.\n");
+ MRVL_LOG(INFO,
+ "DMA memory has been already initialized by a different driver.");
}
ret = mrvl_init_pp2();
if (ret) {
- RTE_LOG(ERR, PMD, "Failed to init PP!\n");
+ MRVL_LOG(ERR, "Failed to init PP!");
goto out_deinit_dma;
}
@@ -2438,7 +2681,7 @@ rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
init_devices:
for (i = 0; i < ifnum; i++) {
- RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
+ MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
if (ret)
goto out_cleanup;
@@ -2482,9 +2725,9 @@ rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
if (!name)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Removing %s\n", name);
+ MRVL_LOG(INFO, "Removing %s", name);
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
char ifname[RTE_ETH_NAME_MAX_LEN];
rte_eth_dev_get_name_by_port(i, ifname);
@@ -2493,7 +2736,7 @@ rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
}
if (mrvl_dev_num == 0) {
- RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
+ MRVL_LOG(INFO, "Perform MUSDK deinit");
mrvl_deinit_hifs();
mrvl_deinit_pp2();
mv_sys_dma_mem_destroy();
@@ -2507,5 +2750,12 @@ static struct rte_vdev_driver pmd_mrvl_drv = {
.remove = rte_pmd_mrvl_remove,
};
-RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
-RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);
+RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
+RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
+
+RTE_INIT(mrvl_init_log)
+{
+ mrvl_logtype = rte_log_register("pmd.net.mvpp2");
+ if (mrvl_logtype >= 0)
+ rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/mrvl/mrvl_ethdev.h b/drivers/net/mvpp2/mrvl_ethdev.h
index f7afae5c..3726f788 100644
--- a/drivers/net/mrvl/mrvl_ethdev.h
+++ b/drivers/net/mvpp2/mrvl_ethdev.h
@@ -1,41 +1,14 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#ifndef _MRVL_ETHDEV_H_
#define _MRVL_ETHDEV_H_
#include <rte_spinlock.h>
+#include <rte_flow_driver.h>
#include <env/mv_autogen_comp_flags.h>
#include <drivers/mv_pp2.h>
@@ -108,11 +81,29 @@ struct mrvl_priv {
uint8_t rss_hf_tcp;
uint8_t uc_mc_flushed;
uint8_t vlan_flushed;
+ uint8_t isolated;
struct pp2_ppio_params ppio_params;
struct pp2_cls_qos_tbl_params qos_tbl_params;
struct pp2_cls_tbl *qos_tbl;
uint16_t nb_rx_queues;
+
+ struct pp2_cls_tbl_params cls_tbl_params;
+ struct pp2_cls_tbl *cls_tbl;
+ uint32_t cls_tbl_pattern;
+ LIST_HEAD(mrvl_flows, rte_flow) flows;
+
+ struct pp2_cls_plcr *policer;
};
+/** Flow operations forward declaration. */
+extern const struct rte_flow_ops mrvl_flow_ops;
+
+/** Current log type. */
+extern int mrvl_logtype;
+
+#define MRVL_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, mrvl_logtype, "%s(): " fmt "\n", \
+ __func__, ##args)
+
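For reference, given the macro above, a call such as MRVL_LOG(ERR, "Failed to add mac %s", buf) expands to:

    rte_log(RTE_LOG_ERR, mrvl_logtype, "%s(): Failed to add mac %s\n",
            __func__, buf);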
#endif /* _MRVL_ETHDEV_H_ */
diff --git a/drivers/net/mvpp2/mrvl_flow.c b/drivers/net/mvpp2/mrvl_flow.c
new file mode 100644
index 00000000..ecc34192
--- /dev/null
+++ b/drivers/net/mvpp2/mrvl_flow.c
@@ -0,0 +1,2779 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Marvell International Ltd.
+ * Copyright(c) 2018 Semihalf.
+ * All rights reserved.
+ */
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+
+#include <arpa/inet.h>
+
+#ifdef container_of
+#undef container_of
+#endif
+
+#include "mrvl_ethdev.h"
+#include "mrvl_qos.h"
+#include "env/mv_common.h" /* for BIT() */
+
+/** Number of rules in the classifier table. */
+#define MRVL_CLS_MAX_NUM_RULES 20
+
+/** Size of the classifier key and mask strings. */
+#define MRVL_CLS_STR_SIZE_MAX 40
+
+/** Parsed fields in processed rte_flow_item. */
+enum mrvl_parsed_fields {
+ /* eth flags */
+ F_DMAC = BIT(0),
+ F_SMAC = BIT(1),
+ F_TYPE = BIT(2),
+ /* vlan flags */
+ F_VLAN_ID = BIT(3),
+ F_VLAN_PRI = BIT(4),
+ F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
+ /* ip4 flags */
+ F_IP4_TOS = BIT(6),
+ F_IP4_SIP = BIT(7),
+ F_IP4_DIP = BIT(8),
+ F_IP4_PROTO = BIT(9),
+ /* ip6 flags */
+ F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
+ F_IP6_SIP = BIT(11),
+ F_IP6_DIP = BIT(12),
+ F_IP6_FLOW = BIT(13),
+ F_IP6_NEXT_HDR = BIT(14),
+ /* tcp flags */
+ F_TCP_SPORT = BIT(15),
+ F_TCP_DPORT = BIT(16),
+ /* udp flags */
+ F_UDP_SPORT = BIT(17),
+ F_UDP_DPORT = BIT(18),
+};
+
+/** PMD-specific definition of a flow rule handle. */
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next;
+
+ enum mrvl_parsed_fields pattern;
+
+ struct pp2_cls_tbl_rule rule;
+ struct pp2_cls_cos_desc cos;
+ struct pp2_cls_tbl_action action;
+};
+
+static const enum rte_flow_item_type pattern_eth[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_ip6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+static const enum rte_flow_item_type pattern_udp[] = {
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END
+};
+
+#define MRVL_VLAN_ID_MASK 0x0fff
+#define MRVL_VLAN_PRI_MASK 0x7000
+#define MRVL_IPV4_DSCP_MASK 0xfc
+#define MRVL_IPV4_ADDR_MASK 0xffffffff
+#define MRVL_IPV6_FLOW_MASK 0x0fffff
+
+/**
+ * Given a flow item, return the next non-void one.
+ *
+ * @param items Pointer to the item in the table.
+ * @returns Next non-void item, NULL otherwise.
+ */
+static const struct rte_flow_item *
+mrvl_next_item(const struct rte_flow_item *items)
+{
+ const struct rte_flow_item *item = items;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
+ return item;
+ }
+
+ return NULL;
+}
+
+/**
+ * Allocate memory for classifier rule key and mask fields.
+ *
+ * @param field Pointer to the classifier rule.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
+{
+ unsigned int id = rte_socket_id();
+
+ field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
+ if (!field->key)
+ goto out;
+
+ field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
+ if (!field->mask)
+ goto out_mask;
+
+ return 0;
+out_mask:
+ rte_free(field->key);
+out:
+ field->key = NULL;
+ field->mask = NULL;
+ return -1;
+}
+
+/**
+ * Free memory allocated for classifier rule key and mask fields.
+ *
+ * @param field Pointer to the classifier rule.
+ */
+static void
+mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
+{
+ rte_free(field->key);
+ rte_free(field->mask);
+ field->key = NULL;
+ field->mask = NULL;
+}
+
+/**
+ * Free memory allocated for all classifier rule key and mask fields.
+ *
+ * @param rule Pointer to the classifier table rule.
+ */
+static void
+mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
+{
+ int i;
+
+ for (i = 0; i < rule->num_fields; i++)
+ mrvl_free_key_mask(&rule->fields[i]);
+ rule->num_fields = 0;
+}
+
+/**
+ * Initialize rte flow item parsing.
+ *
+ * @param item Pointer to the flow item.
+ * @param spec_ptr Pointer to the specific item pointer.
+ * @param mask_ptr Pointer to the specific item's mask pointer.
+ * @param def_mask Pointer to the default mask.
+ * @param size Size of the flow item.
+ * @param error Pointer to the rte flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_init(const struct rte_flow_item *item,
+ const void **spec_ptr,
+ const void **mask_ptr,
+ const void *def_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *spec;
+ const uint8_t *mask;
+ const uint8_t *last;
+ uint8_t zeros[size];
+
+ memset(zeros, 0, size);
+
+ if (item == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "NULL item\n");
+ return -rte_errno;
+ }
+
+ if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Mask or last is set without spec\n");
+ return -rte_errno;
+ }
+
+ /*
+ * If "mask" is not set, default mask is used,
+ * but if default mask is NULL, "mask" should be set.
+ */
+ if (item->mask == NULL) {
+ if (def_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Mask should be specified\n");
+ return -rte_errno;
+ }
+
+ mask = (const uint8_t *)def_mask;
+ } else {
+ mask = (const uint8_t *)item->mask;
+ }
+
+ spec = (const uint8_t *)item->spec;
+ last = (const uint8_t *)item->last;
+
+ if (spec == NULL) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Spec should be specified\n");
+ return -rte_errno;
+ }
+
+ /*
+ * If field values in "last" are either 0 or equal to the corresponding
+ * values in "spec" then they are ignored.
+ */
+ if (last != NULL &&
+ memcmp(last, zeros, size) != 0 &&
+ memcmp(last, spec, size) != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Ranging is not supported\n");
+ return -rte_errno;
+ }
+
+ *spec_ptr = spec;
+ *mask_ptr = mask;
+
+ return 0;
+}
+
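To make the spec/mask/last contract above concrete: an application matching, say, TCP destination port 80 exactly would pass an item like the following sketch (when item->mask is NULL the default mask from rte_flow.h, e.g. rte_flow_item_tcp_mask, is used instead):

    struct rte_flow_item_tcp tcp_spec = {
            .hdr.dst_port = RTE_BE16(80),
    };
    struct rte_flow_item_tcp tcp_mask = {
            .hdr.dst_port = RTE_BE16(0xffff),
    };
    struct rte_flow_item item = {
            .type = RTE_FLOW_ITEM_TYPE_TCP,
            .spec = &tcp_spec,
            .mask = &tcp_mask, /* NULL selects the default mask */
            .last = NULL,      /* ranges are rejected above */
    };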
+/**
+ * Parse the eth flow item.
+ *
+ * This will create a classifier rule that matches either the destination
+ * or the source mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse destination address when non-zero, source otherwise.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_mac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ const uint8_t *k, *m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ if (parse_dst) {
+ k = spec->dst.addr_bytes;
+ m = mask->dst.addr_bytes;
+
+ flow->pattern |= F_DMAC;
+ } else {
+ k = spec->src.addr_bytes;
+ m = mask->src.addr_bytes;
+
+ flow->pattern |= F_SMAC;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 6;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ k[0], k[1], k[2], k[3], k[4], k[5]);
+
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
+ "%02x:%02x:%02x:%02x:%02x:%02x",
+ m[0], m[1], m[2], m[3], m[4], m[5]);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
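Since the MUSDK classifier consumes keys and masks as strings, a rule matching destination mac 00:11:22:33:44:55 exactly ends up encoded by the snprintf() calls above as (illustrative values):

    key:  "00:11:22:33:44:55"
    mask: "ff:ff:ff:ff:ff:ff"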
+/**
+ * Helper for parsing the eth flow item destination mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_mac(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing the eth flow item source mac address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_smac(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_mac(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the ether type field of the eth flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_type(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ k = rte_be_to_cpu_16(spec->type);
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_TYPE;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the vid field of the vlan rte flow item.
+ *
+ * This will create a classifier rule that matches the vid.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
+ const struct rte_flow_item_vlan *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_VLAN_ID;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the pri field of the vlan rte flow item.
+ *
+ * This will create a classifier rule that matches the pri.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
+ const struct rte_flow_item_vlan *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_VLAN_PRI;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the dscp field of the ipv4 rte flow item.
+ *
+ * This will create a classifier rule that matches the dscp field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k, m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
+ m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
+
+ flow->pattern |= F_IP4_TOS;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
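As a worked example: a spec with hdr.type_of_service = 0xb8 under the default 0xfc mask produces key "46" and mask "63", i.e. DSCP 46 (EF) matched exactly, since (0xb8 & 0xfc) >> 2 == 46 and (0xfc & 0xfc) >> 2 == 63.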
+/**
+ * Parse either source or destination ip addresses of the ipv4 flow item.
+ *
+ * This will create a classifier rule that matches either the destination
+ * or the source ip address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse destination address when non-zero, source otherwise.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ struct in_addr k;
+ uint32_t m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ memset(&k, 0, sizeof(k));
+ if (parse_dst) {
+ k.s_addr = spec->hdr.dst_addr;
+ m = rte_be_to_cpu_32(mask->hdr.dst_addr);
+
+ flow->pattern |= F_IP4_DIP;
+ } else {
+ k.s_addr = spec->hdr.src_addr;
+ m = rte_be_to_cpu_32(mask->hdr.src_addr);
+
+ flow->pattern |= F_IP4_SIP;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 4;
+
+ inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing destination ip of the ipv4 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip4_addr(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing source ip of the ipv4 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip4_addr(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the proto field of the ipv4 rte flow item.
+ *
+ * This will create a classifier rule that matches the proto field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k = spec->hdr.next_proto_id;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_IP4_PROTO;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse either source or destination ip addresses of the ipv6 rte flow item.
+ *
+ * This will create a classifier rule that matches either the destination
+ * or the source ip address.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse destination address when non-zero, source otherwise.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ int size = sizeof(spec->hdr.dst_addr);
+ struct in6_addr k, m;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ memset(&k, 0, sizeof(k));
+ if (parse_dst) {
+ memcpy(k.s6_addr, spec->hdr.dst_addr, size);
+ memcpy(m.s6_addr, mask->hdr.dst_addr, size);
+
+ flow->pattern |= F_IP6_DIP;
+ } else {
+ memcpy(k.s6_addr, spec->hdr.src_addr, size);
+ memcpy(m.s6_addr, mask->hdr.src_addr, size);
+
+ flow->pattern |= F_IP6_SIP;
+ }
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 16;
+
+ inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
+ inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing destination ip of the ipv6 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip6_addr(spec, mask, 1, flow);
+}
+
+/**
+ * Helper for parsing source ip of the ipv6 flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_ip6_addr(spec, mask, 0, flow);
+}
+
+/**
+ * Parse the flow label of the ipv6 flow item.
+ *
+ * This will create a classifier rule that matches the flow label field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
+ m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 3;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+ snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
+
+ flow->pattern |= F_IP6_FLOW;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse the next header of the ipv6 flow item.
+ *
+ * This will create a classifier rule that matches the next header field.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask __rte_unused,
+ struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint8_t k = spec->hdr.proto;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 1;
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->pattern |= F_IP6_NEXT_HDR;
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Parse destination or source port of the tcp flow item.
+ *
+ * This will create a classifier rule that matches either the destination
+ * or the source tcp port.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse destination port when non-zero, source otherwise.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask __rte_unused,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ if (parse_dst) {
+ k = rte_be_to_cpu_16(spec->hdr.dst_port);
+
+ flow->pattern |= F_TCP_DPORT;
+ } else {
+ k = rte_be_to_cpu_16(spec->hdr.src_port);
+
+ flow->pattern |= F_TCP_SPORT;
+ }
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing the tcp source port of the tcp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_tcp_port(spec, mask, 0, flow);
+}
+
+/**
+ * Helper for parsing the tcp destination port of the tcp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
+ const struct rte_flow_item_tcp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_tcp_port(spec, mask, 1, flow);
+}
+
+/**
+ * Parse destination or source port of the udp flow item.
+ *
+ * This will create a classifier rule that matches either the destination
+ * or the source udp port.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param parse_dst Parse destination port when non-zero, source otherwise.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static int
+mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask __rte_unused,
+ int parse_dst, struct rte_flow *flow)
+{
+ struct pp2_cls_rule_key_field *key_field;
+ uint16_t k;
+
+ if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
+ return -ENOSPC;
+
+ key_field = &flow->rule.fields[flow->rule.num_fields];
+ mrvl_alloc_key_mask(key_field);
+ key_field->size = 2;
+
+ if (parse_dst) {
+ k = rte_be_to_cpu_16(spec->hdr.dst_port);
+
+ flow->pattern |= F_UDP_DPORT;
+ } else {
+ k = rte_be_to_cpu_16(spec->hdr.src_port);
+
+ flow->pattern |= F_UDP_SPORT;
+ }
+
+ snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
+
+ flow->rule.num_fields += 1;
+
+ return 0;
+}
+
+/**
+ * Helper for parsing the udp source port of the udp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_udp_port(spec, mask, 0, flow);
+}
+
+/**
+ * Helper for parsing the udp destination port of the udp flow item.
+ *
+ * @param spec Pointer to the specific flow item.
+ * @param mask Pointer to the specific flow item's mask.
+ * @param flow Pointer to the flow.
+ * @return 0 in case of success, negative error value otherwise.
+ */
+static inline int
+mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask,
+ struct rte_flow *flow)
+{
+ return mrvl_parse_udp_port(spec, mask, 1, flow);
+}
+
+/**
+ * Parse eth flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
+ struct ether_addr zero;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_eth_mask,
+ sizeof(struct rte_flow_item_eth), error);
+ if (ret)
+ return ret;
+
+ memset(&zero, 0, sizeof(zero));
+
+ if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
+ ret = mrvl_parse_dmac(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
+ ret = mrvl_parse_smac(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->type) {
+ MRVL_LOG(WARNING, "eth type mask is ignored");
+ ret = mrvl_parse_type(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse vlan flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_vlan(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
+ uint16_t m;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_vlan_mask,
+ sizeof(struct rte_flow_item_vlan), error);
+ if (ret)
+ return ret;
+
+ m = rte_be_to_cpu_16(mask->tci);
+ if (m & MRVL_VLAN_ID_MASK) {
+ MRVL_LOG(WARNING, "vlan id mask is ignored");
+ ret = mrvl_parse_vlan_id(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (m & MRVL_VLAN_PRI_MASK) {
+ MRVL_LOG(WARNING, "vlan pri mask is ignored");
+ ret = mrvl_parse_vlan_pri(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (flow->pattern & F_TYPE) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN TPID matching is not supported");
+ return -rte_errno;
+ }
+ if (mask->inner_type) {
+ struct rte_flow_item_eth spec_eth = {
+ .type = spec->inner_type,
+ };
+ struct rte_flow_item_eth mask_eth = {
+ .type = mask->inner_type,
+ };
+
+ MRVL_LOG(WARNING, "inner eth type mask is ignored");
+ ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse ipv4 flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_ip4(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.version_ihl ||
+ mask->hdr.total_length ||
+ mask->hdr.packet_id ||
+ mask->hdr.fragment_offset ||
+ mask->hdr.time_to_live ||
+ mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
+ ret = mrvl_parse_ip4_dscp(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.src_addr) {
+ ret = mrvl_parse_ip4_sip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_addr) {
+ ret = mrvl_parse_ip4_dip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.next_proto_id) {
+ MRVL_LOG(WARNING, "next proto id mask is ignored");
+ ret = mrvl_parse_ip4_proto(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse ipv6 flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_ip6(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
+ struct ipv6_hdr zero;
+ uint32_t flow_mask;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec,
+ (const void **)&mask,
+ &rte_flow_item_ipv6_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (ret)
+ return ret;
+
+ memset(&zero, 0, sizeof(zero));
+
+ if (mask->hdr.payload_len ||
+ mask->hdr.hop_limits) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (memcmp(mask->hdr.src_addr,
+ zero.src_addr, sizeof(mask->hdr.src_addr))) {
+ ret = mrvl_parse_ip6_sip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (memcmp(mask->hdr.dst_addr,
+ zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
+ ret = mrvl_parse_ip6_dip(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
+ if (flow_mask) {
+ ret = mrvl_parse_ip6_flow(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.proto) {
+ MRVL_LOG(WARNING, "next header mask is ignored");
+ ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse tcp flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_tcp(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.sent_seq ||
+ mask->hdr.recv_ack ||
+ mask->hdr.data_off ||
+ mask->hdr.tcp_flags ||
+ mask->hdr.rx_win ||
+ mask->hdr.cksum ||
+ mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.src_port) {
+ MRVL_LOG(WARNING, "tcp sport mask is ignored");
+ ret = mrvl_parse_tcp_sport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_port) {
+ MRVL_LOG(WARNING, "tcp dport mask is ignored");
+ ret = mrvl_parse_tcp_dport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
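+
+/*
+ * A minimal sketch of a tcp item the parser above accepts: only the
+ * src_port/dst_port mask fields may be non-zero. Port 80 is a
+ * hypothetical example value.
+ *
+ *	struct rte_flow_item_tcp spec = { .hdr.dst_port = RTE_BE16(80) };
+ *	struct rte_flow_item_tcp mask = { .hdr.dst_port = RTE_BE16(0xffff) };
+ */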
+
+/**
+ * Parse udp flow item.
+ *
+ * @param item Pointer to the flow item.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_parse_udp(const struct rte_flow_item *item,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
+ int ret;
+
+ ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
+ &rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
+ if (ret)
+ return ret;
+
+ if (mask->hdr.dgram_len ||
+ mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by classifier\n");
+ return -rte_errno;
+ }
+
+ if (mask->hdr.src_port) {
+ MRVL_LOG(WARNING, "udp sport mask is ignored");
+ ret = mrvl_parse_udp_sport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ if (mask->hdr.dst_port) {
+ MRVL_LOG(WARNING, "udp dport mask is ignored");
+ ret = mrvl_parse_udp_dport(spec, mask, flow);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+out:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Reached maximum number of fields in cls tbl key\n");
+ return -rte_errno;
+}
+
+/**
+ * Parse flow pattern composed of the eth item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_eth(pattern, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth and vlan items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_vlan(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ ret = mrvl_parse_vlan(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth, vlan and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_eth(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the eth, ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_vlan(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ip4/ip6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_vlan(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ipv4 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param tcp 1 to parse tcp item, 0 to parse udp item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the vlan and ipv6 items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int tcp)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+ item = mrvl_next_item(item + 1);
+
+ if (tcp)
+ return mrvl_parse_tcp(item, flow, error);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the vlan, ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ip4/ip6 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the ip4/ip6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_tcp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 and tcp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4/ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error, int ip6)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+ int ret;
+
+ ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
+ mrvl_parse_ip4(item, flow, error);
+ if (ret)
+ return ret;
+
+ item = mrvl_next_item(item + 1);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the ipv4 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
+}
+
+/**
+ * Parse flow pattern composed of the ipv6 and udp items.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
+}
+
+/**
+ * Parse flow pattern composed of the tcp item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_tcp(item, flow, error);
+}
+
+/**
+ * Parse flow pattern composed of the udp item.
+ *
+ * @param pattern Pointer to the flow pattern table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item = mrvl_next_item(pattern);
+
+ return mrvl_parse_udp(item, flow, error);
+}
+
+/**
+ * Structure used to map a specific flow pattern to its parse callback,
+ * which iterates over each pattern item and extracts the relevant data.
+ */
+static const struct {
+ const enum rte_flow_item_type *pattern;
+ int (*parse)(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+} mrvl_patterns[] = {
+ { pattern_eth, mrvl_parse_pattern_eth },
+ { pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
+ { pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
+ { pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
+ { pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
+ { pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
+ { pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
+ { pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
+ { pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
+ { pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
+ { pattern_vlan, mrvl_parse_pattern_vlan },
+ { pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
+ { pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
+ { pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
+ { pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
+ { pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
+ { pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
+ { pattern_ip, mrvl_parse_pattern_ip4 },
+ { pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
+ { pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
+ { pattern_ip6, mrvl_parse_pattern_ip6 },
+ { pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
+ { pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
+ { pattern_tcp, mrvl_parse_pattern_tcp },
+ { pattern_udp, mrvl_parse_pattern_udp }
+};
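+
+/*
+ * A sketch of the pattern an application would supply to hit the
+ * pattern_eth_ip4_tcp entry above, assuming that entry expands to the
+ * type sequence ETH, IPV4, TCP, END:
+ *
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ */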
+
+/**
+ * Check whether provided pattern matches any of the supported ones.
+ *
+ * @param type_pattern Pointer to the pattern type.
+ * @param item_pattern Pointer to the flow pattern.
+ * @returns 1 if the patterns match, 0 otherwise.
+ */
+static int
+mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
+ const struct rte_flow_item *item_pattern)
+{
+ const enum rte_flow_item_type *type = type_pattern;
+ const struct rte_flow_item *item = item_pattern;
+
+ for (;;) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ item++;
+ continue;
+ }
+
+ if (*type == RTE_FLOW_ITEM_TYPE_END ||
+ item->type == RTE_FLOW_ITEM_TYPE_END)
+ break;
+
+ if (*type != item->type)
+ break;
+
+ item++;
+ type++;
+ }
+
+ return *type == item->type;
+}
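+
+/*
+ * Note that VOID items are transparent to the matcher above; e.g. the
+ * following pattern still matches the eth/ip4 type sequence:
+ *
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ */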
+
+/**
+ * Parse flow attribute.
+ *
+ * This will check whether the provided attribute's flags are supported.
+ *
+ * @param priv Unused.
+ * @param attr Pointer to the flow attribute.
+ * @param flow Unused.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
+ const struct rte_flow_attr *attr,
+ struct rte_flow *flow __rte_unused,
+ struct rte_flow_error *error)
+{
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute");
+ return -rte_errno;
+ }
+
+ if (attr->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+ if (!attr->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+ if (attr->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+ if (attr->transfer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
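+
+/*
+ * The only attribute combination accepted above is plain ingress with no
+ * group, priority, egress or transfer bits, i.e.:
+ *
+ *	struct rte_flow_attr attr = { .ingress = 1 };
+ */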
+
+/**
+ * Parse flow pattern.
+ *
+ * Specific classifier rule will be created as well.
+ *
+ * @param priv Unused.
+ * @param pattern Pointer to the flow pattern.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
+ const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
+ if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
+ continue;
+
+ ret = mrvl_patterns[i].parse(pattern, flow, error);
+ if (ret)
+ mrvl_free_all_key_mask(&flow->rule);
+
+ return ret;
+ }
+
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Unsupported pattern");
+
+ return -rte_errno;
+}
+
+/**
+ * Parse flow actions.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param actions Pointer to the action table.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse_actions(struct mrvl_priv *priv,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *action = actions;
+ int specified = 0;
+
+ for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+
+ if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ flow->cos.ppio = priv->ppio;
+ flow->cos.tc = 0;
+ flow->action.type = PP2_CLS_TBL_ACT_DROP;
+ flow->action.cos = &flow->cos;
+ specified++;
+ } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *q =
+ (const struct rte_flow_action_queue *)
+ action->conf;
+
+ if (q->index >= priv->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Queue index out of range");
+ return -rte_errno;
+ }
+
+ if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
+ /*
+ * Unknown TC mapping: the flow cannot be steered
+ * to a valid queue.
+ */
+ MRVL_LOG(ERR,
+ "Unknown TC mapping for queue %hu eth%hhu",
+ q->index, priv->ppio_id);
+
+ rte_flow_error_set(error, EFAULT,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+ return -rte_errno;
+ }
+
+ MRVL_LOG(DEBUG,
+ "Action: Assign packets to queue %d, tc:%d, q:%d",
+ q->index, priv->rxq_map[q->index].tc,
+ priv->rxq_map[q->index].inq);
+
+ flow->cos.ppio = priv->ppio;
+ flow->cos.tc = priv->rxq_map[q->index].tc;
+ flow->action.type = PP2_CLS_TBL_ACT_DONE;
+ flow->action.cos = &flow->cos;
+ specified++;
+ } else {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Action not supported");
+ return -rte_errno;
+ }
+
+ }
+
+ if (!specified) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Action not specified");
+ return -rte_errno;
+ }
+
+ return 0;
+}
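+
+/*
+ * A sketch of an action table accepted above; the queue index is a
+ * hypothetical example and must map to a known TC:
+ *
+ *	struct rte_flow_action_queue queue = { .index = 1 };
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ */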
+
+/**
+ * Parse flow attribute, pattern and actions.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ ret = mrvl_flow_parse_attr(priv, attr, flow, error);
+ if (ret)
+ return ret;
+
+ ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
+ if (ret)
+ return ret;
+
+ return mrvl_flow_parse_actions(priv, actions, flow, error);
+}
+
+static inline enum pp2_cls_tbl_type
+mrvl_engine_type(const struct rte_flow *flow)
+{
+ int i, size = 0;
+
+ for (i = 0; i < flow->rule.num_fields; i++)
+ size += flow->rule.fields[i].size;
+
+ /*
+ * The maskable engine supports keys of up to 8 bytes.
+ * Keys bigger than 8 bytes require the engine type to be
+ * set to exact match.
+ */
+ if (size > 8)
+ return PP2_CLS_TBL_EXACT_MATCH;
+
+ return PP2_CLS_TBL_MASKABLE;
+}
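+
+/*
+ * For example, a key built from ipv4 source and destination addresses
+ * (4 + 4 = 8 bytes) still fits the maskable engine, while adding a tcp
+ * destination port (8 + 2 = 10 bytes) forces exact match.
+ */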
+
+static int
+mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
+ int ret;
+
+ if (priv->cls_tbl) {
+ pp2_cls_tbl_deinit(priv->cls_tbl);
+ priv->cls_tbl = NULL;
+ }
+
+ memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
+
+ priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
+ MRVL_LOG(INFO, "Setting cls search engine type to %s",
+ priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
+ "exact" : "maskable");
+ priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
+ priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
+ priv->cls_tbl_params.default_act.cos = &first_flow->cos;
+
+ if (first_flow->pattern & F_DMAC) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
+ key->key_size += 6;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_SMAC) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
+ key->key_size += 6;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TYPE) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
+ key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_VLAN_ID) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
+ key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_VLAN_PRI) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
+ key->proto_field[key->num_fields].field.vlan =
+ MV_NET_VLAN_F_PRI;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_TOS) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_SIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
+ key->key_size += 4;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_DIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
+ key->key_size += 4;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP4_PROTO) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
+ key->proto_field[key->num_fields].field.ipv4 =
+ MV_NET_IP4_F_PROTO;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_SIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
+ key->key_size += 16;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_DIP) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
+ key->key_size += 16;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_FLOW) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 =
+ MV_NET_IP6_F_FLOW;
+ key->key_size += 3;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_IP6_NEXT_HDR) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
+ key->proto_field[key->num_fields].field.ipv6 =
+ MV_NET_IP6_F_NEXT_HDR;
+ key->key_size += 1;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TCP_SPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
+ key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_TCP_DPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
+ key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_UDP_SPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+ key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_SP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ if (first_flow->pattern & F_UDP_DPORT) {
+ key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
+ key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_DP;
+ key->key_size += 2;
+ key->num_fields += 1;
+ }
+
+ ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
+ if (!ret)
+ priv->cls_tbl_pattern = first_flow->pattern;
+
+ return ret;
+}
+
+/**
+ * Check whether a new flow can be added to the table.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the new flow.
+ * @returns 1 in case flow can be added, 0 otherwise.
+ */
+static inline int
+mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
+{
+ return flow->pattern == priv->cls_tbl_pattern &&
+ mrvl_engine_type(flow) == priv->cls_tbl_params.type;
+}
+
+/**
+ * DPDK flow create callback called when flow is to be created.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns Pointer to the created flow in case of success, NULL otherwise.
+ */
+static struct rte_flow *
+mrvl_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow, *first;
+ int ret;
+
+ if (!dev->data->dev_started) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Port must be started first\n");
+ return NULL;
+ }
+
+ flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
+ if (!flow)
+ return NULL;
+
+ ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
+ if (ret)
+ goto out;
+
+ /*
+ * Four cases here:
+ *
+ * 1. In case the table does not exist - create one.
+ * 2. In case the table exists, is empty and the new flow cannot be
+ * added to it - recreate the table.
+ * 3. In case the table is not empty and the new flow matches the
+ * table format - add it.
+ * 4. Otherwise the flow cannot be added.
+ */
+ first = LIST_FIRST(&priv->flows);
+ if (!priv->cls_tbl) {
+ ret = mrvl_create_cls_table(dev, flow);
+ } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
+ ret = mrvl_create_cls_table(dev, flow);
+ } else if (mrvl_flow_can_be_added(priv, flow)) {
+ ret = 0;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Pattern does not match cls table format\n");
+ goto out;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to create cls table\n");
+ goto out;
+ }
+
+ ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to add rule\n");
+ goto out;
+ }
+
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
+
+ return flow;
+out:
+ rte_free(flow);
+ return NULL;
+}
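+
+/*
+ * A minimal end-to-end sketch from the application side, assuming a
+ * started port 0 and attr/pattern/actions as in the sketches above:
+ *
+ *	struct rte_flow_error err;
+ *	struct rte_flow *f;
+ *
+ *	f = rte_flow_create(0, &attr, pattern, actions, &err);
+ *	if (!f)
+ *		printf("flow create failed: %s\n",
+ *		       err.message ? err.message : "(no message)");
+ */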
+
+/**
+ * Remove classifier rule associated with given flow.
+ *
+ * @param priv Pointer to the port's private data.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+
+ if (!priv->cls_tbl) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Classifier table not initialized");
+ return -rte_errno;
+ }
+
+ ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to remove rule");
+ return -rte_errno;
+ }
+
+ mrvl_free_all_key_mask(&flow->rule);
+
+ return 0;
+}
+
+/**
+ * DPDK flow destroy callback called when flow is to be removed.
+ *
+ * @param dev Pointer to the device.
+ * @param flow Pointer to the flow.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+ struct rte_flow *f;
+ int ret;
+
+ LIST_FOREACH(f, &priv->flows, next) {
+ if (f == flow)
+ break;
+ }
+
+ if (!f) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Rule was not found");
+ return -rte_errno;
+ }
+
+ LIST_REMOVE(f, next);
+
+ ret = mrvl_flow_remove(priv, flow, error);
+ if (ret)
+ return ret;
+
+ rte_free(flow);
+
+ return 0;
+}
+
+/**
+ * DPDK flow callback called to verify given attribute, pattern and actions.
+ *
+ * @param dev Pointer to the device.
+ * @param attr Pointer to the flow attribute.
+ * @param pattern Pointer to the flow pattern.
+ * @param actions Pointer to the flow actions.
+ * @param error Pointer to the flow error.
+ * @returns 0 on success, negative value otherwise.
+ */
+static int
+mrvl_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+
+ flow = mrvl_flow_create(dev, attr, pattern, actions, error);
+ if (!flow)
+ return -rte_errno;
+
+ mrvl_flow_destroy(dev, flow, error);
+
+ return 0;
+}
+
+/**
+ * DPDK flow flush callback called when flows are to be flushed.
+ *
+ * @param dev Pointer to the device.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ while (!LIST_EMPTY(&priv->flows)) {
+ struct rte_flow *flow = LIST_FIRST(&priv->flows);
+ int ret = mrvl_flow_remove(priv, flow, error);
+ if (ret)
+ return ret;
+
+ LIST_REMOVE(flow, next);
+ rte_free(flow);
+ }
+
+ return 0;
+}
+
+/**
+ * DPDK flow isolate callback called to isolate port.
+ *
+ * @param dev Pointer to the device.
+ * @param enable Pass 0/1 to disable/enable port isolation.
+ * @param error Pointer to the flow error.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
+ struct rte_flow_error *error)
+{
+ struct mrvl_priv *priv = dev->data->dev_private;
+
+ if (dev->data->dev_started) {
+ rte_flow_error_set(error, EBUSY,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Port must be stopped first\n");
+ return -rte_errno;
+ }
+
+ priv->isolated = enable;
+
+ return 0;
+}
+
+const struct rte_flow_ops mrvl_flow_ops = {
+ .validate = mrvl_flow_validate,
+ .create = mrvl_flow_create,
+ .destroy = mrvl_flow_destroy,
+ .flush = mrvl_flow_flush,
+ .isolate = mrvl_flow_isolate
+};
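+
+/*
+ * These callbacks are assumed to be exposed through the ethdev filter_ctrl
+ * hook for RTE_ETH_FILTER_GENERIC, which is how the generic rte_flow API
+ * (rte_flow_validate(), rte_flow_create(), rte_flow_flush(), ...) reaches
+ * the driver.
+ */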
diff --git a/drivers/net/mrvl/mrvl_qos.c b/drivers/net/mvpp2/mrvl_qos.c
index fbb36813..71856c1a 100644
--- a/drivers/net/mrvl/mrvl_qos.c
+++ b/drivers/net/mvpp2/mrvl_qos.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <stdint.h>
@@ -64,12 +36,35 @@
#define MRVL_TOK_PCP "pcp"
#define MRVL_TOK_PORT "port"
#define MRVL_TOK_RXQ "rxq"
-#define MRVL_TOK_SP "SP"
#define MRVL_TOK_TC "tc"
#define MRVL_TOK_TXQ "txq"
#define MRVL_TOK_VLAN "vlan"
#define MRVL_TOK_VLAN_IP "vlan/ip"
-#define MRVL_TOK_WEIGHT "weight"
+
+/* egress specific configuration tokens */
+#define MRVL_TOK_BURST_SIZE "burst_size"
+#define MRVL_TOK_RATE_LIMIT "rate_limit"
+#define MRVL_TOK_RATE_LIMIT_ENABLE "rate_limit_enable"
+#define MRVL_TOK_SCHED_MODE "sched_mode"
+#define MRVL_TOK_SCHED_MODE_SP "sp"
+#define MRVL_TOK_SCHED_MODE_WRR "wrr"
+#define MRVL_TOK_WRR_WEIGHT "wrr_weight"
+
+/* policer specific configuration tokens */
+#define MRVL_TOK_PLCR_ENABLE "policer_enable"
+#define MRVL_TOK_PLCR_UNIT "token_unit"
+#define MRVL_TOK_PLCR_UNIT_BYTES "bytes"
+#define MRVL_TOK_PLCR_UNIT_PACKETS "packets"
+#define MRVL_TOK_PLCR_COLOR "color_mode"
+#define MRVL_TOK_PLCR_COLOR_BLIND "blind"
+#define MRVL_TOK_PLCR_COLOR_AWARE "aware"
+#define MRVL_TOK_PLCR_CIR "cir"
+#define MRVL_TOK_PLCR_CBS "cbs"
+#define MRVL_TOK_PLCR_EBS "ebs"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR "default_color"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN "green"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW "yellow"
+#define MRVL_TOK_PLCR_DEFAULT_COLOR_RED "red"
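+
+/*
+ * An illustrative configuration file snippet using the egress and policer
+ * tokens above; section names and values are hypothetical examples:
+ *
+ *	[port 0 default]
+ *	rate_limit_enable = 1
+ *	rate_limit = 10000
+ *	burst_size = 2000
+ *	policer_enable = 1
+ *	token_unit = bytes
+ *	color_mode = blind
+ *	cir = 10000
+ *	cbs = 64
+ *	ebs = 64
+ *
+ *	[port 0 tc 0]
+ *	rxq = 0 1
+ *	default_color = green
+ *
+ *	[port 0 outq 0]
+ *	sched_mode = wrr
+ *	wrr_weight = 100
+ */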
/** Number of tokens in range a-b = 2. */
#define MAX_RNG_TOKENS 2
@@ -131,12 +126,69 @@ get_outq_cfg(struct rte_cfgfile *file, int port, int outq,
if (rte_cfgfile_num_sections(file, sec_name, strlen(sec_name)) <= 0)
return 0;
+ /* Read scheduling mode */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_SCHED_MODE);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_SCHED_MODE_SP,
+ strlen(MRVL_TOK_SCHED_MODE_SP))) {
+ cfg->port[port].outq[outq].sched_mode =
+ PP2_PPIO_SCHED_M_SP;
+ } else if (!strncmp(entry, MRVL_TOK_SCHED_MODE_WRR,
+ strlen(MRVL_TOK_SCHED_MODE_WRR))) {
+ cfg->port[port].outq[outq].sched_mode =
+ PP2_PPIO_SCHED_M_WRR;
+ } else {
+ MRVL_LOG(ERR, "Unknown token: %s", entry);
+ return -1;
+ }
+ }
+
+ /* Read wrr weight */
+ if (cfg->port[port].outq[outq].sched_mode == PP2_PPIO_SCHED_M_WRR) {
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_WRR_WEIGHT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].weight = val;
+ }
+ }
+
+ /*
+ * There is no point in setting rate limiting for a specific outq
+ * as global port rate limiting takes priority.
+ */
+ if (cfg->port[port].rate_limit_enable) {
+ MRVL_LOG(WARNING, "Port %d rate limiting already enabled",
+ port);
+ return 0;
+ }
+
entry = rte_cfgfile_get_entry(file, sec_name,
- MRVL_TOK_WEIGHT);
+ MRVL_TOK_RATE_LIMIT_ENABLE);
if (entry) {
if (get_val_securely(entry, &val) < 0)
return -1;
- cfg->port[port].outq[outq].weight = (uint8_t)val;
+ cfg->port[port].outq[outq].rate_limit_enable = val;
+ }
+
+ if (!cfg->port[port].outq[outq].rate_limit_enable)
+ return 0;
+
+ /* Read CBS (in kB) */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_BURST_SIZE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].rate_limit_params.cbs = val;
+ }
+
+ /* Read CIR (in kbps) */
+ entry = rte_cfgfile_get_entry(file, sec_name, MRVL_TOK_RATE_LIMIT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ cfg->port[port].outq[outq].rate_limit_params.cir = val;
}
return 0;
@@ -190,7 +242,7 @@ get_entry_values(const char *entry, uint8_t *tab,
return -1;
/* Copy the entry to safely use rte_strsplit(). */
- snprintf(entry_cpy, RTE_DIM(entry_cpy), "%s", entry);
+ strlcpy(entry_cpy, entry, RTE_DIM(entry_cpy));
/*
* If there are more tokens than array size, rte_strsplit will
@@ -288,7 +340,7 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
RTE_DIM(cfg->port[port].tc[tc].inq),
MRVL_PP2_RXQ_MAX);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
@@ -303,7 +355,7 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
RTE_DIM(cfg->port[port].tc[tc].pcp),
MAX_PCP);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
@@ -318,12 +370,31 @@ parse_tc_cfg(struct rte_cfgfile *file, int port, int tc,
RTE_DIM(cfg->port[port].tc[tc].dscp),
MAX_DSCP);
if (n < 0) {
- RTE_LOG(ERR, PMD, "Error %d while parsing: %s\n",
+ MRVL_LOG(ERR, "Error %d while parsing: %s",
n, entry);
return n;
}
cfg->port[port].tc[tc].dscps = n;
}
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_DEFAULT_COLOR);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_GREEN))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_GREEN;
+ } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_YELLOW))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_YELLOW;
+ } else if (!strncmp(entry, MRVL_TOK_PLCR_DEFAULT_COLOR_RED,
+ sizeof(MRVL_TOK_PLCR_DEFAULT_COLOR_RED))) {
+ cfg->port[port].tc[tc].color = PP2_PPIO_COLOR_RED;
+ } else {
+ MRVL_LOG(ERR, "Error while parsing: %s", entry);
+ return -1;
+ }
+ }
+
return 0;
}
@@ -364,7 +435,7 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
if (n == 0) {
/* This is weird, but not bad. */
- RTE_LOG(WARNING, PMD, "Empty configuration file?\n");
+ MRVL_LOG(WARNING, "Empty configuration file?");
return 0;
}
@@ -390,12 +461,124 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
return -1;
(*cfg)->port[n].default_tc = (uint8_t)val;
} else {
- RTE_LOG(ERR, PMD,
- "Default Traffic Class required in custom configuration!\n");
+ MRVL_LOG(ERR,
+ "Default Traffic Class required in custom configuration!");
return -1;
}
entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_ENABLE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_enable = val;
+ }
+
+ if ((*cfg)->port[n].policer_enable) {
+ enum pp2_cls_plcr_token_unit unit;
+
+ /* Read policer token unit */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_UNIT);
+ if (entry) {
+ if (!strncmp(entry, MRVL_TOK_PLCR_UNIT_BYTES,
+ sizeof(MRVL_TOK_PLCR_UNIT_BYTES))) {
+ unit = PP2_CLS_PLCR_BYTES_TOKEN_UNIT;
+ } else if (!strncmp(entry,
+ MRVL_TOK_PLCR_UNIT_PACKETS,
+ sizeof(MRVL_TOK_PLCR_UNIT_PACKETS))) {
+ unit = PP2_CLS_PLCR_PACKETS_TOKEN_UNIT;
+ } else {
+ MRVL_LOG(ERR, "Unknown token: %s",
+ entry);
+ return -1;
+ }
+ (*cfg)->port[n].policer_params.token_unit =
+ unit;
+ }
+
+ /* Read policer color mode */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_COLOR);
+ if (entry) {
+ enum pp2_cls_plcr_color_mode mode;
+
+ if (!strncmp(entry, MRVL_TOK_PLCR_COLOR_BLIND,
+ sizeof(MRVL_TOK_PLCR_COLOR_BLIND))) {
+ mode = PP2_CLS_PLCR_COLOR_BLIND_MODE;
+ } else if (!strncmp(entry,
+ MRVL_TOK_PLCR_COLOR_AWARE,
+ sizeof(MRVL_TOK_PLCR_COLOR_AWARE))) {
+ mode = PP2_CLS_PLCR_COLOR_AWARE_MODE;
+ } else {
+ MRVL_LOG(ERR,
+ "Error in parsing: %s",
+ entry);
+ return -1;
+ }
+ (*cfg)->port[n].policer_params.color_mode =
+ mode;
+ }
+
+ /* Read policer cir */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_CIR);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.cir = val;
+ }
+
+ /* Read policer cbs */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_CBS);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.cbs = val;
+ }
+
+ /* Read policer ebs */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_PLCR_EBS);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].policer_params.ebs = val;
+ }
+ }
+
+ /*
+ * Read per-port rate limiting. Enabling it disables
+ * per-queue rate limiting.
+ */
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_RATE_LIMIT_ENABLE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_enable = val;
+ }
+
+ if ((*cfg)->port[n].rate_limit_enable) {
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_BURST_SIZE);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_params.cbs = val;
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
+ MRVL_TOK_RATE_LIMIT);
+ if (entry) {
+ if (get_val_securely(entry, &val) < 0)
+ return -1;
+ (*cfg)->port[n].rate_limit_params.cir = val;
+ }
+ }
+
+ entry = rte_cfgfile_get_entry(file, sec_name,
MRVL_TOK_MAPPING_PRIORITY);
if (entry) {
if (!strncmp(entry, MRVL_TOK_VLAN_IP,
@@ -450,16 +633,18 @@ mrvl_get_qoscfg(const char *key __rte_unused, const char *path,
* @param param TC parameters entry.
* @param inqs Number of MUSDK in-queues in this TC.
* @param bpool Bpool for this TC.
+ * @param color Default color for this TC.
* @returns 0 in case of success, exits otherwise.
*/
static int
setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
- struct pp2_bpool *bpool)
+ struct pp2_bpool *bpool, enum pp2_ppio_color color)
{
struct pp2_ppio_inq_params *inq_params;
param->pkt_offset = MRVL_PKT_OFFS;
param->pools[0] = bpool;
+ param->default_color = color;
inq_params = rte_zmalloc_socket("inq_params",
inqs * sizeof(*inq_params),
@@ -479,6 +664,34 @@ setup_tc(struct pp2_ppio_tc_params *param, uint8_t inqs,
}
/**
+ * Setup ingress policer.
+ *
+ * @param priv Port's private data.
+ * @param params Pointer to the policer's configuration.
+ * @returns 0 in case of success, negative values otherwise.
+ */
+static int
+setup_policer(struct mrvl_priv *priv, struct pp2_cls_plcr_params *params)
+{
+ char match[16];
+ int ret;
+
+ snprintf(match, sizeof(match), "policer-%d:%d",
+ priv->pp_id, priv->ppio_id);
+ params->match = match;
+
+ ret = pp2_cls_plcr_init(params, &priv->policer);
+ if (ret) {
+ MRVL_LOG(ERR, "Failed to setup %s", match);
+ return -1;
+ }
+
+ priv->ppio_params.inqs_params.plcr = priv->policer;
+
+ return 0;
+}
+
+/**
* Configure RX Queues in a given port.
*
* Sets up RX queues, their Traffic Classes and DPDK rxq->(TC,inq) mapping.
@@ -496,10 +709,13 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
if (mrvl_qos_cfg == NULL ||
mrvl_qos_cfg->port[portid].use_global_defaults) {
- /* No port configuration, use default: 1 TC, no QoS. */
+ /*
+ * No port configuration, use default: 1 TC, no QoS,
+ * TC color set to green.
+ */
priv->ppio_params.inqs_params.num_tcs = 1;
setup_tc(&priv->ppio_params.inqs_params.tcs_params[0],
- max_queues, priv->bpool);
+ max_queues, priv->bpool, PP2_PPIO_COLOR_GREEN);
/* Direct mapping of queues i.e. 0->0, 1->1 etc. */
for (i = 0; i < max_queues; ++i) {
@@ -526,8 +742,8 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].pcps > RTE_DIM(port_cfg->tc[0].pcp)) {
/* Better safe than sorry. */
- RTE_LOG(ERR, PMD,
- "Too many PCPs configured in TC %zu!\n", tc);
+ MRVL_LOG(ERR,
+ "Too many PCPs configured in TC %zu!", tc);
return -1;
}
for (i = 0; i < port_cfg->tc[tc].pcps; ++i) {
@@ -548,8 +764,8 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].dscps > RTE_DIM(port_cfg->tc[0].dscp)) {
/* Better safe than sorry. */
- RTE_LOG(ERR, PMD,
- "Too many DSCPs configured in TC %zu!\n", tc);
+ MRVL_LOG(ERR,
+ "Too many DSCPs configured in TC %zu!", tc);
return -1;
}
for (i = 0; i < port_cfg->tc[tc].dscps; ++i) {
@@ -570,8 +786,8 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
for (tc = 0; tc < RTE_DIM(port_cfg->tc); ++tc) {
if (port_cfg->tc[tc].inqs > RTE_DIM(port_cfg->tc[0].inq)) {
/* Overflow. */
- RTE_LOG(ERR, PMD,
- "Too many RX queues configured per TC %zu!\n",
+ MRVL_LOG(ERR,
+ "Too many RX queues configured per TC %zu!",
tc);
return -1;
}
@@ -579,7 +795,7 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
uint8_t idx = port_cfg->tc[tc].inq[i];
if (idx > RTE_DIM(priv->rxq_map)) {
- RTE_LOG(ERR, PMD, "Bad queue index %d!\n", idx);
+ MRVL_LOG(ERR, "Bad queue index %d!", idx);
return -1;
}
@@ -597,11 +813,53 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
break;
setup_tc(&priv->ppio_params.inqs_params.tcs_params[i],
port_cfg->tc[i].inqs,
- priv->bpool);
+ priv->bpool, port_cfg->tc[i].color);
}
priv->ppio_params.inqs_params.num_tcs = i;
+ if (port_cfg->policer_enable)
+ return setup_policer(priv, &port_cfg->policer_params);
+
+ return 0;
+}
+
+/**
+ * Configure TX Queues in a given port.
+ *
+ * Sets up the TX queues' egress scheduler and rate limiter.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues)
+{
+ /* We need only a subset of configuration. */
+ struct port_cfg *port_cfg;
+ int i;
+
+ if (mrvl_qos_cfg == NULL)
+ return 0;
+
+ port_cfg = &mrvl_qos_cfg->port[portid];
+
+ priv->ppio_params.rate_limit_enable = port_cfg->rate_limit_enable;
+ if (port_cfg->rate_limit_enable)
+ priv->ppio_params.rate_limit_params =
+ port_cfg->rate_limit_params;
+
+ for (i = 0; i < max_queues; i++) {
+ struct pp2_ppio_outq_params *params =
+ &priv->ppio_params.outqs_params.outqs_params[i];
+
+ params->sched_mode = port_cfg->outq[i].sched_mode;
+ params->weight = port_cfg->outq[i].weight;
+ params->rate_limit_enable = port_cfg->outq[i].rate_limit_enable;
+ params->rate_limit_params = port_cfg->outq[i].rate_limit_params;
+ }
+
return 0;
}
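+
+/*
+ * A minimal usage sketch, assuming the function is called from the port
+ * configuration path once the number of TX queues is known:
+ *
+ *	ret = mrvl_configure_txqs(priv, dev->data->port_id,
+ *				  dev->data->nb_tx_queues);
+ */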
@@ -620,7 +878,7 @@ mrvl_start_qos_mapping(struct mrvl_priv *priv)
size_t i;
if (priv->ppio == NULL) {
- RTE_LOG(ERR, PMD, "ppio must not be NULL here!\n");
+ MRVL_LOG(ERR, "ppio must not be NULL here!");
return -1;
}
diff --git a/drivers/net/mrvl/mrvl_qos.h b/drivers/net/mvpp2/mrvl_qos.h
index ae7508c9..fa9ddecb 100644
--- a/drivers/net/mrvl/mrvl_qos.h
+++ b/drivers/net/mvpp2/mrvl_qos.h
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#ifndef _MRVL_QOS_H_
@@ -48,6 +20,8 @@
/* QoS config. */
struct mrvl_qos_cfg {
struct port_cfg {
+ int rate_limit_enable;
+ struct pp2_ppio_rate_limit_params rate_limit_params;
struct {
uint8_t inq[MRVL_PP2_RXQ_MAX];
uint8_t dscp[MRVL_CP_PER_TC];
@@ -55,15 +29,21 @@ struct mrvl_qos_cfg {
uint8_t inqs;
uint8_t dscps;
uint8_t pcps;
+ enum pp2_ppio_color color;
} tc[MRVL_PP2_TC_MAX];
struct {
+ enum pp2_ppio_outq_sched_mode sched_mode;
uint8_t weight;
+ int rate_limit_enable;
+ struct pp2_ppio_rate_limit_params rate_limit_params;
} outq[MRVL_PP2_RXQ_MAX];
enum pp2_cls_qos_tbl_type mapping_priority;
uint16_t inqs;
uint16_t outqs;
uint8_t default_tc;
uint8_t use_global_defaults;
+ struct pp2_cls_plcr_params policer_params;
+ uint8_t policer_enable;
} port[RTE_MAX_ETHPORTS];
};
@@ -99,6 +79,20 @@ mrvl_configure_rxqs(struct mrvl_priv *priv, uint16_t portid,
uint16_t max_queues);
/**
+ * Configure TX queues on a given port.
+ *
+ * Sets up the egress scheduler and rate limiter for the TX queues.
+ *
+ * @param priv Port's private data
+ * @param portid DPDK port ID
+ * @param max_queues Maximum number of queues to configure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_configure_txqs(struct mrvl_priv *priv, uint16_t portid,
+ uint16_t max_queues);
+
+/**
* Start QoS mapping.
*
* Finalize QoS table configuration and initialize it in SDK. It can be done
diff --git a/drivers/net/mrvl/rte_pmd_mrvl_version.map b/drivers/net/mvpp2/rte_pmd_mvpp2_version.map
index a7530317..a7530317 100644
--- a/drivers/net/mrvl/rte_pmd_mrvl_version.map
+++ b/drivers/net/mvpp2/rte_pmd_mvpp2_version.map
diff --git a/drivers/net/netvsc/Makefile b/drivers/net/netvsc/Makefile
new file mode 100644
index 00000000..3c713af3
--- /dev/null
+++ b/drivers/net/netvsc/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_pmd_netvsc.a
+
+CFLAGS += -O3 $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+EXPORT_MAP := rte_pmd_netvsc_version.map
+
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_rndis.c
+SRCS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += hn_nvs.c
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_vmbus
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
new file mode 100644
index 00000000..78b842ba
--- /dev/null
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -0,0 +1,761 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Microsoft Corporation
+ * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_rndis.h"
+#include "hn_nvs.h"
+#include "ndis.h"
+
+#define HN_TX_OFFLOAD_CAPS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_VLAN_INSERT)
+
+#define HN_RX_OFFLOAD_CAPS (DEV_RX_OFFLOAD_CHECKSUM | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_CRC_STRIP)
+
+int hn_logtype_init;
+int hn_logtype_driver;
+
+struct hn_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct hn_xstats_name_off hn_stat_strings[] = {
+ { "good_packets", offsetof(struct hn_stats, packets) },
+ { "good_bytes", offsetof(struct hn_stats, bytes) },
+ { "errors", offsetof(struct hn_stats, errors) },
+ { "allocation_failed", offsetof(struct hn_stats, nomemory) },
+ { "multicast_packets", offsetof(struct hn_stats, multicast) },
+ { "broadcast_packets", offsetof(struct hn_stats, broadcast) },
+ { "undersize_packets", offsetof(struct hn_stats, size_bins[0]) },
+ { "size_64_packets", offsetof(struct hn_stats, size_bins[1]) },
+ { "size_65_127_packets", offsetof(struct hn_stats, size_bins[2]) },
+ { "size_128_255_packets", offsetof(struct hn_stats, size_bins[3]) },
+ { "size_256_511_packets", offsetof(struct hn_stats, size_bins[4]) },
+ { "size_512_1023_packets", offsetof(struct hn_stats, size_bins[5]) },
+ { "size_1024_1518_packets", offsetof(struct hn_stats, size_bins[6]) },
+ { "size_1519_max_packets", offsetof(struct hn_stats, size_bins[7]) },
+};
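
The table above is the usual DPDK xstats idiom: each extended statistic is declared once as a name plus a byte offset into the per-queue stats struct, and values are fetched generically later. A self-contained sketch of the technique, independent of this driver:

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct stats { uint64_t packets, bytes, errors; };
    struct name_off { const char *name; size_t offset; };

    static const struct name_off tbl[] = {
        { "packets", offsetof(struct stats, packets) },
        { "bytes",   offsetof(struct stats, bytes) },
        { "errors",  offsetof(struct stats, errors) },
    };

    int main(void)
    {
        struct stats s = { 10, 1500, 1 };
        const char *base = (const char *)&s;
        size_t i;

        /* Read each counter through its recorded byte offset,
         * as hn_dev_xstats_get() does with hn_stat_strings.
         */
        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
            printf("%s = %" PRIu64 "\n", tbl[i].name,
                   *(const uint64_t *)(base + tbl[i].offset));
        return 0;
    }
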
+
+static struct rte_eth_dev *
+eth_dev_vmbus_allocate(struct rte_vmbus_device *dev, size_t private_data_size)
+{
+ struct rte_eth_dev *eth_dev;
+ const char *name;
+
+ if (!dev)
+ return NULL;
+
+ name = dev->device.name;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(NOTICE, "can not allocate rte ethdev");
+ return NULL;
+ }
+
+ if (private_data_size) {
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, private_data_size,
+ RTE_CACHE_LINE_SIZE, dev->device.numa_node);
+ if (!eth_dev->data->dev_private) {
+ PMD_DRV_LOG(NOTICE, "can not allocate driver data");
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(NOTICE, "can not attach secondary");
+ return NULL;
+ }
+ }
+
+ eth_dev->device = &dev->device;
+ eth_dev->intr_handle = &dev->intr_handle;
+
+ return eth_dev;
+}
+
+static void
+eth_dev_vmbus_release(struct rte_eth_dev *eth_dev)
+{
+ /* free ether device */
+ rte_eth_dev_release_port(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+
+ eth_dev->data->dev_private = NULL;
+
+ /*
+ * Secondary process will check the name to attach.
+	 * Clear this field to avoid attaching a released port.
+ */
+ eth_dev->data->name[0] = '\0';
+
+ eth_dev->device = NULL;
+ eth_dev->intr_handle = NULL;
+}
+
+/* Update link status.
+ * Note: the DPDK definition of "wait_to_complete"
+ * means block this call until link is up,
+ * which is not worth supporting here.
+ */
+static int
+hn_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct rte_eth_link link, old;
+ int error;
+
+ old = dev->data->dev_link;
+
+ error = hn_rndis_get_linkstatus(hv);
+ if (error)
+ return error;
+
+ hn_rndis_get_linkspeed(hv);
+
+ link = (struct rte_eth_link) {
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+		.link_autoneg = ETH_LINK_FIXED,
+ .link_speed = hv->link_speed / 10000,
+ };
+
+ if (hv->link_status == NDIS_MEDIA_STATE_CONNECTED)
+ link.link_status = ETH_LINK_UP;
+ else
+ link.link_status = ETH_LINK_DOWN;
+
+ if (old.link_status == link.link_status)
+ return 0;
+
+ PMD_INIT_LOG(DEBUG, "Port %d is %s", dev->data->port_id,
+ (link.link_status == ETH_LINK_UP) ? "up" : "down");
+
+ return rte_eth_linkstatus_set(dev, &link);
+}
+
+static void hn_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
+ dev_info->min_rx_bufsize = HN_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_pktlen = HN_MAX_XFER_LEN;
+ dev_info->max_mac_addrs = 1;
+
+ dev_info->hash_key_size = NDIS_HASH_KEYSIZE_TOEPLITZ;
+ dev_info->flow_type_rss_offloads =
+ ETH_RSS_IPV4 | ETH_RSS_IPV6 | ETH_RSS_TCP | ETH_RSS_UDP;
+
+ dev_info->max_rx_queues = hv->max_queues;
+ dev_info->max_tx_queues = hv->max_queues;
+
+ hn_rndis_get_offload(hv, dev_info);
+}
+
+static void
+hn_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_PROMISCUOUS);
+}
+
+static void
+hn_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ uint32_t filter;
+
+ filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST;
+ if (dev->data->all_multicast)
+ filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
+ hn_rndis_set_rxfilter(hv, filter);
+}
+
+static void
+hn_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_BROADCAST);
+}
+
+static void
+hn_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_DIRECTED |
+ NDIS_PACKET_TYPE_BROADCAST);
+}
+
+/* Set up the VMBus sub-channels shared by the RX/TX queue pairs */
+static int hn_subchan_configure(struct hn_data *hv,
+ uint32_t subchan)
+{
+ struct vmbus_channel *primary = hn_primary_chan(hv);
+ int err;
+ unsigned int retry = 0;
+
+ PMD_DRV_LOG(DEBUG,
+ "open %u subchannels", subchan);
+
+ /* Send create sub channels command */
+ err = hn_nvs_alloc_subchans(hv, &subchan);
+ if (err)
+ return err;
+
+ while (subchan > 0) {
+ struct vmbus_channel *new_sc;
+ uint16_t chn_index;
+
+ err = rte_vmbus_subchan_open(primary, &new_sc);
+ if (err == -ENOENT && ++retry < 1000) {
+ /* This can happen if not ready yet */
+ rte_delay_ms(10);
+ continue;
+ }
+
+ if (err) {
+ PMD_DRV_LOG(ERR,
+ "open subchannel failed: %d", err);
+ return err;
+ }
+
+ retry = 0;
+ chn_index = rte_vmbus_sub_channel_index(new_sc);
+ if (chn_index == 0 || chn_index > hv->max_queues) {
+ PMD_DRV_LOG(ERR,
+ "Invalid subchannel offermsg channel %u",
+ chn_index);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(DEBUG, "new sub channel %u", chn_index);
+ hv->channels[chn_index] = new_sc;
+ --subchan;
+ }
+
+ return err;
+}
+
+static int hn_dev_configure(struct rte_eth_dev *dev)
+{
+ const struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
+ const struct rte_eth_txmode *txmode = &dev_conf->txmode;
+
+ const struct rte_eth_rss_conf *rss_conf =
+ &dev_conf->rx_adv_conf.rss_conf;
+ struct hn_data *hv = dev->data->dev_private;
+ uint64_t unsupported;
+ int err, subchan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
+ if (unsupported) {
+ PMD_DRV_LOG(NOTICE,
+ "unsupported TX offload: %#" PRIx64,
+ unsupported);
+ return -EINVAL;
+ }
+
+ unsupported = rxmode->offloads & ~HN_RX_OFFLOAD_CAPS;
+ if (unsupported) {
+ PMD_DRV_LOG(NOTICE,
+ "unsupported RX offload: %#" PRIx64,
+			    unsupported);
+ return -EINVAL;
+ }
+
+ err = hn_rndis_conf_offload(hv, txmode->offloads,
+ rxmode->offloads);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "offload configure failed");
+ return err;
+ }
+
+ hv->num_queues = RTE_MAX(dev->data->nb_rx_queues,
+ dev->data->nb_tx_queues);
+ subchan = hv->num_queues - 1;
+ if (subchan > 0) {
+ err = hn_subchan_configure(hv, subchan);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "subchannel configuration failed");
+ return err;
+ }
+
+ err = hn_rndis_conf_rss(hv, rss_conf);
+ if (err) {
+ PMD_DRV_LOG(NOTICE,
+ "rss configuration failed");
+ return err;
+ }
+ }
+
+ return 0;
+}
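
A worked example of the channel arithmetic in hn_dev_configure() above: queue 0 always rides the primary VMBus channel, so only num_queues - 1 sub-channels are requested (numbers illustrative):

    /* nb_rx_queues = 4, nb_tx_queues = 2
     * num_queues   = RTE_MAX(4, 2) = 4
     * subchan      = 4 - 1 = 3
     * Three sub-channels are opened, and RSS is configured only
     * when at least one sub-channel exists (subchan > 0).
     */
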
+
+static int hn_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ stats->opackets += txq->stats.packets;
+ stats->obytes += txq->stats.bytes;
+ stats->oerrors += txq->stats.errors + txq->stats.nomemory;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txq->stats.packets;
+ stats->q_obytes[i] = txq->stats.bytes;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ stats->ipackets += rxq->stats.packets;
+ stats->ibytes += rxq->stats.bytes;
+ stats->ierrors += rxq->stats.errors;
+ stats->imissed += rxq->ring_full;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxq->stats.packets;
+ stats->q_ibytes[i] = rxq->stats.bytes;
+ }
+ }
+
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+ return 0;
+}
+
+static void
+hn_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+ memset(&txq->stats, 0, sizeof(struct hn_stats));
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ memset(&rxq->stats, 0, sizeof(struct hn_stats));
+ rxq->ring_full = 0;
+ }
+}
+
+static int
+hn_dev_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
+{
+ unsigned int i, t, count = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!xstats_names)
+ return dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
+ + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+
+	/* Note: limit checked in rte_eth_xstats_get_names() */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ snprintf(xstats_names[count++].name,
+ RTE_ETH_XSTATS_NAME_SIZE,
+ "tx_q%u_%s", i, hn_stat_strings[t].name);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ snprintf(xstats_names[count++].name,
+ RTE_ETH_XSTATS_NAME_SIZE,
+ "rx_q%u_%s", i,
+ hn_stat_strings[t].name);
+ }
+
+ return count;
+}
+
+static int
+hn_dev_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i, t, count = 0;
+
+ const unsigned int nstats =
+ dev->data->nb_tx_queues * RTE_DIM(hn_stat_strings)
+ + dev->data->nb_rx_queues * RTE_DIM(hn_stat_strings);
+ const char *stats;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (n < nstats)
+ return nstats;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ const struct hn_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (!txq)
+ continue;
+
+ stats = (const char *)&txq->stats;
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ xstats[count++].value = *(const uint64_t *)
+ (stats + hn_stat_strings[t].offset);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ const struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (!rxq)
+ continue;
+
+ stats = (const char *)&rxq->stats;
+ for (t = 0; t < RTE_DIM(hn_stat_strings); t++)
+ xstats[count++].value = *(const uint64_t *)
+ (stats + hn_stat_strings[t].offset);
+ }
+
+ return count;
+}
+
+static int
+hn_dev_start(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* check if lsc interrupt feature is enabled */
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ PMD_DRV_LOG(ERR, "link status not supported yet");
+ return -ENOTSUP;
+ }
+
+ return hn_rndis_set_rxfilter(hv,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+}
+
+static void
+hn_dev_stop(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hn_rndis_set_rxfilter(hv, 0);
+}
+
+static void
+hn_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+ PMD_INIT_LOG(DEBUG, "close");
+}
+
+static const struct eth_dev_ops hn_eth_dev_ops = {
+ .dev_configure = hn_dev_configure,
+ .dev_start = hn_dev_start,
+ .dev_stop = hn_dev_stop,
+ .dev_close = hn_dev_close,
+ .dev_infos_get = hn_dev_info_get,
+ .txq_info_get = hn_dev_tx_queue_info,
+ .rxq_info_get = hn_dev_rx_queue_info,
+ .promiscuous_enable = hn_dev_promiscuous_enable,
+ .promiscuous_disable = hn_dev_promiscuous_disable,
+ .allmulticast_enable = hn_dev_allmulticast_enable,
+ .allmulticast_disable = hn_dev_allmulticast_disable,
+ .tx_queue_setup = hn_dev_tx_queue_setup,
+ .tx_queue_release = hn_dev_tx_queue_release,
+ .rx_queue_setup = hn_dev_rx_queue_setup,
+ .rx_queue_release = hn_dev_rx_queue_release,
+ .link_update = hn_dev_link_update,
+ .stats_get = hn_dev_stats_get,
+ .xstats_get = hn_dev_xstats_get,
+ .xstats_get_names = hn_dev_xstats_get_names,
+ .stats_reset = hn_dev_stats_reset,
+ .xstats_reset = hn_dev_stats_reset,
+};
+
+/*
+ * Setup connection between PMD and kernel.
+ */
+static int
+hn_attach(struct hn_data *hv, unsigned int mtu)
+{
+ int error;
+
+ /* Attach NVS */
+ error = hn_nvs_attach(hv, mtu);
+ if (error)
+ goto failed_nvs;
+
+ /* Attach RNDIS */
+ error = hn_rndis_attach(hv);
+ if (error)
+ goto failed_rndis;
+
+ /*
+ * NOTE:
+ * Under certain conditions on certain versions of Hyper-V,
+ * the RNDIS rxfilter is _not_ zero on the hypervisor side
+ * after the successful RNDIS initialization.
+ */
+ hn_rndis_set_rxfilter(hv, NDIS_PACKET_TYPE_NONE);
+ return 0;
+failed_rndis:
+ hn_nvs_detach(hv);
+failed_nvs:
+ return error;
+}
+
+static void
+hn_detach(struct hn_data *hv)
+{
+ hn_nvs_detach(hv);
+ hn_rndis_detach(hv);
+}
+
+static int
+eth_hn_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct hn_data *hv = eth_dev->data->dev_private;
+ struct rte_device *device = eth_dev->device;
+ struct rte_vmbus_device *vmbus;
+ unsigned int rxr_cnt;
+ int err, max_chan;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vmbus = container_of(device, struct rte_vmbus_device, device);
+ eth_dev->dev_ops = &hn_eth_dev_ops;
+ eth_dev->tx_pkt_burst = &hn_xmit_pkts;
+ eth_dev->rx_pkt_burst = &hn_recv_pkts;
+
+ /*
+ * for secondary processes, we don't initialize any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Since Hyper-V only supports one MAC address, just use local data */
+ eth_dev->data->mac_addrs = &hv->mac_addr;
+
+ hv->vmbus = vmbus;
+ hv->rxbuf_res = &vmbus->resource[HV_RECV_BUF_MAP];
+ hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
+ hv->port_id = eth_dev->data->port_id;
+
+ /* Initialize primary channel input for control operations */
+ err = rte_vmbus_chan_open(vmbus, &hv->channels[0]);
+ if (err)
+ return err;
+
+ hv->primary = hn_rx_queue_alloc(hv, 0,
+ eth_dev->device->numa_node);
+
+ if (!hv->primary)
+ return -ENOMEM;
+
+ err = hn_attach(hv, ETHER_MTU);
+ if (err)
+ goto failed;
+
+ err = hn_tx_pool_init(eth_dev);
+ if (err)
+ goto failed;
+
+ err = hn_rndis_get_eaddr(hv, hv->mac_addr.addr_bytes);
+ if (err)
+ goto failed;
+
+ max_chan = rte_vmbus_max_channels(vmbus);
+ PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan);
+	if (max_chan <= 0) {
+		err = -ENODEV;
+		goto failed;
+	}
+
+ if (hn_rndis_query_rsscaps(hv, &rxr_cnt) != 0)
+ rxr_cnt = 1;
+
+ hv->max_queues = RTE_MIN(rxr_cnt, (unsigned int)max_chan);
+
+ return 0;
+
+failed:
+ PMD_INIT_LOG(NOTICE, "device init failed");
+
+ hn_detach(hv);
+ return err;
+}
+
+static int
+eth_hn_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct hn_data *hv = eth_dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ hn_dev_stop(eth_dev);
+ hn_dev_close(eth_dev);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+
+ hn_detach(hv);
+ rte_vmbus_chan_close(hv->primary->chan);
+ rte_free(hv->primary);
+
+ eth_dev->data->mac_addrs = NULL;
+
+ return 0;
+}
+
+static int eth_hn_probe(struct rte_vmbus_driver *drv __rte_unused,
+ struct rte_vmbus_device *dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = eth_dev_vmbus_allocate(dev, sizeof(struct hn_data));
+ if (!eth_dev)
+ return -ENOMEM;
+
+ ret = eth_hn_dev_init(eth_dev);
+ if (ret)
+ eth_dev_vmbus_release(eth_dev);
+ else
+ rte_eth_dev_probing_finish(eth_dev);
+
+ return ret;
+}
+
+static int eth_hn_remove(struct rte_vmbus_device *dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev = rte_eth_dev_allocated(dev->device.name);
+ if (!eth_dev)
+ return -ENODEV;
+
+ ret = eth_hn_dev_uninit(eth_dev);
+ if (ret)
+ return ret;
+
+ eth_dev_vmbus_release(eth_dev);
+ return 0;
+}
+
+/* Network device GUID */
+static const rte_uuid_t hn_net_ids[] = {
+ /* f8615163-df3e-46c5-913f-f2d2f965ed0e */
+ RTE_UUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x913f, 0xf2d2f965ed0eULL),
+ { 0 }
+};
+
+static struct rte_vmbus_driver rte_netvsc_pmd = {
+ .id_table = hn_net_ids,
+ .probe = eth_hn_probe,
+ .remove = eth_hn_remove,
+};
+
+RTE_PMD_REGISTER_VMBUS(net_netvsc, rte_netvsc_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
+
+RTE_INIT(hn_init_log);
+static void
+hn_init_log(void)
+{
+ hn_logtype_init = rte_log_register("pmd.net.netvsc.init");
+ if (hn_logtype_init >= 0)
+ rte_log_set_level(hn_logtype_init, RTE_LOG_NOTICE);
+ hn_logtype_driver = rte_log_register("pmd.net.netvsc.driver");
+ if (hn_logtype_driver >= 0)
+ rte_log_set_level(hn_logtype_driver, RTE_LOG_NOTICE);
+}
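
With the two dynamic log types registered above, debug output can be raised at run time instead of rebuilding; for example, passing the standard EAL option --log-level=pmd.net.netvsc.driver:8 (8 being RTE_LOG_DEBUG) should enable driver-level debug messages, assuming the usual DPDK log-level syntax.
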
diff --git a/drivers/net/netvsc/hn_logs.h b/drivers/net/netvsc/hn_logs.h
new file mode 100644
index 00000000..cddadef0
--- /dev/null
+++ b/drivers/net/netvsc/hn_logs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#ifndef _HN_LOGS_H_
+#define _HN_LOGS_H_
+
+#include <rte_log.h>
+
+extern int hn_logtype_init;
+extern int hn_logtype_driver;
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_init, "%s(): " fmt "\n",\
+ __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, \
+ "%s() rx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, \
+ "%s() tx: " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, hn_logtype_driver, "%s(): " fmt "\n", \
+ __func__, ## args)
+
+#endif /* _HN_LOGS_H_ */
diff --git a/drivers/net/netvsc/hn_nvs.c b/drivers/net/netvsc/hn_nvs.c
new file mode 100644
index 00000000..77d3b839
--- /dev/null
+++ b/drivers/net/netvsc/hn_nvs.c
@@ -0,0 +1,546 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * Copyright (c) 2010-2012 Citrix Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * All rights reserved.
+ */
+
+/*
+ * Network Virtualization Service.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_nvs.h"
+
+static const uint32_t hn_nvs_version[] = {
+ NVS_VERSION_61,
+ NVS_VERSION_6,
+ NVS_VERSION_5,
+ NVS_VERSION_4,
+ NVS_VERSION_2,
+ NVS_VERSION_1
+};
+
+static int hn_nvs_req_send(struct hn_data *hv,
+ void *req, uint32_t reqlen)
+{
+ return rte_vmbus_chan_send(hn_primary_chan(hv),
+ VMBUS_CHANPKT_TYPE_INBAND,
+ req, reqlen, 0,
+ VMBUS_CHANPKT_FLAG_NONE, NULL);
+}
+
+static int
+hn_nvs_execute(struct hn_data *hv,
+ void *req, uint32_t reqlen,
+ void *resp, uint32_t resplen,
+ uint32_t type)
+{
+ struct vmbus_channel *chan = hn_primary_chan(hv);
+ char buffer[NVS_RESPSIZE_MAX];
+ const struct hn_nvs_hdr *hdr;
+ uint32_t len;
+ int ret;
+
+ /* Send request to ring buffer */
+ ret = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+ req, reqlen, 0,
+ VMBUS_CHANPKT_FLAG_RC, NULL);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "send request failed: %d", ret);
+ return ret;
+ }
+
+ retry:
+ len = sizeof(buffer);
+ ret = rte_vmbus_chan_recv(chan, buffer, &len, NULL);
+ if (ret == -EAGAIN) {
+ rte_delay_us(HN_CHAN_INTERVAL_US);
+ goto retry;
+ }
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "recv response failed: %d", ret);
+ return ret;
+ }
+
+ hdr = (struct hn_nvs_hdr *)buffer;
+ if (hdr->type != type) {
+ PMD_DRV_LOG(ERR, "unexpected NVS resp %#x, expect %#x",
+ hdr->type, type);
+ return -EINVAL;
+ }
+
+ if (len < resplen) {
+ PMD_DRV_LOG(ERR,
+ "invalid NVS resp len %u (expect %u)",
+ len, resplen);
+ return -EINVAL;
+ }
+
+ memcpy(resp, buffer, resplen);
+
+ /* All pass! */
+ return 0;
+}
+
+static int
+hn_nvs_doinit(struct hn_data *hv, uint32_t nvs_ver)
+{
+ struct hn_nvs_init init;
+ struct hn_nvs_init_resp resp;
+ uint32_t status;
+ int error;
+
+ memset(&init, 0, sizeof(init));
+ init.type = NVS_TYPE_INIT;
+ init.ver_min = nvs_ver;
+ init.ver_max = nvs_ver;
+
+ error = hn_nvs_execute(hv, &init, sizeof(init),
+ &resp, sizeof(resp),
+ NVS_TYPE_INIT_RESP);
+ if (error)
+ return error;
+
+ status = resp.status;
+ if (status != NVS_STATUS_OK) {
+ /* Not fatal, try other versions */
+ PMD_INIT_LOG(DEBUG, "nvs init failed for ver 0x%x",
+ nvs_ver);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+hn_nvs_conn_rxbuf(struct hn_data *hv)
+{
+ struct hn_nvs_rxbuf_conn conn;
+ struct hn_nvs_rxbuf_connresp resp;
+ uint32_t status;
+ int error;
+
+ /* Kernel has already setup RXBUF on primary channel. */
+
+ /*
+ * Connect RXBUF to NVS.
+ */
+ conn.type = NVS_TYPE_RXBUF_CONN;
+ conn.gpadl = hv->rxbuf_res->phys_addr;
+ conn.sig = NVS_RXBUF_SIG;
+	PMD_DRV_LOG(DEBUG, "connect rxbuf va=%p gpadl=%#" PRIx64,
+ hv->rxbuf_res->addr,
+ hv->rxbuf_res->phys_addr);
+
+ error = hn_nvs_execute(hv, &conn, sizeof(conn),
+ &resp, sizeof(resp),
+ NVS_TYPE_RXBUF_CONNRESP);
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "exec nvs rxbuf conn failed: %d",
+ error);
+ return error;
+ }
+
+ status = resp.status;
+ if (status != NVS_STATUS_OK) {
+ PMD_DRV_LOG(ERR,
+ "nvs rxbuf conn failed: %x", status);
+ return -EIO;
+ }
+ if (resp.nsect != 1) {
+ PMD_DRV_LOG(ERR,
+ "nvs rxbuf response num sections %u != 1",
+ resp.nsect);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "receive buffer size %u count %u",
+ resp.nvs_sect[0].slotsz,
+ resp.nvs_sect[0].slotcnt);
+ hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
+
+ hv->rxbuf_info = rte_calloc("HN_RXBUF_INFO", hv->rxbuf_section_cnt,
+ sizeof(*hv->rxbuf_info), RTE_CACHE_LINE_SIZE);
+ if (!hv->rxbuf_info) {
+ PMD_DRV_LOG(ERR,
+ "could not allocate rxbuf info");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+hn_nvs_disconn_rxbuf(struct hn_data *hv)
+{
+ struct hn_nvs_rxbuf_disconn disconn;
+ int error;
+
+ /*
+ * Disconnect RXBUF from NVS.
+ */
+ memset(&disconn, 0, sizeof(disconn));
+ disconn.type = NVS_TYPE_RXBUF_DISCONN;
+ disconn.sig = NVS_RXBUF_SIG;
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs rxbuf disconn failed: %d",
+ error);
+ }
+
+ rte_free(hv->rxbuf_info);
+ /*
+ * Linger long enough for NVS to disconnect RXBUF.
+ */
+ rte_delay_ms(200);
+}
+
+static void
+hn_nvs_disconn_chim(struct hn_data *hv)
+{
+ int error;
+
+ if (hv->chim_cnt != 0) {
+ struct hn_nvs_chim_disconn disconn;
+
+ /* Disconnect chimney sending buffer from NVS. */
+ memset(&disconn, 0, sizeof(disconn));
+ disconn.type = NVS_TYPE_CHIM_DISCONN;
+ disconn.sig = NVS_CHIM_SIG;
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &disconn, sizeof(disconn));
+
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs chim disconn failed: %d", error);
+ }
+
+ hv->chim_cnt = 0;
+ /*
+ * Linger long enough for NVS to disconnect chimney
+ * sending buffer.
+ */
+ rte_delay_ms(200);
+ }
+}
+
+static int
+hn_nvs_conn_chim(struct hn_data *hv)
+{
+ struct hn_nvs_chim_conn chim;
+ struct hn_nvs_chim_connresp resp;
+ uint32_t sectsz;
+ unsigned long len = hv->chim_res->len;
+ int error;
+
+ /* Connect chimney sending buffer to NVS */
+ memset(&chim, 0, sizeof(chim));
+ chim.type = NVS_TYPE_CHIM_CONN;
+ chim.gpadl = hv->chim_res->phys_addr;
+ chim.sig = NVS_CHIM_SIG;
+	PMD_DRV_LOG(DEBUG, "connect send buf va=%p gpadl=%#" PRIx64,
+ hv->chim_res->addr,
+ hv->chim_res->phys_addr);
+
+ error = hn_nvs_execute(hv, &chim, sizeof(chim),
+ &resp, sizeof(resp),
+ NVS_TYPE_CHIM_CONNRESP);
+ if (error) {
+ PMD_DRV_LOG(ERR, "exec nvs chim conn failed");
+ goto cleanup;
+ }
+
+ if (resp.status != NVS_STATUS_OK) {
+ PMD_DRV_LOG(ERR, "nvs chim conn failed: %x",
+ resp.status);
+ error = -EIO;
+ goto cleanup;
+ }
+
+ sectsz = resp.sectsz;
+ if (sectsz == 0 || sectsz & (sizeof(uint32_t) - 1)) {
+ /* Can't use chimney sending buffer; done! */
+ PMD_DRV_LOG(NOTICE,
+ "invalid chimney sending buffer section size: %u",
+ sectsz);
+ return 0;
+ }
+
+ hv->chim_szmax = sectsz;
+ hv->chim_cnt = len / sectsz;
+
+ PMD_DRV_LOG(INFO, "send buffer %lu section size:%u, count:%u",
+ len, hv->chim_szmax, hv->chim_cnt);
+
+ if (len % hv->chim_szmax != 0) {
+ PMD_DRV_LOG(NOTICE,
+ "chimney sending sections are not properly aligned");
+ }
+
+ /* Done! */
+ return 0;
+
+cleanup:
+ hn_nvs_disconn_chim(hv);
+ return error;
+}
+
+/*
+ * Configure MTU and enable VLAN.
+ */
+static int
+hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu)
+{
+ struct hn_nvs_ndis_conf conf;
+ int error;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.type = NVS_TYPE_NDIS_CONF;
+ conf.mtu = mtu + ETHER_HDR_LEN;
+ conf.caps = NVS_NDIS_CONF_VLAN;
+
+	/* TODO enable SRIOV:
+	 * if (hv->nvs_ver >= NVS_VERSION_5)
+	 *	conf.caps |= NVS_NDIS_CONF_SRIOV;
+	 */
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &conf, sizeof(conf));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "send nvs ndis conf failed: %d", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int
+hn_nvs_init_ndis(struct hn_data *hv)
+{
+ struct hn_nvs_ndis_init ndis;
+ int error;
+
+ memset(&ndis, 0, sizeof(ndis));
+ ndis.type = NVS_TYPE_NDIS_INIT;
+ ndis.ndis_major = NDIS_VERSION_MAJOR(hv->ndis_ver);
+ ndis.ndis_minor = NDIS_VERSION_MINOR(hv->ndis_ver);
+
+ /* NOTE: No response. */
+ error = hn_nvs_req_send(hv, &ndis, sizeof(ndis));
+ if (error)
+ PMD_DRV_LOG(ERR,
+ "send nvs ndis init failed: %d", error);
+
+ return error;
+}
+
+static int
+hn_nvs_init(struct hn_data *hv)
+{
+ unsigned int i;
+ int error;
+
+ /*
+ * Find the supported NVS version and set NDIS version accordingly.
+ */
+ for (i = 0; i < RTE_DIM(hn_nvs_version); ++i) {
+ error = hn_nvs_doinit(hv, hn_nvs_version[i]);
+ if (error) {
+ PMD_INIT_LOG(DEBUG, "version %#x error %d",
+ hn_nvs_version[i], error);
+ continue;
+ }
+
+ hv->nvs_ver = hn_nvs_version[i];
+
+ /* Set NDIS version according to NVS version. */
+ hv->ndis_ver = NDIS_VERSION_6_30;
+ if (hv->nvs_ver <= NVS_VERSION_4)
+ hv->ndis_ver = NDIS_VERSION_6_1;
+
+ PMD_INIT_LOG(DEBUG,
+ "NVS version %#x, NDIS version %u.%u",
+ hv->nvs_ver, NDIS_VERSION_MAJOR(hv->ndis_ver),
+ NDIS_VERSION_MINOR(hv->ndis_ver));
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR,
+ "no NVS compatible version available");
+ return -ENXIO;
+}
+
+int
+hn_nvs_attach(struct hn_data *hv, unsigned int mtu)
+{
+ int error;
+
+ /*
+ * Initialize NVS.
+ */
+ error = hn_nvs_init(hv);
+ if (error)
+ return error;
+
+	/* Configure NDIS before initializing it. */
+ if (hv->nvs_ver >= NVS_VERSION_2) {
+ error = hn_nvs_conf_ndis(hv, mtu);
+ if (error)
+ return error;
+ }
+
+ /*
+ * Initialize NDIS.
+ */
+ error = hn_nvs_init_ndis(hv);
+ if (error)
+ return error;
+
+ /*
+ * Connect RXBUF.
+ */
+ error = hn_nvs_conn_rxbuf(hv);
+ if (error)
+ return error;
+
+ /*
+ * Connect chimney sending buffer.
+ */
+ error = hn_nvs_conn_chim(hv);
+ if (error) {
+ hn_nvs_disconn_rxbuf(hv);
+ return error;
+ }
+
+ return 0;
+}
+
+void
+hn_nvs_detach(struct hn_data *hv)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ /* NOTE: there are no requests to stop the NVS. */
+ hn_nvs_disconn_rxbuf(hv);
+ hn_nvs_disconn_chim(hv);
+}
+
+/*
+ * Ack the consumed RXBUF associated w/ this channel packet,
+ * so that this RXBUF can be recycled by the hypervisor.
+ */
+void
+hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
+{
+ unsigned int retries = 0;
+ struct hn_nvs_rndis_ack ack = {
+ .type = NVS_TYPE_RNDIS_ACK,
+ .status = NVS_STATUS_OK,
+ };
+ int error;
+
+ PMD_RX_LOG(DEBUG, "ack RX id %" PRIu64, tid);
+
+ again:
+ error = rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
+ &ack, sizeof(ack), tid,
+ VMBUS_CHANPKT_FLAG_NONE, NULL);
+
+ if (error == 0)
+ return;
+
+ if (error == -EAGAIN) {
+ /*
+ * NOTE:
+ * This should _not_ happen in real world, since the
+ * consumption of the TX bufring from the TX path is
+ * controlled.
+ */
+ PMD_RX_LOG(NOTICE, "RXBUF ack retry");
+ if (++retries < 10) {
+ rte_delay_ms(1);
+ goto again;
+ }
+ }
+ /* RXBUF leaks! */
+ PMD_DRV_LOG(ERR, "RXBUF ack failed");
+}
+
+int
+hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch)
+{
+ struct hn_nvs_subch_req req;
+ struct hn_nvs_subch_resp resp;
+ int error;
+
+ memset(&req, 0, sizeof(req));
+ req.type = NVS_TYPE_SUBCH_REQ;
+ req.op = NVS_SUBCH_OP_ALLOC;
+ req.nsubch = *nsubch;
+
+ error = hn_nvs_execute(hv, &req, sizeof(req),
+ &resp, sizeof(resp),
+ NVS_TYPE_SUBCH_RESP);
+ if (error)
+ return error;
+
+ if (resp.status != NVS_STATUS_OK) {
+ PMD_INIT_LOG(ERR,
+ "nvs subch alloc failed: %#x",
+ resp.status);
+ return -EIO;
+ }
+
+ if (resp.nsubch > *nsubch) {
+ PMD_INIT_LOG(NOTICE,
+ "%u subchans are allocated, requested %u",
+ resp.nsubch, *nsubch);
+ }
+ *nsubch = resp.nsubch;
+
+ return 0;
+}
+
+void
+hn_nvs_set_datapath(struct hn_data *hv, uint32_t path)
+{
+ struct hn_nvs_datapath dp;
+
+ memset(&dp, 0, sizeof(dp));
+ dp.type = NVS_TYPE_SET_DATAPATH;
+ dp.active_path = path;
+
+ hn_nvs_req_send(hv, &dp, sizeof(dp));
+}
diff --git a/drivers/net/netvsc/hn_nvs.h b/drivers/net/netvsc/hn_nvs.h
new file mode 100644
index 00000000..984a9c11
--- /dev/null
+++ b/drivers/net/netvsc/hn_nvs.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * All rights reserved.
+ */
+
+/*
+ * The indirection table message is the largest message
+ * received from the host (112 bytes); NVS_RESPSIZE_MAX below
+ * is sized with headroom beyond that.
+ */
+#define NVS_RESPSIZE_MAX 256
+
+/*
+ * NDIS protocol version numbers
+ */
+#define NDIS_VERSION_6_1 0x00060001
+#define NDIS_VERSION_6_20 0x00060014
+#define NDIS_VERSION_6_30 0x0006001e
+#define NDIS_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16)
+#define NDIS_VERSION_MINOR(ver) ((ver) & 0xffff)
+
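+
A worked decode of the version packing above, taking NDIS_VERSION_6_30 as input:

    /* NDIS_VERSION_6_30 = 0x0006001e
     *   major = (0x0006001e & 0xffff0000) >> 16 = 0x0006 = 6
     *   minor =  0x0006001e & 0x0000ffff        = 0x001e = 30
     */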
+/*
+ * NVS versions.
+ */
+#define NVS_VERSION_1 0x00002
+#define NVS_VERSION_2 0x30002
+#define NVS_VERSION_4 0x40000
+#define NVS_VERSION_5 0x50000
+#define NVS_VERSION_6 0x60000
+#define NVS_VERSION_61 0x60001
+
+#define NVS_RXBUF_SIG 0xcafe
+#define NVS_CHIM_SIG 0xface
+
+#define NVS_CHIM_IDX_INVALID 0xffffffff
+
+#define NVS_RNDIS_MTYPE_DATA 0
+#define NVS_RNDIS_MTYPE_CTRL 1
+
+/*
+ * NVS message transaction status codes.
+ */
+#define NVS_STATUS_OK 1
+#define NVS_STATUS_FAILED 2
+
+/*
+ * NVS request/response message types.
+ */
+#define NVS_TYPE_INIT 1
+#define NVS_TYPE_INIT_RESP 2
+
+#define NVS_TYPE_NDIS_INIT 100
+#define NVS_TYPE_RXBUF_CONN 101
+#define NVS_TYPE_RXBUF_CONNRESP 102
+#define NVS_TYPE_RXBUF_DISCONN 103
+#define NVS_TYPE_CHIM_CONN 104
+#define NVS_TYPE_CHIM_CONNRESP 105
+#define NVS_TYPE_CHIM_DISCONN 106
+#define NVS_TYPE_RNDIS 107
+#define NVS_TYPE_RNDIS_ACK 108
+
+#define NVS_TYPE_NDIS_CONF 125
+#define NVS_TYPE_VFASSOC_NOTE 128 /* notification */
+#define NVS_TYPE_SET_DATAPATH 129
+#define NVS_TYPE_SUBCH_REQ 133
+#define NVS_TYPE_SUBCH_RESP 133 /* same as SUBCH_REQ */
+#define NVS_TYPE_TXTBL_NOTE 134 /* notification */
+
+
+/* NVS message common header */
+struct hn_nvs_hdr {
+ uint32_t type;
+} __rte_packed;
+
+struct hn_nvs_init {
+ uint32_t type; /* NVS_TYPE_INIT */
+ uint32_t ver_min;
+ uint32_t ver_max;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_init_resp {
+ uint32_t type; /* NVS_TYPE_INIT_RESP */
+ uint32_t ver; /* deprecated */
+ uint32_t rsvd;
+ uint32_t status; /* NVS_STATUS_ */
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_ndis_conf {
+ uint32_t type; /* NVS_TYPE_NDIS_CONF */
+ uint32_t mtu;
+ uint32_t rsvd;
+ uint64_t caps; /* NVS_NDIS_CONF_ */
+ uint8_t rsvd1[20];
+} __rte_packed;
+
+#define NVS_NDIS_CONF_SRIOV 0x0004
+#define NVS_NDIS_CONF_VLAN 0x0008
+
+/* No response */
+struct hn_nvs_ndis_init {
+ uint32_t type; /* NVS_TYPE_NDIS_INIT */
+ uint32_t ndis_major; /* NDIS_VERSION_MAJOR_ */
+ uint32_t ndis_minor; /* NDIS_VERSION_MINOR_ */
+ uint8_t rsvd[28];
+} __rte_packed;
+
+#define NVS_DATAPATH_SYNTHETIC 0
+#define NVS_DATAPATH_VF 1
+
+/* No response */
+struct hn_nvs_datapath {
+ uint32_t type; /* NVS_TYPE_SET_DATAPATH */
+ uint32_t active_path;/* NVS_DATAPATH_* */
+ uint8_t rsvd[32];
+} __rte_packed;
+
+struct hn_nvs_rxbuf_conn {
+ uint32_t type; /* NVS_TYPE_RXBUF_CONN */
+ uint32_t gpadl; /* RXBUF vmbus GPADL */
+ uint16_t sig; /* NVS_RXBUF_SIG */
+ uint8_t rsvd[30];
+} __rte_packed;
+
+struct hn_nvs_rxbuf_sect {
+ uint32_t start;
+ uint32_t slotsz;
+ uint32_t slotcnt;
+ uint32_t end;
+} __rte_packed;
+
+struct hn_nvs_rxbuf_connresp {
+ uint32_t type; /* NVS_TYPE_RXBUF_CONNRESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t nsect; /* # of elem in nvs_sect */
+ struct hn_nvs_rxbuf_sect nvs_sect[1];
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_rxbuf_disconn {
+ uint32_t type; /* NVS_TYPE_RXBUF_DISCONN */
+ uint16_t sig; /* NVS_RXBUF_SIG */
+ uint8_t rsvd[34];
+} __rte_packed;
+
+struct hn_nvs_chim_conn {
+ uint32_t type; /* NVS_TYPE_CHIM_CONN */
+ uint32_t gpadl; /* chimney buf vmbus GPADL */
+ uint16_t sig; /* NDIS_NVS_CHIM_SIG */
+ uint8_t rsvd[30];
+} __rte_packed;
+
+struct hn_nvs_chim_connresp {
+ uint32_t type; /* NVS_TYPE_CHIM_CONNRESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t sectsz; /* section size */
+} __rte_packed;
+
+/* No response */
+struct hn_nvs_chim_disconn {
+ uint32_t type; /* NVS_TYPE_CHIM_DISCONN */
+ uint16_t sig; /* NVS_CHIM_SIG */
+ uint8_t rsvd[34];
+} __rte_packed;
+
+#define NVS_SUBCH_OP_ALLOC 1
+
+struct hn_nvs_subch_req {
+ uint32_t type; /* NVS_TYPE_SUBCH_REQ */
+ uint32_t op; /* NVS_SUBCH_OP_ */
+ uint32_t nsubch;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_subch_resp {
+ uint32_t type; /* NVS_TYPE_SUBCH_RESP */
+ uint32_t status; /* NVS_STATUS_ */
+ uint32_t nsubch;
+ uint8_t rsvd[28];
+} __rte_packed;
+
+struct hn_nvs_rndis {
+ uint32_t type; /* NVS_TYPE_RNDIS */
+ uint32_t rndis_mtype;/* NVS_RNDIS_MTYPE_ */
+ /*
+ * Chimney sending buffer index and size.
+ *
+ * NOTE:
+	 * If chim_idx is set to NVS_CHIM_IDX_INVALID
+	 * and chim_sz is set to 0, then the chimney sending
+ * buffer is _not_ used by this RNDIS message.
+ */
+ uint32_t chim_idx;
+ uint32_t chim_sz;
+ uint8_t rsvd[24];
+} __rte_packed;
+
+struct hn_nvs_rndis_ack {
+ uint32_t type; /* NVS_TYPE_RNDIS_ACK */
+ uint32_t status; /* NVS_STATUS_ */
+ uint8_t rsvd[32];
+} __rte_packed;
+
+
+int hn_nvs_attach(struct hn_data *hv, unsigned int mtu);
+void hn_nvs_detach(struct hn_data *hv);
+void hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid);
+int hn_nvs_alloc_subchans(struct hn_data *hv, uint32_t *nsubch);
+void hn_nvs_set_datapath(struct hn_data *hv, uint32_t path);
+
+static inline int
+hn_nvs_send(struct vmbus_channel *chan, uint16_t flags,
+ void *nvs_msg, int nvs_msglen, uintptr_t sndc,
+ bool *need_sig)
+{
+ return rte_vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND,
+ nvs_msg, nvs_msglen, (uint64_t)sndc,
+ flags, need_sig);
+}
+
+static inline int
+hn_nvs_send_sglist(struct vmbus_channel *chan,
+ struct vmbus_gpa sg[], unsigned int sglen,
+ void *nvs_msg, int nvs_msglen,
+ uintptr_t sndc, bool *need_sig)
+{
+ return rte_vmbus_chan_send_sglist(chan, sg, sglen, nvs_msg, nvs_msglen,
+ (uint64_t)sndc, need_sig);
+}
diff --git a/drivers/net/netvsc/hn_rndis.c b/drivers/net/netvsc/hn_rndis.c
new file mode 100644
index 00000000..bde33969
--- /dev/null
+++ b/drivers/net/netvsc/hn_rndis.c
@@ -0,0 +1,1099 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2018 Microsoft Corp.
+ * Copyright (c) 2010-2012 Citrix Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_bus_vmbus.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_nvs.h"
+#include "hn_rndis.h"
+#include "ndis.h"
+
+#define HN_RNDIS_XFER_SIZE 0x4000
+
+#define HN_NDIS_TXCSUM_CAP_IP4 \
+ (NDIS_TXCSUM_CAP_IP4 | NDIS_TXCSUM_CAP_IP4OPT)
+#define HN_NDIS_TXCSUM_CAP_TCP4 \
+ (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)
+#define HN_NDIS_TXCSUM_CAP_TCP6 \
+ (NDIS_TXCSUM_CAP_TCP6 | NDIS_TXCSUM_CAP_TCP6OPT | \
+ NDIS_TXCSUM_CAP_IP6EXT)
+#define HN_NDIS_TXCSUM_CAP_UDP6 \
+ (NDIS_TXCSUM_CAP_UDP6 | NDIS_TXCSUM_CAP_IP6EXT)
+#define HN_NDIS_LSOV2_CAP_IP6 \
+ (NDIS_LSOV2_CAP_IP6EXT | NDIS_LSOV2_CAP_TCP6OPT)
+
+/* Get unique request id */
+static inline uint32_t
+hn_rndis_rid(struct hn_data *hv)
+{
+ uint32_t rid;
+
+ do {
+ rid = rte_atomic32_add_return(&hv->rndis_req_id, 1);
+ } while (rid == 0);
+
+ return rid;
+}
+
+static void *hn_rndis_alloc(struct hn_data *hv, size_t size)
+{
+ return rte_zmalloc_socket("RNDIS", size, PAGE_SIZE,
+ hv->vmbus->device.numa_node);
+}
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
+void hn_rndis_dump(const void *buf)
+{
+ const union {
+ struct rndis_msghdr hdr;
+ struct rndis_packet_msg pkt;
+ struct rndis_init_req init_request;
+ struct rndis_init_comp init_complete;
+ struct rndis_halt_req halt;
+ struct rndis_query_req query_request;
+ struct rndis_query_comp query_complete;
+ struct rndis_set_req set_request;
+ struct rndis_set_comp set_complete;
+ struct rndis_reset_req reset_request;
+ struct rndis_reset_comp reset_complete;
+ struct rndis_keepalive_req keepalive_request;
+ struct rndis_keepalive_comp keepalive_complete;
+ struct rndis_status_msg indicate_status;
+ } *rndis_msg = buf;
+
+ switch (rndis_msg->hdr.type) {
+ case RNDIS_PACKET_MSG: {
+ const struct rndis_pktinfo *ppi;
+ unsigned int ppi_len;
+
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_PACKET (len %u, data %u:%u, # oob %u %u:%u, pkt %u:%u)\n",
+ rndis_msg->pkt.len,
+ rndis_msg->pkt.dataoffset,
+ rndis_msg->pkt.datalen,
+ rndis_msg->pkt.oobdataelements,
+ rndis_msg->pkt.oobdataoffset,
+ rndis_msg->pkt.oobdatalen,
+ rndis_msg->pkt.pktinfooffset,
+ rndis_msg->pkt.pktinfolen);
+
+ ppi = (const struct rndis_pktinfo *)
+ ((const char *)buf
+ + RNDIS_PACKET_MSG_OFFSET_ABS(rndis_msg->pkt.pktinfooffset));
+
+ ppi_len = rndis_msg->pkt.pktinfolen;
+ while (ppi_len > 0) {
+ const void *ppi_data;
+
+ ppi_data = ppi->data;
+
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ " PPI (size %u, type %u, offs %u data %#x)\n",
+ ppi->size, ppi->type, ppi->offset,
+ *(const uint32_t *)ppi_data);
+ if (ppi->size == 0)
+ break;
+ ppi_len -= ppi->size;
+ ppi = (const struct rndis_pktinfo *)
+ ((const char *)ppi + ppi->size);
+ }
+ break;
+ }
+ case RNDIS_INITIALIZE_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INIT (len %u id %#x, ver %u.%u max xfer %u)\n",
+ rndis_msg->init_request.len,
+ rndis_msg->init_request.rid,
+ rndis_msg->init_request.ver_major,
+ rndis_msg->init_request.ver_minor,
+ rndis_msg->init_request.max_xfersz);
+ break;
+
+ case RNDIS_INITIALIZE_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INIT_C (len %u, id %#x, status 0x%x, vers %u.%u, "
+ "flags %d, max xfer %u, max pkts %u, aligned %u)\n",
+ rndis_msg->init_complete.len,
+ rndis_msg->init_complete.rid,
+ rndis_msg->init_complete.status,
+ rndis_msg->init_complete.ver_major,
+ rndis_msg->init_complete.ver_minor,
+ rndis_msg->init_complete.devflags,
+ rndis_msg->init_complete.pktmaxsz,
+ rndis_msg->init_complete.pktmaxcnt,
+ rndis_msg->init_complete.align);
+ break;
+
+ case RNDIS_HALT_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_HALT (len %u id %#x)\n",
+ rndis_msg->halt.len, rndis_msg->halt.rid);
+ break;
+
+ case RNDIS_QUERY_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_QUERY (len %u, id %#x, oid %#x, info %u:%u)\n",
+ rndis_msg->query_request.len,
+ rndis_msg->query_request.rid,
+ rndis_msg->query_request.oid,
+ rndis_msg->query_request.infobuflen,
+ rndis_msg->query_request.infobufoffset);
+ break;
+
+ case RNDIS_QUERY_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_QUERY_C (len %u, id %#x, status 0x%x, buf %u:%u)\n",
+ rndis_msg->query_complete.len,
+ rndis_msg->query_complete.rid,
+ rndis_msg->query_complete.status,
+ rndis_msg->query_complete.infobuflen,
+ rndis_msg->query_complete.infobufoffset);
+ break;
+
+ case RNDIS_SET_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_SET (len %u, id %#x, oid %#x, info %u:%u)\n",
+ rndis_msg->set_request.len,
+ rndis_msg->set_request.rid,
+ rndis_msg->set_request.oid,
+ rndis_msg->set_request.infobuflen,
+ rndis_msg->set_request.infobufoffset);
+ break;
+
+ case RNDIS_SET_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
+ rndis_msg->set_complete.len,
+ rndis_msg->set_complete.rid,
+ rndis_msg->set_complete.status);
+ break;
+
+ case RNDIS_INDICATE_STATUS_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_MSG_INDICATE (len %u, status %#x, buf len %u, buf offset %u)\n",
+ rndis_msg->indicate_status.len,
+ rndis_msg->indicate_status.status,
+ rndis_msg->indicate_status.stbuflen,
+ rndis_msg->indicate_status.stbufoffset);
+ break;
+
+ case RNDIS_RESET_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_RESET (len %u, id %#x)\n",
+ rndis_msg->reset_request.len,
+ rndis_msg->reset_request.rid);
+ break;
+
+ case RNDIS_RESET_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_RESET_C (len %u, status %#x address %#x)\n",
+ rndis_msg->reset_complete.len,
+ rndis_msg->reset_complete.status,
+ rndis_msg->reset_complete.adrreset);
+ break;
+
+ case RNDIS_KEEPALIVE_MSG:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS_KEEPALIVE (len %u, id %#x)\n",
+ rndis_msg->keepalive_request.len,
+ rndis_msg->keepalive_request.rid);
+ break;
+
+ case RNDIS_KEEPALIVE_CMPLT:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+			"RNDIS_KEEPALIVE_C (len %u, id %#x, status %#x)\n",
+ rndis_msg->keepalive_complete.len,
+ rndis_msg->keepalive_complete.rid,
+ rndis_msg->keepalive_complete.status);
+ break;
+
+ default:
+ rte_log(RTE_LOG_DEBUG, hn_logtype_driver,
+ "RNDIS type %#x len %u\n",
+ rndis_msg->hdr.type,
+ rndis_msg->hdr.len);
+ break;
+ }
+}
+#endif
+
+static int hn_nvs_send_rndis_ctrl(struct vmbus_channel *chan,
+ const void *req, uint32_t reqlen)
+
+{
+ struct hn_nvs_rndis nvs_rndis = {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_CTRL,
+ .chim_idx = NVS_CHIM_IDX_INVALID,
+ .chim_sz = 0
+ };
+ struct vmbus_gpa sg;
+ rte_iova_t addr;
+
+ addr = rte_malloc_virt2iova(req);
+ if (unlikely(addr == RTE_BAD_IOVA)) {
+ PMD_DRV_LOG(ERR, "RNDIS send request can not get iova");
+ return -EINVAL;
+ }
+
+ if (unlikely(reqlen > PAGE_SIZE)) {
+ PMD_DRV_LOG(ERR, "RNDIS request %u greater than page size",
+ reqlen);
+ return -EINVAL;
+ }
+
+ sg.page = addr / PAGE_SIZE;
+ sg.ofs = addr & PAGE_MASK;
+ sg.len = reqlen;
+
+ if (sg.ofs + reqlen > PAGE_SIZE) {
+		PMD_DRV_LOG(ERR, "RNDIS request crosses page boundary");
+ return -EINVAL;
+ }
+
+ hn_rndis_dump(req);
+
+ return hn_nvs_send_sglist(chan, &sg, 1,
+ &nvs_rndis, sizeof(nvs_rndis), 0U, NULL);
+}
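
A worked example of the page/offset split in hn_nvs_send_rndis_ctrl() above, assuming PAGE_SIZE is 4096 and PAGE_MASK is PAGE_SIZE - 1 (which the bounds check implies); the address is hypothetical:

    /* addr    = 0x12345678
     * sg.page = 0x12345678 / 4096  = 0x12345
     * sg.ofs  = 0x12345678 & 0xfff = 0x678
     * The request must then satisfy 0x678 + reqlen <= 4096,
     * i.e. it may not cross into the next page.
     */
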
+
+void hn_rndis_link_status(struct hn_data *hv __rte_unused, const void *msg)
+{
+ const struct rndis_status_msg *indicate = msg;
+
+ hn_rndis_dump(msg);
+
+ PMD_DRV_LOG(DEBUG, "link status %#x", indicate->status);
+
+ switch (indicate->status) {
+ case RNDIS_STATUS_LINK_SPEED_CHANGE:
+ case RNDIS_STATUS_NETWORK_CHANGE:
+ case RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG:
+ /* ignore not in DPDK API */
+ break;
+
+ case RNDIS_STATUS_MEDIA_CONNECT:
+ case RNDIS_STATUS_MEDIA_DISCONNECT:
+ /* TODO handle as LSC interrupt */
+ break;
+ default:
+ PMD_DRV_LOG(NOTICE, "unknown RNDIS indication: %#x",
+ indicate->status);
+ }
+}
+
+/* Callback from hn_process_events when response is visible */
+void hn_rndis_receive_response(struct hn_data *hv,
+ const void *data, uint32_t len)
+{
+ const struct rndis_init_comp *hdr = data;
+
+ hn_rndis_dump(data);
+
+	/* Need at least the type, len and request id fields. */
+	if (len < 3 * sizeof(uint32_t)) {
+ PMD_DRV_LOG(ERR,
+ "missing RNDIS header %u", len);
+ return;
+ }
+
+ if (len < hdr->len) {
+ PMD_DRV_LOG(ERR,
+ "truncated RNDIS response %u", len);
+ return;
+ }
+
+ if (len > sizeof(hv->rndis_resp)) {
+ PMD_DRV_LOG(NOTICE,
+ "RNDIS response exceeds buffer");
+ len = sizeof(hv->rndis_resp);
+ }
+
+ if (hdr->rid == 0) {
+ PMD_DRV_LOG(NOTICE,
+ "RNDIS response id zero!");
+ }
+
+ memcpy(hv->rndis_resp, data, len);
+
+ /* make sure response copied before update */
+ rte_smp_wmb();
+
+ if (rte_atomic32_cmpset(&hv->rndis_pending, hdr->rid, 0) == 0) {
+ PMD_DRV_LOG(ERR,
+ "received id %#x pending id %#x",
+ hdr->rid, (uint32_t)hv->rndis_pending);
+ }
+}
+
+/* Do request/response transaction */
+static int hn_rndis_exec1(struct hn_data *hv,
+ const void *req, uint32_t reqlen,
+ void *comp, uint32_t comp_len)
+{
+ const struct rndis_halt_req *hdr = req;
+ uint32_t rid = hdr->rid;
+ struct vmbus_channel *chan = hn_primary_chan(hv);
+ int error;
+
+ if (comp_len > sizeof(hv->rndis_resp)) {
+ PMD_DRV_LOG(ERR,
+ "Expected completion size %u exceeds buffer %zu",
+ comp_len, sizeof(hv->rndis_resp));
+ return -EIO;
+ }
+
+ if (comp != NULL &&
+ rte_atomic32_cmpset(&hv->rndis_pending, 0, rid) == 0) {
+ PMD_DRV_LOG(ERR,
+ "Request already pending");
+ return -EBUSY;
+ }
+
+ error = hn_nvs_send_rndis_ctrl(chan, req, reqlen);
+ if (error) {
+ PMD_DRV_LOG(ERR, "RNDIS ctrl send failed: %d", error);
+ return error;
+ }
+
+ if (comp) {
+ /* Poll primary channel until response received */
+ while (hv->rndis_pending == rid)
+ hn_process_events(hv, 0);
+
+ memcpy(comp, hv->rndis_resp, comp_len);
+ }
+
+ return 0;
+}
+
+/* Do transaction and validate response */
+static int hn_rndis_execute(struct hn_data *hv, uint32_t rid,
+ const void *req, uint32_t reqlen,
+ void *comp, uint32_t comp_len, uint32_t comp_type)
+{
+ const struct rndis_comp_hdr *hdr = comp;
+ int ret;
+
+ memset(comp, 0, comp_len);
+
+ ret = hn_rndis_exec1(hv, req, reqlen, comp, comp_len);
+ if (ret < 0)
+ return ret;
+ /*
+ * Check this RNDIS complete message.
+ */
+ if (unlikely(hdr->type != comp_type)) {
+ PMD_DRV_LOG(ERR,
+ "unexpected RNDIS response complete %#x expect %#x",
+ hdr->type, comp_type);
+
+ return -ENXIO;
+ }
+ if (unlikely(hdr->rid != rid)) {
+ PMD_DRV_LOG(ERR,
+ "RNDIS comp rid mismatch %#x, expect %#x",
+ hdr->rid, rid);
+ return -EINVAL;
+ }
+
+ /* All pass! */
+ return 0;
+}
+
+static int
+hn_rndis_query(struct hn_data *hv, uint32_t oid,
+ const void *idata, uint32_t idlen,
+ void *odata, uint32_t odlen)
+{
+ struct rndis_query_req *req;
+ struct rndis_query_comp *comp;
+ uint32_t reqlen, comp_len;
+ int error = -EIO;
+ unsigned int ofs;
+ uint32_t rid;
+
+ reqlen = sizeof(*req) + idlen;
+ req = hn_rndis_alloc(hv, reqlen);
+ if (req == NULL)
+ return -ENOMEM;
+
+ comp_len = sizeof(*comp) + odlen;
+ comp = rte_zmalloc("QUERY", comp_len, PAGE_SIZE);
+ if (!comp) {
+ error = -ENOMEM;
+ goto done;
+ }
+ comp->status = RNDIS_STATUS_PENDING;
+
+ rid = hn_rndis_rid(hv);
+
+ req->type = RNDIS_QUERY_MSG;
+ req->len = reqlen;
+ req->rid = rid;
+ req->oid = oid;
+ req->infobufoffset = RNDIS_QUERY_REQ_INFOBUFOFFSET;
+ req->infobuflen = idlen;
+
+ /* Input data immediately follows RNDIS query. */
+ memcpy(req + 1, idata, idlen);
+
+ error = hn_rndis_execute(hv, rid, req, reqlen,
+ comp, comp_len, RNDIS_QUERY_CMPLT);
+
+ if (error)
+ goto done;
+
+ if (comp->status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "RNDIS query 0x%08x failed: status 0x%08x",
+ oid, comp->status);
+ error = -EINVAL;
+ goto done;
+ }
+
+ if (comp->infobuflen == 0 || comp->infobufoffset == 0) {
+ /* No output data! */
+ PMD_DRV_LOG(ERR, "RNDIS query 0x%08x, no data", oid);
+ error = 0;
+ goto done;
+ }
+
+ /*
+ * Check output data length and offset.
+ */
+ /* ofs is the offset from the beginning of comp. */
+ ofs = RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(comp->infobufoffset);
+ if (ofs < sizeof(*comp) || ofs + comp->infobuflen > comp_len) {
+ PMD_DRV_LOG(ERR, "RNDIS query invalid comp ib off/len, %u/%u",
+ comp->infobufoffset, comp->infobuflen);
+ error = -EINVAL;
+ goto done;
+ }
+
+ /* Save output data. */
+ if (comp->infobuflen < odlen)
+ odlen = comp->infobuflen;
+
+ /* ofs is the offset from the beginning of comp. */
+ memcpy(odata, (const char *)comp + ofs, odlen);
+
+ error = 0;
+done:
+ rte_free(comp);
+ rte_free(req);
+ return error;
+}
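
The buffer layout hn_rndis_query() assumes, sketched below: input data is appended directly after the fixed request header, and the completion's info buffer is located through the *_ABS offset conversion (RNDIS offsets are relative to the request id field):

    /* req:  | struct rndis_query_req  | input data (idlen bytes)  |
     *                                   ^ memcpy(req + 1, idata, idlen)
     * comp: | struct rndis_query_comp | output data               |
     *         ofs = RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(infobufoffset)
     *         at most odlen bytes are copied from (char *)comp + ofs.
     */
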
+
+static int
+hn_rndis_halt(struct hn_data *hv)
+{
+ struct rndis_halt_req *halt;
+
+ halt = hn_rndis_alloc(hv, sizeof(*halt));
+ if (halt == NULL)
+ return -ENOMEM;
+
+ halt->type = RNDIS_HALT_MSG;
+ halt->len = sizeof(*halt);
+ halt->rid = hn_rndis_rid(hv);
+
+ /* No RNDIS completion; rely on NVS message send completion */
+ hn_rndis_exec1(hv, halt, sizeof(*halt), NULL, 0);
+
+ rte_free(halt);
+
+ PMD_INIT_LOG(DEBUG, "RNDIS halt done");
+ return 0;
+}
+
+static int
+hn_rndis_query_hwcaps(struct hn_data *hv, struct ndis_offload *caps)
+{
+ struct ndis_offload in;
+ uint32_t caps_len, size;
+ int error;
+
+ memset(caps, 0, sizeof(*caps));
+ memset(&in, 0, sizeof(in));
+ in.ndis_hdr.ndis_type = NDIS_OBJTYPE_OFFLOAD;
+
+ if (hv->ndis_ver >= NDIS_VERSION_6_30) {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_3;
+ size = NDIS_OFFLOAD_SIZE;
+ } else if (hv->ndis_ver >= NDIS_VERSION_6_1) {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_2;
+ size = NDIS_OFFLOAD_SIZE_6_1;
+ } else {
+ in.ndis_hdr.ndis_rev = NDIS_OFFLOAD_REV_1;
+ size = NDIS_OFFLOAD_SIZE_6_0;
+ }
+ in.ndis_hdr.ndis_size = size;
+
+ caps_len = NDIS_OFFLOAD_SIZE;
+ error = hn_rndis_query(hv, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
+ &in, size, caps, caps_len);
+ if (error)
+ return error;
+
+ /* Preliminary verification. */
+ if (caps->ndis_hdr.ndis_type != NDIS_OBJTYPE_OFFLOAD) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objtype 0x%02x",
+ caps->ndis_hdr.ndis_type);
+ return -EINVAL;
+ }
+ if (caps->ndis_hdr.ndis_rev < NDIS_OFFLOAD_REV_1) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objrev 0x%02x",
+ caps->ndis_hdr.ndis_rev);
+ return -EINVAL;
+ }
+ if (caps->ndis_hdr.ndis_size > caps_len) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u, data size %u",
+ caps->ndis_hdr.ndis_size, caps_len);
+ return -EINVAL;
+ } else if (caps->ndis_hdr.ndis_size < NDIS_OFFLOAD_SIZE_6_0) {
+ PMD_DRV_LOG(NOTICE, "invalid NDIS objsize %u",
+ caps->ndis_hdr.ndis_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+hn_rndis_query_rsscaps(struct hn_data *hv,
+ unsigned int *rxr_cnt0)
+{
+ struct ndis_rss_caps in, caps;
+ unsigned int indsz, rxr_cnt;
+ uint32_t caps_len;
+ int error;
+
+ *rxr_cnt0 = 0;
+
+ if (hv->ndis_ver < NDIS_VERSION_6_20) {
+ PMD_DRV_LOG(DEBUG, "RSS not supported on this host");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&in, 0, sizeof(in));
+ in.ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_CAPS;
+ in.ndis_hdr.ndis_rev = NDIS_RSS_CAPS_REV_2;
+ in.ndis_hdr.ndis_size = NDIS_RSS_CAPS_SIZE;
+
+ caps_len = NDIS_RSS_CAPS_SIZE;
+ error = hn_rndis_query(hv, OID_GEN_RECEIVE_SCALE_CAPABILITIES,
+ &in, NDIS_RSS_CAPS_SIZE,
+ &caps, caps_len);
+ if (error)
+ return error;
+
+ PMD_INIT_LOG(DEBUG, "RX rings %u indirect %u caps %#x",
+ caps.ndis_nrxr, caps.ndis_nind, caps.ndis_caps);
+ /*
+ * Preliminary verification.
+ */
+ if (caps.ndis_hdr.ndis_type != NDIS_OBJTYPE_RSS_CAPS) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objtype 0x%02x",
+ caps.ndis_hdr.ndis_type);
+ return -EINVAL;
+ }
+ if (caps.ndis_hdr.ndis_rev < NDIS_RSS_CAPS_REV_1) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objrev 0x%02x",
+ caps.ndis_hdr.ndis_rev);
+ return -EINVAL;
+ }
+ if (caps.ndis_hdr.ndis_size > caps_len) {
+ PMD_DRV_LOG(ERR,
+ "invalid NDIS objsize %u, data size %u",
+ caps.ndis_hdr.ndis_size, caps_len);
+ return -EINVAL;
+ } else if (caps.ndis_hdr.ndis_size < NDIS_RSS_CAPS_SIZE_6_0) {
+ PMD_DRV_LOG(ERR, "invalid NDIS objsize %u",
+ caps.ndis_hdr.ndis_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Save information for later RSS configuration.
+ */
+ if (caps.ndis_nrxr == 0) {
+ PMD_DRV_LOG(ERR, "0 RX rings!?");
+ return -EINVAL;
+ }
+ rxr_cnt = caps.ndis_nrxr;
+
+ if (caps.ndis_hdr.ndis_size == NDIS_RSS_CAPS_SIZE &&
+ caps.ndis_hdr.ndis_rev >= NDIS_RSS_CAPS_REV_2) {
+ if (caps.ndis_nind > NDIS_HASH_INDCNT) {
+ PMD_DRV_LOG(ERR,
+ "too many RSS indirect table entries %u",
+ caps.ndis_nind);
+ return -EOPNOTSUPP;
+ }
+ if (!rte_is_power_of_2(caps.ndis_nind)) {
+ PMD_DRV_LOG(ERR,
+ "RSS indirect table size is not power-of-2 %u",
+ caps.ndis_nind);
+ }
+
+ indsz = caps.ndis_nind;
+ } else {
+ indsz = NDIS_HASH_INDCNT;
+ }
+
+ if (indsz < rxr_cnt) {
+ PMD_DRV_LOG(NOTICE,
+ "# of RX rings (%d) > RSS indirect table size %d",
+ rxr_cnt, indsz);
+ rxr_cnt = indsz;
+ }
+
+ hv->rss_offloads = 0;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV4)
+ hv->rss_offloads |= ETH_RSS_IPV4
+ | ETH_RSS_NONFRAG_IPV4_TCP
+ | ETH_RSS_NONFRAG_IPV4_UDP;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV6)
+ hv->rss_offloads |= ETH_RSS_IPV6
+ | ETH_RSS_NONFRAG_IPV6_TCP;
+ if (caps.ndis_caps & NDIS_RSS_CAP_IPV6_EX)
+ hv->rss_offloads |= ETH_RSS_IPV6_EX
+ | ETH_RSS_IPV6_TCP_EX;
+
+ /* Commit! */
+ *rxr_cnt0 = rxr_cnt;
+
+ return 0;
+}
+
+static int
+hn_rndis_set(struct hn_data *hv, uint32_t oid, const void *data, uint32_t dlen)
+{
+ struct rndis_set_req *req;
+ struct rndis_set_comp comp;
+ uint32_t reqlen, comp_len;
+ uint32_t rid;
+ int error;
+
+ reqlen = sizeof(*req) + dlen;
+ req = rte_zmalloc("RNDIS_SET", reqlen, PAGE_SIZE);
+ if (!req)
+ return -ENOMEM;
+
+ rid = hn_rndis_rid(hv);
+ req->type = RNDIS_SET_MSG;
+ req->len = reqlen;
+ req->rid = rid;
+ req->oid = oid;
+ req->infobuflen = dlen;
+ req->infobufoffset = RNDIS_SET_REQ_INFOBUFOFFSET;
+
+ /* Data immediately follows RNDIS set. */
+ memcpy(req + 1, data, dlen);
+
+ comp_len = sizeof(comp);
+ error = hn_rndis_execute(hv, rid, req, reqlen,
+ &comp, comp_len,
+ RNDIS_SET_CMPLT);
+ if (error) {
+ PMD_DRV_LOG(ERR, "exec RNDIS set %#" PRIx32 " failed",
+ oid);
+		error = -EIO;
+ goto done;
+ }
+
+ if (comp.status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "RNDIS set %#" PRIx32 " failed: status %#" PRIx32,
+ oid, comp.status);
+		error = -EIO;
+ goto done;
+ }
+
+done:
+ rte_free(req);
+ return error;
+}
+
+int hn_rndis_conf_offload(struct hn_data *hv,
+ uint64_t tx_offloads, uint64_t rx_offloads)
+{
+ struct ndis_offload_params params;
+ struct ndis_offload hwcaps;
+ int error;
+
+ error = hn_rndis_query_hwcaps(hv, &hwcaps);
+ if (error) {
+ PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+ return error;
+ }
+
+ /* NOTE: 0 means "no change" */
+ memset(&params, 0, sizeof(params));
+
+ params.ndis_hdr.ndis_type = NDIS_OBJTYPE_DEFAULT;
+ if (hv->ndis_ver < NDIS_VERSION_6_30) {
+ params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_2;
+ params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE_6_1;
+ } else {
+ params.ndis_hdr.ndis_rev = NDIS_OFFLOAD_PARAMS_REV_3;
+ params.ndis_hdr.ndis_size = NDIS_OFFLOAD_PARAMS_SIZE;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
+ params.ndis_tcp4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+
+ if (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_TCP6)
+ params.ndis_tcp6csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) {
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4)
+ == NDIS_RXCSUM_CAP_TCP4)
+ params.ndis_tcp4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6)
+ == NDIS_RXCSUM_CAP_TCP6)
+ params.ndis_tcp6csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
+ params.ndis_udp4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6)
+ == NDIS_TXCSUM_CAP_UDP6)
+ params.ndis_udp6csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+
+	if (rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4)
+ params.ndis_udp4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+
+ if (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6)
+ params.ndis_udp6csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) {
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_IP4)
+ == NDIS_TXCSUM_CAP_IP4)
+ params.ndis_ip4csum = NDIS_OFFLOAD_PARAM_TX;
+ else
+ goto unsupported;
+ }
+ if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+ params.ndis_ip4csum |= NDIS_OFFLOAD_PARAM_RX;
+ else
+ goto unsupported;
+ }
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023)
+ params.ndis_lsov2_ip4 = NDIS_OFFLOAD_LSOV2_ON;
+ else
+ goto unsupported;
+
+ if ((hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
+ == HN_NDIS_LSOV2_CAP_IP6)
+ params.ndis_lsov2_ip6 = NDIS_OFFLOAD_LSOV2_ON;
+ else
+ goto unsupported;
+ }
+
+ error = hn_rndis_set(hv, OID_TCP_OFFLOAD_PARAMETERS, &params,
+ params.ndis_hdr.ndis_size);
+ if (error) {
+ PMD_DRV_LOG(ERR, "offload config failed");
+ return error;
+ }
+
+ return 0;
+ unsupported:
+ PMD_DRV_LOG(NOTICE,
+ "offload tx:%" PRIx64 " rx:%" PRIx64 " not supported by this version",
+ tx_offloads, rx_offloads);
+ return -EINVAL;
+}
+
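+/*
+ * Example call (illustrative only), enabling TCP checksum offload in
+ * both directions; real callers pass the ethdev's configured offload
+ * masks:
+ *
+ *	error = hn_rndis_conf_offload(hv,
+ *				      DEV_TX_OFFLOAD_TCP_CKSUM,
+ *				      DEV_RX_OFFLOAD_TCP_CKSUM);
+ */
+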
+int hn_rndis_get_offload(struct hn_data *hv,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ndis_offload hwcaps;
+ int error;
+
+ memset(&hwcaps, 0, sizeof(hwcaps));
+
+ error = hn_rndis_query_hwcaps(hv, &hwcaps);
+ if (error) {
+ PMD_DRV_LOG(ERR, "hwcaps query failed: %d", error);
+ return error;
+ }
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_IP4)
+ == HN_NDIS_TXCSUM_CAP_IP4)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & HN_NDIS_TXCSUM_CAP_TCP4)
+ == HN_NDIS_TXCSUM_CAP_TCP4 &&
+ (hwcaps.ndis_csum.ndis_ip6_txcsum & HN_NDIS_TXCSUM_CAP_TCP6)
+ == HN_NDIS_TXCSUM_CAP_TCP6)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_txcsum & NDIS_TXCSUM_CAP_UDP6))
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_UDP_CKSUM;
+
+ if ((hwcaps.ndis_lsov2.ndis_ip4_encap & NDIS_OFFLOAD_ENCAP_8023) &&
+ (hwcaps.ndis_lsov2.ndis_ip6_opts & HN_NDIS_LSOV2_CAP_IP6)
+ == HN_NDIS_LSOV2_CAP_IP6)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+
+ if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_TCP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_TCP6))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ if ((hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_UDP4) &&
+ (hwcaps.ndis_csum.ndis_ip6_rxcsum & NDIS_RXCSUM_CAP_UDP6))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_UDP_CKSUM;
+
+ return 0;
+}
+
+int
+hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter)
+{
+ int error;
+
+ error = hn_rndis_set(hv, OID_GEN_CURRENT_PACKET_FILTER,
+ &filter, sizeof(filter));
+ if (error) {
+ PMD_DRV_LOG(ERR, "set RX filter %#" PRIx32 " failed: %d",
+ filter, error);
+ } else {
+ PMD_DRV_LOG(DEBUG, "set RX filter %#" PRIx32 " done", filter);
+ }
+
+ return error;
+}
+
+/* The default RSS key.
+ * This value is the same as MLX5 so that flows will be
+ * received on the same path for both the VF and the synthetic NIC.
+ */
+static const uint8_t rss_default_key[NDIS_HASH_KEYSIZE_TOEPLITZ] = {
+ 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
+ 0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
+ 0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
+ 0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
+ 0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a,
+};
+
+int hn_rndis_conf_rss(struct hn_data *hv,
+ const struct rte_eth_rss_conf *rss_conf)
+{
+ struct ndis_rssprm_toeplitz rssp;
+ struct ndis_rss_params *prm = &rssp.rss_params;
+ const uint8_t *rss_key = rss_conf->rss_key ? : rss_default_key;
+ uint32_t rss_hash;
+ unsigned int i;
+ int error;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&rssp, 0, sizeof(rssp));
+
+ prm->ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_PARAMS;
+ prm->ndis_hdr.ndis_rev = NDIS_RSS_PARAMS_REV_2;
+ prm->ndis_hdr.ndis_size = sizeof(*prm);
+ prm->ndis_flags = 0;
+
+ rss_hash = NDIS_HASH_FUNCTION_TOEPLITZ;
+ if (rss_conf->rss_hf & ETH_RSS_IPV4)
+ rss_hash |= NDIS_HASH_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ rss_hash |= NDIS_HASH_TCP_IPV4;
+ if (rss_conf->rss_hf & ETH_RSS_IPV6)
+ rss_hash |= NDIS_HASH_IPV6;
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ rss_hash |= NDIS_HASH_TCP_IPV6;
+
+ prm->ndis_hash = rss_hash;
+ prm->ndis_indsize = sizeof(rssp.rss_ind[0]) * NDIS_HASH_INDCNT;
+ prm->ndis_indoffset = offsetof(struct ndis_rssprm_toeplitz, rss_ind[0]);
+ prm->ndis_keysize = NDIS_HASH_KEYSIZE_TOEPLITZ;
+ prm->ndis_keyoffset = offsetof(struct ndis_rssprm_toeplitz, rss_key[0]);
+
+ for (i = 0; i < NDIS_HASH_INDCNT; i++)
+ rssp.rss_ind[i] = i % hv->num_queues;
+
+	/* Set hash key values */
+ memcpy(&rssp.rss_key, rss_key, NDIS_HASH_KEYSIZE_TOEPLITZ);
+
+ error = hn_rndis_set(hv, OID_GEN_RECEIVE_SCALE_PARAMETERS,
+ &rssp, sizeof(rssp));
+ if (error) {
+ PMD_DRV_LOG(ERR,
+ "RSS config num queues=%u failed: %d",
+ hv->num_queues, error);
+ }
+ return error;
+}
+
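+/*
+ * Illustrative example: with num_queues = 4, the 128-entry indirection
+ * table built above becomes 0,1,2,3,0,1,2,3,... so the host spreads
+ * flows evenly across the four Rx queues.
+ */
+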
+static int hn_rndis_init(struct hn_data *hv)
+{
+ struct rndis_init_req *req;
+ struct rndis_init_comp comp;
+ uint32_t comp_len, rid;
+ int error;
+
+ req = hn_rndis_alloc(hv, sizeof(*req));
+ if (!req) {
+ PMD_DRV_LOG(ERR, "no memory for RNDIS init");
+ return -ENXIO;
+ }
+
+ rid = hn_rndis_rid(hv);
+ req->type = RNDIS_INITIALIZE_MSG;
+ req->len = sizeof(*req);
+ req->rid = rid;
+ req->ver_major = RNDIS_VERSION_MAJOR;
+ req->ver_minor = RNDIS_VERSION_MINOR;
+ req->max_xfersz = HN_RNDIS_XFER_SIZE;
+
+ comp_len = RNDIS_INIT_COMP_SIZE_MIN;
+ error = hn_rndis_execute(hv, rid, req, sizeof(*req),
+ &comp, comp_len,
+ RNDIS_INITIALIZE_CMPLT);
+ if (error)
+ goto done;
+
+ if (comp.status != RNDIS_STATUS_SUCCESS) {
+ PMD_DRV_LOG(ERR, "RNDIS init failed: status 0x%08x",
+ comp.status);
+ error = -EIO;
+ goto done;
+ }
+
+ hv->rndis_agg_size = comp.pktmaxsz;
+ hv->rndis_agg_pkts = comp.pktmaxcnt;
+ hv->rndis_agg_align = 1U << comp.align;
+
+ if (hv->rndis_agg_align < sizeof(uint32_t)) {
+ /*
+ * The RNDIS packet message encap assumes that the RNDIS
+ * packet message is at least 4 bytes aligned. Fix up the
+ * alignment here, if the remote side sets the alignment
+ * too low.
+ */
+ PMD_DRV_LOG(NOTICE,
+ "fixup RNDIS aggpkt align: %u -> %zu",
+ hv->rndis_agg_align, sizeof(uint32_t));
+ hv->rndis_agg_align = sizeof(uint32_t);
+ }
+
+ PMD_INIT_LOG(INFO,
+ "RNDIS ver %u.%u, aggpkt size %u, aggpkt cnt %u, aggpkt align %u",
+ comp.ver_major, comp.ver_minor,
+ hv->rndis_agg_size, hv->rndis_agg_pkts,
+ hv->rndis_agg_align);
+ error = 0;
+done:
+ rte_free(req);
+ return error;
+}
+
+int
+hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr)
+{
+ uint32_t eaddr_len;
+ int error;
+
+ eaddr_len = ETHER_ADDR_LEN;
+ error = hn_rndis_query(hv, OID_802_3_PERMANENT_ADDRESS, NULL, 0,
+ eaddr, eaddr_len);
+ if (error)
+ return error;
+
+ PMD_DRV_LOG(INFO, "MAC address %02x:%02x:%02x:%02x:%02x:%02x",
+ eaddr[0], eaddr[1], eaddr[2],
+ eaddr[3], eaddr[4], eaddr[5]);
+ return 0;
+}
+
+int
+hn_rndis_get_linkstatus(struct hn_data *hv)
+{
+ return hn_rndis_query(hv, OID_GEN_MEDIA_CONNECT_STATUS, NULL, 0,
+ &hv->link_status, sizeof(uint32_t));
+}
+
+int
+hn_rndis_get_linkspeed(struct hn_data *hv)
+{
+ return hn_rndis_query(hv, OID_GEN_LINK_SPEED, NULL, 0,
+ &hv->link_speed, sizeof(uint32_t));
+}
+
+int
+hn_rndis_attach(struct hn_data *hv)
+{
+ /* Initialize RNDIS. */
+ return hn_rndis_init(hv);
+}
+
+void
+hn_rndis_detach(struct hn_data *hv)
+{
+ /* Halt the RNDIS. */
+ hn_rndis_halt(hv);
+}
diff --git a/drivers/net/netvsc/hn_rndis.h b/drivers/net/netvsc/hn_rndis.h
new file mode 100644
index 00000000..89e2e6ba
--- /dev/null
+++ b/drivers/net/netvsc/hn_rndis.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+#include "rndis.h"
+
+struct hn_data;
+
+void hn_rndis_receive_response(struct hn_data *hv,
+ const void *data, uint32_t len);
+void hn_rndis_link_status(struct hn_data *hv, const void *data);
+int hn_rndis_attach(struct hn_data *hv);
+void hn_rndis_detach(struct hn_data *hv);
+int hn_rndis_get_eaddr(struct hn_data *hv, uint8_t *eaddr);
+int hn_rndis_get_linkstatus(struct hn_data *hv);
+int hn_rndis_get_linkspeed(struct hn_data *hv);
+int hn_rndis_set_rxfilter(struct hn_data *hv, uint32_t filter);
+void hn_rndis_rx_ctrl(struct hn_data *hv, const void *data,
+ int dlen);
+int hn_rndis_get_offload(struct hn_data *hv,
+ struct rte_eth_dev_info *dev_info);
+int hn_rndis_conf_offload(struct hn_data *hv,
+ uint64_t tx_offloads,
+ uint64_t rx_offloads);
+int hn_rndis_query_rsscaps(struct hn_data *hv,
+ unsigned int *rxr_cnt0);
+int hn_rndis_conf_rss(struct hn_data *hv,
+ const struct rte_eth_rss_conf *rss_conf);
+
+#ifdef RTE_LIBRTE_NETVSC_DEBUG_DUMP
+void hn_rndis_dump(const void *buf);
+#else
+#define hn_rndis_dump(buf)
+#endif
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
new file mode 100644
index 00000000..02ef27e3
--- /dev/null
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -0,0 +1,1334 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Microsoft Corporation
+ * Copyright(c) 2013-2016 Brocade Communications Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <strings.h>
+
+#include <rte_ethdev.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_net.h>
+#include <rte_bus_vmbus.h>
+#include <rte_spinlock.h>
+
+#include "hn_logs.h"
+#include "hn_var.h"
+#include "hn_rndis.h"
+#include "hn_nvs.h"
+#include "ndis.h"
+
+#define HN_NVS_SEND_MSG_SIZE \
+ (sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
+
+#define HN_TXD_CACHE_SIZE 32 /* per cpu tx_descriptor pool cache */
+#define HN_TXCOPY_THRESHOLD 512
+
+#define HN_RXCOPY_THRESHOLD 256
+#define HN_RXQ_EVENT_DEFAULT 2048
+
+struct hn_rxinfo {
+ uint32_t vlan_info;
+ uint32_t csum_info;
+ uint32_t hash_info;
+ uint32_t hash_value;
+};
+
+#define HN_RXINFO_VLAN 0x0001
+#define HN_RXINFO_CSUM 0x0002
+#define HN_RXINFO_HASHINF 0x0004
+#define HN_RXINFO_HASHVAL 0x0008
+#define HN_RXINFO_ALL \
+ (HN_RXINFO_VLAN | \
+ HN_RXINFO_CSUM | \
+ HN_RXINFO_HASHINF | \
+ HN_RXINFO_HASHVAL)
+
+#define HN_NDIS_VLAN_INFO_INVALID 0xffffffff
+#define HN_NDIS_RXCSUM_INFO_INVALID 0
+#define HN_NDIS_HASH_INFO_INVALID 0
+
+/*
+ * Per-transmit bookkeeping.
+ * A slot in the transmit ring (chim_index) is reserved for each transmit.
+ *
+ * There are two types of transmit:
+ *   - buffered transmit, where the chimney buffer is used and the
+ *     RNDIS header is in the buffer. mbuf == NULL in this case.
+ *
+ *   - direct transmit, where the RNDIS header is in the rndis_pkt;
+ *     the mbuf is freed after transmit.
+ *
+ * Descriptors come from a per-port pool, which is used
+ * to limit the number of outstanding requests per device.
+ */
+struct hn_txdesc {
+ struct rte_mbuf *m;
+
+ uint16_t queue_id;
+ uint16_t chim_index;
+ uint32_t chim_size;
+ uint32_t data_size;
+ uint32_t packets;
+
+ struct rndis_packet_msg *rndis_pkt;
+};
+
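+/*
+ * Illustrative flow of a descriptor through the two paths above
+ * (a sketch, not driver code; see hn_xmit_pkts() below):
+ *
+ *	txd = hn_new_txd(hv, txq);	// reserves a chim_index slot
+ *	if (packet is small)
+ *		hn_append_to_chim(...);	// buffered: txd->m stays NULL
+ *	else
+ *		hn_xmit_sg(...);	// direct: txd->m = m, freed on
+ *					// send completion
+ */
+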
+#define HN_RNDIS_PKT_LEN \
+ (sizeof(struct rndis_packet_msg) + \
+ RNDIS_PKTINFO_SIZE(NDIS_HASH_VALUE_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) + \
+ RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))
+
+/* Minimum space required for a packet */
+#define HN_PKTSIZE_MIN(align) \
+ RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
+
+#define DEFAULT_TX_FREE_THRESH 32U
+
+static void
+hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
+{
+ uint32_t s = m->pkt_len;
+ const struct ether_addr *ea;
+
+ if (s == 64) {
+ stats->size_bins[1]++;
+ } else if (s > 64 && s < 1024) {
+ uint32_t bin;
+
+		/* count leading zeros and offset into the correct bin */
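+		/* e.g. s = 500: (32 - clz(500)) - 5 = 9 - 5 = 4,
+		 * i.e. the 256..511 byte bin (RFC 2819 style)
+		 */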
+ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
+ stats->size_bins[bin]++;
+ } else {
+ if (s < 64)
+ stats->size_bins[0]++;
+ else if (s < 1519)
+ stats->size_bins[6]++;
+ else if (s >= 1519)
+ stats->size_bins[7]++;
+ }
+
+ ea = rte_pktmbuf_mtod(m, const struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ stats->broadcast++;
+ else
+ stats->multicast++;
+ }
+}
+
+static inline unsigned int hn_rndis_pktlen(const struct rndis_packet_msg *pkt)
+{
+ return pkt->pktinfooffset + pkt->pktinfolen;
+}
+
+static inline uint32_t
+hn_rndis_pktmsg_offset(uint32_t ofs)
+{
+ return ofs - offsetof(struct rndis_packet_msg, dataoffset);
+}
+
+static void hn_txd_init(struct rte_mempool *mp __rte_unused,
+ void *opaque, void *obj, unsigned int idx)
+{
+ struct hn_txdesc *txd = obj;
+ struct rte_eth_dev *dev = opaque;
+ struct rndis_packet_msg *pkt;
+
+ memset(txd, 0, sizeof(*txd));
+ txd->chim_index = idx;
+
+ pkt = rte_malloc_socket("RNDIS_TX", HN_RNDIS_PKT_LEN,
+ rte_align32pow2(HN_RNDIS_PKT_LEN),
+ dev->device->numa_node);
+ if (!pkt)
+		rte_exit(EXIT_FAILURE, "cannot allocate RNDIS header");
+
+ txd->rndis_pkt = pkt;
+}
+
+/*
+ * Unlike Linux and FreeBSD, this driver uses a mempool
+ * to limit outstanding transmits and to reserve send buffers.
+ */
+int
+hn_tx_pool_init(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ char name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(name, sizeof(name),
+ "hn_txd_%u", dev->data->port_id);
+
+ PMD_INIT_LOG(DEBUG, "create a TX send pool %s n=%u size=%zu socket=%d",
+ name, hv->chim_cnt, sizeof(struct hn_txdesc),
+ dev->device->numa_node);
+
+ mp = rte_mempool_create(name, hv->chim_cnt, sizeof(struct hn_txdesc),
+ HN_TXD_CACHE_SIZE, 0,
+ NULL, NULL,
+ hn_txd_init, dev,
+ dev->device->numa_node, 0);
+ if (!mp) {
+ PMD_DRV_LOG(ERR,
+ "mempool %s create failed: %d", name, rte_errno);
+ return -rte_errno;
+ }
+
+ hv->tx_pool = mp;
+ return 0;
+}
+
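+/*
+ * Illustrative note: the mempool also acts as a slot allocator.
+ * hn_txd_init() recorded each object's index as its chim_index, so a
+ * successful get from the pool also reserves one chimney slot:
+ *
+ *	struct hn_txdesc *txd;
+ *
+ *	if (rte_mempool_get(hv->tx_pool, (void **)&txd) == 0)
+ *		;	// txd->chim_index names the reserved slot
+ */
+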
+static void hn_reset_txagg(struct hn_tx_queue *txq)
+{
+ txq->agg_szleft = txq->agg_szmax;
+ txq->agg_pktleft = txq->agg_pktmax;
+ txq->agg_txd = NULL;
+ txq->agg_prevpkt = NULL;
+}
+
+int
+hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc __rte_unused,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ struct hn_tx_queue *txq;
+ uint32_t tx_free_thresh;
+
+ PMD_INIT_FUNC_TRACE();
+
+ txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->hv = hv;
+ txq->chan = hv->channels[queue_idx];
+ txq->port_id = dev->data->port_id;
+ txq->queue_id = queue_idx;
+
+ tx_free_thresh = tx_conf->tx_free_thresh;
+ if (tx_free_thresh == 0)
+ tx_free_thresh = RTE_MIN(hv->chim_cnt / 4,
+ DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh >= hv->chim_cnt - 3)
+ tx_free_thresh = hv->chim_cnt - 3;
+
+ txq->free_thresh = tx_free_thresh;
+
+ txq->agg_szmax = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
+ txq->agg_pktmax = hv->rndis_agg_pkts;
+ txq->agg_align = hv->rndis_agg_align;
+
+ hn_reset_txagg(txq);
+
+ dev->data->tx_queues[queue_idx] = txq;
+
+ return 0;
+}
+
+void
+hn_dev_tx_queue_release(void *arg)
+{
+ struct hn_tx_queue *txq = arg;
+ struct hn_txdesc *txd;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!txq)
+ return;
+
+	/* If any pending data is still present, just drop it */
+ txd = txq->agg_txd;
+ if (txd)
+ rte_mempool_put(txq->hv->tx_pool, txd);
+
+ rte_free(txq);
+}
+
+void
+hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct hn_data *hv = dev->data->dev_private;
+	struct hn_tx_queue *txq = dev->data->tx_queues[queue_idx];
+
+ qinfo->conf.tx_free_thresh = txq->free_thresh;
+ qinfo->nb_desc = hv->tx_pool->size;
+}
+
+static void
+hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
+ unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
+{
+ struct hn_txdesc *txd = (struct hn_txdesc *)xactid;
+ struct hn_tx_queue *txq;
+
+	/* Control packets are sent with xactid == 0 */
+ if (!txd)
+ return;
+
+ txq = dev->data->tx_queues[queue_id];
+ if (likely(ack->status == NVS_STATUS_OK)) {
+ PMD_TX_LOG(DEBUG, "port %u:%u complete tx %u packets %u bytes %u",
+ txq->port_id, txq->queue_id, txd->chim_index,
+ txd->packets, txd->data_size);
+ txq->stats.bytes += txd->data_size;
+ txq->stats.packets += txd->packets;
+ } else {
+ PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
+ txq->port_id, txq->queue_id, txd->chim_index, ack->status);
+ ++txq->stats.errors;
+ }
+
+ rte_pktmbuf_free(txd->m);
+
+ rte_mempool_put(txq->hv->tx_pool, txd);
+}
+
+/* Handle transmit completion events */
+static void
+hn_nvs_handle_comp(struct rte_eth_dev *dev, uint16_t queue_id,
+ const struct vmbus_chanpkt_hdr *pkt,
+ const void *data)
+{
+ const struct hn_nvs_hdr *hdr = data;
+
+ switch (hdr->type) {
+ case NVS_TYPE_RNDIS_ACK:
+ hn_nvs_send_completed(dev, queue_id, pkt->xactid, data);
+ break;
+
+ default:
+ PMD_TX_LOG(NOTICE,
+ "unexpected send completion type %u",
+ hdr->type);
+ }
+}
+
+/* Parse per-packet info (metadata) */
+static int
+hn_rndis_rxinfo(const void *info_data, unsigned int info_dlen,
+ struct hn_rxinfo *info)
+{
+ const struct rndis_pktinfo *pi = info_data;
+ uint32_t mask = 0;
+
+ while (info_dlen != 0) {
+ const void *data;
+ uint32_t dlen;
+
+ if (unlikely(info_dlen < sizeof(*pi)))
+ return -EINVAL;
+
+ if (unlikely(info_dlen < pi->size))
+ return -EINVAL;
+ info_dlen -= pi->size;
+
+ if (unlikely(pi->size & RNDIS_PKTINFO_SIZE_ALIGNMASK))
+ return -EINVAL;
+ if (unlikely(pi->size < pi->offset))
+ return -EINVAL;
+
+ dlen = pi->size - pi->offset;
+ data = pi->data;
+
+ switch (pi->type) {
+ case NDIS_PKTINFO_TYPE_VLAN:
+ if (unlikely(dlen < NDIS_VLAN_INFO_SIZE))
+ return -EINVAL;
+ info->vlan_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_VLAN;
+ break;
+
+ case NDIS_PKTINFO_TYPE_CSUM:
+ if (unlikely(dlen < NDIS_RXCSUM_INFO_SIZE))
+ return -EINVAL;
+ info->csum_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_CSUM;
+ break;
+
+ case NDIS_PKTINFO_TYPE_HASHVAL:
+ if (unlikely(dlen < NDIS_HASH_VALUE_SIZE))
+ return -EINVAL;
+ info->hash_value = *((const uint32_t *)data);
+ mask |= HN_RXINFO_HASHVAL;
+ break;
+
+ case NDIS_PKTINFO_TYPE_HASHINF:
+ if (unlikely(dlen < NDIS_HASH_INFO_SIZE))
+ return -EINVAL;
+ info->hash_info = *((const uint32_t *)data);
+ mask |= HN_RXINFO_HASHINF;
+ break;
+
+ default:
+ goto next;
+ }
+
+ if (mask == HN_RXINFO_ALL)
+ break; /* All found; done */
+next:
+ pi = (const struct rndis_pktinfo *)
+ ((const uint8_t *)pi + pi->size);
+ }
+
+ /*
+ * Final fixup.
+ * - If there is no hash value, invalidate the hash info.
+ */
+ if (!(mask & HN_RXINFO_HASHVAL))
+ info->hash_info = HN_NDIS_HASH_INFO_INVALID;
+ return 0;
+}
+
+/*
+ * Ack the consumed RXBUF associated with this channel packet,
+ * so that this RXBUF can be recycled by the hypervisor.
+ */
+static void hn_rx_buf_release(struct hn_rx_bufinfo *rxb)
+{
+ struct rte_mbuf_ext_shared_info *shinfo = &rxb->shinfo;
+ struct hn_data *hv = rxb->hv;
+
+ if (rte_mbuf_ext_refcnt_update(shinfo, -1) == 0) {
+ hn_nvs_ack_rxbuf(rxb->chan, rxb->xactid);
+ --hv->rxbuf_outstanding;
+ }
+}
+
+static void hn_rx_buf_free_cb(void *buf __rte_unused, void *opaque)
+{
+ hn_rx_buf_release(opaque);
+}
+
+static struct hn_rx_bufinfo *hn_rx_buf_init(const struct hn_rx_queue *rxq,
+ const struct vmbus_chanpkt_rxbuf *pkt)
+{
+ struct hn_rx_bufinfo *rxb;
+
+ rxb = rxq->hv->rxbuf_info + pkt->hdr.xactid;
+ rxb->chan = rxq->chan;
+ rxb->xactid = pkt->hdr.xactid;
+ rxb->hv = rxq->hv;
+
+ rxb->shinfo.free_cb = hn_rx_buf_free_cb;
+ rxb->shinfo.fcb_opaque = rxb;
+ rte_mbuf_ext_refcnt_set(&rxb->shinfo, 1);
+ return rxb;
+}
+
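+/*
+ * Illustrative refcount timeline, assuming two packets are attached
+ * to the same receive buffer section:
+ *
+ *	hn_rx_buf_init()	refcnt = 1	(creator reference)
+ *	attach packet A		refcnt = 2
+ *	attach packet B		refcnt = 3
+ *	hn_rx_buf_release()	refcnt = 2	(creator done)
+ *	free packet A		refcnt = 1
+ *	free packet B		refcnt = 0 -> hn_nvs_ack_rxbuf()
+ */
+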
+static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
+ uint8_t *data, unsigned int headroom, unsigned int dlen,
+ const struct hn_rxinfo *info)
+{
+ struct hn_data *hv = rxq->hv;
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(rxq->mb_pool);
+ if (unlikely(!m)) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxq->port_id];
+
+ dev->data->rx_mbuf_alloc_failed++;
+ return;
+ }
+
+ /*
+	 * For large packets, avoid copying if possible, but keep some
+	 * space available in the receive area for later packets.
+ */
+ if (dlen >= HN_RXCOPY_THRESHOLD &&
+ hv->rxbuf_outstanding < hv->rxbuf_section_cnt / 2) {
+ struct rte_mbuf_ext_shared_info *shinfo;
+ const void *rxbuf;
+ rte_iova_t iova;
+
+ /*
+		 * Build an external mbuf that points to the receive area.
+		 * Use the refcount to handle multiple packets in the same
+		 * receive buffer section.
+ */
+ rxbuf = hv->rxbuf_res->addr;
+ iova = rte_mem_virt2iova(rxbuf) + RTE_PTR_DIFF(data, rxbuf);
+ shinfo = &rxb->shinfo;
+
+ if (rte_mbuf_ext_refcnt_update(shinfo, 1) == 1)
+ ++hv->rxbuf_outstanding;
+
+ rte_pktmbuf_attach_extbuf(m, data, iova,
+ dlen + headroom, shinfo);
+ m->data_off = headroom;
+ } else {
+		/* Mbufs in the pool must be large enough to hold small packets */
+ if (unlikely(rte_pktmbuf_tailroom(m) < dlen)) {
+ rte_pktmbuf_free_seg(m);
+ ++rxq->stats.errors;
+ return;
+ }
+ rte_memcpy(rte_pktmbuf_mtod(m, void *),
+ data + headroom, dlen);
+ }
+
+ m->port = rxq->port_id;
+ m->pkt_len = dlen;
+ m->data_len = dlen;
+ m->packet_type = rte_net_get_ptype(m, NULL,
+ RTE_PTYPE_L2_MASK |
+ RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
+
+ if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
+ m->vlan_tci = info->vlan_info;
+ m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+ }
+
+ if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
+ if (info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK)
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
+ | NDIS_RXCSUM_INFO_TCPCS_OK))
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
+ | NDIS_RXCSUM_INFO_UDPCS_FAILED))
+ m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+
+ if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
+ m->ol_flags |= PKT_RX_RSS_HASH;
+ m->hash.rss = info->hash_value;
+ }
+
+ PMD_RX_LOG(DEBUG,
+ "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
+ rxq->port_id, rxq->queue_id, rxb->xactid,
+ m->pkt_len, m->packet_type, m->ol_flags);
+
+ ++rxq->stats.packets;
+ rxq->stats.bytes += m->pkt_len;
+ hn_update_packet_stats(&rxq->stats, m);
+
+ if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
+ ++rxq->ring_full;
+ rte_pktmbuf_free(m);
+ }
+}
+
+static void hn_rndis_rx_data(struct hn_rx_queue *rxq,
+ struct hn_rx_bufinfo *rxb,
+ void *data, uint32_t dlen)
+{
+ unsigned int data_off, data_len, pktinfo_off, pktinfo_len;
+ const struct rndis_packet_msg *pkt = data;
+ struct hn_rxinfo info = {
+ .vlan_info = HN_NDIS_VLAN_INFO_INVALID,
+ .csum_info = HN_NDIS_RXCSUM_INFO_INVALID,
+ .hash_info = HN_NDIS_HASH_INFO_INVALID,
+ };
+ int err;
+
+ hn_rndis_dump(pkt);
+
+ if (unlikely(dlen < sizeof(*pkt)))
+ goto error;
+
+ if (unlikely(dlen < pkt->len))
+ goto error; /* truncated RNDIS from host */
+
+ if (unlikely(pkt->len < pkt->datalen
+ + pkt->oobdatalen + pkt->pktinfolen))
+ goto error;
+
+ if (unlikely(pkt->datalen == 0))
+ goto error;
+
+ /* Check offsets. */
+ if (unlikely(pkt->dataoffset < RNDIS_PACKET_MSG_OFFSET_MIN))
+ goto error;
+
+ if (likely(pkt->pktinfooffset > 0) &&
+ unlikely(pkt->pktinfooffset < RNDIS_PACKET_MSG_OFFSET_MIN ||
+ (pkt->pktinfooffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK)))
+ goto error;
+
+ data_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
+ data_len = pkt->datalen;
+ pktinfo_off = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->pktinfooffset);
+ pktinfo_len = pkt->pktinfolen;
+
+ if (likely(pktinfo_len > 0)) {
+ err = hn_rndis_rxinfo((const uint8_t *)pkt + pktinfo_off,
+ pktinfo_len, &info);
+ if (err)
+ goto error;
+ }
+
+ if (unlikely(data_off + data_len > pkt->len))
+ goto error;
+
+ if (unlikely(data_len < ETHER_HDR_LEN))
+ goto error;
+
+ hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
+ return;
+error:
+ ++rxq->stats.errors;
+}
+
+static void
+hn_rndis_receive(const struct rte_eth_dev *dev, struct hn_rx_queue *rxq,
+ struct hn_rx_bufinfo *rxb, void *buf, uint32_t len)
+{
+ const struct rndis_msghdr *hdr = buf;
+
+ switch (hdr->type) {
+ case RNDIS_PACKET_MSG:
+ if (dev->data->dev_started)
+ hn_rndis_rx_data(rxq, rxb, buf, len);
+ break;
+
+ case RNDIS_INDICATE_STATUS_MSG:
+ hn_rndis_link_status(rxq->hv, buf);
+ break;
+
+ case RNDIS_INITIALIZE_CMPLT:
+ case RNDIS_QUERY_CMPLT:
+ case RNDIS_SET_CMPLT:
+ hn_rndis_receive_response(rxq->hv, buf, len);
+ break;
+
+ default:
+ PMD_DRV_LOG(NOTICE,
+ "unexpected RNDIS message (type %#x len %u)",
+ hdr->type, len);
+ break;
+ }
+}
+
+static void
+hn_nvs_handle_rxbuf(struct rte_eth_dev *dev,
+ struct hn_data *hv,
+ struct hn_rx_queue *rxq,
+ const struct vmbus_chanpkt_hdr *hdr,
+ const void *buf)
+{
+ const struct vmbus_chanpkt_rxbuf *pkt;
+ const struct hn_nvs_hdr *nvs_hdr = buf;
+ uint32_t rxbuf_sz = hv->rxbuf_res->len;
+ char *rxbuf = hv->rxbuf_res->addr;
+ unsigned int i, hlen, count;
+ struct hn_rx_bufinfo *rxb;
+
+	/* At minimum we need the type header */
+ if (unlikely(vmbus_chanpkt_datalen(hdr) < sizeof(*nvs_hdr))) {
+ PMD_RX_LOG(ERR, "invalid receive nvs RNDIS");
+ return;
+ }
+
+	/* Make sure that this is an RNDIS message. */
+ if (unlikely(nvs_hdr->type != NVS_TYPE_RNDIS)) {
+ PMD_RX_LOG(ERR, "nvs type %u, not RNDIS",
+ nvs_hdr->type);
+ return;
+ }
+
+ hlen = vmbus_chanpkt_getlen(hdr->hlen);
+ if (unlikely(hlen < sizeof(*pkt))) {
+ PMD_RX_LOG(ERR, "invalid rxbuf chanpkt");
+ return;
+ }
+
+ pkt = container_of(hdr, const struct vmbus_chanpkt_rxbuf, hdr);
+ if (unlikely(pkt->rxbuf_id != NVS_RXBUF_SIG)) {
+ PMD_RX_LOG(ERR, "invalid rxbuf_id 0x%08x",
+ pkt->rxbuf_id);
+ return;
+ }
+
+ count = pkt->rxbuf_cnt;
+ if (unlikely(hlen < offsetof(struct vmbus_chanpkt_rxbuf,
+ rxbuf[count]))) {
+ PMD_RX_LOG(ERR, "invalid rxbuf_cnt %u", count);
+ return;
+ }
+
+ if (pkt->hdr.xactid > hv->rxbuf_section_cnt) {
+ PMD_RX_LOG(ERR, "invalid rxbuf section id %" PRIx64,
+ pkt->hdr.xactid);
+ return;
+ }
+
+ /* Setup receive buffer info to allow for callback */
+ rxb = hn_rx_buf_init(rxq, pkt);
+
+ /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
+ for (i = 0; i < count; ++i) {
+ unsigned int ofs, len;
+
+ ofs = pkt->rxbuf[i].ofs;
+ len = pkt->rxbuf[i].len;
+
+ if (unlikely(ofs + len > rxbuf_sz)) {
+ PMD_RX_LOG(ERR,
+ "%uth RNDIS msg overflow ofs %u, len %u",
+ i, ofs, len);
+ continue;
+ }
+
+ if (unlikely(len == 0)) {
+ PMD_RX_LOG(ERR, "%uth RNDIS msg len %u", i, len);
+ continue;
+ }
+
+ hn_rndis_receive(dev, rxq, rxb,
+ rxbuf + ofs, len);
+ }
+
+	/* Send ACK now if no external mbuf is still using the buffer */
+ hn_rx_buf_release(rxb);
+}
+
+struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
+ uint16_t queue_id,
+ unsigned int socket_id)
+{
+ struct hn_rx_queue *rxq;
+
+ rxq = rte_zmalloc_socket("HN_RXQ",
+ sizeof(*rxq) + HN_RXQ_EVENT_DEFAULT,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq) {
+ rxq->hv = hv;
+ rxq->chan = hv->channels[queue_id];
+ rte_spinlock_init(&rxq->ring_lock);
+ rxq->port_id = hv->port_id;
+ rxq->queue_id = queue_id;
+ }
+ return rxq;
+}
+
+int
+hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct hn_data *hv = dev->data->dev_private;
+ char ring_name[RTE_RING_NAMESIZE];
+ struct hn_rx_queue *rxq;
+ unsigned int count;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue_idx == 0) {
+ rxq = hv->primary;
+ } else {
+ rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
+ if (!rxq)
+ return -ENOMEM;
+ }
+
+ rxq->mb_pool = mp;
+ count = rte_mempool_avail_count(mp) / dev->data->nb_rx_queues;
+ if (nb_desc == 0 || nb_desc > count)
+ nb_desc = count;
+
+ /*
+	 * Staging ring from the receive event logic to rx_pkts.
+	 * rx_pkts assumes the caller handles any multi-threading;
+	 * the event logic has its own locking.
+ */
+ snprintf(ring_name, sizeof(ring_name),
+ "hn_rx_%u_%u", dev->data->port_id, queue_idx);
+ rxq->rx_ring = rte_ring_create(ring_name,
+ rte_align32pow2(nb_desc),
+ socket_id, 0);
+ if (!rxq->rx_ring)
+ goto fail;
+
+ dev->data->rx_queues[queue_idx] = rxq;
+ return 0;
+
+fail:
+ rte_ring_free(rxq->rx_ring);
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+ return -ENOMEM;
+}
+
+void
+hn_dev_rx_queue_release(void *arg)
+{
+ struct hn_rx_queue *rxq = arg;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!rxq)
+ return;
+
+ rte_ring_free(rxq->rx_ring);
+ rxq->rx_ring = NULL;
+ rxq->mb_pool = NULL;
+
+ if (rxq != rxq->hv->primary) {
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
+ }
+}
+
+void
+hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct hn_rx_queue *rxq = dev->data->rx_queues[queue_idx];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = 1;
+ qinfo->nb_desc = rte_ring_get_capacity(rxq->rx_ring);
+}
+
+static void
+hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
+ const void *data)
+{
+ const struct hn_nvs_hdr *hdr = data;
+
+ if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) {
+ PMD_DRV_LOG(ERR, "invalid nvs notify");
+ return;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "got notify, nvs type %u", hdr->type);
+}
+
+/*
+ * Process pending events on the channel.
+ * Called from both Rx queue poll and Tx cleanup
+ */
+void hn_process_events(struct hn_data *hv, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
+ struct hn_rx_queue *rxq;
+ uint32_t bytes_read = 0;
+ int ret = 0;
+
+ rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];
+
+ /* If no pending data then nothing to do */
+ if (rte_vmbus_chan_rx_empty(rxq->chan))
+ return;
+
+ /*
+	 * The channel is shared between the Rx and Tx queues, so a lock
+	 * is needed because DPDK does not force the same CPU to be used
+	 * for Rx and Tx.
+ */
+ if (unlikely(!rte_spinlock_trylock(&rxq->ring_lock)))
+ return;
+
+ for (;;) {
+ const struct vmbus_chanpkt_hdr *pkt;
+ uint32_t len = HN_RXQ_EVENT_DEFAULT;
+ const void *data;
+
+ ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
+ if (ret == -EAGAIN)
+ break; /* ring is empty */
+
+ else if (ret == -ENOBUFS)
+ rte_exit(EXIT_FAILURE, "event buffer not big enough (%u < %u)",
+ HN_RXQ_EVENT_DEFAULT, len);
+ else if (ret <= 0)
+ rte_exit(EXIT_FAILURE,
+ "vmbus ring buffer error: %d", ret);
+
+ bytes_read += ret;
+ pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
+ data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);
+
+ switch (pkt->type) {
+ case VMBUS_CHANPKT_TYPE_COMP:
+ hn_nvs_handle_comp(dev, queue_id, pkt, data);
+ break;
+
+ case VMBUS_CHANPKT_TYPE_RXBUF:
+ hn_nvs_handle_rxbuf(dev, hv, rxq, pkt, data);
+ break;
+
+ case VMBUS_CHANPKT_TYPE_INBAND:
+ hn_nvs_handle_notify(pkt, data);
+ break;
+
+ default:
+ PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
+ break;
+ }
+
+ if (rxq->rx_ring && rte_ring_full(rxq->rx_ring))
+ break;
+ }
+
+ if (bytes_read > 0)
+ rte_vmbus_chan_signal_read(rxq->chan, bytes_read);
+
+ rte_spinlock_unlock(&rxq->ring_lock);
+}
+
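+/*
+ * Usage note (illustrative): both burst paths below call
+ * hn_process_events(); the trylock above means a poller that loses
+ * the race simply returns and retries on its next burst:
+ *
+ *	// Rx side, in hn_recv_pkts()
+ *	if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+ *		hn_process_events(hv, rxq->queue_id);
+ *
+ *	// Tx side, in hn_xmit_pkts()
+ *	if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
+ *		hn_process_events(hv, txq->queue_id);
+ */
+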
+static void hn_append_to_chim(struct hn_tx_queue *txq,
+ struct rndis_packet_msg *pkt,
+ const struct rte_mbuf *m)
+{
+ struct hn_txdesc *txd = txq->agg_txd;
+ uint8_t *buf = (uint8_t *)pkt;
+ unsigned int data_offs;
+
+ hn_rndis_dump(pkt);
+
+ data_offs = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);
+ txd->chim_size += pkt->len;
+ txd->data_size += m->pkt_len;
+ ++txd->packets;
+ hn_update_packet_stats(&txq->stats, m);
+
+ for (; m; m = m->next) {
+ uint16_t len = rte_pktmbuf_data_len(m);
+
+ rte_memcpy(buf + data_offs,
+ rte_pktmbuf_mtod(m, const char *), len);
+ data_offs += len;
+ }
+}
+
+/*
+ * Send any pending aggregated data in the chimney buffer.
+ * Returns an error if the send was unsuccessful because the channel
+ * ring buffer was full.
+ */
+static int hn_flush_txagg(struct hn_tx_queue *txq, bool *need_sig)
+{
+ struct hn_txdesc *txd = txq->agg_txd;
+ struct hn_nvs_rndis rndis;
+ int ret;
+
+ if (!txd)
+ return 0;
+
+ rndis = (struct hn_nvs_rndis) {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
+ .chim_idx = txd->chim_index,
+ .chim_sz = txd->chim_size,
+ };
+
+ PMD_TX_LOG(DEBUG, "port %u:%u tx %u size %u",
+ txq->port_id, txq->queue_id, txd->chim_index, txd->chim_size);
+
+ ret = hn_nvs_send(txq->chan, VMBUS_CHANPKT_FLAG_RC,
+ &rndis, sizeof(rndis), (uintptr_t)txd, need_sig);
+
+ if (likely(ret == 0))
+ hn_reset_txagg(txq);
+ else
+ PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d",
+ txq->port_id, txq->queue_id, ret);
+
+ return ret;
+}
+
+static struct hn_txdesc *hn_new_txd(struct hn_data *hv,
+ struct hn_tx_queue *txq)
+{
+ struct hn_txdesc *txd;
+
+ if (rte_mempool_get(hv->tx_pool, (void **)&txd)) {
+ ++txq->stats.nomemory;
+ PMD_TX_LOG(DEBUG, "tx pool exhausted!");
+ return NULL;
+ }
+
+ txd->m = NULL;
+ txd->queue_id = txq->queue_id;
+ txd->packets = 0;
+ txd->data_size = 0;
+ txd->chim_size = 0;
+
+ return txd;
+}
+
+static void *
+hn_try_txagg(struct hn_data *hv, struct hn_tx_queue *txq, uint32_t pktsize)
+{
+ struct hn_txdesc *agg_txd = txq->agg_txd;
+ struct rndis_packet_msg *pkt;
+ void *chim;
+
+ if (agg_txd) {
+ unsigned int padding, olen;
+
+ /*
+		 * Update the previous RNDIS packet's total length; it can
+		 * grow because of the mandatory alignment padding for this
+		 * RNDIS packet. Update the aggregating txdesc's chimney
+		 * sending buffer size accordingly.
+		 *
+		 * Zero out the padding, as required by the RNDIS spec.
+ */
+ pkt = txq->agg_prevpkt;
+ olen = pkt->len;
+ padding = RTE_ALIGN(olen, txq->agg_align) - olen;
+ if (padding > 0) {
+ agg_txd->chim_size += padding;
+ pkt->len += padding;
+ memset((uint8_t *)pkt + olen, 0, padding);
+ }
+
+ chim = (uint8_t *)pkt + pkt->len;
+
+ txq->agg_pktleft--;
+ txq->agg_szleft -= pktsize;
+ if (txq->agg_szleft < HN_PKTSIZE_MIN(txq->agg_align)) {
+ /*
+ * Probably can't aggregate more packets,
+ * flush this aggregating txdesc proactively.
+ */
+ txq->agg_pktleft = 0;
+ }
+ } else {
+ agg_txd = hn_new_txd(hv, txq);
+ if (!agg_txd)
+ return NULL;
+
+ chim = (uint8_t *)hv->chim_res->addr
+ + agg_txd->chim_index * hv->chim_szmax;
+
+ txq->agg_txd = agg_txd;
+ txq->agg_pktleft = txq->agg_pktmax - 1;
+ txq->agg_szleft = txq->agg_szmax - pktsize;
+ }
+ txq->agg_prevpkt = chim;
+
+ return chim;
+}
+
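+/*
+ * Worked example (illustrative): with agg_align = 8 and a previous
+ * RNDIS packet of len 109, RTE_ALIGN(109, 8) = 112, so three bytes of
+ * zero padding are appended, pkt->len becomes 112 and the next packet
+ * starts at (uint8_t *)pkt + 112.
+ */
+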
+static inline void *
+hn_rndis_pktinfo_append(struct rndis_packet_msg *pkt,
+ uint32_t pi_dlen, uint32_t pi_type)
+{
+ const uint32_t pi_size = RNDIS_PKTINFO_SIZE(pi_dlen);
+ struct rndis_pktinfo *pi;
+
+ /*
+ * Per-packet-info does not move; it only grows.
+ *
+ * NOTE:
+ * pktinfooffset in this phase counts from the beginning
+ * of rndis_packet_msg.
+ */
+ pi = (struct rndis_pktinfo *)((uint8_t *)pkt + hn_rndis_pktlen(pkt));
+
+ pkt->pktinfolen += pi_size;
+
+ pi->size = pi_size;
+ pi->type = pi_type;
+ pi->offset = RNDIS_PKTINFO_OFFSET;
+
+ return pi->data;
+}
+
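+/*
+ * Illustrative layout after one append (e.g. the 4-byte VLAN info):
+ *
+ *	+--------------------------+ <- pkt
+ *	| rndis_packet_msg         |
+ *	+--------------------------+ <- pkt + pkt->pktinfooffset
+ *	| pktinfo: size, type, off |
+ *	| 4 bytes of VLAN info     |
+ *	+--------------------------+ <- pkt + hn_rndis_pktlen(pkt),
+ *	                               where the next append lands
+ */
+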
+/* Put RNDIS header and packet info on packet */
+static void hn_encap(struct rndis_packet_msg *pkt,
+ uint16_t queue_id,
+ const struct rte_mbuf *m)
+{
+ unsigned int hlen = m->l2_len + m->l3_len;
+ uint32_t *pi_data;
+ uint32_t pkt_hlen;
+
+ pkt->type = RNDIS_PACKET_MSG;
+ pkt->len = m->pkt_len;
+ pkt->dataoffset = 0;
+ pkt->datalen = m->pkt_len;
+ pkt->oobdataoffset = 0;
+ pkt->oobdatalen = 0;
+ pkt->oobdataelements = 0;
+ pkt->pktinfooffset = sizeof(*pkt);
+ pkt->pktinfolen = 0;
+ pkt->vchandle = 0;
+ pkt->reserved = 0;
+
+ /*
+	 * Set the hash value for this packet to the queue_id, so that
+	 * the TX done event for this packet arrives on the right channel.
+ */
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_HASH_VALUE_SIZE,
+ NDIS_PKTINFO_TYPE_HASHVAL);
+ *pi_data = queue_id;
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_VLAN_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_VLAN);
+ *pi_data = m->vlan_tci;
+ }
+
+ if (m->ol_flags & PKT_TX_TCP_SEG) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_LSO2_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_LSO);
+
+ if (m->ol_flags & PKT_TX_IPV6) {
+ *pi_data = NDIS_LSO2_INFO_MAKEIPV6(hlen,
+ m->tso_segsz);
+ } else {
+ *pi_data = NDIS_LSO2_INFO_MAKEIPV4(hlen,
+ m->tso_segsz);
+ }
+ } else if (m->ol_flags &
+ (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)) {
+ pi_data = hn_rndis_pktinfo_append(pkt, NDIS_TXCSUM_INFO_SIZE,
+ NDIS_PKTINFO_TYPE_CSUM);
+ *pi_data = 0;
+
+ if (m->ol_flags & PKT_TX_IPV6)
+ *pi_data |= NDIS_TXCSUM_INFO_IPV6;
+ if (m->ol_flags & PKT_TX_IPV4) {
+ *pi_data |= NDIS_TXCSUM_INFO_IPV4;
+
+ if (m->ol_flags & PKT_TX_IP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_IPCS;
+ }
+
+ if (m->ol_flags & PKT_TX_TCP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_MKTCPCS(hlen);
+ else if (m->ol_flags & PKT_TX_UDP_CKSUM)
+ *pi_data |= NDIS_TXCSUM_INFO_MKUDPCS(hlen);
+ }
+
+ pkt_hlen = pkt->pktinfooffset + pkt->pktinfolen;
+ /* Fixup RNDIS packet message total length */
+ pkt->len += pkt_hlen;
+
+ /* Convert RNDIS packet message offsets */
+ pkt->dataoffset = hn_rndis_pktmsg_offset(pkt_hlen);
+ pkt->pktinfooffset = hn_rndis_pktmsg_offset(pkt->pktinfooffset);
+}
+
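+/*
+ * Worked example of the final offset conversion in hn_encap()
+ * (illustrative): on the wire the offsets count from the dataoffset
+ * field, which sits 8 bytes into the message (after type and len), so
+ * with pkt_hlen = 52 the transmitted dataoffset is 52 - 8 = 44.
+ */
+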
+/* How many scatter-gather list elements are needed */
+static unsigned int hn_get_slots(const struct rte_mbuf *m)
+{
+ unsigned int slots = 1; /* for RNDIS header */
+
+ while (m) {
+ unsigned int size = rte_pktmbuf_data_len(m);
+ unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
+
+ slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
+ m = m->next;
+ }
+
+ return slots;
+}
+
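+/*
+ * Worked example (illustrative): a 6000-byte segment whose IOVA starts
+ * 3000 bytes into a 4K page covers (3000 + 6000 + 4095) / 4096 = 3
+ * pages, so it needs three GPA entries, plus the one reserved for the
+ * RNDIS header.
+ */
+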
+/* Build scatter-gather list from a chained mbuf */
+static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
+ const struct rte_mbuf *m)
+{
+ unsigned int segs = 0;
+
+ while (m) {
+ rte_iova_t addr = rte_mbuf_data_iova(m);
+ unsigned int page = addr / PAGE_SIZE;
+ unsigned int offset = addr & PAGE_MASK;
+ unsigned int len = rte_pktmbuf_data_len(m);
+
+ while (len > 0) {
+ unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
+
+ sg[segs].page = page;
+ sg[segs].ofs = offset;
+ sg[segs].len = bytes;
+ segs++;
+
+ ++page;
+ offset = 0;
+ len -= bytes;
+ }
+ m = m->next;
+ }
+
+ return segs;
+}
+
+/* Transmit directly from mbuf */
+static int hn_xmit_sg(struct hn_tx_queue *txq,
+ const struct hn_txdesc *txd, const struct rte_mbuf *m,
+ bool *need_sig)
+{
+ struct vmbus_gpa sg[hn_get_slots(m)];
+ struct hn_nvs_rndis nvs_rndis = {
+ .type = NVS_TYPE_RNDIS,
+ .rndis_mtype = NVS_RNDIS_MTYPE_DATA,
+ .chim_sz = txd->chim_size,
+ };
+ rte_iova_t addr;
+ unsigned int segs;
+
+ /* attach aggregation data if present */
+ if (txd->chim_size > 0)
+ nvs_rndis.chim_idx = txd->chim_index;
+ else
+ nvs_rndis.chim_idx = NVS_CHIM_IDX_INVALID;
+
+ hn_rndis_dump(txd->rndis_pkt);
+
+ /* pass IOVA of rndis header in first segment */
+ addr = rte_malloc_virt2iova(txd->rndis_pkt);
+ if (unlikely(addr == RTE_BAD_IOVA)) {
+		PMD_DRV_LOG(ERR, "RNDIS transmit cannot get iova");
+ return -EINVAL;
+ }
+
+ sg[0].page = addr / PAGE_SIZE;
+ sg[0].ofs = addr & PAGE_MASK;
+ sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
+ segs = 1;
+
+ hn_update_packet_stats(&txq->stats, m);
+
+ segs += hn_fill_sg(sg + 1, m);
+
+ PMD_TX_LOG(DEBUG, "port %u:%u tx %u segs %u size %u",
+ txq->port_id, txq->queue_id, txd->chim_index,
+ segs, nvs_rndis.chim_sz);
+
+ return hn_nvs_send_sglist(txq->chan, sg, segs,
+ &nvs_rndis, sizeof(nvs_rndis),
+ (uintptr_t)txd, need_sig);
+}
+
+uint16_t
+hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct hn_tx_queue *txq = ptxq;
+ struct hn_data *hv = txq->hv;
+ bool need_sig = false;
+ uint16_t nb_tx;
+ int ret;
+
+ if (unlikely(hv->closed))
+ return 0;
+
+ if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
+ hn_process_events(hv, txq->queue_id);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *m = tx_pkts[nb_tx];
+ uint32_t pkt_size = m->pkt_len + HN_RNDIS_PKT_LEN;
+ struct rndis_packet_msg *pkt;
+
+ /* For small packets aggregate them in chimney buffer */
+ if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
+ /* If this packet will not fit, then flush */
+ if (txq->agg_pktleft == 0 ||
+ RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
+ if (hn_flush_txagg(txq, &need_sig))
+ goto fail;
+ }
+
+ pkt = hn_try_txagg(hv, txq, pkt_size);
+ if (unlikely(!pkt))
+ break;
+
+ hn_encap(pkt, txq->queue_id, m);
+ hn_append_to_chim(txq, pkt, m);
+
+ rte_pktmbuf_free(m);
+
+ /* if buffer is full, flush */
+ if (txq->agg_pktleft == 0 &&
+ hn_flush_txagg(txq, &need_sig))
+ goto fail;
+ } else {
+ struct hn_txdesc *txd;
+
+ /* can send chimney data and large packet at once */
+ txd = txq->agg_txd;
+ if (txd) {
+ hn_reset_txagg(txq);
+ } else {
+ txd = hn_new_txd(hv, txq);
+ if (unlikely(!txd))
+ break;
+ }
+
+ pkt = txd->rndis_pkt;
+ txd->m = m;
+ txd->data_size += m->pkt_len;
+ ++txd->packets;
+
+ hn_encap(pkt, txq->queue_id, m);
+
+ ret = hn_xmit_sg(txq, txd, m, &need_sig);
+ if (unlikely(ret != 0)) {
+ PMD_TX_LOG(NOTICE, "sg send failed: %d", ret);
+ ++txq->stats.errors;
+ rte_mempool_put(hv->tx_pool, txd);
+ goto fail;
+ }
+ }
+ }
+
+	/* If a partial buffer is left, try to send it;
+	 * if that fails, reuse it on the next send.
+ */
+ hn_flush_txagg(txq, &need_sig);
+
+fail:
+ if (need_sig)
+ rte_vmbus_chan_signal_tx(txq->chan);
+
+ return nb_tx;
+}
+
+uint16_t
+hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hn_rx_queue *rxq = prxq;
+ struct hn_data *hv = rxq->hv;
+
+ if (unlikely(hv->closed))
+ return 0;
+
+	/* If the ring does not hold enough packets, process more events */
+ if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+ hn_process_events(hv, rxq->queue_id);
+
+ /* Get mbufs off staging ring */
+ return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,
+ nb_pkts, NULL);
+}
diff --git a/drivers/net/netvsc/hn_var.h b/drivers/net/netvsc/hn_var.h
new file mode 100644
index 00000000..f7ff8585
--- /dev/null
+++ b/drivers/net/netvsc/hn_var.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2009-2018 Microsoft Corp.
+ * Copyright (c) 2016 Brocade Communications Systems, Inc.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ */
+
+/*
+ * Tunable ethdev params
+ */
+#define HN_MIN_RX_BUF_SIZE 1024
+#define HN_MAX_XFER_LEN 2048
+#define HN_MAX_MAC_ADDRS 1
+#define HN_MAX_CHANNELS 64
+
+/* Claimed to be 12232B */
+#define HN_MTU_MAX (9 * 1024)
+
+/* Retry interval */
+#define HN_CHAN_INTERVAL_US 100
+
+/* Buffers need to be aligned */
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#ifndef PAGE_MASK
+#define PAGE_MASK (PAGE_SIZE - 1)
+#endif
+
+struct hn_data;
+struct hn_txdesc;
+
+struct hn_stats {
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+ uint64_t nomemory;
+ uint64_t multicast;
+ uint64_t broadcast;
+ /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
+ uint64_t size_bins[8];
+};
+
+struct hn_tx_queue {
+ struct hn_data *hv;
+ struct vmbus_channel *chan;
+ uint16_t port_id;
+ uint16_t queue_id;
+ uint32_t free_thresh;
+
+ /* Applied packet transmission aggregation limits. */
+ uint32_t agg_szmax;
+ uint32_t agg_pktmax;
+ uint32_t agg_align;
+
+ /* Packet transmission aggregation states */
+ struct hn_txdesc *agg_txd;
+ uint32_t agg_pktleft;
+ uint32_t agg_szleft;
+ struct rndis_packet_msg *agg_prevpkt;
+
+ struct hn_stats stats;
+};
+
+struct hn_rx_queue {
+ struct hn_data *hv;
+ struct vmbus_channel *chan;
+ struct rte_mempool *mb_pool;
+ struct rte_ring *rx_ring;
+
+ rte_spinlock_t ring_lock;
+ uint32_t event_sz;
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct hn_stats stats;
+ uint64_t ring_full;
+
+ uint8_t event_buf[];
+};
+
+/* multi-packet data from host */
+struct hn_rx_bufinfo {
+ struct vmbus_channel *chan;
+ struct hn_data *hv;
+ uint64_t xactid;
+ struct rte_mbuf_ext_shared_info shinfo;
+} __rte_cache_aligned;
+
+struct hn_data {
+ struct rte_vmbus_device *vmbus;
+ struct hn_rx_queue *primary;
+ uint16_t port_id;
+ bool closed;
+ uint32_t link_status;
+ uint32_t link_speed;
+
+ struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */
+ struct hn_rx_bufinfo *rxbuf_info;
+ uint32_t rxbuf_section_cnt; /* # of Rx sections */
+ volatile uint32_t rxbuf_outstanding;
+ uint16_t max_queues; /* Max available queues */
+ uint16_t num_queues;
+ uint64_t rss_offloads;
+
+ struct rte_mem_resource *chim_res; /* UIO resource for Tx */
+ struct rte_mempool *tx_pool; /* Tx descriptors */
+ uint32_t chim_szmax; /* Max size per buffer */
+ uint32_t chim_cnt; /* Max packets per buffer */
+
+ uint32_t nvs_ver;
+ uint32_t ndis_ver;
+ uint32_t rndis_agg_size;
+ uint32_t rndis_agg_pkts;
+ uint32_t rndis_agg_align;
+
+ volatile uint32_t rndis_pending;
+ rte_atomic32_t rndis_req_id;
+ uint8_t rndis_resp[256];
+
+ struct ether_addr mac_addr;
+ struct vmbus_channel *channels[HN_MAX_CHANNELS];
+};
+
+static inline struct vmbus_channel *
+hn_primary_chan(const struct hn_data *hv)
+{
+ return hv->channels[0];
+}
+
+void hn_process_events(struct hn_data *hv, uint16_t queue_id);
+
+uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+int hn_tx_pool_init(struct rte_eth_dev *dev);
+int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void hn_dev_tx_queue_release(void *arg);
+void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_txq_info *qinfo);
+
+struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
+ uint16_t queue_id,
+ unsigned int socket_id);
+int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+void hn_dev_rx_queue_release(void *arg);
+void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+ struct rte_eth_rxq_info *qinfo);
diff --git a/drivers/net/netvsc/meson.build b/drivers/net/netvsc/meson.build
new file mode 100644
index 00000000..a717cdd4
--- /dev/null
+++ b/drivers/net/netvsc/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Microsoft Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VMBUS_BUS')
+version = 2
+sources = files('hn_ethdev.c', 'hn_rxtx.c', 'hn_rndis.c', 'hn_nvs.c')
+
+deps += ['bus_vmbus']
+
+allow_experimental_apis = true
diff --git a/drivers/net/netvsc/ndis.h b/drivers/net/netvsc/ndis.h
new file mode 100644
index 00000000..2e7ca99b
--- /dev/null
+++ b/drivers/net/netvsc/ndis.h
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * All rights reserved.
+ */
+
+#ifndef _NET_NDIS_H_
+#define _NET_NDIS_H_
+
+#define NDIS_MEDIA_STATE_CONNECTED 0
+#define NDIS_MEDIA_STATE_DISCONNECTED 1
+
+#define NDIS_NETCHANGE_TYPE_POSSIBLE 1
+#define NDIS_NETCHANGE_TYPE_DEFINITE 2
+#define NDIS_NETCHANGE_TYPE_FROMMEDIA 3
+
+#define NDIS_OFFLOAD_SET_NOCHG 0
+#define NDIS_OFFLOAD_SET_ON 1
+#define NDIS_OFFLOAD_SET_OFF 2
+
+/* a.k.a GRE MAC */
+#define NDIS_ENCAP_TYPE_NVGRE 0x00000001
+
+#define NDIS_HASH_FUNCTION_MASK 0x000000FF /* see hash function */
+#define NDIS_HASH_TYPE_MASK 0x00FFFF00 /* see hash type */
+
+/* hash function */
+#define NDIS_HASH_FUNCTION_TOEPLITZ 0x00000001
+
+/* hash type */
+#define NDIS_HASH_IPV4 0x00000100
+#define NDIS_HASH_TCP_IPV4 0x00000200
+#define NDIS_HASH_IPV6 0x00000400
+#define NDIS_HASH_IPV6_EX 0x00000800
+#define NDIS_HASH_TCP_IPV6 0x00001000
+#define NDIS_HASH_TCP_IPV6_EX 0x00002000
+
+#define NDIS_HASH_KEYSIZE_TOEPLITZ 40
+#define NDIS_HASH_INDCNT 128
+
+#define NDIS_OBJTYPE_DEFAULT 0x80
+#define NDIS_OBJTYPE_RSS_CAPS 0x88
+#define NDIS_OBJTYPE_RSS_PARAMS 0x89
+#define NDIS_OBJTYPE_OFFLOAD 0xa7
+
+struct ndis_object_hdr {
+ uint8_t ndis_type; /* NDIS_OBJTYPE_ */
+ uint8_t ndis_rev; /* type specific */
+ uint16_t ndis_size; /* incl. this hdr */
+} __rte_packed;
+
+/*
+ * OID_TCP_OFFLOAD_PARAMETERS
+ * ndis_type: NDIS_OBJTYPE_DEFAULT
+ */
+struct ndis_offload_params {
+ struct ndis_object_hdr ndis_hdr;
+ uint8_t ndis_ip4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_tcp4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_udp4csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_tcp6csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_udp6csum; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_lsov1; /* NDIS_OFFLOAD_PARAM_ */
+ uint8_t ndis_ipsecv1; /* NDIS_OFFLOAD_IPSECV1_ */
+ uint8_t ndis_lsov2_ip4; /* NDIS_OFFLOAD_LSOV2_ */
+ uint8_t ndis_lsov2_ip6; /* NDIS_OFFLOAD_LSOV2_ */
+ uint8_t ndis_tcp4conn; /* 0 */
+ uint8_t ndis_tcp6conn; /* 0 */
+ uint32_t ndis_flags; /* 0 */
+ /* NDIS >= 6.1 */
+ uint8_t ndis_ipsecv2; /* NDIS_OFFLOAD_IPSECV2_ */
+ uint8_t ndis_ipsecv2_ip4;/* NDIS_OFFLOAD_IPSECV2_ */
+ /* NDIS >= 6.30 */
+ uint8_t ndis_rsc_ip4; /* NDIS_OFFLOAD_RSC_ */
+ uint8_t ndis_rsc_ip6; /* NDIS_OFFLOAD_RSC_ */
+ uint8_t ndis_encap; /* NDIS_OFFLOAD_SET_ */
+ uint8_t ndis_encap_types;/* NDIS_ENCAP_TYPE_ */
+};
+
+#define NDIS_OFFLOAD_PARAMS_SIZE sizeof(struct ndis_offload_params)
+#define NDIS_OFFLOAD_PARAMS_SIZE_6_1 \
+ offsetof(struct ndis_offload_params, ndis_rsc_ip4)
+
+#define NDIS_OFFLOAD_PARAMS_REV_2 2 /* NDIS 6.1 */
+#define NDIS_OFFLOAD_PARAMS_REV_3 3 /* NDIS 6.30 */
+
+#define NDIS_OFFLOAD_PARAM_NOCHG 0 /* common */
+#define NDIS_OFFLOAD_PARAM_OFF 1
+#define NDIS_OFFLOAD_PARAM_TX 2
+#define NDIS_OFFLOAD_PARAM_RX 3
+#define NDIS_OFFLOAD_PARAM_TXRX 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_LSOV1_OFF 1
+#define NDIS_OFFLOAD_LSOV1_ON 2
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_IPSECV1_OFF 1
+#define NDIS_OFFLOAD_IPSECV1_AH 2
+#define NDIS_OFFLOAD_IPSECV1_ESP 3
+#define NDIS_OFFLOAD_IPSECV1_AH_ESP 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_LSOV2_OFF 1
+#define NDIS_OFFLOAD_LSOV2_ON 2
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_IPSECV2_OFF 1
+#define NDIS_OFFLOAD_IPSECV2_AH 2
+#define NDIS_OFFLOAD_IPSECV2_ESP 3
+#define NDIS_OFFLOAD_IPSECV2_AH_ESP 4
+
+/* NDIS_OFFLOAD_PARAM_NOCHG */
+#define NDIS_OFFLOAD_RSC_OFF 1
+#define NDIS_OFFLOAD_RSC_ON 2
+
+/*
+ * OID_GEN_RECEIVE_SCALE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_RSS_CAPS
+ */
+struct ndis_rss_caps {
+ struct ndis_object_hdr ndis_hdr;
+ uint32_t ndis_caps; /* NDIS_RSS_CAP_ */
+ uint32_t ndis_nmsi; /* # of MSIs */
+ uint32_t ndis_nrxr; /* # of RX rings */
+ /* NDIS >= 6.30 */
+ uint16_t ndis_nind; /* # of indtbl ent. */
+ uint16_t ndis_pad;
+} __rte_packed;
+
+#define NDIS_RSS_CAPS_SIZE \
+ offsetof(struct ndis_rss_caps, ndis_pad)
+#define NDIS_RSS_CAPS_SIZE_6_0 \
+ offsetof(struct ndis_rss_caps, ndis_nind)
+
+#define NDIS_RSS_CAPS_REV_1 1 /* NDIS 6.{0,1,20} */
+#define NDIS_RSS_CAPS_REV_2 2 /* NDIS 6.30 */
+
+#define NDIS_RSS_CAP_MSI 0x01000000
+#define NDIS_RSS_CAP_CLASSIFY_ISR 0x02000000
+#define NDIS_RSS_CAP_CLASSIFY_DPC 0x04000000
+#define NDIS_RSS_CAP_MSIX 0x08000000
+#define NDIS_RSS_CAP_IPV4 0x00000100
+#define NDIS_RSS_CAP_IPV6 0x00000200
+#define NDIS_RSS_CAP_IPV6_EX 0x00000400
+#define NDIS_RSS_CAP_HASH_TOEPLITZ NDIS_HASH_FUNCTION_TOEPLITZ
+#define NDIS_RSS_CAP_HASHFUNC_MASK NDIS_HASH_FUNCTION_MASK
+
+/*
+ * OID_GEN_RECEIVE_SCALE_PARAMETERS
+ * ndis_type: NDIS_OBJTYPE_RSS_PARAMS
+ */
+struct ndis_rss_params {
+ struct ndis_object_hdr ndis_hdr;
+ uint16_t ndis_flags; /* NDIS_RSS_FLAG_ */
+ uint16_t ndis_bcpu; /* base cpu 0 */
+ uint32_t ndis_hash; /* NDIS_HASH_ */
+ uint16_t ndis_indsize; /* indirect table */
+ uint32_t ndis_indoffset;
+ uint16_t ndis_keysize; /* hash key */
+ uint32_t ndis_keyoffset;
+ /* NDIS >= 6.20 */
+ uint32_t ndis_cpumaskoffset;
+ uint32_t ndis_cpumaskcnt;
+ uint32_t ndis_cpumaskentsz;
+};
+
+#define NDIS_RSS_PARAMS_SIZE sizeof(struct ndis_rss_params)
+#define NDIS_RSS_PARAMS_SIZE_6_0 \
+ offsetof(struct ndis_rss_params, ndis_cpumaskoffset)
+
+#define NDIS_RSS_PARAMS_REV_1 1 /* NDIS 6.0 */
+#define NDIS_RSS_PARAMS_REV_2 2 /* NDIS 6.20 */
+
+#define NDIS_RSS_FLAG_NONE 0x0000
+#define NDIS_RSS_FLAG_BCPU_UNCHG 0x0001
+#define NDIS_RSS_FLAG_HASH_UNCHG 0x0002
+#define NDIS_RSS_FLAG_IND_UNCHG 0x0004
+#define NDIS_RSS_FLAG_KEY_UNCHG 0x0008
+#define NDIS_RSS_FLAG_DISABLE 0x0010
+
+/* non-standard convenience struct */
+struct ndis_rssprm_toeplitz {
+ struct ndis_rss_params rss_params;
+ /* Indirect table */
+ uint32_t rss_ind[NDIS_HASH_INDCNT];
+ /* Toeplitz hash key */
+ uint8_t rss_key[NDIS_HASH_KEYSIZE_TOEPLITZ];
+};
+
+#define NDIS_RSSPRM_TOEPLITZ_SIZE(nind) \
+ offsetof(struct ndis_rssprm_toeplitz, rss_ind[nind])
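A sketch of filling the convenience struct (helper name is made up; <string.h> and <stddef.h> assumed; treating the indirection-table and key offsets as relative to the start of the parameter block is an assumption here):

    static inline void
    ndis_rssprm_toeplitz_init(struct ndis_rssprm_toeplitz *rp)
    {
    	memset(rp, 0, sizeof(*rp));
    	rp->rss_params.ndis_hdr.ndis_type = NDIS_OBJTYPE_RSS_PARAMS;
    	rp->rss_params.ndis_hdr.ndis_rev = NDIS_RSS_PARAMS_REV_2;
    	rp->rss_params.ndis_hdr.ndis_size = sizeof(*rp);
    	rp->rss_params.ndis_hash = NDIS_HASH_FUNCTION_TOEPLITZ |
    				   NDIS_HASH_TCP_IPV4 | NDIS_HASH_TCP_IPV6;
    	rp->rss_params.ndis_indsize = sizeof(rp->rss_ind);
    	rp->rss_params.ndis_indoffset =
    		offsetof(struct ndis_rssprm_toeplitz, rss_ind);
    	rp->rss_params.ndis_keysize = sizeof(rp->rss_key);
    	rp->rss_params.ndis_keyoffset =
    		offsetof(struct ndis_rssprm_toeplitz, rss_key);
    	/* caller still fills rss_ind[] and rss_key[] */
    }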
+
+/*
+ * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_OFFLOAD
+ */
+
+#define NDIS_OFFLOAD_ENCAP_NONE 0x0000
+#define NDIS_OFFLOAD_ENCAP_NULL 0x0001
+#define NDIS_OFFLOAD_ENCAP_8023 0x0002
+#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004
+#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008
+#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010
+
+struct ndis_csum_offload {
+ uint32_t ndis_ip4_txenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_txcsum;
+#define NDIS_TXCSUM_CAP_IP4OPT 0x001
+#define NDIS_TXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP4 0x010
+#define NDIS_TXCSUM_CAP_UDP4 0x040
+#define NDIS_TXCSUM_CAP_IP4 0x100
+ uint32_t ndis_ip4_rxenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_rxcsum;
+#define NDIS_RXCSUM_CAP_IP4OPT 0x001
+#define NDIS_RXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP4 0x010
+#define NDIS_RXCSUM_CAP_UDP4 0x040
+#define NDIS_RXCSUM_CAP_IP4 0x100
+ uint32_t ndis_ip6_txenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_txcsum;
+#define NDIS_TXCSUM_CAP_IP6EXT 0x001
+#define NDIS_TXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP6 0x010
+#define NDIS_TXCSUM_CAP_UDP6 0x040
+ uint32_t ndis_ip6_rxenc; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_rxcsum;
+#define NDIS_RXCSUM_CAP_IP6EXT 0x001
+#define NDIS_RXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP6 0x010
+#define NDIS_RXCSUM_CAP_UDP6 0x040
+};
+
+struct ndis_lsov1_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_maxsize;
+ uint32_t ndis_minsegs;
+ uint32_t ndis_opts;
+};
+
+struct ndis_ipsecv1_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ah_esp;
+ uint32_t ndis_xport_tun;
+ uint32_t ndis_ip4_opts;
+ uint32_t ndis_flags;
+ uint32_t ndis_ip4_ah;
+ uint32_t ndis_ip4_esp;
+};
+
+struct ndis_lsov2_offload {
+ uint32_t ndis_ip4_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip4_maxsz;
+ uint32_t ndis_ip4_minsg;
+ uint32_t ndis_ip6_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint32_t ndis_ip6_maxsz;
+ uint32_t ndis_ip6_minsg;
+ uint32_t ndis_ip6_opts;
+#define NDIS_LSOV2_CAP_IP6EXT 0x001
+#define NDIS_LSOV2_CAP_TCP6OPT 0x004
+};
+
+struct ndis_ipsecv2_offload {
+ uint32_t ndis_encap; /*NDIS_OFFLOAD_ENCAP_*/
+ uint16_t ndis_ip6;
+ uint16_t ndis_ip4opt;
+ uint16_t ndis_ip6ext;
+ uint16_t ndis_ah;
+ uint16_t ndis_esp;
+ uint16_t ndis_ah_esp;
+ uint16_t ndis_xport;
+ uint16_t ndis_tun;
+ uint16_t ndis_xport_tun;
+ uint16_t ndis_lso;
+ uint16_t ndis_extseq;
+ uint32_t ndis_udp_esp;
+ uint32_t ndis_auth;
+ uint32_t ndis_crypto;
+ uint32_t ndis_sa_caps;
+};
+
+struct ndis_rsc_offload {
+ uint16_t ndis_ip4;
+ uint16_t ndis_ip6;
+};
+
+struct ndis_encap_offload {
+ uint32_t ndis_flags;
+ uint32_t ndis_maxhdr;
+};
+
+struct ndis_offload {
+ struct ndis_object_hdr ndis_hdr;
+ struct ndis_csum_offload ndis_csum;
+ struct ndis_lsov1_offload ndis_lsov1;
+ struct ndis_ipsecv1_offload ndis_ipsecv1;
+ struct ndis_lsov2_offload ndis_lsov2;
+ uint32_t ndis_flags;
+ /* NDIS >= 6.1 */
+ struct ndis_ipsecv2_offload ndis_ipsecv2;
+ /* NDIS >= 6.30 */
+ struct ndis_rsc_offload ndis_rsc;
+ struct ndis_encap_offload ndis_encap_gre;
+};
+
+#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload)
+#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ndis_ipsecv2)
+#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, ndis_rsc)
+
+#define NDIS_OFFLOAD_REV_1 1 /* NDIS 6.0 */
+#define NDIS_OFFLOAD_REV_2 2 /* NDIS 6.1 */
+#define NDIS_OFFLOAD_REV_3 3 /* NDIS 6.30 */
+
+/*
+ * Per-packet-info
+ */
+
+/* VLAN */
+#define NDIS_VLAN_INFO_SIZE sizeof(uint32_t)
+#define NDIS_VLAN_INFO_PRI_MASK 0x0007
+#define NDIS_VLAN_INFO_CFI_MASK 0x0008
+#define NDIS_VLAN_INFO_ID_MASK 0xfff0
+#define NDIS_VLAN_INFO_MAKE(id, pri, cfi) \
+ (((pri) & NDIS_VLAN_INFO_PRI_MASK) | \
+ (((cfi) & 0x1) << 3) | (((id) & 0xfff) << 4))
+#define NDIS_VLAN_INFO_ID(inf) (((inf) & NDIS_VLAN_INFO_ID_MASK) >> 4)
+#define NDIS_VLAN_INFO_CFI(inf) (((inf) & NDIS_VLAN_INFO_CFI_MASK) >> 3)
+#define NDIS_VLAN_INFO_PRI(inf) ((inf) & NDIS_VLAN_INFO_PRI_MASK)
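For illustration, the make/extract macros above are mutual inverses for in-range arguments:

    uint32_t vlan = NDIS_VLAN_INFO_MAKE(100, 5, 0);	/* id 100, prio 5 */
    /* NDIS_VLAN_INFO_ID(vlan) == 100, NDIS_VLAN_INFO_PRI(vlan) == 5,
     * NDIS_VLAN_INFO_CFI(vlan) == 0
     */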
+
+/* Reception checksum */
+#define NDIS_RXCSUM_INFO_SIZE sizeof(uint32_t)
+#define NDIS_RXCSUM_INFO_TCPCS_FAILED 0x0001
+#define NDIS_RXCSUM_INFO_UDPCS_FAILED 0x0002
+#define NDIS_RXCSUM_INFO_IPCS_FAILED 0x0004
+#define NDIS_RXCSUM_INFO_TCPCS_OK 0x0008
+#define NDIS_RXCSUM_INFO_UDPCS_OK 0x0010
+#define NDIS_RXCSUM_INFO_IPCS_OK 0x0020
+#define NDIS_RXCSUM_INFO_LOOPBACK 0x0040
+#define NDIS_RXCSUM_INFO_TCPCS_INVAL 0x0080
+#define NDIS_RXCSUM_INFO_IPCS_INVAL 0x0100
+
+/* LSOv2 */
+#define NDIS_LSO2_INFO_SIZE sizeof(uint32_t)
+#define NDIS_LSO2_INFO_MSS_MASK 0x000fffff
+#define NDIS_LSO2_INFO_THOFF_MASK 0x3ff00000
+#define NDIS_LSO2_INFO_ISLSO2 0x40000000
+#define NDIS_LSO2_INFO_ISIPV6 0x80000000
+
+#define NDIS_LSO2_INFO_MAKE(thoff, mss) \
+ ((((uint32_t)(mss)) & NDIS_LSO2_INFO_MSS_MASK) | \
+ ((((uint32_t)(thoff)) & 0x3ff) << 20) | \
+ NDIS_LSO2_INFO_ISLSO2)
+
+#define NDIS_LSO2_INFO_MAKEIPV4(thoff, mss) \
+ NDIS_LSO2_INFO_MAKE((thoff), (mss))
+
+#define NDIS_LSO2_INFO_MAKEIPV6(thoff, mss) \
+ (NDIS_LSO2_INFO_MAKE((thoff), (mss)) | NDIS_LSO2_INFO_ISIPV6)
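For example, for a TCP/IPv4 packet whose transport header starts 34 bytes into the frame (14 B Ethernet + 20 B IPv4) and an MSS of 1448:

    uint32_t lso2 = NDIS_LSO2_INFO_MAKEIPV4(34, 1448);
    /* (lso2 & NDIS_LSO2_INFO_MSS_MASK) == 1448, thoff bits hold 34,
     * and NDIS_LSO2_INFO_ISLSO2 is set
     */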
+
+/* Transmission checksum */
+#define NDIS_TXCSUM_INFO_SIZE sizeof(uint32_t)
+#define NDIS_TXCSUM_INFO_IPV4 0x00000001
+#define NDIS_TXCSUM_INFO_IPV6 0x00000002
+#define NDIS_TXCSUM_INFO_TCPCS 0x00000004
+#define NDIS_TXCSUM_INFO_UDPCS 0x00000008
+#define NDIS_TXCSUM_INFO_IPCS 0x00000010
+#define NDIS_TXCSUM_INFO_THOFF 0x03ff0000
+
+#define NDIS_TXCSUM_INFO_MKL4CS(thoff, flag) \
+ ((((uint32_t)(thoff)) << 16) | (flag))
+
+#define NDIS_TXCSUM_INFO_MKTCPCS(thoff) \
+ NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_TCPCS)
+
+#define NDIS_TXCSUM_INFO_MKUDPCS(thoff) \
+ NDIS_TXCSUM_INFO_MKL4CS((thoff), NDIS_TXCSUM_INFO_UDPCS)
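The address-family bit is OR-ed in alongside the L4 request; e.g. requesting a TCP checksum on an IPv4 packet with the transport header at byte 34 (a sketch of plausible usage, not mandated by this header):

    uint32_t txcsum = NDIS_TXCSUM_INFO_MKTCPCS(34) | NDIS_TXCSUM_INFO_IPV4;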
+
+#endif /* !_NET_NDIS_H_ */
diff --git a/drivers/net/netvsc/rndis.h b/drivers/net/netvsc/rndis.h
new file mode 100644
index 00000000..eac9a99f
--- /dev/null
+++ b/drivers/net/netvsc/rndis.h
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2018 Microsoft Corp.
+ * Copyright (c) 2010 Jonathan Armani <armani@openbsd.org>
+ * Copyright (c) 2010 Fabien Romano <fabien@openbsd.org>
+ * Copyright (c) 2010 Michael Knudsen <mk@openbsd.org>
+ * All rights reserved.
+ */
+
+#ifndef _NET_RNDIS_H_
+#define _NET_RNDIS_H_
+
+/* Canonical major/minor version as of 22nd Aug. 2016. */
+#define RNDIS_VERSION_MAJOR 0x00000001
+#define RNDIS_VERSION_MINOR 0x00000000
+
+#define RNDIS_STATUS_SUCCESS 0x00000000
+#define RNDIS_STATUS_PENDING 0x00000103
+
+#define RNDIS_STATUS_ONLINE 0x40010003
+#define RNDIS_STATUS_RESET_START 0x40010004
+#define RNDIS_STATUS_RESET_END 0x40010005
+#define RNDIS_STATUS_RING_STATUS 0x40010006
+#define RNDIS_STATUS_CLOSED 0x40010007
+#define RNDIS_STATUS_WAN_LINE_UP 0x40010008
+#define RNDIS_STATUS_WAN_LINE_DOWN 0x40010009
+#define RNDIS_STATUS_WAN_FRAGMENT 0x4001000A
+#define RNDIS_STATUS_MEDIA_CONNECT 0x4001000B
+#define RNDIS_STATUS_MEDIA_DISCONNECT 0x4001000C
+#define RNDIS_STATUS_HARDWARE_LINE_UP 0x4001000D
+#define RNDIS_STATUS_HARDWARE_LINE_DOWN 0x4001000E
+#define RNDIS_STATUS_INTERFACE_UP 0x4001000F
+#define RNDIS_STATUS_INTERFACE_DOWN 0x40010010
+#define RNDIS_STATUS_MEDIA_BUSY 0x40010011
+#define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012
+#define RNDIS_STATUS_WW_INDICATION RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION
+#define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013
+#define RNDIS_STATUS_NETWORK_CHANGE 0x40010018
+#define RNDIS_STATUS_TASK_OFFLOAD_CURRENT_CONFIG 0x40020006
+
+#define RNDIS_STATUS_FAILURE 0xC0000001
+#define RNDIS_STATUS_RESOURCES 0xC000009A
+#define RNDIS_STATUS_NOT_SUPPORTED 0xC00000BB
+#define RNDIS_STATUS_CLOSING 0xC0010002
+#define RNDIS_STATUS_BAD_VERSION 0xC0010004
+#define RNDIS_STATUS_BAD_CHARACTERISTICS 0xC0010005
+#define RNDIS_STATUS_ADAPTER_NOT_FOUND 0xC0010006
+#define RNDIS_STATUS_OPEN_FAILED 0xC0010007
+#define RNDIS_STATUS_DEVICE_FAILED 0xC0010008
+#define RNDIS_STATUS_MULTICAST_FULL 0xC0010009
+#define RNDIS_STATUS_MULTICAST_EXISTS 0xC001000A
+#define RNDIS_STATUS_MULTICAST_NOT_FOUND 0xC001000B
+#define RNDIS_STATUS_REQUEST_ABORTED 0xC001000C
+#define RNDIS_STATUS_RESET_IN_PROGRESS 0xC001000D
+#define RNDIS_STATUS_CLOSING_INDICATING 0xC001000E
+#define RNDIS_STATUS_INVALID_PACKET 0xC001000F
+#define RNDIS_STATUS_OPEN_LIST_FULL 0xC0010010
+#define RNDIS_STATUS_ADAPTER_NOT_READY 0xC0010011
+#define RNDIS_STATUS_ADAPTER_NOT_OPEN 0xC0010012
+#define RNDIS_STATUS_NOT_INDICATING 0xC0010013
+#define RNDIS_STATUS_INVALID_LENGTH 0xC0010014
+#define RNDIS_STATUS_INVALID_DATA 0xC0010015
+#define RNDIS_STATUS_BUFFER_TOO_SHORT 0xC0010016
+#define RNDIS_STATUS_INVALID_OID 0xC0010017
+#define RNDIS_STATUS_ADAPTER_REMOVED 0xC0010018
+#define RNDIS_STATUS_UNSUPPORTED_MEDIA 0xC0010019
+#define RNDIS_STATUS_GROUP_ADDRESS_IN_USE 0xC001001A
+#define RNDIS_STATUS_FILE_NOT_FOUND 0xC001001B
+#define RNDIS_STATUS_ERROR_READING_FILE 0xC001001C
+#define RNDIS_STATUS_ALREADY_MAPPED 0xC001001D
+#define RNDIS_STATUS_RESOURCE_CONFLICT 0xC001001E
+#define RNDIS_STATUS_NO_CABLE 0xC001001F
+
+#define OID_GEN_SUPPORTED_LIST 0x00010101
+#define OID_GEN_HARDWARE_STATUS 0x00010102
+#define OID_GEN_MEDIA_SUPPORTED 0x00010103
+#define OID_GEN_MEDIA_IN_USE 0x00010104
+#define OID_GEN_MAXIMUM_LOOKAHEAD 0x00010105
+#define OID_GEN_MAXIMUM_FRAME_SIZE 0x00010106
+#define OID_GEN_LINK_SPEED 0x00010107
+#define OID_GEN_TRANSMIT_BUFFER_SPACE 0x00010108
+#define OID_GEN_RECEIVE_BUFFER_SPACE 0x00010109
+#define OID_GEN_TRANSMIT_BLOCK_SIZE 0x0001010A
+#define OID_GEN_RECEIVE_BLOCK_SIZE 0x0001010B
+#define OID_GEN_VENDOR_ID 0x0001010C
+#define OID_GEN_VENDOR_DESCRIPTION 0x0001010D
+#define OID_GEN_CURRENT_PACKET_FILTER 0x0001010E
+#define OID_GEN_CURRENT_LOOKAHEAD 0x0001010F
+#define OID_GEN_DRIVER_VERSION 0x00010110
+#define OID_GEN_MAXIMUM_TOTAL_SIZE 0x00010111
+#define OID_GEN_PROTOCOL_OPTIONS 0x00010112
+#define OID_GEN_MAC_OPTIONS 0x00010113
+#define OID_GEN_MEDIA_CONNECT_STATUS 0x00010114
+#define OID_GEN_MAXIMUM_SEND_PACKETS 0x00010115
+#define OID_GEN_VENDOR_DRIVER_VERSION 0x00010116
+#define OID_GEN_SUPPORTED_GUIDS 0x00010117
+#define OID_GEN_NETWORK_LAYER_ADDRESSES 0x00010118
+#define OID_GEN_TRANSPORT_HEADER_OFFSET 0x00010119
+#define OID_GEN_RECEIVE_SCALE_CAPABILITIES 0x00010203
+#define OID_GEN_RECEIVE_SCALE_PARAMETERS 0x00010204
+#define OID_GEN_MACHINE_NAME 0x0001021A
+#define OID_GEN_RNDIS_CONFIG_PARAMETER 0x0001021B
+#define OID_GEN_VLAN_ID 0x0001021C
+
+#define OID_802_3_PERMANENT_ADDRESS 0x01010101
+#define OID_802_3_CURRENT_ADDRESS 0x01010102
+#define OID_802_3_MULTICAST_LIST 0x01010103
+#define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104
+#define OID_802_3_MAC_OPTIONS 0x01010105
+#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
+#define OID_802_3_XMIT_ONE_COLLISION 0x01020102
+#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
+#define OID_802_3_XMIT_DEFERRED 0x01020201
+#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
+#define OID_802_3_RCV_OVERRUN 0x01020203
+#define OID_802_3_XMIT_UNDERRUN 0x01020204
+#define OID_802_3_XMIT_HEARTBEAT_FAILURE 0x01020205
+#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
+#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
+
+#define OID_TCP_OFFLOAD_PARAMETERS 0xFC01020C
+#define OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020D
+
+#define RNDIS_MEDIUM_802_3 0x00000000
+
+/* Device flags */
+#define RNDIS_DF_CONNECTIONLESS 0x00000001
+#define RNDIS_DF_CONNECTION_ORIENTED 0x00000002
+
+/*
+ * Common RNDIS message header.
+ */
+struct rndis_msghdr {
+ uint32_t type;
+ uint32_t len;
+};
+
+/*
+ * RNDIS data message
+ */
+#define RNDIS_PACKET_MSG 0x00000001
+
+struct rndis_packet_msg {
+ uint32_t type;
+ uint32_t len;
+ uint32_t dataoffset;
+ uint32_t datalen;
+ uint32_t oobdataoffset;
+ uint32_t oobdatalen;
+ uint32_t oobdataelements;
+ uint32_t pktinfooffset;
+ uint32_t pktinfolen;
+ uint32_t vchandle;
+ uint32_t reserved;
+};
+
+/*
+ * Minimum value for dataoffset, oobdataoffset, and
+ * pktinfooffset.
+ */
+#define RNDIS_PACKET_MSG_OFFSET_MIN \
+ (sizeof(struct rndis_packet_msg) - \
+ offsetof(struct rndis_packet_msg, dataoffset))
+
+/* Offset from the beginning of rndis_packet_msg. */
+#define RNDIS_PACKET_MSG_OFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_packet_msg, dataoffset))
+
+#define RNDIS_PACKET_MSG_OFFSET_ALIGN 4
+#define RNDIS_PACKET_MSG_OFFSET_ALIGNMASK \
+ (RNDIS_PACKET_MSG_OFFSET_ALIGN - 1)
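Putting the three macros together, a receiver could validate the data area of an incoming packet message along these lines (a sketch; the helper name is made up):

    static inline int
    rndis_packet_data_ok(const struct rndis_packet_msg *pkt)
    {
    	uint32_t doff = RNDIS_PACKET_MSG_OFFSET_ABS(pkt->dataoffset);

    	return pkt->dataoffset >= RNDIS_PACKET_MSG_OFFSET_MIN &&
    	       (pkt->dataoffset & RNDIS_PACKET_MSG_OFFSET_ALIGNMASK) == 0 &&
    	       doff + pkt->datalen <= pkt->len;
    }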
+
+/* Per-packet-info for RNDIS data message */
+struct rndis_pktinfo {
+ uint32_t size;
+ uint32_t type; /* NDIS_PKTINFO_TYPE_ */
+ uint32_t offset;
+ uint8_t data[];
+};
+
+#define RNDIS_PKTINFO_OFFSET \
+ offsetof(struct rndis_pktinfo, data[0])
+#define RNDIS_PKTINFO_SIZE_ALIGN 4
+#define RNDIS_PKTINFO_SIZE_ALIGNMASK (RNDIS_PKTINFO_SIZE_ALIGN - 1)
+
+#define NDIS_PKTINFO_TYPE_CSUM 0
+#define NDIS_PKTINFO_TYPE_IPSEC 1
+#define NDIS_PKTINFO_TYPE_LSO 2
+#define NDIS_PKTINFO_TYPE_CLASSIFY 3
+/* reserved 4 */
+#define NDIS_PKTINFO_TYPE_SGLIST 5
+#define NDIS_PKTINFO_TYPE_VLAN 6
+#define NDIS_PKTINFO_TYPE_ORIG 7
+#define NDIS_PKTINFO_TYPE_PKT_CANCELID 8
+#define NDIS_PKTINFO_TYPE_ORIG_NBLIST 9
+#define NDIS_PKTINFO_TYPE_CACHE_NBLIST 10
+#define NDIS_PKTINFO_TYPE_PKT_PAD 11
+
+/* RNDIS extension */
+
+/* Per-packet hash info */
+#define NDIS_HASH_INFO_SIZE sizeof(uint32_t)
+#define NDIS_PKTINFO_TYPE_HASHINF NDIS_PKTINFO_TYPE_ORIG_NBLIST
+/* NDIS_HASH_ */
+
+/* Per-packet hash value */
+#define NDIS_HASH_VALUE_SIZE sizeof(uint32_t)
+#define NDIS_PKTINFO_TYPE_HASHVAL NDIS_PKTINFO_TYPE_PKT_CANCELID
+
+/* Per-packet-info size */
+#define RNDIS_PKTINFO_SIZE(dlen) offsetof(struct rndis_pktinfo, data[dlen])
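A sketch of appending one per-packet-info element (helper name is made up; <string.h> assumed; the caller keeps dlen a multiple of RNDIS_PKTINFO_SIZE_ALIGN):

    static inline void
    rndis_pktinfo_fill(struct rndis_pktinfo *pi, uint32_t type,
    		   const void *data, uint32_t dlen)
    {
    	pi->size = RNDIS_PKTINFO_SIZE(dlen);
    	pi->type = type;
    	pi->offset = RNDIS_PKTINFO_OFFSET;
    	memcpy(pi->data, data, dlen);
    }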
+
+/*
+ * RNDIS control messages
+ */
+
+/*
+ * Common header for RNDIS completion messages.
+ *
+ * NOTE: It does not apply to RNDIS_RESET_CMPLT.
+ */
+struct rndis_comp_hdr {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/* Initialize the device. */
+#define RNDIS_INITIALIZE_MSG 0x00000002
+#define RNDIS_INITIALIZE_CMPLT 0x80000002
+
+struct rndis_init_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t ver_major;
+ uint32_t ver_minor;
+ uint32_t max_xfersz;
+};
+
+struct rndis_init_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+ uint32_t ver_major;
+ uint32_t ver_minor;
+ uint32_t devflags;
+ uint32_t medium;
+ uint32_t pktmaxcnt;
+ uint32_t pktmaxsz;
+ uint32_t align;
+ uint32_t aflistoffset;
+ uint32_t aflistsz;
+};
+
+#define RNDIS_INIT_COMP_SIZE_MIN \
+ offsetof(struct rndis_init_comp, aflistsz)
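For illustration, the host side of the handshake fills a request like the following (the rid and max_xfersz values here are made up):

    struct rndis_init_req req = {
    	.type = RNDIS_INITIALIZE_MSG,
    	.len = sizeof(req),
    	.rid = 1,			/* host-chosen request id */
    	.ver_major = RNDIS_VERSION_MAJOR,
    	.ver_minor = RNDIS_VERSION_MINOR,
    	.max_xfersz = 16384,		/* illustrative transfer limit */
    };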
+
+/* Halt the device. No response sent. */
+#define RNDIS_HALT_MSG 0x00000003
+
+struct rndis_halt_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+/* Send a query object. */
+#define RNDIS_QUERY_MSG 0x00000004
+#define RNDIS_QUERY_CMPLT 0x80000004
+
+struct rndis_query_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t oid;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+ uint32_t devicevchdl;
+};
+
+#define RNDIS_QUERY_REQ_INFOBUFOFFSET \
+ (sizeof(struct rndis_query_req) - \
+ offsetof(struct rndis_query_req, rid))
+
+struct rndis_query_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+};
+
+/* infobuf offset from the beginning of rndis_query_comp. */
+#define RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_query_comp, rid))
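Given a successful completion, the info buffer is therefore located as follows (comp is a hypothetical pointer to a received rndis_query_comp):

    const uint8_t *info = (const uint8_t *)comp +
    	RNDIS_QUERY_COMP_INFOBUFOFFSET_ABS(comp->infobufoffset);
    /* info points at infobuflen bytes of OID-specific data */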
+
+/* Send a set object request. */
+#define RNDIS_SET_MSG 0x00000005
+#define RNDIS_SET_CMPLT 0x80000005
+
+struct rndis_set_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t oid;
+ uint32_t infobuflen;
+ uint32_t infobufoffset;
+ uint32_t devicevchdl;
+};
+
+#define RNDIS_SET_REQ_INFOBUFOFFSET \
+ (sizeof(struct rndis_set_req) - \
+ offsetof(struct rndis_set_req, rid))
+
+struct rndis_set_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/*
+ * Parameter used by OID_GEN_RNDIS_CONFIG_PARAMETER.
+ */
+#define RNDIS_SET_PARAM_NUMERIC 0x00000000
+#define RNDIS_SET_PARAM_STRING 0x00000002
+
+struct rndis_set_parameter {
+ uint32_t nameoffset;
+ uint32_t namelen;
+ uint32_t type;
+ uint32_t valueoffset;
+ uint32_t valuelen;
+};
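A sketch of packing a string parameter (buf, name_len and value_len are hypothetical; taking the offsets as relative to the start of this block is an assumption here):

    /* layout: [struct rndis_set_parameter][name][value] */
    struct rndis_set_parameter *p = (struct rndis_set_parameter *)buf;

    p->nameoffset = sizeof(*p);
    p->namelen = name_len;
    p->type = RNDIS_SET_PARAM_STRING;
    p->valueoffset = sizeof(*p) + name_len;
    p->valuelen = value_len;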
+
+/* Perform a soft reset on the device. */
+#define RNDIS_RESET_MSG 0x00000006
+#define RNDIS_RESET_CMPLT 0x80000006
+
+struct rndis_reset_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+struct rndis_reset_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t status;
+ uint32_t adrreset;
+};
+
+/* 802.3 link-state or undefined message error. Sent by device. */
+#define RNDIS_INDICATE_STATUS_MSG 0x00000007
+
+struct rndis_status_msg {
+ uint32_t type;
+ uint32_t len;
+ uint32_t status;
+ uint32_t stbuflen;
+ uint32_t stbufoffset;
+ /* rndis_diag_info */
+};
+
+/* stbuf offset from the beginning of rndis_status_msg. */
+#define RNDIS_STBUFOFFSET_ABS(ofs) \
+ ((ofs) + offsetof(struct rndis_status_msg, status))
+
+/*
+ * Follows at rndis_status_msg.stbufoffset when a control message is
+ * malformed or a packet message contains inappropriate content.
+ */
+struct rndis_diag_info {
+ uint32_t diagstatus;
+ uint32_t erroffset;
+};
+
+/* Keepalive message. May be sent by device. */
+#define RNDIS_KEEPALIVE_MSG 0x00000008
+#define RNDIS_KEEPALIVE_CMPLT 0x80000008
+
+struct rndis_keepalive_req {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+};
+
+struct rndis_keepalive_comp {
+ uint32_t type;
+ uint32_t len;
+ uint32_t rid;
+ uint32_t status;
+};
+
+/* Packet filter bits used by OID_GEN_CURRENT_PACKET_FILTER */
+#define NDIS_PACKET_TYPE_NONE 0x00000000
+#define NDIS_PACKET_TYPE_DIRECTED 0x00000001
+#define NDIS_PACKET_TYPE_MULTICAST 0x00000002
+#define NDIS_PACKET_TYPE_ALL_MULTICAST 0x00000004
+#define NDIS_PACKET_TYPE_BROADCAST 0x00000008
+#define NDIS_PACKET_TYPE_SOURCE_ROUTING 0x00000010
+#define NDIS_PACKET_TYPE_PROMISCUOUS 0x00000020
+#define NDIS_PACKET_TYPE_SMT 0x00000040
+#define NDIS_PACKET_TYPE_ALL_LOCAL 0x00000080
+#define NDIS_PACKET_TYPE_GROUP 0x00001000
+#define NDIS_PACKET_TYPE_ALL_FUNCTIONAL 0x00002000
+#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00004000
+#define NDIS_PACKET_TYPE_MAC_FRAME 0x00008000
+
+#endif /* !_NET_RNDIS_H_ */
diff --git a/drivers/net/netvsc/rte_pmd_netvsc_version.map b/drivers/net/netvsc/rte_pmd_netvsc_version.map
new file mode 100644
index 00000000..d534019a
--- /dev/null
+++ b/drivers/net/netvsc/rte_pmd_netvsc_version.map
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile
index aa3b68a4..ab4e0a7d 100644
--- a/drivers/net/nfp/Makefile
+++ b/drivers/net/nfp/Makefile
@@ -20,11 +20,24 @@ EXPORT_MAP := rte_pmd_nfp_version.map
LIBABIVER := 1
+VPATH += $(SRCDIR)/nfpcore
+
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cppcore.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_cpp_pcie_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mutex.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_resource.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_crc.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_mip.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nffw.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_hwinfo.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_rtsym.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_cmds.c
+SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nsp_eth.c
+
#
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c
-SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nfpu.c
-SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_nspu.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
new file mode 100644
index 00000000..3ba37e27
--- /dev/null
+++ b/drivers/net/nfp/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('nfpcore/nfp_cpp_pcie_ops.c',
+ 'nfpcore/nfp_nsp.c',
+ 'nfpcore/nfp_cppcore.c',
+ 'nfpcore/nfp_resource.c',
+ 'nfpcore/nfp_mip.c',
+ 'nfpcore/nfp_nffw.c',
+ 'nfpcore/nfp_rtsym.c',
+ 'nfpcore/nfp_nsp_cmds.c',
+ 'nfpcore/nfp_crc.c',
+ 'nfpcore/nfp_mutex.c',
+ 'nfpcore/nfp_nsp_eth.c',
+ 'nfpcore/nfp_hwinfo.c',
+ 'nfp_net.c')
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index e5bfde62..6e5e305f 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015 Netronome Systems, Inc.
+ * Copyright (c) 2014-2018 Netronome Systems, Inc.
* All rights reserved.
*
* Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
@@ -55,7 +55,13 @@
#include <rte_alarm.h>
#include <rte_spinlock.h>
-#include "nfp_nfpu.h"
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp_hwinfo.h"
+#include "nfpcore/nfp_mip.h"
+#include "nfpcore/nfp_rtsym.h"
+#include "nfpcore/nfp_nsp.h"
+
#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"
@@ -103,13 +109,11 @@ static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
uint16_t reta_size);
static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
+static int nfp_set_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
-/*
- * The offset of the queue controller queues in the PCIe Target. These
- * happen to be at the same offset on the NFP6000 and the NFP3200 so
- * we use a single macro here.
- */
-#define NFP_PCIE_QUEUE(_q) (0x800 * ((_q) & 0xff))
+/* The offset of the queue controller queues in the PCIe Target */
+#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))
/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD 0x7f
@@ -213,57 +217,6 @@ nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
-/*
- * Atomically reads link status information from global structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &dev->data->dev_link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/*
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static void
nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
{
@@ -310,7 +263,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
for (i = 0; i < txq->tx_count; i++) {
if (txq->txbufs[i].mbuf) {
- rte_pktmbuf_free(txq->txbufs[i].mbuf);
+ rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
txq->txbufs[i].mbuf = NULL;
}
}
@@ -343,7 +296,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
uint32_t new;
struct timespec wait;
- PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
+ PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
hw->qcp_cfg);
if (hw->qcp_cfg == NULL)
@@ -354,7 +307,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
wait.tv_sec = 0;
wait.tv_nsec = 1000000;
- PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");
+ PMD_DRV_LOG(DEBUG, "Polling for update ack...");
/* Poll update field, waiting for NFP to ack the config */
for (cnt = 0; ; cnt++) {
@@ -372,7 +325,7 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
}
nanosleep(&wait, 0); /* waiting for a 1ms */
}
- PMD_DRV_LOG(DEBUG, "Ack DONE\n");
+ PMD_DRV_LOG(DEBUG, "Ack DONE");
return 0;
}
@@ -390,7 +343,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
uint32_t err;
- PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
+ PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
ctrl, update);
rte_spinlock_lock(&hw->reconfig_lock);
@@ -427,8 +380,6 @@ nfp_net_configure(struct rte_eth_dev *dev)
struct rte_eth_conf *dev_conf;
struct rte_eth_rxmode *rxmode;
struct rte_eth_txmode *txmode;
- uint32_t new_ctrl = 0;
- uint32_t update = 0;
struct nfp_net_hw *hw;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -454,96 +405,17 @@ nfp_net_configure(struct rte_eth_dev *dev)
}
/* Checking RX mode */
- if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
- if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
- update = NFP_NET_CFG_UPDATE_RSS;
- new_ctrl = NFP_NET_CFG_CTRL_RSS;
- } else {
- PMD_INIT_LOG(INFO, "RSS not supported");
- return -EINVAL;
- }
- }
-
- if (rxmode->split_hdr_size) {
- PMD_INIT_LOG(INFO, "rxmode does not support split header");
- return -EINVAL;
- }
-
- if (rxmode->hw_ip_checksum) {
- if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
- new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
- } else {
- PMD_INIT_LOG(INFO, "RXCSUM not supported");
- return -EINVAL;
- }
- }
-
- if (rxmode->hw_vlan_filter) {
- PMD_INIT_LOG(INFO, "VLAN filter not supported");
- return -EINVAL;
- }
-
- if (rxmode->hw_vlan_strip) {
- if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
- new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
- } else {
- PMD_INIT_LOG(INFO, "hw vlan strip not supported");
- return -EINVAL;
- }
- }
-
- if (rxmode->hw_vlan_extend) {
- PMD_INIT_LOG(INFO, "VLAN extended not supported");
- return -EINVAL;
- }
-
- if (rxmode->jumbo_frame)
- hw->mtu = rxmode->max_rx_pkt_len;
-
- if (!rxmode->hw_strip_crc)
- PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
-
- if (rxmode->enable_scatter) {
- PMD_INIT_LOG(INFO, "Scatter not supported");
+ if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
+ !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
+ PMD_INIT_LOG(INFO, "RSS not supported");
return -EINVAL;
}
- /* If next capabilities are supported, configure them by default */
-
- /* VLAN insertion */
- if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
- new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
-
- /* L2 broadcast */
- if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
- new_ctrl |= NFP_NET_CFG_CTRL_L2BC;
-
- /* L2 multicast */
- if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
- new_ctrl |= NFP_NET_CFG_CTRL_L2MC;
-
- /* TX checksum offload */
- if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
- new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
-
- /* LSO offload */
- if (hw->cap & NFP_NET_CFG_CTRL_LSO)
- new_ctrl |= NFP_NET_CFG_CTRL_LSO;
-
- /* RX gather */
- if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
- new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
-
- if (!new_ctrl)
- return 0;
-
- update |= NFP_NET_CFG_UPDATE_GEN;
-
- nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
- if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
- return -EIO;
-
- hw->ctrl = new_ctrl;
+ /* KEEP_CRC offload flag is not supported by PMD;
+ * this block can be removed when DEV_RX_OFFLOAD_CRC_STRIP is removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads))
+ PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
return 0;
}
@@ -625,47 +497,29 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
#define ETH_ADDR_LEN 6
static void
-nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
{
int i;
for (i = 0; i < ETH_ADDR_LEN; i++)
- dst[ETH_ADDR_LEN - i - 1] = src[i];
+ dst[i] = src[i];
}
static int
nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
{
- union eth_table_entry *entry;
- int idx, i;
-
- idx = port;
- entry = hw->eth_table;
-
- /* Reading NFP ethernet table obtained before */
- for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
- if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
- /* port not in use */
- entry++;
- continue;
- }
- if (idx == 0)
- break;
- idx--;
- entry++;
- }
-
- if (i == NSP_ETH_MAX_COUNT)
- return -EINVAL;
+ struct nfp_eth_table *nfp_eth_table;
+ nfp_eth_table = nfp_eth_read_ports(hw->cpp);
/*
* hw points to port0 private data. We need hw now pointing to
* right port.
*/
hw += port;
- nfp_eth_copy_mac_reverse((uint8_t *)&hw->mac_addr,
- (uint8_t *)&entry->mac_addr);
+ nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
+ (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
+ free(nfp_eth_table);
return 0;
}
@@ -675,7 +529,7 @@ nfp_net_vf_read_mac(struct nfp_net_hw *hw)
uint32_t tmp;
tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
- memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
+ memcpy(&hw->mac_addr[0], &tmp, 4);
tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
memcpy(&hw->mac_addr[4], &tmp, 2);
@@ -695,6 +549,33 @@ nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
}
+int
+nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct nfp_net_hw *hw;
+ uint32_t update, ctrl;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
+ PMD_INIT_LOG(INFO, "MAC address unable to change when"
+ " port enabled");
+ return -EBUSY;
+ }
+
+ if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
+ !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+ return -EBUSY;
+
+ /* Writing new MAC to the specific port BAR address */
+ nfp_net_write_mac(hw, (uint8_t *)mac_addr);
+
+ /* Signal the NIC about the change */
+ update = NFP_NET_CFG_UPDATE_MACADDR;
+ ctrl = hw->ctrl | NFP_NET_CFG_CTRL_LIVE_ADDR;
+ if (nfp_net_reconfig(hw, ctrl, update) < 0) {
+ PMD_INIT_LOG(INFO, "MAC address update failed");
+ return -EIO;
+ }
+ return 0;
+}
+
static int
nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
struct rte_intr_handle *intr_handle)
@@ -729,7 +614,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
*/
nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
intr_handle->intr_vec[i] = i + 1;
- PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d\n", i,
+ PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
intr_handle->intr_vec[i]);
}
}
@@ -739,15 +624,80 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
return 0;
}
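+
+/*
+ * Map the Rx/Tx offloads enabled in dev->data->dev_conf onto the
+ * NFP_NET_CFG_CTRL_* bits the firmware expects in the control word.
+ */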
+static uint32_t
+nfp_check_offloads(struct rte_eth_dev *dev)
+{
+ struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
+ struct rte_eth_txmode *txmode;
+ uint32_t ctrl = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ dev_conf = &dev->data->dev_conf;
+ rxmode = &dev_conf->rxmode;
+ txmode = &dev_conf->txmode;
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
+ ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
+ ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ hw->mtu = rxmode->max_rx_pkt_len;
+
+ if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
+ ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
+
+ /* L2 broadcast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
+ ctrl |= NFP_NET_CFG_CTRL_L2BC;
+
+ /* L2 multicast */
+ if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
+ ctrl |= NFP_NET_CFG_CTRL_L2MC;
+
+ /* TX checksum offload */
+ if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
+ ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
+
+ /* LSO offload */
+ if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ ctrl |= NFP_NET_CFG_CTRL_LSO;
+ else
+ ctrl |= NFP_NET_CFG_CTRL_LSO2;
+ }
+
+ /* RX gather */
+ if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ ctrl |= NFP_NET_CFG_CTRL_GATHER;
+
+ return ctrl;
+}
+
static int
nfp_net_start(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- struct rte_eth_conf *dev_conf;
- struct rte_eth_rxmode *rxmode;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
+ struct rte_eth_conf *dev_conf;
+ struct rte_eth_rxmode *rxmode;
uint32_t intr_vector;
int ret;
@@ -758,9 +703,6 @@ nfp_net_start(struct rte_eth_dev *dev)
/* Disabling queues just in case... */
nfp_net_disable_queues(dev);
- /* Writing configuration parameters in the device */
- nfp_net_params_setup(hw);
-
/* Enabling the required queues in the device */
nfp_net_enable_queues(dev);
@@ -795,21 +737,22 @@ nfp_net_start(struct rte_eth_dev *dev)
rte_intr_enable(intr_handle);
+ new_ctrl = nfp_check_offloads(dev);
+
+ /* Writing configuration parameters in the device */
+ nfp_net_params_setup(hw);
+
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
- /* Checking RX mode */
if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
- if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
- if (!nfp_net_rss_config_default(dev))
- update |= NFP_NET_CFG_UPDATE_RSS;
- } else {
- PMD_INIT_LOG(INFO, "RSS not supported");
- return -EINVAL;
- }
+ nfp_net_rss_config_default(dev);
+ update |= NFP_NET_CFG_UPDATE_RSS;
+ new_ctrl |= NFP_NET_CFG_CTRL_RSS;
}
+
/* Enable device */
- new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
+ new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
@@ -831,7 +774,7 @@ nfp_net_start(struct rte_eth_dev *dev)
if (hw->is_pf)
/* Configure the physical port up */
- nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 1);
+ nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
hw->ctrl = new_ctrl;
@@ -882,7 +825,7 @@ nfp_net_stop(struct rte_eth_dev *dev)
if (hw->is_pf)
/* Configure the physical port down */
- nfp_nsp_eth_config(hw->nspu_desc, hw->pf_port_idx, 0);
+ nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
}
/* Reset and stop device. The device can not be restarted. */
@@ -936,7 +879,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
- PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -946,7 +889,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
}
if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
- PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
+ PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
return;
}
@@ -972,7 +915,7 @@ nfp_net_promisc_disable(struct rte_eth_dev *dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
- PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
+ PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
return;
}
@@ -999,8 +942,9 @@ static int
nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
struct nfp_net_hw *hw;
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
uint32_t nn_link_status;
+ int ret;
static const uint32_t ls_to_ethtool[] = {
[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
@@ -1013,13 +957,10 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
[NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
};
- PMD_DRV_LOG(DEBUG, "Link update\n");
+ PMD_DRV_LOG(DEBUG, "Link update");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- memset(&old, 0, sizeof(old));
- nfp_net_dev_atomic_read_link_status(dev, &old);
-
nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);
memset(&link, 0, sizeof(struct rte_eth_link));
@@ -1037,16 +978,14 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
else
link.link_speed = ls_to_ethtool[nn_link_status];
- if (old.link_status != link.link_status) {
- nfp_net_dev_atomic_write_link_status(dev, &link);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == 0) {
if (link.link_status)
- PMD_DRV_LOG(INFO, "NIC Link is Up\n");
+ PMD_DRV_LOG(INFO, "NIC Link is Up");
else
- PMD_DRV_LOG(INFO, "NIC Link is Down\n");
- return 0;
+ PMD_DRV_LOG(INFO, "NIC Link is Down");
}
-
- return -1;
+ return ret;
}
static int
@@ -1214,7 +1153,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1230,6 +1168,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_KEEP_CRC;
+
if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
@@ -1238,6 +1179,12 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;
+
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = DEFAULT_RX_PTHRESH,
@@ -1256,8 +1203,6 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
},
.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
};
dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
@@ -1268,12 +1213,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
- dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G |
- ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G |
- ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G;
-
- if (hw->cap & NFP_NET_CFG_CTRL_LSO)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
}
static const uint32_t *
@@ -1376,18 +1318,17 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_eth_link link;
- memset(&link, 0, sizeof(link));
- nfp_net_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
if (link.link_status)
- RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
- dev->data->port_id, link.link_speed,
- link.link_duplex == ETH_LINK_FULL_DUPLEX
- ? "full-duplex" : "half-duplex");
+ PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+ dev->data->port_id, link.link_speed,
+ link.link_duplex == ETH_LINK_FULL_DUPLEX
+ ? "full-duplex" : "half-duplex");
else
- RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
- dev->data->port_id);
+ PMD_DRV_LOG(INFO, " Port %d: Link Down",
+ dev->data->port_id);
- RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
+ PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
}
@@ -1428,11 +1369,9 @@ nfp_net_dev_interrupt_handler(void *param)
struct rte_eth_link link;
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");
+ PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");
- /* get the link status */
- memset(&link, 0, sizeof(link));
- nfp_net_dev_atomic_read_link_status(dev, &link);
+ rte_eth_linkstatus_get(dev, &link);
nfp_net_link_update(dev, 0);
@@ -1449,7 +1388,7 @@ nfp_net_dev_interrupt_handler(void *param)
if (rte_eal_alarm_set(timeout * 1000,
nfp_net_dev_interrupt_delayed_handler,
(void *)dev) < 0) {
- RTE_LOG(ERR, PMD, "Error setting alarm");
+ PMD_INIT_LOG(ERR, "Error setting alarm");
/* Unmasking */
nfp_net_irq_unmask(dev);
}
@@ -1500,9 +1439,9 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* switch to jumbo mode if needed */
if ((uint32_t)mtu > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
@@ -1534,7 +1473,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
(nb_desc > NFP_NET_MAX_RX_DESC) ||
(nb_desc < NFP_NET_MIN_RX_DESC)) {
- RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
+ PMD_DRV_LOG(ERR, "Wrong nb_desc value");
return -EINVAL;
}
@@ -1572,8 +1511,6 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_count = nb_desc;
rxq->port_id = dev->data->port_id;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
- rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
- : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
/*
@@ -1587,7 +1524,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
socket_id);
if (tz == NULL) {
- RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
+ PMD_DRV_LOG(ERR, "Error allocatig rx dma");
nfp_net_rx_queue_release(rxq);
return -ENOMEM;
}
@@ -1605,7 +1542,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
- PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
+ PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);
nfp_net_reset_rx_queue(rxq);
@@ -1630,7 +1567,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
uint64_t dma_addr;
unsigned i;
- PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
+ PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
rxq->rx_count);
for (i = 0; i < rxq->rx_count; i++) {
@@ -1638,7 +1575,7 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);
if (mbuf == NULL) {
- RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
(unsigned)rxq->qidx);
return -ENOMEM;
}
@@ -1650,14 +1587,14 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
rxe[i].mbuf = mbuf;
- PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
+ PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
}
/* Make sure all writes are flushed before telling the hardware */
rte_wmb();
/* Not advertising the whole ring as the firmware gets confused if so */
- PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
+ PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
rxq->rx_count - 1);
nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);
@@ -1683,7 +1620,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
(nb_desc > NFP_NET_MAX_TX_DESC) ||
(nb_desc < NFP_NET_MIN_TX_DESC)) {
- RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
+ PMD_DRV_LOG(ERR, "Wrong nb_desc value");
return -EINVAL;
}
@@ -1692,10 +1629,10 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
DEFAULT_TX_FREE_THRESH);
if (tx_free_thresh > (nb_desc)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"tx_free_thresh must be less than the number of TX "
"descriptors. (tx_free_thresh=%u port=%d "
- "queue=%d)\n", (unsigned int)tx_free_thresh,
+ "queue=%d)", (unsigned int)tx_free_thresh,
dev->data->port_id, (int)queue_idx);
return -(EINVAL);
}
@@ -1705,7 +1642,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
* calling nfp_net_stop
*/
if (dev->data->tx_queues[queue_idx]) {
- PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
+ PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
queue_idx);
nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
dev->data->tx_queues[queue_idx] = NULL;
@@ -1715,7 +1652,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
RTE_CACHE_LINE_SIZE, socket_id);
if (txq == NULL) {
- RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
+ PMD_DRV_LOG(ERR, "Error allocating tx dma");
return -ENOMEM;
}
@@ -1729,7 +1666,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
socket_id);
if (tz == NULL) {
- RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
+ PMD_DRV_LOG(ERR, "Error allocating tx dma");
nfp_net_tx_queue_release(txq);
return -ENOMEM;
}
@@ -1746,7 +1683,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
/* Saving physical and virtual addresses for the TX ring */
txq->dma = (uint64_t)tz->iova;
@@ -1760,7 +1696,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
nfp_net_tx_queue_release(txq);
return -ENOMEM;
}
- PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
+ PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
txq->txbufs, txq->txds, (unsigned long int)txq->dma);
nfp_net_reset_tx_queue(txq);
@@ -1786,7 +1722,7 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
uint64_t ol_flags;
struct nfp_net_hw *hw = txq->hw;
- if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
+ if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
goto clean_txd;
ol_flags = mb->ol_flags;
@@ -1794,15 +1730,19 @@ nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
if (!(ol_flags & PKT_TX_TCP_SEG))
goto clean_txd;
- txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
- txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
+ txd->l3_offset = mb->l2_len;
+ txd->l4_offset = mb->l2_len + mb->l3_len;
+ txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+ txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
txd->flags = PCIE_DESC_TX_LSO;
return;
clean_txd:
txd->flags = 0;
+ txd->l3_offset = 0;
txd->l4_offset = 0;
- txd->lso = 0;
+ txd->lso_hdrlen = 0;
+ txd->mss = 0;
}
/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
@@ -1888,14 +1828,10 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
return;
- if (NFD_CFG_MAJOR_VERSION_of(hw->ver) <= 3) {
- if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
- return;
-
- hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
- hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
-
- } else if (NFP_DESC_META_LEN(rxd)) {
+ /* this is true for new firmwares */
+ if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
+ (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
+ NFP_DESC_META_LEN(rxd))) {
/*
* new metadata api:
* <---- 32 bit ----->
@@ -1928,7 +1864,11 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
return;
}
} else {
- return;
+ if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
+ return;
+
+ hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
+ hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
}
mbuf->hash.rss = hash;
@@ -2019,16 +1959,16 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
}
+ rxds = &rxq->rxds[rxq->rd_p];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
/*
* Memory barrier to ensure that we won't do other
* reads before the DD bit.
*/
rte_rmb();
- rxds = &rxq->rxds[rxq->rd_p];
- if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
- break;
-
/*
* We got a packet. Let's alloc a new mbuff for refilling the
* free descriptor ring as soon as possible
@@ -2051,7 +1991,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
mb = rxb->mbuf;
rxb->mbuf = new_mb;
- PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
+ PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
rxds->rxd.data_len, rxq->mbuf_size);
/* Size of this segment */
@@ -2089,6 +2029,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
mb->nb_segs = 1;
mb->next = NULL;
+ mb->port = rxq->port_id;
+
/* Checking the RSS flag */
nfp_net_set_hash(rxq, rxds, mb);
@@ -2120,7 +2062,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (nb_hold == 0)
return nb_hold;
- PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
+ PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received",
rxq->port_id, (unsigned int)rxq->qidx, nb_hold);
nb_hold += rxq->nb_rx_hold;
@@ -2131,7 +2073,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
rte_wmb();
if (nb_hold > rxq->rx_free_thresh) {
- PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
+ PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
rxq->port_id, (unsigned int)rxq->qidx,
(unsigned)nb_hold, (unsigned)avail);
nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
@@ -2155,14 +2097,14 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
int todo;
PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
- " status\n", txq->qidx);
+ " status", txq->qidx);
/* Work out how many packets have been sent */
qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
if (qcp_rd_p == txq->rd_p) {
PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
- "packets (%u, %u)\n", txq->qidx,
+ "packets (%u, %u)", txq->qidx,
qcp_rd_p, txq->rd_p);
return 0;
}
@@ -2172,7 +2114,7 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
else
todo = qcp_rd_p + txq->tx_count - txq->rd_p;
- PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n",
+ PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
qcp_rd_p, txq->rd_p, txq->rd_p);
if (todo == 0)
@@ -2226,7 +2168,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
hw = txq->hw;
txds = &txq->txds[txq->wr_p];
- PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
+ PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
txq->qidx, txq->wr_p, nb_pkts);
if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
@@ -2240,7 +2182,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
i = 0;
issued_descs = 0;
- PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
+ PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
txq->qidx, nb_pkts);
/* Sending packets */
while ((i < nb_pkts) && free_descs) {
@@ -2299,7 +2241,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
dma_size = pkt->data_len;
dma_addr = rte_mbuf_data_iova(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
- "%" PRIx64 "\n", dma_addr);
+ "%" PRIx64 "", dma_addr);
/* Filling descriptors fields */
txds->dma_len = dma_size;
@@ -2314,11 +2256,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->wr_p = 0;
pkt_size -= dma_size;
- if (!pkt_size)
- /* End of packet */
- txds->offset_eop |= PCIE_DESC_TX_EOP;
+
+ /*
+ * Making the EOP, packets with just one segment
+ * the priority
+ */
+ if (likely(!pkt_size))
+ txds->offset_eop = PCIE_DESC_TX_EOP;
else
- txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+ txds->offset_eop = 0;
pkt = pkt->next;
/* Referencing next free TX descriptor */
@@ -2349,7 +2295,7 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
(mask & ETH_VLAN_EXTEND_OFFLOAD))
- RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
+ PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or"
" ETH_VLAN_EXTEND_OFFLOAD");
/* Enable vlan strip if it is not configured yet */
@@ -2386,9 +2332,9 @@ nfp_net_rss_reta_write(struct rte_eth_dev *dev,
NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
- RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+ "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
return -EINVAL;
}
@@ -2467,9 +2413,9 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
return -EINVAL;
if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
- RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
+ PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
+ "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
return -EINVAL;
}
@@ -2555,14 +2501,14 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
/* Checking if RSS is enabled */
if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
if (rss_hf != 0) { /* Enable RSS? */
- RTE_LOG(ERR, PMD, "RSS unsupported\n");
+ PMD_DRV_LOG(ERR, "RSS unsupported");
return -EINVAL;
}
return 0; /* Nothing to do */
}
if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
- RTE_LOG(ERR, PMD, "hash key too long\n");
+ PMD_DRV_LOG(ERR, "hash key too long");
return -EINVAL;
}
@@ -2634,7 +2580,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev)
uint16_t queue;
int i, j, ret;
- RTE_LOG(INFO, PMD, "setting default RSS conf for %u queues\n",
+ PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
rx_queues);
nfp_reta_conf[0].mask = ~0x0;
@@ -2654,7 +2600,7 @@ nfp_net_rss_config_default(struct rte_eth_dev *dev)
dev_conf = &dev->data->dev_conf;
if (!dev_conf) {
- RTE_LOG(INFO, PMD, "wrong rss conf");
+ PMD_DRV_LOG(INFO, "wrong rss conf");
return -EINVAL;
}
rss_conf = dev_conf->rx_adv_conf.rss_conf;
@@ -2679,6 +2625,7 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.dev_infos_get = nfp_net_infos_get,
.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
.mtu_set = nfp_net_dev_mtu_set,
+ .mac_addr_set = nfp_set_mac_addr,
.vlan_offload_set = nfp_net_vlan_offload_set,
.reta_update = nfp_net_reta_update,
.reta_query = nfp_net_reta_query,
@@ -2734,10 +2681,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
uint64_t tx_bar_off = 0, rx_bar_off = 0;
uint32_t start_q;
int stride = 4;
-
- nspu_desc_t *nspu_desc = NULL;
- uint64_t bar_offset;
int port = 0;
+ int err;
PMD_INIT_FUNC_TRACE();
@@ -2747,18 +2692,17 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
(pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
port = get_pf_port_number(eth_dev->data->name);
if (port < 0 || port > 7) {
- RTE_LOG(ERR, PMD, "Port value is wrong\n");
+ PMD_DRV_LOG(ERR, "Port value is wrong");
return -ENODEV;
}
- PMD_INIT_LOG(DEBUG, "Working with PF port value %d\n", port);
+ PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port);
/* This points to port 0 private data */
hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
/* This points to the specific port private data */
hw = &hwport0[port];
- hw->pf_port_idx = port;
} else {
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
hwport0 = 0;
@@ -2786,26 +2730,21 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
if (hw->ctrl_bar == NULL) {
- RTE_LOG(ERR, PMD,
- "hw->ctrl_bar is NULL. BAR0 not configured\n");
+ PMD_DRV_LOG(ERR,
+ "hw->ctrl_bar is NULL. BAR0 not configured");
return -ENODEV;
}
if (hw->is_pf && port == 0) {
- nspu_desc = hw->nspu_desc;
-
- if (nfp_nsp_map_ctrl_bar(nspu_desc, &bar_offset) != 0) {
- /*
- * A firmware should be there after PF probe so this
- * should not happen.
- */
- RTE_LOG(ERR, PMD, "PF BAR symbol resolution failed\n");
- return -ENODEV;
+ hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
+ hw->total_ports * 32768,
+ &hw->ctrl_area);
+ if (!hw->ctrl_bar) {
+ printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar");
+ return -EIO;
}
- /* vNIC PF control BAR is a subset of PF PCI device BAR */
- hw->ctrl_bar += bar_offset;
- PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
}
if (port > 0) {
@@ -2817,7 +2756,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
(port * NFP_PF_CSR_SLICE_SIZE);
}
- PMD_INIT_LOG(DEBUG, "ctrl bar: %p\n", hw->ctrl_bar);
+ PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
@@ -2828,31 +2767,34 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
case PCI_DEVICE_ID_NFP6000_PF_NIC:
case PCI_DEVICE_ID_NFP6000_VF_NIC:
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
- tx_bar_off = NFP_PCIE_QUEUE(start_q);
+ tx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
- rx_bar_off = NFP_PCIE_QUEUE(start_q);
+ rx_bar_off = start_q * NFP_QCP_QUEUE_ADDR_SZ;
break;
default:
- RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
- return -ENODEV;
+ PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
+ err = -ENODEV;
+ goto dev_err_ctrl_map;
}
- PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "\n", tx_bar_off);
- PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "\n", rx_bar_off);
+ PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
+ PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
if (hw->is_pf && port == 0) {
/* configure access to tx/rx vNIC BARs */
- nfp_nsp_map_queues_bar(nspu_desc, &bar_offset);
- PMD_INIT_LOG(DEBUG, "tx/rx bar_offset: %" PRIx64 "\n",
- bar_offset);
- hwport0->hw_queues = (uint8_t *)pci_dev->mem_resource[0].addr;
-
- /* vNIC PF tx/rx BARs are a subset of PF PCI device */
- hwport0->hw_queues += bar_offset;
+ hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
+ NFP_PCIE_QUEUE(0),
+ NFP_QCP_QUEUE_AREA_SZ,
+ &hw->hwqueues_area);
+
+ if (!hwport0->hw_queues) {
+ printf("nfp_rtsym_map fails for net.qc");
+ err = -EIO;
+ goto dev_err_ctrl_map;
+ }
- /* Lets seize the chance to read eth table from hw */
- if (nfp_nsp_eth_read_table(nspu_desc, &hw->eth_table))
- return -ENODEV;
+ PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p",
+ hwport0->hw_queues);
}
if (hw->is_pf) {
@@ -2877,14 +2819,20 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
hw->mtu = ETHER_MTU;
+ /* VLAN insertion is incompatible with LSOv2 */
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
+ hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
+
if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
hw->rx_offset = NFP_NET_RX_OFFSET;
else
hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
- PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
- hw->ver, hw->max_mtu);
- PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s", hw->cap,
+ PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
+ NFD_CFG_MAJOR_VERSION_of(hw->ver),
+ NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
+
+ PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
@@ -2894,8 +2842,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
+ hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
- hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");
+ hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
+ hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
+ hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
hw->ctrl = 0;
@@ -2912,7 +2863,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to space for MAC address");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto dev_err_queues_map;
}
if (hw->is_pf) {
@@ -2923,6 +2875,8 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
}
if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
+ PMD_INIT_LOG(INFO, "Using random mac address for port %d",
+ port);
/* Using random mac addresses for VFs */
eth_random_addr(&hw->mac_addr[0]);
nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
@@ -2951,11 +2905,19 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
nfp_net_stats_reset(eth_dev);
return 0;
+
+dev_err_queues_map:
+ nfp_cpp_area_free(hw->hwqueues_area);
+dev_err_ctrl_map:
+ nfp_cpp_area_free(hw->ctrl_area);
+
+ return err;
}
static int
nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
- nfpu_desc_t *nfpu_desc, void **priv)
+ struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
+ int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
{
struct rte_eth_dev *eth_dev;
struct nfp_net_hw *hw;
@@ -2993,12 +2955,16 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
* Then dev_private is adjusted per port.
*/
hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
- hw->nspu_desc = nfpu_desc->nspu;
- hw->nfpu_desc = nfpu_desc;
+ hw->cpp = cpp;
+ hw->hwinfo = hwinfo;
+ hw->sym_tbl = sym_tbl;
+ hw->pf_port_idx = phys_port;
hw->is_pf = 1;
if (ports > 1)
hw->pf_multiport_enabled = 1;
+ hw->total_ports = ports;
+
eth_dev->device = &dev->device;
rte_eth_copy_pci_info(eth_dev, dev);
@@ -3006,82 +2972,228 @@ nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
if (ret)
rte_eth_dev_release_port(eth_dev);
+ else
+ rte_eth_dev_probing_finish(eth_dev);
rte_free(port_name);
return ret;
}
+#define DEFAULT_FW_PATH "/lib/firmware/netronome"
+
+static int
+nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
+{
+ struct nfp_cpp *cpp = nsp->cpp;
+ int fw_f;
+ char *fw_buf;
+ char fw_name[125];
+ char serial[40];
+ struct stat file_stat;
+ off_t fsize, bytes;
+
+ /* Looking for firmware file in order of priority */
+
+ /* First try to find a firmware image specific for this device */
+ sprintf(serial, "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
+ cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
+ cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
+ cpp->interface & 0xff);
+
+ sprintf(fw_name, "%s/%s.nffw", DEFAULT_FW_PATH, serial);
+
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f > 0)
+ goto read_fw;
+
+ /* Then try the PCI name */
+ sprintf(fw_name, "%s/pci-%s.nffw", DEFAULT_FW_PATH, dev->device.name);
+
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f > 0)
+ goto read_fw;
+
+ /* Finally try the card type and media */
+ sprintf(fw_name, "%s/%s", DEFAULT_FW_PATH, card);
+ PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
+ fw_f = open(fw_name, O_RDONLY);
+ if (fw_f < 0) {
+ PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
+ return -ENOENT;
+ }
+
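+/*
+ * Illustrative sketch, not part of the upstream patch: given the three
+ * lookups above, a hypothetical NFP whose serial bytes are
+ * 00:15:4d:12:34:56 with interface 0x1000, bound at PCI address
+ * 0000:04:00.0, would be probed for firmware in this order:
+ *
+ *   /lib/firmware/netronome/serial-00-15-4d-12-34-56-10-00.nffw
+ *   /lib/firmware/netronome/pci-0000:04:00.0.nffw
+ *   /lib/firmware/netronome/nic_<partno>_<ports>x<Gbps>.nffw
+ *
+ * The serial and PCI values are made up for illustration; the last
+ * name is built by nfp_fw_setup() below.
+ */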
+read_fw:
+ if (fstat(fw_f, &file_stat) < 0) {
+ PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name);
+ close(fw_f);
+ return -ENOENT;
+ }
+
+ fsize = file_stat.st_size;
+ PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "",
+ fw_name, (uint64_t)fsize);
+
+ fw_buf = malloc((size_t)fsize);
+ if (!fw_buf) {
+ PMD_DRV_LOG(INFO, "malloc failed for fw buffer");
+ close(fw_f);
+ return -ENOMEM;
+ }
+ memset(fw_buf, 0, fsize);
+
+ bytes = read(fw_f, fw_buf, fsize);
+ if (bytes != fsize) {
+ PMD_DRV_LOG(INFO, "Reading fw to buffer failed."
+ "Just %" PRIu64 " of %" PRIu64 " bytes read",
+ (uint64_t)bytes, (uint64_t)fsize);
+ free(fw_buf);
+ close(fw_f);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO, "Uploading the firmware ...");
+ nfp_nsp_load_fw(nsp, fw_buf, bytes);
+ PMD_DRV_LOG(INFO, "Done");
+
+ free(fw_buf);
+ close(fw_f);
+
+ return 0;
+}
+
+static int
+nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
+ struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
+{
+ struct nfp_nsp *nsp;
+ const char *nfp_fw_model;
+ char card_desc[100];
+ int err = 0;
+
+ nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
+
+ if (nfp_fw_model) {
+ PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
+ } else {
+ PMD_DRV_LOG(ERR, "firmware model NOT found");
+ return -EIO;
+ }
+
+ if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
+ PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
+ nfp_eth_table->count);
+ return -EIO;
+ }
+
+ PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
+ nfp_eth_table->count);
+
+ PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
+
+ sprintf(card_desc, "nic_%s_%dx%d.nffw", nfp_fw_model,
+ nfp_eth_table->count, nfp_eth_table->ports[0].speed / 1000);
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp) {
+ PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
+ return -EIO;
+ }
+
+ nfp_nsp_device_soft_reset(nsp);
+ err = nfp_fw_upload(dev, nsp, card_desc);
+
+ nfp_nsp_close(nsp);
+ return err;
+}
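+/*
+ * Illustrative example, not part of the upstream patch: the card
+ * descriptor built above encodes the assembly part number, the port
+ * count and the per-port speed in Gbps. Assuming a hypothetical
+ * "assembly.partno" of AMDA0099-0001 with 2 ports at 25000 Mbps,
+ * nfp_fw_upload() would be asked for:
+ *
+ *   nic_AMDA0099-0001_2x25.nffw
+ */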
+
static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *dev)
{
- nfpu_desc_t *nfpu_desc;
- nspu_desc_t *nspu_desc;
- uint64_t offset_symbol;
- uint8_t *bar_offset;
- int major, minor;
+ struct nfp_cpp *cpp;
+ struct nfp_hwinfo *hwinfo;
+ struct nfp_rtsym_table *sym_tbl;
+ struct nfp_eth_table *nfp_eth_table = NULL;
int total_ports;
void *priv = 0;
int ret = -ENODEV;
+ int err;
int i;
if (!dev)
return ret;
- nfpu_desc = rte_malloc("nfp nfpu", sizeof(nfpu_desc_t), 0);
- if (!nfpu_desc)
- return -ENOMEM;
+	/*
+	 * When the device is bound to UIO it could, by mistake, be used
+	 * by two DPDK apps at once, and the UIO driver does not prevent
+	 * it. This could lead to a serious problem when configuring the
+	 * NFP CPP interface. We avoid this by telling the CPP init code
+	 * to use a lock file if UIO is being used.
+	 */
+ if (dev->kdrv == RTE_KDRV_VFIO)
+ cpp = nfp_cpp_from_device_name(dev, 0);
+ else
+ cpp = nfp_cpp_from_device_name(dev, 1);
- if (nfpu_open(dev, nfpu_desc, 0) < 0) {
- RTE_LOG(ERR, PMD,
- "nfpu_open failed\n");
- goto nfpu_error;
+ if (!cpp) {
+		PMD_DRV_LOG(ERR, "A CPP handle cannot be obtained");
+ ret = -EIO;
+ goto error;
}
- nspu_desc = nfpu_desc->nspu;
+ hwinfo = nfp_hwinfo_read(cpp);
+ if (!hwinfo) {
+ PMD_DRV_LOG(ERR, "Error reading hwinfo table");
+ return -EIO;
+ }
+ nfp_eth_table = nfp_eth_read_ports(cpp);
+ if (!nfp_eth_table) {
+ PMD_DRV_LOG(ERR, "Error reading NFP ethernet table");
+ return -EIO;
+ }
- /* Check NSP ABI version */
- if (nfp_nsp_get_abi_version(nspu_desc, &major, &minor) < 0) {
- RTE_LOG(INFO, PMD, "NFP NSP not present\n");
+ if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
+ PMD_DRV_LOG(INFO, "Error when uploading firmware");
+ ret = -EIO;
goto error;
}
- PMD_INIT_LOG(INFO, "nspu ABI version: %d.%d\n", major, minor);
- if ((major == 0) && (minor < 20)) {
- RTE_LOG(INFO, PMD, "NFP NSP ABI version too old. Required 0.20 or higher\n");
+ /* Now the symbol table should be there */
+ sym_tbl = nfp_rtsym_table_read(cpp);
+ if (!sym_tbl) {
+ PMD_DRV_LOG(ERR, "Something is wrong with the firmware"
+ " symbol table");
+ ret = -EIO;
goto error;
}
- ret = nfp_nsp_fw_setup(nspu_desc, "nfd_cfg_pf0_num_ports",
- &offset_symbol);
- if (ret)
+ total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
+ if (total_ports != (int)nfp_eth_table->count) {
+ PMD_DRV_LOG(ERR, "Inconsistent number of ports");
+ ret = -EIO;
goto error;
-
- bar_offset = (uint8_t *)dev->mem_resource[0].addr;
- bar_offset += offset_symbol;
- total_ports = (uint32_t)*bar_offset;
- PMD_INIT_LOG(INFO, "Total pf ports: %d\n", total_ports);
+ }
+ PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports);
if (total_ports <= 0 || total_ports > 8) {
- RTE_LOG(ERR, PMD, "nfd_cfg_pf0_num_ports symbol with wrong value");
+ PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
ret = -ENODEV;
goto error;
}
for (i = 0; i < total_ports; i++) {
- ret = nfp_pf_create_dev(dev, i, total_ports, nfpu_desc, &priv);
+ ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
+ nfp_eth_table->ports[i].index,
+ sym_tbl, &priv);
if (ret)
- goto error;
+ break;
}
- return 0;
-
error:
- nfpu_close(nfpu_desc);
-nfpu_error:
- rte_free(nfpu_desc);
-
+ free(nfp_eth_table);
return ret;
}
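/*
 * Summary sketch, not part of the upstream patch: the reworked PF
 * probe above replaces the NSPU expansion-BAR dance with the nfpcore
 * interfaces, in this order (each step depends on the previous one):
 *
 *   cpp = nfp_cpp_from_device_name(dev, lock);  // CPP handle, UIO lock
 *   hwinfo = nfp_hwinfo_read(cpp);              // hardware info table
 *   nfp_eth_table = nfp_eth_read_ports(cpp);    // physical port table
 *   nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo);  // reset + fw upload
 *   sym_tbl = nfp_rtsym_table_read(cpp);        // firmware symbols
 *   nfp_pf_create_dev(...);                     // one ethdev per port
 */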
@@ -3129,8 +3241,19 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
(pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
port = get_pf_port_number(eth_dev->data->name);
+		/*
+		 * Hotplug is not possible with a multiport PF, although
+		 * the data structures can still be freed for the first
+		 * port.
+		 */
+ if (port != 0)
+ return -ENOTSUP;
hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
hw = &hwport0[port];
+ nfp_cpp_area_free(hw->ctrl_area);
+ nfp_cpp_area_free(hw->hwqueues_area);
+ free(hw->hwinfo);
+ free(hw->sym_tbl);
+ nfp_cpp_free(hw->cpp);
} else {
hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
}
@@ -3161,9 +3284,7 @@ RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
-RTE_INIT(nfp_init_log);
-static void
-nfp_init_log(void)
+RTE_INIT(nfp_init_log)
{
nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
if (nfp_logtype_init >= 0)
diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h
index 1ebd99ca..21e17da1 100644
--- a/drivers/net/nfp/nfp_net_ctrl.h
+++ b/drivers/net/nfp/nfp_net_ctrl.h
@@ -120,6 +120,9 @@
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */
#define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */
+#define NFP_NET_CFG_CTRL_LSO2 (0x1 << 28) /* LSO/TSO (version 2) */
+#define NFP_NET_CFG_CTRL_RSS2 (0x1 << 29) /* RSS (version 2) */
+#define NFP_NET_CFG_CTRL_LIVE_ADDR (0x1 << 31) /* live MAC addr change */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -131,6 +134,7 @@
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
+#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
#define NFP_NET_CFG_TXRS_ENABLE 0x0008
#define NFP_NET_CFG_RXRS_ENABLE 0x0010
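/*
 * Illustrative sketch, not part of the upstream patch: the
 * nfp_set_mac_addr dev_op registered earlier is not shown in this
 * diff context, but a minimal implementation built on the two bits
 * above could plausibly look as follows. It assumes that
 * NFP_NET_CFG_CTRL_LIVE_ADDR gates changing the MAC while the port is
 * enabled, and that nfp_net_write_mac() and nfp_net_reconfig() behave
 * as elsewhere in this driver.
 */
static int
sketch_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Refuse a live change if the firmware cannot do it atomically */
	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
	    !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		return -EBUSY;

	/* Write the new address and tell the firmware about the change */
	nfp_net_write_mac(hw, (uint8_t *)mac_addr);
	return nfp_net_reconfig(hw, hw->ctrl, NFP_NET_CFG_UPDATE_MACADDR);
}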
@@ -140,6 +144,8 @@
#define NFP_NET_CFG_LSC 0x0020
#define NFP_NET_CFG_MACADDR 0x0024
+#define NFP_NET_CFG_CTRL_LSO_ANY (NFP_NET_CFG_CTRL_LSO | NFP_NET_CFG_CTRL_LSO2)
+
/*
* Read-only words (0x0030 - 0x0050):
* @NFP_NET_CFG_VERSION: Firmware version number
diff --git a/drivers/net/nfp/nfp_net_eth.h b/drivers/net/nfp/nfp_net_eth.h
deleted file mode 100644
index af57f03c..00000000
--- a/drivers/net/nfp/nfp_net_eth.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2017 Netronome Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * vim:shiftwidth=8:noexpandtab
- *
- * @file dpdk/pmd/nfp_net_eth.h
- *
- * Netronome NFP_NET PDM driver
- */
-
-union eth_table_entry {
- struct {
- uint64_t port;
- uint64_t state;
- uint8_t mac_addr[6];
- uint8_t resv[2];
- uint64_t control;
- };
- uint64_t raw[4];
-};
-
-#ifndef BIT_ULL
-#define BIT_ULL(a) (1ULL << (a))
-#endif
-
-#define NSP_ETH_NBI_PORT_COUNT 24
-#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
-#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * sizeof(union eth_table_entry))
-
-#define NSP_ETH_PORT_LANES 0xf
-#define NSP_ETH_PORT_INDEX 0xff00
-#define NSP_ETH_PORT_LABEL 0x3f000000000000
-#define NSP_ETH_PORT_PHYLABEL 0xfc0000000000000
-
-#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES)
-
-#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0)
-#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
-#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
-#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
-#define NSP_ETH_STATE_RATE 0xf00
-#define NSP_ETH_STATE_INTERFACE 0xff000
-#define NSP_ETH_STATE_MEDIA 0x300000
-#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
-#define NSP_ETH_STATE_ANEG 0x3800000
-
-#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
-#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
-#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
-#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
-#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
-#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
-#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
diff --git a/drivers/net/nfp/nfp_net_logs.h b/drivers/net/nfp/nfp_net_logs.h
index 3fe24e96..9952881c 100644
--- a/drivers/net/nfp/nfp_net_logs.h
+++ b/drivers/net/nfp/nfp_net_logs.h
@@ -36,19 +36,20 @@
extern int nfp_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+ rte_log(RTE_LOG_ ## level, nfp_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
#ifdef RTE_LIBRTE_NFP_NET_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args)
+ RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args)
#else
#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
#endif
#ifdef RTE_LIBRTE_NFP_NET_DEBUG_TX
#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args)
+ RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args)
#define ASSERT(x) if (!(x)) rte_panic("NFP_NET: x")
#else
#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
@@ -58,6 +59,6 @@ extern int nfp_logtype_init;
extern int nfp_logtype_driver;
#define PMD_DRV_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, nfp_logtype_driver, \
- "%s(): " fmt, __func__, ## args)
+ "%s(): " fmt "\n", __func__, ## args)
#endif /* _NFP_NET_LOGS_H_ */
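/*
 * Editor's note, not part of the upstream patch: with the dynamic log
 * types registered for this driver, verbosity can be raised per
 * component at runtime, e.g.
 *
 *   rte_log_set_level(nfp_logtype_driver, RTE_LOG_DEBUG);
 *
 * or, assuming the EAL option syntax of this DPDK era, from the
 * command line:
 *
 *   --log-level=pmd.net.nfp.driver,8
 */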
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index 1ae0ea62..c1b044ee 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015 Netronome Systems, Inc.
+ * Copyright (c) 2014-2018 Netronome Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,6 +63,7 @@ struct nfp_net_adapter;
#define NFP_NET_CRTL_BAR 0
#define NFP_NET_TX_BAR 2
#define NFP_NET_RX_BAR 2
+#define NFP_QCP_QUEUE_AREA_SZ 0x80000
/* Macros for accessing the Queue Controller Peripheral 'CSRs' */
#define NFP_QCP_QUEUE_OFF(_x) ((_x) * 0x800)
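/*
 * Editor's note, not part of the upstream patch: each queue controller
 * peripheral occupies 0x800 bytes (NFP_QCP_QUEUE_OFF above), so the new
 * NFP_QCP_QUEUE_AREA_SZ of 0x80000 covers 0x80000 / 0x800 = 256 queue
 * CSR slots. This is the fixed window nfp_net_init() now maps once with
 * nfp_cpp_map_area() instead of reprogramming an expansion BAR.
 */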
@@ -184,18 +185,28 @@ static inline void nn_writeq(uint64_t val, volatile void *addr)
struct nfp_net_tx_desc {
union {
struct {
- uint8_t dma_addr_hi; /* High bits of host buf address */
+ uint8_t dma_addr_hi; /* High bits of host buf address */
__le16 dma_len; /* Length to DMA for this desc */
- uint8_t offset_eop; /* Offset in buf where pkt starts +
+ uint8_t offset_eop; /* Offset in buf where pkt starts +
* highest bit is eop flag.
*/
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
- __le16 lso; /* MSS to be used for LSO */
- uint8_t l4_offset; /* LSO, where the L4 data starts */
- uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */
-
- __le16 vlan; /* VLAN tag to add if indicated */
+ __le16 mss; /* MSS to be used for LSO */
+ uint8_t lso_hdrlen; /* LSO, where the data starts */
+ uint8_t flags; /* TX Flags, see @PCIE_DESC_TX_* */
+
+ union {
+ struct {
+ /*
+ * L3 and L4 header offsets required
+ * for TSOv2
+ */
+ uint8_t l3_offset;
+ uint8_t l4_offset;
+ };
+ __le16 vlan; /* VLAN tag to add if indicated */
+ };
__le16 data_len; /* Length of frame + meta data */
} __attribute__((__packed__));
__le32 vals[4];
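/*
 * Editor's note, not part of the upstream patch: the union above is the
 * reason nfp_net_init() now clears NFP_NET_CFG_CTRL_TXVLAN whenever the
 * firmware advertises LSOv2: with TSOv2 the 16-bit vlan field of the TX
 * descriptor is reused for the L3 and L4 header offsets, so VLAN
 * insertion and LSOv2 cannot be used on the same descriptor.
 */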
@@ -247,15 +258,13 @@ struct nfp_net_txq {
/*
* At this point 48 bytes have been used for all the fields in the
* TX critical path. We have room for 8 bytes and still all placed
- * in a cache line. We are not using the threshold values below nor
- * the txq_flags but if we need to, we can add the most used in the
- * remaining bytes.
+ * in a cache line. We are not using the threshold values below, but
+ * if needed we can add the most-used fields in the remaining bytes.
*/
uint32_t tx_rs_thresh; /* not used by now. Future? */
uint32_t tx_pthresh; /* not used by now. Future? */
uint32_t tx_hthresh; /* not used by now. Future? */
uint32_t tx_wthresh; /* not used by now. Future? */
- uint32_t txq_flags; /* not used by now. Future? */
uint16_t port_id;
int qidx;
int tx_qcidx;
@@ -430,20 +439,21 @@ struct nfp_net_hw {
/* Records starting point for counters */
struct rte_eth_stats eth_stats_base;
-#ifdef NFP_NET_LIBNFP
struct nfp_cpp *cpp;
struct nfp_cpp_area *ctrl_area;
- struct nfp_cpp_area *tx_area;
- struct nfp_cpp_area *rx_area;
+ struct nfp_cpp_area *hwqueues_area;
struct nfp_cpp_area *msix_area;
-#endif
+
uint8_t *hw_queues;
uint8_t is_pf;
uint8_t pf_port_idx;
uint8_t pf_multiport_enabled;
+ uint8_t total_ports;
+
union eth_table_entry *eth_table;
- nspu_desc_t *nspu_desc;
- nfpu_desc_t *nfpu_desc;
+
+ struct nfp_hwinfo *hwinfo;
+ struct nfp_rtsym_table *sym_tbl;
};
struct nfp_net_adapter {
diff --git a/drivers/net/nfp/nfp_nfpu.c b/drivers/net/nfp/nfp_nfpu.c
deleted file mode 100644
index f11afef3..00000000
--- a/drivers/net/nfp/nfp_nfpu.c
+++ /dev/null
@@ -1,108 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <errno.h>
-#include <sys/file.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <sys/types.h>
-
-#include <rte_bus_pci.h>
-#include <rte_malloc.h>
-
-#include "nfp_nfpu.h"
-
-/* PF BAR and expansion BAR for the NSP interface */
-#define NFP_CFG_PCIE_BAR 0
-#define NFP_CFG_EXP_BAR 7
-
-#define NFP_CFG_EXP_BAR_CFG_BASE 0x30000
-
-/* There could be other NFP userspace tools using the NSP interface.
- * Make sure there is no other process using it and locking the access for
- * avoiding problems.
- */
-static int
-nspv_aquire_process_lock(nfpu_desc_t *desc)
-{
- int rc;
- struct flock lock;
- char lockname[30];
-
- memset(&lock, 0, sizeof(lock));
-
- snprintf(lockname, sizeof(lockname), "/var/lock/nfp%d", desc->nfp);
-
- /* Using S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH */
- desc->lock = open(lockname, O_RDWR | O_CREAT, 0666);
-
- if (desc->lock < 0)
- return desc->lock;
-
- lock.l_type = F_WRLCK;
- lock.l_whence = SEEK_SET;
- rc = -1;
- while (rc != 0) {
- rc = fcntl(desc->lock, F_SETLK, &lock);
- if (rc < 0) {
- if ((errno != EAGAIN) && (errno != EACCES)) {
- close(desc->lock);
- return rc;
- }
- }
- }
-
- return 0;
-}
-
-int
-nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp)
-{
- void *cfg_base, *mem_base;
- size_t barsz;
- int ret = 0;
- int i = 0;
-
- desc->nfp = nfp;
-
- ret = nspv_aquire_process_lock(desc);
- if (ret)
- return -1;
-
- barsz = pci_dev->mem_resource[0].len;
-
- /* barsz in log2 */
- while (barsz >>= 1)
- i++;
-
- barsz = i;
-
- /* Sanity check: we can assume any bar size less than 1MB an error */
- if (barsz < 20)
- return -1;
-
- /* Getting address for NFP expansion BAR registers */
- cfg_base = pci_dev->mem_resource[0].addr;
- cfg_base = (uint8_t *)cfg_base + NFP_CFG_EXP_BAR_CFG_BASE;
-
- /* Getting address for NFP NSP interface registers */
- mem_base = pci_dev->mem_resource[0].addr;
- mem_base = (uint8_t *)mem_base + (NFP_CFG_EXP_BAR << (barsz - 3));
-
-
- desc->nspu = rte_malloc("nfp nspu", sizeof(nspu_desc_t), 0);
- nfp_nspu_init(desc->nspu, desc->nfp, NFP_CFG_PCIE_BAR, barsz,
- NFP_CFG_EXP_BAR, cfg_base, mem_base);
-
- return ret;
-}
-
-int
-nfpu_close(nfpu_desc_t *desc)
-{
- rte_free(desc->nspu);
- close(desc->lock);
- unlink("/var/lock/nfp0");
- return 0;
-}
diff --git a/drivers/net/nfp/nfp_nfpu.h b/drivers/net/nfp/nfp_nfpu.h
deleted file mode 100644
index e56fa099..00000000
--- a/drivers/net/nfp/nfp_nfpu.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2017 Netronome Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * vim:shiftwidth=8:noexpandtab
- *
- * @file dpdk/pmd/nfp_nfpu.h
- *
- * Netronome NFP_NET PDM driver
- */
-
-/*
- * NFP User interface creates a window for talking with NFP NSP processor
- */
-
-
-#include <rte_bus_pci.h>
-#include "nfp_nspu.h"
-
-typedef struct {
- int nfp;
- int lock;
- nspu_desc_t *nspu;
-} nfpu_desc_t;
-
-int nfpu_open(struct rte_pci_device *pci_dev, nfpu_desc_t *desc, int nfp);
-int nfpu_close(nfpu_desc_t *desc);
diff --git a/drivers/net/nfp/nfp_nspu.c b/drivers/net/nfp/nfp_nspu.c
deleted file mode 100644
index f9089832..00000000
--- a/drivers/net/nfp/nfp_nspu.c
+++ /dev/null
@@ -1,642 +0,0 @@
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/file.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-
-#include <rte_log.h>
-#include <rte_byteorder.h>
-
-#include "nfp_nfpu.h"
-
-#define CFG_EXP_BAR_ADDR_SZ 1
-#define CFG_EXP_BAR_MAP_TYPE 1
-
-#define EXP_BAR_TARGET_SHIFT 23
-#define EXP_BAR_LENGTH_SHIFT 27 /* 0=32, 1=64 bit increment */
-#define EXP_BAR_MAP_TYPE_SHIFT 29 /* Bulk BAR map */
-
-/* NFP target for NSP access */
-#define NFP_NSP_TARGET 7
-
-/* Expansion BARs for mapping PF vnic BARs */
-#define NFP_NET_PF_CFG_EXP_BAR 6
-#define NFP_NET_PF_HW_QUEUES_EXP_BAR 5
-
-/*
- * This is an NFP internal address used for configuring properly an NFP
- * expansion BAR.
- */
-#define MEM_CMD_BASE_ADDR 0x8100000000
-
-/* NSP interface registers */
-#define NSP_BASE (MEM_CMD_BASE_ADDR + 0x22100)
-#define NSP_STATUS 0x00
-#define NSP_COMMAND 0x08
-#define NSP_BUFFER 0x10
-#define NSP_DEFAULT_BUF 0x18
-#define NSP_DEFAULT_BUF_CFG 0x20
-
-#define NSP_MAGIC 0xab10
-#define NSP_STATUS_MAGIC(x) (((x) >> 48) & 0xffff)
-#define NSP_STATUS_MAJOR(x) (int)(((x) >> 44) & 0xf)
-#define NSP_STATUS_MINOR(x) (int)(((x) >> 32) & 0xfff)
-
-/* NSP commands */
-#define NSP_CMD_RESET 1
-#define NSP_CMD_FW_LOAD 6
-#define NSP_CMD_READ_ETH_TABLE 7
-#define NSP_CMD_WRITE_ETH_TABLE 8
-#define NSP_CMD_GET_SYMBOL 14
-
-#define NSP_BUFFER_CFG_SIZE_MASK (0xff)
-
-#define NSP_REG_ADDR(d, off, reg) ((uint8_t *)(d)->mem_base + (off) + (reg))
-#define NSP_REG_VAL(p) (*(uint64_t *)(p))
-
-/*
- * An NFP expansion BAR is configured for allowing access to a specific NFP
- * target:
- *
- * IN:
- * desc: struct with basic NSP addresses to work with
- * expbar: NFP PF expansion BAR index to configure
- * tgt: NFP target to configure access
- * addr: NFP target address
- *
- * OUT:
- * pcie_offset: NFP PCI BAR offset to work with
- */
-static void
-nfp_nspu_mem_bar_cfg(nspu_desc_t *desc, int expbar, int tgt,
- uint64_t addr, uint64_t *pcie_offset)
-{
- uint64_t x, y, barsz;
- uint32_t *expbar_ptr;
-
- barsz = desc->barsz;
-
- /*
- * NFP CPP address to configure. This comes from NFP 6000
- * datasheet document based on Bulk mapping.
- */
- x = (addr >> (barsz - 3)) << (21 - (40 - (barsz - 3)));
- x |= CFG_EXP_BAR_MAP_TYPE << EXP_BAR_MAP_TYPE_SHIFT;
- x |= CFG_EXP_BAR_ADDR_SZ << EXP_BAR_LENGTH_SHIFT;
- x |= tgt << EXP_BAR_TARGET_SHIFT;
-
- /* Getting expansion bar configuration register address */
- expbar_ptr = (uint32_t *)desc->cfg_base;
- /* Each physical PCI BAR has 8 NFP expansion BARs */
- expbar_ptr += (desc->pcie_bar * 8) + expbar;
-
- /* Writing to the expansion BAR register */
- *expbar_ptr = (uint32_t)x;
-
- /* Getting the pcie offset to work with from userspace */
- y = addr & ((uint64_t)(1 << (barsz - 3)) - 1);
- *pcie_offset = y;
-}
-
-/*
- * Configuring an expansion bar for accessing NSP userspace interface. This
- * function configures always the same expansion bar, which implies access to
- * previously configured NFP target is lost.
- */
-static void
-nspu_xlate(nspu_desc_t *desc, uint64_t addr, uint64_t *pcie_offset)
-{
- nfp_nspu_mem_bar_cfg(desc, desc->exp_bar, NFP_NSP_TARGET, addr,
- pcie_offset);
-}
-
-int
-nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor)
-{
- uint64_t pcie_offset;
- uint64_t nsp_reg;
-
- nspu_xlate(desc, NSP_BASE, &pcie_offset);
- nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, pcie_offset, NSP_STATUS));
-
- if (NSP_STATUS_MAGIC(nsp_reg) != NSP_MAGIC)
- return -1;
-
- *major = NSP_STATUS_MAJOR(nsp_reg);
- *minor = NSP_STATUS_MINOR(nsp_reg);
-
- return 0;
-}
-
-int
-nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz,
- int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap)
-{
- uint64_t offset, buffaddr;
- uint64_t nsp_reg;
-
- desc->nfp = nfp;
- desc->pcie_bar = pcie_bar;
- desc->exp_bar = exp_bar;
- desc->barsz = pcie_barsz;
- desc->windowsz = 1 << (desc->barsz - 3);
- desc->cfg_base = exp_bar_cfg_base;
- desc->mem_base = exp_bar_mmap;
-
- nspu_xlate(desc, NSP_BASE, &offset);
-
- /*
- * Other NSPU clients can use other buffers. Let's tell NSPU we use the
- * default buffer.
- */
- buffaddr = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF));
- NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_BUFFER)) = buffaddr;
-
- /* NFP internal addresses are 40 bits. Clean all other bits here */
- buffaddr = buffaddr & (((uint64_t)1 << 40) - 1);
- desc->bufaddr = buffaddr;
-
- /* Lets get information about the buffer */
- nsp_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_DEFAULT_BUF_CFG));
-
- /* Buffer size comes in MBs. Coversion to bytes */
- desc->buf_size = ((size_t)nsp_reg & NSP_BUFFER_CFG_SIZE_MASK) << 20;
-
- return 0;
-}
-
-#define NSPU_NFP_BUF(addr, base, off) \
- (*(uint64_t *)((uint8_t *)(addr)->mem_base + ((base) | (off))))
-
-#define NSPU_HOST_BUF(base, off) (*(uint64_t *)((uint8_t *)(base) + (off)))
-
-static int
-nspu_buff_write(nspu_desc_t *desc, void *buffer, size_t size)
-{
- uint64_t pcie_offset, pcie_window_base, pcie_window_offset;
- uint64_t windowsz = desc->windowsz;
- uint64_t buffaddr, j, i = 0;
- int ret = 0;
-
- if (size > desc->buf_size)
- return -1;
-
- buffaddr = desc->bufaddr;
- windowsz = desc->windowsz;
-
- while (i < size) {
- /* Expansion bar reconfiguration per window size */
- nspu_xlate(desc, buffaddr + i, &pcie_offset);
- pcie_window_base = pcie_offset & (~(windowsz - 1));
- pcie_window_offset = pcie_offset & (windowsz - 1);
- for (j = pcie_window_offset; ((j < windowsz) && (i < size));
- j += 8) {
- NSPU_NFP_BUF(desc, pcie_window_base, j) =
- NSPU_HOST_BUF(buffer, i);
- i += 8;
- }
- }
-
- return ret;
-}
-
-static int
-nspu_buff_read(nspu_desc_t *desc, void *buffer, size_t size)
-{
- uint64_t pcie_offset, pcie_window_base, pcie_window_offset;
- uint64_t windowsz, i = 0, j;
- uint64_t buffaddr;
- int ret = 0;
-
- if (size > desc->buf_size)
- return -1;
-
- buffaddr = desc->bufaddr;
- windowsz = desc->windowsz;
-
- while (i < size) {
- /* Expansion bar reconfiguration per window size */
- nspu_xlate(desc, buffaddr + i, &pcie_offset);
- pcie_window_base = pcie_offset & (~(windowsz - 1));
- pcie_window_offset = pcie_offset & (windowsz - 1);
- for (j = pcie_window_offset; ((j < windowsz) && (i < size));
- j += 8) {
- NSPU_HOST_BUF(buffer, i) =
- NSPU_NFP_BUF(desc, pcie_window_base, j);
- i += 8;
- }
- }
-
- return ret;
-}
-
-static int
-nspu_command(nspu_desc_t *desc, uint16_t cmd, int read, int write,
- void *buffer, size_t rsize, size_t wsize)
-{
- uint64_t status, cmd_reg;
- uint64_t offset;
- int retry = 0;
- int retries = 120;
- int ret = 0;
-
- /* Same expansion BAR is used for different things */
- nspu_xlate(desc, NSP_BASE, &offset);
-
- status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
-
- while ((status & 0x1) && (retry < retries)) {
- status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
- retry++;
- sleep(1);
- }
-
- if (retry == retries)
- return -1;
-
- if (write) {
- ret = nspu_buff_write(desc, buffer, wsize);
- if (ret)
- return ret;
-
- /* Expansion BAR changes when writing the buffer */
- nspu_xlate(desc, NSP_BASE, &offset);
- }
-
- NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND)) =
- (uint64_t)wsize << 32 | (uint64_t)cmd << 16 | 1;
-
- retry = 0;
-
- cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND));
- while ((cmd_reg & 0x1) && (retry < retries)) {
- cmd_reg = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_COMMAND));
- retry++;
- sleep(1);
- }
- if (retry == retries)
- return -1;
-
- retry = 0;
- status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
- while ((status & 0x1) && (retry < retries)) {
- status = NSP_REG_VAL(NSP_REG_ADDR(desc, offset, NSP_STATUS));
- retry++;
- sleep(1);
- }
-
- if (retry == retries)
- return -1;
-
- ret = status & (0xff << 8);
- if (ret)
- return ret;
-
- if (read) {
- ret = nspu_buff_read(desc, buffer, rsize);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int
-nfp_fw_reset(nspu_desc_t *nspu_desc)
-{
- int res;
-
- res = nspu_command(nspu_desc, NSP_CMD_RESET, 0, 0, 0, 0, 0);
-
- if (res < 0)
- RTE_LOG(INFO, PMD, "fw reset failed: error %d", res);
-
- return res;
-}
-
-#define DEFAULT_FW_PATH "/lib/firmware/netronome"
-#define DEFAULT_FW_FILENAME "nic_dpdk_default.nffw"
-
-static int
-nfp_fw_upload(nspu_desc_t *nspu_desc)
-{
- int fw_f;
- char *fw_buf;
- char filename[100];
- struct stat file_stat;
- off_t fsize, bytes;
- ssize_t size;
- int ret;
-
- size = nspu_desc->buf_size;
-
- sprintf(filename, "%s/%s", DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
- fw_f = open(filename, O_RDONLY);
- if (fw_f < 0) {
- RTE_LOG(INFO, PMD, "Firmware file %s/%s not found.",
- DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
- return -ENOENT;
- }
-
- if (fstat(fw_f, &file_stat) < 0) {
- RTE_LOG(INFO, PMD, "Firmware file %s/%s size is unknown",
- DEFAULT_FW_PATH, DEFAULT_FW_FILENAME);
- close(fw_f);
- return -ENOENT;
- }
-
- fsize = file_stat.st_size;
- RTE_LOG(DEBUG, PMD, "Firmware file with size: %" PRIu64 "\n",
- (uint64_t)fsize);
-
- if (fsize > (off_t)size) {
- RTE_LOG(INFO, PMD, "fw file too big: %" PRIu64
- " bytes (%" PRIu64 " max)",
- (uint64_t)fsize, (uint64_t)size);
- close(fw_f);
- return -EINVAL;
- }
-
- fw_buf = malloc((size_t)size);
- if (!fw_buf) {
- RTE_LOG(INFO, PMD, "malloc failed for fw buffer");
- close(fw_f);
- return -ENOMEM;
- }
- memset(fw_buf, 0, size);
-
- bytes = read(fw_f, fw_buf, fsize);
- if (bytes != fsize) {
- RTE_LOG(INFO, PMD, "Reading fw to buffer failed.\n"
- "Just %" PRIu64 " of %" PRIu64 " bytes read.",
- (uint64_t)bytes, (uint64_t)fsize);
- free(fw_buf);
- close(fw_f);
- return -EIO;
- }
-
- ret = nspu_command(nspu_desc, NSP_CMD_FW_LOAD, 0, 1, fw_buf, 0, bytes);
-
- free(fw_buf);
- close(fw_f);
-
- return ret;
-}
-
-/* Firmware symbol descriptor size */
-#define NFP_SYM_DESC_LEN 40
-
-#define SYMBOL_DATA(b, off) (*(int64_t *)((b) + (off)))
-#define SYMBOL_UDATA(b, off) (*(uint64_t *)((b) + (off)))
-
-/* Firmware symbols contain information about how to access what they
- * represent. It can be as simple as an numeric variable declared at a
- * specific NFP memory, but it can also be more complex structures and
- * related to specific hardware functionalities or components. Target,
- * domain and address allow to create the BAR window for accessing such
- * hw object and size defines the length to map.
- *
- * A vNIC is a network interface implemented inside the NFP and using a
- * subset of device PCI BARs. Specific firmware symbols allow to map those
- * vNIC bars by host drivers like the NFP PMD.
- *
- * Accessing what the symbol represents implies to map the access through
- * a PCI BAR window. NFP expansion BARs are used in this regard through
- * the NSPU interface.
- */
-static int
-nfp_nspu_set_bar_from_symbl(nspu_desc_t *desc, const char *symbl,
- uint32_t expbar, uint64_t *pcie_offset,
- ssize_t *size)
-{
- int64_t type;
- int64_t target;
- int64_t domain;
- uint64_t addr;
- char *sym_buf;
- int ret = 0;
-
- sym_buf = malloc(desc->buf_size);
- if (!sym_buf)
- return -ENOMEM;
-
- strncpy(sym_buf, symbl, strlen(symbl));
- ret = nspu_command(desc, NSP_CMD_GET_SYMBOL, 1, 1, sym_buf,
- NFP_SYM_DESC_LEN, strlen(symbl));
- if (ret) {
- RTE_LOG(DEBUG, PMD, "symbol resolution (%s) failed\n", symbl);
- goto clean;
- }
-
- /* Reading symbol information */
- type = SYMBOL_DATA(sym_buf, 0);
- target = SYMBOL_DATA(sym_buf, 8);
- domain = SYMBOL_DATA(sym_buf, 16);
- addr = SYMBOL_UDATA(sym_buf, 24);
- *size = (ssize_t)SYMBOL_UDATA(sym_buf, 32);
-
- if (type != 1) {
- RTE_LOG(INFO, PMD, "wrong symbol type\n");
- ret = -EINVAL;
- goto clean;
- }
- if (!(target == 7 || target == -7)) {
- RTE_LOG(INFO, PMD, "wrong symbol target\n");
- ret = -EINVAL;
- goto clean;
- }
- if (domain == 8 || domain == 9) {
- RTE_LOG(INFO, PMD, "wrong symbol domain\n");
- ret = -EINVAL;
- goto clean;
- }
-
- /* Adjusting address based on symbol location */
- if ((domain >= 24) && (domain < 28) && (target == 7)) {
- addr = 1ULL << 37 | addr | ((uint64_t)domain & 0x3) << 35;
- } else {
- addr = 1ULL << 39 | addr | ((uint64_t)domain & 0x3f) << 32;
- if (target == -7)
- target = 7;
- }
-
- /* Configuring NFP expansion bar for mapping specific PCI BAR window */
- nfp_nspu_mem_bar_cfg(desc, expbar, target, addr, pcie_offset);
-
- /* This is the PCI BAR offset to use by the host */
- *pcie_offset |= ((expbar & 0x7) << (desc->barsz - 3));
-
-clean:
- free(sym_buf);
- return ret;
-}
-
-int
-nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset)
-{
- ssize_t bar0_sym_size;
-
- /* If the symbol resolution works, it implies a firmware app
- * is already there.
- */
- if (!nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR,
- pcie_offset, &bar0_sym_size))
- return 0;
-
- /* No firmware app detected or not the right one */
- RTE_LOG(INFO, PMD, "No firmware detected. Resetting NFP...\n");
- if (nfp_fw_reset(desc) < 0) {
- RTE_LOG(ERR, PMD, "nfp fw reset failed\n");
- return -ENODEV;
- }
-
- RTE_LOG(INFO, PMD, "Reset done.\n");
- RTE_LOG(INFO, PMD, "Uploading firmware...\n");
-
- if (nfp_fw_upload(desc) < 0) {
- RTE_LOG(ERR, PMD, "nfp fw upload failed\n");
- return -ENODEV;
- }
-
- RTE_LOG(INFO, PMD, "Done.\n");
-
- /* Now the symbol should be there */
- if (nfp_nspu_set_bar_from_symbl(desc, sym, NFP_NET_PF_CFG_EXP_BAR,
- pcie_offset, &bar0_sym_size)) {
- RTE_LOG(ERR, PMD, "nfp PF BAR symbol resolution failed\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-int
-nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset)
-{
- ssize_t bar0_sym_size;
-
- if (nfp_nspu_set_bar_from_symbl(desc, "_pf0_net_bar0",
- NFP_NET_PF_CFG_EXP_BAR,
- pcie_offset, &bar0_sym_size))
- return -ENODEV;
-
- return 0;
-}
-
-/*
- * This is a hardcoded fixed NFP internal CPP bus address for the hw queues unit
- * inside the PCIE island.
- */
-#define NFP_CPP_PCIE_QUEUES ((uint64_t)(1ULL << 39) | 0x80000 | \
- ((uint64_t)0x4 & 0x3f) << 32)
-
-/* Configure a specific NFP expansion bar for accessing the vNIC rx/tx BARs */
-void
-nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset)
-{
- nfp_nspu_mem_bar_cfg(desc, NFP_NET_PF_HW_QUEUES_EXP_BAR, 0,
- NFP_CPP_PCIE_QUEUES, pcie_offset);
-
- /* This is the pcie offset to use by the host */
- *pcie_offset |= ((NFP_NET_PF_HW_QUEUES_EXP_BAR & 0x7) << (27 - 3));
-}
-
-int
-nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up)
-{
- union eth_table_entry *entries, *entry;
- int modified;
- int ret, idx;
- int i;
-
- idx = port;
-
- RTE_LOG(INFO, PMD, "Hw ethernet port %d configure...\n", port);
- rte_spinlock_lock(&desc->nsp_lock);
- entries = malloc(NSP_ETH_TABLE_SIZE);
- if (!entries) {
- rte_spinlock_unlock(&desc->nsp_lock);
- return -ENOMEM;
- }
-
- ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, entries,
- NSP_ETH_TABLE_SIZE, 0);
- if (ret) {
- rte_spinlock_unlock(&desc->nsp_lock);
- free(entries);
- return ret;
- }
-
- entry = entries;
-
- for (i = 0; i < NSP_ETH_MAX_COUNT; i++) {
- /* ports in use do not appear sequentially in the table */
- if (!(entry->port & NSP_ETH_PORT_LANES_MASK)) {
- /* entry not in use */
- entry++;
- continue;
- }
- if (idx == 0)
- break;
- idx--;
- entry++;
- }
-
- if (i == NSP_ETH_MAX_COUNT) {
- rte_spinlock_unlock(&desc->nsp_lock);
- free(entries);
- return -EINVAL;
- }
-
- if (up && !(entry->state & NSP_ETH_STATE_CONFIGURED)) {
- entry->control |= NSP_ETH_STATE_CONFIGURED;
- modified = 1;
- }
-
- if (!up && (entry->state & NSP_ETH_STATE_CONFIGURED)) {
- entry->control &= ~NSP_ETH_STATE_CONFIGURED;
- modified = 1;
- }
-
- if (modified) {
- ret = nspu_command(desc, NSP_CMD_WRITE_ETH_TABLE, 0, 1, entries,
- 0, NSP_ETH_TABLE_SIZE);
- if (!ret)
- RTE_LOG(INFO, PMD,
- "Hw ethernet port %d configure done\n", port);
- else
- RTE_LOG(INFO, PMD,
- "Hw ethernet port %d configure failed\n", port);
- }
- rte_spinlock_unlock(&desc->nsp_lock);
- free(entries);
- return ret;
-}
-
-int
-nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table)
-{
- int ret;
-
- if (!table)
- return -EINVAL;
-
- RTE_LOG(INFO, PMD, "Reading hw ethernet table...\n");
-
- /* port 0 allocates the eth table and read it using NSPU */
- *table = malloc(NSP_ETH_TABLE_SIZE);
- if (!*table)
- return -ENOMEM;
-
- ret = nspu_command(desc, NSP_CMD_READ_ETH_TABLE, 1, 0, *table,
- NSP_ETH_TABLE_SIZE, 0);
- if (ret)
- return ret;
-
- RTE_LOG(INFO, PMD, "Done\n");
-
- return 0;
-}
diff --git a/drivers/net/nfp/nfp_nspu.h b/drivers/net/nfp/nfp_nspu.h
deleted file mode 100644
index 8c33835e..00000000
--- a/drivers/net/nfp/nfp_nspu.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2017 Netronome Systems, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution
- *
- * 3. Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * vim:shiftwidth=8:noexpandtab
- *
- * @file dpdk/pmd/nfp_nspu.h
- *
- * Netronome NFP_NET PDM driver
- */
-
-/*
- * NSP is the NFP Service Processor. NSPU is NSP Userspace interface.
- *
- * NFP NSP helps with firmware/hardware configuration. NSP is another component
- * in NFP programmable processor and accessing it from host requires to firstly
- * configure a specific NFP PCI expansion BAR.
- *
- * Once access is ready, configuration can be done reading and writing
- * from/to a specific PF PCI BAR window. This same interface will allow to
- * create other PCI BAR windows for accessing other NFP components.
- *
- * This file includes low-level functions, using the NSPU interface, and high
- * level functions, invoked by the PMD for using NSP services. This allows
- * firmware upload, vNIC PCI BARs mapping and other low-level configurations
- * like link setup.
- *
- * NSP access is done during initialization and it is not involved at all with
- * the fast path.
- */
-
-#include <rte_spinlock.h>
-#include "nfp_net_eth.h"
-
-typedef struct {
- int nfp; /* NFP device */
- int pcie_bar; /* PF PCI BAR to work with */
- int exp_bar; /* Expansion BAR number used by NSPU */
- int barsz; /* PCIE BAR log2 size */
- uint64_t bufaddr; /* commands buffer address */
- size_t buf_size; /* commands buffer size */
- uint64_t windowsz; /* NSPU BAR window size */
- void *cfg_base; /* Expansion BARs address */
- void *mem_base; /* NSP interface */
- rte_spinlock_t nsp_lock;
-} nspu_desc_t;
-
-int nfp_nspu_init(nspu_desc_t *desc, int nfp, int pcie_bar, size_t pcie_barsz,
- int exp_bar, void *exp_bar_cfg_base, void *exp_bar_mmap);
-int nfp_nsp_get_abi_version(nspu_desc_t *desc, int *major, int *minor);
-int nfp_nsp_fw_setup(nspu_desc_t *desc, const char *sym, uint64_t *pcie_offset);
-int nfp_nsp_map_ctrl_bar(nspu_desc_t *desc, uint64_t *pcie_offset);
-void nfp_nsp_map_queues_bar(nspu_desc_t *desc, uint64_t *pcie_offset);
-int nfp_nsp_eth_config(nspu_desc_t *desc, int port, int up);
-int nfp_nsp_eth_read_table(nspu_desc_t *desc, union eth_table_entry **table);
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
new file mode 100644
index 00000000..6e380cca
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_cppat.h
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CPPAT_H__
+#define __NFP_CPPAT_H__
+
+#include "nfp_platform.h"
+#include "nfp_resid.h"
+
+/* This file contains helpers for creating CPP commands
+ *
+ * All magic NFP-6xxx IMB 'mode' numbers here are from:
+ * Databook (1 August 2013)
+ * - System Overview and Connectivity
+ * -- Internal Connectivity
+ * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus
+ * ---- CPP addressing
+ * ----- Table 3.6. CPP Address Translation Mode Commands
+ */
+
+#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2
+
+static inline int
+_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0);
+
+static uint64_t
+_nic_mask64(int msb, int lsb, int at0)
+{
+ uint64_t v;
+ int w = msb - lsb + 1;
+
+ if (w == 64)
+ return ~(uint64_t)0;
+
+ if ((lsb + w) > 64)
+ return 0;
+
+ v = (UINT64_C(1) << w) - 1;
+
+ if (at0)
+ return v;
+
+ return v << lsb;
+}
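+/*
+ * Worked example, not part of the upstream patch: _nic_mask64() builds
+ * a mask covering bits msb..lsb inclusive; with at0 set, the mask is
+ * returned shifted down to bit 0. For the 40-bit addressing case used
+ * below (iid_lsb == 34):
+ *
+ *   _nic_mask64(39, 34, 0) == 0x000000FC00000000   // bits 39:34
+ *   _nic_mask64(39, 34, 1) == 0x000000000000003F   // same width at bit 0
+ */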
+
+/* For VQDR, we may not modify the Channel bits, which might overlap
+ * with the Index bit. When it does, we need to ensure that isld0 == isld1.
+ */
+static inline int
+_nfp6000_encode_basic(uint64_t *addr, int dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0)
+{
+ uint64_t _u64;
+ int iid_lsb, idx_lsb;
+ int i, v = 0;
+ int isld[2];
+
+ isld[0] = isld0;
+ isld[1] = isld1;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_MU:
+ /* This function doesn't handle MU */
+ return NFP_ERRNO(EINVAL);
+ case NFP6000_CPPTGT_CTXPB:
+ /* This function doesn't handle CTXPB */
+ return NFP_ERRNO(EINVAL);
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case 0:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /*
+ * In this specific mode we'd rather not modify the
+ * address but we can verify if the existing contents
+ * will point to a valid island.
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1,
+ isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 34 : 26;
+
+ /* <39:34> or <31:26> */
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ case 1:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 39 : 31;
+ if (dest_island == isld0) {
+ /* Only need to clear the Index bit */
+ *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0);
+ return 0;
+ }
+
+ if (dest_island == isld1) {
+ /* Only need to set the Index bit */
+ *addr |= (UINT64_C(1) << idx_lsb);
+ return 0;
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 2:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /* iid<0> = addr<30> = channel<0> */
+ /* channel<1> = addr<31> = Index */
+
+ /*
+ * Special case where we allow channel bits to be set
+ * before hand and with them select an island.
+ * So we need to confirm that it's at least plausible.
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ /*
+ * If dest_island is invalid, the current address won't
+ * go where expected.
+ */
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+	 */
+ isld[0] &= ~1;
+ isld[1] &= ~1;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 1;
+
+ /*
+ * Try each option, take first one that fits. Not sure if we
+ * would want to do some smarter searching and prefer 0 or non-0
+ * island IDs.
+ */
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 2; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 3:
+ if (cpp_tgt == NFP6000_CPPTGT_VQDR && !addr40) {
+ /*
+ * iid<0> = addr<29> = data
+ * iid<1> = addr<30> = channel<0>
+ * channel<1> = addr<31> = Index
+ */
+ i = _nfp6000_decode_basic(*addr, &v, cpp_tgt, mode,
+ addr40, isld1, isld0);
+ if (i != 0)
+ /* Full Island ID and channel bits overlap */
+ return i;
+
+ if (dest_island != -1 && dest_island != v)
+ return NFP_ERRNO(EINVAL);
+
+ /* If dest_island was -1, we don't care */
+ return 0;
+ }
+
+ isld[0] &= ~3;
+ isld[1] &= ~3;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 2;
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 4; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+ return NFP_ERRNO(ENODEV);
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_decode_basic(uint64_t addr, int *dest_island, int cpp_tgt, int mode,
+ int addr40, int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_MU:
+ /* This function doesn't handle MU */
+ return NFP_ERRNO(EINVAL);
+ case NFP6000_CPPTGT_CTXPB:
+ /* This function doesn't handle CTXPB */
+ return NFP_ERRNO(EINVAL);
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case 0:
+ /*
+ * For VQDR, in this mode for 32-bit addressing it would be
+ * islands 0, 16, 32 and 48 depending on channel and upper
+ * address bits. Since those are not all valid islands, most
+ * decode cases would result in bad island IDs, but we do them
+ * anyway since this is decoding an address that is already
+ * assumed to be used as-is to get to sram.
+ */
+ iid_lsb = (addr40) ? 34 : 26;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ case 1:
+ /*
+ * For VQDR 32-bit, this would decode as:
+ * Channel 0: island#0
+ * Channel 1: island#0
+ * Channel 2: island#1
+ * Channel 3: island#1
+ *
+ * That would be valid as long as both islands have VQDR.
+ * Let's allow this.
+ */
+
+ idx_lsb = (addr40) ? 39 : 31;
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1;
+ else
+ *dest_island = isld0;
+
+ return 0;
+ case 2:
+ /*
+ * For VQDR 32-bit:
+ * Channel 0: (island#0 | 0)
+ * Channel 1: (island#0 | 1)
+ * Channel 2: (island#1 | 0)
+ * Channel 3: (island#1 | 1)
+ *
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld0 &= ~1;
+ isld1 &= ~1;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 1;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 1);
+
+ return 0;
+ case 3:
+ /*
+ * In this mode the data address starts to affect the island ID
+ * so rather not allow it. In some really specific case one
+ * could use this to send the upper half of the VQDR channel to
+ * another MU, but this is getting very specific. However, as
+ * above for mode 0, this is the decoder and the caller should
+ * validate the resulting IID. This blindly does what the
+ * silicon would do.
+ */
+
+ isld0 &= ~3;
+ isld1 &= ~3;
+
+ idx_lsb = (addr40) ? 39 : 31;
+ iid_lsb = idx_lsb - 2;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 3);
+
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_cppat_mu_locality_lsb(int mode, int addr40)
+{
+ switch (mode) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ return (addr40) ? 38 : 30;
+ default:
+ break;
+ }
+ return NFP_ERRNO(EINVAL);
+}
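+/*
+ * Editor's note, not part of the upstream patch: for memory-unit
+ * addresses the locality field sits just below the island/index bits,
+ * at bit 38 for 40-bit addressing and bit 30 for 32-bit addressing.
+ * The encode/decode helpers below derive direct-access mode from it:
+ *
+ *   da = (((addr >> locality_lsb) & 3) ==
+ *         _NIC_NFP6000_MU_LOCALITY_DIRECT);
+ */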
+
+static inline int
+_nfp6000_encode_mu(uint64_t *addr, int dest_island, int mode, int addr40,
+ int isld1, int isld0)
+{
+ uint64_t _u64;
+ int iid_lsb, idx_lsb, locality_lsb;
+ int i, v;
+ int isld[2];
+ int da;
+
+ isld[0] = isld0;
+ isld[1] = isld1;
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+
+ if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
+ da = 1;
+ else
+ da = 0;
+
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 37 : 29;
+ if (dest_island == isld0) {
+ *addr &= ~_nic_mask64(idx_lsb, idx_lsb, 0);
+ return 0;
+ }
+
+ if (dest_island == isld1) {
+ *addr |= (UINT64_C(1) << idx_lsb);
+ return 0;
+ }
+
+ return NFP_ERRNO(ENODEV);
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld[0] &= ~1;
+ isld[1] &= ~1;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 1;
+
+ /*
+		 * Try each option and take the first one that fits. It is
+		 * not clear whether smarter searching, preferring 0 or non-0
+		 * island IDs, would be worthwhile.
+ */
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 2; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+ return NFP_ERRNO(ENODEV);
+ case 3:
+ /*
+ * Only the EMU will use 40 bit addressing. Silently set the
+ * direct locality bit for everyone else. The SDK toolchain
+ * uses dest_island <= 0 to test for atypical address encodings
+		 * to support access to local-island CTM with a 32-bit address
+ * (high-locality is effectively ignored and just used for
+ * routing to island #0).
+ */
+ if (dest_island > 0 &&
+ (dest_island < 24 || dest_island > 26)) {
+ *addr |= ((uint64_t)_NIC_NFP6000_MU_LOCALITY_DIRECT)
+ << locality_lsb;
+ da = 1;
+ }
+
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ _u64 = _nic_mask64((iid_lsb + 5), iid_lsb, 0);
+ *addr &= ~_u64;
+ *addr |= (((uint64_t)dest_island) << iid_lsb) & _u64;
+ return 0;
+ }
+
+ isld[0] &= ~3;
+ isld[1] &= ~3;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 2;
+
+ for (i = 0; i < 2; i++) {
+ for (v = 0; v < 4; v++) {
+ if (dest_island != (isld[i] | v))
+ continue;
+ *addr &= ~_nic_mask64(idx_lsb, iid_lsb, 0);
+ *addr |= (((uint64_t)i) << idx_lsb);
+ *addr |= (((uint64_t)v) << iid_lsb);
+ return 0;
+ }
+ }
+
+ return NFP_ERRNO(ENODEV);
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_decode_mu(uint64_t addr, int *dest_island, int mode, int addr40,
+ int isld1, int isld0)
+{
+ int iid_lsb, idx_lsb, locality_lsb;
+ int da;
+
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+
+ if (((addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
+ da = 1;
+ else
+ da = 0;
+
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+
+ idx_lsb = (addr40) ? 37 : 29;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1;
+ else
+ *dest_island = isld0;
+
+ return 0;
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+ /*
+ * Make sure we compare against isldN values by clearing the
+ * LSB. This is what the silicon does.
+ */
+ isld0 &= ~1;
+ isld1 &= ~1;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 1;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 1);
+
+ return 0;
+ case 3:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *dest_island = (int)(addr >> iid_lsb) & 0x3F;
+ return 0;
+ }
+
+ isld0 &= ~3;
+ isld1 &= ~3;
+
+ idx_lsb = (addr40) ? 37 : 29;
+ iid_lsb = idx_lsb - 2;
+
+ if (addr & _nic_mask64(idx_lsb, idx_lsb, 0))
+ *dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
+ else
+ *dest_island = isld0 | (int)((addr >> iid_lsb) & 3);
+
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_cppat_addr_encode(uint64_t *addr, int dest_island, int cpp_tgt,
+ int mode, int addr40, int isld1, int isld0)
+{
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ return _nfp6000_encode_basic(addr, dest_island, cpp_tgt, mode,
+ addr40, isld1, isld0);
+
+ case NFP6000_CPPTGT_MU:
+ return _nfp6000_encode_mu(addr, dest_island, mode, addr40,
+ isld1, isld0);
+
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return NFP_ERRNO(EINVAL);
+
+ *addr &= ~_nic_mask64(29, 24, 0);
+ *addr |= (((uint64_t)dest_island) << 24) &
+ _nic_mask64(29, 24, 0);
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+static inline int
+_nfp6000_cppat_addr_decode(uint64_t addr, int *dest_island, int cpp_tgt,
+ int mode, int addr40, int isld1, int isld0)
+{
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ return _nfp6000_decode_basic(addr, dest_island, cpp_tgt, mode,
+ addr40, isld1, isld0);
+
+ case NFP6000_CPPTGT_MU:
+ return _nfp6000_decode_mu(addr, dest_island, mode, addr40,
+ isld1, isld0);
+
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return -EINVAL;
+ *dest_island = (int)(addr >> 24) & 0x3F;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+_nfp6000_cppat_addr_iid_clear(uint64_t *addr, int cpp_tgt, int mode, int addr40)
+{
+ int iid_lsb, locality_lsb, da;
+
+ switch (cpp_tgt) {
+ case NFP6000_CPPTGT_NBI:
+ case NFP6000_CPPTGT_VQDR:
+ case NFP6000_CPPTGT_ILA:
+ case NFP6000_CPPTGT_PCIE:
+ case NFP6000_CPPTGT_ARM:
+ case NFP6000_CPPTGT_CRYPTO:
+ case NFP6000_CPPTGT_CLS:
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 34 : 26;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ case 1:
+ iid_lsb = (addr40) ? 39 : 31;
+ *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0);
+ return 0;
+ case 2:
+ iid_lsb = (addr40) ? 38 : 30;
+ *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0);
+ return 0;
+ case 3:
+ iid_lsb = (addr40) ? 37 : 29;
+ *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0);
+ return 0;
+ default:
+ break;
+		}
+		break;
+ case NFP6000_CPPTGT_MU:
+ locality_lsb = _nfp6000_cppat_mu_locality_lsb(mode, addr40);
+ da = (((*addr >> locality_lsb) & 3) ==
+ _NIC_NFP6000_MU_LOCALITY_DIRECT);
+ switch (mode) {
+ case 0:
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ case 1:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+ iid_lsb = (addr40) ? 37 : 29;
+ *addr &= ~_nic_mask64(iid_lsb, iid_lsb, 0);
+ return 0;
+ case 2:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 36 : 28;
+ *addr &= ~_nic_mask64(iid_lsb + 1, iid_lsb, 0);
+ return 0;
+ case 3:
+ if (da) {
+ iid_lsb = (addr40) ? 32 : 24;
+ *addr &= ~(UINT64_C(0x3F) << iid_lsb);
+ return 0;
+ }
+
+ iid_lsb = (addr40) ? 35 : 27;
+ *addr &= ~_nic_mask64(iid_lsb + 2, iid_lsb, 0);
+ return 0;
+ default:
+ break;
+		}
+		break;
+ case NFP6000_CPPTGT_CTXPB:
+ if (mode != 1 || addr40 != 0)
+ return 0;
+ *addr &= ~(UINT64_C(0x3F) << 24);
+ return 0;
+ default:
+ break;
+ }
+
+ return NFP_ERRNO(EINVAL);
+}
+
+#endif /* __NFP_CPPAT_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
new file mode 100644
index 00000000..d46574b1
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_platform.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_PLATFORM_H__
+#define __NFP_PLATFORM_H__
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <limits.h>
+#include <errno.h>
+
+#ifndef BIT_ULL
+#define BIT(x) (1 << (x))
+#define BIT_ULL(x) (1ULL << (x))
+#endif
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#define NFP_ERRNO(err) (errno = (err), -1)
+#define NFP_ERRNO_RET(err, ret) (errno = (err), (ret))
+#define NFP_NOERR(errv) (errno)
+#define NFP_ERRPTR(err) (errno = (err), NULL)
+#define NFP_PTRERR(errv) (errno)
+
+#endif /* __NFP_PLATFORM_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
new file mode 100644
index 00000000..0e03948e
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp-common/nfp_resid.h
@@ -0,0 +1,592 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_RESID_H__
+#define __NFP_RESID_H__
+
+#if (!defined(_NFP_RESID_NO_C_FUNC) && \
+ (defined(__NFP_TOOL_NFCC) || defined(__NFP_TOOL_NFAS)))
+#define _NFP_RESID_NO_C_FUNC
+#endif
+
+#ifndef _NFP_RESID_NO_C_FUNC
+#include "nfp_platform.h"
+#endif
+
+/*
+ * NFP Chip Architectures
+ *
+ * These are semi-arbitrary values to indicate an NFP architecture.
+ * They serve as a software view of a group of chip families, not necessarily a
+ * direct mapping to actual hardware design.
+ */
+#define NFP_CHIP_ARCH_YD 1
+#define NFP_CHIP_ARCH_TH 2
+
+/*
+ * NFP Chip Families.
+ *
+ * These are not enums, because they need to be microcode compatible.
+ * They are also not maskable.
+ *
+ * Note: The NFP-4xxx family is handled as NFP-6xxx in most software
+ * components.
+ *
+ */
+#define NFP_CHIP_FAMILY_NFP6000 0x6000 /* ARCH_TH */
+
+/* NFP Microengine/Flow Processing Core Versions */
+#define NFP_CHIP_ME_VERSION_2_7 0x0207
+#define NFP_CHIP_ME_VERSION_2_8 0x0208
+#define NFP_CHIP_ME_VERSION_2_9 0x0209
+
+/* NFP Chip Base Revisions. Minor stepping can just be added to these */
+#define NFP_CHIP_REVISION_A0 0x00
+#define NFP_CHIP_REVISION_B0 0x10
+#define NFP_CHIP_REVISION_C0 0x20
+#define NFP_CHIP_REVISION_PF 0xff /* Maximum possible revision */
+
+/* CPP Targets for each chip architecture */
+#define NFP6000_CPPTGT_NBI 1
+#define NFP6000_CPPTGT_VQDR 2
+#define NFP6000_CPPTGT_ILA 6
+#define NFP6000_CPPTGT_MU 7
+#define NFP6000_CPPTGT_PCIE 9
+#define NFP6000_CPPTGT_ARM 10
+#define NFP6000_CPPTGT_CRYPTO 12
+#define NFP6000_CPPTGT_CTXPB 14
+#define NFP6000_CPPTGT_CLS 15
+
+/*
+ * Wildcard indicating a CPP read or write action
+ *
+ * The action used will be either read or write depending on whether a read or
+ * write instruction/call is performed on the NFP_CPP_ID. It is recommended that
+ * the RW action is used even if all actions to be performed on a NFP_CPP_ID are
+ * known to be only reads or writes. Doing so will in many cases save NFP CPP
+ * internal software resources.
+ */
+#define NFP_CPP_ACTION_RW 32
+
+#define NFP_CPP_TARGET_ID_MASK 0x1f
+
+/*
+ * NFP_CPP_ID - pack target, token, and action into a CPP ID.
+ *
+ * Create a 32-bit CPP identifier representing the access to be made.
+ * These identifiers are used as parameters to other NFP CPP functions. Some
+ * CPP devices may allow wildcard identifiers to be specified.
+ *
+ * @param[in] target NFP CPP target id
+ * @param[in] action NFP CPP action id
+ * @param[in] token NFP CPP token id
+ * @return NFP CPP ID
+ */
+#define NFP_CPP_ID(target, action, token) \
+ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
+ (((action) & 0xff) << 8))
+
+#define NFP_CPP_ISLAND_ID(target, action, token, island) \
+ ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \
+ (((action) & 0xff) << 8) | (((island) & 0xff) << 0))
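+
+/*
+ * Example (illustrative, not part of the API): a CPP ID for the MU target
+ * with the read/write wildcard action and token 0 would be
+ *
+ *   NFP_CPP_ID(NFP6000_CPPTGT_MU, NFP_CPP_ACTION_RW, 0) == 0x07002000
+ *
+ * with target 7 in bits 30:24 and action 32 in bits 15:8. The accessor
+ * functions below recover the fields, e.g. NFP_CPP_ID_TARGET_of() == 7 and
+ * NFP_CPP_ID_ACTION_of() == 32.
+ */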
+
+#ifndef _NFP_RESID_NO_C_FUNC
+
+/**
+ * Return the NFP CPP target of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP target
+ */
+static inline uint8_t
+NFP_CPP_ID_TARGET_of(uint32_t id)
+{
+ return (id >> 24) & NFP_CPP_TARGET_ID_MASK;
+}
+
+/*
+ * Return the NFP CPP token of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP token
+ */
+static inline uint8_t
+NFP_CPP_ID_TOKEN_of(uint32_t id)
+{
+ return (id >> 16) & 0xff;
+}
+
+/*
+ * Return the NFP CPP action of a NFP CPP ID
+ * @param[in] id NFP CPP ID
+ * @return NFP CPP action
+ */
+static inline uint8_t
+NFP_CPP_ID_ACTION_of(uint32_t id)
+{
+ return (id >> 8) & 0xff;
+}
+
+/*
+ * Return the NFP CPP island of a NFP CPP ID
+ * @param[in]   id      NFP CPP ID
+ * @return      NFP CPP island
+ */
+static inline uint8_t
+NFP_CPP_ID_ISLAND_of(uint32_t id)
+{
+ return (id) & 0xff;
+}
+
+#endif /* _NFP_RESID_NO_C_FUNC */
+
+/*
+ * Check if @p chip_family is an ARCH_TH chip.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_IS_ARCH_TH(chip_family) \
+ ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000)
+
+/*
+ * Get the NFP_CHIP_ARCH_* of @p chip_family.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_ARCH(x) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ (NFP_FAMILY_IS_ARCH_TH(_x) ? NFP_CHIP_ARCH_TH : \
+		NFP_FAMILY_IS_ARCH_YD(_x) ? NFP_CHIP_ARCH_YD : -1); \
+ }))
+
+/*
+ * Check if @p chip_family is an NFP-6xxx chip.
+ * @param chip_family One of NFP_CHIP_FAMILY_*
+ */
+#define NFP_FAMILY_IS_NFP6000(chip_family) \
+ ((int)(chip_family) == (int)NFP_CHIP_FAMILY_NFP6000)
+
+/*
+ * Make microengine ID for NFP-6xxx.
+ * @param island_id Island ID.
+ * @param menum ME number, 0 based, within island.
+ *
+ * NOTE: menum should really be unsigned - the MSC compiler throws an error
+ * (not a warning) if a clause is always true, i.e. menum >= 0 when menum is
+ * of unsigned type, hence the cast of menum to an int in that particular
+ * clause.
+ */
+#define NFP6000_MEID(a, b) \
+ (__extension__ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ (((((int)(_a) & 0x3F) == (int)(_a)) && \
+ (((int)(_b) >= 0) && ((int)(_b) < 12))) ? \
+		(int)(((_a) << 4) | ((_b) + 4)) : -1); \
+ }))
+
+/*
+ * Do a general sanity check on the ME ID.
+ * The check is on the highest possible island ID for the chip family and the
+ * microengine number must be a master ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_IS_VALID(meid) \
+ (__extension__ ({ \
+ typeof(meid) _a = (meid); \
+ ((((_a) >> 4) < 64) && (((_a) >> 4) >= 0) && \
+		(((_a) & 0xF) >= 4)); \
+ }))
+
+/*
+ * Extract island ID from ME ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_ISLAND_of(meid) (((meid) >> 4) & 0x3F)
+
+/*
+ * Extract microengine number (0 based) from ME ID.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_MENUM_of(meid) (((meid) & 0xF) - 4)
+
+/*
+ * Extract microengine group number (0 based) from ME ID.
+ * The group is two code-sharing microengines, so group 0 refers to MEs 0,1,
+ * group 1 refers to MEs 2,3 etc.
+ * @param meid ME ID as created by NFP6000_MEID
+ */
+#define NFP6000_MEID_MEGRP_of(meid) (NFP6000_MEID_MENUM_of(meid) >> 1)
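+
+/*
+ * Worked example (illustrative): NFP6000_MEID(32, 5) yields 0x209
+ * ((32 << 4) | (5 + 4)), and the accessors recover the parts:
+ * NFP6000_MEID_ISLAND_of(0x209) == 32, NFP6000_MEID_MENUM_of(0x209) == 5,
+ * NFP6000_MEID_MEGRP_of(0x209) == 2.
+ */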
+
+#ifndef _NFP_RESID_NO_C_FUNC
+
+/*
+ * Convert a string to an ME ID.
+ *
+ * @param s A string of format iX.meY
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME ID part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return ME ID on success, -1 on error.
+ */
+int nfp6000_idstr2meid(const char *s, const char **endptr);
+
+/*
+ * Extract island ID from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2island("i32.me5", &c);
+ * // val == 32, c == "me5"
+ * val = nfp6000_idstr2island("i32", &c);
+ * // val == 32, c == ""
+ *
+ * @param s A string of format "iX.anything" or "iX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the island part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the island ID, -1 on error.
+ */
+int nfp6000_idstr2island(const char *s, const char **endptr);
+
+/*
+ * Extract microengine number from string.
+ *
+ * Example:
+ * char *c;
+ * int menum = nfp6000_idstr2menum("me5.anything", &c);
+ * // menum == 5, c == "anything"
+ * menum = nfp6000_idstr2menum("me5", &c);
+ * // menum == 5, c == ""
+ *
+ * @param s A string of format "meX.anything" or "meX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME number, -1 on error.
+ */
+int nfp6000_idstr2menum(const char *s, const char **endptr);
+
+/*
+ * Extract context number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2ctxnum("ctx5.anything", &c);
+ * // val == 5, c == "anything"
+ * val = nfp6000_idstr2ctxnum("ctx5", &c);
+ * // val == 5, c == ""
+ *
+ * @param s A string of format "ctxN.anything" or "ctxN"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the context number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the context number, -1 on error.
+ */
+int nfp6000_idstr2ctxnum(const char *s, const char **endptr);
+
+/*
+ * Extract microengine group number from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp6000_idstr2megrp("tg2.anything", &c);
+ * // val == 2, c == "anything"
+ *     val = nfp6000_idstr2megrp("tg5", &c);
+ *     // val == 5, c == ""
+ *
+ * @param s A string of format "tgX.anything" or "tgX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME group part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME group number, -1 on error.
+ */
+int nfp6000_idstr2megrp(const char *s, const char **endptr);
+
+/*
+ * Create ME ID string of format "iX[.meY]".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_meid2str(char *s, int meid);
+
+/*
+ * Create ME ID string of format "name[.meY]" or "iX[.meY]".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ *
+ * Similar to nfp6000_meid2str() except use an alias instead of "iX"
+ * if one exists for the island.
+ */
+const char *nfp6000_meid2altstr(char *s, int meid);
+
+/*
+ * Create string of format "iX".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_island2str(char *s, int island_id);
+
+/*
+ * Create string of format "name", an island alias.
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_island2altstr(char *s, int island_id);
+
+/*
+ * Create string of format "meY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param menum Microengine number within island.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_menum2str(char *s, int menum);
+
+/*
+ * Create string of format "ctxY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param ctxnum Context number within microengine.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_ctxnum2str(char *s, int ctxnum);
+
+/*
+ * Create string of format "tgY".
+ *
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param megrp Microengine group number within cluster.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp6000_megrp2str(char *s, int megrp);
+
+/*
+ * Convert a string to an ME ID.
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format iX.meY (or clX.meY)
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ * string after the ME ID part of the string, which
+ * is either an empty string or the first character
+ * after the separating period.
+ * @return ME ID on success, -1 on error.
+ */
+int nfp_idstr2meid(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract island ID from string.
+ *
+ * Example:
+ * char *c;
+ * int val = nfp_idstr2island(chip, "i32.me5", &c);
+ * // val == 32, c == "me5"
+ * val = nfp_idstr2island(chip, "i32", &c);
+ * // val == 32, c == ""
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format "iX.anything" or "iX"
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ *                     string after the island part of the string, which
+ *                     is either an empty string or the first character
+ *                     after the separating period.
+ * @return      The island ID on success, -1 on error.
+ */
+int nfp_idstr2island(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract microengine number from string.
+ *
+ * Example:
+ * char *c;
+ *  int menum = nfp_idstr2menum(chip, "me5.anything", &c);
+ *  // menum == 5, c == "anything"
+ *  menum = nfp_idstr2menum(chip, "me5", &c);
+ * // menum == 5, c == ""
+ *
+ * @param chip_family Chip family ID
+ * @param s A string of format "meX.anything" or "meX"
+ * @param endptr If non-NULL, *endptr will point to the trailing
+ *                     string after the ME number part of the string, which
+ *                     is either an empty string or the first character
+ *                     after the separating period.
+ * @return      The ME number on success, -1 on error.
+ */
+int nfp_idstr2menum(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract context number from string.
+ *
+ * Example:
+ * char *c;
+ *  int val = nfp_idstr2ctxnum(chip, "ctx5.anything", &c);
+ *  // val == 5, c == "anything"
+ *  val = nfp_idstr2ctxnum(chip, "ctx5", &c);
+ * // val == 5, c == ""
+ *
+ * @param chip_family Chip family ID
+ * @param s       A string of format "ctxN.anything" or "ctxN"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the context number part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the context number, -1 on error.
+ */
+int nfp_idstr2ctxnum(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Extract microengine group number from string.
+ *
+ * Example:
+ * char *c;
+ *  int val = nfp_idstr2megrp(chip, "tg2.anything", &c);
+ *  // val == 2, c == "anything"
+ *  val = nfp_idstr2megrp(chip, "tg5", &c);
+ * // val == 5, c == ""
+ *
+ * @param chip_family Chip family ID
+ * @param s       A string of format "tgX.anything" or "tgX"
+ * @param endptr If non-NULL, *endptr will point to the trailing string
+ * after the ME group part of the string, which is either
+ * an empty string or the first character after the separating
+ * period.
+ * @return If successful, the ME group number, -1 on error.
+ */
+int nfp_idstr2megrp(int chip_family, const char *s, const char **endptr);
+
+/*
+ * Create ME ID string of format "iX[.meY]".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_meid2str(int chip_family, char *s, int meid);
+
+/*
+ * Create ME ID string of format "name[.meY]" or "iX[.meY]".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param meid Microengine ID.
+ * @return Pointer to "s" on success, NULL on error.
+ *
+ * Similar to nfp_meid2str() except use an alias instead of "iX"
+ * if one exists for the island.
+ */
+const char *nfp_meid2altstr(int chip_family, char *s, int meid);
+
+/*
+ * Create string of format "iX".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_island2str(int chip_family, char *s, int island_id);
+
+/*
+ * Create string of format "name", an island alias.
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param island_id Island ID.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_island2altstr(int chip_family, char *s, int island_id);
+
+/*
+ * Create string of format "meY".
+ *
+ * @param chip_family Chip family ID
+ * @param s Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param menum Microengine number within island.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_menum2str(int chip_family, char *s, int menum);
+
+/*
+ * Create string of format "ctxY".
+ *
+ * @param chip_family Chip family ID
+ * @param s       Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param ctxnum Context number within microengine.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_ctxnum2str(int chip_family, char *s, int ctxnum);
+
+/*
+ * Create string of format "tgY".
+ *
+ * @param chip_family Chip family ID
+ * @param s       Pointer to char buffer of size NFP_MEID_STR_SZ.
+ * The resulting string is output here.
+ * @param megrp Microengine group number within cluster.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_megrp2str(int chip_family, char *s, int megrp);
+
+/*
+ * Convert a two character string to revision number.
+ *
+ * Revision integer is 0x00 for A0, 0x11 for B1 etc.
+ *
+ * @param s Two character string.
+ * @return Revision number, -1 on error
+ */
+int nfp_idstr2rev(const char *s);
+
+/*
+ * Create string from revision number.
+ *
+ * String will be upper case.
+ *
+ * @param s Pointer to char buffer with size of at least 3
+ * for 2 characters and string terminator.
+ * @param rev Revision number.
+ * @return Pointer to "s" on success, NULL on error.
+ */
+const char *nfp_rev2str(char *s, int rev);
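+
+/*
+ * Example (illustrative): nfp_idstr2rev("B0") returns 0x10, and
+ * nfp_rev2str(buf, 0x10) writes "B0" into buf.
+ */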
+
+/*
+ * Get the NFP CPP address from a string
+ *
+ * String is in the format [island@]target[:[action:[token:]]address]
+ *
+ * @param chip_family Chip family ID
+ * @param tid Pointer to string to parse
+ * @param cpp_idp Pointer to CPP ID
+ * @param cpp_addrp Pointer to CPP address
+ * @return 0 on success, or -1 and errno
+ */
+int nfp_str2cpp(int chip_family,
+ const char *tid,
+ uint32_t *cpp_idp,
+ uint64_t *cpp_addrp);
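+
+/*
+ * Hypothetical usage sketch (the exact island/target spellings accepted by
+ * the parser depend on the implementation):
+ *
+ *   uint32_t cpp_id;
+ *   uint64_t cpp_addr;
+ *
+ *   if (nfp_str2cpp(NFP_CHIP_FAMILY_NFP6000, "i24@7:0:0:0x1000",
+ *                   &cpp_id, &cpp_addr) == 0) {
+ *       // cpp_id would encode island 24, target 7 (MU), action 0,
+ *       // token 0; cpp_addr == 0x1000
+ *   }
+ */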
+
+
+#endif /* _NFP_RESID_NO_C_FUNC */
+
+#endif /* __NFP_RESID_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h
new file mode 100644
index 00000000..47e1ddae
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp6000/nfp6000.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_NFP6000_H__
+#define __NFP_NFP6000_H__
+
+/* CPP Target IDs */
+#define NFP_CPP_TARGET_INVALID 0
+#define NFP_CPP_TARGET_NBI 1
+#define NFP_CPP_TARGET_QDR 2
+#define NFP_CPP_TARGET_ILA 6
+#define NFP_CPP_TARGET_MU 7
+#define NFP_CPP_TARGET_PCIE 9
+#define NFP_CPP_TARGET_ARM 10
+#define NFP_CPP_TARGET_CRYPTO 12
+#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */
+#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */
+#define NFP_CPP_TARGET_CT_XPB 14
+#define NFP_CPP_TARGET_LOCAL_SCRATCH 15
+#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH
+
+#define NFP_ISL_EMEM0 24
+
+#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL
+#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL
+
+static inline int
+nfp_cppat_mu_locality_lsb(int mode, int addr40)
+{
+ switch (mode) {
+ case 0 ... 3:
+ return addr40 ? 38 : 30;
+ default:
+ return -EINVAL;
+ }
+}
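+
+/*
+ * Example (illustrative): with 40-bit addressing and a valid mode, the
+ * locality field sits at bits 39:38, so a direct access could be
+ * requested with
+ *
+ *   addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT
+ *           << nfp_cppat_mu_locality_lsb(mode, 1);
+ */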
+
+#endif /* __NFP_NFP6000_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h
new file mode 100644
index 00000000..7ada1bb2
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp6000/nfp_xpb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_XPB_H__
+#define __NFP_XPB_H__
+
+/*
+ * For use with NFP6000 Databook "XPB Addressing" section
+ */
+#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24)
+
+#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000)
+
+#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F)
+
+/*
+ * For use with NFP6000 Databook "XPB Island and Device IDs" chapter
+ */
+#define NFP_XPB_DEVICE(island, slave, device) \
+ (NFP_XPB_OVERLAY(island) | \
+ (((slave) & 3) << 22) | \
+ (((device) & 0x3f) << 16))
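+
+/*
+ * Example (illustrative): NFP_XPB_DEVICE(24, 1, 0x10) evaluates to
+ * 0x18500000: island 24 in bits 29:24, slave 1 in bits 23:22 and device
+ * 0x10 in bits 21:16.
+ */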
+
+#endif /* __NFP_XPB_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp.h b/drivers/net/nfp/nfpcore/nfp_cpp.h
new file mode 100644
index 00000000..1427954c
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_cpp.h
@@ -0,0 +1,781 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CPP_H__
+#define __NFP_CPP_H__
+
+#include <rte_ethdev_pci.h>
+
+#include "nfp-common/nfp_platform.h"
+#include "nfp-common/nfp_resid.h"
+
+struct nfp_cpp_mutex;
+
+/*
+ * NFP CPP handle
+ */
+struct nfp_cpp {
+ uint32_t model;
+ uint32_t interface;
+ uint8_t *serial;
+ int serial_len;
+ void *priv;
+
+ /* Mutex cache */
+ struct nfp_cpp_mutex *mutex_cache;
+ const struct nfp_cpp_operations *op;
+
+ /*
+ * NFP-6xxx originating island IMB CPP Address Translation. CPP Target
+ * ID is index into array. Values are obtained at runtime from local
+ * island XPB CSRs.
+ */
+ uint32_t imb_cat_table[16];
+
+ int driver_lock_needed;
+};
+
+/*
+ * NFP CPP device area handle
+ */
+struct nfp_cpp_area {
+ struct nfp_cpp *cpp;
+ char *name;
+ unsigned long long offset;
+ unsigned long size;
+ /* Here follows the 'priv' part of nfp_cpp_area. */
+};
+
+/*
+ * NFP CPP operations structure
+ */
+struct nfp_cpp_operations {
+ /* Size of priv area in struct nfp_cpp_area */
+ size_t area_priv_size;
+
+ /* Instance an NFP CPP */
+ int (*init)(struct nfp_cpp *cpp, struct rte_pci_device *dev);
+
+ /*
+ * Free the bus.
+ * Called only once, during nfp_cpp_unregister()
+ */
+ void (*free)(struct nfp_cpp *cpp);
+
+ /*
+ * Initialize a new NFP CPP area
+ * NOTE: This is _not_ serialized
+ */
+ int (*area_init)(struct nfp_cpp_area *area,
+ uint32_t dest,
+ unsigned long long address,
+ unsigned long size);
+ /*
+ * Clean up a NFP CPP area before it is freed
+ * NOTE: This is _not_ serialized
+ */
+ void (*area_cleanup)(struct nfp_cpp_area *area);
+
+ /*
+ * Acquire resources for a NFP CPP area
+ * Serialized
+ */
+ int (*area_acquire)(struct nfp_cpp_area *area);
+ /*
+ * Release resources for a NFP CPP area
+ * Serialized
+ */
+ void (*area_release)(struct nfp_cpp_area *area);
+ /*
+ * Return a void IO pointer to a NFP CPP area
+ * NOTE: This is _not_ serialized
+ */
+
+ void *(*area_iomem)(struct nfp_cpp_area *area);
+
+ void *(*area_mapped)(struct nfp_cpp_area *area);
+ /*
+ * Perform a read from a NFP CPP area
+ * Serialized
+ */
+ int (*area_read)(struct nfp_cpp_area *area,
+ void *kernel_vaddr,
+ unsigned long offset,
+ unsigned int length);
+ /*
+ * Perform a write to a NFP CPP area
+ * Serialized
+ */
+ int (*area_write)(struct nfp_cpp_area *area,
+ const void *kernel_vaddr,
+ unsigned long offset,
+ unsigned int length);
+};
+
+/*
+ * This should be the only external function the transport
+ * module supplies
+ */
+const struct nfp_cpp_operations *nfp_cpp_transport_operations(void);
+
+/*
+ * Set the model id
+ *
+ * @param cpp NFP CPP operations structure
+ * @param model Model ID
+ */
+void nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model);
+
+/*
+ * Set the interface ID of a nfp_cpp struct
+ *
+ * @param cpp NFP CPP operations structure
+ * @param interface Interface ID
+ */
+void nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface);
+
+/*
+ * Set the serial number of a nfp_cpp struct
+ *
+ * @param   cpp     NFP CPP operations structure
+ * @param   serial  NFP serial byte array
+ * @param   serial_len  Length of the serial byte array
+ */
+int nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial,
+ size_t serial_len);
+
+/*
+ * Set the private data of the nfp_cpp instance
+ *
+ * @param cpp NFP CPP operations structure
+ * @param   priv    Opaque device pointer
+ */
+void nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv);
+
+/*
+ * Return the private data of the nfp_cpp instance
+ *
+ * @param cpp NFP CPP operations structure
+ * @return Opaque device pointer
+ */
+void *nfp_cpp_priv(struct nfp_cpp *cpp);
+
+/*
+ * Get the privately allocated portion of a NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return Pointer to the private area, or NULL on failure
+ */
+void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
+
+uint32_t __nfp_cpp_model_autodetect(struct nfp_cpp *cpp);
+
+/*
+ * NFP CPP core interface for CPP clients.
+ */
+
+/*
+ * Open a NFP CPP handle to a CPP device
+ *
+ * @param[in]	dev	PCI device handle
+ * @param[in]	driver_lock_needed	Non-zero if the driver lock must be taken
+ *
+ * @return NFP CPP handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp *nfp_cpp_from_device_name(struct rte_pci_device *dev,
+ int driver_lock_needed);
+
+/*
+ * Free a NFP CPP handle
+ *
+ * @param[in] cpp NFP CPP handle
+ */
+void nfp_cpp_free(struct nfp_cpp *cpp);
+
+#define NFP_CPP_MODEL_INVALID 0xffffffff
+
+/*
+ * NFP_CPP_MODEL_CHIP_of - retrieve the chip ID from the model ID
+ *
+ * The chip ID is a 16-bit BCD+A-F encoding for the chip type.
+ *
+ * @param[in] model NFP CPP model id
+ * @return NFP CPP chip id
+ */
+#define NFP_CPP_MODEL_CHIP_of(model) (((model) >> 16) & 0xffff)
+
+/*
+ * NFP_CPP_MODEL_IS_6000 - Check for the NFP6000 family of devices
+ *
+ * NOTE: The NFP4000 series is considered a NFP6000 series variant.
+ *
+ * @param[in] model NFP CPP model id
+ * @return true if model is in the NFP6000 family, false otherwise.
+ */
+#define NFP_CPP_MODEL_IS_6000(model) \
+ ((NFP_CPP_MODEL_CHIP_of(model) >= 0x4000) && \
+ (NFP_CPP_MODEL_CHIP_of(model) < 0x7000))
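+
+/*
+ * Example (illustrative): a model ID of 0x62000010 has
+ * NFP_CPP_MODEL_CHIP_of() == 0x6200, which lies in [0x4000, 0x7000), so
+ * NFP_CPP_MODEL_IS_6000() reports it as part of the NFP6000 family.
+ */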
+
+/*
+ * nfp_cpp_model - Retrieve the Model ID of the NFP
+ *
+ * @param[in] cpp NFP CPP handle
+ * @return NFP CPP Model ID
+ */
+uint32_t nfp_cpp_model(struct nfp_cpp *cpp);
+
+/*
+ * NFP Interface types - logical interface for this CPP connection. 4 bits
+ * are reserved for the interface type.
+ */
+#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0
+#define NFP_CPP_INTERFACE_TYPE_PCI 0x1
+#define NFP_CPP_INTERFACE_TYPE_ARM 0x2
+#define NFP_CPP_INTERFACE_TYPE_RPC 0x3
+#define NFP_CPP_INTERFACE_TYPE_ILA 0x4
+
+/*
+ * Construct a 16-bit NFP Interface ID
+ *
+ * Interface IDs consist of 4 bits of interface type, 4 bits of unit
+ * identifier, and 8 bits of channel identifier.
+ *
+ * The NFP Interface ID is used in the implementation of NFP CPP API mutexes,
+ * which use the MU Atomic CompareAndWrite operation - hence the limit to 16
+ * bits to be able to use the NFP Interface ID as a lock owner.
+ *
+ * @param[in] type NFP Interface Type
+ * @param[in] unit Unit identifier for the interface type
+ * @param[in] channel Channel identifier for the interface unit
+ * @return Interface ID
+ */
+#define NFP_CPP_INTERFACE(type, unit, channel) \
+ ((((type) & 0xf) << 12) | \
+ (((unit) & 0xf) << 8) | \
+ (((channel) & 0xff) << 0))
+
+/*
+ * Get the interface type of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's type
+ */
+#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf)
+
+/*
+ * Get the interface unit of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's unit
+ */
+#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf)
+
+/*
+ * Get the interface channel of a NFP Interface ID
+ * @param[in] interface NFP Interface ID
+ * @return NFP Interface ID's channel
+ */
+#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff)
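+
+/*
+ * Example (illustrative):
+ * NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0) evaluates to 0x1000,
+ * and NFP_CPP_INTERFACE_TYPE_of(0x1000) recovers NFP_CPP_INTERFACE_TYPE_PCI.
+ */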
+
+/*
+ * Retrieve the Interface ID of the NFP
+ * @param[in] cpp NFP CPP handle
+ * @return NFP CPP Interface ID
+ */
+uint16_t nfp_cpp_interface(struct nfp_cpp *cpp);
+
+/*
+ * Retrieve the NFP Serial Number (unique per NFP)
+ * @param[in] cpp NFP CPP handle
+ * @param[out] serial Pointer to reference the serial number array
+ *
+ * @return size of the NFP6000 serial number, in bytes
+ */
+int nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial);
+
+/*
+ * Allocate a NFP CPP area handle, as an offset into a CPP ID
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Allocate a NFP CPP area handle, as an offset into a CPP ID, by a named owner
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] name Name of owner of the area
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
+ uint32_t cpp_id,
+ const char *name,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Free an allocated NFP CPP area handle
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_free(struct nfp_cpp_area *area);
+
+/*
+ * Acquire the resources needed to access the NFP CPP area handle
+ *
+ * @param[in] area NFP CPP area handle
+ *
+ * @return 0 on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
+
+/*
+ * Release the resources needed to access the NFP CPP area handle
+ *
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_release(struct nfp_cpp_area *area);
+
+/*
+ * Allocate, then acquire the resources needed to access the NFP CPP area handle
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] size Size of the area to reserve
+ *
+ * @return NFP CPP area handle, or NULL on failure (and set errno accordingly).
+ */
+struct nfp_cpp_area *nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp,
+ uint32_t cpp_id,
+ unsigned long long address,
+ unsigned long size);
+
+/*
+ * Release the resources, then free the NFP CPP area handle
+ * @param[in] area NFP CPP area handle
+ */
+void nfp_cpp_area_release_free(struct nfp_cpp_area *area);
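+
+/*
+ * Usage sketch (illustrative; error handling elided): read one 32-bit word
+ * from the MU target through a temporary area, assuming a valid cpp handle
+ * and NFP_CPP_TARGET_MU from nfp6000/nfp6000.h:
+ *
+ *   struct nfp_cpp_area *area;
+ *   uint32_t id = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0);
+ *   uint32_t val;
+ *
+ *   area = nfp_cpp_area_alloc_acquire(cpp, id, 0x1000, sizeof(val));
+ *   if (area != NULL) {
+ *       nfp_cpp_area_readl(area, 0, &val);
+ *       nfp_cpp_area_release_free(area);
+ *   }
+ */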
+
+uint8_t *nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target,
+ uint64_t addr, unsigned long size,
+ struct nfp_cpp_area **area);
+/*
+ * Return an IO pointer to the beginning of the NFP CPP area handle. The area
+ * must be acquired with 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ *
+ * @return Pointer to IO memory, or NULL on failure (and set errno accordingly).
+ */
+void *nfp_cpp_area_mapped(struct nfp_cpp_area *area);
+
+/*
+ * Read from a NFP CPP area handle into a buffer. The area must be acquired with
+ * 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] buffer Location of buffer to receive the data
+ * @param[in] length Length of the data to read
+ *
+ * @return bytes read on success, -1 on failure (and set errno accordingly).
+ *
+ */
+int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+ void *buffer, size_t length);
+
+/*
+ * Write to a NFP CPP area handle from a buffer. The area must be acquired with
+ * 'nfp_cpp_area_acquire()' before calling this operation.
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] buffer Location of buffer that holds the data
+ * @param[in]   length  Length of the data to write
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+ const void *buffer, size_t length);
+
+/*
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ * @area: CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: pointer to the area, or NULL
+ */
+void *nfp_cpp_area_iomem(struct nfp_cpp_area *area);
+
+/*
+ * Verify that IO can be performed on an offset in an area
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the area
+ * @param[in] size Size of region to validate
+ *
+ * @return 0 on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
+ unsigned long long offset, unsigned long size);
+
+/*
+ * Get the NFP CPP handle that is the parent of a NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
+
+/*
+ * Get the name passed during allocation of the NFP CPP area handle
+ *
+ * @param cpp_area NFP CPP area handle
+ * @return Pointer to the area's name
+ */
+const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
+
+/*
+ * Read a block of data from a NFP CPP ID
+ *
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] kernel_vaddr Buffer to copy read data to
+ * @param[in]   length  Length of the data to read
+ *
+ * @return bytes read on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_read(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, void *kernel_vaddr, size_t length);
+
+/*
+ * Write a block of data to a NFP CPP ID
+ *
+ * @param[in] cpp NFP CPP handle
+ * @param[in] cpp_id NFP CPP ID
+ * @param[in] address Offset into the NFP CPP ID address space
+ * @param[in] kernel_vaddr Buffer to copy write data from
+ * @param[in]   length  Length of the data to write
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_write(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, const void *kernel_vaddr,
+ size_t length);
+
+/*
+ * Fill a NFP CPP area handle and offset with a value
+ *
+ * @param[in] area NFP CPP area handle
+ * @param[in] offset Offset into the NFP CPP ID address space
+ * @param[in] value 32-bit value to fill area with
+ * @param[in]   length  Length of the area to fill
+ *
+ * @return bytes written on success, -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value, size_t length);
+
+/*
+ * Read a single 32-bit value from a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value output value
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 32-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t *value);
+
+/*
+ * Write a single 32-bit value to a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value value to write
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 32-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value);
+
+/*
+ * Read a single 64-bit value from a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value output value
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 64-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t *value);
+
+/*
+ * Write a single 64-bit value to a NFP CPP area handle
+ *
+ * @param area NFP CPP area handle
+ * @param offset offset into NFP CPP area handle
+ * @param value value to write
+ *
+ * The area must be acquired with 'nfp_cpp_area_acquire()' before calling this
+ * operation.
+ *
+ * NOTE: offset must be 64-bit aligned.
+ *
+ * @return 0 on success, or -1 on error (and set errno accordingly).
+ */
+int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t value);
+
+/*
+ * Write a single 32-bit value on the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t value);
+
+/*
+ * Read a single 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t *value);
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to modify
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value);
+
+/*
+ * Wait for masked bits of a 32-bit value from the XPB bus to match a value
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to monitor for
+ * @param timeout_us maximum number of us to wait (-1 for forever)
+ *
+ * @return >= 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value, int timeout_us);
+
+/*
+ * Read a 32-bit word from a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint32_t *value);
+
+/*
+ * Write a 32-bit value to a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ *
+ */
+int nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint32_t value);
+
+/*
+ * Read a 64-bit word from a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value output value
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint64_t *value);
+
+/*
+ * Write a 64-bit value to a NFP CPP ID
+ *
+ * @param cpp NFP CPP handle
+ * @param cpp_id NFP CPP ID
+ * @param address offset into the NFP CPP ID address space
+ * @param value value to write
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id,
+ unsigned long long address, uint64_t value);
+
+/*
+ * Initialize a mutex location
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and will
+ * initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this nfp_cpp_interface().
+ *
+ * This function should only be called when setting up the initial lock state
+ * upon boot-up of the system.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key_id Unique 32-bit value for this mutex
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
+ unsigned long long address, uint32_t key_id);
+
+/*
+ * Create a mutex handle from an address controlled by a MU Atomic engine
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and reserve
+ * 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the MU Atomic
+ * Engine's CmpAndSwap32 command are supported.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key_id 32-bit unique key (must match the key at this location)
+ *
+ * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on
+ * failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address,
+ uint32_t key_id);
+
+/*
+ * Get the NFP CPP handle the mutex was created with
+ *
+ * @param mutex NFP mutex handle
+ * @return NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex key
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex key
+ */
+uint32_t nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex owner
+ *
+ * @param mutex NFP mutex handle
+ * @return Interface ID of the mutex owner
+ *
+ * NOTE: This is for debug purposes ONLY - the owner may change at any time,
+ * unless it has been locked by this NFP CPP handle.
+ */
+uint16_t nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex target
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex CPP target (i.e. NFP_CPP_TARGET_MU)
+ */
+int nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Get the mutex address
+ *
+ * @param mutex NFP mutex handle
+ * @return Mutex CPP address
+ */
+uint64_t nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Free a mutex handle - does not alter the lock state
+ *
+ * @param mutex NFP CPP Mutex handle
+ */
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Unlock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
+
+/*
+ * Attempt to lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @return 0 if the lock succeeded, -1 on failure (and errno set
+ * appropriately).
+ */
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
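+
+/*
+ * Typical mutex usage sketch (illustrative; error handling elided, assumes
+ * a valid cpp handle and a location set up with nfp_cpp_mutex_init()):
+ *
+ *   struct nfp_cpp_mutex *m;
+ *
+ *   m = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, address, key_id);
+ *   if (m != NULL && nfp_cpp_mutex_lock(m) == 0) {
+ *       // ... critical section ...
+ *       nfp_cpp_mutex_unlock(m);
+ *   }
+ *   nfp_cpp_mutex_free(m);
+ */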
+
+#endif /* !__NFP_CPP_H__ */
diff --git a/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
new file mode 100644
index 00000000..c68d9400
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_cpp_pcie_ops.c
@@ -0,0 +1,845 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ * nfp_cpp_pcie_ops.c
+ * Authors: Vinayak Tammineedi <vinayak.tammineedi@netronome.com>
+ *
+ * Multiplexes the NFP BARs between NFP internal resources and
+ * implements the PCIe specific interface for generic CPP bus access.
+ *
+ * The BARs are managed and allocated if they are available.
+ * The generic CPP bus abstraction builds upon this BAR interface.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <execinfo.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <dirent.h>
+#include <libgen.h>
+
+#include <sys/mman.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+
+#include <rte_ethdev_pci.h>
+#include <rte_string_fns.h>
+
+#include "nfp_cpp.h"
+#include "nfp_target.h"
+#include "nfp6000/nfp6000.h"
+
+#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0)
+
+#define NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(_x) (((_x) & 0x1f) << 16)
+#define NFP_PCIE_BAR_PCIE2CPP_BASEADDRESS(_x) (((_x) & 0xffff) << 0)
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT(_x) (((_x) & 0x3) << 27)
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT 0
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT 1
+#define NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE 3
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE(_x) (((_x) & 0x7) << 29)
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(_x) (((_x) >> 29) & 0x7)
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED 0
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK 1
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_TARGET 2
+#define NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL 3
+#define NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(_x) (((_x) & 0xf) << 23)
+#define NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(_x) (((_x) & 0x3) << 21)
+
+/*
+ * Minimal size of the PCIe cfg memory we depend on being mapped,
+ * queue controller and DMA controller don't have to be covered.
+ */
+#define NFP_PCI_MIN_MAP_SIZE 0x080000
+
+#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
+#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
+#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4))
+
+#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar, slot) \
+ (NFP_PCIE_BAR(0) + ((bar) * 8 + (slot)) * 4)
+
+#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPBAR(bar, slot) \
+ (((bar) * 8 + (slot)) * 4)
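+
+/*
+ * Example (illustrative): expansion BAR 0, slot 1 lives at CSR offset
+ * NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(0, 1) == 0x30004, i.e. NFP_PCIE_BAR(0)
+ * plus one 32-bit register.
+ */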
+
+/*
+ * Define DEBUG to enable a bit more verbose debug output.
+ */
+struct nfp_pcie_user;
+struct nfp6000_area_priv;
+
+/*
+ * struct nfp_bar - describes BAR configuration and usage
+ * @nfp: backlink to owner
+ * @barcfg: cached contents of BAR config CSR
+ * @base: the BAR's base CPP offset
+ * @mask: mask for the BAR aperture (read only)
+ * @bitsize: bitsize of BAR aperture (read only)
+ * @index: index of the BAR
+ * @lock: lock to specify if bar is in use
+ * @refcnt: number of current users
+ * @iomem: mapped IO memory
+ */
+#define NFP_BAR_MAX 7
+struct nfp_bar {
+ struct nfp_pcie_user *nfp;
+ uint32_t barcfg;
+ uint64_t base; /* CPP address base */
+ uint64_t mask; /* Bit mask of the bar */
+ uint32_t bitsize; /* Bit size of the bar */
+ int index;
+ int lock;
+
+ char *csr;
+ char *iomem;
+};
+
+#define BUSDEV_SZ 13
+struct nfp_pcie_user {
+ struct nfp_bar bar[NFP_BAR_MAX];
+
+ int device;
+ int lock;
+ char busdev[BUSDEV_SZ];
+ int barsz;
+ char *cfg;
+};
+
+static uint32_t
+nfp_bar_maptype(struct nfp_bar *bar)
+{
+ return NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_OF(bar->barcfg);
+}
+
+#define TARGET_WIDTH_32 4
+#define TARGET_WIDTH_64 8
+
+static int
+nfp_compute_bar(const struct nfp_bar *bar, uint32_t *bar_config,
+ uint64_t *bar_base, int tgt, int act, int tok,
+ uint64_t offset, size_t size, int width)
+{
+ uint32_t bitsize;
+ uint32_t newcfg;
+ uint64_t mask;
+
+ if (tgt >= 16)
+ return -EINVAL;
+
+ switch (width) {
+ case 8:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_64BIT);
+ break;
+ case 4:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_32BIT);
+ break;
+ case 0:
+ newcfg =
+ NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT
+ (NFP_PCIE_BAR_PCIE2CPP_LENGTHSELECT_0BYTE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (act != NFP_CPP_ACTION_RW && act != 0) {
+ /* Fixed CPP mapping with specific action */
+ mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);
+
+ newcfg |=
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE
+ (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_FIXED);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_ACTION_BASEADDRESS(act);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok);
+
+ if ((offset & mask) != ((offset + size - 1) & mask)) {
+ printf("BAR%d: Won't use for Fixed mapping\n",
+ bar->index);
+ printf("\t<%#llx,%#llx>, action=%d\n",
+ (unsigned long long)offset,
+ (unsigned long long)(offset + size), act);
+ printf("\tBAR too small (0x%llx).\n",
+ (unsigned long long)mask);
+ return -EINVAL;
+ }
+ offset &= mask;
+
+#ifdef DEBUG
+ printf("BAR%d: Created Fixed mapping\n", bar->index);
+ printf("\t%d:%d:%d:%#llx-%#llx\n", tgt, act, tok,
+ (unsigned long long)offset,
+ (unsigned long long)(offset + mask));
+#endif
+
+ bitsize = 40 - 16;
+ } else {
+ mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);
+
+ /* Bulk mapping */
+ newcfg |=
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE
+ (NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_BULK);
+
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TARGET_BASEADDRESS(tgt);
+ newcfg |= NFP_PCIE_BAR_PCIE2CPP_TOKEN_BASEADDRESS(tok);
+
+ if ((offset & mask) != ((offset + size - 1) & mask)) {
+ printf("BAR%d: Won't use for bulk mapping\n",
+ bar->index);
+ printf("\t<%#llx,%#llx>\n", (unsigned long long)offset,
+ (unsigned long long)(offset + size));
+ printf("\ttarget=%d, token=%d\n", tgt, tok);
+ printf("\tBAR too small (%#llx) - (%#llx != %#llx).\n",
+ (unsigned long long)mask,
+ (unsigned long long)(offset & mask),
+ (unsigned long long)(offset + size - 1) & mask);
+
+ return -EINVAL;
+ }
+
+ offset &= mask;
+
+#ifdef DEBUG
+ printf("BAR%d: Created bulk mapping %d:x:%d:%#llx-%#llx\n",
+ bar->index, tgt, tok, (unsigned long long)offset,
+ (unsigned long long)(offset + ~mask));
+#endif
+
+ bitsize = 40 - 21;
+ }
+
+ if (bar->bitsize < bitsize) {
+ printf("BAR%d: Too small for %d:%d:%d\n", bar->index, tgt, act,
+ tok);
+ return -EINVAL;
+ }
+
+ newcfg |= offset >> bitsize;
+
+ if (bar_base)
+ *bar_base = offset;
+
+ if (bar_config)
+ *bar_config = newcfg;
+
+ return 0;
+}
+
+static int
+nfp_bar_write(struct nfp_pcie_user *nfp, struct nfp_bar *bar,
+ uint32_t newcfg)
+{
+ int base, slot;
+
+ base = bar->index >> 3;
+ slot = bar->index & 7;
+
+ if (!nfp->cfg)
+ return (-ENOMEM);
+
+ bar->csr = nfp->cfg +
+ NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(base, slot);
+
+ *(uint32_t *)(bar->csr) = newcfg;
+
+ bar->barcfg = newcfg;
+#ifdef DEBUG
+ printf("BAR%d: updated to 0x%08x\n", bar->index, newcfg);
+#endif
+
+ return 0;
+}
+
+static int
+nfp_reconfigure_bar(struct nfp_pcie_user *nfp, struct nfp_bar *bar, int tgt,
+ int act, int tok, uint64_t offset, size_t size, int width)
+{
+ uint64_t newbase;
+ uint32_t newcfg;
+ int err;
+
+ err = nfp_compute_bar(bar, &newcfg, &newbase, tgt, act, tok, offset,
+ size, width);
+ if (err)
+ return err;
+
+ bar->base = newbase;
+
+ return nfp_bar_write(nfp, bar, newcfg);
+}
+
+/*
+ * Map all PCI bars. We assume that the BAR with the PCIe config block is
+ * already mapped.
+ *
+ * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
+ */
+static int
+nfp_enable_bars(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ bar->barcfg = 0;
+ bar->nfp = nfp;
+ bar->index = x;
+ bar->mask = (1 << (nfp->barsz - 3)) - 1;
+ bar->bitsize = nfp->barsz - 3;
+ bar->base = 0;
+ bar->iomem = NULL;
+ bar->lock = 0;
+ bar->csr = nfp->cfg +
+ NFP_PCIE_CFG_BAR_PCIETOCPPEXPBAR(bar->index >> 3,
+ bar->index & 7);
+
+ bar->iomem = nfp->cfg + (bar->index << bar->bitsize);
+ }
+ return 0;
+}
+
+static struct nfp_bar *
+nfp_alloc_bar(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ if (!bar->lock) {
+ bar->lock = 1;
+ return bar;
+ }
+ }
+ return NULL;
+}
+
+static void
+nfp_disable_bars(struct nfp_pcie_user *nfp)
+{
+ struct nfp_bar *bar;
+ int x;
+
+ for (x = ARRAY_SIZE(nfp->bar); x > 0; x--) {
+ bar = &nfp->bar[x - 1];
+ if (bar->iomem) {
+ bar->iomem = NULL;
+ bar->lock = 0;
+ }
+ }
+}
+
+/*
+ * Generic CPP bus access interface.
+ */
+
+struct nfp6000_area_priv {
+ struct nfp_bar *bar;
+ uint32_t bar_offset;
+
+ uint32_t target;
+ uint32_t action;
+ uint32_t token;
+ uint64_t offset;
+ struct {
+ int read;
+ int write;
+ int bar;
+ } width;
+ size_t size;
+ char *iomem;
+};
+
+static int
+nfp6000_area_init(struct nfp_cpp_area *area, uint32_t dest,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_pcie_user *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ uint32_t target = NFP_CPP_ID_TARGET_of(dest);
+ uint32_t action = NFP_CPP_ID_ACTION_of(dest);
+ uint32_t token = NFP_CPP_ID_TOKEN_of(dest);
+ int pp, ret = 0;
+
+ pp = nfp6000_target_pushpull(NFP_CPP_ID(target, action, token),
+ address);
+ if (pp < 0)
+ return pp;
+
+ priv->width.read = PUSH_WIDTH(pp);
+ priv->width.write = PULL_WIDTH(pp);
+
+ if (priv->width.read > 0 &&
+ priv->width.write > 0 && priv->width.read != priv->width.write)
+ return -EINVAL;
+
+ if (priv->width.read > 0)
+ priv->width.bar = priv->width.read;
+ else
+ priv->width.bar = priv->width.write;
+
+ priv->bar = nfp_alloc_bar(nfp);
+ if (priv->bar == NULL)
+ return -ENOMEM;
+
+ priv->target = target;
+ priv->action = action;
+ priv->token = token;
+ priv->offset = address;
+ priv->size = size;
+
+ ret = nfp_reconfigure_bar(nfp, priv->bar, priv->target, priv->action,
+ priv->token, priv->offset, priv->size,
+ priv->width.bar);
+
+ return ret;
+}
+
+static int
+nfp6000_area_acquire(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+ /* Calculate offset into BAR. */
+ if (nfp_bar_maptype(priv->bar) ==
+ NFP_PCIE_BAR_PCIE2CPP_MAPTYPE_GENERAL) {
+ priv->bar_offset = priv->offset &
+ (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
+ priv->bar_offset +=
+ NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(priv->bar,
+ priv->target);
+ priv->bar_offset +=
+ NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(priv->bar, priv->token);
+ } else {
+ priv->bar_offset = priv->offset & priv->bar->mask;
+ }
+
+ /* Must have been too big. Sub-allocate. */
+ if (!priv->bar->iomem)
+ return (-ENOMEM);
+
+ priv->iomem = priv->bar->iomem + priv->bar_offset;
+
+ return 0;
+}
+
+static void *
+nfp6000_area_mapped(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *area_priv = nfp_cpp_area_priv(area);
+
+ if (!area_priv->iomem)
+ return NULL;
+
+ return area_priv->iomem;
+}
+
+static void
+nfp6000_area_release(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ priv->bar->lock = 0;
+ priv->bar = NULL;
+ priv->iomem = NULL;
+}
+
+static void *
+nfp6000_area_iomem(struct nfp_cpp_area *area)
+{
+ struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+ return priv->iomem;
+}
+
+static int
+nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
+ unsigned long offset, unsigned int length)
+{
+ uint64_t *wrptr64 = kernel_vaddr;
+ const volatile uint64_t *rdptr64;
+ struct nfp6000_area_priv *priv;
+ uint32_t *wrptr32 = kernel_vaddr;
+ const volatile uint32_t *rdptr32;
+ int width;
+ unsigned int n;
+ bool is_64;
+
+ priv = nfp_cpp_area_priv(area);
+ rdptr64 = (uint64_t *)(priv->iomem + offset);
+ rdptr32 = (uint32_t *)(priv->iomem + offset);
+
+ if (offset + length > priv->size)
+ return -EFAULT;
+
+ width = priv->width.read;
+
+ if (width <= 0)
+ return -EINVAL;
+
+ /* Unaligned? Translate to an explicit access */
+ if ((priv->offset + offset) & (width - 1)) {
+ printf("area_read: unaligned access\n");
+ return -EINVAL;
+ }
+
+ is_64 = width == TARGET_WIDTH_64;
+
+ /* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW) {
+ is_64 = false;
+ }
+
+ if (is_64) {
+ if (offset % sizeof(uint64_t) != 0 ||
+ length % sizeof(uint64_t) != 0)
+ return -EINVAL;
+ } else {
+ if (offset % sizeof(uint32_t) != 0 ||
+ length % sizeof(uint32_t) != 0)
+ return -EINVAL;
+ }
+
+ if (!priv->bar)
+ return -EFAULT;
+
+ if (is_64)
+ for (n = 0; n < length; n += sizeof(uint64_t)) {
+ *wrptr64 = *rdptr64;
+ wrptr64++;
+ rdptr64++;
+ }
+ else
+ for (n = 0; n < length; n += sizeof(uint32_t)) {
+ *wrptr32 = *rdptr32;
+ wrptr32++;
+ rdptr32++;
+ }
+
+ return n;
+}
+
+static int
+nfp6000_area_write(struct nfp_cpp_area *area, const void *kernel_vaddr,
+ unsigned long offset, unsigned int length)
+{
+ const uint64_t *rdptr64 = kernel_vaddr;
+ uint64_t *wrptr64;
+ const uint32_t *rdptr32 = kernel_vaddr;
+ struct nfp6000_area_priv *priv;
+ uint32_t *wrptr32;
+ int width;
+ unsigned int n;
+ bool is_64;
+
+ priv = nfp_cpp_area_priv(area);
+ wrptr64 = (uint64_t *)(priv->iomem + offset);
+ wrptr32 = (uint32_t *)(priv->iomem + offset);
+
+ if (offset + length > priv->size)
+ return -EFAULT;
+
+ width = priv->width.write;
+
+ if (width <= 0)
+ return -EINVAL;
+
+ /* Unaligned? Translate to an explicit access */
+ if ((priv->offset + offset) & (width - 1))
+ return -EINVAL;
+
+ is_64 = width == TARGET_WIDTH_64;
+
+ /* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */
+ if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+ priv->action == NFP_CPP_ACTION_RW)
+ is_64 = false;
+
+ if (is_64) {
+ if (offset % sizeof(uint64_t) != 0 ||
+ length % sizeof(uint64_t) != 0)
+ return -EINVAL;
+ } else {
+ if (offset % sizeof(uint32_t) != 0 ||
+ length % sizeof(uint32_t) != 0)
+ return -EINVAL;
+ }
+
+ if (!priv->bar)
+ return -EFAULT;
+
+ if (is_64)
+ for (n = 0; n < length; n += sizeof(uint64_t)) {
+ *wrptr64 = *rdptr64;
+ wrptr64++;
+ rdptr64++;
+ }
+ else
+ for (n = 0; n < length; n += sizeof(uint32_t)) {
+ *wrptr32 = *rdptr32;
+ wrptr32++;
+ rdptr32++;
+ }
+
+ return n;
+}
+
+#define PCI_DEVICES "/sys/bus/pci/devices"
+
+static int
+nfp_acquire_process_lock(struct nfp_pcie_user *desc)
+{
+ int rc;
+ struct flock lock;
+ char lockname[30];
+
+ memset(&lock, 0, sizeof(lock));
+
+ snprintf(lockname, sizeof(lockname), "/var/lock/nfp_%s", desc->busdev);
+ desc->lock = open(lockname, O_RDWR | O_CREAT, 0666);
+ if (desc->lock < 0)
+ return desc->lock;
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ rc = -1;
+ while (rc != 0) {
+ rc = fcntl(desc->lock, F_SETLKW, &lock);
+ if (rc < 0) {
+ if (errno != EAGAIN && errno != EACCES) {
+ close(desc->lock);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp6000_set_model(struct rte_pci_device *dev, struct nfp_cpp *cpp)
+{
+ uint32_t model;
+
+ if (rte_pci_read_config(dev, &model, 4, 0x2e) < 0) {
+ printf("nfp set model failed\n");
+ return -1;
+ }
+
+ model = model << 16;
+ nfp_cpp_model_set(cpp, model);
+
+ return 0;
+}
+
+static int
+nfp6000_set_interface(struct rte_pci_device *dev, struct nfp_cpp *cpp)
+{
+ uint16_t interface;
+
+ if (rte_pci_read_config(dev, &interface, 2, 0x154) < 0) {
+ printf("nfp set interface failed\n");
+ return -1;
+ }
+
+ nfp_cpp_interface_set(cpp, interface);
+
+ return 0;
+}
+
+#define PCI_CFG_SPACE_SIZE 256
+#define PCI_CFG_SPACE_EXP_SIZE 4096
+#define PCI_EXT_CAP_ID(header) (int)(header & 0x0000ffff)
+#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
+#define PCI_EXT_CAP_ID_DSN 0x03
+static int
+nfp_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
+{
+ uint32_t header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
+ printf("nfp error reading extended capabilities\n");
+ return -1;
+ }
+
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
+ printf("nfp error reading extended capabilities\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nfp6000_set_serial(struct rte_pci_device *dev, struct nfp_cpp *cpp)
+{
+ uint16_t tmp;
+ uint8_t serial[6];
+ int serial_len = 6;
+ int pos;
+
+ pos = nfp_pci_find_next_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
+ if (pos <= 0) {
+ printf("PCI_EXT_CAP_ID_DSN not found. nfp set serial failed\n");
+ return -1;
+ } else {
+ pos += 6;
+ }
+
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
+ return -1;
+ }
+
+ serial[4] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[5] = (uint8_t)(tmp & 0xff);
+
+ pos += 2;
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
+ return -1;
+ }
+
+ serial[2] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[3] = (uint8_t)(tmp & 0xff);
+
+ pos += 2;
+ if (rte_pci_read_config(dev, &tmp, 2, pos) < 0) {
+ printf("nfp set serial failed\n");
+ return -1;
+ }
+
+ serial[0] = (uint8_t)((tmp >> 8) & 0xff);
+ serial[1] = (uint8_t)(tmp & 0xff);
+
+ nfp_cpp_serial_set(cpp, serial, serial_len);
+
+ return 0;
+}
+
+static int
+nfp6000_set_barsz(struct rte_pci_device *dev, struct nfp_pcie_user *desc)
+{
+ unsigned long tmp;
+ int i = 0;
+
+ tmp = dev->mem_resource[0].len;
+
+ while (tmp >>= 1)
+ i++;
+
+ desc->barsz = i;
+ return 0;
+}
+
+static int
+nfp6000_init(struct nfp_cpp *cpp, struct rte_pci_device *dev)
+{
+ int ret = 0;
+ uint32_t model;
+ struct nfp_pcie_user *desc;
+
+ desc = malloc(sizeof(*desc));
+ if (!desc)
+ return -1;
+
+
+ memset(desc->busdev, 0, BUSDEV_SZ);
+ strlcpy(desc->busdev, dev->device.name, sizeof(desc->busdev));
+
+ if (cpp->driver_lock_needed) {
+ ret = nfp_acquire_process_lock(desc);
+ if (ret)
+ return -1;
+ }
+
+ if (nfp6000_set_model(dev, cpp) < 0)
+ return -1;
+ if (nfp6000_set_interface(dev, cpp) < 0)
+ return -1;
+ if (nfp6000_set_serial(dev, cpp) < 0)
+ return -1;
+ if (nfp6000_set_barsz(dev, desc) < 0)
+ return -1;
+
+ desc->cfg = (char *)dev->mem_resource[0].addr;
+
+ nfp_enable_bars(desc);
+
+ nfp_cpp_priv_set(cpp, desc);
+
+ model = __nfp_cpp_model_autodetect(cpp);
+ nfp_cpp_model_set(cpp, model);
+
+ return ret;
+}
+
+static void
+nfp6000_free(struct nfp_cpp *cpp)
+{
+ struct nfp_pcie_user *desc = nfp_cpp_priv(cpp);
+
+ nfp_disable_bars(desc);
+ if (cpp->driver_lock_needed)
+ close(desc->lock);
+ close(desc->device);
+ free(desc);
+}
+
+static const struct nfp_cpp_operations nfp6000_pcie_ops = {
+ .init = nfp6000_init,
+ .free = nfp6000_free,
+
+ .area_priv_size = sizeof(struct nfp6000_area_priv),
+ .area_init = nfp6000_area_init,
+ .area_acquire = nfp6000_area_acquire,
+ .area_release = nfp6000_area_release,
+ .area_mapped = nfp6000_area_mapped,
+ .area_read = nfp6000_area_read,
+ .area_write = nfp6000_area_write,
+ .area_iomem = nfp6000_area_iomem,
+};
+
+const struct nfp_cpp_operations *
+nfp_cpp_transport_operations(void)
+{
+ return &nfp6000_pcie_ops;
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_cppcore.c b/drivers/net/nfp/nfpcore/nfp_cppcore.c
new file mode 100644
index 00000000..75d3c974
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_cppcore.c
@@ -0,0 +1,858 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/types.h>
+
+#include <rte_byteorder.h>
+#include <rte_ethdev_pci.h>
+
+#include "nfp_cpp.h"
+#include "nfp_target.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp6000/nfp_xpb.h"
+#include "nfp_nffw.h"
+
+#define NFP_PL_DEVICE_ID 0x00000004
+#define NFP_PL_DEVICE_ID_MASK 0xff
+
+#define NFP6000_ARM_GCSR_SOFTMODEL0 0x00400144
+
+void
+nfp_cpp_priv_set(struct nfp_cpp *cpp, void *priv)
+{
+ cpp->priv = priv;
+}
+
+void *
+nfp_cpp_priv(struct nfp_cpp *cpp)
+{
+ return cpp->priv;
+}
+
+void
+nfp_cpp_model_set(struct nfp_cpp *cpp, uint32_t model)
+{
+ cpp->model = model;
+}
+
+uint32_t
+nfp_cpp_model(struct nfp_cpp *cpp)
+{
+ if (!cpp)
+ return NFP_CPP_MODEL_INVALID;
+
+ if (cpp->model == 0)
+ cpp->model = __nfp_cpp_model_autodetect(cpp);
+
+ return cpp->model;
+}
+
+void
+nfp_cpp_interface_set(struct nfp_cpp *cpp, uint32_t interface)
+{
+ cpp->interface = interface;
+}
+
+int
+nfp_cpp_serial(struct nfp_cpp *cpp, const uint8_t **serial)
+{
+ *serial = cpp->serial;
+ return cpp->serial_len;
+}
+
+int
+nfp_cpp_serial_set(struct nfp_cpp *cpp, const uint8_t *serial,
+ size_t serial_len)
+{
+ if (cpp->serial_len)
+ free(cpp->serial);
+
+ cpp->serial = malloc(serial_len);
+ if (!cpp->serial)
+ return -1;
+
+ memcpy(cpp->serial, serial, serial_len);
+ cpp->serial_len = serial_len;
+
+ return 0;
+}
+
+uint16_t
+nfp_cpp_interface(struct nfp_cpp *cpp)
+{
+ if (!cpp)
+ return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_INVALID, 0, 0);
+
+ return cpp->interface;
+}
+
+void *
+nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
+{
+ return &cpp_area[1];
+}
+
+struct nfp_cpp *
+nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->cpp;
+}
+
+const char *
+nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
+{
+ return cpp_area->name;
+}
+
+/*
+ * nfp_cpp_area_alloc_with_name - allocate a new CPP area
+ * @cpp: CPP handle
+ * @dest: CPP id
+ * @name: name of the area, or NULL for the default
+ * @address: start address on CPP target
+ * @size: size of area in bytes
+ *
+ * Allocate and initialize a CPP area structure. The area must later
+ * be locked down with an 'acquire' before it can be safely accessed.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, uint32_t dest,
+ const char *name, unsigned long long address,
+ unsigned long size)
+{
+ struct nfp_cpp_area *area;
+ uint64_t tmp64 = (uint64_t)address;
+ int tmp, err;
+
+ if (!cpp)
+ return NULL;
+
+ /* CPP bus uses only a 40-bit address */
+ if ((address + size) > (1ULL << 40))
+ return NFP_ERRPTR(EFAULT);
+
+ /* Remap from cpp_island to cpp_target */
+ err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
+ if (err < 0)
+ return NULL;
+
+ address = (unsigned long long)tmp64;
+
+ if (!name)
+ name = "";
+
+ area = calloc(1, sizeof(*area) + cpp->op->area_priv_size +
+ strlen(name) + 1);
+ if (!area)
+ return NULL;
+
+ area->cpp = cpp;
+ area->name = ((char *)area) + sizeof(*area) + cpp->op->area_priv_size;
+ memcpy(area->name, name, strlen(name) + 1);
+
+ /*
+ * Preserve errno around the call to area_init, since most
+ * implementations will blindly call nfp_target_action_width() for both
+ * read or write modes, and that will set errno to EINVAL.
+ */
+ tmp = errno;
+
+ err = cpp->op->area_init(area, dest, address, size);
+ if (err < 0) {
+ free(area);
+ return NULL;
+ }
+
+ /* Restore errno */
+ errno = tmp;
+
+ area->offset = address;
+ area->size = size;
+
+ return area;
+}
+
+struct nfp_cpp_area *
+nfp_cpp_area_alloc(struct nfp_cpp *cpp, uint32_t dest,
+ unsigned long long address, unsigned long size)
+{
+ return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
+}
+
+/*
+ * nfp_cpp_area_alloc_acquire - allocate a new CPP area and lock it down
+ *
+ * @cpp: CPP handle
+ * @dest: CPP id
+ * @address: start address on CPP target
+ * @size: size of area
+ *
+ * Allocate and initialize a CPP area structure, and lock it down so
+ * that it can be accessed directly.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * NOTE: The area must also be 'released' when the structure is freed.
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, unsigned long size)
+{
+ struct nfp_cpp_area *area;
+
+ area = nfp_cpp_area_alloc(cpp, destination, address, size);
+ if (!area)
+ return NULL;
+
+ if (nfp_cpp_area_acquire(area)) {
+ nfp_cpp_area_free(area);
+ return NULL;
+ }
+
+ return area;
+}
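+
+/*
+ * Illustrative usage sketch (not part of the driver; the CPP address
+ * 0x8000 is hypothetical):
+ *
+ *    uint32_t dest = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0);
+ *    struct nfp_cpp_area *area;
+ *    uint32_t value;
+ *
+ *    area = nfp_cpp_area_alloc_acquire(cpp, dest, 0x8000, sizeof(value));
+ *    if (area) {
+ *        nfp_cpp_area_readl(area, 0, &value);
+ *        nfp_cpp_area_release_free(area);
+ *    }
+ */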
+
+/*
+ * nfp_cpp_area_free - free up the CPP area
+ * @area: CPP area handle
+ *
+ * Frees up memory resources held by the CPP area.
+ */
+void
+nfp_cpp_area_free(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_cleanup)
+ area->cpp->op->area_cleanup(area);
+ free(area);
+}
+
+/*
+ * nfp_cpp_area_release_free - release CPP area and free it
+ * @area: CPP area handle
+ *
+ * Releases the CPP area and frees up the memory resources held by it.
+ */
+void
+nfp_cpp_area_release_free(struct nfp_cpp_area *area)
+{
+ nfp_cpp_area_release(area);
+ nfp_cpp_area_free(area);
+}
+
+/*
+ * nfp_cpp_area_acquire - lock down a CPP area for access
+ * @area: CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity. Area
+ * must always be locked down before being accessed.
+ */
+int
+nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_acquire) {
+ int err = area->cpp->op->area_acquire(area);
+
+ if (err < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * nfp_cpp_area_release - release a locked down CPP area
+ * @area: CPP area handle
+ *
+ * Releases a previously locked down CPP area.
+ */
+void
+nfp_cpp_area_release(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_release)
+ area->cpp->op->area_release(area);
+}
+
+/*
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ *
+ * @area: CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: pointer to the area, or NULL
+ */
+void *
+nfp_cpp_area_iomem(struct nfp_cpp_area *area)
+{
+ void *iomem = NULL;
+
+ if (area->cpp->op->area_iomem)
+ iomem = area->cpp->op->area_iomem(area);
+
+ return iomem;
+}
+
+/*
+ * nfp_cpp_area_read - read data from CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @kernel_vaddr: kernel address to put data into
+ * @length: number of bytes to read
+ *
+ * Read data from indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ */
+int
+nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+ void *kernel_vaddr, size_t length)
+{
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EFAULT);
+
+ return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
+}
+
+/*
+ * nfp_cpp_area_write - write data to CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @kernel_vaddr: kernel address to read data from
+ * @length: number of bytes to write
+ *
+ * Write data to indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ */
+int
+nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+ const void *kernel_vaddr, size_t length)
+{
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EFAULT);
+
+ return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
+}
+
+void *
+nfp_cpp_area_mapped(struct nfp_cpp_area *area)
+{
+ if (area->cpp->op->area_mapped)
+ return area->cpp->op->area_mapped(area);
+ return NULL;
+}
+
+/*
+ * nfp_cpp_area_check_range - check if address range fits in CPP area
+ *
+ * @area: CPP area handle
+ * @offset: offset into CPP area
+ * @length: size of address range in bytes
+ *
+ * Check if address range fits within CPP area. Return 0 if area fits
+ * or -1 on error.
+ */
+int
+nfp_cpp_area_check_range(struct nfp_cpp_area *area, unsigned long long offset,
+ unsigned long length)
+{
+ if (((offset + length) > area->size))
+ return NFP_ERRNO(EFAULT);
+
+ return 0;
+}
+
+/*
+ * Return the correct CPP address, and fixup xpb_addr as needed,
+ * based upon NFP model.
+ */
+static uint32_t
+nfp_xpb_to_cpp(struct nfp_cpp *cpp, uint32_t *xpb_addr)
+{
+ uint32_t xpb;
+ int island;
+
+ if (!NFP_CPP_MODEL_IS_6000(cpp->model))
+ return 0;
+
+ xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
+
+ /*
+ * Ensure that non-local XPB accesses go out through the
+ * global XPBM bus.
+ */
+ island = ((*xpb_addr) >> 24) & 0x3f;
+
+ if (!island)
+ return xpb;
+
+ if (island == 1) {
+ /*
+ * Accesses to the ARM Island overlay uses Island 0
+ * Global Bit
+ */
+ (*xpb_addr) &= ~0x7f000000;
+ if (*xpb_addr < 0x60000)
+ *xpb_addr |= (1 << 30);
+ else
+ /* And only non-ARM interfaces use island id = 1 */
+ if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) !=
+ NFP_CPP_INTERFACE_TYPE_ARM)
+ *xpb_addr |= (1 << 24);
+ } else {
+ (*xpb_addr) |= (1 << 30);
+ }
+
+ return xpb;
+}
+
+int
+nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t *value)
+{
+ int sz;
+ uint32_t tmp = 0;
+
+ sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_32(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_32(value);
+ sz = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t *value)
+{
+ int sz;
+ uint64_t tmp = 0;
+
+ sz = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_64(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
+ uint64_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_64(value);
+ sz = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_readl(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint32_t *value)
+{
+ int sz;
+ uint32_t tmp;
+
+ sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_32(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_writel(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint32_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_32(value);
+ sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_readq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint64_t *value)
+{
+ int sz;
+ uint64_t tmp;
+
+ sz = nfp_cpp_read(cpp, cpp_id, address, &tmp, sizeof(tmp));
+ *value = rte_le_to_cpu_64(tmp);
+
+ return (sz == sizeof(*value)) ? 0 : -1;
+}
+
+int
+nfp_cpp_writeq(struct nfp_cpp *cpp, uint32_t cpp_id, unsigned long long address,
+ uint64_t value)
+{
+ int sz;
+
+ value = rte_cpu_to_le_64(value);
+ sz = nfp_cpp_write(cpp, cpp_id, address, &value, sizeof(value));
+
+ return (sz == sizeof(value)) ? 0 : -1;
+}
+
+int
+nfp_xpb_writel(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t value)
+{
+ uint32_t cpp_dest;
+
+ cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+ return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
+}
+
+int
+nfp_xpb_readl(struct nfp_cpp *cpp, uint32_t xpb_addr, uint32_t *value)
+{
+ uint32_t cpp_dest;
+
+ cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+ return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
+}
+
+static struct nfp_cpp *
+nfp_cpp_alloc(struct rte_pci_device *dev, int driver_lock_needed)
+{
+ const struct nfp_cpp_operations *ops;
+ struct nfp_cpp *cpp;
+ int err;
+
+ ops = nfp_cpp_transport_operations();
+
+ if (!ops || !ops->init)
+ return NFP_ERRPTR(EINVAL);
+
+ cpp = calloc(1, sizeof(*cpp));
+ if (!cpp)
+ return NULL;
+
+ cpp->op = ops;
+ cpp->driver_lock_needed = driver_lock_needed;
+
+ if (cpp->op->init) {
+ err = cpp->op->init(cpp, dev);
+ if (err < 0) {
+ free(cpp);
+ return NULL;
+ }
+ }
+
+ if (NFP_CPP_MODEL_IS_6000(nfp_cpp_model(cpp))) {
+ uint32_t xpbaddr;
+ size_t tgt;
+
+ for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
+ /* Hardcoded XPB IMB Base, island 0 */
+ xpbaddr = 0x000a0000 + (tgt * 4);
+ err = nfp_xpb_readl(cpp, xpbaddr,
+ (uint32_t *)&cpp->imb_cat_table[tgt]);
+ if (err < 0) {
+ free(cpp);
+ return NULL;
+ }
+ }
+ }
+
+ return cpp;
+}
+
+/*
+ * nfp_cpp_free - free the CPP handle
+ * @cpp: CPP handle
+ */
+void
+nfp_cpp_free(struct nfp_cpp *cpp)
+{
+ if (cpp->op && cpp->op->free)
+ cpp->op->free(cpp);
+
+ if (cpp->serial_len)
+ free(cpp->serial);
+
+ free(cpp);
+}
+
+struct nfp_cpp *
+nfp_cpp_from_device_name(struct rte_pci_device *dev, int driver_lock_needed)
+{
+ return nfp_cpp_alloc(dev, driver_lock_needed);
+}
+
+/*
+ * Modify bits of a 32-bit value from the XPB bus
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to modify
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_xpb_writelm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value)
+{
+ int err;
+ uint32_t tmp;
+
+ err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+ if (err < 0)
+ return err;
+
+ tmp &= ~mask;
+ tmp |= (mask & value);
+ return nfp_xpb_writel(cpp, xpb_tgt, tmp);
+}
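+
+/*
+ * Illustrative sketch (the XPB address is hypothetical): set bit 0 of a
+ * register without disturbing the other bits:
+ *
+ *    nfp_xpb_writelm(cpp, xpb_addr, 0x1, 0x1);
+ */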
+
+/*
+ * Wait for bits of a 32-bit value on the XPB bus to match a given value
+ *
+ * @param cpp NFP CPP device handle
+ * @param xpb_tgt XPB target and address
+ * @param mask mask of bits to alter
+ * @param value value to monitor for
+ * @param timeout_us maximum number of us to wait (-1 for forever)
+ *
+ * @return >= 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_xpb_waitlm(struct nfp_cpp *cpp, uint32_t xpb_tgt, uint32_t mask,
+ uint32_t value, int timeout_us)
+{
+ uint32_t tmp;
+ int err;
+
+ do {
+ err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+ if (err < 0)
+ goto exit;
+
+ if ((tmp & mask) == (value & mask)) {
+ if (timeout_us < 0)
+ timeout_us = 0;
+ break;
+ }
+
+ if (timeout_us < 0)
+ continue;
+
+ timeout_us -= 100;
+ usleep(100);
+ } while (timeout_us >= 0);
+
+ if (timeout_us < 0)
+ err = NFP_ERRNO(ETIMEDOUT);
+ else
+ err = timeout_us;
+
+exit:
+ return err;
+}
+
+/*
+ * nfp_cpp_read - read from CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer for result
+ * @length: number of bytes to read
+ */
+int
+nfp_cpp_read(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, void *kernel_vaddr, size_t length)
+{
+ struct nfp_cpp_area *area;
+ int err;
+
+ area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length);
+ if (!area) {
+ printf("Area allocation/acquire failed\n");
+ return -1;
+ }
+
+ err = nfp_cpp_area_read(area, 0, kernel_vaddr, length);
+
+ nfp_cpp_area_release_free(area);
+ return err;
+}
+
+/*
+ * nfp_cpp_write - write to CPP target
+ * @cpp: CPP handle
+ * @destination: CPP id
+ * @address: offset into CPP target
+ * @kernel_vaddr: kernel buffer to read from
+ * @length: number of bytes to write
+ */
+int
+nfp_cpp_write(struct nfp_cpp *cpp, uint32_t destination,
+ unsigned long long address, const void *kernel_vaddr,
+ size_t length)
+{
+ struct nfp_cpp_area *area;
+ int err;
+
+ area = nfp_cpp_area_alloc_acquire(cpp, destination, address, length);
+ if (!area)
+ return -1;
+
+ err = nfp_cpp_area_write(area, 0, kernel_vaddr, length);
+
+ nfp_cpp_area_release_free(area);
+ return err;
+}
+
+/*
+ * nfp_cpp_area_fill - fill a CPP area with a value
+ * @area: CPP area
+ * @offset: offset into CPP area
+ * @value: value to fill with
+ * @length: length of area to fill
+ */
+int
+nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+ uint32_t value, size_t length)
+{
+ int err;
+ size_t i;
+ uint64_t value64;
+
+ value = rte_cpu_to_le_32(value);
+ value64 = ((uint64_t)value << 32) | value;
+
+ if ((offset + length) > area->size)
+ return NFP_ERRNO(EINVAL);
+
+ if ((area->offset + offset) & 3)
+ return NFP_ERRNO(EINVAL);
+
+ if (((area->offset + offset) & 7) == 4 && length >= 4) {
+ err = nfp_cpp_area_write(area, offset, &value, sizeof(value));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value))
+ return NFP_ERRNO(ENOSPC);
+ offset += sizeof(value);
+ length -= sizeof(value);
+ }
+
+ for (i = 0; (i + sizeof(value)) < length; i += sizeof(value64)) {
+ err =
+ nfp_cpp_area_write(area, offset + i, &value64,
+ sizeof(value64));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value64))
+ return NFP_ERRNO(ENOSPC);
+ }
+
+ if ((i + sizeof(value)) <= length) {
+ err =
+ nfp_cpp_area_write(area, offset + i, &value, sizeof(value));
+ if (err < 0)
+ return err;
+ if (err != sizeof(value))
+ return NFP_ERRNO(ENOSPC);
+ i += sizeof(value);
+ }
+
+ return (int)i;
+}
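+
+/*
+ * Example (illustrative sketch, assuming a 4-byte aligned, acquired
+ * area): zero the first 256 bytes:
+ *
+ *    nfp_cpp_area_fill(area, 0, 0, 256);
+ */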
+
+/*
+ * NOTE: This code should not use nfp_xpb_* functions,
+ * as those are model-specific
+ */
+uint32_t
+__nfp_cpp_model_autodetect(struct nfp_cpp *cpp)
+{
+ uint32_t arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
+ uint32_t model = 0;
+
+ nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, &model);
+
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ uint32_t tmp;
+
+ nfp_cpp_model_set(cpp, model);
+
+ /* The PL's PluDeviceID revision code is authoritative */
+ model &= ~0xff;
+ nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) +
+ NFP_PL_DEVICE_ID, &tmp);
+ model |= (NFP_PL_DEVICE_ID_MASK & tmp) - 0x10;
+ }
+
+ return model;
+}
+
+/*
+ * nfp_cpp_map_area() - Helper function to map an area
+ * @cpp: NFP CPP handler
+ * @domain: CPP domain
+ * @target: CPP target
+ * @addr: CPP address
+ * @size: Size of the area
+ * @area: Area handle (output)
+ *
+ * Map an area for IOMEM access. To undo the effect of this function,
+ * call @nfp_cpp_area_release_free(*area).
+ *
+ * Return: Pointer to memory mapped area, or NULL on failure
+ */
+uint8_t *
+nfp_cpp_map_area(struct nfp_cpp *cpp, int domain, int target, uint64_t addr,
+ unsigned long size, struct nfp_cpp_area **area)
+{
+ uint8_t *res;
+ uint32_t dest;
+
+ dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain);
+
+ *area = nfp_cpp_area_alloc_acquire(cpp, dest, addr, size);
+ if (!*area)
+ goto err_eio;
+
+ res = nfp_cpp_area_iomem(*area);
+ if (!res)
+ goto err_release_free;
+
+ return res;
+
+err_release_free:
+ nfp_cpp_area_release_free(*area);
+err_eio:
+ return NULL;
+}
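+
+/*
+ * Illustrative usage sketch (domain, target and address values are
+ * hypothetical):
+ *
+ *    struct nfp_cpp_area *area;
+ *    uint8_t *mem;
+ *
+ *    mem = nfp_cpp_map_area(cpp, 0, NFP_CPP_TARGET_MU, 0x8000, 0x100,
+ *                           &area);
+ *    if (mem) {
+ *        ... access mem[0..0xff] directly ...
+ *        nfp_cpp_area_release_free(area);
+ *    }
+ */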
diff --git a/drivers/net/nfp/nfpcore/nfp_crc.c b/drivers/net/nfp/nfpcore/nfp_crc.c
new file mode 100644
index 00000000..20431bf8
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_crc.c
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+
+#include "nfp_crc.h"
+
+static inline uint32_t
+nfp_crc32_be_generic(uint32_t crc, unsigned char const *p, size_t len,
+ uint32_t polynomial)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 24;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x80000000) ? polynomial :
+ 0);
+ }
+ return crc;
+}
+
+static inline uint32_t
+nfp_crc32_be(uint32_t crc, unsigned char const *p, size_t len)
+{
+ return nfp_crc32_be_generic(crc, p, len, CRCPOLY_BE);
+}
+
+static uint32_t
+nfp_crc32_posix_end(uint32_t crc, size_t total_len)
+{
+ /* Extend with the length of the string. */
+ while (total_len != 0) {
+ uint8_t c = total_len & 0xff;
+
+ crc = nfp_crc32_be(crc, &c, 1);
+ total_len >>= 8;
+ }
+
+ return ~crc;
+}
+
+uint32_t
+nfp_crc32_posix(const void *buff, size_t len)
+{
+ return nfp_crc32_posix_end(nfp_crc32_be(0, buff, len), len);
+}
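+
+/*
+ * Example (illustrative only): checksumming a buffer the way the HWInfo
+ * validation code does, matching the POSIX cksum(1) algorithm:
+ *
+ *    uint32_t crc = nfp_crc32_posix(buf, len);
+ */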
diff --git a/drivers/net/nfp/nfpcore/nfp_crc.h b/drivers/net/nfp/nfpcore/nfp_crc.h
new file mode 100644
index 00000000..f99c89fc
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_crc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_CRC_H__
+#define __NFP_CRC_H__
+
+/*
+ * There are multiple 16-bit CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRCPOLY_LE 0xedb88320
+#define CRCPOLY_BE 0x04c11db7
+
+uint32_t nfp_crc32_posix(const void *buff, size_t len);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/nfp/nfpcore/nfp_hwinfo.c
new file mode 100644
index 00000000..c0516bf8
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.c
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
+ * after chip reset.
+ *
+ * Examples of the fields:
+ * me.count = 40
+ * me.mask = 0x7f_ffff_ffff
+ *
+ * me.count is the total number of MEs on the system.
+ * me.mask is the bitmask of MEs that are available for application usage.
+ *
+ * (ie, in this example, ME 39 has been reserved by boardconfig.)
+ */
+
+#include <stdio.h>
+#include <time.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+#include "nfp_hwinfo.h"
+#include "nfp_crc.h"
+
+static int
+nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo)
+{
+ return hwinfo->version & NFP_HWINFO_VERSION_UPDATING;
+}
+
+static int
+nfp_hwinfo_db_walk(struct nfp_hwinfo *hwinfo, uint32_t size)
+{
+ const char *key, *val, *end = hwinfo->data + size;
+
+ for (key = hwinfo->data; *key && key < end;
+ key = val + strlen(val) + 1) {
+ val = key + strlen(key) + 1;
+ if (val >= end) {
+ printf("Bad HWINFO - overflowing key\n");
+ return -EINVAL;
+ }
+
+ if (val + strlen(val) + 1 > end) {
+ printf("Bad HWINFO - overflowing value\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int
+nfp_hwinfo_db_validate(struct nfp_hwinfo *db, uint32_t len)
+{
+ uint32_t size, new_crc, *crc;
+
+ size = db->size;
+ if (size > len) {
+ printf("Unsupported hwinfo size %u > %u\n", size, len);
+ return -EINVAL;
+ }
+
+ size -= sizeof(uint32_t);
+ new_crc = nfp_crc32_posix((char *)db, size);
+ crc = (uint32_t *)(db->start + size);
+ if (new_crc != *crc) {
+ printf("Corrupt hwinfo table (CRC mismatch)\n");
+ printf("\tcalculated 0x%x, expected 0x%x\n", new_crc, *crc);
+ return -EINVAL;
+ }
+
+ return nfp_hwinfo_db_walk(db, size);
+}
+
+static struct nfp_hwinfo *
+nfp_hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
+{
+ struct nfp_hwinfo *header;
+ void *res;
+ uint64_t cpp_addr;
+ uint32_t cpp_id;
+ int err;
+ uint8_t *db;
+
+ res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
+ if (res) {
+ cpp_id = nfp_resource_cpp_id(res);
+ cpp_addr = nfp_resource_address(res);
+ *cpp_size = nfp_resource_size(res);
+
+ nfp_resource_release(res);
+
+ if (*cpp_size < HWINFO_SIZE_MIN)
+ return NULL;
+ } else {
+ return NULL;
+ }
+
+ db = malloc(*cpp_size + 1);
+ if (!db)
+ return NULL;
+
+ err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
+ if (err != (int)*cpp_size)
+ goto exit_free;
+
+ header = (void *)db;
+ printf("NFP HWINFO header: %08x\n", *(uint32_t *)header);
+ if (nfp_hwinfo_is_updating(header))
+ goto exit_free;
+
+ if (header->version != NFP_HWINFO_VERSION_2) {
+ printf("Unknown HWInfo version: 0x%08x\n",
+ header->version);
+ goto exit_free;
+ }
+
+ /* NULL-terminate for safety */
+ db[*cpp_size] = '\0';
+
+ return (void *)db;
+exit_free:
+ free(db);
+ return NULL;
+}
+
+static struct nfp_hwinfo *
+nfp_hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
+{
+ struct timespec wait;
+ struct nfp_hwinfo *db;
+ int count;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 10000000;
+ count = 0;
+
+ for (;;) {
+ db = nfp_hwinfo_try_fetch(cpp, hwdb_size);
+ if (db)
+ return db;
+
+ nanosleep(&wait, NULL);
+ if (count++ > 200) {
+ printf("NFP access error\n");
+ return NULL;
+ }
+ }
+}
+
+struct nfp_hwinfo *
+nfp_hwinfo_read(struct nfp_cpp *cpp)
+{
+ struct nfp_hwinfo *db;
+ size_t hwdb_size = 0;
+ int err;
+
+ db = nfp_hwinfo_fetch(cpp, &hwdb_size);
+ if (!db)
+ return NULL;
+
+ err = nfp_hwinfo_db_validate(db, hwdb_size);
+ if (err) {
+ free(db);
+ return NULL;
+ }
+ return db;
+}
+
+/*
+ * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
+ * @hwinfo: NFP HWinfo table
+ * @lookup: HWInfo name to search for
+ *
+ * Return: Value of the HWInfo name, or NULL
+ */
+const char *
+nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup)
+{
+ const char *key, *val, *end;
+
+ if (!hwinfo || !lookup)
+ return NULL;
+
+ end = hwinfo->data + hwinfo->size - sizeof(uint32_t);
+
+ for (key = hwinfo->data; *key && key < end;
+ key = val + strlen(val) + 1) {
+ val = key + strlen(key) + 1;
+
+ if (strcmp(key, lookup) == 0)
+ return val;
+ }
+
+ return NULL;
+}
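+
+/*
+ * Illustrative usage sketch (the key name is hypothetical; actual key
+ * names depend on the firmware's HWInfo contents):
+ *
+ *    struct nfp_hwinfo *hwinfo = nfp_hwinfo_read(cpp);
+ *    const char *val;
+ *
+ *    if (hwinfo) {
+ *        val = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
+ *        if (val)
+ *            printf("Part number: %s\n", val);
+ *        free(hwinfo);
+ *    }
+ */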
diff --git a/drivers/net/nfp/nfpcore/nfp_hwinfo.h b/drivers/net/nfp/nfpcore/nfp_hwinfo.h
new file mode 100644
index 00000000..ccc61632
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_hwinfo.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_HWINFO_H__
+#define __NFP_HWINFO_H__
+
+#include <inttypes.h>
+
+#define HWINFO_SIZE_MIN 0x100
+
+/*
+ * The Hardware Info Table defines the properties of the system.
+ *
+ * HWInfo v1 Table (fixed size)
+ *
+ * 0x0000: uint32_t version Hardware Info Table version (1.0)
+ * 0x0004: uint32_t size Total size of the table, including the
+ * CRC32 (IEEE 802.3)
+ * 0x0008: uint32_t jumptab Offset of key/value table
+ * 0x000c: uint32_t keys Total number of keys in the key/value
+ * table
+ * NNNNNN: Key/value jump table and string data
+ * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * HWInfo v2 Table (variable size)
+ *
+ * 0x0000: uint32_t version Hardware Info Table version (2.0)
+ * 0x0004: uint32_t size Current size of the data area, excluding
+ * CRC32
+ * 0x0008: uint32_t limit Maximum size of the table
+ * 0x000c: uint32_t reserved Unused, set to zero
+ * NNNNNN: Key/value data
+ * (size - 4): uint32_t crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * If the HWInfo table is in the process of being updated, the low bit of
+ * version will be set.
+ *
+ * HWInfo v1 Key/Value Table
+ * -------------------------
+ *
+ * The key/value table is a set of offsets to ASCIIZ strings which have
+ * been strcmp(3) sorted (yes, please use bsearch(3) on the table).
+ *
+ * All keys are guaranteed to be unique.
+ *
+ * N+0: uint32_t key_1 Offset to the first key
+ * N+4: uint32_t val_1 Offset to the first value
+ * N+8: uint32_t key_2 Offset to the second key
+ * N+c: uint32_t val_2 Offset to the second value
+ * ...
+ *
+ * HWInfo v2 Key/Value Table
+ * -------------------------
+ *
+ * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000'
+ *
+ * Unsorted.
+ */
+
+#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_UPDATING BIT(0)
+
+struct nfp_hwinfo {
+ uint8_t start[0];
+
+ uint32_t version;
+ uint32_t size;
+
+ /* v2 specific fields */
+ uint32_t limit;
+ uint32_t resv;
+
+ char data[];
+};
+
+struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
+
+const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_mip.c b/drivers/net/nfp/nfpcore/nfp_mip.c
new file mode 100644
index 00000000..c86966df
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_mip.c
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+
+#include "nfp_cpp.h"
+#include "nfp_mip.h"
+#include "nfp_nffw.h"
+
+#define NFP_MIP_SIGNATURE rte_cpu_to_le_32(0x0050494d) /* "MIP\0" */
+#define NFP_MIP_VERSION rte_cpu_to_le_32(1)
+#define NFP_MIP_MAX_OFFSET (256 * 1024)
+
+struct nfp_mip {
+ uint32_t signature;
+ uint32_t mip_version;
+ uint32_t mip_size;
+ uint32_t first_entry;
+
+ uint32_t version;
+ uint32_t buildnum;
+ uint32_t buildtime;
+ uint32_t loadtime;
+
+ uint32_t symtab_addr;
+ uint32_t symtab_size;
+ uint32_t strtab_addr;
+ uint32_t strtab_size;
+
+ char name[16];
+ char toolchain[32];
+};
+
+/* Read memory and check if it could be a valid MIP */
+static int
+nfp_mip_try_read(struct nfp_cpp *cpp, uint32_t cpp_id, uint64_t addr,
+ struct nfp_mip *mip)
+{
+ int ret;
+
+ ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
+ if (ret != sizeof(*mip)) {
+ printf("Failed to read MIP data (%d, %zu)\n",
+ ret, sizeof(*mip));
+ return -EIO;
+ }
+ if (mip->signature != NFP_MIP_SIGNATURE) {
+ printf("Incorrect MIP signature (0x%08x)\n",
+ rte_le_to_cpu_32(mip->signature));
+ return -EINVAL;
+ }
+ if (mip->mip_version != NFP_MIP_VERSION) {
+ printf("Unsupported MIP version (%d)\n",
+ rte_le_to_cpu_32(mip->mip_version));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Try to locate MIP using the resource table */
+static int
+nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip)
+{
+ struct nfp_nffw_info *nffw_info;
+ uint32_t cpp_id;
+ uint64_t addr;
+ int err;
+
+ nffw_info = nfp_nffw_info_open(cpp);
+ if (!nffw_info)
+ return -ENODEV;
+
+ err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr);
+ if (err)
+ goto exit_close_nffw;
+
+ err = nfp_mip_try_read(cpp, cpp_id, addr, mip);
+exit_close_nffw:
+ nfp_nffw_info_close(nffw_info);
+ return err;
+}
+
+/*
+ * nfp_mip_open() - Get device MIP structure
+ * @cpp: NFP CPP Handle
+ *
+ * Copy MIP structure from NFP device and return it. The returned
+ * structure is handled internally by the library and should be
+ * freed by calling nfp_mip_close().
+ *
+ * Return: pointer to mip, NULL on failure.
+ */
+struct nfp_mip *
+nfp_mip_open(struct nfp_cpp *cpp)
+{
+ struct nfp_mip *mip;
+ int err;
+
+ mip = malloc(sizeof(*mip));
+ if (!mip)
+ return NULL;
+
+ err = nfp_mip_read_resource(cpp, mip);
+ if (err) {
+ free(mip);
+ return NULL;
+ }
+
+ mip->name[sizeof(mip->name) - 1] = 0;
+
+ return mip;
+}
+
+void
+nfp_mip_close(struct nfp_mip *mip)
+{
+ free(mip);
+}
+
+const char *
+nfp_mip_name(const struct nfp_mip *mip)
+{
+ return mip->name;
+}
+
+/*
+ * nfp_mip_symtab() - Get the address and size of the MIP symbol table
+ * @mip: MIP handle
+ * @addr: Location for NFP DDR address of MIP symbol table
+ * @size: Location for size of MIP symbol table
+ */
+void
+nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size)
+{
+ *addr = rte_le_to_cpu_32(mip->symtab_addr);
+ *size = rte_le_to_cpu_32(mip->symtab_size);
+}
+
+/*
+ * nfp_mip_strtab() - Get the address and size of the MIP symbol name table
+ * @mip: MIP handle
+ * @addr: Location for NFP DDR address of MIP symbol name table
+ * @size: Location for size of MIP symbol name table
+ */
+void
+nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size)
+{
+ *addr = rte_le_to_cpu_32(mip->strtab_addr);
+ *size = rte_le_to_cpu_32(mip->strtab_size);
+}
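+
+/*
+ * Illustrative usage sketch (not part of the driver):
+ *
+ *    struct nfp_mip *mip = nfp_mip_open(cpp);
+ *    uint32_t addr, size;
+ *
+ *    if (mip) {
+ *        nfp_mip_symtab(mip, &addr, &size);
+ *        printf("%s: symtab at %#x (%u bytes)\n",
+ *               nfp_mip_name(mip), addr, size);
+ *        nfp_mip_close(mip);
+ *    }
+ */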
diff --git a/drivers/net/nfp/nfpcore/nfp_mip.h b/drivers/net/nfp/nfpcore/nfp_mip.h
new file mode 100644
index 00000000..d0919b58
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_mip.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_MIP_H__
+#define __NFP_MIP_H__
+
+#include "nfp_nffw.h"
+
+struct nfp_mip;
+
+struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
+void nfp_mip_close(struct nfp_mip *mip);
+
+const char *nfp_mip_name(const struct nfp_mip *mip);
+void nfp_mip_symtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size);
+void nfp_mip_strtab(const struct nfp_mip *mip, uint32_t *addr, uint32_t *size);
+int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id,
+ uint64_t *off);
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_mutex.c b/drivers/net/nfp/nfpcore/nfp_mutex.c
new file mode 100644
index 00000000..318c5800
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_mutex.c
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <errno.h>
+
+#include <malloc.h>
+#include <time.h>
+#include <sched.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define MUTEX_LOCKED(interface) ((((uint32_t)(interface)) << 16) | 0x000f)
+#define MUTEX_UNLOCK(interface) (0 | 0x0000)
+
+#define MUTEX_IS_LOCKED(value) (((value) & 0xffff) == 0x000f)
+#define MUTEX_IS_UNLOCKED(value) (((value) & 0xffff) == 0x0000)
+#define MUTEX_INTERFACE(value) (((value) >> 16) & 0xffff)
+
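+/*
+ * For example (illustrative): with interface id 0x0102, MUTEX_LOCKED()
+ * yields 0x0102000f; MUTEX_IS_LOCKED(0x0102000f) is true, and
+ * MUTEX_INTERFACE(0x0102000f) recovers 0x0102.
+ */
+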
+/*
+ * If you need more than 65536 recursive locks, please
+ * rethink your code.
+ */
+#define MUTEX_DEPTH_MAX 0xffff
+
+struct nfp_cpp_mutex {
+ struct nfp_cpp *cpp;
+ uint8_t target;
+ uint16_t depth;
+ unsigned long long address;
+ uint32_t key;
+ unsigned int usage;
+ struct nfp_cpp_mutex *prev, *next;
+};
+
+static int
+_nfp_cpp_mutex_validate(uint32_t model, int *target, unsigned long long address)
+{
+ /* Address must be 64-bit aligned */
+ if (address & 7)
+ return NFP_ERRNO(EINVAL);
+
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ if (*target != NFP_CPP_TARGET_MU)
+ return NFP_ERRNO(EINVAL);
+ } else {
+ return NFP_ERRNO(EINVAL);
+ }
+
+ return 0;
+}
+
+/*
+ * Initialize a mutex location
+ *
+ * The CPP target:address must point to a 64-bit aligned location; this
+ * function initializes 64 bits of data at that location.
+ *
+ * This creates the initial mutex state, as locked by this
+ * nfp_cpp_interface().
+ *
+ * This function should only be called when setting up
+ * the initial lock state upon boot-up of the system.
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or
+ * NFP_CPP_TARGET_MU)
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key Unique 32-bit value for this mutex
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, unsigned long long address,
+ uint32_t key)
+{
+ uint32_t model = nfp_cpp_model(cpp);
+ uint32_t muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */
+ int err;
+
+ err = _nfp_cpp_mutex_validate(model, &target, address);
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_writel(cpp, muw, address + 4, key);
+ if (err < 0)
+ return err;
+
+ err =
+ nfp_cpp_writel(cpp, muw, address + 0,
+ MUTEX_LOCKED(nfp_cpp_interface(cpp)));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/*
+ * Create a mutex handle from an address controlled by a MU Atomic engine
+ *
+ * The CPP target:address must point to a 64-bit aligned location;
+ * 64 bits of data at that location are reserved for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the
+ * MU Atomic Engine are supported.
+ *
+ * @param cpp NFP CPP handle
+ * @param target NFP CPP target ID (ie NFP_CPP_TARGET_CLS or
+ * NFP_CPP_TARGET_MU)
+ * @param address Offset into the address space of the NFP CPP target ID
+ * @param key 32-bit unique key (must match the key at this location)
+ *
+ * @return A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */
+struct nfp_cpp_mutex *
+nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+ unsigned long long address, uint32_t key)
+{
+ uint32_t model = nfp_cpp_model(cpp);
+ struct nfp_cpp_mutex *mutex;
+ uint32_t mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */
+ int err;
+ uint32_t tmp;
+
+ /* Look for cached mutex */
+ for (mutex = cpp->mutex_cache; mutex; mutex = mutex->next) {
+ if (mutex->target == target && mutex->address == address)
+ break;
+ }
+
+ if (mutex) {
+ if (mutex->key == key) {
+ mutex->usage++;
+ return mutex;
+ }
+
+ /* If the key doesn't match... */
+ return NFP_ERRPTR(EEXIST);
+ }
+
+ err = _nfp_cpp_mutex_validate(model, &target, address);
+ if (err < 0)
+ return NULL;
+
+ err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+ if (err < 0)
+ return NULL;
+
+ if (tmp != key)
+ return NFP_ERRPTR(EEXIST);
+
+ mutex = calloc(sizeof(*mutex), 1);
+ if (!mutex)
+ return NFP_ERRPTR(ENOMEM);
+
+ mutex->cpp = cpp;
+ mutex->target = target;
+ mutex->address = address;
+ mutex->key = key;
+ mutex->depth = 0;
+ mutex->usage = 1;
+
+ /* Add mutex to the cache */
+ if (cpp->mutex_cache) {
+ cpp->mutex_cache->prev = mutex;
+ mutex->next = cpp->mutex_cache;
+ cpp->mutex_cache = mutex;
+ } else {
+ cpp->mutex_cache = mutex;
+ }
+
+ return mutex;
+}
+
+struct nfp_cpp *
+nfp_cpp_mutex_cpp(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->cpp;
+}
+
+uint32_t
+nfp_cpp_mutex_key(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->key;
+}
+
+uint16_t
+nfp_cpp_mutex_owner(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ uint32_t value, key;
+ int err;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ return err;
+
+ if (key != mutex->key)
+ return NFP_ERRNO(EPERM);
+
+ if (!MUTEX_IS_LOCKED(value))
+ return 0;
+
+ return MUTEX_INTERFACE(value);
+}
+
+int
+nfp_cpp_mutex_target(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->target;
+}
+
+uint64_t
+nfp_cpp_mutex_address(struct nfp_cpp_mutex *mutex)
+{
+ return mutex->address;
+}
+
+/*
+ * Free a mutex handle - does not alter the lock state
+ *
+ * @param mutex NFP CPP Mutex handle
+ */
+void
+nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+ mutex->usage--;
+ if (mutex->usage > 0)
+ return;
+
+ /* Remove mutex from the cache */
+ if (mutex->next)
+ mutex->next->prev = mutex->prev;
+ if (mutex->prev)
+ mutex->prev->next = mutex->next;
+
+ /* If mutex->cpp == NULL, something broke */
+ if (mutex->cpp && mutex == mutex->cpp->mutex_cache)
+ mutex->cpp->mutex_cache = mutex->next;
+
+ free(mutex);
+}
+
+/*
+ * Lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+ int err;
+ time_t warn_at = time(NULL) + 15;
+
+ while ((err = nfp_cpp_mutex_trylock(mutex)) != 0) {
+ /* If errno != EBUSY, then the lock was damaged */
+ if (err < 0 && errno != EBUSY)
+ return err;
+ if (time(NULL) >= warn_at) {
+ printf("Warning: waiting for NFP mutex\n");
+ printf("\tusage:%u\n", mutex->usage);
+ printf("\tdepth:%hu\n", mutex->depth);
+ printf("\ttarget:%d\n", mutex->target);
+ printf("\taddr:%llx\n", mutex->address);
+ printf("\tkey:%08x\n", mutex->key);
+ warn_at = time(NULL) + 60;
+ }
+ sched_yield();
+ }
+ return 0;
+}
+
+/*
+ * Unlock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * @param mutex NFP CPP Mutex handle
+ *
+ * @return 0 on success, or -1 on failure (and set errno accordingly).
+ */
+int
+nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ struct nfp_cpp *cpp = mutex->cpp;
+ uint32_t key, value;
+ uint16_t interface = nfp_cpp_interface(cpp);
+ int err;
+
+ if (mutex->depth > 1) {
+ mutex->depth--;
+ return 0;
+ }
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+ if (err < 0)
+ goto exit;
+
+ err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ goto exit;
+
+ if (key != mutex->key) {
+ err = NFP_ERRNO(EPERM);
+ goto exit;
+ }
+
+ if (value != MUTEX_LOCKED(interface)) {
+ err = NFP_ERRNO(EACCES);
+ goto exit;
+ }
+
+ err = nfp_cpp_writel(cpp, muw, mutex->address, MUTEX_UNLOCK(interface));
+ if (err < 0)
+ goto exit;
+
+ mutex->depth = 0;
+
+exit:
+ return err;
+}
+
+/*
+ * Attempt to lock a mutex handle, using the NFP MU Atomic Engine
+ *
+ * Valid lock states:
+ *
+ * 0x....0000 - Unlocked
+ * 0x....000f - Locked
+ *
+ * @param mutex NFP CPP Mutex handle
+ * @return 0 if the lock succeeded, -1 on failure (and errno set
+ * appropriately).
+ */
+int
+nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
+{
+ uint32_t mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */
+ uint32_t muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */
+ uint32_t mus = NFP_CPP_ID(mutex->target, 5, 3); /* test_set_imm */
+ uint32_t key, value, tmp;
+ struct nfp_cpp *cpp = mutex->cpp;
+ int err;
+
+ if (mutex->depth > 0) {
+ if (mutex->depth == MUTEX_DEPTH_MAX)
+ return NFP_ERRNO(E2BIG);
+
+ mutex->depth++;
+ return 0;
+ }
+
+ /* Verify that the lock marker is not damaged */
+ err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
+ if (err < 0)
+ goto exit;
+
+ if (key != mutex->key) {
+ err = NFP_ERRNO(EPERM);
+ goto exit;
+ }
+
+ /*
+ * Compare against the unlocked state, and if true,
+ * write the interface id into the top 16 bits, and
+ * mark as locked.
+ */
+ value = MUTEX_LOCKED(nfp_cpp_interface(cpp));
+
+ /*
+ * We use test_set_imm here, as it implies a read
+ * of the current state, and sets the bits in the
+ * bytemask of the command to 1s. Since the mutex
+ * is guaranteed to be 64-bit aligned, the bytemask
+ * of this 32-bit command is ensured to be 8'b00001111,
+ * which implies that the lower 4 bits will be set to
+ * ones regardless of the initial state.
+ *
+ * Since this is a 'Readback' operation, with no Pull
+ * data, we can treat this as a normal Push (read)
+ * atomic, which returns the original value.
+ */
+ err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
+ if (err < 0)
+ goto exit;
+
+ /* Was it unlocked? */
+ if (MUTEX_IS_UNLOCKED(tmp)) {
+ /*
+		 * The read value can only be 0x....0000 in the unlocked state.
+		 * If another owner were contending for this lock, the lock
+		 * state would be 0x....000f.
+ *
+ * Write our owner ID into the lock
+ * While not strictly necessary, this helps with
+ * debug and bookkeeping.
+ */
+ err = nfp_cpp_writel(cpp, muw, mutex->address, value);
+ if (err < 0)
+ goto exit;
+
+ mutex->depth = 1;
+ goto exit;
+ }
+
+ /* Already locked by us? Success! */
+ if (tmp == value) {
+ mutex->depth = 1;
+ goto exit;
+ }
+
+ err = NFP_ERRNO(MUTEX_IS_LOCKED(tmp) ? EBUSY : EINVAL);
+
+exit:
+ return err;
+}
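The MUTEX_LOCKED()/MUTEX_UNLOCK() and MUTEX_IS_*() helpers used above are defined elsewhere in this patch (nfp_cppcore.c). As a rough sketch of the encoding the comments describe -- owner interface ID in the top 16 bits, low bits 0xf when locked and 0x0 when unlocked -- they could plausibly look like this (names and exact bit layout assumed here, not taken from the patch):

```c
#include <stdint.h>

/* Hypothetical sketch of the 32-bit lock word described above. */
#define MUTEX_LOCKED(iface)   ((((uint32_t)(iface)) << 16) | 0x000f)
#define MUTEX_UNLOCK(iface)   (((uint32_t)(iface)) << 16)
#define MUTEX_IS_LOCKED(v)    (((v) & 0xffff) == 0x000f)
#define MUTEX_IS_UNLOCKED(v)  (((v) & 0xffff) == 0x0000)
```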
diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.c b/drivers/net/nfp/nfpcore/nfp_nffw.c
new file mode 100644
index 00000000..8bec0e3c
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nffw.c
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+#include "nfp_mip.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+
+/*
+ * flg_info_version = flags[0]<27:16>
+ * This is a small version counter intended only to detect if the current
+ * implementation can read the current struct. Struct changes should be very
+ * rare and as such a 12-bit counter should cover large spans of time. By the
+ * time it wraps around, we don't expect to have 4096 versions of this struct
+ * to be in use at the same time.
+ */
+static uint32_t
+nffw_res_info_version_get(const struct nfp_nffw_info_data *res)
+{
+ return (res->flags[0] >> 16) & 0xfff;
+}
+
+/* flg_init = flags[0]<0> */
+static uint32_t
+nffw_res_flg_init_get(const struct nfp_nffw_info_data *res)
+{
+ return (res->flags[0] >> 0) & 1;
+}
+
+/* loaded = loaded__mu_da__mip_off_hi<31:31> */
+static uint32_t
+nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi)
+{
+ return (fi->loaded__mu_da__mip_off_hi >> 31) & 1;
+}
+
+/* mip_cppid = mip_cppid */
+static uint32_t
+nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi)
+{
+ return fi->mip_cppid;
+}
+
+/* mip_mu_da = loaded__mu_da__mip_off_hi<8:8> */
+static uint32_t
+nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi)
+{
+ return (fi->loaded__mu_da__mip_off_hi >> 8) & 1;
+}
+
+/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 32) | mip_offset_lo */
+static uint64_t
+nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
+{
+ uint64_t mip_off_hi = fi->loaded__mu_da__mip_off_hi;
+
+ return (mip_off_hi & 0xFF) << 32 | fi->mip_offset_lo;
+}
+
+#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12)
+
+static int
+nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+ unsigned int mode, addr40;
+ uint32_t xpbaddr, imbcppat;
+ int err;
+
+ /* Hardcoded XPB IMB Base, island 0 */
+ xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4;
+ err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat);
+ if (err < 0)
+ return err;
+
+ mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
+ addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
+
+ return nfp_cppat_mu_locality_lsb(mode, addr40);
+}
+
+static unsigned int
+nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
+{
+	/*
+	 * For this code, version 0 is most likely to actually be version 1.
+	 * Since the kernel driver does not take responsibility for
+	 * initialising the nfp.nffw resource, any previous code (CA firmware
+	 * or userspace) that left the version at 0 but did set the init flag
+	 * is going to be version 1.
+	 */
+ switch (nffw_res_info_version_get(fwinf)) {
+ case 0:
+ case 1:
+ *arr = &fwinf->info.v1.fwinfo[0];
+ return NFFW_FWINFO_CNT_V1;
+ case 2:
+ *arr = &fwinf->info.v2.fwinfo[0];
+ return NFFW_FWINFO_CNT_V2;
+ default:
+ *arr = NULL;
+ return 0;
+ }
+}
+
+/*
+ * nfp_nffw_info_open() - Acquire the lock on the NFFW table
+ * @cpp: NFP CPP handle
+ *
+ * Return: NFFW info state handle, or NULL on failure
+ */
+struct nfp_nffw_info *
+nfp_nffw_info_open(struct nfp_cpp *cpp)
+{
+ struct nfp_nffw_info_data *fwinf;
+ struct nfp_nffw_info *state;
+ uint32_t info_ver;
+ int err;
+
+ state = malloc(sizeof(*state));
+ if (!state)
+ return NULL;
+
+ memset(state, 0, sizeof(*state));
+
+ state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
+ if (!state->res)
+ goto err_free;
+
+ fwinf = &state->fwinf;
+
+ if (sizeof(*fwinf) > nfp_resource_size(state->res))
+ goto err_release;
+
+ err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
+ nfp_resource_address(state->res),
+ fwinf, sizeof(*fwinf));
+ if (err < (int)sizeof(*fwinf))
+ goto err_release;
+
+ if (!nffw_res_flg_init_get(fwinf))
+ goto err_release;
+
+ info_ver = nffw_res_info_version_get(fwinf);
+ if (info_ver > NFFW_INFO_VERSION_CURRENT)
+ goto err_release;
+
+ state->cpp = cpp;
+ return state;
+
+err_release:
+ nfp_resource_release(state->res);
+err_free:
+ free(state);
+ return NULL;
+}
+
+/*
+ * nfp_nffw_info_close() - Release the lock on the NFFW table
+ * @state:	NFP FW info state
+ */
+void
+nfp_nffw_info_close(struct nfp_nffw_info *state)
+{
+ nfp_resource_release(state->res);
+ free(state);
+}
+
+/*
+ * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW
+ * @state: NFP FW info state
+ *
+ * Return: First NFFW firmware info, NULL on failure
+ */
+static struct nffw_fwinfo *
+nfp_nffw_info_fwid_first(struct nfp_nffw_info *state)
+{
+ struct nffw_fwinfo *fwinfo;
+ unsigned int cnt, i;
+
+ cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo);
+ if (!cnt)
+ return NULL;
+
+ for (i = 0; i < cnt; i++)
+ if (nffw_fwinfo_loaded_get(&fwinfo[i]))
+ return &fwinfo[i];
+
+ return NULL;
+}
+
+/*
+ * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP
+ * @state: NFP FW info state
+ * @cpp_id: Pointer to the CPP ID of the MIP
+ * @off: Pointer to the CPP Address of the MIP
+ *
+ * Return: 0, or -ERRNO
+ */
+int
+nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id,
+ uint64_t *off)
+{
+ struct nffw_fwinfo *fwinfo;
+
+ fwinfo = nfp_nffw_info_fwid_first(state);
+ if (!fwinfo)
+ return -EINVAL;
+
+ *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo);
+ *off = nffw_fwinfo_mip_offset_get(fwinfo);
+
+ if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
+ int locality_off;
+
+ if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU)
+ return 0;
+
+ locality_off = nfp_mip_mu_locality_lsb(state->cpp);
+ if (locality_off < 0)
+ return locality_off;
+
+ *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
+ *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
+ }
+
+ return 0;
+}
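Taken together, these functions form a small lock/query/unlock API. A minimal usage sketch (hypothetical caller, assuming a valid struct nfp_cpp handle and the headers from this patch):

```c
#include <errno.h>
#include "nfp_nffw.h"

/* Locate the first firmware's MIP, releasing the NFFW lock afterwards. */
static int find_first_mip(struct nfp_cpp *cpp, uint32_t *cpp_id, uint64_t *off)
{
	struct nfp_nffw_info *nffw;
	int err;

	nffw = nfp_nffw_info_open(cpp);	/* acquires the nfp.nffw resource */
	if (!nffw)
		return -ENODEV;

	err = nfp_nffw_info_mip_first(nffw, cpp_id, off);
	nfp_nffw_info_close(nffw);	/* always drop the lock */
	return err;
}
```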
diff --git a/drivers/net/nfp/nfpcore/nfp_nffw.h b/drivers/net/nfp/nfpcore/nfp_nffw.h
new file mode 100644
index 00000000..3bbdf1c1
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nffw.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_NFFW_H__
+#define __NFP_NFFW_H__
+
+#include "nfp-common/nfp_platform.h"
+#include "nfp_cpp.h"
+
+/*
+ * Init-CSR owner IDs for firmware map to firmware IDs which start at 4.
+ * Lower IDs are reserved for target and loader IDs.
+ */
+#define NFFW_FWID_EXT 3 /* For active MEs that we didn't load. */
+#define NFFW_FWID_BASE 4
+
+#define NFFW_FWID_ALL 255
+
+/**
+ * NFFW_INFO_VERSION history:
+ * 0: This was never actually used (before versioning), but it refers to
+ * the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
+ * changed to 200.
+ * 1: First versioned struct, with
+ * FWINFO_CNT = 120
+ * MEINFO_CNT = 120
+ * 2: FWINFO_CNT = 200
+ * MEINFO_CNT = 200
+ */
+#define NFFW_INFO_VERSION_CURRENT 2
+
+/* Enough for all current chip families */
+#define NFFW_MEINFO_CNT_V1 120
+#define NFFW_FWINFO_CNT_V1 120
+#define NFFW_MEINFO_CNT_V2 200
+#define NFFW_FWINFO_CNT_V2 200
+
+struct nffw_meinfo {
+ uint32_t ctxmask__fwid__meid;
+};
+
+struct nffw_fwinfo {
+ uint32_t loaded__mu_da__mip_off_hi;
+ uint32_t mip_cppid; /* 0 means no MIP */
+ uint32_t mip_offset_lo;
+};
+
+struct nfp_nffw_info_v1 {
+ struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
+ struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
+};
+
+struct nfp_nffw_info_v2 {
+ struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
+ struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
+};
+
+struct nfp_nffw_info_data {
+ uint32_t flags[2];
+ union {
+ struct nfp_nffw_info_v1 v1;
+ struct nfp_nffw_info_v2 v2;
+ } info;
+};
+
+struct nfp_nffw_info {
+ struct nfp_cpp *cpp;
+ struct nfp_resource *res;
+
+ struct nfp_nffw_info_data fwinf;
+};
+
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp);
+void nfp_nffw_info_close(struct nfp_nffw_info *state);
+int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, uint32_t *cpp_id,
+			    uint64_t *off);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.c b/drivers/net/nfp/nfpcore/nfp_nsp.c
new file mode 100644
index 00000000..876a4017
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.c
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#define NFP_SUBSYS "nfp_nsp"
+
+#include <stdio.h>
+#include <time.h>
+
+#include <rte_common.h>
+
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp_resource.h"
+
+int
+nfp_nsp_config_modified(struct nfp_nsp *state)
+{
+ return state->modified;
+}
+
+void
+nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified)
+{
+ state->modified = modified;
+}
+
+void *
+nfp_nsp_config_entries(struct nfp_nsp *state)
+{
+ return state->entries;
+}
+
+unsigned int
+nfp_nsp_config_idx(struct nfp_nsp *state)
+{
+ return state->idx;
+}
+
+void
+nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx)
+{
+ state->entries = entries;
+ state->idx = idx;
+}
+
+void
+nfp_nsp_config_clear_state(struct nfp_nsp *state)
+{
+ state->entries = NULL;
+ state->idx = 0;
+}
+
+static void
+nfp_nsp_print_extended_error(uint32_t ret_val)
+{
+ int i;
+
+ if (!ret_val)
+ return;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_errors); i++)
+ if (ret_val == (uint32_t)nsp_errors[i].code)
+ printf("err msg: %s\n", nsp_errors[i].msg);
+}
+
+static int
+nfp_nsp_check(struct nfp_nsp *state)
+{
+ struct nfp_cpp *cpp = state->cpp;
+ uint64_t nsp_status, reg;
+ uint32_t nsp_cpp;
+ int err;
+
+ nsp_cpp = nfp_resource_cpp_id(state->res);
+ nsp_status = nfp_resource_address(state->res) + NSP_STATUS;
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &reg);
+ if (err < 0)
+ return err;
+
+ if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) {
+ printf("Cannot detect NFP Service Processor\n");
+ return -ENODEV;
+ }
+
+ state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg);
+ state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg);
+
+ if (state->ver.major != NSP_MAJOR || state->ver.minor < NSP_MINOR) {
+ printf("Unsupported ABI %hu.%hu\n", state->ver.major,
+ state->ver.minor);
+ return -EINVAL;
+ }
+
+ if (reg & NSP_STATUS_BUSY) {
+ printf("Service processor busy!\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/*
+ * nfp_nsp_open() - Prepare for communication and lock the NSP resource.
+ * @cpp:	NFP CPP Handle
+ *
+ * Return: NFP NSP handle, or NULL on failure
+ */
+struct nfp_nsp *
+nfp_nsp_open(struct nfp_cpp *cpp)
+{
+ struct nfp_resource *res;
+ struct nfp_nsp *state;
+ int err;
+
+ res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
+ if (!res)
+ return NULL;
+
+ state = malloc(sizeof(*state));
+ if (!state) {
+ nfp_resource_release(res);
+ return NULL;
+ }
+ memset(state, 0, sizeof(*state));
+ state->cpp = cpp;
+ state->res = res;
+
+ err = nfp_nsp_check(state);
+ if (err) {
+ nfp_nsp_close(state);
+ return NULL;
+ }
+
+ return state;
+}
+
+/*
+ * nfp_nsp_close() - Clean up and unlock the NSP resource.
+ * @state: NFP SP state
+ */
+void
+nfp_nsp_close(struct nfp_nsp *state)
+{
+ nfp_resource_release(state->res);
+ free(state);
+}
+
+uint16_t
+nfp_nsp_get_abi_ver_major(struct nfp_nsp *state)
+{
+ return state->ver.major;
+}
+
+uint16_t
+nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
+{
+ return state->ver.minor;
+}
+
+static int
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, uint64_t *reg, uint32_t nsp_cpp,
+ uint64_t addr, uint64_t mask, uint64_t val)
+{
+ struct timespec wait;
+ int count;
+ int err;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 25000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
+ if (err < 0)
+ return err;
+
+ if ((*reg & mask) == val)
+ return 0;
+
+ nanosleep(&wait, 0);
+ if (count++ > 1000)
+ return -ETIMEDOUT;
+ }
+}
+
+/*
+ * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * @state: NFP SP state
+ * @code: NFP SP Command Code
+ * @option: NFP SP Command Argument
+ * @buff_cpp: NFP SP Buffer CPP Address info
+ * @buff_addr: NFP SP Buffer Host address
+ *
+ * Return: 0 for success with no result
+ *
+ * positive value for NSP completion with a result code
+ *
+ * -EAGAIN if the NSP is not yet present
+ * -ENODEV if the NSP is not a supported model
+ * -EBUSY if the NSP is stuck
+ * -EINTR if interrupted while waiting for completion
+ *	-ETIMEDOUT if the NSP took too long to complete (each wait loop
+ *	below gives up after roughly 25 seconds)
+ */
+static int
+nfp_nsp_command(struct nfp_nsp *state, uint16_t code, uint32_t option,
+ uint32_t buff_cpp, uint64_t buff_addr)
+{
+ uint64_t reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
+ struct nfp_cpp *cpp = state->cpp;
+ uint32_t nsp_cpp;
+ int err;
+
+ nsp_cpp = nfp_resource_cpp_id(state->res);
+ nsp_base = nfp_resource_address(state->res);
+ nsp_status = nsp_base + NSP_STATUS;
+ nsp_command = nsp_base + NSP_COMMAND;
+ nsp_buffer = nsp_base + NSP_BUFFER;
+
+ err = nfp_nsp_check(state);
+ if (err)
+ return err;
+
+ if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) ||
+ !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) {
+ printf("Host buffer out of reach %08x %" PRIx64 "\n",
+ buff_cpp, buff_addr);
+ return -EINVAL;
+ }
+
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
+ FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) |
+ FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr));
+ if (err < 0)
+ return err;
+
+ err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
+ FIELD_PREP(NSP_COMMAND_OPTION, option) |
+ FIELD_PREP(NSP_COMMAND_CODE, code) |
+ FIELD_PREP(NSP_COMMAND_START, 1));
+ if (err < 0)
+ return err;
+
+ /* Wait for NSP_COMMAND_START to go to 0 */
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
+ NSP_COMMAND_START, 0);
+ if (err) {
+ printf("Error %d waiting for code 0x%04x to start\n",
+ err, code);
+ return err;
+ }
+
+ /* Wait for NSP_STATUS_BUSY to go to 0 */
+ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
+ 0);
+ if (err) {
+ printf("Error %d waiting for code 0x%04x to complete\n",
+ err, code);
+ return err;
+ }
+
+ err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val);
+ if (err < 0)
+ return err;
+ ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val);
+
+ err = FIELD_GET(NSP_STATUS_RESULT, reg);
+ if (err) {
+ printf("Result (error) code set: %d (%d) command: %d\n",
+ -err, (int)ret_val, code);
+ nfp_nsp_print_extended_error(ret_val);
+ return -err;
+ }
+
+ return ret_val;
+}
+
+#define SZ_1M 0x00100000
+
+static int
+nfp_nsp_command_buf(struct nfp_nsp *nsp, uint16_t code, uint32_t option,
+ const void *in_buf, unsigned int in_size, void *out_buf,
+ unsigned int out_size)
+{
+ struct nfp_cpp *cpp = nsp->cpp;
+ unsigned int max_size;
+ uint64_t reg, cpp_buf;
+ int ret, err;
+ uint32_t cpp_id;
+
+ if (nsp->ver.minor < 13) {
+ printf("NSP: Code 0x%04x with buffer not supported\n", code);
+ printf("\t(ABI %hu.%hu)\n", nsp->ver.major, nsp->ver.minor);
+ return -EOPNOTSUPP;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER_CONFIG,
+ &reg);
+ if (err < 0)
+ return err;
+
+ max_size = RTE_MAX(in_size, out_size);
+ if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
+ printf("NSP: default buffer too small for command 0x%04x\n",
+ code);
+ printf("\t(%llu < %u)\n",
+ FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
+ max_size);
+ return -EINVAL;
+ }
+
+ err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+ nfp_resource_address(nsp->res) +
+ NSP_DFLT_BUFFER,
+ &reg);
+ if (err < 0)
+ return err;
+
+ cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
+ cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
+
+ if (in_buf && in_size) {
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+ if (err < 0)
+ return err;
+ }
+ /* Zero out remaining part of the buffer */
+ if (out_buf && out_size && out_size > in_size) {
+ memset(out_buf, 0, out_size - in_size);
+ err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, out_buf,
+ out_size - in_size);
+ if (err < 0)
+ return err;
+ }
+
+ ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+ if (ret < 0)
+ return ret;
+
+ if (out_buf && out_size) {
+ err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size);
+ if (err < 0)
+ return err;
+ }
+
+ return ret;
+}
+
+int
+nfp_nsp_wait(struct nfp_nsp *state)
+{
+ struct timespec wait;
+ int count;
+ int err;
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 25000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0);
+ if (err != -EAGAIN)
+ break;
+
+ nanosleep(&wait, 0);
+
+ if (count++ > 1000) {
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+ if (err)
+ printf("NSP failed to respond %d\n", err);
+
+ return err;
+}
+
+int
+nfp_nsp_device_soft_reset(struct nfp_nsp *state)
+{
+ return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
+}
+
+int
+nfp_nsp_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0);
+}
+
+int
+nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, size, buf, size,
+ NULL, 0);
+}
+
+int
+nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
+ buf, size);
+}
+
+int
+nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf,
+ unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
+ NULL, 0);
+}
+
+int
+nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0,
+ buf, size);
+}
+
+int
+nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf,
+ unsigned int size)
+{
+ return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, NULL,
+ 0, buf, size);
+}
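The wrappers above reduce most NSP interactions to open/command/close. A minimal, hypothetical usage sketch (assuming a valid struct nfp_cpp handle):

```c
#include <errno.h>
#include "nfp_nsp.h"

/* Soft-reset the device through the service processor. */
static int device_soft_reset(struct nfp_cpp *cpp)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(cpp);	/* locks the nfp.sp resource */
	if (!nsp)
		return -EIO;

	err = nfp_nsp_wait(nsp);	/* wait for the NSP to answer NOOPs */
	if (!err)
		err = nfp_nsp_device_soft_reset(nsp);

	nfp_nsp_close(nsp);
	return err;
}
```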
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp.h b/drivers/net/nfp/nfpcore/nfp_nsp.h
new file mode 100644
index 00000000..c9c7b0d0
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NSP_NSP_H
+#define NSP_NSP_H 1
+
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (64 - 1 - (h))))
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_GET(_mask, _reg) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
+ }))
+
+#define FIELD_FIT(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \
+ }))
+
+#define FIELD_PREP(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
+ }))
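These are local stand-ins for the Linux bitfield.h helpers: the shift is derived from the mask itself via __bf_shf(), so callers only ever name the mask. A small illustration (demo mask and values invented for this example):

```c
#define DEMO_FIELD GENMASK_ULL(47, 44)	/* 4-bit field at bits 47:44 */

static void field_demo(void)
{
	uint64_t reg = FIELD_PREP(DEMO_FIELD, 0xaULL); /* 0x0000a00000000000 */
	uint64_t val = FIELD_GET(DEMO_FIELD, reg);     /* back to 0xa */
	int ok = FIELD_FIT(DEMO_FIELD, 0x1fULL);       /* 0: 0x1f needs 5 bits */

	(void)reg; (void)val; (void)ok;
}
```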
+
+/* Offsets relative to the CSR base */
+#define NSP_STATUS 0x00
+#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48)
+#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44)
+#define NSP_STATUS_MINOR GENMASK_ULL(43, 32)
+#define NSP_STATUS_CODE GENMASK_ULL(31, 16)
+#define NSP_STATUS_RESULT GENMASK_ULL(15, 8)
+#define NSP_STATUS_BUSY BIT_ULL(0)
+
+#define NSP_COMMAND 0x08
+#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32)
+#define NSP_COMMAND_CODE GENMASK_ULL(31, 16)
+#define NSP_COMMAND_START BIT_ULL(0)
+
+/* CPP address to retrieve the data from */
+#define NSP_BUFFER 0x10
+#define NSP_BUFFER_CPP GENMASK_ULL(63, 40)
+#define NSP_BUFFER_PCIE GENMASK_ULL(39, 38)
+#define NSP_BUFFER_ADDRESS GENMASK_ULL(37, 0)
+
+#define NSP_DFLT_BUFFER 0x18
+
+#define NSP_DFLT_BUFFER_CONFIG 0x20
+#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0)
+
+#define NSP_MAGIC 0xab10
+#define NSP_MAJOR 0
+#define NSP_MINOR 8
+
+#define NSP_CODE_MAJOR GENMASK(15, 12)
+#define NSP_CODE_MINOR GENMASK(11, 0)
+
+enum nfp_nsp_cmd {
+ SPCODE_NOOP = 0, /* No operation */
+ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
+ SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */
+ SPCODE_PHY_INIT = 3, /* Initialize the PHY */
+ SPCODE_MAC_INIT = 4, /* Initialize the MAC */
+ SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */
+ SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */
+ SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */
+ SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */
+ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */
+ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */
+};
+
+static const struct {
+ int code;
+ const char *msg;
+} nsp_errors[] = {
+ { 6010, "could not map to phy for port" },
+ { 6011, "not an allowed rate/lanes for port" },
+ { 6012, "not an allowed rate/lanes for port" },
+ { 6013, "high/low error, change other port first" },
+ { 6014, "config not found in flash" },
+};
+
+struct nfp_nsp {
+ struct nfp_cpp *cpp;
+ struct nfp_resource *res;
+ struct {
+ uint16_t major;
+ uint16_t minor;
+ } ver;
+
+ /* Eth table config state */
+ int modified;
+ unsigned int idx;
+ void *entries;
+};
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+uint16_t nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
+uint16_t nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_mac_reinit(struct nfp_nsp *state);
+int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
+ void *buf, unsigned int size);
+
+static inline int nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 20;
+}
+
+enum nfp_eth_interface {
+ NFP_INTERFACE_NONE = 0,
+ NFP_INTERFACE_SFP = 1,
+ NFP_INTERFACE_SFPP = 10,
+ NFP_INTERFACE_SFP28 = 28,
+ NFP_INTERFACE_QSFP = 40,
+ NFP_INTERFACE_CXP = 100,
+ NFP_INTERFACE_QSFP28 = 112,
+};
+
+enum nfp_eth_media {
+ NFP_MEDIA_DAC_PASSIVE = 0,
+ NFP_MEDIA_DAC_ACTIVE,
+ NFP_MEDIA_FIBRE,
+};
+
+enum nfp_eth_aneg {
+ NFP_ANEG_AUTO = 0,
+ NFP_ANEG_SEARCH,
+ NFP_ANEG_25G_CONSORTIUM,
+ NFP_ANEG_25G_IEEE,
+ NFP_ANEG_DISABLED,
+};
+
+enum nfp_eth_fec {
+ NFP_FEC_AUTO_BIT = 0,
+ NFP_FEC_BASER_BIT,
+ NFP_FEC_REED_SOLOMON_BIT,
+ NFP_FEC_DISABLED_BIT,
+};
+
+#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT)
+#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT)
+#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT)
+#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT)
+
+#define ETH_ALEN 6
+
+/**
+ * struct nfp_eth_table - ETH table information
+ * @count: number of table entries
+ * @max_index: max of @index fields of all @ports
+ * @ports: table of ports
+ *
+ * @eth_index: port index according to legacy ethX numbering
+ * @index: chip-wide first channel index
+ * @nbi: NBI index
+ * @base: first channel index (within NBI)
+ * @lanes: number of channels
+ * @speed: interface speed (in Mbps)
+ * @interface: interface (module) plugged in
+ * @media: media type of the @interface
+ * @fec: forward error correction mode
+ * @aneg: auto negotiation mode
+ * @mac_addr: interface MAC address
+ * @label_port: port id
+ * @label_subport: id of interface within port (for split ports)
+ * @enabled: is enabled?
+ * @tx_enabled: is TX enabled?
+ * @rx_enabled: is RX enabled?
+ * @override_changed: is media reconfig pending?
+ *
+ * @port_type: one of %PORT_* defines for ethtool
+ * @port_lanes: total number of lanes on the port (sum of lanes of all subports)
+ * @is_split: is interface part of a split port
+ * @fec_modes_supported: bitmap of FEC modes supported
+ */
+struct nfp_eth_table {
+ unsigned int count;
+ unsigned int max_index;
+ struct nfp_eth_table_port {
+ unsigned int eth_index;
+ unsigned int index;
+ unsigned int nbi;
+ unsigned int base;
+ unsigned int lanes;
+ unsigned int speed;
+
+ unsigned int interface;
+ enum nfp_eth_media media;
+
+ enum nfp_eth_fec fec;
+ enum nfp_eth_aneg aneg;
+
+ uint8_t mac_addr[ETH_ALEN];
+
+ uint8_t label_port;
+ uint8_t label_subport;
+
+ int enabled;
+ int tx_enabled;
+ int rx_enabled;
+
+ int override_changed;
+
+ /* Computed fields */
+ uint8_t port_type;
+
+ unsigned int port_lanes;
+
+ int is_split;
+
+ unsigned int fec_modes_supported;
+ } ports[0];
+};
+
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
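A short, hypothetical sketch of consuming this table (assuming a valid struct nfp_cpp handle; the table is malloc()ed by the implementation, so plain free() releases it):

```c
#include <stdio.h>
#include <stdlib.h>
#include "nfp_nsp.h"

static void dump_ports(struct nfp_cpp *cpp)
{
	struct nfp_eth_table *tbl = nfp_eth_read_ports(cpp);
	unsigned int i;

	if (!tbl)
		return;

	for (i = 0; i < tbl->count; i++)
		printf("port %u: %u lane(s) @ %u Mbps, %s\n",
		       tbl->ports[i].eth_index, tbl->ports[i].lanes,
		       tbl->ports[i].speed,
		       tbl->ports[i].enabled ? "enabled" : "disabled");

	free(tbl);
}
```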
+
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable);
+int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
+ int configed);
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
+
+int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf,
+ unsigned int size);
+void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries,
+ unsigned int idx);
+void nfp_nsp_config_clear_state(struct nfp_nsp *state);
+void nfp_nsp_config_set_modified(struct nfp_nsp *state, int modified);
+void *nfp_nsp_config_entries(struct nfp_nsp *state);
+int nfp_nsp_config_modified(struct nfp_nsp *state);
+unsigned int nfp_nsp_config_idx(struct nfp_nsp *state);
+
+static inline int nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
+{
+ return !!eth_port->fec_modes_supported;
+}
+
+static inline unsigned int
+nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port)
+{
+ return eth_port->fec_modes_supported;
+}
+
+struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
+int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
+void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp);
+
+int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
+int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
+int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);
+
+/**
+ * struct nfp_nsp_identify - NSP static information
+ * @version: opaque version string
+ * @flags: version flags
+ * @br_primary: branch id of primary bootloader
+ * @br_secondary: branch id of secondary bootloader
+ * @br_nsp: branch id of NSP
+ * @primary:	version of primary bootloader
+ * @secondary: version id of secondary bootloader
+ * @nsp: version id of NSP
+ * @sensor_mask: mask of present sensors available on NIC
+ */
+struct nfp_nsp_identify {
+ char version[40];
+ uint8_t flags;
+ uint8_t br_primary;
+ uint8_t br_secondary;
+ uint8_t br_nsp;
+ uint16_t primary;
+ uint16_t secondary;
+ uint16_t nsp;
+ uint64_t sensor_mask;
+};
+
+struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);
+
+enum nfp_nsp_sensor_id {
+ NFP_SENSOR_CHIP_TEMPERATURE,
+ NFP_SENSOR_ASSEMBLY_POWER,
+ NFP_SENSOR_ASSEMBLY_12V_POWER,
+ NFP_SENSOR_ASSEMBLY_3V3_POWER,
+};
+
+int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
+ long *val);
+
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
new file mode 100644
index 00000000..bfd1eddb
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_cmds.c
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp_nffw.h"
+
+struct nsp_identify {
+ uint8_t version[40];
+ uint8_t flags;
+ uint8_t br_primary;
+ uint8_t br_secondary;
+ uint8_t br_nsp;
+ uint16_t primary;
+ uint16_t secondary;
+ uint16_t nsp;
+ uint8_t reserved[6];
+ uint64_t sensor_mask;
+};
+
+struct nfp_nsp_identify *
+__nfp_nsp_identify(struct nfp_nsp *nsp)
+{
+ struct nfp_nsp_identify *nspi = NULL;
+ struct nsp_identify *ni;
+ int ret;
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
+ return NULL;
+
+ ni = malloc(sizeof(*ni));
+ if (!ni)
+ return NULL;
+
+ memset(ni, 0, sizeof(*ni));
+ ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
+ if (ret < 0) {
+		printf("reading BSP version failed %d\n", ret);
+ goto exit_free;
+ }
+
+ nspi = malloc(sizeof(*nspi));
+ if (!nspi)
+ goto exit_free;
+
+ memset(nspi, 0, sizeof(*nspi));
+ memcpy(nspi->version, ni->version, sizeof(nspi->version));
+ nspi->version[sizeof(nspi->version) - 1] = '\0';
+ nspi->flags = ni->flags;
+ nspi->br_primary = ni->br_primary;
+ nspi->br_secondary = ni->br_secondary;
+ nspi->br_nsp = ni->br_nsp;
+ nspi->primary = rte_le_to_cpu_16(ni->primary);
+ nspi->secondary = rte_le_to_cpu_16(ni->secondary);
+ nspi->nsp = rte_le_to_cpu_16(ni->nsp);
+ nspi->sensor_mask = rte_le_to_cpu_64(ni->sensor_mask);
+
+exit_free:
+ free(ni);
+ return nspi;
+}
+
+struct nfp_sensors {
+ uint32_t chip_temp;
+ uint32_t assembly_power;
+ uint32_t assembly_12v_power;
+ uint32_t assembly_3v3_power;
+};
+
+int
+nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id, long *val)
+{
+ struct nfp_sensors s;
+ struct nfp_nsp *nsp;
+ int ret;
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp)
+ return -EIO;
+
+ ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s));
+ nfp_nsp_close(nsp);
+
+ if (ret < 0)
+ return ret;
+
+ switch (id) {
+ case NFP_SENSOR_CHIP_TEMPERATURE:
+ *val = rte_le_to_cpu_32(s.chip_temp);
+ break;
+ case NFP_SENSOR_ASSEMBLY_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_12V_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_12v_power);
+ break;
+ case NFP_SENSOR_ASSEMBLY_3V3_POWER:
+ *val = rte_le_to_cpu_32(s.assembly_3v3_power);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
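A hypothetical caller, assuming a valid struct nfp_cpp handle (the unit of the returned value is whatever the NSP reports; it is not scaled here):

```c
#include <stdio.h>
#include "nfp_nsp.h"

static void show_chip_temp(struct nfp_cpp *cpp)
{
	long temp;

	if (nfp_hwmon_read_sensor(cpp, NFP_SENSOR_CHIP_TEMPERATURE, &temp) == 0)
		printf("chip temperature: %ld\n", temp);
}
```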
diff --git a/drivers/net/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
new file mode 100644
index 00000000..67946891
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_nsp_eth.c
@@ -0,0 +1,665 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_nsp.h"
+#include "nfp6000/nfp6000.h"
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (64 - 1 - (h))))
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define FIELD_GET(_mask, _reg) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ (typeof(_x))(((_reg) & (_x)) >> __bf_shf(_x)); \
+ }))
+
+#define FIELD_FIT(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ !((((typeof(_x))_val) << __bf_shf(_x)) & ~(_x)); \
+ }))
+
+#define FIELD_PREP(_mask, _val) \
+ (__extension__ ({ \
+ typeof(_mask) _x = (_mask); \
+ ((typeof(_x))(_val) << __bf_shf(_x)) & (_x); \
+ }))
+
+#define NSP_ETH_NBI_PORT_COUNT 24
+#define NSP_ETH_MAX_COUNT (2 * NSP_ETH_NBI_PORT_COUNT)
+#define NSP_ETH_TABLE_SIZE (NSP_ETH_MAX_COUNT * \
+ sizeof(union eth_table_entry))
+
+#define NSP_ETH_PORT_LANES GENMASK_ULL(3, 0)
+#define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8)
+#define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48)
+#define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54)
+#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60)
+#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61)
+
+#define NSP_ETH_PORT_LANES_MASK rte_cpu_to_le_64(NSP_ETH_PORT_LANES)
+
+#define NSP_ETH_STATE_CONFIGURED BIT_ULL(0)
+#define NSP_ETH_STATE_ENABLED BIT_ULL(1)
+#define NSP_ETH_STATE_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_STATE_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_STATE_RATE GENMASK_ULL(11, 8)
+#define NSP_ETH_STATE_INTERFACE GENMASK_ULL(19, 12)
+#define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20)
+#define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22)
+#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
+#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
+
+#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
+#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
+#define NSP_ETH_CTRL_TX_ENABLED BIT_ULL(2)
+#define NSP_ETH_CTRL_RX_ENABLED BIT_ULL(3)
+#define NSP_ETH_CTRL_SET_RATE BIT_ULL(4)
+#define NSP_ETH_CTRL_SET_LANES BIT_ULL(5)
+#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
+#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
+
+/* Which connector port. */
+#define PORT_TP 0x00
+#define PORT_AUI 0x01
+#define PORT_MII 0x02
+#define PORT_FIBRE 0x03
+#define PORT_BNC 0x04
+#define PORT_DA 0x05
+#define PORT_NONE 0xef
+#define PORT_OTHER 0xff
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_5000 5000
+#define SPEED_10000 10000
+#define SPEED_14000 14000
+#define SPEED_20000 20000
+#define SPEED_25000 25000
+#define SPEED_40000 40000
+#define SPEED_50000 50000
+#define SPEED_56000 56000
+#define SPEED_100000 100000
+
+enum nfp_eth_raw {
+ NSP_ETH_RAW_PORT = 0,
+ NSP_ETH_RAW_STATE,
+ NSP_ETH_RAW_MAC,
+ NSP_ETH_RAW_CONTROL,
+
+ NSP_ETH_NUM_RAW
+};
+
+enum nfp_eth_rate {
+ RATE_INVALID = 0,
+ RATE_10M,
+ RATE_100M,
+ RATE_1G,
+ RATE_10G,
+ RATE_25G,
+};
+
+union eth_table_entry {
+ struct {
+ uint64_t port;
+ uint64_t state;
+ uint8_t mac_addr[6];
+ uint8_t resv[2];
+ uint64_t control;
+ };
+ uint64_t raw[NSP_ETH_NUM_RAW];
+};
+
+static const struct {
+ enum nfp_eth_rate rate;
+ unsigned int speed;
+} nsp_eth_rate_tbl[] = {
+ { RATE_INVALID, 0, },
+ { RATE_10M, SPEED_10, },
+ { RATE_100M, SPEED_100, },
+ { RATE_1G, SPEED_1000, },
+ { RATE_10G, SPEED_10000, },
+ { RATE_25G, SPEED_25000, },
+};
+
+static unsigned int
+nfp_eth_rate2speed(enum nfp_eth_rate rate)
+{
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].rate == rate)
+ return nsp_eth_rate_tbl[i].speed;
+
+ return 0;
+}
+
+static unsigned int
+nfp_eth_speed2rate(unsigned int speed)
+{
+ int i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(nsp_eth_rate_tbl); i++)
+ if (nsp_eth_rate_tbl[i].speed == speed)
+ return nsp_eth_rate_tbl[i].rate;
+
+ return RATE_INVALID;
+}
+
+static void
+nfp_eth_copy_mac_reverse(uint8_t *dst, const uint8_t *src)
+{
+ int i;
+
+ for (i = 0; i < (int)ETH_ALEN; i++)
+ dst[ETH_ALEN - i - 1] = src[i];
+}
+
+static void
+nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
+ unsigned int index, struct nfp_eth_table_port *dst)
+{
+ unsigned int rate;
+ unsigned int fec;
+ uint64_t port, state;
+
+ port = rte_le_to_cpu_64(src->port);
+ state = rte_le_to_cpu_64(src->state);
+
+ dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port);
+ dst->index = index;
+ dst->nbi = index / NSP_ETH_NBI_PORT_COUNT;
+ dst->base = index % NSP_ETH_NBI_PORT_COUNT;
+ dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port);
+
+ dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state);
+ dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
+ dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
+
+ rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state));
+ dst->speed = dst->lanes * rate;
+
+ dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state);
+ dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state);
+
+ nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
+
+ dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port);
+ dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17)
+ return;
+
+ dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state);
+ dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 22)
+ return;
+
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT;
+ fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port);
+ dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT;
+ if (dst->fec_modes_supported)
+ dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED;
+
+ dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state);
+}
+
+static void
+nfp_eth_calc_port_geometry(struct nfp_eth_table *table)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < table->count; i++) {
+ table->max_index = RTE_MAX(table->max_index,
+ table->ports[i].index);
+
+ for (j = 0; j < table->count; j++) {
+ if (table->ports[i].label_port !=
+ table->ports[j].label_port)
+ continue;
+ table->ports[i].port_lanes += table->ports[j].lanes;
+
+ if (i == j)
+ continue;
+ if (table->ports[i].label_subport ==
+ table->ports[j].label_subport)
+ printf("Port %d subport %d is a duplicate\n",
+ table->ports[i].label_port,
+ table->ports[i].label_subport);
+
+ table->ports[i].is_split = 1;
+ }
+ }
+}
+
+static void
+nfp_eth_calc_port_type(struct nfp_eth_table_port *entry)
+{
+ if (entry->interface == NFP_INTERFACE_NONE) {
+ entry->port_type = PORT_NONE;
+ return;
+ }
+
+ if (entry->media == NFP_MEDIA_FIBRE)
+ entry->port_type = PORT_FIBRE;
+ else
+ entry->port_type = PORT_DA;
+}
+
+static struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries;
+ struct nfp_eth_table *table;
+ uint32_t table_sz;
+ int i, j, ret, cnt = 0;
+
+ entries = malloc(NSP_ETH_TABLE_SIZE);
+ if (!entries)
+ return NULL;
+
+ memset(entries, 0, NSP_ETH_TABLE_SIZE);
+ ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ if (ret < 0) {
+ printf("reading port table failed %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ cnt++;
+
+ /* Some versions of flash will give us 0 instead of port count. For
+ * those that give a port count, verify it against the value calculated
+ * above.
+ */
+ if (ret && ret != cnt) {
+		printf("table entry count (%d) does not match entries present (%d)\n",
+		       ret, cnt);
+ goto err;
+ }
+
+ table_sz = sizeof(*table) + sizeof(struct nfp_eth_table_port) * cnt;
+ table = malloc(table_sz);
+ if (!table)
+ goto err;
+
+ memset(table, 0, table_sz);
+ table->count = cnt;
+ for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
+ if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+ nfp_eth_port_translate(nsp, &entries[i], i,
+ &table->ports[j++]);
+
+ nfp_eth_calc_port_geometry(table);
+ for (i = 0; i < (int)table->count; i++)
+ nfp_eth_calc_port_type(&table->ports[i]);
+
+ free(entries);
+
+ return table;
+
+err:
+ free(entries);
+ return NULL;
+}
+
+/*
+ * nfp_eth_read_ports() - retrieve port information
+ * @cpp: NFP CPP handle
+ *
+ * Read the port information from the device. Returned structure should
+ * be freed with free() once no longer needed.
+ *
+ * Return: populated ETH table or NULL on error.
+ */
+struct nfp_eth_table *
+nfp_eth_read_ports(struct nfp_cpp *cpp)
+{
+ struct nfp_eth_table *ret;
+ struct nfp_nsp *nsp;
+
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp)
+ return NULL;
+
+ ret = __nfp_eth_read_ports(nsp);
+ nfp_nsp_close(nsp);
+
+ return ret;
+}
+
+struct nfp_nsp *
+nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ int ret;
+
+ entries = malloc(NSP_ETH_TABLE_SIZE);
+ if (!entries)
+ return NULL;
+
+ memset(entries, 0, NSP_ETH_TABLE_SIZE);
+ nsp = nfp_nsp_open(cpp);
+ if (!nsp) {
+ free(entries);
+ return nsp;
+ }
+
+ ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ if (ret < 0) {
+ printf("reading port table failed %d\n", ret);
+ goto err;
+ }
+
+ if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
+ printf("trying to set port state on disabled port %d\n", idx);
+ goto err;
+ }
+
+ nfp_nsp_config_set_state(nsp, entries, idx);
+ return nsp;
+
+err:
+ nfp_nsp_close(nsp);
+ free(entries);
+ return NULL;
+}
+
+void
+nfp_eth_config_cleanup_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+
+ nfp_nsp_config_set_modified(nsp, 0);
+ nfp_nsp_config_clear_state(nsp);
+ nfp_nsp_close(nsp);
+ free(entries);
+}
+
+/*
+ * nfp_eth_config_commit_end() - perform recorded configuration changes
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ *
+ * Perform the configuration which was requested with __nfp_eth_set_*()
+ * helpers and recorded in @nsp state. If device was already configured
+ * as requested or no __nfp_eth_set_*() operations were made no NSP command
+ * will be performed.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_config_commit_end(struct nfp_nsp *nsp)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ int ret = 1;
+
+ if (nfp_nsp_config_modified(nsp)) {
+ ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+ ret = ret < 0 ? ret : 0;
+ }
+
+ nfp_eth_config_cleanup_end(nsp);
+
+ return ret;
+}
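The pattern is always start/modify/commit (or cleanup on error); nfp_eth_set_fec() below follows it exactly. A hypothetical sketch for lane speed, reusing __nfp_eth_set_speed() from later in this file:

```c
#include <errno.h>
#include "nfp_nsp.h"

static int set_port_speed(struct nfp_cpp *cpp, unsigned int idx,
			  unsigned int speed)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_eth_config_start(cpp, idx);	/* snapshot the ETH table */
	if (!nsp)
		return -EIO;

	err = __nfp_eth_set_speed(nsp, speed);	/* record the change */
	if (err) {
		nfp_eth_config_cleanup_end(nsp);	/* abandon, write nothing */
		return err;
	}

	return nfp_eth_config_commit_end(nsp);	/* 0, 1 (no-op) or -ERRNO */
}
```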
+
+/*
+ * nfp_eth_set_mod_enable() - set PHY module enable control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @enable: Desired state
+ *
+ * Enable or disable PHY module (this usually means setting the TX lanes
+ * disable bits).
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, int enable)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ uint64_t reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -1;
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].state);
+ if (enable != (int)FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
+ reg = rte_le_to_cpu_64(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_ENABLED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
+ entries[idx].control = rte_cpu_to_le_64(reg);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/*
+ * nfp_eth_set_configured() - set PHY module configured control bit
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @configed: Desired state
+ *
+ * Set the ifup/ifdown state on the PHY.
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, int configed)
+{
+ union eth_table_entry *entries;
+ struct nfp_nsp *nsp;
+ uint64_t reg;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -EIO;
+
+ /*
+	 * Older ABI versions did support this feature; however, it has only
+	 * been reliable since ABI 20.
+ */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 20) {
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ entries = nfp_nsp_config_entries(nsp);
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].state);
+ if (configed != (int)FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) {
+ reg = rte_le_to_cpu_64(entries[idx].control);
+ reg &= ~NSP_ETH_CTRL_CONFIGURED;
+ reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed);
+ entries[idx].control = rte_cpu_to_le_64(reg);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+static int
+nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
+ const uint64_t mask, const unsigned int shift,
+ unsigned int val, const uint64_t ctrl_bit)
+{
+ union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
+ unsigned int idx = nfp_nsp_config_idx(nsp);
+ uint64_t reg;
+
+ /*
+ * Note: set features were added in ABI 0.14 but the error
+ * codes were initially not populated correctly.
+ */
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 17) {
+ printf("set operations not supported, please update flash\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are already in requested state */
+ reg = rte_le_to_cpu_64(entries[idx].raw[raw_idx]);
+ if (val == (reg & mask) >> shift)
+ return 0;
+
+ reg &= ~mask;
+ reg |= (val << shift) & mask;
+ entries[idx].raw[raw_idx] = rte_cpu_to_le_64(reg);
+
+ entries[idx].control |= rte_cpu_to_le_64(ctrl_bit);
+
+ nfp_nsp_config_set_modified(nsp, 1);
+
+ return 0;
+}
+
+#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \
+ (__extension__ ({ \
+ typeof(mask) _x = (mask); \
+ nfp_eth_set_bit_config(nsp, raw_idx, _x, __bf_shf(_x), \
+ val, ctrl_bit); \
+ }))
+
+/*
+ * __nfp_eth_set_aneg() - set PHY autonegotiation control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired autonegotiation mode
+ *
+ * Allow/disallow PHY module to advertise/perform autonegotiation.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_ANEG, mode,
+ NSP_ETH_CTRL_SET_ANEG);
+}
+
+/*
+ * __nfp_eth_set_fec() - set PHY forward error correction control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @mode: Desired fec mode
+ *
+ * Set the PHY module forward error correction mode.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int
+__nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_FEC, mode,
+ NSP_ETH_CTRL_SET_FEC);
+}
+
+/*
+ * nfp_eth_set_fec() - set PHY forward error correction control mode
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @mode: Desired fec mode
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (!nsp)
+ return -EIO;
+
+ err = __nfp_eth_set_fec(nsp, mode);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/*
+ * __nfp_eth_set_speed() - set interface speed/rate
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @speed: Desired speed (per lane)
+ *
+ * Set lane speed. Provided @speed value should be subport speed divided
+ * by the number of lanes this subport is spanning (e.g. 10000 for 40G
+ * over 4 lanes, 25000 for 50G over 2 lanes).
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed)
+{
+ enum nfp_eth_rate rate;
+
+ rate = nfp_eth_speed2rate(speed);
+ if (rate == RATE_INVALID) {
+ printf("could not find matching lane rate for speed %u\n",
+ speed);
+ return -EINVAL;
+ }
+
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE,
+ NSP_ETH_STATE_RATE, rate,
+ NSP_ETH_CTRL_SET_RATE);
+}
+
+/*
+ * __nfp_eth_set_split() - set interface lane split
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @lanes: Desired lanes per port
+ *
+ * Set number of lanes in the port.
+ * Will write to hwinfo overrides in the flash (persistent config).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int
+__nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES,
+ lanes, NSP_ETH_CTRL_SET_LANES);
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_resource.c b/drivers/net/nfp/nfpcore/nfp_resource.c
new file mode 100644
index 00000000..dd41fa4d
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_resource.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#include <stdio.h>
+#include <time.h>
+#include <endian.h>
+
+#include <rte_string_fns.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp_resource.h"
+#include "nfp_crc.h"
+
+#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME "nfp.res"
+#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */
+
+#define NFP_RESOURCE_ENTRY_NAME_SZ 8
+
+/*
+ * struct nfp_resource_entry - Resource table entry
+ * @owner: NFP CPP Lock, interface owner
+ * @key: NFP CPP Lock, posix_crc32(name, 8)
+ * @region: Memory region descriptor
+ * @name: ASCII, zero padded name
+ * @reserved
+ * @cpp_action: CPP Action
+ * @cpp_token: CPP Token
+ * @cpp_target: CPP Target ID
+ * @page_offset: 256-byte page offset into target's CPP address
+ * @page_size: size, in 256-byte pages
+ */
+struct nfp_resource_entry {
+ struct nfp_resource_entry_mutex {
+ uint32_t owner;
+ uint32_t key;
+ } mutex;
+ struct nfp_resource_entry_region {
+ uint8_t name[NFP_RESOURCE_ENTRY_NAME_SZ];
+ uint8_t reserved[5];
+ uint8_t cpp_action;
+ uint8_t cpp_token;
+ uint8_t cpp_target;
+ uint32_t page_offset;
+ uint32_t page_size;
+ } region;
+};
+
+#define NFP_RESOURCE_TBL_SIZE 4096
+#define NFP_RESOURCE_TBL_ENTRIES (int)(NFP_RESOURCE_TBL_SIZE / \
+ sizeof(struct nfp_resource_entry))
+
+struct nfp_resource {
+ char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1];
+ uint32_t cpp_id;
+ uint64_t addr;
+ uint64_t size;
+ struct nfp_cpp_mutex *mutex;
+};
+
+static int
+nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
+{
+ char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ + 2];
+ struct nfp_resource_entry entry;
+ uint32_t cpp_id, key;
+ int ret, i;
+
+ cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */
+
+ memset(name_pad, 0, sizeof(name_pad));
+ strlcpy(name_pad, res->name, sizeof(name_pad));
+
+ /* Search for a matching entry */
+ if (!memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8)) {
+ printf("Grabbing device lock not supported\n");
+ return -EOPNOTSUPP;
+ }
+ key = nfp_crc32_posix(name_pad, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+ for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+ uint64_t addr = NFP_RESOURCE_TBL_BASE +
+ sizeof(struct nfp_resource_entry) * i;
+
+ ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry));
+ if (ret != sizeof(entry))
+ return -EIO;
+
+ if (entry.mutex.key != key)
+ continue;
+
+ /* Found key! */
+ res->mutex =
+ nfp_cpp_mutex_alloc(cpp,
+ NFP_RESOURCE_TBL_TARGET, addr, key);
+ res->cpp_id = NFP_CPP_ID(entry.region.cpp_target,
+ entry.region.cpp_action,
+ entry.region.cpp_token);
+ res->addr = ((uint64_t)entry.region.page_offset) << 8;
+ res->size = (uint64_t)entry.region.page_size << 8;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int
+nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res,
+ struct nfp_cpp_mutex *dev_mutex)
+{
+ int err;
+
+ if (nfp_cpp_mutex_lock(dev_mutex))
+ return -EINVAL;
+
+ err = nfp_cpp_resource_find(cpp, res);
+ if (err)
+ goto err_unlock_dev;
+
+ err = nfp_cpp_mutex_trylock(res->mutex);
+ if (err)
+ goto err_res_mutex_free;
+
+ nfp_cpp_mutex_unlock(dev_mutex);
+
+ return 0;
+
+err_res_mutex_free:
+ nfp_cpp_mutex_free(res->mutex);
+err_unlock_dev:
+ nfp_cpp_mutex_unlock(dev_mutex);
+
+ return err;
+}
+
+/*
+ * nfp_resource_acquire() - Acquire a resource handle
+ * @cpp: NFP CPP handle
+ * @name: Name of the resource
+ *
+ * NOTE: This function locks the acquired resource
+ *
+ * Return: NFP Resource handle, or ERR_PTR()
+ */
+struct nfp_resource *
+nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
+{
+ struct nfp_cpp_mutex *dev_mutex;
+ struct nfp_resource *res;
+ int err;
+ struct timespec wait;
+ int count;
+
+ res = malloc(sizeof(*res));
+ if (!res)
+ return NULL;
+
+ memset(res, 0, sizeof(*res));
+
+ strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+ dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+ NFP_RESOURCE_TBL_BASE,
+ NFP_RESOURCE_TBL_KEY);
+ if (!dev_mutex) {
+ free(res);
+ return NULL;
+ }
+
+ wait.tv_sec = 0;
+ wait.tv_nsec = 1000000;
+ count = 0;
+
+ for (;;) {
+ err = nfp_resource_try_acquire(cpp, res, dev_mutex);
+ if (!err)
+ break;
+ if (err != -EBUSY)
+ goto err_free;
+
+ if (count++ > 1000) {
+ printf("Error: resource %s timed out\n", name);
+ err = -EBUSY;
+ goto err_free;
+ }
+
+ nanosleep(&wait, NULL);
+ }
+
+ nfp_cpp_mutex_free(dev_mutex);
+
+ return res;
+
+err_free:
+ nfp_cpp_mutex_free(dev_mutex);
+ free(res);
+ return NULL;
+}
+
+/*
+ * nfp_resource_release() - Release a NFP Resource handle
+ * @res: NFP Resource handle
+ *
+ * NOTE: This function implicitly unlocks the resource handle
+ */
+void
+nfp_resource_release(struct nfp_resource *res)
+{
+ nfp_cpp_mutex_unlock(res->mutex);
+ nfp_cpp_mutex_free(res->mutex);
+ free(res);
+}
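A hypothetical end-to-end use of the resource API (assuming a valid struct nfp_cpp handle and <inttypes.h> for the PRI macros):

```c
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include "nfp_resource.h"

static int with_nffw_resource(struct nfp_cpp *cpp)
{
	struct nfp_resource *res;

	res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
	if (!res)
		return -EBUSY;

	printf("%s: cpp_id 0x%08x addr 0x%" PRIx64 " size %" PRIu64 "\n",
	       nfp_resource_name(res), nfp_resource_cpp_id(res),
	       nfp_resource_address(res), nfp_resource_size(res));

	nfp_resource_release(res);	/* unlocks and frees the handle */
	return 0;
}
```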
+
+/*
+ * nfp_resource_cpp_id() - Return the cpp_id of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: NFP CPP ID
+ */
+uint32_t
+nfp_resource_cpp_id(const struct nfp_resource *res)
+{
+ return res->cpp_id;
+}
+
+/*
+ * nfp_resource_name() - Return the name of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: const char pointer to the name of the resource
+ */
+const char *
+nfp_resource_name(const struct nfp_resource *res)
+{
+ return res->name;
+}
+
+/*
+ * nfp_resource_address() - Return the address of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: Address of the resource
+ */
+uint64_t
+nfp_resource_address(const struct nfp_resource *res)
+{
+ return res->addr;
+}
+
+/*
+ * nfp_resource_size() - Return the size in bytes of a resource handle
+ * @res: NFP Resource handle
+ *
+ * Return: Size of the resource in bytes
+ */
+uint64_t
+nfp_resource_size(const struct nfp_resource *res)
+{
+ return res->size;
+}
diff --git a/drivers/net/nfp/nfpcore/nfp_resource.h b/drivers/net/nfp/nfpcore/nfp_resource.h
new file mode 100644
index 00000000..06cc6f74
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_resource.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NFP_RESOURCE_H
+#define NFP_RESOURCE_H
+
+#include "nfp_cpp.h"
+
+#define NFP_RESOURCE_NFP_NFFW "nfp.nffw"
+#define NFP_RESOURCE_NFP_HWINFO "nfp.info"
+#define NFP_RESOURCE_NSP "nfp.sp"
+
+/**
+ * Opaque handle to a NFP Resource
+ */
+struct nfp_resource;
+
+struct nfp_resource *nfp_resource_acquire(struct nfp_cpp *cpp,
+ const char *name);
+
+/**
+ * Release a NFP Resource, and free the handle
+ * @param[in] res NFP Resource handle
+ */
+void nfp_resource_release(struct nfp_resource *res);
+
+/**
+ * Return the CPP ID of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return CPP ID of the NFP Resource
+ */
+uint32_t nfp_resource_cpp_id(const struct nfp_resource *res);
+
+/**
+ * Return the name of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Name of the NFP Resource
+ */
+const char *nfp_resource_name(const struct nfp_resource *res);
+
+/**
+ * Return the target address of a NFP Resource
+ * @param[in] res NFP Resource handle
+ * @return Address of the NFP Resource
+ */
+uint64_t nfp_resource_address(const struct nfp_resource *res);
+
+uint64_t nfp_resource_size(const struct nfp_resource *res);
+
+#endif /* NFP_RESOURCE_H */
diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.c b/drivers/net/nfp/nfpcore/nfp_rtsym.c
new file mode 100644
index 00000000..cb7d83db
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_rtsym.c
@@ -0,0 +1,327 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+/*
+ * nfp_rtsym.c
+ * Interface for accessing run-time symbol table
+ */
+
+#include <stdio.h>
+#include <rte_byteorder.h>
+#include "nfp_cpp.h"
+#include "nfp_mip.h"
+#include "nfp_rtsym.h"
+#include "nfp6000/nfp6000.h"
+
+/* These need to match the linker */
+#define SYM_TGT_LMEM 0
+#define SYM_TGT_EMU_CACHE 0x17
+
+struct nfp_rtsym_entry {
+ uint8_t type;
+ uint8_t target;
+ uint8_t island;
+ uint8_t addr_hi;
+ uint32_t addr_lo;
+ uint16_t name;
+ uint8_t menum;
+ uint8_t size_hi;
+ uint32_t size_lo;
+};
+
+struct nfp_rtsym_table {
+ struct nfp_cpp *cpp;
+ int num;
+ char *strtab;
+ struct nfp_rtsym symtab[];
+};
+
+static int
+nfp_meid(uint8_t island_id, uint8_t menum)
+{
+ return (island_id & 0x3F) == island_id && menum < 12 ?
+ (island_id << 4) | (menum + 4) : -1;
+}
+
+static void
+nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, uint32_t strtab_size,
+ struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
+{
+ sw->type = fw->type;
+ sw->name = cache->strtab + rte_le_to_cpu_16(fw->name) % strtab_size;
+ sw->addr = ((uint64_t)fw->addr_hi << 32) |
+ rte_le_to_cpu_32(fw->addr_lo);
+ sw->size = ((uint64_t)fw->size_hi << 32) |
+ rte_le_to_cpu_32(fw->size_lo);
+
+	switch (fw->target) {
+	case SYM_TGT_LMEM:
+		sw->target = NFP_RTSYM_TARGET_LMEM;
+		break;
+	case SYM_TGT_EMU_CACHE:
+		sw->target = NFP_RTSYM_TARGET_EMU_CACHE;
+		break;
+	default:
+		sw->target = fw->target;
+		break;
+	}
+
+#ifdef DEBUG
+	/* sw->target is only meaningful after the mapping above */
+	printf("rtsym_entry_init\n");
+	printf("\tname=%s, addr=%" PRIx64 ", size=%" PRIu64 ", target=%d\n",
+	       sw->name, sw->addr, sw->size, sw->target);
+#endif
+
+ if (fw->menum != 0xff)
+ sw->domain = nfp_meid(fw->island, fw->menum);
+ else if (fw->island != 0xff)
+ sw->domain = fw->island;
+ else
+ sw->domain = -1;
+}
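+
+/*
+ * Illustrative decode of the helper above (values assumed for clarity,
+ * not taken from real firmware): an entry with addr_hi = 0x01 and
+ * addr_lo = 0x2000 yields sw->addr = 0x100002000, and fw->name is an
+ * offset into the string table, wrapped by strtab_size to stay in bounds.
+ */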
+
+struct nfp_rtsym_table *
+nfp_rtsym_table_read(struct nfp_cpp *cpp)
+{
+ struct nfp_rtsym_table *rtbl;
+ struct nfp_mip *mip;
+
+ mip = nfp_mip_open(cpp);
+ rtbl = __nfp_rtsym_table_read(cpp, mip);
+ nfp_mip_close(mip);
+
+ return rtbl;
+}
+
+/*
+ * This looks more complex than it should be. But we need to get the type for
+ * the ~ right in round_down (it needs to be as wide as the result!), and we
+ * want to evaluate the macro arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
+
+#define round_up(x, y) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ ((((_x) - 1) | __round_mask(_x, y)) + 1); \
+ }))
+
+#define round_down(x, y) \
+ (__extension__ ({ \
+ typeof(x) _x = (x); \
+ ((_x) & ~__round_mask(_x, y)); \
+ }))
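+
+/*
+ * Worked example (illustrative only): round_up(13, 8) evaluates to 16 and
+ * round_down(13, 8) to 8. Because __round_mask() casts to __typeof__(x),
+ * the ~ in round_down is as wide as the operand, so for example
+ * round_down(0x10000000dULL, 8) correctly keeps the high bits and
+ * yields 0x100000008.
+ */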
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip)
+{
+ uint32_t strtab_addr, symtab_addr, strtab_size, symtab_size;
+ struct nfp_rtsym_entry *rtsymtab;
+ struct nfp_rtsym_table *cache;
+ const uint32_t dram =
+ NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
+ NFP_ISL_EMEM0;
+ int err, n, size;
+
+ if (!mip)
+ return NULL;
+
+ nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
+ nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
+
+ if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
+ return NULL;
+
+ /* Align to 64 bits */
+ symtab_size = round_up(symtab_size, 8);
+ strtab_size = round_up(strtab_size, 8);
+
+ rtsymtab = malloc(symtab_size);
+ if (!rtsymtab)
+ return NULL;
+
+ size = sizeof(*cache);
+ size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
+ size += strtab_size + 1;
+ cache = malloc(size);
+ if (!cache)
+ goto exit_free_rtsym_raw;
+
+ cache->cpp = cpp;
+ cache->num = symtab_size / sizeof(*rtsymtab);
+ cache->strtab = (void *)&cache->symtab[cache->num];
+
+ err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
+ if (err != (int)symtab_size)
+ goto exit_free_cache;
+
+ err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
+ if (err != (int)strtab_size)
+ goto exit_free_cache;
+ cache->strtab[strtab_size] = '\0';
+
+ for (n = 0; n < cache->num; n++)
+ nfp_rtsym_sw_entry_init(cache, strtab_size,
+ &cache->symtab[n], &rtsymtab[n]);
+
+ free(rtsymtab);
+
+ return cache;
+
+exit_free_cache:
+ free(cache);
+exit_free_rtsym_raw:
+ free(rtsymtab);
+ return NULL;
+}
+
+/*
+ * nfp_rtsym_count() - Get the number of RTSYM descriptors
+ * @rtbl: NFP RTsym table
+ *
+ * Return: Number of RTSYM descriptors
+ */
+int
+nfp_rtsym_count(struct nfp_rtsym_table *rtbl)
+{
+ if (!rtbl)
+ return -EINVAL;
+
+ return rtbl->num;
+}
+
+/*
+ * nfp_rtsym_get() - Get the Nth RTSYM descriptor
+ * @rtbl: NFP RTsym table
+ * @idx: Index (0-based) of the RTSYM descriptor
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *
+nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx)
+{
+ if (!rtbl)
+ return NULL;
+
+ if (idx >= rtbl->num)
+ return NULL;
+
+ return &rtbl->symtab[idx];
+}
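+
+/*
+ * Minimal usage sketch (illustrative, not part of the driver): walking
+ * the table with the two accessors above, given an rtbl obtained from
+ * nfp_rtsym_table_read().
+ *
+ *	const struct nfp_rtsym *sym;
+ *	int i, n = nfp_rtsym_count(rtbl);
+ *
+ *	for (i = 0; i < n; i++) {
+ *		sym = nfp_rtsym_get(rtbl, i);
+ *		if (sym)
+ *			printf("%s @ 0x%" PRIx64 "\n", sym->name, sym->addr);
+ *	}
+ */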
+
+/*
+ * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name)
+{
+ int n;
+
+ if (!rtbl)
+ return NULL;
+
+ for (n = 0; n < rtbl->num; n++)
+ if (strcmp(name, rtbl->symtab[n].name) == 0)
+ return &rtbl->symtab[n];
+
+ return NULL;
+}
+
+/*
+ * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
+ * @rtbl: NFP RTsym table
+ * @name: Symbol name
+ * @error: Pointer to error code (optional)
+ *
+ * Look up a symbol, map it, read it and return its value. The value of the
+ * symbol is interpreted as a simple little-endian unsigned value. The symbol
+ * can be 4 or 8 bytes in size.
+ *
+ * Return: value read, on error sets the error and returns ~0ULL.
+ */
+uint64_t
+nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error)
+{
+ const struct nfp_rtsym *sym;
+ uint32_t val32, id;
+ uint64_t val;
+ int err;
+
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym) {
+ err = -ENOENT;
+ goto exit;
+ }
+
+ id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
+
+#ifdef DEBUG
+ printf("Reading symbol %s with size %" PRIu64 " at %" PRIx64 "\n",
+ name, sym->size, sym->addr);
+#endif
+ switch (sym->size) {
+ case 4:
+ err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32);
+ val = val32;
+ break;
+ case 8:
+ err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val);
+ break;
+ default:
+ printf("rtsym '%s' unsupported size: %" PRId64 "\n",
+ name, sym->size);
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ err = -EIO;
+exit:
+ if (error)
+ *error = err;
+
+ if (err)
+ return ~0ULL;
+
+ return val;
+}
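+
+/*
+ * Usage sketch (illustrative; the symbol name is hypothetical):
+ *
+ *	int err;
+ *	uint64_t val = nfp_rtsym_read_le(rtbl, "_example_sym", &err);
+ *
+ *	if (err)
+ *		printf("rtsym read failed: %d\n", err);
+ */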
+
+uint8_t *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
+ unsigned int min_size, struct nfp_cpp_area **area)
+{
+ const struct nfp_rtsym *sym;
+ uint8_t *mem;
+
+#ifdef DEBUG
+ printf("mapping symbol %s\n", name);
+#endif
+ sym = nfp_rtsym_lookup(rtbl, name);
+ if (!sym) {
+ printf("symbol lookup fails for %s\n", name);
+ return NULL;
+ }
+
+ if (sym->size < min_size) {
+ printf("Symbol %s too small (%" PRIu64 " < %u)\n", name,
+ sym->size, min_size);
+ return NULL;
+ }
+
+ mem = nfp_cpp_map_area(rtbl->cpp, sym->domain, sym->target, sym->addr,
+ sym->size, area);
+ if (!mem) {
+ printf("Failed to map symbol %s\n", name);
+ return NULL;
+ }
+#ifdef DEBUG
+ printf("symbol %s with address %p\n", name, mem);
+#endif
+
+ return mem;
+}
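+
+/*
+ * Usage sketch (illustrative; symbol name and minimum size are assumed,
+ * and the mapping is released with nfp_cpp_area_release_free() from
+ * nfp_cpp.h):
+ *
+ *	struct nfp_cpp_area *area;
+ *	uint8_t *base = nfp_rtsym_map(rtbl, "_example_sym", 4096, &area);
+ *
+ *	if (base) {
+ *		... read/write up to the symbol size through base ...
+ *		nfp_cpp_area_release_free(area);
+ *	}
+ */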
diff --git a/drivers/net/nfp/nfpcore/nfp_rtsym.h b/drivers/net/nfp/nfpcore/nfp_rtsym.h
new file mode 100644
index 00000000..8b494211
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_rtsym.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef __NFP_RTSYM_H__
+#define __NFP_RTSYM_H__
+
+#define NFP_RTSYM_TYPE_NONE 0
+#define NFP_RTSYM_TYPE_OBJECT 1
+#define NFP_RTSYM_TYPE_FUNCTION 2
+#define NFP_RTSYM_TYPE_ABS 3
+
+#define NFP_RTSYM_TARGET_NONE 0
+#define NFP_RTSYM_TARGET_LMEM -1
+#define NFP_RTSYM_TARGET_EMU_CACHE -7
+
+/*
+ * Structure describing a run-time NFP symbol.
+ *
+ * The memory target of the symbol is generally the CPP target number and can be
+ * used directly by the nfp_cpp API calls. However, in some cases (i.e., for
+ * local memory or control store) the target is encoded using a negative number.
+ *
+ * When the target type cannot be used to fully describe the location of a
+ * symbol, the domain field is used to further specify the location (i.e., the
+ * specific ME or island number).
+ *
+ * For ME target resources, 'domain' is an MEID.
+ * For Island target resources, 'domain' is an island ID, with the one exception
+ * of "sram" symbols for backward compatibility, which are viewed as global.
+ */
+struct nfp_rtsym {
+ const char *name;
+ uint64_t addr;
+ uint64_t size;
+ int type;
+ int target;
+ int domain;
+};
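+
+/*
+ * Illustrative reading of the fields above (an interpretation, not extra
+ * API): a symbol with target NFP_RTSYM_TARGET_EMU_CACHE (-7) does not name
+ * a CPP target directly; it is an encoding that the caller must translate,
+ * together with 'domain' (here an island ID), before issuing nfp_cpp
+ * accesses. A plain positive target can be used as a CPP target as-is.
+ */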
+
+struct nfp_rtsym_table;
+
+struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp);
+
+struct nfp_rtsym_table *
+__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip);
+
+int nfp_rtsym_count(struct nfp_rtsym_table *rtbl);
+
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx);
+
+const struct nfp_rtsym *
+nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name);
+
+uint64_t nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name,
+ int *error);
+uint8_t *
+nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name,
+ unsigned int min_size, struct nfp_cpp_area **area);
+#endif
diff --git a/drivers/net/nfp/nfpcore/nfp_target.h b/drivers/net/nfp/nfpcore/nfp_target.h
new file mode 100644
index 00000000..2884a003
--- /dev/null
+++ b/drivers/net/nfp/nfpcore/nfp_target.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Netronome Systems, Inc.
+ * All rights reserved.
+ */
+
+#ifndef NFP_TARGET_H
+#define NFP_TARGET_H
+
+#include "nfp-common/nfp_resid.h"
+#include "nfp-common/nfp_cppat.h"
+#include "nfp-common/nfp_platform.h"
+#include "nfp_cpp.h"
+
+#define P32 1
+#define P64 2
+
+#define PUSHPULL(_pull, _push) (((_pull) << 4) | ((_push) << 0))
+
+#ifndef NFP_ERRNO
+#include <errno.h>
+#define NFP_ERRNO(x) (errno = (x), -1)
+#endif
+
+static inline int
+pushpull_width(int pp)
+{
+ pp &= 0xf;
+
+ if (pp == 0)
+ return NFP_ERRNO(EINVAL);
+ return (2 << pp);
+}
+
+#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0)
+#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4)
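+
+/*
+ * Worked example (illustrative only): PUSHPULL(P32, P64) == 0x12, so
+ * PULL_WIDTH(0x12) == pushpull_width(1) == 4 bytes and
+ * PUSH_WIDTH(0x12) == pushpull_width(2) == 8 bytes. A zero nibble means
+ * the action has no data phase in that direction, and pushpull_width()
+ * then returns -1 with errno set to EINVAL via NFP_ERRNO().
+ */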
+
+static inline int
+target_rw(uint32_t cpp_id, int pp, int start, int len)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < start || island > (start + len)))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0):
+ return PUSHPULL(0, pp);
+ case NFP_CPP_ID(0, 1, 0):
+ return PUSHPULL(pp, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(pp, pp);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_dma(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiDma */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiDma */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_stats(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiStats */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiStats */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_tm(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiTM */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiTM */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi_ppc(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 0): /* ReadNbiPreclassifier */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* WriteNbiPreclassifier */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0):
+ return PUSHPULL(P64, P64);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_nbi(uint32_t cpp_id, uint64_t address)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+ uint64_t rel_addr = address & 0x3fffff;
+
+ if (island && (island < 8 || island > 9))
+ return NFP_ERRNO(EINVAL);
+
+ if (rel_addr < (1 << 20))
+ return nfp6000_nbi_dma(cpp_id);
+ if (rel_addr < (2 << 20))
+ return nfp6000_nbi_stats(cpp_id);
+ if (rel_addr < (3 << 20))
+ return nfp6000_nbi_tm(cpp_id);
+ return nfp6000_nbi_ppc(cpp_id);
+}
+
+/*
+ * This structure ONLY includes items that can be done with a read or write of
+ * 32-bit or 64-bit words. All others are not listed.
+ */
+static inline int
+nfp6000_mu_common(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 0): /* read_be/write_be */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1): /* read_le/write_le */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 2): /* {read/write}_swap_be */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 3): /* {read/write}_swap_le */
+ return PUSHPULL(P64, P64);
+ case NFP_CPP_ID(0, 0, 0): /* read_be */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 1): /* read_le */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 2): /* read_swap_be */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 0, 3): /* read_swap_le */
+ return PUSHPULL(0, P64);
+ case NFP_CPP_ID(0, 1, 0): /* write_be */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 1): /* write_le */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 2): /* write_swap_be */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 1, 3): /* write_swap_le */
+ return PUSHPULL(P64, 0);
+ case NFP_CPP_ID(0, 3, 0): /* atomic_read */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 2): /* mask_compare_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 0): /* atomic_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 2): /* atomic_write_imm */
+ return PUSHPULL(0, 0);
+ case NFP_CPP_ID(0, 4, 3): /* swap_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 5, 0): /* set */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 5, 3): /* test_set_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 6, 0): /* clr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 3): /* test_clr_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 7, 0): /* add */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 7, 3): /* test_add_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 8, 0): /* addsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 3): /* test_addsat_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 0): /* sub */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 9, 3): /* test_sub_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 10, 0): /* subsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 10, 3): /* test_subsat_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 0): /* microq128_get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 1): /* microq128_pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 13, 2): /* microq128_put */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 0): /* xor */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 3): /* test_xor_imm */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 0): /* read32_be */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 1): /* read32_le */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 2): /* read32_swap_be */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 28, 3): /* read32_swap_le */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 31, 0): /* write32_be */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 1): /* write32_le */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 2): /* write32_swap_be */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 31, 3): /* write32_swap_le */
+ return PUSHPULL(P32, 0);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp6000_mu_ctm(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 16, 1): /* packet_read_packet_status */
+ return PUSHPULL(0, P32);
+ default:
+ return nfp6000_mu_common(cpp_id);
+ }
+}
+
+static inline int
+nfp6000_mu_emu(uint32_t cpp_id)
+{
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 18, 0): /* read_queue */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 18, 1): /* read_queue_ring */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 18, 2): /* write_queue */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 18, 3): /* write_queue_ring */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 20, 2): /* journal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 21, 0): /* get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 21, 1): /* get_eop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 21, 2): /* get_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 0): /* pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 1): /* pop_eop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 22, 2): /* pop_freely */
+ return PUSHPULL(0, P32);
+ default:
+ return nfp6000_mu_common(cpp_id);
+ }
+}
+
+static inline int
+nfp6000_mu_imu(uint32_t cpp_id)
+{
+ return nfp6000_mu_common(cpp_id);
+}
+
+static inline int
+nfp6000_mu(uint32_t cpp_id, uint64_t address)
+{
+ int pp;
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island == 0) {
+ if (address < 0x2000000000ULL)
+ pp = nfp6000_mu_ctm(cpp_id);
+ else if (address < 0x8000000000ULL)
+ pp = nfp6000_mu_emu(cpp_id);
+ else if (address < 0x9800000000ULL)
+ pp = nfp6000_mu_ctm(cpp_id);
+ else if (address < 0x9C00000000ULL)
+ pp = nfp6000_mu_emu(cpp_id);
+ else if (address < 0xA000000000ULL)
+ pp = nfp6000_mu_imu(cpp_id);
+ else
+ pp = nfp6000_mu_ctm(cpp_id);
+ } else if (island >= 24 && island <= 27) {
+ pp = nfp6000_mu_emu(cpp_id);
+ } else if (island >= 28 && island <= 31) {
+ pp = nfp6000_mu_imu(cpp_id);
+ } else if (island == 1 ||
+ (island >= 4 && island <= 7) ||
+ (island >= 12 && island <= 13) ||
+ (island >= 32 && island <= 47) ||
+ (island >= 48 && island <= 51)) {
+ pp = nfp6000_mu_ctm(cpp_id);
+ } else {
+ pp = NFP_ERRNO(EINVAL);
+ }
+
+ return pp;
+}
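+
+/*
+ * Illustrative dispatch (addresses assumed): with island 0, address
+ * 0x1800000000 falls below 0x2000000000 and resolves as CTM, while
+ * 0x4000000000 lands in the EMU window; islands 24-27 always resolve
+ * to EMU and 28-31 to IMU, regardless of address.
+ */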
+
+static inline int
+nfp6000_ila(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 48 || island > 51))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 1): /* read_check_error */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 0): /* read_int */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0): /* write_int */
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 48, 4);
+ }
+}
+
+static inline int
+nfp6000_pci(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 4 || island > 7))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 2, 0):
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0):
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 4, 4);
+ }
+}
+
+static inline int
+nfp6000_crypto(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 12 || island > 15))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 2, 0):
+ return PUSHPULL(P64, 0);
+ default:
+ return target_rw(cpp_id, P64, 12, 4);
+ }
+}
+
+static inline int
+nfp6000_cap_xpb(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 1 || island > 63))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 1): /* RingGet */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 0, 2): /* Interthread Signal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 1, 1): /* RingPut */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 1, 2): /* CTNNWr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 0): /* ReflectRd, signal none */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 1): /* ReflectRd, signal self */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 2): /* ReflectRd, signal remote */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 2, 3): /* ReflectRd, signal both */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 3, 0): /* ReflectWr, signal none */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 1): /* ReflectWr, signal self */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 2): /* ReflectWr, signal remote */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 3, 3): /* ReflectWr, signal both */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, NFP_CPP_ACTION_RW, 1):
+ return PUSHPULL(P32, P32);
+ default:
+ return target_rw(cpp_id, P32, 1, 63);
+ }
+}
+
+static inline int
+nfp6000_cls(uint32_t cpp_id)
+{
+ int island = NFP_CPP_ID_ISLAND_of(cpp_id);
+
+ if (island && (island < 1 || island > 63))
+ return NFP_ERRNO(EINVAL);
+
+ switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+ case NFP_CPP_ID(0, 0, 3): /* xor */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 0): /* set */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 2, 1): /* clr */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 0): /* add */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 4, 1): /* add64 */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 0): /* sub */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 1): /* sub64 */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 6, 2): /* subsat */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 2): /* hash_mask */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 8, 3): /* hash_clear */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 9, 0): /* ring_get */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 1): /* ring_pop */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 2): /* ring_get_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 9, 3): /* ring_pop_freely */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 10, 0): /* ring_put */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 10, 2): /* ring_journal */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 14, 0): /* reflect_write_sig_local */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 15, 1): /* reflect_read_sig_local */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 17, 2): /* statistic */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 24, 0): /* ring_read */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 24, 1): /* ring_write */
+ return PUSHPULL(P32, 0);
+ case NFP_CPP_ID(0, 25, 0): /* ring_workq_add_thread */
+ return PUSHPULL(0, P32);
+ case NFP_CPP_ID(0, 25, 1): /* ring_workq_add_work */
+ return PUSHPULL(P32, 0);
+ default:
+ return target_rw(cpp_id, P32, 0, 64);
+ }
+}
+
+static inline int
+nfp6000_target_pushpull(uint32_t cpp_id, uint64_t address)
+{
+ switch (NFP_CPP_ID_TARGET_of(cpp_id)) {
+ case NFP6000_CPPTGT_NBI:
+ return nfp6000_nbi(cpp_id, address);
+ case NFP6000_CPPTGT_VQDR:
+ return target_rw(cpp_id, P32, 24, 4);
+ case NFP6000_CPPTGT_ILA:
+ return nfp6000_ila(cpp_id);
+ case NFP6000_CPPTGT_MU:
+ return nfp6000_mu(cpp_id, address);
+ case NFP6000_CPPTGT_PCIE:
+ return nfp6000_pci(cpp_id);
+ case NFP6000_CPPTGT_ARM:
+ if (address < 0x10000)
+ return target_rw(cpp_id, P64, 1, 1);
+ else
+ return target_rw(cpp_id, P32, 1, 1);
+ case NFP6000_CPPTGT_CRYPTO:
+ return nfp6000_crypto(cpp_id);
+ case NFP6000_CPPTGT_CTXPB:
+ return nfp6000_cap_xpb(cpp_id);
+ case NFP6000_CPPTGT_CLS:
+ return nfp6000_cls(cpp_id);
+ case 0:
+ return target_rw(cpp_id, P32, 4, 4);
+ default:
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp_target_pushpull_width(int pp, int write_not_read)
+{
+ if (pp < 0)
+ return pp;
+
+ if (write_not_read)
+ return PULL_WIDTH(pp);
+ else
+ return PUSH_WIDTH(pp);
+}
+
+static inline int
+nfp6000_target_action_width(uint32_t cpp_id, uint64_t address,
+ int write_not_read)
+{
+ int pp;
+
+ pp = nfp6000_target_pushpull(cpp_id, address);
+
+ return nfp_target_pushpull_width(pp, write_not_read);
+}
+
+static inline int
+nfp_target_action_width(uint32_t model, uint32_t cpp_id, uint64_t address,
+ int write_not_read)
+{
+ if (NFP_CPP_MODEL_IS_6000(model)) {
+ return nfp6000_target_action_width(cpp_id, address,
+ write_not_read);
+ } else {
+ return NFP_ERRNO(EINVAL);
+ }
+}
+
+static inline int
+nfp_target_cpp(uint32_t cpp_island_id, uint64_t cpp_island_address,
+ uint32_t *cpp_target_id, uint64_t *cpp_target_address,
+ const uint32_t *imb_table)
+{
+ int err;
+ int island = NFP_CPP_ID_ISLAND_of(cpp_island_id);
+ int target = NFP_CPP_ID_TARGET_of(cpp_island_id);
+ uint32_t imb;
+
+ if (target < 0 || target >= 16)
+ return NFP_ERRNO(EINVAL);
+
+ if (island == 0) {
+ /* Already translated */
+ *cpp_target_id = cpp_island_id;
+ *cpp_target_address = cpp_island_address;
+ return 0;
+ }
+
+ if (!imb_table) {
+ /* CPP + Island only allowed on systems with IMB tables */
+ return NFP_ERRNO(EINVAL);
+ }
+
+ imb = imb_table[target];
+
+ *cpp_target_address = cpp_island_address;
+ err = _nfp6000_cppat_addr_encode(cpp_target_address, island, target,
+ ((imb >> 13) & 7),
+ ((imb >> 12) & 1),
+ ((imb >> 6) & 0x3f),
+ ((imb >> 0) & 0x3f));
+ if (err == 0) {
+ *cpp_target_id =
+ NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id),
+ NFP_CPP_ID_TOKEN_of(cpp_island_id));
+ }
+
+ return err;
+}
+
+#endif /* NFP_TARGET_H */
diff --git a/drivers/net/null/meson.build b/drivers/net/null/meson.build
index 68ac0d2a..60e2ce6c 100644
--- a/drivers/net/null/meson.build
+++ b/drivers/net/null/meson.build
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
sources = files('rte_eth_null.c')
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index d003b283..244f8654 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -73,6 +73,7 @@ struct pmd_internals {
struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];
+ struct ether_addr eth_addr;
/** Bit mask of RSS offloads, the bit offset also means flow type */
uint64_t flow_type_rss_offloads;
@@ -84,16 +85,19 @@ struct pmd_internals {
uint8_t rss_key[40]; /**< 40-byte hash key. */
};
-
-
-static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG,
+ .link_autoneg = ETH_LINK_FIXED,
};
+static int eth_null_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_null_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -105,10 +109,10 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
return 0;
packet_size = h->internals->packet_size;
+ if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
+ return 0;
+
for (i = 0; i < nb_bufs; i++) {
- bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
- if (!bufs[i])
- break;
bufs[i]->data_len = (uint16_t)packet_size;
bufs[i]->pkt_len = packet_size;
bufs[i]->port = h->internals->port_id;
@@ -130,10 +134,10 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
return 0;
packet_size = h->internals->packet_size;
+ if (rte_pktmbuf_alloc_bulk(h->mb_pool, bufs, nb_bufs) != 0)
+ return 0;
+
for (i = 0; i < nb_bufs; i++) {
- bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
- if (!bufs[i])
- break;
rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
packet_size);
bufs[i]->data_len = (uint16_t)packet_size;
@@ -301,6 +305,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->min_rx_bufsize = 0;
dev_info->reta_size = internals->reta_size;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -461,10 +466,11 @@ eth_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
__rte_unused struct ether_addr *addr)
{
+ return 0;
}
static const struct eth_dev_ops ops = {
@@ -496,7 +502,7 @@ eth_dev_null_create(struct rte_vdev_device *dev,
{
const unsigned nb_rx_queues = 1;
const unsigned nb_tx_queues = 1;
- struct rte_eth_dev_data *data = NULL;
+ struct rte_eth_dev_data *data;
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
@@ -510,22 +516,12 @@ eth_dev_null_create(struct rte_vdev_device *dev,
if (dev->device.numa_node == SOCKET_ID_ANY)
dev->device.numa_node = rte_socket_id();
- RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
- dev->device.numa_node);
-
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
+ PMD_LOG(INFO, "Creating null ethdev on numa socket %u",
dev->device.numa_node);
- if (!data)
- return -ENOMEM;
eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
- if (!eth_dev) {
- rte_free(data);
+ if (!eth_dev)
return -ENOMEM;
- }
/* now put it all together
* - store queue data in internals,
@@ -540,19 +536,19 @@ eth_dev_null_create(struct rte_vdev_device *dev,
internals->packet_size = packet_size;
internals->packet_copy = packet_copy;
internals->port_id = eth_dev->data->port_id;
+ eth_random_addr(internals->eth_addr.addr_bytes);
internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;
rte_memcpy(internals->rss_key, default_rss_key, 40);
- rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data = eth_dev->data;
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
- data->mac_addrs = &eth_addr;
+ data->mac_addrs = &internals->eth_addr;
- eth_dev->data = data;
eth_dev->dev_ops = &ops;
/* finally assign rx and tx ops */
@@ -564,6 +560,7 @@ eth_dev_null_create(struct rte_vdev_device *dev,
eth_dev->tx_pkt_burst = eth_null_tx;
}
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -608,6 +605,7 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
unsigned packet_size = default_packet_size;
unsigned packet_copy = default_packet_copy;
struct rte_kvargs *kvlist = NULL;
+ struct rte_eth_dev *eth_dev;
int ret;
if (!dev)
@@ -615,7 +613,21 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
- RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);
+ PMD_LOG(INFO, "Initializing pmd_null for %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
if (params != NULL) {
kvlist = rte_kvargs_parse(params, valid_arguments);
@@ -641,8 +653,8 @@ rte_pmd_null_probe(struct rte_vdev_device *dev)
}
}
- RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %d, "
- "packet copy is %s\n", packet_size,
+ PMD_LOG(INFO, "Configure pmd_null: packet size is %d, "
+ "packet copy is %s", packet_size,
packet_copy ? "enabled" : "disabled");
ret = eth_dev_null_create(dev, packet_size, packet_copy);
@@ -661,7 +673,7 @@ rte_pmd_null_remove(struct rte_vdev_device *dev)
if (!dev)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
+ PMD_LOG(INFO, "Closing null ethdev on numa socket %u",
rte_socket_id());
/* find the ethdev entry */
@@ -670,7 +682,6 @@ rte_pmd_null_remove(struct rte_vdev_device *dev)
return -1;
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
@@ -687,3 +698,10 @@ RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
"size=<int> "
"copy=<int>");
+
+RTE_INIT(eth_null_init_log)
+{
+ eth_null_logtype = rte_log_register("pmd.net.null");
+ if (eth_null_logtype >= 0)
+ rte_log_set_level(eth_null_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/octeontx/Makefile b/drivers/net/octeontx/Makefile
index 3e4a1066..885f1768 100644
--- a/drivers/net/octeontx/Makefile
+++ b/drivers/net/octeontx/Makefile
@@ -10,6 +10,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_pmd_octeontx.a
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx/
CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
EXPORT_MAP := rte_pmd_octeontx_version.map
@@ -46,7 +47,7 @@ endif
CFLAGS_octeontx_ethdev.o += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_common_octeontx
LDLIBS += -lrte_mempool_octeontx
LDLIBS += -lrte_eventdev
LDLIBS += -lrte_bus_pci
diff --git a/drivers/net/octeontx/base/octeontx_bgx.c b/drivers/net/octeontx/base/octeontx_bgx.c
index 8576d8ed..0e238826 100644
--- a/drivers/net/octeontx/base/octeontx_bgx.c
+++ b/drivers/net/octeontx/base/octeontx_bgx.c
@@ -19,7 +19,7 @@ octeontx_bgx_port_open(int port, octeontx_mbox_bgx_port_conf_t *conf)
hdr.msg = MBOX_BGX_PORT_OPEN;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
if (res < 0)
return -EACCES;
@@ -49,7 +49,7 @@ octeontx_bgx_port_close(int port)
hdr.msg = MBOX_BGX_PORT_CLOSE;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
@@ -66,7 +66,7 @@ octeontx_bgx_port_start(int port)
hdr.msg = MBOX_BGX_PORT_START;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
@@ -83,7 +83,7 @@ octeontx_bgx_port_stop(int port)
hdr.msg = MBOX_BGX_PORT_STOP;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
@@ -103,7 +103,7 @@ octeontx_bgx_port_get_config(int port, octeontx_mbox_bgx_port_conf_t *conf)
hdr.vfid = port;
memset(&bgx_conf, 0, sizeof(octeontx_mbox_bgx_port_conf_t));
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_conf, len);
if (res < 0)
return -EACCES;
@@ -135,7 +135,7 @@ octeontx_bgx_port_status(int port, octeontx_mbox_bgx_port_status_t *stat)
hdr.msg = MBOX_BGX_PORT_GET_STATUS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stat, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stat, len);
if (res < 0)
return -EACCES;
@@ -156,7 +156,7 @@ octeontx_bgx_port_stats(int port, octeontx_mbox_bgx_port_stats_t *stats)
hdr.msg = MBOX_BGX_PORT_GET_STATS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &bgx_stats, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &bgx_stats, len);
if (res < 0)
return -EACCES;
@@ -181,7 +181,7 @@ octeontx_bgx_port_stats_clr(int port)
hdr.msg = MBOX_BGX_PORT_CLR_STATS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
@@ -200,7 +200,7 @@ octeontx_bgx_port_link_status(int port)
hdr.msg = MBOX_BGX_PORT_GET_LINK_STATUS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, &link, len);
+ res = octeontx_mbox_send(&hdr, NULL, 0, &link, len);
if (res < 0)
return -EACCES;
@@ -219,7 +219,7 @@ octeontx_bgx_port_promisc_set(int port, int en)
hdr.vfid = port;
prom = en ? 1 : 0;
- res = octeontx_ssovf_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0);
+ res = octeontx_mbox_send(&hdr, &prom, sizeof(prom), NULL, 0);
if (res < 0)
return -EACCES;
@@ -237,7 +237,7 @@ octeontx_bgx_port_mac_set(int port, uint8_t *mac_addr)
hdr.msg = MBOX_BGX_PORT_SET_MACADDR;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, mac_addr, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, mac_addr, len, NULL, 0);
if (res < 0)
return -EACCES;
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index 58a7f110..1babea0e 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -19,7 +19,7 @@ octeontx_pki_port_open(int port)
hdr.msg = MBOX_PKI_PORT_OPEN;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
if (res < 0)
return -EACCES;
return res;
@@ -38,7 +38,7 @@ octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg)
hdr.msg = MBOX_PKI_PORT_HASH_CONFIG;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &h_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &h_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -58,7 +58,7 @@ octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg)
hdr.msg = MBOX_PKI_PORT_PKTBUF_CONFIG;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &b_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &b_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
return res;
@@ -77,7 +77,7 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
hdr.msg = MBOX_PKI_PORT_CREATE_QOS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -99,7 +99,7 @@ octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
hdr.msg = MBOX_PKI_PORT_ERRCHK_CONFIG;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &e_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &e_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index d036054c..764aff53 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -422,7 +422,7 @@ octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -442,7 +442,7 @@ octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &q_cfg, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -464,7 +464,7 @@ octeontx_pki_port_close(int port)
hdr.msg = MBOX_PKI_PORT_CLOSE;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -486,7 +486,7 @@ octeontx_pki_port_start(int port)
hdr.msg = MBOX_PKI_PORT_START;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
if (res < 0)
return -EACCES;
@@ -508,7 +508,7 @@ octeontx_pki_port_stop(int port)
hdr.msg = MBOX_PKI_PORT_STOP;
hdr.vfid = port;
- res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &ptype, len, NULL, 0);
if (res < 0)
return -EACCES;
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index b739c0b3..0f3d5d67 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -46,9 +46,7 @@ int otx_net_logtype_mbox;
int otx_net_logtype_init;
int otx_net_logtype_driver;
-RTE_INIT(otx_net_init_log);
-static void
-otx_net_init_log(void)
+RTE_INIT(otx_net_init_log)
{
otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox");
if (otx_net_logtype_mbox >= 0)
@@ -122,7 +120,7 @@ octeontx_port_open(struct octeontx_nic *nic)
int res;
res = 0;
-
+ memset(&bgx_port_conf, 0x0, sizeof(bgx_port_conf));
PMD_INIT_FUNC_TRACE();
res = octeontx_bgx_port_open(nic->port_id, &bgx_port_conf);
@@ -283,34 +281,17 @@ octeontx_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- if (!rxmode->hw_strip_crc) {
+ /* The KEEP_CRC offload flag is not supported by the PMD.
+ * This block can be removed when DEV_RX_OFFLOAD_CRC_STRIP is removed.
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
- rxmode->hw_strip_crc = 1;
- }
-
- if (rxmode->hw_ip_checksum) {
- PMD_INIT_LOG(NOTICE, "rxcksum not supported");
- rxmode->hw_ip_checksum = 0;
- }
-
- if (rxmode->split_hdr_size) {
- octeontx_log_err("rxmode does not support split header");
- return -EINVAL;
- }
-
- if (rxmode->hw_vlan_filter) {
- octeontx_log_err("VLAN filter not supported");
- return -EINVAL;
+ rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
- if (rxmode->hw_vlan_extend) {
- octeontx_log_err("VLAN extended not supported");
- return -EINVAL;
- }
-
- if (rxmode->enable_lro) {
- octeontx_log_err("LRO not supported");
- return -EINVAL;
+ if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
+ PMD_INIT_LOG(NOTICE, "can't disable lockfree tx");
+ txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
}
if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
@@ -372,6 +353,9 @@ octeontx_dev_close(struct rte_eth_dev *dev)
rte_free(txq);
}
+
+ dev->tx_pkt_burst = NULL;
+ dev->rx_pkt_burst = NULL;
}
static int
@@ -465,9 +449,6 @@ octeontx_dev_stop(struct rte_eth_dev *dev)
ret);
return;
}
-
- dev->tx_pkt_burst = NULL;
- dev->rx_pkt_burst = NULL;
}
static void
@@ -488,20 +469,6 @@ octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
octeontx_port_promisc_set(nic, 0);
}
-static inline int
-octeontx_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static int
octeontx_port_link_status(struct octeontx_nic *nic)
{
@@ -532,7 +499,6 @@ octeontx_dev_link_update(struct rte_eth_dev *dev,
struct rte_eth_link link;
int res;
- res = 0;
PMD_INIT_FUNC_TRACE();
res = octeontx_port_link_status(nic);
@@ -566,6 +532,7 @@ octeontx_dev_link_update(struct rte_eth_dev *dev,
case OCTEONTX_LINK_SPEED_RESERVE1:
case OCTEONTX_LINK_SPEED_RESERVE2:
default:
+ link.link_speed = ETH_SPEED_NUM_NONE;
octeontx_log_err("incorrect link speed %d", nic->speed);
break;
}
@@ -573,7 +540,7 @@ octeontx_dev_link_update(struct rte_eth_dev *dev,
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_autoneg = ETH_LINK_AUTONEG;
- return octeontx_atomic_write_link_status(dev, &link);
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -594,7 +561,7 @@ octeontx_dev_stats_reset(struct rte_eth_dev *dev)
octeontx_port_stats_clr(nic);
}
-static void
+static int
octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr)
{
@@ -605,6 +572,8 @@ octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
if (ret != 0)
octeontx_log_err("failed to set MAC address on port %d",
nic->port_id);
+
+ return ret;
}
static void
@@ -619,28 +588,25 @@ octeontx_dev_info(struct rte_eth_dev *dev,
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
ETH_LINK_SPEED_40G;
- dev_info->driver_name = RTE_STR(rte_octeontx_pmd);
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = 0,
.rx_drop_en = 0,
+ .offloads = OCTEONTX_RX_OFFLOADS,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = 0,
- .txq_flags =
- ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS |
- ETH_TXQ_FLAGS_NOXSUMS,
+ .offloads = OCTEONTX_TX_OFFLOADS,
};
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MT_LOCKFREE;
+ dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
+ dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
}
static void
@@ -744,7 +710,7 @@ octeontx_dev_tx_queue_release(void *tx_queue)
static int
octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t nb_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct octeontx_nic *nic = octeontx_pmd_priv(dev);
struct octeontx_txq *txq = NULL;
@@ -753,7 +719,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
RTE_SET_USED(nb_desc);
RTE_SET_USED(socket_id);
- RTE_SET_USED(tx_conf);
dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
@@ -823,7 +788,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
pki_qos_cfg_t pki_qos;
uintptr_t pool;
int ret, port;
- uint8_t gaura;
+ uint16_t gaura;
unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
@@ -934,8 +899,8 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
pool = (uintptr_t)mb_pool->pool_id;
- /* Get the gpool Id */
- gaura = octeontx_fpa_bufpool_gpool(pool);
+ /* Get the gaura Id */
+ gaura = octeontx_fpa_bufpool_gaura(pool);
pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
pki_qos.num_entry = 1;
@@ -1039,7 +1004,7 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
char octtx_name[OCTEONTX_MAX_NAME_LEN];
struct octeontx_nic *nic = NULL;
struct rte_eth_dev *eth_dev = NULL;
- struct rte_eth_dev_data *data = NULL;
+ struct rte_eth_dev_data *data;
const char *name = rte_vdev_device_name(dev);
PMD_INIT_FUNC_TRACE();
@@ -1050,18 +1015,14 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
if (eth_dev == NULL)
return -ENODEV;
+ eth_dev->dev_ops = &octeontx_dev_ops;
+ eth_dev->device = &dev->device;
eth_dev->tx_pkt_burst = octeontx_xmit_pkts;
eth_dev->rx_pkt_burst = octeontx_recv_pkts;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
- data = rte_zmalloc_socket(octtx_name, sizeof(*data), 0, socket_id);
- if (data == NULL) {
- octeontx_log_err("failed to allocate devdata");
- res = -ENOMEM;
- goto err;
- }
-
nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id);
if (nic == NULL) {
octeontx_log_err("failed to allocate nic structure");
@@ -1097,11 +1058,9 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
eth_dev->data->kdrv = RTE_KDRV_NONE;
eth_dev->data->numa_node = dev->device.numa_node;
- rte_memcpy(data, (eth_dev)->data, sizeof(*data));
+ data = eth_dev->data;
data->dev_private = nic;
-
data->port_id = eth_dev->data->port_id;
- snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);
nic->ev_queues = 1;
nic->ev_ports = 1;
@@ -1120,7 +1079,6 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
goto err;
}
- eth_dev->data = data;
eth_dev->dev_ops = &octeontx_dev_ops;
/* Finally save ethdev pointer to the NIC structure */
@@ -1146,10 +1104,11 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7]
[(nic->base_ochan >> 4) & 0xF] = data->port_id;
+ rte_eth_dev_probing_finish(eth_dev);
return data->port_id;
err:
- if (port)
+ if (nic)
octeontx_port_close(nic);
if (eth_dev != NULL) {
@@ -1188,7 +1147,6 @@ octeontx_remove(struct rte_vdev_device *dev)
rte_free(eth_dev->data->mac_addrs);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
rte_event_dev_close(nic->evdev);
}
@@ -1210,12 +1168,28 @@ octeontx_probe(struct rte_vdev_device *dev)
struct rte_event_dev_config dev_conf;
const char *eventdev_name = "event_octeontx";
struct rte_event_dev_info info;
+ struct rte_eth_dev *eth_dev;
struct octeontx_vdev_init_params init_params = {
OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
};
dev_name = rte_vdev_device_name(dev);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(dev_name);
+ if (!eth_dev) {
+ RTE_LOG(ERR, PMD, "Failed to probe %s\n", dev_name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &octeontx_dev_ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
res = octeontx_parse_vdev_init_params(&init_params, dev);
if (res < 0)
return -EINVAL;
diff --git a/drivers/net/octeontx/octeontx_ethdev.h b/drivers/net/octeontx/octeontx_ethdev.h
index 10e42e14..14f16969 100644
--- a/drivers/net/octeontx/octeontx_ethdev.h
+++ b/drivers/net/octeontx/octeontx_ethdev.h
@@ -28,6 +28,10 @@
#define OCTEONTX_MAX_BGX_PORTS 4
#define OCTEONTX_MAX_LMAC_PER_BGX 4
+#define OCTEONTX_RX_OFFLOADS (DEV_RX_OFFLOAD_CRC_STRIP \
+ | DEV_RX_OFFLOAD_CHECKSUM)
+#define OCTEONTX_TX_OFFLOADS DEV_TX_OFFLOAD_MT_LOCKFREE
+
static inline struct octeontx_nic *
octeontx_pmd_priv(struct rte_eth_dev *dev)
{
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 2502d90e..a9149b4e 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -31,7 +31,7 @@ __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
return -ENOSPC;
/* Get the gaura Id */
- gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)tx_pkt->pool->pool_id);
+ gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);
/* Setup PKO_SEND_HDR_S */
cmd_buf[0] = tx_pkt->data_len & 0xffff;
diff --git a/drivers/net/pcap/meson.build b/drivers/net/pcap/meson.build
index 8b81214e..0c4e0201 100644
--- a/drivers/net/pcap/meson.build
+++ b/drivers/net/pcap/meson.build
@@ -1,22 +1,12 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-if meson.is_cross_build()
- pcap_dep = cc.find_library('pcap', required: false)
- if pcap_dep.found()
- ext_deps += pcap_dep
- else
- build = false
- endif
+pcap_dep = cc.find_library('pcap', required: false)
+if pcap_dep.found() and cc.has_header('pcap.h', dependencies: pcap_dep)
+ build = true
else
- pcap_dep = dependency('pcap', required: false)
- if pcap_dep.found() == true
- ext_deps += pcap_dep
- elif find_program('pcap-config', required: false).found() == true
- ext_deps += cc.find_library('pcap')
- else
- build = false
- endif
+ build = false
endif
sources = files('rte_eth_pcap.c')
+ext_deps += pcap_dep
pkgconfig_extra_libs += '-lpcap'
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index c1571e1f..e8810a17 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -26,6 +26,7 @@
#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
+#define ETH_PCAP_RX_IFACE_IN_ARG "rx_iface_in"
#define ETH_PCAP_TX_IFACE_ARG "tx_iface"
#define ETH_PCAP_IFACE_ARG "iface"
@@ -83,6 +84,7 @@ static const char *valid_arguments[] = {
ETH_PCAP_RX_PCAP_ARG,
ETH_PCAP_TX_PCAP_ARG,
ETH_PCAP_RX_IFACE_ARG,
+ ETH_PCAP_RX_IFACE_IN_ARG,
ETH_PCAP_TX_IFACE_ARG,
ETH_PCAP_IFACE_ARG,
NULL
@@ -96,9 +98,15 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG,
+ .link_autoneg = ETH_LINK_FIXED,
};
+static int eth_pcap_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static int
eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
const u_char *data, uint16_t data_len)
@@ -256,8 +264,8 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
pcap_dump((u_char *)dumper_q->dumper, &header,
tx_pcap_data);
} else {
- RTE_LOG(ERR, PMD,
- "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n",
+ PMD_LOG(ERR,
+ "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
mbuf->pkt_len,
ETHER_MAX_JUMBO_FRAME_LEN);
@@ -313,8 +321,8 @@ eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
ret = pcap_sendpacket(tx_queue->pcap,
tx_pcap_data, mbuf->pkt_len);
} else {
- RTE_LOG(ERR, PMD,
- "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n",
+ PMD_LOG(ERR,
+ "Dropping PCAP packet. Size (%d) > max jumbo size (%d).",
mbuf->pkt_len,
ETHER_MAX_JUMBO_FRAME_LEN);
@@ -346,7 +354,7 @@ open_iface_live(const char *iface, pcap_t **pcap) {
RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
if (*pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
+ PMD_LOG(ERR, "Couldn't open %s: %s", iface, errbuf);
return -1;
}
@@ -357,7 +365,7 @@ static int
open_single_iface(const char *iface, pcap_t **pcap)
{
if (open_iface_live(iface, pcap) < 0) {
- RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
+ PMD_LOG(ERR, "Couldn't open interface %s", iface);
return -1;
}
@@ -376,7 +384,7 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
*/
tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
if (tx_pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
+ PMD_LOG(ERR, "Couldn't create dead pcap");
return -1;
}
@@ -384,7 +392,7 @@ open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
*dumper = pcap_dump_open(tx_pcap, pcap_filename);
if (*dumper == NULL) {
pcap_close(tx_pcap);
- RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n",
+ PMD_LOG(ERR, "Couldn't open %s for writing.",
pcap_filename);
return -1;
}
@@ -398,7 +406,7 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
{
*pcap = pcap_open_offline(pcap_filename, errbuf);
if (*pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename,
+ PMD_LOG(ERR, "Couldn't open %s: %s", pcap_filename,
errbuf);
return -1;
}
@@ -424,6 +432,7 @@ eth_dev_start(struct rte_eth_dev *dev)
return -1;
rx->pcap = tx->pcap;
}
+
goto status_up;
}
@@ -459,6 +468,12 @@ eth_dev_start(struct rte_eth_dev *dev)
}
status_up:
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
dev->data->dev_link.link_status = ETH_LINK_UP;
return 0;
@@ -511,6 +526,12 @@ eth_dev_stop(struct rte_eth_dev *dev)
}
status_down:
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
@@ -532,6 +553,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -637,6 +659,38 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
+static int
+eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
@@ -645,6 +699,10 @@ static const struct eth_dev_ops ops = {
.dev_infos_get = eth_dev_info,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
+ .rx_queue_start = eth_rx_queue_start,
+ .tx_queue_start = eth_tx_queue_start,
+ .rx_queue_stop = eth_rx_queue_stop,
+ .tx_queue_stop = eth_tx_queue_stop,
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
.link_update = eth_link_update,
@@ -652,6 +710,22 @@ static const struct eth_dev_ops ops = {
.stats_reset = eth_stats_reset,
};
+static int
+add_queue(struct pmd_devargs *pmd, const char *name, const char *type,
+ pcap_t *pcap, pcap_dumper_t *dumper)
+{
+ if (pmd->num_of_queue >= RTE_PMD_PCAP_MAX_QUEUES)
+ return -1;
+ if (pcap)
+ pmd->queue[pmd->num_of_queue].pcap = pcap;
+ if (dumper)
+ pmd->queue[pmd->num_of_queue].dumper = dumper;
+ pmd->queue[pmd->num_of_queue].name = name;
+ pmd->queue[pmd->num_of_queue].type = type;
+ pmd->num_of_queue++;
+ return 0;
+}
+
/*
 * Function handler that opens the pcap file for reading and stores a
 * reference to it for later use.
@@ -659,18 +733,16 @@ static const struct eth_dev_ops ops = {
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
- return -1;
+ if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
+ return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = pcap_filename;
- rx->queue[i].type = key;
+ if (add_queue(rx, pcap_filename, key, pcap, NULL) < 0) {
+ pcap_close(pcap);
+ return -1;
}
return 0;
@@ -683,18 +755,16 @@ open_rx_pcap(const char *key, const char *value, void *extra_args)
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
const char *pcap_filename = value;
struct pmd_devargs *dumpers = extra_args;
pcap_dumper_t *dumper;
- for (i = 0; i < dumpers->num_of_queue; i++) {
- if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
- return -1;
+ if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
+ return -1;
- dumpers->queue[i].dumper = dumper;
- dumpers->queue[i].name = pcap_filename;
- dumpers->queue[i].type = key;
+ if (add_queue(dumpers, pcap_filename, key, NULL, dumper) < 0) {
+ pcap_dump_close(dumper);
+ return -1;
}
return 0;
@@ -720,48 +790,76 @@ open_rx_tx_iface(const char *key, const char *value, void *extra_args)
return 0;
}
+static inline int
+set_iface_direction(const char *iface, pcap_t *pcap,
+ pcap_direction_t direction)
+{
+ const char *direction_str = (direction == PCAP_D_IN) ? "IN" : "OUT";
+ if (pcap_setdirection(pcap, direction) < 0) {
+ PMD_LOG(ERR, "Setting %s pcap direction %s failed - %s\n",
+ iface, direction_str, pcap_geterr(pcap));
+ return -1;
+ }
+ PMD_LOG(INFO, "Setting %s pcap direction %s\n",
+ iface, direction_str);
+ return 0;
+}
+
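set_iface_direction() wraps libpcap's pcap_setdirection(), which restricts a live capture to one traffic direction; this is what backs the new rx_iface_in devarg. A standalone libpcap sketch of the same idea (the interface name, snaplen, and timeout below are placeholder values):

/* Standalone libpcap example, not driver code: open a live capture on
 * a hypothetical interface and keep only inbound traffic.
 */
#include <pcap.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[PCAP_ERRBUF_SIZE];
	pcap_t *p = pcap_open_live("eth0" /* placeholder */, 65535, 1, 100,
				   errbuf);

	if (p == NULL) {
		fprintf(stderr, "pcap_open_live: %s\n", errbuf);
		return 1;
	}
	if (pcap_setdirection(p, PCAP_D_IN) < 0)
		fprintf(stderr, "pcap_setdirection: %s\n", pcap_geterr(p));
	pcap_close(p);
	return 0;
}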
+static inline int
+open_iface(const char *key, const char *value, void *extra_args)
+{
+ const char *iface = value;
+ struct pmd_devargs *pmd = extra_args;
+ pcap_t *pcap = NULL;
+
+ if (open_single_iface(iface, &pcap) < 0)
+ return -1;
+ if (add_queue(pmd, iface, key, pcap, NULL) < 0) {
+ pcap_close(pcap);
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* Opens a NIC for reading packets from it
*/
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
- const char *iface = value;
- struct pmd_devargs *rx = extra_args;
- pcap_t *pcap = NULL;
+ int ret = open_iface(key, value, extra_args);
+ if (ret < 0)
+ return ret;
+ if (strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0) {
+ struct pmd_devargs *pmd = extra_args;
+ unsigned int qid = pmd->num_of_queue - 1;
- for (i = 0; i < rx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- rx->queue[i].pcap = pcap;
- rx->queue[i].name = iface;
- rx->queue[i].type = key;
+ set_iface_direction(pmd->queue[qid].name,
+ pmd->queue[qid].pcap,
+ PCAP_D_IN);
}
return 0;
}
+static inline int
+rx_iface_args_process(const char *key, const char *value, void *extra_args)
+{
+ if (strcmp(key, ETH_PCAP_RX_IFACE_ARG) == 0 ||
+ strcmp(key, ETH_PCAP_RX_IFACE_IN_ARG) == 0)
+ return open_rx_iface(key, value, extra_args);
+
+ return 0;
+}
+
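rx_iface_args_process() exists because the probe path below now walks all kvargs with a NULL key match: rte_kvargs_process() with a NULL key invokes the handler for every key/value pair, so the handler filters keys itself and the user-specified ordering of rx_iface= and rx_iface_in= arguments maps directly onto queue indices. A minimal sketch of that NULL-key dispatch pattern (the argument string and key names are illustrative):

/* Sketch of NULL-key dispatch with the DPDK kvargs API. */
#include <stdio.h>
#include <string.h>
#include <rte_kvargs.h>

static int
any_key_handler(const char *key, const char *value, void *opaque)
{
	(void)opaque;
	/* Invoked for every pair because key_match was NULL; filter here. */
	if (strcmp(key, "iface") == 0 || strcmp(key, "iface_in") == 0)
		printf("%s => %s\n", key, value);
	return 0;
}

int
main(void)
{
	static const char * const valid[] = { "iface", "iface_in", NULL };
	struct rte_kvargs *kv =
		rte_kvargs_parse("iface=eth0,iface_in=eth1", valid);

	if (kv == NULL)
		return 1;
	rte_kvargs_process(kv, NULL, any_key_handler, NULL);
	rte_kvargs_free(kv);
	return 0;
}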
/*
* Opens a NIC for writing packets to it
*/
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned int i;
- const char *iface = value;
- struct pmd_devargs *tx = extra_args;
- pcap_t *pcap;
-
- for (i = 0; i < tx->num_of_queue; i++) {
- if (open_single_iface(iface, &pcap) < 0)
- return -1;
- tx->queue[i].pcap = pcap;
- tx->queue[i].name = iface;
- tx->queue[i].type = key;
- }
-
- return 0;
+ return open_iface(key, value, extra_args);
}
static struct rte_vdev_driver pmd_pcap_drv;
@@ -773,27 +871,16 @@ pmd_init_internals(struct rte_vdev_device *vdev,
struct pmd_internals **internals,
struct rte_eth_dev **eth_dev)
{
- struct rte_eth_dev_data *data = NULL;
+ struct rte_eth_dev_data *data;
unsigned int numa_node = vdev->device.numa_node;
- const char *name;
- name = rte_vdev_device_name(vdev);
- RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %d\n",
+ PMD_LOG(INFO, "Creating pcap-backed ethdev on numa socket %d",
numa_node);
- /* now do all data allocation - for eth_dev structure
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- return -1;
-
/* reserve an ethdev entry */
*eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
- if (*eth_dev == NULL) {
- rte_free(data);
+ if (!(*eth_dev))
return -1;
- }
/* now put it all together
* - store queue data in internals,
@@ -802,7 +889,7 @@ pmd_init_internals(struct rte_vdev_device *vdev,
* - and point eth_dev structure to new eth_dev_data structure
*/
*internals = (*eth_dev)->data->dev_private;
- rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
+ data = (*eth_dev)->data;
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
@@ -812,7 +899,6 @@ pmd_init_internals(struct rte_vdev_device *vdev,
 * NOTE: we'll replace the data element of the originally allocated
 * eth_dev so the rings are local per-process
*/
- (*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
return 0;
@@ -899,6 +985,7 @@ eth_from_pcaps(struct rte_vdev_device *vdev,
else
eth_dev->tx_pkt_burst = eth_pcap_tx;
+ rte_eth_dev_probing_finish(eth_dev);
return 0;
}
@@ -910,16 +997,31 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
struct rte_kvargs *kvlist;
struct pmd_devargs pcaps = {0};
struct pmd_devargs dumpers = {0};
+ struct rte_eth_dev *eth_dev;
int single_iface = 0;
int ret;
name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
+ PMD_LOG(INFO, "Initializing pmd_pcap for %s", name);
gettimeofday(&start_time, NULL);
start_cycles = rte_get_timer_cycles();
hz = rte_get_timer_hz();
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ PMD_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
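The early return above is the standard vdev pattern for DPDK secondary processes: the shared ethdev data was already created by the primary, so the secondary only looks the port up by name and installs its own per-process function pointers. A hedged sketch of that pattern (my_ops stands in for the driver's ops table; this is an illustration, not the pcap PMD's exact code):

#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>

static int
probe_secondary(struct rte_vdev_device *vdev,
		const struct eth_dev_ops *my_ops)
{
	const char *name = rte_vdev_device_name(vdev);
	struct rte_eth_dev *eth_dev = rte_eth_dev_attach_secondary(name);

	if (eth_dev == NULL)
		return -1;		/* primary has not created this port */
	eth_dev->dev_ops = my_ops;	/* function pointers are per-process */
	eth_dev->device = &vdev->device;
	rte_eth_dev_probing_finish(eth_dev);
	return 0;
}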
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL)
return -1;
@@ -949,22 +1051,16 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
 * We check whether we want to open an RX stream from a real NIC or a
* pcap file
*/
- pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
- if (pcaps.num_of_queue)
- is_rx_pcap = 1;
- else
- pcaps.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_RX_IFACE_ARG);
-
- if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_rx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG) ? 1 : 0;
+ pcaps.num_of_queue = 0;
- if (is_rx_pcap)
+ if (is_rx_pcap) {
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
&open_rx_pcap, &pcaps);
- else
- ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
- &open_rx_iface, &pcaps);
+ } else {
+ ret = rte_kvargs_process(kvlist, NULL,
+ &rx_iface_args_process, &pcaps);
+ }
if (ret < 0)
goto free_kvlist;
@@ -973,15 +1069,8 @@ pmd_pcap_probe(struct rte_vdev_device *dev)
* We check whether we want to open a TX stream to a real NIC or a
* pcap file
*/
- dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
- if (dumpers.num_of_queue)
- is_tx_pcap = 1;
- else
- dumpers.num_of_queue = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_IFACE_ARG);
-
- if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
- dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+ is_tx_pcap = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG) ? 1 : 0;
+ dumpers.num_of_queue = 0;
if (is_tx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
@@ -1008,7 +1097,7 @@ pmd_pcap_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
- RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %d\n",
+ PMD_LOG(INFO, "Closing pcap ethdev on numa socket %d",
rte_socket_id());
if (!dev)
@@ -1020,7 +1109,6 @@ pmd_pcap_remove(struct rte_vdev_device *dev)
return -1;
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
@@ -1038,5 +1126,13 @@ RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
ETH_PCAP_RX_PCAP_ARG "=<string> "
ETH_PCAP_TX_PCAP_ARG "=<string> "
ETH_PCAP_RX_IFACE_ARG "=<ifc> "
+ ETH_PCAP_RX_IFACE_IN_ARG "=<ifc> "
ETH_PCAP_TX_IFACE_ARG "=<ifc> "
ETH_PCAP_IFACE_ARG "=<ifc>");
+
+RTE_INIT(eth_pcap_init_log)
+{
+ eth_pcap_logtype = rte_log_register("pmd.net.pcap");
+ if (eth_pcap_logtype >= 0)
+ rte_log_set_level(eth_pcap_logtype, RTE_LOG_NOTICE);
+}
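This constructor replaces the compile-time PMD log type with a dynamically registered one, defaulting to NOTICE. The PMD_LOG() calls introduced throughout the diff presumably wrap rte_log() with this type; the macro body is not part of this excerpt, but such wrappers are typically shaped like the following sketch:

/* Assumed shape of a PMD_LOG-style wrapper over a registered log type;
 * the actual macro definition is outside this excerpt.
 */
#include <rte_log.h>

static int eth_pcap_logtype;

#define PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, eth_pcap_logtype, \
		"%s(): " fmt "\n", __func__, ##args)

This also explains why the converted log calls drop their trailing "\n": the wrapper appends one.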
diff --git a/drivers/net/qede/LICENSE.qede_pmd b/drivers/net/qede/LICENSE.qede_pmd
deleted file mode 100644
index c7cbdccc..00000000
--- a/drivers/net/qede/LICENSE.qede_pmd
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of QLogic Corporation nor the name of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written consent.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS'
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index ccbffa45..488ca1d9 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -1,8 +1,7 @@
-# Copyright (c) 2016 QLogic Corporation.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 - 2018 Cavium Inc.
# All rights reserved.
-# www.qlogic.com
-#
-# See LICENSE.qede_pmd for copyright and licensing details.
+# www.cavium.com
include $(RTE_SDK)/mk/rte.vars.mk
@@ -73,8 +72,7 @@ ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev
CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion
endif
else #ICC
-CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type
-CFLAGS_qede_ethdev.o += -wd279 #279: controlling expression is constant
+CFLAGS_qede_ethdev.o += -diag-disable 279 #279: controlling expression is constant
endif
#
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index fe42f325..d5d6f8e2 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include <rte_memzone.h>
@@ -133,10 +131,10 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
if (core_id == (unsigned int)LCORE_ID_ANY)
- core_id = 0;
+ core_id = rte_get_master_lcore();
socket_id = rte_lcore_to_socket_id(core_id);
- mz = rte_memzone_reserve_aligned(mz_name, size,
- socket_id, 0, RTE_CACHE_LINE_SIZE);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE);
if (!mz) {
DP_ERR(p_dev, "Unable to allocate DMA memory "
"of size %zu bytes - %s\n",
@@ -172,9 +170,10 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
if (core_id == (unsigned int)LCORE_ID_ANY)
- core_id = 0;
+ core_id = rte_get_master_lcore();
socket_id = rte_lcore_to_socket_id(core_id);
- mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align);
+ mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
if (!mz) {
DP_ERR(p_dev, "Unable to allocate DMA memory "
"of size %zu bytes - %s\n",
@@ -200,6 +199,11 @@ void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Free memzone %s\n", ecore_mz_mapping[j]->name);
rte_memzone_free(ecore_mz_mapping[j]);
+ while (j < ecore_mz_count - 1) {
+ ecore_mz_mapping[j] = ecore_mz_mapping[j + 1];
+ j++;
+ }
+ ecore_mz_count--;
return;
}
}
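The added loop compacts ecore_mz_mapping after the freed entry, so live entries stay dense in the first ecore_mz_count slots and later lookups can stop early. The same remove-and-shift idiom in isolation (types simplified to a generic pointer array; the driver operates on rte_memzone pointers):

static void
array_remove_at(void **arr, unsigned int *count, unsigned int j)
{
	/* Shift the tail left over the removed slot, then shrink. */
	while (j < *count - 1) {
		arr[j] = arr[j + 1];
		j++;
	}
	(*count)--;
}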
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 52c2f0ec..630867fa 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __BCM_OSAL_H
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index 9a6059ac..ca8e59db 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __COMMON_HSI__
@@ -96,10 +94,10 @@
/****************************************************************************/
-#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 30
-#define FW_REVISION_VERSION 12
-#define FW_ENGINEERING_VERSION 0
+#define FW_MAJOR_VERSION 8
+#define FW_MINOR_VERSION 33
+#define FW_REVISION_VERSION 12
+#define FW_ENGINEERING_VERSION 0
/***********************/
/* COMMON HW CONSTANTS */
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index ce5f3a90..5d79fdf0 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_H
@@ -41,6 +39,9 @@
((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
(FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+#define IS_ECORE_PACING(p_hwfn) \
+ (!!(p_hwfn->b_en_pacing))
+
#define MAX_HWFNS_PER_DEVICE 2
#define NAME_SIZE 128 /* @DPDK */
#define ECORE_WFQ_UNIT 100
@@ -432,8 +433,10 @@ struct ecore_hw_info {
#define DMAE_MAX_RW_SIZE 0x2000
struct ecore_dmae_info {
- /* Mutex for synchronizing access to functions */
- osal_mutex_t mutex;
+ /* Spinlock for synchronizing access to functions */
+ osal_spinlock_t lock;
+
+ bool b_mem_ready;
u8 channel;
@@ -534,6 +537,12 @@ enum ecore_mf_mode_bit {
ECORE_MF_UFP_SPECIFIC,
ECORE_MF_DISABLE_ARFS,
+
+ /* Use vlan for steering */
+ ECORE_MF_8021Q_TAGGING,
+
+ /* Use stag for steering */
+ ECORE_MF_8021AD_TAGGING,
};
enum ecore_ufp_mode {
@@ -672,6 +681,13 @@ struct ecore_hwfn {
/* Mechanism for recovering from doorbell drop */
struct ecore_db_recovery_info db_recovery_info;
+ /* Enable/disable pacing. If pacing is enabled, the
+ * IOV and mcos configuration will be skipped.
+ * This reflects the value requested in
+ * struct ecore_hw_prepare_params by the ecore client.
+ */
+ bool b_en_pacing;
+
/* @DPDK */
struct ecore_ptt *p_arfs_ptt;
};
@@ -924,12 +940,16 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
#define PQ_FLAGS_ACK (1 << 4)
#define PQ_FLAGS_OFLD (1 << 5)
#define PQ_FLAGS_VFS (1 << 6)
+#define PQ_FLAGS_LLT (1 << 7)
/* physical queue index for cm context initialization */
u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
-u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+
+/* qm vport for rate limit configuration */
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h
index d8951bcc..ec773fbd 100644
--- a/drivers/net/qede/base/ecore_attn_values.h
+++ b/drivers/net/qede/base/ecore_attn_values.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ATTN_VALUES_H__
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index d8f69ad6..6d0382d3 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_CHAIN_H__
@@ -526,7 +524,7 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
- /* Use (page_cnt - 1) as a reset value for the prod/cons page's
+ /* Use "page_cnt-1" as a reset value for the prod/cons page's
* indices, to avoid unnecessary page advancing on the first
* call to ecore_chain_produce/consume. Instead, the indices
* will be advanced to page_cnt and then will be wrapped to 0.
@@ -726,6 +724,21 @@ out:
static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use "prod_idx-1" since ecore_chain_produce() advances the
+ * page index before the producer index when getting to
+ * "next_page_mask".
+ */
+ u32 elem_idx =
+ (prod_idx - 1 + p_chain->capacity) % p_chain->capacity;
+ u32 page_idx = elem_idx / p_chain->elem_per_page;
+
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.prod_page_idx = (u16)page_idx;
+ else
+ p_chain->pbl.c.u32.prod_page_idx = page_idx;
+ }
+
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16)prod_idx;
else
@@ -734,6 +747,38 @@ static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
}
/**
+ * @brief ecore_chain_set_cons - sets the cons to the given value
+ *
+ * @param cons_idx
+ * @param p_cons_elem
+ */
+static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain,
+ u32 cons_idx, void *p_cons_elem)
+{
+ if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
+ /* Use "cons_idx-1" since ecore_chain_consume() advances the
+ * page index before the consumer index when getting to
+ * "next_page_mask".
+ */
+ u32 elem_idx =
+ (cons_idx - 1 + p_chain->capacity) % p_chain->capacity;
+ u32 page_idx = elem_idx / p_chain->elem_per_page;
+
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.cons_page_idx = (u16)page_idx;
+ else
+ p_chain->pbl.c.u32.cons_page_idx = page_idx;
+ }
+
+ if (is_chain_u16(p_chain))
+ p_chain->u.chain16.cons_idx = (u16)cons_idx;
+ else
+ p_chain->u.chain32.cons_idx = cons_idx;
+
+ p_chain->p_cons_elem = p_cons_elem;
+}
+
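Both new PBL branches derive the page index from the element index with wrap-around, so that resetting an index to 0 parks the page index on the last page rather than page 0. A small worked sketch of the arithmetic, assuming a capacity of 1024 elements and 128 elements per page:

#include <stdio.h>

int
main(void)
{
	unsigned int capacity = 1024, elem_per_page = 128;
	unsigned int cons_idx = 0;

	/* Unsigned wrap-around makes (0 - 1 + 1024) % 1024 == 1023. */
	unsigned int elem_idx = (cons_idx - 1 + capacity) % capacity;
	unsigned int page_idx = elem_idx / elem_per_page;   /* 1023/128 == 7 */

	printf("elem_idx=%u page_idx=%u\n", elem_idx, page_idx);
	return 0;
}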
+/**
* @brief ecore_chain_pbl_zero_mem - set chain memory to 0
*
* @param p_chain
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 50bd66da..bf36ce58 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -834,7 +832,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->t2_num_pages *
sizeof(struct ecore_dma_mem));
if (!p_mngr->t2) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
rc = ECORE_NOMEM;
goto t2_fail;
}
@@ -919,6 +917,9 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 ilt_size, i;
+ if (p_mngr->ilt_shadow == OSAL_NULL)
+ return;
+
ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
@@ -931,6 +932,7 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
p_dma->p_virt = OSAL_NULL;
}
OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
+ p_mngr->ilt_shadow = OSAL_NULL;
}
static enum _ecore_status_t
@@ -1000,8 +1002,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
size * sizeof(struct ecore_dma_mem));
if (!p_mngr->ilt_shadow) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate ilt shadow table\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
rc = ECORE_NOMEM;
goto ilt_shadow_fail;
}
@@ -1044,12 +1045,14 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
for (type = 0; type < MAX_CONN_TYPES; type++) {
OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].cid_map = OSAL_NULL;
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
OSAL_FREE(p_hwfn->p_dev,
p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
p_mngr->acquired_vf[type][vf].max_count = 0;
p_mngr->acquired_vf[type][vf].start_cid = 0;
}
@@ -1126,8 +1129,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
if (!p_mngr) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_cxt_mngr'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
return ECORE_NOMEM;
}
@@ -1189,21 +1191,21 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
/* Allocate the ILT shadow table */
rc = ecore_ilt_shadow_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
goto tables_alloc_fail;
}
/* Allocate the T2 table */
rc = ecore_cxt_src_t2_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
goto tables_alloc_fail;
}
/* Allocate and initialize the acquired cids bitmaps */
rc = ecore_cid_map_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
goto tables_alloc_fail;
}
@@ -1427,7 +1429,8 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
}
}
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool is_pf_loading)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
struct ecore_mcp_link_state *p_link;
@@ -1438,8 +1441,9 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
- ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
- p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
+ ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ qm_info->max_phys_tcs_per_port,
+ is_pf_loading,
iids.cids, iids.vf_cids, iids.tids,
qm_info->start_pq,
qm_info->num_pqs - qm_info->num_vf_pqs,
@@ -1797,7 +1801,7 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- ecore_qm_init_pf(p_hwfn, p_ptt);
+ ecore_qm_init_pf(p_hwfn, p_ptt, true);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 54761e4e..f8c955ca 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef _ECORE_CID_
@@ -107,8 +105,10 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
*
* @param p_hwfn
* @param p_ptt
+ * @param is_pf_loading
*/
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool is_pf_loading);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index 6d87620d..6c8b2831 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_CXT_API_H__
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 21ddda92..96678745 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -149,6 +147,10 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
}
p_data->arr[type].update = UPDATE_DCB_DSCP;
+ /* Do not add vlan tag 0 when DCB is enabled and port is in UFP mode */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ p_data->arr[type].dont_add_vlan0 = true;
+
/* QM reconf data */
if (p_hwfn->hw_info.personality == personality)
p_hwfn->hw_info.offload_tc = tc;
@@ -910,7 +912,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_hwfn->p_dcbx_info));
if (!p_hwfn->p_dcbx_info) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_dcbx_info'");
return ECORE_NOMEM;
}
@@ -935,6 +937,7 @@ static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
p_data->dcb_tc = p_src->arr[type].tc;
p_data->dscp_enable_flag = p_src->arr[type].dscp_enable;
p_data->dscp_val = p_src->arr[type].dscp_val;
+ p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
}
/* Set pf update ramrod command params */
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index 469e42dd..519e6cea 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_DCBX_H__
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 9ff4df4c..eaf8e082 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_DCBX_API_H__
@@ -29,6 +27,7 @@ struct ecore_dcbx_app_data {
u8 tc; /* Traffic Class */
bool dscp_enable; /* DSCP enabled */
u8 dscp_val; /* DSCP value */
+ bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
};
#ifndef __EXTRACT__LINUX__
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 744d2043..31f1f3ee 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -39,7 +37,7 @@
* there's more than a single compiled ecore component in the system].
*/
static osal_spinlock_t qm_lock;
-static bool qm_lock_init;
+static u32 qm_lock_ref_cnt;
/******************** Doorbell Recovery *******************/
/* The doorbell recovery mechanism consists of a list of entries which represent
@@ -227,7 +225,8 @@ enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock))
+ return ECORE_NOMEM;
#endif
OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
p_hwfn->db_recovery_info.db_recovery_counter = 0;
@@ -411,7 +410,7 @@ void ecore_init_dp(struct ecore_dev *p_dev,
}
}
-void ecore_init_struct(struct ecore_dev *p_dev)
+enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev)
{
u8 i;
@@ -423,9 +422,10 @@ void ecore_init_struct(struct ecore_dev *p_dev)
p_hwfn->b_active = false;
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock))
+ goto handle_err;
#endif
- OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock);
}
/* hwfn 0 is always active */
@@ -433,6 +433,17 @@ void ecore_init_struct(struct ecore_dev *p_dev)
/* set the default cache alignment to 128 (may be overridden later) */
p_dev->cache_shift = 7;
+ return ECORE_SUCCESS;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+handle_err:
+ while (--i) {
+ struct ecore_hwfn *p_hwfn = OSAL_NULL;
+
+ p_hwfn = &p_dev->hwfns[i];
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
+ }
+ return ECORE_NOMEM;
+#endif
}
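ecore_init_struct() now reports spinlock allocation failures, unwinding the locks allocated for earlier hwfns before returning ECORE_NOMEM. The general allocate-then-unwind idiom looks like this sketch (struct thing and the thing_alloc/thing_free helpers are hypothetical):

struct thing { int dummy; };

static int thing_alloc(struct thing *t);   /* hypothetical */
static void thing_free(struct thing *t);   /* hypothetical */

static int
alloc_all(struct thing *arr, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (thing_alloc(&arr[i]) != 0)
			goto unwind;
	return 0;
unwind:
	while (i-- > 0)		/* frees i-1 .. 0 in reverse order */
		thing_free(&arr[i]);
	return -1;
}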
static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
@@ -500,11 +511,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
/* feature flags */
if (IS_ECORE_SRIOV(p_hwfn->p_dev))
flags |= PQ_FLAGS_VFS;
+ if (IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_RLS;
/* protocol flags */
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
- flags |= PQ_FLAGS_MCOS;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
case ECORE_PCI_FCOE:
flags |= PQ_FLAGS_OFLD;
@@ -513,11 +527,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
break;
case ECORE_PCI_ETH_ROCE:
- flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+ flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
case ECORE_PCI_ETH_IWARP:
- flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
- PQ_FLAGS_OFLD;
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ if (!IS_ECORE_PACING(p_hwfn))
+ flags |= PQ_FLAGS_MCOS;
break;
default:
DP_ERR(p_hwfn, "unknown personality %d\n",
@@ -721,6 +738,7 @@ static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
"pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
/* init pq params */
+ qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
qm_info->num_vports;
qm_info->qm_pq_params[pq_idx].tc_id = tc;
@@ -823,7 +841,7 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
}
-u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
{
u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
@@ -833,6 +851,23 @@ u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
}
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+ u16 start_pq, pq, qm_pq_idx;
+
+ pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+ start_pq = p_hwfn->qm_info.start_pq;
+ qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE;
+
+ if (qm_pq_idx > p_hwfn->qm_info.num_pqs) {
+ DP_ERR(p_hwfn,
+ "qm_pq_idx %d must be smaller than %d\n",
+ qm_pq_idx, p_hwfn->qm_info.num_pqs);
+ }
+
+ return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;
+}
+
/* Functions for creating specific types of pqs */
static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
{
@@ -1025,10 +1060,9 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
for (i = 0; i < qm_info->num_pqs; i++) {
pq = &qm_info->qm_pq_params[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "pq idx %d, vport_id %d, tc %d, wrr_grp %d,"
- " rl_valid %d\n",
- qm_info->start_pq + i, pq->vport_id, pq->tc_id,
- pq->wrr_group, pq->rl_valid);
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
+ qm_info->start_pq + i, pq->port_id, pq->vport_id,
+ pq->tc_id, pq->wrr_group, pq->rl_valid);
}
}
@@ -1083,7 +1117,7 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
ecore_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- ecore_qm_init_pf(p_hwfn, p_ptt);
+ ecore_qm_init_pf(p_hwfn, p_ptt, false);
/* activate init tool on runtime array */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1289,16 +1323,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for dmae_info"
- " structure\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
/* DCBX initialization */
rc = ecore_dcbx_info_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
@@ -1307,7 +1339,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->reset_stats));
if (!p_dev->reset_stats) {
- DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n");
goto alloc_no_mem;
}
@@ -1658,7 +1690,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_init_cache_line_size(p_hwfn, p_ptt);
- rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn),
+ hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
@@ -2160,6 +2193,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
/* perform debug configuration when chip is out of reset */
OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
+ /* Sanity check before the PF init sequence that uses DMAE */
+ rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+ if (rc)
+ return rc;
+
/* PF Init sequence */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
@@ -2205,42 +2243,43 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true,
"Function start ramrod failed\n");
} else {
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
- (1 << 2));
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
- 0x100);
- }
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH registers after start PFn\n");
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt,
- PRS_REG_SEARCH_TCP_FIRST_FRAG);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
- prs_reg);
- prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
- DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ return rc;
}
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+
+ if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
+ (1 << 2));
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+ 0x100);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH registers after start PFn\n");
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_SEARCH_TCP_FIRST_FRAG);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
+ prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
}
- return rc;
+ return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
@@ -2283,14 +2322,15 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
}
static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+ struct ecore_ptt *p_ptt)
{
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
1 << p_hwfn->abs_pf_id);
}
-static void
-ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req,
+static enum _ecore_status_t
+ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_load_req_params *p_load_req,
struct ecore_drv_load_params *p_drv_load)
{
/* Make sure that if ecore-client didn't provide inputs, all the
@@ -2302,15 +2342,51 @@ ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req,
OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
- if (p_drv_load != OSAL_NULL) {
- p_load_req->drv_role = p_drv_load->is_crash_kernel ?
- ECORE_DRV_ROLE_KDUMP :
- ECORE_DRV_ROLE_OS;
+ if (p_drv_load == OSAL_NULL)
+ goto out;
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+
+ /* Old MFW versions don't support timeout values other than default and
+ * none, so these values are replaced according to the fall-back action.
+ */
+
+ if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT ||
+ p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE ||
+ (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) {
p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
- p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
- p_load_req->override_force_load =
- p_drv_load->override_force_load;
+ goto out;
+ }
+
+ switch (p_drv_load->mfw_timeout_fallback) {
+ case ECORE_TO_FALLBACK_TO_NONE:
+ p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE;
+ break;
+ case ECORE_TO_FALLBACK_TO_DEFAULT:
+ p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ break;
+ case ECORE_TO_FALLBACK_FAIL_LOAD:
+ DP_NOTICE(p_hwfn, false,
+ "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n",
+ p_drv_load->mfw_timeout_val,
+ ECORE_LOAD_REQ_LOCK_TO_DEFAULT,
+ ECORE_LOAD_REQ_LOCK_TO_NONE);
+ return ECORE_ABORTED;
}
+
+ DP_INFO(p_hwfn,
+ "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n",
+ p_drv_load->mfw_timeout_val,
+ (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ?
+ "default" : "none",
+ p_load_req->timeout_val);
+out:
+ return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
@@ -2366,12 +2442,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
- ecore_fill_load_req_params(&load_req_params,
- p_params->p_drv_load_params);
+ ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
+
+ rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
+ p_params->p_drv_load_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
&load_req_params);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a LOAD_REQ command\n");
return rc;
}
@@ -2404,10 +2485,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
- if (!qm_lock_init) {
+ if (!qm_lock_ref_cnt) {
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock);
+ if (rc) {
+ DP_ERR(p_hwfn, "qm_lock allocation failed\n");
+ goto qm_lock_fail;
+ }
+#endif
OSAL_SPIN_LOCK_INIT(&qm_lock);
- qm_lock_init = true;
}
+ ++qm_lock_ref_cnt;
/* Clean up chip from previous driver if such remains exist.
* This is not needed when the PF is the first one on the
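Replacing the qm_lock_init boolean with a reference count lets the last PF to unload deallocate the shared spinlock under CONFIG_ECORE_LOCK_ALLOC, instead of leaking it for the process lifetime. A simplified sketch of refcounted init/teardown (lock_alloc/lock_dealloc are hypothetical, and the real code relies on outer serialization rather than the bare counter shown here):

static unsigned int lock_ref_cnt;

static int lock_alloc(void);      /* hypothetical allocator */
static void lock_dealloc(void);   /* hypothetical */

static int
lock_get(void)
{
	if (lock_ref_cnt == 0 && lock_alloc() != 0)
		return -1;            /* first user allocates */
	++lock_ref_cnt;
	return 0;
}

static void
lock_put(void)
{
	if (--lock_ref_cnt == 0)
		lock_dealloc();       /* last user frees */
}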
@@ -2423,9 +2511,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
}
- /* Log and clean previous pglue_b errors if such exist */
- ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
- ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+ /* Log and clear previous pglue_b errors if such exist */
+ ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
/* Enable the PF's internal FID_enable in the PXP */
rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
@@ -2433,6 +2520,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
goto load_err;
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -2462,15 +2556,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"init phase failed for loadcode 0x%x (rc %d)\n",
load_code, rc);
goto load_err;
}
rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Sending load done failed, rc = %d\n", rc);
+ if (rc == ECORE_NOMEM) {
+ DP_NOTICE(p_hwfn, false,
+ "Sending load done was failed due to memory allocation failure\n");
+ goto load_err;
+ }
return rc;
+ }
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
@@ -2480,7 +2582,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
&param);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to send DCBX attention request\n");
return rc;
}
@@ -2513,6 +2615,12 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
return rc;
load_err:
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+qm_lock_fail:
+#endif
/* The MFW load lock should be released regardless of success or failure
* of initialization.
* TODO: replace this with an attempt to send cancel_load.
@@ -2547,8 +2655,8 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
if (i < ECORE_HW_STOP_RETRY_LIMIT)
return;
- DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
- " [Connection %02x Tasks %02x]\n",
+ DP_NOTICE(p_hwfn, false,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
(u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
(u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
@@ -2613,7 +2721,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
if (!p_dev->recov_in_prog) {
rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a UNLOAD_REQ command. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@@ -2628,7 +2736,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
rc = ecore_sp_pf_stop(p_hwfn);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@@ -2682,10 +2790,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+ --qm_lock_ref_cnt;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ if (!qm_lock_ref_cnt)
+ OSAL_SPIN_LOCK_DEALLOC(&qm_lock);
+#endif
+
if (!p_dev->recov_in_prog) {
- ecore_mcp_unload_done(p_hwfn, p_ptt);
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc == ECORE_NOMEM) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n");
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ }
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed sending a UNLOAD_DONE command. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
@@ -2936,7 +3055,7 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"MFW response failure for a max value setting of resource %d [%s]\n",
res_id, ecore_hw_get_resc_name(res_id));
return rc;
@@ -3496,9 +3615,14 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
break;
case NVM_CFG1_GLOB_MF_MODE_UFP:
p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
- 1 << ECORE_MF_UFP_SPECIFIC;
+ 1 << ECORE_MF_UFP_SPECIFIC |
+ 1 << ECORE_MF_8021Q_TAGGING;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_8021AD_TAGGING;
break;
-
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
1 << ECORE_MF_LLH_PROTO_CLSS |
@@ -3527,6 +3651,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
*/
switch (mf_mode) {
case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ case NVM_CFG1_GLOB_MF_MODE_BD:
p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
@@ -3780,8 +3905,13 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool drv_resc_alloc = p_params->drv_resc_alloc;
enum _ecore_status_t rc;
+ if (IS_ECORE_PACING(p_hwfn)) {
+ DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV,
+ "Skipping IOV as packet pacing is requested\n");
+ }
+
/* Since all information is common, only first hwfns should do this */
- if (IS_LEAD_HWFN(p_hwfn)) {
+ if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) {
rc = ecore_iov_hw_info(p_hwfn);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
@@ -3866,7 +3996,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* that can result in performance penalty in some cases. 4
* represents a good tradeoff between performance and flexibility.
*/
- p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+ if (IS_ECORE_PACING(p_hwfn))
+ p_hwfn->hw_info.num_hw_tc = 1;
+ else
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
/* start out with a single active tc. This can be increased either
* by dcbx negotiation or by upper layer driver
@@ -4037,7 +4170,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Allocate PTT pool */
rc = ecore_ptt_pool_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+ DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err0;
@@ -4062,7 +4195,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Initialize MCP structure */
rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+ DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err1;
@@ -4072,7 +4205,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
p_params->personality, p_params);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
+ DP_NOTICE(p_hwfn, false, "Failed to get HW information\n");
goto err2;
}
@@ -4115,7 +4248,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Allocate the init RT array and initialize the init-ops engine */
rc = ecore_init_alloc(p_hwfn);
if (rc) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n");
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err2;
@@ -4153,6 +4286,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
p_dev->allow_mdump = p_params->allow_mdump;
+ p_hwfn->b_en_pacing = p_params->b_en_pacing;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -4188,6 +4322,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
BAR_ID_1) / 2;
p_doorbell = (void OSAL_IOMEM *)addr;
+ p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
/* prepare second hw function */
rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
p_doorbell, p_params);
@@ -4205,8 +4340,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
ecore_mcp_free(p_hwfn);
ecore_hw_hwfn_free(p_hwfn);
} else {
- DP_NOTICE(p_dev, true,
- "What do we need to free when VF hwfn1 init fails\n");
+ DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n");
}
return rc;
}
@@ -4237,7 +4371,7 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
ecore_mcp_free(p_hwfn);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock);
#endif
}
@@ -4368,7 +4502,7 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@@ -4401,7 +4535,7 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@@ -4425,7 +4559,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
size = page_cnt * sizeof(*pp_virt_addr_tbl);
pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
if (!pp_virt_addr_tbl) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate memory for the chain virtual addresses table\n");
return ECORE_NOMEM;
}
@@ -4449,7 +4583,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt) {
- DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n");
return ECORE_NOMEM;
}
@@ -4457,7 +4591,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Failed to allocate chain memory\n");
return ECORE_NOMEM;
}
@@ -4497,7 +4631,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
page_cnt);
if (rc) {
- DP_NOTICE(p_dev, true,
+ DP_NOTICE(p_dev, false,
"Cannot allocate a chain with the given arguments:\n"
"[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
intended_use, mode, cnt_type, num_elems, elem_size);
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 98bcabe8..02bacc22 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_DEV_API_H__
@@ -32,7 +30,7 @@ void ecore_init_dp(struct ecore_dev *p_dev,
*
* @param p_dev
*/
-void ecore_init_struct(struct ecore_dev *p_dev);
+enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_free -
@@ -57,6 +55,12 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
*/
void ecore_resc_setup(struct ecore_dev *p_dev);
+enum ecore_mfw_timeout_fallback {
+ ECORE_TO_FALLBACK_TO_NONE,
+ ECORE_TO_FALLBACK_TO_DEFAULT,
+ ECORE_TO_FALLBACK_FAIL_LOAD,
+};
+
enum ecore_override_force_load {
ECORE_OVERRIDE_FORCE_LOAD_NONE,
ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
@@ -79,6 +83,11 @@ struct ecore_drv_load_params {
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
+ /* Action to take in case the MFW doesn't support timeout values other
+ * than default and none.
+ */
+ enum ecore_mfw_timeout_fallback mfw_timeout_fallback;
+
/* Avoid engine reset when first PF loads on it */
bool avoid_eng_reset;
@@ -104,6 +113,9 @@ struct ecore_hw_init_params {
/* Driver load parameters */
struct ecore_drv_load_params *p_drv_load_params;
+
+ /* SPQ block timeout in msec */
+ u32 spq_timeout_ms;
};
/**
@@ -256,6 +268,9 @@ struct ecore_hw_prepare_params {
*/
bool b_relaxed_probe;
enum ecore_hw_prepare_result p_relaxed_res;
+
+ /* Enable/disable request by ecore client for pacing */
+ bool b_en_pacing;
};
/**
@@ -363,6 +378,7 @@ struct ecore_eth_stats_common {
u64 tx_mac_mc_packets;
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
+ u64 link_change_count;
};
struct ecore_eth_stats_bb {
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
index 2acd864d..8c8fed4e 100644
--- a/drivers/net/qede/base/ecore_gtt_reg_addr.h
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef GTT_REG_ADDR_H
diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h
index 2ddc5f19..adc20c0c 100644
--- a/drivers/net/qede/base/ecore_gtt_values.h
+++ b/drivers/net/qede/base/ecore_gtt_values.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __PREVENT_PXP_GLOBAL_WIN__
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index d8abd604..2d761b97 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_HSI_COMMON__
@@ -381,7 +379,7 @@ struct e4_xstorm_core_conn_ag_ctx {
__le16 reserved16 /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_or_spq_prod /* word4 */;
- __le16 word5 /* word5 */;
+ __le16 updated_qm_pq_id /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
@@ -904,8 +902,10 @@ struct core_rx_start_ramrod_data {
/* if set, 802.1q tags will be removed and copied to CQE */
u8 inner_vlan_stripping_en;
-/* if set, outer tag wont be stripped, valid only in MF OVLAN. */
- u8 outer_vlan_stripping_dis;
+/* if set and inner vlan does not exist, the outer vlan will be copied to CQE
+ * as inner vlan. Should be used in MF_OVLAN mode only.
+ */
+ u8 report_outer_vlan;
u8 queue_id /* Light L2 RX Queue ID */;
u8 main_func_queue /* Is this the main queue for the PF */;
/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
@@ -946,7 +946,9 @@ struct core_tx_bd_data {
/* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
-/* Insert VLAN into packet */
+/* Insert VLAN into packet. Cannot be set for LB packets
+ * (tx_dst == CORE_TX_DEST_LB)
+ */
#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
/* This is the first BD of the packet (for debug) */
@@ -1069,11 +1071,11 @@ struct core_tx_update_ramrod_data {
* Enum flag for what type of dcb data to update
*/
enum dcb_dscp_update_mode {
-/* use when no change should be done to dcb data */
+/* use when no change should be done to DCB data */
DONT_UPDATE_DCB_DSCP,
- UPDATE_DCB /* use to update only l2 (vlan) priority */,
- UPDATE_DSCP /* use to update only l3 dscp */,
- UPDATE_DCB_DSCP /* update vlan pri and dscp */,
+ UPDATE_DCB /* use to update only L2 (vlan) priority */,
+ UPDATE_DSCP /* use to update only IP DSCP */,
+ UPDATE_DCB_DSCP /* update vlan pri and DSCP */,
MAX_DCB_DSCP_UPDATE_FLAG
};
@@ -1291,10 +1293,15 @@ enum fw_flow_ctrl_mode {
* GFT profile type.
*/
enum gft_profile_type {
- GFT_PROFILE_TYPE_4_TUPLE /* 4 tuple, IP type and L4 type match. */,
-/* L4 destination port, IP type and L4 type match. */
+/* tunnel type, inner 4 tuple, IP type and L4 type match. */
+ GFT_PROFILE_TYPE_4_TUPLE,
+/* tunnel type, inner L4 destination port, IP type and L4 type match. */
GFT_PROFILE_TYPE_L4_DST_PORT,
- GFT_PROFILE_TYPE_IP_DST_PORT /* IP destination port and IP type. */,
+/* tunnel type, inner IP destination address and IP type match. */
+ GFT_PROFILE_TYPE_IP_DST_ADDR,
+/* tunnel type, inner IP source address and IP type match. */
+ GFT_PROFILE_TYPE_IP_SRC_ADDR,
+ GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */,
MAX_GFT_PROFILE_TYPE
};
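The two port-based profiles are replaced by address-based ones, and every profile now also matches on tunnel type (see the GFT_RAM_LINE_TUNNEL_* bits set unconditionally in ecore_gft_config() further down). A hedged sketch of selecting one of the new profiles; the ecore_gft_config() parameter list is assumed from its header prototype:

/* Sketch: steer tunneled UDP/IPv4 flows by their inner destination IP.
 * Only the enum value is new in this patch; the call signature is an
 * assumption.
 */
ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
		 false /* tcp */, true /* udp */,
		 true /* ipv4 */, false /* ipv6 */,
		 GFT_PROFILE_TYPE_IP_DST_ADDR);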
@@ -1411,8 +1418,9 @@ struct vlan_header {
* outer tag configurations
*/
struct outer_tag_config_struct {
-/* Enables the STAG Priority Change , Should be 1 for Bette Davis and UFP with
- * Host Control mode. Else - 0
+/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
+ * Davis, UFP with Host Control mode, and UFP with DCB over base interface;
+ * else 0.
*/
u8 enable_stag_pri_change;
/* If inner_to_outer_pri_map is initialized then set pri_map_valid */
@@ -1507,15 +1515,18 @@ struct pf_start_ramrod_data {
/*
- * Data for port update ramrod
+ * Per protocol DCB data
*/
struct protocol_dcb_data {
- u8 dcb_enable_flag /* dcbEnable flag value */;
- u8 dscp_enable_flag /* If set use dscp value */;
- u8 dcb_priority /* dcbPri flag value */;
- u8 dcb_tc /* dcb TC value */;
- u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
- u8 reserved0;
+ u8 dcb_enable_flag /* Enable DCB */;
+ u8 dscp_enable_flag /* Enable updating DSCP value */;
+ u8 dcb_priority /* DCB priority */;
+ u8 dcb_tc /* DCB TC */;
+ u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */;
+/* When DCB is enabled - if this flag is set, don't add VLAN 0 tag to untagged
+ * frames.
+ */
+ u8 dcb_dont_add_vlan0;
};
/*
@@ -1575,8 +1586,9 @@ struct pf_update_ramrod_data {
/* core iwarp related fields */
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan /* new outer vlan id value */;
-/* enables the inner to outer TAG priority mapping. Should be 1 for Bette Davis
- * and UFP with Host Control mode, else - 0.
+/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette
+ * Davis, UFP with Host Control mode, and UFP with DCB over base interface;
+ * else 0.
*/
u8 enable_stag_pri_change;
u8 reserved;
@@ -1739,6 +1751,7 @@ struct tstorm_per_port_stat {
struct regpair eth_vxlan_tunn_filter_discard;
/* GENEVE dropped packets */
struct regpair eth_geneve_tunn_filter_discard;
+ struct regpair eth_gft_drop_pkt /* GFT dropped packets */;
};
@@ -2130,6 +2143,53 @@ struct e4_ystorm_core_conn_ag_ctx {
};
+struct fw_asserts_ram_section {
+/* The offset of the section in the RAM in RAM lines (64-bit units) */
+ __le16 section_ram_line_offset;
+/* The size of the section in RAM lines (64-bit units) */
+ __le16 section_ram_line_size;
+/* The offset of the asserts list within the section in dwords */
+ u8 list_dword_offset;
+/* The size of an assert list element in dwords */
+ u8 list_element_dword_size;
+ u8 list_num_elements /* The number of elements in the asserts list */;
+/* The offset of the next list index field within the section in dwords */
+ u8 list_next_index_dword_offset;
+};
+
+
+struct fw_ver_num {
+ u8 major /* Firmware major version number */;
+ u8 minor /* Firmware minor version number */;
+ u8 rev /* Firmware revision version number */;
+ u8 eng /* Firmware engineering version number (for bootleg versions) */;
+};
+
+struct fw_ver_info {
+ __le16 tools_ver /* Tools version number */;
+ u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+ u8 reserved1;
+ struct fw_ver_num num /* FW version number */;
+ __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver /* FW version information */;
+/* Info regarding the FW asserts section in the Storm RAM */
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+
+struct fw_info_location {
+ __le32 grc_addr /* GRC address where the fw_info struct is located. */;
+/* Size of the fw_info structure (that's located at the grc_addr). */
+ __le32 size;
+};
+
+
+
+
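fw_info and friends were moved here from ecore_hsi_init_tool.h (the old copies are deleted below). The locator struct is read from GRC first and then used to find fw_info itself; a rough sketch, with the locator address left as a hypothetical placeholder:

/* FW_INFO_LOCATION_GRC_ADDR is a stand-in; this patch does not define how
 * the locator address is derived.
 */
static void example_read_fw_info_location(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  struct fw_info_location *p_loc)
{
	u32 addr = FW_INFO_LOCATION_GRC_ADDR;

	/* Two little-endian dwords: grc_addr, then size */
	p_loc->grc_addr = OSAL_CPU_TO_LE32(ecore_rd(p_hwfn, p_ptt, addr));
	p_loc->size = OSAL_CPU_TO_LE32(ecore_rd(p_hwfn, p_ptt, addr + 4));
}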
/*
* IGU cleanup command
*/
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index ebb66482..bf548722 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_HSI_DEBUG_TOOLS__
@@ -225,7 +223,7 @@ enum bin_dbg_buffer_type {
* Attention bit mapping
*/
struct dbg_attn_bit_mapping {
- __le16 data;
+ u16 data;
/* The index of an attention in the blocks attentions list
* (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits
* (if is_unused_bit_cnt=1)
@@ -247,14 +245,14 @@ struct dbg_attn_block_type_data {
/* Offset of this block attention names in the debug attention name offsets
* array
*/
- __le16 names_offset;
- __le16 reserved1;
+ u16 names_offset;
+ u16 reserved1;
u8 num_regs /* Number of attention registers in this block */;
u8 reserved2;
/* Offset of this blocks attention registers in the attention registers array
* (in dbg_attn_reg units)
*/
- __le16 regs_offset;
+ u16 regs_offset;
};
/*
@@ -272,20 +270,20 @@ struct dbg_attn_block {
* Attention register result
*/
struct dbg_attn_reg_result {
- __le32 data;
+ u32 data;
/* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
/* Number of attention indexes in this register */
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
-/* The offset of this registers attentions within the blocks attentions
- * list (a value in the range 0..number of block attentions-1)
+/* The offset of this registers attentions within the blocks attentions list
+ * (a value in the range 0..number of block attentions-1)
*/
- __le16 attn_idx_offset;
- __le16 reserved;
- __le32 sts_val /* Value read from the STS attention register */;
- __le32 mask_val /* Value read from the MASK attention register */;
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val /* Value read from the STS attention register */;
+ u32 mask_val /* Value read from the MASK attention register */;
};
/*
@@ -303,7 +301,7 @@ struct dbg_attn_block_result {
/* Offset of this registers block attention names in the attention name offsets
* array
*/
- __le16 names_offset;
+ u16 names_offset;
/* result data for each register in the block in which at least one attention
* bit is set
*/
@@ -316,7 +314,7 @@ struct dbg_attn_block_result {
* mode header
*/
struct dbg_mode_hdr {
- __le16 data;
+ u16 data;
/* indicates if a mode expression should be evaluated (0/1) */
#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
@@ -331,12 +329,11 @@ struct dbg_mode_hdr {
* Attention register
*/
struct dbg_attn_reg {
- struct dbg_mode_hdr mode /* Mode header */;
-/* The offset of this registers attentions within the blocks attentions
- * list (a value in the range 0..number of block attentions-1)
+/* The offset of this registers attentions within the blocks attentions list
+ * (a value in the range 0..number of block attentions-1)
*/
- __le16 attn_idx_offset;
- __le32 data;
+ u16 block_attn_offset;
+ u32 data;
/* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
@@ -344,9 +341,8 @@ struct dbg_attn_reg {
#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
/* STS_CLR attention register GRC address (in dwords) */
- __le32 sts_clr_address;
-/* MASK attention register GRC address (in dwords) */
- __le32 mask_address;
+ u32 sts_clr_address;
+ u32 mask_address /* MASK attention register GRC address (in dwords) */;
};
@@ -370,7 +366,7 @@ struct dbg_bus_block {
/* Indicates if this block has a latency events debug line (0/1). */
u8 has_latency_events;
/* Offset of this blocks lines in the Debug Bus lines array. */
- __le16 lines_offset;
+ u16 lines_offset;
};
@@ -383,7 +379,7 @@ struct dbg_bus_block_user_data {
/* Indicates if this block has a latency events debug line (0/1). */
u8 has_latency_events;
/* Offset of this blocks lines in the debug bus line name offsets array. */
- __le16 names_offset;
+ u16 names_offset;
};
@@ -422,13 +418,13 @@ struct dbg_dump_cond_hdr {
* memory data for registers dump
*/
struct dbg_dump_mem {
- __le32 dword0;
+ u32 dword0;
/* register address (in dwords) */
#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */
#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
- __le32 dword1;
+ u32 dword1;
/* register size (in dwords) */
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
@@ -444,7 +440,7 @@ struct dbg_dump_mem {
* register data for registers dump
*/
struct dbg_dump_reg {
- __le32 data;
+ u32 data;
/* register address (in dwords) */
#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
#define DBG_DUMP_REG_ADDRESS_SHIFT 0
@@ -460,7 +456,7 @@ struct dbg_dump_reg {
* split header for registers dump
*/
struct dbg_dump_split_hdr {
- __le32 hdr;
+ u32 hdr;
/* size in dwords of the data following this header */
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
@@ -474,8 +470,7 @@ struct dbg_dump_split_hdr {
*/
struct dbg_idle_chk_cond_hdr {
struct dbg_mode_hdr mode /* Mode header */;
-/* size in dwords of the data following this header */
- __le16 data_size;
+ u16 data_size /* size in dwords of the data following this header */;
};
@@ -483,7 +478,7 @@ struct dbg_idle_chk_cond_hdr {
* Idle Check condition register
*/
struct dbg_idle_chk_cond_reg {
- __le32 data;
+ u32 data;
/* Register GRC address (in dwords) */
#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
@@ -493,7 +488,7 @@ struct dbg_idle_chk_cond_reg {
/* value from block_id enum */
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- __le16 num_entries /* number of registers entries to check */;
+ u16 num_entries /* number of registers entries to check */;
u8 entry_size /* size of registers entry (in dwords) */;
u8 start_entry /* index of the first entry to check */;
};
@@ -503,7 +498,7 @@ struct dbg_idle_chk_cond_reg {
* Idle Check info register
*/
struct dbg_idle_chk_info_reg {
- __le32 data;
+ u32 data;
/* Register GRC address (in dwords) */
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
@@ -513,7 +508,7 @@ struct dbg_idle_chk_info_reg {
/* value from block_id enum */
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
- __le16 size /* register size in dwords */;
+ u16 size /* register size in dwords */;
struct dbg_mode_hdr mode /* Mode header */;
};
@@ -531,8 +526,8 @@ union dbg_idle_chk_reg {
* Idle Check result header
*/
struct dbg_idle_chk_result_hdr {
- __le16 rule_id /* Failing rule index */;
- __le16 mem_entry_id /* Failing memory entry index */;
+ u16 rule_id /* Failing rule index */;
+ u16 mem_entry_id /* Failing memory entry index */;
u8 num_dumped_cond_regs /* number of dumped condition registers */;
u8 num_dumped_info_regs /* number of dumped info registers */;
u8 severity /* from dbg_idle_chk_severity_types enum */;
@@ -552,7 +547,7 @@ struct dbg_idle_chk_result_reg_hdr {
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
u8 start_entry /* index of the first checked entry */;
- __le16 size /* register size in dwords */;
+ u16 size /* register size in dwords */;
};
@@ -560,7 +555,7 @@ struct dbg_idle_chk_result_reg_hdr {
* Idle Check rule
*/
struct dbg_idle_chk_rule {
- __le16 rule_id /* Idle Check rule ID */;
+ u16 rule_id /* Idle Check rule ID */;
u8 severity /* value from dbg_idle_chk_severity_types enum */;
u8 cond_id /* Condition ID */;
u8 num_cond_regs /* number of condition registers */;
@@ -570,11 +565,11 @@ struct dbg_idle_chk_rule {
/* offset of this rules registers in the idle check register array
* (in dbg_idle_chk_reg units)
*/
- __le16 reg_offset;
+ u16 reg_offset;
/* offset of this rules immediate values in the immediate values array
* (in dwords)
*/
- __le16 imm_offset;
+ u16 imm_offset;
};
@@ -582,7 +577,7 @@ struct dbg_idle_chk_rule {
* Idle Check rule parsing data
*/
struct dbg_idle_chk_rule_parsing_data {
- __le32 data;
+ u32 data;
/* indicates if this register has a FW message */
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
@@ -693,8 +688,8 @@ struct dbg_bus_trigger_state_data {
* Debug Bus memory address
*/
struct dbg_bus_mem_addr {
- __le32 lo;
- __le32 hi;
+ u32 lo;
+ u32 hi;
};
/*
@@ -703,7 +698,7 @@ struct dbg_bus_mem_addr {
struct dbg_bus_pci_buf_data {
struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
- __le32 size /* PCI buffer size in bytes */;
+ u32 size /* PCI buffer size in bytes */;
};
/*
@@ -747,21 +742,20 @@ struct dbg_bus_storm_data {
u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
/* EID filter params to filter on. Valid only if eid_filter_en is set. */
union dbg_bus_storm_eid_params eid_filter_params;
-/* CID to filter on. Valid only if cid_filter_en is set. */
- __le32 cid;
+ u32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
};
/*
* Debug Bus data
*/
struct dbg_bus_data {
- __le32 app_version /* The tools version number of the application */;
+ u32 app_version /* The tools version number of the application */;
u8 state /* The current debug bus state */;
u8 hw_dwords /* HW dwords per cycle */;
/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
* HW ID of dword/qword i
*/
- __le16 hw_id_mask;
+ u16 hw_id_mask;
u8 num_enabled_blocks /* Number of blocks enabled for recording */;
u8 num_enabled_storms /* Number of Storms enabled for recording */;
u8 target /* Output target */;
@@ -783,7 +777,7 @@ struct dbg_bus_data {
* Valid only if both filter and trigger are enabled (0/1)
*/
u8 filter_post_trigger;
- __le16 reserved;
+ u16 reserved;
/* Indicates if the recording trigger is enabled (0/1) */
u8 trigger_en;
/* trigger states data */
@@ -933,9 +927,10 @@ struct dbg_grc_data {
/* Indicates if the GRC parameters were initialized */
u8 params_initialized;
u8 reserved1;
- __le16 reserved2;
-/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */
- __le32 param_val[48];
+ u16 reserved2;
+/* Value of each GRC parameter. Array size must match the enum dbg_grc_params.
+ */
+ u32 param_val[48];
};
@@ -960,7 +955,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
- DBG_GRC_PARAM_RESERVED /* reserved */,
+/* MCP Trace meta data size in bytes */
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
@@ -1087,11 +1083,11 @@ enum dbg_storms {
* Idle Check data
*/
struct idle_chk_data {
- __le32 buf_size /* Idle check buffer size in dwords */;
+ u32 buf_size /* Idle check buffer size in dwords */;
/* Indicates if the idle check buffer size was set (0/1) */
u8 buf_size_set;
u8 reserved1;
- __le16 reserved2;
+ u16 reserved2;
};
/*
@@ -1109,7 +1105,7 @@ struct dbg_tools_data {
u8 initialized /* Indicates if the data was initialized */;
u8 use_dmae /* Indicates if DMAE should be used */;
/* Number of registers that were read since last log */
- __le32 num_regs_read;
+ u32 num_regs_read;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index ffbf5c71..6b512305 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_HSI_ETH__
@@ -346,7 +344,7 @@ struct e4_xstorm_eth_conn_ag_ctx {
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
+ __le16 updated_qm_pq_id /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
@@ -1034,7 +1032,6 @@ struct eth_vport_rx_mode {
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
- __le16 reserved2[3];
};
@@ -1046,11 +1043,11 @@ struct eth_vport_tpa_param {
u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
-/* If set, start each tpa segment on new SGE (GRO mode). One SGE per segment
- * allowed
+/* If set, start each TPA segment on new BD (GRO mode). One BD per segment
+ * allowed.
*/
u8 tpa_pkt_split_flg;
-/* If set, put header of first TPA segment on bd and data on SGE */
+/* If set, put header of first TPA segment on first BD and data on second BD. */
u8 tpa_hdr_data_split_flg;
/* If set, GRO data consistent will checked for TPA continue */
u8 tpa_gro_consistent_flg;
@@ -1089,7 +1086,6 @@ struct eth_vport_tx_mode {
#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
- __le16 reserved2[3];
};
@@ -1216,7 +1212,9 @@ struct rx_queue_update_ramrod_data {
u8 complete_cqe_flg /* post completion to the CQE ring if set */;
u8 complete_event_flg /* post completion to the event ring if set */;
u8 vport_id /* ID of virtual port */;
- u8 reserved[4];
+/* If set, update default rss queue to this RX queue. */
+ u8 set_default_rss_queue;
+ u8 reserved[3];
u8 reserved1 /* FW reserved. */;
u8 reserved2 /* FW reserved. */;
u8 reserved3 /* FW reserved. */;
@@ -1257,7 +1255,8 @@ struct rx_update_gft_filter_data {
__le16 action_icid;
__le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */;
__le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */;
- u8 vport_id /* RX vport Id. */;
+/* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */
+ __le16 vport_id;
/* If set, action_icid will be used for GFT filter update. */
u8 action_icid_valid;
/* If set, rx_qid will be used for traffic steering, in addition to vport_id.
@@ -1273,7 +1272,10 @@ struct rx_update_gft_filter_data {
* case of error.
*/
u8 assert_on_error;
- u8 reserved[2];
+/* If set, inner VLAN will be removed regardless of VPORT configuration.
+ * Supported by E4 only.
+ */
+ u8 inner_vlan_removal_en;
};
@@ -1403,7 +1405,7 @@ struct vport_start_ramrod_data {
u8 ctl_frame_mac_check_en;
/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
- u8 reserved[5];
+ u8 reserved[1];
};
@@ -1486,6 +1488,7 @@ struct vport_update_ramrod_data {
struct vport_update_ramrod_data_cmn common;
struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
+ __le32 reserved[3];
/* TPA configuration parameters */
struct eth_vport_tpa_param tpa_param;
struct vport_update_ramrod_mcast approx_mcast;
@@ -1809,7 +1812,7 @@ struct E4XstormEthConnAgCtxDqExtLdPart {
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
+ __le16 updated_qm_pq_id /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
@@ -2153,7 +2156,7 @@ struct e4_xstorm_eth_hw_conn_ag_ctx {
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
+ __le16 updated_qm_pq_id /* word5 */;
__le16 conn_dpi /* conn_dpi */;
};
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
index 48b0048f..d77edaa1 100644
--- a/drivers/net/qede/base/ecore_hsi_init_func.h
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_HSI_INIT_FUNC__
@@ -24,10 +22,10 @@
* BRB RAM init requirements
*/
struct init_brb_ram_req {
- __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
- __le32 headroom_per_tc /* headroom size per TC, in bytes */;
- __le32 min_pkt_size /* min packet size, in bytes */;
- __le32 max_ports_per_engine /* min packet size, in bytes */;
+ u32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
+ u32 headroom_per_tc /* headroom size per TC, in bytes */;
+ u32 min_pkt_size /* min packet size, in bytes */;
+ u32 max_ports_per_engine /* max number of ports per engine */;
u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
};
@@ -44,15 +42,14 @@ struct init_ets_tc_req {
* (indicated by the weight field)
*/
u8 use_wfq;
-/* An arbitration weight. Valid only if use_wfq is set. */
- __le16 weight;
+ u16 weight /* An arbitration weight. Valid only if use_wfq is set. */;
};
/*
* ETS init requirements
*/
struct init_ets_req {
- __le32 mtu /* Max packet size (in bytes) */;
+ u32 mtu /* Max packet size (in bytes) */;
/* ETS initialization requirements per TC. */
struct init_ets_tc_req tc_req[NUM_OF_TCS];
};
@@ -64,12 +61,12 @@ struct init_ets_req {
*/
struct init_nig_lb_rl_req {
/* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
- __le16 lb_mac_rate;
+ u16 lb_mac_rate;
/* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
- __le16 lb_rate;
- __le32 mtu /* Max packet size (in bytes) */;
+ u16 lb_rate;
+ u32 mtu /* Max packet size (in bytes) */;
/* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */
- __le16 tc_rate[NUM_OF_PHYS_TCS];
+ u16 tc_rate[NUM_OF_PHYS_TCS];
};
@@ -98,10 +95,10 @@ struct init_qm_port_params {
/* Vector of valid bits for active TCs used by this port */
u8 active_phys_tcs;
/* number of PBF command lines that can be used by this port */
- __le16 num_pbf_cmd_lines;
+ u16 num_pbf_cmd_lines;
/* number of BTB blocks that can be used by this port */
- __le16 num_btb_blocks;
- __le16 reserved;
+ u16 num_btb_blocks;
+ u16 reserved;
};
@@ -114,6 +111,9 @@ struct init_qm_pq_params {
u8 wrr_group /* WRR group */;
/* Indicates if a rate limiter should be allocated for the PQ (0/1) */
u8 rl_valid;
+ u8 port_id /* Port ID */;
+ u8 reserved0;
+ u16 reserved1;
};
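With port_id now carried per PQ (the single port_id argument to ecore_qm_pf_rt_init() is removed below), callers fill it while building the params array. A sketch under that assumption, using only field names from this header:

/* Sketch: one Tx PQ per TC, each tagged with its own port. The surrounding
 * allocation and the vport_id/port_id values are illustrative.
 */
struct init_qm_pq_params pq_params[NUM_OF_TCS];
u8 tc;

for (tc = 0; tc < NUM_OF_TCS; tc++) {
	pq_params[tc].vport_id = vport_id;
	pq_params[tc].tc_id = tc;
	pq_params[tc].wrr_group = 0;
	pq_params[tc].rl_valid = 0;
	pq_params[tc].port_id = port_id;	/* new in this patch */
}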
@@ -124,13 +124,13 @@ struct init_qm_vport_params {
/* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if
* VPORT RL is globally disabled.
*/
- __le32 vport_rl;
+ u32 vport_rl;
/* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is
* globally disabled.
*/
- __le16 vport_wfq;
+ u16 vport_wfq;
/* the first Tx PQ ID associated with this VPORT for each TC. */
- __le16 first_tx_pq_id[NUM_OF_TCS];
+ u16 first_tx_pq_id[NUM_OF_TCS];
};
#endif /* __ECORE_HSI_INIT_FUNC__ */
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
index 1f57e9b2..0e157f9b 100644
--- a/drivers/net/qede/base/ecore_hsi_init_tool.h
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_HSI_INIT_TOOL__
@@ -30,59 +28,13 @@ enum chip_ids {
};
-struct fw_asserts_ram_section {
-/* The offset of the section in the RAM in RAM lines (64-bit units) */
- __le16 section_ram_line_offset;
-/* The size of the section in RAM lines (64-bit units) */
- __le16 section_ram_line_size;
-/* The offset of the asserts list within the section in dwords */
- u8 list_dword_offset;
-/* The size of an assert list element in dwords */
- u8 list_element_dword_size;
- u8 list_num_elements /* The number of elements in the asserts list */;
-/* The offset of the next list index field within the section in dwords */
- u8 list_next_index_dword_offset;
-};
-
-
-struct fw_ver_num {
- u8 major /* Firmware major version number */;
- u8 minor /* Firmware minor version number */;
- u8 rev /* Firmware revision version number */;
-/* Firmware engineering version number (for bootleg versions) */
- u8 eng;
-};
-
-struct fw_ver_info {
- __le16 tools_ver /* Tools version number */;
- u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
- u8 reserved1;
- struct fw_ver_num num /* FW version number */;
- __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
- __le32 reserved2;
-};
-
-struct fw_info {
- struct fw_ver_info ver /* FW version information */;
-/* Info regarding the FW asserts section in the Storm RAM */
- struct fw_asserts_ram_section fw_asserts_section;
-};
-
-
-struct fw_info_location {
-/* GRC address where the fw_info struct is located. */
- __le32 grc_addr;
-/* Size of the fw_info structure (thats located at the grc_addr). */
- __le32 size;
-};
-
/*
* Binary buffer header
*/
struct bin_buffer_hdr {
/* buffer offset in bytes from the beginning of the binary file */
- __le32 offset;
- __le32 length /* buffer length in bytes */;
+ u32 offset;
+ u32 length /* buffer length in bytes */;
};
@@ -103,7 +55,7 @@ enum bin_init_buffer_type {
* init array header: raw
*/
struct init_array_raw_hdr {
- __le32 data;
+ u32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
@@ -116,7 +68,7 @@ struct init_array_raw_hdr {
* init array header: standard
*/
struct init_array_standard_hdr {
- __le32 data;
+ u32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
@@ -129,7 +81,7 @@ struct init_array_standard_hdr {
* init array header: zipped
*/
struct init_array_zipped_hdr {
- __le32 data;
+ u32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
@@ -142,7 +94,7 @@ struct init_array_zipped_hdr {
* init array header: pattern
*/
struct init_array_pattern_hdr {
- __le32 data;
+ u32 data;
/* Init array type, from init_array_types enum */
#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
@@ -223,14 +175,14 @@ enum init_array_types {
* init operation: callback
*/
struct init_callback_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_CALLBACK_OP_OP_MASK 0xF
#define INIT_CALLBACK_OP_OP_SHIFT 0
#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
- __le16 callback_id /* Callback ID */;
- __le16 block_id /* Blocks ID */;
+ u16 callback_id /* Callback ID */;
+ u16 block_id /* Blocks ID */;
};
@@ -238,7 +190,7 @@ struct init_callback_op {
* init operation: delay
*/
struct init_delay_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_DELAY_OP_OP_MASK 0xF
#define INIT_DELAY_OP_OP_SHIFT 0
@@ -252,7 +204,7 @@ struct init_delay_op {
* init operation: if_mode
*/
struct init_if_mode_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_IF_MODE_OP_OP_MASK 0xF
#define INIT_IF_MODE_OP_OP_SHIFT 0
@@ -261,9 +213,8 @@ struct init_if_mode_op {
/* Commands to skip if the modes dont match */
#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
- __le16 reserved2;
-/* offset (in bytes) in modes expression buffer */
- __le16 modes_buf_offset;
+ u16 reserved2;
+ u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
};
@@ -271,7 +222,7 @@ struct init_if_mode_op {
* init operation: if_phase
*/
struct init_if_phase_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_IF_PHASE_OP_OP_MASK 0xF
#define INIT_IF_PHASE_OP_OP_SHIFT 0
@@ -283,7 +234,7 @@ struct init_if_phase_op {
/* Commands to skip if the phases dont match */
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
- __le32 phase_data;
+ u32 phase_data;
#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
@@ -308,21 +259,21 @@ enum init_mode_ops {
* init operation: raw
*/
struct init_raw_op {
- __le32 op_data;
+ u32 op_data;
/* Init operation, from init_op_types enum */
#define INIT_RAW_OP_OP_MASK 0xF
#define INIT_RAW_OP_OP_SHIFT 0
#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
#define INIT_RAW_OP_PARAM1_SHIFT 4
- __le32 param2 /* Init param 2 */;
+ u32 param2 /* Init param 2 */;
};
/*
* init array params
*/
struct init_op_array_params {
- __le16 size /* array size in dwords */;
- __le16 offset /* array start offset in dwords */;
+ u16 size /* array size in dwords */;
+ u16 offset /* array start offset in dwords */;
};
/*
@@ -330,11 +281,11 @@ struct init_op_array_params {
*/
union init_write_args {
/* value to write, used when init source is INIT_SRC_INLINE */
- __le32 inline_val;
+ u32 inline_val;
/* number of zeros to write, used when init source is INIT_SRC_ZEROS */
- __le32 zeros_count;
+ u32 zeros_count;
/* array offset to write, used when init source is INIT_SRC_ARRAY */
- __le32 array_offset;
+ u32 array_offset;
/* runtime array params to write, used when init source is INIT_SRC_RUNTIME */
struct init_op_array_params runtime;
};
@@ -343,7 +294,7 @@ union init_write_args {
* init operation: write
*/
struct init_write_op {
- __le32 data;
+ u32 data;
/* init operation, from init_op_types enum */
#define INIT_WRITE_OP_OP_MASK 0xF
#define INIT_WRITE_OP_OP_SHIFT 0
@@ -365,7 +316,7 @@ struct init_write_op {
* init operation: read
*/
struct init_read_op {
- __le32 op_data;
+ u32 op_data;
/* init operation, from init_op_types enum */
#define INIT_READ_OP_OP_MASK 0xF
#define INIT_READ_OP_OP_SHIFT 0
@@ -378,7 +329,7 @@ struct init_read_op {
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT 9
/* expected polling value, used only when polling is done */
- __le32 expected_val;
+ u32 expected_val;
};
/*
@@ -444,11 +395,11 @@ enum init_source_types {
* Internal RAM Offsets macro data
*/
struct iro {
- __le32 base /* RAM field offset */;
- __le16 m1 /* multiplier 1 */;
- __le16 m2 /* multiplier 2 */;
- __le16 m3 /* multiplier 3 */;
- __le16 size /* RAM field size */;
+ u32 base /* RAM field offset */;
+ u16 m1 /* multiplier 1 */;
+ u16 m2 /* multiplier 2 */;
+ u16 m3 /* multiplier 3 */;
+ u16 size /* RAM field size */;
};
#endif /* __ECORE_HSI_INIT_TOOL__ */
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 84f273b0..51bba27e 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -38,6 +36,12 @@ struct ecore_ptt_pool {
struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
+void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = OSAL_NULL;
+}
+
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
@@ -65,10 +69,12 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->p_ptt_pool = p_pool;
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) {
+ __ecore_ptt_pool_free(p_hwfn);
+ return ECORE_NOMEM;
+ }
#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
-
return ECORE_SUCCESS;
}
@@ -89,7 +95,7 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
#endif
- OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
+ __ecore_ptt_pool_free(p_hwfn);
}
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
@@ -569,7 +575,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
if (*p_comp == OSAL_NULL) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `p_completion_word'\n");
goto err;
}
@@ -578,7 +584,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(struct dmae_cmd));
if (*p_cmd == OSAL_NULL) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct dmae_cmd'\n");
goto err;
}
@@ -587,12 +593,13 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
*p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
sizeof(u32) * DMAE_MAX_RW_SIZE);
if (*p_buff == OSAL_NULL) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `intermediate_buffer'\n");
goto err;
}
- p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+ p_hwfn->dmae_info.b_mem_ready = true;
return ECORE_SUCCESS;
err:
@@ -604,8 +611,9 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
{
dma_addr_t p_phys;
- /* Just make sure no one is in the middle */
- OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
+ p_hwfn->dmae_info.b_mem_ready = false;
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
@@ -630,8 +638,6 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
}
-
- OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
}
static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
@@ -777,6 +783,15 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 offset = 0;
+ if (!p_hwfn->dmae_info.b_mem_ready) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+ (unsigned long)src_addr, src_type,
+ (unsigned long)dst_addr, dst_type,
+ size_in_dwords);
+ return ECORE_NOMEM;
+ }
+
if (p_hwfn->p_dev->recov_in_prog) {
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
@@ -870,7 +885,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
params.flags = flags;
- OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
grc_addr_in_dw,
@@ -878,7 +893,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
ECORE_DMAE_ADDRESS_GRC,
size_in_dwords, &params);
- OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
@@ -896,14 +911,14 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
params.flags = flags;
- OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
dest_addr, ECORE_DMAE_ADDRESS_GRC,
ECORE_DMAE_ADDRESS_HOST_VIRT,
size_in_dwords, &params);
- OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
@@ -917,7 +932,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
{
enum _ecore_status_t rc;
- OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
dest_addr,
@@ -925,7 +940,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
ECORE_DMAE_ADDRESS_HOST_PHYS,
size_in_dwords, p_params);
- OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
+ OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
@@ -944,3 +959,74 @@ void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
}
+
+enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ const char *phase)
+{
+ u32 size = OSAL_PAGE_SIZE / 2, val;
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 *p_tmp;
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: failed to allocate memory\n",
+ phase);
+ return ECORE_NOMEM;
+ }
+
+ /* Fill the bottom half of the allocated memory with a known pattern */
+ for (p_tmp = (u32 *)p_virt;
+ p_tmp < (u32 *)((u8 *)p_virt + size);
+ p_tmp++) {
+ /* Save the address itself as the value */
+ val = (u32)(osal_uintptr_t)p_tmp;
+ *p_tmp = val;
+ }
+
+ /* Zero the top half of the allocated memory */
+ OSAL_MEM_ZERO((u8 *)p_virt + size, size);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n",
+ phase, (unsigned long)p_phys, p_virt,
+ (unsigned long)(p_phys + size),
+ (u8 *)p_virt + size, size);
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+ size / 4 /* size_in_dwords */, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
+ phase, rc);
+ goto out;
+ }
+
+ /* Verify that the top half of the allocated memory has the pattern */
+ for (p_tmp = (u32 *)((u8 *)p_virt + size);
+ p_tmp < (u32 *)((u8 *)p_virt + (2 * size));
+ p_tmp++) {
+ /* The corresponding address in the bottom half */
+ val = (u32)(osal_uintptr_t)p_tmp - size;
+
+ if (*p_tmp != val) {
+ DP_NOTICE(p_hwfn, false,
+ "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+ phase,
+ (unsigned long)p_phys +
+ ((u8 *)p_tmp - (u8 *)p_virt),
+ p_tmp, *p_tmp, val);
+ rc = ECORE_UNKNOWN_ERROR;
+ goto out;
+ }
+ }
+
+out:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
+ return rc;
+}
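A sketch of how a caller might use the new sanity check; where exactly it runs during bring-up is an assumption:

/* Illustrative callsite: verify DMAE before relying on it, e.g. right
 * after acquiring a PTT during load.
 */
enum _ecore_status_t rc;

rc = ecore_dmae_sanity(p_hwfn, p_ptt, "load");
if (rc != ECORE_SUCCESS) {
	DP_NOTICE(p_hwfn, false, "DMAE sanity failed, aborting load\n");
	return rc;
}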
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 0b9814f5..394207eb 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_HW_H__
@@ -255,4 +253,8 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type);
+enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ const char *phase);
+
#endif /* __ECORE_HW_H__ */
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
index 4456af43..b8c2686f 100644
--- a/drivers/net/qede/base/ecore_hw_defs.h
+++ b/drivers/net/qede/base/ecore_hw_defs.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef _ECORE_IGU_DEF_H_
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 1da80a65..b8496cb2 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -76,12 +74,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in mbps. the factor of 1.01 was
-* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
-* 2544 test. In this scenario the PF RL was reducing the line rate to 99%
-* although the credit increment value was the correct one and FW calculated
-* correct packet sizes. The reason for the inaccuracy of the RL is unknown at
-* this point.
-*/
+ * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+ * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+ * although the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+ * this point.
+ */
#define QM_RL_INC_VAL(rate) \
OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
(8 * 100)), 1)
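To make the 1.01 factor concrete, a worked example (assuming QM_RL_PERIOD is 5 us; its definition is not part of this hunk):

/*
 * QM_RL_INC_VAL(25000)                          [25 Gbps port]
 *   = max(25000 * 5 * 101 / (8 * 100), 1)
 *   = max(12625000 / 800, 1)
 *   = 15781 credits per period
 * versus 15625 without the 1.01 correction, i.e. about 1% extra credit.
 */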
@@ -182,7 +180,7 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4)
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4)
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -421,9 +419,9 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Tx PQ mapping runtime init values for the specified PF */
static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
@@ -437,7 +435,7 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
- u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
+ u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
num_pqs = num_pf_pqs + num_vf_pqs;
@@ -467,11 +465,11 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
bool is_vf_pq, rl_valid;
u16 first_tx_pq_id;
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
+ pq_params[i].tc_id,
max_phys_tcs_per_port);
is_vf_pq = (i >= num_pf_pqs);
- rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
- max_qm_global_rls;
+ rl_valid = pq_params[i].rl_valid > 0;
/* Update first Tx PQ of VPORT/TC */
vport_id_in_pf = pq_params[i].vport_id - start_vport;
@@ -492,28 +490,38 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
}
/* Check RL ID */
- if (pq_params[i].rl_valid && pq_params[i].vport_id >=
- max_qm_global_rls)
+ if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT ID for rate limiter config\n");
+ rl_valid = false;
+ }
/* Prepare PQ map entry */
struct qm_rf_pq_map_e4 tx_pq_map;
+
QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
1 : 0,
first_tx_pq_id, rl_valid ?
pq_params[i].vport_id : 0,
ext_voq, pq_params[i].wrr_group);
- /* Set base address */
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+ /* Clear PQ pointer table entry (64 bit) */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
/* Write PQ info to RAM */
if (WRITE_PQ_INFO_TO_RAM != 0) {
u32 pq_info = 0;
+
pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
- pq_params[i].tc_id, port_id,
+ pq_params[i].tc_id,
+ pq_params[i].port_id,
rl_valid ? 1 : 0, rl_valid ?
pq_params[i].vport_id : 0);
ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
@@ -540,12 +548,13 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 pf_id,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_tids,
u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
- u16 i, pq_id, pq_group;
+ u16 i, j, pq_id, pq_group;
/* A single other PQ group is used in each PF, where PQ group i is used
* in PF i.
@@ -563,11 +572,19 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ /* Set PQ base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+
+ /* Clear PQ pointer table entry */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLOTHER_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
mem_addr_4kb += pq_mem_4kb;
}
}
@@ -576,7 +593,6 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
* Return -1 on error.
*/
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
u16 pf_wfq,
u8 max_phys_tcs_per_port,
@@ -595,7 +611,8 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
}
for (i = 0; i < num_tx_pqs; i++) {
- ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id,
+ pq_params[i].tc_id,
max_phys_tcs_per_port);
crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
QM_REG_WFQPFCRD_RT_OFFSET :
@@ -604,12 +621,12 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
(pf_id % MAX_NUM_PFS_BB);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id,
- inc_val);
}
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET +
+ pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
@@ -820,9 +837,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
@@ -850,20 +867,21 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
- ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
+ ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids,
+ num_tids, 0);
#endif
/* Map Tx PQs */
- ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
- max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
+ ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port,
+ is_pf_loading, num_pf_cids, num_vf_cids,
start_pq, num_pf_pqs, num_vf_pqs, start_vport,
other_mem_size_4kb, pq_params, vport_params);
/* Init PF WFQ */
if (pf_wfq)
- if (ecore_pf_wfq_rt_init
- (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
- num_pf_pqs + num_vf_pqs, pq_params))
+ if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq,
+ max_phys_tcs_per_port,
+ num_pf_pqs + num_vf_pqs, pq_params))
return -1;
/* Init PF RL */
@@ -1419,7 +1437,9 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
(var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
-#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008
+#define PRS_ETH_OUTPUT_FORMAT -46832
+
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
@@ -1444,9 +1464,14 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
@@ -1476,9 +1501,14 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
@@ -1526,9 +1556,14 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
- if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
- (u32)PRS_ETH_TUNN_FIC_FORMAT);
+ if (reg_val) { /* TODO: handle E5 init */
+ reg_val = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
/* Update NIG register */
@@ -1548,6 +1583,36 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
ip_geneve_enable ? 1 : 0);
}
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512
+
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable)
+{
+ u32 reg_val, cfg_mask;
+
+ /* read PRS config register */
+ reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
+
+ /* set VXLAN_NO_L2_ENABLE mask */
+ cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
+
+ if (enable) {
+ /* set VXLAN_NO_L2_ENABLE flag */
+ reg_val |= cfg_mask;
+
+ /* update PRS FIC register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+ } else {
+ /* clear VXLAN_NO_L2_ENABLE flag */
+ reg_val &= ~cfg_mask;
+ }
+
+ /* write PRS config register */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
+}
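A hedged usage sketch; pairing the new flag with the existing VXLAN UDP-port setup is illustrative, not required by the patch:

/* Both functions are defined in this file; 4789 is the IANA VXLAN port. */
ecore_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
ecore_set_vxlan_no_l2_enable(p_hwfn, p_ptt, true);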
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
@@ -1664,6 +1729,10 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
ram_line_lo = 0;
ram_line_hi = 0;
+ /* Tunnel type */
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+
if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
@@ -1675,9 +1744,14 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
- } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
}
ecore_wr(p_hwfn, p_ptt,
@@ -1921,3 +1995,53 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
+
+#define RSS_IND_TABLE_BASE_ADDR 4112
+#define RSS_IND_TABLE_VPORT_SIZE 16
+#define RSS_IND_TABLE_ENTRY_PER_LINE 8
+
+/* Update RSS indirection table entry. */
+void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 rss_id,
+ u8 ind_table_index,
+ u16 ind_table_value)
+{
+ u32 cnt, rss_addr;
+ u32 *reg_val;
+ u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
+ u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];
+
+ /* get entry address */
+ rss_addr = RSS_IND_TABLE_BASE_ADDR +
+ RSS_IND_TABLE_VPORT_SIZE * rss_id +
+ ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
+
+ /* prepare update command */
+ ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;
+
+ for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
+ if (cnt == ind_table_index) {
+ rss_ind_entry[cnt] = ind_table_value;
+ rss_ind_mask[cnt] = 0xFFFF;
+ } else {
+ rss_ind_entry[cnt] = 0;
+ rss_ind_mask[cnt] = 0;
+ }
+ }
+
+ /* Update entry in HW */
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+
+ reg_val = (u32 *)rss_ind_mask;
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);
+
+ reg_val = (u32 *)rss_ind_entry;
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
+ ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
+}
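The function packs eight 16-bit entries per RAM line and masks off all but the one being updated. A worked example using only the constants above:

/* rss_id = 2, ind_table_index = 19, ind_table_value = 7:
 *   rss_addr = 4112 + 16 * 2 + 19 / 8 = 4146
 *   19 % 8 = 3, so rss_ind_mask[3] = 0xFFFF and rss_ind_entry[3] = 7;
 *   every other mask word is 0, leaving those entries untouched in HW.
 */
ecore_update_eth_rss_ind_table_entry(p_hwfn, p_ptt, 2, 19, 7);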
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index ab560e59..1024bb26 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef _INIT_FW_FUNCS_H
@@ -61,9 +59,10 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
- * @param port_id - port ID
* @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param is_pf_loading - indicates if the PF is currently loading,
+ * i.e. it has no allocated QM resources.
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -87,9 +86,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
*/
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
+ bool is_pf_loading,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
@@ -259,6 +258,16 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct init_brb_ram_req *req);
#endif /* UNUSED_HSI_FUNC */
+/**
+ * @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param enable - VXLAN no L2 enable flag.
+ */
+void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool enable);
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
@@ -462,4 +471,22 @@ void ecore_memset_session_ctx(void *p_ctx_mem,
void ecore_memset_task_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
+
+/**
+ * @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table
+ * entry.
+ * The function must run in exclusive mode to prevent an inconsistent RSS
+ * configuration.
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param rss_id - RSS engine ID.
+ * @param ind_table_index - RSS indirection table index.
+ * @param ind_table_value - RSS indirection table new value.
+ */
+void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 rss_id,
+ u8 ind_table_index,
+ u16 ind_table_value);
+
#endif
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 91633c11..b7636f36 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
/* include the precompiled configuration values - only once */
@@ -389,23 +387,29 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
}
if (i == ECORE_INIT_MAX_POLL_COUNT)
- DP_ERR(p_hwfn,
- "Timeout when polling reg: 0x%08x [ Waiting-for: %08x"
- " Got: %08x (comparsion %08x)]\n",
+ DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
OSAL_LE32_TO_CPU(cmd->op_data));
}
-/* init_ops callbacks entry point.
- * OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings.
- * Should be removed when the function is actually used.
- */
-static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt OSAL_UNUSED * p_ptt,
- struct init_callback_op OSAL_UNUSED * p_cmd)
+/* init_ops callbacks entry point */
+static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
{
- DP_NOTICE(p_hwfn, true,
- "Currently init values have no need of callbacks\n");
+ enum _ecore_status_t rc;
+
+ switch (p_cmd->callback_id) {
+ case DMAE_READY_CB:
+ rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
+ p_cmd->callback_id);
+ return ECORE_INVAL;
+ }
+
+ return rc;
}
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
@@ -513,7 +517,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
break;
case INIT_OP_CALLBACK:
- ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}
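
The callback op now returns a status instead of only logging, and ecore_init_run() propagates it. A minimal sketch of that dispatch-and-propagate shape; the enum values and the sanity hook below are illustrative stand-ins, not the driver's definitions:

    #include <stdio.h>

    /* Illustrative status and callback IDs; the driver's real types are
     * enum _ecore_status_t and DMAE_READY_CB. */
    enum status { ST_OK = 0, ST_INVAL = -1 };
    enum cb_id { CB_DMAE_READY = 0 };

    static enum status dmae_sanity(void)
    {
        return ST_OK; /* stands in for ecore_dmae_sanity() */
    }

    static enum status init_cmd_cb(enum cb_id id)
    {
        switch (id) {
        case CB_DMAE_READY:
            return dmae_sanity(); /* status flows back to the caller */
        default:
            fprintf(stderr, "Unexpected init op callback ID %d\n", id);
            return ST_INVAL; /* unknown IDs are errors, not no-ops */
        }
    }

    int main(void)
    {
        return init_cmd_cb(CB_DMAE_READY) == ST_OK ? 0 : 1;
    }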
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
index e293a4a3..de7846d4 100644
--- a/drivers/net/qede/base/ecore_init_ops.h
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_INIT_OPS__
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index e6cef85b..4c271d35 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -1,11 +1,11 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
+#include <rte_string_fns.h>
+
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
@@ -231,15 +231,19 @@ static const char *grc_timeout_attn_master_to_str(u8 master)
static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u32 tmp, tmp2;
/* We've already cleared the timeout interrupt register, so we learn
- * of interrupts via the validity register
+ * of interrupts via the validity register.
+ * Any attention which is not for a timeout event is treated as fatal.
*/
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
- if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
+ if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) {
+ rc = ECORE_INVAL;
goto out;
+ }
/* Read the GRC timeout information */
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
@@ -263,11 +267,11 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
(tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
ECORE_GRC_ATTENTION_VF_SHIFT);
-out:
- /* Regardles of anything else, clean the validity bit */
+ /* Clean the validity bit */
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
- return ECORE_SUCCESS;
+out:
+ return rc;
}
#define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
@@ -285,9 +289,11 @@ out:
#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+ struct ecore_ptt *p_ptt,
+ bool is_hw_init)
{
u32 tmp;
+ char str[512] = {0};
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
@@ -299,9 +305,8 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
-
- DP_NOTICE(p_hwfn, false,
- "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ OSAL_SNPRINTF(str, 512,
+ "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
addr_hi, addr_lo, details,
(u8)((details &
ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
@@ -318,6 +323,10 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
1 : 0),
(u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
1 : 0));
+ if (is_hw_init)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str);
+ else
+ DP_NOTICE(p_hwfn, false, "%s", str);
}
tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
@@ -393,7 +402,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
- return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+ return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
@@ -1104,9 +1113,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
p_aeu->bit_name,
num);
else
- OSAL_STRNCPY(bit_name,
- p_aeu->bit_name,
- 30);
+ strlcpy(bit_name,
+ p_aeu->bit_name,
+ sizeof(bit_name));
/* We now need to pass bitmask in its
* correct position.
@@ -1406,8 +1415,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
/* SB struct */
p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
- DP_NOTICE(p_dev, true,
- "Failed to allocate `struct ecore_sb_attn_info'\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n");
return ECORE_NOMEM;
}
@@ -1415,8 +1423,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
SB_ATTN_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
- DP_NOTICE(p_dev, true,
- "Failed to allocate status block (attentions)\n");
+ DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n");
OSAL_FREE(p_dev, p_sb);
return ECORE_NOMEM;
}
@@ -1795,8 +1802,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*p_sb));
if (!p_sb) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_sb_info'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n");
return ECORE_NOMEM;
}
@@ -1804,7 +1810,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_phys, SB_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n");
OSAL_FREE(p_hwfn->p_dev, p_sb);
return ECORE_NOMEM;
}
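
The strlcpy() change above replaces a magic length of 30 with a bound tied to the destination buffer and guarantees NUL termination. A standalone sketch of the same semantics, using a local stand-in since the driver gets strlcpy via <rte_string_fns.h>:

    #include <stdio.h>
    #include <string.h>

    /* Local stand-in with strlcpy() semantics: copies at most size-1 bytes
     * and always NUL-terminates (unlike strncpy()). */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char bit_name[30];

        my_strlcpy(bit_name, "A_LONG_ATTENTION_BIT_NAME_THAT_OVERFLOWS",
                   sizeof(bit_name));
        printf("%s\n", bit_name); /* truncated but NUL-terminated */
        return 0;
    }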
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 563051c3..041240d7 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_INT_H__
@@ -256,6 +254,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
#endif
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+ struct ecore_ptt *p_ptt,
+ bool is_hw_init);
#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index 24cdf5ed..aeaf469e 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_INT_API_H__
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 218ef50b..29001d71 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_SRIOV_API_H__
@@ -540,6 +538,17 @@ bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
u32 ecore_iov_pfvf_msg_length(void);
/**
+ * @brief Returns MAC address if one is configured
+ *
+ * @parm p_hwfn
+ * @parm rel_vf_id
+ *
+ * @return OSAL_NULL if the MAC isn't set; otherwise, returns the MAC.
+ */
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
+
+/**
* @brief Returns forced MAC address if one is configured
*
* @parm p_hwfn
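
The new accessor returns OSAL_NULL when no MAC is configured, so callers must check the pointer before use. A hedged usage sketch; bulletin_get_mac() below is a hypothetical stand-in, not the driver's implementation:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for ecore_iov_bulletin_get_mac(): returns NULL
     * (OSAL_NULL in the driver) when the bulletin carries no MAC. */
    static const uint8_t *bulletin_get_mac(uint16_t rel_vf_id)
    {
        static const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        return rel_vf_id == 0 ? mac : NULL;
    }

    int main(void)
    {
        const uint8_t *mac = bulletin_get_mac(0);

        if (mac == NULL) { /* always check before dereferencing */
            puts("no MAC configured");
            return 1;
        }
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }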
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index 360d7f88..05693029 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __IRO_H__
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 41532eeb..685fa2e8 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __IRO_VALUES_H__
@@ -13,9 +11,9 @@ static const struct iro iro_arr[51] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
- { 0x4cb0, 0x80, 0x0, 0x0, 0x80},
+ { 0x4cb8, 0x88, 0x0, 0x0, 0x88},
/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
- { 0x6508, 0x20, 0x0, 0x0, 0x20},
+ { 0x6530, 0x20, 0x0, 0x0, 0x20},
/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4},
/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
@@ -27,49 +25,49 @@ static const struct iro iro_arr[51] = {
/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
{ 0x84, 0x8, 0x0, 0x0, 0x2},
/* XSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4c40, 0x0, 0x0, 0x0, 0x78},
+ { 0x4c48, 0x0, 0x0, 0x0, 0x78},
/* YSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x3e10, 0x0, 0x0, 0x0, 0x78},
+ { 0x3e38, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x2b50, 0x0, 0x0, 0x0, 0x78},
+ { 0x2b78, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4c38, 0x0, 0x0, 0x0, 0x78},
+ { 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* MSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4990, 0x0, 0x0, 0x0, 0x78},
+ { 0x4998, 0x0, 0x0, 0x0, 0x78},
/* USTORM_INTEG_TEST_DATA_OFFSET */
- { 0x7f48, 0x0, 0x0, 0x0, 0x78},
+ { 0x7f50, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0x61e8, 0x10, 0x0, 0x0, 0x10},
+ { 0x6210, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
- { 0x96b8, 0x30, 0x0, 0x0, 0x30},
+ { 0x96c0, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x4b60, 0x80, 0x0, 0x0, 0x40},
+ { 0x4b68, 0x80, 0x0, 0x0, 0x40},
/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
- { 0x53a0, 0x80, 0x4, 0x0, 0x4},
+ { 0x53a8, 0x80, 0x4, 0x0, 0x4},
/* MSTORM_TPA_TIMEOUT_US_OFFSET */
- { 0xc7c8, 0x0, 0x0, 0x0, 0x4},
+ { 0xc7d0, 0x0, 0x0, 0x0, 0x4},
/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0x4ba0, 0x80, 0x0, 0x0, 0x20},
+ { 0x4ba8, 0x80, 0x0, 0x0, 0x20},
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x8150, 0x40, 0x0, 0x0, 0x30},
+ { 0x8158, 0x40, 0x0, 0x0, 0x30},
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0xe770, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x2ce8, 0x80, 0x0, 0x0, 0x38},
+ { 0x2d10, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf2b0, 0x78, 0x0, 0x0, 0x78},
+ { 0xf2b8, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
- { 0xaef8, 0x0, 0x0, 0x0, 0xf0},
+ { 0xaf20, 0x0, 0x0, 0x0, 0xf0},
/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
- { 0xafe8, 0x8, 0x0, 0x0, 0x8},
+ { 0xb010, 0x8, 0x0, 0x0, 0x8},
/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8},
/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
@@ -81,37 +79,37 @@ static const struct iro iro_arr[51] = {
/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x8},
/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
- { 0x200, 0x18, 0x8, 0x0, 0x8},
+ { 0x400, 0x18, 0x8, 0x0, 0x8},
/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x18, 0x8, 0x0, 0x2},
/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0xd878, 0x50, 0x0, 0x0, 0x3c},
+ { 0xd898, 0x50, 0x0, 0x0, 0x3c},
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12908, 0x18, 0x0, 0x0, 0x10},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa8, 0x40, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0xa580, 0x50, 0x0, 0x0, 0x20},
+ { 0xa588, 0x50, 0x0, 0x0, 0x20},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x86f8, 0x40, 0x0, 0x0, 0x28},
+ { 0x8700, 0x40, 0x0, 0x0, 0x28},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x102f8, 0x18, 0x0, 0x0, 0x10},
+ { 0x10300, 0x18, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
- { 0xde28, 0x48, 0x0, 0x0, 0x38},
+ { 0xde48, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
- { 0x10760, 0x20, 0x0, 0x0, 0x20},
+ { 0x10768, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x2d20, 0x80, 0x0, 0x0, 0x10},
+ { 0x2d48, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x5020, 0x10, 0x0, 0x0, 0x10},
+ { 0x5048, 0x10, 0x0, 0x0, 0x10},
/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
- { 0xc9b0, 0x30, 0x0, 0x0, 0x10},
+ { 0xc9b8, 0x30, 0x0, 0x0, 0x10},
/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
- { 0xeec0, 0x10, 0x0, 0x0, 0x10},
+ { 0xed90, 0x10, 0x0, 0x0, 0x10},
/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
- { 0xa398, 0x10, 0x0, 0x0, 0x10},
+ { 0xa520, 0x10, 0x0, 0x0, 0x10},
/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
- { 0x13100, 0x8, 0x0, 0x0, 0x8},
+ { 0x13108, 0x8, 0x0, 0x0, 0x8},
};
#endif /* __IRO_VALUES_H__ */
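
Each row above reads as {base, m1, m2, m3, size}: the final memory offset is the base plus per-index strides. A standalone worked example for the updated TSTORM_PORT_STAT_OFFSET(port_id) row; the struct shape is assumed from how the table is indexed, not quoted from the header:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shape of an IRO entry: a base address, up to three per-index
     * strides, and the element size. */
    struct iro { uint32_t base; uint16_t m1, m2, m3, size; };

    /* Updated first data row above: TSTORM_PORT_STAT_OFFSET(port_id) */
    static const struct iro tstorm_port_stat = { 0x4cb8, 0x88, 0x0, 0x0, 0x88 };

    static uint32_t iro_offset(const struct iro *e, uint32_t id1, uint32_t id2,
                               uint32_t id3)
    {
        return e->base + e->m1 * id1 + e->m2 * id2 + e->m3 * id3;
    }

    int main(void)
    {
        /* port_id = 1 -> 0x4cb8 + 0x88 = 0x4d40 */
        printf("0x%x\n", iro_offset(&tstorm_port_stat, 1, 0, 0));
        return 0;
    }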
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index e3afc8a3..d71f4616 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -77,7 +75,8 @@ enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
+ return ECORE_NOMEM;
#endif
return ECORE_SUCCESS;
@@ -110,6 +109,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
break;
OSAL_VFREE(p_hwfn->p_dev,
p_hwfn->p_l2_info->pp_qid_usage[i]);
+ p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
@@ -119,6 +119,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
#endif
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+ p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;
out_l2_info:
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
@@ -687,7 +688,7 @@ ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
p_ramrod->common.update_approx_mcast_flg = 1;
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
+ u32 *p_bins = p_params->bins;
p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
}
@@ -1185,11 +1186,20 @@ ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM * *pp_doorbell)
{
enum _ecore_status_t rc;
+ u16 pq_id;
+
+ /* TODO - set tc in the pq_params for multi-cos.
+	 * If pacing is enabled, select the queue according to
+	 * rate-limiter availability; otherwise select the queue based
+	 * on multi-COS.
+ */
+ if (IS_ECORE_PACING(p_hwfn))
+ pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
+ else
+ pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);
- /* TODO - set tc in the pq_params for multi-cos */
- rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
- pbl_addr, pbl_size,
- ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
+ pbl_size, pq_id);
if (rc != ECORE_SUCCESS)
return rc;
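
PQ selection for a Tx queue now branches on pacing: a per-queue rate-limiter PQ when pacing is on, a per-TC multi-COS PQ otherwise. A minimal sketch of that selection; the two index helpers are stand-ins for ecore_get_cm_pq_idx_rl()/ecore_get_cm_pq_idx_mcos():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the driver's PQ index helpers; the returned bases
     * are illustrative. */
    static uint16_t pq_idx_rl(uint16_t queue_id) { return 100 + queue_id; }
    static uint16_t pq_idx_mcos(uint8_t tc) { return 200 + tc; }

    static uint16_t select_tx_pq(bool pacing, uint16_t queue_id, uint8_t tc)
    {
        /* pacing -> per-queue rate-limiter PQ; otherwise per-TC PQ */
        return pacing ? pq_idx_rl(queue_id) : pq_idx_mcos(tc);
    }

    int main(void)
    {
        printf("%u %u\n", select_tx_pq(true, 3, 0), select_tx_pq(false, 3, 2));
        return 0; /* prints: 103 202 */
    }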
@@ -1556,8 +1566,8 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
u8 abs_vport_id = 0;
@@ -1596,8 +1606,7 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
/* explicitly clear out the entire vector */
OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
0, sizeof(p_ramrod->approx_mcast.bins));
- OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
/* filter ADD op is explicit set op and it removes
* any existing filters for the vport.
*/
@@ -1606,16 +1615,15 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
u32 bit;
bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- OSAL_SET_BIT(bit, bins);
+ bins[bit / 32] |= 1 << (bit % 32);
}
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
struct vport_update_ramrod_mcast *p_ramrod_bins;
- u32 *p_bins = (u32 *)bins;
p_ramrod_bins = &p_ramrod->approx_mcast;
- p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+ p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
}
}
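
Storing the multicast bins as u32 rather than unsigned long makes the per-register little-endian conversion well defined on LP64 targets, where unsigned long is 64 bits. A standalone sketch of the open-coded bin set that replaces OSAL_SET_BIT():

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_MULTICAST_MAC_BINS_IN_REGS 8 /* matches the u32 bins[8] field */

    int main(void)
    {
        uint32_t bins[ETH_MULTICAST_MAC_BINS_IN_REGS] = { 0 };
        uint32_t bit = 77; /* example output of ecore_mcast_bin_from_mac() */

        bins[bit / 32] |= 1u << (bit % 32); /* same operation as above */

        printf("bins[%u] = 0x%08x\n", bit / 32, bins[bit / 32]);
        return 0; /* prints: bins[2] = 0x00002000 */
    }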
@@ -1945,6 +1953,11 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
p_ah->tx_1519_to_max_byte_packets =
port_stats.eth.u1.ah1.t1519_to_max;
}
+
+ p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ link_change_count));
}
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
@@ -2061,11 +2074,14 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
/* PORT statistics are not necessarily reset, so we need to
* read and create a baseline for future statistics.
+	 * The link change stat is maintained by the MFW; return its value as-is.
*/
if (!p_dev->reset_stats)
DP_INFO(p_dev, "Reset stats not allocated\n");
- else
+ else {
_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
+ p_dev->reset_stats->common.link_change_count = 0;
+ }
}
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
@@ -2150,7 +2166,7 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
p_ramrod->flow_id_valid = 0;
p_ramrod->flow_id = 0;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
@@ -2267,3 +2283,22 @@ out:
return rc;
}
+
+enum _ecore_status_t
+ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid, u32 rate)
+{
+ struct ecore_mcp_link_state *p_link;
+ u8 vport;
+
+ vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "About to rate limit qm vport %d for queue %d with rate %d\n",
+ vport, p_cid->rel.queue_id, rate);
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
+ p_link->speed);
+}
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index f4212cf2..8fa40302 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_L2_H__
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index ed9837bf..575b9e3a 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_L2_API_H__
@@ -332,7 +330,7 @@ struct ecore_sp_vport_update_params {
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
- unsigned long bins[8];
+ u32 bins[8];
struct ecore_rss_params *rss_params;
struct ecore_filter_accept_flags accept_flags;
struct ecore_sge_tpa_params *sge_tpa_params;
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 8edd2e96..ea14c172 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1,14 +1,13 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
+#include "nvm_cfg.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
@@ -240,15 +239,24 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
/* Allocate mcp_info structure */
p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(*p_hwfn->mcp_info));
- if (!p_hwfn->mcp_info)
- goto err;
+ sizeof(*p_hwfn->mcp_info));
+ if (!p_hwfn->mcp_info) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n");
+ return ECORE_NOMEM;
+ }
p_info = p_hwfn->mcp_info;
/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ return ECORE_NOMEM;
+ }
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
+ OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
+ return ECORE_NOMEM;
+ }
#endif
OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
@@ -272,7 +280,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
err:
- DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n");
ecore_mcp_free(p_hwfn);
return ECORE_NOMEM;
}
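
The allocation path now unwinds on failure in reverse order of setup: a failed link_lock allocation releases cmd_lock and then the mcp_info buffer. A generic standalone sketch of that pattern (plain malloc/free stand in for the OSAL lock allocators):

    #include <stdio.h>
    #include <stdlib.h>

    struct info { void *cmd_lock, *link_lock; };

    static struct info *info_init(void)
    {
        struct info *p = calloc(1, sizeof(*p));

        if (!p)
            return NULL;

        p->cmd_lock = malloc(1); /* stands in for the cmd spinlock */
        if (!p->cmd_lock)
            goto err_info;

        p->link_lock = malloc(1); /* stands in for the link spinlock */
        if (!p->link_lock)
            goto err_cmd;

        return p;

    err_cmd:
        free(p->cmd_lock); /* undo in reverse order of setup */
    err_info:
        free(p);
        return NULL;
    }

    int main(void)
    {
        struct info *p = info_init();

        if (!p)
            return 1;
        free(p->link_lock);
        free(p->cmd_lock);
        free(p);
        return 0;
    }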
@@ -593,7 +601,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n");
return ECORE_BUSY;
}
@@ -2121,19 +2129,20 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_media_type)
{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* TODO - Add support for VFs */
if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
return ECORE_BUSY;
}
if (!p_ptt) {
*p_media_type = MEDIA_UNSPECIFIED;
- return ECORE_INVAL;
+ rc = ECORE_INVAL;
} else {
*p_media_type = ecore_rd(p_hwfn, p_ptt,
p_hwfn->mcp_info->port_addr +
@@ -2144,6 +2153,197 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
-	return ECORE_SUCCESS;
+	return rc;
}
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_tranceiver_type)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+ if (!p_ptt) {
+ *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ rc = ECORE_INVAL;
+ } else {
+ *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+ }
+
+ return rc;
+}
+
+static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type)
+{
+ if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+ ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+ (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+ return 1;
+
+ return 0;
+}
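
The readiness predicate above requires three conditions at once: module present, not mid-update, and a known type. A standalone check of its small truth table; the flag values below are illustrative stand-ins for the MFW bits:

    #include <stdio.h>

    /* Illustrative stand-ins for the MFW transceiver state flags and the
     * "no module" type value. */
    #define STATE_PRESENT  (1u << 0)
    #define STATE_UPDATING (1u << 1)
    #define TYPE_NONE      0u

    static int transceiver_ready(unsigned int state, unsigned int type)
    {
        return (state & STATE_PRESENT) &&
               !(state & STATE_UPDATING) &&
               type != TYPE_NONE;
    }

    int main(void)
    {
        printf("%d\n", transceiver_ready(STATE_PRESENT, 0x10));                  /* 1 */
        printf("%d\n", transceiver_ready(STATE_PRESENT | STATE_UPDATING, 0x10)); /* 0 */
        printf("%d\n", transceiver_ready(STATE_PRESENT, TYPE_NONE));             /* 0 */
        return 0;
    }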
+
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_speed_mask)
+{
+ u32 transceiver_data, transceiver_type, transceiver_state;
+
+ ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
+
+ transceiver_state = GET_MFW_FIELD(transceiver_data,
+ ETH_TRANSCEIVER_STATE);
+
+ transceiver_type = GET_MFW_FIELD(transceiver_data,
+ ETH_TRANSCEIVER_TYPE);
+
+ if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
+ return ECORE_INVAL;
+
+ switch (transceiver_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_1G_PCC:
+ case ETH_TRANSCEIVER_TYPE_1G_ACC:
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ case ETH_TRANSCEIVER_TYPE_10G_PCC:
+ case ETH_TRANSCEIVER_TYPE_10G_ACC:
+ case ETH_TRANSCEIVER_TYPE_4x10G:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_100G_AOC:
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_100G_LR4:
+ case ETH_TRANSCEIVER_TYPE_100G_ER4:
+ case ETH_TRANSCEIVER_TYPE_100G_ACC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_25G_AOC:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+ case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_XLPPI:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ break;
+
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+
+ default:
+		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+ transceiver_type);
+ *p_speed_mask = 0xff;
+ break;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_board_config)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* TODO - Add support for VFs */
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
+ return ECORE_BUSY;
+ }
+ if (!p_ptt) {
+ *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+ rc = ECORE_INVAL;
+ } else {
+ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt,
+ MISC_REG_GEN_PURP_CR0);
+ nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt,
+ nvm_cfg_addr + 4);
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ *p_board_config = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ board_cfg));
+ }
+
+ return rc;
+}
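
ecore_mcp_get_board_config() chases three levels of indirection: a scratchpad register yields the NVM config address, the word after it yields the nvm_cfg1 offset, and the port section is then indexed by the physical port. A sketch of the same chain over a toy register file; every layout constant below is a placeholder, not the hardware's:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy word-addressed register file; the real code is byte-addressed
     * and reads nvm_cfg1_offset at nvm_cfg_addr + 4. */
    static uint32_t regs[64];

    #define GEN_PURP_CR0   0 /* stands in for MISC_REG_GEN_PURP_CR0 */
    #define SCRATCH_BASE   8 /* stands in for MCP_REG_SCRATCH */
    #define PORT_STRIDE    4 /* words per nvm_cfg1_port entry */
    #define BOARD_CFG_WORD 1 /* offset of board_cfg within a port entry */

    static uint32_t rd(uint32_t addr) { return regs[addr]; }

    int main(void)
    {
        unsigned int port = 0;

        /* populate the chain the way the MFW would */
        regs[GEN_PURP_CR0] = 2; /* nvm_cfg_addr */
        regs[2 + 1] = 16;       /* nvm_cfg1_offset */
        regs[SCRATCH_BASE + 16 + port * PORT_STRIDE + BOARD_CFG_WORD] = 0xabcd;

        uint32_t nvm_cfg_addr = rd(GEN_PURP_CR0);
        uint32_t nvm_cfg1_off = rd(nvm_cfg_addr + 1);
        uint32_t port_cfg = SCRATCH_BASE + nvm_cfg1_off + port * PORT_STRIDE;

        printf("board_cfg = 0x%x\n", rd(port_cfg + BOARD_CFG_WORD)); /* 0xabcd */
        return 0;
    }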
+
/* @DPDK */
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 6afaf7de..8e125310 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_MCP_H__
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 225890e2..cfb9f99d 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_MCP_API_H__
@@ -595,6 +593,52 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
u32 *media_type);
/**
+ * @brief Get transceiver data of the port.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt
+ * @param p_tranceiver_type - transceiver data value
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ *         ECORE_BUSY - Operation failed.
+ */
+enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_tranceiver_type);
+
+/**
+ * @brief Get transceiver supported speed mask.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt
+ * @param p_speed_mask - Bit mask of all supported speeds.
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ *         ECORE_BUSY - Operation failed.
+ */
+enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_speed_mask);
+
+/**
+ * @brief Get board configuration.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt
+ * @param p_board_config - Board config.
+ *
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful.
+ *         ECORE_BUSY - Operation failed.
+ */
+enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_board_config);
+
+/**
* @brief - Sends a command to the MCP mailbox.
*
* @param p_hwfn - hw function
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
index 3a1de094..f7666472 100644
--- a/drivers/net/qede/base/ecore_mng_tlv.c
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -1,3 +1,9 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index abca7408..f91b25e2 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_PROTO_IF_H__
@@ -31,6 +29,9 @@ struct ecore_eth_pf_params {
* This will set the maximal number of configured steering-filters.
*/
u32 num_arfs_filters;
+
+	/* Allow a VF to change its MAC even when the PF has set a forced MAC. */
+ bool allow_vf_mac_change;
};
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 1d085815..721b8c15 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __RT_DEFS_H__
@@ -205,329 +203,336 @@
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34211
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34212
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34213
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34214
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34215
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34216
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34217
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34218
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34219
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34220
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34221
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34222
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34223
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34224
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34225
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34226
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34227
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34228
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34229
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34230
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34231
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34232
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34233
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34234
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34235
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34236
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34237
-#define QM_REG_PQTX2PF_0_RT_OFFSET 34238
-#define QM_REG_PQTX2PF_1_RT_OFFSET 34239
-#define QM_REG_PQTX2PF_2_RT_OFFSET 34240
-#define QM_REG_PQTX2PF_3_RT_OFFSET 34241
-#define QM_REG_PQTX2PF_4_RT_OFFSET 34242
-#define QM_REG_PQTX2PF_5_RT_OFFSET 34243
-#define QM_REG_PQTX2PF_6_RT_OFFSET 34244
-#define QM_REG_PQTX2PF_7_RT_OFFSET 34245
-#define QM_REG_PQTX2PF_8_RT_OFFSET 34246
-#define QM_REG_PQTX2PF_9_RT_OFFSET 34247
-#define QM_REG_PQTX2PF_10_RT_OFFSET 34248
-#define QM_REG_PQTX2PF_11_RT_OFFSET 34249
-#define QM_REG_PQTX2PF_12_RT_OFFSET 34250
-#define QM_REG_PQTX2PF_13_RT_OFFSET 34251
-#define QM_REG_PQTX2PF_14_RT_OFFSET 34252
-#define QM_REG_PQTX2PF_15_RT_OFFSET 34253
-#define QM_REG_PQTX2PF_16_RT_OFFSET 34254
-#define QM_REG_PQTX2PF_17_RT_OFFSET 34255
-#define QM_REG_PQTX2PF_18_RT_OFFSET 34256
-#define QM_REG_PQTX2PF_19_RT_OFFSET 34257
-#define QM_REG_PQTX2PF_20_RT_OFFSET 34258
-#define QM_REG_PQTX2PF_21_RT_OFFSET 34259
-#define QM_REG_PQTX2PF_22_RT_OFFSET 34260
-#define QM_REG_PQTX2PF_23_RT_OFFSET 34261
-#define QM_REG_PQTX2PF_24_RT_OFFSET 34262
-#define QM_REG_PQTX2PF_25_RT_OFFSET 34263
-#define QM_REG_PQTX2PF_26_RT_OFFSET 34264
-#define QM_REG_PQTX2PF_27_RT_OFFSET 34265
-#define QM_REG_PQTX2PF_28_RT_OFFSET 34266
-#define QM_REG_PQTX2PF_29_RT_OFFSET 34267
-#define QM_REG_PQTX2PF_30_RT_OFFSET 34268
-#define QM_REG_PQTX2PF_31_RT_OFFSET 34269
-#define QM_REG_PQTX2PF_32_RT_OFFSET 34270
-#define QM_REG_PQTX2PF_33_RT_OFFSET 34271
-#define QM_REG_PQTX2PF_34_RT_OFFSET 34272
-#define QM_REG_PQTX2PF_35_RT_OFFSET 34273
-#define QM_REG_PQTX2PF_36_RT_OFFSET 34274
-#define QM_REG_PQTX2PF_37_RT_OFFSET 34275
-#define QM_REG_PQTX2PF_38_RT_OFFSET 34276
-#define QM_REG_PQTX2PF_39_RT_OFFSET 34277
-#define QM_REG_PQTX2PF_40_RT_OFFSET 34278
-#define QM_REG_PQTX2PF_41_RT_OFFSET 34279
-#define QM_REG_PQTX2PF_42_RT_OFFSET 34280
-#define QM_REG_PQTX2PF_43_RT_OFFSET 34281
-#define QM_REG_PQTX2PF_44_RT_OFFSET 34282
-#define QM_REG_PQTX2PF_45_RT_OFFSET 34283
-#define QM_REG_PQTX2PF_46_RT_OFFSET 34284
-#define QM_REG_PQTX2PF_47_RT_OFFSET 34285
-#define QM_REG_PQTX2PF_48_RT_OFFSET 34286
-#define QM_REG_PQTX2PF_49_RT_OFFSET 34287
-#define QM_REG_PQTX2PF_50_RT_OFFSET 34288
-#define QM_REG_PQTX2PF_51_RT_OFFSET 34289
-#define QM_REG_PQTX2PF_52_RT_OFFSET 34290
-#define QM_REG_PQTX2PF_53_RT_OFFSET 34291
-#define QM_REG_PQTX2PF_54_RT_OFFSET 34292
-#define QM_REG_PQTX2PF_55_RT_OFFSET 34293
-#define QM_REG_PQTX2PF_56_RT_OFFSET 34294
-#define QM_REG_PQTX2PF_57_RT_OFFSET 34295
-#define QM_REG_PQTX2PF_58_RT_OFFSET 34296
-#define QM_REG_PQTX2PF_59_RT_OFFSET 34297
-#define QM_REG_PQTX2PF_60_RT_OFFSET 34298
-#define QM_REG_PQTX2PF_61_RT_OFFSET 34299
-#define QM_REG_PQTX2PF_62_RT_OFFSET 34300
-#define QM_REG_PQTX2PF_63_RT_OFFSET 34301
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34302
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34303
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34304
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34305
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34306
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34307
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34308
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34309
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34310
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34311
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34312
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34313
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34314
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34315
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34316
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34317
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34318
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34319
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34320
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34321
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34322
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34323
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34324
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34325
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34326
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34327
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34328
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34329
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34330
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493
+#define QM_REG_PQTX2PF_0_RT_OFFSET 34494
+#define QM_REG_PQTX2PF_1_RT_OFFSET 34495
+#define QM_REG_PQTX2PF_2_RT_OFFSET 34496
+#define QM_REG_PQTX2PF_3_RT_OFFSET 34497
+#define QM_REG_PQTX2PF_4_RT_OFFSET 34498
+#define QM_REG_PQTX2PF_5_RT_OFFSET 34499
+#define QM_REG_PQTX2PF_6_RT_OFFSET 34500
+#define QM_REG_PQTX2PF_7_RT_OFFSET 34501
+#define QM_REG_PQTX2PF_8_RT_OFFSET 34502
+#define QM_REG_PQTX2PF_9_RT_OFFSET 34503
+#define QM_REG_PQTX2PF_10_RT_OFFSET 34504
+#define QM_REG_PQTX2PF_11_RT_OFFSET 34505
+#define QM_REG_PQTX2PF_12_RT_OFFSET 34506
+#define QM_REG_PQTX2PF_13_RT_OFFSET 34507
+#define QM_REG_PQTX2PF_14_RT_OFFSET 34508
+#define QM_REG_PQTX2PF_15_RT_OFFSET 34509
+#define QM_REG_PQTX2PF_16_RT_OFFSET 34510
+#define QM_REG_PQTX2PF_17_RT_OFFSET 34511
+#define QM_REG_PQTX2PF_18_RT_OFFSET 34512
+#define QM_REG_PQTX2PF_19_RT_OFFSET 34513
+#define QM_REG_PQTX2PF_20_RT_OFFSET 34514
+#define QM_REG_PQTX2PF_21_RT_OFFSET 34515
+#define QM_REG_PQTX2PF_22_RT_OFFSET 34516
+#define QM_REG_PQTX2PF_23_RT_OFFSET 34517
+#define QM_REG_PQTX2PF_24_RT_OFFSET 34518
+#define QM_REG_PQTX2PF_25_RT_OFFSET 34519
+#define QM_REG_PQTX2PF_26_RT_OFFSET 34520
+#define QM_REG_PQTX2PF_27_RT_OFFSET 34521
+#define QM_REG_PQTX2PF_28_RT_OFFSET 34522
+#define QM_REG_PQTX2PF_29_RT_OFFSET 34523
+#define QM_REG_PQTX2PF_30_RT_OFFSET 34524
+#define QM_REG_PQTX2PF_31_RT_OFFSET 34525
+#define QM_REG_PQTX2PF_32_RT_OFFSET 34526
+#define QM_REG_PQTX2PF_33_RT_OFFSET 34527
+#define QM_REG_PQTX2PF_34_RT_OFFSET 34528
+#define QM_REG_PQTX2PF_35_RT_OFFSET 34529
+#define QM_REG_PQTX2PF_36_RT_OFFSET 34530
+#define QM_REG_PQTX2PF_37_RT_OFFSET 34531
+#define QM_REG_PQTX2PF_38_RT_OFFSET 34532
+#define QM_REG_PQTX2PF_39_RT_OFFSET 34533
+#define QM_REG_PQTX2PF_40_RT_OFFSET 34534
+#define QM_REG_PQTX2PF_41_RT_OFFSET 34535
+#define QM_REG_PQTX2PF_42_RT_OFFSET 34536
+#define QM_REG_PQTX2PF_43_RT_OFFSET 34537
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34538
+#define QM_REG_PQTX2PF_45_RT_OFFSET 34539
+#define QM_REG_PQTX2PF_46_RT_OFFSET 34540
+#define QM_REG_PQTX2PF_47_RT_OFFSET 34541
+#define QM_REG_PQTX2PF_48_RT_OFFSET 34542
+#define QM_REG_PQTX2PF_49_RT_OFFSET 34543
+#define QM_REG_PQTX2PF_50_RT_OFFSET 34544
+#define QM_REG_PQTX2PF_51_RT_OFFSET 34545
+#define QM_REG_PQTX2PF_52_RT_OFFSET 34546
+#define QM_REG_PQTX2PF_53_RT_OFFSET 34547
+#define QM_REG_PQTX2PF_54_RT_OFFSET 34548
+#define QM_REG_PQTX2PF_55_RT_OFFSET 34549
+#define QM_REG_PQTX2PF_56_RT_OFFSET 34550
+#define QM_REG_PQTX2PF_57_RT_OFFSET 34551
+#define QM_REG_PQTX2PF_58_RT_OFFSET 34552
+#define QM_REG_PQTX2PF_59_RT_OFFSET 34553
+#define QM_REG_PQTX2PF_60_RT_OFFSET 34554
+#define QM_REG_PQTX2PF_61_RT_OFFSET 34555
+#define QM_REG_PQTX2PF_62_RT_OFFSET 34556
+#define QM_REG_PQTX2PF_63_RT_OFFSET 34557
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34586
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 34842
+#define QM_REG_RLGLBLCRD_RT_OFFSET 35098
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 35098
-#define QM_REG_RLPFPERIOD_RT_OFFSET 35099
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35100
-#define QM_REG_RLPFINCVAL_RT_OFFSET 35101
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354
+#define QM_REG_RLPFPERIOD_RT_OFFSET 35355
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356
+#define QM_REG_RLPFINCVAL_RT_OFFSET 35357
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35117
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 35133
+#define QM_REG_RLPFCRD_RT_OFFSET 35389
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 35149
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35150
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35151
+#define QM_REG_RLPFENABLE_RT_OFFSET 35405
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35167
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 35183
+#define QM_REG_WFQPFCRD_RT_OFFSET 35439
#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 35439
-#define QM_REG_WFQVPENABLE_RT_OFFSET 35440
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35441
+#define QM_REG_WFQPFENABLE_RT_OFFSET 35695
+#define QM_REG_WFQVPENABLE_RT_OFFSET 35696
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 35953
+#define QM_REG_TXPQMAP_RT_OFFSET 36209
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36465
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 36977
+#define QM_REG_WFQVPCRD_RT_OFFSET 37233
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 37489
+#define QM_REG_WFQVPMAP_RT_OFFSET 37745
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 38001
+#define QM_REG_PTRTBLTX_RT_OFFSET 38257
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 38321
+#define QM_REG_VOQCRDLINE_RT_OFFSET 39601
#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 38357
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 38393
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 38394
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 38395
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 38396
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 38397
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 38398
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 38399
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 38400
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 38401
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 38405
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 38409
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 38441
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 38457
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 38473
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 38489
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 38505
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 38506
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 38507
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 38515
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 39539
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 40051
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 40563
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 41075
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 41587
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 41619
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 41620
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 41621
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 41622
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 41623
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 41624
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 41625
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 41626
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 41627
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 41628
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 41629
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 41630
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 41631
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 41632
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 41633
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 41634
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 41635
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 41636
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 41637
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 41638
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 41639
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 41640
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 41641
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 41642
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 41643
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 41644
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 41645
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 41646
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 41647
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 41648
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 41649
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 41650
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 41651
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 41652
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 41653
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 41654
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 41655
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 41656
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 41657
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 41658
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 41659
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 41660
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 41661
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 41662
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 41663
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 41664
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 41665
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 41666
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 41667
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 41668
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 41669
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 41670
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 41671
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 41672
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 41673
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 41674
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 41675
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 41676
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 41677
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 41678
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 41679
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 41680
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 41681
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 41682
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 41683
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 41684
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 41685
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 41686
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 41687
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 41688
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 41689
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 41690
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 41691
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 41692
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 41693
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 41694
-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 41695
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 41696
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 41697
-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 41698
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 41699
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 41700
-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 41701
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 41702
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 41703
-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 41704
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 41705
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 41706
-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 41707
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 41708
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 41709
-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 41710
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 41711
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 41712
-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 41713
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 41714
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 41715
-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 41716
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 41717
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 41718
-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 41719
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 41720
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 41721
-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 41722
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 41723
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 41724
-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 41725
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 41726
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 41727
-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 41728
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 41729
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 41730
-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 41731
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 41732
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 41733
-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 41734
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 41735
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 41736
-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 41737
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 41738
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 41739
-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 41740
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 41741
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 41742
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
-#define RUNTIME_ARRAY_SIZE 41743
+#define RUNTIME_ARRAY_SIZE 43023
+
+/* Init Callbacks */
+#define DMAE_READY_CB 0
#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index 86e84964..4633dbeb 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_SP_API_H__
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 7598e7a6..b43baf9d 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -295,6 +293,7 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
}
#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -308,7 +307,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 page_cnt;
- int i;
+ u8 i;
/* update initial eq producer */
ecore_eq_prod_update(p_hwfn,
@@ -343,18 +342,27 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->outer_tag_config.outer_tag.tci =
OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
+ } else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_hwfn->p_dev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ }
+
+ p_ramrod->outer_tag_config.pri_map_valid = 1;
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
+ p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
+	/* enable_stag_pri_change should be set if the port is in BD mode,
+	 * UFP with Host Control mode, or UFP with DCB over the base interface.
+	 */
if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
- p_ramrod->outer_tag_config.outer_tag.tpid =
- OSAL_CPU_TO_LE16(ETH_P_8021Q);
- if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+ if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+ (p_hwfn->p_dcbx_info->results.dcbx_enabled))
p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
else
p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
- p_ramrod->outer_tag_config.pri_map_valid = 1;
- for (i = 0; i < 8; i++)
- p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] =
- (u8)i;
}
/* Place EQ address in RAMROD */
@@ -451,7 +459,8 @@ enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
return rc;
p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
- if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+ if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+ (p_hwfn->p_dcbx_info->results.dcbx_enabled))
p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
else
p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
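A minimal sketch of the shared condition above (illustrative only, assuming
the ecore fields used in the two hunks; the patch itself open-codes the same
test in both ecore_sp_pf_start() and ecore_sp_pf_update_ufp()):

	static bool ecore_ufp_stag_pri_change_needed(struct ecore_hwfn *p_hwfn)
	{
		/* Priority change applies when the OS owns priority selection,
		 * or when negotiated DCBX results are in effect.
		 */
		return (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
		       (p_hwfn->p_dcbx_info->results.dcbx_enabled);
	}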
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 98009c65..e57414cf 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_SP_COMMANDS_H__
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 70ffa8cd..776c86f7 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -30,7 +28,7 @@
#define SPQ_BLOCK_DELAY_MAX_ITER (10)
#define SPQ_BLOCK_DELAY_US (10)
-#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (200)
#define SPQ_BLOCK_SLEEP_MS (5)
/***************************************************************************
@@ -60,8 +58,12 @@ static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
u32 iter_cnt;
comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
- iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+ iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
: SPQ_BLOCK_DELAY_MAX_ITER;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
+ iter_cnt *= 5;
+#endif
while (iter_cnt--) {
OSAL_POLL_MODE_DPC(p_hwfn);
@@ -138,6 +140,14 @@ err:
return ECORE_BUSY;
}
+void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
+ u32 spq_timeout_ms)
+{
+ p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
+ spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
+ SPQ_BLOCK_SLEEP_MAX_ITER;
+}
+
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
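ecore_set_spq_block_timeout() converts a millisecond timeout into sleep
iterations of SPQ_BLOCK_SLEEP_MS (5 ms) each; a short worked example,
assuming the constants defined above:

	ecore_set_spq_block_timeout(p_hwfn, 100); /* 100 / 5 = 20 iters, ~100 ms */
	ecore_set_spq_block_timeout(p_hwfn, 0);   /* default SPQ_BLOCK_SLEEP_MAX_ITER:
						   * 200 iters, ~1000 ms; emulation
						   * multiplies the count by 5.
						   */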
@@ -389,7 +399,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
/* Allocate EQ struct */
p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
if (!p_eq) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_eq'\n");
return ECORE_NOMEM;
}
@@ -402,7 +412,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
num_elem,
sizeof(union event_ring_element),
&p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
@@ -547,8 +557,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
if (!p_spq) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_spq'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
return ECORE_NOMEM;
}
@@ -560,7 +569,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
&p_spq->chain, OSAL_NULL)) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
@@ -576,7 +585,8 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq->p_phys = p_phys;
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+ if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
+ goto spq_allocate_fail;
#endif
p_hwfn->p_spq = p_spq;
@@ -630,9 +640,7 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
if (!p_ent) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate an SPQ entry for a pending"
- " ramrod\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
rc = ECORE_NOMEM;
goto out_unlock;
}
@@ -1013,7 +1021,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
p_consq =
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
if (!p_consq) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Failed to allocate `struct ecore_consq'\n");
return ECORE_NOMEM;
}
@@ -1026,7 +1034,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
ECORE_CHAIN_PAGE_SIZE / 0x80,
0x80,
&p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 526cff08..6142c399 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_SPQ_H__
@@ -116,6 +114,9 @@ struct ecore_spq {
dma_addr_t p_phys;
struct ecore_spq_entry *p_virt;
+ /* SPQ max sleep iterations used in __ecore_spq_block() */
+ u32 block_sleep_max_iter;
+
/* Bitmap for handling out-of-order completions */
#define SPQ_RING_SIZE \
(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
@@ -150,6 +151,16 @@ struct ecore_port;
struct ecore_hwfn;
/**
+ * @brief ecore_set_spq_block_timeout - calculates the maximum sleep
+ * iterations used in __ecore_spq_block().
+ *
+ * @param p_hwfn
+ * @param spq_timeout_ms
+ */
+void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
+ u32 spq_timeout_ms);
+
+/**
* @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that
* Pends it to the future list.
*
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index b1e26d6f..f7ebf7ad 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -61,6 +59,8 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_COALESCE_UPDATE",
"CHANNEL_TLV_QID",
"CHANNEL_TLV_COALESCE_READ",
+ "CHANNEL_TLV_BULLETIN_UPDATE_MAC",
+ "CHANNEL_TLV_UPDATE_MTU",
"CHANNEL_TLV_MAX"
};
@@ -590,8 +590,7 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
if (!p_sriov) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_sriov'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
return ECORE_NOMEM;
}
@@ -648,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->p_iov_info));
if (!p_dev->p_iov_info) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Can't support IOV due to lack of memory\n");
return ECORE_NOMEM;
}
@@ -1968,7 +1967,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
if (!p_vf->vport_instance)
return ECORE_INVAL;
- if (events & (1 << MAC_ADDR_FORCED)) {
+ if ((events & (1 << MAC_ADDR_FORCED)) ||
+ p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1989,7 +1989,11 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
return rc;
}
- p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ p_vf->configured_features |=
+ 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
if (events & (1 << VLAN_ADDR_FORCED)) {
@@ -2854,6 +2858,45 @@ out:
length, status);
}
+static enum _ecore_status_t
+ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct ecore_sp_vport_update_params params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_mtu_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+
+	/* Validate that the VF can send such a request */
+ if (!p_vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing MTU update\n",
+ p_vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto send_status;
+ }
+
+ p_req = &mbx->req_virt->update_mtu;
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.mtu = p_req->mtu;
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+send_status:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ CHANNEL_TLV_UPDATE_MTU,
+ sizeof(struct pfvf_def_resp_tlv),
+ status);
+ return rc;
+}
+
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type)
{
@@ -2975,8 +3018,7 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
p_data->update_approx_mcast_flg = 1;
OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
- sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
}
@@ -4137,6 +4179,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_READ:
ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_MTU:
+ ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
/* If we've received a message from a VF we consider malicious
@@ -4370,7 +4415,11 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
return;
}
- feature = 1 << MAC_ADDR_FORCED;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ feature = 1 << MAC_ADDR_FORCED;
+
OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
@@ -4411,9 +4460,13 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
return ECORE_SUCCESS;
}
+#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
bool b_untagged_only, int vfid)
@@ -4470,6 +4523,7 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
*opaque_fid = vf_info->opaque_fid;
}
+#endif
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid)
@@ -4657,6 +4711,22 @@ u32 ecore_iov_pfvf_msg_length(void)
return sizeof(union pfvf_tlvs);
}
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return OSAL_NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap &
+ (1 << VFPF_BULLETIN_MAC_ADDR)))
+ return OSAL_NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
struct ecore_vf_info *p_vf;
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 850b1052..50c7d2c9 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_SRIOV_H__
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
index c77ec260..b893f1d4 100644
--- a/drivers/net/qede/base/ecore_status.h
+++ b/drivers/net/qede/base/ecore_status.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_STATUS_H__
diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h
index 034cf1eb..249136b0 100644
--- a/drivers/net/qede/base/ecore_utils.h
+++ b/drivers/net/qede/base/ecore_utils.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_UTILS_H__
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index e0f2dd5a..d2213f79 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -1275,8 +1273,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
resp_size += sizeof(struct pfvf_def_resp_tlv);
OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
- sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
}
update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1473,7 +1470,7 @@ void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
u32 bit;
bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- OSAL_SET_BIT(bit, sp_params.bins);
+ sp_params.bins[bit / 32] |= 1 << (bit % 32);
}
}
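With the bins carried as 32-bit words instead of unsigned long, the bin
index is split explicitly into a word index and a bit offset; a worked
example, assuming ecore_mcast_bin_from_mac() yields a bin in [0, 255]:

	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] = { 0 };
	u32 bit = 77;				/* example bin for some MAC */

	bins[bit / 32] |= 1 << (bit % 32);	/* sets bit 13 of bins[2] */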
@@ -1629,6 +1626,39 @@ exit:
return rc;
}
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_mtu_tlv *p_req;
+ struct pfvf_def_resp_tlv *p_resp;
+ enum _ecore_status_t rc;
+
+ if (!mtu)
+ return ECORE_INVAL;
+
+ /* clear mailbox and prep header tlv */
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU,
+ sizeof(*p_req));
+ p_req->mtu = mtu;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requesting MTU update to %d\n", mtu);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED)
+ rc = ECORE_INVAL;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id)
{
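A minimal caller sketch for the new VF-side request (hypothetical call
site; the TLV exchange and the PFVF_STATUS_NOT_SUPPORTED fallback are
handled inside ecore_vf_pf_update_mtu() above):

	enum _ecore_status_t rc;

	rc = ecore_vf_pf_update_mtu(p_hwfn, 1500);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false, "VF MTU update failed, rc = %d\n", rc);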
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index de2758cb..a07f82eb 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_VF_H__
@@ -319,5 +317,14 @@ void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
enum BAR_ID bar_id);
+
+/**
+ * @brief ecore_vf_pf_update_mtu - Update MTU for VF.
+ *
+ * @param p_hwfn
+ * @param mtu
+ */
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu);
#endif
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index 9815cf8a..1a9fb3b1 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_VF_API_H__
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index ecb00649..c30677ab 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ECORE_VF_PF_IF_H__
@@ -396,7 +394,13 @@ struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
- u64 bins[8];
+	/* This was a mistake; there are only 256 approx bins,
+	 * and in the HSI they are divided into 32-bit values.
+	 * As old VFs used to set bits in these values on their side,
+	 * the upper half of the array is never expected to contain any data.
+	 */
+ u64 bins[4];
+ u64 obsolete_bins[4];
};
struct vfpf_vport_update_accept_param_tlv {
@@ -525,6 +529,18 @@ struct pfvf_read_coal_resp_tlv {
u8 padding[6];
};
+struct vfpf_bulletin_update_mac_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
+struct vfpf_update_mtu_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 mtu;
+ u8 padding[6];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -539,6 +555,8 @@ union vfpf_tlvs {
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
struct vfpf_read_coal_req_tlv read_coal_req;
+ struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
+ struct vfpf_update_mtu_tlv update_mtu;
struct tlv_buffer_size tlv_buf_size;
};
@@ -669,6 +687,8 @@ enum {
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_COALESCE_READ,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ CHANNEL_TLV_UPDATE_MTU,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 45a0356d..abfa6854 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef __ETH_COMMON__
@@ -119,6 +117,9 @@
/* Number of etherType values configured by driver for control frame check */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+/* GFS constants */
+#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
+
/*
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 81ca6634..81aa88e7 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
/****************************************************************************
@@ -800,6 +798,7 @@ struct public_port {
#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
@@ -1777,6 +1776,8 @@ struct public_drv_mb {
#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001
/* MFW supports EEE */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+/* MFW supports DRV_LOAD Timeout */
+#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004
/* MFW supports virtual link */
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index c99e805d..ab86260e 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
/****************************************************************************
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index ad15d28a..402f6204 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1,17 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
@@ -1222,3 +1212,5 @@
#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
+
+#define RSS_REG_RSS_RAM_MASK 0x238c10UL
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index a91f4368..df52ea92 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "qede_ethdev.h"
@@ -16,7 +14,7 @@ int qede_logtype_init;
int qede_logtype_driver;
static const struct qed_eth_ops *qed_ops;
-static int64_t timer_period = 1;
+#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
/* VXLAN tunnel classification mapping */
const struct _qede_udp_tunn_types {
@@ -339,6 +337,24 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
}
static void
+qede_interrupt_handler_intx(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+ u64 status;
+
+ /* Check if our device actually raised an interrupt */
+ status = ecore_int_igu_read_sisr_reg(ECORE_LEADING_HWFN(edev));
+ if (status & 0x1) {
+ qede_interrupt_action(ECORE_LEADING_HWFN(edev));
+
+ if (rte_intr_enable(eth_dev->intr_handle))
+ DP_ERR(edev, "rte_intr_enable failed\n");
+ }
+}
+
+static void
qede_interrupt_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
@@ -499,6 +515,9 @@ qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
return 0;
}
+#define QEDE_NPAR_TX_SWITCHING "npar_tx_switching"
+#define QEDE_VF_TX_SWITCHING "vf_tx_switching"
+
/* Activate or deactivate vport via vport-update */
int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
{
@@ -515,12 +534,9 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
params.update_vport_active_tx_flg = 1;
params.vport_active_rx_flg = flg;
params.vport_active_tx_flg = flg;
- if (!qdev->enable_tx_switching) {
- if (IS_VF(edev)) {
- params.update_tx_switching_flg = 1;
- params.tx_switching_flg = !flg;
- DP_INFO(edev, "VF tx-switching is disabled\n");
- }
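+	/* True only when activating (flg set) a vport whose tx-switching
+	 * knob is off: ~enable_tx_switching has its low bit set iff the
+	 * knob is 0, so the AND is non-zero only for that combination.
+	 */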
+ if (~qdev->enable_tx_switching & flg) {
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = !flg;
}
for_each_hwfn(edev, i) {
p_hwfn = &edev->hwfns[i];
@@ -591,38 +607,9 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
}
}
qdev->enable_lro = flg;
- DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
-
- return 0;
-}
-
-/* Update MTU via vport-update without doing port restart.
- * The vport must be deactivated before calling this API.
- */
-int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_sp_vport_update_params params;
- struct ecore_hwfn *p_hwfn;
- int rc;
- int i;
+ eth_dev->data->lro = flg;
- memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
- params.vport_id = 0;
- params.mtu = mtu;
- params.vport_id = 0;
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- params.opaque_fid = p_hwfn->hw_info.opaque_fid;
- rc = ecore_sp_vport_update(p_hwfn, &params,
- ECORE_SPQ_MODE_EBLOCK, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to update MTU\n");
- return -1;
- }
- }
- DP_INFO(edev, "MTU updated to %u\n", mtu);
+ DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
return 0;
}
@@ -780,6 +767,36 @@ qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
}
static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.ip_gre.b_update_mode = true;
+ tunn.ip_gre.b_mode_enabled = enable;
+	tunn.ip_gre.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->ipgre.enable = enable;
+ DP_INFO(edev, "IPGRE is %s\n",
+ enable ? "enabled" : "disabled");
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+static int
qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
enum rte_eth_tunnel_type tunn_type, bool enable)
{
@@ -792,6 +809,9 @@ qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
case RTE_TUNNEL_TYPE_GENEVE:
rc = qede_geneve_enable(eth_dev, clss, enable);
break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ rc = qede_ipgre_enable(eth_dev, clss, enable);
+ break;
default:
rc = -EINVAL;
break;
@@ -817,10 +837,10 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
ETHER_ADDR_LEN) == 0) &&
ucast->vni == tmp->vni &&
ucast->vlan == tmp->vlan) {
- DP_ERR(edev, "Unicast MAC is already added"
- " with vlan = %u, vni = %u\n",
- ucast->vlan, ucast->vni);
- return -EEXIST;
+ DP_INFO(edev, "Unicast MAC is already added"
+ " with vlan = %u, vni = %u\n",
+ ucast->vlan, ucast->vni);
+ return 0;
}
}
u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
@@ -854,47 +874,69 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
}
static int
-qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
- bool add)
+qede_add_mcast_filters(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ether_addr *mac_addr;
- struct qede_mcast_entry *tmp = NULL;
- struct qede_mcast_entry *m;
+ struct ecore_filter_mcast mcast;
+ struct qede_mcast_entry *m = NULL;
+ uint8_t i;
+ int rc;
- mac_addr = (struct ether_addr *)mcast->mac;
- if (add) {
- SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
- if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
- DP_ERR(edev,
- "Multicast MAC is already added\n");
- return -EEXIST;
- }
- }
+ for (i = 0; i < mc_addrs_num; i++) {
m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
- RTE_CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE);
if (!m) {
- DP_ERR(edev,
- "Did not allocate memory for mcast\n");
+ DP_ERR(edev, "Did not allocate memory for mcast\n");
return -ENOMEM;
}
- ether_addr_copy(mac_addr, &m->mac);
+ ether_addr_copy(&mc_addrs[i], &m->mac);
SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
- qdev->num_mc_addr++;
- } else {
- SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
- if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
- break;
- }
- if (tmp == NULL) {
- DP_INFO(edev, "Multicast mac is not found\n");
- return -EINVAL;
- }
- SLIST_REMOVE(&qdev->mc_list_head, tmp,
- qede_mcast_entry, list);
- qdev->num_mc_addr--;
}
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.num_mc_addrs = mc_addrs_num;
+ mcast.opcode = ECORE_FILTER_ADD;
+ for (i = 0; i < mc_addrs_num; i++)
+ ether_addr_copy(&mc_addrs[i], (struct ether_addr *)
+ &mcast.mac[i]);
+ rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+		DP_ERR(edev, "Failed to add multicast filter (rc = %d)\n", rc);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_mcast_entry *tmp = NULL;
+ struct ecore_filter_mcast mcast;
+ int j;
+ int rc;
+
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.num_mc_addrs = qdev->num_mc_addr;
+ mcast.opcode = ECORE_FILTER_REMOVE;
+ j = 0;
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ ether_addr_copy(&tmp->mac, (struct ether_addr *)&mcast.mac[j]);
+ j++;
+ }
+ rc = ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to delete multicast filter\n");
+ return -1;
+ }
+ /* Init the list */
+ while (!SLIST_EMPTY(&qdev->mc_list_head)) {
+ tmp = SLIST_FIRST(&qdev->mc_list_head);
+ SLIST_REMOVE_HEAD(&qdev->mc_list_head, list);
+ }
+ SLIST_INIT(&qdev->mc_list_head);
return 0;
}
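A sketch of the expected calling pattern (hypothetical wrapper showing how
a set_mc_addr_list-style callback would combine the two helpers above):

	static int example_set_mc_addr_list(struct rte_eth_dev *dev,
					    struct ether_addr *mc_addrs,
					    uint32_t mc_addrs_num)
	{
		/* Flush the previously programmed list... */
		int rc = qede_del_mcast_filters(dev);

		/* ...then program the new one in a single mcast command. */
		if (!rc && mc_addrs_num)
			rc = qede_add_mcast_filters(dev, mc_addrs, mc_addrs_num);
		return rc;
	}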
@@ -905,59 +947,25 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc;
- struct ecore_filter_mcast mcast;
- struct qede_mcast_entry *tmp;
- uint16_t j = 0;
+ enum _ecore_status_t rc = ECORE_INVAL;
- /* Multicast */
- if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
- if (add) {
- if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
- DP_ERR(edev,
- "Mcast filter table limit exceeded, "
- "Please enable mcast promisc mode\n");
- return -ECORE_INVAL;
- }
- }
- rc = qede_mcast_filter(eth_dev, ucast, add);
- if (rc == 0) {
- DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
- memset(&mcast, 0, sizeof(mcast));
- mcast.num_mc_addrs = qdev->num_mc_addr;
- mcast.opcode = ECORE_FILTER_ADD;
- SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
- ether_addr_copy(&tmp->mac,
- (struct ether_addr *)&mcast.mac[j]);
- j++;
- }
- rc = ecore_filter_mcast_cmd(edev, &mcast,
- ECORE_SPQ_MODE_CB, NULL);
- }
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to add multicast filter"
- " rc = %d, op = %d\n", rc, add);
- }
- } else { /* Unicast */
- if (add) {
- if (qdev->num_uc_addr >=
- qdev->dev_info.num_mac_filters) {
- DP_ERR(edev,
- "Ucast filter table limit exceeded,"
- " Please enable promisc mode\n");
- return -ECORE_INVAL;
- }
- }
- rc = qede_ucast_filter(eth_dev, ucast, add);
- if (rc == 0)
- rc = ecore_filter_ucast_cmd(edev, ucast,
- ECORE_SPQ_MODE_CB, NULL);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
- rc, add);
- }
+ if (add && (qdev->num_uc_addr >= qdev->dev_info.num_mac_filters)) {
+ DP_ERR(edev, "Ucast filter table limit exceeded,"
+ " Please enable promisc mode\n");
+ return ECORE_INVAL;
}
+ rc = qede_ucast_filter(eth_dev, ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+	/* Indicate an error only for the add-filter operation;
+	 * delete-filter failures are not severe.
+	 */
+ if ((rc != ECORE_SUCCESS) && add)
+ DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+ rc, add);
+
return rc;
}
@@ -968,7 +976,11 @@ qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
struct ecore_filter_ucast ucast;
int re;
+ if (!is_valid_assigned_ether_addr(mac_addr))
+ return -EINVAL;
+
qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_ADD;
ucast.type = ECORE_FILTER_MAC;
ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
@@ -990,6 +1002,9 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
return;
}
+ if (!is_valid_assigned_ether_addr(&eth_dev->data->mac_addrs[index]))
+ return;
+
qede_set_ucast_cmn_params(&ucast);
ucast.opcode = ECORE_FILTER_REMOVE;
ucast.type = ECORE_FILTER_MAC;
@@ -998,10 +1013,10 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
ether_addr_copy(&eth_dev->data->mac_addrs[index],
(struct ether_addr *)&ucast.mac);
- ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+ qede_mac_int_ops(eth_dev, &ucast, false);
}
-static void
+static int
qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
@@ -1010,12 +1025,12 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
mac_addr->addr_bytes)) {
DP_ERR(edev, "Setting MAC address is not allowed\n");
- ether_addr_copy(&qdev->primary_mac,
- &eth_dev->data->mac_addrs[0]);
- return;
+ return -EPERM;
}
- qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
+ qede_mac_addr_remove(eth_dev, 0);
+
+ return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
@@ -1093,9 +1108,9 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
if (tmp->vid == vlan_id) {
- DP_ERR(edev, "VLAN %u already configured\n",
- vlan_id);
- return -EEXIST;
+ DP_INFO(edev, "VLAN %u already configured\n",
+ vlan_id);
+ return 0;
}
}
@@ -1166,10 +1181,10 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
(void)qede_vlan_stripping(eth_dev, 1);
else
(void)qede_vlan_stripping(eth_dev, 0);
@@ -1177,7 +1192,7 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
if (mask & ETH_VLAN_FILTER_MASK) {
/* VLAN filtering kicks in when a VLAN is added */
- if (rxmode->hw_vlan_filter) {
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
qede_vlan_filter_set(eth_dev, 0, 1);
} else {
if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
@@ -1187,7 +1202,8 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
/* Signal app that VLAN filtering is still
* enabled
*/
- rxmode->hw_vlan_filter = true;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
} else {
qede_vlan_filter_set(eth_dev, 0, 0);
}
@@ -1195,13 +1211,11 @@ static int qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
}
if (mask & ETH_VLAN_EXTEND_MASK)
- DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
- " and classification is based on outer tag only\n");
+ DP_ERR(edev, "Extend VLAN not supported\n");
qdev->vlan_offload_mask = mask;
- DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
- mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+ DP_INFO(edev, "VLAN offload mask %d\n", mask);
return 0;
}
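On the application side the per-port rxmode offloads bitmap replaces the
old hw_vlan_* fields; an illustrative configuration snippet (hypothetical,
standard rte_ethdev usage):

	struct rte_eth_conf port_conf = { 0 };

	/* qede_vlan_offload_set() above reads these same flags */
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP |
				     DEV_RX_OFFLOAD_VLAN_FILTER;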
@@ -1267,19 +1281,25 @@ static void qede_fastpath_start(struct ecore_dev *edev)
static int qede_dev_start(struct rte_eth_dev *eth_dev)
{
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
PMD_INIT_FUNC_TRACE(edev);
+ /* Update MTU only if it has changed */
+ if (eth_dev->data->mtu != qdev->mtu) {
+ if (qede_update_mtu(eth_dev, qdev->mtu))
+ goto err;
+ }
+
/* Configure TPA parameters */
- if (rxmode->enable_lro) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) {
if (qede_enable_tpa(eth_dev, true))
return -EINVAL;
/* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
+ if (!eth_dev->data->scattered_rx)
+ rxmode->offloads |= DEV_RX_OFFLOAD_SCATTER;
}
/* Start queues */
@@ -1294,7 +1314,7 @@ static int qede_dev_start(struct rte_eth_dev *eth_dev)
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
- if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
if (qede_config_rss(eth_dev))
goto err;
@@ -1339,10 +1359,9 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "Device is stopped\n");
}
-#define QEDE_TX_SWITCHING "vf_txswitch"
-
const char *valid_args[] = {
- QEDE_TX_SWITCHING,
+ QEDE_NPAR_TX_SWITCHING,
+ QEDE_VF_TX_SWITCHING,
NULL,
};
@@ -1361,8 +1380,13 @@ static int qede_args_check(const char *key, const char *val, void *opaque)
return errno;
}
- if (strcmp(QEDE_TX_SWITCHING, key) == 0)
+ if ((strcmp(QEDE_NPAR_TX_SWITCHING, key) == 0) ||
+ ((strcmp(QEDE_VF_TX_SWITCHING, key) == 0) && IS_VF(edev))) {
qdev->enable_tx_switching = !!tmp;
+ DP_INFO(edev, "Disabling %s tx-switching\n",
+ strcmp(QEDE_NPAR_TX_SWITCHING, key) ?
+ "VF" : "NPAR");
+ }
return ret;
}
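With the rename, tx-switching is controlled per device via devargs, e.g.
(PCI address hypothetical) "testpmd -w 05:00.0,vf_tx_switching=0 -- -i" on
a VF, or "npar_tx_switching=0" on an NPAR partition; the VF key is ignored
unless the device is a VF, as the IS_VF() check above shows.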
@@ -1411,15 +1435,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
/* Check requirements for 100G mode */
if (ECORE_IS_CMT(edev)) {
if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
+ eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
+ "100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
}
@@ -1437,22 +1461,11 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
/* Parse devargs and fix up rxmode */
if (qede_args(eth_dev))
- return -ENOTSUP;
+ DP_NOTICE(edev, false,
+ "Invalid devargs supplied, requested change will not take effect\n");
- /* Sanity checks and throw warnings */
- if (rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
-
- if (!rxmode->hw_strip_crc)
- DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
-
- if (!rxmode->hw_ip_checksum)
- DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
- "in hw\n");
- if (rxmode->header_split)
- DP_INFO(edev, "Header split enable is not supported\n");
- if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
- ETH_MQ_RX_RSS)) {
+ if (!(rxmode->mq_mode == ETH_MQ_RX_NONE ||
+ rxmode->mq_mode == ETH_MQ_RX_RSS)) {
DP_ERR(edev, "Unsupported multi-queue mode\n");
return -ENOTSUP;
}
@@ -1467,19 +1480,22 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
return -ENOMEM;
/* If jumbo enabled adjust MTU */
- if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+ if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
eth_dev->data->mtu =
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN;
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
if (qede_start_vport(qdev, eth_dev->data->mtu))
return -1;
+
qdev->mtu = eth_dev->data->mtu;
/* Enable VLAN offloads by default */
ret = qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK);
if (ret)
return ret;
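Worked example for the jumbo-frame path above: with max_rx_pkt_len = 9018,
the derived MTU is 9018 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9000.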
@@ -1515,7 +1531,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
@@ -1534,26 +1549,42 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
-
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .txq_flags = QEDE_TXQ_FLAGS,
- };
-
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO);
-
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+ dev_info->rx_queue_offload_capa = 0;
+
+	/* TX offloads are set on a per-packet basis, so they are
+	 * applicable at both the port and queue levels.
+	 */
dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_TCP_TSO |
DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
DEV_TX_OFFLOAD_GENEVE_TNL_TSO);
+ dev_info->tx_queue_offload_capa = dev_info->tx_offload_capa;
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .offloads = DEV_TX_OFFLOAD_MULTI_SEGS,
+ };
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ /* Packets are always dropped if no descriptors are available */
+ .rx_drop_en = 1,
+ .offloads = 0,
+ };
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
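
These capability fields feed the 18.08 port/queue offload negotiation. A short sketch of the consuming side, assuming an application that wants scatter and jumbo frames (names and the requested flag set are illustrative):

#include <rte_ethdev.h>

static int
configure_port_sketch(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info info;
	uint64_t want = DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_SCATTER;

	rte_eth_dev_info_get(port_id, &info);

	/* Request only what the port advertises; queue-level offloads
	 * must then be a subset of what was set at the port level. */
	conf->rxmode.offloads = want & info.rx_offload_capa;

	return rte_eth_dev_configure(port_id, 1, 1, conf);
}
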
@@ -1578,18 +1609,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output q_link;
+ struct rte_eth_link link;
uint16_t link_duplex;
- struct qed_link_output link;
- struct rte_eth_link *curr = &eth_dev->data->dev_link;
- memset(&link, 0, sizeof(struct qed_link_output));
- qdev->ops->common->get_link(edev, &link);
+ memset(&q_link, 0, sizeof(q_link));
+ memset(&link, 0, sizeof(link));
+
+ qdev->ops->common->get_link(edev, &q_link);
/* Link Speed */
- curr->link_speed = link.speed;
+ link.link_speed = q_link.speed;
/* Link Mode */
- switch (link.duplex) {
+ switch (q_link.duplex) {
case QEDE_DUPLEX_HALF:
link_duplex = ETH_LINK_HALF_DUPLEX;
break;
@@ -1600,21 +1633,20 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
default:
link_duplex = -1;
}
- curr->link_duplex = link_duplex;
+ link.link_duplex = link_duplex;
/* Link Status */
- curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+ link.link_status = q_link.link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
/* AN */
- curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
+ link.link_autoneg = (q_link.supported_caps & QEDE_SUPPORTED_AUTONEG) ?
ETH_LINK_AUTONEG : ETH_LINK_FIXED;
DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n",
- curr->link_speed, curr->link_duplex,
- curr->link_autoneg, curr->link_status);
+ link.link_speed, link.link_duplex,
+ link.link_autoneg, link.link_status);
- /* return 0 means link status changed, -1 means not changed */
- return ((curr->link_status == link.link_up) ? -1 : 0);
+ return rte_eth_linkstatus_set(eth_dev, &link);
}
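
rte_eth_linkstatus_set() centralises what drivers used to hand-roll: struct rte_eth_link fits in 64 bits, so the update can be a single atomic exchange. A simplified conceptual sketch, not the literal EAL implementation:

#include <rte_atomic.h>
#include <rte_ethdev_driver.h>

static int
linkstatus_set_sketch(struct rte_eth_dev *dev,
		      const struct rte_eth_link *new_link)
{
	volatile uint64_t *dst = (volatile uint64_t *)&dev->data->dev_link;
	union { uint64_t u64; struct rte_eth_link link; } old, val;

	val.link = *new_link;
	old.u64 = rte_atomic64_exchange(dst, val.u64);

	/* 0 = status changed, -1 = unchanged; qed_link_update() below
	 * uses the 0 case to fire RTE_ETH_EVENT_INTR_LSC callbacks. */
	return (old.link.link_status == new_link->link_status) ? -1 : 0;
}
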
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -1661,7 +1693,7 @@ static void qede_poll_sp_sb_cb(void *param)
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
qede_interrupt_action(&edev->hwfns[1]);
- rc = rte_eal_alarm_set(timer_period * US_PER_S,
+ rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
qede_poll_sp_sb_cb,
(void *)eth_dev);
if (rc != 0) {
@@ -1700,8 +1732,20 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- qede_interrupt_handler, (void *)eth_dev);
+
+ switch (pci_dev->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler_intx,
+ (void *)eth_dev);
+ break;
+ default:
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ qede_interrupt_handler,
+ (void *)eth_dev);
+ }
+
if (ECORE_IS_CMT(edev))
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
}
@@ -1990,6 +2034,99 @@ static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
QED_FILTER_RX_MODE_TYPE_REGULAR);
}
+static int
+qede_set_mc_addr_list(struct rte_eth_dev *eth_dev, struct ether_addr *mc_addrs,
+ uint32_t mc_addrs_num)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint8_t i;
+
+ if (mc_addrs_num > ECORE_MAX_MC_ADDRS) {
+ DP_ERR(edev, "Reached max multicast filters limit,"
+ "Please enable multicast promisc mode\n");
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < mc_addrs_num; i++) {
+ if (!is_multicast_ether_addr(&mc_addrs[i])) {
+ DP_ERR(edev, "Not a valid multicast MAC\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Flush all existing entries */
+ if (qede_del_mcast_filters(eth_dev))
+ return -1;
+
+ /* Set new mcast list */
+ return qede_add_mcast_filters(eth_dev, mc_addrs, mc_addrs_num);
+}
+
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ if (IS_PF(edev)) {
+ struct ecore_sp_vport_update_params params;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+ } else {
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_vf_pf_update_mtu(p_hwfn, mtu);
+ if (rc == ECORE_INVAL) {
+ DP_INFO(edev, "VF MTU Update TLV not supported\n");
+ /* Recreate vport */
+ rc = qede_start_vport(qdev, mtu);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Restore config lost due to vport stop */
+ if (eth_dev->data->promiscuous)
+ qede_promiscuous_enable(eth_dev);
+ else
+ qede_promiscuous_disable(eth_dev);
+
+ if (eth_dev->data->all_multicast)
+ qede_allmulticast_enable(eth_dev);
+ else
+ qede_allmulticast_disable(eth_dev);
+
+ qede_vlan_offload_set(eth_dev,
+ qdev->vlan_offload_mask);
+ } else if (rc != ECORE_SUCCESS) {
+ goto err;
+ }
+ }
+ }
+ DP_INFO(edev, "%s MTU updated to %u\n", IS_PF(edev) ? "PF" : "VF", mtu);
+
+ return 0;
+
+err:
+ DP_ERR(edev, "Failed to update MTU\n");
+ return -1;
+}
+
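
From the application side this path is reached through the standard MTU op; a minimal illustrative trigger (the wrapper name is hypothetical):

#include <rte_ethdev.h>

static int
set_port_mtu_sketch(uint16_t port_id, uint16_t mtu)
{
	/* Dispatches to the PMD's .mtu_set op (qede_set_mtu), which per
	 * this patch takes the vport-update path above when supported
	 * instead of recreating the vport. */
	return rte_eth_dev_set_mtu(port_id, mtu);
}
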
static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf)
{
@@ -2065,6 +2202,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
RTE_PTYPE_TUNNEL_VXLAN,
RTE_PTYPE_L4_FRAG,
RTE_PTYPE_TUNNEL_GENEVE,
+ RTE_PTYPE_TUNNEL_GRE,
/* Inner */
RTE_PTYPE_INNER_L2_ETHER,
RTE_PTYPE_INNER_L2_ETHER_VLAN,
@@ -2143,7 +2281,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
vport_update_params.vport_id = 0;
/* pass the L2 handles instead of qids */
for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
- idx = qdev->rss_ind_table[i];
+ idx = i % QEDE_RSS_COUNT(qdev);
rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
}
vport_update_params.rss_params = &rss_params;
@@ -2393,7 +2531,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
restart = true;
}
rte_delay_ms(1000);
- qede_start_vport(qdev, mtu); /* Recreate vport */
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
@@ -2413,23 +2550,9 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
}
if (max_rx_pkt_len > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
- /* Restore config lost due to vport stop */
- qede_mac_addr_set(dev, &qdev->primary_mac);
- if (dev->data->promiscuous)
- qede_promiscuous_enable(dev);
- else
- qede_promiscuous_disable(dev);
-
- if (dev->data->all_multicast)
- qede_allmulticast_enable(dev);
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- qede_allmulticast_disable(dev);
-
- qede_vlan_offload_set(dev, qdev->vlan_offload_mask);
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
if (!dev->data->dev_started && restart) {
qede_dev_start(dev);
@@ -2488,7 +2611,6 @@ qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
ECORE_TUNN_CLSS_MAC_VLAN, false);
break;
-
case RTE_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
DP_ERR(edev, "UDP port %u doesn't exist\n",
@@ -2578,7 +2700,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
qdev->vxlan.udp_port = udp_port;
break;
-
case RTE_TUNNEL_TYPE_GENEVE:
if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
DP_INFO(edev,
@@ -2616,7 +2737,6 @@ qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
qdev->geneve.udp_port = udp_port;
break;
-
default:
return ECORE_INVAL;
}
@@ -2782,7 +2902,8 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
qdev->geneve.filter_type = conf->filter_type;
}
- if (!qdev->vxlan.enable || !qdev->geneve.enable)
+ if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+ !qdev->ipgre.enable)
return qede_tunn_enable(eth_dev, clss,
conf->tunnel_type,
true);
@@ -2818,15 +2939,14 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
switch (filter_conf->tunnel_type) {
case RTE_TUNNEL_TYPE_VXLAN:
case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
DP_INFO(edev,
"Packet steering to the specified Rx queue"
" is not supported with UDP tunneling");
return(qede_tunn_filter_config(eth_dev, filter_op,
filter_conf));
- /* Place holders for future tunneling support */
case RTE_TUNNEL_TYPE_TEREDO:
case RTE_TUNNEL_TYPE_NVGRE:
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
case RTE_L2_TUNNEL_TYPE_E_TAG:
DP_ERR(edev, "Unsupported tunnel type %d\n",
filter_conf->tunnel_type);
@@ -2871,6 +2991,7 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.promiscuous_disable = qede_promiscuous_disable,
.allmulticast_enable = qede_allmulticast_enable,
.allmulticast_disable = qede_allmulticast_disable,
+ .set_mc_addr_list = qede_set_mc_addr_list,
.dev_stop = qede_dev_stop,
.dev_close = qede_dev_close,
.stats_get = qede_get_stats,
@@ -2911,6 +3032,7 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.promiscuous_disable = qede_promiscuous_disable,
.allmulticast_enable = qede_allmulticast_enable,
.allmulticast_disable = qede_allmulticast_disable,
+ .set_mc_addr_list = qede_set_mc_addr_list,
.dev_stop = qede_dev_stop,
.dev_close = qede_dev_close,
.stats_get = qede_get_stats,
@@ -2928,6 +3050,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.mtu_set = qede_set_mtu,
.udp_tunnel_port_add = qede_udp_dst_port_add,
.udp_tunnel_port_del = qede_udp_dst_port_del,
+ .mac_addr_add = qede_mac_addr_add,
+ .mac_addr_remove = qede_mac_addr_remove,
+ .mac_addr_set = qede_mac_addr_set,
};
static void qede_update_pf_params(struct ecore_dev *edev)
@@ -2956,6 +3081,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
+ uint32_t int_mode;
int rc;
/* Extract key data structures */
@@ -3000,8 +3126,22 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
return -ENODEV;
}
qede_update_pf_params(edev);
- rte_intr_callback_register(&pci_dev->intr_handle,
- qede_interrupt_handler, (void *)eth_dev);
+
+ switch (pci_dev->intr_handle.type) {
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ int_mode = ECORE_INT_MODE_INTA;
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler_intx,
+ (void *)eth_dev);
+ break;
+ default:
+ int_mode = ECORE_INT_MODE_MSIX;
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ qede_interrupt_handler,
+ (void *)eth_dev);
+ }
+
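
The same interrupt-type switch appears in qede_dev_close() above; a table-style helper some drivers use to keep the two paths in sync (purely illustrative, not part of the patch):

#include <rte_interrupts.h>

static rte_intr_callback_fn
select_isr_sketch(const struct rte_intr_handle *ih)
{
	switch (ih->type) {
	case RTE_INTR_HANDLE_UIO_INTX:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		return qede_interrupt_handler_intx;	/* legacy INTA */
	default:
		return qede_interrupt_handler;		/* MSI/MSI-X */
	}
}
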
if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
@@ -3009,7 +3149,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Start the Slowpath-process */
memset(&params, 0, sizeof(struct qed_slowpath_params));
- params.int_mode = ECORE_INT_MODE_MSIX;
+
+ params.int_mode = int_mode;
params.drv_major = QEDE_PMD_VERSION_MAJOR;
params.drv_minor = QEDE_PMD_VERSION_MINOR;
params.drv_rev = QEDE_PMD_VERSION_REVISION;
@@ -3022,7 +3163,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
* interrupt vector but we need one for each engine.
*/
if (ECORE_IS_CMT(edev) && IS_PF(edev)) {
- rc = rte_eal_alarm_set(timer_period * US_PER_S,
+ rc = rte_eal_alarm_set(QEDE_SP_TIMER_PERIOD,
qede_poll_sp_sb_cb,
(void *)eth_dev);
if (rc != 0) {
@@ -3092,7 +3233,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
ECORE_LEADING_HWFN(edev),
vf_mac,
&is_mac_forced);
- if (is_mac_exist && is_mac_forced) {
+ if (is_mac_exist) {
DP_INFO(edev, "VF macaddr received from PF\n");
ether_addr_copy((struct ether_addr *)&vf_mac,
&eth_dev->data->mac_addrs[0]);
@@ -3119,25 +3260,30 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
SLIST_INIT(&adapter->fdir_info.fdir_list_head);
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
+ SLIST_INIT(&adapter->mc_list_head);
adapter->mtu = ETHER_MTU;
adapter->vport_started = false;
/* VF tunnel offloads is enabled by default in PF driver */
adapter->vxlan.num_filters = 0;
adapter->geneve.num_filters = 0;
+ adapter->ipgre.num_filters = 0;
if (is_vf) {
adapter->vxlan.enable = true;
adapter->vxlan.filter_type = ETH_TUNNEL_FILTER_IMAC |
ETH_TUNNEL_FILTER_IVLAN;
adapter->vxlan.udp_port = QEDE_VXLAN_DEF_PORT;
adapter->geneve.enable = true;
-
adapter->geneve.filter_type = ETH_TUNNEL_FILTER_IMAC |
ETH_TUNNEL_FILTER_IVLAN;
adapter->geneve.udp_port = QEDE_GENEVE_DEF_PORT;
+ adapter->ipgre.enable = true;
+ adapter->ipgre.filter_type = ETH_TUNNEL_FILTER_IMAC |
+ ETH_TUNNEL_FILTER_IVLAN;
} else {
adapter->vxlan.enable = false;
adapter->geneve.enable = false;
+ adapter->ipgre.enable = false;
}
DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -3295,9 +3441,7 @@ RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio-pci");
-RTE_INIT(qede_init_log);
-static void
-qede_init_log(void)
+RTE_INIT(qede_init_log)
{
qede_logtype_init = rte_log_register("pmd.net.qede.init");
if (qede_logtype_init >= 0)
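
The RTE_INIT() hunk follows the 18.08-wide cleanup in which the macro itself supplies the constructor definition. A minimal standalone use of the new form (the log name is illustrative):

#include <rte_common.h>
#include <rte_log.h>

static int example_logtype;

/* Expands to a static function run as a constructor before main() */
RTE_INIT(example_init_log)
{
	example_logtype = rte_log_register("pmd.net.example");
	if (example_logtype >= 0)
		rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
}
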
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 23f7e0e2..6e9a5b4b 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
@@ -34,6 +32,7 @@
#include "base/nvm_cfg.h"
#include "base/ecore_sp_commands.h"
#include "base/ecore_l2.h"
+#include "base/ecore_vf.h"
#include "qede_logs.h"
#include "qede_if.h"
@@ -45,7 +44,7 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 7
+#define QEDE_PMD_VERSION_MINOR 9
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
@@ -170,7 +169,7 @@ struct qede_fdir_info {
#define QEDE_VXLAN_DEF_PORT (4789)
#define QEDE_GENEVE_DEF_PORT (6081)
-struct qede_udp_tunn {
+struct qede_tunn_params {
bool enable;
uint16_t num_filters;
uint16_t filter_type;
@@ -205,8 +204,9 @@ struct qede_dev {
SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
uint16_t num_uc_addr;
bool handle_hw_err;
- struct qede_udp_tunn vxlan;
- struct qede_udp_tunn geneve;
+ struct qede_tunn_params vxlan;
+ struct qede_tunn_params geneve;
+ struct qede_tunn_params ipgre;
struct qede_fdir_info fdir_info;
bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
index da6364ee..83580d04 100644
--- a/drivers/net/qede/qede_fdir.c
+++ b/drivers/net/qede/qede_fdir.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2017 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include <rte_udp.h>
@@ -141,8 +139,8 @@ qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
if (add) {
SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
- DP_ERR(edev, "flowdir filter exist\n");
- rc = -EEXIST;
+ DP_INFO(edev, "flowdir filter exist\n");
+ rc = 0;
goto err2;
}
}
@@ -465,5 +463,8 @@ int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
udpv4_flow->src_port = ntuple->src_port;
udpv4_flow->dst_port = ntuple->dst_port;
}
+
+ fdir_entry.action.rx_queue = ntuple->queue;
+
return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
}
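
The one-line fix above matters because, without it, the queue chosen in an ntuple rule never reached the fdir entry. An illustrative application request that exercises this path (all field values and masks are examples only):

#include <netinet/in.h>
#include <rte_ethdev.h>
#include <rte_ip.h>

static int
add_udp_ntuple_rule_sketch(uint16_t port_id)
{
	struct rte_eth_ntuple_filter f = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = IPv4(192, 168, 0, 1),	/* hypothetical address */
		.dst_ip_mask = UINT32_MAX,
		.proto = IPPROTO_UDP,
		.proto_mask = UINT8_MAX,
		.queue = 3,	/* now propagated by the added line */
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &f);
}
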
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 246f0fd3..ee5e54c1 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef _QEDE_IF_H
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index 159315e7..3187d97b 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#ifndef _QEDE_LOGS_H_
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index ae187321..46fa8371 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -1,14 +1,13 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include <limits.h>
#include <time.h>
#include <rte_alarm.h>
+#include <rte_string_fns.h>
#include "qede_ethdev.h"
@@ -19,7 +18,7 @@
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.30.12.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.33.12.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -62,6 +61,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
hw_prepare_params.chk_reg_fifo = false;
hw_prepare_params.initiate_pf_flr = true;
hw_prepare_params.allow_mdump = false;
+ hw_prepare_params.b_en_pacing = false;
hw_prepare_params.epoch = (u32)time(NULL);
rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
@@ -279,7 +279,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
/* Start the slowpath */
memset(&hw_init_params, 0, sizeof(hw_init_params));
hw_init_params.b_hw_start = true;
- hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
+ hw_init_params.int_mode = params->int_mode;
hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
@@ -302,9 +302,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
(params->drv_rev << 8) | (params->drv_eng);
- /* TBD: strlcpy() */
- strncpy((char *)drv_version.name, (const char *)params->name,
- MCP_DRV_VER_STR_SIZE - 4);
+ strlcpy((char *)drv_version.name, (const char *)params->name,
+ sizeof(drv_version.name));
rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
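
strlcpy(), made available through rte_string_fns.h when libc lacks it, resolves the old TBD: unlike the replaced strncpy() it always NUL-terminates and reports the untruncated source length. A small sketch of the semantics (the wrapper is illustrative):

#include <rte_string_fns.h>	/* supplies strlcpy() if libc does not */

static void
copy_drv_name_sketch(char *dst, size_t dst_size, const char *src)
{
	/* Always NUL-terminates dst; a return value >= dst_size means
	 * the source was truncated -- strncpy() offers neither check. */
	if (strlcpy(dst, src, dst_size) >= dst_size)
		; /* handle truncation if it matters */
}
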
@@ -633,8 +632,11 @@ void qed_link_update(struct ecore_hwfn *hwfn)
{
struct ecore_dev *edev = hwfn->p_dev;
struct qede_dev *qdev = (struct qede_dev *)edev;
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;
- qede_link_update((struct rte_eth_dev *)qdev->ethdev, 0);
+ if (!qede_link_update(dev, 0))
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int qed_drain(struct ecore_dev *edev)
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0de7c6b8..0f157ded 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include <rte_net.h>
@@ -87,7 +85,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
- if ((rxmode->enable_scatter) ||
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
(max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
DP_INFO(edev, "Forcing scatter-gather mode\n");
@@ -192,9 +190,15 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
void qede_rx_queue_release(void *rx_queue)
{
struct qede_rx_queue *rxq = rx_queue;
 	if (rxq) {
+		struct qede_dev *qdev = rxq->qdev;
+		struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+		PMD_INIT_FUNC_TRACE(edev);
qede_rx_queue_release_mbufs(rxq);
+ qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
+ qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
rte_free(rxq->sw_rx_ring);
rte_free(rxq);
}
@@ -350,9 +354,14 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
void qede_tx_queue_release(void *tx_queue)
{
struct qede_tx_queue *txq = tx_queue;
 	if (txq) {
+		struct qede_dev *qdev = txq->qdev;
+		struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+		PMD_INIT_FUNC_TRACE(edev);
qede_tx_queue_release_mbufs(txq);
+ qdev->ops->common->chain_free(edev, &txq->tx_pbl);
rte_free(txq->sw_tx_ring);
rte_free(txq);
}
@@ -441,8 +450,6 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct qede_fastpath *fp;
- struct qede_rx_queue *rxq;
- struct qede_tx_queue *txq;
uint16_t sb_idx;
uint8_t i;
@@ -467,21 +474,13 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
if (eth_dev->data->rx_queues[i]) {
qede_rx_queue_release(eth_dev->data->rx_queues[i]);
- rxq = eth_dev->data->rx_queues[i];
- qdev->ops->common->chain_free(edev,
- &rxq->rx_bd_ring);
- qdev->ops->common->chain_free(edev,
- &rxq->rx_comp_ring);
eth_dev->data->rx_queues[i] = NULL;
}
}
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
if (eth_dev->data->tx_queues[i]) {
- txq = eth_dev->data->tx_queues[i];
qede_tx_queue_release(eth_dev->data->tx_queues[i]);
- qdev->ops->common->chain_free(edev,
- &txq->tx_pbl);
eth_dev->data->tx_queues[i] = NULL;
}
}
@@ -1466,6 +1465,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+ } else {
+ packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
}
/* Common handling for non-tunnel packets and for inner
@@ -1487,7 +1488,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
ol_flags |= PKT_RX_IP_CKSUM_BAD;
} else {
ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
}
if (CQE_HAS_VLAN(parse_flag) ||
@@ -1631,6 +1631,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
}
+ start_seg++;
m_seg = m_seg->next;
}
@@ -1837,17 +1838,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* offloads. Don't rely on pkt_type marked by Rx, instead use
* tx_ol_flags to decide.
*/
- if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_VXLAN) ||
- ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_MPLSINUDP) ||
- ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_GENEVE)) {
+ tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+
+ if (tunn_flg) {
/* Check against max which is Tunnel IPv6 + ext */
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
break;
- tunn_flg = true;
+
/* First indicate its a tunnel pkt */
bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
@@ -1971,7 +1969,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Descriptor based VLAN insertion */
- if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (tx_ol_flags & PKT_TX_VLAN_PKT) {
vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
@@ -1986,7 +1984,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
* csum offload is requested then we need to force
* recalculation of L4 tunnel header csum also.
*/
- if (tunn_flg) {
+ if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
+ PKT_TX_TUNNEL_GRE)) {
bd1_bd_flags_bf |=
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index f1d36661..e710fbae 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
@@ -76,8 +74,6 @@
ETH_RSS_VXLAN |\
ETH_RSS_GENEVE)
-#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
-
#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
#define QEDE_RXTX_MAX(qdev) \
@@ -149,11 +145,11 @@
PKT_TX_TCP_SEG)
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
- PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
PKT_TX_TUNNEL_VXLAN | \
PKT_TX_TUNNEL_GENEVE | \
- PKT_TX_TUNNEL_MPLSINUDP)
+ PKT_TX_TUNNEL_MPLSINUDP | \
+ PKT_TX_TUNNEL_GRE)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
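
The NOTSUP mask works because QEDE_TX_OFFLOAD_MASK is, by construction, a subset of PKT_TX_OFFLOAD_MASK: for a subset, XOR equals set difference (U ^ S == U & ~S), leaving exactly the unsupported bits. A sketch of how a Tx-prepare path typically applies it:

#include <rte_mbuf.h>

static inline int
tx_olflags_supported_sketch(const struct rte_mbuf *m)
{
	/* A non-zero intersection means the packet requests an offload
	 * the PMD cannot honour and should be rejected in tx_prepare. */
	return (m->ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) == 0;
}
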
diff --git a/drivers/net/ring/meson.build b/drivers/net/ring/meson.build
index e877a4b4..7659b04f 100644
--- a/drivers/net/ring/meson.build
+++ b/drivers/net/ring/meson.build
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
sources = files('rte_eth_ring.c')
install_headers('rte_eth_ring.h')
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index df13c44b..791deb0b 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -60,9 +60,15 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_autoneg = ETH_LINK_FIXED,
};
+static int eth_ring_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, eth_ring_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
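
The macro already appends the function name and the trailing newline, which is why the converted call sites below drop their explicit \n. Its expansion, roughly:

/*
 *   PMD_LOG(INFO, "Initializing pmd_ring for %s", name)
 *
 * becomes approximately:
 *
 *   rte_log(RTE_LOG_INFO, eth_ring_logtype,
 *           "%s(): Initializing pmd_ring for %s\n", __func__, name);
 */
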
static uint16_t
eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -158,6 +164,7 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -256,18 +263,9 @@ do_eth_dev_ring_create(const char *name,
void **tx_queues_local = NULL;
unsigned i;
- RTE_LOG(INFO, PMD, "Creating rings-backed ethdev on numa socket %u\n",
+ PMD_LOG(INFO, "Creating rings-backed ethdev on numa socket %u",
numa_node);
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL) {
- rte_errno = ENOMEM;
- goto error;
- }
-
rx_queues_local = rte_zmalloc_socket(name,
sizeof(void *) * nb_rx_queues, 0, numa_node);
if (rx_queues_local == NULL) {
@@ -301,10 +299,8 @@ do_eth_dev_ring_create(const char *name,
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
- /* NOTE: we'll replace the data element, of originally allocated eth_dev
- * so the rings are local per-process */
- rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data = eth_dev->data;
data->rx_queues = rx_queues_local;
data->tx_queues = tx_queues_local;
@@ -326,7 +322,6 @@ do_eth_dev_ring_create(const char *name,
data->dev_link = pmd_link;
data->mac_addrs = &internals->address;
- eth_dev->data = data;
eth_dev->dev_ops = &ops;
data->kdrv = RTE_KDRV_NONE;
data->numa_node = numa_node;
@@ -335,6 +330,7 @@ do_eth_dev_ring_create(const char *name,
eth_dev->rx_pkt_burst = eth_ring_rx;
eth_dev->tx_pkt_burst = eth_ring_tx;
+ rte_eth_dev_probing_finish(eth_dev);
*eth_dev_p = eth_dev;
return data->port_id;
@@ -342,7 +338,6 @@ do_eth_dev_ring_create(const char *name,
error:
rte_free(rx_queues_local);
rte_free(tx_queues_local);
- rte_free(data);
rte_free(internals);
return -1;
@@ -459,13 +454,13 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
ret = -EINVAL;
if (!name) {
- RTE_LOG(WARNING, PMD, "command line parameter is empty for ring pmd!\n");
+ PMD_LOG(WARNING, "command line parameter is empty for ring pmd!");
goto out;
}
node = strchr(name, ':');
if (!node) {
- RTE_LOG(WARNING, PMD, "could not parse node value from %s\n",
+ PMD_LOG(WARNING, "could not parse node value from %s",
name);
goto out;
}
@@ -475,7 +470,7 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
action = strchr(node, ':');
if (!action) {
- RTE_LOG(WARNING, PMD, "could not parse action value from %s\n",
+ PMD_LOG(WARNING, "could not parse action value from %s",
node);
goto out;
}
@@ -498,7 +493,8 @@ static int parse_kvlist (const char *key __rte_unused, const char *value, void *
info->list[info->count].node = strtol(node, &end, 10);
if ((errno != 0) || (*end != '\0')) {
- RTE_LOG(WARNING, PMD, "node value %s is unparseable as a number\n", node);
+ PMD_LOG(WARNING,
+ "node value %s is unparseable as a number", node);
goto out;
}
@@ -542,14 +538,14 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
- RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
+ PMD_LOG(INFO, "Initializing pmd_ring for %s", name);
if (params == NULL || params[0] == '\0') {
ret = eth_dev_ring_create(name, rte_socket_id(), DEV_CREATE,
&eth_dev);
if (ret == -1) {
- RTE_LOG(INFO, PMD,
- "Attach to pmd_ring for %s\n", name);
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s", name);
ret = eth_dev_ring_create(name, rte_socket_id(),
DEV_ATTACH, &eth_dev);
}
@@ -557,13 +553,13 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
kvlist = rte_kvargs_parse(params, valid_arguments);
if (!kvlist) {
- RTE_LOG(INFO, PMD, "Ignoring unsupported parameters when creating"
- " rings-backed ethernet device\n");
+ PMD_LOG(INFO, "Ignoring unsupported parameters when creating"
+ " rings-backed ethernet device");
ret = eth_dev_ring_create(name, rte_socket_id(),
DEV_CREATE, &eth_dev);
if (ret == -1) {
- RTE_LOG(INFO, PMD,
- "Attach to pmd_ring for %s\n",
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s",
name);
ret = eth_dev_ring_create(name, rte_socket_id(),
DEV_ATTACH, &eth_dev);
@@ -617,8 +613,8 @@ rte_pmd_ring_probe(struct rte_vdev_device *dev)
&eth_dev);
if ((ret == -1) &&
(info->list[info->count].action == DEV_CREATE)) {
- RTE_LOG(INFO, PMD,
- "Attach to pmd_ring for %s\n",
+ PMD_LOG(INFO,
+ "Attach to pmd_ring for %s",
name);
ret = eth_dev_ring_create(name,
info->list[info->count].node,
@@ -647,7 +643,7 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev)
struct ring_queue *r = NULL;
uint16_t i;
- RTE_LOG(INFO, PMD, "Un-Initializing pmd_ring for %s\n", name);
+ PMD_LOG(INFO, "Un-Initializing pmd_ring for %s", name);
if (name == NULL)
return -EINVAL;
@@ -675,8 +671,6 @@ rte_pmd_ring_remove(struct rte_vdev_device *dev)
rte_free(eth_dev->data->tx_queues);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
-
rte_eth_dev_release_port(eth_dev);
return 0;
}
@@ -690,3 +684,10 @@ RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
RTE_PMD_REGISTER_PARAM_STRING(net_ring,
ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
+
+RTE_INIT(eth_ring_init_log)
+{
+ eth_ring_logtype = rte_log_register("pmd.net.ring");
+ if (eth_ring_logtype >= 0)
+ rte_log_set_level(eth_ring_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 8a671dd2..3bb41a00 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -46,11 +46,11 @@ else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
# Suppress ICC false positive warning on 'bulk' may be used before its
# value is set
-CFLAGS_sfc_ef10_tx.o += -wd3656
+CFLAGS_sfc_ef10_tx.o += -diag-disable 3656
endif
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
-LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_pci -lrte_pci
#
# List of base driver object files for which
@@ -81,6 +81,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_essb_rx.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c
VPATH += $(SRCDIR)/base
@@ -115,6 +116,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_image.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c
@@ -125,5 +127,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c
SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford2_nic.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
index 05700c5c..7f89a7bf 100644
--- a/drivers/net/sfc/base/ef10_ev.c
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -10,7 +10,7 @@
#include "mcdi_mon.h"
#endif
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_QSTATS
#define EFX_EV_QSTAT_INCR(_eep, _stat) \
@@ -549,7 +549,8 @@ ef10_ev_qdestroy(
efx_nic_t *enp = eep->ee_enp;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}
@@ -576,7 +577,7 @@ ef10_ev_qprime(
EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
ERF_DD_EVQ_IND_RPTR,
(rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
- EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
&dword, B_FALSE);
EFX_POPULATE_DWORD_2(dword,
@@ -584,11 +585,11 @@ ef10_ev_qprime(
EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
ERF_DD_EVQ_IND_RPTR,
rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
- EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
&dword, B_FALSE);
} else {
EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
- EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+ EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
&dword, B_FALSE);
}
@@ -701,13 +702,19 @@ ef10_ev_qmoderate(
EFE_DD_EVQ_IND_TIMER_FLAGS,
ERF_DD_EVQ_IND_TIMER_MODE, mode,
ERF_DD_EVQ_IND_TIMER_VAL, ticks);
- EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
+ EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
eep->ee_index, &dword, 0);
} else {
- EFX_POPULATE_DWORD_2(dword,
+ /*
+ * NOTE: The TMR_REL field introduced in Medford2 is
+ * ignored on earlier EF10 controllers. See bug66418
+ * comment 9 for details.
+ */
+ EFX_POPULATE_DWORD_3(dword,
ERF_DZ_TC_TIMER_MODE, mode,
- ERF_DZ_TC_TIMER_VAL, ticks);
- EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+ ERF_DZ_TC_TIMER_VAL, ticks,
+ ERF_FZ_TC_TMR_REL_VAL, ticks);
+ EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
eep->ee_index, &dword, 0);
}
}
@@ -742,7 +749,7 @@ ef10_ev_qstats_update(
}
#endif /* EFSYS_OPT_QSTATS */
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
static __checkReturn boolean_t
ef10_ev_rx_packed_stream(
@@ -781,14 +788,25 @@ ef10_ev_rx_packed_stream(
if (new_buffer) {
flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * If both packed stream and equal stride super-buffer
+	 * modes are compiled in, in theory credits should be
+	 * maintained for packed stream only, but right now
+	 * these modes are not distinguished in the event queue
+	 * Rx queue state and it is OK to increment the counter
+	 * regardless (it might even be cheaper than branching
+	 * since neighbouring structure members are updated as well).
+ */
eersp->eers_rx_packed_stream_credits++;
+#endif
eersp->eers_rx_read_ptr++;
}
current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
/* Check for errors that invalidate checksum and L3/L4 fields */
- if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
- /* RX frame truncated (error flag is misnamed) */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+ /* RX frame truncated */
EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
flags |= EFX_DISCARD;
goto deliver;
@@ -823,7 +841,7 @@ deliver:
return (should_abort);
}
-#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
static __checkReturn boolean_t
ef10_ev_rx(
@@ -857,7 +875,7 @@ ef10_ev_rx(
label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
eersp = &eep->ee_rxq_state[label];
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
/*
* Packed stream events are very different,
* so handle them separately
@@ -867,12 +885,23 @@ ef10_ev_rx(
#endif
size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+ cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
- l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
- cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
+ * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
+ * and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);
+
+ l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);
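
EFX_STATIC_ASSERT turns these layout assumptions into build failures rather than runtime surprises. The usual pre-C11 trick behind such macros, sketched here (the base code's exact definition may differ):

/* A negative array size is a compile error whenever cond is false. */
#define STATIC_ASSERT_SKETCH(cond) \
	((void)sizeof(char[(cond) ? 1 : -1]))
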
if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
/* Drop this event */
@@ -914,8 +943,8 @@ ef10_ev_rx(
last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
/* Check for errors that invalidate checksum and L3/L4 fields */
- if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
- /* RX frame truncated (error flag is misnamed) */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
+ /* RX frame truncated */
EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
flags |= EFX_DISCARD;
goto deliver;
@@ -951,10 +980,22 @@ ef10_ev_rx(
flags |= EFX_CKSUM_IPV4;
}
- if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
+ * only 2 bits wide on Medford2. Check it is safe to use the
+ * Medford2 field and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
+ ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
+ ESE_DE_L4_CLASS_UNKNOWN);
+
+ if (l4_class == ESE_FZ_L4_CLASS_TCP) {
EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
flags |= EFX_PKT_TCP;
- } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
flags |= EFX_PKT_UDP;
} else {
@@ -966,10 +1007,22 @@ ef10_ev_rx(
case ESE_DZ_L3_CLASS_IP6_FRAG:
flags |= EFX_PKT_IPV6;
- if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
+ * only 2 bits wide on Medford2. Check it is safe to use the
+ * Medford2 field and values for all EF10 controllers.
+ */
+ EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
+ ESF_DE_RX_L4_CLASS_LBN);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
+ EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
+ ESE_DE_L4_CLASS_UNKNOWN);
+
+ if (l4_class == ESE_FZ_L4_CLASS_TCP) {
EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
flags |= EFX_PKT_TCP;
- } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
flags |= EFX_PKT_UDP;
} else {
@@ -1322,8 +1375,9 @@ ef10_ev_rxlabel_init(
__in efx_rxq_type_t type)
{
efx_evq_rxq_state_t *eersp;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
+ boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif
_NOTE(ARGUNUSED(type))
@@ -1345,9 +1399,11 @@ ef10_ev_rxlabel_init(
eersp->eers_rx_read_ptr = 0;
#endif
eersp->eers_rx_mask = erp->er_mask;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
eersp->eers_rx_stream_npackets = 0;
- eersp->eers_rx_packed_stream = packed_stream;
+ eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
if (packed_stream) {
eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
@@ -1381,11 +1437,13 @@ ef10_ev_rxlabel_fini(
eersp->eers_rx_read_ptr = 0;
eersp->eers_rx_mask = 0;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
eersp->eers_rx_stream_npackets = 0;
eersp->eers_rx_packed_stream = B_FALSE;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
eersp->eers_rx_packed_stream_credits = 0;
#endif
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c
index 27b59987..ae872853 100644
--- a/drivers/net/sfc/base/ef10_filter.c
+++ b/drivers/net/sfc/base/ef10_filter.c
@@ -7,7 +7,7 @@
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_FILTER
@@ -95,7 +95,8 @@ ef10_filter_init(
ef10_filter_table_t *eftp;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
@@ -118,6 +119,10 @@ ef10_filter_init(
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_VNI_OR_VSID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_LOC_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST ==
MATCH_MASK(MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST));
EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST ==
@@ -150,7 +155,8 @@ ef10_filter_fini(
__in efx_nic_t *enp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_filter.ef_ef10_filter_table != NULL) {
EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
@@ -166,17 +172,24 @@ efx_mcdi_filter_op_add(
__inout ef10_filter_handle_t *handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_V3_IN_LEN,
MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+ efx_filter_match_flags_t match_flags;
efx_rc_t rc;
memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FILTER_OP;
req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
+ req.emr_in_length = MC_CMD_FILTER_OP_V3_IN_LEN;
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_FILTER_OP_EXT_OUT_LEN;
+ /*
+	 * Remove the match flag for encapsulated filters, as it has no
+	 * corresponding MCDI match flag
+ */
+ match_flags = spec->efs_match_flags & ~EFX_FILTER_MATCH_ENCAP_TYPE;
+
switch (filter_op) {
case MC_CMD_FILTER_OP_IN_OP_REPLACE:
MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_HANDLE_LO,
@@ -197,11 +210,16 @@ efx_mcdi_filter_op_add(
MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_PORT_ID,
EVB_PORT_ID_ASSIGNED);
MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_MATCH_FIELDS,
- spec->efs_match_flags);
- MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
- MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
- MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
- spec->efs_dmaq_id);
+ match_flags);
+ if (spec->efs_dmaq_id == EFX_FILTER_SPEC_RX_DMAQ_ID_DROP) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP);
+ } else {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_DEST,
+ MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_EXT_IN_RX_QUEUE,
+ spec->efs_dmaq_id);
+ }
#if EFSYS_OPT_RX_SCALE
if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
@@ -290,18 +308,45 @@ efx_mcdi_filter_op_add(
rc = EINVAL;
goto fail2;
}
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_VNI_OR_VSID),
+ spec->efs_vni_or_vsid, EFX_VNI_OR_VSID_LEN);
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_EXT_IN_IFRM_DST_MAC),
+ spec->efs_ifrm_loc_mac, EFX_MAC_ADDR_LEN);
+ }
+
+ /*
+ * Set the "MARK" or "FLAG" action for all packets matching this filter
+ * if necessary (only useful with equal stride packed stream Rx mode
+ * which provide the information in pseudo-header).
+ * These actions require MC_CMD_FILTER_OP_V3_IN msgrequest.
+ */
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) &&
+ (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION,
+ MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_MARK_VALUE,
+ spec->efs_mark);
+ } else if (spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_V3_IN_MATCH_ACTION,
+ MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG);
}
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail3;
+ goto fail4;
}
if (req.emr_out_length_used < MC_CMD_FILTER_OP_EXT_OUT_LEN) {
rc = EMSGSIZE;
- goto fail4;
+ goto fail5;
}
handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_EXT_OUT_HANDLE_LO);
@@ -309,6 +354,8 @@ efx_mcdi_filter_op_add(
return (0);
+fail5:
+ EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
fail3:
@@ -413,6 +460,12 @@ ef10_filter_equal(
return (B_FALSE);
if (left->efs_encap_type != right->efs_encap_type)
return (B_FALSE);
+ if (memcmp(left->efs_vni_or_vsid, right->efs_vni_or_vsid,
+ EFX_VNI_OR_VSID_LEN))
+ return (B_FALSE);
+ if (memcmp(left->efs_ifrm_loc_mac, right->efs_ifrm_loc_mac,
+ EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
return (B_TRUE);
@@ -495,7 +548,8 @@ ef10_filter_restore(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {
@@ -570,7 +624,8 @@ ef10_filter_add_internal(
boolean_t locked = B_FALSE;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
hash = ef10_filter_hash(spec);
@@ -842,7 +897,8 @@ ef10_filter_delete(
boolean_t locked = B_FALSE;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
hash = ef10_filter_hash(spec);
@@ -890,6 +946,7 @@ efx_mcdi_get_parser_disp_info(
__in efx_nic_t *enp,
__out_ecount(buffer_length) uint32_t *buffer,
__in size_t buffer_length,
+ __in boolean_t encap,
__out size_t *list_lengthp)
{
efx_mcdi_req_t req;
@@ -906,7 +963,8 @@ efx_mcdi_get_parser_disp_info(
req.emr_out_buf = payload;
req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;
- MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,
+ MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP, encap ?
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
efx_mcdi_execute(enp, &req);
@@ -966,28 +1024,76 @@ ef10_filter_supported_filters(
__in size_t buffer_length,
__out size_t *list_lengthp)
{
-
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
size_t mcdi_list_length;
+ size_t mcdi_encap_list_length;
size_t list_length;
uint32_t i;
+ uint32_t next_buf_idx;
+ size_t next_buf_length;
efx_rc_t rc;
+ boolean_t no_space = B_FALSE;
efx_filter_match_flags_t all_filter_flags =
(EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST |
EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT |
EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID |
EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_VNI_OR_VSID |
+ EFX_FILTER_MATCH_IFRM_LOC_MAC |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST |
+ EFX_FILTER_MATCH_ENCAP_TYPE |
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST |
EFX_FILTER_MATCH_UNKNOWN_UCAST_DST);
- rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length,
- &mcdi_list_length);
+ /*
+ * Two calls to MC_CMD_GET_PARSER_DISP_INFO are needed: one to get the
+ * list of supported filters for ordinary packets, and then another to
+ * get the list of supported filters for encapsulated packets. To
+ * distinguish the second list from the first, the
+ * EFX_FILTER_MATCH_ENCAP_TYPE flag is added to each filter for
+ * encapsulated packets.
+ */
+ rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length, B_FALSE,
+ &mcdi_list_length);
if (rc != 0) {
- if (rc == ENOSPC) {
- /* Pass through mcdi_list_length for the list length */
- *list_lengthp = mcdi_list_length;
+ if (rc == ENOSPC)
+ no_space = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if (no_space) {
+ next_buf_idx = 0;
+ next_buf_length = 0;
+ } else {
+ EFSYS_ASSERT(mcdi_list_length <= buffer_length);
+ next_buf_idx = mcdi_list_length;
+ next_buf_length = buffer_length - mcdi_list_length;
+ }
+
+ if (encp->enc_tunnel_encapsulations_supported != 0) {
+ rc = efx_mcdi_get_parser_disp_info(enp, &buffer[next_buf_idx],
+ next_buf_length, B_TRUE, &mcdi_encap_list_length);
+ if (rc != 0) {
+ if (rc == ENOSPC)
+ no_space = B_TRUE;
+ else
+ goto fail2;
+ } else {
+ for (i = next_buf_idx;
+ i < next_buf_idx + mcdi_encap_list_length; i++)
+ buffer[i] |= EFX_FILTER_MATCH_ENCAP_TYPE;
}
- goto fail1;
+ } else {
+ mcdi_encap_list_length = 0;
+ }
+
+ if (no_space) {
+ *list_lengthp = mcdi_list_length + mcdi_encap_list_length;
+ rc = ENOSPC;
+ goto fail3;
}
/*
@@ -1000,9 +1106,10 @@ ef10_filter_supported_filters(
* of the matches is preserved as they are ordered from highest to
* lowest priority.
*/
- EFSYS_ASSERT(mcdi_list_length <= buffer_length);
+ EFSYS_ASSERT(mcdi_list_length + mcdi_encap_list_length <=
+ buffer_length);
list_length = 0;
- for (i = 0; i < mcdi_list_length; i++) {
+ for (i = 0; i < mcdi_list_length + mcdi_encap_list_length; i++) {
if ((buffer[i] & ~all_filter_flags) == 0) {
buffer[list_length] = buffer[i];
list_length++;
@@ -1013,6 +1120,10 @@ ef10_filter_supported_filters(
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -1636,4 +1747,4 @@ ef10_filter_default_rxq_clear(
#endif /* EFSYS_OPT_FILTER */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_image.c b/drivers/net/sfc/base/ef10_image.c
new file mode 100644
index 00000000..6fb7e476
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_image.c
@@ -0,0 +1,885 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
+
+#if EFSYS_OPT_IMAGE_LAYOUT
+
+/*
+ * Utility routines to support limited parsing of ASN.1 tags. This is not a
+ * general purpose ASN.1 parser, but is sufficient to locate the required
+ * objects in a signed image with CMS headers.
+ */
+
+/* DER encodings for ASN.1 tags (see ITU-T X.690) */
+#define ASN1_TAG_INTEGER (0x02)
+#define ASN1_TAG_OCTET_STRING (0x04)
+#define ASN1_TAG_OBJ_ID (0x06)
+#define ASN1_TAG_SEQUENCE (0x30)
+#define ASN1_TAG_SET (0x31)
+
+#define ASN1_TAG_IS_PRIM(tag) ((tag & 0x20) == 0)
+
+#define ASN1_TAG_PRIM_CONTEXT(n) (0x80 + (n))
+#define ASN1_TAG_CONS_CONTEXT(n) (0xA0 + (n))
+
+typedef struct efx_asn1_cursor_s {
+ uint8_t *buffer;
+ uint32_t length;
+
+ uint8_t tag;
+ uint32_t hdr_size;
+ uint32_t val_size;
+} efx_asn1_cursor_t;
+
+
+/* Parse header of DER encoded ASN.1 TLV and match tag */
+static __checkReturn efx_rc_t
+efx_asn1_parse_header_match_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL || cursor->buffer == NULL || cursor->length < 2) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ cursor->tag = cursor->buffer[0];
+ if (cursor->tag != tag) {
+ /* Tag not matched */
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ if ((cursor->tag & 0x1F) == 0x1F) {
+ /* Long tag format not used in CMS syntax */
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if ((cursor->buffer[1] & 0x80) == 0) {
+ /* Short form: length is 0..127 */
+ cursor->hdr_size = 2;
+ cursor->val_size = cursor->buffer[1];
+ } else {
+ /* Long form: length encoded as [0x80+nbytes][length bytes] */
+ uint32_t nbytes = cursor->buffer[1] & 0x7F;
+ uint32_t offset;
+
+ if (nbytes == 0) {
+ /* Indefinite length not allowed in DER encoding */
+ rc = EINVAL;
+ goto fail4;
+ }
+ if (2 + nbytes > cursor->length) {
+ /* Header length overflows image buffer */
+ rc = EINVAL;
+ goto fail6;
+ }
+ if (nbytes > sizeof (uint32_t)) {
+ /* Length encoding too big */
+ rc = E2BIG;
+ goto fail5;
+ }
+ cursor->hdr_size = 2 + nbytes;
+ cursor->val_size = 0;
+ for (offset = 2; offset < cursor->hdr_size; offset++) {
+ cursor->val_size =
+ (cursor->val_size << 8) | cursor->buffer[offset];
+ }
+ }
+
+ if ((cursor->hdr_size + cursor->val_size) > cursor->length) {
+ /* Length overflows image buffer */
+ rc = E2BIG;
+ goto fail7;
+ }
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
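
To make the two DER length forms concrete, worked examples under the rules implemented above (the bytes are illustrative):

/*
 *   30 0A ...        short form: SEQUENCE tag, hdr_size = 2,
 *                    val_size = 0x0A (10 value octets)
 *
 *   30 82 01 F4 ...  long form: 0x82 = 0x80 + 2 length octets,
 *                    hdr_size = 4, val_size = 0x01F4 (500 octets)
 *
 *   30 80 ...        rejected: nbytes == 0 would mean indefinite
 *                    length, which DER forbids
 */
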
+/* Enter nested ASN.1 TLV (contained in value of current TLV) */
+static __checkReturn efx_rc_t
+efx_asn1_enter_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (ASN1_TAG_IS_PRIM(tag)) {
+ /* Cannot enter a primitive tag */
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail3;
+ }
+
+ /* Limit cursor range to nested TLV */
+ cursor->buffer += cursor->hdr_size;
+ cursor->length = cursor->val_size;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Check that the current ASN.1 TLV matches the given tag and value.
+ * Advance cursor to next TLV on a successful match.
+ */
+static __checkReturn efx_rc_t
+efx_asn1_match_tag_value(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag,
+ __in const void *valp,
+ __in uint32_t val_size)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ if (cursor->val_size != val_size) {
+ /* Value size is different */
+ rc = EINVAL;
+ goto fail3;
+ }
+ if (memcmp(cursor->buffer + cursor->hdr_size, valp, val_size) != 0) {
+ /* Value content is different */
+ rc = EINVAL;
+ goto fail4;
+ }
+ cursor->buffer += cursor->hdr_size + cursor->val_size;
+ cursor->length -= cursor->hdr_size + cursor->val_size;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Advance cursor to next TLV */
+static __checkReturn efx_rc_t
+efx_asn1_skip_tag(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ cursor->buffer += cursor->hdr_size + cursor->val_size;
+ cursor->length -= cursor->hdr_size + cursor->val_size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Return pointer to value octets and value size from current TLV */
+static __checkReturn efx_rc_t
+efx_asn1_get_tag_value(
+ __inout efx_asn1_cursor_t *cursor,
+ __in uint8_t tag,
+ __out uint8_t **valp,
+ __out uint32_t *val_sizep)
+{
+ efx_rc_t rc;
+
+ if (cursor == NULL || valp == NULL || val_sizep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_asn1_parse_header_match_tag(cursor, tag);
+ if (rc != 0) {
+ /* Invalid TLV or wrong tag */
+ goto fail2;
+ }
+ *valp = cursor->buffer + cursor->hdr_size;
+ *val_sizep = cursor->val_size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Utility routines for parsing CMS headers (see RFC2315, PKCS#7)
+ */
+
+/* OID 1.2.840.113549.1.7.2 */
+static const uint8_t PKCS7_SignedData[] =
+{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x02 };
+
+/* OID 1.2.840.113549.1.7.1 */
+static const uint8_t PKCS7_Data[] =
+{ 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07, 0x01 };
+
+/* SignedData structure version */
+static const uint8_t SignedData_Version[] =
+{ 0x03 };
+
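These byte strings are the standard BER/DER encoding of the dotted OIDs: the first octet packs the first two arcs as 40 * 1 + 2 = 0x2A, and each remaining arc is base-128 with the top bit as a continuation flag, so 840 = (6 * 128) + 72 becomes 0x86 0x48 and 113549 becomes 0x86 0xF7 0x0D. A sketch of the per-arc encoding (illustrative helper only, not part of this patch):

static size_t
oid_encode_arc(uint32_t arc, uint8_t *out)
{
	uint8_t tmp[5];
	size_t n = 0;
	size_t i;

	do {
		tmp[n++] = arc & 0x7F;	/* base-128 digits, least significant first */
		arc >>= 7;
	} while (arc != 0);
	for (i = 0; i < n; i++) {
		/* emit most significant first; 0x80 set on all but the last octet */
		out[i] = tmp[n - 1 - i] | ((i < n - 1) ? 0x80 : 0x00);
	}
	return (n);
}
/* oid_encode_arc(840, out) -> 0x86 0x48; oid_encode_arc(113549, out) -> 0x86 0xF7 0x0D */
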
+/*
+ * Check for a valid image in signed image format. This uses CMS syntax
+ * (see RFC2315, PKCS#7) to provide signatures, and certificates required
+ * to validate the signatures. The encapsulated content is in unsigned image
+ * format (reflash header, image code, trailer checksum).
+ */
+static __checkReturn efx_rc_t
+efx_check_signed_image_header(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out uint32_t *content_offsetp,
+ __out uint32_t *content_lengthp)
+{
+ efx_asn1_cursor_t cursor;
+ uint8_t *valp;
+ uint32_t val_size;
+ efx_rc_t rc;
+
+ if (content_offsetp == NULL || content_lengthp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ cursor.buffer = (uint8_t *)bufferp;
+ cursor.length = buffer_size;
+
+ /* ContextInfo */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail2;
+
+ /* ContextInfo.contentType */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID,
+ PKCS7_SignedData, sizeof (PKCS7_SignedData));
+ if (rc != 0)
+ goto fail3;
+
+ /* ContextInfo.content */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0));
+ if (rc != 0)
+ goto fail4;
+
+ /* SignedData */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail5;
+
+ /* SignedData.version */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_INTEGER,
+ SignedData_Version, sizeof (SignedData_Version));
+ if (rc != 0)
+ goto fail6;
+
+ /* SignedData.digestAlgorithms */
+ rc = efx_asn1_skip_tag(&cursor, ASN1_TAG_SET);
+ if (rc != 0)
+ goto fail7;
+
+ /* SignedData.encapContentInfo */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_SEQUENCE);
+ if (rc != 0)
+ goto fail8;
+
+ /* SignedData.encapContentInfo.econtentType */
+ rc = efx_asn1_match_tag_value(&cursor, ASN1_TAG_OBJ_ID,
+ PKCS7_Data, sizeof (PKCS7_Data));
+ if (rc != 0)
+ goto fail9;
+
+ /* SignedData.encapContentInfo.econtent */
+ rc = efx_asn1_enter_tag(&cursor, ASN1_TAG_CONS_CONTEXT(0));
+ if (rc != 0)
+ goto fail10;
+
+ /*
+ * The octet string contains the image header, image code bytes and
+ * image trailer CRC (same as unsigned image layout).
+ */
+ valp = NULL;
+ val_size = 0;
+ rc = efx_asn1_get_tag_value(&cursor, ASN1_TAG_OCTET_STRING,
+ &valp, &val_size);
+ if (rc != 0)
+ goto fail11;
+
+ if ((valp == NULL) || (val_size == 0)) {
+ rc = EINVAL;
+ goto fail12;
+ }
+ if (valp < (uint8_t *)bufferp) {
+ rc = EINVAL;
+ goto fail13;
+ }
+ if ((valp + val_size) > ((uint8_t *)bufferp + buffer_size)) {
+ rc = EINVAL;
+ goto fail14;
+ }
+
+ *content_offsetp = (uint32_t)(valp - (uint8_t *)bufferp);
+ *content_lengthp = val_size;
+
+ return (0);
+
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_check_unsigned_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size)
+{
+ efx_image_header_t *header;
+ efx_image_trailer_t *trailer;
+ uint32_t crc;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) == EFX_IMAGE_HEADER_SIZE);
+ EFX_STATIC_ASSERT(sizeof (*trailer) == EFX_IMAGE_TRAILER_SIZE);
+
+ /* Must have at least enough space for required image header fields */
+ if (buffer_size < (EFX_FIELD_OFFSET(efx_image_header_t, eih_size) +
+ sizeof (header->eih_size))) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ header = (efx_image_header_t *)bufferp;
+
+ if (header->eih_magic != EFX_IMAGE_HEADER_MAGIC) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /*
+ * Check image header version is same or higher than lowest required
+ * version.
+ */
+ if (header->eih_version < EFX_IMAGE_HEADER_VERSION) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Buffer must have space for image header, code and image trailer. */
+ if (buffer_size < (header->eih_size + header->eih_code_size +
+ EFX_IMAGE_TRAILER_SIZE)) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ /* Check CRC from image buffer matches computed CRC. */
+ trailer = (efx_image_trailer_t *)((uint8_t *)header +
+ header->eih_size + header->eih_code_size);
+
+ crc = efx_crc32_calculate(0, (uint8_t *)header,
+ (header->eih_size + header->eih_code_size));
+
+ if (trailer->eit_crc != crc) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_check_reflash_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out efx_image_info_t *infop)
+{
+ efx_image_format_t format = EFX_IMAGE_FORMAT_NO_IMAGE;
+ uint32_t image_offset;
+ uint32_t image_size;
+ void *imagep;
+ efx_rc_t rc;
+
+
+ EFSYS_ASSERT(infop != NULL);
+ if (infop == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ memset(infop, 0, sizeof (*infop));
+
+ if (bufferp == NULL || buffer_size == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /*
+ * Check if the buffer contains an image in signed format, and if so,
+ * locate the image header.
+ */
+ rc = efx_check_signed_image_header(bufferp, buffer_size,
+ &image_offset, &image_size);
+ if (rc == 0) {
+ /*
+ * Buffer holds signed image format. Check that the encapsulated
+ * content is in unsigned image format.
+ */
+ format = EFX_IMAGE_FORMAT_SIGNED;
+ } else {
+ /* Check if the buffer holds image in unsigned image format */
+ format = EFX_IMAGE_FORMAT_UNSIGNED;
+ image_offset = 0;
+ image_size = buffer_size;
+ }
+ if (image_offset + image_size > buffer_size) {
+ rc = E2BIG;
+ goto fail3;
+ }
+ imagep = (uint8_t *)bufferp + image_offset;
+
+ /* Check unsigned image layout (image header, code, image trailer) */
+ rc = efx_check_unsigned_image(imagep, image_size);
+ if (rc != 0)
+ goto fail4;
+
+ /* Return image details */
+ infop->eii_format = format;
+ infop->eii_imagep = bufferp;
+ infop->eii_image_size = buffer_size;
+ infop->eii_headerp = (efx_image_header_t *)imagep;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+ infop->eii_format = EFX_IMAGE_FORMAT_INVALID;
+ infop->eii_imagep = NULL;
+ infop->eii_image_size = 0;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
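A minimal caller sketch for the check above, assuming a firmware update file already read into memory (hypothetical wrapper, not part of this patch):

static efx_rc_t
validate_update_file(void *filep, uint32_t file_size)
{
	efx_image_info_t info;
	efx_rc_t rc;

	rc = efx_check_reflash_image(filep, file_size, &info);
	if (rc != 0)
		return (rc);	/* neither a valid signed nor unsigned image */

	/*
	 * On success, eii_format distinguishes signed from unsigned input
	 * and eii_headerp points at the reflash header inside the buffer.
	 */
	if (info.eii_format == EFX_IMAGE_FORMAT_SIGNED) {
		/* signed input: build a write buffer before the NVRAM update */
	}
	return (0);
}
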
+ __checkReturn efx_rc_t
+efx_build_signed_image_write_buffer(
+ __out uint8_t *bufferp,
+ __in uint32_t buffer_size,
+ __in efx_image_info_t *infop,
+ __out efx_image_header_t **headerpp)
+{
+ signed_image_chunk_hdr_t chunk_hdr;
+ uint32_t hdr_offset;
+ struct {
+ uint32_t offset;
+ uint32_t size;
+ } cms_header, image_header, code, image_trailer, signature;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((infop != NULL) && (headerpp != NULL));
+
+ if ((bufferp == NULL) || (buffer_size == 0) ||
+ (infop == NULL) || (headerpp == NULL)) {
+ /* Invalid arguments */
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((infop->eii_format != EFX_IMAGE_FORMAT_SIGNED) ||
+ (infop->eii_imagep == NULL) ||
+ (infop->eii_headerp == NULL) ||
+ ((uint8_t *)infop->eii_headerp < (uint8_t *)infop->eii_imagep) ||
+ (infop->eii_image_size < EFX_IMAGE_HEADER_SIZE) ||
+ ((size_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep) >
+ (infop->eii_image_size - EFX_IMAGE_HEADER_SIZE))) {
+ /* Invalid image info */
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Locate image chunks in original signed image */
+ cms_header.offset = 0;
+ cms_header.size =
+ (uint32_t)((uint8_t *)infop->eii_headerp - infop->eii_imagep);
+ if ((cms_header.size > buffer_size) ||
+ (cms_header.offset > (buffer_size - cms_header.size))) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ image_header.offset = cms_header.offset + cms_header.size;
+ image_header.size = infop->eii_headerp->eih_size;
+ if ((image_header.size > buffer_size) ||
+ (image_header.offset > (buffer_size - image_header.size))) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ code.offset = image_header.offset + image_header.size;
+ code.size = infop->eii_headerp->eih_code_size;
+ if ((code.size > buffer_size) ||
+ (code.offset > (buffer_size - code.size))) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ image_trailer.offset = code.offset + code.size;
+ image_trailer.size = EFX_IMAGE_TRAILER_SIZE;
+ if ((image_trailer.size > buffer_size) ||
+ (image_trailer.offset > (buffer_size - image_trailer.size))) {
+ rc = EINVAL;
+ goto fail6;
+ }
+
+ signature.offset = image_trailer.offset + image_trailer.size;
+ signature.size = (uint32_t)(infop->eii_image_size - signature.offset);
+ if ((signature.size > buffer_size) ||
+ (signature.offset > (buffer_size - signature.size))) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ EFSYS_ASSERT3U(infop->eii_image_size, ==, cms_header.size +
+ image_header.size + code.size + image_trailer.size +
+ signature.size);
+
+ /* BEGIN CSTYLED */
+ /*
+ * Build signed image partition, inserting chunk headers.
+ *
+ * Signed Image: Image in NVRAM partition:
+ *
+ * +-----------------+ +-----------------+
+ * | CMS header | | mcfw.update |<----+
+ * +-----------------+ | | |
+ * | reflash header | +-----------------+ |
+ * +-----------------+ | chunk header: |-->--|-+
+ * | mcfw.update | | REFLASH_TRAILER | | |
+ * | | +-----------------+ | |
+ * +-----------------+ +-->| CMS header | | |
+ * | reflash trailer | | +-----------------+ | |
+ * +-----------------+ | | chunk header: |->-+ | |
+ * | signature | | | REFLASH_HEADER | | | |
+ * +-----------------+ | +-----------------+ | | |
+ * | | reflash header |<--+ | |
+ * | +-----------------+ | |
+ * | | chunk header: |-->--+ |
+ * | | IMAGE | |
+ * | +-----------------+ |
+ * | | reflash trailer |<------+
+ * | +-----------------+
+ * | | chunk header: |
+ * | | SIGNATURE |->-+
+ * | +-----------------+ |
+ * | | signature |<--+
+ * | +-----------------+
+ * | | ...unused... |
+ * | +-----------------+
+ * +-<-| chunk header: |
+ * >-->| CMS_HEADER |
+ * +-----------------+
+ *
+ * Each chunk header gives the partition offset and length of the image
+ * chunk's data. The image chunk data is immediately followed by the
+ * chunk header for the next chunk.
+ *
+ * The data chunk for the firmware code must be at the start of the
+ * partition (needed for the bootloader). The first chunk header in the
+ * chain (for the CMS header) is stored at the end of the partition. The
+ * chain of chunk headers maintains the same logical order of image
+ * chunks as the original signed image file. This set of constraints
+ * results in the layout used for the data chunks and chunk headers.
+ */
+ /* END CSTYLED */
+ memset(bufferp, 0xFF, buffer_size);
+
+ EFX_STATIC_ASSERT(sizeof (chunk_hdr) == SIGNED_IMAGE_CHUNK_HDR_LEN);
+ memset(&chunk_hdr, 0, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ /*
+ * CMS header
+ */
+ if (buffer_size < SIGNED_IMAGE_CHUNK_HDR_LEN) {
+ rc = ENOSPC;
+ goto fail8;
+ }
+ hdr_offset = buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN;
+
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_CMS_HEADER;
+ chunk_hdr.offset = code.size + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = cms_header.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, sizeof (chunk_hdr));
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail9;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + cms_header.offset,
+ cms_header.size);
+
+ /*
+ * Image header
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail10;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_HEADER;
+ chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = image_header.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail11;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + image_header.offset,
+ image_header.size);
+
+ *headerpp = (efx_image_header_t *)(bufferp + chunk_hdr.offset);
+
+ /*
+ * Firmware code
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail12;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_IMAGE;
+ chunk_hdr.offset = 0;
+ chunk_hdr.len = code.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail13;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + code.offset,
+ code.size);
+
+ /*
+ * Image trailer (CRC)
+ */
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_REFLASH_TRAILER;
+ chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = image_trailer.size;
+
+ hdr_offset = code.size;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail14;
+ }
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail15;
+ }
+ memcpy((uint8_t *)bufferp + chunk_hdr.offset,
+ infop->eii_imagep + image_trailer.offset,
+ image_trailer.size);
+
+ /*
+ * Signature
+ */
+ hdr_offset = chunk_hdr.offset + chunk_hdr.len;
+ if (hdr_offset > (buffer_size - SIGNED_IMAGE_CHUNK_HDR_LEN)) {
+ rc = ENOSPC;
+ goto fail16;
+ }
+ chunk_hdr.magic = SIGNED_IMAGE_CHUNK_HDR_MAGIC;
+ chunk_hdr.version = SIGNED_IMAGE_CHUNK_HDR_VERSION;
+ chunk_hdr.id = SIGNED_IMAGE_CHUNK_SIGNATURE;
+ chunk_hdr.offset = hdr_offset + SIGNED_IMAGE_CHUNK_HDR_LEN;
+ chunk_hdr.len = signature.size;
+
+ memcpy(bufferp + hdr_offset, &chunk_hdr, SIGNED_IMAGE_CHUNK_HDR_LEN);
+
+ if ((chunk_hdr.len > buffer_size) ||
+ (chunk_hdr.offset > (buffer_size - chunk_hdr.len))) {
+ rc = ENOSPC;
+ goto fail17;
+ }
+ memcpy(bufferp + chunk_hdr.offset,
+ infop->eii_imagep + signature.offset,
+ signature.size);
+
+ return (0);
+
+fail17:
+ EFSYS_PROBE(fail17);
+fail16:
+ EFSYS_PROBE(fail16);
+fail15:
+ EFSYS_PROBE(fail15);
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
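To make the layout above concrete, a sketch of a reader following the chunk header chain back out of a partition written by this function (hypothetical; assumes a well-formed partition, and the real consumer is the bootloader/firmware):

static void
walk_chunk_chain(const uint8_t *partp, uint32_t part_size)
{
	signed_image_chunk_hdr_t hdr;
	uint32_t hdr_off;

	/* The first header in the chain (CMS_HEADER) sits at the partition end */
	hdr_off = part_size - SIGNED_IMAGE_CHUNK_HDR_LEN;
	for (;;) {
		memcpy(&hdr, partp + hdr_off, sizeof (hdr));
		if (hdr.magic != SIGNED_IMAGE_CHUNK_HDR_MAGIC)
			break;	/* corrupt or unwritten partition */

		/* hdr.offset and hdr.len locate this chunk's data */
		if (hdr.id == SIGNED_IMAGE_CHUNK_SIGNATURE)
			break;	/* the signature is the last logical chunk */

		/* The next chunk header immediately follows this chunk's data */
		hdr_off = hdr.offset + hdr.len;
	}
}
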
+
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
index e79f4d53..4751faf1 100644
--- a/drivers/net/sfc/base/ef10_impl.h
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -11,13 +11,27 @@
extern "C" {
#endif
-#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD)
-#define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS)
-#elif EFSYS_OPT_HUNTINGTON
-#define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS
-#elif EFSYS_OPT_MEDFORD
-#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS
-#endif
+
+/* Number of hardware PIO buffers (for compile-time resource dimensions) */
+#define EF10_MAX_PIOBUF_NBUFS (16)
+
+#if EFSYS_OPT_HUNTINGTON
+# if (EF10_MAX_PIOBUF_NBUFS < HUNT_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+#if EFSYS_OPT_MEDFORD
+# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+# if (EF10_MAX_PIOBUF_NBUFS < MEDFORD2_PIOBUF_NBUFS)
+# error "EF10_MAX_PIOBUF_NBUFS too small"
+# endif
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+
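The new pattern trades the old conditional MAX() for a fixed compile-time dimension plus per-variant guards: enabling a new variant whose PIOBUF count exceeds the constant breaks the build instead of silently under-sizing arrays. The same idiom in miniature (illustrative names only):

#define MAX_RES (16)			/* worst case across enabled variants */
#if VARIANT_A_ENABLED
# if (MAX_RES < VARIANT_A_RES)
#  error "MAX_RES too small"		/* caught at build time, not run time */
# endif
#endif
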
/*
* FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
@@ -742,6 +756,7 @@ extern void
ef10_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t tcp_mss,
__out_ecount(count) efx_desc_t *edp,
@@ -753,6 +768,11 @@ ef10_tx_qdesc_vlantci_create(
__in uint16_t vlan_tci,
__out efx_desc_t *edp);
+extern void
+ef10_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp);
#if EFSYS_OPT_QSTATS
@@ -947,13 +967,15 @@ extern void
ef10_rx_qenable(
__in efx_rxq_t *erp);
+union efx_rxq_type_data_u;
+
extern __checkReturn efx_rc_t
ef10_rx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const union efx_rxq_type_data_u *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
@@ -1131,26 +1153,38 @@ efx_mcdi_get_clock(
extern __checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp);
+
+extern __checkReturn efx_rc_t
efx_mcdi_get_vector_cfg(
__in efx_nic_t *enp,
__out_opt uint32_t *vec_basep,
__out_opt uint32_t *pf_nvecp,
__out_opt uint32_t *vf_nvecp);
-extern __checkReturn efx_rc_t
-ef10_get_datapath_caps(
- __in efx_nic_t *enp);
-
extern __checkReturn efx_rc_t
ef10_get_privilege_mask(
__in efx_nic_t *enp,
__out uint32_t *maskp);
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
extern __checkReturn efx_rc_t
-ef10_external_port_mapping(
+efx_mcdi_get_nic_global(
__in efx_nic_t *enp,
- __in uint32_t port,
- __out uint8_t *external_portp);
+ __in uint32_t key,
+ __out uint32_t *valuep);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_set_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __in uint32_t value);
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
+
#if EFSYS_OPT_RX_PACKED_STREAM
@@ -1182,6 +1216,16 @@ ef10_external_port_mapping(
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+/*
+ * Maximum DMA length and buffer stride alignment.
+ * (see SF-119419-TC, 3.2)
+ */
+#define EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT 64
+
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c
index f79c44e3..1ffe266b 100644
--- a/drivers/net/sfc/base/ef10_intr.c
+++ b/drivers/net/sfc/base/ef10_intr.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
__checkReturn efx_rc_t
ef10_intr_init(
@@ -56,7 +56,8 @@ efx_mcdi_trigger_interrupt(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (level >= enp->en_nic_cfg.enc_intr_limit) {
rc = EINVAL;
@@ -129,7 +130,8 @@ ef10_intr_status_line(
efx_dword_t dword;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Read the queue mask and implicitly acknowledge the interrupt. */
EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);
@@ -147,7 +149,8 @@ ef10_intr_status_message(
__out boolean_t *fatalp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
_NOTE(ARGUNUSED(enp, message))
@@ -170,4 +173,4 @@ ef10_intr_fini(
_NOTE(ARGUNUSED(enp))
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c
index db7692ee..1031e836 100644
--- a/drivers/net/sfc/base/ef10_mac.c
+++ b/drivers/net/sfc/base/ef10_mac.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
__checkReturn efx_rc_t
ef10_mac_poll(
@@ -356,7 +356,8 @@ ef10_mac_multicast_list_set(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if ((rc = emop->emo_reconfigure(enp)) != 0)
goto fail1;
@@ -522,8 +523,44 @@ ef10_mac_stats_get_mask(
goto fail6;
}
+ if (encp->enc_fec_counters) {
+ const struct efx_mac_stats_range ef10_fec[] = {
+ { EFX_MAC_FEC_UNCORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3 },
+ };
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_fec, EFX_ARRAY_SIZE(ef10_fec))) != 0)
+ goto fail7;
+ }
+
+ if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V4) {
+ const struct efx_mac_stats_range ef10_rxdp_sdt[] = {
+ { EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_rxdp_sdt, EFX_ARRAY_SIZE(ef10_rxdp_sdt))) != 0)
+ goto fail8;
+ }
+
+ if (encp->enc_hlb_counters) {
+ const struct efx_mac_stats_range ef10_hlb[] = {
+ { EFX_MAC_RXDP_HLB_IDLE, EFX_MAC_RXDP_HLB_TIMEOUT },
+ };
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_hlb, EFX_ARRAY_SIZE(ef10_hlb))) != 0)
+ goto fail9;
+ }
+
return (0);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
fail6:
EFSYS_PROBE(fail6);
fail5:
@@ -551,16 +588,45 @@ ef10_mac_stats_update(
__inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
__inout_opt uint32_t *generationp)
{
- efx_qword_t value;
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_qword_t generation_start;
efx_qword_t generation_end;
+ efx_qword_t value;
+ efx_rc_t rc;
- _NOTE(ARGUNUSED(enp))
+ /*
+ * The MAC_STATS contain start and end generation counters used to
+ * detect when the DMA buffer has been updated during stats decode.
+ * All stats counters are 64bit unsigned values.
+ *
+ * Siena-compatible MAC stats contain MC_CMD_MAC_NSTATS 64bit counters.
+ * The generation end counter is at index MC_CMD_MAC_GENERATION_END
+ * (same as MC_CMD_MAC_NSTATS-1).
+ *
+ * Medford2 and later use a larger DMA buffer: MAC_STATS_NUM_STATS from
+ * MC_CMD_GET_CAPABILITIES_V4_OUT reports the number of 64bit counters.
+ *
+ * Firmware writes the generation end counter as the last counter in the
+ * DMA buffer. Do not use MC_CMD_MAC_GENERATION_END, as that is only
+ * correct for legacy Siena-compatible MAC stats.
+ */
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small for legacy MAC stats */
+ rc = ENOSPC;
+ goto fail1;
+ }
+ if (EFSYS_MEM_SIZE(esmp) <
+ (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail2;
+ }
/* Read END first so we don't race with the MC */
- EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
- EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
- &generation_end);
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
+ EF10_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1),
+ &generation_end);
EFSYS_MEM_READ_BARRIER();
/* TX */
@@ -851,7 +917,109 @@ ef10_mac_stats_update(
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value);
- EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V2)
+ goto done;
+
+ /* FEC */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_UNCORRECTED_ERRORS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_UNCORRECTED_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_ERRORS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3]),
+ &value);
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V3)
+ goto done;
+
+ /* CTPIO exceptions */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_BUSY_FALLBACK]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_LONG_WRITE_SUCCESS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_MISSING_DBELL_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_OVERFLOW_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_OVERFLOW_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNDERFLOW_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_TIMEOUT_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_TIMEOUT_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_NONCONTIG_WR_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FRM_CLOBBER_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_INVALID_WR_FAIL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_INVALID_WR_FAIL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_RUNT_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_RUNT_FALLBACK]), &value);
+
+ /* CTPIO per-port stats */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_SUCCESS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_SUCCESS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_FALLBACK, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_FALLBACK]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_POISON, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_POISON]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_CTPIO_ERASE, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_CTPIO_ERASE]), &value);
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS_V4)
+ goto done;
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC]),
+ &value);
+
+ /* Head-of-line blocking */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_IDLE, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_IDLE]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_TIMEOUT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_TIMEOUT]), &value);
+
+done:
+ /* Read START generation counter */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
EFSYS_MEM_READ_BARRIER();
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
&generation_start);
@@ -866,8 +1034,15 @@ ef10_mac_stats_update(
*generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
#endif /* EFSYS_OPT_MAC_STATS */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
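The generation counters above implement a lock-free consistency check in the style of a seqlock: snapshot the end counter, decode the stats, then read the start counter, and discard the snapshot on a mismatch because the MC rewrote the buffer mid-decode. Schematically (hypothetical helper names, assuming a retrying caller):

/* dma_buf holds nstats 64bit counters; the last one is the generation end */
uint64_t gen_end, gen_start;

dma_sync_for_cpu(dma_buf);
gen_end = dma_buf[nstats - 1];
read_barrier();
decode_all_stats(dma_buf);		/* may race with MC DMA writes */
dma_sync_for_cpu(dma_buf);
read_barrier();
gen_start = dma_buf[generation_start_index];
if (gen_start != gen_end)
	return_eagain_and_retry();	/* buffer changed under us */
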
diff --git a/drivers/net/sfc/base/ef10_mcdi.c b/drivers/net/sfc/base/ef10_mcdi.c
index 1f9e573f..8a3fc3b4 100644
--- a/drivers/net/sfc/base/ef10_mcdi.c
+++ b/drivers/net/sfc/base/ef10_mcdi.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_MCDI
@@ -28,7 +28,8 @@ ef10_mcdi_init(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);
/*
@@ -135,7 +136,8 @@ ef10_mcdi_send_request(
unsigned int pos;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Write the header */
for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
@@ -186,13 +188,17 @@ ef10_mcdi_read_response(
{
const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
efsys_mem_t *esmp = emtp->emt_dma_mem;
- unsigned int pos;
+ unsigned int pos = 0;
efx_dword_t data;
+ size_t remaining = length;
+
+ while (remaining > 0) {
+ size_t chunk = MIN(remaining, sizeof (data));
- for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
EFSYS_MEM_READD(esmp, offset + pos, &data);
- memcpy((uint8_t *)bufferp + pos, &data,
- MIN(sizeof (data), length - pos));
+ memcpy((uint8_t *)bufferp + pos, &data, chunk);
+ pos += chunk;
+ remaining -= chunk;
}
}
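With the rewritten loop, a response length that is not a multiple of sizeof (efx_dword_t) is handled uniformly: for length 10 the chunks are 4, 4 and 2 bytes, so only the valid bytes of the final dword are copied and the write never runs past the end of the caller's bufferp.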
@@ -254,7 +260,8 @@ ef10_mcdi_feature_supported(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* Use privilege mask state at MCDI attach.
@@ -315,4 +322,4 @@ fail1:
#endif /* EFSYS_OPT_MCDI */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c
index eb9ec2be..7dbf843b 100644
--- a/drivers/net/sfc/base/ef10_nic.c
+++ b/drivers/net/sfc/base/ef10_nic.c
@@ -10,7 +10,7 @@
#include "mcdi_mon.h"
#endif
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#include "ef10_tlv_layout.h"
@@ -25,7 +25,8 @@ efx_mcdi_get_port_assignment(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
@@ -70,7 +71,8 @@ efx_mcdi_get_port_modes(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PORT_MODES;
@@ -250,7 +252,8 @@ efx_mcdi_get_mac_address_pf(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
@@ -308,7 +311,8 @@ efx_mcdi_get_mac_address_vf(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
@@ -372,7 +376,8 @@ efx_mcdi_get_clock(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_CLOCK;
@@ -419,6 +424,64 @@ fail1:
}
__checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ uint32_t end_padding;
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
+ /* RX DMA end padding is disabled */
+ end_padding = 0;
+ } else {
+ switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
+ end_padding = 64;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
+ end_padding = 128;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
+ end_padding = 256;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ }
+
+ *end_paddingp = end_padding;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
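From the caller's perspective the new wrapper collapses the firmware's padding enum into a byte count, so the result can be used directly when reserving tail room in receive buffers; a hedged usage sketch (hypothetical caller):

static void
size_rx_buffers(efx_nic_t *enp)
{
	uint32_t end_padding;

	if (efx_mcdi_get_rxdp_config(enp, &end_padding) == 0) {
		/* 0 (disabled) or 64/128/256 bytes appended to each RX DMA */
		/* reserve end_padding bytes of tail room per receive buffer */
	}
}
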
+ __checkReturn efx_rc_t
efx_mcdi_get_vector_cfg(
__in efx_nic_t *enp,
__out_opt uint32_t *vec_basep,
@@ -783,7 +846,8 @@ ef10_nic_pio_alloc(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
EFSYS_ASSERT(bufnump);
EFSYS_ASSERT(handlep);
EFSYS_ASSERT(blknump);
@@ -925,62 +989,103 @@ fail1:
return (rc);
}
- __checkReturn efx_rc_t
+static __checkReturn efx_rc_t
ef10_get_datapath_caps(
__in efx_nic_t *enp)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint32_t flags;
- uint32_t flags2;
- uint32_t tso2nc;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)];
efx_rc_t rc;
- if ((rc = efx_mcdi_get_capabilities(enp, &flags, NULL, NULL,
- &flags2, &tso2nc)) != 0)
- goto fail1;
-
if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
goto fail1;
-#define CAP_FLAG(flags1, field) \
- ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
-#define CAP_FLAG2(flags2, field) \
- ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V5_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+#define CAP_FLAGS1(_req, _flag) \
+ (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \
+ (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))
+
+#define CAP_FLAGS2(_req, _flag) \
+ (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
+ (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \
+ (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))
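Note the length guard in CAP_FLAGS2: if older firmware returns a response shorter than MC_CMD_GET_CAPABILITIES_V2_OUT_LEN, the && short-circuits before MCDI_OUT_DWORD touches the FLAGS2 field, so every V2 capability simply reads as absent rather than being decoded from bytes beyond emr_out_length_used.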
/*
* Huntington RXDP firmware inserts a 0 or 14 byte prefix.
* We only support the 14 byte prefix here.
*/
- if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+ if (CAP_FLAGS1(req, RX_PREFIX_LEN_14) == 0) {
rc = ENOTSUP;
- goto fail2;
+ goto fail4;
}
encp->enc_rx_prefix_size = 14;
+ /* Check if the firmware supports additional RSS modes */
+ if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
+ encp->enc_rx_scale_additional_modes_supported = B_TRUE;
+ else
+ encp->enc_rx_scale_additional_modes_supported = B_FALSE;
+
/* Check if the firmware supports TSO */
- encp->enc_fw_assisted_tso_enabled =
- CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, TX_TSO))
+ encp->enc_fw_assisted_tso_enabled = B_TRUE;
+ else
+ encp->enc_fw_assisted_tso_enabled = B_FALSE;
/* Check if the firmware supports FATSOv2 */
- encp->enc_fw_assisted_tso_v2_enabled =
- CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS2(req, TX_TSO_V2)) {
+ encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
+ } else {
+ encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = 0;
+ }
- /* Get the number of TSO contexts (FATSOv2) */
- encp->enc_fw_assisted_tso_v2_n_contexts =
- CAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0;
+ /* Check if the firmware supports FATSOv2 encap */
+ if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
+ encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
+ else
+ encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;
/* Check if the firmware has vadapter/vport/vswitch support */
- encp->enc_datapath_cap_evb =
- CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, EVB))
+ encp->enc_datapath_cap_evb = B_TRUE;
+ else
+ encp->enc_datapath_cap_evb = B_FALSE;
/* Check if the firmware supports VLAN insertion */
- encp->enc_hw_tx_insert_vlan_enabled =
- CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
+ encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
+ else
+ encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
/* Check if the firmware supports RX event batching */
- encp->enc_rx_batching_enabled =
- CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, RX_BATCHING))
+ encp->enc_rx_batching_enabled = B_TRUE;
+ else
+ encp->enc_rx_batching_enabled = B_FALSE;
/*
* Even if batching isn't reported as supported, we may still get
@@ -989,38 +1094,61 @@ ef10_get_datapath_caps(
encp->enc_rx_batch_max = 16;
/* Check if the firmware supports disabling scatter on RXQs */
- encp->enc_rx_disable_scatter_supported =
- CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
+ encp->enc_rx_disable_scatter_supported = B_TRUE;
+ else
+ encp->enc_rx_disable_scatter_supported = B_FALSE;
/* Check if the firmware supports packed stream mode */
- encp->enc_rx_packed_stream_supported =
- CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, RX_PACKED_STREAM))
+ encp->enc_rx_packed_stream_supported = B_TRUE;
+ else
+ encp->enc_rx_packed_stream_supported = B_FALSE;
/*
* Check if the firmware supports configurable buffer sizes
* for packed stream mode (otherwise buffer size is 1Mbyte)
*/
- encp->enc_rx_var_packed_stream_supported =
- CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
+ encp->enc_rx_var_packed_stream_supported = B_TRUE;
+ else
+ encp->enc_rx_var_packed_stream_supported = B_FALSE;
+
+ /* Check if the firmware supports equal stride super-buffer mode */
+ if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
+ encp->enc_rx_es_super_buffer_supported = B_TRUE;
+ else
+ encp->enc_rx_es_super_buffer_supported = B_FALSE;
+
+ /* Check if the firmware supports FW subvariant w/o Tx checksumming */
+ if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
+ else
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
/* Check if the firmware supports set mac with running filters */
- encp->enc_allow_set_mac_with_installed_filters =
- CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
- B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
+ encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+ else
+ encp->enc_allow_set_mac_with_installed_filters = B_FALSE;
/*
* Check if firmware supports the extended MC_CMD_SET_MAC, which allows
* specifying which parameters to configure.
*/
- encp->enc_enhanced_set_mac_supported =
- CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
+ encp->enc_enhanced_set_mac_supported = B_TRUE;
+ else
+ encp->enc_enhanced_set_mac_supported = B_FALSE;
/*
* Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
* us to let the firmware choose the settings to use on an EVQ.
*/
- encp->enc_init_evq_v2_supported =
- CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS2(req, INIT_EVQ_V2))
+ encp->enc_init_evq_v2_supported = B_TRUE;
+ else
+ encp->enc_init_evq_v2_supported = B_FALSE;
/*
* Check if firmware-verified NVRAM updates must be used.
@@ -1030,29 +1158,34 @@ ef10_get_datapath_caps(
* and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
* partition and report the result).
*/
- encp->enc_nvram_update_verify_result_supported =
- CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?
- B_TRUE : B_FALSE;
+ if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
+ encp->enc_nvram_update_verify_result_supported = B_TRUE;
+ else
+ encp->enc_nvram_update_verify_result_supported = B_FALSE;
/*
* Check if firmware provides packet memory and Rx datapath
* counters.
*/
- encp->enc_pm_and_rxdp_counters =
- CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
+ encp->enc_pm_and_rxdp_counters = B_TRUE;
+ else
+ encp->enc_pm_and_rxdp_counters = B_FALSE;
/*
* Check if the 40G MAC hardware is capable of reporting
* statistics for Tx size bins.
*/
- encp->enc_mac_stats_40g_tx_size_bins =
- CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE;
+ if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
+ encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
+ else
+ encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;
/*
* Check if firmware supports VXLAN and NVGRE tunnels.
* The capability indicates Geneve protocol support as well.
*/
- if (CAP_FLAG(flags, VXLAN_NVGRE)) {
+ if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
encp->enc_tunnel_encapsulations_supported =
(1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
(1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
@@ -1066,11 +1199,136 @@ ef10_get_datapath_caps(
encp->enc_tunnel_config_udp_entries_max = 0;
}
-#undef CAP_FLAG
-#undef CAP_FLAG2
+ /*
+ * Check if firmware reports the VI window mode.
+ * Medford2 has a variable VI window size (8K, 16K or 64K).
+ * Medford and Huntington have a fixed 8K VI window size.
+ */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
+ uint8_t mode =
+ MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
+
+ switch (mode) {
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
+ break;
+ case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
+ break;
+ default:
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
+ break;
+ }
+ } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD)) {
+ /* Huntington and Medford have fixed 8K window size */
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+ } else {
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
+ }
+
+ /* Check if firmware supports extended MAC stats. */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
+ /* Extended stats buffer supported */
+ encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
+ } else {
+ /* Use Siena-compatible legacy MAC stats */
+ encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
+ }
+
+ if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
+ encp->enc_fec_counters = B_TRUE;
+ else
+ encp->enc_fec_counters = B_FALSE;
+
+ /* Check if the firmware provides head-of-line blocking counters */
+ if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
+ encp->enc_hlb_counters = B_TRUE;
+ else
+ encp->enc_hlb_counters = B_FALSE;
+
+ if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
+ /* Only one exclusive RSS context is available per port. */
+ encp->enc_rx_scale_max_exclusive_contexts = 1;
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_MEDFORD2:
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_TOEPLITZ);
+ break;
+
+ case EFX_FAMILY_MEDFORD:
+ case EFX_FAMILY_HUNTINGTON:
+ /*
+ * Packed stream firmware variant maintains a
+ * non-standard algorithm for hash computation.
+ * It implies explicit XORing together
+ * source + destination IP addresses (or last
+ * four bytes in the case of IPv6) and using the
+ * resulting value as the input to a Toeplitz hash.
+ */
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_PACKED_STREAM);
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Port numbers cannot contribute to the hash value */
+ encp->enc_rx_scale_l4_hash_supported = B_FALSE;
+ } else {
+ /*
+ * Maximum number of exclusive RSS contexts.
+ * EF10 hardware supports 64 in total, but 6 are reserved
+ * for shared contexts. They are a global resource so
+ * not all may be available.
+ */
+ encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;
+
+ encp->enc_rx_scale_hash_alg_mask =
+ (1U << EFX_RX_HASHALG_TOEPLITZ);
+
+ /*
+ * It is possible to use port numbers as
+ * the input data for hash computation.
+ */
+ encp->enc_rx_scale_l4_hash_supported = B_TRUE;
+ }
+ /* Check if the firmware supports "FLAG" and "MARK" filter actions */
+ if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
+ encp->enc_filter_action_flag_supported = B_TRUE;
+ else
+ encp->enc_filter_action_flag_supported = B_FALSE;
+
+ if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
+ encp->enc_filter_action_mark_supported = B_TRUE;
+ else
+ encp->enc_filter_action_mark_supported = B_FALSE;
+
+ /* Get maximum supported value for "MARK" filter action */
+ if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
+ encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
+ GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
+ else
+ encp->enc_filter_action_mark_max = 0;
+
+#undef CAP_FLAGS1
+#undef CAP_FLAGS2
return (0);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
@@ -1132,78 +1390,238 @@ fail1:
/*
- * Table of mapping schemes from port number to the number of the external
- * connector on the board. The external numbering does not distinguish
- * off-board separated outputs such as from multi-headed cables.
+ * Table of mapping schemes from port number to external number.
+ *
+ * Each port number ultimately corresponds to a connector: either as part of
+ * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
+ * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
+ * "Salina"). In general:
*
- * The count of adjacent port numbers that map to each external port
+ * Port number (0-based)
+ * |
+ * port mapping (n:1)
+ * |
+ * v
+ * External port number (normally 1-based)
+ * |
+ * fixed (1:1) or cable assembly (1:m)
+ * |
+ * v
+ * Connector
+ *
+ * The external numbering refers to the cages or magjacks on the board,
+ * as visibly annotated on the board or back panel. This table describes
+ * how to determine which external cage/magjack corresponds to the port
+ * numbers used by the driver.
+ *
+ * The count of adjacent port numbers that map to each external number,
* and the offset in the numbering, is determined by the chip family and
* current port mode.
*
* For the Huntington family, the current port mode cannot be discovered,
+ * but a single mapping is used by all modes for a given chip variant,
* so the mapping used is instead the last match in the table to the full
* set of port modes to which the NIC can be configured. Therefore the
* ordering of entries in the mapping table is significant.
*/
-static struct {
+static struct ef10_external_port_map_s {
efx_family_t family;
uint32_t modes_mask;
int32_t count;
int32_t offset;
} __ef10_external_port_mappings[] = {
- /* Supported modes with 1 output per external port */
+ /*
+ * Modes used by Huntington family controllers where each port
+ * number maps to a separate cage.
+ * SFN7x22F (Torino):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * SFN7xx4F (Pavia):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
{
EFX_FAMILY_HUNTINGTON,
- (1 << TLV_PORT_MODE_10G) |
- (1 << TLV_PORT_MODE_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G),
- 1,
- 1
+ (1U << TLV_PORT_MODE_10G) | /* mode 0 */
+ (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */
+ 1, /* ports per cage */
+ 1 /* first cage */
},
+ /*
+ * Modes which for Huntington identify a chip variant where 2
+ * adjacent port numbers map to each cage.
+ * SFN7x42Q (Monza):
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
{
- EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G) |
- (1 << TLV_PORT_MODE_10G_10G),
- 1,
- 1
+ EFX_FAMILY_HUNTINGTON,
+ (1U << TLV_PORT_MODE_40G) | /* mode 1 */
+ (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
+ (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
+ (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */
+ 2, /* ports per cage */
+ 1 /* first cage */
},
- /* Supported modes with 2 outputs per external port */
+ /*
+ * Modes that on Medford allocate each port number to a separate
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
{
- EFX_FAMILY_HUNTINGTON,
- (1 << TLV_PORT_MODE_40G) |
- (1 << TLV_PORT_MODE_40G_40G) |
- (1 << TLV_PORT_MODE_40G_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_40G),
- 2,
- 1
+ EFX_FAMILY_MEDFORD,
+ (1U << TLV_PORT_MODE_10G) | /* mode 0 */
+ (1U << TLV_PORT_MODE_10G_10G), /* mode 2 */
+ 1, /* ports per cage */
+ 1 /* first cage */
},
+ /*
+ * Modes that on Medford allocate 2 adjacent port numbers to each
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
{
EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_40G) |
- (1 << TLV_PORT_MODE_40G_40G) |
- (1 << TLV_PORT_MODE_40G_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_40G) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),
- 2,
- 1
+ (1U << TLV_PORT_MODE_40G) | /* mode 1 */
+ (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */
+ (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */
+ (1U << TLV_PORT_MODE_10G_10G_40G) | /* mode 7 */
+ /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */
+ 2, /* ports per cage */
+ 1 /* first cage */
},
- /* Supported modes with 4 outputs per external port */
+ /*
+ * Modes that on Medford allocate 4 adjacent port numbers to each
+ * connector, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 1
+ * port 3 -> cage 1
+ */
{
EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1),
- 4,
- 1,
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q) | /* mode 5 */
+ /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1), /* mode 4 */
+ 4, /* ports per cage */
+ 1 /* first cage */
},
+ /*
+ * Modes that on Medford allocate 4 adjacent port numbers to each
+ * connector, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
{
EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
- 4,
- 2
+ (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q2), /* mode 8 */
+ 4, /* ports per cage */
+ 2 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate each port number to a separate
+ * cage.
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 3
+ * port 3 -> cage 4
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */
+ (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */
+ (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */
+ (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */
+ (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */
+ (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */
+ (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */
+ 1, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * FIXME: Some port modes are not representable in this mapping:
+ * - TLV_PORT_MODE_1x2_2x1 (mode 17):
+ * port 0 -> cage 1
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ */
+ /*
+ * Modes that on Medford2 allocate 2 adjacent port numbers to each
+ * cage, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */
+ (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */
+ (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */
+ (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */
+ (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */
+ (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */
+ 2, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 2 adjacent port numbers to each
+ * cage, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */
+ 2, /* ports per cage */
+ 2 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 4 adjacent port numbers to each
+ * connector, starting on cage 1.
+ * port 0 -> cage 1
+ * port 1 -> cage 1
+ * port 2 -> cage 1
+ * port 3 -> cage 1
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */
+ 4, /* ports per cage */
+ 1 /* first cage */
+ },
+ /*
+ * Modes that on Medford2 allocate 4 adjacent port numbers to each
+ * connector, starting on cage 2.
+ * port 0 -> cage 2
+ * port 1 -> cage 2
+ * port 2 -> cage 2
+ * port 3 -> cage 2
+ */
+ {
+ EFX_FAMILY_MEDFORD2,
+ (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */
+ (1U << TLV_PORT_MODE_NA_1x2), /* mode 11 */
+ 4, /* ports per cage */
+ 2 /* first cage */
},
};
- __checkReturn efx_rc_t
+static __checkReturn efx_rc_t
ef10_external_port_mapping(
__in efx_nic_t *enp,
__in uint32_t port,
@@ -1219,7 +1637,7 @@ ef10_external_port_mapping(
if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
/*
- * No current port mode information
+ * No current port mode information (i.e. Huntington)
* - infer mapping from available modes
*/
if ((rc = efx_mcdi_get_port_modes(enp,
@@ -1236,18 +1654,23 @@ ef10_external_port_mapping(
}
/*
- * Infer the internal port -> external port mapping from
+ * Infer the internal port -> external number mapping from
* the possible port modes for this NIC.
*/
for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
- if (__ef10_external_port_mappings[i].family !=
- enp->en_family)
+ struct ef10_external_port_map_s *eepmp =
+ &__ef10_external_port_mappings[i];
+ if (eepmp->family != enp->en_family)
continue;
- matches = (__ef10_external_port_mappings[i].modes_mask &
- port_modes);
+ matches = (eepmp->modes_mask & port_modes);
if (matches != 0) {
- count = __ef10_external_port_mappings[i].count;
- offset = __ef10_external_port_mappings[i].offset;
+ /*
+ * Some modes match. For some Huntington boards
+ * there will be multiple matches. The mapping on the
+ * last match is used.
+ */
+ count = eepmp->count;
+ offset = eepmp->offset;
port_modes &= ~matches;
}
}
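
Editor's note: the matching loop above keeps the count/offset pair from the last table entry whose mode mask intersects the reported port modes. A minimal sketch of the arithmetic this enables (assuming, as the table comments state, that count means "ports per cage" and offset means "first cage"; this mirrors rather than quotes the driver's code):

	/* e.g. count = 2, offset = 1: ports 0,1 -> cage 1; ports 2,3 -> cage 2 */
	static unsigned int
	external_port_of(unsigned int port, unsigned int count, unsigned int offset)
	{
		return ((port / count) + offset);
	}
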
@@ -1272,18 +1695,193 @@ fail1:
return (rc);
}
+static __checkReturn efx_rc_t
+ef10_nic_board_cfg(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t board_type = 0;
+ uint32_t base, nvec;
+ uint32_t port;
+ uint32_t mask;
+ uint32_t pf;
+ uint32_t vf;
+ uint8_t mac_addr[6] = { 0 };
+ efx_rc_t rc;
+
+ /* Get the (zero-based) MCDI port number */
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /* EFX MCDI interface uses one-based port numbers */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+ /*
+ * Disable static config checking, ONLY for manufacturing test
+ * and setup at the factory, to allow the static config to be
+ * installed.
+ */
+#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration (legacy) */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for EF10 */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail8;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Maximum number of bytes into the frame the TCP header can start for
+ * firmware assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail9;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail10;
+ encp->enc_privilege_mask = mask;
+
+ /* Get remaining controller-specific board config */
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail11;
+
+ return (0);
+
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
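
Editor's note: the new ef10_nic_board_cfg() follows the libefx convention used throughout this patch: each failing step jumps to a numbered fail label, and the labels unwind in reverse order so a probe fires for every label passed on the way out. A stripped-down sketch of the idiom (step_one()/step_two() are hypothetical placeholders):

	static efx_rc_t
	example_cfg(void)
	{
		efx_rc_t rc;

		if ((rc = step_one()) != 0)
			goto fail1;
		if ((rc = step_two()) != 0)
			goto fail2;

		return (0);

	fail2:
		EFSYS_PROBE(fail2);	/* identifies the failing step */
	fail1:
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

		return (rc);
	}
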
__checkReturn efx_rc_t
ef10_nic_probe(
__in efx_nic_t *enp)
{
- const efx_nic_ops_t *enop = enp->en_enop;
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Read and clear any assertion state */
if ((rc = efx_mcdi_read_assertion(enp)) != 0)
@@ -1297,9 +1895,8 @@ ef10_nic_probe(
if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
goto fail3;
- if ((rc = enop->eno_board_cfg(enp)) != 0)
- if (rc != EACCES)
- goto fail4;
+ if ((rc = ef10_nic_board_cfg(enp)) != 0)
+ goto fail4;
/*
* Set default driver config limits (based on board config).
@@ -1494,10 +2091,12 @@ ef10_nic_init(
uint32_t i;
uint32_t retry;
uint32_t delay_us;
+ uint32_t vi_window_size;
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Enable reporting of some events (e.g. link change) */
if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
@@ -1555,15 +2154,21 @@ ef10_nic_init(
enp->en_arch.ef10.ena_pio_write_vi_base =
vi_count - enp->en_arch.ef10.ena_piobuf_count;
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
+ EFX_VI_WINDOW_SHIFT_INVALID);
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
+ EFX_VI_WINDOW_SHIFT_64K);
+ vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;
+
/* Save UC memory mapping details */
enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
if (enp->en_arch.ef10.ena_piobuf_count > 0) {
enp->en_arch.ef10.ena_uc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
+ (vi_window_size *
enp->en_arch.ef10.ena_pio_write_vi_base);
} else {
enp->en_arch.ef10.ena_uc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
+ (vi_window_size *
enp->en_arch.ef10.ena_vi_count);
}
@@ -1573,7 +2178,7 @@ ef10_nic_init(
enp->en_arch.ef10.ena_uc_mem_map_size;
enp->en_arch.ef10.ena_wc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
+ (vi_window_size *
enp->en_arch.ef10.ena_piobuf_count);
/* Link piobufs to extra VIs in WC mapping */
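
Editor's note: a worked example of the sizing above, assuming a Medford2-style 64KB VI window (enc_vi_window_shift == EFX_VI_WINDOW_SHIFT_64K == 16, per the efx.h hunk later in this patch), 1024 VIs and 4 piobufs:

	vi_window_size = 1U << 16;	/* 64 KiB per VI */
	/* ena_pio_write_vi_base = 1024 - 4 = 1020 */
	/* UC mapping covers 64 KiB * 1020 VIs; WC mapping covers 64 KiB * 4 piobuf VIs */
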
@@ -1653,7 +2258,8 @@ ef10_nic_get_vi_pool(
__out uint32_t *vi_countp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* Report VIs that the client driver can use.
@@ -1674,7 +2280,8 @@ ef10_nic_get_bar_region(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* TODO: Specify host memory mapping alignment and granularity
@@ -1770,5 +2377,87 @@ fail1:
#endif /* EFSYS_OPT_DIAG */
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __out uint32_t *valuep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_NIC_GLOBAL_IN_LEN,
+ MC_CMD_GET_NIC_GLOBAL_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_NIC_GLOBAL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_set_nic_global(
+ __in efx_nic_t *enp,
+ __in uint32_t key,
+ __in uint32_t value)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_SET_NIC_GLOBAL_IN_LEN];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_NIC_GLOBAL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key);
+ MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
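
Editor's note: a hypothetical caller of the two new MCDI wrappers might read a NIC-global value and write it back. The key constant below is a placeholder for illustration only, not one defined in this patch:

	uint32_t value;
	efx_rc_t rc;

	/* EXAMPLE_NIC_GLOBAL_KEY is a hypothetical key identifier */
	if ((rc = efx_mcdi_get_nic_global(enp, EXAMPLE_NIC_GLOBAL_KEY, &value)) == 0)
		rc = efx_mcdi_set_nic_global(enp, EXAMPLE_NIC_GLOBAL_KEY, value);
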
diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c
index 1904597c..2883ec8f 100644
--- a/drivers/net/sfc/base/ef10_nvram.c
+++ b/drivers/net/sfc/base/ef10_nvram.c
@@ -7,7 +7,7 @@
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
@@ -1349,12 +1349,16 @@ ef10_nvram_partn_read_tlv(
*/
retry = 10;
do {
- rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
- seg_data, partn_size);
- } while ((rc == EAGAIN) && (--retry > 0));
+ if ((rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
+ seg_data, partn_size)) != 0)
+ --retry;
+ } while ((rc == EAGAIN) && (retry > 0));
if (rc != 0) {
/* Failed to obtain consistent segment data */
+ if (rc == EAGAIN)
+ rc = EIO;
+
goto fail4;
}
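
Editor's note: the reworked loop retries only while the segment read keeps reporting EAGAIN, and converts a final EAGAIN into EIO so callers see a hard error once the retry budget is spent. The same pattern in isolation (try_read_segment() is a hypothetical stand-in for ef10_nvram_read_tlv_segment()):

	int retry = 10;
	efx_rc_t rc;

	do {
		if ((rc = try_read_segment()) != 0)
			--retry;
	} while ((rc == EAGAIN) && (retry > 0));

	if (rc == EAGAIN)
		rc = EIO;	/* segment data never became consistent */
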
@@ -2152,6 +2156,20 @@ static ef10_parttbl_entry_t medford_parttbl[] = {
PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE),
};
+static ef10_parttbl_entry_t medford2_parttbl[] = {
+ /* partn ports nvtype */
+ PARTN_MAP_ENTRY(MC_FIRMWARE, ALL, MC_FIRMWARE),
+ PARTN_MAP_ENTRY(MC_FIRMWARE_BACKUP, ALL, MC_GOLDEN),
+ PARTN_MAP_ENTRY(EXPANSION_ROM, ALL, BOOTROM),
+ PARTN_MAP_ENTRY(EXPROM_CONFIG, ALL, BOOTROM_CFG),
+ PARTN_MAP_ENTRY(DYNAMIC_CONFIG, ALL, DYNAMIC_CFG),
+ PARTN_MAP_ENTRY(FPGA, ALL, FPGA),
+ PARTN_MAP_ENTRY(FPGA_BACKUP, ALL, FPGA_BACKUP),
+ PARTN_MAP_ENTRY(LICENSE, ALL, LICENSE),
+ PARTN_MAP_ENTRY(EXPANSION_UEFI, ALL, UEFIROM),
+ PARTN_MAP_ENTRY(MUM_FIRMWARE, ALL, MUM_FIRMWARE),
+};
+
static __checkReturn efx_rc_t
ef10_parttbl_get(
__in efx_nic_t *enp,
@@ -2169,6 +2187,11 @@ ef10_parttbl_get(
*parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
break;
+ case EFX_FAMILY_MEDFORD2:
+ *parttblp = medford2_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(medford2_parttbl);
+ break;
+
default:
EFSYS_ASSERT(B_FALSE);
return (EINVAL);
@@ -2362,4 +2385,4 @@ fail1:
#endif /* EFSYS_OPT_NVRAM */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c
index aa8d6a2b..84acb70a 100644
--- a/drivers/net/sfc/base/ef10_phy.c
+++ b/drivers/net/sfc/base/ef10_phy.c
@@ -7,7 +7,7 @@
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static void
mcdi_phy_decode_cap(
@@ -16,6 +16,32 @@ mcdi_phy_decode_cap(
{
uint32_t mask;
+#define CHECK_CAP(_cap) \
+ EFX_STATIC_ASSERT(EFX_PHY_CAP_##_cap == MC_CMD_PHY_CAP_##_cap##_LBN)
+
+ CHECK_CAP(10HDX);
+ CHECK_CAP(10FDX);
+ CHECK_CAP(100HDX);
+ CHECK_CAP(100FDX);
+ CHECK_CAP(1000HDX);
+ CHECK_CAP(1000FDX);
+ CHECK_CAP(10000FDX);
+ CHECK_CAP(25000FDX);
+ CHECK_CAP(40000FDX);
+ CHECK_CAP(50000FDX);
+ CHECK_CAP(100000FDX);
+ CHECK_CAP(PAUSE);
+ CHECK_CAP(ASYM);
+ CHECK_CAP(AN);
+ CHECK_CAP(DDM);
+ CHECK_CAP(BASER_FEC);
+ CHECK_CAP(BASER_FEC_REQUESTED);
+ CHECK_CAP(RS_FEC);
+ CHECK_CAP(RS_FEC_REQUESTED);
+ CHECK_CAP(25G_BASER_FEC);
+ CHECK_CAP(25G_BASER_FEC_REQUESTED);
+#undef CHECK_CAP
+
mask = 0;
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
mask |= (1 << EFX_PHY_CAP_10HDX);
@@ -31,8 +57,15 @@ mcdi_phy_decode_cap(
mask |= (1 << EFX_PHY_CAP_1000FDX);
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_25000FDX);
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_50000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100000FDX);
+
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
mask |= (1 << EFX_PHY_CAP_PAUSE);
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
@@ -40,6 +73,22 @@ mcdi_phy_decode_cap(
if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
mask |= (1 << EFX_PHY_CAP_AN);
+ /* FEC caps (supported on Medford2 and later) */
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_BASER_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_BASER_FEC_REQUESTED);
+
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_RS_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_RS_FEC_REQUESTED);
+
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN))
+ mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN))
+ mask |= (1 << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED);
+
*maskp = mask;
}
@@ -61,8 +110,14 @@ mcdi_phy_decode_link_mode(
if (!up)
*link_modep = EFX_LINK_DOWN;
+ else if (speed == 100000 && fd)
+ *link_modep = EFX_LINK_100000FDX;
+ else if (speed == 50000 && fd)
+ *link_modep = EFX_LINK_50000FDX;
else if (speed == 40000 && fd)
*link_modep = EFX_LINK_40000FDX;
+ else if (speed == 25000 && fd)
+ *link_modep = EFX_LINK_25000FDX;
else if (speed == 10000 && fd)
*link_modep = EFX_LINK_10000FDX;
else if (speed == 1000)
@@ -116,9 +171,18 @@ ef10_phy_link_ev(
case MCDI_EVENT_LINKCHANGE_SPEED_10G:
speed = 10000;
break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_25G:
+ speed = 25000;
+ break;
case MCDI_EVENT_LINKCHANGE_SPEED_40G:
speed = 40000;
break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_50G:
+ speed = 50000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_100G:
+ speed = 100000;
+ break;
default:
speed = 0;
break;
@@ -212,26 +276,10 @@ ef10_phy_get_link(
&elsp->els_link_mode, &elsp->els_fcntl);
#if EFSYS_OPT_LOOPBACK
- /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
-
+ /*
+ * MC_CMD_LOOPBACK and EFX_LOOPBACK names are equivalent, so use the
+ * MCDI value directly. Agreement is checked in efx_loopback_mask().
+ */
elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
#endif /* EFSYS_OPT_LOOPBACK */
@@ -289,7 +337,32 @@ ef10_phy_reconfigure(
PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
/* Too many fields for POPULATE macros, so insert this afterwards */
MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25000FDX, (cap_mask >> EFX_PHY_CAP_25000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_50000FDX, (cap_mask >> EFX_PHY_CAP_50000FDX) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_100000FDX, (cap_mask >> EFX_PHY_CAP_100000FDX) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_BASER_FEC, (cap_mask >> EFX_PHY_CAP_BASER_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_BASER_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_BASER_FEC_REQUESTED) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_RS_FEC, (cap_mask >> EFX_PHY_CAP_RS_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_RS_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_RS_FEC_REQUESTED) & 0x1);
+
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25G_BASER_FEC,
+ (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC) & 0x1);
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_25G_BASER_FEC_REQUESTED,
+ (cap_mask >> EFX_PHY_CAP_25G_BASER_FEC_REQUESTED) & 0x1);
#if EFSYS_OPT_LOOPBACK
MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
@@ -304,9 +377,18 @@ ef10_phy_reconfigure(
case EFX_LINK_10000FDX:
speed = 10000;
break;
+ case EFX_LINK_25000FDX:
+ speed = 25000;
+ break;
case EFX_LINK_40000FDX:
speed = 40000;
break;
+ case EFX_LINK_50000FDX:
+ speed = 50000;
+ break;
+ case EFX_LINK_100000FDX:
+ speed = 100000;
+ break;
default:
speed = 0;
}
@@ -606,4 +688,4 @@ ef10_bist_stop(
#endif /* EFSYS_OPT_BIST */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index 2bb6705d..313a3691 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn efx_rc_t
@@ -21,12 +21,16 @@ efx_mcdi_init_rxq(
__in efsys_mem_t *esmp,
__in boolean_t disable_scatter,
__in boolean_t want_inner_classes,
- __in uint32_t ps_bufsize)
+ __in uint32_t ps_bufsize,
+ __in uint32_t es_bufs_per_desc,
+ __in uint32_t es_max_dma_len,
+ __in uint32_t es_buf_stride,
+ __in uint32_t hol_block_timeout)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,
- MC_CMD_INIT_RXQ_EXT_OUT_LEN)];
+ uint8_t payload[MAX(MC_CMD_INIT_RXQ_V3_IN_LEN,
+ MC_CMD_INIT_RXQ_V3_OUT_LEN)];
int npages = EFX_RXQ_NBUFS(ndescs);
int i;
efx_qword_t *dma_addr;
@@ -37,8 +41,15 @@ efx_mcdi_init_rxq(
EFSYS_ASSERT3U(ndescs, <=, EFX_RXQ_MAXNDESCS);
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_RXQ_SIZE(ndescs))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
if (ps_bufsize > 0)
dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
+ else if (es_bufs_per_desc > 0)
+ dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER;
else
dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
@@ -65,9 +76,9 @@ efx_mcdi_init_rxq(
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_RXQ;
req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN;
+ req.emr_in_length = MC_CMD_INIT_RXQ_V3_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN;
+ req.emr_out_length = MC_CMD_INIT_RXQ_V3_OUT_LEN;
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
@@ -87,6 +98,19 @@ efx_mcdi_init_rxq(
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ if (es_bufs_per_desc > 0) {
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET,
+ es_bufs_per_desc);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, es_max_dma_len);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_PACKET_STRIDE, es_buf_stride);
+ MCDI_IN_SET_DWORD(req,
+ INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT,
+ hol_block_timeout);
+ }
+
dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
addr = EFSYS_MEM_ADDR(esmp);
@@ -103,11 +127,13 @@ efx_mcdi_init_rxq(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail1;
+ goto fail2;
}
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -291,11 +317,34 @@ efx_mcdi_rss_context_set_flags(
__in uint32_t rss_context,
__in efx_rx_hash_type_t type)
{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rx_hash_type_t type_ipv4;
+ efx_rx_hash_type_t type_ipv4_tcp;
+ efx_rx_hash_type_t type_ipv6;
+ efx_rx_hash_type_t type_ipv6_tcp;
+ efx_rx_hash_type_t modes;
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
efx_rc_t rc;
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_TCP_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV4_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_TCP_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_LBN ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN);
+ EFX_STATIC_ASSERT(EFX_RX_CLASS_IPV6_WIDTH ==
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH);
+
if (rss_context == EF10_RSS_CONTEXT_INVALID) {
rc = EINVAL;
goto fail1;
@@ -311,15 +360,57 @@ efx_mcdi_rss_context_set_flags(
MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
rss_context);
- MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
+ EFX_RX_HASH(IPV4_UDP, 2TUPLE);
+ type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+ type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE) | EFX_RX_HASH(IPV6_TCP, 2TUPLE) |
+ EFX_RX_HASH(IPV6_UDP, 2TUPLE);
+ type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+
+ /*
+ * Create a copy of the original hash type.
+ * The copy will be used to fill in RSS_MODE bits and
+ * may be cleared beforehand. The original variable
+ * and, thus, EN bits will remain unaffected.
+ */
+ modes = type;
+
+ /*
+ * If the firmware lacks support for additional modes, RSS_MODE
+ * fields must contain zeros, otherwise the operation will fail.
+ */
+ if (encp->enc_rx_scale_additional_modes_supported == B_FALSE)
+ modes = 0;
+
+#define EXTRACT_RSS_MODE(_type, _class) \
+ (EFX_EXTRACT_NATIVE(_type, 0, 31, \
+ EFX_LOW_BIT(EFX_RX_CLASS_##_class), \
+ EFX_HIGH_BIT(EFX_RX_CLASS_##_class)) & \
+ EFX_MASK32(EFX_RX_CLASS_##_class))
+
+ MCDI_IN_POPULATE_DWORD_10(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
- (type & EFX_RX_HASH_IPV4) ? 1 : 0,
+ ((type & type_ipv4) == type_ipv4) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
- (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0,
+ ((type & type_ipv4_tcp) == type_ipv4_tcp) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
- (type & EFX_RX_HASH_IPV6) ? 1 : 0,
+ ((type & type_ipv6) == type_ipv6) ? 1 : 0,
RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
- (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0);
+ ((type & type_ipv6_tcp) == type_ipv6_tcp) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4_TCP),
+ RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4_UDP),
+ RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV4),
+ RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6_TCP),
+ RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6_UDP),
+ RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE,
+ EXTRACT_RSS_MODE(modes, IPV6));
+
+#undef EXTRACT_RSS_MODE
efx_mcdi_execute(enp, &req);
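
Editor's note: as an illustration of the masks defined above, a caller wanting 4-tuple hashing for TCP/IPv4 and plain 2-tuple hashing for all other IPv4 traffic would pass a type such as the following, which satisfies both the type_ipv4 and type_ipv4_tcp checks and therefore sets TOEPLITZ_IPV4_EN and TOEPLITZ_TCPV4_EN (a sketch, not taken from the patch):

	efx_rx_hash_type_t type =
	    EFX_RX_HASH(IPV4, 2TUPLE) |
	    EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
	    EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
	    EFX_RX_HASH(IPV4_TCP, 4TUPLE);
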
@@ -544,12 +635,13 @@ ef10_rx_scale_mode_set(
__in efx_rx_hash_type_t type,
__in boolean_t insert)
{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_rc_t rc;
- EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ);
EFSYS_ASSERT3U(insert, ==, B_TRUE);
- if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) {
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << alg)) == 0 ||
+ insert == B_FALSE) {
rc = EINVAL;
goto fail1;
}
@@ -698,6 +790,7 @@ ef10_rx_prefix_hash(
_NOTE(ARGUNUSED(enp))
switch (func) {
+ case EFX_RX_HASHALG_PACKED_STREAM:
case EFX_RX_HASHALG_TOEPLITZ:
return (buffer[0] |
(buffer[1] << 8) |
@@ -795,8 +888,8 @@ ef10_rx_qpush(
EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
wptr, pushed & erp->er_mask);
EFSYS_PIO_WRITE_BARRIER();
- EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
- erp->er_index, &dword, B_FALSE);
+ EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
}
#if EFSYS_OPT_RX_PACKED_STREAM
@@ -827,7 +920,7 @@ ef10_rx_qpush_ps_credits(
ERF_DZ_RX_DESC_MAGIC_CMD,
ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
ERF_DZ_RX_DESC_MAGIC_DATA, credits);
- EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ EFX_BAR_VI_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
erp->er_index, &dword, B_FALSE);
rxq_state->eers_rx_packed_stream_credits = 0;
@@ -926,7 +1019,7 @@ ef10_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const efx_rxq_type_data_t *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
@@ -939,6 +1032,10 @@ ef10_rx_qcreate(
boolean_t disable_scatter;
boolean_t want_inner_classes;
unsigned int ps_buf_size;
+ uint32_t es_bufs_per_desc = 0;
+ uint32_t es_max_dma_len = 0;
+ uint32_t es_buf_stride = 0;
+ uint32_t hol_block_timeout = 0;
_NOTE(ARGUNUSED(id, erp, type_data))
@@ -965,7 +1062,7 @@ ef10_rx_qcreate(
break;
#if EFSYS_OPT_RX_PACKED_STREAM
case EFX_RXQ_TYPE_PACKED_STREAM:
- switch (type_data) {
+ switch (type_data->ertd_packed_stream.eps_buf_size) {
case EFX_RXQ_PACKED_STREAM_BUF_SIZE_1M:
ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
break;
@@ -987,6 +1084,19 @@ ef10_rx_qcreate(
}
break;
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ case EFX_RXQ_TYPE_ES_SUPER_BUFFER:
+ ps_buf_size = 0;
+ es_bufs_per_desc =
+ type_data->ertd_es_super_buffer.eessb_bufs_per_desc;
+ es_max_dma_len =
+ type_data->ertd_es_super_buffer.eessb_max_dma_len;
+ es_buf_stride =
+ type_data->ertd_es_super_buffer.eessb_buf_stride;
+ hol_block_timeout =
+ type_data->ertd_es_super_buffer.eessb_hol_block_timeout;
+ break;
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
default:
rc = ENOTSUP;
goto fail4;
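
Editor's note: a hedged sketch of filling the new type_data union for an equal-stride super-buffer queue; the numeric values are purely illustrative and must honour the EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT checks added further down:

	efx_rxq_type_data_t type_data;

	(void) memset(&type_data, 0, sizeof (type_data));
	type_data.ertd_es_super_buffer.eessb_bufs_per_desc = 16;	/* illustrative */
	type_data.ertd_es_super_buffer.eessb_max_dma_len = 2048;	/* must be buffer-aligned */
	type_data.ertd_es_super_buffer.eessb_buf_stride = 2048;	/* must be buffer-aligned */
	type_data.ertd_es_super_buffer.eessb_hol_block_timeout = 100;	/* illustrative */
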
@@ -1010,6 +1120,27 @@ ef10_rx_qcreate(
EFSYS_ASSERT(ps_buf_size == 0);
#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ if (es_bufs_per_desc > 0) {
+ if (encp->enc_rx_es_super_buffer_supported == B_FALSE) {
+ rc = ENOTSUP;
+ goto fail7;
+ }
+ if (!IS_P2ALIGNED(es_max_dma_len,
+ EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ if (!IS_P2ALIGNED(es_buf_stride,
+ EFX_RX_ES_SUPER_BUFFER_BUF_ALIGNMENT)) {
+ rc = EINVAL;
+ goto fail9;
+ }
+ }
+#else /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
+ EFSYS_ASSERT(es_bufs_per_desc == 0);
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
+
/* Scatter can only be disabled if the firmware supports doing so */
if (flags & EFX_RXQ_FLAG_SCATTER)
disable_scatter = B_FALSE;
@@ -1023,8 +1154,9 @@ ef10_rx_qcreate(
if ((rc = efx_mcdi_init_rxq(enp, ndescs, eep->ee_index, label, index,
esmp, disable_scatter, want_inner_classes,
- ps_buf_size)) != 0)
- goto fail7;
+ ps_buf_size, es_bufs_per_desc, es_max_dma_len,
+ es_buf_stride, hol_block_timeout)) != 0)
+ goto fail10;
erp->er_eep = eep;
erp->er_label = label;
@@ -1035,8 +1167,16 @@ ef10_rx_qcreate(
return (0);
+fail10:
+ EFSYS_PROBE(fail10);
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
fail7:
EFSYS_PROBE(fail7);
+#endif /* EFSYS_OPT_RX_ES_SUPER_BUFFER */
#if EFSYS_OPT_RX_PACKED_STREAM
fail6:
EFSYS_PROBE(fail6);
@@ -1087,4 +1227,4 @@ ef10_rx_fini(
#endif /* EFSYS_OPT_RX_SCALE */
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_signed_image_layout.h b/drivers/net/sfc/base/ef10_signed_image_layout.h
new file mode 100644
index 00000000..a35d1601
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_signed_image_layout.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/* These structures define the layouts for the signed firmware image binary
+ * saved in NVRAM. The original image is in the Cryptographic message
+ * syntax (CMS) format which contains the bootable firmware binary plus the
+ * signatures. The entire image is written into NVRAM to enable the firmware
+ * to validate the signatures. However, the bootrom still requires the
+ * bootable-image to start at offset 0 of the NVRAM partition. Hence the image
+ * is parsed upfront by host utilities (sfupdate) and written into nvram as
+ * 'signed_image_chunks' described by a header.
+ *
+ * This file is used by the MC as well as host-utilities (sfupdate).
+ */
+
+
+#ifndef CI_MGMT_SIGNED_IMAGE_LAYOUT_H
+#define CI_MGMT_SIGNED_IMAGE_LAYOUT_H
+
+/* Signed image chunk type identifiers */
+enum {
+ SIGNED_IMAGE_CHUNK_CMS_HEADER, /* CMS header describing the signed data */
+ SIGNED_IMAGE_CHUNK_REFLASH_HEADER, /* Reflash header */
+ SIGNED_IMAGE_CHUNK_IMAGE, /* Bootable binary image */
+ SIGNED_IMAGE_CHUNK_REFLASH_TRAILER, /* Reflash trailer */
+ SIGNED_IMAGE_CHUNK_SIGNATURE, /* Remaining contents of the signed image,
+ * including the certificates and signature */
+ NUM_SIGNED_IMAGE_CHUNKS,
+};
+
+/* Magic */
+#define SIGNED_IMAGE_CHUNK_HDR_MAGIC 0xEF105161 /* EF10 SIGned Image */
+
+/* Initial version definition - version 1 */
+#define SIGNED_IMAGE_CHUNK_HDR_VERSION 0x1
+
+/* Header length is 32 bytes */
+#define SIGNED_IMAGE_CHUNK_HDR_LEN 32
+/* Structure describing the header of each chunk of signed image
+ * as stored in nvram
+ */
+typedef struct signed_image_chunk_hdr_e {
+ /* Magic field to recognise a valid entry
+ * should match SIGNED_IMAGE_CHUNK_HDR_MAGIC
+ */
+ uint32_t magic;
+ /* Version number of this header */
+ uint32_t version;
+ /* Chunk type identifier */
+ uint32_t id;
+ /* Chunk offset */
+ uint32_t offset;
+ /* Chunk length */
+ uint32_t len;
+ /* Reserved for future expansion of this structure - always set to zeros */
+ uint32_t reserved[3];
+} signed_image_chunk_hdr_t;
+
+#endif /* CI_MGMT_SIGNED_IMAGE_LAYOUT_H */
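
Editor's note: a minimal validation sketch for a chunk header read back from NVRAM, using only the constants defined in this new file (bounds-checking offset/len against the partition size is deliberately omitted):

	static boolean_t
	chunk_hdr_valid(const signed_image_chunk_hdr_t *hdr)
	{
		return (hdr->magic == SIGNED_IMAGE_CHUNK_HDR_MAGIC &&
		    hdr->version == SIGNED_IMAGE_CHUNK_HDR_VERSION &&
		    hdr->id < NUM_SIGNED_IMAGE_CHUNKS);
	}
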
diff --git a/drivers/net/sfc/base/ef10_tlv_layout.h b/drivers/net/sfc/base/ef10_tlv_layout.h
index 2473a66a..56cffaee 100644
--- a/drivers/net/sfc/base/ef10_tlv_layout.h
+++ b/drivers/net/sfc/base/ef10_tlv_layout.h
@@ -4,6 +4,14 @@
* All rights reserved.
*/
+/*
+ * This is NOT the original source file. Do NOT edit it.
+ * To update the tlv layout, please edit the copy in
+ * the sfregistry repo and then, in that repo,
+ * "make tlv_headers" or "make export" to
+ * regenerate and export all types of headers.
+ */
+
/* These structures define the layouts for the TLV items stored in static and
* dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
*
@@ -32,6 +40,7 @@
* 1: dynamic configuration
* 2: firmware internal use
* 3: license partition
+ * 4: tsa configuration
*
* - TTT is a type, which is just a unique value. The same type value
* might appear in both locations, indicating a relationship between
@@ -407,6 +416,8 @@ struct tlv_firmware_options {
#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
#define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE
+#define TLV_FIRMWARE_VARIANT_DPDK MC_CMD_FW_DPDK
+#define TLV_FIRMWARE_VARIANT_L3XUDP MC_CMD_FW_L3XUDP
};
/* Voltage settings
@@ -525,6 +536,17 @@ struct tlv_pcie_config_r2 {
* number of externally visible ports (and, hence, PF to port mapping), so must
* be done at boot time.
*
+ * Port mode naming convention is
+ *
+ * [nports_on_cage0]x[port_lane_width]_[nports_on_cage1]x[port_lane_width]
+ *
+ * Port lane width determines the capabilities (speeds) of the ports, subject
+ * to architecture capabilities (e.g. 25G support) and switch bandwidth
+ * constraints:
+ * - single lane ports can do 25G/10G/1G
+ * - dual lane ports can do 50G/25G/10G/1G (with fallback to 1 lane)
+ * - quad lane ports can do 100G/40G/50G/25G/10G/1G (with fallback to 2 or 1 lanes)
+ *
* This tag supersedes tlv_global_port_config.
*/
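
Editor's note: reading the defines below against this convention, TLV_PORT_MODE_2x1_2x1, for example, is two single-lane (10G/25G) ports on mdi0 plus two single-lane ports on mdi1, while TLV_PORT_MODE_1x4_2x1 is one quad-lane (40G-capable) port on mdi0 plus two single-lane ports on mdi1.
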
@@ -535,18 +557,58 @@ struct tlv_global_port_mode {
uint32_t length;
uint32_t port_mode;
#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */
-#define TLV_PORT_MODE_10G (0) /* 10G, single SFP/10G-KR */
-#define TLV_PORT_MODE_40G (1) /* 40G, single QSFP/40G-KR */
-#define TLV_PORT_MODE_10G_10G (2) /* 2x10G, dual SFP/10G-KR or single QSFP */
-#define TLV_PORT_MODE_40G_40G (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) /* 4x10G, single QSFP, cage 0 (Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */
-#define TLV_PORT_MODE_40G_10G_10G (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_40G (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) /* 4x10G, single QSFP, cage 1 (Medford) */
-#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) /* 2x10G + 2x10G, dual QSFP (Medford) */
-#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2
+
+/* Huntington port modes */
+#define TLV_PORT_MODE_10G (0)
+#define TLV_PORT_MODE_40G (1)
+#define TLV_PORT_MODE_10G_10G (2)
+#define TLV_PORT_MODE_40G_40G (3)
+#define TLV_PORT_MODE_10G_10G_10G_10G (4)
+#define TLV_PORT_MODE_40G_10G_10G (6)
+#define TLV_PORT_MODE_10G_10G_40G (7)
+
+/* Medford (and later) port modes */
+#define TLV_PORT_MODE_1x1_NA (0) /* Single 10G/25G on mdi0 */
+#define TLV_PORT_MODE_1x4_NA (1) /* Single 100G/40G on mdi0 */
+#define TLV_PORT_MODE_NA_1x4 (22) /* Single 100G/40G on mdi1 */
+#define TLV_PORT_MODE_1x2_NA (10) /* Single 50G on mdi0 */
+#define TLV_PORT_MODE_NA_1x2 (11) /* Single 50G on mdi1 */
+#define TLV_PORT_MODE_1x1_1x1 (2) /* Single 10G/25G on mdi0, single 10G/25G on mdi1 */
+#define TLV_PORT_MODE_1x4_1x4 (3) /* Single 40G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_2x1_2x1 (5) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_4x1_NA (4) /* Quad 10G/25G on mdi0 */
+#define TLV_PORT_MODE_NA_4x1 (8) /* Quad 10G/25G on mdi1 */
+#define TLV_PORT_MODE_1x4_2x1 (6) /* Single 40G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_2x1_1x4 (7) /* Dual 10G/25G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_1x2_1x2 (12) /* Single 50G on mdi0, single 50G on mdi1 */
+#define TLV_PORT_MODE_2x2_NA (13) /* Dual 50G on mdi0 */
+#define TLV_PORT_MODE_NA_2x2 (14) /* Dual 50G on mdi1 */
+#define TLV_PORT_MODE_1x4_1x2 (15) /* Single 40G on mdi0, single 50G on mdi1 */
+#define TLV_PORT_MODE_1x2_1x4 (16) /* Single 50G on mdi0, single 40G on mdi1 */
+#define TLV_PORT_MODE_1x2_2x1 (17) /* Single 50G on mdi0, dual 10G/25G on mdi1 */
+#define TLV_PORT_MODE_2x1_1x2 (18) /* Dual 10G/25G on mdi0, single 50G on mdi1 */
+
+/* Snapper-only Medford2 port modes.
+ * These modes are eftest only, to allow snapper explicit
+ * selection between multi-channel and LLPCS. In production,
+ * this selection is automatic and outside world should not
+ * care about LLPCS.
+ */
+#define TLV_PORT_MODE_2x1_2x1_LL (19) /* Dual 10G/25G on mdi0, dual 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_4x1_NA_LL (20) /* Quad 10G/25G on mdi0, low-latency PCS */
+#define TLV_PORT_MODE_NA_4x1_LL (21) /* Quad 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_1x1_NA_LL (23) /* Single 10G/25G on mdi0, low-latency PCS */
+#define TLV_PORT_MODE_1x1_1x1_LL (24) /* Single 10G/25G on mdi0, single 10G/25G on mdi1, low-latency PCS */
+#define TLV_PORT_MODE_BUG63720_DO_NOT_USE (9) /* bug63720: Do not use */
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_1x1_1x1_LL
+
+/* Deprecated Medford aliases - DO NOT USE IN NEW CODE */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8)
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9)
+
};
/* Type of the v-switch created implicitly by the firmware */
@@ -791,7 +853,7 @@ typedef struct tlv_license {
uint8_t data[];
} tlv_license_t;
-/* TSA NIC IP address configuration
+/* TSA NIC IP address configuration (DEPRECATED)
*
* Sets the TSA NIC IP address statically via configuration tool or dynamically
* via DHCP or via snooping, based on the mode selection (0=Static, 1=DHCP, 2=Snoop)
@@ -801,7 +863,7 @@ typedef struct tlv_license {
* released code yet.
*/
-#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000)
+#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000) /* DEPRECATED */
#define TLV_TSAN_IP_MODE_STATIC (0)
#define TLV_TSAN_IP_MODE_DHCP (1)
@@ -818,7 +880,7 @@ typedef struct tlv_tsan_config {
uint32_t bind_bkout; /* DEPRECATED */
} tlv_tsan_config_t;
-/* TSA Controller IP address configuration
+/* TSA Controller IP address configuration (DEPRECATED)
*
* Sets the TSA Controller IP address statically via configuration tool
*
@@ -827,7 +889,7 @@ typedef struct tlv_tsan_config {
* released code yet.
*/
-#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000)
+#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000) /* DEPRECATED */
#define TLV_MAX_TSACS (4)
typedef struct tlv_tsac_config {
@@ -838,7 +900,7 @@ typedef struct tlv_tsac_config {
uint32_t port[TLV_MAX_TSACS];
} tlv_tsac_config_t;
-/* Binding ticket
+/* Binding ticket (DEPRECATED)
*
* Sets the TSA NIC binding ticket used for binding process between the TSA NIC
* and the TSA Controller
@@ -848,7 +910,7 @@ typedef struct tlv_tsac_config {
* released code yet.
*/
-#define TLV_TAG_TMP_BINDING_TICKET (0x10240000)
+#define TLV_TAG_TMP_BINDING_TICKET (0x10240000) /* DEPRECATED */
typedef struct tlv_binding_ticket {
uint32_t tag;
@@ -873,7 +935,7 @@ typedef struct tlv_pik_sf {
uint8_t bytes[];
} tlv_pik_sf_t;
-/* CA root certificate
+/* CA root certificate (DEPRECATED)
*
* Sets the CA root certificate used for TSA Controller verification during
* TLS connection setup between the TSA NIC and the TSA Controller
@@ -883,7 +945,7 @@ typedef struct tlv_pik_sf {
* released code yet.
*/
-#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000)
+#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000) /* DEPRECATED */
typedef struct tlv_ca_root_cert {
uint32_t tag;
@@ -933,4 +995,17 @@ struct tlv_fastpd_mode {
#define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */
};
+/* L3xUDP datapath firmware UDP port configuration
+ *
+ * Sets the list of UDP ports on which the encapsulation will be handled.
+ * The number of ports in the list is implied by the length of the TLV item.
+ */
+#define TLV_TAG_L3XUDP_PORTS (0x102a0000)
+struct tlv_l3xudp_ports {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t ports[];
+#define TLV_TAG_L3XUDP_PORTS_MAX_NUM_PORTS 16
+};
+
#endif /* CI_MGMT_TLV_LAYOUT_H */
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
index 69c75700..7d27f710 100644
--- a/drivers/net/sfc/base/ef10_tx.c
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -8,7 +8,7 @@
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#if EFSYS_OPT_QSTATS
#define EFX_TX_QSTAT_INCR(_etp, _stat) \
@@ -42,10 +42,15 @@ efx_mcdi_init_txq(
EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_TXQ_SIZE(ndescs))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
npages = EFX_TXQ_NBUFS(ndescs);
if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
rc = EINVAL;
- goto fail1;
+ goto fail2;
}
(void) memset(payload, 0, sizeof (payload));
@@ -94,11 +99,13 @@ efx_mcdi_init_txq(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail2;
+ goto fail3;
}
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
@@ -176,7 +183,7 @@ ef10_tx_qcreate(
{
efx_nic_cfg_t *encp = &enp->en_nic_cfg;
uint16_t inner_csum;
- efx_qword_t desc;
+ efx_desc_t desc;
efx_rc_t rc;
_NOTE(ARGUNUSED(id))
@@ -201,19 +208,9 @@ ef10_tx_qcreate(
* a no-op TX option descriptor. See bug29981 for details.
*/
*addedp = 1;
- EFX_POPULATE_QWORD_6(desc,
- ESF_DZ_TX_DESC_IS_OPT, 1,
- ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
- ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
- (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
- ESF_DZ_TX_OPTION_IP_CSUM,
- (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
- ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
- (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
- ESF_DZ_TX_OPTION_INNER_IP_CSUM,
- (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
+ ef10_tx_qdesc_checksum_create(etp, flags, &desc);
- EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
+ EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc.ed_eq);
ef10_tx_qpush(etp, *addedp, 0);
return (0);
@@ -511,8 +508,8 @@ ef10_tx_qpush(
EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
wptr, id);
EFSYS_PIO_WRITE_BARRIER();
- EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
- etp->et_index, &oword);
+ EFX_BAR_VI_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
+ etp->et_index, &oword);
} else {
efx_dword_t dword;
@@ -527,8 +524,8 @@ ef10_tx_qpush(
EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
wptr, id);
EFSYS_PIO_WRITE_BARRIER();
- EFX_BAR_TBL_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
- etp->et_index, &dword, B_FALSE);
+ EFX_BAR_VI_WRITED2(enp, ER_DZ_TX_DESC_UPD_REG,
+ etp->et_index, &dword, B_FALSE);
}
}
@@ -626,6 +623,7 @@ ef10_tx_qdesc_tso_create(
ef10_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t tcp_mss,
__out_ecount(count) efx_desc_t *edp,
@@ -639,13 +637,14 @@ ef10_tx_qdesc_tso2_create(
EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
- EFX_POPULATE_QWORD_5(edp[0].ed_eq,
+ EFX_POPULATE_QWORD_6(edp[0].ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
EFX_POPULATE_QWORD_4(edp[1].ed_eq,
ESF_DZ_TX_DESC_IS_OPT, 1,
@@ -675,6 +674,30 @@ ef10_tx_qdesc_vlantci_create(
ESF_DZ_TX_VLAN_TAG1, tci);
}
+ void
+ef10_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp)
+{
+ _NOTE(ARGUNUSED(etp));
+
+ EFSYS_PROBE2(tx_desc_checksum_create, unsigned int, etp->et_index,
+ uint32_t, flags);
+
+ EFX_POPULATE_QWORD_6(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0,
+ ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_INNER_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0);
+}
+
__checkReturn efx_rc_t
ef10_tx_qpace(
@@ -752,4 +775,4 @@ ef10_tx_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
diff --git a/drivers/net/sfc/base/ef10_vpd.c b/drivers/net/sfc/base/ef10_vpd.c
index ad522e82..097fe1d4 100644
--- a/drivers/net/sfc/base/ef10_vpd.c
+++ b/drivers/net/sfc/base/ef10_vpd.c
@@ -10,7 +10,7 @@
#if EFSYS_OPT_VPD
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
#include "ef10_tlv_layout.h"
@@ -26,7 +26,8 @@ ef10_vpd_init(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_nic_cfg.enc_vpd_is_global) {
tag = TLV_TAG_GLOBAL_STATIC_VPD;
@@ -82,7 +83,8 @@ ef10_vpd_size(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* This function returns the total size the user should allocate
@@ -115,7 +117,8 @@ ef10_vpd_read(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_nic_cfg.enc_vpd_is_global) {
tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
@@ -133,19 +136,22 @@ ef10_vpd_read(
rc = ENOSPC;
goto fail2;
}
- memcpy(data, dvpd, dvpd_size);
+ if (dvpd != NULL)
+ memcpy(data, dvpd, dvpd_size);
/* Pad data with all-1s, consistent with update operations */
memset(data + dvpd_size, 0xff, size - dvpd_size);
- EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+ if (dvpd != NULL)
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
return (0);
fail2:
EFSYS_PROBE(fail2);
- EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+ if (dvpd != NULL)
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -167,7 +173,8 @@ ef10_vpd_verify(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/*
* Strictly you could take the view that dynamic vpd is optional.
@@ -288,7 +295,8 @@ ef10_vpd_get(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* Attempt to satisfy the request from svpd first */
if (enp->en_arch.ef10.ena_svpd_length > 0) {
@@ -334,7 +342,8 @@ ef10_vpd_set(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
/* If the provided (tag,keyword) exists in svpd, then it is readonly */
if (enp->en_arch.ef10.ena_svpd_length > 0) {
@@ -387,7 +396,8 @@ ef10_vpd_write(
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_nic_cfg.enc_vpd_is_global) {
tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
@@ -423,7 +433,8 @@ ef10_vpd_fini(
__in efx_nic_t *enp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2);
if (enp->en_arch.ef10.ena_svpd_length > 0) {
EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length,
@@ -434,6 +445,6 @@ ef10_vpd_fini(
}
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
#endif /* EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h
index fe996e7c..5108b9b1 100644
--- a/drivers/net/sfc/base/efx.h
+++ b/drivers/net/sfc/base/efx.h
@@ -40,6 +40,7 @@ typedef enum efx_family_e {
EFX_FAMILY_SIENA,
EFX_FAMILY_HUNTINGTON,
EFX_FAMILY_MEDFORD,
+ EFX_FAMILY_MEDFORD2,
EFX_FAMILY_NTYPES
} efx_family_t;
@@ -47,7 +48,8 @@ extern __checkReturn efx_rc_t
efx_family(
__in uint16_t venid,
__in uint16_t devid,
- __out efx_family_t *efp);
+ __out efx_family_t *efp,
+ __out unsigned int *membarp);
#define EFX_PCI_VENID_SFC 0x1924
@@ -69,7 +71,21 @@ efx_family(
#define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */
#define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */
-#define EFX_MEM_BAR 2
+#define EFX_PCI_DEVID_MEDFORD2_PF_UNINIT 0x0B13
+#define EFX_PCI_DEVID_MEDFORD2 0x0B03 /* SFC9250 PF */
+#define EFX_PCI_DEVID_MEDFORD2_VF 0x1B03 /* SFC9250 VF */
+
+
+#define EFX_MEM_BAR_SIENA 2
+
+#define EFX_MEM_BAR_HUNTINGTON_PF 2
+#define EFX_MEM_BAR_HUNTINGTON_VF 0
+
+#define EFX_MEM_BAR_MEDFORD_PF 2
+#define EFX_MEM_BAR_MEDFORD_VF 0
+
+#define EFX_MEM_BAR_MEDFORD2 0
+
/* Error codes */
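
Editor's note: under the new scheme a driver obtains both the family and the BAR to map from efx_family() instead of the removed fixed EFX_MEM_BAR. A sketch of the expected usage with this patch's Medford2 IDs (the exact return mapping is inferred from the defines above, not quoted from the patch):

	efx_family_t family;
	unsigned int membar;

	if (efx_family(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2,
	    &family, &membar) == 0) {
		/* family == EFX_FAMILY_MEDFORD2, membar == EFX_MEM_BAR_MEDFORD2 */
	}
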
@@ -113,9 +129,22 @@ efx_nic_create(
__in efsys_lock_t *eslp,
__deref_out efx_nic_t **enpp);
+/* EFX_FW_VARIANT codes map one to one on MC_CMD_FW codes */
+typedef enum efx_fw_variant_e {
+ EFX_FW_VARIANT_FULL_FEATURED,
+ EFX_FW_VARIANT_LOW_LATENCY,
+ EFX_FW_VARIANT_PACKED_STREAM,
+ EFX_FW_VARIANT_HIGH_TX_RATE,
+ EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1,
+ EFX_FW_VARIANT_RULES_ENGINE,
+ EFX_FW_VARIANT_DPDK,
+ EFX_FW_VARIANT_DONT_CARE = 0xffffffff
+} efx_fw_variant_t;
+
extern __checkReturn efx_rc_t
efx_nic_probe(
- __in efx_nic_t *enp);
+ __in efx_nic_t *enp,
+ __in efx_fw_variant_t efv);
extern __checkReturn efx_rc_t
efx_nic_init(
@@ -171,7 +200,7 @@ efx_nic_check_pcie_link_speed(
#if EFSYS_OPT_MCDI
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/* Huntington and Medford require MCDIv2 commands */
#define WITH_MCDI_V2 1
#endif
@@ -307,7 +336,7 @@ efx_intr_fini(
#if EFSYS_OPT_MAC_STATS
-/* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */
+/* START MKCONFIG GENERATED EfxHeaderMacBlock ea466a9bc8789994 */
typedef enum efx_mac_stat_e {
EFX_MAC_RX_OCTETS,
EFX_MAC_RX_PKTS,
@@ -390,6 +419,31 @@ typedef enum efx_mac_stat_e {
EFX_MAC_VADAPTER_TX_BAD_PACKETS,
EFX_MAC_VADAPTER_TX_BAD_BYTES,
EFX_MAC_VADAPTER_TX_OVERFLOW,
+ EFX_MAC_FEC_UNCORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_ERRORS,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE0,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE1,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE2,
+ EFX_MAC_FEC_CORRECTED_SYMBOLS_LANE3,
+ EFX_MAC_CTPIO_VI_BUSY_FALLBACK,
+ EFX_MAC_CTPIO_LONG_WRITE_SUCCESS,
+ EFX_MAC_CTPIO_MISSING_DBELL_FAIL,
+ EFX_MAC_CTPIO_OVERFLOW_FAIL,
+ EFX_MAC_CTPIO_UNDERFLOW_FAIL,
+ EFX_MAC_CTPIO_TIMEOUT_FAIL,
+ EFX_MAC_CTPIO_NONCONTIG_WR_FAIL,
+ EFX_MAC_CTPIO_FRM_CLOBBER_FAIL,
+ EFX_MAC_CTPIO_INVALID_WR_FAIL,
+ EFX_MAC_CTPIO_VI_CLOBBER_FALLBACK,
+ EFX_MAC_CTPIO_UNQUALIFIED_FALLBACK,
+ EFX_MAC_CTPIO_RUNT_FALLBACK,
+ EFX_MAC_CTPIO_SUCCESS,
+ EFX_MAC_CTPIO_FALLBACK,
+ EFX_MAC_CTPIO_POISON,
+ EFX_MAC_CTPIO_ERASE,
+ EFX_MAC_RXDP_SCATTER_DISABLED_TRUNC,
+ EFX_MAC_RXDP_HLB_IDLE,
+ EFX_MAC_RXDP_HLB_TIMEOUT,
EFX_MAC_NSTATS
} efx_mac_stat_t;
@@ -408,11 +462,16 @@ typedef enum efx_link_mode_e {
EFX_LINK_1000FDX,
EFX_LINK_10000FDX,
EFX_LINK_40000FDX,
+ EFX_LINK_25000FDX,
+ EFX_LINK_50000FDX,
+ EFX_LINK_100000FDX,
EFX_LINK_NMODES
} efx_link_mode_t;
#define EFX_MAC_ADDR_LEN 6
+#define EFX_VNI_OR_VSID_LEN 3
+
#define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01)
#define EFX_MAC_MULTICAST_LIST_MAX 256
@@ -536,7 +595,6 @@ efx_mac_stats_get_mask(
((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \
(1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1))))
-#define EFX_MAC_STATS_SIZE 0x400
extern __checkReturn efx_rc_t
efx_mac_stats_clear(
@@ -545,8 +603,8 @@ efx_mac_stats_clear(
/*
* Upload mac statistics supported by the hardware into the given buffer.
*
- * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes,
- * and page aligned.
+ * The DMA buffer must be 4Kbyte aligned and sized to hold at least
+ * efx_nic_cfg_t::enc_mac_stats_nstats 64bit counters.
*
* The hardware will only DMA statistics that it understands (of course).
* Drivers should not make any assumptions about which statistics are
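
Editor's note: with the fixed EFX_MAC_STATS_SIZE removed, a caller now derives the DMA buffer size from the NIC configuration. A sketch under the stated constraints (the size rounding below is generic C, not a libefx API, and the allocation itself must still be 4 KiB aligned):

	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	size_t stats_size = encp->enc_mac_stats_nstats * sizeof (uint64_t);

	stats_size = (stats_size + 4095) & ~(size_t)4095;	/* round up to 4 KiB */
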
@@ -603,7 +661,7 @@ efx_mon_init(
#define EFX_MON_STATS_PAGE_SIZE 0x100
#define EFX_MON_MASK_ELEMENT_SIZE 32
-/* START MKCONFIG GENERATED MonitorHeaderStatsBlock aa0233c80156308e */
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 400fdb0517af1fca */
typedef enum efx_mon_stat_e {
EFX_MON_STAT_2_5V,
EFX_MON_STAT_VCCP1,
@@ -684,6 +742,10 @@ typedef enum efx_mon_stat_e {
EFX_MON_STAT_BOARD_BACK_TEMP,
EFX_MON_STAT_I1V8,
EFX_MON_STAT_I2V5,
+ EFX_MON_STAT_I3V3,
+ EFX_MON_STAT_I12V0,
+ EFX_MON_STAT_1_3V,
+ EFX_MON_STAT_I1V3,
EFX_MON_NSTATS
} efx_mon_stat_t;
@@ -788,6 +850,9 @@ typedef enum efx_loopback_type_e {
EFX_LOOPBACK_SD_FEP1_5_WS = 32,
EFX_LOOPBACK_SD_FEP_WS = 33,
EFX_LOOPBACK_SD_FES_WS = 34,
+ EFX_LOOPBACK_AOE_INT_NEAR = 35,
+ EFX_LOOPBACK_DATA_WS = 36,
+ EFX_LOOPBACK_FORCE_EXT_LINK = 37,
EFX_LOOPBACK_NTYPES
} efx_loopback_type_t;
@@ -843,6 +908,16 @@ typedef enum efx_phy_cap_type_e {
EFX_PHY_CAP_ASYM,
EFX_PHY_CAP_AN,
EFX_PHY_CAP_40000FDX,
+ EFX_PHY_CAP_DDM,
+ EFX_PHY_CAP_100000FDX,
+ EFX_PHY_CAP_25000FDX,
+ EFX_PHY_CAP_50000FDX,
+ EFX_PHY_CAP_BASER_FEC,
+ EFX_PHY_CAP_BASER_FEC_REQUESTED,
+ EFX_PHY_CAP_RS_FEC,
+ EFX_PHY_CAP_RS_FEC_REQUESTED,
+ EFX_PHY_CAP_25G_BASER_FEC,
+ EFX_PHY_CAP_25G_BASER_FEC_REQUESTED,
EFX_PHY_CAP_NTYPES
} efx_phy_cap_type_t;
@@ -1080,6 +1155,13 @@ typedef enum efx_tunnel_protocol_e {
EFX_TUNNEL_NPROTOS
} efx_tunnel_protocol_t;
+typedef enum efx_vi_window_shift_e {
+ EFX_VI_WINDOW_SHIFT_INVALID = 0,
+ EFX_VI_WINDOW_SHIFT_8K = 13,
+ EFX_VI_WINDOW_SHIFT_16K = 14,
+ EFX_VI_WINDOW_SHIFT_64K = 16,
+} efx_vi_window_shift_t;
+
typedef struct efx_nic_cfg_s {
uint32_t enc_board_type;
uint32_t enc_phy_type;
@@ -1093,6 +1175,7 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32];
#endif
unsigned int enc_features;
+ efx_vi_window_shift_t enc_vi_window_shift;
uint8_t enc_mac_addr[6];
uint8_t enc_port; /* PHY port number */
uint32_t enc_intr_vec_base;
@@ -1112,6 +1195,17 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_rx_buf_align_start;
uint32_t enc_rx_buf_align_end;
uint32_t enc_rx_scale_max_exclusive_contexts;
+ /*
+ * Mask of supported hash algorithms.
+ * Hash algorithm types are used as the bit indices.
+ */
+ uint32_t enc_rx_scale_hash_alg_mask;
+ /*
+ * Indicates whether port numbers can be included in the
+ * input data for hash computation.
+ */
+ boolean_t enc_rx_scale_l4_hash_supported;
+ boolean_t enc_rx_scale_additional_modes_supported;
#if EFSYS_OPT_LOOPBACK
efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
#endif /* EFSYS_OPT_LOOPBACK */
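As an illustration (assuming nothing beyond the declarations in this header), a driver could probe the new algorithm mask like this, since efx_rx_hash_alg_t values serve as the bit indices:

static boolean_t
example_toeplitz_supported(efx_nic_t *enp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);

	/* Hash algorithm enum values are the bit indices of the mask */
	return ((encp->enc_rx_scale_hash_alg_mask &
	    (1U << EFX_RX_HASHALG_TOEPLITZ)) != 0 ? B_TRUE : B_FALSE);
}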
@@ -1137,11 +1231,11 @@ typedef struct efx_nic_cfg_s {
#if EFSYS_OPT_BIST
uint32_t enc_bist_mask;
#endif /* EFSYS_OPT_BIST */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
uint32_t enc_pf;
uint32_t enc_vf;
uint32_t enc_privilege_mask;
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
boolean_t enc_bug26807_workaround;
boolean_t enc_bug35388_workaround;
boolean_t enc_bug41750_workaround;
@@ -1165,6 +1259,7 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_tx_tso_tcp_header_offset_limit;
boolean_t enc_fw_assisted_tso_enabled;
boolean_t enc_fw_assisted_tso_v2_enabled;
+ boolean_t enc_fw_assisted_tso_v2_encap_enabled;
/* Number of TSO contexts on the NIC (FATSOv2) */
uint32_t enc_fw_assisted_tso_v2_n_contexts;
boolean_t enc_hw_tx_insert_vlan_enabled;
@@ -1178,6 +1273,8 @@ typedef struct efx_nic_cfg_s {
boolean_t enc_init_evq_v2_supported;
boolean_t enc_rx_packed_stream_supported;
boolean_t enc_rx_var_packed_stream_supported;
+ boolean_t enc_rx_es_super_buffer_supported;
+ boolean_t enc_fw_subvariant_no_tx_csum_supported;
boolean_t enc_pm_and_rxdp_counters;
boolean_t enc_mac_stats_40g_tx_size_bins;
uint32_t enc_tunnel_encapsulations_supported;
@@ -1196,6 +1293,14 @@ typedef struct efx_nic_cfg_s {
uint32_t enc_max_pcie_link_gen;
/* Firmware verifies integrity of NVRAM updates */
uint32_t enc_nvram_update_verify_result_supported;
+ /* Firmware support for extended MAC_STATS buffer */
+ uint32_t enc_mac_stats_nstats;
+ boolean_t enc_fec_counters;
+ boolean_t enc_hlb_counters;
+ /* Firmware support for "FLAG" and "MARK" filter actions */
+ boolean_t enc_filter_action_flag_supported;
+ boolean_t enc_filter_action_mark_supported;
+ uint32_t enc_filter_action_mark_max;
} efx_nic_cfg_t;
#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
@@ -1210,6 +1315,13 @@ extern const efx_nic_cfg_t *
efx_nic_cfg_get(
__in efx_nic_t *enp);
+/* RxDPCPU firmware id values by which FW variant can be identified */
+#define EFX_RXDP_FULL_FEATURED_FW_ID 0x0
+#define EFX_RXDP_LOW_LATENCY_FW_ID 0x1
+#define EFX_RXDP_PACKED_STREAM_FW_ID 0x2
+#define EFX_RXDP_RULES_ENGINE_FW_ID 0x5
+#define EFX_RXDP_DPDK_FW_ID 0x6
+
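A hypothetical helper (illustrative only) mapping these firmware ids to printable variant names:

static const char *
example_rxdp_fw_id_name(uint32_t fw_id)
{
	switch (fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:	return ("full-featured");
	case EFX_RXDP_LOW_LATENCY_FW_ID:	return ("low-latency");
	case EFX_RXDP_PACKED_STREAM_FW_ID:	return ("packed-stream");
	case EFX_RXDP_RULES_ENGINE_FW_ID:	return ("rules-engine");
	case EFX_RXDP_DPDK_FW_ID:		return ("dpdk");
	default:				return ("unknown");
	}
}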
typedef struct efx_nic_fw_info_s {
/* Basic FW version information */
uint16_t enfi_mc_fw_version[4];
@@ -1498,6 +1610,92 @@ efx_bootcfg_write(
#endif /* EFSYS_OPT_BOOTCFG */
+#if EFSYS_OPT_IMAGE_LAYOUT
+
+#include "ef10_signed_image_layout.h"
+
+/*
+ * Image header used in unsigned and signed image layouts (see SF-102785-PS).
+ *
+ * NOTE:
+ * The image header format is extensible. However, older drivers require an
+ * exact match of image header version and header length when validating and
+ * writing firmware images.
+ *
+ * To avoid breaking backward compatibility, we use the upper bits of the
+ * controller version fields to contain an extra version number used for
+ * combined bootROM and UEFI ROM images on EF10 and later (to hold the UEFI ROM
+ * version). See bug39254 and SF-102785-PS for details.
+ */
+typedef struct efx_image_header_s {
+ uint32_t eih_magic;
+ uint32_t eih_version;
+ uint32_t eih_type;
+ uint32_t eih_subtype;
+ uint32_t eih_code_size;
+ uint32_t eih_size;
+ union {
+ uint32_t eih_controller_version_min;
+ struct {
+ uint16_t eih_controller_version_min_short;
+ uint8_t eih_extra_version_a;
+ uint8_t eih_extra_version_b;
+ };
+ };
+ union {
+ uint32_t eih_controller_version_max;
+ struct {
+ uint16_t eih_controller_version_max_short;
+ uint8_t eih_extra_version_c;
+ uint8_t eih_extra_version_d;
+ };
+ };
+ uint16_t eih_code_version_a;
+ uint16_t eih_code_version_b;
+ uint16_t eih_code_version_c;
+ uint16_t eih_code_version_d;
+} efx_image_header_t;
+
+#define EFX_IMAGE_HEADER_SIZE (40)
+#define EFX_IMAGE_HEADER_VERSION (4)
+#define EFX_IMAGE_HEADER_MAGIC (0x106F1A5)
+
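A sketch of reading the combined (UEFI ROM) extra version back out of the union above; the a.b.c.d component ordering here is an assumption for illustration, with SF-102785-PS holding the authoritative encoding:

static uint32_t
example_uefi_extra_version(const efx_image_header_t *hdrp)
{
	/* Assumed a.b.c.d ordering; see SF-102785-PS for the real layout */
	return (((uint32_t)hdrp->eih_extra_version_a << 24) |
	    ((uint32_t)hdrp->eih_extra_version_b << 16) |
	    ((uint32_t)hdrp->eih_extra_version_c << 8) |
	    (uint32_t)hdrp->eih_extra_version_d);
}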
+
+typedef struct efx_image_trailer_s {
+ uint32_t eit_crc;
+} efx_image_trailer_t;
+
+#define EFX_IMAGE_TRAILER_SIZE (4)
+
+typedef enum efx_image_format_e {
+ EFX_IMAGE_FORMAT_NO_IMAGE,
+ EFX_IMAGE_FORMAT_INVALID,
+ EFX_IMAGE_FORMAT_UNSIGNED,
+ EFX_IMAGE_FORMAT_SIGNED,
+} efx_image_format_t;
+
+typedef struct efx_image_info_s {
+ efx_image_format_t eii_format;
+ uint8_t * eii_imagep;
+ size_t eii_image_size;
+ efx_image_header_t * eii_headerp;
+} efx_image_info_t;
+
+extern __checkReturn efx_rc_t
+efx_check_reflash_image(
+ __in void *bufferp,
+ __in uint32_t buffer_size,
+ __out efx_image_info_t *infop);
+
+extern __checkReturn efx_rc_t
+efx_build_signed_image_write_buffer(
+ __out uint8_t *bufferp,
+ __in uint32_t buffer_size,
+ __in efx_image_info_t *infop,
+ __out efx_image_header_t **headerpp);
+
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
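A minimal sketch of the intended call sequence (wbuf/wbuf_size are assumed caller-provided storage): validate a candidate image, then build the write buffer for signed images only:

static efx_rc_t
example_prepare_reflash(void *imagep, uint32_t image_size,
    uint8_t *wbuf, uint32_t wbuf_size)
{
	efx_image_info_t info;
	efx_image_header_t *hdrp;
	efx_rc_t rc;

	if ((rc = efx_check_reflash_image(imagep, image_size, &info)) != 0)
		return (rc);

	/* Unsigned images can be written as-is; signed ones need a buffer */
	if (info.eii_format == EFX_IMAGE_FORMAT_SIGNED)
		rc = efx_build_signed_image_write_buffer(wbuf, wbuf_size,
		    &info, &hdrp);

	return (rc);
}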
#if EFSYS_OPT_DIAG
typedef enum efx_pattern_type_t {
@@ -1674,7 +1872,7 @@ typedef __checkReturn boolean_t
__in uint32_t size,
__in uint16_t flags);
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
/*
* Packed stream mode is documented in SF-112241-TC.
@@ -1684,6 +1882,13 @@ typedef __checkReturn boolean_t
* packets are put there in a continuous stream.
* The main advantage of such an approach is that RX queue refilling
* happens much less frequently.
+ *
+ * Equal stride packed stream mode is documented in SF-119419-TC.
+ * The general idea is to keep the advantages of the packed stream,
+ * but avoid indirection in packet representation.
+ * The main advantages of such an approach are that RX queue refilling
+ * happens much less frequently and that packet buffers are independent
+ * from the upper layers' point of view.
*/
typedef __checkReturn boolean_t
@@ -1784,7 +1989,7 @@ typedef __checkReturn boolean_t
typedef struct efx_ev_callbacks_s {
efx_initialized_ev_t eec_initialized;
efx_rx_ev_t eec_rx;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
efx_rx_ps_ev_t eec_rx_ps;
#endif
efx_tx_ev_t eec_tx;
@@ -1888,14 +2093,35 @@ efx_rx_scatter_enable(
typedef enum efx_rx_hash_alg_e {
EFX_RX_HASHALG_LFSR = 0,
- EFX_RX_HASHALG_TOEPLITZ
+ EFX_RX_HASHALG_TOEPLITZ,
+ EFX_RX_HASHALG_PACKED_STREAM,
+ EFX_RX_NHASHALGS
} efx_rx_hash_alg_t;
+/*
+ * Legacy hash type flags.
+ *
+ * They represent standard tuples for distinct traffic classes.
+ */
#define EFX_RX_HASH_IPV4 (1U << 0)
#define EFX_RX_HASH_TCPIPV4 (1U << 1)
#define EFX_RX_HASH_IPV6 (1U << 2)
#define EFX_RX_HASH_TCPIPV6 (1U << 3)
+#define EFX_RX_HASH_LEGACY_MASK \
+ (EFX_RX_HASH_IPV4 | \
+ EFX_RX_HASH_TCPIPV4 | \
+ EFX_RX_HASH_IPV6 | \
+ EFX_RX_HASH_TCPIPV6)
+
+/*
+ * The type of the argument used by efx_rx_scale_mode_set() to
+ * provide a means for client drivers to configure hashing.
+ *
+ * A properly constructed value can be either:
+ * - a combination of legacy flags
+ * - a combination of EFX_RX_HASH() flags
+ */
typedef unsigned int efx_rx_hash_type_t;
typedef enum efx_rx_hash_support_e {
@@ -1914,6 +2140,92 @@ typedef enum efx_rx_scale_context_type_e {
EFX_RX_SCALE_SHARED /* Read-only key/indirection table */
} efx_rx_scale_context_type_t;
+/*
+ * Traffic classes eligible for hash computation.
+ *
+ * Select packet headers used in computing the receive hash.
+ * This uses the same encoding as the RSS_MODES field of
+ * MC_CMD_RSS_CONTEXT_SET_FLAGS.
+ */
+#define EFX_RX_CLASS_IPV4_TCP_LBN 8
+#define EFX_RX_CLASS_IPV4_TCP_WIDTH 4
+#define EFX_RX_CLASS_IPV4_UDP_LBN 12
+#define EFX_RX_CLASS_IPV4_UDP_WIDTH 4
+#define EFX_RX_CLASS_IPV4_LBN 16
+#define EFX_RX_CLASS_IPV4_WIDTH 4
+#define EFX_RX_CLASS_IPV6_TCP_LBN 20
+#define EFX_RX_CLASS_IPV6_TCP_WIDTH 4
+#define EFX_RX_CLASS_IPV6_UDP_LBN 24
+#define EFX_RX_CLASS_IPV6_UDP_WIDTH 4
+#define EFX_RX_CLASS_IPV6_LBN 28
+#define EFX_RX_CLASS_IPV6_WIDTH 4
+
+#define EFX_RX_NCLASSES 6
+
+/*
+ * Ancillary flags used to construct generic hash tuples.
+ * This uses the same encoding as RSS_MODE_HASH_SELECTOR.
+ */
+#define EFX_RX_CLASS_HASH_SRC_ADDR (1U << 0)
+#define EFX_RX_CLASS_HASH_DST_ADDR (1U << 1)
+#define EFX_RX_CLASS_HASH_SRC_PORT (1U << 2)
+#define EFX_RX_CLASS_HASH_DST_PORT (1U << 3)
+
+/*
+ * Generic hash tuples.
+ *
+ * They express combinations of packet fields
+ * which can contribute to the hash value for
+ * a particular traffic class.
+ */
+#define EFX_RX_CLASS_HASH_DISABLE 0
+
+#define EFX_RX_CLASS_HASH_1TUPLE_SRC EFX_RX_CLASS_HASH_SRC_ADDR
+#define EFX_RX_CLASS_HASH_1TUPLE_DST EFX_RX_CLASS_HASH_DST_ADDR
+
+#define EFX_RX_CLASS_HASH_2TUPLE \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_DST_ADDR)
+
+#define EFX_RX_CLASS_HASH_2TUPLE_SRC \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_SRC_PORT)
+
+#define EFX_RX_CLASS_HASH_2TUPLE_DST \
+ (EFX_RX_CLASS_HASH_DST_ADDR | \
+ EFX_RX_CLASS_HASH_DST_PORT)
+
+#define EFX_RX_CLASS_HASH_4TUPLE \
+ (EFX_RX_CLASS_HASH_SRC_ADDR | \
+ EFX_RX_CLASS_HASH_DST_ADDR | \
+ EFX_RX_CLASS_HASH_SRC_PORT | \
+ EFX_RX_CLASS_HASH_DST_PORT)
+
+#define EFX_RX_CLASS_HASH_NTUPLES 7
+
+/*
+ * Hash flag constructor.
+ *
+ * Resulting flags encode hash tuples for specific traffic classes.
+ * Client drivers are encouraged to use these flags to form
+ * a hash type value.
+ */
+#define EFX_RX_HASH(_class, _tuple) \
+ EFX_INSERT_FIELD_NATIVE32(0, 31, \
+ EFX_RX_CLASS_##_class, EFX_RX_CLASS_HASH_##_tuple)
+
+/*
+ * The maximum number of EFX_RX_HASH() flags.
+ */
+#define EFX_RX_HASH_NFLAGS (EFX_RX_NCLASSES * EFX_RX_CLASS_HASH_NTUPLES)
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_hash_flags_get(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t hash_alg,
+ __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags,
+ __out unsigned int *nflagsp);
+
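A sketch of forming a hash type from EFX_RX_HASH() flags (assumptions: Toeplitz and these tuples are supported on the NIC; checking the chosen flags against the returned array is elided):

static efx_rc_t
example_pick_hash_type(efx_nic_t *enp, efx_rx_hash_type_t *typep)
{
	unsigned int flags[EFX_RX_HASH_NFLAGS];
	unsigned int nflags;
	efx_rc_t rc;

	rc = efx_rx_scale_hash_flags_get(enp, EFX_RX_HASHALG_TOEPLITZ,
	    flags, &nflags);
	if (rc != 0)
		return (rc);

	/* 4-tuple hashing for IPv4 TCP, 2-tuple for other IPv4 traffic */
	*typep = EFX_RX_HASH(IPV4_TCP, 4TUPLE) | EFX_RX_HASH(IPV4, 2TUPLE);
	return (0);
}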
extern __checkReturn efx_rc_t
efx_rx_hash_default_support_get(
__in efx_nic_t *enp,
@@ -1984,6 +2296,7 @@ efx_pseudo_hdr_pkt_length_get(
typedef enum efx_rxq_type_e {
EFX_RXQ_TYPE_DEFAULT,
EFX_RXQ_TYPE_PACKED_STREAM,
+ EFX_RXQ_TYPE_ES_SUPER_BUFFER,
EFX_RXQ_NTYPES
} efx_rxq_type_t;
@@ -2037,6 +2350,28 @@ efx_rx_qcreate_packed_stream(
#endif
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+/* Maximum head-of-line block timeout in nanoseconds */
+#define EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX (400U * 1000 * 1000)
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate_es_super_buffer(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t n_bufs_per_desc,
+ __in uint32_t max_dma_len,
+ __in uint32_t buf_stride,
+ __in uint32_t hol_block_timeout,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+#endif
+
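An illustrative call with assumed buffer geometry (32 buffers per descriptor at a 2KB stride, 1536-byte DMA limit, and a 100ms head-of-line block timeout, well under the 400ms maximum above):

static efx_rc_t
example_create_essb_rxq(efx_nic_t *enp, unsigned int index,
    unsigned int label, efsys_mem_t *esmp, size_t ndescs,
    efx_evq_t *eep, efx_rxq_t **erpp)
{
	return (efx_rx_qcreate_es_super_buffer(enp, index, label,
	    32,			/* n_bufs_per_desc */
	    1536,		/* max_dma_len */
	    2048,		/* buf_stride */
	    100U * 1000 * 1000,	/* hol_block_timeout: 100ms in ns */
	    esmp, ndescs, 0, eep, erpp));
}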
typedef struct efx_buffer_s {
efsys_dma_addr_t eb_addr;
size_t eb_size;
@@ -2226,6 +2561,7 @@ extern void
efx_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t tcp_mss,
__out_ecount(count) efx_desc_t *edp,
@@ -2237,6 +2573,12 @@ efx_tx_qdesc_vlantci_create(
__in uint16_t tci,
__out efx_desc_t *edp);
+extern void
+efx_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp);
+
#if EFSYS_OPT_QSTATS
#if EFSYS_OPT_NAMES
@@ -2285,6 +2627,10 @@ efx_tx_qdestroy(
#define EFX_FILTER_FLAG_RX 0x08
/* Filter is for TX */
#define EFX_FILTER_FLAG_TX 0x10
+/* Set match flag on the received packet */
+#define EFX_FILTER_FLAG_ACTION_FLAG 0x20
+/* Set match mark on the received packet */
+#define EFX_FILTER_FLAG_ACTION_MARK 0x40
typedef uint8_t efx_filter_flags_t;
@@ -2313,10 +2659,19 @@ typedef uint8_t efx_filter_flags_t;
#define EFX_FILTER_MATCH_OUTER_VID 0x00000100
/* Match by IP transport protocol */
#define EFX_FILTER_MATCH_IP_PROTO 0x00000200
+/* Match by VNI or VSID */
+#define EFX_FILTER_MATCH_VNI_OR_VSID 0x00000800
+/* For encapsulated packets, match by inner frame local MAC address */
+#define EFX_FILTER_MATCH_IFRM_LOC_MAC 0x00010000
/* For encapsulated packets, match all multicast inner frames */
#define EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST 0x01000000
/* For encapsulated packets, match all unicast inner frames */
#define EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST 0x02000000
+/*
+ * Match by encap type, this flag does not correspond to
+ * the MCDI match flags and any unoccupied value may be used
+ */
+#define EFX_FILTER_MATCH_ENCAP_TYPE 0x20000000
/* Match otherwise-unmatched multicast and broadcast packets */
#define EFX_FILTER_MATCH_UNKNOWN_MCAST_DST 0x40000000
/* Match otherwise-unmatched unicast packets */
@@ -2359,6 +2714,9 @@ typedef struct efx_filter_spec_s {
uint16_t efs_rem_port;
efx_oword_t efs_rem_host;
efx_oword_t efs_loc_host;
+ uint8_t efs_vni_or_vsid[EFX_VNI_OR_VSID_LEN];
+ uint8_t efs_ifrm_loc_mac[EFX_MAC_ADDR_LEN];
+ uint32_t efs_mark;
} efx_filter_spec_t;
@@ -2454,6 +2812,13 @@ efx_filter_spec_set_encap_type(
__in efx_tunnel_protocol_t encap_type,
__in efx_filter_inner_frame_match_t inner_frame_match);
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_vxlan_full(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vxlan_id,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr);
+
#if EFSYS_OPT_RX_SCALE
extern __checkReturn efx_rc_t
efx_filter_spec_set_rss_context(
@@ -2659,6 +3024,38 @@ efx_tunnel_reconfigure(
#endif /* EFSYS_OPT_TUNNEL */
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+/**
+ * Firmware subvariant choice options.
+ *
+ * The firmware may be switched to the no-Tx-checksum subvariant if all
+ * attached drivers are either preboot or firmware subvariant aware and
+ * no VIs are allocated. It may always be switched back to default,
+ * either explicitly using a set request or implicitly when an unaware
+ * driver attaches. If switching happens while a driver is attached,
+ * that driver gets an MC_REBOOT event and should recreate its datapath.
+ *
+ * See SF-119419-TC DPDK Firmware Driver Interface and
+ * SF-109306-TC EF10 for Driver Writers for details.
+ */
+typedef enum efx_nic_fw_subvariant_e {
+ EFX_NIC_FW_SUBVARIANT_DEFAULT = 0,
+ EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM = 1,
+ EFX_NIC_FW_SUBVARIANT_NTYPES
+} efx_nic_fw_subvariant_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_fw_subvariant(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_subvariant_t *subvariantp);
+
+extern __checkReturn efx_rc_t
+efx_nic_set_fw_subvariant(
+ __in efx_nic_t *enp,
+ __in efx_nic_fw_subvariant_t subvariant);
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
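A minimal sketch of using this pair of calls: switch to the no-Tx-checksum subvariant only when it is not already selected:

static efx_rc_t
example_enable_no_tx_csum(efx_nic_t *enp)
{
	efx_nic_fw_subvariant_t cur;
	efx_rc_t rc;

	if ((rc = efx_nic_get_fw_subvariant(enp, &cur)) != 0)
		return (rc);

	if (cur == EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM)
		return (0);

	/* Attached drivers see MC_REBOOT and must recreate the datapath */
	return (efx_nic_set_fw_subvariant(enp,
	    EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM));
}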
#ifdef __cplusplus
}
diff --git a/drivers/net/sfc/base/efx_bootcfg.c b/drivers/net/sfc/base/efx_bootcfg.c
index 0f71936f..715e18e8 100644
--- a/drivers/net/sfc/base/efx_bootcfg.c
+++ b/drivers/net/sfc/base/efx_bootcfg.c
@@ -68,6 +68,20 @@ efx_bootcfg_sector_info(
}
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2: {
+ /* Shared partition (array indexed by PF) */
+ max_size = BOOTCFG_PER_PF;
+ count = BOOTCFG_PF_COUNT;
+ if (pf >= count) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ offset = max_size * pf;
+ break;
+ }
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -82,6 +96,10 @@ efx_bootcfg_sector_info(
return (0);
+#if EFSYS_OPT_MEDFORD2
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
#if EFSYS_OPT_MEDFORD
fail2:
EFSYS_PROBE(fail2);
@@ -191,19 +209,25 @@ efx_bootcfg_copy_sector(
size_t used_bytes;
efx_rc_t rc;
+ /* Minimum buffer is checksum byte and DHCP_END terminator */
+ if (data_size < 2) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
/* Verify that the area is correctly formatted and checksummed */
rc = efx_bootcfg_verify(enp, sector, sector_length,
&used_bytes);
if (!handle_format_errors) {
if (rc != 0)
- goto fail1;
+ goto fail2;
if ((used_bytes < 2) ||
(sector[used_bytes - 1] != DHCP_END)) {
/* Block too short, or DHCP_END missing */
rc = ENOENT;
- goto fail2;
+ goto fail3;
}
}
@@ -237,9 +261,13 @@ efx_bootcfg_copy_sector(
*/
if (used_bytes > data_size) {
rc = ENOSPC;
- goto fail3;
+ goto fail4;
}
- memcpy(data, sector, used_bytes);
+
+ data[0] = 0; /* checksum, updated below */
+
+ /* Copy all after the checksum to the target buffer */
+ memcpy(data + 1, sector + 1, used_bytes - 1);
/* Zero out the unused portion of the target buffer */
if (used_bytes < data_size)
@@ -253,6 +281,8 @@ efx_bootcfg_copy_sector(
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
fail2:
@@ -277,20 +307,31 @@ efx_bootcfg_read(
efx_rc_t rc;
uint32_t sector_number;
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ /* Minimum buffer is checksum byte and DHCP_END terminator */
+ if (size < 2) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
sector_number = enp->en_nic_cfg.enc_pf;
#else
sector_number = 0;
#endif
rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
if (rc != 0)
- goto fail1;
+ goto fail2;
/* The bootcfg sector may be stored in a (larger) shared partition */
rc = efx_bootcfg_sector_info(enp, sector_number,
NULL, &sector_offset, &sector_length);
if (rc != 0)
- goto fail2;
+ goto fail3;
+
+ if (sector_length < 2) {
+ rc = EINVAL;
+ goto fail4;
+ }
if (sector_length > BOOTCFG_MAX_SIZE)
sector_length = BOOTCFG_MAX_SIZE;
@@ -298,7 +339,7 @@ efx_bootcfg_read(
if (sector_offset + sector_length > partn_length) {
/* Partition is too small */
rc = EFBIG;
- goto fail3;
+ goto fail5;
}
/*
@@ -311,28 +352,28 @@ efx_bootcfg_read(
EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
if (payload == NULL) {
rc = ENOMEM;
- goto fail4;
+ goto fail6;
}
} else
payload = (uint8_t *)data;
if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
- goto fail5;
+ goto fail7;
if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
sector_offset, (caddr_t)payload, sector_length)) != 0) {
(void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
- goto fail6;
+ goto fail8;
}
if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
- goto fail7;
+ goto fail9;
/* Verify that the area is correctly formatted and checksummed */
rc = efx_bootcfg_verify(enp, payload, sector_length,
&used_bytes);
if (rc != 0 || used_bytes == 0) {
- payload[0] = (uint8_t)(~DHCP_END & 0xff);
+ payload[0] = 0;
payload[1] = DHCP_END;
used_bytes = 2;
}
@@ -347,10 +388,8 @@ efx_bootcfg_read(
* so reinitialise the sector if there isn't room for the character.
*/
if (payload[used_bytes - 1] != DHCP_END) {
- if (used_bytes + 1 > sector_length) {
- payload[0] = 0;
+ if (used_bytes >= sector_length)
used_bytes = 1;
- }
payload[used_bytes] = DHCP_END;
++used_bytes;
@@ -362,10 +401,14 @@ efx_bootcfg_read(
*/
if (used_bytes > size) {
rc = ENOSPC;
- goto fail8;
+ goto fail10;
}
+
+ data[0] = 0; /* checksum, updated below */
+
if (sector_length > size) {
- memcpy(data, payload, used_bytes);
+ /* Copy all after the checksum to the target buffer */
+ memcpy(data + 1, payload + 1, used_bytes - 1);
EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
}
@@ -381,16 +424,20 @@ efx_bootcfg_read(
return (0);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
fail8:
EFSYS_PROBE(fail8);
fail7:
EFSYS_PROBE(fail7);
+ if (sector_length > size)
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
fail6:
EFSYS_PROBE(fail6);
fail5:
EFSYS_PROBE(fail5);
- if (sector_length > size)
- EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
fail4:
EFSYS_PROBE(fail4);
fail3:
@@ -418,7 +465,7 @@ efx_bootcfg_write(
efx_rc_t rc;
uint32_t sector_number;
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
sector_number = enp->en_nic_cfg.enc_pf;
#else
sector_number = 0;
diff --git a/drivers/net/sfc/base/efx_check.h b/drivers/net/sfc/base/efx_check.h
index 58377759..ef5eadc6 100644
--- a/drivers/net/sfc/base/efx_check.h
+++ b/drivers/net/sfc/base/efx_check.h
@@ -30,8 +30,9 @@
#if EFSYS_OPT_CHECK_REG
/* Verify chip implements accessed registers */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_CHECK_REG */
@@ -44,15 +45,17 @@
#if EFSYS_OPT_DIAG
/* Support diagnostic hardware tests */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "DIAG requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "DIAG requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_DIAG */
#if EFSYS_OPT_EV_PREFETCH
/* Support optimized EVQ data access */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_EV_PREFETCH */
@@ -62,21 +65,23 @@
#if EFSYS_OPT_FILTER
/* Support hardware packet filters */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "FILTER requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "FILTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_FILTER */
-#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
# if !EFSYS_OPT_FILTER
-# error "HUNTINGTON or MEDFORD requires FILTER"
+# error "HUNTINGTON or MEDFORD or MEDFORD2 requires FILTER"
# endif
#endif /* EFSYS_OPT_HUNTINGTON */
#if EFSYS_OPT_LOOPBACK
/* Support hardware loopback modes */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_LOOPBACK */
@@ -90,21 +95,24 @@
#if EFSYS_OPT_MAC_STATS
/* Support MAC statistics */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_MAC_STATS */
#if EFSYS_OPT_MCDI
/* Support management controller messages */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_MCDI */
-#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
# if !EFSYS_OPT_MCDI
-# error "SIENA or HUNTINGTON or MEDFORD requires MCDI"
+# error "SIENA or HUNTINGTON or MEDFORD or MEDFORD2 requires MCDI"
# endif
#endif
@@ -144,15 +152,17 @@
#if EFSYS_OPT_MON_STATS
/* Support monitor statistics (voltage/temperature) */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_MON_STATS */
#if EFSYS_OPT_MON_MCDI
/* Support Monitor via mcdi */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_MON_MCDI*/
@@ -166,11 +176,19 @@
#if EFSYS_OPT_NVRAM
/* Support non volatile configuration */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_NVRAM */
+#if EFSYS_OPT_IMAGE_LAYOUT
+/* Support signed image layout handling */
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "IMAGE_LAYOUT requires MEDFORD or MEDFORD2"
+# endif
+#endif /* EFSYS_OPT_IMAGE_LAYOUT */
+
#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM
# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported."
#endif
@@ -200,8 +218,9 @@
#if EFSYS_OPT_PHY_LED_CONTROL
/* Support for PHY LED control */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "PHY_LED_CONTROL requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_PHY_LED_CONTROL */
@@ -246,8 +265,9 @@
#if EFSYS_OPT_QSTATS
/* Support EVQ/RXQ/TXQ statistics */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_QSTATS */
@@ -257,15 +277,17 @@
#if EFSYS_OPT_RX_SCALE
/* Support receive scaling (RSS) */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_RX_SCALE */
#if EFSYS_OPT_RX_SCATTER
/* Support receive scatter DMA */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_RX_SCATTER */
@@ -275,8 +297,9 @@
#if EFSYS_OPT_VPD
/* Support PCI Vital Product Data (VPD) */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "VPD requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "VPD requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_VPD */
@@ -290,8 +313,9 @@
#if EFSYS_OPT_BIST
/* Support BIST */
-# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "BIST requires SIENA or HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || \
+ EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "BIST requires SIENA or HUNTINGTON or MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_BIST */
@@ -307,23 +331,37 @@
#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
/* Support adapters with missing static config (for factory use only) */
-# if !EFSYS_OPT_MEDFORD
-# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD"
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
#if EFSYS_OPT_RX_PACKED_STREAM
/* Support packed stream mode */
-# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
-# error "PACKED_STREAM requires HUNTINGTON or MEDFORD"
+# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "PACKED_STREAM requires HUNTINGTON or MEDFORD or MEDFORD2"
+# endif
+#endif
+
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+/* Support equal stride super-buffer mode */
+# if !(EFSYS_OPT_MEDFORD2)
+# error "ES_SUPER_BUFFER requires MEDFORD2"
# endif
#endif
/* Support hardware assistance for tunnels */
#if EFSYS_OPT_TUNNEL
-# if !EFSYS_OPT_MEDFORD
-# error "TUNNEL requires MEDFORD"
+# if !(EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
+# error "TUNNEL requires MEDFORD or MEDFORD2"
# endif
#endif /* EFSYS_OPT_TUNNEL */
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+/* Advertise that the driver is firmware subvariant aware */
+# if !(EFSYS_OPT_MEDFORD2)
+# error "FW_SUBVARIANT_AWARE requires MEDFORD2"
+# endif
+#endif
+
#endif /* _SYS_EFX_CHECK_H */
diff --git a/drivers/net/sfc/base/efx_ev.c b/drivers/net/sfc/base/efx_ev.c
index 949d352a..1139cc26 100644
--- a/drivers/net/sfc/base/efx_ev.c
+++ b/drivers/net/sfc/base/efx_ev.c
@@ -91,7 +91,7 @@ static const efx_ev_ops_t __efx_ev_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_ev_ops_t __efx_ev_ef10_ops = {
ef10_ev_init, /* eevo_init */
ef10_ev_fini, /* eevo_fini */
@@ -104,7 +104,7 @@ static const efx_ev_ops_t __efx_ev_ef10_ops = {
ef10_ev_qstats_update, /* eevo_qstats_update */
#endif
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
@@ -141,6 +141,12 @@ efx_ev_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c
index b92541aa..412298ac 100644
--- a/drivers/net/sfc/base/efx_filter.c
+++ b/drivers/net/sfc/base/efx_filter.c
@@ -56,7 +56,7 @@ static const efx_filter_ops_t __efx_filter_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_filter_ops_t __efx_filter_ef10_ops = {
ef10_filter_init, /* efo_init */
ef10_filter_fini, /* efo_fini */
@@ -66,7 +66,7 @@ static const efx_filter_ops_t __efx_filter_ef10_ops = {
ef10_filter_supported_filters, /* efo_supported_filters */
ef10_filter_reconfigure, /* efo_reconfigure */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_filter_insert(
@@ -74,12 +74,33 @@ efx_filter_insert(
__inout efx_filter_spec_t *spec)
{
const efx_filter_ops_t *efop = enp->en_efop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
EFSYS_ASSERT3P(spec, !=, NULL);
EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_MARK) &&
+ !encp->enc_filter_action_mark_supported) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((spec->efs_flags & EFX_FILTER_FLAG_ACTION_FLAG) &&
+ !encp->enc_filter_action_flag_supported) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
return (efop->efo_add(enp, spec, B_FALSE));
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
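A sketch of requesting the new MARK action on a filter; the explicit support check mirrors the one efx_filter_insert() above now performs, and the enc_filter_action_mark_max bound is an extra guard the caller owes:

static efx_rc_t
example_insert_mark_filter(efx_nic_t *enp, efx_filter_spec_t *spec,
    uint32_t mark)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);

	if (!encp->enc_filter_action_mark_supported ||
	    mark > encp->enc_filter_action_mark_max)
		return (ENOTSUP);

	spec->efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
	spec->efs_mark = mark;
	return (efx_filter_insert(enp, spec));
}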
__checkReturn efx_rc_t
@@ -145,6 +166,12 @@ efx_filter_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -412,7 +439,7 @@ efx_filter_spec_set_encap_type(
__in efx_tunnel_protocol_t encap_type,
__in efx_filter_inner_frame_match_t inner_frame_match)
{
- uint32_t match_flags = 0;
+ uint32_t match_flags = EFX_FILTER_MATCH_ENCAP_TYPE;
uint8_t ip_proto;
efx_rc_t rc;
@@ -462,6 +489,43 @@ fail1:
return (rc);
}
+/*
+ * Specify inner and outer Ethernet addresses and the VXLAN ID in a
+ * filter specification.
+ */
+ __checkReturn efx_rc_t
+efx_filter_spec_set_vxlan_full(
+ __inout efx_filter_spec_t *spec,
+ __in const uint8_t *vxlan_id,
+ __in const uint8_t *inner_addr,
+ __in const uint8_t *outer_addr)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(vxlan_id, !=, NULL);
+ EFSYS_ASSERT3P(inner_addr, !=, NULL);
+ EFSYS_ASSERT3P(outer_addr, !=, NULL);
+
+ if ((inner_addr == NULL) && (outer_addr == NULL))
+ return (EINVAL);
+
+ if (vxlan_id != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
+ memcpy(spec->efs_vni_or_vsid, vxlan_id, EFX_VNI_OR_VSID_LEN);
+ }
+ if (outer_addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->efs_loc_mac, outer_addr, EFX_MAC_ADDR_LEN);
+ }
+ if (inner_addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_IFRM_LOC_MAC;
+ memcpy(spec->efs_ifrm_loc_mac, inner_addr, EFX_MAC_ADDR_LEN);
+ }
+ spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+
+ return (0);
+}
+
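An illustrative call with placeholder addresses and VNI (note the asserts above require all three pointers to be non-NULL):

static efx_rc_t
example_set_vxlan_match(efx_filter_spec_t *spec)
{
	static const uint8_t vni[EFX_VNI_OR_VSID_LEN] = { 0x00, 0x12, 0x34 };
	static const uint8_t inner[EFX_MAC_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	static const uint8_t outer[EFX_MAC_ADDR_LEN] =
	    { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };

	return (efx_filter_spec_set_vxlan_full(spec, vni, inner, outer));
}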
#if EFSYS_OPT_RX_SCALE
__checkReturn efx_rc_t
efx_filter_spec_set_rss_context(
@@ -953,6 +1017,7 @@ siena_filter_build(
default:
EFSYS_ASSERT(B_FALSE);
+ EFX_ZERO_OWORD(*filter);
return (0);
}
diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h
index ed685cba..548834f9 100644
--- a/drivers/net/sfc/base/efx_impl.h
+++ b/drivers/net/sfc/base/efx_impl.h
@@ -29,9 +29,13 @@
#include "medford_impl.h"
#endif /* EFSYS_OPT_MEDFORD */
-#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if EFSYS_OPT_MEDFORD2
+#include "medford2_impl.h"
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
#include "ef10_impl.h"
-#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */
#ifdef __cplusplus
extern "C" {
@@ -61,6 +65,7 @@ typedef enum efx_mac_type_e {
EFX_MAC_SIENA,
EFX_MAC_HUNTINGTON,
EFX_MAC_MEDFORD,
+ EFX_MAC_MEDFORD2,
EFX_MAC_NTYPES
} efx_mac_type_t;
@@ -112,16 +117,36 @@ typedef struct efx_tx_ops_s {
uint32_t, uint8_t,
efx_desc_t *);
void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t,
- uint32_t, uint16_t,
+ uint16_t, uint32_t, uint16_t,
efx_desc_t *, int);
void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t,
efx_desc_t *);
+ void (*etxo_qdesc_checksum_create)(efx_txq_t *, uint16_t,
+ efx_desc_t *);
#if EFSYS_OPT_QSTATS
void (*etxo_qstats_update)(efx_txq_t *,
efsys_stat_t *);
#endif
} efx_tx_ops_t;
+typedef union efx_rxq_type_data_u {
+ /* Dummy member to have non-empty union if no options are enabled */
+ uint32_t ertd_dummy;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ struct {
+ uint32_t eps_buf_size;
+ } ertd_packed_stream;
+#endif
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+ struct {
+ uint32_t eessb_bufs_per_desc;
+ uint32_t eessb_max_dma_len;
+ uint32_t eessb_buf_stride;
+ uint32_t eessb_hol_block_timeout;
+ } ertd_es_super_buffer;
+#endif
+} efx_rxq_type_data_t;
+
typedef struct efx_rx_ops_s {
efx_rc_t (*erxo_init)(efx_nic_t *);
void (*erxo_fini)(efx_nic_t *);
@@ -158,7 +183,8 @@ typedef struct efx_rx_ops_s {
efx_rc_t (*erxo_qflush)(efx_rxq_t *);
void (*erxo_qenable)(efx_rxq_t *);
efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int,
- unsigned int, efx_rxq_type_t, uint32_t,
+ unsigned int, efx_rxq_type_t,
+ const efx_rxq_type_data_t *,
efsys_mem_t *, size_t, uint32_t,
unsigned int,
efx_evq_t *, efx_rxq_t *);
@@ -398,9 +424,9 @@ typedef struct efx_filter_s {
#if EFSYS_OPT_SIENA
siena_filter_t *ef_siena_filter;
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
ef10_filter_table_t *ef_ef10_filter_table;
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
} efx_filter_t;
#if EFSYS_OPT_SIENA
@@ -640,6 +666,7 @@ struct efx_nic_s {
const efx_ev_ops_t *en_eevop;
const efx_tx_ops_t *en_etxop;
const efx_rx_ops_t *en_erxop;
+ efx_fw_variant_t efv;
#if EFSYS_OPT_FILTER
efx_filter_t en_filter;
const efx_filter_ops_t *en_efop;
@@ -683,7 +710,7 @@ struct efx_nic_s {
#endif /* EFSYS_OPT_SIENA */
int enu_unused;
} en_u;
-#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2)
union en_arch {
struct {
int ena_vi_base;
@@ -704,7 +731,7 @@ struct efx_nic_s {
size_t ena_wc_mem_map_size;
} ef10;
} en_arch;
-#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2) */
};
@@ -716,9 +743,11 @@ typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
typedef struct efx_evq_rxq_state_s {
unsigned int eers_rx_read_ptr;
unsigned int eers_rx_mask;
-#if EFSYS_OPT_RX_PACKED_STREAM
+#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
unsigned int eers_rx_stream_npackets;
boolean_t eers_rx_packed_stream;
+#endif
+#if EFSYS_OPT_RX_PACKED_STREAM
unsigned int eers_rx_packed_stream_credits;
#endif
} efx_evq_rxq_state_t;
@@ -825,6 +854,10 @@ struct efx_txq_s {
rev = 'E'; \
break; \
\
+ case EFX_FAMILY_MEDFORD2: \
+ rev = 'F'; \
+ break; \
+ \
default: \
rev = '?'; \
break; \
@@ -915,6 +948,15 @@ struct efx_txq_s {
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
+/*
+ * Accessors for memory BAR non-VI tables.
+ *
+ * Code used on EF10 *must* use EFX_BAR_VI_*() macros for per-VI registers,
+ * to ensure the correct runtime VI window size is used on Medford2.
+ *
+ * Siena-only code may continue using EFX_BAR_TBL_*() macros for VI registers.
+ */
+
#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
do { \
EFX_CHECK_REG((_enp), (_reg)); \
@@ -941,21 +983,6 @@ struct efx_txq_s {
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
-#define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \
- do { \
- EFX_CHECK_REG((_enp), (_reg)); \
- EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
- uint32_t, (_index), \
- uint32_t, _reg ## _OFST, \
- uint32_t, (_edp)->ed_u32[0]); \
- EFSYS_BAR_WRITED((_enp)->en_esbp, \
- (_reg ## _OFST + \
- (2 * sizeof (efx_dword_t)) + \
- ((_index) * _reg ## _STEP)), \
- (_edp), (_lock)); \
- _NOTE(CONSTANTCONDITION) \
- } while (B_FALSE)
-
#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
do { \
EFX_CHECK_REG((_enp), (_reg)); \
@@ -1032,16 +1059,66 @@ struct efx_txq_s {
} while (B_FALSE)
/*
- * Allow drivers to perform optimised 128-bit doorbell writes.
+ * Accessors for memory BAR per-VI registers.
+ *
+ * The VI window size is 8KB for Medford and all earlier controllers.
+ * For Medford2, the VI window size can be 8KB, 16KB or 64KB.
+ */
+
+#define EFX_BAR_VI_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_vi_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_VI_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_VI_WRITED2(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_vi_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ ((_reg ## _OFST) + \
+ (2 * sizeof (efx_dword_t)) + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Allow drivers to perform optimised 128-bit VI doorbell writes.
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
* special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid
* the need for locking in the host, and are the only ones known to be safe to
 * use 128-bit writes with.
*/
-#define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
+#define EFX_BAR_VI_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
do { \
EFX_CHECK_REG((_enp), (_reg)); \
- EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \
+ EFSYS_PROBE7(efx_bar_vi_doorbell_writeo, \
const char *, #_reg, \
uint32_t, (_index), \
uint32_t, _reg ## _OFST, \
@@ -1050,7 +1127,8 @@ struct efx_txq_s {
uint32_t, (_eop)->eo_u32[1], \
uint32_t, (_eop)->eo_u32[0]); \
EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \
- (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_reg ## _OFST + \
+ ((_index) << (_enp)->en_nic_cfg.enc_vi_window_shift)), \
(_eop)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
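For illustration, the offset arithmetic the EFX_BAR_VI_*() macros above perform, pulled out as a plain function; the same expression covers the 8KB, 16KB and 64KB windows because enc_vi_window_shift already encodes log2 of the window size:

static uint32_t
example_vi_reg_offset(const efx_nic_t *enp, uint32_t reg_ofst,
    unsigned int vi_index)
{
	/* Window size is (1U << enc_vi_window_shift) bytes per VI */
	return (reg_ofst +
	    (vi_index << enp->en_nic_cfg.enc_vi_window_shift));
}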
diff --git a/drivers/net/sfc/base/efx_intr.c b/drivers/net/sfc/base/efx_intr.c
index 83ca177d..b518916d 100644
--- a/drivers/net/sfc/base/efx_intr.c
+++ b/drivers/net/sfc/base/efx_intr.c
@@ -75,7 +75,7 @@ static const efx_intr_ops_t __efx_intr_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_intr_ops_t __efx_intr_ef10_ops = {
ef10_intr_init, /* eio_init */
ef10_intr_enable, /* eio_enable */
@@ -87,7 +87,7 @@ static const efx_intr_ops_t __efx_intr_ef10_ops = {
ef10_intr_fatal, /* eio_fatal */
ef10_intr_fini, /* eio_fini */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_intr_init(
@@ -132,6 +132,12 @@ efx_intr_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(B_FALSE);
rc = ENOTSUP;
@@ -283,6 +289,12 @@ siena_intr_init(
{
efx_intr_t *eip = &(enp->en_intr);
efx_oword_t oword;
+ efx_rc_t rc;
+
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_INTR_SIZE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
/*
* bug17213 workaround.
@@ -314,6 +326,11 @@ siena_intr_init(
EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
static void
diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c
index ad4d2211..49c00347 100644
--- a/drivers/net/sfc/base/efx_lic.c
+++ b/drivers/net/sfc/base/efx_lic.c
@@ -10,6 +10,9 @@
#if EFSYS_OPT_LICENSING
#include "ef10_tlv_layout.h"
+#if EFSYS_OPT_SIENA
+#include "efx_regs_mcdi_aoe.h"
+#endif
#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
@@ -162,7 +165,7 @@ static const efx_lic_ops_t __efx_lic_v2_ops = {
#endif /* EFSYS_OPT_HUNTINGTON */
-#if EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn efx_rc_t
efx_mcdi_licensing_v3_update_licenses(
@@ -286,7 +289,7 @@ static const efx_lic_ops_t __efx_lic_v3_ops = {
efx_lic_v3_finish_partition, /* elo_finish_partition */
};
-#endif /* EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
/* V1 Licensing - used in Siena Modena only */
@@ -819,7 +822,7 @@ fail1:
/* V3 Licensing - used starting from Medford family. See SF-114884-SW */
-#if EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn efx_rc_t
efx_mcdi_licensing_v3_update_licenses(
@@ -829,7 +832,8 @@ efx_mcdi_licensing_v3_update_licenses(
uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
efx_rc_t rc;
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING_V3;
@@ -866,7 +870,8 @@ efx_mcdi_licensing_v3_report_license(
MC_CMD_LICENSING_V3_OUT_LEN)];
efx_rc_t rc;
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING_V3;
@@ -930,7 +935,8 @@ efx_mcdi_licensing_v3_app_state(
uint32_t app_state;
efx_rc_t rc;
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT((enp->en_family == EFX_FAMILY_MEDFORD) ||
+ (enp->en_family == EFX_FAMILY_MEDFORD2));
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
@@ -1262,7 +1268,7 @@ fail1:
}
-#endif /* EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_lic_init(
@@ -1296,6 +1302,12 @@ efx_lic_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ elop = &__efx_lic_v3_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
diff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c
index 511f3eb5..57436b95 100644
--- a/drivers/net/sfc/base/efx_mac.c
+++ b/drivers/net/sfc/base/efx_mac.c
@@ -39,7 +39,7 @@ static const efx_mac_ops_t __efx_mac_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_mac_ops_t __efx_mac_ef10_ops = {
ef10_mac_poll, /* emo_poll */
ef10_mac_up, /* emo_up */
@@ -62,7 +62,7 @@ static const efx_mac_ops_t __efx_mac_ef10_ops = {
ef10_mac_stats_update /* emo_stats_update */
#endif /* EFSYS_OPT_MAC_STATS */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_mac_pdu_set(
@@ -492,7 +492,7 @@ efx_mac_filter_default_rxq_clear(
#if EFSYS_OPT_NAMES
-/* START MKCONFIG GENERATED EfxMacStatNamesBlock c11b91b42f922516 */
+/* START MKCONFIG GENERATED EfxMacStatNamesBlock 1a45a82fcfb30c1b */
static const char * const __efx_mac_stat_name[] = {
"rx_octets",
"rx_pkts",
@@ -575,6 +575,31 @@ static const char * const __efx_mac_stat_name[] = {
"vadapter_tx_bad_packets",
"vadapter_tx_bad_bytes",
"vadapter_tx_overflow",
+ "fec_uncorrected_errors",
+ "fec_corrected_errors",
+ "fec_corrected_symbols_lane0",
+ "fec_corrected_symbols_lane1",
+ "fec_corrected_symbols_lane2",
+ "fec_corrected_symbols_lane3",
+ "ctpio_vi_busy_fallback",
+ "ctpio_long_write_success",
+ "ctpio_missing_dbell_fail",
+ "ctpio_overflow_fail",
+ "ctpio_underflow_fail",
+ "ctpio_timeout_fail",
+ "ctpio_noncontig_wr_fail",
+ "ctpio_frm_clobber_fail",
+ "ctpio_invalid_wr_fail",
+ "ctpio_vi_clobber_fallback",
+ "ctpio_unqualified_fallback",
+ "ctpio_runt_fallback",
+ "ctpio_success",
+ "ctpio_fallback",
+ "ctpio_poison",
+ "ctpio_erase",
+ "rxdp_scatter_disabled_trunc",
+ "rxdp_hlb_idle",
+ "rxdp_hlb_timeout",
};
/* END MKCONFIG GENERATED EfxMacStatNamesBlock */
@@ -826,6 +851,13 @@ efx_mac_select(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ emop = &__efx_mac_ef10_ops;
+ type = EFX_MAC_MEDFORD2;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
rc = EINVAL;
goto fail1;
diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c
index 347a5b35..d4ebcf26 100644
--- a/drivers/net/sfc/base/efx_mcdi.c
+++ b/drivers/net/sfc/base/efx_mcdi.c
@@ -45,7 +45,7 @@ static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
ef10_mcdi_init, /* emco_init */
@@ -58,7 +58,7 @@ static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
ef10_mcdi_get_timeout, /* emco_get_timeout */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
@@ -92,6 +92,12 @@ efx_mcdi_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -1258,13 +1264,21 @@ efx_mcdi_drv_attach(
req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
/*
- * Use DONT_CARE for the datapath firmware type to ensure that the
- * driver can attach to an unprivileged function. The datapath firmware
- * type to use is controlled by the 'sfboot' utility.
+ * Typically, client drivers use DONT_CARE for the datapath firmware
+ * type to ensure that the driver can attach to an unprivileged
+ * function. The datapath firmware type to use is controlled by the
+ * 'sfboot' utility.
+ * If a client driver wishes to attach with a specific datapath firmware
+ * type, that can be passed as the second argument of the efx_nic_probe
+ * API. One such example is the ESXi native driver, which attempts to
+ * attach with the FULL_FEATURED datapath firmware type first and falls
+ * back to the DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH
+ * fails.
*/
- MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
+ MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE,
+ DRV_ATTACH_IN_ATTACH, attach ? 1 : 0,
+ DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE);
MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
- MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv);
efx_mcdi_execute(enp, &req);
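A sketch of the fallback sequence described in the comment above, using the new efx_nic_probe() firmware-variant argument (the EFX_FW_VARIANT_* codes are introduced later in this change):

static efx_rc_t
example_probe_with_fallback(efx_nic_t *enp)
{
	efx_rc_t rc;

	rc = efx_nic_probe(enp, EFX_FW_VARIANT_FULL_FEATURED);
	if (rc != 0)
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);

	return (rc);
}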
@@ -1426,6 +1440,11 @@ efx_mcdi_get_phy_cfg(
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
MC_CMD_GET_PHY_CFG_OUT_LEN)];
+#if EFSYS_OPT_NAMES
+ const char *namep;
+ size_t namelen;
+#endif
+ uint32_t phy_media_type;
efx_rc_t rc;
(void) memset(payload, 0, sizeof (payload));
@@ -1449,10 +1468,12 @@ efx_mcdi_get_phy_cfg(
encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
#if EFSYS_OPT_NAMES
- (void) strncpy(encp->enc_phy_name,
- MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
- MIN(sizeof (encp->enc_phy_name) - 1,
- MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+ namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME);
+ namelen = MIN(sizeof (encp->enc_phy_name) - 1,
+ strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+ (void) memset(encp->enc_phy_name, 0,
+ sizeof (encp->enc_phy_name));
+ memcpy(encp->enc_phy_name, namep, namelen);
#endif /* EFSYS_OPT_NAMES */
(void) memset(encp->enc_phy_revision, 0,
sizeof (encp->enc_phy_revision));
@@ -1474,8 +1495,8 @@ efx_mcdi_get_phy_cfg(
EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
- epp->ep_fixed_port_type =
- (efx_phy_media_type_t) MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type;
if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
@@ -1621,7 +1642,7 @@ fail1:
#if EFSYS_OPT_BIST
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/*
* Enter bist offline mode. This is a fw mode which puts the NIC into a state
* where memory BIST tests can be run and not much else can interfere or happen.
@@ -1657,7 +1678,7 @@ fail1:
return (rc);
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_mcdi_bist_start(
@@ -1778,7 +1799,7 @@ efx_mcdi_mac_stats(
{
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
- MC_CMD_MAC_STATS_OUT_DMA_LEN)];
+ MC_CMD_MAC_STATS_V2_OUT_DMA_LEN)];
int clear = (action == EFX_STATS_CLEAR);
int upload = (action == EFX_STATS_UPLOAD);
int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
@@ -1791,7 +1812,7 @@ efx_mcdi_mac_stats(
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN;
+ req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN;
MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
MAC_STATS_IN_DMA, upload,
@@ -1801,19 +1822,35 @@ efx_mcdi_mac_stats(
MAC_STATS_IN_PERIODIC_NOEVENT, !events,
MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
- if (esmp != NULL) {
- int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
+ if (enable || events || upload) {
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t bytes;
+
+ /* Periodic stats or stats upload require a DMA buffer */
+ if (esmp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small for legacy MAC stats */
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t);
- EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
- EFX_MAC_STATS_SIZE);
+ if (EFSYS_MEM_SIZE(esmp) < bytes) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail3;
+ }
MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
EFSYS_MEM_ADDR(esmp) & 0xffffffff);
MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
EFSYS_MEM_ADDR(esmp) >> 32);
MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
- } else {
- EFSYS_ASSERT(!upload && !enable && !events);
}
/*
@@ -1831,12 +1868,18 @@ efx_mcdi_mac_stats(
if ((req.emr_rc != ENOENT) ||
(enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
rc = req.emr_rc;
- goto fail1;
+ goto fail4;
}
}
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -1921,7 +1964,7 @@ fail1:
#endif /* EFSYS_OPT_MAC_STATS */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
/*
* This function returns the pf and vf number of a function. If it is a pf the
@@ -2020,7 +2063,7 @@ fail1:
return (rc);
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_mcdi_set_workaround(
diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h
index 4e69f048..253a9e60 100644
--- a/drivers/net/sfc/base/efx_mcdi.h
+++ b/drivers/net/sfc/base/efx_mcdi.h
@@ -166,11 +166,11 @@ efx_mcdi_mac_spoofing_supported(
#if EFSYS_OPT_BIST
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
extern __checkReturn efx_rc_t
efx_mcdi_bist_enable_offline(
__in efx_nic_t *enp);
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
extern __checkReturn efx_rc_t
efx_mcdi_bist_start(
__in efx_nic_t *enp,
diff --git a/drivers/net/sfc/base/efx_mon.c b/drivers/net/sfc/base/efx_mon.c
index 234a4201..9fc268ec 100644
--- a/drivers/net/sfc/base/efx_mon.c
+++ b/drivers/net/sfc/base/efx_mon.c
@@ -99,7 +99,7 @@ fail1:
#if EFSYS_OPT_NAMES
-/* START MKCONFIG GENERATED MonitorStatNamesBlock d92af1538001301f */
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 8150a068198c0f96 */
static const char * const __mon_stat_name[] = {
"value_2_5v",
"value_vccp1",
@@ -180,6 +180,10 @@ static const char * const __mon_stat_name[] = {
"board_back_temp",
"i1v8",
"i2v5",
+ "i3v3",
+ "i12v0",
+ "1v3",
+ "i1v3",
};
/* END MKCONFIG GENERATED MonitorStatNamesBlock */
diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c
index e318c17d..6c162e03 100644
--- a/drivers/net/sfc/base/efx_nic.c
+++ b/drivers/net/sfc/base/efx_nic.c
@@ -7,11 +7,13 @@
#include "efx.h"
#include "efx_impl.h"
+
__checkReturn efx_rc_t
efx_family(
__in uint16_t venid,
__in uint16_t devid,
- __out efx_family_t *efp)
+ __out efx_family_t *efp,
+ __out unsigned int *membarp)
{
if (venid == EFX_PCI_VENID_SFC) {
switch (devid) {
@@ -21,12 +23,10 @@ efx_family(
* Hardware default for PF0 of uninitialised Siena.
* manftest must be able to cope with this device id.
*/
- *efp = EFX_FAMILY_SIENA;
- return (0);
-
case EFX_PCI_DEVID_BETHPAGE:
case EFX_PCI_DEVID_SIENA:
*efp = EFX_FAMILY_SIENA;
+ *membarp = EFX_MEM_BAR_SIENA;
return (0);
#endif /* EFSYS_OPT_SIENA */
@@ -36,17 +36,16 @@ efx_family(
* Hardware default for PF0 of uninitialised Huntington.
* manftest must be able to cope with this device id.
*/
- *efp = EFX_FAMILY_HUNTINGTON;
- return (0);
-
case EFX_PCI_DEVID_FARMINGDALE:
case EFX_PCI_DEVID_GREENPORT:
*efp = EFX_FAMILY_HUNTINGTON;
+ *membarp = EFX_MEM_BAR_HUNTINGTON_PF;
return (0);
case EFX_PCI_DEVID_FARMINGDALE_VF:
case EFX_PCI_DEVID_GREENPORT_VF:
*efp = EFX_FAMILY_HUNTINGTON;
+ *membarp = EFX_MEM_BAR_HUNTINGTON_VF;
return (0);
#endif /* EFSYS_OPT_HUNTINGTON */
@@ -56,18 +55,30 @@ efx_family(
* Hardware default for PF0 of uninitialised Medford.
* manftest must be able to cope with this device id.
*/
- *efp = EFX_FAMILY_MEDFORD;
- return (0);
-
case EFX_PCI_DEVID_MEDFORD:
*efp = EFX_FAMILY_MEDFORD;
+ *membarp = EFX_MEM_BAR_MEDFORD_PF;
return (0);
case EFX_PCI_DEVID_MEDFORD_VF:
*efp = EFX_FAMILY_MEDFORD;
+ *membarp = EFX_MEM_BAR_MEDFORD_VF;
return (0);
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_PCI_DEVID_MEDFORD2_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Medford2.
+ * manftest must be able to cope with this device id.
+ */
+ case EFX_PCI_DEVID_MEDFORD2:
+ case EFX_PCI_DEVID_MEDFORD2_VF:
+ *efp = EFX_FAMILY_MEDFORD2;
+ *membarp = EFX_MEM_BAR_MEDFORD2;
+ return (0);
+#endif /* EFSYS_OPT_MEDFORD2 */
+
case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
default:
break;
@@ -78,6 +89,7 @@ efx_family(
return (ENOTSUP);
}
+
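The hunk above widens efx_family() to also report which PCI memory BAR carries the control registers, since Medford2 moves them relative to earlier EF10 controllers. A minimal caller sketch, assuming the efx.h declarations from this patch; map_pci_bar() is a hypothetical platform mapping helper, not part of the patch:

static int
sketch_probe(efx_nic_t *enp, uint16_t venid, uint16_t devid)
{
	efx_family_t family;
	unsigned int membar;

	/* One lookup now yields both the controller family and the BAR */
	if (efx_family(venid, devid, &family, &membar) != 0)
		return (-1);	/* unrecognised device */

	/* map_pci_bar() is a placeholder, not part of this patch */
	return (map_pci_bar(enp, membar));
}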
#if EFSYS_OPT_SIENA
static const efx_nic_ops_t __efx_nic_siena_ops = {
@@ -135,6 +147,25 @@ static const efx_nic_ops_t __efx_nic_medford_ops = {
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+
+static const efx_nic_ops_t __efx_nic_medford2_ops = {
+ ef10_nic_probe, /* eno_probe */
+ medford2_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_MEDFORD2 */
+
__checkReturn efx_rc_t
efx_nic_create(
@@ -213,6 +244,22 @@ efx_nic_create(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ enp->en_enop = &__efx_nic_medford2_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
rc = ENOTSUP;
goto fail2;
@@ -243,7 +290,8 @@ fail1:
__checkReturn efx_rc_t
efx_nic_probe(
- __in efx_nic_t *enp)
+ __in efx_nic_t *enp,
+ __in efx_fw_variant_t efv)
{
const efx_nic_ops_t *enop;
efx_rc_t rc;
@@ -254,7 +302,27 @@ efx_nic_probe(
#endif /* EFSYS_OPT_MCDI */
EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
+ /* Ensure FW variant codes match with MC_CMD_FW codes */
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_FULL_FEATURED ==
+ MC_CMD_FW_FULL_FEATURED);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_LOW_LATENCY ==
+ MC_CMD_FW_LOW_LATENCY);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM ==
+ MC_CMD_FW_PACKED_STREAM);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_HIGH_TX_RATE ==
+ MC_CMD_FW_HIGH_TX_RATE);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1 ==
+ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_RULES_ENGINE ==
+ MC_CMD_FW_RULES_ENGINE);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_DPDK ==
+ MC_CMD_FW_DPDK);
+ EFX_STATIC_ASSERT(EFX_FW_VARIANT_DONT_CARE ==
+ (int)MC_CMD_FW_DONT_CARE);
+
enop = enp->en_enop;
+ enp->efv = efv;
+
if ((rc = enop->eno_probe(enp)) != 0)
goto fail1;
@@ -536,6 +604,18 @@ efx_nic_get_fw_version(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+ /* Ensure RXDP_FW_ID codes match with MC_CMD_GET_CAPABILITIES codes */
+ EFX_STATIC_ASSERT(EFX_RXDP_FULL_FEATURED_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP);
+ EFX_STATIC_ASSERT(EFX_RXDP_LOW_LATENCY_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY);
+ EFX_STATIC_ASSERT(EFX_RXDP_PACKED_STREAM_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM);
+ EFX_STATIC_ASSERT(EFX_RXDP_RULES_ENGINE_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE);
+ EFX_STATIC_ASSERT(EFX_RXDP_DPDK_FW_ID ==
+ MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK);
+
rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL);
if (rc != 0)
goto fail2;
@@ -607,48 +687,49 @@ efx_loopback_mask(
EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS);
EFSYS_ASSERT(maskp != NULL);
- /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR ==
- EFX_LOOPBACK_XAUI_WS_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR ==
- EFX_LOOPBACK_XAUI_WS_NEAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR ==
- EFX_LOOPBACK_XFI_WS_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS ==
- EFX_LOOPBACK_PMA_INT_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS ==
- EFX_LOOPBACK_SD_FEP2_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS ==
- EFX_LOOPBACK_SD_FEP1_5_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS);
+ /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+#define LOOPBACK_CHECK(_mcdi, _efx) \
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_##_mcdi == EFX_LOOPBACK_##_efx)
+
+ LOOPBACK_CHECK(NONE, OFF);
+ LOOPBACK_CHECK(DATA, DATA);
+ LOOPBACK_CHECK(GMAC, GMAC);
+ LOOPBACK_CHECK(XGMII, XGMII);
+ LOOPBACK_CHECK(XGXS, XGXS);
+ LOOPBACK_CHECK(XAUI, XAUI);
+ LOOPBACK_CHECK(GMII, GMII);
+ LOOPBACK_CHECK(SGMII, SGMII);
+ LOOPBACK_CHECK(XGBR, XGBR);
+ LOOPBACK_CHECK(XFI, XFI);
+ LOOPBACK_CHECK(XAUI_FAR, XAUI_FAR);
+ LOOPBACK_CHECK(GMII_FAR, GMII_FAR);
+ LOOPBACK_CHECK(SGMII_FAR, SGMII_FAR);
+ LOOPBACK_CHECK(XFI_FAR, XFI_FAR);
+ LOOPBACK_CHECK(GPHY, GPHY);
+ LOOPBACK_CHECK(PHYXS, PHY_XS);
+ LOOPBACK_CHECK(PCS, PCS);
+ LOOPBACK_CHECK(PMAPMD, PMA_PMD);
+ LOOPBACK_CHECK(XPORT, XPORT);
+ LOOPBACK_CHECK(XGMII_WS, XGMII_WS);
+ LOOPBACK_CHECK(XAUI_WS, XAUI_WS);
+ LOOPBACK_CHECK(XAUI_WS_FAR, XAUI_WS_FAR);
+ LOOPBACK_CHECK(XAUI_WS_NEAR, XAUI_WS_NEAR);
+ LOOPBACK_CHECK(GMII_WS, GMII_WS);
+ LOOPBACK_CHECK(XFI_WS, XFI_WS);
+ LOOPBACK_CHECK(XFI_WS_FAR, XFI_WS_FAR);
+ LOOPBACK_CHECK(PHYXS_WS, PHYXS_WS);
+ LOOPBACK_CHECK(PMA_INT, PMA_INT);
+ LOOPBACK_CHECK(SD_NEAR, SD_NEAR);
+ LOOPBACK_CHECK(SD_FAR, SD_FAR);
+ LOOPBACK_CHECK(PMA_INT_WS, PMA_INT_WS);
+ LOOPBACK_CHECK(SD_FEP2_WS, SD_FEP2_WS);
+ LOOPBACK_CHECK(SD_FEP1_5_WS, SD_FEP1_5_WS);
+ LOOPBACK_CHECK(SD_FEP_WS, SD_FEP_WS);
+ LOOPBACK_CHECK(SD_FES_WS, SD_FES_WS);
+ LOOPBACK_CHECK(AOE_INT_NEAR, AOE_INT_NEAR);
+ LOOPBACK_CHECK(DATA_WS, DATA_WS);
+ LOOPBACK_CHECK(FORCE_EXT_LINK, FORCE_EXT_LINK);
+#undef LOOPBACK_CHECK
/* Build bitmask of possible loopback types */
EFX_ZERO_QWORD(mask);
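The LOOPBACK_CHECK() macro above collapses three dozen hand-written EFX_STATIC_ASSERT lines into one table-like list, so adding a loopback mode (as the AOE_INT_NEAR, DATA_WS and FORCE_EXT_LINK entries do here) costs a single line. A minimal sketch of the same pattern using C11 _Static_assert and assumed enum values, not the real MCDI headers:

#define MC_CMD_LOOPBACK_NONE 0x0	/* assumed value, sketch only */
#define EFX_LOOPBACK_OFF 0x0		/* assumed value, sketch only */

#define LOOPBACK_CHECK(_mcdi, _efx) \
	_Static_assert(MC_CMD_LOOPBACK_##_mcdi == EFX_LOOPBACK_##_efx, \
	    #_mcdi " and " #_efx " have diverged")

LOOPBACK_CHECK(NONE, OFF);	/* compiles only while the values agree */
#undef LOOPBACK_CHECK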
@@ -706,7 +787,7 @@ efx_mcdi_get_loopback_modes(
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
- MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)];
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN)];
efx_qword_t mask;
efx_qword_t modes;
efx_rc_t rc;
@@ -716,7 +797,7 @@ efx_mcdi_get_loopback_modes(
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN;
+ req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN;
efx_mcdi_execute(enp, &req);
@@ -757,18 +838,51 @@ efx_mcdi_get_loopback_modes(
MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST +
MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) {
/* Response includes 40G loopback modes */
- modes =
- *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G);
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_40G);
EFX_AND_QWORD(modes, mask);
encp->enc_loopback_types[EFX_LINK_40000FDX] = modes;
}
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN) {
+ /* Response includes 25G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_25G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_25000FDX] = modes;
+ }
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN) {
+ /* Response includes 50G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_50G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_50000FDX] = modes;
+ }
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN) {
+ /* Response includes 100G loopback modes */
+ modes = *MCDI_OUT2(req, efx_qword_t,
+ GET_LOOPBACK_MODES_OUT_V2_100G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_100000FDX] = modes;
+ }
+
EFX_ZERO_QWORD(modes);
EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF);
EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]);
EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]);
EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]);
EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_25000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_50000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100000FDX]);
encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes;
return (0);
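Note how the 25G/50G/100G additions above are length-gated: older firmware returns a shorter GET_LOOPBACK_MODES reply, so each optional field is consumed only when emr_out_length_used covers it. A self-contained sketch of that idiom, with hypothetical offsets and a little-endian host assumed (the real code goes through the MCDI_OUT2 accessors):

#include <stdint.h>
#include <string.h>

static int
sketch_read_optional(const uint8_t *resp, size_t resp_len,
    size_t field_ofst, size_t field_len, uint64_t *valp)
{
	if (field_len > sizeof (*valp) ||
	    resp_len < field_ofst + field_len)
		return (0);	/* old firmware: field absent, keep default */

	*valp = 0;
	memcpy(valp, resp + field_ofst, field_len);
	return (1);
}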
@@ -830,6 +944,82 @@ fail1:
return (rc);
}
+#if EFSYS_OPT_FW_SUBVARIANT_AWARE
+
+ __checkReturn efx_rc_t
+efx_nic_get_fw_subvariant(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_subvariant_t *subvariantp)
+{
+ efx_rc_t rc;
+ uint32_t value;
+
+ rc = efx_mcdi_get_nic_global(enp,
+ MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, &value);
+ if (rc != 0)
+ goto fail1;
+
+ /* Mapping is not required since values match MCDI */
+ EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_DEFAULT ==
+ MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT);
+ EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM ==
+ MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM);
+
+ switch (value) {
+ case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT:
+ case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM:
+ *subvariantp = value;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_set_fw_subvariant(
+ __in efx_nic_t *enp,
+ __in efx_nic_fw_subvariant_t subvariant)
+{
+ efx_rc_t rc;
+
+ switch (subvariant) {
+ case EFX_NIC_FW_SUBVARIANT_DEFAULT:
+ case EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM:
+ /* Mapping is not required since values match MCDI */
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = efx_mcdi_set_nic_global(enp,
+ MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, subvariant);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
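The new sub-variant accessors above round-trip a single MCDI "NIC global" value, so driver code can probe and switch in two calls. A hedged usage sketch, not from the patch; note that, per the MC_CMD_ERR_FILTERS_PRESENT comment later in this series, the set may fail while filters are installed:

static efx_rc_t
sketch_enable_no_tx_csum(efx_nic_t *enp)
{
	efx_nic_fw_subvariant_t cur;
	efx_rc_t rc;

	rc = efx_nic_get_fw_subvariant(enp, &cur);
	if (rc != 0)
		return (rc);

	if (cur == EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM)
		return (0);	/* nothing to do */

	return (efx_nic_set_fw_subvariant(enp,
	    EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM));
}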
__checkReturn efx_rc_t
efx_nic_check_pcie_link_speed(
diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c
index c2cc9ad3..be409c3a 100644
--- a/drivers/net/sfc/base/efx_nvram.c
+++ b/drivers/net/sfc/base/efx_nvram.c
@@ -30,7 +30,7 @@ static const efx_nvram_ops_t __efx_nvram_siena_ops = {
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
#if EFSYS_OPT_DIAG
@@ -49,7 +49,7 @@ static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
ef10_nvram_buffer_validate, /* envo_buffer_validate */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_nvram_init(
@@ -81,6 +81,12 @@ efx_nvram_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c
index 069c2836..ba2f51c1 100644
--- a/drivers/net/sfc/base/efx_phy.c
+++ b/drivers/net/sfc/base/efx_phy.c
@@ -27,7 +27,7 @@ static const efx_phy_ops_t __efx_phy_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_phy_ops_t __efx_phy_ef10_ops = {
ef10_phy_power, /* epo_power */
NULL, /* epo_reset */
@@ -44,7 +44,7 @@ static const efx_phy_ops_t __efx_phy_ef10_ops = {
ef10_bist_stop, /* epo_bist_stop */
#endif /* EFSYS_OPT_BIST */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_phy_probe(
@@ -67,16 +67,25 @@ efx_phy_probe(
epop = &__efx_phy_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
+
#if EFSYS_OPT_HUNTINGTON
case EFX_FAMILY_HUNTINGTON:
epop = &__efx_phy_ef10_ops;
break;
#endif /* EFSYS_OPT_HUNTINGTON */
+
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
epop = &__efx_phy_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
+
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
rc = ENOTSUP;
goto fail1;
@@ -176,6 +185,7 @@ efx_phy_adv_cap_get(
break;
default:
EFSYS_ASSERT(B_FALSE);
+ *maskp = 0;
break;
}
}
diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c
index a792a9ef..33a1a084 100644
--- a/drivers/net/sfc/base/efx_port.c
+++ b/drivers/net/sfc/base/efx_port.c
@@ -120,7 +120,7 @@ efx_port_loopback_set(
EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode],
- loopback_type) == 0) {
+ (int)loopback_type) == 0) {
rc = ENOTSUP;
goto fail1;
}
@@ -180,6 +180,9 @@ static const char * const __efx_loopback_type_name[] = {
"SD_FEP1_5_WS",
"SD_FEP_WS",
"SD_FES_WS",
+ "AOE_INT_NEAR",
+ "DATA_WS",
+ "FORCE_EXT_LINK",
};
__checkReturn const char *
diff --git a/drivers/net/sfc/base/efx_regs_ef10.h b/drivers/net/sfc/base/efx_regs_ef10.h
index 5f978305..968aaaca 100644
--- a/drivers/net/sfc/base/efx_regs_ef10.h
+++ b/drivers/net/sfc/base/efx_regs_ef10.h
@@ -24,7 +24,7 @@ extern "C" {
*/
#define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face
@@ -38,7 +38,7 @@ extern "C" {
*/
#define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
#define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face
@@ -54,7 +54,7 @@ extern "C" {
*/
#define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_BIU_INT_ISR_REG_RESET 0x0
@@ -68,7 +68,7 @@ extern "C" {
*/
#define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_MC_DB_LWRD_REG_RESET 0x0
@@ -82,7 +82,7 @@ extern "C" {
*/
#define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_MC_DB_HWRD_REG_RESET 0x0
@@ -96,7 +96,7 @@ extern "C" {
*/
#define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_EVQ_RPTR_REG_STEP 8192
#define ER_DZ_EVQ_RPTR_REG_ROWS 2048
#define ER_DZ_EVQ_RPTR_REG_RESET 0x0
@@ -109,17 +109,95 @@ extern "C" {
/*
+ * EVQ_RPTR_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_RPTR_REG_64K_OFST 0x00000400
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_RPTR_REG_64K_STEP 65536
+#define ER_FZ_EVQ_RPTR_REG_64K_ROWS 2048
+#define ER_FZ_EVQ_RPTR_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_FZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_FZ_EVQ_RPTR_LBN 0
+#define ERF_FZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_RPTR_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_RPTR_REG_16K_OFST 0x00000400
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_RPTR_REG_16K_STEP 16384
+#define ER_FZ_EVQ_RPTR_REG_16K_ROWS 2048
+#define ER_FZ_EVQ_RPTR_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_EVQ_RPTR_VLD_LBN 15; */
+/* defined as ERF_FZ_EVQ_RPTR_VLD_WIDTH 1 */
+/* defined as ERF_FZ_EVQ_RPTR_LBN 0; */
+/* defined as ERF_FZ_EVQ_RPTR_WIDTH 15 */
+
+
+/*
+ * EVQ_TMR_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_TMR_REG_64K_OFST 0x00000420
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_TMR_REG_64K_STEP 65536
+#define ER_FZ_EVQ_TMR_REG_64K_ROWS 2048
+#define ER_FZ_EVQ_TMR_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_TC_TMR_REL_VAL_LBN 16
+#define ERF_FZ_TC_TMR_REL_VAL_WIDTH 14
+#define ERF_FZ_TC_TIMER_MODE_LBN 14
+#define ERF_FZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_FZ_TC_TIMER_VAL_LBN 0
+#define ERF_FZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * EVQ_TMR_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_EVQ_TMR_REG_16K_OFST 0x00000420
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_EVQ_TMR_REG_16K_STEP 16384
+#define ER_FZ_EVQ_TMR_REG_16K_ROWS 2048
+#define ER_FZ_EVQ_TMR_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */
+/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */
+/* defined as ERF_FZ_TC_TIMER_MODE_LBN 14; */
+/* defined as ERF_FZ_TC_TIMER_MODE_WIDTH 2 */
+/* defined as ERF_FZ_TC_TIMER_VAL_LBN 0; */
+/* defined as ERF_FZ_TC_TIMER_VAL_WIDTH 14 */
+
+
+/*
* EVQ_TMR_REG(32bit):
*
*/
#define ER_DZ_EVQ_TMR_REG_OFST 0x00000420
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_EVQ_TMR_REG_STEP 8192
#define ER_DZ_EVQ_TMR_REG_ROWS 2048
#define ER_DZ_EVQ_TMR_REG_RESET 0x0
+/* defined as ERF_FZ_TC_TMR_REL_VAL_LBN 16; */
+/* defined as ERF_FZ_TC_TMR_REL_VAL_WIDTH 14 */
#define ERF_DZ_TC_TIMER_MODE_LBN 14
#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
#define ERF_DZ_TC_TIMER_VAL_LBN 0
@@ -127,12 +205,28 @@ extern "C" {
/*
+ * RX_DESC_UPD_REG_16K(32bit):
+ *
+ */
+
+#define ER_FZ_RX_DESC_UPD_REG_16K_OFST 0x00000830
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_RX_DESC_UPD_REG_16K_STEP 16384
+#define ER_FZ_RX_DESC_UPD_REG_16K_ROWS 2048
+#define ER_FZ_RX_DESC_UPD_REG_16K_RESET 0x0
+
+
+#define ERF_FZ_RX_DESC_WPTR_LBN 0
+#define ERF_FZ_RX_DESC_WPTR_WIDTH 12
+
+
+/*
* RX_DESC_UPD_REG(32bit):
*
*/
#define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_RX_DESC_UPD_REG_STEP 8192
#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048
#define ER_DZ_RX_DESC_UPD_REG_RESET 0x0
@@ -141,13 +235,74 @@ extern "C" {
#define ERF_DZ_RX_DESC_WPTR_LBN 0
#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/*
+ * RX_DESC_UPD_REG_64K(32bit):
+ *
+ */
+
+#define ER_FZ_RX_DESC_UPD_REG_64K_OFST 0x00000830
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_RX_DESC_UPD_REG_64K_STEP 65536
+#define ER_FZ_RX_DESC_UPD_REG_64K_ROWS 2048
+#define ER_FZ_RX_DESC_UPD_REG_64K_RESET 0x0
+
+
+/* defined as ERF_FZ_RX_DESC_WPTR_LBN 0; */
+/* defined as ERF_FZ_RX_DESC_WPTR_WIDTH 12 */
+
+
+/*
+ * TX_DESC_UPD_REG_64K(96bit):
+ *
+ */
+
+#define ER_FZ_TX_DESC_UPD_REG_64K_OFST 0x00000a10
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_TX_DESC_UPD_REG_64K_STEP 65536
+#define ER_FZ_TX_DESC_UPD_REG_64K_ROWS 2048
+#define ER_FZ_TX_DESC_UPD_REG_64K_RESET 0x0
+
+
+#define ERF_FZ_RSVD_LBN 76
+#define ERF_FZ_RSVD_WIDTH 20
+#define ERF_FZ_TX_DESC_WPTR_LBN 64
+#define ERF_FZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_FZ_TX_DESC_HWORD_LBN 32
+#define ERF_FZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_FZ_TX_DESC_LWORD_LBN 0
+#define ERF_FZ_TX_DESC_LWORD_WIDTH 32
+
+
+/*
+ * TX_DESC_UPD_REG_16K(96bit):
+ *
+ */
+
+#define ER_FZ_TX_DESC_UPD_REG_16K_OFST 0x00000a10
+/* medford2a0=pf_dbell_bar */
+#define ER_FZ_TX_DESC_UPD_REG_16K_STEP 16384
+#define ER_FZ_TX_DESC_UPD_REG_16K_ROWS 2048
+#define ER_FZ_TX_DESC_UPD_REG_16K_RESET 0x0
+
+
+/* defined as ERF_FZ_RSVD_LBN 76; */
+/* defined as ERF_FZ_RSVD_WIDTH 20 */
+/* defined as ERF_FZ_TX_DESC_WPTR_LBN 64; */
+/* defined as ERF_FZ_TX_DESC_WPTR_WIDTH 12 */
+/* defined as ERF_FZ_TX_DESC_HWORD_LBN 32; */
+/* defined as ERF_FZ_TX_DESC_HWORD_WIDTH 32 */
+/* defined as ERF_FZ_TX_DESC_LWORD_LBN 0; */
+/* defined as ERF_FZ_TX_DESC_LWORD_WIDTH 32 */
+
+
/*
* TX_DESC_UPD_REG(96bit):
*
*/
#define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10
-/* hunta0,medforda0=pcie_pf_bar2 */
+/* hunta0,medforda0,medford2a0=pf_dbell_bar */
#define ER_DZ_TX_DESC_UPD_REG_STEP 8192
#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048
#define ER_DZ_TX_DESC_UPD_REG_RESET 0x0
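The _16K and _64K register variants above differ from the legacy _DZ_ set only in STEP: Medford2 can place each function's doorbells in a 16 KiB or 64 KiB VI window instead of the legacy 8 KiB one, and every per-VI register then lands at base + index * step. A sketch of the address arithmetic these tables imply (helper name assumed):

#include <stdint.h>

static inline uint32_t
sketch_vi_reg_offset(uint32_t reg_base, unsigned int vi_index,
    uint32_t vi_window_step)
{
	/* vi_window_step is 8192, 16384 or 65536 per the *_STEP defines */
	return (reg_base + vi_index * vi_window_step);
}

/* e.g. TX doorbell for VI 3 in a 64 KiB window:
 *	sketch_vi_reg_offset(0x00000a10, 3, 65536)
 */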
@@ -233,16 +388,24 @@ extern "C" {
#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
-#define ESF_DZ_RX_L4_CLASS_LBN 45
-#define ESF_DZ_RX_L4_CLASS_WIDTH 3
-#define ESE_DZ_L4_CLASS_RSVD7 7
-#define ESE_DZ_L4_CLASS_RSVD6 6
-#define ESE_DZ_L4_CLASS_RSVD5 5
-#define ESE_DZ_L4_CLASS_RSVD4 4
-#define ESE_DZ_L4_CLASS_RSVD3 3
-#define ESE_DZ_L4_CLASS_UDP 2
-#define ESE_DZ_L4_CLASS_TCP 1
-#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DE_RX_L4_CLASS_LBN 45
+#define ESF_DE_RX_L4_CLASS_WIDTH 3
+#define ESE_DE_L4_CLASS_RSVD7 7
+#define ESE_DE_L4_CLASS_RSVD6 6
+#define ESE_DE_L4_CLASS_RSVD5 5
+#define ESE_DE_L4_CLASS_RSVD4 4
+#define ESE_DE_L4_CLASS_RSVD3 3
+#define ESE_DE_L4_CLASS_UDP 2
+#define ESE_DE_L4_CLASS_TCP 1
+#define ESE_DE_L4_CLASS_UNKNOWN 0
+#define ESF_FZ_RX_FASTPD_INDCTR_LBN 47
+#define ESF_FZ_RX_FASTPD_INDCTR_WIDTH 1
+#define ESF_FZ_RX_L4_CLASS_LBN 45
+#define ESF_FZ_RX_L4_CLASS_WIDTH 2
+#define ESE_FZ_L4_CLASS_RSVD3 3
+#define ESE_FZ_L4_CLASS_UDP 2
+#define ESE_FZ_L4_CLASS_TCP 1
+#define ESE_FZ_L4_CLASS_UNKNOWN 0
#define ESF_DZ_RX_L3_CLASS_LBN 42
#define ESF_DZ_RX_L3_CLASS_WIDTH 3
#define ESE_DZ_L3_CLASS_RSVD7 7
@@ -289,6 +452,8 @@ extern "C" {
#define ESF_EZ_RX_ABORT_WIDTH 1
#define ESF_DZ_RX_ECC_ERR_LBN 29
#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_TRUNC_ERR_LBN 29
+#define ESF_DZ_RX_TRUNC_ERR_WIDTH 1
#define ESF_DZ_RX_CRC1_ERR_LBN 28
#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
#define ESF_DZ_RX_CRC0_ERR_LBN 27
@@ -419,6 +584,8 @@ extern "C" {
#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
@@ -429,7 +596,7 @@ extern "C" {
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2A_DESC */
+/* ES_TX_TSO_V2_DESC_A */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -449,7 +616,7 @@ extern "C" {
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_TSO_FATSO2B_DESC */
+/* ES_TX_TSO_V2_DESC_B */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
#define ESF_DZ_TX_OPTION_TYPE_LBN 60
@@ -463,12 +630,10 @@ extern "C" {
#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16
-#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
-#define ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0
-#define ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16
+#define ESF_DZ_TX_TSO_OUTER_IPID_LBN 0
+#define ESF_DZ_TX_TSO_OUTER_IPID_WIDTH 16
/* ES_TX_VLAN_DESC */
@@ -533,6 +698,21 @@ extern "C" {
#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48
#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16
+/* Equal stride super-buffer RX packet prefix (see SF-119419-TC) */
+#define ES_EZ_ESSB_RX_PREFIX_LEN 8
+#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_LBN 0
+#define ES_EZ_ESSB_RX_PREFIX_DATA_LEN_WIDTH 16
+#define ES_EZ_ESSB_RX_PREFIX_MARK_LBN 16
+#define ES_EZ_ESSB_RX_PREFIX_MARK_WIDTH 8
+#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN 28
+#define ES_EZ_ESSB_RX_PREFIX_HASH_VALID_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN 29
+#define ES_EZ_ESSB_RX_PREFIX_MARK_VALID_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN 30
+#define ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_WIDTH 1
+#define ES_EZ_ESSB_RX_PREFIX_HASH_LBN 32
+#define ES_EZ_ESSB_RX_PREFIX_HASH_WIDTH 32
+
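The ES_EZ_ESSB_RX_PREFIX_* fields above describe the new 8-byte equal-stride super-buffer RX prefix as little-endian (LBN, WIDTH) pairs. An illustrative decoder, assuming the prefix has already been loaded into a host-endian 64-bit value (real drivers use the EFX field accessors instead):

#include <stdint.h>

static inline uint32_t
sketch_essb_field(uint64_t prefix, unsigned int lbn, unsigned int width)
{
	return ((uint32_t)((prefix >> lbn) & ((1ULL << width) - 1)));
}

/* data length: sketch_essb_field(p, 0, 16)
 * user mark:   sketch_essb_field(p, 16, 8)
 * RSS hash:    sketch_essb_field(p, 32, 32)
 */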
/*
* An extra flag for the packed stream mode,
* signalling the start of a new buffer
diff --git a/drivers/net/sfc/base/efx_regs_mcdi.h b/drivers/net/sfc/base/efx_regs_mcdi.h
index 7389877a..cf8a7936 100644
--- a/drivers/net/sfc/base/efx_regs_mcdi.h
+++ b/drivers/net/sfc/base/efx_regs_mcdi.h
@@ -280,7 +280,8 @@
#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
/* Workaround 26807 could not be turned on/off because some functions
* have already installed filters. See the comment at
- * MC_CMD_WORKAROUND_BUG26807. */
+ * MC_CMD_WORKAROUND_BUG26807.
+ * May also be returned for other operations such as sub-variant switching. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
/* The clock whose frequency you've attempted to set
 * doesn't exist on this NIC */
@@ -291,6 +292,18 @@
/* This command needs to be processed in the background but there were no
* resources to do so. Send it again after a command has completed. */
#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away. This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away. This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* The operation could not complete because some PIO buffers are allocated */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
#define MC_CMD_ERR_CODE_OFST 0
@@ -311,10 +324,17 @@
#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
-/* Points to the recovery mode entry point. */
+/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */
#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+/* Points to the recovery mode entry point. Same as above, but with the right name. */
+#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4)
+
+/* Points to the noflash mode entry point. */
+#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -368,7 +388,7 @@
#define MCDI_EVENT_LEVEL_LBN 33
#define MCDI_EVENT_LEVEL_WIDTH 3
/* enum: Info. */
-#define MCDI_EVENT_LEVEL_INFO 0x0
+#define MCDI_EVENT_LEVEL_INFO 0x0
/* enum: Warning. */
#define MCDI_EVENT_LEVEL_WARN 0x1
/* enum: Error. */
@@ -376,6 +396,7 @@
/* enum: Fatal. */
#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
@@ -386,14 +407,22 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
/* enum: 100Mbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
/* enum: 1Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
/* enum: 10Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
/* enum: 40Gbs */
-#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
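The LINKCHANGE speed field stays four bits wide; the patch simply extends the enum for 25/50/100G and documents 0x0 as "unknown or down". A sketch decoding the code to Mbps (assumed helper, not part of the MCDI headers):

static unsigned int
sketch_linkchange_mbps(unsigned int speed_code)
{
	switch (speed_code) {
	case 0x1: return (100);		/* SPEED_100M */
	case 0x2: return (1000);	/* SPEED_1G */
	case 0x3: return (10000);	/* SPEED_10G */
	case 0x4: return (40000);	/* SPEED_40G */
	case 0x5: return (25000);	/* SPEED_25G */
	case 0x6: return (50000);	/* SPEED_50G */
	case 0x7: return (100000);	/* SPEED_100G */
	default: return (0);		/* SPEED_UNKNOWN: link down */
	}
}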
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -482,8 +511,23 @@
#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
/* enum: Notify that the attempt to run FPGA Controller firmware timedout */
#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define MCDI_EVENT_AOE_FC_RUNNING 0x14
#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by driver
+ */
+#define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
/* enum: Reading from NV failed */
@@ -536,6 +580,22 @@
#define MCDI_EVENT_MUM_WATCHDOG 0x3
#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DBRET_SEQ_LBN 0
+#define MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define MCDI_EVENT_SUC_WATCHDOG 0x4
+#define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -569,23 +629,23 @@
/* enum: Transmit error */
#define MCDI_EVENT_CODE_TX_ERR 0xb
/* enum: Tx flush has completed */
-#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
/* enum: PTP packet received timestamp */
-#define MCDI_EVENT_CODE_PTP_RX 0xd
+#define MCDI_EVENT_CODE_PTP_RX 0xd
/* enum: PTP NIC failure */
-#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
/* enum: PTP PPS event */
-#define MCDI_EVENT_CODE_PTP_PPS 0xf
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
/* enum: Rx flush has completed */
-#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
/* enum: Receive error */
#define MCDI_EVENT_CODE_RX_ERR 0x11
/* enum: AOE fault */
-#define MCDI_EVENT_CODE_AOE 0x12
+#define MCDI_EVENT_CODE_AOE 0x12
/* enum: Network port calibration failed (VCAL). */
-#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
/* enum: HW PPS event */
-#define MCDI_EVENT_CODE_HW_PPS 0x14
+#define MCDI_EVENT_CODE_HW_PPS 0x14
/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
* a different format)
*/
@@ -608,73 +668,99 @@
* been processed and it may now resend the command
*/
#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define MCDI_EVENT_CODE_SUC 0x1f
/* enum: Artificial event generated by host and posted via MC for test
* purposes.
*/
-#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LEN 4
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LEN 4
#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LEN 4
#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LEN 4
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
* timestamp
*/
#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LEN 4
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
* timestamp
*/
#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LEN 4
#define MCDI_EVENT_PTP_MAJOR_LBN 0
#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
* of timestamp
*/
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LEN 4
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
* timestamp
*/
#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LEN 4
#define MCDI_EVENT_PTP_MINOR_LBN 0
#define MCDI_EVENT_PTP_MINOR_WIDTH 32
/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
*/
#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LEN 4
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LEN 4
#define MCDI_EVENT_RX_ERR_DATA_LBN 0
#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LEN 4
#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
/* For CODE_PTP_TIME events, the major value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
/* For CODE_PTP_TIME events where report sync status is enabled, indicates
* whether the NIC clock has ever been set
*/
@@ -690,10 +776,17 @@
*/
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
/* Zero means that the request has been completed or authorized, and the driver
@@ -702,6 +795,10 @@
*/
#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define MCDI_EVENT_DBRET_DATA_OFST 0
+#define MCDI_EVENT_DBRET_DATA_LEN 4
+#define MCDI_EVENT_DBRET_DATA_LBN 0
+#define MCDI_EVENT_DBRET_DATA_WIDTH 32
/* FCDI_EVENT structuredef */
#define FCDI_EVENT_LEN 8
@@ -710,7 +807,7 @@
#define FCDI_EVENT_LEVEL_LBN 33
#define FCDI_EVENT_LEVEL_WIDTH 3
/* enum: Info. */
-#define FCDI_EVENT_LEVEL_INFO 0x0
+#define FCDI_EVENT_LEVEL_INFO 0x0
/* enum: Warning. */
#define FCDI_EVENT_LEVEL_WARN 0x1
/* enum: Error. */
@@ -718,6 +815,7 @@
/* enum: Fatal. */
#define FCDI_EVENT_LEVEL_FATAL 0x3
#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
@@ -757,6 +855,7 @@
#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
#define FCDI_EVENT_ASSERT_TYPE_LBN 36
@@ -764,12 +863,15 @@
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LEN 4
#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_STATE_LEN 4
#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
@@ -778,6 +880,7 @@
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
/* Index of MC port being referred to */
@@ -785,9 +888,11 @@
#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
/* FC Port index that matches the MC port index in SRC */
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
#define FCDI_EVENT_BOOT_RESULT_OFST 0
+#define FCDI_EVENT_BOOT_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
#define FCDI_EVENT_BOOT_RESULT_LBN 0
@@ -804,14 +909,17 @@
#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
/* Number of timestamps following */
#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
/* Seconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
/* Nanoseconds field of a timestamp record */
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
/* Timestamp records comprising the event */
@@ -831,7 +939,7 @@
#define MUM_EVENT_LEVEL_LBN 33
#define MUM_EVENT_LEVEL_WIDTH 3
/* enum: Info. */
-#define MUM_EVENT_LEVEL_INFO 0x0
+#define MUM_EVENT_LEVEL_INFO 0x0
/* enum: Warning. */
#define MUM_EVENT_LEVEL_WARN 0x1
/* enum: Error. */
@@ -839,6 +947,7 @@
/* enum: Fatal. */
#define MUM_EVENT_LEVEL_FATAL 0x3
#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_DATA_LEN 4
#define MUM_EVENT_SENSOR_ID_LBN 0
#define MUM_EVENT_SENSOR_ID_WIDTH 8
/* Enum values, see field(s): */
@@ -876,18 +985,23 @@
/* enum: Link fault has been asserted, or has cleared. */
#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LEN 4
#define MUM_EVENT_SENSOR_DATA_LBN 0
#define MUM_EVENT_SENSOR_DATA_WIDTH 32
#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4
#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LEN 4
#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_TECH_LEN 4
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
@@ -911,7 +1025,9 @@
/***********************************/
/* MC_CMD_READ32
- * Read multiple 32byte words from MC memory.
+ * Read multiple 32byte words from MC memory. Note - this command really
+ * belongs to the INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
*/
#define MC_CMD_READ32 0x1
#undef MC_CMD_0x1_PRIVILEGE_CTG
@@ -921,7 +1037,9 @@
/* MC_CMD_READ32_IN msgrequest */
#define MC_CMD_READ32_IN_LEN 8
#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_ADDR_LEN 4
#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define MC_CMD_READ32_IN_NUMWORDS_LEN 4
/* MC_CMD_READ32_OUT msgresponse */
#define MC_CMD_READ32_OUT_LENMIN 4
@@ -940,13 +1058,14 @@
#define MC_CMD_WRITE32 0x2
#undef MC_CMD_0x2_PRIVILEGE_CTG
-#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_WRITE32_IN msgrequest */
#define MC_CMD_WRITE32_IN_LENMIN 8
#define MC_CMD_WRITE32_IN_LENMAX 252
#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
@@ -958,12 +1077,14 @@
/***********************************/
/* MC_CMD_COPYCODE
- * Copy MC code between two locations and jump.
+ * Copy MC code between two locations and jump. Note - this command really
+ * belongs to the INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
*/
#define MC_CMD_COPYCODE 0x3
#undef MC_CMD_0x3_PRIVILEGE_CTG
-#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
@@ -974,6 +1095,7 @@
* is a bitfield, with each bit as documented below.
*/
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
@@ -999,9 +1121,12 @@
#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define MC_CMD_COPYCODE_IN_JUMP_LEN 4
/* enum: Control should return to the caller rather than jumping */
#define MC_CMD_COPYCODE_JUMP_NONE 0x1
@@ -1016,12 +1141,13 @@
#define MC_CMD_SET_FUNC 0x4
#undef MC_CMD_0x4_PRIVILEGE_CTG
-#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4
/* MC_CMD_SET_FUNC_OUT msgresponse */
#define MC_CMD_SET_FUNC_OUT_LEN 0
@@ -1034,7 +1160,7 @@
#define MC_CMD_GET_BOOT_STATUS 0x5
#undef MC_CMD_0x5_PRIVILEGE_CTG
-#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
@@ -1043,9 +1169,11 @@
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
/* ?? */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
/* enum: indicates that the MC wasn't flash booted */
-#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
@@ -1069,11 +1197,13 @@
#define MC_CMD_GET_ASSERTS_IN_LEN 4
/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
/* enum: No assertions have failed. */
#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
/* enum: A system-level assertion has failed. */
@@ -1086,6 +1216,7 @@
#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
@@ -1096,7 +1227,9 @@
#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
/***********************************/
@@ -1113,12 +1246,14 @@
#define MC_CMD_LOG_CTRL_IN_LEN 8
/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
/* enum: UART. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
/* enum: Event queue. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
/* Legacy argument. Must be zero. */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
#define MC_CMD_LOG_CTRL_OUT_LEN 0
@@ -1140,23 +1275,29 @@
#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
/* placeholder, set to 0 */
#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
/* enum: Reserved version number to indicate "any" version. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
/* enum: Bootrom version value for Siena. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
/* enum: Bootrom version value for Huntington. */
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1168,9 +1309,11 @@
/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
@@ -1184,2421 +1327,6 @@
/***********************************/
-/* MC_CMD_FC
- * Perform an FC operation
- */
-#define MC_CMD_FC 0x9
-
-/* MC_CMD_FC_IN msgrequest */
-#define MC_CMD_FC_IN_LEN 4
-#define MC_CMD_FC_IN_OP_HDR_OFST 0
-#define MC_CMD_FC_IN_OP_LBN 0
-#define MC_CMD_FC_IN_OP_WIDTH 8
-/* enum: NULL MCDI command to FC. */
-#define MC_CMD_FC_OP_NULL 0x1
-/* enum: Unused opcode */
-#define MC_CMD_FC_OP_UNUSED 0x2
-/* enum: MAC driver commands */
-#define MC_CMD_FC_OP_MAC 0x3
-/* enum: Read FC memory */
-#define MC_CMD_FC_OP_READ32 0x4
-/* enum: Write to FC memory */
-#define MC_CMD_FC_OP_WRITE32 0x5
-/* enum: Read FC memory */
-#define MC_CMD_FC_OP_TRC_READ 0x6
-/* enum: Write to FC memory */
-#define MC_CMD_FC_OP_TRC_WRITE 0x7
-/* enum: FC firmware Version */
-#define MC_CMD_FC_OP_GET_VERSION 0x8
-/* enum: Read FC memory */
-#define MC_CMD_FC_OP_TRC_RX_READ 0x9
-/* enum: Write to FC memory */
-#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
-/* enum: SFP parameters */
-#define MC_CMD_FC_OP_SFP 0xb
-/* enum: DDR3 test */
-#define MC_CMD_FC_OP_DDR_TEST 0xc
-/* enum: Get Crash context from FC */
-#define MC_CMD_FC_OP_GET_ASSERT 0xd
-/* enum: Get FPGA Build registers */
-#define MC_CMD_FC_OP_FPGA_BUILD 0xe
-/* enum: Read map support commands */
-#define MC_CMD_FC_OP_READ_MAP 0xf
-/* enum: FC Capabilities */
-#define MC_CMD_FC_OP_CAPABILITIES 0x10
-/* enum: FC Global flags */
-#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
-/* enum: FC IO using relative addressing modes */
-#define MC_CMD_FC_OP_IO_REL 0x12
-/* enum: FPGA link information */
-#define MC_CMD_FC_OP_UHLINK 0x13
-/* enum: Configure loopbacks and link on FPGA ports */
-#define MC_CMD_FC_OP_SET_LINK 0x14
-/* enum: Licensing operations relating to AOE */
-#define MC_CMD_FC_OP_LICENSE 0x15
-/* enum: Startup information to the FC */
-#define MC_CMD_FC_OP_STARTUP 0x16
-/* enum: Configure a DMA read */
-#define MC_CMD_FC_OP_DMA 0x17
-/* enum: Configure a timed read */
-#define MC_CMD_FC_OP_TIMED_READ 0x18
-/* enum: Control UART logging */
-#define MC_CMD_FC_OP_LOG 0x19
-/* enum: Get the value of a given clock_id */
-#define MC_CMD_FC_OP_CLOCK 0x1a
-/* enum: DDR3/QDR3 parameters */
-#define MC_CMD_FC_OP_DDR 0x1b
-/* enum: PTP and timestamp control */
-#define MC_CMD_FC_OP_TIMESTAMP 0x1c
-/* enum: Commands for SPI Flash interface */
-#define MC_CMD_FC_OP_SPI 0x1d
-/* enum: Commands for diagnostic components */
-#define MC_CMD_FC_OP_DIAG 0x1e
-/* enum: External AOE port. */
-#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
-/* enum: Internal AOE port. */
-#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
-
-/* MC_CMD_FC_IN_NULL msgrequest */
-#define MC_CMD_FC_IN_NULL_LEN 4
-#define MC_CMD_FC_IN_CMD_OFST 0
-
-/* MC_CMD_FC_IN_PHY msgrequest */
-#define MC_CMD_FC_IN_PHY_LEN 5
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* FC PHY driver operation code */
-#define MC_CMD_FC_IN_PHY_OP_OFST 4
-#define MC_CMD_FC_IN_PHY_OP_LEN 1
-/* enum: PHY init handler */
-#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
-/* enum: PHY reconfigure handler */
-#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
-/* enum: PHY reboot handler */
-#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
-/* enum: PHY get_supported_cap handler */
-#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
-/* enum: PHY get_config handler */
-#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
-/* enum: PHY get_media_info handler */
-#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
-/* enum: PHY set_led handler */
-#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
-/* enum: PHY lasi_interrupt handler */
-#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
-/* enum: PHY check_link handler */
-#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
-/* enum: PHY fill_stats handler */
-#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
-/* enum: PHY bpx_link_state_changed handler */
-#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
-/* enum: PHY get_state handler */
-#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
-/* enum: PHY start_bist handler */
-#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
-/* enum: PHY poll_bist handler */
-#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
-/* enum: PHY nvram_test handler */
-#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
-/* enum: PHY relinquish handler */
-#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
-/* enum: PHY read connection from FC - may be not required */
-#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
-/* enum: PHY read flags from FC - may be not required */
-#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
-
-/* MC_CMD_FC_IN_PHY_INIT msgrequest */
-#define MC_CMD_FC_IN_PHY_INIT_LEN 4
-#define MC_CMD_FC_IN_PHY_CMD_OFST 0
-
-/* MC_CMD_FC_IN_MAC msgrequest */
-#define MC_CMD_FC_IN_MAC_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
-#define MC_CMD_FC_IN_MAC_OP_LBN 0
-#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
-/* enum: MAC reconfigure handler */
-#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
-/* enum: MAC Set command - same as MC_CMD_SET_MAC */
-#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
-/* enum: MAC statistics */
-#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
-/* enum: MAC RX statistics */
-#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
-/* enum: MAC TX statistics */
-#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
-/* enum: MAC Read status */
-#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
-#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
-#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
-/* enum: External FPGA port. */
-#define MC_CMD_FC_PORT_EXT 0x0
-/* enum: Internal Siena-facing FPGA ports. */
-#define MC_CMD_FC_PORT_INT 0x1
-#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
-#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
-#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
-#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
-/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
- * irrelevant. Port number is derived from pci_fn; passed in FC header.
- */
-#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
-/* enum: Override default port number. Port number determined by fields
- * PORT_TYPE and PORT_IDX.
- */
-#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
-
-/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
-#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
-#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-/* MTU size */
-#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
-/* Drain Tx FIFO */
-#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
-#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
-#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
-#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
-#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
-#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
-#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
-
-/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
-#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
-#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
-#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
-#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
-/* MAC statistics index */
-#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
-#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
-#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
-#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
-#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
-#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
-#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
-#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
-/* Number of statistics to read */
-#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
-#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
-#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
-
-/* MC_CMD_FC_IN_READ32 msgrequest */
-#define MC_CMD_FC_IN_READ32_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
-#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
-#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
-
-/* MC_CMD_FC_IN_WRITE32 msgrequest */
-#define MC_CMD_FC_IN_WRITE32_LENMIN 16
-#define MC_CMD_FC_IN_WRITE32_LENMAX 252
-#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
-#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
-#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
-#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
-#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
-#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
-
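A sketch of sizing this variable-length request: the total is 12 fixed bytes plus one 4-byte buffer entry per word, and the word count must stay within the BUFFER_MINNUM/BUFFER_MAXNUM bounds so the result lies between LENMIN and LENMAX. fc_write32_len() is a hypothetical helper.

#include <stddef.h>

/* Validate the word count and compute the request length. */
static int
fc_write32_len(unsigned int num_words, size_t *lenp)
{
    if (num_words < MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM ||
        num_words > MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM)
        return -1;
    *lenp = MC_CMD_FC_IN_WRITE32_LEN(num_words); /* 12 + 4 * num_words */
    return 0;
}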
-/* MC_CMD_FC_IN_TRC_READ msgrequest */
-#define MC_CMD_FC_IN_TRC_READ_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
-
-/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
-#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
-#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
-#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
-#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
-
-/* MC_CMD_FC_IN_GET_VERSION msgrequest */
-#define MC_CMD_FC_IN_GET_VERSION_LEN 4
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-
-/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
-#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
-
-/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
-#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
-#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
-#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
-
-/* MC_CMD_FC_IN_SFP msgrequest */
-#define MC_CMD_FC_IN_SFP_LEN 28
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* Link speed in Mbit/s: 100, 1000, 10000 or 40000 */
-#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
-/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
-#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
-/* Not relevant for cards with QSFP modules. For older cards, true if module is
- * a dual speed SFP+ module.
- */
-#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
-/* True if an SFP Module is present (other fields valid when true) */
-#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
-/* The type of the SFP+ Module. For later cards with QSFP modules, this field
- * is unused and the type is communicated by other means.
- */
-#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
-/* Capabilities corresponding to 1 bits. */
-#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
-
-/* MC_CMD_FC_IN_DDR_TEST msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
-#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
-#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
-/* enum: DRAM Test Start */
-#define MC_CMD_FC_OP_DDR_TEST_START 0x1
-/* enum: DRAM Test Poll */
-#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
-
-/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
-#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
-#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
-#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
-#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
-#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
-#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
-
-/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
-#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
-#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
-/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
-/* Clear previous test result and prepare for restarting DDR test */
-#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
-
-/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
-#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-
-/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
-#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* FPGA build info operation code */
-#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
-/* enum: Get the build registers */
-#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
-/* enum: Get the services registers */
-#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
-/* enum: Get the BSP version */
-#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
-/* enum: Get the build registers for V2 (SFA974X) */
-#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
-/* enum: Get the services registers for V2 (SFA974X) */
-#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
-
-/* MC_CMD_FC_IN_READ_MAP msgrequest */
-#define MC_CMD_FC_IN_READ_MAP_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
-#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
-#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
-/* enum: Get the number of map regions */
-#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
-/* enum: Get the specified map */
-#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
-
-/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
-#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
-#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
-
-/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
-#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-
-/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
-#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
-
-/* MC_CMD_FC_IN_IO_REL msgrequest */
-#define MC_CMD_FC_IN_IO_REL_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
-#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
-#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
-/* enum: Get the base address that the FC applies to relative commands */
-#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
-/* enum: Read data */
-#define MC_CMD_FC_IN_IO_REL_READ32 0x2
-/* enum: Write data */
-#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
-#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
-#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
-/* enum: Application address space */
-#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
-/* enum: Flash address space */
-#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
-
-/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
-#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
-
-/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
-#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
-#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
-#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
-
-/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
-#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
-#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
-#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
-#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
-#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
-#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
-
-/* MC_CMD_FC_IN_UHLINK msgrequest */
-#define MC_CMD_FC_IN_UHLINK_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
-#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
-#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
-/* enum: Get PHY configuration info */
-#define MC_CMD_FC_OP_UHLINK_PHY 0x1
-/* enum: Get MAC configuration info */
-#define MC_CMD_FC_OP_UHLINK_MAC 0x2
-/* enum: Get Rx eye table */
-#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
-/* enum: Dump Rx eye plot */
-#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
-/* enum: Read back Rx eye plot */
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
-/* enum: Retune Rx settings */
-#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
-/* enum: Set loopback mode on fpga port */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
-/* enum: Get loopback mode config state on fpga port */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
-#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
-#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
-#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
-#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
-#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
-#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
-/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
- * irrelevant. Port number is derived from pci_fn; passed in FC header.
- */
-#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
-/* enum: Override default port number. Port number determined by fields
- * PORT_TYPE and PORT_IDX.
- */
-#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
-
-/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
-#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-
-/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
-#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-
-/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
-#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
-#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
-
-/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
-#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-
-/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
-#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
-#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
-
-/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
-#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-
-/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
-#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
-#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
-#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
-#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
-#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
-
-/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
-#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
-
-/* MC_CMD_FC_IN_SET_LINK msgrequest */
-#define MC_CMD_FC_IN_SET_LINK_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
-#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
-#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
-#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
-#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
-#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
-#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
-#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
-#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
-#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
-
-/* MC_CMD_FC_IN_LICENSE msgrequest */
-#define MC_CMD_FC_IN_LICENSE_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
-#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
-#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
-
-/* MC_CMD_FC_IN_STARTUP msgrequest */
-#define MC_CMD_FC_IN_STARTUP_LEN 40
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
-#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
-/* Length of identifier */
-#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
-/* Identifier for AOE FPGA */
-#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
-#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
-#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
-
-/* MC_CMD_FC_IN_DMA msgrequest */
-#define MC_CMD_FC_IN_DMA_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DMA_OP_OFST 4
-#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
-#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
-
-/* MC_CMD_FC_IN_DMA_STOP msgrequest */
-#define MC_CMD_FC_IN_DMA_STOP_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
-/* FC supplied handle */
-#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
-
-/* MC_CMD_FC_IN_DMA_READ msgrequest */
-#define MC_CMD_FC_IN_DMA_READ_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
-#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
-#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
-
-/* MC_CMD_FC_IN_TIMED_READ msgrequest */
-#define MC_CMD_FC_IN_TIMED_READ_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
-#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
-
-/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
-#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
-/* Host supplied handle (unique) */
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
-/* Address into which to transfer data in host */
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
-/* AOE address from which to transfer data */
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
-/* Length of AOE transfer (total) */
-#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
-/* Length of host transfer (total) */
-#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
-/* Offset back from aoe_address to apply operation to */
-#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
-/* Data to apply at offset */
-#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
-#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
-#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
-#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
-#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
-#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
-#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
-#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
-#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
-#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
-#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
-#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
-/* Period at which reads are performed (100ms units) */
-#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
-
-/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
-#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
-/* FC supplied handle */
-#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
-
-/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
-#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
-/* FC supplied handle */
-#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
-
-/* MC_CMD_FC_IN_LOG msgrequest */
-#define MC_CMD_FC_IN_LOG_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_LOG_OP_OFST 4
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
-#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
-
-/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
-/* Partition offset into flash */
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
-/* Partition length */
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
-/* Partition erase size */
-#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
-
-/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
-#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
-/* Enable/disable printing to JTAG UART */
-#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
-
-/* MC_CMD_FC_IN_CLOCK msgrequest */
-#define MC_CMD_FC_IN_CLOCK_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
-#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
-#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
-/* Clock on which to perform the operation */
-#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
-#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
-#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
-
-/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest */
-#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
-/* Retrieve the clock value of the specified clock */
-/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
-
-/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest */
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
-/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
-/* Nanoseconds part of the clock value to set */
-#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
-
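The LO/HI offset pairs above split a 64-bit field into two consecutive little-endian dwords, low dword first. A minimal sketch follows; fc_put_dword() and fc_clock_set_seconds() are hypothetical helpers.

#include <stdint.h>

/* Store a dword little-endian at the given payload offset. */
static void
fc_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
    buf[ofst + 0] = (uint8_t)(val & 0xff);
    buf[ofst + 1] = (uint8_t)((val >> 8) & 0xff);
    buf[ofst + 2] = (uint8_t)((val >> 16) & 0xff);
    buf[ofst + 3] = (uint8_t)((val >> 24) & 0xff);
}

/* Split the 64-bit seconds value across the LO/HI dwords. */
static void
fc_clock_set_seconds(uint8_t *buf, uint64_t seconds)
{
    fc_put_dword(buf, MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST,
                 (uint32_t)(seconds & 0xffffffffu));
    fc_put_dword(buf, MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST,
                 (uint32_t)(seconds >> 32));
}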
-/* MC_CMD_FC_IN_DDR msgrequest */
-#define MC_CMD_FC_IN_DDR_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DDR_OP_OFST 4
-#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
-#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
-#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
-#define MC_CMD_FC_IN_DDR_BANK_OFST 8
-#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
-#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
-#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
-#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
-#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
-
-/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
-#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
-/* Affected bank */
-/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
-/* Flags */
-#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
-#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
-/* 128-byte page of serial presence detect data read from module's EEPROM */
-#define MC_CMD_FC_IN_DDR_SPD_OFST 16
-#define MC_CMD_FC_IN_DDR_SPD_LEN 1
-#define MC_CMD_FC_IN_DDR_SPD_NUM 128
-/* Page index of the SPD data copied into MC_CMD_FC_IN_DDR_SPD */
-#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
-
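A sketch of the array-field convention (_OFST/_LEN/_NUM): element i of an array lives at _OFST + i * _LEN, so the 128 one-byte SPD entries occupy offsets 16 through 143, with the page index dword immediately after at offset 144. fc_ddr_set_spd_page() is a hypothetical helper; little-endian storage of the page index is an assumption.

#include <stdint.h>
#include <string.h>

/* Copy a whole 128-byte SPD page into the request and set its page index. */
static void
fc_ddr_set_spd_page(uint8_t *buf, const uint8_t *spd, uint32_t page_id)
{
    memcpy(buf + MC_CMD_FC_IN_DDR_SPD_OFST, spd,
           MC_CMD_FC_IN_DDR_SPD_LEN * MC_CMD_FC_IN_DDR_SPD_NUM);
    buf[MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST + 0] = (uint8_t)(page_id & 0xff);
    buf[MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST + 1] = (uint8_t)(page_id >> 8);
    buf[MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST + 2] = (uint8_t)(page_id >> 16);
    buf[MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST + 3] = (uint8_t)(page_id >> 24);
}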
-/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
-#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
-/* Affected bank */
-/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
-/* Size of DDR */
-#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
-
-/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
-#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
-/* Affected bank */
-/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
-
-/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
-#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* FC timestamp operation code */
-#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
-/* enum: Read transmit timestamp(s) */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
-/* enum: Read snapshot timestamps */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
-/* enum: Clear all transmit timestamps */
-#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
-
-/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
-/* Controls filtering of the returned timestamp: either the most recent,
- * or the one matching the clock ID, port number and sequence number
- * specified below
- */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
-/* enum: Return most recent timestamp. No filtering */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
-/* enum: Match timestamp against the PTP clock ID, port number and sequence
- * number specified
- */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
-/* Clock identity of PTP packet for which timestamp required */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
-/* Port number of PTP packet for which timestamp required */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
-/* Sequence number of PTP packet for which timestamp required */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
-
-/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
-
-/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
-#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
-
-/* MC_CMD_FC_IN_SPI msgrequest */
-#define MC_CMD_FC_IN_SPI_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* Basic commands for SPI Flash. */
-#define MC_CMD_FC_IN_SPI_OP_OFST 4
-/* enum: SPI Flash read */
-#define MC_CMD_FC_IN_SPI_READ 0x0
-/* enum: SPI Flash write */
-#define MC_CMD_FC_IN_SPI_WRITE 0x1
-/* enum: SPI Flash erase */
-#define MC_CMD_FC_IN_SPI_ERASE 0x2
-
-/* MC_CMD_FC_IN_SPI_READ msgrequest */
-#define MC_CMD_FC_IN_SPI_READ_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
-#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
-#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
-
-/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
-#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
-#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
-#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
-#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
-#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
-#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
-#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
-#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
-
-/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
-#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
-#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
-#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
-
-/* MC_CMD_FC_IN_DIAG msgrequest */
-#define MC_CMD_FC_IN_DIAG_LEN 8
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-/* Operation code indicating component type */
-#define MC_CMD_FC_IN_DIAG_OP_OFST 4
-/* enum: Power noise generator. */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
-/* enum: DDR soak test component. */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
-/* enum: Diagnostics datapath control component. */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
-
-/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
-/* Sub-opcode describing the operation to be carried out */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
-/* enum: Read the configuration (the 32-bit values in each of the clock enable
- * count and toggle count registers)
- */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
-/* enum: Write a new configuration to the clock enable count and toggle count
- * registers
- */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
-
-/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
-
-/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
-/* The 32-bit value to be written to the toggle count register */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
-/* The 32-bit value to be written to the clock enable count register */
-#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
-/* Sub-opcode describing the operation to be carried out */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
-/* enum: Starts DDR soak test on selected banks */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
-/* enum: Read status of DDR soak test */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
-/* enum: Stop test */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
-/* enum: Set or clear bit that triggers fake errors. These cause subsequent
- * tests to fail until the bit is cleared.
- */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
-/* Mask of DDR banks to be tested */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
-/* Pattern to use in the soak test */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
-/* Whether to run repeated automatic tests until a STOP command is issued,
- * or one single test
- */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
-/* DDR bank to read status from */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
-#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
-#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
-#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
-#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
-#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
-/* Mask of DDR banks to be tested */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
-
-/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
-/* Mask of DDR banks to set/clear error flag on */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
-#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
-
-/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
-/* Sub-opcode describing the operation to be carried out */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
-/* enum: Set a known datapath configuration */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
-/* enum: Apply raw config to datapath control registers */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
-
-/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
-/* Datapath configuration identifier */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
-
-/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
-/* MC_CMD_FC_IN_CMD_OFST 0 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
-/* Value to write into control register 1 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
-/* Value to write into control register 2 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
-/* Value to write into control register 3 */
-#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
-
-/* MC_CMD_FC_OUT msgresponse */
-#define MC_CMD_FC_OUT_LEN 0
-
-/* MC_CMD_FC_OUT_NULL msgresponse */
-#define MC_CMD_FC_OUT_NULL_LEN 0
-
-/* MC_CMD_FC_OUT_READ32 msgresponse */
-#define MC_CMD_FC_OUT_READ32_LENMIN 4
-#define MC_CMD_FC_OUT_READ32_LENMAX 252
-#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
-#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
-#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
-#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
-#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
-
-/* MC_CMD_FC_OUT_WRITE32 msgresponse */
-#define MC_CMD_FC_OUT_WRITE32_LEN 0
-
-/* MC_CMD_FC_OUT_TRC_READ msgresponse */
-#define MC_CMD_FC_OUT_TRC_READ_LEN 16
-#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
-#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
-#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
-
-/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
-#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
-
-/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
-#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
-#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
-#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
-#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
-#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
-#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
-
-/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
-#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
-#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
-#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
-#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
-
-/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
-#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
-
-/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
-#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
-
-/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
-#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
-
-/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
-#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
-#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
-
-/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
-#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
-#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
-#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
-#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
-#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
-#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
-#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
-#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
-#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
-#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
-#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
-#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
-#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
-#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
-/* enum: (Last entry) */
-#define MC_CMD_FC_MAC_RX_NSTATS 0x19
-
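A sketch of reading one 64-bit counter out of this response: statistic i occupies 8 bytes at STATISTICS_OFST + i * STATISTICS_LEN, low dword first per the LO/HI offsets above. fc_get_dword() and fc_rx_stat() are hypothetical helpers.

#include <stdint.h>

/* Load a dword little-endian from the given payload offset. */
static uint32_t
fc_get_dword(const uint8_t *buf, unsigned int ofst)
{
    return (uint32_t)buf[ofst] |
           ((uint32_t)buf[ofst + 1] << 8) |
           ((uint32_t)buf[ofst + 2] << 16) |
           ((uint32_t)buf[ofst + 3] << 24);
}

/* Fetch statistic idx (idx < MC_CMD_FC_MAC_RX_NSTATS) from the response. */
static uint64_t
fc_rx_stat(const uint8_t *resp, unsigned int idx)
{
    unsigned int ofst = MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST +
                        idx * MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN;

    return (uint64_t)fc_get_dword(resp, ofst) |
           ((uint64_t)fc_get_dword(resp, ofst + 4) << 32);
}

/* e.g. fc_rx_stat(resp, MC_CMD_FC_MAC_RX_CRC_ERRORS); */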
-/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
-#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
-#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
-#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
-#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
-#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
-#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
-#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
-#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
-#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
-#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
-#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
-#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
-#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
-#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
-/* enum: (Last entry) */
-#define MC_CMD_FC_MAC_TX_NSTATS 0x16
-
-/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
-#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
-/* MAC Statistics */
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
-#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
-
-/* MC_CMD_FC_OUT_MAC msgresponse */
-#define MC_CMD_FC_OUT_MAC_LEN 0
-
-/* MC_CMD_FC_OUT_SFP msgresponse */
-#define MC_CMD_FC_OUT_SFP_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
-#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
-/* enum: Test not yet initiated */
-#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
-/* enum: Test is in progress */
-#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
-/* enum: Test completed in specified time */
-#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
-/* enum: Test did not complete in specified time */
-#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
-/* Test result from FPGA */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
-#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
-
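A sketch of decoding the poll response using the CODE_LBN/CODE_WIDTH pair above, assuming the STATUS dword has already been loaded little-endian from MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST. FC_FIELD() and fc_ddr_test_done() are hypothetical helpers, and treating SUCCESS and TIMER_EXPIRED as the terminal states is an assumption based on the enum comments.

#include <stdint.h>

/* Extract a field of WIDTH bits at low bit LBN from a dword. */
#define FC_FIELD(dword, lbn, width) \
    (((dword) >> (lbn)) & ((1u << (width)) - 1u))

static int
fc_ddr_test_done(uint32_t status)
{
    uint32_t code = FC_FIELD(status, MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN,
                             MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH);

    return code == MC_CMD_FC_OP_DDR_TEST_SUCCESS ||
           code == MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED;
}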
-/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
-#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
-
-/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
-#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
-/* Assertion status flag. */
-#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
-#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
-#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
-/* enum: No crash data available */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
-/* enum: New crash data available */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
-/* enum: Crash data has been sent */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
-#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
-#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
-/* enum: No crash has been recorded. */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
-/* enum: Crash due to exception. */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
-/* enum: Crash due to assertion. */
-#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
-/* Failing PC value */
-#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
-/* Saved GP regs */
-#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
-#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
-#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
-/* Exception Type */
-#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
-/* Instruction at which exception occurred */
-#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
-/* BAD Address that triggered address-based exception */
-#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
-
-/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
-#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
-#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
-#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
-#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
-/* Build timestamp (seconds since epoch) */
-#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
-#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
-#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
-#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
-#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
-#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
-#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
-#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
-#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
-#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
-#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
-#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
-#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
-
-/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
-/* Build timestamp (seconds since epoch) */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
-#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
-#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
-/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
-/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
-
-/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
-#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
-#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
-/* Build timestamp (seconds since epoch) */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
-#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
-#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
-#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
-#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
-#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
-#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
-#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
-#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
-
-/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
-/* Build timestamp (seconds since epoch) */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
-/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
-/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
-#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
-
-/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
-#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
-/* Qsys system ID */
-#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
-#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
-#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
-#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
-#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
-#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
-#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
-
-/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
-#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
-/* Number of maps */
-#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
-
-/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
-/* Index of the map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
-/* Options for the map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
-/* Address of start of map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
-/* Length of address map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
-/* Component information field */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
-/* License expiry data for map */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
-/* Name of the component */
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
-#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
-
-/* MC_CMD_FC_OUT_READ_MAP msgresponse */
-#define MC_CMD_FC_OUT_READ_MAP_LEN 0
-
-/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
-#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
-/* Number of internal ports */
-#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
-/* Number of external ports */
-#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
-
-/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
-#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
-#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
-
-/* MC_CMD_FC_OUT_IO_REL msgresponse */
-#define MC_CMD_FC_OUT_IO_REL_LEN 0
-
-/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
-#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
-#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
-#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
-
-/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
-#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
-#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
-#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
-#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
-#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
-#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
-#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
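
The LENMIN/LENMAX/LEN(num) trio together with the per-element MINNUM/MAXNUM bounds is the recurring idiom for variable-length MCDI payloads. A minimal sketch of how a caller might validate a READ32 response against them (fc_read32_resp_ok is a hypothetical helper, not part of this header):

#include <stddef.h>

/* Hypothetical check: does a READ32 response of resp_len bytes carry
 * exactly num dwords, with num inside the MINNUM/MAXNUM bounds? */
static int fc_read32_resp_ok(size_t resp_len, unsigned int num)
{
	if (num < MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM ||
	    num > MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM)
		return 0;
	return resp_len == MC_CMD_FC_OUT_IO_REL_READ32_LEN(num);
}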
-
-/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
-#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
-
-/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
-/* Transceiver Transmit settings */
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
-/* Transceiver Receive settings */
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
-/* Rx eye opening */
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
-#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
-/* PCS status word */
-#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
-/* Link status word */
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
-/* Current SFP parameters applied */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
-/* Link speed: 100, 1000 or 10000 (Mb/s) */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
-/* Length of copper cable - zero when not relevant */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
-/* True if a dual speed SFP+ module */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
-/* True if an SFP Module is present (other fields valid when true) */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
-/* The type of the SFP+ Module */
-#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
-/* PHY config flags */
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
-#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
-
-/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
-/* MAC configuration applied */
-#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
-/* MTU size */
-#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
-/* IF Mode status */
-#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
-/* MAC address configured */
-#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
-#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
-#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
-#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
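
Fields wider than 32 bits are split into _LO/_HI dwords at consecutive offsets, as for the MAC address above. A reassembly sketch, assuming a little-endian host so the raw dword bytes can be copied directly (uhlink_mac_addr is illustrative, not part of this header):

#include <stdint.h>
#include <string.h>

/* Illustrative reassembly of the 64-bit MAC address field; resp points
 * at the start of the MC_CMD_FC_OUT_UHLINK_MAC response payload. */
static uint64_t uhlink_mac_addr(const uint8_t *resp)
{
	uint32_t lo, hi;

	memcpy(&lo, resp + MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST, sizeof(lo));
	memcpy(&hi, resp + MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST, sizeof(hi));
	return ((uint64_t)hi << 32) | lo;
}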
-
-/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
-/* Rx Eye measurements */
-#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
-#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
-#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
-
-/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
-
-/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
-/* Has the eye plot dump completed and is the returned data valid? */
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
-/* Rx Eye binary plot */
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
-#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
-
-/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
-
-/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
-
-/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
-#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
-
-/* MC_CMD_FC_OUT_UHLINK msgresponse */
-#define MC_CMD_FC_OUT_UHLINK_LEN 0
-
-/* MC_CMD_FC_OUT_SET_LINK msgresponse */
-#define MC_CMD_FC_OUT_SET_LINK_LEN 0
-
-/* MC_CMD_FC_OUT_LICENSE msgresponse */
-#define MC_CMD_FC_OUT_LICENSE_LEN 12
-/* Count of valid keys */
-#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
-/* Count of invalid keys */
-#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
-/* Count of blacklisted keys */
-#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
-
-/* MC_CMD_FC_OUT_STARTUP msgresponse */
-#define MC_CMD_FC_OUT_STARTUP_LEN 4
-/* Capabilities of the FPGA/FC */
-#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
-#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
-#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
-
-/* MC_CMD_FC_OUT_DMA_READ msgresponse */
-#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
-#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
-#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
-/* The data read */
-#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
-#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
-#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
-#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
-
-/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
-#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
-/* Timer handle */
-#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
-
-/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
-/* Host supplied handle (unique) */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
-/* Address into which to transfer data in host */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
-/* AOE address from which to transfer data */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
-/* Length of AOE transfer (total) */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
-/* Length of host transfer (total) */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
-/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
-#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
-/* When active, start read time */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
-/* When active, end read time */
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
-#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
-
-/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
-#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
-
-/* MC_CMD_FC_OUT_LOG msgresponse */
-#define MC_CMD_FC_OUT_LOG_LEN 0
-
-/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
-#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
-
-/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
-#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
-#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
-#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
-
-/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
-#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
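
Throughout this header, an _LBN/_WIDTH pair describes a sub-field of a 32-bit dword: LBN is the field's low bit number and WIDTH its width in bits. A minimal decoding sketch using the DDR status flags above (the mcdi_field_get helper is illustrative, not part of this header):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper: extract an LBN/WIDTH sub-field from a dword
 * (valid for widths 1..31). */
static inline uint32_t mcdi_field_get(uint32_t dword, unsigned int lbn,
				      unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1u);
}

/* Decode the READY bit from the dword read at ..._FLAGS_OFST. */
static bool fc_ddr_ready(uint32_t flags)
{
	return mcdi_field_get(flags, MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN,
			      MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH) != 0;
}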
-
-/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
-
-/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
-#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
-
-/* MC_CMD_FC_OUT_SPI_READ msgresponse */
-#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
-#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
-#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
-#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
-#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
-#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
-#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
-
-/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
-#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
-
-/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
-#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
-#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
-/* The 32-bit value read from the toggle count register */
-#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
-/* The 32-bit value read from the clock enable count register */
-#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
-
-/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
-#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
-/* DDR soak test status word; bits [4:0] are relevant. */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
-/* DDR soak test error count */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
-
-/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
-
-/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
-#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
-
-
-/***********************************/
-/* MC_CMD_AOE
- * AOE operations on MC
- */
-#define MC_CMD_AOE 0xa
-
-/* MC_CMD_AOE_IN msgrequest */
-#define MC_CMD_AOE_IN_LEN 4
-#define MC_CMD_AOE_IN_OP_HDR_OFST 0
-#define MC_CMD_AOE_IN_OP_LBN 0
-#define MC_CMD_AOE_IN_OP_WIDTH 8
-/* enum: FPGA and CPLD information */
-#define MC_CMD_AOE_OP_INFO 0x1
-/* enum: Currents and voltages read from MCP3424s; DEBUG */
-#define MC_CMD_AOE_OP_CURRENTS 0x2
-/* enum: Temperatures at locations around the PCB; DEBUG */
-#define MC_CMD_AOE_OP_TEMPERATURES 0x3
-/* enum: Set CPLD to idle */
-#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
-/* enum: Read from CPLD register */
-#define MC_CMD_AOE_OP_CPLD_READ 0x5
-/* enum: Write to CPLD register */
-#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
-/* enum: Execute CPLD instruction */
-#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
-/* enum: Reprogram the CPLD on the AOE device */
-#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
-/* enum: AOE power control */
-#define MC_CMD_AOE_OP_POWER 0x9
-/* enum: AOE image loading */
-#define MC_CMD_AOE_OP_LOAD 0xa
-/* enum: Fan monitoring */
-#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
-/* enum: Fan failures since last reset */
-#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
-/* enum: Get generic AOE MAC statistics */
-#define MC_CMD_AOE_OP_MAC_STATS 0xd
-/* enum: Retrieve PHY specific information */
-#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
-/* enum: Write a number of JTAG primitive commands, return will give data */
-#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
-/* enum: Control access to the FPGA via the Siena JTAG Chain */
-#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
-/* enum: Set the MTU offset between Siena and AOE MACs */
-#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
-/* enum: How link state is handled */
-#define MC_CMD_AOE_OP_LINK_STATE 0x12
-/* enum: How Siena MAC statistics are reported (deprecated - use
- * MC_CMD_AOE_OP_ASIC_STATS)
- */
-#define MC_CMD_AOE_OP_SIENA_STATS 0x13
-/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
- * command MC_CMD_AOE_OP_SIENA_STATS
- */
-#define MC_CMD_AOE_OP_ASIC_STATS 0x13
-/* enum: DDR memory information */
-#define MC_CMD_AOE_OP_DDR 0x14
-/* enum: FC control */
-#define MC_CMD_AOE_OP_FC 0x15
-/* enum: DDR ECC status reads */
-#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
-/* enum: Commands for MC-SPI Master emulation */
-#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
-/* enum: Commands for FC boot control */
-#define MC_CMD_AOE_OP_FC_BOOT 0x18
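
MC_CMD_AOE multiplexes all of the sub-operations above through the 8-bit OP field packed into the first request dword. A sketch of composing that header dword (aoe_req_set_op is a hypothetical helper; byte-order handling is simplified to a little-endian host):

#include <stdint.h>
#include <string.h>

/* Hypothetical helper: pack a sub-operation (e.g. MC_CMD_AOE_OP_INFO)
 * into the request header dword per OP_HDR_OFST/OP_LBN/OP_WIDTH. */
static void aoe_req_set_op(uint8_t *req, uint32_t op)
{
	uint32_t hdr = (op & ((1u << MC_CMD_AOE_IN_OP_WIDTH) - 1u))
			<< MC_CMD_AOE_IN_OP_LBN;

	memcpy(req + MC_CMD_AOE_IN_OP_HDR_OFST, &hdr, sizeof(hdr));
}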
-
-/* MC_CMD_AOE_OUT msgresponse */
-#define MC_CMD_AOE_OUT_LEN 0
-
-/* MC_CMD_AOE_IN_INFO msgrequest */
-#define MC_CMD_AOE_IN_INFO_LEN 4
-#define MC_CMD_AOE_IN_CMD_OFST 0
-
-/* MC_CMD_AOE_IN_CURRENTS msgrequest */
-#define MC_CMD_AOE_IN_CURRENTS_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
-#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
-#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
-#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
-#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
-
-/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
-#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
-#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
-#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
-
-/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
-#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
-
-/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
-/* enum: Reprogram CPLD, poll for completion */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
-/* enum: Reprogram CPLD, send event on completion */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
-/* enum: Get status of reprogramming operation */
-#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
-
-/* MC_CMD_AOE_IN_POWER msgrequest */
-#define MC_CMD_AOE_IN_POWER_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* Turn on or off AOE power */
-#define MC_CMD_AOE_IN_POWER_OP_OFST 4
-/* enum: Turn off FPGA power */
-#define MC_CMD_AOE_IN_POWER_OFF 0x0
-/* enum: Turn on FPGA power */
-#define MC_CMD_AOE_IN_POWER_ON 0x1
-/* enum: Clear peak power measurement */
-#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
-/* enum: Show current power in sensors output */
-#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
-/* enum: Show peak power in sensors output */
-#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
-/* enum: Show current DDR current */
-#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
-/* enum: Show peak DDR current */
-#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
-/* enum: Clear peak DDR current */
-#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
-
-/* MC_CMD_AOE_IN_LOAD msgrequest */
-#define MC_CMD_AOE_IN_LOAD_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* Image to load (0 - main or 1 - diagnostic) in the normal sequence */
-#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
-
-/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
-#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* If non-zero, report measured fan RPM rather than nominal */
-#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
-
-/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
-#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
-#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* AOE port */
-#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
-/* Host memory address for statistics */
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
-#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
-#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
-#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
-#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
-#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
-/* Length of DMA data (optional) */
-#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
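
The MAC_STATS CMD word combines single-shot flags with a 16-bit period, so one dword both selects the mode and sets the interval. A sketch composing it for periodic DMA (the helper name and the 100 ms period are illustrative only):

#include <stdint.h>

/* Illustrative composition of the CMD dword: periodic DMA of MAC
 * statistics every 100 ms, per the LBN definitions above. */
static uint32_t aoe_mac_stats_periodic_cmd(void)
{
	return (1u << MC_CMD_AOE_IN_MAC_STATS_DMA_LBN) |
	       (1u << MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN) |
	       (100u << MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN);
}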
-
-/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
-#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* AOE port */
-#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
-#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
-
-/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
-#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
-#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
-#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
-#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
-
-/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
-#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* Enable or disable access */
-#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
-/* enum: Enable access */
-#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
-/* enum: Disable access */
-#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
-
-/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* AOE port - specifies a port number unless ALL_EXTERNAL or ALL_INTERNAL */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
-/* enum: Apply to all external ports */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
-/* enum: Apply to all internal ports */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
-/* The MTU offset to be applied to the external ports */
-#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
-
-/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
-#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
-#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
-#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
-/* enum: AOE and associated external port */
-#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
-/* enum: AOE and OR of all external ports */
-#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
-/* enum: Individual ports */
-#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
-/* enum: Configure link state mode on given AOE port */
-#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
-#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
-#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
-/* enum: No-op */
-#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
-/* enum: logical OR of all SFP ports link status */
-#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
-/* enum: logical AND of all SFP ports link status */
-#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
-#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
-#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
-
-/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
-#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* How MAC statistics are reported */
-#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
-/* enum: Statistics from Siena (default) */
-#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
-/* enum: Statistics from AOE external ports */
-#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
-
-/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
-#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* How MAC statistics are reported */
-#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
-/* enum: Statistics from the ASIC (default) */
-#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
-/* enum: Statistics from AOE external ports */
-#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
-
-/* MC_CMD_AOE_IN_DDR msgrequest */
-#define MC_CMD_AOE_IN_DDR_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
-/* Enum values, see field(s): */
-/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
-/* Page index of SPD data */
-#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
-
-/* MC_CMD_AOE_IN_FC msgrequest */
-#define MC_CMD_AOE_IN_FC_LEN 4
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-
-/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
-#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
-/* Enum values, see field(s): */
-/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
-
-/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* Basic commands for MC SPI Master emulation. */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
-/* enum: MC SPI read */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
-/* enum: MC SPI write */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
-
-/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
-
-/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
-#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
-
-/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
-#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
-/* MC_CMD_AOE_IN_CMD_OFST 0 */
-/* FC boot control flags */
-#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
-#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
-#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
-
-/* MC_CMD_AOE_OUT_INFO msgresponse */
-#define MC_CMD_AOE_OUT_INFO_LEN 44
-/* JTAG IDCODE of CPLD */
-#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
-/* Version of CPLD */
-#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
-/* JTAG IDCODE of FPGA */
-#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
-/* JTAG USERCODE of FPGA */
-#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
-/* FPGA type - read from CPLD straps */
-#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
-#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
-#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
-/* FPGA state (debug) */
-#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
-/* FPGA image - partition from which loaded */
-#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
-/* FC state */
-#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
-/* enum: Set if watchdog working */
-#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
-/* enum: Set if MC-FC communications working */
-#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
-/* Random pieces of information */
-#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
-/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
-#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
-/* enum: CPLD apparently good */
-#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
-/* enum: FPGA working normally */
-#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
-/* enum: FPGA is powered */
-#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
-/* enum: Board has incompatible SODIMMs fitted */
-#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
-/* enum: Board has ByteBlaster connected */
-#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
-/* enum: FPGA Boot flash has an invalid header. */
-#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
-/* enum: FPGA Application flash is accessible. */
-#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
-/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
-#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
-#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
-#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
-#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
-#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
-#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
-/* Result of FC booting - not valid while a ByteBlaster is connected. */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
-/* enum: No error */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
-/* enum: Bad address set in CPLD */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
-/* enum: Bad header */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
-/* enum: Bad text section details */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
-/* enum: Bad checksum */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
-/* enum: Bad BSP */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
-/* enum: Flash mode is invalid */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
-/* enum: FC application loaded and execution attempted */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
-/* enum: FC application started */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
-/* enum: No bootrom in FPGA */
-#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
-
-/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
-#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
-/* Set of currents and voltages (mA or mV as appropriate) */
-#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
-#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
-#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
-#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
-#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
-
-/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
-#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
-/* Set of temperatures */
-#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
-#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
-#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
-/* enum: The first set of enum values is for Modena code. */
-#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
-#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
-/* enum: The second set of enum values is for Sorrento code. */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
-#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
-
-/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
-#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
-/* The value read from the CPLD */
-#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
-
-/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
-#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
-#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
-#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
-/* Failure counts for each fan */
-#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
-#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
-#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
-#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
-
-/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
-#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
-/* Results of status command (only) */
-#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
-
-/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
-#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
-
-/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
-#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
-
-/* MC_CMD_AOE_OUT_LOAD msgresponse */
-#define MC_CMD_AOE_OUT_LOAD_LEN 0
-
-/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
-#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
-
-/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
- * for details
- */
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
-#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
-
-/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
-/* in bytes */
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
-#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
-
-/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
-#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
-#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
-#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
-/* Used to align the in and out data blocks so the MC can re-use the cmd */
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
-/* out bytes */
-#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
-#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
-
-/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
-#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
-
-/* MC_CMD_AOE_OUT_DDR msgresponse */
-#define MC_CMD_AOE_OUT_DDR_LENMIN 17
-#define MC_CMD_AOE_OUT_DDR_LENMAX 252
-#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
-/* Information on the module. */
-#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
-#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
-#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
-#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
-#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
-#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
-/* Memory size, in MB. */
-#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
-/* The memory type, as reported from SPD information */
-#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
-/* Nominal voltage of the module (as applied) */
-#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
-/* SPD data read from the module */
-#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
-#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
-#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
-#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
-
-/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
-#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
-
-/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
-#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
-
-/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
-#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
-
-/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
-#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
-
-/* MC_CMD_AOE_OUT_FC msgresponse */
-#define MC_CMD_AOE_OUT_FC_LEN 0
-
-/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
-/* Flags describing status info on the module. */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
-/* DDR ECC status on the module. */
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
-#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
-
-/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
-#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
-
-/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
-#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
-
-
-/***********************************/
/* MC_CMD_PTP
* Perform PTP operation
*/
@@ -3616,41 +1344,54 @@
#define MC_CMD_PTP_OP_ENABLE 0x1
/* enum: Disable PTP packet timestamping operation. */
#define MC_CMD_PTP_OP_DISABLE 0x2
-/* enum: Send a PTP packet. */
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
#define MC_CMD_PTP_OP_TRANSMIT 0x3
/* enum: Read the current NIC time. */
#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
-/* enum: Get the current PTP status. */
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
#define MC_CMD_PTP_OP_STATUS 0x5
/* enum: Adjust the PTP NIC's time. */
#define MC_CMD_PTP_OP_ADJUST 0x6
/* enum: Synchronize host and NIC time. */
#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
-/* enum: Basic manufacturing tests. */
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
-/* enum: Packet based manufacturing tests. */
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
/* enum: Reset some of the PTP related statistics */
#define MC_CMD_PTP_OP_RESET_STATS 0xa
/* enum: Debug operations to MC. */
#define MC_CMD_PTP_OP_DEBUG 0xb
-/* enum: Read an FPGA register */
+/* enum: Read an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAREAD 0xc
-/* enum: Write an FPGA register */
+/* enum: Write an FPGA register. Siena PTP adapters only. */
#define MC_CMD_PTP_OP_FPGAWRITE 0xd
/* enum: Apply an offset to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
-/* enum: Change Apply an offset to the NIC clock */
+/* enum: Change the frequency correction applied to the NIC clock */
#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
-/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
-/* enum: Set the MC packet filter UUID for received PTP packets */
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
-/* enum: Set the MC packet filter Domain for received PTP packets */
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
-/* enum: Set the clock source */
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
-/* enum: Reset value of Timer Reg. */
+/* enum: Reset value of Timer Reg. Not implemented. */
#define MC_CMD_PTP_OP_RST_CLK 0x14
/* enum: Enable the forwarding of PPS events to the host */
#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
@@ -3671,7 +1412,7 @@
/* enum: Unsubscribe to stop receiving time events */
#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
/* enum: PPS-based manufacturing tests. Requires PPS output to be looped to PPS
- * input on the same NIC.
+ * input on the same NIC. Siena PTP adapters only.
*/
#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
/* enum: Set the PTP sync status. Status is used by firmware to report to event
@@ -3684,11 +1425,15 @@
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_CMD_LEN 4
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
-/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
-/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
/* enum: PTP, version 1 */
#define MC_CMD_PTP_MODE_V1 0x0
/* enum: PTP, version 1, with VLAN headers - deprecated */
@@ -3705,16 +1450,21 @@
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
@@ -3724,17 +1474,30 @@
/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_STATUS msgrequest */
#define MC_CMD_PTP_IN_STATUS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_ADJUST msgrequest */
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
@@ -3742,21 +1505,67 @@
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
/* enum: Number of fractional bits in frequency adjustment */
#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
+
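The FREQ field is a signed fixed-point ns-per-ns correction with ADJUST_BITS (40) or, when FP44_FREQ_ADJ is advertised in GET_ATTRIBUTES, ADJUST_BITS_FP44 (44) fractional bits. A conversion sketch from parts per billion; the ppb interpretation is an assumption drawn from the comments above, not stated by this header:

#include <stdint.h>

/* Assumed conversion from a drift in parts per billion to the FREQ
 * fixed-point value; bits is MC_CMD_PTP_IN_ADJUST_BITS (40) or
 * MC_CMD_PTP_IN_ADJUST_BITS_FP44 (44). Avoids 64-bit overflow only
 * for |ppb| up to a few hundred thousand. */
static int64_t ptp_freq_adj_from_ppb(int64_t ppb, unsigned int bits)
{
	return ppb * (INT64_C(1) << bits) / INT64_C(1000000000);
}
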
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/* MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
/* Host address in which to write "synchronization started" indication (64
* bits)
*/
@@ -3768,42 +1577,58 @@
/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
-/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
-/* Reset PTP statistics */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
@@ -3812,34 +1637,67 @@
/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
/* Time adjustment major value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
/* Time adjustment minor value */
#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
-/* enum: Number of fractional bits in frequency adjustment */
-/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* Enum values, see field(s): */
+/* MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Number of VLAN tags, 0 if not VLAN */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
/* Set of VLAN tags to filter against */
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
@@ -3848,9 +1706,12 @@
/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable UUID filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
/* UUID to filter against */
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
@@ -3860,62 +1721,82 @@
/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable Domain filtering, 0 to disable */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
/* Domain number to filter against */
#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Set the clock source. */
#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
/* enum: Internal. */
#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
/* enum: External. */
#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
-/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */
#define MC_CMD_PTP_IN_RST_CLK_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
-/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* Enable or disable */
#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
/* enum: Enable */
#define MC_CMD_PTP_ENABLE_PPS 0x0
/* enum: Disable */
#define MC_CMD_PTP_DISABLE_PPS 0x1
-/* Queue id to send events back */
+/* Not used. Events are always sent to function-relative queue 0. */
#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Original field containing queue ID. Now extended to include flags. */
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
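
The QUEUE dword packs a 16-bit event queue ID at bits 0-15 and a REPORT_SYNC_STATUS flag at bit 31, as the LBN/WIDTH pairs above describe. A small sketch packing it:

#include <stdint.h>

/* Pack the TIME_EVENT_SUBSCRIBE QUEUE dword from its LBN/WIDTH fields. */
static uint32_t ptp_subscribe_queue(uint16_t queue_id, int report_sync_status)
{
	uint32_t v = (uint32_t)queue_id <<
		     MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN;

	if (report_sync_status)
		v |= 1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN;
	return v;
}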
@@ -3924,29 +1805,39 @@
/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* Unsubscribe options */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
/* enum: Unsubscribe a single queue */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
/* enum: Unsubscribe all queues */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
/* Event queue ID */
#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* 1 to enable PPS test mode, 0 to disable and return result. */
#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
/* NIC - Host System Clock Synchronization status */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
/* enum: Host System clock and NIC clock are not in sync */
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
/* enum: Host System clock and NIC clock are synchronized */
@@ -3955,8 +1846,11 @@
* no longer in sync.
*/
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
@@ -3965,12 +1859,16 @@
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
@@ -3982,47 +1880,85 @@
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
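
The V2 read-time response extends the major (seconds) timestamp to 64 bits via MAJOR_HI. A minimal sketch reassembling it, assuming a little-endian host (mcdi_rd_dword is an illustrative helper, not part of this header):

#include <stdint.h>
#include <string.h>

static uint32_t mcdi_rd_dword(const uint8_t *buf, size_t ofst)
{
	uint32_t v;

	memcpy(&v, buf + ofst, sizeof(v)); /* MCDI payloads are little-endian */
	return v;
}

/* Reassemble the 64-bit seconds value from MAJOR (low) and MAJOR_HI (high). */
static uint64_t ptp_nic_time_v2_major(const uint8_t *buf)
{
	uint64_t hi = mcdi_rd_dword(buf, MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST);
	uint64_t lo = mcdi_rd_dword(buf, MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST);

	return hi << 32 | lo;
}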
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
/* Minimum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
/* Maximum period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
/* Last period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
/* Mean period of PPS pulse in nanoseconds */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
/* Minimum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
/* Maximum offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
/* Last offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
/* Mean offset of PPS pulse in nanoseconds (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
@@ -4035,23 +1971,31 @@
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
/* Timestamp major value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
/* Timestamp minor value */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
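
Each SYNCHRONIZE timeset brackets one NIC clock read between two host clock reads, with WAITNS recording a deliberate delay before the host end read. Per the SYNC_WINDOW_MIN description in MC_CMD_PTP_OUT_GET_ATTRIBUTES, the corrected window a driver compares against that minimum is, as a sketch:

#include <stdint.h>

/* Corrected synchronization window for one timeset: host round-trip
 * minus the MC's deliberate wait. Unsigned wrap handles HOSTEND rolling
 * over past HOSTSTART. Smaller windows bound the NIC time more tightly
 * between the two host reads.
 */
static uint32_t ptp_sync_window(uint32_t hoststart, uint32_t hostend,
				uint32_t waitns)
{
	return (hostend - hoststart) - waitns;
}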
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
/* enum: Successful test */
#define MC_CMD_PTP_MANF_SUCCESS 0x0
/* enum: FPGA load failed */
@@ -4084,15 +2028,19 @@
#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
@@ -4108,9 +2056,11 @@
/* Time format required/used by this NIC. Applies to all PTP MCDI
* operations that pass times between the host and firmware. If this operation
* is not supported (older firmware) a format of seconds and nanoseconds should
- * be assumed.
+ * be assumed. Note this enum is deprecated. Do not add to it; use the
+ * TIME_FORMAT field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
*/
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
@@ -4126,12 +2076,16 @@
* be assumed.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
/* enum: Times are in seconds and nanoseconds */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
/* enum: Major register has units of seconds, minor 2^-27s per tick */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
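
The four TIME_FORMAT encodings differ in the units of the minor register (1 ns, 8 ns, 2^-27 s, or 0.25 ns per tick). A hedged sketch normalising a minor value to nanoseconds; the rounding policy here is a driver choice, not mandated by the protocol:

#include <stdint.h>

static uint64_t ptp_minor_to_ns(uint32_t fmt, uint32_t minor)
{
	switch (fmt) {
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
		return minor;				/* 1 ns per tick */
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS:
		return (uint64_t)minor * 8;		/* 8 ns per tick */
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
		return ((uint64_t)minor * 1000000000) >> 27; /* 2^-27 s */
	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
		return minor / 4;			/* 0.25 ns per tick */
	default:
		return 0;				/* unknown format */
	}
}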
/* Minimum acceptable value for a corrected synchronization timeset. When
* comparing host and NIC clock times, the MC returns a set of samples that
* contain the host start and end time, the MC time when the host start was
@@ -4140,46 +2094,66 @@
* end and start times minus the time that the MC waited for host end.
*/
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
/* Various PTP capabilities */
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
@@ -4194,14 +2168,17 @@
#define MC_CMD_CSR_READ32 0xc
#undef MC_CMD_0xc_PRIVILEGE_CTG
-#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_STEP_LEN 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
/* MC_CMD_CSR_READ32_OUT msgresponse */
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
@@ -4221,7 +2198,7 @@
#define MC_CMD_CSR_WRITE32 0xd
#undef MC_CMD_0xd_PRIVILEGE_CTG
-#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CSR_WRITE32_IN msgrequest */
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
@@ -4229,7 +2206,9 @@
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
@@ -4238,6 +2217,7 @@
/* MC_CMD_CSR_WRITE32_OUT msgresponse */
#define MC_CMD_CSR_WRITE32_OUT_LEN 4
#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -4248,7 +2228,7 @@
#define MC_CMD_HP 0x54
#undef MC_CMD_0x54_PRIVILEGE_CTG
-#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_HP_IN msgrequest */
#define MC_CMD_HP_IN_LEN 16
@@ -4259,6 +2239,7 @@
* sensors.
*/
#define MC_CMD_HP_IN_SUBCMD_OFST 0
+#define MC_CMD_HP_IN_SUBCMD_LEN 4
/* enum: OCSD (Option Card Sensor Data) sub-command. */
#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
/* enum: Last known valid HP sub-command. */
@@ -4273,10 +2254,12 @@
* NULL.)
*/
#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
/* MC_CMD_HP_OUT msgresponse */
#define MC_CMD_HP_OUT_LEN 4
#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
/* enum: OCSD stopped for this card. */
#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
/* enum: OCSD was successfully started with the address provided. */
@@ -4323,29 +2306,35 @@
* external devices.
*/
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define MC_CMD_MDIO_READ_IN_BUS_LEN 4
/* enum: Internal. */
#define MC_CMD_MDIO_BUS_INTERNAL 0x0
/* enum: External. */
#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations are performed in clause45
 * mode. If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
#define MC_CMD_MDIO_CLAUSE22 0x20
/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
/* Status the MDIO commands return the raw status bits from the MDIO block. A
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
/* enum: Good. */
#define MC_CMD_MDIO_STATUS_GOOD 0x8
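
The MDIO response carries the raw status bits from the MDIO block; a good transaction returns exactly the DONE bit (MC_CMD_MDIO_STATUS_GOOD). A minimal sketch of checking a read response, assuming a little-endian host:

#include <stdint.h>
#include <string.h>

/* Returns 0 and stores the 16-bit register value on success, -1 on a
 * bad MDIO status.
 */
static int mdio_read_result(const uint8_t *buf, uint16_t *value)
{
	uint32_t status, v;

	memcpy(&status, buf + MC_CMD_MDIO_READ_OUT_STATUS_OFST, sizeof(status));
	if (status != MC_CMD_MDIO_STATUS_GOOD)
		return -1;
	memcpy(&v, buf + MC_CMD_MDIO_READ_OUT_VALUE_OFST, sizeof(v));
	*value = (uint16_t)v; /* MDIO registers are 16 bits wide */
	return 0;
}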
@@ -4365,22 +2354,27 @@
* external devices.
*/
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
/* enum: By default all the MCDI MDIO operations are performed in clause45
 * mode. If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
*/
/* MC_CMD_MDIO_CLAUSE22 0x20 */
/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
@@ -4388,6 +2382,7 @@
* "good" transaction should have the DONE bit set and all other bits clear.
*/
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -4399,7 +2394,7 @@
#define MC_CMD_DBI_WRITE 0x12
#undef MC_CMD_0x12_PRIVILEGE_CTG
-#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_WRITE_IN msgrequest */
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
@@ -4419,9 +2414,11 @@
/* MC_CMD_DBIWROP_TYPEDEF structuredef */
#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -4431,6 +2428,7 @@
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
@@ -4446,13 +2444,16 @@
#define MC_CMD_PORT_READ32_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
/***********************************/
@@ -4466,13 +2467,16 @@
#define MC_CMD_PORT_WRITE32_IN_LEN 8
/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
/***********************************/
@@ -4486,6 +2490,7 @@
#define MC_CMD_PORT_READ128_IN_LEN 4
/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
@@ -4494,6 +2499,7 @@
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
/***********************************/
@@ -4507,6 +2513,7 @@
#define MC_CMD_PORT_WRITE128_IN_LEN 20
/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
@@ -4515,6 +2522,7 @@
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
/* MC_CMD_CAPABILITIES structuredef */
#define MC_CMD_CAPABILITIES_LEN 4
@@ -4560,24 +2568,54 @@
#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
-/* See MC_CMD_CAPABILITIES */
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
-/* This field contains a 16-bit value for each of the types of NVRAM area. The
- * values are defined in the firmware/mc/platform/.c file for a specific board
- * type, but otherwise have no meaning to the MC; they are used by the driver
- * to manage selection of appropriate firmware updates.
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
*/
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
@@ -4592,7 +2630,7 @@
#define MC_CMD_DBI_READX 0x19
#undef MC_CMD_0x19_PRIVILEGE_CTG
-#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DBI_READX_IN msgrequest */
#define MC_CMD_DBI_READX_IN_LENMIN 8
@@ -4619,9 +2657,11 @@
/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
@@ -4639,7 +2679,7 @@
#define MC_CMD_SET_RAND_SEED 0x1a
#undef MC_CMD_0x1a_PRIVILEGE_CTG
-#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -4689,14 +2729,25 @@
#define MC_CMD_DRV_ATTACH_IN_LEN 12
/* new state to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
#define MC_CMD_DRV_ATTACH_LBN 0
#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1
#define MC_CMD_DRV_PREBOOT_LBN 1
#define MC_CMD_DRV_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1
+#define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2
+#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3
+#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1
/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
/* preferred datapath firmware (for Huntington; ignored for Siena) */
#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
/* enum: Prefer to use full featured firmware */
#define MC_CMD_FW_FULL_FEATURED 0x0
/* enum: Prefer to use firmware with fewer features but lower latency */
@@ -4713,20 +2764,35 @@
* support
*/
#define MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Prefer to use firmware with additional DPDK support */
+#define MC_CMD_FW_DPDK 0x6
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+#define MC_CMD_FW_L3XUDP 0x7
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe
/* enum: Only this option is allowed for non-admin functions */
-#define MC_CMD_FW_DONT_CARE 0xffffffff
+#define MC_CMD_FW_DONT_CARE 0xffffffff
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
/* enum: Labels the lowest-numbered function visible to the OS */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
/* enum: The function can control the link state of the physical port it is
@@ -4739,6 +2805,11 @@
* refers to the Sorrento external FPGA port.
*/
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+/* enum: If set, indicates that VI spreading is currently enabled. Will always
+ * indicate the current state, regardless of the value in the WANT_VI_SPREADING
+ * input.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4
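
On the request side, NEW_STATE is a bitmask whose ATTACH, PREBOOT, SUBVARIANT_AWARE and WANT_VI_SPREADING bits are defined above; the response's OLD_STATE uses the same layout, and FUNC_FLAGS reports VI_SPREADING_ENABLED regardless of what was requested. A small sketch building NEW_STATE:

#include <stdint.h>

/* Build the MC_CMD_DRV_ATTACH NEW_STATE dword from its flag bits. */
static uint32_t drv_attach_new_state(int attach, int subvariant_aware,
				     int want_vi_spreading)
{
	uint32_t v = 0;

	if (attach)
		v |= 1u << MC_CMD_DRV_ATTACH_IN_ATTACH_LBN;
	if (subvariant_aware)
		v |= 1u << MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN;
	if (want_vi_spreading)
		v |= 1u << MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN;
	return v;
}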
/***********************************/
@@ -4751,6 +2822,7 @@
#define MC_CMD_SHMUART_IN_LEN 4
/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define MC_CMD_SHMUART_IN_FLAG_LEN 4
/* MC_CMD_SHMUART_OUT msgresponse */
#define MC_CMD_SHMUART_OUT_LEN 0
@@ -4789,6 +2861,7 @@
* (TBD).
*/
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -4806,8 +2879,10 @@
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
@@ -4838,31 +2913,54 @@
/* MC_CMD_RXD_MONITOR_IN msgrequest */
#define MC_CMD_RXD_MONITOR_IN_LEN 12
#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4
#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
/* MC_CMD_RXD_MONITOR_OUT msgresponse */
#define MC_CMD_RXD_MONITOR_OUT_LEN 80
#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
/***********************************/
@@ -4872,13 +2970,14 @@
#define MC_CMD_PUTS 0x23
#undef MC_CMD_0x23_PRIVILEGE_CTG
-#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_PUTS_IN msgrequest */
#define MC_CMD_PUTS_IN_LENMIN 13
#define MC_CMD_PUTS_IN_LENMAX 252
#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_DEST_LEN 4
#define MC_CMD_PUTS_IN_UART_LBN 0
#define MC_CMD_PUTS_IN_UART_WIDTH 1
#define MC_CMD_PUTS_IN_PORT_LBN 1
@@ -4911,6 +3010,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
@@ -4927,8 +3027,10 @@
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
#define MC_CMD_PHY_CAP_10FDX_LBN 2
@@ -4953,17 +3055,39 @@
#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
#define MC_CMD_PHY_CAP_DDM_LBN 12
#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
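
The SUPPORTED_CAP bitmask now extends past 40G to 25G/50G/100G speeds and carries both ability and "requested" bits for BASER and RS FEC. A small sketch testing two of the new bits:

#include <stdint.h>

/* Test FEC bits in a SUPPORTED_CAP (or GET_LINK CAP) bitmask. */
static int cap_has_rs_fec(uint32_t cap)
{
	return (cap >> MC_CMD_PHY_CAP_RS_FEC_LBN) & 1;
}

static int cap_requests_rs_fec(uint32_t cap)
{
	return (cap >> MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN) & 1;
}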
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
/* enum: Xaui. */
#define MC_CMD_MEDIA_XAUI 0x1
/* enum: CX4. */
@@ -4979,6 +3103,7 @@
/* enum: QSFP+. */
#define MC_CMD_MEDIA_QSFP_PLUS 0x7
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
/* enum: Native clause 22 */
#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
@@ -5004,12 +3129,13 @@
#define MC_CMD_START_BIST 0x25
#undef MC_CMD_0x25_PRIVILEGE_CTG
-#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_LEN 4
/* enum: Run the PHY's short cable BIST. */
#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
/* enum: Run the PHY's long cable BIST. */
@@ -5043,7 +3169,7 @@
#define MC_CMD_POLL_BIST 0x26
#undef MC_CMD_0x26_PRIVILEGE_CTG
-#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_POLL_BIST_IN msgrequest */
#define MC_CMD_POLL_BIST_IN_LEN 0
@@ -5052,6 +3178,7 @@
#define MC_CMD_POLL_BIST_OUT_LEN 8
/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
/* enum: Running. */
#define MC_CMD_POLL_BIST_RUNNING 0x1
/* enum: Passed. */
@@ -5061,19 +3188,26 @@
/* enum: Timed-out. */
#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
/* enum: Ok. */
#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
/* enum: Open. */
@@ -5086,14 +3220,17 @@
#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
@@ -5101,9 +3238,11 @@
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
/* enum: Complete. */
#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
/* enum: Bus switch off I2C write. */
@@ -5127,9 +3266,11 @@
#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
/* enum: Test has completed. */
#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
/* enum: RAM test - walk ones. */
@@ -5146,8 +3287,10 @@
#define MC_CMD_POLL_BIST_MEM_ECC 0x6
/* Failure address, only valid if result is POLL_BIST_FAILED */
#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
/* Bus or address space to which the failure address corresponds */
#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
/* enum: MC MIPS bus. */
#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
/* enum: CSR IREG bus. */
@@ -5168,14 +3311,19 @@
#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
/* Pattern written to RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
/* Actual value read from RAM / register */
#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
/* ECC error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
/* ECC parity error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
/* ECC fatal error mask */
#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
/***********************************/
@@ -5222,83 +3370,83 @@
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
/* enum: None. */
-#define MC_CMD_LOOPBACK_NONE 0x0
+#define MC_CMD_LOOPBACK_NONE 0x0
/* enum: Data. */
-#define MC_CMD_LOOPBACK_DATA 0x1
+#define MC_CMD_LOOPBACK_DATA 0x1
/* enum: GMAC. */
-#define MC_CMD_LOOPBACK_GMAC 0x2
+#define MC_CMD_LOOPBACK_GMAC 0x2
/* enum: XGMII. */
#define MC_CMD_LOOPBACK_XGMII 0x3
/* enum: XGXS. */
-#define MC_CMD_LOOPBACK_XGXS 0x4
+#define MC_CMD_LOOPBACK_XGXS 0x4
/* enum: XAUI. */
-#define MC_CMD_LOOPBACK_XAUI 0x5
+#define MC_CMD_LOOPBACK_XAUI 0x5
/* enum: GMII. */
-#define MC_CMD_LOOPBACK_GMII 0x6
+#define MC_CMD_LOOPBACK_GMII 0x6
/* enum: SGMII. */
-#define MC_CMD_LOOPBACK_SGMII 0x7
+#define MC_CMD_LOOPBACK_SGMII 0x7
/* enum: XGBR. */
-#define MC_CMD_LOOPBACK_XGBR 0x8
+#define MC_CMD_LOOPBACK_XGBR 0x8
/* enum: XFI. */
-#define MC_CMD_LOOPBACK_XFI 0x9
+#define MC_CMD_LOOPBACK_XFI 0x9
/* enum: XAUI Far. */
-#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
/* enum: GMII Far. */
-#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
/* enum: SGMII Far. */
-#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
/* enum: XFI Far. */
-#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
/* enum: GPhy. */
-#define MC_CMD_LOOPBACK_GPHY 0xe
+#define MC_CMD_LOOPBACK_GPHY 0xe
/* enum: PhyXS. */
-#define MC_CMD_LOOPBACK_PHYXS 0xf
+#define MC_CMD_LOOPBACK_PHYXS 0xf
/* enum: PCS. */
-#define MC_CMD_LOOPBACK_PCS 0x10
+#define MC_CMD_LOOPBACK_PCS 0x10
/* enum: PMA-PMD. */
-#define MC_CMD_LOOPBACK_PMAPMD 0x11
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
/* enum: Cross-Port. */
-#define MC_CMD_LOOPBACK_XPORT 0x12
+#define MC_CMD_LOOPBACK_XPORT 0x12
/* enum: XGMII-Wireside. */
-#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
/* enum: XAUI Wireside. */
-#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
/* enum: XAUI Wireside Far. */
-#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
/* enum: XAUI Wireside near. */
-#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
/* enum: GMII Wireside. */
-#define MC_CMD_LOOPBACK_GMII_WS 0x17
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
/* enum: XFI Wireside. */
-#define MC_CMD_LOOPBACK_XFI_WS 0x18
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
/* enum: XFI Wireside Far. */
-#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
/* enum: PhyXS Wireside. */
-#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
/* enum: PMA lanes MAC-Serdes. */
-#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
/* enum: KR Serdes Parallel (Encoder). */
-#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
/* enum: KR Serdes Serial. */
-#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
/* enum: PMA lanes MAC-Serdes Wireside. */
-#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
/* enum: KR Serdes Parallel Wireside (Full PCS). */
-#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
-#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
-#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
/* enum: KR Serdes Serial Wireside. */
-#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
/* enum: Near side of AOE Siena side port */
-#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
/* enum: Medford Wireside datapath loopback */
-#define MC_CMD_LOOPBACK_DATA_WS 0x24
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
/* enum: Force link up without setting up any physical loopback (snapper use
* only)
*/
-#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -5328,6 +3476,174 @@
/* Enum values, see field(s): */
/* 100M */
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/* MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/* MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/* MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/* MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/* MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/* MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/* MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/* MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/* MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/* MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/* MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/* MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/* MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/* MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/* MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/* MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/* MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside near. */
+/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/* MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/* MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/* MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/* MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/* MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 25G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 50G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported 100G loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/* Enum values, see field(s): */
+/* 100M */
+
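Each per-speed mask in the V2 response is a 64-bit bitmap split into LO/HI dwords; bit n set means the loopback whose enum value is n is supported at that speed. A minimal sketch, assuming a little-endian host:

#include <stdint.h>
#include <string.h>

/* Test whether loopback 'mode' (an MC_CMD_LOOPBACK_* enum value) is
 * supported at 25G in a GET_LOOPBACK_MODES V2 response.
 */
static int loopback_25g_supported(const uint8_t *buf, unsigned int mode)
{
	uint32_t lo, hi;

	memcpy(&lo, buf + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST, sizeof(lo));
	memcpy(&hi, buf + MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST, sizeof(hi));
	return (((uint64_t)hi << 32 | lo) >> mode) & 1;
}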
+/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */
+#define AN_TYPE_LEN 4
+#define AN_TYPE_TYPE_OFST 0
+#define AN_TYPE_TYPE_LEN 4
+/* enum: None, AN disabled or not supported */
+#define MC_CMD_AN_NONE 0x0
+/* enum: Clause 28 - BASE-T */
+#define MC_CMD_AN_CLAUSE28 0x1
+/* enum: Clause 37 - BASE-X */
+#define MC_CMD_AN_CLAUSE37 0x2
+/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable
+ * assemblies. Includes Clause 72/Clause 92 link-training.
+ */
+#define MC_CMD_AN_CLAUSE73 0x3
+#define AN_TYPE_TYPE_LBN 0
+#define AN_TYPE_TYPE_WIDTH 32
+
+/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3
+ */
+#define FEC_TYPE_LEN 4
+#define FEC_TYPE_TYPE_OFST 0
+#define FEC_TYPE_TYPE_LEN 4
+/* enum: No FEC */
+#define MC_CMD_FEC_NONE 0x0
+/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+#define MC_CMD_FEC_BASER 0x1
+/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+#define MC_CMD_FEC_RS 0x2
+#define FEC_TYPE_TYPE_LBN 0
+#define FEC_TYPE_TYPE_WIDTH 32
+
/***********************************/
/* MC_CMD_GET_LINK
@@ -5344,19 +3660,28 @@
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
-/* near-side advertised capabilities */
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
-/* link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
/* Autonegotiated speed in mbit/s. The link may still be down even if this
* reads non-zero.
*/
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
@@ -5371,9 +3696,11 @@
#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
@@ -5383,6 +3710,97 @@
#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */
+#define MC_CMD_GET_LINK_OUT_V2_LEN 44
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0
+#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4
+#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8
+#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12
+#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
+#define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
+#define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */
+/* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */
+/* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */
+/* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */
+/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */
+/* True local device capabilities (taking into account currently used PMD/MDI,
+ * e.g. plugged-in module). In general, a subset of
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST
+ * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal
+ * to SUPPORTED_CAP for non-pluggable PMDs. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28
+#define MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4
+/* Auto-negotiation type used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32
+#define MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Forward error correction used on the link */
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36
+#define MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40
+#define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0
+#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2
+#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3
+#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4
+#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5
+#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6
+#define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7
+#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8
+#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1
+
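/* Sketch: reading MC_CMD_GET_LINK_OUT_V2 status bits from a raw response
 * buffer. mcdi_read_dword() is a hypothetical little-endian load helper;
 * mcdi_get_field() is the sketch shown after the FEC_TYPE structuredef.
 */
static inline uint32_t
mcdi_read_dword(const uint8_t *resp, unsigned int ofst)
{
	return (uint32_t)resp[ofst] |
	       ((uint32_t)resp[ofst + 1] << 8) |
	       ((uint32_t)resp[ofst + 2] << 16) |
	       ((uint32_t)resp[ofst + 3] << 24);
}

/*
 * uint32_t ext = mcdi_read_dword(resp, MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST);
 * unsigned int an_done = mcdi_get_field(ext,
 *	MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN,
 *	MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH);
 */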
/***********************************/
/* MC_CMD_SET_LINK
@@ -5396,10 +3814,14 @@
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
-/* ??? */
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define MC_CMD_SET_LINK_IN_CAP_LEN 4
/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_FLAGS_LEN 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
@@ -5408,12 +3830,14 @@
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
/* A loopback speed of "0" is supported, and means "choose any available
* speed".
*/
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
/* MC_CMD_SET_LINK_OUT msgresponse */
#define MC_CMD_SET_LINK_OUT_LEN 0
@@ -5432,9 +3856,10 @@
#define MC_CMD_SET_ID_LED_IN_LEN 4
/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
-#define MC_CMD_LED_OFF 0x0 /* enum */
-#define MC_CMD_LED_ON 0x1 /* enum */
-#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+#define MC_CMD_SET_ID_LED_IN_STATE_LEN 4
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
/* MC_CMD_SET_ID_LED_OUT msgresponse */
#define MC_CMD_SET_ID_LED_OUT_LEN 0
@@ -5455,17 +3880,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
#define MC_CMD_FCNTL_OFF 0x0
/* enum: Respond to flow control. */
@@ -5479,6 +3908,7 @@
/* enum: Issue flow control. */
#define MC_CMD_FCNTL_GENERATE 0x5
#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
@@ -5488,17 +3918,21 @@
* EtherII, VLAN, bug16011 padding).
*/
#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
/* enum: Respond to flow control. */
@@ -5512,6 +3946,7 @@
/* enum: Issue flow control. */
/* MC_CMD_FCNTL_GENERATE 0x5 */
#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
/* Select which parameters to configure. A parameter will only be modified if
@@ -5520,6 +3955,7 @@
* set).
*/
#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
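/* Sketch: the CONTROL mask above makes MC_CMD_SET_MAC_EXT selective, so one
 * parameter can be changed without re-stating the rest. Assumes an
 * MC_CMD_SET_MAC_EXT_IN_LEN macro (defined with this message, outside this
 * hunk) and a hypothetical little-endian store helper.
 */
static inline void
mcdi_write_dword(uint8_t *req, unsigned int ofst, uint32_t value)
{
	req[ofst] = value & 0xff;
	req[ofst + 1] = (value >> 8) & 0xff;
	req[ofst + 2] = (value >> 16) & 0xff;
	req[ofst + 3] = (value >> 24) & 0xff;
}

/*
 * uint8_t req[MC_CMD_SET_MAC_EXT_IN_LEN] = { 0 };
 *
 * mcdi_write_dword(req, MC_CMD_SET_MAC_EXT_IN_MTU_OFST, 9000);
 * mcdi_write_dword(req, MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST,
 *		    1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN);
 * ...only the MTU is modified; the other fields are ignored by firmware.
 */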
@@ -5541,6 +3977,7 @@
* to 0.
*/
#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
/***********************************/
@@ -5574,53 +4011,53 @@
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
/* enum: OUI. */
-#define MC_CMD_OUI 0x0
+#define MC_CMD_OUI 0x0
/* enum: PMA-PMD Link Up. */
-#define MC_CMD_PMA_PMD_LINK_UP 0x1
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
/* enum: PMA-PMD RX Fault. */
-#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
/* enum: PMA-PMD TX Fault. */
-#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
/* enum: PMA-PMD Signal */
-#define MC_CMD_PMA_PMD_SIGNAL 0x4
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
/* enum: PMA-PMD SNR A. */
-#define MC_CMD_PMA_PMD_SNR_A 0x5
+#define MC_CMD_PMA_PMD_SNR_A 0x5
/* enum: PMA-PMD SNR B. */
-#define MC_CMD_PMA_PMD_SNR_B 0x6
+#define MC_CMD_PMA_PMD_SNR_B 0x6
/* enum: PMA-PMD SNR C. */
-#define MC_CMD_PMA_PMD_SNR_C 0x7
+#define MC_CMD_PMA_PMD_SNR_C 0x7
/* enum: PMA-PMD SNR D. */
-#define MC_CMD_PMA_PMD_SNR_D 0x8
+#define MC_CMD_PMA_PMD_SNR_D 0x8
/* enum: PCS Link Up. */
-#define MC_CMD_PCS_LINK_UP 0x9
+#define MC_CMD_PCS_LINK_UP 0x9
/* enum: PCS RX Fault. */
-#define MC_CMD_PCS_RX_FAULT 0xa
+#define MC_CMD_PCS_RX_FAULT 0xa
/* enum: PCS TX Fault. */
-#define MC_CMD_PCS_TX_FAULT 0xb
+#define MC_CMD_PCS_TX_FAULT 0xb
/* enum: PCS BER. */
-#define MC_CMD_PCS_BER 0xc
+#define MC_CMD_PCS_BER 0xc
/* enum: PCS Block Errors. */
-#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
/* enum: PhyXS Link Up. */
-#define MC_CMD_PHYXS_LINK_UP 0xe
+#define MC_CMD_PHYXS_LINK_UP 0xe
/* enum: PhyXS RX Fault. */
-#define MC_CMD_PHYXS_RX_FAULT 0xf
+#define MC_CMD_PHYXS_RX_FAULT 0xf
/* enum: PhyXS TX Fault. */
-#define MC_CMD_PHYXS_TX_FAULT 0x10
+#define MC_CMD_PHYXS_TX_FAULT 0x10
/* enum: PhyXS Align. */
-#define MC_CMD_PHYXS_ALIGN 0x11
+#define MC_CMD_PHYXS_ALIGN 0x11
/* enum: PhyXS Sync. */
-#define MC_CMD_PHYXS_SYNC 0x12
+#define MC_CMD_PHYXS_SYNC 0x12
/* enum: AN link-up. */
-#define MC_CMD_AN_LINK_UP 0x13
+#define MC_CMD_AN_LINK_UP 0x13
/* enum: AN Complete. */
-#define MC_CMD_AN_COMPLETE 0x14
+#define MC_CMD_AN_COMPLETE 0x14
/* enum: AN 10GBaseT Status. */
-#define MC_CMD_AN_10GBT_STATUS 0x15
+#define MC_CMD_AN_10GBT_STATUS 0x15
/* enum: Clause 22 Link-Up. */
-#define MC_CMD_CL22_LINK_UP 0x16
+#define MC_CMD_CL22_LINK_UP 0x16
/* enum: (Last entry) */
-#define MC_CMD_PHY_NSTATS 0x17
+#define MC_CMD_PHY_NSTATS 0x17
/***********************************/
@@ -5647,6 +4084,7 @@
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_CMD_LEN 4
#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
@@ -5661,9 +4099,16 @@
#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t).
+ */
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
/* port id so vadapter stats can be provided */
#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
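/* Sketch of the DMA_LEN rule above: use the stats count reported by
 * MC_CMD_GET_CAPABILITIES_V4_OUT when available, otherwise fall back to the
 * legacy MC_CMD_MAC_NSTATS (defined below). mac_stats_num_stats is a
 * hypothetical value already parsed from the capabilities response, 0 when
 * V4 capabilities are not supported.
 */
static inline uint32_t
mac_stats_dma_len(uint32_t mac_stats_num_stats)
{
	uint32_t nstats = mac_stats_num_stats != 0 ? mac_stats_num_stats :
						     MC_CMD_MAC_NSTATS;

	return nstats * (uint32_t)sizeof(uint64_t);
}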
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -5675,141 +4120,289 @@
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
-#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
-#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
-#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
-#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
-#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
-#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
-#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
-#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
-#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
-#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
-#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
-#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
-#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
-#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
-#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
-#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
-#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
-#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
-#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
-#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
-#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
-#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
-#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
-#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
-#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
-#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
-#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
-#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
-#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
-#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
-#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
-#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
-#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
-#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
-#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
-#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
-#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
-#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
-#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
-#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
-#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
-#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
-#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
-#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
-#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
-#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
-#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
-#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
-#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
-#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
-#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
-#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
-#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
-#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
-#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
-#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
-#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
-#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
-#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
-#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
-#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
/* enum: PM discard_bb_overflow counter. Valid for EF10 with
* PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
/* enum: PM discard_vfifo_full counter. Valid for EF10 with
* PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
* capability only.
*/
-#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
/* enum: RXDP counter: Number of packets dropped due to the queue being
* disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
* with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
* PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
* Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
* descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
-#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
-#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
-#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
/* enum: Start of GMAC stats buffer space, for Siena only. */
-#define MC_CMD_GMAC_DMABUF_START 0x40
+#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
-#define MC_CMD_GMAC_DMABUF_END 0x5f
-#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
-#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAd data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define MC_CMD_MAC_GENERATION_END 0x60
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
+
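/* Sketch of the GENERATION_START/GENERATION_END consistency check described
 * above, treating the DMA buffer as dma_len / 8 uint64_t entries. For legacy
 * lengths the last entry is MC_CMD_MAC_GENERATION_END itself, so indexing
 * from the end covers both layouts. A real driver would typically retry the
 * read while the two values differ. Assumes <stdint.h>/<stddef.h>.
 */
static inline int
mac_stats_snapshot_consistent(const uint64_t *stats, size_t dma_len)
{
	size_t last = dma_len / sizeof(uint64_t) - 1;

	return stats[MC_CMD_MAC_GENERATION_START] == stats[last];
}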
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103, which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V2 0x68
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120, which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V3 0x79
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3)
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum: Start of V4 stats buffer space */
+#define MC_CMD_MAC_V4_DMABUF_START 0x79
+/* enum: RXDP counter: Number of packets truncated because scattering was
+ * disabled.
+ */
+#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79
+/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting
+ * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a
+/* enum: RXDP counter: Number of times the RXDP timed out while head of line
+ * blocking. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b
+/* enum: This includes the space at offset 124, which is the final
+ * GENERATION_END in a MAC_STATS_V4 response and otherwise unused.
+ */
+#define MC_CMD_MAC_NSTATS_V4 0x7d
+/* Other enum values, see field(s): */
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
/***********************************/
@@ -5821,21 +4414,28 @@
/* MC_CMD_SRIOV_IN msgrequest */
#define MC_CMD_SRIOV_IN_LEN 12
#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_ENABLE_LEN 4
#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4
#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
/* MC_CMD_SRIOV_OUT msgresponse */
#define MC_CMD_SRIOV_OUT_LEN 8
#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
@@ -5845,6 +4445,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
@@ -5855,6 +4456,7 @@
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
@@ -5907,24 +4509,26 @@
/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
-#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
/* enum: Magic */
-#define MC_CMD_WOL_TYPE_MAGIC 0x0
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
/* enum: MS Windows Magic */
#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
/* enum: IPv4 Syn */
-#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
/* enum: IPv6 Syn */
-#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
/* enum: Bitmap */
-#define MC_CMD_WOL_TYPE_BITMAP 0x5
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
/* enum: Link */
-#define MC_CMD_WOL_TYPE_LINK 0x6
+#define MC_CMD_WOL_TYPE_LINK 0x6
/* enum: (Above this for future use) */
-#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
@@ -5932,7 +4536,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
@@ -5941,9 +4547,13 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
@@ -5952,7 +4562,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
@@ -5965,7 +4577,9 @@
/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
@@ -5980,8 +4594,11 @@
/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
@@ -5990,6 +4607,7 @@
/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -6004,6 +4622,7 @@
/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
@@ -6022,6 +4641,7 @@
/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
@@ -6063,6 +4683,7 @@
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
/* enum: Disabled callisto. */
#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
/* enum: MC firmware. */
@@ -6120,21 +4741,28 @@
/* MC_CMD_NVRAM_INFO_IN msgrequest */
#define MC_CMD_NVRAM_INFO_IN_LEN 4
#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_INFO_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_OUT_LEN 24
#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
@@ -6142,36 +4770,51 @@
#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
*/
#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
* Start a group of update operations on a virtual NVRAM partition. Locks
* required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
- * PHY_LOCK required and not held).
+ * PHY_LOCK required and not held). In an adapter bound to a TSA controller,
+ * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types,
+ * i.e. static config, dynamic config and expansion ROM config. Attempting to
+ * perform this operation on a restricted partition will return the error
+ * EPERM.
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
#undef MC_CMD_0x38_PRIVILEGE_CTG
@@ -6183,6 +4826,7 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
@@ -6193,9 +4837,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -6217,20 +4863,26 @@
/* MC_CMD_NVRAM_READ_IN msgrequest */
#define MC_CMD_NVRAM_READ_IN_LEN 12
#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
/* Optional control info. If a partition is stored with an A/B versioning
* scheme (i.e. in more than one physical partition in NVRAM) the host can set
* this to control which underlying physical partition is used to read data
@@ -6240,6 +4892,7 @@
* verifying by reading with MODE=TARGET_BACKUP.
*/
#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
/* enum: Same as omitting MODE: caller sees data in current partition unless it
* holds the write lock in which case it sees data in the partition it is
* updating.
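/* Sketch: building an MC_CMD_NVRAM_READ_IN_V2 request that reads back the
 * newly written partition of an A/B pair, per the MODE notes above.
 * mcdi_write_dword() is the store helper sketched earlier; the TARGET_*
 * mode enum names follow this header's pattern but sit outside this hunk.
 */
static inline void
nvram_read_v2_req_init(uint8_t *req, uint32_t type, uint32_t offset,
		       uint32_t length, uint32_t mode)
{
	mcdi_write_dword(req, MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST, type);
	mcdi_write_dword(req, MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST, offset);
	mcdi_write_dword(req, MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST, length);
	mcdi_write_dword(req, MC_CMD_NVRAM_READ_IN_V2_MODE_OFST, mode);
}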
@@ -6280,10 +4933,13 @@
#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
@@ -6307,10 +4963,13 @@
/* MC_CMD_NVRAM_ERASE_IN msgrequest */
#define MC_CMD_NVRAM_ERASE_IN_LEN 12
#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
@@ -6319,8 +4978,12 @@
/***********************************/
/* MC_CMD_NVRAM_UPDATE_FINISH
* Finish a group of update operations on a virtual NVRAM partition. Locks
- * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
- * type/offset/length), EACCES (if PHY_LOCK required and not held)
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/
+ * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to
+ * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of
+ * partition types, i.e. static config, dynamic config and expansion ROM
+ * config.
+ * Attempting to perform this operation on a restricted partition will return
+ * the error EPERM.
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
#undef MC_CMD_0x3c_PRIVILEGE_CTG
@@ -6332,9 +4995,11 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
* request with additional flags indicating version of NVRAM_UPDATE commands in
@@ -6343,10 +5008,13 @@
*/
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
@@ -6373,6 +5041,7 @@
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
/* Result of nvram update completion processing */
#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
/* enum: Invalid return code; only non-zero values are defined. Defined as
* unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
*/
@@ -6407,6 +5076,8 @@
* only production signed images.
*/
#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
/***********************************/
@@ -6430,11 +5101,12 @@
#define MC_CMD_REBOOT 0x3d
#undef MC_CMD_0x3d_PRIVILEGE_CTG
-#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REBOOT_IN msgrequest */
#define MC_CMD_REBOOT_IN_LEN 4
#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_IN_FLAGS_LEN 4
#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
/* MC_CMD_REBOOT_OUT msgresponse */
@@ -6473,11 +5145,12 @@
#define MC_CMD_REBOOT_MODE 0x3f
#undef MC_CMD_0x3f_PRIVILEGE_CTG
-#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
/* enum: Normal. */
#define MC_CMD_REBOOT_MODE_NORMAL 0x0
/* enum: Power-on Reset. */
@@ -6492,6 +5165,7 @@
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
/***********************************/
@@ -6528,7 +5202,7 @@
#define MC_CMD_SENSOR_INFO 0x41
#undef MC_CMD_0x41_PRIVILEGE_CTG
-#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
@@ -6542,174 +5216,190 @@
* Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), etc.
*/
#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
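/* Sketch of the paging scheme above: request page 0 first, then keep
 * requesting pages while bit 31 (NEXT_PAGE, defined with the EXT_OUT
 * response below) of the returned mask is set. mcdi_sensor_info() is a
 * hypothetical transport wrapper returning the 32-bit MASK for a page.
 */
static inline void
probe_sensor_pages(void)
{
	uint32_t page = 0;
	uint32_t mask;

	do {
		mask = mcdi_sensor_info(page++);
		/* bits 0..30 of mask: sensors present on this page */
	} while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));
}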
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
/* enum: Controller temperature: degC */
-#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
/* enum: Phy common temperature: degC */
-#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
/* enum: Controller cooling: bool */
-#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
/* enum: Phy 0 temperature: degC */
-#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
/* enum: Phy 0 cooling: bool */
-#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
/* enum: Phy 1 temperature: degC */
-#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
/* enum: Phy 1 cooling: bool */
-#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
/* enum: 1.0v power: mV */
-#define MC_CMD_SENSOR_IN_1V0 0x7
+#define MC_CMD_SENSOR_IN_1V0 0x7
/* enum: 1.2v power: mV */
-#define MC_CMD_SENSOR_IN_1V2 0x8
+#define MC_CMD_SENSOR_IN_1V2 0x8
/* enum: 1.8v power: mV */
-#define MC_CMD_SENSOR_IN_1V8 0x9
+#define MC_CMD_SENSOR_IN_1V8 0x9
/* enum: 2.5v power: mV */
-#define MC_CMD_SENSOR_IN_2V5 0xa
+#define MC_CMD_SENSOR_IN_2V5 0xa
/* enum: 3.3v power: mV */
-#define MC_CMD_SENSOR_IN_3V3 0xb
+#define MC_CMD_SENSOR_IN_3V3 0xb
/* enum: 12v power: mV */
-#define MC_CMD_SENSOR_IN_12V0 0xc
+#define MC_CMD_SENSOR_IN_12V0 0xc
/* enum: 1.2v analogue power: mV */
-#define MC_CMD_SENSOR_IN_1V2A 0xd
+#define MC_CMD_SENSOR_IN_1V2A 0xd
/* enum: reference voltage: mV */
-#define MC_CMD_SENSOR_IN_VREF 0xe
+#define MC_CMD_SENSOR_IN_VREF 0xe
/* enum: AOE FPGA power: mV */
-#define MC_CMD_SENSOR_OUT_VAOE 0xf
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
/* enum: AOE FPGA temperature: degC */
-#define MC_CMD_SENSOR_AOE_TEMP 0x10
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
/* enum: AOE FPGA PSU temperature: degC */
-#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
/* enum: AOE PSU temperature: degC */
-#define MC_CMD_SENSOR_PSU_TEMP 0x12
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
/* enum: Fan 0 speed: RPM */
-#define MC_CMD_SENSOR_FAN_0 0x13
+#define MC_CMD_SENSOR_FAN_0 0x13
/* enum: Fan 1 speed: RPM */
-#define MC_CMD_SENSOR_FAN_1 0x14
+#define MC_CMD_SENSOR_FAN_1 0x14
/* enum: Fan 2 speed: RPM */
-#define MC_CMD_SENSOR_FAN_2 0x15
+#define MC_CMD_SENSOR_FAN_2 0x15
/* enum: Fan 3 speed: RPM */
-#define MC_CMD_SENSOR_FAN_3 0x16
+#define MC_CMD_SENSOR_FAN_3 0x16
/* enum: Fan 4 speed: RPM */
-#define MC_CMD_SENSOR_FAN_4 0x17
+#define MC_CMD_SENSOR_FAN_4 0x17
/* enum: AOE FPGA input power: mV */
-#define MC_CMD_SENSOR_IN_VAOE 0x18
+#define MC_CMD_SENSOR_IN_VAOE 0x18
/* enum: AOE FPGA current: mA */
-#define MC_CMD_SENSOR_OUT_IAOE 0x19
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
/* enum: AOE FPGA input current: mA */
-#define MC_CMD_SENSOR_IN_IAOE 0x1a
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
/* enum: NIC power consumption: W */
-#define MC_CMD_SENSOR_NIC_POWER 0x1b
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
/* enum: 0.9v power voltage: mV */
-#define MC_CMD_SENSOR_IN_0V9 0x1c
+#define MC_CMD_SENSOR_IN_0V9 0x1c
/* enum: 0.9v power current: mA */
-#define MC_CMD_SENSOR_IN_I0V9 0x1d
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
/* enum: 1.2v power current: mA */
-#define MC_CMD_SENSOR_IN_I1V2 0x1e
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
/* enum: Not a sensor: reserved for the next page flag */
-#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
/* enum: 0.9v power voltage (at ADC): mV */
-#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
/* enum: Controller temperature 2: degC */
-#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
/* enum: Voltage regulator internal temperature: degC */
-#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
/* enum: 0.9V voltage regulator temperature: degC */
-#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
/* enum: 1.2V voltage regulator temperature: degC */
-#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
/* enum: controller internal temperature sensor voltage (internal ADC): mV */
-#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
/* enum: controller internal temperature (internal ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
/* enum: controller internal temperature sensor voltage (external ADC): mV */
-#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
/* enum: controller internal temperature (external ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
/* enum: ambient temperature: degC */
-#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
/* enum: air flow: bool */
-#define MC_CMD_SENSOR_AIRFLOW 0x2a
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
/* enum: voltage between VSS08D and VSS08D at CSR: mV */
-#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
/* enum: voltage between VSS08D and VSS08D at CSR (external ADC): mV */
-#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
/* enum: Hotpoint temperature: degC */
-#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
/* enum: Port 0 PHY power switch over-current: bool */
-#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
/* enum: Port 1 PHY power switch over-current: bool */
-#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
-/* enum: Mop-up microcontroller reference voltage (millivolts) */
-#define MC_CMD_SENSOR_MUM_VCC 0x30
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage: mV */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
/* enum: 0.9v power phase A voltage: mV */
-#define MC_CMD_SENSOR_IN_0V9_A 0x31
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
/* enum: 0.9v power phase A current: mA */
-#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
/* enum: 0.9V voltage regulator phase A temperature: degC */
-#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
/* enum: 0.9v power phase B voltage: mV */
-#define MC_CMD_SENSOR_IN_0V9_B 0x34
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
/* enum: 0.9v power phase B current: mA */
-#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
/* enum: 0.9V voltage regulator phase B temperature: degC */
-#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
-#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
-#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
-#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
-#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
/* enum: CCOM RTS temperature: degC */
-#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
/* enum: Not a sensor: reserved for the next page flag */
-#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
/* enum: controller internal temperature sensor voltage on master core
* (internal ADC): mV
*/
-#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
/* enum: controller internal temperature on master core (internal ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
/* enum: controller internal temperature sensor voltage on master core
* (external ADC): mV
*/
-#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
/* enum: controller internal temperature on master core (external ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
/* enum: controller internal temperature sensor voltage on slave core (internal
 * ADC): mV
 */
-#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
/* enum: controller internal temperature on slave core (internal ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
/* enum: controller internal temperature sensor voltage on slave core (external
 * ADC): mV
 */
-#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
/* enum: controller internal temperature on slave core (external ADC): degC */
-#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
-#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
/* enum: Temperature of SODIMM 0 (if installed): degC */
-#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
/* enum: Temperature of SODIMM 1 (if installed): degC */
-#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
/* enum: Voltage supplied to the QSFP #0 from its power supply: mV */
-#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
/* enum: Voltage supplied to the QSFP #1 from its power supply: mV */
-#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
/* enum: Controller die temperature (TDIODE): degC */
-#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
/* enum: Board temperature (front): degC */
-#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
/* enum: Board temperature (back): degC */
-#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE2_NEXT 0x5f
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -6723,6 +5413,7 @@
#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO_OUT */
#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
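
/* Editor's sketch (not part of the patch): sensors above 0x3f live on later
 * pages of the sensor mask, and the PAGE*_NEXT values above are reserved
 * mask bits rather than real sensors. A consumer of the EXT_OUT mask keeps
 * requesting further pages while the NEXT_PAGE bit (LBN 31) is set, roughly
 * as below; the helper name is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define SENSOR_NEXT_PAGE_LBN 31 /* mirrors MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN */

static bool sensor_mask_has_next_page(uint32_t page_mask)
{
        /* Bit 31 of each page's mask indicates that another page follows. */
        return (page_mask >> SENSOR_NEXT_PAGE_LBN) & 1u;
}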
@@ -6775,7 +5466,7 @@
#define MC_CMD_READ_SENSORS 0x42
#undef MC_CMD_0x42_PRIVILEGE_CTG
-#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
@@ -6794,6 +5485,7 @@
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
/* Size in bytes of host buffer. */
#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
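
/* Editor's sketch (not part of the patch): MC_CMD_READ_SENSORS_EXT_IN carries
 * a 64-bit host DMA address split across two little-endian dwords; the HI
 * dword at offset 4 is shown above, and the LO dword is assumed to sit at
 * offset 0 per the usual MCDI convention. Splitting an address looks roughly
 * like this; the helper name is illustrative.
 */
#include <stdint.h>

static void mcdi_split_dma_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
        *lo = (uint32_t)addr;         /* DMA_ADDR_LO, offset 0 (assumed) */
        *hi = (uint32_t)(addr >> 32); /* DMA_ADDR_HI, offset 4 */
}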
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
@@ -6810,17 +5502,17 @@
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
/* enum: Ok. */
-#define MC_CMD_SENSOR_STATE_OK 0x0
+#define MC_CMD_SENSOR_STATE_OK 0x0
/* enum: Breached warning threshold. */
-#define MC_CMD_SENSOR_STATE_WARNING 0x1
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
/* enum: Breached fatal threshold. */
-#define MC_CMD_SENSOR_STATE_FATAL 0x2
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
/* enum: Fault with sensor. */
-#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
/* enum: Sensor is working but does not currently have a reading. */
-#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
/* enum: Sensor initialisation failed. */
-#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
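
/* Editor's sketch (not part of the patch): alongside the byte-oriented
 * _OFST/_LEN pairs, each field also has a bit-oriented _LBN/_WIDTH pair
 * giving its least-significant bit number and width within the structure.
 * Extracting such a field from a little-endian dword is typically done as
 * below; the helper name is illustrative and assumes width < 32.
 */
#include <stdint.h>

static uint32_t mcdi_extract_field(uint32_t dword, unsigned int lbn,
                                   unsigned int width)
{
        return (dword >> lbn) & ((1u << width) - 1u);
}
/* e.g. state = mcdi_extract_field(entry, 16, 8) per STATE_LBN/STATE_WIDTH */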
@@ -6848,6 +5540,7 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
/* enum: Ok. */
#define MC_CMD_PHY_STATE_OK 0x1
/* enum: Faulty. */
@@ -6885,6 +5578,7 @@
/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -6902,8 +5596,9 @@
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
-#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
@@ -6912,13 +5607,16 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
@@ -6929,6 +5627,7 @@
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
/***********************************/
@@ -6944,7 +5643,9 @@
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
@@ -6972,7 +5673,7 @@
#define MC_CMD_TESTASSERT 0x49
#undef MC_CMD_0x49_PRIVILEGE_CTG
-#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_TESTASSERT_IN msgrequest */
#define MC_CMD_TESTASSERT_IN_LEN 0
@@ -6984,20 +5685,21 @@
#define MC_CMD_TESTASSERT_V2_IN_LEN 4
/* How to provoke the assertion */
#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
* you're testing firmware, this is what you want.
*/
-#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
/* enum: Assert using assert(0); */
-#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
/* enum: Deliberately trigger a watchdog */
-#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
/* enum: Deliberately trigger a trap by loading from an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
/* enum: Deliberately trigger a trap by storing to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
/* enum: Jump to an invalid address */
-#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
@@ -7020,6 +5722,7 @@
#define MC_CMD_WORKAROUND_IN_LEN 8
/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_IN_TYPE_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_WORKAROUND_BUG17230 0x1
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -7048,6 +5751,7 @@
* the workaround
*/
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
#define MC_CMD_WORKAROUND_OUT_LEN 0
@@ -7057,6 +5761,7 @@
*/
#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
@@ -7078,6 +5783,7 @@
/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
@@ -7085,6 +5791,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
@@ -7099,17 +5806,19 @@
#define MC_CMD_NVRAM_TEST 0x4c
#undef MC_CMD_0x4c_PRIVILEGE_CTG
-#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_TEST_IN msgrequest */
#define MC_CMD_NVRAM_TEST_IN_LEN 4
#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
/* enum: Passed. */
#define MC_CMD_NVRAM_TEST_PASS 0x0
/* enum: Failed. */
@@ -7130,12 +5839,16 @@
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
/* 0-8 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
@@ -7144,10 +5857,13 @@
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
/* enum: Out. */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
/* enum: In. */
@@ -7163,21 +5879,26 @@
#define MC_CMD_SENSOR_SET_LIMS 0x4e
#undef MC_CMD_0x4e_PRIVILEGE_CTG
-#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
@@ -7194,9 +5915,13 @@
/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
/***********************************/
@@ -7218,6 +5943,7 @@
#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
/* total number of partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
/* type ID code for each of NUM_PARTITIONS partitions */
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
@@ -7239,6 +5965,7 @@
#define MC_CMD_NVRAM_METADATA_IN_LEN 4
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
@@ -7246,7 +5973,9 @@
#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
/* Partition type ID code */
#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
@@ -7255,6 +5984,7 @@
#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
/* Subtype ID code for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
/* 1st component of W.X.Y.Z version number for content of this partition */
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
@@ -7296,8 +6026,10 @@
#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
/* Number of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
/* Spacing of allocated MAC addresses */
#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
/***********************************/
@@ -7307,12 +6039,13 @@
#define MC_CMD_CLP 0x56
#undef MC_CMD_0x56_PRIVILEGE_CTG
-#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_CLP_IN msgrequest */
#define MC_CMD_CLP_IN_LEN 4
/* Sub operation */
#define MC_CMD_CLP_IN_OP_OFST 0
+#define MC_CMD_CLP_IN_OP_LEN 4
/* enum: Return to factory default settings */
#define MC_CMD_CLP_OP_DEFAULT 0x1
/* enum: Set MAC address */
@@ -7330,6 +6063,7 @@
/* MC_CMD_CLP_IN_DEFAULT msgrequest */
#define MC_CMD_CLP_IN_DEFAULT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
@@ -7337,6 +6071,7 @@
/* MC_CMD_CLP_IN_SET_MAC msgrequest */
#define MC_CMD_CLP_IN_SET_MAC_LEN 12
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MAC address assigned to port */
#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
@@ -7350,6 +6085,7 @@
/* MC_CMD_CLP_IN_GET_MAC msgrequest */
#define MC_CMD_CLP_IN_GET_MAC_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
@@ -7363,6 +6099,7 @@
/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* Boot flag */
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
@@ -7373,6 +6110,7 @@
/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MC_CMD_CLP_IN_OP_LEN 4 */
/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
@@ -7391,11 +6129,12 @@
#define MC_CMD_MUM 0x57
#undef MC_CMD_0x57_PRIVILEGE_CTG
-#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_MUM_IN msgrequest */
#define MC_CMD_MUM_IN_LEN 4
#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_OP_LBN 0
#define MC_CMD_MUM_IN_OP_WIDTH 8
/* enum: NULL MCDI command to MUM */
@@ -7435,26 +6174,32 @@
#define MC_CMD_MUM_IN_NULL_LEN 4
/* MUM cmd header */
#define MC_CMD_MUM_IN_CMD_OFST 0
+#define MC_CMD_MUM_IN_CMD_LEN 4
/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_READ msgrequest */
#define MC_CMD_MUM_IN_READ_LEN 16
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of the device (connected to the MUM) whose registers are to be read */
#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE 0x1
/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
/* 32-bit address to read from */
#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define MC_CMD_MUM_IN_READ_ADDR_LEN 4
/* Number of words to read. */
#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
/* MC_CMD_MUM_IN_WRITE msgrequest */
#define MC_CMD_MUM_IN_WRITE_LENMIN 16
@@ -7462,12 +6207,15 @@
#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* ID of the device (connected to the MUM) whose registers are to be written */
#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
/* enum: Hittite HMC1035 clock generator on Sorrento board */
/* MC_CMD_MUM_DEV_HITTITE 0x1 */
/* 32-bit address to write to */
#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
/* Words to write */
#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
@@ -7480,12 +6228,16 @@
#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MUM I2C cmd code */
#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
/* Number of bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
/* Number of bytes to read */
#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
/* Bytes to write */
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
@@ -7496,21 +6248,28 @@
#define MC_CMD_MUM_IN_LOG_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_LOG_OP_OFST 4
-#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+#define MC_CMD_MUM_IN_LOG_OP_LEN 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */
/* Enable/disable debug output to UART */
#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
/* MC_CMD_MUM_IN_GPIO msgrequest */
#define MC_CMD_MUM_IN_GPIO_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
@@ -7523,40 +6282,56 @@
/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT register. */
#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
@@ -7569,26 +6344,34 @@
/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
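
/* Editor's sketch (not part of the patch): the GPIO_OP header dword packs a
 * bitwise op in bits 8-15 (BITWISE_OP_LBN/WIDTH above) and, for OUT_WRITE,
 * the value to write in bits 24-31 (WRITEBIT_LBN/WIDTH); the low byte is
 * assumed to carry the opcode, as in the plain GPIO header. Composing the
 * dword looks roughly like this; the helper name is illustrative and
 * assumes width < 32.
 */
#include <stdint.h>

static uint32_t mcdi_insert_field(uint32_t dword, unsigned int lbn,
                                  unsigned int width, uint32_t val)
{
        uint32_t mask = ((1u << width) - 1u) << lbn;

        return (dword & ~mask) | ((val << lbn) & mask);
}
/* e.g. hdr = mcdi_insert_field(hdr, 24, 8, bit) for GPIO_OP_OUT_WRITE */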
/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
@@ -7596,7 +6379,9 @@
#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
@@ -7606,13 +6391,16 @@
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Bit-mask of clocks to be programmed */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
/* Control flags for clock programming */
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
@@ -7624,19 +6412,24 @@
#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* Enable/Disable FPGA config from flash */
#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_IN_QSFP msgrequest */
#define MC_CMD_MUM_IN_QSFP_LEN 12
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
@@ -7646,52 +6439,77 @@
#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
/* MUM cmd header */
/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_CMD_LEN 4 */
/* MC_CMD_MUM_OUT msgresponse */
#define MC_CMD_MUM_OUT_LEN 0
@@ -7702,6 +6520,7 @@
/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
@@ -7739,8 +6558,10 @@
#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
/* The first 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO IN register. */
#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
@@ -7749,8 +6570,10 @@
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
/* The first 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
/* The second 32-bit word read from the GPIO OUT register. */
#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
@@ -7758,11 +6581,14 @@
/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
@@ -7791,6 +6617,7 @@
/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
@@ -7798,6 +6625,7 @@
/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
@@ -7805,7 +6633,9 @@
/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
@@ -7814,6 +6644,7 @@
/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
@@ -7821,6 +6652,7 @@
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
/* in bytes */
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
@@ -7829,11 +6661,14 @@
/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
@@ -7841,12 +6676,14 @@
#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
/* Discrete (soldered) DDR resistor strap info */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
/* Number of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
/* Array of SODIMM info records */
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
@@ -7907,18 +6744,19 @@
/* EVB_PORT_ID structuredef */
#define EVB_PORT_ID_LEN 4
#define EVB_PORT_ID_PORT_ID_OFST 0
+#define EVB_PORT_ID_PORT_ID_LEN 4
/* enum: An invalid port handle. */
-#define EVB_PORT_ID_NULL 0x0
+#define EVB_PORT_ID_NULL 0x0
/* enum: The port assigned to this function. */
-#define EVB_PORT_ID_ASSIGNED 0x1000000
+#define EVB_PORT_ID_ASSIGNED 0x1000000
/* enum: External network port 0 */
-#define EVB_PORT_ID_MAC0 0x2000000
+#define EVB_PORT_ID_MAC0 0x2000000
/* enum: External network port 1 */
-#define EVB_PORT_ID_MAC1 0x2000001
+#define EVB_PORT_ID_MAC1 0x2000001
/* enum: External network port 2 */
-#define EVB_PORT_ID_MAC2 0x2000002
+#define EVB_PORT_ID_MAC2 0x2000002
/* enum: External network port 3 */
-#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_MAC3 0x2000003
#define EVB_PORT_ID_PORT_ID_LBN 0
#define EVB_PORT_ID_PORT_ID_WIDTH 32
@@ -7930,7 +6768,7 @@
#define EVB_VLAN_TAG_MODE_LBN 12
#define EVB_VLAN_TAG_MODE_WIDTH 4
/* enum: Insert the VLAN. */
-#define EVB_VLAN_TAG_INSERT 0x0
+#define EVB_VLAN_TAG_INSERT 0x0
/* enum: Replace the VLAN if already present. */
#define EVB_VLAN_TAG_REPLACE 0x1
@@ -7959,121 +6797,149 @@
#define NVRAM_PARTITION_TYPE_ID_OFST 0
#define NVRAM_PARTITION_TYPE_ID_LEN 2
/* enum: Primary MC firmware partition */
-#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
/* enum: Secondary MC firmware partition */
-#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
/* enum: Expansion ROM partition */
-#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
/* enum: Static configuration TLV partition */
-#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
/* enum: Dynamic configuration TLV partition */
-#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
/* enum: Expansion ROM configuration data for port 0 */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
/* enum: Expansion ROM configuration data for port 1 */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
/* enum: Expansion ROM configuration data for port 2 */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
/* enum: Expansion ROM configuration data for port 3 */
-#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
/* enum: Non-volatile log output partition */
-#define NVRAM_PARTITION_TYPE_LOG 0x700
+#define NVRAM_PARTITION_TYPE_LOG 0x700
/* enum: Non-volatile log output of second core on dual-core device */
-#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
/* enum: Device state dump output partition */
-#define NVRAM_PARTITION_TYPE_DUMP 0x800
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Application license key storage partition */
-#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
-#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
-#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
/* enum: Primary FPGA partition */
-#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
/* enum: Secondary FPGA partition */
-#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
/* enum: FC firmware partition */
-#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
/* enum: FC License partition */
-#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
/* enum: Non-volatile log output partition for FC */
-#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
/* enum: MUM firmware partition */
-#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
/* enum: MUM Non-volatile log output partition. */
-#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
/* enum: MUM Application table partition. */
-#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
/* enum: MUM boot rom partition. */
-#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
/* enum: MUM production signatures & calibration rom partition. */
-#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
/* enum: MUM user signatures & calibration rom partition. */
-#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
/* enum: MUM fuses and lockbits partition. */
-#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
/* enum: UEFI expansion ROM if separate from PXE */
-#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
-/* enum: Spare partition 0 */
-#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Used by the expansion ROM for logging */
+#define NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
/* enum: Used for XIP code of shmbooted images */
-#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
/* enum: Spare partition 2 */
-#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
/* enum: Manufacturing partition. Used during manufacture to pass information
* between XJTAG and Manftest.
*/
-#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
/* enum: Spare partition 4 */
-#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
/* enum: Spare partition 5 */
-#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
/* enum: Partition for reporting MC status. See mc_flash_layout.h
* medford_mc_status_hdr_t for layout on Medford.
*/
-#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
/* enum: Start of reserved value range (firmware may use for any purpose) */
-#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
-#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
/* enum: Recovery partition map (provided if real map is missing or corrupt) */
-#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
/* enum: Partition map (real map as stored in flash) */
-#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
#define NVRAM_PARTITION_TYPE_ID_LBN 0
#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
/* LICENSED_APP_ID structuredef */
#define LICENSED_APP_ID_LEN 4
#define LICENSED_APP_ID_ID_OFST 0
+#define LICENSED_APP_ID_ID_LEN 4
/* enum: OpenOnload */
-#define LICENSED_APP_ID_ONLOAD 0x1
+#define LICENSED_APP_ID_ONLOAD 0x1
/* enum: PTP timestamping */
-#define LICENSED_APP_ID_PTP 0x2
+#define LICENSED_APP_ID_PTP 0x2
/* enum: SolarCapture Pro */
-#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
/* enum: SolarSecure filter engine */
-#define LICENSED_APP_ID_SOLARSECURE 0x8
+#define LICENSED_APP_ID_SOLARSECURE 0x8
/* enum: Performance monitor */
-#define LICENSED_APP_ID_PERF_MONITOR 0x10
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
/* enum: SolarCapture Live */
-#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
/* enum: Capture SolarSystem */
-#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
/* enum: Network Access Control */
-#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
/* enum: TCP Direct */
-#define LICENSED_APP_ID_TCP_DIRECT 0x100
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
/* enum: Low Latency */
-#define LICENSED_APP_ID_LOW_LATENCY 0x200
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
/* enum: SolarCapture Tap */
-#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
/* enum: Capture SolarSystem 40G */
#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
/* enum: Capture SolarSystem 1G */
-#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define LICENSED_APP_ID_SCATRD 0x8000
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
@@ -8140,6 +7006,12 @@
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_DSHBRD_LBN 14
+#define LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define LICENSED_V3_APPS_SCATRD_LBN 15
+#define LICENSED_V3_APPS_SCATRD_WIDTH 1
#define LICENSED_V3_APPS_MASK_LBN 0
#define LICENSED_V3_APPS_MASK_WIDTH 64
@@ -8185,11 +7057,23 @@
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
/* enum: This is a TX completion event, not a timestamp */
-#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI.
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
/* enum: This is the low part of a TX timestamp event */
-#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
/* enum: This is the high part of a TX timestamp event */
-#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
/* upper 16 bits of timestamp data */
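
/* Editor's sketch (not part of the patch): TX timestamps arrive as a LO/HI
 * event pair (TX_EV_TSTAMP_LO/HI, or the CTPIO variants added above). Per
 * the adjacent comment the HI event carries the upper 16 bits of timestamp
 * data; assuming the LO event carries the lower 32 bits, a consumer stitches
 * the parts together roughly as below. The exact field widths are an
 * assumption here, not taken from this header.
 */
#include <stdint.h>

static uint64_t tx_tstamp_assemble(uint32_t lo_event_data, uint16_t hi_event_data)
{
        return ((uint64_t)hi_event_data << 32) | lo_event_data;
}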
@@ -8232,6 +7116,42 @@
#define CTPIO_STATS_MAP_BUCKET_LBN 16
#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+/* MESSAGE_TYPE structuredef: When present, this defines the meaning of a
+ * message, and is used to protect against chosen-message attacks in signed
+ * messages, regardless of their origin. The message type also defines the
+ * signature cryptographic algorithm, encoding, and message fields included in
+ * the signature. The values are used in different commands but must be unique
+ * across all commands, e.g. MC_CMD_TSA_BIND_IN_SECURE_UNBIND uses a different
+ * message type than MC_CMD_SECURE_NIC_INFO_IN_STATUS.
+ */
+#define MESSAGE_TYPE_LEN 4
+#define MESSAGE_TYPE_MESSAGE_TYPE_OFST 0
+#define MESSAGE_TYPE_MESSAGE_TYPE_LEN 4
+#define MESSAGE_TYPE_UNUSED 0x0 /* enum */
+/* enum: Message type value for the response to a
+ * MC_CMD_TSA_BIND_IN_SECURE_UNBIND message. TSA_SECURE_UNBIND messages are
+ * ECDSA SECP384R1 signed using the SHA384 message digest algorithm over
+ * fields MESSAGE_TYPE, TSANID, TSAID, and UNBINDTOKEN, and encoded as
+ * suggested by RFC6979 (section 2.4).
+ */
+#define MESSAGE_TYPE_TSA_SECURE_UNBIND 0x1
+/* enum: Message type value for the response to a
+ * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION message. TSA_SECURE_DECOMMISSION
+ * messages are ECDSA SECP384R1 signed using the SHA384 message digest
+ * algorithm over fields MESSAGE_TYPE, TSAID, USER, and REASON, and encoded
+ * as suggested by RFC6979 (section 2.4).
+ */
+#define MESSAGE_TYPE_TSA_SECURE_DECOMMISSION 0x2
+/* enum: Message type value for the response to a
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS message. This enum value is not sequential
+ * to other message types for backwards compatibility as the message type for
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS was defined before the existence of this
+ * global enum.
+ */
+#define MESSAGE_TYPE_SECURE_NIC_INFO_STATUS 0xdb4
+#define MESSAGE_TYPE_MESSAGE_TYPE_LBN 0
+#define MESSAGE_TYPE_MESSAGE_TYPE_WIDTH 32
+
/***********************************/
/* MC_CMD_READ_REGS
@@ -8240,7 +7160,7 @@
#define MC_CMD_READ_REGS 0x50
#undef MC_CMD_0x50_PRIVILEGE_CTG
-#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_REGS_IN msgrequest */
#define MC_CMD_READ_REGS_IN_LEN 0
@@ -8274,17 +7194,22 @@
#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
/* tbd */
#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
@@ -8300,6 +7225,7 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -8310,13 +7236,16 @@
#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -8327,6 +7256,7 @@
#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
@@ -8339,6 +7269,7 @@
#define MC_CMD_INIT_EVQ_OUT_LEN 4
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
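
/* Editor's sketch (not part of the patch): the _OFST/_LEN pairs this patch
 * adds make each field's byte offset and byte length explicit, so request
 * buffers can be populated generically. Filling the first two fields of an
 * INIT_EVQ request might look roughly like this; the helpers and buffer
 * handling are illustrative, not the driver's API.
 */
#include <stdint.h>
#include <string.h>

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
        uint8_t le[4] = { val & 0xff, (val >> 8) & 0xff,
                          (val >> 16) & 0xff, (val >> 24) & 0xff };

        memcpy(buf + ofst, le, sizeof(le)); /* MCDI fields are little-endian */
}

static void init_evq_request(uint8_t *req, uint32_t size, uint32_t instance)
{
        mcdi_put_dword(req, 0, size);     /* MC_CMD_INIT_EVQ_IN_SIZE_OFST */
        mcdi_put_dword(req, 4, instance); /* MC_CMD_INIT_EVQ_IN_INSTANCE_OFST */
}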
/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
@@ -8346,17 +7277,22 @@
#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
/* The initial timer value. The load value is ignored if the timer mode is DIS.
*/
#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
/* The reload value is ignored in one-shot modes */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
/* tbd */
#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
@@ -8393,6 +7329,7 @@
*/
#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
/* enum: Immediate */
@@ -8403,13 +7340,16 @@
#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
/* Target EVQ for wakeups if in wakeup mode. */
#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
/* Target interrupt if in interrupting mode (note union with target EVQ). Use
 * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
* purposes.
*/
#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
/* Event Counter Mode. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
/* enum: Disabled */
@@ -8420,6 +7360,7 @@
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
/* Event queue packet count threshold. */
#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
@@ -8432,8 +7373,10 @@
#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
/* Only valid if INTRFLAG was true */
#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
/* Actual configuration applied on the card */
#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
@@ -8448,17 +7391,17 @@
#define QUEUE_CRC_MODE_MODE_LBN 0
#define QUEUE_CRC_MODE_MODE_WIDTH 4
/* enum: No CRC. */
-#define QUEUE_CRC_MODE_NONE 0x0
+#define QUEUE_CRC_MODE_NONE 0x0
/* enum: CRC Fiber channel over ethernet. */
-#define QUEUE_CRC_MODE_FCOE 0x1
+#define QUEUE_CRC_MODE_FCOE 0x1
/* enum: CRC (digest) iSCSI header only. */
-#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
/* enum: CRC (digest) iSCSI header and payload. */
-#define QUEUE_CRC_MODE_ISCSI 0x3
+#define QUEUE_CRC_MODE_ISCSI 0x3
/* enum: CRC Fiber channel over IP over ethernet. */
-#define QUEUE_CRC_MODE_FCOIPOE 0x4
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
/* enum: CRC MPA. */
-#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_MPA 0x5
#define QUEUE_CRC_MODE_SPARE_LBN 4
#define QUEUE_CRC_MODE_SPARE_WIDTH 4
@@ -8482,17 +7425,22 @@
#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
*/
#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
@@ -8511,8 +7459,10 @@
#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
@@ -8527,17 +7477,26 @@
#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
-/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
*/
#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
-/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
@@ -8555,26 +7514,37 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
/* enum: One packet per descriptor (for normal networking) */
-#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
-#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
-#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -8583,6 +7553,116 @@
#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
+
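/* Illustrative sketch (editor's example): _OFST/_LEN/_NUM triplets such as
 * DMA_ADDR above describe an array field, so entry i starts at
 * OFST + i * LEN.  MCDI payloads are composed of little-endian dwords; the
 * put_le32() helper below is hypothetical, not part of the patch:
 */
#include <stdint.h>
#include <string.h>

static void
put_le32(uint8_t *buf, size_t ofst, uint32_t val)
{
        uint8_t le[4] = {
                (uint8_t)val, (uint8_t)(val >> 8),
                (uint8_t)(val >> 16), (uint8_t)(val >> 24),
        };

        memcpy(buf + ofst, le, sizeof(le));
}

static void
init_rxq_ext_set_dma_addr(uint8_t *req, unsigned int i, uint64_t busaddr)
{
        size_t ofst = MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST +
            (size_t)i * MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN;

        put_le32(req, ofst, (uint32_t)busaddr);             /* LO half */
        put_le32(req, ofst + 4, (uint32_t)(busaddr >> 32)); /* HI half */
}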
+/* MC_CMD_INIT_RXQ_V3_IN msgrequest */
+#define MC_CMD_INIT_RXQ_V3_IN_LEN 560
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0
+#define MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
+#define MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
+#define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548
+#define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552
+#define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
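/* Illustrative sketch (editor's example, reusing the hypothetical put_le32()
 * helper above; the layout values are placeholders, not recommendations):
 * populating the EQUAL_STRIDE_SUPER_BUFFER parameters of an INIT_RXQ_V3
 * request.  These fields are only honoured when DMA_MODE ==
 * EQUAL_STRIDE_SUPER_BUFFER.
 */
static void
init_rxq_v3_set_es_params(uint8_t *req)
{
        /* 32 fixed-size packet buffers per driver-supplied bucket. */
        put_le32(req,
            MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST, 32);
        /* Adapter may write up to 2048 bytes (prefix + payload) per buffer. */
        put_le32(req, MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST, 2048);
        /* Distance between consecutive packet buffers within a bucket. */
        put_le32(req, MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST, 2048);
        /* 0 => never backpressure the datapath when descriptors run out. */
        put_le32(req,
            MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST, 0);
}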
/* MC_CMD_INIT_RXQ_OUT msgresponse */
#define MC_CMD_INIT_RXQ_OUT_LEN 0
@@ -8590,6 +7670,9 @@
/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_V3_OUT_LEN 0
+
/***********************************/
/* MC_CMD_INIT_TXQ
@@ -8607,18 +7690,23 @@
#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
/* Size, in entries */
#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -8639,8 +7727,10 @@
#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
@@ -8655,18 +7745,23 @@
#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
/* Size, in entries */
#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
/* The EVQ to send events to. This is an index originally specified to
* INIT_EVQ.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
/* The value to put in the event data. Check hardware spec. for valid range. */
#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
/* Desired instance. Must be set to a specific instance, which is a function
* local queue index.
*/
#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
/* There will be more flags here. */
#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
@@ -8689,10 +7784,14 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
/* 64-bit address of 4k of 4k-aligned host memory buffer */
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
@@ -8702,6 +7801,7 @@
#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
/* Flags related to Qbb flow control mode. */
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
@@ -8729,6 +7829,7 @@
* passed to INIT_EVQ
*/
#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_EVQ_OUT msgresponse */
#define MC_CMD_FINI_EVQ_OUT_LEN 0
@@ -8747,6 +7848,7 @@
#define MC_CMD_FINI_RXQ_IN_LEN 4
/* Instance of RXQ to destroy */
#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_RXQ_OUT msgresponse */
#define MC_CMD_FINI_RXQ_OUT_LEN 0
@@ -8765,6 +7867,7 @@
#define MC_CMD_FINI_TXQ_IN_LEN 4
/* Instance of TXQ to destroy */
#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
/* MC_CMD_FINI_TXQ_OUT msgresponse */
#define MC_CMD_FINI_TXQ_OUT_LEN 0
@@ -8783,6 +7886,7 @@
#define MC_CMD_DRIVER_EVENT_IN_LEN 12
/* Handle of target EVQ */
#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
/* Bits 0 - 63 of event */
#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
@@ -8809,11 +7913,12 @@
#define MC_CMD_PROXY_CMD_IN_LEN 4
/* The handle of the target function. */
#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
-#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
/* MC_CMD_PROXY_CMD_OUT msgresponse */
#define MC_CMD_PROXY_CMD_OUT_LEN 0
@@ -8824,8 +7929,9 @@
#define MC_PROXY_STATUS_BUFFER_LEN 16
/* Handle allocated by the firmware for this proxy transaction */
#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
/* enum: An invalid handle. */
-#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
/* The requesting physical function number */
@@ -8854,6 +7960,7 @@
* elevated privilege mask granted to the requesting function.
*/
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
@@ -8871,6 +7978,7 @@
/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -8882,6 +7990,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size REPLY_BLOCK_SIZE.
*/
@@ -8891,6 +8000,7 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -8901,8 +8011,10 @@
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
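/* Illustrative sketch (editor's example): each of the status/request/reply
 * buffers supplied to PROXY_CONFIGURE must hold NUM_BLOCKS blocks of its
 * block size; block sizes must be powers of 2, with zero permitted only for
 * the optional reply buffer.  A hypothetical validity check:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
proxy_block_size_ok(uint32_t block_size, bool is_reply_buffer)
{
        if (block_size == 0)
                return is_reply_buffer;        /* only REPLY may be omitted */

        return (block_size & (block_size - 1)) == 0;    /* power of 2 */
}

static size_t
proxy_buffer_bytes(uint32_t block_size, uint32_t num_blocks)
{
        return (size_t)block_size * num_blocks; /* minimum buffer size */
}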
@@ -8910,6 +8022,7 @@
/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
@@ -8921,6 +8034,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size REPLY_BLOCK_SIZE.
*/
@@ -8930,6 +8044,7 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
/* Must be a power of 2 */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
* of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if
* host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
@@ -8940,12 +8055,15 @@
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
/* Must be a power of 2, or zero if this buffer is not provided */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
/* Applies to all three buffers */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
/* A bit mask defining which MCDI operations may be proxied */
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -8966,7 +8084,9 @@
/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
* is stored in the REPLY_BUFF.
*/
@@ -8982,6 +8102,7 @@
*/
#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
@@ -9002,17 +8123,22 @@
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
/* Owner ID to use */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
/* Size of buffer table pages to use, in bytes (note that only a few values are
* legal on any specific hardware).
*/
#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
/* Buffer table IDs for use in DMA descriptors. */
#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
/***********************************/
@@ -9029,10 +8155,13 @@
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
/* ID */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Num entries */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* Buffer table entry address */
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
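/* Illustrative sketch (editor's example): PROGRAM_BUFTBL_ENTRIES carries a
 * variable-length array of 8-byte entries after its 12-byte fixed header,
 * so the request length and the offset of entry i fall out of the macros
 * above.  The typical lifecycle is ALLOC_BUFTBL_CHUNK, then one or more
 * PROGRAM_BUFTBL_ENTRIES with FIRSTID inside the allocated chunk, then
 * FREE_BUFTBL_CHUNK on teardown.
 */
#include <stddef.h>

static size_t
program_buftbl_entry_ofst(unsigned int i)
{
        return MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST +
            (size_t)i * MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN;
}

static size_t
program_buftbl_req_len(unsigned int num)
{
        return MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num);   /* 12 + 8*num */
}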
@@ -9056,48 +8185,11 @@
/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
-/* PORT_CONFIG_ENTRY structuredef */
-#define PORT_CONFIG_ENTRY_LEN 16
-/* External port number (label) */
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
-#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
-/* Port core location */
-#define PORT_CONFIG_ENTRY_CORE_OFST 1
-#define PORT_CONFIG_ENTRY_CORE_LEN 1
-#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
-#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
-#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
-#define PORT_CONFIG_ENTRY_CORE_LBN 8
-#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
-/* Internal number (HW resource) relative to the core */
-#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
-#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
-#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
-/* Reserved */
-#define PORT_CONFIG_ENTRY_RSVD_OFST 3
-#define PORT_CONFIG_ENTRY_RSVD_LEN 1
-#define PORT_CONFIG_ENTRY_RSVD_LBN 24
-#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
-/* Bitmask of KR lanes used by the port */
-#define PORT_CONFIG_ENTRY_LANES_OFST 4
-#define PORT_CONFIG_ENTRY_LANES_LBN 32
-#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
-/* Port capabilities (MC_CMD_PHY_CAP_*) */
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
-#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
-/* Reserved (align to 16 bytes) */
-#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
-#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
-#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
-
/***********************************/
/* MC_CMD_FILTER_OP
@@ -9112,18 +8204,19 @@
#define MC_CMD_FILTER_OP_IN_LEN 108
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_IN_OP_LEN 4
/* enum: single-recipient filter insert */
-#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
/* enum: single-recipient filter remove */
-#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
/* enum: multi-recipient filter subscribe */
-#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
/* enum: multi-recipient filter unsubscribe */
-#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
/* enum: replace one recipient with another (warning - the filter handle may
* change)
*/
-#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
/* filter handle (for remove / unsubscribe operations) */
#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
@@ -9132,8 +8225,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
@@ -9164,43 +8259,49 @@
#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
/* enum: drop packets */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
/* enum: receive to MC */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
/* enum: loop back to TXDP 0 */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
/* enum: loop back to TXDP 1 */
-#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
/* enum: receive to multiple queues using .1p mapping */
-#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
/* enum: install a filter entry that will never match; for test purposes only
*/
-#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
* RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
-#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
@@ -9231,8 +8332,10 @@
#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
/* Firmware defined register 1 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
/* source IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -9251,6 +8354,7 @@
#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* filter handle (for remove / unsubscribe operations) */
@@ -9261,8 +8365,10 @@
/* The port ID associated with the v-adaptor which should contain this filter.
*/
#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
/* fields to include in match criteria */
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
@@ -9321,43 +8427,49 @@
#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
/* receive destination */
#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
/* enum: drop packets */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
/* enum: receive to host */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
/* enum: receive to MC */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
/* enum: loop back to TXDP 0 */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
/* enum: loop back to TXDP 1 */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
/* receive queue handle (for multiple queue modes, this is the base queue) */
#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
/* enum: receive to multiple queues using .1p mapping */
-#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
/* enum: install a filter entry that will never match; for test purposes only
*/
-#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
* RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
* MC_CMD_DOT1P_MAPPING_ALLOC.
*/
#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
/* transmit domain (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
/* transmit destination (either set the MAC and/or PM bits for explicit
* control, or set this field to TX_DEST_DEFAULT for sensible default
* behaviour)
*/
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
/* enum: request default behaviour (based on filter type) */
-#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
@@ -9388,27 +8500,29 @@
#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
/* Firmware defined register 0 to match (reserved; set to 0) */
#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
* protocol is GRE) to match (as bytes in network order; set last byte to 0 for
* VXLAN/NVGRE, or 1 for Geneve)
*/
#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
/* enum: Match VXLAN traffic with this VNI */
-#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
/* enum: Match Geneve traffic with this VNI */
-#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
/* enum: Reserved for experimental development use */
-#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
/* enum: Match NVGRE traffic with this VSID */
-#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
/* source IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -9458,10 +8572,12 @@
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
* to 0)
*/
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
* order; set last 12 bytes to 0 for IPv4 address)
*/
@@ -9473,10 +8589,281 @@
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
+ * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
+ * its rte_flow API. This extension is only useful with the sfc_efx driver
+ * included as part of DPDK, used in conjunction with the dpdk datapath
+ * firmware variant.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_LEN 180
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_V3_IN_OP_OFST 0
+#define MC_CMD_FILTER_OP_V3_IN_OP_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
+#define MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24
+#define MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32
+#define MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36
+#define MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68
+#define MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
+/* Set an action for all packets matching this filter. The DPDK driver and dpdk
+ * f/w variant use their own specific delivery structures, which are documented
+ * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
+ * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
+ * will cause the filter insertion to fail with ENOTSUP.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
+/* enum: do nothing extra */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0
+/* enum: Set the match flag in the packet prefix for packets matching the
+ * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "FLAG" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1
+/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching
+ * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "MARK" action.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2
+/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the
+ * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX)
+ * will cause the filter insertion to fail with EINVAL.
+ */
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176
+#define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4
+
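/* Illustrative sketch (editor's example, reusing the hypothetical put_le32()
 * helper above): requesting the rte_flow MARK action via FILTER_OP_V3.
 * mark_max is assumed to have been read from
 * MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX; larger values make the
 * insertion fail with EINVAL, and any action other than NONE fails with
 * ENOTSUP unless the dpdk firmware variant is running.
 */
#include <errno.h>
#include <stdint.h>

static int
filter_op_v3_set_mark(uint8_t *req, uint32_t mark, uint32_t mark_max)
{
        if (mark > mark_max)
                return -EINVAL;         /* firmware would reject it anyway */

        put_le32(req, MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST,
            MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
        put_le32(req, MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST, mark);

        return 0;
}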
/* MC_CMD_FILTER_OP_OUT msgresponse */
#define MC_CMD_FILTER_OP_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -9488,14 +8875,15 @@
#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
/* enum: guaranteed invalid filter handle (low 32 bits) */
-#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
/* enum: guaranteed invalid filter handle (high 32 bits) */
-#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
/* identifies the type of operation requested */
#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_FILTER_OP_EXT_IN/OP */
/* Returned filter handle (for insert / subscribe operations). Note that these
@@ -9523,21 +8911,22 @@
#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
/* enum: read the list of supported RX filter matches */
-#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
/* enum: read flags indicating restrictions on filter insertion for the calling
* client
*/
-#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
/* enum: read properties relating to security rules (Medford-only; for use by
* SolarSecure apps, not directly by drivers. See SF-114946-SW.)
*/
-#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
* encapsulated frames, which follow a different match sequence to normal
* frames (Medford only)
*/
-#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -9545,10 +8934,12 @@
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* number of supported match types */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
/* array of supported match types (valid MATCH_FIELDS values for
* MC_CMD_FILTER_OP) sorted in decreasing priority order
*/
@@ -9561,10 +8952,12 @@
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* bitfield of filter insertion restrictions */
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
@@ -9578,28 +8971,37 @@
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
/* identifies the type of operation requested */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
/* a version number representing the set of rule lookups that are implemented
* by the currently running firmware
*/
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4
/* enum: implements lookup sequences described in SF-114946-SW draft C */
-#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
/* the number of nodes in the subnet map */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4
/* the number of entries in one subnet map node */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4
/* minimum valid value for a subnet ID in a subnet map leaf */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4
/* maximum valid value for a subnet ID in a subnet map leaf */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4
/* the number of entries in the local and remote port range maps */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4
/* minimum valid value for a portrange ID in a port range map leaf */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4
/* maximum valid value for a portrange ID in a port range map leaf */
#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4
/***********************************/
@@ -9607,7 +9009,9 @@
* Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
* Please note that this interface is only of use to debug tools which have
* knowledge of firmware and hardware data structures; nothing here is intended
- * for use by normal driver code.
+ * for use by normal driver code. Note that although this command is in the
+ * Admin privilege group, in tamperproof adapters, only read operations are
+ * permitted.
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
#undef MC_CMD_0xe5_PRIVILEGE_CTG
@@ -9618,42 +9022,58 @@
#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
/* identifies the target of the operation */
#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
/* enum: RX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
/* enum: TX dispatcher CPU */
-#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
-/* enum: Lookup engine (with original metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* enum: Lookup engine (with requested metadata format) */
-#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
/* enum: RX1 dispatcher CPU (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
/* enum: Miscellaneous other state (only valid for Medford) */
-#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
-/* enum: read a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
-/* enum: write a word of DICPU DMEM or a LUE entry */
-#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
-/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
-#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
+ * tamperproof adapters.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
+ * permitted on tamperproof adapters.
+ */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
/* data memory address (DICPU targets) or LUE index (LUE targets) */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
/* selector (for MISC_STATE target) */
#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
/* enum: Port to datapath mapping */
-#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
/* value to write (for LUE writes) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
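The RMW semantics are given in the field comments above as new = (old & mask) ^ value; choosing mask and value appropriately lets one set, clear, or toggle individual DMEM bits in a single operation. A hedged sketch, where dmem_read()/dmem_write() are assumed stand-ins for READ and WRITE round trips of this command (and, per the notes above, not permitted on tamperproof adapters):

/* Host-side model of the DMEM read-modify-write rule documented above:
 * new = (old & mask) ^ value. The read/write wrappers are hypothetical.
 */
#include <stdint.h>

extern uint32_t dmem_read(uint32_t address);		/* hypothetical */
extern void dmem_write(uint32_t address, uint32_t v);	/* hypothetical */

static void
dmem_rmw(uint32_t address, uint32_t and_mask, uint32_t xor_value)
{
	dmem_write(address, (dmem_read(address) & and_mask) ^ xor_value);
}

/* set bit 3:    dmem_rmw(addr, ~(1u << 3), 1u << 3);
 * clear bit 3:  dmem_rmw(addr, ~(1u << 3), 0);
 * toggle bit 3: dmem_rmw(addr, ~0u, 1u << 3);
 */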
@@ -9662,6 +9082,7 @@
#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
/* value read (for DMEM reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
/* value read (for LUE reads) */
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
@@ -9674,8 +9095,8 @@
#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
-#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
-#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
/***********************************/
@@ -9707,6 +9128,7 @@
#define MC_CMD_SET_PF_COUNT_IN_LEN 4
/* New number of PFs on the device. */
#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
@@ -9728,6 +9150,7 @@
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
/***********************************/
@@ -9743,6 +9166,7 @@
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
/* Identifies the port assignment for this function. */
#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
@@ -9761,8 +9185,10 @@
#define MC_CMD_ALLOC_VIS_IN_LEN 8
/* The minimum number of VIs that is acceptable */
#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
/* The maximum number of VIs that would be useful */
#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
* Use extended version in new code.
@@ -9770,21 +9196,26 @@
#define MC_CMD_ALLOC_VIS_OUT_LEN 8
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -9820,15 +9251,20 @@
#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
/* Max number of VFs before the SR-IOV stride and offset may need to be changed. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous. */
#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
/***********************************/
@@ -9844,19 +9280,24 @@
#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
/* Number of VFs currently enabled. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
/* Max number of VFs before the SR-IOV stride and offset may need to be changed. */
#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
/* RID offset of first VF from PF, or 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
/* RID offset of each subsequent VF from the previous, 0 for no change, or
* MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
*/
#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
@@ -9879,12 +9320,15 @@
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
/* The number of VIs allocated on this function */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
/* The base absolute VI number allocated to this function. Required to
* correctly interpret wakeup events.
*/
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
/* Function's port vi_shift value (always 0 on Huntington) */
#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
/***********************************/
@@ -9900,6 +9344,7 @@
#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
/* The VI number to query. */
#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
@@ -9933,6 +9378,7 @@
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
/* Combined metadata field. */
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
@@ -10015,6 +9461,7 @@
#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
/***********************************/
@@ -10030,6 +9477,7 @@
#define MC_CMD_FREE_PIOBUF_IN_LEN 4
/* Handle for allocated push I/O buffer. */
#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
@@ -10048,6 +9496,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
/* VI number to get information for. */
#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
@@ -10070,6 +9519,7 @@
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
/***********************************/
@@ -10085,6 +9535,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
/* VI number to set information for. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
/* Transaction processing steering hint 1 for use with the Rx Queue. */
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
@@ -10104,6 +9555,7 @@
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
@@ -10121,22 +9573,25 @@
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* enum: MISC. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
/* enum: IDO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
/* enum: RO. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
/* enum: TPH Type. */
-#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
@@ -10185,10 +9640,12 @@
/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
/* Amalgamated TLP info word. */
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
@@ -10229,7 +9686,7 @@
#define MC_CMD_SATELLITE_DOWNLOAD 0x91
#undef MC_CMD_0x91_PRIVILEGE_CTG
-#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
* are subtle, and so downloads must proceed in a number of phases.
@@ -10256,57 +9713,61 @@
* in a command from the host.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
/* Target for download. (These match the blob numbers defined in
* mc_flash_layout.h.)
*/
#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
/* enum: Valid in phase 2 (PHASE_IMEMS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
/* enum: Valid in phase 3 (PHASE_VECTORS) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
/* enum: Last chunk, containing checksum rather than data */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
/* enum: Abort download of this item */
-#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
/* Length of this chunk in bytes */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
/* Data for this chunk */
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
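The phase/target/chunk fields above imply a simple sender loop: step PHASE through RESET, IMEMS, VECTORS and READY, and within a phase stream each blob as numbered chunks terminated by CHUNK_ID_LAST carrying the checksum. A hedged sketch, where satellite_download() is a hypothetical wrapper that marshals one MC_CMD_SATELLITE_DOWNLOAD request per call:

/* Sketch of the chunked download loop implied by the fields above.
 * satellite_download() is assumed, not real driver API.
 */
#include <stdint.h>
#include <stddef.h>

extern int satellite_download(uint32_t phase, uint32_t target,
			      uint32_t chunk_id, const void *data,
			      size_t len);		/* hypothetical */

static int
download_blob(uint32_t target, const uint8_t *blob, size_t len,
	      uint32_t checksum)
{
	uint32_t chunk_id = 0;
	size_t off;
	int rc;

	for (off = 0; off < len; off += 4) {	/* 4-byte chunks for brevity */
		rc = satellite_download(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS,
					target, chunk_id++, blob + off, 4);
		if (rc != 0)
			return rc;
	}
	/* Last chunk carries the checksum rather than data. */
	return satellite_download(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS,
				  target,
				  MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST,
				  &checksum, sizeof(checksum));
}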
@@ -10317,24 +9778,26 @@
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
/* Same as MC_CMD_ERR field, but included as 0 in success cases */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
/* Extra status information */
#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
/* enum: Code download OK, completed. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
/* enum: Code download aborted as requested. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
/* enum: Code download OK so far, send next chunk. */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
/* enum: Download phases out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
/* enum: Bad target for this phase */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
/* enum: Chunk ID out of sequence */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
/* enum: Chunk length zero or too large */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
/* enum: Checksum was incorrect */
-#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
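On the response side, RESULT mirrors MC_CMD_ERR (0 on success) and the INFO word steers the sender. A small, purely illustrative classifier over the codes above:

/* Classify INFO: returns 1 to send the next chunk, 0 when the download
 * is finished (completed or aborted as requested), -1 on any error code.
 */
static int
download_step(uint32_t info)
{
	switch (info) {
	case MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK:
		return 1;	/* keep streaming chunks */
	case MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE:
	case MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED:
		return 0;	/* done */
	default:
		return -1;	/* ERR_BAD_PHASE/TARGET/CHUNK_ID/CHUNK_LEN/CHECKSUM */
	}
}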
/***********************************/
@@ -10356,6 +9819,7 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
@@ -10418,48 +9882,58 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
/* enum: Standard RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6
/* enum: BIST RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
/* enum: RXDP Test firmware image 3 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
/* enum: RXDP Test firmware image 4 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
/* enum: RXDP Test firmware image 5 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
/* enum: RXDP Test firmware image 6 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
/* enum: RXDP Test firmware image 7 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
/* enum: Standard TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6
/* enum: BIST TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
/* enum: TXDP CSR bus test firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -10469,39 +9943,43 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial RX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: RX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
/* enum: Low latency RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
/* enum: Packed stream RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
/* enum: RX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
* encapsulations (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
@@ -10511,36 +9989,42 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial TX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: TX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
/* enum: TX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa
/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
@@ -10549,6 +10033,7 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
@@ -10611,48 +10096,58 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
/* enum: Standard RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6
/* enum: BIST RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
/* enum: RXDP Test firmware image 3 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
/* enum: RXDP Test firmware image 4 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
/* enum: RXDP Test firmware image 5 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
/* enum: RXDP Test firmware image 6 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
/* enum: RXDP Test firmware image 7 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
/* enum: Standard TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6
/* enum: BIST TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
/* enum: TXDP CSR bus test firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -10662,39 +10157,43 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial RX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: RX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
/* enum: Low latency RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
/* enum: Packed stream RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
/* enum: RX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
* encapsulations (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
@@ -10704,38 +10203,45 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial TX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: TX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
/* enum: TX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa
/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -10766,6 +10272,30 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1
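Each capability flag above is a one-bit field located by its _LBN (least-significant bit number within the flags dword) and its _WIDTH. A generic extractor, as a sketch (assumes width < 32, which holds for every field here):

/* Extract a WIDTH-bit field starting at bit LBN of a dword; the widths
 * above are all 1. Illustrative only.
 */
static inline uint32_t
mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1);
}

/* e.g. tsa_bound =
 *     mcdi_field(flags2, MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN,
 *                MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH);
 */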
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -10779,18 +10309,18 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
/* enum: The caller is not permitted to access information on this PF. */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
/* enum: PF does not exist. */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
/* enum: PF does exist but is not assigned to any external port. */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
/* enum: This value indicates that the PF is assigned, but the assignment
 * cannot be expressed in this field. It is intended for a possible future
 * situation where a more complex PF-to-port mapping scheme is in use. A
 * future driver should look for a new field supporting the new scheme; a
 * current/old driver should treat this value as PF_NOT_ASSIGNED.
 */
-#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
/* One byte per PF containing the number of its VFs, indexed by PF number. A
* special value indicates that a PF is not present.
*/
@@ -10798,9 +10328,9 @@
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
/* enum: The caller is not permitted to access information on this PF. */
-/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
-/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
/* Number of VIs available for each external port */
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
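Several V2 fields carry the caveat "Not present on older firmware (check the length)": the response is only valid up to the length the MC actually returned, so later fields must be length-gated before reading. A sketch reusing the mcdi_dword() helper from earlier, with resp_len assumed to come from the MCDI completion:

/* Read FLAGS2 only if the returned response is long enough to contain
 * it; otherwise report the field absent. Illustrative only.
 */
static int
get_flags2(const uint8_t *resp, size_t resp_len, uint32_t *flags2)
{
	size_t end = MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST +
		     MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN;

	if (resp_len < end)
		return -1;	/* older firmware: field not present */
	*flags2 = mcdi_dword(resp, MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST);
	return 0;
}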
@@ -10826,6 +10356,7 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
@@ -10888,48 +10419,58 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
/* enum: Standard RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
/* enum: Low latency RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
/* enum: Packed stream RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6
/* enum: BIST RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
/* enum: RXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
/* enum: RXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
/* enum: RXDP Test firmware image 3 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
/* enum: RXDP Test firmware image 4 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
/* enum: RXDP Test firmware image 5 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
/* enum: RXDP Test firmware image 6 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
/* enum: RXDP Test firmware image 7 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
/* enum: RXDP Test firmware image 9 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
/* enum: Standard TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
/* enum: Low latency TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
/* enum: High packet rate TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6
/* enum: BIST TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
/* enum: TXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
/* enum: TXDP CSR bus test firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -10939,39 +10480,43 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial RX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: RX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant RX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
/* enum: Low latency RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
/* enum: Packed stream RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
/* enum: RX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa
/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* enum: RX PD firmware parsing but not filtering network overlay tunnel
* encapsulations (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
@@ -10981,38 +10526,45 @@
/* enum: reserved value - do not use (may indicate alternative interpretation
* of REV field in future)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
/* enum: Trivial TX PD firmware for early Huntington development (Huntington
* development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
/* enum: TX PD firmware with approximately Siena-compatible behaviour
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
/* enum: Full featured TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
/* enum: siena_compat variant TX PD firmware using PM rather than MAC
* (Huntington development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
/* enum: TX PD firmware handling layer 2 only for high packet rate performance
* tests (Medford development only)
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
/* enum: Rules engine TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa
/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
/* Hardware capabilities of NIC */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
/* Second word of flags. Not present on older firmware (check the length). */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
@@ -11043,6 +10595,30 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1
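
Each capability flag above is described by an _LBN (lowest bit number) and _WIDTH pair within the 32-bit FLAGS2 word at offset 20. A minimal sketch of how a driver might test one of these bits follows; the response is assumed to be a little-endian byte buffer already copied out of the MCDI completion, and the helper names are illustrative, not part of this header:

#include <stdint.h>
#include <string.h>

/* Illustrative helper: read a little-endian 32-bit word at a byte offset
 * (assumes a little-endian host; a real driver would use its own accessor).
 */
static uint32_t mcdi_read_dword(const uint8_t *resp, size_t ofst)
{
	uint32_t v;

	memcpy(&v, resp + ofst, sizeof(v));
	return v;
}

/* Illustrative helper: extract an LBN/WIDTH bit-field from a 32-bit word. */
static uint32_t mcdi_field(uint32_t word, unsigned int lbn, unsigned int width)
{
	return (word >> lbn) & ((1u << width) - 1u);
}

/* Test the CTPIO capability bit in FLAGS2 of a V3 response. FLAGS2 is
 * absent on older firmware, so check the response length first.
 */
static int nic_has_ctpio(const uint8_t *resp, size_t resp_len)
{
	uint32_t flags2;

	if (resp_len < MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST +
		       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN)
		return 0;
	flags2 = mcdi_read_dword(resp,
	    MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST);
	return mcdi_field(flags2,
	    MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN,
	    MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH) != 0;
}

The same read/extract pattern applies to every LBN/WIDTH field in these responses.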
/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
* on older firmware (check the length).
*/
@@ -11056,18 +10632,18 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
/* enum: The caller is not permitted to access information on this PF. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
/* enum: PF does not exist. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
/* enum: PF does exist but is not assigned to any external port. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
/* enum: This value indicates that PF is assigned, but it cannot be expressed
* in this field. It is intended for a possible future situation where a more
* complex scheme of PFs to ports mapping is being used. The future driver
* should look for a new field supporting the new scheme. The current/old
* driver should treat this value as PF_NOT_ASSIGNED.
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
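
PFS_TO_PORTS_ASSIGNMENT is an array of 16 single-byte entries (its _OFST define sits just above this hunk; the V4 twin below places it at offset 26), and the values 0xfc..0xff are sentinels rather than port numbers. A sketch of decoding one entry, under the same buffer assumptions as the previous example:

/* Illustrative: return the external port for a given PF, or -1 for the
 * special values; per the comment above, INCOMPATIBLE_ASSIGNMENT is
 * treated the same as PF_NOT_ASSIGNED by current drivers.
 */
static int pf_external_port(const uint8_t *resp, unsigned int pf)
{
	uint8_t v;

	if (pf >= MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM)
		return -1;
	v = resp[MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];
	switch (v) {
	case MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED:
	case MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT:
	case MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED:
	case MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT:
		return -1;
	default:
		return v;
	}
}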
/* One byte per PF containing the number of its VFs, indexed by PF number. A
* special value indicates that a PF is not present.
*/
@@ -11075,9 +10651,9 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
/* enum: The caller is not permitted to access information on this PF. */
-/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
/* enum: PF does not exist. */
-/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
/* Number of VIs available for each external port */
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
@@ -11108,11 +10684,11 @@
/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
* CTPIO is not mapped.
*/
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
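
Given the reported window mode, a VI's register window starts at vi * window_size within the BAR, with PIO at +4k and, for the 16k and 64k modes, CTPIO at +12k. A sketch of that address arithmetic (illustrative only):

#include <stddef.h>
#include <stdint.h>

/* Illustrative: byte offset of a VI's window within the BAR, derived from
 * the reported VI_WINDOW_MODE. Returns 0 for an unknown mode.
 */
static size_t vi_window_offset(uint8_t mode, unsigned int vi)
{
	switch (mode) {
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
		return (size_t)vi * 8192;	/* PIO at +4k, CTPIO not mapped */
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
		return (size_t)vi * 16384;	/* PIO at +4k, CTPIO at +12k */
	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
		return (size_t)vi * 65536;	/* PIO at +4k, CTPIO at +12k */
	default:
		return 0;
	}
}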
/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
* (SF-115995-SW) in the present configuration of firmware and port mode.
*/
@@ -11124,6 +10700,723 @@
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
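
Since both cache-size fields are binary logarithms, a reported value of 6, for example, means a 64-entry descriptor cache. A one-step decode, under the same response-buffer assumption as the earlier sketches:

#include <stdint.h>

/* Illustrative: decode the 2^n descriptor cache sizes from a V4 response
 * buffer (resp points at the start of the MCDI response payload; each
 * field is a single byte, so it can be indexed directly).
 */
static void decode_desc_cache_sizes(const uint8_t *resp,
				    unsigned int *rx, unsigned int *tx)
{
	*rx = 1u << resp[MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST];
	*tx = 1u << resp[MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST];
}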
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
+
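The new MAC_STATS_NUM_STATS field directly sizes the stats DMA buffer: a driver wanting the full array must provide at least NUM_STATS 64-bit slots. A sketch of the sizing logic (the allocation strategy is illustrative; a real driver would use its DMA allocator):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative: size a MAC stats buffer from a V4 response. Reads the
 * 16-bit little-endian count at offset 76 and allocates count 64-bit
 * slots, including the final GENERATION_END entry; a shorter buffer would
 * only receive a truncated stats array.
 */
static uint64_t *alloc_mac_stats_buf(const uint8_t *resp, size_t *nstats)
{
	uint16_t n;

	memcpy(&n,
	       resp + MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST,
	       sizeof(n));			/* assumes little-endian host */
	*nstats = n;
	return calloc(n, sizeof(uint64_t));
}
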
+/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+
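FILTER_ACTION_MARK_MAX is only meaningful when the FILTER_ACTION_MARK bit of FLAGS2 is set, so a driver should gate on the flag before using the limit. A sketch reusing the illustrative mcdi_read_dword()/mcdi_field() helpers from the first example:

/* Illustrative: maximum MATCH_MARK_VALUE supported, or 0 if the mark
 * filter action is not available on this firmware.
 */
static uint32_t filter_mark_max(const uint8_t *resp, size_t resp_len)
{
	uint32_t flags2;

	if (resp_len < MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
		return 0;
	flags2 = mcdi_read_dword(resp,
	    MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST);
	if (!mcdi_field(flags2,
	    MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN,
	    MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH))
		return 0;
	return mcdi_read_dword(resp,
	    MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST);
}
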
/***********************************/
/* MC_CMD_V2_EXTN
@@ -11144,7 +11437,16 @@
#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
-#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/* enum: MCDI command directed to or response originating from the MC. */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0
+/* enum: MCDI command directed to a TSA controller. MCDI responses of this type
+ * are not defined.
+ */
+#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
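
This hunk narrows UNUSED2 from 6 bits to 2 to carve a 4-bit MESSAGE_TYPE field out of bits 28-31 of the V2 extension header dword. A sketch of inserting the type when building that word (the insertion helper is illustrative):

/* Illustrative helper: insert an LBN/WIDTH field into a 32-bit word. */
static uint32_t mcdi_insert(uint32_t word, unsigned int lbn,
			    unsigned int width, uint32_t value)
{
	uint32_t mask = ((1u << width) - 1u) << lbn;

	return (word & ~mask) | ((value << lbn) & mask);
}

/* Mark a V2 extension header word as an MC-directed command; the other
 * V2_EXTN fields are omitted for brevity.
 */
static uint32_t build_v2_extn_hdr(void)
{
	return mcdi_insert(0, MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN,
			   MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH,
			   MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC);
}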
/***********************************/
@@ -11163,6 +11465,7 @@
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
/***********************************/
@@ -11178,6 +11481,7 @@
#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
/* the bucket id */
#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
@@ -11196,17 +11500,22 @@
#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
/* the bucket id */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
/* the rate in mbps */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
/* the desired maximum fill level */
#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
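
The extended bucket-init request is three little-endian dwords at the offsets defined above. A sketch of filling it, with a write helper mirroring the read helper from the first example (both illustrative):

#include <stdint.h>
#include <string.h>

/* Illustrative helper: store a little-endian 32-bit word at a byte offset
 * (assumes a little-endian host).
 */
static void mcdi_write_dword(uint8_t *req, size_t ofst, uint32_t v)
{
	memcpy(req + ofst, &v, sizeof(v));
}

/* Fill an MC_CMD_TCM_BUCKET_INIT_EXT_IN request: bucket id, rate in mbps,
 * and the desired maximum fill level.
 */
static void tcm_bucket_init_ext(uint8_t req[MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN],
				uint32_t bucket, uint32_t rate_mbps,
				uint32_t max_fill)
{
	mcdi_write_dword(req, MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST, bucket);
	mcdi_write_dword(req, MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST, rate_mbps);
	mcdi_write_dword(req, MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST,
			 max_fill);
}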
/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
@@ -11225,10 +11534,13 @@
#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
/* bitmask of the priority queues this txq is added to when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -11237,25 +11549,32 @@
#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to bucket associated with outer
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to bucket associated with inner
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
/* the txq id */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
/* bitmask of the priority queues this txq is added to when it is inserted. */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
@@ -11264,18 +11583,23 @@
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
/* the reaction point (RP) bucket */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
/* an already reserved bucket (typically set to bucket associated with outer
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
/* an already reserved bucket (typically set to bucket associated with inner
* vswitch)
*/
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
/* the min bucket (typically for ETS/minimum bandwidth) */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
/* the static priority associated with the txq */
#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
@@ -11294,8 +11618,10 @@
#define MC_CMD_LINK_PIOBUF_IN_LEN 8
/* Handle for allocated push I/O buffer. */
#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
@@ -11314,6 +11640,7 @@
#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
/* Function Local Instance (VI) number. */
#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
@@ -11332,20 +11659,23 @@
#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
/* The port to connect to the v-switch's upstream port. */
#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of v-switch to create. */
#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
/* enum: VEB */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
/* enum: VEPA (obsolete) */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
/* enum: MUX */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
/* enum: Snapper specific; semantics TBD */
-#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
/* Flags controlling v-port creation */
#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
@@ -11356,6 +11686,7 @@
* v-ports with this number of tags.
*/
#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
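
Allocating a v-switch is likewise a matter of filling four dwords: upstream port, type, flags, and the VLAN-tag budget for attached v-ports. A sketch for a VEB v-switch with automatic port creation, reusing the illustrative mcdi_write_dword() helper from the bucket-init example:

/* Illustrative: build an MC_CMD_VSWITCH_ALLOC request for a VEB v-switch
 * with automatic port creation and room for one VLAN tag per v-port.
 */
static void vswitch_alloc_veb(uint8_t req[MC_CMD_VSWITCH_ALLOC_IN_LEN],
			      uint32_t upstream_port_id)
{
	uint32_t flags = 1u << MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN;

	mcdi_write_dword(req, MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST,
			 upstream_port_id);
	mcdi_write_dword(req, MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST,
			 MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
	mcdi_write_dword(req, MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST, flags);
	mcdi_write_dword(req, MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST, 1);
}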
@@ -11374,6 +11705,7 @@
#define MC_CMD_VSWITCH_FREE_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
@@ -11394,6 +11726,7 @@
#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
/* The port to which the v-switch is connected. */
#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
@@ -11412,28 +11745,31 @@
#define MC_CMD_VPORT_ALLOC_IN_LEN 20
/* The port to which the v-switch is connected. */
#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of the new v-port. */
#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
/* enum: VLAN (obsolete) */
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
/* enum: VEB (obsolete) */
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
/* enum: VEPA (obsolete) */
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
/* enum: A normal v-port receives packets which match a specified MAC and/or
* VLAN.
*/
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
/* enum: An expansion v-port receives packets which don't match any other
* v-port.
*/
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
/* enum: A test v-port receives packets which match any filters installed by
* its downstream components.
*/
-#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
/* Flags controlling v-port creation */
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
@@ -11443,8 +11779,10 @@
* v-switch.
*/
#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -11454,6 +11792,7 @@
#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
/* The handle of the new v-port */
#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
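
VLAN_TAGS packs up to two 16-bit tags into a single dword, tag 0 in bits 0-15 and tag 1 in bits 16-31, with NUM_VLAN_TAGS saying how many are valid. A sketch of the packing:

/* Illustrative: pack two 16-bit VLAN tags into the VLAN_TAGS dword of an
 * MC_CMD_VPORT_ALLOC request (tag 0 in the low half, tag 1 in the high).
 */
static uint32_t vport_vlan_tags(uint16_t tag0, uint16_t tag1)
{
	return ((uint32_t)tag0 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN) |
	       ((uint32_t)tag1 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN);
}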
/***********************************/
@@ -11469,6 +11808,7 @@
#define MC_CMD_VPORT_FREE_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_FREE_OUT msgresponse */
#define MC_CMD_VPORT_FREE_OUT_LEN 0
@@ -11487,18 +11827,23 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Flags controlling v-adaptor creation */
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
/* The number of VLAN tags to transparently insert/remove. */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
@@ -11507,7 +11852,7 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
/* enum: Derive the MAC address from the upstream port */
-#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
@@ -11526,6 +11871,7 @@
#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
@@ -11544,6 +11890,7 @@
#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* The new MAC address to assign to this v-adaptor */
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
@@ -11565,6 +11912,7 @@
#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
@@ -11586,15 +11934,19 @@
#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
/* The port to which the v-adaptor is connected. */
#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
/* The number of VLAN tags that may still be added */
#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -11610,8 +11962,10 @@
#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
/* The port to assign. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
/* The target function to modify. */
#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
@@ -11633,9 +11987,13 @@
/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
/* Write enable bits 0-3, set to write, clear to read. */
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
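
Note WRITE_MASK_LBN is 128: LBNs are structure-relative, so bit numbers can exceed 31. Assuming a little-endian MCDI buffer, bit n lives in byte n/8 at position n%8, which puts the write mask in the final byte of the 17-byte request. A sketch under that assumption (mcdi_set_bit and a64_regions_mark_write are hypothetical names):

        #include <stdint.h>

        /* Hypothetical helper, not the sfc base API: set one bit of an MCDI
         * structure given a structure-relative _LBN. In a little-endian
         * buffer, bit n lives in byte n / 8 at position n % 8. */
        static inline void
        mcdi_set_bit(uint8_t *buf, unsigned int lbn)
        {
                buf[lbn / 8] |= (uint8_t)(1u << (lbn % 8));
        }

        /* Example: mark region 2 for writing (bits 0-3 cover regions 0-3;
         * set to write, clear to read). */
        static void
        a64_regions_mark_write(uint8_t *buf)
        {
                mcdi_set_bit(buf, MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN + 2);
        }
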
@@ -11647,9 +12005,13 @@
*/
#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
/***********************************/
@@ -11665,11 +12027,13 @@
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
/* The handle of the owning upstream port */
#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
/* The handle of the new Onload stack */
#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
/***********************************/
@@ -11685,6 +12049,7 @@
#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
/* The handle of the Onload stack */
#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
@@ -11703,21 +12068,24 @@
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
/* The handle of the owning upstream port */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* The type of context to allocate */
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
/* enum: Allocate a context for exclusive use. The key and indirection table
* must be explicitly configured.
*/
-#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
/* enum: Allocate a context for shared use; this will spread across a range of
* queues, but the key and indirection table are pre-configured and may not be
 * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
*/
-#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
/* Number of queues spanned by this context, in the range 1-64; valid offsets
* in the indirection table will be in the range 0 to NUM_QUEUES-1.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
@@ -11726,8 +12094,9 @@
* handle.
*/
#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
/* enum: guaranteed invalid RSS context handle value */
-#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
/***********************************/
@@ -11743,6 +12112,7 @@
#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
@@ -11761,6 +12131,7 @@
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
@@ -11782,6 +12153,7 @@
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
@@ -11803,6 +12175,7 @@
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* The 128-byte indirection table (1 byte per entry) */
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
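
The SET_TABLE request is fixed-size: a 4-byte context handle followed by the full 128-entry table, one byte per entry. A minimal sketch (not the sfc base API; rss_set_table_request is a hypothetical name) that spreads entries evenly across the queues, keeping each entry in the 0 to NUM_QUEUES-1 range required by MC_CMD_RSS_CONTEXT_ALLOC above:

        #include <stdint.h>
        #include <string.h>

        static void
        rss_set_table_request(uint8_t *buf, uint32_t rss_context_id,
                              unsigned int num_queues)
        {
                unsigned int i;

                memset(buf, 0, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);

                /* Dword 0: the RSS context handle, little-endian. */
                buf[0] = (uint8_t)(rss_context_id >> 0);
                buf[1] = (uint8_t)(rss_context_id >> 8);
                buf[2] = (uint8_t)(rss_context_id >> 16);
                buf[3] = (uint8_t)(rss_context_id >> 24);

                /* Bytes 4..131: one queue index per table entry. */
                for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
                        buf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST + i] =
                            (uint8_t)(i % num_queues);
        }
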
@@ -11824,6 +12197,7 @@
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
@@ -11845,6 +12219,7 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* Hash control flags. The _EN bits are always supported, but new modes are
* available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
* in this case, the MODE fields may be set to non-zero values, and will take
@@ -11858,6 +12233,7 @@
* particular packet type.)
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
@@ -11898,6 +12274,7 @@
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
@@ -11915,6 +12292,7 @@
* always be used for a SET regardless of old/new driver vs. old/new firmware.
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
@@ -11952,11 +12330,13 @@
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
/* The handle of the owning upstream port */
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
* offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
* referenced RSS contexts must span no more than this number.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
@@ -11965,8 +12345,9 @@
* handle.
*/
#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
/* enum: guaranteed invalid .1p mapping handle value */
-#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
/***********************************/
@@ -11982,6 +12363,7 @@
#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
@@ -12000,6 +12382,7 @@
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
* handle)
*/
@@ -12023,6 +12406,7 @@
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
/* The handle of the .1p mapping */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
@@ -12049,10 +12433,13 @@
#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
/* Base absolute interrupt vector number. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
/***********************************/
@@ -12070,10 +12457,13 @@
* let the system find a suitable base.
*/
#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
/* Number of interrupt vectors allocated to this PF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
/* Number of interrupt vectors to allocate per VF. */
#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
@@ -12092,6 +12482,7 @@
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to add */
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -12113,6 +12504,7 @@
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
/* The handle of the v-port */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
/* MAC address to remove */
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
@@ -12134,6 +12526,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
@@ -12141,6 +12534,7 @@
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
/* The number of MAC addresses returned */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
/* Array of MAC addresses */
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
@@ -12163,8 +12557,10 @@
#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
/* The handle of the v-port */
#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
/* Flags requesting what should be changed. */
#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
@@ -12174,14 +12570,17 @@
* v-switch.
*/
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
/* The actual VLAN tags to insert/remove */
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
/* The number of MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
/* MAC addresses to add */
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
@@ -12190,6 +12589,7 @@
/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
@@ -12207,15 +12607,18 @@
#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
/* The handle of the v-port */
#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
/* The number of VLAN tags that may be used on a v-adaptor connected to this
* EVB port.
*/
#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
/***********************************/
@@ -12228,14 +12631,16 @@
#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
#undef MC_CMD_0xab_PRIVILEGE_CTG
-#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
/* Index of the first buffer table entry. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
/* Number of buffer table entries to dump. */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
@@ -12260,16 +12665,17 @@
/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
/* enum: pad to 64 bytes */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
/* enum: pad to 128 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
/* enum: pad to 256 bytes (Medford only) */
-#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
@@ -12290,6 +12696,7 @@
/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
@@ -12314,8 +12721,10 @@
#define MC_CMD_GET_CLOCK_OUT_LEN 8
/* System frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
/* DPCPU frequency, MHz */
#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/***********************************/
@@ -12325,69 +12734,83 @@
#define MC_CMD_SET_CLOCK 0xad
#undef MC_CMD_0xad_PRIVILEGE_CTG
-#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_CLOCK_IN msgrequest */
#define MC_CMD_SET_CLOCK_IN_LEN 28
/* Requested frequency in MHz for system clock domain */
#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
/* enum: Leave the system clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for inter-core clock domain */
#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
/* enum: Leave the inter-core clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for DPCPU clock domain */
#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
/* enum: Leave the DPCPU clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for PCS clock domain */
#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
/* enum: Leave the PCS clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for MC clock domain */
#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
/* enum: Leave the MC clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for rmon clock domain */
#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
/* enum: Leave the rmon clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
/* Requested frequency in MHz for vswitch clock domain */
#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
/* enum: Leave the vswitch clock domain frequency unchanged */
-#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
/* MC_CMD_SET_CLOCK_OUT msgresponse */
#define MC_CMD_SET_CLOCK_OUT_LEN 28
/* Resulting system frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
/* enum: The system clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
/* Resulting inter-core frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
/* enum: The inter-core clock domain doesn't exist / isn't used */
-#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
/* Resulting DPCPU frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
/* enum: The dpcpu clock domain doesn't exist */
-#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
/* Resulting PCS frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
/* enum: The PCS clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
/* Resulting MC frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
/* enum: The MC clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
/* Resulting rmon frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
/* enum: The rmon clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
/* Resulting vswitch frequency in MHz */
#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
/* enum: The vswitch clock domain doesn't exist / isn't controlled */
-#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
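
Every input dword of MC_CMD_SET_CLOCK accepts 0x0 (*_DONT_CHANGE), so one domain can be retuned while the other six are left alone; on output, 0x0 instead reports that the domain doesn't exist or isn't controlled. A minimal sketch (not the sfc base API; set_clock_sys_only is a hypothetical name) requesting a new system-clock frequency only:

        #include <stdint.h>
        #include <string.h>

        static void
        set_clock_sys_only(uint8_t *buf, uint32_t sys_freq_mhz)
        {
                /* Zeroing the 28-byte request leaves every domain at
                 * DONT_CHANGE (0x0). */
                memset(buf, 0, MC_CMD_SET_CLOCK_IN_LEN);

                /* Fill only SYS_FREQ, little-endian. */
                buf[MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST + 0] = (uint8_t)(sys_freq_mhz >> 0);
                buf[MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST + 1] = (uint8_t)(sys_freq_mhz >> 8);
                buf[MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST + 2] = (uint8_t)(sys_freq_mhz >> 16);
                buf[MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST + 3] = (uint8_t)(sys_freq_mhz >> 24);
        }
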
/***********************************/
@@ -12397,27 +12820,28 @@
#define MC_CMD_DPCPU_RPC 0xae
#undef MC_CMD_0xae_PRIVILEGE_CTG
-#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DPCPU_RPC_IN msgrequest */
#define MC_CMD_DPCPU_RPC_IN_LEN 36
#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
/* enum: RxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
/* enum: TxDPCPU0 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
/* enum: TxDPCPU1 */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
/* enum: RxDPCPU1 (Medford only) */
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
* DPCPU_RX0)
*/
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
* DPCPU_TX0)
*/
-#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
* initialised to zero
*/
@@ -12425,15 +12849,15 @@
#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
@@ -12444,11 +12868,11 @@
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
@@ -12457,21 +12881,24 @@
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
-#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
/* Register data to write. Only valid in write/write-read. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
/* Register address. */
#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
/* MC_CMD_DPCPU_RPC_OUT msgresponse */
#define MC_CMD_DPCPU_RPC_OUT_LEN 36
#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
/* DATA */
#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
@@ -12482,9 +12909,13 @@
#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
/***********************************/
@@ -12500,6 +12931,7 @@
#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
/* Interrupt level relative to base for function. */
#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
@@ -12512,14 +12944,15 @@
#define MC_CMD_SHMBOOT_OP 0xe6
#undef MC_CMD_0xe6_PRIVILEGE_CTG
-#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SHMBOOT_OP_IN msgrequest */
#define MC_CMD_SHMBOOT_OP_IN_LEN 4
/* Identifies the operation to perform */
#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
/* enum: Copy slave_data section to the slave core. (Greenport only) */
-#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
@@ -12532,13 +12965,16 @@
#define MC_CMD_CAP_BLK_READ 0xe7
#undef MC_CMD_0xe7_PRIVILEGE_CTG
-#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_CAP_BLK_READ_IN msgrequest */
#define MC_CMD_CAP_BLK_READ_IN_LEN 12
#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
@@ -12559,53 +12995,77 @@
#define MC_CMD_DUMP_DO 0xe8
#undef MC_CMD_0xe8_PRIVILEGE_CTG
-#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_DO_IN msgrequest */
#define MC_CMD_DUMP_DO_IN_LEN 52
#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_PADDING_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
-#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
-#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
-#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
-#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
/* enum: The uart port this command was received over (if using a uart
* transport)
*/
-#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
-#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
-#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/* MC_CMD_DUMP_DO_OUT msgresponse */
#define MC_CMD_DUMP_DO_OUT_LEN 4
#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
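
For HOST_MEMORY destinations the 64-bit DMA address is carried as a _LO/_HI dword pair at offsets 36 and 40. A minimal sketch of the split (not the sfc base API; dump_do_set_host_addr is a hypothetical name; little-endian dwords assumed):

        #include <stdint.h>

        static void
        dump_do_set_host_addr(uint8_t *buf, uint64_t dma_addr)
        {
                uint32_t lo = (uint32_t)dma_addr;
                uint32_t hi = (uint32_t)(dma_addr >> 32);
                unsigned int i;

                for (i = 0; i < MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN; i++)
                        buf[MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST + i] =
                            (uint8_t)(lo >> (8 * i));
                for (i = 0; i < MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN; i++)
                        buf[MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST + i] =
                            (uint8_t)(hi >> (8 * i));
        }
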
/***********************************/
@@ -12615,41 +13075,64 @@
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
#undef MC_CMD_0xe9_PRIVILEGE_CTG
-#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
/***********************************/
@@ -12661,17 +13144,20 @@
#define MC_CMD_SET_PSU 0xea
#undef MC_CMD_0xea_PRIVILEGE_CTG
-#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_SET_PSU_IN msgrequest */
#define MC_CMD_SET_PSU_IN_LEN 12
#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
-#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_PARAM_LEN 4
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
-#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
-#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_LEN 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
/* desired value, e.g. voltage in mV */
#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define MC_CMD_SET_PSU_IN_VALUE_LEN 4
/* MC_CMD_SET_PSU_OUT msgresponse */
#define MC_CMD_SET_PSU_OUT_LEN 0
@@ -12692,7 +13178,9 @@
/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
/***********************************/
@@ -12704,7 +13192,7 @@
#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
#undef MC_CMD_0xed_PRIVILEGE_CTG
-#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
@@ -12730,12 +13218,16 @@
#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
/* Offset at which to write the data */
#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
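
The CHECKSUM dword covers every byte after itself (OFFSET, LENGTH, RESERVED, DATA). A sketch of computing it, with one loud assumption: the standard bit-reflected IEEE 802.3 CRC-32 is used below, since this header doesn't pin down the firmware's exact CRC parameters (crc32_ieee and uart_send_data_checksum are hypothetical names):

        #include <stddef.h>
        #include <stdint.h>

        /* Assumption: standard bit-reflected CRC-32, IEEE 802.3 polynomial. */
        static uint32_t
        crc32_ieee(const uint8_t *p, size_t n)
        {
                uint32_t crc = 0xffffffffu;
                size_t i;
                int b;

                for (i = 0; i < n; i++) {
                        crc ^= p[i];
                        for (b = 0; b < 8; b++) {
                                if (crc & 1u)
                                        crc = (crc >> 1) ^ 0xedb88320u;
                                else
                                        crc >>= 1;
                        }
                }
                return ~crc;
        }

        /* msg points at a complete UART_SEND_DATA_OUT buffer of msg_len
         * bytes (16-byte header plus DATA); the CRC starts at OFFSET. */
        static uint32_t
        uart_send_data_checksum(const uint8_t *msg, size_t msg_len)
        {
                return crc32_ieee(msg + MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST,
                                  msg_len - MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST);
        }
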
@@ -12759,12 +13251,16 @@
#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
/* CRC32 over OFFSET, LENGTH, RESERVED */
#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
/* Offset from which to read the data */
#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
/* Length of data */
#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
/* MC_CMD_UART_RECV_DATA_IN msgresponse */
#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
@@ -12772,12 +13268,16 @@
#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
/* Offset at which to write the data */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
/* Length of data */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
/* Reserved for future use */
#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
@@ -12791,14 +13291,16 @@
#define MC_CMD_READ_FUSES 0xf0
#undef MC_CMD_0xf0_PRIVILEGE_CTG
-#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_FUSES_IN msgrequest */
#define MC_CMD_READ_FUSES_IN_LEN 8
/* Offset in OTP to read */
#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
/* Length of data to read in bytes */
#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
/* MC_CMD_READ_FUSES_OUT msgresponse */
#define MC_CMD_READ_FUSES_OUT_LENMIN 4
@@ -12806,6 +13308,7 @@
#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
/* Length of returned OTP data in bytes */
#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
/* Returned data */
#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
@@ -12820,7 +13323,7 @@
#define MC_CMD_KR_TUNE 0xf1
#undef MC_CMD_0xf1_PRIVILEGE_CTG
-#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_KR_TUNE_IN msgrequest */
#define MC_CMD_KR_TUNE_IN_LENMIN 4
@@ -12830,26 +13333,30 @@
#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
/* enum: Get current RXEQ settings */
-#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
/* enum: Override RXEQ settings */
-#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
/* enum: Get current TX Driver settings */
-#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
-#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
/* enum: Force KR Serdes reset / recalibration */
-#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
* signal.
*/
-#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
* caller should call this command repeatedly after starting eye plot, until no
* more data is returned.
*/
-#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
/* enum: Read Figure Of Merit (eye quality, higher is better). */
-#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+/* enum: Start/stop link training frames */
+#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8
+/* enum: Issue KR link training command (control training coefficients) */
+#define MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -12883,44 +13390,98 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
/* enum: Attenuation (0-15, Huntington) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
/* enum: CTLE Boost (0-15, Huntington) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
* positive, Medford - 0-31)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
* positive, Medford - 0-31)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
* positive, Medford - 0-16)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
* positive, Medford - 0-16)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
* positive, Medford - 0-16)
*/
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
/* enum: Edge DFE DLEV (0-128 for Medford) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
/* enum: Variable Gain Amplifier (0-15, Medford) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
/* enum: CTLE EQ Capacitor (0-15, Medford) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
/* enum: CTLE EQ Resistor (0-7, Medford) */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
+/* enum: CDR integral loop code (Medford2) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
@@ -12985,39 +13546,39 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude (Huntington, Medford) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
/* enum: De-Emphasis Tap1 Fine */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
/* enum: De-Emphasis Tap2 Fine (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
/* enum: Pre-Emphasis Magnitude (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
/* enum: Pre-Emphasis Fine (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
/* enum: TX Slew Rate Coarse control (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
/* enum: TX Slew Rate Fine control (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
/* enum: TX Termination Impedance control (Huntington) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
/* enum: TX Amplitude Fine control (Medford) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
-/* enum: Pre-shoot Tap (Medford) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
-/* enum: De-emphasis Tap (Medford) */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford, Medford2) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
@@ -13078,7 +13639,27 @@
/* Align the arguments to 32 bits */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1
+/* Scan duration / cycle count */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
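The V2 request widens the old 32-bit lane number into a packed dword: LANE_NUM sits in bits 0-7, the ABS_REL flag in bit 31 selects absolute rather than port-relative lane numbering, and the BER dword at offset 8 carries the scan duration / cycle count. A minimal sketch of composing that dword, assuming the usual efx convention that an _LBN macro names a field's low bit number:

#include <stdint.h>

/* Compose the packed LANE dword of MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN.
 * lane_num occupies bits 0-7; absolute != 0 sets ABS_REL so the lane
 * number is interpreted as absolute rather than port-relative. */
static uint32_t eye_plot_v2_lane(uint8_t lane_num, int absolute)
{
    uint32_t dword;

    dword = (uint32_t)lane_num <<
        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN;
    if (absolute)
        dword |= 1u <<
            MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN;
    return dword;
}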
/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -13110,10 +13691,91 @@
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1
/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28
+/* Requested operation */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4
+/* Set INITIALIZE state */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4
+/* Set PRESET state */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4
+/* C(-1) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */
+/* C(0) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) request */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24
+/* C(-1) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */
+/* C(0) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) status */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(-1) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4
+/* C(0) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4
+/* C(+1) value */
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20
+#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4
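The LINK_TRAIN_CMD pair implements a coefficient-update handshake in the style of Clause 72 link training: the request carries hold/increment/decrement codes for C(-1), C(0) and C(+1), and the response reports per-coefficient status plus the resulting tap values. Below is a sketch of one update step under stated assumptions: mcdi_rpc() and the dword helpers are hypothetical stand-ins for a driver's own MCDI plumbing (MCDI payloads are little-endian), MC_CMD_KR_TUNE itself is defined earlier in this header, and the sub-opcode name is an assumption since its definition sits outside this hunk.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical MCDI transport: sends one request, fills in the response,
 * returns 0 on success or a negative error. */
int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
             uint8_t *resp, size_t resp_cap, size_t *resp_len);

void put_dword_le(uint8_t *b, unsigned int ofst, uint32_t v)
{
    b[ofst + 0] = (uint8_t)v;           /* MCDI payloads are little-endian */
    b[ofst + 1] = (uint8_t)(v >> 8);
    b[ofst + 2] = (uint8_t)(v >> 16);
    b[ofst + 3] = (uint8_t)(v >> 24);
}

uint32_t get_dword_le(const uint8_t *b, unsigned int ofst)
{
    return (uint32_t)b[ofst] | ((uint32_t)b[ofst + 1] << 8) |
           ((uint32_t)b[ofst + 2] << 16) | ((uint32_t)b[ofst + 3] << 24);
}

/* Ask the firmware to increment C(+1) on one lane while holding C(-1) and
 * C(0); returns the resulting CP1 status (e.g. STATUS_MAXIMUM when the tap
 * cannot go further) or a negative transport error. */
int kr_link_train_cp1_increment(uint32_t lane)
{
    uint8_t req[MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN] = { 0 };
    uint8_t resp[MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN];
    size_t resp_len;
    int rc;

    /* Sub-opcode byte; the enum value is defined earlier in this header
     * and the name used here is an assumption based on the message name. */
    req[MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST] =
        MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD;
    put_dword_le(req, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST, lane);
    /* INITIALIZE and PRESET stay zero: no state change requested. */
    put_dword_le(req, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST,
        MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD);
    put_dword_le(req, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST,
        MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD);
    put_dword_le(req, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST,
        MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT);
    rc = mcdi_rpc(MC_CMD_KR_TUNE, req, sizeof(req),
        resp, sizeof(resp), &resp_len);
    if (rc != 0)
        return rc;
    return (int)get_dword_le(resp,
        MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST);
}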
/***********************************/
@@ -13123,7 +13785,7 @@
#define MC_CMD_PCIE_TUNE 0xf2
#undef MC_CMD_0xf2_PRIVILEGE_CTG
-#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_PCIE_TUNE_IN msgrequest */
#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
@@ -13133,22 +13795,22 @@
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
/* enum: Get current RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
/* enum: Override RXEQ settings */
-#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
/* enum: Get current TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
/* enum: Override TX Driver settings */
-#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
-#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
* caller should call this command repeatedly after starting eye plot, until no
* more data is returned.
*/
-#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
-#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
+#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
/* Align the arguments to 32 bits */
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
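POLL_EYE_PLOT is explicitly a repeat-until-empty interface: after START_EYE_PLOT the caller keeps issuing the poll sub-command until a response carries no data. A sketch of that loop, reusing the hypothetical mcdi_rpc()/put_dword_le() helpers from the link-training sketch above; MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN is defined outside this hunk, and the 252-byte response buffer reflects the usual MCDI v1 payload ceiling (an assumption).

int pcie_eye_plot_dump(uint32_t lane)
{
    uint8_t req[MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN] = { 0 };
    uint8_t resp[252];          /* assumed MCDI v1 payload ceiling */
    size_t resp_len;
    int rc;

    req[MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST] =
        MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT;
    put_dword_le(req, MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST, lane);
    rc = mcdi_rpc(MC_CMD_PCIE_TUNE, req, sizeof(req),
        resp, sizeof(resp), &resp_len);
    if (rc != 0)
        return rc;

    /* Poll for rows of BER data until an empty response signals the end
     * of the plot, exactly as the POLL_EYE_PLOT comment prescribes.  The
     * poll request is just the OP byte plus the 3 reserved bytes. */
    req[MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST] =
        MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT;
    do {
        rc = mcdi_rpc(MC_CMD_PCIE_TUNE, req, 4,
            resp, sizeof(resp), &resp_len);
        if (rc != 0)
            return rc;
        /* ... hand resp_len bytes of sample data to the caller ... */
    } while (resp_len > 0);
    return 0;
}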
@@ -13182,46 +13844,46 @@
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
/* enum: Attenuation (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
/* enum: CTLE Boost (0-15) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
/* enum: DFE DLev */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
/* enum: Figure of Merit */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
/* enum: CTLE EQ Capacitor (HF Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
/* enum: CTLE EQ Resistor (DC Gain) */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
@@ -13285,15 +13947,15 @@
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
/* enum: TxMargin (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
/* enum: TxSwing (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
/* enum: De-emphasis coefficient C(-1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
/* enum: De-emphasis coefficient C(0) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
/* enum: De-emphasis coefficient C(+1) (PIPE) */
-#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
/* Enum values, see field(s): */
@@ -13312,6 +13974,7 @@
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
@@ -13355,38 +14018,46 @@
#define MC_CMD_LICENSING_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_IN_OP_OFST 0
+#define MC_CMD_LICENSING_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
-#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
/* enum: report counts of installed licenses */
-#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
/* MC_CMD_LICENSING_OUT msgresponse */
#define MC_CMD_LICENSING_OUT_LEN 28
/* count of application keys which are valid */
#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being blacklisted */
#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
/* count of application keys which are invalid due to being for the wrong node
*/
#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
*/
#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
-#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
-#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
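GET_KEY_STATS is a plain request/response: one OP dword in, a fixed set of 32-bit counters out at the offsets above. A sketch of reading the valid/invalid key counts, again with the hypothetical helpers from the link-training sketch (MC_CMD_LICENSING itself is defined earlier in the header):

int licensing_key_stats(uint32_t *valid, uint32_t *invalid)
{
    uint8_t req[MC_CMD_LICENSING_IN_LEN];
    uint8_t resp[MC_CMD_LICENSING_OUT_LEN];
    size_t resp_len;
    int rc;

    put_dword_le(req, MC_CMD_LICENSING_IN_OP_OFST,
        MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
    rc = mcdi_rpc(MC_CMD_LICENSING, req, sizeof(req),
        resp, sizeof(resp), &resp_len);
    if (rc != 0)
        return rc;
    *valid = get_dword_le(resp, MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST);
    *invalid = get_dword_le(resp,
        MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST);
    return 0;
}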
/***********************************/
@@ -13403,37 +14074,44 @@
#define MC_CMD_LICENSING_V3_IN_LEN 4
/* identifies the type of operation requested */
#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_IN_OP_LEN 4
/* enum: re-read and apply licenses after a license key partition update; note
* that this operation returns a zero-length response
*/
-#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
/* enum: report counts of installed licenses. Returns EAGAIN if license
* processing (updating) has been started but not yet completed.
*/
-#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
/* MC_CMD_LICENSING_V3_OUT msgresponse */
#define MC_CMD_LICENSING_V3_OUT_LEN 88
/* count of keys which are valid */
#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
* MC_CMD_FC_OP_LICENSE)
*/
#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
/* count of keys which are invalid due to being unverifiable */
#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
/* count of keys which are invalid due to being for the wrong node */
#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
/* licensing state (for diagnostics; the exact meaning of the bits in this
 * field is private to the firmware)
*/
#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
/* licensing subsystem self-test report (for manftest) */
#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
/* enum: licensing subsystem self-test failed */
-#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
/* enum: licensing subsystem self-test passed */
-#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
/* bitmask of licensed applications */
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
@@ -13471,8 +14149,10 @@
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
/* type of license (e.g. 3) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
/* length of the license ID (in bytes) */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
/* the unique license ID of the adapter */
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
@@ -13512,15 +14192,17 @@
#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
/* application ID to query (LICENSED_APP_ID_xxx) */
#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
/***********************************/
@@ -13548,10 +14230,11 @@
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
/* state of this application */
#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
/* enum: no (or invalid) license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
/* enum: a valid license is present for the application */
-#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
/***********************************/
@@ -13600,12 +14283,14 @@
#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
/* application ID */
#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
/* enum: validate application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
/* enum: mask application */
-#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
/* arguments specific to this particular operation */
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
@@ -13626,8 +14311,10 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
/* application ID */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
/* validation challenge */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
@@ -13636,6 +14323,7 @@
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
/* feature expiry (time_t) */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
/* validation response */
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
@@ -13644,10 +14332,13 @@
#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
/* application ID */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
/* the type of operation requested */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
/* flag */
#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
@@ -13686,12 +14377,14 @@
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
/* application expiry time */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
/* application expiry units */
#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
/* enum: expiry units are accounting units */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
/* enum: expiry units are calendar days */
-#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
/* base MAC address of the NIC stored in NVRAM (note that this is a constant
 * value for a given NIC regardless of which function is calling; effectively
 * this is the PF0 base MAC address)
@@ -13712,7 +14405,7 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
#undef MC_CMD_0xd5_PRIVILEGE_CTG
-#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
@@ -13723,10 +14416,11 @@
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
/* whether to turn on or turn off the masked features */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
/* enum: turn the features off */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
/* enum: turn the features back on */
-#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
@@ -13743,29 +14437,31 @@
#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
#undef MC_CMD_0xd6_PRIVILEGE_CTG
-#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
/* operation code */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
/* enum: install a new license, overwriting any existing temporary license.
* This is an asynchronous operation owing to the time taken to validate an
* ECDSA license
*/
-#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
/* enum: clear the license immediately rather than waiting for the next power
* cycle
*/
-#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
* operation
*/
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
/* ECDSA license and signature */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
@@ -13773,23 +14469,26 @@
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
/* status code */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
/* enum: finished validating and installing license */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
/* enum: license validation and installation in progress */
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
/* enum: licensing error. More specific error messages are not provided to
* avoid exposing details of the licensing system to the client
*/
-#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
/* bitmask of licensed features */
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
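Because TEMPORARY_SET validates an ECDSA license asynchronously, the STATUS sub-operation exists purely so the caller can poll for completion. A sketch of that poll loop, with the same hypothetical mcdi_rpc()/dword helpers as in the earlier sketches:

int licensing_v3_temporary_wait(void)
{
    uint8_t req[MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN];
    uint8_t resp[MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN];
    size_t resp_len;
    uint32_t status;
    int rc;

    put_dword_le(req, MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST,
        MC_CMD_LICENSING_V3_TEMPORARY_STATUS);
    do {
        rc = mcdi_rpc(MC_CMD_LICENSING_V3_TEMPORARY, req, sizeof(req),
            resp, sizeof(resp), &resp_len);
        if (rc != 0)
            return rc;
        status = get_dword_le(resp,
            MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST);
        /* A real caller would sleep between polls. */
    } while (status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS);

    return status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK ? 0 : -1;
}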
@@ -13814,23 +14513,27 @@
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
* that these handles should be considered opaque to the host, although a value
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
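SET_PORT_SNIFF_CONFIG mixes a bit-flag dword (ENABLE, PROMISCUOUS) with mode and handle dwords. A sketch of enabling simple single-queue sniffing, leaning on the note above that 0xFFFFFFFF is never a valid RSS handle; helpers as before, and MC_CMD_SET_PORT_SNIFF_CONFIG itself is defined earlier in the header:

int port_sniff_enable(uint32_t rxq)
{
    uint8_t req[MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN] = { 0 };
    size_t resp_len;
    uint32_t flags;

    flags = (1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN) |
        (1u << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN);
    put_dword_le(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST, flags);
    put_dword_le(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST, rxq);
    put_dword_le(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST,
        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
    /* RX_CONTEXT only matters for RX_MODE_RSS; 0xFFFFFFFF is documented
     * above as never being a valid handle, so it is a safe filler. */
    put_dword_le(req, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST,
        0xffffffffu);
    /* The response is zero-length. */
    return mcdi_rpc(MC_CMD_SET_PORT_SNIFF_CONFIG, req, sizeof(req),
        NULL, 0, &resp_len);
}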
@@ -13845,7 +14548,7 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
#undef MC_CMD_0xf8_PRIVILEGE_CTG
-#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
@@ -13854,20 +14557,24 @@
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -13885,19 +14592,21 @@
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
/* the type of configuration setting to change */
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
* internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
*/
-#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
* internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
* boolean.)
*/
-#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
/* handle for the entity to update: queue handle, EVB port ID, etc. depending
* on the type of configuration setting being changed
*/
#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* new value: the details depend on the type of configuration setting being
* changed
*/
@@ -13923,12 +14632,14 @@
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
/* the type of configuration setting to read */
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
* the type of configuration setting being read
*/
#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
@@ -13962,21 +14673,25 @@
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
/* configuration flags */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
/* receive queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
/* enum: receive to just the specified queue */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
/* enum: receive to multiple queues using RSS context */
-#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
* that these handles should be considered opaque to the host, although a value
* of 0xFFFFFFFF is guaranteed never to be a valid handle.
*/
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
@@ -13991,7 +14706,7 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
#undef MC_CMD_0xfc_PRIVILEGE_CTG
-#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
@@ -14000,18 +14715,22 @@
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
/* configuration flags */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
/* receiving queue handle (for RSS mode, this is the base queue) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
/* receive mode */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
/* enum: receiving to just the specified queue */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
/* enum: receiving to multiple queues using RSS context */
-#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
/* RSS context (for RX_MODE_RSS) */
#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
/***********************************/
@@ -14027,16 +14746,22 @@
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
/* The rx queue to get stats for. */
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
/***********************************/
@@ -14044,6 +14769,9 @@
* Find out about available PCIE resources
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#undef MC_CMD_0xfd_PRIVILEGE_CTG
+
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
@@ -14052,20 +14780,27 @@
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
/* The maximum number of PFs the device can expose */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
/* The maximum number of VFs the device can expose in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
/* The maximum number of MSI-X vectors the device can provide in total */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each PF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
/* the number of MSI-X vectors the device will allocate by default to each VF
*/
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one PF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
/* the maximum number of MSI-X vectors the device can allocate to any one VF */
#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
/***********************************/
@@ -14084,10 +14819,13 @@
#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
/* Default (canonical) board mode */
#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
/* Current board mode */
#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
/***********************************/
@@ -14097,21 +14835,26 @@
#define MC_CMD_READ_ATB 0x100
#undef MC_CMD_0x100_PRIVILEGE_CTG
-#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_READ_ATB_IN msgrequest */
#define MC_CMD_READ_ATB_IN_LEN 16
#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
-#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
-#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
/* MC_CMD_READ_ATB_OUT msgresponse */
#define MC_CMD_READ_ATB_OUT_LEN 4
#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
/***********************************/
@@ -14129,7 +14872,9 @@
/* Each workaround is represented by a single bit according to the enums below.
*/
#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
/* enum: Bug 17230 work around. */
#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
/* enum: Bug 35388 work around (unsafe EVQ writes). */
@@ -14165,50 +14910,63 @@
* 1,3 = 0x00030001
*/
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
-#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
/* New privilege mask to be set. The mask will only be changed if the MSB is
* set to 1.
*/
#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
/* enum: Allows setting the TX packets' source MAC address to any arbitrary
 * MAC address.
*/
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
/* enum: Privilege that allows a Function to change the MAC address configured
* in its associated vAdapter/vPort.
*/
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
/* enum: Privilege that allows a Function to install filters that specify VLANs
* that are not in the permit list for the associated vPort. This privilege is
* primarily to support ESX where vPorts are created that restrict traffic to
* only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
*/
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has Bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
-#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
/* For an admin function, always all the privileges are reported. */
#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
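The FUNCTION dword packs PF in bits 0-15 and VF in bits 16-31 (so PF 1 / VF 3 encodes as 0x00030001, per the comment above), and the MSB of NEW_MASK (DO_CHANGE) gates whether the command writes anything at all. A read-only sketch using the same hypothetical helpers; MC_CMD_PRIVILEGE_MASK and MC_CMD_PRIVILEGE_MASK_IN_LEN are defined earlier in the header:

int privilege_mask_get(uint16_t pf, uint16_t vf, uint32_t *mask)
{
    uint8_t req[MC_CMD_PRIVILEGE_MASK_IN_LEN];
    uint8_t resp[MC_CMD_PRIVILEGE_MASK_OUT_LEN];
    size_t resp_len;
    uint32_t function;
    int rc;

    function = ((uint32_t)pf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN) |
        ((uint32_t)vf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN);
    put_dword_le(req, MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST, function);
    /* Leave DO_CHANGE (the MSB of NEW_MASK) clear: the firmware then only
     * reports the existing mask.  To modify it, OR the new group bits with
     * MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE instead of writing 0. */
    put_dword_le(req, MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST, 0);
    rc = mcdi_rpc(MC_CMD_PRIVILEGE_MASK, req, sizeof(req),
        resp, sizeof(resp), &resp_len);
    if (rc == 0)
        *mask = get_dword_le(resp,
            MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST);
    return rc;
}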
/***********************************/
@@ -14226,27 +14984,30 @@
* e.g. VF 1,3 = 0x00030001
*/
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
/* New link state mode to be set */
#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
-#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
/* enum: Use this value to just read the existing setting without modifying it.
*/
-#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
/***********************************/
/* MC_CMD_GET_SNAPSHOT_LENGTH
- * Obtain the curent range of allowable values for the SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
* parameter to MC_CMD_INIT_RXQ.
*/
#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
@@ -14261,8 +15022,10 @@
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
/* Minimum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
/* Maximum acceptable snapshot length. */
#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
/***********************************/
@@ -14272,7 +15035,7 @@
#define MC_CMD_FUSE_DIAGS 0x102
#undef MC_CMD_0x102_PRIVILEGE_CTG
-#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_FUSE_DIAGS_IN msgrequest */
#define MC_CMD_FUSE_DIAGS_IN_LEN 0
@@ -14281,28 +15044,40 @@
#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
/* Total number of mismatched bits between pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 0 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 1 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
/* Total number of mismatched bits between pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
/* Checksum of data after logical OR of pairs in area 2 */
#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
/***********************************/
@@ -14320,14 +15095,16 @@
#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
/* The groups of functions to have their privilege masks modified. */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
-#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
-#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
/* For VFS_OF_PF specify the PF, for ONE specify the target function */
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
@@ -14336,10 +15113,12 @@
* refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
/* Privileges to be removed from the target functions. For privilege
* definitions refer to the command MC_CMD_PRIVILEGE_MASK
*/
#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
@@ -14358,8 +15137,10 @@
#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
@@ -14379,7 +15160,7 @@
#define MC_CMD_XPM_WRITE_BYTES 0x104
#undef MC_CMD_0x104_PRIVILEGE_CTG
-#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
@@ -14387,8 +15168,10 @@
#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
/* Start address (byte) */
#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
/* Data */
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
@@ -14406,14 +15189,16 @@
#define MC_CMD_XPM_READ_SECTOR 0x105
#undef MC_CMD_0x105_PRIVILEGE_CTG
-#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
/* Sector index */
#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
/* Sector size */
#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
@@ -14421,10 +15206,12 @@
#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
/* Sector type */
#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
-#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
-#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
/* Sector data */
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
@@ -14439,7 +15226,7 @@
#define MC_CMD_XPM_WRITE_SECTOR 0x106
#undef MC_CMD_0x106_PRIVILEGE_CTG
-#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
@@ -14456,10 +15243,12 @@
#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
/* Enum values, see field(s): */
/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
/* Sector data */
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
@@ -14470,6 +15259,7 @@
#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
/* New sector index */
#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
/***********************************/
@@ -14479,12 +15269,13 @@
#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
#undef MC_CMD_0x107_PRIVILEGE_CTG
-#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
/* Sector index */
#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
@@ -14497,14 +15288,16 @@
#define MC_CMD_XPM_BLANK_CHECK 0x108
#undef MC_CMD_0x108_PRIVILEGE_CTG
-#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
@@ -14512,6 +15305,7 @@
#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
/* Total number of bad (non-blank) locations */
#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
* into MCDI response)
*/
@@ -14528,14 +15322,16 @@
#define MC_CMD_XPM_REPAIR 0x109
#undef MC_CMD_0x109_PRIVILEGE_CTG
-#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_REPAIR_IN msgrequest */
#define MC_CMD_XPM_REPAIR_IN_LEN 8
/* Start address (byte) */
#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
/* Count (bytes) */
#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
/* MC_CMD_XPM_REPAIR_OUT msgresponse */
#define MC_CMD_XPM_REPAIR_OUT_LEN 0
@@ -14549,7 +15345,7 @@
#define MC_CMD_XPM_DECODER_TEST 0x10a
#undef MC_CMD_0x10a_PRIVILEGE_CTG
-#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
@@ -14569,7 +15365,7 @@
#define MC_CMD_XPM_WRITE_TEST 0x10b
#undef MC_CMD_0x10b_PRIVILEGE_CTG
-#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
@@ -14590,16 +15386,19 @@
#define MC_CMD_EXEC_SIGNED 0x10c
#undef MC_CMD_0x10c_PRIVILEGE_CTG
-#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_EXEC_SIGNED_IN msgrequest */
#define MC_CMD_EXEC_SIGNED_IN_LEN 28
/* the length of code to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
/* the length of data to include in the CMAC */
#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
/* the XPM sector containing the key to use */
#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
/* the expected CMAC value */
#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
@@ -14617,12 +15416,13 @@
#define MC_CMD_PREPARE_SIGNED 0x10d
#undef MC_CMD_0x10d_PRIVILEGE_CTG
-#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
/* the length of data area to clear */
#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
@@ -14639,12 +15439,13 @@
#define MC_CMD_SET_SECURITY_RULE 0x10f
#undef MC_CMD_0x10f_PRIVILEGE_CTG
-#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
/* fields to include in match criteria */
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_LEN 4
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
@@ -14701,8 +15502,10 @@
#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
/* Physical port to match (as little-endian 32-bit value) */
#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_LEN 4
/* Reserved; set to 0 */
#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_LEN 4
/* remote IP address to match (as bytes in network order; set last 12 bytes to
* 0 for IPv4 address)
*/
@@ -14719,58 +15522,85 @@
* MC_CMD_SUBNET_MAP_SET_NODE appropriately
*/
#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_LEN 4
/* remote portrange ID to match (as little-endian 32-bit value); note that
* remote port ranges are matched by mapping the remote port to a "portrange
* ID" via a data structure which must already have been configured using
* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
*/
#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_LEN 4
/* local portrange ID to match (as little-endian 32-bit value); note that local
* port ranges are matched by mapping the local port to a "portrange ID" via a
* data structure which must already have been configured using
* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
*/
#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_LEN 4
/* set the action for transmitted packets matching this rule */
#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_LEN 4
/* enum: make no decision */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
/* enum: decide to accept the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
/* enum: decide to drop the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: inform the TSA controller about a sample of the packets matching this
+ * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with
+ * either the WHITELIST or BLACKLIST action
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE 0x4
/* enum: do not change the current TX action */
-#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
/* set the action for received packets matching this rule */
#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_LEN 4
/* enum: make no decision */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
/* enum: decide to accept the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
/* enum: decide to drop the packet */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: inform the TSA controller about a sample of the packets matching this
+ * rule (via MC_CMD_TSA_INFO_IN_PKT_SAMPLE messages); may be bitwise-ORed with
+ * either the WHITELIST or BLACKLIST action
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_SAMPLE 0x4
/* enum: do not change the current RX action */
-#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
/* counter ID to associate with this rule; IDs are allocated using
* MC_CMD_SECURITY_RULE_COUNTER_ALLOC
*/
#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_LEN 4
/* enum: special value for the null counter ID */
-#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+/* enum: special value to tell the MC to allocate an available counter */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO 0xeeeeeeee
+/* enum: special value to request use of a hardware counter (Medford2 only) */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_HW 0xffffffff
/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
-#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 32
/* new reference count for uses of counter ID */
#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_LEN 4
/* constructed match bits for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
/* constructed discriminator bits for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_LEN 4
/* base location for probes for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_LEN 4
/* step for probes for this rule (as a tracing aid only) */
#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_LEN 4
+/* ID for reading back the counter */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_OFST 28
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_ID_LEN 4
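
Since the SAMPLE values above are flag bits rather than standalone actions, a
caller composes them with WHITELIST or BLACKLIST when filling the TX_ACTION and
RX_ACTION dwords. A minimal sketch, assuming this header (efx_regs_mcdi.h in
the sfc base driver) is included and that MCDI dwords are little-endian on the
wire; put_le32() is a local helper, not part of the MCDI definitions:

#include <stdint.h>
#include <stddef.h>
#include "efx_regs_mcdi.h"  /* this header; name assumed per the sfc base driver */

/* Write a 32-bit value into an MCDI buffer in little-endian byte order. */
static void put_le32(uint8_t *buf, size_t ofst, uint32_t v)
{
        buf[ofst + 0] = (uint8_t)(v & 0xff);
        buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
        buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
        buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

/* Fill only the action/counter fields of an MC_CMD_SET_SECURITY_RULE_IN
 * request: accept matching TX packets but sample them to the TSA controller,
 * leave the RX action unchanged, and let the MC pick a free counter.
 */
static void set_rule_actions(uint8_t req[MC_CMD_SET_SECURITY_RULE_IN_LEN])
{
        put_le32(req, MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST,
            MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST |
            MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_SAMPLE);
        put_le32(req, MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST,
            MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED);
        put_le32(req, MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST,
            MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_SW_AUTO);
}
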
/***********************************/
@@ -14784,14 +15614,15 @@
#define MC_CMD_RESET_SECURITY_RULES 0x110
#undef MC_CMD_0x110_PRIVILEGE_CTG
-#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_LEN 4
/* enum: special value to reset all physical ports */
-#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
@@ -14836,12 +15667,13 @@
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
#undef MC_CMD_0x112_PRIVILEGE_CTG
-#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
/* the number of new counter IDs to request */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_LEN 4
/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
@@ -14851,6 +15683,7 @@
* requested if resources are unavailable)
*/
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_LEN 4
/* new counter ID(s) */
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
@@ -14869,7 +15702,7 @@
#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
#undef MC_CMD_0x113_PRIVILEGE_CTG
-#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
@@ -14877,6 +15710,7 @@
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
/* the number of counter IDs to free */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_LEN 4
/* the counter ID(s) to free */
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
@@ -14900,7 +15734,7 @@
#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
#undef MC_CMD_0x114_PRIVILEGE_CTG
-#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
@@ -14908,6 +15742,7 @@
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_LEN 4
/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
* to the next node, expressed as an offset in the trie memory (i.e. node ID
* multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
@@ -14928,7 +15763,7 @@
*/
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
-#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
/* final portrange ID for leaf nodes (don't care for branch nodes) */
@@ -14951,7 +15786,7 @@
#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
#undef MC_CMD_0x115_PRIVILEGE_CTG
-#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
@@ -14982,7 +15817,7 @@
#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
#undef MC_CMD_0x116_PRIVILEGE_CTG
-#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
@@ -15005,18 +15840,18 @@
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
/* enum: the IANA allocated UDP port for VXLAN */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
/* enum: the IANA allocated UDP port for Geneve */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
/* tunnel encapsulation protocol (only those named below are supported) */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
-#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
@@ -15073,18 +15908,22 @@
#define MC_CMD_RX_BALANCING 0x118
#undef MC_CMD_0x118_PRIVILEGE_CTG
-#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_RX_BALANCING_IN msgrequest */
#define MC_CMD_RX_BALANCING_IN_LEN 16
/* The RX port whose upconverter table will be modified */
#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4
/* The VLAN priority associated with the table index and vFIFO */
#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
/* The resulting bit of SRC^DST for indexing the table */
#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
/* The RX engine to which the vFIFO in the table entry will point */
#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4
/* MC_CMD_RX_BALANCING_OUT msgresponse */
#define MC_CMD_RX_BALANCING_OUT_LEN 0
@@ -15093,12 +15932,7 @@
/***********************************/
/* MC_CMD_TSA_BIND
* TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more
- * info in respect to the binding protocol. This MCDI command is only available
- * over a TLS secure connection between the TSAN and TSAC, and is not available
- * to host software. Note- The messages definitions that do comprise this MCDI
- * command deemed as provisional. This MCDI command has not yet been used in
- * any released code and may change during development. This note will be
- * removed once it is regarded as stable.
+ * information about the binding protocol.
*/
#define MC_CMD_TSA_BIND 0x119
#undef MC_CMD_0x119_PRIVILEGE_CTG
@@ -15108,15 +15942,13 @@
/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */
#define MC_CMD_TSA_BIND_IN_LEN 4
#define MC_CMD_TSA_BIND_IN_OP_OFST 0
-/* enum: Retrieve the TSAN ID from a TSAN. TSAN ID is a unique identifier for
- * the network adapter. More specifically, TSAN ID equals the MAC address of
- * the network adapter. TSAN ID is used as part of the TSAN authentication
- * protocol. Refer to SF-114946-SW for more information.
- */
+#define MC_CMD_TSA_BIND_IN_OP_LEN 4
+/* enum: Obsolete. Use MC_CMD_SECURE_NIC_INFO_IN_STATUS. */
#define MC_CMD_TSA_BIND_OP_GET_ID 0x1
/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part
* of the binding procedure to authorize the binding of an adapter to a TSAID.
- * Refer to SF-114946-SW for more information.
+ * Refer to SF-114946-SW for more information. This sub-command is only
+ * available over a TLS secure connection between the TSAN and TSAC.
*/
#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2
/* enum: Opcode associated with the propagation of a private key that TSAN uses
@@ -15124,18 +15956,47 @@
* uses this key for a signing operation. TSAC uses the counterpart public key
* to verify the signature. Note - The post-binding authentication occurs when
* the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to
- * SF-114946-SW for more information.
+ * SF-114946-SW for more information. This sub-command is only available over a
+ * TLS secure connection between the TSAN and TSAC.
*/
#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3
-/* enum: Request an unbinding operation. Note- TSAN clears the binding ticket
- * from the Nvram section.
+/* enum: Request an insecure unbinding operation. This sub-command is available
+ * for any privileged client.
*/
#define MC_CMD_TSA_BIND_OP_UNBIND 0x4
-
-/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest */
+/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */
+#define MC_CMD_TSA_BIND_OP_UNBIND_EXT 0x5
+/* enum: Opcode associated with the propagation of the unbinding secret token.
+ * TSAN persists the unbinding secret token. Refer to SF-115479-TC for more
+ * information. This sub-command is only available over a TLS secure connection
+ * between the TSAN and TSAC.
+ */
+#define MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN 0x6
+/* enum: Obsolete. Use MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */
+#define MC_CMD_TSA_BIND_OP_DECOMMISSION 0x7
+/* enum: Obsolete. Use MC_CMD_GET_CERTIFICATE. */
+#define MC_CMD_TSA_BIND_OP_GET_CERTIFICATE 0x8
+/* enum: Request a secure unbinding operation using unbinding token. This sub-
+ * command is available for any privileged client.
+ */
+#define MC_CMD_TSA_BIND_OP_SECURE_UNBIND 0x9
+/* enum: Request a secure decommissioning operation. This sub-command is
+ * available for any privileged client.
+ */
+#define MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION 0xa
+/* enum: Test facility that allows an adapter to be configured to behave as if
+ * bound to a TSA controller with restricted MCDI administrator operations.
+ * This operation is primarily intended to aid host driver development.
+ */
+#define MC_CMD_TSA_BIND_OP_TEST_MCDI 0xb
+
+/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest: Obsolete. Use
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS.
+ */
#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20
/* The operation requested. */
#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_LEN 4
/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates
 * the nonce each time as part of the TSAN post-binding authentication
 * procedure when the TSAN-TSAC connection terminates and TSAN needs to re-
@@ -15148,6 +16009,7 @@
#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4
/* The operation requested. */
#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_LEN 4
/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */
#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5
@@ -15155,6 +16017,7 @@
#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num))
/* The operation requested. */
#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_LEN 4
/* This data blob contains the private key generated by the TSAC. TSAN uses
* this key for a signing operation. Note- This private key is used in
* conjunction with the post-binding TSAN authentication procedure that occurs
@@ -15166,26 +16029,261 @@
#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1
#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248
-/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Asks for the un-binding procedure */
+/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Request an insecure unbinding
+ * operation.
+ */
#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10
/* The operation requested. */
#define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_LEN 4
/* TSAN unique identifier for the network adapter */
#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4
#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6
-/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse */
+/* MC_CMD_TSA_BIND_IN_UNBIND_EXT msgrequest: Obsolete. Use
+ * MC_CMD_TSA_BIND_IN_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMIN 93
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_LEN(num) (92+1*(num))
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_OP_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_LEN 6
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_OFST 10
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSANID_RSVD_LEN 2
+/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes, which is the maximum length defined
+ * by the TSA SDK. Note- The TSAID is the Organizational Unit Name field of
+ * the root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_OFST 12
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_TSAID_NUM 64
+/* Unbinding secret token. The adapter validates this unbinding token by
+ * comparing it against the one stored on the adapter as part of the
+ * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for
+ * more information.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_OFST 76
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_UNBINDTOKEN_LEN 16
+/* This is the signature of the above-mentioned fields- TSANID, TSAID and
+ * UNBINDTOKEN. As per current requirements, the SIG opaque data blob contains
+ * an ECDSA ECC-384 signature. The ECC curve is secp384r1. The signature is
+ * also ASN.1 encoded. Note- The signature is verified using the public key
+ * stored in the root certificate that is provisioned on the adapter side.
+ * This key is known as the PUKtsaid. Refer to SF-115479-TC for more
+ * information.
+ */
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_OFST 92
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_UNBIND_EXT_SIG_MAXNUM 160
+
+/* MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_OP_LEN 4
+/* Unbinding secret token. TSAN persists the unbinding secret token. Refer to
+ * SF-115479-TC for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_UNBINDTOKEN_LEN 16
+/* enum: There are situations where the binding process does not complete
+ * successfully because the key or other attributes are corrupted at the
+ * database level (Controller) and the adapter can no longer connect to the
+ * controller. To recover, use the decommission command to force the adapter
+ * into the unbound state.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN_ADAPTER_BINDING_FAILURE 0x1
+
+/* MC_CMD_TSA_BIND_IN_DECOMMISSION msgrequest: Obsolete. Use
+ * MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMIN 109
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_LEN(num) (108+1*(num))
+/* This is the signature of the above-mentioned fields- TSAID, USER and REASON.
+ * As per current requirements, the SIG opaque data blob contains an ECDSA
+ * ECC-384 signature. The ECC curve is secp384r1. The signature is also ASN.1
+ * encoded. Note- The signature is verified using the public key stored in the
+ * root certificate that is provisioned on the adapter side. This key is known
+ * as the PUKtsaid. Refer to SF-115479-TC for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_OFST 108
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_SIG_MAXNUM 144
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_OP_LEN 4
+/* This attribute identifies the TSA infrastructure domain. The length of the
+ * TSAID attribute is limited to 64 bytes, which is the maximum length defined
+ * by the TSA SDK. Note- The TSAID is the Organizational Unit Name field of
+ * the root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_OFST 4
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_TSAID_NUM 64
+/* User ID supplied, for example, by the Controller. Note- The 33 byte length
+ * of this attribute is the maximum length of a Linux user name plus the
+ * terminating null character.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_OFST 68
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_LEN 1
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_NUM 33
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_OFST 101
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_USER_RSVD_LEN 3
+/* Reason why decommissioning happens. Note- The list of reasons, defined as
+ * part of the enumeration below, can be extended.
+ */
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_OFST 104
+#define MC_CMD_TSA_BIND_IN_DECOMMISSION_REASON_LEN 4
+
+/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE msgrequest: Obsolete. Use
+ * MC_CMD_GET_CERTIFICATE.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_LEN 8
+/* The operation requested; must be MC_CMD_TSA_BIND_OP_GET_CERTIFICATE. */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_OP_LEN 4
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_TYPE_LEN 4
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_UNUSED 0x0 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is used by the
+ * controller to verify the authenticity of the adapter.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AAC 0x1
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is used by
+ * the controller to verify the validity of AAC.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_CERTIFICATE_AASC 0x2
+
+/* MC_CMD_TSA_BIND_IN_SECURE_UNBIND msgrequest: Request a secure unbinding
+ * operation using unbinding token.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMIN 97
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LENMAX 200
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(num) (96+1*(num))
+/* The operation requested; must be MC_CMD_TSA_BIND_OP_SECURE_UNBIND. */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_LEN 4
+/* Type of the message (MESSAGE_TYPE_xxx); must be
+ * MESSAGE_TYPE_TSA_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_LEN 4
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_OFST 8
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_LEN 6
+/* Align the arguments to 32 bits */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_OFST 14
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_RSVD_LEN 2
+/* A NUL-padded US-ASCII string identifying the TSA infrastructure domain. This
+ * field is for information only, and not used by the firmware. Note- The TSAID
+ * is the Organizational Unit Name field of the root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_OFST 16
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_NUM 64
+/* Unbinding secret token. The adapter validates this unbinding token by
+ * comparing it against the one stored on the adapter as part of the
+ * MC_CMD_TSA_BIND_IN_SET_UNBINDTOKEN msgrequest. Refer to SF-115479-TC for
+ * more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_OFST 80
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_LEN 16
+/* The signature computed and encoded as specified by MESSAGE_TYPE. */
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_OFST 96
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_MAXNUM 104
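
Laid out end to end, the fixed SECURE_UNBIND fields occupy bytes 0-95 and the
signature forms the variable-length tail, for a total of
MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(sig_len) bytes. A sketch of packing this
request, reusing put_le32() from the SET_SECURITY_RULE sketch above;
MESSAGE_TYPE_TSA_SECURE_UNBIND is defined elsewhere in the MCDI headers and
the transport is out of scope:

#include <stdint.h>
#include <string.h>

/* Pack an MC_CMD_TSA_BIND secure-unbind request into 'req', which must hold
 * at least MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(sig_len) bytes. Returns the
 * total request length. 'msg_type' should be MESSAGE_TYPE_TSA_SECURE_UNBIND.
 */
static size_t tsa_secure_unbind_req(uint8_t *req, uint32_t msg_type,
    const uint8_t tsanid[6], const char *tsaid,
    const uint8_t token[16], const uint8_t *sig, size_t sig_len)
{
        memset(req, 0, MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(sig_len));
        put_le32(req, MC_CMD_TSA_BIND_IN_SECURE_UNBIND_OP_OFST,
            MC_CMD_TSA_BIND_OP_SECURE_UNBIND);
        put_le32(req, MC_CMD_TSA_BIND_IN_SECURE_UNBIND_MESSAGE_TYPE_OFST,
            msg_type);
        memcpy(req + MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_OFST, tsanid,
            MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSANID_LEN);
        /* NUL-padded US-ASCII, informational only; strncpy() pads with NULs */
        strncpy((char *)(req + MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_OFST),
            tsaid, MC_CMD_TSA_BIND_IN_SECURE_UNBIND_TSAID_NUM);
        memcpy(req + MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_OFST, token,
            MC_CMD_TSA_BIND_IN_SECURE_UNBIND_UNBINDTOKEN_LEN);
        memcpy(req + MC_CMD_TSA_BIND_IN_SECURE_UNBIND_SIG_OFST, sig, sig_len);
        return MC_CMD_TSA_BIND_IN_SECURE_UNBIND_LEN(sig_len);
}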
+
+/* MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION msgrequest: Request a secure
+ * decommissioning operation.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMIN 113
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LENMAX 216
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_LEN(num) (112+1*(num))
+/* The operation requested; must be MC_CMD_TSA_BIND_OP_SECURE_DECOMMISSION. */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_OP_LEN 4
+/* Type of the message (MESSAGE_TYPE_xxx); must be
+ * MESSAGE_TYPE_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_MESSAGE_TYPE_LEN 4
+/* A NUL-padded US-ASCII string identifying the TSA infrastructure domain. This
+ * field is for information only, and not used by the firmware. Note- The TSAID
+ * is the Organizational Unit Name field of the root and server certificates.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_OFST 8
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_TSAID_NUM 64
+/* A NUL-padded US-ASCII string containing the user name of the creator of the
+ * decommissioning ticket. This field is for information only, and not used by
+ * the firmware.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_OFST 72
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_USER_NUM 36
+/* Reason why decommissioning happens */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_OFST 108
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_REASON_LEN 4
+/* enum: There are situations where the binding process does not complete
+ * successfully because the key or other attributes are corrupted at the
+ * database level (Controller) and the adapter can no longer connect to the
+ * controller. To recover, use the decommission command to force the adapter
+ * into the unbound state.
+ */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_ADAPTER_BINDING_FAILURE 0x1
+/* The signature computed and encoded as specified by MESSAGE_TYPE. */
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_OFST 112
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_LEN 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SECURE_DECOMMISSION_SIG_MAXNUM 104
+
+/* MC_CMD_TSA_BIND_IN_TEST_MCDI msgrequest: Test mode that emulates MCDI
+ * interface restrictions of a bound adapter. This operation is intended for
+ * test use on adapters that are not deployed and bound to a TSA Controller.
+ * Using it on a bound adapter will succeed but will not alter the MCDI
+ * privileges as MCDI operations will already be restricted.
+ */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN 8
+/* The operation requested; must be MC_CMD_TSA_BIND_OP_TEST_MCDI. */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST 0
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_LEN 4
+/* Enable or disable emulation of a bound adapter */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST 4
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_LEN 4
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE 0x0 /* enum */
+#define MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE 0x1 /* enum */
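
The whole TEST_MCDI request is two dwords, so packing is trivial. A sketch,
again reusing put_le32() from the SET_SECURITY_RULE sketch above:

static void tsa_test_mcdi_req(uint8_t req[MC_CMD_TSA_BIND_IN_TEST_MCDI_LEN],
    int enable)
{
        put_le32(req, MC_CMD_TSA_BIND_IN_TEST_MCDI_OP_OFST,
            MC_CMD_TSA_BIND_OP_TEST_MCDI);
        put_le32(req, MC_CMD_TSA_BIND_IN_TEST_MCDI_CTRL_OFST,
            enable ? MC_CMD_TSA_BIND_IN_TEST_MCDI_ENABLE
                   : MC_CMD_TSA_BIND_IN_TEST_MCDI_DISABLE);
}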
+
+/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse: Obsolete. Use
+ * MC_CMD_SECURE_NIC_INFO_OUT_STATUS.
+ */
#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15
#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252
#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num))
-/* The operation completion code. */
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_ID that is sent back to
+ * the caller.
+ */
#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_LEN 4
/* Rules engine type. Note- The rules engine type allows TSAC to further
 * identify the type of the connected endpoint (e.g. TSAN, NIC Emulator) and
 * take the proper action accordingly. As an example, TSAC uses the rules
 * engine type to select the SF key, which differs between TSAN and NIC
 * Emulator.
*/
#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_LEN 4
/* enum: Hardware rules engine. */
#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1
/* enum: NIC emulator rules engine. */
@@ -15209,8 +16307,11 @@
#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5
#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252
#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num))
-/* The operation completion code. */
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_TICKET that is sent back
+ * to the caller.
+ */
#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_LEN 4
/* The ticket represents the data blob construct that TSAN sends to TSAC as
* part of the binding protocol. From the TSAN perspective the ticket is an
* opaque construct. For more info refer to SF-115479-TC.
@@ -15222,23 +16323,142 @@
/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */
#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4
-/* The operation completion code. */
+/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_KEY that is sent back to
+ * the caller.
+ */
#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_LEN 4
-/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse */
+/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse: Response to insecure unbind request.
+ */
#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8
/* Same as MC_CMD_ERR field, but included as 0 in success cases */
#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0
+#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_LEN 4
/* Extra status information */
#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4
+#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_LEN 4
/* enum: Unbind successful. */
-#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
+#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
/* enum: TSANID mismatch */
-#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
/* enum: Unable to remove the binding ticket from persistent storage. */
-#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
/* enum: TSAN is not bound to a binding ticket. */
-#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+
+/* MC_CMD_TSA_BIND_OUT_UNBIND_EXT msgresponse: Obsolete. Use
+ * MC_CMD_TSA_BIND_OUT_SECURE_UNBIND.
+ */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_OFST 0
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_RESULT_LEN 4
+/* Extra status information */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_OFST 4
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_INFO_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a binding ticket. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_EXT_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN msgresponse */
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_SET_UNBINDTOKEN that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SET_UNBINDTOKEN_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_OUT_DECOMMISSION msgresponse: Obsolete. Use
+ * MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION.
+ */
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_DECOMMISSION that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_DECOMMISSION_OP_LEN 4
+
+/* MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMIN 9
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LEN(num) (8+1*(num))
+/* The protocol operation code MC_CMD_TSA_BIND_OP_GET_CERTIFICATE that is sent
+ * back to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_OP_LEN 4
+/* Type of the certificate. */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_TSA_BIND_IN_GET_CERTIFICATE/TYPE */
+/* The certificate data. */
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST 8
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM 244
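
On the response side the DATA length is implied by the overall response length
rather than carried in a field, so a parser derives it by subtracting the
fixed part. A sketch under the assumption that 'resp_len' is the response
length reported by the caller's MCDI transport; how certificates larger than
DATA_MAXNUM bytes are continued is not described here:

#include <stdint.h>
#include <string.h>

/* Copy the certificate payload out of an MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE
 * response. 'cert' must have room for at least
 * MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_MAXNUM bytes. Returns the number
 * of DATA bytes, or -1 if the response is too short.
 */
static int tsa_get_certificate_data(const uint8_t *resp, size_t resp_len,
    uint8_t *cert)
{
        size_t data_len;

        if (resp_len < MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_LENMIN)
                return -1;
        data_len = resp_len - MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST;
        memcpy(cert, resp + MC_CMD_TSA_BIND_OUT_GET_CERTIFICATE_DATA_OFST,
            data_len);
        return (int)data_len;
}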
+
+/* MC_CMD_TSA_BIND_OUT_SECURE_UNBIND msgresponse: Response to secure unbind
+ * request.
+ */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_LEN 8
+/* The protocol operation code that is sent back to the caller. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OP_LEN 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_OFST 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_RESULT_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a domain. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_SECURE_UNBIND_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION msgresponse: Response to secure
+ * decommission request.
+ */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_LEN 8
+/* The protocol operation code that is sent back to the caller. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OP_LEN 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_OFST 4
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_RESULT_LEN 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a domain. */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_NOT_BOUND 0x3
+/* enum: Invalid unbind token */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_TOKEN 0x4
+/* enum: Invalid signature */
+#define MC_CMD_TSA_BIND_OUT_SECURE_DECOMMISSION_ERR_BAD_SIGNATURE 0x5
+
+/* MC_CMD_TSA_BIND_OUT_TEST_MCDI msgresponse */
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_LEN 4
+/* The protocol operation code MC_CMD_TSA_BIND_OP_TEST_MCDI that is sent back
+ * to the caller.
+ */
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_OFST 0
+#define MC_CMD_TSA_BIND_OUT_TEST_MCDI_OP_LEN 4
/***********************************/
@@ -15251,9 +16471,9 @@
* will be loaded at power on or MC reboot, instead of the default ruleset.
* Rollback of the currently active ruleset to the cached version (when it is
* valid) is also supported. (Medford-only; for use by SolarSecure apps, not
- * directly by drivers. See SF-114946-SW.) NOTE - this message definition is
- * provisional. It has not yet been used in any released code and may change
- * during development. This note will be removed once it is regarded as stable.
+ * directly by drivers. See SF-114946-SW.) NOTE - The only sub-operation
+ * allowed from the local host on an adapter bound to a TSA controller is
+ * OP_GET_CACHED_VERSION. All other sub-operations are prohibited.
*/
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a
#undef MC_CMD_0x11a_PRIVILEGE_CTG
@@ -15264,18 +16484,19 @@
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4
/* the operation to perform */
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_LEN 4
/* enum: reports the ruleset version that is cached in persistent storage but
* performs no other action
*/
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
/* enum: rolls back the active state to the cached version. (May fail with
* ENOENT if there is no valid cached version.)
*/
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
/* enum: commits the active state to the persistent cache */
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
/* enum: invalidates the persistent cache without affecting the active state */
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5
@@ -15285,12 +16506,13 @@
* requested operation in the case of rollback, commit, or invalidate)
*/
#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_LEN 4
/* enum: persistent cache is invalid (the VERSION field will be empty in this
* case)
*/
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
/* enum: persistent cache is valid */
-#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
/* cached ruleset version (after completion of the requested operation, in the
* case of rollback, commit, or invalidate) as an opaque hash value in the same
* form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION
@@ -15309,7 +16531,7 @@
#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
#undef MC_CMD_0x11c_PRIVILEGE_CTG
-#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
@@ -15317,8 +16539,10 @@
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
/* The tag to be appended */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
/* The length of the data */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
/* The data to be contained in the TLV structure */
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
@@ -15344,6 +16568,7 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
/* Data type to be checked */
#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
@@ -15351,10 +16576,13 @@
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
/* Number of sectors found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
/* Number of bytes found (test builds only) */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
/* Length of signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
/* Signature */
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
@@ -15380,23 +16608,29 @@
#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
/* Function-relative queue instance */
#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
/* Requested value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
/* Requested value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
-#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
-#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
-#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
-#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
/* Actual value for timer load (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
/* Actual value for timer reload (in nanoseconds) */
#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
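
The request carries the desired load/reload periods in nanoseconds and the
response echoes the values the firmware actually programmed, so callers should
read TMR_LOAD_ACT_NS/TMR_RELOAD_ACT_NS back rather than assume the request was
honoured exactly. A request-packing sketch, reusing put_le32() from the
SET_SECURITY_RULE sketch above; the interrupt-holdoff mode is only an example
choice:

static void set_evq_tmr_req(uint8_t req[MC_CMD_SET_EVQ_TMR_IN_LEN],
    uint32_t instance, uint32_t load_ns, uint32_t reload_ns)
{
        put_le32(req, MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST, instance);
        put_le32(req, MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST, load_ns);
        put_le32(req, MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST, reload_ns);
        put_le32(req, MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST,
            MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
}
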
/***********************************/
@@ -15415,29 +16649,35 @@
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
/* Reserved for future use. */
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
* nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
* load/reload count.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
* allowed for timer load/reload counts.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
* multiple of this step size will be rounded in an implementation defined
* manner.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
* meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
/* Timer durations requested via MCDI that are not a multiple of this step size
* will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
/* For timers updated using the bug35388 workaround, this is the time interval
* (in nanoseconds) for each increment of the timer load/reload count. The
* requested duration of a timer is this value multiplied by the timer
@@ -15445,17 +16685,20 @@
* is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, this is the maximum value
* allowed for timer load/reload counts. This field is only meaningful if the
* bug35388 workaround is enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
/* For timers updated using the bug35388 workaround, timer load/reload counts
* not a multiple of this step size will be rounded in an implementation
* defined manner. This field is only meaningful if the bug35388 workaround is
* enabled.
*/
#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
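
Per the two MCDI_TMR fields above, a duration requested via MCDI is rounded up
to a multiple of MCDI_TMR_STEP_NS and may not exceed MCDI_TMR_MAX_NS. A small
helper expressing that rule; whether the firmware clamps or rejects an
over-range request is not stated here, so the clamp is an assumption:

#include <stdint.h>

/* Round a requested timer duration up to the step size and clamp to the
 * maximum, per MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT.
 */
static uint32_t quantise_tmr_ns(uint32_t req_ns, uint32_t step_ns,
    uint32_t max_ns)
{
        uint32_t q;

        if (step_ns == 0)
                return req_ns;
        q = ((req_ns + step_ns - 1) / step_ns) * step_ns; /* round up */
        return (q > max_ns) ? max_ns : q; /* clamp: an assumption */
}
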
/***********************************/
@@ -15466,7 +16709,7 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
#undef MC_CMD_0x11d_PRIVILEGE_CTG
-#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
@@ -15474,34 +16717,40 @@
* local queue index.
*/
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
/* Will the common pool be used as TX_vFIFO_ULL (1) */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
/* Number of buffers to reserve for the common pool */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
/* TX datapath to which the Common Pool is connected. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
/* enum: Extracts information from function */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
/* Network port or RX Engine to which the common pool connects. */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
/* enum: Extracts information from function */
-/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
/* ID of the common pool allocated */
#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
/***********************************/
@@ -15512,42 +16761,49 @@
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
#undef MC_CMD_0x11e_PRIVILEGE_CTG
-#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
/* Previously allocated common pool with which the new vFIFO will be associated
 */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
/* Port or RX engine with which to associate the vFIFO egress */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
/* enum: Extracts information from common pool */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
/* enum: To enable Switch loopback with Rx engine 0 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
/* enum: To enable Switch loopback with Rx engine 1 */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
/* Minimum number of buffers that the pool must have */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
/* enum: Do not check the space available */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
/* Will the vFIFO be used as TX_vFIFO_ULL */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
/* Network priority of the vFIFO, if applicable */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
/* enum: Search for the lowest unused priority */
-#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
/* Short vFIFO ID */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
/* Network priority of the vFIFO */
#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
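
TX vFIFO setup is a two-call sequence: allocate a common pool
(MC_CMD_ALLOCATE_TX_VFIFO_CP above), read CP_ID from its response, then
allocate a vFIFO against that pool. A sketch of the two request payloads,
reusing put_le32() from the SET_SECURITY_RULE sketch above; the vFIFO MODE
encoding is assumed to be the same boolean as the common pool's
ENABLED/DISABLED:

static void fill_tx_vfifo_reqs(
    uint8_t cp_req[MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN],
    uint8_t vf_req[MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN],
    uint32_t instance, uint32_t nbufs, uint32_t cp_id)
{
        /* Step 1: common pool; ingress/egress taken from the function, ULL
         * mode enabled (non-ULL use is noted above as unsupported for now).
         */
        put_le32(cp_req, MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST,
            instance);
        put_le32(cp_req, MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST,
            MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED);
        put_le32(cp_req, MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST, nbufs);
        put_le32(cp_req, MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST,
            (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE);
        put_le32(cp_req, MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST,
            (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE);

        /* Step 2: vFIFO bound to the pool ID returned in
         * MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID ('cp_id' here).
         */
        put_le32(vf_req, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST, cp_id);
        put_le32(vf_req, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST,
            (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE);
        put_le32(vf_req, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST,
            MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM);
        put_le32(vf_req, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST,
            1); /* ULL; boolean encoding assumed */
        put_le32(vf_req, MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST,
            (uint32_t)MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE);
}
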
/***********************************/
@@ -15558,12 +16814,13 @@
#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
#undef MC_CMD_0x11f_PRIVILEGE_CTG
-#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
/* Short vFIFO ID */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
@@ -15577,12 +16834,13 @@
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
#undef MC_CMD_0x121_PRIVILEGE_CTG
-#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
/* Common pool ID given when pool allocated */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
@@ -15604,16 +16862,17 @@
#define MC_CMD_REKEY 0x123
#undef MC_CMD_0x123_PRIVILEGE_CTG
-#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_REKEY_IN msgrequest */
#define MC_CMD_REKEY_IN_LEN 4
/* the type of operation requested */
#define MC_CMD_REKEY_IN_OP_OFST 0
+#define MC_CMD_REKEY_IN_OP_LEN 4
/* enum: Start the rekeying operation */
-#define MC_CMD_REKEY_IN_OP_REKEY 0x0
+#define MC_CMD_REKEY_IN_OP_REKEY 0x0
/* enum: Poll for completion of the rekeying operation */
-#define MC_CMD_REKEY_IN_OP_POLL 0x1
+#define MC_CMD_REKEY_IN_OP_POLL 0x1
/* MC_CMD_REKEY_OUT msgresponse */
#define MC_CMD_REKEY_OUT_LEN 0
@@ -15627,7 +16886,7 @@
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
#undef MC_CMD_0x124_PRIVILEGE_CTG
-#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
@@ -15636,8 +16895,10 @@
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
/* Available buffers for the ENG to NET vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
/***********************************/
@@ -15653,19 +16914,1231 @@
#define MC_CMD_SET_SECURITY_FUSES 0x126
#undef MC_CMD_0x126_PRIVILEGE_CTG
-#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
/* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */
#define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4
/* Flags specifying what type of security features are being set */
#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_LEN 4
#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0
#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1
#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1
#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_LBN 31
+#define MC_CMD_SET_SECURITY_FUSES_IN_SOFT_CONFIG_WIDTH 1
/* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */
#define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0
+/* MC_CMD_SET_SECURITY_FUSES_V2_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_LEN 4
+/* Flags specifying which security features are enforced on the NIC after the
+ * flags in the request have been applied. See
+ * MC_CMD_SET_SECURITY_FUSES_IN/FLAGS for flag definitions.
+ */
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_V2_OUT_FLAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TSA_INFO
+ * Messages sent from TSA adapter to TSA controller. This command is only valid
+ * when the MCDI header has MESSAGE_TYPE set to MCDI_MESSAGE_TYPE_TSA. This
+ * command is not sent by the driver to the MC; it is sent from the MC to a TSA
+ * controller and is treated more like an alert message than a command, so the
+ * MC does not expect a response in return. Doxbox reference
+ * SF-117371-SW
+ */
+#define MC_CMD_TSA_INFO 0x127
+#undef MC_CMD_0x127_PRIVILEGE_CTG
+
+#define MC_CMD_0x127_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_INFO_IN msgrequest */
+#define MC_CMD_TSA_INFO_IN_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_TSA_INFO_IN_OP_LBN 0
+#define MC_CMD_TSA_INFO_IN_OP_WIDTH 16
+/* enum: Information about recently discovered local IP address of the adapter
+ */
+#define MC_CMD_TSA_INFO_OP_LOCAL_IP 0x1
+/* enum: Information about a sampled packet that did not match any
+ * black/white-list filters and was either allowed or denied by the default
+ * filter
+ */
+#define MC_CMD_TSA_INFO_OP_PKT_SAMPLE 0x2
+
+/* MC_CMD_TSA_INFO_IN_LOCAL_IP msgrequest:
+ *
+ * The TSA controller maintains a list of IP addresses valid for each port of a
+ * TSA adapter. The TSA controller requires information from the adapter
+ * in order to learn new IP addresses assigned to a physical port and to
+ * identify those that are no longer assigned to the physical port. For this
+ * purpose, the TSA adapter snoops ARP replies, gratuitous ARP requests and ARP
+ * probe packets seen on each physical port. This definition describes the
+ * format of the notification message sent from a TSA adapter to a TSA
+ * controller to report any change in IP address assignment for a port. Doxbox
+ * reference SF-117371.
+ *
+ * Multiple notifications may be combined into a single message in future. When
+ * that happens, a new flag can be defined using the reserved bits to describe
+ * the extended format of this notification.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_LEN 18
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_OP_HDR_LEN 4
+/* Additional metadata describing the IP address information such as source of
+ * information retrieval, type of IP address, physical port number.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_LBN 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN 16
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH 8
+/* enum: ARP reply sent out of the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_TX_ARP 0x0
+/* enum: ARP probe packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_ARP_PROBE 0x1
+/* enum: Gratuitous ARP packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_GRATUITOUS_ARP 0x2
+/* enum: DHCP ACK packet received on the physical port */
+#define MC_CMD_TSA_INFO_IP_REASON_RX_DHCP_ACK 0x3
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN 24
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_LBN 25
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_RESERVED1_WIDTH 7
+/* IPV4 address retrieved from the sampled packets. This field is relevant only
+ * when META_IPV4 is set to 1.
+ */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_OFST 8
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_IPV4_ADDR_LEN 4
+/* Target MAC address retrieved from the sampled packet. */
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_OFST 12
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_LEN 1
+#define MC_CMD_TSA_INFO_IN_LOCAL_IP_MAC_ADDR_NUM 6
+
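+/* Illustrative sketch, not part of the MCDI definition: one way a test
+ * harness might pack the META word of a LOCAL_IP notification from the
+ * LBN/WIDTH macros above. Assumes <stdint.h>; the helper name and the
+ * MCDI_PCOL_EXAMPLES guard are hypothetical.
+ */
+#ifdef MCDI_PCOL_EXAMPLES
+static inline uint32_t
+tsa_local_ip_meta(uint32_t port_index, uint32_t reason, uint32_t is_ipv4)
+{
+	uint32_t meta = 0;
+
+	/* PORT_INDEX in bits [0,7], REASON in bits [16,23], IPV4 in bit 24 */
+	meta |= (port_index &
+	    ((1u << MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_WIDTH) - 1)) <<
+	    MC_CMD_TSA_INFO_IN_LOCAL_IP_META_PORT_INDEX_LBN;
+	meta |= (reason &
+	    ((1u << MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_WIDTH) - 1)) <<
+	    MC_CMD_TSA_INFO_IN_LOCAL_IP_META_REASON_LBN;
+	meta |= (is_ipv4 & 1u) << MC_CMD_TSA_INFO_IN_LOCAL_IP_META_IPV4_LBN;
+	return meta;
+}
+#endif /* MCDI_PCOL_EXAMPLES */
+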
+/* MC_CMD_TSA_INFO_IN_PKT_SAMPLE msgrequest:
+ *
+ * It is desirable for the TSA controller to learn the traffic pattern of
+ * packets seen at the network port being monitored. In order to learn about
+ * the traffic pattern, the TSA controller may want to sample packets seen at
+ * the network port. Based on the packet samples that the TSA controller
+ * receives from the adapter, the controller may choose to configure additional
+ * black-list or white-list rules to allow or block packets as required.
+ *
+ * Although the entire sampled packet as seen on the network port is available
+ * to the MC, the length of the sampled packet sent to the controller is
+ * restricted by the MCDI payload size. Moreover, the TSA controller does not
+ * require the entire packet to make decisions about filter updates. Hence the
+ * packet sample passed to the controller is truncated to 128 bytes. This
+ * length is large enough to hold the Ethernet header, the IP header and the
+ * maximum length of the supported L4 protocol headers (IPv4 only, but it can
+ * hold an IPv6 header too, if required in future).
+ *
+ * The intention is that any future changes to this message format that are not
+ * backwards compatible will be defined with a new operation code.
+ */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_LEN 136
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_OFST 0
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_OP_HDR_LEN 4
+/* Additional metadata describing the sampled packet */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_OFST 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_LEN 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_LBN 0
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_PORT_INDEX_WIDTH 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_LBN 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_DIRECTION_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_LBN 9
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_RESERVED_WIDTH 7
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_LBN 16
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_WIDTH 4
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_LBN 16
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_ALLOW_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_LBN 17
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_DENY_WIDTH 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_LBN 18
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_COUNT_WIDTH 1
+/* 128-byte raw prefix of the sampled packet, which includes the Ethernet
+ * header, IP header and L4 protocol header (only IPv4 supported initially).
+ * This provides the controller enough information about the packet sample to
+ * report traffic patterns seen on a network port and to make decisions
+ * concerning rule-set updates.
+ */
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_OFST 8
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_LEN 1
+#define MC_CMD_TSA_INFO_IN_PKT_SAMPLE_PACKET_DATA_NUM 128
+
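+/* Illustrative sketch, not part of the MCDI definition: extracting the 4-bit
+ * ACTION mask (ALLOW, DENY, COUNT plus one reserved bit) from a PKT_SAMPLE
+ * META word. Assumes <stdint.h>; the helper name and the MCDI_PCOL_EXAMPLES
+ * guard are hypothetical.
+ */
+#ifdef MCDI_PCOL_EXAMPLES
+static inline uint32_t
+tsa_pkt_sample_action(uint32_t meta)
+{
+	/* ACTION_MASK occupies bits [16,19] of the META word */
+	return (meta >> MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_LBN) &
+	    ((1u << MC_CMD_TSA_INFO_IN_PKT_SAMPLE_META_ACTION_MASK_WIDTH) - 1);
+}
+#endif /* MCDI_PCOL_EXAMPLES */
+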
+/* MC_CMD_TSA_INFO_OUT msgresponse */
+#define MC_CMD_TSA_INFO_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_HOST_INFO
+ * Commands to apply or retrieve host-related information from an adapter.
+ * Doxbox reference SF-117371-SW
+ */
+#define MC_CMD_HOST_INFO 0x128
+#undef MC_CMD_0x128_PRIVILEGE_CTG
+
+#define MC_CMD_0x128_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HOST_INFO_IN msgrequest */
+#define MC_CMD_HOST_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_HOST_INFO_IN_OP_LBN 0
+#define MC_CMD_HOST_INFO_IN_OP_WIDTH 16
+/* enum: Read a 16-byte unique host identifier from the adapter. This UUID
+ * helps to identify the host that an adapter is plugged into. This identifier
+ * is ideally the system UUID retrieved and set by the UEFI driver. If the UEFI
+ * driver is unable to extract the system UUID, it still sets a random 16-byte
+ * value on each supported SF adapter plugged into it. Host UUIDs may change if
+ * the system is power-cycled; however, they persist across adapter resets. If
+ * the host UUID was not set on an adapter, due to an unsupported version of
+ * the UEFI driver, then this command returns an error. Doxbox reference
+ * - SF-117371-SW section 'Host UUID'.
+ */
+#define MC_CMD_HOST_INFO_OP_GET_UUID 0x0
+/* enum: Set a 16-byte unique host identifier on the adapter to identify the
+ * host that the adapter is plugged into. See MC_CMD_HOST_INFO_OP_GET_UUID for
+ * further details.
+ */
+#define MC_CMD_HOST_INFO_OP_SET_UUID 0x1
+
+/* MC_CMD_HOST_INFO_IN_GET_UUID msgrequest */
+#define MC_CMD_HOST_INFO_IN_GET_UUID_LEN 4
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_GET_UUID_OP_HDR_LEN 4
+
+/* MC_CMD_HOST_INFO_OUT_GET_UUID msgresponse */
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_LEN 16
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_OFST 0
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_LEN 1
+#define MC_CMD_HOST_INFO_OUT_GET_UUID_HOST_UUID_NUM 16
+
+/* MC_CMD_HOST_INFO_IN_SET_UUID msgrequest */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_LEN 20
+/* sub-operation code info */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST 0
+#define MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_LEN 4
+/* 16-byte host UUID set on the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID for
+ * further details.
+ */
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST 4
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_LEN 1
+#define MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM 16
+
+/* MC_CMD_HOST_INFO_OUT_SET_UUID msgresponse */
+#define MC_CMD_HOST_INFO_OUT_SET_UUID_LEN 0
+
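+/* Illustrative sketch, not part of the MCDI definition: building the payload
+ * of a HOST_INFO SET_UUID request from the macros above. Assumes <stdint.h>
+ * and <string.h> and, for brevity, a little-endian host (MCDI payloads are
+ * little-endian; real drivers use their MCDI accessors). The helper name and
+ * the MCDI_PCOL_EXAMPLES guard are hypothetical.
+ */
+#ifdef MCDI_PCOL_EXAMPLES
+static inline void
+mcdi_build_set_uuid(uint8_t *buf, const uint8_t *uuid)
+{
+	uint32_t op = MC_CMD_HOST_INFO_OP_SET_UUID;
+
+	memset(buf, 0, MC_CMD_HOST_INFO_IN_SET_UUID_LEN);
+	memcpy(buf + MC_CMD_HOST_INFO_IN_SET_UUID_OP_HDR_OFST, &op, sizeof(op));
+	memcpy(buf + MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_OFST, uuid,
+	    MC_CMD_HOST_INFO_IN_SET_UUID_HOST_UUID_NUM);
+}
+#endif /* MCDI_PCOL_EXAMPLES */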
+
+/***********************************/
+/* MC_CMD_TSAN_INFO
+ * Get TSA adapter information. TSA controllers query each TSA adapter to learn
+ * some configuration parameters of each adapter. Doxbox reference SF-117371-SW
+ * section 'Adapter Information'
+ */
+#define MC_CMD_TSAN_INFO 0x129
+#undef MC_CMD_0x129_PRIVILEGE_CTG
+
+#define MC_CMD_0x129_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSAN_INFO_IN msgrequest */
+#define MC_CMD_TSAN_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_TSAN_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_TSAN_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_TSAN_INFO_IN_OP_LBN 0
+#define MC_CMD_TSAN_INFO_IN_OP_WIDTH 16
+/* enum: Read configuration parameters and IDs that uniquely identify an
+ * adapter. The parameters include host identification, adapter
+ * identification string and number of physical ports on the adapter.
+ */
+#define MC_CMD_TSAN_INFO_OP_GET_CFG 0x0
+
+/* MC_CMD_TSAN_INFO_IN_GET_CFG msgrequest */
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_LEN 4
+/* sub-operation code info */
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_OFST 0
+#define MC_CMD_TSAN_INFO_IN_GET_CFG_OP_HDR_LEN 4
+
+/* MC_CMD_TSAN_INFO_OUT_GET_CFG msgresponse */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_LEN 26
+/* Information about the configuration parameters returned in this response. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_OFST 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CONFIG_WORD_LEN 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_CAP_FLAGS_WIDTH 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_FLAG_HOST_UUID_VALID_WIDTH 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_LBN 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_NUM_PORTS_WIDTH 8
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_OFST 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_HOST_UUID_NUM 16
+/* A unique identifier per adapter. The base MAC address of the card is used
+ * for this purpose.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_OFST 20
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_GUID_NUM 6
+
+/* MC_CMD_TSAN_INFO_OUT_GET_CFG_V2 msgresponse */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_LEN 36
+/* Information about the configuration parameters returned in this response. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_OFST 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CONFIG_WORD_LEN 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_CAP_FLAGS_WIDTH 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_LBN 0
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_FLAG_HOST_UUID_VALID_WIDTH 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_LBN 16
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_NUM_PORTS_WIDTH 8
+/* 16-byte host UUID read out of the adapter. See MC_CMD_HOST_INFO_OP_GET_UUID
+ * for further details.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_OFST 4
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_HOST_UUID_NUM 16
+/* A unique identifier per adapter. The base MAC address of the card is used
+ * for this purpose.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_OFST 20
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_LEN 1
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_GUID_NUM 6
+/* Unused bytes, defined for 32-bit alignment of new fields. */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_OFST 26
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_UNUSED_LEN 2
+/* Maximum number of TSA statistics counters in each direction of dataflow
+ * supported on the card. Note that the statistics counters are always
+ * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx
+ * counter.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_OFST 28
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_MAX_STATS_LEN 4
+/* Width of each statistics counter (represented in bits). This gives the user
+ * an indication of the wrap point.
+ */
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_OFST 32
+#define MC_CMD_TSAN_INFO_OUT_GET_CFG_V2_STATS_WIDTH_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TSA_STATISTICS
+ * TSA adapter statistics operations.
+ */
+#define MC_CMD_TSA_STATISTICS 0x130
+#undef MC_CMD_0x130_PRIVILEGE_CTG
+
+#define MC_CMD_0x130_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_STATISTICS_IN msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_LEN 4
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_OP_CODE_LEN 4
+/* enum: Get the configuration parameters that describe the TSA statistics
+ * layout on the adapter.
+ */
+#define MC_CMD_TSA_STATISTICS_OP_GET_CONFIG 0x0
+/* enum: Read and/or clear TSA statistics counters. */
+#define MC_CMD_TSA_STATISTICS_OP_READ_CLEAR 0x1
+
+/* MC_CMD_TSA_STATISTICS_IN_GET_CONFIG msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_LEN 4
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_GET_CONFIG_OP_CODE_LEN 4
+
+/* MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG msgresponse */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_LEN 8
+/* Maximum number of TSA statistics counters in each direction of dataflow
+ * supported on the card. Note that the statistics counters are always
+ * allocated in pairs, i.e. a counter ID is associated with one Tx and one Rx
+ * counter.
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_OFST 0
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_MAX_STATS_LEN 4
+/* Width of each statistics counter (represented in bits). This gives the user
+ * an indication of the wrap point.
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_OFST 4
+#define MC_CMD_TSA_STATISTICS_OUT_GET_CONFIG_STATS_WIDTH_LEN 4
+
+/* MC_CMD_TSA_STATISTICS_IN_READ_CLEAR msgrequest */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMIN 20
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LENMAX 252
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(num) (16+4*(num))
+/* TSA statistics sub-operation code */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_OFST 0
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_OP_CODE_LEN 4
+/* Parameters describing the statistics operation */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_OFST 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_FLAGS_LEN 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_LBN 0
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_READ_WIDTH 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_LBN 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_CLEAR_WIDTH 1
+/* Counter ID list specification type */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_OFST 8
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_MODE_LEN 4
+/* enum: The statistics counters are specified as an unordered list of
+ * individual counter IDs.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LIST 0x0
+/* enum: The statistics counters are specified as a range of consecutive
+ * counter IDs.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_RANGE 0x1
+/* Number of statistics counters */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_OFST 12
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_NUM_STATS_LEN 4
+/* Counter IDs to be read/cleared. When mode is set to LIST, this entry holds a
+ * list of counter IDs to be operated on. When mode is set to RANGE, this entry
+ * holds a single counter ID representing the start of the range of counter IDs
+ * to be operated on.
+ */
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_OFST 16
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_LEN 4
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MINNUM 1
+#define MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_COUNTER_ID_MAXNUM 59
+
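+/* Illustrative note, not part of the MCDI definition: the READ_CLEAR request
+ * is variable-length, 16 fixed bytes plus 4 bytes per counter ID, as the
+ * LEN(num) macro above expresses. For example, a request covering 8 counters
+ * is MC_CMD_TSA_STATISTICS_IN_READ_CLEAR_LEN(8) == 16 + 4 * 8 == 48 bytes;
+ * the largest legal request, num == COUNTER_ID_MAXNUM (59), is exactly
+ * LENMAX (252) bytes.
+ */
+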
+/* MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR msgresponse */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMIN 24
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LENMAX 248
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_LEN(num) (8+16*(num))
+/* Number of statistics counters returned in this response */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_OFST 0
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_NUM_STATS_LEN 4
+/* MC_TSA_STATISTICS_ENTRY. Note that this field is expected to start at a
+ * 64-bit aligned offset.
+ */
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_OFST 8
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_LEN 16
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MINNUM 1
+#define MC_CMD_TSA_STATISTICS_OUT_READ_CLEAR_STATS_COUNTERS_MAXNUM 15
+
+/* MC_TSA_STATISTICS_ENTRY structuredef */
+#define MC_TSA_STATISTICS_ENTRY_LEN 16
+/* Tx statistics counter */
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_OFST 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LEN 8
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_OFST 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_OFST 4
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_LBN 0
+#define MC_TSA_STATISTICS_ENTRY_TX_STAT_WIDTH 64
+/* Rx statistics counter */
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_OFST 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LEN 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LO_OFST 8
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_HI_OFST 12
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_LBN 64
+#define MC_TSA_STATISTICS_ENTRY_RX_STAT_WIDTH 64
+
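+/* Illustrative sketch, not part of the MCDI definition: each 64-bit counter
+ * is carried as two 32-bit little-endian words with LO at the lower offset.
+ * One way to reassemble the Tx counter, assuming <stdint.h>, <string.h> and a
+ * little-endian host; the helper name and the MCDI_PCOL_EXAMPLES guard are
+ * hypothetical.
+ */
+#ifdef MCDI_PCOL_EXAMPLES
+static inline uint64_t
+tsa_stats_entry_tx(const uint8_t *entry)
+{
+	uint32_t lo, hi;
+
+	memcpy(&lo, entry + MC_TSA_STATISTICS_ENTRY_TX_STAT_LO_OFST, sizeof(lo));
+	memcpy(&hi, entry + MC_TSA_STATISTICS_ENTRY_TX_STAT_HI_OFST, sizeof(hi));
+	return ((uint64_t)hi << 32) | lo;
+}
+#endif /* MCDI_PCOL_EXAMPLES */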
+
+/***********************************/
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET
+ * This request causes the NIC to find the initial NIC secret (programmed
+ * during ATE) in XPM memory and if and only if the NIC has already been
+ * rekeyed with MC_CMD_REKEY, erase it. This is used by manftest after
+ * installing TSA binding certificates. See SF-117631-TC.
+ */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET 0x131
+#undef MC_CMD_0x131_PRIVILEGE_CTG
+
+#define MC_CMD_0x131_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET_IN msgrequest */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET_IN_LEN 0
+
+/* MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT msgresponse */
+#define MC_CMD_ERASE_INITIAL_NIC_SECRET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_CONFIG
+ * TSA adapter configuration operations. This command is used to prepare the
+ * NIC for TSA binding.
+ */
+#define MC_CMD_TSA_CONFIG 0x64
+#undef MC_CMD_0x64_PRIVILEGE_CTG
+
+#define MC_CMD_0x64_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSA_CONFIG_IN msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_LEN 4
+/* TSA configuration sub-operation code */
+#define MC_CMD_TSA_CONFIG_IN_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_OP_LEN 4
+/* enum: Append a single item to the tsa_config partition. Items will be
+ * encrypted unless they are declared as non-sensitive. Returns
+ * MC_CMD_ERR_EEXIST if the tag is already present.
+ */
+#define MC_CMD_TSA_CONFIG_OP_APPEND 0x1
+/* enum: Reset the tsa_config partition to a clean state. */
+#define MC_CMD_TSA_CONFIG_OP_RESET 0x2
+/* enum: Read back a configured item from tsa_config partition. Returns
+ * MC_CMD_ERR_ENOENT if the item doesn't exist, or MC_CMD_ERR_EPERM if the item
+ * is declared as sensitive (i.e. is encrypted).
+ */
+#define MC_CMD_TSA_CONFIG_OP_READ 0x3
+
+/* MC_CMD_TSA_CONFIG_IN_APPEND msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMIN 12
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENMAX 252
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LEN(num) (12+1*(num))
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_APPEND.
+ */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_APPEND_OP_LEN 4
+/* The tag to be appended */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_OFST 4
+#define MC_CMD_TSA_CONFIG_IN_APPEND_TAG_LEN 4
+/* The length of the data in bytes */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_OFST 8
+#define MC_CMD_TSA_CONFIG_IN_APPEND_LENGTH_LEN 4
+/* The item data */
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_OFST 12
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_LEN 1
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MINNUM 0
+#define MC_CMD_TSA_CONFIG_IN_APPEND_DATA_MAXNUM 240
+
+/* MC_CMD_TSA_CONFIG_OUT_APPEND msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_APPEND_LEN 0
+
+/* MC_CMD_TSA_CONFIG_IN_RESET msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_RESET_LEN 4
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_RESET.
+ */
+#define MC_CMD_TSA_CONFIG_IN_RESET_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_RESET_OP_LEN 4
+
+/* MC_CMD_TSA_CONFIG_OUT_RESET msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_RESET_LEN 0
+
+/* MC_CMD_TSA_CONFIG_IN_READ msgrequest */
+#define MC_CMD_TSA_CONFIG_IN_READ_LEN 8
+/* TSA configuration sub-operation code. The value shall be
+ * MC_CMD_TSA_CONFIG_OP_READ.
+ */
+#define MC_CMD_TSA_CONFIG_IN_READ_OP_OFST 0
+#define MC_CMD_TSA_CONFIG_IN_READ_OP_LEN 4
+/* The tag to be read */
+#define MC_CMD_TSA_CONFIG_IN_READ_TAG_OFST 4
+#define MC_CMD_TSA_CONFIG_IN_READ_TAG_LEN 4
+
+/* MC_CMD_TSA_CONFIG_OUT_READ msgresponse */
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENMIN 8
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENMAX 252
+#define MC_CMD_TSA_CONFIG_OUT_READ_LEN(num) (8+1*(num))
+/* The tag that was read */
+#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_OFST 0
+#define MC_CMD_TSA_CONFIG_OUT_READ_TAG_LEN 4
+/* The length of the data in bytes */
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_OFST 4
+#define MC_CMD_TSA_CONFIG_OUT_READ_LENGTH_LEN 4
+/* The data of the item. */
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_OFST 8
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_LEN 1
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MINNUM 0
+#define MC_CMD_TSA_CONFIG_OUT_READ_DATA_MAXNUM 244
+
+/* MC_TSA_IPV4_ITEM structuredef */
+#define MC_TSA_IPV4_ITEM_LEN 8
+/* Additional metadata describing the IP address information such as the
+ * physical port number the address is being used on. Unused space in this
+ * field is reserved for future expansion.
+ */
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST 0
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LEN 4
+#define MC_TSA_IPV4_ITEM_PORT_IDX_LBN 0
+#define MC_TSA_IPV4_ITEM_PORT_IDX_WIDTH 8
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_LBN 0
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_META_WIDTH 32
+/* The IPv4 address in little endian byte order. */
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST 4
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LEN 4
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_LBN 32
+#define MC_TSA_IPV4_ITEM_IPV4_ADDR_WIDTH 32
+
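+/* Illustrative sketch, not part of the MCDI definition: packing one
+ * MC_TSA_IPV4_ITEM for use in the MC_CMD_TSA_IPADDR requests below. Assumes
+ * <stdint.h>, <string.h> and a little-endian host, with the IPv4 address
+ * already in little-endian byte order as required; the helper name and the
+ * MCDI_PCOL_EXAMPLES guard are hypothetical.
+ */
+#ifdef MCDI_PCOL_EXAMPLES
+static inline void
+tsa_ipv4_item_pack(uint8_t *item, uint8_t port_idx, uint32_t ipv4_le)
+{
+	uint32_t meta = port_idx; /* PORT_IDX occupies bits [0,7] of META */
+
+	memcpy(item + MC_TSA_IPV4_ITEM_IPV4_ADDR_META_OFST, &meta, sizeof(meta));
+	memcpy(item + MC_TSA_IPV4_ITEM_IPV4_ADDR_OFST, &ipv4_le, sizeof(ipv4_le));
+}
+#endif /* MCDI_PCOL_EXAMPLES */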
+
+/***********************************/
+/* MC_CMD_TSA_IPADDR
+ * TSA operations relating to the monitoring and expiry of local IP addresses
+ * discovered by the controller. These commands are sent from a TSA controller
+ * to a TSA adapter.
+ */
+#define MC_CMD_TSA_IPADDR 0x65
+#undef MC_CMD_0x65_PRIVILEGE_CTG
+
+#define MC_CMD_0x65_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_IPADDR_IN msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_LEN 4
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_OP_WIDTH 16
+/* enum: Request that the adapter verifies that the IPv4 addresses supplied are
+ * still in use by the host by sending ARP probes to the host. The MC does not
+ * wait for a response to the probes and sends an MCDI response to the
+ * controller once the probes have been sent to the host. The response to the
+ * probes (if there are any) will be forwarded to the controller using
+ * MC_CMD_TSA_INFO alerts.
+ */
+#define MC_CMD_TSA_IPADDR_OP_VALIDATE_IPV4 0x1
+/* enum: Notify the adapter that one or more IPv4 addresses are no longer valid
+ * for the host of the adapter. The adapter should remove the IPv4 addresses
+ * from its local cache.
+ */
+#define MC_CMD_TSA_IPADDR_OP_REMOVE_IPV4 0x2
+
+/* MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4 msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMIN 16
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LENMAX 248
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(num) (8+8*(num))
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_OP_WIDTH 16
+/* Number of IPv4 addresses to validate. */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_OFST 4
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_NUM_ITEMS_LEN 4
+/* The IPv4 addresses to validate, in struct MC_TSA_IPV4_ITEM format. */
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LEN 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_LO_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_HI_OFST 12
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MINNUM 1
+#define MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_IPV4_ITEM_MAXNUM 30
+
+/* MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4 msgresponse */
+#define MC_CMD_TSA_IPADDR_OUT_VALIDATE_IPV4_LEN 0
+
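+/* Illustrative note, not part of the MCDI definition: a VALIDATE_IPV4 request
+ * carrying, say, 4 MC_TSA_IPV4_ITEM entries is
+ * MC_CMD_TSA_IPADDR_IN_VALIDATE_IPV4_LEN(4) == 8 + 8 * 4 == 40 bytes; the
+ * maximum of IPV4_ITEM_MAXNUM (30) items gives exactly LENMAX (248) bytes.
+ */
+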
+/* MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4 msgrequest */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMIN 16
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LENMAX 248
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_LEN(num) (8+8*(num))
+/* Header containing information to identify which sub-operation of this
+ * command to perform. The header contains a 16-bit op-code. Unused space in
+ * this field is reserved for future expansion.
+ */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_OFST 0
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_HDR_LEN 4
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_LBN 0
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_OP_WIDTH 16
+/* Number of IPv4 addresses to remove. */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_OFST 4
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_NUM_ITEMS_LEN 4
+/* The IPv4 addresses that have expired, in struct MC_TSA_IPV4_ITEM format. */
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LEN 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_LO_OFST 8
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_HI_OFST 12
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MINNUM 1
+#define MC_CMD_TSA_IPADDR_IN_REMOVE_IPV4_IPV4_ITEM_MAXNUM 30
+
+/* MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4 msgresponse */
+#define MC_CMD_TSA_IPADDR_OUT_REMOVE_IPV4_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SECURE_NIC_INFO
+ * Get secure NIC information. While many of the features reported by these
+ * commands are related to TSA, they must be supported in firmware where TSA is
+ * disabled.
+ */
+#define MC_CMD_SECURE_NIC_INFO 0x132
+#undef MC_CMD_0x132_PRIVILEGE_CTG
+
+#define MC_CMD_0x132_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SECURE_NIC_INFO_IN msgrequest */
+#define MC_CMD_SECURE_NIC_INFO_IN_LEN 4
+/* sub-operation code info */
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_HDR_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_LBN 0
+#define MC_CMD_SECURE_NIC_INFO_IN_OP_WIDTH 16
+/* enum: Get the status of various security settings, all signed along with a
+ * challenge chosen by the host.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OP_STATUS 0x0
+
+/* MC_CMD_SECURE_NIC_INFO_IN_STATUS msgrequest */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_LEN 24
+/* sub-operation code, must be MC_CMD_SECURE_NIC_INFO_OP_STATUS */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_OP_HDR_LEN 4
+/* Type of key to be used to sign response. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_OFST 4
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_KEY_TYPE_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_UNUSED 0x0 /* enum */
+/* enum: Solarflare adapter authentication key, installed by Manftest. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_SF_ADAPTER_AUTH 0x1
+/* enum: TSA binding key, installed after adapter is bound to a TSA controller.
+ * This is not supported in firmware which does not support TSA.
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_TSA_BINDING 0x2
+/* enum: Customer adapter authentication key. Installed by the customer in the
+ * field, but otherwise similar to the Solarflare adapter authentication key.
+ */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CUSTOMER_ADAPTER_AUTH 0x3
+/* Random challenge generated by the host. */
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_OFST 8
+#define MC_CMD_SECURE_NIC_INFO_IN_STATUS_CHALLENGE_LEN 16
+
+/* MC_CMD_SECURE_NIC_INFO_OUT_STATUS msgresponse */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_LEN 420
+/* Length of the signature in MSG_SIGNATURE. */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_OFST 0
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN_LEN 4
+/* Signature over the message, starting at MESSAGE_TYPE and continuing to the
+ * end of the MCDI response, allowing the message format to be extended. The
+ * signature uses ECDSA 384 encoding in ASN.1 format. It has variable length,
+ * with a maximum of 384 bytes.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_OFST 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MSG_SIGNATURE_LEN 384
+/* Enum value indicating the type of response. This protects against chosen
+ * message attacks. The enum values are random rather than sequential to make
+ * it unlikely that values will be reused should other commands in a different
+ * namespace need to create signed messages.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_OFST 388
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_MESSAGE_TYPE_LEN 4
+/* enum: Message type value for the response to a
+ * MC_CMD_SECURE_NIC_INFO_IN_STATUS message.
+ */
+#define MC_CMD_SECURE_NIC_INFO_STATUS 0xdb4
+/* The challenge provided by the host in the MC_CMD_SECURE_NIC_INFO_IN_STATUS
+ * message
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_OFST 392
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_CHALLENGE_LEN 16
+/* The first 32 bits of XPM memory, which include security and flag bits, die
+ * ID and chip ID revision. The meaning of these bits is defined in
+ * mc/include/mc/xpm.h in the firmwaresrc repository.
+ */
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_OFST 408
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_XPM_STATUS_BITS_LEN 4
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_OFST 412
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_A_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_OFST 414
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_B_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_OFST 416
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_C_LEN 2
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_OFST 418
+#define MC_CMD_SECURE_NIC_INFO_OUT_STATUS_FIRMWARE_VERSION_D_LEN 2
+
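+/* Illustrative note, not part of the MCDI definition: the signed region of
+ * this 420-byte response starts at MESSAGE_TYPE (offset 388) and runs to the
+ * end, i.e. 420 - 388 = 32 bytes covering MESSAGE_TYPE, CHALLENGE,
+ * XPM_STATUS_BITS and the four FIRMWARE_VERSION words. A verifier checks the
+ * first MSG_SIGNATURE_LEN bytes of MSG_SIGNATURE (an ASN.1-encoded ECDSA-384
+ * signature, at most 384 bytes) against exactly that 32-byte region.
+ */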
+
+/***********************************/
+/* MC_CMD_TSA_TEST
+ * A simple ping-pong command just to test the adapter<>controller MCDI
+ * communication channel. This command makes no changes to the TSA adapter's
+ * internal state. It is used by the controller to verify that the MCDI
+ * communication channel is working. This command takes no additional
+ * parameters in its request or response.
+ */
+#define MC_CMD_TSA_TEST 0x125
+#undef MC_CMD_0x125_PRIVILEGE_CTG
+
+#define MC_CMD_0x125_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_TEST_IN msgrequest */
+#define MC_CMD_TSA_TEST_IN_LEN 0
+
+/* MC_CMD_TSA_TEST_OUT msgresponse */
+#define MC_CMD_TSA_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_RULESET_OVERRIDE
+ * Override the TSA ruleset that is currently active on the adapter. This
+ * operation does not modify the ruleset itself; it provides a mechanism to
+ * apply an allow-all or deny-all policy to all packets, thereby completely
+ * ignoring the ruleset configured on the adapter. Its main purpose is to
+ * provide a deterministic state to the TSA firewall during ruleset
+ * transitions.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE 0x12a
+#undef MC_CMD_0x12a_PRIVILEGE_CTG
+
+#define MC_CMD_0x12a_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_IN msgrequest */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_LEN 4
+/* The override state to apply. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_OFST 0
+#define MC_CMD_TSA_RULESET_OVERRIDE_IN_STATE_LEN 4
+/* enum: No override in place - the existing ruleset is in operation. */
+#define MC_CMD_TSA_RULESET_OVERRIDE_NONE 0x0
+/* enum: Block all packets seen on all datapath channels except those packets
+ * required for basic configuration of the TSA NIC such as ARPs and TSA-
+ * communication traffic. Such exceptional traffic is handled differently
+ * compared to TSA rulesets.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_BLOCK 0x1
+/* enum: Allow all packets through all datapath channels. The TSA adapter
+ * behaves like a normal NIC without any firewalls.
+ */
+#define MC_CMD_TSA_RULESET_OVERRIDE_ALLOW 0x2
+
+/* MC_CMD_TSA_RULESET_OVERRIDE_OUT msgresponse */
+#define MC_CMD_TSA_RULESET_OVERRIDE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSAC_REQUEST
+ * Generic command to send requests from a TSA controller to a TSA adapter.
+ * Specific usage is determined by the TYPE field.
+ */
+#define MC_CMD_TSAC_REQUEST 0x12b
+#undef MC_CMD_0x12b_PRIVILEGE_CTG
+
+#define MC_CMD_0x12b_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_TSAC_REQUEST_IN msgrequest */
+#define MC_CMD_TSAC_REQUEST_IN_LEN 4
+/* The type of request from the controller. */
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_OFST 0
+#define MC_CMD_TSAC_REQUEST_IN_TYPE_LEN 4
+/* enum: Request the adapter to resend local IP information from its cache. The
+ * command does not return any IP address information; IP addresses are sent as
+ * TSA notifications as described in MC_CMD_TSA_INFO_IN_LOCAL_IP.
+ */
+#define MC_CMD_TSAC_REQUEST_LOCALIP 0x0
+
+/* MC_CMD_TSAC_REQUEST_OUT msgresponse */
+#define MC_CMD_TSAC_REQUEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUC_VERSION
+ * Get the version of the SUC
+ */
+#define MC_CMD_SUC_VERSION 0x134
+#undef MC_CMD_0x134_PRIVILEGE_CTG
+
+#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SUC_VERSION_IN msgrequest */
+#define MC_CMD_SUC_VERSION_IN_LEN 0
+
+/* MC_CMD_SUC_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_VERSION_OUT_LEN 24
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
+#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
+/* The date, in seconds since the Unix epoch, when the firmware image was
+ * built.
+ */
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
+#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
+#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
+
+/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
+ * loader.
+ */
+#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
+/* enum: Requests the SUC boot version. */
+#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
+
+/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
+/* The SUC boot version */
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SUC_MANFTEST
+ * Operations to support manftest on SUC based systems.
+ */
+#define MC_CMD_SUC_MANFTEST 0x135
+#undef MC_CMD_0x135_PRIVILEGE_CTG
+
+#define MC_CMD_0x135_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND
+
+/* MC_CMD_SUC_MANFTEST_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_IN_LEN 4
+/* The manftest operation to be performed. */
+#define MC_CMD_SUC_MANFTEST_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_IN_OP_LEN 4
+/* enum: Read serial number and use count. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ 0x0
+/* enum: Update use count on wearout adapter. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE 0x1
+/* enum: Start an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START 0x2
+/* enum: Read the status of an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS 0x3
+/* enum: Read the results of an ADC calibration. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT 0x4
+/* enum: Read the PCIe configuration. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ 0x5
+/* enum: Write the PCIe configuration. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE 0x6
+/* enum: Write FRU information to SUC. The FRU information is taken from the
+ * FRU_INFORMATION partition. Attempts to write to read-only FRUs are rejected.
+ */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE 0x7
+
+/* MC_CMD_SUC_MANFTEST_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_WEAROUT_READ.
+ */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_LEN 20
+/* The serial number of the wearout adapter, see SF-112717-PR for format. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_SERIAL_NUMBER_LEN 16
+/* The use count of the wearout adapter. */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_OFST 16
+#define MC_CMD_SUC_MANFTEST_WEAROUT_READ_OUT_USE_COUNT_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE.
+ */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_WEAROUT_UPDATE_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_START_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_LEN 4
+/* The combined status of the calibration operation. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FLAGS_LEN 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_LBN 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_CALIBRATING_WIDTH 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_LBN 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_FAILED_WIDTH 1
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_LBN 2
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_RESULT_WIDTH 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_LBN 6
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_STATUS_OUT_INDEX_WIDTH 2
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT.
+ */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_LEN 12
+/* The set of calibration results. */
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_OFST 0
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_LEN 4
+#define MC_CMD_SUC_MANFTEST_ADC_CALIBRATE_RESULT_OUT_VALUE_NUM 3
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ.
+ */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_LEN 4
+/* The PCIe vendor ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_VENDOR_ID_LEN 2
+/* The PCIe device ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_OFST 2
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_READ_OUT_DEVICE_ID_LEN 2
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_LEN 8
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE.
+ */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_OP_LEN 4
+/* The PCIe vendor ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_OFST 4
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_VENDOR_ID_LEN 2
+/* The PCIe device ID. */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_OFST 6
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_IN_DEVICE_ID_LEN 2
+
+/* MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_CONFIG_PCIE_WRITE_OUT_LEN 0
+
+/* MC_CMD_SUC_MANFTEST_FRU_WRITE_IN msgrequest */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_LEN 4
+/* The manftest operation to be performed. This must be
+ * MC_CMD_SUC_MANFTEST_FRU_WRITE
+ */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_OFST 0
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_IN_OP_LEN 4
+
+/* MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT msgresponse */
+#define MC_CMD_SUC_MANFTEST_FRU_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_CERTIFICATE
+ * Request a certificate.
+ */
+#define MC_CMD_GET_CERTIFICATE 0x12c
+#undef MC_CMD_0x12c_PRIVILEGE_CTG
+
+#define MC_CMD_0x12c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CERTIFICATE_IN msgrequest */
+#define MC_CMD_GET_CERTIFICATE_IN_LEN 8
+/* Type of the certificate to be retrieved. */
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_IN_TYPE_LEN 4
+#define MC_CMD_GET_CERTIFICATE_IN_UNUSED 0x0 /* enum */
+#define MC_CMD_GET_CERTIFICATE_IN_AAC 0x1 /* enum */
+/* enum: Adapter Authentication Certificate (AAC). The AAC is unique to each
+ * adapter and is used to verify its authenticity. It is installed by Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH 0x1
+#define MC_CMD_GET_CERTIFICATE_IN_AASC 0x2 /* enum */
+/* enum: Adapter Authentication Signing Certificate (AASC). The AASC is shared
+ * by a group of adapters (typically a purchase order) and is used to verify
+ * the validity of AAC along with the SF root certificate. It is installed by
+ * Manftest.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_ADAPTER_AUTH_SIGNING 0x2
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AAC 0x3 /* enum */
+/* enum: Customer Adapter Authentication Certificate. The Customer AAC is
+ * unique to each adapter and is used to verify its authenticity in cases where
+ * either the AAC is not installed or a customer desires to use their own
+ * certificate chain. It is installed by the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH 0x3
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_AASC 0x4 /* enum */
+/* enum: Customer Adapter Authentication Signing Certificate. The Customer AASC
+ * is shared by a group of adapters and is used to verify the validity of the
+ * Customer AAC along with the customer's root certificate. It is installed by
+ * the customer.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_CUSTOMER_ADAPTER_AUTH_SIGNING 0x4
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which the certificate is to be retrieved.
+ */
+#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST 4
+#define MC_CMD_GET_CERTIFICATE_IN_OFFSET_LEN 4
+
+/* MC_CMD_GET_CERTIFICATE_OUT msgresponse */
+#define MC_CMD_GET_CERTIFICATE_OUT_LENMIN 13
+#define MC_CMD_GET_CERTIFICATE_OUT_LENMAX 252
+#define MC_CMD_GET_CERTIFICATE_OUT_LEN(num) (12+1*(num))
+/* Type of the certificate. */
+#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_OFST 0
+#define MC_CMD_GET_CERTIFICATE_OUT_TYPE_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_GET_CERTIFICATE_IN/TYPE */
+/* Offset, measured in bytes, relative to the start of the certificate data
+ * from which data in this message starts.
+ */
+#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_OFST 4
+#define MC_CMD_GET_CERTIFICATE_OUT_OFFSET_LEN 4
+/* Total length of the certificate data. */
+#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST 8
+#define MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_LEN 4
+/* The certificate data. */
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST 12
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_LEN 1
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_CERTIFICATE_OUT_DATA_MAXNUM 240
+
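+/* Illustrative sketch, not part of the MCDI definition: certificates larger
+ * than one response are fetched in chunks by advancing OFFSET until
+ * TOTAL_LENGTH bytes have arrived; each response carries at most DATA_MAXNUM
+ * (240) data bytes. Assumes <stdint.h>, <string.h> and a little-endian host;
+ * the mcdi_rpc() transport, the helper name and the MCDI_PCOL_EXAMPLES guard
+ * are hypothetical.
+ */
+#ifdef MCDI_PCOL_EXAMPLES
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, uint32_t in_len,
+    uint8_t *out, uint32_t out_cap, uint32_t *out_len);
+
+static int
+get_certificate(uint32_t type, uint8_t *cert, uint32_t cert_size)
+{
+	uint8_t req[MC_CMD_GET_CERTIFICATE_IN_LEN];
+	uint8_t resp[MC_CMD_GET_CERTIFICATE_OUT_LENMAX];
+	uint32_t offset = 0, total, resp_len, chunk;
+
+	do {
+		memcpy(req + MC_CMD_GET_CERTIFICATE_IN_TYPE_OFST, &type, 4);
+		memcpy(req + MC_CMD_GET_CERTIFICATE_IN_OFFSET_OFST, &offset, 4);
+		if (mcdi_rpc(MC_CMD_GET_CERTIFICATE, req, sizeof(req),
+		    resp, sizeof(resp), &resp_len) != 0)
+			return -1;
+		memcpy(&total,
+		    resp + MC_CMD_GET_CERTIFICATE_OUT_TOTAL_LENGTH_OFST, 4);
+		chunk = resp_len - MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST;
+		if (total > cert_size || offset + chunk > total)
+			return -1;
+		/* Append this response's DATA bytes at the reported offset */
+		memcpy(cert + offset,
+		    resp + MC_CMD_GET_CERTIFICATE_OUT_DATA_OFST, chunk);
+		offset += chunk;
+	} while (offset < total);
+	return 0;
+}
+#endif /* MCDI_PCOL_EXAMPLES */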
+
+/***********************************/
+/* MC_CMD_GET_NIC_GLOBAL
+ * Get a global value which applies to all PCI functions
+ */
+#define MC_CMD_GET_NIC_GLOBAL 0x12d
+#undef MC_CMD_0x12d_PRIVILEGE_CTG
+
+#define MC_CMD_0x12d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NIC_GLOBAL_IN msgrequest */
+#define MC_CMD_GET_NIC_GLOBAL_IN_LEN 4
+/* Key to request value for, see enum values in MC_CMD_SET_NIC_GLOBAL. If the
+ * given key is unknown to the current firmware, the call will fail with
+ * ENOENT.
+ */
+#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_OFST 0
+#define MC_CMD_GET_NIC_GLOBAL_IN_KEY_LEN 4
+
+/* MC_CMD_GET_NIC_GLOBAL_OUT msgresponse */
+#define MC_CMD_GET_NIC_GLOBAL_OUT_LEN 4
+/* Value of requested key, see key descriptions below. */
+#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_OFST 0
+#define MC_CMD_GET_NIC_GLOBAL_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_NIC_GLOBAL
+ * Set a global value which applies to all PCI functions. Most global values
+ * can only be changed under specific conditions, and this call will return an
+ * appropriate error otherwise (see key descriptions).
+ */
+#define MC_CMD_SET_NIC_GLOBAL 0x12e
+#undef MC_CMD_0x12e_PRIVILEGE_CTG
+
+#define MC_CMD_0x12e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_NIC_GLOBAL_IN msgrequest */
+#define MC_CMD_SET_NIC_GLOBAL_IN_LEN 8
+/* Key to change value of. Firmware will return ENOENT for keys it doesn't know
+ * about.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_OFST 0
+#define MC_CMD_SET_NIC_GLOBAL_IN_KEY_LEN 4
+/* enum: Request switching the datapath firmware sub-variant. Currently only
+ * useful when running the DPDK f/w variant. See key values below, and the DPDK
+ * section of the EF10 Driver Writers Guide. Note that any driver attaching
+ * with the SUBVARIANT_AWARE flag cleared is implicitly considered as a request
+ * to switch back to the default sub-variant, and will thus reset this value.
+ * If a sub-variant switch happens, all other PCI functions will get their
+ * resources reset (they will see an MC reboot).
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT 0x1
+/* New value to set, see key descriptions above. */
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_OFST 4
+#define MC_CMD_SET_NIC_GLOBAL_IN_VALUE_LEN 4
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Default sub-variant with support
+ * for maximum features for the current f/w variant. A request from a
+ * privileged function to set this particular value will always succeed.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT 0x0
+/* enum: Only if KEY = FIRMWARE_SUBVARIANT. Increases packet rate at the cost
+ * of not supporting any TX checksum offloads. Only supported when running some
+ * f/w variants, others will return ENOTSUP (as reported by the homonymous bit
+ * in MC_CMD_GET_CAPABILITIES_V2). Can only be set when no other drivers are
+ * attached, and the calling driver must have no resources allocated. See the
+ * DPDK section of the EF10 Driver Writers Guide for a more detailed
+ * description with possible error codes.
+ */
+#define MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM 0x1
+
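+/* Illustrative note, not part of the MCDI definition: a driver wanting the
+ * higher packet rate would issue MC_CMD_SET_NIC_GLOBAL with KEY set to
+ * FIRMWARE_SUBVARIANT (0x1) and VALUE set to FW_SUBVARIANT_NO_TX_CSUM (0x1)
+ * while it is the only attached driver and holds no allocated resources;
+ * after the switch, all other PCI functions see an MC reboot and their
+ * resources are reset.
+ */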
+
+/***********************************/
+/* MC_CMD_LTSSM_TRACE_POLL
+ * Medford2 hardware has support for logging all LTSSM state transitions to a
+ * hardware buffer. When built with WITH_LTSSM_TRACE=1, the firmware will
+ * periodially dump the contents of this hardware buffer to an internal
+ * firmware buffer for later extraction.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL 0x12f
+#undef MC_CMD_0x12f_PRIVILEGE_CTG
+
+#define MC_CMD_0x12f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LTSSM_TRACE_POLL_IN msgrequest: Read transitions from the firmware
+ * internal buffer.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_LEN 4
+/* The maximum number of rows that the caller can accept. The format of each row
+ * is defined in MC_CMD_LTSSM_TRACE_POLL_OUT.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_IN_MAX_ROW_COUNT_LEN 4
+
+/* MC_CMD_LTSSM_TRACE_POLL_OUT msgresponse */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMIN 16
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LENMAX 248
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LEN(num) (8+8*(num))
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_OFST 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FLAGS_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_HW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_LBN 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_FW_BUFFER_OVERFLOW_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_LBN 31
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_CONTINUES_WIDTH 1
+/* The number of rows present in this response. */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_OFST 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROW_COUNT_LEN 4
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LEN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_LO_OFST 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_HI_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MINNUM 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_ROWS_MAXNUM 30
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_LBN 0
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_LTSSM_STATE_WIDTH 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_LBN 6
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_RDLH_LINK_UP_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_LBN 7
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_WAKE_N_WIDTH 1
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_LBN 8
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_PS_WIDTH 24
+/* The time of the LTSSM transition. Times are reported as fractional
+ * microseconds since MC boot (wrapping at 2^32us). The fractional part is
+ * reported in picoseconds (0 <= TIMESTAMP_PS < 1000000). The timestamp in
+ * seconds is (TIMESTAMP_US + TIMESTAMP_PS / 1000000) / 1000000.
+ */
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_OFST 12
+#define MC_CMD_LTSSM_TRACE_POLL_OUT_TIMESTAMP_US_LEN 4
+
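+/* Illustrative sketch, not part of the MCDI definition: combining the
+ * TIMESTAMP_US and TIMESTAMP_PS fields of a trace row into seconds, per the
+ * formula above. Assumes <stdint.h>; the helper name and the
+ * MCDI_PCOL_EXAMPLES guard are hypothetical.
+ */
+#ifdef MCDI_PCOL_EXAMPLES
+static inline double
+ltssm_row_timestamp_seconds(uint32_t timestamp_us, uint32_t timestamp_ps)
+{
+	/* 0 <= timestamp_ps < 1000000; the microsecond count wraps at 2^32 */
+	return ((double)timestamp_us + (double)timestamp_ps / 1e6) / 1e6;
+}
+#endif /* MCDI_PCOL_EXAMPLES */
+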
#endif /* _SIENA_MC_DRIVER_PCOL_H */
/*! \cidoxg_end */
diff --git a/drivers/net/sfc/base/efx_regs_mcdi_aoe.h b/drivers/net/sfc/base/efx_regs_mcdi_aoe.h
new file mode 100644
index 00000000..6aaf212f
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_mcdi_aoe.h
@@ -0,0 +1,2914 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright 2008-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_AOE_H
+#define _SIENA_MC_DRIVER_PCOL_AOE_H
+
+
+
+/***********************************/
+/* MC_CMD_FC
+ * Perform an FC operation
+ */
+#define MC_CMD_FC 0x9
+
+/* MC_CMD_FC_IN msgrequest */
+#define MC_CMD_FC_IN_LEN 4
+#define MC_CMD_FC_IN_OP_HDR_OFST 0
+#define MC_CMD_FC_IN_OP_HDR_LEN 4
+#define MC_CMD_FC_IN_OP_LBN 0
+#define MC_CMD_FC_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to FC. */
+#define MC_CMD_FC_OP_NULL 0x1
+/* enum: Unused opcode */
+#define MC_CMD_FC_OP_UNUSED 0x2
+/* enum: MAC driver commands */
+#define MC_CMD_FC_OP_MAC 0x3
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_READ32 0x4
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_WRITE32 0x5
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_READ 0x6
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_WRITE 0x7
+/* enum: FC firmware version */
+#define MC_CMD_FC_OP_GET_VERSION 0x8
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_RX_READ 0x9
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
+/* enum: SFP parameters */
+#define MC_CMD_FC_OP_SFP 0xb
+/* enum: DDR3 test */
+#define MC_CMD_FC_OP_DDR_TEST 0xc
+/* enum: Get Crash context from FC */
+#define MC_CMD_FC_OP_GET_ASSERT 0xd
+/* enum: Get FPGA Build registers */
+#define MC_CMD_FC_OP_FPGA_BUILD 0xe
+/* enum: Read map support commands */
+#define MC_CMD_FC_OP_READ_MAP 0xf
+/* enum: FC Capabilities */
+#define MC_CMD_FC_OP_CAPABILITIES 0x10
+/* enum: FC Global flags */
+#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
+/* enum: FC IO using relative addressing modes */
+#define MC_CMD_FC_OP_IO_REL 0x12
+/* enum: FPGA link information */
+#define MC_CMD_FC_OP_UHLINK 0x13
+/* enum: Configure loopbacks and link on FPGA ports */
+#define MC_CMD_FC_OP_SET_LINK 0x14
+/* enum: Licensing operations relating to AOE */
+#define MC_CMD_FC_OP_LICENSE 0x15
+/* enum: Startup information to the FC */
+#define MC_CMD_FC_OP_STARTUP 0x16
+/* enum: Configure a DMA read */
+#define MC_CMD_FC_OP_DMA 0x17
+/* enum: Configure a timed read */
+#define MC_CMD_FC_OP_TIMED_READ 0x18
+/* enum: Control UART logging */
+#define MC_CMD_FC_OP_LOG 0x19
+/* enum: Get the value of a given clock_id */
+#define MC_CMD_FC_OP_CLOCK 0x1a
+/* enum: DDR3/QDR3 parameters */
+#define MC_CMD_FC_OP_DDR 0x1b
+/* enum: PTP and timestamp control */
+#define MC_CMD_FC_OP_TIMESTAMP 0x1c
+/* enum: Commands for SPI Flash interface */
+#define MC_CMD_FC_OP_SPI 0x1d
+/* enum: Commands for diagnostic components */
+#define MC_CMD_FC_OP_DIAG 0x1e
+/* enum: External AOE port. */
+#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
+/* enum: Internal AOE port. */
+#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
+
+/* MC_CMD_FC_IN_NULL msgrequest */
+#define MC_CMD_FC_IN_NULL_LEN 4
+#define MC_CMD_FC_IN_CMD_OFST 0
+#define MC_CMD_FC_IN_CMD_LEN 4
+
+/* MC_CMD_FC_IN_PHY msgrequest */
+#define MC_CMD_FC_IN_PHY_LEN 5
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FC PHY driver operation code */
+#define MC_CMD_FC_IN_PHY_OP_OFST 4
+#define MC_CMD_FC_IN_PHY_OP_LEN 1
+/* enum: PHY init handler */
+#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
+/* enum: PHY reconfigure handler */
+#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
+/* enum: PHY reboot handler */
+#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
+/* enum: PHY get_supported_cap handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
+/* enum: PHY get_config handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
+/* enum: PHY get_media_info handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
+/* enum: PHY set_led handler */
+#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
+/* enum: PHY lasi_interrupt handler */
+#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
+/* enum: PHY check_link handler */
+#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
+/* enum: PHY fill_stats handler */
+#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
+/* enum: PHY bpx_link_state_changed handler */
+#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
+/* enum: PHY get_state handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
+/* enum: PHY start_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
+/* enum: PHY poll_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
+/* enum: PHY nvram_test handler */
+#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
+/* enum: PHY relinquish handler */
+#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
+/* enum: PHY read connection from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
+/* enum: PHY read flags from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
+
+/* MC_CMD_FC_IN_PHY_INIT msgrequest */
+#define MC_CMD_FC_IN_PHY_INIT_LEN 4
+#define MC_CMD_FC_IN_PHY_CMD_OFST 0
+#define MC_CMD_FC_IN_PHY_CMD_LEN 4
+
+/* MC_CMD_FC_IN_MAC msgrequest */
+#define MC_CMD_FC_IN_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
+#define MC_CMD_FC_IN_MAC_HEADER_LEN 4
+#define MC_CMD_FC_IN_MAC_OP_LBN 0
+#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
+/* enum: MAC reconfigure handler */
+#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
+/* enum: MAC Set command - same as MC_CMD_SET_MAC */
+#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
+/* enum: MAC statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
+/* enum: MAC RX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
+/* enum: MAC TX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
+/* enum: MAC Read status */
+#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
+/* enum: External FPGA port. */
+#define MC_CMD_FC_PORT_EXT 0x0
+/* enum: Internal Siena-facing FPGA ports. */
+#define MC_CMD_FC_PORT_INT 0x1
+#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
+
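+/* Illustrative sketch, not part of the generated definitions: packing the
+ * MC_CMD_FC_IN_MAC HEADER dword from its sub-fields using the LBN (lowest
+ * bit number) definitions above. The helper name is an assumption for
+ * illustration; each value is assumed to fit its 8-bit field.
+ */
+static inline unsigned int
+fc_mac_header_pack(unsigned int op, unsigned int port_type,
+    unsigned int port_idx, unsigned int cmd_format)
+{
+	return (op << MC_CMD_FC_IN_MAC_OP_LBN) |
+	    (port_type << MC_CMD_FC_IN_MAC_PORT_TYPE_LBN) |
+	    (port_idx << MC_CMD_FC_IN_MAC_PORT_IDX_LBN) |
+	    (cmd_format << MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN);
+}
+
+/* For example, fc_mac_header_pack(MC_CMD_FC_OP_MAC_OP_GET_STATS,
+ * MC_CMD_FC_PORT_EXT, 0, MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE) selects
+ * the statistics operation on external port 0.
+ */
+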
+/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
+#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+/* MTU size */
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_LEN 4
+/* Drain Tx FIFO */
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_LEN 4
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_LEN 4
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_LEN 4
+
+/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
+#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_MAC_HEADER_LEN 4 */
+/* MC Statistics index */
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_LEN 4
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_LEN 4
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
+/* Number of statistics to read */
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_LEN 4
+#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
+#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
+
+/* MC_CMD_FC_IN_READ32 msgrequest */
+#define MC_CMD_FC_IN_READ32_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_READ32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_READ32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
+#define MC_CMD_FC_IN_READ32_NUMWORDS_LEN 4
+
+/* MC_CMD_FC_IN_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16
+#define MC_CMD_FC_IN_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
+#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
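+/* Worked example: a WRITE32 request carrying num 32-bit words occupies
+ * MC_CMD_FC_IN_WRITE32_LEN(num) = 12 + 4 * num bytes, so the bounds above
+ * follow from BUFFER_MINNUM/MAXNUM: 12 + 4 * 1 = 16 (LENMIN) and
+ * 12 + 4 * 60 = 252 (LENMAX).
+ */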
+
+/* MC_CMD_FC_IN_TRC_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_READ_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_LEN 4
+
+/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
+
+/* MC_CMD_FC_IN_GET_VERSION msgrequest */
+#define MC_CMD_FC_IN_GET_VERSION_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_LEN 4
+
+/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
+
+/* MC_CMD_FC_IN_SFP msgrequest */
+#define MC_CMD_FC_IN_SFP_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Link speed in Mbit/s: 100, 1000, 10000 or 40000 */
+#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
+#define MC_CMD_FC_IN_SFP_SPEED_LEN 4
+/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_LEN 4
+/* Not relevant for cards with QSFP modules. For older cards, true if module is
+ * a dual speed SFP+ module.
+ */
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_LEN 4
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
+#define MC_CMD_FC_IN_SFP_PRESENT_LEN 4
+/* The type of the SFP+ Module. For later cards with QSFP modules, this field
+ * is unused and the type is communicated by other means.
+ */
+#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
+#define MC_CMD_FC_IN_SFP_TYPE_LEN 4
+/* Capabilities corresponding to the bits set to 1. */
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+#define MC_CMD_FC_IN_SFP_CAPS_LEN 4
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_LEN 4
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+#define MC_CMD_FC_IN_DDR_TEST_CMD_LEN 4
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_LEN 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_LEN 4
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_LEN 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get the build registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: Get the services registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAP_INDEX_LEN 4
+
+/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
+#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+
+/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_LEN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
+
+/* MC_CMD_FC_IN_IO_REL msgrequest */
+#define MC_CMD_FC_IN_IO_REL_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
+#define MC_CMD_FC_IN_IO_REL_HEADER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
+#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
+/* enum: Get the base address that the FC applies to relative commands */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
+/* enum: Read data */
+#define MC_CMD_FC_IN_IO_REL_READ32 0x2
+/* enum: Write data */
+#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
+/* enum: Application address space */
+#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
+/* enum: Flash address space */
+#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
+
+/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+
+/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_LEN 4
+
+/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_LEN 4 */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
+
+/* MC_CMD_FC_IN_UHLINK msgrequest */
+#define MC_CMD_FC_IN_UHLINK_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
+#define MC_CMD_FC_IN_UHLINK_HEADER_LEN 4
+#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
+#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
+/* enum: Get PHY configuration info */
+#define MC_CMD_FC_OP_UHLINK_PHY 0x1
+/* enum: Get MAC configuration info */
+#define MC_CMD_FC_OP_UHLINK_MAC 0x2
+/* enum: Get Rx eye table */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
+/* enum: Dump Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
+/* enum: Read Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
+/* enum: Retune Rx settings */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
+/* enum: Set loopback mode on FPGA port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
+/* enum: Get loopback mode config state on FPGA port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
+#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
+#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_LEN 4
+#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_LEN 4
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_LEN 4
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_LEN 4
+#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_LEN 4
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_LEN 4
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_LEN 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_LEN 4
+
+/* MC_CMD_FC_IN_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_SET_LINK_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
+#define MC_CMD_FC_IN_SET_LINK_MODE_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
+#define MC_CMD_FC_IN_SET_LINK_SPEED_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_LEN 4
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
+
+/* MC_CMD_FC_IN_LICENSE msgrequest */
+#define MC_CMD_FC_IN_LICENSE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
+#define MC_CMD_FC_IN_LICENSE_OP_LEN 4
+#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
+#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
+
+/* MC_CMD_FC_IN_STARTUP msgrequest */
+#define MC_CMD_FC_IN_STARTUP_LEN 40
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
+#define MC_CMD_FC_IN_STARTUP_BASE_LEN 4
+#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
+#define MC_CMD_FC_IN_STARTUP_LENGTH_LEN 4
+/* Length of identifier */
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_LEN 4
+/* Identifier for AOE FPGA */
+#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
+#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
+#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
+
+/* MC_CMD_FC_IN_DMA msgrequest */
+#define MC_CMD_FC_IN_DMA_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DMA_OP_OFST 4
+#define MC_CMD_FC_IN_DMA_OP_LEN 4
+#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
+#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DMA_STOP msgrequest */
+#define MC_CMD_FC_IN_DMA_STOP_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* MC_CMD_FC_IN_DMA_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_DMA_READ msgrequest */
+#define MC_CMD_FC_IN_DMA_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* MC_CMD_FC_IN_DMA_OP_LEN 4 */
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_LEN 4
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_LEN 4
+
+/* MC_CMD_FC_IN_TIMED_READ msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
+#define MC_CMD_FC_IN_TIMED_READ_OP_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
+
+/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_LEN 4
+/* Address in the host into which to transfer data */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_LEN 4
+/* Length of host transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_LEN 4
+/* Offset back from AOE_ADDRESS at which to apply the operation */
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_LEN 4
+/* Data to apply at offset */
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_LEN 4
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
+/* Period at which reads are performed (100ms units) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_LEN 4
+
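+/* Illustrative sketch, not part of the generated definitions: 64-bit MCDI
+ * fields such as HOST_DMA_ADDRESS are carried as two little-endian dwords at
+ * the _LO_OFST and _HI_OFST offsets above. The helper below, an assumption
+ * for illustration only, splits a 64-bit host address into the request
+ * payload (payload points at the start of the msgrequest).
+ */
+static inline void
+fc_timed_read_set_host_dma_address(unsigned char *payload,
+    unsigned long long addr)
+{
+	unsigned int lo = (unsigned int)(addr & 0xffffffffu);
+	unsigned int hi = (unsigned int)(addr >> 32);
+	unsigned int i;
+
+	/* Write each dword byte by byte in little-endian order. */
+	for (i = 0; i < 4; i++) {
+		payload[MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST + i] =
+		    (unsigned char)(lo >> (8 * i));
+		payload[MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST + i] =
+		    (unsigned char)(hi >> (8 * i));
+	}
+}
+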
+/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_LEN 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_IN_LOG msgrequest */
+#define MC_CMD_FC_IN_LOG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_LOG_OP_OFST 4
+#define MC_CMD_FC_IN_LOG_OP_LEN 4
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
+#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
+
+/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* MC_CMD_FC_IN_LOG_OP_LEN 4 */
+/* Partition offset into flash */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_LEN 4
+/* Partition length */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_LEN 4
+/* Partition erase size */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_LEN 4
+
+/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* MC_CMD_FC_IN_LOG_OP_LEN 4 */
+/* Enable/disable printing to JTAG UART */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_LEN 4
+
+/* MC_CMD_FC_IN_CLOCK msgrequest: Perform a clock operation */
+#define MC_CMD_FC_IN_CLOCK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
+#define MC_CMD_FC_IN_CLOCK_OP_LEN 4
+#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
+#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
+#define MC_CMD_FC_IN_CLOCK_ID_LEN 4
+#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
+
+/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest: Retrieve the clock value of the
+ * specified clock
+ */
+#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */
+
+/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest: Set the clock value of the specified
+ * clock
+ */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_OP_LEN 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+/* MC_CMD_FC_IN_CLOCK_ID_LEN 4 */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_LEN 4
+
+/* MC_CMD_FC_IN_DDR msgrequest */
+#define MC_CMD_FC_IN_DDR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DDR_OP_OFST 4
+#define MC_CMD_FC_IN_DDR_OP_LEN 4
+#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_OFST 8
+#define MC_CMD_FC_IN_DDR_BANK_LEN 4
+#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
+#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+/* Flags */
+#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
+#define MC_CMD_FC_IN_DDR_FLAGS_LEN 4
+#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
+/* 128-byte page of serial presence detect data read from module's EEPROM */
+#define MC_CMD_FC_IN_DDR_SPD_OFST 16
+#define MC_CMD_FC_IN_DDR_SPD_LEN 1
+#define MC_CMD_FC_IN_DDR_SPD_NUM 128
+/* Page index of the SPD data copied into MC_CMD_FC_IN_DDR_SPD */
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_LEN 4
+
+/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+/* Size of DDR */
+#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
+#define MC_CMD_FC_IN_DDR_SIZE_LEN 4
+
+/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
+#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* MC_CMD_FC_IN_DDR_OP_LEN 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* MC_CMD_FC_IN_DDR_BANK_LEN 4 */
+
+/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* FC timestamp operation code */
+#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_OP_LEN 4
+/* enum: Read transmit timestamp(s) */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
+/* enum: Read snapshot timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
+/* enum: Clear all transmit timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_LEN 4
+/* Controls filtering of the returned timestamp and sequence number, as
+ * specified here
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_LEN 4
+/* enum: Return most recent timestamp. No filtering */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
+/* enum: Match timestamp against the PTP clock ID, port number and sequence
+ * number specified
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
+/* Clock identity of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
+/* Port number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_LEN 4
+/* Sequence number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_LEN 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_LEN 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_LEN 4
+
+/* MC_CMD_FC_IN_SPI msgrequest */
+#define MC_CMD_FC_IN_SPI_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Basic commands for SPI Flash. */
+#define MC_CMD_FC_IN_SPI_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_OP_LEN 4
+/* enum: SPI Flash read */
+#define MC_CMD_FC_IN_SPI_READ 0x0
+/* enum: SPI Flash write */
+#define MC_CMD_FC_IN_SPI_WRITE 0x1
+/* enum: SPI Flash erase */
+#define MC_CMD_FC_IN_SPI_ERASE 0x2
+
+/* MC_CMD_FC_IN_SPI_READ msgrequest */
+#define MC_CMD_FC_IN_SPI_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_READ_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_READ_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_LEN 4
+
+/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
+#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
+#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
+#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_WRITE_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
+#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_ERASE_OP_LEN 4
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_LEN 4
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_LEN 4
+
+/* MC_CMD_FC_IN_DIAG msgrequest */
+#define MC_CMD_FC_IN_DIAG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+/* Operation code indicating component type */
+#define MC_CMD_FC_IN_DIAG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_OP_LEN 4
+/* enum: Power noise generator. */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
+/* enum: DDR soak test component. */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
+/* enum: Diagnostics datapath control component. */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_LEN 4
+/* enum: Read the configuration (the 32-bit values in each of the clock enable
+ * count and toggle count registers)
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
+/* enum: Write a new configuration to the clock enable count and toggle count
+ * registers
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_LEN 4
+/* The 32-bit value to be written to the toggle count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_LEN 4
+/* The 32-bit value to be written to the clock enable count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_LEN 4
+/* enum: Starts DDR soak test on selected banks */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
+/* enum: Read status of DDR soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
+/* enum: Stop test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
+/* enum: Set or clear bit that triggers fake errors. These cause subsequent
+ * tests to fail until the bit is cleared.
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_LEN 4
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_LEN 4
+/* Pattern to use in the soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
+/* Either multiple automatic tests until a STOP command is issued, or a
+ * single test
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_LEN 4
+/* DDR bank to read status from */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_LEN 4
+#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
+#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
+#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
+#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
+#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_LEN 4
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_LEN 4
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_LEN 4
+/* Mask of DDR banks to set/clear error flag on */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_LEN 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_LEN 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_LEN 4
+/* enum: Set a known datapath configuration */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
+/* enum: Apply raw config to datapath control registers */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_LEN 4
+/* Datapath configuration identifier */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CMD_LEN 4 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_LEN 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_LEN 4
+/* Value to write into control register 1 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_LEN 4
+/* Value to write into control register 2 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_LEN 4
+/* Value to write into control register 3 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_LEN 4
+
+/* MC_CMD_FC_OUT msgresponse */
+#define MC_CMD_FC_OUT_LEN 0
+
+/* MC_CMD_FC_OUT_NULL msgresponse */
+#define MC_CMD_FC_OUT_NULL_LEN 0
+
+/* MC_CMD_FC_OUT_READ32 msgresponse */
+#define MC_CMD_FC_OUT_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_TRC_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_READ_LEN 16
+#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
+
+/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
+#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_LEN 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
+
+/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
+#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_LEN 4
+
+/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
+#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
+#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_RX_NSTATS 0x19
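+
+/* Worked example: the MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN expression above
+ * reduces to (64 * MC_CMD_FC_MAC_RX_NSTATS) / 8 bytes, i.e. one 64-bit
+ * counter per statistic: with NSTATS = 0x19 (25), the response is
+ * 25 * 8 = 200 bytes. The TX and GET_STATS responses follow the same
+ * pattern.
+ */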
+
+/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
+#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
+#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_TX_NSTATS 0x16
+
+/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
+/* MAC Statistics */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_MAC msgresponse */
+#define MC_CMD_FC_OUT_MAC_LEN 0
+
+/* MC_CMD_FC_OUT_SFP msgresponse */
+#define MC_CMD_FC_OUT_SFP_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
+/* enum: Test not yet initiated */
+#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
+/* enum: Test is in progress */
+#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
+/* enum: Test completed */
+#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
+/* enum: Test did not complete in specified time */
+#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
+/* Test result from FPGA */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_LEN 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
+
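Throughout this header, _LBN/_WIDTH pairs locate bitfields inside the dword named by the matching _OFST/_LEN defines (LBN is the least-significant bit number). A minimal sketch of a generic extractor and its use on the DDR test CODE field, assuming a hypothetical little-endian response buffer resp and field widths below 32:

	static uint32_t mcdi_field(const uint8_t *resp, unsigned int ofst,
				   unsigned int lbn, unsigned int width)
	{
		uint32_t dword;

		memcpy(&dword, resp + ofst, 4);
		return (dword >> lbn) & ((1u << width) - 1);
	}

	uint32_t code = mcdi_field(resp,
	    MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST,
	    MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN,
	    MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH);

	if (code == MC_CMD_FC_OP_DDR_TEST_SUCCESS)
		/* test completed in time */;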
+/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
+
+/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
+#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
+/* enum: No crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
+/* enum: New crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
+/* enum: Crash data has been sent */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
+/* enum: Crash due to exception. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
+/* enum: Crash due to assertion. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
+/* Failing PC value */
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_LEN 4
+/* Instruction at which exception occurred */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_LEN 4
+/* Bad address that triggered an address-based exception */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_LEN 4
+
+/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
+#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
+#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
+#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
+
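COMPONENT_INFO packs the component id and a MAJOR.MINOR.BUILD version into one dword. A minimal sketch of formatting it, reusing the hypothetical mcdi_field() helper sketched above (plus <stdio.h>):

	unsigned int major = mcdi_field(resp,
	    MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST,
	    MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN,
	    MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH);
	unsigned int minor = mcdi_field(resp,
	    MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST,
	    MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN,
	    MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH);
	unsigned int build = mcdi_field(resp,
	    MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST,
	    MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN,
	    MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH);
	char ver[16];

	snprintf(ver, sizeof(ver), "%u.%u.%u", major, minor, build);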
+/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
+#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_LEN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
+#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
+/* Qsys system ID */
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_LEN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
+
+/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
+/* Number of maps */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_LEN 4
+
+/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
+/* Index of the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_LEN 4
+/* Options for the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_LEN 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
+/* Address of start of map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
+/* Length of address map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
+/* Component information field */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_LEN 4
+/* License expiry date for map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
+/* Name of the component */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
+
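The OPTIONS dword mixes a two-bit alignment code (select it with ALIGN_MASK) with independent PATH/PERM/LICENSE flag bits. A minimal sketch of decoding it, assuming a hypothetical options value already read from offset 4 of the response:

	unsigned int align_code = options &
	    MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK;
	unsigned int align_bytes = 8u << align_code; /* ALIGN_8..ALIGN_64 */
	int readable = !!(options & MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ);
	int writable = !!(options & MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE);
	int licensed = !!(options & MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED);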
+/* MC_CMD_FC_OUT_READ_MAP msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_LEN 0
+
+/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
+#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
+/* Number of internal ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_LEN 4
+/* Number of external ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_LEN 4
+
+/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_LEN 4
+
+/* MC_CMD_FC_OUT_IO_REL msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_LEN 0
+
+/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_LEN 4
+
+/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
+
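Variable-length responses carry LENMIN/LENMAX byte bounds, a LEN(num) macro giving the payload size for num array entries, and MINNUM/MAXNUM bounds on the entry count. A minimal length-validation sketch, assuming a hypothetical resp_len in bytes:

	unsigned int num = resp_len / MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN;

	if (resp_len < MC_CMD_FC_OUT_IO_REL_READ32_LENMIN ||
	    resp_len > MC_CMD_FC_OUT_IO_REL_READ32_LENMAX ||
	    resp_len != MC_CMD_FC_OUT_IO_REL_READ32_LEN(num))
		return -EINVAL; /* malformed response */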
+/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
+/* Transceiver Transmit settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
+/* Transceiver Receive settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_LEN 4
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFP parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed in Mbps: 100, 1000 or 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_LEN 4
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_LEN 4
+/* True if a dual-speed SFP+ module is fitted */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_LEN 4
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_LEN 4
+/* The type of the SFP+ Module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_LEN 4
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_LEN 4
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_LEN 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_LEN 4
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
+/* Whether the eye plot dump has completed and the returned data is valid */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_LEN 4
+/* Rx Eye binary plot */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_LEN 4
+
+/* MC_CMD_FC_OUT_UHLINK msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LEN 0
+
+/* MC_CMD_FC_OUT_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_LICENSE msgresponse */
+#define MC_CMD_FC_OUT_LICENSE_LEN 12
+/* Count of valid keys */
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_LEN 4
+/* Count of invalid keys */
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_LEN 4
+/* Count of blacklisted keys */
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_LEN 4
+
+/* MC_CMD_FC_OUT_STARTUP msgresponse */
+#define MC_CMD_FC_OUT_STARTUP_LEN 4
+/* Capabilities of the FPGA/FC */
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_LEN 4
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
+
+/* MC_CMD_FC_OUT_DMA_READ msgresponse */
+#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
+#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
+#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
+/* The data read */
+#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
+
+/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
+/* Timer handle */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_LEN 4
+
+/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_LEN 4
+/* Host address into which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_LEN 4
+/* Length of host transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_LEN 4
+/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_LEN 4
+/* When active, the read start time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
+/* When active, the read end time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
+
+/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
+#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
+
+/* MC_CMD_FC_OUT_LOG msgresponse */
+#define MC_CMD_FC_OUT_LOG_LEN 0
+
+/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_LEN 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_LEN 4
+
+/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_LEN 4
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_LEN 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
+
+/* MC_CMD_FC_OUT_SPI_READ msgresponse */
+#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
+#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
+#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
+#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
+#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
+/* The 32-bit value read from the toggle count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_LEN 4
+/* The 32-bit value read from the clock enable count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_LEN 4
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
+/* DDR soak test status word; bits [4:0] are relevant. */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
+/* DDR soak test error count */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_LEN 4
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
+
+
+/***********************************/
+/* MC_CMD_AOE
+ * AOE operations on MC
+ */
+#define MC_CMD_AOE 0xa
+
+/* MC_CMD_AOE_IN msgrequest */
+#define MC_CMD_AOE_IN_LEN 4
+#define MC_CMD_AOE_IN_OP_HDR_OFST 0
+#define MC_CMD_AOE_IN_OP_HDR_LEN 4
+#define MC_CMD_AOE_IN_OP_LBN 0
+#define MC_CMD_AOE_IN_OP_WIDTH 8
+/* enum: FPGA and CPLD information */
+#define MC_CMD_AOE_OP_INFO 0x1
+/* enum: Currents and voltages read from MCP3424s; DEBUG */
+#define MC_CMD_AOE_OP_CURRENTS 0x2
+/* enum: Temperatures at locations around the PCB; DEBUG */
+#define MC_CMD_AOE_OP_TEMPERATURES 0x3
+/* enum: Set CPLD to idle */
+#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
+/* enum: Read from CPLD register */
+#define MC_CMD_AOE_OP_CPLD_READ 0x5
+/* enum: Write to CPLD register */
+#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
+/* enum: Execute CPLD instruction */
+#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
+/* enum: Reprogram the CPLD on the AOE device */
+#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
+/* enum: AOE power control */
+#define MC_CMD_AOE_OP_POWER 0x9
+/* enum: AOE image loading */
+#define MC_CMD_AOE_OP_LOAD 0xa
+/* enum: Fan monitoring */
+#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
+/* enum: Fan failures since last reset */
+#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
+/* enum: Get generic AOE MAC statistics */
+#define MC_CMD_AOE_OP_MAC_STATS 0xd
+/* enum: Retrieve PHY specific information */
+#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
+/* enum: Write a number of JTAG primitive commands; the response returns data */
+#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
+/* enum: Control access to the FPGA via the Siena JTAG Chain */
+#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
+/* enum: Set the MTU offset between Siena and AOE MACs */
+#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
+/* enum: How link state is handled */
+#define MC_CMD_AOE_OP_LINK_STATE 0x12
+/* enum: How Siena MAC statistics are reported (deprecated - use
+ * MC_CMD_AOE_OP_ASIC_STATS)
+ */
+#define MC_CMD_AOE_OP_SIENA_STATS 0x13
+/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
+ * command MC_CMD_AOE_OP_SIENA_STATS
+ */
+#define MC_CMD_AOE_OP_ASIC_STATS 0x13
+/* enum: DDR memory information */
+#define MC_CMD_AOE_OP_DDR 0x14
+/* enum: FC control */
+#define MC_CMD_AOE_OP_FC 0x15
+/* enum: DDR ECC status reads */
+#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
+/* enum: Commands for MC-SPI Master emulation */
+#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
+/* enum: Commands for FC boot control */
+#define MC_CMD_AOE_OP_FC_BOOT 0x18
+/* enum: Get number of internal ports */
+#define MC_CMD_AOE_OP_GET_ASIC_PORTS 0x19
+/* enum: Get FC assert information and register dump */
+#define MC_CMD_AOE_OP_GET_FC_ASSERT_INFO 0x1a
+
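Every MC_CMD_AOE request begins with a 32-bit header dword whose low eight bits (OP_LBN 0, OP_WIDTH 8) select the sub-operation from the enum list above. A minimal sketch of building an INFO request, assuming a hypothetical little-endian request buffer:

	uint8_t req[MC_CMD_AOE_IN_INFO_LEN];
	uint32_t hdr = MC_CMD_AOE_OP_INFO << MC_CMD_AOE_IN_OP_LBN;

	memset(req, 0, sizeof(req));
	memcpy(req + MC_CMD_AOE_IN_OP_HDR_OFST, &hdr,
	       MC_CMD_AOE_IN_OP_HDR_LEN);
	/* submit with command code MC_CMD_AOE via the driver's MCDI path */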
+/* MC_CMD_AOE_OUT msgresponse */
+#define MC_CMD_AOE_OUT_LEN 0
+
+/* MC_CMD_AOE_IN_INFO msgrequest */
+#define MC_CMD_AOE_IN_INFO_LEN 4
+#define MC_CMD_AOE_IN_CMD_OFST 0
+#define MC_CMD_AOE_IN_CMD_LEN 4
+
+/* MC_CMD_AOE_IN_CURRENTS msgrequest */
+#define MC_CMD_AOE_IN_CURRENTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
+#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
+#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_LEN 4
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_LEN 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_LEN 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_LEN 4
+
+/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_LEN 4
+/* enum: Reprogram CPLD, poll for completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
+/* enum: Reprogram CPLD, send event on completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
+/* enum: Get status of reprogramming operation */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
+
+/* MC_CMD_AOE_IN_POWER msgrequest */
+#define MC_CMD_AOE_IN_POWER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Turn on or off AOE power */
+#define MC_CMD_AOE_IN_POWER_OP_OFST 4
+#define MC_CMD_AOE_IN_POWER_OP_LEN 4
+/* enum: Turn off FPGA power */
+#define MC_CMD_AOE_IN_POWER_OFF 0x0
+/* enum: Turn on FPGA power */
+#define MC_CMD_AOE_IN_POWER_ON 0x1
+/* enum: Clear peak power measurement */
+#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
+/* enum: Show current power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
+/* enum: Show peak power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
+/* enum: Show most recent DDR current reading */
+#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
+/* enum: Show peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
+/* enum: Clear peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
+
+/* MC_CMD_AOE_IN_LOAD msgrequest */
+#define MC_CMD_AOE_IN_LOAD_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Image to load in the normal sequence: 0 = main, 1 = diagnostic */
+#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
+#define MC_CMD_AOE_IN_LOAD_IMAGE_LEN 4
+
+/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
+#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* If non-zero, report measured fan RPM rather than nominal */
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_LEN 4
+
+/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
+#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
+#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port */
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_LEN 4
+/* Host memory address for statistics */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_LEN 4
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
+/* Length of DMA data (optional) */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_LEN 4
+
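The CMD dword at offset 16 combines one-bit control flags with a 16-bit polling period (PERIOD_MS at bit 16), so enabling periodic DMA means OR-ing flag bits with the shifted period. A minimal sketch, assuming a hypothetical req buffer and a 1000 ms period:

	uint32_t cmd = (1u << MC_CMD_AOE_IN_MAC_STATS_DMA_LBN) |
		       (1u << MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN) |
		       (1000u << MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN);

	memcpy(req + MC_CMD_AOE_IN_MAC_STATS_CMD_OFST, &cmd,
	       MC_CMD_AOE_IN_MAC_STATS_CMD_LEN);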
+/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_LEN 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_LEN 4
+
+/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Enable or disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_LEN 4
+/* enum: Enable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
+/* enum: Disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
+
+/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* AOE port; when not ALL_EXTERNAL or ALL_INTERNAL, gives the port number */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_LEN 4
+/* enum: Apply to all external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
+/* enum: Apply to all internal ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
+/* The MTU offset to be applied to the external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_LEN 4
+
+/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
+#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_LEN 4
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
+/* enum: AOE and associated external port */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
+/* enum: AOE and OR of all external ports */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
+/* enum: Individual ports */
+#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
+/* enum: Configure link state mode on given AOE port */
+#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
+/* enum: No-op */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
+/* enum: Logical OR of all SFP ports' link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
+/* enum: Logical AND of all SFP ports' link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
+
+/* MC_CMD_AOE_IN_GET_ASIC_PORTS msgrequest */
+#define MC_CMD_AOE_IN_GET_ASIC_PORTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_GET_FC_ASSERT_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_FC_ASSERT_INFO_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
+#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_LEN 4
+/* enum: Statistics from Siena (default) */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
+#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_LEN 4
+/* enum: Statistics from the ASIC (default) */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_DDR msgrequest */
+#define MC_CMD_AOE_IN_DDR_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
+#define MC_CMD_AOE_IN_DDR_BANK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+/* Page index of SPD data */
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_LEN 4
+
+/* MC_CMD_AOE_IN_FC msgrequest */
+#define MC_CMD_AOE_IN_FC_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+
+/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_LEN 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* Basic commands for MC SPI Master emulation. */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_LEN 4
+/* enum: MC SPI read */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
+/* enum: MC SPI write */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_LEN 4
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_LEN 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_LEN 4
+
+/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
+#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* MC_CMD_AOE_IN_CMD_LEN 4 */
+/* FC boot control flags */
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_LEN 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
+
+/* MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_LBN 8
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_WIDTH 8
+/* enum: No crash data available */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0 */
+/* enum: New crash data available */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1 */
+/* enum: Crash data has been sent */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2 */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_LBN 0
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0 */
+/* enum: Crash due to exception. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1 */
+/* enum: Crash due to assertion. */
+/* MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2 */
+/* Failing PC value */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_OFST 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_OFST 8
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_LEN 4
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_OFST 132
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_TYPE_OFFS_LEN 4
+/* Instruction at which exception occurred */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_OFST 136
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_PC_ADDR_OFFS_LEN 4
+/* Bad address that triggered an address-based exception */
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+#define MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_EXCEPTION_BAD_ADDR_OFFS_LEN 4
+
+/* MC_CMD_AOE_OUT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_INFO_LEN 44
+/* JTAG IDCODE of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_LEN 4
+/* Version of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_LEN 4
+/* JTAG IDCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_LEN 4
+/* JTAG USERCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_LEN 4
+/* FPGA type - read from CPLD straps */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_LEN 4
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
+/* FPGA state (debug) */
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_LEN 4
+/* FPGA image - partition from which loaded */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_LEN 4
+/* FC state */
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_LEN 4
+/* enum: Set if watchdog working */
+#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
+/* enum: Set if MC-FC communications working */
+#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
+/* Random pieces of information */
+#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
+#define MC_CMD_AOE_OUT_INFO_FLAGS_LEN 4
+/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
+#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
+/* enum: CPLD apparently good */
+#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
+/* enum: FPGA working normally */
+#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
+/* enum: FPGA is powered */
+#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
+/* enum: Board has incompatible SODIMMs fitted */
+#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
+/* enum: Board has ByteBlaster connected */
+#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
+/* enum: FPGA Boot flash has an invalid header. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
+/* enum: FPGA Application flash is accessible. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
+/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_LEN 4
+#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
+/* Result of FC booting - not valid while a ByteBlaster is connected. */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_LEN 4
+/* enum: No error */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
+/* enum: Bad address set in CPLD */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
+/* enum: Bad header */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
+/* enum: Bad text section details */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
+/* enum: Bad checksum */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
+/* enum: Bad BSP */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
+/* enum: FC application loaded and execution attempted */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
+/* enum: FC application started */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
+/* enum: No bootrom in FPGA */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
+
+/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
+#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
+/* Set of currents and voltages (mA or mV as appropriate) */
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
+#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
+
+/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
+#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
+/* Set of temperatures */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
+/* enum: The first set of enum values are for Modena code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
+/* enum: The second set of enum values are for Sorrento code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
+
+/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
+/* The value read from the CPLD */
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_LEN 4
+
+/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
+/* Failure counts for each fan */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
+
+/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
+/* Results of status command (only) */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_LEN 4
+
+/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
+#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
+
+/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
+#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
+
+/* MC_CMD_AOE_OUT_LOAD msgresponse */
+#define MC_CMD_AOE_OUT_LOAD_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
+#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
+ * for details
+ */
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+
+/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_LEN 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
+/* Used to align the in and out data blocks so the MC can re-use the cmd */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_LEN 4
+/* out bytes */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
+#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR msgresponse */
+#define MC_CMD_AOE_OUT_DDR_LENMIN 17
+#define MC_CMD_AOE_OUT_DDR_LENMAX 252
+#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
+/* Information on the module. */
+#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
+/* Memory size, in MB. */
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_LEN 4
+/* The memory type, as reported from SPD information */
+#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
+#define MC_CMD_AOE_OUT_DDR_TYPE_LEN 4
+/* Nominal voltage of the module (as applied) */
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_LEN 4
+/* SPD data read from the module */
+#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
+#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
+
+/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
+#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
+
+/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
+#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
+
+/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
+#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
+#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_FC msgresponse */
+#define MC_CMD_AOE_OUT_FC_LEN 0
+
+/* MC_CMD_AOE_OUT_GET_ASIC_PORTS msgresponse */
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_LEN 4
+/* get the number of internal ports */
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_OFST 0
+#define MC_CMD_AOE_OUT_GET_ASIC_PORTS_COUNT_PORTS_LEN 4
+
+/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
+/* Flags describing status info on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
+/* DDR ECC status on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_LEN 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_LEN 4
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
+
+/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
+#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
+
+#endif /* _SIENA_MC_DRIVER_PCOL_AOE_H */
+/*! \cidoxg_end */
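The header above describes each MCDI field twice over: a byte offset/length pair (_OFST/_LEN) locating a dword within the message, and a bit offset/width pair (_LBN/_WIDTH) locating a bitfield within that dword. A minimal decode sketch follows; the helper name is hypothetical, the buffer is assumed to be in MCDI's little-endian layout on a little-endian host, and real consumers would use the driver's MCDI_OUT_* accessor macros instead.

#include <stdint.h>
#include <string.h>

/* Hypothetical sketch: extract the FC assertion STATE bitfield from a raw
 * MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO response buffer.
 */
static inline uint32_t
aoe_fc_assert_state(const uint8_t *resp)
{
	uint32_t dword;

	/* Locate the 4-byte GLOBAL_FLAGS field by its byte offset. */
	memcpy(&dword,
	    resp + MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_OFST,
	    MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_GLOBAL_FLAGS_LEN);

	/* Shift down to the lowest bit number, mask to the field width. */
	return ((dword >> MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_LBN) &
	    ((1U << MC_CMD_AOE_OUT_GET_FC_ASSERT_INFO_STATE_WIDTH) - 1));
}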
diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c
index c0dcb752..4fd73bab 100644
--- a/drivers/net/sfc/base/efx_rx.c
+++ b/drivers/net/sfc/base/efx_rx.c
@@ -107,7 +107,7 @@ siena_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const efx_rxq_type_data_t *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
@@ -151,7 +151,7 @@ static const efx_rx_ops_t __efx_rx_siena_ops = {
};
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_rx_ops_t __efx_rx_ef10_ops = {
ef10_rx_init, /* erxo_init */
ef10_rx_fini, /* erxo_fini */
@@ -178,7 +178,7 @@ static const efx_rx_ops_t __efx_rx_ef10_ops = {
ef10_rx_qcreate, /* erxo_qcreate */
ef10_rx_qdestroy, /* erxo_qdestroy */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
@@ -220,6 +220,12 @@ efx_rx_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -288,6 +294,94 @@ fail1:
#endif /* EFSYS_OPT_RX_SCATTER */
#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_hash_flags_get(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t hash_alg,
+ __inout_ecount(EFX_RX_HASH_NFLAGS) unsigned int *flags,
+ __out unsigned int *nflagsp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ boolean_t l4;
+ boolean_t additional_modes;
+ unsigned int *entryp = flags;
+ efx_rc_t rc;
+
+ if (flags == NULL || nflagsp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ l4 = encp->enc_rx_scale_l4_hash_supported;
+ additional_modes = encp->enc_rx_scale_additional_modes_supported;
+
+#define LIST_FLAGS(_entryp, _class, _l4_hashing, _additional_modes) \
+ do { \
+ if (_l4_hashing) { \
+ *(_entryp++) = EFX_RX_HASH(_class, 4TUPLE); \
+ \
+ if (_additional_modes) { \
+ *(_entryp++) = \
+ EFX_RX_HASH(_class, 2TUPLE_DST); \
+ *(_entryp++) = \
+ EFX_RX_HASH(_class, 2TUPLE_SRC); \
+ } \
+ } \
+ \
+ *(_entryp++) = EFX_RX_HASH(_class, 2TUPLE); \
+ \
+ if (_additional_modes) { \
+ *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_DST); \
+ *(_entryp++) = EFX_RX_HASH(_class, 1TUPLE_SRC); \
+ } \
+ \
+ *(_entryp++) = EFX_RX_HASH(_class, DISABLE); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+ switch (hash_alg) {
+ case EFX_RX_HASHALG_PACKED_STREAM:
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0)
+ break;
+ /* FALLTHRU */
+ case EFX_RX_HASHALG_TOEPLITZ:
+ if ((encp->enc_rx_scale_hash_alg_mask & (1U << hash_alg)) == 0)
+ break;
+
+ LIST_FLAGS(entryp, IPV4_TCP, l4, additional_modes);
+ LIST_FLAGS(entryp, IPV6_TCP, l4, additional_modes);
+
+ if (additional_modes) {
+ LIST_FLAGS(entryp, IPV4_UDP, l4, additional_modes);
+ LIST_FLAGS(entryp, IPV6_UDP, l4, additional_modes);
+ }
+
+ LIST_FLAGS(entryp, IPV4, B_FALSE, additional_modes);
+ LIST_FLAGS(entryp, IPV6, B_FALSE, additional_modes);
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+#undef LIST_FLAGS
+
+ *nflagsp = (unsigned int)(entryp - flags);
+ EFSYS_ASSERT3U(*nflagsp, <=, EFX_RX_HASH_NFLAGS);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
__checkReturn efx_rc_t
efx_rx_hash_default_support_get(
__in efx_nic_t *enp,
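The new efx_rx_scale_hash_flags_get() lets a caller discover which EFX_RX_HASH() combinations the firmware will accept for a given algorithm before trying to program them. A usage sketch, assuming an initialised efx_nic_t *enp and a hypothetical consume_flag() callback:

	unsigned int flags[EFX_RX_HASH_NFLAGS];
	unsigned int nflags;
	unsigned int i;

	if (efx_rx_scale_hash_flags_get(enp, EFX_RX_HASHALG_TOEPLITZ,
	    flags, &nflags) == 0) {
		/* Each entry is an EFX_RX_HASH(class, tuple) bit pattern
		 * that efx_rx_scale_mode_set() will accept.
		 */
		for (i = 0; i < nflags; i++)
			consume_flag(flags[i]);
	}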
@@ -419,19 +513,82 @@ efx_rx_scale_mode_set(
__in boolean_t insert)
{
const efx_rx_ops_t *erxop = enp->en_erxop;
+ unsigned int type_flags[EFX_RX_HASH_NFLAGS];
+ unsigned int type_nflags;
+ efx_rx_hash_type_t type_check;
+ unsigned int i;
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ /*
+ * Legacy flags and modern bits cannot be
+ * used at the same time in the hash type.
+ */
+ if ((type & EFX_RX_HASH_LEGACY_MASK) &&
+ (type & ~EFX_RX_HASH_LEGACY_MASK)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Translate legacy flags to the new representation
+ * so that chip-specific handlers will consider the
+ * new flags only.
+ */
+ if (type & EFX_RX_HASH_IPV4) {
+ type |= EFX_RX_HASH(IPV4, 2TUPLE);
+ type |= EFX_RX_HASH(IPV4_TCP, 2TUPLE);
+ type |= EFX_RX_HASH(IPV4_UDP, 2TUPLE);
+ }
+
+ if (type & EFX_RX_HASH_TCPIPV4)
+ type |= EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+
+ if (type & EFX_RX_HASH_IPV6) {
+ type |= EFX_RX_HASH(IPV6, 2TUPLE);
+ type |= EFX_RX_HASH(IPV6_TCP, 2TUPLE);
+ type |= EFX_RX_HASH(IPV6_UDP, 2TUPLE);
+ }
+
+ if (type & EFX_RX_HASH_TCPIPV6)
+ type |= EFX_RX_HASH(IPV6_TCP, 4TUPLE);
+
+ type &= ~EFX_RX_HASH_LEGACY_MASK;
+ type_check = type;
+
+ /*
+ * Get the list of supported hash flags and sanitise the input.
+ */
+ rc = efx_rx_scale_hash_flags_get(enp, alg, type_flags, &type_nflags);
+ if (rc != 0)
+ goto fail2;
+
+ for (i = 0; i < type_nflags; ++i) {
+ if ((type_check & type_flags[i]) == type_flags[i])
+ type_check &= ~(type_flags[i]);
+ }
+
+ if (type_check != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
if (erxop->erxo_scale_mode_set != NULL) {
if ((rc = erxop->erxo_scale_mode_set(enp, rss_context, alg,
type, insert)) != 0)
- goto fail1;
+ goto fail4;
}
return (0);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
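Because of the translation step above, the legacy and modern spellings of the same request are interchangeable. Assuming the rss_context-taking prototype from efx.h, these two calls should validate identically, since the first is rewritten into the second before the per-chip handler runs:

	/* Legacy spelling: expanded by the translation above. */
	rc = efx_rx_scale_mode_set(enp, EFX_RSS_CONTEXT_DEFAULT,
	    EFX_RX_HASHALG_TOEPLITZ,
	    EFX_RX_HASH_IPV4 | EFX_RX_HASH_TCPIPV4, B_TRUE);

	/* Modern spelling: what the legacy request becomes. */
	rc = efx_rx_scale_mode_set(enp, EFX_RSS_CONTEXT_DEFAULT,
	    EFX_RX_HASHALG_TOEPLITZ,
	    EFX_RX_HASH(IPV4, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 2TUPLE) |
	    EFX_RX_HASH(IPV4_UDP, 2TUPLE) | EFX_RX_HASH(IPV4_TCP, 4TUPLE),
	    B_TRUE);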
@@ -594,7 +751,7 @@ efx_rx_qcreate_internal(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const efx_rxq_type_data_t *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
@@ -655,8 +812,8 @@ efx_rx_qcreate(
__in efx_evq_t *eep,
__deref_out efx_rxq_t **erpp)
{
- return efx_rx_qcreate_internal(enp, index, label, type, 0, esmp, ndescs,
- id, flags, eep, erpp);
+ return efx_rx_qcreate_internal(enp, index, label, type, NULL,
+ esmp, ndescs, id, flags, eep, erpp);
}
#if EFSYS_OPT_RX_PACKED_STREAM
@@ -672,13 +829,71 @@ efx_rx_qcreate_packed_stream(
__in efx_evq_t *eep,
__deref_out efx_rxq_t **erpp)
{
+ efx_rxq_type_data_t type_data;
+
+ memset(&type_data, 0, sizeof(type_data));
+
+ type_data.ertd_packed_stream.eps_buf_size = ps_buf_size;
+
return efx_rx_qcreate_internal(enp, index, label,
- EFX_RXQ_TYPE_PACKED_STREAM, ps_buf_size, esmp, ndescs,
+ EFX_RXQ_TYPE_PACKED_STREAM, &type_data, esmp, ndescs,
0 /* id unused on EF10 */, EFX_RXQ_FLAG_NONE, eep, erpp);
}
#endif
+#if EFSYS_OPT_RX_ES_SUPER_BUFFER
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate_es_super_buffer(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in uint32_t n_bufs_per_desc,
+ __in uint32_t max_dma_len,
+ __in uint32_t buf_stride,
+ __in uint32_t hol_block_timeout,
+ __in efsys_mem_t *esmp,
+ __in size_t ndescs,
+ __in unsigned int flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ efx_rc_t rc;
+ efx_rxq_type_data_t type_data;
+
+ if (hol_block_timeout > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ memset(&type_data, 0, sizeof(type_data));
+
+ type_data.ertd_es_super_buffer.eessb_bufs_per_desc = n_bufs_per_desc;
+ type_data.ertd_es_super_buffer.eessb_max_dma_len = max_dma_len;
+ type_data.ertd_es_super_buffer.eessb_buf_stride = buf_stride;
+ type_data.ertd_es_super_buffer.eessb_hol_block_timeout =
+ hol_block_timeout;
+
+ rc = efx_rx_qcreate_internal(enp, index, label,
+ EFX_RXQ_TYPE_ES_SUPER_BUFFER, &type_data, esmp, ndescs,
+ 0 /* id unused on EF10 */, flags, eep, erpp);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif
+
+
void
efx_rx_qdestroy(
__in efx_rxq_t *erp)
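efx_rx_qcreate_es_super_buffer() packs the equal-stride parameters into the new efx_rxq_type_data_t and then reuses the common creation path. A call sketch; the numeric values are illustrative assumptions, not recommendations:

	efx_rxq_t *erp;
	efx_rc_t rc;

	rc = efx_rx_qcreate_es_super_buffer(enp, index, label,
	    32,		/* n_bufs_per_desc: packet buffers per descriptor */
	    2048,	/* max_dma_len: usable DMA bytes per packet buffer */
	    4096,	/* buf_stride: spacing between buffer starts */
	    0,		/* hol_block_timeout: checked against
			 * EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX */
	    esmp, ndescs, EFX_RXQ_FLAG_NONE, eep, &erp);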
@@ -875,6 +1090,10 @@ siena_rx_scale_mode_set(
__in efx_rx_hash_type_t type,
__in boolean_t insert)
{
+ efx_rx_hash_type_t type_ipv4 = EFX_RX_HASH(IPV4, 2TUPLE);
+ efx_rx_hash_type_t type_ipv4_tcp = EFX_RX_HASH(IPV4_TCP, 4TUPLE);
+ efx_rx_hash_type_t type_ipv6 = EFX_RX_HASH(IPV6, 2TUPLE);
+ efx_rx_hash_type_t type_ipv6_tcp = EFX_RX_HASH(IPV6_TCP, 4TUPLE);
efx_rc_t rc;
if (rss_context != EFX_RSS_CONTEXT_DEFAULT) {
@@ -889,12 +1108,12 @@ siena_rx_scale_mode_set(
case EFX_RX_HASHALG_TOEPLITZ:
EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
- type & EFX_RX_HASH_IPV4,
- type & EFX_RX_HASH_TCPIPV4);
+ (type & type_ipv4) == type_ipv4,
+ (type & type_ipv4_tcp) == type_ipv4_tcp);
EFX_RX_TOEPLITZ_IPV6_HASH(enp,
- type & EFX_RX_HASH_IPV6,
- type & EFX_RX_HASH_TCPIPV6,
+ (type & type_ipv6) == type_ipv6,
+ (type & type_ipv6_tcp) == type_ipv6_tcp,
rc);
if (rc != 0)
goto fail2;
@@ -1320,7 +1539,7 @@ siena_rx_qcreate(
__in unsigned int index,
__in unsigned int label,
__in efx_rxq_type_t type,
- __in uint32_t type_data,
+ __in const efx_rxq_type_data_t *type_data,
__in efsys_mem_t *esmp,
__in size_t ndescs,
__in uint32_t id,
diff --git a/drivers/net/sfc/base/efx_sram.c b/drivers/net/sfc/base/efx_sram.c
index 1f0ba0a9..7851ff13 100644
--- a/drivers/net/sfc/base/efx_sram.c
+++ b/drivers/net/sfc/base/efx_sram.c
@@ -25,9 +25,10 @@ efx_sram_buf_tbl_set(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD) {
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2) {
/*
* FIXME: the efx_sram_buf_tbl_*() functionality needs to be
* pulled inside the Falcon/Siena queue create/destroy code,
@@ -39,7 +40,7 @@ efx_sram_buf_tbl_set(
return (0);
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
if (stop >= EFX_BUF_TBL_SIZE) {
rc = EFBIG;
@@ -147,9 +148,10 @@ efx_sram_buf_tbl_clear(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD) {
+ enp->en_family == EFX_FAMILY_MEDFORD ||
+ enp->en_family == EFX_FAMILY_MEDFORD2) {
/*
* FIXME: the efx_sram_buf_tbl_*() functionality needs to be
* pulled inside the Falcon/Siena queue create/destroy code,
@@ -161,7 +163,7 @@ efx_sram_buf_tbl_clear(
return;
}
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
diff --git a/drivers/net/sfc/base/efx_tunnel.c b/drivers/net/sfc/base/efx_tunnel.c
index 25fa976f..399fd540 100644
--- a/drivers/net/sfc/base/efx_tunnel.c
+++ b/drivers/net/sfc/base/efx_tunnel.c
@@ -17,20 +17,20 @@ static const efx_tunnel_ops_t __efx_tunnel_dummy_ops = {
};
#endif /* EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON */
-#if EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn boolean_t
-medford_udp_encap_supported(
+ef10_udp_encap_supported(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-medford_tunnel_reconfigure(
+ef10_tunnel_reconfigure(
__in efx_nic_t *enp);
-static const efx_tunnel_ops_t __efx_tunnel_medford_ops = {
- medford_udp_encap_supported, /* eto_udp_encap_supported */
- medford_tunnel_reconfigure, /* eto_reconfigure */
+static const efx_tunnel_ops_t __efx_tunnel_ef10_ops = {
+ ef10_udp_encap_supported, /* eto_udp_encap_supported */
+ ef10_tunnel_reconfigure, /* eto_reconfigure */
};
-#endif /* EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
static __checkReturn efx_rc_t
efx_mcdi_set_tunnel_encap_udp_ports(
@@ -161,10 +161,16 @@ efx_tunnel_init(
#if EFSYS_OPT_MEDFORD
case EFX_FAMILY_MEDFORD:
- etop = &__efx_tunnel_medford_ops;
+ etop = &__efx_tunnel_ef10_ops;
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ etop = &__efx_tunnel_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -394,9 +400,9 @@ fail1:
return (rc);
}
-#if EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static __checkReturn boolean_t
-medford_udp_encap_supported(
+ef10_udp_encap_supported(
__in efx_nic_t *enp)
{
const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
@@ -410,7 +416,7 @@ medford_udp_encap_supported(
}
static __checkReturn efx_rc_t
-medford_tunnel_reconfigure(
+ef10_tunnel_reconfigure(
__in efx_nic_t *enp)
{
efx_tunnel_cfg_t *etcp = &enp->en_tunnel_cfg;
@@ -423,7 +429,7 @@ medford_tunnel_reconfigure(
memcpy(&etc, etcp, sizeof (etc));
EFSYS_UNLOCK(enp->en_eslp, state);
- if (medford_udp_encap_supported(enp) == B_FALSE) {
+ if (ef10_udp_encap_supported(enp) == B_FALSE) {
/*
* It is OK to apply empty UDP tunnel ports when UDP
* tunnel encapsulations are not supported - just nothing
@@ -458,6 +464,6 @@ fail1:
return (rc);
}
-#endif /* EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
#endif /* EFSYS_OPT_TUNNEL */
diff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c
index 4e02c869..da37580a 100644
--- a/drivers/net/sfc/base/efx_tx.c
+++ b/drivers/net/sfc/base/efx_tx.c
@@ -117,6 +117,7 @@ static const efx_tx_ops_t __efx_tx_siena_ops = {
NULL, /* etxo_qdesc_tso_create */
NULL, /* etxo_qdesc_tso2_create */
NULL, /* etxo_qdesc_vlantci_create */
+ NULL, /* etxo_qdesc_checksum_create */
#if EFSYS_OPT_QSTATS
siena_tx_qstats_update, /* etxo_qstats_update */
#endif
@@ -143,6 +144,7 @@ static const efx_tx_ops_t __efx_tx_hunt_ops = {
ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
#if EFSYS_OPT_QSTATS
ef10_tx_qstats_update, /* etxo_qstats_update */
#endif
@@ -169,12 +171,41 @@ static const efx_tx_ops_t __efx_tx_medford_ops = {
NULL, /* etxo_qdesc_tso_create */
ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
#if EFSYS_OPT_QSTATS
ef10_tx_qstats_update, /* etxo_qstats_update */
#endif
};
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+static const efx_tx_ops_t __efx_tx_medford2_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+ ef10_tx_qdesc_checksum_create, /* etxo_qdesc_checksum_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_MEDFORD2 */
+
+
__checkReturn efx_rc_t
efx_tx_init(
__in efx_nic_t *enp)
@@ -214,6 +245,12 @@ efx_tx_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ etxop = &__efx_tx_medford2_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
@@ -588,6 +625,7 @@ efx_tx_qdesc_tso_create(
efx_tx_qdesc_tso2_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
+ __in uint16_t outer_ipv4_id,
__in uint32_t tcp_seq,
__in uint16_t mss,
__out_ecount(count) efx_desc_t *edp,
@@ -599,7 +637,8 @@ efx_tx_qdesc_tso2_create(
EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL);
- etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count);
+ etxop->etxo_qdesc_tso2_create(etp, ipv4_id, outer_ipv4_id,
+ tcp_seq, mss, edp, count);
}
void
@@ -617,6 +656,21 @@ efx_tx_qdesc_vlantci_create(
etxop->etxo_qdesc_vlantci_create(etp, tci, edp);
}
+ void
+efx_tx_qdesc_checksum_create(
+ __in efx_txq_t *etp,
+ __in uint16_t flags,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_checksum_create != NULL);
+
+ etxop->etxo_qdesc_checksum_create(etp, flags, edp);
+}
+
#if EFSYS_OPT_QSTATS
void
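The new etxo_qdesc_checksum_create method builds a TX option descriptor that changes checksum offload for the DMA descriptors queued after it. A hedged sketch; the EFX_TXQ_CKSUM_* flag names are assumed from the queue-creation conventions in efx.h and may not be the exact set accepted here:

	efx_desc_t desc;

	/* Request IPv4 header and TCP/UDP checksum offload for the
	 * packets that follow (flag names are an assumption).
	 */
	efx_tx_qdesc_checksum_create(etp,
	    EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP, &desc);

	/* Post the option descriptor ahead of the packet descriptors,
	 * e.g. together with them via efx_tx_qdesc_post().
	 */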
diff --git a/drivers/net/sfc/base/efx_types.h b/drivers/net/sfc/base/efx_types.h
index 0581f67f..65168ab7 100644
--- a/drivers/net/sfc/base/efx_types.h
+++ b/drivers/net/sfc/base/efx_types.h
@@ -329,6 +329,16 @@ extern int fix_lint;
#endif
/*
+ * Saturation arithmetic subtract with minimum equal to zero.
+ *
+ * Use saturating arithmetic to ensure a non-negative result. This
+ * avoids undefined behaviour (and compiler warnings) when used as a
+ * shift count.
+ */
+#define EFX_SSUB(_val, _sub) \
+ ((_val) > (_sub) ? ((_val) - (_sub)) : 0)
+
+/*
* Extract bit field portion [low,high) from the native-endian element
* which contains bits [min,max).
*
@@ -347,8 +357,8 @@ extern int fix_lint;
((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \
0U : \
((_low > _min) ? \
- ((_element) >> (_low - _min)) : \
- ((_element) << (_min - _low))))
+ ((_element) >> EFX_SSUB(_low, _min)) : \
+ ((_element) << EFX_SSUB(_min, _low))))
/*
* Extract bit field portion [low,high) from the 64-bit little-endian
@@ -537,29 +547,29 @@ extern int fix_lint;
(((_low > _max) || (_high < _min)) ? \
0U : \
((_low > _min) ? \
- (((uint64_t)(_value)) << (_low - _min)) : \
- (((uint64_t)(_value)) >> (_min - _low))))
+ (((uint64_t)(_value)) << EFX_SSUB(_low, _min)) :\
+ (((uint64_t)(_value)) >> EFX_SSUB(_min, _low))))
#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \
(((_low > _max) || (_high < _min)) ? \
0U : \
((_low > _min) ? \
- (((uint32_t)(_value)) << (_low - _min)) : \
- (((uint32_t)(_value)) >> (_min - _low))))
+ (((uint32_t)(_value)) << EFX_SSUB(_low, _min)) :\
+ (((uint32_t)(_value)) >> EFX_SSUB(_min, _low))))
#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \
(((_low > _max) || (_high < _min)) ? \
0U : \
(uint16_t)((_low > _min) ? \
- ((_value) << (_low - _min)) : \
- ((_value) >> (_min - _low))))
+ ((_value) << EFX_SSUB(_low, _min)) : \
+ ((_value) >> EFX_SSUB(_min, _low))))
#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \
(((_low > _max) || (_high < _min)) ? \
0U : \
(uint8_t)((_low > _min) ? \
- ((_value) << (_low - _min)) : \
- ((_value) >> (_min - _low))))
+ ((_value) << EFX_SSUB(_low, _min)) : \
+ ((_value) >> EFX_SSUB(_min, _low))))
/*
* Construct bit field portion
@@ -1288,22 +1298,22 @@ extern int fix_lint;
#define EFX_SHIFT64(_bit, _base) \
(((_bit) >= (_base) && (_bit) < (_base) + 64) ? \
- ((uint64_t)1 << ((_bit) - (_base))) : \
+ ((uint64_t)1 << EFX_SSUB((_bit), (_base))) : \
0U)
#define EFX_SHIFT32(_bit, _base) \
(((_bit) >= (_base) && (_bit) < (_base) + 32) ? \
- ((uint32_t)1 << ((_bit) - (_base))) : \
+ ((uint32_t)1 << EFX_SSUB((_bit), (_base))) : \
0U)
#define EFX_SHIFT16(_bit, _base) \
(((_bit) >= (_base) && (_bit) < (_base) + 16) ? \
- (uint16_t)(1 << ((_bit) - (_base))) : \
+ (uint16_t)(1 << EFX_SSUB((_bit), (_base))) : \
0U)
#define EFX_SHIFT8(_bit, _base) \
(((_bit) >= (_base) && (_bit) < (_base) + 8) ? \
- (uint8_t)(1 << ((_bit) - (_base))) : \
+ (uint8_t)(1 << EFX_SSUB((_bit), (_base))) : \
0U)
#define EFX_SET_OWORD_BIT64(_oword, _bit) \
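The reason EFX_SSUB matters is that the unselected arm of each ternary above is still compiled: when the macros are expanded with constants, the dead arm can contain a shift whose count is negative (or, with unsigned operands, wrapped to a huge value), which is undefined behaviour and draws compiler warnings even though that arm is never evaluated. A worked example with the macro as defined above:

	unsigned int a = EFX_SSUB(32, 0);	/* 32: plain subtraction */
	unsigned int b = EFX_SSUB(0, 32);	/* 0: clamped; a raw (0 - 32)
						 * would be an illegal shift
						 * count in the dead arm */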
diff --git a/drivers/net/sfc/base/efx_vpd.c b/drivers/net/sfc/base/efx_vpd.c
index 7b8138f3..6d783d74 100644
--- a/drivers/net/sfc/base/efx_vpd.c
+++ b/drivers/net/sfc/base/efx_vpd.c
@@ -44,7 +44,7 @@ static const efx_vpd_ops_t __efx_vpd_siena_ops = {
#endif /* EFSYS_OPT_SIENA */
-#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2
static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
ef10_vpd_init, /* evpdo_init */
@@ -59,7 +59,7 @@ static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
ef10_vpd_fini, /* evpdo_fini */
};
-#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD || EFSYS_OPT_MEDFORD2 */
__checkReturn efx_rc_t
efx_vpd_init(
@@ -91,6 +91,12 @@ efx_vpd_init(
break;
#endif /* EFSYS_OPT_MEDFORD */
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD2 */
+
default:
EFSYS_ASSERT(0);
rc = ENOTSUP;
diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c
index d03cc138..16ea81d2 100644
--- a/drivers/net/sfc/base/hunt_nic.c
+++ b/drivers/net/sfc/base/hunt_nic.c
@@ -36,7 +36,7 @@ hunt_nic_get_required_pcie_bandwidth(
goto out;
}
- if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) {
+ if (port_modes & (1U << TLV_PORT_MODE_40G_40G)) {
/*
* This needs the full PCIe bandwidth (and could use
* more) - roughly 64 Gbit/s for 8 lanes of Gen3.
@@ -45,9 +45,9 @@ hunt_nic_get_required_pcie_bandwidth(
EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
goto fail1;
} else {
- if (port_modes & (1 << TLV_PORT_MODE_40G)) {
+ if (port_modes & (1U << TLV_PORT_MODE_40G)) {
max_port_mode = TLV_PORT_MODE_40G;
- } else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) {
+ } else if (port_modes & (1U << TLV_PORT_MODE_10G_10G_10G_10G)) {
max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
} else {
/* Assume two 10G ports */
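The 1 to 1U changes in this hunk are not cosmetic: left-shifting a signed int into the sign bit is undefined behaviour, so if a TLV_PORT_MODE_* bit number ever reaches 31 the unsigned literal is required. Minimal illustration:

	uint32_t ok = 1U << 31;		/* 0x80000000: well defined */
	/* int bad = 1 << 31; */	/* signed overflow: undefined */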
@@ -76,90 +76,13 @@ fail1:
hunt_board_cfg(
__in efx_nic_t *enp)
{
- efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint8_t mac_addr[6] = { 0 };
- uint32_t board_type = 0;
- ef10_link_state_t els;
efx_port_t *epp = &(enp->en_port);
- uint32_t port;
- uint32_t pf;
- uint32_t vf;
- uint32_t mask;
uint32_t flags;
uint32_t sysclk, dpcpu_clk;
- uint32_t base, nvec;
uint32_t bandwidth;
efx_rc_t rc;
- if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
- goto fail1;
-
- /*
- * NOTE: The MCDI protocol numbers ports from zero.
- * The common code MCDI interface numbers ports from one.
- */
- emip->emi_port = port + 1;
-
- if ((rc = ef10_external_port_mapping(enp, port,
- &encp->enc_external_port)) != 0)
- goto fail2;
-
- /*
- * Get PCIe function number from firmware (used for
- * per-function privilege and dynamic config info).
- * - PCIe PF: pf = PF number, vf = 0xffff.
- * - PCIe VF: pf = parent PF, vf = VF number.
- */
- if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
- goto fail3;
-
- encp->enc_pf = pf;
- encp->enc_vf = vf;
-
- /* MAC address for this function */
- if (EFX_PCI_FUNCTION_IS_PF(encp)) {
- rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
- if ((rc == 0) && (mac_addr[0] & 0x02)) {
- /*
- * If the static config does not include a global MAC
- * address pool then the board may return a locally
- * administered MAC address (this should only happen on
- * incorrectly programmed boards).
- */
- rc = EINVAL;
- }
- } else {
- rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
- }
- if (rc != 0)
- goto fail4;
-
- EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
-
- /* Board configuration */
- rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
- if (rc != 0) {
- /* Unprivileged functions may not be able to read board cfg */
- if (rc == EACCES)
- board_type = 0;
- else
- goto fail5;
- }
-
- encp->enc_board_type = board_type;
- encp->enc_clk_mult = 1; /* not used for Huntington */
-
- /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
- if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
- goto fail6;
-
- /* Obtain the default PHY advertised capabilities */
- if ((rc = ef10_phy_get_link(enp, &els)) != 0)
- goto fail7;
- epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
- epp->ep_adv_cap_mask = els.els_adv_cap_mask;
-
/*
* Enable firmware workarounds for hardware errata.
* Expected responses are:
@@ -187,7 +110,7 @@ hunt_board_cfg(
else if ((rc == ENOTSUP) || (rc == ENOENT))
encp->enc_bug35388_workaround = B_FALSE;
else
- goto fail8;
+ goto fail1;
/*
* If the bug41750 workaround is enabled, then do not test interrupts,
@@ -206,7 +129,7 @@ hunt_board_cfg(
} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
encp->enc_bug41750_workaround = B_FALSE;
} else {
- goto fail9;
+ goto fail2;
}
if (EFX_PCI_FUNCTION_IS_VF(encp)) {
/* Interrupt testing does not work for VFs. See bug50084. */
@@ -244,12 +167,12 @@ hunt_board_cfg(
} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
encp->enc_bug26807_workaround = B_FALSE;
} else {
- goto fail10;
+ goto fail3;
}
/* Get clock frequencies (in MHz). */
if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
- goto fail11;
+ goto fail4;
/*
* The Huntington timer quantum is 1536 sysclk cycles, documented for
@@ -266,80 +189,23 @@ hunt_board_cfg(
encp->enc_bug61265_workaround = B_FALSE; /* Medford only */
- /* Check capabilities of running datapath firmware */
- if ((rc = ef10_get_datapath_caps(enp)) != 0)
- goto fail12;
-
/* Alignment for receive packet DMA buffers */
encp->enc_rx_buf_align_start = 1;
encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
- /* Alignment for WPTR updates */
- encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
-
- /*
- * Maximum number of exclusive RSS contexts which can be allocated. The
- * hardware supports 64, but 6 are reserved for shared contexts. They
- * are a global resource so not all may be available.
- */
- encp->enc_rx_scale_max_exclusive_contexts = 58;
-
- encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
- /* No boundary crossing limits */
- encp->enc_tx_dma_desc_boundary = 0;
-
- /*
- * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
- * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
- * resources (allocated to this PCIe function), which is zero until
- * after we have allocated VIs.
- */
- encp->enc_evq_limit = 1024;
- encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
- encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
-
/*
* The workaround for bug35388 uses the top bit of transmit queue
* descriptor writes, preventing the use of 4096 descriptor TXQs.
*/
encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;
- encp->enc_buftbl_limit = 0xFFFFFFFF;
-
+ EFX_STATIC_ASSERT(HUNT_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;
- /*
- * Get the current privilege mask. Note that this may be modified
- * dynamically, so this value is informational only. DO NOT use
- * the privilege mask to check for sufficient privileges, as that
- * can result in time-of-check/time-of-use bugs.
- */
- if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
- goto fail13;
- encp->enc_privilege_mask = mask;
-
- /* Get interrupt vector limits */
- if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
- if (EFX_PCI_FUNCTION_IS_PF(encp))
- goto fail14;
-
- /* Ignore error (cannot query vector limits from a VF). */
- base = 0;
- nvec = 1024;
- }
- encp->enc_intr_vec_base = base;
- encp->enc_intr_limit = nvec;
-
- /*
- * Maximum number of bytes into the frame the TCP header can start for
- * firmware assisted TSO to work.
- */
- encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
-
if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
- goto fail15;
+ goto fail5;
encp->enc_required_pcie_bandwidth_mbps = bandwidth;
/* All Huntington devices have a PCIe Gen3, 8 lane connector */
@@ -347,26 +213,6 @@ hunt_board_cfg(
return (0);
-fail15:
- EFSYS_PROBE(fail15);
-fail14:
- EFSYS_PROBE(fail14);
-fail13:
- EFSYS_PROBE(fail13);
-fail12:
- EFSYS_PROBE(fail12);
-fail11:
- EFSYS_PROBE(fail11);
-fail10:
- EFSYS_PROBE(fail10);
-fail9:
- EFSYS_PROBE(fail9);
-fail8:
- EFSYS_PROBE(fail8);
-fail7:
- EFSYS_PROBE(fail7);
-fail6:
- EFSYS_PROBE(fail6);
fail5:
EFSYS_PROBE(fail5);
fail4:
diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c
index e4de0dab..940bd026 100644
--- a/drivers/net/sfc/base/mcdi_mon.c
+++ b/drivers/net/sfc/base/mcdi_mon.c
@@ -135,6 +135,10 @@ static const struct mcdi_sensor_map_s {
STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
STAT(Px, I1V8), /* 0x51 IN_I1V8 */
STAT(Px, I2V5), /* 0x52 IN_I2V5 */
+ STAT(Px, I3V3), /* 0x53 IN_I3V3 */
+ STAT(Px, I12V0), /* 0x54 IN_I12V0 */
+ STAT(Px, 1_3V), /* 0x55 IN_1V3 */
+ STAT(Px, I1V3), /* 0x56 IN_I1V3 */
};
#define MCDI_STATIC_SENSOR_ASSERT(_field) \
@@ -477,6 +481,11 @@ mcdi_mon_cfg_build(
encp->enc_mon_type = EFX_MON_SFC92X0;
break;
#endif
+#if EFSYS_OPT_MEDFORD2
+ case EFX_FAMILY_MEDFORD2:
+ encp->enc_mon_type = EFX_MON_SFC92X0;
+ break;
+#endif
default:
rc = EINVAL;
goto fail1;
diff --git a/drivers/net/sfc/base/medford2_impl.h b/drivers/net/sfc/base/medford2_impl.h
new file mode 100644
index 00000000..6259a700
--- /dev/null
+++ b/drivers/net/sfc/base/medford2_impl.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#ifndef _SYS_MEDFORD2_IMPL_H
+#define _SYS_MEDFORD2_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef ER_EZ_TX_PIOBUF_SIZE
+#define ER_EZ_TX_PIOBUF_SIZE 4096
+#endif
+
+
+#define MEDFORD2_PIOBUF_NBUFS (16)
+#define MEDFORD2_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE)
+
+#define MEDFORD2_MIN_PIO_ALLOC_SIZE (MEDFORD2_PIOBUF_SIZE / 32)
+
+
+extern __checkReturn efx_rc_t
+medford2_board_cfg(
+ __in efx_nic_t *enp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MEDFORD2_IMPL_H */
diff --git a/drivers/net/sfc/base/medford2_nic.c b/drivers/net/sfc/base/medford2_nic.c
new file mode 100644
index 00000000..7f5ad175
--- /dev/null
+++ b/drivers/net/sfc/base/medford2_nic.c
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2015-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_MEDFORD2
+
+static __checkReturn efx_rc_t
+medford2_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /* FIXME: support new Medford2 dynamic port modes */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode)) != 0) {
+ /* No port mode info available. */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ &bandwidth)) != 0)
+ goto fail1;
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+medford2_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t end_padding;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /*
+ * Interrupt testing does not work for VFs on Medford2.
+ * See bug50084 and bug71432 comment 21.
+ */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /* Chained multicast is always enabled on Medford2 */
+ encp->enc_bug26807_workaround = B_TRUE;
+
+ /*
+ * If the bug61265 workaround is enabled, then interrupt holdoff timers
+ * cannot be controlled by timer table writes, so MCDI must be used
+ * (timer table writes can still be used for wakeup timers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug61265_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug61265_workaround = B_FALSE;
+ else
+ goto fail1;
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail2;
+
+ /*
+ * The Medford2 timer quantum is 1536 dpcpu_clk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+
+ /* Get the RX DMA end padding alignment configuration */
+ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
+ if (rc != EACCES)
+ goto fail3;
+
+ /* Assume largest tail padding size supported by hardware */
+ end_padding = 256;
+ }
+ encp->enc_rx_buf_align_end = end_padding;
+
+ /*
+ * The maximum supported transmit queue size is 2048. TXQs with 4096
+ * descriptors are not supported as the top bit is used for vfifo
+ * stuffing.
+ */
+ encp->enc_txq_max_ndescs = 2048;
+
+ EFX_STATIC_ASSERT(MEDFORD2_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
+ encp->enc_piobuf_limit = MEDFORD2_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = MEDFORD2_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = MEDFORD2_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Medford2 stores a single global copy of VPD, not per-PF as on
+ * Huntington.
+ */
+ encp->enc_vpd_is_global = B_TRUE;
+
+ rc = medford2_nic_get_required_pcie_bandwidth(enp, &bandwidth);
+ if (rc != 0)
+ goto fail4;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MEDFORD2 */
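A worked pass through the timer arithmetic above, assuming (purely for illustration) a 1000 MHz DPCPU clock. dpcpu_clk is reported in MHz, so the nanosecond quantum falls out of the 1536000 scaling constant directly:

	uint32_t dpcpu_clk_mhz = 1000;			/* assumed clock */
	uint32_t quantum_ns = 1536000UL / dpcpu_clk_mhz; /* = 1536 ns */
	/* Largest timeout: quantum scaled by the widest EV_TMR_VAL value. */
	uint32_t max_us =
	    (quantum_ns << FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;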
diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c
index 1365e9e3..6dc895f5 100644
--- a/drivers/net/sfc/base/medford_nic.c
+++ b/drivers/net/sfc/base/medford_nic.c
@@ -11,64 +11,6 @@
#if EFSYS_OPT_MEDFORD
static __checkReturn efx_rc_t
-efx_mcdi_get_rxdp_config(
- __in efx_nic_t *enp,
- __out uint32_t *end_paddingp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
- MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
- uint32_t end_padding;
- efx_rc_t rc;
-
- memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
- GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
- /* RX DMA end padding is disabled */
- end_padding = 0;
- } else {
- switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
- GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
- case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
- end_padding = 64;
- break;
- case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
- end_padding = 128;
- break;
- case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
- end_padding = 256;
- break;
- default:
- rc = ENOTSUP;
- goto fail2;
- }
- }
-
- *end_paddingp = end_padding;
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
medford_nic_get_required_pcie_bandwidth(
__in efx_nic_t *enp,
__out uint32_t *bandwidth_mbpsp)
@@ -104,104 +46,13 @@ fail1:
medford_board_cfg(
__in efx_nic_t *enp)
{
- efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint8_t mac_addr[6] = { 0 };
- uint32_t board_type = 0;
- ef10_link_state_t els;
- efx_port_t *epp = &(enp->en_port);
- uint32_t port;
- uint32_t pf;
- uint32_t vf;
- uint32_t mask;
uint32_t sysclk, dpcpu_clk;
- uint32_t base, nvec;
uint32_t end_padding;
uint32_t bandwidth;
efx_rc_t rc;
/*
- * FIXME: Likely to be incomplete and incorrect.
- * Parts of this should be shared with Huntington.
- */
-
- if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
- goto fail1;
-
- /*
- * NOTE: The MCDI protocol numbers ports from zero.
- * The common code MCDI interface numbers ports from one.
- */
- emip->emi_port = port + 1;
-
- if ((rc = ef10_external_port_mapping(enp, port,
- &encp->enc_external_port)) != 0)
- goto fail2;
-
- /*
- * Get PCIe function number from firmware (used for
- * per-function privilege and dynamic config info).
- * - PCIe PF: pf = PF number, vf = 0xffff.
- * - PCIe VF: pf = parent PF, vf = VF number.
- */
- if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
- goto fail3;
-
- encp->enc_pf = pf;
- encp->enc_vf = vf;
-
- /* MAC address for this function */
- if (EFX_PCI_FUNCTION_IS_PF(encp)) {
- rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
-#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
- /*
- * Disable static config checking for Medford NICs, ONLY
- * for manufacturing test and setup at the factory, to
- * allow the static config to be installed.
- */
-#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
- if ((rc == 0) && (mac_addr[0] & 0x02)) {
- /*
- * If the static config does not include a global MAC
- * address pool then the board may return a locally
- * administered MAC address (this should only happen on
- * incorrectly programmed boards).
- */
- rc = EINVAL;
- }
-#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
- } else {
- rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
- }
- if (rc != 0)
- goto fail4;
-
- EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
-
- /* Board configuration */
- rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
- if (rc != 0) {
- /* Unprivileged functions may not be able to read board cfg */
- if (rc == EACCES)
- board_type = 0;
- else
- goto fail5;
- }
-
- encp->enc_board_type = board_type;
- encp->enc_clk_mult = 1; /* not used for Medford */
-
- /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
- if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
- goto fail6;
-
- /* Obtain the default PHY advertised capabilities */
- if ((rc = ef10_phy_get_link(enp, &els)) != 0)
- goto fail7;
- epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
- epp->ep_adv_cap_mask = els.els_adv_cap_mask;
-
- /*
* Enable firmware workarounds for hardware errata.
* Expected responses are:
* - 0 (zero):
@@ -220,8 +71,8 @@ medford_board_cfg(
if (EFX_PCI_FUNCTION_IS_VF(encp)) {
/*
- * Interrupt testing does not work for VFs. See bug50084.
- * FIXME: Does this still apply to Medford?
+ * Interrupt testing does not work for VFs. See bug50084 and
+ * bug71432 comment 21.
*/
encp->enc_bug41750_workaround = B_TRUE;
}
@@ -241,11 +92,11 @@ medford_board_cfg(
else if ((rc == ENOTSUP) || (rc == ENOENT))
encp->enc_bug61265_workaround = B_FALSE;
else
- goto fail8;
+ goto fail1;
/* Get clock frequencies (in MHz). */
if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
- goto fail9;
+ goto fail2;
/*
* The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
@@ -255,47 +106,19 @@ medford_board_cfg(
encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
- /* Check capabilities of running datapath firmware */
- if ((rc = ef10_get_datapath_caps(enp)) != 0)
- goto fail10;
-
/* Alignment for receive packet DMA buffers */
encp->enc_rx_buf_align_start = 1;
/* Get the RX DMA end padding alignment configuration */
if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
if (rc != EACCES)
- goto fail11;
+ goto fail3;
/* Assume largest tail padding size supported by hardware */
end_padding = 256;
}
encp->enc_rx_buf_align_end = end_padding;
- /* Alignment for WPTR updates */
- encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
-
- /*
- * Maximum number of exclusive RSS contexts which can be allocated. The
- * hardware supports 64, but 6 are reserved for shared contexts. They
- * are a global resource so not all may be available.
- */
- encp->enc_rx_scale_max_exclusive_contexts = 58;
-
- encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
- /* No boundary crossing limits */
- encp->enc_tx_dma_desc_boundary = 0;
-
- /*
- * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
- * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
- * resources (allocated to this PCIe function), which is zero until
- * after we have allocated VIs.
- */
- encp->enc_evq_limit = 1024;
- encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
- encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
-
/*
* The maximum supported transmit queue size is 2048. TXQs with 4096
* descriptors are not supported as the top bit is used for vfifo
@@ -303,41 +126,12 @@ medford_board_cfg(
*/
encp->enc_txq_max_ndescs = 2048;
- encp->enc_buftbl_limit = 0xFFFFFFFF;
-
+ EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;
/*
- * Get the current privilege mask. Note that this may be modified
- * dynamically, so this value is informational only. DO NOT use
- * the privilege mask to check for sufficient privileges, as that
- * can result in time-of-check/time-of-use bugs.
- */
- if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
- goto fail12;
- encp->enc_privilege_mask = mask;
-
- /* Get interrupt vector limits */
- if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
- if (EFX_PCI_FUNCTION_IS_PF(encp))
- goto fail13;
-
- /* Ignore error (cannot query vector limits from a VF). */
- base = 0;
- nvec = 1024;
- }
- encp->enc_intr_vec_base = base;
- encp->enc_intr_limit = nvec;
-
- /*
- * Maximum number of bytes into the frame the TCP header can start for
- * firmware assisted TSO to work.
- */
- encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
-
- /*
* Medford stores a single global copy of VPD, not per-PF as on
* Huntington.
*/
@@ -345,32 +139,12 @@ medford_board_cfg(
rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
if (rc != 0)
- goto fail14;
+ goto fail4;
encp->enc_required_pcie_bandwidth_mbps = bandwidth;
encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
return (0);
-fail14:
- EFSYS_PROBE(fail14);
-fail13:
- EFSYS_PROBE(fail13);
-fail12:
- EFSYS_PROBE(fail12);
-fail11:
- EFSYS_PROBE(fail11);
-fail10:
- EFSYS_PROBE(fail10);
-fail9:
- EFSYS_PROBE(fail9);
-fail8:
- EFSYS_PROBE(fail8);
-fail7:
- EFSYS_PROBE(fail7);
-fail6:
- EFSYS_PROBE(fail6);
-fail5:
- EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
fail3:
diff --git a/drivers/net/sfc/base/meson.build b/drivers/net/sfc/base/meson.build
index f1e49735..da2bf44d 100644
--- a/drivers/net/sfc/base/meson.build
+++ b/drivers/net/sfc/base/meson.build
@@ -34,6 +34,7 @@ sources = [
'siena_vpd.c',
'ef10_ev.c',
'ef10_filter.c',
+ 'ef10_image.c',
'ef10_intr.c',
'ef10_mac.c',
'ef10_mcdi.c',
@@ -44,7 +45,8 @@ sources = [
'ef10_tx.c',
'ef10_vpd.c',
'hunt_nic.c',
- 'medford_nic.c'
+ 'medford_nic.c',
+ 'medford2_nic.c'
]
extra_flags = [
diff --git a/drivers/net/sfc/base/siena_flash.h b/drivers/net/sfc/base/siena_flash.h
index 91a9fe05..74bb9496 100644
--- a/drivers/net/sfc/base/siena_flash.h
+++ b/drivers/net/sfc/base/siena_flash.h
@@ -103,7 +103,14 @@ typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
/* the key, or 0xffff if unsigned. (Otherwise set to 0) */
efx_byte_t mumfw_subtype; /* MUM & SUC images: subtype. (Otherwise set to 0) */
efx_byte_t reserved_b[3]; /* (set to 0) */
- efx_dword_t reserved_c[6]; /* (set to 0) */
+ efx_dword_t security_level; /* This number increases every time a serious security flaw */
+ /* is fixed. A secure NIC may not downgrade to any image */
+ /* with a lower security level than the current image. */
+ /* Note: The number in this header should only be used for */
+ /* determining the level of new images, not to determine */
+ /* the level of the current image as this header is not */
+ /* protected by a CMAC. */
+ efx_dword_t reserved_c[5]; /* (set to 0) */
} siena_mc_boot_hdr_t;
#define SIENA_MC_BOOT_HDR_PADDING \
diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c
index 904e03ed..f8857cdd 100644
--- a/drivers/net/sfc/base/siena_mac.c
+++ b/drivers/net/sfc/base/siena_mac.c
@@ -245,16 +245,28 @@ siena_mac_stats_update(
__inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
__inout_opt uint32_t *generationp)
{
- efx_qword_t value;
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_qword_t generation_start;
efx_qword_t generation_end;
+ efx_qword_t value;
+ efx_rc_t rc;
- _NOTE(ARGUNUSED(enp))
+ if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
+ /* MAC stats count too small */
+ rc = ENOSPC;
+ goto fail1;
+ }
+ if (EFSYS_MEM_SIZE(esmp) <
+ (encp->enc_mac_stats_nstats * sizeof (efx_qword_t))) {
+ /* DMA buffer too small */
+ rc = ENOSPC;
+ goto fail2;
+ }
/* Read END first so we don't race with the MC */
- EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
- SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
- &generation_end);
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
+ SIENA_MAC_STAT_READ(esmp, (encp->enc_mac_stats_nstats - 1),
+ &generation_end);
EFSYS_MEM_READ_BARRIER();
/* TX */
@@ -422,7 +434,7 @@ siena_mac_stats_update(
SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
- EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFSYS_MEM_SIZE(esmp));
EFSYS_MEM_READ_BARRIER();
SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
&generation_start);
@@ -437,6 +449,13 @@ siena_mac_stats_update(
*generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
#endif /* EFSYS_OPT_MAC_STATS */
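The two checks added above follow one guard pattern: validate the advertised stats count against the minimum the parser needs, then validate the DMA buffer against the advertised count. A minimal sketch, with illustrative names standing in for the EFX types:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for the guard added to siena_mac_stats_update() */
static int
stats_buf_guard(unsigned int nstats, unsigned int min_nstats,
		size_t dma_buf_size)
{
	if (nstats < min_nstats)
		return ENOSPC;	/* MAC stats count too small */

	if (dma_buf_size < nstats * sizeof(uint64_t))
		return ENOSPC;	/* DMA buffer too small */

	return 0;
}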
diff --git a/drivers/net/sfc/base/siena_mcdi.c b/drivers/net/sfc/base/siena_mcdi.c
index ef844591..d727c187 100644
--- a/drivers/net/sfc/base/siena_mcdi.c
+++ b/drivers/net/sfc/base/siena_mcdi.c
@@ -124,17 +124,21 @@ siena_mcdi_read_response(
{
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
unsigned int pdur;
- unsigned int pos;
+ unsigned int pos = 0;
efx_dword_t data;
+ size_t remaining = length;
EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
pdur = SIENA_MCDI_PDU(emip);
- for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ while (remaining > 0) {
+ size_t chunk = MIN(remaining, sizeof (data));
+
EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
pdur + ((offset + pos) >> 2), &data, B_FALSE);
- memcpy((uint8_t *)bufferp + pos, &data,
- MIN(sizeof (data), length - pos));
+ memcpy((uint8_t *)bufferp + pos, &data, chunk);
+ pos += chunk;
+ remaining -= chunk;
}
}
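The rewritten loop above reads the response in 32-bit register words but copies only the bytes that remain, so a length that is not a multiple of four cannot overrun the caller's buffer. A self-contained sketch of the same pattern, with a plain array standing in for the register window:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void
chunked_read(const uint32_t *window, void *bufferp, size_t length)
{
	size_t pos = 0;
	size_t remaining = length;

	while (remaining > 0) {
		uint32_t data = window[pos >> 2];	/* one register read */
		size_t chunk = remaining < sizeof(data) ?
			       remaining : sizeof(data);

		memcpy((uint8_t *)bufferp + pos, &data, chunk);
		pos += chunk;
		remaining -= chunk;
	}
}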
diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c
index f223c9be..31eef80b 100644
--- a/drivers/net/sfc/base/siena_nic.c
+++ b/drivers/net/sfc/base/siena_nic.c
@@ -66,6 +66,10 @@ siena_board_cfg(
uint32_t nevq, nrxq, ntxq;
efx_rc_t rc;
+ /* Siena has a fixed 8Kbyte VI window size */
+ EFX_STATIC_ASSERT(1U << EFX_VI_WINDOW_SHIFT_8K == 8192);
+ encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
+
/* External port identifier using one-based port numbering */
encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port;
@@ -114,6 +118,18 @@ siena_board_cfg(
/* There is one RSS context per function */
encp->enc_rx_scale_max_exclusive_contexts = 1;
+ encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_LFSR);
+ encp->enc_rx_scale_hash_alg_mask |= (1U << EFX_RX_HASHALG_TOEPLITZ);
+
+ /*
+ * It is always possible to use port numbers
+ * as the input data for hash computation.
+ */
+ encp->enc_rx_scale_l4_hash_supported = B_TRUE;
+
+ /* There is no support for additional RSS modes */
+ encp->enc_rx_scale_additional_modes_supported = B_FALSE;
+
encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT);
/* Fragments must not span 4k boundaries. */
encp->enc_tx_dma_desc_boundary = 4096;
@@ -145,6 +161,8 @@ siena_board_cfg(
encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
encp->enc_rx_packed_stream_supported = B_FALSE;
encp->enc_rx_var_packed_stream_supported = B_FALSE;
+ encp->enc_rx_es_super_buffer_supported = B_FALSE;
+ encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;
/* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */
encp->enc_required_pcie_bandwidth_mbps = 2 * 10000;
@@ -152,6 +170,12 @@ siena_board_cfg(
encp->enc_nvram_update_verify_result_supported = B_FALSE;
+ encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
+
+ encp->enc_filter_action_flag_supported = B_FALSE;
+ encp->enc_filter_action_mark_supported = B_FALSE;
+ encp->enc_filter_action_mark_max = 0;
+
return (0);
fail2:
diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c
index e72bba0b..8cdd2df7 100644
--- a/drivers/net/sfc/base/siena_nvram.c
+++ b/drivers/net/sfc/base/siena_nvram.c
@@ -304,15 +304,20 @@ siena_nvram_get_dynamic_cfg(
if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
goto fail1;
+ if (size < SIENA_NVRAM_CHUNK) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
if (dcfg == NULL) {
rc = ENOMEM;
- goto fail2;
+ goto fail3;
}
if ((rc = siena_nvram_partn_read(enp, partn, 0,
(caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
- goto fail3;
+ goto fail4;
/* Verify the magic */
if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
@@ -347,7 +352,7 @@ siena_nvram_get_dynamic_cfg(
if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
(caddr_t)dcfg + SIENA_NVRAM_CHUNK,
region - SIENA_NVRAM_CHUNK)) != 0)
- goto fail4;
+ goto fail5;
}
/* Verify checksum */
@@ -389,13 +394,15 @@ done:
return (0);
+fail5:
+ EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c
index d638646b..4b2190d3 100644
--- a/drivers/net/sfc/base/siena_phy.c
+++ b/drivers/net/sfc/base/siena_phy.c
@@ -534,6 +534,11 @@ siena_phy_stats_update(
MC_CMD_PHY_STATS_OUT_DMA_LEN)];
efx_rc_t rc;
+ if ((esmp == NULL) || (EFSYS_MEM_SIZE(esmp) < EFX_PHY_STATS_SIZE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
(void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_PHY_STATS;
req.emr_in_buf = payload;
@@ -550,7 +555,7 @@ siena_phy_stats_update(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail1;
+ goto fail2;
}
EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);
@@ -559,6 +564,8 @@ siena_phy_stats_update(
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
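Most hunks in this patch renumber failN labels because new early checks were inserted. The idiom, sketched below with a placeholder probe macro, is that each check jumps to its own label and the labels unwind in reverse order, so every probe on the failure path fires exactly once:

#include <errno.h>

#define EXAMPLE_PROBE(_lbl)	((void)0)	/* stands in for EFSYS_PROBE */

static int
example_cfg(int a, int b)
{
	int rc;

	if (a < 0) {
		rc = EINVAL;
		goto fail1;
	}

	if (b < 0) {
		rc = EINVAL;
		goto fail2;
	}

	return 0;

fail2:
	EXAMPLE_PROBE(fail2);
fail1:
	EXAMPLE_PROBE(fail1);
	return rc;
}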
diff --git a/drivers/net/sfc/base/siena_vpd.c b/drivers/net/sfc/base/siena_vpd.c
index f188eb58..ebb12abf 100644
--- a/drivers/net/sfc/base/siena_vpd.c
+++ b/drivers/net/sfc/base/siena_vpd.c
@@ -36,21 +36,26 @@ siena_vpd_get_static(
if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
goto fail1;
+ if (size < SIENA_NVRAM_CHUNK) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
if (scfg == NULL) {
rc = ENOMEM;
- goto fail2;
+ goto fail3;
}
if ((rc = siena_nvram_partn_read(enp, partn, 0,
(caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
- goto fail3;
+ goto fail4;
/* Verify the magic number */
if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
SIENA_MC_STATIC_CONFIG_MAGIC) {
rc = EINVAL;
- goto fail4;
+ goto fail5;
}
/* All future versions of the structure must be backwards compatible */
@@ -64,7 +69,7 @@ siena_vpd_get_static(
if (hdr_length > size || vpd_offset > size || vpd_length > size ||
vpd_length + vpd_offset > size) {
rc = EINVAL;
- goto fail5;
+ goto fail6;
}
/* Read the remainder of scfg + static vpd */
@@ -73,7 +78,7 @@ siena_vpd_get_static(
if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
(caddr_t)scfg + SIENA_NVRAM_CHUNK,
region - SIENA_NVRAM_CHUNK)) != 0)
- goto fail6;
+ goto fail7;
}
/* Verify checksum */
@@ -82,7 +87,7 @@ siena_vpd_get_static(
cksum += ((uint8_t *)scfg)[pos];
if (cksum != 0) {
rc = EINVAL;
- goto fail7;
+ goto fail8;
}
if (vpd_length == 0)
@@ -92,7 +97,7 @@ siena_vpd_get_static(
EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
if (svpd == NULL) {
rc = ENOMEM;
- goto fail8;
+ goto fail9;
}
memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
}
@@ -104,6 +109,8 @@ siena_vpd_get_static(
return (0);
+fail9:
+ EFSYS_PROBE(fail9);
fail8:
EFSYS_PROBE(fail8);
fail7:
@@ -114,11 +121,11 @@ fail5:
EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+fail3:
+ EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h
index c7a54c3b..b9d2df58 100644
--- a/drivers/net/sfc/efsys.h
+++ b/drivers/net/sfc/efsys.h
@@ -26,6 +26,7 @@
#include <rte_io.h>
#include "sfc_debug.h"
+#include "sfc_log.h"
#ifdef __cplusplus
extern "C" {
@@ -119,6 +120,8 @@ prefetch_read_once(const volatile void *addr)
#define __out_ecount_opt(_n)
#define __out_bcount(_n)
#define __out_bcount_opt(_n)
+#define __out_bcount_part(_n, _l)
+#define __out_bcount_part_opt(_n, _l)
#define __deref_out
@@ -148,6 +151,8 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_HUNTINGTON 1
/* Enable SFN8xxx support */
#define EFSYS_OPT_MEDFORD 1
+/* Enable X2xxx support */
+#define EFSYS_OPT_MEDFORD2 1
#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
@@ -161,7 +166,7 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_MAC_STATS 1
-#define EFSYS_OPT_LOOPBACK 0
+#define EFSYS_OPT_LOOPBACK 1
#define EFSYS_OPT_MON_MCDI 0
#define EFSYS_OPT_MON_STATS 0
@@ -174,6 +179,7 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_VPD 0
#define EFSYS_OPT_NVRAM 0
#define EFSYS_OPT_BOOTCFG 0
+#define EFSYS_OPT_IMAGE_LAYOUT 0
#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_RX_SCALE 1
@@ -192,8 +198,12 @@ prefetch_read_once(const volatile void *addr)
#define EFSYS_OPT_RX_PACKED_STREAM 0
+#define EFSYS_OPT_RX_ES_SUPER_BUFFER 1
+
#define EFSYS_OPT_TUNNEL 1
+#define EFSYS_OPT_FW_SUBVARIANT_AWARE 1
+
/* ID */
typedef struct __efsys_identifier_s efsys_identifier_t;
@@ -370,6 +380,9 @@ typedef struct efsys_mem_s {
} while (B_FALSE)
+#define EFSYS_MEM_SIZE(_esmp) \
+ ((_esmp)->esm_mz->len)
+
#define EFSYS_MEM_ADDR(_esmp) \
((_esmp)->esm_addr)
@@ -721,7 +734,7 @@ typedef uint64_t efsys_stat_t;
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
do { \
(void)(_esip); \
- RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
+ SFC_GENERIC_LOG(ERR, "FATAL ERROR #%u (0x%08x%08x)", \
(_code), (_dword0), (_dword1)); \
_NOTE(CONSTANTCONDITION); \
} while (B_FALSE)
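The new EFSYS_MEM_SIZE() accessor is what lets the siena_mac.c and siena_phy.c hunks above bound DMA syncs and reads by the real memzone length instead of a compile-time constant. A simplified sketch, with the structures reduced to just the fields the macro touches:

#include <stddef.h>

/* Reduced stand-ins for rte_memzone and efsys_mem_t */
struct example_memzone { size_t len; };
struct example_mem { const struct example_memzone *esm_mz; };

#define EXAMPLE_MEM_SIZE(_esmp)	((_esmp)->esm_mz->len)

/* Sync length now tracks the buffer actually allocated */
static size_t
example_sync_len(const struct example_mem *esmp)
{
	return EXAMPLE_MEM_SIZE(esmp);
}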
diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index b60a8f58..2d34e869 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -6,7 +6,7 @@
# This software was jointly developed between OKTET Labs (under contract
# for Solarflare) and Solarflare Communications, Inc.
-if arch_subdir != 'x86'
+if arch_subdir != 'x86' or cc.sizeof('void *') == 4
build = false
endif
@@ -30,10 +30,6 @@ extra_flags += [
'-Wbad-function-cast'
]
-# Suppress ICC false positive warning on 'bulk' may be used before its
-# value is set
-extra_flags += '-wd3656'
-
foreach flag: extra_flags
if cc.has_argument(flag)
cflags += flag
@@ -58,6 +54,7 @@ sources = files(
'sfc_flow.c',
'sfc_dp.c',
'sfc_ef10_rx.c',
+ 'sfc_ef10_essb_rx.c',
'sfc_ef10_tx.c'
)
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index ac5fdcaa..6690053f 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -20,6 +20,8 @@
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
+#include "sfc_kvargs.h"
+#include "sfc_tweak.h"
int
@@ -81,14 +83,23 @@ sfc_phy_cap_from_link_speeds(uint32_t speeds)
phy_caps |=
(1 << EFX_PHY_CAP_1000FDX) |
(1 << EFX_PHY_CAP_10000FDX) |
- (1 << EFX_PHY_CAP_40000FDX);
+ (1 << EFX_PHY_CAP_25000FDX) |
+ (1 << EFX_PHY_CAP_40000FDX) |
+ (1 << EFX_PHY_CAP_50000FDX) |
+ (1 << EFX_PHY_CAP_100000FDX);
}
if (speeds & ETH_LINK_SPEED_1G)
phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
if (speeds & ETH_LINK_SPEED_10G)
phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
+ if (speeds & ETH_LINK_SPEED_25G)
+ phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
if (speeds & ETH_LINK_SPEED_40G)
phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
+ if (speeds & ETH_LINK_SPEED_50G)
+ phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
+ if (speeds & ETH_LINK_SPEED_100G)
+ phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
return phy_caps;
}
@@ -113,10 +124,12 @@ sfc_check_conf(struct sfc_adapter *sa)
rc = EINVAL;
}
+#if !EFSYS_OPT_LOOPBACK
if (conf->lpbk_mode != 0) {
sfc_err(sa, "Loopback not supported");
rc = EINVAL;
}
+#endif
if (conf->dcb_capability_en != 0) {
sfc_err(sa, "Priority-based flow control not supported");
@@ -250,6 +263,58 @@ sfc_set_drv_limits(struct sfc_adapter *sa)
}
static int
+sfc_set_fw_subvariant(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
+ unsigned int txq_index;
+ efx_nic_fw_subvariant_t req_fw_subvariant;
+ efx_nic_fw_subvariant_t cur_fw_subvariant;
+ int rc;
+
+ if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
+ sfc_info(sa, "no-Tx-checksum subvariant not supported");
+ return 0;
+ }
+
+ for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) {
+ struct sfc_txq_info *txq_info = &sa->txq_info[txq_index];
+
+ if (txq_info->txq != NULL)
+ tx_offloads |= txq_info->txq->offloads;
+ }
+
+ if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
+ else
+ req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
+
+ rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
+ if (rc != 0) {
+ sfc_err(sa, "failed to get FW subvariant: %d", rc);
+ return rc;
+ }
+ sfc_info(sa, "FW subvariant is %u vs required %u",
+ cur_fw_subvariant, req_fw_subvariant);
+
+ if (cur_fw_subvariant == req_fw_subvariant)
+ return 0;
+
+ rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
+ if (rc != 0) {
+ sfc_err(sa, "failed to set FW subvariant %u: %d",
+ req_fw_subvariant, rc);
+ return rc;
+ }
+ sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
+
+ return 0;
+}
+
+static int
sfc_try_start(struct sfc_adapter *sa)
{
const efx_nic_cfg_t *encp;
@@ -260,6 +325,11 @@ sfc_try_start(struct sfc_adapter *sa)
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
+ sfc_log_init(sa, "set FW subvariant");
+ rc = sfc_set_fw_subvariant(sa);
+ if (rc != 0)
+ goto fail_set_fw_subvariant;
+
sfc_log_init(sa, "set resource limits");
rc = sfc_set_drv_limits(sa);
if (rc != 0)
@@ -326,6 +396,7 @@ fail_tunnel_reconfigure:
fail_nic_init:
fail_set_drv_limits:
+fail_set_fw_subvariant:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
@@ -344,7 +415,7 @@ sfc_start(struct sfc_adapter *sa)
case SFC_ADAPTER_CONFIGURED:
break;
case SFC_ADAPTER_STARTED:
- sfc_info(sa, "already started");
+ sfc_notice(sa, "already started");
return 0;
default:
rc = EINVAL;
@@ -383,7 +454,7 @@ sfc_stop(struct sfc_adapter *sa)
case SFC_ADAPTER_STARTED:
break;
case SFC_ADAPTER_CONFIGURED:
- sfc_info(sa, "already stopped");
+ sfc_notice(sa, "already stopped");
return;
default:
sfc_err(sa, "stop in unexpected state %u", sa->state);
@@ -454,7 +525,7 @@ sfc_schedule_restart(struct sfc_adapter *sa)
else if (rc != 0)
sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
else
- sfc_info(sa, "restart scheduled");
+ sfc_notice(sa, "restart scheduled");
}
int
@@ -530,27 +601,18 @@ sfc_close(struct sfc_adapter *sa)
}
static int
-sfc_mem_bar_init(struct sfc_adapter *sa)
+sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
{
struct rte_eth_dev *eth_dev = sa->eth_dev;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
efsys_bar_t *ebp = &sa->mem_bar;
- unsigned int i;
- struct rte_mem_resource *res;
-
- for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) {
- res = &pci_dev->mem_resource[i];
- if ((res->len != 0) && (res->phys_addr != 0)) {
- /* Found first memory BAR */
- SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
- ebp->esb_rid = i;
- ebp->esb_dev = pci_dev;
- ebp->esb_base = res->addr;
- return 0;
- }
- }
+ struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
- return EFAULT;
+ SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
+ ebp->esb_rid = membar;
+ ebp->esb_dev = pci_dev;
+ ebp->esb_base = res->addr;
+ return 0;
}
static void
@@ -562,7 +624,6 @@ sfc_mem_bar_fini(struct sfc_adapter *sa)
memset(ebp, 0, sizeof(*ebp));
}
-#if EFSYS_OPT_RX_SCALE
/*
* A fixed RSS key which has a property of being symmetric
* (symmetrical flows are distributed to the same CPU)
@@ -576,12 +637,11 @@ static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};
-#endif
-#if EFSYS_OPT_RX_SCALE
static int
-sfc_set_rss_defaults(struct sfc_adapter *sa)
+sfc_rss_attach(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
int rc;
rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
@@ -596,26 +656,31 @@ sfc_set_rss_defaults(struct sfc_adapter *sa)
if (rc != 0)
goto fail_rx_init;
- rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
+ rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
if (rc != 0)
goto fail_scale_support_get;
- rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
+ rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
if (rc != 0)
goto fail_hash_support_get;
+ rc = sfc_rx_hash_init(sa);
+ if (rc != 0)
+ goto fail_rx_hash_init;
+
efx_rx_fini(sa->nic);
efx_ev_fini(sa->nic);
efx_intr_fini(sa->nic);
- sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
-
- rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
+ rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
return 0;
+fail_rx_hash_init:
fail_hash_support_get:
fail_scale_support_get:
+ efx_rx_fini(sa->nic);
+
fail_rx_init:
efx_ev_fini(sa->nic);
@@ -625,13 +690,12 @@ fail_ev_init:
fail_intr_init:
return rc;
}
-#else
-static int
-sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
+
+static void
+sfc_rss_detach(struct sfc_adapter *sa)
{
- return 0;
+ sfc_rx_hash_fini(sa);
}
-#endif
int
sfc_attach(struct sfc_adapter *sa)
@@ -690,9 +754,9 @@ sfc_attach(struct sfc_adapter *sa)
if (rc != 0)
goto fail_port_attach;
- rc = sfc_set_rss_defaults(sa);
+ rc = sfc_rss_attach(sa);
if (rc != 0)
- goto fail_set_rss_defaults;
+ goto fail_rss_attach;
rc = sfc_filter_attach(sa);
if (rc != 0)
@@ -709,7 +773,9 @@ sfc_attach(struct sfc_adapter *sa)
return 0;
fail_filter_attach:
-fail_set_rss_defaults:
+ sfc_rss_detach(sa);
+
+fail_rss_attach:
sfc_port_detach(sa);
fail_port_attach:
@@ -741,6 +807,7 @@ sfc_detach(struct sfc_adapter *sa)
sfc_flow_fini(sa);
sfc_filter_detach(sa);
+ sfc_rss_detach(sa);
sfc_port_detach(sa);
sfc_ev_detach(sa);
sfc_intr_detach(sa);
@@ -749,10 +816,169 @@ sfc_detach(struct sfc_adapter *sa)
sa->state = SFC_ADAPTER_UNINITIALIZED;
}
+static int
+sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ uint32_t *value = opaque;
+
+ if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
+ *value = EFX_FW_VARIANT_DONT_CARE;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
+ *value = EFX_FW_VARIANT_FULL_FEATURED;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
+ *value = EFX_FW_VARIANT_LOW_LATENCY;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
+ *value = EFX_FW_VARIANT_PACKED_STREAM;
+ else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
+ *value = EFX_FW_VARIANT_DPDK;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
+{
+ efx_nic_fw_info_t enfi;
+ int rc;
+
+ rc = efx_nic_get_fw_version(sa->nic, &enfi);
+ if (rc != 0)
+ return rc;
+ else if (!enfi.enfi_dpcpu_fw_ids_valid)
+ return ENOTSUP;
+
+ /*
+ * Firmware variant can be uniquely identified by the RxDPCPU
+ * firmware id
+ */
+ switch (enfi.enfi_rx_dpcpu_fw_id) {
+ case EFX_RXDP_FULL_FEATURED_FW_ID:
+ *efv = EFX_FW_VARIANT_FULL_FEATURED;
+ break;
+
+ case EFX_RXDP_LOW_LATENCY_FW_ID:
+ *efv = EFX_FW_VARIANT_LOW_LATENCY;
+ break;
+
+ case EFX_RXDP_PACKED_STREAM_FW_ID:
+ *efv = EFX_FW_VARIANT_PACKED_STREAM;
+ break;
+
+ case EFX_RXDP_DPDK_FW_ID:
+ *efv = EFX_FW_VARIANT_DPDK;
+ break;
+
+ default:
+ /*
+ * Other firmware variants are not considered, since they are
+ * not supported in the device parameters
+ */
+ *efv = EFX_FW_VARIANT_DONT_CARE;
+ break;
+ }
+
+ return 0;
+}
+
+static const char *
+sfc_fw_variant2str(efx_fw_variant_t efv)
+{
+ switch (efv) {
+ case EFX_RXDP_FULL_FEATURED_FW_ID:
+ return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
+ case EFX_RXDP_LOW_LATENCY_FW_ID:
+ return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
+ case EFX_RXDP_PACKED_STREAM_FW_ID:
+ return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
+ case EFX_RXDP_DPDK_FW_ID:
+ return SFC_KVARG_FW_VARIANT_DPDK;
+ default:
+ return "unknown";
+ }
+}
+
+static int
+sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
+{
+ int rc;
+ long value;
+
+ value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
+ sfc_kvarg_long_handler, &value);
+ if (rc != 0)
+ return rc;
+
+ if (value < 0 ||
+ (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
+ sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
+ "was set (%ld);", value);
+ sfc_err(sa, "it must not be less than 0 or greater than %u",
+ EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
+ return EINVAL;
+ }
+
+ sa->rxd_wait_timeout_ns = value;
+ return 0;
+}
+
+static int
+sfc_nic_probe(struct sfc_adapter *sa)
+{
+ efx_nic_t *enp = sa->nic;
+ efx_fw_variant_t preferred_efv;
+ efx_fw_variant_t efv;
+ int rc;
+
+ preferred_efv = EFX_FW_VARIANT_DONT_CARE;
+ rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
+ sfc_kvarg_fv_variant_handler,
+ &preferred_efv);
+ if (rc != 0) {
+ sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
+ return rc;
+ }
+
+ rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
+ if (rc != 0)
+ return rc;
+
+ rc = efx_nic_probe(enp, preferred_efv);
+ if (rc == EACCES) {
+ /* Unprivileged functions cannot set FW variant */
+ rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
+ }
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_get_fw_variant(sa, &efv);
+ if (rc == ENOTSUP) {
+		sfc_warn(sa, "FW variant cannot be obtained");
+ return 0;
+ }
+ if (rc != 0)
+ return rc;
+
+ /* Check that firmware variant was changed to the requested one */
+ if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
+ sfc_warn(sa, "FW variant has not changed to the requested %s",
+ sfc_fw_variant2str(preferred_efv));
+ }
+
+ sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
+
+ return 0;
+}
+
int
sfc_probe(struct sfc_adapter *sa)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
+ unsigned int membar;
efx_nic_t *enp;
int rc;
@@ -763,17 +989,17 @@ sfc_probe(struct sfc_adapter *sa)
sa->socket_id = rte_socket_id();
rte_atomic32_init(&sa->restart_required);
- sfc_log_init(sa, "init mem bar");
- rc = sfc_mem_bar_init(sa);
- if (rc != 0)
- goto fail_mem_bar_init;
-
sfc_log_init(sa, "get family");
rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
- &sa->family);
+ &sa->family, &membar);
if (rc != 0)
goto fail_family;
- sfc_log_init(sa, "family is %u", sa->family);
+ sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);
+
+ sfc_log_init(sa, "init mem bar");
+ rc = sfc_mem_bar_init(sa, membar);
+ if (rc != 0)
+ goto fail_mem_bar_init;
sfc_log_init(sa, "create nic");
rte_spinlock_init(&sa->nic_lock);
@@ -788,7 +1014,7 @@ sfc_probe(struct sfc_adapter *sa)
goto fail_mcdi_init;
sfc_log_init(sa, "probe nic");
- rc = efx_nic_probe(enp);
+ rc = sfc_nic_probe(sa);
if (rc != 0)
goto fail_nic_probe;
@@ -804,10 +1030,10 @@ fail_mcdi_init:
efx_nic_destroy(enp);
fail_nic_create:
-fail_family:
sfc_mem_bar_fini(sa);
fail_mem_bar_init:
+fail_family:
sfc_log_init(sa, "failed %d", rc);
return rc;
}
@@ -843,3 +1069,35 @@ sfc_unprobe(struct sfc_adapter *sa)
sfc_flow_fini(sa);
sa->state = SFC_ADAPTER_UNINITIALIZED;
}
+
+uint32_t
+sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
+ uint32_t ll_default)
+{
+ size_t lt_prefix_str_size = strlen(lt_prefix_str);
+ size_t lt_str_size_max;
+ char *lt_str = NULL;
+ int ret;
+
+ if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
+ ++lt_prefix_str_size; /* Reserve space for prefix separator */
+ lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
+ } else {
+ return RTE_LOGTYPE_PMD;
+ }
+
+ lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
+ if (lt_str == NULL)
+ return RTE_LOGTYPE_PMD;
+
+ strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
+ lt_str[lt_prefix_str_size - 1] = '.';
+ rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
+ lt_str_size_max - lt_prefix_str_size);
+ lt_str[lt_str_size_max - 1] = '\0';
+
+ ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
+ rte_free(lt_str);
+
+ return (ret < 0) ? RTE_LOGTYPE_PMD : ret;
+}
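A hypothetical caller of the new sfc_register_logtype() helper might look like the sketch below; the prefix string and default level are assumptions, not values taken from this patch:

#include <rte_log.h>

#include "sfc.h"

/* Hypothetical attach-time hook; prefix and level are illustrative */
static void
example_init_logging(struct sfc_adapter *sa)
{
	sa->logtype_main = sfc_register_logtype(sa, "pmd.net.sfc.main",
						RTE_LOG_NOTICE);
}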
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 75575349..51be4403 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -27,11 +27,6 @@
extern "C" {
#endif
-#if EFSYS_OPT_RX_SCALE
-/** RSS hash offloads mask */
-#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP)
-#endif
-
/*
* +---------------+
* | UNINITIALIZED |<-----------+
@@ -104,7 +99,7 @@ struct sfc_mcdi {
efsys_mem_t mem;
enum sfc_mcdi_state state;
efx_mcdi_transport_t transport;
- bool logging;
+ uint32_t logtype;
uint32_t proxy_handle;
efx_rc_t proxy_result;
};
@@ -156,6 +151,24 @@ struct sfc_port {
uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES];
};
+struct sfc_rss_hf_rte_to_efx {
+ uint64_t rte;
+ efx_rx_hash_type_t efx;
+};
+
+struct sfc_rss {
+ unsigned int channels;
+ efx_rx_scale_context_type_t context_type;
+ efx_rx_hash_support_t hash_support;
+ efx_rx_hash_alg_t hash_alg;
+ unsigned int hf_map_nb_entries;
+ struct sfc_rss_hf_rte_to_efx *hf_map;
+
+ efx_rx_hash_type_t hash_types;
+ unsigned int tbl[EFX_RSS_TBL_SIZE];
+ uint8_t key[EFX_RSS_KEY_SIZE];
+};
+
/* Adapter private data */
struct sfc_adapter {
/*
@@ -170,7 +183,7 @@ struct sfc_adapter {
uint16_t port_id;
struct rte_eth_dev *eth_dev;
struct rte_kvargs *kvargs;
- bool debug_init;
+ uint32_t logtype_main;
int socket_id;
efsys_bar_t mem_bar;
efx_family_t family;
@@ -225,15 +238,9 @@ struct sfc_adapter {
boolean_t tso;
- unsigned int rss_channels;
+ uint32_t rxd_wait_timeout_ns;
-#if EFSYS_OPT_RX_SCALE
- efx_rx_scale_context_type_t rss_support;
- efx_rx_hash_support_t hash_support;
- efx_rx_hash_type_t rss_hash_types;
- unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
- uint8_t rss_key[EFX_RSS_KEY_SIZE];
-#endif
+ struct sfc_rss rss;
/*
* Shared memory copy of the Rx datapath name to be used by
@@ -302,6 +309,10 @@ int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
size_t len, int socket_id, efsys_mem_t *esmp);
void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
+uint32_t sfc_register_logtype(struct sfc_adapter *sa,
+ const char *lt_prefix_str,
+ uint32_t ll_default);
+
int sfc_probe(struct sfc_adapter *sa);
void sfc_unprobe(struct sfc_adapter *sa);
int sfc_attach(struct sfc_adapter *sa);
diff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c
index 9a5ca20b..b121dc09 100644
--- a/drivers/net/sfc/sfc_dp.c
+++ b/drivers/net/sfc/sfc_dp.c
@@ -14,6 +14,7 @@
#include <rte_log.h>
#include "sfc_dp.h"
+#include "sfc_log.h"
void
sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id,
@@ -63,8 +64,8 @@ int
sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
{
if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
- rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,
- "sfc %s dapapath '%s' already registered\n",
+ SFC_GENERIC_LOG(ERR,
+		"sfc %s datapath '%s' already registered",
entry->type == SFC_DP_RX ? "Rx" :
entry->type == SFC_DP_TX ? "Tx" :
"unknown",
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
index b142532d..3da65abe 100644
--- a/drivers/net/sfc/sfc_dp.h
+++ b/drivers/net/sfc/sfc_dp.h
@@ -15,6 +15,8 @@
#include <rte_pci.h>
+#include "sfc_log.h"
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -58,10 +60,10 @@ void sfc_dp_queue_init(struct sfc_dp_queue *dpq,
const struct sfc_dp_queue *_dpq = (dpq); \
const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \
\
- RTE_LOG(level, PMD, \
+ SFC_GENERIC_LOG(level, \
RTE_FMT("%s " PCI_PRI_FMT \
" #%" PRIu16 ".%" PRIu16 ": " \
- RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ RTE_FMT_HEAD(__VA_ARGS__ ,), \
dp_name, \
_addr->domain, _addr->bus, \
_addr->devid, _addr->function, \
@@ -77,7 +79,8 @@ struct sfc_dp {
enum sfc_dp_type type;
/* Mask of required hardware/firmware capabilities */
unsigned int hw_fw_caps;
-#define SFC_DP_HW_FW_CAP_EF10 0x1
+#define SFC_DP_HW_FW_CAP_EF10 0x1
+#define SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER 0x2
};
/** List of datapath variants */
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
index be725dcb..ce96e83f 100644
--- a/drivers/net/sfc/sfc_dp_rx.h
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -78,6 +78,8 @@ struct sfc_dp_rx_qcreate_info {
* doorbell
*/
volatile void *mem_bar;
+ /** VI window size shift */
+ unsigned int vi_window_shift;
};
/**
@@ -88,10 +90,23 @@ struct sfc_dp_rx_qcreate_info {
typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
/**
+ * Test if an Rx datapath supports specific mempool ops.
+ *
+ * @param pool The name of the pool operations to test.
+ *
+ * @return Check status.
+ * @retval 0 Best mempool ops choice.
+ * @retval 1 Mempool ops are supported.
+ * @retval -ENOTSUP Mempool ops not supported.
+ */
+typedef int (sfc_dp_rx_pool_ops_supported_t)(const char *pool);
+
+/**
* Get size of receive and event queue rings by the number of Rx
- * descriptors.
+ * descriptors and mempool configuration.
*
* @param nb_rx_desc Number of Rx descriptors
+ * @param mb_pool mbuf pool with Rx buffers
* @param rxq_entries Location for number of Rx ring entries
* @param evq_entries Location for number of event ring entries
* @param rxq_max_fill_level Location for maximum Rx ring fill level
@@ -99,6 +114,7 @@ typedef void (sfc_dp_rx_get_dev_info_t)(struct rte_eth_dev_info *dev_info);
* @return 0 or positive errno.
*/
typedef int (sfc_dp_rx_qsize_up_rings_t)(uint16_t nb_rx_desc,
+ struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
unsigned int *rxq_max_fill_level);
@@ -146,6 +162,12 @@ typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
/**
+ * Packed stream receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ps_ev_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int id);
+
+/**
* Receive queue purge function called after queue flush.
*
 * Should be used to free unused receive buffers.
@@ -171,13 +193,18 @@ struct sfc_dp_rx {
#define SFC_DP_RX_FEAT_SCATTER 0x1
#define SFC_DP_RX_FEAT_MULTI_PROCESS 0x2
#define SFC_DP_RX_FEAT_TUNNELS 0x4
+#define SFC_DP_RX_FEAT_FLOW_FLAG 0x8
+#define SFC_DP_RX_FEAT_FLOW_MARK 0x10
+#define SFC_DP_RX_FEAT_CHECKSUM 0x20
sfc_dp_rx_get_dev_info_t *get_dev_info;
+ sfc_dp_rx_pool_ops_supported_t *pool_ops_supported;
sfc_dp_rx_qsize_up_rings_t *qsize_up_rings;
sfc_dp_rx_qcreate_t *qcreate;
sfc_dp_rx_qdestroy_t *qdestroy;
sfc_dp_rx_qstart_t *qstart;
sfc_dp_rx_qstop_t *qstop;
sfc_dp_rx_qrx_ev_t *qrx_ev;
+ sfc_dp_rx_qrx_ps_ev_t *qrx_ps_ev;
sfc_dp_rx_qpurge_t *qpurge;
sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
sfc_dp_rx_qdesc_npending_t *qdesc_npending;
@@ -203,6 +230,7 @@ sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
extern struct sfc_dp_rx sfc_efx_rx;
extern struct sfc_dp_rx sfc_ef10_rx;
+extern struct sfc_dp_rx sfc_ef10_essb_rx;
#ifdef __cplusplus
}
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index 0c1aad90..eda9676c 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -39,8 +39,6 @@ struct sfc_dp_tx_qcreate_info {
unsigned int max_fill_level;
/** Minimum number of unused Tx descriptors to do reap */
unsigned int free_thresh;
- /** Transmit queue configuration flags */
- unsigned int flags;
/** Offloads enabled on the transmit queue */
uint64_t offloads;
/** Tx queue size */
@@ -57,6 +55,8 @@ struct sfc_dp_tx_qcreate_info {
unsigned int hw_index;
/** Virtual address of the memory-mapped BAR to push Tx doorbell */
volatile void *mem_bar;
+ /** VI window size shift */
+ unsigned int vi_window_shift;
};
/**
diff --git a/drivers/net/sfc/sfc_ef10.h b/drivers/net/sfc/sfc_ef10.h
index ace6a1dd..a73e0bde 100644
--- a/drivers/net/sfc/sfc_ef10.h
+++ b/drivers/net/sfc/sfc_ef10.h
@@ -79,6 +79,40 @@ sfc_ef10_ev_present(const efx_qword_t ev)
~EFX_QWORD_FIELD(ev, EFX_DWORD_1);
}
+
+/**
+ * Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary.
+ */
+#define SFC_EF10_RX_WPTR_ALIGN 8u
+
+static inline void
+sfc_ef10_rx_qpush(volatile void *doorbell, unsigned int added,
+ unsigned int ptr_mask)
+{
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+ SFC_ASSERT(RTE_ALIGN(added, SFC_EF10_RX_WPTR_ALIGN) == added);
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, added & ptr_mask);
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_write32() has rte_io_wmb() which guarantees that the STORE
+ * operations (i.e. Rx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to NIC before the STORE
+ * operations that follow it (i.e. doorbell write).
+ */
+ rte_write32(dword.ed_u32[0], doorbell);
+}
+
+
+const uint32_t * sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps);
+
+
#ifdef __cplusplus
}
#endif
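Moving sfc_ef10_rx_qpush() into this shared header lets both EF10 Rx datapaths publish the producer index the same way. A hedged caller sketch, assuming the queue fields match the datapath structures in this patch:

#include "sfc_ef10.h"

/*
 * Descriptors are posted in multiples of SFC_EF10_RX_WPTR_ALIGN; the
 * helper asserts this, so keep refill bulks a multiple of 8.
 */
static void
example_refill_push(volatile void *doorbell, unsigned int added,
		    unsigned int ptr_mask)
{
	added &= ~(SFC_EF10_RX_WPTR_ALIGN - 1);
	sfc_ef10_rx_qpush(doorbell, added, ptr_mask);
}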
diff --git a/drivers/net/sfc/sfc_ef10_essb_rx.c b/drivers/net/sfc/sfc_ef10_essb_rx.c
new file mode 100644
index 00000000..81c8f7fb
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_essb_rx.c
@@ -0,0 +1,723 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+/* EF10 equal stride packed stream receive native datapath implementation */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_tweak.h"
+#include "sfc_dp_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+/* Tunnels are not supported */
+#define SFC_EF10_RX_EV_ENCAP_SUPPORT 0
+#include "sfc_ef10_rx_ev.h"
+
+#define sfc_ef10_essb_rx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, ERR, dpq, __VA_ARGS__)
+
+#define sfc_ef10_essb_rx_info(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10_ESSB, INFO, dpq, __VA_ARGS__)
+
+/*
+ * Fake length for RXQ descriptors in equal stride super-buffer mode
+ * to make hardware happy.
+ */
+#define SFC_EF10_ESSB_RX_FAKE_BUF_SIZE 32
+
+/**
+ * Minimum number of Rx buffers the datapath allows the caller to use.
+ *
+ * Each HW Rx descriptor carries many Rx buffers. The number of buffers
+ * per HW Rx descriptor equals the size of the contiguous block
+ * provided by the Rx buffers memory pool. The contiguous block size
+ * depends on CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB and the rte_mbuf
+ * data size specified on memory pool creation. A typical rte_mbuf
+ * data size is about 2k, which makes a bit less than 32 buffers per
+ * contiguous block with the default bucket size of 64k.
+ * Since HW Rx descriptors are pushed by 8 (see SFC_EF10_RX_WPTR_ALIGN),
+ * this makes about 256 the required minimum. It is doubled in the
+ * advertised minimum to allow for at least 2 refill blocks.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_MIN 512
+
+/**
+ * Alignment requirement for the number of Rx buffers.
+ *
+ * There are no extra alignment requirements since the actual number of
+ * pushed Rx buffers is a multiple of the contiguous block size, which
+ * is unknown beforehand.
+ */
+#define SFC_EF10_ESSB_RX_DESCS_ALIGN 1
+
+/**
+ * Maximum number of descriptors/buffers in the Rx ring.
+ * It should guarantee that the corresponding event queue never overfills.
+ */
+#define SFC_EF10_ESSB_RXQ_LIMIT(_nevs) \
+ ((_nevs) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
+
+struct sfc_ef10_essb_rx_sw_desc {
+ struct rte_mbuf *first_mbuf;
+};
+
+struct sfc_ef10_essb_rxq {
+ /* Used on data path */
+ unsigned int flags;
+#define SFC_EF10_ESSB_RXQ_STARTED 0x1
+#define SFC_EF10_ESSB_RXQ_NOT_RUNNING 0x2
+#define SFC_EF10_ESSB_RXQ_EXCEPTION 0x4
+ unsigned int rxq_ptr_mask;
+ unsigned int block_size;
+ unsigned int buf_stride;
+ unsigned int bufs_ptr;
+ unsigned int completed;
+ unsigned int pending_id;
+ unsigned int bufs_pending;
+ unsigned int left_in_completed;
+ unsigned int left_in_pending;
+ unsigned int evq_read_ptr;
+ unsigned int evq_ptr_mask;
+ efx_qword_t *evq_hw_ring;
+ struct sfc_ef10_essb_rx_sw_desc *sw_ring;
+ uint16_t port_id;
+
+ /* Used on refill */
+ unsigned int added;
+ unsigned int max_fill_level;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ efx_qword_t *rxq_hw_ring;
+ volatile void *doorbell;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_ef10_essb_rxq *
+sfc_ef10_essb_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_ef10_essb_rxq, dp);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_next_mbuf(const struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf *mbuf)
+{
+ return (struct rte_mbuf *)((uintptr_t)mbuf + rxq->buf_stride);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_mbuf_by_index(const struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf *mbuf, unsigned int idx)
+{
+ return (struct rte_mbuf *)((uintptr_t)mbuf + idx * rxq->buf_stride);
+}
+
+static struct rte_mbuf *
+sfc_ef10_essb_maybe_next_completed(struct sfc_ef10_essb_rxq *rxq)
+{
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+
+ if (rxq->left_in_completed != 0) {
+ rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+ return sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_completed);
+ } else {
+ rxq->completed++;
+ rxd = &rxq->sw_ring[rxq->completed & rxq->rxq_ptr_mask];
+ rxq->left_in_completed = rxq->block_size;
+ return rxd->first_mbuf;
+ }
+}
+
+static void
+sfc_ef10_essb_rx_qrefill(struct sfc_ef10_essb_rxq *rxq)
+{
+ const unsigned int rxq_ptr_mask = rxq->rxq_ptr_mask;
+ unsigned int free_space;
+ unsigned int bulks;
+ void *mbuf_blocks[SFC_EF10_RX_WPTR_ALIGN];
+ unsigned int added = rxq->added;
+
+ free_space = rxq->max_fill_level - (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(mbuf_blocks);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ do {
+ unsigned int id;
+ unsigned int i;
+
+ if (unlikely(rte_mempool_get_contig_blocks(rxq->refill_mb_pool,
+ mbuf_blocks, RTE_DIM(mbuf_blocks)) < 0)) {
+ struct rte_eth_dev_data *dev_data =
+ rte_eth_devices[rxq->port_id].data;
+
+ /*
+			 * It is hardly safe to increment the counter
+ * from different contexts, but all PMDs do it.
+ */
+ dev_data->rx_mbuf_alloc_failed += RTE_DIM(mbuf_blocks);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0, id = added & rxq_ptr_mask;
+ i < RTE_DIM(mbuf_blocks);
+ ++i, ++id) {
+ struct rte_mbuf *m = mbuf_blocks[i];
+ struct sfc_ef10_essb_rx_sw_desc *rxd;
+
+ SFC_ASSERT((id & ~rxq_ptr_mask) == 0);
+ rxd = &rxq->sw_ring[id];
+ rxd->first_mbuf = m;
+
+ /* RX_KER_BYTE_CNT is ignored by firmware */
+ EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
+ ESF_DZ_RX_KER_BYTE_CNT,
+ SFC_EF10_ESSB_RX_FAKE_BUF_SIZE,
+ ESF_DZ_RX_KER_BUF_ADDR,
+ rte_mbuf_data_iova_default(m));
+ }
+
+ added += RTE_DIM(mbuf_blocks);
+
+ } while (--bulks > 0);
+
+ SFC_ASSERT(rxq->added != added);
+ rxq->added = added;
+ sfc_ef10_rx_qpush(rxq->doorbell, added, rxq_ptr_mask);
+}
+
+static bool
+sfc_ef10_essb_rx_event_get(struct sfc_ef10_essb_rxq *rxq, efx_qword_t *rx_ev)
+{
+ *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->evq_ptr_mask];
+
+ if (!sfc_ef10_ev_present(*rx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_RX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling
+ */
+ rxq->flags |= SFC_EF10_ESSB_RXQ_EXCEPTION;
+ sfc_ef10_essb_rx_err(&rxq->dp.dpq,
+ "RxQ exception at EvQ read ptr %#x",
+ rxq->evq_read_ptr);
+ return false;
+ }
+
+ rxq->evq_read_ptr++;
+ return true;
+}
+
+static void
+sfc_ef10_essb_rx_process_ev(struct sfc_ef10_essb_rxq *rxq, efx_qword_t rx_ev)
+{
+ unsigned int ready;
+
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
+ rxq->bufs_ptr) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+
+ rxq->bufs_ptr += ready;
+ rxq->bufs_pending += ready;
+
+ SFC_ASSERT(ready > 0);
+ do {
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ unsigned int todo_bufs;
+ struct rte_mbuf *m0;
+
+ rxd = &rxq->sw_ring[rxq->pending_id];
+ m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_pending);
+
+ if (ready < rxq->left_in_pending) {
+ todo_bufs = ready;
+ ready = 0;
+ rxq->left_in_pending -= todo_bufs;
+ } else {
+ todo_bufs = rxq->left_in_pending;
+ ready -= todo_bufs;
+ rxq->left_in_pending = rxq->block_size;
+ if (rxq->pending_id != rxq->rxq_ptr_mask)
+ rxq->pending_id++;
+ else
+ rxq->pending_id = 0;
+ }
+
+ SFC_ASSERT(todo_bufs > 0);
+ --todo_bufs;
+
+ sfc_ef10_rx_ev_to_offloads(rx_ev, m, ~0ull);
+
+ /* Prefetch pseudo-header */
+ rte_prefetch0((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
+
+ m0 = m;
+ while (todo_bufs-- > 0) {
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ m->ol_flags = m0->ol_flags;
+ m->packet_type = m0->packet_type;
+ /* Prefetch pseudo-header */
+ rte_prefetch0((uint8_t *)m->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ }
+ } while (ready > 0);
+}
+
+static unsigned int
+sfc_ef10_essb_rx_get_pending(struct sfc_ef10_essb_rxq *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ unsigned int n_rx_pkts = 0;
+ unsigned int todo_bufs;
+ struct rte_mbuf *m;
+
+ while ((todo_bufs = RTE_MIN(nb_pkts - n_rx_pkts,
+ rxq->bufs_pending)) > 0) {
+ m = sfc_ef10_essb_maybe_next_completed(rxq);
+
+ todo_bufs = RTE_MIN(todo_bufs, rxq->left_in_completed);
+
+ rxq->bufs_pending -= todo_bufs;
+ rxq->left_in_completed -= todo_bufs;
+
+ SFC_ASSERT(todo_bufs > 0);
+ todo_bufs--;
+
+ do {
+ const efx_qword_t *qwordp;
+ uint16_t pkt_len;
+
+ /* Buffers to be discarded have 0 in packet type */
+ if (unlikely(m->packet_type == 0)) {
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ goto next_buf;
+ }
+
+ rx_pkts[n_rx_pkts++] = m;
+
+ /* Parse pseudo-header */
+ qwordp = (const efx_qword_t *)
+ ((uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM);
+ pkt_len =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_DATA_LEN);
+
+ m->data_off = RTE_PKTMBUF_HEADROOM +
+ ES_EZ_ESSB_RX_PREFIX_LEN;
+ m->port = rxq->port_id;
+
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ rte_pktmbuf_data_len(m) = pkt_len;
+
+ m->ol_flags |=
+ (PKT_RX_RSS_HASH *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_HASH_VALID_LBN)) |
+ (PKT_RX_FDIR_ID *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MARK_VALID_LBN)) |
+ (PKT_RX_FDIR *
+ !!EFX_TEST_QWORD_BIT(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MATCH_FLAG_LBN));
+
+ /* EFX_QWORD_FIELD converts little-endian to CPU */
+ m->hash.rss =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_HASH);
+ m->hash.fdir.hi =
+ EFX_QWORD_FIELD(*qwordp,
+ ES_EZ_ESSB_RX_PREFIX_MARK);
+
+next_buf:
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ } while (todo_bufs-- > 0);
+ }
+
+ return n_rx_pkts;
+}
+
+
+static uint16_t
+sfc_ef10_essb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(rx_queue);
+ const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+ uint16_t n_rx_pkts;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+ SFC_EF10_ESSB_RXQ_EXCEPTION)))
+ return 0;
+
+ n_rx_pkts = sfc_ef10_essb_rx_get_pending(rxq, rx_pkts, nb_pkts);
+
+ while (n_rx_pkts != nb_pkts &&
+ sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+ /*
+		 * DROP_EVENT is internal to the NIC; software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+ n_rx_pkts += sfc_ef10_essb_rx_get_pending(rxq,
+ rx_pkts + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+ evq_old_read_ptr, rxq->evq_read_ptr);
+
+	/* It is not a problem if we refill in case of an exception */
+ sfc_ef10_essb_rx_qrefill(rxq);
+
+ return n_rx_pkts;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_ef10_essb_rx_qdesc_npending;
+static unsigned int
+sfc_ef10_essb_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags & (SFC_EF10_ESSB_RXQ_NOT_RUNNING |
+ SFC_EF10_ESSB_RXQ_EXCEPTION)))
+ return rxq->bufs_pending;
+
+ while (sfc_ef10_essb_rx_event_get(rxq, &rx_ev)) {
+ /*
+		 * DROP_EVENT is internal to the NIC; software should
+ * never see it and, therefore, may ignore it.
+ */
+ sfc_ef10_essb_rx_process_ev(rxq, rx_ev);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->evq_ptr_mask,
+ evq_old_read_ptr, rxq->evq_read_ptr);
+
+ return rxq->bufs_pending;
+}
+
+static sfc_dp_rx_qdesc_status_t sfc_ef10_essb_rx_qdesc_status;
+static int
+sfc_ef10_essb_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ unsigned int pending = sfc_ef10_essb_rx_qdesc_npending(dp_rxq);
+
+ if (offset < pending)
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (offset < (rxq->added - rxq->completed) * rxq->block_size +
+ rxq->left_in_completed - rxq->block_size)
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
+
+static sfc_dp_rx_get_dev_info_t sfc_ef10_essb_rx_get_dev_info;
+static void
+sfc_ef10_essb_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
+{
+ /*
+	 * The number of descriptors just defines the maximum number of
+	 * pushed descriptors (fill level).
+ */
+ dev_info->rx_desc_lim.nb_min = SFC_EF10_ESSB_RX_DESCS_MIN;
+ dev_info->rx_desc_lim.nb_align = SFC_EF10_ESSB_RX_DESCS_ALIGN;
+}
+
+static sfc_dp_rx_pool_ops_supported_t sfc_ef10_essb_rx_pool_ops_supported;
+static int
+sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
+{
+ SFC_ASSERT(pool != NULL);
+
+ if (strcmp(pool, "bucket") == 0)
+ return 0;
+
+ return -ENOTSUP;
+}
+
+static sfc_dp_rx_qsize_up_rings_t sfc_ef10_essb_rx_qsize_up_rings;
+static int
+sfc_ef10_essb_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ struct rte_mempool *mb_pool,
+ unsigned int *rxq_entries,
+ unsigned int *evq_entries,
+ unsigned int *rxq_max_fill_level)
+{
+ int rc;
+ struct rte_mempool_info mp_info;
+ unsigned int nb_hw_rx_desc;
+ unsigned int max_events;
+
+ rc = rte_mempool_ops_get_info(mb_pool, &mp_info);
+ if (rc != 0)
+ return -rc;
+ if (mp_info.contig_block_size == 0)
+ return EINVAL;
+
+ /*
+	 * Calculate the required number of hardware Rx descriptors, each
+	 * carrying contig-block-size Rx buffers.
+	 * It cannot be less than the Rx write pointer alignment plus 1,
+	 * to avoid cases when the ring is guaranteed to be empty.
+ */
+ nb_hw_rx_desc = RTE_MAX(SFC_DIV_ROUND_UP(nb_rx_desc,
+ mp_info.contig_block_size),
+ SFC_EF10_RX_WPTR_ALIGN + 1);
+ if (nb_hw_rx_desc <= EFX_RXQ_MINNDESCS) {
+ *rxq_entries = EFX_RXQ_MINNDESCS;
+ } else {
+ *rxq_entries = rte_align32pow2(nb_hw_rx_desc);
+ if (*rxq_entries > EFX_RXQ_MAXNDESCS)
+ return EINVAL;
+ }
+
+ max_events = RTE_ALIGN_FLOOR(nb_hw_rx_desc, SFC_EF10_RX_WPTR_ALIGN) *
+ mp_info.contig_block_size +
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ +
+ 1 /* Rx error */ + 1 /* flush */ + 1 /* head-tail space */;
+
+ *evq_entries = rte_align32pow2(max_events);
+ *evq_entries = RTE_MAX(*evq_entries, (unsigned int)EFX_EVQ_MINNEVS);
+ *evq_entries = RTE_MIN(*evq_entries, (unsigned int)EFX_EVQ_MAXNEVS);
+
+ /*
+	 * Even the maximum event queue size may be insufficient to handle
+	 * so many Rx descriptors. If so, limit the Rx queue fill level.
+ */
+ *rxq_max_fill_level = RTE_MIN(nb_rx_desc,
+ SFC_EF10_ESSB_RXQ_LIMIT(*evq_entries));
+ return 0;
+}
+
+static sfc_dp_rx_qcreate_t sfc_ef10_essb_rx_qcreate;
+static int
+sfc_ef10_essb_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct rte_mempool * const mp = info->refill_mb_pool;
+ struct rte_mempool_info mp_info;
+ struct sfc_ef10_essb_rxq *rxq;
+ int rc;
+
+ rc = rte_mempool_ops_get_info(mp, &mp_info);
+ if (rc != 0) {
+ /* Positive errno is used in the driver */
+ rc = -rc;
+ goto fail_get_contig_block_size;
+ }
+
+ /* Check if the mempool provides block dequeue */
+ rc = EINVAL;
+ if (mp_info.contig_block_size == 0)
+ goto fail_no_block_dequeue;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
+ info->rxq_entries,
+ sizeof(*rxq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL)
+ goto fail_desc_alloc;
+
+ rxq->block_size = mp_info.contig_block_size;
+ rxq->buf_stride = mp->header_size + mp->elt_size + mp->trailer_size;
+ rxq->rxq_ptr_mask = info->rxq_entries - 1;
+ rxq->evq_ptr_mask = info->evq_entries - 1;
+ rxq->evq_hw_ring = info->evq_hw_ring;
+ rxq->port_id = port_id;
+
+ rxq->max_fill_level = info->max_fill_level / mp_info.contig_block_size;
+ rxq->refill_threshold =
+ RTE_MAX(info->refill_threshold / mp_info.contig_block_size,
+ SFC_EF10_RX_WPTR_ALIGN);
+ rxq->refill_mb_pool = mp;
+ rxq->rxq_hw_ring = info->rxq_hw_ring;
+
+ rxq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_RX_DESC_UPD_REG_OFST +
+ (info->hw_index << info->vi_window_shift);
+
+ sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+ "block size is %u, buf stride is %u",
+ rxq->block_size, rxq->buf_stride);
+ sfc_ef10_essb_rx_info(&rxq->dp.dpq,
+ "max fill level is %u descs (%u bufs), "
+			      "refill threshold %u descs (%u bufs)",
+ rxq->max_fill_level,
+ rxq->max_fill_level * rxq->block_size,
+ rxq->refill_threshold,
+ rxq->refill_threshold * rxq->block_size);
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+fail_no_block_dequeue:
+fail_get_contig_block_size:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_ef10_essb_rx_qdestroy;
+static void
+sfc_ef10_essb_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_ef10_essb_rx_qstart;
+static int
+sfc_ef10_essb_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->evq_read_ptr = evq_read_ptr;
+
+ /* Initialize before refill */
+ rxq->completed = rxq->pending_id = rxq->added = 0;
+ rxq->left_in_completed = rxq->left_in_pending = rxq->block_size;
+ rxq->bufs_ptr = UINT_MAX;
+ rxq->bufs_pending = 0;
+
+ sfc_ef10_essb_rx_qrefill(rxq);
+
+ rxq->flags |= SFC_EF10_ESSB_RXQ_STARTED;
+ rxq->flags &=
+ ~(SFC_EF10_ESSB_RXQ_NOT_RUNNING | SFC_EF10_ESSB_RXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_ef10_essb_rx_qstop;
+static void
+sfc_ef10_essb_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags |= SFC_EF10_ESSB_RXQ_NOT_RUNNING;
+
+ *evq_read_ptr = rxq->evq_read_ptr;
+}
+
+static sfc_dp_rx_qrx_ev_t sfc_ef10_essb_rx_qrx_ev;
+static bool
+sfc_ef10_essb_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_essb_rxq *rxq;
+
+ rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq->flags & SFC_EF10_ESSB_RXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Rx event since we free all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_rx_qpurge_t sfc_ef10_essb_rx_qpurge;
+static void
+sfc_ef10_essb_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_essb_rxq *rxq = sfc_ef10_essb_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ const struct sfc_ef10_essb_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_ring[i & rxq->rxq_ptr_mask];
+ m = sfc_ef10_essb_mbuf_by_index(rxq, rxd->first_mbuf,
+ rxq->block_size - rxq->left_in_completed);
+ while (rxq->left_in_completed > 0) {
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ m = sfc_ef10_essb_next_mbuf(rxq, m);
+ rxq->left_in_completed--;
+ }
+ rxq->left_in_completed = rxq->block_size;
+ }
+
+ rxq->flags &= ~SFC_EF10_ESSB_RXQ_STARTED;
+}
+
+struct sfc_dp_rx sfc_ef10_essb_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10_ESSB,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10 |
+ SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER,
+ },
+ .features = SFC_DP_RX_FEAT_FLOW_FLAG |
+ SFC_DP_RX_FEAT_FLOW_MARK |
+ SFC_DP_RX_FEAT_CHECKSUM,
+ .get_dev_info = sfc_ef10_essb_rx_get_dev_info,
+ .pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
+ .qsize_up_rings = sfc_ef10_essb_rx_qsize_up_rings,
+ .qcreate = sfc_ef10_essb_rx_qcreate,
+ .qdestroy = sfc_ef10_essb_rx_qdestroy,
+ .qstart = sfc_ef10_essb_rx_qstart,
+ .qstop = sfc_ef10_essb_rx_qstop,
+ .qrx_ev = sfc_ef10_essb_rx_qrx_ev,
+ .qpurge = sfc_ef10_essb_rx_qpurge,
+ .supported_ptypes_get = sfc_ef10_supported_ptypes_get,
+ .qdesc_npending = sfc_ef10_essb_rx_qdesc_npending,
+ .qdesc_status = sfc_ef10_essb_rx_qdesc_status,
+ .pkt_burst = sfc_ef10_essb_recv_pkts,
+};
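The sizing comments in this new file carry arithmetic worth making concrete. Under the assumptions stated in the SFC_EF10_ESSB_RX_DESCS_MIN comment (64k bucket, ~2k mbuf data size, both illustrative), the numbers work out as in this standalone sketch:

#include <assert.h>

int
main(void)
{
	unsigned int bucket_size = 64 * 1024;	/* assumed bucket block size */
	unsigned int mbuf_size = 2 * 1024;	/* assumed rte_mbuf data size */
	unsigned int bufs_per_block = bucket_size / mbuf_size;	/* 32 */
	unsigned int wptr_align = 8;	/* SFC_EF10_RX_WPTR_ALIGN */
	unsigned int min_bufs = wptr_align * bufs_per_block;	/* 256 */

	/* Advertised minimum is doubled for at least two refill blocks */
	assert(2 * min_bufs == 512);	/* SFC_EF10_ESSB_RX_DESCS_MIN */
	return 0;
}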
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index 0b3e8fbb..6a5052b9 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -26,16 +26,13 @@
#include "sfc_kvargs.h"
#include "sfc_ef10.h"
+#define SFC_EF10_RX_EV_ENCAP_SUPPORT 1
+#include "sfc_ef10_rx_ev.h"
+
#define sfc_ef10_rx_err(dpq, ...) \
SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
/**
- * Alignment requirement for value written to RX WPTR:
- * the WPTR must be aligned to an 8 descriptor boundary.
- */
-#define SFC_EF10_RX_WPTR_ALIGN 8
-
-/**
* Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
* EF10 native datapath uses event queue of the same size as Rx queue.
@@ -88,29 +85,6 @@ sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
}
static void
-sfc_ef10_rx_qpush(struct sfc_ef10_rxq *rxq)
-{
- efx_dword_t dword;
-
- /* Hardware has alignment restriction for WPTR */
- RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
- SFC_ASSERT(RTE_ALIGN(rxq->added, SFC_EF10_RX_WPTR_ALIGN) == rxq->added);
-
- EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR,
- rxq->added & rxq->ptr_mask);
-
- /* DMA sync to device is not required */
-
- /*
- * rte_write32() has rte_io_wmb() which guarantees that the STORE
- * operations (i.e. Rx and event descriptor updates) that precede
- * the rte_io_wmb() call are visible to NIC before the STORE
- * operations that follow it (i.e. doorbell write).
- */
- rte_write32(dword.ed_u32[0], rxq->doorbell);
-}
-
-static void
sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
{
const unsigned int ptr_mask = rxq->ptr_mask;
@@ -120,6 +94,8 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
void *objs[SFC_RX_REFILL_BULK];
unsigned int added = rxq->added;
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+
free_space = rxq->max_fill_level - (added - rxq->completed);
if (free_space < rxq->refill_threshold)
@@ -178,7 +154,7 @@ sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
SFC_ASSERT(rxq->added != added);
rxq->added = added;
- sfc_ef10_rx_qpush(rxq);
+ sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask);
}
static void
@@ -225,137 +201,6 @@ sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
return n_rx_pkts;
}
-static void
-sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
- struct rte_mbuf *m)
-{
- uint32_t tun_ptype = 0;
- /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
- int8_t ip_csum_err_bit;
- /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
- int8_t l4_csum_err_bit;
- uint32_t l2_ptype = 0;
- uint32_t l3_ptype = 0;
- uint32_t l4_ptype = 0;
- uint64_t ol_flags = 0;
-
- if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
- goto done;
-
- switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
- default:
- /* Unexpected encapsulation tag class */
- SFC_ASSERT(false);
- /* FALLTHROUGH */
- case ESE_EZ_ENCAP_HDR_NONE:
- break;
- case ESE_EZ_ENCAP_HDR_VXLAN:
- /*
- * It is definitely UDP, but we have no information
- * about IPv4 vs IPv6 and VLAN tagging.
- */
- tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
- break;
- case ESE_EZ_ENCAP_HDR_GRE:
- /*
- * We have no information about IPv4 vs IPv6 and VLAN tagging.
- */
- tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
- break;
- }
-
- if (tun_ptype == 0) {
- ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN;
- l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN;
- } else {
- ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN;
- l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
- if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
- ESF_DZ_RX_IPCKSUM_ERR_LBN)))
- ol_flags |= PKT_RX_EIP_CKSUM_BAD;
- }
-
- switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
- case ESE_DZ_ETH_TAG_CLASS_NONE:
- l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
- RTE_PTYPE_INNER_L2_ETHER;
- break;
- case ESE_DZ_ETH_TAG_CLASS_VLAN1:
- l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
- RTE_PTYPE_INNER_L2_ETHER_VLAN;
- break;
- case ESE_DZ_ETH_TAG_CLASS_VLAN2:
- l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
- RTE_PTYPE_INNER_L2_ETHER_QINQ;
- break;
- default:
- /* Unexpected Eth tag class */
- SFC_ASSERT(false);
- }
-
- switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
- case ESE_DZ_L3_CLASS_IP4_FRAG:
- l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
- RTE_PTYPE_INNER_L4_FRAG;
- /* FALLTHROUGH */
- case ESE_DZ_L3_CLASS_IP4:
- l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
- ol_flags |= PKT_RX_RSS_HASH |
- ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
- PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
- break;
- case ESE_DZ_L3_CLASS_IP6_FRAG:
- l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
- RTE_PTYPE_INNER_L4_FRAG;
- /* FALLTHROUGH */
- case ESE_DZ_L3_CLASS_IP6:
- l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
- ol_flags |= PKT_RX_RSS_HASH;
- break;
- case ESE_DZ_L3_CLASS_ARP:
- /* Override Layer 2 packet type */
- /* There is no ARP classification for inner packets */
- if (tun_ptype == 0)
- l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
- break;
- default:
- /* Unexpected Layer 3 class */
- SFC_ASSERT(false);
- }
-
- switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) {
- case ESE_DZ_L4_CLASS_TCP:
- l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
- RTE_PTYPE_INNER_L4_TCP;
- ol_flags |=
- (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
- PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
- break;
- case ESE_DZ_L4_CLASS_UDP:
- l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
- RTE_PTYPE_INNER_L4_UDP;
- ol_flags |=
- (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
- PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
- break;
- case ESE_DZ_L4_CLASS_UNKNOWN:
- break;
- default:
- /* Unexpected Layer 4 class */
- SFC_ASSERT(false);
- }
-
- /* Remove RSS hash offload flag if RSS is not enabled */
- if (~rxq->flags & SFC_EF10_RXQ_RSS_HASH)
- ol_flags &= ~PKT_RX_RSS_HASH;
-
-done:
- m->ol_flags = ol_flags;
- m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
-}
-
static uint16_t
sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
{
@@ -414,7 +259,10 @@ sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
m->rearm_data[0] = rxq->rearm_data;
/* Classify packet based on Rx event */
- sfc_ef10_rx_ev_to_offloads(rxq, rx_ev, m);
+ /* Mask RSS hash offload flag if RSS is not enabled */
+ sfc_ef10_rx_ev_to_offloads(rx_ev, m,
+ (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
+ ~0ull : ~PKT_RX_RSS_HASH);
/* data_off already moved past pseudo header */
pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
@@ -538,7 +386,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return n_rx_pkts;
}
-static const uint32_t *
+const uint32_t *
sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
{
static const uint32_t ef10_native_ptypes[] = {
@@ -587,8 +435,8 @@ sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
1u << EFX_TUNNEL_PROTOCOL_NVGRE):
return ef10_overlay_ptypes;
default:
- RTE_LOG(ERR, PMD,
- "Unexpected set of supported tunnel encapsulations: %#x\n",
+ SFC_GENERIC_LOG(ERR,
+ "Unexpected set of supported tunnel encapsulations: %#x",
tunnel_encaps);
/* FALLTHROUGH */
case 0:
@@ -632,6 +480,7 @@ sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
static int
sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
unsigned int *rxq_max_fill_level)
@@ -716,7 +565,7 @@ sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
rxq->rxq_hw_ring = info->rxq_hw_ring;
rxq->doorbell = (volatile uint8_t *)info->mem_bar +
ER_DZ_RX_DESC_UPD_REG_OFST +
- info->hw_index * ER_DZ_RX_DESC_UPD_REG_STEP;
+ (info->hw_index << info->vi_window_shift);
*dp_rxqp = &rxq->dp;
return 0;
@@ -809,7 +658,8 @@ struct sfc_dp_rx sfc_ef10_rx = {
.hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
},
.features = SFC_DP_RX_FEAT_MULTI_PROCESS |
- SFC_DP_RX_FEAT_TUNNELS,
+ SFC_DP_RX_FEAT_TUNNELS |
+ SFC_DP_RX_FEAT_CHECKSUM,
.get_dev_info = sfc_ef10_rx_get_dev_info,
.qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
.qcreate = sfc_ef10_rx_qcreate,
diff --git a/drivers/net/sfc/sfc_ef10_rx_ev.h b/drivers/net/sfc/sfc_ef10_rx_ev.h
new file mode 100644
index 00000000..868c755f
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_rx_ev.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2018 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ */
+
+#ifndef _SFC_EF10_RX_EV_H
+#define _SFC_EF10_RX_EV_H
+
+#include <rte_mbuf.h>
+
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void
+sfc_ef10_rx_ev_to_offloads(const efx_qword_t rx_ev, struct rte_mbuf *m,
+ uint64_t ol_mask)
+{
+ uint32_t tun_ptype = 0;
+ /* Which event bit is mapped to PKT_RX_IP_CKSUM_* */
+ int8_t ip_csum_err_bit;
+ /* Which event bit is mapped to PKT_RX_L4_CKSUM_* */
+ int8_t l4_csum_err_bit;
+ uint32_t l2_ptype = 0;
+ uint32_t l3_ptype = 0;
+ uint32_t l4_ptype = 0;
+ uint64_t ol_flags = 0;
+
+ if (unlikely(rx_ev.eq_u64[0] &
+ rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_ECRC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) {
+		/* Zero packet type is used as a marker to discard bad packets */
+ goto done;
+ }
+
+#if SFC_EF10_RX_EV_ENCAP_SUPPORT
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_EZ_RX_ENCAP_HDR)) {
+ default:
+ /* Unexpected encapsulation tag class */
+ SFC_ASSERT(false);
+ /* FALLTHROUGH */
+ case ESE_EZ_ENCAP_HDR_NONE:
+ break;
+ case ESE_EZ_ENCAP_HDR_VXLAN:
+ /*
+ * It is definitely UDP, but we have no information
+ * about IPv4 vs IPv6 and VLAN tagging.
+ */
+ tun_ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
+ break;
+ case ESE_EZ_ENCAP_HDR_GRE:
+ /*
+ * We have no information about IPv4 vs IPv6 and VLAN tagging.
+ */
+ tun_ptype = RTE_PTYPE_TUNNEL_NVGRE;
+ break;
+ }
+#endif
+
+ if (tun_ptype == 0) {
+ ip_csum_err_bit = ESF_DZ_RX_IPCKSUM_ERR_LBN;
+ l4_csum_err_bit = ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN;
+ } else {
+ ip_csum_err_bit = ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN;
+ l4_csum_err_bit = ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN;
+ if (unlikely(EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_IPCKSUM_ERR_LBN)))
+ ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
+ case ESE_DZ_ETH_TAG_CLASS_NONE:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER :
+ RTE_PTYPE_INNER_L2_ETHER;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN1:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_VLAN :
+ RTE_PTYPE_INNER_L2_ETHER_VLAN;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN2:
+ l2_ptype = (tun_ptype == 0) ? RTE_PTYPE_L2_ETHER_QINQ :
+ RTE_PTYPE_INNER_L2_ETHER_QINQ;
+ break;
+ default:
+ /* Unexpected Eth tag class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP4:
+ l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV4_EXT_UNKNOWN :
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH |
+ ((EFX_TEST_QWORD_BIT(rx_ev, ip_csum_err_bit)) ?
+ PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ break;
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_FRAG :
+ RTE_PTYPE_INNER_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP6:
+ l3_ptype = (tun_ptype == 0) ? RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH;
+ break;
+ case ESE_DZ_L3_CLASS_ARP:
+ /* Override Layer 2 packet type */
+ /* There is no ARP classification for inner packets */
+ if (tun_ptype == 0)
+ l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ case ESE_DZ_L3_CLASS_UNKNOWN:
+ break;
+ default:
+ /* Unexpected Layer 3 class */
+ SFC_ASSERT(false);
+ }
+
+ /*
+ * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
+ * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
+ * and values for all EF10 controllers.
+ */
+ RTE_BUILD_BUG_ON(ESF_FZ_RX_L4_CLASS_LBN != ESF_DE_RX_L4_CLASS_LBN);
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_FZ_RX_L4_CLASS)) {
+ case ESE_FZ_L4_CLASS_TCP:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_TCP != ESE_DE_L4_CLASS_TCP);
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_TCP :
+ RTE_PTYPE_INNER_L4_TCP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_FZ_L4_CLASS_UDP:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UDP != ESE_DE_L4_CLASS_UDP);
+ l4_ptype = (tun_ptype == 0) ? RTE_PTYPE_L4_UDP :
+ RTE_PTYPE_INNER_L4_UDP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev, l4_csum_err_bit)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_FZ_L4_CLASS_UNKNOWN:
+ RTE_BUILD_BUG_ON(ESE_FZ_L4_CLASS_UNKNOWN !=
+ ESE_DE_L4_CLASS_UNKNOWN);
+ break;
+ default:
+ /* Unexpected Layer 4 class */
+ SFC_ASSERT(false);
+ }
+
+ SFC_ASSERT(l2_ptype != 0);
+
+done:
+ m->ol_flags = ol_flags & ol_mask;
+ m->packet_type = tun_ptype | l2_ptype | l3_ptype | l4_ptype;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EF10_RX_EV_H */
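
The new ol_mask parameter is what lets this header drop its dependency on the Rx queue state: the old per-packet check of rxq->flags for RSS is hoisted to the caller, which folds the queue-level decision into a single mask applied once at the end (see the sfc_ef10_rx_process_event() hunk above). A compilable toy with a made-up flag value, just to show the branch-free masking:

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_RX_RSS_HASH (1ULL << 1)        /* illustrative value only */

    int main(void)
    {
            uint64_t ol_flags = TOY_RX_RSS_HASH | (1ULL << 3);
            int rss_enabled = 0;
            /* decided once per queue, applied per packet without a branch */
            uint64_t ol_mask = rss_enabled ? ~0ULL : ~TOY_RX_RSS_HASH;

            printf("%#llx\n", (unsigned long long)(ol_flags & ol_mask));
            return 0;
    }
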
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
index 12387972..d0daa3b3 100644
--- a/drivers/net/sfc/sfc_ef10_tx.c
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -531,7 +531,7 @@ sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
txq->txq_hw_ring = info->txq_hw_ring;
txq->doorbell = (volatile uint8_t *)info->mem_bar +
ER_DZ_TX_DESC_UPD_REG_OFST +
- info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
+ (info->hw_index << info->vi_window_shift);
txq->evq_hw_ring = info->evq_hw_ring;
*dp_txqp = &txq->dp;
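
The same one-line change appears in both the Rx and Tx queue creation paths: earlier EF10 controllers spaced the per-queue doorbell registers at a fixed step, while on Medford2 the VI window size is a per-NIC property, so the offset becomes a shift by info->vi_window_shift. A toy computation, assuming an 8 KiB window (shift of 13; the actual shift comes from NIC configuration):

    /* hypothetical values, for illustration only */
    unsigned int hw_index = 5;
    unsigned int vi_window_shift = 13;                 /* 8 KiB VI window */
    size_t offset = ER_DZ_TX_DESC_UPD_REG_OFST +
                    ((size_t)hw_index << vi_window_shift);
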
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 89a45290..9decbf5a 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -13,6 +13,7 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
+#include <rte_string_fns.h>
#include "efx.h"
@@ -27,6 +28,8 @@
#include "sfc_dp.h"
#include "sfc_dp_rx.h"
+uint32_t sfc_logtype_driver;
+
static struct sfc_dp_list sfc_dp_head =
TAILQ_HEAD_INITIALIZER(sfc_dp_head);
@@ -82,12 +85,11 @@ static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct sfc_adapter *sa = dev->data->dev_private;
- const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_rss *rss = &sa->rss;
uint64_t txq_offloads_def = 0;
sfc_log_init(sa, "entry");
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
/* Autonegotiation may be disabled */
@@ -96,8 +98,14 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa |= ETH_LINK_SPEED_1G;
if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_25G;
if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_50G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_100G;
dev_info->max_rx_queues = sa->rxq_max;
dev_info->max_tx_queues = sa->txq_max;
@@ -130,27 +138,17 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf.offloads |= txq_offloads_def;
- dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
- if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
- !encp->enc_hw_tx_insert_vlan_enabled)
- dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
-
- if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
- dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
-
- if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)
- dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP;
+ if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
+ uint64_t rte_hf = 0;
+ unsigned int i;
- if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)
- dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
+ for (i = 0; i < rss->hf_map_nb_entries; ++i)
+ rte_hf |= rss->hf_map[i].rte;
-#if EFSYS_OPT_RX_SCALE
- if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
dev_info->reta_size = EFX_RSS_TBL_SIZE;
dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
- dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
+ dev_info->flow_type_rss_offloads = rte_hf;
}
-#endif
/* Initialize to hardware limits */
dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
@@ -236,22 +234,13 @@ static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
struct sfc_adapter *sa = dev->data->dev_private;
- struct rte_eth_link *dev_link = &dev->data->dev_link;
- struct rte_eth_link old_link;
struct rte_eth_link current_link;
+ int ret;
sfc_log_init(sa, "entry");
-retry:
- EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
- *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
-
if (sa->state != SFC_ADAPTER_STARTED) {
sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
- if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
- *(uint64_t *)&old_link,
- *(uint64_t *)&current_link))
- goto retry;
} else if (wait_to_complete) {
efx_link_mode_t link_mode;
@@ -259,21 +248,17 @@ retry:
link_mode = EFX_LINK_UNKNOWN;
sfc_port_link_mode_to_info(link_mode, &current_link);
- if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
- *(uint64_t *)&old_link,
- *(uint64_t *)&current_link))
- goto retry;
} else {
sfc_ev_mgmt_qpoll(sa);
- *(int64_t *)&current_link =
- rte_atomic64_read((rte_atomic64_t *)dev_link);
+ rte_eth_linkstatus_get(dev, &current_link);
}
- if (old_link.link_status != current_link.link_status)
- sfc_info(sa, "Link status is %s",
- current_link.link_status ? "UP" : "DOWN");
+ ret = rte_eth_linkstatus_set(dev, &current_link);
+ if (ret == 0)
+ sfc_notice(sa, "Link status is %s",
+ current_link.link_status ? "UP" : "DOWN");
- return old_link.link_status == current_link.link_status ? 0 : -1;
+ return ret;
}
static void
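
The hand-rolled compare-and-set loop goes away because the ethdev layer now owns link-state atomicity: rte_eth_linkstatus_get() takes an atomic 64-bit snapshot of dev->data->dev_link, and rte_eth_linkstatus_set() exchanges it, returning 0 only when the status actually changed, which is why the message above is logged on ret == 0. The same simplification is applied to sfc_ev_link_change() in the sfc_ev.c hunk further down. A condensed sketch of the new pattern, with handle_change() as a hypothetical stand-in:

    struct rte_eth_link link;

    sfc_port_link_mode_to_info(link_mode, &link); /* fill status/speed/duplex */
    if (rte_eth_linkstatus_set(dev, &link) == 0)
            handle_change();                      /* 0 => status changed */
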
@@ -664,7 +649,7 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
for (i = 0; i < EFX_MAC_NSTATS; ++i) {
if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
if (xstats_names != NULL && nstats < xstats_count)
- strncpy(xstats_names[nstats].name,
+ strlcpy(xstats_names[nstats].name,
efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
nstats++;
@@ -742,9 +727,8 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
char *name = xstats_names[nb_written++].name;
- strncpy(name, efx_mac_stat_name(sa->nic, i),
+ strlcpy(name, efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
- name[sizeof(xstats_names[0].name) - 1] = '\0';
}
++nb_supported;
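
Both xstats name copies switch from strncpy() to strlcpy() for the classic reason: strncpy() leaves the destination unterminated whenever the source fills it, which is exactly what the dropped manual name[...] = '\0' line was papering over (DPDK supplies strlcpy via the newly included rte_string_fns.h on platforms that lack it). A small standalone illustration of the hazard:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[8];

            strncpy(buf, "0123456789", sizeof(buf));
            /* buf now holds 8 bytes and no terminating NUL ... */
            buf[sizeof(buf) - 1] = '\0'; /* ... the fixup strlcpy() makes redundant */
            printf("%s\n", buf);         /* prints "0123456" */
            return 0;
    }
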
@@ -890,14 +874,12 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
/*
- * The driver does not use it, but other PMDs update jumbo_frame
+ * The driver does not use it, but other PMDs update jumbo frame
* flag and max_rx_pkt_len when MTU is set.
*/
if (mtu > ETHER_MAX_LEN) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- rxmode->jumbo_frame = 1;
}
dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
@@ -920,13 +902,14 @@ fail_inval:
SFC_ASSERT(rc > 0);
return -rc;
}
-static void
+static int
sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct sfc_adapter *sa = dev->data->dev_private;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
struct sfc_port *port = &sa->port;
- int rc;
+ struct ether_addr *old_addr = &dev->data->mac_addrs[0];
+ int rc = 0;
sfc_adapter_lock(sa);
@@ -936,15 +919,22 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
*/
ether_addr_copy(mac_addr, &port->default_mac_addr);
+ /*
+ * Neither of the two following checks can return
+ * an error. The new MAC address is preserved in
+ * the device private data and can be activated
+ * on the next port start if the user prevents
+ * isolated mode from being enabled.
+ */
if (port->isolated) {
- sfc_err(sa, "isolated mode is active on the port");
- sfc_err(sa, "will not set MAC address");
+ sfc_warn(sa, "isolated mode is active on the port");
+ sfc_warn(sa, "will not set MAC address");
goto unlock;
}
if (sa->state != SFC_ADAPTER_STARTED) {
- sfc_info(sa, "the port is not started");
- sfc_info(sa, "the new MAC address will be set on port start");
+ sfc_notice(sa, "the port is not started");
+ sfc_notice(sa, "the new MAC address will be set on port start");
goto unlock;
}
@@ -962,8 +952,12 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
* we also need to update unicast filters
*/
rc = sfc_set_rx_mode(sa);
- if (rc != 0)
+ if (rc != 0) {
sfc_err(sa, "cannot set filter (rc = %u)", rc);
+ /* Rollback the old address */
+ (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
+ (void)sfc_set_rx_mode(sa);
+ }
} else {
sfc_warn(sa, "cannot set MAC address with filters installed");
sfc_warn(sa, "adapter will be restarted to pick the new MAC");
@@ -982,14 +976,13 @@ sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
}
unlock:
- /*
- * In the case of failure sa->port->default_mac_addr does not
- * need rollback since no error code is returned, and the upper
- * API will anyway update the external MAC address storage.
- * To be consistent with that new value it is better to keep
- * the device private value the same.
- */
+ if (rc != 0)
+ ether_addr_copy(old_addr, &port->default_mac_addr);
+
sfc_adapter_unlock(sa);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
}
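
Turning sfc_mac_addr_set() from void into int follows the error convention used throughout this driver: internal helpers return 0 or a positive errno, the ethdev-facing wrapper asserts rc >= 0 and returns -rc, and on failure the private copy of the address is rolled back above so that it stays consistent with what the hardware actually holds. The convention in two lines:

    /* rc is 0 or a positive errno at this point */
    SFC_ASSERT(rc >= 0);
    return -rc;        /* the ethdev layer always sees 0 or a negative errno */
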
@@ -1034,7 +1027,7 @@ sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
if (rc != 0)
sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
- SFC_ASSERT(rc > 0);
+ SFC_ASSERT(rc >= 0);
return -rc;
}
@@ -1062,9 +1055,7 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
qinfo->conf.rx_free_thresh = rxq->refill_threshold;
qinfo->conf.rx_drop_en = 1;
qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
- qinfo->conf.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
qinfo->scattered_rx = 1;
@@ -1094,7 +1085,6 @@ sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
memset(qinfo, 0, sizeof(*qinfo));
- qinfo->conf.txq_flags = txq_info->txq->flags;
qinfo->conf.offloads = txq_info->txq->offloads;
qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
qinfo->conf.tx_deferred_start = txq_info->deferred_start;
@@ -1352,18 +1342,18 @@ sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
}
-#if EFSYS_OPT_RX_SCALE
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
struct sfc_port *port = &sa->port;
- if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
return -ENOTSUP;
- if (sa->rss_channels == 0)
+ if (rss->channels == 0)
return -EINVAL;
sfc_adapter_lock(sa);
@@ -1374,10 +1364,10 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
* flags which corresponds to the active EFX configuration stored
* locally in 'sfc_adapter' and kept up-to-date
*/
- rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
+ rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(sa, rss->hash_types);
rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
if (rss_conf->rss_key != NULL)
- rte_memcpy(rss_conf->rss_key, sa->rss_key, EFX_RSS_KEY_SIZE);
+ rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);
sfc_adapter_unlock(sa);
@@ -1389,6 +1379,7 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
struct sfc_port *port = &sa->port;
unsigned int efx_hash_types;
int rc = 0;
@@ -1396,35 +1387,31 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
if (port->isolated)
return -ENOTSUP;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
sfc_err(sa, "RSS is not available");
return -ENOTSUP;
}
- if (sa->rss_channels == 0) {
+ if (rss->channels == 0) {
sfc_err(sa, "RSS is not configured");
return -EINVAL;
}
if ((rss_conf->rss_key != NULL) &&
- (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
+ (rss_conf->rss_key_len != sizeof(rss->key))) {
sfc_err(sa, "RSS key size is wrong (should be %lu)",
- sizeof(sa->rss_key));
- return -EINVAL;
- }
-
- if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
- sfc_err(sa, "unsupported hash functions requested");
+ sizeof(rss->key));
return -EINVAL;
}
sfc_adapter_lock(sa);
- efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
+ rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
+ if (rc != 0)
+ goto fail_rx_hf_rte_to_efx;
rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- EFX_RX_HASHALG_TOEPLITZ,
- efx_hash_types, B_TRUE);
+ rss->hash_alg, efx_hash_types, B_TRUE);
if (rc != 0)
goto fail_scale_mode_set;
@@ -1433,15 +1420,15 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
rc = efx_rx_scale_key_set(sa->nic,
EFX_RSS_CONTEXT_DEFAULT,
rss_conf->rss_key,
- sizeof(sa->rss_key));
+ sizeof(rss->key));
if (rc != 0)
goto fail_scale_key_set;
}
- rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
+ rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
}
- sa->rss_hash_types = efx_hash_types;
+ rss->hash_types = efx_hash_types;
sfc_adapter_unlock(sa);
@@ -1450,10 +1437,11 @@ sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
fail_scale_key_set:
if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
EFX_RX_HASHALG_TOEPLITZ,
- sa->rss_hash_types, B_TRUE) != 0)
+ rss->hash_types, B_TRUE) != 0)
sfc_err(sa, "failed to restore RSS mode");
fail_scale_mode_set:
+fail_rx_hf_rte_to_efx:
sfc_adapter_unlock(sa);
return -rc;
}
@@ -1464,13 +1452,14 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
struct sfc_port *port = &sa->port;
int entry;
- if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
return -ENOTSUP;
- if (sa->rss_channels == 0)
+ if (rss->channels == 0)
return -EINVAL;
if (reta_size != EFX_RSS_TBL_SIZE)
@@ -1483,7 +1472,7 @@ sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
int grp_idx = entry % RTE_RETA_GROUP_SIZE;
if ((reta_conf[grp].mask >> grp_idx) & 1)
- reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
+ reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
}
sfc_adapter_unlock(sa);
@@ -1497,6 +1486,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
uint16_t reta_size)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rss *rss = &sa->rss;
struct sfc_port *port = &sa->port;
unsigned int *rss_tbl_new;
uint16_t entry;
@@ -1506,12 +1496,12 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
if (port->isolated)
return -ENOTSUP;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
sfc_err(sa, "RSS is not available");
return -ENOTSUP;
}
- if (sa->rss_channels == 0) {
+ if (rss->channels == 0) {
sfc_err(sa, "RSS is not configured");
return -EINVAL;
}
@@ -1522,13 +1512,13 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
return -EINVAL;
}
- rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
+ rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
if (rss_tbl_new == NULL)
return -ENOMEM;
sfc_adapter_lock(sa);
- rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));
+ rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
for (entry = 0; entry < reta_size; entry++) {
int grp_idx = entry % RTE_RETA_GROUP_SIZE;
@@ -1537,7 +1527,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
if (grp->mask & (1ull << grp_idx)) {
- if (grp->reta[grp_idx] >= sa->rss_channels) {
+ if (grp->reta[grp_idx] >= rss->channels) {
rc = EINVAL;
goto bad_reta_entry;
}
@@ -1552,7 +1542,7 @@ sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
goto fail_scale_tbl_set;
}
- rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
+ rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));
fail_scale_tbl_set:
bad_reta_entry:
@@ -1563,7 +1553,6 @@ bad_reta_entry:
SFC_ASSERT(rc >= 0);
return -rc;
}
-#endif
static int
sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
@@ -1621,6 +1610,21 @@ sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
return -rc;
}
+static int
+sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ /*
+	 * If the Rx datapath does not provide a callback to check the mempool,
+ * all pools are supported.
+ */
+ if (sa->dp_rx->pool_ops_supported == NULL)
+ return 1;
+
+ return sa->dp_rx->pool_ops_supported(pool);
+}
+
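
The new callback simply defers the mempool-ops question to the selected Rx datapath: only datapaths with special buffer-layout requirements implement the hook (the ES super-buffer datapath registered below needs the contiguous-block semantics of the bucket mempool), and everything else accepts any pool. A plausible datapath-side implementation, sketched here rather than quoted from this patch:

    /* sketch: accept only the "bucket" mempool ops (assumed handler name) */
    static int
    sfc_ef10_essb_rx_pool_ops_supported(const char *pool)
    {
            if (strcmp(pool, "bucket") == 0)
                    return 0;          /* supported */

            return -ENOTSUP;
    }
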
static const struct eth_dev_ops sfc_eth_dev_ops = {
.dev_configure = sfc_dev_configure,
.dev_start = sfc_dev_start,
@@ -1658,12 +1662,10 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.mac_addr_set = sfc_mac_addr_set,
.udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
.udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
-#if EFSYS_OPT_RX_SCALE
.reta_update = sfc_dev_rss_reta_update,
.reta_query = sfc_dev_rss_reta_query,
.rss_hash_update = sfc_dev_rss_hash_update,
.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
-#endif
.filter_ctrl = sfc_dev_filter_ctrl,
.set_mc_addr_list = sfc_set_mc_addr_list,
.rxq_info_get = sfc_rx_queue_info_get,
@@ -1671,6 +1673,7 @@ static const struct eth_dev_ops sfc_eth_dev_ops = {
.fw_version_get = sfc_fw_version_get,
.xstats_get_by_id = sfc_xstats_get_by_id,
.xstats_get_names_by_id = sfc_xstats_get_names_by_id,
+ .pool_ops_supported = sfc_pool_ops_supported,
};
/**
@@ -1700,6 +1703,7 @@ static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp;
unsigned int avail_caps = 0;
const char *rx_name = NULL;
const char *tx_name = NULL;
@@ -1708,12 +1712,17 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
switch (sa->family) {
case EFX_FAMILY_HUNTINGTON:
case EFX_FAMILY_MEDFORD:
+ case EFX_FAMILY_MEDFORD2:
avail_caps |= SFC_DP_HW_FW_CAP_EF10;
break;
default:
break;
}
+ encp = efx_nic_cfg_get(sa->nic);
+ if (encp->enc_rx_es_super_buffer_supported)
+ avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
+
rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
sfc_kvarg_string_handler, &rx_name);
if (rc != 0)
@@ -1749,7 +1758,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
goto fail_dp_rx_name;
}
- sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name);
+ sfc_notice(sa, "use %s Rx datapath", sa->dp_rx_name);
dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
@@ -1788,7 +1797,7 @@ sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
goto fail_dp_tx_name;
}
- sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name);
+ sfc_notice(sa, "use %s Tx datapath", sa->dp_tx_name);
dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
@@ -1903,6 +1912,7 @@ sfc_register_dp(void)
/* Register once */
if (TAILQ_EMPTY(&sfc_dp_head)) {
/* Prefer EF10 datapath */
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
@@ -1935,15 +1945,13 @@ sfc_eth_dev_init(struct rte_eth_dev *dev)
/* Copy PCI device info to the dev->data */
rte_eth_copy_pci_info(dev, pci_dev);
+ sa->logtype_main = sfc_register_logtype(sa, SFC_LOGTYPE_MAIN_STR,
+ RTE_LOG_NOTICE);
+
rc = sfc_kvargs_parse(sa);
if (rc != 0)
goto fail_kvargs_parse;
- rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
- sfc_kvarg_bool_handler, &sa->debug_init);
- if (rc != 0)
- goto fail_kvarg_debug_init;
-
sfc_log_init(sa, "entry");
dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
@@ -1997,7 +2005,6 @@ fail_probe:
dev->data->mac_addrs = NULL;
fail_mac_addrs:
-fail_kvarg_debug_init:
sfc_kvargs_cleanup(sa);
fail_kvargs_parse:
@@ -2048,6 +2055,8 @@ static const struct rte_pci_id pci_id_sfc_efx_map[] = {
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
{ .vendor_id = 0 /* sentinel */ }
};
@@ -2079,6 +2088,15 @@ RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
- SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
- SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
- SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
+ SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
+ SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
+
+RTE_INIT(sfc_driver_register_logtype)
+{
+ int ret;
+
+ ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
+ RTE_LOG_NOTICE);
+ sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
+}
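
This RTE_INIT constructor replaces the removed debug_init kvarg with DPDK's dynamic logging: the driver-scope logtype gets a user-controllable level and falls back to the legacy RTE_LOGTYPE_PMD if registration fails. SFC_GENERIC_LOG, used in the sfc_ef10_rx.c hunk earlier, presumably wraps it along these lines (a sketch, not the literal sfc_log.h definition):

    /* sketch; the real macro also appends a newline to the format */
    #define SFC_GENERIC_LOG(level, ...) \
            rte_log(RTE_LOG_ ## level, sfc_logtype_driver, "sfc: " __VA_ARGS__)
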
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 7abe61ae..f93d30e5 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -163,6 +163,35 @@ sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
}
static boolean_t
+sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
+ uint32_t pkt_count, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa,
+ "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
+ evq->evq_index, label, id, pkt_count, flags);
+ return B_TRUE;
+}
+
+/* It is not actually used on the datapath, but is required for RxQ flush */
+static boolean_t
+sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
+ __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ if (evq->sa->dp_rx->qrx_ps_ev != NULL)
+ return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id);
+ else
+ return B_FALSE;
+}
+
+static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
struct sfc_evq *evq = arg;
@@ -382,27 +411,11 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
struct sfc_evq *evq = arg;
struct sfc_adapter *sa = evq->sa;
- struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
struct rte_eth_link new_link;
- uint64_t new_link_u64;
- uint64_t old_link_u64;
-
- EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
sfc_port_link_mode_to_info(link_mode, &new_link);
-
- new_link_u64 = *(uint64_t *)&new_link;
- do {
- old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
- if (old_link_u64 == new_link_u64)
- break;
-
- if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
- old_link_u64, new_link_u64)) {
- evq->sa->port.lsc_seq++;
- break;
- }
- } while (B_TRUE);
+ if (rte_eth_linkstatus_set(sa->eth_dev, &new_link))
+ evq->sa->port.lsc_seq++;
return B_FALSE;
}
@@ -410,6 +423,7 @@ sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
static const efx_ev_callbacks_t sfc_ev_callbacks = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
.eec_tx = sfc_ev_nop_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
@@ -425,6 +439,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks = {
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_efx_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
.eec_tx = sfc_ev_nop_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
@@ -440,6 +455,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_dp_rx,
+ .eec_rx_ps = sfc_ev_dp_rx_ps,
.eec_tx = sfc_ev_nop_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
@@ -455,6 +471,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
.eec_tx = sfc_ev_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
@@ -470,6 +487,7 @@ static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
.eec_initialized = sfc_ev_initialized,
.eec_rx = sfc_ev_nop_rx,
+ .eec_rx_ps = sfc_ev_nop_rx_ps,
.eec_tx = sfc_ev_dp_tx,
.eec_exception = sfc_ev_exception,
.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
@@ -837,7 +855,7 @@ static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
const char *value_str, void *opaque)
{
- uint64_t *value = opaque;
+ uint32_t *value = opaque;
if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
diff --git a/drivers/net/sfc/sfc_filter.c b/drivers/net/sfc/sfc_filter.c
index 77e2ea56..6ff380a3 100644
--- a/drivers/net/sfc/sfc_filter.c
+++ b/drivers/net/sfc/sfc_filter.c
@@ -75,6 +75,7 @@ int
sfc_filter_attach(struct sfc_adapter *sa)
{
int rc;
+ unsigned int i;
sfc_log_init(sa, "entry");
@@ -88,6 +89,19 @@ sfc_filter_attach(struct sfc_adapter *sa)
efx_filter_fini(sa->nic);
+ sa->filter.supports_ip_proto_or_addr_filter = B_FALSE;
+ sa->filter.supports_rem_or_local_port_filter = B_FALSE;
+ for (i = 0; i < sa->filter.supported_match_num; ++i) {
+ if (sa->filter.supported_match[i] &
+ (EFX_FILTER_MATCH_IP_PROTO | EFX_FILTER_MATCH_LOC_HOST |
+ EFX_FILTER_MATCH_REM_HOST))
+ sa->filter.supports_ip_proto_or_addr_filter = B_TRUE;
+
+ if (sa->filter.supported_match[i] &
+ (EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))
+ sa->filter.supports_rem_or_local_port_filter = B_TRUE;
+ }
+
sfc_log_init(sa, "done");
return 0;
diff --git a/drivers/net/sfc/sfc_filter.h b/drivers/net/sfc/sfc_filter.h
index d3e1c2f9..64ab114e 100644
--- a/drivers/net/sfc/sfc_filter.h
+++ b/drivers/net/sfc/sfc_filter.h
@@ -25,6 +25,16 @@ struct sfc_filter {
uint32_t *supported_match;
/** List of flow rules */
struct sfc_flow_list flow_list;
+ /**
+ * Supports any of ip_proto, remote host or local host
+ * filters. This flag is used for filter match exceptions
+ */
+ boolean_t supports_ip_proto_or_addr_filter;
+ /**
+ * Supports any of remote port or local port filters.
+ * This flag is used for filter match exceptions
+ */
+ boolean_t supports_rem_or_local_port_filter;
};
struct sfc_adapter;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 93cdf8f4..371648b0 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -7,6 +7,7 @@
* for Solarflare) and Solarflare Communications, Inc.
*/
+#include <rte_byteorder.h>
#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
@@ -22,13 +23,17 @@
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"
+#include "sfc_dp_rx.h"
/*
* At now flow API is implemented in such a manner that each
- * flow rule is converted to a hardware filter.
+ * flow rule is converted to one or more hardware filters.
* All elements of flow rule (attributes, pattern items, actions)
* correspond to one or more fields in the efx_filter_spec_s structure
* that is responsible for the hardware filter.
+ * If some required field is unset in the flow rule, then a handful
+ * of filter copies will be created to cover all possible values
+ * of such a field.
*/
enum sfc_flow_item_layers {
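
The reworded comment is the key to the rest of this file's changes: a flow spec is now a template plus an array of fully elaborated filters, and the sfc_flow_copy_flag table added below drives the expansion. For instance, when a rule leaves the destination-address class unspecified on hardware that filters unknown unicast and unknown multicast separately, the template is expected to be duplicated roughly like this (a sketch of what sfc_flow_set_unknown_dst_flags does):

    /* sketch: expand one template into copies covering both dst classes */
    static const efx_filter_match_flags_t vals[] = {
            EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
            EFX_FILTER_MATCH_UNKNOWN_MCAST_DST,
    };
    unsigned int i;

    for (i = 0; i < RTE_DIM(vals); i++) {
            spec->filters[i] = spec->template;    /* copy the template */
            spec->filters[i].efs_match_flags |= vals[i];
    }
    spec->count = RTE_DIM(vals);
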
@@ -57,6 +62,39 @@ static sfc_flow_item_parse sfc_flow_parse_ipv4;
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
+static sfc_flow_item_parse sfc_flow_parse_vxlan;
+static sfc_flow_item_parse sfc_flow_parse_geneve;
+static sfc_flow_item_parse sfc_flow_parse_nvgre;
+
+typedef int (sfc_flow_spec_set_vals)(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error);
+
+typedef boolean_t (sfc_flow_spec_check)(efx_filter_match_flags_t match,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter);
+
+struct sfc_flow_copy_flag {
+ /* EFX filter specification match flag */
+ efx_filter_match_flags_t flag;
+ /* Number of values of corresponding field */
+ unsigned int vals_count;
+ /* Function to set values in specifications */
+ sfc_flow_spec_set_vals *set_vals;
+ /*
+ * Function to check that the specification is suitable
+ * for adding this match flag
+ */
+ sfc_flow_spec_check *spec_check;
+};
+
+static sfc_flow_spec_set_vals sfc_flow_set_unknown_dst_flags;
+static sfc_flow_spec_check sfc_flow_check_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
+static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
+static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
+static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
@@ -85,7 +123,6 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
const uint8_t *spec;
const uint8_t *mask;
const uint8_t *last;
- uint8_t match;
uint8_t supp;
unsigned int i;
@@ -115,13 +152,13 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
return -rte_errno;
}
- mask = (const uint8_t *)def_mask;
+ mask = def_mask;
} else {
- mask = (const uint8_t *)item->mask;
+ mask = item->mask;
}
- spec = (const uint8_t *)item->spec;
- last = (const uint8_t *)item->last;
+ spec = item->spec;
+ last = item->last;
if (spec == NULL)
goto exit;
@@ -146,12 +183,11 @@ sfc_flow_parse_init(const struct rte_flow_item *item,
return -rte_errno;
}
- /* Check that mask and spec not asks for more match than supp_mask */
+ /* Check that mask does not ask for more match than supp_mask */
for (i = 0; i < size; i++) {
- match = spec[i] | mask[i];
supp = ((const uint8_t *)supp_mask)[i];
- if ((match | supp) != supp) {
+ if (~supp & mask[i]) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"Item's field is not supported");
@@ -184,11 +220,11 @@ sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
* Convert Ethernet item to EFX filter specification.
*
* @param item[in]
- * Item specification. Only source and destination addresses and
- * Ethernet type fields are supported. In addition to full and
- * empty masks of destination address, individual/group mask is
- * also supported. If the mask is NULL, default mask will be used.
- * Ranging is not supported.
+ * Item specification. Outer frame specification may only comprise
+ * source/destination addresses and Ethertype field.
+ * Inner frame specification may contain destination address only.
+ * Individual/group masks are supported, as well as empty and full masks.
+ * If the mask is NULL, default mask will be used. Ranging is not supported.
* @param efx_spec[in, out]
* EFX filter specification to update.
* @param[out] error
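
One byte worked through for the simplified supported-mask check two hunks up: the old test folded spec bytes into a mask-validity question, rejecting rules whose spec had bits outside the supported mask even when the item's mask ignored those bits; ~supp & mask[i] asks exactly the right question instead:

    /* supp = 0xF0: hardware can only match the high nibble */
    uint8_t supp = 0xF0, mask = 0xF8;

    /* ~supp & mask = 0x0F & 0xF8 = 0x08: the rule asks to match bit 3,
     * which is not supported, so the item is rejected */
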
@@ -207,15 +243,32 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.type = 0xffff,
};
+ const struct rte_flow_item_eth ifrm_supp_mask = {
+ .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ };
const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0x00
};
+ const struct rte_flow_item_eth *supp_mask_p;
+ const struct rte_flow_item_eth *def_mask_p;
+ uint8_t *loc_mac = NULL;
+ boolean_t is_ifrm = (efx_spec->efs_encap_type !=
+ EFX_TUNNEL_PROTOCOL_NONE);
+
+ if (is_ifrm) {
+ supp_mask_p = &ifrm_supp_mask;
+ def_mask_p = &ifrm_supp_mask;
+ loc_mac = efx_spec->efs_ifrm_loc_mac;
+ } else {
+ supp_mask_p = &supp_mask;
+ def_mask_p = &rte_flow_item_eth_mask;
+ loc_mac = efx_spec->efs_loc_mac;
+ }
rc = sfc_flow_parse_init(item,
(const void **)&spec,
(const void **)&mask,
- &supp_mask,
- &rte_flow_item_eth_mask,
+ supp_mask_p, def_mask_p,
sizeof(struct rte_flow_item_eth),
error);
if (rc != 0)
@@ -226,21 +279,30 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
return 0;
if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
- efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
- rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_LOC_MAC :
+ EFX_FILTER_MATCH_LOC_MAC;
+ rte_memcpy(loc_mac, spec->dst.addr_bytes,
EFX_MAC_ADDR_LEN);
} else if (memcmp(mask->dst.addr_bytes, ig_mask,
EFX_MAC_ADDR_LEN) == 0) {
if (is_unicast_ether_addr(&spec->dst))
- efx_spec->efs_match_flags |=
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
else
- efx_spec->efs_match_flags |=
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
} else if (!is_zero_ether_addr(&mask->dst)) {
goto fail_bad_mask;
}
+ /*
+ * ifrm_supp_mask ensures that the source address and
+	 * ethertype masks are equal to zero in the inner frame,
+	 * so these fields are filled in only for the outer frame.
+ */
if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
@@ -291,6 +353,7 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
const struct rte_flow_item_vlan *mask = NULL;
const struct rte_flow_item_vlan supp_mask = {
.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ .inner_type = RTE_BE16(0xffff),
};
rc = sfc_flow_parse_init(item,
@@ -310,7 +373,8 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
* the outer tag and the next matches the inner tag.
*/
if (mask->tci == supp_mask.tci) {
- vid = rte_bswap16(spec->tci);
+ /* Apply mask to keep VID only */
+ vid = rte_bswap16(spec->tci & mask->tci);
if (!(efx_spec->efs_match_flags &
EFX_FILTER_MATCH_OUTER_VID)) {
@@ -333,6 +397,22 @@ sfc_flow_parse_vlan(const struct rte_flow_item *item,
return -rte_errno;
}
+ if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN TPID matching is not supported");
+ return -rte_errno;
+ }
+ if (mask->inner_type == supp_mask.inner_type) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = rte_bswap16(spec->inner_type);
+ } else if (mask->inner_type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask for VLAN inner_type");
+ return -rte_errno;
+ }
+
return 0;
}
@@ -696,6 +776,253 @@ fail_bad_mask:
return -rte_errno;
}
+/*
+ * Filters for encapsulated packets match based on the EtherType and IP
+ * protocol in the outer frame.
+ */
+static int
+sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ uint8_t ip_proto,
+ struct rte_flow_error *error)
+{
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = ip_proto;
+ } else if (efx_spec->efs_ip_proto != ip_proto) {
+ switch (ip_proto) {
+ case EFX_IPPROTO_UDP:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer IP header protocol must be UDP "
+ "in VxLAN/GENEVE pattern");
+ return -rte_errno;
+
+ case EFX_IPPROTO_GRE:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer IP header protocol must be GRE "
+ "in NVGRE pattern");
+ return -rte_errno;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Only VxLAN/GENEVE/NVGRE tunneling patterns "
+ "are supported");
+ return -rte_errno;
+ }
+ }
+
+ if (efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+ efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
+ efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer frame EtherType in pattern with tunneling "
+ "must be IPv4 or IPv6");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
+ const uint8_t *vni_or_vsid_val,
+ const uint8_t *vni_or_vsid_mask,
+ const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
+ 0xff, 0xff, 0xff
+ };
+
+ if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
+ EFX_VNI_OR_VSID_LEN) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
+ rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
+ EFX_VNI_OR_VSID_LEN);
+ } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported VNI/VSID mask");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Convert VXLAN item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only VXLAN network identifier field is supported.
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_vxlan(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_vxlan *spec = NULL;
+ const struct rte_flow_item_vxlan *mask = NULL;
+ const struct rte_flow_item_vxlan supp_mask = {
+ .vni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_UDP, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
+ mask->vni, item, error);
+
+ return rc;
+}
+
+/**
+ * Convert GENEVE item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only Virtual Network Identifier and protocol type
+ * fields are supported. But protocol type can be only Ethernet (0x6558).
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_geneve(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_geneve *spec = NULL;
+ const struct rte_flow_item_geneve *mask = NULL;
+ const struct rte_flow_item_geneve supp_mask = {
+ .protocol = RTE_BE16(0xffff),
+ .vni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_geneve_mask,
+ sizeof(struct rte_flow_item_geneve),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_UDP, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ if (mask->protocol == supp_mask.protocol) {
+ if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GENEVE encap. protocol must be Ethernet "
+ "(0x6558) in the GENEVE pattern item");
+ return -rte_errno;
+ }
+ } else if (mask->protocol != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported mask for GENEVE encap. protocol");
+ return -rte_errno;
+ }
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
+ mask->vni, item, error);
+
+ return rc;
+}
+
+/**
+ * Convert NVGRE item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only virtual subnet ID field is supported.
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_nvgre(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_nvgre *spec = NULL;
+ const struct rte_flow_item_nvgre *mask = NULL;
+ const struct rte_flow_item_nvgre supp_mask = {
+ .tni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_nvgre_mask,
+ sizeof(struct rte_flow_item_nvgre),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_GRE, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
+ mask->tni, item, error);
+
+ return rc;
+}
+
static const struct sfc_flow_item sfc_flow_items[] = {
{
.type = RTE_FLOW_ITEM_TYPE_VOID,
@@ -739,6 +1066,24 @@ static const struct sfc_flow_item sfc_flow_items[] = {
.layer = SFC_FLOW_ITEM_L4,
.parse = sfc_flow_parse_udp,
},
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ .prev_layer = SFC_FLOW_ITEM_L4,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_vxlan,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_GENEVE,
+ .prev_layer = SFC_FLOW_ITEM_L4,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_geneve,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_nvgre,
+ },
};
/*
@@ -773,6 +1118,12 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr,
"Egress is not supported");
return -rte_errno;
}
+ if (attr->transfer != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
if (attr->ingress == 0) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
@@ -780,8 +1131,8 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr,
return -rte_errno;
}
- flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
- flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+ flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
+ flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
return 0;
}
@@ -806,6 +1157,7 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
{
int rc;
unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
+ boolean_t is_ifrm = B_FALSE;
const struct sfc_flow_item *item;
if (pattern == NULL) {
@@ -837,7 +1189,41 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
return -rte_errno;
}
- rc = item->parse(pattern, &flow->spec, error);
+ /*
+ * Allow only VOID and ETH pattern items in the inner frame.
+ * Also check that there is only one tunneling protocol.
+ */
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ if (is_ifrm) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "More than one tunneling protocol");
+ return -rte_errno;
+ }
+ is_ifrm = B_TRUE;
+ break;
+
+ default:
+ if (is_ifrm) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "There is an unsupported pattern item "
+ "in the inner frame");
+ return -rte_errno;
+ }
+ break;
+ }
+
+ rc = item->parse(pattern, &flow->spec.template, error);
if (rc != 0)
return rc;
@@ -859,28 +1245,27 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
return -EINVAL;
rxq = sa->rxq_info[queue->index].rxq;
- flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
+ flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;
return 0;
}
-#if EFSYS_OPT_RX_SCALE
static int
sfc_flow_parse_rss(struct sfc_adapter *sa,
- const struct rte_flow_action_rss *rss,
+ const struct rte_flow_action_rss *action_rss,
struct rte_flow *flow)
{
+ struct sfc_rss *rss = &sa->rss;
unsigned int rxq_sw_index;
struct sfc_rxq *rxq;
unsigned int rxq_hw_index_min;
unsigned int rxq_hw_index_max;
- const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
- uint64_t rss_hf;
- uint8_t *rss_key = NULL;
+ efx_rx_hash_type_t efx_hash_types;
+ const uint8_t *rss_key;
struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
unsigned int i;
- if (rss->num == 0)
+ if (action_rss->queue_num == 0)
return -EINVAL;
rxq_sw_index = sa->rxq_count - 1;
@@ -888,8 +1273,8 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
rxq_hw_index_min = rxq->hw_index;
rxq_hw_index_max = 0;
- for (i = 0; i < rss->num; ++i) {
- rxq_sw_index = rss->queue[i];
+ for (i = 0; i < action_rss->queue_num; ++i) {
+ rxq_sw_index = action_rss->queue[i];
if (rxq_sw_index >= sa->rxq_count)
return -EINVAL;
@@ -903,28 +1288,62 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
rxq_hw_index_max = rxq->hw_index;
}
- rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
- if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
+ switch (action_rss->func) {
+ case RTE_ETH_HASH_FUNCTION_DEFAULT:
+ case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (action_rss->level)
return -EINVAL;
- if (rss_conf != NULL) {
- if (rss_conf->rss_key_len != sizeof(sa->rss_key))
+ /*
+ * Dummy RSS action with only one queue and no specific settings
+ * for hash types and key does not require dedicated RSS context
+ * and may be simplified to single queue action.
+ */
+ if (action_rss->queue_num == 1 && action_rss->types == 0 &&
+ action_rss->key_len == 0) {
+ flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
+ return 0;
+ }
+
+ if (action_rss->types) {
+ int rc;
+
+ rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
+ &efx_hash_types);
+ if (rc != 0)
+ return -rc;
+ } else {
+ unsigned int i;
+
+ efx_hash_types = 0;
+ for (i = 0; i < rss->hf_map_nb_entries; ++i)
+ efx_hash_types |= rss->hf_map[i].efx;
+ }
+
+ if (action_rss->key_len) {
+ if (action_rss->key_len != sizeof(rss->key))
return -EINVAL;
- rss_key = rss_conf->rss_key;
+ rss_key = action_rss->key;
} else {
- rss_key = sa->rss_key;
+ rss_key = rss->key;
}
flow->rss = B_TRUE;
sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
- sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
- rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
+ sfc_rss_conf->rss_hash_types = efx_hash_types;
+ rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
- unsigned int rxq_sw_index = rss->queue[i % rss->num];
+ unsigned int nb_queues = action_rss->queue_num;
+ unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
@@ -932,47 +1351,101 @@ sfc_flow_parse_rss(struct sfc_adapter *sa,
return 0;
}
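The indirection table fill in sfc_flow_parse_rss() is the subtle part: entries are spread over the action's queues round-robin, and each entry stores the queue's offset from the smallest hardware index. A minimal standalone sketch of the same arithmetic (the table size and queue indices are made up for illustration; this is not driver code):

    #include <stdio.h>

    #define RSS_TBL_SIZE 128    /* stands in for RTE_DIM(rss_tbl) */

    int
    main(void)
    {
            /* Hypothetical hardware queue indices from an RSS action */
            unsigned int queues[] = { 3, 4, 6 };
            unsigned int nb_queues = 3;
            unsigned int hw_index_min = 3;  /* smallest index above */
            unsigned int tbl[RSS_TBL_SIZE];
            unsigned int i;

            /* Round-robin spread; each entry is an offset from the
             * minimum hardware index, as in rss_tbl[] above. */
            for (i = 0; i < RSS_TBL_SIZE; i++)
                    tbl[i] = queues[i % nb_queues] - hw_index_min;

            printf("tbl[0..5] = %u %u %u %u %u %u\n",
                   tbl[0], tbl[1], tbl[2], tbl[3], tbl[4], tbl[5]);
            return 0;
    }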
-#endif /* EFSYS_OPT_RX_SCALE */
+
+static int
+sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
+ unsigned int filters_count)
+{
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < filters_count; i++) {
+ int rc;
+
+ rc = efx_filter_remove(sa->nic, &spec->filters[i]);
+ if (ret == 0 && rc != 0) {
+ sfc_err(sa, "failed to remove filter specification "
+ "(rc = %d)", rc);
+ ret = rc;
+ }
+ }
+
+ return ret;
+}
+
+static int
+sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
+{
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < spec->count; i++) {
+ rc = efx_filter_insert(sa->nic, &spec->filters[i]);
+ if (rc != 0) {
+ sfc_flow_spec_flush(sa, spec, i);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
+{
+ return sfc_flow_spec_flush(sa, spec, spec->count);
+}
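The three helpers above give the flow spec all-or-nothing insertion semantics: filters are inserted one by one and, on the first failure, those already inserted are flushed. A standalone sketch of the same rollback pattern; fake_insert() and fake_remove() are stand-ins for efx_filter_insert() and efx_filter_remove():

    #include <stdio.h>

    #define NB_ENTRIES 4

    /* Entry 2 is made to fail so the rollback path is exercised */
    static int fake_insert(int i) { return (i == 2) ? -1 : 0; }
    static void fake_remove(int i) { printf("removed %d\n", i); }

    int
    main(void)
    {
            int i, rc = 0;

            for (i = 0; i < NB_ENTRIES; i++) {
                    rc = fake_insert(i);
                    if (rc != 0) {
                            int j;

                            /* Remove everything inserted so far so no
                             * partial state is left behind. */
                            for (j = 0; j < i; j++)
                                    fake_remove(j);
                            break;
                    }
            }
            printf("rc = %d\n", rc);
            return 0;
    }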
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
struct rte_flow *flow)
{
- efx_filter_spec_t *spec = &flow->spec;
-
-#if EFSYS_OPT_RX_SCALE
- struct sfc_flow_rss *rss = &flow->rss_conf;
+ struct sfc_rss *rss = &sa->rss;
+ struct sfc_flow_rss *flow_rss = &flow->rss_conf;
+ uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+ unsigned int i;
int rc = 0;
if (flow->rss) {
- unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
- rss->rxq_hw_index_min + 1,
+ unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
+ flow_rss->rxq_hw_index_min + 1,
EFX_MAXRSS);
rc = efx_rx_scale_context_alloc(sa->nic,
EFX_RX_SCALE_EXCLUSIVE,
rss_spread,
- &spec->efs_rss_context);
+ &efs_rss_context);
if (rc != 0)
goto fail_scale_context_alloc;
- rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
- EFX_RX_HASHALG_TOEPLITZ,
- rss->rss_hash_types, B_TRUE);
+ rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
+ rss->hash_alg,
+ flow_rss->rss_hash_types, B_TRUE);
if (rc != 0)
goto fail_scale_mode_set;
- rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
- rss->rss_key,
- sizeof(sa->rss_key));
+ rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
+ flow_rss->rss_key,
+ sizeof(rss->key));
if (rc != 0)
goto fail_scale_key_set;
- spec->efs_dmaq_id = rss->rxq_hw_index_min;
- spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ /*
+ * At this point, fully elaborated filter specifications
+ * have been produced from the template. To make sure that
+ * RSS behaviour is consistent between them, set the same
+ * RSS context value everywhere.
+ */
+ for (i = 0; i < flow->spec.count; i++) {
+ efx_filter_spec_t *spec = &flow->spec.filters[i];
+
+ spec->efs_rss_context = efs_rss_context;
+ spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
+ spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ }
}
- rc = efx_filter_insert(sa->nic, spec);
+ rc = sfc_flow_spec_insert(sa, &flow->spec);
if (rc != 0)
goto fail_filter_insert;
@@ -985,8 +1458,9 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
* the HW knows all the information needed to verify
* the table entries, and the operation will succeed
*/
- rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
- rss->rss_tbl, RTE_DIM(rss->rss_tbl));
+ rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
+ flow_rss->rss_tbl,
+ RTE_DIM(flow_rss->rss_tbl));
if (rc != 0)
goto fail_scale_tbl_set;
}
@@ -994,48 +1468,72 @@ sfc_flow_filter_insert(struct sfc_adapter *sa,
return 0;
fail_scale_tbl_set:
- efx_filter_remove(sa->nic, spec);
+ sfc_flow_spec_remove(sa, &flow->spec);
fail_filter_insert:
fail_scale_key_set:
fail_scale_mode_set:
- if (flow->rss)
- efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+ if (efs_rss_context != EFX_RSS_CONTEXT_DEFAULT)
+ efx_rx_scale_context_free(sa->nic, efs_rss_context);
fail_scale_context_alloc:
return rc;
-#else /* !EFSYS_OPT_RX_SCALE */
- return efx_filter_insert(sa->nic, spec);
-#endif /* EFSYS_OPT_RX_SCALE */
}
static int
sfc_flow_filter_remove(struct sfc_adapter *sa,
struct rte_flow *flow)
{
- efx_filter_spec_t *spec = &flow->spec;
int rc = 0;
- rc = efx_filter_remove(sa->nic, spec);
+ rc = sfc_flow_spec_remove(sa, &flow->spec);
if (rc != 0)
return rc;
-#if EFSYS_OPT_RX_SCALE
- if (flow->rss)
+ if (flow->rss) {
+	/*
+	 * All specifications for a given flow rule have the same RSS
+	 * context, so the RSS context value is taken from the first
+	 * filter specification.
+	 */
+ efx_filter_spec_t *spec = &flow->spec.filters[0];
+
rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
-#endif /* EFSYS_OPT_RX_SCALE */
+ }
return rc;
}
static int
+sfc_flow_parse_mark(struct sfc_adapter *sa,
+ const struct rte_flow_action_mark *mark,
+ struct rte_flow *flow)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+
+ if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
+ return EINVAL;
+
+ flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
+ flow->spec.template.efs_mark = mark->id;
+
+ return 0;
+}
+
+static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
const struct rte_flow_action actions[],
struct rte_flow *flow,
struct rte_flow_error *error)
{
int rc;
- boolean_t is_specified = B_FALSE;
+ const unsigned int dp_rx_features = sa->dp_rx->features;
+ uint32_t actions_set = 0;
+ const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
+ (1UL << RTE_FLOW_ACTION_TYPE_RSS) |
+ (1UL << RTE_FLOW_ACTION_TYPE_DROP);
+ const uint32_t mark_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_MARK) |
+ (1UL << RTE_FLOW_ACTION_TYPE_FLAG);
if (actions == NULL) {
rte_flow_error_set(error, EINVAL,
@@ -1044,12 +1542,22 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
return -rte_errno;
}
+#define SFC_BUILD_SET_OVERFLOW(_action, _set) \
+ RTE_BUILD_BUG_ON(_action >= sizeof(_set) * CHAR_BIT)
+
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VOID,
+ actions_set);
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_QUEUE,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
+
rc = sfc_flow_parse_queue(sa, actions->conf, flow);
if (rc != 0) {
rte_flow_error_set(error, EINVAL,
@@ -1057,23 +1565,71 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
"Bad QUEUE action");
return -rte_errno;
}
-
- is_specified = B_TRUE;
break;
-#if EFSYS_OPT_RX_SCALE
case RTE_FLOW_ACTION_TYPE_RSS:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_RSS,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
+
rc = sfc_flow_parse_rss(sa, actions->conf, flow);
if (rc != 0) {
- rte_flow_error_set(error, rc,
+ rte_flow_error_set(error, -rc,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"Bad RSS action");
return -rte_errno;
}
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_DROP,
+ actions_set);
+ if ((actions_set & fate_actions_mask) != 0)
+ goto fail_fate_actions;
- is_specified = B_TRUE;
+ flow->spec.template.efs_dmaq_id =
+ EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
+ actions_set);
+ if ((actions_set & mark_actions_mask) != 0)
+ goto fail_actions_overlap;
+
+ if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_FLAG) == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "FLAG action is not supported on the current Rx datapath");
+ return -rte_errno;
+ }
+
+ flow->spec.template.efs_flags |=
+ EFX_FILTER_FLAG_ACTION_FLAG;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
+ actions_set);
+ if ((actions_set & mark_actions_mask) != 0)
+ goto fail_actions_overlap;
+
+ if ((dp_rx_features & SFC_DP_RX_FEAT_FLOW_MARK) == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "MARK action is not supported on the current Rx datapath");
+ return -rte_errno;
+ }
+
+ rc = sfc_flow_parse_mark(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad MARK action");
+ return -rte_errno;
+ }
break;
-#endif /* EFSYS_OPT_RX_SCALE */
default:
rte_flow_error_set(error, ENOTSUP,
@@ -1081,12 +1637,607 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
"Action is not supported");
return -rte_errno;
}
+
+ actions_set |= (1UL << actions->type);
+ }
+#undef SFC_BUILD_SET_OVERFLOW
+
+ /* When fate is unknown, drop traffic. */
+ if ((actions_set & fate_actions_mask) == 0) {
+ flow->spec.template.efs_dmaq_id =
+ EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
}
- if (!is_specified) {
+ return 0;
+
+fail_fate_actions:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Cannot combine several fate-deciding actions, "
+ "choose between QUEUE, RSS or DROP");
+ return -rte_errno;
+
+fail_actions_overlap:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Overlapping actions are not supported");
+ return -rte_errno;
+}
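The parser records every action it has seen in the actions_set bitmask (note the RTE_BUILD_BUG_ON guard against action values that would not fit in it). A sketch of the same bookkeeping with illustrative action identifiers, showing how a second fate-deciding action is rejected by testing the mask before the bit is added:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative identifiers, not the rte_flow enum values */
    enum { ACT_QUEUE = 1, ACT_RSS = 2, ACT_DROP = 3, ACT_MARK = 4 };

    int
    main(void)
    {
            const uint32_t fate_mask = (1UL << ACT_QUEUE) |
                                       (1UL << ACT_RSS) |
                                       (1UL << ACT_DROP);
            int actions[] = { ACT_MARK, ACT_QUEUE, ACT_RSS };
            uint32_t actions_set = 0;
            unsigned int i;

            for (i = 0; i < 3; i++) {
                    int type = actions[i];

                    /* A second fate action overlaps the mask */
                    if (((1UL << type) & fate_mask) != 0 &&
                        (actions_set & fate_mask) != 0) {
                            printf("action %d rejected\n", type);
                            return 1;
                    }
                    actions_set |= 1UL << type;
            }
            return 0;
    }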
+
+/**
+ * Set the EFX_FILTER_MATCH_UNKNOWN_UCAST_DST
+ * and EFX_FILTER_MATCH_UNKNOWN_MCAST_DST match flags in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ *   How many specifications should have the same match flag; this is
+ *   the number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_unknown_dst_flags(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const efx_filter_match_flags_t vals[] = {
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
- "Action is unspecified");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect while copying "
+ "by unknown destination flags");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ /* The check above ensures that divisor can't be zero here */
+ spec->filters[i].efs_match_flags |=
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Check that the following condition is met:
+ * - the list of supported filters has a filter
+ * with EFX_FILTER_MATCH_UNKNOWN_MCAST_DST flag instead of
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST, since this filter will also
+ * be inserted.
+ *
+ * @param match[in]
+ *   The match flags of the filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_unknown_dst_flags(efx_filter_match_flags_t match,
+ __rte_unused efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t match_mcast_dst;
+
+ match_mcast_dst =
+ (match & ~EFX_FILTER_MATCH_UNKNOWN_UCAST_DST) |
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_mcast_dst == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_ETHER_TYPE match flag and EFX_ETHER_TYPE_IPV4 and
+ * EFX_ETHER_TYPE_IPV6 values of the corresponding field in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ *   How many specifications should have the same EtherType value; this
+ *   is the number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_ethertypes(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const uint16_t vals[] = {
+ EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect "
+ "while copying by Ethertype");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ spec->filters[i].efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE;
+
+ /*
+ * The check above ensures that
+ * filters_count_for_one_val is not 0
+ */
+ spec->filters[i].efs_ether_type =
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
+ * in the same specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ *   How many specifications should have the same match flag; this is
+ *   the number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+
+ if (filters_count_for_one_val != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect "
+ "while copying by outer VLAN ID");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ spec->filters[i].efs_match_flags |=
+ EFX_FILTER_MATCH_OUTER_VID;
+
+ spec->filters[i].efs_outer_vid = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
+ * EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
+ * specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ *   How many specifications should have the same match flag; this is
+ *   the number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_ifrm_unknown_dst_flags(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ static const efx_filter_match_flags_t vals[] = {
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
+ };
+
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect while copying "
+ "by inner frame unknown destination flags");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec->count; i++) {
+ /* The check above ensures that divisor can't be zero here */
+ spec->filters[i].efs_match_flags |=
+ vals[i / filters_count_for_one_val];
+ }
+
+ return 0;
+}
+
+/**
+ * Check that the following conditions are met:
+ * - the specification corresponds to a filter for encapsulated traffic
+ * - the list of supported filters has a filter
+ * with EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST flag instead of
+ * EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST, since this filter will also
+ * be inserted.
+ *
+ * @param match[in]
+ *   The match flags of the filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_ifrm_unknown_dst_flags(efx_filter_match_flags_t match,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_tunnel_protocol_t encap_type = spec->efs_encap_type;
+ efx_filter_match_flags_t match_mcast_dst;
+
+ if (encap_type == EFX_TUNNEL_PROTOCOL_NONE)
+ return B_FALSE;
+
+ match_mcast_dst =
+ (match & ~EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST) |
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_mcast_dst == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+/**
+ * Check that the list of supported filters has a filter that differs
+ * from @p match only in that it lacks the EFX_FILTER_MATCH_OUTER_VID
+ * flag; in this case that filter will be used and the flag
+ * EFX_FILTER_MATCH_OUTER_VID is not needed.
+ *
+ * @param match[in]
+ *   The match flags of the filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
+ __rte_unused efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t match_without_vid =
+ match & ~EFX_FILTER_MATCH_OUTER_VID;
+
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_without_vid == filter->supported_match[i])
+ return B_FALSE;
+ }
+
+ return B_TRUE;
+}
+
+/*
+ * Match flags that can be automatically added to filters.
+ * Selecting the last minimum when searching for the copy flag ensures that the
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST flag has a higher priority than
+ * EFX_FILTER_MATCH_ETHER_TYPE. This is because the filter
+ * EFX_FILTER_MATCH_UNKNOWN_UCAST_DST is at the end of the list of supported
+ * filters.
+ */
+static const struct sfc_flow_copy_flag sfc_flow_copy_flags[] = {
+ {
+ .flag = EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_unknown_dst_flags,
+ .spec_check = sfc_flow_check_unknown_dst_flags,
+ },
+ {
+ .flag = EFX_FILTER_MATCH_ETHER_TYPE,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_ethertypes,
+ .spec_check = NULL,
+ },
+ {
+ .flag = EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
+ .vals_count = 2,
+ .set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
+ .spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
+ },
+ {
+ .flag = EFX_FILTER_MATCH_OUTER_VID,
+ .vals_count = 1,
+ .set_vals = sfc_flow_set_outer_vid_flag,
+ .spec_check = sfc_flow_check_outer_vid_flag,
+ },
+};
+
+/* Get item from array sfc_flow_copy_flags */
+static const struct sfc_flow_copy_flag *
+sfc_flow_get_copy_flag(efx_filter_match_flags_t flag)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ if (sfc_flow_copy_flags[i].flag == flag)
+ return &sfc_flow_copy_flags[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * Make copies of the specifications, set match flag and values
+ * of the field that corresponds to it.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param flag[in]
+ * The match flag to add.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_spec_add_match_flag(struct sfc_flow_spec *spec,
+ efx_filter_match_flags_t flag,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+ unsigned int new_filters_count;
+ unsigned int filters_count_for_one_val;
+ const struct sfc_flow_copy_flag *copy_flag;
+ int rc;
+
+ copy_flag = sfc_flow_get_copy_flag(flag);
+ if (copy_flag == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Unsupported spec field for copying");
+ return -rte_errno;
+ }
+
+ new_filters_count = spec->count * copy_flag->vals_count;
+ if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Too much EFX specifications in the flow rule");
+ return -rte_errno;
+ }
+
+ /* Copy filters specifications */
+ for (i = spec->count; i < new_filters_count; i++)
+ spec->filters[i] = spec->filters[i - spec->count];
+
+ filters_count_for_one_val = spec->count;
+ spec->count = new_filters_count;
+
+ rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
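sfc_flow_spec_add_match_flag() multiplies the spec list and then lets the set_vals() callback distribute per-copy values; index i / filters_count_for_one_val picks vals[0] for the first batch of copies and vals[1] for the second. A standalone sketch of just that arithmetic (the filter and value contents are made up):

    #include <stdio.h>

    int
    main(void)
    {
            /* Two specs before copying, two values to distribute */
            unsigned int count = 2, vals_count = 2;
            unsigned int new_count = count * vals_count;
            int filters[8] = { 10, 11 };     /* pretend filter specs */
            const int vals[] = { 100, 200 }; /* e.g. UCAST/MCAST flags */
            unsigned int i;

            /* Copy step: entry i duplicates entry i - count */
            for (i = count; i < new_count; i++)
                    filters[i] = filters[i - count];

            /* Distribution step: vals[i / count] gives the first
             * "count" entries vals[0] and the next "count" vals[1]. */
            for (i = 0; i < new_count; i++)
                    printf("filter %d gets val %d\n",
                           filters[i], vals[i / count]);
            return 0;
    }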
+
+/**
+ * Check that the given set of match flags missing in the original filter spec
+ * could be covered by adding spec copies which specify the corresponding
+ * flags and packet field values to match.
+ *
+ * @param miss_flags[in]
+ *   Match flags that are missing from the specification but present
+ *   in the supported filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter.
+ *
+ * @return
+ *   Number of specifications after copying, or 0 if the flags cannot be added.
+ */
+static unsigned int
+sfc_flow_check_missing_flags(efx_filter_match_flags_t miss_flags,
+ efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t copy_flags = 0;
+ efx_filter_match_flags_t flag;
+ efx_filter_match_flags_t match = spec->efs_match_flags | miss_flags;
+ sfc_flow_spec_check *check;
+ unsigned int multiplier = 1;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ flag = sfc_flow_copy_flags[i].flag;
+ check = sfc_flow_copy_flags[i].spec_check;
+ if ((flag & miss_flags) == flag) {
+ if (check != NULL && (!check(match, spec, filter)))
+ continue;
+
+ copy_flags |= flag;
+ multiplier *= sfc_flow_copy_flags[i].vals_count;
+ }
+ }
+
+ if (copy_flags == miss_flags)
+ return multiplier;
+
+ return 0;
+}
+
+/**
+ * Attempt to supplement the specification template with the minimal
+ * supported set of match flags. To do this, the specifications are
+ * copied and filled with the values of the fields that correspond to
+ * the missing flags.
+ * The necessary and sufficient filter is built from the fewest
+ * copies that cover the minimally required set of flags.
+ *
+ * @param sa[in]
+ * SFC adapter.
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_spec_filters_complete(struct sfc_adapter *sa,
+ struct sfc_flow_spec *spec,
+ struct rte_flow_error *error)
+{
+ struct sfc_filter *filter = &sa->filter;
+ efx_filter_match_flags_t miss_flags;
+ efx_filter_match_flags_t min_miss_flags = 0;
+ efx_filter_match_flags_t match;
+ unsigned int min_multiplier = UINT_MAX;
+ unsigned int multiplier;
+ unsigned int i;
+ int rc;
+
+ match = spec->template.efs_match_flags;
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if ((match & filter->supported_match[i]) == match) {
+ miss_flags = filter->supported_match[i] & (~match);
+ multiplier = sfc_flow_check_missing_flags(miss_flags,
+ &spec->template, filter);
+ if (multiplier > 0) {
+ if (multiplier <= min_multiplier) {
+ min_multiplier = multiplier;
+ min_miss_flags = miss_flags;
+ }
+ }
+ }
+ }
+
+ if (min_multiplier == UINT_MAX) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "The flow rule pattern is unsupported");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < RTE_DIM(sfc_flow_copy_flags); i++) {
+ efx_filter_match_flags_t flag = sfc_flow_copy_flags[i].flag;
+
+ if ((flag & min_miss_flags) == flag) {
+ rc = sfc_flow_spec_add_match_flag(spec, flag, error);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
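sfc_flow_spec_filters_complete() scans the supported filters for those whose extra flags can be auto-added and keeps the candidate with the smallest copy multiplier; the "<=" comparison keeps the last minimum, giving later entries in the supported list priority on ties (which is what the sfc_flow_copy_flags[] comment relies on). A sketch of that selection with hypothetical multipliers (0 meaning the missing flags cannot be covered):

    #include <limits.h>
    #include <stdio.h>

    int
    main(void)
    {
            unsigned int multipliers[] = { 4, 2, 0, 2 };
            unsigned int min_multiplier = UINT_MAX;
            unsigned int chosen = 0;
            unsigned int i;

            for (i = 0; i < 4; i++) {
                    if (multipliers[i] == 0)
                            continue;
                    /* "<=" keeps the LAST minimum seen */
                    if (multipliers[i] <= min_multiplier) {
                            min_multiplier = multipliers[i];
                            chosen = i;
                    }
            }
            /* Prints "candidate 3, 2 copies": index 3 ties with
             * index 1 but wins because it comes later. */
            printf("candidate %u, %u copies\n", chosen, min_multiplier);
            return 0;
    }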
+
+/**
+ * Check whether a set of match flags matches a filter pattern. The
+ * filter is described by its match flags, which may additionally
+ * include the OUTER_VID and INNER_VID flags.
+ *
+ * @param match_flags[in]
+ * Set of match flags.
+ * @param flags_pattern[in]
+ * Pattern of filter match flags.
+ */
+static boolean_t
+sfc_flow_is_match_with_vids(efx_filter_match_flags_t match_flags,
+ efx_filter_match_flags_t flags_pattern)
+{
+ if ((match_flags & flags_pattern) != flags_pattern)
+ return B_FALSE;
+
+ switch (match_flags & ~flags_pattern) {
+ case 0:
+ case EFX_FILTER_MATCH_OUTER_VID:
+ case EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_INNER_VID:
+ return B_TRUE;
+ default:
+ return B_FALSE;
+ }
+}
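sfc_flow_is_match_with_vids() is a subset test with a whitelist of leftovers: the pattern must be fully present, and only the VID flags may remain on top of it. A standalone sketch with illustrative flag values (not the efx ones):

    #include <stdint.h>
    #include <stdio.h>

    #define F_ETHER_TYPE 0x1
    #define F_OUTER_VID  0x2
    #define F_INNER_VID  0x4

    static int
    match_with_vids(uint32_t match, uint32_t pattern)
    {
            if ((match & pattern) != pattern)
                    return 0;       /* pattern not fully present */
            /* Only VID flags may remain on top of the pattern */
            switch (match & ~pattern) {
            case 0:
            case F_OUTER_VID:
            case F_OUTER_VID | F_INNER_VID:
                    return 1;
            default:
                    return 0;
            }
    }

    int
    main(void)
    {
            printf("%d\n", match_with_vids(F_ETHER_TYPE | F_OUTER_VID,
                                           F_ETHER_TYPE)); /* 1 */
            printf("%d\n", match_with_vids(F_ETHER_TYPE | F_INNER_VID,
                                           F_ETHER_TYPE)); /* 0 */
            return 0;
    }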
+
+/**
+ * Check whether the spec maps to a hardware filter which is known to be
+ * ineffective despite being valid.
+ *
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ * @param spec[in]
+ * SFC flow specification.
+ */
+static boolean_t
+sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
+ struct sfc_flow_spec *spec)
+{
+ unsigned int i;
+ uint16_t ether_type;
+ uint8_t ip_proto;
+ efx_filter_match_flags_t match_flags;
+
+ for (i = 0; i < spec->count; i++) {
+ match_flags = spec->filters[i].efs_match_flags;
+
+ if (sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE) ||
+ sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_LOC_MAC)) {
+ ether_type = spec->filters[i].efs_ether_type;
+ if (filter->supports_ip_proto_or_addr_filter &&
+ (ether_type == EFX_ETHER_TYPE_IPV4 ||
+ ether_type == EFX_ETHER_TYPE_IPV6))
+ return B_TRUE;
+ } else if (sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO) ||
+ sfc_flow_is_match_with_vids(match_flags,
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_MAC)) {
+ ip_proto = spec->filters[i].efs_ip_proto;
+ if (filter->supports_rem_or_local_port_filter &&
+ (ip_proto == EFX_IPPROTO_TCP ||
+ ip_proto == EFX_IPPROTO_UDP))
+ return B_TRUE;
+ }
+ }
+
+ return B_FALSE;
+}
+
+static int
+sfc_flow_validate_match_flags(struct sfc_adapter *sa,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ efx_filter_spec_t *spec_tmpl = &flow->spec.template;
+ efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
+ int rc;
+
+ /* Initialize the first filter spec with template */
+ flow->spec.filters[0] = *spec_tmpl;
+ flow->spec.count = 1;
+
+ if (!sfc_filter_is_match_supported(sa, match_flags)) {
+ rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
+ if (rc != 0)
+ return rc;
+ }
+
+ if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "The flow rule pattern is unsupported");
return -rte_errno;
}
@@ -1116,12 +2267,11 @@ sfc_flow_parse(struct rte_eth_dev *dev,
if (rc != 0)
goto fail_bad_value;
- if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Flow rule pattern is not supported");
- return -rte_errno;
- }
+ rc = sfc_flow_validate_match_flags(sa, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ return 0;
fail_bad_value:
return rc;
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index 35472ad3..71ec18cb 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -19,7 +19,13 @@
extern "C" {
#endif
-#if EFSYS_OPT_RX_SCALE
+/*
+ * The maximum number of fully elaborated hardware filter specifications
+ * which can be produced from a template by means of multiplication
+ * when missing match flags need to be taken into account
+ */
+#define SF_FLOW_SPEC_NB_FILTERS_MAX 8
+
/* RSS configuration storage */
struct sfc_flow_rss {
unsigned int rxq_hw_index_min;
@@ -28,15 +34,22 @@ struct sfc_flow_rss {
uint8_t rss_key[EFX_RSS_KEY_SIZE];
unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
};
-#endif /* EFSYS_OPT_RX_SCALE */
+
+/* Filter specification storage */
+struct sfc_flow_spec {
+ /* partial specification from flow rule */
+ efx_filter_spec_t template;
+ /* fully elaborated hardware filters specifications */
+ efx_filter_spec_t filters[SF_FLOW_SPEC_NB_FILTERS_MAX];
+ /* number of complete specifications */
+ unsigned int count;
+};
/* PMD-specific definition of the opaque type from rte_flow.h */
struct rte_flow {
- efx_filter_spec_t spec; /* filter specification */
-#if EFSYS_OPT_RX_SCALE
+ struct sfc_flow_spec spec; /* flow spec for hardware filter(s) */
boolean_t rss; /* RSS toggle */
struct sfc_flow_rss rss_conf; /* RSS configuration */
-#endif /* EFSYS_OPT_RX_SCALE */
TAILQ_ENTRY(rte_flow) entries; /* flow list entries */
};
diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c
index d6c84927..fbdc7eea 100644
--- a/drivers/net/sfc/sfc_intr.c
+++ b/drivers/net/sfc/sfc_intr.c
@@ -86,7 +86,7 @@ sfc_intr_line_handler(void *cb_arg)
exit:
if (lsc_seq != sa->port.lsc_seq) {
- sfc_info(sa, "link status change event: link %s",
+ sfc_notice(sa, "link status change event: link %s",
sa->eth_dev->data->dev_link.link_status ?
"UP" : "DOWN");
_rte_eth_dev_callback_process(sa->eth_dev,
@@ -130,7 +130,7 @@ sfc_intr_message_handler(void *cb_arg)
exit:
if (lsc_seq != sa->port.lsc_seq) {
- sfc_info(sa, "link status change event");
+ sfc_notice(sa, "link status change event");
_rte_eth_dev_callback_process(sa->eth_dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
@@ -251,7 +251,7 @@ sfc_intr_configure(struct sfc_adapter *sa)
intr->handler = NULL;
intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0);
if (!intr->lsc_intr) {
- sfc_info(sa, "LSC tracking using interrupts is disabled");
+ sfc_notice(sa, "LSC tracking using interrupts is disabled");
goto done;
}
diff --git a/drivers/net/sfc/sfc_kvargs.c b/drivers/net/sfc/sfc_kvargs.c
index 8f55e99b..7a89c769 100644
--- a/drivers/net/sfc/sfc_kvargs.c
+++ b/drivers/net/sfc/sfc_kvargs.c
@@ -23,11 +23,11 @@ sfc_kvargs_parse(struct sfc_adapter *sa)
struct rte_devargs *devargs = eth_dev->device->devargs;
const char **params = (const char *[]){
SFC_KVARG_STATS_UPDATE_PERIOD_MS,
- SFC_KVARG_DEBUG_INIT,
- SFC_KVARG_MCDI_LOGGING,
SFC_KVARG_PERF_PROFILE,
SFC_KVARG_RX_DATAPATH,
SFC_KVARG_TX_DATAPATH,
+ SFC_KVARG_FW_VARIANT,
+ SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
NULL,
};
diff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h
index e7044ca6..4506667a 100644
--- a/drivers/net/sfc/sfc_kvargs.h
+++ b/drivers/net/sfc/sfc_kvargs.h
@@ -18,10 +18,6 @@ extern "C" {
#define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]"
-#define SFC_KVARG_DEBUG_INIT "debug_init"
-
-#define SFC_KVARG_MCDI_LOGGING "mcdi_logging"
-
#define SFC_KVARG_PERF_PROFILE "perf_profile"
#define SFC_KVARG_PERF_PROFILE_AUTO "auto"
@@ -37,11 +33,13 @@ extern "C" {
#define SFC_KVARG_DATAPATH_EFX "efx"
#define SFC_KVARG_DATAPATH_EF10 "ef10"
#define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple"
+#define SFC_KVARG_DATAPATH_EF10_ESSB "ef10_essb"
#define SFC_KVARG_RX_DATAPATH "rx_datapath"
#define SFC_KVARG_VALUES_RX_DATAPATH \
"[" SFC_KVARG_DATAPATH_EFX "|" \
- SFC_KVARG_DATAPATH_EF10 "]"
+ SFC_KVARG_DATAPATH_EF10 "|" \
+ SFC_KVARG_DATAPATH_EF10_ESSB "]"
#define SFC_KVARG_TX_DATAPATH "tx_datapath"
#define SFC_KVARG_VALUES_TX_DATAPATH \
@@ -49,6 +47,22 @@ extern "C" {
SFC_KVARG_DATAPATH_EF10 "|" \
SFC_KVARG_DATAPATH_EF10_SIMPLE "]"
+#define SFC_KVARG_FW_VARIANT "fw_variant"
+
+#define SFC_KVARG_FW_VARIANT_DONT_CARE "dont-care"
+#define SFC_KVARG_FW_VARIANT_FULL_FEATURED "full-feature"
+#define SFC_KVARG_FW_VARIANT_LOW_LATENCY "ultra-low-latency"
+#define SFC_KVARG_FW_VARIANT_PACKED_STREAM "capture-packed-stream"
+#define SFC_KVARG_FW_VARIANT_DPDK "dpdk"
+#define SFC_KVARG_VALUES_FW_VARIANT \
+ "[" SFC_KVARG_FW_VARIANT_DONT_CARE "|" \
+ SFC_KVARG_FW_VARIANT_FULL_FEATURED "|" \
+ SFC_KVARG_FW_VARIANT_LOW_LATENCY "|" \
+ SFC_KVARG_FW_VARIANT_PACKED_STREAM "|" \
+ SFC_KVARG_FW_VARIANT_DPDK "]"
+
+#define SFC_KVARG_RXD_WAIT_TIMEOUT_NS "rxd_wait_timeout_ns"
+
struct sfc_adapter;
int sfc_kvargs_parse(struct sfc_adapter *sa);
diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h
index a18191ed..d6f34352 100644
--- a/drivers/net/sfc/sfc_log.h
+++ b/drivers/net/sfc/sfc_log.h
@@ -10,14 +10,35 @@
#ifndef _SFC_LOG_H_
#define _SFC_LOG_H_
+/** Generic driver log type */
+extern uint32_t sfc_logtype_driver;
+
+/** Common log type name prefix */
+#define SFC_LOGTYPE_PREFIX "pmd.net.sfc."
+
+/** Log PMD generic message, add a prefix and a line break */
+#define SFC_GENERIC_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, sfc_logtype_driver, \
+ RTE_FMT("PMD: " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
+ RTE_FMT_TAIL(__VA_ARGS__ ,)))
+
+/** Name prefix for the per-device log type used to report basic information */
+#define SFC_LOGTYPE_MAIN_STR SFC_LOGTYPE_PREFIX "main"
+
+/** Device MCDI log type name prefix */
+#define SFC_LOGTYPE_MCDI_STR SFC_LOGTYPE_PREFIX "mcdi"
+
+/** Level value used by MCDI log statements */
+#define SFC_LOG_LEVEL_MCDI RTE_LOG_INFO
+
/* Log PMD message, automatically add prefix and \n */
-#define SFC_LOG(sa, level, ...) \
+#define SFC_LOG(sa, level, type, ...) \
do { \
const struct sfc_adapter *__sa = (sa); \
\
- RTE_LOG(level, PMD, \
- RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \
- RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ rte_log(level, type, \
+ RTE_FMT("PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu8 \
+ ": " RTE_FMT_HEAD(__VA_ARGS__ ,) "\n", \
__sa->pci_addr.domain, \
__sa->pci_addr.bus, \
__sa->pci_addr.devid, \
@@ -27,27 +48,55 @@
} while (0)
#define sfc_err(sa, ...) \
- SFC_LOG(sa, ERR, __VA_ARGS__)
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_ERR, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
#define sfc_warn(sa, ...) \
- SFC_LOG(sa, WARNING, __VA_ARGS__)
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_WARNING, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
#define sfc_notice(sa, ...) \
- SFC_LOG(sa, NOTICE, __VA_ARGS__)
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_NOTICE, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
#define sfc_info(sa, ...) \
- SFC_LOG(sa, INFO, __VA_ARGS__)
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \
+ __VA_ARGS__); \
+ } while (0)
#define sfc_log_init(sa, ...) \
do { \
const struct sfc_adapter *_sa = (sa); \
\
- if (_sa->debug_init) \
- SFC_LOG(_sa, INFO, \
- RTE_FMT("%s(): " \
- RTE_FMT_HEAD(__VA_ARGS__,), \
- __func__, \
- RTE_FMT_TAIL(__VA_ARGS__,))); \
+ SFC_LOG(_sa, RTE_LOG_INFO, _sa->logtype_main, \
+ RTE_FMT("%s(): " \
+ RTE_FMT_HEAD(__VA_ARGS__ ,), \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__ ,))); \
} while (0)
+#define sfc_log_mcdi(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ SFC_LOG(_sa, SFC_LOG_LEVEL_MCDI, _sa->mcdi.logtype, \
+ __VA_ARGS__); \
+ } while (0)
+
#endif /* _SFC_LOG_H_ */
diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c
index 9d92b8c5..007506b4 100644
--- a/drivers/net/sfc/sfc_mcdi.c
+++ b/drivers/net/sfc/sfc_mcdi.c
@@ -15,7 +15,6 @@
#include "sfc.h"
#include "sfc_log.h"
-#include "sfc_kvargs.h"
#include "sfc_ev.h"
#define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */
@@ -176,7 +175,7 @@ sfc_mcdi_do_log(const struct sfc_adapter *sa,
* at the end which is required by netlogdecode.
*/
buffer[position] = '\0';
- sfc_info(sa, "%s \\", buffer);
+ sfc_log_mcdi(sa, "%s \\", buffer);
/* Preserve prefix for the next log message */
position = pfxsize;
}
@@ -198,10 +197,17 @@ sfc_mcdi_logger(void *arg, efx_log_msg_t type,
size_t pfxsize;
size_t start;
- if (!sa->mcdi.logging)
+ /*
+	 * Unlike the other cases, MCDI logging requires more onerous work
+	 * to produce a message. If the dynamic log level prevents the end
+	 * result from being printed, that CPU time is wasted.
+ *
+ * To avoid wasting time, the actual level is examined in advance.
+ */
+ if (rte_log_get_level(sa->mcdi.logtype) < (int)SFC_LOG_LEVEL_MCDI)
return;
- /* The format including prefix added by sfc_info() is the format
+ /* The format including prefix added by sfc_log_mcdi() is the format
* consumed by the Solarflare netlogdecode tool.
*/
pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:",
@@ -212,7 +218,7 @@ sfc_mcdi_logger(void *arg, efx_log_msg_t type,
start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start);
if (start != pfxsize) {
buffer[start] = '\0';
- sfc_info(sa, "%s", buffer);
+ sfc_log_mcdi(sa, "%s", buffer);
}
}
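The early return keyed on rte_log_get_level() is the point of this hunk: the level is checked before any of the expensive buffer formatting happens. A standalone sketch of the pattern, with a plain variable standing in for the dynamic log level lookup:

    #include <stdio.h>

    #define LEVEL_INFO 7

    /* Stand-in for rte_log_get_level(logtype) */
    static int current_level = 4;

    static void
    log_expensive(const char *what)
    {
            /* Check first so the formatting work is skipped entirely
             * when nothing would be printed. */
            if (current_level < LEVEL_INFO)
                    return;

            /* ... imagine hex-dumping a large MCDI buffer here ... */
            printf("MCDI RPC %s\n", what);
    }

    int
    main(void)
    {
            log_expensive("REQUEST");       /* suppressed */
            current_level = LEVEL_INFO;
            log_expensive("RESPONSE");      /* printed */
            return 0;
    }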
@@ -250,11 +256,8 @@ sfc_mcdi_init(struct sfc_adapter *sa)
if (rc != 0)
goto fail_dma_alloc;
- /* Convert negative error to positive used in the driver */
- rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
- sfc_kvarg_bool_handler, &mcdi->logging);
- if (rc != 0)
- goto fail_kvargs_process;
+ mcdi->logtype = sfc_register_logtype(sa, SFC_LOGTYPE_MCDI_STR,
+ RTE_LOG_NOTICE);
emtp = &mcdi->transport;
emtp->emt_context = sa;
@@ -274,8 +277,6 @@ sfc_mcdi_init(struct sfc_adapter *sa)
fail_mcdi_init:
memset(emtp, 0, sizeof(*emtp));
-
-fail_kvargs_process:
sfc_dma_free(sa, &mcdi->mem);
fail_dma_alloc:
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index c423f527..5384dbbd 100644
--- a/drivers/net/sfc/sfc_port.c
+++ b/drivers/net/sfc/sfc_port.c
@@ -121,6 +121,28 @@ sfc_port_init_dev_link(struct sfc_adapter *sa)
return 0;
}
+#if EFSYS_OPT_LOOPBACK
+
+static efx_link_mode_t
+sfc_port_phy_caps_to_max_link_speed(uint32_t phy_caps)
+{
+ if (phy_caps & (1u << EFX_PHY_CAP_100000FDX))
+ return EFX_LINK_100000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_50000FDX))
+ return EFX_LINK_50000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_40000FDX))
+ return EFX_LINK_40000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_25000FDX))
+ return EFX_LINK_25000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_10000FDX))
+ return EFX_LINK_10000FDX;
+ if (phy_caps & (1u << EFX_PHY_CAP_1000FDX))
+ return EFX_LINK_1000FDX;
+ return EFX_LINK_UNKNOWN;
+}
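sfc_port_phy_caps_to_max_link_speed() tests capability bits from fastest to slowest so the first hit is the maximum advertised speed. A sketch of the same cascade with illustrative bit positions (not the efx values):

    #include <stdio.h>

    enum { CAP_1G = 0, CAP_10G = 1, CAP_25G = 2, CAP_100G = 3 };

    static const char *
    max_speed(unsigned int phy_caps)
    {
            /* Fastest first, so the first match is the maximum */
            if (phy_caps & (1u << CAP_100G))
                    return "100G";
            if (phy_caps & (1u << CAP_25G))
                    return "25G";
            if (phy_caps & (1u << CAP_10G))
                    return "10G";
            if (phy_caps & (1u << CAP_1G))
                    return "1G";
            return "unknown";
    }

    int
    main(void)
    {
            /* Prints "25G" for a PHY advertising 1G and 25G */
            printf("%s\n", max_speed((1u << CAP_1G) | (1u << CAP_25G)));
            return 0;
    }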
+
+#endif
+
int
sfc_port_start(struct sfc_adapter *sa)
{
@@ -143,6 +165,21 @@ sfc_port_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_port_init;
+#if EFSYS_OPT_LOOPBACK
+ if (sa->eth_dev->data->dev_conf.lpbk_mode != 0) {
+ efx_link_mode_t link_mode;
+
+ link_mode =
+ sfc_port_phy_caps_to_max_link_speed(port->phy_adv_cap);
+ sfc_log_init(sa, "set loopback link_mode=%u type=%u", link_mode,
+ sa->eth_dev->data->dev_conf.lpbk_mode);
+ rc = efx_port_loopback_set(sa->nic, link_mode,
+ sa->eth_dev->data->dev_conf.lpbk_mode);
+ if (rc != 0)
+ goto fail_loopback_set;
+ }
+#endif
+
sfc_log_init(sa, "set flow control to %#x autoneg=%u",
port->flow_ctrl, port->flow_ctrl_autoneg);
rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl,
@@ -155,6 +192,16 @@ sfc_port_start(struct sfc_adapter *sa)
SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0);
phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps);
+ /*
+ * No controls for FEC yet. Use default FEC mode.
+ * I.e. advertise everything supported (*_FEC=1), but do not request
+ * anything explicitly (*_FEC_REQUESTED=0).
+ */
+ phy_adv_cap |= port->phy_adv_cap_mask &
+ (1u << EFX_PHY_CAP_BASER_FEC |
+ 1u << EFX_PHY_CAP_RS_FEC |
+ 1u << EFX_PHY_CAP_25G_BASER_FEC);
+
sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap);
rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap);
if (rc != 0)
@@ -268,6 +315,9 @@ fail_mac_addr_set:
fail_mac_pdu_set:
fail_phy_adv_cap_set:
fail_mac_fcntl_set:
+#if EFSYS_OPT_LOOPBACK
+fail_loopback_set:
+#endif
efx_port_fini(sa->nic);
fail_port_init:
@@ -323,6 +373,8 @@ sfc_port_attach(struct sfc_adapter *sa)
struct sfc_port *port = &sa->port;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
const struct ether_addr *from;
+ uint32_t mac_nstats;
+ size_t mac_stats_size;
long kvarg_stats_update_period_ms;
int rc;
@@ -358,7 +410,9 @@ sfc_port_attach(struct sfc_adapter *sa)
if (port->mac_stats_buf == NULL)
goto fail_mac_stats_buf_alloc;
- rc = sfc_dma_alloc(sa, "mac_stats", 0, EFX_MAC_STATS_SIZE,
+ mac_nstats = efx_nic_cfg_get(sa->nic)->enc_mac_stats_nstats;
+ mac_stats_size = RTE_ALIGN(mac_nstats * sizeof(uint64_t), EFX_BUF_SIZE);
+ rc = sfc_dma_alloc(sa, "mac_stats", 0, mac_stats_size,
sa->socket_id, &port->mac_stats_dma_mem);
if (rc != 0)
goto fail_mac_stats_dma_alloc;
@@ -470,10 +524,22 @@ sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
link_info->link_speed = ETH_SPEED_NUM_10G;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
+ case EFX_LINK_25000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_25G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
case EFX_LINK_40000FDX:
link_info->link_speed = ETH_SPEED_NUM_40G;
link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
break;
+ case EFX_LINK_50000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_50G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_100000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_100G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
default:
SFC_ASSERT(B_FALSE);
/* FALLTHROUGH */
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index abc53fb5..d8503e20 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -184,7 +184,6 @@ sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
return ptypes;
}
-#if EFSYS_OPT_RX_SCALE
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
struct rte_mbuf *m)
@@ -205,14 +204,6 @@ sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
m->ol_flags |= PKT_RX_RSS_HASH;
}
}
-#else
-static void
-sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
- __rte_unused unsigned int flags,
- __rte_unused struct rte_mbuf *m)
-{
-}
-#endif
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
@@ -393,6 +384,7 @@ sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
unsigned int *rxq_max_fill_level)
@@ -525,7 +517,8 @@ struct sfc_dp_rx sfc_efx_rx = {
.type = SFC_DP_RX,
.hw_fw_caps = 0,
},
- .features = SFC_DP_RX_FEAT_SCATTER,
+ .features = SFC_DP_RX_FEAT_SCATTER |
+ SFC_DP_RX_FEAT_CHECKSUM,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
@@ -608,7 +601,7 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
sfc_err(sa, "RxQ %u flush failed", sw_index);
if (rxq->state & SFC_RXQ_FLUSHED)
- sfc_info(sa, "RxQ %u flushed", sw_index);
+ sfc_notice(sa, "RxQ %u flushed", sw_index);
}
sa->dp_rx->qpurge(rxq->dp);
@@ -617,7 +610,8 @@ sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
- boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
+ struct sfc_rss *rss = &sa->rss;
+ boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
struct sfc_port *port = &sa->port;
int rc;
@@ -629,7 +623,7 @@ sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
* repeat this step without promiscuous and all-multicast flags set
*/
retry:
- rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
+ rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
if (rc == 0)
return 0;
else if (rc != EOPNOTSUPP)
@@ -687,10 +681,37 @@ sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
if (rc != 0)
goto fail_ev_qstart;
- rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
- &rxq->mem, rxq_info->entries,
- 0 /* not used on EF10 */, rxq_info->type_flags,
- evq->common, &rxq->common);
+ switch (rxq_info->type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+ &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
+ rxq_info->type_flags, evq->common, &rxq->common);
+ break;
+ case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
+ struct rte_mempool *mp = rxq->refill_mb_pool;
+ struct rte_mempool_info mp_info;
+
+ rc = rte_mempool_ops_get_info(mp, &mp_info);
+ if (rc != 0) {
+ /* Positive errno is used in the driver */
+ rc = -rc;
+ goto fail_mp_get_info;
+ }
+ if (mp_info.contig_block_size <= 0) {
+ rc = EINVAL;
+ goto fail_bad_contig_block_size;
+ }
+ rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
+ mp_info.contig_block_size, rxq->buf_size,
+ mp->header_size + mp->elt_size + mp->trailer_size,
+ sa->rxd_wait_timeout_ns,
+ &rxq->mem, rxq_info->entries, rxq_info->type_flags,
+ evq->common, &rxq->common);
+ break;
+ }
+ default:
+ rc = ENOTSUP;
+ }
if (rc != 0)
goto fail_rx_qcreate;
@@ -721,6 +742,8 @@ fail_dp_qstart:
sfc_rx_qflush(sa, sw_index);
fail_rx_qcreate:
+fail_bad_contig_block_size:
+fail_mp_get_info:
sfc_ev_qstop(evq);
fail_ev_qstart:
@@ -770,9 +793,12 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
caps |= DEV_RX_OFFLOAD_CRC_STRIP;
- caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
- caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
- caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ if (sa->dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
+ caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+ caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
+ caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
+ }
if (encp->enc_tunnel_encapsulations_supported &&
(sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
@@ -792,51 +818,11 @@ sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
return caps;
}
-static void
-sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
- const char *verdict, uint64_t offloads)
-{
- unsigned long long bit;
-
- while ((bit = __builtin_ffsll(offloads)) != 0) {
- uint64_t flag = (1ULL << --bit);
-
- sfc_err(sa, "Rx %s offload %s %s", offload_group,
- rte_eth_dev_rx_offload_name(flag), verdict);
-
- offloads &= ~flag;
- }
-}
-
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_rx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
- const struct rte_eth_rxconf *rx_conf)
+ const struct rte_eth_rxconf *rx_conf,
+ __rte_unused uint64_t offloads)
{
- uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
int rc = 0;
if (rx_conf->rx_thresh.pthresh != 0 ||
@@ -858,17 +844,6 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
rc = EINVAL;
}
- if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
- DEV_RX_OFFLOAD_CHECKSUM)
- sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-
- if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
- sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-
- if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
@@ -887,7 +862,7 @@ sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
order = MIN(order, rte_bsf32(data_off));
- return 1u << (order - 1);
+ return 1u << order;
}
static uint16_t
@@ -979,26 +954,29 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
struct rte_mempool *mb_pool)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_rss *rss = &sa->rss;
int rc;
unsigned int rxq_entries;
unsigned int evq_entries;
unsigned int rxq_max_fill_level;
+ uint64_t offloads;
uint16_t buf_size;
struct sfc_rxq_info *rxq_info;
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
- rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
- &rxq_max_fill_level);
+ rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
+ &evq_entries, &rxq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
- SFC_ASSERT(rxq_entries >= nb_rx_desc);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
- rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
+ offloads = rx_conf->offloads |
+ sa->eth_dev->data->dev_conf.rxmode.offloads;
+ rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
@@ -1011,7 +989,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
}
if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
"object size is too small", sw_index);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -1027,9 +1005,14 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
rxq_info->entries = rxq_entries;
- rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
+ if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
+ rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
+ else
+ rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
rxq_info->type_flags =
- (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ (offloads & DEV_RX_OFFLOAD_SCATTER) ?
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
@@ -1054,6 +1037,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
rxq->refill_threshold =
RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
rxq->refill_mb_pool = mb_pool;
+ rxq->buf_size = buf_size;
rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
socket_id, &rxq->mem);
@@ -1068,10 +1052,8 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.batch_max = encp->enc_rx_batch_max;
info.prefix_size = encp->enc_rx_prefix_size;
-#if EFSYS_OPT_RX_SCALE
- if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
+ if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
info.flags |= SFC_RXQ_FLAG_RSS_HASH;
-#endif
info.rxq_entries = rxq_info->entries;
info.rxq_hw_ring = rxq->mem.esm_base;
@@ -1079,6 +1061,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = rxq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
+ info.vi_window_shift = encp->enc_vi_window_shift;
rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
@@ -1140,85 +1123,228 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
rte_free(rxq);
}
-#if EFSYS_OPT_RX_SCALE
-efx_rx_hash_type_t
-sfc_rte_to_efx_hash_type(uint64_t rss_hf)
+/*
+ * Mapping between RTE RSS hash functions and their EFX counterparts.
+ */
+struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
+ { ETH_RSS_NONFRAG_IPV4_TCP,
+ EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV4_UDP,
+ EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+ EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+ EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
+ { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+ EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
+ EFX_RX_HASH(IPV4, 2TUPLE) },
+ { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
+ ETH_RSS_IPV6_EX,
+ EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
+ EFX_RX_HASH(IPV6, 2TUPLE) }
+};
+
+static efx_rx_hash_type_t
+sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
+ unsigned int *hash_type_flags_supported,
+ unsigned int nb_hash_type_flags_supported)
+{
+ efx_rx_hash_type_t hash_type_masked = 0;
+ unsigned int i, j;
+
+ for (i = 0; i < nb_hash_type_flags_supported; ++i) {
+ unsigned int class_tuple_lbn[] = {
+ EFX_RX_CLASS_IPV4_TCP_LBN,
+ EFX_RX_CLASS_IPV4_UDP_LBN,
+ EFX_RX_CLASS_IPV4_LBN,
+ EFX_RX_CLASS_IPV6_TCP_LBN,
+ EFX_RX_CLASS_IPV6_UDP_LBN,
+ EFX_RX_CLASS_IPV6_LBN
+ };
+
+ for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
+ unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
+ unsigned int flag;
+
+ tuple_mask <<= class_tuple_lbn[j];
+ flag = hash_type & tuple_mask;
+
+ if (flag == hash_type_flags_supported[i])
+ hash_type_masked |= flag;
+ }
+ }
+
+ return hash_type_masked;
+}
+
+int
+sfc_rx_hash_init(struct sfc_adapter *sa)
+{
+ struct sfc_rss *rss = &sa->rss;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
+ efx_rx_hash_alg_t alg;
+ unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
+ unsigned int nb_flags_supp;
+ struct sfc_rss_hf_rte_to_efx *hf_map;
+ struct sfc_rss_hf_rte_to_efx *entry;
+ efx_rx_hash_type_t efx_hash_types;
+ unsigned int i;
+ int rc;
+
+ if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
+ alg = EFX_RX_HASHALG_TOEPLITZ;
+ else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
+ alg = EFX_RX_HASHALG_PACKED_STREAM;
+ else
+ return EINVAL;
+
+ rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
+ &nb_flags_supp);
+ if (rc != 0)
+ return rc;
+
+ hf_map = rte_calloc_socket("sfc-rss-hf-map",
+ RTE_DIM(sfc_rss_hf_map),
+ sizeof(*hf_map), 0, sa->socket_id);
+ if (hf_map == NULL)
+ return ENOMEM;
+
+ entry = hf_map;
+ efx_hash_types = 0;
+ for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
+ efx_rx_hash_type_t ht;
+
+ ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
+ flags_supp, nb_flags_supp);
+ if (ht != 0) {
+ entry->rte = sfc_rss_hf_map[i].rte;
+ entry->efx = ht;
+ efx_hash_types |= ht;
+ ++entry;
+ }
+ }
+
+ rss->hash_alg = alg;
+ rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
+ rss->hf_map = hf_map;
+ rss->hash_types = efx_hash_types;
+
+ return 0;
+}
+
+void
+sfc_rx_hash_fini(struct sfc_adapter *sa)
{
- efx_rx_hash_type_t efx_hash_types = 0;
+ struct sfc_rss *rss = &sa->rss;
- if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
- efx_hash_types |= EFX_RX_HASH_IPV4;
+ rte_free(rss->hf_map);
+}
- if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
- efx_hash_types |= EFX_RX_HASH_TCPIPV4;
+int
+sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
+ efx_rx_hash_type_t *efx)
+{
+ struct sfc_rss *rss = &sa->rss;
+ efx_rx_hash_type_t hash_types = 0;
+ unsigned int i;
- if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
- efx_hash_types |= EFX_RX_HASH_IPV6;
+ for (i = 0; i < rss->hf_map_nb_entries; ++i) {
+ uint64_t rte_mask = rss->hf_map[i].rte;
- if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
- efx_hash_types |= EFX_RX_HASH_TCPIPV6;
+ if ((rte & rte_mask) != 0) {
+ rte &= ~rte_mask;
+ hash_types |= rss->hf_map[i].efx;
+ }
+ }
+
+ if (rte != 0) {
+ sfc_err(sa, "unsupported hash functions requested");
+ return EINVAL;
+ }
- return efx_hash_types;
+ *efx = hash_types;
+
+ return 0;
}
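sfc_rx_hf_rte_to_efx() translates a bitmask through the per-adapter map, consuming each recognized bit, and treats any leftover bits as an unsupported request. A standalone sketch of the same translate-and-check pattern with a hypothetical two-entry map:

    #include <stdint.h>
    #include <stdio.h>

    struct hf_map { uint64_t rte; uint32_t efx; };

    /* Hypothetical mapping: RTE-style bit -> EFX-style bit */
    static const struct hf_map map[] = {
            { 0x1, 0x10 },  /* e.g. IPv4/TCP */
            { 0x2, 0x20 },  /* e.g. IPv4/UDP */
    };

    static int
    hf_rte_to_efx(uint64_t rte, uint32_t *efx)
    {
            uint32_t out = 0;
            unsigned int i;

            for (i = 0; i < 2; i++) {
                    if (rte & map[i].rte) {
                            rte &= ~map[i].rte;     /* consume the bit */
                            out |= map[i].efx;
                    }
            }
            if (rte != 0)
                    return -1;      /* leftover bits are unsupported */
            *efx = out;
            return 0;
    }

    int
    main(void)
    {
            uint32_t efx;

            printf("%d\n", hf_rte_to_efx(0x3, &efx)); /* 0, efx = 0x30 */
            printf("%d\n", hf_rte_to_efx(0x5, &efx)); /* -1, 0x4 unknown */
            return 0;
    }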
uint64_t
-sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
+sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx)
+{
+ struct sfc_rss *rss = &sa->rss;
+ uint64_t rte = 0;
+ unsigned int i;
+
+ for (i = 0; i < rss->hf_map_nb_entries; ++i) {
+ efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;
+
+ if ((efx & hash_type) == hash_type)
+ rte |= rss->hf_map[i].rte;
+ }
+
+ return rte;
+}
+
+static int
+sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
+ struct rte_eth_rss_conf *conf)
{
- uint64_t rss_hf = 0;
+ struct sfc_rss *rss = &sa->rss;
+ efx_rx_hash_type_t efx_hash_types = rss->hash_types;
+ uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types);
+ int rc;
- if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
- rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER);
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
+ if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
+ conf->rss_key != NULL)
+ return EINVAL;
+ }
- if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (conf->rss_hf != 0) {
+ rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
+ if (rc != 0)
+ return rc;
+ }
- if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
- rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);
+ if (conf->rss_key != NULL) {
+ if (conf->rss_key_len != sizeof(rss->key)) {
+ sfc_err(sa, "RSS key size is wrong (should be %lu)",
+ sizeof(rss->key));
+ return EINVAL;
+ }
+ rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
+ }
- if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
- rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);
+ rss->hash_types = efx_hash_types;
- return rss_hf;
+ return 0;
}
-#endif
-#if EFSYS_OPT_RX_SCALE
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
int rc = 0;
- if (sa->rss_channels > 0) {
+ if (rss->channels > 0) {
rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- EFX_RX_HASHALG_TOEPLITZ,
- sa->rss_hash_types, B_TRUE);
+ rss->hash_alg, rss->hash_types,
+ B_TRUE);
if (rc != 0)
goto finish;
rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- sa->rss_key,
- sizeof(sa->rss_key));
+ rss->key, sizeof(rss->key));
if (rc != 0)
goto finish;
rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- sa->rss_tbl, RTE_DIM(sa->rss_tbl));
+ rss->tbl, RTE_DIM(rss->tbl));
}
finish:
return rc;
}
-#else
-static int
-sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
-{
- return 0;
-}
-#endif
int
sfc_rx_start(struct sfc_adapter *sa)
@@ -1294,37 +1420,48 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
sfc_rx_get_queue_offload_caps(sa);
- uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
+ struct sfc_rss *rss = &sa->rss;
int rc = 0;
switch (rxmode->mq_mode) {
case ETH_MQ_RX_NONE:
/* No special checks are required */
break;
-#if EFSYS_OPT_RX_SCALE
case ETH_MQ_RX_RSS:
- if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
sfc_err(sa, "RSS is not available");
rc = EINVAL;
}
break;
-#endif
default:
sfc_err(sa, "Rx multi-queue mode %u not supported",
rxmode->mq_mode);
rc = EINVAL;
}
- if (offloads_rejected) {
- sfc_rx_log_offloads(sa, "device", "is unsupported",
- offloads_rejected);
- rc = EINVAL;
- }
-
- if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+	/*
+	 * KEEP_CRC offload flag is not supported by the PMD;
+	 * this block can be removed when DEV_RX_OFFLOAD_CRC_STRIP is removed.
+	 */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
sfc_warn(sa, "FCS stripping cannot be disabled - always on");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- rxmode->hw_strip_crc = 1;
+ }
+
+ /*
+	 * Requested offloads are validated against those supported by
+	 * ethdev, so unsupported offloads cannot be added as a result of
+	 * the check below.
+ */
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+ (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+ sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ }
+
+ if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
+ rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
}
return rc;
@@ -1361,6 +1498,7 @@ sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
int
sfc_rx_configure(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
int rc;
@@ -1410,21 +1548,26 @@ sfc_rx_configure(struct sfc_adapter *sa)
sa->rxq_count++;
}
-#if EFSYS_OPT_RX_SCALE
- sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
- MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+ rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
- if (sa->rss_channels > 0) {
+ if (rss->channels > 0) {
+ struct rte_eth_rss_conf *adv_conf_rss;
unsigned int sw_index;
for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
- sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
+ rss->tbl[sw_index] = sw_index % rss->channels;
+
+ adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
+ rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
+ if (rc != 0)
+ goto fail_rx_process_adv_conf_rss;
}
-#endif
done:
return 0;
+fail_rx_process_adv_conf_rss:
fail_rx_qinit_info:
fail_rxqs_realloc:
fail_rxqs_alloc:
@@ -1443,9 +1586,11 @@ fail_check_mode:
void
sfc_rx_close(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
+
sfc_rx_fini_queues(sa, 0);
- sa->rss_channels = 0;
+ rss->channels = 0;
rte_free(sa->rxq_info);
sa->rxq_info = NULL;
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index 6706ee6f..3fba7d8a 100644
--- a/drivers/net/sfc/sfc_rx.h
+++ b/drivers/net/sfc/sfc_rx.h
@@ -60,6 +60,7 @@ struct sfc_rxq {
unsigned int hw_index;
unsigned int refill_threshold;
struct rte_mempool *refill_mb_pool;
+ uint16_t buf_size;
struct sfc_dp_rxq *dp;
unsigned int state;
};
@@ -152,10 +153,12 @@ unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa,
unsigned int sw_index);
int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset);
-#if EFSYS_OPT_RX_SCALE
-efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf);
-uint64_t sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types);
-#endif
+int sfc_rx_hash_init(struct sfc_adapter *sa);
+void sfc_rx_hash_fini(struct sfc_adapter *sa);
+int sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
+ efx_rx_hash_type_t *efx);
+uint64_t sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa,
+ efx_rx_hash_type_t efx);
#ifdef __cplusplus
}
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
index ba8496df..effe9853 100644
--- a/drivers/net/sfc/sfc_tso.c
+++ b/drivers/net/sfc/sfc_tso.c
@@ -164,7 +164,8 @@ sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
sent_seq = rte_be_to_cpu_32(sent_seq);
- efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
+ efx_tx_qdesc_tso2_create(txq->common, packet_id, 0, sent_seq,
+ m->tso_segsz,
*pend, EFX_TX_FATSOV2_OPT_NDESCS);
*pend += EFX_TX_FATSOV2_OPT_NDESCS;
diff --git a/drivers/net/sfc/sfc_tweak.h b/drivers/net/sfc/sfc_tweak.h
index b4026851..4d543f68 100644
--- a/drivers/net/sfc/sfc_tweak.h
+++ b/drivers/net/sfc/sfc_tweak.h
@@ -34,4 +34,12 @@
/** Number of mbufs to be freed in bulk in a single call */
#define SFC_TX_REAP_BULK_SIZE 32
+/**
+ * Default head-of-line blocking timeout: how long to wait for an Rx
+ * descriptor before dropping a packet because none are available.
+ *
+ * Used by the DPDK firmware variant with equal stride super-buffer Rx
+ * mode only.
+ */
+#define SFC_RXD_WAIT_TIMEOUT_NS_DEF (200U * 1000)
+
#endif /* _SFC_TWEAK_H_ */
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 757b03ba..6d42a1a6 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -73,48 +73,10 @@ sfc_tx_get_queue_offload_caps(struct sfc_adapter *sa)
return caps;
}
-static void
-sfc_tx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
- const char *verdict, uint64_t offloads)
-{
- unsigned long long bit;
-
- while ((bit = __builtin_ffsll(offloads)) != 0) {
- uint64_t flag = (1ULL << --bit);
-
- sfc_err(sa, "Tx %s offload %s %s", offload_group,
- rte_eth_dev_tx_offload_name(flag), verdict);
-
- offloads &= ~flag;
- }
-}
-
-static int
-sfc_tx_queue_offload_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.txmode.offloads;
- uint64_t supported = sfc_tx_get_dev_offload_caps(sa) |
- sfc_tx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_tx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_tx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
static int
sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf,
+ uint64_t offloads)
{
int rc = 0;
@@ -138,15 +100,12 @@ sfc_tx_qcheck_conf(struct sfc_adapter *sa, unsigned int txq_max_fill_level,
}
/* We either perform both TCP and UDP offload, or no offload at all */
- if (((tx_conf->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
- ((tx_conf->offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
+ if (((offloads & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) !=
+ ((offloads & DEV_TX_OFFLOAD_UDP_CKSUM) == 0)) {
sfc_err(sa, "TCP and UDP offloads can't be set independently");
rc = EINVAL;
}
- if (sfc_tx_queue_offload_mismatch(sa, tx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
@@ -171,6 +130,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
struct sfc_txq *txq;
int rc = 0;
struct sfc_dp_tx_qcreate_info info;
+ uint64_t offloads;
sfc_log_init(sa, "TxQ = %u", sw_index);
@@ -183,7 +143,9 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
SFC_ASSERT(txq_entries >= nb_tx_desc);
SFC_ASSERT(txq_max_fill_level <= nb_tx_desc);
- rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf);
+ offloads = tx_conf->offloads |
+ sa->eth_dev->data->dev_conf.txmode.offloads;
+ rc = sfc_tx_qcheck_conf(sa, txq_max_fill_level, tx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
@@ -209,8 +171,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
txq->free_thresh =
(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
- txq->flags = tx_conf->txq_flags;
- txq->offloads = tx_conf->offloads;
+ txq->offloads = offloads;
rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
socket_id, &txq->mem);
@@ -220,8 +181,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
memset(&info, 0, sizeof(info));
info.max_fill_level = txq_max_fill_level;
info.free_thresh = txq->free_thresh;
- info.flags = tx_conf->txq_flags;
- info.offloads = tx_conf->offloads;
+ info.offloads = offloads;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
info.txq_hw_ring = txq->mem.esm_base;
@@ -229,6 +189,7 @@ sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
info.evq_hw_ring = evq->mem.esm_base;
info.hw_index = txq->hw_index;
info.mem_bar = sa->mem_bar.esb_base;
+ info.vi_window_shift = encp->enc_vi_window_shift;
rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
@@ -303,9 +264,6 @@ sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
static int
sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
{
- uint64_t offloads_supported = sfc_tx_get_dev_offload_caps(sa) |
- sfc_tx_get_queue_offload_caps(sa);
- uint64_t offloads_rejected = txmode->offloads & ~offloads_supported;
int rc = 0;
switch (txmode->mq_mode) {
@@ -336,12 +294,6 @@ sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
rc = EINVAL;
}
- if (offloads_rejected) {
- sfc_tx_log_offloads(sa, "device", "is unsupported",
- offloads_rejected);
- rc = EINVAL;
- }
-
return rc;
}
@@ -477,39 +429,21 @@ sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
if (rc != 0)
goto fail_ev_qstart;
- /*
- * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
- * application which expects that IPv4 checksum offload is enabled
- * all the time as there is no legacy flag to turn off the offload.
- */
- if ((txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ||
- (~txq->flags & ETH_TXQ_FLAGS_IGNORE))
+ if (txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_IPV4;
- if ((txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
- ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)))
+ if (txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_IPV4;
if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
(txq->offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
flags |= EFX_TXQ_CKSUM_TCPUDP;
- if ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
+ if (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
- /*
- * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
- * application. In turn, the absence of ETH_TXQ_FLAGS_NOXSUMTCP is
- * associated specifically with a legacy application which expects
- * both TCP checksum offload and TSO to be enabled because the legacy
- * API does not provide a dedicated mechanism to control TSO.
- */
- if ((txq->offloads & DEV_TX_OFFLOAD_TCP_TSO) ||
- ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (~txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP)))
+ if (txq->offloads & DEV_TX_OFFLOAD_TCP_TSO)
flags |= EFX_TXQ_FATSOV2;
rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
@@ -606,7 +540,7 @@ sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
sfc_err(sa, "TxQ %u flush timed out", sw_index);
if (txq->state & SFC_TXQ_FLUSHED)
- sfc_info(sa, "TxQ %u flushed", sw_index);
+ sfc_notice(sa, "TxQ %u flushed", sw_index);
}
sa->dp_tx->qreap(txq->dp);
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
index c2e5f13e..146b805c 100644
--- a/drivers/net/sfc/sfc_tx.h
+++ b/drivers/net/sfc/sfc_tx.h
@@ -58,7 +58,6 @@ struct sfc_txq {
struct sfc_dp_txq *dp;
efx_txq_t *common;
unsigned int free_thresh;
- unsigned int flags;
uint64_t offloads;
};
diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
index d56fecd6..ea9b65f4 100644
--- a/drivers/net/softnic/Makefile
+++ b/drivers/net/softnic/Makefile
@@ -8,13 +8,15 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_softnic.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_pipeline -lrte_port -lrte_table
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched
LDLIBS += -lrte_bus_vdev
-EXPORT_MAP := rte_pmd_eth_softnic_version.map
+EXPORT_MAP := rte_pmd_softnic_version.map
LIBABIVER := 1
@@ -22,11 +24,30 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_mempool.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_swq.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_link.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tap.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_action.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_pipeline.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cli.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += parser.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += conn.c
#
# Export include files
#
SYMLINK-y-include += rte_eth_softnic.h
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(info Softnic PMD can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+clean:
+else
+
include $(RTE_SDK)/mk/rte.lib.mk
+
+endif
diff --git a/drivers/net/softnic/conn.c b/drivers/net/softnic/conn.c
new file mode 100644
index 00000000..990cf40f
--- /dev/null
+++ b/drivers/net/softnic/conn.c
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+#define __USE_GNU
+#include <sys/socket.h>
+
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <errno.h>
+
+#include "conn.h"
+
+#define MSG_CMD_TOO_LONG "Command too long."
+
+struct softnic_conn {
+ char *welcome;
+ char *prompt;
+ char *buf;
+ char *msg_in;
+ char *msg_out;
+ size_t buf_size;
+ size_t msg_in_len_max;
+ size_t msg_out_len_max;
+ size_t msg_in_len;
+ int fd_server;
+ int fd_client_group;
+ softnic_conn_msg_handle_t msg_handle;
+ void *msg_handle_arg;
+};
+
+struct softnic_conn *
+softnic_conn_init(struct softnic_conn_params *p)
+{
+ struct sockaddr_in server_address;
+ struct softnic_conn *conn;
+ int fd_server, fd_client_group, status;
+
+ memset(&server_address, 0, sizeof(server_address));
+
+ /* Check input arguments */
+ if (p == NULL ||
+ p->welcome == NULL ||
+ p->prompt == NULL ||
+ p->addr == NULL ||
+ p->buf_size == 0 ||
+ p->msg_in_len_max == 0 ||
+ p->msg_out_len_max == 0 ||
+ p->msg_handle == NULL)
+ return NULL;
+
+ status = inet_aton(p->addr, &server_address.sin_addr);
+ if (status == 0)
+ return NULL;
+
+ /* Memory allocation */
+ conn = calloc(1, sizeof(struct softnic_conn));
+ if (conn == NULL)
+ return NULL;
+
+ conn->welcome = calloc(1, CONN_WELCOME_LEN_MAX + 1);
+ conn->prompt = calloc(1, CONN_PROMPT_LEN_MAX + 1);
+ conn->buf = calloc(1, p->buf_size);
+ conn->msg_in = calloc(1, p->msg_in_len_max + 1);
+ conn->msg_out = calloc(1, p->msg_out_len_max + 1);
+
+ if (conn->welcome == NULL ||
+ conn->prompt == NULL ||
+ conn->buf == NULL ||
+ conn->msg_in == NULL ||
+ conn->msg_out == NULL) {
+ softnic_conn_free(conn);
+ return NULL;
+ }
+
+ /* Server socket */
+ server_address.sin_family = AF_INET;
+ server_address.sin_port = htons(p->port);
+
+ fd_server = socket(AF_INET,
+ SOCK_STREAM | SOCK_NONBLOCK,
+ 0);
+ if (fd_server == -1) {
+ softnic_conn_free(conn);
+ return NULL;
+ }
+
+ status = bind(fd_server,
+ (struct sockaddr *)&server_address,
+ sizeof(server_address));
+ if (status == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ status = listen(fd_server, 16);
+ if (status == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ /* Client group */
+ fd_client_group = epoll_create(1);
+ if (fd_client_group == -1) {
+ softnic_conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ /* Fill in */
+ strncpy(conn->welcome, p->welcome, CONN_WELCOME_LEN_MAX);
+ strncpy(conn->prompt, p->prompt, CONN_PROMPT_LEN_MAX);
+ conn->buf_size = p->buf_size;
+ conn->msg_in_len_max = p->msg_in_len_max;
+ conn->msg_out_len_max = p->msg_out_len_max;
+ conn->msg_in_len = 0;
+ conn->fd_server = fd_server;
+ conn->fd_client_group = fd_client_group;
+ conn->msg_handle = p->msg_handle;
+ conn->msg_handle_arg = p->msg_handle_arg;
+
+ return conn;
+}
+
+void
+softnic_conn_free(struct softnic_conn *conn)
+{
+ if (conn == NULL)
+ return;
+
+ if (conn->fd_client_group)
+ close(conn->fd_client_group);
+
+ if (conn->fd_server)
+ close(conn->fd_server);
+
+ free(conn->msg_out);
+ free(conn->msg_in);
+ free(conn->prompt);
+ free(conn->welcome);
+ free(conn);
+}
+
+int
+softnic_conn_poll_for_conn(struct softnic_conn *conn)
+{
+ struct sockaddr_in client_address;
+ struct epoll_event event;
+ socklen_t client_address_length;
+ int fd_client, status;
+
+ /* Check input arguments */
+ if (conn == NULL)
+ return -1;
+
+ /* Server socket */
+ client_address_length = sizeof(client_address);
+ fd_client = accept4(conn->fd_server,
+ (struct sockaddr *)&client_address,
+ &client_address_length,
+ SOCK_NONBLOCK);
+ if (fd_client == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ return 0;
+
+ return -1;
+ }
+
+ /* Client group */
+ event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP;
+ event.data.fd = fd_client;
+
+ status = epoll_ctl(conn->fd_client_group,
+ EPOLL_CTL_ADD,
+ fd_client,
+ &event);
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ /* Client */
+ status = write(fd_client,
+ conn->welcome,
+ strlen(conn->welcome));
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ status = write(fd_client,
+ conn->prompt,
+ strlen(conn->prompt));
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+data_event_handle(struct softnic_conn *conn,
+ int fd_client)
+{
+ ssize_t len, i, status;
+
+ /* Read input message */
+
+ len = read(fd_client,
+ conn->buf,
+ conn->buf_size);
+ if (len == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ return 0;
+
+ return -1;
+ }
+ if (len == 0)
+ return 0;
+
+ /* Handle input messages */
+ for (i = 0; i < len; i++) {
+ if (conn->buf[i] == '\n') {
+ size_t n;
+
+ conn->msg_in[conn->msg_in_len] = 0;
+ conn->msg_out[0] = 0;
+
+ conn->msg_handle(conn->msg_in,
+ conn->msg_out,
+ conn->msg_out_len_max,
+ conn->msg_handle_arg);
+
+ n = strlen(conn->msg_out);
+ if (n) {
+ status = write(fd_client,
+ conn->msg_out,
+ n);
+ if (status == -1)
+ return status;
+ }
+
+ conn->msg_in_len = 0;
+ } else if (conn->msg_in_len < conn->msg_in_len_max) {
+ conn->msg_in[conn->msg_in_len] = conn->buf[i];
+ conn->msg_in_len++;
+ } else {
+ status = write(fd_client,
+ MSG_CMD_TOO_LONG,
+ strlen(MSG_CMD_TOO_LONG));
+ if (status == -1)
+ return status;
+
+ conn->msg_in_len = 0;
+ }
+ }
+
+ /* Write prompt */
+ status = write(fd_client,
+ conn->prompt,
+ strlen(conn->prompt));
+ if (status == -1)
+ return status;
+
+ return 0;
+}
+
+static int
+control_event_handle(struct softnic_conn *conn,
+ int fd_client)
+{
+ int status;
+
+ status = epoll_ctl(conn->fd_client_group,
+ EPOLL_CTL_DEL,
+ fd_client,
+ NULL);
+ if (status == -1)
+ return -1;
+
+ status = close(fd_client);
+ if (status == -1)
+ return -1;
+
+ return 0;
+}
+
+int
+softnic_conn_poll_for_msg(struct softnic_conn *conn)
+{
+ struct epoll_event event;
+ int fd_client, status, status_data = 0, status_control = 0;
+
+ /* Check input arguments */
+ if (conn == NULL)
+ return -1;
+
+ /* Client group */
+ status = epoll_wait(conn->fd_client_group,
+ &event,
+ 1,
+ 0);
+ if (status == -1)
+ return -1;
+ if (status == 0)
+ return 0;
+
+ fd_client = event.data.fd;
+
+ /* Data available */
+ if (event.events & EPOLLIN)
+ status_data = data_event_handle(conn, fd_client);
+
+ /* Control events */
+ if (event.events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP))
+ status_control = control_event_handle(conn, fd_client);
+
+ if (status_data || status_control)
+ return -1;
+
+ return 0;
+}
diff --git a/drivers/net/softnic/conn.h b/drivers/net/softnic/conn.h
new file mode 100644
index 00000000..631edeef
--- /dev/null
+++ b/drivers/net/softnic/conn.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_CONN_H__
+#define __INCLUDE_CONN_H__
+
+#include <stdint.h>
+
+struct softnic_conn;
+
+#ifndef CONN_WELCOME_LEN_MAX
+#define CONN_WELCOME_LEN_MAX 1024
+#endif
+
+#ifndef CONN_PROMPT_LEN_MAX
+#define CONN_PROMPT_LEN_MAX 16
+#endif
+
+typedef void (*softnic_conn_msg_handle_t)(char *msg_in,
+ char *msg_out,
+ size_t msg_out_len_max,
+ void *arg);
+
+struct softnic_conn_params {
+ const char *welcome;
+ const char *prompt;
+ const char *addr;
+ uint16_t port;
+ size_t buf_size;
+ size_t msg_in_len_max;
+ size_t msg_out_len_max;
+ softnic_conn_msg_handle_t msg_handle;
+ void *msg_handle_arg;
+};
+
+struct softnic_conn *
+softnic_conn_init(struct softnic_conn_params *p);
+
+void
+softnic_conn_free(struct softnic_conn *conn);
+
+int
+softnic_conn_poll_for_conn(struct softnic_conn *conn);
+
+int
+softnic_conn_poll_for_msg(struct softnic_conn *conn);
+
+#endif
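/*
 * Editorial usage sketch (not part of the patch; names hypothetical): the
 * owner of the connection object polls it from its main loop, first for new
 * clients, then for pending messages from already-connected clients.
 */
#include <stdio.h>
#include "conn.h"

static void
demo_msg_handle(char *msg_in, char *msg_out, size_t msg_out_len_max,
	void *arg)
{
	(void)arg;
	snprintf(msg_out, msg_out_len_max, "echo: %s\n", msg_in);
}

static void
demo_serve(void)
{
	struct softnic_conn_params p = {
		.welcome = "Welcome!\n",
		.prompt = "> ",
		.addr = "0.0.0.0",
		.port = 8086,
		.buf_size = 1024,
		.msg_in_len_max = 1024,
		.msg_out_len_max = 1024,
		.msg_handle = demo_msg_handle,
		.msg_handle_arg = NULL,
	};
	struct softnic_conn *conn = softnic_conn_init(&p);

	while (conn != NULL) {
		softnic_conn_poll_for_conn(conn); /* accept at most one client */
		softnic_conn_poll_for_msg(conn);  /* serve at most one event */
	}
}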
diff --git a/drivers/net/softnic/firmware.cli b/drivers/net/softnic/firmware.cli
new file mode 100644
index 00000000..300cf6e3
--- /dev/null
+++ b/drivers/net/softnic/firmware.cli
@@ -0,0 +1,21 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2018 Intel Corporation
+
+link LINK dev 0000:02:00.0
+
+pipeline RX period 10 offset_port_id 0
+pipeline RX port in bsz 32 link LINK rxq 0
+pipeline RX port out bsz 32 swq RXQ0
+pipeline RX table match stub
+pipeline RX port in 0 table 0
+pipeline RX table 0 rule add match default action fwd port 0
+
+pipeline TX period 10 offset_port_id 0
+pipeline TX port in bsz 32 swq TXQ0
+pipeline TX port out bsz 32 link LINK txq 0
+pipeline TX table match stub
+pipeline TX port in 0 table 0
+pipeline TX table 0 rule add match default action fwd port 0
+
+thread 1 pipeline RX enable
+thread 1 pipeline TX enable
diff --git a/drivers/net/softnic/hash_func.h b/drivers/net/softnic/hash_func.h
new file mode 100644
index 00000000..198d2b20
--- /dev/null
+++ b/drivers/net/softnic/hash_func.h
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_HASH_FUNC_H__
+#define __INCLUDE_HASH_FUNC_H__
+
+#include <rte_common.h>
+
+static inline uint64_t
+hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = seed ^ (k[0] & m[0]);
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+
+ xor0 ^= k[2] & m[2];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+
+ xor0 ^= xor1;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+
+ xor0 ^= xor1;
+
+ xor0 ^= k[4] & m[4];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+
+ xor0 ^= xor1;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+
+ xor0 ^= xor1;
+ xor2 ^= k[6] & m[6];
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t xor0, xor1, xor2, xor3;
+
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+ xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
+
+ xor0 ^= xor1;
+ xor2 ^= xor3;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+#if defined(RTE_ARCH_X86_64)
+
+#include <x86intrin.h>
+
+static inline uint64_t
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t crc0;
+
+ crc0 = _mm_crc32_u64(seed, k[0] & m[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, crc0, crc1;
+
+ k0 = k[0] & m[0];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc0 = _mm_crc32_u64(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, k5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
+ crc5 = k5 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
+ crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#elif defined(RTE_ARCH_ARM64)
+#include "hash_func_arm64.h"
+#else
+
+#define hash_default_key8 hash_xor_key8
+#define hash_default_key16 hash_xor_key16
+#define hash_default_key24 hash_xor_key24
+#define hash_default_key32 hash_xor_key32
+#define hash_default_key40 hash_xor_key40
+#define hash_default_key48 hash_xor_key48
+#define hash_default_key56 hash_xor_key56
+#define hash_default_key64 hash_xor_key64
+
+#endif
+
+#endif
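/*
 * Editorial usage sketch (not part of the patch): the _keyNN suffix is the
 * key size in bytes; key and mask are read as arrays of uint64_t, so a
 * 16-byte key is two words.  On x86-64 hash_default_key16 resolves to the
 * CRC32 variant above; arm64 pulls in hash_func_arm64.h; other targets
 * fall back to the XOR variant.
 */
#include <stdint.h>

uint64_t demo_key[2]  = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };
uint64_t demo_mask[2] = { UINT64_MAX, 0x00000000ffffffffULL };
uint64_t demo_sig = hash_default_key16(demo_key, demo_mask, 16, 0 /* seed */);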
diff --git a/examples/ip_pipeline/pipeline/hash_func_arm64.h b/drivers/net/softnic/hash_func_arm64.h
index ae6c0f41..ae6c0f41 100644
--- a/examples/ip_pipeline/pipeline/hash_func_arm64.h
+++ b/drivers/net/softnic/hash_func_arm64.h
diff --git a/drivers/net/softnic/meson.build b/drivers/net/softnic/meson.build
new file mode 100644
index 00000000..ff982274
--- /dev/null
+++ b/drivers/net/softnic/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+install_headers('rte_eth_softnic.h')
+sources = files('rte_eth_softnic_tm.c',
+ 'rte_eth_softnic.c',
+ 'rte_eth_softnic_mempool.c',
+ 'rte_eth_softnic_swq.c',
+ 'rte_eth_softnic_link.c',
+ 'rte_eth_softnic_tap.c',
+ 'rte_eth_softnic_action.c',
+ 'rte_eth_softnic_pipeline.c',
+ 'rte_eth_softnic_thread.c',
+ 'rte_eth_softnic_cli.c',
+ 'parser.c',
+ 'conn.c')
+deps += ['pipeline', 'port', 'table', 'sched']
diff --git a/drivers/net/softnic/parser.c b/drivers/net/softnic/parser.c
new file mode 100644
index 00000000..a8688a21
--- /dev/null
+++ b/drivers/net/softnic/parser.c
@@ -0,0 +1,703 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ */
+
+/* For inet_pton4() and inet_pton6() functions:
+ *
+ * Copyright (c) 1996 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
+ * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
+ * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+softnic_parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
+
+int
+softnic_parser_read_int32(int32_t *value, const char *p)
+{
+ char *next;
+ int32_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtol(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
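/*
 * Editorial examples (not part of the patch): the suffixes are binary
 * multipliers applied cumulatively through the fall-through cases above,
 * so K = 2^10, M = 2^20, G = 2^30 and T = 2^40.
 */
uint64_t demo_val;

softnic_parser_read_uint64(&demo_val, "16K");  /* demo_val == 16384 */
softnic_parser_read_uint64(&demo_val, "2M");   /* demo_val == 2097152 */
softnic_parser_read_uint64(&demo_val, "2 M");  /* -EINVAL: no gap allowed */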
+
+int
+softnic_parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoul(p, &next, 16);
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = softnic_parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+softnic_parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if (string == NULL ||
+ tokens == NULL ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if (i == *n_tokens &&
+ strtok_r(string, PARSE_DELIMITER, &string) != NULL)
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
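/*
 * Editorial usage sketch (not part of the patch): tokenization is
 * destructive (strtok_r), so the caller passes a writable copy; *n_tokens
 * is the array capacity on input and the token count on output.
 */
char demo_line[] = "pipeline RX port in bsz 32";
char *demo_tokens[16];
uint32_t demo_n_tokens = 16;

if (softnic_parse_tokenize_string(demo_line, demo_tokens,
		&demo_n_tokens) == 0) {
	/* demo_n_tokens == 6; demo_tokens[0] == "pipeline", [5] == "32" */
}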
+
+int
+softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if (src == NULL ||
+ dst == NULL ||
+ size == NULL ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
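/*
 * Editorial example (not part of the patch): note the (len & 3) check
 * above, which requires the hex string length to be a multiple of four
 * nibbles, not merely even; no "0x" prefix is accepted.
 */
uint8_t demo_bytes[8];
uint32_t demo_size = sizeof(demo_bytes);

softnic_parse_hex_string("0a0b0c0d", demo_bytes, &demo_size);
/* demo_size == 4; demo_bytes holds { 0x0a, 0x0b, 0x0c, 0x0d } */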
+
+int
+softnic_parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+{
+ uint32_t n_max_labels = *n_labels, count = 0;
+
+ /* Check for void list of labels */
+ if (strcmp(string, "<void>") == 0) {
+ *n_labels = 0;
+ return 0;
+ }
+
+ /* At least one label should be present */
+ for ( ; (*string != '\0'); ) {
+ char *next;
+ int value;
+
+ if (count >= n_max_labels)
+ return -1;
+
+ if (count > 0) {
+ if (string[0] != ':')
+ return -1;
+
+ string++;
+ }
+
+ value = strtol(string, &next, 10);
+ if (next == string)
+ return -1;
+ string = next;
+
+ labels[count++] = (uint32_t)value;
+ }
+
+ *n_labels = count;
+ return 0;
+}
+
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned int dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char)((val >> 8) & 0xff);
+ *tp++ = (unsigned char)(val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /* Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+static struct ether_addr *
+my_ether_aton(const char *a)
+{
+ int i;
+ char *end;
+ unsigned long o[ETHER_ADDR_LEN];
+ static struct ether_addr ether_addr;
+
+ i = 0;
+ do {
+ errno = 0;
+ o[i] = strtoul(a, &end, 16);
+ if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+ return NULL;
+ a = end + 1;
+ } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0);
+
+	/* Junk at the end of the line */
+ if (end[0] != 0)
+ return NULL;
+
+ /* Support the format XX:XX:XX:XX:XX:XX */
+ if (i == ETHER_ADDR_LEN) {
+ while (i-- != 0) {
+ if (o[i] > UINT8_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i] = (uint8_t)o[i];
+ }
+ /* Support the format XXXX:XXXX:XXXX */
+ } else if (i == ETHER_ADDR_LEN / 2) {
+ while (i-- != 0) {
+ if (o[i] > UINT16_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8);
+ ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
+ }
+ /* unknown format */
+ } else
+ return NULL;
+
+ return (struct ether_addr *)&ether_addr;
+}
+
+int
+softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4)
+{
+ if (strlen(token) >= INET_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton4(token, (unsigned char *)ipv4) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6)
+{
+ if (strlen(token) >= INET6_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton6(token, (unsigned char *)ipv6) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+softnic_parse_mac_addr(const char *token, struct ether_addr *addr)
+{
+ struct ether_addr *tmp;
+
+ tmp = my_ether_aton(token);
+ if (tmp == NULL)
+ return -1;
+
+ memcpy(addr, tmp, sizeof(struct ether_addr));
+ return 0;
+}
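/*
 * Editorial examples (not part of the patch): both notations handled by
 * my_ether_aton() above are accepted.
 */
struct ether_addr demo_mac;

softnic_parse_mac_addr("00:11:22:33:44:55", &demo_mac);
softnic_parse_mac_addr("0011:2233:4455", &demo_mac);   /* same address */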
+
+int
+softnic_parse_cpu_core(const char *entry,
+ struct softnic_cpu_core_params *p)
+{
+ size_t num_len;
+ char num[8];
+
+ uint32_t s = 0, c = 0, h = 0, val;
+ uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
+ const char *next = skip_white_spaces(entry);
+ char type;
+
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
+ while (*next != '\0') {
+		/* If everything is parsed, nothing should be left */
+ if (s_parsed && c_parsed && h_parsed)
+ return -EINVAL;
+
+ type = *next;
+ switch (type) {
+ case 's':
+ case 'S':
+ if (s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+ s_parsed = 1;
+ next++;
+ break;
+ case 'c':
+ case 'C':
+ if (c_parsed || h_parsed)
+ return -EINVAL;
+ c_parsed = 1;
+ next++;
+ break;
+ case 'h':
+ case 'H':
+ if (h_parsed)
+ return -EINVAL;
+ h_parsed = 1;
+ next++;
+ break;
+ default:
+			/* If it starts with a digit, it must be the core id only. */
+ if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+
+ type = 'C';
+ }
+
+ for (num_len = 0; *next != '\0'; next++, num_len++) {
+ if (num_len == RTE_DIM(num))
+ return -EINVAL;
+
+ if (!isdigit(*next))
+ break;
+
+ num[num_len] = *next;
+ }
+
+ if (num_len == 0 && type != 'h' && type != 'H')
+ return -EINVAL;
+
+ if (num_len != 0 && (type == 'h' || type == 'H'))
+ return -EINVAL;
+
+ num[num_len] = '\0';
+ val = strtol(num, NULL, 10);
+
+ h = 0;
+ switch (type) {
+ case 's':
+ case 'S':
+ s = val;
+ break;
+ case 'c':
+ case 'C':
+ c = val;
+ break;
+ case 'h':
+ case 'H':
+ h = 1;
+ break;
+ }
+ }
+
+ p->socket_id = s;
+ p->core_id = c;
+ p->thread_id = h;
+ return 0;
+}
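/*
 * Editorial examples (not part of the patch): a bare number is a core id;
 * 's' and 'c' take numeric arguments, while 'h' is a flag selecting the
 * hyper-thread sibling.
 */
struct softnic_cpu_core_params demo_cp;

softnic_parse_cpu_core("2", &demo_cp);     /* socket 0, core 2, thread 0 */
softnic_parse_cpu_core("s1c3h", &demo_cp); /* socket 1, core 3, thread 1 */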
diff --git a/drivers/net/softnic/parser.h b/drivers/net/softnic/parser.h
new file mode 100644
index 00000000..1ee3f82a
--- /dev/null
+++ b/drivers/net/softnic/parser.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef __INCLUDE_SOFTNIC_PARSER_H__
+#define __INCLUDE_SOFTNIC_PARSER_H__
+
+#include <stdint.h>
+
+#include <rte_ip.h>
+#include <rte_ether.h>
+
+#define PARSE_DELIMITER " \f\n\r\t\v"
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
+
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int softnic_parser_read_arg_bool(const char *p);
+
+int softnic_parser_read_int32(int32_t *value, const char *p);
+
+int softnic_parser_read_uint64(uint64_t *value, const char *p);
+int softnic_parser_read_uint32(uint32_t *value, const char *p);
+int softnic_parser_read_uint16(uint16_t *value, const char *p);
+int softnic_parser_read_uint8(uint8_t *value, const char *p);
+
+int softnic_parser_read_uint64_hex(uint64_t *value, const char *p);
+int softnic_parser_read_uint32_hex(uint32_t *value, const char *p);
+int softnic_parser_read_uint16_hex(uint16_t *value, const char *p);
+int softnic_parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4);
+int softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6);
+int softnic_parse_mac_addr(const char *token, struct ether_addr *addr);
+int softnic_parse_mpls_labels(char *string,
+ uint32_t *labels, uint32_t *n_labels);
+
+struct softnic_cpu_core_params {
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t thread_id;
+};
+
+int softnic_parse_cpu_core(const char *entry,
+ struct softnic_cpu_core_params *p);
+
+int softnic_parse_tokenize_string(char *string,
+ char *tokens[], uint32_t *n_tokens);
+
+#endif
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index b0c13415..30fb3952 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -13,43 +13,51 @@
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
-#include <rte_sched.h>
#include <rte_tm_driver.h>
#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"
-#define DEV_HARD(p) \
- (&rte_eth_devices[p->hard.port_id])
-
-#define PMD_PARAM_SOFT_TM "soft_tm"
-#define PMD_PARAM_SOFT_TM_RATE "soft_tm_rate"
-#define PMD_PARAM_SOFT_TM_NB_QUEUES "soft_tm_nb_queues"
-#define PMD_PARAM_SOFT_TM_QSIZE0 "soft_tm_qsize0"
-#define PMD_PARAM_SOFT_TM_QSIZE1 "soft_tm_qsize1"
-#define PMD_PARAM_SOFT_TM_QSIZE2 "soft_tm_qsize2"
-#define PMD_PARAM_SOFT_TM_QSIZE3 "soft_tm_qsize3"
-#define PMD_PARAM_SOFT_TM_ENQ_BSZ "soft_tm_enq_bsz"
-#define PMD_PARAM_SOFT_TM_DEQ_BSZ "soft_tm_deq_bsz"
-
-#define PMD_PARAM_HARD_NAME "hard_name"
-#define PMD_PARAM_HARD_TX_QUEUE_ID "hard_tx_queue_id"
+#define PMD_PARAM_FIRMWARE "firmware"
+#define PMD_PARAM_CONN_PORT "conn_port"
+#define PMD_PARAM_CPU_ID "cpu_id"
+#define PMD_PARAM_TM_N_QUEUES "tm_n_queues"
+#define PMD_PARAM_TM_QSIZE0 "tm_qsize0"
+#define PMD_PARAM_TM_QSIZE1 "tm_qsize1"
+#define PMD_PARAM_TM_QSIZE2 "tm_qsize2"
+#define PMD_PARAM_TM_QSIZE3 "tm_qsize3"
static const char *pmd_valid_args[] = {
- PMD_PARAM_SOFT_TM,
- PMD_PARAM_SOFT_TM_RATE,
- PMD_PARAM_SOFT_TM_NB_QUEUES,
- PMD_PARAM_SOFT_TM_QSIZE0,
- PMD_PARAM_SOFT_TM_QSIZE1,
- PMD_PARAM_SOFT_TM_QSIZE2,
- PMD_PARAM_SOFT_TM_QSIZE3,
- PMD_PARAM_SOFT_TM_ENQ_BSZ,
- PMD_PARAM_SOFT_TM_DEQ_BSZ,
- PMD_PARAM_HARD_NAME,
- PMD_PARAM_HARD_TX_QUEUE_ID,
+ PMD_PARAM_FIRMWARE,
+ PMD_PARAM_CONN_PORT,
+ PMD_PARAM_CPU_ID,
+ PMD_PARAM_TM_N_QUEUES,
+ PMD_PARAM_TM_QSIZE0,
+ PMD_PARAM_TM_QSIZE1,
+ PMD_PARAM_TM_QSIZE2,
+ PMD_PARAM_TM_QSIZE3,
NULL
};
+static const char welcome[] =
+ "\n"
+ "Welcome to Soft NIC!\n"
+ "\n";
+
+static const char prompt[] = "softnic> ";
+
+struct softnic_conn_params conn_params_default = {
+ .welcome = welcome,
+ .prompt = prompt,
+ .addr = "0.0.0.0",
+ .port = 0,
+ .buf_size = 1024 * 1024,
+ .msg_in_len_max = 1024,
+ .msg_out_len_max = 1024 * 1024,
+ .msg_handle = softnic_cli_process,
+ .msg_handle_arg = NULL,
+};
+
static const struct rte_eth_dev_info pmd_dev_info = {
.min_rx_bufsize = 0,
.max_rx_pktlen = UINT32_MAX,
@@ -65,8 +73,15 @@ static const struct rte_eth_dev_info pmd_dev_info = {
.nb_min = 0,
.nb_align = 1,
},
+ .rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP,
};
+static int pmd_softnic_logtype;
+
+#define PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \
+ "%s(): " fmt "\n", __func__, ##args)
+
static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
@@ -75,50 +90,36 @@ pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
}
static int
-pmd_dev_configure(struct rte_eth_dev *dev)
+pmd_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *p = dev->data->dev_private;
- struct rte_eth_dev *hard_dev = DEV_HARD(p);
-
- if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
- return -1;
-
- if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
- return -1;
-
return 0;
}
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t rx_queue_id,
- uint16_t nb_rx_desc __rte_unused,
- unsigned int socket_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id __rte_unused,
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mb_pool __rte_unused)
{
+ char name[NAME_SIZE];
struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_swq *swq;
- if (p->params.soft.intrusive == 0) {
- struct pmd_rx_queue *rxq;
-
- rxq = rte_zmalloc_socket(p->params.soft.name,
- sizeof(struct pmd_rx_queue), 0, socket_id);
- if (rxq == NULL)
- return -ENOMEM;
+ struct softnic_swq_params params = {
+ .size = nb_rx_desc,
+ };
- rxq->hard.port_id = p->hard.port_id;
- rxq->hard.rx_queue_id = rx_queue_id;
- dev->data->rx_queues[rx_queue_id] = rxq;
- } else {
- struct rte_eth_dev *hard_dev = DEV_HARD(p);
- void *rxq = hard_dev->data->rx_queues[rx_queue_id];
+ snprintf(name, sizeof(name), "RXQ%u", rx_queue_id);
- if (rxq == NULL)
- return -1;
+ swq = softnic_swq_create(p,
+ name,
+ &params);
+ if (swq == NULL)
+ return -1;
- dev->data->rx_queues[rx_queue_id] = rxq;
- }
+ dev->data->rx_queues[rx_queue_id] = swq->r;
return 0;
}
@@ -126,21 +127,26 @@ static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
uint16_t nb_tx_desc,
- unsigned int socket_id,
+ unsigned int socket_id __rte_unused,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
- uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
- char name[size];
- struct rte_ring *r;
-
- snprintf(name, sizeof(name), "%s_txq%04x",
- dev->data->name, tx_queue_id);
- r = rte_ring_create(name, nb_tx_desc, socket_id,
- RING_F_SP_ENQ | RING_F_SC_DEQ);
- if (r == NULL)
+ char name[NAME_SIZE];
+ struct pmd_internals *p = dev->data->dev_private;
+ struct softnic_swq *swq;
+
+ struct softnic_swq_params params = {
+ .size = nb_tx_desc,
+ };
+
+ snprintf(name, sizeof(name), "TXQ%u", tx_queue_id);
+
+ swq = softnic_swq_create(p,
+ name,
+ &params);
+ if (swq == NULL)
return -1;
- dev->data->tx_queues[tx_queue_id] = r;
+ dev->data->tx_queues[tx_queue_id] = swq->r;
return 0;
}
@@ -148,23 +154,19 @@ static int
pmd_dev_start(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
+ int status;
- if (tm_used(dev)) {
- int status = tm_start(p);
-
- if (status)
- return status;
- }
+ /* Firmware */
+ status = softnic_cli_script_process(p,
+ p->params.firmware,
+ conn_params_default.msg_in_len_max,
+ conn_params_default.msg_out_len_max);
+ if (status)
+ return status;
+ /* Link UP */
dev->data->dev_link.link_status = ETH_LINK_UP;
- if (p->params.soft.intrusive) {
- struct rte_eth_dev *hard_dev = DEV_HARD(p);
-
- /* The hard_dev->rx_pkt_burst should be stable by now */
- dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
- }
-
return 0;
}
@@ -173,20 +175,27 @@ pmd_dev_stop(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
+ /* Link DOWN */
dev->data->dev_link.link_status = ETH_LINK_DOWN;
- if (tm_used(dev))
- tm_stop(p);
+ /* Firmware */
+ softnic_pipeline_disable_all(p);
+ softnic_pipeline_free(p);
+ softnic_table_action_profile_free(p);
+ softnic_port_in_action_profile_free(p);
+ softnic_tap_free(p);
+ softnic_tmgr_free(p);
+ softnic_link_free(p);
+ softnic_softnic_swq_free_keep_rxq_txq(p);
+ softnic_mempool_free(p);
+
+ tm_hierarchy_free(p);
}
static void
-pmd_dev_close(struct rte_eth_dev *dev)
+pmd_dev_close(struct rte_eth_dev *dev __rte_unused)
{
- uint32_t i;
-
- /* TX queues */
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
+ return;
}
static int
@@ -197,10 +206,9 @@ pmd_link_update(struct rte_eth_dev *dev __rte_unused,
}
static int
-pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
+pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
{
- *(const struct rte_tm_ops **)arg =
- (tm_enabled(dev)) ? &pmd_tm_ops : NULL;
+ *(const struct rte_tm_ops **)arg = &pmd_tm_ops;
return 0;
}
@@ -222,12 +230,10 @@ pmd_rx_pkt_burst(void *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
- struct pmd_rx_queue *rx_queue = rxq;
-
- return rte_eth_rx_burst(rx_queue->hard.port_id,
- rx_queue->hard.rx_queue_id,
- rx_pkts,
- nb_pkts);
+ return (uint16_t)rte_ring_sc_dequeue_burst(rxq,
+ (void **)rx_pkts,
+ nb_pkts,
+ NULL);
}
static uint16_t
@@ -235,239 +241,56 @@ pmd_tx_pkt_burst(void *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
- return (uint16_t)rte_ring_enqueue_burst(txq,
+ return (uint16_t)rte_ring_sp_enqueue_burst(txq,
(void **)tx_pkts,
nb_pkts,
NULL);
}
-static __rte_always_inline int
-run_default(struct rte_eth_dev *dev)
-{
- struct pmd_internals *p = dev->data->dev_private;
-
- /* Persistent context: Read Only (update not required) */
- struct rte_mbuf **pkts = p->soft.def.pkts;
- uint16_t nb_tx_queues = dev->data->nb_tx_queues;
-
- /* Persistent context: Read - Write (update required) */
- uint32_t txq_pos = p->soft.def.txq_pos;
- uint32_t pkts_len = p->soft.def.pkts_len;
- uint32_t flush_count = p->soft.def.flush_count;
-
- /* Not part of the persistent context */
- uint32_t pos;
- uint16_t i;
-
- /* Soft device TXQ read, Hard device TXQ write */
- for (i = 0; i < nb_tx_queues; i++) {
- struct rte_ring *txq = dev->data->tx_queues[txq_pos];
-
- /* Read soft device TXQ burst to packet enqueue buffer */
- pkts_len += rte_ring_sc_dequeue_burst(txq,
- (void **)&pkts[pkts_len],
- DEFAULT_BURST_SIZE,
- NULL);
-
- /* Increment soft device TXQ */
- txq_pos++;
- if (txq_pos >= nb_tx_queues)
- txq_pos = 0;
-
- /* Hard device TXQ write when complete burst is available */
- if (pkts_len >= DEFAULT_BURST_SIZE) {
- for (pos = 0; pos < pkts_len; )
- pos += rte_eth_tx_burst(p->hard.port_id,
- p->params.hard.tx_queue_id,
- &pkts[pos],
- (uint16_t)(pkts_len - pos));
-
- pkts_len = 0;
- flush_count = 0;
- break;
- }
- }
-
- if (flush_count >= FLUSH_COUNT_THRESHOLD) {
- for (pos = 0; pos < pkts_len; )
- pos += rte_eth_tx_burst(p->hard.port_id,
- p->params.hard.tx_queue_id,
- &pkts[pos],
- (uint16_t)(pkts_len - pos));
-
- pkts_len = 0;
- flush_count = 0;
- }
-
- p->soft.def.txq_pos = txq_pos;
- p->soft.def.pkts_len = pkts_len;
- p->soft.def.flush_count = flush_count + 1;
-
- return 0;
-}
-
-static __rte_always_inline int
-run_tm(struct rte_eth_dev *dev)
-{
- struct pmd_internals *p = dev->data->dev_private;
-
- /* Persistent context: Read Only (update not required) */
- struct rte_sched_port *sched = p->soft.tm.sched;
- struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
- struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
- uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
- uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
- uint16_t nb_tx_queues = dev->data->nb_tx_queues;
-
- /* Persistent context: Read - Write (update required) */
- uint32_t txq_pos = p->soft.tm.txq_pos;
- uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
- uint32_t flush_count = p->soft.tm.flush_count;
-
- /* Not part of the persistent context */
- uint32_t pkts_deq_len, pos;
- uint16_t i;
-
- /* Soft device TXQ read, TM enqueue */
- for (i = 0; i < nb_tx_queues; i++) {
- struct rte_ring *txq = dev->data->tx_queues[txq_pos];
-
- /* Read TXQ burst to packet enqueue buffer */
- pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
- (void **)&pkts_enq[pkts_enq_len],
- enq_bsz,
- NULL);
-
- /* Increment TXQ */
- txq_pos++;
- if (txq_pos >= nb_tx_queues)
- txq_pos = 0;
-
- /* TM enqueue when complete burst is available */
- if (pkts_enq_len >= enq_bsz) {
- rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
-
- pkts_enq_len = 0;
- flush_count = 0;
- break;
- }
- }
-
- if (flush_count >= FLUSH_COUNT_THRESHOLD) {
- if (pkts_enq_len)
- rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
-
- pkts_enq_len = 0;
- flush_count = 0;
- }
-
- p->soft.tm.txq_pos = txq_pos;
- p->soft.tm.pkts_enq_len = pkts_enq_len;
- p->soft.tm.flush_count = flush_count + 1;
-
- /* TM dequeue, Hard device TXQ write */
- pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);
-
- for (pos = 0; pos < pkts_deq_len; )
- pos += rte_eth_tx_burst(p->hard.port_id,
- p->params.hard.tx_queue_id,
- &pkts_deq[pos],
- (uint16_t)(pkts_deq_len - pos));
-
- return 0;
-}
-
-int
-rte_pmd_softnic_run(uint16_t port_id)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-#endif
-
- return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
-}
-
-static struct ether_addr eth_addr = { .addr_bytes = {0} };
-
-static uint32_t
-eth_dev_speed_max_mbps(uint32_t speed_capa)
-{
- uint32_t rate_mbps[32] = {
- ETH_SPEED_NUM_NONE,
- ETH_SPEED_NUM_10M,
- ETH_SPEED_NUM_10M,
- ETH_SPEED_NUM_100M,
- ETH_SPEED_NUM_100M,
- ETH_SPEED_NUM_1G,
- ETH_SPEED_NUM_2_5G,
- ETH_SPEED_NUM_5G,
- ETH_SPEED_NUM_10G,
- ETH_SPEED_NUM_20G,
- ETH_SPEED_NUM_25G,
- ETH_SPEED_NUM_40G,
- ETH_SPEED_NUM_50G,
- ETH_SPEED_NUM_56G,
- ETH_SPEED_NUM_100G,
- };
-
- uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
- return rate_mbps[pos];
-}
-
-static int
-default_init(struct pmd_internals *p,
- struct pmd_params *params,
- int numa_node)
-{
- p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
- 2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.def.pkts == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-static void
-default_free(struct pmd_internals *p)
-{
- rte_free(p->soft.def.pkts);
-}
-
static void *
-pmd_init(struct pmd_params *params, int numa_node)
+pmd_init(struct pmd_params *params)
{
struct pmd_internals *p;
int status;
- p = rte_zmalloc_socket(params->soft.name,
+ p = rte_zmalloc_socket(params->name,
sizeof(struct pmd_internals),
0,
- numa_node);
+ params->cpu_id);
if (p == NULL)
return NULL;
+ /* Params */
memcpy(&p->params, params, sizeof(p->params));
- rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);
- /* Default */
- status = default_init(p, params, numa_node);
+ /* Resources */
+ tm_hierarchy_init(p);
+
+ softnic_mempool_init(p);
+ softnic_swq_init(p);
+ softnic_link_init(p);
+ softnic_tmgr_init(p);
+ softnic_tap_init(p);
+ softnic_port_in_action_profile_init(p);
+ softnic_table_action_profile_init(p);
+ softnic_pipeline_init(p);
+
+ status = softnic_thread_init(p);
if (status) {
- free(p->params.hard.name);
rte_free(p);
return NULL;
}
- /* Traffic Management (TM)*/
- if (params->soft.flags & PMD_FEATURE_TM) {
- status = tm_init(p, params, numa_node);
- if (status) {
- default_free(p);
- free(p->params.hard.name);
+ if (params->conn_port) {
+ struct softnic_conn_params conn_params;
+
+ memcpy(&conn_params, &conn_params_default, sizeof(conn_params));
+ conn_params.port = p->params.conn_port;
+ conn_params.msg_handle_arg = p;
+
+ p->conn = softnic_conn_init(&conn_params);
+ if (p->conn == NULL) {
+ softnic_thread_free(p);
rte_free(p);
return NULL;
}
@@ -479,55 +302,62 @@ pmd_init(struct pmd_params *params, int numa_node)
static void
pmd_free(struct pmd_internals *p)
{
- if (p->params.soft.flags & PMD_FEATURE_TM)
- tm_free(p);
+ if (p == NULL)
+ return;
+
+ if (p->params.conn_port)
+ softnic_conn_free(p->conn);
- default_free(p);
+ softnic_thread_free(p);
+ softnic_pipeline_free(p);
+ softnic_table_action_profile_free(p);
+ softnic_port_in_action_profile_free(p);
+ softnic_tap_free(p);
+ softnic_tmgr_free(p);
+ softnic_link_free(p);
+ softnic_swq_free(p);
+ softnic_mempool_free(p);
+
+ tm_hierarchy_free(p);
- free(p->params.hard.name);
rte_free(p);
}
+static struct ether_addr eth_addr = {
+ .addr_bytes = {0},
+};
+
static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
struct pmd_params *params,
void *dev_private)
{
- struct rte_eth_dev_info hard_info;
- struct rte_eth_dev *soft_dev;
- uint32_t hard_speed;
- int numa_node;
- uint16_t hard_port_id;
-
- rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
- rte_eth_dev_info_get(hard_port_id, &hard_info);
- hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
- numa_node = rte_eth_dev_socket_id(hard_port_id);
+ struct rte_eth_dev *dev;
/* Ethdev entry allocation */
- soft_dev = rte_eth_dev_allocate(params->soft.name);
- if (!soft_dev)
+ dev = rte_eth_dev_allocate(params->name);
+ if (!dev)
return -ENOMEM;
/* dev */
- soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
- NULL : /* set up later */
- pmd_rx_pkt_burst;
- soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
- soft_dev->tx_pkt_prepare = NULL;
- soft_dev->dev_ops = &pmd_ops;
- soft_dev->device = &vdev->device;
+ dev->rx_pkt_burst = pmd_rx_pkt_burst;
+ dev->tx_pkt_burst = pmd_tx_pkt_burst;
+ dev->tx_pkt_prepare = NULL;
+ dev->dev_ops = &pmd_ops;
+ dev->device = &vdev->device;
/* dev->data */
- soft_dev->data->dev_private = dev_private;
- soft_dev->data->dev_link.link_speed = hard_speed;
- soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- soft_dev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
- soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
- soft_dev->data->mac_addrs = &eth_addr;
- soft_dev->data->promiscuous = 1;
- soft_dev->data->kdrv = RTE_KDRV_NONE;
- soft_dev->data->numa_node = numa_node;
+ dev->data->dev_private = dev_private;
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
+ dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ dev->data->mac_addrs = &eth_addr;
+ dev->data->promiscuous = 1;
+ dev->data->kdrv = RTE_KDRV_NONE;
+ dev->data->numa_node = params->cpu_id;
+
+ rte_eth_dev_probing_finish(dev);
return 0;
}
@@ -558,10 +388,21 @@ get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
}
static int
-pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
+get_uint16(const char *key __rte_unused, const char *value, void *extra_args)
+{
+ if (!value || !extra_args)
+ return -EINVAL;
+
+ *(uint16_t *)extra_args = strtoull(value, NULL, 0);
+
+ return 0;
+}
+
+static int
+pmd_parse_args(struct pmd_params *p, const char *params)
{
struct rte_kvargs *kvlist;
- int i, ret;
+ int ret = 0;
kvlist = rte_kvargs_parse(params, pmd_valid_args);
if (kvlist == NULL)
@@ -569,141 +410,71 @@ pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
/* Set default values */
memset(p, 0, sizeof(*p));
- p->soft.name = name;
- p->soft.intrusive = INTRUSIVE;
- p->soft.tm.rate = 0;
- p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
- p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
- p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
- p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
- p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
-
- /* SOFT: TM (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
- char *s;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
- &get_string, &s);
- if (ret < 0)
- goto out_free;
-
- if (strcmp(s, "on") == 0)
- p->soft.flags |= PMD_FEATURE_TM;
- else if (strcmp(s, "off") == 0)
- p->soft.flags &= ~PMD_FEATURE_TM;
- else
- ret = -EINVAL;
-
- free(s);
- if (ret)
- goto out_free;
- }
-
- /* SOFT: TM rate (measured in bytes/second) (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
- &get_uint32, &p->soft.tm.rate);
- if (ret < 0)
- goto out_free;
-
- p->soft.flags |= PMD_FEATURE_TM;
- }
-
- /* SOFT: TM number of queues (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
- &get_uint32, &p->soft.tm.nb_queues);
+ p->firmware = SOFTNIC_FIRMWARE;
+ p->cpu_id = SOFTNIC_CPU_ID;
+ p->tm.n_queues = SOFTNIC_TM_N_QUEUES;
+ p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE;
+ p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE;
+
+ /* Firmware script (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE,
+ &get_string, &p->firmware);
if (ret < 0)
goto out_free;
-
- p->soft.flags |= PMD_FEATURE_TM;
}
- /* SOFT: TM queue size 0 .. 3 (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
- uint32_t qsize;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
- &get_uint32, &qsize);
+ /* Connection listening port (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_CONN_PORT) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_CONN_PORT,
+ &get_uint16, &p->conn_port);
if (ret < 0)
goto out_free;
-
- p->soft.tm.qsize[0] = (uint16_t)qsize;
- p->soft.flags |= PMD_FEATURE_TM;
}
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
- uint32_t qsize;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
- &get_uint32, &qsize);
+ /* CPU ID (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID,
+ &get_uint32, &p->cpu_id);
if (ret < 0)
goto out_free;
-
- p->soft.tm.qsize[1] = (uint16_t)qsize;
- p->soft.flags |= PMD_FEATURE_TM;
}
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
- uint32_t qsize;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
- &get_uint32, &qsize);
+ /* TM number of queues (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES,
+ &get_uint32, &p->tm.n_queues);
if (ret < 0)
goto out_free;
-
- p->soft.tm.qsize[2] = (uint16_t)qsize;
- p->soft.flags |= PMD_FEATURE_TM;
}
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
- uint32_t qsize;
-
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
- &get_uint32, &qsize);
+ /* TM queue size 0 .. 3 (optional) */
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0,
+ &get_uint32, &p->tm.qsize[0]);
if (ret < 0)
goto out_free;
-
- p->soft.tm.qsize[3] = (uint16_t)qsize;
- p->soft.flags |= PMD_FEATURE_TM;
}
- /* SOFT: TM enqueue burst size (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
- &get_uint32, &p->soft.tm.enq_bsz);
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1,
+ &get_uint32, &p->tm.qsize[1]);
if (ret < 0)
goto out_free;
-
- p->soft.flags |= PMD_FEATURE_TM;
}
- /* SOFT: TM dequeue burst size (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
- &get_uint32, &p->soft.tm.deq_bsz);
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2,
+ &get_uint32, &p->tm.qsize[2]);
if (ret < 0)
goto out_free;
-
- p->soft.flags |= PMD_FEATURE_TM;
}
- /* HARD: name (mandatory) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
- &get_string, &p->hard.name);
- if (ret < 0)
- goto out_free;
- } else {
- ret = -EINVAL;
- goto out_free;
- }
-
- /* HARD: tx_queue_id (optional) */
- if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
- ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
- &get_uint32, &p->hard.tx_queue_id);
+ if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) {
+ ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3,
+ &get_uint32, &p->tm.qsize[3]);
if (ret < 0)
goto out_free;
}
@@ -718,54 +489,31 @@ pmd_probe(struct rte_vdev_device *vdev)
{
struct pmd_params p;
const char *params;
- int status;
+ int status = 0;
- struct rte_eth_dev_info hard_info;
- uint32_t hard_speed;
- uint16_t hard_port_id;
- int numa_node;
void *dev_private;
+ const char *name = rte_vdev_device_name(vdev);
- RTE_LOG(INFO, PMD,
- "Probing device \"%s\"\n",
- rte_vdev_device_name(vdev));
+ PMD_LOG(INFO, "Probing device \"%s\"", name);
/* Parse input arguments */
params = rte_vdev_device_args(vdev);
if (!params)
return -EINVAL;
- status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
+ status = pmd_parse_args(&p, params);
if (status)
return status;
- /* Check input arguments */
- if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
- return -EINVAL;
-
- rte_eth_dev_info_get(hard_port_id, &hard_info);
- hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
- numa_node = rte_eth_dev_socket_id(hard_port_id);
-
- if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
- return -EINVAL;
-
- if (p.soft.flags & PMD_FEATURE_TM) {
- status = tm_params_check(&p, hard_speed);
-
- if (status)
- return status;
- }
+ p.name = name;
/* Allocate and initialize soft ethdev private data */
- dev_private = pmd_init(&p, numa_node);
+ dev_private = pmd_init(&p);
if (dev_private == NULL)
return -ENOMEM;
/* Register soft ethdev */
- RTE_LOG(INFO, PMD,
- "Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
- p.soft.name, p.hard.name);
+ PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name);
status = pmd_ethdev_register(vdev, &p, dev_private);
if (status) {
@@ -785,8 +533,7 @@ pmd_remove(struct rte_vdev_device *vdev)
if (!vdev)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
- rte_vdev_device_name(vdev));
+ PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev));
/* Find the ethdev entry */
dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
@@ -795,9 +542,9 @@ pmd_remove(struct rte_vdev_device *vdev)
p = dev->data->dev_private;
/* Free device data structures*/
- pmd_free(p);
rte_free(dev->data);
rte_eth_dev_release_port(dev);
+ pmd_free(p);
return 0;
}
@@ -809,14 +556,39 @@ static struct rte_vdev_driver pmd_softnic_drv = {
RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
- PMD_PARAM_SOFT_TM "=on|off "
- PMD_PARAM_SOFT_TM_RATE "=<int> "
- PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
- PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
- PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
- PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
- PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
- PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
- PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
- PMD_PARAM_HARD_NAME "=<string> "
- PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
+ PMD_PARAM_FIRMWARE "=<string> "
+ PMD_PARAM_CONN_PORT "=<uint16> "
+ PMD_PARAM_CPU_ID "=<uint32> "
+ PMD_PARAM_TM_N_QUEUES "=<uint32> "
+ PMD_PARAM_TM_QSIZE0 "=<uint32> "
+ PMD_PARAM_TM_QSIZE1 "=<uint32> "
+ PMD_PARAM_TM_QSIZE2 "=<uint32> "
+ PMD_PARAM_TM_QSIZE3 "=<uint32>"
+);
+
+RTE_INIT(pmd_softnic_init_log)
+{
+ pmd_softnic_logtype = rte_log_register("pmd.net.softnic");
+ if (pmd_softnic_logtype >= 0)
+ rte_log_set_level(pmd_softnic_logtype, RTE_LOG_NOTICE);
+}
+
+int
+rte_pmd_softnic_manage(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct pmd_internals *softnic;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ softnic = dev->data->dev_private;
+
+ softnic_conn_poll_for_conn(softnic->conn);
+
+ softnic_conn_poll_for_msg(softnic->conn);
+
+ return 0;
+}
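For context: with this change, the data plane entry point rte_pmd_softnic_run() and the new control plane entry point rte_pmd_softnic_manage() are both driven by the application. A minimal polling sketch (assumptions: "port_id" names a Soft NIC port created with conn_port > 0, and the calling core is dedicated to this loop):

    #include <rte_eth_softnic.h>

    static void
    softnic_poll(uint16_t port_id)
    {
            for ( ; ; ) {
                    /* Data plane: run the Soft NIC pipelines for this port. */
                    rte_pmd_softnic_run(port_id);

                    /* Control plane: accept TCP clients, serve CLI messages. */
                    rte_pmd_softnic_manage(port_id);
            }
    }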
diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h
index 9a2c7ba9..048dfe6b 100644
--- a/drivers/net/softnic/rte_eth_softnic.h
+++ b/drivers/net/softnic/rte_eth_softnic.h
@@ -11,42 +11,53 @@
extern "C" {
#endif
-#ifndef SOFTNIC_SOFT_TM_NB_QUEUES
-#define SOFTNIC_SOFT_TM_NB_QUEUES 65536
+/** Firmware. */
+#ifndef SOFTNIC_FIRMWARE
+#define SOFTNIC_FIRMWARE "firmware.cli"
#endif
-#ifndef SOFTNIC_SOFT_TM_QUEUE_SIZE
-#define SOFTNIC_SOFT_TM_QUEUE_SIZE 64
+/** TCP connection port (0 = no connectivity). */
+#ifndef SOFTNIC_CONN_PORT
+#define SOFTNIC_CONN_PORT 0
#endif
-#ifndef SOFTNIC_SOFT_TM_ENQ_BSZ
-#define SOFTNIC_SOFT_TM_ENQ_BSZ 32
+/** NUMA node ID. */
+#ifndef SOFTNIC_CPU_ID
+#define SOFTNIC_CPU_ID 0
#endif
-#ifndef SOFTNIC_SOFT_TM_DEQ_BSZ
-#define SOFTNIC_SOFT_TM_DEQ_BSZ 24
+/** Traffic Manager: Number of scheduler queues. */
+#ifndef SOFTNIC_TM_N_QUEUES
+#define SOFTNIC_TM_N_QUEUES (64 * 1024)
#endif
-#ifndef SOFTNIC_HARD_TX_QUEUE_ID
-#define SOFTNIC_HARD_TX_QUEUE_ID 0
+/** Traffic Manager: Scheduler queue size (per traffic class). */
+#ifndef SOFTNIC_TM_QUEUE_SIZE
+#define SOFTNIC_TM_QUEUE_SIZE 64
#endif
/**
- * Run the traffic management function on the softnic device
+ * Soft NIC run.
*
- * This function read the packets from the softnic input queues, insert into
- * QoS scheduler queues based on mbuf sched field value and transmit the
- * scheduled packets out through the hard device interface.
- *
- * @param portid
- * port id of the soft device.
+ * @param port_id
+ * Port ID of the Soft NIC device.
* @return
- * zero.
+ * Zero on success, error code otherwise.
*/
-
int
rte_pmd_softnic_run(uint16_t port_id);
+/**
+ * Soft NIC manage: poll the Soft NIC TCP connection for new clients
+ * and process any pending CLI messages.
+ *
+ * @param port_id
+ * Port ID of the Soft NIC device.
+ * @return
+ * Zero on success, error code otherwise.
+ */
+int __rte_experimental
+rte_pmd_softnic_manage(uint16_t port_id);
+
#ifdef __cplusplus
}
#endif
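With the hard device parameters gone, a Soft NIC instance is fully described by these defaults plus the vdev arguments registered in rte_eth_softnic.c above. A hypothetical EAL invocation (devargs key names assumed to match the PMD_PARAM_* registration string, i.e. "firmware", "cpu_id", "conn_port"):

    ./testpmd --vdev 'net_softnic0,firmware=firmware.cli,cpu_id=0,conn_port=8086' -- -i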
diff --git a/drivers/net/softnic/rte_eth_softnic_action.c b/drivers/net/softnic/rte_eth_softnic_action.c
new file mode 100644
index 00000000..c25f4dd9
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_action.c
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+
+#include "hash_func.h"
+#include "rte_eth_softnic_internals.h"
+
+/**
+ * Input port
+ */
+int
+softnic_port_in_action_profile_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->port_in_action_profile_list);
+
+ return 0;
+}
+
+void
+softnic_port_in_action_profile_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_port_in_action_profile *profile;
+
+ profile = TAILQ_FIRST(&p->port_in_action_profile_list);
+ if (profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->port_in_action_profile_list, profile, node);
+ free(profile);
+ }
+}
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_port_in_action_profile *profile;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(profile, &p->port_in_action_profile_list, node)
+ if (strcmp(profile->name, name) == 0)
+ return profile;
+
+ return NULL;
+}
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_port_in_action_profile_params *params)
+{
+ struct softnic_port_in_action_profile *profile;
+ struct rte_port_in_action_profile *ap;
+ int status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_port_in_action_profile_find(p, name) ||
+ params == NULL)
+ return NULL;
+
+ if ((params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) &&
+ params->lb.f_hash == NULL) {
+ switch (params->lb.key_size) {
+ case 8:
+ params->lb.f_hash = hash_default_key8;
+ break;
+
+ case 16:
+ params->lb.f_hash = hash_default_key16;
+ break;
+
+ case 24:
+ params->lb.f_hash = hash_default_key24;
+ break;
+
+ case 32:
+ params->lb.f_hash = hash_default_key32;
+ break;
+
+ case 40:
+ params->lb.f_hash = hash_default_key40;
+ break;
+
+ case 48:
+ params->lb.f_hash = hash_default_key48;
+ break;
+
+ case 56:
+ params->lb.f_hash = hash_default_key56;
+ break;
+
+ case 64:
+ params->lb.f_hash = hash_default_key64;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ params->lb.seed = 0;
+ }
+
+ /* Resource */
+ ap = rte_port_in_action_profile_create(0);
+ if (ap == NULL)
+ return NULL;
+
+ if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_FLTR)) {
+ status = rte_port_in_action_profile_action_register(ap,
+ RTE_PORT_IN_ACTION_FLTR,
+ &params->fltr);
+
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) {
+ status = rte_port_in_action_profile_action_register(ap,
+ RTE_PORT_IN_ACTION_LB,
+ &params->lb);
+
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ status = rte_port_in_action_profile_freeze(ap);
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node allocation */
+ profile = calloc(1, sizeof(struct softnic_port_in_action_profile));
+ if (profile == NULL) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(profile->name, name, sizeof(profile->name));
+ memcpy(&profile->params, params, sizeof(*params));
+ profile->ap = ap;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->port_in_action_profile_list, profile, node);
+
+ return profile;
+}
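A caller-side sketch (hypothetical profile name "AP0"; "p" is assumed to be a valid struct pmd_internals pointer; key_mask left zeroed for brevity). It shows that lb.f_hash may be left NULL for any of the supported key sizes, in which case the matching default hash is plugged in as above:

    struct softnic_port_in_action_profile_params params = {
            .action_mask = 1LLU << RTE_PORT_IN_ACTION_LB,
            .lb = {
                    .key_size = 16, /* one of 8, 16, 24, ..., 64 */
                    .f_hash = NULL, /* replaced with hash_default_key16 */
                    .seed = 0,
            },
    };

    struct softnic_port_in_action_profile *ap =
            softnic_port_in_action_profile_create(p, "AP0", &params);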
+
+/**
+ * Table
+ */
+int
+softnic_table_action_profile_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->table_action_profile_list);
+
+ return 0;
+}
+
+void
+softnic_table_action_profile_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_table_action_profile *profile;
+
+ profile = TAILQ_FIRST(&p->table_action_profile_list);
+ if (profile == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->table_action_profile_list, profile, node);
+ free(profile);
+ }
+}
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_table_action_profile *profile;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(profile, &p->table_action_profile_list, node)
+ if (strcmp(profile->name, name) == 0)
+ return profile;
+
+ return NULL;
+}
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_table_action_profile_params *params)
+{
+ struct softnic_table_action_profile *profile;
+ struct rte_table_action_profile *ap;
+ int status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_table_action_profile_find(p, name) ||
+ params == NULL ||
+ ((params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) == 0))
+ return NULL;
+
+ if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) &&
+ params->lb.f_hash == NULL) {
+ switch (params->lb.key_size) {
+ case 8:
+ params->lb.f_hash = hash_default_key8;
+ break;
+
+ case 16:
+ params->lb.f_hash = hash_default_key16;
+ break;
+
+ case 24:
+ params->lb.f_hash = hash_default_key24;
+ break;
+
+ case 32:
+ params->lb.f_hash = hash_default_key32;
+ break;
+
+ case 40:
+ params->lb.f_hash = hash_default_key40;
+ break;
+
+ case 48:
+ params->lb.f_hash = hash_default_key48;
+ break;
+
+ case 56:
+ params->lb.f_hash = hash_default_key56;
+ break;
+
+ case 64:
+ params->lb.f_hash = hash_default_key64;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ params->lb.seed = 0;
+ }
+
+ /* Resource */
+ ap = rte_table_action_profile_create(&params->common);
+ if (ap == NULL)
+ return NULL;
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_FWD,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_LB,
+ &params->lb);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_MTR,
+ &params->mtr);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TM,
+ &params->tm);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_ENCAP,
+ &params->encap);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_NAT,
+ &params->nat);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TTL,
+ &params->ttl);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_STATS,
+ &params->stats);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TIME,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ status = rte_table_action_profile_freeze(ap);
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node allocation */
+ profile = calloc(1, sizeof(struct softnic_table_action_profile));
+ if (profile == NULL) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(profile->name, name, sizeof(profile->name));
+ memcpy(&profile->params, params, sizeof(*params));
+ profile->ap = ap;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->table_action_profile_list, profile, node);
+
+ return profile;
+}
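Unlike the input port profile, a table action profile must always register the FWD action (enforced by the action_mask check at the top of this function). A minimal sketch, assuming "softnic" is a valid struct pmd_internals pointer and using field names from rte_table_action.h:

    struct softnic_table_action_profile_params params = {
            .action_mask = (1LLU << RTE_TABLE_ACTION_FWD) |
                    (1LLU << RTE_TABLE_ACTION_STATS),
            .common = {
                    .ip_version = 1,  /* IPv4 */
                    .ip_offset = 270, /* hypothetical IP header offset */
            },
            .stats = {
                    .n_packets_enabled = 1,
                    .n_bytes_enabled = 0,
            },
    };

    struct softnic_table_action_profile *profile =
            softnic_table_action_profile_create(softnic, "TA0", &params);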
diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c
new file mode 100644
index 00000000..0c7448cc
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_cli.c
@@ -0,0 +1,5259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "parser.h"
+
+#ifndef CMD_MAX_TOKENS
+#define CMD_MAX_TOKENS 256
+#endif
+
+#define MSG_OUT_OF_MEMORY "Not enough memory.\n"
+#define MSG_CMD_UNKNOWN "Unknown command \"%s\".\n"
+#define MSG_CMD_UNIMPLEM "Command \"%s\" not implemented.\n"
+#define MSG_ARG_NOT_ENOUGH "Not enough arguments for command \"%s\".\n"
+#define MSG_ARG_TOO_MANY "Too many arguments for command \"%s\".\n"
+#define MSG_ARG_MISMATCH "Wrong number of arguments for command \"%s\".\n"
+#define MSG_ARG_NOT_FOUND "Argument \"%s\" not found.\n"
+#define MSG_ARG_INVALID "Invalid value for argument \"%s\".\n"
+#define MSG_FILE_ERR "Error in file \"%s\" at line %u.\n"
+#define MSG_FILE_NOT_ENOUGH "Not enough rules in file \"%s\".\n"
+#define MSG_CMD_FAIL "Command \"%s\" failed.\n"
+
+static int
+is_comment(char *in)
+{
+ if ((strlen(in) && strchr("!#%;", in[0])) ||
+ (strncmp(in, "//", 2) == 0) ||
+ (strncmp(in, "--", 2) == 0))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * mempool <mempool_name>
+ * buffer <buffer_size>
+ * pool <pool_size>
+ * cache <cache_size>
+ */
+static void
+cmd_mempool(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_mempool_params p;
+ char *name;
+ struct softnic_mempool *mempool;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "buffer") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buffer");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.buffer_size, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "buffer_size");
+ return;
+ }
+
+ if (strcmp(tokens[4], "pool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pool");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.pool_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pool_size");
+ return;
+ }
+
+ if (strcmp(tokens[6], "cache") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cache");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.cache_size, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cache_size");
+ return;
+ }
+
+ mempool = softnic_mempool_create(softnic, name, &p);
+ if (mempool == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
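An example line matching this grammar (sizes hypothetical; the "32K" suffix assumes the softnic parser accepts the usual K/M multipliers):

    mempool MEMPOOL0 buffer 2304 pool 32K cache 256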
+
+/**
+ * link <link_name>
+ * dev <device_name> | port <port_id>
+ */
+static void
+cmd_link(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_link_params p;
+ struct softnic_link *link;
+ char *name;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens != 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "dev") == 0) {
+ p.dev_name = tokens[3];
+ } else if (strcmp(tokens[2], "port") == 0) {
+ p.dev_name = NULL;
+
+ if (softnic_parser_read_uint16(&p.port_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dev or port");
+ return;
+ }
+
+ link = softnic_link_create(softnic, name, &p);
+ if (link == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * swq <swq_name>
+ * size <size>
+ */
+static void
+cmd_swq(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_swq_params p;
+ char *name;
+ struct softnic_swq *swq;
+
+ if (n_tokens != 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.size, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "size");
+ return;
+ }
+
+ swq = softnic_swq_create(softnic, name, &p);
+ if (swq == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr shaper profile
+ * id <profile_id>
+ * rate <tb_rate> size <tb_size>
+ * adj <packet_length_adjust>
+ */
+static void
+cmd_tmgr_shaper_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_shaper_params sp;
+ struct rte_tm_error error;
+ uint32_t shaper_profile_id;
+ uint16_t port_id;
+ int status;
+
+ memset(&sp, 0, sizeof(struct rte_tm_shaper_params));
+
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[2], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (strcmp(tokens[3], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shaper_profile_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "rate") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rate");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&sp.peak.rate, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate");
+ return;
+ }
+
+ if (strcmp(tokens[7], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&sp.peak.size, tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_size");
+ return;
+ }
+
+ if (strcmp(tokens[9], "adj") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "adj");
+ return;
+ }
+
+ if (softnic_parser_read_int32(&sp.pkt_length_adjust, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "packet_length_adjust");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return;
+
+ status = rte_tm_shaper_profile_add(port_id, shaper_profile_id, &sp, &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr shared shaper
+ * id <shared_shaper_id>
+ * profile <shaper_profile_id>
+ */
+static void
+cmd_tmgr_shared_shaper(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ uint32_t shared_shaper_id, shaper_profile_id;
+ uint16_t port_id;
+ int status;
+
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "shared") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared");
+ return;
+ }
+
+ if (strcmp(tokens[2], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[3], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shared_shaper_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shaper_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return;
+
+ status = rte_tm_shared_shaper_add_update(port_id,
+ shared_shaper_id,
+ shaper_profile_id,
+ &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
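Example lines matching the two shaper grammars (IDs hypothetical; a rate of 1250000000 bytes/second corresponds to a 10 Gbps line):

    tmgr shaper profile id 0 rate 1250000000 size 1000000 adj 24
    tmgr shared shaper id 0 profile 0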
+
+/**
+ * tmgr node
+ * id <node_id>
+ * parent <parent_node_id | none>
+ * priority <priority>
+ * weight <weight>
+ * [shaper profile <shaper_profile_id>]
+ * [shared shaper <shared_shaper_id>]
+ * [nonleaf sp <n_sp_priorities>]
+ */
+static void
+cmd_tmgr_node(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ struct rte_tm_node_params np;
+ uint32_t node_id, parent_node_id, priority, weight, shared_shaper_id;
+ uint16_t port_id;
+ int status;
+
+ memset(&np, 0, sizeof(struct rte_tm_node_params));
+ np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+ np.nonleaf.n_sp_priorities = 1;
+
+ if (n_tokens < 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "node") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "node");
+ return;
+ }
+
+ if (strcmp(tokens[2], "id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&node_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "node_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "parent") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "parent");
+ return;
+ }
+
+ if (strcmp(tokens[5], "none") == 0)
+ parent_node_id = RTE_TM_NODE_ID_NULL;
+ else {
+ if (softnic_parser_read_uint32(&parent_node_id, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "parent_node_id");
+ return;
+ }
+ }
+
+ if (strcmp(tokens[6], "priority") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&priority, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "priority");
+ return;
+ }
+
+ if (strcmp(tokens[8], "weight") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&weight, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "weight");
+ return;
+ }
+
+ tokens += 10;
+ n_tokens -= 10;
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "shaper") == 0) &&
+ (strcmp(tokens[1], "profile") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (strcmp(tokens[2], "none") == 0) {
+ np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+ } else {
+ if (softnic_parser_read_uint32(&np.shaper_profile_id, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id");
+ return;
+ }
+ }
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* shaper profile */
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "shared") == 0) &&
+ (strcmp(tokens[1], "shaper") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&shared_shaper_id, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id");
+ return;
+ }
+
+ np.shared_shaper_id = &shared_shaper_id;
+ np.n_shared_shapers = 1;
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* shared shaper */
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[0], "nonleaf") == 0) &&
+ (strcmp(tokens[1], "sp") == 0)) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&np.nonleaf.n_sp_priorities, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_sp_priorities");
+ return;
+ }
+
+ tokens += 3;
+ n_tokens -= 3;
+ } /* nonleaf sp <n_sp_priorities> */
+
+ if (n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status != 0)
+ return;
+
+ status = rte_tm_node_add(port_id,
+ node_id,
+ parent_node_id,
+ priority,
+ weight,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &np,
+ &error);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
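An example command matching this grammar, creating a root node with the optional shaper profile clause (IDs hypothetical; shaper profile 0 must already exist):

    tmgr node id 86017 parent none priority 0 weight 1 shaper profile 0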
+
+static uint32_t
+root_node_id(uint32_t n_spp,
+ uint32_t n_pps)
+{
+ uint32_t n_queues = n_spp * n_pps * RTE_SCHED_QUEUES_PER_PIPE;
+ uint32_t n_tc = n_spp * n_pps * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_pipes = n_spp * n_pps;
+
+ return n_queues + n_tc + n_pipes + n_spp;
+}
+
+static uint32_t
+subport_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues + n_tc + n_pipes + subport_id;
+}
+
+static uint32_t
+pipe_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues +
+ n_tc +
+ pipe_id +
+ subport_id * n_pps;
+}
+
+static uint32_t
+tc_node_id(uint32_t n_spp,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t tc_id)
+{
+ uint32_t n_pipes = n_spp * n_pps;
+ uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE;
+
+ return n_queues +
+ tc_id +
+ (pipe_id + subport_id * n_pps) * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
+}
+
+static uint32_t
+queue_node_id(uint32_t n_spp __rte_unused,
+ uint32_t n_pps,
+ uint32_t subport_id,
+ uint32_t pipe_id,
+ uint32_t tc_id,
+ uint32_t queue_id)
+{
+ return queue_id +
+ tc_id * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE +
+ (pipe_id + subport_id * n_pps) * RTE_SCHED_QUEUES_PER_PIPE;
+}
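These helpers pack all scheduler nodes into one flat ID space: queues first, then traffic classes, pipes, subports, and finally the root. A worked example for a hypothetical sizing of n_spp = 1 and n_pps = 4096 (so, with RTE_SCHED_QUEUES_PER_PIPE = 16 and RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE = 4: n_pipes = 4096, n_queues = 65536, n_tc = 16384):

    queue_node_id(1, 4096, 0, 0, 0, 0); /* = 0, queues use IDs 0..65535 */
    tc_node_id(1, 4096, 0, 0, 0);       /* = 65536, TCs use IDs 65536..81919 */
    pipe_node_id(1, 4096, 0, 0);        /* = 81920, pipes use IDs 81920..86015 */
    subport_node_id(1, 4096, 0);        /* = 86016 */
    root_node_id(1, 4096);              /* = 86017 */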
+
+struct tmgr_hierarchy_default_params {
+ uint32_t n_spp; /**< Number of subports per port. */
+ uint32_t n_pps; /**< Number of pipes per subport. */
+
+ struct {
+ uint32_t port;
+ uint32_t subport;
+ uint32_t pipe;
+ uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ } shaper_profile_id;
+
+ struct {
+ uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t tc_valid[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ } shared_shaper_id;
+
+ struct {
+ uint32_t queue[RTE_SCHED_QUEUES_PER_PIPE];
+ } weight;
+};
+
+static int
+tmgr_hierarchy_default(struct pmd_internals *softnic,
+ struct tmgr_hierarchy_default_params *params)
+{
+ struct rte_tm_node_params root_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.port,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ };
+
+ struct rte_tm_node_params subport_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.subport,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ };
+
+ struct rte_tm_node_params pipe_node_params = {
+ .shaper_profile_id = params->shaper_profile_id.pipe,
+ .nonleaf = {
+ .n_sp_priorities = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE,
+ },
+ };
+
+ struct rte_tm_node_params tc_node_params[] = {
+ [0] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[0],
+ .shared_shaper_id = &params->shared_shaper_id.tc[0],
+ .n_shared_shapers =
+ params->shared_shaper_id.tc_valid[0] ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [1] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[1],
+ .shared_shaper_id = &params->shared_shaper_id.tc[1],
+ .n_shared_shapers =
+ params->shared_shaper_id.tc_valid[1] ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [2] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[2],
+ .shared_shaper_id = &params->shared_shaper_id.tc[2],
+ .n_shared_shapers =
+ params->shared_shaper_id.tc_valid[2] ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+
+ [3] = {
+ .shaper_profile_id = params->shaper_profile_id.tc[3],
+ .shared_shaper_id = &params->shared_shaper_id.tc[3],
+ .n_shared_shapers =
+ params->shared_shaper_id.tc_valid[3] ? 1 : 0,
+ .nonleaf = {
+ .n_sp_priorities = 1,
+ },
+ },
+ };
+
+ struct rte_tm_node_params queue_node_params = {
+ .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE,
+ };
+
+ struct rte_tm_error error;
+ uint32_t n_spp = params->n_spp, n_pps = params->n_pps, s;
+ int status;
+ uint16_t port_id;
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 0: Root node */
+ status = rte_tm_node_add(port_id,
+ root_node_id(n_spp, n_pps),
+ RTE_TM_NODE_ID_NULL,
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &root_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 1: Subport nodes */
+ for (s = 0; s < params->n_spp; s++) {
+ uint32_t p;
+
+ status = rte_tm_node_add(port_id,
+ subport_node_id(n_spp, n_pps, s),
+ root_node_id(n_spp, n_pps),
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &subport_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 2: Pipe nodes */
+ for (p = 0; p < params->n_pps; p++) {
+ uint32_t t;
+
+ status = rte_tm_node_add(port_id,
+ pipe_node_id(n_spp, n_pps, s, p),
+ subport_node_id(n_spp, n_pps, s),
+ 0,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &pipe_node_params,
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 3: Traffic class nodes */
+ for (t = 0; t < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; t++) {
+ uint32_t q;
+
+ status = rte_tm_node_add(port_id,
+ tc_node_id(n_spp, n_pps, s, p, t),
+ pipe_node_id(n_spp, n_pps, s, p),
+ t,
+ 1,
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &tc_node_params[t],
+ &error);
+ if (status)
+ return -1;
+
+ /* Hierarchy level 4: Queue nodes */
+ for (q = 0; q < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; q++) {
+ status = rte_tm_node_add(port_id,
+ queue_node_id(n_spp, n_pps, s, p, t, q),
+ tc_node_id(n_spp, n_pps, s, p, t),
+ 0,
+ params->weight.queue[q],
+ RTE_TM_NODE_LEVEL_ID_ANY,
+ &queue_node_params,
+ &error);
+ if (status)
+ return -1;
+ } /* Queue */
+ } /* TC */
+ } /* Pipe */
+ } /* Subport */
+
+ return 0;
+}
+
+/**
+ * tmgr hierarchy-default
+ * spp <n_subports_per_port>
+ * pps <n_pipes_per_subport>
+ * shaper profile
+ * port <profile_id>
+ * subport <profile_id>
+ * pipe <profile_id>
+ * tc0 <profile_id>
+ * tc1 <profile_id>
+ * tc2 <profile_id>
+ * tc3 <profile_id>
+ * shared shaper
+ * tc0 <id | none>
+ * tc1 <id | none>
+ * tc2 <id | none>
+ * tc3 <id | none>
+ * weight
+ * queue <q0> ... <q15>
+ */
+static void
+cmd_tmgr_hierarchy_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct tmgr_hierarchy_default_params p;
+ int i, status;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens != 50) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "hierarchy-default") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy-default");
+ return;
+ }
+
+ if (strcmp(tokens[2], "spp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.n_spp, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_subports_per_port");
+ return;
+ }
+
+ if (strcmp(tokens[4], "pps") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.n_pps, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport");
+ return;
+ }
+
+ /* Shaper profile */
+
+ if (strcmp(tokens[6], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[7], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (strcmp(tokens[8], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.port, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port profile id");
+ return;
+ }
+
+ if (strcmp(tokens[10], "subport") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "subport");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.subport, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "subport profile id");
+ return;
+ }
+
+ if (strcmp(tokens[12], "pipe") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipe");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.pipe, tokens[13]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pipe_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[14], "tc0") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[0], tokens[15]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc0 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[16], "tc1") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[1], tokens[17]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc1 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[18], "tc2") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[2], tokens[19]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc2 profile id");
+ return;
+ }
+
+ if (strcmp(tokens[20], "tc3") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[3], tokens[21]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc3 profile id");
+ return;
+ }
+
+ /* Shared shaper */
+
+ if (strcmp(tokens[22], "shared") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared");
+ return;
+ }
+
+ if (strcmp(tokens[23], "shaper") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper");
+ return;
+ }
+
+ if (strcmp(tokens[24], "tc0") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0");
+ return;
+ }
+
+ if (strcmp(tokens[25], "none") == 0)
+ p.shared_shaper_id.tc_valid[0] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], tokens[25]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[0] = 1;
+ }
+
+ if (strcmp(tokens[26], "tc1") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1");
+ return;
+ }
+
+ if (strcmp(tokens[27], "none") == 0)
+ p.shared_shaper_id.tc_valid[1] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], tokens[27]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[1] = 1;
+ }
+
+ if (strcmp(tokens[28], "tc2") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2");
+ return;
+ }
+
+ if (strcmp(tokens[29], "none") == 0)
+ p.shared_shaper_id.tc_valid[2] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], tokens[29]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[2] = 1;
+ }
+
+ if (strcmp(tokens[30], "tc3") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3");
+ return;
+ }
+
+ if (strcmp(tokens[31], "none") == 0)
+ p.shared_shaper_id.tc_valid[3] = 0;
+ else {
+ if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], tokens[31]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3");
+ return;
+ }
+
+ p.shared_shaper_id.tc_valid[3] = 1;
+ }
+
+ /* Weight */
+
+ if (strcmp(tokens[32], "weight") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight");
+ return;
+ }
+
+ if (strcmp(tokens[33], "queue") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue");
+ return;
+ }
+
+ for (i = 0; i < 16; i++) {
+ if (softnic_parser_read_uint32(&p.weight.queue[i], tokens[34 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "weight queue");
+ return;
+ }
+ }
+
+ status = tmgr_hierarchy_default(softnic, &p);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
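A complete 50-token example of this command (all shaper profile IDs hypothetical and assumed to be created beforehand; no shared shapers; all queue weights equal):

    tmgr hierarchy-default spp 1 pps 4096 shaper profile port 0 subport 0 pipe 1 tc0 2 tc1 2 tc2 2 tc3 2 shared shaper tc0 none tc1 none tc2 none tc3 none weight queue 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1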
+
+/**
+ * tmgr hierarchy commit
+ */
+static void
+cmd_tmgr_hierarchy_commit(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_tm_error error;
+ uint16_t port_id;
+ int status;
+
+ if (n_tokens != 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "hierarchy") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy");
+ return;
+ }
+
+ if (strcmp(tokens[2], "commit") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "commit");
+ return;
+ }
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status != 0)
+ return;
+
+ status = rte_tm_hierarchy_commit(port_id, 1, &error);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tmgr <tmgr_name>
+ */
+static void
+cmd_tmgr(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *name;
+ struct softnic_tmgr_port *tmgr_port;
+
+ if (n_tokens != 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ tmgr_port = softnic_tmgr_port_create(softnic, name);
+ if (tmgr_port == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * tap <tap_name>
+ */
+static void
+cmd_tap(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *name;
+ struct softnic_tap *tap;
+
+ if (n_tokens != 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ tap = softnic_tap_create(softnic, name);
+ if (tap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
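Both objects are created by name only; e.g. the following instantiate a traffic manager port and a TAP interface (names hypothetical):

    tmgr TMGR0
    tap TAP0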
+
+/**
+ * port in action profile <profile_name>
+ * [filter match | mismatch offset <key_offset> mask <key_mask> key <key_value> port <port_id>]
+ * [balance offset <key_offset> mask <key_mask> port <port_id0> ... <port_id15>]
+ */
+static void
+cmd_port_in_action_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_in_action_profile_params p;
+ struct softnic_port_in_action_profile *ap;
+ char *name;
+ uint32_t t0;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (strcmp(tokens[2], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action");
+ return;
+ }
+
+ if (strcmp(tokens[3], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ name = tokens[4];
+
+ t0 = 5;
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "filter") == 0)) {
+ uint32_t size;
+
+ if (n_tokens < t0 + 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "port in action profile filter");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "match") == 0) {
+ p.fltr.filter_on_match = 1;
+ } else if (strcmp(tokens[t0 + 1], "mismatch") == 0) {
+ p.fltr.filter_on_match = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match or mismatch");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.fltr.key_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE;
+ if ((softnic_parse_hex_string(tokens[t0 + 5],
+ p.fltr.key_mask, &size) != 0) ||
+ size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 6], "key") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key");
+ return;
+ }
+
+ size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE;
+ if ((softnic_parse_hex_string(tokens[t0 + 7],
+ p.fltr.key, &size) != 0) ||
+ size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_value");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 8], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.fltr.port_id,
+ tokens[t0 + 9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_FLTR;
+ t0 += 10;
+ } /* filter */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "balance") == 0)) {
+ uint32_t i;
+
+ if (n_tokens < t0 + 22) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "port in action profile balance");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX;
+ if (softnic_parse_hex_string(tokens[t0 + 4],
+ p.lb.key_mask, &p.lb.key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ for (i = 0; i < 16; i++)
+ if (softnic_parser_read_uint32(&p.lb.port_id[i],
+ tokens[t0 + 6 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_LB;
+ t0 += 22;
+ } /* balance */
+
+ if (t0 < n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ ap = softnic_port_in_action_profile_create(softnic, name, &p);
+ if (ap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
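An example of the balance form (offset, mask and port IDs hypothetical): the 16-byte mask below hashes on a single header byte, and exactly 16 output ports are required:

    port in action profile AP0 balance offset 278 mask 00FF0000000000000000000000000000 port 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3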
+
+/**
+ * table action profile <profile_name>
+ * ipv4 | ipv6
+ * offset <ip_offset>
+ * fwd
+ * [balance offset <key_offset> mask <key_mask> outoffset <out_offset>]
+ * [meter srtcm | trtcm
+ * tc <n_tc>
+ * stats none | pkts | bytes | both]
+ * [tm spp <n_subports_per_port> pps <n_pipes_per_subport>]
+ * [encap ether | vlan | qinq | mpls | pppoe]
+ * [nat src | dst
+ * proto udp | tcp]
+ * [ttl drop | fwd
+ * stats none | pkts]
+ * [stats pkts | bytes | both]
+ * [time]
+ */
+static void
+cmd_table_action_profile(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_action_profile_params p;
+ struct softnic_table_action_profile *ap;
+ char *name;
+ uint32_t t0;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action");
+ return;
+ }
+
+ if (strcmp(tokens[2], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ name = tokens[3];
+
+ if (strcmp(tokens[4], "ipv4") == 0) {
+ p.common.ip_version = 1;
+ } else if (strcmp(tokens[4], "ipv6") == 0) {
+ p.common.ip_version = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[5], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.common.ip_offset,
+ tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ip_offset");
+ return;
+ }
+
+ if (strcmp(tokens[7], "fwd") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "fwd");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
+
+ t0 = 8;
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "balance") == 0)) {
+ if (n_tokens < t0 + 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "table action profile balance");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ p.lb.key_size = RTE_TABLE_ACTION_LB_KEY_SIZE_MAX;
+ if (softnic_parse_hex_string(tokens[t0 + 4],
+ p.lb.key_mask, &p.lb.key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "outoffset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "outoffset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.lb.out_offset,
+ tokens[t0 + 6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "out_offset");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_LB;
+ t0 += 7;
+ } /* balance */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "meter") == 0)) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile meter");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "srtcm") == 0) {
+ p.mtr.alg = RTE_TABLE_ACTION_METER_SRTCM;
+ } else if (strcmp(tokens[t0 + 1], "trtcm") == 0) {
+ p.mtr.alg = RTE_TABLE_ACTION_METER_TRTCM;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "srtcm or trtcm");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "tc") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.mtr.n_tc,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_tc");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "none") == 0) {
+ p.mtr.n_packets_enabled = 0;
+ p.mtr.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 5], "pkts") == 0) {
+ p.mtr.n_packets_enabled = 1;
+ p.mtr.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 5], "bytes") == 0) {
+ p.mtr.n_packets_enabled = 0;
+ p.mtr.n_bytes_enabled = 1;
+ } else if (strcmp(tokens[t0 + 5], "both") == 0) {
+ p.mtr.n_packets_enabled = 1;
+ p.mtr.n_bytes_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "none or pkts or bytes or both");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_MTR;
+ t0 += 6;
+ } /* meter */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "tm") == 0)) {
+ if (n_tokens < t0 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile tm");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "spp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tm.n_subports_per_port,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_subports_per_port");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "pps") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tm.n_pipes_per_subport,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_pipes_per_subport");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TM;
+ t0 += 5;
+ } /* tm */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "encap") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "action profile encap");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "ether") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER;
+ } else if (strcmp(tokens[t0 + 1], "vlan") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN;
+ } else if (strcmp(tokens[t0 + 1], "qinq") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ;
+ } else if (strcmp(tokens[t0 + 1], "mpls") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS;
+ } else if (strcmp(tokens[t0 + 1], "pppoe") == 0) {
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE;
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "encap");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP;
+ t0 += 2;
+ } /* encap */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "nat") == 0)) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile nat");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "src") == 0) {
+ p.nat.source_nat = 1;
+ } else if (strcmp(tokens[t0 + 1], "dst") == 0) {
+ p.nat.source_nat = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "src or dst");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "proto") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "proto");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "tcp") == 0) {
+ p.nat.proto = 0x06;
+ } else if (strcmp(tokens[t0 + 3], "udp") == 0) {
+ p.nat.proto = 0x11;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "tcp or udp");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_NAT;
+ t0 += 4;
+ } /* nat */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "ttl") == 0)) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile ttl");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "drop") == 0) {
+ p.ttl.drop = 1;
+ } else if (strcmp(tokens[t0 + 1], "fwd") == 0) {
+ p.ttl.drop = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "drop or fwd");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "none") == 0) {
+ p.ttl.n_packets_enabled = 0;
+ } else if (strcmp(tokens[t0 + 3], "pkts") == 0) {
+ p.ttl.n_packets_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "none or pkts");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TTL;
+ t0 += 4;
+ } /* ttl */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "stats") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "pkts") == 0) {
+ p.stats.n_packets_enabled = 1;
+ p.stats.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 1], "bytes") == 0) {
+ p.stats.n_packets_enabled = 0;
+ p.stats.n_bytes_enabled = 1;
+ } else if (strcmp(tokens[t0 + 1], "both") == 0) {
+ p.stats.n_packets_enabled = 1;
+ p.stats.n_bytes_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "pkts or bytes or both");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_STATS;
+ t0 += 2;
+ } /* stats */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "time") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TIME;
+ t0 += 1;
+ } /* time */
+
+ if (t0 < n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ ap = softnic_table_action_profile_create(softnic, name, &p);
+ if (ap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name>
+ * period <timer_period_ms>
+ * offset_port_id <offset_port_id>
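+ *
+ * Example (illustrative name and values):
+ *    pipeline PIPELINE0 period 10 offset_port_id 0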
+ */
+static void
+cmd_pipeline(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct pipeline_params p;
+ char *name;
+ struct pipeline *pipeline;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "period") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "period");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.timer_period_ms,
+ tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "timer_period_ms");
+ return;
+ }
+
+ if (strcmp(tokens[4], "offset_port_id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset_port_id");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.offset_port_id,
+ tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "offset_port_id");
+ return;
+ }
+
+ pipeline = softnic_pipeline_create(softnic, name, &p);
+ if (pipeline == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in
+ * bsz <burst_size>
+ * link <link_name> rxq <queue_id>
+ * | swq <swq_name>
+ * | tmgr <tmgr_name>
+ * | tap <tap_name> mempool <mempool_name> mtu <mtu>
+ * | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>
+ * [action <port_in_action_profile_name>]
+ * [disabled]
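+ *
+ * Example (illustrative names and values; link variant):
+ *    pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0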
+ */
+static void
+cmd_pipeline_port_in(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_in_params p;
+ char *pipeline_name;
+ uint32_t t0;
+ int enabled, status;
+
+ if (n_tokens < 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (strcmp(tokens[4], "bsz") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "burst_size");
+ return;
+ }
+
+ t0 = 6;
+
+ if (strcmp(tokens[t0], "link") == 0) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in link");
+ return;
+ }
+
+ p.type = PORT_IN_RXQ;
+
+ p.dev_name = tokens[t0 + 1];
+
+ if (strcmp(tokens[t0 + 2], "rxq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq");
+ return;
+ }
+
+ if (softnic_parser_read_uint16(&p.rxq.queue_id,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_id");
+ return;
+ }
+ t0 += 4;
+ } else if (strcmp(tokens[t0], "swq") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in swq");
+ return;
+ }
+
+ p.type = PORT_IN_SWQ;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "tmgr") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in tmgr");
+ return;
+ }
+
+ p.type = PORT_IN_TMGR;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "tap") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in tap");
+ return;
+ }
+
+ p.type = PORT_IN_TAP;
+
+ p.dev_name = tokens[t0 + 1];
+
+ if (strcmp(tokens[t0 + 2], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mempool");
+ return;
+ }
+
+ p.tap.mempool_name = tokens[t0 + 3];
+
+ if (strcmp(tokens[t0 + 4], "mtu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mtu");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.tap.mtu,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "mtu");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "source") == 0) {
+ if (n_tokens < t0 + 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in source");
+ return;
+ }
+
+ p.type = PORT_IN_SOURCE;
+
+ p.dev_name = NULL;
+
+ if (strcmp(tokens[t0 + 1], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mempool");
+ return;
+ }
+
+ p.source.mempool_name = tokens[t0 + 2];
+
+ if (strcmp(tokens[t0 + 3], "file") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "file");
+ return;
+ }
+
+ p.source.file_name = tokens[t0 + 4];
+
+ if (strcmp(tokens[t0 + 5], "bpp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "bpp");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.source.n_bytes_per_pkt,
+ tokens[t0 + 6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_bytes_per_pkt");
+ return;
+ }
+
+ t0 += 7;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ p.action_profile_name = NULL;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "action") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "action");
+ return;
+ }
+
+ p.action_profile_name = tokens[t0 + 1];
+
+ t0 += 2;
+ }
+
+ enabled = 1;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "disabled") == 0)) {
+ enabled = 0;
+
+ t0 += 1;
+ }
+
+ if (n_tokens != t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_port_in_create(softnic,
+ pipeline_name,
+ &p,
+ enabled);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port out
+ * bsz <burst_size>
+ * link <link_name> txq <txq_id>
+ * | swq <swq_name>
+ * | tmgr <tmgr_name>
+ * | tap <tap_name>
+ * | sink [file <file_name> pkts <max_n_pkts>]
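+ *
+ * Example (illustrative names and values; link variant):
+ *    pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0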
+ */
+static void
+cmd_pipeline_port_out(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_port_out_params p;
+ char *pipeline_name;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "out") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+ return;
+ }
+
+ if (strcmp(tokens[4], "bsz") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "burst_size");
+ return;
+ }
+
+ if (strcmp(tokens[6], "link") == 0) {
+ if (n_tokens != 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out link");
+ return;
+ }
+
+ p.type = PORT_OUT_TXQ;
+
+ p.dev_name = tokens[7];
+
+ if (strcmp(tokens[8], "txq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq");
+ return;
+ }
+
+ if (softnic_parser_read_uint16(&p.txq.queue_id,
+ tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
+ } else if (strcmp(tokens[6], "swq") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out swq");
+ return;
+ }
+
+ p.type = PORT_OUT_SWQ;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "tmgr") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out tmgr");
+ return;
+ }
+
+ p.type = PORT_OUT_TMGR;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "tap") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out tap");
+ return;
+ }
+
+ p.type = PORT_OUT_TAP;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "sink") == 0) {
+ if ((n_tokens != 7) && (n_tokens != 11)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out sink");
+ return;
+ }
+
+ p.type = PORT_OUT_SINK;
+
+ p.dev_name = NULL;
+
+ if (n_tokens == 7) {
+ p.sink.file_name = NULL;
+ p.sink.max_n_pkts = 0;
+ } else {
+ if (strcmp(tokens[7], "file") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "file");
+ return;
+ }
+
+ p.sink.file_name = tokens[8];
+
+ if (strcmp(tokens[9], "pkts") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pkts");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.sink.max_n_pkts,
+ tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "max_n_pkts");
+ return;
+ }
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_port_out_create(softnic, pipeline_name, &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table
+ * match
+ * acl
+ * ipv4 | ipv6
+ * offset <ip_header_offset>
+ * size <n_rules>
+ * | array
+ * offset <key_offset>
+ * size <n_keys>
+ * | hash
+ * ext | lru
+ * key <key_size>
+ * mask <key_mask>
+ * offset <key_offset>
+ * buckets <n_buckets>
+ * size <n_keys>
+ * | lpm
+ * ipv4 | ipv6
+ * offset <ip_header_offset>
+ * size <n_rules>
+ * | stub
+ * [action <table_action_profile_name>]
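+ *
+ * Example (illustrative names; offset and size are placeholders):
+ *    pipeline PIPELINE0 table match lpm ipv4 offset 286 size 4096 action AP0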
+ */
+static void
+cmd_pipeline_table(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX];
+ struct softnic_table_params p;
+ char *pipeline_name;
+ uint32_t t0;
+ int status;
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (strcmp(tokens[3], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+ return;
+ }
+
+ t0 = 4;
+ if (strcmp(tokens[t0], "acl") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table acl");
+ return;
+ }
+
+ p.match_type = TABLE_ACL;
+
+ if (strcmp(tokens[t0 + 1], "ipv4") == 0) {
+ p.match.acl.ip_version = 1;
+ } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) {
+ p.match.acl.ip_version = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.acl.ip_header_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "ip_header_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.acl.n_rules,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "array") == 0) {
+ if (n_tokens < t0 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table array");
+ return;
+ }
+
+ p.match_type = TABLE_ARRAY;
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.array.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.array.n_keys,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_keys");
+ return;
+ }
+
+ t0 += 5;
+ } else if (strcmp(tokens[t0], "hash") == 0) {
+ uint32_t key_mask_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+ if (n_tokens < t0 + 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table hash");
+ return;
+ }
+
+ p.match_type = TABLE_HASH;
+
+ if (strcmp(tokens[t0 + 1], "ext") == 0) {
+ p.match.hash.extendable_bucket = 1;
+ } else if (strcmp(tokens[t0 + 1], "lru") == 0) {
+ p.match.hash.extendable_bucket = 0;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ext or lru");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "key") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key");
+ return;
+ }
+
+ if ((softnic_parser_read_uint32(&p.match.hash.key_size,
+ tokens[t0 + 3]) != 0) ||
+ p.match.hash.key_size == 0 ||
+ p.match.hash.key_size > TABLE_RULE_MATCH_SIZE_MAX) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_size");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ if ((softnic_parse_hex_string(tokens[t0 + 5],
+ key_mask, &key_mask_size) != 0) ||
+ key_mask_size != p.match.hash.key_size) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+ p.match.hash.key_mask = key_mask;
+
+ if (strcmp(tokens[t0 + 6], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.key_offset,
+ tokens[t0 + 7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 8], "buckets") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buckets");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.n_buckets,
+ tokens[t0 + 9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_buckets");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 10], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.hash.n_keys,
+ tokens[t0 + 11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_keys");
+ return;
+ }
+
+ t0 += 12;
+ } else if (strcmp(tokens[t0], "lpm") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table lpm");
+ return;
+ }
+
+ p.match_type = TABLE_LPM;
+
+ if (strcmp(tokens[t0 + 1], "ipv4") == 0) {
+ p.match.lpm.key_size = 4;
+ } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) {
+ p.match.lpm.key_size = 16;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.lpm.key_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.match.lpm.n_rules,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "stub") == 0) {
+ p.match_type = TABLE_STUB;
+
+ t0 += 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ p.action_profile_name = NULL;
+ if (n_tokens > t0 &&
+ (strcmp(tokens[t0], "action") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "action");
+ return;
+ }
+
+ p.action_profile_name = tokens[t0 + 1];
+
+ t0 += 2;
+ }
+
+ if (n_tokens > t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_create(softnic, pipeline_name, &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> table <table_id>
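+ *
+ * Example (illustrative):
+ *    pipeline PIPELINE0 port in 0 table 0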
+ */
+static void
+cmd_pipeline_port_in_table(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id, table_id;
+ int status;
+
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_connect_to_table(softnic,
+ pipeline_name,
+ port_id,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> stats read [clear]
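+ *
+ * Example (illustrative):
+ *    pipeline PIPELINE0 port in 0 stats read clear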
+ */
+
+#define MSG_PIPELINE_PORT_IN_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts dropped by AH: %" PRIu64 "\n" \
+ "Pkts dropped by other: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_port_in_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_port_in_stats stats;
+ char *pipeline_name;
+ uint32_t port_id;
+ int clear, status;
+
+ if (n_tokens != 7 &&
+ n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[6], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 8) {
+ if (strcmp(tokens[7], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_port_in_stats_read(softnic,
+ pipeline_name,
+ port_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_PORT_IN_STATS,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> enable
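+ *
+ * Example (illustrative):
+ *    pipeline PIPELINE0 port in 0 enable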
+ */
+static void
+cmd_softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "enable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_enable(softnic, pipeline_name, port_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port in <port_id> disable
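+ *
+ * Example (illustrative):
+ *    pipeline PIPELINE0 port in 0 disable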
+ */
+static void
+cmd_softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "disable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+ return;
+ }
+
+ status = softnic_pipeline_port_in_disable(softnic, pipeline_name, port_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> port out <port_id> stats read [clear]
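+ *
+ * Example (illustrative):
+ *    pipeline PIPELINE0 port out 0 stats read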
+ */
+#define MSG_PIPELINE_PORT_OUT_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts dropped by AH: %" PRIu64 "\n" \
+ "Pkts dropped by other: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_port_out_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_port_out_stats stats;
+ char *pipeline_name;
+ uint32_t port_id;
+ int clear, status;
+
+ if (n_tokens != 7 &&
+ n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "out") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[6], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 8) {
+ if (strcmp(tokens[7], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_port_out_stats_read(softnic,
+ pipeline_name,
+ port_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_PORT_OUT_STATS,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> stats read [clear]
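+ *
+ * Example (illustrative):
+ *    pipeline PIPELINE0 table 0 stats read clear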
+ */
+#define MSG_PIPELINE_TABLE_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts in with lookup miss: %" PRIu64 "\n" \
+ "Pkts in with lookup hit dropped by AH: %" PRIu64 "\n" \
+ "Pkts in with lookup hit dropped by others: %" PRIu64 "\n" \
+ "Pkts in with lookup miss dropped by AH: %" PRIu64 "\n" \
+ "Pkts in with lookup miss dropped by others: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_table_stats(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_table_stats stats;
+ char *pipeline_name;
+ uint32_t table_id;
+ int clear, status;
+
+ if (n_tokens != 6 &&
+ n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[5], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 7) {
+ if (strcmp(tokens[6], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = softnic_pipeline_table_stats_read(softnic,
+ pipeline_name,
+ table_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_TABLE_STATS,
+ stats.stats.n_pkts_in,
+ stats.stats.n_pkts_lookup_miss,
+ stats.n_pkts_dropped_by_lkp_hit_ah,
+ stats.n_pkts_dropped_lkp_hit,
+ stats.n_pkts_dropped_by_lkp_miss_ah,
+ stats.n_pkts_dropped_lkp_miss);
+}
+
+/**
+ * <match> ::=
+ *
+ * match
+ * acl
+ * priority <priority>
+ * ipv4 | ipv6 <sa> <sa_depth> <da> <da_depth>
+ * <sp0> <sp1> <dp0> <dp1> <proto>
+ * | array <pos>
+ * | hash
+ * raw <key>
+ * | ipv4_5tuple <sa> <da> <sp> <dp> <proto>
+ * | ipv6_5tuple <sa> <da> <sp> <dp> <proto>
+ * | ipv4_addr <addr>
+ * | ipv6_addr <addr>
+ * | qinq <svlan> <cvlan>
+ * | lpm
+ * ipv4 | ipv6 <addr> <depth>
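+ *
+ * Example match clauses (illustrative addresses, ports and depth):
+ *    match lpm ipv4 100.0.0.0 24
+ *    match hash ipv4_5tuple 100.0.0.1 200.0.0.2 1000 2000 6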
+ */
+struct pkt_key_qinq {
+ uint16_t ethertype_svlan;
+ uint16_t svlan;
+ uint16_t ethertype_cvlan;
+ uint16_t cvlan;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_5tuple {
+ uint8_t time_to_live;
+ uint8_t proto;
+ uint16_t hdr_checksum;
+ uint32_t sa;
+ uint32_t da;
+ uint16_t sp;
+ uint16_t dp;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_5tuple {
+ uint16_t payload_length;
+ uint8_t proto;
+ uint8_t hop_limit;
+ uint8_t sa[16];
+ uint8_t da[16];
+ uint16_t sp;
+ uint16_t dp;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_addr {
+ uint32_t addr;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_addr {
+ uint8_t addr[16];
+} __attribute__((__packed__));
+
+static uint32_t
+parse_match(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size,
+ struct softnic_table_rule_match *m)
+{
+ memset(m, 0, sizeof(*m));
+
+ if (n_tokens < 2)
+ return 0;
+
+ if (strcmp(tokens[0], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+ return 0;
+ }
+
+ if (strcmp(tokens[1], "acl") == 0) {
+ if (n_tokens < 14) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_ACL;
+
+ if (strcmp(tokens[2], "priority") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.priority,
+ tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "priority");
+ return 0;
+ }
+
+ if (strcmp(tokens[4], "ipv4") == 0) {
+ struct in_addr saddr, daddr;
+
+ m->match.acl.ip_version = 1;
+
+ if (softnic_parse_ipv4_addr(tokens[5], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+ m->match.acl.ipv4.sa = rte_be_to_cpu_32(saddr.s_addr);
+
+ if (softnic_parse_ipv4_addr(tokens[7], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+ m->match.acl.ipv4.da = rte_be_to_cpu_32(daddr.s_addr);
+ } else if (strcmp(tokens[4], "ipv6") == 0) {
+ struct in6_addr saddr, daddr;
+
+ m->match.acl.ip_version = 0;
+
+ if (softnic_parse_ipv6_addr(tokens[5], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+ memcpy(m->match.acl.ipv6.sa, saddr.s6_addr, 16);
+
+ if (softnic_parse_ipv6_addr(tokens[7], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+ memcpy(m->match.acl.ipv6.da, daddr.s6_addr, 16);
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.sa_depth,
+ tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa_depth");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint32(&m->match.acl.da_depth,
+ tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da_depth");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.sp0, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp0");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.sp1, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp1");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.dp0, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp0");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&m->match.acl.dp1, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp1");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&m->match.acl.proto, tokens[13]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "proto");
+ return 0;
+ }
+
+ m->match.acl.proto_mask = 0xff;
+
+ return 14;
+ } /* acl */
+
+ if (strcmp(tokens[1], "array") == 0) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_ARRAY;
+
+ if (softnic_parser_read_uint32(&m->match.array.pos, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pos");
+ return 0;
+ }
+
+ return 3;
+ } /* array */
+
+ if (strcmp(tokens[1], "hash") == 0) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_HASH;
+
+ if (strcmp(tokens[2], "raw") == 0) {
+ uint32_t key_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_hex_string(tokens[3],
+ m->match.hash.key, &key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key");
+ return 0;
+ }
+
+ return 4;
+ } /* hash raw */
+
+ if (strcmp(tokens[2], "ipv4_5tuple") == 0) {
+ struct pkt_key_ipv4_5tuple *ipv4 =
+ (struct pkt_key_ipv4_5tuple *)m->match.hash.key;
+ struct in_addr saddr, daddr;
+ uint16_t sp, dp;
+ uint8_t proto;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[3], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[4], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "proto");
+ return 0;
+ }
+
+ ipv4->sa = saddr.s_addr;
+ ipv4->da = daddr.s_addr;
+ ipv4->sp = rte_cpu_to_be_16(sp);
+ ipv4->dp = rte_cpu_to_be_16(dp);
+ ipv4->proto = proto;
+
+ return 8;
+ } /* hash ipv4_5tuple */
+
+ if (strcmp(tokens[2], "ipv6_5tuple") == 0) {
+ struct pkt_key_ipv6_5tuple *ipv6 =
+ (struct pkt_key_ipv6_5tuple *)m->match.hash.key;
+ struct in6_addr saddr, daddr;
+ uint16_t sp, dp;
+ uint8_t proto;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[3], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[4], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "proto");
+ return 0;
+ }
+
+ memcpy(ipv6->sa, saddr.s6_addr, 16);
+ memcpy(ipv6->da, daddr.s6_addr, 16);
+ ipv6->sp = rte_cpu_to_be_16(sp);
+ ipv6->dp = rte_cpu_to_be_16(dp);
+ ipv6->proto = proto;
+
+ return 8;
+ } /* hash ipv6_5tuple */
+
+ if (strcmp(tokens[2], "ipv4_addr") == 0) {
+ struct pkt_key_ipv4_addr *ipv4_addr =
+ (struct pkt_key_ipv4_addr *)m->match.hash.key;
+ struct in_addr addr;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ ipv4_addr->addr = addr.s_addr;
+
+ return 4;
+ } /* hash ipv4_addr */
+
+ if (strcmp(tokens[2], "ipv6_addr") == 0) {
+ struct pkt_key_ipv6_addr *ipv6_addr =
+ (struct pkt_key_ipv6_addr *)m->match.hash.key;
+ struct in6_addr addr;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ memcpy(ipv6_addr->addr, addr.s6_addr, 16);
+
+ return 4;
+ } /* hash ipv6_addr */
+
+ if (strcmp(tokens[2], "qinq") == 0) {
+ struct pkt_key_qinq *qinq =
+ (struct pkt_key_qinq *)m->match.hash.key;
+ uint16_t svlan, cvlan;
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if ((softnic_parser_read_uint16(&svlan, tokens[3]) != 0) ||
+ svlan > 0xFFF) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "svlan");
+ return 0;
+ }
+
+ if ((softnic_parser_read_uint16(&cvlan, tokens[4]) != 0) ||
+ cvlan > 0xFFF) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "cvlan");
+ return 0;
+ }
+
+ qinq->svlan = rte_cpu_to_be_16(svlan);
+ qinq->cvlan = rte_cpu_to_be_16(cvlan);
+
+ return 5;
+ } /* hash qinq */
+
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ } /* hash */
+
+ if (strcmp(tokens[1], "lpm") == 0) {
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_LPM;
+
+ if (strcmp(tokens[2], "ipv4") == 0) {
+ struct in_addr addr;
+
+ m->match.lpm.ip_version = 1;
+
+ if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ m->match.lpm.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+ } else if (strcmp(tokens[2], "ipv6") == 0) {
+ struct in6_addr addr;
+
+ m->match.lpm.ip_version = 0;
+
+ if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ memcpy(m->match.lpm.ipv6, addr.s6_addr, 16);
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return 0;
+ }
+
+ if (softnic_parser_read_uint8(&m->match.lpm.depth, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "depth");
+ return 0;
+ }
+
+ return 5;
+ } /* lpm */
+
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "acl or array or hash or lpm");
+ return 0;
+}
+
+/**
+ * table_action ::=
+ *
+ * action
+ * fwd
+ * drop
+ * | port <port_id>
+ * | meta
+ * | table <table_id>
+ * [balance <out0> ... <out7>]
+ * [meter
+ * tc0 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * [tc1 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * tc2 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * tc3 meter <meter_profile_id> policer g <pa> y <pa> r <pa>]]
+ * [tm subport <subport_id> pipe <pipe_id>]
+ * [encap
+ * ether <da> <sa>
+ * | vlan <da> <sa> <pcp> <dei> <vid>
+ * | qinq <da> <sa> <pcp> <dei> <vid> <pcp> <dei> <vid>
+ * | mpls unicast | multicast
+ * <da> <sa>
+ * label0 <label> <tc> <ttl>
+ * [label1 <label> <tc> <ttl>
+ * [label2 <label> <tc> <ttl>
+ * [label3 <label> <tc> <ttl>]]]
+ * | pppoe <da> <sa> <session_id>]
+ * [nat ipv4 | ipv6 <addr> <port>]
+ * [ttl dec | keep]
+ * [stats]
+ * [time]
+ *
+ * where:
+ * <pa> ::= g | y | r | drop
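+ *
+ * Example (illustrative port id; combines several optional actions):
+ *    action fwd port 0 ttl dec stats time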
+ */
+static uint32_t
+parse_table_action_fwd(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ (strcmp(tokens[0], "fwd") != 0))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens && (strcmp(tokens[0], "drop") == 0)) {
+ a->fwd.action = RTE_PIPELINE_ACTION_DROP;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 1;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "port") == 0)) {
+ uint32_t id;
+
+ if (n_tokens < 2 ||
+ softnic_parser_read_uint32(&id, tokens[1]))
+ return 0;
+
+ a->fwd.action = RTE_PIPELINE_ACTION_PORT;
+ a->fwd.id = id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 2;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "meta") == 0)) {
+ a->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 1;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "table") == 0)) {
+ uint32_t id;
+
+ if (n_tokens < 2 ||
+ softnic_parser_read_uint32(&id, tokens[1]))
+ return 0;
+
+ a->fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ a->fwd.id = id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 2;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_balance(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t i;
+
+ if (n_tokens == 0 ||
+ (strcmp(tokens[0], "balance") != 0))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens < RTE_TABLE_ACTION_LB_TABLE_SIZE)
+ return 0;
+
+ for (i = 0; i < RTE_TABLE_ACTION_LB_TABLE_SIZE; i++)
+ if (softnic_parser_read_uint32(&a->lb.out[i], tokens[i]) != 0)
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_LB;
+ return 1 + RTE_TABLE_ACTION_LB_TABLE_SIZE;
+}
+
+static int
+parse_policer_action(char *token, enum rte_table_action_policer *a)
+{
+ if (strcmp(token, "g") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_GREEN;
+ return 0;
+ }
+
+ if (strcmp(token, "y") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_YELLOW;
+ return 0;
+ }
+
+ if (strcmp(token, "r") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_RED;
+ return 0;
+ }
+
+ if (strcmp(token, "drop") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_DROP;
+ return 0;
+ }
+
+ return -1;
+}
+
+static uint32_t
+parse_table_action_meter_tc(char **tokens,
+ uint32_t n_tokens,
+ struct rte_table_action_mtr_tc_params *mtr)
+{
+ if (n_tokens < 9 ||
+ strcmp(tokens[0], "meter") ||
+ softnic_parser_read_uint32(&mtr->meter_profile_id, tokens[1]) ||
+ strcmp(tokens[2], "policer") ||
+ strcmp(tokens[3], "g") ||
+ parse_policer_action(tokens[4], &mtr->policer[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[5], "y") ||
+ parse_policer_action(tokens[6], &mtr->policer[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[7], "r") ||
+ parse_policer_action(tokens[8], &mtr->policer[e_RTE_METER_RED]))
+ return 0;
+
+ return 9;
+}
+
+static uint32_t
+parse_table_action_meter(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "meter"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens < 10 ||
+ strcmp(tokens[0], "tc0") ||
+ (parse_table_action_meter_tc(tokens + 1,
+ n_tokens - 1,
+ &a->mtr.mtr[0]) == 0))
+ return 0;
+
+ tokens += 10;
+ n_tokens -= 10;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "tc1")) {
+ a->mtr.tc_mask = 1;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ return 1 + 10;
+ }
+
+ if (n_tokens < 30 ||
+ (parse_table_action_meter_tc(tokens + 1,
+ n_tokens - 1, &a->mtr.mtr[1]) == 0) ||
+ strcmp(tokens[10], "tc2") ||
+ (parse_table_action_meter_tc(tokens + 11,
+ n_tokens - 11, &a->mtr.mtr[2]) == 0) ||
+ strcmp(tokens[20], "tc3") ||
+ (parse_table_action_meter_tc(tokens + 21,
+ n_tokens - 21, &a->mtr.mtr[3]) == 0))
+ return 0;
+
+ a->mtr.tc_mask = 0xF;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ return 1 + 10 + 3 * 10;
+}
+
+static uint32_t
+parse_table_action_tm(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t subport_id, pipe_id;
+
+ if (n_tokens < 5 ||
+ strcmp(tokens[0], "tm") ||
+ strcmp(tokens[1], "subport") ||
+ softnic_parser_read_uint32(&subport_id, tokens[2]) ||
+ strcmp(tokens[3], "pipe") ||
+ softnic_parser_read_uint32(&pipe_id, tokens[4]))
+ return 0;
+
+ a->tm.subport_id = subport_id;
+ a->tm.pipe_id = pipe_id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TM;
+ return 5;
+}
+
+static uint32_t
+parse_table_action_encap(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "encap"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ /* ether */
+ if (n_tokens && (strcmp(tokens[0], "ether") == 0)) {
+ if (n_tokens < 3 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.ether.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.ether.ether.sa))
+ return 0;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_ETHER;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 3;
+ }
+
+ /* vlan */
+ if (n_tokens && (strcmp(tokens[0], "vlan") == 0)) {
+ uint32_t pcp, dei, vid;
+
+ if (n_tokens < 6 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.vlan.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.vlan.ether.sa) ||
+ softnic_parser_read_uint32(&pcp, tokens[3]) ||
+ pcp > 0x7 ||
+ softnic_parser_read_uint32(&dei, tokens[4]) ||
+ dei > 0x1 ||
+ softnic_parser_read_uint32(&vid, tokens[5]) ||
+ vid > 0xFFF)
+ return 0;
+
+ a->encap.vlan.vlan.pcp = pcp & 0x7;
+ a->encap.vlan.vlan.dei = dei & 0x1;
+ a->encap.vlan.vlan.vid = vid & 0xFFF;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_VLAN;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 6;
+ }
+
+ /* qinq */
+ if (n_tokens && (strcmp(tokens[0], "qinq") == 0)) {
+ uint32_t svlan_pcp, svlan_dei, svlan_vid;
+ uint32_t cvlan_pcp, cvlan_dei, cvlan_vid;
+
+ if (n_tokens < 9 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.qinq.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.qinq.ether.sa) ||
+ softnic_parser_read_uint32(&svlan_pcp, tokens[3]) ||
+ svlan_pcp > 0x7 ||
+ softnic_parser_read_uint32(&svlan_dei, tokens[4]) ||
+ svlan_dei > 0x1 ||
+ softnic_parser_read_uint32(&svlan_vid, tokens[5]) ||
+ svlan_vid > 0xFFF ||
+ softnic_parser_read_uint32(&cvlan_pcp, tokens[6]) ||
+ cvlan_pcp > 0x7 ||
+ softnic_parser_read_uint32(&cvlan_dei, tokens[7]) ||
+ cvlan_dei > 0x1 ||
+ softnic_parser_read_uint32(&cvlan_vid, tokens[8]) ||
+ cvlan_vid > 0xFFF)
+ return 0;
+
+ a->encap.qinq.svlan.pcp = svlan_pcp & 0x7;
+ a->encap.qinq.svlan.dei = svlan_dei & 0x1;
+ a->encap.qinq.svlan.vid = svlan_vid & 0xFFF;
+ a->encap.qinq.cvlan.pcp = cvlan_pcp & 0x7;
+ a->encap.qinq.cvlan.dei = cvlan_dei & 0x1;
+ a->encap.qinq.cvlan.vid = cvlan_vid & 0xFFF;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_QINQ;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 9;
+ }
+
+ /* mpls */
+ if (n_tokens && (strcmp(tokens[0], "mpls") == 0)) {
+ uint32_t label, tc, ttl;
+
+ if (n_tokens < 8)
+ return 0;
+
+ if (strcmp(tokens[1], "unicast") == 0)
+ a->encap.mpls.unicast = 1;
+ else if (strcmp(tokens[1], "multicast") == 0)
+ a->encap.mpls.unicast = 0;
+ else
+ return 0;
+
+ if (softnic_parse_mac_addr(tokens[2], &a->encap.mpls.ether.da) ||
+ softnic_parse_mac_addr(tokens[3], &a->encap.mpls.ether.sa) ||
+ strcmp(tokens[4], "label0") ||
+ softnic_parser_read_uint32(&label, tokens[5]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[6]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[7]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[0].label = label;
+ a->encap.mpls.mpls[0].tc = tc;
+ a->encap.mpls.mpls[0].ttl = ttl;
+
+ tokens += 8;
+ n_tokens -= 8;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label1")) {
+ a->encap.mpls.mpls_count = 1;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[1].label = label;
+ a->encap.mpls.mpls[1].tc = tc;
+ a->encap.mpls.mpls[1].ttl = ttl;
+
+ tokens += 4;
+ n_tokens -= 4;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label2")) {
+ a->encap.mpls.mpls_count = 2;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[2].label = label;
+ a->encap.mpls.mpls[2].tc = tc;
+ a->encap.mpls.mpls[2].ttl = ttl;
+
+ tokens += 4;
+ n_tokens -= 4;
+
+ if (n_tokens == 0 ||
+ strcmp(tokens[0], "label3")) {
+ a->encap.mpls.mpls_count = 3;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4 + 4;
+ }
+
+ if (n_tokens < 4 ||
+ softnic_parser_read_uint32(&label, tokens[1]) ||
+ label > 0xFFFFF ||
+ softnic_parser_read_uint32(&tc, tokens[2]) ||
+ tc > 0x7 ||
+ softnic_parser_read_uint32(&ttl, tokens[3]) ||
+ ttl > 0x3F)
+ return 0;
+
+ a->encap.mpls.mpls[3].label = label;
+ a->encap.mpls.mpls[3].tc = tc;
+ a->encap.mpls.mpls[3].ttl = ttl;
+
+ a->encap.mpls.mpls_count = 4;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4 + 4 + 4;
+ }
+
+ /* pppoe */
+ if (n_tokens && (strcmp(tokens[0], "pppoe") == 0)) {
+ if (n_tokens < 4 ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.pppoe.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.pppoe.ether.sa) ||
+ softnic_parser_read_uint16(&a->encap.pppoe.pppoe.session_id,
+ tokens[3]))
+ return 0;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_PPPOE;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 4;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_nat(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 4 ||
+ strcmp(tokens[0], "nat"))
+ return 0;
+
+ if (strcmp(tokens[1], "ipv4") == 0) {
+ struct in_addr addr;
+ uint16_t port;
+
+ if (softnic_parse_ipv4_addr(tokens[2], &addr) ||
+ softnic_parser_read_uint16(&port, tokens[3]))
+ return 0;
+
+ a->nat.ip_version = 1;
+ a->nat.addr.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+ a->nat.port = port;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
+ return 4;
+ }
+
+ if (strcmp(tokens[1], "ipv6") == 0) {
+ struct in6_addr addr;
+ uint16_t port;
+
+ if (softnic_parse_ipv6_addr(tokens[2], &addr) ||
+ softnic_parser_read_uint16(&port, tokens[3]))
+ return 0;
+
+ a->nat.ip_version = 0;
+ memcpy(a->nat.addr.ipv6, addr.s6_addr, 16);
+ a->nat.port = port;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
+ return 4;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_ttl(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "ttl"))
+ return 0;
+
+ if (strcmp(tokens[1], "dec") == 0)
+ a->ttl.decrement = 1;
+ else if (strcmp(tokens[1], "keep") == 0)
+ a->ttl.decrement = 0;
+ else
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TTL;
+ return 2;
+}
+
+static uint32_t
+parse_table_action_stats(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 1 ||
+ strcmp(tokens[0], "stats"))
+ return 0;
+
+ a->stats.n_packets = 0;
+ a->stats.n_bytes = 0;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
+ return 1;
+}
+
+static uint32_t
+parse_table_action_time(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 1 ||
+ strcmp(tokens[0], "time"))
+ return 0;
+
+ a->time.time = rte_rdtsc();
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TIME;
+ return 1;
+}
+
+static uint32_t
+parse_table_action(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size,
+ struct softnic_table_rule_action *a)
+{
+ uint32_t n_tokens0 = n_tokens;
+
+ memset(a, 0, sizeof(*a));
+
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "action"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens && (strcmp(tokens[0], "fwd") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_fwd(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action fwd");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "balance") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_balance(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action balance");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "meter") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_meter(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action meter");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "tm") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_tm(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action tm");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "encap") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_encap(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action encap");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "nat") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_nat(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action nat");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "ttl") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_ttl(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action ttl");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "stats") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_stats(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action stats");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "time") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_time(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action time");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens0 - n_tokens == 1) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "action");
+ return 0;
+ }
+
+ return n_tokens0 - n_tokens;
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add
+ * match <match>
+ * action <table_action>
+ */
+static void
+cmd_softnic_pipeline_table_rule_add(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match m;
+ struct softnic_table_rule_action a;
+ char *pipeline_name;
+ void *data;
+ uint32_t table_id, t0, n_tokens_parsed;
+ int status;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ t0 = 6;
+
+ /* match */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ /* action */
+ n_tokens_parsed = parse_table_action(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &a);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ if (t0 != n_tokens) {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_add(softnic,
+ pipeline_name,
+ table_id,
+ &m,
+ &a,
+ &data);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add
+ * match
+ * default
+ * action
+ * fwd
+ * drop
+ * | port <port_id>
+ * | meta
+ * | table <table_id>
+ */
+static void
+cmd_softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_action action;
+ void *data;
+ char *pipeline_name;
+ uint32_t table_id;
+ int status;
+
+ if (n_tokens != 11 &&
+ n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[6], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match");
+ return;
+ }
+
+ if (strcmp(tokens[7], "default") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "default");
+ return;
+ }
+
+ if (strcmp(tokens[8], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "action");
+ return;
+ }
+
+ if (strcmp(tokens[9], "fwd") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "fwd");
+ return;
+ }
+
+ action.action_mask = 1 << RTE_TABLE_ACTION_FWD;
+
+ if (strcmp(tokens[10], "drop") == 0) {
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_DROP;
+ } else if (strcmp(tokens[10], "port") == 0) {
+ uint32_t id;
+
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&id, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_PORT;
+ action.fwd.id = id;
+ } else if (strcmp(tokens[10], "meta") == 0) {
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ } else if (strcmp(tokens[10], "table") == 0) {
+ uint32_t id;
+
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&id, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ action.fwd.id = id;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "drop or port or meta or table");
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_add_default(softnic,
+ pipeline_name,
+ table_id,
+ &action,
+ &data);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add bulk <file_name> <n_rules>
+ *
+ * File <file_name>:
+ * - line format: match <match> action <action>
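+ *
+ * Example (illustrative pipeline name, file path and rule count; each file
+ *   line holds one "match <match> action <action>" rule):
+ *   pipeline PIPELINE0 table 0 rule add bulk /tmp/rules.txt 8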
+ */
+static int
+cli_rule_file_process(const char *file_name,
+ size_t line_len_max,
+ struct softnic_table_rule_match *m,
+ struct softnic_table_rule_action *a,
+ uint32_t *n_rules,
+ uint32_t *line_number,
+ char *out,
+ size_t out_size);
+
+static void
+cmd_softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match *match;
+ struct softnic_table_rule_action *action;
+ void **data;
+ char *pipeline_name, *file_name;
+ uint32_t table_id, n_rules, n_rules_parsed, line_number;
+ int status;
+
+ if (n_tokens != 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[6], "bulk") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "bulk");
+ return;
+ }
+
+ file_name = tokens[7];
+
+ if ((softnic_parser_read_uint32(&n_rules, tokens[8]) != 0) ||
+ n_rules == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ /* Memory allocation. */
+ match = calloc(n_rules, sizeof(struct softnic_table_rule_match));
+ action = calloc(n_rules, sizeof(struct softnic_table_rule_action));
+ data = calloc(n_rules, sizeof(void *));
+ if (match == NULL ||
+ action == NULL ||
+ data == NULL) {
+ snprintf(out, out_size, MSG_OUT_OF_MEMORY);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Load rule file */
+ n_rules_parsed = n_rules;
+ status = cli_rule_file_process(file_name,
+ 1024,
+ match,
+ action,
+ &n_rules_parsed,
+ &line_number,
+ out,
+ out_size);
+ if (status) {
+ snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+ if (n_rules_parsed != n_rules) {
+ snprintf(out, out_size, MSG_FILE_NOT_ENOUGH, file_name);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Rule bulk add */
+ status = softnic_pipeline_table_rule_add_bulk(softnic,
+ pipeline_name,
+ table_id,
+ match,
+ action,
+ data,
+ &n_rules);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Memory free */
+ free(data);
+ free(action);
+ free(match);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule delete
+ * match <match>
+ */
+static void
+cmd_softnic_pipeline_table_rule_delete(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_table_rule_match m;
+ char *pipeline_name;
+ uint32_t table_id, n_tokens_parsed, t0;
+ int status;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ t0 = 6;
+
+ /* match */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ if (n_tokens != t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_delete(softnic,
+ pipeline_name,
+ table_id,
+ &m);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule delete
+ * match
+ * default
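+ *
+ * Example (hypothetical pipeline name and table ID):
+ *   pipeline PIPELINE0 table 0 rule delete match default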
+ */
+static void
+cmd_softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t table_id;
+ int status;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ if (strcmp(tokens[6], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match");
+ return;
+ }
+
+ if (strcmp(tokens[7], "default") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "default");
+ return;
+ }
+
+ status = softnic_pipeline_table_rule_delete_default(softnic,
+ pipeline_name,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read stats [clear]
+ */
+static void
+cmd_softnic_pipeline_table_rule_stats_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> meter profile <meter_profile_id>
+ * add srtcm cir <cir> cbs <cbs> ebs <ebs>
+ * | trtcm cir <cir> pir <pir> cbs <cbs> pbs <pbs>
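+ *
+ * Example (illustrative IDs and srtcm parameters, with rates in bytes/second
+ *   and burst sizes in bytes):
+ *   pipeline PIPELINE0 table 0 meter profile 0 add srtcm cir 46000000 cbs 2048 ebs 2048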
+ */
+static void
+cmd_pipeline_table_meter_profile_add(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_table_action_meter_profile p;
+ char *pipeline_name;
+ uint32_t table_id, meter_profile_id;
+ int status;
+
+ if (n_tokens < 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "meter") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&meter_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[7], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[8], "srtcm") == 0) {
+ if (n_tokens != 15) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return;
+ }
+
+ p.alg = RTE_TABLE_ACTION_METER_SRTCM;
+
+ if (strcmp(tokens[9], "cir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.cir, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cir");
+ return;
+ }
+
+ if (strcmp(tokens[11], "cbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.cbs, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cbs");
+ return;
+ }
+
+ if (strcmp(tokens[13], "ebs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "ebs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.srtcm.ebs, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ebs");
+ return;
+ }
+ } else if (strcmp(tokens[8], "trtcm") == 0) {
+ if (n_tokens != 17) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ p.alg = RTE_TABLE_ACTION_METER_TRTCM;
+
+ if (strcmp(tokens[9], "cir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.cir, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cir");
+ return;
+ }
+
+ if (strcmp(tokens[11], "pir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pir");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.pir, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pir");
+ return;
+ }
+ if (strcmp(tokens[13], "cbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.cbs, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cbs");
+ return;
+ }
+
+ if (strcmp(tokens[15], "pbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pbs");
+ return;
+ }
+
+ if (softnic_parser_read_uint64(&p.trtcm.pbs, tokens[16]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pbs");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = softnic_pipeline_table_mtr_profile_add(softnic,
+ pipeline_name,
+ table_id,
+ meter_profile_id,
+ &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id>
+ * meter profile <meter_profile_id> delete
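+ *
+ * Example (hypothetical pipeline name and IDs):
+ *   pipeline PIPELINE0 table 0 meter profile 0 delete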
+ */
+static void
+cmd_pipeline_table_meter_profile_delete(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t table_id, meter_profile_id;
+ int status;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "meter") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&meter_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[7], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ status = softnic_pipeline_table_mtr_profile_delete(softnic,
+ pipeline_name,
+ table_id,
+ meter_profile_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read meter [clear]
+ */
+static void
+cmd_pipeline_table_rule_meter_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> dscp <file_name>
+ *
+ * File <file_name>:
+ * - exactly 64 lines
+ * - line format: <tc_id> <tc_queue_id> <color>, with <color> as: g | y | r
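+ *
+ * Example (illustrative file path; the file maps DSCP values 0..63, one line
+ *   each, e.g. "0 0 g" for tc_id 0, tc_queue_id 0, green):
+ *   pipeline PIPELINE0 table 0 dscp /tmp/dscp.txt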
+ */
+static int
+load_dscp_table(struct rte_table_action_dscp_table *dscp_table,
+ const char *file_name,
+ uint32_t *line_number)
+{
+ FILE *f = NULL;
+ uint32_t dscp, l;
+
+ /* Check input arguments */
+ if (dscp_table == NULL ||
+ file_name == NULL ||
+ line_number == NULL) {
+ if (line_number)
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Open input file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Read file */
+ for (dscp = 0, l = 1; ; l++) {
+ char line[64];
+ char *tokens[3];
+ enum rte_meter_color color;
+ uint32_t tc_id, tc_queue_id, n_tokens = RTE_DIM(tokens);
+
+ if (fgets(line, sizeof(line), f) == NULL)
+ break;
+
+ if (is_comment(line))
+ continue;
+
+ if (softnic_parse_tokenize_string(line, tokens, &n_tokens)) {
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ if (n_tokens == 0)
+ continue;
+
+ if (dscp >= RTE_DIM(dscp_table->entry) ||
+ n_tokens != RTE_DIM(tokens) ||
+ softnic_parser_read_uint32(&tc_id, tokens[0]) ||
+ tc_id >= RTE_TABLE_ACTION_TC_MAX ||
+ softnic_parser_read_uint32(&tc_queue_id, tokens[1]) ||
+ tc_queue_id >= RTE_TABLE_ACTION_TC_QUEUE_MAX ||
+ (strlen(tokens[2]) != 1)) {
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ switch (tokens[2][0]) {
+ case 'g':
+ case 'G':
+ color = e_RTE_METER_GREEN;
+ break;
+
+ case 'y':
+ case 'Y':
+ color = e_RTE_METER_YELLOW;
+ break;
+
+ case 'r':
+ case 'R':
+ color = e_RTE_METER_RED;
+ break;
+
+ default:
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ dscp_table->entry[dscp].tc_id = tc_id;
+ dscp_table->entry[dscp].tc_queue_id = tc_queue_id;
+ dscp_table->entry[dscp].color = color;
+ dscp++;
+ }
+
+ /* Close file */
+ fclose(f);
+ return 0;
+}
+
+static void
+cmd_pipeline_table_dscp(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_table_action_dscp_table dscp_table;
+ char *pipeline_name, *file_name;
+ uint32_t table_id, line_number;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "dscp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dscp");
+ return;
+ }
+
+ file_name = tokens[5];
+
+ status = load_dscp_table(&dscp_table, file_name, &line_number);
+ if (status) {
+ snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number);
+ return;
+ }
+
+ status = softnic_pipeline_table_dscp_table_update(softnic,
+ pipeline_name,
+ table_id,
+ UINT64_MAX,
+ &dscp_table);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule read ttl [clear]
+ */
+static void
+cmd_softnic_pipeline_table_rule_ttl_read(struct pmd_internals *softnic __rte_unused,
+ char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+/**
+ * thread <thread_id> pipeline <pipeline_name> enable
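+ *
+ * Example (hypothetical thread ID and pipeline name):
+ *   thread 1 pipeline PIPELINE0 enable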
+ */
+static void
+cmd_softnic_thread_pipeline_enable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t thread_id;
+ int status;
+
+ if (n_tokens != 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ if (strcmp(tokens[2], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[3];
+
+ if (strcmp(tokens[4], "enable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+ return;
+ }
+
+ status = softnic_thread_pipeline_enable(softnic, thread_id, pipeline_name);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, "thread pipeline enable");
+ return;
+ }
+}
+
+/**
+ * thread <thread_id> pipeline <pipeline_name> disable
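+ *
+ * Example (hypothetical thread ID and pipeline name):
+ *   thread 1 pipeline PIPELINE0 disable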
+ */
+static void
+cmd_softnic_thread_pipeline_disable(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t thread_id;
+ int status;
+
+ if (n_tokens != 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ if (strcmp(tokens[2], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[3];
+
+ if (strcmp(tokens[4], "disable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+ return;
+ }
+
+ status = softnic_thread_pipeline_disable(softnic, thread_id, pipeline_name);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL,
+ "thread pipeline disable");
+ return;
+ }
+}
+
+void
+softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
+{
+ char *tokens[CMD_MAX_TOKENS];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ struct pmd_internals *softnic = arg;
+ int status;
+
+ if (is_comment(in))
+ return;
+
+ status = softnic_parse_tokenize_string(in, tokens, &n_tokens);
+ if (status) {
+ snprintf(out, out_size, MSG_ARG_TOO_MANY, "");
+ return;
+ }
+
+ if (n_tokens == 0)
+ return;
+
+ if (strcmp(tokens[0], "mempool") == 0) {
+ cmd_mempool(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "link") == 0) {
+ cmd_link(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "swq") == 0) {
+ cmd_swq(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "tmgr") == 0) {
+ if (n_tokens == 2) {
+ cmd_tmgr(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "shaper") == 0) &&
+ (strcmp(tokens[2], "profile") == 0)) {
+ cmd_tmgr_shaper_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "shared") == 0) &&
+ (strcmp(tokens[2], "shaper") == 0)) {
+ cmd_tmgr_shared_shaper(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[1], "node") == 0)) {
+ cmd_tmgr_node(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 2 &&
+ (strcmp(tokens[1], "hierarchy-default") == 0)) {
+ cmd_tmgr_hierarchy_default(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[1], "hierarchy") == 0) &&
+ (strcmp(tokens[2], "commit") == 0)) {
+ cmd_tmgr_hierarchy_commit(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+ }
+
+ if (strcmp(tokens[0], "tap") == 0) {
+ cmd_tap(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "port") == 0) {
+ cmd_port_in_action_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "table") == 0) {
+ cmd_table_action_profile(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "pipeline") == 0) {
+ if (n_tokens >= 3 &&
+ (strcmp(tokens[2], "period") == 0)) {
+ cmd_pipeline(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[4], "bsz") == 0)) {
+ cmd_pipeline_port_in(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "out") == 0) &&
+ (strcmp(tokens[4], "bsz") == 0)) {
+ cmd_pipeline_port_out(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 4 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[3], "match") == 0)) {
+ cmd_pipeline_table(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "table") == 0)) {
+ cmd_pipeline_port_in_table(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "stats") == 0)) {
+ cmd_pipeline_port_in_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "enable") == 0)) {
+ cmd_softnic_pipeline_port_in_enable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "disable") == 0)) {
+ cmd_softnic_pipeline_port_in_disable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 6 &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "out") == 0) &&
+ (strcmp(tokens[5], "stats") == 0)) {
+ cmd_pipeline_port_out_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "stats") == 0)) {
+ cmd_pipeline_table_stats(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "add") == 0) &&
+ (strcmp(tokens[6], "match") == 0)) {
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[7], "default") == 0)) {
+ cmd_softnic_pipeline_table_rule_add_default(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_softnic_pipeline_table_rule_add(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "add") == 0) &&
+ (strcmp(tokens[6], "bulk") == 0)) {
+ cmd_softnic_pipeline_table_rule_add_bulk(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "delete") == 0) &&
+ (strcmp(tokens[6], "match") == 0)) {
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[7], "default") == 0)) {
+ cmd_softnic_pipeline_table_rule_delete_default(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_softnic_pipeline_table_rule_delete(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "stats") == 0)) {
+ cmd_softnic_pipeline_table_rule_stats_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "meter") == 0) &&
+ (strcmp(tokens[5], "profile") == 0) &&
+ (strcmp(tokens[7], "add") == 0)) {
+ cmd_pipeline_table_meter_profile_add(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 8 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "meter") == 0) &&
+ (strcmp(tokens[5], "profile") == 0) &&
+ (strcmp(tokens[7], "delete") == 0)) {
+ cmd_pipeline_table_meter_profile_delete(softnic, tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "meter") == 0)) {
+ cmd_pipeline_table_rule_meter_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "dscp") == 0)) {
+ cmd_pipeline_table_dscp(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 7 &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "ttl") == 0)) {
+ cmd_softnic_pipeline_table_rule_ttl_read(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+ }
+
+ if (strcmp(tokens[0], "thread") == 0) {
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[4], "enable") == 0)) {
+ cmd_softnic_thread_pipeline_enable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if (n_tokens >= 5 &&
+ (strcmp(tokens[4], "disable") == 0)) {
+ cmd_softnic_thread_pipeline_disable(softnic, tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+ }
+
+ snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
+}
+
+int
+softnic_cli_script_process(struct pmd_internals *softnic,
+ const char *file_name,
+ size_t msg_in_len_max,
+ size_t msg_out_len_max)
+{
+ char *msg_in = NULL, *msg_out = NULL;
+ FILE *f = NULL;
+
+ /* Check input arguments */
+ if (file_name == NULL ||
+ (strlen(file_name) == 0) ||
+ msg_in_len_max == 0 ||
+ msg_out_len_max == 0)
+ return -EINVAL;
+
+ msg_in = malloc(msg_in_len_max + 1);
+ msg_out = malloc(msg_out_len_max + 1);
+ if (msg_in == NULL ||
+ msg_out == NULL) {
+ free(msg_out);
+ free(msg_in);
+ return -ENOMEM;
+ }
+
+ /* Open input file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ free(msg_out);
+ free(msg_in);
+ return -EIO;
+ }
+
+ /* Read file */
+ for ( ; ; ) {
+ if (fgets(msg_in, msg_in_len_max + 1, f) == NULL)
+ break;
+
+ printf("%s", msg_in);
+ msg_out[0] = 0;
+
+ softnic_cli_process(msg_in,
+ msg_out,
+ msg_out_len_max,
+ softnic);
+
+ if (strlen(msg_out))
+ printf("%s", msg_out);
+ }
+
+ /* Close file */
+ fclose(f);
+ free(msg_out);
+ free(msg_in);
+ return 0;
+}
+
+static int
+cli_rule_file_process(const char *file_name,
+ size_t line_len_max,
+ struct softnic_table_rule_match *m,
+ struct softnic_table_rule_action *a,
+ uint32_t *n_rules,
+ uint32_t *line_number,
+ char *out,
+ size_t out_size)
+{
+ FILE *f = NULL;
+ char *line = NULL;
+ uint32_t rule_id, line_id;
+ int status = 0;
+
+ /* Check input arguments */
+ if (file_name == NULL ||
+ (strlen(file_name) == 0) ||
+ line_len_max == 0) {
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Memory allocation */
+ line = malloc(line_len_max + 1);
+ if (line == NULL) {
+ *line_number = 0;
+ return -ENOMEM;
+ }
+
+ /* Open file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ *line_number = 0;
+ free(line);
+ return -EIO;
+ }
+
+ /* Read file */
+ for (line_id = 1, rule_id = 0; rule_id < *n_rules; line_id++) {
+ char *tokens[CMD_MAX_TOKENS];
+ uint32_t n_tokens, n_tokens_parsed, t0;
+
+ /* Read next line from file. */
+ if (fgets(line, line_len_max + 1, f) == NULL)
+ break;
+
+ /* Comment. */
+ if (is_comment(line))
+ continue;
+
+ /* Parse line. */
+ n_tokens = RTE_DIM(tokens);
+ status = softnic_parse_tokenize_string(line, tokens, &n_tokens);
+ if (status) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* Empty line. */
+ if (n_tokens == 0)
+ continue;
+ t0 = 0;
+
+ /* Rule match. */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m[rule_id]);
+ if (n_tokens_parsed == 0) {
+ status = -EINVAL;
+ break;
+ }
+ t0 += n_tokens_parsed;
+
+ /* Rule action. */
+ n_tokens_parsed = parse_table_action(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &a[rule_id]);
+ if (n_tokens_parsed == 0) {
+ status = -EINVAL;
+ break;
+ }
+ t0 += n_tokens_parsed;
+
+ /* Line completed. */
+ if (t0 < n_tokens) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* Increment rule count */
+ rule_id++;
+ }
+
+ /* Close file */
+ fclose(f);
+
+ /* Memory free */
+ free(line);
+
+ *n_rules = rule_id;
+ *line_number = line_id;
+ return status;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index 050e3e7e..a25eb874 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -5,79 +5,97 @@
#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
+#include <stddef.h>
#include <stdint.h>
+#include <sys/queue.h>
+#include <rte_mempool.h>
#include <rte_mbuf.h>
+#include <rte_ring.h>
+#include <rte_ethdev.h>
#include <rte_sched.h>
+#include <rte_port_in_action.h>
+#include <rte_table_action.h>
+#include <rte_pipeline.h>
+
#include <rte_ethdev_driver.h>
#include <rte_tm_driver.h>
#include "rte_eth_softnic.h"
+#include "conn.h"
+
+#define NAME_SIZE 64
/**
* PMD Parameters
*/
-enum pmd_feature {
- PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
-};
-
-#ifndef INTRUSIVE
-#define INTRUSIVE 0
-#endif
-
struct pmd_params {
- /** Parameters for the soft device (to be created) */
- struct {
- const char *name; /**< Name */
- uint32_t flags; /**< Flags */
+ const char *name; /**< Name */
+ const char *firmware; /**< Path of the CLI script file. */
+ uint16_t conn_port; /**< TCP port of the CLI connection. */
+ uint32_t cpu_id; /**< CPU ID, also used as the NUMA socket for allocations. */
- /** 0 = Access hard device though API only (potentially slower,
- * but safer);
- * 1 = Access hard device private data structures is allowed
- * (potentially faster).
- */
- int intrusive;
+ /** Traffic Management (TM) */
+ struct {
+ uint32_t n_queues; /**< Number of queues */
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ /**< Queue size per traffic class */
+ } tm;
+};
- /** Traffic Management (TM) */
- struct {
- uint32_t rate; /**< Rate (bytes/second) */
- uint32_t nb_queues; /**< Number of queues */
- uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
- /**< Queue size per traffic class */
- uint32_t enq_bsz; /**< Enqueue burst size */
- uint32_t deq_bsz; /**< Dequeue burst size */
- } tm;
- } soft;
+/**
+ * MEMPOOL
+ */
+struct softnic_mempool_params {
+ uint32_t buffer_size;
+ uint32_t pool_size;
+ uint32_t cache_size;
+};
- /** Parameters for the hard device (existing) */
- struct {
- char *name; /**< Name */
- uint16_t tx_queue_id; /**< TX queue ID */
- } hard;
+struct softnic_mempool {
+ TAILQ_ENTRY(softnic_mempool) node;
+ char name[NAME_SIZE];
+ struct rte_mempool *m;
+ uint32_t buffer_size;
};
+TAILQ_HEAD(softnic_mempool_list, softnic_mempool);
+
/**
- * Default Internals
+ * SWQ
*/
+struct softnic_swq_params {
+ uint32_t size;
+};
-#ifndef DEFAULT_BURST_SIZE
-#define DEFAULT_BURST_SIZE 32
-#endif
+struct softnic_swq {
+ TAILQ_ENTRY(softnic_swq) node;
+ char name[NAME_SIZE];
+ struct rte_ring *r;
+};
-#ifndef FLUSH_COUNT_THRESHOLD
-#define FLUSH_COUNT_THRESHOLD (1 << 17)
-#endif
+TAILQ_HEAD(softnic_swq_list, softnic_swq);
+
+/**
+ * LINK
+ */
+struct softnic_link_params {
+ const char *dev_name;
+ uint16_t port_id; /**< Valid only when *dev_name* is NULL. */
+};
-struct default_internals {
- struct rte_mbuf **pkts;
- uint32_t pkts_len;
- uint32_t txq_pos;
- uint32_t flush_count;
+struct softnic_link {
+ TAILQ_ENTRY(softnic_link) node;
+ char name[NAME_SIZE];
+ uint16_t port_id;
+ uint32_t n_rxq;
+ uint32_t n_txq;
};
+TAILQ_HEAD(softnic_link_list, softnic_link);
+
/**
- * Traffic Management (TM) Internals
+ * TMGR
*/
#ifndef TM_MAX_SUBPORTS
@@ -185,16 +203,281 @@ struct tm_internals {
/** Blueprints */
struct tm_params params;
+};
+
+struct softnic_tmgr_port {
+ TAILQ_ENTRY(softnic_tmgr_port) node;
+ char name[NAME_SIZE];
+ struct rte_sched_port *s;
+};
+
+TAILQ_HEAD(softnic_tmgr_port_list, softnic_tmgr_port);
+
+/**
+ * TAP
+ */
+struct softnic_tap {
+ TAILQ_ENTRY(softnic_tap) node;
+ char name[NAME_SIZE];
+ int fd;
+};
+
+TAILQ_HEAD(softnic_tap_list, softnic_tap);
+
+/**
+ * Input port action
+ */
+struct softnic_port_in_action_profile_params {
+ uint64_t action_mask;
+ struct rte_port_in_action_fltr_config fltr;
+ struct rte_port_in_action_lb_config lb;
+};
+
+struct softnic_port_in_action_profile {
+ TAILQ_ENTRY(softnic_port_in_action_profile) node;
+ char name[NAME_SIZE];
+ struct softnic_port_in_action_profile_params params;
+ struct rte_port_in_action_profile *ap;
+};
+
+TAILQ_HEAD(softnic_port_in_action_profile_list, softnic_port_in_action_profile);
+
+/**
+ * Table action
+ */
+struct softnic_table_action_profile_params {
+ uint64_t action_mask;
+ struct rte_table_action_common_config common;
+ struct rte_table_action_lb_config lb;
+ struct rte_table_action_mtr_config mtr;
+ struct rte_table_action_tm_config tm;
+ struct rte_table_action_encap_config encap;
+ struct rte_table_action_nat_config nat;
+ struct rte_table_action_ttl_config ttl;
+ struct rte_table_action_stats_config stats;
+};
+
+struct softnic_table_action_profile {
+ TAILQ_ENTRY(softnic_table_action_profile) node;
+ char name[NAME_SIZE];
+ struct softnic_table_action_profile_params params;
+ struct rte_table_action_profile *ap;
+};
+
+TAILQ_HEAD(softnic_table_action_profile_list, softnic_table_action_profile);
+
+/**
+ * Pipeline
+ */
+struct pipeline_params {
+ uint32_t timer_period_ms;
+ uint32_t offset_port_id;
+};
+
+enum softnic_port_in_type {
+ PORT_IN_RXQ,
+ PORT_IN_SWQ,
+ PORT_IN_TMGR,
+ PORT_IN_TAP,
+ PORT_IN_SOURCE,
+};
+
+struct softnic_port_in_params {
+ /* Read */
+ enum softnic_port_in_type type;
+ const char *dev_name;
+ union {
+ struct {
+ uint16_t queue_id;
+ } rxq;
+
+ struct {
+ const char *mempool_name;
+ uint32_t mtu;
+ } tap;
+
+ struct {
+ const char *mempool_name;
+ const char *file_name;
+ uint32_t n_bytes_per_pkt;
+ } source;
+ };
+ uint32_t burst_size;
+
+ /* Action */
+ const char *action_profile_name;
+};
+
+enum softnic_port_out_type {
+ PORT_OUT_TXQ,
+ PORT_OUT_SWQ,
+ PORT_OUT_TMGR,
+ PORT_OUT_TAP,
+ PORT_OUT_SINK,
+};
+
+struct softnic_port_out_params {
+ enum softnic_port_out_type type;
+ const char *dev_name;
+ union {
+ struct {
+ uint16_t queue_id;
+ } txq;
- /** Run-time */
- struct rte_sched_port *sched;
- struct rte_mbuf **pkts_enq;
- struct rte_mbuf **pkts_deq;
- uint32_t pkts_enq_len;
- uint32_t txq_pos;
- uint32_t flush_count;
+ struct {
+ const char *file_name;
+ uint32_t max_n_pkts;
+ } sink;
+ };
+ uint32_t burst_size;
+ int retry;
+ uint32_t n_retries;
+};
+
+enum softnic_table_type {
+ TABLE_ACL,
+ TABLE_ARRAY,
+ TABLE_HASH,
+ TABLE_LPM,
+ TABLE_STUB,
+};
+
+struct softnic_table_acl_params {
+ uint32_t n_rules;
+ uint32_t ip_header_offset;
+ int ip_version;
+};
+
+struct softnic_table_array_params {
+ uint32_t n_keys;
+ uint32_t key_offset;
+};
+
+struct softnic_table_hash_params {
+ uint32_t n_keys;
+ uint32_t key_offset;
+ uint32_t key_size;
+ uint8_t *key_mask;
+ uint32_t n_buckets;
+ int extendable_bucket;
+};
+
+struct softnic_table_lpm_params {
+ uint32_t n_rules;
+ uint32_t key_offset;
+ uint32_t key_size;
+};
+
+struct softnic_table_params {
+ /* Match */
+ enum softnic_table_type match_type;
+ union {
+ struct softnic_table_acl_params acl;
+ struct softnic_table_array_params array;
+ struct softnic_table_hash_params hash;
+ struct softnic_table_lpm_params lpm;
+ } match;
+
+ /* Action */
+ const char *action_profile_name;
+};
+
+struct softnic_port_in {
+ struct softnic_port_in_params params;
+ struct softnic_port_in_action_profile *ap;
+ struct rte_port_in_action *a;
+};
+
+struct softnic_table {
+ struct softnic_table_params params;
+ struct softnic_table_action_profile *ap;
+ struct rte_table_action *a;
};
+struct pipeline {
+ TAILQ_ENTRY(pipeline) node;
+ char name[NAME_SIZE];
+
+ struct rte_pipeline *p;
+ struct softnic_port_in port_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct softnic_table table[RTE_PIPELINE_TABLE_MAX];
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_tables;
+
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint32_t timer_period_ms;
+
+ int enabled;
+ uint32_t thread_id;
+ uint32_t cpu_id;
+};
+
+TAILQ_HEAD(pipeline_list, pipeline);
+
+/**
+ * Thread
+ */
+#ifndef THREAD_PIPELINES_MAX
+#define THREAD_PIPELINES_MAX 256
+#endif
+
+#ifndef THREAD_MSGQ_SIZE
+#define THREAD_MSGQ_SIZE 64
+#endif
+
+#ifndef THREAD_TIMER_PERIOD_MS
+#define THREAD_TIMER_PERIOD_MS 100
+#endif
+
+/**
+ * Master thread: data plane thread context
+ */
+struct softnic_thread {
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+
+ uint32_t enabled;
+};
+
+/**
+ * Data plane threads: context
+ */
+#ifndef TABLE_RULE_ACTION_SIZE_MAX
+#define TABLE_RULE_ACTION_SIZE_MAX 2048
+#endif
+
+struct softnic_table_data {
+ struct rte_table_action *a;
+};
+
+struct pipeline_data {
+ struct rte_pipeline *p;
+ struct softnic_table_data table_data[RTE_PIPELINE_TABLE_MAX];
+ uint32_t n_tables;
+
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint64_t timer_period; /* Measured in CPU cycles. */
+ uint64_t time_next;
+
+ uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
+};
+
+struct softnic_thread_data {
+ struct rte_pipeline *p[THREAD_PIPELINES_MAX];
+ uint32_t n_pipelines;
+
+ struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint64_t timer_period; /* Measured in CPU cycles. */
+ uint64_t time_next;
+ uint64_t time_next_min;
+ uint64_t iter;
+} __rte_cache_aligned;
+
/**
* PMD Internals
*/
@@ -202,61 +485,426 @@ struct pmd_internals {
/** Params */
struct pmd_params params;
- /** Soft device */
struct {
- struct default_internals def; /**< Default */
struct tm_internals tm; /**< Traffic Management */
} soft;
- /** Hard device */
- struct {
- uint16_t port_id;
- } hard;
-};
-
-struct pmd_rx_queue {
- /** Hard device */
- struct {
- uint16_t port_id;
- uint16_t rx_queue_id;
- } hard;
+ struct softnic_conn *conn;
+ struct softnic_mempool_list mempool_list;
+ struct softnic_swq_list swq_list;
+ struct softnic_link_list link_list;
+ struct softnic_tmgr_port_list tmgr_port_list;
+ struct softnic_tap_list tap_list;
+ struct softnic_port_in_action_profile_list port_in_action_profile_list;
+ struct softnic_table_action_profile_list table_action_profile_list;
+ struct pipeline_list pipeline_list;
+ struct softnic_thread thread[RTE_MAX_LCORE];
+ struct softnic_thread_data thread_data[RTE_MAX_LCORE];
};
/**
- * Traffic Management (TM) Operation
+ * MEMPOOL
*/
-extern const struct rte_tm_ops pmd_tm_ops;
+int
+softnic_mempool_init(struct pmd_internals *p);
+
+void
+softnic_mempool_free(struct pmd_internals *p);
+
+struct softnic_mempool *
+softnic_mempool_find(struct pmd_internals *p,
+ const char *name);
+struct softnic_mempool *
+softnic_mempool_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_mempool_params *params);
+
+/**
+ * SWQ
+ */
int
-tm_params_check(struct pmd_params *params, uint32_t hard_rate);
+softnic_swq_init(struct pmd_internals *p);
+
+void
+softnic_swq_free(struct pmd_internals *p);
+
+void
+softnic_softnic_swq_free_keep_rxq_txq(struct pmd_internals *p);
+
+struct softnic_swq *
+softnic_swq_find(struct pmd_internals *p,
+ const char *name);
+struct softnic_swq *
+softnic_swq_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_swq_params *params);
+
+/**
+ * LINK
+ */
int
-tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);
+softnic_link_init(struct pmd_internals *p);
void
-tm_free(struct pmd_internals *p);
+softnic_link_free(struct pmd_internals *p);
+
+struct softnic_link *
+softnic_link_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_link *
+softnic_link_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_link_params *params);
+/**
+ * TMGR
+ */
int
-tm_start(struct pmd_internals *p);
+softnic_tmgr_init(struct pmd_internals *p);
void
-tm_stop(struct pmd_internals *p);
+softnic_tmgr_free(struct pmd_internals *p);
-static inline int
-tm_enabled(struct rte_eth_dev *dev)
-{
- struct pmd_internals *p = dev->data->dev_private;
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+ const char *name);
- return (p->params.soft.flags & PMD_FEATURE_TM);
-}
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+ const char *name);
+
+void
+tm_hierarchy_init(struct pmd_internals *p);
+
+void
+tm_hierarchy_free(struct pmd_internals *p);
static inline int
tm_used(struct rte_eth_dev *dev)
{
struct pmd_internals *p = dev->data->dev_private;
- return (p->params.soft.flags & PMD_FEATURE_TM) &&
- p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
+ return p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
}
+extern const struct rte_tm_ops pmd_tm_ops;
+
+/**
+ * TAP
+ */
+int
+softnic_tap_init(struct pmd_internals *p);
+
+void
+softnic_tap_free(struct pmd_internals *p);
+
+struct softnic_tap *
+softnic_tap_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p,
+ const char *name);
+
+/**
+ * Input port action
+ */
+int
+softnic_port_in_action_profile_init(struct pmd_internals *p);
+
+void
+softnic_port_in_action_profile_free(struct pmd_internals *p);
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_port_in_action_profile *
+softnic_port_in_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_port_in_action_profile_params *params);
+
+/**
+ * Table action
+ */
+int
+softnic_table_action_profile_init(struct pmd_internals *p);
+
+void
+softnic_table_action_profile_free(struct pmd_internals *p);
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_table_action_profile *
+softnic_table_action_profile_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_table_action_profile_params *params);
+
+/**
+ * Pipeline
+ */
+int
+softnic_pipeline_init(struct pmd_internals *p);
+
+void
+softnic_pipeline_free(struct pmd_internals *p);
+
+void
+softnic_pipeline_disable_all(struct pmd_internals *p);
+
+struct pipeline *
+softnic_pipeline_find(struct pmd_internals *p, const char *name);
+
+struct pipeline *
+softnic_pipeline_create(struct pmd_internals *p,
+ const char *name,
+ struct pipeline_params *params);
+
+int
+softnic_pipeline_port_in_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_port_in_params *params,
+ int enabled);
+
+int
+softnic_pipeline_port_in_connect_to_table(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ uint32_t table_id);
+
+int
+softnic_pipeline_port_out_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_port_out_params *params);
+
+int
+softnic_pipeline_table_create(struct pmd_internals *p,
+ const char *pipeline_name,
+ struct softnic_table_params *params);
+
+struct softnic_table_rule_match_acl {
+ int ip_version;
+
+ RTE_STD_C11
+ union {
+ struct {
+ uint32_t sa;
+ uint32_t da;
+ } ipv4;
+
+ struct {
+ uint8_t sa[16];
+ uint8_t da[16];
+ } ipv6;
+ };
+
+ uint32_t sa_depth;
+ uint32_t da_depth;
+ uint16_t sp0;
+ uint16_t sp1;
+ uint16_t dp0;
+ uint16_t dp1;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint32_t priority;
+};
+
+struct softnic_table_rule_match_array {
+ uint32_t pos;
+};
+
+#ifndef TABLE_RULE_MATCH_SIZE_MAX
+#define TABLE_RULE_MATCH_SIZE_MAX 256
+#endif
+
+struct softnic_table_rule_match_hash {
+ uint8_t key[TABLE_RULE_MATCH_SIZE_MAX];
+};
+
+struct softnic_table_rule_match_lpm {
+ int ip_version;
+
+ RTE_STD_C11
+ union {
+ uint32_t ipv4;
+ uint8_t ipv6[16];
+ };
+
+ uint8_t depth;
+};
+
+struct softnic_table_rule_match {
+ enum softnic_table_type match_type;
+
+ union {
+ struct softnic_table_rule_match_acl acl;
+ struct softnic_table_rule_match_array array;
+ struct softnic_table_rule_match_hash hash;
+ struct softnic_table_rule_match_lpm lpm;
+ } match;
+};
+
+struct softnic_table_rule_action {
+ uint64_t action_mask;
+ struct rte_table_action_fwd_params fwd;
+ struct rte_table_action_lb_params lb;
+ struct rte_table_action_mtr_params mtr;
+ struct rte_table_action_tm_params tm;
+ struct rte_table_action_encap_params encap;
+ struct rte_table_action_nat_params nat;
+ struct rte_table_action_ttl_params ttl;
+ struct rte_table_action_stats_params stats;
+ struct rte_table_action_time_params time;
+};
+
+int
+softnic_pipeline_port_in_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_port_in_enable(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id);
+
+int
+softnic_pipeline_port_in_disable(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id);
+
+int
+softnic_pipeline_port_out_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_table_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats,
+ int clear);
+
+int
+softnic_pipeline_table_rule_add(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data);
+
+int
+softnic_pipeline_table_rule_add_bulk(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data,
+ uint32_t *n_rules);
+
+int
+softnic_pipeline_table_rule_add_default(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_action *action,
+ void **data);
+
+int
+softnic_pipeline_table_rule_delete(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match);
+
+int
+softnic_pipeline_table_rule_delete_default(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id);
+
+int
+softnic_pipeline_table_rule_stats_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear);
+
+int
+softnic_pipeline_table_mtr_profile_add(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile);
+
+int
+softnic_pipeline_table_mtr_profile_delete(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id);
+
+int
+softnic_pipeline_table_rule_mtr_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear);
+
+int
+softnic_pipeline_table_dscp_table_update(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *dscp_table);
+
+int
+softnic_pipeline_table_rule_ttl_read(struct pmd_internals *p,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear);
+
+/**
+ * Thread
+ */
+int
+softnic_thread_init(struct pmd_internals *p);
+
+void
+softnic_thread_free(struct pmd_internals *p);
+
+int
+softnic_thread_pipeline_enable(struct pmd_internals *p,
+ uint32_t thread_id,
+ const char *pipeline_name);
+
+int
+softnic_thread_pipeline_disable(struct pmd_internals *p,
+ uint32_t thread_id,
+ const char *pipeline_name);
+
+/**
+ * CLI
+ */
+void
+softnic_cli_process(char *in,
+ char *out,
+ size_t out_size,
+ void *arg);
+
+int
+softnic_cli_script_process(struct pmd_internals *softnic,
+ const char *file_name,
+ size_t msg_in_len_max,
+ size_t msg_out_len_max);
+
#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
diff --git a/drivers/net/softnic/rte_eth_softnic_link.c b/drivers/net/softnic/rte_eth_softnic_link.c
new file mode 100644
index 00000000..d669913a
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_link.c
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+int
+softnic_link_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->link_list);
+
+ return 0;
+}
+
+void
+softnic_link_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_link *link;
+
+ link = TAILQ_FIRST(&p->link_list);
+ if (link == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->link_list, link, node);
+ free(link);
+ }
+}
+
+struct softnic_link *
+softnic_link_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_link *link;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(link, &p->link_list, node)
+ if (strcmp(link->name, name) == 0)
+ return link;
+
+ return NULL;
+}
+
+struct softnic_link *
+softnic_link_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_link_params *params)
+{
+ struct rte_eth_dev_info port_info;
+ struct softnic_link *link;
+ uint16_t port_id;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_link_find(p, name) ||
+ params == NULL)
+ return NULL;
+
+ port_id = params->port_id;
+ if (params->dev_name) {
+ int status;
+
+ status = rte_eth_dev_get_port_by_name(params->dev_name,
+ &port_id);
+
+ if (status)
+ return NULL;
+ } else {
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return NULL;
+ }
+
+ rte_eth_dev_info_get(port_id, &port_info);
+
+ /* Node allocation */
+ link = calloc(1, sizeof(struct softnic_link));
+ if (link == NULL)
+ return NULL;
+
+ /* Node fill in */
+ strlcpy(link->name, name, sizeof(link->name));
+ link->port_id = port_id;
+ link->n_rxq = port_info.nb_rx_queues;
+ link->n_txq = port_info.nb_tx_queues;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->link_list, link, node);
+
+ return link;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_mempool.c b/drivers/net/softnic/rte_eth_softnic_mempool.c
new file mode 100644
index 00000000..d5c569f9
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_mempool.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+int
+softnic_mempool_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->mempool_list);
+
+ return 0;
+}
+
+void
+softnic_mempool_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_mempool *mempool;
+
+ mempool = TAILQ_FIRST(&p->mempool_list);
+ if (mempool == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->mempool_list, mempool, node);
+ rte_mempool_free(mempool->m);
+ free(mempool);
+ }
+}
+
+struct softnic_mempool *
+softnic_mempool_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_mempool *mempool;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(mempool, &p->mempool_list, node)
+ if (strcmp(mempool->name, name) == 0)
+ return mempool;
+
+ return NULL;
+}
+
+struct softnic_mempool *
+softnic_mempool_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_mempool_params *params)
+{
+ char mempool_name[NAME_SIZE];
+ struct softnic_mempool *mempool;
+ struct rte_mempool *m;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_mempool_find(p, name) ||
+ params == NULL ||
+ params->buffer_size < BUFFER_SIZE_MIN ||
+ params->pool_size == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(mempool_name, sizeof(mempool_name), "%s_%s",
+ p->params.name,
+ name);
+
+ m = rte_pktmbuf_pool_create(mempool_name,
+ params->pool_size,
+ params->cache_size,
+ 0,
+ params->buffer_size - sizeof(struct rte_mbuf),
+ p->params.cpu_id);
+
+ if (m == NULL)
+ return NULL;
+
+ /* Node allocation */
+ mempool = calloc(1, sizeof(struct softnic_mempool));
+ if (mempool == NULL) {
+ rte_mempool_free(m);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(mempool->name, name, sizeof(mempool->name));
+ mempool->m = m;
+ mempool->buffer_size = params->buffer_size;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->mempool_list, mempool, node);
+
+ return mempool;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_pipeline.c b/drivers/net/softnic/rte_eth_softnic_pipeline.c
new file mode 100644
index 00000000..45136a4a
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_pipeline.c
@@ -0,0 +1,966 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include <rte_string_fns.h>
+#include <rte_port_ethdev.h>
+#include <rte_port_ring.h>
+#include <rte_port_source_sink.h>
+#include <rte_port_fd.h>
+#include <rte_port_sched.h>
+
+#include <rte_table_acl.h>
+#include <rte_table_array.h>
+#include <rte_table_hash.h>
+#include <rte_table_lpm.h>
+#include <rte_table_lpm_ipv6.h>
+#include <rte_table_stub.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#include "hash_func.h"
+
+#ifndef PIPELINE_MSGQ_SIZE
+#define PIPELINE_MSGQ_SIZE 64
+#endif
+
+#ifndef TABLE_LPM_NUMBER_TBL8
+#define TABLE_LPM_NUMBER_TBL8 256
+#endif
+
+int
+softnic_pipeline_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->pipeline_list);
+
+ return 0;
+}
+
+void
+softnic_pipeline_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct pipeline *pipeline;
+
+ pipeline = TAILQ_FIRST(&p->pipeline_list);
+ if (pipeline == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->pipeline_list, pipeline, node);
+ rte_ring_free(pipeline->msgq_req);
+ rte_ring_free(pipeline->msgq_rsp);
+ rte_pipeline_free(pipeline->p);
+ free(pipeline);
+ }
+}
+
+void
+softnic_pipeline_disable_all(struct pmd_internals *p)
+{
+ struct pipeline *pipeline;
+
+ TAILQ_FOREACH(pipeline, &p->pipeline_list, node)
+ if (pipeline->enabled)
+ softnic_thread_pipeline_disable(p,
+ pipeline->thread_id,
+ pipeline->name);
+}
+
+struct pipeline *
+softnic_pipeline_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct pipeline *pipeline;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(pipeline, &p->pipeline_list, node)
+ if (strcmp(name, pipeline->name) == 0)
+ return pipeline;
+
+ return NULL;
+}
+
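+/* A pipeline bundles three resources that are created together below and
+ * freed together in softnic_pipeline_free(): a request ring ("...-REQ")
+ * and a response ring ("...-RSP") for master/data plane messaging, plus
+ * the underlying rte_pipeline object.
+ */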
+struct pipeline *
+softnic_pipeline_create(struct pmd_internals *softnic,
+ const char *name,
+ struct pipeline_params *params)
+{
+ char resource_name[NAME_MAX];
+ struct rte_pipeline_params pp;
+ struct pipeline *pipeline;
+ struct rte_pipeline *p;
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_pipeline_find(softnic, name) ||
+ params == NULL ||
+ params->timer_period_ms == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(resource_name, sizeof(resource_name), "%s-%s-REQ",
+ softnic->params.name,
+ name);
+
+ msgq_req = rte_ring_create(resource_name,
+ PIPELINE_MSGQ_SIZE,
+ softnic->params.cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (msgq_req == NULL)
+ return NULL;
+
+ snprintf(resource_name, sizeof(resource_name), "%s-%s-RSP",
+ softnic->params.name,
+ name);
+
+ msgq_rsp = rte_ring_create(resource_name,
+ PIPELINE_MSGQ_SIZE,
+ softnic->params.cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (msgq_rsp == NULL) {
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ snprintf(resource_name, sizeof(resource_name), "%s_%s",
+ softnic->params.name,
+ name);
+
+ pp.name = resource_name;
+ pp.socket_id = (int)softnic->params.cpu_id;
+ pp.offset_port_id = params->offset_port_id;
+
+ p = rte_pipeline_create(&pp);
+ if (p == NULL) {
+ rte_ring_free(msgq_rsp);
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ /* Node allocation */
+ pipeline = calloc(1, sizeof(struct pipeline));
+ if (pipeline == NULL) {
+ rte_pipeline_free(p);
+ rte_ring_free(msgq_rsp);
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(pipeline->name, name, sizeof(pipeline->name));
+ pipeline->p = p;
+ pipeline->n_ports_in = 0;
+ pipeline->n_ports_out = 0;
+ pipeline->n_tables = 0;
+ pipeline->msgq_req = msgq_req;
+ pipeline->msgq_rsp = msgq_rsp;
+ pipeline->timer_period_ms = params->timer_period_ms;
+ pipeline->enabled = 0;
+ pipeline->cpu_id = softnic->params.cpu_id;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&softnic->pipeline_list, pipeline, node);
+
+ return pipeline;
+}
+
+int
+softnic_pipeline_port_in_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_port_in_params *params,
+ int enabled)
+{
+ struct rte_pipeline_port_in_params p;
+
+ union {
+ struct rte_port_ethdev_reader_params ethdev;
+ struct rte_port_ring_reader_params ring;
+ struct rte_port_sched_reader_params sched;
+ struct rte_port_fd_reader_params fd;
+ struct rte_port_source_params source;
+ } pp;
+
+ struct pipeline *pipeline;
+ struct softnic_port_in *port_in;
+ struct softnic_port_in_action_profile *ap;
+ struct rte_port_in_action *action;
+ uint32_t port_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL ||
+ params->burst_size == 0 ||
+ params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ ap = NULL;
+ if (params->action_profile_name) {
+ ap = softnic_port_in_action_profile_find(softnic,
+ params->action_profile_name);
+ if (ap == NULL)
+ return -1;
+ }
+
+ switch (params->type) {
+ case PORT_IN_RXQ:
+ {
+ struct softnic_link *link;
+
+ link = softnic_link_find(softnic, params->dev_name);
+ if (link == NULL)
+ return -1;
+
+ if (params->rxq.queue_id >= link->n_rxq)
+ return -1;
+
+ pp.ethdev.port_id = link->port_id;
+ pp.ethdev.queue_id = params->rxq.queue_id;
+
+ p.ops = &rte_port_ethdev_reader_ops;
+ p.arg_create = &pp.ethdev;
+ break;
+ }
+
+ case PORT_IN_SWQ:
+ {
+ struct softnic_swq *swq;
+
+ swq = softnic_swq_find(softnic, params->dev_name);
+ if (swq == NULL)
+ return -1;
+
+ pp.ring.ring = swq->r;
+
+ p.ops = &rte_port_ring_reader_ops;
+ p.arg_create = &pp.ring;
+ break;
+ }
+
+ case PORT_IN_TMGR:
+ {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name);
+ if (tmgr_port == NULL)
+ return -1;
+
+ pp.sched.sched = tmgr_port->s;
+
+ p.ops = &rte_port_sched_reader_ops;
+ p.arg_create = &pp.sched;
+ break;
+ }
+
+ case PORT_IN_TAP:
+ {
+ struct softnic_tap *tap;
+ struct softnic_mempool *mempool;
+
+ tap = softnic_tap_find(softnic, params->dev_name);
+ mempool = softnic_mempool_find(softnic, params->tap.mempool_name);
+ if (tap == NULL || mempool == NULL)
+ return -1;
+
+ pp.fd.fd = tap->fd;
+ pp.fd.mempool = mempool->m;
+ pp.fd.mtu = params->tap.mtu;
+
+ p.ops = &rte_port_fd_reader_ops;
+ p.arg_create = &pp.fd;
+ break;
+ }
+
+ case PORT_IN_SOURCE:
+ {
+ struct softnic_mempool *mempool;
+
+ mempool = softnic_mempool_find(softnic, params->source.mempool_name);
+ if (mempool == NULL)
+ return -1;
+
+ pp.source.mempool = mempool->m;
+ pp.source.file_name = params->source.file_name;
+ pp.source.n_bytes_per_pkt = params->source.n_bytes_per_pkt;
+
+ p.ops = &rte_port_source_ops;
+ p.arg_create = &pp.source;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ p.burst_size = params->burst_size;
+
+ /* Resource create */
+ action = NULL;
+ p.f_action = NULL;
+ p.arg_ah = NULL;
+
+ if (ap) {
+ action = rte_port_in_action_create(ap->ap,
+ softnic->params.cpu_id);
+ if (action == NULL)
+ return -1;
+
+ status = rte_port_in_action_params_get(action,
+ &p);
+ if (status) {
+ rte_port_in_action_free(action);
+ return -1;
+ }
+ }
+
+ status = rte_pipeline_port_in_create(pipeline->p,
+ &p,
+ &port_id);
+ if (status) {
+ rte_port_in_action_free(action);
+ return -1;
+ }
+
+ if (enabled)
+ rte_pipeline_port_in_enable(pipeline->p, port_id);
+
+ /* Pipeline */
+ port_in = &pipeline->port_in[pipeline->n_ports_in];
+ memcpy(&port_in->params, params, sizeof(*params));
+ port_in->ap = ap;
+ port_in->a = action;
+ pipeline->n_ports_in++;
+
+ return 0;
+}
+
+int
+softnic_pipeline_port_in_connect_to_table(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ uint32_t table_id)
+{
+ struct pipeline *pipeline;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL ||
+ port_id >= pipeline->n_ports_in ||
+ table_id >= pipeline->n_tables)
+ return -1;
+
+ /* Resource */
+ status = rte_pipeline_port_in_connect_to_table(pipeline->p,
+ port_id,
+ table_id);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_out_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_port_out_params *params)
+{
+ struct rte_pipeline_port_out_params p;
+
+ union {
+ struct rte_port_ethdev_writer_params ethdev;
+ struct rte_port_ring_writer_params ring;
+ struct rte_port_sched_writer_params sched;
+ struct rte_port_fd_writer_params fd;
+ struct rte_port_sink_params sink;
+ } pp;
+
+ union {
+ struct rte_port_ethdev_writer_nodrop_params ethdev;
+ struct rte_port_ring_writer_nodrop_params ring;
+ struct rte_port_fd_writer_nodrop_params fd;
+ } pp_nodrop;
+
+ struct pipeline *pipeline;
+ uint32_t port_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+ memset(&pp_nodrop, 0, sizeof(pp_nodrop));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL ||
+ params->burst_size == 0 ||
+ params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ switch (params->type) {
+ case PORT_OUT_TXQ:
+ {
+ struct softnic_link *link;
+
+ link = softnic_link_find(softnic, params->dev_name);
+ if (link == NULL)
+ return -1;
+
+ if (params->txq.queue_id >= link->n_txq)
+ return -1;
+
+ pp.ethdev.port_id = link->port_id;
+ pp.ethdev.queue_id = params->txq.queue_id;
+ pp.ethdev.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.ethdev.port_id = link->port_id;
+ pp_nodrop.ethdev.queue_id = params->txq.queue_id;
+ pp_nodrop.ethdev.tx_burst_sz = params->burst_size;
+ pp_nodrop.ethdev.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_ethdev_writer_ops;
+ p.arg_create = &pp.ethdev;
+ } else {
+ p.ops = &rte_port_ethdev_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.ethdev;
+ }
+ break;
+ }
+
+ case PORT_OUT_SWQ:
+ {
+ struct softnic_swq *swq;
+
+ swq = softnic_swq_find(softnic, params->dev_name);
+ if (swq == NULL)
+ return -1;
+
+ pp.ring.ring = swq->r;
+ pp.ring.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.ring.ring = swq->r;
+ pp_nodrop.ring.tx_burst_sz = params->burst_size;
+ pp_nodrop.ring.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_ring_writer_ops;
+ p.arg_create = &pp.ring;
+ } else {
+ p.ops = &rte_port_ring_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.ring;
+ }
+ break;
+ }
+
+ case PORT_OUT_TMGR:
+ {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name);
+ if (tmgr_port == NULL)
+ return -1;
+
+ pp.sched.sched = tmgr_port->s;
+ pp.sched.tx_burst_sz = params->burst_size;
+
+ p.ops = &rte_port_sched_writer_ops;
+ p.arg_create = &pp.sched;
+ break;
+ }
+
+ case PORT_OUT_TAP:
+ {
+ struct softnic_tap *tap;
+
+ tap = softnic_tap_find(softnic, params->dev_name);
+ if (tap == NULL)
+ return -1;
+
+ pp.fd.fd = tap->fd;
+ pp.fd.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.fd.fd = tap->fd;
+ pp_nodrop.fd.tx_burst_sz = params->burst_size;
+ pp_nodrop.fd.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_fd_writer_ops;
+ p.arg_create = &pp.fd;
+ } else {
+ p.ops = &rte_port_fd_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.fd;
+ }
+ break;
+ }
+
+ case PORT_OUT_SINK:
+ {
+ pp.sink.file_name = params->sink.file_name;
+ pp.sink.max_n_pkts = params->sink.max_n_pkts;
+
+ p.ops = &rte_port_sink_ops;
+ p.arg_create = &pp.sink;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ p.f_action = NULL;
+ p.arg_ah = NULL;
+
+ /* Resource create */
+ status = rte_pipeline_port_out_create(pipeline->p,
+ &p,
+ &port_id);
+
+ if (status)
+ return -1;
+
+ /* Pipeline */
+ pipeline->n_ports_out++;
+
+ return 0;
+}
+
+static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 3,
+ .offset = sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = offsetof(struct ipv6_hdr, proto),
+ },
+
+ /* Source IP address (IPv6) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = offsetof(struct ipv6_hdr, src_addr[0]),
+ },
+
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = offsetof(struct ipv6_hdr, src_addr[4]),
+ },
+
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = offsetof(struct ipv6_hdr, src_addr[8]),
+ },
+
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = offsetof(struct ipv6_hdr, src_addr[12]),
+ },
+
+ /* Destination IP address (IPv6) */
+ [5] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 5,
+ .input_index = 5,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[0]),
+ },
+
+ [6] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 6,
+ .input_index = 6,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[4]),
+ },
+
+ [7] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 7,
+ .input_index = 7,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[8]),
+ },
+
+ [8] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 8,
+ .input_index = 8,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[12]),
+ },
+
+ /* Source Port */
+ [9] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 9,
+ .input_index = 9,
+ .offset = sizeof(struct ipv6_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [10] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 10,
+ .input_index = 9,
+ .offset = sizeof(struct ipv6_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
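+
+/* Note: in both field formats above, the 16-bit source and destination
+ * ports share one input_index (3 for IPv4, 9 for IPv6) because the ACL
+ * library groups match input into 4-byte words.
+ */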
+
+int
+softnic_pipeline_table_create(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ struct softnic_table_params *params)
+{
+ char name[NAME_MAX];
+ struct rte_pipeline_table_params p;
+
+ union {
+ struct rte_table_acl_params acl;
+ struct rte_table_array_params array;
+ struct rte_table_hash_params hash;
+ struct rte_table_lpm_params lpm;
+ struct rte_table_lpm_ipv6_params lpm_ipv6;
+ } pp;
+
+ struct pipeline *pipeline;
+ struct softnic_table *table;
+ struct softnic_table_action_profile *ap;
+ struct rte_table_action *action;
+ uint32_t table_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ params == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL ||
+ pipeline->n_tables >= RTE_PIPELINE_TABLE_MAX)
+ return -1;
+
+ ap = NULL;
+ if (params->action_profile_name) {
+ ap = softnic_table_action_profile_find(softnic,
+ params->action_profile_name);
+ if (ap == NULL)
+ return -1;
+ }
+
+ snprintf(name, NAME_MAX, "%s_%s_table%u",
+ softnic->params.name, pipeline_name, pipeline->n_tables);
+
+ switch (params->match_type) {
+ case TABLE_ACL:
+ {
+ uint32_t ip_header_offset = params->match.acl.ip_header_offset -
+ (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
+ uint32_t i;
+
+ if (params->match.acl.n_rules == 0)
+ return -1;
+
+ pp.acl.name = name;
+ pp.acl.n_rules = params->match.acl.n_rules;
+ if (params->match.acl.ip_version) {
+ memcpy(&pp.acl.field_format,
+ &table_acl_field_format_ipv4,
+ sizeof(table_acl_field_format_ipv4));
+ pp.acl.n_rule_fields =
+ RTE_DIM(table_acl_field_format_ipv4);
+ } else {
+ memcpy(&pp.acl.field_format,
+ &table_acl_field_format_ipv6,
+ sizeof(table_acl_field_format_ipv6));
+ pp.acl.n_rule_fields =
+ RTE_DIM(table_acl_field_format_ipv6);
+ }
+
+ for (i = 0; i < pp.acl.n_rule_fields; i++)
+ pp.acl.field_format[i].offset += ip_header_offset;
+
+ p.ops = &rte_table_acl_ops;
+ p.arg_create = &pp.acl;
+ break;
+ }
+
+ case TABLE_ARRAY:
+ {
+ if (params->match.array.n_keys == 0)
+ return -1;
+
+ pp.array.n_entries = params->match.array.n_keys;
+ pp.array.offset = params->match.array.key_offset;
+
+ p.ops = &rte_table_array_ops;
+ p.arg_create = &pp.array;
+ break;
+ }
+
+ case TABLE_HASH:
+ {
+ struct rte_table_ops *ops;
+ rte_table_hash_op_hash f_hash;
+
+ if (params->match.hash.n_keys == 0)
+ return -1;
+
+ switch (params->match.hash.key_size) {
+ case 8:
+ f_hash = hash_default_key8;
+ break;
+ case 16:
+ f_hash = hash_default_key16;
+ break;
+ case 24:
+ f_hash = hash_default_key24;
+ break;
+ case 32:
+ f_hash = hash_default_key32;
+ break;
+ case 40:
+ f_hash = hash_default_key40;
+ break;
+ case 48:
+ f_hash = hash_default_key48;
+ break;
+ case 56:
+ f_hash = hash_default_key56;
+ break;
+ case 64:
+ f_hash = hash_default_key64;
+ break;
+ default:
+ return -1;
+ }
+
+ pp.hash.name = name;
+ pp.hash.key_size = params->match.hash.key_size;
+ pp.hash.key_offset = params->match.hash.key_offset;
+ pp.hash.key_mask = params->match.hash.key_mask;
+ pp.hash.n_keys = params->match.hash.n_keys;
+ pp.hash.n_buckets = params->match.hash.n_buckets;
+ pp.hash.f_hash = f_hash;
+ pp.hash.seed = 0;
+
+ if (params->match.hash.extendable_bucket)
+ switch (params->match.hash.key_size) {
+ case 8:
+ ops = &rte_table_hash_key8_ext_ops;
+ break;
+ case 16:
+ ops = &rte_table_hash_key16_ext_ops;
+ break;
+ default:
+ ops = &rte_table_hash_ext_ops;
+ }
+ else
+ switch (params->match.hash.key_size) {
+ case 8:
+ ops = &rte_table_hash_key8_lru_ops;
+ break;
+ case 16:
+ ops = &rte_table_hash_key16_lru_ops;
+ break;
+ default:
+ ops = &rte_table_hash_lru_ops;
+ }
+
+ p.ops = ops;
+ p.arg_create = &pp.hash;
+ break;
+ }
+
+ case TABLE_LPM:
+ {
+ if (params->match.lpm.n_rules == 0)
+ return -1;
+
+ switch (params->match.lpm.key_size) {
+ case 4:
+ {
+ pp.lpm.name = name;
+ pp.lpm.n_rules = params->match.lpm.n_rules;
+ pp.lpm.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
+ pp.lpm.flags = 0;
+ pp.lpm.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ pp.lpm.offset = params->match.lpm.key_offset;
+
+ p.ops = &rte_table_lpm_ops;
+ p.arg_create = &pp.lpm;
+ break;
+ }
+
+ case 16:
+ {
+ pp.lpm_ipv6.name = name;
+ pp.lpm_ipv6.n_rules = params->match.lpm.n_rules;
+ pp.lpm_ipv6.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
+ pp.lpm_ipv6.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ pp.lpm_ipv6.offset = params->match.lpm.key_offset;
+
+ p.ops = &rte_table_lpm_ipv6_ops;
+ p.arg_create = &pp.lpm_ipv6;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ break;
+ }
+
+ case TABLE_STUB:
+ {
+ p.ops = &rte_table_stub_ops;
+ p.arg_create = NULL;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ /* Resource create */
+ action = NULL;
+ p.f_action_hit = NULL;
+ p.f_action_miss = NULL;
+ p.arg_ah = NULL;
+
+ if (ap) {
+ action = rte_table_action_create(ap->ap,
+ softnic->params.cpu_id);
+ if (action == NULL)
+ return -1;
+
+ status = rte_table_action_table_params_get(action,
+ &p);
+ if (status ||
+ ((p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry)) >
+ TABLE_RULE_ACTION_SIZE_MAX)) {
+ rte_table_action_free(action);
+ return -1;
+ }
+ }
+
+ if (params->match_type == TABLE_LPM) {
+ if (params->match.lpm.key_size == 4)
+ pp.lpm.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+
+ if (params->match.lpm.key_size == 16)
+ pp.lpm_ipv6.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ }
+
+ status = rte_pipeline_table_create(pipeline->p,
+ &p,
+ &table_id);
+ if (status) {
+ rte_table_action_free(action);
+ return -1;
+ }
+
+ /* Pipeline */
+ table = &pipeline->table[pipeline->n_tables];
+ memcpy(&table->params, params, sizeof(*params));
+ table->ap = ap;
+ table->a = action;
+ pipeline->n_tables++;
+
+ return 0;
+}
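+
+/* For LPM tables, entry_unique_size is assigned twice: once in the
+ * switch above, while p.action_data_size is still zero, and again after
+ * rte_table_action_table_params_get() has filled in the actual action
+ * data size.
+ */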
diff --git a/drivers/net/softnic/rte_eth_softnic_swq.c b/drivers/net/softnic/rte_eth_softnic_swq.c
new file mode 100644
index 00000000..2083d0a9
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_swq.c
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+#include <rte_tailq.h>
+
+#include "rte_eth_softnic_internals.h"
+
+int
+softnic_swq_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->swq_list);
+
+ return 0;
+}
+
+void
+softnic_swq_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_swq *swq;
+
+ swq = TAILQ_FIRST(&p->swq_list);
+ if (swq == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->swq_list, swq, node);
+ rte_ring_free(swq->r);
+ free(swq);
+ }
+}
+
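+/* Same as softnic_swq_free(), except that the rings whose names start
+ * with "RXQ" or "TXQ" (the queues backing the device RX/TX path) are
+ * kept.
+ */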
+void
+softnic_softnic_swq_free_keep_rxq_txq(struct pmd_internals *p)
+{
+ struct softnic_swq *swq, *tswq;
+
+ TAILQ_FOREACH_SAFE(swq, &p->swq_list, node, tswq) {
+ if ((strncmp(swq->name, "RXQ", strlen("RXQ")) == 0) ||
+ (strncmp(swq->name, "TXQ", strlen("TXQ")) == 0))
+ continue;
+
+ TAILQ_REMOVE(&p->swq_list, swq, node);
+ rte_ring_free(swq->r);
+ free(swq);
+ }
+}
+
+struct softnic_swq *
+softnic_swq_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_swq *swq;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(swq, &p->swq_list, node)
+ if (strcmp(swq->name, name) == 0)
+ return swq;
+
+ return NULL;
+}
+
+struct softnic_swq *
+softnic_swq_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_swq_params *params)
+{
+ char ring_name[NAME_SIZE];
+ struct softnic_swq *swq;
+ struct rte_ring *r;
+ unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_swq_find(p, name) ||
+ params == NULL ||
+ params->size == 0)
+ return NULL;
+
+ /* Resource create */
+ snprintf(ring_name, sizeof(ring_name), "%s_%s",
+ p->params.name,
+ name);
+
+ r = rte_ring_create(ring_name,
+ params->size,
+ p->params.cpu_id,
+ flags);
+
+ if (r == NULL)
+ return NULL;
+
+ /* Node allocation */
+ swq = calloc(1, sizeof(struct softnic_swq));
+ if (swq == NULL) {
+ rte_ring_free(r);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(swq->name, name, sizeof(swq->name));
+ swq->r = r;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->swq_list, swq, node);
+
+ return swq;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_tap.c b/drivers/net/softnic/rte_eth_softnic_tap.c
new file mode 100644
index 00000000..bcc23a9f
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_tap.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <netinet/in.h>
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#endif
+#include <sys/ioctl.h>
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
+#define TAP_DEV "/dev/net/tun"
+
+int
+softnic_tap_init(struct pmd_internals *p)
+{
+ TAILQ_INIT(&p->tap_list);
+
+ return 0;
+}
+
+void
+softnic_tap_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_tap *tap;
+
+ tap = TAILQ_FIRST(&p->tap_list);
+ if (tap == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->tap_list, tap, node);
+ close(tap->fd);
+ free(tap);
+ }
+}
+
+struct softnic_tap *
+softnic_tap_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tap *tap;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(tap, &p->tap_list, node)
+ if (strcmp(tap->name, name) == 0)
+ return tap;
+
+ return NULL;
+}
+
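+/* TAP support is Linux-only; on other execution environments build a
+ * stub that always fails, so callers simply get NULL back.
+ */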
+#ifndef RTE_EXEC_ENV_LINUXAPP
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p __rte_unused,
+ const char *name __rte_unused)
+{
+ return NULL;
+}
+
+#else
+
+struct softnic_tap *
+softnic_tap_create(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tap *tap;
+ struct ifreq ifr;
+ int fd, status;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_tap_find(p, name))
+ return NULL;
+
+ /* Resource create */
+ fd = open(TAP_DEV, O_RDWR | O_NONBLOCK);
+ if (fd < 0)
+ return NULL;
+
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);
+
+ status = ioctl(fd, TUNSETIFF, (void *)&ifr);
+ if (status < 0) {
+ close(fd);
+ return NULL;
+ }
+
+ /* Node allocation */
+ tap = calloc(1, sizeof(struct softnic_tap));
+ if (tap == NULL) {
+ close(fd);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(tap->name, name, sizeof(tap->name));
+ tap->fd = fd;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->tap_list, tap, node);
+
+ return tap;
+}
+
+#endif
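+
+/*
+ * Illustrative usage (sketch only, not part of this file; "TAP0" is a
+ * made-up name): the returned node's fd is what the PORT_IN_TAP and
+ * PORT_OUT_TAP port types consume:
+ *
+ *	struct softnic_tap *tap = softnic_tap_create(p, "TAP0");
+ *	if (tap != NULL)
+ *		printf("tap fd = %d\n", tap->fd);
+ */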
diff --git a/drivers/net/softnic/rte_eth_softnic_thread.c b/drivers/net/softnic/rte_eth_softnic_thread.c
new file mode 100644
index 00000000..8a150903
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_thread.c
@@ -0,0 +1,2929 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_ring.h>
+
+#include <rte_table_acl.h>
+#include <rte_table_array.h>
+#include <rte_table_hash.h>
+#include <rte_table_lpm.h>
+#include <rte_table_lpm_ipv6.h>
+#include "rte_eth_softnic_internals.h"
+
+/**
+ * Master thread: data plane thread init and free
+ */
+void
+softnic_thread_free(struct pmd_internals *softnic)
+{
+ uint32_t i;
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ struct softnic_thread *t = &softnic->thread[i];
+
+ /* MSGQs */
+ if (t->msgq_req)
+ rte_ring_free(t->msgq_req);
+
+ if (t->msgq_rsp)
+ rte_ring_free(t->msgq_rsp);
+ }
+}
+
+int
+softnic_thread_init(struct pmd_internals *softnic)
+{
+ uint32_t i;
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ char ring_name[NAME_MAX];
+ struct rte_ring *msgq_req, *msgq_rsp;
+ struct softnic_thread *t = &softnic->thread[i];
+ struct softnic_thread_data *t_data = &softnic->thread_data[i];
+ uint32_t cpu_id = rte_lcore_to_socket_id(i);
+
+ /* MSGQs */
+ snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ",
+ softnic->params.name,
+ i);
+
+ msgq_req = rte_ring_create(ring_name,
+ THREAD_MSGQ_SIZE,
+ cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (msgq_req == NULL) {
+ softnic_thread_free(softnic);
+ return -1;
+ }
+
+ snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP",
+ softnic->params.name,
+ i);
+
+ msgq_rsp = rte_ring_create(ring_name,
+ THREAD_MSGQ_SIZE,
+ cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (msgq_rsp == NULL) {
+ softnic_thread_free(softnic);
+ return -1;
+ }
+
+ /* Master thread records */
+ t->msgq_req = msgq_req;
+ t->msgq_rsp = msgq_rsp;
+ t->enabled = 1;
+
+ /* Data plane thread records */
+ t_data->n_pipelines = 0;
+ t_data->msgq_req = msgq_req;
+ t_data->msgq_rsp = msgq_rsp;
+ t_data->timer_period =
+ (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
+ t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
+ t_data->time_next_min = t_data->time_next;
+ }
+
+ return 0;
+}
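+
+/* One REQ/RSP ring pair is provisioned per slave lcore up front; whether
+ * a given lcore actually runs pipelines is decided later through
+ * softnic_thread_pipeline_enable().
+ */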
+
+static inline int
+thread_is_running(uint32_t thread_id)
+{
+ enum rte_lcore_state_t thread_state;
+
+ thread_state = rte_eal_get_lcore_state(thread_id);
+ return (thread_state == RUNNING) ? 1 : 0;
+}
+
+/**
+ * Pipeline is running when:
+ * (A) Pipeline is mapped to a data plane thread AND
+ * (B) Its data plane thread is in RUNNING state.
+ */
+static inline int
+pipeline_is_running(struct pipeline *p)
+{
+ if (p->enabled == 0)
+ return 0;
+
+ return thread_is_running(p->thread_id);
+}
+
+/**
+ * Master thread & data plane threads: message passing
+ */
+enum thread_req_type {
+ THREAD_REQ_PIPELINE_ENABLE = 0,
+ THREAD_REQ_PIPELINE_DISABLE,
+ THREAD_REQ_MAX
+};
+
+struct thread_msg_req {
+ enum thread_req_type type;
+
+ union {
+ struct {
+ struct rte_pipeline *p;
+ struct {
+ struct rte_table_action *a;
+ } table[RTE_PIPELINE_TABLE_MAX];
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint32_t timer_period_ms;
+ uint32_t n_tables;
+ } pipeline_enable;
+
+ struct {
+ struct rte_pipeline *p;
+ } pipeline_disable;
+ };
+};
+
+struct thread_msg_rsp {
+ int status;
+};
+
+/**
+ * Master thread
+ */
+static struct thread_msg_req *
+thread_msg_alloc(void)
+{
+ size_t size = RTE_MAX(sizeof(struct thread_msg_req),
+ sizeof(struct thread_msg_rsp));
+
+ return calloc(1, size);
+}
+
+static void
+thread_msg_free(struct thread_msg_rsp *rsp)
+{
+ free(rsp);
+}
+
+static struct thread_msg_rsp *
+thread_msg_send_recv(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ struct thread_msg_req *req)
+{
+ struct softnic_thread *t = &softnic->thread[thread_id];
+ struct rte_ring *msgq_req = t->msgq_req;
+ struct rte_ring *msgq_rsp = t->msgq_rsp;
+ struct thread_msg_rsp *rsp;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(msgq_req, req);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ do {
+ status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
+ } while (status != 0);
+
+ return rsp;
+}
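+
+/* Note: both loops above spin, so the master thread blocks until the
+ * data plane thread services the request. The rings are created
+ * single-producer/single-consumer, which is why the sp/sc enqueue and
+ * dequeue variants are safe here.
+ */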
+
+int
+softnic_thread_pipeline_enable(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ const char *pipeline_name)
+{
+ struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
+ struct softnic_thread *t;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+ uint32_t i;
+ int status;
+
+ /* Check input params */
+ if ((thread_id >= RTE_MAX_LCORE) ||
+ (p == NULL) ||
+ (p->n_ports_in == 0) ||
+ (p->n_ports_out == 0) ||
+ (p->n_tables == 0))
+ return -1;
+
+ t = &softnic->thread[thread_id];
+ if ((t->enabled == 0) ||
+ p->enabled)
+ return -1;
+
+ if (!thread_is_running(thread_id)) {
+ struct softnic_thread_data *td = &softnic->thread_data[thread_id];
+ struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
+
+ if (td->n_pipelines >= THREAD_PIPELINES_MAX)
+ return -1;
+
+ /* Data plane thread */
+ td->p[td->n_pipelines] = p->p;
+
+ tdp->p = p->p;
+ for (i = 0; i < p->n_tables; i++)
+ tdp->table_data[i].a =
+ p->table[i].a;
+ tdp->n_tables = p->n_tables;
+
+ tdp->msgq_req = p->msgq_req;
+ tdp->msgq_rsp = p->msgq_rsp;
+ tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
+ tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
+
+ td->n_pipelines++;
+
+ /* Pipeline */
+ p->thread_id = thread_id;
+ p->enabled = 1;
+
+ return 0;
+ }
+
+ /* Allocate request */
+ req = thread_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = THREAD_REQ_PIPELINE_ENABLE;
+ req->pipeline_enable.p = p->p;
+ for (i = 0; i < p->n_tables; i++)
+ req->pipeline_enable.table[i].a =
+ p->table[i].a;
+ req->pipeline_enable.msgq_req = p->msgq_req;
+ req->pipeline_enable.msgq_rsp = p->msgq_rsp;
+ req->pipeline_enable.timer_period_ms = p->timer_period_ms;
+ req->pipeline_enable.n_tables = p->n_tables;
+
+ /* Send request and wait for response */
+ rsp = thread_msg_send_recv(softnic, thread_id, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ thread_msg_free(rsp);
+
+ /* Request completion */
+ if (status)
+ return status;
+
+ p->thread_id = thread_id;
+ p->enabled = 1;
+
+ return 0;
+}
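+
+/* Two paths above: if the target thread is not running yet, its records
+ * are written directly; once it runs, the same state change must travel
+ * through the message rings so that only the data plane thread mutates
+ * its own pipeline array.
+ */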
+
+int
+softnic_thread_pipeline_disable(struct pmd_internals *softnic,
+ uint32_t thread_id,
+ const char *pipeline_name)
+{
+ struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name);
+ struct softnic_thread *t;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((thread_id >= RTE_MAX_LCORE) ||
+ (p == NULL))
+ return -1;
+
+ t = &softnic->thread[thread_id];
+ if (t->enabled == 0)
+ return -1;
+
+ if (p->enabled == 0)
+ return 0;
+
+ if (p->thread_id != thread_id)
+ return -1;
+
+ if (!thread_is_running(thread_id)) {
+ struct softnic_thread_data *td = &softnic->thread_data[thread_id];
+ uint32_t i;
+
+ for (i = 0; i < td->n_pipelines; i++) {
+ struct pipeline_data *tdp = &td->pipeline_data[i];
+
+ if (tdp->p != p->p)
+ continue;
+
+ /* Data plane thread */
+ if (i < td->n_pipelines - 1) {
+ struct rte_pipeline *pipeline_last =
+ td->p[td->n_pipelines - 1];
+ struct pipeline_data *tdp_last =
+ &td->pipeline_data[td->n_pipelines - 1];
+
+ td->p[i] = pipeline_last;
+ memcpy(tdp, tdp_last, sizeof(*tdp));
+ }
+
+ td->n_pipelines--;
+
+ /* Pipeline */
+ p->enabled = 0;
+
+ break;
+ }
+
+ return 0;
+ }
+
+ /* Allocate request */
+ req = thread_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = THREAD_REQ_PIPELINE_DISABLE;
+ req->pipeline_disable.p = p->p;
+
+ /* Send request and wait for response */
+ rsp = thread_msg_send_recv(softnic, thread_id, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ thread_msg_free(rsp);
+
+ /* Request completion */
+ if (status)
+ return status;
+
+ p->enabled = 0;
+
+ return 0;
+}
+
+/**
+ * Data plane threads: message handling
+ */
+static inline struct thread_msg_req *
+thread_msg_recv(struct rte_ring *msgq_req)
+{
+ struct thread_msg_req *req;
+
+ int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
+
+ if (status != 0)
+ return NULL;
+
+ return req;
+}
+
+static inline void
+thread_msg_send(struct rte_ring *msgq_rsp,
+ struct thread_msg_rsp *rsp)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(msgq_rsp, rsp);
+ } while (status == -ENOBUFS);
+}
+
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_enable(struct softnic_thread_data *t,
+ struct thread_msg_req *req)
+{
+ struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
+ struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
+ uint32_t i;
+
+ /* Request */
+ if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ t->p[t->n_pipelines] = req->pipeline_enable.p;
+
+ p->p = req->pipeline_enable.p;
+ for (i = 0; i < req->pipeline_enable.n_tables; i++)
+ p->table_data[i].a =
+ req->pipeline_enable.table[i].a;
+
+ p->n_tables = req->pipeline_enable.n_tables;
+
+ p->msgq_req = req->pipeline_enable.msgq_req;
+ p->msgq_rsp = req->pipeline_enable.msgq_rsp;
+ p->timer_period =
+ (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
+ p->time_next = rte_get_tsc_cycles() + p->timer_period;
+
+ t->n_pipelines++;
+
+ /* Response */
+ rsp->status = 0;
+ return rsp;
+}
+
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_disable(struct softnic_thread_data *t,
+ struct thread_msg_req *req)
+{
+ struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req;
+ uint32_t n_pipelines = t->n_pipelines;
+ struct rte_pipeline *pipeline = req->pipeline_disable.p;
+ uint32_t i;
+
+ /* find pipeline */
+ for (i = 0; i < n_pipelines; i++) {
+ struct pipeline_data *p = &t->pipeline_data[i];
+
+ if (p->p != pipeline)
+ continue;
+
+ if (i < n_pipelines - 1) {
+ struct rte_pipeline *pipeline_last =
+ t->p[n_pipelines - 1];
+ struct pipeline_data *p_last =
+ &t->pipeline_data[n_pipelines - 1];
+
+ t->p[i] = pipeline_last;
+ memcpy(p, p_last, sizeof(*p));
+ }
+
+ t->n_pipelines--;
+
+ rsp->status = 0;
+ return rsp;
+ }
+
+ /* should not get here */
+ rsp->status = 0;
+ return rsp;
+}
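+
+/* Removal compacts the per-thread arrays by moving the last pipeline
+ * into the vacated slot, keeping t->p[] and t->pipeline_data[] dense so
+ * the data plane loop can iterate 0 .. n_pipelines - 1.
+ */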
+
+static void
+thread_msg_handle(struct softnic_thread_data *t)
+{
+ for ( ; ; ) {
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+
+ req = thread_msg_recv(t->msgq_req);
+ if (req == NULL)
+ break;
+
+ switch (req->type) {
+ case THREAD_REQ_PIPELINE_ENABLE:
+ rsp = thread_msg_handle_pipeline_enable(t, req);
+ break;
+
+ case THREAD_REQ_PIPELINE_DISABLE:
+ rsp = thread_msg_handle_pipeline_disable(t, req);
+ break;
+
+ default:
+ rsp = (struct thread_msg_rsp *)req;
+ rsp->status = -1;
+ }
+
+ thread_msg_send(t->msgq_rsp, rsp);
+ }
+}
+
+/**
+ * Master thread & data plane threads: message passing
+ */
+enum pipeline_req_type {
+ /* Port IN */
+ PIPELINE_REQ_PORT_IN_STATS_READ,
+ PIPELINE_REQ_PORT_IN_ENABLE,
+ PIPELINE_REQ_PORT_IN_DISABLE,
+
+ /* Port OUT */
+ PIPELINE_REQ_PORT_OUT_STATS_READ,
+
+ /* Table */
+ PIPELINE_REQ_TABLE_STATS_READ,
+ PIPELINE_REQ_TABLE_RULE_ADD,
+ PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
+ PIPELINE_REQ_TABLE_RULE_ADD_BULK,
+ PIPELINE_REQ_TABLE_RULE_DELETE,
+ PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
+ PIPELINE_REQ_TABLE_RULE_STATS_READ,
+ PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
+ PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
+ PIPELINE_REQ_TABLE_RULE_MTR_READ,
+ PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
+ PIPELINE_REQ_TABLE_RULE_TTL_READ,
+ PIPELINE_REQ_MAX
+};
+
+struct pipeline_msg_req_port_in_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_port_out_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_table_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_table_rule_add {
+ struct softnic_table_rule_match match;
+ struct softnic_table_rule_action action;
+};
+
+struct pipeline_msg_req_table_rule_add_default {
+ struct softnic_table_rule_action action;
+};
+
+struct pipeline_msg_req_table_rule_add_bulk {
+ struct softnic_table_rule_match *match;
+ struct softnic_table_rule_action *action;
+ void **data;
+ uint32_t n_rules;
+ int bulk;
+};
+
+struct pipeline_msg_req_table_rule_delete {
+ struct softnic_table_rule_match match;
+};
+
+struct pipeline_msg_req_table_rule_stats_read {
+ void *data;
+ int clear;
+};
+
+struct pipeline_msg_req_table_mtr_profile_add {
+ uint32_t meter_profile_id;
+ struct rte_table_action_meter_profile profile;
+};
+
+struct pipeline_msg_req_table_mtr_profile_delete {
+ uint32_t meter_profile_id;
+};
+
+struct pipeline_msg_req_table_rule_mtr_read {
+ void *data;
+ uint32_t tc_mask;
+ int clear;
+};
+
+struct pipeline_msg_req_table_dscp_table_update {
+ uint64_t dscp_mask;
+ struct rte_table_action_dscp_table dscp_table;
+};
+
+struct pipeline_msg_req_table_rule_ttl_read {
+ void *data;
+ int clear;
+};
+
+struct pipeline_msg_req {
+ enum pipeline_req_type type;
+ uint32_t id; /* Port IN, port OUT or table ID */
+
+ RTE_STD_C11
+ union {
+ struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
+ struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
+ struct pipeline_msg_req_table_stats_read table_stats_read;
+ struct pipeline_msg_req_table_rule_add table_rule_add;
+ struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
+ struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
+ struct pipeline_msg_req_table_rule_delete table_rule_delete;
+ struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
+ struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
+ struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
+ struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
+ struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
+ struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
+ };
+};
+
+struct pipeline_msg_rsp_port_in_stats_read {
+ struct rte_pipeline_port_in_stats stats;
+};
+
+struct pipeline_msg_rsp_port_out_stats_read {
+ struct rte_pipeline_port_out_stats stats;
+};
+
+struct pipeline_msg_rsp_table_stats_read {
+ struct rte_pipeline_table_stats stats;
+};
+
+struct pipeline_msg_rsp_table_rule_add {
+ void *data;
+};
+
+struct pipeline_msg_rsp_table_rule_add_default {
+ void *data;
+};
+
+struct pipeline_msg_rsp_table_rule_add_bulk {
+ uint32_t n_rules;
+};
+
+struct pipeline_msg_rsp_table_rule_stats_read {
+ struct rte_table_action_stats_counters stats;
+};
+
+struct pipeline_msg_rsp_table_rule_mtr_read {
+ struct rte_table_action_mtr_counters stats;
+};
+
+struct pipeline_msg_rsp_table_rule_ttl_read {
+ struct rte_table_action_ttl_counters stats;
+};
+
+struct pipeline_msg_rsp {
+ int status;
+
+ RTE_STD_C11
+ union {
+ struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
+ struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
+ struct pipeline_msg_rsp_table_stats_read table_stats_read;
+ struct pipeline_msg_rsp_table_rule_add table_rule_add;
+ struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
+ struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
+ struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
+ struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
+ struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
+ };
+};
+
+/**
+ * Master thread
+ */
+static struct pipeline_msg_req *
+pipeline_msg_alloc(void)
+{
+ size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
+ sizeof(struct pipeline_msg_rsp));
+
+ return calloc(1, size);
+}
+
+static void
+pipeline_msg_free(struct pipeline_msg_rsp *rsp)
+{
+ free(rsp);
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_send_recv(struct pipeline *p,
+ struct pipeline_msg_req *req)
+{
+ struct rte_ring *msgq_req = p->msgq_req;
+ struct rte_ring *msgq_rsp = p->msgq_rsp;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(msgq_req, req);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ do {
+ status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp);
+ } while (status != 0);
+
+ return rsp;
+}
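+
+/* Every softnic_pipeline_*() control function below follows the same
+ * pattern: if the pipeline is not running on a data plane thread, act on
+ * it directly; otherwise marshal the request through the pipeline's
+ * message rings and block for the response.
+ */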
+
+int
+softnic_pipeline_port_in_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
+ req->id = port_id;
+ req->port_in_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_in_enable(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_enable(p->p, port_id);
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_ENABLE;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_in_disable(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_in)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_disable(p->p, port_id);
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_DISABLE;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_port_out_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ port_id >= p->n_ports_out)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
+ req->id = port_id;
+ req->port_out_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_table_stats_read(p->p,
+ table_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_STATS_READ;
+ req->id = table_id;
+ req->table_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+static int
+match_check(struct softnic_table_rule_match *match,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ struct softnic_table *table;
+
+ if (match == NULL ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ table = &p->table[table_id];
+ if (match->match_type != table->params.match_type)
+ return -1;
+
+ switch (match->match_type) {
+ case TABLE_ACL:
+ {
+ struct softnic_table_acl_params *t = &table->params.match.acl;
+ struct softnic_table_rule_match_acl *r = &match->match.acl;
+
+ if ((r->ip_version && (t->ip_version == 0)) ||
+ ((r->ip_version == 0) && t->ip_version))
+ return -1;
+
+ if (r->ip_version) {
+ if (r->sa_depth > 32 ||
+ r->da_depth > 32)
+ return -1;
+ } else {
+ if (r->sa_depth > 128 ||
+ r->da_depth > 128)
+ return -1;
+ }
+ return 0;
+ }
+
+ case TABLE_ARRAY:
+ return 0;
+
+ case TABLE_HASH:
+ return 0;
+
+ case TABLE_LPM:
+ {
+ struct softnic_table_lpm_params *t = &table->params.match.lpm;
+ struct softnic_table_rule_match_lpm *r = &match->match.lpm;
+
+ if ((r->ip_version && (t->key_size != 4)) ||
+ ((r->ip_version == 0) && (t->key_size != 16)))
+ return -1;
+
+ if (r->ip_version) {
+ if (r->depth > 32)
+ return -1;
+ } else {
+ if (r->depth > 128)
+ return -1;
+ }
+ return 0;
+ }
+
+ case TABLE_STUB:
+ return -1;
+
+ default:
+ return -1;
+ }
+}
+
+static int
+action_check(struct softnic_table_rule_action *action,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ struct softnic_table_action_profile *ap;
+
+ if (action == NULL ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ ap = p->table[table_id].ap;
+ if (action->action_mask != ap->params.action_mask)
+ return -1;
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT &&
+ action->fwd.id >= p->n_ports_out)
+ return -1;
+
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE &&
+ action->fwd.id >= p->n_tables)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
+ uint32_t tc_mask1 = action->mtr.tc_mask;
+
+ if (tc_mask1 != tc_mask0)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ uint32_t n_subports_per_port =
+ ap->params.tm.n_subports_per_port;
+ uint32_t n_pipes_per_subport =
+ ap->params.tm.n_pipes_per_subport;
+ uint32_t subport_id = action->tm.subport_id;
+ uint32_t pipe_id = action->tm.pipe_id;
+
+ if (subport_id >= n_subports_per_port ||
+ pipe_id >= n_pipes_per_subport)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ uint64_t encap_mask = ap->params.encap.encap_mask;
+ enum rte_table_action_encap_type type = action->encap.type;
+
+ if ((encap_mask & (1LLU << type)) == 0)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ int ip_version0 = ap->params.common.ip_version;
+ int ip_version1 = action->nat.ip_version;
+
+ if ((ip_version1 && (ip_version0 == 0)) ||
+ ((ip_version1 == 0) && ip_version0))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+action_default_check(struct softnic_table_rule_action *action,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ if (action == NULL ||
+ action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD) ||
+ p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT &&
+ action->fwd.id >= p->n_ports_out)
+ return -1;
+
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE &&
+ action->fwd.id >= p->n_tables)
+ return -1;
+ }
+
+ return 0;
+}
+
+union table_rule_match_low_level {
+ struct rte_table_acl_rule_add_params acl_add;
+ struct rte_table_acl_rule_delete_params acl_delete;
+ struct rte_table_array_key array;
+ uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
+ struct rte_table_lpm_key lpm_ipv4;
+ struct rte_table_lpm_ipv6_key lpm_ipv6;
+};
+
+static int
+match_convert(struct softnic_table_rule_match *mh,
+ union table_rule_match_low_level *ml,
+ int add);
+
+static int
+action_convert(struct rte_table_action *a,
+ struct softnic_table_rule_action *action,
+ struct rte_pipeline_table_entry *data);
+
+int
+softnic_pipeline_table_rule_add(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL ||
+ action == NULL ||
+ data == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ match_check(match, p, table_id) ||
+ action_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+ union table_rule_match_low_level match_ll;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ int key_found;
+ uint8_t *buffer;
+
+ buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
+ if (buffer == NULL)
+ return -1;
+
+ /* Table match-action rule conversion */
+ data_in = (struct rte_pipeline_table_entry *)buffer;
+
+ status = match_convert(match, &match_ll, 1);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ status = action_convert(a, action, data_in);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Add rule (match, action) to table */
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ &match_ll,
+ data_in,
+ &key_found,
+ &data_out);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Write response */
+ *data = data_out;
+
+ free(buffer);
+ return 0;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD;
+ req->id = table_id;
+ memcpy(&req->table_rule_add.match, match, sizeof(*match));
+ memcpy(&req->table_rule_add.action, action, sizeof(*action));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *data = rsp->table_rule_add.data;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_action *action,
+ void **data)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ action == NULL ||
+ data == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ action_default_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint8_t *buffer;
+
+ buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
+ if (buffer == NULL)
+ return -1;
+
+ /* Apply actions */
+ data_in = (struct rte_pipeline_table_entry *)buffer;
+
+ data_in->action = action->fwd.action;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
+ data_in->port_id = action->fwd.id;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
+ data_in->table_id = action->fwd.id;
+
+ /* Add default rule to table */
+ status = rte_pipeline_table_default_entry_add(p->p,
+ table_id,
+ data_in,
+ &data_out);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Write response */
+ *data = data_out;
+
+ free(buffer);
+ return 0;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
+ req->id = table_id;
+ memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *data = rsp->table_rule_add_default.data;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match,
+ struct softnic_table_rule_action *action,
+ void **data,
+ uint32_t *n_rules)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ uint32_t i;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL ||
+ action == NULL ||
+ data == NULL ||
+ n_rules == NULL ||
+ (*n_rules == 0))
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ for (i = 0; i < *n_rules; i++)
+ if (match_check(&match[i], p, table_id) ||
+ action_check(&action[i], p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+ union table_rule_match_low_level *match_ll;
+ uint8_t *action_ll;
+ void **match_ll_ptr;
+ struct rte_pipeline_table_entry **action_ll_ptr;
+ struct rte_pipeline_table_entry **entries_ptr =
+ (struct rte_pipeline_table_entry **)data;
+ uint32_t bulk =
+ (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
+ int *found;
+
+ /* Memory allocation */
+ match_ll = calloc(*n_rules, sizeof(union table_rule_match_low_level));
+ action_ll = calloc(*n_rules, TABLE_RULE_ACTION_SIZE_MAX);
+ match_ll_ptr = calloc(*n_rules, sizeof(void *));
+ action_ll_ptr =
+ calloc(*n_rules, sizeof(struct rte_pipeline_table_entry *));
+ found = calloc(*n_rules, sizeof(int));
+
+ if (match_ll == NULL ||
+ action_ll == NULL ||
+ match_ll_ptr == NULL ||
+ action_ll_ptr == NULL ||
+ found == NULL)
+ goto fail;
+
+ for (i = 0; i < *n_rules; i++) {
+ match_ll_ptr[i] = (void *)&match_ll[i];
+ action_ll_ptr[i] =
+ (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
+ }
+
+ /* Rule match conversion */
+ for (i = 0; i < *n_rules; i++) {
+ status = match_convert(&match[i], match_ll_ptr[i], 1);
+ if (status)
+ goto fail;
+ }
+
+ /* Rule action conversion */
+ for (i = 0; i < *n_rules; i++) {
+ status = action_convert(a, &action[i], action_ll_ptr[i]);
+ if (status)
+ goto fail;
+ }
+
+ /* Add rule (match, action) to table */
+ if (bulk) {
+ status = rte_pipeline_table_entry_add_bulk(p->p,
+ table_id,
+ match_ll_ptr,
+ action_ll_ptr,
+ *n_rules,
+ found,
+ entries_ptr);
+ if (status)
+ *n_rules = 0;
+ } else {
+ for (i = 0; i < *n_rules; i++) {
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ match_ll_ptr[i],
+ action_ll_ptr[i],
+ &found[i],
+ &entries_ptr[i]);
+ if (status) {
+ *n_rules = i;
+ break;
+ }
+ }
+ }
+
+ /* Free */
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ return status;
+
+fail:
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ *n_rules = 0;
+ return -1;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
+ req->id = table_id;
+ req->table_rule_add_bulk.match = match;
+ req->table_rule_add_bulk.action = action;
+ req->table_rule_add_bulk.data = data;
+ req->table_rule_add_bulk.n_rules = *n_rules;
+ req->table_rule_add_bulk.bulk =
+ (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *n_rules = rsp->table_rule_add_bulk.n_rules;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_delete(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ struct softnic_table_rule_match *match)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ match == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables ||
+ match_check(match, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ union table_rule_match_low_level match_ll;
+ int key_found;
+
+ status = match_convert(match, &match_ll, 0);
+ if (status)
+ return -1;
+
+ status = rte_pipeline_table_entry_delete(p->p,
+ table_id,
+ &match_ll,
+ &key_found,
+ NULL);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
+ req->id = table_id;
+ memcpy(&req->table_rule_delete.match, match, sizeof(*match));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_table_default_entry_delete(p->p,
+ table_id,
+ NULL);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
+ req->id = table_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_stats_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_stats_read(a,
+ data,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
+ req->id = table_id;
+ req->table_rule_stats_read.data = data;
+ req->table_rule_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+ memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ profile == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_profile_add(a,
+ meter_profile_id,
+ profile);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
+ req->id = table_id;
+ req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
+ memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_mtr_profile_delete(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_profile_delete(a,
+ meter_profile_id);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
+ req->id = table_id;
+ req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_mtr_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_read(a,
+ data,
+ tc_mask,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
+ req->id = table_id;
+ req->table_rule_mtr_read.data = data;
+ req->table_rule_mtr_read.tc_mask = tc_mask;
+ req->table_rule_mtr_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+ memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_dscp_table_update(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *dscp_table)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ dscp_table == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_dscp_table_update(a,
+ dscp_mask,
+ dscp_table);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
+ req->id = table_id;
+ req->table_dscp_table_update.dscp_mask = dscp_mask;
+ memcpy(&req->table_dscp_table_update.dscp_table,
+ dscp_table, sizeof(*dscp_table));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+softnic_pipeline_table_rule_ttl_read(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL ||
+ data == NULL ||
+ stats == NULL)
+ return -1;
+
+ p = softnic_pipeline_find(softnic, pipeline_name);
+ if (p == NULL ||
+ table_id >= p->n_tables)
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_ttl_read(a,
+ data,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
+ req->id = table_id;
+ req->table_rule_ttl_read.data = data;
+ req->table_rule_ttl_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+ memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+/**
+ * Data plane threads: message handling
+ */
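+/*
+ * The control plane posts a request on the pipeline's request ring and
+ * blocks until the data plane thread posts the response on the response
+ * ring. Handlers write the response in place over the request, so a
+ * single message buffer makes the full round trip with no copy or
+ * allocation on the data plane.
+ */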
+static inline struct pipeline_msg_req *
+pipeline_msg_recv(struct rte_ring *msgq_req)
+{
+ struct pipeline_msg_req *req;
+
+ int status = rte_ring_sc_dequeue(msgq_req, (void **)&req);
+
+ if (status != 0)
+ return NULL;
+
+ return req;
+}
+
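+/* Enqueue the response, retrying for as long as the ring is full. The
+ * response ring has a single producer here (only this data plane thread
+ * writes it), so rte_ring_sp_enqueue() is safe without locking.
+ */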
+static inline void
+pipeline_msg_send(struct rte_ring *msgq_rsp,
+ struct pipeline_msg_rsp *rsp)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(msgq_rsp, rsp);
+ } while (status == -ENOBUFS);
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+ int clear = req->port_in_stats_read.clear;
+
+ rsp->status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ &rsp->port_in_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+
+ rsp->status = rte_pipeline_port_in_enable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+
+ rsp->status = rte_pipeline_port_in_disable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t port_id = req->id;
+ int clear = req->port_out_stats_read.clear;
+
+ rsp->status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ &rsp->port_out_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+	uint32_t table_id = req->id;
+ int clear = req->table_stats_read.clear;
+
+ rsp->status = rte_pipeline_table_stats_read(p->p,
+		table_id,
+ &rsp->table_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
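+/* Split an IPv6 prefix depth (0..128) into four per-32-bit-word depths,
+ * as expected by the ACL field_value[].mask_range.u32 entries. For
+ * example, depth 72 yields {32, 32, 8, 0}: the first two words are
+ * matched in full, 8 bits of the third word are matched, and the fourth
+ * word is a wildcard.
+ */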
+static int
+match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
+{
+ if (depth > 128)
+ return -1;
+
+ switch (depth / 32) {
+ case 0:
+ depth32[0] = depth;
+ depth32[1] = 0;
+ depth32[2] = 0;
+ depth32[3] = 0;
+ return 0;
+
+ case 1:
+ depth32[0] = 32;
+ depth32[1] = depth - 32;
+ depth32[2] = 0;
+ depth32[3] = 0;
+ return 0;
+
+ case 2:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = depth - 64;
+ depth32[3] = 0;
+ return 0;
+
+ case 3:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = 32;
+ depth32[3] = depth - 96;
+ return 0;
+
+ case 4:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = 32;
+ depth32[3] = 32;
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+static int
+match_convert(struct softnic_table_rule_match *mh,
+ union table_rule_match_low_level *ml,
+ int add)
+{
+ memset(ml, 0, sizeof(*ml));
+
+ switch (mh->match_type) {
+ case TABLE_ACL:
+ if (mh->match.acl.ip_version)
+ if (add) {
+ ml->acl_add.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_add.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_add.field_value[1].value.u32 =
+ mh->match.acl.ipv4.sa;
+ ml->acl_add.field_value[1].mask_range.u32 =
+ mh->match.acl.sa_depth;
+
+ ml->acl_add.field_value[2].value.u32 =
+ mh->match.acl.ipv4.da;
+ ml->acl_add.field_value[2].mask_range.u32 =
+ mh->match.acl.da_depth;
+
+ ml->acl_add.field_value[3].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_add.field_value[3].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_add.field_value[4].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_add.field_value[4].mask_range.u16 =
+ mh->match.acl.dp1;
+
+ ml->acl_add.priority =
+ (int32_t)mh->match.acl.priority;
+ } else {
+ ml->acl_delete.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_delete.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_delete.field_value[1].value.u32 =
+ mh->match.acl.ipv4.sa;
+ ml->acl_delete.field_value[1].mask_range.u32 =
+ mh->match.acl.sa_depth;
+
+ ml->acl_delete.field_value[2].value.u32 =
+ mh->match.acl.ipv4.da;
+ ml->acl_delete.field_value[2].mask_range.u32 =
+ mh->match.acl.da_depth;
+
+ ml->acl_delete.field_value[3].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_delete.field_value[3].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_delete.field_value[4].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_delete.field_value[4].mask_range.u16 =
+ mh->match.acl.dp1;
+ }
+ else
+ if (add) {
+ uint32_t *sa32 =
+ (uint32_t *)mh->match.acl.ipv6.sa;
+ uint32_t *da32 =
+ (uint32_t *)mh->match.acl.ipv6.da;
+ uint32_t sa32_depth[4], da32_depth[4];
+ int status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.sa_depth,
+ sa32_depth);
+ if (status)
+ return status;
+
+ status = match_convert_ipv6_depth(
+ mh->match.acl.da_depth,
+ da32_depth);
+ if (status)
+ return status;
+
+ ml->acl_add.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_add.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_add.field_value[1].value.u32 = sa32[0];
+ ml->acl_add.field_value[1].mask_range.u32 =
+ sa32_depth[0];
+ ml->acl_add.field_value[2].value.u32 = sa32[1];
+ ml->acl_add.field_value[2].mask_range.u32 =
+ sa32_depth[1];
+ ml->acl_add.field_value[3].value.u32 = sa32[2];
+ ml->acl_add.field_value[3].mask_range.u32 =
+ sa32_depth[2];
+ ml->acl_add.field_value[4].value.u32 = sa32[3];
+ ml->acl_add.field_value[4].mask_range.u32 =
+ sa32_depth[3];
+
+ ml->acl_add.field_value[5].value.u32 = da32[0];
+ ml->acl_add.field_value[5].mask_range.u32 =
+ da32_depth[0];
+ ml->acl_add.field_value[6].value.u32 = da32[1];
+ ml->acl_add.field_value[6].mask_range.u32 =
+ da32_depth[1];
+ ml->acl_add.field_value[7].value.u32 = da32[2];
+ ml->acl_add.field_value[7].mask_range.u32 =
+ da32_depth[2];
+ ml->acl_add.field_value[8].value.u32 = da32[3];
+ ml->acl_add.field_value[8].mask_range.u32 =
+ da32_depth[3];
+
+ ml->acl_add.field_value[9].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_add.field_value[9].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_add.field_value[10].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_add.field_value[10].mask_range.u16 =
+ mh->match.acl.dp1;
+
+ ml->acl_add.priority =
+ (int32_t)mh->match.acl.priority;
+ } else {
+ uint32_t *sa32 =
+ (uint32_t *)mh->match.acl.ipv6.sa;
+ uint32_t *da32 =
+ (uint32_t *)mh->match.acl.ipv6.da;
+ uint32_t sa32_depth[4], da32_depth[4];
+ int status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.sa_depth,
+ sa32_depth);
+ if (status)
+ return status;
+
+ status = match_convert_ipv6_depth(mh->match.acl.da_depth,
+ da32_depth);
+ if (status)
+ return status;
+
+ ml->acl_delete.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_delete.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_delete.field_value[1].value.u32 =
+ sa32[0];
+ ml->acl_delete.field_value[1].mask_range.u32 =
+ sa32_depth[0];
+ ml->acl_delete.field_value[2].value.u32 =
+ sa32[1];
+ ml->acl_delete.field_value[2].mask_range.u32 =
+ sa32_depth[1];
+ ml->acl_delete.field_value[3].value.u32 =
+ sa32[2];
+ ml->acl_delete.field_value[3].mask_range.u32 =
+ sa32_depth[2];
+ ml->acl_delete.field_value[4].value.u32 =
+ sa32[3];
+ ml->acl_delete.field_value[4].mask_range.u32 =
+ sa32_depth[3];
+
+ ml->acl_delete.field_value[5].value.u32 =
+ da32[0];
+ ml->acl_delete.field_value[5].mask_range.u32 =
+ da32_depth[0];
+ ml->acl_delete.field_value[6].value.u32 =
+ da32[1];
+ ml->acl_delete.field_value[6].mask_range.u32 =
+ da32_depth[1];
+ ml->acl_delete.field_value[7].value.u32 =
+ da32[2];
+ ml->acl_delete.field_value[7].mask_range.u32 =
+ da32_depth[2];
+ ml->acl_delete.field_value[8].value.u32 =
+ da32[3];
+ ml->acl_delete.field_value[8].mask_range.u32 =
+ da32_depth[3];
+
+ ml->acl_delete.field_value[9].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_delete.field_value[9].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_delete.field_value[10].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_delete.field_value[10].mask_range.u16 =
+ mh->match.acl.dp1;
+ }
+ return 0;
+
+ case TABLE_ARRAY:
+ ml->array.pos = mh->match.array.pos;
+ return 0;
+
+ case TABLE_HASH:
+ memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
+ return 0;
+
+ case TABLE_LPM:
+ if (mh->match.lpm.ip_version) {
+ ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
+ ml->lpm_ipv4.depth = mh->match.lpm.depth;
+ } else {
+ memcpy(ml->lpm_ipv6.ip,
+ mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
+ ml->lpm_ipv6.depth = mh->match.lpm.depth;
+ }
+
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
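+/* Convert a high-level rule action into the table entry's action data:
+ * each action type flagged in action_mask is applied in turn through
+ * rte_table_action_apply(), and the first failure aborts the whole
+ * conversion.
+ */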
+static int
+action_convert(struct rte_table_action *a,
+ struct softnic_table_rule_action *action,
+ struct rte_pipeline_table_entry *data)
+{
+ int status;
+
+ /* Apply actions */
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_FWD,
+ &action->fwd);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_LB,
+ &action->lb);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_MTR,
+ &action->mtr);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TM,
+ &action->tm);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_ENCAP,
+ &action->encap);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_NAT,
+ &action->nat);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TTL,
+ &action->ttl);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_STATS,
+ &action->stats);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TIME,
+ &action->time);
+
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ union table_rule_match_low_level match_ll;
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_match *match = &req->table_rule_add.match;
+ struct softnic_table_rule_action *action = &req->table_rule_add.action;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint32_t table_id = req->id;
+ int key_found, status;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ /* Apply actions */
+ memset(p->buffer, 0, sizeof(p->buffer));
+ data_in = (struct rte_pipeline_table_entry *)p->buffer;
+
+ status = match_convert(match, &match_ll, 1);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ status = action_convert(a, action, data_in);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ &match_ll,
+ data_in,
+ &key_found,
+ &data_out);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add.data = data_out;
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_action *action = &req->table_rule_add_default.action;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint32_t table_id = req->id;
+ int status;
+
+ /* Apply actions */
+ memset(p->buffer, 0, sizeof(p->buffer));
+ data_in = (struct rte_pipeline_table_entry *)p->buffer;
+
+ data_in->action = action->fwd.action;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
+ data_in->port_id = action->fwd.id;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
+ data_in->table_id = action->fwd.id;
+
+ /* Add default rule to table */
+ status = rte_pipeline_table_default_entry_add(p->p,
+ table_id,
+ data_in,
+ &data_out);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add_default.data = data_out;
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+
+ uint32_t table_id = req->id;
+ struct softnic_table_rule_match *match = req->table_rule_add_bulk.match;
+ struct softnic_table_rule_action *action = req->table_rule_add_bulk.action;
+ struct rte_pipeline_table_entry **data =
+ (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
+ uint32_t n_rules = req->table_rule_add_bulk.n_rules;
+ uint32_t bulk = req->table_rule_add_bulk.bulk;
+
+ struct rte_table_action *a = p->table_data[table_id].a;
+ union table_rule_match_low_level *match_ll;
+ uint8_t *action_ll;
+ void **match_ll_ptr;
+ struct rte_pipeline_table_entry **action_ll_ptr;
+ int *found, status;
+ uint32_t i;
+
+ /* Memory allocation */
+ match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
+ action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
+ match_ll_ptr = calloc(n_rules, sizeof(void *));
+ action_ll_ptr =
+ calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
+ found = calloc(n_rules, sizeof(int));
+
+ if (match_ll == NULL ||
+ action_ll == NULL ||
+ match_ll_ptr == NULL ||
+ action_ll_ptr == NULL ||
+ found == NULL)
+ goto fail;
+
+ for (i = 0; i < n_rules; i++) {
+ match_ll_ptr[i] = (void *)&match_ll[i];
+ action_ll_ptr[i] =
+ (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
+ }
+
+ /* Rule match conversion */
+ for (i = 0; i < n_rules; i++) {
+ status = match_convert(&match[i], match_ll_ptr[i], 1);
+ if (status)
+ goto fail;
+ }
+
+ /* Rule action conversion */
+ for (i = 0; i < n_rules; i++) {
+ status = action_convert(a, &action[i], action_ll_ptr[i]);
+ if (status)
+ goto fail;
+ }
+
+ /* Add rule (match, action) to table */
+ if (bulk) {
+ status = rte_pipeline_table_entry_add_bulk(p->p,
+ table_id,
+ match_ll_ptr,
+ action_ll_ptr,
+ n_rules,
+ found,
+ data);
+ if (status)
+ n_rules = 0;
+ } else {
+ for (i = 0; i < n_rules; i++) {
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ match_ll_ptr[i],
+ action_ll_ptr[i],
+ &found[i],
+ &data[i]);
+ if (status) {
+ n_rules = i;
+ break;
+ }
+ }
+ }
+
+	/* Write response: status 0 with a reduced rule count reports
+	 * partial success, so the caller must check n_rules against the
+	 * number it requested.
+	 */
+ rsp->status = 0;
+ rsp->table_rule_add_bulk.n_rules = n_rules;
+
+ /* Free */
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ return rsp;
+
+fail:
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ rsp->status = -1;
+ rsp->table_rule_add_bulk.n_rules = 0;
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ union table_rule_match_low_level match_ll;
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ struct softnic_table_rule_match *match = &req->table_rule_delete.match;
+ uint32_t table_id = req->id;
+ int key_found, status;
+
+ status = match_convert(match, &match_ll, 0);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ table_id,
+ &match_ll,
+ &key_found,
+ NULL);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ table_id,
+ NULL);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_stats_read.data;
+ int clear = req->table_rule_stats_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_stats_read(a,
+ data,
+ &rsp->table_rule_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
+ struct rte_table_action_meter_profile *profile =
+ &req->table_mtr_profile_add.profile;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_profile_add(a,
+ meter_profile_id,
+ profile);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint32_t meter_profile_id =
+ req->table_mtr_profile_delete.meter_profile_id;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_profile_delete(a,
+ meter_profile_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_mtr_read.data;
+ uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
+ int clear = req->table_rule_mtr_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_read(a,
+ data,
+ tc_mask,
+ &rsp->table_rule_mtr_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
+ struct rte_table_action_dscp_table *dscp_table =
+ &req->table_dscp_table_update.dscp_table;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_dscp_table_update(a,
+ dscp_mask,
+ dscp_table);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_ttl_read.data;
+ int clear = req->table_rule_ttl_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_ttl_read(a,
+ data,
+ &rsp->table_rule_ttl_read.stats,
+ clear);
+
+ return rsp;
+}
+
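+/* Drain the request ring, dispatching each request to its handler and
+ * sending the response back. Unknown request types are rejected with
+ * status -1 rather than dropped, so the control plane never blocks.
+ */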
+static void
+pipeline_msg_handle(struct pipeline_data *p)
+{
+ for ( ; ; ) {
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+
+ req = pipeline_msg_recv(p->msgq_req);
+ if (req == NULL)
+ break;
+
+ switch (req->type) {
+ case PIPELINE_REQ_PORT_IN_STATS_READ:
+ rsp = pipeline_msg_handle_port_in_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_IN_ENABLE:
+ rsp = pipeline_msg_handle_port_in_enable(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_IN_DISABLE:
+ rsp = pipeline_msg_handle_port_in_disable(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_OUT_STATS_READ:
+ rsp = pipeline_msg_handle_port_out_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_STATS_READ:
+ rsp = pipeline_msg_handle_table_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD:
+ rsp = pipeline_msg_handle_table_rule_add(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
+ rsp = pipeline_msg_handle_table_rule_add_default(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
+ rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_DELETE:
+ rsp = pipeline_msg_handle_table_rule_delete(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
+ rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_STATS_READ:
+ rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
+ rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
+ rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_MTR_READ:
+ rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
+ rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_TTL_READ:
+ rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
+ break;
+
+ default:
+ rsp = (struct pipeline_msg_rsp *)req;
+ rsp->status = -1;
+ }
+
+ pipeline_msg_send(p->msgq_rsp, rsp);
+ }
+}
+
+/**
+ * Data plane threads: main
+ */
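+/*
+ * Illustrative usage only (not part of this driver): an application
+ * lcore is expected to call rte_pmd_softnic_run() from its packet loop,
+ * e.g.:
+ *
+ *	while (!force_quit)
+ *		rte_pmd_softnic_run(port_id);
+ *
+ * where "force_quit" and "port_id" are placeholders owned by the
+ * application.
+ */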
+int
+rte_pmd_softnic_run(uint16_t port_id)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ struct pmd_internals *softnic;
+ struct softnic_thread_data *t;
+ uint32_t thread_id, j;
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
+#endif
+
+ softnic = dev->data->dev_private;
+ thread_id = rte_lcore_id();
+ t = &softnic->thread_data[thread_id];
+ t->iter++;
+
+ /* Data Plane */
+ for (j = 0; j < t->n_pipelines; j++)
+ rte_pipeline_run(t->p[j]);
+
+ /* Control Plane */
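+	/* Run the control path only once every 16 data plane iterations,
+	 * and even then only when the earliest pipeline/thread timer has
+	 * expired, keeping message handling off the packet fast path.
+	 */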
+ if ((t->iter & 0xFLLU) == 0) {
+ uint64_t time = rte_get_tsc_cycles();
+ uint64_t time_next_min = UINT64_MAX;
+
+ if (time < t->time_next_min)
+ return 0;
+
+ /* Pipeline message queues */
+ for (j = 0; j < t->n_pipelines; j++) {
+ struct pipeline_data *p =
+ &t->pipeline_data[j];
+ uint64_t time_next = p->time_next;
+
+ if (time_next <= time) {
+ pipeline_msg_handle(p);
+ rte_pipeline_flush(p->p);
+ time_next = time + p->timer_period;
+ p->time_next = time_next;
+ }
+
+ if (time_next < time_next_min)
+ time_next_min = time_next;
+ }
+
+ /* Thread message queues */
+ {
+ uint64_t time_next = t->time_next;
+
+ if (time_next <= time) {
+ thread_msg_handle(t);
+ time_next = time + t->timer_period;
+ t->time_next = time_next;
+ }
+
+ if (time_next < time_next_min)
+ time_next_min = time_next;
+ }
+
+ t->time_next_min = time_next_min;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 79f1c6a8..baaafbe2 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -7,62 +7,148 @@
#include <string.h>
#include <rte_malloc.h>
+#include <rte_string_fns.h>
#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"
-#define BYTES_IN_MBPS (1000 * 1000 / 8)
#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40
int
-tm_params_check(struct pmd_params *params, uint32_t hard_rate)
+softnic_tmgr_init(struct pmd_internals *p)
{
- uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
- uint32_t i;
+ TAILQ_INIT(&p->tmgr_port_list);
- /* rate */
- if (params->soft.tm.rate) {
- if (params->soft.tm.rate > hard_rate_bytes_per_sec)
- return -EINVAL;
- } else {
- params->soft.tm.rate =
- (hard_rate_bytes_per_sec > UINT32_MAX) ?
- UINT32_MAX : hard_rate_bytes_per_sec;
+ return 0;
+}
+
+void
+softnic_tmgr_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
+ if (tmgr_port == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
+ rte_sched_port_free(tmgr_port->s);
+ free(tmgr_port);
}
+}
+
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
- /* nb_queues */
- if (params->soft.tm.nb_queues == 0)
- return -EINVAL;
+ if (name == NULL)
+ return NULL;
- if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
- params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
+ TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
+ if (strcmp(tmgr_port->name, name) == 0)
+ return tmgr_port;
- params->soft.tm.nb_queues =
- rte_align32pow2(params->soft.tm.nb_queues);
+ return NULL;
+}
- /* qsize */
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- if (params->soft.tm.qsize[i] == 0)
- return -EINVAL;
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
+ struct tm_params *t = &p->soft.tm.params;
+ struct rte_sched_port *sched;
+ uint32_t n_subports, subport_id;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_tmgr_port_find(p, name))
+ return NULL;
+
+ /*
+ * Resource
+ */
+
+ /* Is hierarchy frozen? */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return NULL;
+
+ /* Port */
+ sched = rte_sched_port_config(&t->port_params);
+ if (sched == NULL)
+ return NULL;
+
+ /* Subport */
+ n_subports = t->port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+ uint32_t pipe_id;
+ int status;
- params->soft.tm.qsize[i] =
- rte_align32pow2(params->soft.tm.qsize[i]);
+ status = rte_sched_subport_config(sched,
+ subport_id,
+ &t->subport_params[subport_id]);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+
+ /* Pipe */
+ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+ int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
+ int profile_id = t->pipe_to_profile[pos];
+
+ if (profile_id < 0)
+ continue;
+
+ status = rte_sched_pipe_config(sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+ }
}
- /* enq_bsz, deq_bsz */
- if (params->soft.tm.enq_bsz == 0 ||
- params->soft.tm.deq_bsz == 0 ||
- params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
- return -EINVAL;
+ /* Node allocation */
+ tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
+ if (tmgr_port == NULL) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
- return 0;
+ /* Node fill in */
+ strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
+ tmgr_port->s = sched;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
+
+ return tmgr_port;
}
-static void
+static struct rte_sched_port *
+SCHED(struct pmd_internals *p)
+{
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(p, "TMGR");
+ if (tmgr_port == NULL)
+ return NULL;
+
+ return tmgr_port->s;
+}
+
+void
tm_hierarchy_init(struct pmd_internals *p)
{
- memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+ memset(&p->soft.tm, 0, sizeof(p->soft.tm));
/* Initialize shaper profile list */
TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
@@ -77,8 +163,8 @@ tm_hierarchy_init(struct pmd_internals *p)
TAILQ_INIT(&p->soft.tm.h.nodes);
}
-static void
-tm_hierarchy_uninit(struct pmd_internals *p)
+void
+tm_hierarchy_free(struct pmd_internals *p)
{
 	/* Remove all nodes */
for ( ; ; ) {
@@ -129,111 +215,7 @@ tm_hierarchy_uninit(struct pmd_internals *p)
free(shaper_profile);
}
- memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
-}
-
-int
-tm_init(struct pmd_internals *p,
- struct pmd_params *params,
- int numa_node)
-{
- uint32_t enq_bsz = params->soft.tm.enq_bsz;
- uint32_t deq_bsz = params->soft.tm.deq_bsz;
-
- p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
- 2 * enq_bsz * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.tm.pkts_enq == NULL)
- return -ENOMEM;
-
- p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
- deq_bsz * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.tm.pkts_deq == NULL) {
- rte_free(p->soft.tm.pkts_enq);
- return -ENOMEM;
- }
-
tm_hierarchy_init(p);
-
- return 0;
-}
-
-void
-tm_free(struct pmd_internals *p)
-{
- tm_hierarchy_uninit(p);
- rte_free(p->soft.tm.pkts_enq);
- rte_free(p->soft.tm.pkts_deq);
-}
-
-int
-tm_start(struct pmd_internals *p)
-{
- struct tm_params *t = &p->soft.tm.params;
- uint32_t n_subports, subport_id;
- int status;
-
- /* Is hierarchy frozen? */
- if (p->soft.tm.hierarchy_frozen == 0)
- return -1;
-
- /* Port */
- p->soft.tm.sched = rte_sched_port_config(&t->port_params);
- if (p->soft.tm.sched == NULL)
- return -1;
-
- /* Subport */
- n_subports = t->port_params.n_subports_per_port;
- for (subport_id = 0; subport_id < n_subports; subport_id++) {
- uint32_t n_pipes_per_subport =
- t->port_params.n_pipes_per_subport;
- uint32_t pipe_id;
-
- status = rte_sched_subport_config(p->soft.tm.sched,
- subport_id,
- &t->subport_params[subport_id]);
- if (status) {
- rte_sched_port_free(p->soft.tm.sched);
- return -1;
- }
-
- /* Pipe */
- n_pipes_per_subport = t->port_params.n_pipes_per_subport;
- for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
- int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
- pipe_id;
- int profile_id = t->pipe_to_profile[pos];
-
- if (profile_id < 0)
- continue;
-
- status = rte_sched_pipe_config(p->soft.tm.sched,
- subport_id,
- pipe_id,
- profile_id);
- if (status) {
- rte_sched_port_free(p->soft.tm.sched);
- return -1;
- }
- }
- }
-
- return 0;
-}
-
-void
-tm_stop(struct pmd_internals *p)
-{
- if (p->soft.tm.sched)
- rte_sched_port_free(p->soft.tm.sched);
-
- /* Unfreeze hierarchy */
- p->soft.tm.hierarchy_frozen = 0;
}
static struct tm_shaper_profile *
@@ -384,7 +366,7 @@ static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
struct pmd_internals *p = dev->data->dev_private;
- uint32_t n_queues_max = p->params.soft.tm.nb_queues;
+ uint32_t n_queues_max = p->params.tm.n_queues;
uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
uint32_t n_subports_max = n_pipes_max;
@@ -429,7 +411,7 @@ pmd_tm_node_type_get(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
- *is_leaf = node_id < p->params.soft.tm.nb_queues;
+ *is_leaf = node_id < p->params.tm.n_queues;
return 0;
}
@@ -479,6 +461,8 @@ static const struct rte_tm_capabilities tm_cap = {
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_head_drop_supported = 0,
.cman_wred_context_n_max = 0,
.cman_wred_context_private_n_max = 0,
@@ -667,6 +651,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.shaper_shared_n_max = 0,
.cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
@@ -828,6 +814,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
{.leaf = {
.cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
} },
@@ -1077,7 +1065,7 @@ update_subport_tc_rate(struct rte_eth_dev *dev,
subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
/* Update the subport configuration. */
- if (rte_sched_subport_config(p->soft.tm.sched,
+ if (rte_sched_subport_config(SCHED(p),
subport_id, &subport_params))
return -1;
@@ -1243,12 +1231,23 @@ wred_profile_check(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
+ /* WRED profile should be in packet mode */
+ if (profile->packet_mode == 0)
+ return -rte_tm_error_set(error,
+ ENOTSUP,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(ENOTSUP));
+
/* min_th <= max_th, max_th > 0 */
for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
- uint16_t min_th = profile->red_params[color].min_th;
- uint16_t max_th = profile->red_params[color].max_th;
+ uint32_t min_th = profile->red_params[color].min_th;
+ uint32_t max_th = profile->red_params[color].max_th;
- if (min_th > max_th || max_th == 0)
+ if (min_th > max_th ||
+ max_th == 0 ||
+ min_th > UINT16_MAX ||
+ max_th > UINT16_MAX)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_WRED_PROFILE,
@@ -1345,7 +1344,7 @@ node_add_check_port(struct rte_eth_dev *dev,
params->shaper_profile_id);
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1368,12 +1367,9 @@ node_add_check_port(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
- /* Shaper must be valid.
- * Shaper profile peak rate must fit the configured port rate.
- */
+ /* Shaper must be valid */
if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
- sp == NULL ||
- sp->params.peak.rate > p->params.soft.tm.rate)
+ sp == NULL)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
@@ -1420,7 +1416,7 @@ node_add_check_subport(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1492,7 +1488,7 @@ node_add_check_pipe(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1569,7 +1565,7 @@ node_add_check_tc(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1642,7 +1638,7 @@ node_add_check_queue(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: leaf */
- if (node_id >= p->params.soft.tm.nb_queues)
+ if (node_id >= p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -2531,10 +2527,10 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev)
.n_subports_per_port = root->n_children,
.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
- .qsize = {p->params.soft.tm.qsize[0],
- p->params.soft.tm.qsize[1],
- p->params.soft.tm.qsize[2],
- p->params.soft.tm.qsize[3],
+ .qsize = {p->params.tm.qsize[0],
+ p->params.tm.qsize[1],
+ p->params.tm.qsize[2],
+ p->params.tm.qsize[3],
},
.pipe_profiles = t->pipe_profiles,
.n_pipe_profiles = t->n_pipe_profiles,
@@ -2597,10 +2593,8 @@ pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
status = hierarchy_commit_check(dev, error);
if (status) {
- if (clear_on_fail) {
- tm_hierarchy_uninit(p);
- tm_hierarchy_init(p);
- }
+ if (clear_on_fail)
+ tm_hierarchy_free(p);
return status;
}
@@ -2642,7 +2636,7 @@ update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2691,7 +2685,7 @@ update_queue_weight(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2824,7 +2818,7 @@ update_subport_rate(struct rte_eth_dev *dev,
subport_params.tb_size = sp->params.peak.size;
/* Update the subport configuration. */
- if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
+ if (rte_sched_subport_config(SCHED(p), subport_id,
&subport_params))
return -1;
@@ -2871,7 +2865,7 @@ update_pipe_rate(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2916,7 +2910,7 @@ update_tc_rate(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -3051,8 +3045,7 @@ read_port_stats(struct rte_eth_dev *dev,
uint32_t tc_ov, id;
/* Stats read */
- int status = rte_sched_subport_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_subport_read_stats(SCHED(p),
subport_id,
&s,
&tc_ov);
@@ -3099,8 +3092,7 @@ read_subport_stats(struct rte_eth_dev *dev,
uint32_t tc_ov, tc_id;
/* Stats read */
- int status = rte_sched_subport_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_subport_read_stats(SCHED(p),
subport_id,
&s,
&tc_ov);
@@ -3160,8 +3152,7 @@ read_pipe_stats(struct rte_eth_dev *dev,
i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
@@ -3221,8 +3212,7 @@ read_tc_stats(struct rte_eth_dev *dev,
tc_id,
i);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
@@ -3281,8 +3271,7 @@ read_queue_stats(struct rte_eth_dev *dev,
tc_id,
queue_id);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
diff --git a/drivers/net/softnic/rte_pmd_eth_softnic_version.map b/drivers/net/softnic/rte_pmd_softnic_version.map
index fb2cb68c..bc44b06f 100644
--- a/drivers/net/softnic/rte_pmd_eth_softnic_version.map
+++ b/drivers/net/softnic/rte_pmd_softnic_version.map
@@ -5,3 +5,9 @@ DPDK_17.11 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ rte_pmd_softnic_manage;
+};
diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile
index 0ebd3ec5..b77fae16 100644
--- a/drivers/net/szedata2/Makefile
+++ b/drivers/net/szedata2/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright (c) 2015 CESNET
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of CESNET nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015 CESNET
include $(RTE_SDK)/mk/rte.vars.mk
@@ -51,7 +23,6 @@ LIBABIVER := 1
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2_iobuf.c
#
# Export include files
diff --git a/drivers/net/szedata2/meson.build b/drivers/net/szedata2/meson.build
new file mode 100644
index 00000000..da373374
--- /dev/null
+++ b/drivers/net/szedata2/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+dep = cc.find_library('sze2', required: false)
+build = dep.found()
+ext_deps += dep
+sources = files('rte_eth_szedata2.c')
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index e53c738d..1d20cb51 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015 - 2016 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 - 2016 CESNET
*/
#include <stdint.h>
@@ -50,10 +21,9 @@
#include <rte_memcpy.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
-#include <rte_atomic.h>
#include "rte_eth_szedata2.h"
-#include "szedata2_iobuf.h"
+#include "szedata2_logs.h"
#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
@@ -68,9 +38,53 @@
#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
+/**
+ * Format string for suffix used to differentiate between Ethernet ports
+ * on the same PCI device.
+ */
+#define SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT "-port%u"
+
+/**
+ * Maximum number of ports for one device.
+ */
+#define SZEDATA2_MAX_PORTS 2
+
+/**
+ * Entry in list of PCI devices for this driver.
+ */
+struct pci_dev_list_entry;
+struct pci_dev_list_entry {
+ LIST_ENTRY(pci_dev_list_entry) next;
+ struct rte_pci_device *pci_dev;
+ unsigned int port_count;
+};
+
+/* List of PCI devices with number of ports for this driver. */
+LIST_HEAD(pci_dev_list, pci_dev_list_entry) szedata2_pci_dev_list =
+ LIST_HEAD_INITIALIZER(szedata2_pci_dev_list);
+
+struct port_info {
+ unsigned int rx_base_id;
+ unsigned int tx_base_id;
+ unsigned int rx_count;
+ unsigned int tx_count;
+ int numa_node;
+};
+
+struct pmd_internals {
+ struct rte_eth_dev *dev;
+ uint16_t max_rx_queues;
+ uint16_t max_tx_queues;
+ unsigned int rxq_base_id;
+ unsigned int txq_base_id;
+ char *sze_dev_path;
+};
+
struct szedata2_rx_queue {
+ struct pmd_internals *priv;
struct szedata *sze;
uint8_t rx_channel;
+ uint16_t qid;
uint16_t in_port;
struct rte_mempool *mb_pool;
volatile uint64_t rx_pkts;
@@ -79,21 +93,17 @@ struct szedata2_rx_queue {
};
struct szedata2_tx_queue {
+ struct pmd_internals *priv;
struct szedata *sze;
uint8_t tx_channel;
+ uint16_t qid;
volatile uint64_t tx_pkts;
volatile uint64_t tx_bytes;
volatile uint64_t err_pkts;
};
-struct pmd_internals {
- struct szedata2_rx_queue rx_queue[RTE_ETH_SZEDATA2_MAX_RX_QUEUES];
- struct szedata2_tx_queue tx_queue[RTE_ETH_SZEDATA2_MAX_TX_QUEUES];
- uint16_t max_rx_queues;
- uint16_t max_tx_queues;
- char sze_dev[PATH_MAX];
- struct rte_mem_resource *pci_rsc;
-};
+int szedata2_logtype_init;
+int szedata2_logtype_driver;
static struct ether_addr eth_addr = {
.addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
@@ -133,8 +143,10 @@ eth_szedata2_rx(void *queue,
for (i = 0; i < nb_pkts; i++) {
mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
- if (unlikely(mbuf == NULL))
+ if (unlikely(mbuf == NULL)) {
+ sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
break;
+ }
/* get the next sze packet */
if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
@@ -318,10 +330,10 @@ eth_szedata2_rx(void *queue,
* sze packet will not fit in one mbuf,
* scattered mode is not enabled, drop packet
*/
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"SZE segment %d bytes will not fit in one mbuf "
"(%d bytes), scattered mode is not enabled, "
- "drop packet!!\n",
+ "drop packet!!",
packet_size, buf_size);
rte_pktmbuf_free(mbuf);
}
@@ -354,6 +366,8 @@ eth_szedata2_rx_scattered(void *queue,
uint16_t packet_len1 = 0;
uint16_t packet_len2 = 0;
uint16_t hw_data_align;
+ uint64_t *mbuf_failed_ptr =
+ &sze_q->priv->dev->data->rx_mbuf_alloc_failed;
if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
return 0;
@@ -541,6 +555,7 @@ eth_szedata2_rx_scattered(void *queue,
sze->ct_rx_lck = ct_rx_lck_backup;
sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
+ sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
break;
}
@@ -590,6 +605,7 @@ eth_szedata2_rx_scattered(void *queue,
ct_rx_rem_bytes_backup;
sze->ct_rx_cur_ptr =
ct_rx_cur_ptr_backup;
+ (*mbuf_failed_ptr)++;
goto finish;
}
@@ -633,6 +649,7 @@ eth_szedata2_rx_scattered(void *queue,
ct_rx_rem_bytes_backup;
sze->ct_rx_cur_ptr =
ct_rx_cur_ptr_backup;
+ (*mbuf_failed_ptr)++;
goto finish;
}
@@ -889,7 +906,7 @@ eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
if (rxq->sze == NULL) {
uint32_t rx = 1 << rxq->rx_channel;
uint32_t tx = 0;
- rxq->sze = szedata_open(internals->sze_dev);
+ rxq->sze = szedata_open(internals->sze_dev_path);
if (rxq->sze == NULL)
return -EINVAL;
ret = szedata_subscribe3(rxq->sze, &rx, &tx);
@@ -934,7 +951,7 @@ eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
if (txq->sze == NULL) {
uint32_t rx = 0;
uint32_t tx = 1 << txq->tx_channel;
- txq->sze = szedata_open(internals->sze_dev);
+ txq->sze = szedata_open(internals->sze_dev_path);
if (txq->sze == NULL)
return -EINVAL;
ret = szedata_subscribe3(txq->sze, &rx, &tx);
@@ -1017,7 +1034,7 @@ static int
eth_dev_configure(struct rte_eth_dev *dev)
{
struct rte_eth_dev_data *data = dev->data;
- if (data->dev_conf.rxmode.enable_scatter == 1) {
+ if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
dev->rx_pkt_burst = eth_szedata2_rx_scattered;
data->scattered_rx = 1;
} else {
@@ -1032,13 +1049,18 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
dev_info->if_index = 0;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = internals->max_rx_queues;
dev_info->max_tx_queues = internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->tx_offload_capa = 0;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_queue_offload_capa = 0;
dev_info->speed_capa = ETH_LINK_SPEED_100G;
}
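The eth_dev_configure() hook above switches the burst function to eth_szedata2_rx_scattered() when the application requests DEV_RX_OFFLOAD_SCATTER, which eth_dev_info() now advertises in rx_offload_capa. A minimal application-side sketch of requesting the offload (port id and queue counts are assumed):

    struct rte_eth_conf port_conf = {
        .rxmode = {
            .offloads = DEV_RX_OFFLOAD_SCATTER | DEV_RX_OFFLOAD_CRC_STRIP,
        },
    };

    /* The PMD checks rxmode.offloads in eth_dev_configure() and installs
     * the scattered receive burst function when SCATTER is requested. */
    ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);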
@@ -1054,22 +1076,29 @@ eth_stats_get(struct rte_eth_dev *dev,
uint64_t tx_err_total = 0;
uint64_t rx_total_bytes = 0;
uint64_t tx_total_bytes = 0;
- const struct pmd_internals *internals = dev->data->dev_private;
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_rx; i++) {
- stats->q_ipackets[i] = internals->rx_queue[i].rx_pkts;
- stats->q_ibytes[i] = internals->rx_queue[i].rx_bytes;
- rx_total += stats->q_ipackets[i];
- rx_total_bytes += stats->q_ibytes[i];
+ for (i = 0; i < nb_rx; i++) {
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] = rxq->rx_pkts;
+ stats->q_ibytes[i] = rxq->rx_bytes;
+ }
+ rx_total += rxq->rx_pkts;
+ rx_total_bytes += rxq->rx_bytes;
}
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_tx; i++) {
- stats->q_opackets[i] = internals->tx_queue[i].tx_pkts;
- stats->q_obytes[i] = internals->tx_queue[i].tx_bytes;
- stats->q_errors[i] = internals->tx_queue[i].err_pkts;
- tx_total += stats->q_opackets[i];
- tx_total_bytes += stats->q_obytes[i];
- tx_err_total += stats->q_errors[i];
+ for (i = 0; i < nb_tx; i++) {
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] = txq->tx_pkts;
+ stats->q_obytes[i] = txq->tx_bytes;
+ stats->q_errors[i] = txq->err_pkts;
+ }
+ tx_total += txq->tx_pkts;
+ tx_total_bytes += txq->tx_bytes;
+ tx_err_total += txq->err_pkts;
}
stats->ipackets = rx_total;
@@ -1077,6 +1106,7 @@ eth_stats_get(struct rte_eth_dev *dev,
stats->ibytes = rx_total_bytes;
stats->obytes = tx_total_bytes;
stats->oerrors = tx_err_total;
+ stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
return 0;
}
@@ -1087,17 +1117,18 @@ eth_stats_reset(struct rte_eth_dev *dev)
uint16_t i;
uint16_t nb_rx = dev->data->nb_rx_queues;
uint16_t nb_tx = dev->data->nb_tx_queues;
- struct pmd_internals *internals = dev->data->dev_private;
for (i = 0; i < nb_rx; i++) {
- internals->rx_queue[i].rx_pkts = 0;
- internals->rx_queue[i].rx_bytes = 0;
- internals->rx_queue[i].err_pkts = 0;
+ struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
+ rxq->rx_pkts = 0;
+ rxq->rx_bytes = 0;
+ rxq->err_pkts = 0;
}
for (i = 0; i < nb_tx; i++) {
- internals->tx_queue[i].tx_pkts = 0;
- internals->tx_queue[i].tx_bytes = 0;
- internals->tx_queue[i].err_pkts = 0;
+ struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
+ txq->tx_pkts = 0;
+ txq->tx_bytes = 0;
+ txq->err_pkts = 0;
}
}
@@ -1105,9 +1136,11 @@ static void
eth_rx_queue_release(void *q)
{
struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
- if (rxq->sze != NULL) {
- szedata_close(rxq->sze);
- rxq->sze = NULL;
+
+ if (rxq != NULL) {
+ if (rxq->sze != NULL)
+ szedata_close(rxq->sze);
+ rte_free(rxq);
}
}
@@ -1115,9 +1148,11 @@ static void
eth_tx_queue_release(void *q)
{
struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
- if (txq->sze != NULL) {
- szedata_close(txq->sze);
- txq->sze = NULL;
+
+ if (txq != NULL) {
+ if (txq->sze != NULL)
+ szedata_close(txq->sze);
+ rte_free(txq);
}
}
@@ -1142,111 +1177,34 @@ eth_dev_close(struct rte_eth_dev *dev)
dev->data->nb_tx_queues = 0;
}
-/**
- * Function takes value from first IBUF status register.
- * Values in IBUF and OBUF should be same.
- *
- * @param internals
- * Pointer to device private structure.
- * @return
- * Link speed constant.
- */
-static inline enum szedata2_link_speed
-get_link_speed(const struct pmd_internals *internals)
-{
- const volatile struct szedata2_ibuf *ibuf =
- ibuf_ptr_by_index(internals->pci_rsc, 0);
- uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4;
- switch (speed) {
- case 0x03:
- return SZEDATA2_LINK_SPEED_10G;
- case 0x04:
- return SZEDATA2_LINK_SPEED_40G;
- case 0x05:
- return SZEDATA2_LINK_SPEED_100G;
- default:
- return SZEDATA2_LINK_SPEED_DEFAULT;
- }
-}
-
static int
eth_link_update(struct rte_eth_dev *dev,
int wait_to_complete __rte_unused)
{
struct rte_eth_link link;
- struct rte_eth_link *link_ptr = &link;
- struct rte_eth_link *dev_link = &dev->data->dev_link;
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- const volatile struct szedata2_ibuf *ibuf;
- uint32_t i;
- bool link_is_up = false;
-
- switch (get_link_speed(internals)) {
- case SZEDATA2_LINK_SPEED_10G:
- link.link_speed = ETH_SPEED_NUM_10G;
- break;
- case SZEDATA2_LINK_SPEED_40G:
- link.link_speed = ETH_SPEED_NUM_40G;
- break;
- case SZEDATA2_LINK_SPEED_100G:
- link.link_speed = ETH_SPEED_NUM_100G;
- break;
- default:
- link.link_speed = ETH_SPEED_NUM_10G;
- break;
- }
- /* szedata2 uses only full duplex */
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf = ibuf_ptr_by_index(internals->pci_rsc, i);
- /*
- * Link is considered up if at least one ibuf is enabled
- * and up.
- */
- if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) {
- link_is_up = true;
- break;
- }
- }
-
- link.link_status = (link_is_up) ? ETH_LINK_UP : ETH_LINK_DOWN;
+ memset(&link, 0, sizeof(link));
+ link.link_speed = ETH_SPEED_NUM_100G;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_status = ETH_LINK_UP;
link.link_autoneg = ETH_LINK_FIXED;
- rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link,
- *(uint64_t *)link_ptr);
-
+ rte_eth_linkstatus_set(dev, &link);
return 0;
}
static int
-eth_dev_set_link_up(struct rte_eth_dev *dev)
+eth_dev_set_link_up(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++)
- ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i));
- for (i = 0; i < szedata2_obuf_count; i++)
- obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i));
+ PMD_DRV_LOG(WARNING, "Setting link up is not supported.");
return 0;
}
static int
-eth_dev_set_link_down(struct rte_eth_dev *dev)
+eth_dev_set_link_down(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++)
- ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i));
- for (i = 0; i < szedata2_obuf_count; i++)
- obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i));
+ PMD_DRV_LOG(WARNING, "Setting link down is not supported.");
return 0;
}
@@ -1254,26 +1212,50 @@ static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t rx_queue_id,
uint16_t nb_rx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
+ unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mb_pool)
{
- struct pmd_internals *internals = dev->data->dev_private;
- struct szedata2_rx_queue *rxq = &internals->rx_queue[rx_queue_id];
+ struct szedata2_rx_queue *rxq;
int ret;
- uint32_t rx = 1 << rx_queue_id;
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint8_t rx_channel = internals->rxq_base_id + rx_queue_id;
+ uint32_t rx = 1 << rx_channel;
uint32_t tx = 0;
- rxq->sze = szedata_open(internals->sze_dev);
- if (rxq->sze == NULL)
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->rx_queues[rx_queue_id] != NULL) {
+ eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queues[rx_queue_id] = NULL;
+ }
+
+ rxq = rte_zmalloc_socket("szedata2 rx queue",
+ sizeof(struct szedata2_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ return -ENOMEM;
+ }
+
+ rxq->priv = internals;
+ rxq->sze = szedata_open(internals->sze_dev_path);
+ if (rxq->sze == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ eth_rx_queue_release(rxq);
return -EINVAL;
+ }
ret = szedata_subscribe3(rxq->sze, &rx, &tx);
if (ret != 0 || rx == 0) {
- szedata_close(rxq->sze);
- rxq->sze = NULL;
+ PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
+ "%" PRIu16 "!", rx_queue_id);
+ eth_rx_queue_release(rxq);
return -EINVAL;
}
- rxq->rx_channel = rx_queue_id;
+ rxq->rx_channel = rx_channel;
+ rxq->qid = rx_queue_id;
rxq->in_port = dev->data->port_id;
rxq->mb_pool = mb_pool;
rxq->rx_pkts = 0;
@@ -1281,6 +1263,11 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
rxq->err_pkts = 0;
dev->data->rx_queues[rx_queue_id] = rxq;
+
+ PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
+ "%u (channel id %u).", rxq->qid, socket_id,
+ rxq->rx_channel);
+
return 0;
}
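Since nb_rx_desc is unused here (ring sizing is handled inside libsze2), an application may pass any descriptor count. A usage sketch with assumed pool sizing and ids:

    struct rte_mempool *pool = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
            0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

    /* nb_rx_desc (third argument) is ignored by this PMD. */
    ret = rte_eth_rx_queue_setup(port_id, 0, 0, rte_socket_id(), NULL, pool);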
@@ -1288,89 +1275,93 @@ static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t tx_queue_id,
uint16_t nb_tx_desc __rte_unused,
- unsigned int socket_id __rte_unused,
+ unsigned int socket_id,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
- struct pmd_internals *internals = dev->data->dev_private;
- struct szedata2_tx_queue *txq = &internals->tx_queue[tx_queue_id];
+ struct szedata2_tx_queue *txq;
int ret;
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint8_t tx_channel = internals->txq_base_id + tx_queue_id;
uint32_t rx = 0;
- uint32_t tx = 1 << tx_queue_id;
+ uint32_t tx = 1 << tx_channel;
+
+ PMD_INIT_FUNC_TRACE();
- txq->sze = szedata_open(internals->sze_dev);
- if (txq->sze == NULL)
+ if (dev->data->tx_queues[tx_queue_id] != NULL) {
+ eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queues[tx_queue_id] = NULL;
+ }
+
+ txq = rte_zmalloc_socket("szedata2 tx queue",
+ sizeof(struct szedata2_tx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ return -ENOMEM;
+ }
+
+ txq->priv = internals;
+ txq->sze = szedata_open(internals->sze_dev_path);
+ if (txq->sze == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ eth_tx_queue_release(txq);
return -EINVAL;
+ }
ret = szedata_subscribe3(txq->sze, &rx, &tx);
if (ret != 0 || tx == 0) {
- szedata_close(txq->sze);
- txq->sze = NULL;
+ PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
+ "%" PRIu16 "!", tx_queue_id);
+ eth_tx_queue_release(txq);
return -EINVAL;
}
- txq->tx_channel = tx_queue_id;
+ txq->tx_channel = tx_channel;
+ txq->qid = tx_queue_id;
txq->tx_pkts = 0;
txq->tx_bytes = 0;
txq->err_pkts = 0;
dev->data->tx_queues[tx_queue_id] = txq;
+
+ PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
+ "%u (channel id %u).", txq->qid, socket_id,
+ txq->tx_channel);
+
return 0;
}
-static void
+static int
eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
struct ether_addr *mac_addr __rte_unused)
{
+ return 0;
}
static void
-eth_promiscuous_enable(struct rte_eth_dev *dev)
+eth_promiscuous_enable(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
- SZEDATA2_MAC_CHMODE_PROMISC);
- }
+ PMD_DRV_LOG(WARNING, "Enabling promiscuous mode is not supported. "
+ "The card is always in promiscuous mode.");
}
static void
-eth_promiscuous_disable(struct rte_eth_dev *dev)
+eth_promiscuous_disable(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
- SZEDATA2_MAC_CHMODE_ONLY_VALID);
- }
+ PMD_DRV_LOG(WARNING, "Disabling promiscuous mode is not supported. "
+ "The card is always in promiscuous mode.");
}
static void
-eth_allmulticast_enable(struct rte_eth_dev *dev)
+eth_allmulticast_enable(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
- SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
- }
+ PMD_DRV_LOG(WARNING, "Enabling allmulticast mode is not supported.");
}
static void
-eth_allmulticast_disable(struct rte_eth_dev *dev)
+eth_allmulticast_disable(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internals *internals = (struct pmd_internals *)
- dev->data->dev_private;
- uint32_t i;
-
- for (i = 0; i < szedata2_ibuf_count; i++) {
- ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
- SZEDATA2_MAC_CHMODE_ONLY_VALID);
- }
+ PMD_DRV_LOG(WARNING, "Disabling allmulticast mode is not supported.");
}
static const struct eth_dev_ops ops = {
@@ -1417,9 +1408,9 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
FILE *fd;
char pcislot_path[PATH_MAX];
uint32_t domain;
- uint32_t bus;
- uint32_t devid;
- uint32_t function;
+ uint8_t bus;
+ uint8_t devid;
+ uint8_t function;
dir = opendir("/sys/class/combo");
if (dir == NULL)
@@ -1444,7 +1435,7 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
if (fd == NULL)
continue;
- ret = fscanf(fd, "%4" PRIx16 ":%2" PRIx8 ":%2" PRIx8 ".%" PRIx8,
+ ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
&domain, &bus, &devid, &function);
fclose(fd);
if (ret != 4)
@@ -1464,109 +1455,62 @@ get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
return -1;
}
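The fscanf() format above was corrected together with narrowing bus, devid and function to uint8_t: each conversion now uses the SCN macro matching its destination width (SCNx32 for the 32-bit domain, SCNx8 for the 8-bit fields), where the old %PRIx16/%PRIx8 conversions stored into uint32_t variables. A standalone sketch of the same parsing, with an assumed sample address string:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t domain;
        uint8_t bus, devid, function;

        /* Same conversions as get_szedata2_index(). */
        int ret = sscanf("0000:03:00.0",
                "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
                &domain, &bus, &devid, &function);
        return (ret == 4) ? 0 : 1;
    }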
+/**
+ * @brief Initializes rte_eth_dev device.
+ * @param dev Device to initialize.
+ * @param pi Structure with info about DMA queues.
+ * @return 0 on success, negative error code on error.
+ */
static int
-rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
+rte_szedata2_eth_dev_init(struct rte_eth_dev *dev, struct port_info *pi)
{
+ int ret;
+ uint32_t szedata2_index;
+ char name[PATH_MAX];
struct rte_eth_dev_data *data = dev->data;
struct pmd_internals *internals = (struct pmd_internals *)
data->dev_private;
- struct szedata *szedata_temp;
- int ret;
- uint32_t szedata2_index;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_pci_addr *pci_addr = &pci_dev->addr;
- struct rte_mem_resource *pci_rsc =
- &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
- char rsc_filename[PATH_MAX];
- void *pci_resource_ptr = NULL;
- int fd;
- RTE_LOG(INFO, PMD, "Initializing szedata2 device (" PCI_PRI_FMT ")\n",
- pci_addr->domain, pci_addr->bus, pci_addr->devid,
- pci_addr->function);
+ PMD_INIT_FUNC_TRACE();
+ PMD_INIT_LOG(INFO, "Initializing eth_dev %s (driver %s)", data->name,
+ dev->device->driver->name);
+
+ /* Fill internal private structure. */
+ internals->dev = dev;
/* Get index of szedata2 device file and create path to device file */
- ret = get_szedata2_index(pci_addr, &szedata2_index);
+ ret = get_szedata2_index(&pci_dev->addr, &szedata2_index);
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n");
+ PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
return -ENODEV;
}
- snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT,
- szedata2_index);
-
- RTE_LOG(INFO, PMD, "SZEDATA2 path: %s\n", internals->sze_dev);
-
- /*
- * Get number of available DMA RX and TX channels, which is maximum
- * number of queues that can be created and store it in private device
- * data structure.
- */
- szedata_temp = szedata_open(internals->sze_dev);
- if (szedata_temp == NULL) {
- RTE_LOG(ERR, PMD, "szedata_open(): failed to open %s",
- internals->sze_dev);
- return -EINVAL;
+ snprintf(name, PATH_MAX, SZEDATA2_DEV_PATH_FMT, szedata2_index);
+ internals->sze_dev_path = strdup(name);
+ if (internals->sze_dev_path == NULL) {
+ PMD_INIT_LOG(ERR, "strdup() failed!");
+ return -ENOMEM;
}
- internals->max_rx_queues = szedata_ifaces_available(szedata_temp,
- SZE2_DIR_RX);
- internals->max_tx_queues = szedata_ifaces_available(szedata_temp,
- SZE2_DIR_TX);
- szedata_close(szedata_temp);
-
- RTE_LOG(INFO, PMD, "Available DMA channels RX: %u TX: %u\n",
- internals->max_rx_queues, internals->max_tx_queues);
+ PMD_INIT_LOG(INFO, "SZEDATA2 path: %s", internals->sze_dev_path);
+ internals->max_rx_queues = pi->rx_count;
+ internals->max_tx_queues = pi->tx_count;
+ internals->rxq_base_id = pi->rx_base_id;
+ internals->txq_base_id = pi->tx_base_id;
+ PMD_INIT_LOG(INFO, "%u RX DMA channels from id %u",
+ internals->max_rx_queues, internals->rxq_base_id);
+ PMD_INIT_LOG(INFO, "%u TX DMA channels from id %u",
+ internals->max_tx_queues, internals->txq_base_id);
/* Set rx, tx burst functions */
- if (data->dev_conf.rxmode.enable_scatter == 1 ||
- data->scattered_rx == 1) {
+ if (data->scattered_rx == 1)
dev->rx_pkt_burst = eth_szedata2_rx_scattered;
- data->scattered_rx = 1;
- } else {
+ else
dev->rx_pkt_burst = eth_szedata2_rx;
- data->scattered_rx = 0;
- }
dev->tx_pkt_burst = eth_szedata2_tx;
/* Set function callbacks for Ethernet API */
dev->dev_ops = &ops;
- rte_eth_copy_pci_info(dev, pci_dev);
-
- /* mmap pci resource0 file to rte_mem_resource structure */
- if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
- 0) {
- RTE_LOG(ERR, PMD, "Missing resource%u file\n",
- PCI_RESOURCE_NUMBER);
- return -EINVAL;
- }
- snprintf(rsc_filename, PATH_MAX,
- "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(),
- pci_addr->domain, pci_addr->bus,
- pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
- fd = open(rsc_filename, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, PMD, "Could not open file %s\n", rsc_filename);
- return -EINVAL;
- }
-
- pci_resource_ptr = mmap(0,
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
- PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
- close(fd);
- if (pci_resource_ptr == MAP_FAILED) {
- RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
- rsc_filename, fd);
- return -EINVAL;
- }
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
- internals->pci_rsc = pci_rsc;
-
- RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu "
- "virt addr = %llx\n", PCI_RESOURCE_NUMBER,
- (unsigned long long)pci_rsc->phys_addr,
- (unsigned long long)pci_rsc->len,
- (unsigned long long)pci_rsc->addr);
-
/* Get link state */
eth_link_update(dev, 0);
@@ -1574,40 +1518,37 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
RTE_CACHE_LINE_SIZE);
if (data->mac_addrs == NULL) {
- RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
- munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
- return -EINVAL;
+ PMD_INIT_LOG(ERR, "Could not alloc space for MAC address!");
+ free(internals->sze_dev_path);
+ return -ENOMEM;
}
ether_addr_copy(&eth_addr, data->mac_addrs);
- /* At initial state COMBO card is in promiscuous mode so disable it */
- eth_promiscuous_disable(dev);
-
- RTE_LOG(INFO, PMD, "szedata2 device ("
- PCI_PRI_FMT ") successfully initialized\n",
- pci_addr->domain, pci_addr->bus, pci_addr->devid,
- pci_addr->function);
+ PMD_INIT_LOG(INFO, "%s device %s successfully initialized",
+ dev->device->driver->name, data->name);
return 0;
}
+/**
+ * @brief Uninitializes rte_eth_dev device.
+ * @param dev Device to uninitialize.
+ * @return 0 on success, negative error code on error.
+ */
static int
rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
- struct rte_pci_addr *pci_addr = &pci_dev->addr;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ free(internals->sze_dev_path);
rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
- munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
- RTE_LOG(INFO, PMD, "szedata2 device ("
- PCI_PRI_FMT ") successfully uninitialized\n",
- pci_addr->domain, pci_addr->bus, pci_addr->devid,
- pci_addr->function);
+ PMD_DRV_LOG(INFO, "%s device %s successfully uninitialized",
+ dev->device->driver->name, dev->data->name);
return 0;
}
@@ -1626,21 +1567,349 @@ static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
PCI_DEVICE_ID_NETCOPE_COMBO100G2)
},
{
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
+ PCI_DEVICE_ID_NETCOPE_NFB200G2QL)
+ },
+ {
.vendor_id = 0,
}
};
+/**
+ * @brief Gets info about DMA queues for ports.
+ * @param pci_dev PCI device structure.
+ * @param port_count Pointer to variable set with number of ports.
+ * @param pi Pointer to array of structures with info about DMA queues
+ * for ports.
+ * @param max_ports Maximum number of ports.
+ * @return 0 on success, negative error code on error.
+ */
+static int
+get_port_info(struct rte_pci_device *pci_dev, unsigned int *port_count,
+ struct port_info *pi, unsigned int max_ports)
+{
+ struct szedata *szedata_temp;
+ char sze_dev_path[PATH_MAX];
+ uint32_t szedata2_index;
+ int ret;
+ uint16_t max_rx_queues;
+ uint16_t max_tx_queues;
+
+ if (max_ports == 0)
+ return -EINVAL;
+
+ memset(pi, 0, max_ports * sizeof(struct port_info));
+ *port_count = 0;
+
+ /* Get index of szedata2 device file and create path to device file */
+ ret = get_szedata2_index(&pci_dev->addr, &szedata2_index);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
+ return -ENODEV;
+ }
+ snprintf(sze_dev_path, PATH_MAX, SZEDATA2_DEV_PATH_FMT, szedata2_index);
+
+ /*
+ * Get number of available DMA RX and TX channels, which is maximum
+ * number of queues that can be created.
+ */
+ szedata_temp = szedata_open(sze_dev_path);
+ if (szedata_temp == NULL) {
+ PMD_INIT_LOG(ERR, "szedata_open(%s) failed", sze_dev_path);
+ return -EINVAL;
+ }
+ max_rx_queues = szedata_ifaces_available(szedata_temp, SZE2_DIR_RX);
+ max_tx_queues = szedata_ifaces_available(szedata_temp, SZE2_DIR_TX);
+ PMD_INIT_LOG(INFO, "Available DMA channels RX: %u TX: %u",
+ max_rx_queues, max_tx_queues);
+ if (max_rx_queues > RTE_ETH_SZEDATA2_MAX_RX_QUEUES) {
+ PMD_INIT_LOG(ERR, "%u RX queues exceed the supported maximum of %u",
+ max_rx_queues, RTE_ETH_SZEDATA2_MAX_RX_QUEUES);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+ if (max_tx_queues > RTE_ETH_SZEDATA2_MAX_TX_QUEUES) {
+ PMD_INIT_LOG(ERR, "%u TX queues exceed the supported maximum of %u",
+ max_tx_queues, RTE_ETH_SZEDATA2_MAX_TX_QUEUES);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+
+ if (pci_dev->id.device_id == PCI_DEVICE_ID_NETCOPE_NFB200G2QL) {
+ unsigned int i;
+ unsigned int rx_queues = max_rx_queues / max_ports;
+ unsigned int tx_queues = max_tx_queues / max_ports;
+
+ /*
+ * szedata_ifaces_available() reports the total number of queues
+ * from all DMA controllers, which may reside at different NUMA
+ * locations; all queues of one DMA controller share the same NUMA
+ * node. The NUMA node of the first queue of each DMA controller
+ * is retrieved, and if it differs from that of the previous DMA
+ * controller's queues, the queues are assigned to the next port.
+ */
+
+ for (i = 0; i < max_ports; i++) {
+ int numa_rx = szedata_get_area_numa_node(szedata_temp,
+ SZE2_DIR_RX, rx_queues * i);
+ int numa_tx = szedata_get_area_numa_node(szedata_temp,
+ SZE2_DIR_TX, tx_queues * i);
+ unsigned int port_rx_queues = numa_rx != -1 ?
+ rx_queues : 0;
+ unsigned int port_tx_queues = numa_tx != -1 ?
+ tx_queues : 0;
+ PMD_INIT_LOG(DEBUG, "%u rx queues from id %u, numa %d",
+ rx_queues, rx_queues * i, numa_rx);
+ PMD_INIT_LOG(DEBUG, "%u tx queues from id %u, numa %d",
+ tx_queues, tx_queues * i, numa_tx);
+
+ if (port_rx_queues != 0 && port_tx_queues != 0 &&
+ numa_rx != numa_tx) {
+ PMD_INIT_LOG(ERR, "RX queue %u numa %d differs "
+ "from TX queue %u numa %d "
+ "unexpectedly",
+ rx_queues * i, numa_rx,
+ tx_queues * i, numa_tx);
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ } else if (port_rx_queues == 0 && port_tx_queues == 0) {
+ continue;
+ } else {
+ unsigned int j;
+ unsigned int current = *port_count;
+ int port_numa = port_rx_queues != 0 ?
+ numa_rx : numa_tx;
+
+ for (j = 0; j < *port_count; j++) {
+ if (pi[j].numa_node ==
+ port_numa) {
+ current = j;
+ break;
+ }
+ }
+ if (pi[current].rx_count == 0 &&
+ pi[current].tx_count == 0) {
+ pi[current].rx_base_id = rx_queues * i;
+ pi[current].tx_base_id = tx_queues * i;
+ (*port_count)++;
+ } else if ((rx_queues * i !=
+ pi[current].rx_base_id +
+ pi[current].rx_count) ||
+ (tx_queues * i !=
+ pi[current].tx_base_id +
+ pi[current].tx_count)) {
+ PMD_INIT_LOG(ERR, "Queue ids does not "
+ "fulfill constraints");
+ szedata_close(szedata_temp);
+ return -EINVAL;
+ }
+ pi[current].rx_count += port_rx_queues;
+ pi[current].tx_count += port_tx_queues;
+ pi[current].numa_node = port_numa;
+ }
+ }
+ } else {
+ pi[0].rx_count = max_rx_queues;
+ pi[0].tx_count = max_tx_queues;
+ pi[0].numa_node = pci_dev->device.numa_node;
+ *port_count = 1;
+ }
+
+ szedata_close(szedata_temp);
+ return 0;
+}
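A worked example of the NFB-200G2QL branch, with hypothetical queue counts and NUMA layout:

    /* max_rx_queues = max_tx_queues = 32, max_ports = 2
     *   rx_queues = tx_queues = 32 / 2 = 16
     *   i = 0: queues 0..15,  numa_rx = numa_tx = 0 -> new port 0, base id 0
     *   i = 1: queues 16..31, numa_rx = numa_tx = 1 -> new port 1, base id 16
     * Had both DMA controllers reported NUMA node 0, iteration i = 1 would
     * instead extend port 0 (rx_count grows from 16 to 32), because queue
     * ids 16.. continue exactly where port 0's existing block ends.
     */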
+
+/**
+ * @brief Allocates rte_eth_dev device.
+ * @param pci_dev Corresponding PCI device.
+ * @param numa_node NUMA node on which device is allocated.
+ * @param port_no Id of rte_eth_device created on PCI device pci_dev.
+ * @return Pointer to allocated device or NULL on error.
+ */
+static struct rte_eth_dev *
+szedata2_eth_dev_allocate(struct rte_pci_device *pci_dev, int numa_node,
+ unsigned int port_no)
+{
+ struct rte_eth_dev *eth_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ PMD_INIT_FUNC_TRACE();
+
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s"
+ SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT,
+ pci_dev->device.name, port_no);
+ PMD_INIT_LOG(DEBUG, "Allocating eth_dev %s", name);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ return NULL;
+
+ eth_dev->data->dev_private = rte_zmalloc_socket(name,
+ sizeof(struct pmd_internals), RTE_CACHE_LINE_SIZE,
+ numa_node);
+ if (!eth_dev->data->dev_private) {
+ rte_eth_dev_release_port(eth_dev);
+ return NULL;
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev)
+ return NULL;
+ }
+
+ eth_dev->device = &pci_dev->device;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->numa_node = numa_node;
+ return eth_dev;
+}
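The suffix format defined earlier yields one eth_dev name per port on the same PCI device; for example (PCI address assumed):

    /* pci_dev->device.name = "0000:03:00.0", port_no = 1
     * snprintf(name, ..., "%s" SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT, ...)
     * -> "0000:03:00.0-port1"
     */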
+
+/**
+ * @brief Releases a range of rte_eth_dev devices from an array.
+ * @param eth_devs Array of pointers to rte_eth_dev devices.
+ * @param from Index in array eth_devs to start with.
+ * @param to Index in array right after the last element to release.
+ *
+ * Used to release devices after a failed initialization.
+ */
+static void
+szedata2_eth_dev_release_interval(struct rte_eth_dev **eth_devs,
+ unsigned int from, unsigned int to)
+{
+ unsigned int i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = from; i < to; i++) {
+ rte_szedata2_eth_dev_uninit(eth_devs[i]);
+ rte_eth_dev_pci_release(eth_devs[i]);
+ }
+}
+
+/**
+ * @brief Callback .probe for struct rte_pci_driver.
+ */
static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_probe(pci_dev,
- sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
+ struct port_info port_info[SZEDATA2_MAX_PORTS];
+ unsigned int port_count;
+ int ret;
+ unsigned int i;
+ struct pci_dev_list_entry *list_entry;
+ struct rte_eth_dev *eth_devs[SZEDATA2_MAX_PORTS] = {NULL,};
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = get_port_info(pci_dev, &port_count, port_info,
+ SZEDATA2_MAX_PORTS);
+ if (ret != 0)
+ return ret;
+
+ if (port_count == 0) {
+ PMD_INIT_LOG(ERR, "No available ports!");
+ return -ENODEV;
+ }
+
+ list_entry = rte_zmalloc(NULL, sizeof(struct pci_dev_list_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (list_entry == NULL) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc() failed!");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < port_count; i++) {
+ eth_devs[i] = szedata2_eth_dev_allocate(pci_dev,
+ port_info[i].numa_node, i);
+ if (eth_devs[i] == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to alloc eth_dev for port %u",
+ i);
+ szedata2_eth_dev_release_interval(eth_devs, 0, i);
+ rte_free(list_entry);
+ return -ENOMEM;
+ }
+
+ ret = rte_szedata2_eth_dev_init(eth_devs[i], &port_info[i]);
+ if (ret != 0) {
+ PMD_INIT_LOG(ERR, "Failed to init eth_dev for port %u",
+ i);
+ rte_eth_dev_pci_release(eth_devs[i]);
+ szedata2_eth_dev_release_interval(eth_devs, 0, i);
+ rte_free(list_entry);
+ return ret;
+ }
+
+ rte_eth_dev_probing_finish(eth_devs[i]);
+ }
+
+ /*
+ * Add pci_dev to list of PCI devices for this driver
+ * which is used at remove callback to release all created eth_devs.
+ */
+ list_entry->pci_dev = pci_dev;
+ list_entry->port_count = port_count;
+ LIST_INSERT_HEAD(&szedata2_pci_dev_list, list_entry, next);
+ return 0;
}
+/**
+ * @brief Callback .remove for struct rte_pci_driver.
+ */
static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev,
- rte_szedata2_eth_dev_uninit);
+ unsigned int i;
+ unsigned int port_count;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int ret;
+ int retval = 0;
+ bool found = false;
+ struct pci_dev_list_entry *list_entry = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ LIST_FOREACH(list_entry, &szedata2_pci_dev_list, next) {
+ if (list_entry->pci_dev == pci_dev) {
+ port_count = list_entry->port_count;
+ found = true;
+ break;
+ }
+ }
+ /* Check found before touching list_entry: when the device is not in
+ * the list, list_entry is NULL and must not be removed or freed. */
+ if (!found) {
+ PMD_DRV_LOG(ERR, "PCI device " PCI_PRI_FMT " not found",
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ return -ENODEV;
+ }
+
+ LIST_REMOVE(list_entry, next);
+ rte_free(list_entry);
+
+ for (i = 0; i < port_count; i++) {
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s"
+ SZEDATA2_ETH_DEV_NAME_SUFFIX_FMT,
+ pci_dev->device.name, i);
+ PMD_DRV_LOG(DEBUG, "Removing eth_dev %s", name);
+ eth_dev = rte_eth_dev_allocated(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(ERR, "eth_dev %s not found", name);
+ retval = retval ? retval : -ENODEV;
+ }
+
+ ret = rte_szedata2_eth_dev_uninit(eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "eth_dev %s uninit failed", name);
+ retval = retval ? retval : ret;
+ }
+
+ rte_eth_dev_pci_release(eth_dev);
+ }
+
+ return retval;
}
static struct rte_pci_driver szedata2_eth_driver = {
@@ -1652,4 +1921,14 @@ static struct rte_pci_driver szedata2_eth_driver = {
RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
- "* combo6core & combov3 & szedata2 & szedata2_cv3");
+ "* combo6core & combov3 & szedata2 & ( szedata2_cv3 | szedata2_cv3_fdt )");
+
+RTE_INIT(szedata2_init_log)
+{
+ szedata2_logtype_init = rte_log_register("pmd.net.szedata2.init");
+ if (szedata2_logtype_init >= 0)
+ rte_log_set_level(szedata2_logtype_init, RTE_LOG_NOTICE);
+ szedata2_logtype_driver = rte_log_register("pmd.net.szedata2.driver");
+ if (szedata2_logtype_driver >= 0)
+ rte_log_set_level(szedata2_logtype_driver, RTE_LOG_NOTICE);
+}
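Both log types are registered at NOTICE level, so the PMD_INIT_LOG(DEBUG, ...) traces above stay silent by default. They can be raised at startup through the EAL --log-level option; an illustrative invocation (device address assumed, and the exact option syntax varies between DPDK releases), where 8 corresponds to RTE_LOG_DEBUG:

    testpmd -w 0000:03:00.0 --log-level=pmd.net.szedata2.init,8 -- -i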
diff --git a/drivers/net/szedata2/rte_eth_szedata2.h b/drivers/net/szedata2/rte_eth_szedata2.h
index f25d4c59..26a82b35 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.h
+++ b/drivers/net/szedata2/rte_eth_szedata2.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2015 - 2016 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 - 2016 CESNET
*/
#ifndef RTE_PMD_SZEDATA2_H_
@@ -47,9 +18,7 @@
#define PCI_DEVICE_ID_NETCOPE_COMBO80G 0xcb80
#define PCI_DEVICE_ID_NETCOPE_COMBO100G 0xc1c1
#define PCI_DEVICE_ID_NETCOPE_COMBO100G2 0xc2c1
-
-/* number of PCI resource used by COMBO card */
-#define PCI_RESOURCE_NUMBER 0
+#define PCI_DEVICE_ID_NETCOPE_NFB200G2QL 0xc250
/* szedata2_packet header length == 4 bytes == 2B segment size + 2B hw size */
#define RTE_SZE2_PACKET_HEADER_SIZE 4
diff --git a/drivers/net/szedata2/szedata2_iobuf.c b/drivers/net/szedata2/szedata2_iobuf.c
deleted file mode 100644
index 3b9a71fe..00000000
--- a/drivers/net/szedata2/szedata2_iobuf.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2017 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-
-#include <rte_common.h>
-
-#include "szedata2_iobuf.h"
-
-/*
- * IBUFs and OBUFs can generally be located at different offsets in different
- * firmwares (modes).
- * This part defines base offsets of IBUFs and OBUFs for various cards
- * and firmwares (modes).
- * Type of firmware (mode) is set through configuration option
- * CONFIG_RTE_LIBRTE_PMD_SZEDATA2_AS.
- * Possible values are:
- * 0 - for cards (modes):
- * NFB-100G1 (100G1)
- *
- * 1 - for cards (modes):
- * NFB-100G2Q (100G1)
- *
- * 2 - for cards (modes):
- * NFB-40G2 (40G2)
- * NFB-100G2C (100G2)
- * NFB-100G2Q (40G2)
- *
- * 3 - for cards (modes):
- * NFB-40G2 (10G8)
- * NFB-100G2Q (10G8)
- *
- * 4 - for cards (modes):
- * NFB-100G1 (10G10)
- *
- * 5 - for experimental firmwares and future use
- */
-#if !defined(RTE_LIBRTE_PMD_SZEDATA2_AS)
-#error "RTE_LIBRTE_PMD_SZEDATA2_AS has to be defined"
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 0
-
-/*
- * Cards (modes):
- * NFB-100G1 (100G1)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9000
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 1
-
-/*
- * Cards (modes):
- * NFB-100G2Q (100G1)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8800
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9800
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 2
-
-/*
- * Cards (modes):
- * NFB-40G2 (40G2)
- * NFB-100G2C (100G2)
- * NFB-100G2Q (40G2)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000,
- 0x8800
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9000,
- 0x9800
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 3
-
-/*
- * Cards (modes):
- * NFB-40G2 (10G8)
- * NFB-100G2Q (10G8)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000,
- 0x8200,
- 0x8400,
- 0x8600,
- 0x8800,
- 0x8A00,
- 0x8C00,
- 0x8E00
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9000,
- 0x9200,
- 0x9400,
- 0x9600,
- 0x9800,
- 0x9A00,
- 0x9C00,
- 0x9E00
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 4
-
-/*
- * Cards (modes):
- * NFB-100G1 (10G10)
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000,
- 0x8200,
- 0x8400,
- 0x8600,
- 0x8800,
- 0x8A00,
- 0x8C00,
- 0x8E00,
- 0x9000,
- 0x9200
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0xA000,
- 0xA200,
- 0xA400,
- 0xA600,
- 0xA800,
- 0xAA00,
- 0xAC00,
- 0xAE00,
- 0xB000,
- 0xB200
-};
-
-#elif RTE_LIBRTE_PMD_SZEDATA2_AS == 5
-
-/*
- * Future use and experimental firmwares.
- */
-
-const uint32_t szedata2_ibuf_base_table[] = {
- 0x8000,
- 0x8200,
- 0x8400,
- 0x8600,
- 0x8800
-};
-const uint32_t szedata2_obuf_base_table[] = {
- 0x9000,
- 0x9200,
- 0x9400,
- 0x9600,
- 0x9800
-};
-
-#else
-#error "RTE_LIBRTE_PMD_SZEDATA2_AS has wrong value, see comments in config file"
-#endif
-
-const uint32_t szedata2_ibuf_count = RTE_DIM(szedata2_ibuf_base_table);
-const uint32_t szedata2_obuf_count = RTE_DIM(szedata2_obuf_base_table);
diff --git a/drivers/net/szedata2/szedata2_iobuf.h b/drivers/net/szedata2/szedata2_iobuf.h
deleted file mode 100644
index f1ccb3b2..00000000
--- a/drivers/net/szedata2/szedata2_iobuf.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2017 CESNET
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of CESNET nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SZEDATA2_IOBUF_H_
-#define _SZEDATA2_IOBUF_H_
-
-#include <stdint.h>
-#include <stdbool.h>
-
-#include <rte_byteorder.h>
-#include <rte_io.h>
-#include <rte_dev.h>
-
-/* IBUF offsets from the beginning of the PCI resource address space. */
-extern const uint32_t szedata2_ibuf_base_table[];
-extern const uint32_t szedata2_ibuf_count;
-
-/* OBUF offsets from the beginning of the PCI resource address space. */
-extern const uint32_t szedata2_obuf_base_table[];
-extern const uint32_t szedata2_obuf_count;
-
-enum szedata2_link_speed {
- SZEDATA2_LINK_SPEED_DEFAULT = 0,
- SZEDATA2_LINK_SPEED_10G,
- SZEDATA2_LINK_SPEED_40G,
- SZEDATA2_LINK_SPEED_100G,
-};
-
-enum szedata2_mac_check_mode {
- SZEDATA2_MAC_CHMODE_PROMISC = 0x0,
- SZEDATA2_MAC_CHMODE_ONLY_VALID = 0x1,
- SZEDATA2_MAC_CHMODE_ALL_BROADCAST = 0x2,
- SZEDATA2_MAC_CHMODE_ALL_MULTICAST = 0x3,
-};
-
-/**
- * Macro takes pointer to pci resource structure (rsc)
- * and returns pointer to mapped resource memory at
- * specified offset (offset) typecast to the type (type).
- */
-#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
- ((type)(((uint8_t *)(rsc)->addr) + (offset)))
-
-/**
- * Maximum possible number of MAC addresses (limited by IBUF status
- * register value MAC_COUNT which has 5 bits).
- */
-#define SZEDATA2_IBUF_MAX_MAC_COUNT 32
-
-/**
- * Structure describes IBUF address space.
- */
-struct szedata2_ibuf {
- /** Total Received Frames Counter low part */
- uint32_t trfcl; /**< 0x00 */
- /** Correct Frames Counter low part */
- uint32_t cfcl; /**< 0x04 */
- /** Discarded Frames Counter low part */
- uint32_t dfcl; /**< 0x08 */
- /** Counter of frames discarded due to buffer overflow low part */
- uint32_t bodfcl; /**< 0x0C */
- /** Total Received Frames Counter high part */
- uint32_t trfch; /**< 0x10 */
- /** Correct Frames Counter high part */
- uint32_t cfch; /**< 0x14 */
- /** Discarded Frames Counter high part */
- uint32_t dfch; /**< 0x18 */
- /** Counter of frames discarded due to buffer overflow high part */
- uint32_t bodfch; /**< 0x1C */
- /** IBUF enable register */
- uint32_t ibuf_en; /**< 0x20 */
- /** Error mask register */
- uint32_t err_mask; /**< 0x24 */
- /** IBUF status register */
- uint32_t ibuf_st; /**< 0x28 */
- /** IBUF command register */
- uint32_t ibuf_cmd; /**< 0x2C */
- /** Minimum frame length allowed */
- uint32_t mfla; /**< 0x30 */
- /** Frame MTU */
- uint32_t mtu; /**< 0x34 */
- /** MAC address check mode */
- uint32_t mac_chmode; /**< 0x38 */
- /** Octets Received OK Counter low part */
- uint32_t orocl; /**< 0x3C */
- /** Octets Received OK Counter high part */
- uint32_t oroch; /**< 0x40 */
- /** reserved */
- uint8_t reserved[60]; /**< 0x4C */
- /** IBUF memory for MAC addresses */
- uint32_t mac_mem[2 * SZEDATA2_IBUF_MAX_MAC_COUNT]; /**< 0x80 */
-} __rte_packed;
-
-/**
- * Structure describes OBUF address space.
- */
-struct szedata2_obuf {
- /** Total Sent Frames Counter low part */
- uint32_t tsfcl; /**< 0x00 */
- /** Octets Sent Counter low part */
- uint32_t oscl; /**< 0x04 */
- /** Total Discarded Frames Counter low part */
- uint32_t tdfcl; /**< 0x08 */
- /** reserved */
- uint32_t reserved1; /**< 0x0C */
- /** Total Sent Frames Counter high part */
- uint32_t tsfch; /**< 0x10 */
- /** Octets Sent Counter high part */
- uint32_t osch; /**< 0x14 */
- /** Total Discarded Frames Counter high part */
- uint32_t tdfch; /**< 0x18 */
- /** reserved */
- uint32_t reserved2; /**< 0x1C */
- /** OBUF enable register */
- uint32_t obuf_en; /**< 0x20 */
- /** reserved */
- uint64_t reserved3; /**< 0x24 */
- /** OBUF control register */
- uint32_t ctrl; /**< 0x2C */
- /** OBUF status register */
- uint32_t obuf_st; /**< 0x30 */
-} __rte_packed;
-
-/**
- * Wrapper for reading 4 bytes from device memory in correct endianness.
- *
- * @param addr
- * Address for reading.
- * @return
- * 4 B value.
- */
-static inline uint32_t
-szedata2_read32(const volatile void *addr)
-{
- return rte_le_to_cpu_32(rte_read32(addr));
-}
-
-/**
- * Wrapper for writing 4 bytes to device memory in correct endianness.
- *
- * @param value
- * Value to write.
- * @param addr
- * Address for writing.
- */
-static inline void
-szedata2_write32(uint32_t value, volatile void *addr)
-{
- rte_write32(rte_cpu_to_le_32(value), addr);
-}
-
-/**
- * Get pointer to IBUF structure according to specified index.
- *
- * @param rsc
- * Pointer to base address of memory resource.
- * @param index
- * Index of IBUF.
- * @return
- * Pointer to IBUF structure.
- */
-static inline struct szedata2_ibuf *
-ibuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index)
-{
- if (index >= szedata2_ibuf_count)
- index = szedata2_ibuf_count - 1;
- return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_ibuf_base_table[index],
- struct szedata2_ibuf *);
-}
-
-/**
- * Get pointer to OBUF structure according to specified idnex.
- *
- * @param rsc
- * Pointer to base address of memory resource.
- * @param index
- * Index of OBUF.
- * @return
- * Pointer to OBUF structure.
- */
-static inline struct szedata2_obuf *
-obuf_ptr_by_index(struct rte_mem_resource *rsc, uint32_t index)
-{
- if (index >= szedata2_obuf_count)
- index = szedata2_obuf_count - 1;
- return SZEDATA2_PCI_RESOURCE_PTR(rsc, szedata2_obuf_base_table[index],
- struct szedata2_obuf *);
-}
-
-/**
- * Checks if IBUF is enabled.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- * @return
- * true if IBUF is enabled.
- * false if IBUF is disabled.
- */
-static inline bool
-ibuf_is_enabled(const volatile struct szedata2_ibuf *ibuf)
-{
- return ((szedata2_read32(&ibuf->ibuf_en) & 0x1) != 0) ? true : false;
-}
-
-/**
- * Enables IBUF.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- */
-static inline void
-ibuf_enable(volatile struct szedata2_ibuf *ibuf)
-{
- szedata2_write32(szedata2_read32(&ibuf->ibuf_en) | 0x1, &ibuf->ibuf_en);
-}
-
-/**
- * Disables IBUF.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- */
-static inline void
-ibuf_disable(volatile struct szedata2_ibuf *ibuf)
-{
- szedata2_write32(szedata2_read32(&ibuf->ibuf_en) & ~0x1,
- &ibuf->ibuf_en);
-}
-
-/**
- * Checks if link is up.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- * @return
- * true if ibuf link is up.
- * false if ibuf link is down.
- */
-static inline bool
-ibuf_is_link_up(const volatile struct szedata2_ibuf *ibuf)
-{
- return ((szedata2_read32(&ibuf->ibuf_st) & 0x80) != 0) ? true : false;
-}
-
-/**
- * Get current MAC address check mode from IBUF.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- * @return
- * MAC address check mode constant.
- */
-static inline enum szedata2_mac_check_mode
-ibuf_mac_mode_read(const volatile struct szedata2_ibuf *ibuf)
-{
- switch (szedata2_read32(&ibuf->mac_chmode) & 0x3) {
- case 0x0:
- return SZEDATA2_MAC_CHMODE_PROMISC;
- case 0x1:
- return SZEDATA2_MAC_CHMODE_ONLY_VALID;
- case 0x2:
- return SZEDATA2_MAC_CHMODE_ALL_BROADCAST;
- case 0x3:
- return SZEDATA2_MAC_CHMODE_ALL_MULTICAST;
- default:
- return SZEDATA2_MAC_CHMODE_PROMISC;
- }
-}
-
-/**
- * Writes mode in MAC address check mode register in IBUF.
- *
- * @param ibuf
- * Pointer to IBUF structure.
- * @param mode
- * MAC address check mode to set.
- */
-static inline void
-ibuf_mac_mode_write(volatile struct szedata2_ibuf *ibuf,
- enum szedata2_mac_check_mode mode)
-{
- szedata2_write32((szedata2_read32(&ibuf->mac_chmode) & ~0x3) | mode,
- &ibuf->mac_chmode);
-}
-
-/**
- * Checks if obuf is enabled.
- *
- * @param obuf
- * Pointer to OBUF structure.
- * @return
- * true if OBUF is enabled.
- * false if OBUF is disabled.
- */
-static inline bool
-obuf_is_enabled(const volatile struct szedata2_obuf *obuf)
-{
- return ((szedata2_read32(&obuf->obuf_en) & 0x1) != 0) ? true : false;
-}
-
-/**
- * Enables OBUF.
- *
- * @param obuf
- * Pointer to OBUF structure.
- */
-static inline void
-obuf_enable(volatile struct szedata2_obuf *obuf)
-{
- szedata2_write32(szedata2_read32(&obuf->obuf_en) | 0x1, &obuf->obuf_en);
-}
-
-/**
- * Disables OBUF.
- *
- * @param obuf
- * Pointer to OBUF structure.
- */
-static inline void
-obuf_disable(volatile struct szedata2_obuf *obuf)
-{
- szedata2_write32(szedata2_read32(&obuf->obuf_en) & ~0x1,
- &obuf->obuf_en);
-}
-
-#endif /* _SZEDATA2_IOBUF_H_ */
diff --git a/drivers/net/szedata2/szedata2_logs.h b/drivers/net/szedata2/szedata2_logs.h
new file mode 100644
index 00000000..8d06ffa3
--- /dev/null
+++ b/drivers/net/szedata2/szedata2_logs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 CESNET
+ */
+
+#ifndef _SZEDATA2_LOGS_H_
+#define _SZEDATA2_LOGS_H_
+
+#include <rte_log.h>
+
+extern int szedata2_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, szedata2_logtype_init, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int szedata2_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, szedata2_logtype_driver, \
+ "%s(): " fmt "\n", __func__, ## args)
+
+#endif /* _SZEDATA2_LOGS_H_ */
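Both macros prepend the calling function's name and append a newline, so call sites pass a bare format string. For instance, a call such as the following (illustrative argument):

    PMD_DRV_LOG(INFO, "Removing eth_dev %s", name);
    /* expands to:
     * rte_log(RTE_LOG_INFO, szedata2_logtype_driver,
     *         "%s(): Removing eth_dev %s\n", __func__, name);
     */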
diff --git a/drivers/net/tap/Makefile b/drivers/net/tap/Makefile
index ccc5c5fc..32433653 100644
--- a/drivers/net/tap/Makefile
+++ b/drivers/net/tap/Makefile
@@ -24,7 +24,7 @@ CFLAGS += -I.
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
-LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_bus_vdev -lrte_gso
CFLAGS += -DTAP_MAX_QUEUES=$(TAP_MAX_QUEUES)
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index f09db0ea..feb92b48 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -15,7 +15,9 @@
#include <rte_net.h>
#include <rte_debug.h>
#include <rte_ip.h>
+#include <rte_string_fns.h>
+#include <assert.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
@@ -34,6 +36,7 @@
#include <linux/if_ether.h>
#include <fcntl.h>
+#include <tap_rss.h>
#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_netlink.h>
@@ -42,13 +45,25 @@
/* Linux based path to the TUN device */
#define TUN_TAP_DEV_PATH "/dev/net/tun"
#define DEFAULT_TAP_NAME "dtap"
+#define DEFAULT_TUN_NAME "dtun"
#define ETH_TAP_IFACE_ARG "iface"
#define ETH_TAP_REMOTE_ARG "remote"
#define ETH_TAP_MAC_ARG "mac"
#define ETH_TAP_MAC_FIXED "fixed"
+#define ETH_TAP_USR_MAC_FMT "xx:xx:xx:xx:xx:xx"
+#define ETH_TAP_CMP_MAC_FMT "0123456789ABCDEFabcdef"
+#define ETH_TAP_MAC_ARG_FMT ETH_TAP_MAC_FIXED "|" ETH_TAP_USR_MAC_FMT
+
+#define TAP_GSO_MBUFS_PER_CORE 128
+#define TAP_GSO_MBUF_SEG_SIZE 128
+#define TAP_GSO_MBUF_CACHE_SIZE 4
+#define TAP_GSO_MBUFS_NUM \
+ (TAP_GSO_MBUFS_PER_CORE * TAP_GSO_MBUF_CACHE_SIZE)
+
static struct rte_vdev_driver pmd_tap_drv;
+static struct rte_vdev_driver pmd_tun_drv;
static const char *valid_arguments[] = {
ETH_TAP_IFACE_ARG,
@@ -57,7 +72,10 @@ static const char *valid_arguments[] = {
NULL
};
-static int tap_unit;
+static unsigned int tap_unit;
+static unsigned int tun_unit;
+
+static char tuntap_name[8];
static volatile uint32_t tap_trigger; /* Rx trigger */
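The devargs defined above are consumed at vdev probe time; illustrative invocations (interface names and MAC value are assumed):

    testpmd --vdev=net_tap0,iface=tap0,mac=fixed -- -i
    testpmd --vdev=net_tap0,iface=tap0,mac=00:11:22:33:44:55 -- -i
    testpmd --vdev=net_tun0,iface=tun0 -- -i

With mac=fixed the PMD derives a fixed per-unit address instead of a random one; the xx:xx:xx:xx:xx:xx form, validated against ETH_TAP_USR_MAC_FMT, sets the address explicitly. TUN interfaces carry no MAC, so net_tun takes no mac argument.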
@@ -65,7 +83,7 @@ static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
.link_status = ETH_LINK_DOWN,
- .link_autoneg = ETH_LINK_AUTONEG
+ .link_autoneg = ETH_LINK_FIXED,
};
static void
@@ -84,13 +102,20 @@ enum ioctl_mode {
static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
-/* Tun/Tap allocation routine
+/**
+ * Tun/Tap allocation routine
+ *
+ * @param[in] pmd
+ * Pointer to private structure.
+ *
+ * @param[in] is_keepalive
+ * Keepalive flag
*
- * name is the number of the interface to use, unless NULL to take the host
- * supplied name.
+ * @return
+ * -1 on failure, fd on success
*/
static int
-tun_alloc(struct pmd_internals *pmd)
+tun_alloc(struct pmd_internals *pmd, int is_keepalive)
{
struct ifreq ifr;
#ifdef IFF_MULTI_QUEUE
@@ -104,51 +129,64 @@ tun_alloc(struct pmd_internals *pmd)
* Do not set IFF_NO_PI as packet information header will be needed
* to check if a received packet has been truncated.
*/
- ifr.ifr_flags = IFF_TAP;
+ ifr.ifr_flags = (pmd->type == ETH_TUNTAP_TYPE_TAP) ?
+ IFF_TAP : IFF_TUN | IFF_POINTOPOINT;
snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);
- RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);
+ TAP_LOG(DEBUG, "ifr_name '%s'", ifr.ifr_name);
fd = open(TUN_TAP_DEV_PATH, O_RDWR);
if (fd < 0) {
- RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
+ TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
goto error;
}
#ifdef IFF_MULTI_QUEUE
/* Grab the TUN features to verify we can work multi-queue */
if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
- RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
+ TAP_LOG(ERR, "%s unable to get TUN/TAP features",
+ tuntap_name);
goto error;
}
- RTE_LOG(DEBUG, PMD, " TAP Features %08x\n", features);
+ TAP_LOG(DEBUG, "%s Features %08x", tuntap_name, features);
if (features & IFF_MULTI_QUEUE) {
- RTE_LOG(DEBUG, PMD, " Multi-queue support for %d queues\n",
+ TAP_LOG(DEBUG, " Multi-queue support for %d queues",
RTE_PMD_TAP_MAX_QUEUES);
ifr.ifr_flags |= IFF_MULTI_QUEUE;
} else
#endif
{
ifr.ifr_flags |= IFF_ONE_QUEUE;
- RTE_LOG(DEBUG, PMD, " Single queue only support\n");
+ TAP_LOG(DEBUG, " Single queue only support");
}
/* Set the TUN/TAP configuration and set the name if needed */
if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
- RTE_LOG(WARNING, PMD,
- "Unable to set TUNSETIFF for %s\n",
- ifr.ifr_name);
- perror("TUNSETIFF");
+ TAP_LOG(WARNING, "Unable to set TUNSETIFF for %s: %s",
+ ifr.ifr_name, strerror(errno));
goto error;
}
+ if (is_keepalive) {
+ /*
+ * Detach the TUN/TAP keep-alive queue
+ * to avoid traffic through it
+ */
+ ifr.ifr_flags = IFF_DETACH_QUEUE;
+ if (ioctl(fd, TUNSETQUEUE, (void *)&ifr) < 0) {
+ TAP_LOG(WARNING,
+ "Unable to detach keep-alive queue for %s: %s",
+ ifr.ifr_name, strerror(errno));
+ goto error;
+ }
+ }
+
/* Always set the file descriptor to non-blocking */
if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
- RTE_LOG(WARNING, PMD,
- "Unable to set %s to nonblocking\n",
- ifr.ifr_name);
- perror("F_SETFL, NONBLOCK");
+ TAP_LOG(WARNING,
+ "Unable to set %s to nonblocking: %s",
+ ifr.ifr_name, strerror(errno));
goto error;
}
@@ -182,10 +220,11 @@ tun_alloc(struct pmd_internals *pmd)
fcntl(fd, F_SETFL, flags | O_ASYNC);
fcntl(fd, F_SETOWN, getpid());
} while (0);
+
if (errno) {
/* Disable trigger globally in case of error */
tap_trigger = 0;
- RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
+ TAP_LOG(WARNING, "Rx trigger disabled: %s",
strerror(errno));
}
@@ -255,14 +294,9 @@ static uint64_t
tap_rx_offload_get_port_capa(void)
{
/*
- * In order to support legacy apps,
- * report capabilities also as port capabilities.
+ * No specific port Rx offload capabilities.
*/
- return DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ return 0;
}
static uint64_t
@@ -275,21 +309,6 @@ tap_rx_offload_get_queue_capa(void)
DEV_RX_OFFLOAD_CRC_STRIP;
}
-static bool
-tap_rxq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
-{
- uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads;
- uint64_t queue_supp_offloads = tap_rx_offload_get_queue_capa();
- uint64_t port_supp_offloads = tap_rx_offload_get_port_capa();
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
-}
-
/* Callback to handle the rx burst of packets to the correct interface and
* file descriptor(s) in a multi-queue setup.
*/
@@ -389,13 +408,9 @@ static uint64_t
tap_tx_offload_get_port_capa(void)
{
/*
- * In order to support legacy apps,
- * report capabilities also as port capabilities.
+ * No specific port Tx offload capabilities.
*/
- return DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ return 0;
}
static uint64_t
@@ -404,28 +419,45 @@ tap_tx_offload_get_queue_capa(void)
return DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
}
-static bool
-tap_txq_are_offloads_valid(struct rte_eth_dev *dev, uint64_t offloads)
+/* Finalize l4 checksum calculation */
+static void
+tap_tx_l4_cksum(uint16_t *l4_cksum, uint16_t l4_phdr_cksum,
+ uint32_t l4_raw_cksum)
{
- uint64_t port_offloads = dev->data->dev_conf.txmode.offloads;
- uint64_t queue_supp_offloads = tap_tx_offload_get_queue_capa();
- uint64_t port_supp_offloads = tap_tx_offload_get_port_capa();
-
- if ((offloads & (queue_supp_offloads | port_supp_offloads)) !=
- offloads)
- return false;
- /* Verify we have no conflict with port offloads */
- if ((port_offloads ^ offloads) & port_supp_offloads)
- return false;
- return true;
+ if (l4_cksum) {
+ uint32_t cksum;
+
+ cksum = __rte_raw_cksum_reduce(l4_raw_cksum);
+ cksum += l4_phdr_cksum;
+
+ cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+ cksum = (~cksum) & 0xffff;
+ if (cksum == 0)
+ cksum = 0xffff;
+ *l4_cksum = cksum;
+ }
}
+/* Accumulate L4 raw checksums */
static void
-tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
- unsigned int l3_len)
+tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
+ uint32_t *l4_raw_cksum)
+{
+ if (l4_cksum == NULL)
+ return;
+
+ *l4_raw_cksum = __rte_raw_cksum(l4_data, l4_len, *l4_raw_cksum);
+}
+
+/* L3 and L4 pseudo headers checksum offloads */
+static void
+tap_tx_l3_cksum(char *packet, uint64_t ol_flags, unsigned int l2_len,
+ unsigned int l3_len, unsigned int l4_len, uint16_t **l4_cksum,
+ uint16_t *l4_phdr_cksum, uint32_t *l4_raw_cksum)
{
void *l3_hdr = packet + l2_len;
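
The finalization added in tap_tx_l4_cksum() above is the standard Internet-checksum fold: reduce the accumulated 32-bit sum, add the pseudo-header sum, fold the carries, invert, and map 0 to 0xffff (zero means "no checksum" in a UDP header). A self-contained sketch of the same arithmetic; fold_l4_cksum is a hypothetical name, not part of the patch:

    #include <stdint.h>

    /* Fold a 32-bit one's-complement accumulator plus a
     * pseudo-header sum into a final 16-bit checksum.
     */
    static uint16_t fold_l4_cksum(uint32_t raw_sum, uint16_t phdr_sum)
    {
            uint32_t cksum = (raw_sum >> 16) + (raw_sum & 0xffff);

            cksum += phdr_sum;
            while (cksum >> 16)
                    cksum = (cksum >> 16) + (cksum & 0xffff);
            cksum = (~cksum) & 0xffff;
            return cksum == 0 ? 0xffff : (uint16_t)cksum;
    }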
@@ -438,38 +470,137 @@ tap_tx_offload(char *packet, uint64_t ol_flags, unsigned int l2_len,
iph->hdr_checksum = (cksum == 0xffff) ? cksum : ~cksum;
}
if (ol_flags & PKT_TX_L4_MASK) {
- uint16_t l4_len;
- uint32_t cksum;
- uint16_t *l4_cksum;
void *l4_hdr;
l4_hdr = packet + l2_len + l3_len;
if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM)
- l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
+ *l4_cksum = &((struct udp_hdr *)l4_hdr)->dgram_cksum;
else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM)
- l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
+ *l4_cksum = &((struct tcp_hdr *)l4_hdr)->cksum;
else
return;
- *l4_cksum = 0;
- if (ol_flags & PKT_TX_IPV4) {
- struct ipv4_hdr *iph = l3_hdr;
+ **l4_cksum = 0;
+ if (ol_flags & PKT_TX_IPV4)
+ *l4_phdr_cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
+ else
+ *l4_phdr_cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
+ *l4_raw_cksum = __rte_raw_cksum(l4_hdr, l4_len, 0);
+ }
+}
- l4_len = rte_be_to_cpu_16(iph->total_length) - l3_len;
- cksum = rte_ipv4_phdr_cksum(l3_hdr, 0);
- } else {
- struct ipv6_hdr *ip6h = l3_hdr;
+static inline void
+tap_write_mbufs(struct tx_queue *txq, uint16_t num_mbufs,
+ struct rte_mbuf **pmbufs,
+ uint16_t *num_packets, unsigned long *num_tx_bytes)
+{
+ int i;
+ uint16_t l234_hlen;
- /* payload_len does not include ext headers */
- l4_len = rte_be_to_cpu_16(ip6h->payload_len) -
- l3_len + sizeof(struct ipv6_hdr);
- cksum = rte_ipv6_phdr_cksum(l3_hdr, 0);
+ for (i = 0; i < num_mbufs; i++) {
+ struct rte_mbuf *mbuf = pmbufs[i];
+ struct iovec iovecs[mbuf->nb_segs + 2];
+ struct tun_pi pi = { .flags = 0, .proto = 0x00 };
+ struct rte_mbuf *seg = mbuf;
+ char m_copy[mbuf->data_len];
+ int proto;
+ int n;
+ int j;
+ int k; /* current index in iovecs for copying segments */
+ uint16_t seg_len; /* length of first segment */
+ uint16_t nb_segs;
+ uint16_t *l4_cksum; /* l4 checksum (pseudo header + payload) */
+ uint32_t l4_raw_cksum = 0; /* TCP/UDP payload raw checksum */
+ uint16_t l4_phdr_cksum = 0; /* TCP/UDP pseudo header checksum */
+ uint16_t is_cksum = 0; /* set when cksum should be offloaded */
+
+ l4_cksum = NULL;
+ if (txq->type == ETH_TUNTAP_TYPE_TUN) {
+ /*
+ * TUN and TAP are created with IFF_NO_PI disabled.
+ * For the TUN PMD this is mandatory, as these fields are
+ * used by the kernel's tun.c to determine whether packets
+ * are IP or non-IP.
+ *
+ * The logic fetches the first byte of data from the mbuf
+ * and checks its IP version nibble: if it is 4 or 6, the
+ * protocol field is set accordingly.
+ */
+ char *buff_data = rte_pktmbuf_mtod(seg, void *);
+ proto = (*buff_data & 0xf0);
+ pi.proto = (proto == 0x40) ?
+ rte_cpu_to_be_16(ETHER_TYPE_IPv4) :
+ ((proto == 0x60) ?
+ rte_cpu_to_be_16(ETHER_TYPE_IPv6) :
+ 0x00);
}
- cksum += rte_raw_cksum(l4_hdr, l4_len);
- cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
- cksum = (~cksum) & 0xffff;
- if (cksum == 0)
- cksum = 0xffff;
- *l4_cksum = cksum;
+
+ k = 0;
+ iovecs[k].iov_base = &pi;
+ iovecs[k].iov_len = sizeof(pi);
+ k++;
+
+ nb_segs = mbuf->nb_segs;
+ if (txq->csum &&
+ ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
+ (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
+ is_cksum = 1;
+
+ /* Support only packets whose first segment contains
+ * the full l2, l3 and l4 headers
+ */
+ seg_len = rte_pktmbuf_data_len(mbuf);
+ l234_hlen = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ if (seg_len < l234_hlen)
+ break;
+
+ /* To change checksums, work on a copy of the l2, l3
+ * headers + l4 pseudo header
+ */
+ rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
+ l234_hlen);
+ tap_tx_l3_cksum(m_copy, mbuf->ol_flags,
+ mbuf->l2_len, mbuf->l3_len, mbuf->l4_len,
+ &l4_cksum, &l4_phdr_cksum,
+ &l4_raw_cksum);
+ iovecs[k].iov_base = m_copy;
+ iovecs[k].iov_len = l234_hlen;
+ k++;
+
+ /* Update next iovecs[] beyond l2, l3, l4 headers */
+ if (seg_len > l234_hlen) {
+ iovecs[k].iov_len = seg_len - l234_hlen;
+ iovecs[k].iov_base =
+ rte_pktmbuf_mtod(seg, char *) +
+ l234_hlen;
+ tap_tx_l4_add_rcksum(iovecs[k].iov_base,
+ iovecs[k].iov_len, l4_cksum,
+ &l4_raw_cksum);
+ k++;
+ nb_segs++;
+ }
+ seg = seg->next;
+ }
+
+ for (j = k; j <= nb_segs; j++) {
+ iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
+ iovecs[j].iov_base = rte_pktmbuf_mtod(seg, void *);
+ if (is_cksum)
+ tap_tx_l4_add_rcksum(iovecs[j].iov_base,
+ iovecs[j].iov_len, l4_cksum,
+ &l4_raw_cksum);
+ seg = seg->next;
+ }
+
+ if (is_cksum)
+ tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
+
+ /* copy the tx frame data */
+ n = writev(txq->fd, iovecs, j);
+ if (n <= 0)
+ break;
+ (*num_packets)++;
+ (*num_tx_bytes) += rte_pktmbuf_pkt_len(mbuf);
}
}
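
tap_write_mbufs() above ends in one writev() per packet: the struct tun_pi prefix, optionally a modified copy of the headers, then the remaining mbuf segments. A stripped-down sketch of that framing for a single-buffer TUN packet, deriving pi.proto from the IP version nibble as the code does; tun_write_packet is a hypothetical helper:

    #include <stdint.h>
    #include <sys/uio.h>
    #include <arpa/inet.h>
    #include <linux/if_ether.h>
    #include <linux/if_tun.h>

    /* Prefix a raw IP packet with struct tun_pi and write it
     * to a TUN fd opened without IFF_NO_PI.
     */
    static ssize_t tun_write_packet(int fd, uint8_t *buf, size_t len)
    {
            struct tun_pi pi = { .flags = 0 };
            struct iovec iov[2];

            pi.proto = ((buf[0] & 0xf0) == 0x40) ?
                       htons(ETH_P_IP) : htons(ETH_P_IPV6);
            iov[0].iov_base = &pi;
            iov[0].iov_len = sizeof(pi);
            iov[1].iov_base = buf;
            iov[1].iov_len = len;
            return writev(fd, iov, 2);
    }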
@@ -480,6 +611,7 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
struct tx_queue *txq = queue;
uint16_t num_tx = 0;
+ uint16_t num_packets = 0;
unsigned long num_tx_bytes = 0;
uint32_t max_size;
int i;
@@ -487,53 +619,70 @@ pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (unlikely(nb_pkts == 0))
return 0;
+ struct rte_mbuf *gso_mbufs[MAX_GSO_MBUFS];
max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
for (i = 0; i < nb_pkts; i++) {
- struct rte_mbuf *mbuf = bufs[num_tx];
- struct iovec iovecs[mbuf->nb_segs + 1];
- struct tun_pi pi = { .flags = 0 };
- struct rte_mbuf *seg = mbuf;
- char m_copy[mbuf->data_len];
- int n;
+ struct rte_mbuf *mbuf_in = bufs[num_tx];
+ struct rte_mbuf **mbuf;
+ uint16_t num_mbufs = 0;
+ uint16_t tso_segsz = 0;
+ int ret;
+ uint16_t hdrs_len;
int j;
+ uint64_t tso;
- /* stats.errs will be incremented */
- if (rte_pktmbuf_pkt_len(mbuf) > max_size)
- break;
+ tso = mbuf_in->ol_flags & PKT_TX_TCP_SEG;
+ if (tso) {
+ struct rte_gso_ctx *gso_ctx = &txq->gso_ctx;
- iovecs[0].iov_base = &pi;
- iovecs[0].iov_len = sizeof(pi);
- for (j = 1; j <= mbuf->nb_segs; j++) {
- iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
- iovecs[j].iov_base =
- rte_pktmbuf_mtod(seg, void *);
- seg = seg->next;
- }
- if (txq->csum &&
- ((mbuf->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_IPV4) ||
- (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM ||
- (mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM))) {
- /* Support only packets with all data in the same seg */
- if (mbuf->nb_segs > 1)
+ assert(gso_ctx != NULL);
+
+ /* TCP segmentation implies TCP checksum offload */
+ mbuf_in->ol_flags |= PKT_TX_TCP_CKSUM;
+
+ /* gso size is calculated without ETHER_CRC_LEN */
+ hdrs_len = mbuf_in->l2_len + mbuf_in->l3_len +
+ mbuf_in->l4_len;
+ tso_segsz = mbuf_in->tso_segsz + hdrs_len;
+ if (unlikely(tso_segsz == hdrs_len) ||
+ tso_segsz > *txq->mtu) {
+ txq->stats.errs++;
break;
- /* To change checksums, work on a copy of data. */
- rte_memcpy(m_copy, rte_pktmbuf_mtod(mbuf, void *),
- rte_pktmbuf_data_len(mbuf));
- tap_tx_offload(m_copy, mbuf->ol_flags,
- mbuf->l2_len, mbuf->l3_len);
- iovecs[1].iov_base = m_copy;
+ }
+ gso_ctx->gso_size = tso_segsz;
+ ret = rte_gso_segment(mbuf_in, /* packet to segment */
+ gso_ctx, /* gso control block */
+ (struct rte_mbuf **)&gso_mbufs, /* out mbufs */
+ RTE_DIM(gso_mbufs)); /* max tso mbufs */
+
+ /* ret contains the number of newly created mbufs */
+ if (ret < 0)
+ break;
+
+ mbuf = gso_mbufs;
+ num_mbufs = ret;
+ } else {
+ /* stats.errs will be incremented */
+ if (rte_pktmbuf_pkt_len(mbuf_in) > max_size)
+ break;
+
+ /* ret 0 indicates no new mbufs were created */
+ ret = 0;
+ mbuf = &mbuf_in;
+ num_mbufs = 1;
}
- /* copy the tx frame data */
- n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
- if (n <= 0)
- break;
+ tap_write_mbufs(txq, num_mbufs, mbuf,
+ &num_packets, &num_tx_bytes);
num_tx++;
- num_tx_bytes += mbuf->pkt_len;
- rte_pktmbuf_free(mbuf);
+ /* free original mbuf */
+ rte_pktmbuf_free(mbuf_in);
+ /* free tso mbufs */
+ for (j = 0; j < ret; j++)
+ rte_pktmbuf_free(mbuf[j]);
}
- txq->stats.opackets += num_tx;
+ txq->stats.opackets += num_packets;
txq->stats.errs += nb_pkts - num_tx;
txq->stats.obytes += num_tx_bytes;
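
The TSO branch above follows the librte_gso call pattern: set gso_size per packet, call rte_gso_segment(), transmit the outputs, then free both the outputs and the original mbuf. A condensed sketch of that pattern, assuming this driver's tx_queue layout; tx_one_tso is hypothetical, not part of the patch:

    #include <rte_gso.h>
    #include <rte_mbuf.h>

    static void tx_one_tso(struct tx_queue *txq, struct rte_mbuf *m,
                           uint16_t tso_segsz)
    {
            struct rte_mbuf *out[MAX_GSO_MBUFS];
            int nb, j;

            m->ol_flags |= PKT_TX_TCP_CKSUM; /* TSO implies TCP cksum */
            txq->gso_ctx.gso_size = tso_segsz;
            nb = rte_gso_segment(m, &txq->gso_ctx, out, RTE_DIM(out));
            if (nb < 0)
                    return; /* caller would bump txq->stats.errs */
            for (j = 0; j < nb; j++) {
                    /* ... writev() each segment, as tap_write_mbufs() ... */
                    rte_pktmbuf_free(out[j]);
            }
            rte_pktmbuf_free(m); /* the original is freed regardless */
    }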
@@ -593,7 +742,9 @@ apply:
case SIOCSIFMTU:
break;
default:
- RTE_ASSERT(!"unsupported request type: must not happen");
+ RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
+ pmd->name);
+ return -EINVAL;
}
if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
goto error;
@@ -602,8 +753,8 @@ apply:
return 0;
error:
- RTE_LOG(DEBUG, PMD, "%s: %s(%s) failed: %s(%d)\n", ifr->ifr_name,
- __func__, tap_ioctl_req2str(request), strerror(errno), errno);
+ TAP_LOG(DEBUG, "%s(%s) failed: %s(%d)", ifr->ifr_name,
+ tap_ioctl_req2str(request), strerror(errno), errno);
return -errno;
}
@@ -630,12 +781,22 @@ tap_link_set_up(struct rte_eth_dev *dev)
static int
tap_dev_start(struct rte_eth_dev *dev)
{
- int err;
+ int err, i;
err = tap_intr_handle_set(dev, 1);
if (err)
return err;
- return tap_link_set_up(dev);
+
+ err = tap_link_set_up(dev);
+ if (err)
+ return err;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return err;
}
/* This function gets called when the current port gets stopped.
@@ -643,6 +804,13 @@ tap_dev_start(struct rte_eth_dev *dev)
static void
tap_dev_stop(struct rte_eth_dev *dev)
{
+ int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
+
tap_intr_handle_set(dev, 0);
tap_link_set_down(dev);
}
@@ -650,39 +818,28 @@ tap_dev_stop(struct rte_eth_dev *dev)
static int
tap_dev_configure(struct rte_eth_dev *dev)
{
- uint64_t supp_tx_offloads = tap_tx_offload_get_port_capa();
- uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
-
- if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
- rte_errno = ENOTSUP;
- RTE_LOG(ERR, PMD,
- "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- tx_offloads, supp_tx_offloads);
- return -rte_errno;
- }
if (dev->data->nb_rx_queues > RTE_PMD_TAP_MAX_QUEUES) {
- RTE_LOG(ERR, PMD,
- "%s: number of rx queues %d exceeds max num of queues %d\n",
+ TAP_LOG(ERR,
+ "%s: number of rx queues %d exceeds max num of queues %d",
dev->device->name,
dev->data->nb_rx_queues,
RTE_PMD_TAP_MAX_QUEUES);
return -1;
}
if (dev->data->nb_tx_queues > RTE_PMD_TAP_MAX_QUEUES) {
- RTE_LOG(ERR, PMD,
- "%s: number of tx queues %d exceeds max num of queues %d\n",
+ TAP_LOG(ERR,
+ "%s: number of tx queues %d exceeds max num of queues %d",
dev->device->name,
dev->data->nb_tx_queues,
RTE_PMD_TAP_MAX_QUEUES);
return -1;
}
- RTE_LOG(INFO, PMD, "%s: %p: TX configured queues number: %u\n",
- dev->device->name, (void *)dev, dev->data->nb_tx_queues);
+ TAP_LOG(INFO, "%s: %p: TX configured queues number: %u",
+ dev->device->name, (void *)dev, dev->data->nb_tx_queues);
- RTE_LOG(INFO, PMD, "%s: %p: RX configured queues number: %u\n",
- dev->device->name, (void *)dev, dev->data->nb_rx_queues);
+ TAP_LOG(INFO, "%s: %p: RX configured queues number: %u",
+ dev->device->name, (void *)dev, dev->data->nb_rx_queues);
return 0;
}
@@ -732,7 +889,6 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_queues = RTE_PMD_TAP_MAX_QUEUES;
dev_info->max_tx_queues = RTE_PMD_TAP_MAX_QUEUES;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
dev_info->speed_capa = tap_dev_speed_capa();
dev_info->rx_queue_offload_capa = tap_rx_offload_get_queue_capa();
dev_info->rx_offload_capa = tap_rx_offload_get_port_capa() |
@@ -740,6 +896,12 @@ tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->tx_queue_offload_capa = tap_tx_offload_get_queue_capa();
dev_info->tx_offload_capa = tap_tx_offload_get_port_capa() |
dev_info->tx_queue_offload_capa;
+ dev_info->hash_key_size = TAP_RSS_HASH_KEY_SIZE;
+ /*
+ * limitation: TAP supports all of IP, UDP and TCP hash
+ * functions together and not in partial combinations
+ */
+ dev_info->flow_type_rss_offloads = ~TAP_RSS_HF_MASK;
}
static int
@@ -830,6 +992,15 @@ tap_dev_close(struct rte_eth_dev *dev)
ioctl(internals->ioctl_sock, SIOCSIFFLAGS,
&internals->remote_initial_flags);
}
+
+ if (internals->ka_fd != -1) {
+ close(internals->ka_fd);
+ internals->ka_fd = -1;
+ }
+ /*
+ * Since the TUN device has no more open file descriptors,
+ * it will be removed from the kernel
+ */
}
static void
@@ -929,48 +1100,103 @@ tap_allmulti_disable(struct rte_eth_dev *dev)
tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
}
-static void
+static int
tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct pmd_internals *pmd = dev->data->dev_private;
enum ioctl_mode mode = LOCAL_ONLY;
struct ifreq ifr;
+ int ret;
+
+ if (pmd->type == ETH_TUNTAP_TYPE_TUN) {
+ TAP_LOG(ERR, "%s: can't MAC address for TUN",
+ dev->device->name);
+ return -ENOTSUP;
+ }
if (is_zero_ether_addr(mac_addr)) {
- RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
+ TAP_LOG(ERR, "%s: can't set an empty MAC address",
dev->device->name);
- return;
+ return -EINVAL;
}
/* Check the actual current MAC address on the tap netdevice */
- if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY);
+ if (ret < 0)
+ return ret;
if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
mac_addr))
- return;
+ return 0;
/* Check the current MAC address on the remote */
- if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY);
+ if (ret < 0)
+ return ret;
if (!is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
mac_addr))
mode = LOCAL_AND_REMOTE;
ifr.ifr_hwaddr.sa_family = AF_LOCAL;
rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode) < 0)
- return;
+ ret = tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, mode);
+ if (ret < 0)
+ return ret;
rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
if (pmd->remote_if_index && !pmd->flow_isolate) {
/* Replace MAC redirection rule after a MAC change */
- if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
- RTE_LOG(ERR, PMD,
- "%s: Couldn't delete MAC redirection rule\n",
+ ret = tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't delete MAC redirection rule",
dev->device->name);
- return;
+ return ret;
}
- if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
- RTE_LOG(ERR, PMD,
- "%s: Couldn't add MAC redirection rule\n",
+ ret = tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);
+ if (ret < 0) {
+ TAP_LOG(ERR,
+ "%s: Couldn't add MAC redirection rule",
dev->device->name);
+ return ret;
+ }
}
+
+ return 0;
+}
+
+static int
+tap_gso_ctx_setup(struct rte_gso_ctx *gso_ctx, struct rte_eth_dev *dev)
+{
+ uint32_t gso_types;
+ char pool_name[64];
+
+ /*
+ * Create a private mbuf pool with TAP_GSO_MBUF_SEG_SIZE bytes
+ * of data room per mbuf; use this pool for both direct and indirect mbufs
+ */
+
+ struct rte_mempool *mp; /* Mempool for GSO packets */
+
+ /* initialize GSO context */
+ gso_types = DEV_TX_OFFLOAD_TCP_TSO;
+ snprintf(pool_name, sizeof(pool_name), "mp_%s", dev->device->name);
+ mp = rte_mempool_lookup((const char *)pool_name);
+ if (!mp) {
+ mp = rte_pktmbuf_pool_create(pool_name, TAP_GSO_MBUFS_NUM,
+ TAP_GSO_MBUF_CACHE_SIZE, 0,
+ RTE_PKTMBUF_HEADROOM + TAP_GSO_MBUF_SEG_SIZE,
+ SOCKET_ID_ANY);
+ if (!mp) {
+ struct pmd_internals *pmd = dev->data->dev_private;
+ RTE_LOG(DEBUG, PMD, "%s: failed to create mbuf pool for device %s\n",
+ pmd->name, dev->device->name);
+ return -1;
+ }
+ }
+
+ gso_ctx->direct_pool = mp;
+ gso_ctx->indirect_pool = mp;
+ gso_ctx->gso_types = gso_types;
+ gso_ctx->gso_size = 0; /* gso_size is set in tx_burst() per packet */
+ gso_ctx->flag = 0;
+
+ return 0;
}
static int
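
tap_gso_ctx_setup() above sizes each GSO mbuf as RTE_PKTMBUF_HEADROOM plus the segment size and shares a single pool between direct and indirect mbufs. The equivalent standalone lookup-or-create, with illustrative constants in place of the PMD's real TAP_GSO_* values:

    #include <stdio.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    #define GSO_MBUFS_NUM 128   /* hypothetical sizing */
    #define GSO_SEG_SIZE  2048

    static struct rte_mempool *gso_pool_get(const char *dev_name)
    {
            char name[64];
            struct rte_mempool *mp;

            snprintf(name, sizeof(name), "mp_%s", dev_name);
            mp = rte_mempool_lookup(name); /* shared across queues */
            if (mp)
                    return mp;
            return rte_pktmbuf_pool_create(name, GSO_MBUFS_NUM,
                            0 /* cache */, 0 /* priv size */,
                            RTE_PKTMBUF_HEADROOM + GSO_SEG_SIZE,
                            SOCKET_ID_ANY);
    }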
@@ -979,52 +1205,62 @@ tap_setup_queue(struct rte_eth_dev *dev,
uint16_t qid,
int is_rx)
{
+ int ret;
int *fd;
int *other_fd;
const char *dir;
struct pmd_internals *pmd = dev->data->dev_private;
struct rx_queue *rx = &internals->rxq[qid];
struct tx_queue *tx = &internals->txq[qid];
+ struct rte_gso_ctx *gso_ctx;
if (is_rx) {
fd = &rx->fd;
other_fd = &tx->fd;
dir = "rx";
+ gso_ctx = NULL;
} else {
fd = &tx->fd;
other_fd = &rx->fd;
dir = "tx";
+ gso_ctx = &tx->gso_ctx;
}
if (*fd != -1) {
/* fd for this queue already exists */
- RTE_LOG(DEBUG, PMD, "%s: fd %d for %s queue qid %d exists\n",
+ TAP_LOG(DEBUG, "%s: fd %d for %s queue qid %d exists",
pmd->name, *fd, dir, qid);
+ gso_ctx = NULL;
} else if (*other_fd != -1) {
/* Only other_fd exists. dup it */
*fd = dup(*other_fd);
if (*fd < 0) {
*fd = -1;
- RTE_LOG(ERR, PMD, "%s: dup() failed.\n",
- pmd->name);
+ TAP_LOG(ERR, "%s: dup() failed.", pmd->name);
return -1;
}
- RTE_LOG(DEBUG, PMD, "%s: dup fd %d for %s queue qid %d (%d)\n",
+ TAP_LOG(DEBUG, "%s: dup fd %d for %s queue qid %d (%d)",
pmd->name, *other_fd, dir, qid, *fd);
} else {
/* Both RX and TX fds do not exist (equal -1). Create fd */
- *fd = tun_alloc(pmd);
+ *fd = tun_alloc(pmd, 0);
if (*fd < 0) {
*fd = -1; /* restore original value */
- RTE_LOG(ERR, PMD, "%s: tun_alloc() failed.\n",
- pmd->name);
+ TAP_LOG(ERR, "%s: tun_alloc() failed.", pmd->name);
return -1;
}
- RTE_LOG(DEBUG, PMD, "%s: add %s queue for qid %d fd %d\n",
+ TAP_LOG(DEBUG, "%s: add %s queue for qid %d fd %d",
pmd->name, dir, qid, *fd);
}
tx->mtu = &dev->data->mtu;
rx->rxmode = &dev->data->dev_conf.rxmode;
+ if (gso_ctx) {
+ ret = tap_gso_ctx_setup(gso_ctx, dev);
+ if (ret)
+ return -1;
+ }
+
+ tx->type = pmd->type;
return *fd;
}
@@ -1049,25 +1285,12 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
int i;
if (rx_queue_id >= dev->data->nb_rx_queues || !mp) {
- RTE_LOG(WARNING, PMD,
- "nb_rx_queues %d too small or mempool NULL\n",
+ TAP_LOG(WARNING,
+ "nb_rx_queues %d too small or mempool NULL",
dev->data->nb_rx_queues);
return -1;
}
- /* Verify application offloads are valid for our port and queue. */
- if (!tap_rxq_are_offloads_valid(dev, rx_conf->offloads)) {
- rte_errno = ENOTSUP;
- RTE_LOG(ERR, PMD,
- "%p: Rx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64 "\n",
- (void *)dev, rx_conf->offloads,
- dev->data->dev_conf.rxmode.offloads,
- (tap_rx_offload_get_port_capa() |
- tap_rx_offload_get_queue_capa()));
- return -rte_errno;
- }
rxq->mp = mp;
rxq->trigger_seen = 1; /* force initial burst */
rxq->in_port = dev->data->port_id;
@@ -1075,8 +1298,8 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
iovecs = rte_zmalloc_socket(dev->device->name, sizeof(*iovecs), 0,
socket_id);
if (!iovecs) {
- RTE_LOG(WARNING, PMD,
- "%s: Couldn't allocate %d RX descriptors\n",
+ TAP_LOG(WARNING,
+ "%s: Couldn't allocate %d RX descriptors",
dev->device->name, nb_desc);
return -ENOMEM;
}
@@ -1095,8 +1318,8 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
for (i = 1; i <= nb_desc; i++) {
*tmp = rte_pktmbuf_alloc(rxq->mp);
if (!*tmp) {
- RTE_LOG(WARNING, PMD,
- "%s: couldn't allocate memory for queue %d\n",
+ TAP_LOG(WARNING,
+ "%s: couldn't allocate memory for queue %d",
dev->device->name, rx_queue_id);
ret = -ENOMEM;
goto error;
@@ -1108,7 +1331,7 @@ tap_rx_queue_setup(struct rte_eth_dev *dev,
tmp = &(*tmp)->next;
}
- RTE_LOG(DEBUG, PMD, " RX TAP device name %s, qid %d on fd %d\n",
+ TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
return 0;
@@ -1131,39 +1354,24 @@ tap_tx_queue_setup(struct rte_eth_dev *dev,
struct pmd_internals *internals = dev->data->dev_private;
struct tx_queue *txq;
int ret;
+ uint64_t offloads;
if (tx_queue_id >= dev->data->nb_tx_queues)
return -1;
dev->data->tx_queues[tx_queue_id] = &internals->txq[tx_queue_id];
txq = dev->data->tx_queues[tx_queue_id];
- /*
- * Don't verify port offloads for application which
- * use the old API.
- */
- if (tx_conf != NULL &&
- !!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
- if (tap_txq_are_offloads_valid(dev, tx_conf->offloads)) {
- txq->csum = !!(tx_conf->offloads &
- (DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM));
- } else {
- rte_errno = ENOTSUP;
- RTE_LOG(ERR, PMD,
- "%p: Tx queue offloads 0x%" PRIx64
- " don't match port offloads 0x%" PRIx64
- " or supported offloads 0x%" PRIx64,
- (void *)dev, tx_conf->offloads,
- dev->data->dev_conf.txmode.offloads,
- tap_tx_offload_get_port_capa());
- return -rte_errno;
- }
- }
+
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->csum = !!(offloads &
+ (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM));
+
ret = tap_setup_queue(dev, internals, tx_queue_id, 0);
if (ret == -1)
return -1;
- RTE_LOG(DEBUG, PMD,
- " TX TAP device name %s, qid %d on fd %d csum %s\n",
+ TAP_LOG(DEBUG,
+ " TX TUNTAP device name %s, qid %d on fd %d csum %s",
internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
txq->csum ? "on" : "off");
@@ -1307,6 +1515,70 @@ tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
return 0;
}
+/**
+ * DPDK callback to update the RSS hash configuration.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[in] rss_conf
+ * RSS configuration data.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+tap_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ if (rss_conf->rss_hf & TAP_RSS_HF_MASK) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ if (rss_conf->rss_key && rss_conf->rss_key_len) {
+ /*
+ * Currently the TAP RSS key is hard-coded
+ * and cannot be updated
+ */
+ TAP_LOG(ERR,
+ "port %u RSS key cannot be updated",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ return 0;
+}
+
+static int
+tap_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+tap_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+static int
+tap_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static int
+tap_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
static const struct eth_dev_ops ops = {
.dev_start = tap_dev_start,
.dev_stop = tap_dev_stop,
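
The queue start/stop callbacks added above only flip the per-queue software state; their purpose is to let applications drive TAP queues through the standard ethdev API. A usage sketch from an application, assuming a valid port_id:

    #include <rte_ethdev.h>

    /* Both calls land in the trivial tap_rx_queue_stop() /
     * tap_rx_queue_start() callbacks registered below.
     */
    int ret = rte_eth_dev_rx_queue_stop(port_id, 0);

    if (ret == 0)
            ret = rte_eth_dev_rx_queue_start(port_id, 0);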
@@ -1315,6 +1587,10 @@ static const struct eth_dev_ops ops = {
.dev_infos_get = tap_dev_info,
.rx_queue_setup = tap_rx_queue_setup,
.tx_queue_setup = tap_tx_queue_setup,
+ .rx_queue_start = tap_rx_queue_start,
+ .tx_queue_start = tap_tx_queue_start,
+ .rx_queue_stop = tap_rx_queue_stop,
+ .tx_queue_stop = tap_tx_queue_stop,
.rx_queue_release = tap_rx_queue_release,
.tx_queue_release = tap_tx_queue_release,
.flow_ctrl_get = tap_flow_ctrl_get,
@@ -1332,12 +1608,14 @@ static const struct eth_dev_ops ops = {
.stats_get = tap_stats_get,
.stats_reset = tap_stats_reset,
.dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
+ .rss_hash_update = tap_rss_hash_update,
.filter_ctrl = tap_dev_filter_ctrl,
};
static int
eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
- char *remote_iface, int fixed_mac_type)
+ char *remote_iface, struct ether_addr *mac_addr,
+ enum rte_tuntap_type type)
{
int numa_node = rte_socket_id();
struct rte_eth_dev *dev;
@@ -1346,34 +1624,31 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
struct ifreq ifr;
int i;
- RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id());
-
- data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
- if (!data) {
- RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
- goto error_exit_nodev;
- }
+ TAP_LOG(DEBUG, "%s device on numa %u",
+ tuntap_name, rte_socket_id());
dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
if (!dev) {
- RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
+ TAP_LOG(ERR, "%s Unable to allocate device struct",
+ tuntap_name);
goto error_exit_nodev;
}
pmd = dev->data->dev_private;
pmd->dev = dev;
snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
+ pmd->type = type;
pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
if (pmd->ioctl_sock == -1) {
- RTE_LOG(ERR, PMD,
- "TAP Unable to get a socket for management: %s\n",
- strerror(errno));
+ TAP_LOG(ERR,
+ "%s Unable to get a socket for management: %s",
+ tuntap_name, strerror(errno));
goto error_exit;
}
/* Setup some default values */
- rte_memcpy(data, dev->data, sizeof(*data));
+ data = dev->data;
data->dev_private = pmd;
data->dev_flags = RTE_ETH_DEV_INTR_LSC;
data->numa_node = numa_node;
@@ -1384,7 +1659,6 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
data->nb_rx_queues = 0;
data->nb_tx_queues = 0;
- dev->data = data;
dev->dev_ops = &ops;
dev->rx_pkt_burst = pmd_rx_burst;
dev->tx_pkt_burst = pmd_tx_burst;
@@ -1394,39 +1668,43 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
dev->intr_handle = &pmd->intr_handle;
/* Presetup the fds to -1 as being not valid */
+ pmd->ka_fd = -1;
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
pmd->rxq[i].fd = -1;
pmd->txq[i].fd = -1;
}
- if (fixed_mac_type) {
- /* fixed mac = 00:64:74:61:70:<iface_idx> */
- static int iface_idx;
- char mac[ETHER_ADDR_LEN] = "\0dtap";
-
- mac[ETHER_ADDR_LEN - 1] = iface_idx++;
- rte_memcpy(&pmd->eth_addr, mac, ETHER_ADDR_LEN);
- } else {
- eth_random_addr((uint8_t *)&pmd->eth_addr);
+ if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
+ if (is_zero_ether_addr(mac_addr))
+ eth_random_addr((uint8_t *)&pmd->eth_addr);
+ else
+ rte_memcpy(&pmd->eth_addr, mac_addr, sizeof(*mac_addr));
}
- /* Immediately create the netdevice (this will create the 1st queue). */
- /* rx queue */
- if (tap_setup_queue(dev, pmd, 0, 1) == -1)
- goto error_exit;
- /* tx queue */
- if (tap_setup_queue(dev, pmd, 0, 0) == -1)
+ /*
+ * Allocate a TUN device keep-alive file descriptor that will only be
+ * closed when the TUN device itself is closed or removed.
+ * This keep-alive file descriptor will guarantee that the TUN device
+ * exists even when all of its queues are closed.
+ */
+ pmd->ka_fd = tun_alloc(pmd, 1);
+ if (pmd->ka_fd == -1) {
+ TAP_LOG(ERR, "Unable to create %s interface", tuntap_name);
goto error_exit;
+ }
ifr.ifr_mtu = dev->data->mtu;
if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE) < 0)
goto error_exit;
- memset(&ifr, 0, sizeof(struct ifreq));
- ifr.ifr_hwaddr.sa_family = AF_LOCAL;
- rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr, ETHER_ADDR_LEN);
- if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
- goto error_exit;
+ if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
+ memset(&ifr, 0, sizeof(struct ifreq));
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
+ ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
+ goto error_exit;
+ }
/*
* Set up everything related to rte_flow:
@@ -1438,22 +1716,22 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
*/
pmd->nlsk_fd = tap_nl_init(0);
if (pmd->nlsk_fd == -1) {
- RTE_LOG(WARNING, PMD, "%s: failed to create netlink socket.\n",
+ TAP_LOG(WARNING, "%s: failed to create netlink socket.",
pmd->name);
goto disable_rte_flow;
}
pmd->if_index = if_nametoindex(pmd->name);
if (!pmd->if_index) {
- RTE_LOG(ERR, PMD, "%s: failed to get if_index.\n", pmd->name);
+ TAP_LOG(ERR, "%s: failed to get if_index.", pmd->name);
goto disable_rte_flow;
}
if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to create multiq qdisc.\n",
+ TAP_LOG(ERR, "%s: failed to create multiq qdisc.",
pmd->name);
goto disable_rte_flow;
}
if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
+ TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
pmd->name);
goto disable_rte_flow;
}
@@ -1462,7 +1740,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
if (strlen(remote_iface)) {
pmd->remote_if_index = if_nametoindex(remote_iface);
if (!pmd->remote_if_index) {
- RTE_LOG(ERR, PMD, "%s: failed to get %s if_index.\n",
+ TAP_LOG(ERR, "%s: failed to get %s if_index.",
pmd->name, remote_iface);
goto error_remote;
}
@@ -1474,7 +1752,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
/* Replicate remote MAC address */
if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
+ TAP_LOG(ERR, "%s: failed to get %s MAC address.",
pmd->name, pmd->remote_iface);
goto error_remote;
}
@@ -1482,7 +1760,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
ETHER_ADDR_LEN);
/* The desired MAC is already in ifreq after SIOCGIFHWADDR. */
if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to get %s MAC address.\n",
+ TAP_LOG(ERR, "%s: failed to get %s MAC address.",
pmd->name, remote_iface);
goto error_remote;
}
@@ -1495,7 +1773,7 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
if (qdisc_create_ingress(pmd->nlsk_fd,
pmd->remote_if_index) < 0) {
- RTE_LOG(ERR, PMD, "%s: failed to create ingress qdisc.\n",
+ TAP_LOG(ERR, "%s: failed to create ingress qdisc.",
pmd->remote_iface);
goto error_remote;
}
@@ -1504,26 +1782,27 @@ eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0 ||
tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0 ||
tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0) {
- RTE_LOG(ERR, PMD,
- "%s: failed to create implicit rules.\n",
+ TAP_LOG(ERR,
+ "%s: failed to create implicit rules.",
pmd->name);
goto error_remote;
}
}
+ rte_eth_dev_probing_finish(dev);
return 0;
disable_rte_flow:
- RTE_LOG(ERR, PMD, " Disabling rte flow support: %s(%d)\n",
+ TAP_LOG(ERR, " Disabling rte flow support: %s(%d)",
strerror(errno), errno);
if (strlen(remote_iface)) {
- RTE_LOG(ERR, PMD, "Remote feature requires flow support.\n");
+ TAP_LOG(ERR, "Remote feature requires flow support.");
goto error_exit;
}
return 0;
error_remote:
- RTE_LOG(ERR, PMD, " Can't set up remote feature: %s(%d)\n",
+ TAP_LOG(ERR, " Can't set up remote feature: %s(%d)",
strerror(errno), errno);
tap_flow_implicit_flush(pmd, NULL);
@@ -1533,10 +1812,9 @@ error_exit:
rte_eth_dev_release_port(dev);
error_exit_nodev:
- RTE_LOG(ERR, PMD, "TAP Unable to initialize %s\n",
- rte_vdev_device_name(vdev));
+ TAP_LOG(ERR, "%s Unable to initialize %s",
+ tuntap_name, rte_vdev_device_name(vdev));
- rte_free(data);
return -EINVAL;
}
@@ -1548,7 +1826,7 @@ set_interface_name(const char *key __rte_unused,
char *name = (char *)extra_args;
if (value)
- snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
+ strlcpy(name, value, RTE_ETH_NAME_MAX_LEN - 1);
else
snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
DEFAULT_TAP_NAME, (tap_unit - 1));
@@ -1564,20 +1842,136 @@ set_remote_iface(const char *key __rte_unused,
char *name = (char *)extra_args;
if (value)
- snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value);
+ strlcpy(name, value, RTE_ETH_NAME_MAX_LEN);
return 0;
}
+static int parse_user_mac(struct ether_addr *user_mac,
+ const char *value)
+{
+ unsigned int index = 0;
+ char mac_temp[strlen(ETH_TAP_USR_MAC_FMT) + 1], *mac_byte = NULL;
+
+ if (user_mac == NULL || value == NULL)
+ return 0;
+
+ strlcpy(mac_temp, value, sizeof(mac_temp));
+ mac_byte = strtok(mac_temp, ":");
+
+ while ((mac_byte != NULL) &&
+ (strlen(mac_byte) <= 2) &&
+ (strlen(mac_byte) == strspn(mac_byte,
+ ETH_TAP_CMP_MAC_FMT))) {
+ user_mac->addr_bytes[index++] = strtoul(mac_byte, NULL, 16);
+ mac_byte = strtok(NULL, ":");
+ }
+
+ return index;
+}
+
static int
set_mac_type(const char *key __rte_unused,
const char *value,
void *extra_args)
{
- if (value &&
- !strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED)))
- *(int *)extra_args = 1;
+ struct ether_addr *user_mac = extra_args;
+
+ if (!value)
+ return 0;
+
+ if (!strncasecmp(ETH_TAP_MAC_FIXED, value, strlen(ETH_TAP_MAC_FIXED))) {
+ static int iface_idx;
+
+ /* fixed mac = 00:64:74:61:70:<iface_idx> */
+ memcpy((char *)user_mac->addr_bytes, "\0dtap", ETHER_ADDR_LEN);
+ user_mac->addr_bytes[ETHER_ADDR_LEN - 1] = iface_idx++ + '0';
+ goto success;
+ }
+
+ if (parse_user_mac(user_mac, value) != 6)
+ goto error;
+success:
+ TAP_LOG(DEBUG, "TAP user MAC param (%s)", value);
return 0;
+
+error:
+ TAP_LOG(ERR, "TAP user MAC (%s) is not in format (%s|%s)",
+ value, ETH_TAP_MAC_FIXED, ETH_TAP_USR_MAC_FMT);
+ return -1;
+}
+
+/*
+ * Open a TUN interface device. The TUN PMD:
+ * 1) sets the device type to TUN,
+ * 2) takes the iface kvarg as the interface name,
+ * 3) sets the link speed to 10G, as the interface is virtual.
+ */
+static int
+rte_pmd_tun_probe(struct rte_vdev_device *dev)
+{
+ const char *name, *params;
+ int ret;
+ struct rte_kvargs *kvlist = NULL;
+ char tun_name[RTE_ETH_NAME_MAX_LEN];
+ char remote_iface[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+
+ strcpy(tuntap_name, "TUN");
+
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+ memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ TAP_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
+ snprintf(tun_name, sizeof(tun_name), "%s%u",
+ DEFAULT_TUN_NAME, tun_unit++);
+
+ if (params && (params[0] != '\0')) {
+ TAP_LOG(DEBUG, "parameters (%s)", params);
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist) {
+ if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_IFACE_ARG,
+ &set_interface_name,
+ tun_name);
+
+ if (ret == -1)
+ goto leave;
+ }
+ }
+ }
+ pmd_link.link_speed = ETH_SPEED_NUM_10G;
+
+ TAP_LOG(NOTICE, "Initializing pmd_tun for %s as %s",
+ name, tun_name);
+
+ ret = eth_dev_tap_create(dev, tun_name, remote_iface, 0,
+ ETH_TUNTAP_TYPE_TUN);
+
+leave:
+ if (ret == -1) {
+ TAP_LOG(ERR, "Failed to create pmd for %s as %s",
+ name, tun_name);
+ tun_unit--; /* Restore the unit number */
+ }
+ rte_kvargs_free(kvlist);
+
+ return ret;
}
/* Open a TAP interface device.
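
parse_user_mac() above tokenizes the colon-separated devargs value and set_mac_type() accepts it only when exactly six byte tokens parse. A usage sketch against a devargs-style value (illustrative only; the function is static to the PMD):

    struct ether_addr mac = { .addr_bytes = {0} };

    /* A value such as mac=00:64:74:61:70:01 must yield
     * exactly six parsed bytes to be accepted.
     */
    if (parse_user_mac(&mac, "00:64:74:61:70:01") != 6)
            ; /* rejected: not a complete MAC address */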
@@ -1591,18 +1985,35 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
int speed;
char tap_name[RTE_ETH_NAME_MAX_LEN];
char remote_iface[RTE_ETH_NAME_MAX_LEN];
- int fixed_mac_type = 0;
+ struct ether_addr user_mac = { .addr_bytes = {0} };
+ struct rte_eth_dev *eth_dev;
+
+ strcpy(tuntap_name, "TAP");
name = rte_vdev_device_name(dev);
params = rte_vdev_device_args(dev);
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(params) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ TAP_LOG(ERR, "Failed to probe %s", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
+
speed = ETH_SPEED_NUM_10G;
- snprintf(tap_name, sizeof(tap_name), "%s%d",
+ snprintf(tap_name, sizeof(tap_name), "%s%u",
DEFAULT_TAP_NAME, tap_unit++);
memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
if (params && (params[0] != '\0')) {
- RTE_LOG(DEBUG, PMD, "parameters (%s)\n", params);
+ TAP_LOG(DEBUG, "parameters (%s)", params);
kvlist = rte_kvargs_parse(params, valid_arguments);
if (kvlist) {
@@ -1628,7 +2039,7 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
ret = rte_kvargs_process(kvlist,
ETH_TAP_MAC_ARG,
&set_mac_type,
- &fixed_mac_type);
+ &user_mac);
if (ret == -1)
goto leave;
}
@@ -1636,14 +2047,15 @@ rte_pmd_tap_probe(struct rte_vdev_device *dev)
}
pmd_link.link_speed = speed;
- RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
+ TAP_LOG(NOTICE, "Initializing pmd_tap for %s as %s",
name, tap_name);
- ret = eth_dev_tap_create(dev, tap_name, remote_iface, fixed_mac_type);
+ ret = eth_dev_tap_create(dev, tap_name, remote_iface, &user_mac,
+ ETH_TUNTAP_TYPE_TAP);
leave:
if (ret == -1) {
- RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
+ TAP_LOG(ERR, "Failed to create pmd for %s as %s",
name, tap_name);
tap_unit--; /* Restore the unit number */
}
@@ -1652,7 +2064,7 @@ leave:
return ret;
}
-/* detach a TAP device.
+/* detach a TUNTAP device.
*/
static int
rte_pmd_tap_remove(struct rte_vdev_device *dev)
@@ -1661,15 +2073,17 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
struct pmd_internals *internals;
int i;
- RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
- rte_socket_id());
-
/* find the ethdev entry */
eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (!eth_dev)
return 0;
internals = eth_dev->data->dev_private;
+
+ TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
+ (internals->type == ETH_TUNTAP_TYPE_TAP) ? "TAP" : "TUN",
+ rte_socket_id());
+
if (internals->nlsk_fd) {
tap_flow_flush(eth_dev, NULL);
tap_flow_implicit_flush(internals, NULL);
@@ -1688,20 +2102,39 @@ rte_pmd_tap_remove(struct rte_vdev_device *dev)
close(internals->ioctl_sock);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
-
rte_eth_dev_release_port(eth_dev);
+ if (internals->ka_fd != -1) {
+ close(internals->ka_fd);
+ internals->ka_fd = -1;
+ }
return 0;
}
+static struct rte_vdev_driver pmd_tun_drv = {
+ .probe = rte_pmd_tun_probe,
+ .remove = rte_pmd_tap_remove,
+};
+
static struct rte_vdev_driver pmd_tap_drv = {
.probe = rte_pmd_tap_probe,
.remove = rte_pmd_tap_remove,
};
+
RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
+RTE_PMD_REGISTER_VDEV(net_tun, pmd_tun_drv);
RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
+RTE_PMD_REGISTER_PARAM_STRING(net_tun,
+ ETH_TAP_IFACE_ARG "=<string> ");
RTE_PMD_REGISTER_PARAM_STRING(net_tap,
ETH_TAP_IFACE_ARG "=<string> "
- ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_FIXED " "
+ ETH_TAP_MAC_ARG "=" ETH_TAP_MAC_ARG_FMT " "
ETH_TAP_REMOTE_ARG "=<string>");
+int tap_logtype;
+
+RTE_INIT(tap_init_log)
+{
+ tap_logtype = rte_log_register("pmd.net.tap");
+ if (tap_logtype >= 0)
+ rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
index 53a506ad..44e2773f 100644
--- a/drivers/net/tap/rte_eth_tap.h
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _RTE_ETH_TAP_H_
@@ -15,12 +15,22 @@
#include <rte_ethdev_driver.h>
#include <rte_ether.h>
+#include <rte_gso.h>
+#include "tap_log.h"
#ifdef IFF_MULTI_QUEUE
#define RTE_PMD_TAP_MAX_QUEUES TAP_MAX_QUEUES
#else
#define RTE_PMD_TAP_MAX_QUEUES 1
#endif
+#define MAX_GSO_MBUFS 64
+
+enum rte_tuntap_type {
+ ETH_TUNTAP_TYPE_UNKNOWN,
+ ETH_TUNTAP_TYPE_TUN,
+ ETH_TUNTAP_TYPE_TAP,
+ ETH_TUNTAP_TYPE_MAX,
+};
struct pkt_stats {
uint64_t opackets; /* Number of output packets */
@@ -47,15 +57,18 @@ struct rx_queue {
struct tx_queue {
int fd;
+ int type; /* Type field - TUN|TAP */
uint16_t *mtu; /* Pointer to MTU from dev_data */
uint16_t csum:1; /* Enable checksum offloading */
struct pkt_stats stats; /* Stats for this TX queue */
+ struct rte_gso_ctx gso_ctx; /* GSO context */
};
struct pmd_internals {
struct rte_eth_dev *dev; /* Ethernet device. */
char remote_iface[RTE_ETH_NAME_MAX_LEN]; /* Remote netdevice name */
char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */
+ int type; /* Type field - TUN|TAP */
struct ether_addr eth_addr; /* Mac address of the device port */
struct ifreq remote_initial_flags; /* Remote netdevice flags on init */
int remote_if_index; /* remote netdevice IF_INDEX */
@@ -76,6 +89,7 @@ struct pmd_internals {
struct rx_queue rxq[RTE_PMD_TAP_MAX_QUEUES]; /* List of RX queues */
struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
struct rte_intr_handle intr_handle; /* LSC interrupt handle. */
+ int ka_fd; /* keep-alive file descriptor */
};
/* tap_intr.c */
diff --git a/drivers/net/tap/tap_bpf.h b/drivers/net/tap/tap_bpf.h
index 1a70ffe2..9192686a 100644
--- a/drivers/net/tap/tap_bpf.h
+++ b/drivers/net/tap/tap_bpf.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef __TAP_BPF_H__
diff --git a/drivers/net/tap/tap_bpf_api.c b/drivers/net/tap/tap_bpf_api.c
index 109a681e..98f6a760 100644
--- a/drivers/net/tap/tap_bpf_api.c
+++ b/drivers/net/tap/tap_bpf_api.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <errno.h>
diff --git a/drivers/net/tap/tap_bpf_insns.h b/drivers/net/tap/tap_bpf_insns.h
index 89873b6d..79e3e66b 100644
--- a/drivers/net/tap/tap_bpf_insns.h
+++ b/drivers/net/tap/tap_bpf_insns.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <tap_bpf.h>
diff --git a/drivers/net/tap/tap_bpf_program.c b/drivers/net/tap/tap_bpf_program.c
index 8abb3b76..1cb73822 100644
--- a/drivers/net/tap/tap_bpf_program.c
+++ b/drivers/net/tap/tap_bpf_program.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <stdint.h>
@@ -84,7 +84,7 @@ struct ipv6_l3_l4_tuple {
__u16 sport;
} __attribute__((packed));
-static const __u8 def_rss_key[] = {
+static const __u8 def_rss_key[TAP_RSS_HASH_KEY_SIZE] = {
0xd1, 0x81, 0xc6, 0x2c,
0xf7, 0xf4, 0xdb, 0x5b,
0x19, 0x83, 0xa2, 0xfc,
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index 551b2d83..0e01af62 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <errno.h>
@@ -270,13 +270,13 @@ static const struct tap_flow_items tap_flow_items[] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_IPV6),
.mask = &(const struct rte_flow_item_vlan){
- .tpid = -1,
/* DEI matching is not supported */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
.tci = 0xffef,
#else
.tci = 0xefff,
#endif
+ .inner_type = -1,
},
.mask_sz = sizeof(struct rte_flow_item_vlan),
.default_mask = &rte_flow_item_vlan_mask,
@@ -537,7 +537,7 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data)
if (!flow)
return 0;
msg = &flow->msg;
- if (!is_zero_ether_addr(&spec->dst)) {
+ if (!is_zero_ether_addr(&mask->dst)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
&spec->dst.addr_bytes);
tap_nlattr_add(&msg->nh,
@@ -578,13 +578,19 @@ tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
/* use default mask if none provided */
if (!mask)
mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
- /* TC does not support tpid masking. Only accept if exact match. */
- if (mask->tpid && mask->tpid != 0xffff)
+ /* Outer TPID cannot be matched. */
+ if (info->eth_type)
return -1;
/* Double-tagging not supported. */
- if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
+ if (info->vlan)
return -1;
info->vlan = 1;
+ if (mask->inner_type) {
+ /* TC does not support partial eth_type masking */
+ if (mask->inner_type != RTE_BE16(0xffff))
+ return -1;
+ info->eth_type = spec->inner_type;
+ }
if (!flow)
return 0;
msg = &flow->msg;
@@ -645,13 +651,13 @@ tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
info->eth_type = htons(ETH_P_IP);
if (!spec)
return 0;
- if (spec->hdr.dst_addr) {
+ if (mask->hdr.dst_addr) {
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
spec->hdr.dst_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
mask->hdr.dst_addr);
}
- if (spec->hdr.src_addr) {
+ if (mask->hdr.src_addr) {
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
spec->hdr.src_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
@@ -701,13 +707,13 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
info->eth_type = htons(ETH_P_IPV6);
if (!spec)
return 0;
- if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
}
- if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
@@ -756,10 +762,10 @@ tap_flow_create_udp(const struct rte_flow_item *item, void *data)
tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
spec->hdr.src_port);
return 0;
@@ -802,10 +808,10 @@ tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
spec->hdr.src_port);
return 0;
@@ -1033,6 +1039,12 @@ priv_flow_process(struct pmd_internals *pmd,
};
int action = 0; /* Only one action authorized for now */
+ if (attr->transfer) {
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL, "transfer is not supported");
+ return -rte_errno;
+ }
if (attr->group > MAX_GROUP) {
rte_flow_error_set(
error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -1140,6 +1152,7 @@ priv_flow_process(struct pmd_internals *pmd,
else
goto end;
}
+actions:
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
int err = 0;
@@ -1214,7 +1227,7 @@ priv_flow_process(struct pmd_internals *pmd,
if (err)
goto exit_action_not_supported;
}
- if (flow && rss)
+ if (flow)
err = rss_add_actions(flow, pmd, rss, error);
} else {
goto exit_action_not_supported;
@@ -1222,6 +1235,16 @@ priv_flow_process(struct pmd_internals *pmd,
if (err)
goto exit_action_not_supported;
}
+ /* When fate is unknown, drop traffic. */
+ if (!action) {
+ static const struct rte_flow_action drop[] = {
+ { .type = RTE_FLOW_ACTION_TYPE_DROP, },
+ { .type = RTE_FLOW_ACTION_TYPE_END, },
+ };
+
+ actions = drop;
+ goto actions;
+ }
end:
if (flow)
tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
@@ -1376,8 +1399,8 @@ tap_flow_create(struct rte_eth_dev *dev,
}
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule creation (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL,
@@ -1421,8 +1444,8 @@ tap_flow_create(struct rte_eth_dev *dev,
}
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule creation (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1476,8 +1499,8 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
if (ret < 0 && errno == ENOENT)
ret = 0;
if (ret < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule deletion (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule deletion (%d): %s",
errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1500,8 +1523,8 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
if (ret < 0 && errno == ENOENT)
ret = 0;
if (ret < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule deletion (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule deletion (%d): %s",
errno, strerror(errno));
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1545,10 +1568,14 @@ tap_flow_isolate(struct rte_eth_dev *dev,
{
struct pmd_internals *pmd = dev->data->dev_private;
+ /* normalize 'set' variable to contain 0 or 1 values */
if (set)
- pmd->flow_isolate = 1;
- else
- pmd->flow_isolate = 0;
+ set = 1;
+ /* if already in the right isolation mode - nothing to do */
+ if ((set ^ pmd->flow_isolate) == 0)
+ return 0;
+ /* mark the isolation mode for tap_flow_implicit_create() */
+ pmd->flow_isolate = set;
/*
* If netdevice is there, setup appropriate flow rules immediately.
* Otherwise it will be set when bringing up the netdevice (tun_alloc).
@@ -1556,20 +1583,20 @@ tap_flow_isolate(struct rte_eth_dev *dev,
if (!pmd->rxq[0].fd)
return 0;
if (set) {
- struct rte_flow *flow;
+ struct rte_flow *remote_flow;
while (1) {
- flow = LIST_FIRST(&pmd->implicit_flows);
- if (!flow)
+ remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ if (!remote_flow)
break;
/*
* Remove all implicit rules on the remote.
* Keep the local rule to redirect packets on TX.
* Keep also the last implicit local rule: ISOLATE.
*/
- if (flow->msg.t.tcm_ifindex == pmd->if_index)
+ if (remote_flow->msg.t.tcm_ifindex == pmd->if_index)
break;
- if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
+ if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0)
goto error;
}
/* Switch the TC rule according to pmd->flow_isolate */
@@ -1665,7 +1692,7 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
if (!remote_flow) {
- RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
+ TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
goto fail;
}
msg = &remote_flow->msg;
@@ -1706,21 +1733,21 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
tap_flow_set_handle(remote_flow);
if (priv_flow_process(pmd, attr, items, actions, NULL,
remote_flow, implicit_rte_flows[idx].mirred)) {
- RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
+ TAP_LOG(ERR, "rte flow rule validation failed");
goto fail;
}
err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
if (err < 0) {
- RTE_LOG(ERR, PMD, "Failure sending nl request\n");
+ TAP_LOG(ERR, "Failure sending nl request");
goto fail;
}
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- /* Silently ignore re-entering remote promiscuous rule */
- if (errno == EEXIST && idx == TAP_REMOTE_PROMISC)
+ /* Silently ignore re-entering existing rule */
+ if (errno == EEXIST)
goto success;
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule creation (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
goto fail;
}
@@ -1836,8 +1863,8 @@ static int rss_enable(struct pmd_internals *pmd,
sizeof(struct rss_key),
MAX_RSS_KEYS);
if (pmd->map_fd < 0) {
- RTE_LOG(ERR, PMD,
- "Failed to create BPF map (%d): %s\n",
+ TAP_LOG(ERR,
+ "Failed to create BPF map (%d): %s",
errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1854,7 +1881,7 @@ static int rss_enable(struct pmd_internals *pmd,
for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
if (pmd->bpf_fd[i] < 0) {
- RTE_LOG(ERR, PMD,
+ TAP_LOG(ERR,
"Failed to load BPF section %s for queue %d",
SEC_NAME_CLS_Q, i);
rte_flow_error_set(
@@ -1868,7 +1895,7 @@ static int rss_enable(struct pmd_internals *pmd,
rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
if (!rss_flow) {
- RTE_LOG(ERR, PMD,
+ TAP_LOG(ERR,
"Cannot allocate memory for rte_flow");
return -1;
}
@@ -1911,8 +1938,8 @@ static int rss_enable(struct pmd_internals *pmd,
return -1;
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- RTE_LOG(ERR, PMD,
- "Kernel refused TC filter rule creation (%d): %s\n",
+ TAP_LOG(ERR,
+ "Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
return err;
}
@@ -2039,11 +2066,21 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
struct rte_flow_error *error)
{
/* 4096 is the maximum number of instructions for a BPF program */
- int i;
+ unsigned int i;
int err;
struct rss_key rss_entry = { .hash_fields = 0,
.key_size = 0 };
+ /* Check supported RSS features */
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "a nonzero RSS encapsulation level is not supported");
+
/* Get a new map key for a new RSS rule */
err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
if (err < 0) {
@@ -2055,8 +2092,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
}
/* Update RSS map entry with queues */
- rss_entry.nb_queues = rss->num;
- for (i = 0; i < rss->num; i++)
+ rss_entry.nb_queues = rss->queue_num;
+ for (i = 0; i < rss->queue_num; i++)
rss_entry.queues[i] = rss->queue[i];
rss_entry.hash_fields =
(1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
@@ -2066,8 +2103,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
&flow->key_idx, &rss_entry);
if (err) {
- RTE_LOG(ERR, PMD,
- "Failed to update BPF map entry #%u (%d): %s\n",
+ TAP_LOG(ERR,
+ "Failed to update BPF map entry #%u (%d): %s",
flow->key_idx, errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -2085,8 +2122,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
flow->bpf_fd[SEC_L3_L4] =
tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
if (flow->bpf_fd[SEC_L3_L4] < 0) {
- RTE_LOG(ERR, PMD,
- "Failed to load BPF section %s (%d): %s\n",
+ TAP_LOG(ERR,
+ "Failed to load BPF section %s (%d): %s",
sec_name[SEC_L3_L4], errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -2147,9 +2184,8 @@ tap_dev_filter_ctrl(struct rte_eth_dev *dev,
*(const void **)arg = &tap_flow_ops;
return 0;
default:
- RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n",
- (void *)dev, filter_type);
+ TAP_LOG(ERR, "%p: filter type (%d) not supported",
+ dev, filter_type);
}
return -EINVAL;
}
-
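
Note: the two validation blocks added to rss_add_actions() follow the rte_flow RSS action rework in this release: the action conf now carries explicit func and level fields and renames num to queue_num. Below is a minimal sketch of an RSS action that passes these checks, written against the public rte_flow API; the helper name and queue list are illustrative, not part of this patch.

    #include <rte_flow.h>

    /* Hypothetical helper: default hash function, outer (level 0) RSS,
     * explicit queue list, which is what the new checks accept.
     */
    static struct rte_flow *
    create_rss_flow(uint16_t port_id, const uint16_t *queues,
                    unsigned int queue_num, struct rte_flow_error *err)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_rss rss = {
                    .func = RTE_ETH_HASH_FUNCTION_DEFAULT, /* non-default is rejected */
                    .level = 0,                             /* nonzero level is rejected */
                    .queue_num = queue_num,
                    .queue = queues,
            };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            return rte_flow_create(port_id, &attr, pattern, actions, err);
    }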
diff --git a/drivers/net/tap/tap_flow.h b/drivers/net/tap/tap_flow.h
index ac6a952d..ac60a9ae 100644
--- a/drivers/net/tap/tap_flow.h
+++ b/drivers/net/tap/tap_flow.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _TAP_FLOW_H_
diff --git a/drivers/net/tap/tap_intr.c b/drivers/net/tap/tap_intr.c
index b0e19914..fc590181 100644
--- a/drivers/net/tap/tap_intr.c
+++ b/drivers/net/tap/tap_intr.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 Mellanox Technologies, Ltd.
+ * Copyright 2018 Mellanox Technologies, Ltd
*/
/**
@@ -62,7 +62,7 @@ tap_rx_intr_vec_install(struct rte_eth_dev *dev)
intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
if (intr_handle->intr_vec == NULL) {
rte_errno = ENOMEM;
- RTE_LOG(ERR, PMD,
+ TAP_LOG(ERR,
"failed to allocate memory for interrupt vector,"
" Rx interrupts will not be supported");
return -rte_errno;
diff --git a/drivers/net/tap/tap_log.h b/drivers/net/tap/tap_log.h
new file mode 100644
index 00000000..fa06843a
--- /dev/null
+++ b/drivers/net/tap/tap_log.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
+ */
+
+extern int tap_logtype;
+
+#define TAP_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, tap_logtype, "%s(): " fmt "\n", \
+ __func__, ## args)
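
Note: the new tap_log.h assumes a tap_logtype registered elsewhere in the driver (rte_eth_tap.c, outside this hunk). A sketch of the usual registration pattern, mirroring the RTE_INIT constructor the thunderx hunk below uses; the constructor name and default level are assumptions:

    #include <rte_log.h>

    int tap_logtype;

    /* Illustrative constructor: register a dynamic log type following the
     * pmd.net.<driver> naming convention and default it to NOTICE, so
     * TAP_LOG(DEBUG, ...) stays silent unless raised via the EAL
     * log-level options at run time.
     */
    RTE_INIT(tap_log_init)
    {
            tap_logtype = rte_log_register("pmd.net.tap");
            if (tap_logtype >= 0)
                    rte_log_set_level(tap_logtype, RTE_LOG_NOTICE);
    }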
diff --git a/drivers/net/tap/tap_netlink.c b/drivers/net/tap/tap_netlink.c
index 82c8dc0e..6cb51009 100644
--- a/drivers/net/tap/tap_netlink.c
+++ b/drivers/net/tap/tap_netlink.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <errno.h>
@@ -13,6 +13,7 @@
#include <rte_malloc.h>
#include <tap_netlink.h>
#include <rte_random.h>
+#include "tap_log.h"
/* Must be quite large to support dumping a huge list of QDISC or filters. */
#define BUF_SIZE (32 * 1024) /* Size of the buffer to receive kernel messages */
@@ -45,19 +46,19 @@ tap_nl_init(uint32_t nl_groups)
fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
if (fd < 0) {
- RTE_LOG(ERR, PMD, "Unable to create a netlink socket\n");
+ TAP_LOG(ERR, "Unable to create a netlink socket");
return -1;
}
if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int))) {
- RTE_LOG(ERR, PMD, "Unable to set socket buffer send size\n");
+ TAP_LOG(ERR, "Unable to set socket buffer send size");
return -1;
}
if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int))) {
- RTE_LOG(ERR, PMD, "Unable to set socket buffer receive size\n");
+ TAP_LOG(ERR, "Unable to set socket buffer receive size");
return -1;
}
if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
- RTE_LOG(ERR, PMD, "Unable to bind to the netlink socket\n");
+ TAP_LOG(ERR, "Unable to bind to the netlink socket");
return -1;
}
return fd;
@@ -76,7 +77,7 @@ int
tap_nl_final(int nlsk_fd)
{
if (close(nlsk_fd)) {
- RTE_LOG(ERR, PMD, "Failed to close netlink socket: %s (%d)\n",
+ TAP_LOG(ERR, "Failed to close netlink socket: %s (%d)",
strerror(errno), errno);
return -1;
}
@@ -117,7 +118,7 @@ tap_nl_send(int nlsk_fd, struct nlmsghdr *nh)
nh->nlmsg_seq = (uint32_t)rte_rand();
send_bytes = sendmsg(nlsk_fd, &msg, 0);
if (send_bytes < 0) {
- RTE_LOG(ERR, PMD, "Failed to send netlink message: %s (%d)\n",
+ TAP_LOG(ERR, "Failed to send netlink message: %s (%d)",
strerror(errno), errno);
return -1;
}
@@ -300,9 +301,8 @@ tap_nlattr_nested_start(struct nlmsg *msg, uint16_t type)
tail = rte_zmalloc(NULL, sizeof(struct nested_tail), 0);
if (!tail) {
- RTE_LOG(ERR, PMD,
- "Couldn't allocate memory for nested netlink"
- " attribute\n");
+ TAP_LOG(ERR,
+ "Couldn't allocate memory for nested netlink attribute");
return -1;
}
diff --git a/drivers/net/tap/tap_netlink.h b/drivers/net/tap/tap_netlink.h
index fafef840..faa73ba1 100644
--- a/drivers/net/tap/tap_netlink.h
+++ b/drivers/net/tap/tap_netlink.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _TAP_NETLINK_H_
diff --git a/drivers/net/tap/tap_rss.h b/drivers/net/tap/tap_rss.h
index 3bb0d140..17606b2d 100644
--- a/drivers/net/tap/tap_rss.h
+++ b/drivers/net/tap/tap_rss.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _TAP_RSS_H_
@@ -9,6 +9,12 @@
#define TAP_MAX_QUEUES 16
#endif
+/* Fixed RSS hash key size in bytes. */
+#define TAP_RSS_HASH_KEY_SIZE 40
+
+/* Supported RSS */
+#define TAP_RSS_HF_MASK (~(ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP))
+
/* hashed fields for RSS */
enum hash_field {
HASH_FIELD_IPV4_L3, /* IPv4 src/dst addr */
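
Note: TAP_RSS_HF_MASK is defined as the complement of the hash types the BPF program can actually compute, so a support check reduces to a single AND. A hedged sketch of such a check; the helper name and call site are illustrative, the real consumer lives in the TAP ethdev code outside this hunk:

    #include <rte_ethdev.h>
    #include "tap_rss.h"

    /* Return nonzero when every requested hash type falls within
     * ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP.
     */
    static int
    tap_rss_hf_supported(uint64_t rss_hf)
    {
            return (rss_hf & TAP_RSS_HF_MASK) == 0;
    }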
diff --git a/drivers/net/tap/tap_tcmsgs.c b/drivers/net/tap/tap_tcmsgs.c
index 954f13eb..3c9d0366 100644
--- a/drivers/net/tap/tap_tcmsgs.c
+++ b/drivers/net/tap/tap_tcmsgs.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <inttypes.h>
@@ -10,6 +10,7 @@
#include <rte_log.h>
#include <tap_tcmsgs.h>
+#include "tap_log.h"
struct qdisc {
uint32_t handle;
@@ -81,8 +82,8 @@ qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo)
if (!nlsk_fd) {
fd = tap_nl_init(0);
if (fd < 0) {
- RTE_LOG(ERR, PMD,
- "Could not delete QDISC: null netlink socket\n");
+ TAP_LOG(ERR,
+ "Could not delete QDISC: null netlink socket");
return -1;
}
} else {
@@ -261,7 +262,7 @@ qdisc_create_multiq(int nlsk_fd, uint16_t ifindex)
err = qdisc_add_multiq(nlsk_fd, ifindex);
if (err < 0 && errno != -EEXIST) {
- RTE_LOG(ERR, PMD, "Could not add multiq qdisc (%d): %s\n",
+ TAP_LOG(ERR, "Could not add multiq qdisc (%d): %s",
errno, strerror(errno));
return -1;
}
@@ -287,7 +288,7 @@ qdisc_create_ingress(int nlsk_fd, uint16_t ifindex)
err = qdisc_add_ingress(nlsk_fd, ifindex);
if (err < 0 && errno != -EEXIST) {
- RTE_LOG(ERR, PMD, "Could not add ingress qdisc (%d): %s\n",
+ TAP_LOG(ERR, "Could not add ingress qdisc (%d): %s",
errno, strerror(errno));
return -1;
}
diff --git a/drivers/net/tap/tap_tcmsgs.h b/drivers/net/tap/tap_tcmsgs.h
index f72f8c5c..8cedea84 100644
--- a/drivers/net/tap/tap_tcmsgs.h
+++ b/drivers/net/tap/tap_tcmsgs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef _TAP_TCMSGS_H_
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index ea8092ca..5b1abe20 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -699,6 +699,19 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
else
val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);
+ nic->vlan_strip = enable;
+ nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
+nicvf_first_skip_config(struct nicvf *nic, uint8_t num_dwords)
+{
+ uint64_t val;
+
+ val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+ val &= ~(0xfULL);
+ val |= (num_dwords & 0xf);
+
nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}
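
Note: nicvf_first_skip_config() read-modify-writes only the low four bits of NIC_VNIC_RQ_GEN_CFG, so the first skip is programmed in dwords (8-byte units) and caps out at 15 dwords, i.e. 120 bytes. That is consistent with the skip_data_bytes validation added to nicvf_ethdev.c below, which accepts only multiples of 8 strictly below 128.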
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index 284d0bdf..fd13ea84 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -193,6 +193,7 @@ uint32_t nicvf_qsize_sq_roundup(uint32_t val);
void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
void nicvf_apad_config(struct nicvf *nic, bool enable);
+void nicvf_first_skip_config(struct nicvf *nic, uint8_t dwords);
int nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg);
int nicvf_rss_term(struct nicvf *nic);
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index b13c21ff..b12c8ec5 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -171,7 +171,10 @@
/* Min/Max packet size */
#define NIC_HW_MIN_FRS (64)
-#define NIC_HW_MAX_FRS (9200) /* 9216 max pkt including FCS */
+/* ETH_HLEN+ETH_FCS_LEN+2*VLAN_HLEN */
+#define NIC_HW_L2_OVERHEAD (26)
+#define NIC_HW_MAX_MTU (9190)
+#define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD)
#define NIC_HW_MAX_SEGS (12)
/* Descriptor alignments */
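
Note: the reworked macros preserve the old 9216-byte hardware frame limit: NIC_HW_L2_OVERHEAD = ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 * VLAN_HLEN (2 * 4) = 26 bytes, hence NIC_HW_MAX_FRS = 9190 + 26 = 9216, matching the "9216 max pkt including FCS" noted in the removed comment.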
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index a65361fb..a55c3ca6 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -15,7 +15,6 @@
#include <sys/queue.h>
#include <rte_alarm.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
@@ -35,6 +34,8 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_tailq.h>
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
#include "base/nicvf_plat.h"
@@ -51,10 +52,10 @@ static void nicvf_dev_stop(struct rte_eth_dev *dev);
static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
bool cleanup);
+static int nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask);
+static int nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-RTE_INIT(nicvf_init_log);
-static void
-nicvf_init_log(void)
+RTE_INIT(nicvf_init_log)
{
nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
if (nicvf_logtype_mbox >= 0)
@@ -69,25 +70,14 @@ nicvf_init_log(void)
rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
}
-static inline int
-nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
+static void
+nicvf_link_status_update(struct nicvf *nic,
+ struct rte_eth_link *link)
{
- struct rte_eth_link *dst = &dev->data->dev_link;
- struct rte_eth_link *src = link;
+ memset(link, 0, sizeof(*link));
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
+ link->link_status = nic->link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
-static inline void
-nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
-{
- link->link_status = nic->link_up;
- link->link_duplex = ETH_LINK_AUTONEG;
if (nic->duplex == NICVF_HALF_DUPLEX)
link->link_duplex = ETH_LINK_HALF_DUPLEX;
else if (nic->duplex == NICVF_FULL_DUPLEX)
@@ -101,12 +91,17 @@ nicvf_interrupt(void *arg)
{
struct rte_eth_dev *dev = arg;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_link link;
if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
- if (dev->data->dev_conf.intr_conf.lsc)
- nicvf_set_eth_link_status(nic, &dev->data->dev_link);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ nicvf_link_status_update(nic, &link);
+ rte_eth_linkstatus_set(dev, &link);
+
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
+ }
}
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
@@ -153,24 +148,23 @@ nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
if (wait_to_complete) {
/* rte_eth_link_get() might need to wait up to 9 seconds */
for (i = 0; i < MAX_CHECK_TIME; i++) {
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
- if (link.link_status)
+ nicvf_link_status_update(nic, &link);
+ if (link.link_status == ETH_LINK_UP)
break;
rte_delay_ms(CHECK_INTERVAL);
}
} else {
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
+ nicvf_link_status_update(nic, &link);
}
- return nicvf_atomic_write_link_status(dev, &link);
+
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD;
size_t i;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
@@ -188,7 +182,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
* Refuse mtu that requires the support of scattered packets
* when this feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (dev->data->dev_started && !dev->data->scattered_rx &&
(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
return -EINVAL;
@@ -202,11 +196,11 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
else
rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
+ if (nicvf_mbox_update_hw_max_frs(nic, mtu))
return -EINVAL;
- /* Update max frame size */
- rxmode->max_rx_pkt_len = (uint32_t)frame_size;
+ /* Update max_rx_pkt_len */
+ rxmode->max_rx_pkt_len = mtu + ETHER_HDR_LEN;
nic->mtu = mtu;
for (i = 0; i < nic->sqs_count; i++)
@@ -363,11 +357,9 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
}
memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
- if (dev->rx_pkt_burst == nicvf_recv_pkts ||
- dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
- return ptypes;
- return NULL;
+ /* All Ptypes are supported in all Rx functions. */
+ return ptypes;
}
static void
@@ -891,7 +883,7 @@ nicvf_dev_tx_queue_release(void *sq)
static void
nicvf_set_tx_function(struct rte_eth_dev *dev)
{
- struct nicvf_txq *txq;
+ struct nicvf_txq *txq = NULL;
size_t i;
bool multiseg = false;
@@ -912,6 +904,9 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
dev->tx_pkt_burst = nicvf_xmit_pkts;
}
+ if (!txq)
+ return;
+
if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
else
@@ -921,13 +916,23 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
static void
nicvf_set_rx_function(struct rte_eth_dev *dev)
{
- if (dev->data->scattered_rx) {
- PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
- dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
- } else {
- PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
- dev->rx_pkt_burst = nicvf_recv_pkts;
- }
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ const eth_rx_burst_t rx_burst_func[2][2][2] = {
+ /* [NORMAL/SCATTER] [CKSUM/NO_CKSUM] [VLAN_STRIP/NO_VLAN_STRIP] */
+ [0][0][0] = nicvf_recv_pkts_no_offload,
+ [0][0][1] = nicvf_recv_pkts_vlan_strip,
+ [0][1][0] = nicvf_recv_pkts_cksum,
+ [0][1][1] = nicvf_recv_pkts_cksum_vlan_strip,
+ [1][0][0] = nicvf_recv_pkts_multiseg_no_offload,
+ [1][0][1] = nicvf_recv_pkts_multiseg_vlan_strip,
+ [1][1][0] = nicvf_recv_pkts_multiseg_cksum,
+ [1][1][1] = nicvf_recv_pkts_multiseg_cksum_vlan_strip,
+ };
+
+ dev->rx_pkt_burst =
+ rx_burst_func[dev->data->scattered_rx]
+ [nic->offload_cksum][nic->vlan_strip];
}
static int
@@ -939,7 +944,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
bool is_single_pool;
struct nicvf_txq *txq;
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint64_t conf_offloads, offload_capa, unsupported_offloads;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
@@ -953,17 +958,6 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
- conf_offloads = tx_conf->offloads;
- offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
- unsupported_offloads = conf_offloads & ~offload_capa;
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
- unsupported_offloads, conf_offloads, offload_capa);
- return -ENOTSUP;
- }
-
/* Tx deferred start is not supported */
if (tx_conf->tx_deferred_start) {
PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1015,9 +1009,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
txq->tx_free_thresh = tx_free_thresh;
txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
- txq->offloads = conf_offloads;
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+ txq->offloads = offloads;
- is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
+ is_single_pool = !!(offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
/* Choose optimum free threshold value for multipool case */
if (!is_single_pool) {
@@ -1248,6 +1243,7 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
{
uintptr_t p;
struct rte_mbuf mb_def;
+ struct nicvf *nic = rxq->nic;
RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
@@ -1257,8 +1253,11 @@ nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
offsetof(struct rte_mbuf, data_off) != 4);
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
offsetof(struct rte_mbuf, data_off) != 6);
+ RTE_BUILD_BUG_ON(offsetof(struct nicvf_rxq, rxq_fastpath_data_end) -
+ offsetof(struct nicvf_rxq,
+ rxq_fastpath_data_start) > 128);
mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM + (nic->skip_bytes);
mb_def.port = rxq->port_id;
rte_mbuf_refcnt_set(&mb_def, 1);
@@ -1277,10 +1276,20 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t rx_free_thresh;
struct nicvf_rxq *rxq;
struct nicvf *nic = nicvf_pmd_priv(dev);
- uint64_t conf_offloads, offload_capa, unsupported_offloads;
+ uint64_t offloads;
+ uint32_t buffsz;
+ struct rte_pktmbuf_pool_private *mbp_priv;
PMD_INIT_FUNC_TRACE();
+ /* First skip check */
+ mbp_priv = rte_mempool_get_priv(mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ if (buffsz < (uint32_t)(nic->skip_bytes)) {
+ PMD_INIT_LOG(ERR, "First skip is more than configured buffer size");
+ return -EINVAL;
+ }
+
if (qidx >= MAX_RCV_QUEUES_PER_QS)
nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
@@ -1291,24 +1300,6 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
-
- conf_offloads = rx_conf->offloads;
-
- if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
- PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
- conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
- }
-
- offload_capa = NICVF_RX_OFFLOAD_CAPA;
- unsupported_offloads = conf_offloads & ~offload_capa;
-
- if (unsupported_offloads) {
- PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- unsupported_offloads, conf_offloads, offload_capa);
- return -ENOTSUP;
- }
-
/* Mempool memory must be contiguous, so must be one memory segment*/
if (mp->nb_mem_chunks != 1) {
PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1316,7 +1307,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
}
/* Mempool memory must be physically contiguous */
- if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG) {
PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
return -EINVAL;
}
@@ -1334,6 +1325,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
return -EINVAL;
}
+
/* Check rx_free_thresh upper bound */
rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
rx_conf->rx_free_thresh :
@@ -1389,10 +1381,11 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
nicvf_rx_queue_reset(rxq);
+ offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
" phy=0x%" PRIx64 " offloads=0x%" PRIx64,
nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
- rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
+ rte_mempool_avail_count(mp), rxq->phys, offloads);
dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1408,8 +1401,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
/* Autonegotiation may be disabled */
dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
dev_info->speed_capa |= ETH_LINK_SPEED_10M | ETH_LINK_SPEED_100M |
@@ -1418,7 +1409,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa |= ETH_LINK_SPEED_40G;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
+ dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN;
dev_info->max_rx_queues =
(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_tx_queues =
@@ -1445,12 +1436,6 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
- .txq_flags =
- ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOREFCOUNT |
- ETH_TXQ_FLAGS_NOMULTMEMP |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP,
.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -1495,7 +1480,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
struct rte_mbuf *mbuf;
uint16_t rx_start, rx_end;
uint16_t tx_start, tx_end;
- bool vlan_strip;
+ int mask;
PMD_INIT_FUNC_TRACE();
@@ -1541,6 +1526,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
return -EINVAL;
}
rxq->mbuf_phys_off -= data_off;
+ rxq->mbuf_phys_off -= nic->skip_bytes;
if (mbuf_phys_off == 0)
mbuf_phys_off = rxq->mbuf_phys_off;
@@ -1615,9 +1601,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
/* Configure VLAN Strip */
- vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP);
- nicvf_vlan_hw_strip(nic, vlan_strip);
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ret = nicvf_vlan_offload_config(dev, mask);
/* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
* to the 64bit memory address.
@@ -1751,8 +1737,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
/* Setup MTU based on max_rx_pkt_len or default */
mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
dev->data->dev_conf.rxmode.max_rx_pkt_len
- - ETHER_HDR_LEN - ETHER_CRC_LEN
- : ETHER_MTU;
+ - ETHER_HDR_LEN : ETHER_MTU;
if (nicvf_dev_set_mtu(dev, mtu)) {
PMD_INIT_LOG(ERR, "Failed to set default mtu size");
@@ -1771,7 +1756,7 @@ nicvf_dev_start(struct rte_eth_dev *dev)
return ret;
}
- /* Configure callbacks based on scatter mode */
+ /* Configure callbacks based on offloads */
nicvf_set_tx_function(dev);
nicvf_set_rx_function(dev);
@@ -1923,8 +1908,6 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
struct rte_eth_txmode *txmode = &conf->txmode;
struct nicvf *nic = nicvf_pmd_priv(dev);
uint8_t cqcount;
- uint64_t conf_rx_offloads, rx_offload_capa;
- uint64_t conf_tx_offloads, tx_offload_capa;
PMD_INIT_FUNC_TRACE();
@@ -1933,32 +1916,10 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
- conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
- tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
-
- if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
- PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- conf_tx_offloads, tx_offload_capa);
- return -ENOTSUP;
- }
-
- if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
- PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
- rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
- }
-
- conf_rx_offloads = rxmode->offloads;
- rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
-
- if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
- PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
- "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
- conf_rx_offloads, rx_offload_capa);
- return -ENOTSUP;
- }
-
- if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+ /* KEEP_CRC offload flag is not supported by PMD
+ * can remove the below block when DEV_RX_OFFLOAD_CRC_STRIP removed
+ */
+ if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
}
@@ -2014,6 +1975,9 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
}
}
+ if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ nic->offload_cksum = 1;
+
PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
dev->data->port_id, nicvf_hw_cap(nic));
@@ -2033,6 +1997,7 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
.dev_infos_get = nicvf_dev_info_get,
.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
.mtu_set = nicvf_dev_set_mtu,
+ .vlan_offload_set = nicvf_vlan_offload_set,
.reta_update = nicvf_dev_reta_update,
.reta_query = nicvf_dev_reta_query,
.rss_hash_update = nicvf_dev_rss_hash_update,
@@ -2050,6 +2015,83 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = {
};
static int
+nicvf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct rte_eth_rxmode *rxmode;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ rxmode = &dev->data->dev_conf.rxmode;
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ nicvf_vlan_hw_strip(nic, true);
+ else
+ nicvf_vlan_hw_strip(nic, false);
+ }
+
+ return 0;
+}
+
+static int
+nicvf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ nicvf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+static inline int
+nicvf_set_first_skip(struct rte_eth_dev *dev)
+{
+ int bytes_to_skip = 0;
+ int ret = 0;
+ unsigned int i;
+ struct rte_kvargs *kvlist;
+ static const char *const skip[] = {
+ SKIP_DATA_BYTES,
+ NULL};
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (!dev->device->devargs) {
+ nicvf_first_skip_config(nic, 0);
+ return ret;
+ }
+
+ kvlist = rte_kvargs_parse(dev->device->devargs->args, skip);
+ if (!kvlist)
+ return -EINVAL;
+
+ if (kvlist->count == 0)
+ goto exit;
+
+ for (i = 0; i != kvlist->count; ++i) {
+ const struct rte_kvargs_pair *pair = &kvlist->pairs[i];
+
+ if (!strcmp(pair->key, SKIP_DATA_BYTES))
+ bytes_to_skip = atoi(pair->value);
+ }
+
+	/* 128 bytes amounts to one cache line */
+ if (bytes_to_skip >= 0 && bytes_to_skip < 128) {
+ if (!(bytes_to_skip % 8)) {
+ nicvf_first_skip_config(nic, (bytes_to_skip / 8));
+ nic->skip_bytes = bytes_to_skip;
+ goto kvlist_free;
+ } else {
+			PMD_INIT_LOG(ERR, "skip_data_bytes should be a multiple of 8");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ PMD_INIT_LOG(ERR, "skip_data_bytes should be less than 128");
+ ret = -EINVAL;
+ goto exit;
+ }
+exit:
+ nicvf_first_skip_config(nic, 0);
+kvlist_free:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+static int
nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
{
int ret;
@@ -2158,6 +2200,11 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
goto malloc_fail;
}
+ ret = nicvf_set_first_skip(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to configure first skip");
+ goto malloc_fail;
+ }
PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, nic->vendor_id, nic->device_id,
nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
@@ -2230,3 +2277,4 @@ static struct rte_pci_driver rte_nicvf_pmd = {
RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_thunderx, SKIP_DATA_BYTES "=<int>");
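
Note: with SKIP_DATA_BYTES registered above, the first-skip offset becomes a per-device argument parsed in nicvf_set_first_skip(). A hypothetical invocation (PCI address made up for illustration):

    testpmd -l 0-3 -w 0002:01:00.1,skip_data_bytes=8 -- -i

The value must be a multiple of 8 and strictly below 128, otherwise probing fails with -EINVAL; omitting the argument programs a skip of zero.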
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index ea8dccd1..ae440fef 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -38,6 +38,7 @@
DEV_TX_OFFLOAD_MULTI_SEGS)
#define NICVF_RX_OFFLOAD_CAPA ( \
+ DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_CRC_STRIP | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
@@ -51,6 +52,7 @@
#define VLAN_TAG_SIZE 4 /* 802.3ac tag */
+#define SKIP_DATA_BYTES "skip_data_bytes"
static inline struct nicvf *
nicvf_pmd_priv(struct rte_eth_dev *eth_dev)
{
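
Note: DEV_RX_OFFLOAD_CHECKSUM joining NICVF_RX_OFFLOAD_CAPA here pairs with the nicvf_dev_configure() hunk above, which latches the request into nic->offload_cksum; that bit then selects a checksum-aware entry from the Rx burst function table in nicvf_set_rx_function().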
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 72305d9d..247c3568 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -162,12 +162,14 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
free_desc -= TX_DESC_PER_PKT;
}
- sq->tail = tail;
- sq->xmit_bufs += i;
- rte_wmb();
+ if (likely(i)) {
+ sq->tail = tail;
+ sq->xmit_bufs += i;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ }
return i;
}
@@ -218,12 +220,14 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- sq->tail = tail;
- sq->xmit_bufs += used_bufs;
- rte_wmb();
+ if (likely(used_desc)) {
+ sq->tail = tail;
+ sq->xmit_bufs += used_bufs;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, used_desc);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, used_desc);
+ }
return i;
}
@@ -327,6 +331,20 @@ nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
}
+static inline uint64_t __hot
+nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
+{
+ static const uint64_t flag_table[3] __rte_cache_aligned = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ };
+
+ const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
+ (cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
+ return flag_table[idx];
+}
+
static inline int __hot
nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
{
@@ -385,11 +403,13 @@ nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
if (likely(cqe_rx_w0.rss_alg)) {
pkt->hash.rss = cqe_rx_w2.rss_tag;
pkt->ol_flags |= PKT_RX_RSS_HASH;
+
}
}
-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+ const uint32_t flag)
{
uint32_t i, to_process;
struct cqe_rx_t *cqe_rx;
@@ -420,7 +440,19 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
- pkt->ol_flags = 0;
+
+ if (flag & NICVF_RX_OFFLOAD_NONE)
+ pkt->ol_flags = 0;
+ if (flag & NICVF_RX_OFFLOAD_CKSUM)
+ pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+ if (unlikely(cqe_rx_w0.vlan_stripped)) {
+ pkt->ol_flags |= PKT_RX_VLAN
+ | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci =
+ rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+ }
+ }
pkt->data_len = cqe_rx_w3.rb0_sz;
pkt->pkt_len = cqe_rx_w3.rb0_sz;
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -445,11 +477,43 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return to_process;
}
-static inline uint16_t __hot
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
uint64_t mbuf_phys_off,
struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
- uint64_t mbuf_init)
+ uint64_t mbuf_init, const uint32_t flag)
{
struct rte_mbuf *pkt, *seg, *prev;
cqe_rx_word0_t cqe_rx_w0;
@@ -467,12 +531,22 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
- pkt->ol_flags = 0;
pkt->pkt_len = cqe_rx_w1.pkt_len;
pkt->data_len = rb_sz[nicvf_frag_num(0)];
nicvf_mbuff_init_mseg_update(
pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_NONE)
+ pkt->ol_flags = 0;
+ if (flag & NICVF_RX_OFFLOAD_CKSUM)
+ pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+ if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+ if (unlikely(cqe_rx_w0.vlan_stripped)) {
+ pkt->ol_flags |= PKT_RX_VLAN
+ | PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+ }
+ }
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
*rx_pkt = pkt;
@@ -491,9 +565,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
return nb_segs;
}
-uint16_t __hot
+static __rte_always_inline uint16_t __hot
nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+ uint16_t nb_pkts, const uint32_t flag)
{
union cq_entry_t *cq_entry;
struct cqe_rx_t *cqe_rx;
@@ -515,7 +589,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
cq_entry = &desc[cqe_head];
cqe_rx = (struct cqe_rx_t *)cq_entry;
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
- rx_pkts + i, rbptr_offset, mbuf_init);
+ rx_pkts + i, rbptr_offset, mbuf_init, flag);
buffers_consumed += nb_segs;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -535,6 +609,38 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
return to_process;
}
+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+ NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
uint32_t
nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
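
Note: the Rx rework relies on a standard compile-time specialization idiom: the worker takes a const uint32_t flag, is forced inline, and every exported wrapper passes a constant, so the per-packet offload branches are resolved at build time rather than per burst. A stripped-down sketch of the idiom, with all names illustrative rather than driver code:

    #include <stdint.h>
    #include <rte_common.h>

    #define OFF_NONE  0x1
    #define OFF_CKSUM 0x2

    /* 'flag' is a compile-time constant in each caller below, so the
     * compiler deletes the untaken branch and emits one specialized
     * body per wrapper.
     */
    static __rte_always_inline uint16_t
    recv_common(uint16_t nb_pkts, const uint32_t flag)
    {
            uint16_t done = 0;

            if (flag & OFF_CKSUM)
                    done = nb_pkts; /* checksum path, folded in or out */
            return done;
    }

    uint16_t recv_no_offload(uint16_t n) { return recv_common(n, OFF_NONE); }
    uint16_t recv_cksum(uint16_t n) { return recv_common(n, OFF_CKSUM); }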
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 8bdd582e..a39808cb 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -8,6 +8,10 @@
#include <rte_byteorder.h>
#include <rte_ethdev_driver.h>
+#define NICVF_RX_OFFLOAD_NONE 0x1
+#define NICVF_RX_OFFLOAD_CKSUM 0x2
+#define NICVF_RX_OFFLOAD_VLAN_STRIP 0x4
+
#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
#ifndef __hot
@@ -86,9 +90,23 @@ nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
-uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts);
-uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_no_offload(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t pkts);
+uint16_t nicvf_recv_pkts_cksum(void *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t pkts);
+uint16_t nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t nicvf_recv_pkts_multiseg_no_offload(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts);
uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index d4a83c37..dd52f38e 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -55,25 +55,27 @@ union mbuf_initializer {
};
struct nicvf_rxq {
+ MARKER rxq_fastpath_data_start;
+ uint8_t rbptr_offset;
+ uint16_t rx_free_thresh;
+ uint32_t head;
+ uint32_t qlen_mask;
+ int32_t recv_buffers;
+ int32_t available_space;
uint64_t mbuf_phys_off;
uintptr_t cq_status;
uintptr_t cq_door;
- union mbuf_initializer mbuf_initializer;
- nicvf_iova_addr_t phys;
- union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;
- struct nicvf *nic;
struct rte_mempool *pool;
- uint32_t head;
- uint32_t qlen_mask;
- int32_t available_space;
- int32_t recv_buffers;
- uint16_t rx_free_thresh;
- uint16_t queue_id;
- uint16_t precharge_cnt;
+ union cq_entry_t *desc;
+ union mbuf_initializer mbuf_initializer;
+ MARKER rxq_fastpath_data_end;
uint8_t rx_drop_en;
+ uint16_t precharge_cnt;
uint16_t port_id;
- uint8_t rbptr_offset;
+ uint16_t queue_id;
+ struct nicvf *nic;
+ nicvf_iova_addr_t phys;
} __rte_cache_aligned;
struct nicvf {
@@ -85,6 +87,8 @@ struct nicvf {
bool loopback_supported;
bool pf_acked:1;
bool pf_nacked:1;
+ bool offload_cksum:1;
+ bool vlan_strip:1;
uint64_t hwcap;
uint8_t link_up;
uint8_t duplex;
@@ -99,6 +103,7 @@ struct nicvf {
struct rte_intr_handle intr_handle;
uint8_t cpi_alg;
uint16_t mtu;
+ int skip_bytes;
bool vlan_filter_en;
uint8_t mac_addr[ETHER_ADDR_LEN];
/* secondary queue set support */
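
Note: the nicvf_rxq reshuffle gathers every field the Rx fast path touches between the rxq_fastpath_data_start/end markers; the RTE_BUILD_BUG_ON added to nicvf_rxq_mbuf_setup() in the nicvf_ethdev.c hunk above then pins that span to 128 bytes at most, one cache line on ThunderX, while cold fields (precharge_cnt, port_id, the nic back-pointer, phys) move past the end marker.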
diff --git a/drivers/net/vdev_netvsc/Makefile b/drivers/net/vdev_netvsc/Makefile
index 7be17137..690cb8f8 100644
--- a/drivers/net/vdev_netvsc/Makefile
+++ b/drivers/net/vdev_netvsc/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2017 6WIND S.A.
-# Copyright 2017 Mellanox Technologies, Ltd.
+# Copyright 2017 Mellanox Technologies, Ltd
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/net/vdev_netvsc/vdev_netvsc.c b/drivers/net/vdev_netvsc/vdev_netvsc.c
index cbf4d590..48717f2f 100644
--- a/drivers/net/vdev_netvsc/vdev_netvsc.c
+++ b/drivers/net/vdev_netvsc/vdev_netvsc.c
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/sockios.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <netinet/ip.h>
@@ -33,9 +35,11 @@
#include <rte_hypervisor.h>
#include <rte_kvargs.h>
#include <rte_log.h>
+#include <rte_string_fns.h>
#define VDEV_NETVSC_DRIVER net_vdev_netvsc
#define VDEV_NETVSC_DRIVER_NAME RTE_STR(VDEV_NETVSC_DRIVER)
+#define VDEV_NETVSC_DRIVER_NAME_LEN 15
#define VDEV_NETVSC_ARG_IFACE "iface"
#define VDEV_NETVSC_ARG_MAC "mac"
#define VDEV_NETVSC_ARG_FORCE "force"
@@ -96,6 +100,43 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx)
}
/**
+ * Determine if a network interface is NetVSC.
+ *
+ * @param[in] iface
+ * Pointer to netdevice description structure (name and index).
+ *
+ * @return
+ * A nonzero value when interface is detected as NetVSC. In case of error,
+ * rte_errno is updated and 0 returned.
+ */
+static int
+vdev_netvsc_iface_is_netvsc(const struct if_nameindex *iface)
+{
+ static const char temp[] = "/sys/class/net/%s/device/class_id";
+ char path[sizeof(temp) + IF_NAMESIZE];
+ FILE *f;
+ int ret;
+ int len = 0;
+
+ ret = snprintf(path, sizeof(path), temp, iface->if_name);
+ if (ret == -1 || (size_t)ret >= sizeof(path)) {
+ rte_errno = ENOBUFS;
+ return 0;
+ }
+ f = fopen(path, "r");
+ if (!f) {
+ rte_errno = errno;
+ return 0;
+ }
+ ret = fscanf(f, NETVSC_CLASS_ID "%n", &len);
+ if (ret == EOF)
+ rte_errno = errno;
+ ret = len == (int)strlen(NETVSC_CLASS_ID);
+ fclose(f);
+ return ret;
+}
+
+/**
* Iterate over system network interfaces.
*
* This function runs a given callback function for each netdevice found on
@@ -104,6 +145,8 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx)
* @param func
* Callback function pointer. List traversal is aborted when this function
* returns a nonzero value.
+ * @param is_netvsc
+ * Indicates the device type to iterate - netvsc or non-netvsc.
* @param ...
* Variable parameter list passed as @p va_list to @p func.
*
@@ -115,7 +158,7 @@ vdev_netvsc_ctx_destroy(struct vdev_netvsc_ctx *ctx)
static int
vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface,
const struct ether_addr *eth_addr,
- va_list ap), ...)
+ va_list ap), int is_netvsc, ...)
{
struct if_nameindex *iface = if_nameindex();
int s = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
@@ -133,11 +176,15 @@ vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface,
goto error;
}
for (i = 0; iface[i].if_name; ++i) {
+ int is_netvsc_ret;
struct ifreq req;
struct ether_addr eth_addr;
va_list ap;
- strncpy(req.ifr_name, iface[i].if_name, sizeof(req.ifr_name));
+ is_netvsc_ret = vdev_netvsc_iface_is_netvsc(&iface[i]) ? 1 : 0;
+ if (is_netvsc ^ is_netvsc_ret)
+ continue;
+ strlcpy(req.ifr_name, iface[i].if_name, sizeof(req.ifr_name));
if (ioctl(s, SIOCGIFHWADDR, &req) == -1) {
DRV_LOG(WARNING, "cannot retrieve information about"
" interface \"%s\": %s",
@@ -151,7 +198,7 @@ vdev_netvsc_foreach_iface(int (*func)(const struct if_nameindex *iface,
}
memcpy(eth_addr.addr_bytes, req.ifr_hwaddr.sa_data,
RTE_DIM(eth_addr.addr_bytes));
- va_start(ap, func);
+ va_start(ap, is_netvsc);
ret = func(&iface[i], &eth_addr, ap);
va_end(ap);
if (ret)
@@ -166,77 +213,100 @@ error:
}
/**
- * Determine if a network interface is NetVSC.
- *
- * @param[in] iface
- * Pointer to netdevice description structure (name and index).
- *
- * @return
- * A nonzero value when interface is detected as NetVSC. In case of error,
- * rte_errno is updated and 0 returned.
- */
-static int
-vdev_netvsc_iface_is_netvsc(const struct if_nameindex *iface)
-{
- static const char temp[] = "/sys/class/net/%s/device/class_id";
- char path[sizeof(temp) + IF_NAMESIZE];
- FILE *f;
- int ret;
- int len = 0;
-
- ret = snprintf(path, sizeof(path), temp, iface->if_name);
- if (ret == -1 || (size_t)ret >= sizeof(path)) {
- rte_errno = ENOBUFS;
- return 0;
- }
- f = fopen(path, "r");
- if (!f) {
- rte_errno = errno;
- return 0;
- }
- ret = fscanf(f, NETVSC_CLASS_ID "%n", &len);
- if (ret == EOF)
- rte_errno = errno;
- ret = len == (int)strlen(NETVSC_CLASS_ID);
- fclose(f);
- return ret;
-}
-
-/**
* Determine if a network interface has a route.
*
* @param[in] name
* Network device name.
+ * @param[in] family
+ * Address family: AF_INET for IPv4 or AF_INET6 for IPv6.
*
* @return
- * A nonzero value when interface has an route. In case of error,
- * rte_errno is updated and 0 returned.
+ * 1 when interface has a route, negative errno value in case of error and
+ * 0 otherwise.
*/
static int
-vdev_netvsc_has_route(const char *name)
+vdev_netvsc_has_route(const struct if_nameindex *iface,
+ const unsigned char family)
{
- FILE *fp;
+ /*
+	 * Using getifaddrs() would be simpler, but it supports IPv6 only
+	 * starting from glibc 2.3.3.
+ */
+ char buf[4096];
+ int len;
int ret = 0;
- char route[NETVSC_MAX_ROUTE_LINE_SIZE];
- char *netdev;
-
- fp = fopen("/proc/net/route", "r");
- if (!fp) {
- rte_errno = errno;
- return 0;
+ int res;
+ int sock;
+ struct nlmsghdr *retmsg = (struct nlmsghdr *)buf;
+ struct sockaddr_nl sa;
+ struct {
+ struct nlmsghdr nlhdr;
+ struct ifaddrmsg addrmsg;
+ } msg;
+
+ if (!iface || (family != AF_INET && family != AF_INET6)) {
+ DRV_LOG(ERR, "%s", rte_strerror(EINVAL));
+ return -EINVAL;
}
- while (fgets(route, NETVSC_MAX_ROUTE_LINE_SIZE, fp) != NULL) {
- netdev = strtok(route, "\t");
- if (strcmp(netdev, name) == 0) {
- ret = 1;
- break;
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock == -1) {
+ DRV_LOG(ERR, "cannot open socket: %s", rte_strerror(errno));
+ return -errno;
+ }
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+ sa.nl_groups = RTMGRP_LINK | RTMGRP_IPV4_IFADDR;
+ res = bind(sock, (struct sockaddr *)&sa, sizeof(sa));
+ if (res == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot bind socket: %s", rte_strerror(errno));
+ goto close;
+ }
+ memset(&msg, 0, sizeof(msg));
+ msg.nlhdr.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
+ msg.nlhdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+ msg.nlhdr.nlmsg_type = RTM_GETADDR;
+ msg.nlhdr.nlmsg_pid = getpid();
+ msg.addrmsg.ifa_family = family;
+ msg.addrmsg.ifa_index = iface->if_index;
+ res = send(sock, &msg, msg.nlhdr.nlmsg_len, 0);
+ if (res == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot send socket message: %s",
+ rte_strerror(errno));
+ goto close;
+ }
+ memset(buf, 0, sizeof(buf));
+ len = recv(sock, buf, sizeof(buf), 0);
+ if (len == -1) {
+ ret = -errno;
+ DRV_LOG(ERR, "cannot receive socket message: %s",
+ rte_strerror(errno));
+ goto close;
+ }
+ while (NLMSG_OK(retmsg, (unsigned int)len)) {
+ struct ifaddrmsg *retaddr =
+ (struct ifaddrmsg *)NLMSG_DATA(retmsg);
+
+ if (retaddr->ifa_family == family &&
+ retaddr->ifa_index == iface->if_index) {
+ struct rtattr *retrta = IFA_RTA(retaddr);
+ int attlen = IFA_PAYLOAD(retmsg);
+
+ while (RTA_OK(retrta, attlen)) {
+ if (retrta->rta_type == IFA_ADDRESS) {
+ ret = 1;
+ DRV_LOG(DEBUG, "interface %s has IP",
+ iface->if_name);
+ goto close;
+ }
+ retrta = RTA_NEXT(retrta, attlen);
+ }
}
- /* Move file pointer to the next line. */
- while (strchr(route, '\n') == NULL &&
- fgets(route, NETVSC_MAX_ROUTE_LINE_SIZE, fp) != NULL)
- ;
+ retmsg = NLMSG_NEXT(retmsg, len);
}
- fclose(fp);
+close:
+ close(sock);
return ret;
}
@@ -259,12 +329,15 @@ static int
vdev_netvsc_sysfs_readlink(char *buf, size_t size, const char *if_name,
const char *relpath)
{
+ struct vdev_netvsc_ctx *ctx;
+ char in[RTE_MAX(sizeof(ctx->yield), 256u)];
int ret;
- ret = snprintf(buf, size, "/sys/class/net/%s/%s", if_name, relpath);
- if (ret == -1 || (size_t)ret >= size)
+ ret = snprintf(in, sizeof(in) - 1, "/sys/class/net/%s/%s",
+ if_name, relpath);
+ if (ret == -1 || (size_t)ret >= sizeof(in))
return -ENOBUFS;
- ret = readlink(buf, buf, size);
+ ret = readlink(in, buf, size);
if (ret == -1)
return -errno;
if ((size_t)ret >= size - 1)
@@ -314,11 +387,9 @@ vdev_netvsc_device_probe(const struct if_nameindex *iface,
DRV_LOG(DEBUG,
"NetVSC interface \"%s\" (index %u) renamed \"%s\"",
ctx->if_name, ctx->if_index, iface->if_name);
- strncpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
+ strlcpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
return 0;
}
- if (vdev_netvsc_iface_is_netvsc(iface))
- return 0;
if (!is_same_ether_addr(eth_addr, &ctx->if_addr))
return 0;
/* Look for associated PCI device. */
@@ -387,7 +458,8 @@ vdev_netvsc_alarm(__rte_unused void *arg)
int ret;
LIST_FOREACH(ctx, &vdev_netvsc_ctx_list, entry) {
- ret = vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx);
+ ret = vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0,
+ ctx);
if (ret < 0)
break;
}
@@ -443,7 +515,6 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
{
const char *name = va_arg(ap, const char *);
struct rte_kvargs *kvargs = va_arg(ap, struct rte_kvargs *);
- int force = va_arg(ap, int);
unsigned int specified = va_arg(ap, unsigned int);
unsigned int *matched = va_arg(ap, unsigned int *);
unsigned int i;
@@ -497,18 +568,12 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
iface->if_name, iface->if_index);
return 0;
}
- if (!vdev_netvsc_iface_is_netvsc(iface)) {
- if (!specified || !force)
- return 0;
- DRV_LOG(WARNING,
- "using non-NetVSC interface \"%s\" (index %u)",
- iface->if_name, iface->if_index);
- }
/* Routed NetVSC should not be probed. */
- if (vdev_netvsc_has_route(iface->if_name)) {
- if (!specified || !force)
+ if (vdev_netvsc_has_route(iface, AF_INET) ||
+ vdev_netvsc_has_route(iface, AF_INET6)) {
+ if (!specified)
return 0;
- DRV_LOG(WARNING, "using routed NetVSC interface \"%s\""
+ DRV_LOG(WARNING, "probably using routed NetVSC interface \"%s\""
" (index %u)", iface->if_name, iface->if_index);
}
/* Create interface context. */
@@ -520,7 +585,7 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
goto error;
}
ctx->id = vdev_netvsc_ctx_count;
- strncpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
+ strlcpy(ctx->if_name, iface->if_name, sizeof(ctx->if_name));
ctx->if_index = iface->if_index;
ctx->if_addr = *eth_addr;
ctx->pipe[0] = -1;
@@ -551,13 +616,13 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
name, ctx->id);
if (ret == -1 || (size_t)ret >= sizeof(ctx->name))
++i;
- ret = snprintf(ctx->devname, sizeof(ctx->devname), "net_failsafe_%s",
- ctx->name);
+ ret = snprintf(ctx->devname, sizeof(ctx->devname), "net_failsafe_vsc%u",
+ ctx->id);
if (ret == -1 || (size_t)ret >= sizeof(ctx->devname))
++i;
ret = snprintf(ctx->devargs, sizeof(ctx->devargs),
- "fd(%d),dev(net_tap_%s,remote=%s)",
- ctx->pipe[0], ctx->name, ctx->if_name);
+ "fd(%d),dev(net_tap_vsc%u,remote=%s)",
+ ctx->pipe[0], ctx->id, ctx->if_name);
if (ret == -1 || (size_t)ret >= sizeof(ctx->devargs))
++i;
if (i) {
@@ -569,7 +634,7 @@ vdev_netvsc_netvsc_probe(const struct if_nameindex *iface,
/* Request virtual device generation. */
DRV_LOG(DEBUG, "generating virtual device \"%s\" with arguments \"%s\"",
ctx->devname, ctx->devargs);
- vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, ctx);
+ vdev_netvsc_foreach_iface(vdev_netvsc_device_probe, 0, ctx);
ret = rte_eal_hotplug_add("vdev", ctx->devname, ctx->devargs);
if (ret)
goto error;
@@ -639,16 +704,32 @@ vdev_netvsc_vdev_probe(struct rte_vdev_device *dev)
rte_kvargs_free(kvargs);
return 0;
}
+ if (specified > 1) {
+ DRV_LOG(ERR, "More than one way used to specify the netvsc"
+ " device.");
+ goto error;
+ }
rte_eal_alarm_cancel(vdev_netvsc_alarm, NULL);
/* Gather interfaces. */
- ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, name, kvargs,
- force, specified, &matched);
+ ret = vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 1, name,
+ kvargs, specified, &matched);
if (ret < 0)
goto error;
- if (matched < specified)
- DRV_LOG(WARNING,
- "some of the specified parameters did not match"
- " recognized network interfaces");
+ if (specified && matched < specified) {
+ if (!force) {
+ DRV_LOG(ERR, "Cannot find the specified netvsc device");
+ goto error;
+ }
+ /* Try to force probing on non-netvsc specified device. */
+ if (vdev_netvsc_foreach_iface(vdev_netvsc_netvsc_probe, 0, name,
+ kvargs, specified, &matched) < 0)
+ goto error;
+ if (matched < specified) {
+ DRV_LOG(ERR, "Cannot find the specified device");
+ goto error;
+ }
+ DRV_LOG(WARNING, "non-netvsc device was probed as netvsc");
+ }
ret = rte_eal_alarm_set(VDEV_NETVSC_PROBE_MS * 1000,
vdev_netvsc_alarm, NULL);
if (ret < 0) {
@@ -718,7 +799,8 @@ static int
vdev_netvsc_cmp_rte_device(const struct rte_device *dev1,
__rte_unused const void *_dev2)
{
- return strcmp(dev1->devargs->name, VDEV_NETVSC_DRIVER_NAME);
+ return strncmp(dev1->devargs->name, VDEV_NETVSC_DRIVER_NAME,
+ VDEV_NETVSC_DRIVER_NAME_LEN);
}
/**
@@ -733,14 +815,15 @@ vdev_netvsc_scan_callback(__rte_unused void *arg)
struct rte_devargs *devargs;
struct rte_bus *vbus = rte_bus_find_by_name("vdev");
- TAILQ_FOREACH(devargs, &devargs_list, next)
- if (!strcmp(devargs->name, VDEV_NETVSC_DRIVER_NAME))
+ RTE_EAL_DEVARGS_FOREACH("vdev", devargs)
+ if (!strncmp(devargs->name, VDEV_NETVSC_DRIVER_NAME,
+ VDEV_NETVSC_DRIVER_NAME_LEN))
return;
dev = (struct rte_vdev_device *)vbus->find_device(NULL,
vdev_netvsc_cmp_rte_device, VDEV_NETVSC_DRIVER_NAME);
if (dev)
return;
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME))
+ if (rte_devargs_add(RTE_DEVTYPE_VIRTUAL, VDEV_NETVSC_DRIVER_NAME))
DRV_LOG(ERR, "unable to add netvsc devargs.");
}
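
Note: probing now pre-filters interfaces by NetVSC class through the new is_netvsc argument to vdev_netvsc_foreach_iface(), and the force parameter is only honoured as a second pass once no NetVSC interface matched; specifying the device in more than one way (e.g. both iface= and mac=) now fails outright. A hypothetical invocation selecting an interface by name, with the interface name made up:

    testpmd -l 0-1 --vdev=net_vdev_netvsc0,iface=eth1 -- -i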
diff --git a/drivers/net/vhost/meson.build b/drivers/net/vhost/meson.build
new file mode 100644
index 00000000..9b067c35
--- /dev/null
+++ b/drivers/net/vhost/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+build = dpdk_conf.has('RTE_LIBRTE_VHOST')
+version = 2
+sources = files('rte_eth_vhost.c')
+install_headers('rte_eth_vhost.h')
+deps += 'vhost'
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 3aae01c3..e58f3221 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2016 IGEL Co., Ltd.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IGEL Co.,Ltd. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 IGEL Co., Ltd.
+ * Copyright(c) 2016-2018 Intel Corporation
*/
#include <unistd.h>
#include <pthread.h>
@@ -46,6 +18,11 @@
#include "rte_eth_vhost.h"
+static int vhost_logtype;
+
+#define VHOST_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, vhost_logtype, __VA_ARGS__)
+
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
#define ETH_VHOST_IFACE_ARG "iface"
@@ -117,7 +94,9 @@ struct pmd_internal {
char *dev_name;
char *iface_name;
uint16_t max_queues;
+ int vid;
rte_atomic32_t started;
+ uint8_t vlan_strip;
};
struct internal_list {
@@ -421,6 +400,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
+ bufs[i]->vlan_tci = 0;
+
+ if (r->internal->vlan_strip)
+ rte_vlan_strip(bufs[i]);
+
r->stats.bytes += bufs[i]->pkt_len;
}
@@ -437,7 +421,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
struct vhost_queue *r = q;
uint16_t i, nb_tx = 0;
- uint16_t nb_send = nb_bufs;
+ uint16_t nb_send = 0;
if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
return 0;
@@ -447,6 +431,22 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
goto out;
+ for (i = 0; i < nb_bufs; i++) {
+ struct rte_mbuf *m = bufs[i];
+
+ /* Do VLAN tag insertion */
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ int error = rte_vlan_insert(&m);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+ }
+
+ bufs[nb_send] = m;
+ ++nb_send;
+ }
+
/* Enqueue packets to guest RX queue */
while (nb_send) {
uint16_t nb_pkts;
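The new loop above adds software VLAN insertion on the Tx path: mbufs flagged with PKT_TX_VLAN_PKT get a tag spliced in by rte_vlan_insert(), failures are freed and dropped, and the array is compacted so nb_send counts only the survivors. On the application side the offload is requested per packet, roughly as in this sketch (port/queue ids and the VLAN id are placeholders):

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Tag one packet and hand it to the PMD (illustrative). */
    static uint16_t
    send_tagged(uint16_t port_id, struct rte_mbuf *m, uint16_t vlan_id)
    {
        m->ol_flags |= PKT_TX_VLAN_PKT; /* request tag insertion */
        m->vlan_tci = vlan_id;          /* TCI, PCP/DEI left zero */
        return rte_eth_tx_burst(port_id, 0, &m, 1);
    }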
@@ -488,6 +488,11 @@ out:
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
+ struct pmd_internal *internal = dev->data->dev_private;
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ internal->vlan_strip = !!(rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+
return 0;
}
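eth_dev_configure() now latches vlan_strip from the per-port offloads bitmask, the 18.08 replacement for the old rxmode bit-fields. An application opts in at configure time along these lines (a sketch; port id and queue counts are placeholders, capability and error checks elided):

    #include <rte_ethdev.h>

    static int
    configure_vlan_strip(uint16_t port_id)
    {
        struct rte_eth_conf conf = {
            .rxmode = { .offloads = DEV_RX_OFFLOAD_VLAN_STRIP },
        };
        /* 1 Rx / 1 Tx queue; queue setup happens separately */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }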
@@ -519,6 +524,136 @@ find_internal_resource(char *ifname)
return list;
}
+static int
+eth_rxq_intr_enable(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int ret = 0;
+
+ vq = dev->data->rx_queues[qid];
+ if (!vq) {
+ VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
+ return -1;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(ERR, "Failed to get rxq%d's vring\n", qid);
+ return ret;
+ }
+ VHOST_LOG(INFO, "Enable interrupt for rxq%d\n", qid);
+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 1);
+ rte_wmb();
+
+ return ret;
+}
+
+static int
+eth_rxq_intr_disable(struct rte_eth_dev *dev, uint16_t qid)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int ret = 0;
+
+ vq = dev->data->rx_queues[qid];
+ if (!vq) {
+ VHOST_LOG(ERR, "rxq%d is not setup yet\n", qid);
+ return -1;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (qid << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(ERR, "Failed to get rxq%d's vring", qid);
+ return ret;
+ }
+ VHOST_LOG(INFO, "Disable interrupt for rxq%d\n", qid);
+ rte_vhost_enable_guest_notification(vq->vid, (qid << 1) + 1, 0);
+ rte_wmb();
+
+ return 0;
+}
+
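Both interrupt helpers translate an ethdev Rx queue id to a vhost vring index as (qid << 1) + 1: queues come in virtqueue pairs, and the host's Rx side is the guest's Tx ring, i.e. the odd index of each pair. Spelled out as a sketch, assuming the fixed RXQ/TXQ layout this driver uses:

    /* ethdev queue id -> vhost vring index, per the code above. */
    static inline uint16_t rxq_to_vring(uint16_t qid) { return (qid << 1) + 1; }
    static inline uint16_t txq_to_vring(uint16_t qid) { return (qid << 1); }
    /* e.g. rxq0 -> vring1, rxq1 -> vring3; txq0 -> vring0 */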
+static void
+eth_vhost_uninstall_intr(struct rte_eth_dev *dev)
+{
+ struct rte_intr_handle *intr_handle = dev->intr_handle;
+
+ if (intr_handle) {
+ if (intr_handle->intr_vec)
+ free(intr_handle->intr_vec);
+ free(intr_handle);
+ }
+
+ dev->intr_handle = NULL;
+}
+
+static int
+eth_vhost_install_intr(struct rte_eth_dev *dev)
+{
+ struct rte_vhost_vring vring;
+ struct vhost_queue *vq;
+ int count = 0;
+ int nb_rxq = dev->data->nb_rx_queues;
+ int i;
+ int ret;
+
+ /* uninstall first if we are reconnecting */
+ if (dev->intr_handle)
+ eth_vhost_uninstall_intr(dev);
+
+ dev->intr_handle = malloc(sizeof(*dev->intr_handle));
+ if (!dev->intr_handle) {
+ VHOST_LOG(ERR, "Fail to allocate intr_handle\n");
+ return -ENOMEM;
+ }
+ memset(dev->intr_handle, 0, sizeof(*dev->intr_handle));
+
+ dev->intr_handle->efd_counter_size = sizeof(uint64_t);
+
+ dev->intr_handle->intr_vec =
+ malloc(nb_rxq * sizeof(dev->intr_handle->intr_vec[0]));
+
+ if (!dev->intr_handle->intr_vec) {
+ VHOST_LOG(ERR,
+ "Failed to allocate memory for interrupt vector\n");
+ free(dev->intr_handle);
+ return -ENOMEM;
+ }
+
+ VHOST_LOG(INFO, "Prepare intr vec\n");
+ for (i = 0; i < nb_rxq; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq) {
+ VHOST_LOG(INFO, "rxq-%d not setup yet, skip!\n", i);
+ continue;
+ }
+
+ ret = rte_vhost_get_vhost_vring(vq->vid, (i << 1) + 1, &vring);
+ if (ret < 0) {
+ VHOST_LOG(INFO,
+ "Failed to get rxq-%d's vring, skip!\n", i);
+ continue;
+ }
+
+ if (vring.kickfd < 0) {
+ VHOST_LOG(INFO,
+ "rxq-%d's kickfd is invalid, skip!\n", i);
+ continue;
+ }
+ dev->intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ dev->intr_handle->efds[i] = vring.kickfd;
+ count++;
+ VHOST_LOG(INFO, "Installed intr vec for rxq-%d\n", i);
+ }
+
+ dev->intr_handle->nb_efd = count;
+ dev->intr_handle->max_intr = count + 1;
+ dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+
+ return 0;
+}
+
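eth_vhost_install_intr() publishes each Rx vring's kickfd through a vdev-type interrupt handle, which is what lets the generic ethdev Rx-interrupt machinery work on vhost ports. A minimal consumer sketch, assuming intr_conf.rxq was set before rte_eth_dev_configure(); ids are placeholders and error handling is elided:

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    static void
    sleep_until_rx(uint16_t port_id, uint16_t qid)
    {
        struct rte_epoll_event ev;

        /* bind the queue's event fd into this thread's epoll set */
        rte_eth_dev_rx_intr_ctl_q(port_id, qid, RTE_EPOLL_PER_THREAD,
                                  RTE_INTR_EVENT_ADD, NULL);
        rte_eth_dev_rx_intr_enable(port_id, qid);  /* ask for guest kicks */
        rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
        rte_eth_dev_rx_intr_disable(port_id, qid); /* back to pure polling */
    }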
static void
update_queuing_status(struct rte_eth_dev *dev)
{
@@ -527,6 +662,9 @@ update_queuing_status(struct rte_eth_dev *dev)
unsigned int i;
int allow_queuing = 1;
+ if (!dev->data->rx_queues || !dev->data->tx_queues)
+ return;
+
if (rte_atomic32_read(&internal->started) == 0 ||
rte_atomic32_read(&internal->dev_attached) == 0)
allow_queuing = 0;
@@ -551,13 +689,37 @@ update_queuing_status(struct rte_eth_dev *dev)
}
}
+static void
+queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
+{
+ struct vhost_queue *vq;
+ int i;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ vq = eth_dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = internal->vid;
+ vq->internal = internal;
+ vq->port = eth_dev->data->port_id;
+ }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ vq = eth_dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = internal->vid;
+ vq->internal = internal;
+ vq->port = eth_dev->data->port_id;
+ }
+}
+
static int
new_device(int vid)
{
struct rte_eth_dev *eth_dev;
struct internal_list *list;
struct pmd_internal *internal;
- struct vhost_queue *vq;
+ struct rte_eth_conf *dev_conf;
unsigned i;
char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
@@ -567,12 +729,13 @@ new_device(int vid)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
+ VHOST_LOG(INFO, "Invalid device name: %s\n", ifname);
return -1;
}
eth_dev = list->eth_dev;
internal = eth_dev->data->dev_private;
+ dev_conf = &eth_dev->data->dev_conf;
#ifdef RTE_LIBRTE_VHOST_NUMA
newnode = rte_vhost_get_numa_node(vid);
@@ -580,21 +743,19 @@ new_device(int vid)
eth_dev->data->numa_node = newnode;
#endif
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = vid;
- vq->internal = internal;
- vq->port = eth_dev->data->port_id;
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = vid;
- vq->internal = internal;
- vq->port = eth_dev->data->port_id;
+ internal->vid = vid;
+ if (rte_atomic32_read(&internal->started) == 1) {
+ queue_setup(eth_dev, internal);
+
+ if (dev_conf->intr_conf.rxq) {
+ if (eth_vhost_install_intr(eth_dev) < 0) {
+ VHOST_LOG(ERR,
+ "Failed to install interrupt handler.\n");
+ return -1;
+ }
+ }
+ } else {
+ VHOST_LOG(INFO, "RX/TX queues not exist yet\n");
}
for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
@@ -607,7 +768,7 @@ new_device(int vid)
rte_atomic32_set(&internal->dev_attached, 1);
update_queuing_status(eth_dev);
- RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);
+ VHOST_LOG(INFO, "Vhost device %d created\n", vid);
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
@@ -628,7 +789,7 @@ destroy_device(int vid)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
+ VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
return;
}
eth_dev = list->eth_dev;
@@ -639,17 +800,19 @@ destroy_device(int vid)
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = -1;
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- vq->vid = -1;
+ if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ vq = eth_dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = -1;
+ }
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ vq = eth_dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->vid = -1;
+ }
}
state = vring_states[eth_dev->data->port_id];
@@ -661,7 +824,8 @@ destroy_device(int vid)
state->max_vring = 0;
rte_spinlock_unlock(&state->lock);
- RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);
+ VHOST_LOG(INFO, "Vhost device %d destroyed\n", vid);
+ eth_vhost_uninstall_intr(eth_dev);
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -677,7 +841,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
list = find_internal_resource(ifname);
if (list == NULL) {
- RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
+ VHOST_LOG(ERR, "Invalid interface name: %s\n", ifname);
return -1;
}
@@ -689,7 +853,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
state->max_vring = RTE_MAX(vring, state->max_vring);
rte_spinlock_unlock(&state->lock);
- RTE_LOG(INFO, PMD, "vring%u is %s\n",
+ VHOST_LOG(INFO, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
@@ -712,13 +876,13 @@ rte_eth_vhost_get_queue_event(uint16_t port_id,
int idx;
if (port_id >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, PMD, "Invalid port id\n");
+ VHOST_LOG(ERR, "Invalid port id\n");
return -1;
}
state = vring_states[port_id];
if (!state) {
- RTE_LOG(ERR, PMD, "Unused port\n");
+ VHOST_LOG(ERR, "Unused port\n");
return -1;
}
@@ -770,12 +934,25 @@ rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
}
static int
-eth_dev_start(struct rte_eth_dev *dev)
+eth_dev_start(struct rte_eth_dev *eth_dev)
{
- struct pmd_internal *internal = dev->data->dev_private;
+ struct pmd_internal *internal = eth_dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+
+ queue_setup(eth_dev, internal);
+
+ if (rte_atomic32_read(&internal->dev_attached) == 1) {
+ if (dev_conf->intr_conf.rxq) {
+ if (eth_vhost_install_intr(eth_dev) < 0) {
+ VHOST_LOG(ERR,
+ "Failed to install interrupt handler.\n");
+ return -1;
+ }
+ }
+ }
rte_atomic32_set(&internal->started, 1);
- update_queuing_status(dev);
+ update_queuing_status(eth_dev);
return 0;
}
@@ -813,10 +990,13 @@ eth_dev_close(struct rte_eth_dev *dev)
pthread_mutex_unlock(&internal_list_lock);
rte_free(list);
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- rte_free(dev->data->rx_queues[i]);
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- rte_free(dev->data->tx_queues[i]);
+ if (dev->data->rx_queues)
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rte_free(dev->data->rx_queues[i]);
+
+ if (dev->data->tx_queues)
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_free(dev->data->tx_queues[i]);
rte_free(dev->data->mac_addrs);
free(internal->dev_name);
@@ -838,7 +1018,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (vq == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
+ VHOST_LOG(ERR, "Failed to allocate memory for rx queue\n");
return -ENOMEM;
}
@@ -860,7 +1040,7 @@ eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (vq == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
+ VHOST_LOG(ERR, "Failed to allocate memory for tx queue\n");
return -ENOMEM;
}
@@ -878,7 +1058,7 @@ eth_dev_info(struct rte_eth_dev *dev,
internal = dev->data->dev_private;
if (internal == NULL) {
- RTE_LOG(ERR, PMD, "Invalid device specified\n");
+ VHOST_LOG(ERR, "Invalid device specified\n");
return;
}
@@ -887,6 +1067,11 @@ eth_dev_info(struct rte_eth_dev *dev,
dev_info->max_rx_queues = internal->max_queues;
dev_info->max_tx_queues = internal->max_queues;
dev_info->min_rx_bufsize = 0;
+
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -1007,6 +1192,8 @@ static const struct eth_dev_ops ops = {
.xstats_reset = vhost_dev_xstats_reset,
.xstats_get = vhost_dev_xstats_get,
.xstats_get_names = vhost_dev_xstats_get_names,
+ .rx_queue_intr_enable = eth_rxq_intr_enable,
+ .rx_queue_intr_disable = eth_rxq_intr_disable,
};
static struct rte_vdev_driver pmd_vhost_drv;
@@ -1016,23 +1203,16 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
int16_t queues, const unsigned int numa_node, uint64_t flags)
{
const char *name = rte_vdev_device_name(dev);
- struct rte_eth_dev_data *data = NULL;
+ struct rte_eth_dev_data *data;
struct pmd_internal *internal = NULL;
struct rte_eth_dev *eth_dev = NULL;
struct ether_addr *eth_addr = NULL;
struct rte_vhost_vring_state *vring_state = NULL;
struct internal_list *list = NULL;
- RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
+ VHOST_LOG(INFO, "Creating VHOST-USER backend on numa socket %u\n",
numa_node);
- /* now do all data allocation - for eth_dev structure and internal
- * (private) data
- */
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto error;
-
list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
if (list == NULL)
goto error;
@@ -1074,15 +1254,11 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
rte_spinlock_init(&vring_state->lock);
vring_states[eth_dev->data->port_id] = vring_state;
- /* We'll replace the 'data' originally allocated by eth_dev. So the
- * vhost PMD resources won't be shared between multi processes.
- */
- rte_memcpy(data, eth_dev->data, sizeof(*data));
- eth_dev->data = data;
-
+ data = eth_dev->data;
data->nb_rx_queues = queues;
data->nb_tx_queues = queues;
internal->max_queues = queues;
+ internal->vid = -1;
data->dev_link = pmd_link;
data->mac_addrs = eth_addr;
data->dev_flags = RTE_ETH_DEV_INTR_LSC;
@@ -1097,16 +1273,17 @@ eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
goto error;
if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
- RTE_LOG(ERR, PMD, "Can't register callbacks\n");
+ VHOST_LOG(ERR, "Can't register callbacks\n");
goto error;
}
if (rte_vhost_driver_start(iface_name) < 0) {
- RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
+ VHOST_LOG(ERR, "Failed to start driver for %s\n",
iface_name);
goto error;
}
+ rte_eth_dev_probing_finish(eth_dev);
return data->port_id;
error:
@@ -1120,7 +1297,6 @@ error:
rte_eth_dev_release_port(eth_dev);
rte_free(internal);
rte_free(list);
- rte_free(data);
return -1;
}
@@ -1164,9 +1340,24 @@ rte_pmd_vhost_probe(struct rte_vdev_device *dev)
int client_mode = 0;
int dequeue_zero_copy = 0;
int iommu_support = 0;
+ struct rte_eth_dev *eth_dev;
+ const char *name = rte_vdev_device_name(dev);
+
+ VHOST_LOG(INFO, "Initializing pmd_vhost for %s\n", name);
- RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
- rte_vdev_device_name(dev));
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ strlen(rte_vdev_device_args(dev)) == 0) {
+ eth_dev = rte_eth_dev_attach_secondary(name);
+ if (!eth_dev) {
+ VHOST_LOG(ERR, "Failed to probe %s\n", name);
+ return -1;
+ }
+ /* TODO: request info from primary to set up Rx and Tx */
+ eth_dev->dev_ops = &ops;
+ eth_dev->device = &dev->device;
+ rte_eth_dev_probing_finish(eth_dev);
+ return 0;
+ }
kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL)
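The new branch above lets a secondary process attach to a vhost port the primary already created: the shared ethdev data is looked up by name and only the per-process pieces (ops, device pointer) are filled in, since Rx/Tx setup still has to come from the primary (see the TODO). The general shape of the pattern, as an illustrative sketch with placeholder names:

    #include <rte_ethdev_driver.h>

    /* Attach to an existing port in a secondary process (sketch). */
    static int
    attach_in_secondary(const char *name, const struct eth_dev_ops *ops)
    {
        struct rte_eth_dev *edev = rte_eth_dev_attach_secondary(name);

        if (edev == NULL)
            return -1;          /* the primary never probed this port */
        edev->dev_ops = ops;    /* function pointers are per process */
        rte_eth_dev_probing_finish(edev);
        return 0;
    }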
@@ -1239,7 +1430,7 @@ rte_pmd_vhost_remove(struct rte_vdev_device *dev)
struct rte_eth_dev *eth_dev = NULL;
name = rte_vdev_device_name(dev);
- RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);
+ VHOST_LOG(INFO, "Un-Initializing pmd_vhost for %s\n", name);
/* find an ethdev entry */
eth_dev = rte_eth_dev_allocated(name);
@@ -1251,8 +1442,6 @@ rte_pmd_vhost_remove(struct rte_vdev_device *dev)
rte_free(vring_states[eth_dev->data->port_id]);
vring_states[eth_dev->data->port_id] = NULL;
- rte_free(eth_dev->data);
-
rte_eth_dev_release_port(eth_dev);
return 0;
@@ -1268,3 +1457,10 @@ RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
"iface=<ifc> "
"queues=<int>");
+
+RTE_INIT(vhost_init_log)
+{
+ vhost_logtype = rte_log_register("pmd.net.vhost");
+ if (vhost_logtype >= 0)
+ rte_log_set_level(vhost_logtype, RTE_LOG_NOTICE);
+}
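For reference, the parameters registered just above ('iface', 'queues', plus the client/zero-copy/iommu flags handled in probe) are what instantiates this PMD, either from the EAL command line (--vdev 'net_vhost0,iface=/tmp/sock0,queues=1') or programmatically. A runtime sketch with placeholder device name and socket path:

    #include <rte_bus_vdev.h>

    /* Hot-plug a vhost port; mirrors the --vdev command-line form. */
    static int
    create_vhost_port(void)
    {
        return rte_vdev_init("net_vhost0", "iface=/tmp/sock0,queues=1");
    }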
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index 948f3c81..0e68b9f6 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -1,36 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 IGEL Co., Ltd.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of IGEL Co., Ltd. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 IGEL Co., Ltd.
+ * Copyright(c) 2016-2018 Intel Corporation
*/
-
#ifndef _RTE_ETH_VHOST_H_
#define _RTE_ETH_VHOST_H_
diff --git a/drivers/net/virtio/meson.build b/drivers/net/virtio/meson.build
new file mode 100644
index 00000000..e43ce6bb
--- /dev/null
+++ b/drivers/net/virtio/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources += files('virtio_ethdev.c',
+ 'virtio_pci.c',
+ 'virtio_rxtx.c',
+ 'virtio_rxtx_simple.c',
+ 'virtqueue.c')
+deps += ['kvargs', 'bus_pci']
+
+if arch_subdir == 'x86'
+ sources += files('virtio_rxtx_simple_sse.c')
+elif arch_subdir == 'arm' and host_machine.cpu_family().startswith('aarch64')
+ sources += files('virtio_rxtx_simple_neon.c')
+endif
+
+if host_machine.system() == 'linux'
+ dpdk_conf.set('RTE_VIRTIO_USER', 1)
+
+ sources += files('virtio_user_ethdev.c',
+ 'virtio_user/vhost_kernel.c',
+ 'virtio_user/vhost_kernel_tap.c',
+ 'virtio_user/vhost_user.c',
+ 'virtio_user/virtio_user_dev.c')
+ deps += ['bus_vdev']
+endif
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 884f74ad..614357da 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -14,7 +14,6 @@
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -29,6 +28,7 @@
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_cycles.h>
+#include <rte_kvargs.h>
#include "virtio_ethdev.h"
#include "virtio_pci.h"
@@ -68,7 +68,7 @@ static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
-static void virtio_mac_addr_set(struct rte_eth_dev *dev,
+static int virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int virtio_intr_enable(struct rte_eth_dev *dev);
@@ -392,8 +392,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
- SOCKET_ID_ANY,
- 0, VIRTIO_PCI_VRING_ALIGN);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
mz = rte_memzone_lookup(vq_name);
@@ -418,8 +418,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
dev->data->port_id, vtpci_queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
- SOCKET_ID_ANY, 0,
- RTE_CACHE_LINE_SIZE);
+ SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
hdr_mz = rte_memzone_lookup(vq_hdr_name);
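Both ring allocations above now pass RTE_MEMZONE_IOVA_CONTIG. Since the 18.05 memory rework a memzone may be backed by pages that are not IOVA-contiguous, which a vring (handed to the device as a single base address) cannot tolerate; the flag restores the old guarantee. The call shape, as a sketch with placeholder name, size and alignment:

    #include <rte_memzone.h>

    /* Device-visible ring memory must be IOVA-contiguous. */
    static const struct rte_memzone *
    reserve_ring(const char *name, size_t len)
    {
        return rte_memzone_reserve_aligned(name, len, SOCKET_ID_ANY,
                                           RTE_MEMZONE_IOVA_CONTIG, 4096);
    }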
@@ -774,46 +774,6 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.mac_addr_set = virtio_mac_addr_set,
};
-static inline int
-virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static inline int
-virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
@@ -1097,7 +1057,7 @@ virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
virtio_mac_table_set(hw, uc, mc);
}
-static void
+static int
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct virtio_hw *hw = dev->data->dev_private;
@@ -1113,9 +1073,14 @@ virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
- virtio_send_command(hw->cvq, &ctrl, &len, 1);
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
- virtio_set_hwaddr(hw);
+ return virtio_send_command(hw->cvq, &ctrl, &len, 1);
+ }
+
+ if (!vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
+ return -ENOTSUP;
+
+ virtio_set_hwaddr(hw);
+ return 0;
}
static int
@@ -1273,9 +1238,16 @@ static void
virtio_notify_peers(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
- struct virtnet_rx *rxvq = dev->data->rx_queues[0];
+ struct virtnet_rx *rxvq;
struct rte_mbuf *rarp_mbuf;
+ if (!dev->data->rx_queues)
+ return;
+
+ rxvq = dev->data->rx_queues[0];
+ if (!rxvq)
+ return;
+
rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
(struct ether_addr *)hw->mac_addr);
if (rarp_mbuf == NULL) {
@@ -1333,7 +1305,8 @@ virtio_interrupt_handler(void *param)
if (isr & VIRTIO_NET_S_ANNOUNCE) {
virtio_notify_peers(dev);
- virtio_ack_link_announce(dev);
+ if (hw->cvq)
+ virtio_ack_link_announce(dev);
}
}
@@ -1347,6 +1320,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
PMD_INIT_LOG(INFO,
"virtio: using mergeable buffer Rx path on port %u",
@@ -1358,10 +1336,10 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
- if (hw->use_simple_tx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
} else {
PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
eth_dev->data->port_id);
@@ -1744,9 +1722,51 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+static int vdpa_check_handler(__rte_unused const char *key,
+ const char *value, __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+static int
+vdpa_mode_selected(struct rte_devargs *devargs)
+{
+ struct rte_kvargs *kvlist;
+ const char *key = "vdpa";
+ int ret = 0;
+
+ if (devargs == NULL)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key))
+ goto exit;
+
+ /* vdpa mode selected when there's a key-value pair: vdpa=1 */
+ if (rte_kvargs_process(kvlist, key,
+ vdpa_check_handler, NULL) < 0) {
+ goto exit;
+ }
+ ret = 1;
+
+exit:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
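vdpa_mode_selected() scans the device's devargs for the pair vdpa=1; when found, eth_virtio_pci_probe() below returns 1 so this PMD declines the device and a vDPA driver can claim it instead, e.g. after whitelisting with -w 0000:00:04.0,vdpa=1. The same kvargs check in isolation, as a sketch that tests key presence only (the code above additionally insists the value is "1"):

    #include <rte_kvargs.h>

    /* Does the devargs string mention the "vdpa" key? (sketch) */
    static int
    has_vdpa_key(const char *args)
    {
        struct rte_kvargs *kv = rte_kvargs_parse(args, NULL);
        int found = 0;

        if (kv != NULL) {
            found = rte_kvargs_count(kv, "vdpa") > 0;
            rte_kvargs_free(kv);
        }
        return found;
    }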
static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
+ /* virtio pmd skips probe if device needs to work in vdpa mode */
+ if (vdpa_mode_selected(pci_dev->device.devargs))
+ return 1;
+
return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
eth_virtio_dev_init);
}
@@ -1766,9 +1786,7 @@ static struct rte_pci_driver rte_virtio_pmd = {
.remove = eth_virtio_pci_remove,
};
-RTE_INIT(rte_virtio_pmd_init);
-static void
-rte_virtio_pmd_init(void)
+RTE_INIT(rte_virtio_pmd_init)
{
if (rte_eal_iopl_init() != 0) {
PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
@@ -1778,6 +1796,22 @@ rte_virtio_pmd_init(void)
rte_pci_register(&rte_virtio_pmd);
}
+static bool
+rx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+}
+
+static bool
+tx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+}
+
/*
* Configure virtio device
* It returns 0 on success.
@@ -1786,7 +1820,10 @@ static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t rx_offloads = rxmode->offloads;
+ uint64_t tx_offloads = txmode->offloads;
uint64_t req_features;
int ret;
@@ -1799,18 +1836,24 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- /* The name hw_ip_checksum is a bit confusing since it can be
- * set by the application to request L3 and/or L4 checksums. In
- * case of virtio, only L4 checksum is supported.
- */
- if (rxmode->hw_ip_checksum)
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rxmode->enable_lro)
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
req_features |=
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM))
+ req_features |= (1ULL << VIRTIO_NET_F_CSUM);
+
+ if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ req_features |=
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6);
+
/* if request features changed, reinit the device */
if (req_features != hw->req_guest_features) {
ret = virtio_init_device(dev, req_features);
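The hunk above derives the virtio feature request from the 18.08 offload bitmasks: Rx checksum maps to GUEST_CSUM, LRO to GUEST_TSO4/6, and, newly in this patch, Tx checksum/TSO map to CSUM and HOST_TSO4/6. From the application side that corresponds to a configuration like this sketch (port id is a placeholder, capability checks elided):

    #include <rte_ethdev.h>

    static int
    configure_csum_tso(uint16_t port_id)
    {
        struct rte_eth_conf conf = {
            .rxmode = { .offloads = DEV_RX_OFFLOAD_TCP_CKSUM |
                                    DEV_RX_OFFLOAD_UDP_CKSUM },
            .txmode = { .offloads = DEV_TX_OFFLOAD_TCP_CKSUM |
                                    DEV_TX_OFFLOAD_TCP_TSO },
        };
        /* per the mapping above: GUEST_CSUM, CSUM, HOST_TSO4/6 requested */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }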
@@ -1818,14 +1861,15 @@ virtio_dev_configure(struct rte_eth_dev *dev)
return ret;
}
- if (rxmode->hw_ip_checksum &&
+ if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM)) &&
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
PMD_DRV_LOG(ERR,
"rx checksum not available on this host");
return -ENOTSUP;
}
- if (rxmode->enable_lro &&
+ if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
PMD_DRV_LOG(ERR,
@@ -1837,15 +1881,19 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
virtio_dev_cq_start(dev);
- hw->vlan_strip = rxmode->hw_vlan_strip;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ hw->vlan_strip = 1;
- if (rxmode->hw_vlan_filter
+ if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
return -ENOTSUP;
}
+ hw->has_tx_offload = tx_offload_enabled(hw);
+ hw->has_rx_offload = rx_offload_enabled(hw);
+
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
/* Enable vector (0) for Link State Intrerrupt */
if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
@@ -1857,20 +1905,30 @@ virtio_dev_configure(struct rte_eth_dev *dev)
rte_spinlock_init(&hw->state_lock);
hw->use_simple_rx = 1;
- hw->use_simple_tx = 1;
+
+ if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+ hw->use_inorder_tx = 1;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_inorder_rx = 1;
+ hw->use_simple_rx = 0;
+ } else {
+ hw->use_inorder_rx = 0;
+ }
+ }
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
}
#endif
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
+ hw->use_simple_rx = 0;
}
- if (rxmode->hw_ip_checksum)
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_VLAN_STRIP))
hw->use_simple_rx = 0;
return 0;
@@ -2028,21 +2086,21 @@ virtio_dev_stop(struct rte_eth_dev *dev)
hw->started = 0;
memset(&link, 0, sizeof(link));
- virtio_dev_atomic_write_link_status(dev, &link);
+ rte_eth_linkstatus_set(dev, &link);
rte_spinlock_unlock(&hw->state_lock);
}
static int
virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
- struct rte_eth_link link, old;
+ struct rte_eth_link link;
uint16_t status;
struct virtio_hw *hw = dev->data->dev_private;
+
memset(&link, 0, sizeof(link));
- virtio_dev_atomic_read_link_status(dev, &link);
- old = link;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
if (hw->started == 0) {
link.link_status = ETH_LINK_DOWN;
@@ -2063,9 +2121,8 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
} else {
link.link_status = ETH_LINK_UP;
}
- virtio_dev_atomic_write_link_status(dev, &link);
- return (old.link_status == link.link_status) ? -1 : 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
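The removed cmpset helpers are superseded by rte_eth_linkstatus_set() from rte_ethdev_driver.h, which atomically publishes the link word and reports whether the status changed, which is exactly what link_update is expected to return. The resulting driver idiom, sketched with this PMD's fixed fake-10G values:

    #include <rte_ethdev_driver.h>

    static int
    my_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
    {
        struct rte_eth_link link = {
            .link_speed   = ETH_SPEED_NUM_10G,
            .link_duplex  = ETH_LINK_FULL_DUPLEX,
            .link_autoneg = ETH_LINK_FIXED,
            .link_status  = ETH_LINK_UP, /* queried from the backend */
        };

        return rte_eth_linkstatus_set(dev, &link); /* atomic publish */
    }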
static int
@@ -2073,9 +2130,10 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t offloads = rxmode->offloads;
if (mask & ETH_VLAN_FILTER_MASK) {
- if (rxmode->hw_vlan_filter &&
+ if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(NOTICE,
@@ -2086,7 +2144,7 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
if (mask & ETH_VLAN_STRIP_MASK)
- hw->vlan_strip = rxmode->hw_vlan_strip;
+ hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
@@ -2099,7 +2157,6 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
- dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
@@ -2107,31 +2164,32 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
- };
host_features = VTPCI_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_CRC_STRIP;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM;
}
+ if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
if ((host_features & tso_mask) == tso_mask)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
- dev_info->tx_offload_capa = 0;
- if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VLAN_INSERT;
+ if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
}
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
- if ((hw->guest_features & tso_mask) == tso_mask)
+ if ((host_features & tso_mask) == tso_mask)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
@@ -2150,9 +2208,7 @@ RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(virtio_init_log);
-static void
-virtio_init_log(void)
+RTE_INIT(virtio_init_log)
{
virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
if (virtio_logtype_init >= 0)
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 4539d2e4..b726ad10 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -28,14 +28,12 @@
1u << VIRTIO_NET_F_CTRL_VQ | \
1u << VIRTIO_NET_F_CTRL_RX | \
1u << VIRTIO_NET_F_CTRL_VLAN | \
- 1u << VIRTIO_NET_F_CSUM | \
- 1u << VIRTIO_NET_F_HOST_TSO4 | \
- 1u << VIRTIO_NET_F_HOST_TSO6 | \
1u << VIRTIO_NET_F_MRG_RXBUF | \
1u << VIRTIO_NET_F_MTU | \
1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE | \
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
+ 1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
@@ -43,6 +41,7 @@
1u << VIRTIO_NET_F_GUEST_CSUM | \
1u << VIRTIO_NET_F_GUEST_TSO4 | \
1u << VIRTIO_NET_F_GUEST_TSO6)
+
/*
* CQ function prototype
*/
@@ -75,9 +74,15 @@ uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index a28ba833..58fdd3d4 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -6,6 +6,7 @@
#define _VIRTIO_PCI_H_
#include <stdint.h>
+#include <stdbool.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
@@ -121,6 +122,12 @@ struct virtnet_ctl;
#define VIRTIO_TRANSPORT_F_START 28
#define VIRTIO_TRANSPORT_F_END 34
+/*
+ * Inorder feature indicates that all buffers are used by the device
+ * in the same order in which they have been made available.
+ */
+#define VIRTIO_F_IN_ORDER 35
+
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
@@ -232,7 +239,10 @@ struct virtio_hw {
uint8_t use_msix;
uint8_t modern;
uint8_t use_simple_rx;
- uint8_t use_simple_tx;
+ uint8_t use_inorder_rx;
+ uint8_t use_inorder_tx;
+ bool has_tx_offload;
+ bool has_rx_offload;
uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 8dbf2a30..eb891433 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -38,10 +38,6 @@
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
-
-#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
@@ -52,6 +48,13 @@ virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
}
void
+vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
+{
+ vq->vq_free_cnt += num;
+ vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
+}
+
+void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp, *dp_tail;
@@ -119,6 +122,44 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
return i;
}
+static uint16_t
+virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_mbuf *cookie;
+ uint16_t used_idx = 0;
+ uint16_t i;
+
+ if (unlikely(num == 0))
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ /* Desc idx same as used idx */
+ uep = &vq->vq_ring.used->ring[used_idx];
+ len[i] = uep->len;
+ cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
+
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+ vq->vq_used_cons_idx++;
+ vq->vq_descx[used_idx].cookie = NULL;
+ }
+
+ vq_ring_free_inorder(vq, used_idx, i);
+ return i;
+}
+
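virtqueue_dequeue_rx_inorder() leans on the VIRTIO_F_IN_ORDER contract: the device consumes buffers in the order they were made available, so used-ring entry i refers to descriptor i ("Desc idx same as used idx") and a whole prefix of the ring can be reclaimed at once via vq_ring_free_inorder(). The invariant written out against the driver's internal structures, as a debug sketch that is active only with RTE_ENABLE_ASSERT:

    /* In-order completion invariant this path relies on (sketch). */
    static inline void
    assert_inorder(struct virtqueue *vq)
    {
        uint16_t used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);

        RTE_ASSERT(vq->vq_ring.used->ring[used_idx].id == used_idx);
    }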
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
@@ -147,6 +188,83 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
}
}
+/* Cleanup from completed inorder transmits. */
+static void
+virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
+{
+ uint16_t i, used_idx, desc_idx = 0, last_idx;
+ int16_t free_cnt = 0;
+ struct vq_desc_extra *dxp = NULL;
+
+ if (unlikely(num == 0))
+ return;
+
+ for (i = 0; i < num; i++) {
+ struct vring_used_elem *uep;
+
+ used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_used_cons_idx++;
+
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
+
+ last_idx = desc_idx + dxp->ndescs - 1;
+ free_cnt = last_idx - vq->vq_desc_tail_idx;
+ if (free_cnt <= 0)
+ free_cnt += vq->vq_nentries;
+
+ vq_ring_free_inorder(vq, last_idx, free_cnt);
+}
+
+static inline int
+virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
+ struct rte_mbuf **cookies,
+ uint16_t num)
+{
+ struct vq_desc_extra *dxp;
+ struct virtio_hw *hw = vq->hw;
+ struct vring_desc *start_dp;
+ uint16_t head_idx, idx, i = 0;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < num))
+ return -EMSGSIZE;
+
+ head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
+ start_dp = vq->vq_ring.desc;
+
+ while (i < num) {
+ idx = head_idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookies[i];
+ dxp->ndescs = 1;
+
+ start_dp[idx].addr =
+ VIRTIO_MBUF_ADDR(cookies[i], vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len =
+ cookies[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM +
+ hw->vtnet_hdr_size;
+ start_dp[idx].flags = VRING_DESC_F_WRITE;
+
+ vq_update_avail_ring(vq, idx);
+ head_idx++;
+ i++;
+ }
+
+ vq->vq_desc_head_idx += num;
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ return 0;
+}
static inline int
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
@@ -229,13 +347,6 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
}
}
-static inline int
-tx_offload_enabled(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
-}
/* avoid write operation when necessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do { \
@@ -244,8 +355,111 @@ tx_offload_enabled(struct virtio_hw *hw)
} while (0)
static inline void
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
+ struct rte_mbuf *cookie,
+ bool offload)
+{
+ if (offload) {
+ if (cookie->ol_flags & PKT_TX_TCP_SEG)
+ cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
+ switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct udp_hdr,
+ dgram_cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ case PKT_TX_TCP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ default:
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ break;
+ }
+
+ /* TCP Segmentation Offload */
+ if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+ virtio_tso_fix_cksum(cookie);
+ hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+ VIRTIO_NET_HDR_GSO_TCPV6 :
+ VIRTIO_NET_HDR_GSO_TCPV4;
+ hdr->gso_size = cookie->tso_segsz;
+ hdr->hdr_len =
+ cookie->l2_len +
+ cookie->l3_len +
+ cookie->l4_len;
+ } else {
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+ }
+}
+
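virtqueue_xmit_offload() factors the checksum/TSO header fill out of virtqueue_enqueue_xmit() so the new in-order path can share it. For the request to be honoured the application follows the usual DPDK Tx convention, roughly as in this sketch (header layout is the caller's responsibility, and virtio expects the pseudo-header checksum pre-seeded):

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>
    #include <rte_tcp.h>

    /* Prepare an IPv4/TCP mbuf for checksum offload (illustrative). */
    static void
    request_tcp_cksum(struct rte_mbuf *m, struct ipv4_hdr *ip,
                      struct tcp_hdr *tcp)
    {
        m->l2_len = sizeof(struct ether_hdr);
        m->l3_len = sizeof(struct ipv4_hdr);
        m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
        /* seed the pseudo-header checksum for the host to finish */
        tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
    }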
+static inline void
+virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
+ struct rte_mbuf **cookies,
+ uint16_t num)
+{
+ struct vq_desc_extra *dxp;
+ struct virtqueue *vq = txvq->vq;
+ struct vring_desc *start_dp;
+ struct virtio_net_hdr *hdr;
+ uint16_t idx;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ uint16_t i = 0;
+
+ idx = vq->vq_desc_head_idx;
+ start_dp = vq->vq_ring.desc;
+
+ while (i < num) {
+ idx = idx & (vq->vq_nentries - 1);
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookies[i];
+ dxp->ndescs = 1;
+
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookies[i], head_size);
+ cookies[i]->pkt_len -= head_size;
+
+ /* if offload disabled, it is not zeroed below, do it now */
+ if (!vq->hw->has_tx_offload) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+
+ virtqueue_xmit_offload(hdr, cookies[i],
+ vq->hw->has_tx_offload);
+
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
+ start_dp[idx].len = cookies[i]->data_len;
+ start_dp[idx].flags = 0;
+
+ vq_update_avail_ring(vq, idx);
+
+ idx++;
+ i++;
+ }
+
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
+}
+
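virtqueue_enqueue_xmit_inorder() writes the virtio-net header straight into the mbuf headroom via rte_pktmbuf_prepend(), so each packet occupies a single descriptor with no separate header region. That is only safe for mbufs passing the eligibility checks in virtio_xmit_pkts_inorder() further down; gathered into one predicate, they look roughly like this sketch:

    /* May the virtio-net header be pushed into the mbuf itself? (sketch) */
    static inline int
    can_push_hdr(struct rte_mbuf *m, uint16_t hdr_size)
    {
        /* the full check also requires header alignment and
         * VIRTIO_F_ANY_LAYOUT or VIRTIO_F_VERSION_1 */
        return rte_mbuf_refcnt_read(m) == 1 &&
               RTE_MBUF_DIRECT(m) &&
               m->nb_segs == 1 &&
               rte_pktmbuf_headroom(m) >= hdr_size;
    }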
+static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
- uint16_t needed, int use_indirect, int can_push)
+ uint16_t needed, int use_indirect, int can_push,
+ int in_order)
{
struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
struct vq_desc_extra *dxp;
@@ -255,9 +469,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t head_idx, idx;
uint16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
- int offload;
- offload = tx_offload_enabled(vq->hw);
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
dxp = &vq->vq_descx[idx];
@@ -274,8 +486,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
* which is wrong. Below subtract restores correct pkt size.
*/
cookie->pkt_len -= head_size;
+
/* if offload disabled, it is not zeroed below, do it now */
- if (offload == 0) {
+ if (!vq->hw->has_tx_offload) {
ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
@@ -312,49 +525,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
idx = start_dp[idx].next;
}
- /* Checksum Offload / TSO */
- if (offload) {
- if (cookie->ol_flags & PKT_TX_TCP_SEG)
- cookie->ol_flags |= PKT_TX_TCP_CKSUM;
-
- switch (cookie->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct udp_hdr,
- dgram_cksum);
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- break;
-
- case PKT_TX_TCP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
- hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
- hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- break;
-
- default:
- ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
- ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
- ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
- break;
- }
-
- /* TCP Segmentation Offload */
- if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- virtio_tso_fix_cksum(cookie);
- hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
- VIRTIO_NET_HDR_GSO_TCPV6 :
- VIRTIO_NET_HDR_GSO_TCPV4;
- hdr->gso_size = cookie->tso_segsz;
- hdr->hdr_len =
- cookie->l2_len +
- cookie->l3_len +
- cookie->l4_len;
- } else {
- ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
- ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
- ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
- }
- }
+ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
do {
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
@@ -366,11 +537,15 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
if (use_indirect)
idx = vq->vq_ring.desc[head_idx].next;
- vq->vq_desc_head_idx = idx;
- if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
- vq->vq_desc_tail_idx = idx;
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ vq->vq_desc_head_idx = idx;
vq_update_avail_ring(vq, head_idx);
+
+ if (!in_order) {
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = idx;
+ }
}
void
@@ -389,7 +564,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id __rte_unused,
- __rte_unused const struct rte_eth_rxconf *rx_conf,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
@@ -410,6 +585,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
rte_exit(EXIT_FAILURE,
"Cannot allocate mbufs for rx virtqueue");
}
+
dev->data->rx_queues[queue_idx] = rxvq;
return 0;
@@ -424,7 +600,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
struct virtnet_rx *rxvq = &vq->rxq;
struct rte_mbuf *m;
uint16_t desc_idx;
- int error, nbufs;
+ int error, nbufs, i;
PMD_INIT_FUNC_TRACE();
@@ -454,6 +630,25 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
virtio_rxq_rearm_vec(rxvq);
nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
}
+ } else if (hw->use_inorder_rx) {
+ if ((!virtqueue_full(vq))) {
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
+ free_cnt)) {
+ error = virtqueue_enqueue_refill_inorder(vq,
+ pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
+ }
+
+ nbufs += free_cnt;
+ vq_update_avail_idx(vq);
+ }
} else {
while (!virtqueue_full(vq)) {
m = rte_mbuf_raw_alloc(rxvq->mpool);
@@ -501,10 +696,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- /* cannot use simple rxtx funcs with multisegs or offloads */
- if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS)
- hw->use_simple_tx = 0;
-
if (nb_desc == 0 || nb_desc > vq->vq_nentries)
nb_desc = vq->vq_nentries;
vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
@@ -539,31 +730,11 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
struct virtio_hw *hw = dev->data->dev_private;
struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
- uint16_t mid_idx = vq->vq_nentries >> 1;
- struct virtnet_tx *txvq = &vq->txq;
- uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
- if (hw->use_simple_tx) {
- for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
- vq->vq_ring.avail->ring[desc_idx] =
- desc_idx + mid_idx;
- vq->vq_ring.desc[desc_idx + mid_idx].next =
- desc_idx;
- vq->vq_ring.desc[desc_idx + mid_idx].addr =
- txvq->virtio_net_hdr_mem +
- offsetof(struct virtio_tx_region, tx_hdr);
- vq->vq_ring.desc[desc_idx + mid_idx].len =
- vq->hw->vtnet_hdr_size;
- vq->vq_ring.desc[desc_idx + mid_idx].flags =
- VRING_DESC_F_NEXT;
- vq->vq_ring.desc[desc_idx].flags = 0;
- }
- for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
- desc_idx++)
- vq->vq_ring.avail->ring[desc_idx] = desc_idx;
- }
+ if (hw->use_inorder_tx)
+ vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
VIRTQUEUE_DUMP(vq);
@@ -579,6 +750,19 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
* successful since it was just dequeued.
*/
error = virtqueue_enqueue_recv_refill(vq, m);
+
+ if (unlikely(error)) {
+ RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+ rte_pktmbuf_free(m);
+ }
+}
+
+static void
+virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
+{
+ int error;
+
+ error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
if (unlikely(error)) {
RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
rte_pktmbuf_free(m);
@@ -617,6 +801,15 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
}
}
+static inline void
+virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
+{
+ VIRTIO_DUMP_PACKET(m, m->data_len);
+
+ rxvq->stats.bytes += m->pkt_len;
+ virtio_update_packet_stats(&rxvq->stats, m);
+}
+
/* Optionally fill offload information in structure */
static int
virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
@@ -689,14 +882,6 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
return 0;
}
-static inline int
-rx_offload_enabled(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
-}
-
#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
@@ -712,7 +897,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
int error;
uint32_t i, nb_enqueued;
uint32_t hdr_size;
- int offload;
struct virtio_net_hdr *hdr;
nb_rx = 0;
@@ -734,7 +918,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
for (i = 0; i < num ; i++) {
rxm = rcv_pkts[i];
@@ -763,24 +946,20 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (hw->vlan_strip)
rte_vlan_strip(rxm);
- if (offload && virtio_rx_offload(rxm, hdr) < 0) {
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
}
- VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
+ virtio_rx_stats_updated(rxvq, rxm);
rx_pkts[nb_rx++] = rxm;
-
- rxvq->stats.bytes += rxm->pkt_len;
- virtio_update_packet_stats(&rxvq->stats, rxm);
}
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- error = ENOSPC;
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
@@ -810,6 +989,193 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
uint16_t
+virtio_recv_mergeable_pkts_inorder(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *prev;
+ uint16_t nb_used, num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t nb_enqueued;
+ uint32_t seg_num;
+ uint32_t seg_res;
+ uint32_t hdr_size;
+ int32_t i;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+ nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
+
+ virtio_rmb();
+
+ PMD_RX_LOG(DEBUG, "used:%d", nb_used);
+
+ nb_enqueued = 0;
+ seg_num = 1;
+ seg_res = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
+
+ for (i = 0; i < num; i++) {
+ struct virtio_net_hdr_mrg_rxbuf *header;
+
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ rxm = rcv_pkts[i];
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf_inorder(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ header = (struct virtio_net_hdr_mrg_rxbuf *)
+ ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
+ - hdr_size);
+ seg_num = header->num_buffers;
+
+ if (seg_num == 0)
+ seg_num = 1;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ rxm->port = rxvq->port_id;
+
+ rx_pkts[nb_rx] = rxm;
+ prev = rxm;
+
+ if (vq->hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
+ virtio_discard_rxbuf_inorder(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
+ seg_res = seg_num - 1;
+
+ /* Merge remaining segments */
+ while (seg_res != 0 && i < (num - 1)) {
+ i++;
+
+ rxm = rcv_pkts[i];
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[i]);
+ rxm->data_len = (uint16_t)(len[i]);
+
+ rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
+ rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
+
+ if (prev)
+ prev->next = rxm;
+
+ prev = rxm;
+ seg_res -= 1;
+ }
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ }
+
+ /* Last packet still needs its remaining segments merged */
+ while (seg_res != 0) {
+ uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
+ VIRTIO_MBUF_BURST_SZ);
+
+ prev = rcv_pkts[nb_rx];
+ if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
+ num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
+ rcv_cnt);
+ uint16_t extra_idx = 0;
+
+ rcv_cnt = num;
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+ rxm->data_off =
+ RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ rx_pkts[nb_rx]->data_len += len[extra_idx];
+ extra_idx += 1;
+ }
+ seg_res -= rcv_cnt;
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ virtio_discard_rxbuf_inorder(vq, prev);
+ rxvq->stats.errors++;
+ break;
+ }
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+
+ if (likely(!virtqueue_full(vq))) {
+ /* free_cnt may include mrg descs */
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
+ error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
+ free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
+ }
+ }
+
+ if (likely(nb_enqueued)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+uint16_t
virtio_recv_mergeable_pkts(void *rx_queue,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
@@ -828,7 +1194,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t extra_idx;
uint32_t seg_res;
uint32_t hdr_size;
- int offload;
nb_rx = 0;
if (unlikely(hw->started == 0))
@@ -846,7 +1211,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
extra_idx = 0;
seg_res = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
@@ -891,7 +1255,8 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rx_pkts[nb_rx] = rxm;
prev = rxm;
- if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
+ if (hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
@@ -953,7 +1318,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxvq->stats.packets += nb_rx;
/* Allocate new mbuf for the used descriptor */
- error = ENOSPC;
while (likely(!virtqueue_full(vq))) {
new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
if (unlikely(new_mbuf == NULL)) {
@@ -1056,7 +1420,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* Enqueue Packet buffers */
- virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);
+ virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
+ can_push, 0);
txvq->stats.bytes += txm->pkt_len;
virtio_update_packet_stats(&txvq->stats, txm);
@@ -1075,3 +1440,116 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_tx;
}
+
+uint16_t
+virtio_xmit_pkts_inorder(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t hdr_size = hw->vtnet_hdr_size;
+ uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
+ struct rte_mbuf *inorder_pkts[nb_pkts];
+ int error;
+
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+ return nb_tx;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ VIRTQUEUE_DUMP(vq);
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ virtio_rmb();
+ if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
+ virtio_xmit_cleanup_inorder(vq, nb_used);
+
+ if (unlikely(!vq->vq_free_cnt))
+ virtio_xmit_cleanup_inorder(vq, nb_used);
+
+ nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int slots, need;
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ /* optimize ring usage */
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= hdr_size &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
+ inorder_pkts[nb_inorder_pkts] = txm;
+ nb_inorder_pkts++;
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ continue;
+ }
+
+ if (nb_inorder_pkts) {
+ virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
+ nb_inorder_pkts);
+ nb_inorder_pkts = 0;
+ }
+
+ slots = txm->nb_segs + 1;
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ nb_used = VIRTQUEUE_NUSED(vq);
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup_inorder(vq, need);
+
+ need = slots - vq->vq_free_cnt;
+
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+ /* Enqueue Packet buffers */
+ virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ }
+
+ /* Transmit all inorder packets */
+ if (nb_inorder_pkts)
+ virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
+ nb_inorder_pkts);
+
+ txvq->stats.packets += nb_tx;
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(vq);
+
+ if (unlikely(virtqueue_kick_prepare(vq))) {
+ virtqueue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+
+ VIRTQUEUE_DUMP(vq);
+
+ return nb_tx;
+}
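The in-order TX path batches an mbuf only when the virtio-net header can be prepended in place, giving a single-descriptor layout. That eligibility test, restated as a standalone predicate for readability (a sketch; can_inline_hdr is an illustrative name, the conditions are exactly the ones used above):

    /* Sketch: can the virtio-net header live in this mbuf's headroom,
     * so packet plus header fit one in-order descriptor? */
    static inline int
    can_inline_hdr(struct virtio_hw *hw, struct rte_mbuf *m, size_t hdr_size)
    {
            return (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
                    vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
                   rte_mbuf_refcnt_read(m) == 1 &&
                   RTE_MBUF_DIRECT(m) &&
                   m->nb_segs == 1 &&
                   rte_pktmbuf_headroom(m) >= hdr_size &&
                   rte_is_aligned(rte_pktmbuf_mtod(m, char *),
                                  __alignof__(struct virtio_net_hdr_mrg_rxbuf));
    }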
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 51520758..31e565b4 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -27,73 +27,6 @@
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
-uint16_t
-virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
-{
- struct virtnet_tx *txvq = tx_queue;
- struct virtqueue *vq = txvq->vq;
- struct virtio_hw *hw = vq->hw;
- uint16_t nb_used;
- uint16_t desc_idx;
- struct vring_desc *start_dp;
- uint16_t nb_tail, nb_commit;
- int i;
- uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
- uint16_t nb_tx = 0;
-
- if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
- return nb_tx;
-
- nb_used = VIRTQUEUE_NUSED(vq);
- rte_compiler_barrier();
-
- if (nb_used >= VIRTIO_TX_FREE_THRESH)
- virtio_xmit_cleanup_simple(vq);
-
- nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
- desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
- start_dp = vq->vq_ring.desc;
- nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);
-
- if (nb_commit >= nb_tail) {
- for (i = 0; i < nb_tail; i++)
- vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
- for (i = 0; i < nb_tail; i++) {
- start_dp[desc_idx].addr =
- VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
- start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
- tx_pkts++;
- desc_idx++;
- }
- nb_commit -= nb_tail;
- desc_idx = 0;
- }
- for (i = 0; i < nb_commit; i++)
- vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
- for (i = 0; i < nb_commit; i++) {
- start_dp[desc_idx].addr =
- VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
- start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
- tx_pkts++;
- desc_idx++;
- }
-
- rte_compiler_barrier();
-
- vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
- vq->vq_avail_idx += nb_pkts;
- vq->vq_ring.avail->idx = vq->vq_avail_idx;
- txvq->stats.packets += nb_pkts;
-
- if (likely(nb_pkts)) {
- if (unlikely(virtqueue_kick_prepare(vq)))
- virtqueue_notify(vq);
- }
-
- return nb_pkts;
-}
-
int __attribute__((cold))
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index 303904d6..dc97e4cc 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -55,53 +55,4 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
vq_update_avail_idx(vq);
}
-#define VIRTIO_TX_FREE_THRESH 32
-#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
-#define VIRTIO_TX_FREE_NR 32
-/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
-static inline void
-virtio_xmit_cleanup_simple(struct virtqueue *vq)
-{
- uint16_t i, desc_idx;
- uint32_t nb_free = 0;
- struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
-
- desc_idx = (uint16_t)(vq->vq_used_cons_idx &
- ((vq->vq_nentries >> 1) - 1));
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool))
- free[nb_free++] = m;
- else {
- rte_mempool_put_bulk(free[0]->pool,
- (void **)free,
- RTE_MIN(RTE_DIM(free),
- nb_free));
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free,
- RTE_MIN(RTE_DIM(free), nb_free));
- } else {
- for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = rte_pktmbuf_prefree_seg(m);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
- vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
-}
-
#endif /* _VIRTIO_RXTX_SIMPLE_H_ */
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index 8d0a1ab2..b2444096 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -70,6 +70,32 @@ static uint64_t vhost_req_user_to_kernel[] = {
[VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
};
+struct walk_arg {
+ struct vhost_memory_kernel *vm;
+ uint32_t region_nr;
+};
+
+static int
+add_memory_region(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, size_t len, void *arg)
+{
+ struct walk_arg *wa = arg;
+ struct vhost_memory_region *mr;
+ void *start_addr;
+
+ if (wa->region_nr >= max_regions)
+ return -1;
+
+ mr = &wa->vm->regions[wa->region_nr++];
+ start_addr = ms->addr;
+
+ mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
+ mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
+ mr->memory_size = len;
+ mr->mmap_offset = 0;
+
+ return 0;
+}
+
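add_memory_region() above is an rte_memseg_contig_walk() callback: the walk visits each contiguous run of memsegs exactly once, which is what lets the manual merge loop below be deleted. A minimal sketch of the same pattern, assuming nothing beyond the public API (count_contig and total_arg are illustrative names):

    #include <rte_common.h>
    #include <rte_memory.h>

    struct total_arg { size_t bytes; };

    /* Sketch: accumulate the length of every contiguous memseg run. */
    static int
    count_contig(const struct rte_memseg_list *msl __rte_unused,
                 const struct rte_memseg *ms __rte_unused,
                 size_t len, void *arg)
    {
            ((struct total_arg *)arg)->bytes += len;
            return 0; /* returning non-zero stops the walk */
    }

    /* usage: struct total_arg t = { 0 };
     * rte_memseg_contig_walk(count_contig, &t); */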
/* By default, vhost kernel module allows 64 regions, but DPDK allows
 * 256 segments. To cope with this, the function below merges virtually
* adjacent memsegs into one region.
@@ -77,63 +103,24 @@ static uint64_t vhost_req_user_to_kernel[] = {
static struct vhost_memory_kernel *
prepare_vhost_memory_kernel(void)
{
- uint32_t i, j, k = 0;
- struct rte_memseg *seg;
- struct vhost_memory_region *mr;
struct vhost_memory_kernel *vm;
+ struct walk_arg wa;
vm = malloc(sizeof(struct vhost_memory_kernel) +
- max_regions *
- sizeof(struct vhost_memory_region));
+ max_regions *
+ sizeof(struct vhost_memory_region));
if (!vm)
return NULL;
- for (i = 0; i < RTE_MAX_MEMSEG; ++i) {
- seg = &rte_eal_get_configuration()->mem_config->memseg[i];
- if (!seg->addr)
- break;
-
- int new_region = 1;
-
- for (j = 0; j < k; ++j) {
- mr = &vm->regions[j];
-
- if (mr->userspace_addr + mr->memory_size ==
- (uint64_t)(uintptr_t)seg->addr) {
- mr->memory_size += seg->len;
- new_region = 0;
- break;
- }
-
- if ((uint64_t)(uintptr_t)seg->addr + seg->len ==
- mr->userspace_addr) {
- mr->guest_phys_addr =
- (uint64_t)(uintptr_t)seg->addr;
- mr->userspace_addr =
- (uint64_t)(uintptr_t)seg->addr;
- mr->memory_size += seg->len;
- new_region = 0;
- break;
- }
- }
-
- if (new_region == 0)
- continue;
-
- mr = &vm->regions[k++];
- /* use vaddr here! */
- mr->guest_phys_addr = (uint64_t)(uintptr_t)seg->addr;
- mr->userspace_addr = (uint64_t)(uintptr_t)seg->addr;
- mr->memory_size = seg->len;
- mr->mmap_offset = 0;
+ wa.region_nr = 0;
+ wa.vm = vm;
- if (k >= max_regions) {
- free(vm);
- return NULL;
- }
+ if (rte_memseg_contig_walk(add_memory_region, &wa) < 0) {
+ free(vm);
+ return NULL;
}
- vm->nregions = k;
+ vm->nregions = wa.region_nr;
vm->padding = 0;
return vm;
}
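For context, the table built here is ultimately handed to the vhost kernel module. An abbreviated sketch of the surrounding call, assuming a valid vhostfd (set_mem_table is an illustrative name; the real driver routes this through the request-translation table shown above):

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Sketch: push the merged region table to vhost-kernel. */
    static int
    set_mem_table(int vhostfd)
    {
            struct vhost_memory_kernel *vm = prepare_vhost_memory_kernel();
            int ret;

            if (vm == NULL)
                    return -1;
            ret = ioctl(vhostfd, VHOST_SET_MEM_TABLE, vm);
            free(vm);
            return ret;
    }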
@@ -351,7 +338,8 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
else
hdr_size = sizeof(struct virtio_net_hdr);
- tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq);
+ tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
+ (char *)dev->mac_addr);
if (tapfd < 0) {
PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
return -1;
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index 1a47a348..9ea7ade7 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -7,15 +7,19 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <net/if.h>
+#include <net/if_arp.h>
#include <errno.h>
#include <string.h>
#include <limits.h>
+#include <rte_ether.h>
+
#include "vhost_kernel_tap.h"
#include "../virtio_logs.h"
int
-vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
+vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac)
{
unsigned int tap_features;
int sndbuf = INT_MAX;
@@ -94,6 +98,14 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s",
strerror(errno));
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
+ memcpy(ifr.ifr_hwaddr.sa_data, mac, ETHER_ADDR_LEN);
+ if (ioctl(tapfd, SIOCSIFHWADDR, (void *)&ifr) == -1) {
+ PMD_DRV_LOG(ERR, "SIOCSIFHWADDR failed: %s", strerror(errno));
+ goto error;
+ }
+
if (!(*p_ifname))
*p_ifname = strdup(ifr.ifr_name);
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
index 7d52e6b7..01a026f5 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -35,4 +35,5 @@
/* Constants */
#define PATH_NET_TUN "/dev/net/tun"
-int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq);
+int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac);
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 91c6449b..ef6e43df 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -138,12 +138,13 @@ struct hugepage_file_info {
static int
get_hugepage_file_info(struct hugepage_file_info huges[], int max)
{
- int idx;
+ int idx, k, exist;
FILE *f;
char buf[BUFSIZ], *tmp, *tail;
char *str_underline, *str_start;
int huge_index;
uint64_t v_start, v_end;
+ struct stat stats;
f = fopen("/proc/self/maps", "r");
if (!f) {
@@ -183,16 +184,39 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max)
if (sscanf(str_start, "map_%d", &huge_index) != 1)
continue;
+		/* skip duplicated files that are mapped to multiple regions */
+ for (k = 0, exist = -1; k < idx; ++k) {
+ if (!strcmp(huges[k].path, tmp)) {
+ exist = k;
+ break;
+ }
+ }
+ if (exist >= 0)
+ continue;
+
if (idx >= max) {
PMD_DRV_LOG(ERR, "Exceed maximum of %d", max);
goto error;
}
+
huges[idx].addr = v_start;
- huges[idx].size = v_end - v_start;
+ huges[idx].size = v_end - v_start; /* To be corrected later */
snprintf(huges[idx].path, PATH_MAX, "%s", tmp);
idx++;
}
+	/* correct the size for files that span multiple regions */
+ for (k = 0; k < idx; ++k) {
+ if (stat(huges[k].path, &stats) < 0) {
+ PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n",
+ huges[k].path, strerror(errno));
+ continue;
+ }
+ huges[k].size = stats.st_size;
+ PMD_DRV_LOG(INFO, "file %s, size %zx\n",
+ huges[k].path, huges[k].size);
+ }
+
fclose(f);
return idx;
@@ -263,6 +287,9 @@ vhost_user_sock(struct virtio_user_dev *dev,
PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
+ if (dev->is_server && vhostfd < 0)
+ return -1;
+
msg.request = req;
msg.flags = VHOST_USER_VERSION;
msg.size = 0;
@@ -378,6 +405,30 @@ vhost_user_sock(struct virtio_user_dev *dev,
return 0;
}
+#define MAX_VIRTIO_USER_BACKLOG 1
+static int
+virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
+{
+ int ret;
+ int flag;
+ int fd = dev->listenfd;
+
+ ret = bind(fd, (struct sockaddr *)un, sizeof(*un));
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "failed to bind to %s: %s; remove it and try again\n",
+ dev->path, strerror(errno));
+ return -1;
+ }
+ ret = listen(fd, MAX_VIRTIO_USER_BACKLOG);
+ if (ret < 0)
+ return -1;
+
+ flag = fcntl(fd, F_GETFL);
+ fcntl(fd, F_SETFL, flag | O_NONBLOCK);
+
+ return 0;
+}
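Note that the fcntl() calls here (and in the reconnect path later in this patch) do not check the F_GETFL result. A defensive variant, as a sketch (set_nonblock is an illustrative name):

    #include <fcntl.h>

    /* Sketch: set O_NONBLOCK, checking both fcntl() steps. */
    static int
    set_nonblock(int fd)
    {
            int flags = fcntl(fd, F_GETFL, 0);

            if (flags < 0)
                    return -1;
            return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
    }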
+
/**
* Set up environment to talk with a vhost user backend.
*
@@ -405,13 +456,24 @@ vhost_user_setup(struct virtio_user_dev *dev)
memset(&un, 0, sizeof(un));
un.sun_family = AF_UNIX;
snprintf(un.sun_path, sizeof(un.sun_path), "%s", dev->path);
- if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
- PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
- close(fd);
- return -1;
+
+ if (dev->is_server) {
+ dev->listenfd = fd;
+ if (virtio_user_start_server(dev, &un) < 0) {
+			PMD_DRV_LOG(ERR, "virtio-user startup failed in server mode");
+ close(fd);
+ return -1;
+ }
+ dev->vhostfd = -1;
+ } else {
+ if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
+ PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
+ close(fd);
+ return -1;
+ }
+ dev->vhostfd = fd;
}
- dev->vhostfd = fd;
return 0;
}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index f90fee9e..7df600b0 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -17,6 +17,8 @@
#include "virtio_user_dev.h"
#include "../virtio_ethdev.h"
+#define VIRTIO_USER_MEM_EVENT_CLB_NAME "virtio_user_mem_event_clb"
+
static int
virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
@@ -94,11 +96,27 @@ virtio_user_queue_setup(struct virtio_user_dev *dev,
}
int
+is_vhost_user_by_type(const char *path)
+{
+ struct stat sb;
+
+ if (stat(path, &sb) == -1)
+ return 0;
+
+ return S_ISSOCK(sb.st_mode);
+}
+
+int
virtio_user_start_device(struct virtio_user_dev *dev)
{
uint64_t features;
int ret;
+ pthread_mutex_lock(&dev->mutex);
+
+ if (is_vhost_user_by_type(dev->path) && dev->vhostfd < 0)
+ goto error;
+
/* Do not check return as already done in init, or reset in stop */
dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL);
@@ -132,8 +150,12 @@ virtio_user_start_device(struct virtio_user_dev *dev)
*/
dev->ops->enable_qp(dev, 0, 1);
+ dev->started = true;
+ pthread_mutex_unlock(&dev->mutex);
+
return 0;
error:
+ pthread_mutex_unlock(&dev->mutex);
/* TODO: free resource here or caller to check */
return -1;
}
@@ -142,13 +164,17 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
{
uint32_t i;
+ pthread_mutex_lock(&dev->mutex);
for (i = 0; i < dev->max_queue_pairs; ++i)
dev->ops->enable_qp(dev, i, 0);
if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
PMD_DRV_LOG(INFO, "Failed to reset the device\n");
+ pthread_mutex_unlock(&dev->mutex);
return -1;
}
+ dev->started = false;
+ pthread_mutex_unlock(&dev->mutex);
return 0;
}
@@ -174,17 +200,6 @@ parse_mac(struct virtio_user_dev *dev, const char *mac)
}
}
-int
-is_vhost_user_by_type(const char *path)
-{
- struct stat sb;
-
- if (stat(path, &sb) == -1)
- return 0;
-
- return S_ISSOCK(sb.st_mode);
-}
-
static int
virtio_user_dev_init_notify(struct virtio_user_dev *dev)
{
@@ -254,10 +269,41 @@ virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
eth_dev->intr_handle->fd = -1;
if (dev->vhostfd >= 0)
eth_dev->intr_handle->fd = dev->vhostfd;
+ else if (dev->is_server)
+ eth_dev->intr_handle->fd = dev->listenfd;
return 0;
}
+static void
+virtio_user_mem_event_cb(enum rte_mem_event type __rte_unused,
+ const void *addr __rte_unused,
+ size_t len __rte_unused,
+ void *arg)
+{
+ struct virtio_user_dev *dev = arg;
+ uint16_t i;
+
+ pthread_mutex_lock(&dev->mutex);
+
+ if (dev->started == false)
+ goto exit;
+
+ /* Step 1: pause the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 0);
+
+ /* Step 2: update memory regions */
+ dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
+
+ /* Step 3: resume the active queues */
+ for (i = 0; i < dev->queue_pairs; i++)
+ dev->ops->enable_qp(dev, i, 1);
+
+exit:
+ pthread_mutex_unlock(&dev->mutex);
+}
+
static int
virtio_user_dev_setup(struct virtio_user_dev *dev)
{
@@ -267,21 +313,32 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
dev->vhostfds = NULL;
dev->tapfds = NULL;
- if (is_vhost_user_by_type(dev->path)) {
- dev->ops = &ops_user;
- } else {
- dev->ops = &ops_kernel;
-
- dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
- dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
- if (!dev->vhostfds || !dev->tapfds) {
- PMD_INIT_LOG(ERR, "Failed to malloc");
+ if (dev->is_server) {
+ if (access(dev->path, F_OK) == 0 &&
+ !is_vhost_user_by_type(dev->path)) {
+ PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
return -1;
}
-
- for (q = 0; q < dev->max_queue_pairs; ++q) {
- dev->vhostfds[q] = -1;
- dev->tapfds[q] = -1;
+ dev->ops = &ops_user;
+ } else {
+ if (is_vhost_user_by_type(dev->path)) {
+ dev->ops = &ops_user;
+ } else {
+ dev->ops = &ops_kernel;
+
+ dev->vhostfds = malloc(dev->max_queue_pairs *
+ sizeof(int));
+ dev->tapfds = malloc(dev->max_queue_pairs *
+ sizeof(int));
+ if (!dev->vhostfds || !dev->tapfds) {
+ PMD_INIT_LOG(ERR, "Failed to malloc");
+ return -1;
+ }
+
+ for (q = 0; q < dev->max_queue_pairs; ++q) {
+ dev->vhostfds[q] = -1;
+ dev->tapfds[q] = -1;
+ }
}
}
@@ -314,17 +371,22 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_CSUM | \
1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
+ 1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_VERSION_1)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac, char **ifname)
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order)
{
+ pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
+ dev->started = 0;
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
dev->mac_specified = 0;
+ dev->unsupported_features = 0;
parse_mac(dev, mac);
if (*ifname) {
@@ -337,18 +399,45 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
return -1;
}
- if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
- PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
- return -1;
+ if (!dev->is_server) {
+ if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER,
+ NULL) < 0) {
+ PMD_INIT_LOG(ERR, "set_owner fails: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+ &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ } else {
+		/* We just pretend vhost-user can support all these features.
+		 * Note that this could be problematic if some feature is
+		 * negotiated but not actually supported by the vhost-user
+		 * backend that connects later.
+		 */
+ dev->device_features = VIRTIO_USER_SUPPORTED_FEATURES;
}
- if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
- &dev->device_features) < 0) {
- PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
- return -1;
+ if (!mrg_rxbuf) {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MRG_RXBUF);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MRG_RXBUF);
+ }
+
+ if (!in_order) {
+ dev->device_features &= ~(1ull << VIRTIO_F_IN_ORDER);
+ dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
}
- if (dev->mac_specified)
+
+ if (dev->mac_specified) {
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+ } else {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+ }
if (cq) {
/* device does not really need to know anything about CQ,
@@ -363,6 +452,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_RX);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_CTRL_VLAN);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+ dev->unsupported_features |= (1ull << VIRTIO_NET_F_MQ);
+ dev->unsupported_features |=
+ (1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
}
/* The backend will not report this feature, we add it explicitly */
@@ -370,6 +467,16 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
+ dev->unsupported_features |= ~VIRTIO_USER_SUPPORTED_FEATURES;
+
+ if (rte_mem_event_callback_register(VIRTIO_USER_MEM_EVENT_CLB_NAME,
+ virtio_user_mem_event_cb, dev)) {
+ if (rte_errno != ENOTSUP) {
+ PMD_INIT_LOG(ERR, "Failed to register mem event"
+ " callback\n");
+ return -1;
+ }
+ }
return 0;
}
@@ -381,6 +488,8 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
virtio_user_stop_device(dev);
+ rte_mem_event_callback_unregister(VIRTIO_USER_MEM_EVENT_CLB_NAME, dev);
+
for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
close(dev->callfds[i]);
close(dev->kickfds[i]);
@@ -388,6 +497,11 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
close(dev->vhostfd);
+ if (dev->is_server && dev->listenfd >= 0) {
+ close(dev->listenfd);
+ dev->listenfd = -1;
+ }
+
if (dev->vhostfds) {
for (i = 0; i < dev->max_queue_pairs; ++i)
close(dev->vhostfds[i]);
@@ -396,9 +510,12 @@ virtio_user_dev_uninit(struct virtio_user_dev *dev)
}
free(dev->ifname);
+
+ if (dev->is_server)
+ unlink(dev->path);
}
-static uint8_t
+uint8_t
virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
{
uint16_t i;
@@ -410,11 +527,17 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
return -1;
}
- for (i = 0; i < q_pairs; ++i)
- ret |= dev->ops->enable_qp(dev, i, 1);
- for (i = q_pairs; i < dev->max_queue_pairs; ++i)
- ret |= dev->ops->enable_qp(dev, i, 0);
-
+	/* In server mode, queue pairs cannot be enabled while vhostfd is
+	 * invalid; always return 0 in that case.
+	 */
+ if (dev->vhostfd >= 0) {
+ for (i = 0; i < q_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 1);
+ for (i = q_pairs; i < dev->max_queue_pairs; ++i)
+ ret |= dev->ops->enable_qp(dev, i, 0);
+ } else if (!dev->is_server) {
+ ret = ~0;
+ }
dev->queue_pairs = q_pairs;
return ret;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 64467b4f..d6e0e137 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -6,6 +6,7 @@
#define _VIRTIO_USER_DEV_H
#include <limits.h>
+#include <stdbool.h>
#include "../virtio_pci.h"
#include "../virtio_ring.h"
#include "vhost.h"
@@ -13,6 +14,8 @@
struct virtio_user_dev {
/* for vhost_user backend */
int vhostfd;
+ int listenfd; /* listening fd */
+ bool is_server; /* server or client mode */
/* for vhost_kernel backend */
char *ifname;
@@ -30,19 +33,24 @@ struct virtio_user_dev {
* and will be sync with device
*/
uint64_t device_features; /* supported features by device */
+ uint64_t unsupported_features; /* unsupported features mask */
uint8_t status;
- uint8_t port_id;
+ uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
struct virtio_user_backend_ops *ops;
+ pthread_mutex_t mutex;
+ bool started;
};
int is_vhost_user_by_type(const char *path);
int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac, char **ifname);
+ int cq, int queue_size, const char *mac, char **ifname,
+ int mrg_rxbuf, int in_order);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
+uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
#endif
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 26364900..525d16ca 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -24,15 +24,92 @@
#define virtio_user_get_dev(hw) \
((struct virtio_user_dev *)(hw)->virtio_user_dev)
+static int
+virtio_user_server_reconnect(struct virtio_user_dev *dev)
+{
+ int ret;
+ int flag;
+ int connectfd;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+
+ connectfd = accept(dev->listenfd, NULL, NULL);
+ if (connectfd < 0)
+ return -1;
+
+ dev->vhostfd = connectfd;
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
+ &dev->device_features) < 0) {
+ PMD_INIT_LOG(ERR, "get_features failed: %s",
+ strerror(errno));
+ return -1;
+ }
+
+	/* mask out features the vhost-user backend does not support */
+ dev->device_features &= ~(dev->unsupported_features);
+
+ dev->features &= dev->device_features;
+
+	flag = fcntl(connectfd, F_GETFL);
+ fcntl(connectfd, F_SETFL, flag | O_NONBLOCK);
+
+ ret = virtio_user_start_device(dev);
+ if (ret < 0)
+ return -1;
+
+ if (dev->queue_pairs > 1) {
+ ret = virtio_user_handle_mq(dev, dev->queue_pairs);
+ if (ret != 0) {
+			PMD_INIT_LOG(ERR, "Failed to enable multi-queue pairs!");
+ return -1;
+ }
+ }
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return -1;
+ }
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler,
+ eth_dev);
+ eth_dev->intr_handle->fd = connectfd;
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -1;
+ }
+ }
+	PMD_INIT_LOG(NOTICE, "server mode virtio-user reconnection succeeded!");
+ return 0;
+}
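Reconnection is driven from the device-config read path further down: in server mode, reading link status attempts an accept() on the listen socket. From the application side that means simply polling the link is enough to re-establish a dropped backend. A fragment sketch (port_id is an illustrative variable):

    /* Sketch: polling link status also drives server-mode
     * reconnection in this PMD. */
    struct rte_eth_link link;

    rte_eth_link_get_nowait(port_id, &link);
    if (link.link_status == ETH_LINK_UP)
            printf("port %u is back up\n", port_id);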
+
static void
virtio_user_delayed_handler(void *param)
{
struct virtio_hw *hw = (struct virtio_hw *)param;
- struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[hw->port_id];
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- rte_intr_callback_unregister(dev->intr_handle,
- virtio_interrupt_handler,
- dev);
+ if (rte_intr_disable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt disable failed");
+ return;
+ }
+ rte_intr_callback_unregister(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+ if (dev->is_server) {
+ if (dev->vhostfd >= 0) {
+ close(dev->vhostfd);
+ dev->vhostfd = -1;
+ }
+ eth_dev->intr_handle->fd = dev->listenfd;
+ rte_intr_callback_register(eth_dev->intr_handle,
+ virtio_interrupt_handler, eth_dev);
+ if (rte_intr_enable(eth_dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return;
+ }
+ }
}
static void
@@ -67,12 +144,10 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
dev->status &= (~VIRTIO_NET_S_LINK_UP);
PMD_DRV_LOG(ERR, "virtio-user port %u is down",
hw->port_id);
- /* Only client mode is available now. Once the
- * connection is broken, it can never be up
- * again. Besides, this function could be called
- * in the process of interrupt handling,
- * callback cannot be unregistered here, set an
- * alarm to do it.
+
+			/* This function may be called from the
+			 * interrupt-handling path, where the callback
+			 * cannot be unregistered; set an alarm to do it.
*/
rte_eal_alarm_set(1,
virtio_user_delayed_handler,
@@ -85,7 +160,12 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
PMD_DRV_LOG(ERR, "error clearing O_NONBLOCK flag");
return;
}
+ } else if (dev->is_server) {
+ dev->status &= (~VIRTIO_NET_S_LINK_UP);
+ if (virtio_user_server_reconnect(dev) >= 0)
+ dev->status |= VIRTIO_NET_S_LINK_UP;
}
+
*(uint16_t *)dst = dev->status;
}
@@ -278,12 +358,19 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_QUEUE_SIZE,
#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
VIRTIO_USER_ARG_INTERFACE_NAME,
+#define VIRTIO_USER_ARG_SERVER_MODE "server"
+ VIRTIO_USER_ARG_SERVER_MODE,
+#define VIRTIO_USER_ARG_MRG_RXBUF "mrg_rxbuf"
+ VIRTIO_USER_ARG_MRG_RXBUF,
+#define VIRTIO_USER_ARG_IN_ORDER "in_order"
+ VIRTIO_USER_ARG_IN_ORDER,
NULL
};
#define VIRTIO_USER_DEF_CQ_EN 0
#define VIRTIO_USER_DEF_Q_NUM 1
#define VIRTIO_USER_DEF_Q_SZ 256
+#define VIRTIO_USER_DEF_SERVER_MODE 0
static int
get_string_arg(const char *key __rte_unused,
@@ -349,7 +436,8 @@ virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
hw->use_msix = 1;
hw->modern = 0;
hw->use_simple_rx = 0;
- hw->use_simple_tx = 0;
+ hw->use_inorder_rx = 0;
+ hw->use_inorder_tx = 0;
hw->virtio_user_dev = dev;
return eth_dev;
}
@@ -378,6 +466,9 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
uint64_t queues = VIRTIO_USER_DEF_Q_NUM;
uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
+ uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
+ uint64_t mrg_rxbuf = 1;
+ uint64_t in_order = 1;
char *path = NULL;
char *ifname = NULL;
char *mac_addr = NULL;
@@ -445,6 +536,15 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_SERVER_MODE) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_SERVER_MODE,
+ &get_integer_arg, &server_mode) < 0) {
+			PMD_INIT_LOG(ERR, "failed to parse %s",
+ VIRTIO_USER_ARG_SERVER_MODE);
+ goto end;
+ }
+ }
+
if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) {
if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM,
&get_integer_arg, &cq) < 0) {
@@ -468,7 +568,27 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
goto end;
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MRG_RXBUF) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MRG_RXBUF,
+ &get_integer_arg, &mrg_rxbuf) < 0) {
+			PMD_INIT_LOG(ERR, "failed to parse %s",
+ VIRTIO_USER_ARG_MRG_RXBUF);
+ goto end;
+ }
+ }
+
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_IN_ORDER) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_IN_ORDER,
+ &get_integer_arg, &in_order) < 0) {
+			PMD_INIT_LOG(ERR, "failed to parse %s",
+ VIRTIO_USER_ARG_IN_ORDER);
+ goto end;
+ }
+ }
+
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ struct virtio_user_dev *vu_dev;
+
eth_dev = virtio_user_eth_dev_alloc(dev);
if (!eth_dev) {
PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
@@ -476,12 +596,19 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
hw = eth_dev->data->dev_private;
+ vu_dev = virtio_user_get_dev(hw);
+ if (server_mode == 1)
+ vu_dev->is_server = true;
+ else
+ vu_dev->is_server = false;
if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
- queue_size, mac_addr, &ifname) < 0) {
+ queue_size, mac_addr, &ifname, mrg_rxbuf,
+ in_order) < 0) {
PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
goto end;
}
+
} else {
eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
if (!eth_dev)
@@ -494,6 +621,8 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
virtio_user_eth_dev_free(eth_dev);
goto end;
}
+
+ rte_eth_dev_probing_finish(eth_dev);
ret = 0;
end:
@@ -552,4 +681,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"cq=<int> "
"queue_size=<int> "
"queues=<int> "
- "iface=<string>");
+ "iface=<string> "
+ "server=<0|1> "
+ "mrg_rxbuf=<0|1> "
+ "in_order=<0|1>");
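The three new devargs can be exercised end to end without touching the code paths directly. A sketch of probing a server-mode, in-order virtio-user port from an application (socket path and argv layout are illustrative):

    #include <rte_eal.h>

    int
    main(int argc, char **argv)
    {
            char *args[] = {
                    argv[0],
                    "--vdev=net_virtio_user0,path=/tmp/vu0.sock,"
                    "server=1,mrg_rxbuf=0,in_order=1",
            };

            (void)argc;
            if (rte_eal_init(2, args) < 0)
                    return -1;
            /* ...configure and start port 0 as usual... */
            return 0;
    }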
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index a7d0a9cb..56a77cc7 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -74,6 +74,14 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
desc_idx = used_idx;
rte_pktmbuf_free(vq->sw_ring[desc_idx]);
vq->vq_free_cnt++;
+ } else if (hw->use_inorder_rx) {
+ desc_idx = (uint16_t)uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq_ring_free_inorder(vq, desc_idx, 1);
} else {
desc_idx = (uint16_t)uep->id;
dxp = &vq->vq_descx[desc_idx];
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 14364f35..26518ed9 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -306,6 +306,8 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
+ uint16_t num);
static inline void
vq_update_avail_idx(struct virtqueue *vq)
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 6bfbf019..f1141da6 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -8,6 +8,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_vmxnet3_uio.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
@@ -15,7 +16,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
#
# CFLAGS for icc
#
-CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869
+CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
#
diff --git a/drivers/net/vmxnet3/base/upt1_defs.h b/drivers/net/vmxnet3/base/upt1_defs.h
index cf9141b2..5fd7a397 100644
--- a/drivers/net/vmxnet3/base/upt1_defs.h
+++ b/drivers/net/vmxnet3/base/upt1_defs.h
@@ -1,9 +1,6 @@
-/*********************************************************
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2007 VMware, Inc. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- *
- *********************************************************/
+ */
/* upt1_defs.h
*
diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
index a455e270..bbec708c 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -1,9 +1,6 @@
-/*********************************************************
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (C) 2007 VMware, Inc. All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- *
- *********************************************************/
+ */
/*
* vmxnet3_defs.h --
@@ -327,7 +324,32 @@ struct Vmxnet3_RxCompDescExt {
uint8 segCnt; /* Number of aggregated packets */
uint8 dupAckCnt; /* Number of duplicate Acks */
__le16 tsDelta; /* TCP timestamp difference */
- __le32 dword2[2];
+ __le32 dword2;
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint32 gen : 1; /* generation bit */
+ uint32 type : 7; /* completion type */
+ uint32 fcs : 1; /* Frame CRC correct */
+ uint32 frg : 1; /* IP Fragment */
+ uint32 v4 : 1; /* IPv4 */
+ uint32 v6 : 1; /* IPv6 */
+ uint32 ipc : 1; /* IP Checksum Correct */
+ uint32 tcp : 1; /* TCP packet */
+ uint32 udp : 1; /* UDP packet */
+ uint32 tuc : 1; /* TCP/UDP Checksum Correct */
+ uint32 mss : 16;
+#else
+ uint32 mss : 16;
+ uint32 tuc : 1; /* TCP/UDP Checksum Correct */
+ uint32 udp : 1; /* UDP packet */
+ uint32 tcp : 1; /* TCP packet */
+ uint32 ipc : 1; /* IP Checksum Correct */
+ uint32 v6 : 1; /* IPv6 */
+ uint32 v4 : 1; /* IPv4 */
+ uint32 frg : 1; /* IP Fragment */
+ uint32 fcs : 1; /* Frame CRC correct */
+ uint32 type : 7; /* completion type */
+ uint32 gen : 1; /* generation bit */
+#endif /* __BIG_ENDIAN_BITFIELD */
}
#include "vmware_pack_end.h"
Vmxnet3_RxCompDescExt;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 4e68aae6..2613cd13 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -20,7 +20,6 @@
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
@@ -43,6 +42,24 @@
#define VMXNET3_TX_MAX_SEG UINT8_MAX
+#define VMXNET3_TX_OFFLOAD_CAP \
+ (DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define VMXNET3_RX_OFFLOAD_CAP \
+ (DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_LRO | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_CRC_STRIP)
+
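These capability masks feed the port-level offload API that this release moves the PMD to (see the rxmode conversions in the hunks below). A configuration sketch on the application side, assuming a valid port_id:

    struct rte_eth_dev_info info;
    struct rte_eth_conf conf = { 0 };

    /* Sketch: request checksum + LRO, clamped to what the PMD
     * actually advertises. */
    rte_eth_dev_info_get(port_id, &info);
    conf.rxmode.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
                            DEV_RX_OFFLOAD_TCP_LRO) &
                           info.rx_offload_capa;
    rte_eth_dev_configure(port_id, 1, 1, &conf);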
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
@@ -73,7 +90,7 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vid, int on);
static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static void vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
+static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void vmxnet3_interrupt_handler(void *param);
@@ -151,66 +168,14 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
if (mz)
rte_memzone_free(mz);
return rte_memzone_reserve_aligned(z_name, size, socket_id,
- 0, align);
+ RTE_MEMZONE_IOVA_CONTIG, align);
}
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
-}
-
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to read from.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-
-static int
-vmxnet3_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- * - Pointer to the structure rte_eth_dev to write to.
- * - Pointer to the buffer to be saved with the link status.
- *
- * @return
- * - On success, zero.
- * - On failure, negative value.
- */
-static int
-vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
}
/*
@@ -267,6 +232,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
struct rte_pci_device *pci_dev;
struct vmxnet3_hw *hw = eth_dev->data->dev_private;
uint32_t mac_hi, mac_lo, ver;
+ struct rte_eth_link link;
PMD_INIT_FUNC_TRACE();
@@ -369,6 +335,13 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));
+ /* set the initial link status */
+ memset(&link, 0, sizeof(link));
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
+ rte_eth_linkstatus_set(eth_dev, &link);
+
return 0;
}
@@ -612,9 +585,12 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
uint32_t mtu = dev->data->mtu;
Vmxnet3_DriverShared *shared = hw->shared;
Vmxnet3_DSDevRead *devRead = &shared->devRead;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
uint32_t i;
int ret;
+ hw->mtu = mtu;
+
shared->magic = VMXNET3_REV1_MAGIC;
devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;
@@ -644,6 +620,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];
+ txq->shared = &hw->tqd_start[i];
+
tqd->ctrl.txNumDeferred = 0;
tqd->ctrl.txThreshold = 1;
tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
@@ -664,6 +642,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+ rxq->shared = &hw->rqd_start[i];
+
rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
rqd->conf.compRingBasePA = rxq->comp_ring.basePA;
@@ -685,10 +665,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
devRead->rxFilterConf.rxMode = 0;
/* Setting up feature flags */
- if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+ if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
- if (dev->data->dev_conf.rxmode.enable_lro) {
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
devRead->misc.uptFeatures |= VMXNET3_F_LRO;
devRead->misc.maxNumRxSG = 0;
}
@@ -853,7 +833,10 @@ vmxnet3_dev_stop(struct rte_eth_dev *dev)
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
- vmxnet3_dev_atomic_write_link_status(dev, &link);
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
+ rte_eth_linkstatus_set(dev, &link);
}
/*
@@ -1054,18 +1037,16 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->q_errors[i] = rxStats.pktsRxError;
stats->ierrors += rxStats.pktsRxError;
- stats->rx_nombuf += rxStats.pktsRxOutOfBuf;
+ stats->imissed += rxStats.pktsRxOutOfBuf;
}
return 0;
}
static void
-vmxnet3_dev_info_get(struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
@@ -1073,7 +1054,6 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
- dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
@@ -1090,17 +1070,10 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
};
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO;
-
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
+ dev_info->rx_queue_offload_capa = 0;
+ dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
+ dev_info->tx_queue_offload_capa = 0;
}
static const uint32_t *
@@ -1117,13 +1090,14 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
return NULL;
}
-static void
+static int
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
vmxnet3_write_mac(hw, mac_addr->addr_bytes);
+ return 0;
}
/* return 0 means link status changed, -1 means not changed */
@@ -1132,25 +1106,21 @@ __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- struct rte_eth_link old = { 0 }, link;
+ struct rte_eth_link link;
uint32_t ret;
memset(&link, 0, sizeof(link));
- vmxnet3_dev_atomic_read_link_status(dev, &old);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
- if (ret & 0x1) {
+ if (ret & 0x1)
link.link_status = ETH_LINK_UP;
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_AUTONEG;
- }
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_speed = ETH_SPEED_NUM_10G;
+ link.link_autoneg = ETH_LINK_FIXED;
- vmxnet3_dev_atomic_write_link_status(dev, &link);
-
- return (old.link_status == link.link_status) ? -1 : 0;
+ return rte_eth_linkstatus_set(dev, &link);
}
static int
@@ -1197,8 +1167,9 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
else
memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1260,9 +1231,10 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct vmxnet3_hw *hw = dev->data->dev_private;
Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
uint32_t *vf_table = devRead->rxFilterConf.vfTable;
+ uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
else
devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
@@ -1272,7 +1244,7 @@ vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
else
memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
@@ -1348,9 +1320,7 @@ RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_INIT(vmxnet3_init_log);
-static void
-vmxnet3_init_log(void)
+RTE_INIT(vmxnet3_init_log)
{
vmxnet3_logtype_init = rte_log_register("pmd.net.vmxnet3.init");
if (vmxnet3_logtype_init >= 0)
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index b2a8cf35..d3f2b352 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -87,6 +87,7 @@ struct vmxnet3_hw {
uint64_t queueDescPA;
uint16_t queue_desc_len;
+ uint16_t mtu;
VMXNET3_RSSConf *rss_conf;
uint64_t rss_confPA;
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index 552180e8..50992349 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -14,7 +14,7 @@
#define VMXNET3_DEF_RX_RING_SIZE 128
/* Default rx data ring desc size */
-#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 256
#define VMXNET3_SUCCESS 0
#define VMXNET3_FAIL -1
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 3a8c62fc..cf85f3d6 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -457,6 +457,14 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
struct Vmxnet3_TxDataDesc *tdd;
+ /* Skip empty packets */
+ if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
+ txq->stats.drop_total++;
+ rte_pktmbuf_free(txm);
+ nb_tx++;
+ continue;
+ }
+
tdd = (struct Vmxnet3_TxDataDesc *)
((uint8 *)txq->data_ring.base +
txq->cmd_ring.next2fill *
@@ -477,6 +485,11 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* maximum size of mbuf segment size.
*/
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
+
+ /* Skip empty segments */
+ if (unlikely(m_seg->data_len == 0))
+ continue;
+
if (copy_size) {
uint64 offset =
(uint64)txq->cmd_ring.next2fill *
@@ -646,37 +659,154 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
return i;
}
-
-/* Receive side checksum and other offloads */
-static void
-vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
+/* MSS is not provided by vmxnet3; guess one from the available information */
+static uint16_t
+vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+ struct rte_mbuf *rxm)
{
- /* Check for RSS */
- if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
- rxm->ol_flags |= PKT_RX_RSS_HASH;
- rxm->hash.rss = rcd->rssHash;
- }
+ uint32_t hlen, slen;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct tcp_hdr *tcp_hdr;
+ char *ptr;
+
+ RTE_ASSERT(rcd->tcp);
+
+ ptr = rte_pktmbuf_mtod(rxm, char *);
+ slen = rte_pktmbuf_data_len(rxm);
+ hlen = sizeof(struct ether_hdr);
- /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
if (rcd->v4) {
- struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
- struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+ if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
+ return hw->mtu - sizeof(struct ipv4_hdr)
+ - sizeof(struct tcp_hdr);
+
+ ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
+ hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+ IPV4_IHL_MULTIPLIER;
+ } else if (rcd->v6) {
+ if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
+ return hw->mtu - sizeof(struct ipv6_hdr) -
+ sizeof(struct tcp_hdr);
+
+ ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
+ hlen += sizeof(struct ipv6_hdr);
+ if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
+ int frag;
+
+ rte_net_skip_ip6_ext(ipv6_hdr->proto, rxm,
+ &hlen, &frag);
+ }
+ }
+
+ if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
+ return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+ sizeof(struct ether_hdr);
- if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
- rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
- else
- rxm->packet_type = RTE_PTYPE_L3_IPV4;
+ tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+ hlen += (tcp_hdr->data_off & 0xf0) >> 2;
- if (!rcd->cnc) {
- if (!rcd->ipc)
- rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ if (rxm->udata64 > 1)
+ return (rte_pktmbuf_pkt_len(rxm) - hlen +
+ rxm->udata64 - 1) / rxm->udata64;
+ else
+ return hw->mtu - hlen + sizeof(struct ether_hdr);
+}
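The udata64 branch above is a ceiling division: with segCnt aggregated segments, the guessed MSS is payload bytes per segment, rounded up. Restated as a one-liner (payload_len and seg_cnt are illustrative names for pkt_len - hlen and rcde->segCnt):

    /* Sketch: mss = ceil(payload_len / seg_cnt) */
    uint16_t mss = (payload_len + seg_cnt - 1) / seg_cnt;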
- if ((rcd->tcp || rcd->udp) && !rcd->tuc)
- rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+/* Receive side checksum and other offloads */
+static inline void
+vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+ struct rte_mbuf *rxm, const uint8_t sop)
+{
+ uint64_t ol_flags = rxm->ol_flags;
+ uint32_t packet_type = rxm->packet_type;
+
+ /* Offloads set in sop */
+ if (sop) {
+ /* Set packet type */
+ packet_type |= RTE_PTYPE_L2_ETHER;
+
+ /* Check large packet receive */
+ if (VMXNET3_VERSION_GE_2(hw) &&
+ rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+ const Vmxnet3_RxCompDescExt *rcde =
+ (const Vmxnet3_RxCompDescExt *)rcd;
+
+ rxm->tso_segsz = rcde->mss;
+ rxm->udata64 = rcde->segCnt;
+ ol_flags |= PKT_RX_LRO;
+ }
+ } else { /* Offloads set in eop */
+ /* Check for RSS */
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+ ol_flags |= PKT_RX_RSS_HASH;
+ rxm->hash.rss = rcd->rssHash;
+ }
+
+ /* Check for hardware stripped VLAN tag */
+ if (rcd->ts) {
+ ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+ }
+
+ /* Check packet type, checksum errors, etc. */
+ if (rcd->cnc) {
+ ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else {
+ if (rcd->v4) {
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ if (rcd->ipc)
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (rcd->tuc) {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (rcd->tcp)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else {
+ if (rcd->tcp) {
+ packet_type |= RTE_PTYPE_L4_TCP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (rcd->udp) {
+ packet_type |= RTE_PTYPE_L4_UDP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ } else if (rcd->v6) {
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ if (rcd->tuc) {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (rcd->tcp)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else {
+ if (rcd->tcp) {
+ packet_type |= RTE_PTYPE_L4_TCP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (rcd->udp) {
+ packet_type |= RTE_PTYPE_L4_UDP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ } else {
+ packet_type |= RTE_PTYPE_UNKNOWN;
+ }
+
+ /* Old variants of vmxnet3 do not provide MSS */
+ if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+ rxm->tso_segsz = vmxnet3_guess_mss(hw,
+ rcd, rxm);
}
- } else {
- rxm->packet_type = RTE_PTYPE_UNKNOWN;
}
+
+ rxm->ol_flags = ol_flags;
+ rxm->packet_type = packet_type;
}
/*
@@ -776,6 +906,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
+ rxm->packet_type = 0;
/*
* If this is the first buffer of the received packet,
@@ -807,29 +938,28 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
rxq->start_seg = rxm;
- vmxnet3_rx_offload(rcd, rxm);
+ rxq->last_seg = rxm;
+ vmxnet3_rx_offload(hw, rcd, rxm, 1);
} else {
struct rte_mbuf *start = rxq->start_seg;
RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
- start->pkt_len += rxm->data_len;
- start->nb_segs++;
+ if (rxm->data_len) {
+ start->pkt_len += rxm->data_len;
+ start->nb_segs++;
- rxq->last_seg->next = rxm;
+ rxq->last_seg->next = rxm;
+ rxq->last_seg = rxm;
+ } else {
+ rte_pktmbuf_free_seg(rxm);
+ }
}
- rxq->last_seg = rxm;
if (rcd->eop) {
struct rte_mbuf *start = rxq->start_seg;
- /* Check for hardware stripped VLAN tag */
- if (rcd->ts) {
- start->ol_flags |= (PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED);
- start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
- }
-
+ vmxnet3_rx_offload(hw, rcd, start, 0);
rx_pkts[nb_rx++] = start;
rxq->start_seg = NULL;
}
@@ -883,7 +1013,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
const struct rte_memzone *mz;
@@ -895,12 +1025,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
- ETH_TXQ_FLAGS_NOXSUMSCTP) {
- PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
- return -EINVAL;
- }
-
txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
RTE_CACHE_LINE_SIZE);
if (txq == NULL) {
@@ -910,7 +1034,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
- txq->shared = &hw->tqd_start[queue_idx];
+ txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
txq->hw = hw;
txq->qid = queue_idx;
txq->stopped = TRUE;
@@ -1013,7 +1137,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->mp = mp;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->shared = &hw->rqd_start[queue_idx];
+ rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
rxq->hw = hw;
rxq->qid1 = queue_idx;
rxq->qid2 = queue_idx + hw->num_rx_queues;
diff --git a/drivers/raw/Makefile b/drivers/raw/Makefile
index da7c8b44..8e29b4a5 100644
--- a/drivers/raw/Makefile
+++ b/drivers/raw/Makefile
@@ -5,5 +5,10 @@ include $(RTE_SDK)/mk/rte.vars.mk
# DIRS-$(<configuration>) += <directory>
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += skeleton_rawdev
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/raw/dpaa2_cmdif/Makefile b/drivers/raw/dpaa2_cmdif/Makefile
new file mode 100644
index 00000000..9b863dda
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_cmdif.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+
+EXPORT_MAP := rte_pmd_dpaa2_cmdif_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += dpaa2_cmdif.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV)-include += rte_pmd_dpaa2_cmdif.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
new file mode 100644
index 00000000..469960a3
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif.c
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_atomic.h>
+#include <rte_interrupts.h>
+#include <rte_branch_prediction.h>
+#include <rte_lcore.h>
+
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_cmdif_logs.h"
+#include "rte_pmd_dpaa2_cmdif.h"
+
+/* Dynamic log type identifier */
+int dpaa2_cmdif_logtype;
+
+/* CMDIF driver name */
+#define DPAA2_CMDIF_PMD_NAME dpaa2_dpci
+
+/* CMDIF driver object */
+static struct rte_vdev_driver dpaa2_cmdif_drv;
+
+/*
+ * This API provides the DPCI device ID in 'attr_value'.
+ * The device ID shall be passed by GPP to the AIOP using CMDIF commands.
+ */
+static int
+dpaa2_cmdif_get_attr(struct rte_rawdev *dev,
+ const char *attr_name,
+ uint64_t *attr_value)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(attr_name);
+
+ if (!attr_value) {
+ DPAA2_CMDIF_ERR("Invalid arguments for getting attributes");
+ return -EINVAL;
+ }
+ *attr_value = cidev->dpci_id;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_enqueue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_send_cnxt;
+ struct dpaa2_queue *txq;
+ struct qbman_fd fd;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_send_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ txq = &(cidev->tx_queue[cmdif_send_cnxt->priority]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ /* Set some of the FD parameters to 0.
+ * For performance reasons do not memset the entire FD.
+ */
+ fd.simple.bpid_offset = 0;
+ fd.simple.ctrl = 0;
+
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(buffers[0]->buf_addr));
+ DPAA2_SET_FD_LEN(&fd, cmdif_send_cnxt->size);
+ DPAA2_SET_FD_FRC(&fd, cmdif_send_cnxt->frc);
+ DPAA2_SET_FD_FLC(&fd, cmdif_send_cnxt->flc);
+
+ /* Enqueue a packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_CMDIF_ERR("Transmit failure with err: %d\n", ret);
+ } while (ret == -EBUSY);
+
+ DPAA2_CMDIF_DP_DEBUG("Successfully transmitted a packet\n");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_dequeue_bufs(struct rte_rawdev *dev,
+ struct rte_rawdev_buf **buffers,
+ unsigned int count,
+ rte_rawdev_obj_t context)
+{
+ struct dpaa2_dpci_dev *cidev = dev->dev_private;
+ struct rte_dpaa2_cmdif_context *cmdif_rcv_cnxt;
+ struct dpaa2_queue *rxq;
+ struct qbman_swp *swp;
+ struct qbman_result *dq_storage;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+ uint8_t status;
+ int ret;
+
+ DPAA2_CMDIF_FUNC_TRACE();
+
+ RTE_SET_USED(count);
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_CMDIF_ERR("Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ cmdif_rcv_cnxt = (struct rte_dpaa2_cmdif_context *)(context);
+ rxq = &(cidev->rx_queue[cmdif_rcv_cnxt->priority]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, rxq->fqid);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_CMDIF_DP_WARN("VDQ cmd not issued. QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ /* Check if previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until the dq_storage is updated with new token by QBMAN */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_CMDIF_DP_DEBUG("No frame is delivered\n");
+ return 0;
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ buffers[0]->buf_addr = (void *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd) + DPAA2_GET_FD_OFFSET(fd));
+ cmdif_rcv_cnxt->size = DPAA2_GET_FD_LEN(fd);
+ cmdif_rcv_cnxt->flc = DPAA2_GET_FD_FLC(fd);
+ cmdif_rcv_cnxt->frc = DPAA2_GET_FD_FRC(fd);
+
+ DPAA2_CMDIF_DP_DEBUG("packet received\n");
+
+ return 1;
+}
+
+static const struct rte_rawdev_ops dpaa2_cmdif_ops = {
+ .attr_get = dpaa2_cmdif_get_attr,
+ .enqueue_bufs = dpaa2_cmdif_enqueue_bufs,
+ .dequeue_bufs = dpaa2_cmdif_dequeue_bufs,
+};
+
+static int
+dpaa2_cmdif_create(const char *name,
+ struct rte_vdev_device *vdev,
+ int socket_id)
+{
+ struct rte_rawdev *rawdev;
+ struct dpaa2_dpci_dev *cidev;
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct dpaa2_dpci_dev),
+ socket_id);
+ if (!rawdev) {
+ DPAA2_CMDIF_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ rawdev->dev_ops = &dpaa2_cmdif_ops;
+ rawdev->device = &vdev->device;
+ rawdev->driver_name = vdev->device.driver->name;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ cidev = rte_dpaa2_alloc_dpci_dev();
+ if (!cidev) {
+ DPAA2_CMDIF_ERR("Unable to allocate CI device");
+ rte_rawdev_pmd_release(rawdev);
+ return -ENODEV;
+ }
+
+ rawdev->dev_private = cidev;
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_destroy(const char *name)
+{
+ int ret;
+ struct rte_rawdev *rdev;
+
+ rdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rdev) {
+ DPAA2_CMDIF_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ /* The primary process will only free the DPCI device */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_dpaa2_free_dpci_dev(rdev->dev_private);
+
+ ret = rte_rawdev_pmd_release(rdev);
+ if (ret)
+ DPAA2_CMDIF_DEBUG("Device cleanup failed");
+
+ return 0;
+}
+
+static int
+dpaa2_cmdif_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret = 0;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_create(name, vdev, rte_socket_id());
+
+ return ret;
+}
+
+static int
+dpaa2_cmdif_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+
+ DPAA2_CMDIF_INFO("Closing %s on NUMA node %d", name, rte_socket_id());
+
+ ret = dpaa2_cmdif_destroy(name);
+
+ return ret;
+}
+
+static struct rte_vdev_driver dpaa2_cmdif_drv = {
+ .probe = dpaa2_cmdif_probe,
+ .remove = dpaa2_cmdif_remove
+};
+
+RTE_PMD_REGISTER_VDEV(DPAA2_CMDIF_PMD_NAME, dpaa2_cmdif_drv);
+
+RTE_INIT(dpaa2_cmdif_init_log)
+{
+ dpaa2_cmdif_logtype = rte_log_register("pmd.raw.dpaa2.cmdif");
+ if (dpaa2_cmdif_logtype >= 0)
+ rte_log_set_level(dpaa2_cmdif_logtype, RTE_LOG_INFO);
+}
diff --git a/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
new file mode 100644
index 00000000..8991e832
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/dpaa2_cmdif_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_CMDIF_LOGS_H__
+#define __DPAA2_CMDIF_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_cmdif_logtype;
+
+#define DPAA2_CMDIF_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_cmdif_logtype, "dpaa2_cmdif: " \
+ fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_cmdif_logtype, "dpaa2_cmdif: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_CMDIF_FUNC_TRACE() DPAA2_CMDIF_DEBUG(">>")
+
+#define DPAA2_CMDIF_INFO(fmt, args...) \
+ DPAA2_CMDIF_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_ERR(fmt, args...) \
+ DPAA2_CMDIF_LOG(ERR, fmt, ## args)
+#define DPAA2_CMDIF_WARN(fmt, args...) \
+ DPAA2_CMDIF_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_CMDIF_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_cmdif: " fmt "\n", ## args)
+
+#define DPAA2_CMDIF_DP_DEBUG(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_CMDIF_DP_INFO(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_CMDIF_DP_WARN(fmt, args...) \
+ DPAA2_CMDIF_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_CMDIF_LOGS_H__ */
diff --git a/drivers/raw/dpaa2_cmdif/meson.build b/drivers/raw/dpaa2_cmdif/meson.build
new file mode 100644
index 00000000..1d146872
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
+deps += ['rawdev', 'mempool_dpaa2', 'bus_vdev']
+sources = files('dpaa2_cmdif.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2_cmdif.h')
diff --git a/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
new file mode 100644
index 00000000..483b66ea
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_CMDIF_H__
+#define __RTE_PMD_DPAA2_CMDIF_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 AIOP CMDIF PMD specific structures.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** The context required in the I/O path for DPAA2 AIOP Command Interface */
+struct rte_dpaa2_cmdif_context {
+ /** Size to populate in QBMAN FD */
+ uint32_t size;
+ /** FRC to populate in QBMAN FD */
+ uint32_t frc;
+ /** FLC to populate in QBMAN FD */
+ uint64_t flc;
+ /** Priority of the command. This priority determines the DPCI queue. */
+ uint8_t priority;
+};
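+
+/*
+ * Illustrative usage sketch (not part of this patch): driving this PMD
+ * through the generic rawdev API. 'dev_id', 'data' and 'data_len' are
+ * assumptions local to the example.
+ *
+ * uint64_t dpci_id;
+ * struct rte_rawdev_buf buf, *bufs[1] = { &buf };
+ * struct rte_dpaa2_cmdif_context cnxt = {
+ * .size = data_len, // bytes to hand to the AIOP
+ * .frc = 0, .flc = 0, // CMDIF command context words
+ * .priority = 0, // selects the DPCI queue
+ * };
+ *
+ * rte_rawdev_get_attr(dev_id, NULL, &dpci_id); // DPCI ID used by CMDIF
+ * buf.buf_addr = data; // payload buffer
+ * rte_rawdev_enqueue_buffers(dev_id, bufs, 1, &cnxt);
+ */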
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_PMD_DPAA2_CMDIF_H__ */
diff --git a/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/raw/dpaa2_cmdif/rte_pmd_dpaa2_cmdif_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/raw/dpaa2_qdma/Makefile b/drivers/raw/dpaa2_qdma/Makefile
new file mode 100644
index 00000000..d88809ea
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_qdma.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_mempool
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_ring
+
+EXPORT_MAP := rte_pmd_dpaa2_qdma_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += dpaa2_qdma.c
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV)-include += rte_pmd_dpaa2_qdma.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.c b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
new file mode 100644
index 00000000..2787d302
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.c
@@ -0,0 +1,1001 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#include <string.h>
+
+#include <rte_eal.h>
+#include <rte_fslmc.h>
+#include <rte_atomic.h>
+#include <rte_lcore.h>
+#include <rte_rawdev.h>
+#include <rte_rawdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+#include <mc/fsl_dpdmai.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+
+#include "dpaa2_qdma.h"
+#include "dpaa2_qdma_logs.h"
+#include "rte_pmd_dpaa2_qdma.h"
+
+/* Dynamic log type identifier */
+int dpaa2_qdma_logtype;
+
+/* QDMA device */
+static struct qdma_device qdma_dev;
+
+/* QDMA H/W queues list */
+TAILQ_HEAD(qdma_hw_queue_list, qdma_hw_queue);
+static struct qdma_hw_queue_list qdma_queue_list
+ = TAILQ_HEAD_INITIALIZER(qdma_queue_list);
+
+/* QDMA Virtual Queues */
+struct qdma_virt_queue *qdma_vqs;
+
+/* QDMA per core data */
+struct qdma_per_core_info qdma_core_info[RTE_MAX_LCORE];
+
+static struct qdma_hw_queue *
+alloc_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_hw_queue *queue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Get a free queue from the list */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next) {
+ if (queue->num_users == 0) {
+ queue->lcore_id = lcore_id;
+ queue->num_users++;
+ break;
+ }
+ }
+
+ return queue;
+}
+
+static void
+free_hw_queue(struct qdma_hw_queue *queue)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ queue->num_users--;
+}
+
+
+static struct qdma_hw_queue *
+get_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_per_core_info *core_info;
+ struct qdma_hw_queue *queue, *temp;
+ uint32_t least_num_users;
+ int num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+
+ /*
+ * Allocate a HW queue if there are less queues
+ * than maximum per core queues configured
+ */
+ if (num_hw_queues < qdma_dev.max_hw_queues_per_core) {
+ queue = alloc_hw_queue(lcore_id);
+ if (queue) {
+ core_info->hw_queues[num_hw_queues] = queue;
+ core_info->num_hw_queues++;
+ return queue;
+ }
+ }
+
+ queue = core_info->hw_queues[0];
+ /* In case there is no queue associated with the core return NULL */
+ if (!queue)
+ return NULL;
+
+ /* Fetch the least loaded H/W queue */
+ least_num_users = core_info->hw_queues[0]->num_users;
+ for (i = 0; i < num_hw_queues; i++) {
+ temp = core_info->hw_queues[i];
+ if (temp->num_users < least_num_users) {
+ least_num_users = temp->num_users;
+ queue = temp;
+ }
+ }
+
+ if (queue)
+ queue->num_users++;
+
+ return queue;
+}
+
+static void
+put_hw_queue(struct qdma_hw_queue *queue)
+{
+ struct qdma_per_core_info *core_info;
+ int lcore_id, num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /*
+ * If this is the last user of the queue free it.
+ * Also remove it from QDMA core info.
+ */
+ if (queue->num_users == 1) {
+ free_hw_queue(queue);
+
+ /* Remove the physical queue from core info */
+ lcore_id = queue->lcore_id;
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+ for (i = 0; i < num_hw_queues; i++) {
+ if (queue == core_info->hw_queues[i])
+ break;
+ }
+ for (; i < num_hw_queues - 1; i++)
+ core_info->hw_queues[i] = core_info->hw_queues[i + 1];
+ core_info->hw_queues[i] = NULL;
+ } else {
+ queue->num_users--;
+ }
+}
+
+int __rte_experimental
+rte_qdma_init(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_init(&qdma_dev.lock);
+
+ return 0;
+}
+
+void __rte_experimental
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_attr->num_hw_queues = qdma_dev.num_hw_queues;
+}
+
+int __rte_experimental
+rte_qdma_reset(void)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before reset.");
+ return -EBUSY;
+ }
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].in_use && (qdma_vqs[i].num_enqueues !=
+ qdma_vqs[i].num_dequeues)) {
+ DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+ return -EBUSY;
+ }
+ }
+
+ /* Reset HW queues */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next)
+ queue->num_users = 0;
+
+ /* Reset and free virtual queues */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ }
+ if (qdma_vqs)
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+
+ /* Reset per core info */
+ memset(&qdma_core_info, 0,
+ sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
+
+ /* Free the FLE pool */
+ if (qdma_dev.fle_pool)
+ rte_mempool_free(qdma_dev.fle_pool);
+
+ /* Reset QDMA device structure */
+ qdma_dev.mode = RTE_QDMA_MODE_HW;
+ qdma_dev.max_hw_queues_per_core = 0;
+ qdma_dev.fle_pool = NULL;
+ qdma_dev.fle_pool_count = 0;
+ qdma_dev.max_vqs = 0;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_configure(struct rte_qdma_config *qdma_config)
+{
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev.state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before config.");
+ return -EBUSY;
+ }
+
+ /* Reset the QDMA device */
+ ret = rte_qdma_reset();
+ if (ret) {
+ DPAA2_QDMA_ERR("Resetting QDMA failed");
+ return ret;
+ }
+
+ /* Set mode */
+ qdma_dev.mode = qdma_config->mode;
+
+ /* Set max HW queue per core */
+ if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
+ DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
+ MAX_HW_QUEUE_PER_CORE);
+ return -EINVAL;
+ }
+ qdma_dev.max_hw_queues_per_core =
+ qdma_config->max_hw_queues_per_core;
+
+ /* Allocate Virtual Queues */
+ qdma_vqs = rte_malloc("qdma_virtual_queues",
+ (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
+ RTE_CACHE_LINE_SIZE);
+ if (!qdma_vqs) {
+ DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+ return -ENOMEM;
+ }
+ qdma_dev.max_vqs = qdma_config->max_vqs;
+
+ /* Allocate FLE pool */
+ qdma_dev.fle_pool = rte_mempool_create("qdma_fle_pool",
+ qdma_config->fle_pool_count, QDMA_FLE_POOL_SIZE,
+ QDMA_FLE_CACHE_SIZE(qdma_config->fle_pool_count), 0,
+ NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
+ if (!qdma_dev.fle_pool) {
+ DPAA2_QDMA_ERR("qdma_fle_pool create failed");
+ rte_free(qdma_vqs);
+ qdma_vqs = NULL;
+ return -ENOMEM;
+ }
+ qdma_dev.fle_pool_count = qdma_config->fle_pool_count;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_start(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 1;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags)
+{
+ char ring_name[32];
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ /* Get a free Virtual Queue */
+ for (i = 0; i < qdma_dev.max_vqs; i++) {
+ if (qdma_vqs[i].in_use == 0)
+ break;
+ }
+
+ /* Return in case no VQ is free */
+ if (i == qdma_dev.max_vqs) {
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ if (qdma_dev.mode == RTE_QDMA_MODE_HW ||
+ (flags & RTE_QDMA_VQ_EXCLUSIVE_PQ)) {
+ /* Allocate HW queue for a VQ */
+ qdma_vqs[i].hw_queue = alloc_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 1;
+ } else {
+ /* Allocate a ring for the Virtual Queue in VQ mode */
+ sprintf(ring_name, "status ring %d", i);
+ qdma_vqs[i].status_ring = rte_ring_create(ring_name,
+ qdma_dev.fle_pool_count, rte_socket_id(), 0);
+ if (!qdma_vqs[i].status_ring) {
+ DPAA2_QDMA_ERR("Status ring creation failed for vq");
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -rte_errno;
+ }
+
+ /* Get a HW queue (shared) for a VQ */
+ qdma_vqs[i].hw_queue = get_hw_queue(lcore_id);
+ qdma_vqs[i].exclusive_hw_queue = 0;
+ }
+
+ if (qdma_vqs[i].hw_queue == NULL) {
+ DPAA2_QDMA_ERR("No H/W queue available for VQ");
+ if (qdma_vqs[i].status_ring)
+ rte_ring_free(qdma_vqs[i].status_ring);
+ qdma_vqs[i].status_ring = NULL;
+ rte_spinlock_unlock(&qdma_dev.lock);
+ return -ENODEV;
+ }
+
+ qdma_vqs[i].in_use = 1;
+ qdma_vqs[i].lcore_id = lcore_id;
+
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return i;
+}
+
+static void
+dpaa2_qdma_populate_fle(struct qbman_fle *fle,
+ uint64_t src, uint64_t dest,
+ size_t len, uint32_t flags)
+{
+ struct qdma_sdd *sdd;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ sdd = (struct qdma_sdd *)((uint8_t *)(fle) +
+ (DPAA2_QDMA_MAX_FLE * sizeof(struct qbman_fle)));
+
+ /* first frame list to source descriptor */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sdd));
+ DPAA2_SET_FLE_LEN(fle, (2 * (sizeof(struct qdma_sdd))));
+
+ /* source and destination descriptor */
+ DPAA2_SET_SDD_RD_COHERENT(sdd); /* source descriptor CMD */
+ sdd++;
+ DPAA2_SET_SDD_WR_COHERENT(sdd); /* dest descriptor CMD */
+
+ fle++;
+ /* source frame list to source buffer */
+ if (flags & RTE_QDMA_JOB_SRC_PHY) {
+ DPAA2_SET_FLE_ADDR(fle, src);
+ DPAA2_SET_FLE_BMT(fle);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(src));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ fle++;
+ /* destination frame list to destination buffer */
+ if (flags & RTE_QDMA_JOB_DEST_PHY) {
+ DPAA2_SET_FLE_BMT(fle);
+ DPAA2_SET_FLE_ADDR(fle, dest);
+ } else {
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(dest));
+ }
+ DPAA2_SET_FLE_LEN(fle, len);
+
+ /* Final bit: 1, for last frame list */
+ DPAA2_SET_FLE_FIN(fle);
+}
+
+static int
+dpdmai_dev_enqueue(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t txq_id,
+ uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ struct qdma_io_meta *io_meta;
+ struct qbman_fd fd;
+ struct dpaa2_queue *txq;
+ struct qbman_fle *fle;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ txq = &(dpdmai_dev->tx_queue[txq_id]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ /*
+ * Get an FLE/SDD from FLE pool.
+ * Note: IO metadata is before the FLE and SDD memory.
+ */
+ ret = rte_mempool_get(qdma_dev.fle_pool, (void **)(&io_meta));
+ if (ret) {
+ DPAA2_QDMA_DP_WARN("Memory alloc failed for FLE");
+ return ret;
+ }
+
+ /* Set the metadata */
+ io_meta->cnxt = (size_t)job;
+ io_meta->id = vq_id;
+
+ fle = (struct qbman_fle *)(io_meta + 1);
+
+ /* populate Frame descriptor */
+ memset(&fd, 0, sizeof(struct qbman_fd));
+ DPAA2_SET_FD_ADDR(&fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(&fd);
+ DPAA2_SET_FD_FRC(&fd, QDMA_SER_CTX);
+
+ /* Populate FLE */
+ memset(fle, 0, QDMA_FLE_POOL_SIZE);
+ dpaa2_qdma_populate_fle(fle, job->src, job->dest, job->len, job->flags);
+
+ /* Enqueue the packet to the QBMAN */
+ do {
+ ret = qbman_swp_enqueue_multiple(swp, &eqdesc, &fd, NULL, 1);
+ if (ret < 0 && ret != -EBUSY)
+ DPAA2_QDMA_ERR("Transmit failure with err: %d", ret);
+ } while (ret == -EBUSY);
+
+ DPAA2_QDMA_DP_DEBUG("Successfully transmitted a packet");
+
+ return ret;
+}
+
+int __rte_experimental
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ int i, ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < nb_jobs; i++) {
+ ret = rte_qdma_vq_enqueue(vq_id, job[i]);
+ if (ret < 0)
+ break;
+ }
+
+ return i;
+}
+
+int __rte_experimental
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != qdma_vq->lcore_id) {
+ DPAA2_QDMA_ERR("QDMA enqueue for vqid %d on wrong core",
+ vq_id);
+ return -EINVAL;
+ }
+
+ ret = dpdmai_dev_enqueue(dpdmai_dev, qdma_pq->queue_id, vq_id, job);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR("DPDMAI device enqueue failed: %d", ret);
+ return ret;
+ }
+
+ qdma_vq->num_enqueues++;
+
+ return 1;
+}
+
+/* Receive a QDMA job for a given device and queue */
+static int
+dpdmai_dev_dequeue(struct dpaa2_dpdmai_dev *dpdmai_dev,
+ uint16_t rxq_id,
+ uint16_t *vq_id,
+ struct rte_qdma_job **job)
+{
+ struct qdma_io_meta *io_meta;
+ struct dpaa2_queue *rxq;
+ struct qbman_result *dq_storage;
+ struct qbman_pull_desc pulldesc;
+ const struct qbman_fd *fd;
+ struct qbman_swp *swp;
+ struct qbman_fle *fle;
+ uint32_t fqid;
+ uint8_t status;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ rxq = &(dpdmai_dev->rx_queue[rxq_id]);
+ dq_storage = rxq->q_storage->dq_storage[0];
+ fqid = rxq->fqid;
+
+ /* Prepare dequeue descriptor */
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ qbman_pull_desc_set_numframes(&pulldesc, 1);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_QDMA_DP_WARN("VDQ command not issued. QBMAN busy");
+ continue;
+ }
+ break;
+ }
+
+ /* Check if previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+ /* Loop until dq_storage is updated with new token by QBMAN */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ DPAA2_QDMA_DP_DEBUG("No frame is delivered");
+ return 0;
+ }
+
+ /* Get the FD */
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ /*
+ * Fetch metadata from FLE. job and vq_id were set
+ * in metadata in the enqueue operation.
+ */
+ fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ io_meta = (struct qdma_io_meta *)(fle) - 1;
+ if (vq_id)
+ *vq_id = io_meta->id;
+
+ *job = (struct rte_qdma_job *)(size_t)io_meta->cnxt;
+ (*job)->status = DPAA2_GET_FD_ERR(fd);
+
+ /* Free FLE to the pool */
+ rte_mempool_put(qdma_dev.fle_pool, io_meta);
+
+ DPAA2_QDMA_DP_DEBUG("packet received");
+
+ return 1;
+}
+
+int __rte_experimental
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < nb_jobs; i++) {
+ job[i] = rte_qdma_vq_dequeue(vq_id);
+ if (!job[i])
+ break;
+ }
+
+ return i;
+}
+
+struct rte_qdma_job * __rte_experimental
+rte_qdma_vq_dequeue(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ struct rte_qdma_job *job = NULL;
+ struct qdma_virt_queue *temp_qdma_vq;
+ int dequeue_budget = QDMA_DEQUEUE_BUDGET;
+ int ring_count, ret, i;
+ uint16_t temp_vq_id;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Return error in case of wrong lcore_id */
+ if (rte_lcore_id() != (unsigned int)(qdma_vq->lcore_id)) {
+ DPAA2_QDMA_ERR("QDMA dequeue for vqid %d on wrong core",
+ vq_id);
+ return NULL;
+ }
+
+ /* Only dequeue when there are pending jobs on VQ */
+ if (qdma_vq->num_enqueues == qdma_vq->num_dequeues)
+ return NULL;
+
+ if (qdma_vq->exclusive_hw_queue) {
+ /* In case of exclusive queue directly fetch from HW queue */
+ ret = dpdmai_dev_dequeue(dpdmai_dev, qdma_pq->queue_id,
+ NULL, &job);
+ if (ret < 0) {
+ DPAA2_QDMA_ERR(
+ "Dequeue from DPDMAI device failed: %d", ret);
+ return NULL;
+ }
+ } else {
+ /*
+ * Get the QDMA completed jobs from the software ring.
+ * In case they are not available on the ring poke the HW
+ * to fetch completed jobs from corresponding HW queues
+ */
+ ring_count = rte_ring_count(qdma_vq->status_ring);
+ if (ring_count == 0) {
+ /* TODO - How to have right budget */
+ for (i = 0; i < dequeue_budget; i++) {
+ ret = dpdmai_dev_dequeue(dpdmai_dev,
+ qdma_pq->queue_id, &temp_vq_id, &job);
+ if (ret == 0)
+ break;
+ temp_qdma_vq = &qdma_vqs[temp_vq_id];
+ rte_ring_enqueue(temp_qdma_vq->status_ring,
+ (void *)(job));
+ ring_count = rte_ring_count(
+ qdma_vq->status_ring);
+ if (ring_count)
+ break;
+ }
+ }
+
+ /* Dequeue job from the software ring to provide to the user */
+ rte_ring_dequeue(qdma_vq->status_ring, (void **)&job);
+ if (job)
+ qdma_vq->num_dequeues++;
+ }
+
+ return job;
+}
+
+void __rte_experimental
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_status)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (qdma_vq->in_use) {
+ vq_status->exclusive_hw_queue = qdma_vq->exclusive_hw_queue;
+ vq_status->lcore_id = qdma_vq->lcore_id;
+ vq_status->num_enqueues = qdma_vq->num_enqueues;
+ vq_status->num_dequeues = qdma_vq->num_dequeues;
+ vq_status->num_pending_jobs = vq_status->num_enqueues -
+ vq_status->num_dequeues;
+ }
+}
+
+int __rte_experimental
+rte_qdma_vq_destroy(uint16_t vq_id)
+{
+ struct qdma_virt_queue *qdma_vq = &qdma_vqs[vq_id];
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ if (qdma_vq->num_enqueues != qdma_vq->num_dequeues)
+ return -EBUSY;
+
+ rte_spinlock_lock(&qdma_dev.lock);
+
+ if (qdma_vq->exclusive_hw_queue)
+ free_hw_queue(qdma_vq->hw_queue);
+ else {
+ if (qdma_vq->status_ring)
+ rte_ring_free(qdma_vq->status_ring);
+
+ put_hw_queue(qdma_vq->hw_queue);
+ }
+
+ memset(qdma_vq, 0, sizeof(struct qdma_virt_queue));
+
+ rte_spinlock_unlock(&qdma_dev.lock);
+
+ return 0;
+}
+
+void __rte_experimental
+rte_qdma_stop(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev.state = 0;
+}
+
+void __rte_experimental
+rte_qdma_destroy(void)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rte_qdma_reset();
+}
+
+static const struct rte_rawdev_ops dpaa2_qdma_ops;
+
+static int
+add_hw_queues_to_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ for (i = 0; i < dpdmai_dev->num_queues; i++) {
+ queue = rte_zmalloc(NULL, sizeof(struct qdma_hw_queue), 0);
+ if (!queue) {
+ DPAA2_QDMA_ERR(
+ "Memory allocation failed for QDMA queue");
+ return -ENOMEM;
+ }
+
+ queue->dpdmai_dev = dpdmai_dev;
+ queue->queue_id = i;
+
+ TAILQ_INSERT_TAIL(&qdma_queue_list, queue, next);
+ qdma_dev.num_hw_queues++;
+ }
+
+ return 0;
+}
+
+static void
+remove_hw_queues_from_list(struct dpaa2_dpdmai_dev *dpdmai_dev)
+{
+ struct qdma_hw_queue *queue = NULL;
+ struct qdma_hw_queue *tqueue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ TAILQ_FOREACH_SAFE(queue, &qdma_queue_list, next, tqueue) {
+ if (queue->dpdmai_dev == dpdmai_dev) {
+ TAILQ_REMOVE(&qdma_queue_list, queue, next);
+ rte_free(queue);
+ queue = NULL;
+ }
+ }
+}
+
+static int
+dpaa2_dpdmai_dev_uninit(struct rte_rawdev *rawdev)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Remove HW queues from global list */
+ remove_hw_queues_from_list(dpdmai_dev);
+
+ ret = dpdmai_disable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("dmdmai disable failed");
+
+ /* Free the DQ storage of the Rx queues */
+ for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ struct dpaa2_queue *rxq = &(dpdmai_dev->rx_queue[i]);
+
+ if (rxq->q_storage) {
+ dpaa2_free_dq_storage(rxq->q_storage);
+ rte_free(rxq->q_storage);
+ }
+ }
+
+ /* Close the device at the underlying layer */
+ ret = dpdmai_close(&dpdmai_dev->dpdmai, CMD_PRI_LOW, dpdmai_dev->token);
+ if (ret)
+ DPAA2_QDMA_ERR("Failure closing dpdmai device");
+
+ return 0;
+}
+
+static int
+dpaa2_dpdmai_dev_init(struct rte_rawdev *rawdev, int dpdmai_id)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct dpdmai_rx_queue_cfg rx_queue_cfg;
+ struct dpdmai_attr attr;
+ struct dpdmai_rx_queue_attr rx_attr;
+ struct dpdmai_tx_queue_attr tx_attr;
+ int ret, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Open DPDMAI device */
+ dpdmai_dev->dpdmai_id = dpdmai_id;
+ dpdmai_dev->dpdmai.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX];
+ ret = dpdmai_open(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->dpdmai_id, &dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai_open() failed with err: %d", ret);
+ return ret;
+ }
+
+ /* Get DPDMAI attributes */
+ ret = dpdmai_get_attributes(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, &attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpdmai get attributes failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->num_queues = attr.num_of_priorities;
+
+ /* Set up Rx Queues */
+ for (i = 0; i < attr.num_of_priorities; i++) {
+ struct dpaa2_queue *rxq;
+
+ memset(&rx_queue_cfg, 0, sizeof(struct dpdmai_rx_queue_cfg));
+ ret = dpdmai_set_rx_queue(&dpdmai_dev->dpdmai,
+ CMD_PRI_LOW,
+ dpdmai_dev->token,
+ i, &rx_queue_cfg);
+ if (ret) {
+ DPAA2_QDMA_ERR("Setting Rx queue failed with err: %d",
+ ret);
+ goto init_err;
+ }
+
+ /* Allocate DQ storage for the DPDMAI Rx queues */
+ rxq = &(dpdmai_dev->rx_queue[i]);
+ rxq->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq->q_storage) {
+ DPAA2_QDMA_ERR("q_storage allocation failed");
+ ret = -ENOMEM;
+ goto init_err;
+ }
+
+ memset(rxq->q_storage, 0, sizeof(struct queue_storage_info_t));
+ ret = dpaa2_alloc_dq_storage(rxq->q_storage);
+ if (ret) {
+ DPAA2_QDMA_ERR("dpaa2_alloc_dq_storage failed");
+ goto init_err;
+ }
+ }
+
+ /* Get Rx and Tx queues FQID's */
+ for (i = 0; i < DPDMAI_PRIO_NUM; i++) {
+ ret = dpdmai_get_rx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, &rx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->rx_queue[i].fqid = rx_attr.fqid;
+
+ ret = dpdmai_get_tx_queue(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token, i, &tx_attr);
+ if (ret) {
+ DPAA2_QDMA_ERR("Reading device failed with err: %d",
+ ret);
+ goto init_err;
+ }
+ dpdmai_dev->tx_queue[i].fqid = tx_attr.fqid;
+ }
+
+ /* Enable the device */
+ ret = dpdmai_enable(&dpdmai_dev->dpdmai, CMD_PRI_LOW,
+ dpdmai_dev->token);
+ if (ret) {
+ DPAA2_QDMA_ERR("Enabling device failed with err: %d", ret);
+ goto init_err;
+ }
+
+ /* Add the HW queue to the global list */
+ ret = add_hw_queues_to_list(dpdmai_dev);
+ if (ret) {
+ DPAA2_QDMA_ERR("Adding H/W queue to list failed");
+ goto init_err;
+ }
+ DPAA2_QDMA_DEBUG("Initialized dpdmai object successfully");
+
+ return 0;
+init_err:
+ dpaa2_dpdmai_dev_uninit(rawdev);
+ return ret;
+}
+
+static int
+rte_dpaa2_qdma_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ rawdev = rte_rawdev_pmd_allocate(dpaa2_dev->device.name,
+ sizeof(struct dpaa2_dpdmai_dev),
+ rte_socket_id());
+ if (!rawdev) {
+ DPAA2_QDMA_ERR("Unable to allocate rawdevice");
+ return -EINVAL;
+ }
+
+ dpaa2_dev->rawdev = rawdev;
+ rawdev->dev_ops = &dpaa2_qdma_ops;
+ rawdev->device = &dpaa2_dev->device;
+ rawdev->driver_name = dpaa2_drv->driver.name;
+
+ /* Invoke PMD device initialization function */
+ ret = dpaa2_dpdmai_dev_init(rawdev, dpaa2_dev->object_id);
+ if (ret) {
+ rte_rawdev_pmd_release(rawdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rte_dpaa2_qdma_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_rawdev *rawdev = dpaa2_dev->rawdev;
+ int ret;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ dpaa2_dpdmai_dev_uninit(rawdev);
+
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ DPAA2_QDMA_ERR("Device cleanup failed");
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_qdma_pmd = {
+ .drv_flags = RTE_DPAA2_DRV_IOVA_AS_VA,
+ .drv_type = DPAA2_QDMA,
+ .probe = rte_dpaa2_qdma_probe,
+ .remove = rte_dpaa2_qdma_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(dpaa2_qdma, rte_dpaa2_qdma_pmd);
+
+RTE_INIT(dpaa2_qdma_init_log)
+{
+ dpaa2_qdma_logtype = rte_log_register("pmd.raw.dpaa2.qdma");
+ if (dpaa2_qdma_logtype >= 0)
+ rte_log_set_level(dpaa2_qdma_logtype, RTE_LOG_INFO);
+}
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
new file mode 100644
index 00000000..c6a05780
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_H__
+#define __DPAA2_QDMA_H__
+
+struct qdma_sdd;
+struct qdma_io_meta;
+
+#define DPAA2_QDMA_MAX_FLE 3
+#define DPAA2_QDMA_MAX_SDD 2
+
+/** FLE pool element size: I/O metadata + 3 frame list entries + 2 source/destination descriptors */
+#define QDMA_FLE_POOL_SIZE (sizeof(struct qdma_io_meta) + \
+ sizeof(struct qbman_fle) * DPAA2_QDMA_MAX_FLE + \
+ sizeof(struct qdma_sdd) * DPAA2_QDMA_MAX_SDD)
+/** FLE pool cache size */
+#define QDMA_FLE_CACHE_SIZE(_num) (_num/(RTE_MAX_LCORE * 2))
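+
+/*
+ * Layout of one FLE pool element as implied by QDMA_FLE_POOL_SIZE
+ * (a reference sketch; the offsets follow from the struct sizes above):
+ *
+ * +----------------------+ <- mempool object start
+ * | struct qdma_io_meta | job context ('cnxt') and VQ ID
+ * +----------------------+ <- FLE address programmed into the FD
+ * | 3 x struct qbman_fle | FLE[0] -> SDDs, FLE[1] -> src, FLE[2] -> dest
+ * +----------------------+
+ * | 2 x struct qdma_sdd | source and destination descriptors
+ * +----------------------+
+ */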
+
+/** Notification by FQD_CTX[fqid] */
+#define QDMA_SER_CTX (1 << 8)
+
+/**
+ * Source descriptor command read transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_SET_SDD_RD_COHERENT(sdd) ((sdd)->cmd = (0xb << 28))
+/**
+ * Destination descriptor command write transaction type for RBP=0:
+ * coherent copy of cacheable memory
+ */
+#define DPAA2_SET_SDD_WR_COHERENT(sdd) ((sdd)->cmd = (0x6 << 28))
+
+/** Maximum possible H/W Queues on each core */
+#define MAX_HW_QUEUE_PER_CORE 64
+
+/**
+ * In Virtual Queue mode, this specifies the number of dequeues the
+ * 'rte_qdma_vq_dequeue[_multi]' APIs perform from the H/W queue
+ * when there is no job present on the Virtual Queue ring.
+ */
+#define QDMA_DEQUEUE_BUDGET 64
+
+/**
+ * Represents a QDMA device.
+ * A single QDMA device exists, which is a combination of multiple DPDMAI rawdevs.
+ */
+struct qdma_device {
+ /** total number of hw queues. */
+ uint16_t num_hw_queues;
+ /**
+ * Maximum number of hw queues to be allocated per core.
+ * This is limited by MAX_HW_QUEUE_PER_CORE
+ */
+ uint16_t max_hw_queues_per_core;
+ /** Maximum number of VQ's */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /** Device state - started or stopped */
+ uint8_t state;
+ /** FLE pool for the device */
+ struct rte_mempool *fle_pool;
+ /** FLE pool element count */
+ int fle_pool_count;
+ /** A lock to QDMA device whenever required */
+ rte_spinlock_t lock;
+};
+
+/** Represents a QDMA H/W queue */
+struct qdma_hw_queue {
+ /** Pointer to Next instance */
+ TAILQ_ENTRY(qdma_hw_queue) next;
+ /** DPDMAI device to communicate with HW */
+ struct dpaa2_dpdmai_dev *dpdmai_dev;
+ /** queue ID to communicate with HW */
+ uint16_t queue_id;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** Number of users of this hw queue */
+ uint32_t num_users;
+};
+
+/** Represents a QDMA virtual queue */
+struct qdma_virt_queue {
+ /** Status ring of the virtual queue */
+ struct rte_ring *status_ring;
+ /** Associated hw queue */
+ struct qdma_hw_queue *hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** States if this vq is in use or not */
+ uint8_t in_use;
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /** Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /** Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+};
+
+/** Represents a QDMA per core hw queues allocation in virtual mode */
+struct qdma_per_core_info {
+ /** list for allocated hw queues */
+ struct qdma_hw_queue *hw_queues[MAX_HW_QUEUE_PER_CORE];
+ /** Number of hw queues allocated for this core */
+ uint16_t num_hw_queues;
+};
+
+/** Metadata which is stored with each operation */
+struct qdma_io_meta {
+ /**
+ * Context which is stored in the FLE pool (just before the FLE).
+ * The QDMA job pointer is stored in this context as part of the metadata.
+ */
+ uint64_t cnxt;
+ /** VQ ID, stored as part of the metadata of the enqueue command */
+ uint64_t id;
+};
+
+/** Source/Destination Descriptor */
+struct qdma_sdd {
+ uint32_t rsv;
+ /** Stride configuration */
+ uint32_t stride;
+ /** Route-by-port command */
+ uint32_t rbpcmd;
+ uint32_t cmd;
+} __attribute__((__packed__));
+
+/** Represents a DPDMAI raw device */
+struct dpaa2_dpdmai_dev {
+ /** Pointer to Next device instance */
+ TAILQ_ENTRY(dpaa2_dpdmai_dev) next;
+ /** handle to DPDMAI object */
+ struct fsl_mc_io dpdmai;
+ /** HW ID for DPDMAI object */
+ uint32_t dpdmai_id;
+ /** Token of this device */
+ uint16_t token;
+ /** Number of queues in this DPDMAI device */
+ uint8_t num_queues;
+ /** RX queues */
+ struct dpaa2_queue rx_queue[DPDMAI_PRIO_NUM];
+ /** TX queues */
+ struct dpaa2_queue tx_queue[DPDMAI_PRIO_NUM];
+};
+
+#endif /* __DPAA2_QDMA_H__ */
diff --git a/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h b/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
new file mode 100644
index 00000000..4779e4ce
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/dpaa2_qdma_logs.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __DPAA2_QDMA_LOGS_H__
+#define __DPAA2_QDMA_LOGS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int dpaa2_qdma_logtype;
+
+#define DPAA2_QDMA_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_qdma_logtype, "dpaa2_qdma: " \
+ fmt "\n", ## args)
+
+#define DPAA2_QDMA_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_qdma_logtype, "dpaa2_qdma: %s(): " \
+ fmt "\n", __func__, ## args)
+
+#define DPAA2_QDMA_FUNC_TRACE() DPAA2_QDMA_DEBUG(">>")
+
+#define DPAA2_QDMA_INFO(fmt, args...) \
+ DPAA2_QDMA_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_ERR(fmt, args...) \
+ DPAA2_QDMA_LOG(ERR, fmt, ## args)
+#define DPAA2_QDMA_WARN(fmt, args...) \
+ DPAA2_QDMA_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define DPAA2_QDMA_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "dpaa2_qdma: " fmt "\n", ## args)
+
+#define DPAA2_QDMA_DP_DEBUG(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_QDMA_DP_INFO(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_QDMA_DP_WARN(fmt, args...) \
+ DPAA2_QDMA_DP_LOG(WARNING, fmt, ## args)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __DPAA2_QDMA_LOGS_H__ */
diff --git a/drivers/raw/dpaa2_qdma/meson.build b/drivers/raw/dpaa2_qdma/meson.build
new file mode 100644
index 00000000..b6a081f1
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+build = dpdk_conf.has('RTE_LIBRTE_DPAA2_MEMPOOL')
+deps += ['rawdev', 'mempool_dpaa2', 'ring']
+sources = files('dpaa2_qdma.c')
+
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2_qdma.h')
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
new file mode 100644
index 00000000..17fffcb7
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma.h
@@ -0,0 +1,286 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef __RTE_PMD_DPAA2_QDMA_H__
+#define __RTE_PMD_DPAA2_QDMA_H__
+
+/**
+ * @file
+ *
+ * NXP dpaa2 QDMA specific structures.
+ *
+ */
+
+/** Determines the mode of operation */
+enum {
+ /**
+ * Allocate a H/W queue per VQ i.e. Exclusive hardware queue for a VQ.
+ * This mode will have best performance.
+ */
+ RTE_QDMA_MODE_HW,
+ /**
+ * A VQ shall not have an exclusive associated H/W queue.
+ * Rather a H/W Queue will be shared by multiple Virtual Queues.
+ * This mode will have intermediate data structures to support
+ * multi VQ to PQ mappings, thus having some performance implications.
+ * Note: Even in this mode there is an option to allocate a H/W
+ * queue for a VQ. Please see 'RTE_QDMA_VQ_EXCLUSIVE_PQ' flag.
+ */
+ RTE_QDMA_MODE_VIRTUAL
+};
+
+/**
+ * If the user has configured Virtual Queue mode, but a particular VQ needs
+ * an exclusive H/W queue associated with it (for better performance on that
+ * particular VQ), then the user can pass this flag while creating the
+ * Virtual Queue. A H/W queue will be allocated for each VQ which uses
+ * this flag.
+ */
+#define RTE_QDMA_VQ_EXCLUSIVE_PQ (1ULL)
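+/* For example (illustrative): rte_qdma_vq_create(rte_lcore_id(), RTE_QDMA_VQ_EXCLUSIVE_PQ) */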
+
+/** States if the source address is physical. */
+#define RTE_QDMA_JOB_SRC_PHY (1ULL)
+
+/** States if the destination address is physical. */
+#define RTE_QDMA_JOB_DEST_PHY (1ULL << 1)
+
+/** Provides QDMA device attributes */
+struct rte_qdma_attr {
+ /** total number of hw QDMA queues present */
+ uint16_t num_hw_queues;
+};
+
+/** QDMA device configuration structure */
+struct rte_qdma_config {
+ /** Number of maximum hw queues to allocate per core. */
+ uint16_t max_hw_queues_per_core;
+ /** Maximum number of VQ's to be used. */
+ uint16_t max_vqs;
+ /** mode of operation - physical(h/w) or virtual */
+ uint8_t mode;
+ /**
+ * User provides this as input to the driver as a size of the FLE pool.
+ * FLE's (and corresponding source/destination descriptors) are
+ * allocated by the driver at enqueue time to store src/dest and
+ * other data, and are freed at dequeue time. This determines the
+ * maximum number of in-flight jobs on the QDMA device. This should
+ * be a power of 2.
+ int fle_pool_count;
+};
+
+/** Provides QDMA device statistics */
+struct rte_qdma_vq_stats {
+ /** States if this vq has exclusively associated hw queue */
+ uint8_t exclusive_hw_queue;
+ /** Associated lcore id */
+ uint32_t lcore_id;
+ /** Total number of enqueues on this VQ */
+ uint64_t num_enqueues;
+ /** Total number of dequeues from this VQ */
+ uint64_t num_dequeues;
+ /** Total number of pending jobs in this VQ */
+ uint64_t num_pending_jobs;
+};
+
+/** Determines a QDMA job */
+struct rte_qdma_job {
+ /** Source Address from where DMA is (to be) performed */
+ uint64_t src;
+ /** Destination Address where DMA is (to be) done */
+ uint64_t dest;
+ /** Length of the DMA operation in bytes. */
+ uint32_t len;
+ /** See RTE_QDMA_JOB_ flags */
+ uint32_t flags;
+ /**
+ * User can specify a context which will be maintained
+ * on the dequeue operation.
+ */
+ uint64_t cnxt;
+ /**
+ * Status of the transaction.
+ * This is filled in the dequeue operation by the driver.
+ */
+ uint8_t status;
+};
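+
+/*
+ * Illustrative usage sketch (not part of this header): the intended call
+ * sequence for the experimental API declared below. The configuration
+ * values and IOVA variables are assumptions local to the example.
+ *
+ * struct rte_qdma_config cfg = {
+ * .max_hw_queues_per_core = 2,
+ * .max_vqs = 8,
+ * .mode = RTE_QDMA_MODE_HW,
+ * .fle_pool_count = 4096, // power of 2; caps in-flight jobs
+ * };
+ * struct rte_qdma_job job = {
+ * .src = src_iova, .dest = dst_iova, .len = length,
+ * .flags = RTE_QDMA_JOB_SRC_PHY | RTE_QDMA_JOB_DEST_PHY,
+ * };
+ * struct rte_qdma_job *done;
+ * int vq;
+ *
+ * rte_qdma_init();
+ * rte_qdma_configure(&cfg);
+ * rte_qdma_start();
+ * vq = rte_qdma_vq_create(rte_lcore_id(), 0);
+ * rte_qdma_vq_enqueue(vq, &job);
+ * while ((done = rte_qdma_vq_dequeue(vq)) == NULL)
+ * ; // poll the VQ for completion
+ */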
+
+/**
+ * Initialize the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_init(void);
+
+/**
+ * Get the QDMA attributes.
+ *
+ * @param qdma_attr
+ * QDMA attributes providing total number of hw queues etc.
+ */
+void __rte_experimental
+rte_qdma_attr_get(struct rte_qdma_attr *qdma_attr);
+
+/**
+ * Reset the QDMA device. This API will completely reset the QDMA
+ * device, bringing it to original state as if only rte_qdma_init() API
+ * has been called.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_reset(void);
+
+/**
+ * Configure the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_configure(struct rte_qdma_config *qdma_config);
+
+/**
+ * Start the QDMA device.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_start(void);
+
+/**
+ * Create a Virtual Queue on a particular lcore id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param lcore_id
+ * LCORE ID on which this particular queue would be associated with.
+ * @param flags
+ * RTE_QDMA_VQ_ flags. See macro definitions.
+ *
+ * @returns
+ * - >= 0: Virtual queue ID.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_create(uint32_t lcore_id, uint32_t flags);
+
+/**
+ * Enqueue multiple jobs to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA jobs provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs containing relevant information related to DMA.
+ * @param nb_jobs
+ * Number of QDMA jobs provided by the user.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_enqueue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Enqueue a single job to a Virtual Queue.
+ * If the enqueue is successful, the H/W will perform DMA operations
+ * on the basis of the QDMA job provided.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * A QDMA Job containing relevant information related to DMA.
+ *
+ * @returns
+ * - >=0: Number of jobs successfully submitted
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_enqueue(uint16_t vq_id,
+ struct rte_qdma_job *job);
+
+/**
+ * Dequeue multiple completed jobs from a Virtual Queue.
+ * Provides the list of completed jobs capped by nb_jobs.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param job
+ * List of QDMA Jobs returned from the API.
+ * @param nb_jobs
+ * Number of QDMA jobs requested for dequeue by the user.
+ *
+ * @returns
+ * Number of jobs actually dequeued.
+ */
+int __rte_experimental
+rte_qdma_vq_dequeue_multi(uint16_t vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs);
+
+/**
+ * Dequeue a single completed job from a Virtual Queue.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ *
+ * @returns
+ * - A completed job, or NULL if no job is available.
+ */
+struct rte_qdma_job * __rte_experimental
+rte_qdma_vq_dequeue(uint16_t vq_id);
+
+/**
+ * Get a Virtual Queue statistics.
+ *
+ * @param vq_id
+ * Virtual Queue ID.
+ * @param vq_stats
+ * VQ statistics structure which will be filled in by the driver.
+ */
+void __rte_experimental
+rte_qdma_vq_stats(uint16_t vq_id,
+ struct rte_qdma_vq_stats *vq_stats);
+
+/**
+ * Destroy the Virtual Queue specified by vq_id.
+ * This API can be called from any thread/core. User can create/destroy
+ * VQ's at runtime.
+ *
+ * @param vq_id
+ * Virtual Queue ID which needs to be deinitialized.
+ *
+ * @returns
+ * - 0: Success.
+ * - <0: Error code.
+ */
+int __rte_experimental
+rte_qdma_vq_destroy(uint16_t vq_id);
+
+/**
+ * Stop QDMA device.
+ */
+void __rte_experimental
+rte_qdma_stop(void);
+
+/**
+ * Destroy the QDMA device.
+ */
+void __rte_experimental
+rte_qdma_destroy(void);
+
+#endif /* __RTE_PMD_DPAA2_QDMA_H__*/
diff --git a/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
new file mode 100644
index 00000000..fe42a227
--- /dev/null
+++ b/drivers/raw/dpaa2_qdma/rte_pmd_dpaa2_qdma_version.map
@@ -0,0 +1,20 @@
+EXPERIMENTAL {
+ global:
+
+ rte_qdma_attr_get;
+ rte_qdma_configure;
+ rte_qdma_destroy;
+ rte_qdma_init;
+ rte_qdma_reset;
+ rte_qdma_start;
+ rte_qdma_stop;
+ rte_qdma_vq_create;
+ rte_qdma_vq_destroy;
+ rte_qdma_vq_dequeue;
+ rte_qdma_vq_dequeue_multi;
+ rte_qdma_vq_enqueue;
+ rte_qdma_vq_enqueue_multi;
+ rte_qdma_vq_stats;
+
+ local: *;
+};
diff --git a/drivers/raw/ifpga_rawdev/Makefile b/drivers/raw/ifpga_rawdev/Makefile
new file mode 100644
index 00000000..f3b9d5e6
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ifpga_rawdev.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/bus/ifpga
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev
+LDLIBS += -lrte_eal
+LDLIBS += -lrte_rawdev
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+LDLIBS += -lrte_bus_ifpga
+
+EXPORT_MAP := rte_pmd_ifpga_rawdev_version.map
+
+LIBABIVER := 1
+
+VPATH += $(SRCDIR)/base
+
+include $(RTE_SDK)/drivers/raw/ifpga_rawdev/base/Makefile
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += ifpga_rawdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/raw/ifpga_rawdev/base/Makefile b/drivers/raw/ifpga_rawdev/base/Makefile
new file mode 100644
index 00000000..d79da72b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/Makefile
@@ -0,0 +1,26 @@
+#SPDX-License-Identifier: BSD-3-Clause
+#Copyright(c) 2010-2018 Intel Corporation
+
+ifneq ($(CONFIG_RTE_LIBRTE_EAL),)
+OSDEP := osdep_rte
+else
+OSDEP := osdep_raw
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/raw/ifpga_rawdev/base/$(OSDEP)
+
+SRCS-y += ifpga_api.c
+SRCS-y += ifpga_enumerate.c
+SRCS-y += ifpga_feature_dev.c
+SRCS-y += ifpga_fme.c
+SRCS-y += ifpga_fme_iperf.c
+SRCS-y += ifpga_fme_dperf.c
+SRCS-y += ifpga_fme_error.c
+SRCS-y += ifpga_port.c
+SRCS-y += ifpga_port_error.c
+SRCS-y += opae_hw_api.c
+SRCS-y += opae_ifpga_hw_api.c
+SRCS-y += opae_debug.c
+SRCS-y += ifpga_fme_pr.c
+
+SRCS-y += $(wildcard $(SRCDIR)/base/$(OSDEP)/*.c)
diff --git a/drivers/raw/ifpga_rawdev/base/README b/drivers/raw/ifpga_rawdev/base/README
new file mode 100644
index 00000000..5bc2ed0f
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/README
@@ -0,0 +1,31 @@
+..
+
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+Intel iFPGA driver
+==================
+
+This directory contains the source code of the Intel FPGA driver released by
+the team which develops the Intel FPGA Open Programmable Acceleration Engine
+(OPAE). The base/ directory contains the original source package. The base code
+currently supports Intel FPGA solutions, including the integrated solution
+(Intel(R) Xeon(R) CPU with FPGAs) and the discrete solution (Intel(R)
+Programmable Acceleration Card with Intel(R) Arria(R) 10 FPGA), and it could be
+extended to support more FPGA devices in the future.
+
+Please refer to [1][2] for more introduction on OPAE and Intel FPGAs.
+
+[1] https://01.org/OPAE
+[2] https://www.altera.com/solutions/acceleration-hub/overview.html
+
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ osdep_raw/osdep_generic.h
+ osdep_rte/osdep_generic.h
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.c b/drivers/raw/ifpga_rawdev/base/ifpga_api.c
new file mode 100644
index 00000000..540e171a
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.c
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_api.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+#include "opae_hw_api.h"
+
+/* Accelerator APIs */
+static int ifpga_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return fpga_get_afu_uuid(port, uuid);
+}
+
+static int ifpga_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_bridge *br = acc->br;
+ struct ifpga_port_hw *port;
+ struct fpga_uafu_irq_set irq_set;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ if (start >= afu_info->num_irqs || start + count > afu_info->num_irqs)
+ return -EINVAL;
+
+ port = br->data;
+
+ irq_set.start = start;
+ irq_set.count = count;
+ irq_set.evtfds = evtfds;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_UINT, &irq_set);
+}
+
+static int ifpga_acc_get_info(struct opae_accelerator *acc,
+ struct opae_acc_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -ENODEV;
+
+ info->num_regions = afu_info->num_regions;
+ info->num_irqs = afu_info->num_irqs;
+
+ return 0;
+}
+
+static int ifpga_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (info->index >= afu_info->num_regions)
+ return -EINVAL;
+
+	/* an AFU currently has exactly one RW region */
+ info->flags = ACC_REGION_READ | ACC_REGION_WRITE | ACC_REGION_MMIO;
+ info->len = afu_info->region[info->index].len;
+ info->addr = afu_info->region[info->index].addr;
+
+ return 0;
+}
+
+static int ifpga_acc_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ switch (byte) {
+ case 8:
+ *(u64 *)data = opae_readq(region->addr + offset);
+ break;
+ case 4:
+ *(u32 *)data = opae_readl(region->addr + offset);
+ break;
+ case 2:
+ *(u16 *)data = opae_readw(region->addr + offset);
+ break;
+ case 1:
+ *(u8 *)data = opae_readb(region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ifpga_acc_write(struct opae_accelerator *acc,
+ unsigned int region_idx, u64 offset,
+ unsigned int byte, void *data)
+{
+ struct ifpga_afu_info *afu_info = acc->data;
+ struct opae_reg_region *region;
+
+ if (!afu_info)
+ return -EINVAL;
+
+ if (offset + byte <= offset)
+ return -EINVAL;
+
+ if (region_idx >= afu_info->num_regions)
+ return -EINVAL;
+
+ region = &afu_info->region[region_idx];
+ if (offset + byte > region->len)
+ return -EINVAL;
+
+ /* normal mmio case */
+ switch (byte) {
+ case 8:
+ opae_writeq(*(u64 *)data, region->addr + offset);
+ break;
+ case 4:
+ opae_writel(*(u32 *)data, region->addr + offset);
+ break;
+ case 2:
+ opae_writew(*(u16 *)data, region->addr + offset);
+ break;
+ case 1:
+ opae_writeb(*(u8 *)data, region->addr + offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct opae_accelerator_ops ifpga_acc_ops = {
+ .read = ifpga_acc_read,
+ .write = ifpga_acc_write,
+ .set_irq = ifpga_acc_set_irq,
+ .get_info = ifpga_acc_get_info,
+ .get_region_info = ifpga_acc_get_region_info,
+ .get_uuid = ifpga_acc_get_uuid,
+};
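+
+/*
+ * A minimal sketch of an accelerator MMIO read through the ops table above
+ * (illustrative only; "acc" stands for a hypothetical, already-enumerated
+ * accelerator and offset 0x40 is an example register in region 0):
+ *
+ *	u32 val;
+ *
+ *	if (!ifpga_acc_ops.read(acc, 0, 0x40, 4, &val))
+ *		use val;
+ */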
+
+/* Bridge APIs */
+
+static int ifpga_br_reset(struct opae_bridge *br)
+{
+ struct ifpga_port_hw *port = br->data;
+
+ return fpga_port_reset(port);
+}
+
+struct opae_bridge_ops ifpga_br_ops = {
+ .reset = ifpga_br_reset,
+};
+
+/* Manager APIs */
+static int ifpga_mgr_flash(struct opae_manager *mgr, int id, void *buf,
+ u32 size, u64 *status)
+{
+ struct ifpga_fme_hw *fme = mgr->data;
+ struct ifpga_hw *hw = fme->parent;
+
+ return ifpga_pr(hw, id, buf, size, status);
+}
+
+struct opae_manager_ops ifpga_mgr_ops = {
+ .flash = ifpga_mgr_flash,
+};
+
+/* Adapter APIs */
+static int ifpga_adapter_enumerate(struct opae_adapter *adapter)
+{
+	struct ifpga_hw *hw = malloc(sizeof(*hw));
+	int ret;
+
+	if (!hw)
+		return -ENOMEM;
+
+	memset(hw, 0, sizeof(*hw));
+	hw->pci_data = adapter->data;
+	hw->adapter = adapter;
+
+	/* propagate the enumeration error instead of masking it as -ENOMEM */
+	ret = ifpga_bus_enumerate(hw);
+	if (ret) {
+		free(hw);
+		return ret;
+	}
+
+	return ifpga_bus_init(hw);
+}
+
+struct opae_adapter_ops ifpga_adapter_ops = {
+ .enumerate = ifpga_adapter_enumerate,
+};
+
+/**
+ * ifpga_pr - do the partial reconfiguration for a given port device
+ * @hw: pointer to the HW structure
+ * @port_id: the port device id
+ * @buffer: the buffer of the bitstream
+ * @size: the size of the bitstream
+ * @status: hardware status, including the PR error code if -EIO is returned.
+ *
+ * @return
+ * - 0: Success, partial reconfiguration finished.
+ * - <0: Error code returned by the partial reconfiguration.
+ */
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+
+ return do_pr(hw, port_id, buffer, size, status);
+}
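+
+/*
+ * A minimal PR usage sketch (illustrative only; gbs_buf and gbs_len stand
+ * for a hypothetical in-memory Green Bitstream image):
+ *
+ *	u64 status;
+ *	int ret = ifpga_pr(hw, 0, gbs_buf, gbs_len, &status);
+ *
+ * If ret is -EIO, "status" holds the hardware PR error code.
+ */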
+
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_get_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_get_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop)
+{
+ if (!hw || !prop)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_prop(&hw->fme, prop);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_prop(&hw->port[port_id], prop);
+ }
+
+ return -ENOENT;
+}
+
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set)
+{
+ if (!hw || !irq_set)
+ return -EINVAL;
+
+ switch (fiu_id) {
+ case FEATURE_FIU_ID_FME:
+ return fme_set_irq(&hw->fme, feature_id, irq_set);
+ case FEATURE_FIU_ID_PORT:
+ if (!is_valid_port_id(hw, port_id))
+ return -ENODEV;
+ return port_set_irq(&hw->port[port_id], feature_id, irq_set);
+ }
+
+ return -ENOENT;
+}
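+
+/*
+ * Illustrative use of the property accessors; the property id below is a
+ * placeholder and the feature_prop field names are assumed from the
+ * driver's feature_prop definition:
+ *
+ *	struct feature_prop prop;
+ *
+ *	prop.feature_id = FME_FEATURE_ID_HEADER;
+ *	prop.prop_id = 0x1;	(placeholder property id)
+ *	if (!ifpga_get_prop(hw, FEATURE_FIU_ID_FME, 0, &prop))
+ *		use prop.data;
+ */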
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_api.h b/drivers/raw/ifpga_rawdev/base/ifpga_api.h
new file mode 100644
index 00000000..dae7ca14
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_api.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_API_H_
+#define _IFPGA_API_H_
+
+#include "opae_hw_api.h"
+#include "ifpga_hw.h"
+
+extern struct opae_adapter_ops ifpga_adapter_ops;
+extern struct opae_manager_ops ifpga_mgr_ops;
+extern struct opae_bridge_ops ifpga_br_ops;
+extern struct opae_accelerator_ops ifpga_acc_ops;
+
+/* common APIs */
+int ifpga_get_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_prop(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ struct feature_prop *prop);
+int ifpga_set_irq(struct ifpga_hw *hw, u32 fiu_id, u32 port_id,
+ u32 feature_id, void *irq_set);
+
+/* FME APIs */
+int ifpga_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status);
+
+#endif /* _IFPGA_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_compat.h b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
new file mode 100644
index 00000000..931c8938
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_compat.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_COMPAT_H_
+#define _IFPGA_COMPAT_H_
+
+#include "opae_osdep.h"
+
+#undef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
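+
+/*
+ * Example (illustrative): recover the enclosing structure from a pointer
+ * to one of its members:
+ *
+ *	struct foo { int a; int b; } f;
+ *	int *pb = &f.b;
+ *	struct foo *pf = container_of(pb, struct foo, b);	(pf == &f)
+ */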
+
+#define IFPGA_PAGE_SHIFT 12
+#define IFPGA_PAGE_SIZE (1 << IFPGA_PAGE_SHIFT)
+#define IFPGA_PAGE_MASK (~(IFPGA_PAGE_SIZE - 1))
+#define IFPGA_PAGE_ALIGN(addr) (((addr) + IFPGA_PAGE_SIZE - 1)\
+ & IFPGA_PAGE_MASK)
+#define IFPGA_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), IFPGA_PAGE_SIZE)
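+
+/* e.g. IFPGA_PAGE_ALIGN(0x1001) == 0x2000 with the 4 KB page size above. */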
+
+#define readl(addr) opae_readl(addr)
+#define readq(addr) opae_readq(addr)
+#define writel(value, addr) opae_writel(value, addr)
+#define writeq(value, addr) opae_writeq(value, addr)
+
+#define malloc(size) opae_malloc(size)
+#define zmalloc(size) opae_zmalloc(size)
+#define free(addr) opae_free(addr)
+
+/*
+ * Wait for a register's _field to change to the given value (_expect's
+ * _field) by polling with the given interval until the timeout expires.
+ */
+#define fpga_wait_register_field(_field, _expect, _reg_addr, _timeout, _invl)\
+({ \
+ int wait = 0; \
+ int ret = -ETIMEDOUT; \
+ typeof(_expect) value; \
+ for (; wait <= _timeout; wait += _invl) { \
+ value.csr = readq(_reg_addr); \
+ if (_expect._field == value._field) { \
+ ret = 0; \
+ break; \
+ } \
+ udelay(_invl); \
+ } \
+ ret; \
+})
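+
+/*
+ * A minimal usage sketch (illustrative; the register and field below are
+ * examples of the pattern, not a prescription):
+ *
+ *	struct feature_port_control expect;
+ *
+ *	expect.port_sftrst_ack = 1;
+ *	if (fpga_wait_register_field(port_sftrst_ack, expect,
+ *				     &port_hdr->control, RST_POLL_TIMEOUT,
+ *				     RST_POLL_INVL))
+ *		return -ETIMEDOUT;
+ */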
+
+#define __maybe_unused __attribute__((__unused__))
+
+#define UNUSED(x) (void)(x)
+
+#endif /* _IFPGA_COMPAT_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_defines.h b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
new file mode 100644
index 00000000..aa025272
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_defines.h
@@ -0,0 +1,1663 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_DEFINES_H_
+#define _IFPGA_DEFINES_H_
+
+#include "ifpga_compat.h"
+
+#define MAX_FPGA_PORT_NUM 4
+
+#define FME_FEATURE_HEADER "fme_hdr"
+#define FME_FEATURE_THERMAL_MGMT "fme_thermal"
+#define FME_FEATURE_POWER_MGMT "fme_power"
+#define FME_FEATURE_GLOBAL_IPERF "fme_iperf"
+#define FME_FEATURE_GLOBAL_ERR "fme_error"
+#define FME_FEATURE_PR_MGMT "fme_pr"
+#define FME_FEATURE_HSSI_ETH "fme_hssi"
+#define FME_FEATURE_GLOBAL_DPERF "fme_dperf"
+#define FME_FEATURE_QSPI_FLASH "fme_qspi_flash"
+
+#define PORT_FEATURE_HEADER "port_hdr"
+#define PORT_FEATURE_UAFU "port_uafu"
+#define PORT_FEATURE_ERR "port_err"
+#define PORT_FEATURE_UMSG "port_umsg"
+#define PORT_FEATURE_PR "port_pr"
+#define PORT_FEATURE_UINT "port_uint"
+#define PORT_FEATURE_STP "port_stp"
+
+/*
+ * do not check the revision id, as the id may be dynamic in
+ * some cases, e.g., UAFU.
+ */
+#define SKIP_REVISION_CHECK 0xff
+
+#define FME_HEADER_REVISION 1
+#define FME_THERMAL_MGMT_REVISION 0
+#define FME_POWER_MGMT_REVISION 1
+#define FME_GLOBAL_IPERF_REVISION 1
+#define FME_GLOBAL_ERR_REVISION 1
+#define FME_PR_MGMT_REVISION 2
+#define FME_HSSI_ETH_REVISION 0
+#define FME_GLOBAL_DPERF_REVISION 0
+#define FME_QSPI_REVISION 0
+
+#define PORT_HEADER_REVISION 0
+/* UAFU's header info depends on the downloaded GBS */
+#define PORT_UAFU_REVISION SKIP_REVISION_CHECK
+#define PORT_ERR_REVISION 1
+#define PORT_UMSG_REVISION 0
+#define PORT_UINT_REVISION 0
+#define PORT_STP_REVISION 1
+
+#define FEATURE_TYPE_AFU 0x1
+#define FEATURE_TYPE_BBB 0x2
+#define FEATURE_TYPE_PRIVATE 0x3
+#define FEATURE_TYPE_FIU 0x4
+
+#define FEATURE_FIU_ID_FME 0x0
+#define FEATURE_FIU_ID_PORT 0x1
+
+#define FEATURE_ID_HEADER 0x0
+#define FEATURE_ID_AFU 0xff
+
+enum fpga_id_type {
+ FME_ID,
+ PORT_ID,
+ FPGA_ID_MAX,
+};
+
+enum fme_feature_id {
+ FME_FEATURE_ID_HEADER = 0x0,
+
+ FME_FEATURE_ID_THERMAL_MGMT = 0x1,
+ FME_FEATURE_ID_POWER_MGMT = 0x2,
+ FME_FEATURE_ID_GLOBAL_IPERF = 0x3,
+ FME_FEATURE_ID_GLOBAL_ERR = 0x4,
+ FME_FEATURE_ID_PR_MGMT = 0x5,
+ FME_FEATURE_ID_HSSI_ETH = 0x6,
+ FME_FEATURE_ID_GLOBAL_DPERF = 0x7,
+ FME_FEATURE_ID_QSPI_FLASH = 0x8,
+
+	/* includes one for the FME header. */
+ FME_FEATURE_ID_MAX = 0x9,
+};
+
+enum port_feature_id {
+ PORT_FEATURE_ID_HEADER = 0x0,
+ PORT_FEATURE_ID_ERROR = 0x1,
+ PORT_FEATURE_ID_UMSG = 0x2,
+ PORT_FEATURE_ID_UINT = 0x3,
+ PORT_FEATURE_ID_STP = 0x4,
+ PORT_FEATURE_ID_UAFU = 0x5,
+ PORT_FEATURE_ID_MAX = 0x6,
+};
+
+/*
+ * All headers and structures must be byte-packed to match the spec.
+ */
+#pragma pack(push, 1)
+
+struct feature_header {
+ union {
+ u64 csr;
+ struct {
+ u16 id:12;
+ u8 revision:4;
+ u32 next_header_offset:24;
+ u8 end_of_list:1;
+ u32 reserved:19;
+ u8 type:4;
+ };
+ };
+};
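+
+/*
+ * Enumeration walks the device feature list by reading each header CSR and
+ * following next_header_offset until end_of_list is set. A sketch of the
+ * pattern (illustrative; "base" is a hypothetical list start and error
+ * handling is omitted):
+ *
+ *	struct feature_header hdr;
+ *	u8 __iomem *p = base;
+ *
+ *	do {
+ *		hdr.csr = readq(p);
+ *		(dispatch on hdr.type / hdr.id here)
+ *		p += hdr.next_header_offset;
+ *	} while (!hdr.end_of_list && hdr.next_header_offset);
+ */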
+
+struct feature_bbb_header {
+ struct uuid guid;
+};
+
+struct feature_afu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fiu_header {
+ struct uuid guid;
+ union {
+ u64 csr;
+ struct {
+ u64 next_afu:24;
+ u64 reserved:40;
+ };
+ };
+};
+
+struct feature_fme_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_verid; /* Fabric version ID */
+ u8 socket_id:1; /* Socket id */
+ u8 rsvd1:3; /* Reserved */
+			/* pci0 link available yes/no */
+			u8 pci0_link_avile:1;
+			/* pci1 link available yes/no */
+			u8 pci1_link_avile:1;
+			/* Coherent (QPI/UPI) link available yes/no */
+			u8 qpi_link_avile:1;
+ u8 rsvd2:1; /* Reserved */
+ /* IOMMU or VT-d supported yes/no */
+ u8 iommu_support:1;
+ u8 num_ports:3; /* Number of ports */
+ u8 sf_fab_ctl:1; /* Internal validation bit */
+ u8 rsvd3:3; /* Reserved */
+ /*
+ * Address width supported in bits
+			 * BXT - 0x26, SKX - 0x30
+ */
+ u8 address_width_bits:6;
+ u8 rsvd4:2; /* Reserved */
+ /* Size of cache supported in kb */
+ u16 cache_size:12;
+ u8 cache_assoc:4; /* Cache Associativity */
+ u16 rsvd5:15; /* Reserved */
+ u8 lock_bit:1; /* Lock bit */
+ };
+ };
+};
+
+#define FME_AFU_ACCESS_PF 0
+#define FME_AFU_ACCESS_VF 1
+
+struct feature_fme_port {
+ union {
+ u64 csr;
+ struct {
+ u32 port_offset:24;
+ u8 reserved1;
+ u8 port_bar:3;
+ u32 reserved2:20;
+ u8 afu_access_control:1;
+ u8 reserved3:4;
+ u8 port_implemented:1;
+ u8 reserved4:3;
+ };
+ };
+};
+
+struct feature_fme_fab_status {
+ union {
+ u64 csr;
+ struct {
+ u8 upilink_status:4; /* UPI Link Status */
+ u8 rsvd1:4; /* Reserved */
+ u8 pci0link_status:1; /* pci0 link status */
+ u8 rsvd2:3; /* Reserved */
+ u8 pci1link_status:1; /* pci1 link status */
+ u64 rsvd3:51; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_base {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Base Address of memory range */
+ u8 protected_base_addrss:4;
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_genprotrange2_limit {
+ union {
+ u64 csr;
+ struct {
+ u16 rsvd1; /* Reserved */
+ /* Limit Address of memory range */
+ u8 protected_limit_addrss:4;
+ u16 rsvd2:11; /* Reserved */
+ u8 enable:1; /* Enable GENPROTRANGE check */
+ u32 rsvd3; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_dxe_lock {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Determines write access to the DXE region CSRs
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_early_lock:1;
+ /*
+ * Determines write access to the HSSI CSR
+ * 1 - CSR region is locked;
+ * 0 - it is open for write access.
+ */
+ u8 dxe_late_lock:1;
+ u64 rsvd:62;
+ };
+ };
+};
+
+#define HSSI_ID_NO_HASSI 0
+#define HSSI_ID_PCIE_RP 1
+#define HSSI_ID_ETHERNET 2
+
+struct feature_fme_bitstream_id {
+ union {
+ u64 csr;
+ struct {
+ u32 gitrepo_hash:32; /* GIT repository hash */
+ /*
+ * HSSI configuration identifier:
+ * 0 - No HSSI
+ * 1 - PCIe-RP
+ * 2 - Ethernet
+ */
+ u8 hssi_id:4;
+ u16 rsvd1:12; /* Reserved */
+ /* Bitstream version patch number */
+ u8 bs_verpatch:4;
+ /* Bitstream version minor number */
+ u8 bs_verminor:4;
+ /* Bitstream version major number */
+ u8 bs_vermajor:4;
+ /* Bitstream version debug number */
+ u8 bs_verdebug:4;
+ };
+ };
+};
+
+struct feature_fme_bitstream_md {
+ union {
+ u64 csr;
+ struct {
+			/* Seed number used for synthesis flow */
+ u8 synth_seed:4;
+ /* Synthesis date(day number - 2 digits) */
+ u8 synth_day:8;
+ /* Synthesis date(month number - 2 digits) */
+ u8 synth_month:8;
+ /* Synthesis date(year number - 2 digits) */
+ u8 synth_year:8;
+ u64 rsvd:36; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_iommu_ctrl {
+ union {
+ u64 csr;
+ struct {
+ /* Disables IOMMU prefetcher for C0 channel */
+ u8 prefetch_disableC0:1;
+ /* Disables IOMMU prefetcher for C1 channel */
+ u8 prefetch_disableC1:1;
+ /* Disables IOMMU partial cache line writes */
+ u8 prefetch_wrdisable:1;
+ u8 rsvd1:1; /* Reserved */
+ /*
+ * Select counter and read value from register
+ * iommu_stat.dbg_counters
+ * 0 - Number of 4K page translation response
+ * 1 - Number of 2M page translation response
+ * 2 - Number of 1G page translation response
+ */
+ u8 counter_sel:2;
+ u32 rsvd2:26; /* Reserved */
+ /* Connected to IOMMU SIP Capabilities */
+ u32 capecap_defeature;
+ };
+ };
+};
+
+struct feature_fme_iommu_stat {
+ union {
+ u64 csr;
+ struct {
+ /* Translation Enable bit from IOMMU SIP */
+ u8 translation_enable:1;
+ /* Drain request in progress */
+ u8 drain_req_inprog:1;
+ /* Invalidation current state */
+ u8 inv_state:3;
+ /* C0 Response Buffer current state */
+ u8 respbuffer_stateC0:3;
+ /* C1 Response Buffer current state */
+ u8 respbuffer_stateC1:3;
+ /* Last request ID to IOMMU SIP */
+ u8 last_reqID:4;
+ /* Last IOMMU SIP response ID value */
+ u8 last_respID:4;
+ /* Last IOMMU SIP response status value */
+ u8 last_respstatus:3;
+ /* C0 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC0:1;
+ /* C1 Transaction Buffer is not empty */
+ u8 transbuf_notEmptyC1:1;
+ /* C0 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC0:1;
+ /* C1 Request FIFO is not empty */
+ u8 reqFIFO_notemptyC1:1;
+ /* C0 Response FIFO is not empty */
+ u8 respFIFO_notemptyC0:1;
+ /* C1 Response FIFO is not empty */
+ u8 respFIFO_notemptyC1:1;
+ /* C0 Response FIFO overflow detected */
+ u8 respFIFO_overflowC0:1;
+ /* C1 Response FIFO overflow detected */
+ u8 respFIFO_overflowC1:1;
+ /* C0 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC0:1;
+ /* C1 Transaction Buffer overflow detected */
+ u8 tranbuf_overflowC1:1;
+ /* Request FIFO overflow detected */
+ u8 reqFIFO_overflow:1;
+ /* IOMMU memory read in progress */
+ u8 memrd_inprog:1;
+ /* IOMMU memory write in progress */
+ u8 memwr_inprog:1;
+ u8 rsvd1:1; /* Reserved */
+ /* Value of counter selected by iommu_ctl.counter_sel */
+ u16 dbg_counters:16;
+ u16 rsvd2:12; /* Reserved */
+ };
+ };
+};
+
+struct feature_fme_pcie0_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_bar_lock:1; /* Lock VT-D BAR register */
+ u64 rsvd1:3;
+ u64 rciep:1; /* Configure PCIE0 as RCiEP */
+ u64 rsvd2:59;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+			u64 base:20; /* SMRR memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR rule is valid or not */
+ /*
+ * SMRR memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_base {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:12;
+ u64 base:20; /* SMRR2 memory range base address */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_smrr2_mask {
+ union {
+ u64 csr;
+ struct {
+ u64 rsvd1:11;
+ u64 valid:1; /* LLPR_SMRR2 rule is valid or not */
+ /*
+ * SMRR2 memory range mask which determines the range
+ * of region being mapped
+ */
+ u64 phys_mask:20;
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_base {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of base address memory range */
+ u64 me_base:27;
+ u64 rsvd:37;
+ };
+ };
+};
+
+struct feature_fme_llpr_meseg_limit {
+ union {
+ u64 csr;
+ struct {
+ /* A[45:19] of limit address memory range */
+ u64 me_limit:27;
+ u64 rsvd1:4;
+ u64 enable:1; /* Enable LLPR MESEG rule */
+ u64 rsvd2:32;
+ };
+ };
+};
+
+struct feature_fme_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 reserved;
+ u64 scratchpad;
+ struct feature_fme_capability capability;
+ struct feature_fme_port port[MAX_FPGA_PORT_NUM];
+ struct feature_fme_fab_status fab_status;
+ struct feature_fme_bitstream_id bitstream_id;
+ struct feature_fme_bitstream_md bitstream_md;
+ struct feature_fme_genprotrange2_base genprotrange2_base;
+ struct feature_fme_genprotrange2_limit genprotrange2_limit;
+ struct feature_fme_dxe_lock dxe_lock;
+ struct feature_fme_iommu_ctrl iommu_ctrl;
+ struct feature_fme_iommu_stat iommu_stat;
+ struct feature_fme_pcie0_ctrl pcie0_control;
+ struct feature_fme_llpr_smrr_base smrr_base;
+ struct feature_fme_llpr_smrr_mask smrr_mask;
+ struct feature_fme_llpr_smrr2_base smrr2_base;
+ struct feature_fme_llpr_smrr2_mask smrr2_mask;
+ struct feature_fme_llpr_meseg_base meseg_base;
+ struct feature_fme_llpr_meseg_limit meseg_limit;
+};
+
+struct feature_port_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 port_number:2; /* Port Number 0-3 */
+ u8 rsvd1:6; /* Reserved */
+ u16 mmio_size; /* User MMIO size in KB */
+ u8 rsvd2; /* Reserved */
+ u8 sp_intr_num:4; /* Supported interrupts num */
+ u32 rsvd3:28; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_control {
+ union {
+ u64 csr;
+ struct {
+ u8 port_sftrst:1; /* Port Soft Reset */
+ u8 rsvd1:1; /* Reserved */
+ u8 latency_tolerance:1;/* '1' >= 40us, '0' < 40us */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_sftrst_ack:1; /* HW ACK for Soft Reset */
+ u64 rsvd3:59; /* Reserved */
+ };
+ };
+};
+
+#define PORT_POWER_STATE_NORMAL 0
+#define PORT_POWER_STATE_AP1 1
+#define PORT_POWER_STATE_AP2 2
+#define PORT_POWER_STATE_AP6 6
+
+struct feature_port_status {
+ union {
+ u64 csr;
+ struct {
+			u8 port_freeze:1; /* '1' - frozen, '0' - normal */
+ u8 rsvd1:7; /* Reserved */
+ u8 power_state:4; /* Power State */
+ u8 ap1_event:1; /* AP1 event was detected */
+ u8 ap2_event:1; /* AP2 event was detected */
+ u64 rsvd2:50; /* Reserved */
+ };
+ };
+};
+
+/* Port Header Register Set */
+struct feature_port_header {
+ struct feature_header header;
+ struct feature_afu_header afu_header;
+ u64 port_mailbox;
+ u64 scratchpad;
+ struct feature_port_capability capability;
+ struct feature_port_control control;
+ struct feature_port_status status;
+ u64 rsvd2;
+ u64 user_clk_freq_cmd0;
+ u64 user_clk_freq_cmd1;
+ u64 user_clk_freq_sts0;
+ u64 user_clk_freq_sts1;
+};
+
+struct feature_fme_tmp_threshold {
+ union {
+ u64 csr;
+ struct {
+ u8 tmp_thshold1:7; /* temperature Threshold 1 */
+ /* temperature Threshold 1 enable/disable */
+ u8 tmp_thshold1_enable:1;
+ u8 tmp_thshold2:7; /* temperature Threshold 2 */
+ /* temperature Threshold 2 enable /disable */
+ u8 tmp_thshold2_enable:1;
+ u8 pro_hot_setpoint:7; /* Proc Hot set point */
+ u8 rsvd4:1; /* Reserved */
+			u8 therm_trip_thshold:7; /* Thermal Trip Threshold */
+ u8 rsvd3:1; /* Reserved */
+ u8 thshold1_status:1; /* Threshold 1 Status */
+ u8 thshold2_status:1; /* Threshold 2 Status */
+ u8 rsvd5:1; /* Reserved */
+			/* Thermal Trip Threshold status */
+ u8 therm_trip_thshold_status:1;
+ u8 rsvd6:4; /* Reserved */
+ /* Validation mode- Force Proc Hot */
+ u8 valmodeforce:1;
+ /* Validation mode - Therm trip Hot */
+ u8 valmodetherm:1;
+ u8 rsvd2:2; /* Reserved */
+ u8 thshold_policy:1; /* threshold policy */
+ u32 rsvd:19; /* Reserved */
+ };
+ };
+};
+
+/* Temperature Sensor Read values format 1 */
+struct feature_fme_temp_rdsensor_fmt1 {
+ union {
+ u64 csr;
+ struct {
+			/* Reads out FPGA temperature in Celsius */
+ u8 fpga_temp:7;
+ u8 rsvd0:1; /* Reserved */
+ /* Temperature reading sequence number */
+ u16 tmp_reading_seq_num;
+ /* Temperature reading is valid */
+ u8 tmp_reading_valid:1;
+ u8 rsvd1:7; /* Reserved */
+ u16 dbg_mode:10; /* Debug mode */
+ u32 rsvd2:22; /* Reserved */
+ };
+ };
+};
+
+/* Temperature sensor read values format 2 */
+struct feature_fme_temp_rdsensor_fmt2 {
+ u64 rsvd; /* Reserved */
+};
+
+/* Temperature Threshold Capability Register */
+struct feature_fme_tmp_threshold_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Temperature Threshold Unsupported */
+ u8 tmp_thshold_disabled:1;
+ u64 rsvd:63; /* Reserved */
+ };
+ };
+};
+
+/* FME THERMAL FEATURE */
+struct feature_fme_thermal {
+ struct feature_header header;
+ struct feature_fme_tmp_threshold threshold;
+ struct feature_fme_temp_rdsensor_fmt1 rdsensor_fm1;
+ struct feature_fme_temp_rdsensor_fmt2 rdsensor_fm2;
+ struct feature_fme_tmp_threshold_cap threshold_cap;
+};
+
+/* Power Status register */
+struct feature_fme_pm_status {
+ union {
+ u64 csr;
+ struct {
+			/* FPGA power consumed; the format is to be defined */
+ u32 pwr_consumed:18;
+ /* FPGA Latency Tolerance Reporting */
+ u8 fpga_latency_report:1;
+ u64 rsvd:45; /* Reserved */
+ };
+ };
+};
+
+/* AP Thresholds */
+struct feature_fme_pm_ap_threshold {
+ union {
+ u64 csr;
+ struct {
+ /*
+ * Number of clocks (5ns period) for assertion
+ * of FME_data
+ */
+ u8 threshold1:7;
+ u8 rsvd1:1;
+ u8 threshold2:7;
+ u8 rsvd2:1;
+ u8 threshold1_status:1;
+ u8 threshold2_status:1;
+ u64 rsvd3:46; /* Reserved */
+ };
+ };
+};
+
+/* Xeon Power Limit */
+struct feature_fme_pm_xeon_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+			/* 0 - Turbo range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FPGA Power Limit */
+struct feature_fme_pm_fpga_limit {
+ union {
+ u64 csr;
+ struct {
+ /* Power limit in Watts in 12.3 format */
+ u16 pwr_limit:15;
+ /* Indicates that power limit has been written */
+ u8 enable:1;
+			/* 0 - Turbo range, 1 - Entire range */
+ u8 clamping:1;
+ /* Time constant in XXYYY format */
+ u8 time:7;
+ u64 rsvd:40; /* Reserved */
+ };
+ };
+};
+
+/* FME POWER FEATURE */
+struct feature_fme_power {
+ struct feature_header header;
+ struct feature_fme_pm_status status;
+ struct feature_fme_pm_ap_threshold threshold;
+ struct feature_fme_pm_xeon_limit xeon_limit;
+ struct feature_fme_pm_fpga_limit fpga_limit;
+};
+
+#define CACHE_CHANNEL_RD 0
+#define CACHE_CHANNEL_WR 1
+
+enum iperf_cache_events {
+ IPERF_CACHE_RD_HIT,
+ IPERF_CACHE_WR_HIT,
+ IPERF_CACHE_RD_MISS,
+ IPERF_CACHE_WR_MISS,
+ IPERF_CACHE_RSVD, /* reserved */
+ IPERF_CACHE_HOLD_REQ,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN,
+ IPERF_CACHE_TX_REQ_STALL,
+ IPERF_CACHE_RX_REQ_STALL,
+ IPERF_CACHE_EVICTIONS,
+};
+
+/* FPMON Cache Control */
+struct feature_fme_ifpmon_ch_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd1:7; /* Reserved */
+ u8 freeze:1; /* Freeze if set to 1 */
+ u8 rsvd2:7; /* Reserved */
+ u8 cache_event:4; /* Select the cache event */
+ u8 cci_chsel:1; /* Select the channel */
+ u64 rsvd3:43; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Cache Counter */
+struct feature_fme_ifpmon_ch_ctr {
+ union {
+ u64 csr;
+ struct {
+			/* Cache Counter for even addresses */
+ u64 cache_counter:48;
+ u16 rsvd:12; /* Reserved */
+ /* Cache Event being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+enum iperf_fab_events {
+ IPERF_FAB_PCIE0_RD,
+ IPERF_FAB_PCIE0_WR,
+ IPERF_FAB_PCIE1_RD,
+ IPERF_FAB_PCIE1_WR,
+ IPERF_FAB_UPI_RD,
+ IPERF_FAB_UPI_WR,
+ IPERF_FAB_MMIO_RD,
+ IPERF_FAB_MMIO_WR,
+};
+
+#define FAB_DISABLE_FILTER 0
+#define FAB_ENABLE_FILTER 1
+
+/* FPMON FAB Control */
+struct feature_fme_ifpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1;		/* Set to 1 to freeze the counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_ifpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_ifpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+enum iperf_vtd_events {
+ IPERF_VTD_AFU_MEM_RD_TRANS,
+ IPERF_VTD_AFU_MEM_WR_TRANS,
+ IPERF_VTD_AFU_DEVTLB_RD_HIT,
+ IPERF_VTD_AFU_DEVTLB_WR_HIT,
+ IPERF_VTD_DEVTLB_4K_FILL,
+ IPERF_VTD_DEVTLB_2M_FILL,
+ IPERF_VTD_DEVTLB_1G_FILL,
+};
+
+/* VT-d control register */
+struct feature_fme_ifpmon_vtd_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1;		/* Set to 1 to freeze the counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d event counter */
+struct feature_fme_ifpmon_vtd_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+enum iperf_vtd_sip_events {
+ IPERF_VTD_SIP_IOTLB_4K_HIT,
+ IPERF_VTD_SIP_IOTLB_2M_HIT,
+ IPERF_VTD_SIP_IOTLB_1G_HIT,
+ IPERF_VTD_SIP_SLPWC_L3_HIT,
+ IPERF_VTD_SIP_SLPWC_L4_HIT,
+ IPERF_VTD_SIP_RCC_HIT,
+ IPERF_VTD_SIP_IOTLB_4K_MISS,
+ IPERF_VTD_SIP_IOTLB_2M_MISS,
+ IPERF_VTD_SIP_IOTLB_1G_MISS,
+ IPERF_VTD_SIP_SLPWC_L3_MISS,
+ IPERF_VTD_SIP_SLPWC_L4_MISS,
+ IPERF_VTD_SIP_RCC_MISS,
+};
+
+/* VT-d SIP control register */
+struct feature_fme_ifpmon_vtd_sip_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1;		/* Set to 1 to freeze the counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 vtd_evtcode:4; /* VTd and TLB event code */
+ u64 rsvd2:44; /* Reserved */
+ };
+ };
+};
+
+/* VT-d SIP event counter */
+struct feature_fme_ifpmon_vtd_sip_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 vtd_counter:48; /* VTd event counter */
+ u16 rsvd:12; /* Reserved */
+ u8 event_code:4; /* VTd event code */
+ };
+ };
+};
+
+/* FME IPERF FEATURE */
+struct feature_fme_iperf {
+ struct feature_header header;
+ struct feature_fme_ifpmon_ch_ctl ch_ctl;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr0;
+ struct feature_fme_ifpmon_ch_ctr ch_ctr1;
+ struct feature_fme_ifpmon_fab_ctl fab_ctl;
+ struct feature_fme_ifpmon_fab_ctr fab_ctr;
+ struct feature_fme_ifpmon_clk_ctr clk;
+ struct feature_fme_ifpmon_vtd_ctl vtd_ctl;
+ struct feature_fme_ifpmon_vtd_ctr vtd_ctr;
+ struct feature_fme_ifpmon_vtd_sip_ctl vtd_sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr vtd_sip_ctr;
+};
+
+enum dperf_fab_events {
+ DPERF_FAB_PCIE0_RD,
+ DPERF_FAB_PCIE0_WR,
+ DPERF_FAB_MMIO_RD = 6,
+ DPERF_FAB_MMIO_WR,
+};
+
+/* FPMON FAB Control */
+struct feature_fme_dfpmon_fab_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 reset_counters:1; /* Reset Counters */
+ u8 rsvd:7; /* Reserved */
+			u8 freeze:1;		/* Set to 1 to freeze the counter */
+ u8 rsvd1:7; /* Reserved */
+ u8 fab_evtcode:4; /* Fabric Event Code */
+ u8 port_id:2; /* Port ID */
+ u8 rsvd2:1; /* Reserved */
+ u8 port_filter:1; /* Port Filter */
+ u64 rsvd3:40; /* Reserved */
+ };
+ };
+};
+
+/* FPMON Event Counter */
+struct feature_fme_dfpmon_fab_ctr {
+ union {
+ u64 csr;
+ struct {
+ u64 fab_cnt:60; /* Fabric event counter */
+ /* Fabric event code being reported */
+ u8 event_code:4;
+ };
+ };
+};
+
+/* FPMON Clock Counter */
+struct feature_fme_dfpmon_clk_ctr {
+ u64 afu_interf_clock; /* Clk_16UI (AFU clock) counter. */
+};
+
+/* FME DPERF FEATURE */
+struct feature_fme_dperf {
+ struct feature_header header;
+ u64 rsvd[3];
+ struct feature_fme_dfpmon_fab_ctl fab_ctl;
+ struct feature_fme_dfpmon_fab_ctr fab_ctr;
+ struct feature_fme_dfpmon_clk_ctr clk;
+};
+
+struct feature_fme_error0 {
+#define FME_ERROR0_MASK 0xFFUL
+#define FME_ERROR0_MASK_DEFAULT 0x40UL /* pcode workaround */
+ union {
+ u64 csr;
+ struct {
+ u8 fabric_err:1; /* Fabric error */
+ u8 fabfifo_overflow:1; /* Fabric fifo overflow */
+ u8 kticdc_parity_err:2;/* KTI CDC Parity Error */
+ u8 iommu_parity_err:1; /* IOMMU Parity error */
+ /* AFU PF/VF access mismatch detected */
+ u8 afu_acc_mode_err:1;
+ u8 mbp_err:1; /* Indicates an MBP event */
+ /* PCIE0 CDC Parity Error */
+ u8 pcie0cdc_parity_err:5;
+ /* PCIE1 CDC Parity Error */
+ u8 pcie1cdc_parity_err:5;
+ /* CVL CDC Parity Error */
+ u8 cvlcdc_parity_err:3;
+ u64 rsvd:44; /* Reserved */
+ };
+ };
+};
+
+/* PCIe0 Error Status register */
+struct feature_fme_pcie0_error {
+#define FME_PCIE0_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:52; /* Reserved */
+ u8 vfnumb_err:1; /* Number of error VF */
+ u8 funct_type_err:1; /* Virtual (1) or Physical */
+ };
+ };
+};
+
+/* PCIe1 Error Status register */
+struct feature_fme_pcie1_error {
+#define FME_PCIE1_ERROR_MASK 0xFFUL
+ union {
+ u64 csr;
+ struct {
+ u8 formattype_err:1; /* TLP format/type error */
+ u8 MWAddr_err:1; /* TLP MW address error */
+ u8 MWAddrLength_err:1; /* TLP MW length error */
+ u8 MRAddr_err:1; /* TLP MR address error */
+ u8 MRAddrLength_err:1; /* TLP MR length error */
+ u8 cpl_tag_err:1; /* TLP CPL tag error */
+ u8 cpl_status_err:1; /* TLP CPL status error */
+ u8 cpl_timeout_err:1; /* TLP CPL timeout */
+ u8 cci_parity_err:1; /* CCI bridge parity error */
+ u8 rxpoison_tlp_err:1; /* Received a TLP with EP set */
+ u64 rsvd:54; /* Reserved */
+ };
+ };
+};
+
+/* FME First Error register */
+struct feature_fme_first_error {
+#define FME_FIRST_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+			 * Holds 60 LSBs from the Error register that was
+ * triggered first
+ */
+ u64 err_reg_status:60;
+ /*
+			 * Indicates the Error Register that was
+ * triggered first
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* FME Next Error register */
+struct feature_fme_next_error {
+#define FME_NEXT_ERROR_MASK ((1ULL << 60) - 1)
+ union {
+ u64 csr;
+ struct {
+ /*
+			 * Holds 60 LSBs from the Error register that was
+ * triggered second
+ */
+ u64 err_reg_status:60;
+ /*
+			 * Indicates the Error Register that was
+ * triggered second
+ */
+ u8 errReg_id:4;
+ };
+ };
+};
+
+/* RAS Non Fatal Error Status register */
+struct feature_fme_ras_nonfaterror {
+ union {
+ u64 csr;
+ struct {
+			/* thermal threshold AP1 */
+			u8 temp_thresh_ap1:1;
+			/* thermal threshold AP2 */
+			u8 temp_thresh_ap2:1;
+ u8 pcie_error:1; /* pcie Error */
+ u8 portfatal_error:1; /* port fatal error */
+ u8 proc_hot:1; /* Indicates a ProcHot event */
+ /* Indicates an AFU PF/VF access mismatch */
+ u8 afu_acc_mode_err:1;
+			/* Injected nonfatal Error */
+ u8 injected_nonfata_err:1;
+ u8 rsvd1:2;
+ /* Temperature threshold triggered AP6*/
+ u8 temp_thresh_AP6:1;
+ /* Power threshold triggered AP1 */
+ u8 power_thresh_AP1:1;
+ /* Power threshold triggered AP2 */
+ u8 power_thresh_AP2:1;
+ /* Indicates a MBP event */
+ u8 mbp_err:1;
+ u64 rsvd2:51; /* Reserved */
+ };
+ };
+};
+
+/* RAS Catastrophic Fatal Error Status register */
+struct feature_fme_ras_catfaterror {
+ union {
+ u64 csr;
+ struct {
+ /* KTI Link layer error detected */
+ u8 ktilink_fatal_err:1;
+ /* tag-n-cache error detected */
+ u8 tagcch_fatal_err:1;
+ /* CCI error detected */
+ u8 cci_fatal_err:1;
+ /* KTI Protocol error detected */
+ u8 ktiprpto_fatal_err:1;
+ /* Fatal DRAM error detected */
+ u8 dram_fatal_err:1;
+			/* IOMMU fatal error detected */
+ u8 iommu_fatal_err:1;
+ /* Fabric Fatal Error */
+ u8 fabric_fatal_err:1;
+			/* PCIe poison Error */
+ u8 pcie_poison_err:1;
+ /* Injected fatal Error */
+ u8 inject_fata_err:1;
+ /* Catastrophic CRC Error */
+ u8 crc_catast_err:1;
+ /* Catastrophic Thermal Error */
+ u8 therm_catast_err:1;
+ /* Injected Catastrophic Error */
+ u8 injected_catast_err:1;
+ u64 rsvd:52;
+ };
+ };
+};
+
+/* RAS Error injection register */
+struct feature_fme_ras_error_inj {
+#define FME_RAS_ERROR_INJ_MASK 0x7UL
+ union {
+ u64 csr;
+ struct {
+ u8 catast_error:1; /* Catastrophic error flag */
+ u8 fatal_error:1; /* Fatal error flag */
+ u8 nonfatal_error:1; /* NonFatal error flag */
+ u64 rsvd:61; /* Reserved */
+ };
+ };
+};
+
+/* FME error capabilities */
+struct feature_fme_error_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* FME ERR FEATURE */
+struct feature_fme_err {
+ struct feature_header header;
+ struct feature_fme_error0 fme_err_mask;
+ struct feature_fme_error0 fme_err;
+ struct feature_fme_pcie0_error pcie0_err_mask;
+ struct feature_fme_pcie0_error pcie0_err;
+ struct feature_fme_pcie1_error pcie1_err_mask;
+ struct feature_fme_pcie1_error pcie1_err;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ struct feature_fme_ras_nonfaterror ras_nonfat_mask;
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+ struct feature_fme_ras_catfaterror ras_catfat_mask;
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+ struct feature_fme_ras_error_inj ras_error_inj;
+ struct feature_fme_error_capability fme_err_capability;
+};
+
+/* FME Partial Reconfiguration Control */
+struct feature_fme_pr_ctl {
+ union {
+ u64 csr;
+ struct {
+ u8 pr_reset:1; /* Reset PR Engine */
+ u8 rsvd3:3; /* Reserved */
+ u8 pr_reset_ack:1; /* Reset PR Engine Ack */
+ u8 rsvd4:3; /* Reserved */
+ u8 pr_regionid:2; /* PR Region ID */
+ u8 rsvd1:2; /* Reserved */
+ u8 pr_start_req:1; /* PR Start Request */
+ u8 pr_push_complete:1; /* PR Data push complete */
+			u8 pr_kind:1; /* PR Kind */
+ u32 rsvd:17; /* Reserved */
+ u32 config_data; /* Config data TBD */
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Status */
+struct feature_fme_pr_status {
+ union {
+ u64 csr;
+ struct {
+ u16 pr_credit:9; /* PR Credits */
+ u8 rsvd2:7; /* Reserved */
+ u8 pr_status:1; /* PR status */
+ u8 rsvd:3; /* Reserved */
+			/* Altera PR Controller Block status */
+ u8 pr_controller_status:3;
+ u8 rsvd1:1; /* Reserved */
+ u8 pr_host_status:4; /* PR Host status */
+ u8 rsvd3:4; /* Reserved */
+ /* Security Block Status fields (TBD) */
+ u32 security_bstatus;
+ };
+ };
+};
+
+/* FME Partial Reconfiguration Data */
+struct feature_fme_pr_data {
+ union {
+ u64 csr; /* PR data from the raw-binary file */
+ struct {
+ /* PR data from the raw-binary file */
+ u32 pr_data_raw;
+ u32 rsvd;
+ };
+ };
+};
+
+/* FME PR Public Key */
+struct feature_fme_pr_key {
+ u64 key; /* FME PR Public Hash */
+};
+
+/* FME PR FEATURE */
+struct feature_fme_pr {
+ struct feature_header header;
+	/* Partial Reconfiguration control */
+ struct feature_fme_pr_ctl ccip_fme_pr_control;
+
+ /* Partial Reconfiguration Status */
+ struct feature_fme_pr_status ccip_fme_pr_status;
+
+ /* Partial Reconfiguration data */
+ struct feature_fme_pr_data ccip_fme_pr_data;
+
+	/* Partial Reconfiguration error */
+ u64 ccip_fme_pr_err;
+
+ u64 rsvd1[3];
+
+ /* Partial Reconfiguration data registers */
+ u64 fme_pr_data1;
+ u64 fme_pr_data2;
+ u64 fme_pr_data3;
+ u64 fme_pr_data4;
+ u64 fme_pr_data5;
+ u64 fme_pr_data6;
+ u64 fme_pr_data7;
+ u64 fme_pr_data8;
+
+ u64 rsvd2[5];
+
+ /* PR Interface ID */
+ u64 fme_pr_intfc_id_l;
+ u64 fme_pr_intfc_id_h;
+
+	/* MSI-X field to be added */
+};
+
+/* FME HSSI Control */
+struct feature_fme_hssi_eth_ctrl {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u16 address:16; /* HSSI address */
+ /*
+			 * HSSI command
+			 * 0x0 - No request
+			 * 0x08 - SW register RD request
+			 * 0x10 - SW register WR request
+			 * 0x40 - Auxiliary bus RD request
+			 * 0x80 - Auxiliary bus WR request
+ */
+ u16 cmd:16;
+ };
+ };
+};
+
+/* FME HSSI Status */
+struct feature_fme_hssi_eth_stat {
+ union {
+ u64 csr;
+ struct {
+ u32 data:32; /* HSSI data */
+ u8 acknowledge:1; /* HSSI acknowledge */
+ u8 spare:1; /* HSSI spare */
+ u32 rsvd:30; /* Reserved */
+ };
+ };
+};
+
+/* FME HSSI FEATURE */
+struct feature_fme_hssi {
+ struct feature_header header;
+ struct feature_fme_hssi_eth_ctrl hssi_control;
+ struct feature_fme_hssi_eth_stat hssi_status;
+};
+
+#define PORT_ERR_MASK 0xfff0703ff001f
+struct feature_port_err_key {
+ union {
+ u64 csr;
+ struct {
+ /* Tx Channel0: Overflow */
+ u8 tx_ch0_overflow:1;
+ /* Tx Channel0: Invalid request encoding */
+ u8 tx_ch0_invaldreq :1;
+ /* Tx Channel0: Request with cl_len=3 not supported */
+ u8 tx_ch0_cl_len3:1;
+ /* Tx Channel0: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch0_cl_len2:1;
+ /* Tx Channel0: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch0_cl_len4:1;
+
+ u16 rsvd1:4; /* Reserved */
+
+ /* AFU MMIO RD received while PORT is in reset */
+ u8 mmio_rd_whilerst:1;
+ /* AFU MMIO WR received while PORT is in reset */
+ u8 mmio_wr_whilerst:1;
+
+ u16 rsvd2:5; /* Reserved */
+
+ /* Tx Channel1: Overflow */
+ u8 tx_ch1_overflow:1;
+ /* Tx Channel1: Invalid request encoding */
+ u8 tx_ch1_invaldreq:1;
+ /* Tx Channel1: Request with cl_len=3 not supported */
+ u8 tx_ch1_cl_len3:1;
+ /* Tx Channel1: Request with cl_len=2 not aligned 2CL */
+ u8 tx_ch1_cl_len2:1;
+ /* Tx Channel1: Request with cl_len=4 not aligned 4CL */
+ u8 tx_ch1_cl_len4:1;
+
+ /* Tx Channel1: Insufficient data payload */
+ u8 tx_ch1_insuff_data:1;
+ /* Tx Channel1: Data payload overrun */
+ u8 tx_ch1_data_overrun:1;
+ /* Tx Channel1 : Incorrect address */
+ u8 tx_ch1_incorr_addr:1;
+ /* Tx Channel1 : NON-Zero SOP Detected */
+ u8 tx_ch1_nzsop:1;
+ /* Tx Channel1 : Illegal VC_SEL, atomic request VLO */
+ u8 tx_ch1_illegal_vcsel:1;
+
+ u8 rsvd3:6; /* Reserved */
+
+ /* MMIO Read Timeout in AFU */
+ u8 mmioread_timeout:1;
+
+ /* Tx Channel2: FIFO Overflow */
+ u8 tx_ch2_fifo_overflow:1;
+
+ /* MMIO read is not matching pending request */
+ u8 unexp_mmio_resp:1;
+
+ u8 rsvd4:5; /* Reserved */
+
+ /* Number of pending Requests: counter overflow */
+ u8 tx_req_counter_overflow:1;
+ /* Req with Address violating SMM Range */
+ u8 llpr_smrr_err:1;
+ /* Req with Address violating second SMM Range */
+ u8 llpr_smrr2_err:1;
+ /* Req with Address violating ME Stolen message */
+ u8 llpr_mesg_err:1;
+ /* Req with Address violating Generic Protected Range */
+ u8 genprot_range_err:1;
+ /* Req with Address violating Legacy Range low */
+ u8 legrange_low_err:1;
+ /* Req with Address violating Legacy Range High */
+ u8 legrange_high_err:1;
+ /* Req with Address violating VGA memory range */
+ u8 vgmem_range_err:1;
+ u8 page_fault_err:1; /* Page fault */
+ u8 pmr_err:1; /* PMR Error */
+ u8 ap6_event:1; /* AP6 event */
+ /* VF FLR detected on Port with PF access control */
+ u8 vfflr_access_err:1;
+ u16 rsvd5:12; /* Reserved */
+ };
+ };
+};
+
+/*
+ * Port first error register; it does not contain all error bits of the
+ * error register.
+ */
+struct feature_port_first_err_key {
+ union {
+ u64 csr;
+ struct {
+ u8 tx_ch0_overflow:1;
+ u8 tx_ch0_invaldreq :1;
+ u8 tx_ch0_cl_len3:1;
+ u8 tx_ch0_cl_len2:1;
+ u8 tx_ch0_cl_len4:1;
+ u8 rsvd1:4; /* Reserved */
+ u8 mmio_rd_whilerst:1;
+ u8 mmio_wr_whilerst:1;
+ u8 rsvd2:5; /* Reserved */
+ u8 tx_ch1_overflow:1;
+ u8 tx_ch1_invaldreq:1;
+ u8 tx_ch1_cl_len3:1;
+ u8 tx_ch1_cl_len2:1;
+ u8 tx_ch1_cl_len4:1;
+ u8 tx_ch1_insuff_data:1;
+ u8 tx_ch1_data_overrun:1;
+ u8 tx_ch1_incorr_addr:1;
+ u8 tx_ch1_nzsop:1;
+ u8 tx_ch1_illegal_vcsel:1;
+ u8 rsvd3:6; /* Reserved */
+ u8 mmioread_timeout:1;
+ u8 tx_ch2_fifo_overflow:1;
+ u8 rsvd4:6; /* Reserved */
+ u8 tx_req_counter_overflow:1;
+ u32 rsvd5:23; /* Reserved */
+ };
+ };
+};
+
+/* Port malformed Req0 */
+struct feature_port_malformed_req0 {
+ u64 header_lsb;
+};
+
+/* Port malformed Req1 */
+struct feature_port_malformed_req1 {
+ u64 header_msb;
+};
+
+/* Port debug register */
+struct feature_port_debug {
+ u64 port_debug;
+};
+
+/* Port error capabilities */
+struct feature_port_err_capability {
+ union {
+ u64 csr;
+ struct {
+ u8 support_intr:1;
+ /* MSI-X vector table entry number */
+ u16 intr_vector_num:12;
+ u64 rsvd:51; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE ERROR */
+struct feature_port_error {
+ struct feature_header header;
+ struct feature_port_err_key error_mask;
+ struct feature_port_err_key port_error;
+ struct feature_port_first_err_key port_first_error;
+ struct feature_port_malformed_req0 malreq0;
+ struct feature_port_malformed_req1 malreq1;
+ struct feature_port_debug port_debug;
+ struct feature_port_err_capability error_capability;
+};
+
+/* Port UMSG Capability */
+struct feature_port_umsg_cap {
+ union {
+ u64 csr;
+ struct {
+ /* Number of umsg allocated to this port */
+ u8 umsg_allocated;
+ /* Enable / Disable UMsg engine for this port */
+ u8 umsg_enable:1;
+			/* UMsg initialization status */
+			u8 umsg_init_complete:1;
+			/* IOMMU cannot translate the umsg base address */
+ u8 umsg_trans_error:1;
+ u64 rsvd:53; /* Reserved */
+ };
+ };
+};
+
+/* Port UMSG base address */
+struct feature_port_umsg_baseaddr {
+ union {
+ u64 csr;
+ struct {
+ u64 base_addr:48; /* 48 bit physical address */
+ u16 rsvd; /* Reserved */
+ };
+ };
+};
+
+struct feature_port_umsg_mode {
+ union {
+ u64 csr;
+ struct {
+ u32 umsg_hint_enable; /* UMSG hint enable/disable */
+ u32 rsvd; /* Reserved */
+ };
+ };
+};
+
+/* PORT FEATURE UMSG */
+struct feature_port_umsg {
+ struct feature_header header;
+ struct feature_port_umsg_cap capability;
+ struct feature_port_umsg_baseaddr baseaddr;
+ struct feature_port_umsg_mode mode;
+};
+
+#define UMSG_EN_POLL_INVL 10 /* us */
+#define UMSG_EN_POLL_TIMEOUT 1000 /* us */
+
+/* Port UINT Capability */
+struct feature_port_uint_cap {
+ union {
+ u64 csr;
+ struct {
+ u16 intr_num:12; /* Supported interrupts num */
+ /* First MSI-X vector table entry number */
+ u16 first_vec_num:12;
+ u64 rsvd:40;
+ };
+ };
+};
+
+/* PORT FEATURE UINT */
+struct feature_port_uint {
+ struct feature_header header;
+ struct feature_port_uint_cap capability;
+};
+
+/* STP region supports mmap operation, so use page aligned size. */
+#define PORT_FEATURE_STP_REGION_SIZE \
+ IFPGA_PAGE_ALIGN(sizeof(struct feature_port_stp))
+
+/* Port STP status register (for debug only) */
+struct feature_port_stp_status {
+ union {
+ u64 csr;
+ struct {
+ /* SLD Hub end-point read/write timeout */
+ u8 sld_ep_timeout:1;
+ /* Remote STP in reset/disable */
+ u8 rstp_disabled:1;
+ u8 unsupported_read:1;
+ /* MMIO timeout detected and faked with a response */
+ u8 mmio_timeout:1;
+ u8 txfifo_count:4;
+ u8 rxfifo_count:4;
+ u8 txfifo_overflow:1;
+ u8 txfifo_underflow:1;
+ u8 rxfifo_overflow:1;
+ u8 rxfifo_underflow:1;
+ /* Number of MMIO write requests */
+ u16 write_requests;
+ /* Number of MMIO read requests */
+ u16 read_requests;
+ /* Number of MMIO read responses */
+ u16 read_responses;
+ };
+ };
+};
+
+/*
+ * PORT FEATURE STP
+ * Most registers in the STP region are not touched by the driver but are
+ * mmapped to user space, so they are not defined in the data structure
+ * below; the actual region size is 0x18c per the spec.
+ */
+struct feature_port_stp {
+ struct feature_header header;
+ struct feature_port_stp_status stp_status;
+};
+
+/**
+ * enum fpga_pr_states - fpga PR states
+ * @FPGA_PR_STATE_UNKNOWN: can't determine state
+ * @FPGA_PR_STATE_WRITE_INIT: preparing FPGA for programming
+ * @FPGA_PR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage
+ * @FPGA_PR_STATE_WRITE: writing image to FPGA
+ * @FPGA_PR_STATE_WRITE_ERR: Error while writing FPGA
+ * @FPGA_PR_STATE_WRITE_COMPLETE: Doing post programming steps
+ * @FPGA_PR_STATE_WRITE_COMPLETE_ERR: Error during WRITE_COMPLETE
+ * @FPGA_PR_STATE_DONE: FPGA PR done
+ */
+enum fpga_pr_states {
+	/* cannot determine state */
+ FPGA_PR_STATE_UNKNOWN,
+
+ /* write sequence: init, write, complete */
+ FPGA_PR_STATE_WRITE_INIT,
+ FPGA_PR_STATE_WRITE_INIT_ERR,
+ FPGA_PR_STATE_WRITE,
+ FPGA_PR_STATE_WRITE_ERR,
+ FPGA_PR_STATE_WRITE_COMPLETE,
+ FPGA_PR_STATE_WRITE_COMPLETE_ERR,
+
+ /* FPGA PR done */
+ FPGA_PR_STATE_DONE,
+};
+
+/*
+ * FPGA Manager flags
+ * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ */
+#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
+
+/**
+ * struct fpga_pr_info - information specific to an FPGA PR
+ * @flags: boolean flags as defined above
+ * @pr_err: PR error code
+ * @state: fpga manager state
+ * @port_id: port id
+ */
+struct fpga_pr_info {
+ u32 flags;
+ u64 pr_err;
+ enum fpga_pr_states state;
+ int port_id;
+};
+
+#define DEFINE_FPGA_PR_ERR_MSG(_name_) \
+static const char * const _name_[] = { \
+ "PR operation error detected", \
+ "PR CRC error detected", \
+ "PR incompatiable bitstream error detected", \
+ "PR IP protocol error detected", \
+ "PR FIFO overflow error detected", \
+ "PR timeout error detected", \
+ "PR secure load error detected", \
+}
+
+#define RST_POLL_INVL 10 /* us */
+#define RST_POLL_TIMEOUT 1000 /* us */
+
+#define PR_WAIT_TIMEOUT 15000000
+
+#define PR_HOST_STATUS_IDLE 0
+#define PR_MAX_ERR_NUM 7
+
+DEFINE_FPGA_PR_ERR_MSG(pr_err_msg);
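+
+/*
+ * Illustrative decoding of a PR error word against the table above
+ * (assumes one error bit per message index, lowest bit first):
+ *
+ *	unsigned int i;
+ *
+ *	for (i = 0; i < PR_MAX_ERR_NUM; i++)
+ *		if (pr_err & (1ULL << i))
+ *			log the string pr_err_msg[i];
+ */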
+
+/*
+ * The Green Bitstream header must be byte-packed to match the
+ * real file format.
+ */
+struct bts_header {
+ u64 guid_h;
+ u64 guid_l;
+ u32 metadata_len;
+};
+
+#define GBS_GUID_H 0x414750466e6f6558
+#define GBS_GUID_L 0x31303076534247b7
+#define is_valid_bts(bts_hdr) \
+ (((bts_hdr)->guid_h == GBS_GUID_H) && \
+ ((bts_hdr)->guid_l == GBS_GUID_L))
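+
+/*
+ * Illustrative validation of a GBS image before PR ("buf" and "size" stand
+ * for a hypothetical bitstream buffer):
+ *
+ *	struct bts_header *hdr = (struct bts_header *)buf;
+ *
+ *	if (size < sizeof(*hdr) || !is_valid_bts(hdr))
+ *		return -EINVAL;
+ *	(metadata_len bytes of metadata follow the header, then the payload)
+ */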
+
+#pragma pack(pop)
+#endif /* _IFPGA_DEFINES_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
new file mode 100644
index 00000000..f0939dc3
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.c
@@ -0,0 +1,821 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "ifpga_api.h"
+
+#include "ifpga_hw.h"
+#include "ifpga_enumerate.h"
+#include "ifpga_feature_dev.h"
+
+struct build_feature_devs_info {
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_afu_info *acc_info;
+
+ void *fiu;
+ enum fpga_id_type current_type;
+ int current_port_id;
+
+ void *ioaddr;
+ void *ioend;
+ uint64_t phys_addr;
+ int current_bar;
+
+ void *pfme_hdr;
+
+ struct ifpga_hw *hw;
+};
+
+struct feature_info {
+ const char *name;
+ u32 resource_size;
+ int feature_index;
+ int revision_id;
+ unsigned int vec_start;
+ unsigned int vec_cnt;
+
+ struct feature_ops *ops;
+};
+
+/* indexed by fme feature IDs which are defined in 'enum fme_feature_id'. */
+static struct feature_info fme_features[] = {
+ {
+ .name = FME_FEATURE_HEADER,
+ .resource_size = sizeof(struct feature_fme_header),
+ .feature_index = FME_FEATURE_ID_HEADER,
+ .revision_id = FME_HEADER_REVISION,
+ .ops = &fme_hdr_ops,
+ },
+ {
+ .name = FME_FEATURE_THERMAL_MGMT,
+ .resource_size = sizeof(struct feature_fme_thermal),
+ .feature_index = FME_FEATURE_ID_THERMAL_MGMT,
+ .revision_id = FME_THERMAL_MGMT_REVISION,
+ .ops = &fme_thermal_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_POWER_MGMT,
+ .resource_size = sizeof(struct feature_fme_power),
+ .feature_index = FME_FEATURE_ID_POWER_MGMT,
+ .revision_id = FME_POWER_MGMT_REVISION,
+ .ops = &fme_power_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_IPERF,
+ .resource_size = sizeof(struct feature_fme_iperf),
+ .feature_index = FME_FEATURE_ID_GLOBAL_IPERF,
+ .revision_id = FME_GLOBAL_IPERF_REVISION,
+ .ops = &fme_global_iperf_ops,
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_ERR,
+ .resource_size = sizeof(struct feature_fme_err),
+ .feature_index = FME_FEATURE_ID_GLOBAL_ERR,
+ .revision_id = FME_GLOBAL_ERR_REVISION,
+ .ops = &fme_global_err_ops,
+ },
+ {
+ .name = FME_FEATURE_PR_MGMT,
+ .resource_size = sizeof(struct feature_fme_pr),
+ .feature_index = FME_FEATURE_ID_PR_MGMT,
+ .revision_id = FME_PR_MGMT_REVISION,
+ .ops = &fme_pr_mgmt_ops,
+ },
+ {
+ .name = FME_FEATURE_HSSI_ETH,
+ .resource_size = sizeof(struct feature_fme_hssi),
+ .feature_index = FME_FEATURE_ID_HSSI_ETH,
+ .revision_id = FME_HSSI_ETH_REVISION
+ },
+ {
+ .name = FME_FEATURE_GLOBAL_DPERF,
+ .resource_size = sizeof(struct feature_fme_dperf),
+ .feature_index = FME_FEATURE_ID_GLOBAL_DPERF,
+ .revision_id = FME_GLOBAL_DPERF_REVISION,
+ .ops = &fme_global_dperf_ops,
+ }
+};
+
+static struct feature_info port_features[] = {
+ {
+ .name = PORT_FEATURE_HEADER,
+ .resource_size = sizeof(struct feature_port_header),
+ .feature_index = PORT_FEATURE_ID_HEADER,
+ .revision_id = PORT_HEADER_REVISION,
+ .ops = &port_hdr_ops,
+ },
+ {
+ .name = PORT_FEATURE_ERR,
+ .resource_size = sizeof(struct feature_port_error),
+ .feature_index = PORT_FEATURE_ID_ERROR,
+ .revision_id = PORT_ERR_REVISION,
+ .ops = &port_error_ops,
+ },
+ {
+ .name = PORT_FEATURE_UMSG,
+ .resource_size = sizeof(struct feature_port_umsg),
+ .feature_index = PORT_FEATURE_ID_UMSG,
+ .revision_id = PORT_UMSG_REVISION,
+ },
+ {
+ .name = PORT_FEATURE_UINT,
+ .resource_size = sizeof(struct feature_port_uint),
+ .feature_index = PORT_FEATURE_ID_UINT,
+ .revision_id = PORT_UINT_REVISION,
+ .ops = &port_uint_ops,
+ },
+ {
+ .name = PORT_FEATURE_STP,
+ .resource_size = PORT_FEATURE_STP_REGION_SIZE,
+ .feature_index = PORT_FEATURE_ID_STP,
+ .revision_id = PORT_STP_REVISION,
+ .ops = &port_stp_ops,
+ },
+ {
+ .name = PORT_FEATURE_UAFU,
+ /* UAFU feature size should be read from PORT_CAP.MMIOSIZE.
+	 * The uafu feature size is set while parsing the port device.
+ */
+ .resource_size = 0,
+ .feature_index = PORT_FEATURE_ID_UAFU,
+ .revision_id = PORT_UAFU_REVISION
+ },
+};
+
+static u64 feature_id(void __iomem *start)
+{
+ struct feature_header header;
+
+ header.csr = readq(start);
+
+ switch (header.type) {
+ case FEATURE_TYPE_FIU:
+ return FEATURE_ID_HEADER;
+ case FEATURE_TYPE_PRIVATE:
+ return header.id;
+ case FEATURE_TYPE_AFU:
+ return FEATURE_ID_AFU;
+ }
+
+ WARN_ON(1);
+ return 0;
+}
+
+static int
+build_info_add_sub_feature(struct build_feature_devs_info *binfo,
+ struct feature_info *finfo, void __iomem *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct feature *feature = NULL;
+ int feature_idx = finfo->feature_index;
+ unsigned int vec_start = finfo->vec_start;
+ unsigned int vec_cnt = finfo->vec_cnt;
+ struct feature_irq_ctx *ctx = NULL;
+ int port_id, ret = 0;
+ unsigned int i;
+
+ if (binfo->current_type == FME_ID) {
+ feature = &hw->fme.sub_feature[feature_idx];
+ feature->parent = &hw->fme;
+ } else if (binfo->current_type == PORT_ID) {
+ port_id = binfo->current_port_id;
+ feature = &hw->port[port_id].sub_feature[feature_idx];
+ feature->parent = &hw->port[port_id];
+ } else {
+ return -EFAULT;
+ }
+
+ feature->state = IFPGA_FEATURE_ATTACHED;
+ feature->addr = start;
+ feature->id = feature_id(start);
+ feature->size = finfo->resource_size;
+ feature->name = finfo->name;
+ feature->revision = finfo->revision_id;
+ feature->ops = finfo->ops;
+ feature->phys_addr = binfo->phys_addr +
+ ((u8 *)start - (u8 *)binfo->ioaddr);
+
+ if (vec_cnt) {
+ if (vec_start + vec_cnt <= vec_start)
+ return -EINVAL;
+
+ ctx = zmalloc(sizeof(*ctx) * vec_cnt);
+ if (!ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < vec_cnt; i++) {
+ ctx[i].eventfd = -1;
+ ctx[i].idx = vec_start + i;
+ }
+ }
+
+ feature->ctx = ctx;
+ feature->ctx_num = vec_cnt;
+ feature->vfio_dev_fd = binfo->pci_data->vfio_dev_fd;
+
+ return ret;
+}
+
+static int
+create_feature_instance(struct build_feature_devs_info *binfo,
+ void __iomem *start, struct feature_info *finfo)
+{
+ struct feature_header *hdr = start;
+
+ if (finfo->revision_id != SKIP_REVISION_CHECK &&
+ hdr->revision > finfo->revision_id) {
+ dev_err(binfo, "feature %s revision :default:%x, now at:%x, mis-match.\n",
+ finfo->name, finfo->revision_id, hdr->revision);
+ }
+
+ return build_info_add_sub_feature(binfo, finfo, start);
+}
+
+/*
+ * The UAFU GUID is dynamic, as it can change after the FME downloads a
+ * different Green Bitstream to the port, so we treat any unknown GUID
+ * attached to a port's feature list as a UAFU.
+ */
+static bool feature_is_UAFU(struct build_feature_devs_info *binfo)
+{
+ if (binfo->current_type != PORT_ID)
+ return false;
+
+ return true;
+}
+
+static int parse_feature_port_uafu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+ struct ifpga_afu_info *info;
+ void *start = (void *)hdr;
+ int ret;
+
+ if (port_features[id].resource_size) {
+ ret = create_feature_instance(binfo, hdr, &port_features[id]);
+ } else {
+ dev_err(binfo, "the uafu feature header is mis-configured.\n");
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ /* FIXME: need to figure out a better name */
+ info = malloc(sizeof(*info));
+ if (!info)
+ return -ENOMEM;
+
+ info->region[0].addr = start;
+ info->region[0].phys_addr = binfo->phys_addr +
+ (uint8_t *)start - (uint8_t *)binfo->ioaddr;
+ info->region[0].len = port_features[id].resource_size;
+ port_features[id].resource_size = 0;
+ info->num_regions = 1;
+
+ binfo->acc_info = info;
+
+ return ret;
+}
+
+static int parse_feature_afus(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ int ret;
+ struct feature_afu_header *afu_hdr, header;
+ u8 __iomem *start;
+ u8 __iomem *end = binfo->ioend;
+
+ start = (u8 __iomem *)hdr;
+ for (; start < end; start += header.next_afu) {
+ if ((unsigned int)(end - start) <
+ (unsigned int)(sizeof(*afu_hdr) + sizeof(*hdr)))
+ return -EINVAL;
+
+ hdr = (struct feature_header *)start;
+ afu_hdr = (struct feature_afu_header *)(hdr + 1);
+ header.csr = readq(&afu_hdr->csr);
+
+ if (feature_is_UAFU(binfo)) {
+ ret = parse_feature_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+ }
+
+ if (!header.next_afu)
+ break;
+ }
+
+ return 0;
+}
+
+/* create and register proper private data */
+static int build_info_commit_dev(struct build_feature_devs_info *binfo)
+{
+ struct ifpga_afu_info *info = binfo->acc_info;
+ struct ifpga_hw *hw = binfo->hw;
+ struct opae_manager *mgr;
+ struct opae_bridge *br;
+ struct opae_accelerator *acc;
+
+ if (!binfo->fiu)
+ return 0;
+
+ if (binfo->current_type == PORT_ID) {
+ /* return error if no valid acc info data structure */
+ if (!info)
+ return -EFAULT;
+
+ br = opae_bridge_alloc(hw->adapter->name, &ifpga_br_ops,
+ binfo->fiu);
+ if (!br)
+ return -ENOMEM;
+
+ br->id = binfo->current_port_id;
+
+ /* update irq info */
+ info->num_irqs = port_features[PORT_FEATURE_ID_UINT].vec_cnt;
+
+ acc = opae_accelerator_alloc(hw->adapter->name,
+ &ifpga_acc_ops, info);
+ if (!acc) {
+ opae_bridge_free(br);
+ return -ENOMEM;
+ }
+
+ acc->br = br;
+ acc->index = br->id;
+
+ opae_adapter_add_acc(hw->adapter, acc);
+
+ } else if (binfo->current_type == FME_ID) {
+ mgr = opae_manager_alloc(hw->adapter->name, &ifpga_mgr_ops,
+ binfo->fiu);
+ if (!mgr)
+ return -ENOMEM;
+
+ mgr->adapter = hw->adapter;
+ hw->adapter->mgr = mgr;
+ }
+
+ binfo->fiu = NULL;
+
+ return 0;
+}
+
+static int
+build_info_create_dev(struct build_feature_devs_info *binfo,
+ enum fpga_id_type type, unsigned int index)
+{
+ int ret;
+
+ ret = build_info_commit_dev(binfo);
+ if (ret)
+ return ret;
+
+ binfo->current_type = type;
+
+ if (type == FME_ID) {
+ binfo->fiu = &binfo->hw->fme;
+ } else if (type == PORT_ID) {
+ binfo->fiu = &binfo->hw->port[index];
+ binfo->current_port_id = index;
+ }
+
+ return 0;
+}
+
+static int parse_feature_fme(struct build_feature_devs_info *binfo,
+ struct feature_header *start)
+{
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ int ret;
+
+ ret = build_info_create_dev(binfo, FME_ID, 0);
+ if (ret)
+ return ret;
+
+ /* Update FME states */
+ fme->state = IFPGA_FME_IMPLEMENTED;
+ fme->parent = hw;
+ spinlock_init(&fme->lock);
+
+ return create_feature_instance(binfo, start,
+ &fme_features[FME_FEATURE_ID_HEADER]);
+}
+
+static int parse_feature_port(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_hw *hw = binfo->hw;
+ struct ifpga_port_hw *port;
+ unsigned int port_id;
+ int ret;
+
+ /* Get current port's id */
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
+ port_id = capability.port_number;
+
+ ret = build_info_create_dev(binfo, PORT_ID, port_id);
+ if (ret)
+ return ret;
+
+	/* found a Port device */
+ port = &hw->port[port_id];
+ port->port_id = binfo->current_port_id;
+ port->parent = hw;
+ port->state = IFPGA_PORT_ATTACHED;
+ spinlock_init(&port->lock);
+
+ return create_feature_instance(binfo, start,
+ &port_features[PORT_FEATURE_ID_HEADER]);
+}
+
+static void enable_port_uafu(struct build_feature_devs_info *binfo,
+ void __iomem *start)
+{
+ enum port_feature_id id = PORT_FEATURE_ID_UAFU;
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+ struct ifpga_port_hw *port = &binfo->hw->port[binfo->current_port_id];
+
+ port_hdr = (struct feature_port_header *)start;
+ capability.csr = readq(&port_hdr->capability);
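+	/* mmio_size is presumably in KiB units; the shift by 10 converts to bytes */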
+ port_features[id].resource_size = (capability.mmio_size << 10);
+
+	/*
+	 * Per the spec, to enable a UAFU we must reset the related port;
+	 * otherwise the whole MMIO space of this UAFU is invalid.
+	 */
+ if (port_features[id].resource_size)
+ fpga_port_reset(port);
+}
+
+static int parse_feature_fiu(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ struct feature_fiu_header *fiu_hdr, fiu_header;
+ u8 __iomem *start = (u8 __iomem *)hdr;
+ int ret;
+
+ header.csr = readq(hdr);
+
+ switch (header.id) {
+ case FEATURE_FIU_ID_FME:
+ ret = parse_feature_fme(binfo, hdr);
+ binfo->pfme_hdr = hdr;
+ if (ret)
+ return ret;
+ break;
+ case FEATURE_FIU_ID_PORT:
+ ret = parse_feature_port(binfo, hdr);
+ enable_port_uafu(binfo, hdr);
+ if (ret)
+ return ret;
+
+		/* Check the Port FIU's next_afu pointer to the User AFU DFH */
+ fiu_hdr = (struct feature_fiu_header *)(hdr + 1);
+ fiu_header.csr = readq(&fiu_hdr->csr);
+
+ if (fiu_header.next_afu) {
+ start += fiu_header.next_afu;
+ ret = parse_feature_afus(binfo,
+ (struct feature_header *)start);
+ if (ret)
+ return ret;
+ } else {
+ dev_info(binfo, "No AFUs detected on Port\n");
+ }
+
+ break;
+ default:
+ dev_info(binfo, "FIU TYPE %d is not supported yet.\n",
+ header.id);
+ }
+
+ return 0;
+}
+
+static void parse_feature_irqs(struct build_feature_devs_info *binfo,
+ void __iomem *start, struct feature_info *finfo)
+{
+ finfo->vec_start = 0;
+ finfo->vec_cnt = 0;
+
+ UNUSED(binfo);
+
+ if (!strcmp(finfo->name, PORT_FEATURE_UINT)) {
+ struct feature_port_uint *port_uint = start;
+ struct feature_port_uint_cap uint_cap;
+
+ uint_cap.csr = readq(&port_uint->capability);
+ if (uint_cap.intr_num) {
+ finfo->vec_start = uint_cap.first_vec_num;
+ finfo->vec_cnt = uint_cap.intr_num;
+ } else {
+			dev_debug(binfo, "UAFU doesn't support interrupts\n");
+ }
+ } else if (!strcmp(finfo->name, PORT_FEATURE_ERR)) {
+ struct feature_port_error *port_err = start;
+ struct feature_port_err_capability port_err_cap;
+
+ port_err_cap.csr = readq(&port_err->error_capability);
+ if (port_err_cap.support_intr) {
+ finfo->vec_start = port_err_cap.intr_vector_num;
+ finfo->vec_cnt = 1;
+ } else {
+			dev_debug(binfo, "Port error doesn't support interrupts\n");
+ }
+
+ } else if (!strcmp(finfo->name, FME_FEATURE_GLOBAL_ERR)) {
+ struct feature_fme_err *fme_err = start;
+ struct feature_fme_error_capability fme_err_cap;
+
+ fme_err_cap.csr = readq(&fme_err->fme_err_capability);
+ if (fme_err_cap.support_intr) {
+ finfo->vec_start = fme_err_cap.intr_vector_num;
+ finfo->vec_cnt = 1;
+ } else {
+			dev_debug(binfo, "FME error doesn't support interrupts\n");
+ }
+ }
+}
+
+static int parse_feature_fme_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ if (header.id >= ARRAY_SIZE(fme_features)) {
+ dev_err(binfo, "FME feature id %x is not supported yet.\n",
+ header.id);
+ return 0;
+ }
+
+ parse_feature_irqs(binfo, hdr, &fme_features[header.id]);
+
+ return create_feature_instance(binfo, hdr, &fme_features[header.id]);
+}
+
+static int parse_feature_port_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ enum port_feature_id id;
+
+ header.csr = readq(hdr);
+	/*
+	 * Port feature ids occupy the region [0x10, 0x13]; add 1 to reserve
+	 * index 0, which is dedicated to the port header.
+	 */
+ id = (header.id & 0x000f) + 1;
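+	/* e.g. header.id 0x10 maps to (0x10 & 0xf) + 1 = 1, the first private slot */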
+
+ if (id >= ARRAY_SIZE(port_features)) {
+ dev_err(binfo, "Port feature id %x is not supported yet.\n",
+ header.id);
+ return 0;
+ }
+
+ parse_feature_irqs(binfo, hdr, &port_features[id]);
+
+ return create_feature_instance(binfo, hdr, &port_features[id]);
+}
+
+static int parse_feature_private(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+
+ header.csr = readq(hdr);
+
+ switch (binfo->current_type) {
+ case FME_ID:
+ return parse_feature_fme_private(binfo, hdr);
+ case PORT_ID:
+ return parse_feature_port_private(binfo, hdr);
+ default:
+ dev_err(binfo, "private feature %x belonging to AFU %d (unknown_type) is not supported yet.\n",
+ header.id, binfo->current_type);
+ }
+ return 0;
+}
+
+static int parse_feature(struct build_feature_devs_info *binfo,
+ struct feature_header *hdr)
+{
+ struct feature_header header;
+ int ret = 0;
+
+ header.csr = readq(hdr);
+
+ switch (header.type) {
+ case FEATURE_TYPE_AFU:
+ ret = parse_feature_afus(binfo, hdr);
+ break;
+ case FEATURE_TYPE_PRIVATE:
+ ret = parse_feature_private(binfo, hdr);
+ break;
+ case FEATURE_TYPE_FIU:
+ ret = parse_feature_fiu(binfo, hdr);
+ break;
+ default:
+		dev_err(binfo, "Feature Type %x is not supported.\n",
+			header.type);
+	}
+
+ return ret;
+}
+
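+/*
+ * Walk the device feature list: each feature header's
+ * next_header_offset links to the next header, until the offset is
+ * zero or the region is exhausted.
+ */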
+static int
+parse_feature_list(struct build_feature_devs_info *binfo, u8 __iomem *start)
+{
+ struct feature_header *hdr, header;
+ u8 __iomem *end = (u8 __iomem *)binfo->ioend;
+ int ret = 0;
+
+ for (; start < end; start += header.next_header_offset) {
+ if ((unsigned int)(end - start) < (unsigned int)sizeof(*hdr)) {
+ dev_err(binfo, "The region is too small to contain a feature.\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ hdr = (struct feature_header *)start;
+ ret = parse_feature(binfo, hdr);
+ if (ret)
+ return ret;
+
+ header.csr = readq(hdr);
+ if (!header.next_header_offset)
+ break;
+ }
+
+ return build_info_commit_dev(binfo);
+}
+
+/* switch the memory mapping to BAR# @bar */
+static int parse_switch_to(struct build_feature_devs_info *binfo, int bar)
+{
+ struct opae_adapter_data_pci *pci_data = binfo->pci_data;
+
+ if (!pci_data->region[bar].addr)
+ return -ENOMEM;
+
+ binfo->ioaddr = pci_data->region[bar].addr;
+ binfo->ioend = (u8 __iomem *)binfo->ioaddr + pci_data->region[bar].len;
+ binfo->phys_addr = pci_data->region[bar].phys_addr;
+ binfo->current_bar = bar;
+
+ return 0;
+}
+
+static int parse_ports_from_fme(struct build_feature_devs_info *binfo)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_port port;
+ int i = 0, ret = 0;
+
+ if (!binfo->pfme_hdr) {
+ dev_info(binfo, "VF is detected.\n");
+ return ret;
+ }
+
+ fme_hdr = binfo->pfme_hdr;
+
+ do {
+ port.csr = readq(&fme_hdr->port[i]);
+ if (!port.port_implemented)
+ break;
+
+ /* skip port which only could be accessed via VF */
+ if (port.afu_access_control == FME_AFU_ACCESS_VF)
+ continue;
+
+ ret = parse_switch_to(binfo, port.port_bar);
+ if (ret)
+ break;
+
+ ret = parse_feature_list(binfo,
+ (u8 __iomem *)binfo->ioaddr +
+ port.port_offset);
+ if (ret)
+ break;
+ } while (++i < MAX_FPGA_PORT_NUM);
+
+ return ret;
+}
+
+static struct build_feature_devs_info *
+build_info_alloc_and_init(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+
+ binfo = zmalloc(sizeof(*binfo));
+ if (!binfo)
+ return binfo;
+
+ binfo->hw = hw;
+ binfo->pci_data = hw->pci_data;
+
+ /* fpga feature list starts from BAR 0 */
+ if (parse_switch_to(binfo, 0)) {
+ free(binfo);
+ return NULL;
+ }
+
+ return binfo;
+}
+
+static void build_info_free(struct build_feature_devs_info *binfo)
+{
+ free(binfo);
+}
+
+static void ifpga_print_device_feature_list(struct ifpga_hw *hw)
+{
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct ifpga_port_hw *port;
+ struct feature *feature;
+ int i, j;
+
+ dev_info(hw, "found fme_device, is in PF: %s\n",
+ is_ifpga_hw_pf(hw) ? "yes" : "no");
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: 0x%p - 0x%p - paddr: 0x%lx\n",
+ feature->name, feature->addr,
+ feature->addr + feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++) {
+ port = &hw->port[i];
+
+ if (port->state != IFPGA_PORT_ATTACHED)
+ continue;
+
+ dev_info(hw, "port device: %d\n", port->port_id);
+
+ for (j = 0; j < PORT_FEATURE_ID_MAX; j++) {
+ feature = &port->sub_feature[j];
+ if (feature->state != IFPGA_FEATURE_ATTACHED)
+ continue;
+
+ dev_info(hw, "%12s: 0x%p - 0x%p - paddr:0x%lx\n",
+ feature->name,
+ feature->addr,
+ feature->addr +
+ feature->size - 1,
+ (unsigned long)feature->phys_addr);
+ }
+ }
+}
+
+int ifpga_bus_enumerate(struct ifpga_hw *hw)
+{
+ struct build_feature_devs_info *binfo;
+ int ret;
+
+ binfo = build_info_alloc_and_init(hw);
+ if (!binfo)
+ return -ENOMEM;
+
+ ret = parse_feature_list(binfo, binfo->ioaddr);
+ if (ret)
+ goto exit;
+
+ ret = parse_ports_from_fme(binfo);
+ if (ret)
+ goto exit;
+
+ ifpga_print_device_feature_list(hw);
+
+exit:
+ build_info_free(binfo);
+ return ret;
+}
+
+int ifpga_bus_init(struct ifpga_hw *hw)
+{
+ int i;
+
+ fme_hw_init(&hw->fme);
+ for (i = 0; i < MAX_FPGA_PORT_NUM; i++)
+ port_hw_init(&hw->port[i]);
+
+ return 0;
+}
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
new file mode 100644
index 00000000..14131e32
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_enumerate.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_ENUMERATE_H_
+#define _IFPGA_ENUMERATE_H_
+
+int ifpga_bus_init(struct ifpga_hw *hw);
+int ifpga_bus_enumerate(struct ifpga_hw *hw);
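+
+/*
+ * Illustrative call order (a sketch, not mandated by this header):
+ * enumeration discovers the FME/port features and must run before
+ * ifpga_bus_init(), which invokes each discovered feature's init hook.
+ *
+ *	ret = ifpga_bus_enumerate(hw);
+ *	if (!ret)
+ *		ret = ifpga_bus_init(hw);
+ */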
+
+#endif /* _IFPGA_ENUMERATE_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
new file mode 100644
index 00000000..be7ac9ee
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.c
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <sys/ioctl.h>
+
+#include "ifpga_feature_dev.h"
+
+/*
+ * Enable the port by clearing the port soft-reset bit, which is set by
+ * default. The AFU cannot respond to any MMIO access while it is in
+ * reset. __fpga_port_enable() should only be called after a matching
+ * __fpga_port_disable().
+ */
+void __fpga_port_enable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ WARN_ON(!port->disable_count);
+
+ if (--port->disable_count != 0)
+ return;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x0;
+ writeq(control.csr, &port_hdr->control);
+}
+
+int __fpga_port_disable(struct ifpga_port_hw *port)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ if (port->disable_count++ != 0)
+ return 0;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ WARN_ON(!port_hdr);
+
+ /* Set port soft reset */
+ control.csr = readq(&port_hdr->control);
+ control.port_sftrst = 0x1;
+ writeq(control.csr, &port_hdr->control);
+
+	/*
+	 * HW sets the ack bit to 1 once all outstanding requests on this
+	 * port have drained and the minimum soft-reset pulse width has
+	 * elapsed. The driver polls port_sftrst_ack to determine when HW
+	 * has completed the reset.
+	 */
+ control.port_sftrst_ack = 1;
+
+ if (fpga_wait_register_field(port_sftrst_ack, control,
+ &port_hdr->control, RST_POLL_TIMEOUT,
+ RST_POLL_INVL)) {
+		dev_err(port, "timeout: failed to reset device\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid)
+{
+ struct feature_port_header *port_hdr;
+ u64 guidl, guidh;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_UAFU);
+
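+	/* the 128-bit AFU GUID is exposed as two 64-bit MMIO words */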
+ spinlock_lock(&port->lock);
+ guidl = readq(&port_hdr->afu_header.guid.b[0]);
+ guidh = readq(&port_hdr->afu_header.guid.b[8]);
+ spinlock_unlock(&port->lock);
+
+ memcpy(uuid->b, &guidl, sizeof(u64));
+ memcpy(uuid->b + 8, &guidh, sizeof(u64));
+
+ return 0;
+}
+
+/* Mask / Unmask Port Errors by the Error Mask register. */
+void port_err_mask(struct ifpga_port_hw *port, bool mask)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key err_mask;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ if (mask)
+ err_mask.csr = PORT_ERR_MASK;
+ else
+ err_mask.csr = 0;
+
+ writeq(err_mask.csr, &port_err->error_mask);
+}
+
+/* Clear All Port Errors. */
+int port_err_clear(struct ifpga_port_hw *port, u64 err)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_error *port_err;
+ struct feature_port_err_key mask;
+ struct feature_port_first_err_key first;
+ struct feature_port_status status;
+ int ret = 0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+	/*
+	 * Clear All Port Errors
+	 *
+	 * - Check for AP6 state
+	 * - Halt the port by keeping it in reset
+	 * - Set the port error mask to all 1s to mask errors
+	 * - Clear all errors
+	 * - Set the port mask to all 0s to re-enable error capture
+	 * - Error registers start capturing new errors
+	 * - Enable the port by pulling it out of reset
+	 */
+
+	/* If the device is still in the AP6 state, no errors can be cleared. */
+ status.csr = readq(&port_hdr->status);
+ if (status.power_state == PORT_POWER_STATE_AP6) {
+		dev_err(port, "Could not clear errors, device in AP6 state.\n");
+ return -EBUSY;
+ }
+
+ /* Halt Port by keeping Port in reset */
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ /* Mask all errors */
+ port_err_mask(port, true);
+
+	/* Clear errors only if the err input matches the current port errors. */
+ mask.csr = readq(&port_err->port_error);
+
+ if (mask.csr == err) {
+ writeq(mask.csr, &port_err->port_error);
+
+ first.csr = readq(&port_err->port_first_error);
+ writeq(first.csr, &port_err->port_first_error);
+ } else {
+ ret = -EBUSY;
+ }
+
+ /* Clear mask */
+ port_err_mask(port, false);
+
+	/* Enable the port by clearing the reset */
+ __fpga_port_enable(port);
+
+ return ret;
+}
+
+int port_clear_error(struct ifpga_port_hw *port)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+
+ dev_info(port, "read port error: 0x%lx\n", (unsigned long)error.csr);
+
+ return port_err_clear(port, error.csr);
+}
+
+void fme_hw_uinit(struct ifpga_fme_hw *fme)
+{
+ struct feature *feature;
+ int i;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return;
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+int fme_hw_init(struct ifpga_fme_hw *fme)
+{
+ struct feature *feature;
+ int i, ret;
+
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ for (i = 0; i < FME_FEATURE_ID_MAX; i++) {
+ feature = &fme->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret) {
+ fme_hw_uinit(fme);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void port_hw_uinit(struct ifpga_port_hw *port)
+{
+ struct feature *feature;
+ int i;
+
+ for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+ feature = &port->sub_feature[i];
+ if (feature->state == IFPGA_FEATURE_ATTACHED &&
+ feature->ops && feature->ops->uinit)
+ feature->ops->uinit(feature);
+ }
+}
+
+int port_hw_init(struct ifpga_port_hw *port)
+{
+ struct feature *feature;
+ int i, ret;
+
+ if (port->state == IFPGA_PORT_UNUSED)
+ return 0;
+
+ for (i = 0; i < PORT_FEATURE_ID_MAX; i++) {
+ feature = &port->sub_feature[i];
+ if (feature->ops && feature->ops->init) {
+ ret = feature->ops->init(feature);
+ if (ret) {
+ port_hw_uinit(port);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
new file mode 100644
index 00000000..7a39a580
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_feature_dev.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_FEATURE_DEV_H_
+#define _IFPGA_FEATURE_DEV_H_
+
+#include "ifpga_hw.h"
+
+static inline struct ifpga_port_hw *
+get_port(struct ifpga_hw *hw, u32 port_id)
+{
+ if (!is_valid_port_id(hw, port_id))
+ return NULL;
+
+ return &hw->port[port_id];
+}
+
+#define ifpga_for_each_fme_feature(hw, feature) \
+ for ((feature) = (hw)->sub_feature; \
+ (feature) < (hw)->sub_feature + (FME_FEATURE_ID_MAX); (feature)++)
+
+#define ifpga_for_each_port_feature(hw, feature) \
+ for ((feature) = (hw)->sub_feature; \
+ (feature) < (hw)->sub_feature + (PORT_FEATURE_ID_MAX); (feature)++)
+
+static inline struct feature *
+get_fme_feature_by_id(struct ifpga_fme_hw *fme, u64 id)
+{
+ struct feature *feature;
+
+ ifpga_for_each_fme_feature(fme, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline struct feature *
+get_port_feature_by_id(struct ifpga_port_hw *port, u64 id)
+{
+ struct feature *feature;
+
+ ifpga_for_each_port_feature(port, feature) {
+ if (feature->id == id)
+ return feature;
+ }
+
+ return NULL;
+}
+
+static inline void *
+get_fme_feature_ioaddr_by_index(struct ifpga_fme_hw *fme, int index)
+{
+ return fme->sub_feature[index].addr;
+}
+
+static inline void *
+get_port_feature_ioaddr_by_index(struct ifpga_port_hw *port, int index)
+{
+ return port->sub_feature[index].addr;
+}
+
+static inline bool
+is_fme_feature_present(struct ifpga_fme_hw *fme, int index)
+{
+ return !!get_fme_feature_ioaddr_by_index(fme, index);
+}
+
+static inline bool
+is_port_feature_present(struct ifpga_port_hw *port, int index)
+{
+ return !!get_port_feature_ioaddr_by_index(port, index);
+}
+
+int fpga_get_afu_uuid(struct ifpga_port_hw *port, struct uuid *uuid);
+
+int __fpga_port_disable(struct ifpga_port_hw *port);
+void __fpga_port_enable(struct ifpga_port_hw *port);
+
+static inline int fpga_port_disable(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_disable(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
+
+static inline int fpga_port_enable(struct ifpga_port_hw *port)
+{
+ spinlock_lock(&port->lock);
+ __fpga_port_enable(port);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static inline int __fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ ret = __fpga_port_disable(port);
+ if (ret)
+ return ret;
+
+ __fpga_port_enable(port);
+
+ return 0;
+}
+
+static inline int fpga_port_reset(struct ifpga_port_hw *port)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = __fpga_port_reset(port);
+ spinlock_unlock(&port->lock);
+ return ret;
+}
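+
+/*
+ * Note: disable_count makes disable/enable nest, so callers can wrap a
+ * quiesced operation (illustrative sketch only; do_quiesced_work() is a
+ * hypothetical helper):
+ *
+ *	if (!fpga_port_disable(port)) {
+ *		do_quiesced_work(port);
+ *		fpga_port_enable(port);
+ *	}
+ */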
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status);
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop);
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set);
+
+int fme_hw_init(struct ifpga_fme_hw *fme);
+void fme_hw_uinit(struct ifpga_fme_hw *fme);
+void port_hw_uinit(struct ifpga_port_hw *port);
+int port_hw_init(struct ifpga_port_hw *port);
+int port_clear_error(struct ifpga_port_hw *port);
+void port_err_mask(struct ifpga_port_hw *port, bool mask);
+int port_err_clear(struct ifpga_port_hw *port, u64 err);
+
+extern struct feature_ops fme_hdr_ops;
+extern struct feature_ops fme_thermal_mgmt_ops;
+extern struct feature_ops fme_power_mgmt_ops;
+extern struct feature_ops fme_global_err_ops;
+extern struct feature_ops fme_pr_mgmt_ops;
+extern struct feature_ops fme_global_iperf_ops;
+extern struct feature_ops fme_global_dperf_ops;
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop);
+
+/* This struct is used when parsing uafu irq_set */
+struct fpga_uafu_irq_set {
+ u32 start;
+ u32 count;
+ s32 *evtfds;
+};
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
+
+extern struct feature_ops port_hdr_ops;
+extern struct feature_ops port_error_ops;
+extern struct feature_ops port_stp_ops;
+extern struct feature_ops port_uint_ops;
+
+/* helper functions for feature ops */
+int fpga_msix_set_block(struct feature *feature, unsigned int start,
+ unsigned int count, s32 *fds);
+
+#endif /* _IFPGA_FEATURE_DEV_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
new file mode 100644
index 00000000..4be60c0c
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme.c
@@ -0,0 +1,734 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PWR_THRESHOLD_MAX 0x7F
+
+int fme_get_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_prop(struct ifpga_fme_hw *fme, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int fme_set_irq(struct ifpga_fme_hw *fme, u32 feature_id, void *irq_set)
+{
+ struct feature *feature;
+
+ if (!fme)
+ return -ENOENT;
+
+ feature = get_fme_feature_by_id(fme, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+/* fme private feature head */
+static int fme_hdr_init(struct feature *feature)
+{
+ struct feature_fme_header *fme_hdr;
+
+ fme_hdr = (struct feature_fme_header *)feature->addr;
+
+ dev_info(NULL, "FME HDR Init.\n");
+ dev_info(NULL, "FME cap %llx.\n",
+ (unsigned long long)fme_hdr->capability.csr);
+
+ return 0;
+}
+
+static void fme_hdr_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME HDR UInit.\n");
+}
+
+static int fme_hdr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&fme_hdr->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_hdr_get_ports_num(struct ifpga_fme_hw *fme, u64 *ports_num)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *ports_num = fme_capability.num_ports;
+
+ return 0;
+}
+
+static int fme_hdr_get_cache_size(struct ifpga_fme_hw *fme, u64 *cache_size)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *cache_size = fme_capability.cache_size;
+
+ return 0;
+}
+
+static int fme_hdr_get_version(struct ifpga_fme_hw *fme, u64 *version)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *version = fme_capability.fabric_verid;
+
+ return 0;
+}
+
+static int fme_hdr_get_socket_id(struct ifpga_fme_hw *fme, u64 *socket_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+ struct feature_fme_capability fme_capability;
+
+ fme_capability.csr = readq(&fme_hdr->capability);
+ *socket_id = fme_capability.socket_id;
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_id(struct ifpga_fme_hw *fme,
+ u64 *bitstream_id)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_id = readq(&fme_hdr->bitstream_id);
+
+ return 0;
+}
+
+static int fme_hdr_get_bitstream_metadata(struct ifpga_fme_hw *fme,
+ u64 *bitstream_metadata)
+{
+ struct feature_fme_header *fme_hdr
+ = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ *bitstream_metadata = readq(&fme_hdr->bitstream_md);
+
+ return 0;
+}
+
+static int
+fme_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_HDR_PROP_REVISION:
+ return fme_hdr_get_revision(fme, &prop->data);
+ case FME_HDR_PROP_PORTS_NUM:
+ return fme_hdr_get_ports_num(fme, &prop->data);
+ case FME_HDR_PROP_CACHE_SIZE:
+ return fme_hdr_get_cache_size(fme, &prop->data);
+ case FME_HDR_PROP_VERSION:
+ return fme_hdr_get_version(fme, &prop->data);
+ case FME_HDR_PROP_SOCKET_ID:
+ return fme_hdr_get_socket_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_ID:
+ return fme_hdr_get_bitstream_id(fme, &prop->data);
+ case FME_HDR_PROP_BITSTREAM_METADATA:
+ return fme_hdr_get_bitstream_metadata(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_hdr_ops = {
+ .init = fme_hdr_init,
+ .uinit = fme_hdr_uinit,
+ .get_prop = fme_hdr_get_prop,
+};
+
+/* thermal management */
+static int fme_thermal_get_threshold1(struct ifpga_fme_hw *fme, u64 *thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1 = temp_threshold.tmp_thshold1;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1(struct ifpga_fme_hw *fme, u64 thres1)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
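+	/* thresholds appear to be degrees Celsius; values above 100 are rejected */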
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres1 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres1 == 0) {
+ tmp_threshold.tmp_thshold1_enable = 0;
+ tmp_threshold.tmp_thshold1 = thres1;
+ } else {
+ tmp_threshold.tmp_thshold1_enable = 1;
+ tmp_threshold.tmp_thshold1 = thres1;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2(struct ifpga_fme_hw *fme, u64 *thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres2 = temp_threshold.tmp_thshold2;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold2(struct ifpga_fme_hw *fme, u64 thres2)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_tmp_threshold tmp_threshold;
+ struct feature_fme_capability fme_capability;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+ fme_capability.csr = readq(&fme_hdr->capability);
+
+ if (fme_capability.lock_bit == 1) {
+ spinlock_unlock(&fme->lock);
+ return -EBUSY;
+ } else if (thres2 > 100) {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ } else if (thres2 == 0) {
+ tmp_threshold.tmp_thshold2_enable = 0;
+ tmp_threshold.tmp_thshold2 = thres2;
+ } else {
+ tmp_threshold.tmp_thshold2_enable = 1;
+ tmp_threshold.tmp_thshold2 = thres2;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold_trip(struct ifpga_fme_hw *fme,
+ u64 *thres_trip)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres_trip = temp_threshold.therm_trip_thshold;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_reached(struct ifpga_fme_hw *fme,
+ u64 *thres1_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_reached = temp_threshold.thshold1_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold2_reached(struct ifpga_fme_hw *fme,
+ u64 *thres1_reached)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_reached = temp_threshold.thshold2_status;
+
+ return 0;
+}
+
+static int fme_thermal_get_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 *thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold temp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_threshold.csr = readq(&thermal->threshold);
+ *thres1_policy = temp_threshold.thshold_policy;
+
+ return 0;
+}
+
+static int fme_thermal_set_threshold1_policy(struct ifpga_fme_hw *fme,
+ u64 thres1_policy)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_tmp_threshold tmp_threshold;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ spinlock_lock(&fme->lock);
+ tmp_threshold.csr = readq(&thermal->threshold);
+
+ if (thres1_policy == 0) {
+ tmp_threshold.thshold_policy = 0;
+ } else if (thres1_policy == 1) {
+ tmp_threshold.thshold_policy = 1;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(tmp_threshold.csr, &thermal->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_thermal_get_temperature(struct ifpga_fme_hw *fme, u64 *temp)
+{
+ struct feature_fme_thermal *thermal;
+ struct feature_fme_temp_rdsensor_fmt1 temp_rdsensor_fmt1;
+
+ thermal = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+
+ temp_rdsensor_fmt1.csr = readq(&thermal->rdsensor_fm1);
+ *temp = temp_rdsensor_fmt1.fpga_temp;
+
+ return 0;
+}
+
+static int fme_thermal_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_thermal *fme_thermal
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_THERMAL_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_thermal->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define FME_THERMAL_CAP_NO_TMP_THRESHOLD 0x1
+
+static int fme_thermal_mgmt_init(struct feature *feature)
+{
+ struct feature_fme_thermal *fme_thermal;
+ struct feature_fme_tmp_threshold_cap thermal_cap;
+
+ dev_info(NULL, "FME thermal mgmt Init.\n");
+
+ fme_thermal = (struct feature_fme_thermal *)feature->addr;
+ thermal_cap.csr = readq(&fme_thermal->threshold_cap);
+
+	dev_info(NULL, "FME thermal cap %llx.\n",
+		 (unsigned long long)thermal_cap.csr);
+
+ if (thermal_cap.tmp_thshold_disabled)
+ feature->cap |= FME_THERMAL_CAP_NO_TMP_THRESHOLD;
+
+ return 0;
+}
+
+static void fme_thermal_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME thermal mgmt UInit.\n");
+}
+
+static int
+fme_thermal_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_set_threshold1(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_set_threshold2(fme, prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_set_threshold1_policy(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int
+fme_thermal_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ if (feature->cap & FME_THERMAL_CAP_NO_TMP_THRESHOLD &&
+ prop->prop_id != FME_THERMAL_PROP_TEMPERATURE &&
+ prop->prop_id != FME_THERMAL_PROP_REVISION)
+ return -ENOENT;
+
+ switch (prop->prop_id) {
+ case FME_THERMAL_PROP_THRESHOLD1:
+ return fme_thermal_get_threshold1(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2:
+ return fme_thermal_get_threshold2(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD_TRIP:
+ return fme_thermal_get_threshold_trip(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_REACHED:
+ return fme_thermal_get_threshold1_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD2_REACHED:
+ return fme_thermal_get_threshold2_reached(fme, &prop->data);
+ case FME_THERMAL_PROP_THRESHOLD1_POLICY:
+ return fme_thermal_get_threshold1_policy(fme, &prop->data);
+ case FME_THERMAL_PROP_TEMPERATURE:
+ return fme_thermal_get_temperature(fme, &prop->data);
+ case FME_THERMAL_PROP_REVISION:
+ return fme_thermal_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_thermal_mgmt_ops = {
+ .init = fme_thermal_mgmt_init,
+ .uinit = fme_thermal_mgmt_uinit,
+ .get_prop = fme_thermal_get_prop,
+ .set_prop = fme_thermal_set_prop,
+};
+
+static int fme_pwr_get_consumed(struct ifpga_fme_hw *fme, u64 *consumed)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *consumed = pm_status.pwr_consumed;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold1;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold1(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold1 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2(struct ifpga_fme_hw *fme, u64 *threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold = pm_ap_threshold.threshold2;
+
+ return 0;
+}
+
+static int fme_pwr_set_threshold2(struct ifpga_fme_hw *fme, u64 threshold)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ spinlock_lock(&fme->lock);
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ if (threshold <= PWR_THRESHOLD_MAX) {
+ pm_ap_threshold.threshold2 = threshold;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(pm_ap_threshold.csr, &fme_power->threshold);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold1_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold1_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_threshold2_status(struct ifpga_fme_hw *fme,
+ u64 *threshold_status)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_ap_threshold pm_ap_threshold;
+
+ pm_ap_threshold.csr = readq(&fme_power->threshold);
+
+ *threshold_status = pm_ap_threshold.threshold2_status;
+
+ return 0;
+}
+
+static int fme_pwr_get_rtl(struct ifpga_fme_hw *fme, u64 *rtl)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_status pm_status;
+
+ pm_status.csr = readq(&fme_power->status);
+
+ *rtl = pm_status.fpga_latency_report;
+
+ return 0;
+}
+
+static int fme_pwr_get_xeon_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_xeon_limit xeon_limit;
+
+ xeon_limit.csr = readq(&fme_power->xeon_limit);
+
+ if (!xeon_limit.enable)
+ xeon_limit.pwr_limit = 0;
+
+ *limit = xeon_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_fpga_limit(struct ifpga_fme_hw *fme, u64 *limit)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_fme_pm_fpga_limit fpga_limit;
+
+ fpga_limit.csr = readq(&fme_power->fpga_limit);
+
+ if (!fpga_limit.enable)
+ fpga_limit.pwr_limit = 0;
+
+ *limit = fpga_limit.pwr_limit;
+
+ return 0;
+}
+
+static int fme_pwr_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_power *fme_power
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_POWER_MGMT);
+ struct feature_header header;
+
+ header.csr = readq(&fme_power->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_power_mgmt_init(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt Init.\n");
+
+ return 0;
+}
+
+static void fme_power_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME power mgmt UInit.\n");
+}
+
+static int fme_power_mgmt_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_CONSUMED:
+ return fme_pwr_get_consumed(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_get_threshold1(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_get_threshold2(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD1_STATUS:
+ return fme_pwr_get_threshold1_status(fme, &prop->data);
+ case FME_PWR_PROP_THRESHOLD2_STATUS:
+ return fme_pwr_get_threshold2_status(fme, &prop->data);
+ case FME_PWR_PROP_RTL:
+ return fme_pwr_get_rtl(fme, &prop->data);
+ case FME_PWR_PROP_XEON_LIMIT:
+ return fme_pwr_get_xeon_limit(fme, &prop->data);
+ case FME_PWR_PROP_FPGA_LIMIT:
+ return fme_pwr_get_fpga_limit(fme, &prop->data);
+ case FME_PWR_PROP_REVISION:
+ return fme_pwr_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_power_mgmt_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ switch (prop->prop_id) {
+ case FME_PWR_PROP_THRESHOLD1:
+ return fme_pwr_set_threshold1(fme, prop->data);
+ case FME_PWR_PROP_THRESHOLD2:
+ return fme_pwr_set_threshold2(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_power_mgmt_ops = {
+ .init = fme_power_mgmt_init,
+ .uinit = fme_power_mgmt_uinit,
+ .get_prop = fme_power_mgmt_get_prop,
+ .set_prop = fme_power_mgmt_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
new file mode 100644
index 00000000..1773b87e
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_dperf.c
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_dperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_clk_ctr clk;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ clk.afu_interf_clock = readq(&dperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_dperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_header header;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ header.csr = readq(&dperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+#define DPERF_TIMEOUT 30
+
+static bool fabric_pobj_is_enabled(int port_id,
+ struct feature_fme_dperf *dperf)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum dperf_fab_events fab_event)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dfpmon_fab_ctr ctr;
+ struct feature_fme_dperf *dperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, dperf))
+ goto exit;
+
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &dperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
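+	/* poll until HW reflects the selected event code back in the counter register */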
+ if (fpga_wait_register_field(event_code, ctr,
+ &dperf->fab_ctr, DPERF_TIMEOUT, 1)) {
+		dev_err(fme, "timeout, unmatched fabric event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&dperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_dperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, DPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, DPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(mmio_read, DPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, DPERF_FAB_MMIO_WR);
+
+static int fme_dperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_dperf *dperf;
+ int status;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+ status = fabric_pobj_is_enabled(port_id, dperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * Enabling the event counter for one port (or for all ports) in the
+ * fabric automatically disables whichever fabric event counter was
+ * previously enabled.
+ */
+static int fme_dperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ struct feature_fme_dperf *dperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+
+	/* nothing to do if it is already enabled */
+ if (fabric_pobj_is_enabled(port_id, dperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&dperf->fab_ctl);
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_dperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_dperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_dperf *dperf;
+ struct feature_fme_dfpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ dperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_DPERF);
+ ctl.csr = readq(&dperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &dperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+
+static int fme_global_dperf_init(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf Init.\n");
+
+ return 0;
+}
+
+static void fme_global_dperf_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_dperf UInit.\n");
+}
+
+static int fme_dperf_fab_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_dperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_dperf_get_fab_port_pcie0_read(fme, sub, &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_dperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* MMIO_READ */
+ return fme_dperf_get_fab_port_mmio_read(fme, sub, &prop->data);
+ case 0x5: /* MMIO_WRITE */
+ return fme_dperf_get_fab_port_mmio_write(fme, sub, &prop->data);
+ case 0x6: /* ENABLE */
+ return fme_dperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_dperf_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_dperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_dperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_dperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
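+
+/*
+ * Note on prop_id encoding (as decoded above): PROP_TOP selects the
+ * group (e.g. PERF_PROP_TOP_FAB), PROP_SUB carries the port id for
+ * per-port fabric counters (or PERF_PROP_SUB_UNUSED for root props),
+ * and PROP_ID selects the individual property (0x1..0x6).
+ */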
+
+static int fme_dperf_fab_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE - fab root only prop */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_dperf_set_fab_freeze(fme, prop->data);
+ case 0x6: /* ENABLE - fab both root and sub */
+ return fme_dperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_dperf_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_FAB:
+ return fme_dperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_dperf_ops = {
+ .init = fme_global_dperf_init,
+ .uinit = fme_global_dperf_uinit,
+ .get_prop = fme_global_dperf_get_prop,
+ .set_prop = fme_global_dperf_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
new file mode 100644
index 00000000..8c26fb25
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_error.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int fme_err_get_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ *val = fme_error0.csr;
+
+ return 0;
+}
+
+static int fme_err_get_first_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_first_error fme_first_err;
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ *val = fme_first_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_get_next_error(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_next_error fme_next_err;
+
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+ *val = fme_next_err.err_reg_status;
+
+ return 0;
+}
+
+static int fme_err_set_clear(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_error0 fme_error0;
+ struct feature_fme_first_error fme_first_err;
+ struct feature_fme_next_error fme_next_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
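+	/* mask all errors first so newly raised errors cannot race the clear */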
+ writeq(FME_ERROR0_MASK, &fme_err->fme_err_mask);
+
+ fme_error0.csr = readq(&fme_err->fme_err);
+ if (val != fme_error0.csr) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ fme_first_err.csr = readq(&fme_err->fme_first_err);
+ fme_next_err.csr = readq(&fme_err->fme_next_err);
+
+ writeq(fme_error0.csr & FME_ERROR0_MASK, &fme_err->fme_err);
+ writeq(fme_first_err.csr & FME_FIRST_ERROR_MASK,
+ &fme_err->fme_first_err);
+ writeq(fme_next_err.csr & FME_NEXT_ERROR_MASK,
+ &fme_err->fme_next_err);
+
+exit:
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_revision(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_header header;
+
+ header.csr = readq(&fme_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int fme_err_get_pcie0_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ *val = pcie0_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie0_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie0_error pcie0_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE0_ERROR_MASK, &fme_err->pcie0_err_mask);
+
+ pcie0_err.csr = readq(&fme_err->pcie0_err);
+ if (val != pcie0_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie0_err.csr & FME_PCIE0_ERROR_MASK,
+ &fme_err->pcie0_err);
+
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_pcie1_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ *val = pcie1_err.csr;
+
+ return 0;
+}
+
+static int fme_err_set_pcie1_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_pcie1_error pcie1_err;
+ int ret = 0;
+
+ spinlock_lock(&fme->lock);
+ writeq(FME_PCIE1_ERROR_MASK, &fme_err->pcie1_err_mask);
+
+ pcie1_err.csr = readq(&fme_err->pcie1_err);
+ if (val != pcie1_err.csr)
+ ret = -EBUSY;
+ else
+ writeq(pcie1_err.csr & FME_PCIE1_ERROR_MASK,
+ &fme_err->pcie1_err);
+
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+static int fme_err_get_nonfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_nonfaterror ras_nonfaterr;
+
+ ras_nonfaterr.csr = readq(&fme_err->ras_nonfaterr);
+ *val = ras_nonfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_catfatal_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_catfaterror ras_catfaterr;
+
+ ras_catfaterr.csr = readq(&fme_err->ras_catfaterr);
+ *val = ras_catfaterr.csr;
+
+ return 0;
+}
+
+static int fme_err_get_inject_errors(struct ifpga_fme_hw *fme, u64 *val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+ *val = ras_error_inj.csr & FME_RAS_ERROR_INJ_MASK;
+
+ return 0;
+}
+
+static int fme_err_set_inject_errors(struct ifpga_fme_hw *fme, u64 val)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+ struct feature_fme_ras_error_inj ras_error_inj;
+
+ spinlock_lock(&fme->lock);
+ ras_error_inj.csr = readq(&fme_err->ras_error_inj);
+
+ if (val <= FME_RAS_ERROR_INJ_MASK) {
+ ras_error_inj.csr = val;
+ } else {
+ spinlock_unlock(&fme->lock);
+ return -EINVAL;
+ }
+
+ writeq(ras_error_inj.csr, &fme_err->ras_error_inj);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static void fme_error_enable(struct ifpga_fme_hw *fme)
+{
+ struct feature_fme_err *fme_err
+ = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_ERR);
+
+ writeq(FME_ERROR0_MASK_DEFAULT, &fme_err->fme_err_mask);
+ writeq(0UL, &fme_err->pcie0_err_mask);
+ writeq(0UL, &fme_err->pcie1_err_mask);
+ writeq(0UL, &fme_err->ras_nonfat_mask);
+ writeq(0UL, &fme_err->ras_catfat_mask);
+}
+
+static int fme_global_error_init(struct feature *feature)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+
+ fme_error_enable(fme);
+
+ if (feature->ctx_num)
+ fme->capability |= FPGA_FME_CAP_ERR_IRQ;
+
+ return 0;
+}
+
+static void fme_global_error_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int fme_err_fme_err_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* ERRORS */
+ return fme_err_get_errors(fme, &prop->data);
+ case 0x2: /* FIRST_ERROR */
+ return fme_err_get_first_error(fme, &prop->data);
+ case 0x3: /* NEXT_ERROR */
+ return fme_err_get_next_error(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x5: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_get_pcie0_errors(fme, &prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_get_pcie1_errors(fme, &prop->data);
+ case 0x8: /* NONFATAL_ERRORS */
+ return fme_err_get_nonfatal_errors(fme, &prop->data);
+ case 0x9: /* CATFATAL_ERRORS */
+ return fme_err_get_catfatal_errors(fme, &prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_get_inject_errors(fme, &prop->data);
+	case 0xb: /* REVISION */
+ return fme_err_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_get_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_fme_err_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x4: /* CLEAR */
+ return fme_err_set_clear(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_err_root_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x6: /* PCIE0_ERRORS */
+ return fme_err_set_pcie0_errors(fme, prop->data);
+ case 0x7: /* PCIE1_ERRORS */
+ return fme_err_set_pcie1_errors(fme, prop->data);
+ case 0xa: /* INJECT_ERRORS */
+ return fme_err_set_inject_errors(fme, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_error_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ /* PROP_SUB is never used */
+ if (sub != PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (top) {
+ case ERR_PROP_TOP_FME_ERR:
+ return fme_err_fme_err_set_prop(feature, prop);
+ case ERR_PROP_TOP_UNUSED:
+ return fme_err_root_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_err_ops = {
+ .init = fme_global_error_init,
+ .uinit = fme_global_error_uinit,
+ .get_prop = fme_global_error_get_prop,
+ .set_prop = fme_global_error_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
new file mode 100644
index 00000000..e6c40a19
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_iperf.c
@@ -0,0 +1,715 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+#define PERF_OBJ_ROOT_ID 0xff
+
+static int fme_iperf_get_clock(struct ifpga_fme_hw *fme, u64 *clock)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_clk_ctr clk;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ clk.afu_interf_clock = readq(&iperf->clk);
+
+ *clock = clk.afu_interf_clock;
+ return 0;
+}
+
+static int fme_iperf_get_revision(struct ifpga_fme_hw *fme, u64 *revision)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_header header;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ header.csr = readq(&iperf->header);
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int fme_iperf_get_cache_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ *freeze = (u64)ctl.freeze;
+ return 0;
+}
+
+static int fme_iperf_set_cache_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->ch_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define IPERF_TIMEOUT 30
+
+static u64 read_cache_counter(struct ifpga_fme_hw *fme,
+ u8 channel, enum iperf_cache_events event)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_ch_ctl ctl;
+ struct feature_fme_ifpmon_ch_ctr ctr0, ctr1;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* set channel access type and cache event code. */
+ ctl.csr = readq(&iperf->ch_ctl);
+ ctl.cci_chsel = channel;
+ ctl.cache_event = event;
+ writeq(ctl.csr, &iperf->ch_ctl);
+
+	/* wait until HW latches the selected event code into the counter register */
+ ctr0.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr0,
+ &iperf->ch_ctr0, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched cache event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr0.csr = readq(&iperf->ch_ctr0);
+ ctr1.csr = readq(&iperf->ch_ctr1);
+ counter = ctr0.cache_counter + ctr1.cache_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define CACHE_SHOW(name, type, event) \
+static int fme_iperf_get_cache_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_cache_counter(fme, type, event); \
+ return 0; \
+}
+
+CACHE_SHOW(read_hit, CACHE_CHANNEL_RD, IPERF_CACHE_RD_HIT);
+CACHE_SHOW(read_miss, CACHE_CHANNEL_RD, IPERF_CACHE_RD_MISS);
+CACHE_SHOW(write_hit, CACHE_CHANNEL_WR, IPERF_CACHE_WR_HIT);
+CACHE_SHOW(write_miss, CACHE_CHANNEL_WR, IPERF_CACHE_WR_MISS);
+CACHE_SHOW(hold_request, CACHE_CHANNEL_RD, IPERF_CACHE_HOLD_REQ);
+CACHE_SHOW(tx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_TX_REQ_STALL);
+CACHE_SHOW(rx_req_stall, CACHE_CHANNEL_RD, IPERF_CACHE_RX_REQ_STALL);
+CACHE_SHOW(rx_eviction, CACHE_CHANNEL_RD, IPERF_CACHE_EVICTIONS);
+CACHE_SHOW(data_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_DATA_WR_PORT_CONTEN);
+CACHE_SHOW(tag_write_port_contention, CACHE_CHANNEL_WR,
+ IPERF_CACHE_TAG_WR_PORT_CONTEN);
+
+static int fme_iperf_get_vtd_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_vtd_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static u64 read_iommu_sip_counter(struct ifpga_fme_hw *fme,
+ enum iperf_vtd_sip_events event)
+{
+ struct feature_fme_ifpmon_vtd_sip_ctl sip_ctl;
+ struct feature_fme_ifpmon_vtd_sip_ctr sip_ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ sip_ctl.csr = readq(&iperf->vtd_sip_ctl);
+ sip_ctl.vtd_evtcode = event;
+ writeq(sip_ctl.csr, &iperf->vtd_sip_ctl);
+
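+	/* wait for HW to reflect the selected event code in the counter register */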
+ sip_ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, sip_ctr,
+ &iperf->vtd_sip_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd SIP event type in counter registers\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ sip_ctr.csr = readq(&iperf->vtd_sip_ctr);
+ counter = sip_ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_SIP_SHOW(name, event) \
+static int fme_iperf_get_vtd_sip_##name(struct ifpga_fme_hw *fme, \
+ u64 *counter) \
+{ \
+ *counter = read_iommu_sip_counter(fme, event); \
+ return 0; \
+}
+
+VTD_SIP_SHOW(iotlb_4k_hit, IPERF_VTD_SIP_IOTLB_4K_HIT);
+VTD_SIP_SHOW(iotlb_2m_hit, IPERF_VTD_SIP_IOTLB_2M_HIT);
+VTD_SIP_SHOW(iotlb_1g_hit, IPERF_VTD_SIP_IOTLB_1G_HIT);
+VTD_SIP_SHOW(slpwc_l3_hit, IPERF_VTD_SIP_SLPWC_L3_HIT);
+VTD_SIP_SHOW(slpwc_l4_hit, IPERF_VTD_SIP_SLPWC_L4_HIT);
+VTD_SIP_SHOW(rcc_hit, IPERF_VTD_SIP_RCC_HIT);
+VTD_SIP_SHOW(iotlb_4k_miss, IPERF_VTD_SIP_IOTLB_4K_MISS);
+VTD_SIP_SHOW(iotlb_2m_miss, IPERF_VTD_SIP_IOTLB_2M_MISS);
+VTD_SIP_SHOW(iotlb_1g_miss, IPERF_VTD_SIP_IOTLB_1G_MISS);
+VTD_SIP_SHOW(slpwc_l3_miss, IPERF_VTD_SIP_SLPWC_L3_MISS);
+VTD_SIP_SHOW(slpwc_l4_miss, IPERF_VTD_SIP_SLPWC_L4_MISS);
+VTD_SIP_SHOW(rcc_miss, IPERF_VTD_SIP_RCC_MISS);
+
+static u64 read_iommu_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_vtd_events base_event)
+{
+ struct feature_fme_ifpmon_vtd_ctl ctl;
+ struct feature_fme_ifpmon_vtd_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ enum iperf_vtd_events event = base_event + port_id;
+ u64 counter;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->vtd_ctl);
+ ctl.vtd_evtcode = event;
+ writeq(ctl.csr, &iperf->vtd_ctl);
+
+ ctr.event_code = event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->vtd_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->vtd_ctr);
+ counter = ctr.vtd_counter;
+ spinlock_unlock(&fme->lock);
+
+ return counter;
+}
+
+#define VTD_PORT_SHOW(name, base_event) \
+static int fme_iperf_get_vtd_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_iommu_counter(fme, port_id, base_event); \
+ return 0; \
+}
+
+VTD_PORT_SHOW(read_transaction, IPERF_VTD_AFU_MEM_RD_TRANS);
+VTD_PORT_SHOW(write_transaction, IPERF_VTD_AFU_MEM_WR_TRANS);
+VTD_PORT_SHOW(devtlb_read_hit, IPERF_VTD_AFU_DEVTLB_RD_HIT);
+VTD_PORT_SHOW(devtlb_write_hit, IPERF_VTD_AFU_DEVTLB_WR_HIT);
+VTD_PORT_SHOW(devtlb_4k_fill, IPERF_VTD_DEVTLB_4K_FILL);
+VTD_PORT_SHOW(devtlb_2m_fill, IPERF_VTD_DEVTLB_2M_FILL);
+VTD_PORT_SHOW(devtlb_1g_fill, IPERF_VTD_DEVTLB_1G_FILL);
+
+static bool fabric_pobj_is_enabled(u8 port_id, struct feature_fme_iperf *iperf)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+
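+	/* with the port filter disabled, counters aggregate all ports (root object) */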
+ if (ctl.port_filter == FAB_DISABLE_FILTER)
+ return port_id == PERF_OBJ_ROOT_ID;
+
+ return port_id == ctl.port_id;
+}
+
+static u64 read_fabric_counter(struct ifpga_fme_hw *fme, u8 port_id,
+ enum iperf_fab_events fab_event)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_ifpmon_fab_ctr ctr;
+ struct feature_fme_iperf *iperf;
+ u64 counter = 0;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ /* if it is disabled, force the counter to return zero. */
+ if (!fabric_pobj_is_enabled(port_id, iperf))
+ goto exit;
+
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.fab_evtcode = fab_event;
+ writeq(ctl.csr, &iperf->fab_ctl);
+
+ ctr.event_code = fab_event;
+
+ if (fpga_wait_register_field(event_code, ctr,
+ &iperf->fab_ctr, IPERF_TIMEOUT, 1)) {
+ dev_err(fme, "timeout, unmatched VTd event type in counter registers.\n");
+ spinlock_unlock(&fme->lock);
+ return -ETIMEDOUT;
+ }
+
+ ctr.csr = readq(&iperf->fab_ctr);
+ counter = ctr.fab_cnt;
+exit:
+ spinlock_unlock(&fme->lock);
+ return counter;
+}
+
+#define FAB_PORT_SHOW(name, event) \
+static int fme_iperf_get_fab_port_##name(struct ifpga_fme_hw *fme, \
+ u8 port_id, u64 *counter) \
+{ \
+ *counter = read_fabric_counter(fme, port_id, event); \
+ return 0; \
+}
+
+FAB_PORT_SHOW(pcie0_read, IPERF_FAB_PCIE0_RD);
+FAB_PORT_SHOW(pcie0_write, IPERF_FAB_PCIE0_WR);
+FAB_PORT_SHOW(pcie1_read, IPERF_FAB_PCIE1_RD);
+FAB_PORT_SHOW(pcie1_write, IPERF_FAB_PCIE1_WR);
+FAB_PORT_SHOW(upi_read, IPERF_FAB_UPI_RD);
+FAB_PORT_SHOW(upi_write, IPERF_FAB_UPI_WR);
+FAB_PORT_SHOW(mmio_read, IPERF_FAB_MMIO_RD);
+FAB_PORT_SHOW(mmio_write, IPERF_FAB_MMIO_WR);
+
+static int fme_iperf_get_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 *enable)
+{
+ struct feature_fme_iperf *iperf;
+ int status;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+ status = fabric_pobj_is_enabled(port_id, iperf);
+ *enable = (u64)status;
+
+ return 0;
+}
+
+/*
+ * If one port's event counter (or the all-ports counter) is enabled in
+ * the fabric, any other fabric event counter that was previously
+ * enabled is disabled automatically.
+ */
+static int fme_iperf_set_fab_port_enable(struct ifpga_fme_hw *fme,
+ u8 port_id, u64 enable)
+{
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ struct feature_fme_iperf *iperf;
+ bool state;
+
+ state = !!enable;
+
+ if (!state)
+ return -EINVAL;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+
+	/* nothing to do if this port's counter is already enabled */
+ if (fabric_pobj_is_enabled(port_id, iperf))
+ return 0;
+
+ spinlock_lock(&fme->lock);
+ ctl.csr = readq(&iperf->fab_ctl);
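+	/* the root object id selects all ports: drop the port filter */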
+ if (port_id == PERF_OBJ_ROOT_ID) {
+ ctl.port_filter = FAB_DISABLE_FILTER;
+ } else {
+ ctl.port_filter = FAB_ENABLE_FILTER;
+ ctl.port_id = port_id;
+ }
+
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+static int fme_iperf_get_fab_freeze(struct ifpga_fme_hw *fme, u64 *freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ *freeze = (u64)ctl.freeze;
+
+ return 0;
+}
+
+static int fme_iperf_set_fab_freeze(struct ifpga_fme_hw *fme, u64 freeze)
+{
+ struct feature_fme_iperf *iperf;
+ struct feature_fme_ifpmon_fab_ctl ctl;
+ bool state;
+
+ state = !!freeze;
+
+ spinlock_lock(&fme->lock);
+ iperf = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_GLOBAL_IPERF);
+ ctl.csr = readq(&iperf->fab_ctl);
+ ctl.freeze = state;
+ writeq(ctl.csr, &iperf->fab_ctl);
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+#define PERF_MAX_PORT_NUM 1
+#define FME_IPERF_CAP_IOMMU 0x1
+
+static int fme_global_iperf_init(struct feature *feature)
+{
+ struct ifpga_fme_hw *fme;
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+
+ dev_info(NULL, "FME global_iperf Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme, FME_FEATURE_ID_HEADER);
+
+	/* check whether the iommu is supported on this device. */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ dev_info(NULL, "FME HEAD fme_capability %llx.\n",
+ (unsigned long long)fme_hdr->capability.csr);
+
+ if (fme_capability.iommu_support)
+ feature->cap |= FME_IPERF_CAP_IOMMU;
+
+ return 0;
+}
+
+static void fme_global_iperf_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME global_iperf UInit.\n");
+}
+
+static int fme_iperf_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* CLOCK */
+ return fme_iperf_get_clock(fme, &prop->data);
+ case 0x2: /* REVISION */
+ return fme_iperf_get_revision(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_cache_freeze(fme, &prop->data);
+ case 0x2: /* READ_HIT */
+ return fme_iperf_get_cache_read_hit(fme, &prop->data);
+ case 0x3: /* READ_MISS */
+ return fme_iperf_get_cache_read_miss(fme, &prop->data);
+ case 0x4: /* WRITE_HIT */
+ return fme_iperf_get_cache_write_hit(fme, &prop->data);
+ case 0x5: /* WRITE_MISS */
+ return fme_iperf_get_cache_write_miss(fme, &prop->data);
+ case 0x6: /* HOLD_REQUEST */
+ return fme_iperf_get_cache_hold_request(fme, &prop->data);
+ case 0x7: /* TX_REQ_STALL */
+ return fme_iperf_get_cache_tx_req_stall(fme, &prop->data);
+ case 0x8: /* RX_REQ_STALL */
+ return fme_iperf_get_cache_rx_req_stall(fme, &prop->data);
+ case 0x9: /* RX_EVICTION */
+ return fme_iperf_get_cache_rx_eviction(fme, &prop->data);
+ case 0xa: /* DATA_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_data_write_port_contention(fme,
+ &prop->data);
+ case 0xb: /* TAG_WRITE_PORT_CONTENTION */
+ return fme_iperf_get_cache_tag_write_port_contention(fme,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_root_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ return fme_iperf_get_vtd_freeze(fme, &prop->data);
+ case 0x2: /* IOTLB_4K_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_4k_hit(fme, &prop->data);
+ case 0x3: /* IOTLB_2M_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_2m_hit(fme, &prop->data);
+ case 0x4: /* IOTLB_1G_HIT */
+ return fme_iperf_get_vtd_sip_iotlb_1g_hit(fme, &prop->data);
+ case 0x5: /* SLPWC_L3_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l3_hit(fme, &prop->data);
+ case 0x6: /* SLPWC_L4_HIT */
+ return fme_iperf_get_vtd_sip_slpwc_l4_hit(fme, &prop->data);
+ case 0x7: /* RCC_HIT */
+ return fme_iperf_get_vtd_sip_rcc_hit(fme, &prop->data);
+ case 0x8: /* IOTLB_4K_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_4k_miss(fme, &prop->data);
+ case 0x9: /* IOTLB_2M_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_2m_miss(fme, &prop->data);
+ case 0xa: /* IOTLB_1G_MISS */
+ return fme_iperf_get_vtd_sip_iotlb_1g_miss(fme, &prop->data);
+ case 0xb: /* SLPWC_L3_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l3_miss(fme, &prop->data);
+ case 0xc: /* SLPWC_L4_MISS */
+ return fme_iperf_get_vtd_sip_slpwc_l4_miss(fme, &prop->data);
+ case 0xd: /* RCC_MISS */
+ return fme_iperf_get_vtd_sip_rcc_miss(fme, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_sub_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub > PERF_MAX_PORT_NUM)
+ return -ENOENT;
+
+ switch (id) {
+ case 0xe: /* READ_TRANSACTION */
+ return fme_iperf_get_vtd_port_read_transaction(fme, sub,
+ &prop->data);
+ case 0xf: /* WRITE_TRANSACTION */
+ return fme_iperf_get_vtd_port_write_transaction(fme, sub,
+ &prop->data);
+ case 0x10: /* DEVTLB_READ_HIT */
+ return fme_iperf_get_vtd_port_devtlb_read_hit(fme, sub,
+ &prop->data);
+ case 0x11: /* DEVTLB_WRITE_HIT */
+ return fme_iperf_get_vtd_port_devtlb_write_hit(fme, sub,
+ &prop->data);
+ case 0x12: /* DEVTLB_4K_FILL */
+ return fme_iperf_get_vtd_port_devtlb_4k_fill(fme, sub,
+ &prop->data);
+ case 0x13: /* DEVTLB_2M_FILL */
+ return fme_iperf_get_vtd_port_devtlb_2m_fill(fme, sub,
+ &prop->data);
+ case 0x14: /* DEVTLB_1G_FILL */
+ return fme_iperf_get_vtd_port_devtlb_1g_fill(fme, sub,
+ &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED)
+ return fme_iperf_vtd_root_get_prop(feature, prop);
+
+ return fme_iperf_vtd_sub_get_prop(feature, prop);
+}
+
+static int fme_iperf_fab_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+	/* FREEZE is top-level only; the remaining properties exist at both levels */
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_get_fab_freeze(fme, &prop->data);
+ case 0x2: /* PCIE0_READ */
+ return fme_iperf_get_fab_port_pcie0_read(fme, sub,
+ &prop->data);
+ case 0x3: /* PCIE0_WRITE */
+ return fme_iperf_get_fab_port_pcie0_write(fme, sub,
+ &prop->data);
+ case 0x4: /* PCIE1_READ */
+ return fme_iperf_get_fab_port_pcie1_read(fme, sub,
+ &prop->data);
+ case 0x5: /* PCIE1_WRITE */
+ return fme_iperf_get_fab_port_pcie1_write(fme, sub,
+ &prop->data);
+ case 0x6: /* UPI_READ */
+ return fme_iperf_get_fab_port_upi_read(fme, sub,
+ &prop->data);
+ case 0x7: /* UPI_WRITE */
+ return fme_iperf_get_fab_port_upi_write(fme, sub,
+ &prop->data);
+ case 0x8: /* MMIO_READ */
+ return fme_iperf_get_fab_port_mmio_read(fme, sub,
+ &prop->data);
+ case 0x9: /* MMIO_WRITE */
+ return fme_iperf_get_fab_port_mmio_write(fme, sub,
+ &prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_get_fab_port_enable(fme, sub, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_get_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_get_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_get_prop(feature, prop);
+ case PERF_PROP_TOP_UNUSED:
+ return fme_iperf_root_get_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_iperf_cache_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_cache_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_vtd_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ if (sub == PERF_PROP_SUB_UNUSED && id == 0x1) /* FREEZE */
+ return fme_iperf_set_vtd_freeze(fme, prop->data);
+
+ return -ENOENT;
+}
+
+static int fme_iperf_fab_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme = feature->parent;
+ u8 sub = GET_FIELD(PROP_SUB, prop->prop_id);
+ u16 id = GET_FIELD(PROP_ID, prop->prop_id);
+
+ switch (id) {
+ case 0x1: /* FREEZE */
+ if (sub != PERF_PROP_SUB_UNUSED)
+ return -ENOENT;
+ return fme_iperf_set_fab_freeze(fme, prop->data);
+ case 0xa: /* ENABLE */
+ return fme_iperf_set_fab_port_enable(fme, sub, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int fme_global_iperf_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ u8 top = GET_FIELD(PROP_TOP, prop->prop_id);
+
+ switch (top) {
+ case PERF_PROP_TOP_CACHE:
+ return fme_iperf_cache_set_prop(feature, prop);
+ case PERF_PROP_TOP_VTD:
+ return fme_iperf_vtd_set_prop(feature, prop);
+ case PERF_PROP_TOP_FAB:
+ return fme_iperf_fab_set_prop(feature, prop);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops fme_global_iperf_ops = {
+ .init = fme_global_iperf_init,
+ .uinit = fme_global_iperf_uinit,
+ .get_prop = fme_global_iperf_get_prop,
+ .set_prop = fme_global_iperf_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
new file mode 100644
index 00000000..ec0beeb1
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_fme_pr.c
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static u64
+pr_err_handle(struct feature_fme_pr *fme_pr)
+{
+ struct feature_fme_pr_status fme_pr_status;
+ unsigned long err_code;
+ u64 fme_pr_error;
+ int i;
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ if (!fme_pr_status.pr_status)
+ return 0;
+
+ err_code = readq(&fme_pr->ccip_fme_pr_err);
+ fme_pr_error = err_code;
+
+ for (i = 0; i < PR_MAX_ERR_NUM; i++) {
+ if (err_code & (1 << i))
+ dev_info(NULL, "%s\n", pr_err_msg[i]);
+ }
+
+ writeq(fme_pr_error, &fme_pr->ccip_fme_pr_err);
+ return fme_pr_error;
+}
+
+static int fme_pr_write_init(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ if (info->flags != FPGA_MGR_PARTIAL_RECONFIG)
+ return -EINVAL;
+
+ dev_info(fme_dev, "resetting PR before initiated PR\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ fme_pr_ctl.pr_reset_ack = 1;
+
+ if (fpga_wait_register_field(pr_reset_ack, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_reset = 0;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "waiting for PR resource in HW to be initialized and ready\n");
+
+ fme_pr_status.pr_host_status = PR_HOST_STATUS_IDLE;
+
+ if (fpga_wait_register_field(pr_host_status, fme_pr_status,
+ &fme_pr->ccip_fme_pr_status,
+ PR_WAIT_TIMEOUT, 1)) {
+ dev_err(fme_dev, "maximum PR timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "check if have any previous PR error\n");
+ pr_err_handle(fme_pr);
+ return 0;
+}
+
+static int fme_pr_write(struct ifpga_fme_hw *fme_dev,
+ int port_id, const char *buf, size_t count,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+ struct feature_fme_pr_status fme_pr_status;
+ struct feature_fme_pr_data fme_pr_data;
+ int delay, pr_credit;
+ int ret = 0;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+ if (!fme_pr)
+ return -EINVAL;
+
+ dev_info(fme_dev, "set PR port ID and start request\n");
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_regionid = port_id;
+ fme_pr_ctl.pr_start_req = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "pushing data from bitstream to HW\n");
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+
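+	/* each data word written consumes one PR credit; stall until HW frees more */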
+ while (count > 0) {
+ delay = 0;
+ while (pr_credit <= 1) {
+ if (delay++ > PR_WAIT_TIMEOUT) {
+ dev_err(fme_dev, "maximum try\n");
+
+ info->pr_err = pr_err_handle(fme_pr);
+ return info->pr_err ? -EIO : -ETIMEDOUT;
+ }
+ udelay(1);
+
+ fme_pr_status.csr = readq(&fme_pr->ccip_fme_pr_status);
+ pr_credit = fme_pr_status.pr_credit;
+		}
+
+ if (count >= fme_dev->pr_bandwidth) {
+ switch (fme_dev->pr_bandwidth) {
+ case 4:
+ fme_pr_data.rsvd = 0;
+ fme_pr_data.pr_data_raw = *((const u32 *)buf);
+ writeq(fme_pr_data.csr,
+ &fme_pr->ccip_fme_pr_data);
+ break;
+ default:
+ ret = -EFAULT;
+ goto done;
+ }
+
+ buf += fme_dev->pr_bandwidth;
+ count -= fme_dev->pr_bandwidth;
+ pr_credit--;
+ } else {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto done;
+ }
+ }
+
+done:
+ return ret;
+}
+
+static int fme_pr_write_complete(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_fme_pr_ctl fme_pr_ctl;
+
+ fme_pr = get_fme_feature_ioaddr_by_index(fme_dev,
+ FME_FEATURE_ID_PR_MGMT);
+
+ fme_pr_ctl.csr = readq(&fme_pr->ccip_fme_pr_control);
+ fme_pr_ctl.pr_push_complete = 1;
+ writeq(fme_pr_ctl.csr, &fme_pr->ccip_fme_pr_control);
+
+ dev_info(fme_dev, "green bitstream push complete\n");
+ dev_info(fme_dev, "waiting for HW to release PR resource\n");
+
+ fme_pr_ctl.pr_start_req = 0;
+
+ if (fpga_wait_register_field(pr_start_req, fme_pr_ctl,
+ &fme_pr->ccip_fme_pr_control,
+ PR_WAIT_TIMEOUT, 1)) {
+ printf("maximum try.\n");
+ return -ETIMEDOUT;
+ }
+
+ dev_info(fme_dev, "PR operation complete, checking status\n");
+ info->pr_err = pr_err_handle(fme_pr);
+ if (info->pr_err)
+ return -EIO;
+
+ dev_info(fme_dev, "PR done successfully\n");
+ return 0;
+}
+
+static int fpga_pr_buf_load(struct ifpga_fme_hw *fme_dev,
+ struct fpga_pr_info *info, const char *buf,
+ size_t count)
+{
+ int ret;
+
+ info->state = FPGA_PR_STATE_WRITE_INIT;
+ ret = fme_pr_write_init(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error preparing FPGA for writing\n");
+ info->state = FPGA_PR_STATE_WRITE_INIT_ERR;
+ return ret;
+ }
+
+ /*
+ * Write the FPGA image to the FPGA.
+ */
+ info->state = FPGA_PR_STATE_WRITE;
+ ret = fme_pr_write(fme_dev, info->port_id, buf, count, info);
+ if (ret) {
+ dev_err(fme_dev, "Error while writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_ERR;
+ return ret;
+ }
+
+ /*
+ * After all the FPGA image has been written, do the device specific
+ * steps to finish and set the FPGA into operating mode.
+ */
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE;
+ ret = fme_pr_write_complete(fme_dev, info);
+ if (ret) {
+ dev_err(fme_dev, "Error after writing image data to FPGA\n");
+ info->state = FPGA_PR_STATE_WRITE_COMPLETE_ERR;
+ return ret;
+ }
+ info->state = FPGA_PR_STATE_DONE;
+
+ return 0;
+}
+
+static int fme_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size,
+ u64 *status)
+{
+ struct feature_fme_header *fme_hdr;
+ struct feature_fme_capability fme_capability;
+ struct ifpga_fme_hw *fme = &hw->fme;
+ struct fpga_pr_info info;
+ struct ifpga_port_hw *port;
+ int ret = 0;
+
+ if (!buffer || size == 0)
+ return -EINVAL;
+ if (fme->state != IFPGA_FME_IMPLEMENTED)
+ return -EINVAL;
+
+ /*
+	 * Pad extra zeros to align the PR buffer with the PR bandwidth; HW
+	 * ignores these zeros automatically.
+ */
+ size = IFPGA_ALIGN(size, fme->pr_bandwidth);
+
+ /* get fme header region */
+ fme_hdr = get_fme_feature_ioaddr_by_index(fme,
+ FME_FEATURE_ID_HEADER);
+ if (!fme_hdr)
+ return -EINVAL;
+
+ /* check port id */
+ fme_capability.csr = readq(&fme_hdr->capability);
+ if (port_id >= fme_capability.num_ports) {
+ dev_err(fme, "port number more than maximum\n");
+ return -EINVAL;
+ }
+
+ memset(&info, 0, sizeof(struct fpga_pr_info));
+ info.flags = FPGA_MGR_PARTIAL_RECONFIG;
+ info.port_id = port_id;
+
+ spinlock_lock(&fme->lock);
+
+ /* get port device by port_id */
+ port = &hw->port[port_id];
+
+ /* Disable Port before PR */
+ fpga_port_disable(port);
+
+ ret = fpga_pr_buf_load(fme, &info, (void *)buffer, size);
+
+ *status = info.pr_err;
+
+ /* Re-enable Port after PR finished */
+ fpga_port_enable(port);
+ spinlock_unlock(&fme->lock);
+
+ return ret;
+}
+
+int do_pr(struct ifpga_hw *hw, u32 port_id, void *buffer, u32 size, u64 *status)
+{
+ struct bts_header *bts_hdr;
+ void *buf;
+ struct ifpga_port_hw *port;
+ int ret;
+
+ if (!buffer || size == 0) {
+ dev_err(hw, "invalid parameter\n");
+ return -EINVAL;
+ }
+
+ bts_hdr = (struct bts_header *)buffer;
+
+ if (is_valid_bts(bts_hdr)) {
+ dev_info(hw, "this is a valid bitsteam..\n");
+ size -= (sizeof(struct bts_header) +
+ bts_hdr->metadata_len);
+ buf = (u8 *)buffer + sizeof(struct bts_header) +
+ bts_hdr->metadata_len;
+ } else {
+ return -EINVAL;
+ }
+
+	/* clear port errors before doing PR */
+ port = &hw->port[port_id];
+ ret = port_clear_error(port);
+ if (ret) {
+ dev_err(hw, "port cannot clear error\n");
+ return -EINVAL;
+ }
+
+ return fme_pr(hw, port_id, buf, size, status);
+}
+
+static int fme_pr_mgmt_init(struct feature *feature)
+{
+ struct feature_fme_pr *fme_pr;
+ struct feature_header fme_pr_header;
+ struct ifpga_fme_hw *fme;
+
+ dev_info(NULL, "FME PR MGMT Init.\n");
+
+ fme = (struct ifpga_fme_hw *)feature->parent;
+
+ fme_pr = (struct feature_fme_pr *)feature->addr;
+
+ fme_pr_header.csr = readq(&fme_pr->header);
+ if (fme_pr_header.revision == 2) {
+ dev_info(NULL, "using 512-bit PR\n");
+ fme->pr_bandwidth = 64;
+ } else {
+ dev_info(NULL, "using 32-bit PR\n");
+ fme->pr_bandwidth = 4;
+ }
+
+ return 0;
+}
+
+static void fme_pr_mgmt_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "FME PR MGMT UInit.\n");
+}
+
+struct feature_ops fme_pr_mgmt_ops = {
+ .init = fme_pr_mgmt_init,
+ .uinit = fme_pr_mgmt_uinit,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_hw.h b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
new file mode 100644
index 00000000..a20520c9
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_hw.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_HW_H_
+#define _IFPGA_HW_H_
+
+#include "ifpga_defines.h"
+#include "opae_ifpga_hw_api.h"
+
+enum ifpga_feature_state {
+ IFPGA_FEATURE_UNUSED = 0,
+ IFPGA_FEATURE_ATTACHED,
+};
+
+struct feature_irq_ctx {
+ int eventfd;
+ int idx;
+};
+
+struct feature {
+ enum ifpga_feature_state state;
+ const char *name;
+ u64 id;
+ u8 *addr;
+ uint64_t phys_addr;
+ u32 size;
+ int revision;
+ u64 cap;
+ int vfio_dev_fd;
+ struct feature_irq_ctx *ctx;
+ unsigned int ctx_num;
+
+	void *parent;		/* pointer to parent hw data structure */
+
+	struct feature_ops *ops;	/* callbacks for this private feature */
+};
+
+struct feature_ops {
+ int (*init)(struct feature *feature);
+ void (*uinit)(struct feature *feature);
+ int (*get_prop)(struct feature *feature, struct feature_prop *prop);
+ int (*set_prop)(struct feature *feature, struct feature_prop *prop);
+ int (*set_irq)(struct feature *feature, void *irq_set);
+};
+
+enum ifpga_fme_state {
+ IFPGA_FME_UNUSED = 0,
+ IFPGA_FME_IMPLEMENTED,
+};
+
+struct ifpga_fme_hw {
+ enum ifpga_fme_state state;
+
+ struct feature sub_feature[FME_FEATURE_ID_MAX];
+ spinlock_t lock; /* protect hardware access */
+
+ void *parent; /* pointer to ifpga_hw */
+
+	/* provided by HEADER feature */
+ u32 port_num;
+ struct uuid bitstream_id;
+ u64 bitstream_md;
+ size_t pr_bandwidth;
+ u32 socket_id;
+ u32 fabric_version_id;
+ u32 cache_size;
+
+ u32 capability;
+};
+
+enum ifpga_port_state {
+ IFPGA_PORT_UNUSED = 0,
+ IFPGA_PORT_ATTACHED,
+ IFPGA_PORT_DETACHED,
+};
+
+struct ifpga_port_hw {
+ enum ifpga_port_state state;
+
+ struct feature sub_feature[PORT_FEATURE_ID_MAX];
+ spinlock_t lock; /* protect access to hw */
+
+ void *parent; /* pointer to ifpga_hw */
+
+	int port_id;		/* provided by HEADER feature */
+	struct uuid afu_id;	/* provided by User AFU feature */
+
+ unsigned int disable_count;
+
+ u32 capability;
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+ u8 *stp_addr;
+ u32 stp_size;
+};
+
+#define AFU_MAX_REGION 1
+
+struct ifpga_afu_info {
+ struct opae_reg_region region[AFU_MAX_REGION];
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct ifpga_hw {
+ struct opae_adapter *adapter;
+ struct opae_adapter_data_pci *pci_data;
+
+ struct ifpga_fme_hw fme;
+ struct ifpga_port_hw port[MAX_FPGA_PORT_NUM];
+};
+
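+/* the PF owns the FME, so a populated FME state identifies the PF */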
+static inline bool is_ifpga_hw_pf(struct ifpga_hw *hw)
+{
+ return hw->fme.state != IFPGA_FME_UNUSED;
+}
+
+static inline bool is_valid_port_id(struct ifpga_hw *hw, u32 port_id)
+{
+ if (port_id >= MAX_FPGA_PORT_NUM ||
+ hw->port[port_id].state != IFPGA_PORT_ATTACHED)
+ return false;
+
+ return true;
+}
+#endif /* _IFPGA_HW_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port.c b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
new file mode 100644
index 00000000..a962f5b4
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port.c
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+int port_get_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->get_prop)
+ return feature->ops->get_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_prop(struct ifpga_port_hw *port, struct feature_prop *prop)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, prop->feature_id);
+
+ if (feature && feature->ops && feature->ops->set_prop)
+ return feature->ops->set_prop(feature, prop);
+
+ return -ENOENT;
+}
+
+int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set)
+{
+ struct feature *feature;
+
+ if (!port)
+ return -ENOENT;
+
+ feature = get_port_feature_by_id(port, feature_id);
+
+ if (feature && feature->ops && feature->ops->set_irq)
+ return feature->ops->set_irq(feature, irq_set);
+
+ return -ENOENT;
+}
+
+static int port_get_revision(struct ifpga_port_hw *port, u64 *revision)
+{
+ struct feature_port_header *port_hdr
+ = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+ struct feature_header header;
+
+ header.csr = readq(&port_hdr->header);
+
+ *revision = header.revision;
+
+ return 0;
+}
+
+static int port_get_portidx(struct ifpga_port_hw *port, u64 *idx)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_capability capability;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ capability.csr = readq(&port_hdr->capability);
+ *idx = capability.port_number;
+
+ return 0;
+}
+
+static int port_get_latency_tolerance(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_control control;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ control.csr = readq(&port_hdr->control);
+ *val = control.latency_tolerance;
+
+ return 0;
+}
+
+static int port_get_ap1_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap1_event;
+
+ return 0;
+}
+
+static int port_set_ap1_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap1_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_ap2_event(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.ap2_event;
+
+ return 0;
+}
+
+static int port_set_ap2_event(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ status.ap2_event = val;
+ writeq(status.csr, &port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_power_state(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+ struct feature_port_status status;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ status.csr = readq(&port_hdr->status);
+ spinlock_unlock(&port->lock);
+
+ *val = status.power_state;
+
+ return 0;
+}
+
+static int port_get_userclk_freqcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_set_userclk_freqcntrcmd(struct ifpga_port_hw *port, u64 val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ writeq(val, &port_hdr->user_clk_freq_cmd1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts0);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_get_userclk_freqcntrsts(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_header *port_hdr;
+
+ port_hdr = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_HEADER);
+
+ spinlock_lock(&port->lock);
+ *val = readq(&port_hdr->user_clk_freq_sts1);
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static int port_hdr_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port hdr Init.\n");
+
+ fpga_port_reset(port);
+
+ return 0;
+}
+
+static void port_hdr_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port hdr uinit.\n");
+}
+
+static int port_hdr_get_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_REVISION:
+ return port_get_revision(port, &prop->data);
+ case PORT_HDR_PROP_PORTIDX:
+ return port_get_portidx(port, &prop->data);
+ case PORT_HDR_PROP_LATENCY_TOLERANCE:
+ return port_get_latency_tolerance(port, &prop->data);
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_get_ap1_event(port, &prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_get_ap2_event(port, &prop->data);
+ case PORT_HDR_PROP_POWER_STATE:
+ return port_get_power_state(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_get_userclk_freqcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_get_userclk_freqcntrcmd(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQSTS:
+ return port_get_userclk_freqsts(port, &prop->data);
+ case PORT_HDR_PROP_USERCLK_CNTRSTS:
+ return port_get_userclk_freqcntrsts(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_hdr_set_prop(struct feature *feature, struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_HDR_PROP_AP1_EVENT:
+ return port_set_ap1_event(port, prop->data);
+ case PORT_HDR_PROP_AP2_EVENT:
+ return port_set_ap2_event(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCMD:
+ return port_set_userclk_freqcmd(port, prop->data);
+ case PORT_HDR_PROP_USERCLK_FREQCNTRCMD:
+ return port_set_userclk_freqcntrcmd(port, prop->data);
+ }
+
+ return -ENOENT;
+}
+
+struct feature_ops port_hdr_ops = {
+ .init = port_hdr_init,
+ .uinit = port_hdr_uinit,
+ .get_prop = port_hdr_get_prop,
+ .set_prop = port_hdr_set_prop,
+};
+
+static int port_stp_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port stp Init.\n");
+
+ spinlock_lock(&port->lock);
+ port->stp_addr = feature->addr;
+ port->stp_size = feature->size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_stp_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "port stp uinit.\n");
+}
+
+struct feature_ops port_stp_ops = {
+ .init = port_stp_init,
+ .uinit = port_stp_uinit,
+};
+
+static int port_uint_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "PORT UINT Init.\n");
+
+ spinlock_lock(&port->lock);
+ if (feature->ctx_num) {
+ port->capability |= FPGA_PORT_CAP_UAFU_IRQ;
+ port->num_uafu_irqs = feature->ctx_num;
+ }
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_uint_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+
+ dev_info(NULL, "PORT UINT UInit.\n");
+}
+
+struct feature_ops port_uint_ops = {
+ .init = port_uint_init,
+ .uinit = port_uint_uinit,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
new file mode 100644
index 00000000..23db562b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/ifpga_port_error.c
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "ifpga_feature_dev.h"
+
+static int port_err_get_revision(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_header header;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ header.csr = readq(&port_err->header);
+ *val = header.revision;
+
+ return 0;
+}
+
+static int port_err_get_errors(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_err_key error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ error.csr = readq(&port_err->port_error);
+ *val = error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_error(struct ifpga_port_hw *port, u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_first_err_key first_error;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+ first_error.csr = readq(&port_err->port_first_error);
+ *val = first_error.csr;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_lsb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req0 malreq0;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq0.header_lsb = readq(&port_err->malreq0);
+ *val = malreq0.header_lsb;
+
+ return 0;
+}
+
+static int port_err_get_first_malformed_req_msb(struct ifpga_port_hw *port,
+ u64 *val)
+{
+ struct feature_port_error *port_err;
+ struct feature_port_malformed_req1 malreq1;
+
+ port_err = get_port_feature_ioaddr_by_index(port,
+ PORT_FEATURE_ID_ERROR);
+
+ malreq1.header_msb = readq(&port_err->malreq1);
+ *val = malreq1.header_msb;
+
+ return 0;
+}
+
+static int port_err_set_clear(struct ifpga_port_hw *port, u64 val)
+{
+ int ret;
+
+ spinlock_lock(&port->lock);
+ ret = port_err_clear(port, val);
+ spinlock_unlock(&port->lock);
+
+ return ret;
+}
+
+static int port_error_init(struct feature *feature)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ dev_info(NULL, "port error Init.\n");
+
+ spinlock_lock(&port->lock);
+ port_err_mask(port, false);
+ if (feature->ctx_num)
+ port->capability |= FPGA_PORT_CAP_ERR_IRQ;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+static void port_error_uinit(struct feature *feature)
+{
+ UNUSED(feature);
+}
+
+static int port_error_get_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ switch (prop->prop_id) {
+ case PORT_ERR_PROP_REVISION:
+ return port_err_get_revision(port, &prop->data);
+ case PORT_ERR_PROP_ERRORS:
+ return port_err_get_errors(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_ERROR:
+ return port_err_get_first_error(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB:
+ return port_err_get_first_malformed_req_lsb(port, &prop->data);
+ case PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB:
+ return port_err_get_first_malformed_req_msb(port, &prop->data);
+ }
+
+ return -ENOENT;
+}
+
+static int port_error_set_prop(struct feature *feature,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port = feature->parent;
+
+ if (prop->prop_id == PORT_ERR_PROP_CLEAR)
+ return port_err_set_clear(port, prop->data);
+
+ return -ENOENT;
+}
+
+struct feature_ops port_error_ops = {
+ .init = port_error_init,
+ .uinit = port_error_uinit,
+ .get_prop = port_error_get_prop,
+ .set_prop = port_error_set_prop,
+};
diff --git a/drivers/raw/ifpga_rawdev/base/meson.build b/drivers/raw/ifpga_rawdev/base/meson.build
new file mode 100644
index 00000000..cb655352
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/meson.build
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = [
+ 'ifpga_api.c',
+ 'ifpga_enumerate.c',
+ 'ifpga_feature_dev.c',
+ 'ifpga_fme.c',
+ 'ifpga_fme_iperf.c',
+ 'ifpga_fme_dperf.c',
+ 'ifpga_fme_error.c',
+ 'ifpga_port.c',
+ 'ifpga_port_error.c',
+ 'ifpga_fme_pr.c',
+ 'opae_hw_api.c',
+ 'opae_ifpga_hw_api.c',
+ 'opae_debug.c'
+]
+
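+# suppress warnings triggered by the imported OPAE base code, flag by flag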
+error_cflags = ['-Wno-sign-compare', '-Wno-unused-value',
+ '-Wno-format', '-Wno-unused-but-set-variable',
+ '-Wno-strict-aliasing'
+]
+c_args = cflags
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('ifpga_rawdev_base', sources,
+ dependencies: static_rte_eal,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.c b/drivers/raw/ifpga_rawdev/base/opae_debug.c
new file mode 100644
index 00000000..024d7d29
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.c
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#define OPAE_HW_DEBUG
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+
+void opae_manager_dump(struct opae_manager *mgr)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Manger %s\n", mgr->name);
+ opae_log("OPAE Manger OPs = %p\n", mgr->ops);
+ opae_log("OPAE Manager Private Data = %p\n", mgr->data);
+ opae_log("OPAE Adapter(parent) = %p\n", mgr->adapter);
+ opae_log("==========================\n");
+}
+
+void opae_bridge_dump(struct opae_bridge *br)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Bridge %s\n", br->name);
+ opae_log("OPAE Bridge ID = %d\n", br->id);
+ opae_log("OPAE Bridge OPs = %p\n", br->ops);
+ opae_log("OPAE Bridge Private Data = %p\n", br->data);
+ opae_log("OPAE Accelerator(under this bridge) = %p\n", br->acc);
+ opae_log("==========================\n");
+}
+
+void opae_accelerator_dump(struct opae_accelerator *acc)
+{
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Accelerator %s\n", acc->name);
+ opae_log("OPAE Accelerator Index = %d\n", acc->index);
+ opae_log("OPAE Accelerator OPs = %p\n", acc->ops);
+ opae_log("OPAE Accelerator Private Data = %p\n", acc->data);
+ opae_log("OPAE Bridge (upstream) = %p\n", acc->br);
+ opae_log("OPAE Manager (upstream) = %p\n", acc->mgr);
+ opae_log("==========================\n");
+
+ if (acc->br)
+ opae_bridge_dump(acc->br);
+}
+
+static void opae_adapter_data_dump(void *data)
+{
+ struct opae_adapter_data *d = data;
+ struct opae_adapter_data_pci *d_pci;
+ struct opae_reg_region *r;
+ int i;
+
+ opae_log("=====%s=====\n", __func__);
+
+ switch (d->type) {
+ case OPAE_FPGA_PCI:
+ d_pci = (struct opae_adapter_data_pci *)d;
+
+ opae_log("OPAE Adapter Type = PCI\n");
+ opae_log("PCI Device ID: 0x%04x\n", d_pci->device_id);
+ opae_log("PCI Vendor ID: 0x%04x\n", d_pci->vendor_id);
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ r = &d_pci->region[i];
+ opae_log("PCI Bar %d: phy(%llx) len(%llx) addr(%p)\n",
+ i, (unsigned long long)r->phys_addr,
+ (unsigned long long)r->len, r->addr);
+ }
+ break;
+ case OPAE_FPGA_NET:
+ break;
+ }
+
+ opae_log("==========================\n");
+}
+
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose)
+{
+ struct opae_accelerator *acc;
+
+ opae_log("=====%s=====\n", __func__);
+ opae_log("OPAE Adapter %s\n", adapter->name);
+ opae_log("OPAE Adapter OPs = %p\n", adapter->ops);
+ opae_log("OPAE Adapter Private Data = %p\n", adapter->data);
+ opae_log("OPAE Manager (downstream) = %p\n", adapter->mgr);
+
+ if (verbose) {
+ if (adapter->mgr)
+ opae_manager_dump(adapter->mgr);
+
+ opae_adapter_for_each_acc(adapter, acc)
+ opae_accelerator_dump(acc);
+
+ if (adapter->data)
+ opae_adapter_data_dump(adapter->data);
+ }
+
+ opae_log("==========================\n");
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_debug.h b/drivers/raw/ifpga_rawdev/base/opae_debug.h
new file mode 100644
index 00000000..a03dff92
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_debug.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_DEBUG_H_
+#define _OPAE_DEBUG_H_
+
+#ifdef OPAE_HW_DEBUG
+#define opae_log(fmt, args...) printf(fmt, ## args)
+#else
+#define opae_log(fmt, args...) do {} while (0)
+#endif
+
+void opae_manager_dump(struct opae_manager *mgr);
+void opae_bridge_dump(struct opae_bridge *br);
+void opae_accelerator_dump(struct opae_accelerator *acc);
+void opae_adapter_dump(struct opae_adapter *adapter, int verbose);
+
+#endif /* _OPAE_DEBUG_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
new file mode 100644
index 00000000..a533dfea
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_hw_api.h"
+#include "opae_debug.h"
+#include "ifpga_api.h"
+
+/* OPAE Bridge Functions */
+
+/**
+ * opae_bridge_alloc - alloc opae_bridge data structure
+ * @name: bridge name.
+ * @ops: ops of this bridge.
+ * @data: private data of this bridge.
+ *
+ * Return opae_bridge on success, otherwise NULL.
+ */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data)
+{
+ struct opae_bridge *br = opae_zmalloc(sizeof(*br));
+
+ if (!br)
+ return NULL;
+
+ br->name = name;
+ br->ops = ops;
+ br->data = data;
+
+ opae_log("%s %p\n", __func__, br);
+
+ return br;
+}
+
+/**
+ * opae_bridge_reset - reset opae_bridge
+ * @br: bridge to be reset.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_bridge_reset(struct opae_bridge *br)
+{
+ if (!br)
+ return -EINVAL;
+
+ if (br->ops && br->ops->reset)
+ return br->ops->reset(br);
+
+ opae_log("%s no ops\n", __func__);
+
+ return -ENOENT;
+}
+
+/* Accelerator Functions */
+
+/**
+ * opae_accelerator_alloc - alloc opae_accelerator data structure
+ * @name: accelerator name.
+ * @ops: ops of this accelerator.
+ * @data: private data of this accelerator.
+ *
+ * Return: opae_accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data)
+{
+ struct opae_accelerator *acc = opae_zmalloc(sizeof(*acc));
+
+ if (!acc)
+ return NULL;
+
+ acc->name = name;
+ acc->ops = ops;
+ acc->data = data;
+
+ opae_log("%s %p\n", __func__, acc);
+
+ return acc;
+}
+
+/**
+ * opae_acc_reg_read - read accelerator's register from its reg region.
+ * @acc: accelerator to read.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: read operation width, e.g. 4 bytes = a 32-bit read.
+ * @data: data to store the value read from the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->read)
+ return acc->ops->read(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_reg_write - write to accelerator's register from its reg region.
+ * @acc: accelerator to write.
+ * @region_idx: reg region index.
+ * @offset: reg offset.
+ * @byte: write operation width in bytes, e.g. 4 bytes = 32-bit write.
+ * @data: data stored the value to write to the register.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data)
+{
+ if (!acc || !data)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->write)
+ return acc->ops->write(acc, region_idx, offset, byte, data);
+
+ return -ENOENT;
+}
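+
+/*
+ * Usage sketch (illustrative; the region index and offset are placeholder
+ * values): read-modify-write a 32-bit CSR in region 0 of an accelerator.
+ *
+ *   u32 csr;
+ *
+ *   if (!opae_acc_reg_read(acc, 0, 0x10, 4, &csr)) {
+ *           csr |= 0x1;
+ *           opae_acc_reg_write(acc, 0, 0x10, 4, &csr);
+ *   }
+ */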
+
+/**
+ * opae_acc_get_info - get information of an accelerator.
+ * @acc: targeted accelerator
+ * @info: accelerator info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_info)
+ return acc->ops->get_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_get_region_info - get information of an accelerator register region.
+ * @acc: targeted accelerator
+ * @info: accelerator region info data structure to be filled.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info)
+{
+ if (!acc || !info)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_region_info)
+ return acc->ops->get_region_info(acc, info);
+
+ return -ENOENT;
+}
+
+/**
+ * opae_acc_set_irq - set an accelerator's irq.
+ * @acc: targeted accelerator
+ * @start: start vector number
+ * @count: count of vectors to be set from the start vector
+ * @evtfds: event fds to be notified when corresponding irqs happens
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[])
+{
+ if (!acc || !acc->data)
+ return -EINVAL;
+
+ if (start + count <= start)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->set_irq)
+ return acc->ops->set_irq(acc, start, count, evtfds);
+
+ return -ENOENT;
+}
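+
+/*
+ * Usage sketch (illustrative): bind one eventfd to the accelerator's first
+ * interrupt vector; assumes <sys/eventfd.h> and elides error handling.
+ *
+ *   s32 evtfd = eventfd(0, 0);
+ *
+ *   if (evtfd >= 0 && opae_acc_set_irq(acc, 0, 1, &evtfd))
+ *           opae_log("set_irq failed\n");
+ */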
+
+/**
+ * opae_acc_get_uuid - get accelerator's UUID.
+ * @acc: targeted accelerator
+ * @uuid: a pointer to UUID
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid)
+{
+ if (!acc || !uuid)
+ return -EINVAL;
+
+ if (acc->ops && acc->ops->get_uuid)
+ return acc->ops->get_uuid(acc, uuid);
+
+ return -ENOENT;
+}
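+
+/*
+ * Usage sketch (illustrative): fetch and print an accelerator's UUID bytes.
+ *
+ *   struct uuid uuid;
+ *   int i;
+ *
+ *   if (!opae_acc_get_uuid(acc, &uuid))
+ *           for (i = 0; i < 16; i++)
+ *                   printf("%02x", uuid.b[i]);
+ */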
+
+/* Manager Functions */
+
+/**
+ * opae_manager_alloc - alloc opae_manager data structure
+ * @name: manager name.
+ * @ops: ops of this manager.
+ * @data: private data of this manager.
+ *
+ * Return: opae_manager on success, otherwise NULL.
+ */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data)
+{
+ struct opae_manager *mgr = opae_zmalloc(sizeof(*mgr));
+
+ if (!mgr)
+ return NULL;
+
+ mgr->name = name;
+ mgr->ops = ops;
+ mgr->data = data;
+
+ opae_log("%s %p\n", __func__, mgr);
+
+ return mgr;
+}
+
+/**
+ * opae_manager_flash - flash a reconfiguration image via opae_manager
+ * @mgr: opae_manager for flash.
+ * @id: id of target region (accelerator).
+ * @buf: image data buffer.
+ * @size: buffer size.
+ * @status: status to store flash result.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_manager_flash(struct opae_manager *mgr, int id, void *buf, u32 size,
+ u64 *status)
+{
+ if (!mgr)
+ return -EINVAL;
+
+	if (mgr->ops && mgr->ops->flash)
+ return mgr->ops->flash(mgr, id, buf, size, status);
+
+ return -ENOENT;
+}
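+
+/*
+ * Usage sketch (illustrative): flash a bitstream buffer to port 0 and check
+ * the returned status word; "buf" and "size" are assumed to hold image data.
+ *
+ *   u64 status;
+ *   int ret = opae_manager_flash(mgr, 0, buf, size, &status);
+ *
+ *   if (ret)
+ *           opae_log("flash failed: %d (status 0x%llx)\n", ret,
+ *                    (unsigned long long)status);
+ */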
+
+/* Adapter Functions */
+
+/**
+ * opae_adapter_data_alloc - alloc opae_adapter_data data structure
+ * @type: opae_adapter_type.
+ *
+ * Return: opae_adapter_data on success, otherwise NULL.
+ */
+void *opae_adapter_data_alloc(enum opae_adapter_type type)
+{
+ struct opae_adapter_data *data;
+ int size;
+
+ switch (type) {
+ case OPAE_FPGA_PCI:
+ size = sizeof(struct opae_adapter_data_pci);
+ break;
+ case OPAE_FPGA_NET:
+ size = sizeof(struct opae_adapter_data_net);
+ break;
+ default:
+ size = sizeof(struct opae_adapter_data);
+ break;
+ }
+
+ data = opae_zmalloc(size);
+ if (!data)
+ return NULL;
+
+ data->type = type;
+
+ return data;
+}
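+
+/*
+ * Usage sketch (illustrative): allocate PCI-type adapter data and fill in
+ * the fields a PCI driver already knows; the id values are placeholders.
+ *
+ *   struct opae_adapter_data_pci *data =
+ *           opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ *
+ *   if (data) {
+ *           data->vendor_id = 0x8086;
+ *           data->device_id = 0xbcc0;
+ *   }
+ */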
+
+static struct opae_adapter_ops *match_ops(struct opae_adapter *adapter)
+{
+ struct opae_adapter_data *data;
+
+ if (!adapter || !adapter->data)
+ return NULL;
+
+ data = adapter->data;
+
+ if (data->type == OPAE_FPGA_PCI)
+ return &ifpga_adapter_ops;
+
+ return NULL;
+}
+
+/**
+ * opae_adapter_alloc - alloc opae_adapter data structure
+ * @name: adapter name.
+ * @data: private data of this adapter.
+ *
+ * Return: opae_adapter on success, otherwise NULL.
+ */
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data)
+{
+ struct opae_adapter *adapter = opae_zmalloc(sizeof(*adapter));
+
+ if (!adapter)
+ return NULL;
+
+ TAILQ_INIT(&adapter->acc_list);
+ adapter->data = data;
+ adapter->name = name;
+ adapter->ops = match_ops(adapter);
+
+ return adapter;
+}
+
+/**
+ * opae_adapter_enumerate - enumerate this adapter
+ * @adapter: adapter to enumerate.
+ *
+ * Return: 0 on success, otherwise error code.
+ */
+int opae_adapter_enumerate(struct opae_adapter *adapter)
+{
+ int ret = -ENOENT;
+
+ if (!adapter)
+ return -EINVAL;
+
+ if (adapter->ops && adapter->ops->enumerate)
+ ret = adapter->ops->enumerate(adapter);
+
+ if (!ret)
+ opae_adapter_dump(adapter, 1);
+
+ return ret;
+}
+
+/**
+ * opae_adapter_destroy - destroy this adapter
+ * @adapter: adapter to destroy.
+ *
+ * Destroy things allocated during adapter enumeration.
+ */
+void opae_adapter_destroy(struct opae_adapter *adapter)
+{
+ if (adapter && adapter->ops && adapter->ops->destroy)
+ adapter->ops->destroy(adapter);
+}
+
+/**
+ * opae_adapter_get_acc - find and return accelerator with matched id
+ * @adapter: adapter to find the accelerator.
+ * @acc_id: id (index) of the accelerator.
+ *
+ * Return: pointer to the matched accelerator on success, otherwise NULL.
+ */
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id)
+{
+ struct opae_accelerator *acc = NULL;
+
+ if (!adapter)
+ return NULL;
+
+ opae_adapter_for_each_acc(adapter, acc)
+ if (acc->index == acc_id)
+ return acc;
+
+ return NULL;
+}
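+
+/*
+ * End-to-end usage sketch (illustrative): the expected adapter life cycle,
+ * assuming "data" came from opae_adapter_data_alloc() and was filled in by
+ * the caller.
+ *
+ *   struct opae_adapter *adapter = opae_adapter_alloc("fpga0", data);
+ *
+ *   if (adapter && !opae_adapter_enumerate(adapter)) {
+ *           struct opae_accelerator *acc =
+ *                   opae_adapter_get_acc(adapter, 0);
+ *           ... use acc via the opae_acc_* APIs ...
+ *           opae_adapter_destroy(adapter);
+ *   }
+ *   opae_adapter_free(adapter);
+ */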
diff --git a/drivers/raw/ifpga_rawdev/base/opae_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
new file mode 100644
index 00000000..4bbc9df5
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_hw_api.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_HW_API_H_
+#define _OPAE_HW_API_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include "opae_osdep.h"
+
+#ifndef PCI_MAX_RESOURCE
+#define PCI_MAX_RESOURCE 6
+#endif
+
+struct opae_adapter;
+
+enum opae_adapter_type {
+ OPAE_FPGA_PCI,
+ OPAE_FPGA_NET,
+};
+
+/* OPAE Manager Data Structure */
+struct opae_manager_ops;
+
+/*
+ * opae_manager has a pointer to its parent adapter, as it may be able to
+ * manage all components on this FPGA device (adapter). If that is not the
+ * case, do not set this adapter, which limits the opae_manager ops to the
+ * manager itself.
+ */
+struct opae_manager {
+ const char *name;
+ struct opae_adapter *adapter;
+ struct opae_manager_ops *ops;
+ void *data;
+};
+
+/* FIXME: add more management ops, e.g power/thermal and etc */
+struct opae_manager_ops {
+ int (*flash)(struct opae_manager *mgr, int id, void *buffer,
+ u32 size, u64 *status);
+};
+
+/* OPAE Manager APIs */
+struct opae_manager *
+opae_manager_alloc(const char *name, struct opae_manager_ops *ops, void *data);
+#define opae_manager_free(mgr) opae_free(mgr)
+int opae_manager_flash(struct opae_manager *mgr, int acc_id, void *buf,
+ u32 size, u64 *status);
+
+/* OPAE Bridge Data Structure */
+struct opae_bridge_ops;
+
+/*
+ * opae_bridge only has a pointer to its downstream accelerator.
+ */
+struct opae_bridge {
+ const char *name;
+ int id;
+ struct opae_accelerator *acc;
+ struct opae_bridge_ops *ops;
+ void *data;
+};
+
+struct opae_bridge_ops {
+ int (*reset)(struct opae_bridge *br);
+};
+
+/* OPAE Bridge APIs */
+struct opae_bridge *
+opae_bridge_alloc(const char *name, struct opae_bridge_ops *ops, void *data);
+int opae_bridge_reset(struct opae_bridge *br);
+#define opae_bridge_free(br) opae_free(br)
+
+/* OPAE Accelerator Data Structure */
+struct opae_accelerator_ops;
+
+/*
+ * opae_accelerator has a pointer to its upstream bridge (port).
+ * In some cases, if the same user is allowed to do PR on its own accelerator,
+ * the manager pointer is set during enumeration. In other cases, the PR
+ * functions can only be done via the manager in another module / thread /
+ * service / application for better protection.
+ */
+struct opae_accelerator {
+ TAILQ_ENTRY(opae_accelerator) node;
+ const char *name;
+ int index;
+ struct opae_bridge *br;
+ struct opae_manager *mgr;
+ struct opae_accelerator_ops *ops;
+ void *data;
+};
+
+struct opae_acc_info {
+ unsigned int num_regions;
+ unsigned int num_irqs;
+};
+
+struct opae_acc_region_info {
+ u32 flags;
+#define ACC_REGION_READ (1 << 0)
+#define ACC_REGION_WRITE (1 << 1)
+#define ACC_REGION_MMIO (1 << 2)
+ u32 index;
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_accelerator_ops {
+ int (*read)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*write)(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+ int (*get_info)(struct opae_accelerator *acc,
+ struct opae_acc_info *info);
+ int (*get_region_info)(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+ int (*set_irq)(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+ int (*get_uuid)(struct opae_accelerator *acc,
+ struct uuid *uuid);
+};
+
+/* OPAE accelerator APIs */
+struct opae_accelerator *
+opae_accelerator_alloc(const char *name, struct opae_accelerator_ops *ops,
+ void *data);
+#define opae_accelerator_free(acc) opae_free(acc)
+int opae_acc_get_info(struct opae_accelerator *acc, struct opae_acc_info *info);
+int opae_acc_get_region_info(struct opae_accelerator *acc,
+ struct opae_acc_region_info *info);
+int opae_acc_set_irq(struct opae_accelerator *acc,
+ u32 start, u32 count, s32 evtfds[]);
+int opae_acc_get_uuid(struct opae_accelerator *acc,
+ struct uuid *uuid);
+
+static inline struct opae_bridge *
+opae_acc_get_br(struct opae_accelerator *acc)
+{
+ return acc ? acc->br : NULL;
+}
+
+static inline struct opae_manager *
+opae_acc_get_mgr(struct opae_accelerator *acc)
+{
+ return acc ? acc->mgr : NULL;
+}
+
+int opae_acc_reg_read(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_reg_write(struct opae_accelerator *acc, unsigned int region_idx,
+ u64 offset, unsigned int byte, void *data);
+
+#define opae_acc_reg_read64(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 8, data)
+#define opae_acc_reg_write64(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 8, data)
+#define opae_acc_reg_read32(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 4, data)
+#define opae_acc_reg_write32(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 4, data)
+#define opae_acc_reg_read16(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 2, data)
+#define opae_acc_reg_write16(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 2, data)
+#define opae_acc_reg_read8(acc, region, offset, data) \
+ opae_acc_reg_read(acc, region, offset, 1, data)
+#define opae_acc_reg_write8(acc, region, offset, data) \
+ opae_acc_reg_write(acc, region, offset, 1, data)
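+
+/*
+ * Example (illustrative): the width-specific wrappers above only fix the
+ * byte argument, so a 64-bit read of offset 0x28 in region 0 is:
+ *
+ *   u64 val;
+ *
+ *   opae_acc_reg_read64(acc, 0, 0x28, &val);
+ */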
+
+/* for data stream read/write */
+int opae_acc_data_read(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+int opae_acc_data_write(struct opae_accelerator *acc, unsigned int flags,
+ u64 offset, unsigned int byte, void *data);
+
+/* OPAE Adapter Data Structure */
+struct opae_adapter_data {
+ enum opae_adapter_type type;
+};
+
+struct opae_reg_region {
+ u64 phys_addr;
+ u64 len;
+ u8 *addr;
+};
+
+struct opae_adapter_data_pci {
+ enum opae_adapter_type type;
+ u16 device_id;
+ u16 vendor_id;
+ struct opae_reg_region region[PCI_MAX_RESOURCE];
+ int vfio_dev_fd; /* VFIO device file descriptor */
+};
+
+/* FIXME: OPAE_FPGA_NET type */
+struct opae_adapter_data_net {
+ enum opae_adapter_type type;
+};
+
+struct opae_adapter_ops {
+ int (*enumerate)(struct opae_adapter *adapter);
+ void (*destroy)(struct opae_adapter *adapter);
+};
+
+TAILQ_HEAD(opae_accelerator_list, opae_accelerator);
+
+#define opae_adapter_for_each_acc(adapter, acc) \
+ TAILQ_FOREACH(acc, &adapter->acc_list, node)
+
+struct opae_adapter {
+ const char *name;
+ struct opae_manager *mgr;
+ struct opae_accelerator_list acc_list;
+ struct opae_adapter_ops *ops;
+ void *data;
+};
+
+/* OPAE Adapter APIs */
+void *opae_adapter_data_alloc(enum opae_adapter_type type);
+#define opae_adapter_data_free(data) opae_free(data)
+
+struct opae_adapter *opae_adapter_alloc(const char *name, void *data);
+#define opae_adapter_free(adapter) opae_free(adapter)
+
+int opae_adapter_enumerate(struct opae_adapter *adapter);
+void opae_adapter_destroy(struct opae_adapter *adapter);
+static inline struct opae_manager *
+opae_adapter_get_mgr(struct opae_adapter *adapter)
+{
+ return adapter ? adapter->mgr : NULL;
+}
+
+struct opae_accelerator *
+opae_adapter_get_acc(struct opae_adapter *adapter, int acc_id);
+
+static inline void opae_adapter_add_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_INSERT_TAIL(&adapter->acc_list, acc, node);
+}
+
+static inline void opae_adapter_remove_acc(struct opae_adapter *adapter,
+ struct opae_accelerator *acc)
+{
+ TAILQ_REMOVE(&adapter->acc_list, acc, node);
+}
+#endif /* _OPAE_HW_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
new file mode 100644
index 00000000..89c7b492
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.c
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include "opae_ifpga_hw_api.h"
+#include "ifpga_api.h"
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_get_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
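+
+/*
+ * Usage sketch (illustrative): read the number of ports from the FME header
+ * feature via the property interface.
+ *
+ *   struct feature_prop prop = {
+ *           .feature_id = IFPGA_FME_FEATURE_ID_HEADER,
+ *           .prop_id = FME_HDR_PROP_PORTS_NUM,
+ *   };
+ *
+ *   if (!opae_manager_ifpga_get_prop(mgr, &prop))
+ *           printf("ports: %llu\n", (unsigned long long)prop.data);
+ */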
+
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_prop(fme->parent, FEATURE_FIU_ID_FME, 0, prop);
+}
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data || !fme_info)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ spinlock_lock(&fme->lock);
+ fme_info->capability = fme->capability;
+ spinlock_unlock(&fme->lock);
+
+ return 0;
+}
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set)
+{
+ struct ifpga_fme_hw *fme;
+
+ if (!mgr || !mgr->data)
+ return -EINVAL;
+
+ fme = mgr->data;
+
+ return ifpga_set_irq(fme->parent, FEATURE_FIU_ID_FME, 0,
+ IFPGA_FME_FEATURE_ID_GLOBAL_ERR, err_irq_set);
+}
+
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_get_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_prop(port->parent, FEATURE_FIU_ID_PORT,
+ port->port_id, prop);
+}
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !port_info)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ port_info->capability = port->capability;
+ port_info->num_uafu_irqs = port->num_uafu_irqs;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data || !info)
+ return -EINVAL;
+
+ /* Only support STP region now */
+ if (info->index != PORT_REGION_INDEX_STP)
+ return -EINVAL;
+
+ port = br->data;
+
+ spinlock_lock(&port->lock);
+ info->addr = port->stp_addr;
+ info->size = port->stp_size;
+ spinlock_unlock(&port->lock);
+
+ return 0;
+}
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set)
+{
+ struct ifpga_port_hw *port;
+
+ if (!br || !br->data)
+ return -EINVAL;
+
+ port = br->data;
+
+ return ifpga_set_irq(port->parent, FEATURE_FIU_ID_PORT, port->port_id,
+ IFPGA_PORT_FEATURE_ID_ERROR, err_irq_set);
+}
diff --git a/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
new file mode 100644
index 00000000..4c2c9904
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_ifpga_hw_api.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_IFPGA_HW_API_H_
+#define _OPAE_IFPGA_HW_API_H_
+
+#include "opae_hw_api.h"
+
+/**
+ * struct feature_prop - data structure for feature property
+ * @feature_id: id of this feature.
+ * @prop_id: id of this property under this feature.
+ * @data: property value to set/get.
+ */
+struct feature_prop {
+ u64 feature_id;
+ u64 prop_id;
+ u64 data;
+};
+
+#define IFPGA_FIU_ID_FME 0x0
+#define IFPGA_FIU_ID_PORT 0x1
+
+#define IFPGA_FME_FEATURE_ID_HEADER 0x0
+#define IFPGA_FME_FEATURE_ID_THERMAL_MGMT 0x1
+#define IFPGA_FME_FEATURE_ID_POWER_MGMT 0x2
+#define IFPGA_FME_FEATURE_ID_GLOBAL_IPERF 0x3
+#define IFPGA_FME_FEATURE_ID_GLOBAL_ERR 0x4
+#define IFPGA_FME_FEATURE_ID_PR_MGMT 0x5
+#define IFPGA_FME_FEATURE_ID_HSSI 0x6
+#define IFPGA_FME_FEATURE_ID_GLOBAL_DPERF 0x7
+
+#define IFPGA_PORT_FEATURE_ID_HEADER 0x0
+#define IFPGA_PORT_FEATURE_ID_AFU 0xff
+#define IFPGA_PORT_FEATURE_ID_ERROR 0x10
+#define IFPGA_PORT_FEATURE_ID_UMSG 0x11
+#define IFPGA_PORT_FEATURE_ID_UINT 0x12
+#define IFPGA_PORT_FEATURE_ID_STP 0x13
+
+/*
+ * PROP format (TOP + SUB + ID)
+ *
+ * (~0x0) means this field is unused.
+ */
+#define PROP_TOP GENMASK(31, 24)
+#define PROP_TOP_UNUSED 0xff
+#define PROP_SUB GENMASK(23, 16)
+#define PROP_SUB_UNUSED 0xff
+#define PROP_ID GENMASK(15, 0)
+
+#define PROP(_top, _sub, _id) \
+ (SET_FIELD(PROP_TOP, _top) | SET_FIELD(PROP_SUB, _sub) |\
+ SET_FIELD(PROP_ID, _id))
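+
+/*
+ * Example (illustrative): PROP() packs the three fields, so the per-port
+ * fabric PCIe0 read counter of port 2 is PROP(PERF_PROP_TOP_FAB, 2, 0x2),
+ * which is exactly what FME_IPERF_PROP_FAB_PORT_PCIE0_READ(2) below expands
+ * to.
+ */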
+
+/* FME header feature's properties */
+#define FME_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define FME_HDR_PROP_PORTS_NUM 0x2 /* RDONLY */
+#define FME_HDR_PROP_CACHE_SIZE 0x3 /* RDONLY */
+#define FME_HDR_PROP_VERSION 0x4 /* RDONLY */
+#define FME_HDR_PROP_SOCKET_ID 0x5 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_ID 0x6 /* RDONLY */
+#define FME_HDR_PROP_BITSTREAM_METADATA 0x7 /* RDONLY */
+
+/* FME error reporting feature's properties */
+/* FME error reporting properties format */
+#define ERR_PROP(_top, _id) PROP(_top, 0xff, _id)
+#define ERR_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define ERR_PROP_TOP_FME_ERR 0x1
+#define ERR_PROP_ROOT(_id) ERR_PROP(0xff, _id)
+#define ERR_PROP_FME_ERR(_id) ERR_PROP(ERR_PROP_TOP_FME_ERR, _id)
+
+#define FME_ERR_PROP_ERRORS ERR_PROP_FME_ERR(0x1)
+#define FME_ERR_PROP_FIRST_ERROR ERR_PROP_FME_ERR(0x2)
+#define FME_ERR_PROP_NEXT_ERROR ERR_PROP_FME_ERR(0x3)
+#define FME_ERR_PROP_CLEAR ERR_PROP_FME_ERR(0x4) /* WO */
+#define FME_ERR_PROP_REVISION ERR_PROP_ROOT(0x5)
+#define FME_ERR_PROP_PCIE0_ERRORS ERR_PROP_ROOT(0x6) /* RW */
+#define FME_ERR_PROP_PCIE1_ERRORS ERR_PROP_ROOT(0x7) /* RW */
+#define FME_ERR_PROP_NONFATAL_ERRORS ERR_PROP_ROOT(0x8)
+#define FME_ERR_PROP_CATFATAL_ERRORS ERR_PROP_ROOT(0x9)
+#define FME_ERR_PROP_INJECT_ERRORS ERR_PROP_ROOT(0xa) /* RW */
+
+/* FME thermal feature's properties */
+#define FME_THERMAL_PROP_THRESHOLD1 0x1 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD2 0x2 /* RW */
+#define FME_THERMAL_PROP_THRESHOLD_TRIP 0x3 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_REACHED 0x4 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD2_REACHED 0x5 /* RDONLY */
+#define FME_THERMAL_PROP_THRESHOLD1_POLICY 0x6 /* RW */
+#define FME_THERMAL_PROP_TEMPERATURE 0x7 /* RDONLY */
+#define FME_THERMAL_PROP_REVISION 0x8 /* RDONLY */
+
+/* FME power feature's properties */
+#define FME_PWR_PROP_CONSUMED 0x1 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD1 0x2 /* RW */
+#define FME_PWR_PROP_THRESHOLD2 0x3 /* RW */
+#define FME_PWR_PROP_THRESHOLD1_STATUS 0x4 /* RDONLY */
+#define FME_PWR_PROP_THRESHOLD2_STATUS 0x5 /* RDONLY */
+#define FME_PWR_PROP_RTL 0x6 /* RDONLY */
+#define FME_PWR_PROP_XEON_LIMIT 0x7 /* RDONLY */
+#define FME_PWR_PROP_FPGA_LIMIT 0x8 /* RDONLY */
+#define FME_PWR_PROP_REVISION 0x9 /* RDONLY */
+
+/* FME iperf/dperf PROP format */
+#define PERF_PROP_TOP_CACHE 0x1
+#define PERF_PROP_TOP_VTD 0x2
+#define PERF_PROP_TOP_FAB 0x3
+#define PERF_PROP_TOP_UNUSED PROP_TOP_UNUSED
+#define PERF_PROP_SUB_UNUSED PROP_SUB_UNUSED
+
+#define PERF_PROP_ROOT(_id) PROP(0xff, 0xff, _id)
+#define PERF_PROP_CACHE(_id) PROP(PERF_PROP_TOP_CACHE, 0xff, _id)
+#define PERF_PROP_VTD(_sub, _id) PROP(PERF_PROP_TOP_VTD, _sub, _id)
+#define PERF_PROP_VTD_ROOT(_id) PROP(PERF_PROP_TOP_VTD, 0xff, _id)
+#define PERF_PROP_FAB(_sub, _id) PROP(PERF_PROP_TOP_FAB, _sub, _id)
+#define PERF_PROP_FAB_ROOT(_id) PROP(PERF_PROP_TOP_FAB, 0xff, _id)
+
+/* FME iperf feature's properties */
+#define FME_IPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_IPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* iperf CACHE properties */
+#define FME_IPERF_PROP_CACHE_FREEZE PERF_PROP_CACHE(0x1) /* RW */
+#define FME_IPERF_PROP_CACHE_READ_HIT PERF_PROP_CACHE(0x2)
+#define FME_IPERF_PROP_CACHE_READ_MISS PERF_PROP_CACHE(0x3)
+#define FME_IPERF_PROP_CACHE_WRITE_HIT PERF_PROP_CACHE(0x4)
+#define FME_IPERF_PROP_CACHE_WRITE_MISS PERF_PROP_CACHE(0x5)
+#define FME_IPERF_PROP_CACHE_HOLD_REQUEST PERF_PROP_CACHE(0x6)
+#define FME_IPERF_PROP_CACHE_TX_REQ_STALL PERF_PROP_CACHE(0x7)
+#define FME_IPERF_PROP_CACHE_RX_REQ_STALL PERF_PROP_CACHE(0x8)
+#define FME_IPERF_PROP_CACHE_RX_EVICTION PERF_PROP_CACHE(0x9)
+#define FME_IPERF_PROP_CACHE_DATA_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xa)
+#define FME_IPERF_PROP_CACHE_TAG_WRITE_PORT_CONTENTION PERF_PROP_CACHE(0xb)
+/* iperf VTD properties */
+#define FME_IPERF_PROP_VTD_FREEZE PERF_PROP_VTD_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_HIT PERF_PROP_VTD_ROOT(0x2)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_HIT PERF_PROP_VTD_ROOT(0x3)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_HIT PERF_PROP_VTD_ROOT(0x4)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_HIT PERF_PROP_VTD_ROOT(0x5)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_HIT PERF_PROP_VTD_ROOT(0x6)
+#define FME_IPERF_PROP_VTD_SIP_RCC_HIT PERF_PROP_VTD_ROOT(0x7)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_4K_MISS PERF_PROP_VTD_ROOT(0x8)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_2M_MISS PERF_PROP_VTD_ROOT(0x9)
+#define FME_IPERF_PROP_VTD_SIP_IOTLB_1G_MISS PERF_PROP_VTD_ROOT(0xa)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L3_MISS PERF_PROP_VTD_ROOT(0xb)
+#define FME_IPERF_PROP_VTD_SIP_SLPWC_L4_MISS PERF_PROP_VTD_ROOT(0xc)
+#define FME_IPERF_PROP_VTD_SIP_RCC_MISS PERF_PROP_VTD_ROOT(0xd)
+#define FME_IPERF_PROP_VTD_PORT_READ_TRANSACTION(n) PERF_PROP_VTD(n, 0xe)
+#define FME_IPERF_PROP_VTD_PORT_WRITE_TRANSACTION(n) PERF_PROP_VTD(n, 0xf)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_READ_HIT(n) PERF_PROP_VTD(n, 0x10)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_WRITE_HIT(n) PERF_PROP_VTD(n, 0x11)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_4K_FILL(n) PERF_PROP_VTD(n, 0x12)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_2M_FILL(n) PERF_PROP_VTD(n, 0x13)
+#define FME_IPERF_PROP_VTD_PORT_DEVTLB_1G_FILL(n) PERF_PROP_VTD(n, 0x14)
+/* iperf FAB properties */
+#define FME_IPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_IPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_IPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_IPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_IPERF_PROP_FAB_PCIE1_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_IPERF_PROP_FAB_PCIE1_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_IPERF_PROP_FAB_PORT_PCIE1_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_IPERF_PROP_FAB_UPI_READ PERF_PROP_FAB_ROOT(0x6)
+#define FME_IPERF_PROP_FAB_PORT_UPI_READ(n) PERF_PROP_FAB(n, 0x6)
+#define FME_IPERF_PROP_FAB_UPI_WRITE PERF_PROP_FAB_ROOT(0x7)
+#define FME_IPERF_PROP_FAB_PORT_UPI_WRITE(n) PERF_PROP_FAB(n, 0x7)
+#define FME_IPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x8)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x8)
+#define FME_IPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x9)
+#define FME_IPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x9)
+#define FME_IPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0xa) /* RW */
+#define FME_IPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0xa) /* RW */
+
+/* FME dperf properties */
+#define FME_DPERF_PROP_CLOCK PERF_PROP_ROOT(0x1)
+#define FME_DPERF_PROP_REVISION PERF_PROP_ROOT(0x2)
+
+/* dperf FAB properties */
+#define FME_DPERF_PROP_FAB_FREEZE PERF_PROP_FAB_ROOT(0x1) /* RW */
+#define FME_DPERF_PROP_FAB_PCIE0_READ PERF_PROP_FAB_ROOT(0x2)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_READ(n) PERF_PROP_FAB(n, 0x2)
+#define FME_DPERF_PROP_FAB_PCIE0_WRITE PERF_PROP_FAB_ROOT(0x3)
+#define FME_DPERF_PROP_FAB_PORT_PCIE0_WRITE(n) PERF_PROP_FAB(n, 0x3)
+#define FME_DPERF_PROP_FAB_MMIO_READ PERF_PROP_FAB_ROOT(0x4)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_READ(n) PERF_PROP_FAB(n, 0x4)
+#define FME_DPERF_PROP_FAB_MMIO_WRITE PERF_PROP_FAB_ROOT(0x5)
+#define FME_DPERF_PROP_FAB_PORT_MMIO_WRITE(n) PERF_PROP_FAB(n, 0x5)
+#define FME_DPERF_PROP_FAB_ENABLE PERF_PROP_FAB_ROOT(0x6) /* RW */
+#define FME_DPERF_PROP_FAB_PORT_ENABLE(n) PERF_PROP_FAB(n, 0x6) /* RW */
+
+/* PORT header feature's properties */
+#define PORT_HDR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_HDR_PROP_PORTIDX 0x2 /* RDONLY */
+#define PORT_HDR_PROP_LATENCY_TOLERANCE 0x3 /* RDONLY */
+#define PORT_HDR_PROP_AP1_EVENT 0x4 /* RW */
+#define PORT_HDR_PROP_AP2_EVENT 0x5 /* RW */
+#define PORT_HDR_PROP_POWER_STATE 0x6 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_FREQCMD 0x7 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQCNTRCMD 0x8 /* RW */
+#define PORT_HDR_PROP_USERCLK_FREQSTS 0x9 /* RDONLY */
+#define PORT_HDR_PROP_USERCLK_CNTRSTS 0xa /* RDONLY */
+
+/* PORT error feature's properties */
+#define PORT_ERR_PROP_REVISION 0x1 /* RDONLY */
+#define PORT_ERR_PROP_ERRORS 0x2 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_ERROR 0x3 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_LSB 0x4 /* RDONLY */
+#define PORT_ERR_PROP_FIRST_MALFORMED_REQ_MSB 0x5 /* RDONLY */
+#define PORT_ERR_PROP_CLEAR 0x6 /* WRONLY */
+
+int opae_manager_ifpga_get_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_manager_ifpga_set_prop(struct opae_manager *mgr,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_get_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
+int opae_bridge_ifpga_set_prop(struct opae_bridge *br,
+ struct feature_prop *prop);
+
+/*
+ * Retrieve information about the FPGA FME.
+ * The driver fills the info into the provided struct fpga_fme_info.
+ */
+struct fpga_fme_info {
+ u32 capability; /* The capability of FME device */
+#define FPGA_FME_CAP_ERR_IRQ (1 << 0) /* Support fme error interrupt */
+};
+
+int opae_manager_ifpga_get_info(struct opae_manager *mgr,
+ struct fpga_fme_info *fme_info);
+
+/* Set eventfd information for ifpga FME error interrupt */
+struct fpga_fme_err_irq_set {
+	s32 evtfd; /* Eventfd handle */
+};
+
+int opae_manager_ifpga_set_err_irq(struct opae_manager *mgr,
+ struct fpga_fme_err_irq_set *err_irq_set);
+
+/*
+ * Retrieve information about the FPGA port.
+ * The driver fills the info into the provided struct fpga_port_info.
+ */
+struct fpga_port_info {
+ u32 capability; /* The capability of port device */
+#define FPGA_PORT_CAP_ERR_IRQ (1 << 0) /* Support port error interrupt */
+#define FPGA_PORT_CAP_UAFU_IRQ (1 << 1) /* Support uafu error interrupt */
+ u32 num_umsgs; /* The number of allocated umsgs */
+ u32 num_uafu_irqs; /* The number of uafu interrupts */
+};
+
+int opae_bridge_ifpga_get_info(struct opae_bridge *br,
+ struct fpga_port_info *port_info);
+/*
+ * Retrieve region information about the FPGA port.
+ * The driver needs to fill in the index field of struct fpga_port_region_info.
+ */
+struct fpga_port_region_info {
+ u32 index;
+#define PORT_REGION_INDEX_STP (1 << 1) /* Signal Tap Region */
+ u64 size; /* Region Size */
+ u8 *addr; /* Base address of the region */
+};
+
+int opae_bridge_ifpga_get_region_info(struct opae_bridge *br,
+ struct fpga_port_region_info *info);
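+
+/*
+ * Usage sketch (illustrative): the caller fills the index before the query;
+ * only the STP region is supported at this point.
+ *
+ *   struct fpga_port_region_info info = {
+ *           .index = PORT_REGION_INDEX_STP,
+ *   };
+ *
+ *   if (!opae_bridge_ifpga_get_region_info(br, &info))
+ *           printf("STP region: %p, size %llu\n", info.addr,
+ *                  (unsigned long long)info.size);
+ */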
+
+/* Set eventfd information for ifpga port error interrupt */
+struct fpga_port_err_irq_set {
+	s32 evtfd; /* Eventfd handle */
+};
+
+int opae_bridge_ifpga_set_err_irq(struct opae_bridge *br,
+ struct fpga_port_err_irq_set *err_irq_set);
+
+#endif /* _OPAE_IFPGA_HW_API_H_ */
diff --git a/drivers/raw/ifpga_rawdev/base/opae_osdep.h b/drivers/raw/ifpga_rawdev/base/opae_osdep.h
new file mode 100644
index 00000000..90f54f77
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/opae_osdep.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OPAE_OSDEP_H
+#define _OPAE_OSDEP_H
+
+#include <string.h>
+#include <stdbool.h>
+
+#ifdef RTE_LIBRTE_EAL
+#include "osdep_rte/osdep_generic.h"
+#else
+#include "osdep_raw/osdep_generic.h"
+#endif
+
+#define __iomem
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+
+struct uuid {
+ u8 b[16];
+};
+
+#ifndef LINUX_MACROS
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* BIT_ULL */
+#ifndef GENMASK
+#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif /* GENMASK */
+#ifndef GENMASK_ULL
+#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#endif /* GENMASK_ULL */
+#endif /* LINUX_MACROS */
+
+#define SET_FIELD(m, v) (((v) << (__builtin_ffsll(m) - 1)) & (m))
+#define GET_FIELD(m, v) (((v) & (m)) >> (__builtin_ffsll(m) - 1))
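+
+/*
+ * Example (illustrative): SET_FIELD()/GET_FIELD() shift by the mask's
+ * lowest set bit, so with a mask of GENMASK(23, 16):
+ *
+ *   SET_FIELD(GENMASK(23, 16), 0x2) == 0x020000
+ *   GET_FIELD(GENMASK(23, 16), 0x020000) == 0x2
+ */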
+
+#define dev_err(x, args...) dev_printf(ERR, args)
+#define dev_info(x, args...) dev_printf(INFO, args)
+#define dev_warn(x, args...) dev_printf(WARNING, args)
+
+#ifdef OPAE_DEBUG
+#define dev_debug(x, args...) dev_printf(DEBUG, args)
+#else
+#define dev_debug(x, args...) do { } while (0)
+#endif
+
+#define pr_err(y, args...) dev_err(0, y, ##args)
+#define pr_warn(y, args...) dev_warn(0, y, ##args)
+#define pr_info(y, args...) dev_info(0, y, ##args)
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+ int ret = !!(x); \
+ if (unlikely(ret)) \
+ pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define udelay(x) opae_udelay(x)
+#define msleep(x) opae_udelay(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
new file mode 100644
index 00000000..895a1d82
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/osdep_raw/osdep_generic.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RAW_GENERIC_H
+#define _OSDEP_RAW_GENERIC_H
+
+#define compiler_barrier() do { asm volatile ("" : : : "memory"); } while (0)
+
+#define io_wmb() compiler_barrier()
+#define io_rmb() compiler_barrier()
+
+static inline uint8_t opae_readb(const volatile void *addr)
+{
+ uint8_t val;
+
+ val = *(const volatile uint8_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint16_t opae_readw(const volatile void *addr)
+{
+ uint16_t val;
+
+ val = *(const volatile uint16_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint32_t opae_readl(const volatile void *addr)
+{
+ uint32_t val;
+
+ val = *(const volatile uint32_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline uint64_t opae_readq(const volatile void *addr)
+{
+ uint64_t val;
+
+ val = *(const volatile uint64_t *)addr;
+ io_rmb();
+ return val;
+}
+
+static inline void opae_writeb(uint8_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint8_t *)addr = value;
+}
+
+static inline void opae_writew(uint16_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint16_t *)addr = value;
+}
+
+static inline void opae_writel(uint32_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint32_t *)addr = value;
+}
+
+static inline void opae_writeq(uint64_t value, volatile void *addr)
+{
+ io_wmb();
+ *(volatile uint64_t *)addr = value;
+}
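+
+/*
+ * Usage sketch (illustrative): a read-modify-write of a 32-bit MMIO
+ * register, assuming "base" is a uint8_t pointer to a mapped BAR.
+ *
+ *   uint32_t v = opae_readl(base + 0x10);
+ *
+ *   opae_writel(v | 0x1, base + 0x10);
+ */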
+
+#define opae_free(addr) free(addr)
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
new file mode 100644
index 00000000..76902e28
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/base/osdep_rte/osdep_generic.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _OSDEP_RTE_GENERIC_H
+#define _OSDEP_RTE_GENERIC_H
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+
+#define dev_printf(level, fmt, args...) \
+ RTE_LOG(level, PMD, "osdep_rte: " fmt, ## args)
+
+#define osdep_panic(...) rte_panic(__VA_ARGS__)
+
+#define opae_udelay(x) rte_delay_us(x)
+
+#define opae_readb(addr) rte_read8(addr)
+#define opae_readw(addr) rte_read16(addr)
+#define opae_readl(addr) rte_read32(addr)
+#define opae_readq(addr) rte_read64(addr)
+#define opae_writeb(value, addr) rte_write8(value, addr)
+#define opae_writew(value, addr) rte_write16(value, addr)
+#define opae_writel(value, addr) rte_write32(value, addr)
+#define opae_writeq(value, addr) rte_write64(value, addr)
+
+#define opae_malloc(size) rte_malloc(NULL, size, 0)
+#define opae_zmalloc(size) rte_zmalloc(NULL, size, 0)
+#define opae_free(addr) rte_free(addr)
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define min(a, b) RTE_MIN(a, b)
+#define max(a, b) RTE_MAX(a, b)
+
+#define spinlock_t rte_spinlock_t
+#define spinlock_init(x) rte_spinlock_init(x)
+#define spinlock_lock(x) rte_spinlock_lock(x)
+#define spinlock_unlock(x) rte_spinlock_unlock(x)
+
+#endif
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.c b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
new file mode 100644
index 00000000..3fed0578
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.c
@@ -0,0 +1,617 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <rte_log.h>
+#include <rte_bus.h>
+#include <rte_eal_memconfig.h>
+#include <rte_malloc.h>
+#include <rte_devargs.h>
+#include <rte_memcpy.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_kvargs.h>
+#include <rte_alarm.h>
+
+#include <rte_errno.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_bus_vdev.h>
+
+#include "base/opae_hw_api.h"
+#include "rte_rawdev.h"
+#include "rte_rawdev_pmd.h"
+#include "rte_bus_ifpga.h"
+#include "ifpga_common.h"
+#include "ifpga_logs.h"
+#include "ifpga_rawdev.h"
+
+int ifpga_rawdev_logtype;
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+/* PCI Device ID */
+#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
+#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
+#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
+/* VF Device */
+#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
+#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
+#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
+#define RTE_MAX_RAW_DEVICE 10
+
+static const struct rte_pci_id pci_ifpga_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static int
+ifpga_fill_afu_dev(struct opae_accelerator *acc,
+ struct rte_afu_device *afu_dev)
+{
+ struct rte_mem_resource *res = afu_dev->mem_resource;
+ struct opae_acc_region_info region_info;
+ struct opae_acc_info info;
+ unsigned long i;
+ int ret;
+
+ ret = opae_acc_get_info(acc, &info);
+ if (ret)
+ return ret;
+
+ if (info.num_regions > PCI_MAX_RESOURCE)
+ return -EFAULT;
+
+ afu_dev->num_region = info.num_regions;
+
+ for (i = 0; i < info.num_regions; i++) {
+ region_info.index = i;
+ ret = opae_acc_get_region_info(acc, &region_info);
+ if (ret)
+ return ret;
+
+ if ((region_info.flags & ACC_REGION_MMIO) &&
+ (region_info.flags & ACC_REGION_READ) &&
+ (region_info.flags & ACC_REGION_WRITE)) {
+ res[i].phys_addr = region_info.phys_addr;
+ res[i].len = region_info.len;
+ res[i].addr = region_info.addr;
+		} else {
+			return -EFAULT;
+		}
+ }
+
+ return 0;
+}
+
+static void
+ifpga_rawdev_info_get(struct rte_rawdev *dev,
+ rte_rawdev_obj_t dev_info)
+{
+ struct opae_adapter *adapter;
+ struct opae_accelerator *acc;
+ struct rte_afu_device *afu_dev;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ if (!dev_info) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid request");
+ return;
+ }
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return;
+
+ afu_dev = dev_info;
+ afu_dev->rawdev = dev;
+
+ /* find opae_accelerator and fill info into afu_device */
+ opae_adapter_for_each_acc(adapter, acc) {
+ if (acc->index != afu_dev->id.port)
+ continue;
+
+ if (ifpga_fill_afu_dev(acc, afu_dev)) {
+ IFPGA_RAWDEV_PMD_ERR("cannot get info\n");
+ return;
+ }
+ }
+}
+
+static int
+ifpga_rawdev_configure(const struct rte_rawdev *dev,
+ rte_rawdev_obj_t config)
+{
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ return config ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_start(struct rte_rawdev *dev)
+{
+ int ret = 0;
+ struct opae_adapter *adapter;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ return ret;
+}
+
+static void
+ifpga_rawdev_stop(struct rte_rawdev *dev)
+{
+ dev->started = 0;
+}
+
+static int
+ifpga_rawdev_close(struct rte_rawdev *dev)
+{
+	return dev ? 0 : 1;
+}
+
+static int
+ifpga_rawdev_reset(struct rte_rawdev *dev)
+{
+	return dev ? 0 : 1;
+}
+
+static int
+fpga_pr(struct rte_rawdev *raw_dev, u32 port_id, u64 *buffer, u32 size,
+ u64 *status)
+{
+ struct opae_adapter *adapter;
+ struct opae_manager *mgr;
+ struct opae_accelerator *acc;
+ struct opae_bridge *br;
+ int ret;
+
+ adapter = ifpga_rawdev_get_priv(raw_dev);
+ if (!adapter)
+ return -ENODEV;
+
+ mgr = opae_adapter_get_mgr(adapter);
+ if (!mgr)
+ return -ENODEV;
+
+ acc = opae_adapter_get_acc(adapter, port_id);
+ if (!acc)
+ return -ENODEV;
+
+ br = opae_acc_get_br(acc);
+ if (!br)
+ return -ENODEV;
+
+ ret = opae_manager_flash(mgr, port_id, buffer, size, status);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s pr error %d\n", __func__, ret);
+ return ret;
+ }
+
+ ret = opae_bridge_reset(br);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("%s reset port:%d error %d\n",
+ __func__, port_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+rte_fpga_do_pr(struct rte_rawdev *rawdev, int port_id,
+ const char *file_name)
+{
+ struct stat file_stat;
+ int file_fd;
+ int ret = 0;
+ ssize_t buffer_size;
+ void *buffer;
+ u64 pr_error;
+
+ if (!file_name)
+ return -EINVAL;
+
+ file_fd = open(file_name, O_RDONLY);
+ if (file_fd < 0) {
+ IFPGA_RAWDEV_PMD_ERR("%s: open file error: %s\n",
+ __func__, file_name);
+ IFPGA_RAWDEV_PMD_ERR("Message : %s\n", strerror(errno));
+ return -EINVAL;
+ }
+ ret = stat(file_name, &file_stat);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("stat on bitstream file failed: %s\n",
+ file_name);
+		ret = -EINVAL;
+		goto close_fd;
+ }
+ buffer_size = file_stat.st_size;
+ IFPGA_RAWDEV_PMD_INFO("bitstream file size: %zu\n", buffer_size);
+ buffer = rte_malloc(NULL, buffer_size, 0);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto close_fd;
+ }
+
+	/* read the raw data */
+ if (buffer_size != read(file_fd, (void *)buffer, buffer_size)) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+	/* do PR now */
+ ret = fpga_pr(rawdev, port_id, buffer, buffer_size, &pr_error);
+ IFPGA_RAWDEV_PMD_INFO("downloading to device port %d....%s.\n", port_id,
+ ret ? "failed" : "success");
+ if (ret) {
+ ret = -EINVAL;
+ goto free_buffer;
+ }
+
+free_buffer:
+ if (buffer)
+ rte_free(buffer);
+close_fd:
+ close(file_fd);
+ file_fd = 0;
+ return ret;
+}
+
+static int
+ifpga_rawdev_pr(struct rte_rawdev *dev,
+ rte_rawdev_obj_t pr_conf)
+{
+ struct opae_adapter *adapter;
+ struct rte_afu_pr_conf *afu_pr_conf;
+ int ret;
+ struct uuid uuid;
+ struct opae_accelerator *acc;
+
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+
+ adapter = ifpga_rawdev_get_priv(dev);
+ if (!adapter)
+ return -ENODEV;
+
+ if (!pr_conf)
+ return -EINVAL;
+
+ afu_pr_conf = pr_conf;
+
+ if (afu_pr_conf->pr_enable) {
+ ret = rte_fpga_do_pr(dev,
+ afu_pr_conf->afu_id.port,
+ afu_pr_conf->bs_path);
+ if (ret) {
+ IFPGA_RAWDEV_PMD_ERR("do pr error %d\n", ret);
+ return ret;
+ }
+ }
+
+ acc = opae_adapter_get_acc(adapter, afu_pr_conf->afu_id.port);
+ if (!acc)
+ return -ENODEV;
+
+ ret = opae_acc_get_uuid(acc, &uuid);
+ if (ret)
+ return ret;
+
+ memcpy(&afu_pr_conf->afu_id.uuid.uuid_low, uuid.b, sizeof(u64));
+ memcpy(&afu_pr_conf->afu_id.uuid.uuid_high, uuid.b + 8, sizeof(u64));
+
+ IFPGA_RAWDEV_PMD_INFO("%s: uuid_l=0x%lx, uuid_h=0x%lx\n", __func__,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_low,
+ (unsigned long)afu_pr_conf->afu_id.uuid.uuid_high);
+
+ return 0;
+}
+
+static const struct rte_rawdev_ops ifpga_rawdev_ops = {
+ .dev_info_get = ifpga_rawdev_info_get,
+ .dev_configure = ifpga_rawdev_configure,
+ .dev_start = ifpga_rawdev_start,
+ .dev_stop = ifpga_rawdev_stop,
+ .dev_close = ifpga_rawdev_close,
+ .dev_reset = ifpga_rawdev_reset,
+
+ .queue_def_conf = NULL,
+ .queue_setup = NULL,
+ .queue_release = NULL,
+
+ .attr_get = NULL,
+ .attr_set = NULL,
+
+ .enqueue_bufs = NULL,
+ .dequeue_bufs = NULL,
+
+ .dump = NULL,
+
+ .xstats_get = NULL,
+ .xstats_get_names = NULL,
+ .xstats_get_by_name = NULL,
+ .xstats_reset = NULL,
+
+ .firmware_status_get = NULL,
+ .firmware_version_get = NULL,
+ .firmware_load = ifpga_rawdev_pr,
+ .firmware_unload = NULL,
+
+ .dev_selftest = NULL,
+};
+
+static int
+ifpga_rawdev_create(struct rte_pci_device *pci_dev,
+ int socket_id)
+{
+ int ret = 0;
+ struct rte_rawdev *rawdev = NULL;
+ struct opae_adapter *adapter = NULL;
+ struct opae_manager *mgr = NULL;
+ struct opae_adapter_data_pci *data = NULL;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ int i;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Init %s on NUMA node %d", name, rte_socket_id());
+
+ /* Allocate device structure */
+ rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct opae_adapter),
+ socket_id);
+ if (rawdev == NULL) {
+ IFPGA_RAWDEV_PMD_ERR("Unable to allocate rawdevice");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+	/* allocate OPAE_FPGA_PCI data to register with the OPAE hardware level API */
+ data = opae_adapter_data_alloc(OPAE_FPGA_PCI);
+ if (!data) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ /* init opae_adapter_data_pci for device specific information */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ data->region[i].phys_addr = pci_dev->mem_resource[i].phys_addr;
+ data->region[i].len = pci_dev->mem_resource[i].len;
+ data->region[i].addr = pci_dev->mem_resource[i].addr;
+ }
+ data->device_id = pci_dev->id.device_id;
+ data->vendor_id = pci_dev->id.vendor_id;
+
+	/* create an opae_adapter based on the above device data */
+ adapter = opae_adapter_alloc(pci_dev->device.name, data);
+ if (!adapter) {
+ ret = -ENOMEM;
+ goto free_adapter_data;
+ }
+
+ rawdev->dev_ops = &ifpga_rawdev_ops;
+ rawdev->device = &pci_dev->device;
+ rawdev->driver_name = pci_dev->device.driver->name;
+
+ rawdev->dev_private = adapter;
+
+	/* must enumerate the adapter before using it */
+ ret = opae_adapter_enumerate(adapter);
+ if (ret)
+ goto free_adapter;
+
+	/* check for an opae_manager; only the PF exposes one */
+ mgr = opae_adapter_get_mgr(adapter);
+ if (mgr) {
+ /* PF function */
+ IFPGA_RAWDEV_PMD_INFO("this is a PF function");
+ }
+
+ return ret;
+
+free_adapter:
+ if (adapter)
+ opae_adapter_free(adapter);
+free_adapter_data:
+ if (data)
+ opae_adapter_data_free(data);
+cleanup:
+ if (rawdev)
+ rte_rawdev_pmd_release(rawdev);
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_destroy(struct rte_pci_device *pci_dev)
+{
+ int ret;
+ struct rte_rawdev *rawdev;
+ char name[RTE_RAWDEV_NAME_MAX_LEN];
+ struct opae_adapter *adapter;
+
+ if (!pci_dev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid pci_dev of the device!");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ memset(name, 0, sizeof(name));
+ snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "IFPGA:%x:%02x.%x",
+ pci_dev->addr.bus, pci_dev->addr.devid, pci_dev->addr.function);
+
+ IFPGA_RAWDEV_PMD_INFO("Closing %s on NUMA node %d",
+ name, rte_socket_id());
+
+ rawdev = rte_rawdev_pmd_get_named_dev(name);
+ if (!rawdev) {
+ IFPGA_RAWDEV_PMD_ERR("Invalid device name (%s)", name);
+ return -EINVAL;
+ }
+
+ adapter = ifpga_rawdev_get_priv(rawdev);
+ if (!adapter)
+ return -ENODEV;
+
+ opae_adapter_data_free(adapter->data);
+ opae_adapter_free(adapter);
+
+ /* rte_rawdev_close is called by pmd_release */
+ ret = rte_rawdev_pmd_release(rawdev);
+ if (ret)
+ IFPGA_RAWDEV_PMD_DEBUG("Device cleanup failed");
+
+ return ret;
+}
+
+static int
+ifpga_rawdev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ IFPGA_RAWDEV_PMD_FUNC_TRACE();
+ return ifpga_rawdev_create(pci_dev, rte_socket_id());
+}
+
+static int
+ifpga_rawdev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return ifpga_rawdev_destroy(pci_dev);
+}
+
+static struct rte_pci_driver rte_ifpga_rawdev_pmd = {
+ .id_table = pci_ifpga_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ifpga_rawdev_pci_probe,
+ .remove = ifpga_rawdev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(ifpga_rawdev_pci_driver, rte_ifpga_rawdev_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(ifpga_rawdev_pci_driver, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(ifpga_rawdev_init_log)
+{
+ ifpga_rawdev_logtype = rte_log_register("driver.raw.init");
+ if (ifpga_rawdev_logtype >= 0)
+ rte_log_set_level(ifpga_rawdev_logtype, RTE_LOG_NOTICE);
+}
+
+static const char * const valid_args[] = {
+#define IFPGA_ARG_NAME "ifpga"
+ IFPGA_ARG_NAME,
+#define IFPGA_ARG_PORT "port"
+ IFPGA_ARG_PORT,
+#define IFPGA_AFU_BTS "afu_bts"
+ IFPGA_AFU_BTS,
+ NULL
+};
+
+static int
+ifpga_cfg_probe(struct rte_vdev_device *dev)
+{
+ struct rte_devargs *devargs;
+ struct rte_kvargs *kvlist = NULL;
+ int port;
+ char *name = NULL;
+ char dev_name[RTE_RAWDEV_NAME_MAX_LEN];
+
+ devargs = dev->device.devargs;
+
+ kvlist = rte_kvargs_parse(devargs->args, valid_args);
+ if (!kvlist) {
+		IFPGA_RAWDEV_PMD_ERR("failed to parse parameters");
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_NAME) == 1) {
+ if (rte_kvargs_process(kvlist, IFPGA_ARG_NAME,
+ &rte_ifpga_get_string_arg, &name) < 0) {
+ IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_NAME);
+ goto end;
+ }
+
+ if (rte_kvargs_count(kvlist, IFPGA_ARG_PORT) == 1) {
+ if (rte_kvargs_process(kvlist,
+ IFPGA_ARG_PORT,
+ &rte_ifpga_get_integer32_arg,
+ &port) < 0) {
+ IFPGA_RAWDEV_PMD_ERR("error to parse %s",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+ } else {
+ IFPGA_RAWDEV_PMD_ERR("arg %s is mandatory for ifpga bus",
+ IFPGA_ARG_PORT);
+ goto end;
+ }
+
+ memset(dev_name, 0, sizeof(dev_name));
+ snprintf(dev_name, RTE_RAWDEV_NAME_MAX_LEN, "%d|%s",
+ port, name);
+
+ rte_eal_hotplug_add(RTE_STR(IFPGA_BUS_NAME),
+ dev_name, devargs->args);
+end:
+ if (kvlist)
+ rte_kvargs_free(kvlist);
+ if (name)
+ free(name);
+
+ return 0;
+}
+
+static int
+ifpga_cfg_remove(struct rte_vdev_device *vdev)
+{
+ IFPGA_RAWDEV_PMD_INFO("Remove ifpga_cfg %p",
+ vdev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver ifpga_cfg_driver = {
+ .probe = ifpga_cfg_probe,
+ .remove = ifpga_cfg_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(ifpga_rawdev_cfg, ifpga_cfg_driver);
+RTE_PMD_REGISTER_ALIAS(ifpga_rawdev_cfg, ifpga_cfg);
+RTE_PMD_REGISTER_PARAM_STRING(ifpga_rawdev_cfg,
+ "ifpga=<string> "
+ "port=<int> "
+ "afu_bts=<path>");
+
diff --git a/drivers/raw/ifpga_rawdev/ifpga_rawdev.h b/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
new file mode 100644
index 00000000..c7759b8b
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/ifpga_rawdev.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _IFPGA_RAWDEV_H_
+#define _IFPGA_RAWDEV_H_
+
+extern int ifpga_rawdev_logtype;
+
+#define IFPGA_RAWDEV_PMD_LOG(level, fmt, args...) \
+	rte_log(RTE_LOG_ ## level, ifpga_rawdev_logtype, "ifpga: " fmt, \
+ ##args)
+
+#define IFPGA_RAWDEV_PMD_FUNC_TRACE() IFPGA_RAWDEV_PMD_LOG(DEBUG, ">>")
+
+#define IFPGA_RAWDEV_PMD_DEBUG(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(DEBUG, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_INFO(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(INFO, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_ERR(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(ERR, fmt, ## args)
+#define IFPGA_RAWDEV_PMD_WARN(fmt, args...) \
+ IFPGA_RAWDEV_PMD_LOG(WARNING, fmt, ## args)
+
+enum ifpga_rawdev_device_state {
+ IFPGA_IDLE,
+ IFPGA_READY,
+ IFPGA_ERROR
+};
+
+static inline struct opae_adapter *
+ifpga_rawdev_get_priv(const struct rte_rawdev *rawdev)
+{
+ return rawdev->dev_private;
+}
+
+#endif /* _IFPGA_RAWDEV_H_ */
diff --git a/drivers/raw/ifpga_rawdev/meson.build b/drivers/raw/ifpga_rawdev/meson.build
new file mode 100644
index 00000000..67256872
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/meson.build
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+version = 1
+
+subdir('base')
+objs = [base_objs]
+
+deps += ['rawdev', 'pci', 'bus_pci', 'kvargs',
+ 'bus_vdev', 'bus_ifpga']
+sources = files('ifpga_rawdev.c')
+
+includes += include_directories('base')
+
+allow_experimental_apis = true
diff --git a/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map b/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/raw/ifpga_rawdev/rte_pmd_ifpga_rawdev_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/raw/meson.build b/drivers/raw/meson.build
new file mode 100644
index 00000000..a61cdcce
--- /dev/null
+++ b/drivers/raw/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+drivers = ['skeleton_rawdev', 'dpaa2_cmdif', 'dpaa2_qdma', 'ifpga_rawdev']
+std_deps = ['rawdev']
+config_flag_fmt = 'RTE_LIBRTE_PMD_@0@_RAWDEV'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/raw/skeleton_rawdev/Makefile b/drivers/raw/skeleton_rawdev/Makefile
index bacc66dd..3f97c2ee 100644
--- a/drivers/raw/skeleton_rawdev/Makefile
+++ b/drivers/raw/skeleton_rawdev/Makefile
@@ -8,7 +8,6 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pmd_skeleton_rawdev.a
-CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal
diff --git a/drivers/raw/skeleton_rawdev/meson.build b/drivers/raw/skeleton_rawdev/meson.build
new file mode 100644
index 00000000..b4a6ed08
--- /dev/null
+++ b/drivers/raw/skeleton_rawdev/meson.build
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+deps += ['rawdev', 'kvargs', 'mbuf', 'bus_vdev']
+sources = files('skeleton_rawdev.c',
+ 'skeleton_rawdev_test.c')
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
index 6bdbbb50..6518a2d9 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev.c
@@ -305,6 +305,18 @@ static int skeleton_rawdev_queue_release(struct rte_rawdev *dev,
return ret;
}
+static uint16_t skeleton_rawdev_queue_count(struct rte_rawdev *dev)
+{
+ struct skeleton_rawdev *skeldev;
+
+ SKELETON_PMD_FUNC_TRACE();
+
+ RTE_FUNC_PTR_OR_ERR_RET(dev, -EINVAL);
+
+ skeldev = skeleton_rawdev_get_priv(dev);
+ return skeldev->num_queues;
+}
+
static int skeleton_rawdev_get_attr(struct rte_rawdev *dev,
const char *attr_name,
uint64_t *attr_value)
@@ -524,6 +536,7 @@ static const struct rte_rawdev_ops skeleton_rawdev_ops = {
.queue_def_conf = skeleton_rawdev_queue_def_conf,
.queue_setup = skeleton_rawdev_queue_setup,
.queue_release = skeleton_rawdev_queue_release,
+ .queue_count = skeleton_rawdev_queue_count,
.attr_get = skeleton_rawdev_get_attr,
.attr_set = skeleton_rawdev_set_attr,
@@ -744,10 +757,7 @@ static struct rte_vdev_driver skeleton_pmd_drv = {
RTE_PMD_REGISTER_VDEV(SKELETON_PMD_RAWDEV_NAME, skeleton_pmd_drv);
-RTE_INIT(skeleton_pmd_init_log);
-
-static void
-skeleton_pmd_init_log(void)
+RTE_INIT(skeleton_pmd_init_log)
{
skeleton_pmd_logtype = rte_log_register("rawdev.skeleton");
if (skeleton_pmd_logtype >= 0)
diff --git a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
index 795f24bc..3405b898 100644
--- a/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
+++ b/drivers/raw/skeleton_rawdev/skeleton_rawdev_test.c
@@ -194,6 +194,18 @@ test_rawdev_queue_default_conf_get(void)
}
static int
+test_rawdev_queue_count(void)
+{
+ unsigned int q_count;
+
+ /* Get the current configuration */
+ q_count = rte_rawdev_queue_count(TEST_DEV_ID);
+ RTE_TEST_ASSERT_EQUAL(q_count, 1, "Invalid queue count (%d)", q_count);
+
+ return TEST_SUCCESS;
+}
+
+static int
test_rawdev_queue_setup(void)
{
int ret;
@@ -288,6 +300,7 @@ test_rawdev_attr_set_get(void)
"Attribute (Test2) not set correctly (%" PRIu64 ")",
ret_value);
+ free(dummy_value);
return TEST_SUCCESS;
}
@@ -379,8 +392,6 @@ test_rawdev_enqdeq(void)
cleanup:
if (buffers[0].buf_addr)
free(buffers[0].buf_addr);
- if (deq_buffers)
- free(deq_buffers);
return TEST_FAILED;
}
@@ -430,6 +441,7 @@ test_rawdev_skeldev(void)
SKELDEV_TEST_RUN(test_rawdev_configure, NULL,
test_rawdev_queue_default_conf_get);
SKELDEV_TEST_RUN(test_rawdev_configure, NULL, test_rawdev_queue_setup);
+ SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_queue_count);
SKELDEV_TEST_RUN(test_rawdev_queue_setup, NULL,
test_rawdev_queue_release);
SKELDEV_TEST_RUN(NULL, NULL, test_rawdev_attr_set_get);
diff --git a/examples/Makefile b/examples/Makefile
index 17ecf7f6..481720cb 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2016 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 6WIND S.A.
ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
@@ -93,6 +66,9 @@ DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += tep_termination
endif
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += timer
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost vhost_scsi
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost_crypto
+endif
DIRS-y += vmdq
DIRS-y += vmdq_dcb
ifeq ($(CONFIG_RTE_LIBRTE_POWER), y)
diff --git a/examples/bbdev_app/main.c b/examples/bbdev_app/main.c
index 3c452569..045a190b 100644
--- a/examples/bbdev_app/main.c
+++ b/examples/bbdev_app/main.c
@@ -64,11 +64,7 @@ static const struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /**< CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -1017,7 +1013,7 @@ int
main(int argc, char **argv)
{
int ret;
- unsigned int nb_bbdevs, nb_ports, flags, lcore_id;
+ unsigned int nb_bbdevs, flags, lcore_id;
void *sigret;
struct app_config_params app_params = def_app_config;
struct rte_mempool *ethdev_mbuf_mempool, *bbdev_mbuf_mempool;
@@ -1079,12 +1075,10 @@ main(int argc, char **argv)
nb_bbdevs, app_params.bbdev_id);
printf("Number of bbdevs detected: %d\n", nb_bbdevs);
- /* Get the number of available ethdev devices */
- nb_ports = rte_eth_dev_count();
- if (nb_ports <= app_params.port_id)
+ if (!rte_eth_dev_is_valid_port(app_params.port_id))
rte_exit(EXIT_FAILURE,
- "%u ports detected, cannot use port with ID %u!\n",
- nb_ports, app_params.port_id);
+ "cannot use port with ID %u!\n",
+ app_params.port_id);
/* create the mbuf mempool for ethdev pkts */
ethdev_mbuf_mempool = rte_pktmbuf_pool_create("ethdev_mbuf_pool",
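The bbdev_app hunks replace the count-based port check with rte_eth_dev_is_valid_port(), which also copes with port ids that are not contiguous (for example after hot-unplug). A minimal sketch of the updated check; the helper name is illustrative:

```c
#include <stdlib.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

/* Port ids are not guaranteed contiguous, so test validity directly
 * instead of comparing against rte_eth_dev_count(). */
static void
require_port(uint16_t port_id)
{
	if (!rte_eth_dev_is_valid_port(port_id))
		rte_exit(EXIT_FAILURE, "cannot use port with ID %u!\n", port_id);
}
```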
diff --git a/examples/bond/Makefile b/examples/bond/Makefile
index 44d10d4f..d6e500aa 100644
--- a/examples/bond/Makefile
+++ b/examples/bond/Makefile
@@ -25,6 +25,8 @@ CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
@@ -59,6 +61,7 @@ CFLAGS_main.o += -Wno-return-type
endif
CFLAGS += -O3
+CFLAGS += -DALLOW_EXPERIMENTAL_API
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
LDLIBS += -lrte_pmd_bond
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 455f108e..23d0981a 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -122,7 +122,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
@@ -147,13 +146,25 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
struct rte_eth_txconf txq_conf;
struct rte_eth_conf local_port_conf = port_conf;
- if (portid >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(portid))
rte_exit(EXIT_FAILURE, "Invalid port\n");
rte_eth_dev_info_get(portid, &dev_info);
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
if (retval != 0)
rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
@@ -177,7 +188,6 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
/* TX setup */
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid), &txq_conf);
@@ -246,7 +256,6 @@ bond_port_init(struct rte_mempool *mbuf_pool)
/* TX setup */
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
rte_eth_dev_socket_id(BOND_PORT), &txq_conf);
@@ -738,17 +747,17 @@ int
main(int argc, char *argv[])
{
int ret;
- uint8_t nb_ports, i;
+ uint16_t nb_ports, i;
/* init EAL */
ret = rte_eal_init(argc, argv);
- rte_eal_devargs_dump(stdout);
+ rte_devargs_dump(stdout);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
argc -= ret;
argv += ret;
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "Give at least one port\n");
else if (nb_ports > MAX_PORTS)
@@ -761,7 +770,7 @@ main(int argc, char *argv[])
/* initialize all ports */
slaves_count = nb_ports;
- for (i = 0; i < nb_ports; i++) {
+ RTE_ETH_FOREACH_DEV(i) {
slave_port_init(i, mbuf_pool);
slaves[i] = i;
}
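Several examples gain the same defensive pattern in this release: the requested RSS hash mask is intersected with the device's flow_type_rss_offloads capability, and a message is printed when the result differs from the request. A condensed sketch of that pattern; the helper name is illustrative:

```c
#include <stdio.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Cap the requested RSS hash functions to what the port supports. */
static void
cap_rss_hf(uint16_t port_id, struct rte_eth_conf *conf, uint64_t requested)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	conf->rx_adv_conf.rss_conf.rss_hf =
		requested & dev_info.flow_type_rss_offloads;
	if (conf->rx_adv_conf.rss_conf.rss_hf != requested)
		printf("Port %u reduced rss_hf from %#"PRIx64" to %#"PRIx64"\n",
			port_id, requested,
			conf->rx_adv_conf.rss_conf.rss_hf);
}
```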
diff --git a/examples/bond/meson.build b/examples/bond/meson.build
index 8f65e4a8..82e355a4 100644
--- a/examples/bond/meson.build
+++ b/examples/bond/meson.build
@@ -7,6 +7,7 @@
# DPDK instance, use 'make'
deps += 'pmd_bond'
+allow_experimental_apis = true
sources = files(
'main.c'
)
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index c49d680b..03a05e3d 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -80,7 +80,6 @@ static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -116,7 +115,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txconf;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rte_eth_dev_info_get(port, &dev_info);
@@ -124,6 +123,17 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf_default.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port,
+ port_conf_default.rx_adv_conf.rss_conf.rss_hf,
+ port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
if (retval != 0)
return retval;
@@ -141,7 +151,6 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
for (q = 0; q < txRings; q++) {
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
@@ -193,12 +202,12 @@ struct lcore_params {
static int
lcore_rx(struct lcore_params *p)
{
- const uint16_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count_avail();
const int socket_id = rte_socket_id();
uint16_t port;
struct rte_mbuf *bufs[BURST_SIZE*2];
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << port)) == 0)
continue;
@@ -295,11 +304,11 @@ flush_one_port(struct output_buffer *outbuf, uint8_t outp)
}
static inline void
-flush_all_ports(struct output_buffer *tx_buffers, uint16_t nb_ports)
+flush_all_ports(struct output_buffer *tx_buffers)
{
uint16_t outp;
- for (outp = 0; outp < nb_ports; outp++) {
+ RTE_ETH_FOREACH_DEV(outp) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << outp)) == 0)
continue;
@@ -367,11 +376,10 @@ static int
lcore_tx(struct rte_ring *in_r)
{
static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
- const uint16_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
uint16_t port;
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << port)) == 0)
continue;
@@ -386,7 +394,7 @@ lcore_tx(struct rte_ring *in_r)
printf("\nCore %u doing packet TX.\n", rte_lcore_id());
while (!quit_signal) {
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << port)) == 0)
continue;
@@ -398,7 +406,7 @@ lcore_tx(struct rte_ring *in_r)
/* if we get no traffic, flush anything we have */
if (unlikely(nb_rx == 0)) {
- flush_all_ports(tx_buffers, nb_ports);
+ flush_all_ports(tx_buffers);
continue;
}
@@ -446,14 +454,14 @@ print_stats(void)
unsigned int i, j;
const unsigned int num_workers = rte_lcore_count() - 4;
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
rte_eth_stats_get(i, &eth_stats);
app_stats.port_rx_pkts[i] = eth_stats.ipackets;
app_stats.port_tx_pkts[i] = eth_stats.opackets;
}
printf("\n\nRX Thread:\n");
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
printf("Port %u Pktsin : %5.2f\n", i,
(app_stats.port_rx_pkts[i] -
prev_app_stats.port_rx_pkts[i])/1000000.0);
@@ -492,7 +500,7 @@ print_stats(void)
printf(" - Dequeued: %5.2f\n",
(app_stats.tx.dequeue_pkts -
prev_app_stats.tx.dequeue_pkts)/1000000.0);
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
printf("Port %u Pktsout: %5.2f\n",
i, (app_stats.port_tx_pkts[i] -
prev_app_stats.port_tx_pkts[i])/1000000.0);
@@ -543,7 +551,7 @@ lcore_worker(struct lcore_params *p)
* for single port, xor_val will be zero so we won't modify the output
* port, otherwise we send traffic from 0 to 1, 2 to 3, and vice versa
*/
- const unsigned xor_val = (rte_eth_dev_count() > 1);
+ const unsigned xor_val = (rte_eth_dev_count_avail() > 1);
struct rte_mbuf *buf[8] __rte_cache_aligned;
for (i = 0; i < 8; i++)
@@ -679,7 +687,7 @@ main(int argc, char *argv[])
"1 lcore for packet TX\n"
"and at least 1 lcore for worker threads\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
if (nb_ports != 1 && (nb_ports & 1))
@@ -694,7 +702,7 @@ main(int argc, char *argv[])
nb_ports_available = nb_ports;
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
printf("\nSkipping disabled port %d\n", portid);
diff --git a/examples/ethtool/Makefile b/examples/ethtool/Makefile
index 2b40b4b6..3d9d4f06 100644
--- a/examples/ethtool/Makefile
+++ b/examples/ethtool/Makefile
@@ -10,7 +10,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
else
diff --git a/examples/ethtool/ethtool-app/ethapp.c b/examples/ethtool/ethtool-app/ethapp.c
index 0c3f1f6e..a4e64b35 100644
--- a/examples/ethtool/ethtool-app/ethapp.c
+++ b/examples/ethtool/ethtool-app/ethapp.c
@@ -75,6 +75,9 @@ cmdline_parse_token_num_t pcmd_int_token_port =
/* Commands taking port id and string */
cmdline_parse_token_string_t pcmd_eeprom_token_cmd =
TOKEN_STRING_INITIALIZER(struct pcmd_intstr_params, cmd, "eeprom");
+cmdline_parse_token_string_t pcmd_module_eeprom_token_cmd =
+ TOKEN_STRING_INITIALIZER(struct pcmd_intstr_params, cmd,
+ "module-eeprom");
cmdline_parse_token_string_t pcmd_mtu_token_cmd =
TOKEN_STRING_INITIALIZER(struct pcmd_intstr_params, cmd, "mtu");
cmdline_parse_token_string_t pcmd_regs_token_cmd =
@@ -145,9 +148,9 @@ pcmd_drvinfo_callback(__rte_unused void *ptr_params,
__rte_unused void *ptr_data)
{
struct ethtool_drvinfo info;
- int id_port;
+ uint16_t id_port;
- for (id_port = 0; id_port < rte_eth_dev_count(); id_port++) {
+ RTE_ETH_FOREACH_DEV(id_port) {
memset(&info, 0, sizeof(info));
if (rte_ethtool_get_drvinfo(id_port, &info)) {
printf("Error getting info for port %i\n", id_port);
@@ -167,10 +170,10 @@ pcmd_link_callback(__rte_unused void *ptr_params,
__rte_unused struct cmdline *ctx,
__rte_unused void *ptr_data)
{
- int num_ports = rte_eth_dev_count();
- int id_port, stat_port;
+ uint16_t id_port;
+ int stat_port;
- for (id_port = 0; id_port < num_ports; id_port++) {
+ RTE_ETH_FOREACH_DEV(id_port) {
if (!rte_eth_dev_is_valid_port(id_port))
continue;
stat_port = rte_ethtool_get_link(id_port);
@@ -298,6 +301,54 @@ pcmd_eeprom_callback(void *ptr_params,
static void
+pcmd_module_eeprom_callback(void *ptr_params,
+ __rte_unused struct cmdline *ctx,
+ __rte_unused void *ptr_data)
+{
+ struct pcmd_intstr_params *params = ptr_params;
+ struct ethtool_eeprom info_eeprom;
+ uint32_t module_info[2];
+ int stat;
+ unsigned char bytes_eeprom[EEPROM_DUMP_CHUNKSIZE];
+ FILE *fp_eeprom;
+
+ if (!rte_eth_dev_is_valid_port(params->port)) {
+ printf("Error: Invalid port number %i\n", params->port);
+ return;
+ }
+
+ stat = rte_ethtool_get_module_info(params->port, module_info);
+ if (stat != 0) {
+ printf("Module EEPROM information read error %i\n", stat);
+ return;
+ }
+
+ info_eeprom.len = module_info[1];
+ info_eeprom.offset = 0;
+
+ stat = rte_ethtool_get_module_eeprom(params->port,
+ &info_eeprom, bytes_eeprom);
+ if (stat != 0) {
+ printf("Module EEPROM read error %i\n", stat);
+ return;
+ }
+
+ fp_eeprom = fopen(params->opt, "wb");
+ if (fp_eeprom == NULL) {
+ printf("Error opening '%s' for writing\n", params->opt);
+ return;
+ }
+ printf("Total plugin module EEPROM length: %i bytes\n",
+ info_eeprom.len);
+ if (fwrite(bytes_eeprom, 1, info_eeprom.len,
+ fp_eeprom) != info_eeprom.len) {
+ printf("Error writing '%s'\n", params->opt);
+ }
+ fclose(fp_eeprom);
+}
+
+
+static void
pcmd_pause_callback(void *ptr_params,
__rte_unused struct cmdline *ctx,
void *ptr_data)
@@ -664,6 +715,18 @@ cmdline_parse_inst_t pcmd_eeprom = {
NULL
},
};
+cmdline_parse_inst_t pcmd_module_eeprom = {
+ .f = pcmd_module_eeprom_callback,
+ .data = NULL,
+ .help_str = "module-eeprom <port_id> <filename>\n"
+ " Dump plugin module EEPROM to file",
+ .tokens = {
+ (void *)&pcmd_module_eeprom_token_cmd,
+ (void *)&pcmd_intstr_token_port,
+ (void *)&pcmd_intstr_token_opt,
+ NULL
+ },
+};
cmdline_parse_inst_t pcmd_pause_noopt = {
.f = pcmd_pause_callback,
.data = (void *)0x01,
@@ -816,6 +879,7 @@ cmdline_parse_inst_t pcmd_vlan = {
cmdline_parse_ctx_t list_prompt_commands[] = {
(cmdline_parse_inst_t *)&pcmd_drvinfo,
(cmdline_parse_inst_t *)&pcmd_eeprom,
+ (cmdline_parse_inst_t *)&pcmd_module_eeprom,
(cmdline_parse_inst_t *)&pcmd_link,
(cmdline_parse_inst_t *)&pcmd_macaddr_get,
(cmdline_parse_inst_t *)&pcmd_macaddr,
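With the parse instruction registered, the ethtool example's prompt accepts a module-eeprom command that dumps the plugin (SFP/QSFP) module EEPROM to a file. An illustrative session; the prompt string, port id, filename and length shown here are examples, not output captured from this patch:

```
EthApp> module-eeprom 0 sfp0_eeprom.bin
Total plugin module EEPROM length: 512 bytes
```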
diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c
index 702feabe..dc93adfe 100644
--- a/examples/ethtool/ethtool-app/main.c
+++ b/examples/ethtool/ethtool-app/main.c
@@ -99,7 +99,6 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
memset(&cfg_port, 0, sizeof(cfg_port));
cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
- cfg_port.rxmode.ignore_offload_bitfield = 1;
for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
struct app_port *ptr_port = &app_cfg->ports[idx_port];
@@ -142,7 +141,6 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports)
"rte_eth_rx_queue_setup failed"
);
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
if (rte_eth_tx_queue_setup(
idx_port, 0, nb_txd,
rte_eth_dev_socket_id(idx_port), &txconf) < 0)
@@ -251,7 +249,7 @@ int main(int argc, char **argv)
if (cnt_args_parsed < 0)
rte_exit(EXIT_FAILURE, "rte_eal_init(): Failed");
- cnt_ports = rte_eth_dev_count();
+ cnt_ports = rte_eth_dev_count_avail();
printf("Number of NICs: %i\n", cnt_ports);
if (cnt_ports == 0)
rte_exit(EXIT_FAILURE, "No available NIC ports!\n");
diff --git a/examples/ethtool/lib/Makefile b/examples/ethtool/lib/Makefile
index fbafa6d3..6eaa640b 100644
--- a/examples/ethtool/lib/Makefile
+++ b/examples/ethtool/lib/Makefile
@@ -10,7 +10,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(error This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
endif
@@ -25,6 +25,7 @@ SRCS-y := rte_ethtool.c
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 90dfbb73..e6a2e88c 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -22,6 +22,8 @@ rte_ethtool_get_drvinfo(uint16_t port_id, struct ethtool_drvinfo *drvinfo)
{
struct rte_eth_dev_info dev_info;
struct rte_dev_reg_info reg_info;
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus = NULL;
int n;
int ret;
@@ -46,15 +48,17 @@ rte_ethtool_get_drvinfo(uint16_t port_id, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->version, sizeof(drvinfo->version), "%s",
rte_version());
/* TODO: replace bus_info by rte_devargs.name */
- if (dev_info.pci_dev)
+ if (dev_info.device)
+ bus = rte_bus_find_by_device(dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(dev_info.device);
snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
"%04x:%02x:%02x.%x",
- dev_info.pci_dev->addr.domain,
- dev_info.pci_dev->addr.bus,
- dev_info.pci_dev->addr.devid,
- dev_info.pci_dev->addr.function);
- else
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+ } else {
snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info), "N/A");
+ }
memset(&reg_info, 0, sizeof(reg_info));
rte_eth_dev_get_reg_info(port_id, &reg_info);
@@ -174,6 +178,36 @@ rte_ethtool_set_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
}
int
+rte_ethtool_get_module_info(uint16_t port_id, uint32_t *modinfo)
+{
+ struct rte_eth_dev_module_info *info;
+
+ info = (struct rte_eth_dev_module_info *)modinfo;
+ return rte_eth_dev_get_module_info(port_id, info);
+}
+
+int
+rte_ethtool_get_module_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
+ void *words)
+{
+ struct rte_dev_eeprom_info eeprom_info;
+ int status;
+
+ if (eeprom == NULL || words == NULL)
+ return -EINVAL;
+
+ eeprom_info.offset = eeprom->offset;
+ eeprom_info.length = eeprom->len;
+ eeprom_info.data = words;
+
+ status = rte_eth_dev_get_module_eeprom(port_id, &eeprom_info);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+int
rte_ethtool_get_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *pause_param)
{
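These drvinfo hunks track the removal of pci_dev from rte_eth_dev_info: the PCI address is now reached through the generic rte_device handle and its bus. A condensed sketch of the lookup, with error handling trimmed and the helper name illustrative:

```c
#include <string.h>
#include <rte_bus.h>
#include <rte_bus_pci.h>
#include <rte_ethdev.h>

/* Return the PCI device behind an ethdev port, or NULL if it is not PCI. */
static const struct rte_pci_device *
port_to_pci(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	const struct rte_bus *bus;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.device == NULL)
		return NULL;
	bus = rte_bus_find_by_device(dev_info.device);
	if (bus == NULL || strcmp(bus->name, "pci") != 0)
		return NULL;
	return RTE_DEV_TO_PCI(dev_info.device);
}
```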
diff --git a/examples/ethtool/lib/rte_ethtool.h b/examples/ethtool/lib/rte_ethtool.h
index c9623962..43adc97a 100644
--- a/examples/ethtool/lib/rte_ethtool.h
+++ b/examples/ethtool/lib/rte_ethtool.h
@@ -154,6 +154,40 @@ int rte_ethtool_set_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words);
/**
+ * Retrieve the type and size of plugin module EEPROM
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param modinfo
+ * Pointer that receives the type and size of the plugin module EEPROM.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this operation.
+ * - (-ENODEV) if *port_id* invalid.
+ * - others depend on the specific operation's implementation.
+ */
+int rte_ethtool_get_module_info(uint16_t port_id, uint32_t *modinfo);
+
+/**
+ * Retrieve the data of plugin module EEPROM
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param eeprom
+ * Pointer to an ethtool_eeprom structure giving the plugin module
+ * EEPROM offset and length
+ * @param words
+ * A buffer that receives the data read from the plugin module EEPROM
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this operation.
+ * - (-ENODEV) if *port_id* invalid.
+ * - others depend on the specific operation's implementation.
+ */
+int rte_ethtool_get_module_eeprom(uint16_t port_id,
+ struct ethtool_eeprom *eeprom, void *words);
+
+/**
* Retrieve the Ethernet device pause frame configuration according to
* parameter attributes described by ethtool data structure,
* ethtool_pauseparam.
diff --git a/examples/ethtool/meson.build b/examples/ethtool/meson.build
new file mode 100644
index 00000000..c370d747
--- /dev/null
+++ b/examples/ethtool/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Example app currently unsupported by meson build
+build = false
diff --git a/examples/eventdev_pipeline/main.c b/examples/eventdev_pipeline/main.c
index 2422c184..700bc696 100644
--- a/examples/eventdev_pipeline/main.c
+++ b/examples/eventdev_pipeline/main.c
@@ -267,7 +267,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
@@ -285,7 +284,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txconf;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rte_eth_dev_info_get(port, &dev_info);
@@ -293,6 +292,17 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf_default.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port,
+ port_conf_default.rx_adv_conf.rss_conf.rss_hf,
+ port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0)
@@ -307,7 +317,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf_default.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
@@ -339,10 +348,9 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
}
static int
-init_ports(unsigned int num_ports)
+init_ports(uint16_t num_ports)
{
- uint8_t portid;
- unsigned int i;
+ uint16_t portid, i;
if (!cdata.num_mbuf)
cdata.num_mbuf = 16384 * num_ports;
@@ -354,12 +362,12 @@ init_ports(unsigned int num_ports)
/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
rte_socket_id());
- for (portid = 0; portid < num_ports; portid++)
+ RTE_ETH_FOREACH_DEV(portid)
if (port_init(portid, mp) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
- for (i = 0; i < num_ports; i++) {
+ RTE_ETH_FOREACH_DEV(i) {
void *userdata = (void *)(uintptr_t) i;
fdata->tx_buf[i] =
rte_malloc(NULL, RTE_ETH_TX_BUFFER_SIZE(32), 0);
@@ -375,13 +383,13 @@ init_ports(unsigned int num_ports)
}
static void
-do_capability_setup(uint16_t nb_ethdev, uint8_t eventdev_id)
+do_capability_setup(uint8_t eventdev_id)
{
- int i;
+ uint16_t i;
uint8_t mt_unsafe = 0;
uint8_t burst = 0;
- for (i = 0; i < nb_ethdev; i++) {
+ RTE_ETH_FOREACH_DEV(i) {
struct rte_eth_dev_info dev_info;
memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
@@ -430,7 +438,7 @@ int
main(int argc, char **argv)
{
struct worker_data *worker_data;
- unsigned int num_ports;
+ uint16_t num_ports;
int lcore_id;
int err;
@@ -452,7 +460,7 @@ main(int argc, char **argv)
/* Parse cli options*/
parse_app_args(argc, argv);
- num_ports = rte_eth_dev_count();
+ num_ports = rte_eth_dev_count_avail();
if (num_ports == 0)
rte_panic("No ethernet ports found\n");
@@ -483,7 +491,7 @@ main(int argc, char **argv)
fprintf(stderr, "Warning: More than one eventdev, using idx 0");
- do_capability_setup(num_ports, 0);
+ do_capability_setup(0);
fdata->cap.check_opt();
worker_data = rte_calloc(0, cdata.num_workers,
diff --git a/examples/eventdev_pipeline/pipeline_worker_generic.c b/examples/eventdev_pipeline/pipeline_worker_generic.c
index c673160f..2215e9eb 100644
--- a/examples/eventdev_pipeline/pipeline_worker_generic.c
+++ b/examples/eventdev_pipeline/pipeline_worker_generic.c
@@ -138,7 +138,7 @@ consumer(void)
&packet, 1, 0);
if (n == 0) {
- for (i = 0; i < rte_eth_dev_count(); i++)
+ RTE_ETH_FOREACH_DEV(i)
rte_eth_tx_buffer_flush(i, 0, fdata->tx_buf[i]);
return 0;
}
@@ -196,14 +196,13 @@ consumer_burst(void)
unsigned int i, j;
uint8_t dev_id = cons_data.dev_id;
uint8_t port_id = cons_data.port_id;
- uint16_t nb_ports = rte_eth_dev_count();
do {
uint16_t n = rte_event_dequeue_burst(dev_id, port_id,
packets, RTE_DIM(packets), 0);
if (n == 0) {
- for (j = 0; j < nb_ports; j++)
+ RTE_ETH_FOREACH_DEV(j)
rte_eth_tx_buffer_flush(j, 0, fdata->tx_buf[j]);
return 0;
}
@@ -521,7 +520,7 @@ generic_opt_check(void)
rte_exit(EXIT_FAILURE,
"Event dev doesn't support all type queues\n");
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
rte_exit(EXIT_FAILURE,
diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index b254b03f..3dbde92d 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -422,7 +422,7 @@ setup_eventdev_worker_tx(struct cons_data *cons_data,
const uint8_t dev_id = 0;
const uint8_t nb_ports = cdata.num_workers;
uint8_t nb_slots = 0;
- uint8_t nb_queues = rte_eth_dev_count();
+ uint8_t nb_queues = rte_eth_dev_count_avail();
/*
* In case where all type queues are not enabled, use queues equal to
@@ -431,7 +431,7 @@ setup_eventdev_worker_tx(struct cons_data *cons_data,
*/
if (!atq) {
nb_queues *= cdata.num_stages;
- nb_queues += rte_eth_dev_count();
+ nb_queues += rte_eth_dev_count_avail();
}
struct rte_event_dev_config config = {
@@ -735,7 +735,7 @@ worker_tx_opt_check(void)
rte_exit(EXIT_FAILURE,
"Event dev doesn't support all type queues\n");
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
ret = rte_event_eth_rx_adapter_caps_get(0, i, &cap);
if (ret)
rte_exit(EXIT_FAILURE,
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index 3e5b1e71..440422bc 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -41,13 +41,21 @@
#include <rte_string_fns.h>
#include <rte_cycles.h>
+#ifndef APP_MAX_LCORE
+#if (RTE_MAX_LCORE > 64)
+#define APP_MAX_LCORE 64
+#else
+#define APP_MAX_LCORE RTE_MAX_LCORE
+#endif
+#endif
+
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
#define FATAL_ERROR(fmt, args...) rte_exit(EXIT_FAILURE, fmt "\n", ##args)
#define PRINT_INFO(fmt, args...) RTE_LOG(INFO, APP, fmt "\n", ##args)
/* Max ports than can be used (each port is associated with two lcores) */
-#define MAX_PORTS (RTE_MAX_LCORE / 2)
+#define MAX_PORTS (APP_MAX_LCORE / 2)
/* Max size of a single packet */
#define MAX_PACKET_SZ (2048)
@@ -80,7 +88,6 @@
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
.rxmode = {
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -101,7 +108,7 @@ static uint64_t input_cores_mask = 0;
static uint64_t output_cores_mask = 0;
/* Array storing port_id that is associated with each lcore */
-static uint16_t port_ids[RTE_MAX_LCORE];
+static uint16_t port_ids[APP_MAX_LCORE];
/* Structure type for recording lcore-specific stats */
struct stats {
@@ -111,7 +118,7 @@ struct stats {
} __rte_cache_aligned;
/* Array of lcore-specific stats */
-static struct stats lcore_stats[RTE_MAX_LCORE];
+static struct stats lcore_stats[APP_MAX_LCORE];
/* Print out statistics on packets handled */
static void
@@ -124,6 +131,9 @@ print_stats(void)
" Lcore Port RX TX Dropped on TX\n"
"------- ------ ------------ ------------ ---------------\n");
RTE_LCORE_FOREACH(i) {
+ /* limit ourselves to application supported cores only */
+ if (i >= APP_MAX_LCORE)
+ break;
printf("%6u %7u %13"PRIu64" %13"PRIu64" %16"PRIu64"\n",
i, (unsigned)port_ids[i],
lcore_stats[i].rx, lcore_stats[i].tx,
@@ -330,7 +340,9 @@ setup_port_lcore_affinities(void)
uint16_t rx_port = 0;
/* Setup port_ids[] array, and check masks were ok */
- RTE_LCORE_FOREACH(i) {
+ for (i = 0; i < APP_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
if (input_cores_mask & (1ULL << i)) {
/* Skip ports that are not enabled */
while ((ports_mask & (1 << rx_port)) == 0) {
@@ -447,7 +459,6 @@ init_port(uint16_t port)
port, ret);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port),
@@ -465,7 +476,7 @@ init_port(uint16_t port)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -477,7 +488,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -549,7 +560,7 @@ main(int argc, char** argv)
}
/* Get number of ports found in scan */
- nb_sys_ports = rte_eth_dev_count();
+ nb_sys_ports = rte_eth_dev_count_avail();
if (nb_sys_ports == 0)
FATAL_ERROR("No supported Ethernet device found");
/* Find highest port set in portmask */
@@ -561,14 +572,14 @@ main(int argc, char** argv)
FATAL_ERROR("Port mask requires more ports than available");
/* Initialise each port */
- for (port = 0; port < nb_sys_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Skip ports that are not enabled */
if ((ports_mask & (1 << port)) == 0) {
continue;
}
init_port(port);
}
- check_all_ports_link_status(nb_sys_ports, ports_mask);
+ check_all_ports_link_status(ports_mask);
/* Launch per-lcore function on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
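The new APP_MAX_LCORE cap exists because exception_path records its core assignments in 64-bit masks (input_cores_mask, output_cores_mask): a uint64_t can only name lcores 0 through 63, so higher ids must be excluded even when RTE_MAX_LCORE is larger. A sketch of the constraint the cap protects, assuming the 64-lcore limit above:

```c
#include <stdint.h>

/* A 64-bit mask can only address lcores 0..63; higher ids would
 * shift out of range, so they are skipped by the APP_MAX_LCORE cap. */
static inline int
lcore_in_mask(uint64_t mask, unsigned int lcore_id)
{
	if (lcore_id >= 64)
		return 0;
	return (mask & (1ULL << lcore_id)) != 0;
}
```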
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
index 32d9b1d3..19961292 100644
--- a/examples/flow_classify/flow_classify.c
+++ b/examples/flow_classify/flow_classify.c
@@ -62,7 +62,6 @@ const char cb_port_delim[] = ":";
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
@@ -200,7 +199,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txconf;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rte_eth_dev_info_get(port, &dev_info);
@@ -222,7 +221,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
@@ -259,8 +257,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(struct flow_classifier *cls_app)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ uint16_t port;
int ret;
int i = 0;
@@ -275,7 +272,7 @@ lcore_main(struct flow_classifier *cls_app)
* Check that the port is on the same NUMA node as the polling thread
* for best performance.
*/
- for (port = 0; port < nb_ports; port++)
+ RTE_ETH_FOREACH_DEV(port)
if (rte_eth_dev_socket_id(port) > 0 &&
rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
printf("\n\n");
@@ -294,7 +291,7 @@ lcore_main(struct flow_classifier *cls_app)
* on the paired port.
* The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
*/
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Get burst of RX packets, from first port of pair. */
struct rte_mbuf *bufs[BURST_SIZE];
const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
@@ -753,8 +750,8 @@ int
main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
- uint8_t nb_ports;
- uint8_t portid;
+ uint16_t nb_ports;
+ uint16_t portid;
int ret;
int socket_id;
struct rte_table_acl_params table_acl_params;
@@ -777,7 +774,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");
/* Check that there is an even number of ports to send/receive on. */
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports < 2 || (nb_ports & 1))
rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
@@ -789,7 +786,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* Initialize all ports. */
- for (portid = 0; portid < nb_ports; portid++)
+ RTE_ETH_FOREACH_DEV(portid)
if (port_init(portid, mbuf_pool) != 0)
rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
portid);
diff --git a/examples/flow_filtering/Makefile b/examples/flow_filtering/Makefile
index 01bb4cd8..8f86b7b2 100644
--- a/examples/flow_filtering/Makefile
+++ b/examples/flow_filtering/Makefile
@@ -1,34 +1,5 @@
-#
-# BSD LICENSE
-#
-# Copyright 2017 Mellanox.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Mellanox nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 Mellanox Technologies, Ltd
APP = flow
diff --git a/examples/flow_filtering/flow_blocks.c b/examples/flow_filtering/flow_blocks.c
index 61b045af..4da45928 100644
--- a/examples/flow_filtering/flow_blocks.c
+++ b/examples/flow_filtering/flow_blocks.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Mellanox nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#define MAX_PATTERN_NUM 4
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
index 0bb81a8d..ce91e8a6 100644
--- a/examples/flow_filtering/main.c
+++ b/examples/flow_filtering/main.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Mellanox. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include <stdio.h>
@@ -149,7 +121,6 @@ init_port(void)
struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -161,6 +132,22 @@ init_port(void)
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO,
},
+ /*
+ * Initialize fdir_conf of rte_eth_conf.
+ * Flow Director (fdir) is used by the i40e flow
+ * filtering path, so rte_flow rules involve some
+ * fdir configuration. In the long term drivers
+ * should not require any fdir configuration for
+ * rte_flow, but this workaround is needed so the
+ * sample app can run on i40e.
+ */
+ .fdir_conf = {
+ .mode = RTE_FDIR_MODE_PERFECT,
+ .pballoc = RTE_FDIR_PBALLOC_64K,
+ .status = RTE_FDIR_REPORT_STATUS,
+ .drop_queue = 127,
+ },
};
struct rte_eth_txconf txq_conf;
struct rte_eth_rxconf rxq_conf;
@@ -232,7 +219,7 @@ int
main(int argc, char **argv)
{
int ret;
- uint8_t nr_ports;
+ uint16_t nr_ports;
struct rte_flow_error error;
ret = rte_eal_init(argc, argv);
@@ -243,7 +230,7 @@ main(int argc, char **argv)
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
- nr_ports = rte_eth_dev_count();
+ nr_ports = rte_eth_dev_count_avail();
if (nr_ports == 0)
rte_exit(EXIT_FAILURE, ":: no Ethernet ports found\n");
port_id = 0;
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index d3b1da6c..5306d767 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -140,7 +140,6 @@ static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP),
@@ -571,7 +570,7 @@ print_ethaddr(const char *name, struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -583,7 +582,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -843,7 +842,7 @@ main(int argc, char **argv)
struct rte_eth_txconf *txconf;
struct rx_queue *rxq;
int socket, ret;
- unsigned nb_ports;
+ uint16_t nb_ports;
uint16_t queueid = 0;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
@@ -861,7 +860,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid arguments");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No ports found!\n");
@@ -876,7 +875,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
struct rte_eth_rxconf rxq_conf;
@@ -973,7 +972,6 @@ main(int argc, char **argv)
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socket, txconf);
@@ -994,7 +992,7 @@ main(int argc, char **argv)
printf("\n");
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0) {
continue;
}
@@ -1016,7 +1014,7 @@ main(int argc, char **argv)
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status(nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index 27f7cc6a..3fb98ce3 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -1,34 +1,23 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2010-2014 Intel Corporation
+# Copyright(c) 2010-2018 Intel Corporation
# binary name
APP = ip_pipeline
# all source are stored in SRCS-y
-SRCS-y := main.c
-SRCS-y += config_parse.c
+SRCS-y := action.c
+SRCS-y += cli.c
+SRCS-y += conn.c
+SRCS-y += kni.c
+SRCS-y += link.c
+SRCS-y += main.c
+SRCS-y += mempool.c
SRCS-y += parser.c
-SRCS-y += config_parse_tm.c
-SRCS-y += config_check.c
-SRCS-y += init.c
+SRCS-y += pipeline.c
+SRCS-y += swq.c
+SRCS-y += tap.c
SRCS-y += thread.c
-SRCS-y += thread_fe.c
-SRCS-y += cpu_core_map.c
-
-SRCS-y += pipeline_common_be.c
-SRCS-y += pipeline_common_fe.c
-SRCS-y += pipeline_master_be.c
-SRCS-y += pipeline_master.c
-SRCS-y += pipeline_passthrough_be.c
-SRCS-y += pipeline_passthrough.c
-SRCS-y += pipeline_firewall_be.c
-SRCS-y += pipeline_firewall.c
-SRCS-y += pipeline_flow_classification_be.c
-SRCS-y += pipeline_flow_classification.c
-SRCS-y += pipeline_flow_actions_be.c
-SRCS-y += pipeline_flow_actions.c
-SRCS-y += pipeline_routing_be.c
-SRCS-y += pipeline_routing.c
+SRCS-y += tmgr.c
# Build using pkg-config variables if possible
$(shell pkg-config --exists libdpdk)
@@ -46,8 +35,7 @@ CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
-VPATH += pipeline
-CFLAGS += -I. -I./pipeline/
+CFLAGS += -I.
OBJS := $(patsubst %.c,build/%.o,$(SRCS-y))
@@ -74,21 +62,28 @@ ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif
-VPATH += $(SRCDIR)/pipeline
-
# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-INC += $(sort $(wildcard *.h)) $(sort $(wildcard pipeline/*.h))
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+$(info This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+clean:
+else
+
+INC += $(sort $(wildcard *.h))
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := $(SRCS-y)
-CFLAGS += -I$(SRCDIR) -I$(SRCDIR)/pipeline
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -I$(SRCDIR)
CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS) -Wno-error=unused-function -Wno-error=unused-variable
+CFLAGS += $(WERROR_FLAGS)
include $(RTE_SDK)/mk/rte.extapp.mk
endif
+endif
diff --git a/examples/ip_pipeline/action.c b/examples/ip_pipeline/action.c
new file mode 100644
index 00000000..a29c2b36
--- /dev/null
+++ b/examples/ip_pipeline/action.c
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+
+#include "action.h"
+#include "hash_func.h"
+
+/**
+ * Input port
+ */
+static struct port_in_action_profile_list port_in_action_profile_list;
+
+int
+port_in_action_profile_init(void)
+{
+ TAILQ_INIT(&port_in_action_profile_list);
+
+ return 0;
+}
+
+struct port_in_action_profile *
+port_in_action_profile_find(const char *name)
+{
+ struct port_in_action_profile *profile;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(profile, &port_in_action_profile_list, node)
+ if (strcmp(profile->name, name) == 0)
+ return profile;
+
+ return NULL;
+}
+
+struct port_in_action_profile *
+port_in_action_profile_create(const char *name,
+ struct port_in_action_profile_params *params)
+{
+ struct port_in_action_profile *profile;
+ struct rte_port_in_action_profile *ap;
+ int status;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ port_in_action_profile_find(name) ||
+ (params == NULL))
+ return NULL;
+
+ if ((params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) &&
+ (params->lb.f_hash == NULL)) {
+ switch (params->lb.key_size) {
+ case 8:
+ params->lb.f_hash = hash_default_key8;
+ break;
+
+ case 16:
+ params->lb.f_hash = hash_default_key16;
+ break;
+
+ case 24:
+ params->lb.f_hash = hash_default_key24;
+ break;
+
+ case 32:
+ params->lb.f_hash = hash_default_key32;
+ break;
+
+ case 40:
+ params->lb.f_hash = hash_default_key40;
+ break;
+
+ case 48:
+ params->lb.f_hash = hash_default_key48;
+ break;
+
+ case 56:
+ params->lb.f_hash = hash_default_key56;
+ break;
+
+ case 64:
+ params->lb.f_hash = hash_default_key64;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ params->lb.seed = 0;
+ }
+ /* Resource */
+ ap = rte_port_in_action_profile_create(0);
+ if (ap == NULL)
+ return NULL;
+
+ if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_FLTR)) {
+ status = rte_port_in_action_profile_action_register(ap,
+ RTE_PORT_IN_ACTION_FLTR,
+ &params->fltr);
+
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) {
+ status = rte_port_in_action_profile_action_register(ap,
+ RTE_PORT_IN_ACTION_LB,
+ &params->lb);
+
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ status = rte_port_in_action_profile_freeze(ap);
+ if (status) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node allocation */
+ profile = calloc(1, sizeof(struct port_in_action_profile));
+ if (profile == NULL) {
+ rte_port_in_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(profile->name, name, sizeof(profile->name));
+ memcpy(&profile->params, params, sizeof(*params));
+ profile->ap = ap;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&port_in_action_profile_list, profile, node);
+
+ return profile;
+}
+
+/**
+ * Table
+ */
+static struct table_action_profile_list table_action_profile_list;
+
+int
+table_action_profile_init(void)
+{
+ TAILQ_INIT(&table_action_profile_list);
+
+ return 0;
+}
+
+struct table_action_profile *
+table_action_profile_find(const char *name)
+{
+ struct table_action_profile *profile;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(profile, &table_action_profile_list, node)
+ if (strcmp(profile->name, name) == 0)
+ return profile;
+
+ return NULL;
+}
+
+struct table_action_profile *
+table_action_profile_create(const char *name,
+ struct table_action_profile_params *params)
+{
+ struct table_action_profile *profile;
+ struct rte_table_action_profile *ap;
+ int status;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ table_action_profile_find(name) ||
+ (params == NULL) ||
+ ((params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) == 0))
+ return NULL;
+
+ if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) &&
+ (params->lb.f_hash == NULL)) {
+ switch (params->lb.key_size) {
+ case 8:
+ params->lb.f_hash = hash_default_key8;
+ break;
+
+ case 16:
+ params->lb.f_hash = hash_default_key16;
+ break;
+
+ case 24:
+ params->lb.f_hash = hash_default_key24;
+ break;
+
+ case 32:
+ params->lb.f_hash = hash_default_key32;
+ break;
+
+ case 40:
+ params->lb.f_hash = hash_default_key40;
+ break;
+
+ case 48:
+ params->lb.f_hash = hash_default_key48;
+ break;
+
+ case 56:
+ params->lb.f_hash = hash_default_key56;
+ break;
+
+ case 64:
+ params->lb.f_hash = hash_default_key64;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ params->lb.seed = 0;
+ }
+
+ /* Resource */
+ ap = rte_table_action_profile_create(&params->common);
+ if (ap == NULL)
+ return NULL;
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_FWD,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_LB,
+ &params->lb);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_MTR,
+ &params->mtr);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TM,
+ &params->tm);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_ENCAP,
+ &params->encap);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_NAT,
+ &params->nat);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TTL,
+ &params->ttl);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_STATS,
+ &params->stats);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TIME,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ status = rte_table_action_profile_freeze(ap);
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node allocation */
+ profile = calloc(1, sizeof(struct table_action_profile));
+ if (profile == NULL) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(profile->name, name, sizeof(profile->name));
+ memcpy(&profile->params, params, sizeof(*params));
+ profile->ap = ap;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&table_action_profile_list, profile, node);
+
+ return profile;
+}
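The new action.c wraps librte_pipeline's action-profile API in a uniform create pattern: allocate the profile, register each action selected in action_mask, freeze the profile, then keep a named node on a TAILQ for later lookup. A hedged usage sketch for the table path; the profile name and parameter values are illustrative and assume the headers added by this patch:

```c
#include <string.h>
#include <rte_table_action.h>

#include "action.h"

/* Build a minimal table action profile with only the mandatory
 * forwarding action enabled; remaining params stay zeroed. */
static struct table_action_profile *
make_fwd_profile(void)
{
	struct table_action_profile_params p;

	memset(&p, 0, sizeof(p));
	p.action_mask = 1LLU << RTE_TABLE_ACTION_FWD;
	p.common.ip_version = 1; /* non-zero selects IPv4 metadata */

	return table_action_profile_create("AP0", &p);
}
```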
diff --git a/examples/ip_pipeline/action.h b/examples/ip_pipeline/action.h
new file mode 100644
index 00000000..417200e8
--- /dev/null
+++ b/examples/ip_pipeline/action.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_ACTION_H_
+#define _INCLUDE_ACTION_H_
+
+#include <sys/queue.h>
+
+#include <rte_port_in_action.h>
+#include <rte_table_action.h>
+
+#include "common.h"
+
+/**
+ * Input port action
+ */
+struct port_in_action_profile_params {
+ uint64_t action_mask;
+ struct rte_port_in_action_fltr_config fltr;
+ struct rte_port_in_action_lb_config lb;
+};
+
+struct port_in_action_profile {
+ TAILQ_ENTRY(port_in_action_profile) node;
+ char name[NAME_SIZE];
+ struct port_in_action_profile_params params;
+ struct rte_port_in_action_profile *ap;
+};
+
+TAILQ_HEAD(port_in_action_profile_list, port_in_action_profile);
+
+int
+port_in_action_profile_init(void);
+
+struct port_in_action_profile *
+port_in_action_profile_find(const char *name);
+
+struct port_in_action_profile *
+port_in_action_profile_create(const char *name,
+ struct port_in_action_profile_params *params);
+
+/**
+ * Table action
+ */
+struct table_action_profile_params {
+ uint64_t action_mask;
+ struct rte_table_action_common_config common;
+ struct rte_table_action_lb_config lb;
+ struct rte_table_action_mtr_config mtr;
+ struct rte_table_action_tm_config tm;
+ struct rte_table_action_encap_config encap;
+ struct rte_table_action_nat_config nat;
+ struct rte_table_action_ttl_config ttl;
+ struct rte_table_action_stats_config stats;
+};
+
+struct table_action_profile {
+ TAILQ_ENTRY(table_action_profile) node;
+ char name[NAME_SIZE];
+ struct table_action_profile_params params;
+ struct rte_table_action_profile *ap;
+};
+
+TAILQ_HEAD(table_action_profile_list, table_action_profile);
+
+int
+table_action_profile_init(void);
+
+struct table_action_profile *
+table_action_profile_find(const char *name);
+
+struct table_action_profile *
+table_action_profile_create(const char *name,
+ struct table_action_profile_params *params);
+
+#endif /* _INCLUDE_ACTION_H_ */
diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
deleted file mode 100644
index 907d4e7c..00000000
--- a/examples/ip_pipeline/app.h
+++ /dev/null
@@ -1,1401 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#ifndef __INCLUDE_APP_H__
-#define __INCLUDE_APP_H__
-
-#include <stdint.h>
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_mempool.h>
-#include <rte_ring.h>
-#include <rte_sched.h>
-#include <cmdline_parse.h>
-
-#include <rte_ethdev.h>
-#ifdef RTE_LIBRTE_KNI
-#include <rte_kni.h>
-#endif
-
-#include "cpu_core_map.h"
-#include "pipeline.h"
-
-#define APP_PARAM_NAME_SIZE PIPELINE_NAME_SIZE
-#define APP_LINK_PCI_BDF_SIZE 16
-
-#ifndef APP_LINK_MAX_HWQ_IN
-#define APP_LINK_MAX_HWQ_IN 128
-#endif
-
-#ifndef APP_LINK_MAX_HWQ_OUT
-#define APP_LINK_MAX_HWQ_OUT 128
-#endif
-
-struct app_mempool_params {
- char *name;
- uint32_t parsed;
- uint32_t buffer_size;
- uint32_t pool_size;
- uint32_t cache_size;
- uint32_t cpu_socket_id;
-};
-
-struct app_link_params {
- char *name;
- uint32_t parsed;
- uint32_t pmd_id; /* Generated based on port mask */
- uint32_t arp_q; /* 0 = Disabled (packets go to default queue 0) */
- uint32_t tcp_syn_q; /* 0 = Disabled (pkts go to default queue) */
- uint32_t ip_local_q; /* 0 = Disabled (pkts go to default queue 0) */
- uint32_t tcp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
- uint32_t udp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
- uint32_t sctp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
- uint32_t rss_qs[APP_LINK_MAX_HWQ_IN];
- uint32_t n_rss_qs;
- uint64_t rss_proto_ipv4;
- uint64_t rss_proto_ipv6;
- uint64_t rss_proto_l2;
- uint32_t promisc;
- uint32_t state; /* DOWN = 0, UP = 1 */
- uint32_t ip; /* 0 = Invalid */
- uint32_t depth; /* Valid only when IP is valid */
- uint64_t mac_addr; /* Read from HW */
- char pci_bdf[APP_LINK_PCI_BDF_SIZE];
-
- struct rte_eth_conf conf;
-};
-
-struct app_pktq_hwq_in_params {
- char *name;
- uint32_t parsed;
- uint32_t mempool_id; /* Position in the app->mempool_params */
- uint32_t size;
- uint32_t burst;
-
- struct rte_eth_rxconf conf;
-};
-
-struct app_pktq_hwq_out_params {
- char *name;
- uint32_t parsed;
- uint32_t size;
- uint32_t burst;
- uint32_t dropless;
- uint64_t n_retries;
- struct rte_eth_txconf conf;
-};
-
-struct app_pktq_swq_params {
- char *name;
- uint32_t parsed;
- uint32_t size;
- uint32_t burst_read;
- uint32_t burst_write;
- uint32_t dropless;
- uint64_t n_retries;
- uint32_t cpu_socket_id;
- uint32_t ipv4_frag;
- uint32_t ipv6_frag;
- uint32_t ipv4_ras;
- uint32_t ipv6_ras;
- uint32_t mtu;
- uint32_t metadata_size;
- uint32_t mempool_direct_id;
- uint32_t mempool_indirect_id;
-};
-
-struct app_pktq_kni_params {
- char *name;
- uint32_t parsed;
-
- uint32_t socket_id;
- uint32_t core_id;
- uint32_t hyper_th_id;
- uint32_t force_bind;
-
- uint32_t mempool_id; /* Position in the app->mempool_params */
- uint32_t burst_read;
- uint32_t burst_write;
- uint32_t dropless;
- uint64_t n_retries;
-};
-
-#ifndef APP_FILE_NAME_SIZE
-#define APP_FILE_NAME_SIZE 256
-#endif
-
-#ifndef APP_MAX_SCHED_SUBPORTS
-#define APP_MAX_SCHED_SUBPORTS 8
-#endif
-
-#ifndef APP_MAX_SCHED_PIPES
-#define APP_MAX_SCHED_PIPES 4096
-#endif
-
-struct app_pktq_tm_params {
- char *name;
- uint32_t parsed;
- const char *file_name;
- struct rte_sched_port_params sched_port_params;
- struct rte_sched_subport_params
- sched_subport_params[APP_MAX_SCHED_SUBPORTS];
- struct rte_sched_pipe_params
- sched_pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
- int sched_pipe_to_profile[APP_MAX_SCHED_SUBPORTS * APP_MAX_SCHED_PIPES];
- uint32_t burst_read;
- uint32_t burst_write;
-};
-
-struct app_pktq_tap_params {
- char *name;
- uint32_t parsed;
- uint32_t burst_read;
- uint32_t burst_write;
- uint32_t dropless;
- uint64_t n_retries;
- uint32_t mempool_id; /* Position in the app->mempool_params */
-};
-
-struct app_pktq_source_params {
- char *name;
- uint32_t parsed;
- uint32_t mempool_id; /* Position in the app->mempool_params array */
- uint32_t burst;
- const char *file_name; /* Full path of PCAP file to be copied to mbufs */
- uint32_t n_bytes_per_pkt;
-};
-
-struct app_pktq_sink_params {
- char *name;
- uint8_t parsed;
- const char *file_name; /* Full path of PCAP file to be copied to mbufs */
- uint32_t n_pkts_to_dump;
-};
-
-struct app_msgq_params {
- char *name;
- uint32_t parsed;
- uint32_t size;
- uint32_t cpu_socket_id;
-};
-
-enum app_pktq_in_type {
- APP_PKTQ_IN_HWQ,
- APP_PKTQ_IN_SWQ,
- APP_PKTQ_IN_TM,
- APP_PKTQ_IN_TAP,
- APP_PKTQ_IN_KNI,
- APP_PKTQ_IN_SOURCE,
-};
-
-struct app_pktq_in_params {
- enum app_pktq_in_type type;
- uint32_t id; /* Position in the appropriate app array */
-};
-
-enum app_pktq_out_type {
- APP_PKTQ_OUT_HWQ,
- APP_PKTQ_OUT_SWQ,
- APP_PKTQ_OUT_TM,
- APP_PKTQ_OUT_TAP,
- APP_PKTQ_OUT_KNI,
- APP_PKTQ_OUT_SINK,
-};
-
-struct app_pktq_out_params {
- enum app_pktq_out_type type;
- uint32_t id; /* Position in the appropriate app array */
-};
-
-#define APP_PIPELINE_TYPE_SIZE PIPELINE_TYPE_SIZE
-
-#define APP_MAX_PIPELINE_PKTQ_IN PIPELINE_MAX_PORT_IN
-#define APP_MAX_PIPELINE_PKTQ_OUT PIPELINE_MAX_PORT_OUT
-#define APP_MAX_PIPELINE_MSGQ_IN PIPELINE_MAX_MSGQ_IN
-#define APP_MAX_PIPELINE_MSGQ_OUT PIPELINE_MAX_MSGQ_OUT
-
-#define APP_MAX_PIPELINE_ARGS PIPELINE_MAX_ARGS
-
-struct app_pipeline_params {
- char *name;
- uint8_t parsed;
-
- char type[APP_PIPELINE_TYPE_SIZE];
-
- uint32_t socket_id;
- uint32_t core_id;
- uint32_t hyper_th_id;
-
- struct app_pktq_in_params pktq_in[APP_MAX_PIPELINE_PKTQ_IN];
- struct app_pktq_out_params pktq_out[APP_MAX_PIPELINE_PKTQ_OUT];
- uint32_t msgq_in[APP_MAX_PIPELINE_MSGQ_IN];
- uint32_t msgq_out[APP_MAX_PIPELINE_MSGQ_OUT];
-
- uint32_t n_pktq_in;
- uint32_t n_pktq_out;
- uint32_t n_msgq_in;
- uint32_t n_msgq_out;
-
- uint32_t timer_period;
-
- char *args_name[APP_MAX_PIPELINE_ARGS];
- char *args_value[APP_MAX_PIPELINE_ARGS];
- uint32_t n_args;
-};
-
-struct app_params;
-
-typedef void (*app_link_op)(struct app_params *app,
- uint32_t link_id,
- uint32_t up,
- void *arg);
-
-#ifndef APP_MAX_PIPELINES
-#define APP_MAX_PIPELINES 64
-#endif
-
-struct app_link_data {
- app_link_op f_link[APP_MAX_PIPELINES];
- void *arg[APP_MAX_PIPELINES];
-};
-
-struct app_pipeline_data {
- void *be;
- void *fe;
- struct pipeline_type *ptype;
- uint64_t timer_period;
- uint32_t enabled;
-};
-
-struct app_thread_pipeline_data {
- uint32_t pipeline_id;
- void *be;
- pipeline_be_op_run f_run;
- pipeline_be_op_timer f_timer;
- uint64_t timer_period;
- uint64_t deadline;
-};
-
-#ifndef APP_MAX_THREAD_PIPELINES
-#define APP_MAX_THREAD_PIPELINES 64
-#endif
-
-#ifndef APP_THREAD_TIMER_PERIOD
-#define APP_THREAD_TIMER_PERIOD 1
-#endif
-
-struct app_thread_data {
- struct app_thread_pipeline_data regular[APP_MAX_THREAD_PIPELINES];
- struct app_thread_pipeline_data custom[APP_MAX_THREAD_PIPELINES];
-
- uint32_t n_regular;
- uint32_t n_custom;
-
- uint64_t timer_period;
- uint64_t thread_req_deadline;
-
- uint64_t deadline;
-
- struct rte_ring *msgq_in;
- struct rte_ring *msgq_out;
-
- uint64_t headroom_time;
- uint64_t headroom_cycles;
- double headroom_ratio;
-} __rte_cache_aligned;
-
-#ifndef APP_MAX_LINKS
-#define APP_MAX_LINKS 16
-#endif
-
-struct app_eal_params {
- /* Map lcore set to physical cpu set */
- char *coremap;
-
- /* Core ID that is used as master */
- uint32_t master_lcore_present;
- uint32_t master_lcore;
-
- /* Number of memory channels */
- uint32_t channels_present;
- uint32_t channels;
-
- /* Memory to allocate (see also --socket-mem) */
- uint32_t memory_present;
- uint32_t memory;
-
- /* Force number of memory ranks (don't detect) */
- uint32_t ranks_present;
- uint32_t ranks;
-
- /* Add a PCI device in black list. */
- char *pci_blacklist[APP_MAX_LINKS];
-
- /* Add a PCI device in white list. */
- char *pci_whitelist[APP_MAX_LINKS];
-
- /* Add a virtual device. */
- char *vdev[APP_MAX_LINKS];
-
- /* Use VMware TSC map instead of native RDTSC */
- uint32_t vmware_tsc_map_present;
- int vmware_tsc_map;
-
- /* Type of this process (primary|secondary|auto) */
- char *proc_type;
-
- /* Set syslog facility */
- char *syslog;
-
- /* Set default log level */
- uint32_t log_level_present;
- uint32_t log_level;
-
- /* Display version information on startup */
- uint32_t version_present;
- int version;
-
- /* This help */
- uint32_t help_present;
- int help;
-
- /* Use malloc instead of hugetlbfs */
- uint32_t no_huge_present;
- int no_huge;
-
- /* Disable PCI */
- uint32_t no_pci_present;
- int no_pci;
-
- /* Disable HPET */
- uint32_t no_hpet_present;
- int no_hpet;
-
- /* No shared config (mmap'd files) */
- uint32_t no_shconf_present;
- int no_shconf;
-
- /* Add driver */
- char *add_driver;
-
- /* Memory to allocate on sockets (comma separated values)*/
- char *socket_mem;
-
- /* Directory where hugetlbfs is mounted */
- char *huge_dir;
-
- /* Prefix for hugepage filenames */
- char *file_prefix;
-
- /* Base virtual address */
- char *base_virtaddr;
-
- /* Create /dev/uioX (usually done by hotplug) */
- uint32_t create_uio_dev_present;
- int create_uio_dev;
-
- /* Interrupt mode for VFIO (legacy|msi|msix) */
- char *vfio_intr;
-
- uint32_t parsed;
-};
-
-#ifndef APP_APPNAME_SIZE
-#define APP_APPNAME_SIZE 256
-#endif
-
-#ifndef APP_MAX_MEMPOOLS
-#define APP_MAX_MEMPOOLS 8
-#endif
-
-#define APP_MAX_HWQ_IN (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
-
-#define APP_MAX_HWQ_OUT (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
-
-#ifndef APP_MAX_PKTQ_SWQ
-#define APP_MAX_PKTQ_SWQ 256
-#endif
-
-#define APP_MAX_PKTQ_TM APP_MAX_LINKS
-
-#ifndef APP_MAX_PKTQ_TAP
-#define APP_MAX_PKTQ_TAP APP_MAX_LINKS
-#endif
-
-#define APP_MAX_PKTQ_KNI APP_MAX_LINKS
-
-#ifndef APP_MAX_PKTQ_SOURCE
-#define APP_MAX_PKTQ_SOURCE 64
-#endif
-
-#ifndef APP_MAX_PKTQ_SINK
-#define APP_MAX_PKTQ_SINK 64
-#endif
-
-#ifndef APP_MAX_MSGQ
-#define APP_MAX_MSGQ 256
-#endif
-
-#ifndef APP_EAL_ARGC
-#define APP_EAL_ARGC 64
-#endif
-
-#ifndef APP_MAX_PIPELINE_TYPES
-#define APP_MAX_PIPELINE_TYPES 64
-#endif
-
-#ifndef APP_MAX_THREADS
-#define APP_MAX_THREADS RTE_MAX_LCORE
-#endif
-
-#ifndef APP_MAX_CMDS
-#define APP_MAX_CMDS 64
-#endif
-
-#ifndef APP_THREAD_HEADROOM_STATS_COLLECT
-#define APP_THREAD_HEADROOM_STATS_COLLECT 1
-#endif
-
-#define APP_CORE_MASK_SIZE \
- (RTE_MAX_LCORE / 64 + ((RTE_MAX_LCORE % 64) ? 1 : 0))
-
-struct app_params {
- /* Config */
- char app_name[APP_APPNAME_SIZE];
- const char *config_file;
- const char *script_file;
- const char *parser_file;
- const char *output_file;
- const char *preproc;
- const char *preproc_args;
- uint64_t port_mask;
- uint32_t log_level;
-
- struct app_eal_params eal_params;
- struct app_mempool_params mempool_params[APP_MAX_MEMPOOLS];
- struct app_link_params link_params[APP_MAX_LINKS];
- struct app_pktq_hwq_in_params hwq_in_params[APP_MAX_HWQ_IN];
- struct app_pktq_hwq_out_params hwq_out_params[APP_MAX_HWQ_OUT];
- struct app_pktq_swq_params swq_params[APP_MAX_PKTQ_SWQ];
- struct app_pktq_tm_params tm_params[APP_MAX_PKTQ_TM];
- struct app_pktq_tap_params tap_params[APP_MAX_PKTQ_TAP];
- struct app_pktq_kni_params kni_params[APP_MAX_PKTQ_KNI];
- struct app_pktq_source_params source_params[APP_MAX_PKTQ_SOURCE];
- struct app_pktq_sink_params sink_params[APP_MAX_PKTQ_SINK];
- struct app_msgq_params msgq_params[APP_MAX_MSGQ];
- struct app_pipeline_params pipeline_params[APP_MAX_PIPELINES];
-
- uint32_t n_mempools;
- uint32_t n_links;
- uint32_t n_pktq_hwq_in;
- uint32_t n_pktq_hwq_out;
- uint32_t n_pktq_swq;
- uint32_t n_pktq_tm;
- uint32_t n_pktq_tap;
- uint32_t n_pktq_kni;
- uint32_t n_pktq_source;
- uint32_t n_pktq_sink;
- uint32_t n_msgq;
- uint32_t n_pipelines;
-
- /* Init */
- char *eal_argv[1 + APP_EAL_ARGC];
- struct cpu_core_map *core_map;
- uint64_t core_mask[APP_CORE_MASK_SIZE];
- struct rte_mempool *mempool[APP_MAX_MEMPOOLS];
- struct app_link_data link_data[APP_MAX_LINKS];
- struct rte_ring *swq[APP_MAX_PKTQ_SWQ];
- struct rte_sched_port *tm[APP_MAX_PKTQ_TM];
- int tap[APP_MAX_PKTQ_TAP];
-#ifdef RTE_LIBRTE_KNI
- struct rte_kni *kni[APP_MAX_PKTQ_KNI];
-#endif /* RTE_LIBRTE_KNI */
- struct rte_ring *msgq[APP_MAX_MSGQ];
- struct pipeline_type pipeline_type[APP_MAX_PIPELINE_TYPES];
- struct app_pipeline_data pipeline_data[APP_MAX_PIPELINES];
- struct app_thread_data thread_data[APP_MAX_THREADS];
- cmdline_parse_ctx_t cmds[APP_MAX_CMDS + 1];
-
- int eal_argc;
- uint32_t n_pipeline_types;
- uint32_t n_cmds;
-};
-
-#define APP_PARAM_VALID(obj) ((obj)->name != NULL)
-
-#define APP_PARAM_COUNT(obj_array, n_objs) \
-{ \
- size_t i; \
- \
- n_objs = 0; \
- for (i = 0; i < RTE_DIM(obj_array); i++) \
- if (APP_PARAM_VALID(&((obj_array)[i]))) \
- n_objs++; \
-}
-
-#define APP_PARAM_FIND(obj_array, key) \
-({ \
- ssize_t obj_idx; \
- const ssize_t obj_count = RTE_DIM(obj_array); \
- \
- for (obj_idx = 0; obj_idx < obj_count; obj_idx++) { \
- if (!APP_PARAM_VALID(&((obj_array)[obj_idx]))) \
- continue; \
- \
- if (strcmp(key, (obj_array)[obj_idx].name) == 0) \
- break; \
- } \
- obj_idx < obj_count ? obj_idx : -ENOENT; \
-})
-
-#define APP_PARAM_FIND_BY_ID(obj_array, prefix, id, obj) \
-do { \
- char name[APP_PARAM_NAME_SIZE]; \
- ssize_t pos; \
- \
- sprintf(name, prefix "%" PRIu32, id); \
- pos = APP_PARAM_FIND(obj_array, name); \
- obj = (pos < 0) ? NULL : &((obj_array)[pos]); \
-} while (0)
-
-#define APP_PARAM_GET_ID(obj, prefix, id) \
-do \
- sscanf(obj->name, prefix "%" SCNu32, &id); \
-while (0) \
-
-#define APP_CHECK(exp, fmt, ...) \
-do { \
- if (!(exp)) { \
- fprintf(stderr, fmt "\n", ## __VA_ARGS__); \
- abort(); \
- } \
-} while (0)
-
-enum app_log_level {
- APP_LOG_LEVEL_HIGH = 1,
- APP_LOG_LEVEL_LOW,
- APP_LOG_LEVELS
-};
-
-#define APP_LOG(app, level, fmt, ...) \
-do { \
- if (app->log_level >= APP_LOG_LEVEL_ ## level) \
- fprintf(stdout, "[APP] " fmt "\n", ## __VA_ARGS__); \
-} while (0)
-
-static inline uint32_t
-app_link_get_n_rxq(struct app_params *app, struct app_link_params *link)
-{
- uint32_t n_rxq = 0, link_id, i;
- uint32_t n_pktq_hwq_in = RTE_MIN(app->n_pktq_hwq_in,
- RTE_DIM(app->hwq_in_params));
-
- APP_PARAM_GET_ID(link, "LINK", link_id);
-
- for (i = 0; i < n_pktq_hwq_in; i++) {
- struct app_pktq_hwq_in_params *p = &app->hwq_in_params[i];
- uint32_t rxq_link_id, rxq_queue_id;
-
- sscanf(p->name, "RXQ%" SCNu32 ".%" SCNu32,
- &rxq_link_id, &rxq_queue_id);
- if (rxq_link_id == link_id)
- n_rxq++;
- }
-
- return n_rxq;
-}
-
-static inline uint32_t
-app_link_get_n_txq(struct app_params *app, struct app_link_params *link)
-{
- uint32_t n_txq = 0, link_id, i;
- uint32_t n_pktq_hwq_out = RTE_MIN(app->n_pktq_hwq_out,
- RTE_DIM(app->hwq_out_params));
-
- APP_PARAM_GET_ID(link, "LINK", link_id);
-
- for (i = 0; i < n_pktq_hwq_out; i++) {
- struct app_pktq_hwq_out_params *p = &app->hwq_out_params[i];
- uint32_t txq_link_id, txq_queue_id;
-
- sscanf(p->name, "TXQ%" SCNu32 ".%" SCNu32,
- &txq_link_id, &txq_queue_id);
- if (txq_link_id == link_id)
- n_txq++;
- }
-
- return n_txq;
-}
-
-static inline uint32_t
-app_rxq_get_readers(struct app_params *app, struct app_pktq_hwq_in_params *rxq)
-{
- uint32_t pos = rxq - app->hwq_in_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_HWQ) &&
- (pktq->id == pos))
- n_readers++;
- }
- }
-
- return n_readers;
-}
-
-static inline uint32_t
-app_swq_get_readers(struct app_params *app, struct app_pktq_swq_params *swq)
-{
- uint32_t pos = swq - app->swq_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_SWQ) &&
- (pktq->id == pos))
- n_readers++;
- }
- }
-
- return n_readers;
-}
-
-static inline struct app_pipeline_params *
-app_swq_get_reader(struct app_params *app,
- struct app_pktq_swq_params *swq,
- uint32_t *pktq_in_id)
-{
- struct app_pipeline_params *reader = NULL;
- uint32_t pos = swq - app->swq_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, id = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_SWQ) &&
- (pktq->id == pos)) {
- n_readers++;
- reader = p;
- id = j;
- }
- }
- }
-
- if (n_readers != 1)
- return NULL;
-
- *pktq_in_id = id;
- return reader;
-}
-
-static inline uint32_t
-app_tm_get_readers(struct app_params *app, struct app_pktq_tm_params *tm)
-{
- uint32_t pos = tm - app->tm_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_TM) &&
- (pktq->id == pos))
- n_readers++;
- }
- }
-
- return n_readers;
-}
-
-static inline struct app_pipeline_params *
-app_tm_get_reader(struct app_params *app,
- struct app_pktq_tm_params *tm,
- uint32_t *pktq_in_id)
-{
- struct app_pipeline_params *reader = NULL;
- uint32_t pos = tm - app->tm_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, id = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_TM) &&
- (pktq->id == pos)) {
- n_readers++;
- reader = p;
- id = j;
- }
- }
- }
-
- if (n_readers != 1)
- return NULL;
-
- *pktq_in_id = id;
- return reader;
-}
-
-static inline uint32_t
-app_tap_get_readers(struct app_params *app, struct app_pktq_tap_params *tap)
-{
- uint32_t pos = tap - app->tap_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_TAP) &&
- (pktq->id == pos))
- n_readers++;
- }
- }
-
- return n_readers;
-}
-
-static inline struct app_pipeline_params *
-app_tap_get_reader(struct app_params *app,
- struct app_pktq_tap_params *tap,
- uint32_t *pktq_in_id)
-{
- struct app_pipeline_params *reader = NULL;
- uint32_t pos = tap - app->tap_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, id = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_TAP) &&
- (pktq->id == pos)) {
- n_readers++;
- reader = p;
- id = j;
- }
- }
- }
-
- if (n_readers != 1)
- return NULL;
-
- *pktq_in_id = id;
- return reader;
-}
-
-static inline uint32_t
-app_kni_get_readers(struct app_params *app, struct app_pktq_kni_params *kni)
-{
- uint32_t pos = kni - app->kni_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_KNI) &&
- (pktq->id == pos))
- n_readers++;
- }
- }
-
- return n_readers;
-}
-
-static inline struct app_pipeline_params *
-app_kni_get_reader(struct app_params *app,
- struct app_pktq_kni_params *kni,
- uint32_t *pktq_in_id)
-{
- struct app_pipeline_params *reader = NULL;
- uint32_t pos = kni - app->kni_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, id = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_KNI) &&
- (pktq->id == pos)) {
- n_readers++;
- reader = p;
- id = j;
- }
- }
- }
-
- if (n_readers != 1)
- return NULL;
-
- *pktq_in_id = id;
- return reader;
-}
-
-static inline uint32_t
-app_source_get_readers(struct app_params *app,
- struct app_pktq_source_params *source)
-{
- uint32_t pos = source - app->source_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
- uint32_t j;
-
- for (j = 0; j < n_pktq_in; j++) {
- struct app_pktq_in_params *pktq = &p->pktq_in[j];
-
- if ((pktq->type == APP_PKTQ_IN_SOURCE) &&
- (pktq->id == pos))
- n_readers++;
- }
- }
-
- return n_readers;
-}
-
-static inline uint32_t
-app_msgq_get_readers(struct app_params *app, struct app_msgq_params *msgq)
-{
- uint32_t pos = msgq - app->msgq_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_readers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_msgq_in = RTE_MIN(p->n_msgq_in, RTE_DIM(p->msgq_in));
- uint32_t j;
-
- for (j = 0; j < n_msgq_in; j++)
- if (p->msgq_in[j] == pos)
- n_readers++;
- }
-
- return n_readers;
-}
-
-static inline uint32_t
-app_txq_get_writers(struct app_params *app, struct app_pktq_hwq_out_params *txq)
-{
- uint32_t pos = txq - app->hwq_out_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_HWQ) &&
- (pktq->id == pos))
- n_writers++;
- }
- }
-
- return n_writers;
-}
-
-static inline uint32_t
-app_swq_get_writers(struct app_params *app, struct app_pktq_swq_params *swq)
-{
- uint32_t pos = swq - app->swq_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_SWQ) &&
- (pktq->id == pos))
- n_writers++;
- }
- }
-
- return n_writers;
-}
-
-static inline struct app_pipeline_params *
-app_swq_get_writer(struct app_params *app,
- struct app_pktq_swq_params *swq,
- uint32_t *pktq_out_id)
-{
- struct app_pipeline_params *writer = NULL;
- uint32_t pos = swq - app->swq_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, id = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_SWQ) &&
- (pktq->id == pos)) {
- n_writers++;
- writer = p;
- id = j;
- }
- }
- }
-
- if (n_writers != 1)
- return NULL;
-
- *pktq_out_id = id;
- return writer;
-}
-
-static inline uint32_t
-app_tm_get_writers(struct app_params *app, struct app_pktq_tm_params *tm)
-{
- uint32_t pos = tm - app->tm_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_TM) &&
- (pktq->id == pos))
- n_writers++;
- }
- }
-
- return n_writers;
-}
-
-static inline struct app_pipeline_params *
-app_tm_get_writer(struct app_params *app,
- struct app_pktq_tm_params *tm,
- uint32_t *pktq_out_id)
-{
- struct app_pipeline_params *writer = NULL;
- uint32_t pos = tm - app->tm_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, id = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_TM) &&
- (pktq->id == pos)) {
- n_writers++;
- writer = p;
- id = j;
- }
- }
- }
-
- if (n_writers != 1)
- return NULL;
-
- *pktq_out_id = id;
- return writer;
-}
-
-static inline uint32_t
-app_tap_get_writers(struct app_params *app, struct app_pktq_tap_params *tap)
-{
- uint32_t pos = tap - app->tap_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_TAP) &&
- (pktq->id == pos))
- n_writers++;
- }
- }
-
- return n_writers;
-}
-
-static inline struct app_pipeline_params *
-app_tap_get_writer(struct app_params *app,
- struct app_pktq_tap_params *tap,
- uint32_t *pktq_out_id)
-{
- struct app_pipeline_params *writer = NULL;
- uint32_t pos = tap - app->tap_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, id = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_TAP) &&
- (pktq->id == pos)) {
- n_writers++;
- writer = p;
- id = j;
- }
- }
- }
-
- if (n_writers != 1)
- return NULL;
-
- *pktq_out_id = id;
- return writer;
-}
-
-static inline uint32_t
-app_kni_get_writers(struct app_params *app, struct app_pktq_kni_params *kni)
-{
- uint32_t pos = kni - app->kni_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_KNI) &&
- (pktq->id == pos))
- n_writers++;
- }
- }
-
- return n_writers;
-}
-
-static inline struct app_pipeline_params *
-app_kni_get_writer(struct app_params *app,
- struct app_pktq_kni_params *kni,
- uint32_t *pktq_out_id)
-{
- struct app_pipeline_params *writer = NULL;
- uint32_t pos = kni - app->kni_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, id = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_KNI) &&
- (pktq->id == pos)) {
- n_writers++;
- writer = p;
- id = j;
- }
- }
- }
-
- if (n_writers != 1)
- return NULL;
-
- *pktq_out_id = id;
- return writer;
-}
-
-static inline uint32_t
-app_sink_get_writers(struct app_params *app, struct app_pktq_sink_params *sink)
-{
- uint32_t pos = sink - app->sink_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
- RTE_DIM(p->pktq_out));
- uint32_t j;
-
- for (j = 0; j < n_pktq_out; j++) {
- struct app_pktq_out_params *pktq = &p->pktq_out[j];
-
- if ((pktq->type == APP_PKTQ_OUT_SINK) &&
- (pktq->id == pos))
- n_writers++;
- }
- }
-
- return n_writers;
-}
-
-static inline uint32_t
-app_msgq_get_writers(struct app_params *app, struct app_msgq_params *msgq)
-{
- uint32_t pos = msgq - app->msgq_params;
- uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
- RTE_DIM(app->pipeline_params));
- uint32_t n_writers = 0, i;
-
- for (i = 0; i < n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- uint32_t n_msgq_out = RTE_MIN(p->n_msgq_out,
- RTE_DIM(p->msgq_out));
- uint32_t j;
-
- for (j = 0; j < n_msgq_out; j++)
- if (p->msgq_out[j] == pos)
- n_writers++;
- }
-
- return n_writers;
-}
-
-static inline struct app_link_params *
-app_get_link_for_rxq(struct app_params *app, struct app_pktq_hwq_in_params *p)
-{
- char link_name[APP_PARAM_NAME_SIZE];
- ssize_t link_param_idx;
- uint32_t rxq_link_id, rxq_queue_id;
-
- sscanf(p->name, "RXQ%" SCNu32 ".%" SCNu32,
- &rxq_link_id, &rxq_queue_id);
- sprintf(link_name, "LINK%" PRIu32, rxq_link_id);
- link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
- APP_CHECK((link_param_idx >= 0),
- "Cannot find %s for %s", link_name, p->name);
-
- return &app->link_params[link_param_idx];
-}
-
-static inline struct app_link_params *
-app_get_link_for_txq(struct app_params *app, struct app_pktq_hwq_out_params *p)
-{
- char link_name[APP_PARAM_NAME_SIZE];
- ssize_t link_param_idx;
- uint32_t txq_link_id, txq_queue_id;
-
- sscanf(p->name, "TXQ%" SCNu32 ".%" SCNu32,
- &txq_link_id, &txq_queue_id);
- sprintf(link_name, "LINK%" PRIu32, txq_link_id);
- link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
- APP_CHECK((link_param_idx >= 0),
- "Cannot find %s for %s", link_name, p->name);
-
- return &app->link_params[link_param_idx];
-}
-
-static inline struct app_link_params *
-app_get_link_for_tm(struct app_params *app, struct app_pktq_tm_params *p_tm)
-{
- char link_name[APP_PARAM_NAME_SIZE];
- uint32_t link_id;
- ssize_t link_param_idx;
-
- sscanf(p_tm->name, "TM%" PRIu32, &link_id);
- sprintf(link_name, "LINK%" PRIu32, link_id);
- link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
- APP_CHECK((link_param_idx >= 0),
- "Cannot find %s for %s", link_name, p_tm->name);
-
- return &app->link_params[link_param_idx];
-}
-
-static inline struct app_link_params *
-app_get_link_for_kni(struct app_params *app, struct app_pktq_kni_params *p_kni)
-{
- char link_name[APP_PARAM_NAME_SIZE];
- uint32_t link_id;
- ssize_t link_param_idx;
-
- sscanf(p_kni->name, "KNI%" PRIu32, &link_id);
- sprintf(link_name, "LINK%" PRIu32, link_id);
- link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
- APP_CHECK((link_param_idx >= 0),
- "Cannot find %s for %s", link_name, p_kni->name);
-
- return &app->link_params[link_param_idx];
-}
-
-static inline uint32_t
-app_core_is_enabled(struct app_params *app, uint32_t lcore_id)
-{
- return(app->core_mask[lcore_id / 64] &
- (1LLU << (lcore_id % 64)));
-}
-
-static inline void
-app_core_enable_in_core_mask(struct app_params *app, int lcore_id)
-{
- app->core_mask[lcore_id / 64] |= 1LLU << (lcore_id % 64);
-}
-
-static inline void
-app_core_build_core_mask_string(struct app_params *app, char *mask_buffer)
-{
- int i;
-
- mask_buffer[0] = '\0';
- for (i = (int)RTE_DIM(app->core_mask); i > 0; i--) {
- /* For Hex representation of bits in uint64_t */
- char buffer[(64 / 8) * 2 + 1];
- memset(buffer, 0, sizeof(buffer));
- snprintf(buffer, sizeof(buffer), "%016" PRIx64,
- app->core_mask[i-1]);
- strcat(mask_buffer, buffer);
- }
-}
-
-void app_pipeline_params_get(struct app_params *app,
- struct app_pipeline_params *p_in,
- struct pipeline_params *p_out);
-
-int app_config_init(struct app_params *app);
-
-int app_config_args(struct app_params *app,
- int argc, char **argv);
-
-int app_config_preproc(struct app_params *app);
-
-int app_config_parse(struct app_params *app,
- const char *file_name);
-
-int app_config_parse_tm(struct app_params *app);
-
-void app_config_save(struct app_params *app,
- const char *file_name);
-
-int app_config_check(struct app_params *app);
-
-int app_init(struct app_params *app);
-
-int app_post_init(struct app_params *app);
-
-int app_thread(void *arg);
-
-int app_pipeline_type_register(struct app_params *app,
- struct pipeline_type *ptype);
-
-struct pipeline_type *app_pipeline_type_find(struct app_params *app,
- char *name);
-
-void app_link_up_internal(struct app_params *app,
- struct app_link_params *cp);
-
-void app_link_down_internal(struct app_params *app,
- struct app_link_params *cp);
-
-#endif
diff --git a/examples/ip_pipeline/cli.c b/examples/ip_pipeline/cli.c
new file mode 100644
index 00000000..102a1d6b
--- /dev/null
+++ b/examples/ip_pipeline/cli.c
@@ -0,0 +1,5240 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+
+#include "cli.h"
+#include "kni.h"
+#include "link.h"
+#include "mempool.h"
+#include "parser.h"
+#include "pipeline.h"
+#include "swq.h"
+#include "tap.h"
+#include "thread.h"
+#include "tmgr.h"
+
+#ifndef CMD_MAX_TOKENS
+#define CMD_MAX_TOKENS 256
+#endif
+
+#define MSG_OUT_OF_MEMORY "Not enough memory.\n"
+#define MSG_CMD_UNKNOWN "Unknown command \"%s\".\n"
+#define MSG_CMD_UNIMPLEM "Command \"%s\" not implemented.\n"
+#define MSG_ARG_NOT_ENOUGH "Not enough arguments for command \"%s\".\n"
+#define MSG_ARG_TOO_MANY "Too many arguments for command \"%s\".\n"
+#define MSG_ARG_MISMATCH "Wrong number of arguments for command \"%s\".\n"
+#define MSG_ARG_NOT_FOUND "Argument \"%s\" not found.\n"
+#define MSG_ARG_INVALID "Invalid value for argument \"%s\".\n"
+#define MSG_FILE_ERR "Error in file \"%s\" at line %u.\n"
+#define MSG_FILE_NOT_ENOUGH "Not enough rules in file \"%s\".\n"
+#define MSG_CMD_FAIL "Command \"%s\" failed.\n"
+
+static int
+is_comment(char *in)
+{
+ if ((strlen(in) && strchr("!#%;", in[0])) ||
+ (strncmp(in, "//", 2) == 0) ||
+ (strncmp(in, "--", 2) == 0))
+ return 1;
+
+ return 0;
+}
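+
+/*
+ * Illustrative script lines that this test classifies as comments:
+ *
+ *   ; semicolon comment
+ *   # hash comment
+ *   // C++-style comment
+ *   -- SQL-style comment
+ *
+ * '!' and '%' in the first column are treated the same way.
+ */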
+
+static const char cmd_mempool_help[] =
+"mempool <mempool_name>\n"
+" buffer <buffer_size>\n"
+" pool <pool_size>\n"
+" cache <cache_size>\n"
+" cpu <cpu_id>\n";
+
+static void
+cmd_mempool(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct mempool_params p;
+ char *name;
+ struct mempool *mempool;
+
+ if (n_tokens != 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "buffer") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buffer");
+ return;
+ }
+
+ if (parser_read_uint32(&p.buffer_size, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "buffer_size");
+ return;
+ }
+
+ if (strcmp(tokens[4], "pool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pool");
+ return;
+ }
+
+ if (parser_read_uint32(&p.pool_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pool_size");
+ return;
+ }
+
+ if (strcmp(tokens[6], "cache") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cache");
+ return;
+ }
+
+ if (parser_read_uint32(&p.cache_size, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cache_size");
+ return;
+ }
+
+ if (strcmp(tokens[8], "cpu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cpu");
+ return;
+ }
+
+ if (parser_read_uint32(&p.cpu_id, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cpu_id");
+ return;
+ }
+
+ mempool = mempool_create(name, &p);
+ if (mempool == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
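+
+/*
+ * Example script line for the 10-token layout checked above (the
+ * mempool name and all sizes are illustrative):
+ *
+ *   mempool MEMPOOL0 buffer 2304 pool 32768 cache 256 cpu 0
+ */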
+
+static const char cmd_link_help[] =
+"link <link_name>\n"
+" dev <device_name> | port <port_id>\n"
+" rxq <n_queues> <queue_size> <mempool_name>\n"
+" txq <n_queues> <queue_size>\n"
+" promiscuous on | off\n"
+" [rss <qid_0> ... <qid_n>]\n";
+
+static void
+cmd_link(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct link_params p;
+ struct link_params_rss rss;
+ struct link *link;
+ char *name;
+
+ memset(&p, 0, sizeof(p));
+
+ if ((n_tokens < 13) || (n_tokens > 14 + LINK_RXQ_RSS_MAX)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "dev") == 0)
+ p.dev_name = tokens[3];
+ else if (strcmp(tokens[2], "port") == 0) {
+ p.dev_name = NULL;
+
+ if (parser_read_uint16(&p.port_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dev or port");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rxq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq");
+ return;
+ }
+
+ if (parser_read_uint32(&p.rx.n_queues, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_queues");
+ return;
+ }
+ if (parser_read_uint32(&p.rx.queue_size, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_size");
+ return;
+ }
+
+ p.rx.mempool_name = tokens[7];
+
+ if (strcmp(tokens[8], "txq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tx.n_queues, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_queues");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tx.queue_size, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_size");
+ return;
+ }
+
+ if (strcmp(tokens[11], "promiscuous") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "promiscuous");
+ return;
+ }
+
+ if (strcmp(tokens[12], "on") == 0)
+ p.promiscuous = 1;
+ else if (strcmp(tokens[12], "off") == 0)
+ p.promiscuous = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "on or off");
+ return;
+ }
+
+ /* RSS */
+ p.rx.rss = NULL;
+ if (n_tokens > 13) {
+ uint32_t queue_id, i;
+
+ if (strcmp(tokens[13], "rss") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rss");
+ return;
+ }
+
+ p.rx.rss = &rss;
+
+ rss.n_queues = 0;
+ for (i = 14; i < n_tokens; i++) {
+ if (parser_read_uint32(&queue_id, tokens[i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_id");
+ return;
+ }
+
+ rss.queue_id[rss.n_queues] = queue_id;
+ rss.n_queues++;
+ }
+ }
+
+ link = link_create(name, &p);
+ if (link == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
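+
+/*
+ * Example script lines (device name, queue counts/sizes and RSS queue
+ * ids are illustrative):
+ *
+ *   link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+ *   link LINK1 port 1 rxq 2 128 MEMPOOL0 txq 2 512 promiscuous off rss 0 1
+ */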
+
+/* Print the link stats and info */
+static void
+print_link_info(struct link *link, char *out, size_t out_size)
+{
+ struct rte_eth_stats stats;
+ struct ether_addr mac_addr;
+ struct rte_eth_link eth_link;
+ uint16_t mtu;
+
+ memset(&stats, 0, sizeof(stats));
+ rte_eth_stats_get(link->port_id, &stats);
+
+ rte_eth_macaddr_get(link->port_id, &mac_addr);
+ rte_eth_link_get(link->port_id, &eth_link);
+ rte_eth_dev_get_mtu(link->port_id, &mtu);
+
+ snprintf(out, out_size,
+ "\n"
+ "%s: flags=<%s> mtu %u\n"
+ "\tether %02X:%02X:%02X:%02X:%02X:%02X rxqueues %u txqueues %u\n"
+ "\tport# %u speed %u Mbps\n"
+ "\tRX packets %" PRIu64" bytes %" PRIu64"\n"
+ "\tRX errors %" PRIu64" missed %" PRIu64" no-mbuf %" PRIu64"\n"
+ "\tTX packets %" PRIu64" bytes %" PRIu64"\n"
+ "\tTX errors %" PRIu64"\n",
+ link->name,
+ eth_link.link_status == 0 ? "DOWN" : "UP",
+ mtu,
+ mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
+ mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
+ mac_addr.addr_bytes[4], mac_addr.addr_bytes[5],
+ link->n_rxq,
+ link->n_txq,
+ link->port_id,
+ eth_link.link_speed,
+ stats.ipackets,
+ stats.ibytes,
+ stats.ierrors,
+ stats.imissed,
+ stats.rx_nombuf,
+ stats.opackets,
+ stats.obytes,
+ stats.oerrors);
+}
+
+/*
+ * link show [<link_name>]
+ */
+static void
+cmd_link_show(char **tokens, uint32_t n_tokens, char *out, size_t out_size)
+{
+ struct link *link;
+ char *link_name;
+
+ if (n_tokens != 2 && n_tokens != 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (n_tokens == 2) {
+ link = link_next(NULL);
+
+ while (link != NULL) {
+ out_size = out_size - strlen(out);
+ out = &out[strlen(out)];
+
+ print_link_info(link, out, out_size);
+ link = link_next(link);
+ }
+ } else {
+ out_size = out_size - strlen(out);
+ out = &out[strlen(out)];
+
+ link_name = tokens[2];
+ link = link_find(link_name);
+
+ if (link == NULL) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "Link does not exist");
+ return;
+ }
+ print_link_info(link, out, out_size);
+ }
+}
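+
+/*
+ * Examples: "link show" prints every link, while "link show LINK0"
+ * prints a single one (LINK0 being an illustrative name).
+ */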
+
+static const char cmd_swq_help[] =
+"swq <swq_name>\n"
+" size <size>\n"
+" cpu <cpu_id>\n";
+
+static void
+cmd_swq(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct swq_params p;
+ char *name;
+ struct swq *swq;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (parser_read_uint32(&p.size, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "size");
+ return;
+ }
+
+ if (strcmp(tokens[4], "cpu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cpu");
+ return;
+ }
+
+ if (parser_read_uint32(&p.cpu_id, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cpu_id");
+ return;
+ }
+
+ swq = swq_create(name, &p);
+ if (swq == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
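+
+/*
+ * Example script line (name and sizes illustrative):
+ *
+ *   swq SWQ0 size 4096 cpu 0
+ */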
+
+static const char cmd_tmgr_subport_profile_help[] =
+"tmgr subport profile\n"
+" <tb_rate> <tb_size>\n"
+" <tc0_rate> <tc1_rate> <tc2_rate> <tc3_rate>\n"
+" <tc_period>\n";
+
+static void
+cmd_tmgr_subport_profile(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_sched_subport_params p;
+ int status, i;
+
+ if (n_tokens != 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (parser_read_uint32(&p.tb_rate, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tb_size, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_size");
+ return;
+ }
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ if (parser_read_uint32(&p.tc_rate[i], tokens[5 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc_rate");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tc_period, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc_period");
+ return;
+ }
+
+ status = tmgr_subport_profile_add(&p);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
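+
+/*
+ * Example script line, 10 tokens: <tb_rate> <tb_size> <tc0..tc3_rate>
+ * <tc_period>; all values illustrative, rates assumed in bytes/s:
+ *
+ *   tmgr subport profile 1250000000 1000000 1250000000 1250000000 1250000000 1250000000 10
+ */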
+
+static const char cmd_tmgr_pipe_profile_help[] =
+"tmgr pipe profile\n"
+" <tb_rate> <tb_size>\n"
+" <tc0_rate> <tc1_rate> <tc2_rate> <tc3_rate>\n"
+" <tc_period>\n"
+" <tc_ov_weight>\n"
+" <wrr_weight0..15>\n";
+
+static void
+cmd_tmgr_pipe_profile(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_sched_pipe_params p;
+ int status, i;
+
+ if (n_tokens != 27) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (parser_read_uint32(&p.tb_rate, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tb_size, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tb_size");
+ return;
+ }
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ if (parser_read_uint32(&p.tc_rate[i], tokens[5 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc_rate");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tc_period, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc_period");
+ return;
+ }
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ if (parser_read_uint8(&p.tc_ov_weight, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "tc_ov_weight");
+ return;
+ }
+#endif
+
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++)
+ if (parser_read_uint8(&p.wrr_weights[i], tokens[11 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "wrr_weights");
+ return;
+ }
+
+ status = tmgr_pipe_profile_add(&p);
+ if (status != 0) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
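+
+/*
+ * Example script line (27 tokens: tb_rate, tb_size, four TC rates,
+ * tc_period, tc_ov_weight, then 16 WRR weights; values illustrative):
+ *
+ *   tmgr pipe profile 305175 1000000 305175 305175 305175 305175 40 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
+ */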
+
+static const char cmd_tmgr_help[] =
+"tmgr <tmgr_name>\n"
+" rate <rate>\n"
+" spp <n_subports_per_port>\n"
+" pps <n_pipes_per_subport>\n"
+" qsize <qsize_tc0> <qsize_tc1> <qsize_tc2> <qsize_tc3>\n"
+" fo <frame_overhead>\n"
+" mtu <mtu>\n"
+" cpu <cpu_id>\n";
+
+static void
+cmd_tmgr(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct tmgr_port_params p;
+ char *name;
+ struct tmgr_port *tmgr_port;
+ int i;
+
+ if (n_tokens != 19) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "rate") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rate");
+ return;
+ }
+
+ if (parser_read_uint32(&p.rate, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "rate");
+ return;
+ }
+
+ if (strcmp(tokens[4], "spp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp");
+ return;
+ }
+
+ if (parser_read_uint32(&p.n_subports_per_port, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_subports_per_port");
+ return;
+ }
+
+ if (strcmp(tokens[6], "pps") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps");
+ return;
+ }
+
+ if (parser_read_uint32(&p.n_pipes_per_subport, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport");
+ return;
+ }
+
+ if (strcmp(tokens[8], "qsize") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "qsize");
+ return;
+ }
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ if (parser_read_uint16(&p.qsize[i], tokens[9 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "qsize");
+ return;
+ }
+
+ if (strcmp(tokens[13], "fo") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "fo");
+ return;
+ }
+
+ if (parser_read_uint32(&p.frame_overhead, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "frame_overhead");
+ return;
+ }
+
+ if (strcmp(tokens[15], "mtu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mtu");
+ return;
+ }
+
+ if (parser_read_uint32(&p.mtu, tokens[16]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "mtu");
+ return;
+ }
+
+ if (strcmp(tokens[17], "cpu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cpu");
+ return;
+ }
+
+ if (parser_read_uint32(&p.cpu_id, tokens[18]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cpu_id");
+ return;
+ }
+
+ tmgr_port = tmgr_port_create(name, &p);
+ if (tmgr_port == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
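+
+/*
+ * Example script line (all names and values illustrative):
+ *
+ *   tmgr TMGR0 rate 1250000000 spp 1 pps 4096 qsize 64 64 64 64 fo 24 mtu 1522 cpu 0
+ */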
+
+static const char cmd_tmgr_subport_help[] =
+"tmgr <tmgr_name> subport <subport_id>\n"
+" profile <subport_profile_id>\n";
+
+static void
+cmd_tmgr_subport(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ uint32_t subport_id, subport_profile_id;
+ int status;
+ char *name;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (parser_read_uint32(&subport_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "subport_id");
+ return;
+ }
+
+ if (parser_read_uint32(&subport_profile_id, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "subport_profile_id");
+ return;
+ }
+
+ status = tmgr_subport_config(name, subport_id, subport_profile_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
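+
+/*
+ * Example: attach subport profile 0 to subport 0 of an (illustrative)
+ * traffic manager port TMGR0:
+ *
+ *   tmgr TMGR0 subport 0 profile 0
+ */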
+
+
+static const char cmd_tmgr_subport_pipe_help[] =
+"tmgr <tmgr_name> subport <subport_id> pipe\n"
+" from <pipe_id_first> to <pipe_id_last>\n"
+" profile <pipe_profile_id>\n";
+
+static void
+cmd_tmgr_subport_pipe(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ uint32_t subport_id, pipe_id_first, pipe_id_last, pipe_profile_id;
+ int status;
+ char *name;
+
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (parser_read_uint32(&subport_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "subport_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "pipe") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipe");
+ return;
+ }
+
+ if (strcmp(tokens[5], "from") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "from");
+ return;
+ }
+
+ if (parser_read_uint32(&pipe_id_first, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pipe_id_first");
+ return;
+ }
+
+ if (strcmp(tokens[7], "to") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "to");
+ return;
+ }
+
+ if (parser_read_uint32(&pipe_id_last, tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pipe_id_last");
+ return;
+ }
+
+ if (strcmp(tokens[9], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (parser_read_uint32(&pipe_profile_id, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pipe_profile_id");
+ return;
+ }
+
+ status = tmgr_pipe_config(name, subport_id, pipe_id_first,
+ pipe_id_last, pipe_profile_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
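+
+/*
+ * Example: configure pipes 0..4095 of subport 0 with pipe profile 0
+ * (name and ids illustrative):
+ *
+ *   tmgr TMGR0 subport 0 pipe from 0 to 4095 profile 0
+ */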
+
+
+static const char cmd_tap_help[] =
+"tap <tap_name>\n";
+
+static void
+cmd_tap(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *name;
+ struct tap *tap;
+
+ if (n_tokens != 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ tap = tap_create(name);
+ if (tap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
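+
+/*
+ * Example script line (interface name illustrative):
+ *
+ *   tap TAP0
+ */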
+
+static const char cmd_kni_help[] =
+"kni <kni_name>\n"
+" link <link_name>\n"
+" mempool <mempool_name>\n"
+" [thread <thread_id>]\n";
+
+static void
+cmd_kni(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct kni_params p;
+ char *name;
+ struct kni *kni;
+
+ memset(&p, 0, sizeof(p));
+ if ((n_tokens != 6) && (n_tokens != 8)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "link") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "link");
+ return;
+ }
+
+ p.link_name = tokens[3];
+
+ if (strcmp(tokens[4], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mempool");
+ return;
+ }
+
+ p.mempool_name = tokens[5];
+
+ if (n_tokens == 8) {
+ if (strcmp(tokens[6], "thread") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "thread");
+ return;
+ }
+
+ if (parser_read_uint32(&p.thread_id, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ p.force_bind = 1;
+ } else
+ p.force_bind = 0;
+
+ kni = kni_create(name, &p);
+ if (kni == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
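+
+/*
+ * Example script lines (object names and thread id illustrative); the
+ * second form force-binds the KNI kernel thread to the given thread:
+ *
+ *   kni KNI0 link LINK0 mempool MEMPOOL0
+ *   kni KNI1 link LINK1 mempool MEMPOOL0 thread 1
+ */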
+
+
+static const char cmd_port_in_action_profile_help[] =
+"port in action profile <profile_name>\n"
+" [filter match | mismatch offset <key_offset> mask <key_mask> key <key_value> port <port_id>]\n"
+" [balance offset <key_offset> mask <key_mask> port <port_id0> ... <port_id15>]\n";
+
+static void
+cmd_port_in_action_profile(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct port_in_action_profile_params p;
+ struct port_in_action_profile *ap;
+ char *name;
+ uint32_t t0;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (strcmp(tokens[2], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action");
+ return;
+ }
+
+ if (strcmp(tokens[3], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ name = tokens[4];
+
+ t0 = 5;
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "filter") == 0)) {
+ uint32_t size;
+
+ if (n_tokens < t0 + 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "port in action profile filter");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "match") == 0)
+ p.fltr.filter_on_match = 1;
+ else if (strcmp(tokens[t0 + 1], "mismatch") == 0)
+ p.fltr.filter_on_match = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match or mismatch");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.fltr.key_offset, tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE;
+ if ((parse_hex_string(tokens[t0 + 5], p.fltr.key_mask, &size) != 0) ||
+ (size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE)) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 6], "key") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key");
+ return;
+ }
+
+ size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE;
+ if ((parse_hex_string(tokens[t0 + 7], p.fltr.key, &size) != 0) ||
+ (size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE)) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_value");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 8], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (parser_read_uint32(&p.fltr.port_id, tokens[t0 + 9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_FLTR;
+ t0 += 10;
+ } /* filter */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "balance") == 0)) {
+ uint32_t i;
+
+ if (n_tokens < t0 + 22) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "port in action profile balance");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.lb.key_offset, tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX;
+ if (parse_hex_string(tokens[t0 + 4], p.lb.key_mask, &p.lb.key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ for (i = 0; i < 16; i++)
+ if (parser_read_uint32(&p.lb.port_id[i], tokens[t0 + 6 + i]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_LB;
+ t0 += 22;
+ } /* balance */
+
+ if (t0 < n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ ap = port_in_action_profile_create(name, &p);
+ if (ap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
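+
+/*
+ * Example script lines (all names, offsets, masks and keys are
+ * illustrative; the mask/key lengths assume a 16-byte filter key):
+ *
+ *   port in action profile AP1 filter match offset 278 mask FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF key 000102030405060708090A0B0C0D0E0F port 0
+ *   port in action profile AP2 balance offset 278 mask 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF port 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3
+ */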
+
+
+static const char cmd_table_action_profile_help[] =
+"table action profile <profile_name>\n"
+" ipv4 | ipv6\n"
+" offset <ip_offset>\n"
+" fwd\n"
+" [balance offset <key_offset> mask <key_mask> outoffset <out_offset>]\n"
+" [meter srtcm | trtcm\n"
+" tc <n_tc>\n"
+" stats none | pkts | bytes | both]\n"
+" [tm spp <n_subports_per_port> pps <n_pipes_per_subport>]\n"
+" [encap ether | vlan | qinq | mpls | pppoe]\n"
+" [nat src | dst\n"
+" proto udp | tcp]\n"
+" [ttl drop | fwd\n"
+" stats none | pkts]\n"
+" [stats pkts | bytes | both]\n"
+" [time]\n";
+
+static void
+cmd_table_action_profile(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct table_action_profile_params p;
+ struct table_action_profile *ap;
+ char *name;
+ uint32_t t0;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (strcmp(tokens[1], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action");
+ return;
+ }
+
+ if (strcmp(tokens[2], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ name = tokens[3];
+
+ if (strcmp(tokens[4], "ipv4") == 0)
+ p.common.ip_version = 1;
+ else if (strcmp(tokens[4], "ipv6") == 0)
+ p.common.ip_version = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[5], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.common.ip_offset, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ip_offset");
+ return;
+ }
+
+ if (strcmp(tokens[7], "fwd") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "fwd");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
+
+ t0 = 8;
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "balance") == 0)) {
+ if (n_tokens < t0 + 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "table action profile balance");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.lb.key_offset, tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX;
+ if (parse_hex_string(tokens[t0 + 4], p.lb.key_mask, &p.lb.key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "outoffset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "outoffset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.lb.out_offset, tokens[t0 + 6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "out_offset");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_LB;
+ t0 += 7;
+ } /* balance */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "meter") == 0)) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile meter");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "srtcm") == 0)
+ p.mtr.alg = RTE_TABLE_ACTION_METER_SRTCM;
+ else if (strcmp(tokens[t0 + 1], "trtcm") == 0)
+ p.mtr.alg = RTE_TABLE_ACTION_METER_TRTCM;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "srtcm or trtcm");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "tc") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc");
+ return;
+ }
+
+ if (parser_read_uint32(&p.mtr.n_tc, tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_tc");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 5], "none") == 0) {
+ p.mtr.n_packets_enabled = 0;
+ p.mtr.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 5], "pkts") == 0) {
+ p.mtr.n_packets_enabled = 1;
+ p.mtr.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 5], "bytes") == 0) {
+ p.mtr.n_packets_enabled = 0;
+ p.mtr.n_bytes_enabled = 1;
+ } else if (strcmp(tokens[t0 + 5], "both") == 0) {
+ p.mtr.n_packets_enabled = 1;
+ p.mtr.n_bytes_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "none or pkts or bytes or both");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_MTR;
+ t0 += 6;
+ } /* meter */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "tm") == 0)) {
+ if (n_tokens < t0 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile tm");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "spp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tm.n_subports_per_port,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_subports_per_port");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "pps") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tm.n_pipes_per_subport,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_pipes_per_subport");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TM;
+ t0 += 5;
+ } /* tm */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "encap") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "action profile encap");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "ether") == 0)
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER;
+ else if (strcmp(tokens[t0 + 1], "vlan") == 0)
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN;
+ else if (strcmp(tokens[t0 + 1], "qinq") == 0)
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ;
+ else if (strcmp(tokens[t0 + 1], "mpls") == 0)
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS;
+ else if (strcmp(tokens[t0 + 1], "pppoe") == 0)
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE;
+ else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "encap");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP;
+ t0 += 2;
+ } /* encap */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "nat") == 0)) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile nat");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "src") == 0)
+ p.nat.source_nat = 1;
+ else if (strcmp(tokens[t0 + 1], "dst") == 0)
+ p.nat.source_nat = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "src or dst");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "proto") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "proto");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "tcp") == 0)
+ p.nat.proto = 0x06;
+ else if (strcmp(tokens[t0 + 3], "udp") == 0)
+ p.nat.proto = 0x11;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "tcp or udp");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_NAT;
+ t0 += 4;
+ } /* nat */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "ttl") == 0)) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile ttl");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "drop") == 0)
+ p.ttl.drop = 1;
+ else if (strcmp(tokens[t0 + 1], "fwd") == 0)
+ p.ttl.drop = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "drop or fwd");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "none") == 0)
+ p.ttl.n_packets_enabled = 0;
+ else if (strcmp(tokens[t0 + 3], "pkts") == 0)
+ p.ttl.n_packets_enabled = 1;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "none or pkts");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TTL;
+ t0 += 4;
+ } /* ttl */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "stats") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "table action profile stats");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 1], "pkts") == 0) {
+ p.stats.n_packets_enabled = 1;
+ p.stats.n_bytes_enabled = 0;
+ } else if (strcmp(tokens[t0 + 1], "bytes") == 0) {
+ p.stats.n_packets_enabled = 0;
+ p.stats.n_bytes_enabled = 1;
+ } else if (strcmp(tokens[t0 + 1], "both") == 0) {
+ p.stats.n_packets_enabled = 1;
+ p.stats.n_bytes_enabled = 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "pkts or bytes or both");
+ return;
+ }
+
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_STATS;
+ t0 += 2;
+ } /* stats */
+
+ if ((t0 < n_tokens) && (strcmp(tokens[t0], "time") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TIME;
+ t0 += 1;
+ } /* time */
+
+ if (t0 < n_tokens) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ ap = table_action_profile_create(name, &p);
+ if (ap == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+static const char cmd_pipeline_help[] =
+"pipeline <pipeline_name>\n"
+" period <timer_period_ms>\n"
+" offset_port_id <offset_port_id>\n"
+" cpu <cpu_id>\n";
+
+static void
+cmd_pipeline(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct pipeline_params p;
+ char *name;
+ struct pipeline *pipeline;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "period") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "period");
+ return;
+ }
+
+ if (parser_read_uint32(&p.timer_period_ms, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "timer_period_ms");
+ return;
+ }
+
+ if (strcmp(tokens[4], "offset_port_id") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset_port_id");
+ return;
+ }
+
+ if (parser_read_uint32(&p.offset_port_id, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "offset_port_id");
+ return;
+ }
+
+ if (strcmp(tokens[6], "cpu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cpu");
+ return;
+ }
+
+ if (parser_read_uint32(&p.cpu_id, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cpu_id");
+ return;
+ }
+
+ pipeline = pipeline_create(name, &p);
+ if (pipeline == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+static const char cmd_pipeline_port_in_help[] =
+"pipeline <pipeline_name> port in\n"
+" bsz <burst_size>\n"
+" link <link_name> rxq <queue_id>\n"
+" | swq <swq_name>\n"
+" | tmgr <tmgr_name>\n"
+" | tap <tap_name> mempool <mempool_name> mtu <mtu>\n"
+" | kni <kni_name>\n"
+" | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>\n"
+" [action <port_in_action_profile_name>]\n"
+" [disabled]\n";
+
+static void
+cmd_pipeline_port_in(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct port_in_params p;
+ char *pipeline_name;
+ uint32_t t0;
+ int enabled, status;
+
+ if (n_tokens < 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (strcmp(tokens[4], "bsz") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz");
+ return;
+ }
+
+ if (parser_read_uint32(&p.burst_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "burst_size");
+ return;
+ }
+
+ t0 = 6;
+
+ if (strcmp(tokens[t0], "link") == 0) {
+ if (n_tokens < t0 + 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in link");
+ return;
+ }
+
+ p.type = PORT_IN_RXQ;
+
+ p.dev_name = tokens[t0 + 1];
+
+ if (strcmp(tokens[t0 + 2], "rxq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq");
+ return;
+ }
+
+ if (parser_read_uint16(&p.rxq.queue_id, tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_id");
+ return;
+ }
+ t0 += 4;
+ } else if (strcmp(tokens[t0], "swq") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in swq");
+ return;
+ }
+
+ p.type = PORT_IN_SWQ;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "tmgr") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in tmgr");
+ return;
+ }
+
+ p.type = PORT_IN_TMGR;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "tap") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in tap");
+ return;
+ }
+
+ p.type = PORT_IN_TAP;
+
+ p.dev_name = tokens[t0 + 1];
+
+ if (strcmp(tokens[t0 + 2], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mempool");
+ return;
+ }
+
+ p.tap.mempool_name = tokens[t0 + 3];
+
+ if (strcmp(tokens[t0 + 4], "mtu") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mtu");
+ return;
+ }
+
+ if (parser_read_uint32(&p.tap.mtu, tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "mtu");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "kni") == 0) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in kni");
+ return;
+ }
+
+ p.type = PORT_IN_KNI;
+
+ p.dev_name = tokens[t0 + 1];
+
+ t0 += 2;
+ } else if (strcmp(tokens[t0], "source") == 0) {
+ if (n_tokens < t0 + 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in source");
+ return;
+ }
+
+ p.type = PORT_IN_SOURCE;
+
+ p.dev_name = NULL;
+
+ if (strcmp(tokens[t0 + 1], "mempool") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "mempool");
+ return;
+ }
+
+ p.source.mempool_name = tokens[t0 + 2];
+
+ if (strcmp(tokens[t0 + 3], "file") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "file");
+ return;
+ }
+
+ p.source.file_name = tokens[t0 + 4];
+
+ if (strcmp(tokens[t0 + 5], "bpp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "bpp");
+ return;
+ }
+
+ if (parser_read_uint32(&p.source.n_bytes_per_pkt, tokens[t0 + 6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "n_bytes_per_pkt");
+ return;
+ }
+
+ t0 += 7;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ p.action_profile_name = NULL;
+ if ((n_tokens > t0) && (strcmp(tokens[t0], "action") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "action");
+ return;
+ }
+
+ p.action_profile_name = tokens[t0 + 1];
+
+ t0 += 2;
+ }
+
+ enabled = 1;
+ if ((n_tokens > t0) &&
+ (strcmp(tokens[t0], "disabled") == 0)) {
+ enabled = 0;
+
+ t0 += 1;
+ }
+
+ if (n_tokens != t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = pipeline_port_in_create(pipeline_name,
+ &p, enabled);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+static const char cmd_pipeline_port_out_help[] =
+"pipeline <pipeline_name> port out\n"
+" bsz <burst_size>\n"
+" link <link_name> txq <txq_id>\n"
+" | swq <swq_name>\n"
+" | tmgr <tmgr_name>\n"
+" | tap <tap_name>\n"
+" | kni <kni_name>\n"
+" | sink [file <file_name> pkts <max_n_pkts>]\n";
+
+static void
+cmd_pipeline_port_out(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct port_out_params p;
+ char *pipeline_name;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+
+ if (n_tokens < 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "out") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+ return;
+ }
+
+ if (strcmp(tokens[4], "bsz") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz");
+ return;
+ }
+
+ if (parser_read_uint32(&p.burst_size, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "burst_size");
+ return;
+ }
+
+ if (strcmp(tokens[6], "link") == 0) {
+ if (n_tokens != 10) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out link");
+ return;
+ }
+
+ p.type = PORT_OUT_TXQ;
+
+ p.dev_name = tokens[7];
+
+ if (strcmp(tokens[8], "txq") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq");
+ return;
+ }
+
+ if (parser_read_uint16(&p.txq.queue_id, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
+ } else if (strcmp(tokens[6], "swq") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out swq");
+ return;
+ }
+
+ p.type = PORT_OUT_SWQ;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "tmgr") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out tmgr");
+ return;
+ }
+
+ p.type = PORT_OUT_TMGR;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "tap") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out tap");
+ return;
+ }
+
+ p.type = PORT_OUT_TAP;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "kni") == 0) {
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out kni");
+ return;
+ }
+
+ p.type = PORT_OUT_KNI;
+
+ p.dev_name = tokens[7];
+ } else if (strcmp(tokens[6], "sink") == 0) {
+ if ((n_tokens != 7) && (n_tokens != 11)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out sink");
+ return;
+ }
+
+ p.type = PORT_OUT_SINK;
+
+ p.dev_name = NULL;
+
+ if (n_tokens == 7) {
+ p.sink.file_name = NULL;
+ p.sink.max_n_pkts = 0;
+ } else {
+ if (strcmp(tokens[7], "file") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "file");
+ return;
+ }
+
+ p.sink.file_name = tokens[8];
+
+ if (strcmp(tokens[9], "pkts") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pkts");
+ return;
+ }
+
+ if (parser_read_uint32(&p.sink.max_n_pkts, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "max_n_pkts");
+ return;
+ }
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ status = pipeline_port_out_create(pipeline_name, &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+static const char cmd_pipeline_table_help[] =
+"pipeline <pipeline_name> table\n"
+" match\n"
+" acl\n"
+" ipv4 | ipv6\n"
+" offset <ip_header_offset>\n"
+" size <n_rules>\n"
+" | array\n"
+" offset <key_offset>\n"
+" size <n_keys>\n"
+" | hash\n"
+" ext | lru\n"
+" key <key_size>\n"
+" mask <key_mask>\n"
+" offset <key_offset>\n"
+" buckets <n_buckets>\n"
+" size <n_keys>\n"
+" | lpm\n"
+" ipv4 | ipv6\n"
+" offset <ip_header_offset>\n"
+" size <n_rules>\n"
+" | stub\n"
+" [action <table_action_profile_name>]\n";
+
+static void
+cmd_pipeline_table(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX];
+ struct table_params p;
+ char *pipeline_name;
+ uint32_t t0;
+ int status;
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (strcmp(tokens[3], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+ return;
+ }
+
+ t0 = 4;
+ if (strcmp(tokens[t0], "acl") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table acl");
+ return;
+ }
+
+ p.match_type = TABLE_ACL;
+
+ if (strcmp(tokens[t0 + 1], "ipv4") == 0)
+ p.match.acl.ip_version = 1;
+ else if (strcmp(tokens[t0 + 1], "ipv6") == 0)
+ p.match.acl.ip_version = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.acl.ip_header_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "ip_header_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.acl.n_rules,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "array") == 0) {
+ if (n_tokens < t0 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table array");
+ return;
+ }
+
+ p.match_type = TABLE_ARRAY;
+
+ if (strcmp(tokens[t0 + 1], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.array.key_offset,
+ tokens[t0 + 2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 3], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.array.n_keys,
+ tokens[t0 + 4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_keys");
+ return;
+ }
+
+ t0 += 5;
+ } else if (strcmp(tokens[t0], "hash") == 0) {
+ uint32_t key_mask_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+ if (n_tokens < t0 + 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table hash");
+ return;
+ }
+
+ p.match_type = TABLE_HASH;
+
+ if (strcmp(tokens[t0 + 1], "ext") == 0)
+ p.match.hash.extendable_bucket = 1;
+ else if (strcmp(tokens[t0 + 1], "lru") == 0)
+ p.match.hash.extendable_bucket = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ext or lru");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "key") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key");
+ return;
+ }
+
+ if ((parser_read_uint32(&p.match.hash.key_size,
+ tokens[t0 + 3]) != 0) ||
+ (p.match.hash.key_size == 0) ||
+ (p.match.hash.key_size > TABLE_RULE_MATCH_SIZE_MAX)) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_size");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "mask") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask");
+ return;
+ }
+
+ if ((parse_hex_string(tokens[t0 + 5],
+ key_mask, &key_mask_size) != 0) ||
+ (key_mask_size != p.match.hash.key_size)) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
+ return;
+ }
+ p.match.hash.key_mask = key_mask;
+
+ if (strcmp(tokens[t0 + 6], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.hash.key_offset,
+ tokens[t0 + 7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 8], "buckets") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buckets");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.hash.n_buckets,
+ tokens[t0 + 9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_buckets");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 10], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.hash.n_keys,
+ tokens[t0 + 11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_keys");
+ return;
+ }
+
+ t0 += 12;
+ } else if (strcmp(tokens[t0], "lpm") == 0) {
+ if (n_tokens < t0 + 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline table lpm");
+ return;
+ }
+
+ p.match_type = TABLE_LPM;
+
+ if (strcmp(tokens[t0 + 1], "ipv4") == 0)
+ p.match.lpm.key_size = 4;
+ else if (strcmp(tokens[t0 + 1], "ipv6") == 0)
+ p.match.lpm.key_size = 16;
+ else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.lpm.key_offset,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 4], "size") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size");
+ return;
+ }
+
+ if (parser_read_uint32(&p.match.lpm.n_rules,
+ tokens[t0 + 5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ t0 += 6;
+ } else if (strcmp(tokens[t0], "stub") == 0) {
+ p.match_type = TABLE_STUB;
+
+ t0 += 1;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ p.action_profile_name = NULL;
+ if ((n_tokens > t0) && (strcmp(tokens[t0], "action") == 0)) {
+ if (n_tokens < t0 + 2) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, "action");
+ return;
+ }
+
+ p.action_profile_name = tokens[t0 + 1];
+
+ t0 += 2;
+ }
+
+ if (n_tokens > t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = pipeline_table_create(pipeline_name, &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+static const char cmd_pipeline_port_in_table_help[] =
+"pipeline <pipeline_name> port in <port_id> table <table_id>\n";
+
+static void
+cmd_pipeline_port_in_table(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id, table_id;
+ int status;
+
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ status = pipeline_port_in_connect_to_table(pipeline_name,
+ port_id,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+
+static const char cmd_pipeline_port_in_stats_help[] =
+"pipeline <pipeline_name> port in <port_id> stats read [clear]\n";
+
+#define MSG_PIPELINE_PORT_IN_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts dropped by AH: %" PRIu64 "\n" \
+ "Pkts dropped by other: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_port_in_stats(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_port_in_stats stats;
+ char *pipeline_name;
+ uint32_t port_id;
+ int clear, status;
+
+ if ((n_tokens != 7) && (n_tokens != 8)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[6], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 8) {
+ if (strcmp(tokens[7], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = pipeline_port_in_stats_read(pipeline_name,
+ port_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_PORT_IN_STATS,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+
+static const char cmd_pipeline_port_in_enable_help[] =
+"pipeline <pipeline_name> port in <port_id> enable\n";
+
+static void
+cmd_pipeline_port_in_enable(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "enable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+ return;
+ }
+
+ status = pipeline_port_in_enable(pipeline_name, port_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+
+static const char cmd_pipeline_port_in_disable_help[] =
+"pipeline <pipeline_name> port in <port_id> disable\n";
+
+static void
+cmd_pipeline_port_in_disable(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t port_id;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "in") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "disable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+ return;
+ }
+
+ status = pipeline_port_in_disable(pipeline_name, port_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+
+static const char cmd_pipeline_port_out_stats_help[] =
+"pipeline <pipeline_name> port out <port_id> stats read [clear]\n";
+
+#define MSG_PIPELINE_PORT_OUT_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts dropped by AH: %" PRIu64 "\n" \
+ "Pkts dropped by other: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_port_out_stats(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_port_out_stats stats;
+ char *pipeline_name;
+ uint32_t port_id;
+ int clear, status;
+
+ if ((n_tokens != 7) && (n_tokens != 8)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "port") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (strcmp(tokens[3], "out") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ if (strcmp(tokens[5], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[6], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 8) {
+ if (strcmp(tokens[7], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = pipeline_port_out_stats_read(pipeline_name,
+ port_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_PORT_OUT_STATS,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+
+static const char cmd_pipeline_table_stats_help[] =
+"pipeline <pipeline_name> table <table_id> stats read [clear]\n";
+
+#define MSG_PIPELINE_TABLE_STATS \
+ "Pkts in: %" PRIu64 "\n" \
+ "Pkts in with lookup miss: %" PRIu64 "\n" \
+ "Pkts in with lookup hit dropped by AH: %" PRIu64 "\n" \
+ "Pkts in with lookup hit dropped by others: %" PRIu64 "\n" \
+ "Pkts in with lookup miss dropped by AH: %" PRIu64 "\n" \
+ "Pkts in with lookup miss dropped by others: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_table_stats(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_pipeline_table_stats stats;
+ char *pipeline_name;
+ uint32_t table_id;
+ int clear, status;
+
+ if ((n_tokens != 6) && (n_tokens != 7)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "stats") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+ return;
+ }
+
+ if (strcmp(tokens[5], "read") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+ return;
+ }
+
+ clear = 0;
+ if (n_tokens == 7) {
+ if (strcmp(tokens[6], "clear") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+ return;
+ }
+
+ clear = 1;
+ }
+
+ status = pipeline_table_stats_read(pipeline_name,
+ table_id,
+ &stats,
+ clear);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+
+ snprintf(out, out_size, MSG_PIPELINE_TABLE_STATS,
+ stats.stats.n_pkts_in,
+ stats.stats.n_pkts_lookup_miss,
+ stats.n_pkts_dropped_by_lkp_hit_ah,
+ stats.n_pkts_dropped_lkp_hit,
+ stats.n_pkts_dropped_by_lkp_miss_ah,
+ stats.n_pkts_dropped_lkp_miss);
+}
+
+/**
+ * <match> ::=
+ *
+ * match
+ * acl
+ * priority <priority>
+ * ipv4 | ipv6 <sa> <sa_depth> <da> <da_depth>
+ * <sp0> <sp1> <dp0> <dp1> <proto>
+ * | array <pos>
+ * | hash
+ * raw <key>
+ * | ipv4_5tuple <sa> <da> <sp> <dp> <proto>
+ * | ipv6_5tuple <sa> <da> <sp> <dp> <proto>
+ * | ipv4_addr <addr>
+ * | ipv6_addr <addr>
+ * | qinq <svlan> <cvlan>
+ * | lpm
+ * ipv4 | ipv6 <addr> <depth>
+ */
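+
+/*
+ * Illustrative <match> clauses (addresses, ports, protocol numbers and
+ * depths are placeholders):
+ *
+ *   match lpm ipv4 100.0.0.0 24
+ *   match hash ipv4_5tuple 100.0.0.1 200.0.0.1 100 200 6
+ *
+ * The packed structures below mirror the layout of the packet fields
+ * that the hash match keys are built from.
+ */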
+struct pkt_key_qinq {
+ uint16_t ethertype_svlan;
+ uint16_t svlan;
+ uint16_t ethertype_cvlan;
+ uint16_t cvlan;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_5tuple {
+ uint8_t time_to_live;
+ uint8_t proto;
+ uint16_t hdr_checksum;
+ uint32_t sa;
+ uint32_t da;
+ uint16_t sp;
+ uint16_t dp;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_5tuple {
+ uint16_t payload_length;
+ uint8_t proto;
+ uint8_t hop_limit;
+ uint8_t sa[16];
+ uint8_t da[16];
+ uint16_t sp;
+ uint16_t dp;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_addr {
+ uint32_t addr;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_addr {
+ uint8_t addr[16];
+} __attribute__((__packed__));
+
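+/*
+ * Returns the number of tokens consumed on success, or 0 on error (with
+ * an error message written to out). The parse_table_action_*() helpers
+ * below follow the same convention, except that they leave the error
+ * reporting to their caller.
+ */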
+static uint32_t
+parse_match(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size,
+ struct table_rule_match *m)
+{
+ memset(m, 0, sizeof(*m));
+
+ if (n_tokens < 2)
+ return 0;
+
+ if (strcmp(tokens[0], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+ return 0;
+ }
+
+ if (strcmp(tokens[1], "acl") == 0) {
+ if (n_tokens < 14) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_ACL;
+
+ if (strcmp(tokens[2], "priority") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority");
+ return 0;
+ }
+
+ if (parser_read_uint32(&m->match.acl.priority,
+ tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "priority");
+ return 0;
+ }
+
+ if (strcmp(tokens[4], "ipv4") == 0) {
+ struct in_addr saddr, daddr;
+
+ m->match.acl.ip_version = 1;
+
+ if (parse_ipv4_addr(tokens[5], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+ m->match.acl.ipv4.sa = rte_be_to_cpu_32(saddr.s_addr);
+
+ if (parse_ipv4_addr(tokens[7], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+ m->match.acl.ipv4.da = rte_be_to_cpu_32(daddr.s_addr);
+ } else if (strcmp(tokens[4], "ipv6") == 0) {
+ struct in6_addr saddr, daddr;
+
+ m->match.acl.ip_version = 0;
+
+ if (parse_ipv6_addr(tokens[5], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+ memcpy(m->match.acl.ipv6.sa, saddr.s6_addr, 16);
+
+ if (parse_ipv6_addr(tokens[7], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+ memcpy(m->match.acl.ipv6.da, daddr.s6_addr, 16);
+ } else {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "ipv4 or ipv6");
+ return 0;
+ }
+
+ if (parser_read_uint32(&m->match.acl.sa_depth,
+ tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa_depth");
+ return 0;
+ }
+
+ if (parser_read_uint32(&m->match.acl.da_depth,
+ tokens[8]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da_depth");
+ return 0;
+ }
+
+ if (parser_read_uint16(&m->match.acl.sp0, tokens[9]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp0");
+ return 0;
+ }
+
+ if (parser_read_uint16(&m->match.acl.sp1, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp1");
+ return 0;
+ }
+
+ if (parser_read_uint16(&m->match.acl.dp0, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp0");
+ return 0;
+ }
+
+ if (parser_read_uint16(&m->match.acl.dp1, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp1");
+ return 0;
+ }
+
+ if (parser_read_uint8(&m->match.acl.proto, tokens[13]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "proto");
+ return 0;
+ }
+
+ m->match.acl.proto_mask = 0xff;
+
+ return 14;
+ } /* acl */
+
+ if (strcmp(tokens[1], "array") == 0) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_ARRAY;
+
+ if (parser_read_uint32(&m->match.array.pos, tokens[2]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pos");
+ return 0;
+ }
+
+ return 3;
+ } /* array */
+
+ if (strcmp(tokens[1], "hash") == 0) {
+ if (n_tokens < 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_HASH;
+
+ if (strcmp(tokens[2], "raw") == 0) {
+ uint32_t key_size = TABLE_RULE_MATCH_SIZE_MAX;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (parse_hex_string(tokens[3],
+ m->match.hash.key, &key_size) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "key");
+ return 0;
+ }
+
+ return 4;
+ } /* hash raw */
+
+ if (strcmp(tokens[2], "ipv4_5tuple") == 0) {
+ struct pkt_key_ipv4_5tuple *ipv4 =
+ (struct pkt_key_ipv4_5tuple *) m->match.hash.key;
+ struct in_addr saddr, daddr;
+ uint16_t sp, dp;
+ uint8_t proto;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (parse_ipv4_addr(tokens[3], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+
+ if (parse_ipv4_addr(tokens[4], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+
+ if (parser_read_uint16(&sp, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+ return 0;
+ }
+
+ if (parser_read_uint16(&dp, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+ return 0;
+ }
+
+ if (parser_read_uint8(&proto, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "proto");
+ return 0;
+ }
+
+ ipv4->sa = saddr.s_addr;
+ ipv4->da = daddr.s_addr;
+ ipv4->sp = rte_cpu_to_be_16(sp);
+ ipv4->dp = rte_cpu_to_be_16(dp);
+ ipv4->proto = proto;
+
+ return 8;
+ } /* hash ipv4_5tuple */
+
+ if (strcmp(tokens[2], "ipv6_5tuple") == 0) {
+ struct pkt_key_ipv6_5tuple *ipv6 =
+ (struct pkt_key_ipv6_5tuple *) m->match.hash.key;
+ struct in6_addr saddr, daddr;
+ uint16_t sp, dp;
+ uint8_t proto;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (parse_ipv6_addr(tokens[3], &saddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sa");
+ return 0;
+ }
+
+ if (parse_ipv6_addr(tokens[4], &daddr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "da");
+ return 0;
+ }
+
+ if (parser_read_uint16(&sp, tokens[5]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "sp");
+ return 0;
+ }
+
+ if (parser_read_uint16(&dp, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "dp");
+ return 0;
+ }
+
+ if (parser_read_uint8(&proto, tokens[7]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "proto");
+ return 0;
+ }
+
+ memcpy(ipv6->sa, saddr.s6_addr, 16);
+ memcpy(ipv6->da, daddr.s6_addr, 16);
+ ipv6->sp = rte_cpu_to_be_16(sp);
+ ipv6->dp = rte_cpu_to_be_16(dp);
+ ipv6->proto = proto;
+
+ return 8;
+ } /* hash ipv6_5tuple */
+
+ if (strcmp(tokens[2], "ipv4_addr") == 0) {
+ struct pkt_key_ipv4_addr *ipv4_addr =
+ (struct pkt_key_ipv4_addr *) m->match.hash.key;
+ struct in_addr addr;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (parse_ipv4_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ ipv4_addr->addr = addr.s_addr;
+
+ return 4;
+ } /* hash ipv4_addr */
+
+ if (strcmp(tokens[2], "ipv6_addr") == 0) {
+ struct pkt_key_ipv6_addr *ipv6_addr =
+ (struct pkt_key_ipv6_addr *) m->match.hash.key;
+ struct in6_addr addr;
+
+ if (n_tokens < 4) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if (parse_ipv6_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ memcpy(ipv6_addr->addr, addr.s6_addr, 16);
+
+ return 4;
+ } /* hash ipv6_addr */
+
+ if (strcmp(tokens[2], "qinq") == 0) {
+ struct pkt_key_qinq *qinq =
+ (struct pkt_key_qinq *) m->match.hash.key;
+ uint16_t svlan, cvlan;
+
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return 0;
+ }
+
+ if ((parser_read_uint16(&svlan, tokens[3]) != 0) ||
+ (svlan > 0xFFF)) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "svlan");
+ return 0;
+ }
+
+ if ((parser_read_uint16(&cvlan, tokens[4]) != 0) ||
+ (cvlan > 0xFFF)) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "cvlan");
+ return 0;
+ }
+
+ qinq->svlan = rte_cpu_to_be_16(svlan);
+ qinq->cvlan = rte_cpu_to_be_16(cvlan);
+
+ return 5;
+ } /* hash qinq */
+
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ } /* hash */
+
+ if (strcmp(tokens[1], "lpm") == 0) {
+ if (n_tokens < 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return 0;
+ }
+
+ m->match_type = TABLE_LPM;
+
+ if (strcmp(tokens[2], "ipv4") == 0) {
+ struct in_addr addr;
+
+ m->match.lpm.ip_version = 1;
+
+ if (parse_ipv4_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ m->match.lpm.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+ } else if (strcmp(tokens[2], "ipv6") == 0) {
+ struct in6_addr addr;
+
+ m->match.lpm.ip_version = 0;
+
+ if (parse_ipv6_addr(tokens[3], &addr) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "addr");
+ return 0;
+ }
+
+ memcpy(m->match.lpm.ipv6, addr.s6_addr, 16);
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "ipv4 or ipv6");
+ return 0;
+ }
+
+ if (parser_read_uint8(&m->match.lpm.depth, tokens[4]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "depth");
+ return 0;
+ }
+
+ return 5;
+ } /* lpm */
+
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "acl or array or hash or lpm");
+ return 0;
+}
+
+/**
+ * table_action ::=
+ *
+ * action
+ * fwd
+ * drop
+ * | port <port_id>
+ * | meta
+ * | table <table_id>
+ * [balance <out0> ... <out7>]
+ * [meter
+ * tc0 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * [tc1 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * tc2 meter <meter_profile_id> policer g <pa> y <pa> r <pa>
+ * tc3 meter <meter_profile_id> policer g <pa> y <pa> r <pa>]]
+ * [tm subport <subport_id> pipe <pipe_id>]
+ * [encap
+ * ether <da> <sa>
+ * | vlan <da> <sa> <pcp> <dei> <vid>
+ * | qinq <da> <sa> <pcp> <dei> <vid> <pcp> <dei> <vid>
+ * | mpls unicast | multicast
+ * <da> <sa>
+ * label0 <label> <tc> <ttl>
+ * [label1 <label> <tc> <ttl>
+ * [label2 <label> <tc> <ttl>
+ * [label3 <label> <tc> <ttl>]]]
+ * | pppoe <da> <sa> <session_id>]
+ * [nat ipv4 | ipv6 <addr> <port>]
+ * [ttl dec | keep]
+ * [stats]
+ * [time]
+ *
+ * where:
+ * <pa> ::= g | y | r | drop
+ */
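+
+/*
+ * Illustrative <table_action> clauses (port and table IDs are
+ * placeholders):
+ *
+ *   action fwd port 0
+ *   action fwd table 1 stats time
+ */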
+static uint32_t
+parse_table_action_fwd(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens == 0) || (strcmp(tokens[0], "fwd") != 0))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens && (strcmp(tokens[0], "drop") == 0)) {
+ a->fwd.action = RTE_PIPELINE_ACTION_DROP;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 1;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "port") == 0)) {
+ uint32_t id;
+
+ if ((n_tokens < 2) ||
+ parser_read_uint32(&id, tokens[1]))
+ return 0;
+
+ a->fwd.action = RTE_PIPELINE_ACTION_PORT;
+ a->fwd.id = id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 2;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "meta") == 0)) {
+ a->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 1;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "table") == 0)) {
+ uint32_t id;
+
+ if ((n_tokens < 2) ||
+ parser_read_uint32(&id, tokens[1]))
+ return 0;
+
+ a->fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ a->fwd.id = id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ return 1 + 2;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_balance(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ uint32_t i;
+
+ if ((n_tokens == 0) || (strcmp(tokens[0], "balance") != 0))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens < RTE_TABLE_ACTION_LB_TABLE_SIZE)
+ return 0;
+
+ for (i = 0; i < RTE_TABLE_ACTION_LB_TABLE_SIZE; i++)
+ if (parser_read_uint32(&a->lb.out[i], tokens[i]) != 0)
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_LB;
+ return 1 + RTE_TABLE_ACTION_LB_TABLE_SIZE;
+}
+
+static int
+parse_policer_action(char *token, enum rte_table_action_policer *a)
+{
+ if (strcmp(token, "g") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_GREEN;
+ return 0;
+ }
+
+ if (strcmp(token, "y") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_YELLOW;
+ return 0;
+ }
+
+ if (strcmp(token, "r") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_COLOR_RED;
+ return 0;
+ }
+
+ if (strcmp(token, "drop") == 0) {
+ *a = RTE_TABLE_ACTION_POLICER_DROP;
+ return 0;
+ }
+
+ return -1;
+}
+
+static uint32_t
+parse_table_action_meter_tc(char **tokens,
+ uint32_t n_tokens,
+ struct rte_table_action_mtr_tc_params *mtr)
+{
+ if ((n_tokens < 9) ||
+ strcmp(tokens[0], "meter") ||
+ parser_read_uint32(&mtr->meter_profile_id, tokens[1]) ||
+ strcmp(tokens[2], "policer") ||
+ strcmp(tokens[3], "g") ||
+ parse_policer_action(tokens[4], &mtr->policer[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[5], "y") ||
+ parse_policer_action(tokens[6], &mtr->policer[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[7], "r") ||
+ parse_policer_action(tokens[8], &mtr->policer[e_RTE_METER_RED]))
+ return 0;
+
+ return 9;
+}
+
+static uint32_t
+parse_table_action_meter(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens == 0) || strcmp(tokens[0], "meter"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
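+ /* Each "tcX" keyword is followed by a 9-token meter spec, so every
+ * traffic class consumes 10 tokens: tc0 is mandatory, while tc1, tc2
+ * and tc3 are either all present or all absent. */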
+ if ((n_tokens < 10) ||
+ strcmp(tokens[0], "tc0") ||
+ (parse_table_action_meter_tc(tokens + 1,
+ n_tokens - 1,
+ &a->mtr.mtr[0]) == 0))
+ return 0;
+
+ tokens += 10;
+ n_tokens -= 10;
+
+ if ((n_tokens == 0) || strcmp(tokens[0], "tc1")) {
+ a->mtr.tc_mask = 1;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ return 1 + 10;
+ }
+
+ if ((n_tokens < 30) ||
+ (parse_table_action_meter_tc(tokens + 1,
+ n_tokens - 1, &a->mtr.mtr[1]) == 0) ||
+ strcmp(tokens[10], "tc2") ||
+ (parse_table_action_meter_tc(tokens + 11,
+ n_tokens - 11, &a->mtr.mtr[2]) == 0) ||
+ strcmp(tokens[20], "tc3") ||
+ (parse_table_action_meter_tc(tokens + 21,
+ n_tokens - 21, &a->mtr.mtr[3]) == 0))
+ return 0;
+
+ a->mtr.tc_mask = 0xF;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ return 1 + 10 + 3 * 10;
+}
+
+static uint32_t
+parse_table_action_tm(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ uint32_t subport_id, pipe_id;
+
+ if ((n_tokens < 5) ||
+ strcmp(tokens[0], "tm") ||
+ strcmp(tokens[1], "subport") ||
+ parser_read_uint32(&subport_id, tokens[2]) ||
+ strcmp(tokens[3], "pipe") ||
+ parser_read_uint32(&pipe_id, tokens[4]))
+ return 0;
+
+ a->tm.subport_id = subport_id;
+ a->tm.pipe_id = pipe_id;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TM;
+ return 5;
+}
+
+static uint32_t
+parse_table_action_encap(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens == 0) || strcmp(tokens[0], "encap"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ /* ether */
+ if (n_tokens && (strcmp(tokens[0], "ether") == 0)) {
+ if ((n_tokens < 3) ||
+ parse_mac_addr(tokens[1], &a->encap.ether.ether.da) ||
+ parse_mac_addr(tokens[2], &a->encap.ether.ether.sa))
+ return 0;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_ETHER;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 3;
+ }
+
+ /* vlan */
+ if (n_tokens && (strcmp(tokens[0], "vlan") == 0)) {
+ uint32_t pcp, dei, vid;
+
+ if ((n_tokens < 6) ||
+ parse_mac_addr(tokens[1], &a->encap.vlan.ether.da) ||
+ parse_mac_addr(tokens[2], &a->encap.vlan.ether.sa) ||
+ parser_read_uint32(&pcp, tokens[3]) ||
+ (pcp > 0x7) ||
+ parser_read_uint32(&dei, tokens[4]) ||
+ (dei > 0x1) ||
+ parser_read_uint32(&vid, tokens[5]) ||
+ (vid > 0xFFF))
+ return 0;
+
+ a->encap.vlan.vlan.pcp = pcp & 0x7;
+ a->encap.vlan.vlan.dei = dei & 0x1;
+ a->encap.vlan.vlan.vid = vid & 0xFFF;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_VLAN;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 6;
+ }
+
+ /* qinq */
+ if (n_tokens && (strcmp(tokens[0], "qinq") == 0)) {
+ uint32_t svlan_pcp, svlan_dei, svlan_vid;
+ uint32_t cvlan_pcp, cvlan_dei, cvlan_vid;
+
+ if ((n_tokens < 9) ||
+ parse_mac_addr(tokens[1], &a->encap.qinq.ether.da) ||
+ parse_mac_addr(tokens[2], &a->encap.qinq.ether.sa) ||
+ parser_read_uint32(&svlan_pcp, tokens[3]) ||
+ (svlan_pcp > 0x7) ||
+ parser_read_uint32(&svlan_dei, tokens[4]) ||
+ (svlan_dei > 0x1) ||
+ parser_read_uint32(&svlan_vid, tokens[5]) ||
+ (svlan_vid > 0xFFF) ||
+ parser_read_uint32(&cvlan_pcp, tokens[6]) ||
+ (cvlan_pcp > 0x7) ||
+ parser_read_uint32(&cvlan_dei, tokens[7]) ||
+ (cvlan_dei > 0x1) ||
+ parser_read_uint32(&cvlan_vid, tokens[8]) ||
+ (cvlan_vid > 0xFFF))
+ return 0;
+
+ a->encap.qinq.svlan.pcp = svlan_pcp & 0x7;
+ a->encap.qinq.svlan.dei = svlan_dei & 0x1;
+ a->encap.qinq.svlan.vid = svlan_vid & 0xFFF;
+ a->encap.qinq.cvlan.pcp = cvlan_pcp & 0x7;
+ a->encap.qinq.cvlan.dei = cvlan_dei & 0x1;
+ a->encap.qinq.cvlan.vid = cvlan_vid & 0xFFF;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_QINQ;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 9;
+ }
+
+ /* mpls */
+ if (n_tokens && (strcmp(tokens[0], "mpls") == 0)) {
+ uint32_t label, tc, ttl;
+
+ if (n_tokens < 8)
+ return 0;
+
+ if (strcmp(tokens[1], "unicast") == 0)
+ a->encap.mpls.unicast = 1;
+ else if (strcmp(tokens[1], "multicast") == 0)
+ a->encap.mpls.unicast = 0;
+ else
+ return 0;
+
+ if (parse_mac_addr(tokens[2], &a->encap.mpls.ether.da) ||
+ parse_mac_addr(tokens[3], &a->encap.mpls.ether.sa) ||
+ strcmp(tokens[4], "label0") ||
+ parser_read_uint32(&label, tokens[5]) ||
+ (label > 0xFFFFF) ||
+ parser_read_uint32(&tc, tokens[6]) ||
+ (tc > 0x7) ||
+ parser_read_uint32(&ttl, tokens[7]) ||
+ (ttl > 0x3F))
+ return 0;
+
+ a->encap.mpls.mpls[0].label = label;
+ a->encap.mpls.mpls[0].tc = tc;
+ a->encap.mpls.mpls[0].ttl = ttl;
+
+ tokens += 8;
+ n_tokens -= 8;
+
+ if ((n_tokens == 0) || strcmp(tokens[0], "label1")) {
+ a->encap.mpls.mpls_count = 1;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8;
+ }
+
+ if ((n_tokens < 4) ||
+ parser_read_uint32(&label, tokens[1]) ||
+ (label > 0xFFFFF) ||
+ parser_read_uint32(&tc, tokens[2]) ||
+ (tc > 0x7) ||
+ parser_read_uint32(&ttl, tokens[3]) ||
+ (ttl > 0x3F))
+ return 0;
+
+ a->encap.mpls.mpls[1].label = label;
+ a->encap.mpls.mpls[1].tc = tc;
+ a->encap.mpls.mpls[1].ttl = ttl;
+
+ tokens += 4;
+ n_tokens -= 4;
+
+ if ((n_tokens == 0) || strcmp(tokens[0], "label2")) {
+ a->encap.mpls.mpls_count = 2;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4;
+ }
+
+ if ((n_tokens < 4) ||
+ parser_read_uint32(&label, tokens[1]) ||
+ (label > 0xFFFFF) ||
+ parser_read_uint32(&tc, tokens[2]) ||
+ (tc > 0x7) ||
+ parser_read_uint32(&ttl, tokens[3]) ||
+ (ttl > 0x3F))
+ return 0;
+
+ a->encap.mpls.mpls[2].label = label;
+ a->encap.mpls.mpls[2].tc = tc;
+ a->encap.mpls.mpls[2].ttl = ttl;
+
+ tokens += 4;
+ n_tokens -= 4;
+
+ if ((n_tokens == 0) || strcmp(tokens[0], "label3")) {
+ a->encap.mpls.mpls_count = 3;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4 + 4;
+ }
+
+ if ((n_tokens < 4) ||
+ parser_read_uint32(&label, tokens[1]) ||
+ (label > 0xFFFFF) ||
+ parser_read_uint32(&tc, tokens[2]) ||
+ (tc > 0x7) ||
+ parser_read_uint32(&ttl, tokens[3]) ||
+ (ttl > 0x3F))
+ return 0;
+
+ a->encap.mpls.mpls[3].label = label;
+ a->encap.mpls.mpls[3].tc = tc;
+ a->encap.mpls.mpls[3].ttl = ttl;
+
+ a->encap.mpls.mpls_count = 4;
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 8 + 4 + 4 + 4;
+ }
+
+ /* pppoe */
+ if (n_tokens && (strcmp(tokens[0], "pppoe") == 0)) {
+ if ((n_tokens < 4) ||
+ parse_mac_addr(tokens[1], &a->encap.pppoe.ether.da) ||
+ parse_mac_addr(tokens[2], &a->encap.pppoe.ether.sa) ||
+ parser_read_uint16(&a->encap.pppoe.pppoe.session_id,
+ tokens[3]))
+ return 0;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_PPPOE;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + 4;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_nat(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens < 4) ||
+ strcmp(tokens[0], "nat"))
+ return 0;
+
+ if (strcmp(tokens[1], "ipv4") == 0) {
+ struct in_addr addr;
+ uint16_t port;
+
+ if (parse_ipv4_addr(tokens[2], &addr) ||
+ parser_read_uint16(&port, tokens[3]))
+ return 0;
+
+ a->nat.ip_version = 1;
+ a->nat.addr.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+ a->nat.port = port;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
+ return 4;
+ }
+
+ if (strcmp(tokens[1], "ipv6") == 0) {
+ struct in6_addr addr;
+ uint16_t port;
+
+ if (parse_ipv6_addr(tokens[2], &addr) ||
+ parser_read_uint16(&port, tokens[3]))
+ return 0;
+
+ a->nat.ip_version = 0;
+ memcpy(a->nat.addr.ipv6, addr.s6_addr, 16);
+ a->nat.port = port;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_NAT;
+ return 4;
+ }
+
+ return 0;
+}
+
+static uint32_t
+parse_table_action_ttl(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens < 2) ||
+ strcmp(tokens[0], "ttl"))
+ return 0;
+
+ if (strcmp(tokens[1], "dec") == 0)
+ a->ttl.decrement = 1;
+ else if (strcmp(tokens[1], "keep") == 0)
+ a->ttl.decrement = 0;
+ else
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TTL;
+ return 2;
+}
+
+static uint32_t
+parse_table_action_stats(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens < 1) ||
+ strcmp(tokens[0], "stats"))
+ return 0;
+
+ a->stats.n_packets = 0;
+ a->stats.n_bytes = 0;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
+ return 1;
+}
+
+static uint32_t
+parse_table_action_time(char **tokens,
+ uint32_t n_tokens,
+ struct table_rule_action *a)
+{
+ if ((n_tokens < 1) ||
+ strcmp(tokens[0], "time"))
+ return 0;
+
+ a->time.time = rte_rdtsc();
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TIME;
+ return 1;
+}
+
+static uint32_t
+parse_table_action(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size,
+ struct table_rule_action *a)
+{
+ uint32_t n_tokens0 = n_tokens;
+
+ memset(a, 0, sizeof(*a));
+
+ if ((n_tokens < 2) ||
+ strcmp(tokens[0], "action"))
+ return 0;
+
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens && (strcmp(tokens[0], "fwd") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_fwd(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action fwd");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "balance") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_balance(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action balance");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "meter") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_meter(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action meter");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "tm") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_tm(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action tm");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "encap") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_encap(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action encap");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "nat") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_nat(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action nat");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "ttl") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_ttl(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action ttl");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "stats") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_stats(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action stats");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "time") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_time(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action time");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens0 - n_tokens == 1) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "action");
+ return 0;
+ }
+
+ return n_tokens0 - n_tokens;
+}
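+
+/*
+ * Illustrative composition (editor's sketch): parse_table_action() applies
+ * the per-action parsers above in a fixed order (fwd, balance, meter, tm,
+ * encap, nat, ttl, stats, time), so one rule string can combine several
+ * actions. A hypothetical token sequence accepted by this parser:
+ *
+ *   action fwd port 0 ttl dec stats time
+ *
+ * Here "fwd port 0" is consumed by parse_table_action_fwd(), "ttl dec" sets
+ * ttl.decrement = 1, and "stats"/"time" arm the STATS and TIME actions.
+ * Note the final check: consuming only the "action" keyword (one token)
+ * with no recognized action keyword after it is reported as invalid.
+ */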
+
+
+static const char cmd_pipeline_table_rule_add_help[] =
+"pipeline <pipeline_name> table <table_id> rule add\n"
+" match <match>\n"
+" action <table_action>\n";
+
+static void
+cmd_pipeline_table_rule_add(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct table_rule_match m;
+ struct table_rule_action a;
+ char *pipeline_name;
+ void *data;
+ uint32_t table_id, t0, n_tokens_parsed;
+ int status;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ t0 = 6;
+
+ /* match */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ /* action */
+ n_tokens_parsed = parse_table_action(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &a);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ if (t0 != n_tokens) {
+ snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+ return;
+ }
+
+ status = pipeline_table_rule_add(pipeline_name, table_id,
+ &m, &a, &data);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
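+
+/*
+ * Example (illustrative; the pipeline name is hypothetical and the exact
+ * <match> grammar is defined by parse_match() elsewhere in this file):
+ *
+ *   pipeline PIPELINE0 table 0 rule add match <match> action fwd port 0
+ *
+ * The command succeeds only when the match and action clauses together
+ * consume every remaining token (t0 == n_tokens).
+ */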
+
+
+static const char cmd_pipeline_table_rule_add_default_help[] =
+"pipeline <pipeline_name> table <table_id> rule add\n"
+" match\n"
+" default\n"
+" action\n"
+" fwd\n"
+" drop\n"
+" | port <port_id>\n"
+" | meta\n"
+" | table <table_id>\n";
+
+static void
+cmd_pipeline_table_rule_add_default(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct table_rule_action action;
+ void *data;
+ char *pipeline_name;
+ uint32_t table_id;
+ int status;
+
+ if ((n_tokens != 11) && (n_tokens != 12)) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[6], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match");
+ return;
+ }
+
+ if (strcmp(tokens[7], "default") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "default");
+ return;
+ }
+
+ if (strcmp(tokens[8], "action") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "action");
+ return;
+ }
+
+ if (strcmp(tokens[9], "fwd") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "fwd");
+ return;
+ }
+
+ action.action_mask = 1 << RTE_TABLE_ACTION_FWD;
+
+ if (strcmp(tokens[10], "drop") == 0) {
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_DROP;
+ } else if (strcmp(tokens[10], "port") == 0) {
+ uint32_t id;
+
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (parser_read_uint32(&id, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_PORT;
+ action.fwd.id = id;
+ } else if (strcmp(tokens[10], "meta") == 0) {
+ if (n_tokens != 11) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ } else if (strcmp(tokens[10], "table") == 0) {
+ uint32_t id;
+
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (parser_read_uint32(&id, tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ action.fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ action.fwd.id = id;
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "drop or port or meta or table");
+ return;
+ }
+
+ status = pipeline_table_rule_add_default(pipeline_name,
+ table_id,
+ &action,
+ &data);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
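+
+/*
+ * Examples (illustrative, pipeline name hypothetical). With the "drop" and
+ * "meta" forms the command is exactly 11 tokens; the "port <id>" and
+ * "table <id>" forms add one more token (12):
+ *
+ *   pipeline PIPELINE0 table 0 rule add match default action fwd drop
+ *   pipeline PIPELINE0 table 0 rule add match default action fwd port 2
+ */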
+
+
+static const char cmd_pipeline_table_rule_add_bulk_help[] =
+"pipeline <pipeline_name> table <table_id> rule add bulk <file_name> <n_rules>\n"
+"\n"
+" File <file_name>:\n"
+" - line format: match <match> action <action>\n";
+
+static int
+cli_rule_file_process(const char *file_name,
+ size_t line_len_max,
+ struct table_rule_match *m,
+ struct table_rule_action *a,
+ uint32_t *n_rules,
+ uint32_t *line_number,
+ char *out,
+ size_t out_size);
+
+static void
+cmd_pipeline_table_rule_add_bulk(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct table_rule_match *match;
+ struct table_rule_action *action;
+ void **data;
+ char *pipeline_name, *file_name;
+ uint32_t table_id, n_rules, n_rules_parsed, line_number;
+ int status;
+
+ if (n_tokens != 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[6], "bulk") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "bulk");
+ return;
+ }
+
+ file_name = tokens[7];
+
+ if ((parser_read_uint32(&n_rules, tokens[8]) != 0) ||
+ (n_rules == 0)) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "n_rules");
+ return;
+ }
+
+ /* Memory allocation. */
+ match = calloc(n_rules, sizeof(struct table_rule_match));
+ action = calloc(n_rules, sizeof(struct table_rule_action));
+ data = calloc(n_rules, sizeof(void *));
+ if ((match == NULL) || (action == NULL) || (data == NULL)) {
+ snprintf(out, out_size, MSG_OUT_OF_MEMORY);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Load rule file */
+ n_rules_parsed = n_rules;
+ status = cli_rule_file_process(file_name,
+ 1024,
+ match,
+ action,
+ &n_rules_parsed,
+ &line_number,
+ out,
+ out_size);
+ if (status) {
+ snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+ if (n_rules_parsed != n_rules) {
+ snprintf(out, out_size, MSG_FILE_NOT_ENOUGH, file_name);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Rule bulk add */
+ status = pipeline_table_rule_add_bulk(pipeline_name,
+ table_id,
+ match,
+ action,
+ data,
+ &n_rules);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ free(data);
+ free(action);
+ free(match);
+ return;
+ }
+
+ /* Memory free */
+ free(data);
+ free(action);
+ free(match);
+}
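+
+/*
+ * Example (illustrative; file name and rule count hypothetical). The command
+ * is exactly 9 tokens and the file must supply at least <n_rules> rules, one
+ * per line in "match <match> action <action>" format (comment and empty
+ * lines are skipped by cli_rule_file_process()):
+ *
+ *   pipeline PIPELINE0 table 0 rule add bulk ./rules.txt 2
+ *
+ * with ./rules.txt containing, e.g.:
+ *
+ *   match <match> action fwd port 0
+ *   match <match> action fwd drop
+ */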
+
+
+static const char cmd_pipeline_table_rule_delete_help[] =
+"pipeline <pipeline_name> table <table_id> rule delete\n"
+" match <match>\n";
+
+static void
+cmd_pipeline_table_rule_delete(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct table_rule_match m;
+ char *pipeline_name;
+ uint32_t table_id, n_tokens_parsed, t0;
+ int status;
+
+ if (n_tokens < 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ t0 = 6;
+
+ /* match */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m);
+ if (n_tokens_parsed == 0)
+ return;
+ t0 += n_tokens_parsed;
+
+ if (n_tokens != t0) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = pipeline_table_rule_delete(pipeline_name,
+ table_id,
+ &m);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+
+static const char cmd_pipeline_table_rule_delete_default_help[] =
+"pipeline <pipeline_name> table <table_id> rule delete\n"
+" match\n"
+" default\n";
+
+static void
+cmd_pipeline_table_rule_delete_default(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t table_id;
+ int status;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "rule") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+ return;
+ }
+
+ if (strcmp(tokens[5], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ if (strcmp(tokens[6], "match") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "match");
+ return;
+ }
+
+ if (strcmp(tokens[7], "default") != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "default");
+ return;
+ }
+
+ status = pipeline_table_rule_delete_default(pipeline_name,
+ table_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
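+
+/*
+ * Example (illustrative, pipeline name hypothetical); the command is exactly
+ * 8 tokens:
+ *
+ *   pipeline PIPELINE0 table 0 rule delete match default
+ */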
+
+
+static const char cmd_pipeline_table_rule_stats_read_help[] =
+"pipeline <pipeline_name> table <table_id> rule read stats [clear]\n";
+
+static void
+cmd_pipeline_table_rule_stats_read(char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+
+static const char cmd_pipeline_table_meter_profile_add_help[] =
+"pipeline <pipeline_name> table <table_id> meter profile <meter_profile_id>\n"
+" add srtcm cir <cir> cbs <cbs> ebs <ebs>\n"
+" | trtcm cir <cir> pir <pir> cbs <cbs> pbs <pbs>\n";
+
+static void
+cmd_pipeline_table_meter_profile_add(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_table_action_meter_profile p;
+ char *pipeline_name;
+ uint32_t table_id, meter_profile_id;
+ int status;
+
+ if (n_tokens < 9) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "meter") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (parser_read_uint32(&meter_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[7], "add") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+ return;
+ }
+
+ if (strcmp(tokens[8], "srtcm") == 0) {
+ if (n_tokens != 15) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ tokens[0]);
+ return;
+ }
+
+ p.alg = RTE_TABLE_ACTION_METER_SRTCM;
+
+ if (strcmp(tokens[9], "cir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir");
+ return;
+ }
+
+ if (parser_read_uint64(&p.srtcm.cir, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cir");
+ return;
+ }
+
+ if (strcmp(tokens[11], "cbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs");
+ return;
+ }
+
+ if (parser_read_uint64(&p.srtcm.cbs, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cbs");
+ return;
+ }
+
+ if (strcmp(tokens[13], "ebs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "ebs");
+ return;
+ }
+
+ if (parser_read_uint64(&p.srtcm.ebs, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "ebs");
+ return;
+ }
+ } else if (strcmp(tokens[8], "trtcm") == 0) {
+ if (n_tokens != 17) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ p.alg = RTE_TABLE_ACTION_METER_TRTCM;
+
+ if (strcmp(tokens[9], "cir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir");
+ return;
+ }
+
+ if (parser_read_uint64(&p.trtcm.cir, tokens[10]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cir");
+ return;
+ }
+
+ if (strcmp(tokens[11], "pir") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pir");
+ return;
+ }
+
+ if (parser_read_uint64(&p.trtcm.pir, tokens[12]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pir");
+ return;
+ }
+
+ if (strcmp(tokens[13], "cbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs");
+ return;
+ }
+
+ if (parser_read_uint64(&p.trtcm.cbs, tokens[14]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "cbs");
+ return;
+ }
+
+ if (strcmp(tokens[15], "pbs") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pbs");
+ return;
+ }
+
+ if (parser_read_uint64(&p.trtcm.pbs, tokens[16]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "pbs");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ status = pipeline_table_mtr_profile_add(pipeline_name,
+ table_id,
+ meter_profile_id,
+ &p);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
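+
+/*
+ * Examples (illustrative; names and rates hypothetical). The srtcm form is
+ * exactly 15 tokens, the trtcm form exactly 17:
+ *
+ *   pipeline PIPELINE0 table 0 meter profile 0 add srtcm cir 46000000 cbs 2048 ebs 2048
+ *   pipeline PIPELINE0 table 0 meter profile 1 add trtcm cir 46000000 pir 69000000 cbs 2048 pbs 2048
+ */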
+
+
+static const char cmd_pipeline_table_meter_profile_delete_help[] =
+"pipeline <pipeline_name> table <table_id>\n"
+" meter profile <meter_profile_id> delete\n";
+
+static void
+cmd_pipeline_table_meter_profile_delete(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t table_id, meter_profile_id;
+ int status;
+
+ if (n_tokens != 8) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "meter") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter");
+ return;
+ }
+
+ if (strcmp(tokens[5], "profile") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile");
+ return;
+ }
+
+ if (parser_read_uint32(&meter_profile_id, tokens[6]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id");
+ return;
+ }
+
+ if (strcmp(tokens[7], "delete") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete");
+ return;
+ }
+
+ status = pipeline_table_mtr_profile_delete(pipeline_name,
+ table_id,
+ meter_profile_id);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
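+
+/*
+ * Example (illustrative, pipeline name hypothetical); the command is exactly
+ * 8 tokens:
+ *
+ *   pipeline PIPELINE0 table 0 meter profile 0 delete
+ */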
+
+
+static const char cmd_pipeline_table_rule_meter_read_help[] =
+"pipeline <pipeline_name> table <table_id> rule read meter [clear]\n";
+
+static void
+cmd_pipeline_table_rule_meter_read(char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+
+static const char cmd_pipeline_table_dscp_help[] =
+"pipeline <pipeline_name> table <table_id> dscp <file_name>\n"
+"\n"
+" File <file_name>:\n"
+" - exactly 64 lines\n"
+" - line format: <tc_id> <tc_queue_id> <color>, with <color> as: g | y | r\n";
+
+static int
+load_dscp_table(struct rte_table_action_dscp_table *dscp_table,
+ const char *file_name,
+ uint32_t *line_number)
+{
+ FILE *f = NULL;
+ uint32_t dscp, l;
+
+ /* Check input arguments */
+ if ((dscp_table == NULL) ||
+ (file_name == NULL) ||
+ (line_number == NULL)) {
+ if (line_number)
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Open input file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Read file */
+ for (dscp = 0, l = 1; ; l++) {
+ char line[64];
+ char *tokens[3];
+ enum rte_meter_color color;
+ uint32_t tc_id, tc_queue_id, n_tokens = RTE_DIM(tokens);
+
+ if (fgets(line, sizeof(line), f) == NULL)
+ break;
+
+ if (is_comment(line))
+ continue;
+
+ if (parse_tokenize_string(line, tokens, &n_tokens)) {
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ if (n_tokens == 0)
+ continue;
+
+ if ((dscp >= RTE_DIM(dscp_table->entry)) ||
+ (n_tokens != RTE_DIM(tokens)) ||
+ parser_read_uint32(&tc_id, tokens[0]) ||
+ (tc_id >= RTE_TABLE_ACTION_TC_MAX) ||
+ parser_read_uint32(&tc_queue_id, tokens[1]) ||
+ (tc_queue_id >= RTE_TABLE_ACTION_TC_QUEUE_MAX) ||
+ (strlen(tokens[2]) != 1)) {
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ switch (tokens[2][0]) {
+ case 'g':
+ case 'G':
+ color = e_RTE_METER_GREEN;
+ break;
+
+ case 'y':
+ case 'Y':
+ color = e_RTE_METER_YELLOW;
+ break;
+
+ case 'r':
+ case 'R':
+ color = e_RTE_METER_RED;
+ break;
+
+ default:
+ *line_number = l;
+ fclose(f);
+ return -EINVAL;
+ }
+
+ dscp_table->entry[dscp].tc_id = tc_id;
+ dscp_table->entry[dscp].tc_queue_id = tc_queue_id;
+ dscp_table->entry[dscp].color = color;
+ dscp++;
+ }
+
+ /* Close file */
+ fclose(f);
+ return 0;
+}
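+
+/*
+ * Illustrative DSCP table file (values hypothetical): one
+ * "<tc_id> <tc_queue_id> <color>" triplet per line, e.g.:
+ *
+ *   0 0 g
+ *   1 0 y
+ *   2 1 r
+ *
+ * Note that the loader stops at end of file without verifying that all
+ * RTE_DIM(dscp_table->entry) (64) entries were filled; a shorter file
+ * leaves the trailing entries of the caller's table unset.
+ */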
+
+static void
+cmd_pipeline_table_dscp(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct rte_table_action_dscp_table dscp_table;
+ char *pipeline_name, *file_name;
+ uint32_t table_id, line_number;
+ int status;
+
+ if (n_tokens != 6) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ pipeline_name = tokens[1];
+
+ if (strcmp(tokens[2], "table") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+ return;
+ }
+
+ if (parser_read_uint32(&table_id, tokens[3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+ return;
+ }
+
+ if (strcmp(tokens[4], "dscp") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dscp");
+ return;
+ }
+
+ file_name = tokens[5];
+
+ status = load_dscp_table(&dscp_table, file_name, &line_number);
+ if (status) {
+ snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number);
+ return;
+ }
+
+ status = pipeline_table_dscp_table_update(pipeline_name,
+ table_id,
+ UINT64_MAX,
+ &dscp_table);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
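+
+/*
+ * Example (illustrative, file name hypothetical); the command is exactly
+ * 6 tokens, and the UINT64_MAX argument is presumably a mask selecting all
+ * 64 DSCP entries for update:
+ *
+ *   pipeline PIPELINE0 table 0 dscp ./dscp.txt
+ */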
+
+
+static const char cmd_pipeline_table_rule_ttl_read_help[] =
+"pipeline <pipeline_name> table <table_id> rule read ttl [clear]\n";
+
+static void
+cmd_pipeline_table_rule_ttl_read(char **tokens,
+ uint32_t n_tokens __rte_unused,
+ char *out,
+ size_t out_size)
+{
+ snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]);
+}
+
+
+static const char cmd_thread_pipeline_enable_help[] =
+"thread <thread_id> pipeline <pipeline_name> enable\n";
+
+static void
+cmd_thread_pipeline_enable(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t thread_id;
+ int status;
+
+ if (n_tokens != 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (parser_read_uint32(&thread_id, tokens[1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ if (strcmp(tokens[2], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[3];
+
+ if (strcmp(tokens[4], "enable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable");
+ return;
+ }
+
+ status = thread_pipeline_enable(thread_id, pipeline_name);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL, "thread pipeline enable");
+ return;
+ }
+}
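+
+/*
+ * Example (illustrative; thread id and pipeline name hypothetical); the
+ * command is exactly 5 tokens:
+ *
+ *   thread 1 pipeline PIPELINE0 enable
+ */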
+
+
+static const char cmd_thread_pipeline_disable_help[] =
+"thread <thread_id> pipeline <pipeline_name> disable\n";
+
+static void
+cmd_thread_pipeline_disable(char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ char *pipeline_name;
+ uint32_t thread_id;
+ int status;
+
+ if (n_tokens != 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ if (parser_read_uint32(&thread_id, tokens[1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "thread_id");
+ return;
+ }
+
+ if (strcmp(tokens[2], "pipeline") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
+ return;
+ }
+
+ pipeline_name = tokens[3];
+
+ if (strcmp(tokens[4], "disable") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable");
+ return;
+ }
+
+ status = thread_pipeline_disable(thread_id, pipeline_name);
+ if (status) {
+ snprintf(out, out_size, MSG_CMD_FAIL,
+ "thread pipeline disable");
+ return;
+ }
+}
+
+static void
+cmd_help(char **tokens, uint32_t n_tokens, char *out, size_t out_size)
+{
+ tokens++;
+ n_tokens--;
+
+ if (n_tokens == 0) {
+ snprintf(out, out_size,
+ "Type 'help <command>' for details on each command.\n\n"
+ "List of commands:\n"
+ "\tmempool\n"
+ "\tlink\n"
+ "\tswq\n"
+ "\ttmgr subport profile\n"
+ "\ttmgr pipe profile\n"
+ "\ttmgr\n"
+ "\ttmgr subport\n"
+ "\ttmgr subport pipe\n"
+ "\ttap\n"
+ "\tkni\n"
+ "\tport in action profile\n"
+ "\ttable action profile\n"
+ "\tpipeline\n"
+ "\tpipeline port in\n"
+ "\tpipeline port out\n"
+ "\tpipeline table\n"
+ "\tpipeline port in table\n"
+ "\tpipeline port in stats\n"
+ "\tpipeline port in enable\n"
+ "\tpipeline port in disable\n"
+ "\tpipeline port out stats\n"
+ "\tpipeline table stats\n"
+ "\tpipeline table rule add\n"
+ "\tpipeline table rule add default\n"
+ "\tpipeline table rule add bulk\n"
+ "\tpipeline table rule delete\n"
+ "\tpipeline table rule delete default\n"
+ "\tpipeline table rule stats read\n"
+ "\tpipeline table meter profile add\n"
+ "\tpipeline table meter profile delete\n"
+ "\tpipeline table rule meter read\n"
+ "\tpipeline table dscp\n"
+ "\tpipeline table rule ttl read\n"
+ "\tthread pipeline enable\n"
+ "\tthread pipeline disable\n\n");
+ return;
+ }
+
+ if (strcmp(tokens[0], "mempool") == 0) {
+ snprintf(out, out_size, "\n%s\n", cmd_mempool_help);
+ return;
+ }
+
+ if (strcmp(tokens[0], "link") == 0) {
+ snprintf(out, out_size, "\n%s\n", cmd_link_help);
+ return;
+ }
+
+ if (strcmp(tokens[0], "swq") == 0) {
+ snprintf(out, out_size, "\n%s\n", cmd_swq_help);
+ return;
+ }
+
+ if (strcmp(tokens[0], "tmgr") == 0) {
+ if (n_tokens == 1) {
+ snprintf(out, out_size, "\n%s\n", cmd_tmgr_help);
+ return;
+ }
+
+ if ((n_tokens == 2) &&
+ (strcmp(tokens[1], "subport") == 0)) {
+ snprintf(out, out_size, "\n%s\n", cmd_tmgr_subport_help);
+ return;
+ }
+
+ if ((n_tokens == 3) &&
+ (strcmp(tokens[1], "subport") == 0) &&
+ (strcmp(tokens[2], "profile") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_tmgr_subport_profile_help);
+ return;
+ }
+
+ if ((n_tokens == 3) &&
+ (strcmp(tokens[1], "subport") == 0) &&
+ (strcmp(tokens[2], "pipe") == 0)) {
+ snprintf(out, out_size, "\n%s\n", cmd_tmgr_subport_pipe_help);
+ return;
+ }
+
+ if ((n_tokens == 3) &&
+ (strcmp(tokens[1], "pipe") == 0) &&
+ (strcmp(tokens[2], "profile") == 0)) {
+ snprintf(out, out_size, "\n%s\n", cmd_tmgr_pipe_profile_help);
+ return;
+ }
+ }
+
+ if (strcmp(tokens[0], "tap") == 0) {
+ snprintf(out, out_size, "\n%s\n", cmd_tap_help);
+ return;
+ }
+
+ if (strcmp(tokens[0], "kni") == 0) {
+ snprintf(out, out_size, "\n%s\n", cmd_kni_help);
+ return;
+ }
+
+ if ((n_tokens == 4) &&
+ (strcmp(tokens[0], "port") == 0) &&
+ (strcmp(tokens[1], "in") == 0) &&
+ (strcmp(tokens[2], "action") == 0) &&
+ (strcmp(tokens[3], "profile") == 0)) {
+ snprintf(out, out_size, "\n%s\n", cmd_port_in_action_profile_help);
+ return;
+ }
+
+ if ((n_tokens == 3) &&
+ (strcmp(tokens[0], "table") == 0) &&
+ (strcmp(tokens[1], "action") == 0) &&
+ (strcmp(tokens[2], "profile") == 0)) {
+ snprintf(out, out_size, "\n%s\n", cmd_table_action_profile_help);
+ return;
+ }
+
+ if ((strcmp(tokens[0], "pipeline") == 0) && (n_tokens == 1)) {
+ snprintf(out, out_size, "\n%s\n", cmd_pipeline_help);
+ return;
+ }
+
+ if ((strcmp(tokens[0], "pipeline") == 0) &&
+ (strcmp(tokens[1], "port") == 0)) {
+ if ((n_tokens == 3) && (strcmp(tokens[2], "in") == 0)) {
+ snprintf(out, out_size, "\n%s\n", cmd_pipeline_port_in_help);
+ return;
+ }
+
+ if ((n_tokens == 3) && (strcmp(tokens[2], "out") == 0)) {
+ snprintf(out, out_size, "\n%s\n", cmd_pipeline_port_out_help);
+ return;
+ }
+
+ if ((n_tokens == 4) &&
+ (strcmp(tokens[2], "in") == 0) &&
+ (strcmp(tokens[3], "table") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_port_in_table_help);
+ return;
+ }
+
+ if ((n_tokens == 4) &&
+ (strcmp(tokens[2], "in") == 0) &&
+ (strcmp(tokens[3], "stats") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_port_in_stats_help);
+ return;
+ }
+
+ if ((n_tokens == 4) &&
+ (strcmp(tokens[2], "in") == 0) &&
+ (strcmp(tokens[3], "enable") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_port_in_enable_help);
+ return;
+ }
+
+ if ((n_tokens == 4) &&
+ (strcmp(tokens[2], "in") == 0) &&
+ (strcmp(tokens[3], "disable") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_port_in_disable_help);
+ return;
+ }
+
+ if ((n_tokens == 4) &&
+ (strcmp(tokens[2], "out") == 0) &&
+ (strcmp(tokens[3], "stats") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_port_out_stats_help);
+ return;
+ }
+ }
+
+ if ((strcmp(tokens[0], "pipeline") == 0) &&
+ (strcmp(tokens[1], "table") == 0)) {
+ if (n_tokens == 2) {
+ snprintf(out, out_size, "\n%s\n", cmd_pipeline_table_help);
+ return;
+ }
+
+ if ((n_tokens == 3) && strcmp(tokens[2], "stats") == 0) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_stats_help);
+ return;
+ }
+
+ if ((n_tokens == 3) && strcmp(tokens[2], "dscp") == 0) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_dscp_help);
+ return;
+ }
+
+ if ((n_tokens == 4) &&
+ (strcmp(tokens[2], "rule") == 0) &&
+ (strcmp(tokens[3], "add") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_rule_add_help);
+ return;
+ }
+
+ if ((n_tokens == 5) &&
+ (strcmp(tokens[2], "rule") == 0) &&
+ (strcmp(tokens[3], "add") == 0) &&
+ (strcmp(tokens[4], "default") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_rule_add_default_help);
+ return;
+ }
+
+ if ((n_tokens == 5) &&
+ (strcmp(tokens[2], "rule") == 0) &&
+ (strcmp(tokens[3], "add") == 0) &&
+ (strcmp(tokens[4], "bulk") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_rule_add_bulk_help);
+ return;
+ }
+
+ if ((n_tokens == 4) &&
+ (strcmp(tokens[2], "rule") == 0) &&
+ (strcmp(tokens[3], "delete") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_rule_delete_help);
+ return;
+ }
+
+ if ((n_tokens == 5) &&
+ (strcmp(tokens[2], "rule") == 0) &&
+ (strcmp(tokens[3], "delete") == 0) &&
+ (strcmp(tokens[4], "default") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_rule_delete_default_help);
+ return;
+ }
+
+ if ((n_tokens == 5) &&
+ (strcmp(tokens[2], "rule") == 0) &&
+ (strcmp(tokens[3], "stats") == 0) &&
+ (strcmp(tokens[4], "read") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_rule_stats_read_help);
+ return;
+ }
+
+ if ((n_tokens == 5) &&
+ (strcmp(tokens[2], "meter") == 0) &&
+ (strcmp(tokens[3], "profile") == 0) &&
+ (strcmp(tokens[4], "add") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_meter_profile_add_help);
+ return;
+ }
+
+ if ((n_tokens == 5) &&
+ (strcmp(tokens[2], "meter") == 0) &&
+ (strcmp(tokens[3], "profile") == 0) &&
+ (strcmp(tokens[4], "delete") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_meter_profile_delete_help);
+ return;
+ }
+
+ if ((n_tokens == 5) &&
+ (strcmp(tokens[2], "rule") == 0) &&
+ (strcmp(tokens[3], "meter") == 0) &&
+ (strcmp(tokens[4], "read") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_rule_meter_read_help);
+ return;
+ }
+
+ if ((n_tokens == 5) &&
+ (strcmp(tokens[2], "rule") == 0) &&
+ (strcmp(tokens[3], "ttl") == 0) &&
+ (strcmp(tokens[4], "read") == 0)) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_pipeline_table_rule_ttl_read_help);
+ return;
+ }
+ }
+
+ if ((n_tokens == 3) &&
+ (strcmp(tokens[0], "thread") == 0) &&
+ (strcmp(tokens[1], "pipeline") == 0)) {
+ if (strcmp(tokens[2], "enable") == 0) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_thread_pipeline_enable_help);
+ return;
+ }
+
+ if (strcmp(tokens[2], "disable") == 0) {
+ snprintf(out, out_size, "\n%s\n",
+ cmd_thread_pipeline_disable_help);
+ return;
+ }
+ }
+
+ snprintf(out, out_size, "Invalid command\n");
+}
+
+void
+cli_process(char *in, char *out, size_t out_size)
+{
+ char *tokens[CMD_MAX_TOKENS];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ if (is_comment(in))
+ return;
+
+ status = parse_tokenize_string(in, tokens, &n_tokens);
+ if (status) {
+ snprintf(out, out_size, MSG_ARG_TOO_MANY, "");
+ return;
+ }
+
+ if (n_tokens == 0)
+ return;
+
+ if (strcmp(tokens[0], "help") == 0) {
+ cmd_help(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "mempool") == 0) {
+ cmd_mempool(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "link") == 0) {
+ if ((n_tokens >= 2) && (strcmp(tokens[1], "show") == 0)) {
+ cmd_link_show(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_link(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "swq") == 0) {
+ cmd_swq(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "tmgr") == 0) {
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[1], "subport") == 0) &&
+ (strcmp(tokens[2], "profile") == 0)) {
+ cmd_tmgr_subport_profile(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[1], "pipe") == 0) &&
+ (strcmp(tokens[2], "profile") == 0)) {
+ cmd_tmgr_pipe_profile(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 5) &&
+ (strcmp(tokens[2], "subport") == 0) &&
+ (strcmp(tokens[4], "profile") == 0)) {
+ cmd_tmgr_subport(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 5) &&
+ (strcmp(tokens[2], "subport") == 0) &&
+ (strcmp(tokens[4], "pipe") == 0)) {
+ cmd_tmgr_subport_pipe(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_tmgr(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "tap") == 0) {
+ cmd_tap(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "kni") == 0) {
+ cmd_kni(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "port") == 0) {
+ cmd_port_in_action_profile(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "table") == 0) {
+ cmd_table_action_profile(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if (strcmp(tokens[0], "pipeline") == 0) {
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[2], "period") == 0)) {
+ cmd_pipeline(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 5) &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[4], "bsz") == 0)) {
+ cmd_pipeline_port_in(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 5) &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "out") == 0) &&
+ (strcmp(tokens[4], "bsz") == 0)) {
+ cmd_pipeline_port_out(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 4) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[3], "match") == 0)) {
+ cmd_pipeline_table(tokens, n_tokens, out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 6) &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "table") == 0)) {
+ cmd_pipeline_port_in_table(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 6) &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "stats") == 0)) {
+ cmd_pipeline_port_in_stats(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 6) &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "enable") == 0)) {
+ cmd_pipeline_port_in_enable(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 6) &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "in") == 0) &&
+ (strcmp(tokens[5], "disable") == 0)) {
+ cmd_pipeline_port_in_disable(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 6) &&
+ (strcmp(tokens[2], "port") == 0) &&
+ (strcmp(tokens[3], "out") == 0) &&
+ (strcmp(tokens[5], "stats") == 0)) {
+ cmd_pipeline_port_out_stats(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 5) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "stats") == 0)) {
+ cmd_pipeline_table_stats(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 7) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "add") == 0) &&
+ (strcmp(tokens[6], "match") == 0)) {
+ if ((n_tokens >= 8) &&
+ (strcmp(tokens[7], "default") == 0)) {
+ cmd_pipeline_table_rule_add_default(tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_pipeline_table_rule_add(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 7) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "add") == 0) &&
+ (strcmp(tokens[6], "bulk") == 0)) {
+ cmd_pipeline_table_rule_add_bulk(tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 7) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "delete") == 0) &&
+ (strcmp(tokens[6], "match") == 0)) {
+ if ((n_tokens >= 8) &&
+ (strcmp(tokens[7], "default") == 0)) {
+ cmd_pipeline_table_rule_delete_default(tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ cmd_pipeline_table_rule_delete(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 7) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "stats") == 0)) {
+ cmd_pipeline_table_rule_stats_read(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 8) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "meter") == 0) &&
+ (strcmp(tokens[5], "profile") == 0) &&
+ (strcmp(tokens[7], "add") == 0)) {
+ cmd_pipeline_table_meter_profile_add(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 8) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "meter") == 0) &&
+ (strcmp(tokens[5], "profile") == 0) &&
+ (strcmp(tokens[7], "delete") == 0)) {
+ cmd_pipeline_table_meter_profile_delete(tokens,
+ n_tokens, out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 7) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "meter") == 0)) {
+ cmd_pipeline_table_rule_meter_read(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 5) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "dscp") == 0)) {
+ cmd_pipeline_table_dscp(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 7) &&
+ (strcmp(tokens[2], "table") == 0) &&
+ (strcmp(tokens[4], "rule") == 0) &&
+ (strcmp(tokens[5], "read") == 0) &&
+ (strcmp(tokens[6], "ttl") == 0)) {
+ cmd_pipeline_table_rule_ttl_read(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+ }
+
+ if (strcmp(tokens[0], "thread") == 0) {
+ if ((n_tokens >= 5) &&
+ (strcmp(tokens[4], "enable") == 0)) {
+ cmd_thread_pipeline_enable(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+
+ if ((n_tokens >= 5) &&
+ (strcmp(tokens[4], "disable") == 0)) {
+ cmd_thread_pipeline_disable(tokens, n_tokens,
+ out, out_size);
+ return;
+ }
+ }
+
+ snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
+}
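+
+/*
+ * Illustrative direct use of cli_process() (editor's sketch). The input
+ * buffer must be writable, since parse_tokenize_string() tokenizes it in
+ * place:
+ *
+ *   char cmd[] = "help";
+ *   char out[1024];
+ *
+ *   out[0] = 0;
+ *   cli_process(cmd, out, sizeof(out));
+ *   if (strlen(out))
+ *       printf("%s", out);
+ */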
+
+int
+cli_script_process(const char *file_name,
+ size_t msg_in_len_max,
+ size_t msg_out_len_max)
+{
+ char *msg_in = NULL, *msg_out = NULL;
+ FILE *f = NULL;
+
+ /* Check input arguments */
+ if ((file_name == NULL) ||
+ (strlen(file_name) == 0) ||
+ (msg_in_len_max == 0) ||
+ (msg_out_len_max == 0))
+ return -EINVAL;
+
+ msg_in = malloc(msg_in_len_max + 1);
+ msg_out = malloc(msg_out_len_max + 1);
+ if ((msg_in == NULL) ||
+ (msg_out == NULL)) {
+ free(msg_out);
+ free(msg_in);
+ return -ENOMEM;
+ }
+
+ /* Open input file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ free(msg_out);
+ free(msg_in);
+ return -EIO;
+ }
+
+ /* Read file */
+ for ( ; ; ) {
+ if (fgets(msg_in, msg_in_len_max + 1, f) == NULL)
+ break;
+
+ printf("%s", msg_in);
+ msg_out[0] = 0;
+
+ cli_process(msg_in,
+ msg_out,
+ msg_out_len_max);
+
+ if (strlen(msg_out))
+ printf("%s", msg_out);
+ }
+
+ /* Close file */
+ fclose(f);
+ free(msg_out);
+ free(msg_in);
+ return 0;
+}
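+
+/*
+ * Illustrative use (file name and buffer sizes hypothetical):
+ *
+ *   cli_script_process("./examples/script.cli", 2048, 16384);
+ *
+ * Each line of the script is echoed to stdout, run through cli_process(),
+ * and any non-empty response is printed.
+ */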
+
+static int
+cli_rule_file_process(const char *file_name,
+ size_t line_len_max,
+ struct table_rule_match *m,
+ struct table_rule_action *a,
+ uint32_t *n_rules,
+ uint32_t *line_number,
+ char *out,
+ size_t out_size)
+{
+ FILE *f = NULL;
+ char *line = NULL;
+ uint32_t rule_id, line_id;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((file_name == NULL) ||
+ (strlen(file_name) == 0) ||
+ (line_len_max == 0)) {
+ *line_number = 0;
+ return -EINVAL;
+ }
+
+ /* Memory allocation */
+ line = malloc(line_len_max + 1);
+ if (line == NULL) {
+ *line_number = 0;
+ return -ENOMEM;
+ }
+
+ /* Open file */
+ f = fopen(file_name, "r");
+ if (f == NULL) {
+ *line_number = 0;
+ free(line);
+ return -EIO;
+ }
+
+ /* Read file */
+ for (line_id = 1, rule_id = 0; rule_id < *n_rules; line_id++) {
+ char *tokens[CMD_MAX_TOKENS];
+ uint32_t n_tokens, n_tokens_parsed, t0;
+
+ /* Read next line from file. */
+ if (fgets(line, line_len_max + 1, f) == NULL)
+ break;
+
+ /* Comment. */
+ if (is_comment(line))
+ continue;
+
+ /* Parse line. */
+ n_tokens = RTE_DIM(tokens);
+ status = parse_tokenize_string(line, tokens, &n_tokens);
+ if (status) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* Empty line. */
+ if (n_tokens == 0)
+ continue;
+ t0 = 0;
+
+ /* Rule match. */
+ n_tokens_parsed = parse_match(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &m[rule_id]);
+ if (n_tokens_parsed == 0) {
+ status = -EINVAL;
+ break;
+ }
+ t0 += n_tokens_parsed;
+
+ /* Rule action. */
+ n_tokens_parsed = parse_table_action(tokens + t0,
+ n_tokens - t0,
+ out,
+ out_size,
+ &a[rule_id]);
+ if (n_tokens_parsed == 0) {
+ status = -EINVAL;
+ break;
+ }
+ t0 += n_tokens_parsed;
+
+ /* Line completed. */
+ if (t0 < n_tokens) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* Increment rule count */
+ rule_id++;
+ }
+
+ /* Close file */
+ fclose(f);
+
+ /* Memory free */
+ free(line);
+
+ *n_rules = rule_id;
+ *line_number = line_id;
+ return status;
+}
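+
+/*
+ * Contract note (editor's summary): on entry, *n_rules is the capacity of
+ * the match[]/action[] arrays; on return it holds the number of rules
+ * actually parsed. Reaching end of file early is not an error here (status
+ * stays 0), which is why cmd_pipeline_table_rule_add_bulk() separately
+ * checks n_rules_parsed != n_rules.
+ */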
diff --git a/examples/ip_pipeline/cli.h b/examples/ip_pipeline/cli.h
new file mode 100644
index 00000000..992e4c3a
--- /dev/null
+++ b/examples/ip_pipeline/cli.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_CLI_H__
+#define __INCLUDE_CLI_H__
+
+#include <stddef.h>
+
+void
+cli_process(char *in, char *out, size_t out_size);
+
+int
+cli_script_process(const char *file_name,
+ size_t msg_in_len_max,
+ size_t msg_out_len_max);
+
+#endif
diff --git a/examples/ip_pipeline/common.h b/examples/ip_pipeline/common.h
new file mode 100644
index 00000000..0886dfbe
--- /dev/null
+++ b/examples/ip_pipeline/common.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_COMMON_H_
+#define _INCLUDE_COMMON_H_
+
+#ifndef NAME_SIZE
+#define NAME_SIZE 64
+#endif
+
+#endif /* _INCLUDE_COMMON_H_ */
diff --git a/examples/ip_pipeline/config/action.cfg b/examples/ip_pipeline/config/action.cfg
deleted file mode 100644
index 994ae94a..00000000
--- a/examples/ip_pipeline/config/action.cfg
+++ /dev/null
@@ -1,68 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; ________________
-; RXQ0.0 --->| |---> TXQ0.0
-; | |
-; RXQ1.0 --->| |---> TXQ1.0
-; | Flow |
-; RXQ2.0 --->| Actions |---> TXQ2.0
-; | |
-; RXQ3.0 --->| |---> TXQ3.0
-; |________________|
-;
-;
-; Input packet: Ethernet/IPv4
-;
-; Packet buffer layout:
-; # Field Name Offset (Bytes) Size (Bytes)
-; 0 Mbuf 0 128
-; 1 Headroom 128 128
-; 2 Ethernet header 256 14
-; 3 IPv4 header 270 20
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = FLOW_ACTIONS
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
-n_flows = 65536
-n_meters_per_flow = 4
-flow_id_offset = 286; ipdaddr
-ip_hdr_offset = 270
-color_offset = 128
diff --git a/examples/ip_pipeline/config/action.sh b/examples/ip_pipeline/config/action.sh
deleted file mode 100644
index 2986ae60..00000000
--- a/examples/ip_pipeline/config/action.sh
+++ /dev/null
@@ -1,119 +0,0 @@
-#
-# run ./config/action.sh
-#
-
-p 1 action flow 0 meter 0 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 0 policer 0 g G y Y r R
-p 1 action flow 0 meter 1 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 0 policer 1 g G y Y r R
-p 1 action flow 0 meter 2 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 0 policer 2 g G y Y r R
-p 1 action flow 0 meter 3 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 0 policer 3 g G y Y r R
-p 1 action flow 0 port 0
-
-p 1 action flow 1 meter 0 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 1 policer 0 g G y Y r R
-p 1 action flow 1 meter 1 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 1 policer 1 g G y Y r R
-p 1 action flow 1 meter 2 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 1 policer 2 g G y Y r R
-p 1 action flow 1 meter 3 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 1 policer 3 g G y Y r R
-p 1 action flow 1 port 1
-
-p 1 action flow 2 meter 0 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 2 policer 0 g G y Y r R
-p 1 action flow 2 meter 1 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 2 policer 1 g G y Y r R
-p 1 action flow 2 meter 2 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 2 policer 2 g G y Y r R
-p 1 action flow 2 meter 3 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 2 policer 3 g G y Y r R
-p 1 action flow 2 port 2
-
-p 1 action flow 3 meter 0 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 3 policer 0 g G y Y r R
-p 1 action flow 3 meter 1 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 3 policer 1 g G y Y r R
-p 1 action flow 3 meter 2 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 3 policer 2 g G y Y r R
-p 1 action flow 3 meter 3 trtcm 1250000000 1250000000 1000000 1000000
-p 1 action flow 3 policer 3 g G y Y r R
-p 1 action flow 3 port 3
-
-#p 1 action flow bulk ./config/action.txt
-
-#p 1 action flow ls
-
-p 1 action flow 0 stats
-p 1 action flow 1 stats
-p 1 action flow 2 stats
-p 1 action flow 3 stats
-
-p 1 action dscp 0 class 0 color G
-p 1 action dscp 1 class 1 color G
-p 1 action dscp 2 class 2 color G
-p 1 action dscp 3 class 3 color G
-p 1 action dscp 4 class 0 color G
-p 1 action dscp 5 class 1 color G
-p 1 action dscp 6 class 2 color G
-p 1 action dscp 7 class 3 color G
-p 1 action dscp 8 class 0 color G
-p 1 action dscp 9 class 1 color G
-p 1 action dscp 10 class 2 color G
-p 1 action dscp 11 class 3 color G
-p 1 action dscp 12 class 0 color G
-p 1 action dscp 13 class 1 color G
-p 1 action dscp 14 class 2 color G
-p 1 action dscp 15 class 3 color G
-p 1 action dscp 16 class 0 color G
-p 1 action dscp 17 class 1 color G
-p 1 action dscp 18 class 2 color G
-p 1 action dscp 19 class 3 color G
-p 1 action dscp 20 class 0 color G
-p 1 action dscp 21 class 1 color G
-p 1 action dscp 22 class 2 color G
-p 1 action dscp 23 class 3 color G
-p 1 action dscp 24 class 0 color G
-p 1 action dscp 25 class 1 color G
-p 1 action dscp 26 class 2 color G
-p 1 action dscp 27 class 3 color G
-p 1 action dscp 27 class 0 color G
-p 1 action dscp 29 class 1 color G
-p 1 action dscp 30 class 2 color G
-p 1 action dscp 31 class 3 color G
-p 1 action dscp 32 class 0 color G
-p 1 action dscp 33 class 1 color G
-p 1 action dscp 34 class 2 color G
-p 1 action dscp 35 class 3 color G
-p 1 action dscp 36 class 0 color G
-p 1 action dscp 37 class 1 color G
-p 1 action dscp 38 class 2 color G
-p 1 action dscp 39 class 3 color G
-p 1 action dscp 40 class 0 color G
-p 1 action dscp 41 class 1 color G
-p 1 action dscp 42 class 2 color G
-p 1 action dscp 43 class 3 color G
-p 1 action dscp 44 class 0 color G
-p 1 action dscp 45 class 1 color G
-p 1 action dscp 46 class 2 color G
-p 1 action dscp 47 class 3 color G
-p 1 action dscp 48 class 0 color G
-p 1 action dscp 49 class 1 color G
-p 1 action dscp 50 class 2 color G
-p 1 action dscp 51 class 3 color G
-p 1 action dscp 52 class 0 color G
-p 1 action dscp 53 class 1 color G
-p 1 action dscp 54 class 2 color G
-p 1 action dscp 55 class 3 color G
-p 1 action dscp 56 class 0 color G
-p 1 action dscp 57 class 1 color G
-p 1 action dscp 58 class 2 color G
-p 1 action dscp 59 class 3 color G
-p 1 action dscp 60 class 0 color G
-p 1 action dscp 61 class 1 color G
-p 1 action dscp 62 class 2 color G
-p 1 action dscp 63 class 3 color G
-
-p 1 action dscp ls
diff --git a/examples/ip_pipeline/config/action.txt b/examples/ip_pipeline/config/action.txt
deleted file mode 100644
index f14207b9..00000000
--- a/examples/ip_pipeline/config/action.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# p <pipelineid> action flow bulk ./config/action.txt
-#
-
-flow 0 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 0
-flow 1 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 1
-flow 2 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 2
-flow 3 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 3
diff --git a/examples/ip_pipeline/config/diagram-generator.py b/examples/ip_pipeline/config/diagram-generator.py
deleted file mode 100755
index d9efc75e..00000000
--- a/examples/ip_pipeline/config/diagram-generator.py
+++ /dev/null
@@ -1,317 +0,0 @@
-#!/usr/bin/env python
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2016 Intel Corporation
-
-#
-# This script creates a visual representation for a configuration file used by
-# the DPDK ip_pipeline application.
-#
-# The input configuration file is translated to an output file in DOT syntax,
-# which is then used to create the image file using graphviz
-# (www.graphviz.org).
-#
-
-from __future__ import print_function
-import argparse
-import re
-import os
-
-#
-# Command to generate the image file
-#
-DOT_COMMAND = 'dot -Gsize=20,30 -Tpng %s > %s'
-
-#
-# Layout of generated DOT file
-#
-DOT_INTRO = \
- '#\n# Command to generate image file:\n# \t%s\n#\n\n'
-DOT_GRAPH_BEGIN = \
- 'digraph g {\n graph [ splines = true rankdir = "LR" ]\n'
-DOT_NODE_LINK_RX = \
- ' "%s RX" [ shape = box style = filled fillcolor = yellowgreen ]\n'
-DOT_NODE_LINK_TX = \
- ' "%s TX" [ shape = box style = filled fillcolor = yellowgreen ]\n'
-DOT_NODE_KNI_RX = \
- ' "%s RX" [ shape = box style = filled fillcolor = orange ]\n'
-DOT_NODE_KNI_TX = \
- ' "%s TX" [ shape = box style = filled fillcolor = orange ]\n'
-DOT_NODE_TAP_RX = \
- ' "%s RX" [ shape = box style = filled fillcolor = gold ]\n'
-DOT_NODE_TAP_TX = \
- ' "%s TX" [ shape = box style = filled fillcolor = gold ]\n'
-DOT_NODE_SOURCE = \
- ' "%s" [ shape = box style = filled fillcolor = darkgreen ]\n'
-DOT_NODE_SINK = \
- ' "%s" [ shape = box style = filled fillcolor = peachpuff ]\n'
-DOT_NODE_PIPELINE = \
- ' "%s" [ shape = box style = filled fillcolor = royalblue ]\n'
-DOT_EDGE_PKTQ = \
- ' "%s" -> "%s" [ label = "%s" color = gray ]\n'
-DOT_GRAPH_END = \
- '}\n'
-
-# Relationships between the graph nodes and the graph edges:
-#
-# Edge ID | Edge Label | Writer Node | Reader Node | Dependencies
-# --------+------------+-------------+---------------+--------------
-# RXQx.y | RXQx.y | LINKx | PIPELINEz | LINKx
-# TXQx.y | TXQx.y | PIPELINEz | LINKx | LINKx
-# SWQx | SWQx | PIPELINEy | PIPELINEz | -
-# TMx | TMx | PIPELINEy | PIPELINEz | LINKx
-# KNIx RX | KNIx | KNIx RX | PIPELINEy | KNIx, LINKx
-# KNIx TX | KNIx | PIPELINEy | KNIx TX | KNIx, LINKx
-# TAPx RX | TAPx | TAPx RX | PIPELINEy | TAPx
-# TAPx TX | TAPx | PIPELINEy | TAPx TX | TAPx
-# SOURCEx | SOURCEx | SOURCEx | PIPELINEy | SOURCEx
-# SINKx | SINKx | PIPELINEy | SINKx | SINKx
-
-
-#
-# Parse the input configuration file to detect the graph nodes and edges
-#
-def process_config_file(cfgfile):
- edges = {}
- links = set()
- knis = set()
- taps = set()
- sources = set()
- sinks = set()
- pipelines = set()
- pipeline = ''
-
- dotfile = cfgfile + '.txt'
- imgfile = cfgfile + '.png'
-
- #
- # Read configuration file
- #
- lines = open(cfgfile, 'r')
- for line in lines:
- # Remove any leading and trailing white space characters
- line = line.strip()
-
- # Remove any comment at end of line
- line, sep, tail = line.partition(';')
-
- # Look for next "PIPELINE" section
- match = re.search(r'\[(PIPELINE\d+)\]', line)
- if match:
- pipeline = match.group(1)
- continue
-
- # Look for next "pktq_in" section entry
- match = re.search(r'pktq_in\s*=\s*(.+)', line)
- if match:
- pipelines.add(pipeline)
- for q in re.findall('\S+', match.group(1)):
- match_rxq = re.search(r'^RXQ(\d+)\.\d+$', q)
- match_swq = re.search(r'^SWQ\d+$', q)
- match_tm = re.search(r'^TM(\d+)$', q)
- match_kni = re.search(r'^KNI(\d+)$', q)
- match_tap = re.search(r'^TAP\d+$', q)
- match_source = re.search(r'^SOURCE\d+$', q)
-
- # Set ID for the current packet queue (graph edge)
- q_id = ''
- if match_rxq or match_swq or match_tm or match_source:
- q_id = q
- elif match_kni or match_tap:
- q_id = q + ' RX'
- else:
- print('Error: Unrecognized pktq_in element "%s"' % q)
- return
-
- # Add current packet queue to the set of graph edges
- if q_id not in edges:
- edges[q_id] = {}
- if 'label' not in edges[q_id]:
- edges[q_id]['label'] = q
- if 'readers' not in edges[q_id]:
- edges[q_id]['readers'] = []
- if 'writers' not in edges[q_id]:
- edges[q_id]['writers'] = []
-
- # Add reader for the new edge
- edges[q_id]['readers'].append(pipeline)
-
- # Check for RXQ
- if match_rxq:
- link = 'LINK' + str(match_rxq.group(1))
- edges[q_id]['writers'].append(link + ' RX')
- links.add(link)
- continue
-
- # Check for SWQ
- if match_swq:
- continue
-
- # Check for TM
- if match_tm:
- link = 'LINK' + str(match_tm.group(1))
- links.add(link)
- continue
-
- # Check for KNI
- if match_kni:
- link = 'LINK' + str(match_kni.group(1))
- edges[q_id]['writers'].append(q_id)
- knis.add(q)
- links.add(link)
- continue
-
- # Check for TAP
- if match_tap:
- edges[q_id]['writers'].append(q_id)
- taps.add(q)
- continue
-
- # Check for SOURCE
- if match_source:
- edges[q_id]['writers'].append(q)
- sources.add(q)
- continue
-
- continue
-
- # Look for next "pktq_out" section entry
- match = re.search(r'pktq_out\s*=\s*(.+)', line)
- if match:
- for q in re.findall('\S+', match.group(1)):
- match_txq = re.search(r'^TXQ(\d+)\.\d+$', q)
- match_swq = re.search(r'^SWQ\d+$', q)
- match_tm = re.search(r'^TM(\d+)$', q)
- match_kni = re.search(r'^KNI(\d+)$', q)
- match_tap = re.search(r'^TAP(\d+)$', q)
- match_sink = re.search(r'^SINK(\d+)$', q)
-
- # Set ID for the current packet queue (graph edge)
- q_id = ''
- if match_txq or match_swq or match_tm or match_sink:
- q_id = q
- elif match_kni or match_tap:
- q_id = q + ' TX'
- else:
- print('Error: Unrecognized pktq_out element "%s"' % q)
- return
-
- # Add current packet queue to the set of graph edges
- if q_id not in edges:
- edges[q_id] = {}
- if 'label' not in edges[q_id]:
- edges[q_id]['label'] = q
- if 'readers' not in edges[q_id]:
- edges[q_id]['readers'] = []
- if 'writers' not in edges[q_id]:
- edges[q_id]['writers'] = []
-
- # Add writer for the new edge
- edges[q_id]['writers'].append(pipeline)
-
- # Check for TXQ
- if match_txq:
- link = 'LINK' + str(match_txq.group(1))
- edges[q_id]['readers'].append(link + ' TX')
- links.add(link)
- continue
-
- # Check for SWQ
- if match_swq:
- continue
-
- # Check for TM
- if match_tm:
- link = 'LINK' + str(match_tm.group(1))
- links.add(link)
- continue
-
- # Check for KNI
- if match_kni:
- link = 'LINK' + str(match_kni.group(1))
- edges[q_id]['readers'].append(q_id)
- knis.add(q)
- links.add(link)
- continue
-
- # Check for TAP
- if match_tap:
- edges[q_id]['readers'].append(q_id)
- taps.add(q)
- continue
-
- # Check for SINK
- if match_sink:
- edges[q_id]['readers'].append(q)
- sinks.add(q)
- continue
-
- continue
-
- #
- # Write DOT file
- #
- print('Creating DOT file "%s" ...' % dotfile)
- dot_cmd = DOT_COMMAND % (dotfile, imgfile)
- file = open(dotfile, 'w')
- file.write(DOT_INTRO % dot_cmd)
- file.write(DOT_GRAPH_BEGIN)
-
- # Write the graph nodes to the DOT file
- for l in sorted(links):
- file.write(DOT_NODE_LINK_RX % l)
- file.write(DOT_NODE_LINK_TX % l)
- for k in sorted(knis):
- file.write(DOT_NODE_KNI_RX % k)
- file.write(DOT_NODE_KNI_TX % k)
- for t in sorted(taps):
- file.write(DOT_NODE_TAP_RX % t)
- file.write(DOT_NODE_TAP_TX % t)
- for s in sorted(sources):
- file.write(DOT_NODE_SOURCE % s)
- for s in sorted(sinks):
- file.write(DOT_NODE_SINK % s)
- for p in sorted(pipelines):
- file.write(DOT_NODE_PIPELINE % p)
-
- # Write the graph edges to the DOT file
- for q in sorted(edges.keys()):
- rw = edges[q]
- if 'writers' not in rw:
- print('Error: "%s" has no writer' % q)
- return
- if 'readers' not in rw:
- print('Error: "%s" has no reader' % q)
- return
- for w in rw['writers']:
- for r in rw['readers']:
- file.write(DOT_EDGE_PKTQ % (w, r, rw['label']))
-
- file.write(DOT_GRAPH_END)
- file.close()
-
- #
- # Execute the DOT command to create the image file
- #
- print('Creating image file "%s" ...' % imgfile)
- if os.system('which dot > /dev/null'):
- print('Error: Unable to locate "dot" executable.'
- 'Please install the "graphviz" package (www.graphviz.org).')
- return
-
- os.system(dot_cmd)
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='Create diagram for IP '
- 'pipeline configuration '
- 'file.')
-
- parser.add_argument(
- '-f',
- '--file',
- help='input configuration file (e.g. "ip_pipeline.cfg")',
- required=True)
-
- args = parser.parse_args()
-
- process_config_file(args.file)
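
The script above accumulates an edges dictionary that maps each packet queue to
the graph nodes writing into it and reading from it, then emits one DOT edge per
(writer, reader) pair. A minimal Python sketch of that emission step, using
hypothetical sample data in place of a parsed .cfg file:

#!/usr/bin/env python
# Sketch of the DOT-emission step: one edge per (writer, reader) pair.
# The sample edges below are hypothetical; the real script derives them
# from the pktq_in/pktq_out entries of the configuration file.
edges = {
    'SWQ0': {'label': 'SWQ0',
             'writers': ['PIPELINE1'], 'readers': ['PIPELINE2']},
    'TXQ0.0': {'label': 'TXQ0.0',
               'writers': ['PIPELINE2'], 'readers': ['LINK0 TX']},
}

with open('pipeline.dot', 'w') as f:
    f.write('digraph pipeline {\n')
    for q in sorted(edges):
        e = edges[q]
        for w in e['writers']:
            for r in e['readers']:
                f.write('\t"%s" -> "%s" [label="%s"];\n' % (w, r, e['label']))
    f.write('}\n')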
diff --git a/examples/ip_pipeline/config/edge_router_downstream.cfg b/examples/ip_pipeline/config/edge_router_downstream.cfg
deleted file mode 100644
index c6b4e1f2..00000000
--- a/examples/ip_pipeline/config/edge_router_downstream.cfg
+++ /dev/null
@@ -1,97 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-; An edge router typically sits between two networks such as the provider
-; core network and the provider access network. A typical packet processing
-; pipeline for the downstream traffic (i.e. traffic from core to access
-; network) contains the following functional blocks: Packet RX & Routing,
-; Traffic management and Packet TX. The input packets are assumed to be
-; IPv4, while the output packets are Q-in-Q IPv4.
-;
-; A simple implementation for this functional pipeline is presented below.
-;
-; Packet Rx & Traffic Management Packet Tx
-; Routing (Pass-Through) (Pass-Through)
-; _____________________ SWQ0 ______________________ SWQ4 _____________________
-; RXQ0.0 --->| |----->| |----->| |---> TXQ0.0
-; | | SWQ1 | | SWQ5 | |
-; RXQ1.0 --->| |----->| |----->| |---> TXQ1.0
-; | (P1) | SWQ2 | (P2) | SWQ6 | (P3) |
-; RXQ2.0 --->| |----->| |----->| |---> TXQ2.0
-; | | SWQ3 | | SWQ7 | |
-; RXQ3.0 --->| |----->| |----->| |---> TXQ3.0
-; |_____________________| |______________________| |_____________________|
-; | | ^ | ^ | ^ | ^
-; | |__| |__| |__| |__|
-; +--> SINK0 TM0 TM1 TM2 TM3
-; (Default)
-;
-; Input packet: Ethernet/IPv4
-; Output packet: Ethernet/QinQ/IPv4
-;
-; Packet buffer layout:
-; # Field Name Offset (Bytes) Size (Bytes)
-; 0 Mbuf 0 128
-; 1 Headroom 128 128
-; 2 Ethernet header 256 14
-; 3 IPv4 header 270 20
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = ROUTING
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = SWQ0 SWQ1 SWQ2 SWQ3 SINK0
-encap = ethernet_qinq
-qinq_sched = test
-ip_hdr_offset = 270
-
-[PIPELINE2]
-type = PASS-THROUGH
-core = 2
-pktq_in = SWQ0 SWQ1 SWQ2 SWQ3 TM0 TM1 TM2 TM3
-pktq_out = TM0 TM1 TM2 TM3 SWQ4 SWQ5 SWQ6 SWQ7
-
-[PIPELINE3]
-type = PASS-THROUGH
-core = 3
-pktq_in = SWQ4 SWQ5 SWQ6 SWQ7
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
-
-[MEMPOOL0]
-pool_size = 2M
diff --git a/examples/ip_pipeline/config/edge_router_downstream.sh b/examples/ip_pipeline/config/edge_router_downstream.sh
deleted file mode 100644
index 67c3a0d1..00000000
--- a/examples/ip_pipeline/config/edge_router_downstream.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# run ./config/edge_router_downstream.sh
-#
-
-################################################################################
-# Routing: Ether QinQ, ARP off
-################################################################################
-p 1 route add default 4 #SINK0
-p 1 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 256 257
-p 1 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 258 259
-p 1 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 260 261
-p 1 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 262 263
-#p 1 route ls
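
The four /10 routes above split the address space by its top ten bits, one
quarter per output port, with everything else falling through to the default
route (SINK0). A longest-prefix-match sketch in plain Python 3 (illustration
only; the application itself resolves routes with an LPM table):

# Hypothetical stand-alone LPM lookup over the routes configured above.
import ipaddress

routes = [
    (ipaddress.ip_network('0.0.0.0/10'), 'port 0'),
    (ipaddress.ip_network('0.64.0.0/10'), 'port 1'),
    (ipaddress.ip_network('0.128.0.0/10'), 'port 2'),
    (ipaddress.ip_network('0.192.0.0/10'), 'port 3'),
]

def lookup(dst):
    best, best_len = 'SINK0 (default)', -1
    addr = ipaddress.ip_address(dst)
    for net, action in routes:
        if addr in net and net.prefixlen > best_len:
            best, best_len = action, net.prefixlen
    return best

print(lookup('0.70.1.2'))   # port 1 (matches 0.64.0.0/10)
print(lookup('10.0.0.1'))   # SINK0 (default)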
diff --git a/examples/ip_pipeline/config/edge_router_upstream.cfg b/examples/ip_pipeline/config/edge_router_upstream.cfg
deleted file mode 100644
index dea42b95..00000000
--- a/examples/ip_pipeline/config/edge_router_upstream.cfg
+++ /dev/null
@@ -1,124 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-; An edge router typically sits between two networks such as the provider
-; core network and the provider access network. A typical packet processing
-; pipeline for the upstream traffic (i.e. traffic from access to core
-; network) contains the following functional blocks: Packet RX & Firewall,
-; Flow classification, Metering, Routing and Packet TX. The input packets
-; are assumed to be Q-in-Q IPv4, while the output packets are MPLS IPv4
-; (with variable number of labels per route).
-;
-; A simple implementation for this functional pipeline is presented below.
-;
-; Packet RX & Pass-Through Flow Classification Flow Actions Routing
-; Firewall
-; __________ SWQ0 __________ SWQ4 __________ SWQ8 __________ SWQ12 __________
-; RXQ0.0 --->| |------>| |------>| |------>| |------>| |------> TXQ0.0
-; | | SWQ1 | | SWQ5 | | SWQ9 | | SWQ13 | |
-; RXQ1.0 --->| |------>| |------>| |------>| |------>| |------> TXQ1.0
-; | (P1) | SWQ2 | (P2) | SWQ6 | (P3) | SWQ10 | (P4) | SWQ14 | (P5) |
-; RXQ2.0 --->| |------>| |------>| |------>| |------>| |------> TXQ2.0
-; | | SWQ3 | | SWQ7 | | SWQ11 | | SWQ15 | |
-; RXQ3.0 --->| |------>| |------>| |------>| |------>| |------> TXQ3.0
-; |__________| |__________| |__________| |__________| |__________|
-; | | |
-; +--> SINK0 (Default) +--> SINK1 (Default) +--> SINK2 (Default)
-;
-; Input packet: Ethernet/QinQ/IPv4
-; Output packet: Ethernet/MPLS/IPv4
-;
-; Packet buffer layout:
-; # Field Name Offset (Bytes) Size (Bytes)
-; 0 Mbuf 0 128
-; 1 Headroom 128 128
-; 2 Ethernet header 256 14
-; 3 QinQ header 270 8
-; 4 IPv4 header 278 20
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = FIREWALL
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = SWQ0 SWQ1 SWQ2 SWQ3 SINK0
-n_rules = 4096
-pkt_type = qinq_ipv4
-
-[PIPELINE2]
-type = PASS-THROUGH
-core = 2
-pktq_in = SWQ0 SWQ1 SWQ2 SWQ3
-pktq_out = SWQ4 SWQ5 SWQ6 SWQ7
-dma_size = 8
-dma_dst_offset = 128
-dma_src_offset = 268; 1st Ethertype offset
-dma_src_mask = 00000FFF00000FFF; qinq
-dma_hash_offset = 136; dma_dst_offset + dma_size
-
-[PIPELINE3]
-type = FLOW_CLASSIFICATION
-core = 2
-pktq_in = SWQ4 SWQ5 SWQ6 SWQ7
-pktq_out = SWQ8 SWQ9 SWQ10 SWQ11 SINK1
-n_flows = 65536
-key_size = 8; dma_size
-key_offset = 128; dma_dst_offset
-hash_offset = 136; dma_hash_offset
-flowid_offset = 192
-
-[PIPELINE4]
-type = FLOW_ACTIONS
-core = 3
-pktq_in = SWQ8 SWQ9 SWQ10 SWQ11
-pktq_out = SWQ12 SWQ13 SWQ14 SWQ15
-n_flows = 65536
-n_meters_per_flow = 1
-flow_id_offset = 192; flowid_offset
-ip_hdr_offset = 278
-color_offset = 196; flowid_offset + sizeof(flow_id)
-
-[PIPELINE5]
-type = ROUTING
-core = 4
-pktq_in = SWQ12 SWQ13 SWQ14 SWQ15
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2
-encap = ethernet_mpls
-mpls_color_mark = yes
-ip_hdr_offset = 278
-color_offset = 196; flowid_offset + sizeof(flow_id)
diff --git a/examples/ip_pipeline/config/edge_router_upstream.sh b/examples/ip_pipeline/config/edge_router_upstream.sh
deleted file mode 100644
index 5d574c1a..00000000
--- a/examples/ip_pipeline/config/edge_router_upstream.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# run ./config/edge_router_upstream.sh
-#
-
-################################################################################
-# Firewall
-################################################################################
-p 1 firewall add default 4 #SINK0
-p 1 firewall add bulk ./config/edge_router_upstream_firewall.txt
-#p 1 firewall ls
-
-################################################################################
-# Flow Classification
-################################################################################
-p 3 flow add default 4 #SINK1
-p 3 flow add qinq bulk ./config/edge_router_upstream_flow.txt
-#p 3 flow ls
-
-################################################################################
-# Flow Actions - Metering and Policing
-################################################################################
-p 4 action flow bulk ./config/edge_router_upstream_action.txt
-#p 4 action flow ls
-
-################################################################################
-# Routing: Ether MPLS, ARP off
-################################################################################
-p 5 route add default 4 #SINK2
-p 5 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 0:1
-p 5 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 10:11
-p 5 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 20:21
-p 5 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 30:31
-#p 5 route ls
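
Each route above pushes a two-label MPLS stack (e.g. "mpls 0:1"). A hedged
sketch of how such a stack is packed on the wire per RFC 3032, where the TC
and TTL values are illustrative assumptions:

# Pack an MPLS label stack: each 32-bit entry is label(20) | TC(3) | S(1) | TTL(8),
# with the bottom-of-stack bit set on the last entry.
import struct

def mpls_stack(labels, tc=0, ttl=64):
    out = b''
    for i, label in enumerate(labels):
        s = 1 if i == len(labels) - 1 else 0
        out += struct.pack('!I', (label << 12) | (tc << 9) | (s << 8) | ttl)
    return out

print(mpls_stack([0, 1]).hex())    # stack for "mpls 0:1"
print(mpls_stack([30, 31]).hex())  # stack for "mpls 30:31"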
diff --git a/examples/ip_pipeline/config/firewall.cfg b/examples/ip_pipeline/config/firewall.cfg
deleted file mode 100644
index 2f5dd9f6..00000000
--- a/examples/ip_pipeline/config/firewall.cfg
+++ /dev/null
@@ -1,68 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; _______________
-; RXQ0.0 --->| |---> TXQ0.0
-; | |
-; RXQ1.0 --->| |---> TXQ1.0
-; | Firewall |
-; RXQ2.0 --->| |---> TXQ2.0
-; | |
-; RXQ3.0 --->| |---> TXQ3.0
-; |_______________|
-; |
-; +-----------> SINK0 (default rule)
-;
-; Input packet: Ethernet/IPv4
-;
-; Packet buffer layout:
-; # Field Name Offset (Bytes) Size (Bytes)
-; 0 Mbuf 0 128
-; 1 Headroom 128 128
-; 2 Ethernet header 256 14
-; 3 IPv4 header 270 20
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = FIREWALL
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
-n_rules = 4096
-pkt_type = ipv4
-;pkt_type = vlan_ipv4
-;pkt_type = qinq_ipv4
diff --git a/examples/ip_pipeline/config/firewall.sh b/examples/ip_pipeline/config/firewall.sh
deleted file mode 100644
index c83857ee..00000000
--- a/examples/ip_pipeline/config/firewall.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# run ./config/firewall.sh
-#
-
-p 1 firewall add default 4 #SINK0
-p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
-p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 0xF port 1
-p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 0xF port 2
-p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 0xF port 3
-
-#p 1 firewall add bulk ./config/firewall.txt
-
-p 1 firewall ls
diff --git a/examples/ip_pipeline/config/firewall.txt b/examples/ip_pipeline/config/firewall.txt
deleted file mode 100644
index 54cfffda..00000000
--- a/examples/ip_pipeline/config/firewall.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# p <pipelineid> firewall add bulk ./config/firewall.txt
-# p <pipelineid> firewall del bulk ./config/firewall.txt
-#
-
-priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
-priority 1 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 0xF port 1
-priority 1 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 0xF port 2
-priority 1 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 0xF port 3
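
The bulk format above is positional: priority, protocol family, source
address/depth, destination address/depth, source and destination port ranges,
protocol value, protocol mask, action. A hedged parser sketch (the field names
are descriptive assumptions, not application identifiers):

# Parse one bulk firewall rule line into named fields.
def parse_rule(line):
    t = line.split()
    return {
        'priority': int(t[1]),
        'src': (t[3], int(t[4])),          # address, prefix depth
        'dst': (t[5], int(t[6])),
        'sport': (int(t[7]), int(t[8])),   # low, high
        'dport': (int(t[9]), int(t[10])),
        'proto': int(t[11]),
        'proto_mask': int(t[12], 16),
        'action': ' '.join(t[13:]),
    }

r = parse_rule('priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0')
print(r['dst'], r['action'])   # ('100.0.0.0', 10) port 0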
diff --git a/examples/ip_pipeline/config/flow.cfg b/examples/ip_pipeline/config/flow.cfg
deleted file mode 100644
index cec990ab..00000000
--- a/examples/ip_pipeline/config/flow.cfg
+++ /dev/null
@@ -1,72 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; ________________
-; RXQ0.0 --->| |---> TXQ0.0
-; | |
-; RXQ1.0 --->| |---> TXQ1.0
-; | Flow |
-; RXQ2.0 --->| Classification |---> TXQ2.0
-; | |
-; RXQ3.0 --->| |---> TXQ3.0
-; |________________|
-; |
-; +-----------> SINK0 (flow lookup miss)
-;
-; Input packet: Ethernet/IPv4
-;
-; Packet buffer layout:
-; # Field Name Offset (Bytes) Size (Bytes)
-; 0 Mbuf 0 128
-; 1 Headroom 128 128
-; 2 Ethernet header 256 14
-; 3 QinQ/IPv4/IPv6 header 270 8/20/40
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = FLOW_CLASSIFICATION
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
-n_flows = 65536
-;key_size = 8 ; QinQ key size
-;key_offset = 268 ; QinQ key offset
-;key_mask = 00000FFF00000FFF ; QinQ key mask
-key_size = 16 ; IPv4 5-tuple key size
-key_offset = 278 ; IPv4 5-tuple key offset
-key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF ; IPv4 5-tuple key mask
-flowid_offset = 128
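
With the IPv4 header at offset 270, key_offset = 278 places the 16-byte key
over TTL, protocol, header checksum, source/destination addresses and the L4
ports; the key_mask then zeroes TTL and checksum, leaving the classic 5-tuple.
A Python 3 sketch of building and masking such a key (illustration only, not
the application's code path):

# Build the 16-byte key starting 8 bytes into the IPv4 header and apply
# the configured mask so that TTL and checksum never affect the lookup.
import socket
import struct

def flow_key(src_ip, dst_ip, sport, dport, proto, ttl=64, cksum=0):
    raw = struct.pack('!BBH4s4sHH', ttl, proto, cksum,
                      socket.inet_aton(src_ip), socket.inet_aton(dst_ip),
                      sport, dport)
    mask = bytes.fromhex('00FF0000FFFFFFFFFFFFFFFFFFFFFFFF')
    return bytes(b & m for b, m in zip(raw, mask))

k1 = flow_key('100.0.0.10', '200.0.0.10', 100, 200, 6, ttl=64)
k2 = flow_key('100.0.0.10', '200.0.0.10', 100, 200, 6, ttl=7)
print(k1 == k2)   # True: the masked TTL does not affect the key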
diff --git a/examples/ip_pipeline/config/flow.sh b/examples/ip_pipeline/config/flow.sh
deleted file mode 100644
index 489c7079..00000000
--- a/examples/ip_pipeline/config/flow.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#
-# run ./config/flow.sh
-#
-
-################################################################################
-# Flow classification (QinQ)
-################################################################################
-#p 1 flow add default 4 #SINK0
-#p 1 flow add qinq 100 200 port 0 id 0
-#p 1 flow add qinq 101 201 port 1 id 1
-#p 1 flow add qinq 102 202 port 2 id 2
-#p 1 flow add qinq 103 203 port 3 id 3
-
-#p 1 flow add qinq bulk ./config/flow.txt
-
-################################################################################
-# Flow classification (IPv4 5-tuple)
-################################################################################
-p 1 flow add default 4 #SINK0
-p 1 flow add ipv4 100.0.0.10 200.0.0.10 100 200 6 port 0 id 0
-p 1 flow add ipv4 100.0.0.11 200.0.0.11 101 201 6 port 1 id 1
-p 1 flow add ipv4 100.0.0.12 200.0.0.12 102 202 6 port 2 id 2
-p 1 flow add ipv4 100.0.0.13 200.0.0.13 103 203 6 port 3 id 3
-
-#p 1 flow add ipv4 bulk ./config/flow.txt
diff --git a/examples/ip_pipeline/config/flow.txt b/examples/ip_pipeline/config/flow.txt
deleted file mode 100644
index c1a141dd..00000000
--- a/examples/ip_pipeline/config/flow.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-#
-# p <pipelineid> flow add qinq bulk ./config/flow.txt
-#
-
-#qinq 100 200 port 0 id 0
-#qinq 101 201 port 1 id 1
-#qinq 102 202 port 2 id 2
-#qinq 103 203 port 3 id 3
-
-#
-# p <pipelineid> flow add ipv4 bulk ./config/flow.txt
-#
-
-ipv4 100.0.0.10 200.0.0.10 100 200 6 port 0 id 0
-ipv4 100.0.0.11 200.0.0.11 101 201 6 port 1 id 1
-ipv4 100.0.0.12 200.0.0.12 102 202 6 port 2 id 2
-ipv4 100.0.0.13 200.0.0.13 103 203 6 port 3 id 3
diff --git a/examples/ip_pipeline/config/ip_pipeline.cfg b/examples/ip_pipeline/config/ip_pipeline.cfg
deleted file mode 100644
index 095ed25e..00000000
--- a/examples/ip_pipeline/config/ip_pipeline.cfg
+++ /dev/null
@@ -1,9 +0,0 @@
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = PASS-THROUGH
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
diff --git a/examples/ip_pipeline/config/ip_pipeline.sh b/examples/ip_pipeline/config/ip_pipeline.sh
deleted file mode 100644
index 4fca2597..00000000
--- a/examples/ip_pipeline/config/ip_pipeline.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# run ./config/ip_pipeline.sh
-#
-
-p 1 ping
diff --git a/examples/ip_pipeline/config/kni.cfg b/examples/ip_pipeline/config/kni.cfg
deleted file mode 100644
index cea208b4..00000000
--- a/examples/ip_pipeline/config/kni.cfg
+++ /dev/null
@@ -1,67 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2016 Intel Corporation.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-;
-; ______________ ______________________
-; | | KNI0 | |
-; RXQ0.0 --->| |------->|--+ |
-; | | KNI1 | | br0 |
-; TXQ1.0 <---| |<-------|<-+ |
-; | Pass-through | | Linux Kernel |
-; | (P1) | | Network Stack |
-; | | KNI1 | |
-; RXQ1.0 --->| |------->|--+ |
-; | | KNI0 | | br0 |
-; TXQ0.0 <---| |<-------|<-+ |
-; |______________| |______________________|
-;
-; Insert Linux kernel KNI module:
-; [Linux]$ insmod rte_kni.ko
-;
-; Configure Linux kernel bridge between KNI0 and KNI1 interfaces:
-; [Linux]$ ifconfig KNI0 up
-; [Linux]$ ifconfig KNI1 up
-; [Linux]$ brctl addbr "br0"
-; [Linux]$ brctl addif br0 KNI0
-; [Linux]$ brctl addif br0 KNI1
-; [Linux]$ ifconfig br0 up
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = PASS-THROUGH
-core = 1
-pktq_in = RXQ0.0 KNI1 RXQ1.0 KNI0
-pktq_out = KNI0 TXQ1.0 KNI1 TXQ0.0
diff --git a/examples/ip_pipeline/config/l2fwd.cfg b/examples/ip_pipeline/config/l2fwd.cfg
deleted file mode 100644
index a1df9e6a..00000000
--- a/examples/ip_pipeline/config/l2fwd.cfg
+++ /dev/null
@@ -1,58 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-;
-; The pass-through pipeline below connects the input ports to the output ports
-; as follows: RXQ0.0 -> TXQ1.0, RXQ1.0 -> TXQ0.0, RXQ2.0 -> TXQ3.0 and
-; RXQ3.0 -> TXQ2.0.
-; ________________
-; RXQ0.0 --->|................|---> TXQ1.0
-; | |
-; RXQ1.0 --->|................|---> TXQ0.0
-; | Pass-through |
-; RXQ2.0 --->|................|---> TXQ3.0
-; | |
-; RXQ3.0 --->|................|---> TXQ2.0
-; |________________|
-;
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = PASS-THROUGH
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = TXQ1.0 TXQ0.0 TXQ3.0 TXQ2.0
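
The cross-connect above pairs ports 0<->1 and 2<->3, i.e. the output port is
the input port XOR 1:

for rx in range(4):
    print('RXQ%d.0 -> TXQ%d.0' % (rx, rx ^ 1))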
diff --git a/examples/ip_pipeline/config/l3fwd.cfg b/examples/ip_pipeline/config/l3fwd.cfg
deleted file mode 100644
index 02c8f36f..00000000
--- a/examples/ip_pipeline/config/l3fwd.cfg
+++ /dev/null
@@ -1,68 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; _______________
-; RXQ0.0 --->| |---> TXQ0.0
-; | |
-; RXQ1.0 --->| |---> TXQ1.0
-; | Routing |
-; RXQ2.0 --->| |---> TXQ2.0
-; | |
-; RXQ3.0 --->| |---> TXQ3.0
-; |_______________|
-; |
-; +-----------> SINK0 (route miss)
-;
-; Input packet: Ethernet/IPv4
-;
-; Packet buffer layout:
-; # Field Name Offset (Bytes) Size (Bytes)
-; 0 Mbuf 0 128
-; 1 Headroom 128 128
-; 2 Ethernet header 256 14
-; 3 IPv4 header 270 20
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = ROUTING
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
-encap = ethernet
-;encap = ethernet_qinq
-;encap = ethernet_mpls
-ip_hdr_offset = 270
diff --git a/examples/ip_pipeline/config/l3fwd.sh b/examples/ip_pipeline/config/l3fwd.sh
deleted file mode 100644
index 47406aa4..00000000
--- a/examples/ip_pipeline/config/l3fwd.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# run ./config/l3fwd.sh
-#
-
-################################################################################
-# Routing: encap = ethernet, arp = off
-################################################################################
-p 1 route add default 4 #SINK0
-p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0
-p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1
-p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2
-p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3
-p 1 route ls
-
-################################################################################
-# Routing: encap = ethernet_qinq, arp = off
-################################################################################
-#p 1 route add default 4 #SINK0
-#p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 1000 2000
-#p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 1001 2001
-#p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 1002 2002
-#p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 1003 2003
-#p 1 route ls
-
-################################################################################
-# Routing: encap = ethernet_mpls, arp = off
-################################################################################
-#p 1 route add default 4 #SINK0
-#p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 1000:2000
-#p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 1001:2001
-#p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 1002:2002
-#p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 1003:2003
-#p 1 route ls
diff --git a/examples/ip_pipeline/config/l3fwd_arp.cfg b/examples/ip_pipeline/config/l3fwd_arp.cfg
deleted file mode 100644
index 2c63c8fd..00000000
--- a/examples/ip_pipeline/config/l3fwd_arp.cfg
+++ /dev/null
@@ -1,70 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; _______________
-; RXQ0.0 --->| |---> TXQ0.0
-; | |
-; RXQ1.0 --->| |---> TXQ1.0
-; | Routing |
-; RXQ2.0 --->| |---> TXQ2.0
-; | |
-; RXQ3.0 --->| |---> TXQ3.0
-; |_______________|
-; |
-; +-----------> SINK0 (route miss)
-;
-; Input packet: Ethernet/IPv4
-;
-; Packet buffer layout:
-; # Field Name Offset (Bytes) Size (Bytes)
-; 0 Mbuf 0 128
-; 1 Headroom 128 128
-; 2 Ethernet header 256 14
-; 3 IPv4 header 270 20
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = ROUTING
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
-encap = ethernet
-;encap = ethernet_qinq
-;encap = ethernet_mpls
-n_arp_entries = 1024
-ip_hdr_offset = 270
-arp_key_offset = 128
diff --git a/examples/ip_pipeline/config/l3fwd_arp.sh b/examples/ip_pipeline/config/l3fwd_arp.sh
deleted file mode 100644
index 20bea582..00000000
--- a/examples/ip_pipeline/config/l3fwd_arp.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# run ./config/l3fwd_arp.sh
-#
-
-################################################################################
-# ARP
-################################################################################
-p 1 arp add default 4 #SINK0
-p 1 arp add 0 10.0.0.1 a0:b0:c0:d0:e0:f0
-p 1 arp add 1 11.0.0.1 a1:b1:c1:d1:e1:f1
-p 1 arp add 2 12.0.0.1 a2:b2:c2:d2:e2:f2
-p 1 arp add 3 13.0.0.1 a3:b3:c3:d3:e3:f3
-p 1 arp ls
-
-################################################################################
-# Routing: encap = ethernet, arp = on
-################################################################################
-p 1 route add default 4 #SINK0
-p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1
-p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1
-p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1
-p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1
-p 1 route ls
-
-################################################################################
-# Routing: encap = ethernet_qinq, arp = on
-################################################################################
-#p 1 route add default 4 #SINK0
-#p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1 qinq 1000 2000
-#p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1 qinq 1001 2001
-#p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1 qinq 1002 2002
-#p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1 qinq 1003 2003
-#p 1 route ls
-
-################################################################################
-# Routing: encap = ethernet_mpls, arp = on
-################################################################################
-#p 1 route add default 4 #SINK0
-#p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1 mpls 1000:2000
-#p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1 mpls 1001:2001
-#p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1 mpls 1002:2002
-#p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1 mpls 1003:2003
-#p 1 route ls
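
With ARP enabled, each route above stores a (port, next-hop IP) pair and the
ARP table resolves that next hop to a destination MAC at forwarding time. A
dictionary-based sketch of the two-stage lookup (plain stand-ins for the
application's LPM and hash tables):

# Route lookup yields (port, next-hop IP); ARP lookup yields the MAC.
arp = {
    (0, '10.0.0.1'): 'a0:b0:c0:d0:e0:f0',
    (1, '11.0.0.1'): 'a1:b1:c1:d1:e1:f1',
}
routes = {
    '100.0.0.0/10': (0, '10.0.0.1'),
    '100.64.0.0/10': (1, '11.0.0.1'),
}

port, nh = routes['100.0.0.0/10']
print('TX port %d, dst MAC %s' % (port, arp[(port, nh)]))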
diff --git a/examples/ip_pipeline/config/network_layers.cfg b/examples/ip_pipeline/config/network_layers.cfg
deleted file mode 100644
index 397b5d77..00000000
--- a/examples/ip_pipeline/config/network_layers.cfg
+++ /dev/null
@@ -1,227 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; The diagram below shows how additional protocol components can be plugged into
-; the IP layer implemented by the ip_pipeline application. Pick your favorite
-; open source components for dynamic ARP, ICMP, UDP or TCP termination, etc. and
-; connect them through SWQs to the IP infrastructure.
-;
-; The input packets with local destination are sent to the UDP/TCP applications
-; while the input packets with remote destination are routed back to the
-; network. Additional features can easily be added to this setup:
-; * IP Reassembly: add SWQs with IP reassembly enabled (typically required for
-; the input traffic with local destination);
-; * IP Fragmentation: add SWQs with IP fragmentation enabled (typically
-; required to enforce the MTU for the routed output traffic);
-; * Traffic Metering: add Flow Action pipeline instances (e.g. for metering the
-; TCP connections or ICMP input traffic);
-; * Traffic Management: add TMs for the required output LINKs;
-; * Protocol encapsulations (QinQ, MPLS) for the output packets: part of the
-; routing pipeline configuration.
-;
-; _________ _________
-; | | | |
-; | UDP | | TCP |
-; | App | | App |
-; |_________| |_________|
-; ^ | ^ |
-; __|___V__ __|___V__
-; | | SWQ0 (UDP TX) | | SWQ1 (TCP TX)
-; | UDP |-------+ | TCP |------------+
-; | | | | | |
-; |_________| | |_________| |
-; ^ | ^ |
-; | SWQ2 | | SWQ3 |
-; | (UDP RX) | | (TCP RX) |
-; ____|____ | ____|____ |
-; | | | | | |
-; RXQ<0..3>.1 ------>|Firewall +--->| | +------>| Flow +--->| |
-; (UDP local dest) | (P2) | SINK0 | | | (P3) | SINK1 |
-; |_________| (Deny)| | |_________| (RST) |
-; RXQ<0..3>.2 -------------------------|-----+ |
-; (TCP local dest) | |
-; | +------------------------------+
-; | |
-; _V_____V_
-; | |
-; | Routing | TXQ<0..3>.0
-; RXQ<0..3>.0 ---------------------->| & ARP +----------------------------->
-; (IP remote dest) | (P1) |
-; |_________|
-; | ^ |
-; SWQ4 +-------------+ | | SWQ5 (ARP miss)
-; (Route miss) | | +------------+
-; | +-------------+ |
-; ___V__|__ SWQ6 ____V____
-; | | (ICMP TX) | | TXQ<0..3>.1
-; RXQ<0..3>.3 ------>| ICMP | +------>| Dyn ARP +------------->
-; (IP local dest) | | | | |
-; |_________| | |_________|
-; RXQ<0..3>.4 -------------------------------+
-; (ARP)
-;
-; This configuration file implements the diagram presented below, where the
-; dynamic ARP, ICMP, UDP and TCP components have been stubbed out and replaced
-; with loop-back and packet drop devices.
-;
-; _________ _________
-; | | SWQ0 (UDP TX) | | SWQ1 (TCP TX)
-; |Loopback |-------+ |Loopback |------------+
-; | (P4) | | | (P5) | |
-; |_________| | |_________| |
-; ^ | ^ |
-; | SWQ2 | | SWQ3 |
-; | (UDP RX) | | (TCP RX) |
-; ____|____ | ____|____ |
-; | | | | | |
-; RXQ<0..3>.1 ------>|Firewall +--->| | +------>| Flow +--->| |
-; (UDP local dest) | (P2) | SINK0 | | | (P3) | SINK1 |
-; |_________| (Deny)| | |_________| (RST) |
-; RXQ<0..3>.2 -------------------------|-----+ |
-; (TCP local dest) | |
-; | +------------------------------+
-; | |
-; _V_____V_
-; | |
-; | Routing | TXQ<0..3>.0
-; RXQ<0..3>.0 ---------------------->| & ARP +----------------------------->
-; (IP remote dest) | (P1) |
-; |_________|
-; | |
-; SINK2 |<---+ +--->| SINK3
-; (Route miss) (ARP miss)
-;
-; _________ _________
-; | | | |
-; RXQ<0..3>.3 ------>| Drop +--->| SINK<4..7> +------>| Drop +--->| SINK<8..11>
-; (IP local dest) | (P6) | (IP local dest) | | (P7) | (ARP)
-; |_________| | |_________|
-; RXQ<0..3>.4 ------------------------------------+
-; (ARP)
-;
-;
-; Input packet: Ethernet/IPv4 or Ethernet/ARP
-; Output packet: Ethernet/IPv4 or Ethernet/ARP
-;
-; Packet buffer layout (for input IPv4 packets):
-; # Field Name Offset (Bytes) Size (Bytes)
-; 0 Mbuf 0 128
-; 1 Headroom 128 128
-; 2 Ethernet header 256 14
-; 3 IPv4 header 270 20
-; 4 ICMP/UDP/TCP header 290 8/8/20
-
-[EAL]
-log_level = 0
-
-[LINK0]
-udp_local_q = 1
-tcp_local_q = 2
-ip_local_q = 3
-arp_q = 4
-
-[LINK1]
-udp_local_q = 1
-tcp_local_q = 2
-ip_local_q = 3
-arp_q = 4
-
-[LINK2]
-udp_local_q = 1
-tcp_local_q = 2
-ip_local_q = 3
-arp_q = 4
-
-[LINK3]
-udp_local_q = 1
-tcp_local_q = 2
-ip_local_q = 3
-arp_q = 4
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = ROUTING
-core = 1
-pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0 SWQ0 SWQ1
-pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2 SINK3
-port_local_dest = 4 ; SINK2 (Drop)
-n_arp_entries = 1000
-ip_hdr_offset = 270
-arp_key_offset = 128
-
-[PIPELINE2]
-type = FIREWALL
-core = 1
-pktq_in = RXQ0.1 RXQ1.1 RXQ2.1 RXQ3.1
-pktq_out = SWQ2 SINK0
-n_rules = 4096
-
-[PIPELINE3]
-type = FLOW_CLASSIFICATION
-core = 1
-pktq_in = RXQ0.2 RXQ1.2 RXQ2.2 RXQ3.2
-pktq_out = SWQ3 SINK1
-n_flows = 65536
-key_size = 16 ; IPv4 5-tuple key size
-key_offset = 278 ; IPv4 5-tuple key offset
-key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF ; IPv4 5-tuple key mask
-flowid_offset = 128 ; Flow ID effectively acts as TCP socket ID
-
-[PIPELINE4]
-type = PASS-THROUGH ; Loop-back (UDP place-holder)
-core = 1
-pktq_in = SWQ2
-pktq_out = SWQ0
-swap = 282 286 ; IPSRC <-> IPDST
-swap = 290 292 ; PORTSRC <-> PORTDST
-
-[PIPELINE5]
-type = PASS-THROUGH ; Loop-back (TCP place-holder)
-core = 1
-pktq_in = SWQ3
-pktq_out = SWQ1
-swap = 282 286 ; IPSRC <-> IPDST
-swap = 290 292 ; PORTSRC <-> PORTDST
-
-[PIPELINE6]
-type = PASS-THROUGH ; Drop (ICMP place-holder)
-core = 1
-pktq_in = RXQ0.3 RXQ1.3 RXQ2.3 RXQ3.3
-pktq_out = SINK4 SINK5 SINK6 SINK7
-
-[PIPELINE7]
-type = PASS-THROUGH ; Drop (Dynamic ARP place-holder)
-core = 1
-pktq_in = RXQ0.4 RXQ1.4 RXQ2.4 RXQ3.4
-pktq_out = SINK8 SINK9 SINK10 SINK11
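
The loop-back pipelines above use the swap directive to exchange byte ranges
inside the packet, turning a received datagram into its reply. Assuming the
field size equals the gap between the two offsets (which fits both annotated
uses: 4-byte IP addresses at 282/286, 2-byte ports at 290/292), a sketch:

# Exchange two adjacent, equal-length byte ranges in the packet buffer;
# offsets are mbuf-relative, matching the cfg entries.
def swap(buf, off_a, off_b):
    n = off_b - off_a
    buf[off_a:off_a + n], buf[off_b:off_b + n] = \
        buf[off_b:off_b + n], buf[off_a:off_a + n]

pkt = bytearray(512)
pkt[282:286] = b'\x64\x00\x00\x0a'   # IPSRC 100.0.0.10
pkt[286:290] = b'\x0a\x00\x00\x01'   # IPDST 10.0.0.1
swap(pkt, 282, 286)                  # IPSRC <-> IPDST
print(pkt[282:286], pkt[286:290])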
diff --git a/examples/ip_pipeline/config/network_layers.sh b/examples/ip_pipeline/config/network_layers.sh
deleted file mode 100644
index 449b0069..00000000
--- a/examples/ip_pipeline/config/network_layers.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-# run ./config/network_layers.sh
-#
-
-################################################################################
-# Link configuration
-################################################################################
-# Routes added implicitly when links are brought UP:
-# IP Prefix = 10.0.0.1/16 => (Port 0, Local)
-# IP Prefix = 10.0.0.1/32 => (Port 4, Local)
-# IP Prefix = 10.1.0.1/16 => (Port 1, Local)
-# IP Prefix = 10.1.0.1/32 => (Port 4, Local)
-# IP Prefix = 10.2.0.1/16 => (Port 2, Local)
-# IP Prefix = 10.2.0.1/32 => (Port 4, Local)
-# IP Prefix = 10.3.0.1/16 => (Port 3, Local)
-# IP Prefix = 10.3.0.1/32 => (Port 4, Local)
-link 0 down
-link 1 down
-link 2 down
-link 3 down
-link 0 config 10.0.0.1 16
-link 1 config 10.1.0.1 16
-link 2 config 10.2.0.1 16
-link 3 config 10.3.0.1 16
-link 0 up
-link 1 up
-link 2 up
-link 3 up
-#link ls
-
-################################################################################
-# Static ARP
-################################################################################
-p 1 arp add default 5 #SINK3
-p 1 arp add 0 10.0.0.2 a0:b0:c0:d0:e0:f0
-p 1 arp add 1 10.1.0.2 a1:b1:c1:d1:e1:f1
-p 1 arp add 2 10.2.0.2 a2:b2:c2:d2:e2:f2
-p 1 arp add 3 10.3.0.2 a3:b3:c3:d3:e3:f3
-#p 1 arp ls
-
-################################################################################
-# Routes
-################################################################################
-p 1 route add default 4 #SINK2
-p 1 route add 100.0.0.0 16 port 0 ether 10.0.0.2
-p 1 route add 100.1.0.0 16 port 1 ether 10.1.0.2
-p 1 route add 100.2.0.0 16 port 2 ether 10.2.0.2
-p 1 route add 100.3.0.0 16 port 3 ether 10.3.0.2
-#p 1 route ls
-
-################################################################################
-# Local destination UDP traffic
-################################################################################
-# Prio = Lowest: [SA = ANY, DA = ANY, SP = ANY, DP = ANY, PROTO = ANY] => Drop
-# Prio = 1 (High): [SA = ANY, DA = 10.0.0.1, SP = ANY, DP = 1000, PROTO = UDP] => Allow
-# Prio = 1 (High): [SA = ANY, DA = 10.1.0.1, SP = ANY, DP = 1001, PROTO = UDP] => Allow
-# Prio = 1 (High): [SA = ANY, DA = 10.2.0.1, SP = ANY, DP = 1002, PROTO = UDP] => Allow
-# Prio = 1 (High): [SA = ANY, DA = 10.3.0.1, SP = ANY, DP = 1003, PROTO = UDP] => Allow
-p 2 firewall add default 1 #SINK0
-p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.0.0.1 32 0 65535 1000 1000 17 0xF port 0
-p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.1.0.1 32 0 65535 1001 1001 17 0xF port 0
-p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.2.0.1 32 0 65535 1002 1002 17 0xF port 0
-p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.3.0.1 32 0 65535 1003 1003 17 0xF port 0
-#p 2 firewall ls
-
-################################################################################
-# Local destination TCP traffic
-################################################################################
-# Unknown connection => Drop
-# TCP [SA = 100.0.0.10, DA = 10.0.0.1, SP = 1000, DP = 80] => socket ID = 0
-# TCP [SA = 100.1.0.10, DA = 10.1.0.1, SP = 1001, DP = 80] => socket ID = 1
-# TCP [SA = 100.2.0.10, DA = 10.2.0.1, SP = 1002, DP = 80] => socket ID = 2
-# TCP [SA = 100.3.0.10, DA = 10.3.0.1, SP = 1003, DP = 80] => socket ID = 3
-p 3 flow add default 1 #SINK1
-p 3 flow add ipv4 100.0.0.10 10.0.0.1 1000 80 6 port 0 id 0
-p 3 flow add ipv4 100.1.0.10 10.1.0.1 1001 80 6 port 0 id 1
-p 3 flow add ipv4 100.2.0.10 10.2.0.1 1002 80 6 port 0 id 2
-p 3 flow add ipv4 100.3.0.10 10.3.0.1 1003 80 6 port 0 id 3
-#p 3 flow ls
diff --git a/examples/ip_pipeline/config/pipeline-to-core-mapping.py b/examples/ip_pipeline/config/pipeline-to-core-mapping.py
deleted file mode 100755
index fc52b2b3..00000000
--- a/examples/ip_pipeline/config/pipeline-to-core-mapping.py
+++ /dev/null
@@ -1,906 +0,0 @@
-#!/usr/bin/env python
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2016 Intel Corporation
-
-#
-# This script maps the set of pipelines identified in the input
-# configuration file (MASTER pipelines are ignored) to the set of cores
-# provided as an input argument, and creates a configuration file for
-# each of the mapping combinations.
-#
-
-from __future__ import print_function
-from collections import namedtuple
-import argparse
-import array
-import errno
-import itertools
-import os
-import re
-import sys
-
-# default values
-enable_stage0_traceout = 1
-enable_stage1_traceout = 1
-enable_stage2_traceout = 1
-
-enable_stage1_fileout = 1
-enable_stage2_fileout = 1
-
-Constants = namedtuple('Constants', ['MAX_CORES', 'MAX_PIPELINES'])
-constants = Constants(16, 64)
-
-# pattern for physical core
-pattern_phycore = r'^(s|S)\d(c|C)[1-9][0-9]*$'
-reg_phycore = re.compile(pattern_phycore)
-
-
-def popcount(mask):
- return bin(mask).count("1")
-
-
-def len2mask(length):
- if (length == 0):
- return 0
-
- if (length > 64):
- sys.exit('error: len2mask - length %i > 64. exiting' % length)
-
- return int('1' * length, 2)
-
-
-def bitstring_write(n, n_bits):
- tmpstr = ""
- if (n_bits > 64):
- return
-
- i = n_bits - 1
- while (i >= 0):
- cond = (n & (1 << i))
- if (cond):
- print('1', end='')
- tmpstr += '1'
- else:
- print('0', end='')
- tmpstr += '0'
- i -= 1
- return tmpstr
-
-
-class Cores0:
-
- def __init__(self):
- self.n_pipelines = 0
-
-
-class Cores1:
-
- def __init__(self):
- self.pipelines = 0
- self.n_pipelines = 0
-
-
-class Cores2:
-
- def __init__(self):
- self.pipelines = 0
- self.n_pipelines = 0
- self.counter = 0
- self.counter_max = 0
- self.bitpos = array.array(
- "L", itertools.repeat(0, constants.MAX_PIPELINES))
-
-
-class Context0:
-
- def __init__(self):
- self.cores = [Cores0() for i in range(0, constants.MAX_CORES)]
- self.n_cores = 0
- self.n_pipelines = 0
- self.n_pipelines0 = 0
- self.pos = 0
- self.file_comment = ""
- self.ctx1 = None
- self.ctx2 = None
-
- def stage0_print(self):
- print('printing Context0 obj')
- print('c0.cores(n_pipelines) = [ ', end='')
- for cores_count in range(0, constants.MAX_CORES):
- print(self.cores[cores_count].n_pipelines, end=' ')
- print(']')
- print('c0.n_cores = %d' % self.n_cores)
- print('c0.n_pipelines = %d' % self.n_pipelines)
- print('c0.n_pipelines0 = %d' % self.n_pipelines0)
- print('c0.pos = %d' % self.pos)
- print('c0.file_comment = %s' % self.file_comment)
- if (self.ctx1 is not None):
- print('c0.ctx1 = ', end='')
- print(repr(self.ctx1))
- else:
- print('c0.ctx1 = None')
-
- if (self.ctx2 is not None):
- print('c0.ctx2 = ', end='')
- print(repr(self.ctx2))
- else:
- print('c0.ctx2 = None')
-
- def stage0_init(self, num_cores, num_pipelines, ctx1, ctx2):
- self.n_cores = num_cores
- self.n_pipelines = num_pipelines
- self.ctx1 = ctx1
- self.ctx2 = ctx2
-
- def stage0_process(self):
- # stage0 init
- self.cores[0].n_pipelines = self.n_pipelines
- self.n_pipelines0 = 0
- self.pos = 1
-
- while True:
- # go forward
- while True:
- if ((self.pos < self.n_cores) and (self.n_pipelines0 > 0)):
- self.cores[self.pos].n_pipelines = min(
- self.cores[self.pos - 1].n_pipelines,
- self.n_pipelines0)
- self.n_pipelines0 -= self.cores[self.pos].n_pipelines
- self.pos += 1
- else:
- break
-
- # check solution
- if (self.n_pipelines0 == 0):
- self.stage0_log()
- self.ctx1.stage1_init(self, self.ctx2) # self is object c0
- self.ctx1.stage1_process()
-
- # go backward
- while True:
- if (self.pos == 0):
- return
-
- self.pos -= 1
- if ((self.cores[self.pos].n_pipelines > 1) and
- (self.pos != (self.n_cores - 1))):
- break
-
- self.n_pipelines0 += self.cores[self.pos].n_pipelines
- self.cores[self.pos].n_pipelines = 0
-
- # rearm
- self.cores[self.pos].n_pipelines -= 1
- self.n_pipelines0 += 1
- self.pos += 1
-
- def stage0_log(self):
- tmp_file_comment = ""
- if(enable_stage0_traceout != 1):
- return
-
- print('STAGE0: ', end='')
- tmp_file_comment += 'STAGE0: '
- for cores_count in range(0, self.n_cores):
- print('C%d = %d\t'
- % (cores_count,
- self.cores[cores_count].n_pipelines), end='')
- tmp_file_comment += "C{} = {}\t".format(
- cores_count, self.cores[cores_count].n_pipelines)
- # end for
- print('')
- self.ctx1.stage0_file_comment = tmp_file_comment
- self.ctx2.stage0_file_comment = tmp_file_comment
-
-
-class Context1:
- _fileTrace = None
-
- def __init__(self):
- self.cores = [Cores1() for i in range(constants.MAX_CORES)]
- self.n_cores = 0
- self.n_pipelines = 0
- self.pos = 0
- self.stage0_file_comment = ""
- self.stage1_file_comment = ""
-
- self.ctx2 = None
- self.arr_pipelines2cores = []
-
- def stage1_reset(self):
- for i in range(constants.MAX_CORES):
- self.cores[i].pipelines = 0
- self.cores[i].n_pipelines = 0
-
- self.n_cores = 0
- self.n_pipelines = 0
- self.pos = 0
- self.ctx2 = None
- # clear list
- del self.arr_pipelines2cores[:]
-
- def stage1_print(self):
- print('printing Context1 obj')
- print('ctx1.cores(pipelines,n_pipelines) = [ ', end='')
- for cores_count in range(0, constants.MAX_CORES):
- print('(%d,%d)' % (self.cores[cores_count].pipelines,
- self.cores[cores_count].n_pipelines), end=' ')
- print(']')
- print('ctx1.n_cores = %d' % self.n_cores)
- print('ctx1.n_pipelines = %d' % self.n_pipelines)
- print('ctx1.pos = %d' % self.pos)
- print('ctx1.stage0_file_comment = %s' % self.stage0_file_comment)
- print('ctx1.stage1_file_comment = %s' % self.stage1_file_comment)
- if (self.ctx2 is not None):
- print('ctx1.ctx2 = ', end='')
- print(self.ctx2)
- else:
- print('ctx1.ctx2 = None')
-
- def stage1_init(self, c0, ctx2):
- self.stage1_reset()
- self.n_cores = 0
- while (c0.cores[self.n_cores].n_pipelines > 0):
- self.n_cores += 1
-
- self.n_pipelines = c0.n_pipelines
- self.ctx2 = ctx2
-
- self.arr_pipelines2cores = [0] * self.n_pipelines
-
- i = 0
- while (i < self.n_cores):
- self.cores[i].n_pipelines = c0.cores[i].n_pipelines
- i += 1
-
- def stage1_process(self):
- pipelines_max = len2mask(self.n_pipelines)
- while True:
- pos = 0
- overlap = 0
-
- if (self.cores[self.pos].pipelines == pipelines_max):
- if (self.pos == 0):
- return
-
- self.cores[self.pos].pipelines = 0
- self.pos -= 1
- continue
-
- self.cores[self.pos].pipelines += 1
- if (popcount(self.cores[self.pos].pipelines) !=
- self.cores[self.pos].n_pipelines):
- continue
-
- overlap = 0
- pos = 0
- while (pos < self.pos):
- if ((self.cores[self.pos].pipelines) &
- (self.cores[pos].pipelines)):
- overlap = 1
- break
- pos += 1
-
- if (overlap):
- continue
-
- if ((self.pos > 0) and
- ((self.cores[self.pos].n_pipelines) ==
- (self.cores[self.pos - 1].n_pipelines)) and
- ((self.cores[self.pos].pipelines) <
- (self.cores[self.pos - 1].pipelines))):
- continue
-
- if (self.pos == self.n_cores - 1):
- self.stage1_log()
- self.ctx2.stage2_init(self)
- self.ctx2.stage2_process()
-
- if (self.pos == 0):
- return
-
- self.cores[self.pos].pipelines = 0
- self.pos -= 1
- continue
-
- self.pos += 1
-
- def stage1_log(self):
- tmp_file_comment = ""
- if(enable_stage1_traceout == 1):
- print('STAGE1: ', end='')
- tmp_file_comment += 'STAGE1: '
- i = 0
- while (i < self.n_cores):
- print('C%d = [' % i, end='')
- tmp_file_comment += "C{} = [".format(i)
-
- j = self.n_pipelines - 1
- while (j >= 0):
- cond = ((self.cores[i].pipelines) & (1 << j))
- if (cond):
- print('1', end='')
- tmp_file_comment += '1'
- else:
- print('0', end='')
- tmp_file_comment += '0'
- j -= 1
-
- print(']\t', end='')
- tmp_file_comment += ']\t'
- i += 1
-
- print('\n', end='')
- self.stage1_file_comment = tmp_file_comment
- self.ctx2.stage1_file_comment = tmp_file_comment
-
- # check if file tracing is enabled
- if(enable_stage1_fileout != 1):
- return
-
- # spit out the combination to file
- self.stage1_process_file()
-
- def stage1_updateCoresInBuf(self, nPipeline, sCore):
- rePipeline = self._fileTrace.arr_pipelines[nPipeline]
- rePipeline = rePipeline.replace("[", r"\[").replace("]", r"\]")
- reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
- sSubs = 'core = ' + sCore + '\n'
-
- reg_pipeline = re.compile(rePipeline)
- search_match = reg_pipeline.search(self._fileTrace.in_buf)
-
- if(search_match):
- pos = search_match.start()
- substr1 = self._fileTrace.in_buf[:pos]
- substr2 = self._fileTrace.in_buf[pos:]
- substr2 = re.sub(reCore, sSubs, substr2, 1)
- self._fileTrace.in_buf = substr1 + substr2
-
- def stage1_process_file(self):
- outFileName = os.path.join(self._fileTrace.out_path,
- self._fileTrace.prefix_outfile)
- outFileName += "_{}CoReS".format(self.n_cores)
-
- i = 0 # represents core number
- while (i < self.n_cores):
- j = self.n_pipelines - 1
- pipeline_idx = 0
- while(j >= 0):
- cond = ((self.cores[i].pipelines) & (1 << j))
- if (cond):
- # update the pipelines array to match the core
- # only in case of cond match
- self.arr_pipelines2cores[
- pipeline_idx] = fileTrace.in_physical_cores[i]
-
- j -= 1
- pipeline_idx += 1
-
- i += 1
-
- # update the in_buf as per the arr_pipelines2cores
- for pipeline_idx in range(len(self.arr_pipelines2cores)):
- outFileName += "_{}".format(self.arr_pipelines2cores[pipeline_idx])
- self.stage1_updateCoresInBuf(
- pipeline_idx, self.arr_pipelines2cores[pipeline_idx])
-
- # by now the in_buf is all set to be written to file
- outFileName += self._fileTrace.suffix_outfile
- outputFile = open(outFileName, "w")
-
- # write out the comments
- strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
- outputFile.write(
- "; =============== Pipeline-to-Core Mapping ================\n"
- "; Generated from file {}\n"
- "; Input pipelines = {}\n"
- "; Input cores = {}\n"
- "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {}\n"
- .format(
- self._fileTrace.in_file_namepath,
- fileTrace.arr_pipelines,
- fileTrace.in_physical_cores,
- self._fileTrace.n_pipelines,
- self._fileTrace.n_cores,
- strTruncated,
- self._fileTrace.hyper_thread))
-
- outputFile.write(
- "; {stg0cmt}\n"
- "; {stg1cmt}\n"
- "; ========================================================\n"
- "; \n"
- .format(
- stg0cmt=self.stage0_file_comment,
- stg1cmt=self.stage1_file_comment))
-
- # write buffer contents
- outputFile.write(self._fileTrace.in_buf)
- outputFile.flush()
- outputFile.close()
-
-
-class Context2:
- _fileTrace = None
-
- def __init__(self):
- self.cores = [Cores2() for i in range(constants.MAX_CORES)]
- self.n_cores = 0
- self.n_pipelines = 0
- self.pos = 0
- self.stage0_file_comment = ""
- self.stage1_file_comment = ""
- self.stage2_file_comment = ""
-
- # each array entry is a pipeline-to-core mapping stored as a string;
- # pipelines range from 1 to n but are stored in a zero-based array
- self.arr2_pipelines2cores = []
-
- def stage2_print(self):
- print('printing Context2 obj')
- print('ctx2.cores(pipelines, n_pipelines, counter, counter_max) =')
- for cores_count in range(0, constants.MAX_CORES):
- print('core[%d] = (%d,%d,%d,%d)' % (
- cores_count,
- self.cores[cores_count].pipelines,
- self.cores[cores_count].n_pipelines,
- self.cores[cores_count].counter,
- self.cores[cores_count].counter_max))
-
- print('ctx2.n_cores = %d' % self.n_cores)
- print('ctx2.n_pipelines = %d' % self.n_pipelines)
- print('ctx2.pos = %d' % self.pos)
- print('ctx2.stage0_file_comment = %s' %
- self.stage0_file_comment)
- print('ctx2.stage1_file_comment = %s' %
- self.stage1_file_comment)
- print('ctx2.stage2_file_comment = %s' %
- self.stage2_file_comment)
-
- def stage2_reset(self):
- for i in range(0, constants.MAX_CORES):
- self.cores[i].pipelines = 0
- self.cores[i].n_pipelines = 0
- self.cores[i].counter = 0
- self.cores[i].counter_max = 0
-
- for idx in range(0, constants.MAX_PIPELINES):
- self.cores[i].bitpos[idx] = 0
-
- self.n_cores = 0
- self.n_pipelines = 0
- self.pos = 0
- # clear list
- del self.arr2_pipelines2cores[:]
-
- def bitpos_load(self, coreidx):
- i = j = 0
- while (i < self.n_pipelines):
- if ((self.cores[coreidx].pipelines) &
- (1 << i)):
- self.cores[coreidx].bitpos[j] = i
- j += 1
- i += 1
- self.cores[coreidx].n_pipelines = j
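-
- # Worked example (added for clarity): with n_pipelines = 4 and
- # cores[coreidx].pipelines = 0b1010, the loop records the set-bit
- # positions, leaving bitpos = [1, 3] and n_pipelines = 2 for that core.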
-
- def bitpos_apply(self, in_buf, pos, n_pos):
- out = 0
- for i in range(0, n_pos):
- out |= (in_buf & (1 << i)) << (pos[i] - i)
-
- return out
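-
- # Worked example (added for clarity): bitpos_apply scatters the low
- # n_pos bits of in_buf back to the positions recorded by bitpos_load.
- # With in_buf = 0b11 and pos = [1, 3], bit 0 lands at position 1 and
- # bit 1 at position 3, so out = 0b1010.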
-
- def stage2_init(self, ctx1):
- self.stage2_reset()
- self.n_cores = ctx1.n_cores
- self.n_pipelines = ctx1.n_pipelines
-
- self.arr2_pipelines2cores = [''] * self.n_pipelines
-
- core_idx = 0
- while (core_idx < self.n_cores):
- self.cores[core_idx].pipelines = ctx1.cores[core_idx].pipelines
-
- self.bitpos_load(core_idx)
- core_idx += 1
-
- def stage2_log(self):
- tmp_file_comment = ""
- if(enable_stage2_traceout == 1):
- print('STAGE2: ', end='')
- tmp_file_comment += 'STAGE2: '
-
- for i in range(0, self.n_cores):
- mask = len2mask(self.cores[i].n_pipelines)
- pipelines_ht0 = self.bitpos_apply(
- (~self.cores[i].counter) & mask,
- self.cores[i].bitpos,
- self.cores[i].n_pipelines)
-
- pipelines_ht1 = self.bitpos_apply(
- self.cores[i].counter,
- self.cores[i].bitpos,
- self.cores[i].n_pipelines)
-
- print('C%dHT0 = [' % i, end='')
- tmp_file_comment += "C{}HT0 = [".format(i)
- tmp_file_comment += bitstring_write(
- pipelines_ht0, self.n_pipelines)
-
- print(']\tC%dHT1 = [' % i, end='')
- tmp_file_comment += "]\tC{}HT1 = [".format(i)
- tmp_file_comment += bitstring_write(
- pipelines_ht1, self.n_pipelines)
- print(']\t', end='')
- tmp_file_comment += ']\t'
-
- print('')
- self.stage2_file_comment = tmp_file_comment
-
- # check if file tracing is enabled
- if(enable_stage2_fileout != 1):
- return
- # spit out the combination to file
- self.stage2_process_file()
-
- def stage2_updateCoresInBuf(self, nPipeline, sCore):
- rePipeline = self._fileTrace.arr_pipelines[nPipeline]
- rePipeline = rePipeline.replace("[", r"\[").replace("]", r"\]")
- reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
- sSubs = 'core = ' + sCore + '\n'
-
- reg_pipeline = re.compile(rePipeline)
- search_match = reg_pipeline.search(self._fileTrace.in_buf)
-
- if(search_match):
- pos = search_match.start()
- substr1 = self._fileTrace.in_buf[:pos]
- substr2 = self._fileTrace.in_buf[pos:]
- substr2 = re.sub(reCore, sSubs, substr2, 1)
- self._fileTrace.in_buf = substr1 + substr2
-
- def pipelines2cores(self, n, n_bits, nCore, bHT):
- if (n_bits > 64):
- return
-
- i = n_bits - 1
- pipeline_idx = 0
- while (i >= 0):
- cond = (n & (1 << i))
- if (cond):
- # update the pipelines array to match the core
- # only in case of cond match
- # PIPELINE0 and core 0 are reserved
- if(bHT):
- tmpCore = self._fileTrace.in_physical_cores[nCore] + 'h'
- self.arr2_pipelines2cores[pipeline_idx] = tmpCore
- else:
- self.arr2_pipelines2cores[pipeline_idx] = \
- self._fileTrace.in_physical_cores[nCore]
-
- i -= 1
- pipeline_idx += 1
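-
- # Worked example (added for clarity): with n = 0b0101 and n_bits = 4,
- # the scan runs from bit 3 down to bit 0, so the set bits (2 and 0)
- # assign entries 1 and 3 of arr2_pipelines2cores to
- # in_physical_cores[nCore], suffixed with 'h' when bHT is set.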
-
- def stage2_process_file(self):
- outFileName = os.path.join(self._fileTrace.out_path,
- self._fileTrace.prefix_outfile)
- outFileName += "_{}CoReS".format(self.n_cores)
-
- for i in range(0, self.n_cores):
- mask = len2mask(self.cores[i].n_pipelines)
- pipelines_ht0 = self.bitpos_apply((~self.cores[i].counter) & mask,
- self.cores[i].bitpos,
- self.cores[i].n_pipelines)
-
- pipelines_ht1 = self.bitpos_apply(self.cores[i].counter,
- self.cores[i].bitpos,
- self.cores[i].n_pipelines)
-
- # update pipelines to core mapping
- self.pipelines2cores(pipelines_ht0, self.n_pipelines, i, False)
- self.pipelines2cores(pipelines_ht1, self.n_pipelines, i, True)
-
- # update the in_buf as per the arr_pipelines2cores
- for pipeline_idx in range(len(self.arr2_pipelines2cores)):
- outFileName += "_{}".format(
- self.arr2_pipelines2cores[pipeline_idx])
- self.stage2_updateCoresInBuf(
- pipeline_idx, self.arr2_pipelines2cores[pipeline_idx])
-
- # by now the in_buf is all set to be written to file
- outFileName += self._fileTrace.suffix_outfile
- outputFile = open(outFileName, "w")
-
- # write the file comments
- strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
- outputFile.write(
- "; =============== Pipeline-to-Core Mapping ================\n"
- "; Generated from file {}\n"
- "; Input pipelines = {}\n"
- "; Input cores = {}\n"
- "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {} \n"
- .format(
- self._fileTrace.in_file_namepath,
- self._fileTrace.arr_pipelines,
- self._fileTrace.in_physical_cores,
- self._fileTrace.n_pipelines,
- self._fileTrace.n_cores,
- strTruncated,
- self._fileTrace.hyper_thread))
-
- outputFile.write(
- "; {stg0cmt}\n"
- "; {stg1cmt}\n"
- "; {stg2cmt}\n"
- "; ========================================================\n"
- "; \n"
- .format(
- stg0cmt=self.stage0_file_comment,
- stg1cmt=self.stage1_file_comment,
- stg2cmt=self.stage2_file_comment))
-
- # write the buffer contents
- outputFile.write(self._fileTrace.in_buf)
- outputFile.flush()
- outputFile.close()
-
- def stage2_process(self):
- i = 0
- while(i < self.n_cores):
- self.cores[i].counter_max = len2mask(
- self.cores[i].n_pipelines - 1)
- i += 1
-
- self.pos = self.n_cores - 1
- while True:
- if (self.pos == self.n_cores - 1):
- self.stage2_log()
-
- if (self.cores[self.pos].counter ==
- self.cores[self.pos].counter_max):
- if (self.pos == 0):
- return
-
- self.cores[self.pos].counter = 0
- self.pos -= 1
- continue
-
- self.cores[self.pos].counter += 1
- if(self.pos < self.n_cores - 1):
- self.pos += 1
-
-
-class FileTrace:
-
- def __init__(self, filenamepath):
- self.in_file_namepath = os.path.abspath(filenamepath)
- self.in_filename = os.path.basename(self.in_file_namepath)
- self.in_path = os.path.dirname(self.in_file_namepath)
-
- filenamesplit = self.in_filename.split('.')
- self.prefix_outfile = filenamesplit[0]
- self.suffix_outfile = ".cfg"
-
- # output folder: in the same folder as input file
- # create new folder in the name of input file
- self.out_path = os.path.join(
- os.path.abspath(os.path.dirname(__file__)),
- self.prefix_outfile)
-
- try:
- os.makedirs(self.out_path)
- except OSError as excep:
- if excep.errno == errno.EEXIST and os.path.isdir(self.out_path):
- pass
- else:
- raise
-
- self.in_buf = None
- self.arr_pipelines = [] # holds the positions of search
-
- self.max_cores = 15
- self.max_pipelines = 15
-
- self.in_physical_cores = None
- self.hyper_thread = None
-
- # save the num of pipelines determined from input file
- self.n_pipelines = 0
- # save the num of cores input (or the truncated value)
- self.n_cores = 0
- self.ncores_truncated = False
-
- def print_TraceFile(self):
- print("self.in_file_namepath = ", self.in_file_namepath)
- print("self.in_filename = ", self.in_filename)
- print("self.in_path = ", self.in_path)
- print("self.out_path = ", self.out_path)
- print("self.prefix_outfile = ", self.prefix_outfile)
- print("self.suffix_outfile = ", self.suffix_outfile)
- print("self.in_buf = ", self.in_buf)
- print("self.arr_pipelines =", self.arr_pipelines)
- print("self.in_physical_cores", self.in_physical_cores)
- print("self.hyper_thread", self.hyper_thread)
-
-
-def process(n_cores, n_pipelines, fileTrace):
- '''validate counts, then map pipelines to cores.'''
- if (n_cores == 0):
- sys.exit('N_CORES is 0, exiting')
-
- if (n_pipelines == 0):
- sys.exit('N_PIPELINES is 0, exiting')
-
- if (n_cores > n_pipelines):
- print('\nToo many cores, truncating N_CORES to N_PIPELINES')
- n_cores = n_pipelines
- fileTrace.ncores_truncated = True
-
- fileTrace.n_pipelines = n_pipelines
- fileTrace.n_cores = n_cores
-
- strTruncated = ("", "(Truncated)")[fileTrace.ncores_truncated]
- print("N_PIPELINES = {}, N_CORES = {} {}"
- .format(n_pipelines, n_cores, strTruncated))
- print("---------------------------------------------------------------")
-
- ctx0_inst = Context0()
- ctx1_inst = Context1()
- ctx2_inst = Context2()
-
- # initialize the class variables
- ctx1_inst._fileTrace = fileTrace
- ctx2_inst._fileTrace = fileTrace
-
- ctx0_inst.stage0_init(n_cores, n_pipelines, ctx1_inst, ctx2_inst)
- ctx0_inst.stage0_process()
-
-
-def validate_core(core):
- return reg_phycore.match(core) is not None
-
-
-def validate_phycores(phy_cores):
- '''validate physical cores, check if unique.'''
- # split the comma-separated list, stripping whitespace around each core
- phy_cores = [core.strip() for core in phy_cores.split(',')]
-
- # check if the core list is unique
- if(len(phy_cores) != len(set(phy_cores))):
- print('list of physical cores has duplicates')
- return None
-
- for core in phy_cores:
- if not validate_core(core):
- print('invalid physical core specified.')
- return None
- return phy_cores
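-
-# Illustrative usage (added for clarity), assuming reg_phycore accepts the
-# "s<SOCKETID>c<COREID>" form described in the --physical-cores help below:
-# validate_phycores('s0c1,s0c2') -> ['s0c1', 's0c2']
-# validate_phycores('s0c1,s0c1') -> None (duplicate cores are rejected)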
-
-
-def scanconfigfile(fileTrace):
- '''scan input file for pipelines, validate then process.'''
- # open file
- filetoscan = open(fileTrace.in_file_namepath, 'r')
- fileTrace.in_buf = filetoscan.read()
-
- # reset iterator on open file
- filetoscan.seek(0)
-
- # scan input file for pipelines
- # MASTER pipelines are ignored
- pattern_pipeline = r'\[PIPELINE\d*\]'
- pattern_mastertype = r'type\s*=\s*MASTER'
-
- pending_pipeline = False
- for line in filetoscan:
- match_pipeline = re.search(pattern_pipeline, line)
- match_type = re.search(r'type\s*=', line)
- match_mastertype = re.search(pattern_mastertype, line)
-
- if(match_pipeline):
- sPipeline = line[match_pipeline.start():match_pipeline.end()]
- pending_pipeline = True
- elif(match_type):
- # found a type definition...
- if(match_mastertype is None):
- # and this is not a master pipeline...
- if(pending_pipeline):
- # add it to the list of pipelines to be mapped
- fileTrace.arr_pipelines.append(sPipeline)
- pending_pipeline = False
- else:
- # and this is a master pipeline...
- # ignore the current and move on to next
- sPipeline = ""
- pending_pipeline = False
- filetoscan.close()
-
- # validate if pipelines are unique
- if(len(fileTrace.arr_pipelines) != len(set(fileTrace.arr_pipelines))):
- sys.exit('Error: duplicate pipelines in input file')
-
- num_pipelines = len(fileTrace.arr_pipelines)
- num_cores = len(fileTrace.in_physical_cores)
-
- print("-------------------Pipeline-to-core mapping--------------------")
- print("Input pipelines = {}\nInput cores = {}"
- .format(fileTrace.arr_pipelines, fileTrace.in_physical_cores))
-
- # input configuration file validations go here
- if (num_cores > fileTrace.max_cores):
- sys.exit('Error: number of cores specified > max_cores (%d)' %
- fileTrace.max_cores)
-
- if (num_pipelines > fileTrace.max_pipelines):
- sys.exit('Error: number of pipelines in input cfg file '
- '> max_pipelines (%d)' % fileTrace.max_pipelines)
-
- # call process to generate pipeline-to-core mapping, trace and log
- process(num_cores, num_pipelines, fileTrace)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='mappipelines')
-
- reqNamedGrp = parser.add_argument_group('required named args')
- reqNamedGrp.add_argument(
- '-i',
- '--input-file',
- type=argparse.FileType('r'),
- help='Input config file',
- required=True)
-
- reqNamedGrp.add_argument(
- '-pc',
- '--physical-cores',
- type=validate_phycores,
- help='''Enter available CPU cores in
- the format \"<core>,<core>,...\"
- where each core is \"s<SOCKETID>c<COREID>\"
- with SOCKETID in 0..9 and COREID in 1..99''',
- required=True)
-
- # add optional arguments
- parser.add_argument(
- '-ht',
- '--hyper-thread',
- help='enable/disable hyper threading. default is ON',
- default='ON',
- choices=['ON', 'OFF'])
-
- parser.add_argument(
- '-nO',
- '--no-output-file',
- help='''disable output config file generation.
- Output file generation is enabled by default''',
- action="store_true")
-
- args = parser.parse_args()
-
- if(args.physical_cores is None):
- parser.error("invalid physical_cores specified")
-
- # create object of FileTrace and initialise
- fileTrace = FileTrace(args.input_file.name)
- fileTrace.in_physical_cores = args.physical_cores
- fileTrace.hyper_thread = args.hyper_thread
-
- if(fileTrace.hyper_thread == 'OFF'):
- print("!!!!disabling stage2 HT!!!!")
- enable_stage2_traceout = 0
- enable_stage2_fileout = 0
- elif(fileTrace.hyper_thread == 'ON'):
- print("!!!!HT enabled. disabling stage1 file generation.!!!!")
- enable_stage1_fileout = 0
-
- if(args.no_output_file is True):
- print("!!!!disabling stage1 and stage2 fileout!!!!")
- enable_stage1_fileout = 0
- enable_stage2_fileout = 0
-
- scanconfigfile(fileTrace)
diff --git a/examples/ip_pipeline/config/tap.cfg b/examples/ip_pipeline/config/tap.cfg
deleted file mode 100644
index 10d35ebb..00000000
--- a/examples/ip_pipeline/config/tap.cfg
+++ /dev/null
@@ -1,64 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2016 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; ______________ ______________________
-; | | TAP0 | |
-; RXQ0.0 --->| |------->|--+ |
-; | | TAP1 | | br0 |
-; TXQ1.0 <---| |<-------|<-+ |
-; | Pass-through | | Linux Kernel |
-; | (P1) | | Network Stack |
-; | | TAP1 | |
-; RXQ1.0 --->| |------->|--+ |
-; | | TAP0 | | br0 |
-; TXQ0.0 <---| |<-------|<-+ |
-; |______________| |______________________|
-;
-; Configure Linux kernel bridge between TAP0 and TAP1 interfaces:
-; [Linux]$ ifconfig TAP0 up
-; [Linux]$ ifconfig TAP1 up
-; [Linux]$ brctl addbr "br0"
-; [Linux]$ brctl addif br0 TAP0
-; [Linux]$ brctl addif br0 TAP1
-; [Linux]$ ifconfig br0 up
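-;
-; Equivalent setup with iproute2, for systems where brctl/ifconfig are
-; deprecated (sketch added for convenience; TAP0/TAP1 are created by the
-; application as above):
-; [Linux]$ ip link add name br0 type bridge
-; [Linux]$ ip link set TAP0 master br0
-; [Linux]$ ip link set TAP1 master br0
-; [Linux]$ ip link set TAP0 up
-; [Linux]$ ip link set TAP1 up
-; [Linux]$ ip link set br0 up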
-
-[EAL]
-log_level = 0
-
-[PIPELINE0]
-type = MASTER
-core = 0
-
-[PIPELINE1]
-type = PASS-THROUGH
-core = 1
-pktq_in = RXQ0.0 TAP1 RXQ1.0 TAP0
-pktq_out = TAP0 TXQ1.0 TAP1 TXQ0.0
diff --git a/examples/ip_pipeline/config/tm_profile.cfg b/examples/ip_pipeline/config/tm_profile.cfg
deleted file mode 100644
index 2dfb215e..00000000
--- a/examples/ip_pipeline/config/tm_profile.cfg
+++ /dev/null
@@ -1,105 +0,0 @@
-; BSD LICENSE
-;
-; Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-; All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-;
-; * Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; * Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in
-; the documentation and/or other materials provided with the
-; distribution.
-; * Neither the name of Intel Corporation nor the names of its
-; contributors may be used to endorse or promote products derived
-; from this software without specific prior written permission.
-;
-; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-; This file enables the following hierarchical scheduler configuration for each
-; 10GbE output port:
-; * Single subport (subport 0):
-; - Subport rate set to 100% of port rate
-; - Each of the 4 traffic classes has rate set to 100% of port rate
-; * 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
-; - Pipe rate set to 1/4K of port rate
-; - Each of the 4 traffic classes has rate set to 100% of pipe rate
-; - Within each traffic class, the byte-level WRR weights for the 4 queues
-; are set to 1:1:1:1
-;
-; For more details, please refer to chapter "Quality of Service (QoS) Framework"
-; of Data Plane Development Kit (DPDK) Programmer's Guide.
-
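-; Rate arithmetic behind the values below (added for clarity): a 10GbE
-; port carries 10^10 bits/s = 1250000000 bytes/s, which is the subport
-; tb/tc rate; dividing it across 4096 pipes gives 1250000000 / 4096 ~=
-; 305175.78, truncated to the 305175 bytes/s used in pipe profile 0.
-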
-; Port configuration
-[port]
-frame overhead = 24 ; frame overhead = Preamble (7) + SFD (1) + FCS (4) + IFG (12)
-mtu = 1522; mtu = Q-in-Q MTU (FCS not included)
-number of subports per port = 1
-number of pipes per subport = 4096
-queue sizes = 64 64 64 64
-
-; Subport configuration
-[subport 0]
-tb rate = 1250000000 ; Bytes per second
-tb size = 1000000 ; Bytes
-
-tc 0 rate = 1250000000 ; Bytes per second
-tc 1 rate = 1250000000 ; Bytes per second
-tc 2 rate = 1250000000 ; Bytes per second
-tc 3 rate = 1250000000 ; Bytes per second
-tc period = 10 ; Milliseconds
-
-pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0
-
-; Pipe configuration
-[pipe profile 0]
-tb rate = 305175 ; Bytes per second
-tb size = 1000000 ; Bytes
-
-tc 0 rate = 305175 ; Bytes per second
-tc 1 rate = 305175 ; Bytes per second
-tc 2 rate = 305175 ; Bytes per second
-tc 3 rate = 305175 ; Bytes per second
-tc period = 40 ; Milliseconds
-
-tc 3 oversubscription weight = 1
-
-tc 0 wrr weights = 1 1 1 1
-tc 1 wrr weights = 1 1 1 1
-tc 2 wrr weights = 1 1 1 1
-tc 3 wrr weights = 1 1 1 1
-
-; RED params per traffic class and color (Green / Yellow / Red)
-[red]
-tc 0 wred min = 48 40 32
-tc 0 wred max = 64 64 64
-tc 0 wred inv prob = 10 10 10
-tc 0 wred weight = 9 9 9
-
-tc 1 wred min = 48 40 32
-tc 1 wred max = 64 64 64
-tc 1 wred inv prob = 10 10 10
-tc 1 wred weight = 9 9 9
-
-tc 2 wred min = 48 40 32
-tc 2 wred max = 64 64 64
-tc 2 wred inv prob = 10 10 10
-tc 2 wred weight = 9 9 9
-
-tc 3 wred min = 48 40 32
-tc 3 wred max = 64 64 64
-tc 3 wred inv prob = 10 10 10
-tc 3 wred weight = 9 9 9
diff --git a/examples/ip_pipeline/config_check.c b/examples/ip_pipeline/config_check.c
deleted file mode 100644
index 86d1191a..00000000
--- a/examples/ip_pipeline/config_check.c
+++ /dev/null
@@ -1,488 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <stdio.h>
-
-#include <rte_ip.h>
-
-#include "app.h"
-
-static void
-check_mempools(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_mempools; i++) {
- struct app_mempool_params *p = &app->mempool_params[i];
-
- APP_CHECK((p->pool_size > 0),
- "Mempool %s size is 0\n", p->name);
-
- APP_CHECK((p->cache_size > 0),
- "Mempool %s cache size is 0\n", p->name);
-
- APP_CHECK(rte_is_power_of_2(p->cache_size),
- "Mempool %s cache size not a power of 2\n", p->name);
- }
-}
-
-static inline uint32_t
-link_rxq_used(struct app_link_params *link, uint32_t q_id)
-{
- uint32_t i;
-
- if ((link->arp_q == q_id) ||
- (link->tcp_syn_q == q_id) ||
- (link->ip_local_q == q_id) ||
- (link->tcp_local_q == q_id) ||
- (link->udp_local_q == q_id) ||
- (link->sctp_local_q == q_id))
- return 1;
-
- for (i = 0; i < link->n_rss_qs; i++)
- if (link->rss_qs[i] == q_id)
- return 1;
-
- return 0;
-}
-
-static void
-check_links(struct app_params *app)
-{
- uint32_t i;
-
- /* Check that number of links matches the port mask */
- if (app->port_mask) {
- uint32_t n_links_port_mask =
- __builtin_popcountll(app->port_mask);
-
- APP_CHECK((app->n_links == n_links_port_mask),
- "Not enough links provided in the PORT_MASK\n");
- }
-
- for (i = 0; i < app->n_links; i++) {
- struct app_link_params *link = &app->link_params[i];
- uint32_t rxq_max, n_rxq, n_txq, link_id, i;
-
- APP_PARAM_GET_ID(link, "LINK", link_id);
-
- /* Check that link RXQs are contiguous */
- rxq_max = 0;
- if (link->arp_q > rxq_max)
- rxq_max = link->arp_q;
- if (link->tcp_syn_q > rxq_max)
- rxq_max = link->tcp_syn_q;
- if (link->ip_local_q > rxq_max)
- rxq_max = link->ip_local_q;
- if (link->tcp_local_q > rxq_max)
- rxq_max = link->tcp_local_q;
- if (link->udp_local_q > rxq_max)
- rxq_max = link->udp_local_q;
- if (link->sctp_local_q > rxq_max)
- rxq_max = link->sctp_local_q;
- for (i = 0; i < link->n_rss_qs; i++)
- if (link->rss_qs[i] > rxq_max)
- rxq_max = link->rss_qs[i];
-
- for (i = 1; i <= rxq_max; i++)
- APP_CHECK((link_rxq_used(link, i)),
- "%s RXQs are not contiguous (A)\n", link->name);
-
- n_rxq = app_link_get_n_rxq(app, link);
-
- APP_CHECK((n_rxq), "%s does not have any RXQ\n", link->name);
-
- APP_CHECK((n_rxq == rxq_max + 1),
- "%s RXQs are not contiguous (B)\n", link->name);
-
- for (i = 0; i < n_rxq; i++) {
- char name[APP_PARAM_NAME_SIZE];
- int pos;
-
- sprintf(name, "RXQ%" PRIu32 ".%" PRIu32,
- link_id, i);
- pos = APP_PARAM_FIND(app->hwq_in_params, name);
- APP_CHECK((pos >= 0),
- "%s RXQs are not contiguous (C)\n", link->name);
- }
-
- /* Check that link TXQs are contiguous */
- n_txq = app_link_get_n_txq(app, link);
-
- APP_CHECK((n_txq), "%s does not have any TXQ\n", link->name);
-
- for (i = 0; i < n_txq; i++) {
- char name[APP_PARAM_NAME_SIZE];
- int pos;
-
- sprintf(name, "TXQ%" PRIu32 ".%" PRIu32,
- link_id, i);
- pos = APP_PARAM_FIND(app->hwq_out_params, name);
- APP_CHECK((pos >= 0),
- "%s TXQs are not contiguous\n", link->name);
- }
- }
-}
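-
-/*
- * Example of the contiguity rules above (added for clarity): if a link
- * sets tcp_syn_q = 3 but rss_qs = {1} only, queue 2 has no role, so
- * check (A) fails; even with the roles contiguous, every RXQ<link>.<q>
- * section from queue 0 up to rxq_max must exist in the configuration,
- * otherwise check (B) or (C) fails.
- */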
-
-static void
-check_rxqs(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_hwq_in; i++) {
- struct app_pktq_hwq_in_params *p = &app->hwq_in_params[i];
- uint32_t n_readers = app_rxq_get_readers(app, p);
-
- APP_CHECK((p->size > 0),
- "%s size is 0\n", p->name);
-
- APP_CHECK((rte_is_power_of_2(p->size)),
- "%s size is not a power of 2\n", p->name);
-
- APP_CHECK((p->burst > 0),
- "%s burst size is 0\n", p->name);
-
- APP_CHECK((p->burst <= p->size),
- "%s burst size is bigger than its size\n", p->name);
-
- APP_CHECK((n_readers != 0),
- "%s has no reader\n", p->name);
-
- APP_CHECK((n_readers == 1),
- "%s has more than one reader\n", p->name);
- }
-}
-
-static void
-check_txqs(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_hwq_out; i++) {
- struct app_pktq_hwq_out_params *p = &app->hwq_out_params[i];
- uint32_t n_writers = app_txq_get_writers(app, p);
-
- APP_CHECK((p->size > 0),
- "%s size is 0\n", p->name);
-
- APP_CHECK((rte_is_power_of_2(p->size)),
- "%s size is not a power of 2\n", p->name);
-
- APP_CHECK((p->burst > 0),
- "%s burst size is 0\n", p->name);
-
- APP_CHECK((p->burst <= p->size),
- "%s burst size is bigger than its size\n", p->name);
-
- APP_CHECK((n_writers != 0),
- "%s has no writer\n", p->name);
-
- APP_CHECK((n_writers == 1),
- "%s has more than one writer\n", p->name);
- }
-}
-
-static void
-check_swqs(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_swq; i++) {
- struct app_pktq_swq_params *p = &app->swq_params[i];
- uint32_t n_readers = app_swq_get_readers(app, p);
- uint32_t n_writers = app_swq_get_writers(app, p);
- uint32_t n_flags;
-
- APP_CHECK((p->size > 0),
- "%s size is 0\n", p->name);
-
- APP_CHECK((rte_is_power_of_2(p->size)),
- "%s size is not a power of 2\n", p->name);
-
- APP_CHECK((p->burst_read > 0),
- "%s read burst size is 0\n", p->name);
-
- APP_CHECK((p->burst_read <= p->size),
- "%s read burst size is bigger than its size\n",
- p->name);
-
- APP_CHECK((p->burst_write > 0),
- "%s write burst size is 0\n", p->name);
-
- APP_CHECK((p->burst_write <= p->size),
- "%s write burst size is bigger than its size\n",
- p->name);
-
- APP_CHECK((n_readers != 0),
- "%s has no reader\n", p->name);
-
- if (n_readers > 1)
- APP_LOG(app, LOW, "%s has more than one reader", p->name);
-
- APP_CHECK((n_writers != 0),
- "%s has no writer\n", p->name);
-
- if (n_writers > 1)
- APP_LOG(app, LOW, "%s has more than one writer", p->name);
-
- n_flags = p->ipv4_frag + p->ipv6_frag + p->ipv4_ras + p->ipv6_ras;
-
- APP_CHECK((n_flags < 2),
- "%s has more than one fragmentation or reassembly mode enabled\n",
- p->name);
-
- APP_CHECK((!((n_readers > 1) && (n_flags == 1))),
- "%s has more than one reader when fragmentation or reassembly"
- " mode enabled\n",
- p->name);
-
- APP_CHECK((!((n_writers > 1) && (n_flags == 1))),
- "%s has more than one writer when fragmentation or reassembly"
- " mode enabled\n",
- p->name);
-
- n_flags = p->ipv4_ras + p->ipv6_ras;
-
- APP_CHECK((!((p->dropless == 1) && (n_flags == 1))),
- "%s has dropless when reassembly mode enabled\n", p->name);
-
- n_flags = p->ipv4_frag + p->ipv6_frag;
-
- if (n_flags == 1) {
- uint16_t ip_hdr_size = (p->ipv4_frag) ? sizeof(struct ipv4_hdr) :
- sizeof(struct ipv6_hdr);
-
- APP_CHECK((p->mtu > ip_hdr_size),
- "%s mtu size is smaller than ip header\n", p->name);
-
- APP_CHECK((!((p->mtu - ip_hdr_size) % 8)),
- "%s mtu size is incorrect\n", p->name);
- }
- }
-}
-
-static void
-check_tms(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_tm; i++) {
- struct app_pktq_tm_params *p = &app->tm_params[i];
- uint32_t n_readers = app_tm_get_readers(app, p);
- uint32_t n_writers = app_tm_get_writers(app, p);
-
- APP_CHECK((n_readers != 0),
- "%s has no reader\n", p->name);
-
- APP_CHECK((n_readers == 1),
- "%s has more than one reader\n", p->name);
-
- APP_CHECK((n_writers != 0),
- "%s has no writer\n", p->name);
-
- APP_CHECK((n_writers == 1),
- "%s has more than one writer\n", p->name);
- }
-}
-
-static void
-check_taps(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_tap; i++) {
- struct app_pktq_tap_params *p = &app->tap_params[i];
- uint32_t n_readers = app_tap_get_readers(app, p);
- uint32_t n_writers = app_tap_get_writers(app, p);
-
- APP_CHECK((n_readers != 0),
- "%s has no reader\n", p->name);
-
- APP_CHECK((n_readers == 1),
- "%s has more than one reader\n", p->name);
-
- APP_CHECK((n_writers != 0),
- "%s has no writer\n", p->name);
-
- APP_CHECK((n_writers == 1),
- "%s has more than one writer\n", p->name);
-
- APP_CHECK((p->burst_read > 0),
- "%s read burst size is 0\n", p->name);
-
- APP_CHECK((p->burst_write > 0),
- "%s write burst size is 0\n", p->name);
- }
-}
-
-static void
-check_knis(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_kni; i++) {
- struct app_pktq_kni_params *p = &app->kni_params[i];
- uint32_t n_readers = app_kni_get_readers(app, p);
- uint32_t n_writers = app_kni_get_writers(app, p);
-
- APP_CHECK((n_readers != 0),
- "%s has no reader\n", p->name);
-
- APP_CHECK((n_readers == 1),
- "%s has more than one reader\n", p->name);
-
- APP_CHECK((n_writers != 0),
- "%s has no writer\n", p->name);
-
- APP_CHECK((n_writers == 1),
- "%s has more than one writer\n", p->name);
- }
-}
-
-static void
-check_sources(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_source; i++) {
- struct app_pktq_source_params *p = &app->source_params[i];
- uint32_t n_readers = app_source_get_readers(app, p);
-
- APP_CHECK((n_readers != 0),
- "%s has no reader\n", p->name);
-
- APP_CHECK((n_readers == 1),
- "%s has more than one reader\n", p->name);
- }
-}
-
-static void
-check_sinks(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_sink; i++) {
- struct app_pktq_sink_params *p = &app->sink_params[i];
- uint32_t n_writers = app_sink_get_writers(app, p);
-
- APP_CHECK((n_writers != 0),
- "%s has no writer\n", p->name);
-
- APP_CHECK((n_writers == 1),
- "%s has more than one writer\n", p->name);
- }
-}
-
-static void
-check_msgqs(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_msgq; i++) {
- struct app_msgq_params *p = &app->msgq_params[i];
- uint32_t n_readers = app_msgq_get_readers(app, p);
- uint32_t n_writers = app_msgq_get_writers(app, p);
- uint32_t msgq_req_pipeline, msgq_rsp_pipeline;
- uint32_t msgq_req_core, msgq_rsp_core;
-
- APP_CHECK((p->size > 0),
- "%s size is 0\n", p->name);
-
- APP_CHECK((rte_is_power_of_2(p->size)),
- "%s size is not a power of 2\n", p->name);
-
- msgq_req_pipeline = (strncmp(p->name, "MSGQ-REQ-PIPELINE",
- strlen("MSGQ-REQ-PIPELINE")) == 0);
-
- msgq_rsp_pipeline = (strncmp(p->name, "MSGQ-RSP-PIPELINE",
- strlen("MSGQ-RSP-PIPELINE")) == 0);
-
- msgq_req_core = (strncmp(p->name, "MSGQ-REQ-CORE",
- strlen("MSGQ-REQ-CORE")) == 0);
-
- msgq_rsp_core = (strncmp(p->name, "MSGQ-RSP-CORE",
- strlen("MSGQ-RSP-CORE")) == 0);
-
- if ((msgq_req_pipeline == 0) &&
- (msgq_rsp_pipeline == 0) &&
- (msgq_req_core == 0) &&
- (msgq_rsp_core == 0)) {
- APP_CHECK((n_readers != 0),
- "%s has no reader\n", p->name);
-
- APP_CHECK((n_readers == 1),
- "%s has more than one reader\n", p->name);
-
- APP_CHECK((n_writers != 0),
- "%s has no writer\n", p->name);
-
- APP_CHECK((n_writers == 1),
- "%s has more than one writer\n", p->name);
- }
-
- if (msgq_req_pipeline) {
- struct app_pipeline_params *pipeline;
- uint32_t pipeline_id;
-
- APP_PARAM_GET_ID(p, "MSGQ-REQ-PIPELINE", pipeline_id);
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params,
- "PIPELINE",
- pipeline_id,
- pipeline);
-
- APP_CHECK((pipeline != NULL),
- "%s is not associated with a valid pipeline\n",
- p->name);
- }
-
- if (msgq_rsp_pipeline) {
- struct app_pipeline_params *pipeline;
- uint32_t pipeline_id;
-
- APP_PARAM_GET_ID(p, "MSGQ-RSP-PIPELINE", pipeline_id);
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params,
- "PIPELINE",
- pipeline_id,
- pipeline);
-
- APP_CHECK((pipeline != NULL),
- "%s is not associated with a valid pipeline\n",
- p->name);
- }
- }
-}
-
-static void
-check_pipelines(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
-
- APP_CHECK((p->n_msgq_in == p->n_msgq_out),
- "%s number of input MSGQs does not match "
- "the number of output MSGQs\n", p->name);
- }
-}
-
-int
-app_config_check(struct app_params *app)
-{
- check_mempools(app);
- check_links(app);
- check_rxqs(app);
- check_txqs(app);
- check_swqs(app);
- check_tms(app);
- check_taps(app);
- check_knis(app);
- check_sources(app);
- check_sinks(app);
- check_msgqs(app);
- check_pipelines(app);
-
- return 0;
-}
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
deleted file mode 100644
index e90499e7..00000000
--- a/examples/ip_pipeline/config_parse.c
+++ /dev/null
@@ -1,3395 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <ctype.h>
-#include <getopt.h>
-#include <errno.h>
-#include <stdarg.h>
-#include <string.h>
-#include <libgen.h>
-#include <unistd.h>
-#include <sys/wait.h>
-
-#include <rte_errno.h>
-#include <rte_cfgfile.h>
-#include <rte_string_fns.h>
-
-#include "app.h"
-#include "parser.h"
-
-/*
- * Default config values
- */
-
-static struct app_params app_params_default = {
- .config_file = "./config/ip_pipeline.cfg",
- .log_level = APP_LOG_LEVEL_HIGH,
- .port_mask = 0,
-
- .eal_params = {
- .channels = 4,
- },
-};
-
-static const struct app_mempool_params mempool_params_default = {
- .parsed = 0,
- .buffer_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
- .pool_size = 32 * 1024,
- .cache_size = 256,
- .cpu_socket_id = 0,
-};
-
-static const struct app_link_params link_params_default = {
- .parsed = 0,
- .pmd_id = 0,
- .arp_q = 0,
- .tcp_syn_q = 0,
- .ip_local_q = 0,
- .tcp_local_q = 0,
- .udp_local_q = 0,
- .sctp_local_q = 0,
- .rss_qs = {0},
- .n_rss_qs = 0,
- .rss_proto_ipv4 = ETH_RSS_IPV4,
- .rss_proto_ipv6 = ETH_RSS_IPV6,
- .rss_proto_l2 = 0,
- .state = 0,
- .ip = 0,
- .depth = 0,
- .mac_addr = 0,
- .pci_bdf = {0},
-
- .conf = {
- .link_speeds = 0,
- .rxmode = {
- .mq_mode = ETH_MQ_RX_NONE,
-
- .ignore_offload_bitfield = 1,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
-
- .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
- .split_hdr_size = 0, /* Header split buffer size */
- },
- .rx_adv_conf = {
- .rss_conf = {
- .rss_key = NULL,
- .rss_key_len = 40,
- .rss_hf = 0,
- },
- },
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
- .lpbk_mode = 0,
- },
-
- .promisc = 1,
-};
-
-static const struct app_pktq_hwq_in_params default_hwq_in_params = {
- .parsed = 0,
- .mempool_id = 0,
- .size = 128,
- .burst = 32,
-
- .conf = {
- .rx_thresh = {
- .pthresh = 8,
- .hthresh = 8,
- .wthresh = 4,
- },
- .rx_free_thresh = 64,
- .rx_drop_en = 0,
- .rx_deferred_start = 0,
- }
-};
-
-static const struct app_pktq_hwq_out_params default_hwq_out_params = {
- .parsed = 0,
- .size = 512,
- .burst = 32,
- .dropless = 0,
- .n_retries = 0,
-
- .conf = {
- .tx_thresh = {
- .pthresh = 36,
- .hthresh = 0,
- .wthresh = 0,
- },
- .tx_rs_thresh = 0,
- .tx_free_thresh = 0,
- .txq_flags = ETH_TXQ_FLAGS_IGNORE,
- .tx_deferred_start = 0,
- }
-};
-
-static const struct app_pktq_swq_params default_swq_params = {
- .parsed = 0,
- .size = 256,
- .burst_read = 32,
- .burst_write = 32,
- .dropless = 0,
- .n_retries = 0,
- .cpu_socket_id = 0,
- .ipv4_frag = 0,
- .ipv6_frag = 0,
- .ipv4_ras = 0,
- .ipv6_ras = 0,
- .mtu = 0,
- .metadata_size = 0,
- .mempool_direct_id = 0,
- .mempool_indirect_id = 0,
-};
-
-struct app_pktq_tm_params default_tm_params = {
- .parsed = 0,
- .file_name = "./config/tm_profile.cfg",
- .burst_read = 24,
- .burst_write = 32,
-};
-
-struct app_pktq_tap_params default_tap_params = {
- .parsed = 0,
- .burst_read = 32,
- .burst_write = 32,
- .dropless = 0,
- .n_retries = 0,
- .mempool_id = 0,
-};
-
-struct app_pktq_kni_params default_kni_params = {
- .parsed = 0,
- .socket_id = 0,
- .core_id = 0,
- .hyper_th_id = 0,
- .force_bind = 0,
-
- .mempool_id = 0,
- .burst_read = 32,
- .burst_write = 32,
- .dropless = 0,
- .n_retries = 0,
-};
-
-struct app_pktq_source_params default_source_params = {
- .parsed = 0,
- .mempool_id = 0,
- .burst = 32,
- .file_name = "./config/packets.pcap",
- .n_bytes_per_pkt = 0,
-};
-
-struct app_pktq_sink_params default_sink_params = {
- .parsed = 0,
- .file_name = NULL,
- .n_pkts_to_dump = 0,
-};
-
-struct app_msgq_params default_msgq_params = {
- .parsed = 0,
- .size = 64,
- .cpu_socket_id = 0,
-};
-
-struct app_pipeline_params default_pipeline_params = {
- .parsed = 0,
- .socket_id = 0,
- .core_id = 0,
- .hyper_th_id = 0,
- .n_pktq_in = 0,
- .n_pktq_out = 0,
- .n_msgq_in = 0,
- .n_msgq_out = 0,
- .timer_period = 1,
- .n_args = 0,
-};
-
-static const char app_usage[] =
- "Usage: %s [-f CONFIG_FILE] [-s SCRIPT_FILE] [-p PORT_MASK] "
- "[-l LOG_LEVEL] [--preproc PREPROCESSOR] [--preproc-args ARGS]\n"
- "\n"
- "Arguments:\n"
- "\t-f CONFIG_FILE: Default config file is %s\n"
- "\t-p PORT_MASK: Mask of NIC port IDs in hex format (generated from "
- "config file when not provided)\n"
- "\t-s SCRIPT_FILE: No CLI script file is run when not specified\n"
- "\t-l LOG_LEVEL: 0 = NONE, 1 = HIGH PRIO (default), 2 = LOW PRIO\n"
- "\t--preproc PREPROCESSOR: Configuration file pre-processor\n"
- "\t--preproc-args ARGS: Arguments to be passed to pre-processor\n"
- "\n";
-
-static void
-app_print_usage(char *prgname)
-{
- rte_exit(0, app_usage, prgname, app_params_default.config_file);
-}
-
-#define APP_PARAM_ADD(set, key) \
-({ \
- ssize_t pos = APP_PARAM_FIND(set, key); \
- ssize_t size = RTE_DIM(set); \
- \
- if (pos < 0) { \
- for (pos = 0; pos < size; pos++) { \
- if (!APP_PARAM_VALID(&((set)[pos]))) \
- break; \
- } \
- \
- APP_CHECK((pos < size), \
- "Parse error: size of %s is limited to %u elements",\
- #set, (uint32_t) size); \
- \
- (set)[pos].name = strdup(key); \
- APP_CHECK(((set)[pos].name), \
- "Parse error: no free memory"); \
- } \
- pos; \
-})
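-
-/*
- * Usage sketch (added for clarity): APP_PARAM_ADD looks the key up first,
- * so repeated additions are idempotent. For example,
- *
- * ssize_t pos = APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
- *
- * returns the existing slot of "MEMPOOL0" when present; otherwise it
- * claims the first free slot, strdup()s the name into it, and returns
- * that index.
- */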
-
-#define APP_PARAM_ADD_LINK_FOR_RXQ(app, rxq_name) \
-({ \
- char link_name[APP_PARAM_NAME_SIZE]; \
- ssize_t link_param_pos; \
- uint32_t link_id, queue_id; \
- \
- sscanf((rxq_name), "RXQ%" SCNu32 ".%" SCNu32, &link_id, &queue_id);\
- sprintf(link_name, "LINK%" PRIu32, link_id); \
- link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
- link_param_pos; \
-})
-
-#define APP_PARAM_ADD_LINK_FOR_TXQ(app, txq_name) \
-({ \
- char link_name[APP_PARAM_NAME_SIZE]; \
- ssize_t link_param_pos; \
- uint32_t link_id, queue_id; \
- \
- sscanf((txq_name), "TXQ%" SCNu32 ".%" SCNu32, &link_id, &queue_id);\
- sprintf(link_name, "LINK%" PRIu32, link_id); \
- link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
- link_param_pos; \
-})
-
-#define APP_PARAM_ADD_LINK_FOR_TM(app, tm_name) \
-({ \
- char link_name[APP_PARAM_NAME_SIZE]; \
- ssize_t link_param_pos; \
- uint32_t link_id; \
- \
- sscanf((tm_name), "TM%" SCNu32, &link_id); \
- sprintf(link_name, "LINK%" PRIu32, link_id); \
- link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
- link_param_pos; \
-})
-
-#define APP_PARAM_ADD_LINK_FOR_KNI(app, kni_name) \
-({ \
- char link_name[APP_PARAM_NAME_SIZE]; \
- ssize_t link_param_pos; \
- uint32_t link_id; \
- \
- sscanf((kni_name), "KNI%" SCNu32, &link_id); \
- sprintf(link_name, "LINK%" PRIu32, link_id); \
- link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
- link_param_pos; \
-})
-
-#define PARSE_CHECK_DUPLICATE_SECTION(obj) \
-do { \
- APP_CHECK(((obj)->parsed == 0), \
- "Parse error: duplicate \"%s\" section", (obj)->name); \
- (obj)->parsed++; \
-} while (0)
-
-#define PARSE_CHECK_DUPLICATE_SECTION_EAL(obj) \
-do { \
- APP_CHECK(((obj)->parsed == 0), \
- "Parse error: duplicate \"%s\" section", "EAL"); \
- (obj)->parsed++; \
-} while (0)
-
-#define PARSE_ERROR(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": entry \"%s\"", section, entry)
-
-#define PARSE_ERROR_MESSAGE(exp, section, entry, message) \
-APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": %s", \
- section, entry, message)
-
-#define PARSE_ERROR_NO_ELEMENTS(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
- "no elements detected", \
- section, entry)
-
-#define PARSE_ERROR_TOO_MANY_ELEMENTS(exp, section, entry, max) \
-APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
- "maximum number of elements allowed is %u", \
- section, entry, max)
-
-#define PARSE_ERROR_INVALID_ELEMENT(exp, section, entry, value) \
-APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
- "Invalid element value \"%s\"", \
- section, entry, value)
-
-#define PARSE_ERROR_MALLOC(exp) \
-APP_CHECK(exp, "Parse error: no free memory")
-
-#define PARSE_ERROR_SECTION(exp, section) \
-APP_CHECK(exp, "Parse error in section \"%s\"", section)
-
-#define PARSE_ERROR_SECTION_NO_ENTRIES(exp, section) \
-APP_CHECK(exp, "Parse error in section \"%s\": no entries", section)
-
-#define PARSE_WARNING_IGNORED(exp, section, entry) \
-do \
-if (!(exp)) \
- fprintf(stderr, "Parse warning in section \"%s\": " \
- "entry \"%s\" is ignored", section, entry); \
-while (0)
-
-#define PARSE_ERROR_INVALID(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": unrecognized entry \"%s\"",\
- section, entry)
-
-#define PARSE_ERROR_DUPLICATE(exp, section, entry) \
-APP_CHECK(exp, "Parse error in section \"%s\": duplicate entry \"%s\"", \
- section, entry)
-
-static int
-validate_name(const char *name, const char *prefix, int num)
-{
- size_t i, j;
-
- for (i = 0; (name[i] != '\0') && (prefix[i] != '\0'); i++) {
- if (name[i] != prefix[i])
- return -1;
- }
-
- if (prefix[i] != '\0')
- return -1;
-
- if (!num) {
- if (name[i] != '\0')
- return -1;
- else
- return 0;
- }
-
- if (num == 2) {
- j = skip_digits(&name[i]);
- i += j;
- if ((j == 0) || (name[i] != '.'))
- return -1;
- i++;
- num = 1; /* fall through: validate the number after the dot */
- }
-
- if (num == 1) {
- j = skip_digits(&name[i]);
- i += j;
- if ((j == 0) || (name[i] != '\0'))
- return -1;
- }
-
- return 0;
-}
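-
-/*
- * Examples of the naming rules above (added for clarity):
- * validate_name("SWQ3", "SWQ", 1) == 0 -- prefix plus one number
- * validate_name("RXQ1.0", "RXQ", 2) == 0 -- prefix plus "<id>.<id>"
- * validate_name("SWQ", "SWQ", 1) != 0 -- the number is mandatory
- * validate_name("RXQ1.", "RXQ", 2) != 0 -- second number is mandatory
- */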
-
-static void
-parse_eal(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_eal_params *p = &app->eal_params;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- PARSE_CHECK_DUPLICATE_SECTION_EAL(p);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *entry = &entries[i];
-
- /* coremask */
- if (strcmp(entry->name, "c") == 0) {
- PARSE_WARNING_IGNORED(0, section_name, entry->name);
- continue;
- }
-
- /* corelist */
- if (strcmp(entry->name, "l") == 0) {
- PARSE_WARNING_IGNORED(0, section_name, entry->name);
- continue;
- }
-
- /* coremap */
- if (strcmp(entry->name, "lcores") == 0) {
- PARSE_ERROR_DUPLICATE((p->coremap == NULL),
- section_name,
- entry->name);
- p->coremap = strdup(entry->value);
- continue;
- }
-
- /* master_lcore */
- if (strcmp(entry->name, "master_lcore") == 0) {
- int status;
-
- PARSE_ERROR_DUPLICATE((p->master_lcore_present == 0),
- section_name,
- entry->name);
- p->master_lcore_present = 1;
-
- status = parser_read_uint32(&p->master_lcore,
- entry->value);
- PARSE_ERROR((status == 0), section_name, entry->name);
- continue;
- }
-
- /* channels */
- if (strcmp(entry->name, "n") == 0) {
- int status;
-
- PARSE_ERROR_DUPLICATE((p->channels_present == 0),
- section_name,
- entry->name);
- p->channels_present = 1;
-
- status = parser_read_uint32(&p->channels, entry->value);
- PARSE_ERROR((status == 0), section_name, entry->name);
- continue;
- }
-
- /* memory */
- if (strcmp(entry->name, "m") == 0) {
- int status;
-
- PARSE_ERROR_DUPLICATE((p->memory_present == 0),
- section_name,
- entry->name);
- p->memory_present = 1;
-
- status = parser_read_uint32(&p->memory, entry->value);
- PARSE_ERROR((status == 0), section_name, entry->name);
- continue;
- }
-
- /* ranks */
- if (strcmp(entry->name, "r") == 0) {
- int status;
-
- PARSE_ERROR_DUPLICATE((p->ranks_present == 0),
- section_name,
- entry->name);
- p->ranks_present = 1;
-
- status = parser_read_uint32(&p->ranks, entry->value);
- PARSE_ERROR((status == 0), section_name, entry->name);
- continue;
- }
-
- /* pci_blacklist */
- if ((strcmp(entry->name, "pci_blacklist") == 0) ||
- (strcmp(entry->name, "b") == 0)) {
- uint32_t i;
-
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->pci_blacklist[i])
- continue;
-
- p->pci_blacklist[i] =
- strdup(entry->value);
- PARSE_ERROR_MALLOC(p->pci_blacklist[i]);
-
- break;
- }
-
- PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
- section_name, entry->name,
- "too many elements");
- continue;
- }
-
- /* pci_whitelist */
- if ((strcmp(entry->name, "pci_whitelist") == 0) ||
- (strcmp(entry->name, "w") == 0)) {
- uint32_t i;
-
- PARSE_ERROR_MESSAGE((app->port_mask != 0),
- section_name, entry->name, "entry to be "
- "generated by the application (port_mask "
- "not provided)");
-
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->pci_whitelist[i])
- continue;
-
- p->pci_whitelist[i] = strdup(entry->value);
- PARSE_ERROR_MALLOC(p->pci_whitelist[i]);
-
- break;
- }
-
- PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
- section_name, entry->name,
- "too many elements");
- continue;
- }
-
- /* vdev */
- if (strcmp(entry->name, "vdev") == 0) {
- uint32_t i;
-
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->vdev[i])
- continue;
-
- p->vdev[i] = strdup(entry->value);
- PARSE_ERROR_MALLOC(p->vdev[i]);
-
- break;
- }
-
- PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
- section_name, entry->name,
- "too many elements");
- continue;
- }
-
- /* vmware_tsc_map */
- if (strcmp(entry->name, "vmware_tsc_map") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->vmware_tsc_map_present == 0),
- section_name,
- entry->name);
- p->vmware_tsc_map_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->vmware_tsc_map = val;
- continue;
- }
-
- /* proc_type */
- if (strcmp(entry->name, "proc_type") == 0) {
- PARSE_ERROR_DUPLICATE((p->proc_type == NULL),
- section_name,
- entry->name);
- p->proc_type = strdup(entry->value);
- continue;
- }
-
- /* syslog */
- if (strcmp(entry->name, "syslog") == 0) {
- PARSE_ERROR_DUPLICATE((p->syslog == NULL),
- section_name,
- entry->name);
- p->syslog = strdup(entry->value);
- continue;
- }
-
- /* log_level */
- if (strcmp(entry->name, "log_level") == 0) {
- int status;
-
- PARSE_ERROR_DUPLICATE((p->log_level_present == 0),
- section_name,
- entry->name);
- p->log_level_present = 1;
-
- status = parser_read_uint32(&p->log_level,
- entry->value);
- PARSE_ERROR((status == 0), section_name, entry->name);
- continue;
- }
-
- /* version */
- if (strcmp(entry->name, "v") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->version_present == 0),
- section_name,
- entry->name);
- p->version_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->version = val;
- continue;
- }
-
- /* help */
- if ((strcmp(entry->name, "help") == 0) ||
- (strcmp(entry->name, "h") == 0)) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->help_present == 0),
- section_name,
- entry->name);
- p->help_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->help = val;
- continue;
- }
-
- /* no_huge */
- if (strcmp(entry->name, "no_huge") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->no_huge_present == 0),
- section_name,
- entry->name);
- p->no_huge_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->no_huge = val;
- continue;
- }
-
- /* no_pci */
- if (strcmp(entry->name, "no_pci") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->no_pci_present == 0),
- section_name,
- entry->name);
- p->no_pci_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->no_pci = val;
- continue;
- }
-
- /* no_hpet */
- if (strcmp(entry->name, "no_hpet") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->no_hpet_present == 0),
- section_name,
- entry->name);
- p->no_hpet_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->no_hpet = val;
- continue;
- }
-
- /* no_shconf */
- if (strcmp(entry->name, "no_shconf") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->no_shconf_present == 0),
- section_name,
- entry->name);
- p->no_shconf_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->no_shconf = val;
- continue;
- }
-
- /* add_driver */
- if (strcmp(entry->name, "d") == 0) {
- PARSE_ERROR_DUPLICATE((p->add_driver == NULL),
- section_name,
- entry->name);
- p->add_driver = strdup(entry->value);
- continue;
- }
-
- /* socket_mem */
- if (strcmp(entry->name, "socket_mem") == 0) {
- PARSE_ERROR_DUPLICATE((p->socket_mem == NULL),
- section_name,
- entry->name);
- p->socket_mem = strdup(entry->value);
- continue;
- }
-
- /* huge_dir */
- if (strcmp(entry->name, "huge_dir") == 0) {
- PARSE_ERROR_DUPLICATE((p->huge_dir == NULL),
- section_name,
- entry->name);
- p->huge_dir = strdup(entry->value);
- continue;
- }
-
- /* file_prefix */
- if (strcmp(entry->name, "file_prefix") == 0) {
- PARSE_ERROR_DUPLICATE((p->file_prefix == NULL),
- section_name,
- entry->name);
- p->file_prefix = strdup(entry->value);
- continue;
- }
-
- /* base_virtaddr */
- if (strcmp(entry->name, "base_virtaddr") == 0) {
- PARSE_ERROR_DUPLICATE((p->base_virtaddr == NULL),
- section_name,
- entry->name);
- p->base_virtaddr = strdup(entry->value);
- continue;
- }
-
- /* create_uio_dev */
- if (strcmp(entry->name, "create_uio_dev") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->create_uio_dev_present == 0),
- section_name,
- entry->name);
- p->create_uio_dev_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->create_uio_dev = val;
- continue;
- }
-
- /* vfio_intr */
- if (strcmp(entry->name, "vfio_intr") == 0) {
- PARSE_ERROR_DUPLICATE((p->vfio_intr == NULL),
- section_name,
- entry->name);
- p->vfio_intr = strdup(entry->value);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, entry->name);
- }
-
- free(entries);
-}
-
-static void
-parse_pipeline_pktq_in(struct app_params *app,
- struct app_pipeline_params *p,
- char *value)
-{
- p->n_pktq_in = 0;
-
- while (1) {
- enum app_pktq_in_type type;
- int id;
- char *name = strtok_r(value, PARSE_DELIMITER, &value);
-
- if (name == NULL)
- break;
-
- PARSE_ERROR_TOO_MANY_ELEMENTS(
- (p->n_pktq_in < RTE_DIM(p->pktq_in)),
- p->name, "pktq_in", (uint32_t)RTE_DIM(p->pktq_in));
-
- if (validate_name(name, "RXQ", 2) == 0) {
- type = APP_PKTQ_IN_HWQ;
- id = APP_PARAM_ADD(app->hwq_in_params, name);
- APP_PARAM_ADD_LINK_FOR_RXQ(app, name);
- } else if (validate_name(name, "SWQ", 1) == 0) {
- type = APP_PKTQ_IN_SWQ;
- id = APP_PARAM_ADD(app->swq_params, name);
- } else if (validate_name(name, "TM", 1) == 0) {
- type = APP_PKTQ_IN_TM;
- id = APP_PARAM_ADD(app->tm_params, name);
- APP_PARAM_ADD_LINK_FOR_TM(app, name);
- } else if (validate_name(name, "TAP", 1) == 0) {
- type = APP_PKTQ_IN_TAP;
- id = APP_PARAM_ADD(app->tap_params, name);
- } else if (validate_name(name, "KNI", 1) == 0) {
- type = APP_PKTQ_IN_KNI;
- id = APP_PARAM_ADD(app->kni_params, name);
- APP_PARAM_ADD_LINK_FOR_KNI(app, name);
- } else if (validate_name(name, "SOURCE", 1) == 0) {
- type = APP_PKTQ_IN_SOURCE;
- id = APP_PARAM_ADD(app->source_params, name);
- } else
- PARSE_ERROR_INVALID_ELEMENT(0,
- p->name, "pktq_in", name);
-
- p->pktq_in[p->n_pktq_in].type = type;
- p->pktq_in[p->n_pktq_in].id = (uint32_t) id;
- p->n_pktq_in++;
- }
-
- PARSE_ERROR_NO_ELEMENTS((p->n_pktq_in > 0), p->name, "pktq_in");
-}
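-
-/*
- * Example entry handled above (added for clarity): a pipeline section
- * containing
- *
- * pktq_in = RXQ0.0 SWQ1 SOURCE0
- *
- * produces three pktq_in slots of types APP_PKTQ_IN_HWQ, APP_PKTQ_IN_SWQ
- * and APP_PKTQ_IN_SOURCE, and registers LINK0 as a side effect of the
- * RXQ reference.
- */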
-
-static void
-parse_pipeline_pktq_out(struct app_params *app,
- struct app_pipeline_params *p,
- char *value)
-{
- p->n_pktq_out = 0;
-
- while (1) {
- enum app_pktq_out_type type;
- int id;
- char *name = strtok_r(value, PARSE_DELIMITER, &value);
-
- if (name == NULL)
- break;
-
- PARSE_ERROR_TOO_MANY_ELEMENTS(
- (p->n_pktq_out < RTE_DIM(p->pktq_out)),
- p->name, "pktq_out", (uint32_t)RTE_DIM(p->pktq_out));
-
- if (validate_name(name, "TXQ", 2) == 0) {
- type = APP_PKTQ_OUT_HWQ;
- id = APP_PARAM_ADD(app->hwq_out_params, name);
- APP_PARAM_ADD_LINK_FOR_TXQ(app, name);
- } else if (validate_name(name, "SWQ", 1) == 0) {
- type = APP_PKTQ_OUT_SWQ;
- id = APP_PARAM_ADD(app->swq_params, name);
- } else if (validate_name(name, "TM", 1) == 0) {
- type = APP_PKTQ_OUT_TM;
- id = APP_PARAM_ADD(app->tm_params, name);
- APP_PARAM_ADD_LINK_FOR_TM(app, name);
- } else if (validate_name(name, "TAP", 1) == 0) {
- type = APP_PKTQ_OUT_TAP;
- id = APP_PARAM_ADD(app->tap_params, name);
- } else if (validate_name(name, "KNI", 1) == 0) {
- type = APP_PKTQ_OUT_KNI;
- id = APP_PARAM_ADD(app->kni_params, name);
- APP_PARAM_ADD_LINK_FOR_KNI(app, name);
- } else if (validate_name(name, "SINK", 1) == 0) {
- type = APP_PKTQ_OUT_SINK;
- id = APP_PARAM_ADD(app->sink_params, name);
- } else
- PARSE_ERROR_INVALID_ELEMENT(0,
- p->name, "pktq_out", name);
-
- p->pktq_out[p->n_pktq_out].type = type;
- p->pktq_out[p->n_pktq_out].id = id;
- p->n_pktq_out++;
- }
-
- PARSE_ERROR_NO_ELEMENTS((p->n_pktq_out > 0), p->name, "pktq_out");
-}
-
-static void
-parse_pipeline_msgq_in(struct app_params *app,
- struct app_pipeline_params *p,
- char *value)
-{
- p->n_msgq_in = 0;
-
- while (1) {
- int idx;
- char *name = strtok_r(value, PARSE_DELIMITER, &value);
-
- if (name == NULL)
- break;
-
- PARSE_ERROR_TOO_MANY_ELEMENTS(
- (p->n_msgq_in < RTE_DIM(p->msgq_in)),
- p->name, "msgq_in", (uint32_t)(RTE_DIM(p->msgq_in)));
-
- PARSE_ERROR_INVALID_ELEMENT(
- (validate_name(name, "MSGQ", 1) == 0),
- p->name, "msgq_in", name);
-
- idx = APP_PARAM_ADD(app->msgq_params, name);
- p->msgq_in[p->n_msgq_in] = idx;
- p->n_msgq_in++;
- }
-
- PARSE_ERROR_NO_ELEMENTS((p->n_msgq_in > 0), p->name, "msgq_in");
-}
-
-static void
-parse_pipeline_msgq_out(struct app_params *app,
- struct app_pipeline_params *p,
- char *value)
-{
- p->n_msgq_out = 0;
-
- while (1) {
- int idx;
- char *name = strtok_r(value, PARSE_DELIMITER, &value);
-
- if (name == NULL)
- break;
-
- PARSE_ERROR_TOO_MANY_ELEMENTS(
- (p->n_msgq_out < RTE_DIM(p->msgq_out)),
- p->name, "msgq_out", (uint32_t)RTE_DIM(p->msgq_out));
-
- PARSE_ERROR_INVALID_ELEMENT(
- (validate_name(name, "MSGQ", 1) == 0),
- p->name, "msgq_out", name);
-
- idx = APP_PARAM_ADD(app->msgq_params, name);
- p->msgq_out[p->n_msgq_out] = idx;
- p->n_msgq_out++;
- }
-
- PARSE_ERROR_NO_ELEMENTS((p->n_msgq_out > 0), p->name, "msgq_out");
-}
-
-static void
-parse_pipeline(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- char name[CFG_NAME_LEN];
- struct app_pipeline_params *param;
- struct rte_cfgfile_entry *entries;
- ssize_t param_idx;
- int n_entries, i;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->pipeline_params, section_name);
- param = &app->pipeline_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "type") == 0) {
- int w_size = snprintf(param->type, RTE_DIM(param->type),
- "%s", ent->value);
-
- PARSE_ERROR(((w_size > 0) &&
- (w_size < (int)RTE_DIM(param->type))),
- section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "core") == 0) {
- int status = parse_pipeline_core(
- &param->socket_id, &param->core_id,
- &param->hyper_th_id, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pktq_in") == 0) {
- parse_pipeline_pktq_in(app, param, ent->value);
-
- continue;
- }
-
- if (strcmp(ent->name, "pktq_out") == 0) {
- parse_pipeline_pktq_out(app, param, ent->value);
-
- continue;
- }
-
- if (strcmp(ent->name, "msgq_in") == 0) {
- parse_pipeline_msgq_in(app, param, ent->value);
-
- continue;
- }
-
- if (strcmp(ent->name, "msgq_out") == 0) {
- parse_pipeline_msgq_out(app, param, ent->value);
-
- continue;
- }
-
- if (strcmp(ent->name, "timer_period") == 0) {
- int status = parser_read_uint32(
- &param->timer_period,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* pipeline type specific items */
- APP_CHECK((param->n_args < APP_MAX_PIPELINE_ARGS),
- "Parse error in section \"%s\": too many "
- "pipeline specified parameters", section_name);
-
- param->args_name[param->n_args] = strdup(ent->name);
- param->args_value[param->n_args] = strdup(ent->value);
-
- APP_CHECK((param->args_name[param->n_args] != NULL) &&
- (param->args_value[param->n_args] != NULL),
- "Parse error: no free memory");
-
- param->n_args++;
- }
-
- snprintf(name, sizeof(name), "MSGQ-REQ-%s", section_name);
- param_idx = APP_PARAM_ADD(app->msgq_params, name);
- app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
- param->msgq_in[param->n_msgq_in++] = param_idx;
-
- snprintf(name, sizeof(name), "MSGQ-RSP-%s", section_name);
- param_idx = APP_PARAM_ADD(app->msgq_params, name);
- app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
- param->msgq_out[param->n_msgq_out++] = param_idx;
-
- snprintf(name, sizeof(name), "MSGQ-REQ-CORE-s%" PRIu32 "c%" PRIu32 "%s",
- param->socket_id,
- param->core_id,
- (param->hyper_th_id) ? "h" : "");
- param_idx = APP_PARAM_ADD(app->msgq_params, name);
- app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
-
- snprintf(name, sizeof(name), "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
- param->socket_id,
- param->core_id,
- (param->hyper_th_id) ? "h" : "");
- param_idx = APP_PARAM_ADD(app->msgq_params, name);
- app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
-
- free(entries);
-}
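
For illustration, here is a hedged sketch of a [PIPELINE] section that the parser above would accept; the pipeline type, queue names, and the extra argument are hypothetical and only demonstrate the key/value grammar:

[PIPELINE0]
type = PASS-THROUGH          ; copied into param->type
core = s0c1                  ; socket 0, core 1; append "h" for the hyperthread
pktq_in = RXQ0.0 SWQ0        ; whitespace-separated queue names
pktq_out = TXQ0.0
msgq_in = MSGQ0
msgq_out = MSGQ1
timer_period = 100
dbg_level = 1                ; unrecognized keys land in args_name[]/args_value[]

Note that, independent of the configuration file, parse_pipeline() also creates the MSGQ-REQ-<section>/MSGQ-RSP-<section> pair and the per-core MSGQ-REQ-CORE-.../MSGQ-RSP-CORE-... queues shown at the end of the function.
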
-
-static void
-parse_mempool(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_mempool_params *param;
- struct rte_cfgfile_entry *entries;
- ssize_t param_idx;
- int n_entries, i;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->mempool_params, section_name);
- param = &app->mempool_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "buffer_size") == 0) {
- int status = parser_read_uint32(
- &param->buffer_size, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pool_size") == 0) {
- int status = parser_read_uint32(
- &param->pool_size, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "cache_size") == 0) {
- int status = parser_read_uint32(
- &param->cache_size, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "cpu") == 0) {
- int status = parser_read_uint32(
- &param->cpu_socket_id, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static int
-parse_link_rss_qs(struct app_link_params *p,
- char *value)
-{
- p->n_rss_qs = 0;
-
- while (1) {
- char *token = strtok_r(value, PARSE_DELIMITER, &value);
-
- if (token == NULL)
- break;
-
- if (p->n_rss_qs == RTE_DIM(p->rss_qs))
- return -ENOMEM;
-
- if (parser_read_uint32(&p->rss_qs[p->n_rss_qs++], token))
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int
-parse_link_rss_proto_ipv4(struct app_link_params *p,
- char *value)
-{
- uint64_t mask = 0;
-
- while (1) {
- char *token = strtok_r(value, PARSE_DELIMITER, &value);
-
- if (token == NULL)
- break;
-
- if (strcmp(token, "IP") == 0) {
- mask |= ETH_RSS_IPV4;
- continue;
- }
- if (strcmp(token, "FRAG") == 0) {
- mask |= ETH_RSS_FRAG_IPV4;
- continue;
- }
- if (strcmp(token, "TCP") == 0) {
- mask |= ETH_RSS_NONFRAG_IPV4_TCP;
- continue;
- }
- if (strcmp(token, "UDP") == 0) {
- mask |= ETH_RSS_NONFRAG_IPV4_UDP;
- continue;
- }
- if (strcmp(token, "SCTP") == 0) {
- mask |= ETH_RSS_NONFRAG_IPV4_SCTP;
- continue;
- }
- if (strcmp(token, "OTHER") == 0) {
- mask |= ETH_RSS_NONFRAG_IPV4_OTHER;
- continue;
- }
- return -EINVAL;
- }
-
- p->rss_proto_ipv4 = mask;
- return 0;
-}
-
-static int
-parse_link_rss_proto_ipv6(struct app_link_params *p,
- char *value)
-{
- uint64_t mask = 0;
-
- while (1) {
- char *token = strtok_r(value, PARSE_DELIMITER, &value);
-
- if (token == NULL)
- break;
-
- if (strcmp(token, "IP") == 0) {
- mask |= ETH_RSS_IPV6;
- continue;
- }
- if (strcmp(token, "FRAG") == 0) {
- mask |= ETH_RSS_FRAG_IPV6;
- continue;
- }
- if (strcmp(token, "TCP") == 0) {
- mask |= ETH_RSS_NONFRAG_IPV6_TCP;
- continue;
- }
- if (strcmp(token, "UDP") == 0) {
- mask |= ETH_RSS_NONFRAG_IPV6_UDP;
- continue;
- }
- if (strcmp(token, "SCTP") == 0) {
- mask |= ETH_RSS_NONFRAG_IPV6_SCTP;
- continue;
- }
- if (strcmp(token, "OTHER") == 0) {
- mask |= ETH_RSS_NONFRAG_IPV6_OTHER;
- continue;
- }
- if (strcmp(token, "IP_EX") == 0) {
- mask |= ETH_RSS_IPV6_EX;
- continue;
- }
- if (strcmp(token, "TCP_EX") == 0) {
- mask |= ETH_RSS_IPV6_TCP_EX;
- continue;
- }
- if (strcmp(token, "UDP_EX") == 0) {
- mask |= ETH_RSS_IPV6_UDP_EX;
- continue;
- }
- return -EINVAL;
- }
-
- p->rss_proto_ipv6 = mask;
- return 0;
-}
-
-static int
-parse_link_rss_proto_l2(struct app_link_params *p,
- char *value)
-{
- uint64_t mask = 0;
-
- while (1) {
- char *token = strtok_r(value, PARSE_DELIMITER, &value);
-
- if (token == NULL)
- break;
-
- if (strcmp(token, "L2") == 0) {
- mask |= ETH_RSS_L2_PAYLOAD;
- continue;
- }
- return -EINVAL;
- }
-
- p->rss_proto_l2 = mask;
- return 0;
-}
-
-static void
-parse_link(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_link_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- int rss_qs_present = 0;
- int rss_proto_ipv4_present = 0;
- int rss_proto_ipv6_present = 0;
- int rss_proto_l2_present = 0;
- int pci_bdf_present = 0;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->link_params, section_name);
- param = &app->link_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "promisc") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- param->promisc = status;
- continue;
- }
-
- if (strcmp(ent->name, "arp_q") == 0) {
- int status = parser_read_uint32(&param->arp_q,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "tcp_syn_q") == 0) {
- int status = parser_read_uint32(
- &param->tcp_syn_q, ent->value);
-
- PARSE_ERROR((status == 0), section_name, ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "ip_local_q") == 0) {
- int status = parser_read_uint32(
- &param->ip_local_q, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "tcp_local_q") == 0) {
- int status = parser_read_uint32(
- &param->tcp_local_q, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "udp_local_q") == 0) {
- int status = parser_read_uint32(
- &param->udp_local_q, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "sctp_local_q") == 0) {
- int status = parser_read_uint32(
- &param->sctp_local_q, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "rss_qs") == 0) {
- int status = parse_link_rss_qs(param, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- rss_qs_present = 1;
- continue;
- }
-
- if (strcmp(ent->name, "rss_proto_ipv4") == 0) {
- int status =
- parse_link_rss_proto_ipv4(param, ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- rss_proto_ipv4_present = 1;
- continue;
- }
-
- if (strcmp(ent->name, "rss_proto_ipv6") == 0) {
- int status =
- parse_link_rss_proto_ipv6(param, ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- rss_proto_ipv6_present = 1;
- continue;
- }
-
- if (strcmp(ent->name, "rss_proto_l2") == 0) {
- int status = parse_link_rss_proto_l2(param, ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- rss_proto_l2_present = 1;
- continue;
- }
-
- if (strcmp(ent->name, "pci_bdf") == 0) {
- PARSE_ERROR_DUPLICATE((pci_bdf_present == 0),
- section_name, ent->name);
-
- snprintf(param->pci_bdf, APP_LINK_PCI_BDF_SIZE,
- "%s", ent->value);
- pci_bdf_present = 1;
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- /* Check for mandatory fields */
- if (app->port_mask)
- PARSE_ERROR_MESSAGE((pci_bdf_present == 0),
- section_name, "pci_bdf",
- "entry not allowed (port_mask is provided)");
- else
- PARSE_ERROR_MESSAGE((pci_bdf_present),
- section_name, "pci_bdf",
- "this entry is mandatory (port_mask is not "
- "provided)");
-
- if (rss_proto_ipv4_present)
- PARSE_ERROR_MESSAGE((rss_qs_present),
- section_name, "rss_proto_ipv4",
- "entry not allowed (rss_qs entry is not provided)");
- if (rss_proto_ipv6_present)
- PARSE_ERROR_MESSAGE((rss_qs_present),
- section_name, "rss_proto_ipv6",
- "entry not allowed (rss_qs entry is not provided)");
- if (rss_proto_l2_present)
- PARSE_ERROR_MESSAGE((rss_qs_present),
- section_name, "rss_proto_l2",
- "entry not allowed (rss_qs entry is not provided)");
-	if (rss_proto_ipv4_present ||
-		rss_proto_ipv6_present ||
-		rss_proto_l2_present) {
- if (rss_proto_ipv4_present == 0)
- param->rss_proto_ipv4 = 0;
- if (rss_proto_ipv6_present == 0)
- param->rss_proto_ipv6 = 0;
- if (rss_proto_l2_present == 0)
- param->rss_proto_l2 = 0;
- }
-
- free(entries);
-}
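
A hedged [LINK] example (queue indices and PCI address hypothetical) showing the token grammar of the three rss_proto_* entries and the pci_bdf rule enforced above:

[LINK0]
promisc = yes
arp_q = 0
tcp_syn_q = 0
ip_local_q = 1
tcp_local_q = 1
udp_local_q = 1
sctp_local_q = 1
rss_qs = 2 3                  ; required before any rss_proto_* entry
rss_proto_ipv4 = IP FRAG TCP UDP
rss_proto_ipv6 = IP TCP_EX UDP_EX
rss_proto_l2 = L2
pci_bdf = 0000:01:00.0        ; mandatory without -p PORT_MASK, forbidden with it
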
-
-static void
-parse_rxq(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_pktq_hwq_in_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->hwq_in_params, section_name);
- param = &app->hwq_in_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- APP_PARAM_ADD_LINK_FOR_RXQ(app, section_name);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "mempool") == 0) {
- int status = validate_name(ent->value,
- "MEMPOOL", 1);
- ssize_t idx;
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
-
- idx = APP_PARAM_ADD(app->mempool_params, ent->value);
- param->mempool_id = idx;
- continue;
- }
-
- if (strcmp(ent->name, "size") == 0) {
- int status = parser_read_uint32(&param->size,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "burst") == 0) {
- int status = parser_read_uint32(&param->burst,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_txq(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_pktq_hwq_out_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->hwq_out_params, section_name);
- param = &app->hwq_out_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- APP_PARAM_ADD_LINK_FOR_TXQ(app, section_name);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "size") == 0) {
- int status = parser_read_uint32(&param->size,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "burst") == 0) {
- int status = parser_read_uint32(&param->burst,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "dropless") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- param->dropless = status;
- continue;
- }
-
- if (strcmp(ent->name, "n_retries") == 0) {
- int status = parser_read_uint64(&param->n_retries,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_swq(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_pktq_swq_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- uint32_t mtu_present = 0;
- uint32_t metadata_size_present = 0;
- uint32_t mempool_direct_present = 0;
- uint32_t mempool_indirect_present = 0;
-
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->swq_params, section_name);
- param = &app->swq_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "size") == 0) {
- int status = parser_read_uint32(&param->size,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "burst_read") == 0) {
-			int status = parser_read_uint32(
-				&param->burst_read, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "burst_write") == 0) {
- int status = parser_read_uint32(
- &param->burst_write, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "dropless") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- param->dropless = status;
- continue;
- }
-
- if (strcmp(ent->name, "n_retries") == 0) {
- int status = parser_read_uint64(&param->n_retries,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "cpu") == 0) {
- int status = parser_read_uint32(
- &param->cpu_socket_id, ent->value);
-
- PARSE_ERROR((status == 0), section_name, ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "ipv4_frag") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
-
- param->ipv4_frag = status;
- if (param->mtu == 0)
- param->mtu = 1500;
-
- continue;
- }
-
- if (strcmp(ent->name, "ipv6_frag") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- param->ipv6_frag = status;
- if (param->mtu == 0)
- param->mtu = 1320;
- continue;
- }
-
- if (strcmp(ent->name, "ipv4_ras") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- param->ipv4_ras = status;
- continue;
- }
-
- if (strcmp(ent->name, "ipv6_ras") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- param->ipv6_ras = status;
- continue;
- }
-
- if (strcmp(ent->name, "mtu") == 0) {
- int status = parser_read_uint32(&param->mtu,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- mtu_present = 1;
- continue;
- }
-
- if (strcmp(ent->name, "metadata_size") == 0) {
- int status = parser_read_uint32(
- &param->metadata_size, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- metadata_size_present = 1;
- continue;
- }
-
- if (strcmp(ent->name, "mempool_direct") == 0) {
- int status = validate_name(ent->value,
- "MEMPOOL", 1);
- ssize_t idx;
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
-
- idx = APP_PARAM_ADD(app->mempool_params, ent->value);
- param->mempool_direct_id = idx;
-
- mempool_direct_present = 1;
- continue;
- }
-
- if (strcmp(ent->name, "mempool_indirect") == 0) {
- int status = validate_name(ent->value,
- "MEMPOOL", 1);
- ssize_t idx;
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
-
- idx = APP_PARAM_ADD(app->mempool_params, ent->value);
- param->mempool_indirect_id = idx;
-
- mempool_indirect_present = 1;
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- APP_CHECK(((mtu_present == 0) ||
- ((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
- "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
- "is off, therefore entry \"mtu\" is not allowed",
- section_name);
-
- APP_CHECK(((metadata_size_present == 0) ||
- ((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
- "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
- "is off, therefore entry \"metadata_size\" is "
- "not allowed", section_name);
-
- APP_CHECK(((mempool_direct_present == 0) ||
- ((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
- "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
- "is off, therefore entry \"mempool_direct\" is "
- "not allowed", section_name);
-
- APP_CHECK(((mempool_indirect_present == 0) ||
- ((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
- "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
- "is off, therefore entry \"mempool_indirect\" is "
- "not allowed", section_name);
-
- free(entries);
-}
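
A hedged [SWQ] example (sizes hypothetical); the last four entries are accepted only because ipv4_frag (or ipv6_frag) is turned on, which is exactly what the four APP_CHECK() calls above enforce:

[SWQ0]
size = 4096
burst_read = 32
burst_write = 32
cpu = 0
ipv4_frag = yes
mtu = 1500
metadata_size = 64
mempool_direct = MEMPOOL0
mempool_indirect = MEMPOOL1
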
-
-static void
-parse_tm(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_pktq_tm_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->tm_params, section_name);
- param = &app->tm_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- APP_PARAM_ADD_LINK_FOR_TM(app, section_name);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "cfg") == 0) {
- param->file_name = strdup(ent->value);
- PARSE_ERROR_MALLOC(param->file_name != NULL);
- continue;
- }
-
- if (strcmp(ent->name, "burst_read") == 0) {
- int status = parser_read_uint32(
- &param->burst_read, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "burst_write") == 0) {
- int status = parser_read_uint32(
- &param->burst_write, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_tap(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_pktq_tap_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->tap_params, section_name);
- param = &app->tap_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "burst_read") == 0) {
- int status = parser_read_uint32(
- &param->burst_read, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "burst_write") == 0) {
- int status = parser_read_uint32(
- &param->burst_write, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "dropless") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- param->dropless = status;
- continue;
- }
-
- if (strcmp(ent->name, "n_retries") == 0) {
- int status = parser_read_uint64(&param->n_retries,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "mempool") == 0) {
- int status = validate_name(ent->value,
- "MEMPOOL", 1);
- ssize_t idx;
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
-
- idx = APP_PARAM_ADD(app->mempool_params, ent->value);
- param->mempool_id = idx;
-
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_kni(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_pktq_kni_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->kni_params, section_name);
- param = &app->kni_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- APP_PARAM_ADD_LINK_FOR_KNI(app, section_name);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "core") == 0) {
- int status = parse_pipeline_core(
- &param->socket_id,
- &param->core_id,
- &param->hyper_th_id,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- param->force_bind = 1;
- continue;
- }
-
- if (strcmp(ent->name, "mempool") == 0) {
- int status = validate_name(ent->value,
- "MEMPOOL", 1);
- ssize_t idx;
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
-
- idx = APP_PARAM_ADD(app->mempool_params, ent->value);
- param->mempool_id = idx;
- continue;
- }
-
- if (strcmp(ent->name, "burst_read") == 0) {
- int status = parser_read_uint32(&param->burst_read,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "burst_write") == 0) {
- int status = parser_read_uint32(&param->burst_write,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "dropless") == 0) {
- int status = parser_read_arg_bool(ent->value);
-
- PARSE_ERROR((status != -EINVAL), section_name,
- ent->name);
- param->dropless = status;
- continue;
- }
-
- if (strcmp(ent->name, "n_retries") == 0) {
- int status = parser_read_uint64(&param->n_retries,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_source(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_pktq_source_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
- uint32_t pcap_file_present = 0;
- uint32_t pcap_size_present = 0;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->source_params, section_name);
- param = &app->source_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "mempool") == 0) {
- int status = validate_name(ent->value,
- "MEMPOOL", 1);
- ssize_t idx;
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
-
- idx = APP_PARAM_ADD(app->mempool_params, ent->value);
- param->mempool_id = idx;
- continue;
- }
-
- if (strcmp(ent->name, "burst") == 0) {
- int status = parser_read_uint32(&param->burst,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "pcap_file_rd") == 0) {
- PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
- section_name, ent->name);
-
- param->file_name = strdup(ent->value);
-
- PARSE_ERROR_MALLOC(param->file_name != NULL);
- pcap_file_present = 1;
-
- continue;
- }
-
- if (strcmp(ent->name, "pcap_bytes_rd_per_pkt") == 0) {
- int status;
-
- PARSE_ERROR_DUPLICATE((pcap_size_present == 0),
- section_name, ent->name);
-
- status = parser_read_uint32(
- &param->n_bytes_per_pkt, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- pcap_size_present = 1;
-
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_sink(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_pktq_sink_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
- uint32_t pcap_file_present = 0;
- uint32_t pcap_n_pkt_present = 0;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->sink_params, section_name);
- param = &app->sink_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "pcap_file_wr") == 0) {
- PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
- section_name, ent->name);
-
- param->file_name = strdup(ent->value);
-
- PARSE_ERROR_MALLOC((param->file_name != NULL));
-
- continue;
- }
-
- if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
- int status;
-
- PARSE_ERROR_DUPLICATE((pcap_n_pkt_present == 0),
- section_name, ent->name);
-
- status = parser_read_uint32(
- &param->n_pkts_to_dump, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
-
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_msgq_req_pipeline(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_msgq_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- param = &app->msgq_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "size") == 0) {
- int status = parser_read_uint32(&param->size,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_msgq_rsp_pipeline(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_msgq_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- param = &app->msgq_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "size") == 0) {
- int status = parser_read_uint32(&param->size,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-static void
-parse_msgq(struct app_params *app,
- const char *section_name,
- struct rte_cfgfile *cfg)
-{
- struct app_msgq_params *param;
- struct rte_cfgfile_entry *entries;
- int n_entries, i;
- ssize_t param_idx;
-
- n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
- PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
-
- entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
- PARSE_ERROR_MALLOC(entries != NULL);
-
- rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
-
- param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
- param = &app->msgq_params[param_idx];
- PARSE_CHECK_DUPLICATE_SECTION(param);
-
- for (i = 0; i < n_entries; i++) {
- struct rte_cfgfile_entry *ent = &entries[i];
-
- if (strcmp(ent->name, "size") == 0) {
- int status = parser_read_uint32(&param->size,
- ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- if (strcmp(ent->name, "cpu") == 0) {
- int status = parser_read_uint32(
- &param->cpu_socket_id, ent->value);
-
- PARSE_ERROR((status == 0), section_name,
- ent->name);
- continue;
- }
-
- /* unrecognized */
- PARSE_ERROR_INVALID(0, section_name, ent->name);
- }
-
- free(entries);
-}
-
-typedef void (*config_section_load)(struct app_params *p,
- const char *section_name,
- struct rte_cfgfile *cfg);
-
-struct config_section {
- const char prefix[CFG_NAME_LEN];
- int numbers;
- config_section_load load;
-};
-
-static const struct config_section cfg_file_scheme[] = {
- {"EAL", 0, parse_eal},
- {"PIPELINE", 1, parse_pipeline},
- {"MEMPOOL", 1, parse_mempool},
- {"LINK", 1, parse_link},
- {"RXQ", 2, parse_rxq},
- {"TXQ", 2, parse_txq},
- {"SWQ", 1, parse_swq},
- {"TM", 1, parse_tm},
- {"TAP", 1, parse_tap},
- {"KNI", 1, parse_kni},
- {"SOURCE", 1, parse_source},
- {"SINK", 1, parse_sink},
- {"MSGQ-REQ-PIPELINE", 1, parse_msgq_req_pipeline},
- {"MSGQ-RSP-PIPELINE", 1, parse_msgq_rsp_pipeline},
- {"MSGQ", 1, parse_msgq},
-};
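
The "numbers" field is the count of indices that validate_name() allows after the prefix; as a hedged illustration, these hypothetical section names are all valid under the scheme above:

[EAL]                  ; numbers = 0: bare prefix only
[PIPELINE0]            ; numbers = 1: one index
[MEMPOOL1]
[RXQ0.0]               ; numbers = 2: <link>.<queue>
[TXQ0.1]
[MSGQ-REQ-PIPELINE0]   ; numbers = 1
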
-
-static void
-create_implicit_mempools(struct app_params *app)
-{
- APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
-}
-
-static void
-create_implicit_links_from_port_mask(struct app_params *app,
- uint64_t port_mask)
-{
- uint32_t pmd_id, link_id;
-
- link_id = 0;
- for (pmd_id = 0; pmd_id < RTE_MAX_ETHPORTS; pmd_id++) {
- char name[APP_PARAM_NAME_SIZE];
- ssize_t idx;
-
- if ((port_mask & (1LLU << pmd_id)) == 0)
- continue;
-
- snprintf(name, sizeof(name), "LINK%" PRIu32, link_id);
- idx = APP_PARAM_ADD(app->link_params, name);
-
- app->link_params[idx].pmd_id = pmd_id;
- link_id++;
- }
-}
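
As a worked example, port_mask = 0x5 (binary 101) has bits 0 and 2 set, so the loop above creates two links with consecutive names but non-consecutive PMD ids:

pmd_id 0 -> "LINK0" (link_id 0)
pmd_id 2 -> "LINK1" (link_id 1)
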
-
-static void
-assign_link_pmd_id_from_pci_bdf(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_links; i++) {
- struct app_link_params *link = &app->link_params[i];
-
- APP_CHECK((strlen(link->pci_bdf)),
- "Parse error: %s pci_bdf is not configured "
- "(port_mask is not provided)",
- link->name);
-
- link->pmd_id = i;
- }
-}
-
-int
-app_config_parse(struct app_params *app, const char *file_name)
-{
- struct rte_cfgfile *cfg;
- char **section_names;
- int i, j, sect_count;
-
- /* Implicit mempools */
- create_implicit_mempools(app);
-
- /* Port mask */
- if (app->port_mask)
- create_implicit_links_from_port_mask(app, app->port_mask);
-
- /* Load application configuration file */
- cfg = rte_cfgfile_load(file_name, 0);
- APP_CHECK((cfg != NULL), "Parse error: Unable to load config "
- "file %s", file_name);
-
- sect_count = rte_cfgfile_num_sections(cfg, NULL, 0);
- APP_CHECK((sect_count > 0), "Parse error: number of sections "
- "in file \"%s\" return %d", file_name,
- sect_count);
-
- section_names = malloc(sect_count * sizeof(char *));
- PARSE_ERROR_MALLOC(section_names != NULL);
-
-	for (i = 0; i < sect_count; i++) {
-		section_names[i] = malloc(CFG_NAME_LEN);
-		PARSE_ERROR_MALLOC(section_names[i] != NULL);
-	}
-
- rte_cfgfile_sections(cfg, section_names, sect_count);
-
- for (i = 0; i < sect_count; i++) {
- const struct config_section *sch_s;
- int len, cfg_name_len;
-
- cfg_name_len = strlen(section_names[i]);
-
- /* Find section type */
- for (j = 0; j < (int)RTE_DIM(cfg_file_scheme); j++) {
- sch_s = &cfg_file_scheme[j];
- len = strlen(sch_s->prefix);
-
- if (cfg_name_len < len)
- continue;
-
- /* After section name we expect only '\0' or digit or
- * digit dot digit, so protect against false matching,
- * for example: "ABC" should match section name
- * "ABC0.0", but it should not match section_name
- * "ABCDEF".
- */
- if ((section_names[i][len] != '\0') &&
- !isdigit(section_names[i][len]))
- continue;
-
- if (strncmp(sch_s->prefix, section_names[i], len) == 0)
- break;
- }
-
- APP_CHECK(j < (int)RTE_DIM(cfg_file_scheme),
- "Parse error: unknown section %s",
- section_names[i]);
-
- APP_CHECK(validate_name(section_names[i],
- sch_s->prefix,
- sch_s->numbers) == 0,
- "Parse error: invalid section name \"%s\"",
- section_names[i]);
-
- sch_s->load(app, section_names[i], cfg);
- }
-
- for (i = 0; i < sect_count; i++)
- free(section_names[i]);
-
- free(section_names);
-
- rte_cfgfile_close(cfg);
-
- APP_PARAM_COUNT(app->mempool_params, app->n_mempools);
- APP_PARAM_COUNT(app->link_params, app->n_links);
- APP_PARAM_COUNT(app->hwq_in_params, app->n_pktq_hwq_in);
- APP_PARAM_COUNT(app->hwq_out_params, app->n_pktq_hwq_out);
- APP_PARAM_COUNT(app->swq_params, app->n_pktq_swq);
- APP_PARAM_COUNT(app->tm_params, app->n_pktq_tm);
- APP_PARAM_COUNT(app->tap_params, app->n_pktq_tap);
- APP_PARAM_COUNT(app->kni_params, app->n_pktq_kni);
- APP_PARAM_COUNT(app->source_params, app->n_pktq_source);
- APP_PARAM_COUNT(app->sink_params, app->n_pktq_sink);
- APP_PARAM_COUNT(app->msgq_params, app->n_msgq);
- APP_PARAM_COUNT(app->pipeline_params, app->n_pipelines);
-
- if (app->port_mask == 0)
- assign_link_pmd_id_from_pci_bdf(app);
-
- /* Save configuration to output file */
- app_config_save(app, app->output_file);
-
- /* Load TM configuration files */
- app_config_parse_tm(app);
-
- return 0;
-}
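
A minimal standalone sketch (not part of the original file) of the prefix-match rule applied in the section loop above: a scheme prefix matches only when it is followed by end-of-string or a digit, so "ABC" matches "ABC0.0" but not "ABCDEF".

#include <ctype.h>
#include <string.h>

static int
section_prefix_match(const char *prefix, const char *section_name)
{
	size_t len = strlen(prefix);

	/* Section name must be at least as long as the prefix */
	if (strlen(section_name) < len)
		return 0;

	/* Protect against false matches such as "ABC" vs "ABCDEF" */
	if ((section_name[len] != '\0') &&
	    !isdigit((unsigned char)section_name[len]))
		return 0;

	return strncmp(prefix, section_name, len) == 0;
}
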
-
-static void
-save_eal_params(struct app_params *app, FILE *f)
-{
- struct app_eal_params *p = &app->eal_params;
- uint32_t i;
-
- fprintf(f, "[EAL]\n");
-
- if (p->coremap)
- fprintf(f, "%s = %s\n", "lcores", p->coremap);
-
- if (p->master_lcore_present)
- fprintf(f, "%s = %" PRIu32 "\n",
- "master_lcore", p->master_lcore);
-
- fprintf(f, "%s = %" PRIu32 "\n", "n", p->channels);
-
- if (p->memory_present)
- fprintf(f, "%s = %" PRIu32 "\n", "m", p->memory);
-
- if (p->ranks_present)
- fprintf(f, "%s = %" PRIu32 "\n", "r", p->ranks);
-
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->pci_blacklist[i] == NULL)
- break;
-
- fprintf(f, "%s = %s\n", "pci_blacklist",
- p->pci_blacklist[i]);
- }
-
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->pci_whitelist[i] == NULL)
- break;
-
- fprintf(f, "%s = %s\n", "pci_whitelist",
- p->pci_whitelist[i]);
- }
-
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->vdev[i] == NULL)
- break;
-
- fprintf(f, "%s = %s\n", "vdev",
- p->vdev[i]);
- }
-
- if (p->vmware_tsc_map_present)
- fprintf(f, "%s = %s\n", "vmware_tsc_map",
- (p->vmware_tsc_map) ? "yes" : "no");
-
- if (p->proc_type)
- fprintf(f, "%s = %s\n", "proc_type", p->proc_type);
-
- if (p->syslog)
- fprintf(f, "%s = %s\n", "syslog", p->syslog);
-
- if (p->log_level_present)
- fprintf(f, "%s = %" PRIu32 "\n", "log_level", p->log_level);
-
- if (p->version_present)
- fprintf(f, "%s = %s\n", "v", (p->version) ? "yes" : "no");
-
- if (p->help_present)
- fprintf(f, "%s = %s\n", "help", (p->help) ? "yes" : "no");
-
- if (p->no_huge_present)
- fprintf(f, "%s = %s\n", "no_huge", (p->no_huge) ? "yes" : "no");
-
- if (p->no_pci_present)
- fprintf(f, "%s = %s\n", "no_pci", (p->no_pci) ? "yes" : "no");
-
- if (p->no_hpet_present)
- fprintf(f, "%s = %s\n", "no_hpet", (p->no_hpet) ? "yes" : "no");
-
- if (p->no_shconf_present)
- fprintf(f, "%s = %s\n", "no_shconf",
- (p->no_shconf) ? "yes" : "no");
-
- if (p->add_driver)
- fprintf(f, "%s = %s\n", "d", p->add_driver);
-
- if (p->socket_mem)
- fprintf(f, "%s = %s\n", "socket_mem", p->socket_mem);
-
- if (p->huge_dir)
- fprintf(f, "%s = %s\n", "huge_dir", p->huge_dir);
-
- if (p->file_prefix)
- fprintf(f, "%s = %s\n", "file_prefix", p->file_prefix);
-
- if (p->base_virtaddr)
- fprintf(f, "%s = %s\n", "base_virtaddr", p->base_virtaddr);
-
- if (p->create_uio_dev_present)
- fprintf(f, "%s = %s\n", "create_uio_dev",
- (p->create_uio_dev) ? "yes" : "no");
-
- if (p->vfio_intr)
- fprintf(f, "%s = %s\n", "vfio_intr", p->vfio_intr);
-
- fputc('\n', f);
-}
-
-static void
-save_mempool_params(struct app_params *app, FILE *f)
-{
- struct app_mempool_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->mempool_params);
- for (i = 0; i < count; i++) {
- p = &app->mempool_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %" PRIu32 "\n", "buffer_size", p->buffer_size);
- fprintf(f, "%s = %" PRIu32 "\n", "pool_size", p->pool_size);
- fprintf(f, "%s = %" PRIu32 "\n", "cache_size", p->cache_size);
- fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
-
- fputc('\n', f);
- }
-}
-
-static void
-save_links_params(struct app_params *app, FILE *f)
-{
- struct app_link_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->link_params);
- for (i = 0; i < count; i++) {
- p = &app->link_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "; %s = %" PRIu32 "\n", "pmd_id", p->pmd_id);
- fprintf(f, "%s = %s\n", "promisc", p->promisc ? "yes" : "no");
- fprintf(f, "%s = %" PRIu32 "\n", "arp_q", p->arp_q);
- fprintf(f, "%s = %" PRIu32 "\n", "tcp_syn_q",
- p->tcp_syn_q);
- fprintf(f, "%s = %" PRIu32 "\n", "ip_local_q", p->ip_local_q);
- fprintf(f, "%s = %" PRIu32 "\n", "tcp_local_q", p->tcp_local_q);
- fprintf(f, "%s = %" PRIu32 "\n", "udp_local_q", p->udp_local_q);
- fprintf(f, "%s = %" PRIu32 "\n", "sctp_local_q",
- p->sctp_local_q);
-
- if (p->n_rss_qs) {
- uint32_t j;
-
- /* rss_qs */
- fprintf(f, "rss_qs = ");
- for (j = 0; j < p->n_rss_qs; j++)
- fprintf(f, "%" PRIu32 " ", p->rss_qs[j]);
- fputc('\n', f);
-
- /* rss_proto_ipv4 */
- if (p->rss_proto_ipv4) {
- fprintf(f, "rss_proto_ipv4 = ");
- if (p->rss_proto_ipv4 & ETH_RSS_IPV4)
- fprintf(f, "IP ");
- if (p->rss_proto_ipv4 & ETH_RSS_FRAG_IPV4)
- fprintf(f, "FRAG ");
- if (p->rss_proto_ipv4 &
- ETH_RSS_NONFRAG_IPV4_TCP)
- fprintf(f, "TCP ");
- if (p->rss_proto_ipv4 &
- ETH_RSS_NONFRAG_IPV4_UDP)
- fprintf(f, "UDP ");
- if (p->rss_proto_ipv4 &
- ETH_RSS_NONFRAG_IPV4_SCTP)
- fprintf(f, "SCTP ");
- if (p->rss_proto_ipv4 &
- ETH_RSS_NONFRAG_IPV4_OTHER)
- fprintf(f, "OTHER ");
- fprintf(f, "\n");
- } else
- fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
-
- /* rss_proto_ipv6 */
- if (p->rss_proto_ipv6) {
- fprintf(f, "rss_proto_ipv6 = ");
- if (p->rss_proto_ipv6 & ETH_RSS_IPV6)
- fprintf(f, "IP ");
- if (p->rss_proto_ipv6 & ETH_RSS_FRAG_IPV6)
- fprintf(f, "FRAG ");
- if (p->rss_proto_ipv6 &
- ETH_RSS_NONFRAG_IPV6_TCP)
- fprintf(f, "TCP ");
- if (p->rss_proto_ipv6 &
- ETH_RSS_NONFRAG_IPV6_UDP)
- fprintf(f, "UDP ");
- if (p->rss_proto_ipv6 &
- ETH_RSS_NONFRAG_IPV6_SCTP)
- fprintf(f, "SCTP ");
- if (p->rss_proto_ipv6 &
- ETH_RSS_NONFRAG_IPV6_OTHER)
- fprintf(f, "OTHER ");
- if (p->rss_proto_ipv6 & ETH_RSS_IPV6_EX)
- fprintf(f, "IP_EX ");
- if (p->rss_proto_ipv6 &
- ETH_RSS_IPV6_TCP_EX)
- fprintf(f, "TCP_EX ");
- if (p->rss_proto_ipv6 &
- ETH_RSS_IPV6_UDP_EX)
- fprintf(f, "UDP_EX ");
- fprintf(f, "\n");
- } else
- fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
-
- /* rss_proto_l2 */
- if (p->rss_proto_l2) {
- fprintf(f, "rss_proto_l2 = ");
- if (p->rss_proto_l2 & ETH_RSS_L2_PAYLOAD)
- fprintf(f, "L2 ");
- fprintf(f, "\n");
- } else
- fprintf(f, "; rss_proto_l2 = <NONE>\n");
- } else {
- fprintf(f, "; rss_qs = <NONE>\n");
- fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
- fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
- fprintf(f, "; rss_proto_l2 = <NONE>\n");
- }
-
- if (strlen(p->pci_bdf))
- fprintf(f, "%s = %s\n", "pci_bdf", p->pci_bdf);
-
- fputc('\n', f);
- }
-}
-
-static void
-save_rxq_params(struct app_params *app, FILE *f)
-{
- struct app_pktq_hwq_in_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->hwq_in_params);
- for (i = 0; i < count; i++) {
- p = &app->hwq_in_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %s\n",
- "mempool",
- app->mempool_params[p->mempool_id].name);
- fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
- fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
-
- fputc('\n', f);
- }
-}
-
-static void
-save_txq_params(struct app_params *app, FILE *f)
-{
- struct app_pktq_hwq_out_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->hwq_out_params);
- for (i = 0; i < count; i++) {
- p = &app->hwq_out_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
- fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
- fprintf(f, "%s = %s\n",
- "dropless",
- p->dropless ? "yes" : "no");
- fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
-
- fputc('\n', f);
- }
-}
-
-static void
-save_swq_params(struct app_params *app, FILE *f)
-{
- struct app_pktq_swq_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->swq_params);
- for (i = 0; i < count; i++) {
- p = &app->swq_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
- fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
- fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
- fprintf(f, "%s = %s\n", "dropless", p->dropless ? "yes" : "no");
- fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
- fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
- fprintf(f, "%s = %s\n", "ipv4_frag", p->ipv4_frag ? "yes" : "no");
- fprintf(f, "%s = %s\n", "ipv6_frag", p->ipv6_frag ? "yes" : "no");
- fprintf(f, "%s = %s\n", "ipv4_ras", p->ipv4_ras ? "yes" : "no");
- fprintf(f, "%s = %s\n", "ipv6_ras", p->ipv6_ras ? "yes" : "no");
- if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1)) {
- fprintf(f, "%s = %" PRIu32 "\n", "mtu", p->mtu);
- fprintf(f, "%s = %" PRIu32 "\n", "metadata_size", p->metadata_size);
- fprintf(f, "%s = %s\n",
- "mempool_direct",
- app->mempool_params[p->mempool_direct_id].name);
- fprintf(f, "%s = %s\n",
- "mempool_indirect",
- app->mempool_params[p->mempool_indirect_id].name);
- }
-
- fputc('\n', f);
- }
-}
-
-static void
-save_tm_params(struct app_params *app, FILE *f)
-{
- struct app_pktq_tm_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->tm_params);
- for (i = 0; i < count; i++) {
- p = &app->tm_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %s\n", "cfg", p->file_name);
- fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
- fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
-
- fputc('\n', f);
- }
-}
-
-static void
-save_tap_params(struct app_params *app, FILE *f)
-{
- struct app_pktq_tap_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->tap_params);
- for (i = 0; i < count; i++) {
- p = &app->tap_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
- fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
- fprintf(f, "%s = %s\n", "dropless", p->dropless ? "yes" : "no");
- fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
- fprintf(f, "%s = %s\n", "mempool",
- app->mempool_params[p->mempool_id].name);
-
- fputc('\n', f);
- }
-}
-
-static void
-save_kni_params(struct app_params *app, FILE *f)
-{
- struct app_pktq_kni_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->kni_params);
- for (i = 0; i < count; i++) {
- p = &app->kni_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- /* section name */
- fprintf(f, "[%s]\n", p->name);
-
- /* core */
- if (p->force_bind) {
- fprintf(f, "; force_bind = 1\n");
- fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
- p->socket_id,
- p->core_id,
- (p->hyper_th_id) ? "h" : "");
- } else
- fprintf(f, "; force_bind = 0\n");
-
- /* mempool */
- fprintf(f, "%s = %s\n", "mempool",
- app->mempool_params[p->mempool_id].name);
-
- /* burst_read */
- fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
-
- /* burst_write */
- fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
-
- /* dropless */
- fprintf(f, "%s = %s\n",
- "dropless",
- p->dropless ? "yes" : "no");
-
- /* n_retries */
- fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
-
- fputc('\n', f);
- }
-}
-
-static void
-save_source_params(struct app_params *app, FILE *f)
-{
- struct app_pktq_source_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->source_params);
- for (i = 0; i < count; i++) {
- p = &app->source_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %s\n",
- "mempool",
- app->mempool_params[p->mempool_id].name);
- fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
- fprintf(f, "%s = %s\n", "pcap_file_rd", p->file_name);
- fprintf(f, "%s = %" PRIu32 "\n", "pcap_bytes_rd_per_pkt",
- p->n_bytes_per_pkt);
- fputc('\n', f);
- }
-}
-
-static void
-save_sink_params(struct app_params *app, FILE *f)
-{
- struct app_pktq_sink_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->sink_params);
- for (i = 0; i < count; i++) {
- p = &app->sink_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %s\n", "pcap_file_wr", p->file_name);
- fprintf(f, "%s = %" PRIu32 "\n",
- "pcap_n_pkt_wr", p->n_pkts_to_dump);
- fputc('\n', f);
- }
-}
-
-static void
-save_msgq_params(struct app_params *app, FILE *f)
-{
- struct app_msgq_params *p;
- size_t i, count;
-
- count = RTE_DIM(app->msgq_params);
- for (i = 0; i < count; i++) {
- p = &app->msgq_params[i];
- if (!APP_PARAM_VALID(p))
- continue;
-
- fprintf(f, "[%s]\n", p->name);
- fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
- fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
-
- fputc('\n', f);
- }
-}
-
-static void
-save_pipeline_params(struct app_params *app, FILE *f)
-{
- size_t i, count;
-
- count = RTE_DIM(app->pipeline_params);
- for (i = 0; i < count; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
-
- if (!APP_PARAM_VALID(p))
- continue;
-
- /* section name */
- fprintf(f, "[%s]\n", p->name);
-
- /* type */
- fprintf(f, "type = %s\n", p->type);
-
- /* core */
- fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
- p->socket_id,
- p->core_id,
- (p->hyper_th_id) ? "h" : "");
-
- /* pktq_in */
- if (p->n_pktq_in) {
- uint32_t j;
-
- fprintf(f, "pktq_in =");
- for (j = 0; j < p->n_pktq_in; j++) {
- struct app_pktq_in_params *pp = &p->pktq_in[j];
- char *name;
-
- switch (pp->type) {
- case APP_PKTQ_IN_HWQ:
- name = app->hwq_in_params[pp->id].name;
- break;
- case APP_PKTQ_IN_SWQ:
- name = app->swq_params[pp->id].name;
- break;
- case APP_PKTQ_IN_TM:
- name = app->tm_params[pp->id].name;
- break;
- case APP_PKTQ_IN_TAP:
- name = app->tap_params[pp->id].name;
- break;
- case APP_PKTQ_IN_KNI:
- name = app->kni_params[pp->id].name;
- break;
- case APP_PKTQ_IN_SOURCE:
- name = app->source_params[pp->id].name;
- break;
- default:
- APP_CHECK(0, "System error "
- "occurred while saving "
- "parameter to file");
- }
-
- fprintf(f, " %s", name);
- }
- fprintf(f, "\n");
- }
-
-		/* pktq_out */
- if (p->n_pktq_out) {
- uint32_t j;
-
- fprintf(f, "pktq_out =");
- for (j = 0; j < p->n_pktq_out; j++) {
- struct app_pktq_out_params *pp =
- &p->pktq_out[j];
- char *name;
-
- switch (pp->type) {
- case APP_PKTQ_OUT_HWQ:
- name = app->hwq_out_params[pp->id].name;
- break;
- case APP_PKTQ_OUT_SWQ:
- name = app->swq_params[pp->id].name;
- break;
- case APP_PKTQ_OUT_TM:
- name = app->tm_params[pp->id].name;
- break;
- case APP_PKTQ_OUT_TAP:
- name = app->tap_params[pp->id].name;
- break;
- case APP_PKTQ_OUT_KNI:
- name = app->kni_params[pp->id].name;
- break;
- case APP_PKTQ_OUT_SINK:
- name = app->sink_params[pp->id].name;
- break;
- default:
- APP_CHECK(0, "System error "
- "occurred while saving "
- "parameter to file");
- }
-
- fprintf(f, " %s", name);
- }
- fprintf(f, "\n");
- }
-
- /* msgq_in */
- if (p->n_msgq_in) {
- uint32_t j;
-
- fprintf(f, "msgq_in =");
- for (j = 0; j < p->n_msgq_in; j++) {
- uint32_t id = p->msgq_in[j];
- char *name = app->msgq_params[id].name;
-
- fprintf(f, " %s", name);
- }
- fprintf(f, "\n");
- }
-
- /* msgq_out */
- if (p->n_msgq_out) {
- uint32_t j;
-
- fprintf(f, "msgq_out =");
- for (j = 0; j < p->n_msgq_out; j++) {
- uint32_t id = p->msgq_out[j];
- char *name = app->msgq_params[id].name;
-
- fprintf(f, " %s", name);
- }
- fprintf(f, "\n");
- }
-
- /* timer_period */
- fprintf(f, "timer_period = %" PRIu32 "\n", p->timer_period);
-
- /* args */
- if (p->n_args) {
- uint32_t j;
-
- for (j = 0; j < p->n_args; j++)
- fprintf(f, "%s = %s\n", p->args_name[j],
- p->args_value[j]);
- }
-
- fprintf(f, "\n");
- }
-}
-
-void
-app_config_save(struct app_params *app, const char *file_name)
-{
- FILE *file;
- char *name, *dir_name;
- int status;
-
- name = strdup(file_name);
- dir_name = dirname(name);
- status = access(dir_name, W_OK);
- APP_CHECK((status == 0),
- "Error: need write access privilege to directory "
- "\"%s\" to save configuration\n", dir_name);
-
- file = fopen(file_name, "w");
- APP_CHECK((file != NULL),
- "Error: failed to save configuration to file \"%s\"",
- file_name);
-
- save_eal_params(app, file);
- save_pipeline_params(app, file);
- save_mempool_params(app, file);
- save_links_params(app, file);
- save_rxq_params(app, file);
- save_txq_params(app, file);
- save_swq_params(app, file);
- save_tm_params(app, file);
- save_tap_params(app, file);
- save_kni_params(app, file);
- save_source_params(app, file);
- save_sink_params(app, file);
- save_msgq_params(app, file);
-
- fclose(file);
- free(name);
-}
-
-int
-app_config_init(struct app_params *app)
-{
- size_t i;
-
- memcpy(app, &app_params_default, sizeof(struct app_params));
-
- for (i = 0; i < RTE_DIM(app->mempool_params); i++)
- memcpy(&app->mempool_params[i],
- &mempool_params_default,
- sizeof(struct app_mempool_params));
-
- for (i = 0; i < RTE_DIM(app->link_params); i++)
- memcpy(&app->link_params[i],
- &link_params_default,
- sizeof(struct app_link_params));
-
- for (i = 0; i < RTE_DIM(app->hwq_in_params); i++)
- memcpy(&app->hwq_in_params[i],
- &default_hwq_in_params,
- sizeof(default_hwq_in_params));
-
- for (i = 0; i < RTE_DIM(app->hwq_out_params); i++)
- memcpy(&app->hwq_out_params[i],
- &default_hwq_out_params,
- sizeof(default_hwq_out_params));
-
- for (i = 0; i < RTE_DIM(app->swq_params); i++)
- memcpy(&app->swq_params[i],
- &default_swq_params,
- sizeof(default_swq_params));
-
- for (i = 0; i < RTE_DIM(app->tm_params); i++)
- memcpy(&app->tm_params[i],
- &default_tm_params,
- sizeof(default_tm_params));
-
- for (i = 0; i < RTE_DIM(app->tap_params); i++)
- memcpy(&app->tap_params[i],
- &default_tap_params,
- sizeof(default_tap_params));
-
- for (i = 0; i < RTE_DIM(app->kni_params); i++)
- memcpy(&app->kni_params[i],
- &default_kni_params,
- sizeof(default_kni_params));
-
- for (i = 0; i < RTE_DIM(app->source_params); i++)
- memcpy(&app->source_params[i],
- &default_source_params,
- sizeof(default_source_params));
-
- for (i = 0; i < RTE_DIM(app->sink_params); i++)
- memcpy(&app->sink_params[i],
- &default_sink_params,
- sizeof(default_sink_params));
-
- for (i = 0; i < RTE_DIM(app->msgq_params); i++)
- memcpy(&app->msgq_params[i],
- &default_msgq_params,
- sizeof(default_msgq_params));
-
- for (i = 0; i < RTE_DIM(app->pipeline_params); i++)
- memcpy(&app->pipeline_params[i],
- &default_pipeline_params,
- sizeof(default_pipeline_params));
-
- return 0;
-}
-
-static char *
-filenamedup(const char *filename, const char *suffix)
-{
- char *s = malloc(strlen(filename) + strlen(suffix) + 1);
-
- if (!s)
- return NULL;
-
- sprintf(s, "%s%s", filename, suffix);
- return s;
-}
-
-int
-app_config_args(struct app_params *app, int argc, char **argv)
-{
- const char *optname;
- int opt, option_index;
- int f_present, s_present, p_present, l_present;
- int preproc_present, preproc_params_present;
-	int scanned = 0;
-
- static struct option lgopts[] = {
- { "preproc", 1, 0, 0 },
- { "preproc-args", 1, 0, 0 },
- { NULL, 0, 0, 0 }
- };
-
- /* Copy application name */
- strncpy(app->app_name, argv[0], APP_APPNAME_SIZE - 1);
-
- f_present = 0;
- s_present = 0;
- p_present = 0;
- l_present = 0;
- preproc_present = 0;
- preproc_params_present = 0;
-
- while ((opt = getopt_long(argc, argv, "f:s:p:l:", lgopts,
- &option_index)) != EOF)
- switch (opt) {
- case 'f':
- if (f_present)
- rte_panic("Error: Config file is provided "
- "more than once\n");
- f_present = 1;
-
- if (!strlen(optarg))
- rte_panic("Error: Config file name is null\n");
-
- app->config_file = strdup(optarg);
- if (app->config_file == NULL)
- rte_panic("Error: Memory allocation failure\n");
-
- break;
-
- case 's':
- if (s_present)
- rte_panic("Error: Script file is provided "
- "more than once\n");
- s_present = 1;
-
- if (!strlen(optarg))
- rte_panic("Error: Script file name is null\n");
-
- app->script_file = strdup(optarg);
- if (app->script_file == NULL)
- rte_panic("Error: Memory allocation failure\n");
-
- break;
-
- case 'p':
- if (p_present)
- rte_panic("Error: PORT_MASK is provided "
- "more than once\n");
- p_present = 1;
-
- if ((sscanf(optarg, "%" SCNx64 "%n", &app->port_mask,
-				&scanned) != 1) ||
-				((size_t) scanned != strlen(optarg)))
- rte_panic("Error: PORT_MASK is not "
- "a hexadecimal integer\n");
-
- if (app->port_mask == 0)
- rte_panic("Error: PORT_MASK is null\n");
-
- break;
-
- case 'l':
- if (l_present)
- rte_panic("Error: LOG_LEVEL is provided "
- "more than once\n");
- l_present = 1;
-
- if ((sscanf(optarg, "%" SCNu32 "%n", &app->log_level,
-				&scanned) != 1) ||
-				((size_t) scanned != strlen(optarg)) ||
- (app->log_level >= APP_LOG_LEVELS))
- rte_panic("Error: LOG_LEVEL invalid value\n");
-
- break;
-
- case 0:
- optname = lgopts[option_index].name;
-
- if (strcmp(optname, "preproc") == 0) {
- if (preproc_present)
- rte_panic("Error: Preprocessor argument "
- "is provided more than once\n");
- preproc_present = 1;
-
- app->preproc = strdup(optarg);
- break;
- }
-
- if (strcmp(optname, "preproc-args") == 0) {
- if (preproc_params_present)
- rte_panic("Error: Preprocessor args "
- "are provided more than once\n");
- preproc_params_present = 1;
-
- app->preproc_args = strdup(optarg);
- break;
- }
-
- app_print_usage(argv[0]);
- break;
-
- default:
- app_print_usage(argv[0]);
- }
-
- optind = 1; /* reset getopt lib */
-
- /* Check dependencies between args */
- if (preproc_params_present && (preproc_present == 0))
- rte_panic("Error: Preprocessor args specified while "
- "preprocessor is not defined\n");
-
- app->parser_file = preproc_present ?
- filenamedup(app->config_file, ".preproc") :
- strdup(app->config_file);
- app->output_file = filenamedup(app->config_file, ".out");
-
- return 0;
-}
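
As an illustration (binary and file names hypothetical), an invocation accepted by this option parser:

./ip_pipeline -f app.cfg -s app.cli -p 0x3 -l 2 \
	--preproc cpp --preproc-args "-DNQUEUES=2"

With --preproc present, app->parser_file becomes "app.cfg.preproc"; app->output_file is always "app.cfg.out".
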
-
-int
-app_config_preproc(struct app_params *app)
-{
- char buffer[256];
- int status;
-
- if (app->preproc == NULL)
- return 0;
-
- status = access(app->config_file, F_OK | R_OK);
- APP_CHECK((status == 0), "Error: Unable to open file %s",
- app->config_file);
-
- snprintf(buffer, sizeof(buffer), "%s %s %s > %s",
- app->preproc,
- app->preproc_args ? app->preproc_args : "",
- app->config_file,
- app->parser_file);
-
- status = system(buffer);
- APP_CHECK((WIFEXITED(status) && (WEXITSTATUS(status) == 0)),
- "Error occurred while pre-processing file \"%s\"\n",
- app->config_file);
-
- return status;
-}
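
For the hypothetical invocation above, the snprintf()/system() pair composes and runs the shell command:

cpp -DNQUEUES=2 app.cfg > app.cfg.preproc

One caveat worth noting: the 256-byte buffer means very long preprocessor argument strings are silently truncated by snprintf() before the command is run.
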
diff --git a/examples/ip_pipeline/config_parse_tm.c b/examples/ip_pipeline/config_parse_tm.c
deleted file mode 100644
index 6edd2ca9..00000000
--- a/examples/ip_pipeline/config_parse_tm.c
+++ /dev/null
@@ -1,419 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <ctype.h>
-#include <getopt.h>
-#include <errno.h>
-#include <stdarg.h>
-#include <string.h>
-#include <libgen.h>
-#include <unistd.h>
-
-#include <rte_errno.h>
-#include <rte_cfgfile.h>
-#include <rte_string_fns.h>
-
-#include "app.h"
-
-static int
-tm_cfgfile_load_sched_port(
- struct rte_cfgfile *file,
- struct rte_sched_port_params *port_params)
-{
- const char *entry;
- int j;
-
- entry = rte_cfgfile_get_entry(file, "port", "frame overhead");
- if (entry)
- port_params->frame_overhead = (uint32_t)atoi(entry);
-
- entry = rte_cfgfile_get_entry(file, "port", "mtu");
- if (entry)
- port_params->mtu = (uint32_t)atoi(entry);
-
- entry = rte_cfgfile_get_entry(file,
- "port",
- "number of subports per port");
- if (entry)
- port_params->n_subports_per_port = (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file,
- "port",
- "number of pipes per subport");
- if (entry)
- port_params->n_pipes_per_subport = (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file, "port", "queue sizes");
- if (entry) {
- char *next;
-
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
- port_params->qsize[j] = (uint16_t)
- strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
- }
-
-#ifdef RTE_SCHED_RED
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
- char str[32];
-
- /* Parse WRED min thresholds */
- snprintf(str, sizeof(str), "tc %" PRId32 " wred min", j);
- entry = rte_cfgfile_get_entry(file, "red", str);
- if (entry) {
- char *next;
- int k;
-
- /* for each packet colour (green, yellow, red) */
- for (k = 0; k < e_RTE_METER_COLORS; k++) {
- port_params->red_params[j][k].min_th
- = (uint16_t)strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
- }
-
- /* Parse WRED max thresholds */
- snprintf(str, sizeof(str), "tc %" PRId32 " wred max", j);
- entry = rte_cfgfile_get_entry(file, "red", str);
- if (entry) {
- char *next;
- int k;
-
- /* for each packet colour (green, yellow, red) */
- for (k = 0; k < e_RTE_METER_COLORS; k++) {
- port_params->red_params[j][k].max_th
- = (uint16_t)strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
- }
-
- /* Parse WRED inverse mark probabilities */
- snprintf(str, sizeof(str), "tc %" PRId32 " wred inv prob", j);
- entry = rte_cfgfile_get_entry(file, "red", str);
- if (entry) {
- char *next;
- int k;
-
- /* for each packet colour (green, yellow, red) */
- for (k = 0; k < e_RTE_METER_COLORS; k++) {
- port_params->red_params[j][k].maxp_inv
- = (uint8_t)strtol(entry, &next, 10);
-
- if (next == NULL)
- break;
- entry = next;
- }
- }
-
- /* Parse WRED EWMA filter weights */
- snprintf(str, sizeof(str), "tc %" PRId32 " wred weight", j);
- entry = rte_cfgfile_get_entry(file, "red", str);
- if (entry) {
- char *next;
- int k;
-
- /* for each packet colour (green, yellow, red) */
- for (k = 0; k < e_RTE_METER_COLORS; k++) {
- port_params->red_params[j][k].wq_log2
- = (uint8_t)strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
- }
- }
-#endif /* RTE_SCHED_RED */
-
- return 0;
-}
-
-static int
-tm_cfgfile_load_sched_pipe(
- struct rte_cfgfile *file,
- struct rte_sched_port_params *port_params,
- struct rte_sched_pipe_params *pipe_params)
-{
- int i, j;
- char *next;
- const char *entry;
- int profiles;
-
- profiles = rte_cfgfile_num_sections(file,
- "pipe profile", sizeof("pipe profile") - 1);
- port_params->n_pipe_profiles = profiles;
-
- for (j = 0; j < profiles; j++) {
- char pipe_name[32];
-
- snprintf(pipe_name, sizeof(pipe_name),
- "pipe profile %" PRId32, j);
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tb rate");
- if (entry)
- pipe_params[j].tb_rate = (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tb size");
- if (entry)
- pipe_params[j].tb_size = (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tc period");
- if (entry)
- pipe_params[j].tc_period = (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tc 0 rate");
- if (entry)
- pipe_params[j].tc_rate[0] = (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tc 1 rate");
- if (entry)
- pipe_params[j].tc_rate[1] = (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tc 2 rate");
- if (entry)
- pipe_params[j].tc_rate[2] = (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tc 3 rate");
- if (entry)
- pipe_params[j].tc_rate[3] = (uint32_t) atoi(entry);
-
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- entry = rte_cfgfile_get_entry(file, pipe_name,
- "tc 3 oversubscription weight");
- if (entry)
- pipe_params[j].tc_ov_weight = (uint8_t)atoi(entry);
-#endif
-
- entry = rte_cfgfile_get_entry(file,
- pipe_name,
- "tc 0 wrr weights");
- if (entry)
- for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*0 + i] =
- (uint8_t) strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tc 1 wrr weights");
- if (entry)
- for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*1 + i] =
- (uint8_t) strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tc 2 wrr weights");
- if (entry)
- for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*2 + i] =
- (uint8_t) strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
-
- entry = rte_cfgfile_get_entry(file, pipe_name, "tc 3 wrr weights");
- if (entry)
- for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
- pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*3 + i] =
- (uint8_t) strtol(entry, &next, 10);
- if (next == NULL)
- break;
- entry = next;
- }
- }
- return 0;
-}
-
-static int
-tm_cfgfile_load_sched_subport(
- struct rte_cfgfile *file,
- struct rte_sched_subport_params *subport_params,
- int *pipe_to_profile)
-{
- const char *entry;
- int i, j, k;
-
- for (i = 0; i < APP_MAX_SCHED_SUBPORTS; i++) {
- char sec_name[CFG_NAME_LEN];
-
- snprintf(sec_name, sizeof(sec_name),
- "subport %" PRId32, i);
-
- if (rte_cfgfile_has_section(file, sec_name)) {
- entry = rte_cfgfile_get_entry(file,
- sec_name,
- "tb rate");
- if (entry)
- subport_params[i].tb_rate =
- (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file,
- sec_name,
- "tb size");
- if (entry)
- subport_params[i].tb_size =
- (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file,
- sec_name,
- "tc period");
- if (entry)
- subport_params[i].tc_period =
- (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file,
- sec_name,
- "tc 0 rate");
- if (entry)
- subport_params[i].tc_rate[0] =
- (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file,
- sec_name,
- "tc 1 rate");
- if (entry)
- subport_params[i].tc_rate[1] =
- (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file,
- sec_name,
- "tc 2 rate");
- if (entry)
- subport_params[i].tc_rate[2] =
- (uint32_t) atoi(entry);
-
- entry = rte_cfgfile_get_entry(file,
- sec_name,
- "tc 3 rate");
- if (entry)
- subport_params[i].tc_rate[3] =
- (uint32_t) atoi(entry);
-
- int n_entries = rte_cfgfile_section_num_entries(file,
- sec_name);
- struct rte_cfgfile_entry entries[n_entries];
-
- rte_cfgfile_section_entries(file,
- sec_name,
- entries,
- n_entries);
-
- for (j = 0; j < n_entries; j++)
- if (strncmp("pipe",
- entries[j].name,
- sizeof("pipe") - 1) == 0) {
- int profile;
- char *tokens[2] = {NULL, NULL};
- int n_tokens;
- int begin, end;
- char name[CFG_NAME_LEN + 1];
-
- profile = atoi(entries[j].value);
- strncpy(name,
- entries[j].name,
- sizeof(name));
- n_tokens = rte_strsplit(
- &name[sizeof("pipe")],
- strnlen(name, CFG_NAME_LEN),
- tokens, 2, '-');
-
- begin = atoi(tokens[0]);
- if (n_tokens == 2)
- end = atoi(tokens[1]);
- else
- end = begin;
-
- if ((end >= APP_MAX_SCHED_PIPES) ||
- (begin > end))
- return -1;
-
- for (k = begin; k <= end; k++) {
- char profile_name[CFG_NAME_LEN];
-
- snprintf(profile_name,
- sizeof(profile_name),
- "pipe profile %" PRId32,
- profile);
- if (rte_cfgfile_has_section(file, profile_name))
- pipe_to_profile[i * APP_MAX_SCHED_PIPES + k] = profile;
- else
- rte_exit(EXIT_FAILURE,
- "Wrong pipe profile %s\n",
- entries[j].value);
- }
- }
- }
- }
-
- return 0;
-}
-
-static int
-tm_cfgfile_load(struct app_pktq_tm_params *tm)
-{
- struct rte_cfgfile *file;
- uint32_t i;
-
- memset(tm->sched_subport_params, 0, sizeof(tm->sched_subport_params));
- memset(tm->sched_pipe_profiles, 0, sizeof(tm->sched_pipe_profiles));
- memset(&tm->sched_port_params, 0, sizeof(tm->sched_port_params));
- for (i = 0; i < APP_MAX_SCHED_SUBPORTS * APP_MAX_SCHED_PIPES; i++)
- tm->sched_pipe_to_profile[i] = -1;
-
- tm->sched_port_params.pipe_profiles = &tm->sched_pipe_profiles[0];
-
- if (tm->file_name[0] == '\0')
- return -1;
-
- file = rte_cfgfile_load(tm->file_name, 0);
- if (file == NULL)
- return -1;
-
- tm_cfgfile_load_sched_port(file,
- &tm->sched_port_params);
- tm_cfgfile_load_sched_subport(file,
- tm->sched_subport_params,
- tm->sched_pipe_to_profile);
- tm_cfgfile_load_sched_pipe(file,
- &tm->sched_port_params,
- tm->sched_pipe_profiles);
-
- rte_cfgfile_close(file);
- return 0;
-}
-
-int
-app_config_parse_tm(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < RTE_DIM(app->tm_params); i++) {
- struct app_pktq_tm_params *p = &app->tm_params[i];
- int status;
-
- if (!APP_PARAM_VALID(p))
- break;
-
- status = tm_cfgfile_load(p);
- APP_CHECK(status == 0,
- "Parse error for %s configuration file \"%s\"\n",
- p->name,
- p->file_name);
- }
-
- return 0;
-}
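
tm_cfgfile_load_sched_port() and its siblings walk space-separated integer lists ("queue sizes", WRED thresholds, WRR weights) with strtol(). One caveat: strtol() never stores NULL in its end pointer, so the next == NULL guards above can never fire; the conventional end-of-list test is next == entry. A self-contained sketch of the intended pattern:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Walk a space-separated list of integers, advancing the cursor
 * after each successfully parsed value. */
static int parse_u16_list(const char *entry, uint16_t *out, int n)
{
	char *next;
	int i;

	for (i = 0; i < n; i++) {
		long v = strtol(entry, &next, 10);

		if (next == entry) /* no digits consumed: list exhausted */
			break;
		out[i] = (uint16_t)v;
		entry = next;
	}

	return i; /* number of values parsed */
}

int main(void)
{
	uint16_t qsize[4];
	int n = parse_u16_list("64 128 256 512", qsize, 4);

	printf("parsed %d queue sizes, qsize[0] = %u\n", n, qsize[0]);
	return 0;
}
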
diff --git a/examples/ip_pipeline/conn.c b/examples/ip_pipeline/conn.c
new file mode 100644
index 00000000..6b08e9e8
--- /dev/null
+++ b/examples/ip_pipeline/conn.c
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+#define __USE_GNU
+#include <sys/socket.h>
+
+#include <sys/epoll.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <errno.h>
+
+#include "conn.h"
+
+#define MSG_CMD_TOO_LONG "Command too long."
+
+struct conn {
+ char *welcome;
+ char *prompt;
+ char *buf;
+ char *msg_in;
+ char *msg_out;
+ size_t buf_size;
+ size_t msg_in_len_max;
+ size_t msg_out_len_max;
+ size_t msg_in_len;
+ int fd_server;
+ int fd_client_group;
+ conn_msg_handle_t msg_handle;
+};
+
+struct conn *
+conn_init(struct conn_params *p)
+{
+ struct sockaddr_in server_address;
+ struct conn *conn;
+ int fd_server, fd_client_group, status;
+
+ memset(&server_address, 0, sizeof(server_address));
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (p->welcome == NULL) ||
+ (p->prompt == NULL) ||
+ (p->addr == NULL) ||
+ (p->buf_size == 0) ||
+ (p->msg_in_len_max == 0) ||
+ (p->msg_out_len_max == 0) ||
+ (p->msg_handle == NULL))
+ return NULL;
+
+ status = inet_aton(p->addr, &server_address.sin_addr);
+ if (status == 0)
+ return NULL;
+
+ /* Memory allocation */
+ conn = calloc(1, sizeof(struct conn));
+ if (conn == NULL)
+ return NULL;
+
+ conn->welcome = calloc(1, CONN_WELCOME_LEN_MAX + 1);
+ conn->prompt = calloc(1, CONN_PROMPT_LEN_MAX + 1);
+ conn->buf = calloc(1, p->buf_size);
+ conn->msg_in = calloc(1, p->msg_in_len_max + 1);
+ conn->msg_out = calloc(1, p->msg_out_len_max + 1);
+
+ if ((conn->welcome == NULL) ||
+ (conn->prompt == NULL) ||
+ (conn->buf == NULL) ||
+ (conn->msg_in == NULL) ||
+ (conn->msg_out == NULL)) {
+ conn_free(conn);
+ return NULL;
+ }
+
+ /* Server socket */
+ server_address.sin_family = AF_INET;
+ server_address.sin_port = htons(p->port);
+
+ fd_server = socket(AF_INET,
+ SOCK_STREAM | SOCK_NONBLOCK,
+ 0);
+ if (fd_server == -1) {
+ conn_free(conn);
+ return NULL;
+ }
+
+ status = bind(fd_server,
+ (struct sockaddr *) &server_address,
+ sizeof(server_address));
+ if (status == -1) {
+ conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ status = listen(fd_server, 16);
+ if (status == -1) {
+ conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ /* Client group */
+ fd_client_group = epoll_create(1);
+ if (fd_client_group == -1) {
+ conn_free(conn);
+ close(fd_server);
+ return NULL;
+ }
+
+ /* Fill in */
+ strncpy(conn->welcome, p->welcome, CONN_WELCOME_LEN_MAX);
+ strncpy(conn->prompt, p->prompt, CONN_PROMPT_LEN_MAX);
+ conn->buf_size = p->buf_size;
+ conn->msg_in_len_max = p->msg_in_len_max;
+ conn->msg_out_len_max = p->msg_out_len_max;
+ conn->msg_in_len = 0;
+ conn->fd_server = fd_server;
+ conn->fd_client_group = fd_client_group;
+ conn->msg_handle = p->msg_handle;
+
+ return conn;
+}
+
+void
+conn_free(struct conn *conn)
+{
+ if (conn == NULL)
+ return;
+
+ if (conn->fd_client_group)
+ close(conn->fd_client_group);
+
+ if (conn->fd_server)
+ close(conn->fd_server);
+
+ free(conn->msg_out);
+ free(conn->msg_in);
+ free(conn->prompt);
+ free(conn->welcome);
+ free(conn);
+}
+
+int
+conn_poll_for_conn(struct conn *conn)
+{
+ struct sockaddr_in client_address;
+ struct epoll_event event;
+ socklen_t client_address_length;
+ int fd_client, status;
+
+ /* Check input arguments */
+ if (conn == NULL)
+ return -1;
+
+ /* Server socket */
+ client_address_length = sizeof(client_address);
+ fd_client = accept4(conn->fd_server,
+ (struct sockaddr *) &client_address,
+ &client_address_length,
+ SOCK_NONBLOCK);
+ if (fd_client == -1) {
+ if ((errno == EAGAIN) || (errno == EWOULDBLOCK))
+ return 0;
+
+ return -1;
+ }
+
+ /* Client group */
+ event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP;
+ event.data.fd = fd_client;
+
+ status = epoll_ctl(conn->fd_client_group,
+ EPOLL_CTL_ADD,
+ fd_client,
+ &event);
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ /* Client */
+ status = write(fd_client,
+ conn->welcome,
+ strlen(conn->welcome));
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ status = write(fd_client,
+ conn->prompt,
+ strlen(conn->prompt));
+ if (status == -1) {
+ close(fd_client);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+data_event_handle(struct conn *conn,
+ int fd_client)
+{
+ ssize_t len, i, status;
+
+ /* Read input message */
+
+ len = read(fd_client,
+ conn->buf,
+ conn->buf_size);
+ if (len == -1) {
+ if ((errno == EAGAIN) || (errno == EWOULDBLOCK))
+ return 0;
+
+ return -1;
+ }
+ if (len == 0)
+ return 0;
+
+ /* Handle input messages */
+ for (i = 0; i < len; i++) {
+ if (conn->buf[i] == '\n') {
+ size_t n;
+
+ conn->msg_in[conn->msg_in_len] = 0;
+ conn->msg_out[0] = 0;
+
+ conn->msg_handle(conn->msg_in,
+ conn->msg_out,
+ conn->msg_out_len_max);
+
+ n = strlen(conn->msg_out);
+ if (n) {
+ status = write(fd_client,
+ conn->msg_out,
+ n);
+ if (status == -1)
+ return status;
+ }
+
+ conn->msg_in_len = 0;
+ } else if (conn->msg_in_len < conn->msg_in_len_max) {
+ conn->msg_in[conn->msg_in_len] = conn->buf[i];
+ conn->msg_in_len++;
+ } else {
+ status = write(fd_client,
+ MSG_CMD_TOO_LONG,
+ strlen(MSG_CMD_TOO_LONG));
+ if (status == -1)
+ return status;
+
+ conn->msg_in_len = 0;
+ }
+ }
+
+ /* Write prompt */
+ status = write(fd_client,
+ conn->prompt,
+ strlen(conn->prompt));
+ if (status == -1)
+ return status;
+
+ return 0;
+}
+
+static int
+control_event_handle(struct conn *conn,
+ int fd_client)
+{
+ int status;
+
+ status = epoll_ctl(conn->fd_client_group,
+ EPOLL_CTL_DEL,
+ fd_client,
+ NULL);
+ if (status == -1)
+ return -1;
+
+ status = close(fd_client);
+ if (status == -1)
+ return -1;
+
+ return 0;
+}
+
+int
+conn_poll_for_msg(struct conn *conn)
+{
+ struct epoll_event event;
+ int fd_client, status, status_data = 0, status_control = 0;
+
+ /* Check input arguments */
+ if (conn == NULL)
+ return -1;
+
+ /* Client group */
+ status = epoll_wait(conn->fd_client_group,
+ &event,
+ 1,
+ 0);
+ if (status == -1)
+ return -1;
+ if (status == 0)
+ return 0;
+
+ fd_client = event.data.fd;
+
+ /* Data available */
+ if (event.events & EPOLLIN)
+ status_data = data_event_handle(conn, fd_client);
+
+ /* Control events */
+ if (event.events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP))
+ status_control = control_event_handle(conn, fd_client);
+
+ if (status_data || status_control)
+ return -1;
+
+ return 0;
+}
diff --git a/examples/ip_pipeline/conn.h b/examples/ip_pipeline/conn.h
new file mode 100644
index 00000000..46f9f95e
--- /dev/null
+++ b/examples/ip_pipeline/conn.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_CONN_H__
+#define __INCLUDE_CONN_H__
+
+#include <stdint.h>
+
+struct conn;
+
+#ifndef CONN_WELCOME_LEN_MAX
+#define CONN_WELCOME_LEN_MAX 1024
+#endif
+
+#ifndef CONN_PROMPT_LEN_MAX
+#define CONN_PROMPT_LEN_MAX 16
+#endif
+
+typedef void (*conn_msg_handle_t)(char *msg_in,
+ char *msg_out,
+ size_t msg_out_len_max);
+
+struct conn_params {
+ const char *welcome;
+ const char *prompt;
+ const char *addr;
+ uint16_t port;
+ size_t buf_size;
+ size_t msg_in_len_max;
+ size_t msg_out_len_max;
+ conn_msg_handle_t msg_handle;
+};
+
+struct conn *
+conn_init(struct conn_params *p);
+
+void
+conn_free(struct conn *conn);
+
+int
+conn_poll_for_conn(struct conn *conn);
+
+int
+conn_poll_for_msg(struct conn *conn);
+
+#endif
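
conn.h exposes a small polling API around the epoll server in conn.c: create the connection object once, then repeatedly poll for new clients and for pending messages. A hedged usage sketch; the echo handler and port 8086 are illustrative only, and a caller would typically drive these calls from its main loop:

#include <stdio.h>
#include "conn.h"

/* Hypothetical message handler: echo the command back to the client. */
static void on_msg(char *msg_in, char *msg_out, size_t msg_out_len_max)
{
	snprintf(msg_out, msg_out_len_max, "echo: %s\n", msg_in);
}

int main(void)
{
	struct conn_params p = {
		.welcome = "Welcome!\n",
		.prompt = "pipeline> ",
		.addr = "0.0.0.0",
		.port = 8086,
		.buf_size = 1024,
		.msg_in_len_max = 1024,
		.msg_out_len_max = 1024,
		.msg_handle = on_msg,
	};
	struct conn *c = conn_init(&p);

	if (c == NULL)
		return 1;

	for ( ; ; ) {
		conn_poll_for_conn(c); /* accept any pending client */
		conn_poll_for_msg(c);  /* serve at most one message */
	}

	/* not reached */
	conn_free(c);
	return 0;
}
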
diff --git a/examples/ip_pipeline/cpu_core_map.c b/examples/ip_pipeline/cpu_core_map.c
deleted file mode 100644
index 231f38ef..00000000
--- a/examples/ip_pipeline/cpu_core_map.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#include <inttypes.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-
-#include <rte_lcore.h>
-
-#include "cpu_core_map.h"
-
-struct cpu_core_map {
- uint32_t n_max_sockets;
- uint32_t n_max_cores_per_socket;
- uint32_t n_max_ht_per_core;
- uint32_t n_sockets;
- uint32_t n_cores_per_socket;
- uint32_t n_ht_per_core;
- int map[0];
-};
-
-static inline uint32_t
-cpu_core_map_pos(struct cpu_core_map *map,
- uint32_t socket_id,
- uint32_t core_id,
- uint32_t ht_id)
-{
- return (socket_id * map->n_max_cores_per_socket + core_id) *
- map->n_max_ht_per_core + ht_id;
-}
-
-static int
-cpu_core_map_compute_eal(struct cpu_core_map *map);
-
-static int
-cpu_core_map_compute_linux(struct cpu_core_map *map);
-
-static int
-cpu_core_map_compute_and_check(struct cpu_core_map *map);
-
-struct cpu_core_map *
-cpu_core_map_init(uint32_t n_max_sockets,
- uint32_t n_max_cores_per_socket,
- uint32_t n_max_ht_per_core,
- uint32_t eal_initialized)
-{
- uint32_t map_size, map_mem_size, i;
- struct cpu_core_map *map;
- int status;
-
- /* Check input arguments */
- if ((n_max_sockets == 0) ||
- (n_max_cores_per_socket == 0) ||
- (n_max_ht_per_core == 0))
- return NULL;
-
- /* Memory allocation */
- map_size = n_max_sockets * n_max_cores_per_socket * n_max_ht_per_core;
- map_mem_size = sizeof(struct cpu_core_map) + map_size * sizeof(int);
- map = (struct cpu_core_map *) malloc(map_mem_size);
- if (map == NULL)
- return NULL;
-
- /* Initialization */
- map->n_max_sockets = n_max_sockets;
- map->n_max_cores_per_socket = n_max_cores_per_socket;
- map->n_max_ht_per_core = n_max_ht_per_core;
- map->n_sockets = 0;
- map->n_cores_per_socket = 0;
- map->n_ht_per_core = 0;
-
- for (i = 0; i < map_size; i++)
- map->map[i] = -1;
-
- status = (eal_initialized) ?
- cpu_core_map_compute_eal(map) :
- cpu_core_map_compute_linux(map);
-
- if (status) {
- free(map);
- return NULL;
- }
-
- status = cpu_core_map_compute_and_check(map);
- if (status) {
- free(map);
- return NULL;
- }
-
- return map;
-}
-
-int
-cpu_core_map_compute_eal(struct cpu_core_map *map)
-{
- uint32_t socket_id, core_id, ht_id;
-
- /* Compute map */
- for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
- uint32_t n_detected, core_id_contig;
- int lcore_id;
-
- n_detected = 0;
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- struct lcore_config *p = &lcore_config[lcore_id];
-
- if ((p->detected) && (p->socket_id == socket_id))
- n_detected++;
- }
-
- core_id_contig = 0;
-
- for (core_id = 0; n_detected ; core_id++) {
- ht_id = 0;
-
- for (lcore_id = 0;
- lcore_id < RTE_MAX_LCORE;
- lcore_id++) {
- struct lcore_config *p =
- &lcore_config[lcore_id];
-
- if ((p->detected) &&
- (p->socket_id == socket_id) &&
- (p->core_id == core_id)) {
- uint32_t pos = cpu_core_map_pos(map,
- socket_id,
- core_id_contig,
- ht_id);
-
- map->map[pos] = lcore_id;
- ht_id++;
- n_detected--;
- }
- }
-
- if (ht_id) {
- core_id_contig++;
- if (core_id_contig ==
- map->n_max_cores_per_socket)
- return -1;
- }
- }
- }
-
- return 0;
-}
-
-int
-cpu_core_map_compute_and_check(struct cpu_core_map *map)
-{
- uint32_t socket_id, core_id, ht_id;
-
- /* Compute n_ht_per_core, n_cores_per_socket, n_sockets */
- for (ht_id = 0; ht_id < map->n_max_ht_per_core; ht_id++) {
- if (map->map[ht_id] == -1)
- break;
-
- map->n_ht_per_core++;
- }
-
- if (map->n_ht_per_core == 0)
- return -1;
-
- for (core_id = 0; core_id < map->n_max_cores_per_socket; core_id++) {
- uint32_t pos = core_id * map->n_max_ht_per_core;
-
- if (map->map[pos] == -1)
- break;
-
- map->n_cores_per_socket++;
- }
-
- if (map->n_cores_per_socket == 0)
- return -1;
-
- for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
- uint32_t pos = socket_id * map->n_max_cores_per_socket *
- map->n_max_ht_per_core;
-
- if (map->map[pos] == -1)
- break;
-
- map->n_sockets++;
- }
-
- if (map->n_sockets == 0)
- return -1;
-
- /* Check that each socket has exactly the same number of cores
- and that each core has exactly the same number of hyper-threads */
- for (socket_id = 0; socket_id < map->n_sockets; socket_id++) {
- for (core_id = 0; core_id < map->n_cores_per_socket; core_id++)
- for (ht_id = 0;
- ht_id < map->n_max_ht_per_core;
- ht_id++) {
- uint32_t pos = (socket_id *
- map->n_max_cores_per_socket + core_id) *
- map->n_max_ht_per_core + ht_id;
-
- if (((ht_id < map->n_ht_per_core) &&
- (map->map[pos] == -1)) ||
- ((ht_id >= map->n_ht_per_core) &&
- (map->map[pos] != -1)))
- return -1;
- }
-
- for ( ; core_id < map->n_max_cores_per_socket; core_id++)
- for (ht_id = 0;
- ht_id < map->n_max_ht_per_core;
- ht_id++) {
- uint32_t pos = cpu_core_map_pos(map,
- socket_id,
- core_id,
- ht_id);
-
- if (map->map[pos] != -1)
- return -1;
- }
- }
-
- return 0;
-}
-
-#define FILE_LINUX_CPU_N_LCORES \
- "/sys/devices/system/cpu/present"
-
-static int
-cpu_core_map_get_n_lcores_linux(void)
-{
- char buffer[64], *string;
- FILE *fd;
-
- fd = fopen(FILE_LINUX_CPU_N_LCORES, "r");
- if (fd == NULL)
- return -1;
-
- if (fgets(buffer, sizeof(buffer), fd) == NULL) {
- fclose(fd);
- return -1;
- }
-
- fclose(fd);
-
- string = index(buffer, '-');
- if (string == NULL)
- return -1;
-
- return atoi(++string) + 1;
-}
-
-#define FILE_LINUX_CPU_CORE_ID \
- "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_id"
-
-static int
-cpu_core_map_get_core_id_linux(int lcore_id)
-{
- char buffer[64];
- FILE *fd;
- int core_id;
-
- snprintf(buffer, sizeof(buffer), FILE_LINUX_CPU_CORE_ID, lcore_id);
- fd = fopen(buffer, "r");
- if (fd == NULL)
- return -1;
-
- if (fgets(buffer, sizeof(buffer), fd) == NULL) {
- fclose(fd);
- return -1;
- }
-
- fclose(fd);
-
- core_id = atoi(buffer);
- return core_id;
-}
-
-#define FILE_LINUX_CPU_SOCKET_ID \
- "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/physical_package_id"
-
-static int
-cpu_core_map_get_socket_id_linux(int lcore_id)
-{
- char buffer[64];
- FILE *fd;
- int socket_id;
-
- snprintf(buffer, sizeof(buffer), FILE_LINUX_CPU_SOCKET_ID, lcore_id);
- fd = fopen(buffer, "r");
- if (fd == NULL)
- return -1;
-
- if (fgets(buffer, sizeof(buffer), fd) == NULL) {
- fclose(fd);
- return -1;
- }
-
- fclose(fd);
-
- socket_id = atoi(buffer);
- return socket_id;
-}
-
-int
-cpu_core_map_compute_linux(struct cpu_core_map *map)
-{
- uint32_t socket_id, core_id, ht_id;
- int n_lcores;
-
- n_lcores = cpu_core_map_get_n_lcores_linux();
- if (n_lcores <= 0)
- return -1;
-
- /* Compute map */
- for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
- uint32_t n_detected, core_id_contig;
- int lcore_id;
-
- n_detected = 0;
- for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
- int lcore_socket_id =
- cpu_core_map_get_socket_id_linux(lcore_id);
-
-#if !defined(RTE_ARCH_PPC_64)
- if (lcore_socket_id < 0)
- return -1;
-#endif
-
- if (((uint32_t) lcore_socket_id) == socket_id)
- n_detected++;
- }
-
- core_id_contig = 0;
-
- for (core_id = 0; n_detected ; core_id++) {
- ht_id = 0;
-
- for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
- int lcore_socket_id =
- cpu_core_map_get_socket_id_linux(
- lcore_id);
-
-#if !defined(RTE_ARCH_PPC_64)
- if (lcore_socket_id < 0)
- return -1;
-
- int lcore_core_id =
- cpu_core_map_get_core_id_linux(
- lcore_id);
-
- if (lcore_core_id < 0)
- return -1;
-#endif
-
-#if !defined(RTE_ARCH_PPC_64)
- if (((uint32_t) lcore_socket_id == socket_id) &&
- ((uint32_t) lcore_core_id == core_id)) {
-#else
- if (((uint32_t) lcore_socket_id == socket_id)) {
-#endif
- uint32_t pos = cpu_core_map_pos(map,
- socket_id,
- core_id_contig,
- ht_id);
-
- map->map[pos] = lcore_id;
- ht_id++;
- n_detected--;
- }
- }
-
- if (ht_id) {
- core_id_contig++;
- if (core_id_contig ==
- map->n_max_cores_per_socket)
- return -1;
- }
- }
- }
-
- return 0;
-}
-
-void
-cpu_core_map_print(struct cpu_core_map *map)
-{
- uint32_t socket_id, core_id, ht_id;
-
- if (map == NULL)
- return;
-
- for (socket_id = 0; socket_id < map->n_sockets; socket_id++) {
- printf("Socket %" PRIu32 ":\n", socket_id);
-
- for (core_id = 0;
- core_id < map->n_cores_per_socket;
- core_id++) {
- printf("[%" PRIu32 "] = [", core_id);
-
- for (ht_id = 0; ht_id < map->n_ht_per_core; ht_id++) {
- int lcore_id = cpu_core_map_get_lcore_id(map,
- socket_id,
- core_id,
- ht_id);
-
- uint32_t core_id_noncontig =
- cpu_core_map_get_core_id_linux(
- lcore_id);
-
- printf(" %" PRId32 " (%" PRIu32 ") ",
- lcore_id,
- core_id_noncontig);
- }
-
- printf("]\n");
- }
- }
-}
-
-uint32_t
-cpu_core_map_get_n_sockets(struct cpu_core_map *map)
-{
- if (map == NULL)
- return 0;
-
- return map->n_sockets;
-}
-
-uint32_t
-cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map)
-{
- if (map == NULL)
- return 0;
-
- return map->n_cores_per_socket;
-}
-
-uint32_t
-cpu_core_map_get_n_ht_per_core(struct cpu_core_map *map)
-{
- if (map == NULL)
- return 0;
-
- return map->n_ht_per_core;
-}
-
-int
-cpu_core_map_get_lcore_id(struct cpu_core_map *map,
- uint32_t socket_id,
- uint32_t core_id,
- uint32_t ht_id)
-{
- uint32_t pos;
-
- if ((map == NULL) ||
- (socket_id >= map->n_sockets) ||
- (core_id >= map->n_cores_per_socket) ||
- (ht_id >= map->n_ht_per_core))
- return -1;
-
- pos = cpu_core_map_pos(map, socket_id, core_id, ht_id);
-
- return map->map[pos];
-}
-
-void
-cpu_core_map_free(struct cpu_core_map *map)
-{
- free(map);
-}
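
cpu_core_map stores a three-dimensional [socket][core][hyper-thread] table flattened row-major into the trailing map[] array, with cpu_core_map_pos() doing the index arithmetic. A small illustration of that layout, using arbitrary dimensions:

#include <stdio.h>
#include <stdint.h>

#define N_SOCKETS 2
#define N_CORES   4
#define N_HT      2

/* Same row-major flattening as cpu_core_map_pos() above */
static uint32_t pos(uint32_t s, uint32_t c, uint32_t h)
{
	return (s * N_CORES + c) * N_HT + h;
}

int main(void)
{
	int map[N_SOCKETS * N_CORES * N_HT];
	uint32_t s, c, h, lcore = 0;

	for (s = 0; s < N_SOCKETS; s++)
		for (c = 0; c < N_CORES; c++)
			for (h = 0; h < N_HT; h++)
				map[pos(s, c, h)] = (int)lcore++;

	printf("socket 1, core 2, ht 1 -> lcore %d\n", map[pos(1, 2, 1)]);
	return 0;
}
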
diff --git a/examples/ip_pipeline/cpu_core_map.h b/examples/ip_pipeline/cpu_core_map.h
deleted file mode 100644
index 5e50f6e5..00000000
--- a/examples/ip_pipeline/cpu_core_map.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_CPU_CORE_MAP_H__
-#define __INCLUDE_CPU_CORE_MAP_H__
-
-#include <stdio.h>
-
-#include <rte_lcore.h>
-
-struct cpu_core_map;
-
-struct cpu_core_map *
-cpu_core_map_init(uint32_t n_max_sockets,
- uint32_t n_max_cores_per_socket,
- uint32_t n_max_ht_per_core,
- uint32_t eal_initialized);
-
-uint32_t
-cpu_core_map_get_n_sockets(struct cpu_core_map *map);
-
-uint32_t
-cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map);
-
-uint32_t
-cpu_core_map_get_n_ht_per_core(struct cpu_core_map *map);
-
-int
-cpu_core_map_get_lcore_id(struct cpu_core_map *map,
- uint32_t socket_id,
- uint32_t core_id,
- uint32_t ht_id);
-
-void cpu_core_map_print(struct cpu_core_map *map);
-
-void
-cpu_core_map_free(struct cpu_core_map *map);
-
-#endif
diff --git a/examples/ip_pipeline/examples/firewall.cli b/examples/ip_pipeline/examples/firewall.cli
new file mode 100644
index 00000000..269256c1
--- /dev/null
+++ b/examples/ip_pipeline/examples/firewall.cli
@@ -0,0 +1,59 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2010-2018 Intel Corporation
+
+; _______________
+; LINK0 RXQ0 --->| |---> LINK0 TXQ0
+; | |
+; LINK1 RXQ0 --->| |---> LINK1 TXQ0
+; | Firewall |
+; LINK2 RXQ0 --->| |---> LINK2 TXQ0
+; | |
+; LINK3 RXQ0 --->| |---> LINK3 TXQ0
+; |_______________|
+; |
+; -+-
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+
+link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK1 dev 0000:02:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK2 dev 0000:06:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK3 dev 0000:06:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+table action profile AP0 ipv4 offset 270 fwd
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK2 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK3 rxq 0
+
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK2 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK3 txq 0
+
+pipeline PIPELINE0 table match acl ipv4 offset 270 size 4K action AP0
+
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 0
+pipeline PIPELINE0 port in 2 table 0
+pipeline PIPELINE0 port in 3 table 0
+
+thread 1 pipeline PIPELINE0 enable
+
+pipeline PIPELINE0 table 0 rule add match default action fwd drop
+pipeline PIPELINE0 table 0 rule add match acl priority 0 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 action fwd port 0
+pipeline PIPELINE0 table 0 rule add match acl priority 0 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 action fwd port 1
+pipeline PIPELINE0 table 0 rule add match acl priority 0 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 action fwd port 2
+pipeline PIPELINE0 table 0 rule add match acl priority 0 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 action fwd port 3
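
The "offset 270" used by both the action profile and the ACL table comes straight from the packet buffer layout in the header comment: offsets are counted from the start of the mbuf. A one-line check of the arithmetic:

#include <stdio.h>

int main(void)
{
	int mbuf_hdr = 128; /* struct rte_mbuf itself */
	int headroom = 128; /* data headroom */
	int eth_hdr = 14;   /* Ethernet header */

	printf("IPv4 header offset = %d\n", mbuf_hdr + headroom + eth_hdr);
	return 0; /* prints 270 */
}
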
diff --git a/examples/ip_pipeline/examples/flow.cli b/examples/ip_pipeline/examples/flow.cli
new file mode 100644
index 00000000..426ff902
--- /dev/null
+++ b/examples/ip_pipeline/examples/flow.cli
@@ -0,0 +1,60 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2010-2018 Intel Corporation
+
+; ________________
+; LINK0 RXQ0 --->| |---> LINK0 TXQ0
+; | |
+; LINK1 RXQ0 --->| |---> LINK1 TXQ0
+; | Flow |
+; LINK2 RXQ0 --->| Classification |---> LINK2 TXQ0
+; | |
+; LINK3 RXQ0 --->| |---> LINK3 TXQ0
+; |________________|
+; |
+; +-----------> SINK0 (flow lookup miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+
+link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK1 dev 0000:02:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK2 dev 0000:06:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK3 dev 0000:06:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+table action profile AP0 ipv4 offset 270 fwd
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK2 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK3 rxq 0
+
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK2 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK3 txq 0
+pipeline PIPELINE0 port out bsz 32 sink
+
+pipeline PIPELINE0 table match hash ext key 16 mask 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF offset 278 buckets 16K size 65K action AP0
+
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 0
+pipeline PIPELINE0 port in 2 table 0
+pipeline PIPELINE0 port in 3 table 0
+
+thread 1 pipeline PIPELINE0 enable
+
+pipeline PIPELINE0 table 0 rule add match default action fwd port 4
+pipeline PIPELINE0 table 0 rule add match hash ipv4_5tuple 100.0.0.10 200.0.0.10 100 200 6 action fwd port 0
+pipeline PIPELINE0 table 0 rule add match hash ipv4_5tuple 100.0.0.11 200.0.0.11 101 201 6 action fwd port 1
+pipeline PIPELINE0 table 0 rule add match hash ipv4_5tuple 100.0.0.12 200.0.0.12 102 202 6 action fwd port 2
+pipeline PIPELINE0 table 0 rule add match hash ipv4_5tuple 100.0.0.13 200.0.0.13 103 203 6 action fwd port 3
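
The 16-byte hash key starts at offset 278 = 270 + 8, i.e. at the IPv4 TTL field, and the mask selects exactly the 5-tuple while zeroing the volatile TTL and checksum bytes. A sketch of the byte map, assuming a plain 20-byte IPv4 header (no options) followed directly by the L4 ports:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const char *field[16] = {
		"ttl", "proto", "cksum", "cksum",
		"src", "src", "src", "src",
		"dst", "dst", "dst", "dst",
		"sport", "sport", "dport", "dport"
	};
	/* 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF, byte by byte */
	const uint8_t mask[16] = {
		0x00, 0xFF, 0x00, 0x00,
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF
	};
	int i;

	for (i = 0; i < 16; i++)
		printf("byte %3d (%-5s): %s\n", i + 278, field[i],
		       mask[i] ? "kept" : "masked");
	return 0;
}
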
diff --git a/examples/ip_pipeline/examples/kni.cli b/examples/ip_pipeline/examples/kni.cli
new file mode 100644
index 00000000..14383409
--- /dev/null
+++ b/examples/ip_pipeline/examples/kni.cli
@@ -0,0 +1,69 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2010-2018 Intel Corporation
+
+; _______________ ______________________
+; | | KNI0 | |
+; LINK0 RXQ0 --->|...............|------->|--+ |
+; | | KNI1 | | br0 |
+; LINK1 TXQ0 <---|...............|<-------|<-+ |
+; | | | Linux Kernel |
+; | PIPELINE0 | | Network Stack |
+; | | KNI1 | |
+; LINK1 RXQ0 --->|...............|------->|--+ |
+; | | KNI0 | | br0 |
+; LINK0 TXQ0 <---|...............|<-------|<-+ |
+; |_______________| |______________________|
+;
+; Insert the Linux kernel KNI module:
+; [Linux]$ insmod rte_kni.ko
+;
+; Configure a Linux kernel bridge between the KNI0 and KNI1 interfaces:
+; [Linux]$ brctl addbr br0
+; [Linux]$ brctl addif br0 KNI0
+; [Linux]$ brctl addif br0 KNI1
+; [Linux]$ ifconfig br0 up
+; [Linux]$ ifconfig KNI0 up
+; [Linux]$ ifconfig KNI1 up
+;
+; Monitor packet forwarding performed by the Linux kernel between KNI0 and KNI1:
+; [Linux]$ tcpdump -i KNI0
+; [Linux]$ tcpdump -i KNI1
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+
+link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK1 dev 0000:02:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+kni KNI0 link LINK0 mempool MEMPOOL0
+kni KNI1 link LINK1 mempool MEMPOOL0
+
+table action profile AP0 ipv4 offset 270 fwd
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 kni KNI1
+pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+pipeline PIPELINE0 port in bsz 32 kni KNI0
+
+pipeline PIPELINE0 port out bsz 32 kni KNI0
+pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
+pipeline PIPELINE0 port out bsz 32 kni KNI1
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+
+pipeline PIPELINE0 table match stub action AP0
+pipeline PIPELINE0 table match stub action AP0
+pipeline PIPELINE0 table match stub action AP0
+pipeline PIPELINE0 table match stub action AP0
+
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 1
+pipeline PIPELINE0 port in 2 table 2
+pipeline PIPELINE0 port in 3 table 3
+
+thread 1 pipeline PIPELINE0 enable
+
+pipeline PIPELINE0 table 0 rule add match default action fwd port 0
+pipeline PIPELINE0 table 1 rule add match default action fwd port 1
+pipeline PIPELINE0 table 2 rule add match default action fwd port 2
+pipeline PIPELINE0 table 3 rule add match default action fwd port 3
diff --git a/examples/ip_pipeline/examples/l2fwd.cli b/examples/ip_pipeline/examples/l2fwd.cli
new file mode 100644
index 00000000..35e77cc9
--- /dev/null
+++ b/examples/ip_pipeline/examples/l2fwd.cli
@@ -0,0 +1,51 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2010-2018 Intel Corporation
+
+; The pipeline below implements a simple pass-through connection from the
+; input ports to the output ports, as shown in this diagram:
+; ________________
+; LINK0 RXQ0 --->|................|---> LINK1 TXQ0
+; | |
+; LINK1 RXQ0 --->|................|---> LINK0 TXQ0
+; | PIPELINE0 |
+; LINK2 RXQ0 --->|................|---> LINK3 TXQ0
+; | |
+; LINK3 RXQ0 --->|................|---> LINK2 TXQ0
+; |________________|
+;
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+
+link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK1 dev 0000:02:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK2 dev 0000:06:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK3 dev 0000:06:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK2 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK3 rxq 0
+
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK2 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK3 txq 0
+
+pipeline PIPELINE0 table match stub
+pipeline PIPELINE0 table match stub
+pipeline PIPELINE0 table match stub
+pipeline PIPELINE0 table match stub
+
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 1
+pipeline PIPELINE0 port in 2 table 2
+pipeline PIPELINE0 port in 3 table 3
+
+thread 1 pipeline PIPELINE0 enable
+
+pipeline PIPELINE0 table 0 rule add match default action fwd port 1
+pipeline PIPELINE0 table 1 rule add match default action fwd port 0
+pipeline PIPELINE0 table 2 rule add match default action fwd port 3
+pipeline PIPELINE0 table 3 rule add match default action fwd port 2
diff --git a/examples/ip_pipeline/examples/route.cli b/examples/ip_pipeline/examples/route.cli
new file mode 100644
index 00000000..579b36a6
--- /dev/null
+++ b/examples/ip_pipeline/examples/route.cli
@@ -0,0 +1,60 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2010-2018 Intel Corporation
+
+; _______________
+; LINK0 RXQ0 --->| |---> LINK0 TXQ0
+; | |
+; LINK1 RXQ0 --->| |---> LINK1 TXQ0
+; | Routing |
+; LINK2 RXQ0 --->| |---> LINK2 TXQ0
+; | |
+; LINK3 RXQ0 --->| |---> LINK3 TXQ0
+; |_______________|
+; |
+; +-----------> SINK0 (route miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+
+link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK1 dev 0000:02:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK2 dev 0000:06:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK3 dev 0000:06:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+table action profile AP0 ipv4 offset 270 fwd encap ether
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK2 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK3 rxq 0
+
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK2 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK3 txq 0
+pipeline PIPELINE0 port out bsz 32 sink
+
+pipeline PIPELINE0 table match lpm ipv4 offset 286 size 4K action AP0
+
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 0
+pipeline PIPELINE0 port in 2 table 0
+pipeline PIPELINE0 port in 3 table 0
+
+thread 1 pipeline PIPELINE0 enable
+
+pipeline PIPELINE0 table 0 rule add match default action fwd port 4
+pipeline PIPELINE0 table 0 rule add match lpm ipv4 100.0.0.0 10 action fwd port 0 encap ether a0:a1:a2:a3:a4:a5 00:01:02:03:04:05
+pipeline PIPELINE0 table 0 rule add match lpm ipv4 100.64.0.0 10 action fwd port 1 encap ether b0:b1:b2:b3:b4:b5 10:11:12:13:14:15
+pipeline PIPELINE0 table 0 rule add match lpm ipv4 100.128.0.0 10 action fwd port 2 encap ether c0:c1:c2:c3:c4:c5 20:21:22:23:24:25
+pipeline PIPELINE0 table 0 rule add match lpm ipv4 100.192.0.0 10 action fwd port 3 encap ether d0:d1:d2:d3:d4:d5 30:31:32:33:34:35
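
The LPM table matches at offset 286 = 270 + 16, which is the IPv4 destination address. A sketch making that arithmetic explicit; the struct below merely mirrors a plain 20-byte IPv4 header and its field names are illustrative:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct ipv4_hdr_sketch {
	uint8_t  version_ihl;
	uint8_t  tos;
	uint16_t total_length;
	uint16_t packet_id;
	uint16_t fragment_offset;
	uint8_t  ttl;
	uint8_t  proto;
	uint16_t checksum;
	uint32_t src_addr;
	uint32_t dst_addr;
};

int main(void)
{
	printf("dst_addr offset within the IPv4 header = %zu\n",
	       offsetof(struct ipv4_hdr_sketch, dst_addr)); /* 16 */
	printf("offset from mbuf start = %zu\n",
	       270 + offsetof(struct ipv4_hdr_sketch, dst_addr)); /* 286 */
	return 0;
}
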
diff --git a/examples/ip_pipeline/examples/route_ecmp.cli b/examples/ip_pipeline/examples/route_ecmp.cli
new file mode 100644
index 00000000..06434a25
--- /dev/null
+++ b/examples/ip_pipeline/examples/route_ecmp.cli
@@ -0,0 +1,57 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2010-2018 Intel Corporation
+
+; Equal Cost Multi-Path (ECMP) Routing
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+
+link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK1 dev 0000:02:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK2 dev 0000:06:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK3 dev 0000:06:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+table action profile APRT ipv4 offset 270 fwd balance offset 278 mask 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF outoffset 256
+table action profile APNH ipv4 offset 270 fwd encap ether
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK2 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK3 rxq 0
+
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK2 txq 0
+pipeline PIPELINE0 port out bsz 32 link LINK3 txq 0
+pipeline PIPELINE0 port out bsz 32 sink
+
+pipeline PIPELINE0 table match lpm ipv4 offset 286 size 4K action APRT
+pipeline PIPELINE0 table match array offset 256 size 64K action APNH
+
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 0
+pipeline PIPELINE0 port in 2 table 0
+pipeline PIPELINE0 port in 3 table 0
+
+thread 1 pipeline PIPELINE0 enable
+
+pipeline PIPELINE0 table 0 rule add match default action fwd port 4
+pipeline PIPELINE0 table 0 rule add match lpm ipv4 100.0.0.0 10 action fwd table 1 balance 0 0 0 0 1 1 2 2
+pipeline PIPELINE0 table 0 rule add match lpm ipv4 100.64.0.0 10 action fwd table 1 balance 1 1 1 1 2 2 3 3
+pipeline PIPELINE0 table 0 rule add match lpm ipv4 100.128.0.0 10 action fwd table 1 balance 2 2 2 2 3 3 0 0
+pipeline PIPELINE0 table 0 rule add match lpm ipv4 100.192.0.0 10 action fwd table 1 balance 3 3 3 3 0 0 1 1
+
+pipeline PIPELINE0 table 1 rule add match array 0 action fwd port 0 encap ether a0:a1:a2:a3:a4:a5 00:01:02:03:04:05
+pipeline PIPELINE0 table 1 rule add match array 1 action fwd port 1 encap ether b0:b1:b2:b3:b4:b5 10:11:12:13:14:15
+pipeline PIPELINE0 table 1 rule add match array 2 action fwd port 2 encap ether c0:c1:c2:c3:c4:c5 20:21:22:23:24:25
+pipeline PIPELINE0 table 1 rule add match array 3 action fwd port 3 encap ether d0:d1:d2:d3:d4:d5 30:31:32:33:34:35
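
A hedged sketch of what the balance clause is configured to do here (assumed semantics, not code from this patch): the masked 5-tuple at offset 278 is hashed, the hash selects one of the eight bucket values listed after "balance", and the selected value is written at outoffset 256 where it later keys the array table (table 1):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Buckets for the 100.0.0.0/10 route above */
	uint8_t buckets[8] = { 0, 0, 0, 0, 1, 1, 2, 2 };
	uint64_t hash = 0x9e3779b97f4a7c15ULL; /* example 5-tuple hash */
	uint8_t next_hop = buckets[hash & 7];  /* bucket select */

	printf("next-hop array entry = %u\n", (unsigned)next_hop);
	return 0;
}
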
diff --git a/examples/ip_pipeline/examples/rss.cli b/examples/ip_pipeline/examples/rss.cli
new file mode 100644
index 00000000..29c0a913
--- /dev/null
+++ b/examples/ip_pipeline/examples/rss.cli
@@ -0,0 +1,112 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2010-2018 Intel Corporation
+
+; This setup demonstrates the usage of the NIC Receive Side Scaling (RSS) feature.
+; Each NIC splits the input traffic into 4 RX queues, with each of its RX queues
+; being handled by a different pipeline:
+;
+; +-----------+ +----------+
+; +--------------------------->| | | |
+; | +------------------->| PIPELINE0 +--->| LINK 0 |--->
+; | | +------------->| (CORE A) | | TX |
+; | | | +------->| | | |
+; | | | | +-----------+ +----------+
+; +----------+ | | | |
+; | |-------+ | | |
+;--->| LINK 0 |-----------+ | | |
+; | RX |---------+ | | | |
+; | |-------+ | | | | |
+; +----------+ | | | | | | +-----------+ +----------+
+; | | +---|-----|-----|------->| | | |
+; +----------+ | | | +---|-----|------->| PIPELINE1 +--->| LINK 1 |--->
+; | |-------|-|-----+ | | +---|------->| (CORE B) | | TX |
+;--->| LINK 1 |-------|-|-------+ | | | +----->| | | |
+; | RX |-------|-|-------+ | | | | +-----------+ +----------+
+; | |-------|-|-----+ | | | | |
+; +----------+ | | | | | | | |
+; | | | | | | | |
+; +----------+ | | | | | | | |
+; | |-------|-|-----|-|---+ | | |
+;--->| LINK 2 |-------|-|-----|-|-----+ | | +-----------+ +----------+
+; | RX |-----+ | +-----|-|---------|-|----->| | | |
+; | |---+ | | | +---------|-|----->| PIPELINE2 +--->| LINK 2 |--->
+; +----------+ | +-|-------|-----------|-|----->| (CORE C) | | TX |
+; | | | | | +--->| | | |
+; +----------+ | | | | | | +-----------+ +----------+
+; | |---|---|-------|-----------+ | |
+;--->| LINK 3 |---|---|-------|-------------+ |
+; | RX |---|---|-------|---------------+
+; | |---|---|-------|-----------+
+; +----------+ | | | |
+; | | | | +-----------+ +----------+
+; | +-------|-----------|------->| | | |
+; | +-----------|------->| PIPELINE3 +--->| LINK 3 |--->
+; +-----------------------|------->| (CORE D) | | TX |
+; +------->| | | |
+; +-----------+ +----------+
+;
+;
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+
+link LINK0 dev 0000:02:00.0 rxq 4 128 MEMPOOL0 txq 1 512 promiscuous on rss 0 1 2 3
+link LINK1 dev 0000:02:00.1 rxq 4 128 MEMPOOL0 txq 1 512 promiscuous on rss 0 1 2 3
+link LINK2 dev 0000:06:00.0 rxq 4 128 MEMPOOL0 txq 1 512 promiscuous on rss 0 1 2 3
+link LINK3 dev 0000:06:00.1 rxq 4 128 MEMPOOL0 txq 1 512 promiscuous on rss 0 1 2 3
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK2 rxq 0
+pipeline PIPELINE0 port in bsz 32 link LINK3 rxq 0
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+pipeline PIPELINE0 table match stub
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 0
+pipeline PIPELINE0 port in 2 table 0
+pipeline PIPELINE0 port in 3 table 0
+pipeline PIPELINE0 table 0 rule add match default action fwd port 0
+
+pipeline PIPELINE1 period 10 offset_port_id 0 cpu 0
+pipeline PIPELINE1 port in bsz 32 link LINK0 rxq 1
+pipeline PIPELINE1 port in bsz 32 link LINK1 rxq 1
+pipeline PIPELINE1 port in bsz 32 link LINK2 rxq 1
+pipeline PIPELINE1 port in bsz 32 link LINK3 rxq 1
+pipeline PIPELINE1 port out bsz 32 link LINK1 txq 0
+pipeline PIPELINE1 table match stub
+pipeline PIPELINE1 port in 0 table 0
+pipeline PIPELINE1 port in 1 table 0
+pipeline PIPELINE1 port in 2 table 0
+pipeline PIPELINE1 port in 3 table 0
+pipeline PIPELINE1 table 0 rule add match default action fwd port 0
+
+pipeline PIPELINE2 period 10 offset_port_id 0 cpu 0
+pipeline PIPELINE2 port in bsz 32 link LINK0 rxq 2
+pipeline PIPELINE2 port in bsz 32 link LINK1 rxq 2
+pipeline PIPELINE2 port in bsz 32 link LINK2 rxq 2
+pipeline PIPELINE2 port in bsz 32 link LINK3 rxq 2
+pipeline PIPELINE2 port out bsz 32 link LINK2 txq 0
+pipeline PIPELINE2 table match stub
+pipeline PIPELINE2 port in 0 table 0
+pipeline PIPELINE2 port in 1 table 0
+pipeline PIPELINE2 port in 2 table 0
+pipeline PIPELINE2 port in 3 table 0
+pipeline PIPELINE2 table 0 rule add match default action fwd port 0
+
+pipeline PIPELINE3 period 10 offset_port_id 0 cpu 0
+pipeline PIPELINE3 port in bsz 32 link LINK0 rxq 3
+pipeline PIPELINE3 port in bsz 32 link LINK1 rxq 3
+pipeline PIPELINE3 port in bsz 32 link LINK2 rxq 3
+pipeline PIPELINE3 port in bsz 32 link LINK3 rxq 3
+pipeline PIPELINE3 port out bsz 32 link LINK3 txq 0
+pipeline PIPELINE3 table match stub
+pipeline PIPELINE3 port in 0 table 0
+pipeline PIPELINE3 port in 1 table 0
+pipeline PIPELINE3 port in 2 table 0
+pipeline PIPELINE3 port in 3 table 0
+pipeline PIPELINE3 table 0 rule add match default action fwd port 0
+
+thread 1 pipeline PIPELINE0 enable
+thread 2 pipeline PIPELINE1 enable
+thread 3 pipeline PIPELINE2 enable
+thread 4 pipeline PIPELINE3 enable
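
A hedged model of the RSS step itself (hardware behaviour assumed, not part of this patch): the NIC hashes each flow and uses the hash to index a redirection table built from the "rss 0 1 2 3" clause, so every flow lands consistently on one of the four RX queues and thus on one pipeline:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t reta[4] = { 0, 1, 2, 3 }; /* from "rss 0 1 2 3" */
	uint32_t flow_hash = 0xdeadbeef;   /* per-packet RSS hash */

	printf("rx queue = %u\n", (unsigned)reta[flow_hash % 4]);
	return 0;
}
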
diff --git a/examples/ip_pipeline/examples/tap.cli b/examples/ip_pipeline/examples/tap.cli
new file mode 100644
index 00000000..600cea26
--- /dev/null
+++ b/examples/ip_pipeline/examples/tap.cli
@@ -0,0 +1,66 @@
+; SPDX-License-Identifier: BSD-3-Clause
+; Copyright(c) 2010-2018 Intel Corporation
+
+; _______________ ______________________
+; | | TAP0 | |
+; LINK0 RXQ0 --->|...............|------->|--+ |
+; | | TAP1 | | br0 |
+; LINK1 TXQ0 <---|...............|<-------|<-+ |
+; | | | Linux Kernel |
+; | PIPELINE0 | | Network Stack |
+; | | TAP1 | |
+; LINK1 RXQ0 --->|...............|------->|--+ |
+; | | TAP0 | | br0 |
+; LINK0 TXQ0 <---|...............|<-------|<-+ |
+; |_______________| |______________________|
+;
+; Configure a Linux kernel bridge between the TAP0 and TAP1 interfaces:
+; [Linux]$ brctl addbr br0
+; [Linux]$ brctl addif br0 TAP0
+; [Linux]$ brctl addif br0 TAP1
+; [Linux]$ ifconfig TAP0 up
+; [Linux]$ ifconfig TAP1 up
+; [Linux]$ ifconfig br0 up
+;
+; Monitor packet forwarding performed by the Linux kernel between TAP0 and TAP1:
+; [Linux]$ tcpdump -i TAP0
+; [Linux]$ tcpdump -i TAP1
+
+mempool MEMPOOL0 buffer 2304 pool 32K cache 256 cpu 0
+
+link LINK0 dev 0000:02:00.0 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+link LINK1 dev 0000:02:00.1 rxq 1 128 MEMPOOL0 txq 1 512 promiscuous on
+
+tap TAP0
+tap TAP1
+
+table action profile AP0 ipv4 offset 270 fwd
+
+pipeline PIPELINE0 period 10 offset_port_id 0 cpu 0
+
+pipeline PIPELINE0 port in bsz 32 link LINK0 rxq 0
+pipeline PIPELINE0 port in bsz 32 tap TAP1 mempool MEMPOOL0 mtu 1500
+pipeline PIPELINE0 port in bsz 32 link LINK1 rxq 0
+pipeline PIPELINE0 port in bsz 32 tap TAP0 mempool MEMPOOL0 mtu 1500
+
+pipeline PIPELINE0 port out bsz 32 tap TAP0
+pipeline PIPELINE0 port out bsz 32 link LINK1 txq 0
+pipeline PIPELINE0 port out bsz 32 tap TAP1
+pipeline PIPELINE0 port out bsz 32 link LINK0 txq 0
+
+pipeline PIPELINE0 table match stub action AP0
+pipeline PIPELINE0 table match stub action AP0
+pipeline PIPELINE0 table match stub action AP0
+pipeline PIPELINE0 table match stub action AP0
+
+pipeline PIPELINE0 port in 0 table 0
+pipeline PIPELINE0 port in 1 table 1
+pipeline PIPELINE0 port in 2 table 2
+pipeline PIPELINE0 port in 3 table 3
+
+thread 1 pipeline PIPELINE0 enable
+
+pipeline PIPELINE0 table 0 rule add match default action fwd port 0
+pipeline PIPELINE0 table 1 rule add match default action fwd port 1
+pipeline PIPELINE0 table 2 rule add match default action fwd port 2
+pipeline PIPELINE0 table 3 rule add match default action fwd port 3
diff --git a/examples/ip_pipeline/pipeline/hash_func.h b/examples/ip_pipeline/hash_func.h
index 806ac227..f1b9d944 100644
--- a/examples/ip_pipeline/pipeline/hash_func.h
+++ b/examples/ip_pipeline/hash_func.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
+
#ifndef __INCLUDE_HASH_FUNC_H__
#define __INCLUDE_HASH_FUNC_H__
diff --git a/examples/ip_pipeline/hash_func_arm64.h b/examples/ip_pipeline/hash_func_arm64.h
new file mode 100644
index 00000000..50df8167
--- /dev/null
+++ b/examples/ip_pipeline/hash_func_arm64.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Linaro Limited.
+ */
+#ifndef __HASH_FUNC_ARM64_H__
+#define __HASH_FUNC_ARM64_H__
+
+#define _CRC32CX(crc, val) \
+ __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val))
+
+static inline uint64_t
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint32_t crc0;
+
+ crc0 = seed;
+ _CRC32CX(crc0, k[0] & m[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ _CRC32CX(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, k5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+ _CRC32CX(crc5, k[7] & m[7]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#endif
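
A usage sketch for the helpers above, assuming an aarch64 toolchain with the CRC extension and DPDK headers on the include path (for __rte_unused); key and mask are supplied as 64-bit words and the key is masked word by word before hashing:

#include <stdio.h>
#include <stdint.h>
#include <rte_common.h>       /* __rte_unused */
#include "hash_func_arm64.h"

int main(void)
{
	uint64_t key[2]  = { 0x1122334455667788ULL, 0x99aabbccddeeff00ULL };
	uint64_t mask[2] = { UINT64_MAX, UINT64_MAX }; /* hash the whole key */
	uint64_t h = hash_crc_key16(key, mask, 16, 0); /* seed = 0 */

	printf("hash_crc_key16 = 0x%016llx\n", (unsigned long long)h);
	return 0;
}
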
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
deleted file mode 100644
index bb07efa1..00000000
--- a/examples/ip_pipeline/init.c
+++ /dev/null
@@ -1,1927 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <inttypes.h>
-#include <stdio.h>
-#include <string.h>
-#include <netinet/in.h>
-#ifdef RTE_EXEC_ENV_LINUXAPP
-#include <linux/if.h>
-#include <linux/if_tun.h>
-#endif
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-#include <rte_cycles.h>
-#include <rte_ethdev.h>
-#include <rte_ether.h>
-#include <rte_ip.h>
-#include <rte_eal.h>
-#include <rte_malloc.h>
-#include <rte_bus_pci.h>
-
-#include "app.h"
-#include "pipeline.h"
-#include "pipeline_common_fe.h"
-#include "pipeline_master.h"
-#include "pipeline_passthrough.h"
-#include "pipeline_firewall.h"
-#include "pipeline_flow_classification.h"
-#include "pipeline_flow_actions.h"
-#include "pipeline_routing.h"
-#include "thread_fe.h"
-
-#define APP_NAME_SIZE 32
-
-#define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
-
-static void
-app_init_core_map(struct app_params *app)
-{
- APP_LOG(app, HIGH, "Initializing CPU core map ...");
- app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
- 4, 0);
-
- if (app->core_map == NULL)
- rte_panic("Cannot create CPU core map\n");
-
- if (app->log_level >= APP_LOG_LEVEL_LOW)
- cpu_core_map_print(app->core_map);
-}
-
-/* Core Mask String in Hex Representation */
-#define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
-
-static void
-app_init_core_mask(struct app_params *app)
-{
- uint32_t i;
- char core_mask_str[APP_CORE_MASK_STRING_SIZE];
-
- for (i = 0; i < app->n_pipelines; i++) {
- struct app_pipeline_params *p = &app->pipeline_params[i];
- int lcore_id;
-
- lcore_id = cpu_core_map_get_lcore_id(app->core_map,
- p->socket_id,
- p->core_id,
- p->hyper_th_id);
-
- if (lcore_id < 0)
- rte_panic("Cannot create CPU core mask\n");
-
- app_core_enable_in_core_mask(app, lcore_id);
- }
-
- app_core_build_core_mask_string(app, core_mask_str);
- APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
-}
-
-static void
-app_init_eal(struct app_params *app)
-{
- char buffer[256];
- char core_mask_str[APP_CORE_MASK_STRING_SIZE];
- struct app_eal_params *p = &app->eal_params;
- uint32_t n_args = 0;
- uint32_t i;
- int status;
-
- app->eal_argv[n_args++] = strdup(app->app_name);
-
- app_core_build_core_mask_string(app, core_mask_str);
- snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
- app->eal_argv[n_args++] = strdup(buffer);
-
- if (p->coremap) {
- snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->master_lcore_present) {
- snprintf(buffer,
- sizeof(buffer),
- "--master-lcore=%" PRIu32,
- p->master_lcore);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
- app->eal_argv[n_args++] = strdup(buffer);
-
- if (p->memory_present) {
- snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->ranks_present) {
- snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->pci_blacklist[i] == NULL)
- break;
-
- snprintf(buffer,
- sizeof(buffer),
- "--pci-blacklist=%s",
- p->pci_blacklist[i]);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (app->port_mask != 0)
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->pci_whitelist[i] == NULL)
- break;
-
- snprintf(buffer,
- sizeof(buffer),
- "--pci-whitelist=%s",
- p->pci_whitelist[i]);
- app->eal_argv[n_args++] = strdup(buffer);
- }
- else
- for (i = 0; i < app->n_links; i++) {
- char *pci_bdf = app->link_params[i].pci_bdf;
-
- snprintf(buffer,
- sizeof(buffer),
- "--pci-whitelist=%s",
- pci_bdf);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- for (i = 0; i < APP_MAX_LINKS; i++) {
- if (p->vdev[i] == NULL)
- break;
-
- snprintf(buffer,
- sizeof(buffer),
- "--vdev=%s",
- p->vdev[i]);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
- snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->proc_type) {
- snprintf(buffer,
- sizeof(buffer),
- "--proc-type=%s",
- p->proc_type);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->syslog) {
- snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->log_level_present) {
- snprintf(buffer,
- sizeof(buffer),
- "--log-level=%" PRIu32,
- p->log_level);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if ((p->version_present) && p->version) {
- snprintf(buffer, sizeof(buffer), "-v");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if ((p->help_present) && p->help) {
- snprintf(buffer, sizeof(buffer), "--help");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if ((p->no_huge_present) && p->no_huge) {
- snprintf(buffer, sizeof(buffer), "--no-huge");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if ((p->no_pci_present) && p->no_pci) {
- snprintf(buffer, sizeof(buffer), "--no-pci");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if ((p->no_hpet_present) && p->no_hpet) {
- snprintf(buffer, sizeof(buffer), "--no-hpet");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if ((p->no_shconf_present) && p->no_shconf) {
- snprintf(buffer, sizeof(buffer), "--no-shconf");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->add_driver) {
- snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->socket_mem) {
- snprintf(buffer,
- sizeof(buffer),
- "--socket-mem=%s",
- p->socket_mem);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->huge_dir) {
- snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->file_prefix) {
- snprintf(buffer,
- sizeof(buffer),
- "--file-prefix=%s",
- p->file_prefix);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->base_virtaddr) {
- snprintf(buffer,
- sizeof(buffer),
- "--base-virtaddr=%s",
- p->base_virtaddr);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if ((p->create_uio_dev_present) && p->create_uio_dev) {
- snprintf(buffer, sizeof(buffer), "--create-uio-dev");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- if (p->vfio_intr) {
- snprintf(buffer,
- sizeof(buffer),
- "--vfio-intr=%s",
- p->vfio_intr);
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
- snprintf(buffer, sizeof(buffer), "--");
- app->eal_argv[n_args++] = strdup(buffer);
-
- app->eal_argc = n_args;
-
- APP_LOG(app, HIGH, "Initializing EAL ...");
- if (app->log_level >= APP_LOG_LEVEL_LOW) {
- int i;
-
- fprintf(stdout, "[APP] EAL arguments: \"");
- for (i = 1; i < app->eal_argc; i++)
- fprintf(stdout, "%s ", app->eal_argv[i]);
- fprintf(stdout, "\"\n");
- }
-
- status = rte_eal_init(app->eal_argc, app->eal_argv);
- if (status < 0)
- rte_panic("EAL init error\n");
-}
-
-static void
-app_init_mempool(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_mempools; i++) {
- struct app_mempool_params *p = &app->mempool_params[i];
-
- APP_LOG(app, HIGH, "Initializing %s ...", p->name);
- app->mempool[i] = rte_pktmbuf_pool_create(
- p->name,
- p->pool_size,
- p->cache_size,
- 0, /* priv_size */
- p->buffer_size -
- sizeof(struct rte_mbuf), /* mbuf data size */
- p->cpu_socket_id);
-
- if (app->mempool[i] == NULL)
- rte_panic("%s init error\n", p->name);
- }
-}
-
-static inline int
-app_link_filter_arp_add(struct app_link_params *link)
-{
- struct rte_eth_ethertype_filter filter = {
- .ether_type = ETHER_TYPE_ARP,
- .flags = 0,
- .queue = link->arp_q,
- };
-
- return rte_eth_dev_filter_ctrl(link->pmd_id,
- RTE_ETH_FILTER_ETHERTYPE,
- RTE_ETH_FILTER_ADD,
- &filter);
-}
-
-static inline int
-app_link_filter_tcp_syn_add(struct app_link_params *link)
-{
- struct rte_eth_syn_filter filter = {
- .hig_pri = 1,
- .queue = link->tcp_syn_q,
- };
-
- return rte_eth_dev_filter_ctrl(link->pmd_id,
- RTE_ETH_FILTER_SYN,
- RTE_ETH_FILTER_ADD,
- &filter);
-}
-
-static inline int
-app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
-{
- struct rte_eth_ntuple_filter filter = {
- .flags = RTE_5TUPLE_FLAGS,
- .dst_ip = rte_bswap32(l2->ip),
- .dst_ip_mask = UINT32_MAX, /* Enable */
- .src_ip = 0,
- .src_ip_mask = 0, /* Disable */
- .dst_port = 0,
- .dst_port_mask = 0, /* Disable */
- .src_port = 0,
- .src_port_mask = 0, /* Disable */
- .proto = 0,
- .proto_mask = 0, /* Disable */
- .tcp_flags = 0,
- .priority = 1, /* Lowest */
- .queue = l1->ip_local_q,
- };
-
- return rte_eth_dev_filter_ctrl(l1->pmd_id,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_ADD,
- &filter);
-}
-
-static inline int
-app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
-{
- struct rte_eth_ntuple_filter filter = {
- .flags = RTE_5TUPLE_FLAGS,
- .dst_ip = rte_bswap32(l2->ip),
- .dst_ip_mask = UINT32_MAX, /* Enable */
- .src_ip = 0,
- .src_ip_mask = 0, /* Disable */
- .dst_port = 0,
- .dst_port_mask = 0, /* Disable */
- .src_port = 0,
- .src_port_mask = 0, /* Disable */
- .proto = 0,
- .proto_mask = 0, /* Disable */
- .tcp_flags = 0,
- .priority = 1, /* Lowest */
- .queue = l1->ip_local_q,
- };
-
- return rte_eth_dev_filter_ctrl(l1->pmd_id,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_DELETE,
- &filter);
-}
-
-static inline int
-app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
-{
- struct rte_eth_ntuple_filter filter = {
- .flags = RTE_5TUPLE_FLAGS,
- .dst_ip = rte_bswap32(l2->ip),
- .dst_ip_mask = UINT32_MAX, /* Enable */
- .src_ip = 0,
- .src_ip_mask = 0, /* Disable */
- .dst_port = 0,
- .dst_port_mask = 0, /* Disable */
- .src_port = 0,
- .src_port_mask = 0, /* Disable */
- .proto = IPPROTO_TCP,
- .proto_mask = UINT8_MAX, /* Enable */
- .tcp_flags = 0,
- .priority = 2, /* Higher priority than IP */
- .queue = l1->tcp_local_q,
- };
-
- return rte_eth_dev_filter_ctrl(l1->pmd_id,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_ADD,
- &filter);
-}
-
-static inline int
-app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
-{
- struct rte_eth_ntuple_filter filter = {
- .flags = RTE_5TUPLE_FLAGS,
- .dst_ip = rte_bswap32(l2->ip),
- .dst_ip_mask = UINT32_MAX, /* Enable */
- .src_ip = 0,
- .src_ip_mask = 0, /* Disable */
- .dst_port = 0,
- .dst_port_mask = 0, /* Disable */
- .src_port = 0,
- .src_port_mask = 0, /* Disable */
- .proto = IPPROTO_TCP,
- .proto_mask = UINT8_MAX, /* Enable */
- .tcp_flags = 0,
- .priority = 2, /* Higher priority than IP */
- .queue = l1->tcp_local_q,
- };
-
- return rte_eth_dev_filter_ctrl(l1->pmd_id,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_DELETE,
- &filter);
-}
-
-static inline int
-app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
-{
- struct rte_eth_ntuple_filter filter = {
- .flags = RTE_5TUPLE_FLAGS,
- .dst_ip = rte_bswap32(l2->ip),
- .dst_ip_mask = UINT32_MAX, /* Enable */
- .src_ip = 0,
- .src_ip_mask = 0, /* Disable */
- .dst_port = 0,
- .dst_port_mask = 0, /* Disable */
- .src_port = 0,
- .src_port_mask = 0, /* Disable */
- .proto = IPPROTO_UDP,
- .proto_mask = UINT8_MAX, /* Enable */
- .tcp_flags = 0,
- .priority = 2, /* Higher priority than IP */
- .queue = l1->udp_local_q,
- };
-
- return rte_eth_dev_filter_ctrl(l1->pmd_id,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_ADD,
- &filter);
-}
-
-static inline int
-app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
-{
- struct rte_eth_ntuple_filter filter = {
- .flags = RTE_5TUPLE_FLAGS,
- .dst_ip = rte_bswap32(l2->ip),
- .dst_ip_mask = UINT32_MAX, /* Enable */
- .src_ip = 0,
- .src_ip_mask = 0, /* Disable */
- .dst_port = 0,
- .dst_port_mask = 0, /* Disable */
- .src_port = 0,
- .src_port_mask = 0, /* Disable */
- .proto = IPPROTO_UDP,
- .proto_mask = UINT8_MAX, /* Enable */
- .tcp_flags = 0,
- .priority = 2, /* Higher priority than IP */
- .queue = l1->udp_local_q,
- };
-
- return rte_eth_dev_filter_ctrl(l1->pmd_id,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_DELETE,
- &filter);
-}
-
-static inline int
-app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
-{
- struct rte_eth_ntuple_filter filter = {
- .flags = RTE_5TUPLE_FLAGS,
- .dst_ip = rte_bswap32(l2->ip),
- .dst_ip_mask = UINT32_MAX, /* Enable */
- .src_ip = 0,
- .src_ip_mask = 0, /* Disable */
- .dst_port = 0,
- .dst_port_mask = 0, /* Disable */
- .src_port = 0,
- .src_port_mask = 0, /* Disable */
- .proto = IPPROTO_SCTP,
- .proto_mask = UINT8_MAX, /* Enable */
- .tcp_flags = 0,
- .priority = 2, /* Higher priority than IP */
- .queue = l1->sctp_local_q,
- };
-
- return rte_eth_dev_filter_ctrl(l1->pmd_id,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_ADD,
- &filter);
-}
-
-static inline int
-app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
-{
- struct rte_eth_ntuple_filter filter = {
- .flags = RTE_5TUPLE_FLAGS,
- .dst_ip = rte_bswap32(l2->ip),
- .dst_ip_mask = UINT32_MAX, /* Enable */
- .src_ip = 0,
- .src_ip_mask = 0, /* Disable */
- .dst_port = 0,
- .dst_port_mask = 0, /* Disable */
- .src_port = 0,
- .src_port_mask = 0, /* Disable */
- .proto = IPPROTO_SCTP,
- .proto_mask = UINT8_MAX, /* Enable */
- .tcp_flags = 0,
- .priority = 2, /* Higher priority than IP */
- .queue = l1->sctp_local_q,
- };
-
- return rte_eth_dev_filter_ctrl(l1->pmd_id,
- RTE_ETH_FILTER_NTUPLE,
- RTE_ETH_FILTER_DELETE,
- &filter);
-}
-
-static void
-app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
-{
- if (cp->arp_q != 0) {
- int status = app_link_filter_arp_add(cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32 "): "
- "Adding ARP filter (queue = %" PRIu32 ")",
- cp->name, cp->pmd_id, cp->arp_q);
-
- if (status)
- rte_panic("%s (%" PRIu32 "): "
- "Error adding ARP filter "
- "(queue = %" PRIu32 ") (%" PRId32 ")\n",
- cp->name, cp->pmd_id, cp->arp_q, status);
- }
-}
-
-static void
-app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
-{
- if (cp->tcp_syn_q != 0) {
- int status = app_link_filter_tcp_syn_add(cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32 "): "
- "Adding TCP SYN filter (queue = %" PRIu32 ")",
- cp->name, cp->pmd_id, cp->tcp_syn_q);
-
- if (status)
- rte_panic("%s (%" PRIu32 "): "
- "Error adding TCP SYN filter "
- "(queue = %" PRIu32 ") (%" PRId32 ")\n",
- cp->name, cp->pmd_id, cp->tcp_syn_q,
- status);
- }
-}
-
-void
-app_link_up_internal(struct app_params *app, struct app_link_params *cp)
-{
- uint32_t i;
- int status;
-
- /* For each link, add filters for IP of current link */
- if (cp->ip != 0) {
- for (i = 0; i < app->n_links; i++) {
- struct app_link_params *p = &app->link_params[i];
-
- /* IP */
- if (p->ip_local_q != 0) {
- int status = app_link_filter_ip_add(p, cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32 "): "
- "Adding IP filter (queue= %" PRIu32
- ", IP = 0x%08" PRIx32 ")",
- p->name, p->pmd_id, p->ip_local_q,
- cp->ip);
-
- if (status)
- rte_panic("%s (%" PRIu32 "): "
- "Error adding IP "
- "filter (queue= %" PRIu32 ", "
- "IP = 0x%08" PRIx32
- ") (%" PRId32 ")\n",
- p->name, p->pmd_id,
- p->ip_local_q, cp->ip, status);
- }
-
- /* TCP */
- if (p->tcp_local_q != 0) {
- int status = app_link_filter_tcp_add(p, cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32 "): "
- "Adding TCP filter "
- "(queue = %" PRIu32
- ", IP = 0x%08" PRIx32 ")",
- p->name, p->pmd_id, p->tcp_local_q,
- cp->ip);
-
- if (status)
- rte_panic("%s (%" PRIu32 "): "
- "Error adding TCP "
- "filter (queue = %" PRIu32 ", "
- "IP = 0x%08" PRIx32
- ") (%" PRId32 ")\n",
- p->name, p->pmd_id,
- p->tcp_local_q, cp->ip, status);
- }
-
- /* UDP */
- if (p->udp_local_q != 0) {
- int status = app_link_filter_udp_add(p, cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32 "): "
- "Adding UDP filter "
- "(queue = %" PRIu32
- ", IP = 0x%08" PRIx32 ")",
- p->name, p->pmd_id, p->udp_local_q,
- cp->ip);
-
- if (status)
- rte_panic("%s (%" PRIu32 "): "
- "Error adding UDP "
- "filter (queue = %" PRIu32 ", "
- "IP = 0x%08" PRIx32
- ") (%" PRId32 ")\n",
- p->name, p->pmd_id,
- p->udp_local_q, cp->ip, status);
- }
-
- /* SCTP */
- if (p->sctp_local_q != 0) {
- int status = app_link_filter_sctp_add(p, cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32
- "): Adding SCTP filter "
- "(queue = %" PRIu32
- ", IP = 0x%08" PRIx32 ")",
- p->name, p->pmd_id, p->sctp_local_q,
- cp->ip);
-
- if (status)
- rte_panic("%s (%" PRIu32 "): "
- "Error adding SCTP "
- "filter (queue = %" PRIu32 ", "
- "IP = 0x%08" PRIx32
- ") (%" PRId32 ")\n",
- p->name, p->pmd_id,
- p->sctp_local_q, cp->ip,
- status);
- }
- }
- }
-
- /* PMD link up */
- status = rte_eth_dev_set_link_up(cp->pmd_id);
- /* Do not panic if PMD does not provide link up functionality */
- if (status < 0 && status != -ENOTSUP)
- rte_panic("%s (%" PRIu32 "): PMD set link up error %"
- PRId32 "\n", cp->name, cp->pmd_id, status);
-
- /* Mark link as UP */
- cp->state = 1;
-}
-
-void
-app_link_down_internal(struct app_params *app, struct app_link_params *cp)
-{
- uint32_t i;
- int status;
-
- /* PMD link down */
- status = rte_eth_dev_set_link_down(cp->pmd_id);
- /* Do not panic if PMD does not provide link down functionality */
- if (status < 0 && status != -ENOTSUP)
- rte_panic("%s (%" PRIu32 "): PMD set link down error %"
- PRId32 "\n", cp->name, cp->pmd_id, status);
-
- /* Mark link as DOWN */
- cp->state = 0;
-
- /* Return if current link IP is not valid */
- if (cp->ip == 0)
- return;
-
- /* For each link, remove filters for IP of current link */
- for (i = 0; i < app->n_links; i++) {
- struct app_link_params *p = &app->link_params[i];
-
- /* IP */
- if (p->ip_local_q != 0) {
- int status = app_link_filter_ip_del(p, cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32
- "): Deleting IP filter "
- "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
- p->name, p->pmd_id, p->ip_local_q, cp->ip);
-
- if (status)
- rte_panic("%s (%" PRIu32
- "): Error deleting IP filter "
- "(queue = %" PRIu32
- ", IP = 0x%" PRIx32
- ") (%" PRId32 ")\n",
- p->name, p->pmd_id, p->ip_local_q,
- cp->ip, status);
- }
-
- /* TCP */
- if (p->tcp_local_q != 0) {
- int status = app_link_filter_tcp_del(p, cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32
- "): Deleting TCP filter "
- "(queue = %" PRIu32
- ", IP = 0x%" PRIx32 ")",
- p->name, p->pmd_id, p->tcp_local_q, cp->ip);
-
- if (status)
- rte_panic("%s (%" PRIu32
- "): Error deleting TCP filter "
- "(queue = %" PRIu32
- ", IP = 0x%" PRIx32
- ") (%" PRId32 ")\n",
- p->name, p->pmd_id, p->tcp_local_q,
- cp->ip, status);
- }
-
- /* UDP */
- if (p->udp_local_q != 0) {
- int status = app_link_filter_udp_del(p, cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32
- "): Deleting UDP filter "
- "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
- p->name, p->pmd_id, p->udp_local_q, cp->ip);
-
- if (status)
- rte_panic("%s (%" PRIu32
- "): Error deleting UDP filter "
- "(queue = %" PRIu32
- ", IP = 0x%" PRIx32
- ") (%" PRId32 ")\n",
- p->name, p->pmd_id, p->udp_local_q,
- cp->ip, status);
- }
-
- /* SCTP */
- if (p->sctp_local_q != 0) {
- int status = app_link_filter_sctp_del(p, cp);
-
- APP_LOG(app, LOW, "%s (%" PRIu32
- "): Deleting SCTP filter "
- "(queue = %" PRIu32
- ", IP = 0x%" PRIx32 ")",
- p->name, p->pmd_id, p->sctp_local_q, cp->ip);
-
- if (status)
- rte_panic("%s (%" PRIu32
- "): Error deleting SCTP filter "
- "(queue = %" PRIu32
- ", IP = 0x%" PRIx32
- ") (%" PRId32 ")\n",
- p->name, p->pmd_id, p->sctp_local_q,
- cp->ip, status);
- }
- }
-}
-
-static void
-app_check_link(struct app_params *app)
-{
- uint32_t all_links_up, i;
-
- all_links_up = 1;
-
- for (i = 0; i < app->n_links; i++) {
- struct app_link_params *p = &app->link_params[i];
- struct rte_eth_link link_params;
-
- memset(&link_params, 0, sizeof(link_params));
- rte_eth_link_get(p->pmd_id, &link_params);
-
- APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
- p->name,
- p->pmd_id,
- link_params.link_speed / 1000,
- link_params.link_status ? "UP" : "DOWN");
-
- if (link_params.link_status == ETH_LINK_DOWN)
- all_links_up = 0;
- }
-
- if (all_links_up == 0)
- rte_panic("Some links are DOWN\n");
-}
-
-static uint32_t
-is_any_swq_frag_or_ras(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_swq; i++) {
- struct app_pktq_swq_params *p = &app->swq_params[i];
-
- if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
- (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
- return 1;
- }
-
- return 0;
-}
-
-static void
-app_init_link_frag_ras(struct app_params *app)
-{
- uint32_t i;
-
- if (is_any_swq_frag_or_ras(app)) {
- for (i = 0; i < app->n_links; i++) {
- struct app_link_params *p_link = &app->link_params[i];
- p_link->conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MULTI_SEGS;
- }
- }
-}
-
-static inline int
-app_get_cpu_socket_id(uint32_t pmd_id)
-{
- int status = rte_eth_dev_socket_id(pmd_id);
-
- return (status != SOCKET_ID_ANY) ? status : 0;
-}
-
-static inline int
-app_link_rss_enabled(struct app_link_params *cp)
-{
- return (cp->n_rss_qs) ? 1 : 0;
-}
-
-static void
-app_link_rss_setup(struct app_link_params *cp)
-{
- struct rte_eth_dev_info dev_info;
- struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
- uint32_t i;
- int status;
-
- /* Get RETA size */
- memset(&dev_info, 0, sizeof(dev_info));
- rte_eth_dev_info_get(cp->pmd_id, &dev_info);
-
- if (dev_info.reta_size == 0)
- rte_panic("%s (%u): RSS setup error (null RETA size)\n",
- cp->name, cp->pmd_id);
-
- if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
- rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
- cp->name, cp->pmd_id);
-
- /* Setup RETA contents */
- memset(reta_conf, 0, sizeof(reta_conf));
-
- for (i = 0; i < dev_info.reta_size; i++)
- reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
-
- for (i = 0; i < dev_info.reta_size; i++) {
- uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
- uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
- uint32_t rss_qs_pos = i % cp->n_rss_qs;
-
- reta_conf[reta_id].reta[reta_pos] =
- (uint16_t) cp->rss_qs[rss_qs_pos];
- }
-
- /* RETA update */
- status = rte_eth_dev_rss_reta_update(cp->pmd_id,
- reta_conf,
- dev_info.reta_size);
- if (status != 0)
- rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
- cp->name, cp->pmd_id);
-}
-
-static void
-app_init_link_set_config(struct app_link_params *p)
-{
- if (p->n_rss_qs) {
- p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
- p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
- p->rss_proto_ipv6 |
- p->rss_proto_l2;
- }
-}
-
-static void
-app_init_link(struct app_params *app)
-{
- uint32_t i;
-
- app_init_link_frag_ras(app);
-
- for (i = 0; i < app->n_links; i++) {
- struct app_link_params *p_link = &app->link_params[i];
- struct rte_eth_dev_info dev_info;
- uint32_t link_id, n_hwq_in, n_hwq_out, j;
- int status;
-
- sscanf(p_link->name, "LINK%" PRIu32, &link_id);
- n_hwq_in = app_link_get_n_rxq(app, p_link);
- n_hwq_out = app_link_get_n_txq(app, p_link);
- app_init_link_set_config(p_link);
-
- APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
- "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
- p_link->name,
- p_link->pmd_id,
- n_hwq_in,
- n_hwq_out);
-
- /* LINK */
- rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- p_link->conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
- status = rte_eth_dev_configure(
- p_link->pmd_id,
- n_hwq_in,
- n_hwq_out,
- &p_link->conf);
- if (status < 0)
- rte_panic("%s (%" PRId32 "): "
- "init error (%" PRId32 ")\n",
- p_link->name, p_link->pmd_id, status);
-
- rte_eth_macaddr_get(p_link->pmd_id,
- (struct ether_addr *) &p_link->mac_addr);
-
- if (p_link->promisc)
- rte_eth_promiscuous_enable(p_link->pmd_id);
-
- /* RXQ */
- for (j = 0; j < app->n_pktq_hwq_in; j++) {
- struct app_pktq_hwq_in_params *p_rxq =
- &app->hwq_in_params[j];
- uint32_t rxq_link_id, rxq_queue_id;
- uint16_t nb_rxd = p_rxq->size;
-
- sscanf(p_rxq->name, "RXQ%" PRIu32 ".%" PRIu32,
- &rxq_link_id, &rxq_queue_id);
- if (rxq_link_id != link_id)
- continue;
-
- status = rte_eth_dev_adjust_nb_rx_tx_desc(
- p_link->pmd_id,
- &nb_rxd,
- NULL);
- if (status < 0)
- rte_panic("%s (%" PRIu32 "): "
- "%s adjust number of Rx descriptors "
- "error (%" PRId32 ")\n",
- p_link->name,
- p_link->pmd_id,
- p_rxq->name,
- status);
-
- p_rxq->conf.offloads = p_link->conf.rxmode.offloads;
- status = rte_eth_rx_queue_setup(
- p_link->pmd_id,
- rxq_queue_id,
- nb_rxd,
- app_get_cpu_socket_id(p_link->pmd_id),
- &p_rxq->conf,
- app->mempool[p_rxq->mempool_id]);
- if (status < 0)
- rte_panic("%s (%" PRIu32 "): "
- "%s init error (%" PRId32 ")\n",
- p_link->name,
- p_link->pmd_id,
- p_rxq->name,
- status);
- }
-
- /* TXQ */
- for (j = 0; j < app->n_pktq_hwq_out; j++) {
- struct app_pktq_hwq_out_params *p_txq =
- &app->hwq_out_params[j];
- uint32_t txq_link_id, txq_queue_id;
- uint16_t nb_txd = p_txq->size;
-
- sscanf(p_txq->name, "TXQ%" PRIu32 ".%" PRIu32,
- &txq_link_id, &txq_queue_id);
- if (txq_link_id != link_id)
- continue;
-
- status = rte_eth_dev_adjust_nb_rx_tx_desc(
- p_link->pmd_id,
- NULL,
- &nb_txd);
- if (status < 0)
- rte_panic("%s (%" PRIu32 "): "
- "%s adjust number of Tx descriptors "
- "error (%" PRId32 ")\n",
- p_link->name,
- p_link->pmd_id,
- p_txq->name,
- status);
-
- p_txq->conf.offloads = p_link->conf.txmode.offloads;
- status = rte_eth_tx_queue_setup(
- p_link->pmd_id,
- txq_queue_id,
- nb_txd,
- app_get_cpu_socket_id(p_link->pmd_id),
- &p_txq->conf);
- if (status < 0)
- rte_panic("%s (%" PRIu32 "): "
- "%s init error (%" PRId32 ")\n",
- p_link->name,
- p_link->pmd_id,
- p_txq->name,
- status);
- }
-
- /* LINK START */
- status = rte_eth_dev_start(p_link->pmd_id);
- if (status < 0)
- rte_panic("Cannot start %s (error %" PRId32 ")\n",
- p_link->name, status);
-
- /* LINK FILTERS */
- app_link_set_arp_filter(app, p_link);
- app_link_set_tcp_syn_filter(app, p_link);
- if (app_link_rss_enabled(p_link))
- app_link_rss_setup(p_link);
-
- /* LINK UP */
- app_link_up_internal(app, p_link);
- }
-
- app_check_link(app);
-}
-
-static void
-app_init_swq(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_swq; i++) {
- struct app_pktq_swq_params *p = &app->swq_params[i];
- unsigned flags = 0;
-
- if (app_swq_get_readers(app, p) == 1)
- flags |= RING_F_SC_DEQ;
- if (app_swq_get_writers(app, p) == 1)
- flags |= RING_F_SP_ENQ;
-
- APP_LOG(app, HIGH, "Initializing %s...", p->name);
- app->swq[i] = rte_ring_create(
- p->name,
- p->size,
- p->cpu_socket_id,
- flags);
-
- if (app->swq[i] == NULL)
- rte_panic("%s init error\n", p->name);
- }
-}
-
-static void
-app_init_tm(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_tm; i++) {
- struct app_pktq_tm_params *p_tm = &app->tm_params[i];
- struct app_link_params *p_link;
- struct rte_eth_link link_eth_params;
- struct rte_sched_port *sched;
- uint32_t n_subports, subport_id;
- int status;
-
- p_link = app_get_link_for_tm(app, p_tm);
- /* LINK */
- rte_eth_link_get(p_link->pmd_id, &link_eth_params);
-
- /* TM */
- p_tm->sched_port_params.name = p_tm->name;
- p_tm->sched_port_params.socket =
- app_get_cpu_socket_id(p_link->pmd_id);
- p_tm->sched_port_params.rate =
- (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
-
- APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
- sched = rte_sched_port_config(&p_tm->sched_port_params);
- if (sched == NULL)
- rte_panic("%s init error\n", p_tm->name);
- app->tm[i] = sched;
-
- /* Subport */
- n_subports = p_tm->sched_port_params.n_subports_per_port;
- for (subport_id = 0; subport_id < n_subports; subport_id++) {
- uint32_t n_pipes_per_subport, pipe_id;
-
- status = rte_sched_subport_config(sched,
- subport_id,
- &p_tm->sched_subport_params[subport_id]);
- if (status)
- rte_panic("%s subport %" PRIu32
- " init error (%" PRId32 ")\n",
- p_tm->name, subport_id, status);
-
- /* Pipe */
- n_pipes_per_subport =
- p_tm->sched_port_params.n_pipes_per_subport;
- for (pipe_id = 0;
- pipe_id < n_pipes_per_subport;
- pipe_id++) {
- int profile_id = p_tm->sched_pipe_to_profile[
- subport_id * APP_MAX_SCHED_PIPES +
- pipe_id];
-
- if (profile_id == -1)
- continue;
-
- status = rte_sched_pipe_config(sched,
- subport_id,
- pipe_id,
- profile_id);
- if (status)
- rte_panic("%s subport %" PRIu32
- " pipe %" PRIu32
- " (profile %" PRId32 ") "
- "init error (% " PRId32 ")\n",
- p_tm->name, subport_id, pipe_id,
- profile_id, status);
- }
- }
- }
-}
-
-#ifndef RTE_EXEC_ENV_LINUXAPP
-static void
-app_init_tap(struct app_params *app) {
- if (app->n_pktq_tap == 0)
- return;
-
- rte_panic("TAP device not supported.\n");
-}
-#else
-static void
-app_init_tap(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pktq_tap; i++) {
- struct app_pktq_tap_params *p_tap = &app->tap_params[i];
- struct ifreq ifr;
- int fd, status;
-
- APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
-
- fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
- if (fd < 0)
- rte_panic("Cannot open file /dev/net/tun\n");
-
- memset(&ifr, 0, sizeof(ifr));
- ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
- snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
-
- status = ioctl(fd, TUNSETIFF, (void *) &ifr);
- if (status < 0)
- rte_panic("TAP setup error\n");
-
- app->tap[i] = fd;
- }
-}
-#endif
-
-#ifdef RTE_LIBRTE_KNI
-static int
-kni_config_network_interface(uint16_t port_id, uint8_t if_up) {
- int ret = 0;
-
- if (port_id >= rte_eth_dev_count())
- return -EINVAL;
-
- ret = (if_up) ?
- rte_eth_dev_set_link_up(port_id) :
- rte_eth_dev_set_link_down(port_id);
-
- return ret;
-}
-
-static int
-kni_change_mtu(uint16_t port_id, unsigned int new_mtu) {
- int ret;
-
- if (port_id >= rte_eth_dev_count())
- return -EINVAL;
-
- if (new_mtu > ETHER_MAX_LEN)
- return -EINVAL;
-
- /* Set new MTU */
- ret = rte_eth_dev_set_mtu(port_id, new_mtu);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-#endif /* RTE_LIBRTE_KNI */
-
-#ifndef RTE_LIBRTE_KNI
-static void
-app_init_kni(struct app_params *app) {
- if (app->n_pktq_kni == 0)
- return;
-
- rte_panic("Can not init KNI without librte_kni support.\n");
-}
-#else
-static void
-app_init_kni(struct app_params *app) {
- uint32_t i;
-
- if (app->n_pktq_kni == 0)
- return;
-
- rte_kni_init(app->n_pktq_kni);
-
- for (i = 0; i < app->n_pktq_kni; i++) {
- struct app_pktq_kni_params *p_kni = &app->kni_params[i];
- struct app_link_params *p_link;
- struct rte_eth_dev_info dev_info;
- struct app_mempool_params *mempool_params;
- struct rte_mempool *mempool;
- struct rte_kni_conf conf;
- struct rte_kni_ops ops;
-
- /* LINK */
- p_link = app_get_link_for_kni(app, p_kni);
- memset(&dev_info, 0, sizeof(dev_info));
- rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
-
- /* MEMPOOL */
- mempool_params = &app->mempool_params[p_kni->mempool_id];
- mempool = app->mempool[p_kni->mempool_id];
-
- /* KNI */
- memset(&conf, 0, sizeof(conf));
- snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
- conf.force_bind = p_kni->force_bind;
- if (conf.force_bind) {
- int lcore_id;
-
- lcore_id = cpu_core_map_get_lcore_id(app->core_map,
- p_kni->socket_id,
- p_kni->core_id,
- p_kni->hyper_th_id);
-
- if (lcore_id < 0)
- rte_panic("%s invalid CPU core\n", p_kni->name);
-
- conf.core_id = (uint32_t) lcore_id;
- }
- conf.group_id = p_link->pmd_id;
- conf.mbuf_size = mempool_params->buffer_size;
- conf.addr = dev_info.pci_dev->addr;
- conf.id = dev_info.pci_dev->id;
-
- memset(&ops, 0, sizeof(ops));
- ops.port_id = (uint8_t) p_link->pmd_id;
- ops.change_mtu = kni_change_mtu;
- ops.config_network_if = kni_config_network_interface;
-
- APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
- app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
- if (!app->kni[i])
- rte_panic("%s init error\n", p_kni->name);
- }
-}
-#endif /* RTE_LIBRTE_KNI */
-
-static void
-app_init_msgq(struct app_params *app)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_msgq; i++) {
- struct app_msgq_params *p = &app->msgq_params[i];
-
- APP_LOG(app, HIGH, "Initializing %s ...", p->name);
- app->msgq[i] = rte_ring_create(
- p->name,
- p->size,
- p->cpu_socket_id,
- RING_F_SP_ENQ | RING_F_SC_DEQ);
-
- if (app->msgq[i] == NULL)
- rte_panic("%s init error\n", p->name);
- }
-}
-
-void app_pipeline_params_get(struct app_params *app,
- struct app_pipeline_params *p_in,
- struct pipeline_params *p_out)
-{
- uint32_t i;
-
- snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
-
- snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
-
- p_out->socket_id = (int) p_in->socket_id;
-
- p_out->log_level = app->log_level;
-
- /* pktq_in */
- p_out->n_ports_in = p_in->n_pktq_in;
- for (i = 0; i < p_in->n_pktq_in; i++) {
- struct app_pktq_in_params *in = &p_in->pktq_in[i];
- struct pipeline_port_in_params *out = &p_out->port_in[i];
-
- switch (in->type) {
- case APP_PKTQ_IN_HWQ:
- {
- struct app_pktq_hwq_in_params *p_hwq_in =
- &app->hwq_in_params[in->id];
- struct app_link_params *p_link =
- app_get_link_for_rxq(app, p_hwq_in);
- uint32_t rxq_link_id, rxq_queue_id;
-
- sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
- &rxq_link_id,
- &rxq_queue_id);
-
- out->type = PIPELINE_PORT_IN_ETHDEV_READER;
- out->params.ethdev.port_id = p_link->pmd_id;
- out->params.ethdev.queue_id = rxq_queue_id;
- out->burst_size = p_hwq_in->burst;
- break;
- }
- case APP_PKTQ_IN_SWQ:
- {
- struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
-
- if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
- if (app_swq_get_readers(app, swq_params) == 1) {
- out->type = PIPELINE_PORT_IN_RING_READER;
- out->params.ring.ring = app->swq[in->id];
- out->burst_size = app->swq_params[in->id].burst_read;
- } else {
- out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
- out->params.ring_multi.ring = app->swq[in->id];
- out->burst_size = swq_params->burst_read;
- }
- } else {
- if (swq_params->ipv4_frag == 1) {
- struct rte_port_ring_reader_ipv4_frag_params *params =
- &out->params.ring_ipv4_frag;
-
- out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
- params->ring = app->swq[in->id];
- params->mtu = swq_params->mtu;
- params->metadata_size = swq_params->metadata_size;
- params->pool_direct =
- app->mempool[swq_params->mempool_direct_id];
- params->pool_indirect =
- app->mempool[swq_params->mempool_indirect_id];
- out->burst_size = swq_params->burst_read;
- } else {
- struct rte_port_ring_reader_ipv6_frag_params *params =
- &out->params.ring_ipv6_frag;
-
- out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
- params->ring = app->swq[in->id];
- params->mtu = swq_params->mtu;
- params->metadata_size = swq_params->metadata_size;
- params->pool_direct =
- app->mempool[swq_params->mempool_direct_id];
- params->pool_indirect =
- app->mempool[swq_params->mempool_indirect_id];
- out->burst_size = swq_params->burst_read;
- }
- }
- break;
- }
- case APP_PKTQ_IN_TM:
- {
- out->type = PIPELINE_PORT_IN_SCHED_READER;
- out->params.sched.sched = app->tm[in->id];
- out->burst_size = app->tm_params[in->id].burst_read;
- break;
- }
-#ifdef RTE_EXEC_ENV_LINUXAPP
- case APP_PKTQ_IN_TAP:
- {
- struct app_pktq_tap_params *tap_params =
- &app->tap_params[in->id];
- struct app_mempool_params *mempool_params =
- &app->mempool_params[tap_params->mempool_id];
- struct rte_mempool *mempool =
- app->mempool[tap_params->mempool_id];
-
- out->type = PIPELINE_PORT_IN_FD_READER;
- out->params.fd.fd = app->tap[in->id];
- out->params.fd.mtu = mempool_params->buffer_size;
- out->params.fd.mempool = mempool;
- out->burst_size = app->tap_params[in->id].burst_read;
- break;
- }
-#endif
-#ifdef RTE_LIBRTE_KNI
- case APP_PKTQ_IN_KNI:
- {
- out->type = PIPELINE_PORT_IN_KNI_READER;
- out->params.kni.kni = app->kni[in->id];
- out->burst_size = app->kni_params[in->id].burst_read;
- break;
- }
-#endif /* RTE_LIBRTE_KNI */
- case APP_PKTQ_IN_SOURCE:
- {
- uint32_t mempool_id =
- app->source_params[in->id].mempool_id;
-
- out->type = PIPELINE_PORT_IN_SOURCE;
- out->params.source.mempool = app->mempool[mempool_id];
- out->burst_size = app->source_params[in->id].burst;
- out->params.source.file_name =
- app->source_params[in->id].file_name;
- out->params.source.n_bytes_per_pkt =
- app->source_params[in->id].n_bytes_per_pkt;
- break;
- }
- default:
- break;
- }
- }
-
- /* pktq_out */
- p_out->n_ports_out = p_in->n_pktq_out;
- for (i = 0; i < p_in->n_pktq_out; i++) {
- struct app_pktq_out_params *in = &p_in->pktq_out[i];
- struct pipeline_port_out_params *out = &p_out->port_out[i];
-
- switch (in->type) {
- case APP_PKTQ_OUT_HWQ:
- {
- struct app_pktq_hwq_out_params *p_hwq_out =
- &app->hwq_out_params[in->id];
- struct app_link_params *p_link =
- app_get_link_for_txq(app, p_hwq_out);
- uint32_t txq_link_id, txq_queue_id;
-
- sscanf(p_hwq_out->name,
- "TXQ%" SCNu32 ".%" SCNu32,
- &txq_link_id,
- &txq_queue_id);
-
- if (p_hwq_out->dropless == 0) {
- struct rte_port_ethdev_writer_params *params =
- &out->params.ethdev;
-
- out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
- params->port_id = p_link->pmd_id;
- params->queue_id = txq_queue_id;
- params->tx_burst_sz =
- app->hwq_out_params[in->id].burst;
- } else {
- struct rte_port_ethdev_writer_nodrop_params
- *params = &out->params.ethdev_nodrop;
-
- out->type =
- PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
- params->port_id = p_link->pmd_id;
- params->queue_id = txq_queue_id;
- params->tx_burst_sz = p_hwq_out->burst;
- params->n_retries = p_hwq_out->n_retries;
- }
- break;
- }
- case APP_PKTQ_OUT_SWQ:
- {
- struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
-
- if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
- if (app_swq_get_writers(app, swq_params) == 1) {
- if (app->swq_params[in->id].dropless == 0) {
- struct rte_port_ring_writer_params *params =
- &out->params.ring;
-
- out->type = PIPELINE_PORT_OUT_RING_WRITER;
- params->ring = app->swq[in->id];
- params->tx_burst_sz =
- app->swq_params[in->id].burst_write;
- } else {
- struct rte_port_ring_writer_nodrop_params
- *params = &out->params.ring_nodrop;
-
- out->type =
- PIPELINE_PORT_OUT_RING_WRITER_NODROP;
- params->ring = app->swq[in->id];
- params->tx_burst_sz =
- app->swq_params[in->id].burst_write;
- params->n_retries =
- app->swq_params[in->id].n_retries;
- }
- } else {
- if (swq_params->dropless == 0) {
- struct rte_port_ring_multi_writer_params *params =
- &out->params.ring_multi;
-
- out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
- params->ring = app->swq[in->id];
- params->tx_burst_sz = swq_params->burst_write;
- } else {
- struct rte_port_ring_multi_writer_nodrop_params
- *params = &out->params.ring_multi_nodrop;
-
- out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
- params->ring = app->swq[in->id];
- params->tx_burst_sz = swq_params->burst_write;
- params->n_retries = swq_params->n_retries;
- }
- }
- } else {
- if (swq_params->ipv4_ras == 1) {
- struct rte_port_ring_writer_ipv4_ras_params *params =
- &out->params.ring_ipv4_ras;
-
- out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
- params->ring = app->swq[in->id];
- params->tx_burst_sz = swq_params->burst_write;
- } else {
- struct rte_port_ring_writer_ipv6_ras_params *params =
- &out->params.ring_ipv6_ras;
-
- out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
- params->ring = app->swq[in->id];
- params->tx_burst_sz = swq_params->burst_write;
- }
- }
- break;
- }
- case APP_PKTQ_OUT_TM:
- {
- struct rte_port_sched_writer_params *params =
- &out->params.sched;
-
- out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
- params->sched = app->tm[in->id];
- params->tx_burst_sz =
- app->tm_params[in->id].burst_write;
- break;
- }
-#ifdef RTE_EXEC_ENV_LINUXAPP
- case APP_PKTQ_OUT_TAP:
- {
- struct rte_port_fd_writer_params *params =
- &out->params.fd;
-
- out->type = PIPELINE_PORT_OUT_FD_WRITER;
- params->fd = app->tap[in->id];
- params->tx_burst_sz =
- app->tap_params[in->id].burst_write;
- break;
- }
-#endif
-#ifdef RTE_LIBRTE_KNI
- case APP_PKTQ_OUT_KNI:
- {
- struct app_pktq_kni_params *p_kni =
- &app->kni_params[in->id];
-
- if (p_kni->dropless == 0) {
- struct rte_port_kni_writer_params *params =
- &out->params.kni;
-
- out->type = PIPELINE_PORT_OUT_KNI_WRITER;
- params->kni = app->kni[in->id];
- params->tx_burst_sz =
- app->kni_params[in->id].burst_write;
- } else {
- struct rte_port_kni_writer_nodrop_params
- *params = &out->params.kni_nodrop;
-
- out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
- params->kni = app->kni[in->id];
- params->tx_burst_sz =
- app->kni_params[in->id].burst_write;
- params->n_retries =
- app->kni_params[in->id].n_retries;
- }
- break;
- }
-#endif /* RTE_LIBRTE_KNI */
- case APP_PKTQ_OUT_SINK:
- {
- out->type = PIPELINE_PORT_OUT_SINK;
- out->params.sink.file_name =
- app->sink_params[in->id].file_name;
- out->params.sink.max_n_pkts =
- app->sink_params[in->id].
- n_pkts_to_dump;
-
- break;
- }
- default:
- break;
- }
- }
-
- /* msgq */
- p_out->n_msgq = p_in->n_msgq_in;
-
- for (i = 0; i < p_in->n_msgq_in; i++)
- p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
-
- for (i = 0; i < p_in->n_msgq_out; i++)
- p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
-
- /* args */
- p_out->n_args = p_in->n_args;
- for (i = 0; i < p_in->n_args; i++) {
- p_out->args_name[i] = p_in->args_name[i];
- p_out->args_value[i] = p_in->args_value[i];
- }
-}
-
-static void
-app_init_pipelines(struct app_params *app)
-{
- uint32_t p_id;
-
- for (p_id = 0; p_id < app->n_pipelines; p_id++) {
- struct app_pipeline_params *params =
- &app->pipeline_params[p_id];
- struct app_pipeline_data *data = &app->pipeline_data[p_id];
- struct pipeline_type *ptype;
- struct pipeline_params pp;
-
- APP_LOG(app, HIGH, "Initializing %s ...", params->name);
-
- ptype = app_pipeline_type_find(app, params->type);
- if (ptype == NULL)
- rte_panic("Init error: Unknown pipeline type \"%s\"\n",
- params->type);
-
- app_pipeline_params_get(app, params, &pp);
-
- /* Back-end */
- data->be = NULL;
- if (ptype->be_ops->f_init) {
- data->be = ptype->be_ops->f_init(&pp, (void *) app);
-
- if (data->be == NULL)
- rte_panic("Pipeline instance \"%s\" back-end "
- "init error\n", params->name);
- }
-
- /* Front-end */
- data->fe = NULL;
- if (ptype->fe_ops->f_init) {
- data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
-
- if (data->fe == NULL)
- rte_panic("Pipeline instance \"%s\" front-end "
- "init error\n", params->name);
- }
-
- data->ptype = ptype;
-
- data->timer_period = (rte_get_tsc_hz() *
- params->timer_period) / 1000;
- }
-}
-
-static void
-app_post_init_pipelines(struct app_params *app)
-{
- uint32_t p_id;
-
- for (p_id = 0; p_id < app->n_pipelines; p_id++) {
- struct app_pipeline_params *params =
- &app->pipeline_params[p_id];
- struct app_pipeline_data *data = &app->pipeline_data[p_id];
- int status;
-
- if (data->ptype->fe_ops->f_post_init == NULL)
- continue;
-
- status = data->ptype->fe_ops->f_post_init(data->fe);
- if (status)
- rte_panic("Pipeline instance \"%s\" front-end "
- "post-init error\n", params->name);
- }
-}
-
-static void
-app_init_threads(struct app_params *app)
-{
- uint64_t time = rte_get_tsc_cycles();
- uint32_t p_id;
-
- for (p_id = 0; p_id < app->n_pipelines; p_id++) {
- struct app_pipeline_params *params =
- &app->pipeline_params[p_id];
- struct app_pipeline_data *data = &app->pipeline_data[p_id];
- struct pipeline_type *ptype;
- struct app_thread_data *t;
- struct app_thread_pipeline_data *p;
- int lcore_id;
-
- lcore_id = cpu_core_map_get_lcore_id(app->core_map,
- params->socket_id,
- params->core_id,
- params->hyper_th_id);
-
- if (lcore_id < 0)
- rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
- params->socket_id,
- params->core_id,
- (params->hyper_th_id) ? "h" : "");
-
- t = &app->thread_data[lcore_id];
-
- t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
- t->thread_req_deadline = time + t->timer_period;
-
- t->headroom_cycles = 0;
- t->headroom_time = rte_get_tsc_cycles();
- t->headroom_ratio = 0.0;
-
- t->msgq_in = app_thread_msgq_in_get(app,
- params->socket_id,
- params->core_id,
- params->hyper_th_id);
- if (t->msgq_in == NULL)
- rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
- lcore_id);
-
- t->msgq_out = app_thread_msgq_out_get(app,
- params->socket_id,
- params->core_id,
- params->hyper_th_id);
- if (t->msgq_out == NULL)
- rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
- lcore_id);
-
- ptype = app_pipeline_type_find(app, params->type);
- if (ptype == NULL)
- rte_panic("Init error: Unknown pipeline "
- "type \"%s\"\n", params->type);
-
- p = (ptype->be_ops->f_run == NULL) ?
- &t->regular[t->n_regular] :
- &t->custom[t->n_custom];
-
- p->pipeline_id = p_id;
- p->be = data->be;
- p->f_run = ptype->be_ops->f_run;
- p->f_timer = ptype->be_ops->f_timer;
- p->timer_period = data->timer_period;
- p->deadline = time + data->timer_period;
-
- data->enabled = 1;
-
- if (ptype->be_ops->f_run == NULL)
- t->n_regular++;
- else
- t->n_custom++;
- }
-}
-
-int app_init(struct app_params *app)
-{
- app_init_core_map(app);
- app_init_core_mask(app);
-
- app_init_eal(app);
- app_init_mempool(app);
- app_init_link(app);
- app_init_swq(app);
- app_init_tm(app);
- app_init_tap(app);
- app_init_kni(app);
- app_init_msgq(app);
-
- app_pipeline_common_cmd_push(app);
- app_pipeline_thread_cmd_push(app);
- app_pipeline_type_register(app, &pipeline_master);
- app_pipeline_type_register(app, &pipeline_passthrough);
- app_pipeline_type_register(app, &pipeline_flow_classification);
- app_pipeline_type_register(app, &pipeline_flow_actions);
- app_pipeline_type_register(app, &pipeline_firewall);
- app_pipeline_type_register(app, &pipeline_routing);
-
- app_init_pipelines(app);
- app_init_threads(app);
-
- return 0;
-}
-
-int app_post_init(struct app_params *app)
-{
- app_post_init_pipelines(app);
-
- return 0;
-}
-
-static int
-app_pipeline_type_cmd_push(struct app_params *app,
- struct pipeline_type *ptype)
-{
- cmdline_parse_ctx_t *cmds;
- uint32_t n_cmds, i;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (ptype == NULL))
- return -EINVAL;
-
- n_cmds = pipeline_type_cmds_count(ptype);
- if (n_cmds == 0)
- return 0;
-
- cmds = ptype->fe_ops->cmds;
-
- /* Check for available slots in the application commands array */
- if (n_cmds > APP_MAX_CMDS - app->n_cmds)
- return -ENOMEM;
-
- /* Push pipeline commands into the application */
- memcpy(&app->cmds[app->n_cmds],
- cmds,
- n_cmds * sizeof(cmdline_parse_ctx_t));
-
- for (i = 0; i < n_cmds; i++)
- app->cmds[app->n_cmds + i]->data = app;
-
- app->n_cmds += n_cmds;
- app->cmds[app->n_cmds] = NULL;
-
- return 0;
-}
-
-int
-app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
-{
- uint32_t n_cmds, i;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (ptype == NULL) ||
- (ptype->name == NULL) ||
- (strlen(ptype->name) == 0) ||
- (ptype->be_ops->f_init == NULL) ||
- (ptype->be_ops->f_timer == NULL))
- return -EINVAL;
-
- /* Check for duplicate entry */
- for (i = 0; i < app->n_pipeline_types; i++)
- if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
- return -EEXIST;
-
- /* Check for resource availability */
- n_cmds = pipeline_type_cmds_count(ptype);
- if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
- (n_cmds > APP_MAX_CMDS - app->n_cmds))
- return -ENOMEM;
-
- /* Copy pipeline type */
- memcpy(&app->pipeline_type[app->n_pipeline_types++],
- ptype,
- sizeof(struct pipeline_type));
-
- /* Copy CLI commands */
- if (n_cmds)
- app_pipeline_type_cmd_push(app, ptype);
-
- return 0;
-}
-
-struct
-pipeline_type *app_pipeline_type_find(struct app_params *app, char *name)
-{
- uint32_t i;
-
- for (i = 0; i < app->n_pipeline_types; i++)
- if (strcmp(app->pipeline_type[i].name, name) == 0)
- return &app->pipeline_type[i];
-
- return NULL;
-}
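
The 1927-line init.c deleted above is not lost functionality: the 18.08 rewrite of ip_pipeline splits its responsibilities into small per-resource modules, each exposing the same init/find/create pattern over a TAILQ of named objects. The new kni.c and link.c follow immediately below; the full module list (cli, conn, kni, link, mempool, pipeline, swq, tap, thread, tmgr) can be read off the include block of the rewritten main.c later in this patch.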
diff --git a/examples/ip_pipeline/kni.c b/examples/ip_pipeline/kni.c
new file mode 100644
index 00000000..7e5ff054
--- /dev/null
+++ b/examples/ip_pipeline/kni.c
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_bus_pci.h>
+#include <rte_string_fns.h>
+
+#include "kni.h"
+#include "mempool.h"
+#include "link.h"
+
+static struct kni_list kni_list;
+
+#ifndef KNI_MAX
+#define KNI_MAX 16
+#endif
+
+int
+kni_init(void)
+{
+ TAILQ_INIT(&kni_list);
+
+#ifdef RTE_LIBRTE_KNI
+ rte_kni_init(KNI_MAX);
+#endif
+
+ return 0;
+}
+
+struct kni *
+kni_find(const char *name)
+{
+ struct kni *kni;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(kni, &kni_list, node)
+ if (strcmp(kni->name, name) == 0)
+ return kni;
+
+ return NULL;
+}
+
+#ifndef RTE_LIBRTE_KNI
+
+struct kni *
+kni_create(const char *name __rte_unused,
+ struct kni_params *params __rte_unused)
+{
+ return NULL;
+}
+
+void
+kni_handle_request(void)
+{
+ return;
+}
+
+#else
+
+static int
+kni_config_network_interface(uint16_t port_id, uint8_t if_up)
+{
+ int ret = 0;
+
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return -EINVAL;
+
+ ret = (if_up) ?
+ rte_eth_dev_set_link_up(port_id) :
+ rte_eth_dev_set_link_down(port_id);
+
+ return ret;
+}
+
+static int
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
+{
+ int ret;
+
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return -EINVAL;
+
+ if (new_mtu > ETHER_MAX_LEN)
+ return -EINVAL;
+
+ /* Set new MTU */
+ ret = rte_eth_dev_set_mtu(port_id, new_mtu);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+struct kni *
+kni_create(const char *name, struct kni_params *params)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_kni_conf kni_conf;
+ struct rte_kni_ops kni_ops;
+ struct kni *kni;
+ struct mempool *mempool;
+ struct link *link;
+ struct rte_kni *k;
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus = NULL;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ kni_find(name) ||
+ (params == NULL))
+ return NULL;
+
+ mempool = mempool_find(params->mempool_name);
+ link = link_find(params->link_name);
+ if ((mempool == NULL) ||
+ (link == NULL))
+ return NULL;
+
+ /* Resource create */
+ rte_eth_dev_info_get(link->port_id, &dev_info);
+
+ memset(&kni_conf, 0, sizeof(kni_conf));
+ snprintf(kni_conf.name, RTE_KNI_NAMESIZE, "%s", name);
+ kni_conf.force_bind = params->force_bind;
+ kni_conf.core_id = params->thread_id;
+ kni_conf.group_id = link->port_id;
+ kni_conf.mbuf_size = mempool->buffer_size;
+ if (dev_info.device)
+ bus = rte_bus_find_by_device(dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(dev_info.device);
+ kni_conf.addr = pci_dev->addr;
+ kni_conf.id = pci_dev->id;
+ }
+
+ memset(&kni_ops, 0, sizeof(kni_ops));
+ kni_ops.port_id = link->port_id;
+ kni_ops.config_network_if = kni_config_network_interface;
+ kni_ops.change_mtu = kni_change_mtu;
+
+ k = rte_kni_alloc(mempool->m, &kni_conf, &kni_ops);
+ if (k == NULL)
+ return NULL;
+
+ /* Node allocation */
+ kni = calloc(1, sizeof(struct kni));
+ if (kni == NULL)
+ return NULL;
+
+ /* Node fill in */
+ strlcpy(kni->name, name, sizeof(kni->name));
+ kni->k = k;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&kni_list, kni, node);
+
+ return kni;
+}
+
+void
+kni_handle_request(void)
+{
+ struct kni *kni;
+
+ TAILQ_FOREACH(kni, &kni_list, node)
+ rte_kni_handle_request(kni->k);
+}
+
+#endif
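
Taken together with kni.h below, the API is the usual registry triple (kni_init, kni_find, kni_create) plus a request pump. A hypothetical caller might look like the sketch below; the object names "LINK0"/"MEMPOOL0"/"KNI0" and the single kni_handle_request() call are illustrative, not from the patch.

#include <stddef.h>
#include "kni.h"

/* Sketch: create a kernel NIC interface on top of an existing link.
 * Assumes objects named "LINK0" and "MEMPOOL0" were created earlier
 * (illustrative names).
 */
static struct kni *
kni_example(void)
{
	struct kni_params params = {
		.link_name = "LINK0",
		.mempool_name = "MEMPOOL0",
		.force_bind = 0,	/* do not pin the kernel thread */
		.thread_id = 0,		/* only used when force_bind != 0 */
	};
	struct kni *kni;

	if (kni_init())
		return NULL;

	kni = kni_create("KNI0", &params);
	if (kni == NULL)
		return NULL;

	/* Call periodically (e.g. from the master loop) so MTU and
	 * link up/down requests from the kernel get serviced.
	 */
	kni_handle_request();

	return kni;
}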
diff --git a/examples/ip_pipeline/kni.h b/examples/ip_pipeline/kni.h
new file mode 100644
index 00000000..c3856456
--- /dev/null
+++ b/examples/ip_pipeline/kni.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_KNI_H_
+#define _INCLUDE_KNI_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#ifdef RTE_LIBRTE_KNI
+#include <rte_kni.h>
+#endif
+
+#include "common.h"
+
+struct kni {
+ TAILQ_ENTRY(kni) node;
+ char name[NAME_SIZE];
+#ifdef RTE_LIBRTE_KNI
+ struct rte_kni *k;
+#endif
+};
+
+TAILQ_HEAD(kni_list, kni);
+
+int
+kni_init(void);
+
+struct kni *
+kni_find(const char *name);
+
+struct kni_params {
+ const char *link_name;
+ const char *mempool_name;
+ int force_bind;
+ uint32_t thread_id;
+};
+
+struct kni *
+kni_create(const char *name, struct kni_params *params);
+
+void
+kni_handle_request(void);
+
+#endif /* _INCLUDE_KNI_H_ */
diff --git a/examples/ip_pipeline/link.c b/examples/ip_pipeline/link.c
new file mode 100644
index 00000000..392a890f
--- /dev/null
+++ b/examples/ip_pipeline/link.c
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "link.h"
+#include "mempool.h"
+
+static struct link_list link_list;
+
+int
+link_init(void)
+{
+ TAILQ_INIT(&link_list);
+
+ return 0;
+}
+
+struct link *
+link_find(const char *name)
+{
+ struct link *link;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(link, &link_list, node)
+ if (strcmp(link->name, name) == 0)
+ return link;
+
+ return NULL;
+}
+
+struct link *
+link_next(struct link *link)
+{
+ return (link == NULL) ? TAILQ_FIRST(&link_list) : TAILQ_NEXT(link, node);
+}
+
+static struct rte_eth_conf port_conf_default = {
+ .link_speeds = 0,
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .split_hdr_size = 0, /* Header split buffer size */
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_key_len = 40,
+ .rss_hf = 0,
+ },
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+ .lpbk_mode = 0,
+};
+
+#define RETA_CONF_SIZE (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+
+static int
+rss_setup(uint16_t port_id,
+ uint16_t reta_size,
+ struct link_params_rss *rss)
+{
+ struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
+ uint32_t i;
+ int status;
+
+ /* RETA setting */
+ memset(reta_conf, 0, sizeof(reta_conf));
+
+ for (i = 0; i < reta_size; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < reta_size; i++) {
+ uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
+ uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+ uint32_t rss_qs_pos = i % rss->n_queues;
+
+ reta_conf[reta_id].reta[reta_pos] =
+ (uint16_t) rss->queue_id[rss_qs_pos];
+ }
+
+ /* RETA update */
+ status = rte_eth_dev_rss_reta_update(port_id,
+ reta_conf,
+ reta_size);
+
+ return status;
+}
+
+struct link *
+link_create(const char *name, struct link_params *params)
+{
+ struct rte_eth_dev_info port_info;
+ struct rte_eth_conf port_conf;
+ struct link *link;
+ struct link_params_rss *rss;
+ struct mempool *mempool;
+ uint32_t cpu_id, i;
+ int status;
+ uint16_t port_id;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ link_find(name) ||
+ (params == NULL) ||
+ (params->rx.n_queues == 0) ||
+ (params->rx.queue_size == 0) ||
+ (params->tx.n_queues == 0) ||
+ (params->tx.queue_size == 0))
+ return NULL;
+
+ port_id = params->port_id;
+ if (params->dev_name) {
+ status = rte_eth_dev_get_port_by_name(params->dev_name,
+ &port_id);
+
+ if (status)
+ return NULL;
+ } else
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return NULL;
+
+ rte_eth_dev_info_get(port_id, &port_info);
+
+ mempool = mempool_find(params->rx.mempool_name);
+ if (mempool == NULL)
+ return NULL;
+
+ rss = params->rx.rss;
+ if (rss) {
+ if ((port_info.reta_size == 0) ||
+ (port_info.reta_size > ETH_RSS_RETA_SIZE_512))
+ return NULL;
+
+ if ((rss->n_queues == 0) ||
+ (rss->n_queues >= LINK_RXQ_RSS_MAX))
+ return NULL;
+
+ for (i = 0; i < rss->n_queues; i++)
+ if (rss->queue_id[i] >= port_info.max_rx_queues)
+ return NULL;
+ }
+
+ /**
+ * Resource create
+ */
+ /* Port */
+ memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
+ if (rss) {
+ port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ port_conf.rx_adv_conf.rss_conf.rss_hf =
+ (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
+ port_info.flow_type_rss_offloads;
+ }
+
+ cpu_id = (uint32_t) rte_eth_dev_socket_id(port_id);
+ if (cpu_id == (uint32_t) SOCKET_ID_ANY)
+ cpu_id = 0;
+
+ status = rte_eth_dev_configure(
+ port_id,
+ params->rx.n_queues,
+ params->tx.n_queues,
+ &port_conf);
+
+ if (status < 0)
+ return NULL;
+
+ if (params->promiscuous)
+ rte_eth_promiscuous_enable(port_id);
+
+ /* Port RX */
+ for (i = 0; i < params->rx.n_queues; i++) {
+ status = rte_eth_rx_queue_setup(
+ port_id,
+ i,
+ params->rx.queue_size,
+ cpu_id,
+ NULL,
+ mempool->m);
+
+ if (status < 0)
+ return NULL;
+ }
+
+ /* Port TX */
+ for (i = 0; i < params->tx.n_queues; i++) {
+ status = rte_eth_tx_queue_setup(
+ port_id,
+ i,
+ params->tx.queue_size,
+ cpu_id,
+ NULL);
+
+ if (status < 0)
+ return NULL;
+ }
+
+ /* Port start */
+ status = rte_eth_dev_start(port_id);
+ if (status < 0)
+ return NULL;
+
+ if (rss) {
+ status = rss_setup(port_id, port_info.reta_size, rss);
+
+ if (status) {
+ rte_eth_dev_stop(port_id);
+ return NULL;
+ }
+ }
+
+ /* Port link up */
+ status = rte_eth_dev_set_link_up(port_id);
+ if ((status < 0) && (status != -ENOTSUP)) {
+ rte_eth_dev_stop(port_id);
+ return NULL;
+ }
+
+ /* Node allocation */
+ link = calloc(1, sizeof(struct link));
+ if (link == NULL) {
+ rte_eth_dev_stop(port_id);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(link->name, name, sizeof(link->name));
+ link->port_id = port_id;
+ link->n_rxq = params->rx.n_queues;
+ link->n_txq = params->tx.n_queues;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&link_list, link, node);
+
+ return link;
+}
+
+int
+link_is_up(const char *name)
+{
+ struct rte_eth_link link_params;
+ struct link *link;
+
+ /* Check input params */
+ if (name == NULL)
+ return 0;
+
+ link = link_find(name);
+ if (link == NULL)
+ return 0;
+
+ /* Resource */
+ rte_eth_link_get(link->port_id, &link_params);
+
+ return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
+}
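
rss_setup() above programs the redirection table round-robin: RETA entry i maps to queue_id[i % n_queues], in groups of RTE_RETA_GROUP_SIZE entries with all mask bits set. A device-free model of that fill (the sizes below are illustrative; real NICs report a reta_size of 64 to 512):

#include <stdio.h>

/* Model of the round-robin RETA fill performed by rss_setup(). */
int main(void)
{
	const unsigned int queue_id[] = {0, 1, 2};	/* RSS queue set */
	const unsigned int n_queues = 3;
	const unsigned int reta_size = 16;		/* device-reported */
	unsigned int i;

	for (i = 0; i < reta_size; i++)
		printf("reta[%2u] -> rxq %u\n", i, queue_id[i % n_queues]);

	return 0;
}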
diff --git a/examples/ip_pipeline/link.h b/examples/ip_pipeline/link.h
new file mode 100644
index 00000000..34ff1149
--- /dev/null
+++ b/examples/ip_pipeline/link.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_LINK_H_
+#define _INCLUDE_LINK_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include "common.h"
+
+#ifndef LINK_RXQ_RSS_MAX
+#define LINK_RXQ_RSS_MAX 16
+#endif
+
+struct link {
+ TAILQ_ENTRY(link) node;
+ char name[NAME_SIZE];
+ uint16_t port_id;
+ uint32_t n_rxq;
+ uint32_t n_txq;
+};
+
+TAILQ_HEAD(link_list, link);
+
+int
+link_init(void);
+
+struct link *
+link_find(const char *name);
+
+struct link *
+link_next(struct link *link);
+
+struct link_params_rss {
+ uint32_t queue_id[LINK_RXQ_RSS_MAX];
+ uint32_t n_queues;
+};
+
+struct link_params {
+ const char *dev_name;
+ uint16_t port_id; /**< Valid only when *dev_name* is NULL. */
+
+ struct {
+ uint32_t n_queues;
+ uint32_t queue_size;
+ const char *mempool_name;
+ struct link_params_rss *rss;
+ } rx;
+
+ struct {
+ uint32_t n_queues;
+ uint32_t queue_size;
+ } tx;
+
+ int promiscuous;
+};
+
+struct link *
+link_create(const char *name, struct link_params *params);
+
+int
+link_is_up(const char *name);
+
+#endif /* _INCLUDE_LINK_H_ */
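
For orientation, a minimal usage sketch of the link API above. The device, mempool and link names are illustrative: a mempool named "MEMPOOL0" is assumed to have been registered earlier via mempool_create(), and the device is assumed to be a port already probed by the EAL:

#include "link.h"

static int
link0_setup(void)
{
    /* One RX and one TX queue, no RSS. */
    struct link_params p = {
        .dev_name = "0000:02:00.0",    /* hypothetical device */
        .rx = {
            .n_queues = 1,
            .queue_size = 128,
            .mempool_name = "MEMPOOL0",
            .rss = NULL,
        },
        .tx = {
            .n_queues = 1,
            .queue_size = 512,
        },
        .promiscuous = 1,
    };

    /* link_create() configures, starts and brings up the port. */
    if (link_create("LINK0", &p) == NULL)
        return -1;

    return link_is_up("LINK0") ? 0 : -1;
}
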
diff --git a/examples/ip_pipeline/main.c b/examples/ip_pipeline/main.c
index a44cf9a3..a69facee 100644
--- a/examples/ip_pipeline/main.c
+++ b/examples/ip_pipeline/main.c
@@ -1,35 +1,260 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
-#include "app.h"
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <getopt.h>
-static struct app_params app;
+#include <rte_launch.h>
+#include <rte_eal.h>
+
+#include "cli.h"
+#include "conn.h"
+#include "kni.h"
+#include "link.h"
+#include "mempool.h"
+#include "pipeline.h"
+#include "swq.h"
+#include "tap.h"
+#include "thread.h"
+#include "tmgr.h"
+
+static const char usage[] =
+ "%s EAL_ARGS -- [-h HOST] [-p PORT] [-s SCRIPT]\n";
+
+static const char welcome[] =
+ "\n"
+ "Welcome to IP Pipeline!\n"
+ "\n";
+
+static const char prompt[] = "pipeline> ";
+
+static struct app_params {
+ struct conn_params conn;
+ char *script_name;
+} app = {
+ .conn = {
+ .welcome = welcome,
+ .prompt = prompt,
+ .addr = "0.0.0.0",
+ .port = 8086,
+ .buf_size = 1024 * 1024,
+ .msg_in_len_max = 1024,
+ .msg_out_len_max = 1024 * 1024,
+ .msg_handle = cli_process,
+ },
+ .script_name = NULL,
+};
+
+static int
+parse_args(int argc, char **argv)
+{
+ char *app_name = argv[0];
+ struct option lgopts[] = {
+ { NULL, 0, 0, 0 }
+ };
+ int opt, option_index;
+ int h_present, p_present, s_present, n_args, i;
+
+ /* Skip EAL input args */
+ n_args = argc;
+ for (i = 0; i < n_args; i++)
+ if (strcmp(argv[i], "--") == 0) {
+ argc -= i;
+ argv += i;
+ break;
+ }
+
+ if (i == n_args)
+ return 0;
+
+ /* Parse args */
+ h_present = 0;
+ p_present = 0;
+ s_present = 0;
+
+ while ((opt = getopt_long(argc, argv, "h:p:s:", lgopts, &option_index))
+ != EOF)
+ switch (opt) {
+ case 'h':
+ if (h_present) {
+ printf("Error: Multiple -h arguments\n");
+ return -1;
+ }
+ h_present = 1;
+
+ if (!strlen(optarg)) {
+ printf("Error: Argument for -h not provided\n");
+ return -1;
+ }
+
+ app.conn.addr = strdup(optarg);
+ if (app.conn.addr == NULL) {
+ printf("Error: Not enough memory\n");
+ return -1;
+ }
+ break;
+
+ case 'p':
+ if (p_present) {
+ printf("Error: Multiple -p arguments\n");
+ return -1;
+ }
+ p_present = 1;
+
+ if (!strlen(optarg)) {
+ printf("Error: Argument for -p not provided\n");
+ return -1;
+ }
+
+ app.conn.port = (uint16_t) atoi(optarg);
+ break;
+
+ case 's':
+ if (s_present) {
+ printf("Error: Multiple -s arguments\n");
+ return -1;
+ }
+ s_present = 1;
+
+ if (!strlen(optarg)) {
+ printf("Error: Argument for -s not provided\n");
+ return -1;
+ }
+
+ app.script_name = strdup(optarg);
+ if (app.script_name == NULL) {
+ printf("Error: Not enough memory\n");
+ return -1;
+ }
+ break;
+
+ default:
+ printf(usage, app_name);
+ return -1;
+ }
+
+ optind = 1; /* reset getopt lib */
+
+ return 0;
+}
int
main(int argc, char **argv)
{
- rte_openlog_stream(stderr);
+ struct conn *conn;
+ int status;
+
+ /* Parse application arguments */
+ status = parse_args(argc, argv);
+ if (status < 0)
+ return status;
+
+ /* EAL */
+ status = rte_eal_init(argc, argv);
+ if (status < 0) {
+ printf("Error: EAL initialization failed (%d)\n", status);
+ return status;
+ }
+
+ /* Connectivity */
+ conn = conn_init(&app.conn);
+ if (conn == NULL) {
+ printf("Error: Connectivity initialization failed (%d)\n",
+ status);
+ return status;
+ };
- /* Config */
- app_config_init(&app);
+ /* Mempool */
+ status = mempool_init();
+ if (status) {
+ printf("Error: Mempool initialization failed (%d)\n", status);
+ return status;
+ }
- app_config_args(&app, argc, argv);
+ /* Link */
+ status = link_init();
+ if (status) {
+ printf("Error: Link initialization failed (%d)\n", status);
+ return status;
+ }
- app_config_preproc(&app);
+ /* SWQ */
+ status = swq_init();
+ if (status) {
+ printf("Error: SWQ initialization failed (%d)\n", status);
+ return status;
+ }
- app_config_parse(&app, app.parser_file);
+ /* Traffic Manager */
+ status = tmgr_init();
+ if (status) {
+ printf("Error: TMGR initialization failed (%d)\n", status);
+ return status;
+ }
- app_config_check(&app);
+ /* TAP */
+ status = tap_init();
+ if (status) {
+ printf("Error: TAP initialization failed (%d)\n", status);
+ return status;
+ }
- /* Init */
- app_init(&app);
+ /* KNI */
+ status = kni_init();
+ if (status) {
+ printf("Error: KNI initialization failed (%d)\n", status);
+ return status;
+ }
+
+ /* Action */
+ status = port_in_action_profile_init();
+ if (status) {
+ printf("Error: Input port action profile initialization failed (%d)\n", status);
+ return status;
+ }
+
+ status = table_action_profile_init();
+ if (status) {
+ printf("Error: Action profile initialization failed (%d)\n",
+ status);
+ return status;
+ }
+
+ /* Pipeline */
+ status = pipeline_init();
+ if (status) {
+ printf("Error: Pipeline initialization failed (%d)\n", status);
+ return status;
+ }
+
+ /* Thread */
+ status = thread_init();
+ if (status) {
+ printf("Error: Thread initialization failed (%d)\n", status);
+ return status;
+ }
- /* Run-time */
rte_eal_mp_remote_launch(
- app_thread,
- (void *) &app,
- CALL_MASTER);
+ thread_main,
+ NULL,
+ SKIP_MASTER);
- return 0;
+ /* Script */
+ if (app.script_name)
+ cli_script_process(app.script_name,
+ app.conn.msg_in_len_max,
+ app.conn.msg_out_len_max);
+
+ /* Dispatch loop */
+ for ( ; ; ) {
+ conn_poll_for_conn(conn);
+
+ conn_poll_for_msg(conn);
+
+ kni_handle_request();
+ }
}
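
The argument handling above hinges on the "--" separator: rte_eal_init() consumes everything before it, while parse_args() scans forward to it and hands only what follows to getopt_long(). A standalone sketch of the same argv-shifting idiom (the invocation shown in the comment is illustrative):

#include <stdio.h>
#include <string.h>

/* E.g. "./ip_pipeline EAL_ARGS -- -s script.cli" lands here. */
int
main(int argc, char **argv)
{
    int i, app_argc = 0;
    char **app_argv = NULL;

    /* Same scan as parse_args(): find "--", then shift. */
    for (i = 0; i < argc; i++)
        if (strcmp(argv[i], "--") == 0) {
            app_argc = argc - i;
            app_argv = argv + i;
            break;
        }

    /* app_argv[0] is "--"; application arguments follow it. */
    for (i = 1; i < app_argc; i++)
        printf("application argument: %s\n", app_argv[i]);

    return 0;
}
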
diff --git a/examples/ip_pipeline/mempool.c b/examples/ip_pipeline/mempool.c
new file mode 100644
index 00000000..f5d2a7d1
--- /dev/null
+++ b/examples/ip_pipeline/mempool.c
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+
+#include "mempool.h"
+
+#define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+static struct mempool_list mempool_list;
+
+int
+mempool_init(void)
+{
+ TAILQ_INIT(&mempool_list);
+
+ return 0;
+}
+
+struct mempool *
+mempool_find(const char *name)
+{
+ struct mempool *mempool;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(mempool, &mempool_list, node)
+ if (strcmp(mempool->name, name) == 0)
+ return mempool;
+
+ return NULL;
+}
+
+struct mempool *
+mempool_create(const char *name, struct mempool_params *params)
+{
+ struct mempool *mempool;
+ struct rte_mempool *m;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ mempool_find(name) ||
+ (params == NULL) ||
+ (params->buffer_size < BUFFER_SIZE_MIN) ||
+ (params->pool_size == 0))
+ return NULL;
+
+ /* Resource create */
+ m = rte_pktmbuf_pool_create(
+ name,
+ params->pool_size,
+ params->cache_size,
+ 0,
+ params->buffer_size - sizeof(struct rte_mbuf),
+ params->cpu_id);
+
+ if (m == NULL)
+ return NULL;
+
+ /* Node allocation */
+ mempool = calloc(1, sizeof(struct mempool));
+ if (mempool == NULL) {
+ rte_mempool_free(m);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(mempool->name, name, sizeof(mempool->name));
+ mempool->m = m;
+ mempool->buffer_size = params->buffer_size;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&mempool_list, mempool, node);
+
+ return mempool;
+}
diff --git a/examples/ip_pipeline/mempool.h b/examples/ip_pipeline/mempool.h
new file mode 100644
index 00000000..bd46a11c
--- /dev/null
+++ b/examples/ip_pipeline/mempool.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_MEMPOOL_H_
+#define _INCLUDE_MEMPOOL_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_mempool.h>
+
+#include "common.h"
+
+struct mempool {
+ TAILQ_ENTRY(mempool) node;
+ char name[NAME_SIZE];
+ struct rte_mempool *m;
+ uint32_t buffer_size;
+};
+
+TAILQ_HEAD(mempool_list, mempool);
+
+int
+mempool_init(void);
+
+struct mempool *
+mempool_find(const char *name);
+
+struct mempool_params {
+ uint32_t buffer_size;
+ uint32_t pool_size;
+ uint32_t cache_size;
+ uint32_t cpu_id;
+};
+
+struct mempool *
+mempool_create(const char *name, struct mempool_params *params);
+
+#endif /* _INCLUDE_MEMPOOL_H_ */
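
A short sketch of driving this wrapper. Since mempool_create() passes buffer_size minus sizeof(struct rte_mbuf) to rte_pktmbuf_pool_create() as the data room, buffer_size is the full per-buffer footprint, mbuf metadata and headroom included; the sizes and the pool name below are illustrative:

#include <rte_mbuf.h>

#include "mempool.h"

static int
mempool0_setup(void)
{
    struct mempool_params p = {
        /* 2 KB of packet data room per buffer. */
        .buffer_size = 2048 + sizeof(struct rte_mbuf) +
            RTE_PKTMBUF_HEADROOM,
        .pool_size = 32 * 1024,
        .cache_size = 256,
        .cpu_id = 0,
    };

    return (mempool_create("MEMPOOL0", &p) == NULL) ? -1 : 0;
}
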
diff --git a/examples/ip_pipeline/meson.build b/examples/ip_pipeline/meson.build
index 748c9ae0..a9f2ea44 100644
--- a/examples/ip_pipeline/meson.build
+++ b/examples/ip_pipeline/meson.build
@@ -1,35 +1,25 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2017-2018 Intel Corporation
# meson file, for building this example as part of a main DPDK build.
#
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
-deps += ['cfgfile', 'pipeline', 'bus_pci']
-includes += include_directories('pipeline')
+deps += ['pipeline', 'bus_pci']
+allow_experimental_apis = true
sources = files(
- 'config_check.c',
- 'config_parse.c',
- 'config_parse_tm.c',
- 'cpu_core_map.c',
- 'init.c',
+ 'action.c',
+ 'cli.c',
+ 'conn.c',
+ 'kni.c',
+ 'link.c',
'main.c',
+ 'mempool.c',
'parser.c',
+ 'pipeline.c',
+ 'swq.c',
+ 'tap.c',
'thread.c',
- 'thread_fe.c',
- 'pipeline/pipeline_common_be.c',
- 'pipeline/pipeline_common_fe.c',
- 'pipeline/pipeline_firewall_be.c',
- 'pipeline/pipeline_firewall.c',
- 'pipeline/pipeline_flow_actions_be.c',
- 'pipeline/pipeline_flow_actions.c',
- 'pipeline/pipeline_flow_classification_be.c',
- 'pipeline/pipeline_flow_classification.c',
- 'pipeline/pipeline_master_be.c',
- 'pipeline/pipeline_master.c',
- 'pipeline/pipeline_passthrough_be.c',
- 'pipeline/pipeline_passthrough.c',
- 'pipeline/pipeline_routing_be.c',
- 'pipeline/pipeline_routing.c',
+ 'tmgr.c'
)
diff --git a/examples/ip_pipeline/parser.c b/examples/ip_pipeline/parser.c
index 0901e9c6..ffcdeb3a 100644
--- a/examples/ip_pipeline/parser.c
+++ b/examples/ip_pipeline/parser.c
@@ -36,10 +36,8 @@
#include <sys/wait.h>
#include <rte_errno.h>
-#include <rte_cfgfile.h>
#include <rte_string_fns.h>
-#include "app.h"
#include "parser.h"
static uint32_t
@@ -596,10 +594,8 @@ parse_mac_addr(const char *token, struct ether_addr *addr)
}
int
-parse_pipeline_core(uint32_t *socket,
- uint32_t *core,
- uint32_t *ht,
- const char *entry)
+parse_cpu_core(const char *entry,
+ struct cpu_core_params *p)
{
size_t num_len;
char num[8];
@@ -609,6 +605,9 @@ parse_pipeline_core(uint32_t *socket,
const char *next = skip_white_spaces(entry);
char type;
+ if (p == NULL)
+ return -EINVAL;
+
/* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
while (*next != '\0') {
/* If everything is parsed, nothing should be left */
@@ -682,8 +681,8 @@ parse_pipeline_core(uint32_t *socket,
}
}
- *socket = s;
- *core = c;
- *ht = h;
+ p->socket_id = s;
+ p->core_id = c;
+ p->thread_id = h;
return 0;
}
diff --git a/examples/ip_pipeline/parser.h b/examples/ip_pipeline/parser.h
index 5c421d27..261a8c85 100644
--- a/examples/ip_pipeline/parser.h
+++ b/examples/ip_pipeline/parser.h
@@ -50,6 +50,14 @@ int parse_ipv6_addr(const char *token, struct in6_addr *ipv6);
int parse_mac_addr(const char *token, struct ether_addr *addr);
int parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels);
+struct cpu_core_params {
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t thread_id;
+};
+
+int parse_cpu_core(const char *entry, struct cpu_core_params *p);
+
int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
#endif
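
A brief usage sketch of the reworked entry point; per the parser above, the input is either a plain core number or the [sX][cY][h] form (the strings below are illustrative):

#include <stdio.h>

#include "parser.h"

static void
cpu_core_demo(void)
{
    struct cpu_core_params p;

    /* "s0c2h": socket 0, core 2, hyper-thread flag set. */
    if (parse_cpu_core("s0c2h", &p) == 0)
        printf("socket %u, core %u, ht %u\n",
            p.socket_id, p.core_id, p.thread_id);

    /* Plain <CORE> form. */
    if (parse_cpu_core("2", &p) == 0)
        printf("core %u\n", p.core_id);
}
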
diff --git a/examples/ip_pipeline/pipeline.c b/examples/ip_pipeline/pipeline.c
new file mode 100644
index 00000000..43fe8677
--- /dev/null
+++ b/examples/ip_pipeline/pipeline.c
@@ -0,0 +1,988 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include <rte_string_fns.h>
+#include <rte_port_ethdev.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_port_kni.h>
+#endif
+#include <rte_port_ring.h>
+#include <rte_port_source_sink.h>
+#include <rte_port_fd.h>
+#include <rte_port_sched.h>
+
+#include <rte_table_acl.h>
+#include <rte_table_array.h>
+#include <rte_table_hash.h>
+#include <rte_table_lpm.h>
+#include <rte_table_lpm_ipv6.h>
+#include <rte_table_stub.h>
+
+#ifdef RTE_LIBRTE_KNI
+#include "kni.h"
+#endif
+#include "link.h"
+#include "mempool.h"
+#include "pipeline.h"
+#include "tap.h"
+#include "tmgr.h"
+#include "swq.h"
+
+#include "hash_func.h"
+
+#ifndef PIPELINE_MSGQ_SIZE
+#define PIPELINE_MSGQ_SIZE 64
+#endif
+
+#ifndef TABLE_LPM_NUMBER_TBL8
+#define TABLE_LPM_NUMBER_TBL8 256
+#endif
+
+static struct pipeline_list pipeline_list;
+
+int
+pipeline_init(void)
+{
+ TAILQ_INIT(&pipeline_list);
+
+ return 0;
+}
+
+struct pipeline *
+pipeline_find(const char *name)
+{
+ struct pipeline *pipeline;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(pipeline, &pipeline_list, node)
+ if (strcmp(name, pipeline->name) == 0)
+ return pipeline;
+
+ return NULL;
+}
+
+struct pipeline *
+pipeline_create(const char *name, struct pipeline_params *params)
+{
+ char msgq_name[NAME_MAX];
+ struct rte_pipeline_params pp;
+ struct pipeline *pipeline;
+ struct rte_pipeline *p;
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ pipeline_find(name) ||
+ (params == NULL) ||
+ (params->timer_period_ms == 0))
+ return NULL;
+
+ /* Resource create */
+ snprintf(msgq_name, sizeof(msgq_name), "%s-MSGQ-REQ", name);
+
+ msgq_req = rte_ring_create(msgq_name,
+ PIPELINE_MSGQ_SIZE,
+ params->cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (msgq_req == NULL)
+ return NULL;
+
+ snprintf(msgq_name, sizeof(msgq_name), "%s-MSGQ-RSP", name);
+
+ msgq_rsp = rte_ring_create(msgq_name,
+ PIPELINE_MSGQ_SIZE,
+ params->cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (msgq_rsp == NULL) {
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ pp.name = name;
+ pp.socket_id = (int) params->cpu_id;
+ pp.offset_port_id = params->offset_port_id;
+
+ p = rte_pipeline_create(&pp);
+ if (p == NULL) {
+ rte_ring_free(msgq_rsp);
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ /* Node allocation */
+ pipeline = calloc(1, sizeof(struct pipeline));
+ if (pipeline == NULL) {
+ rte_pipeline_free(p);
+ rte_ring_free(msgq_rsp);
+ rte_ring_free(msgq_req);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(pipeline->name, name, sizeof(pipeline->name));
+ pipeline->p = p;
+ pipeline->n_ports_in = 0;
+ pipeline->n_ports_out = 0;
+ pipeline->n_tables = 0;
+ pipeline->msgq_req = msgq_req;
+ pipeline->msgq_rsp = msgq_rsp;
+ pipeline->timer_period_ms = params->timer_period_ms;
+ pipeline->enabled = 0;
+ pipeline->cpu_id = params->cpu_id;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&pipeline_list, pipeline, node);
+
+ return pipeline;
+}
+
+int
+pipeline_port_in_create(const char *pipeline_name,
+ struct port_in_params *params,
+ int enabled)
+{
+ struct rte_pipeline_port_in_params p;
+
+ union {
+ struct rte_port_ethdev_reader_params ethdev;
+ struct rte_port_ring_reader_params ring;
+ struct rte_port_sched_reader_params sched;
+ struct rte_port_fd_reader_params fd;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_reader_params kni;
+#endif
+ struct rte_port_source_params source;
+ } pp;
+
+ struct pipeline *pipeline;
+ struct port_in *port_in;
+ struct port_in_action_profile *ap;
+ struct rte_port_in_action *action;
+ uint32_t port_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (params == NULL) ||
+ (params->burst_size == 0) ||
+ (params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX))
+ return -1;
+
+ pipeline = pipeline_find(pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ ap = NULL;
+ if (params->action_profile_name) {
+ ap = port_in_action_profile_find(params->action_profile_name);
+ if (ap == NULL)
+ return -1;
+ }
+
+ switch (params->type) {
+ case PORT_IN_RXQ:
+ {
+ struct link *link;
+
+ link = link_find(params->dev_name);
+ if (link == NULL)
+ return -1;
+
+ if (params->rxq.queue_id >= link->n_rxq)
+ return -1;
+
+ pp.ethdev.port_id = link->port_id;
+ pp.ethdev.queue_id = params->rxq.queue_id;
+
+ p.ops = &rte_port_ethdev_reader_ops;
+ p.arg_create = &pp.ethdev;
+ break;
+ }
+
+ case PORT_IN_SWQ:
+ {
+ struct swq *swq;
+
+ swq = swq_find(params->dev_name);
+ if (swq == NULL)
+ return -1;
+
+ pp.ring.ring = swq->r;
+
+ p.ops = &rte_port_ring_reader_ops;
+ p.arg_create = &pp.ring;
+ break;
+ }
+
+ case PORT_IN_TMGR:
+ {
+ struct tmgr_port *tmgr_port;
+
+ tmgr_port = tmgr_port_find(params->dev_name);
+ if (tmgr_port == NULL)
+ return -1;
+
+ pp.sched.sched = tmgr_port->s;
+
+ p.ops = &rte_port_sched_reader_ops;
+ p.arg_create = &pp.sched;
+ break;
+ }
+
+ case PORT_IN_TAP:
+ {
+ struct tap *tap;
+ struct mempool *mempool;
+
+ tap = tap_find(params->dev_name);
+ mempool = mempool_find(params->tap.mempool_name);
+ if ((tap == NULL) || (mempool == NULL))
+ return -1;
+
+ pp.fd.fd = tap->fd;
+ pp.fd.mempool = mempool->m;
+ pp.fd.mtu = params->tap.mtu;
+
+ p.ops = &rte_port_fd_reader_ops;
+ p.arg_create = &pp.fd;
+ break;
+ }
+
+#ifdef RTE_LIBRTE_KNI
+ case PORT_IN_KNI:
+ {
+ struct kni *kni;
+
+ kni = kni_find(params->dev_name);
+ if (kni == NULL)
+ return -1;
+
+ pp.kni.kni = kni->k;
+
+ p.ops = &rte_port_kni_reader_ops;
+ p.arg_create = &pp.kni;
+ break;
+ }
+#endif
+
+ case PORT_IN_SOURCE:
+ {
+ struct mempool *mempool;
+
+ mempool = mempool_find(params->source.mempool_name);
+ if (mempool == NULL)
+ return -1;
+
+ pp.source.mempool = mempool->m;
+ pp.source.file_name = params->source.file_name;
+ pp.source.n_bytes_per_pkt = params->source.n_bytes_per_pkt;
+
+ p.ops = &rte_port_source_ops;
+ p.arg_create = &pp.source;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ p.burst_size = params->burst_size;
+
+ /* Resource create */
+ action = NULL;
+ p.f_action = NULL;
+ p.arg_ah = NULL;
+
+ if (ap) {
+ action = rte_port_in_action_create(ap->ap,
+ pipeline->cpu_id);
+ if (action == NULL)
+ return -1;
+
+ status = rte_port_in_action_params_get(
+ action,
+ &p);
+ if (status) {
+ rte_port_in_action_free(action);
+ return -1;
+ }
+ }
+
+ status = rte_pipeline_port_in_create(pipeline->p,
+ &p,
+ &port_id);
+ if (status) {
+ rte_port_in_action_free(action);
+ return -1;
+ }
+
+ if (enabled)
+ rte_pipeline_port_in_enable(pipeline->p, port_id);
+
+ /* Pipeline */
+ port_in = &pipeline->port_in[pipeline->n_ports_in];
+ memcpy(&port_in->params, params, sizeof(*params));
+ port_in->ap = ap;
+ port_in->a = action;
+ pipeline->n_ports_in++;
+
+ return 0;
+}
+
+int
+pipeline_port_in_connect_to_table(const char *pipeline_name,
+ uint32_t port_id,
+ uint32_t table_id)
+{
+ struct pipeline *pipeline;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ pipeline = pipeline_find(pipeline_name);
+ if ((pipeline == NULL) ||
+ (port_id >= pipeline->n_ports_in) ||
+ (table_id >= pipeline->n_tables))
+ return -1;
+
+ /* Resource */
+ status = rte_pipeline_port_in_connect_to_table(pipeline->p,
+ port_id,
+ table_id);
+
+ return status;
+}
+
+int
+pipeline_port_out_create(const char *pipeline_name,
+ struct port_out_params *params)
+{
+ struct rte_pipeline_port_out_params p;
+
+ union {
+ struct rte_port_ethdev_writer_params ethdev;
+ struct rte_port_ring_writer_params ring;
+ struct rte_port_sched_writer_params sched;
+ struct rte_port_fd_writer_params fd;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_writer_params kni;
+#endif
+ struct rte_port_sink_params sink;
+ } pp;
+
+ union {
+ struct rte_port_ethdev_writer_nodrop_params ethdev;
+ struct rte_port_ring_writer_nodrop_params ring;
+ struct rte_port_fd_writer_nodrop_params fd;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_writer_nodrop_params kni;
+#endif
+ } pp_nodrop;
+
+ struct pipeline *pipeline;
+ uint32_t port_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+ memset(&pp_nodrop, 0, sizeof(pp_nodrop));
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (params == NULL) ||
+ (params->burst_size == 0) ||
+ (params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX))
+ return -1;
+
+ pipeline = pipeline_find(pipeline_name);
+ if (pipeline == NULL)
+ return -1;
+
+ switch (params->type) {
+ case PORT_OUT_TXQ:
+ {
+ struct link *link;
+
+ link = link_find(params->dev_name);
+ if (link == NULL)
+ return -1;
+
+ if (params->txq.queue_id >= link->n_txq)
+ return -1;
+
+ pp.ethdev.port_id = link->port_id;
+ pp.ethdev.queue_id = params->txq.queue_id;
+ pp.ethdev.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.ethdev.port_id = link->port_id;
+ pp_nodrop.ethdev.queue_id = params->txq.queue_id;
+ pp_nodrop.ethdev.tx_burst_sz = params->burst_size;
+ pp_nodrop.ethdev.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_ethdev_writer_ops;
+ p.arg_create = &pp.ethdev;
+ } else {
+ p.ops = &rte_port_ethdev_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.ethdev;
+ }
+ break;
+ }
+
+ case PORT_OUT_SWQ:
+ {
+ struct swq *swq;
+
+ swq = swq_find(params->dev_name);
+ if (swq == NULL)
+ return -1;
+
+ pp.ring.ring = swq->r;
+ pp.ring.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.ring.ring = swq->r;
+ pp_nodrop.ring.tx_burst_sz = params->burst_size;
+ pp_nodrop.ring.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_ring_writer_ops;
+ p.arg_create = &pp.ring;
+ } else {
+ p.ops = &rte_port_ring_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.ring;
+ }
+ break;
+ }
+
+ case PORT_OUT_TMGR:
+ {
+ struct tmgr_port *tmgr_port;
+
+ tmgr_port = tmgr_port_find(params->dev_name);
+ if (tmgr_port == NULL)
+ return -1;
+
+ pp.sched.sched = tmgr_port->s;
+ pp.sched.tx_burst_sz = params->burst_size;
+
+ p.ops = &rte_port_sched_writer_ops;
+ p.arg_create = &pp.sched;
+ break;
+ }
+
+ case PORT_OUT_TAP:
+ {
+ struct tap *tap;
+
+ tap = tap_find(params->dev_name);
+ if (tap == NULL)
+ return -1;
+
+ pp.fd.fd = tap->fd;
+ pp.fd.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.fd.fd = tap->fd;
+ pp_nodrop.fd.tx_burst_sz = params->burst_size;
+ pp_nodrop.fd.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_fd_writer_ops;
+ p.arg_create = &pp.fd;
+ } else {
+ p.ops = &rte_port_fd_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.fd;
+ }
+ break;
+ }
+
+#ifdef RTE_LIBRTE_KNI
+ case PORT_OUT_KNI:
+ {
+ struct kni *kni;
+
+ kni = kni_find(params->dev_name);
+ if (kni == NULL)
+ return -1;
+
+ pp.kni.kni = kni->k;
+ pp.kni.tx_burst_sz = params->burst_size;
+
+ pp_nodrop.kni.kni = kni->k;
+ pp_nodrop.kni.tx_burst_sz = params->burst_size;
+ pp_nodrop.kni.n_retries = params->n_retries;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_kni_writer_ops;
+ p.arg_create = &pp.kni;
+ } else {
+ p.ops = &rte_port_kni_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.kni;
+ }
+ break;
+ }
+#endif
+
+ case PORT_OUT_SINK:
+ {
+ pp.sink.file_name = params->sink.file_name;
+ pp.sink.max_n_pkts = params->sink.max_n_pkts;
+
+ p.ops = &rte_port_sink_ops;
+ p.arg_create = &pp.sink;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ p.f_action = NULL;
+ p.arg_ah = NULL;
+
+ /* Resource create */
+ status = rte_pipeline_port_out_create(pipeline->p,
+ &p,
+ &port_id);
+
+ if (status)
+ return -1;
+
+ /* Pipeline */
+ pipeline->n_ports_out++;
+
+ return 0;
+}
+
+static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 3,
+ .offset = sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = offsetof(struct ipv6_hdr, proto),
+ },
+
+ /* Source IP address (IPv6) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = offsetof(struct ipv6_hdr, src_addr[0]),
+ },
+
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = offsetof(struct ipv6_hdr, src_addr[4]),
+ },
+
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = offsetof(struct ipv6_hdr, src_addr[8]),
+ },
+
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 4,
+ .input_index = 4,
+ .offset = offsetof(struct ipv6_hdr, src_addr[12]),
+ },
+
+ /* Destination IP address (IPv6) */
+ [5] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 5,
+ .input_index = 5,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[0]),
+ },
+
+ [6] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 6,
+ .input_index = 6,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[4]),
+ },
+
+ [7] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 7,
+ .input_index = 7,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[8]),
+ },
+
+ [8] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 8,
+ .input_index = 8,
+ .offset = offsetof(struct ipv6_hdr, dst_addr[12]),
+ },
+
+ /* Source Port */
+ [9] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 9,
+ .input_index = 9,
+ .offset = sizeof(struct ipv6_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [10] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 10,
+ .input_index = 9,
+ .offset = sizeof(struct ipv6_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+int
+pipeline_table_create(const char *pipeline_name,
+ struct table_params *params)
+{
+ char name[NAME_MAX];
+ struct rte_pipeline_table_params p;
+
+ union {
+ struct rte_table_acl_params acl;
+ struct rte_table_array_params array;
+ struct rte_table_hash_params hash;
+ struct rte_table_lpm_params lpm;
+ struct rte_table_lpm_ipv6_params lpm_ipv6;
+ } pp;
+
+ struct pipeline *pipeline;
+ struct table *table;
+ struct table_action_profile *ap;
+ struct rte_table_action *action;
+ uint32_t table_id;
+ int status;
+
+ memset(&p, 0, sizeof(p));
+ memset(&pp, 0, sizeof(pp));
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (params == NULL))
+ return -1;
+
+ pipeline = pipeline_find(pipeline_name);
+ if ((pipeline == NULL) ||
+ (pipeline->n_tables >= RTE_PIPELINE_TABLE_MAX))
+ return -1;
+
+ ap = NULL;
+ if (params->action_profile_name) {
+ ap = table_action_profile_find(params->action_profile_name);
+ if (ap == NULL)
+ return -1;
+ }
+
+ snprintf(name, NAME_MAX, "%s_table%u",
+ pipeline_name, pipeline->n_tables);
+
+ switch (params->match_type) {
+ case TABLE_ACL:
+ {
+ uint32_t ip_header_offset = params->match.acl.ip_header_offset -
+ (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
+ uint32_t i;
+
+ if (params->match.acl.n_rules == 0)
+ return -1;
+
+ pp.acl.name = name;
+ pp.acl.n_rules = params->match.acl.n_rules;
+ if (params->match.acl.ip_version) {
+ memcpy(&pp.acl.field_format,
+ &table_acl_field_format_ipv4,
+ sizeof(table_acl_field_format_ipv4));
+ pp.acl.n_rule_fields =
+ RTE_DIM(table_acl_field_format_ipv4);
+ } else {
+ memcpy(&pp.acl.field_format,
+ &table_acl_field_format_ipv6,
+ sizeof(table_acl_field_format_ipv6));
+ pp.acl.n_rule_fields =
+ RTE_DIM(table_acl_field_format_ipv6);
+ }
+
+ for (i = 0; i < pp.acl.n_rule_fields; i++)
+ pp.acl.field_format[i].offset += ip_header_offset;
+
+ p.ops = &rte_table_acl_ops;
+ p.arg_create = &pp.acl;
+ break;
+ }
+
+ case TABLE_ARRAY:
+ {
+ if (params->match.array.n_keys == 0)
+ return -1;
+
+ pp.array.n_entries = params->match.array.n_keys;
+ pp.array.offset = params->match.array.key_offset;
+
+ p.ops = &rte_table_array_ops;
+ p.arg_create = &pp.array;
+ break;
+ }
+
+ case TABLE_HASH:
+ {
+ struct rte_table_ops *ops;
+ rte_table_hash_op_hash f_hash;
+
+ if (params->match.hash.n_keys == 0)
+ return -1;
+
+ switch (params->match.hash.key_size) {
+ case 8:
+ f_hash = hash_default_key8;
+ break;
+ case 16:
+ f_hash = hash_default_key16;
+ break;
+ case 24:
+ f_hash = hash_default_key24;
+ break;
+ case 32:
+ f_hash = hash_default_key32;
+ break;
+ case 40:
+ f_hash = hash_default_key40;
+ break;
+ case 48:
+ f_hash = hash_default_key48;
+ break;
+ case 56:
+ f_hash = hash_default_key56;
+ break;
+ case 64:
+ f_hash = hash_default_key64;
+ break;
+ default:
+ return -1;
+ }
+
+ pp.hash.name = name;
+ pp.hash.key_size = params->match.hash.key_size;
+ pp.hash.key_offset = params->match.hash.key_offset;
+ pp.hash.key_mask = params->match.hash.key_mask;
+ pp.hash.n_keys = params->match.hash.n_keys;
+ pp.hash.n_buckets = params->match.hash.n_buckets;
+ pp.hash.f_hash = f_hash;
+ pp.hash.seed = 0;
+
+ if (params->match.hash.extendable_bucket)
+ switch (params->match.hash.key_size) {
+ case 8:
+ ops = &rte_table_hash_key8_ext_ops;
+ break;
+ case 16:
+ ops = &rte_table_hash_key16_ext_ops;
+ break;
+ default:
+ ops = &rte_table_hash_ext_ops;
+ }
+ else
+ switch (params->match.hash.key_size) {
+ case 8:
+ ops = &rte_table_hash_key8_lru_ops;
+ break;
+ case 16:
+ ops = &rte_table_hash_key16_lru_ops;
+ break;
+ default:
+ ops = &rte_table_hash_lru_ops;
+ }
+
+ p.ops = ops;
+ p.arg_create = &pp.hash;
+ break;
+ }
+
+ case TABLE_LPM:
+ {
+ if (params->match.lpm.n_rules == 0)
+ return -1;
+
+ switch (params->match.lpm.key_size) {
+ case 4:
+ {
+ pp.lpm.name = name;
+ pp.lpm.n_rules = params->match.lpm.n_rules;
+ pp.lpm.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
+ pp.lpm.flags = 0;
+ pp.lpm.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ pp.lpm.offset = params->match.lpm.key_offset;
+
+ p.ops = &rte_table_lpm_ops;
+ p.arg_create = &pp.lpm;
+ break;
+ }
+
+ case 16:
+ {
+ pp.lpm_ipv6.name = name;
+ pp.lpm_ipv6.n_rules = params->match.lpm.n_rules;
+ pp.lpm_ipv6.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
+ pp.lpm_ipv6.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ pp.lpm_ipv6.offset = params->match.lpm.key_offset;
+
+ p.ops = &rte_table_lpm_ipv6_ops;
+ p.arg_create = &pp.lpm_ipv6;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ break;
+ }
+
+ case TABLE_STUB:
+ {
+ p.ops = &rte_table_stub_ops;
+ p.arg_create = NULL;
+ break;
+ }
+
+ default:
+ return -1;
+ }
+
+ /* Resource create */
+ action = NULL;
+ p.f_action_hit = NULL;
+ p.f_action_miss = NULL;
+ p.arg_ah = NULL;
+
+ if (ap) {
+ action = rte_table_action_create(ap->ap,
+ pipeline->cpu_id);
+ if (action == NULL)
+ return -1;
+
+ status = rte_table_action_table_params_get(
+ action,
+ &p);
+ if (status ||
+ ((p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry)) >
+ TABLE_RULE_ACTION_SIZE_MAX)) {
+ rte_table_action_free(action);
+ return -1;
+ }
+ }
+
+ if (params->match_type == TABLE_LPM) {
+ if (params->match.lpm.key_size == 4)
+ pp.lpm.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+
+ if (params->match.lpm.key_size == 16)
+ pp.lpm_ipv6.entry_unique_size = p.action_data_size +
+ sizeof(struct rte_pipeline_table_entry);
+ }
+
+ status = rte_pipeline_table_create(pipeline->p,
+ &p,
+ &table_id);
+ if (status) {
+ rte_table_action_free(action);
+ return -1;
+ }
+
+ /* Pipeline */
+ table = &pipeline->table[pipeline->n_tables];
+ memcpy(&table->params, params, sizeof(*params));
+ table->ap = ap;
+ table->a = action;
+ pipeline->n_tables++;
+
+ return 0;
+}
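
Taken together, the functions above compose as follows. A hedged sketch of building a minimal pipeline: a link named "LINK0" is assumed to exist already, a stub table is used so that no action profile is required, and all names, burst sizes and the timer period are illustrative:

#include "pipeline.h"

static int
pipeline0_build(void)
{
    struct pipeline_params pp = {
        .timer_period_ms = 10,
        .offset_port_id = 0,
        .cpu_id = 0,
    };
    struct port_in_params pin = {
        .type = PORT_IN_RXQ,
        .dev_name = "LINK0",
        .rxq = { .queue_id = 0 },
        .burst_size = 32,
    };
    struct port_out_params pout = {
        .type = PORT_OUT_TXQ,
        .dev_name = "LINK0",
        .txq = { .queue_id = 0 },
        .burst_size = 32,
    };
    struct table_params t = {
        .match_type = TABLE_STUB,
    };

    if (pipeline_create("PIPELINE0", &pp) == NULL)
        return -1;

    /* In port 0, out port 0, table 0, then wire them up. */
    if (pipeline_port_in_create("PIPELINE0", &pin, 1) ||
        pipeline_port_out_create("PIPELINE0", &pout) ||
        pipeline_table_create("PIPELINE0", &t) ||
        pipeline_port_in_connect_to_table("PIPELINE0", 0, 0))
        return -1;

    return 0;
}

The table is created empty; traffic only flows once a default rule is installed, e.g. via pipeline_table_rule_add_default() declared in pipeline.h.
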
diff --git a/examples/ip_pipeline/pipeline.h b/examples/ip_pipeline/pipeline.h
index 7ca9cad6..a953a29f 100644
--- a/examples/ip_pipeline/pipeline.h
+++ b/examples/ip_pipeline/pipeline.h
@@ -1,73 +1,368 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
-#ifndef __INCLUDE_PIPELINE_H__
-#define __INCLUDE_PIPELINE_H__
+#ifndef _INCLUDE_PIPELINE_H_
+#define _INCLUDE_PIPELINE_H_
-#include <cmdline_parse.h>
+#include <stdint.h>
+#include <sys/queue.h>
-#include "pipeline_be.h"
+#include <rte_pipeline.h>
+#include <rte_table_action.h>
-/*
- * Pipeline type front-end operations
- */
+#include "common.h"
+#include "action.h"
+
+struct pipeline_params {
+ uint32_t timer_period_ms;
+ uint32_t offset_port_id;
+ uint32_t cpu_id;
+};
-typedef void* (*pipeline_fe_op_init)(struct pipeline_params *params,
- void *arg);
+enum port_in_type {
+ PORT_IN_RXQ,
+ PORT_IN_SWQ,
+ PORT_IN_TMGR,
+ PORT_IN_TAP,
+ PORT_IN_KNI,
+ PORT_IN_SOURCE,
+};
-typedef int (*pipeline_fe_op_post_init)(void *pipeline);
+struct port_in_params {
+ /* Read */
+ enum port_in_type type;
+ const char *dev_name;
+ union {
+ struct {
+ uint16_t queue_id;
+ } rxq;
-typedef int (*pipeline_fe_op_free)(void *pipeline);
+ struct {
+ const char *mempool_name;
+ uint32_t mtu;
+ } tap;
-typedef int (*pipeline_fe_op_track)(struct pipeline_params *params,
- uint32_t port_in,
- uint32_t *port_out);
+ struct {
+ const char *mempool_name;
+ const char *file_name;
+ uint32_t n_bytes_per_pkt;
+ } source;
+ };
+ uint32_t burst_size;
-struct pipeline_fe_ops {
- pipeline_fe_op_init f_init;
- pipeline_fe_op_post_init f_post_init;
- pipeline_fe_op_free f_free;
- pipeline_fe_op_track f_track;
- cmdline_parse_ctx_t *cmds;
+ /* Action */
+ const char *action_profile_name;
};
-/*
- * Pipeline type
- */
+enum port_out_type {
+ PORT_OUT_TXQ,
+ PORT_OUT_SWQ,
+ PORT_OUT_TMGR,
+ PORT_OUT_TAP,
+ PORT_OUT_KNI,
+ PORT_OUT_SINK,
+};
+
+struct port_out_params {
+ enum port_out_type type;
+ const char *dev_name;
+ union {
+ struct {
+ uint16_t queue_id;
+ } txq;
+
+ struct {
+ const char *file_name;
+ uint32_t max_n_pkts;
+ } sink;
+ };
+ uint32_t burst_size;
+ int retry;
+ uint32_t n_retries;
+};
+
+enum table_type {
+ TABLE_ACL,
+ TABLE_ARRAY,
+ TABLE_HASH,
+ TABLE_LPM,
+ TABLE_STUB,
+};
-struct pipeline_type {
- const char *name;
+struct table_acl_params {
+ uint32_t n_rules;
+ uint32_t ip_header_offset;
+ int ip_version;
+};
- /* pipeline back-end */
- struct pipeline_be_ops *be_ops;
+struct table_array_params {
+ uint32_t n_keys;
+ uint32_t key_offset;
+};
- /* pipeline front-end */
- struct pipeline_fe_ops *fe_ops;
+struct table_hash_params {
+ uint32_t n_keys;
+ uint32_t key_offset;
+ uint32_t key_size;
+ uint8_t *key_mask;
+ uint32_t n_buckets;
+ int extendable_bucket;
};
-static inline uint32_t
-pipeline_type_cmds_count(struct pipeline_type *ptype)
-{
- cmdline_parse_ctx_t *cmds;
- uint32_t n_cmds;
+struct table_lpm_params {
+ uint32_t n_rules;
+ uint32_t key_offset;
+ uint32_t key_size;
+};
- if (ptype->fe_ops == NULL)
- return 0;
+struct table_params {
+ /* Match */
+ enum table_type match_type;
+ union {
+ struct table_acl_params acl;
+ struct table_array_params array;
+ struct table_hash_params hash;
+ struct table_lpm_params lpm;
+ } match;
- cmds = ptype->fe_ops->cmds;
- if (cmds == NULL)
- return 0;
+ /* Action */
+ const char *action_profile_name;
+};
- for (n_cmds = 0; cmds[n_cmds]; n_cmds++);
+struct port_in {
+ struct port_in_params params;
+ struct port_in_action_profile *ap;
+ struct rte_port_in_action *a;
+};
- return n_cmds;
-}
+struct table {
+ struct table_params params;
+ struct table_action_profile *ap;
+ struct rte_table_action *a;
+};
+
+struct pipeline {
+ TAILQ_ENTRY(pipeline) node;
+ char name[NAME_SIZE];
+
+ struct rte_pipeline *p;
+ struct port_in port_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct table table[RTE_PIPELINE_TABLE_MAX];
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_tables;
+
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint32_t timer_period_ms;
+
+ int enabled;
+ uint32_t thread_id;
+ uint32_t cpu_id;
+};
+
+TAILQ_HEAD(pipeline_list, pipeline);
+
+int
+pipeline_init(void);
+
+struct pipeline *
+pipeline_find(const char *name);
+
+struct pipeline *
+pipeline_create(const char *name, struct pipeline_params *params);
+
+int
+pipeline_port_in_create(const char *pipeline_name,
+ struct port_in_params *params,
+ int enabled);
+
+int
+pipeline_port_in_connect_to_table(const char *pipeline_name,
+ uint32_t port_id,
+ uint32_t table_id);
+
+int
+pipeline_port_out_create(const char *pipeline_name,
+ struct port_out_params *params);
int
-parse_pipeline_core(uint32_t *socket,
- uint32_t *core,
- uint32_t *ht,
- const char *entry);
+pipeline_table_create(const char *pipeline_name,
+ struct table_params *params);
+struct table_rule_match_acl {
+ int ip_version;
+
+ RTE_STD_C11
+ union {
+ struct {
+ uint32_t sa;
+ uint32_t da;
+ } ipv4;
+
+ struct {
+ uint8_t sa[16];
+ uint8_t da[16];
+ } ipv6;
+ };
+
+ uint32_t sa_depth;
+ uint32_t da_depth;
+ uint16_t sp0;
+ uint16_t sp1;
+ uint16_t dp0;
+ uint16_t dp1;
+ uint8_t proto;
+ uint8_t proto_mask;
+ uint32_t priority;
+};
+
+struct table_rule_match_array {
+ uint32_t pos;
+};
+
+#ifndef TABLE_RULE_MATCH_SIZE_MAX
+#define TABLE_RULE_MATCH_SIZE_MAX 256
#endif
+
+#ifndef TABLE_RULE_ACTION_SIZE_MAX
+#define TABLE_RULE_ACTION_SIZE_MAX 2048
+#endif
+
+struct table_rule_match_hash {
+ uint8_t key[TABLE_RULE_MATCH_SIZE_MAX];
+};
+
+struct table_rule_match_lpm {
+ int ip_version;
+
+ RTE_STD_C11
+ union {
+ uint32_t ipv4;
+ uint8_t ipv6[16];
+ };
+
+ uint8_t depth;
+};
+
+struct table_rule_match {
+ enum table_type match_type;
+
+ union {
+ struct table_rule_match_acl acl;
+ struct table_rule_match_array array;
+ struct table_rule_match_hash hash;
+ struct table_rule_match_lpm lpm;
+ } match;
+};
+
+struct table_rule_action {
+ uint64_t action_mask;
+ struct rte_table_action_fwd_params fwd;
+ struct rte_table_action_lb_params lb;
+ struct rte_table_action_mtr_params mtr;
+ struct rte_table_action_tm_params tm;
+ struct rte_table_action_encap_params encap;
+ struct rte_table_action_nat_params nat;
+ struct rte_table_action_ttl_params ttl;
+ struct rte_table_action_stats_params stats;
+ struct rte_table_action_time_params time;
+};
+
+int
+pipeline_port_in_stats_read(const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats,
+ int clear);
+
+int
+pipeline_port_in_enable(const char *pipeline_name,
+ uint32_t port_id);
+
+int
+pipeline_port_in_disable(const char *pipeline_name,
+ uint32_t port_id);
+
+int
+pipeline_port_out_stats_read(const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats,
+ int clear);
+
+int
+pipeline_table_stats_read(const char *pipeline_name,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats,
+ int clear);
+
+int
+pipeline_table_rule_add(const char *pipeline_name,
+ uint32_t table_id,
+ struct table_rule_match *match,
+ struct table_rule_action *action,
+ void **data);
+
+int
+pipeline_table_rule_add_bulk(const char *pipeline_name,
+ uint32_t table_id,
+ struct table_rule_match *match,
+ struct table_rule_action *action,
+ void **data,
+ uint32_t *n_rules);
+
+int
+pipeline_table_rule_add_default(const char *pipeline_name,
+ uint32_t table_id,
+ struct table_rule_action *action,
+ void **data);
+
+int
+pipeline_table_rule_delete(const char *pipeline_name,
+ uint32_t table_id,
+ struct table_rule_match *match);
+
+int
+pipeline_table_rule_delete_default(const char *pipeline_name,
+ uint32_t table_id);
+
+int
+pipeline_table_rule_stats_read(const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear);
+
+int
+pipeline_table_mtr_profile_add(const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile);
+
+int
+pipeline_table_mtr_profile_delete(const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id);
+
+int
+pipeline_table_rule_mtr_read(const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear);
+
+int
+pipeline_table_dscp_table_update(const char *pipeline_name,
+ uint32_t table_id,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *dscp_table);
+
+int
+pipeline_table_rule_ttl_read(const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear);
+
+#endif /* _INCLUDE_PIPELINE_H_ */
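
As an illustration of the rule interface declared above, a sketch of installing an IPv4 LPM route that forwards matching packets to output port 0. The pipeline name and table id are illustrative, the forward-action field names follow rte_table_action.h, and the address byte order is left to the conventions of the surrounding code:

#include <rte_table_action.h>

#include "pipeline.h"

static int
route_demo(void)
{
    /* Illustrative rule: 10.0.0.0/8 -> output port 0. */
    struct table_rule_match m = {
        .match_type = TABLE_LPM,
        .match.lpm = {
            .ip_version = 1,       /* non-zero selects IPv4 */
            .ipv4 = 0x0a000000,    /* 10.0.0.0 */
            .depth = 8,
        },
    };
    struct table_rule_action a = {
        .action_mask = 1LLU << RTE_TABLE_ACTION_FWD,
        .fwd = {
            .action = RTE_PIPELINE_ACTION_PORT,
            .id = 0,               /* output port 0 */
        },
    };
    void *data;

    return pipeline_table_rule_add("PIPELINE0", 0, &m, &a, &data);
}
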
diff --git a/examples/ip_pipeline/pipeline/pipeline_actions_common.h b/examples/ip_pipeline/pipeline/pipeline_actions_common.h
deleted file mode 100644
index 23f88367..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_actions_common.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-#ifndef __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
-#define __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
-
-#include <stdint.h>
-
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_mbuf.h>
-#include <rte_pipeline.h>
-
-#define PIPELINE_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
-static int \
-f_ah( \
- __rte_unused struct rte_pipeline *p, \
- struct rte_mbuf **pkts, \
- uint32_t n_pkts, \
- void *arg) \
-{ \
- uint32_t i; \
- \
- for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
- f_pkt4_work(&pkts[i], arg); \
- \
- for ( ; i < n_pkts; i++) \
- f_pkt_work(pkts[i], arg); \
- \
- return 0; \
-}
-
-#define PIPELINE_PORT_IN_AH_HIJACK_ALL(f_ah, f_pkt_work, f_pkt4_work) \
-static int \
-f_ah( \
- struct rte_pipeline *p, \
- struct rte_mbuf **pkts, \
- uint32_t n_pkts, \
- void *arg) \
-{ \
- uint64_t pkt_mask = RTE_LEN2MASK(n_pkts, uint64_t); \
- uint32_t i; \
- \
- rte_pipeline_ah_packet_hijack(p, pkt_mask); \
- \
- for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
- f_pkt4_work(&pkts[i], arg); \
- \
- for ( ; i < n_pkts; i++) \
- f_pkt_work(pkts[i], arg); \
- \
- return 0; \
-}
-
-#define PIPELINE_TABLE_AH_HIT(f_ah, f_pkt_work, f_pkt4_work) \
-static int \
-f_ah( \
- __rte_unused struct rte_pipeline *p, \
- struct rte_mbuf **pkts, \
- uint64_t pkts_in_mask, \
- struct rte_pipeline_table_entry **entries, \
- void *arg) \
-{ \
- if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
- uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
- uint32_t i; \
- \
- for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
- f_pkt4_work(&pkts[i], &entries[i], arg); \
- \
- for ( ; i < n_pkts; i++) \
- f_pkt_work(pkts[i], entries[i], arg); \
- } else \
- for ( ; pkts_in_mask; ) { \
- uint32_t pos = __builtin_ctzll(pkts_in_mask); \
- uint64_t pkt_mask = 1LLU << pos; \
- \
- pkts_in_mask &= ~pkt_mask; \
- f_pkt_work(pkts[pos], entries[pos], arg); \
- } \
- \
- return 0; \
-}
-
-#define PIPELINE_TABLE_AH_MISS(f_ah, f_pkt_work, f_pkt4_work) \
-static int \
-f_ah( \
- __rte_unused struct rte_pipeline *p, \
- struct rte_mbuf **pkts, \
- uint64_t pkts_in_mask, \
- struct rte_pipeline_table_entry *entry, \
- void *arg) \
-{ \
- if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
- uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
- uint32_t i; \
- \
- for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
- f_pkt4_work(&pkts[i], entry, arg); \
- \
- for ( ; i < n_pkts; i++) \
- f_pkt_work(pkts[i], entry, arg); \
- } else \
- for ( ; pkts_in_mask; ) { \
- uint32_t pos = __builtin_ctzll(pkts_in_mask); \
- uint64_t pkt_mask = 1LLU << pos; \
- \
- pkts_in_mask &= ~pkt_mask; \
- f_pkt_work(pkts[pos], entry, arg); \
- } \
- \
- return 0; \
-}
-
-#define PIPELINE_TABLE_AH_HIT_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work) \
-static int \
-f_ah( \
- struct rte_pipeline *p, \
- struct rte_mbuf **pkts, \
- uint64_t pkts_mask, \
- struct rte_pipeline_table_entry **entries, \
- void *arg) \
-{ \
- uint64_t pkts_in_mask = pkts_mask; \
- uint64_t pkts_out_mask = pkts_mask; \
- uint64_t time = rte_rdtsc(); \
- \
- if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
- uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
- uint32_t i; \
- \
- for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
- uint64_t mask = f_pkt4_work(&pkts[i], \
- &entries[i], arg, time); \
- pkts_out_mask ^= mask << i; \
- } \
- \
- for ( ; i < n_pkts; i++) { \
- uint64_t mask = f_pkt_work(pkts[i], \
- entries[i], arg, time); \
- pkts_out_mask ^= mask << i; \
- } \
- } else \
- for ( ; pkts_in_mask; ) { \
- uint32_t pos = __builtin_ctzll(pkts_in_mask); \
- uint64_t pkt_mask = 1LLU << pos; \
- uint64_t mask = f_pkt_work(pkts[pos], \
- entries[pos], arg, time); \
- \
- pkts_in_mask &= ~pkt_mask; \
- pkts_out_mask ^= mask << pos; \
- } \
- \
- rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask); \
- \
- return 0; \
-}
-
-#define PIPELINE_TABLE_AH_MISS_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work) \
-static int \
-f_ah( \
- struct rte_pipeline *p, \
- struct rte_mbuf **pkts, \
- uint64_t pkts_mask, \
- struct rte_pipeline_table_entry *entry, \
- void *arg) \
-{ \
- uint64_t pkts_in_mask = pkts_mask; \
- uint64_t pkts_out_mask = pkts_mask; \
- uint64_t time = rte_rdtsc(); \
- \
- if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
- uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
- uint32_t i; \
- \
- for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
- uint64_t mask = f_pkt4_work(&pkts[i], \
- entry, arg, time); \
- pkts_out_mask ^= mask << i; \
- } \
- \
- for ( ; i < n_pkts; i++) { \
- uint64_t mask = f_pkt_work(pkts[i], entry, arg, time);\
- pkts_out_mask ^= mask << i; \
- } \
- } else \
- for ( ; pkts_in_mask; ) { \
- uint32_t pos = __builtin_ctzll(pkts_in_mask); \
- uint64_t pkt_mask = 1LLU << pos; \
- uint64_t mask = f_pkt_work(pkts[pos], \
- entry, arg, time); \
- \
- pkts_in_mask &= ~pkt_mask; \
- pkts_out_mask ^= mask << pos; \
- } \
- \
- rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask); \
- \
- return 0; \
-}
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_be.c b/examples/ip_pipeline/pipeline/pipeline_common_be.c
deleted file mode 100644
index 5d84989a..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_common_be.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-
-#include "pipeline_common_be.h"
-
-void *
-pipeline_msg_req_ping_handler(__rte_unused struct pipeline *p,
- void *msg)
-{
- struct pipeline_msg_rsp *rsp = msg;
-
- rsp->status = 0; /* OK */
-
- return rsp;
-}
-
-void *
-pipeline_msg_req_stats_port_in_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_stats_msg_req *req = msg;
- struct pipeline_stats_port_in_msg_rsp *rsp = msg;
- uint32_t port_id;
-
- /* Check request */
- if (req->id >= p->n_ports_in) {
- rsp->status = -1;
- return rsp;
- }
- port_id = p->port_in_id[req->id];
-
- /* Process request */
- rsp->status = rte_pipeline_port_in_stats_read(p->p,
- port_id,
- &rsp->stats,
- 1);
-
- return rsp;
-}
-
-void *
-pipeline_msg_req_stats_port_out_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_stats_msg_req *req = msg;
- struct pipeline_stats_port_out_msg_rsp *rsp = msg;
- uint32_t port_id;
-
- /* Check request */
- if (req->id >= p->n_ports_out) {
- rsp->status = -1;
- return rsp;
- }
- port_id = p->port_out_id[req->id];
-
- /* Process request */
- rsp->status = rte_pipeline_port_out_stats_read(p->p,
- port_id,
- &rsp->stats,
- 1);
-
- return rsp;
-}
-
-void *
-pipeline_msg_req_stats_table_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_stats_msg_req *req = msg;
- struct pipeline_stats_table_msg_rsp *rsp = msg;
- uint32_t table_id;
-
- /* Check request */
- if (req->id >= p->n_tables) {
- rsp->status = -1;
- return rsp;
- }
- table_id = p->table_id[req->id];
-
- /* Process request */
- rsp->status = rte_pipeline_table_stats_read(p->p,
- table_id,
- &rsp->stats,
- 1);
-
- return rsp;
-}
-
-void *
-pipeline_msg_req_port_in_enable_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_port_in_msg_req *req = msg;
- struct pipeline_msg_rsp *rsp = msg;
- uint32_t port_id;
-
- /* Check request */
- if (req->port_id >= p->n_ports_in) {
- rsp->status = -1;
- return rsp;
- }
- port_id = p->port_in_id[req->port_id];
-
- /* Process request */
- rsp->status = rte_pipeline_port_in_enable(p->p,
- port_id);
-
- return rsp;
-}
-
-void *
-pipeline_msg_req_port_in_disable_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_port_in_msg_req *req = msg;
- struct pipeline_msg_rsp *rsp = msg;
- uint32_t port_id;
-
- /* Check request */
- if (req->port_id >= p->n_ports_in) {
- rsp->status = -1;
- return rsp;
- }
- port_id = p->port_in_id[req->port_id];
-
- /* Process request */
- rsp->status = rte_pipeline_port_in_disable(p->p,
- port_id);
-
- return rsp;
-}
-
-void *
-pipeline_msg_req_invalid_handler(__rte_unused struct pipeline *p,
- void *msg)
-{
- struct pipeline_msg_rsp *rsp = msg;
-
- rsp->status = -1; /* Error */
-
- return rsp;
-}
-
-int
-pipeline_msg_req_handle(struct pipeline *p)
-{
- uint32_t msgq_id;
-
- for (msgq_id = 0; msgq_id < p->n_msgq; msgq_id++) {
- for ( ; ; ) {
- struct pipeline_msg_req *req;
- pipeline_msg_req_handler f_handle;
-
- req = pipeline_msg_recv(p, msgq_id);
- if (req == NULL)
- break;
-
- f_handle = (req->type < PIPELINE_MSG_REQS) ?
- p->handlers[req->type] :
- pipeline_msg_req_invalid_handler;
-
- if (f_handle == NULL)
- f_handle = pipeline_msg_req_invalid_handler;
-
- pipeline_msg_send(p,
- msgq_id,
- f_handle(p, (void *) req));
- }
- }
-
- return 0;
-}
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_be.h b/examples/ip_pipeline/pipeline/pipeline_common_be.h
deleted file mode 100644
index 83bd04e6..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_common_be.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_COMMON_BE_H__
-#define __INCLUDE_PIPELINE_COMMON_BE_H__
-
-#include <rte_common.h>
-#include <rte_ring.h>
-#include <rte_pipeline.h>
-
-#include "pipeline_be.h"
-
-struct pipeline;
-
-enum pipeline_msg_req_type {
- PIPELINE_MSG_REQ_PING = 0,
- PIPELINE_MSG_REQ_STATS_PORT_IN,
- PIPELINE_MSG_REQ_STATS_PORT_OUT,
- PIPELINE_MSG_REQ_STATS_TABLE,
- PIPELINE_MSG_REQ_PORT_IN_ENABLE,
- PIPELINE_MSG_REQ_PORT_IN_DISABLE,
- PIPELINE_MSG_REQ_CUSTOM,
- PIPELINE_MSG_REQS
-};
-
-typedef void *(*pipeline_msg_req_handler)(struct pipeline *p, void *msg);
-
-struct pipeline {
- struct rte_pipeline *p;
- uint32_t port_in_id[PIPELINE_MAX_PORT_IN];
- uint32_t port_out_id[PIPELINE_MAX_PORT_OUT];
- uint32_t table_id[PIPELINE_MAX_TABLES];
- struct rte_ring *msgq_in[PIPELINE_MAX_MSGQ_IN];
- struct rte_ring *msgq_out[PIPELINE_MAX_MSGQ_OUT];
-
- uint32_t n_ports_in;
- uint32_t n_ports_out;
- uint32_t n_tables;
- uint32_t n_msgq;
-
- pipeline_msg_req_handler handlers[PIPELINE_MSG_REQS];
- char name[PIPELINE_NAME_SIZE];
- uint32_t log_level;
-};
-
-enum pipeline_log_level {
- PIPELINE_LOG_LEVEL_HIGH = 1,
- PIPELINE_LOG_LEVEL_LOW,
- PIPELINE_LOG_LEVELS
-};
-
-#define PLOG(p, level, fmt, ...) \
-do { \
- if (p->log_level >= PIPELINE_LOG_LEVEL_ ## level) \
- fprintf(stdout, "[%s] " fmt "\n", p->name, ## __VA_ARGS__);\
-} while (0)
-
-static inline void *
-pipeline_msg_recv(struct pipeline *p,
- uint32_t msgq_id)
-{
- struct rte_ring *r = p->msgq_in[msgq_id];
- void *msg;
- int status = rte_ring_sc_dequeue(r, &msg);
-
- if (status != 0)
- return NULL;
-
- return msg;
-}
-
-static inline void
-pipeline_msg_send(struct pipeline *p,
- uint32_t msgq_id,
- void *msg)
-{
- struct rte_ring *r = p->msgq_out[msgq_id];
- int status;
-
- do {
- status = rte_ring_sp_enqueue(r, msg);
- } while (status == -ENOBUFS);
-}
-
-struct pipeline_msg_req {
- enum pipeline_msg_req_type type;
-};
-
-struct pipeline_stats_msg_req {
- enum pipeline_msg_req_type type;
- uint32_t id;
-};
-
-struct pipeline_port_in_msg_req {
- enum pipeline_msg_req_type type;
- uint32_t port_id;
-};
-
-struct pipeline_custom_msg_req {
- enum pipeline_msg_req_type type;
- uint32_t subtype;
-};
-
-struct pipeline_msg_rsp {
- int status;
-};
-
-struct pipeline_stats_port_in_msg_rsp {
- int status;
- struct rte_pipeline_port_in_stats stats;
-};
-
-struct pipeline_stats_port_out_msg_rsp {
- int status;
- struct rte_pipeline_port_out_stats stats;
-};
-
-struct pipeline_stats_table_msg_rsp {
- int status;
- struct rte_pipeline_table_stats stats;
-};
-
-void *pipeline_msg_req_ping_handler(struct pipeline *p, void *msg);
-void *pipeline_msg_req_stats_port_in_handler(struct pipeline *p, void *msg);
-void *pipeline_msg_req_stats_port_out_handler(struct pipeline *p, void *msg);
-void *pipeline_msg_req_stats_table_handler(struct pipeline *p, void *msg);
-void *pipeline_msg_req_port_in_enable_handler(struct pipeline *p, void *msg);
-void *pipeline_msg_req_port_in_disable_handler(struct pipeline *p, void *msg);
-void *pipeline_msg_req_invalid_handler(struct pipeline *p, void *msg);
-
-int pipeline_msg_req_handle(struct pipeline *p);
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.c b/examples/ip_pipeline/pipeline/pipeline_common_fe.c
deleted file mode 100644
index cc5214c7..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_common_fe.c
+++ /dev/null
@@ -1,1455 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <stdio.h>
-#include <fcntl.h>
-#include <unistd.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <cmdline_rdline.h>
-#include <cmdline_parse.h>
-#include <cmdline_parse_num.h>
-#include <cmdline_parse_string.h>
-#include <cmdline.h>
-
-#include "pipeline_common_fe.h"
-#include "parser.h"
-
-struct app_link_params *
-app_pipeline_track_pktq_out_to_link(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t pktq_out_id)
-{
- struct app_pipeline_params *p;
-
- /* Check input arguments */
- if (app == NULL)
- return NULL;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
- if (p == NULL)
- return NULL;
-
- for ( ; ; ) {
- struct app_pktq_out_params *pktq_out =
- &p->pktq_out[pktq_out_id];
-
- switch (pktq_out->type) {
- case APP_PKTQ_OUT_HWQ:
- {
- struct app_pktq_hwq_out_params *hwq_out;
-
- hwq_out = &app->hwq_out_params[pktq_out->id];
-
- return app_get_link_for_txq(app, hwq_out);
- }
-
- case APP_PKTQ_OUT_SWQ:
- {
- struct pipeline_params pp;
- struct pipeline_type *ptype;
- struct app_pktq_swq_params *swq;
- uint32_t pktq_in_id;
- int status;
-
- swq = &app->swq_params[pktq_out->id];
- p = app_swq_get_reader(app, swq, &pktq_in_id);
- if (p == NULL)
- return NULL;
-
- ptype = app_pipeline_type_find(app, p->type);
- if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
- return NULL;
-
- app_pipeline_params_get(app, p, &pp);
- status = ptype->fe_ops->f_track(&pp,
- pktq_in_id,
- &pktq_out_id);
- if (status)
- return NULL;
-
- break;
- }
-
- case APP_PKTQ_OUT_TM:
- {
- struct pipeline_params pp;
- struct pipeline_type *ptype;
- struct app_pktq_tm_params *tm;
- uint32_t pktq_in_id;
- int status;
-
- tm = &app->tm_params[pktq_out->id];
- p = app_tm_get_reader(app, tm, &pktq_in_id);
- if (p == NULL)
- return NULL;
-
- ptype = app_pipeline_type_find(app, p->type);
- if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
- return NULL;
-
- app_pipeline_params_get(app, p, &pp);
- status = ptype->fe_ops->f_track(&pp,
- pktq_in_id,
- &pktq_out_id);
- if (status)
- return NULL;
-
- break;
- }
-
- case APP_PKTQ_OUT_KNI:
- {
- struct pipeline_params pp;
- struct pipeline_type *ptype;
- struct app_pktq_kni_params *kni;
- uint32_t pktq_in_id;
- int status;
-
- kni = &app->kni_params[pktq_out->id];
- p = app_kni_get_reader(app, kni, &pktq_in_id);
- if (p == NULL)
- return NULL;
-
- ptype = app_pipeline_type_find(app, p->type);
- if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
- return NULL;
-
- app_pipeline_params_get(app, p, &pp);
- status = ptype->fe_ops->f_track(&pp,
- pktq_in_id,
- &pktq_out_id);
- if (status)
- return NULL;
-
- break;
- }
-
- case APP_PKTQ_OUT_TAP:
- case APP_PKTQ_OUT_SINK:
- default:
- return NULL;
- }
- }
-}
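
In plain terms, the loop above starts from one pipeline's output queue and repeatedly hops to the pipeline that reads it: each pipeline type's f_track callback translates the input queue back into that pipeline's output queue, and the walk continues until it resolves to a hardware TX queue, whose link is returned. TAP and SINK outputs cannot be tracked any further, so they (and any untrackable pipeline type) terminate the walk with NULL.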
-
-int
-app_pipeline_track_default(struct pipeline_params *p,
- uint32_t port_in,
- uint32_t *port_out)
-{
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- if (p->n_ports_out == 1) {
- *port_out = 0;
- return 0;
- }
-
- return -1;
-}
-
-int
-app_pipeline_ping(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_pipeline_params *p;
- struct pipeline_msg_req *req;
- struct pipeline_msg_rsp *rsp;
- int status = 0;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
- if (p == NULL)
- return -1;
-
- /* Message buffer allocation */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- /* Fill in request */
- req->type = PIPELINE_MSG_REQ_PING;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Check response */
- status = rsp->status;
-
- /* Message buffer free */
- app_msg_free(app, rsp);
-
- return status;
-}
-
-int
-app_pipeline_stats_port_in(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id,
- struct rte_pipeline_port_in_stats *stats)
-{
- struct app_pipeline_params *p;
- struct pipeline_stats_msg_req *req;
- struct pipeline_stats_port_in_msg_rsp *rsp;
- int status = 0;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (stats == NULL))
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
- if ((p == NULL) ||
- (port_id >= p->n_pktq_in))
- return -1;
-
- /* Message buffer allocation */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- /* Fill in request */
- req->type = PIPELINE_MSG_REQ_STATS_PORT_IN;
- req->id = port_id;
-
- /* Send request and wait for response */
- rsp = (struct pipeline_stats_port_in_msg_rsp *)
- app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Check response */
- status = rsp->status;
- if (status == 0)
- memcpy(stats, &rsp->stats, sizeof(rsp->stats));
-
- /* Message buffer free */
- app_msg_free(app, rsp);
-
- return status;
-}
-
-int
-app_pipeline_stats_port_out(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id,
- struct rte_pipeline_port_out_stats *stats)
-{
- struct app_pipeline_params *p;
- struct pipeline_stats_msg_req *req;
- struct pipeline_stats_port_out_msg_rsp *rsp;
- int status = 0;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (pipeline_id >= app->n_pipelines) ||
- (stats == NULL))
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
- if ((p == NULL) ||
- (port_id >= p->n_pktq_out))
- return -1;
-
- /* Message buffer allocation */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- /* Fill in request */
- req->type = PIPELINE_MSG_REQ_STATS_PORT_OUT;
- req->id = port_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Check response */
- status = rsp->status;
- if (status == 0)
- memcpy(stats, &rsp->stats, sizeof(rsp->stats));
-
- /* Message buffer free */
- app_msg_free(app, rsp);
-
- return status;
-}
-
-int
-app_pipeline_stats_table(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t table_id,
- struct rte_pipeline_table_stats *stats)
-{
- struct app_pipeline_params *p;
- struct pipeline_stats_msg_req *req;
- struct pipeline_stats_table_msg_rsp *rsp;
- int status = 0;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (stats == NULL))
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
- if (p == NULL)
- return -1;
-
- /* Message buffer allocation */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- /* Fill in request */
- req->type = PIPELINE_MSG_REQ_STATS_TABLE;
- req->id = table_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Check response */
- status = rsp->status;
- if (status == 0)
- memcpy(stats, &rsp->stats, sizeof(rsp->stats));
-
- /* Message buffer free */
- app_msg_free(app, rsp);
-
- return status;
-}
-
-int
-app_pipeline_port_in_enable(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id)
-{
- struct app_pipeline_params *p;
- struct pipeline_port_in_msg_req *req;
- struct pipeline_msg_rsp *rsp;
- int status = 0;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
- if ((p == NULL) ||
- (port_id >= p->n_pktq_in))
- return -1;
-
- /* Message buffer allocation */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- /* Fill in request */
- req->type = PIPELINE_MSG_REQ_PORT_IN_ENABLE;
- req->port_id = port_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Check response */
- status = rsp->status;
-
- /* Message buffer free */
- app_msg_free(app, rsp);
-
- return status;
-}
-
-int
-app_pipeline_port_in_disable(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id)
-{
- struct app_pipeline_params *p;
- struct pipeline_port_in_msg_req *req;
- struct pipeline_msg_rsp *rsp;
- int status = 0;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
- if ((p == NULL) ||
- (port_id >= p->n_pktq_in))
- return -1;
-
- /* Message buffer allocation */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- /* Fill in request */
- req->type = PIPELINE_MSG_REQ_PORT_IN_DISABLE;
- req->port_id = port_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Check response */
- status = rsp->status;
-
- /* Message buffer free */
- app_msg_free(app, rsp);
-
- return status;
-}
-
-int
-app_link_set_op(struct app_params *app,
- uint32_t link_id,
- uint32_t pipeline_id,
- app_link_op op,
- void *arg)
-{
- struct app_pipeline_params *pp;
- struct app_link_params *lp;
- struct app_link_data *ld;
- uint32_t ppos, lpos;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (op == NULL))
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, lp);
- if (lp == NULL)
- return -1;
- lpos = lp - app->link_params;
- ld = &app->link_data[lpos];
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, pp);
- if (pp == NULL)
- return -1;
- ppos = pp - app->pipeline_params;
-
- ld->f_link[ppos] = op;
- ld->arg[ppos] = arg;
-
- return 0;
-}
-
-int
-app_link_config(struct app_params *app,
- uint32_t link_id,
- uint32_t ip,
- uint32_t depth)
-{
- struct app_link_params *p;
- uint32_t i, netmask, host, bcast;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
- if (p == NULL) {
- APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
- link_id);
- return -1;
- }
-
- if (p->state) {
- APP_LOG(app, HIGH, "%s is UP, please bring it DOWN first",
- p->name);
- return -1;
- }
-
-	if ((depth == 0) || (depth > 32)) {
-		APP_LOG(app, HIGH, "Illegal value for depth parameter "
-			"(%" PRIu32 ")",
-			depth);
-		return -1;
-	}
-
-	netmask = (~0U) << (32 - depth);
-	host = ip & netmask;
-	bcast = host | (~netmask);
-
-	if ((ip == 0) ||
-		(ip == UINT32_MAX) ||
-		(ip == host) ||
-		(ip == bcast)) {
-		APP_LOG(app, HIGH, "Illegal IP address");
-		return -1;
-	}
-
-	for (i = 0; i < app->n_links; i++) {
-		struct app_link_params *link = &app->link_params[i];
-
-		if (strcmp(p->name, link->name) == 0)
-			continue;
-
-		if (link->ip == ip) {
-			APP_LOG(app, HIGH,
-				"%s is already assigned this IP address",
-				link->name);
-			return -1;
-		}
-	}
-
- /* Save link parameters */
- p->ip = ip;
- p->depth = depth;
-
- return 0;
-}
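
As a concrete check of the arithmetic above (values hypothetical): for ip = 10.0.0.5 and depth = 24, netmask = 0xFFFFFF00 (255.255.255.0), host = 10.0.0.0 and bcast = 10.0.0.255, so 10.0.0.5 is accepted while 10.0.0.0 and 10.0.0.255 are rejected as the network and broadcast addresses. Validating depth first also keeps the shift count (32 - depth) within the defined 0..31 range.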
-
-int
-app_link_up(struct app_params *app,
- uint32_t link_id)
-{
- struct app_link_params *p;
- struct app_link_data *d;
- int i;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
- if (p == NULL) {
- APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
- link_id);
- return -1;
- }
-
- d = &app->link_data[p - app->link_params];
-
- /* Check link state */
- if (p->state) {
- APP_LOG(app, HIGH, "%s is already UP", p->name);
- return 0;
- }
-
- /* Check that IP address is valid */
- if (p->ip == 0) {
- APP_LOG(app, HIGH, "%s IP address is not set", p->name);
- return 0;
- }
-
- app_link_up_internal(app, p);
-
- /* Callbacks */
- for (i = 0; i < APP_MAX_PIPELINES; i++)
- if (d->f_link[i])
- d->f_link[i](app, link_id, 1, d->arg[i]);
-
- return 0;
-}
-
-int
-app_link_down(struct app_params *app,
- uint32_t link_id)
-{
- struct app_link_params *p;
- struct app_link_data *d;
- uint32_t i;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
- if (p == NULL) {
- APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
- link_id);
- return -1;
- }
-
- d = &app->link_data[p - app->link_params];
-
- /* Check link state */
- if (p->state == 0) {
- APP_LOG(app, HIGH, "%s is already DOWN", p->name);
- return 0;
- }
-
- app_link_down_internal(app, p);
-
- /* Callbacks */
- for (i = 0; i < APP_MAX_PIPELINES; i++)
- if (d->f_link[i])
- d->f_link[i](app, link_id, 0, d->arg[i]);
-
- return 0;
-}
-
-/*
- * ping
- */
-
-struct cmd_ping_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t ping_string;
-};
-
-static void
-cmd_ping_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_ping_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_ping(app, params->pipeline_id);
- if (status != 0)
- printf("Command failed\n");
-}
-
-static cmdline_parse_token_string_t cmd_ping_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_ping_result, p_string, "p");
-
-static cmdline_parse_token_num_t cmd_ping_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_ping_result, pipeline_id, UINT32);
-
-static cmdline_parse_token_string_t cmd_ping_ping_string =
- TOKEN_STRING_INITIALIZER(struct cmd_ping_result, ping_string, "ping");
-
-static cmdline_parse_inst_t cmd_ping = {
- .f = cmd_ping_parsed,
- .data = NULL,
- .help_str = "Pipeline ping",
- .tokens = {
- (void *) &cmd_ping_p_string,
- (void *) &cmd_ping_pipeline_id,
- (void *) &cmd_ping_ping_string,
- NULL,
- },
-};
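
With the token list above, pinging pipeline 2 from the CLI is simply (pipeline ID hypothetical):

p 2 ping

"Command failed" is printed only if the response status is non-zero or no response arrives within MSG_TIMEOUT_DEFAULT.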
-
-/*
- * stats port in
- */
-
-struct cmd_stats_port_in_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t stats_string;
- cmdline_fixed_string_t port_string;
- cmdline_fixed_string_t in_string;
- uint32_t port_in_id;
-};
-
-static void
-cmd_stats_port_in_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_stats_port_in_result *params = parsed_result;
- struct app_params *app = data;
- struct rte_pipeline_port_in_stats stats;
- int status;
-
- status = app_pipeline_stats_port_in(app,
- params->pipeline_id,
- params->port_in_id,
- &stats);
-
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
-
- /* Display stats */
- printf("Pipeline %" PRIu32 " - stats for input port %" PRIu32 ":\n"
- "\tPkts in: %" PRIu64 "\n"
- "\tPkts dropped by AH: %" PRIu64 "\n"
- "\tPkts dropped by other: %" PRIu64 "\n",
- params->pipeline_id,
- params->port_in_id,
- stats.stats.n_pkts_in,
- stats.n_pkts_dropped_by_ah,
- stats.stats.n_pkts_drop);
-}
-
-static cmdline_parse_token_string_t cmd_stats_port_in_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_stats_port_in_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, pipeline_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_stats_port_in_stats_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, stats_string,
- "stats");
-
-static cmdline_parse_token_string_t cmd_stats_port_in_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, port_string,
- "port");
-
-static cmdline_parse_token_string_t cmd_stats_port_in_in_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, in_string,
- "in");
-
-static cmdline_parse_token_num_t cmd_stats_port_in_port_in_id =
- TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, port_in_id,
- UINT32);
-
-static cmdline_parse_inst_t cmd_stats_port_in = {
- .f = cmd_stats_port_in_parsed,
- .data = NULL,
- .help_str = "Pipeline input port stats",
- .tokens = {
- (void *) &cmd_stats_port_in_p_string,
- (void *) &cmd_stats_port_in_pipeline_id,
- (void *) &cmd_stats_port_in_stats_string,
- (void *) &cmd_stats_port_in_port_string,
- (void *) &cmd_stats_port_in_in_string,
- (void *) &cmd_stats_port_in_port_in_id,
- NULL,
- },
-};
-
-/*
- * stats port out
- */
-
-struct cmd_stats_port_out_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t stats_string;
- cmdline_fixed_string_t port_string;
- cmdline_fixed_string_t out_string;
- uint32_t port_out_id;
-};
-
-static void
-cmd_stats_port_out_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_stats_port_out_result *params = parsed_result;
- struct app_params *app = data;
- struct rte_pipeline_port_out_stats stats;
- int status;
-
- status = app_pipeline_stats_port_out(app,
- params->pipeline_id,
- params->port_out_id,
- &stats);
-
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
-
- /* Display stats */
- printf("Pipeline %" PRIu32 " - stats for output port %" PRIu32 ":\n"
- "\tPkts in: %" PRIu64 "\n"
- "\tPkts dropped by AH: %" PRIu64 "\n"
- "\tPkts dropped by other: %" PRIu64 "\n",
- params->pipeline_id,
- params->port_out_id,
- stats.stats.n_pkts_in,
- stats.n_pkts_dropped_by_ah,
- stats.stats.n_pkts_drop);
-}
-
-static cmdline_parse_token_string_t cmd_stats_port_out_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_stats_port_out_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, pipeline_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_stats_port_out_stats_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, stats_string,
- "stats");
-
-static cmdline_parse_token_string_t cmd_stats_port_out_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, port_string,
- "port");
-
-static cmdline_parse_token_string_t cmd_stats_port_out_out_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, out_string,
- "out");
-
-static cmdline_parse_token_num_t cmd_stats_port_out_port_out_id =
- TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, port_out_id,
- UINT32);
-
-static cmdline_parse_inst_t cmd_stats_port_out = {
- .f = cmd_stats_port_out_parsed,
- .data = NULL,
- .help_str = "Pipeline output port stats",
- .tokens = {
- (void *) &cmd_stats_port_out_p_string,
- (void *) &cmd_stats_port_out_pipeline_id,
- (void *) &cmd_stats_port_out_stats_string,
- (void *) &cmd_stats_port_out_port_string,
- (void *) &cmd_stats_port_out_out_string,
- (void *) &cmd_stats_port_out_port_out_id,
- NULL,
- },
-};
-
-/*
- * stats table
- */
-
-struct cmd_stats_table_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t stats_string;
- cmdline_fixed_string_t table_string;
- uint32_t table_id;
-};
-
-static void
-cmd_stats_table_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_stats_table_result *params = parsed_result;
- struct app_params *app = data;
- struct rte_pipeline_table_stats stats;
- int status;
-
- status = app_pipeline_stats_table(app,
- params->pipeline_id,
- params->table_id,
- &stats);
-
- if (status != 0) {
- printf("Command failed\n");
- return;
- }
-
- /* Display stats */
- printf("Pipeline %" PRIu32 " - stats for table %" PRIu32 ":\n"
- "\tPkts in: %" PRIu64 "\n"
- "\tPkts in with lookup miss: %" PRIu64 "\n"
- "\tPkts in with lookup hit dropped by AH: %" PRIu64 "\n"
- "\tPkts in with lookup hit dropped by others: %" PRIu64 "\n"
- "\tPkts in with lookup miss dropped by AH: %" PRIu64 "\n"
- "\tPkts in with lookup miss dropped by others: %" PRIu64 "\n",
- params->pipeline_id,
- params->table_id,
- stats.stats.n_pkts_in,
- stats.stats.n_pkts_lookup_miss,
- stats.n_pkts_dropped_by_lkp_hit_ah,
- stats.n_pkts_dropped_lkp_hit,
- stats.n_pkts_dropped_by_lkp_miss_ah,
- stats.n_pkts_dropped_lkp_miss);
-}
-
-static cmdline_parse_token_string_t cmd_stats_table_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_stats_table_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, pipeline_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_stats_table_stats_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, stats_string,
- "stats");
-
-static cmdline_parse_token_string_t cmd_stats_table_table_string =
- TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, table_string,
- "table");
-
-static cmdline_parse_token_num_t cmd_stats_table_table_id =
- TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, table_id, UINT32);
-
-static cmdline_parse_inst_t cmd_stats_table = {
- .f = cmd_stats_table_parsed,
- .data = NULL,
- .help_str = "Pipeline table stats",
- .tokens = {
- (void *) &cmd_stats_table_p_string,
- (void *) &cmd_stats_table_pipeline_id,
- (void *) &cmd_stats_table_stats_string,
- (void *) &cmd_stats_table_table_string,
- (void *) &cmd_stats_table_table_id,
- NULL,
- },
-};
-
-/*
- * port in enable
- */
-
-struct cmd_port_in_enable_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t port_string;
- cmdline_fixed_string_t in_string;
- uint32_t port_in_id;
- cmdline_fixed_string_t enable_string;
-};
-
-static void
-cmd_port_in_enable_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_port_in_enable_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_port_in_enable(app,
- params->pipeline_id,
- params->port_in_id);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-static cmdline_parse_token_string_t cmd_port_in_enable_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_port_in_enable_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, pipeline_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_port_in_enable_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, port_string,
- "port");
-
-static cmdline_parse_token_string_t cmd_port_in_enable_in_string =
- TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, in_string,
- "in");
-
-static cmdline_parse_token_num_t cmd_port_in_enable_port_in_id =
- TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, port_in_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_port_in_enable_enable_string =
- TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result,
- enable_string, "enable");
-
-static cmdline_parse_inst_t cmd_port_in_enable = {
- .f = cmd_port_in_enable_parsed,
- .data = NULL,
- .help_str = "Pipeline input port enable",
- .tokens = {
- (void *) &cmd_port_in_enable_p_string,
- (void *) &cmd_port_in_enable_pipeline_id,
- (void *) &cmd_port_in_enable_port_string,
- (void *) &cmd_port_in_enable_in_string,
- (void *) &cmd_port_in_enable_port_in_id,
- (void *) &cmd_port_in_enable_enable_string,
- NULL,
- },
-};
-
-/*
- * port in disable
- */
-
-struct cmd_port_in_disable_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t port_string;
- cmdline_fixed_string_t in_string;
- uint32_t port_in_id;
- cmdline_fixed_string_t disable_string;
-};
-
-static void
-cmd_port_in_disable_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_port_in_disable_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- status = app_pipeline_port_in_disable(app,
- params->pipeline_id,
- params->port_in_id);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-static cmdline_parse_token_string_t cmd_port_in_disable_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, p_string,
- "p");
-
-static cmdline_parse_token_num_t cmd_port_in_disable_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, pipeline_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_port_in_disable_port_string =
- TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, port_string,
- "port");
-
-static cmdline_parse_token_string_t cmd_port_in_disable_in_string =
- TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, in_string,
- "in");
-
-static cmdline_parse_token_num_t cmd_port_in_disable_port_in_id =
- TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, port_in_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_port_in_disable_disable_string =
- TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result,
- disable_string, "disable");
-
-static cmdline_parse_inst_t cmd_port_in_disable = {
- .f = cmd_port_in_disable_parsed,
- .data = NULL,
- .help_str = "Pipeline input port disable",
- .tokens = {
- (void *) &cmd_port_in_disable_p_string,
- (void *) &cmd_port_in_disable_pipeline_id,
- (void *) &cmd_port_in_disable_port_string,
- (void *) &cmd_port_in_disable_in_string,
- (void *) &cmd_port_in_disable_port_in_id,
- (void *) &cmd_port_in_disable_disable_string,
- NULL,
- },
-};
-
-/*
- * link config
- */
-
-static void
-print_link_info(struct app_link_params *p)
-{
- struct rte_eth_stats stats;
- struct ether_addr *mac_addr;
- uint32_t netmask = (~0U) << (32 - p->depth);
- uint32_t host = p->ip & netmask;
- uint32_t bcast = host | (~netmask);
-
- memset(&stats, 0, sizeof(stats));
- rte_eth_stats_get(p->pmd_id, &stats);
-
- mac_addr = (struct ether_addr *) &p->mac_addr;
-
- if (strlen(p->pci_bdf))
- printf("%s(%s): flags=<%s>\n",
- p->name,
- p->pci_bdf,
- (p->state) ? "UP" : "DOWN");
- else
- printf("%s: flags=<%s>\n",
- p->name,
- (p->state) ? "UP" : "DOWN");
-
- if (p->ip)
- printf("\tinet %" PRIu32 ".%" PRIu32
- ".%" PRIu32 ".%" PRIu32
- " netmask %" PRIu32 ".%" PRIu32
- ".%" PRIu32 ".%" PRIu32 " "
- "broadcast %" PRIu32 ".%" PRIu32
- ".%" PRIu32 ".%" PRIu32 "\n",
- (p->ip >> 24) & 0xFF,
- (p->ip >> 16) & 0xFF,
- (p->ip >> 8) & 0xFF,
- p->ip & 0xFF,
- (netmask >> 24) & 0xFF,
- (netmask >> 16) & 0xFF,
- (netmask >> 8) & 0xFF,
- netmask & 0xFF,
- (bcast >> 24) & 0xFF,
- (bcast >> 16) & 0xFF,
- (bcast >> 8) & 0xFF,
- bcast & 0xFF);
-
- printf("\tether %02" PRIx32 ":%02" PRIx32 ":%02" PRIx32
- ":%02" PRIx32 ":%02" PRIx32 ":%02" PRIx32 "\n",
- mac_addr->addr_bytes[0],
- mac_addr->addr_bytes[1],
- mac_addr->addr_bytes[2],
- mac_addr->addr_bytes[3],
- mac_addr->addr_bytes[4],
- mac_addr->addr_bytes[5]);
-
- printf("\tRX packets %" PRIu64
- " bytes %" PRIu64
- "\n",
- stats.ipackets,
- stats.ibytes);
-
- printf("\tRX errors %" PRIu64
- " missed %" PRIu64
- " no-mbuf %" PRIu64
- "\n",
- stats.ierrors,
- stats.imissed,
- stats.rx_nombuf);
-
- printf("\tTX packets %" PRIu64
- " bytes %" PRIu64 "\n",
- stats.opackets,
- stats.obytes);
-
- printf("\tTX errors %" PRIu64
- "\n",
- stats.oerrors);
-
- printf("\n");
-}
-
-/*
- * link
- *
- * link config:
- * link <linkid> config <ipaddr> <depth>
- *
- * link up:
- * link <linkid> up
- *
- * link down:
- * link <linkid> down
- *
- * link ls:
- * link ls
- */
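
For example, following the grammar above, a session that assigns 10.0.0.1/24 to link 0 and brings it up (IDs and addresses hypothetical) would be:

link 0 config 10.0.0.1 24
link 0 up
link ls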
-
-struct cmd_link_result {
- cmdline_fixed_string_t link_string;
- cmdline_multi_string_t multi_string;
-};
-
-static void
-cmd_link_parsed(
- void *parsed_result,
-	__rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_link_result *params = parsed_result;
- struct app_params *app = data;
-
- char *tokens[16];
- uint32_t n_tokens = RTE_DIM(tokens);
- int status;
-
- uint32_t link_id;
-
- status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
- if (status != 0) {
- printf(CMD_MSG_TOO_MANY_ARGS, "link");
- return;
- }
-
- /* link ls */
- if ((n_tokens == 1) && (strcmp(tokens[0], "ls") == 0)) {
- for (link_id = 0; link_id < app->n_links; link_id++) {
- struct app_link_params *p;
-
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
- print_link_info(p);
- }
- return;
- } /* link ls */
-
- if (n_tokens < 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "link");
- return;
- }
-
- if (parser_read_uint32(&link_id, tokens[0])) {
- printf(CMD_MSG_INVALID_ARG, "linkid");
- return;
- }
-
- /* link config */
- if (strcmp(tokens[1], "config") == 0) {
- struct in_addr ipaddr_ipv4;
- uint32_t depth;
-
- if (n_tokens != 4) {
- printf(CMD_MSG_MISMATCH_ARGS, "link config");
- return;
- }
-
- if (parse_ipv4_addr(tokens[2], &ipaddr_ipv4)) {
- printf(CMD_MSG_INVALID_ARG, "ipaddr");
- return;
- }
-
- if (parser_read_uint32(&depth, tokens[3])) {
- printf(CMD_MSG_INVALID_ARG, "depth");
- return;
- }
-
- status = app_link_config(app,
- link_id,
- rte_be_to_cpu_32(ipaddr_ipv4.s_addr),
- depth);
- if (status)
- printf(CMD_MSG_FAIL, "link config");
-
- return;
- } /* link config */
-
- /* link up */
- if (strcmp(tokens[1], "up") == 0) {
- if (n_tokens != 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "link up");
- return;
- }
-
- status = app_link_up(app, link_id);
- if (status)
- printf(CMD_MSG_FAIL, "link up");
-
- return;
- } /* link up */
-
- /* link down */
- if (strcmp(tokens[1], "down") == 0) {
- if (n_tokens != 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "link down");
- return;
- }
-
- status = app_link_down(app, link_id);
- if (status)
- printf(CMD_MSG_FAIL, "link down");
-
- return;
- } /* link down */
-
- printf(CMD_MSG_MISMATCH_ARGS, "link");
-}
-
-static cmdline_parse_token_string_t cmd_link_link_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_result, link_string, "link");
-
-static cmdline_parse_token_string_t cmd_link_multi_string =
- TOKEN_STRING_INITIALIZER(struct cmd_link_result, multi_string,
- TOKEN_STRING_MULTI);
-
-static cmdline_parse_inst_t cmd_link = {
- .f = cmd_link_parsed,
- .data = NULL,
- .help_str = "link config / up / down / ls",
- .tokens = {
- (void *) &cmd_link_link_string,
- (void *) &cmd_link_multi_string,
- NULL,
- },
-};
-
-/*
- * quit
- */
-
-struct cmd_quit_result {
- cmdline_fixed_string_t quit;
-};
-
-static void
-cmd_quit_parsed(
- __rte_unused void *parsed_result,
- struct cmdline *cl,
- __rte_unused void *data)
-{
- cmdline_quit(cl);
-}
-
-static cmdline_parse_token_string_t cmd_quit_quit =
- TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
-
-static cmdline_parse_inst_t cmd_quit = {
- .f = cmd_quit_parsed,
- .data = NULL,
- .help_str = "Quit",
- .tokens = {
- (void *) &cmd_quit_quit,
- NULL,
- },
-};
-
-/*
- * run
- *
- * run <file>
- * run <file> [<count> [<interval>]]
- * <count> default is 1
- * <interval> is measured in milliseconds, default is 1 second
- */
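
For instance, run /tmp/test.cli 10 500 (script path hypothetical) replays the script ten times, sleeping 500 ms after each run; run /tmp/test.cli alone runs it once with the 1000 ms default interval.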
-
-static void
-app_run_file(
- cmdline_parse_ctx_t *ctx,
- const char *file_name)
-{
- struct cmdline *file_cl;
- int fd;
-
- fd = open(file_name, O_RDONLY);
- if (fd < 0) {
- printf("Cannot open file \"%s\"\n", file_name);
- return;
- }
-
- file_cl = cmdline_new(ctx, "", fd, 1);
- cmdline_interact(file_cl);
- close(fd);
-}
-
-struct cmd_run_result {
- cmdline_fixed_string_t run_string;
- cmdline_multi_string_t multi_string;
-};
-
-static void
-cmd_run_parsed(
- void *parsed_result,
- struct cmdline *cl,
-	__rte_unused void *data)
-{
- struct cmd_run_result *params = parsed_result;
-
- char *tokens[16];
- uint32_t n_tokens = RTE_DIM(tokens);
- int status;
-
- char *file_name;
- uint32_t count, interval, i;
-
- status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
- if (status) {
- printf(CMD_MSG_TOO_MANY_ARGS, "run");
- return;
- }
-
- switch (n_tokens) {
- case 0:
- printf(CMD_MSG_NOT_ENOUGH_ARGS, "run");
- return;
-
- case 1:
- file_name = tokens[0];
- count = 1;
- interval = 1000;
- break;
-
- case 2:
- file_name = tokens[0];
-
- if (parser_read_uint32(&count, tokens[1]) ||
- (count == 0)) {
- printf(CMD_MSG_INVALID_ARG, "count");
- return;
- }
-
- interval = 1000;
- break;
-
- case 3:
- file_name = tokens[0];
-
- if (parser_read_uint32(&count, tokens[1]) ||
- (count == 0)) {
- printf(CMD_MSG_INVALID_ARG, "count");
- return;
- }
-
- if (parser_read_uint32(&interval, tokens[2]) ||
- (interval == 0)) {
- printf(CMD_MSG_INVALID_ARG, "interval");
- return;
- }
- break;
-
- default:
- printf(CMD_MSG_MISMATCH_ARGS, "run");
- return;
- }
-
- for (i = 0; i < count; i++) {
- app_run_file(cl->ctx, file_name);
- if (interval)
- usleep(interval * 1000);
- }
-}
-
-static cmdline_parse_token_string_t cmd_run_run_string =
- TOKEN_STRING_INITIALIZER(struct cmd_run_result, run_string, "run");
-
-static cmdline_parse_token_string_t cmd_run_multi_string =
- TOKEN_STRING_INITIALIZER(struct cmd_run_result, multi_string,
- TOKEN_STRING_MULTI);
-
-static cmdline_parse_inst_t cmd_run = {
- .f = cmd_run_parsed,
- .data = NULL,
- .help_str = "Run CLI script file",
- .tokens = {
- (void *) &cmd_run_run_string,
- (void *) &cmd_run_multi_string,
- NULL,
- },
-};
-
-static cmdline_parse_ctx_t pipeline_common_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_quit,
- (cmdline_parse_inst_t *) &cmd_run,
- (cmdline_parse_inst_t *) &cmd_link,
- (cmdline_parse_inst_t *) &cmd_ping,
- (cmdline_parse_inst_t *) &cmd_stats_port_in,
- (cmdline_parse_inst_t *) &cmd_stats_port_out,
- (cmdline_parse_inst_t *) &cmd_stats_table,
- (cmdline_parse_inst_t *) &cmd_port_in_enable,
- (cmdline_parse_inst_t *) &cmd_port_in_disable,
- NULL,
-};
-
-int
-app_pipeline_common_cmd_push(struct app_params *app)
-{
- uint32_t n_cmds, i;
-
- /* Check for available slots in the application commands array */
- n_cmds = RTE_DIM(pipeline_common_cmds) - 1;
- if (n_cmds > APP_MAX_CMDS - app->n_cmds)
- return -ENOMEM;
-
- /* Push pipeline commands into the application */
- memcpy(&app->cmds[app->n_cmds],
- pipeline_common_cmds,
- n_cmds * sizeof(cmdline_parse_ctx_t));
-
- for (i = 0; i < n_cmds; i++)
- app->cmds[app->n_cmds + i]->data = app;
-
- app->n_cmds += n_cmds;
- app->cmds[app->n_cmds] = NULL;
-
- return 0;
-}
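
Note on the arithmetic above: RTE_DIM(pipeline_common_cmds) - 1 excludes the NULL sentinel from the count, so only real commands are copied into the application array, and the final assignment re-terminates app->cmds after the appended block.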
diff --git a/examples/ip_pipeline/pipeline/pipeline_common_fe.h b/examples/ip_pipeline/pipeline/pipeline_common_fe.h
deleted file mode 100644
index 7227544f..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_common_fe.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_COMMON_FE_H__
-#define __INCLUDE_PIPELINE_COMMON_FE_H__
-
-#include <rte_common.h>
-#include <rte_cycles.h>
-#include <rte_malloc.h>
-#include <cmdline_parse.h>
-
-#include "pipeline_common_be.h"
-#include "pipeline.h"
-#include "app.h"
-
-#ifndef MSG_TIMEOUT_DEFAULT
-#define MSG_TIMEOUT_DEFAULT 1000
-#endif
-
-static inline struct app_pipeline_data *
-app_pipeline_data(struct app_params *app, uint32_t id)
-{
- struct app_pipeline_params *params;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", id, params);
- if (params == NULL)
- return NULL;
-
- return &app->pipeline_data[params - app->pipeline_params];
-}
-
-static inline void *
-app_pipeline_data_fe(struct app_params *app, uint32_t id, struct pipeline_type *ptype)
-{
- struct app_pipeline_data *pipeline_data;
-
- pipeline_data = app_pipeline_data(app, id);
- if (pipeline_data == NULL)
- return NULL;
-
- if (strcmp(pipeline_data->ptype->name, ptype->name) != 0)
- return NULL;
-
- if (pipeline_data->enabled == 0)
- return NULL;
-
- return pipeline_data->fe;
-}
-
-static inline struct rte_ring *
-app_pipeline_msgq_in_get(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_msgq_params *p;
-
- APP_PARAM_FIND_BY_ID(app->msgq_params,
- "MSGQ-REQ-PIPELINE",
- pipeline_id,
- p);
- if (p == NULL)
- return NULL;
-
- return app->msgq[p - app->msgq_params];
-}
-
-static inline struct rte_ring *
-app_pipeline_msgq_out_get(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_msgq_params *p;
-
- APP_PARAM_FIND_BY_ID(app->msgq_params,
- "MSGQ-RSP-PIPELINE",
- pipeline_id,
- p);
- if (p == NULL)
- return NULL;
-
- return app->msgq[p - app->msgq_params];
-}
-
-static inline void *
-app_msg_alloc(__rte_unused struct app_params *app)
-{
- return rte_malloc(NULL, 2048, RTE_CACHE_LINE_SIZE);
-}
-
-static inline void
-app_msg_free(__rte_unused struct app_params *app,
- void *msg)
-{
- rte_free(msg);
-}
-
-static inline void
-app_msg_send(struct app_params *app,
- uint32_t pipeline_id,
- void *msg)
-{
- struct rte_ring *r = app_pipeline_msgq_in_get(app, pipeline_id);
- int status;
-
- do {
- status = rte_ring_sp_enqueue(r, msg);
- } while (status == -ENOBUFS);
-}
-
-static inline void *
-app_msg_recv(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct rte_ring *r = app_pipeline_msgq_out_get(app, pipeline_id);
- void *msg;
- int status = rte_ring_sc_dequeue(r, &msg);
-
- if (status != 0)
- return NULL;
-
- return msg;
-}
-
-static inline void *
-app_msg_send_recv(struct app_params *app,
- uint32_t pipeline_id,
- void *msg,
- uint32_t timeout_ms)
-{
- struct rte_ring *r_req = app_pipeline_msgq_in_get(app, pipeline_id);
- struct rte_ring *r_rsp = app_pipeline_msgq_out_get(app, pipeline_id);
- uint64_t hz = rte_get_tsc_hz();
- void *msg_recv;
- uint64_t deadline;
- int status;
-
- /* send */
- do {
- status = rte_ring_sp_enqueue(r_req, (void *) msg);
- } while (status == -ENOBUFS);
-
- /* recv */
- deadline = (timeout_ms) ?
- (rte_rdtsc() + ((hz * timeout_ms) / 1000)) :
- UINT64_MAX;
-
- do {
- if (rte_rdtsc() > deadline)
- return NULL;
-
- status = rte_ring_sc_dequeue(r_rsp, &msg_recv);
- } while (status != 0);
-
- return msg_recv;
-}
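
The deadline arithmetic above converts the millisecond timeout into TSC cycles as hz * timeout_ms / 1000; at a hypothetical TSC frequency of 2.5 GHz, the default 1000 ms timeout corresponds to 2.5e9 cycles. Passing timeout_ms = 0 disables the timeout altogether (deadline = UINT64_MAX), so the caller then blocks until a response arrives.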
-
-struct app_link_params *
-app_pipeline_track_pktq_out_to_link(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t pktq_out_id);
-
-int
-app_pipeline_track_default(struct pipeline_params *params,
- uint32_t port_in,
- uint32_t *port_out);
-
-int
-app_pipeline_ping(struct app_params *app,
- uint32_t pipeline_id);
-
-int
-app_pipeline_stats_port_in(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id,
- struct rte_pipeline_port_in_stats *stats);
-
-int
-app_pipeline_stats_port_out(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id,
- struct rte_pipeline_port_out_stats *stats);
-
-int
-app_pipeline_stats_table(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t table_id,
- struct rte_pipeline_table_stats *stats);
-
-int
-app_pipeline_port_in_enable(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id);
-
-int
-app_pipeline_port_in_disable(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id);
-
-int
-app_link_set_op(struct app_params *app,
- uint32_t link_id,
- uint32_t pipeline_id,
- app_link_op op,
- void *arg);
-
-int
-app_link_config(struct app_params *app,
- uint32_t link_id,
- uint32_t ip,
- uint32_t depth);
-
-int
-app_link_up(struct app_params *app,
- uint32_t link_id);
-
-int
-app_link_down(struct app_params *app,
- uint32_t link_id);
-
-int
-app_pipeline_common_cmd_push(struct app_params *app);
-
-#define CMD_MSG_OUT_OF_MEMORY "Not enough memory\n"
-#define CMD_MSG_NOT_ENOUGH_ARGS "Not enough arguments for command \"%s\"\n"
-#define CMD_MSG_TOO_MANY_ARGS "Too many arguments for command \"%s\"\n"
-#define CMD_MSG_MISMATCH_ARGS "Incorrect set of arguments for command \"%s\"\n"
-#define CMD_MSG_INVALID_ARG "Invalid value for argument \"%s\"\n"
-#define CMD_MSG_ARG_NOT_FOUND "Syntax error: \"%s\" not found\n"
-#define CMD_MSG_FILE_ERR "Error in file \"%s\" at line %u\n"
-#define CMD_MSG_FAIL "Command \"%s\" failed\n"
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall.c b/examples/ip_pipeline/pipeline/pipeline_firewall.c
deleted file mode 100644
index 0cae9d74..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_firewall.c
+++ /dev/null
@@ -1,1421 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-#include <errno.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/queue.h>
-#include <netinet/in.h>
-
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_malloc.h>
-#include <cmdline_rdline.h>
-#include <cmdline_parse.h>
-#include <cmdline_parse_num.h>
-#include <cmdline_parse_string.h>
-
-#include "app.h"
-#include "pipeline_common_fe.h"
-#include "pipeline_firewall.h"
-#include "parser.h"
-
-struct app_pipeline_firewall_rule {
- struct pipeline_firewall_key key;
- int32_t priority;
- uint32_t port_id;
- void *entry_ptr;
-
- TAILQ_ENTRY(app_pipeline_firewall_rule) node;
-};
-
-struct app_pipeline_firewall {
- /* parameters */
- uint32_t n_ports_in;
- uint32_t n_ports_out;
-
- /* rules */
- TAILQ_HEAD(, app_pipeline_firewall_rule) rules;
- uint32_t n_rules;
- uint32_t default_rule_present;
- uint32_t default_rule_port_id;
- void *default_rule_entry_ptr;
-};
-
-static void
-print_firewall_ipv4_rule(struct app_pipeline_firewall_rule *rule)
-{
- printf("Prio = %" PRId32 " (SA = %" PRIu32 ".%" PRIu32
- ".%" PRIu32 ".%" PRIu32 "/%" PRIu32 ", "
- "DA = %" PRIu32 ".%" PRIu32
- ".%"PRIu32 ".%" PRIu32 "/%" PRIu32 ", "
- "SP = %" PRIu32 "-%" PRIu32 ", "
- "DP = %" PRIu32 "-%" PRIu32 ", "
- "Proto = %" PRIu32 " / 0x%" PRIx32 ") => "
- "Port = %" PRIu32 " (entry ptr = %p)\n",
-
- rule->priority,
-
- (rule->key.key.ipv4_5tuple.src_ip >> 24) & 0xFF,
- (rule->key.key.ipv4_5tuple.src_ip >> 16) & 0xFF,
- (rule->key.key.ipv4_5tuple.src_ip >> 8) & 0xFF,
- rule->key.key.ipv4_5tuple.src_ip & 0xFF,
- rule->key.key.ipv4_5tuple.src_ip_mask,
-
- (rule->key.key.ipv4_5tuple.dst_ip >> 24) & 0xFF,
- (rule->key.key.ipv4_5tuple.dst_ip >> 16) & 0xFF,
- (rule->key.key.ipv4_5tuple.dst_ip >> 8) & 0xFF,
- rule->key.key.ipv4_5tuple.dst_ip & 0xFF,
- rule->key.key.ipv4_5tuple.dst_ip_mask,
-
- rule->key.key.ipv4_5tuple.src_port_from,
- rule->key.key.ipv4_5tuple.src_port_to,
-
- rule->key.key.ipv4_5tuple.dst_port_from,
- rule->key.key.ipv4_5tuple.dst_port_to,
-
- rule->key.key.ipv4_5tuple.proto,
- rule->key.key.ipv4_5tuple.proto_mask,
-
- rule->port_id,
- rule->entry_ptr);
-}
-
-static struct app_pipeline_firewall_rule *
-app_pipeline_firewall_rule_find(struct app_pipeline_firewall *p,
- struct pipeline_firewall_key *key)
-{
- struct app_pipeline_firewall_rule *r;
-
- TAILQ_FOREACH(r, &p->rules, node)
- if (memcmp(key,
- &r->key,
- sizeof(struct pipeline_firewall_key)) == 0)
- return r;
-
- return NULL;
-}
-
-static int
-app_pipeline_firewall_ls(
- struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_pipeline_firewall *p;
- struct app_pipeline_firewall_rule *rule;
- uint32_t n_rules;
- int priority;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
- if (p == NULL)
- return -1;
-
- n_rules = p->n_rules;
- for (priority = 0; n_rules; priority++)
- TAILQ_FOREACH(rule, &p->rules, node)
- if (rule->priority == priority) {
- print_firewall_ipv4_rule(rule);
- n_rules--;
- }
-
- if (p->default_rule_present)
- printf("Default rule: port %" PRIu32 " (entry ptr = %p)\n",
- p->default_rule_port_id,
- p->default_rule_entry_ptr);
- else
- printf("Default rule: DROP\n");
-
- printf("\n");
-
- return 0;
-}
-
-static void *
-app_pipeline_firewall_init(struct pipeline_params *params,
- __rte_unused void *arg)
-{
- struct app_pipeline_firewall *p;
- uint32_t size;
-
- /* Check input arguments */
- if ((params == NULL) ||
- (params->n_ports_in == 0) ||
- (params->n_ports_out == 0))
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_firewall));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (p == NULL)
- return NULL;
-
- /* Initialization */
- p->n_ports_in = params->n_ports_in;
- p->n_ports_out = params->n_ports_out;
-
- TAILQ_INIT(&p->rules);
- p->n_rules = 0;
- p->default_rule_present = 0;
- p->default_rule_port_id = 0;
- p->default_rule_entry_ptr = NULL;
-
- return (void *) p;
-}
-
-static int
-app_pipeline_firewall_free(void *pipeline)
-{
- struct app_pipeline_firewall *p = pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- while (!TAILQ_EMPTY(&p->rules)) {
- struct app_pipeline_firewall_rule *rule;
-
- rule = TAILQ_FIRST(&p->rules);
- TAILQ_REMOVE(&p->rules, rule, node);
- rte_free(rule);
- }
-
- rte_free(p);
- return 0;
-}
-
-static int
-app_pipeline_firewall_key_check_and_normalize(struct pipeline_firewall_key *key)
-{
- switch (key->type) {
- case PIPELINE_FIREWALL_IPV4_5TUPLE:
- {
- uint32_t src_ip_depth = key->key.ipv4_5tuple.src_ip_mask;
- uint32_t dst_ip_depth = key->key.ipv4_5tuple.dst_ip_mask;
- uint16_t src_port_from = key->key.ipv4_5tuple.src_port_from;
- uint16_t src_port_to = key->key.ipv4_5tuple.src_port_to;
- uint16_t dst_port_from = key->key.ipv4_5tuple.dst_port_from;
- uint16_t dst_port_to = key->key.ipv4_5tuple.dst_port_to;
-
- uint32_t src_ip_netmask = 0;
- uint32_t dst_ip_netmask = 0;
-
- if ((src_ip_depth > 32) ||
- (dst_ip_depth > 32) ||
- (src_port_from > src_port_to) ||
- (dst_port_from > dst_port_to))
- return -1;
-
- if (src_ip_depth)
- src_ip_netmask = (~0U) << (32 - src_ip_depth);
-
- if (dst_ip_depth)
- dst_ip_netmask = ((~0U) << (32 - dst_ip_depth));
-
- key->key.ipv4_5tuple.src_ip &= src_ip_netmask;
- key->key.ipv4_5tuple.dst_ip &= dst_ip_netmask;
-
- return 0;
- }
-
- default:
- return -1;
- }
-}
-
-int
-app_pipeline_firewall_load_file(char *filename,
- struct pipeline_firewall_key *keys,
- uint32_t *priorities,
- uint32_t *port_ids,
- uint32_t *n_keys,
- uint32_t *line)
-{
- FILE *f = NULL;
- char file_buf[1024];
- uint32_t i, l;
-
- /* Check input arguments */
- if ((filename == NULL) ||
- (keys == NULL) ||
- (priorities == NULL) ||
- (port_ids == NULL) ||
- (n_keys == NULL) ||
- (*n_keys == 0) ||
- (line == NULL)) {
- if (line)
- *line = 0;
- return -1;
- }
-
- /* Open input file */
- f = fopen(filename, "r");
- if (f == NULL) {
- *line = 0;
- return -1;
- }
-
- /* Read file */
- for (i = 0, l = 1; i < *n_keys; l++) {
- char *tokens[32];
- uint32_t n_tokens = RTE_DIM(tokens);
-
- uint32_t priority = 0;
- struct in_addr sipaddr;
- uint32_t sipdepth = 0;
- struct in_addr dipaddr;
- uint32_t dipdepth = 0;
- uint16_t sport0 = 0;
- uint16_t sport1 = 0;
- uint16_t dport0 = 0;
- uint16_t dport1 = 0;
- uint8_t proto = 0;
- uint8_t protomask = 0;
- uint32_t port_id = 0;
-
- int status;
-
- if (fgets(file_buf, sizeof(file_buf), f) == NULL)
- break;
-
- status = parse_tokenize_string(file_buf, tokens, &n_tokens);
- if (status)
- goto error1;
-
- if ((n_tokens == 0) || (tokens[0][0] == '#'))
- continue;
-
- if ((n_tokens != 15) ||
- strcmp(tokens[0], "priority") ||
- parser_read_uint32(&priority, tokens[1]) ||
- strcmp(tokens[2], "ipv4") ||
- parse_ipv4_addr(tokens[3], &sipaddr) ||
- parser_read_uint32(&sipdepth, tokens[4]) ||
- parse_ipv4_addr(tokens[5], &dipaddr) ||
- parser_read_uint32(&dipdepth, tokens[6]) ||
- parser_read_uint16(&sport0, tokens[7]) ||
- parser_read_uint16(&sport1, tokens[8]) ||
- parser_read_uint16(&dport0, tokens[9]) ||
- parser_read_uint16(&dport1, tokens[10]) ||
- parser_read_uint8(&proto, tokens[11]) ||
- parser_read_uint8_hex(&protomask, tokens[12]) ||
- strcmp(tokens[13], "port") ||
- parser_read_uint32(&port_id, tokens[14]))
- goto error1;
-
- keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
- keys[i].key.ipv4_5tuple.src_ip =
- rte_be_to_cpu_32(sipaddr.s_addr);
- keys[i].key.ipv4_5tuple.src_ip_mask = sipdepth;
- keys[i].key.ipv4_5tuple.dst_ip =
- rte_be_to_cpu_32(dipaddr.s_addr);
- keys[i].key.ipv4_5tuple.dst_ip_mask = dipdepth;
- keys[i].key.ipv4_5tuple.src_port_from = sport0;
- keys[i].key.ipv4_5tuple.src_port_to = sport1;
- keys[i].key.ipv4_5tuple.dst_port_from = dport0;
- keys[i].key.ipv4_5tuple.dst_port_to = dport1;
- keys[i].key.ipv4_5tuple.proto = proto;
- keys[i].key.ipv4_5tuple.proto_mask = protomask;
-
- port_ids[i] = port_id;
- priorities[i] = priority;
-
- if (app_pipeline_firewall_key_check_and_normalize(&keys[i]))
- goto error1;
-
- i++;
- }
-
- /* Close file */
- *n_keys = i;
- fclose(f);
- return 0;
-
-error1:
- *line = l;
- fclose(f);
- return -1;
-}
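
Given the 15-token grammar parsed above, a rule file holds one rule per line, with '#' starting a comment line; a valid line (all values hypothetical) would be:

priority 1 ipv4 10.0.0.0 24 192.168.1.0 24 0 65535 0 65535 6 0xF port 0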
-
-int
-app_pipeline_firewall_add_rule(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_firewall_key *key,
- uint32_t priority,
- uint32_t port_id)
-{
- struct app_pipeline_firewall *p;
- struct app_pipeline_firewall_rule *rule;
- struct pipeline_firewall_add_msg_req *req;
- struct pipeline_firewall_add_msg_rsp *rsp;
- int new_rule;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL) ||
- (key->type != PIPELINE_FIREWALL_IPV4_5TUPLE))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
- if (p == NULL)
- return -1;
-
- if (port_id >= p->n_ports_out)
- return -1;
-
- if (app_pipeline_firewall_key_check_and_normalize(key) != 0)
- return -1;
-
- /* Find existing rule or allocate new rule */
- rule = app_pipeline_firewall_rule_find(p, key);
- new_rule = (rule == NULL);
- if (rule == NULL) {
- rule = rte_malloc(NULL, sizeof(*rule), RTE_CACHE_LINE_SIZE);
-
- if (rule == NULL)
- return -1;
- }
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL) {
- if (new_rule)
- rte_free(rule);
- return -1;
- }
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD;
- memcpy(&req->key, key, sizeof(*key));
- req->priority = priority;
- req->port_id = port_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL) {
- if (new_rule)
- rte_free(rule);
- return -1;
- }
-
- /* Read response and write rule */
- if (rsp->status ||
- (rsp->entry_ptr == NULL) ||
- ((new_rule == 0) && (rsp->key_found == 0)) ||
- ((new_rule == 1) && (rsp->key_found == 1))) {
- app_msg_free(app, rsp);
- if (new_rule)
- rte_free(rule);
- return -1;
- }
-
- memcpy(&rule->key, key, sizeof(*key));
- rule->priority = priority;
- rule->port_id = port_id;
- rule->entry_ptr = rsp->entry_ptr;
-
- /* Commit rule */
- if (new_rule) {
- TAILQ_INSERT_TAIL(&p->rules, rule, node);
- p->n_rules++;
- }
-
- print_firewall_ipv4_rule(rule);
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_firewall_delete_rule(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_firewall_key *key)
-{
- struct app_pipeline_firewall *p;
- struct app_pipeline_firewall_rule *rule;
- struct pipeline_firewall_del_msg_req *req;
- struct pipeline_firewall_del_msg_rsp *rsp;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL) ||
- (key->type != PIPELINE_FIREWALL_IPV4_5TUPLE))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
- if (p == NULL)
- return -1;
-
- if (app_pipeline_firewall_key_check_and_normalize(key) != 0)
- return -1;
-
- /* Find rule */
- rule = app_pipeline_firewall_rule_find(p, key);
- if (rule == NULL)
- return 0;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL;
- memcpy(&req->key, key, sizeof(*key));
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response */
- if (rsp->status || !rsp->key_found) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- /* Remove rule */
- TAILQ_REMOVE(&p->rules, rule, node);
- p->n_rules--;
- rte_free(rule);
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_firewall_add_bulk(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_firewall_key *keys,
- uint32_t n_keys,
- uint32_t *priorities,
- uint32_t *port_ids)
-{
- struct app_pipeline_firewall *p;
- struct pipeline_firewall_add_bulk_msg_req *req;
- struct pipeline_firewall_add_bulk_msg_rsp *rsp;
-
- struct app_pipeline_firewall_rule **rules;
- int *new_rules;
-
- int *keys_found;
- void **entries_ptr;
-
- uint32_t i;
- int status = 0;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
- if (p == NULL)
- return -1;
-
- rules = rte_malloc(NULL,
- n_keys * sizeof(struct app_pipeline_firewall_rule *),
- RTE_CACHE_LINE_SIZE);
- if (rules == NULL)
- return -1;
-
- new_rules = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
- if (new_rules == NULL) {
- rte_free(rules);
- return -1;
- }
-
- /* check data integrity and add to rule list */
- for (i = 0; i < n_keys; i++) {
- if (port_ids[i] >= p->n_ports_out) {
- rte_free(rules);
- rte_free(new_rules);
- return -1;
- }
-
- if (app_pipeline_firewall_key_check_and_normalize(&keys[i]) != 0) {
- rte_free(rules);
- rte_free(new_rules);
- return -1;
- }
-
- rules[i] = app_pipeline_firewall_rule_find(p, &keys[i]);
- new_rules[i] = (rules[i] == NULL);
- if (rules[i] == NULL) {
- rules[i] = rte_malloc(NULL,
- sizeof(*rules[i]),
- RTE_CACHE_LINE_SIZE);
-
- if (rules[i] == NULL) {
- uint32_t j;
-
- for (j = 0; j <= i; j++)
- if (new_rules[j])
- rte_free(rules[j]);
-
- rte_free(rules);
- rte_free(new_rules);
- return -1;
- }
- }
- }
-
- keys_found = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
- if (keys_found == NULL) {
- uint32_t j;
-
- for (j = 0; j < n_keys; j++)
- if (new_rules[j])
- rte_free(rules[j]);
-
- rte_free(rules);
- rte_free(new_rules);
- return -1;
- }
-
- entries_ptr = rte_malloc(NULL,
- n_keys * sizeof(struct rte_pipeline_table_entry *),
- RTE_CACHE_LINE_SIZE);
- if (entries_ptr == NULL) {
- uint32_t j;
-
- for (j = 0; j < n_keys; j++)
- if (new_rules[j])
- rte_free(rules[j]);
-
- rte_free(rules);
- rte_free(new_rules);
- rte_free(keys_found);
- return -1;
- }
- for (i = 0; i < n_keys; i++) {
- entries_ptr[i] = rte_malloc(NULL,
- sizeof(struct rte_pipeline_table_entry),
- RTE_CACHE_LINE_SIZE);
-
- if (entries_ptr[i] == NULL) {
- uint32_t j;
-
- for (j = 0; j < n_keys; j++)
- if (new_rules[j])
- rte_free(rules[j]);
-
- for (j = 0; j <= i; j++)
- rte_free(entries_ptr[j]);
-
- rte_free(rules);
- rte_free(new_rules);
- rte_free(keys_found);
- rte_free(entries_ptr);
- return -1;
- }
- }
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL) {
- uint32_t j;
-
- for (j = 0; j < n_keys; j++)
- if (new_rules[j])
- rte_free(rules[j]);
-
- for (j = 0; j < n_keys; j++)
- rte_free(entries_ptr[j]);
-
- rte_free(rules);
- rte_free(new_rules);
- rte_free(keys_found);
- rte_free(entries_ptr);
- return -1;
- }
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD_BULK;
-
- req->keys = keys;
- req->n_keys = n_keys;
- req->port_ids = port_ids;
- req->priorities = priorities;
- req->keys_found = keys_found;
- req->entries_ptr = entries_ptr;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL) {
- uint32_t j;
-
- for (j = 0; j < n_keys; j++)
- if (new_rules[j])
- rte_free(rules[j]);
-
- for (j = 0; j < n_keys; j++)
- rte_free(entries_ptr[j]);
-
- rte_free(rules);
- rte_free(new_rules);
- rte_free(keys_found);
- rte_free(entries_ptr);
- return -1;
- }
-
- if (rsp->status) {
- for (i = 0; i < n_keys; i++)
- if (new_rules[i])
- rte_free(rules[i]);
-
- for (i = 0; i < n_keys; i++)
- rte_free(entries_ptr[i]);
-
- status = -1;
- goto cleanup;
- }
-
- for (i = 0; i < n_keys; i++) {
- if (entries_ptr[i] == NULL ||
- ((new_rules[i] == 0) && (keys_found[i] == 0)) ||
- ((new_rules[i] == 1) && (keys_found[i] == 1))) {
- for (i = 0; i < n_keys; i++)
- if (new_rules[i])
- rte_free(rules[i]);
-
- for (i = 0; i < n_keys; i++)
- rte_free(entries_ptr[i]);
-
- status = -1;
- goto cleanup;
- }
- }
-
- for (i = 0; i < n_keys; i++) {
- memcpy(&rules[i]->key, &keys[i], sizeof(keys[i]));
- rules[i]->priority = priorities[i];
- rules[i]->port_id = port_ids[i];
- rules[i]->entry_ptr = entries_ptr[i];
-
- /* Commit rule */
- if (new_rules[i]) {
- TAILQ_INSERT_TAIL(&p->rules, rules[i], node);
- p->n_rules++;
- }
-
- print_firewall_ipv4_rule(rules[i]);
- }
-
-cleanup:
- app_msg_free(app, rsp);
- rte_free(rules);
- rte_free(new_rules);
- rte_free(keys_found);
- rte_free(entries_ptr);
-
- return status;
-}
-
-int
-app_pipeline_firewall_delete_bulk(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_firewall_key *keys,
- uint32_t n_keys)
-{
- struct app_pipeline_firewall *p;
- struct pipeline_firewall_del_bulk_msg_req *req;
- struct pipeline_firewall_del_bulk_msg_rsp *rsp;
-
- struct app_pipeline_firewall_rule **rules;
- int *keys_found;
-
- uint32_t i;
- int status = 0;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
- if (p == NULL)
- return -1;
-
- rules = rte_malloc(NULL,
- n_keys * sizeof(struct app_pipeline_firewall_rule *),
- RTE_CACHE_LINE_SIZE);
- if (rules == NULL)
- return -1;
-
- for (i = 0; i < n_keys; i++) {
- if (app_pipeline_firewall_key_check_and_normalize(&keys[i]) != 0) {
-			rte_free(rules);
-			return -1;
- }
-
- rules[i] = app_pipeline_firewall_rule_find(p, &keys[i]);
- }
-
- keys_found = rte_malloc(NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
- if (keys_found == NULL) {
- rte_free(rules);
- return -1;
- }
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL) {
- rte_free(rules);
- rte_free(keys_found);
- return -1;
- }
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL_BULK;
-
- req->keys = keys;
- req->n_keys = n_keys;
- req->keys_found = keys_found;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL) {
- rte_free(rules);
- rte_free(keys_found);
- return -1;
- }
-
- if (rsp->status) {
- status = -1;
- goto cleanup;
- }
-
- for (i = 0; i < n_keys; i++) {
- if (keys_found[i] == 0) {
- status = -1;
- goto cleanup;
- }
- }
-
- for (i = 0; i < n_keys; i++) {
- TAILQ_REMOVE(&p->rules, rules[i], node);
- p->n_rules--;
- rte_free(rules[i]);
- }
-
-cleanup:
- app_msg_free(app, rsp);
- rte_free(rules);
- rte_free(keys_found);
-
- return status;
-}
-
-int
-app_pipeline_firewall_add_default_rule(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id)
-{
- struct app_pipeline_firewall *p;
- struct pipeline_firewall_add_default_msg_req *req;
- struct pipeline_firewall_add_default_msg_rsp *rsp;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
- if (p == NULL)
- return -1;
-
- if (port_id >= p->n_ports_out)
- return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT;
- req->port_id = port_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response and write rule */
- if (rsp->status || (rsp->entry_ptr == NULL)) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- p->default_rule_port_id = port_id;
- p->default_rule_entry_ptr = rsp->entry_ptr;
-
- /* Commit rule */
- p->default_rule_present = 1;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_firewall_delete_default_rule(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_pipeline_firewall *p;
- struct pipeline_firewall_del_default_msg_req *req;
- struct pipeline_firewall_del_default_msg_rsp *rsp;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
- if (p == NULL)
- return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response and write rule */
- if (rsp->status) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- /* Commit rule */
- p->default_rule_present = 0;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-/*
- * firewall
- *
- * firewall add:
- * p <pipelineid> firewall add priority <priority>
- * ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
- * <sport0> <sport1> <dport0> <dport1> <proto> <protomask>
- * port <portid>
- * Note: <protomask> is a hex value
- *
- * p <pipelineid> firewall add bulk <file>
- *
- * firewall add default:
- * p <pipelineid> firewall add default <portid>
- *
- * firewall del:
- * p <pipelineid> firewall del
- * ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
- * <sport0> <sport1> <dport0> <dport1> <proto> <protomask>
- *
- * p <pipelineid> firewall del bulk <file>
- *
- * firewall del default:
- * p <pipelineid> firewall del default
- *
- * firewall ls:
- * p <pipelineid> firewall ls
- */
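For reference, concrete invocations of the grammar above look as follows; the pipeline ID, addresses, port ranges and output port are illustrative values, not defaults taken from the code:

    p 1 firewall add priority 1 ipv4 0.0.0.0 0 10.0.0.0 8 0 65535 0 65535 6 0xFF port 0
    p 1 firewall add default 0
    p 1 firewall del ipv4 0.0.0.0 0 10.0.0.0 8 0 65535 0 65535 6 0xFF
    p 1 firewall ls

The first rule matches TCP (proto 6 with full protocol mask 0xFF) from any source towards 10.0.0.0/8 on any port pair and forwards matching packets to output port 0.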
-
-struct cmd_firewall_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t firewall_string;
- cmdline_multi_string_t multi_string;
-};
-
-static void cmd_firewall_parsed(void *parsed_result,
-	__rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_firewall_result *params = parsed_result;
- struct app_params *app = data;
- int status;
-
- char *tokens[17];
- uint32_t n_tokens = RTE_DIM(tokens);
-
- status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
- if (status) {
- printf(CMD_MSG_TOO_MANY_ARGS, "firewall");
- return;
- }
-
- /* firewall add */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "priority") == 0)) {
- struct pipeline_firewall_key key;
- uint32_t priority;
- struct in_addr sipaddr;
- uint32_t sipdepth;
- struct in_addr dipaddr;
- uint32_t dipdepth;
- uint16_t sport0;
- uint16_t sport1;
- uint16_t dport0;
- uint16_t dport1;
- uint8_t proto;
- uint8_t protomask;
- uint32_t port_id;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 16) {
- printf(CMD_MSG_MISMATCH_ARGS, "firewall add");
- return;
- }
-
- if (parser_read_uint32(&priority, tokens[2])) {
- printf(CMD_MSG_INVALID_ARG, "priority");
- return;
- }
-
- if (strcmp(tokens[3], "ipv4")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "ipv4");
- return;
- }
-
- if (parse_ipv4_addr(tokens[4], &sipaddr)) {
- printf(CMD_MSG_INVALID_ARG, "sipaddr");
- return;
- }
-
- if (parser_read_uint32(&sipdepth, tokens[5])) {
- printf(CMD_MSG_INVALID_ARG, "sipdepth");
- return;
- }
-
- if (parse_ipv4_addr(tokens[6], &dipaddr)) {
- printf(CMD_MSG_INVALID_ARG, "dipaddr");
- return;
- }
-
- if (parser_read_uint32(&dipdepth, tokens[7])) {
- printf(CMD_MSG_INVALID_ARG, "dipdepth");
- return;
- }
-
- if (parser_read_uint16(&sport0, tokens[8])) {
- printf(CMD_MSG_INVALID_ARG, "sport0");
- return;
- }
-
- if (parser_read_uint16(&sport1, tokens[9])) {
- printf(CMD_MSG_INVALID_ARG, "sport1");
- return;
- }
-
- if (parser_read_uint16(&dport0, tokens[10])) {
- printf(CMD_MSG_INVALID_ARG, "dport0");
- return;
- }
-
- if (parser_read_uint16(&dport1, tokens[11])) {
- printf(CMD_MSG_INVALID_ARG, "dport1");
- return;
- }
-
- if (parser_read_uint8(&proto, tokens[12])) {
- printf(CMD_MSG_INVALID_ARG, "proto");
- return;
- }
-
- if (parser_read_uint8_hex(&protomask, tokens[13])) {
- printf(CMD_MSG_INVALID_ARG, "protomask");
- return;
- }
-
- if (strcmp(tokens[14], "port")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "port");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[15])) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
- key.key.ipv4_5tuple.src_ip = rte_be_to_cpu_32(sipaddr.s_addr);
- key.key.ipv4_5tuple.src_ip_mask = sipdepth;
- key.key.ipv4_5tuple.dst_ip = rte_be_to_cpu_32(dipaddr.s_addr);
- key.key.ipv4_5tuple.dst_ip_mask = dipdepth;
- key.key.ipv4_5tuple.src_port_from = sport0;
- key.key.ipv4_5tuple.src_port_to = sport1;
- key.key.ipv4_5tuple.dst_port_from = dport0;
- key.key.ipv4_5tuple.dst_port_to = dport1;
- key.key.ipv4_5tuple.proto = proto;
- key.key.ipv4_5tuple.proto_mask = protomask;
-
- status = app_pipeline_firewall_add_rule(app,
- params->pipeline_id,
- &key,
- priority,
- port_id);
- if (status)
- printf(CMD_MSG_FAIL, "firewall add");
-
- return;
- } /* firewall add */
-
- /* firewall add bulk */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "bulk") == 0)) {
- struct pipeline_firewall_key *keys;
- uint32_t *priorities, *port_ids, n_keys, line;
- char *filename;
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "firewall add bulk");
- return;
- }
-
- filename = tokens[2];
-
- n_keys = APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE;
- keys = malloc(n_keys * sizeof(struct pipeline_firewall_key));
- if (keys == NULL) {
- printf(CMD_MSG_OUT_OF_MEMORY);
- return;
- }
- memset(keys, 0, n_keys * sizeof(struct pipeline_firewall_key));
-
- priorities = malloc(n_keys * sizeof(uint32_t));
- if (priorities == NULL) {
- printf(CMD_MSG_OUT_OF_MEMORY);
- free(keys);
- return;
- }
-
- port_ids = malloc(n_keys * sizeof(uint32_t));
- if (port_ids == NULL) {
- printf(CMD_MSG_OUT_OF_MEMORY);
- free(priorities);
- free(keys);
- return;
- }
-
- status = app_pipeline_firewall_load_file(filename,
- keys,
- priorities,
- port_ids,
- &n_keys,
- &line);
- if (status != 0) {
- printf(CMD_MSG_FILE_ERR, filename, line);
- free(port_ids);
- free(priorities);
- free(keys);
- return;
- }
-
- status = app_pipeline_firewall_add_bulk(app,
- params->pipeline_id,
- keys,
- n_keys,
- priorities,
- port_ids);
- if (status)
- printf(CMD_MSG_FAIL, "firewall add bulk");
-
- free(keys);
- free(priorities);
- free(port_ids);
- return;
- } /* firewall add bulk */
-
- /* firewall add default */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "default") == 0)) {
- uint32_t port_id;
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "firewall add default");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[2])) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- status = app_pipeline_firewall_add_default_rule(app,
- params->pipeline_id,
- port_id);
- if (status)
- printf(CMD_MSG_FAIL, "firewall add default");
-
- return;
- } /* firewall add default */
-
- /* firewall del */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "ipv4") == 0)) {
- struct pipeline_firewall_key key;
- struct in_addr sipaddr;
- uint32_t sipdepth;
- struct in_addr dipaddr;
- uint32_t dipdepth;
- uint16_t sport0;
- uint16_t sport1;
- uint16_t dport0;
- uint16_t dport1;
- uint8_t proto;
- uint8_t protomask;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 12) {
- printf(CMD_MSG_MISMATCH_ARGS, "firewall del");
- return;
- }
-
- if (parse_ipv4_addr(tokens[2], &sipaddr)) {
- printf(CMD_MSG_INVALID_ARG, "sipaddr");
- return;
- }
-
- if (parser_read_uint32(&sipdepth, tokens[3])) {
- printf(CMD_MSG_INVALID_ARG, "sipdepth");
- return;
- }
-
- if (parse_ipv4_addr(tokens[4], &dipaddr)) {
- printf(CMD_MSG_INVALID_ARG, "dipaddr");
- return;
- }
-
- if (parser_read_uint32(&dipdepth, tokens[5])) {
- printf(CMD_MSG_INVALID_ARG, "dipdepth");
- return;
- }
-
- if (parser_read_uint16(&sport0, tokens[6])) {
- printf(CMD_MSG_INVALID_ARG, "sport0");
- return;
- }
-
- if (parser_read_uint16(&sport1, tokens[7])) {
- printf(CMD_MSG_INVALID_ARG, "sport1");
- return;
- }
-
- if (parser_read_uint16(&dport0, tokens[8])) {
- printf(CMD_MSG_INVALID_ARG, "dport0");
- return;
- }
-
- if (parser_read_uint16(&dport1, tokens[9])) {
- printf(CMD_MSG_INVALID_ARG, "dport1");
- return;
- }
-
- if (parser_read_uint8(&proto, tokens[10])) {
- printf(CMD_MSG_INVALID_ARG, "proto");
- return;
- }
-
- if (parser_read_uint8_hex(&protomask, tokens[11])) {
- printf(CMD_MSG_INVALID_ARG, "protomask");
- return;
- }
-
- key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
- key.key.ipv4_5tuple.src_ip = rte_be_to_cpu_32(sipaddr.s_addr);
- key.key.ipv4_5tuple.src_ip_mask = sipdepth;
- key.key.ipv4_5tuple.dst_ip = rte_be_to_cpu_32(dipaddr.s_addr);
- key.key.ipv4_5tuple.dst_ip_mask = dipdepth;
- key.key.ipv4_5tuple.src_port_from = sport0;
- key.key.ipv4_5tuple.src_port_to = sport1;
- key.key.ipv4_5tuple.dst_port_from = dport0;
- key.key.ipv4_5tuple.dst_port_to = dport1;
- key.key.ipv4_5tuple.proto = proto;
- key.key.ipv4_5tuple.proto_mask = protomask;
-
- status = app_pipeline_firewall_delete_rule(app,
- params->pipeline_id,
- &key);
- if (status)
- printf(CMD_MSG_FAIL, "firewall del");
-
- return;
- } /* firewall del */
-
- /* firewall del bulk */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "bulk") == 0)) {
- struct pipeline_firewall_key *keys;
- uint32_t *priorities, *port_ids, n_keys, line;
- char *filename;
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "firewall del bulk");
- return;
- }
-
- filename = tokens[2];
-
- n_keys = APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE;
- keys = malloc(n_keys * sizeof(struct pipeline_firewall_key));
- if (keys == NULL) {
- printf(CMD_MSG_OUT_OF_MEMORY);
- return;
- }
- memset(keys, 0, n_keys * sizeof(struct pipeline_firewall_key));
-
- priorities = malloc(n_keys * sizeof(uint32_t));
- if (priorities == NULL) {
- printf(CMD_MSG_OUT_OF_MEMORY);
- free(keys);
- return;
- }
-
- port_ids = malloc(n_keys * sizeof(uint32_t));
- if (port_ids == NULL) {
- printf(CMD_MSG_OUT_OF_MEMORY);
- free(priorities);
- free(keys);
- return;
- }
-
- status = app_pipeline_firewall_load_file(filename,
- keys,
- priorities,
- port_ids,
- &n_keys,
- &line);
- if (status != 0) {
- printf(CMD_MSG_FILE_ERR, filename, line);
- free(port_ids);
- free(priorities);
- free(keys);
- return;
- }
-
- status = app_pipeline_firewall_delete_bulk(app,
- params->pipeline_id,
- keys,
- n_keys);
- if (status)
- printf(CMD_MSG_FAIL, "firewall del bulk");
-
- free(port_ids);
- free(priorities);
- free(keys);
- return;
- } /* firewall del bulk */
-
- /* firewall del default */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "default") == 0)) {
- if (n_tokens != 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "firewall del default");
- return;
- }
-
- status = app_pipeline_firewall_delete_default_rule(app,
- params->pipeline_id);
- if (status)
- printf(CMD_MSG_FAIL, "firewall del default");
-
- return;
-
- } /* firewall del default */
-
- /* firewall ls */
- if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
- if (n_tokens != 1) {
- printf(CMD_MSG_MISMATCH_ARGS, "firewall ls");
- return;
- }
-
- status = app_pipeline_firewall_ls(app, params->pipeline_id);
- if (status)
- printf(CMD_MSG_FAIL, "firewall ls");
-
- return;
- } /* firewall ls */
-
- printf(CMD_MSG_MISMATCH_ARGS, "firewall");
-}
-
-static cmdline_parse_token_string_t cmd_firewall_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, p_string, "p");
-
-static cmdline_parse_token_num_t cmd_firewall_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_firewall_result, pipeline_id, UINT32);
-
-static cmdline_parse_token_string_t cmd_firewall_firewall_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, firewall_string,
- "firewall");
-
-static cmdline_parse_token_string_t cmd_firewall_multi_string =
- TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, multi_string,
- TOKEN_STRING_MULTI);
-
-static cmdline_parse_inst_t cmd_firewall = {
- .f = cmd_firewall_parsed,
- .data = NULL,
- .help_str = "firewall add / add bulk / add default / del / del bulk"
- " / del default / ls",
- .tokens = {
- (void *) &cmd_firewall_p_string,
- (void *) &cmd_firewall_pipeline_id,
- (void *) &cmd_firewall_firewall_string,
- (void *) &cmd_firewall_multi_string,
- NULL,
- },
-};
-
-static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_firewall,
- NULL,
-};
-
-static struct pipeline_fe_ops pipeline_firewall_fe_ops = {
- .f_init = app_pipeline_firewall_init,
- .f_post_init = NULL,
- .f_free = app_pipeline_firewall_free,
- .f_track = app_pipeline_track_default,
- .cmds = pipeline_cmds,
-};
-
-struct pipeline_type pipeline_firewall = {
- .name = "FIREWALL",
- .be_ops = &pipeline_firewall_be_ops,
- .fe_ops = &pipeline_firewall_fe_ops,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall.h b/examples/ip_pipeline/pipeline/pipeline_firewall.h
deleted file mode 100644
index 27304b00..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_firewall.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_FIREWALL_H__
-#define __INCLUDE_PIPELINE_FIREWALL_H__
-
-#include "pipeline.h"
-#include "pipeline_firewall_be.h"
-
-int
-app_pipeline_firewall_add_rule(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_firewall_key *key,
- uint32_t priority,
- uint32_t port_id);
-
-int
-app_pipeline_firewall_delete_rule(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_firewall_key *key);
-
-int
-app_pipeline_firewall_add_bulk(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_firewall_key *keys,
- uint32_t n_keys,
- uint32_t *priorities,
- uint32_t *port_ids);
-
-int
-app_pipeline_firewall_delete_bulk(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_firewall_key *keys,
- uint32_t n_keys);
-
-int
-app_pipeline_firewall_add_default_rule(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id);
-
-int
-app_pipeline_firewall_delete_default_rule(struct app_params *app,
- uint32_t pipeline_id);
-
-#ifndef APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE
-#define APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE 65536
-#endif
-
-int
-app_pipeline_firewall_load_file(char *filename,
- struct pipeline_firewall_key *keys,
- uint32_t *priorities,
- uint32_t *port_ids,
- uint32_t *n_keys,
- uint32_t *line);
-
-extern struct pipeline_type pipeline_firewall;
-
-#endif
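A minimal usage sketch of the front-end API declared above (the app pointer and pipeline ID come from the surrounding application; the IPv4() helper from rte_ip.h of this DPDK era and all numeric values are illustrative assumptions):

    #include <rte_ip.h>
    #include "pipeline_firewall.h"

    static int
    block_telnet(struct app_params *app, uint32_t pipeline_id)
    {
    	struct pipeline_firewall_key key = {
    		.type = PIPELINE_FIREWALL_IPV4_5TUPLE,
    		.key.ipv4_5tuple = {
    			.src_ip = 0,
    			.src_ip_mask = 0,		/* any source */
    			.dst_ip = IPv4(10, 0, 0, 0),
    			.dst_ip_mask = 8,		/* 10.0.0.0/8 */
    			.src_port_from = 0,
    			.src_port_to = 65535,
    			.dst_port_from = 23,		/* telnet */
    			.dst_port_to = 23,
    			.proto = 6,			/* TCP */
    			.proto_mask = 0xFF,
    		},
    	};

    	/* Priority 1; matching packets go to output port 0 */
    	return app_pipeline_firewall_add_rule(app, pipeline_id, &key, 1, 0);
    }

Note that the IP addresses are stored in host byte order, matching the rte_be_to_cpu_32() conversion performed by the CLI handler in pipeline_firewall.c.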
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c b/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
deleted file mode 100644
index bd5e1b2b..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
+++ /dev/null
@@ -1,856 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <rte_ether.h>
-#include <rte_ip.h>
-#include <rte_tcp.h>
-#include <rte_byteorder.h>
-#include <rte_table_acl.h>
-
-#include "pipeline_firewall_be.h"
-#include "parser.h"
-
-struct pipeline_firewall {
- struct pipeline p;
- pipeline_msg_req_handler custom_handlers[PIPELINE_FIREWALL_MSG_REQS];
-
- uint32_t n_rules;
- uint32_t n_rule_fields;
- struct rte_acl_field_def *field_format;
- uint32_t field_format_size;
-} __rte_cache_aligned;
-
-static void *
-pipeline_firewall_msg_req_custom_handler(struct pipeline *p, void *msg);
-
-static pipeline_msg_req_handler handlers[] = {
- [PIPELINE_MSG_REQ_PING] =
- pipeline_msg_req_ping_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_IN] =
- pipeline_msg_req_stats_port_in_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
- pipeline_msg_req_stats_port_out_handler,
- [PIPELINE_MSG_REQ_STATS_TABLE] =
- pipeline_msg_req_stats_table_handler,
- [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
- pipeline_msg_req_port_in_enable_handler,
- [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
- pipeline_msg_req_port_in_disable_handler,
- [PIPELINE_MSG_REQ_CUSTOM] =
- pipeline_firewall_msg_req_custom_handler,
-};
-
-static void *
-pipeline_firewall_msg_req_add_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_firewall_msg_req_del_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_firewall_msg_req_del_bulk_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_firewall_msg_req_add_default_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_firewall_msg_req_del_default_handler(struct pipeline *p, void *msg);
-
-static pipeline_msg_req_handler custom_handlers[] = {
- [PIPELINE_FIREWALL_MSG_REQ_ADD] =
- pipeline_firewall_msg_req_add_handler,
- [PIPELINE_FIREWALL_MSG_REQ_DEL] =
- pipeline_firewall_msg_req_del_handler,
- [PIPELINE_FIREWALL_MSG_REQ_ADD_BULK] =
- pipeline_firewall_msg_req_add_bulk_handler,
- [PIPELINE_FIREWALL_MSG_REQ_DEL_BULK] =
- pipeline_firewall_msg_req_del_bulk_handler,
- [PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT] =
- pipeline_firewall_msg_req_add_default_handler,
- [PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT] =
- pipeline_firewall_msg_req_del_default_handler,
-};
-
-/*
- * Firewall table
- */
-struct firewall_table_entry {
- struct rte_pipeline_table_entry head;
-};
-
-static struct rte_acl_field_def field_format_ipv4[] = {
- /* Protocol */
- [0] = {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint8_t),
- .field_index = 0,
- .input_index = 0,
- .offset = sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, next_proto_id),
- },
-
- /* Source IP address (IPv4) */
- [1] = {
- .type = RTE_ACL_FIELD_TYPE_MASK,
- .size = sizeof(uint32_t),
- .field_index = 1,
- .input_index = 1,
- .offset = sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, src_addr),
- },
-
- /* Destination IP address (IPv4) */
- [2] = {
- .type = RTE_ACL_FIELD_TYPE_MASK,
- .size = sizeof(uint32_t),
- .field_index = 2,
- .input_index = 2,
- .offset = sizeof(struct ether_hdr) +
- offsetof(struct ipv4_hdr, dst_addr),
- },
-
- /* Source Port */
- [3] = {
- .type = RTE_ACL_FIELD_TYPE_RANGE,
- .size = sizeof(uint16_t),
- .field_index = 3,
- .input_index = 3,
- .offset = sizeof(struct ether_hdr) +
- sizeof(struct ipv4_hdr) +
- offsetof(struct tcp_hdr, src_port),
- },
-
- /* Destination Port */
- [4] = {
- .type = RTE_ACL_FIELD_TYPE_RANGE,
- .size = sizeof(uint16_t),
- .field_index = 4,
- .input_index = 3,
- .offset = sizeof(struct ether_hdr) +
- sizeof(struct ipv4_hdr) +
- offsetof(struct tcp_hdr, dst_port),
- },
-};
-
-#define SIZEOF_VLAN_HDR 4
-
-static struct rte_acl_field_def field_format_vlan_ipv4[] = {
- /* Protocol */
- [0] = {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint8_t),
- .field_index = 0,
- .input_index = 0,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_VLAN_HDR +
- offsetof(struct ipv4_hdr, next_proto_id),
- },
-
- /* Source IP address (IPv4) */
- [1] = {
- .type = RTE_ACL_FIELD_TYPE_MASK,
- .size = sizeof(uint32_t),
- .field_index = 1,
- .input_index = 1,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_VLAN_HDR +
- offsetof(struct ipv4_hdr, src_addr),
- },
-
- /* Destination IP address (IPv4) */
- [2] = {
- .type = RTE_ACL_FIELD_TYPE_MASK,
- .size = sizeof(uint32_t),
- .field_index = 2,
- .input_index = 2,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_VLAN_HDR +
- offsetof(struct ipv4_hdr, dst_addr),
- },
-
- /* Source Port */
- [3] = {
- .type = RTE_ACL_FIELD_TYPE_RANGE,
- .size = sizeof(uint16_t),
- .field_index = 3,
- .input_index = 3,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_VLAN_HDR +
- sizeof(struct ipv4_hdr) +
- offsetof(struct tcp_hdr, src_port),
- },
-
- /* Destination Port */
- [4] = {
- .type = RTE_ACL_FIELD_TYPE_RANGE,
- .size = sizeof(uint16_t),
- .field_index = 4,
- .input_index = 3,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_VLAN_HDR +
- sizeof(struct ipv4_hdr) +
- offsetof(struct tcp_hdr, dst_port),
- },
-};
-
-#define SIZEOF_QINQ_HEADER 8
-
-static struct rte_acl_field_def field_format_qinq_ipv4[] = {
- /* Protocol */
- [0] = {
- .type = RTE_ACL_FIELD_TYPE_BITMASK,
- .size = sizeof(uint8_t),
- .field_index = 0,
- .input_index = 0,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_QINQ_HEADER +
- offsetof(struct ipv4_hdr, next_proto_id),
- },
-
- /* Source IP address (IPv4) */
- [1] = {
- .type = RTE_ACL_FIELD_TYPE_MASK,
- .size = sizeof(uint32_t),
- .field_index = 1,
- .input_index = 1,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_QINQ_HEADER +
- offsetof(struct ipv4_hdr, src_addr),
- },
-
- /* Destination IP address (IPv4) */
- [2] = {
- .type = RTE_ACL_FIELD_TYPE_MASK,
- .size = sizeof(uint32_t),
- .field_index = 2,
- .input_index = 2,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_QINQ_HEADER +
- offsetof(struct ipv4_hdr, dst_addr),
- },
-
- /* Source Port */
- [3] = {
- .type = RTE_ACL_FIELD_TYPE_RANGE,
- .size = sizeof(uint16_t),
- .field_index = 3,
- .input_index = 3,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_QINQ_HEADER +
- sizeof(struct ipv4_hdr) +
- offsetof(struct tcp_hdr, src_port),
- },
-
- /* Destination Port */
- [4] = {
- .type = RTE_ACL_FIELD_TYPE_RANGE,
- .size = sizeof(uint16_t),
- .field_index = 4,
- .input_index = 3,
- .offset = sizeof(struct ether_hdr) +
- SIZEOF_QINQ_HEADER +
- sizeof(struct ipv4_hdr) +
- offsetof(struct tcp_hdr, dst_port),
- },
-};
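The three field formats above differ only by a constant added to every offset: the IPv4 header starts sizeof(struct ether_hdr) = 14 bytes into the frame for plain Ethernet, 14 + SIZEOF_VLAN_HDR = 18 bytes with one VLAN tag, and 14 + SIZEOF_QINQ_HEADER = 22 bytes for QinQ. The two port fields additionally skip the options-less sizeof(struct ipv4_hdr) = 20 bytes, so for example the QinQ source port is matched at byte offset 22 + 20 + offsetof(struct tcp_hdr, src_port) = 42. Source and destination port deliberately share input_index 3, since the ACL library groups consecutive 16-bit fields with the same input index into one 32-bit input word.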
-
-static int
-pipeline_firewall_parse_args(struct pipeline_firewall *p,
- struct pipeline_params *params)
-{
- uint32_t n_rules_present = 0;
- uint32_t pkt_type_present = 0;
- uint32_t i;
-
- /* defaults */
- p->n_rules = 4 * 1024;
- p->n_rule_fields = RTE_DIM(field_format_ipv4);
- p->field_format = field_format_ipv4;
- p->field_format_size = sizeof(field_format_ipv4);
-
- for (i = 0; i < params->n_args; i++) {
- char *arg_name = params->args_name[i];
- char *arg_value = params->args_value[i];
-
- if (strcmp(arg_name, "n_rules") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- n_rules_present == 0, params->name,
- arg_name);
- n_rules_present = 1;
-
- status = parser_read_uint32(&p->n_rules,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
- continue;
- }
-
- if (strcmp(arg_name, "pkt_type") == 0) {
- PIPELINE_PARSE_ERR_DUPLICATE(
- pkt_type_present == 0, params->name,
- arg_name);
- pkt_type_present = 1;
-
- /* ipv4 */
- if (strcmp(arg_value, "ipv4") == 0) {
- p->n_rule_fields = RTE_DIM(field_format_ipv4);
- p->field_format = field_format_ipv4;
- p->field_format_size =
- sizeof(field_format_ipv4);
- continue;
- }
-
- /* vlan_ipv4 */
- if (strcmp(arg_value, "vlan_ipv4") == 0) {
- p->n_rule_fields =
- RTE_DIM(field_format_vlan_ipv4);
- p->field_format = field_format_vlan_ipv4;
- p->field_format_size =
- sizeof(field_format_vlan_ipv4);
- continue;
- }
-
- /* qinq_ipv4 */
- if (strcmp(arg_value, "qinq_ipv4") == 0) {
- p->n_rule_fields =
- RTE_DIM(field_format_qinq_ipv4);
- p->field_format = field_format_qinq_ipv4;
- p->field_format_size =
- sizeof(field_format_qinq_ipv4);
- continue;
- }
-
- /* other */
- PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
- arg_name, arg_value);
- }
-
- /* other */
- PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
- }
-
- return 0;
-}
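In the application's configuration file these arguments are given as key/value lines of the pipeline section. A sketch, assuming the usual ip_pipeline .cfg layout of this release (only n_rules and pkt_type are consumed by the parser above; the remaining keys belong to the generic pipeline configuration):

    [PIPELINE1]
    type = FIREWALL
    core = 1
    pktq_in = RXQ0.0
    pktq_out = TXQ0.0
    n_rules = 4096
    pkt_type = vlan_ipv4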
-
-static void *
-pipeline_firewall_init(struct pipeline_params *params,
- __rte_unused void *arg)
-{
- struct pipeline *p;
- struct pipeline_firewall *p_fw;
- uint32_t size, i;
-
- /* Check input arguments */
- if ((params == NULL) ||
- (params->n_ports_in == 0) ||
- (params->n_ports_out == 0))
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_firewall));
-	p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
-	if (p == NULL)
-		return NULL;
-	p_fw = (struct pipeline_firewall *) p;
-
- strcpy(p->name, params->name);
- p->log_level = params->log_level;
-
- PLOG(p, HIGH, "Firewall");
-
- /* Parse arguments */
-	if (pipeline_firewall_parse_args(p_fw, params)) {
-		rte_free(p);
-		return NULL;
-	}
-
- /* Pipeline */
- {
- struct rte_pipeline_params pipeline_params = {
- .name = params->name,
- .socket_id = params->socket_id,
- .offset_port_id = 0,
- };
-
- p->p = rte_pipeline_create(&pipeline_params);
- if (p->p == NULL) {
- rte_free(p);
- return NULL;
- }
- }
-
- /* Input ports */
- p->n_ports_in = params->n_ports_in;
- for (i = 0; i < p->n_ports_in; i++) {
- struct rte_pipeline_port_in_params port_params = {
- .ops = pipeline_port_in_params_get_ops(
- &params->port_in[i]),
- .arg_create = pipeline_port_in_params_convert(
- &params->port_in[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- .burst_size = params->port_in[i].burst_size,
- };
-
- int status = rte_pipeline_port_in_create(p->p,
- &port_params,
- &p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Output ports */
- p->n_ports_out = params->n_ports_out;
- for (i = 0; i < p->n_ports_out; i++) {
- struct rte_pipeline_port_out_params port_params = {
- .ops = pipeline_port_out_params_get_ops(
- &params->port_out[i]),
- .arg_create = pipeline_port_out_params_convert(
- &params->port_out[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- };
-
- int status = rte_pipeline_port_out_create(p->p,
- &port_params,
- &p->port_out_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Tables */
- p->n_tables = 1;
- {
- struct rte_table_acl_params table_acl_params = {
- .name = params->name,
- .n_rules = p_fw->n_rules,
- .n_rule_fields = p_fw->n_rule_fields,
- };
-
- struct rte_pipeline_table_params table_params = {
- .ops = &rte_table_acl_ops,
- .arg_create = &table_acl_params,
- .f_action_hit = NULL,
- .f_action_miss = NULL,
- .arg_ah = NULL,
- .action_data_size =
- sizeof(struct firewall_table_entry) -
- sizeof(struct rte_pipeline_table_entry),
- };
-
- int status;
-
- memcpy(table_acl_params.field_format,
- p_fw->field_format,
- p_fw->field_format_size);
-
- status = rte_pipeline_table_create(p->p,
- &table_params,
- &p->table_id[0]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Connecting input ports to tables */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_connect_to_table(p->p,
- p->port_in_id[i],
- p->table_id[0]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Enable input ports */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_enable(p->p,
- p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Check pipeline consistency */
- if (rte_pipeline_check(p->p) < 0) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
-
- /* Message queues */
- p->n_msgq = params->n_msgq;
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_in[i] = params->msgq_in[i];
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_out[i] = params->msgq_out[i];
-
- /* Message handlers */
- memcpy(p->handlers, handlers, sizeof(p->handlers));
- memcpy(p_fw->custom_handlers,
- custom_handlers,
- sizeof(p_fw->custom_handlers));
-
- return p;
-}
-
-static int
-pipeline_firewall_free(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- rte_pipeline_free(p->p);
- rte_free(p);
- return 0;
-}
-
-static int
-pipeline_firewall_timer(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- pipeline_msg_req_handle(p);
- rte_pipeline_flush(p->p);
-
- return 0;
-}
-
-static void *
-pipeline_firewall_msg_req_custom_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_firewall *p_fw = (struct pipeline_firewall *) p;
- struct pipeline_custom_msg_req *req = msg;
- pipeline_msg_req_handler f_handle;
-
- f_handle = (req->subtype < PIPELINE_FIREWALL_MSG_REQS) ?
- p_fw->custom_handlers[req->subtype] :
- pipeline_msg_req_invalid_handler;
-
- if (f_handle == NULL)
- f_handle = pipeline_msg_req_invalid_handler;
-
- return f_handle(p, req);
-}
-
-static void *
-pipeline_firewall_msg_req_add_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_firewall_add_msg_req *req = msg;
- struct pipeline_firewall_add_msg_rsp *rsp = msg;
-
- struct rte_table_acl_rule_add_params params;
- struct firewall_table_entry entry = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[req->port_id]},
- },
- };
-
- memset(&params, 0, sizeof(params));
-
- switch (req->key.type) {
- case PIPELINE_FIREWALL_IPV4_5TUPLE:
- params.priority = req->priority;
- params.field_value[0].value.u8 =
- req->key.key.ipv4_5tuple.proto;
- params.field_value[0].mask_range.u8 =
- req->key.key.ipv4_5tuple.proto_mask;
- params.field_value[1].value.u32 =
- req->key.key.ipv4_5tuple.src_ip;
- params.field_value[1].mask_range.u32 =
- req->key.key.ipv4_5tuple.src_ip_mask;
- params.field_value[2].value.u32 =
- req->key.key.ipv4_5tuple.dst_ip;
- params.field_value[2].mask_range.u32 =
- req->key.key.ipv4_5tuple.dst_ip_mask;
- params.field_value[3].value.u16 =
- req->key.key.ipv4_5tuple.src_port_from;
- params.field_value[3].mask_range.u16 =
- req->key.key.ipv4_5tuple.src_port_to;
- params.field_value[4].value.u16 =
- req->key.key.ipv4_5tuple.dst_port_from;
- params.field_value[4].mask_range.u16 =
- req->key.key.ipv4_5tuple.dst_port_to;
- break;
-
- default:
- rsp->status = -1; /* Error */
- return rsp;
- }
-
- rsp->status = rte_pipeline_table_entry_add(p->p,
- p->table_id[0],
- &params,
- (struct rte_pipeline_table_entry *) &entry,
- &rsp->key_found,
- (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
-
- return rsp;
-}
-
-static void *
-pipeline_firewall_msg_req_del_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_firewall_del_msg_req *req = msg;
- struct pipeline_firewall_del_msg_rsp *rsp = msg;
-
- struct rte_table_acl_rule_delete_params params;
-
- memset(&params, 0, sizeof(params));
-
- switch (req->key.type) {
- case PIPELINE_FIREWALL_IPV4_5TUPLE:
- params.field_value[0].value.u8 =
- req->key.key.ipv4_5tuple.proto;
- params.field_value[0].mask_range.u8 =
- req->key.key.ipv4_5tuple.proto_mask;
- params.field_value[1].value.u32 =
- req->key.key.ipv4_5tuple.src_ip;
- params.field_value[1].mask_range.u32 =
- req->key.key.ipv4_5tuple.src_ip_mask;
- params.field_value[2].value.u32 =
- req->key.key.ipv4_5tuple.dst_ip;
- params.field_value[2].mask_range.u32 =
- req->key.key.ipv4_5tuple.dst_ip_mask;
- params.field_value[3].value.u16 =
- req->key.key.ipv4_5tuple.src_port_from;
- params.field_value[3].mask_range.u16 =
- req->key.key.ipv4_5tuple.src_port_to;
- params.field_value[4].value.u16 =
- req->key.key.ipv4_5tuple.dst_port_from;
- params.field_value[4].mask_range.u16 =
- req->key.key.ipv4_5tuple.dst_port_to;
- break;
-
- default:
- rsp->status = -1; /* Error */
- return rsp;
- }
-
- rsp->status = rte_pipeline_table_entry_delete(p->p,
- p->table_id[0],
- &params,
- &rsp->key_found,
- NULL);
-
- return rsp;
-}
-
-static void *
-pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_firewall_add_bulk_msg_req *req = msg;
- struct pipeline_firewall_add_bulk_msg_rsp *rsp = msg;
-
- struct rte_table_acl_rule_add_params *params[req->n_keys];
- struct firewall_table_entry *entries[req->n_keys];
-
-	uint32_t i, j, n_keys;
-
- n_keys = req->n_keys;
-
-	for (i = 0; i < n_keys; i++) {
-		entries[i] = rte_zmalloc(NULL,
-			sizeof(struct firewall_table_entry),
-			RTE_CACHE_LINE_SIZE);
-		if (entries[i] == NULL) {
-			rsp->status = -1;
-
-			/* Free the entries allocated in previous iterations */
-			for (j = 0; j < i; j++) {
-				rte_free(entries[j]);
-				rte_free(params[j]);
-			}
-
-			return rsp;
-		}
-
-		params[i] = rte_zmalloc(NULL,
-			sizeof(struct rte_table_acl_rule_add_params),
-			RTE_CACHE_LINE_SIZE);
-		if (params[i] == NULL) {
-			rsp->status = -1;
-
-			rte_free(entries[i]);
-			for (j = 0; j < i; j++) {
-				rte_free(entries[j]);
-				rte_free(params[j]);
-			}
-
-			return rsp;
-		}
-
- entries[i]->head.action = RTE_PIPELINE_ACTION_PORT;
- entries[i]->head.port_id = p->port_out_id[req->port_ids[i]];
-
- switch (req->keys[i].type) {
- case PIPELINE_FIREWALL_IPV4_5TUPLE:
- params[i]->priority = req->priorities[i];
- params[i]->field_value[0].value.u8 =
- req->keys[i].key.ipv4_5tuple.proto;
- params[i]->field_value[0].mask_range.u8 =
- req->keys[i].key.ipv4_5tuple.proto_mask;
- params[i]->field_value[1].value.u32 =
- req->keys[i].key.ipv4_5tuple.src_ip;
- params[i]->field_value[1].mask_range.u32 =
- req->keys[i].key.ipv4_5tuple.src_ip_mask;
- params[i]->field_value[2].value.u32 =
- req->keys[i].key.ipv4_5tuple.dst_ip;
- params[i]->field_value[2].mask_range.u32 =
- req->keys[i].key.ipv4_5tuple.dst_ip_mask;
- params[i]->field_value[3].value.u16 =
- req->keys[i].key.ipv4_5tuple.src_port_from;
- params[i]->field_value[3].mask_range.u16 =
- req->keys[i].key.ipv4_5tuple.src_port_to;
- params[i]->field_value[4].value.u16 =
- req->keys[i].key.ipv4_5tuple.dst_port_from;
- params[i]->field_value[4].mask_range.u16 =
- req->keys[i].key.ipv4_5tuple.dst_port_to;
- break;
-
-		default:
-			rsp->status = -1; /* Error */
-
-			/* Free only the entries allocated so far */
-			for (j = 0; j <= i; j++) {
-				rte_free(entries[j]);
-				rte_free(params[j]);
-			}
-
-			return rsp;
-		}
- }
-
- rsp->status = rte_pipeline_table_entry_add_bulk(p->p, p->table_id[0],
- (void *)params, (struct rte_pipeline_table_entry **)entries,
- n_keys, req->keys_found,
- (struct rte_pipeline_table_entry **)req->entries_ptr);
-
- for (i = 0; i < n_keys; i++) {
- rte_free(entries[i]);
- rte_free(params[i]);
- }
-
- return rsp;
-}
-
-static void *
-pipeline_firewall_msg_req_del_bulk_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_firewall_del_bulk_msg_req *req = msg;
- struct pipeline_firewall_del_bulk_msg_rsp *rsp = msg;
-
- struct rte_table_acl_rule_delete_params *params[req->n_keys];
-
-	uint32_t i, j, n_keys;
-
- n_keys = req->n_keys;
-
- for (i = 0; i < n_keys; i++) {
- params[i] = rte_zmalloc(NULL,
- sizeof(struct rte_table_acl_rule_delete_params),
- RTE_CACHE_LINE_SIZE);
-		if (params[i] == NULL) {
-			rsp->status = -1;
-
-			for (j = 0; j < i; j++)
-				rte_free(params[j]);
-
-			return rsp;
-		}
-
- switch (req->keys[i].type) {
- case PIPELINE_FIREWALL_IPV4_5TUPLE:
- params[i]->field_value[0].value.u8 =
- req->keys[i].key.ipv4_5tuple.proto;
- params[i]->field_value[0].mask_range.u8 =
- req->keys[i].key.ipv4_5tuple.proto_mask;
- params[i]->field_value[1].value.u32 =
- req->keys[i].key.ipv4_5tuple.src_ip;
- params[i]->field_value[1].mask_range.u32 =
- req->keys[i].key.ipv4_5tuple.src_ip_mask;
- params[i]->field_value[2].value.u32 =
- req->keys[i].key.ipv4_5tuple.dst_ip;
- params[i]->field_value[2].mask_range.u32 =
- req->keys[i].key.ipv4_5tuple.dst_ip_mask;
- params[i]->field_value[3].value.u16 =
- req->keys[i].key.ipv4_5tuple.src_port_from;
- params[i]->field_value[3].mask_range.u16 =
- req->keys[i].key.ipv4_5tuple.src_port_to;
- params[i]->field_value[4].value.u16 =
- req->keys[i].key.ipv4_5tuple.dst_port_from;
- params[i]->field_value[4].mask_range.u16 =
- req->keys[i].key.ipv4_5tuple.dst_port_to;
- break;
-
-		default:
-			rsp->status = -1; /* Error */
-
-			/* Free only the entries allocated so far */
-			for (j = 0; j <= i; j++)
-				rte_free(params[j]);
-
-			return rsp;
- }
- }
-
- rsp->status = rte_pipeline_table_entry_delete_bulk(p->p, p->table_id[0],
-		(void **)params, n_keys, req->keys_found, NULL);
-
- for (i = 0; i < n_keys; i++)
- rte_free(params[i]);
-
- return rsp;
-}
-
-static void *
-pipeline_firewall_msg_req_add_default_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_firewall_add_default_msg_req *req = msg;
- struct pipeline_firewall_add_default_msg_rsp *rsp = msg;
-
- struct firewall_table_entry default_entry = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[req->port_id]},
- },
- };
-
- rsp->status = rte_pipeline_table_default_entry_add(p->p,
- p->table_id[0],
- (struct rte_pipeline_table_entry *) &default_entry,
- (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
-
- return rsp;
-}
-
-static void *
-pipeline_firewall_msg_req_del_default_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_firewall_del_default_msg_rsp *rsp = msg;
-
- rsp->status = rte_pipeline_table_default_entry_delete(p->p,
- p->table_id[0],
- NULL);
-
- return rsp;
-}
-
-struct pipeline_be_ops pipeline_firewall_be_ops = {
- .f_init = pipeline_firewall_init,
- .f_free = pipeline_firewall_free,
- .f_run = NULL,
- .f_timer = pipeline_firewall_timer,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_firewall_be.h b/examples/ip_pipeline/pipeline/pipeline_firewall_be.h
deleted file mode 100644
index 246f0a6d..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_firewall_be.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_FIREWALL_BE_H__
-#define __INCLUDE_PIPELINE_FIREWALL_BE_H__
-
-#include "pipeline_common_be.h"
-
-enum pipeline_firewall_key_type {
- PIPELINE_FIREWALL_IPV4_5TUPLE,
-};
-
-struct pipeline_firewall_key_ipv4_5tuple {
- uint32_t src_ip;
- uint32_t src_ip_mask;
- uint32_t dst_ip;
- uint32_t dst_ip_mask;
- uint16_t src_port_from;
- uint16_t src_port_to;
- uint16_t dst_port_from;
- uint16_t dst_port_to;
- uint8_t proto;
- uint8_t proto_mask;
-};
-
-struct pipeline_firewall_key {
- enum pipeline_firewall_key_type type;
- union {
- struct pipeline_firewall_key_ipv4_5tuple ipv4_5tuple;
- } key;
-};
-
-enum pipeline_firewall_msg_req_type {
- PIPELINE_FIREWALL_MSG_REQ_ADD = 0,
- PIPELINE_FIREWALL_MSG_REQ_DEL,
- PIPELINE_FIREWALL_MSG_REQ_ADD_BULK,
- PIPELINE_FIREWALL_MSG_REQ_DEL_BULK,
- PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT,
- PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT,
- PIPELINE_FIREWALL_MSG_REQS
-};
-
-/*
- * MSG ADD
- */
-struct pipeline_firewall_add_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_firewall_msg_req_type subtype;
-
- /* key */
- struct pipeline_firewall_key key;
-
- /* data */
- int32_t priority;
- uint32_t port_id;
-};
-
-struct pipeline_firewall_add_msg_rsp {
- int status;
- int key_found;
- void *entry_ptr;
-};
-
-/*
- * MSG DEL
- */
-struct pipeline_firewall_del_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_firewall_msg_req_type subtype;
-
- /* key */
- struct pipeline_firewall_key key;
-};
-
-struct pipeline_firewall_del_msg_rsp {
- int status;
- int key_found;
-};
-
-/*
- * MSG ADD BULK
- */
-struct pipeline_firewall_add_bulk_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_firewall_msg_req_type subtype;
-
- struct pipeline_firewall_key *keys;
- uint32_t n_keys;
-
- uint32_t *priorities;
- uint32_t *port_ids;
- int *keys_found;
- void **entries_ptr;
-};
-
-struct pipeline_firewall_add_bulk_msg_rsp {
- int status;
-};
-
-/*
- * MSG DEL BULK
- */
-struct pipeline_firewall_del_bulk_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_firewall_msg_req_type subtype;
-
- /* key */
- struct pipeline_firewall_key *keys;
- uint32_t n_keys;
- int *keys_found;
-};
-
-struct pipeline_firewall_del_bulk_msg_rsp {
- int status;
-};
-
-/*
- * MSG ADD DEFAULT
- */
-struct pipeline_firewall_add_default_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_firewall_msg_req_type subtype;
-
- /* data */
- uint32_t port_id;
-};
-
-struct pipeline_firewall_add_default_msg_rsp {
- int status;
- void *entry_ptr;
-};
-
-/*
- * MSG DEL DEFAULT
- */
-struct pipeline_firewall_del_default_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_firewall_msg_req_type subtype;
-};
-
-struct pipeline_firewall_del_default_msg_rsp {
- int status;
-};
-
-extern struct pipeline_be_ops pipeline_firewall_be_ops;
-
-#endif
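One convention worth spelling out: each *_msg_req/*_msg_rsp pair above aliases a single message buffer. The back-end handlers cast the same msg pointer to both types and write the response in place, so the request fields are clobbered as soon as a response field at the same offset is written. A minimal sketch of the aliasing (hypothetical handler, not part of the original sources):

    static void *
    handler_sketch(__rte_unused struct pipeline *p, void *msg)
    {
    	struct pipeline_firewall_del_default_msg_req *req = msg;
    	struct pipeline_firewall_del_default_msg_rsp *rsp = msg;	/* same buffer */

    	/* ... consume req here ... */

    	rsp->status = 0;	/* overlays req->type; req must not be read after this */
    	return rsp;
    }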
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions.c b/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
deleted file mode 100644
index 021aee18..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
+++ /dev/null
@@ -1,1286 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <sys/queue.h>
-#include <netinet/in.h>
-#include <unistd.h>
-
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_malloc.h>
-#include <cmdline_rdline.h>
-#include <cmdline_parse.h>
-#include <cmdline_parse_num.h>
-#include <cmdline_parse_string.h>
-
-#include "app.h"
-#include "pipeline_common_fe.h"
-#include "pipeline_flow_actions.h"
-#include "hash_func.h"
-#include "parser.h"
-
-/*
- * Flow actions pipeline
- */
-#ifndef N_FLOWS_BULK
-#define N_FLOWS_BULK 4096
-#endif
-
-struct app_pipeline_fa_flow {
- struct pipeline_fa_flow_params params;
- void *entry_ptr;
-};
-
-struct app_pipeline_fa_dscp {
- uint32_t traffic_class;
- enum rte_meter_color color;
-};
-
-struct app_pipeline_fa {
- /* Parameters */
- uint32_t n_ports_in;
- uint32_t n_ports_out;
- struct pipeline_fa_params params;
-
- /* Flows */
- struct app_pipeline_fa_dscp dscp[PIPELINE_FA_N_DSCP];
- struct app_pipeline_fa_flow *flows;
-} __rte_cache_aligned;
-
-static void *
-app_pipeline_fa_init(struct pipeline_params *params,
- __rte_unused void *arg)
-{
- struct app_pipeline_fa *p;
- uint32_t size, i;
-
- /* Check input arguments */
- if ((params == NULL) ||
- (params->n_ports_in == 0) ||
- (params->n_ports_out == 0))
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fa));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (p == NULL)
- return NULL;
-
- /* Initialization */
- p->n_ports_in = params->n_ports_in;
- p->n_ports_out = params->n_ports_out;
- if (pipeline_fa_parse_args(&p->params, params)) {
- rte_free(p);
- return NULL;
- }
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(
- p->params.n_flows * sizeof(struct app_pipeline_fa_flow));
- p->flows = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (p->flows == NULL) {
- rte_free(p);
- return NULL;
- }
-
- /* Initialization of flow table */
- for (i = 0; i < p->params.n_flows; i++)
- pipeline_fa_flow_params_set_default(&p->flows[i].params);
-
- /* Initialization of DSCP table */
- for (i = 0; i < RTE_DIM(p->dscp); i++) {
- p->dscp[i].traffic_class = 0;
- p->dscp[i].color = e_RTE_METER_GREEN;
- }
-
- return (void *) p;
-}
-
-static int
-app_pipeline_fa_free(void *pipeline)
-{
- struct app_pipeline_fa *p = pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- rte_free(p->flows);
- rte_free(p);
-
- return 0;
-}
-
-static int
-flow_params_check(struct app_pipeline_fa *p,
- __rte_unused uint32_t meter_update_mask,
- uint32_t policer_update_mask,
- uint32_t port_update,
- struct pipeline_fa_flow_params *params)
-{
- uint32_t mask, i;
-
- /* Meter */
-
- /* Policer */
- for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
-		struct pipeline_fa_policer_params *policer = &params->p[i];
-		uint32_t j;
-
-		if ((mask & policer_update_mask) == 0)
-			continue;
-
-		for (j = 0; j < e_RTE_METER_COLORS; j++) {
-			struct pipeline_fa_policer_action *action =
-				&policer->action[j];
-
- if ((action->drop == 0) &&
- (action->color >= e_RTE_METER_COLORS))
- return -1;
- }
- }
-
- /* Port */
- if (port_update && (params->port_id >= p->n_ports_out))
- return -1;
-
- return 0;
-}
-
-int
-app_pipeline_fa_flow_config(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t flow_id,
- uint32_t meter_update_mask,
- uint32_t policer_update_mask,
- uint32_t port_update,
- struct pipeline_fa_flow_params *params)
-{
- struct app_pipeline_fa *p;
- struct app_pipeline_fa_flow *flow;
-
- struct pipeline_fa_flow_config_msg_req *req;
- struct pipeline_fa_flow_config_msg_rsp *rsp;
-
- uint32_t i, mask;
-
- /* Check input arguments */
- if ((app == NULL) ||
- ((meter_update_mask == 0) &&
- (policer_update_mask == 0) &&
- (port_update == 0)) ||
- (meter_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
- (policer_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
- (params == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id,
- &pipeline_flow_actions);
- if (p == NULL)
- return -1;
-
- if (flow_params_check(p,
- meter_update_mask,
- policer_update_mask,
- port_update,
- params) != 0)
- return -1;
-
- flow_id %= p->params.n_flows;
- flow = &p->flows[flow_id];
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FA_MSG_REQ_FLOW_CONFIG;
- req->entry_ptr = flow->entry_ptr;
- req->flow_id = flow_id;
- req->meter_update_mask = meter_update_mask;
- req->policer_update_mask = policer_update_mask;
- req->port_update = port_update;
- memcpy(&req->params, params, sizeof(*params));
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response */
- if (rsp->status ||
- (rsp->entry_ptr == NULL)) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- /* Commit flow */
- for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
- if ((mask & meter_update_mask) == 0)
- continue;
-
- memcpy(&flow->params.m[i], &params->m[i], sizeof(params->m[i]));
- }
-
- for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
- if ((mask & policer_update_mask) == 0)
- continue;
-
- memcpy(&flow->params.p[i], &params->p[i], sizeof(params->p[i]));
- }
-
- if (port_update)
- flow->params.port_id = params->port_id;
-
- flow->entry_ptr = rsp->entry_ptr;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_fa_flow_config_bulk(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t *flow_id,
- uint32_t n_flows,
- uint32_t meter_update_mask,
- uint32_t policer_update_mask,
- uint32_t port_update,
- struct pipeline_fa_flow_params *params)
-{
- struct app_pipeline_fa *p;
- struct pipeline_fa_flow_config_bulk_msg_req *req;
- struct pipeline_fa_flow_config_bulk_msg_rsp *rsp;
- void **req_entry_ptr;
- uint32_t *req_flow_id;
- uint32_t i;
- int status;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (flow_id == NULL) ||
- (n_flows == 0) ||
- ((meter_update_mask == 0) &&
- (policer_update_mask == 0) &&
- (port_update == 0)) ||
- (meter_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
- (policer_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
- (params == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id,
- &pipeline_flow_actions);
- if (p == NULL)
- return -1;
-
- for (i = 0; i < n_flows; i++) {
- struct pipeline_fa_flow_params *flow_params = &params[i];
-
- if (flow_params_check(p,
- meter_update_mask,
- policer_update_mask,
- port_update,
- flow_params) != 0)
- return -1;
- }
-
- /* Allocate and write request */
- req_entry_ptr = (void **) rte_malloc(NULL,
- n_flows * sizeof(void *),
- RTE_CACHE_LINE_SIZE);
- if (req_entry_ptr == NULL)
- return -1;
-
- req_flow_id = (uint32_t *) rte_malloc(NULL,
- n_flows * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (req_flow_id == NULL) {
- rte_free(req_entry_ptr);
- return -1;
- }
-
- for (i = 0; i < n_flows; i++) {
- uint32_t fid = flow_id[i] % p->params.n_flows;
- struct app_pipeline_fa_flow *flow = &p->flows[fid];
-
- req_flow_id[i] = fid;
- req_entry_ptr[i] = flow->entry_ptr;
- }
-
- req = app_msg_alloc(app);
- if (req == NULL) {
- rte_free(req_flow_id);
- rte_free(req_entry_ptr);
- return -1;
- }
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK;
- req->entry_ptr = req_entry_ptr;
- req->flow_id = req_flow_id;
- req->n_flows = n_flows;
- req->meter_update_mask = meter_update_mask;
- req->policer_update_mask = policer_update_mask;
- req->port_update = port_update;
- req->params = params;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL) {
- rte_free(req_flow_id);
- rte_free(req_entry_ptr);
- return -1;
- }
-
- /* Read response */
- status = (rsp->n_flows == n_flows) ? 0 : -1;
-
- /* Commit flows */
- for (i = 0; i < rsp->n_flows; i++) {
- uint32_t fid = flow_id[i] % p->params.n_flows;
- struct app_pipeline_fa_flow *flow = &p->flows[fid];
- struct pipeline_fa_flow_params *flow_params = &params[i];
- void *entry_ptr = req_entry_ptr[i];
- uint32_t j, mask;
-
- for (j = 0, mask = 1; j < PIPELINE_FA_N_TC_MAX;
- j++, mask <<= 1) {
- if ((mask & meter_update_mask) == 0)
- continue;
-
- memcpy(&flow->params.m[j],
- &flow_params->m[j],
- sizeof(flow_params->m[j]));
- }
-
- for (j = 0, mask = 1; j < PIPELINE_FA_N_TC_MAX;
- j++, mask <<= 1) {
- if ((mask & policer_update_mask) == 0)
- continue;
-
- memcpy(&flow->params.p[j],
- &flow_params->p[j],
- sizeof(flow_params->p[j]));
- }
-
- if (port_update)
- flow->params.port_id = flow_params->port_id;
-
- flow->entry_ptr = entry_ptr;
- }
-
- /* Free response */
- app_msg_free(app, rsp);
- rte_free(req_flow_id);
- rte_free(req_entry_ptr);
-
- return status;
-}
-
-int
-app_pipeline_fa_dscp_config(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t dscp,
- uint32_t traffic_class,
- enum rte_meter_color color)
-{
- struct app_pipeline_fa *p;
-
- struct pipeline_fa_dscp_config_msg_req *req;
- struct pipeline_fa_dscp_config_msg_rsp *rsp;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (dscp >= PIPELINE_FA_N_DSCP) ||
- (traffic_class >= PIPELINE_FA_N_TC_MAX) ||
- (color >= e_RTE_METER_COLORS))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id,
- &pipeline_flow_actions);
- if (p == NULL)
- return -1;
-
- if (p->params.dscp_enabled == 0)
- return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FA_MSG_REQ_DSCP_CONFIG;
- req->dscp = dscp;
- req->traffic_class = traffic_class;
- req->color = color;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response */
- if (rsp->status) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- /* Commit DSCP */
- p->dscp[dscp].traffic_class = traffic_class;
- p->dscp[dscp].color = color;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t flow_id,
- uint32_t policer_id,
- int clear,
- struct pipeline_fa_policer_stats *stats)
-{
- struct app_pipeline_fa *p;
- struct app_pipeline_fa_flow *flow;
-
- struct pipeline_fa_policer_stats_msg_req *req;
- struct pipeline_fa_policer_stats_msg_rsp *rsp;
-
- /* Check input arguments */
- if ((app == NULL) || (stats == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id,
- &pipeline_flow_actions);
- if (p == NULL)
- return -1;
-
- flow_id %= p->params.n_flows;
- flow = &p->flows[flow_id];
-
- if ((policer_id >= p->params.n_meters_per_flow) ||
- (flow->entry_ptr == NULL))
- return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FA_MSG_REQ_POLICER_STATS_READ;
- req->entry_ptr = flow->entry_ptr;
- req->policer_id = policer_id;
- req->clear = clear;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response */
- if (rsp->status) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- memcpy(stats, &rsp->stats, sizeof(*stats));
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-static const char *
-color_to_string(enum rte_meter_color color)
-{
- switch (color) {
- case e_RTE_METER_GREEN: return "G";
- case e_RTE_METER_YELLOW: return "Y";
- case e_RTE_METER_RED: return "R";
- default: return "?";
- }
-}
-
-static int
-string_to_color(char *s, enum rte_meter_color *c)
-{
- if (strcmp(s, "G") == 0) {
- *c = e_RTE_METER_GREEN;
- return 0;
- }
-
- if (strcmp(s, "Y") == 0) {
- *c = e_RTE_METER_YELLOW;
- return 0;
- }
-
- if (strcmp(s, "R") == 0) {
- *c = e_RTE_METER_RED;
- return 0;
- }
-
- return -1;
-}
-
-static const char *
-policer_action_to_string(struct pipeline_fa_policer_action *a)
-{
- if (a->drop)
- return "D";
-
- return color_to_string(a->color);
-}
-
-static int
-string_to_policer_action(char *s, struct pipeline_fa_policer_action *a)
-{
- if (strcmp(s, "G") == 0) {
- a->drop = 0;
- a->color = e_RTE_METER_GREEN;
- return 0;
- }
-
- if (strcmp(s, "Y") == 0) {
- a->drop = 0;
- a->color = e_RTE_METER_YELLOW;
- return 0;
- }
-
- if (strcmp(s, "R") == 0) {
- a->drop = 0;
- a->color = e_RTE_METER_RED;
- return 0;
- }
-
- if (strcmp(s, "D") == 0) {
- a->drop = 1;
- a->color = e_RTE_METER_GREEN;
- return 0;
- }
-
- return -1;
-}
-
-static void
-print_flow(struct app_pipeline_fa *p,
- uint32_t flow_id,
- struct app_pipeline_fa_flow *flow)
-{
- uint32_t i;
-
- printf("Flow ID = %" PRIu32 "\n", flow_id);
-
- for (i = 0; i < p->params.n_meters_per_flow; i++) {
- struct rte_meter_trtcm_params *meter = &flow->params.m[i];
- struct pipeline_fa_policer_params *policer = &flow->params.p[i];
-
- printf("\ttrTCM [CIR = %" PRIu64
- ", CBS = %" PRIu64 ", PIR = %" PRIu64
- ", PBS = %" PRIu64 "] Policer [G : %s, Y : %s, R : %s]\n",
- meter->cir,
- meter->cbs,
- meter->pir,
- meter->pbs,
- policer_action_to_string(&policer->action[e_RTE_METER_GREEN]),
- policer_action_to_string(&policer->action[e_RTE_METER_YELLOW]),
- policer_action_to_string(&policer->action[e_RTE_METER_RED]));
- }
-
- printf("\tPort %u (entry_ptr = %p)\n",
- flow->params.port_id,
- flow->entry_ptr);
-}
-
-static int
-app_pipeline_fa_flow_ls(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_pipeline_fa *p;
- uint32_t i;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id,
- &pipeline_flow_actions);
- if (p == NULL)
- return -1;
-
- for (i = 0; i < p->params.n_flows; i++) {
- struct app_pipeline_fa_flow *flow = &p->flows[i];
-
- print_flow(p, i, flow);
- }
-
- return 0;
-}
-
-static int
-app_pipeline_fa_dscp_ls(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_pipeline_fa *p;
- uint32_t i;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id,
- &pipeline_flow_actions);
- if (p == NULL)
- return -1;
-
- if (p->params.dscp_enabled == 0)
- return -1;
-
- for (i = 0; i < RTE_DIM(p->dscp); i++) {
- struct app_pipeline_fa_dscp *dscp = &p->dscp[i];
-
- printf("DSCP = %2" PRIu32 ": Traffic class = %" PRIu32
- ", Color = %s\n",
- i,
- dscp->traffic_class,
- color_to_string(dscp->color));
- }
-
- return 0;
-}
-
-int
-app_pipeline_fa_load_file(char *filename,
- uint32_t *flow_ids,
- struct pipeline_fa_flow_params *p,
- uint32_t *n_flows,
- uint32_t *line)
-{
- FILE *f = NULL;
- char file_buf[1024];
- uint32_t i, l;
-
- /* Check input arguments */
- if ((filename == NULL) ||
- (flow_ids == NULL) ||
- (p == NULL) ||
- (n_flows == NULL) ||
- (*n_flows == 0) ||
- (line == NULL)) {
- if (line)
- *line = 0;
- return -1;
- }
-
- /* Open input file */
- f = fopen(filename, "r");
- if (f == NULL) {
- *line = 0;
- return -1;
- }
-
- /* Read file */
- for (i = 0, l = 1; i < *n_flows; l++) {
- char *tokens[64];
- uint32_t n_tokens = RTE_DIM(tokens);
-
- int status;
-
- if (fgets(file_buf, sizeof(file_buf), f) == NULL)
- break;
-
- status = parse_tokenize_string(file_buf, tokens, &n_tokens);
- if (status)
- goto error1;
-
- if ((n_tokens == 0) || (tokens[0][0] == '#'))
- continue;
-
- if ((n_tokens != 64) ||
- /* flow */
- strcmp(tokens[0], "flow") ||
- parser_read_uint32(&flow_ids[i], tokens[1]) ||
-
- /* meter & policer 0 */
- strcmp(tokens[2], "meter") ||
- strcmp(tokens[3], "0") ||
- strcmp(tokens[4], "trtcm") ||
- parser_read_uint64(&p[i].m[0].cir, tokens[5]) ||
- parser_read_uint64(&p[i].m[0].pir, tokens[6]) ||
- parser_read_uint64(&p[i].m[0].cbs, tokens[7]) ||
- parser_read_uint64(&p[i].m[0].pbs, tokens[8]) ||
- strcmp(tokens[9], "policer") ||
- strcmp(tokens[10], "0") ||
- strcmp(tokens[11], "g") ||
- string_to_policer_action(tokens[12],
- &p[i].p[0].action[e_RTE_METER_GREEN]) ||
- strcmp(tokens[13], "y") ||
- string_to_policer_action(tokens[14],
- &p[i].p[0].action[e_RTE_METER_YELLOW]) ||
- strcmp(tokens[15], "r") ||
- string_to_policer_action(tokens[16],
- &p[i].p[0].action[e_RTE_METER_RED]) ||
-
- /* meter & policer 1 */
- strcmp(tokens[17], "meter") ||
- strcmp(tokens[18], "1") ||
- strcmp(tokens[19], "trtcm") ||
- parser_read_uint64(&p[i].m[1].cir, tokens[20]) ||
- parser_read_uint64(&p[i].m[1].pir, tokens[21]) ||
- parser_read_uint64(&p[i].m[1].cbs, tokens[22]) ||
- parser_read_uint64(&p[i].m[1].pbs, tokens[23]) ||
- strcmp(tokens[24], "policer") ||
- strcmp(tokens[25], "1") ||
- strcmp(tokens[26], "g") ||
- string_to_policer_action(tokens[27],
- &p[i].p[1].action[e_RTE_METER_GREEN]) ||
- strcmp(tokens[28], "y") ||
- string_to_policer_action(tokens[29],
- &p[i].p[1].action[e_RTE_METER_YELLOW]) ||
- strcmp(tokens[30], "r") ||
- string_to_policer_action(tokens[31],
- &p[i].p[1].action[e_RTE_METER_RED]) ||
-
- /* meter & policer 2 */
- strcmp(tokens[32], "meter") ||
- strcmp(tokens[33], "2") ||
- strcmp(tokens[34], "trtcm") ||
- parser_read_uint64(&p[i].m[2].cir, tokens[35]) ||
- parser_read_uint64(&p[i].m[2].pir, tokens[36]) ||
- parser_read_uint64(&p[i].m[2].cbs, tokens[37]) ||
- parser_read_uint64(&p[i].m[2].pbs, tokens[38]) ||
- strcmp(tokens[39], "policer") ||
- strcmp(tokens[40], "2") ||
- strcmp(tokens[41], "g") ||
- string_to_policer_action(tokens[42],
- &p[i].p[2].action[e_RTE_METER_GREEN]) ||
- strcmp(tokens[43], "y") ||
- string_to_policer_action(tokens[44],
- &p[i].p[2].action[e_RTE_METER_YELLOW]) ||
- strcmp(tokens[45], "r") ||
- string_to_policer_action(tokens[46],
- &p[i].p[2].action[e_RTE_METER_RED]) ||
-
- /* meter & policer 3 */
- strcmp(tokens[47], "meter") ||
- strcmp(tokens[48], "3") ||
- strcmp(tokens[49], "trtcm") ||
- parser_read_uint64(&p[i].m[3].cir, tokens[50]) ||
- parser_read_uint64(&p[i].m[3].pir, tokens[51]) ||
- parser_read_uint64(&p[i].m[3].cbs, tokens[52]) ||
- parser_read_uint64(&p[i].m[3].pbs, tokens[53]) ||
- strcmp(tokens[54], "policer") ||
- strcmp(tokens[55], "3") ||
- strcmp(tokens[56], "g") ||
- string_to_policer_action(tokens[57],
- &p[i].p[3].action[e_RTE_METER_GREEN]) ||
- strcmp(tokens[58], "y") ||
- string_to_policer_action(tokens[59],
- &p[i].p[3].action[e_RTE_METER_YELLOW]) ||
- strcmp(tokens[60], "r") ||
- string_to_policer_action(tokens[61],
- &p[i].p[3].action[e_RTE_METER_RED]) ||
-
- /* port */
- strcmp(tokens[62], "port") ||
- parser_read_uint32(&p[i].port_id, tokens[63]))
- goto error1;
-
- i++;
- }
-
- /* Close file */
- *n_flows = i;
- fclose(f);
- return 0;
-
-error1:
- *line = l;
- fclose(f);
- return -1;
-}
-
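-/*
- * Illustrative record for the "action flow bulk" input file. Each record is
- * a single line in the file (wrapped here for readability) matching the
- * 64-token layout checked by app_pipeline_fa_load_file() above; all values
- * are examples only:
- *
- * flow 0 meter 0 trtcm 1250000 1250000 1000 1000 policer 0 g G y Y r R
- *   meter 1 trtcm 1250000 1250000 1000 1000 policer 1 g G y Y r R
- *   meter 2 trtcm 1250000 1250000 1000 1000 policer 2 g G y Y r R
- *   meter 3 trtcm 1250000 1250000 1000 1000 policer 3 g G y Y r R port 0
- */
-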
-/*
- * action
- *
- * flow meter, policer and output port configuration:
- * p <pipelineid> action flow <flowid> meter <meterid> trtcm <cir> <pir> <cbs> <pbs>
- *
- * p <pipelineid> action flow <flowid> policer <policerid> g <gaction> y <yaction> r <raction>
- * each of <gaction>, <yaction>, <raction> is one of the following:
- * G = recolor to green
- * Y = recolor to yellow
- * R = recolor to red
- * D = drop
- *
- * p <pipelineid> action flow <flowid> port <portid>
- *
- * p <pipelineid> action flow bulk <file>
- *
- * flow policer stats read:
- * p <pipelineid> action flow <flowid> stats
- *
- * flow ls:
- * p <pipelineid> action flow ls
- *
- * dscp table configuration:
- * p <pipelineid> action dscp <dscpid> class <classid> color <color>
- *
- * dscp table ls:
- * p <pipelineid> action dscp ls
- */
-
-struct cmd_action_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t action_string;
- cmdline_multi_string_t multi_string;
-};
-
-static void
-cmd_action_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_action_result *params = parsed_result;
- struct app_params *app = data;
-
- char *tokens[16];
- uint32_t n_tokens = RTE_DIM(tokens);
- int status;
-
- status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
- if (status != 0) {
- printf(CMD_MSG_TOO_MANY_ARGS, "action");
- return;
- }
-
- /* action flow meter */
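-	/* tokens[1] holds the flow id, so it must not be the "bulk" or "ls"
-	 * keyword handled by the branches below.
-	 */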
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "flow") == 0) &&
- strcmp(tokens[1], "bulk") &&
- strcmp(tokens[1], "ls") &&
- (strcmp(tokens[2], "meter") == 0)) {
- struct pipeline_fa_flow_params flow_params;
- uint32_t flow_id, meter_id;
-
- if (n_tokens != 9) {
- printf(CMD_MSG_MISMATCH_ARGS, "action flow meter");
- return;
- }
-
- memset(&flow_params, 0, sizeof(flow_params));
-
- if (parser_read_uint32(&flow_id, tokens[1])) {
- printf(CMD_MSG_INVALID_ARG, "flowid");
- return;
- }
-
- if (parser_read_uint32(&meter_id, tokens[3]) ||
- (meter_id >= PIPELINE_FA_N_TC_MAX)) {
- printf(CMD_MSG_INVALID_ARG, "meterid");
- return;
- }
-
- if (strcmp(tokens[4], "trtcm")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "trtcm");
- return;
- }
-
- if (parser_read_uint64(&flow_params.m[meter_id].cir, tokens[5])) {
- printf(CMD_MSG_INVALID_ARG, "cir");
- return;
- }
-
- if (parser_read_uint64(&flow_params.m[meter_id].pir, tokens[6])) {
- printf(CMD_MSG_INVALID_ARG, "pir");
- return;
- }
-
- if (parser_read_uint64(&flow_params.m[meter_id].cbs, tokens[7])) {
- printf(CMD_MSG_INVALID_ARG, "cbs");
- return;
- }
-
- if (parser_read_uint64(&flow_params.m[meter_id].pbs, tokens[8])) {
- printf(CMD_MSG_INVALID_ARG, "pbs");
- return;
- }
-
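-		/* Update only the selected meter: set bit meter_id in the
-		 * meter update mask, leave policers and port untouched.
-		 */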
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- flow_id,
- 1 << meter_id,
- 0,
- 0,
- &flow_params);
- if (status)
- printf(CMD_MSG_FAIL, "action flow meter");
-
- return;
- } /* action flow meter */
-
- /* action flow policer */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "flow") == 0) &&
- strcmp(tokens[1], "bulk") &&
- strcmp(tokens[1], "ls") &&
- (strcmp(tokens[2], "policer") == 0)) {
- struct pipeline_fa_flow_params flow_params;
- uint32_t flow_id, policer_id;
-
- if (n_tokens != 10) {
- printf(CMD_MSG_MISMATCH_ARGS, "action flow policer");
- return;
- }
-
- memset(&flow_params, 0, sizeof(flow_params));
-
- if (parser_read_uint32(&flow_id, tokens[1])) {
- printf(CMD_MSG_INVALID_ARG, "flowid");
- return;
- }
-
- if (parser_read_uint32(&policer_id, tokens[3]) ||
- (policer_id >= PIPELINE_FA_N_TC_MAX)) {
- printf(CMD_MSG_INVALID_ARG, "policerid");
- return;
- }
-
- if (strcmp(tokens[4], "g")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "g");
- return;
- }
-
- if (string_to_policer_action(tokens[5],
- &flow_params.p[policer_id].action[e_RTE_METER_GREEN])) {
- printf(CMD_MSG_INVALID_ARG, "gaction");
- return;
- }
-
- if (strcmp(tokens[6], "y")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "y");
- return;
- }
-
- if (string_to_policer_action(tokens[7],
- &flow_params.p[policer_id].action[e_RTE_METER_YELLOW])) {
- printf(CMD_MSG_INVALID_ARG, "yaction");
- return;
- }
-
- if (strcmp(tokens[8], "r")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "r");
- return;
- }
-
- if (string_to_policer_action(tokens[9],
- &flow_params.p[policer_id].action[e_RTE_METER_RED])) {
- printf(CMD_MSG_INVALID_ARG, "raction");
- return;
- }
-
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- flow_id,
- 0,
- 1 << policer_id,
- 0,
- &flow_params);
- if (status != 0)
- printf(CMD_MSG_FAIL, "action flow policer");
-
- return;
- } /* action flow policer */
-
- /* action flow port */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "flow") == 0) &&
- strcmp(tokens[1], "bulk") &&
- strcmp(tokens[1], "ls") &&
- (strcmp(tokens[2], "port") == 0)) {
- struct pipeline_fa_flow_params flow_params;
- uint32_t flow_id, port_id;
-
- if (n_tokens != 4) {
- printf(CMD_MSG_MISMATCH_ARGS, "action flow port");
- return;
- }
-
- memset(&flow_params, 0, sizeof(flow_params));
-
- if (parser_read_uint32(&flow_id, tokens[1])) {
- printf(CMD_MSG_INVALID_ARG, "flowid");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[3])) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- flow_params.port_id = port_id;
-
- status = app_pipeline_fa_flow_config(app,
- params->pipeline_id,
- flow_id,
- 0,
- 0,
- 1,
- &flow_params);
- if (status)
- printf(CMD_MSG_FAIL, "action flow port");
-
- return;
- } /* action flow port */
-
- /* action flow stats */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "flow") == 0) &&
- strcmp(tokens[1], "bulk") &&
- strcmp(tokens[1], "ls") &&
- (strcmp(tokens[2], "stats") == 0)) {
- struct pipeline_fa_policer_stats stats;
- uint32_t flow_id, policer_id;
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "action flow stats");
- return;
- }
-
- if (parser_read_uint32(&flow_id, tokens[1])) {
- printf(CMD_MSG_INVALID_ARG, "flowid");
- return;
- }
-
- for (policer_id = 0;
- policer_id < PIPELINE_FA_N_TC_MAX;
- policer_id++) {
- status = app_pipeline_fa_flow_policer_stats_read(app,
- params->pipeline_id,
- flow_id,
- policer_id,
- 1,
- &stats);
- if (status != 0) {
- printf(CMD_MSG_FAIL, "action flow stats");
- return;
- }
-
- /* Display stats */
- printf("\tPolicer: %" PRIu32
- "\tPkts G: %" PRIu64
- "\tPkts Y: %" PRIu64
- "\tPkts R: %" PRIu64
- "\tPkts D: %" PRIu64 "\n",
- policer_id,
- stats.n_pkts[e_RTE_METER_GREEN],
- stats.n_pkts[e_RTE_METER_YELLOW],
- stats.n_pkts[e_RTE_METER_RED],
- stats.n_pkts_drop);
- }
-
- return;
- } /* action flow stats */
-
- /* action flow bulk */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "flow") == 0) &&
- (strcmp(tokens[1], "bulk") == 0)) {
- struct pipeline_fa_flow_params *flow_params;
- uint32_t *flow_ids, n_flows, line;
- char *filename;
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "action flow bulk");
- return;
- }
-
- filename = tokens[2];
-
- n_flows = APP_PIPELINE_FA_MAX_RECORDS_IN_FILE;
- flow_ids = malloc(n_flows * sizeof(uint32_t));
- if (flow_ids == NULL) {
- printf(CMD_MSG_OUT_OF_MEMORY);
- return;
- }
-
- flow_params = malloc(n_flows * sizeof(struct pipeline_fa_flow_params));
- if (flow_params == NULL) {
- printf(CMD_MSG_OUT_OF_MEMORY);
- free(flow_ids);
- return;
- }
-
- status = app_pipeline_fa_load_file(filename,
- flow_ids,
- flow_params,
- &n_flows,
- &line);
- if (status) {
- printf(CMD_MSG_FILE_ERR, filename, line);
- free(flow_params);
- free(flow_ids);
- return;
- }
-
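-		/* The 0xF masks select all four meters and policers
-		 * (PIPELINE_FA_N_TC_MAX = 4); the final 1 updates the port.
-		 */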
- status = app_pipeline_fa_flow_config_bulk(app,
- params->pipeline_id,
- flow_ids,
- n_flows,
- 0xF,
- 0xF,
- 1,
- flow_params);
- if (status)
- printf(CMD_MSG_FAIL, "action flow bulk");
-
- free(flow_params);
- free(flow_ids);
- return;
- } /* action flow bulk */
-
- /* action flow ls */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "flow") == 0) &&
- (strcmp(tokens[1], "ls") == 0)) {
- if (n_tokens != 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "action flow ls");
- return;
- }
-
- status = app_pipeline_fa_flow_ls(app,
- params->pipeline_id);
- if (status)
- printf(CMD_MSG_FAIL, "action flow ls");
-
- return;
- } /* action flow ls */
-
- /* action dscp */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "dscp") == 0) &&
- strcmp(tokens[1], "ls")) {
- uint32_t dscp_id, tc_id;
- enum rte_meter_color color;
-
- if (n_tokens != 6) {
- printf(CMD_MSG_MISMATCH_ARGS, "action dscp");
- return;
- }
-
- if (parser_read_uint32(&dscp_id, tokens[1])) {
- printf(CMD_MSG_INVALID_ARG, "dscpid");
- return;
- }
-
- if (strcmp(tokens[2], "class")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "class");
- return;
- }
-
- if (parser_read_uint32(&tc_id, tokens[3])) {
- printf(CMD_MSG_INVALID_ARG, "classid");
- return;
- }
-
- if (strcmp(tokens[4], "color")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "color");
- return;
- }
-
- if (string_to_color(tokens[5], &color)) {
- printf(CMD_MSG_INVALID_ARG, "colorid");
- return;
- }
-
- status = app_pipeline_fa_dscp_config(app,
- params->pipeline_id,
- dscp_id,
- tc_id,
- color);
- if (status != 0)
- printf(CMD_MSG_FAIL, "action dscp");
-
- return;
- } /* action dscp */
-
- /* action dscp ls */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "dscp") == 0) &&
- (strcmp(tokens[1], "ls") == 0)) {
- if (n_tokens != 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "action dscp ls");
- return;
- }
-
- status = app_pipeline_fa_dscp_ls(app,
- params->pipeline_id);
- if (status)
- printf(CMD_MSG_FAIL, "action dscp ls");
-
- return;
- } /* action dscp ls */
-
- printf(CMD_MSG_FAIL, "action");
-}
-
-static cmdline_parse_token_string_t cmd_action_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_action_result, p_string, "p");
-
-static cmdline_parse_token_num_t cmd_action_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_action_result, pipeline_id, UINT32);
-
-static cmdline_parse_token_string_t cmd_action_action_string =
- TOKEN_STRING_INITIALIZER(struct cmd_action_result, action_string, "action");
-
-static cmdline_parse_token_string_t cmd_action_multi_string =
- TOKEN_STRING_INITIALIZER(struct cmd_action_result, multi_string,
- TOKEN_STRING_MULTI);
-
-cmdline_parse_inst_t cmd_action = {
- .f = cmd_action_parsed,
- .data = NULL,
- .help_str = "flow actions (meter, policer, policer stats, dscp table)",
- .tokens = {
- (void *) &cmd_action_p_string,
- (void *) &cmd_action_pipeline_id,
- (void *) &cmd_action_action_string,
- (void *) &cmd_action_multi_string,
- NULL,
- },
-};
-
-static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_action,
- NULL,
-};
-
-static struct pipeline_fe_ops pipeline_flow_actions_fe_ops = {
- .f_init = app_pipeline_fa_init,
- .f_post_init = NULL,
- .f_free = app_pipeline_fa_free,
- .f_track = app_pipeline_track_default,
- .cmds = pipeline_cmds,
-};
-
-struct pipeline_type pipeline_flow_actions = {
- .name = "FLOW_ACTIONS",
- .be_ops = &pipeline_flow_actions_be_ops,
- .fe_ops = &pipeline_flow_actions_fe_ops,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions.h b/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
deleted file mode 100644
index 885923e4..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_FLOW_ACTIONS_H__
-#define __INCLUDE_PIPELINE_FLOW_ACTIONS_H__
-
-#include <rte_meter.h>
-
-#include "pipeline.h"
-#include "pipeline_flow_actions_be.h"
-
-int
-app_pipeline_fa_flow_config(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t flow_id,
- uint32_t meter_update_mask,
- uint32_t policer_update_mask,
- uint32_t port_update,
- struct pipeline_fa_flow_params *params);
-
-int
-app_pipeline_fa_flow_config_bulk(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t *flow_id,
- uint32_t n_flows,
- uint32_t meter_update_mask,
- uint32_t policer_update_mask,
- uint32_t port_update,
- struct pipeline_fa_flow_params *params);
-
-int
-app_pipeline_fa_dscp_config(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t dscp,
- uint32_t traffic_class,
- enum rte_meter_color color);
-
-int
-app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t flow_id,
- uint32_t policer_id,
- int clear,
- struct pipeline_fa_policer_stats *stats);
-
-#ifndef APP_PIPELINE_FA_MAX_RECORDS_IN_FILE
-#define APP_PIPELINE_FA_MAX_RECORDS_IN_FILE 65536
-#endif
-
-int
-app_pipeline_fa_load_file(char *filename,
- uint32_t *flow_ids,
- struct pipeline_fa_flow_params *p,
- uint32_t *n_flows,
- uint32_t *line);
-
-extern struct pipeline_type pipeline_flow_actions;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
deleted file mode 100644
index 9599b7d8..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
+++ /dev/null
@@ -1,960 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <rte_cycles.h>
-#include <rte_table_array.h>
-#include <rte_byteorder.h>
-#include <rte_ip.h>
-
-#include "pipeline_actions_common.h"
-#include "pipeline_flow_actions_be.h"
-#include "parser.h"
-#include "hash_func.h"
-
-int
-pipeline_fa_flow_params_set_default(struct pipeline_fa_flow_params *params)
-{
- uint32_t i;
-
- if (params == NULL)
- return -1;
-
- for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
- struct rte_meter_trtcm_params *m = &params->m[i];
-
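-		/* Minimal token-bucket defaults: 1 byte/s rates with 1- and
-		 * 2-byte bursts meter virtually every packet red until the
-		 * flow is explicitly configured.
-		 */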
- m->cir = 1;
- m->cbs = 1;
- m->pir = 1;
- m->pbs = 2;
- }
-
- for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
- struct pipeline_fa_policer_params *p = &params->p[i];
- uint32_t j;
-
- for (j = 0; j < e_RTE_METER_COLORS; j++) {
- struct pipeline_fa_policer_action *a = &p->action[j];
-
- a->drop = 0;
- a->color = (enum rte_meter_color) j;
- }
- }
-
- params->port_id = 0;
-
- return 0;
-}
-
-struct dscp_entry {
- uint32_t traffic_class;
- enum rte_meter_color color;
-};
-
-struct pipeline_flow_actions {
- struct pipeline p;
- struct pipeline_fa_params params;
- pipeline_msg_req_handler custom_handlers[PIPELINE_FA_MSG_REQS];
-
- struct dscp_entry dscp[PIPELINE_FA_N_DSCP];
-} __rte_cache_aligned;
-
-static void *
-pipeline_fa_msg_req_custom_handler(struct pipeline *p, void *msg);
-
-static pipeline_msg_req_handler handlers[] = {
- [PIPELINE_MSG_REQ_PING] =
- pipeline_msg_req_ping_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_IN] =
- pipeline_msg_req_stats_port_in_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
- pipeline_msg_req_stats_port_out_handler,
- [PIPELINE_MSG_REQ_STATS_TABLE] =
- pipeline_msg_req_stats_table_handler,
- [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
- pipeline_msg_req_port_in_enable_handler,
- [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
- pipeline_msg_req_port_in_disable_handler,
- [PIPELINE_MSG_REQ_CUSTOM] =
- pipeline_fa_msg_req_custom_handler,
-};
-
-static void *
-pipeline_fa_msg_req_flow_config_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_fa_msg_req_flow_config_bulk_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_fa_msg_req_dscp_config_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_fa_msg_req_policer_stats_read_handler(struct pipeline *p, void *msg);
-
-static pipeline_msg_req_handler custom_handlers[] = {
- [PIPELINE_FA_MSG_REQ_FLOW_CONFIG] =
- pipeline_fa_msg_req_flow_config_handler,
- [PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK] =
- pipeline_fa_msg_req_flow_config_bulk_handler,
- [PIPELINE_FA_MSG_REQ_DSCP_CONFIG] =
- pipeline_fa_msg_req_dscp_config_handler,
- [PIPELINE_FA_MSG_REQ_POLICER_STATS_READ] =
- pipeline_fa_msg_req_policer_stats_read_handler,
-};
-
-/*
- * Flow table
- */
-struct meter_policer {
- struct rte_meter_trtcm meter;
- struct pipeline_fa_policer_params policer;
- struct pipeline_fa_policer_stats stats;
-};
-
-struct flow_table_entry {
- struct rte_pipeline_table_entry head;
- struct meter_policer mp[PIPELINE_FA_N_TC_MAX];
-};
-
-static int
-flow_table_entry_set_meter(struct flow_table_entry *entry,
- uint32_t meter_id,
- struct pipeline_fa_flow_params *params)
-{
- struct rte_meter_trtcm *meter = &entry->mp[meter_id].meter;
- struct rte_meter_trtcm_params *meter_params = &params->m[meter_id];
-
- return rte_meter_trtcm_config(meter, meter_params);
-}
-
-static void
-flow_table_entry_set_policer(struct flow_table_entry *entry,
- uint32_t policer_id,
- struct pipeline_fa_flow_params *params)
-{
- struct pipeline_fa_policer_params *p0 = &entry->mp[policer_id].policer;
- struct pipeline_fa_policer_params *p1 = &params->p[policer_id];
-
- memcpy(p0, p1, sizeof(*p0));
-}
-
-static void
-flow_table_entry_set_port_id(struct pipeline_flow_actions *p,
- struct flow_table_entry *entry,
- struct pipeline_fa_flow_params *params)
-{
- entry->head.action = RTE_PIPELINE_ACTION_PORT;
- entry->head.port_id = p->p.port_out_id[params->port_id];
-}
-
-static int
-flow_table_entry_set_default(struct pipeline_flow_actions *p,
- struct flow_table_entry *entry)
-{
- struct pipeline_fa_flow_params params;
- uint32_t i;
-
- pipeline_fa_flow_params_set_default(&params);
-
- memset(entry, 0, sizeof(*entry));
-
- flow_table_entry_set_port_id(p, entry, &params);
-
- for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
- int status;
-
- status = flow_table_entry_set_meter(entry, i, &params);
- if (status)
- return status;
- }
-
- for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++)
- flow_table_entry_set_policer(entry, i, &params);
-
- return 0;
-}
-
-static inline uint64_t
-pkt_work(
- struct rte_mbuf *pkt,
- struct rte_pipeline_table_entry *table_entry,
- void *arg,
- uint64_t time)
-{
- struct pipeline_flow_actions *p = arg;
- struct flow_table_entry *entry =
- (struct flow_table_entry *) table_entry;
-
- struct ipv4_hdr *pkt_ip = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT32_PTR(pkt, p->params.ip_hdr_offset);
- enum rte_meter_color *pkt_color = (enum rte_meter_color *)
- RTE_MBUF_METADATA_UINT32_PTR(pkt, p->params.color_offset);
-
- /* Read (IP header) */
- uint32_t total_length = rte_bswap16(pkt_ip->total_length);
- uint32_t dscp = pkt_ip->type_of_service >> 2;
-
- uint32_t tc = p->dscp[dscp].traffic_class;
- enum rte_meter_color color = p->dscp[dscp].color;
-
- struct rte_meter_trtcm *meter = &entry->mp[tc].meter;
- struct pipeline_fa_policer_params *policer = &entry->mp[tc].policer;
- struct pipeline_fa_policer_stats *stats = &entry->mp[tc].stats;
-
- /* Read (entry), compute */
- enum rte_meter_color color2 = rte_meter_trtcm_color_aware_check(meter,
- time,
- total_length,
- color);
-
- enum rte_meter_color color3 = policer->action[color2].color;
- uint64_t drop = policer->action[color2].drop;
-
- /* Read (entry), write (entry, color) */
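-	/* drop is 0 or 1, so (drop ^ 1) counts kept packets branchlessly */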
- stats->n_pkts[color3] += drop ^ 1LLU;
- stats->n_pkts_drop += drop;
- *pkt_color = color3;
-
- return drop;
-}
-
-static inline uint64_t
-pkt4_work(
- struct rte_mbuf **pkts,
- struct rte_pipeline_table_entry **table_entries,
- void *arg,
- uint64_t time)
-{
- struct pipeline_flow_actions *p = arg;
-
- struct flow_table_entry *entry0 =
- (struct flow_table_entry *) table_entries[0];
- struct flow_table_entry *entry1 =
- (struct flow_table_entry *) table_entries[1];
- struct flow_table_entry *entry2 =
- (struct flow_table_entry *) table_entries[2];
- struct flow_table_entry *entry3 =
- (struct flow_table_entry *) table_entries[3];
-
- struct ipv4_hdr *pkt0_ip = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p->params.ip_hdr_offset);
- struct ipv4_hdr *pkt1_ip = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p->params.ip_hdr_offset);
- struct ipv4_hdr *pkt2_ip = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p->params.ip_hdr_offset);
- struct ipv4_hdr *pkt3_ip = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p->params.ip_hdr_offset);
-
- enum rte_meter_color *pkt0_color = (enum rte_meter_color *)
- RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p->params.color_offset);
- enum rte_meter_color *pkt1_color = (enum rte_meter_color *)
- RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p->params.color_offset);
- enum rte_meter_color *pkt2_color = (enum rte_meter_color *)
- RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p->params.color_offset);
- enum rte_meter_color *pkt3_color = (enum rte_meter_color *)
- RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p->params.color_offset);
-
- /* Read (IP header) */
- uint32_t total_length0 = rte_bswap16(pkt0_ip->total_length);
- uint32_t dscp0 = pkt0_ip->type_of_service >> 2;
-
- uint32_t total_length1 = rte_bswap16(pkt1_ip->total_length);
- uint32_t dscp1 = pkt1_ip->type_of_service >> 2;
-
- uint32_t total_length2 = rte_bswap16(pkt2_ip->total_length);
- uint32_t dscp2 = pkt2_ip->type_of_service >> 2;
-
- uint32_t total_length3 = rte_bswap16(pkt3_ip->total_length);
- uint32_t dscp3 = pkt3_ip->type_of_service >> 2;
-
- uint32_t tc0 = p->dscp[dscp0].traffic_class;
- enum rte_meter_color color0 = p->dscp[dscp0].color;
-
- uint32_t tc1 = p->dscp[dscp1].traffic_class;
- enum rte_meter_color color1 = p->dscp[dscp1].color;
-
- uint32_t tc2 = p->dscp[dscp2].traffic_class;
- enum rte_meter_color color2 = p->dscp[dscp2].color;
-
- uint32_t tc3 = p->dscp[dscp3].traffic_class;
- enum rte_meter_color color3 = p->dscp[dscp3].color;
-
- struct rte_meter_trtcm *meter0 = &entry0->mp[tc0].meter;
- struct pipeline_fa_policer_params *policer0 = &entry0->mp[tc0].policer;
- struct pipeline_fa_policer_stats *stats0 = &entry0->mp[tc0].stats;
-
- struct rte_meter_trtcm *meter1 = &entry1->mp[tc1].meter;
- struct pipeline_fa_policer_params *policer1 = &entry1->mp[tc1].policer;
- struct pipeline_fa_policer_stats *stats1 = &entry1->mp[tc1].stats;
-
- struct rte_meter_trtcm *meter2 = &entry2->mp[tc2].meter;
- struct pipeline_fa_policer_params *policer2 = &entry2->mp[tc2].policer;
- struct pipeline_fa_policer_stats *stats2 = &entry2->mp[tc2].stats;
-
- struct rte_meter_trtcm *meter3 = &entry3->mp[tc3].meter;
- struct pipeline_fa_policer_params *policer3 = &entry3->mp[tc3].policer;
- struct pipeline_fa_policer_stats *stats3 = &entry3->mp[tc3].stats;
-
- /* Read (entry), compute, write (entry) */
- enum rte_meter_color color2_0 = rte_meter_trtcm_color_aware_check(
- meter0,
- time,
- total_length0,
- color0);
-
- enum rte_meter_color color2_1 = rte_meter_trtcm_color_aware_check(
- meter1,
- time,
- total_length1,
- color1);
-
- enum rte_meter_color color2_2 = rte_meter_trtcm_color_aware_check(
- meter2,
- time,
- total_length2,
- color2);
-
- enum rte_meter_color color2_3 = rte_meter_trtcm_color_aware_check(
- meter3,
- time,
- total_length3,
- color3);
-
- enum rte_meter_color color3_0 = policer0->action[color2_0].color;
- enum rte_meter_color color3_1 = policer1->action[color2_1].color;
- enum rte_meter_color color3_2 = policer2->action[color2_2].color;
- enum rte_meter_color color3_3 = policer3->action[color2_3].color;
-
- uint64_t drop0 = policer0->action[color2_0].drop;
- uint64_t drop1 = policer1->action[color2_1].drop;
- uint64_t drop2 = policer2->action[color2_2].drop;
- uint64_t drop3 = policer3->action[color2_3].drop;
-
- /* Read (entry), write (entry, color) */
- stats0->n_pkts[color3_0] += drop0 ^ 1LLU;
- stats0->n_pkts_drop += drop0;
-
- stats1->n_pkts[color3_1] += drop1 ^ 1LLU;
- stats1->n_pkts_drop += drop1;
-
- stats2->n_pkts[color3_2] += drop2 ^ 1LLU;
- stats2->n_pkts_drop += drop2;
-
- stats3->n_pkts[color3_3] += drop3 ^ 1LLU;
- stats3->n_pkts_drop += drop3;
-
- *pkt0_color = color3_0;
- *pkt1_color = color3_1;
- *pkt2_color = color3_2;
- *pkt3_color = color3_3;
-
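-	/* Per-packet drop mask (bit n covers pkts[n]), consumed by the
-	 * PIPELINE_TABLE_AH_HIT_DROP_TIME handler generated below.
-	 */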
- return drop0 | (drop1 << 1) | (drop2 << 2) | (drop3 << 3);
-}
-
-PIPELINE_TABLE_AH_HIT_DROP_TIME(fa_table_ah_hit, pkt_work, pkt4_work);
-
-static rte_pipeline_table_action_handler_hit
-get_fa_table_ah_hit(__rte_unused struct pipeline_flow_actions *p)
-{
- return fa_table_ah_hit;
-}
-
-/*
- * Argument parsing
- */
-int
-pipeline_fa_parse_args(struct pipeline_fa_params *p,
- struct pipeline_params *params)
-{
- uint32_t n_flows_present = 0;
- uint32_t n_meters_per_flow_present = 0;
- uint32_t flow_id_offset_present = 0;
- uint32_t ip_hdr_offset_present = 0;
- uint32_t color_offset_present = 0;
- uint32_t i;
-
- /* Default values */
- p->n_meters_per_flow = 1;
- p->dscp_enabled = 0;
-
- for (i = 0; i < params->n_args; i++) {
- char *arg_name = params->args_name[i];
- char *arg_value = params->args_value[i];
-
- /* n_flows */
- if (strcmp(arg_name, "n_flows") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- n_flows_present == 0, params->name,
- arg_name);
- n_flows_present = 1;
-
- status = parser_read_uint32(&p->n_flows,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
- (p->n_flows != 0)), params->name,
- arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* n_meters_per_flow */
- if (strcmp(arg_name, "n_meters_per_flow") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- n_meters_per_flow_present == 0,
- params->name, arg_name);
- n_meters_per_flow_present = 1;
-
- status = parser_read_uint32(&p->n_meters_per_flow,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
- (p->n_meters_per_flow != 0)),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
- (p->n_meters_per_flow <=
- PIPELINE_FA_N_TC_MAX)), params->name,
- arg_name, arg_value);
-
- continue;
- }
-
- /* flow_id_offset */
- if (strcmp(arg_name, "flow_id_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- flow_id_offset_present == 0,
- params->name, arg_name);
- flow_id_offset_present = 1;
-
- status = parser_read_uint32(&p->flow_id_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* ip_hdr_offset */
- if (strcmp(arg_name, "ip_hdr_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- ip_hdr_offset_present == 0,
- params->name, arg_name);
- ip_hdr_offset_present = 1;
-
- status = parser_read_uint32(&p->ip_hdr_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* color_offset */
- if (strcmp(arg_name, "color_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- color_offset_present == 0, params->name,
- arg_name);
- color_offset_present = 1;
-
- status = parser_read_uint32(&p->color_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- p->dscp_enabled = 1;
-
- continue;
- }
-
- /* Unknown argument */
- PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
- }
-
- /* Check that mandatory arguments are present */
- PIPELINE_PARSE_ERR_MANDATORY((n_flows_present), params->name,
- "n_flows");
- PIPELINE_PARSE_ERR_MANDATORY((flow_id_offset_present),
- params->name, "flow_id_offset");
- PIPELINE_PARSE_ERR_MANDATORY((ip_hdr_offset_present),
- params->name, "ip_hdr_offset");
- PIPELINE_PARSE_ERR_MANDATORY((color_offset_present), params->name,
- "color_offset");
-
- return 0;
-}
-
-static void
-dscp_init(struct pipeline_flow_actions *p)
-{
- uint32_t i;
-
- for (i = 0; i < PIPELINE_FA_N_DSCP; i++) {
- p->dscp[i].traffic_class = 0;
- p->dscp[i].color = e_RTE_METER_GREEN;
- }
-}
-
-static void *pipeline_fa_init(struct pipeline_params *params,
- __rte_unused void *arg)
-{
- struct pipeline *p;
- struct pipeline_flow_actions *p_fa;
- uint32_t size, i;
-
- /* Check input arguments */
- if (params == NULL)
- return NULL;
-
- if (params->n_ports_in != params->n_ports_out)
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(
- sizeof(struct pipeline_flow_actions));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (p == NULL)
- return NULL;
- p_fa = (struct pipeline_flow_actions *) p;
-
- strcpy(p->name, params->name);
- p->log_level = params->log_level;
-
- PLOG(p, HIGH, "Flow actions");
-
- /* Parse arguments */
-	if (pipeline_fa_parse_args(&p_fa->params, params)) {
-		rte_free(p);
-		return NULL;
-	}
-
- dscp_init(p_fa);
-
- /* Pipeline */
- {
- struct rte_pipeline_params pipeline_params = {
- .name = params->name,
- .socket_id = params->socket_id,
- .offset_port_id = 0,
- };
-
- p->p = rte_pipeline_create(&pipeline_params);
- if (p->p == NULL) {
- rte_free(p);
- return NULL;
- }
- }
-
- /* Input ports */
- p->n_ports_in = params->n_ports_in;
- for (i = 0; i < p->n_ports_in; i++) {
- struct rte_pipeline_port_in_params port_params = {
- .ops = pipeline_port_in_params_get_ops(
- &params->port_in[i]),
- .arg_create = pipeline_port_in_params_convert(
- &params->port_in[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- .burst_size = params->port_in[i].burst_size,
- };
-
- int status = rte_pipeline_port_in_create(p->p,
- &port_params,
- &p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Output ports */
- p->n_ports_out = params->n_ports_out;
- for (i = 0; i < p->n_ports_out; i++) {
- struct rte_pipeline_port_out_params port_params = {
- .ops = pipeline_port_out_params_get_ops(
- &params->port_out[i]),
- .arg_create = pipeline_port_out_params_convert(
- &params->port_out[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- };
-
- int status = rte_pipeline_port_out_create(p->p,
- &port_params,
- &p->port_out_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Tables */
- p->n_tables = 1;
- {
- struct rte_table_array_params table_array_params = {
- .n_entries = p_fa->params.n_flows,
- .offset = p_fa->params.flow_id_offset,
- };
-
- struct rte_pipeline_table_params table_params = {
- .ops = &rte_table_array_ops,
- .arg_create = &table_array_params,
- .f_action_hit = get_fa_table_ah_hit(p_fa),
- .f_action_miss = NULL,
- .arg_ah = p_fa,
- .action_data_size =
- sizeof(struct flow_table_entry) -
- sizeof(struct rte_pipeline_table_entry),
- };
-
- int status;
-
- status = rte_pipeline_table_create(p->p,
- &table_params,
- &p->table_id[0]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Connecting input ports to tables */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_connect_to_table(p->p,
- p->port_in_id[i],
- p->table_id[0]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Enable input ports */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_enable(p->p,
- p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Initialize table entries */
- for (i = 0; i < p_fa->params.n_flows; i++) {
- struct rte_table_array_key key = {
- .pos = i,
- };
-
- struct flow_table_entry entry;
- struct rte_pipeline_table_entry *entry_ptr;
- int key_found, status;
-
- flow_table_entry_set_default(p_fa, &entry);
-
- status = rte_pipeline_table_entry_add(p->p,
- p->table_id[0],
- &key,
- (struct rte_pipeline_table_entry *) &entry,
- &key_found,
- &entry_ptr);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Check pipeline consistency */
- if (rte_pipeline_check(p->p) < 0) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
-
- /* Message queues */
- p->n_msgq = params->n_msgq;
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_in[i] = params->msgq_in[i];
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_out[i] = params->msgq_out[i];
-
- /* Message handlers */
- memcpy(p->handlers, handlers, sizeof(p->handlers));
- memcpy(p_fa->custom_handlers,
- custom_handlers,
- sizeof(p_fa->custom_handlers));
-
- return p;
-}
-
-static int
-pipeline_fa_free(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- rte_pipeline_free(p->p);
- rte_free(p);
- return 0;
-}
-
-static int
-pipeline_fa_timer(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- pipeline_msg_req_handle(p);
- rte_pipeline_flush(p->p);
-
- return 0;
-}
-
-void *
-pipeline_fa_msg_req_custom_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_flow_actions *p_fa =
- (struct pipeline_flow_actions *) p;
- struct pipeline_custom_msg_req *req = msg;
- pipeline_msg_req_handler f_handle;
-
- f_handle = (req->subtype < PIPELINE_FA_MSG_REQS) ?
- p_fa->custom_handlers[req->subtype] :
- pipeline_msg_req_invalid_handler;
-
- if (f_handle == NULL)
- f_handle = pipeline_msg_req_invalid_handler;
-
- return f_handle(p, req);
-}
-
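-/*
- * The handlers below reuse the request buffer for the response: req and rsp
- * alias the same message, so the reply is written in place.
- */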
-void *
-pipeline_fa_msg_req_flow_config_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
- struct pipeline_fa_flow_config_msg_req *req = msg;
- struct pipeline_fa_flow_config_msg_rsp *rsp = msg;
- struct flow_table_entry *entry;
- uint32_t mask, i;
-
- /* Set flow table entry to default if not configured before */
- if (req->entry_ptr == NULL) {
- struct rte_table_array_key key = {
- .pos = req->flow_id % p_fa->params.n_flows,
- };
-
- struct flow_table_entry default_entry;
-
- int key_found, status;
-
- flow_table_entry_set_default(p_fa, &default_entry);
-
- status = rte_pipeline_table_entry_add(p->p,
- p->table_id[0],
- &key,
- (struct rte_pipeline_table_entry *) &default_entry,
- &key_found,
- (struct rte_pipeline_table_entry **) &entry);
- if (status) {
- rsp->status = -1;
- return rsp;
- }
- } else
- entry = (struct flow_table_entry *) req->entry_ptr;
-
- /* Meter */
- for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
- int status;
-
- if ((mask & req->meter_update_mask) == 0)
- continue;
-
- status = flow_table_entry_set_meter(entry, i, &req->params);
- if (status) {
- rsp->status = -1;
- return rsp;
- }
- }
-
- /* Policer */
- for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
- if ((mask & req->policer_update_mask) == 0)
- continue;
-
- flow_table_entry_set_policer(entry, i, &req->params);
- }
-
- /* Port */
- if (req->port_update)
- flow_table_entry_set_port_id(p_fa, entry, &req->params);
-
- /* Response */
- rsp->status = 0;
- rsp->entry_ptr = (void *) entry;
- return rsp;
-}
-
-void *
-pipeline_fa_msg_req_flow_config_bulk_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
- struct pipeline_fa_flow_config_bulk_msg_req *req = msg;
- struct pipeline_fa_flow_config_bulk_msg_rsp *rsp = msg;
- uint32_t i;
-
- for (i = 0; i < req->n_flows; i++) {
- struct flow_table_entry *entry;
- uint32_t j, mask;
-
- /* Set flow table entry to default if not configured before */
- if (req->entry_ptr[i] == NULL) {
- struct rte_table_array_key key = {
- .pos = req->flow_id[i] % p_fa->params.n_flows,
- };
-
- struct flow_table_entry entry_to_add;
-
- int key_found, status;
-
- flow_table_entry_set_default(p_fa, &entry_to_add);
-
- status = rte_pipeline_table_entry_add(p->p,
- p->table_id[0],
- &key,
- (struct rte_pipeline_table_entry *) &entry_to_add,
- &key_found,
- (struct rte_pipeline_table_entry **) &entry);
- if (status) {
- rsp->n_flows = i;
- return rsp;
- }
-
- req->entry_ptr[i] = (void *) entry;
- } else
- entry = (struct flow_table_entry *) req->entry_ptr[i];
-
- /* Meter */
- for (j = 0, mask = 1;
- j < PIPELINE_FA_N_TC_MAX;
- j++, mask <<= 1) {
- int status;
-
- if ((mask & req->meter_update_mask) == 0)
- continue;
-
- status = flow_table_entry_set_meter(entry,
- j, &req->params[i]);
- if (status) {
- rsp->n_flows = i;
- return rsp;
- }
- }
-
- /* Policer */
- for (j = 0, mask = 1;
- j < PIPELINE_FA_N_TC_MAX;
- j++, mask <<= 1) {
- if ((mask & req->policer_update_mask) == 0)
- continue;
-
- flow_table_entry_set_policer(entry,
- j, &req->params[i]);
- }
-
- /* Port */
- if (req->port_update)
- flow_table_entry_set_port_id(p_fa,
- entry, &req->params[i]);
- }
-
- /* Response */
- rsp->n_flows = i;
- return rsp;
-}
-
-void *
-pipeline_fa_msg_req_dscp_config_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
- struct pipeline_fa_dscp_config_msg_req *req = msg;
- struct pipeline_fa_dscp_config_msg_rsp *rsp = msg;
-
- /* Check request */
- if ((req->dscp >= PIPELINE_FA_N_DSCP) ||
- (req->traffic_class >= PIPELINE_FA_N_TC_MAX) ||
- (req->color >= e_RTE_METER_COLORS)) {
- rsp->status = -1;
- return rsp;
- }
-
- p_fa->dscp[req->dscp].traffic_class = req->traffic_class;
- p_fa->dscp[req->dscp].color = req->color;
- rsp->status = 0;
- return rsp;
-}
-
-void *
-pipeline_fa_msg_req_policer_stats_read_handler(__rte_unused struct pipeline *p,
- void *msg)
-{
- struct pipeline_fa_policer_stats_msg_req *req = msg;
- struct pipeline_fa_policer_stats_msg_rsp *rsp = msg;
-
- struct flow_table_entry *entry = req->entry_ptr;
- uint32_t policer_id = req->policer_id;
- int clear = req->clear;
-
- /* Check request */
- if ((req->entry_ptr == NULL) ||
- (req->policer_id >= PIPELINE_FA_N_TC_MAX)) {
- rsp->status = -1;
- return rsp;
- }
-
- memcpy(&rsp->stats,
- &entry->mp[policer_id].stats,
- sizeof(rsp->stats));
- if (clear)
- memset(&entry->mp[policer_id].stats,
- 0, sizeof(entry->mp[policer_id].stats));
- rsp->status = 0;
- return rsp;
-}
-
-struct pipeline_be_ops pipeline_flow_actions_be_ops = {
- .f_init = pipeline_fa_init,
- .f_free = pipeline_fa_free,
- .f_run = NULL,
- .f_timer = pipeline_fa_timer,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h b/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h
deleted file mode 100644
index ef6cb263..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_FLOW_ACTIONS_BE_H__
-#define __INCLUDE_PIPELINE_FLOW_ACTIONS_BE_H__
-
-#include <rte_meter.h>
-
-#include "pipeline_common_be.h"
-
-#ifndef PIPELINE_FA_N_TC_MAX
-#define PIPELINE_FA_N_TC_MAX 4
-#endif
-
-#define PIPELINE_FA_N_DSCP 64
-
-struct pipeline_fa_params {
- uint32_t n_flows;
- uint32_t n_meters_per_flow;
- uint32_t flow_id_offset;
- uint32_t ip_hdr_offset;
- uint32_t color_offset;
- uint32_t dscp_enabled;
-};
-
-int
-pipeline_fa_parse_args(struct pipeline_fa_params *p,
- struct pipeline_params *params);
-
-struct pipeline_fa_policer_action {
- uint32_t drop;
- enum rte_meter_color color;
-};
-
-struct pipeline_fa_policer_params {
- struct pipeline_fa_policer_action action[e_RTE_METER_COLORS];
-};
-
-struct pipeline_fa_flow_params {
- struct rte_meter_trtcm_params m[PIPELINE_FA_N_TC_MAX];
- struct pipeline_fa_policer_params p[PIPELINE_FA_N_TC_MAX];
- uint32_t port_id;
-};
-
-int
-pipeline_fa_flow_params_set_default(struct pipeline_fa_flow_params *params);
-
-struct pipeline_fa_policer_stats {
- uint64_t n_pkts[e_RTE_METER_COLORS];
- uint64_t n_pkts_drop;
-};
-
-enum pipeline_fa_msg_req_type {
- PIPELINE_FA_MSG_REQ_FLOW_CONFIG = 0,
- PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK,
- PIPELINE_FA_MSG_REQ_DSCP_CONFIG,
- PIPELINE_FA_MSG_REQ_POLICER_STATS_READ,
- PIPELINE_FA_MSG_REQS,
-};
-
-/*
- * MSG FLOW CONFIG
- */
-struct pipeline_fa_flow_config_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fa_msg_req_type subtype;
-
- void *entry_ptr;
- uint32_t flow_id;
-
- uint32_t meter_update_mask;
- uint32_t policer_update_mask;
- uint32_t port_update;
- struct pipeline_fa_flow_params params;
-};
-
-struct pipeline_fa_flow_config_msg_rsp {
- int status;
- void *entry_ptr;
-};
-
-/*
- * MSG FLOW CONFIG BULK
- */
-struct pipeline_fa_flow_config_bulk_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fa_msg_req_type subtype;
-
- void **entry_ptr;
- uint32_t *flow_id;
- uint32_t n_flows;
-
- uint32_t meter_update_mask;
- uint32_t policer_update_mask;
- uint32_t port_update;
- struct pipeline_fa_flow_params *params;
-};
-
-struct pipeline_fa_flow_config_bulk_msg_rsp {
- uint32_t n_flows;
-};
-
-/*
- * MSG DSCP CONFIG
- */
-struct pipeline_fa_dscp_config_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fa_msg_req_type subtype;
-
- uint32_t dscp;
- uint32_t traffic_class;
- enum rte_meter_color color;
-};
-
-struct pipeline_fa_dscp_config_msg_rsp {
- int status;
-};
-
-/*
- * MSG POLICER STATS READ
- */
-struct pipeline_fa_policer_stats_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fa_msg_req_type subtype;
-
- void *entry_ptr;
- uint32_t policer_id;
- int clear;
-};
-
-struct pipeline_fa_policer_stats_msg_rsp {
- int status;
- struct pipeline_fa_policer_stats stats;
-};
-
-extern struct pipeline_be_ops pipeline_flow_actions_be_ops;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
deleted file mode 100644
index d39e0fb1..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
+++ /dev/null
@@ -1,1878 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <sys/queue.h>
-#include <netinet/in.h>
-#include <unistd.h>
-
-#include <rte_common.h>
-#include <rte_hexdump.h>
-#include <rte_malloc.h>
-#include <cmdline_rdline.h>
-#include <cmdline_parse.h>
-#include <cmdline_parse_num.h>
-#include <cmdline_parse_string.h>
-
-#include "app.h"
-#include "pipeline_common_fe.h"
-#include "pipeline_flow_classification.h"
-#include "hash_func.h"
-#include "parser.h"
-
-/*
- * Key conversion
- */
-
-struct pkt_key_qinq {
- uint16_t ethertype_svlan;
- uint16_t svlan;
- uint16_t ethertype_cvlan;
- uint16_t cvlan;
-} __attribute__((__packed__));
-
-struct pkt_key_ipv4_5tuple {
- uint8_t ttl;
- uint8_t proto;
- uint16_t checksum;
- uint32_t ip_src;
- uint32_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
-} __attribute__((__packed__));
-
-struct pkt_key_ipv6_5tuple {
- uint16_t payload_length;
- uint8_t proto;
- uint8_t hop_limit;
- uint8_t ip_src[16];
- uint8_t ip_dst[16];
- uint16_t port_src;
- uint16_t port_dst;
-} __attribute__((__packed__));
-
-static int
-app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
- uint8_t *key_out,
- uint32_t *signature)
-{
- uint8_t buffer[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
- uint8_t m[PIPELINE_FC_FLOW_KEY_MAX_SIZE]; /* key mask */
- void *key_buffer = (key_out) ? key_out : buffer;
-
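-	/* All-ones key mask: every key byte contributes to the signature */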
- memset(m, 0xFF, sizeof(m));
- switch (key_in->type) {
- case FLOW_KEY_QINQ:
- {
- struct pkt_key_qinq *qinq = key_buffer;
-
- qinq->ethertype_svlan = 0;
- qinq->svlan = rte_cpu_to_be_16(key_in->key.qinq.svlan);
- qinq->ethertype_cvlan = 0;
- qinq->cvlan = rte_cpu_to_be_16(key_in->key.qinq.cvlan);
-
- if (signature)
- *signature = (uint32_t) hash_default_key8(qinq, m, 8, 0);
- return 0;
- }
-
- case FLOW_KEY_IPV4_5TUPLE:
- {
- struct pkt_key_ipv4_5tuple *ipv4 = key_buffer;
-
- ipv4->ttl = 0;
- ipv4->proto = key_in->key.ipv4_5tuple.proto;
- ipv4->checksum = 0;
- ipv4->ip_src = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_src);
- ipv4->ip_dst = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_dst);
- ipv4->port_src = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_src);
- ipv4->port_dst = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_dst);
-
- if (signature)
- *signature = (uint32_t) hash_default_key16(ipv4, m, 16, 0);
- return 0;
- }
-
- case FLOW_KEY_IPV6_5TUPLE:
- {
- struct pkt_key_ipv6_5tuple *ipv6 = key_buffer;
-
- memset(ipv6, 0, 64);
- ipv6->payload_length = 0;
- ipv6->proto = key_in->key.ipv6_5tuple.proto;
- ipv6->hop_limit = 0;
- memcpy(&ipv6->ip_src, &key_in->key.ipv6_5tuple.ip_src, 16);
- memcpy(&ipv6->ip_dst, &key_in->key.ipv6_5tuple.ip_dst, 16);
- ipv6->port_src = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_src);
- ipv6->port_dst = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_dst);
-
- if (signature)
- *signature = (uint32_t) hash_default_key64(ipv6, m, 64, 0);
- return 0;
- }
-
- default:
- return -1;
- }
-}
-
-/*
- * Flow classification pipeline
- */
-
-struct app_pipeline_fc_flow {
- struct pipeline_fc_key key;
- uint32_t port_id;
- uint32_t flow_id;
- uint32_t signature;
- void *entry_ptr;
-
- TAILQ_ENTRY(app_pipeline_fc_flow) node;
-};
-
-#define N_BUCKETS 65536
-
-struct app_pipeline_fc {
- /* Parameters */
- uint32_t n_ports_in;
- uint32_t n_ports_out;
-
- /* Flows */
- TAILQ_HEAD(, app_pipeline_fc_flow) flows[N_BUCKETS];
- uint32_t n_flows;
-
- /* Default flow */
- uint32_t default_flow_present;
- uint32_t default_flow_port_id;
- void *default_flow_entry_ptr;
-};
-
-static struct app_pipeline_fc_flow *
-app_pipeline_fc_flow_find(struct app_pipeline_fc *p,
- struct pipeline_fc_key *key)
-{
- struct app_pipeline_fc_flow *f;
- uint32_t signature, bucket_id;
-
- app_pipeline_fc_key_convert(key, NULL, &signature);
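-	/* N_BUCKETS is a power of two, so the mask below is a cheap modulo */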
- bucket_id = signature & (N_BUCKETS - 1);
-
- TAILQ_FOREACH(f, &p->flows[bucket_id], node)
- if ((signature == f->signature) &&
- (memcmp(key,
- &f->key,
- sizeof(struct pipeline_fc_key)) == 0))
- return f;
-
- return NULL;
-}
-
-static void *
-app_pipeline_fc_init(struct pipeline_params *params,
- __rte_unused void *arg)
-{
- struct app_pipeline_fc *p;
- uint32_t size, i;
-
- /* Check input arguments */
- if ((params == NULL) ||
- (params->n_ports_in == 0) ||
- (params->n_ports_out == 0))
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fc));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (p == NULL)
- return NULL;
-
- /* Initialization */
- p->n_ports_in = params->n_ports_in;
- p->n_ports_out = params->n_ports_out;
-
- for (i = 0; i < N_BUCKETS; i++)
- TAILQ_INIT(&p->flows[i]);
- p->n_flows = 0;
-
- return (void *) p;
-}
-
-static int
-app_pipeline_fc_free(void *pipeline)
-{
- struct app_pipeline_fc *p = pipeline;
- uint32_t i;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- for (i = 0; i < N_BUCKETS; i++)
- while (!TAILQ_EMPTY(&p->flows[i])) {
- struct app_pipeline_fc_flow *flow;
-
- flow = TAILQ_FIRST(&p->flows[i]);
- TAILQ_REMOVE(&p->flows[i], flow, node);
- rte_free(flow);
- }
-
- rte_free(p);
- return 0;
-}
-
-static int
-app_pipeline_fc_key_check(struct pipeline_fc_key *key)
-{
- switch (key->type) {
- case FLOW_KEY_QINQ:
- {
- uint16_t svlan = key->key.qinq.svlan;
- uint16_t cvlan = key->key.qinq.cvlan;
-
- if ((svlan & 0xF000) ||
- (cvlan & 0xF000))
- return -1;
-
- return 0;
- }
-
- case FLOW_KEY_IPV4_5TUPLE:
- return 0;
-
- case FLOW_KEY_IPV6_5TUPLE:
- return 0;
-
- default:
- return -1;
- }
-}
-
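-/*
- * Input file format for app_pipeline_fc_load_file_qinq, one record per line;
- * lines starting with '#' are comments. Illustrative example:
- *
- * qinq 100 200 port 1 id 10
- */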
-int
-app_pipeline_fc_load_file_qinq(char *filename,
- struct pipeline_fc_key *keys,
- uint32_t *port_ids,
- uint32_t *flow_ids,
- uint32_t *n_keys,
- uint32_t *line)
-{
- FILE *f = NULL;
- char file_buf[1024];
- uint32_t i, l;
-
- /* Check input arguments */
- if ((filename == NULL) ||
- (keys == NULL) ||
- (port_ids == NULL) ||
- (flow_ids == NULL) ||
- (n_keys == NULL) ||
- (*n_keys == 0) ||
- (line == NULL)) {
- if (line)
- *line = 0;
- return -1;
- }
-
- /* Open input file */
- f = fopen(filename, "r");
- if (f == NULL) {
- *line = 0;
- return -1;
- }
-
- /* Read file */
- for (i = 0, l = 1; i < *n_keys; l++) {
- char *tokens[32];
- uint32_t n_tokens = RTE_DIM(tokens);
-
- uint16_t svlan, cvlan;
- uint32_t portid, flowid;
- int status;
-
- if (fgets(file_buf, sizeof(file_buf), f) == NULL)
- break;
-
- status = parse_tokenize_string(file_buf, tokens, &n_tokens);
- if (status)
- goto error1;
-
- if ((n_tokens == 0) || (tokens[0][0] == '#'))
- continue;
-
- if ((n_tokens != 7) ||
- strcmp(tokens[0], "qinq") ||
- parser_read_uint16(&svlan, tokens[1]) ||
- parser_read_uint16(&cvlan, tokens[2]) ||
- strcmp(tokens[3], "port") ||
- parser_read_uint32(&portid, tokens[4]) ||
- strcmp(tokens[5], "id") ||
- parser_read_uint32(&flowid, tokens[6]))
- goto error1;
-
- keys[i].type = FLOW_KEY_QINQ;
- keys[i].key.qinq.svlan = svlan;
- keys[i].key.qinq.cvlan = cvlan;
-
- port_ids[i] = portid;
- flow_ids[i] = flowid;
-
- if (app_pipeline_fc_key_check(&keys[i]))
- goto error1;
-
- i++;
- }
-
- /* Close file */
- *n_keys = i;
- fclose(f);
- return 0;
-
-error1:
- *line = l;
- fclose(f);
- return -1;
-}
-
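-/*
- * Input file format for app_pipeline_fc_load_file_ipv4, one record per line
- * (illustrative values; proto 6 is TCP):
- *
- * ipv4 192.168.0.1 10.0.0.1 1024 80 6 port 2 id 20
- */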
-int
-app_pipeline_fc_load_file_ipv4(char *filename,
- struct pipeline_fc_key *keys,
- uint32_t *port_ids,
- uint32_t *flow_ids,
- uint32_t *n_keys,
- uint32_t *line)
-{
- FILE *f = NULL;
- char file_buf[1024];
- uint32_t i, l;
-
- /* Check input arguments */
- if ((filename == NULL) ||
- (keys == NULL) ||
- (port_ids == NULL) ||
- (flow_ids == NULL) ||
- (n_keys == NULL) ||
- (*n_keys == 0) ||
- (line == NULL)) {
- if (line)
- *line = 0;
- return -1;
- }
-
- /* Open input file */
- f = fopen(filename, "r");
- if (f == NULL) {
- *line = 0;
- return -1;
- }
-
- /* Read file */
- for (i = 0, l = 1; i < *n_keys; l++) {
- char *tokens[32];
- uint32_t n_tokens = RTE_DIM(tokens);
-
- struct in_addr sipaddr, dipaddr;
- uint16_t sport, dport;
- uint8_t proto;
- uint32_t portid, flowid;
- int status;
-
- if (fgets(file_buf, sizeof(file_buf), f) == NULL)
- break;
-
- status = parse_tokenize_string(file_buf, tokens, &n_tokens);
- if (status)
- goto error2;
-
- if ((n_tokens == 0) || (tokens[0][0] == '#'))
- continue;
-
- if ((n_tokens != 10) ||
- strcmp(tokens[0], "ipv4") ||
- parse_ipv4_addr(tokens[1], &sipaddr) ||
- parse_ipv4_addr(tokens[2], &dipaddr) ||
- parser_read_uint16(&sport, tokens[3]) ||
- parser_read_uint16(&dport, tokens[4]) ||
- parser_read_uint8(&proto, tokens[5]) ||
- strcmp(tokens[6], "port") ||
- parser_read_uint32(&portid, tokens[7]) ||
- strcmp(tokens[8], "id") ||
- parser_read_uint32(&flowid, tokens[9]))
- goto error2;
-
- keys[i].type = FLOW_KEY_IPV4_5TUPLE;
- keys[i].key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
- keys[i].key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
- keys[i].key.ipv4_5tuple.port_src = sport;
- keys[i].key.ipv4_5tuple.port_dst = dport;
- keys[i].key.ipv4_5tuple.proto = proto;
-
- port_ids[i] = portid;
- flow_ids[i] = flowid;
-
- if (app_pipeline_fc_key_check(&keys[i]))
- goto error2;
-
- i++;
- }
-
- /* Close file */
- *n_keys = i;
- fclose(f);
- return 0;
-
-error2:
- *line = l;
- fclose(f);
- return -1;
-}
-
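-/*
- * Input file format for app_pipeline_fc_load_file_ipv6, one record per line
- * (illustrative values):
- *
- * ipv6 2001:db8::1 2001:db8::2 1024 80 6 port 3 id 30
- */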
-int
-app_pipeline_fc_load_file_ipv6(char *filename,
- struct pipeline_fc_key *keys,
- uint32_t *port_ids,
- uint32_t *flow_ids,
- uint32_t *n_keys,
- uint32_t *line)
-{
- FILE *f = NULL;
- char file_buf[1024];
- uint32_t i, l;
-
- /* Check input arguments */
- if ((filename == NULL) ||
- (keys == NULL) ||
- (port_ids == NULL) ||
- (flow_ids == NULL) ||
- (n_keys == NULL) ||
- (*n_keys == 0) ||
- (line == NULL)) {
- if (line)
- *line = 0;
- return -1;
- }
-
- /* Open input file */
- f = fopen(filename, "r");
- if (f == NULL) {
- *line = 0;
- return -1;
- }
-
- /* Read file */
- for (i = 0, l = 1; i < *n_keys; l++) {
- char *tokens[32];
- uint32_t n_tokens = RTE_DIM(tokens);
-
- struct in6_addr sipaddr, dipaddr;
- uint16_t sport, dport;
- uint8_t proto;
- uint32_t portid, flowid;
- int status;
-
- if (fgets(file_buf, sizeof(file_buf), f) == NULL)
- break;
-
- status = parse_tokenize_string(file_buf, tokens, &n_tokens);
- if (status)
- goto error3;
-
- if ((n_tokens == 0) || (tokens[0][0] == '#'))
- continue;
-
- if ((n_tokens != 10) ||
- strcmp(tokens[0], "ipv6") ||
- parse_ipv6_addr(tokens[1], &sipaddr) ||
- parse_ipv6_addr(tokens[2], &dipaddr) ||
- parser_read_uint16(&sport, tokens[3]) ||
- parser_read_uint16(&dport, tokens[4]) ||
- parser_read_uint8(&proto, tokens[5]) ||
- strcmp(tokens[6], "port") ||
- parser_read_uint32(&portid, tokens[7]) ||
- strcmp(tokens[8], "id") ||
- parser_read_uint32(&flowid, tokens[9]))
- goto error3;
-
- keys[i].type = FLOW_KEY_IPV6_5TUPLE;
- memcpy(keys[i].key.ipv6_5tuple.ip_src,
- sipaddr.s6_addr,
- sizeof(sipaddr.s6_addr));
- memcpy(keys[i].key.ipv6_5tuple.ip_dst,
- dipaddr.s6_addr,
- sizeof(dipaddr.s6_addr));
- keys[i].key.ipv6_5tuple.port_src = sport;
- keys[i].key.ipv6_5tuple.port_dst = dport;
- keys[i].key.ipv6_5tuple.proto = proto;
-
- port_ids[i] = portid;
- flow_ids[i] = flowid;
-
- if (app_pipeline_fc_key_check(&keys[i]))
- goto error3;
-
- i++;
- }
-
- /* Close file */
- *n_keys = i;
- fclose(f);
- return 0;
-
-error3:
- *line = l;
- fclose(f);
- return -1;
-}
-
-int
-app_pipeline_fc_add(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_fc_key *key,
- uint32_t port_id,
- uint32_t flow_id)
-{
- struct app_pipeline_fc *p;
- struct app_pipeline_fc_flow *flow;
-
- struct pipeline_fc_add_msg_req *req;
- struct pipeline_fc_add_msg_rsp *rsp;
-
- uint32_t signature;
- int new_flow;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
- if (p == NULL)
- return -1;
-
- if (port_id >= p->n_ports_out)
- return -1;
-
- if (app_pipeline_fc_key_check(key) != 0)
- return -1;
-
- /* Find existing flow or allocate new flow */
- flow = app_pipeline_fc_flow_find(p, key);
- new_flow = (flow == NULL);
- if (flow == NULL) {
- flow = rte_malloc(NULL, sizeof(*flow), RTE_CACHE_LINE_SIZE);
-
- if (flow == NULL)
- return -1;
- }
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
-	if (req == NULL) {
-		if (new_flow)
-			rte_free(flow);
-		return -1;
-	}
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD;
- app_pipeline_fc_key_convert(key, req->key, &signature);
- req->port_id = port_id;
- req->flow_id = flow_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL) {
- if (new_flow)
- rte_free(flow);
- return -1;
- }
-
- /* Read response and write flow */
- if (rsp->status ||
- (rsp->entry_ptr == NULL) ||
- ((new_flow == 0) && (rsp->key_found == 0)) ||
- ((new_flow == 1) && (rsp->key_found == 1))) {
- app_msg_free(app, rsp);
- if (new_flow)
- rte_free(flow);
- return -1;
- }
-
- memset(&flow->key, 0, sizeof(flow->key));
- memcpy(&flow->key, key, sizeof(flow->key));
- flow->port_id = port_id;
- flow->flow_id = flow_id;
- flow->signature = signature;
- flow->entry_ptr = rsp->entry_ptr;
-
- /* Commit rule */
- if (new_flow) {
- uint32_t bucket_id = signature & (N_BUCKETS - 1);
-
- TAILQ_INSERT_TAIL(&p->flows[bucket_id], flow, node);
- p->n_flows++;
- }
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_fc_add_bulk(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_fc_key *key,
- uint32_t *port_id,
- uint32_t *flow_id,
- uint32_t n_keys)
-{
- struct app_pipeline_fc *p;
- struct pipeline_fc_add_bulk_msg_req *req;
- struct pipeline_fc_add_bulk_msg_rsp *rsp;
-
- struct app_pipeline_fc_flow **flow;
- uint32_t *signature;
- int *new_flow;
- struct pipeline_fc_add_bulk_flow_req *flow_req;
- struct pipeline_fc_add_bulk_flow_rsp *flow_rsp;
-
- uint32_t i;
- int status;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL) ||
- (port_id == NULL) ||
- (flow_id == NULL) ||
- (n_keys == 0))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
- if (p == NULL)
- return -1;
-
- for (i = 0; i < n_keys; i++)
- if (port_id[i] >= p->n_ports_out)
- return -1;
-
- for (i = 0; i < n_keys; i++)
- if (app_pipeline_fc_key_check(&key[i]) != 0)
- return -1;
-
- /* Memory allocation */
- flow = rte_malloc(NULL,
- n_keys * sizeof(struct app_pipeline_fc_flow *),
- RTE_CACHE_LINE_SIZE);
- if (flow == NULL)
- return -1;
-
- signature = rte_malloc(NULL,
- n_keys * sizeof(uint32_t),
- RTE_CACHE_LINE_SIZE);
- if (signature == NULL) {
- rte_free(flow);
- return -1;
- }
-
- new_flow = rte_malloc(
- NULL,
- n_keys * sizeof(int),
- RTE_CACHE_LINE_SIZE);
- if (new_flow == NULL) {
- rte_free(signature);
- rte_free(flow);
- return -1;
- }
-
- flow_req = rte_malloc(NULL,
- n_keys * sizeof(struct pipeline_fc_add_bulk_flow_req),
- RTE_CACHE_LINE_SIZE);
- if (flow_req == NULL) {
- rte_free(new_flow);
- rte_free(signature);
- rte_free(flow);
- return -1;
- }
-
- flow_rsp = rte_malloc(NULL,
- n_keys * sizeof(struct pipeline_fc_add_bulk_flow_rsp),
- RTE_CACHE_LINE_SIZE);
- if (flow_rsp == NULL) {
- rte_free(flow_req);
- rte_free(new_flow);
- rte_free(signature);
- rte_free(flow);
- return -1;
- }
-
- /* Find existing flow or allocate new flow */
- for (i = 0; i < n_keys; i++) {
- flow[i] = app_pipeline_fc_flow_find(p, &key[i]);
- new_flow[i] = (flow[i] == NULL);
- if (flow[i] == NULL) {
- flow[i] = rte_zmalloc(NULL,
- sizeof(struct app_pipeline_fc_flow),
- RTE_CACHE_LINE_SIZE);
-
- if (flow[i] == NULL) {
- uint32_t j;
-
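-				/* Roll back the flows allocated so far in this loop */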
- for (j = 0; j < i; j++)
- if (new_flow[j])
- rte_free(flow[j]);
-
- rte_free(flow_rsp);
- rte_free(flow_req);
- rte_free(new_flow);
- rte_free(signature);
- rte_free(flow);
- return -1;
- }
- }
- }
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL) {
- for (i = 0; i < n_keys; i++)
- if (new_flow[i])
- rte_free(flow[i]);
-
- rte_free(flow_rsp);
- rte_free(flow_req);
- rte_free(new_flow);
- rte_free(signature);
- rte_free(flow);
- return -1;
- }
-
- for (i = 0; i < n_keys; i++) {
- app_pipeline_fc_key_convert(&key[i],
- flow_req[i].key,
- &signature[i]);
- flow_req[i].port_id = port_id[i];
- flow_req[i].flow_id = flow_id[i];
- }
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK;
- req->req = flow_req;
- req->rsp = flow_rsp;
- req->n_keys = n_keys;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, 10000);
- if (rsp == NULL) {
- for (i = 0; i < n_keys; i++)
- if (new_flow[i])
- rte_free(flow[i]);
-
- rte_free(flow_rsp);
- rte_free(flow_req);
- rte_free(new_flow);
- rte_free(signature);
- rte_free(flow);
- return -1;
- }
-
- /* Read response */
- status = 0;
-
- for (i = 0; i < rsp->n_keys; i++)
- if ((flow_rsp[i].entry_ptr == NULL) ||
- ((new_flow[i] == 0) && (flow_rsp[i].key_found == 0)) ||
- ((new_flow[i] == 1) && (flow_rsp[i].key_found == 1)))
- status = -1;
-
- if (rsp->n_keys < n_keys)
- status = -1;
-
- /* Commit flows */
- for (i = 0; i < rsp->n_keys; i++) {
- memcpy(&flow[i]->key, &key[i], sizeof(flow[i]->key));
- flow[i]->port_id = port_id[i];
- flow[i]->flow_id = flow_id[i];
- flow[i]->signature = signature[i];
- flow[i]->entry_ptr = flow_rsp[i].entry_ptr;
-
- if (new_flow[i]) {
- uint32_t bucket_id = signature[i] & (N_BUCKETS - 1);
-
- TAILQ_INSERT_TAIL(&p->flows[bucket_id], flow[i], node);
- p->n_flows++;
- }
- }
-
- /* Free resources */
-
- for (i = rsp->n_keys; i < n_keys; i++)
- if (new_flow[i])
- rte_free(flow[i]);
-
- app_msg_free(app, rsp);
- rte_free(flow_rsp);
- rte_free(flow_req);
- rte_free(new_flow);
- rte_free(signature);
- rte_free(flow);
-
- return status;
-}
-
-int
-app_pipeline_fc_del(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_fc_key *key)
-{
- struct app_pipeline_fc *p;
- struct app_pipeline_fc_flow *flow;
-
- struct pipeline_fc_del_msg_req *req;
- struct pipeline_fc_del_msg_rsp *rsp;
-
- uint32_t signature, bucket_id;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
- if (p == NULL)
- return -1;
-
- if (app_pipeline_fc_key_check(key) != 0)
- return -1;
-
-	/* Find flow */
- flow = app_pipeline_fc_flow_find(p, key);
- if (flow == NULL)
- return 0;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FC_MSG_REQ_FLOW_DEL;
- app_pipeline_fc_key_convert(key, req->key, &signature);
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response */
- if (rsp->status || !rsp->key_found) {
- app_msg_free(app, rsp);
- return -1;
- }
-
-	/* Remove flow */
- bucket_id = signature & (N_BUCKETS - 1);
- TAILQ_REMOVE(&p->flows[bucket_id], flow, node);
- p->n_flows--;
- rte_free(flow);
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_fc_add_default(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id)
-{
- struct app_pipeline_fc *p;
-
- struct pipeline_fc_add_default_msg_req *req;
- struct pipeline_fc_add_default_msg_rsp *rsp;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
- if (p == NULL)
- return -1;
-
- if (port_id >= p->n_ports_out)
- return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT;
- req->port_id = port_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response and write flow */
- if (rsp->status || (rsp->entry_ptr == NULL)) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- p->default_flow_port_id = port_id;
- p->default_flow_entry_ptr = rsp->entry_ptr;
-
-	/* Commit default flow */
- p->default_flow_present = 1;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_fc_del_default(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_pipeline_fc *p;
-
- struct pipeline_fc_del_default_msg_req *req;
- struct pipeline_fc_del_default_msg_rsp *rsp;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
- if (p == NULL)
-		return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response */
- if (rsp->status) {
- app_msg_free(app, rsp);
- return -1;
- }
-
-	/* Commit default flow */
- p->default_flow_present = 0;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-/*
- * Flow ls
- */
-
-static void
-print_fc_qinq_flow(struct app_pipeline_fc_flow *flow)
-{
- printf("(SVLAN = %" PRIu32 ", "
- "CVLAN = %" PRIu32 ") => "
- "Port = %" PRIu32 ", "
- "Flow ID = %" PRIu32 ", "
- "(signature = 0x%08" PRIx32 ", "
- "entry_ptr = %p)\n",
-
- flow->key.key.qinq.svlan,
- flow->key.key.qinq.cvlan,
- flow->port_id,
- flow->flow_id,
- flow->signature,
- flow->entry_ptr);
-}
-
-static void
-print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
-{
- printf("(SA = %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ", "
- "DA = %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ", "
- "SP = %" PRIu32 ", "
- "DP = %" PRIu32 ", "
- "Proto = %" PRIu32 ") => "
- "Port = %" PRIu32 ", "
- "Flow ID = %" PRIu32 " "
- "(signature = 0x%08" PRIx32 ", "
- "entry_ptr = %p)\n",
-
- (flow->key.key.ipv4_5tuple.ip_src >> 24) & 0xFF,
- (flow->key.key.ipv4_5tuple.ip_src >> 16) & 0xFF,
- (flow->key.key.ipv4_5tuple.ip_src >> 8) & 0xFF,
- flow->key.key.ipv4_5tuple.ip_src & 0xFF,
-
- (flow->key.key.ipv4_5tuple.ip_dst >> 24) & 0xFF,
- (flow->key.key.ipv4_5tuple.ip_dst >> 16) & 0xFF,
- (flow->key.key.ipv4_5tuple.ip_dst >> 8) & 0xFF,
- flow->key.key.ipv4_5tuple.ip_dst & 0xFF,
-
- flow->key.key.ipv4_5tuple.port_src,
- flow->key.key.ipv4_5tuple.port_dst,
-
- flow->key.key.ipv4_5tuple.proto,
-
- flow->port_id,
- flow->flow_id,
- flow->signature,
- flow->entry_ptr);
-}
-
-static void
-print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow)
-{
- printf("(SA = %02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
- ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
- ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
- ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32 ", "
- "DA = %02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
- ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
- ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
- ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32 ", "
-	"SP = %" PRIu32 ", "
-	"DP = %" PRIu32 ", "
-	"Proto = %" PRIu32 ") => "
-	"Port = %" PRIu32 ", "
- "Flow ID = %" PRIu32 " "
- "(signature = 0x%08" PRIx32 ", "
- "entry_ptr = %p)\n",
-
- flow->key.key.ipv6_5tuple.ip_src[0],
- flow->key.key.ipv6_5tuple.ip_src[1],
- flow->key.key.ipv6_5tuple.ip_src[2],
- flow->key.key.ipv6_5tuple.ip_src[3],
- flow->key.key.ipv6_5tuple.ip_src[4],
- flow->key.key.ipv6_5tuple.ip_src[5],
- flow->key.key.ipv6_5tuple.ip_src[6],
- flow->key.key.ipv6_5tuple.ip_src[7],
- flow->key.key.ipv6_5tuple.ip_src[8],
- flow->key.key.ipv6_5tuple.ip_src[9],
- flow->key.key.ipv6_5tuple.ip_src[10],
- flow->key.key.ipv6_5tuple.ip_src[11],
- flow->key.key.ipv6_5tuple.ip_src[12],
- flow->key.key.ipv6_5tuple.ip_src[13],
- flow->key.key.ipv6_5tuple.ip_src[14],
- flow->key.key.ipv6_5tuple.ip_src[15],
-
- flow->key.key.ipv6_5tuple.ip_dst[0],
- flow->key.key.ipv6_5tuple.ip_dst[1],
- flow->key.key.ipv6_5tuple.ip_dst[2],
- flow->key.key.ipv6_5tuple.ip_dst[3],
- flow->key.key.ipv6_5tuple.ip_dst[4],
- flow->key.key.ipv6_5tuple.ip_dst[5],
- flow->key.key.ipv6_5tuple.ip_dst[6],
- flow->key.key.ipv6_5tuple.ip_dst[7],
- flow->key.key.ipv6_5tuple.ip_dst[8],
- flow->key.key.ipv6_5tuple.ip_dst[9],
- flow->key.key.ipv6_5tuple.ip_dst[10],
- flow->key.key.ipv6_5tuple.ip_dst[11],
- flow->key.key.ipv6_5tuple.ip_dst[12],
- flow->key.key.ipv6_5tuple.ip_dst[13],
- flow->key.key.ipv6_5tuple.ip_dst[14],
- flow->key.key.ipv6_5tuple.ip_dst[15],
-
- flow->key.key.ipv6_5tuple.port_src,
- flow->key.key.ipv6_5tuple.port_dst,
-
- flow->key.key.ipv6_5tuple.proto,
-
- flow->port_id,
- flow->flow_id,
- flow->signature,
- flow->entry_ptr);
-}
-
-static void
-print_fc_flow(struct app_pipeline_fc_flow *flow)
-{
- switch (flow->key.type) {
- case FLOW_KEY_QINQ:
- print_fc_qinq_flow(flow);
- break;
-
- case FLOW_KEY_IPV4_5TUPLE:
- print_fc_ipv4_5tuple_flow(flow);
- break;
-
- case FLOW_KEY_IPV6_5TUPLE:
- print_fc_ipv6_5tuple_flow(flow);
- break;
- }
-}
-
-static int
-app_pipeline_fc_ls(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_pipeline_fc *p;
- struct app_pipeline_fc_flow *flow;
- uint32_t i;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
- if (p == NULL)
- return -1;
-
- for (i = 0; i < N_BUCKETS; i++)
- TAILQ_FOREACH(flow, &p->flows[i], node)
- print_fc_flow(flow);
-
- if (p->default_flow_present)
- printf("Default flow: port %" PRIu32 " (entry ptr = %p)\n",
- p->default_flow_port_id,
- p->default_flow_entry_ptr);
- else
- printf("Default: DROP\n");
-
- return 0;
-}
-
-/*
- * flow
- *
- * flow add:
- * p <pipelineid> flow add qinq <svlan> <cvlan> port <portid> id <flowid>
- * p <pipelineid> flow add qinq bulk <file>
- * p <pipelineid> flow add ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
- * p <pipelineid> flow add ipv4 bulk <file>
- * p <pipelineid> flow add ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
- * p <pipelineid> flow add ipv6 bulk <file>
- *
- * flow add default:
- * p <pipelineid> flow add default <portid>
- *
- * flow del:
- * p <pipelineid> flow del qinq <svlan> <cvlan>
- * p <pipelineid> flow del ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto>
- * p <pipelineid> flow del ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto>
- *
- * flow del default:
- * p <pipelineid> flow del default
- *
- * flow ls:
- * p <pipelineid> flow ls
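- *
- * Example session (illustrative pipeline and field values):
- *    p 1 flow add qinq 16 32 port 0 id 5
- *    p 1 flow add ipv4 10.0.0.1 10.0.0.2 100 200 6 port 1 id 7
- *    p 1 flow del qinq 16 32
- *    p 1 flow ls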
- */
-
-struct cmd_flow_result {
- cmdline_fixed_string_t p_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t flow_string;
- cmdline_multi_string_t multi_string;
-};
-
-static void
-cmd_flow_parsed(void *parsed_result,
-	__rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_flow_result *results = parsed_result;
- struct app_params *app = data;
-
- char *tokens[16];
- uint32_t n_tokens = RTE_DIM(tokens);
- int status;
-
- status = parse_tokenize_string(results->multi_string, tokens, &n_tokens);
- if (status) {
- printf(CMD_MSG_TOO_MANY_ARGS, "flow");
- return;
- }
-
- /* flow add qinq */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "qinq") == 0) &&
- strcmp(tokens[2], "bulk")) {
- struct pipeline_fc_key key;
- uint32_t svlan;
- uint32_t cvlan;
- uint32_t port_id;
- uint32_t flow_id;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 8) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow add qinq");
- return;
- }
-
- if (parser_read_uint32(&svlan, tokens[2]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "svlan");
- return;
- }
-
- if (parser_read_uint32(&cvlan, tokens[3]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "cvlan");
- return;
- }
-
- if (strcmp(tokens[4], "port") != 0) {
- printf(CMD_MSG_ARG_NOT_FOUND, "port");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[5]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- if (strcmp(tokens[6], "id") != 0) {
- printf(CMD_MSG_ARG_NOT_FOUND, "id");
- return;
- }
-
- if (parser_read_uint32(&flow_id, tokens[7]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "flowid");
- return;
- }
-
- key.type = FLOW_KEY_QINQ;
- key.key.qinq.svlan = svlan;
- key.key.qinq.cvlan = cvlan;
-
- status = app_pipeline_fc_add(app,
- results->pipeline_id,
- &key,
- port_id,
- flow_id);
- if (status)
- printf(CMD_MSG_FAIL, "flow add qinq");
-
- return;
- } /* flow add qinq */
-
- /* flow add ipv4 */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "ipv4") == 0) &&
- strcmp(tokens[2], "bulk")) {
- struct pipeline_fc_key key;
- struct in_addr sipaddr;
- struct in_addr dipaddr;
- uint32_t sport;
- uint32_t dport;
- uint32_t proto;
- uint32_t port_id;
- uint32_t flow_id;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 11) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv4");
- return;
- }
-
- if (parse_ipv4_addr(tokens[2], &sipaddr) != 0) {
- printf(CMD_MSG_INVALID_ARG, "sipv4addr");
- return;
- }
- if (parse_ipv4_addr(tokens[3], &dipaddr) != 0) {
- printf(CMD_MSG_INVALID_ARG, "dipv4addr");
- return;
- }
-
- if (parser_read_uint32(&sport, tokens[4]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "sport");
- return;
- }
-
- if (parser_read_uint32(&dport, tokens[5]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "dport");
- return;
- }
-
- if (parser_read_uint32(&proto, tokens[6]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "proto");
- return;
- }
-
- if (strcmp(tokens[7], "port") != 0) {
- printf(CMD_MSG_ARG_NOT_FOUND, "port");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[8]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- if (strcmp(tokens[9], "id") != 0) {
- printf(CMD_MSG_ARG_NOT_FOUND, "id");
- return;
- }
-
- if (parser_read_uint32(&flow_id, tokens[10]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "flowid");
- return;
- }
-
- key.type = FLOW_KEY_IPV4_5TUPLE;
- key.key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
- key.key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
- key.key.ipv4_5tuple.port_src = sport;
- key.key.ipv4_5tuple.port_dst = dport;
- key.key.ipv4_5tuple.proto = proto;
-
- status = app_pipeline_fc_add(app,
- results->pipeline_id,
- &key,
- port_id,
- flow_id);
- if (status)
- printf(CMD_MSG_FAIL, "flow add ipv4");
-
- return;
- } /* flow add ipv4 */
-
- /* flow add ipv6 */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "ipv6") == 0) &&
- strcmp(tokens[2], "bulk")) {
- struct pipeline_fc_key key;
- struct in6_addr sipaddr;
- struct in6_addr dipaddr;
- uint32_t sport;
- uint32_t dport;
- uint32_t proto;
- uint32_t port_id;
- uint32_t flow_id;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 11) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv6");
- return;
- }
-
- if (parse_ipv6_addr(tokens[2], &sipaddr) != 0) {
- printf(CMD_MSG_INVALID_ARG, "sipv6addr");
- return;
- }
- if (parse_ipv6_addr(tokens[3], &dipaddr) != 0) {
- printf(CMD_MSG_INVALID_ARG, "dipv6addr");
- return;
- }
-
- if (parser_read_uint32(&sport, tokens[4]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "sport");
- return;
- }
-
- if (parser_read_uint32(&dport, tokens[5]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "dport");
- return;
- }
-
- if (parser_read_uint32(&proto, tokens[6]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "proto");
- return;
- }
-
- if (strcmp(tokens[7], "port") != 0) {
- printf(CMD_MSG_ARG_NOT_FOUND, "port");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[8]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- if (strcmp(tokens[9], "id") != 0) {
- printf(CMD_MSG_ARG_NOT_FOUND, "id");
- return;
- }
-
- if (parser_read_uint32(&flow_id, tokens[10]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "flowid");
- return;
- }
-
- key.type = FLOW_KEY_IPV6_5TUPLE;
-		memcpy(key.key.ipv6_5tuple.ip_src, &sipaddr, 16);
-		memcpy(key.key.ipv6_5tuple.ip_dst, &dipaddr, 16);
- key.key.ipv6_5tuple.port_src = sport;
- key.key.ipv6_5tuple.port_dst = dport;
- key.key.ipv6_5tuple.proto = proto;
-
- status = app_pipeline_fc_add(app,
- results->pipeline_id,
- &key,
- port_id,
- flow_id);
- if (status)
- printf(CMD_MSG_FAIL, "flow add ipv6");
-
- return;
- } /* flow add ipv6 */
-
- /* flow add qinq bulk */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "qinq") == 0) &&
- (strcmp(tokens[2], "bulk") == 0)) {
- struct pipeline_fc_key *keys;
- uint32_t *port_ids, *flow_ids, n_keys, line;
- char *filename;
-
- if (n_tokens != 4) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow add qinq bulk");
- return;
- }
-
- filename = tokens[3];
-
- n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
- keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
- if (keys == NULL)
- return;
- memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
-
- port_ids = malloc(n_keys * sizeof(uint32_t));
- if (port_ids == NULL) {
- free(keys);
- return;
- }
-
- flow_ids = malloc(n_keys * sizeof(uint32_t));
- if (flow_ids == NULL) {
- free(port_ids);
- free(keys);
- return;
- }
-
- status = app_pipeline_fc_load_file_qinq(filename,
- keys,
- port_ids,
- flow_ids,
- &n_keys,
- &line);
- if (status != 0) {
- printf(CMD_MSG_FILE_ERR, filename, line);
- free(flow_ids);
- free(port_ids);
- free(keys);
- return;
- }
-
- status = app_pipeline_fc_add_bulk(app,
- results->pipeline_id,
- keys,
- port_ids,
- flow_ids,
- n_keys);
- if (status)
- printf(CMD_MSG_FAIL, "flow add qinq bulk");
-
- free(flow_ids);
- free(port_ids);
- free(keys);
- return;
- } /* flow add qinq bulk */
-
- /* flow add ipv4 bulk */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "ipv4") == 0) &&
- (strcmp(tokens[2], "bulk") == 0)) {
- struct pipeline_fc_key *keys;
- uint32_t *port_ids, *flow_ids, n_keys, line;
- char *filename;
-
- if (n_tokens != 4) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv4 bulk");
- return;
- }
-
- filename = tokens[3];
-
- n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
- keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
- if (keys == NULL)
- return;
- memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
-
- port_ids = malloc(n_keys * sizeof(uint32_t));
- if (port_ids == NULL) {
- free(keys);
- return;
- }
-
- flow_ids = malloc(n_keys * sizeof(uint32_t));
- if (flow_ids == NULL) {
- free(port_ids);
- free(keys);
- return;
- }
-
- status = app_pipeline_fc_load_file_ipv4(filename,
- keys,
- port_ids,
- flow_ids,
- &n_keys,
- &line);
- if (status != 0) {
- printf(CMD_MSG_FILE_ERR, filename, line);
- free(flow_ids);
- free(port_ids);
- free(keys);
- return;
- }
-
- status = app_pipeline_fc_add_bulk(app,
- results->pipeline_id,
- keys,
- port_ids,
- flow_ids,
- n_keys);
- if (status)
- printf(CMD_MSG_FAIL, "flow add ipv4 bulk");
-
- free(flow_ids);
- free(port_ids);
- free(keys);
- return;
- } /* flow add ipv4 bulk */
-
- /* flow add ipv6 bulk */
- if ((n_tokens >= 3) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "ipv6") == 0) &&
- (strcmp(tokens[2], "bulk") == 0)) {
- struct pipeline_fc_key *keys;
- uint32_t *port_ids, *flow_ids, n_keys, line;
- char *filename;
-
- if (n_tokens != 4) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv6 bulk");
- return;
- }
-
- filename = tokens[3];
-
- n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
- keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
- if (keys == NULL)
- return;
- memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
-
- port_ids = malloc(n_keys * sizeof(uint32_t));
- if (port_ids == NULL) {
- free(keys);
- return;
- }
-
- flow_ids = malloc(n_keys * sizeof(uint32_t));
- if (flow_ids == NULL) {
- free(port_ids);
- free(keys);
- return;
- }
-
- status = app_pipeline_fc_load_file_ipv6(filename,
- keys,
- port_ids,
- flow_ids,
- &n_keys,
- &line);
- if (status != 0) {
- printf(CMD_MSG_FILE_ERR, filename, line);
- free(flow_ids);
- free(port_ids);
- free(keys);
- return;
- }
-
- status = app_pipeline_fc_add_bulk(app,
- results->pipeline_id,
- keys,
- port_ids,
- flow_ids,
- n_keys);
- if (status)
- printf(CMD_MSG_FAIL, "flow add ipv6 bulk");
-
- free(flow_ids);
- free(port_ids);
- free(keys);
- return;
- } /* flow add ipv6 bulk */
-
-	/* flow add default */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "default") == 0)) {
- uint32_t port_id;
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow add default");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[2]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- status = app_pipeline_fc_add_default(app,
- results->pipeline_id,
- port_id);
- if (status)
- printf(CMD_MSG_FAIL, "flow add default");
-
- return;
- } /* flow add default */
-
- /* flow del qinq */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "qinq") == 0)) {
- struct pipeline_fc_key key;
- uint32_t svlan;
- uint32_t cvlan;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 4) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow del qinq");
- return;
- }
-
- if (parser_read_uint32(&svlan, tokens[2]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "svlan");
- return;
- }
-
- if (parser_read_uint32(&cvlan, tokens[3]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "cvlan");
- return;
- }
-
- key.type = FLOW_KEY_QINQ;
- key.key.qinq.svlan = svlan;
- key.key.qinq.cvlan = cvlan;
-
- status = app_pipeline_fc_del(app,
- results->pipeline_id,
- &key);
- if (status)
- printf(CMD_MSG_FAIL, "flow del qinq");
-
- return;
- } /* flow del qinq */
-
- /* flow del ipv4 */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "ipv4") == 0)) {
- struct pipeline_fc_key key;
- struct in_addr sipaddr;
- struct in_addr dipaddr;
- uint32_t sport;
- uint32_t dport;
- uint32_t proto;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 7) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow del ipv4");
- return;
- }
-
- if (parse_ipv4_addr(tokens[2], &sipaddr) != 0) {
- printf(CMD_MSG_INVALID_ARG, "sipv4addr");
- return;
- }
- if (parse_ipv4_addr(tokens[3], &dipaddr) != 0) {
- printf(CMD_MSG_INVALID_ARG, "dipv4addr");
- return;
- }
-
- if (parser_read_uint32(&sport, tokens[4]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "sport");
- return;
- }
-
- if (parser_read_uint32(&dport, tokens[5]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "dport");
- return;
- }
-
- if (parser_read_uint32(&proto, tokens[6]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "proto");
- return;
- }
-
- key.type = FLOW_KEY_IPV4_5TUPLE;
- key.key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
- key.key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
- key.key.ipv4_5tuple.port_src = sport;
- key.key.ipv4_5tuple.port_dst = dport;
- key.key.ipv4_5tuple.proto = proto;
-
- status = app_pipeline_fc_del(app,
- results->pipeline_id,
- &key);
- if (status)
- printf(CMD_MSG_FAIL, "flow del ipv4");
-
- return;
- } /* flow del ipv4 */
-
- /* flow del ipv6 */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "ipv6") == 0)) {
- struct pipeline_fc_key key;
- struct in6_addr sipaddr;
- struct in6_addr dipaddr;
- uint32_t sport;
- uint32_t dport;
- uint32_t proto;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 7) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow del ipv6");
- return;
- }
-
- if (parse_ipv6_addr(tokens[2], &sipaddr) != 0) {
- printf(CMD_MSG_INVALID_ARG, "sipv6addr");
- return;
- }
-
- if (parse_ipv6_addr(tokens[3], &dipaddr) != 0) {
- printf(CMD_MSG_INVALID_ARG, "dipv6addr");
- return;
- }
-
- if (parser_read_uint32(&sport, tokens[4]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "sport");
- return;
- }
-
- if (parser_read_uint32(&dport, tokens[5]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "dport");
- return;
- }
-
- if (parser_read_uint32(&proto, tokens[6]) != 0) {
- printf(CMD_MSG_INVALID_ARG, "proto");
- return;
- }
-
- key.type = FLOW_KEY_IPV6_5TUPLE;
- memcpy(key.key.ipv6_5tuple.ip_src, &sipaddr, 16);
- memcpy(key.key.ipv6_5tuple.ip_dst, &dipaddr, 16);
- key.key.ipv6_5tuple.port_src = sport;
- key.key.ipv6_5tuple.port_dst = dport;
- key.key.ipv6_5tuple.proto = proto;
-
- status = app_pipeline_fc_del(app,
- results->pipeline_id,
- &key);
- if (status)
- printf(CMD_MSG_FAIL, "flow del ipv6");
-
- return;
- } /* flow del ipv6 */
-
-	/* flow del default */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "default") == 0)) {
- if (n_tokens != 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow del default");
- return;
- }
-
- status = app_pipeline_fc_del_default(app,
- results->pipeline_id);
- if (status)
- printf(CMD_MSG_FAIL, "flow del default");
-
- return;
- } /* flow del default */
-
- /* flow ls */
- if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
- if (n_tokens != 1) {
- printf(CMD_MSG_MISMATCH_ARGS, "flow ls");
- return;
- }
-
- status = app_pipeline_fc_ls(app, results->pipeline_id);
- if (status)
- printf(CMD_MSG_FAIL, "flow ls");
-
- return;
- } /* flow ls */
-
- printf(CMD_MSG_MISMATCH_ARGS, "flow");
-}
-
-static cmdline_parse_token_string_t cmd_flow_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_flow_result, p_string, "p");
-
-static cmdline_parse_token_num_t cmd_flow_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_flow_result, pipeline_id, UINT32);
-
-static cmdline_parse_token_string_t cmd_flow_flow_string =
- TOKEN_STRING_INITIALIZER(struct cmd_flow_result, flow_string, "flow");
-
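-/*
- * TOKEN_STRING_MULTI captures the remainder of the command line, which
- * cmd_flow_parsed() tokenizes itself.
- */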
-static cmdline_parse_token_string_t cmd_flow_multi_string =
- TOKEN_STRING_INITIALIZER(struct cmd_flow_result, multi_string,
- TOKEN_STRING_MULTI);
-
-static cmdline_parse_inst_t cmd_flow = {
- .f = cmd_flow_parsed,
- .data = NULL,
- .help_str = "flow add / add bulk / add default / del / del default / ls",
- .tokens = {
- (void *) &cmd_flow_p_string,
- (void *) &cmd_flow_pipeline_id,
- (void *) &cmd_flow_flow_string,
- (void *) &cmd_flow_multi_string,
- NULL,
- },
-};
-
-static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_flow,
- NULL,
-};
-
-static struct pipeline_fe_ops pipeline_flow_classification_fe_ops = {
- .f_init = app_pipeline_fc_init,
- .f_post_init = NULL,
- .f_free = app_pipeline_fc_free,
- .f_track = app_pipeline_track_default,
- .cmds = pipeline_cmds,
-};
-
-struct pipeline_type pipeline_flow_classification = {
- .name = "FLOW_CLASSIFICATION",
- .be_ops = &pipeline_flow_classification_be_ops,
- .fe_ops = &pipeline_flow_classification_fe_ops,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
deleted file mode 100644
index 8c354989..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_H__
-#define __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_H__
-
-#include "pipeline.h"
-#include "pipeline_flow_classification_be.h"
-
-enum flow_key_type {
- FLOW_KEY_QINQ,
- FLOW_KEY_IPV4_5TUPLE,
- FLOW_KEY_IPV6_5TUPLE,
-};
-
-struct flow_key_qinq {
- uint16_t svlan;
- uint16_t cvlan;
-};
-
-struct flow_key_ipv4_5tuple {
- uint32_t ip_src;
- uint32_t ip_dst;
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
-};
-
-struct flow_key_ipv6_5tuple {
- uint8_t ip_src[16];
- uint8_t ip_dst[16];
- uint16_t port_src;
- uint16_t port_dst;
- uint32_t proto;
-};
-
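-/* Flow key: the type field selects which member of the key union is valid */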
-struct pipeline_fc_key {
- enum flow_key_type type;
- union {
- struct flow_key_qinq qinq;
- struct flow_key_ipv4_5tuple ipv4_5tuple;
- struct flow_key_ipv6_5tuple ipv6_5tuple;
- } key;
-};
-
-int
-app_pipeline_fc_add(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_fc_key *key,
- uint32_t port_id,
- uint32_t flow_id);
-
-int
-app_pipeline_fc_add_bulk(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_fc_key *key,
- uint32_t *port_id,
- uint32_t *flow_id,
- uint32_t n_keys);
-
-int
-app_pipeline_fc_del(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_fc_key *key);
-
-int
-app_pipeline_fc_add_default(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id);
-
-int
-app_pipeline_fc_del_default(struct app_params *app,
- uint32_t pipeline_id);
-
-#ifndef APP_PIPELINE_FC_MAX_FLOWS_IN_FILE
-#define APP_PIPELINE_FC_MAX_FLOWS_IN_FILE (16 * 1024 * 1024)
-#endif
-
-int
-app_pipeline_fc_load_file_qinq(char *filename,
- struct pipeline_fc_key *keys,
- uint32_t *port_ids,
- uint32_t *flow_ids,
- uint32_t *n_keys,
- uint32_t *line);
-
-int
-app_pipeline_fc_load_file_ipv4(char *filename,
- struct pipeline_fc_key *keys,
- uint32_t *port_ids,
- uint32_t *flow_ids,
- uint32_t *n_keys,
- uint32_t *line);
-
-int
-app_pipeline_fc_load_file_ipv6(char *filename,
- struct pipeline_fc_key *keys,
- uint32_t *port_ids,
- uint32_t *flow_ids,
- uint32_t *n_keys,
- uint32_t *line);
-
-extern struct pipeline_type pipeline_flow_classification;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
deleted file mode 100644
index 097ec346..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
+++ /dev/null
@@ -1,723 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <rte_table_hash.h>
-#include <rte_byteorder.h>
-#include <pipeline.h>
-
-#include "pipeline_flow_classification_be.h"
-#include "pipeline_actions_common.h"
-#include "parser.h"
-#include "hash_func.h"
-
-struct pipeline_flow_classification {
- struct pipeline p;
- pipeline_msg_req_handler custom_handlers[PIPELINE_FC_MSG_REQS];
-
- uint32_t n_flows;
- uint32_t key_size;
- uint32_t flow_id;
-
- uint32_t key_offset;
- uint32_t hash_offset;
- uint8_t key_mask[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
- uint32_t key_mask_present;
- uint32_t flow_id_offset;
-
-} __rte_cache_aligned;
-
-static void *
-pipeline_fc_msg_req_custom_handler(struct pipeline *p, void *msg);
-
-static pipeline_msg_req_handler handlers[] = {
- [PIPELINE_MSG_REQ_PING] =
- pipeline_msg_req_ping_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_IN] =
- pipeline_msg_req_stats_port_in_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
- pipeline_msg_req_stats_port_out_handler,
- [PIPELINE_MSG_REQ_STATS_TABLE] =
- pipeline_msg_req_stats_table_handler,
- [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
- pipeline_msg_req_port_in_enable_handler,
- [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
- pipeline_msg_req_port_in_disable_handler,
- [PIPELINE_MSG_REQ_CUSTOM] =
- pipeline_fc_msg_req_custom_handler,
-};
-
-static void *
-pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_fc_msg_req_del_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg);
-
-static void *
-pipeline_fc_msg_req_del_default_handler(struct pipeline *p, void *msg);
-
-static pipeline_msg_req_handler custom_handlers[] = {
- [PIPELINE_FC_MSG_REQ_FLOW_ADD] =
- pipeline_fc_msg_req_add_handler,
- [PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK] =
- pipeline_fc_msg_req_add_bulk_handler,
- [PIPELINE_FC_MSG_REQ_FLOW_DEL] =
- pipeline_fc_msg_req_del_handler,
- [PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT] =
- pipeline_fc_msg_req_add_default_handler,
- [PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT] =
- pipeline_fc_msg_req_del_default_handler,
-};
-
-/*
- * Flow table
- */
-struct flow_table_entry {
- struct rte_pipeline_table_entry head;
-
- uint32_t flow_id;
- uint32_t pad;
-};
-
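-/*
- * Hash functions indexed by (key_size / 8) - 1, covering key sizes of
- * 8 to 64 bytes in 8-byte steps.
- */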
-rte_table_hash_op_hash hash_func[] = {
- hash_default_key8,
- hash_default_key16,
- hash_default_key24,
- hash_default_key32,
- hash_default_key40,
- hash_default_key48,
- hash_default_key56,
- hash_default_key64
-};
-
-/*
- * Flow table AH - Write flow_id to packet meta-data
- */
-static inline void
-pkt_work_flow_id(
- struct rte_mbuf *pkt,
- struct rte_pipeline_table_entry *table_entry,
- void *arg)
-{
- struct pipeline_flow_classification *p_fc = arg;
- uint32_t *flow_id_ptr =
- RTE_MBUF_METADATA_UINT32_PTR(pkt, p_fc->flow_id_offset);
- struct flow_table_entry *entry =
- (struct flow_table_entry *) table_entry;
-
- /* Read */
- uint32_t flow_id = entry->flow_id;
-
- /* Compute */
-
- /* Write */
- *flow_id_ptr = flow_id;
-}
-
-static inline void
-pkt4_work_flow_id(
- struct rte_mbuf **pkts,
- struct rte_pipeline_table_entry **table_entries,
- void *arg)
-{
- struct pipeline_flow_classification *p_fc = arg;
-
- uint32_t *flow_id_ptr0 =
- RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p_fc->flow_id_offset);
- uint32_t *flow_id_ptr1 =
- RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p_fc->flow_id_offset);
- uint32_t *flow_id_ptr2 =
- RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p_fc->flow_id_offset);
- uint32_t *flow_id_ptr3 =
- RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p_fc->flow_id_offset);
-
- struct flow_table_entry *entry0 =
- (struct flow_table_entry *) table_entries[0];
- struct flow_table_entry *entry1 =
- (struct flow_table_entry *) table_entries[1];
- struct flow_table_entry *entry2 =
- (struct flow_table_entry *) table_entries[2];
- struct flow_table_entry *entry3 =
- (struct flow_table_entry *) table_entries[3];
-
- /* Read */
- uint32_t flow_id0 = entry0->flow_id;
- uint32_t flow_id1 = entry1->flow_id;
- uint32_t flow_id2 = entry2->flow_id;
- uint32_t flow_id3 = entry3->flow_id;
-
- /* Compute */
-
- /* Write */
- *flow_id_ptr0 = flow_id0;
- *flow_id_ptr1 = flow_id1;
- *flow_id_ptr2 = flow_id2;
- *flow_id_ptr3 = flow_id3;
-}
-
-PIPELINE_TABLE_AH_HIT(fc_table_ah_hit,
- pkt_work_flow_id, pkt4_work_flow_id);
-
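-/* The hit action handler is installed only when flowid_offset is configured */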
-static rte_pipeline_table_action_handler_hit
-get_fc_table_ah_hit(struct pipeline_flow_classification *p)
-{
- if (p->flow_id)
- return fc_table_ah_hit;
-
- return NULL;
-}
-
-/*
- * Argument parsing
- */
-static int
-pipeline_fc_parse_args(struct pipeline_flow_classification *p,
- struct pipeline_params *params)
-{
- uint32_t n_flows_present = 0;
- uint32_t key_offset_present = 0;
- uint32_t key_size_present = 0;
- uint32_t hash_offset_present = 0;
- uint32_t key_mask_present = 0;
- uint32_t flow_id_offset_present = 0;
-
- uint32_t i;
- char key_mask_str[PIPELINE_FC_FLOW_KEY_MAX_SIZE * 2 + 1];
-
-	/* Default values */
-	p->hash_offset = 0;
-	p->flow_id = 0;
-
- for (i = 0; i < params->n_args; i++) {
- char *arg_name = params->args_name[i];
- char *arg_value = params->args_value[i];
-
- /* n_flows */
- if (strcmp(arg_name, "n_flows") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- n_flows_present == 0, params->name,
- arg_name);
- n_flows_present = 1;
-
- status = parser_read_uint32(&p->n_flows,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
- (p->n_flows != 0)), params->name,
- arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* key_offset */
- if (strcmp(arg_name, "key_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- key_offset_present == 0, params->name,
- arg_name);
- key_offset_present = 1;
-
- status = parser_read_uint32(&p->key_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* key_size */
- if (strcmp(arg_name, "key_size") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- key_size_present == 0, params->name,
- arg_name);
- key_size_present = 1;
-
- status = parser_read_uint32(&p->key_size,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
- (p->key_size != 0) &&
- (p->key_size % 8 == 0)),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
- (p->key_size <=
- PIPELINE_FC_FLOW_KEY_MAX_SIZE)),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* key_mask */
- if (strcmp(arg_name, "key_mask") == 0) {
- int mask_str_len = strlen(arg_value);
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- key_mask_present == 0,
- params->name, arg_name);
- key_mask_present = 1;
-
- PIPELINE_ARG_CHECK((mask_str_len <=
- (PIPELINE_FC_FLOW_KEY_MAX_SIZE * 2)),
- "Parse error in section \"%s\": entry "
- "\"%s\" is too long", params->name,
- arg_name);
-
- snprintf(key_mask_str, mask_str_len + 1, "%s",
- arg_value);
-
- continue;
- }
-
- /* hash_offset */
- if (strcmp(arg_name, "hash_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- hash_offset_present == 0, params->name,
- arg_name);
- hash_offset_present = 1;
-
- status = parser_read_uint32(&p->hash_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* flow_id_offset */
- if (strcmp(arg_name, "flowid_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- flow_id_offset_present == 0, params->name,
- arg_name);
- flow_id_offset_present = 1;
-
- status = parser_read_uint32(&p->flow_id_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- p->flow_id = 1;
-
- continue;
- }
-
- /* Unknown argument */
- PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
- }
-
- /* Check that mandatory arguments are present */
- PIPELINE_PARSE_ERR_MANDATORY((n_flows_present), params->name,
- "n_flows");
- PIPELINE_PARSE_ERR_MANDATORY((key_offset_present), params->name,
- "key_offset");
- PIPELINE_PARSE_ERR_MANDATORY((key_size_present), params->name,
- "key_size");
-
- if (key_mask_present) {
- uint32_t key_size = p->key_size;
- int status;
-
- PIPELINE_ARG_CHECK(((key_size == 8) || (key_size == 16)),
- "Parse error in section \"%s\": entry key_mask "
- "only allowed for key_size of 8 or 16 bytes",
- params->name);
-
- PIPELINE_ARG_CHECK((strlen(key_mask_str) ==
- (key_size * 2)), "Parse error in section "
- "\"%s\": key_mask should have exactly %u hex "
- "digits", params->name, (key_size * 2));
-
- PIPELINE_ARG_CHECK((hash_offset_present == 0), "Parse "
- "error in section \"%s\": entry hash_offset only "
- "allowed when key_mask is not present",
- params->name);
-
- status = parse_hex_string(key_mask_str, p->key_mask,
- &p->key_size);
-
- PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
- (key_size == p->key_size)), params->name,
- "key_mask", key_mask_str);
- }
-
- p->key_mask_present = key_mask_present;
-
- return 0;
-}
-
-static void *
-pipeline_fc_init(struct pipeline_params *params,
- __rte_unused void *arg)
-{
- struct pipeline *p;
- struct pipeline_flow_classification *p_fc;
- uint32_t size, i;
-
- /* Check input arguments */
- if (params == NULL)
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(
- sizeof(struct pipeline_flow_classification));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (p == NULL)
- return NULL;
- p_fc = (struct pipeline_flow_classification *) p;
-
- strcpy(p->name, params->name);
- p->log_level = params->log_level;
-
- PLOG(p, HIGH, "Flow classification");
-
- /* Parse arguments */
- if (pipeline_fc_parse_args(p_fc, params))
- return NULL;
-
- /* Pipeline */
- {
- struct rte_pipeline_params pipeline_params = {
- .name = params->name,
- .socket_id = params->socket_id,
- .offset_port_id = 0,
- };
-
- p->p = rte_pipeline_create(&pipeline_params);
- if (p->p == NULL) {
- rte_free(p);
- return NULL;
- }
- }
-
- /* Input ports */
- p->n_ports_in = params->n_ports_in;
- for (i = 0; i < p->n_ports_in; i++) {
- struct rte_pipeline_port_in_params port_params = {
- .ops = pipeline_port_in_params_get_ops(
- &params->port_in[i]),
- .arg_create = pipeline_port_in_params_convert(
- &params->port_in[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- .burst_size = params->port_in[i].burst_size,
- };
-
- int status = rte_pipeline_port_in_create(p->p,
- &port_params,
- &p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Output ports */
- p->n_ports_out = params->n_ports_out;
- for (i = 0; i < p->n_ports_out; i++) {
- struct rte_pipeline_port_out_params port_params = {
- .ops = pipeline_port_out_params_get_ops(
- &params->port_out[i]),
- .arg_create = pipeline_port_out_params_convert(
- &params->port_out[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- };
-
- int status = rte_pipeline_port_out_create(p->p,
- &port_params,
- &p->port_out_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Tables */
- p->n_tables = 1;
- {
- struct rte_table_hash_params table_hash_params = {
- .name = p->name,
- .key_size = p_fc->key_size,
- .key_offset = p_fc->key_offset,
- .key_mask = (p_fc->key_mask_present) ?
- p_fc->key_mask : NULL,
- .n_keys = p_fc->n_flows,
- .n_buckets = rte_align32pow2(p_fc->n_flows / 4),
- .f_hash = hash_func[(p_fc->key_size / 8) - 1],
- .seed = 0,
- };
-
- struct rte_pipeline_table_params table_params = {
- .ops = NULL, /* set below */
- .arg_create = NULL, /* set below */
- .f_action_hit = get_fc_table_ah_hit(p_fc),
- .f_action_miss = NULL,
- .arg_ah = p_fc,
- .action_data_size = sizeof(struct flow_table_entry) -
- sizeof(struct rte_pipeline_table_entry),
- };
-
- int status;
-
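-		/*
-		 * Specialized hash table ops for 8 and 16 byte keys,
-		 * generic extendable hash table otherwise.
-		 */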
- switch (p_fc->key_size) {
- case 8:
- table_params.ops = &rte_table_hash_key8_ext_ops;
- break;
-
- case 16:
- table_params.ops = &rte_table_hash_key16_ext_ops;
- break;
-
- default:
- table_params.ops = &rte_table_hash_ext_ops;
- }
-
- table_params.arg_create = &table_hash_params;
-
- status = rte_pipeline_table_create(p->p,
- &table_params,
- &p->table_id[0]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Connecting input ports to tables */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_connect_to_table(p->p,
- p->port_in_id[i],
- p->table_id[0]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Enable input ports */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_enable(p->p,
- p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Check pipeline consistency */
- if (rte_pipeline_check(p->p) < 0) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
-
- /* Message queues */
- p->n_msgq = params->n_msgq;
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_in[i] = params->msgq_in[i];
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_out[i] = params->msgq_out[i];
-
- /* Message handlers */
- memcpy(p->handlers, handlers, sizeof(p->handlers));
- memcpy(p_fc->custom_handlers,
- custom_handlers,
- sizeof(p_fc->custom_handlers));
-
- return p;
-}
-
-static int
-pipeline_fc_free(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- rte_pipeline_free(p->p);
- rte_free(p);
- return 0;
-}
-
-static int
-pipeline_fc_timer(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- pipeline_msg_req_handle(p);
- rte_pipeline_flush(p->p);
-
- return 0;
-}
-
-static void *
-pipeline_fc_msg_req_custom_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_flow_classification *p_fc =
- (struct pipeline_flow_classification *) p;
- struct pipeline_custom_msg_req *req = msg;
- pipeline_msg_req_handler f_handle;
-
- f_handle = (req->subtype < PIPELINE_FC_MSG_REQS) ?
- p_fc->custom_handlers[req->subtype] :
- pipeline_msg_req_invalid_handler;
-
- if (f_handle == NULL)
- f_handle = pipeline_msg_req_invalid_handler;
-
- return f_handle(p, req);
-}
-
-static void *
-pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_fc_add_msg_req *req = msg;
- struct pipeline_fc_add_msg_rsp *rsp = msg;
-
- struct flow_table_entry entry = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[req->port_id]},
- },
- .flow_id = req->flow_id,
- };
-
- rsp->status = rte_pipeline_table_entry_add(p->p,
- p->table_id[0],
- &req->key,
- (struct rte_pipeline_table_entry *) &entry,
- &rsp->key_found,
- (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
-
- return rsp;
-}
-
-static void *
-pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_fc_add_bulk_msg_req *req = msg;
- struct pipeline_fc_add_bulk_msg_rsp *rsp = msg;
- uint32_t i;
-
- for (i = 0; i < req->n_keys; i++) {
- struct pipeline_fc_add_bulk_flow_req *flow_req = &req->req[i];
- struct pipeline_fc_add_bulk_flow_rsp *flow_rsp = &req->rsp[i];
-
- struct flow_table_entry entry = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[flow_req->port_id]},
- },
- .flow_id = flow_req->flow_id,
- };
-
- int status = rte_pipeline_table_entry_add(p->p,
- p->table_id[0],
- &flow_req->key,
- (struct rte_pipeline_table_entry *) &entry,
- &flow_rsp->key_found,
- (struct rte_pipeline_table_entry **)
- &flow_rsp->entry_ptr);
-
- if (status)
- break;
- }
-
- rsp->n_keys = i;
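-	/*
-	 * Report how many entries were added; the front-end rolls back
-	 * any remaining flows.
-	 */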
-
- return rsp;
-}
-
-static void *
-pipeline_fc_msg_req_del_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_fc_del_msg_req *req = msg;
- struct pipeline_fc_del_msg_rsp *rsp = msg;
-
- rsp->status = rte_pipeline_table_entry_delete(p->p,
- p->table_id[0],
- &req->key,
- &rsp->key_found,
- NULL);
-
- return rsp;
-}
-
-static void *
-pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_fc_add_default_msg_req *req = msg;
- struct pipeline_fc_add_default_msg_rsp *rsp = msg;
-
- struct flow_table_entry default_entry = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[req->port_id]},
- },
-
- .flow_id = 0,
- };
-
- rsp->status = rte_pipeline_table_default_entry_add(p->p,
- p->table_id[0],
- (struct rte_pipeline_table_entry *) &default_entry,
- (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
-
- return rsp;
-}
-
-static void *
-pipeline_fc_msg_req_del_default_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_fc_del_default_msg_rsp *rsp = msg;
-
- rsp->status = rte_pipeline_table_default_entry_delete(p->p,
- p->table_id[0],
- NULL);
-
- return rsp;
-}
-
-struct pipeline_be_ops pipeline_flow_classification_be_ops = {
- .f_init = pipeline_fc_init,
- .f_free = pipeline_fc_free,
- .f_run = NULL,
- .f_timer = pipeline_fc_timer,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
deleted file mode 100644
index 18f5bb42..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_BE_H__
-#define __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_BE_H__
-
-#include "pipeline_common_be.h"
-
-enum pipeline_fc_msg_req_type {
- PIPELINE_FC_MSG_REQ_FLOW_ADD = 0,
- PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK,
- PIPELINE_FC_MSG_REQ_FLOW_DEL,
- PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT,
- PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT,
- PIPELINE_FC_MSG_REQS,
-};
-
-#ifndef PIPELINE_FC_FLOW_KEY_MAX_SIZE
-#define PIPELINE_FC_FLOW_KEY_MAX_SIZE 64
-#endif
-
-/*
- * MSG ADD
- */
-struct pipeline_fc_add_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fc_msg_req_type subtype;
-
- uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
-
- uint32_t port_id;
- uint32_t flow_id;
-};
-
-struct pipeline_fc_add_msg_rsp {
- int status;
- int key_found;
- void *entry_ptr;
-};
-
-/*
- * MSG ADD BULK
- */
-struct pipeline_fc_add_bulk_flow_req {
- uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
- uint32_t port_id;
- uint32_t flow_id;
-};
-
-struct pipeline_fc_add_bulk_flow_rsp {
- int key_found;
- void *entry_ptr;
-};
-
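-/* req and rsp point to caller-allocated arrays of n_keys elements each */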
-struct pipeline_fc_add_bulk_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fc_msg_req_type subtype;
-
- struct pipeline_fc_add_bulk_flow_req *req;
- struct pipeline_fc_add_bulk_flow_rsp *rsp;
- uint32_t n_keys;
-};
-
-struct pipeline_fc_add_bulk_msg_rsp {
- uint32_t n_keys;
-};
-
-/*
- * MSG DEL
- */
-struct pipeline_fc_del_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fc_msg_req_type subtype;
-
- uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
-};
-
-struct pipeline_fc_del_msg_rsp {
- int status;
- int key_found;
-};
-
-/*
- * MSG ADD DEFAULT
- */
-struct pipeline_fc_add_default_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fc_msg_req_type subtype;
-
- uint32_t port_id;
-};
-
-struct pipeline_fc_add_default_msg_rsp {
- int status;
- void *entry_ptr;
-};
-
-/*
- * MSG DEL DEFAULT
- */
-struct pipeline_fc_del_default_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_fc_msg_req_type subtype;
-};
-
-struct pipeline_fc_del_default_msg_rsp {
- int status;
-};
-
-extern struct pipeline_be_ops pipeline_flow_classification_be_ops;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_master.c b/examples/ip_pipeline/pipeline/pipeline_master.c
deleted file mode 100644
index b0d730a6..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_master.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#include "pipeline_master.h"
-#include "pipeline_master_be.h"
-
-static struct pipeline_fe_ops pipeline_master_fe_ops = {
- .f_init = NULL,
- .f_post_init = NULL,
- .f_free = NULL,
- .f_track = NULL,
- .cmds = NULL,
-};
-
-struct pipeline_type pipeline_master = {
- .name = "MASTER",
- .be_ops = &pipeline_master_be_ops,
- .fe_ops = &pipeline_master_fe_ops,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_master.h b/examples/ip_pipeline/pipeline/pipeline_master.h
deleted file mode 100644
index a5183e33..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_master.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_MASTER_H__
-#define __INCLUDE_PIPELINE_MASTER_H__
-
-#include "pipeline.h"
-
-extern struct pipeline_type pipeline_master;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_master_be.c b/examples/ip_pipeline/pipeline/pipeline_master_be.c
deleted file mode 100644
index c72038ea..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_master_be.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#include <fcntl.h>
-#include <unistd.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-
-#include <cmdline_parse.h>
-#include <cmdline_parse_string.h>
-#include <cmdline_socket.h>
-#include <cmdline.h>
-
-#include "app.h"
-#include "pipeline_master_be.h"
-
-struct pipeline_master {
- struct app_params *app;
- struct cmdline *cl;
- int post_init_done;
- int script_file_done;
-} __rte_cache_aligned;
-
-static void *
-pipeline_init(__rte_unused struct pipeline_params *params, void *arg)
-{
- struct app_params *app = (struct app_params *) arg;
- struct pipeline_master *p;
- uint32_t size;
-
- /* Check input arguments */
- if (app == NULL)
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_master));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (p == NULL)
- return NULL;
-
- /* Initialization */
- p->app = app;
-
- p->cl = cmdline_stdin_new(app->cmds, "pipeline> ");
- if (p->cl == NULL) {
- rte_free(p);
- return NULL;
- }
-
- p->post_init_done = 0;
- p->script_file_done = 0;
- if (app->script_file == NULL)
- p->script_file_done = 1;
-
- return (void *) p;
-}
-
-static int
-pipeline_free(void *pipeline)
-{
- struct pipeline_master *p = (struct pipeline_master *) pipeline;
-
- if (p == NULL)
- return -EINVAL;
-
- cmdline_stdin_exit(p->cl);
- rte_free(p);
-
- return 0;
-}
-
-static int
-pipeline_run(void *pipeline)
-{
- struct pipeline_master *p = (struct pipeline_master *) pipeline;
- struct app_params *app = p->app;
- int status;
-#ifdef RTE_LIBRTE_KNI
- uint32_t i;
-#endif /* RTE_LIBRTE_KNI */
-
- /* Application post-init phase */
- if (p->post_init_done == 0) {
- app_post_init(app);
-
- p->post_init_done = 1;
- }
-
- /* Run startup script file */
- if (p->script_file_done == 0) {
-		int fd = open(app->script_file, O_RDONLY);
-
- if (fd < 0)
- printf("Cannot open CLI script file \"%s\"\n",
- app->script_file);
- else {
- struct cmdline *file_cl;
-
- printf("Running CLI script file \"%s\" ...\n",
- app->script_file);
- file_cl = cmdline_new(p->cl->ctx, "", fd, 1);
- cmdline_interact(file_cl);
- close(fd);
- }
-
- p->script_file_done = 1;
- }
-
- /* Command Line Interface (CLI) */
- status = cmdline_poll(p->cl);
- if (status < 0)
- rte_panic("CLI poll error (%" PRId32 ")\n", status);
- else if (status == RDLINE_EXITED) {
- cmdline_stdin_exit(p->cl);
- rte_exit(0, "Bye!\n");
- }
-
-#ifdef RTE_LIBRTE_KNI
- /* Handle KNI requests from Linux kernel */
- for (i = 0; i < app->n_pktq_kni; i++)
- rte_kni_handle_request(app->kni[i]);
-#endif /* RTE_LIBRTE_KNI */
-
- return 0;
-}
-
-static int
-pipeline_timer(__rte_unused void *pipeline)
-{
- return 0;
-}
-
-struct pipeline_be_ops pipeline_master_be_ops = {
- .f_init = pipeline_init,
- .f_free = pipeline_free,
- .f_run = pipeline_run,
- .f_timer = pipeline_timer,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_master_be.h b/examples/ip_pipeline/pipeline/pipeline_master_be.h
deleted file mode 100644
index 847c564c..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_master_be.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_MASTER_BE_H__
-#define __INCLUDE_PIPELINE_MASTER_BE_H__
-
-#include "pipeline_common_be.h"
-
-extern struct pipeline_be_ops pipeline_master_be_ops;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough.c b/examples/ip_pipeline/pipeline/pipeline_passthrough.c
deleted file mode 100644
index 031f5f05..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#include "pipeline_passthrough.h"
-#include "pipeline_passthrough_be.h"
-
-static int
-app_pipeline_passthrough_track(struct pipeline_params *p,
- uint32_t port_in,
- uint32_t *port_out)
-{
- struct pipeline_passthrough_params pp;
- int status;
-
- /* Check input arguments */
- if ((p == NULL) ||
- (port_in >= p->n_ports_in) ||
- (port_out == NULL))
- return -1;
-
- status = pipeline_passthrough_parse_args(&pp, p);
- if (status)
- return -1;
-
- if (pp.dma_hash_lb_enabled)
- return -1;
-
- *port_out = port_in / (p->n_ports_in / p->n_ports_out);
- return 0;
-}
-
-static struct pipeline_fe_ops pipeline_passthrough_fe_ops = {
- .f_init = NULL,
- .f_post_init = NULL,
- .f_free = NULL,
- .f_track = app_pipeline_passthrough_track,
- .cmds = NULL,
-};
-
-struct pipeline_type pipeline_passthrough = {
- .name = "PASS-THROUGH",
- .be_ops = &pipeline_passthrough_be_ops,
- .fe_ops = &pipeline_passthrough_fe_ops,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough.h b/examples/ip_pipeline/pipeline/pipeline_passthrough.h
deleted file mode 100644
index 7a7a2fc6..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_H__
-#define __INCLUDE_PIPELINE_PASSTHROUGH_H__
-
-#include "pipeline.h"
-
-extern struct pipeline_type pipeline_passthrough;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
deleted file mode 100644
index b2bbaedd..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
+++ /dev/null
@@ -1,929 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <stdio.h>
-#include <string.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <rte_byteorder.h>
-#include <rte_table_stub.h>
-#include <rte_table_hash.h>
-#include <rte_pipeline.h>
-
-#include "pipeline_passthrough_be.h"
-#include "pipeline_actions_common.h"
-#include "parser.h"
-#include "hash_func.h"
-
-#define SWAP_DIM (PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX * \
- (PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX / sizeof(uint64_t)))
-
-struct pipeline_passthrough {
- struct pipeline p;
- struct pipeline_passthrough_params params;
- rte_table_hash_op_hash f_hash;
- uint32_t swap_field0_offset[SWAP_DIM];
- uint32_t swap_field1_offset[SWAP_DIM];
- uint64_t swap_field_mask[SWAP_DIM];
- uint32_t swap_n_fields;
-} __rte_cache_aligned;
-
-static pipeline_msg_req_handler handlers[] = {
- [PIPELINE_MSG_REQ_PING] =
- pipeline_msg_req_ping_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_IN] =
- pipeline_msg_req_stats_port_in_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
- pipeline_msg_req_stats_port_out_handler,
- [PIPELINE_MSG_REQ_STATS_TABLE] =
- pipeline_msg_req_stats_table_handler,
- [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
- pipeline_msg_req_port_in_enable_handler,
- [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
- pipeline_msg_req_port_in_disable_handler,
- [PIPELINE_MSG_REQ_CUSTOM] =
- pipeline_msg_req_invalid_handler,
-};
-
-static __rte_always_inline void
-pkt_work_dma(
- struct rte_mbuf *pkt,
- void *arg,
- uint32_t dma_size,
- uint32_t hash_enabled,
- uint32_t lb_hash,
- uint32_t port_out_pow2)
-{
- struct pipeline_passthrough *p = arg;
-
- uint64_t *dma_dst = RTE_MBUF_METADATA_UINT64_PTR(pkt,
- p->params.dma_dst_offset);
- uint64_t *dma_src = RTE_MBUF_METADATA_UINT64_PTR(pkt,
- p->params.dma_src_offset);
- uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
- uint32_t *dma_hash = RTE_MBUF_METADATA_UINT32_PTR(pkt,
- p->params.dma_hash_offset);
- uint32_t i;
-
- /* Read (dma_src), compute (dma_dst), write (dma_dst) */
- for (i = 0; i < (dma_size / 8); i++)
- dma_dst[i] = dma_src[i] & dma_mask[i];
-
- /* Read (dma_dst), compute (hash), write (hash) */
- if (hash_enabled) {
- uint32_t hash = p->f_hash(dma_src, dma_mask, dma_size, 0);
- *dma_hash = hash;
-
- if (lb_hash) {
- uint32_t port_out;
-
- if (port_out_pow2)
- port_out
- = hash & (p->p.n_ports_out - 1);
- else
- port_out
- = hash % p->p.n_ports_out;
-
- rte_pipeline_port_out_packet_insert(p->p.p,
- port_out, pkt);
- }
- }
-}
-
-static __rte_always_inline void
-pkt4_work_dma(
- struct rte_mbuf **pkts,
- void *arg,
- uint32_t dma_size,
- uint32_t hash_enabled,
- uint32_t lb_hash,
- uint32_t port_out_pow2)
-{
- struct pipeline_passthrough *p = arg;
-
- uint64_t *dma_dst0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
- p->params.dma_dst_offset);
- uint64_t *dma_dst1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
- p->params.dma_dst_offset);
- uint64_t *dma_dst2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
- p->params.dma_dst_offset);
- uint64_t *dma_dst3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
- p->params.dma_dst_offset);
-
- uint64_t *dma_src0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
- p->params.dma_src_offset);
- uint64_t *dma_src1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
- p->params.dma_src_offset);
- uint64_t *dma_src2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
- p->params.dma_src_offset);
- uint64_t *dma_src3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
- p->params.dma_src_offset);
-
- uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
-
- uint32_t *dma_hash0 = RTE_MBUF_METADATA_UINT32_PTR(pkts[0],
- p->params.dma_hash_offset);
- uint32_t *dma_hash1 = RTE_MBUF_METADATA_UINT32_PTR(pkts[1],
- p->params.dma_hash_offset);
- uint32_t *dma_hash2 = RTE_MBUF_METADATA_UINT32_PTR(pkts[2],
- p->params.dma_hash_offset);
- uint32_t *dma_hash3 = RTE_MBUF_METADATA_UINT32_PTR(pkts[3],
- p->params.dma_hash_offset);
-
- uint32_t i;
-
- /* Read (dma_src), compute (dma_dst), write (dma_dst) */
- for (i = 0; i < (dma_size / 8); i++) {
- dma_dst0[i] = dma_src0[i] & dma_mask[i];
- dma_dst1[i] = dma_src1[i] & dma_mask[i];
- dma_dst2[i] = dma_src2[i] & dma_mask[i];
- dma_dst3[i] = dma_src3[i] & dma_mask[i];
- }
-
- /* Read (dma_dst), compute (hash), write (hash) */
- if (hash_enabled) {
- uint32_t hash0 = p->f_hash(dma_src0, dma_mask, dma_size, 0);
- uint32_t hash1 = p->f_hash(dma_src1, dma_mask, dma_size, 0);
- uint32_t hash2 = p->f_hash(dma_src2, dma_mask, dma_size, 0);
- uint32_t hash3 = p->f_hash(dma_src3, dma_mask, dma_size, 0);
-
- *dma_hash0 = hash0;
- *dma_hash1 = hash1;
- *dma_hash2 = hash2;
- *dma_hash3 = hash3;
-
- if (lb_hash) {
- uint32_t port_out0, port_out1, port_out2, port_out3;
-
- if (port_out_pow2) {
- port_out0
- = hash0 & (p->p.n_ports_out - 1);
- port_out1
- = hash1 & (p->p.n_ports_out - 1);
- port_out2
- = hash2 & (p->p.n_ports_out - 1);
- port_out3
- = hash3 & (p->p.n_ports_out - 1);
- } else {
- port_out0
- = hash0 % p->p.n_ports_out;
- port_out1
- = hash1 % p->p.n_ports_out;
- port_out2
- = hash2 % p->p.n_ports_out;
- port_out3
- = hash3 % p->p.n_ports_out;
- }
- rte_pipeline_port_out_packet_insert(p->p.p,
- port_out0, pkts[0]);
- rte_pipeline_port_out_packet_insert(p->p.p,
- port_out1, pkts[1]);
- rte_pipeline_port_out_packet_insert(p->p.p,
- port_out2, pkts[2]);
- rte_pipeline_port_out_packet_insert(p->p.p,
- port_out3, pkts[3]);
- }
- }
-}
-
-static __rte_always_inline void
-pkt_work_swap(
- struct rte_mbuf *pkt,
- void *arg)
-{
- struct pipeline_passthrough *p = arg;
- uint32_t i;
-
- /* Read(field0, field1), compute(field0, field1), write(field0, field1) */
- for (i = 0; i < p->swap_n_fields; i++) {
- uint64_t *field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt,
- p->swap_field0_offset[i]);
- uint64_t *field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt,
- p->swap_field1_offset[i]);
- uint64_t mask = p->swap_field_mask[i];
-
- uint64_t field0 = *field0_ptr;
- uint64_t field1 = *field1_ptr;
-
- *field0_ptr = (field0 & (~mask)) + (field1 & mask);
- *field1_ptr = (field0 & mask) + (field1 & (~mask));
- }
-}
-
-static __rte_always_inline void
-pkt4_work_swap(
- struct rte_mbuf **pkts,
- void *arg)
-{
- struct pipeline_passthrough *p = arg;
- uint32_t i;
-
- /* Read(field0, field1), compute(field0, field1), write(field0, field1) */
- for (i = 0; i < p->swap_n_fields; i++) {
- uint64_t *pkt0_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
- p->swap_field0_offset[i]);
- uint64_t *pkt1_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
- p->swap_field0_offset[i]);
- uint64_t *pkt2_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
- p->swap_field0_offset[i]);
- uint64_t *pkt3_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
- p->swap_field0_offset[i]);
-
- uint64_t *pkt0_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
- p->swap_field1_offset[i]);
- uint64_t *pkt1_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
- p->swap_field1_offset[i]);
- uint64_t *pkt2_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
- p->swap_field1_offset[i]);
- uint64_t *pkt3_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
- p->swap_field1_offset[i]);
-
- uint64_t mask = p->swap_field_mask[i];
-
- uint64_t pkt0_field0 = *pkt0_field0_ptr;
- uint64_t pkt1_field0 = *pkt1_field0_ptr;
- uint64_t pkt2_field0 = *pkt2_field0_ptr;
- uint64_t pkt3_field0 = *pkt3_field0_ptr;
-
- uint64_t pkt0_field1 = *pkt0_field1_ptr;
- uint64_t pkt1_field1 = *pkt1_field1_ptr;
- uint64_t pkt2_field1 = *pkt2_field1_ptr;
- uint64_t pkt3_field1 = *pkt3_field1_ptr;
-
- *pkt0_field0_ptr = (pkt0_field0 & (~mask)) + (pkt0_field1 & mask);
- *pkt1_field0_ptr = (pkt1_field0 & (~mask)) + (pkt1_field1 & mask);
- *pkt2_field0_ptr = (pkt2_field0 & (~mask)) + (pkt2_field1 & mask);
- *pkt3_field0_ptr = (pkt3_field0 & (~mask)) + (pkt3_field1 & mask);
-
- *pkt0_field1_ptr = (pkt0_field0 & mask) + (pkt0_field1 & (~mask));
- *pkt1_field1_ptr = (pkt1_field0 & mask) + (pkt1_field1 & (~mask));
- *pkt2_field1_ptr = (pkt2_field0 & mask) + (pkt2_field1 & (~mask));
- *pkt3_field1_ptr = (pkt3_field0 & mask) + (pkt3_field1 & (~mask));
- }
-}
-
-#define PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
-static inline void \
-pkt_work_dma_size##dma_size##_hash##hash_enabled \
- ##_lb##lb_hash##_pw##port_pow2( \
- struct rte_mbuf *pkt, \
- void *arg) \
-{ \
- pkt_work_dma(pkt, arg, dma_size, hash_enabled, lb_hash, port_pow2); \
-}
-
-#define PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
-static inline void \
-pkt4_work_dma_size##dma_size##_hash##hash_enabled \
- ##_lb##lb_hash##_pw##port_pow2( \
- struct rte_mbuf **pkts, \
- void *arg) \
-{ \
- pkt4_work_dma(pkts, arg, dma_size, hash_enabled, lb_hash, port_pow2); \
-}
-
-#define port_in_ah_dma(dma_size, hash_enabled, lb_hash, port_pow2) \
-PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
-PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
-PIPELINE_PORT_IN_AH(port_in_ah_dma_size##dma_size##_hash \
- ##hash_enabled##_lb##lb_hash##_pw##port_pow2, \
- pkt_work_dma_size##dma_size##_hash##hash_enabled \
- ##_lb##lb_hash##_pw##port_pow2, \
- pkt4_work_dma_size##dma_size##_hash##hash_enabled \
- ##_lb##lb_hash##_pw##port_pow2)
-
-
-#define port_in_ah_lb(dma_size, hash_enabled, lb_hash, port_pow2) \
-PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
-PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
-PIPELINE_PORT_IN_AH_HIJACK_ALL( \
- port_in_ah_lb_size##dma_size##_hash##hash_enabled \
- ##_lb##lb_hash##_pw##port_pow2, \
- pkt_work_dma_size##dma_size##_hash##hash_enabled \
- ##_lb##lb_hash##_pw##port_pow2, \
- pkt4_work_dma_size##dma_size##_hash##hash_enabled \
- ##_lb##lb_hash##_pw##port_pow2)
-
-PIPELINE_PORT_IN_AH(port_in_ah_swap, pkt_work_swap, pkt4_work_swap)
-
-
-/* Port in AH DMA(dma_size, hash_enabled, lb_hash, port_pow2) */
-
-port_in_ah_dma(8, 0, 0, 0)
-port_in_ah_dma(8, 1, 0, 0)
-port_in_ah_lb(8, 1, 1, 0)
-port_in_ah_lb(8, 1, 1, 1)
-
-port_in_ah_dma(16, 0, 0, 0)
-port_in_ah_dma(16, 1, 0, 0)
-port_in_ah_lb(16, 1, 1, 0)
-port_in_ah_lb(16, 1, 1, 1)
-
-port_in_ah_dma(24, 0, 0, 0)
-port_in_ah_dma(24, 1, 0, 0)
-port_in_ah_lb(24, 1, 1, 0)
-port_in_ah_lb(24, 1, 1, 1)
-
-port_in_ah_dma(32, 0, 0, 0)
-port_in_ah_dma(32, 1, 0, 0)
-port_in_ah_lb(32, 1, 1, 0)
-port_in_ah_lb(32, 1, 1, 1)
-
-port_in_ah_dma(40, 0, 0, 0)
-port_in_ah_dma(40, 1, 0, 0)
-port_in_ah_lb(40, 1, 1, 0)
-port_in_ah_lb(40, 1, 1, 1)
-
-port_in_ah_dma(48, 0, 0, 0)
-port_in_ah_dma(48, 1, 0, 0)
-port_in_ah_lb(48, 1, 1, 0)
-port_in_ah_lb(48, 1, 1, 1)
-
-port_in_ah_dma(56, 0, 0, 0)
-port_in_ah_dma(56, 1, 0, 0)
-port_in_ah_lb(56, 1, 1, 0)
-port_in_ah_lb(56, 1, 1, 1)
-
-port_in_ah_dma(64, 0, 0, 0)
-port_in_ah_dma(64, 1, 0, 0)
-port_in_ah_lb(64, 1, 1, 0)
-port_in_ah_lb(64, 1, 1, 1)
-
-static rte_pipeline_port_in_action_handler
-get_port_in_ah(struct pipeline_passthrough *p)
-{
- if ((p->params.dma_enabled == 0) &&
- (p->params.swap_enabled == 0))
- return NULL;
-
- if (p->params.swap_enabled)
- return port_in_ah_swap;
-
- if (p->params.dma_hash_enabled) {
- if (p->params.dma_hash_lb_enabled) {
- if (rte_is_power_of_2(p->p.n_ports_out))
- switch (p->params.dma_size) {
-
- case 8: return port_in_ah_lb_size8_hash1_lb1_pw1;
- case 16: return port_in_ah_lb_size16_hash1_lb1_pw1;
- case 24: return port_in_ah_lb_size24_hash1_lb1_pw1;
- case 32: return port_in_ah_lb_size32_hash1_lb1_pw1;
- case 40: return port_in_ah_lb_size40_hash1_lb1_pw1;
- case 48: return port_in_ah_lb_size48_hash1_lb1_pw1;
- case 56: return port_in_ah_lb_size56_hash1_lb1_pw1;
- case 64: return port_in_ah_lb_size64_hash1_lb1_pw1;
- default: return NULL;
- }
- else
- switch (p->params.dma_size) {
-
- case 8: return port_in_ah_lb_size8_hash1_lb1_pw0;
- case 16: return port_in_ah_lb_size16_hash1_lb1_pw0;
- case 24: return port_in_ah_lb_size24_hash1_lb1_pw0;
- case 32: return port_in_ah_lb_size32_hash1_lb1_pw0;
- case 40: return port_in_ah_lb_size40_hash1_lb1_pw0;
- case 48: return port_in_ah_lb_size48_hash1_lb1_pw0;
- case 56: return port_in_ah_lb_size56_hash1_lb1_pw0;
- case 64: return port_in_ah_lb_size64_hash1_lb1_pw0;
- default: return NULL;
- }
- } else
- switch (p->params.dma_size) {
-
- case 8: return port_in_ah_dma_size8_hash1_lb0_pw0;
- case 16: return port_in_ah_dma_size16_hash1_lb0_pw0;
- case 24: return port_in_ah_dma_size24_hash1_lb0_pw0;
- case 32: return port_in_ah_dma_size32_hash1_lb0_pw0;
- case 40: return port_in_ah_dma_size40_hash1_lb0_pw0;
- case 48: return port_in_ah_dma_size48_hash1_lb0_pw0;
- case 56: return port_in_ah_dma_size56_hash1_lb0_pw0;
- case 64: return port_in_ah_dma_size64_hash1_lb0_pw0;
- default: return NULL;
- }
- } else
- switch (p->params.dma_size) {
-
- case 8: return port_in_ah_dma_size8_hash0_lb0_pw0;
- case 16: return port_in_ah_dma_size16_hash0_lb0_pw0;
- case 24: return port_in_ah_dma_size24_hash0_lb0_pw0;
- case 32: return port_in_ah_dma_size32_hash0_lb0_pw0;
- case 40: return port_in_ah_dma_size40_hash0_lb0_pw0;
- case 48: return port_in_ah_dma_size48_hash0_lb0_pw0;
- case 56: return port_in_ah_dma_size56_hash0_lb0_pw0;
- case 64: return port_in_ah_dma_size64_hash0_lb0_pw0;
- default: return NULL;
- }
-}
-
-int
-pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
- struct pipeline_params *params)
-{
- uint32_t dma_dst_offset_present = 0;
- uint32_t dma_src_offset_present = 0;
- uint32_t dma_src_mask_present = 0;
- char dma_mask_str[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2 + 1];
- uint32_t dma_size_present = 0;
- uint32_t dma_hash_offset_present = 0;
- uint32_t dma_hash_lb_present = 0;
- uint32_t i;
-
- /* default values */
- p->dma_enabled = 0;
- p->dma_hash_enabled = 0;
- p->dma_hash_lb_enabled = 0;
- memset(p->dma_src_mask, 0xFF, sizeof(p->dma_src_mask));
- p->swap_enabled = 0;
- p->swap_n_fields = 0;
-
- for (i = 0; i < params->n_args; i++) {
- char *arg_name = params->args_name[i];
- char *arg_value = params->args_value[i];
-
- /* dma_dst_offset */
- if (strcmp(arg_name, "dma_dst_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- dma_dst_offset_present == 0, params->name,
- arg_name);
- dma_dst_offset_present = 1;
-
- status = parser_read_uint32(&p->dma_dst_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- p->dma_enabled = 1;
-
- continue;
- }
-
- /* dma_src_offset */
- if (strcmp(arg_name, "dma_src_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- dma_src_offset_present == 0, params->name,
- arg_name);
- dma_src_offset_present = 1;
-
- status = parser_read_uint32(&p->dma_src_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- p->dma_enabled = 1;
-
- continue;
- }
-
- /* dma_size */
- if (strcmp(arg_name, "dma_size") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- dma_size_present == 0, params->name,
- arg_name);
- dma_size_present = 1;
-
- status = parser_read_uint32(&p->dma_size,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
- (p->dma_size != 0) &&
- ((p->dma_size % 8) == 0)),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
- (p->dma_size <=
- PIPELINE_PASSTHROUGH_DMA_SIZE_MAX)),
- params->name, arg_name, arg_value);
-
- p->dma_enabled = 1;
-
- continue;
- }
-
- /* dma_src_mask */
- if (strcmp(arg_name, "dma_src_mask") == 0) {
- int mask_str_len = strlen(arg_value);
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- dma_src_mask_present == 0,
- params->name, arg_name);
- dma_src_mask_present = 1;
-
- PIPELINE_ARG_CHECK((mask_str_len <=
- (PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2)),
- "Parse error in section \"%s\": entry "
- "\"%s\" too long", params->name,
- arg_name);
-
- snprintf(dma_mask_str, mask_str_len + 1,
- "%s", arg_value);
-
- p->dma_enabled = 1;
-
- continue;
- }
-
- /* dma_hash_offset */
- if (strcmp(arg_name, "dma_hash_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- dma_hash_offset_present == 0,
- params->name, arg_name);
- dma_hash_offset_present = 1;
-
- status = parser_read_uint32(&p->dma_hash_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- p->dma_hash_enabled = 1;
-
- continue;
- }
-
- /* load_balance mode */
- if (strcmp(arg_name, "lb") == 0) {
- PIPELINE_PARSE_ERR_DUPLICATE(
- dma_hash_lb_present == 0,
- params->name, arg_name);
- dma_hash_lb_present = 1;
-
- if (strcmp(arg_value, "hash") &&
- strcmp(arg_value, "HASH"))
-
- PIPELINE_PARSE_ERR_INV_VAL(0,
- params->name,
- arg_name,
- arg_value);
-
- p->dma_hash_lb_enabled = 1;
-
- continue;
- }
-
- /* swap */
- if (strcmp(arg_name, "swap") == 0) {
- uint32_t a, b, n_args;
- int len;
-
- n_args = sscanf(arg_value, "%" SCNu32 " %" SCNu32 "%n",
- &a, &b, &len);
-			PIPELINE_PARSE_ERR_INV_VAL(((n_args == 2) &&
-				((size_t) len == strlen(arg_value))),
-				params->name, arg_name, arg_value);
-
-			/* Guard against overflow of the fixed-size arrays */
-			PIPELINE_ARG_CHECK((p->swap_n_fields <
-				PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX),
-				"Parse error in section \"%s\": too many "
-				"\"swap\" entries", params->name);
-
-			p->swap_field0_offset[p->swap_n_fields] = a;
-			p->swap_field1_offset[p->swap_n_fields] = b;
-			p->swap_n_fields++;
-			p->swap_enabled = 1;
-
- continue;
- }
-
- /* any other */
- PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
- }
-
- /* Check correlations between arguments */
- PIPELINE_ARG_CHECK((p->dma_enabled + p->swap_enabled < 2),
- "Parse error in section \"%s\": DMA and SWAP actions are both enabled",
- params->name);
- PIPELINE_ARG_CHECK((dma_dst_offset_present == p->dma_enabled),
- "Parse error in section \"%s\": missing entry "
- "\"dma_dst_offset\"", params->name);
- PIPELINE_ARG_CHECK((dma_src_offset_present == p->dma_enabled),
- "Parse error in section \"%s\": missing entry "
- "\"dma_src_offset\"", params->name);
- PIPELINE_ARG_CHECK((dma_size_present == p->dma_enabled),
- "Parse error in section \"%s\": missing entry "
- "\"dma_size\"", params->name);
- PIPELINE_ARG_CHECK((p->dma_hash_enabled <= p->dma_enabled),
- "Parse error in section \"%s\": missing all DMA entries",
- params->name);
- PIPELINE_ARG_CHECK((p->dma_hash_lb_enabled <= p->dma_hash_enabled),
-		"Parse error in section \"%s\": missing all DMA hash entries",
- params->name);
-
- if (dma_src_mask_present) {
- uint32_t dma_size = p->dma_size;
- int status;
-
- PIPELINE_ARG_CHECK((strlen(dma_mask_str) ==
- (dma_size * 2)), "Parse error in section "
- "\"%s\": dma_src_mask should have exactly %u hex "
- "digits", params->name, (dma_size * 2));
-
- status = parse_hex_string(dma_mask_str, p->dma_src_mask,
- &p->dma_size);
-
- PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
- (dma_size == p->dma_size)), params->name,
- "dma_src_mask", dma_mask_str);
- }
-
- if (p->dma_hash_lb_enabled)
- PIPELINE_ARG_CHECK((params->n_ports_out > 1),
- "Parse error in section \"%s\": entry \"lb\" not "
- "allowed for single output port pipeline",
- params->name);
- else
- PIPELINE_ARG_CHECK(((params->n_ports_in >= params->n_ports_out)
- && ((params->n_ports_in % params->n_ports_out) == 0)),
- "Parse error in section \"%s\": n_ports_in needs to be "
- "a multiple of n_ports_out (lb mode disabled)",
- params->name);
-
- return 0;
-}
-
-static rte_table_hash_op_hash
-get_hash_function(struct pipeline_passthrough *p)
-{
- switch (p->params.dma_size) {
-
- case 8: return hash_default_key8;
- case 16: return hash_default_key16;
- case 24: return hash_default_key24;
- case 32: return hash_default_key32;
- case 40: return hash_default_key40;
- case 48: return hash_default_key48;
- case 56: return hash_default_key56;
- case 64: return hash_default_key64;
- default: return NULL;
- }
-}
-
-static int
-pipeline_passthrough_swap_convert(struct pipeline_passthrough *p)
-{
- uint32_t i;
-
- p->swap_n_fields = 0;
-
- for (i = 0; i < p->params.swap_n_fields; i++) {
- uint32_t offset0 = p->params.swap_field0_offset[i];
- uint32_t offset1 = p->params.swap_field1_offset[i];
- uint32_t size = offset1 - offset0;
- uint32_t j;
-
- /* Check */
- if ((offset0 >= offset1) ||
- (size > PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX) ||
- (p->swap_n_fields >= SWAP_DIM))
- return -1;
-
- for (j = 0; j < (size / sizeof(uint64_t)); j++) {
- p->swap_field0_offset[p->swap_n_fields] = offset0;
- p->swap_field1_offset[p->swap_n_fields] = offset1;
- p->swap_field_mask[p->swap_n_fields] = UINT64_MAX;
- p->swap_n_fields++;
- offset0 += sizeof(uint64_t);
- offset1 += sizeof(uint64_t);
- }
- if (size % sizeof(uint64_t)) {
- uint32_t n_bits = (size % sizeof(uint64_t)) * 8;
-
- p->swap_field0_offset[p->swap_n_fields] = offset0;
- p->swap_field1_offset[p->swap_n_fields] = offset1;
- p->swap_field_mask[p->swap_n_fields] =
- RTE_LEN2MASK(n_bits, uint64_t);
- p->swap_n_fields++;
- }
- }
-
- return 0;
-}
-
-static void*
-pipeline_passthrough_init(struct pipeline_params *params,
- __rte_unused void *arg)
-{
- struct pipeline *p;
- struct pipeline_passthrough *p_pt;
- uint32_t size, i;
-
- /* Check input arguments */
- if ((params == NULL) ||
- (params->n_ports_in == 0) ||
- (params->n_ports_out == 0))
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_passthrough));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- p_pt = (struct pipeline_passthrough *) p;
- if (p == NULL)
- return NULL;
-
- strcpy(p->name, params->name);
- p->log_level = params->log_level;
-
- PLOG(p, HIGH, "Pass-through");
-
- /* Parse arguments */
-	if (pipeline_passthrough_parse_args(&p_pt->params, params)) {
-		rte_free(p);
-		return NULL;
-	}
-	if (pipeline_passthrough_swap_convert(p_pt)) {
-		rte_free(p);
-		return NULL;
-	}
- p_pt->f_hash = get_hash_function(p_pt);
-
- /* Pipeline */
- {
- struct rte_pipeline_params pipeline_params = {
- .name = "PASS-THROUGH",
- .socket_id = params->socket_id,
- .offset_port_id = 0,
- };
-
- p->p = rte_pipeline_create(&pipeline_params);
- if (p->p == NULL) {
- rte_free(p);
- return NULL;
- }
- }
-
- p->n_ports_in = params->n_ports_in;
- p->n_ports_out = params->n_ports_out;
- p->n_tables = p->n_ports_in;
-
-	/* Input ports */
- for (i = 0; i < p->n_ports_in; i++) {
- struct rte_pipeline_port_in_params port_params = {
- .ops = pipeline_port_in_params_get_ops(
- &params->port_in[i]),
- .arg_create = pipeline_port_in_params_convert(
- &params->port_in[i]),
- .f_action = get_port_in_ah(p_pt),
- .arg_ah = p_pt,
- .burst_size = params->port_in[i].burst_size,
- };
-
- int status = rte_pipeline_port_in_create(p->p,
- &port_params,
- &p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Output ports */
- for (i = 0; i < p->n_ports_out; i++) {
- struct rte_pipeline_port_out_params port_params = {
- .ops = pipeline_port_out_params_get_ops(
- &params->port_out[i]),
- .arg_create = pipeline_port_out_params_convert(
- &params->port_out[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- };
-
- int status = rte_pipeline_port_out_create(p->p,
- &port_params,
- &p->port_out_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Tables */
- for (i = 0; i < p->n_ports_in; i++) {
- struct rte_pipeline_table_params table_params = {
- .ops = &rte_table_stub_ops,
- .arg_create = NULL,
- .f_action_hit = NULL,
- .f_action_miss = NULL,
- .arg_ah = NULL,
- .action_data_size = 0,
- };
-
- int status = rte_pipeline_table_create(p->p,
- &table_params,
- &p->table_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Connecting input ports to tables */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_connect_to_table(p->p,
- p->port_in_id[i],
- p->table_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Add entries to tables */
- for (i = 0; i < p->n_ports_in; i++) {
- uint32_t port_out_id = (p_pt->params.dma_hash_lb_enabled == 0) ?
- (i / (p->n_ports_in / p->n_ports_out)) :
- 0;
-
- struct rte_pipeline_table_entry default_entry = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[port_out_id]},
- };
-
- struct rte_pipeline_table_entry *default_entry_ptr;
-
- int status = rte_pipeline_table_default_entry_add(p->p,
- p->table_id[i],
- &default_entry,
- &default_entry_ptr);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Enable input ports */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_enable(p->p,
- p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Check pipeline consistency */
- if (rte_pipeline_check(p->p) < 0) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
-
- /* Message queues */
- p->n_msgq = params->n_msgq;
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_in[i] = params->msgq_in[i];
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_out[i] = params->msgq_out[i];
-
- /* Message handlers */
- memcpy(p->handlers, handlers, sizeof(p->handlers));
-
- return p;
-}
-
-static int
-pipeline_passthrough_free(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- rte_pipeline_free(p->p);
- rte_free(p);
- return 0;
-}
-
-static int
-pipeline_passthrough_timer(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- pipeline_msg_req_handle(p);
- rte_pipeline_flush(p->p);
-
- return 0;
-}
-
-struct pipeline_be_ops pipeline_passthrough_be_ops = {
- .f_init = pipeline_passthrough_init,
- .f_free = pipeline_passthrough_free,
- .f_run = NULL,
- .f_timer = pipeline_passthrough_timer,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h
deleted file mode 100644
index 94d1d1cf..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
-#define __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
-
-#include "pipeline_common_be.h"
-
-#define PIPELINE_PASSTHROUGH_DMA_SIZE_MAX 64
-
-#ifndef PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX
-#define PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX 8
-#endif
-
-#ifndef PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX
-#define PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX 16
-#endif
-
-struct pipeline_passthrough_params {
- uint32_t dma_enabled;
- uint32_t dma_dst_offset;
- uint32_t dma_src_offset;
- uint8_t dma_src_mask[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX];
- uint32_t dma_size;
-
- uint32_t dma_hash_enabled;
- uint32_t dma_hash_offset;
-
- uint32_t dma_hash_lb_enabled;
-
- uint32_t swap_enabled;
- uint32_t swap_field0_offset[PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX];
- uint32_t swap_field1_offset[PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX];
- uint32_t swap_n_fields;
-};
-
-int
-pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
- struct pipeline_params *params);
-
-extern struct pipeline_be_ops pipeline_passthrough_be_ops;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.c b/examples/ip_pipeline/pipeline/pipeline_routing.c
deleted file mode 100644
index 0562c63a..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_routing.c
+++ /dev/null
@@ -1,1613 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <cmdline_parse.h>
-#include <cmdline_parse_num.h>
-#include <cmdline_parse_string.h>
-
-#include "app.h"
-#include "pipeline_common_fe.h"
-#include "pipeline_routing.h"
-#include "parser.h"
-
-struct app_pipeline_routing_route {
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data data;
- void *entry_ptr;
-
- TAILQ_ENTRY(app_pipeline_routing_route) node;
-};
-
-struct app_pipeline_routing_arp_entry {
- struct pipeline_routing_arp_key key;
- struct ether_addr macaddr;
- void *entry_ptr;
-
- TAILQ_ENTRY(app_pipeline_routing_arp_entry) node;
-};
-
-struct pipeline_routing {
- /* Parameters */
- struct app_params *app;
- uint32_t pipeline_id;
- uint32_t n_ports_in;
- uint32_t n_ports_out;
- struct pipeline_routing_params rp;
-
- /* Links */
- uint32_t link_id[PIPELINE_MAX_PORT_OUT];
-
- /* Routes */
- TAILQ_HEAD(, app_pipeline_routing_route) routes;
- uint32_t n_routes;
-
- uint32_t default_route_present;
- uint32_t default_route_port_id;
- void *default_route_entry_ptr;
-
- /* ARP entries */
- TAILQ_HEAD(, app_pipeline_routing_arp_entry) arp_entries;
- uint32_t n_arp_entries;
-
- uint32_t default_arp_entry_present;
- uint32_t default_arp_entry_port_id;
- void *default_arp_entry_ptr;
-};
-
-static int
-app_pipeline_routing_find_link(struct pipeline_routing *p,
- uint32_t link_id,
- uint32_t *port_id)
-{
- uint32_t i;
-
- for (i = 0; i < p->n_ports_out; i++)
- if (p->link_id[i] == link_id) {
- *port_id = i;
- return 0;
- }
-
- return -1;
-}
-
-static void
-app_pipeline_routing_link_op(struct app_params *app,
- uint32_t link_id,
- uint32_t up,
- void *arg)
-{
- struct pipeline_routing_route_key key0, key1;
- struct pipeline_routing *p = arg;
- struct app_link_params *lp;
- uint32_t port_id, netmask;
- int status;
-
- if (app == NULL)
- return;
-
- APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, lp);
- if (lp == NULL)
- return;
-
- status = app_pipeline_routing_find_link(p,
- link_id,
- &port_id);
- if (status)
- return;
-
- netmask = (~0U) << (32 - lp->depth);
-
- /* Local network (directly attached network) */
- key0.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key0.key.ipv4.ip = lp->ip & netmask;
- key0.key.ipv4.depth = lp->depth;
-
- /* Local termination */
- key1.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key1.key.ipv4.ip = lp->ip;
- key1.key.ipv4.depth = 32;
-
- if (up) {
- struct pipeline_routing_route_data data0, data1;
-
- /* Local network (directly attached network) */
- memset(&data0, 0, sizeof(data0));
- data0.flags = PIPELINE_ROUTING_ROUTE_LOCAL |
- PIPELINE_ROUTING_ROUTE_ARP;
- if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ)
- data0.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
- if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) {
- data0.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
- data0.l2.mpls.n_labels = 1;
- }
- data0.port_id = port_id;
-
- if (p->rp.n_arp_entries)
- app_pipeline_routing_add_route(app,
- p->pipeline_id,
- &key0,
- &data0);
-
- /* Local termination */
- memset(&data1, 0, sizeof(data1));
- data1.flags = PIPELINE_ROUTING_ROUTE_LOCAL |
- PIPELINE_ROUTING_ROUTE_ARP;
- if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ)
- data1.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
- if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) {
- data1.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
- data1.l2.mpls.n_labels = 1;
- }
- data1.port_id = p->rp.port_local_dest;
-
- app_pipeline_routing_add_route(app,
- p->pipeline_id,
- &key1,
- &data1);
- } else {
- /* Local network (directly attached network) */
- if (p->rp.n_arp_entries)
- app_pipeline_routing_delete_route(app,
- p->pipeline_id,
- &key0);
-
- /* Local termination */
- app_pipeline_routing_delete_route(app,
- p->pipeline_id,
- &key1);
- }
-}
-
-static int
-app_pipeline_routing_set_link_op(
- struct app_params *app,
- struct pipeline_routing *p)
-{
- uint32_t port_id;
-
- for (port_id = 0; port_id < p->n_ports_out; port_id++) {
- struct app_link_params *link;
- uint32_t link_id;
- int status;
-
- link = app_pipeline_track_pktq_out_to_link(app,
- p->pipeline_id,
- port_id);
- if (link == NULL)
- continue;
-
- link_id = link - app->link_params;
- p->link_id[port_id] = link_id;
-
- status = app_link_set_op(app,
- link_id,
- p->pipeline_id,
- app_pipeline_routing_link_op,
- (void *) p);
- if (status)
- return status;
- }
-
- return 0;
-}
-
-static void *
-app_pipeline_routing_init(struct pipeline_params *params,
- void *arg)
-{
- struct app_params *app = (struct app_params *) arg;
- struct pipeline_routing *p;
- uint32_t pipeline_id, size;
- int status;
-
- /* Check input arguments */
- if ((params == NULL) ||
- (params->n_ports_in == 0) ||
- (params->n_ports_out == 0))
- return NULL;
-
- APP_PARAM_GET_ID(params, "PIPELINE", pipeline_id);
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_routing));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- if (p == NULL)
- return NULL;
-
- /* Initialization */
- p->app = app;
- p->pipeline_id = pipeline_id;
- p->n_ports_in = params->n_ports_in;
- p->n_ports_out = params->n_ports_out;
-
- status = pipeline_routing_parse_args(&p->rp, params);
- if (status) {
- rte_free(p);
- return NULL;
- }
- TAILQ_INIT(&p->routes);
- p->n_routes = 0;
-
- TAILQ_INIT(&p->arp_entries);
- p->n_arp_entries = 0;
-
- app_pipeline_routing_set_link_op(app, p);
-
- return p;
-}
-
-static int
-app_pipeline_routing_post_init(void *pipeline)
-{
- struct pipeline_routing *p = pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- return app_pipeline_routing_set_macaddr(p->app, p->pipeline_id);
-}
-
-static int
-app_pipeline_routing_free(void *pipeline)
-{
- struct pipeline_routing *p = pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- while (!TAILQ_EMPTY(&p->routes)) {
- struct app_pipeline_routing_route *route;
-
- route = TAILQ_FIRST(&p->routes);
- TAILQ_REMOVE(&p->routes, route, node);
- rte_free(route);
- }
-
- while (!TAILQ_EMPTY(&p->arp_entries)) {
- struct app_pipeline_routing_arp_entry *arp_entry;
-
- arp_entry = TAILQ_FIRST(&p->arp_entries);
- TAILQ_REMOVE(&p->arp_entries, arp_entry, node);
- rte_free(arp_entry);
- }
-
- rte_free(p);
- return 0;
-}
-
-static struct app_pipeline_routing_route *
-app_pipeline_routing_find_route(struct pipeline_routing *p,
- const struct pipeline_routing_route_key *key)
-{
- struct app_pipeline_routing_route *it, *found;
-
- found = NULL;
- TAILQ_FOREACH(it, &p->routes, node) {
- if ((key->type == it->key.type) &&
- (key->key.ipv4.ip == it->key.key.ipv4.ip) &&
- (key->key.ipv4.depth == it->key.key.ipv4.depth)) {
- found = it;
- break;
- }
- }
-
- return found;
-}
-
-static struct app_pipeline_routing_arp_entry *
-app_pipeline_routing_find_arp_entry(struct pipeline_routing *p,
- const struct pipeline_routing_arp_key *key)
-{
- struct app_pipeline_routing_arp_entry *it, *found;
-
- found = NULL;
- TAILQ_FOREACH(it, &p->arp_entries, node) {
- if ((key->type == it->key.type) &&
- (key->key.ipv4.port_id == it->key.key.ipv4.port_id) &&
- (key->key.ipv4.ip == it->key.key.ipv4.ip)) {
- found = it;
- break;
- }
- }
-
- return found;
-}
-
-static void
-print_route(const struct app_pipeline_routing_route *route)
-{
- if (route->key.type == PIPELINE_ROUTING_ROUTE_IPV4) {
- const struct pipeline_routing_route_key_ipv4 *key =
- &route->key.key.ipv4;
-
- printf("IP Prefix = %" PRIu32 ".%" PRIu32
- ".%" PRIu32 ".%" PRIu32 "/%" PRIu32
- " => (Port = %" PRIu32,
-
- (key->ip >> 24) & 0xFF,
- (key->ip >> 16) & 0xFF,
- (key->ip >> 8) & 0xFF,
- key->ip & 0xFF,
-
- key->depth,
- route->data.port_id);
-
- if (route->data.flags & PIPELINE_ROUTING_ROUTE_LOCAL)
- printf(", Local");
- else if (route->data.flags & PIPELINE_ROUTING_ROUTE_ARP)
- printf(
- ", Next Hop IP = %" PRIu32 ".%" PRIu32
- ".%" PRIu32 ".%" PRIu32,
-
- (route->data.ethernet.ip >> 24) & 0xFF,
- (route->data.ethernet.ip >> 16) & 0xFF,
- (route->data.ethernet.ip >> 8) & 0xFF,
- route->data.ethernet.ip & 0xFF);
- else
- printf(
- ", Next Hop HWaddress = %02" PRIx32
- ":%02" PRIx32 ":%02" PRIx32
- ":%02" PRIx32 ":%02" PRIx32
- ":%02" PRIx32,
-
- route->data.ethernet.macaddr.addr_bytes[0],
- route->data.ethernet.macaddr.addr_bytes[1],
- route->data.ethernet.macaddr.addr_bytes[2],
- route->data.ethernet.macaddr.addr_bytes[3],
- route->data.ethernet.macaddr.addr_bytes[4],
- route->data.ethernet.macaddr.addr_bytes[5]);
-
- if (route->data.flags & PIPELINE_ROUTING_ROUTE_QINQ)
- printf(", QinQ SVLAN = %" PRIu32 " CVLAN = %" PRIu32,
- route->data.l2.qinq.svlan,
- route->data.l2.qinq.cvlan);
-
- if (route->data.flags & PIPELINE_ROUTING_ROUTE_MPLS) {
- uint32_t i;
-
- printf(", MPLS labels");
- for (i = 0; i < route->data.l2.mpls.n_labels; i++)
- printf(" %" PRIu32,
- route->data.l2.mpls.labels[i]);
- }
-
- printf(")\n");
- }
-}
-
-static void
-print_arp_entry(const struct app_pipeline_routing_arp_entry *entry)
-{
- printf("(Port = %" PRIu32 ", IP = %" PRIu32 ".%" PRIu32
- ".%" PRIu32 ".%" PRIu32
- ") => HWaddress = %02" PRIx32 ":%02" PRIx32 ":%02" PRIx32
- ":%02" PRIx32 ":%02" PRIx32 ":%02" PRIx32 "\n",
-
- entry->key.key.ipv4.port_id,
- (entry->key.key.ipv4.ip >> 24) & 0xFF,
- (entry->key.key.ipv4.ip >> 16) & 0xFF,
- (entry->key.key.ipv4.ip >> 8) & 0xFF,
- entry->key.key.ipv4.ip & 0xFF,
-
- entry->macaddr.addr_bytes[0],
- entry->macaddr.addr_bytes[1],
- entry->macaddr.addr_bytes[2],
- entry->macaddr.addr_bytes[3],
- entry->macaddr.addr_bytes[4],
- entry->macaddr.addr_bytes[5]);
-}
-
-static int
-app_pipeline_routing_route_ls(struct app_params *app, uint32_t pipeline_id)
-{
- struct pipeline_routing *p;
- struct app_pipeline_routing_route *it;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -EINVAL;
-
- TAILQ_FOREACH(it, &p->routes, node)
- print_route(it);
-
- if (p->default_route_present)
- printf("Default route: port %" PRIu32 " (entry ptr = %p)\n",
- p->default_route_port_id,
- p->default_route_entry_ptr);
- else
- printf("Default: DROP\n");
-
- return 0;
-}
-
-int
-app_pipeline_routing_add_route(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_routing_route_key *key,
- struct pipeline_routing_route_data *data)
-{
- struct pipeline_routing *p;
-
- struct pipeline_routing_route_add_msg_req *req;
- struct pipeline_routing_route_add_msg_rsp *rsp;
-
- struct app_pipeline_routing_route *entry;
-
- int new_entry;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL) ||
- (data == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -1;
-
- switch (key->type) {
- case PIPELINE_ROUTING_ROUTE_IPV4:
- {
- uint32_t depth = key->key.ipv4.depth;
- uint32_t netmask;
-
- /* key */
- if ((depth == 0) || (depth > 32))
- return -1;
-
- netmask = (~0U) << (32 - depth);
- key->key.ipv4.ip &= netmask;
-
- /* data */
- if (data->port_id >= p->n_ports_out)
- return -1;
-
- /* Valid range of VLAN tags 12 bits */
- if (data->flags & PIPELINE_ROUTING_ROUTE_QINQ)
- if ((data->l2.qinq.svlan & 0xF000) ||
- (data->l2.qinq.cvlan & 0xF000))
- return -1;
-
- /* Max number of MPLS labels supported */
- if (data->flags & PIPELINE_ROUTING_ROUTE_MPLS) {
- uint32_t i;
-
- if (data->l2.mpls.n_labels >
- PIPELINE_ROUTING_MPLS_LABELS_MAX)
- return -1;
-
- /* Max MPLS label value 20 bits */
- for (i = 0; i < data->l2.mpls.n_labels; i++)
- if (data->l2.mpls.labels[i] & 0xFFF00000)
- return -1;
- }
- }
- break;
-
- default:
- return -1;
- }
-
- /* Find existing rule or allocate new rule */
- entry = app_pipeline_routing_find_route(p, key);
- new_entry = (entry == NULL);
- if (entry == NULL) {
- entry = rte_malloc(NULL, sizeof(*entry), RTE_CACHE_LINE_SIZE);
-
- if (entry == NULL)
- return -1;
- }
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL) {
- if (new_entry)
- rte_free(entry);
- return -1;
- }
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD;
- memcpy(&req->key, key, sizeof(*key));
- memcpy(&req->data, data, sizeof(*data));
-
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL) {
- if (new_entry)
- rte_free(entry);
- return -1;
- }
-
- /* Read response and write entry */
- if (rsp->status ||
- (rsp->entry_ptr == NULL) ||
- ((new_entry == 0) && (rsp->key_found == 0)) ||
- ((new_entry == 1) && (rsp->key_found == 1))) {
- app_msg_free(app, rsp);
- if (new_entry)
- rte_free(entry);
- return -1;
- }
-
- memcpy(&entry->key, key, sizeof(*key));
- memcpy(&entry->data, data, sizeof(*data));
- entry->entry_ptr = rsp->entry_ptr;
-
- /* Commit entry */
- if (new_entry) {
- TAILQ_INSERT_TAIL(&p->routes, entry, node);
- p->n_routes++;
- }
-
- /* Message buffer free */
- app_msg_free(app, rsp);
- return 0;
-}
-
-int
-app_pipeline_routing_delete_route(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_routing_route_key *key)
-{
- struct pipeline_routing *p;
-
- struct pipeline_routing_route_delete_msg_req *req;
- struct pipeline_routing_route_delete_msg_rsp *rsp;
-
- struct app_pipeline_routing_route *entry;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -1;
-
- switch (key->type) {
- case PIPELINE_ROUTING_ROUTE_IPV4:
- {
- uint32_t depth = key->key.ipv4.depth;
- uint32_t netmask;
-
- /* key */
- if ((depth == 0) || (depth > 32))
- return -1;
-
- netmask = (~0U) << (32 - depth);
- key->key.ipv4.ip &= netmask;
- }
- break;
-
- default:
- return -1;
- }
-
- /* Find rule */
- entry = app_pipeline_routing_find_route(p, key);
- if (entry == NULL)
- return 0;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL;
- memcpy(&req->key, key, sizeof(*key));
-
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response */
- if (rsp->status || !rsp->key_found) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- /* Remove route */
- TAILQ_REMOVE(&p->routes, entry, node);
- p->n_routes--;
- rte_free(entry);
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_routing_add_default_route(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id)
-{
- struct pipeline_routing *p;
-
- struct pipeline_routing_route_add_default_msg_req *req;
- struct pipeline_routing_route_add_default_msg_rsp *rsp;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -1;
-
- if (port_id >= p->n_ports_out)
- return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT;
- req->port_id = port_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response and write route */
- if (rsp->status || (rsp->entry_ptr == NULL)) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- p->default_route_port_id = port_id;
- p->default_route_entry_ptr = rsp->entry_ptr;
-
- /* Commit route */
- p->default_route_present = 1;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_routing_delete_default_route(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct pipeline_routing *p;
-
- struct pipeline_routing_arp_delete_default_msg_req *req;
- struct pipeline_routing_arp_delete_default_msg_rsp *rsp;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response and write route */
- if (rsp->status) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- /* Commit route */
- p->default_route_present = 0;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-static int
-app_pipeline_routing_arp_ls(struct app_params *app, uint32_t pipeline_id)
-{
- struct pipeline_routing *p;
- struct app_pipeline_routing_arp_entry *it;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -EINVAL;
-
- TAILQ_FOREACH(it, &p->arp_entries, node)
- print_arp_entry(it);
-
- if (p->default_arp_entry_present)
- printf("Default entry: port %" PRIu32 " (entry ptr = %p)\n",
- p->default_arp_entry_port_id,
- p->default_arp_entry_ptr);
- else
- printf("Default: DROP\n");
-
- return 0;
-}
-
-int
-app_pipeline_routing_add_arp_entry(struct app_params *app, uint32_t pipeline_id,
- struct pipeline_routing_arp_key *key,
- struct ether_addr *macaddr)
-{
- struct pipeline_routing *p;
-
- struct pipeline_routing_arp_add_msg_req *req;
- struct pipeline_routing_arp_add_msg_rsp *rsp;
-
- struct app_pipeline_routing_arp_entry *entry;
-
- int new_entry;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL) ||
- (macaddr == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -1;
-
- switch (key->type) {
- case PIPELINE_ROUTING_ARP_IPV4:
- {
- uint32_t port_id = key->key.ipv4.port_id;
-
- /* key */
- if (port_id >= p->n_ports_out)
- return -1;
- }
- break;
-
- default:
- return -1;
- }
-
- /* Find existing entry or allocate new */
- entry = app_pipeline_routing_find_arp_entry(p, key);
- new_entry = (entry == NULL);
- if (entry == NULL) {
- entry = rte_malloc(NULL, sizeof(*entry), RTE_CACHE_LINE_SIZE);
-
- if (entry == NULL)
- return -1;
- }
-
- /* Message buffer allocation */
- req = app_msg_alloc(app);
- if (req == NULL) {
- if (new_entry)
- rte_free(entry);
- return -1;
- }
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_ADD;
- memcpy(&req->key, key, sizeof(*key));
- ether_addr_copy(macaddr, &req->macaddr);
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL) {
- if (new_entry)
- rte_free(entry);
- return -1;
- }
-
- /* Read response and write entry */
- if (rsp->status ||
- (rsp->entry_ptr == NULL) ||
- ((new_entry == 0) && (rsp->key_found == 0)) ||
- ((new_entry == 1) && (rsp->key_found == 1))) {
- app_msg_free(app, rsp);
- if (new_entry)
- rte_free(entry);
- return -1;
- }
-
- memcpy(&entry->key, key, sizeof(*key));
- ether_addr_copy(macaddr, &entry->macaddr);
- entry->entry_ptr = rsp->entry_ptr;
-
- /* Commit entry */
- if (new_entry) {
- TAILQ_INSERT_TAIL(&p->arp_entries, entry, node);
- p->n_arp_entries++;
- }
-
- /* Message buffer free */
- app_msg_free(app, rsp);
- return 0;
-}
-
-int
-app_pipeline_routing_delete_arp_entry(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_routing_arp_key *key)
-{
- struct pipeline_routing *p;
-
- struct pipeline_routing_arp_delete_msg_req *req;
- struct pipeline_routing_arp_delete_msg_rsp *rsp;
-
- struct app_pipeline_routing_arp_entry *entry;
-
- /* Check input arguments */
- if ((app == NULL) ||
- (key == NULL))
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -EINVAL;
-
- switch (key->type) {
- case PIPELINE_ROUTING_ARP_IPV4:
- {
- uint32_t port_id = key->key.ipv4.port_id;
-
- /* key */
- if (port_id >= p->n_ports_out)
- return -1;
- }
- break;
-
- default:
- return -1;
- }
-
- /* Find rule */
- entry = app_pipeline_routing_find_arp_entry(p, key);
- if (entry == NULL)
- return 0;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_DEL;
- memcpy(&req->key, key, sizeof(*key));
-
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response */
- if (rsp->status || !rsp->key_found) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- /* Remove entry */
- TAILQ_REMOVE(&p->arp_entries, entry, node);
- p->n_arp_entries--;
- rte_free(entry);
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_routing_add_default_arp_entry(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id)
-{
- struct pipeline_routing *p;
-
- struct pipeline_routing_arp_add_default_msg_req *req;
- struct pipeline_routing_arp_add_default_msg_rsp *rsp;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -1;
-
- if (port_id >= p->n_ports_out)
- return -1;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT;
- req->port_id = port_id;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- /* Read response and write entry */
- if (rsp->status || rsp->entry_ptr == NULL) {
- app_msg_free(app, rsp);
- return -1;
- }
-
- p->default_arp_entry_port_id = port_id;
- p->default_arp_entry_ptr = rsp->entry_ptr;
-
- /* Commit entry */
- p->default_arp_entry_present = 1;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct pipeline_routing *p;
-
- struct pipeline_routing_arp_delete_default_msg_req *req;
- struct pipeline_routing_arp_delete_default_msg_rsp *rsp;
-
- /* Check input arguments */
- if (app == NULL)
- return -1;
-
- p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
- if (p == NULL)
- return -EINVAL;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -ENOMEM;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT;
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -ETIMEDOUT;
-
- /* Read response and write entry */
-	if (rsp->status) {
-		/* Save status before the response buffer is freed */
-		int status = rsp->status;
-
-		app_msg_free(app, rsp);
-		return status;
-	}
-
- /* Commit entry */
- p->default_arp_entry_present = 0;
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-int
-app_pipeline_routing_set_macaddr(struct app_params *app,
- uint32_t pipeline_id)
-{
- struct app_pipeline_params *p;
- struct pipeline_routing_set_macaddr_msg_req *req;
- struct pipeline_routing_set_macaddr_msg_rsp *rsp;
- uint32_t port_id;
-
- /* Check input arguments */
- if (app == NULL)
- return -EINVAL;
-
- APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
- if (p == NULL)
- return -EINVAL;
-
- /* Allocate and write request */
- req = app_msg_alloc(app);
- if (req == NULL)
- return -ENOMEM;
-
- req->type = PIPELINE_MSG_REQ_CUSTOM;
- req->subtype = PIPELINE_ROUTING_MSG_REQ_SET_MACADDR;
-
- memset(req->macaddr, 0, sizeof(req->macaddr));
- for (port_id = 0; port_id < p->n_pktq_out; port_id++) {
- struct app_link_params *link;
-
- link = app_pipeline_track_pktq_out_to_link(app,
- pipeline_id,
- port_id);
- if (link)
- req->macaddr[port_id] = link->mac_addr;
- }
-
- /* Send request and wait for response */
- rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -ETIMEDOUT;
-
- /* Read response and write entry */
-	if (rsp->status) {
-		/* Save status before the response buffer is freed */
-		int status = rsp->status;
-
-		app_msg_free(app, rsp);
-		return status;
-	}
-
- /* Free response */
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-/*
- * route
- *
- * route add (ARP = ON/OFF, MPLS = ON/OFF, QINQ = ON/OFF):
- * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr>
- * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr>
- * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr> qinq <svlan> <cvlan>
- * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr> qinq <svlan> <cvlan>
- * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr> mpls <mpls labels>
- * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr> mpls <mpls labels>
- *
- * route add default:
- * p <pipelineid> route add default <portid>
- *
- * route del:
- * p <pipelineid> route del <ipaddr> <depth>
- *
- * route del default:
- * p <pipelineid> route del default
- *
- * route ls:
- * p <pipelineid> route ls
- */
-
-struct cmd_route_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t route_string;
- cmdline_multi_string_t multi_string;
-};
-
-static void
-cmd_route_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_route_result *params = parsed_result;
- struct app_params *app = data;
-
- char *tokens[16];
- uint32_t n_tokens = RTE_DIM(tokens);
- int status;
-
- status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
- if (status != 0) {
- printf(CMD_MSG_TOO_MANY_ARGS, "route");
- return;
- }
-
- /* route add */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "add") == 0) &&
- strcmp(tokens[1], "default")) {
- struct pipeline_routing_route_key key;
- struct pipeline_routing_route_data route_data;
- struct in_addr ipv4, nh_ipv4;
- struct ether_addr mac_addr;
- uint32_t depth, port_id, svlan, cvlan, i;
- uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
- uint32_t n_labels = RTE_DIM(mpls_labels);
-
- memset(&key, 0, sizeof(key));
- memset(&route_data, 0, sizeof(route_data));
-
- if (n_tokens < 7) {
- printf(CMD_MSG_NOT_ENOUGH_ARGS, "route add");
- return;
- }
-
- if (parse_ipv4_addr(tokens[1], &ipv4)) {
- printf(CMD_MSG_INVALID_ARG, "ipaddr");
- return;
- }
-
- if (parser_read_uint32(&depth, tokens[2])) {
- printf(CMD_MSG_INVALID_ARG, "depth");
- return;
- }
-
- if (strcmp(tokens[3], "port")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "port");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[4])) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- if (strcmp(tokens[5], "ether")) {
- printf(CMD_MSG_ARG_NOT_FOUND, "ether");
- return;
- }
-
- if (parse_mac_addr(tokens[6], &mac_addr)) {
- if (parse_ipv4_addr(tokens[6], &nh_ipv4)) {
- printf(CMD_MSG_INVALID_ARG, "nhmacaddr or nhipaddr");
- return;
- }
-
- route_data.flags |= PIPELINE_ROUTING_ROUTE_ARP;
- }
-
- if (n_tokens > 7) {
- if (strcmp(tokens[7], "mpls") == 0) {
- if (n_tokens != 9) {
- printf(CMD_MSG_MISMATCH_ARGS, "route add mpls");
- return;
- }
-
- if (parse_mpls_labels(tokens[8], mpls_labels, &n_labels)) {
- printf(CMD_MSG_INVALID_ARG, "mpls labels");
- return;
- }
-
- route_data.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
- } else if (strcmp(tokens[7], "qinq") == 0) {
- if (n_tokens != 10) {
- printf(CMD_MSG_MISMATCH_ARGS, "route add qinq");
- return;
- }
-
- if (parser_read_uint32(&svlan, tokens[8])) {
- printf(CMD_MSG_INVALID_ARG, "svlan");
- return;
- }
- if (parser_read_uint32(&cvlan, tokens[9])) {
- printf(CMD_MSG_INVALID_ARG, "cvlan");
- return;
- }
-
- route_data.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
- } else {
- printf(CMD_MSG_ARG_NOT_FOUND, "mpls or qinq");
- return;
- }
- }
-
- switch (route_data.flags) {
- case 0:
- route_data.port_id = port_id;
- route_data.ethernet.macaddr = mac_addr;
- break;
-
- case PIPELINE_ROUTING_ROUTE_ARP:
- route_data.port_id = port_id;
- route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
- break;
-
- case PIPELINE_ROUTING_ROUTE_MPLS:
- route_data.port_id = port_id;
- route_data.ethernet.macaddr = mac_addr;
- for (i = 0; i < n_labels; i++)
- route_data.l2.mpls.labels[i] = mpls_labels[i];
- route_data.l2.mpls.n_labels = n_labels;
- break;
-
- case PIPELINE_ROUTING_ROUTE_MPLS | PIPELINE_ROUTING_ROUTE_ARP:
- route_data.port_id = port_id;
- route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
- for (i = 0; i < n_labels; i++)
- route_data.l2.mpls.labels[i] = mpls_labels[i];
- route_data.l2.mpls.n_labels = n_labels;
- break;
-
- case PIPELINE_ROUTING_ROUTE_QINQ:
- route_data.port_id = port_id;
- route_data.ethernet.macaddr = mac_addr;
- route_data.l2.qinq.svlan = svlan;
- route_data.l2.qinq.cvlan = cvlan;
- break;
-
- case PIPELINE_ROUTING_ROUTE_QINQ | PIPELINE_ROUTING_ROUTE_ARP:
- default:
- route_data.port_id = port_id;
- route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
- route_data.l2.qinq.svlan = svlan;
- route_data.l2.qinq.cvlan = cvlan;
- break;
- }
-
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
- key.key.ipv4.depth = depth;
-
- status = app_pipeline_routing_add_route(app,
- params->p,
- &key,
- &route_data);
- if (status != 0)
- printf(CMD_MSG_FAIL, "route add");
-
- return;
- } /* route add */
-
- /* route add default */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "default") == 0)) {
- uint32_t port_id;
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "route add default");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[2])) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- status = app_pipeline_routing_add_default_route(app,
- params->p,
- port_id);
- if (status != 0)
- printf(CMD_MSG_FAIL, "route add default");
-
- return;
- } /* route add default */
-
-	/* route del */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- strcmp(tokens[1], "default")) {
- struct pipeline_routing_route_key key;
- struct in_addr ipv4;
- uint32_t depth;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "route del");
- return;
- }
-
- if (parse_ipv4_addr(tokens[1], &ipv4)) {
- printf(CMD_MSG_INVALID_ARG, "ipaddr");
- return;
- }
-
- if (parser_read_uint32(&depth, tokens[2])) {
- printf(CMD_MSG_INVALID_ARG, "depth");
- return;
- }
-
- key.type = PIPELINE_ROUTING_ROUTE_IPV4;
- key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
- key.key.ipv4.depth = depth;
-
- status = app_pipeline_routing_delete_route(app, params->p, &key);
- if (status != 0)
- printf(CMD_MSG_FAIL, "route del");
-
- return;
- } /* route del */
-
- /* route del default */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "default") == 0)) {
- if (n_tokens != 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "route del default");
- return;
- }
-
- status = app_pipeline_routing_delete_default_route(app,
- params->p);
- if (status != 0)
- printf(CMD_MSG_FAIL, "route del default");
-
- return;
- } /* route del default */
-
- /* route ls */
- if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
- if (n_tokens != 1) {
- printf(CMD_MSG_MISMATCH_ARGS, "route ls");
- return;
- }
-
- status = app_pipeline_routing_route_ls(app, params->p);
- if (status != 0)
- printf(CMD_MSG_FAIL, "route ls");
-
- return;
- } /* route ls */
-
- printf(CMD_MSG_MISMATCH_ARGS, "route");
-}
-
-static cmdline_parse_token_string_t cmd_route_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_result, p_string, "p");
-
-static cmdline_parse_token_num_t cmd_route_p =
- TOKEN_NUM_INITIALIZER(struct cmd_route_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_route_route_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_result, route_string, "route");
-
-static cmdline_parse_token_string_t cmd_route_multi_string =
- TOKEN_STRING_INITIALIZER(struct cmd_route_result, multi_string,
- TOKEN_STRING_MULTI);
-
-static cmdline_parse_inst_t cmd_route = {
- .f = cmd_route_parsed,
- .data = NULL,
- .help_str = "route add / add default / del / del default / ls",
- .tokens = {
- (void *)&cmd_route_p_string,
- (void *)&cmd_route_p,
- (void *)&cmd_route_route_string,
- (void *)&cmd_route_multi_string,
- NULL,
- },
-};
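-
-/*
- * Illustrative "route" commands accepted by cmd_route_parsed() above.
- * Addresses, depths, ports and labels are example values only; the
- * "port"/"ether" keywords follow the upstream CLI syntax parsed before
- * the token positions checked above, and each "add" form assumes a
- * matching encap/ARP pipeline configuration:
- *   p 1 route add 10.0.0.0 24 port 0 ether 00:11:22:33:44:55
- *   p 1 route add 10.1.0.0 24 port 1 ether 192.168.1.1 mpls 100
- *   p 1 route add default 2
- *   p 1 route del 10.0.0.0 24
- *   p 1 route ls
- */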
-
-/*
- * arp
- *
- * arp add:
- * p <pipelineid> arp add <portid> <ipaddr> <macaddr>
- *
- * arp add default:
- * p <pipelineid> arp add default <portid>
- *
- * arp del:
- * p <pipelineid> arp del <portid> <ipaddr>
- *
- * arp del default:
- * p <pipelineid> arp del default
- *
- * arp ls:
- * p <pipelineid> arp ls
- */
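-
-/*
- * Illustrative session (example values only; assumes the pipeline was
- * configured with n_arp_entries > 0):
- *   p 1 arp add 0 192.168.0.10 00:11:22:33:44:55
- *   p 1 arp add default 2
- *   p 1 arp del 0 192.168.0.10
- *   p 1 arp ls
- */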
-
-struct cmd_arp_result {
- cmdline_fixed_string_t p_string;
- uint32_t p;
- cmdline_fixed_string_t arp_string;
- cmdline_multi_string_t multi_string;
-};
-
-static void
-cmd_arp_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_arp_result *params = parsed_result;
- struct app_params *app = data;
-
- char *tokens[16];
- uint32_t n_tokens = RTE_DIM(tokens);
- int status;
-
- status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
- if (status != 0) {
- printf(CMD_MSG_TOO_MANY_ARGS, "arp");
- return;
- }
-
- /* arp add */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "add") == 0) &&
- strcmp(tokens[1], "default")) {
- struct pipeline_routing_arp_key key;
- struct in_addr ipv4;
- struct ether_addr mac_addr;
- uint32_t port_id;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 4) {
- printf(CMD_MSG_MISMATCH_ARGS, "arp add");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[1])) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- if (parse_ipv4_addr(tokens[2], &ipv4)) {
- printf(CMD_MSG_INVALID_ARG, "ipaddr");
- return;
- }
-
- if (parse_mac_addr(tokens[3], &mac_addr)) {
- printf(CMD_MSG_INVALID_ARG, "macaddr");
- return;
- }
-
- key.type = PIPELINE_ROUTING_ARP_IPV4;
- key.key.ipv4.port_id = port_id;
- key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
-
- status = app_pipeline_routing_add_arp_entry(app,
- params->p,
- &key,
- &mac_addr);
- if (status != 0)
- printf(CMD_MSG_FAIL, "arp add");
-
- return;
- } /* arp add */
-
- /* arp add default */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "add") == 0) &&
- (strcmp(tokens[1], "default") == 0)) {
- uint32_t port_id;
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "arp add default");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[2])) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- status = app_pipeline_routing_add_default_arp_entry(app,
- params->p,
- port_id);
- if (status != 0)
- printf(CMD_MSG_FAIL, "arp add default");
-
- return;
- } /* arp add default */
-
- /* arp del */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- strcmp(tokens[1], "default")) {
- struct pipeline_routing_arp_key key;
- struct in_addr ipv4;
- uint32_t port_id;
-
- memset(&key, 0, sizeof(key));
-
- if (n_tokens != 3) {
- printf(CMD_MSG_MISMATCH_ARGS, "arp del");
- return;
- }
-
- if (parser_read_uint32(&port_id, tokens[1])) {
- printf(CMD_MSG_INVALID_ARG, "portid");
- return;
- }
-
- if (parse_ipv4_addr(tokens[2], &ipv4)) {
- printf(CMD_MSG_INVALID_ARG, "ipaddr");
- return;
- }
-
- key.type = PIPELINE_ROUTING_ARP_IPV4;
- key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
- key.key.ipv4.port_id = port_id;
-
- status = app_pipeline_routing_delete_arp_entry(app,
- params->p,
- &key);
- if (status != 0)
- printf(CMD_MSG_FAIL, "arp del");
-
- return;
- } /* arp del */
-
- /* arp del default */
- if ((n_tokens >= 2) &&
- (strcmp(tokens[0], "del") == 0) &&
- (strcmp(tokens[1], "default") == 0)) {
- if (n_tokens != 2) {
- printf(CMD_MSG_MISMATCH_ARGS, "arp del default");
- return;
- }
-
- status = app_pipeline_routing_delete_default_arp_entry(app,
- params->p);
- if (status != 0)
- printf(CMD_MSG_FAIL, "arp del default");
-
- return;
- } /* arp del default */
-
- /* arp ls */
- if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
- if (n_tokens != 1) {
- printf(CMD_MSG_MISMATCH_ARGS, "arp ls");
- return;
- }
-
- status = app_pipeline_routing_arp_ls(app, params->p);
- if (status != 0)
- printf(CMD_MSG_FAIL, "arp ls");
-
- return;
- } /* arp ls */
-
- printf(CMD_MSG_MISMATCH_ARGS, "arp");
-}
-
-static cmdline_parse_token_string_t cmd_arp_p_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_result, p_string, "p");
-
-static cmdline_parse_token_num_t cmd_arp_p =
- TOKEN_NUM_INITIALIZER(struct cmd_arp_result, p, UINT32);
-
-static cmdline_parse_token_string_t cmd_arp_arp_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_result, arp_string, "arp");
-
-static cmdline_parse_token_string_t cmd_arp_multi_string =
- TOKEN_STRING_INITIALIZER(struct cmd_arp_result, multi_string,
- TOKEN_STRING_MULTI);
-
-static cmdline_parse_inst_t cmd_arp = {
- .f = cmd_arp_parsed,
- .data = NULL,
- .help_str = "arp add / add default / del / del default / ls",
- .tokens = {
- (void *)&cmd_arp_p_string,
- (void *)&cmd_arp_p,
- (void *)&cmd_arp_arp_string,
- (void *)&cmd_arp_multi_string,
- NULL,
- },
-};
-
-static cmdline_parse_ctx_t pipeline_cmds[] = {
- (cmdline_parse_inst_t *)&cmd_route,
- (cmdline_parse_inst_t *)&cmd_arp,
- NULL,
-};
-
-static struct pipeline_fe_ops pipeline_routing_fe_ops = {
- .f_init = app_pipeline_routing_init,
- .f_post_init = app_pipeline_routing_post_init,
- .f_free = app_pipeline_routing_free,
- .f_track = app_pipeline_track_default,
- .cmds = pipeline_cmds,
-};
-
-struct pipeline_type pipeline_routing = {
- .name = "ROUTING",
- .be_ops = &pipeline_routing_be_ops,
- .fe_ops = &pipeline_routing_fe_ops,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing.h b/examples/ip_pipeline/pipeline/pipeline_routing.h
deleted file mode 100644
index f2492957..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_routing.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_ROUTING_H__
-#define __INCLUDE_PIPELINE_ROUTING_H__
-
-#include "pipeline.h"
-#include "pipeline_routing_be.h"
-
-/*
- * Route
- */
-
-int
-app_pipeline_routing_add_route(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_routing_route_key *key,
- struct pipeline_routing_route_data *data);
-
-int
-app_pipeline_routing_delete_route(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_routing_route_key *key);
-
-int
-app_pipeline_routing_add_default_route(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id);
-
-int
-app_pipeline_routing_delete_default_route(struct app_params *app,
- uint32_t pipeline_id);
-
-/*
- * ARP
- */
-
-int
-app_pipeline_routing_add_arp_entry(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_routing_arp_key *key,
- struct ether_addr *macaddr);
-
-int
-app_pipeline_routing_delete_arp_entry(struct app_params *app,
- uint32_t pipeline_id,
- struct pipeline_routing_arp_key *key);
-
-int
-app_pipeline_routing_add_default_arp_entry(struct app_params *app,
- uint32_t pipeline_id,
- uint32_t port_id);
-
-int
-app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
- uint32_t pipeline_id);
-
-/*
- * SETTINGS
- */
-int
-app_pipeline_routing_set_macaddr(struct app_params *app,
- uint32_t pipeline_id);
-
-/*
- * Pipeline type
- */
-extern struct pipeline_type pipeline_routing;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.c b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
deleted file mode 100644
index 6258a1a9..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.c
+++ /dev/null
@@ -1,1966 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <string.h>
-#include <unistd.h>
-
-#include <rte_common.h>
-#include <rte_malloc.h>
-#include <rte_ip.h>
-#include <rte_byteorder.h>
-#include <rte_table_lpm.h>
-#include <rte_table_hash.h>
-#include <rte_pipeline.h>
-
-#include "pipeline_routing_be.h"
-#include "pipeline_actions_common.h"
-#include "parser.h"
-#include "hash_func.h"
-
-#define MPLS_LABEL(label, exp, s, ttl) \
- (((((uint64_t) (label)) & 0xFFFFFLLU) << 12) | \
- ((((uint64_t) (exp)) & 0x7LLU) << 9) | \
- ((((uint64_t) (s)) & 0x1LLU) << 8) | \
- (((uint64_t) (ttl)) & 0xFFLU))
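-
-/*
- * MPLS_LABEL() builds one 32-bit MPLS shim word: label in bits 31:12,
- * EXP in bits 11:9, bottom-of-stack flag in bit 8, TTL in bits 7:0.
- * Worked example (illustrative values):
- *   MPLS_LABEL(0x12345, 0, 1, 0x40)
- *     == (0x12345 << 12) | (0 << 9) | (1 << 8) | 0x40
- *     == 0x12345140
- */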
-
-#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, \
- traffic_class, queue, color) \
- ((((uint64_t) (queue)) & 0x3) | \
- ((((uint64_t) (traffic_class)) & 0x3) << 2) | \
- ((((uint64_t) (color)) & 0x3) << 4) | \
- ((((uint64_t) (subport)) & 0xFFFF) << 16) | \
- ((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
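-
-/*
- * RTE_SCHED_PORT_HIERARCHY() packs the scheduler hierarchy into one
- * 64-bit value: queue in bits 1:0, traffic class in bits 3:2, color in
- * bits 5:4, subport in bits 31:16 and pipe in bits 63:32; the result is
- * later split across the mbuf's hash.sched.lo/hi fields.
- */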
-
-
-/* Network Byte Order (NBO) */
-#define SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr, ethertype) \
- (((uint64_t) macaddr) | (((uint64_t) rte_cpu_to_be_16(ethertype)) << 48))
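-
-/*
- * The macaddr argument is expected to already hold the 6-byte source
- * MAC in network byte order in its low 48 bits; the macro then places
- * the EtherType, converted to big endian, in the upper 16 bits, so the
- * whole MAC+EtherType pair can be emitted with a single 64-bit store.
- */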
-
-#ifndef PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s
-#define PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s 256
-#endif
-
-struct pipeline_routing {
- struct pipeline p;
- struct pipeline_routing_params params;
- pipeline_msg_req_handler custom_handlers[PIPELINE_ROUTING_MSG_REQS];
- uint64_t macaddr[PIPELINE_MAX_PORT_OUT];
-} __rte_cache_aligned;
-
-/*
- * Message handlers
- */
-static void *
-pipeline_routing_msg_req_custom_handler(struct pipeline *p, void *msg);
-
-static pipeline_msg_req_handler handlers[] = {
- [PIPELINE_MSG_REQ_PING] =
- pipeline_msg_req_ping_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_IN] =
- pipeline_msg_req_stats_port_in_handler,
- [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
- pipeline_msg_req_stats_port_out_handler,
- [PIPELINE_MSG_REQ_STATS_TABLE] =
- pipeline_msg_req_stats_table_handler,
- [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
- pipeline_msg_req_port_in_enable_handler,
- [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
- pipeline_msg_req_port_in_disable_handler,
- [PIPELINE_MSG_REQ_CUSTOM] =
- pipeline_routing_msg_req_custom_handler,
-};
-
-static void *
-pipeline_routing_msg_req_route_add_handler(struct pipeline *p,
- void *msg);
-
-static void *
-pipeline_routing_msg_req_route_del_handler(struct pipeline *p,
- void *msg);
-
-static void *
-pipeline_routing_msg_req_route_add_default_handler(struct pipeline *p,
- void *msg);
-
-static void *
-pipeline_routing_msg_req_route_del_default_handler(struct pipeline *p,
- void *msg);
-
-static void *
-pipeline_routing_msg_req_arp_add_handler(struct pipeline *p,
- void *msg);
-
-static void *
-pipeline_routing_msg_req_arp_del_handler(struct pipeline *p,
- void *msg);
-
-static void *
-pipeline_routing_msg_req_arp_add_default_handler(struct pipeline *p,
- void *msg);
-
-static void *
-pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p,
- void *msg);
-
-static void *
-pipeline_routing_msg_req_set_macaddr_handler(struct pipeline *p,
- void *msg);
-
-static pipeline_msg_req_handler custom_handlers[] = {
- [PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD] =
- pipeline_routing_msg_req_route_add_handler,
- [PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL] =
- pipeline_routing_msg_req_route_del_handler,
- [PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT] =
- pipeline_routing_msg_req_route_add_default_handler,
- [PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT] =
- pipeline_routing_msg_req_route_del_default_handler,
- [PIPELINE_ROUTING_MSG_REQ_ARP_ADD] =
- pipeline_routing_msg_req_arp_add_handler,
- [PIPELINE_ROUTING_MSG_REQ_ARP_DEL] =
- pipeline_routing_msg_req_arp_del_handler,
- [PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT] =
- pipeline_routing_msg_req_arp_add_default_handler,
- [PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT] =
- pipeline_routing_msg_req_arp_del_default_handler,
- [PIPELINE_ROUTING_MSG_REQ_SET_MACADDR] =
- pipeline_routing_msg_req_set_macaddr_handler,
-};
-
-/*
- * Routing table
- */
-struct routing_table_entry {
- struct rte_pipeline_table_entry head;
- uint32_t flags;
- uint32_t port_id; /* Output port ID */
- uint32_t ip; /* Next hop IP address (only valid for remote routes) */
-
- /* ether_l2 */
- uint16_t data_offset;
- uint16_t ether_l2_length;
- uint64_t slab[4];
- uint16_t slab_offset[4];
-};
-
-struct layout {
- uint16_t a;
- uint32_t b;
- uint16_t c;
-} __attribute__((__packed__));
-
-#define MACADDR_DST_WRITE(slab_ptr, slab) \
-{ \
- struct layout *dst = (struct layout *) (slab_ptr); \
- struct layout *src = (struct layout *) &(slab); \
- \
- dst->b = src->b; \
- dst->c = src->c; \
-}
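-
-/*
- * MACADDR_DST_WRITE() copies bytes 2..7 of the source slab to the same
- * offsets of the destination through the packed struct layout above,
- * i.e. it overwrites exactly the 6-byte destination MAC while leaving
- * the first two bytes at the write pointer untouched.
- */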
-
-static __rte_always_inline void
-pkt_work_routing(
- struct rte_mbuf *pkt,
- struct rte_pipeline_table_entry *table_entry,
- void *arg,
- int arp,
- int qinq,
- int qinq_sched,
- int mpls,
- int mpls_color_mark)
-{
- struct pipeline_routing *p_rt = arg;
-
- struct routing_table_entry *entry =
- (struct routing_table_entry *) table_entry;
-
- struct ipv4_hdr *ip = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT8_PTR(pkt, p_rt->params.ip_hdr_offset);
-
- enum rte_meter_color pkt_color = (enum rte_meter_color)
- RTE_MBUF_METADATA_UINT32(pkt, p_rt->params.color_offset);
-
- struct pipeline_routing_arp_key_ipv4 *arp_key =
- (struct pipeline_routing_arp_key_ipv4 *)
- RTE_MBUF_METADATA_UINT8_PTR(pkt, p_rt->params.arp_key_offset);
-
- uint64_t *slab0_ptr, *slab1_ptr, *slab2_ptr, *slab3_ptr, sched;
- uint32_t ip_da, nh_ip, port_id;
- uint16_t total_length, data_offset, ether_l2_length;
-
- /* Read */
- total_length = rte_bswap16(ip->total_length);
- ip_da = ip->dst_addr;
- data_offset = entry->data_offset;
- ether_l2_length = entry->ether_l2_length;
- slab0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[0]);
- slab1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[1]);
- slab2_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[2]);
- slab3_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[3]);
-
- if (arp) {
- port_id = entry->port_id;
- nh_ip = entry->ip;
- if (entry->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
- nh_ip = ip_da;
- }
-
- /* Compute */
- total_length += ether_l2_length;
-
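- /*
- * qinq_sched == 1: svlan/cvlan come from the QinQ slab stored in the
- * table entry and the traffic class/queue from the IP DSCP field;
- * qinq_sched == 2 ("qinq_sched = test" in the config) derives all
- * hierarchy fields from the source IP address instead.
- */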
- if (qinq && qinq_sched) {
- uint32_t dscp = ip->type_of_service >> 2;
- uint32_t svlan, cvlan, tc, tc_q;
-
- if (qinq_sched == 1) {
- uint64_t slab_qinq = rte_bswap64(entry->slab[0]);
-
- svlan = (slab_qinq >> 48) & 0xFFF;
- cvlan = (slab_qinq >> 16) & 0xFFF;
- tc = (dscp >> 2) & 0x3;
- tc_q = dscp & 0x3;
- } else {
- uint32_t ip_src = rte_bswap32(ip->src_addr);
-
- svlan = 0;
- cvlan = (ip_src >> 16) & 0xFFF;
- tc = (ip_src >> 2) & 0x3;
- tc_q = ip_src & 0x3;
- }
- sched = RTE_SCHED_PORT_HIERARCHY(svlan,
- cvlan,
- tc,
- tc_q,
- e_RTE_METER_GREEN);
- }
-
- /* Write */
- pkt->data_off = data_offset;
- pkt->data_len = total_length;
- pkt->pkt_len = total_length;
-
- if ((qinq == 0) && (mpls == 0)) {
- *slab0_ptr = entry->slab[0];
-
- if (arp == 0)
- MACADDR_DST_WRITE(slab1_ptr, entry->slab[1]);
- }
-
- if (qinq) {
- *slab0_ptr = entry->slab[0];
- *slab1_ptr = entry->slab[1];
-
- if (arp == 0)
- MACADDR_DST_WRITE(slab2_ptr, entry->slab[2]);
-
- if (qinq_sched) {
- pkt->hash.sched.lo = sched & 0xFFFFFFFF;
- pkt->hash.sched.hi = sched >> 32;
- }
- }
-
- if (mpls) {
- if (mpls_color_mark) {
- uint64_t mpls_exp = rte_bswap64(
- (MPLS_LABEL(0, pkt_color, 0, 0) << 32) |
- MPLS_LABEL(0, pkt_color, 0, 0));
-
- *slab0_ptr = entry->slab[0] | mpls_exp;
- *slab1_ptr = entry->slab[1] | mpls_exp;
- *slab2_ptr = entry->slab[2];
- } else {
- *slab0_ptr = entry->slab[0];
- *slab1_ptr = entry->slab[1];
- *slab2_ptr = entry->slab[2];
- }
-
- if (arp == 0)
- MACADDR_DST_WRITE(slab3_ptr, entry->slab[3]);
- }
-
- if (arp) {
- arp_key->port_id = port_id;
- arp_key->ip = nh_ip;
- }
-}
-
-static __rte_always_inline void
-pkt4_work_routing(
- struct rte_mbuf **pkts,
- struct rte_pipeline_table_entry **table_entries,
- void *arg,
- int arp,
- int qinq,
- int qinq_sched,
- int mpls,
- int mpls_color_mark)
-{
- struct pipeline_routing *p_rt = arg;
-
- struct routing_table_entry *entry0 =
- (struct routing_table_entry *) table_entries[0];
- struct routing_table_entry *entry1 =
- (struct routing_table_entry *) table_entries[1];
- struct routing_table_entry *entry2 =
- (struct routing_table_entry *) table_entries[2];
- struct routing_table_entry *entry3 =
- (struct routing_table_entry *) table_entries[3];
-
- struct ipv4_hdr *ip0 = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT8_PTR(pkts[0],
- p_rt->params.ip_hdr_offset);
- struct ipv4_hdr *ip1 = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT8_PTR(pkts[1],
- p_rt->params.ip_hdr_offset);
- struct ipv4_hdr *ip2 = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT8_PTR(pkts[2],
- p_rt->params.ip_hdr_offset);
- struct ipv4_hdr *ip3 = (struct ipv4_hdr *)
- RTE_MBUF_METADATA_UINT8_PTR(pkts[3],
- p_rt->params.ip_hdr_offset);
-
- enum rte_meter_color pkt0_color = (enum rte_meter_color)
- RTE_MBUF_METADATA_UINT32(pkts[0], p_rt->params.color_offset);
- enum rte_meter_color pkt1_color = (enum rte_meter_color)
- RTE_MBUF_METADATA_UINT32(pkts[1], p_rt->params.color_offset);
- enum rte_meter_color pkt2_color = (enum rte_meter_color)
- RTE_MBUF_METADATA_UINT32(pkts[2], p_rt->params.color_offset);
- enum rte_meter_color pkt3_color = (enum rte_meter_color)
- RTE_MBUF_METADATA_UINT32(pkts[3], p_rt->params.color_offset);
-
- struct pipeline_routing_arp_key_ipv4 *arp_key0 =
- (struct pipeline_routing_arp_key_ipv4 *)
- RTE_MBUF_METADATA_UINT8_PTR(pkts[0],
- p_rt->params.arp_key_offset);
- struct pipeline_routing_arp_key_ipv4 *arp_key1 =
- (struct pipeline_routing_arp_key_ipv4 *)
- RTE_MBUF_METADATA_UINT8_PTR(pkts[1],
- p_rt->params.arp_key_offset);
- struct pipeline_routing_arp_key_ipv4 *arp_key2 =
- (struct pipeline_routing_arp_key_ipv4 *)
- RTE_MBUF_METADATA_UINT8_PTR(pkts[2],
- p_rt->params.arp_key_offset);
- struct pipeline_routing_arp_key_ipv4 *arp_key3 =
- (struct pipeline_routing_arp_key_ipv4 *)
- RTE_MBUF_METADATA_UINT8_PTR(pkts[3],
- p_rt->params.arp_key_offset);
-
- uint64_t *slab0_ptr0, *slab1_ptr0, *slab2_ptr0, *slab3_ptr0;
- uint64_t *slab0_ptr1, *slab1_ptr1, *slab2_ptr1, *slab3_ptr1;
- uint64_t *slab0_ptr2, *slab1_ptr2, *slab2_ptr2, *slab3_ptr2;
- uint64_t *slab0_ptr3, *slab1_ptr3, *slab2_ptr3, *slab3_ptr3;
- uint64_t sched0, sched1, sched2, sched3;
-
- uint32_t ip_da0, nh_ip0, port_id0;
- uint32_t ip_da1, nh_ip1, port_id1;
- uint32_t ip_da2, nh_ip2, port_id2;
- uint32_t ip_da3, nh_ip3, port_id3;
-
- uint16_t total_length0, data_offset0, ether_l2_length0;
- uint16_t total_length1, data_offset1, ether_l2_length1;
- uint16_t total_length2, data_offset2, ether_l2_length2;
- uint16_t total_length3, data_offset3, ether_l2_length3;
-
- /* Read */
- total_length0 = rte_bswap16(ip0->total_length);
- total_length1 = rte_bswap16(ip1->total_length);
- total_length2 = rte_bswap16(ip2->total_length);
- total_length3 = rte_bswap16(ip3->total_length);
-
- ip_da0 = ip0->dst_addr;
- ip_da1 = ip1->dst_addr;
- ip_da2 = ip2->dst_addr;
- ip_da3 = ip3->dst_addr;
-
- data_offset0 = entry0->data_offset;
- data_offset1 = entry1->data_offset;
- data_offset2 = entry2->data_offset;
- data_offset3 = entry3->data_offset;
-
- ether_l2_length0 = entry0->ether_l2_length;
- ether_l2_length1 = entry1->ether_l2_length;
- ether_l2_length2 = entry2->ether_l2_length;
- ether_l2_length3 = entry3->ether_l2_length;
-
- slab0_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
- entry0->slab_offset[0]);
- slab1_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
- entry0->slab_offset[1]);
- slab2_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
- entry0->slab_offset[2]);
- slab3_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
- entry0->slab_offset[3]);
-
- slab0_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
- entry1->slab_offset[0]);
- slab1_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
- entry1->slab_offset[1]);
- slab2_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
- entry1->slab_offset[2]);
- slab3_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
- entry1->slab_offset[3]);
-
- slab0_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
- entry2->slab_offset[0]);
- slab1_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
- entry2->slab_offset[1]);
- slab2_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
- entry2->slab_offset[2]);
- slab3_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
- entry2->slab_offset[3]);
-
- slab0_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
- entry3->slab_offset[0]);
- slab1_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
- entry3->slab_offset[1]);
- slab2_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
- entry3->slab_offset[2]);
- slab3_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
- entry3->slab_offset[3]);
-
- if (arp) {
- port_id0 = entry0->port_id;
- nh_ip0 = entry0->ip;
- if (entry0->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
- nh_ip0 = ip_da0;
-
- port_id1 = entry1->port_id;
- nh_ip1 = entry1->ip;
- if (entry1->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
- nh_ip1 = ip_da1;
-
- port_id2 = entry2->port_id;
- nh_ip2 = entry2->ip;
- if (entry2->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
- nh_ip2 = ip_da2;
-
- port_id3 = entry3->port_id;
- nh_ip3 = entry3->ip;
- if (entry3->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
- nh_ip3 = ip_da3;
- }
-
- /* Compute */
- total_length0 += ether_l2_length0;
- total_length1 += ether_l2_length1;
- total_length2 += ether_l2_length2;
- total_length3 += ether_l2_length3;
-
- if (qinq && qinq_sched) {
- uint32_t dscp0 = ip0->type_of_service >> 2;
- uint32_t dscp1 = ip1->type_of_service >> 2;
- uint32_t dscp2 = ip2->type_of_service >> 2;
- uint32_t dscp3 = ip3->type_of_service >> 2;
- uint32_t svlan0, cvlan0, tc0, tc_q0;
- uint32_t svlan1, cvlan1, tc1, tc_q1;
- uint32_t svlan2, cvlan2, tc2, tc_q2;
- uint32_t svlan3, cvlan3, tc3, tc_q3;
-
- if (qinq_sched == 1) {
- uint64_t slab_qinq0 = rte_bswap64(entry0->slab[0]);
- uint64_t slab_qinq1 = rte_bswap64(entry1->slab[0]);
- uint64_t slab_qinq2 = rte_bswap64(entry2->slab[0]);
- uint64_t slab_qinq3 = rte_bswap64(entry3->slab[0]);
-
- svlan0 = (slab_qinq0 >> 48) & 0xFFF;
- svlan1 = (slab_qinq1 >> 48) & 0xFFF;
- svlan2 = (slab_qinq2 >> 48) & 0xFFF;
- svlan3 = (slab_qinq3 >> 48) & 0xFFF;
-
- cvlan0 = (slab_qinq0 >> 16) & 0xFFF;
- cvlan1 = (slab_qinq1 >> 16) & 0xFFF;
- cvlan2 = (slab_qinq2 >> 16) & 0xFFF;
- cvlan3 = (slab_qinq3 >> 16) & 0xFFF;
-
- tc0 = (dscp0 >> 2) & 0x3;
- tc1 = (dscp1 >> 2) & 0x3;
- tc2 = (dscp2 >> 2) & 0x3;
- tc3 = (dscp3 >> 2) & 0x3;
-
- tc_q0 = dscp0 & 0x3;
- tc_q1 = dscp1 & 0x3;
- tc_q2 = dscp2 & 0x3;
- tc_q3 = dscp3 & 0x3;
- } else {
- uint32_t ip_src0 = rte_bswap32(ip0->src_addr);
- uint32_t ip_src1 = rte_bswap32(ip1->src_addr);
- uint32_t ip_src2 = rte_bswap32(ip2->src_addr);
- uint32_t ip_src3 = rte_bswap32(ip3->src_addr);
-
- svlan0 = 0;
- svlan1 = 0;
- svlan2 = 0;
- svlan3 = 0;
-
- cvlan0 = (ip_src0 >> 16) & 0xFFF;
- cvlan1 = (ip_src1 >> 16) & 0xFFF;
- cvlan2 = (ip_src2 >> 16) & 0xFFF;
- cvlan3 = (ip_src3 >> 16) & 0xFFF;
-
- tc0 = (ip_src0 >> 2) & 0x3;
- tc1 = (ip_src1 >> 2) & 0x3;
- tc2 = (ip_src2 >> 2) & 0x3;
- tc3 = (ip_src3 >> 2) & 0x3;
-
- tc_q0 = ip_src0 & 0x3;
- tc_q1 = ip_src1 & 0x3;
- tc_q2 = ip_src2 & 0x3;
- tc_q3 = ip_src3 & 0x3;
- }
-
- sched0 = RTE_SCHED_PORT_HIERARCHY(svlan0,
- cvlan0,
- tc0,
- tc_q0,
- e_RTE_METER_GREEN);
- sched1 = RTE_SCHED_PORT_HIERARCHY(svlan1,
- cvlan1,
- tc1,
- tc_q1,
- e_RTE_METER_GREEN);
- sched2 = RTE_SCHED_PORT_HIERARCHY(svlan2,
- cvlan2,
- tc2,
- tc_q2,
- e_RTE_METER_GREEN);
- sched3 = RTE_SCHED_PORT_HIERARCHY(svlan3,
- cvlan3,
- tc3,
- tc_q3,
- e_RTE_METER_GREEN);
-
- }
-
- /* Write */
- pkts[0]->data_off = data_offset0;
- pkts[1]->data_off = data_offset1;
- pkts[2]->data_off = data_offset2;
- pkts[3]->data_off = data_offset3;
-
- pkts[0]->data_len = total_length0;
- pkts[1]->data_len = total_length1;
- pkts[2]->data_len = total_length2;
- pkts[3]->data_len = total_length3;
-
- pkts[0]->pkt_len = total_length0;
- pkts[1]->pkt_len = total_length1;
- pkts[2]->pkt_len = total_length2;
- pkts[3]->pkt_len = total_length3;
-
- if ((qinq == 0) && (mpls == 0)) {
- *slab0_ptr0 = entry0->slab[0];
- *slab0_ptr1 = entry1->slab[0];
- *slab0_ptr2 = entry2->slab[0];
- *slab0_ptr3 = entry3->slab[0];
-
- if (arp == 0) {
- MACADDR_DST_WRITE(slab1_ptr0, entry0->slab[1]);
- MACADDR_DST_WRITE(slab1_ptr1, entry1->slab[1]);
- MACADDR_DST_WRITE(slab1_ptr2, entry2->slab[1]);
- MACADDR_DST_WRITE(slab1_ptr3, entry3->slab[1]);
- }
- }
-
- if (qinq) {
- *slab0_ptr0 = entry0->slab[0];
- *slab0_ptr1 = entry1->slab[0];
- *slab0_ptr2 = entry2->slab[0];
- *slab0_ptr3 = entry3->slab[0];
-
- *slab1_ptr0 = entry0->slab[1];
- *slab1_ptr1 = entry1->slab[1];
- *slab1_ptr2 = entry2->slab[1];
- *slab1_ptr3 = entry3->slab[1];
-
- if (arp == 0) {
- MACADDR_DST_WRITE(slab2_ptr0, entry0->slab[2]);
- MACADDR_DST_WRITE(slab2_ptr1, entry1->slab[2]);
- MACADDR_DST_WRITE(slab2_ptr2, entry2->slab[2]);
- MACADDR_DST_WRITE(slab2_ptr3, entry3->slab[2]);
- }
-
- if (qinq_sched) {
- pkts[0]->hash.sched.lo = sched0 & 0xFFFFFFFF;
- pkts[0]->hash.sched.hi = sched0 >> 32;
- pkts[1]->hash.sched.lo = sched1 & 0xFFFFFFFF;
- pkts[1]->hash.sched.hi = sched1 >> 32;
- pkts[2]->hash.sched.lo = sched2 & 0xFFFFFFFF;
- pkts[2]->hash.sched.hi = sched2 >> 32;
- pkts[3]->hash.sched.lo = sched3 & 0xFFFFFFFF;
- pkts[3]->hash.sched.hi = sched3 >> 32;
- }
- }
-
- if (mpls) {
- if (mpls_color_mark) {
- uint64_t mpls_exp0 = rte_bswap64(
- (MPLS_LABEL(0, pkt0_color, 0, 0) << 32) |
- MPLS_LABEL(0, pkt0_color, 0, 0));
- uint64_t mpls_exp1 = rte_bswap64(
- (MPLS_LABEL(0, pkt1_color, 0, 0) << 32) |
- MPLS_LABEL(0, pkt1_color, 0, 0));
- uint64_t mpls_exp2 = rte_bswap64(
- (MPLS_LABEL(0, pkt2_color, 0, 0) << 32) |
- MPLS_LABEL(0, pkt2_color, 0, 0));
- uint64_t mpls_exp3 = rte_bswap64(
- (MPLS_LABEL(0, pkt3_color, 0, 0) << 32) |
- MPLS_LABEL(0, pkt3_color, 0, 0));
-
- *slab0_ptr0 = entry0->slab[0] | mpls_exp0;
- *slab0_ptr1 = entry1->slab[0] | mpls_exp1;
- *slab0_ptr2 = entry2->slab[0] | mpls_exp2;
- *slab0_ptr3 = entry3->slab[0] | mpls_exp3;
-
- *slab1_ptr0 = entry0->slab[1] | mpls_exp0;
- *slab1_ptr1 = entry1->slab[1] | mpls_exp1;
- *slab1_ptr2 = entry2->slab[1] | mpls_exp2;
- *slab1_ptr3 = entry3->slab[1] | mpls_exp3;
-
- *slab2_ptr0 = entry0->slab[2];
- *slab2_ptr1 = entry1->slab[2];
- *slab2_ptr2 = entry2->slab[2];
- *slab2_ptr3 = entry3->slab[2];
- } else {
- *slab0_ptr0 = entry0->slab[0];
- *slab0_ptr1 = entry1->slab[0];
- *slab0_ptr2 = entry2->slab[0];
- *slab0_ptr3 = entry3->slab[0];
-
- *slab1_ptr0 = entry0->slab[1];
- *slab1_ptr1 = entry1->slab[1];
- *slab1_ptr2 = entry2->slab[1];
- *slab1_ptr3 = entry3->slab[1];
-
- *slab2_ptr0 = entry0->slab[2];
- *slab2_ptr1 = entry1->slab[2];
- *slab2_ptr2 = entry2->slab[2];
- *slab2_ptr3 = entry3->slab[2];
- }
-
- if (arp == 0) {
- MACADDR_DST_WRITE(slab3_ptr0, entry0->slab[3]);
- MACADDR_DST_WRITE(slab3_ptr1, entry1->slab[3]);
- MACADDR_DST_WRITE(slab3_ptr2, entry2->slab[3]);
- MACADDR_DST_WRITE(slab3_ptr3, entry3->slab[3]);
- }
- }
-
- if (arp) {
- arp_key0->port_id = port_id0;
- arp_key1->port_id = port_id1;
- arp_key2->port_id = port_id2;
- arp_key3->port_id = port_id3;
-
- arp_key0->ip = nh_ip0;
- arp_key1->ip = nh_ip1;
- arp_key2->ip = nh_ip2;
- arp_key3->ip = nh_ip3;
- }
-}
-
-#define PKT_WORK_ROUTING_ETHERNET(arp) \
-static inline void \
-pkt_work_routing_ether_arp##arp( \
- struct rte_mbuf *pkt, \
- struct rte_pipeline_table_entry *table_entry, \
- void *arg) \
-{ \
- pkt_work_routing(pkt, table_entry, arg, arp, 0, 0, 0, 0);\
-}
-
-#define PKT4_WORK_ROUTING_ETHERNET(arp) \
-static inline void \
-pkt4_work_routing_ether_arp##arp( \
- struct rte_mbuf **pkts, \
- struct rte_pipeline_table_entry **table_entries, \
- void *arg) \
-{ \
- pkt4_work_routing(pkts, table_entries, arg, arp, 0, 0, 0, 0);\
-}
-
-#define routing_table_ah_hit_ether(arp) \
-PKT_WORK_ROUTING_ETHERNET(arp) \
-PKT4_WORK_ROUTING_ETHERNET(arp) \
-PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_arp##arp, \
- pkt_work_routing_ether_arp##arp, \
- pkt4_work_routing_ether_arp##arp)
-
-routing_table_ah_hit_ether(0)
-routing_table_ah_hit_ether(1)
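-
-/*
- * Each instantiation above expands to a single-packet and a 4-packet
- * work function plus a table action handler (e.g.
- * routing_table_ah_hit_ether_arp1), so the constant arp/qinq/mpls
- * flags are folded out of pkt_work_routing() at compile time instead
- * of being tested per packet.
- */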
-
-#define PKT_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
-static inline void \
-pkt_work_routing_ether_qinq_sched##sched##_arp##arp( \
- struct rte_mbuf *pkt, \
- struct rte_pipeline_table_entry *table_entry, \
- void *arg) \
-{ \
- pkt_work_routing(pkt, table_entry, arg, arp, 1, sched, 0, 0);\
-}
-
-#define PKT4_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
-static inline void \
-pkt4_work_routing_ether_qinq_sched##sched##_arp##arp( \
- struct rte_mbuf **pkts, \
- struct rte_pipeline_table_entry **table_entries, \
- void *arg) \
-{ \
- pkt4_work_routing(pkts, table_entries, arg, arp, 1, sched, 0, 0);\
-}
-
-#define routing_table_ah_hit_ether_qinq(sched, arp) \
-PKT_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
-PKT4_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
-PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_qinq_sched##sched##_arp##arp,\
- pkt_work_routing_ether_qinq_sched##sched##_arp##arp, \
- pkt4_work_routing_ether_qinq_sched##sched##_arp##arp)
-
-routing_table_ah_hit_ether_qinq(0, 0)
-routing_table_ah_hit_ether_qinq(1, 0)
-routing_table_ah_hit_ether_qinq(2, 0)
-routing_table_ah_hit_ether_qinq(0, 1)
-routing_table_ah_hit_ether_qinq(1, 1)
-routing_table_ah_hit_ether_qinq(2, 1)
-
-#define PKT_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
-static inline void \
-pkt_work_routing_ether_mpls_color##color##_arp##arp( \
- struct rte_mbuf *pkt, \
- struct rte_pipeline_table_entry *table_entry, \
- void *arg) \
-{ \
- pkt_work_routing(pkt, table_entry, arg, arp, 0, 0, 1, color);\
-}
-
-#define PKT4_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
-static inline void \
-pkt4_work_routing_ether_mpls_color##color##_arp##arp( \
- struct rte_mbuf **pkts, \
- struct rte_pipeline_table_entry **table_entries, \
- void *arg) \
-{ \
- pkt4_work_routing(pkts, table_entries, arg, arp, 0, 0, 1, color);\
-}
-
-#define routing_table_ah_hit_ether_mpls(color, arp) \
-PKT_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
-PKT4_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
-PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_mpls_color##color##_arp##arp,\
- pkt_work_routing_ether_mpls_color##color##_arp##arp, \
- pkt4_work_routing_ether_mpls_color##color##_arp##arp)
-
-routing_table_ah_hit_ether_mpls(0, 0)
-routing_table_ah_hit_ether_mpls(1, 0)
-routing_table_ah_hit_ether_mpls(0, 1)
-routing_table_ah_hit_ether_mpls(1, 1)
-
-static rte_pipeline_table_action_handler_hit
-get_routing_table_ah_hit(struct pipeline_routing *p)
-{
- if (p->params.dbg_ah_disable)
- return NULL;
-
- switch (p->params.encap) {
- case PIPELINE_ROUTING_ENCAP_ETHERNET:
- return (p->params.n_arp_entries) ?
- routing_table_ah_hit_ether_arp1 :
- routing_table_ah_hit_ether_arp0;
-
- case PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ:
- if (p->params.n_arp_entries)
- switch (p->params.qinq_sched) {
- case 0:
- return routing_table_ah_hit_ether_qinq_sched0_arp1;
- case 1:
- return routing_table_ah_hit_ether_qinq_sched1_arp1;
- case 2:
- return routing_table_ah_hit_ether_qinq_sched2_arp1;
- default:
- return NULL;
- }
- else
- switch (p->params.qinq_sched) {
- case 0:
- return routing_table_ah_hit_ether_qinq_sched0_arp0;
- case 1:
- return routing_table_ah_hit_ether_qinq_sched1_arp0;
- case 2:
- return routing_table_ah_hit_ether_qinq_sched2_arp0;
- default:
- return NULL;
- }
-
- case PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS:
- if (p->params.n_arp_entries) {
- if (p->params.mpls_color_mark)
- return routing_table_ah_hit_ether_mpls_color1_arp1;
- else
- return routing_table_ah_hit_ether_mpls_color0_arp1;
- } else {
- if (p->params.mpls_color_mark)
- return routing_table_ah_hit_ether_mpls_color1_arp0;
- else
- return routing_table_ah_hit_ether_mpls_color0_arp0;
- }
-
- default:
- return NULL;
- }
-}
-
-/*
- * ARP table
- */
-struct arp_table_entry {
- struct rte_pipeline_table_entry head;
- uint64_t macaddr;
-};
-
-/**
- * ARP table AH
- */
-static inline void
-pkt_work_arp(
- struct rte_mbuf *pkt,
- struct rte_pipeline_table_entry *table_entry,
- __rte_unused void *arg)
-{
- struct arp_table_entry *entry = (struct arp_table_entry *) table_entry;
-
- /* Read */
- uint64_t macaddr_dst = entry->macaddr;
- uint64_t *slab_ptr = (uint64_t *) ((char *) pkt->buf_addr +
- (pkt->data_off - 2));
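-
- /*
- * The slab pointer starts 2 bytes before the frame: MACADDR_DST_WRITE()
- * skips the first 2 bytes of its slab, so the store lands exactly on
- * the 6-byte destination MAC at the head of the Ethernet header.
- */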
-
- /* Compute */
-
- /* Write */
- MACADDR_DST_WRITE(slab_ptr, macaddr_dst);
-}
-
-static inline void
-pkt4_work_arp(
- struct rte_mbuf **pkts,
- struct rte_pipeline_table_entry **table_entries,
- __rte_unused void *arg)
-{
- struct arp_table_entry *entry0 =
- (struct arp_table_entry *) table_entries[0];
- struct arp_table_entry *entry1 =
- (struct arp_table_entry *) table_entries[1];
- struct arp_table_entry *entry2 =
- (struct arp_table_entry *) table_entries[2];
- struct arp_table_entry *entry3 =
- (struct arp_table_entry *) table_entries[3];
-
- /* Read */
- uint64_t macaddr_dst0 = entry0->macaddr;
- uint64_t macaddr_dst1 = entry1->macaddr;
- uint64_t macaddr_dst2 = entry2->macaddr;
- uint64_t macaddr_dst3 = entry3->macaddr;
-
- uint64_t *slab_ptr0 = (uint64_t *) ((char *) pkts[0]->buf_addr +
- (pkts[0]->data_off - 2));
- uint64_t *slab_ptr1 = (uint64_t *) ((char *) pkts[1]->buf_addr +
- (pkts[1]->data_off - 2));
- uint64_t *slab_ptr2 = (uint64_t *) ((char *) pkts[2]->buf_addr +
- (pkts[2]->data_off - 2));
- uint64_t *slab_ptr3 = (uint64_t *) ((char *) pkts[3]->buf_addr +
- (pkts[3]->data_off - 2));
-
- /* Compute */
-
- /* Write */
- MACADDR_DST_WRITE(slab_ptr0, macaddr_dst0);
- MACADDR_DST_WRITE(slab_ptr1, macaddr_dst1);
- MACADDR_DST_WRITE(slab_ptr2, macaddr_dst2);
- MACADDR_DST_WRITE(slab_ptr3, macaddr_dst3);
-}
-
-PIPELINE_TABLE_AH_HIT(arp_table_ah_hit,
- pkt_work_arp,
- pkt4_work_arp);
-
-static rte_pipeline_table_action_handler_hit
-get_arp_table_ah_hit(struct pipeline_routing *p)
-{
- if (p->params.dbg_ah_disable)
- return NULL;
-
- return arp_table_ah_hit;
-}
-
-/*
- * Argument parsing
- */
-int
-pipeline_routing_parse_args(struct pipeline_routing_params *p,
- struct pipeline_params *params)
-{
- uint32_t n_routes_present = 0;
- uint32_t port_local_dest_present = 0;
- uint32_t encap_present = 0;
- uint32_t qinq_sched_present = 0;
- uint32_t mpls_color_mark_present = 0;
- uint32_t n_arp_entries_present = 0;
- uint32_t ip_hdr_offset_present = 0;
- uint32_t arp_key_offset_present = 0;
- uint32_t color_offset_present = 0;
- uint32_t dbg_ah_disable_present = 0;
- uint32_t i;
-
- /* default values */
- p->n_routes = PIPELINE_ROUTING_N_ROUTES_DEFAULT;
- p->port_local_dest = params->n_ports_out - 1;
- p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET;
- p->qinq_sched = 0;
- p->mpls_color_mark = 0;
- p->n_arp_entries = 0;
- p->dbg_ah_disable = 0;
-
- for (i = 0; i < params->n_args; i++) {
- char *arg_name = params->args_name[i];
- char *arg_value = params->args_value[i];
-
- /* n_routes */
- if (strcmp(arg_name, "n_routes") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- n_routes_present == 0, params->name,
- arg_name);
- n_routes_present = 1;
-
- status = parser_read_uint32(&p->n_routes,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
- (p->n_routes != 0)), params->name,
- arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
- /* port_local_dest */
- if (strcmp(arg_name, "port_local_dest") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- port_local_dest_present == 0, params->name,
- arg_name);
- port_local_dest_present = 1;
-
- status = parser_read_uint32(&p->port_local_dest,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
- (p->port_local_dest < params->n_ports_out)),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* encap */
- if (strcmp(arg_name, "encap") == 0) {
- PIPELINE_PARSE_ERR_DUPLICATE(encap_present == 0,
- params->name, arg_name);
- encap_present = 1;
-
- /* ethernet */
- if (strcmp(arg_value, "ethernet") == 0) {
- p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET;
- continue;
- }
-
- /* ethernet_qinq */
- if (strcmp(arg_value, "ethernet_qinq") == 0) {
- p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ;
- continue;
- }
-
- /* ethernet_mpls */
- if (strcmp(arg_value, "ethernet_mpls") == 0) {
- p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS;
- continue;
- }
-
- /* any other */
- PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
- arg_name, arg_value);
- }
-
- /* qinq_sched */
- if (strcmp(arg_name, "qinq_sched") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- qinq_sched_present == 0, params->name,
- arg_name);
- qinq_sched_present = 1;
-
- status = parser_read_arg_bool(arg_value);
- if (status == -EINVAL) {
- if (strcmp(arg_value, "test") == 0) {
- p->qinq_sched = 2;
- continue;
- }
- } else {
- p->qinq_sched = status;
- continue;
- }
-
- PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
- arg_name, arg_value);
- }
-
- /* mpls_color_mark */
- if (strcmp(arg_name, "mpls_color_mark") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- mpls_color_mark_present == 0,
- params->name, arg_name);
- mpls_color_mark_present = 1;
-
- status = parser_read_arg_bool(arg_value);
- if (status >= 0) {
- p->mpls_color_mark = status;
- continue;
- }
-
- PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
- arg_name, arg_value);
- }
-
- /* n_arp_entries */
- if (strcmp(arg_name, "n_arp_entries") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- n_arp_entries_present == 0, params->name,
- arg_name);
- n_arp_entries_present = 1;
-
- status = parser_read_uint32(&p->n_arp_entries,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* ip_hdr_offset */
- if (strcmp(arg_name, "ip_hdr_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- ip_hdr_offset_present == 0, params->name,
- arg_name);
- ip_hdr_offset_present = 1;
-
- status = parser_read_uint32(&p->ip_hdr_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* arp_key_offset */
- if (strcmp(arg_name, "arp_key_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- arp_key_offset_present == 0, params->name,
- arg_name);
- arp_key_offset_present = 1;
-
- status = parser_read_uint32(&p->arp_key_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* color_offset */
- if (strcmp(arg_name, "color_offset") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- color_offset_present == 0, params->name,
- arg_name);
- color_offset_present = 1;
-
- status = parser_read_uint32(&p->color_offset,
- arg_value);
- PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
- params->name, arg_name, arg_value);
- PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
- params->name, arg_name, arg_value);
-
- continue;
- }
-
- /* debug */
- if (strcmp(arg_name, "dbg_ah_disable") == 0) {
- int status;
-
- PIPELINE_PARSE_ERR_DUPLICATE(
- dbg_ah_disable_present == 0, params->name,
- arg_name);
- dbg_ah_disable_present = 1;
-
- status = parser_read_arg_bool(arg_value);
- if (status >= 0) {
- p->dbg_ah_disable = status;
- continue;
- }
-
- PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
- arg_name, arg_value);
-
- continue;
- }
-
- /* any other */
- PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
- }
-
- /* Check that mandatory arguments are present */
- PIPELINE_PARSE_ERR_MANDATORY(ip_hdr_offset_present, params->name,
- "ip_hdr_offset");
-
- /* Check relations between arguments */
- switch (p->encap) {
- case PIPELINE_ROUTING_ENCAP_ETHERNET:
- PIPELINE_ARG_CHECK((!p->qinq_sched), "Parse error in "
- "section \"%s\": encap = ethernet, therefore "
- "qinq_sched = yes/test is not allowed",
- params->name);
- PIPELINE_ARG_CHECK((!p->mpls_color_mark), "Parse error "
- "in section \"%s\": encap = ethernet, therefore "
- "mpls_color_mark = yes is not allowed",
- params->name);
- PIPELINE_ARG_CHECK((!color_offset_present), "Parse error "
- "in section \"%s\": encap = ethernet, therefore "
- "color_offset is not allowed",
- params->name);
- break;
-
- case PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ:
- PIPELINE_ARG_CHECK((!p->mpls_color_mark), "Parse error "
- "in section \"%s\": encap = ethernet_qinq, "
- "therefore mpls_color_mark = yes is not allowed",
- params->name);
- PIPELINE_ARG_CHECK((!color_offset_present), "Parse error "
- "in section \"%s\": encap = ethernet_qinq, "
- "therefore color_offset is not allowed",
- params->name);
- break;
-
- case PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS:
- PIPELINE_ARG_CHECK((!p->qinq_sched), "Parse error in "
- "section \"%s\": encap = ethernet_mpls, therefore "
- "qinq_sched = yes/test is not allowed",
- params->name);
- break;
- }
-
- PIPELINE_ARG_CHECK((!(p->n_arp_entries &&
- (!arp_key_offset_present))), "Parse error in section "
- "\"%s\": n_arp_entries is set while "
- "arp_key_offset is not set", params->name);
-
- PIPELINE_ARG_CHECK((!((p->n_arp_entries == 0) &&
- arp_key_offset_present)), "Parse error in section "
- "\"%s\": arp_key_offset present while "
- "n_arp_entries is not set", params->name);
-
- return 0;
-}
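-
-/*
- * Illustrative config section consumed by this parser (the argument
- * names are the ones handled above; all values are examples only):
- *
- *   [PIPELINE1]
- *   type = ROUTING
- *   encap = ethernet_mpls
- *   mpls_color_mark = yes
- *   n_routes = 4096
- *   ip_hdr_offset = 270
- *   color_offset = 128
- *   n_arp_entries = 1024
- *   arp_key_offset = 192
- */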
-
-static void *
-pipeline_routing_init(struct pipeline_params *params,
- __rte_unused void *arg)
-{
- struct pipeline *p;
- struct pipeline_routing *p_rt;
- uint32_t size, i;
-
- /* Check input arguments */
- if ((params == NULL) ||
- (params->n_ports_in == 0) ||
- (params->n_ports_out == 0))
- return NULL;
-
- /* Memory allocation */
- size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_routing));
- p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
- p_rt = (struct pipeline_routing *) p;
- if (p == NULL)
- return NULL;
-
- strcpy(p->name, params->name);
- p->log_level = params->log_level;
-
- PLOG(p, HIGH, "Routing");
-
- /* Parse arguments */
- if (pipeline_routing_parse_args(&p_rt->params, params)) {
- rte_free(p); /* do not leak the pipeline on parse error */
- return NULL;
- }
-
- /* Pipeline */
- {
- struct rte_pipeline_params pipeline_params = {
- .name = params->name,
- .socket_id = params->socket_id,
- .offset_port_id = 0,
- };
-
- p->p = rte_pipeline_create(&pipeline_params);
- if (p->p == NULL) {
- rte_free(p);
- return NULL;
- }
- }
-
- /* Input ports */
- p->n_ports_in = params->n_ports_in;
- for (i = 0; i < p->n_ports_in; i++) {
- struct rte_pipeline_port_in_params port_params = {
- .ops = pipeline_port_in_params_get_ops(
- &params->port_in[i]),
- .arg_create = pipeline_port_in_params_convert(
- &params->port_in[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- .burst_size = params->port_in[i].burst_size,
- };
-
- int status = rte_pipeline_port_in_create(p->p,
- &port_params,
- &p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Output ports */
- p->n_ports_out = params->n_ports_out;
- for (i = 0; i < p->n_ports_out; i++) {
- struct rte_pipeline_port_out_params port_params = {
- .ops = pipeline_port_out_params_get_ops(
- &params->port_out[i]),
- .arg_create = pipeline_port_out_params_convert(
- &params->port_out[i]),
- .f_action = NULL,
- .arg_ah = NULL,
- };
-
- int status = rte_pipeline_port_out_create(p->p,
- &port_params,
- &p->port_out_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Routing table */
- p->n_tables = 1;
- {
- struct rte_table_lpm_params table_lpm_params = {
- .name = p->name,
- .n_rules = p_rt->params.n_routes,
- .number_tbl8s = PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s,
- .flags = 0,
- .entry_unique_size = sizeof(struct routing_table_entry),
- .offset = p_rt->params.ip_hdr_offset +
- __builtin_offsetof(struct ipv4_hdr, dst_addr),
- };
-
- struct rte_pipeline_table_params table_params = {
- .ops = &rte_table_lpm_ops,
- .arg_create = &table_lpm_params,
- .f_action_hit = get_routing_table_ah_hit(p_rt),
- .f_action_miss = NULL,
- .arg_ah = p_rt,
- .action_data_size =
- sizeof(struct routing_table_entry) -
- sizeof(struct rte_pipeline_table_entry),
- };
-
- int status;
-
- status = rte_pipeline_table_create(p->p,
- &table_params,
- &p->table_id[0]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* ARP table configuration */
- if (p_rt->params.n_arp_entries) {
- struct rte_table_hash_params table_arp_params = {
- .name = p->name,
- .key_size = 8,
- .key_offset = p_rt->params.arp_key_offset,
- .key_mask = NULL,
- .n_keys = p_rt->params.n_arp_entries,
- .n_buckets =
- rte_align32pow2(p_rt->params.n_arp_entries / 4),
- .f_hash = hash_default_key8,
- .seed = 0,
- };
-
- struct rte_pipeline_table_params table_params = {
- .ops = &rte_table_hash_key8_ext_ops,
- .arg_create = &table_arp_params,
- .f_action_hit = get_arp_table_ah_hit(p_rt),
- .f_action_miss = NULL,
- .arg_ah = p_rt,
- .action_data_size = sizeof(struct arp_table_entry) -
- sizeof(struct rte_pipeline_table_entry),
- };
-
- int status;
-
- status = rte_pipeline_table_create(p->p,
- &table_params,
- &p->table_id[1]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
-
- p->n_tables++;
- }
-
- /* Connecting input ports to tables */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_connect_to_table(p->p,
- p->port_in_id[i],
- p->table_id[0]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Enable input ports */
- for (i = 0; i < p->n_ports_in; i++) {
- int status = rte_pipeline_port_in_enable(p->p,
- p->port_in_id[i]);
-
- if (status) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
- }
-
- /* Check pipeline consistency */
- if (rte_pipeline_check(p->p) < 0) {
- rte_pipeline_free(p->p);
- rte_free(p);
- return NULL;
- }
-
- /* Message queues */
- p->n_msgq = params->n_msgq;
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_in[i] = params->msgq_in[i];
- for (i = 0; i < p->n_msgq; i++)
- p->msgq_out[i] = params->msgq_out[i];
-
- /* Message handlers */
- memcpy(p->handlers, handlers, sizeof(p->handlers));
- memcpy(p_rt->custom_handlers,
- custom_handlers,
- sizeof(p_rt->custom_handlers));
-
- return p;
-}
-
-static int
-pipeline_routing_free(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- /* Check input arguments */
- if (p == NULL)
- return -1;
-
- /* Free resources */
- rte_pipeline_free(p->p);
- rte_free(p);
- return 0;
-}
-
-static int
-pipeline_routing_timer(void *pipeline)
-{
- struct pipeline *p = (struct pipeline *) pipeline;
-
- pipeline_msg_req_handle(p);
- rte_pipeline_flush(p->p);
-
- return 0;
-}
-
-void *
-pipeline_routing_msg_req_custom_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
- struct pipeline_custom_msg_req *req = msg;
- pipeline_msg_req_handler f_handle;
-
- f_handle = (req->subtype < PIPELINE_ROUTING_MSG_REQS) ?
- p_rt->custom_handlers[req->subtype] :
- pipeline_msg_req_invalid_handler;
-
- if (f_handle == NULL)
- f_handle = pipeline_msg_req_invalid_handler;
-
- return f_handle(p, req);
-}
-
-void *
-pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
- struct pipeline_routing_route_add_msg_req *req = msg;
- struct pipeline_routing_route_add_msg_rsp *rsp = msg;
-
- struct rte_table_lpm_key key = {
- .ip = req->key.key.ipv4.ip,
- .depth = req->key.key.ipv4.depth,
- };
-
- struct routing_table_entry entry_arp0 = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[req->data.port_id]},
- },
-
- .flags = req->data.flags,
- .port_id = req->data.port_id,
- .ip = 0,
- .data_offset = 0,
- .ether_l2_length = 0,
- .slab = {0},
- .slab_offset = {0},
- };
-
- struct routing_table_entry entry_arp1 = {
- .head = {
- .action = RTE_PIPELINE_ACTION_TABLE,
- {.table_id = p->table_id[1]},
- },
-
- .flags = req->data.flags,
- .port_id = req->data.port_id,
- .ip = rte_bswap32(req->data.ethernet.ip),
- .data_offset = 0,
- .ether_l2_length = 0,
- .slab = {0},
- .slab_offset = {0},
- };
-
- struct rte_pipeline_table_entry *entry = (p_rt->params.n_arp_entries) ?
- (struct rte_pipeline_table_entry *) &entry_arp1 :
- (struct rte_pipeline_table_entry *) &entry_arp0;
-
- if ((req->key.type != PIPELINE_ROUTING_ROUTE_IPV4) ||
- ((p_rt->params.n_arp_entries == 0) &&
- (req->data.flags & PIPELINE_ROUTING_ROUTE_ARP)) ||
- (p_rt->params.n_arp_entries &&
- ((req->data.flags & PIPELINE_ROUTING_ROUTE_ARP) == 0)) ||
- ((p_rt->params.encap != PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
- (req->data.flags & PIPELINE_ROUTING_ROUTE_QINQ)) ||
- ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
- ((req->data.flags & PIPELINE_ROUTING_ROUTE_QINQ) == 0)) ||
- ((p_rt->params.encap != PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
- (req->data.flags & PIPELINE_ROUTING_ROUTE_MPLS)) ||
- ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
- ((req->data.flags & PIPELINE_ROUTING_ROUTE_MPLS) == 0))) {
- rsp->status = -1;
- return rsp;
- }
-
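- /*
- * Each branch below precomputes up to four 8-byte "slabs" holding the
- * L2 encapsulation (MACs, QinQ tags or MPLS stack) plus the mbuf
- * offsets they must be written to, so the fast path in
- * pkt_work_routing() only performs fixed 64-bit stores.
- */
-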
- /* Ether - ARP off */
- if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
- (p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
- uint64_t macaddr_dst;
- uint64_t ethertype = ETHER_TYPE_IPv4;
-
- macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
- macaddr_dst = rte_bswap64(macaddr_dst << 16);
-
- entry_arp0.slab[0] =
- SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype);
- entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
-
- entry_arp0.slab[1] = rte_bswap64(macaddr_dst);
- entry_arp0.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
-
- entry_arp0.data_offset = entry_arp0.slab_offset[1] + 2
- - sizeof(struct rte_mbuf);
- entry_arp0.ether_l2_length = 14;
- }
-
- /* Ether - ARP on */
- if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
- p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
- uint64_t ethertype = ETHER_TYPE_IPv4;
-
- entry_arp1.slab[0] =
- SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype);
- entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
-
- entry_arp1.data_offset = entry_arp1.slab_offset[0] - 6
- - sizeof(struct rte_mbuf);
- entry_arp1.ether_l2_length = 14;
- }
-
- /* Ether QinQ - ARP off */
- if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
- (p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
- uint64_t macaddr_dst;
- uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
- uint64_t ethertype_vlan = 0x8100;
- uint64_t ethertype_qinq = 0x9100;
- uint64_t svlan = req->data.l2.qinq.svlan;
- uint64_t cvlan = req->data.l2.qinq.cvlan;
-
- macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
- macaddr_dst = rte_bswap64(macaddr_dst << 16);
-
- entry_arp0.slab[0] = rte_bswap64((svlan << 48) |
- (ethertype_vlan << 32) |
- (cvlan << 16) |
- ethertype_ipv4);
- entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
-
- entry_arp0.slab[1] =
- SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_qinq);
- entry_arp0.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
-
- entry_arp0.slab[2] = rte_bswap64(macaddr_dst);
- entry_arp0.slab_offset[2] = p_rt->params.ip_hdr_offset - 3 * 8;
-
- entry_arp0.data_offset = entry_arp0.slab_offset[2] + 2
- - sizeof(struct rte_mbuf);
- entry_arp0.ether_l2_length = 22;
- }
-
- /* Ether QinQ - ARP on */
- if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
- p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
- uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
- uint64_t ethertype_vlan = 0x8100;
- uint64_t ethertype_qinq = 0x9100;
- uint64_t svlan = req->data.l2.qinq.svlan;
- uint64_t cvlan = req->data.l2.qinq.cvlan;
-
- entry_arp1.slab[0] = rte_bswap64((svlan << 48) |
- (ethertype_vlan << 32) |
- (cvlan << 16) |
- ethertype_ipv4);
- entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
-
- entry_arp1.slab[1] =
- SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_qinq);
- entry_arp1.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
-
- entry_arp1.data_offset = entry_arp1.slab_offset[1] - 6
- - sizeof(struct rte_mbuf);
- entry_arp1.ether_l2_length = 22;
- }
-
- /* Ether MPLS - ARP off */
- if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
- (p_rt->params.n_arp_entries == 0)) {
- uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
- uint64_t macaddr_dst;
- uint64_t ethertype_mpls = 0x8847;
-
- uint64_t label0 = req->data.l2.mpls.labels[0];
- uint64_t label1 = req->data.l2.mpls.labels[1];
- uint64_t label2 = req->data.l2.mpls.labels[2];
- uint64_t label3 = req->data.l2.mpls.labels[3];
- uint32_t n_labels = req->data.l2.mpls.n_labels;
-
- macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
- macaddr_dst = rte_bswap64(macaddr_dst << 16);
-
- switch (n_labels) {
- case 1:
- entry_arp0.slab[0] = 0;
- entry_arp0.slab_offset[0] =
- p_rt->params.ip_hdr_offset - 8;
-
- entry_arp0.slab[1] = rte_bswap64(
- MPLS_LABEL(label0, 0, 1, 0));
- entry_arp0.slab_offset[1] =
- p_rt->params.ip_hdr_offset - 8;
- break;
-
- case 2:
- entry_arp0.slab[0] = 0;
- entry_arp0.slab_offset[0] =
- p_rt->params.ip_hdr_offset - 8;
-
- entry_arp0.slab[1] = rte_bswap64(
- (MPLS_LABEL(label0, 0, 0, 0) << 32) |
- MPLS_LABEL(label1, 0, 1, 0));
- entry_arp0.slab_offset[1] =
- p_rt->params.ip_hdr_offset - 8;
- break;
-
- case 3:
- entry_arp0.slab[0] = rte_bswap64(
- (MPLS_LABEL(label1, 0, 0, 0) << 32) |
- MPLS_LABEL(label2, 0, 1, 0));
- entry_arp0.slab_offset[0] =
- p_rt->params.ip_hdr_offset - 8;
-
- entry_arp0.slab[1] = rte_bswap64(
- MPLS_LABEL(label0, 0, 0, 0));
- entry_arp0.slab_offset[1] =
- p_rt->params.ip_hdr_offset - 2 * 8;
- break;
-
- case 4:
- entry_arp0.slab[0] = rte_bswap64(
- (MPLS_LABEL(label2, 0, 0, 0) << 32) |
- MPLS_LABEL(label3, 0, 1, 0));
- entry_arp0.slab_offset[0] =
- p_rt->params.ip_hdr_offset - 8;
-
- entry_arp0.slab[1] = rte_bswap64(
- (MPLS_LABEL(label0, 0, 0, 0) << 32) |
- MPLS_LABEL(label1, 0, 0, 0));
- entry_arp0.slab_offset[1] =
- p_rt->params.ip_hdr_offset - 2 * 8;
- break;
-
- default:
- rsp->status = -1;
- return rsp;
- }
-
- entry_arp0.slab[2] =
- SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_mpls);
- entry_arp0.slab_offset[2] = p_rt->params.ip_hdr_offset -
- (n_labels * 4 + 8);
-
- entry_arp0.slab[3] = rte_bswap64(macaddr_dst);
- entry_arp0.slab_offset[3] = p_rt->params.ip_hdr_offset -
- (n_labels * 4 + 2 * 8);
-
- entry_arp0.data_offset = entry_arp0.slab_offset[3] + 2
- - sizeof(struct rte_mbuf);
- entry_arp0.ether_l2_length = n_labels * 4 + 14;
- }
-
- /* Ether MPLS - ARP on */
- if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
- p_rt->params.n_arp_entries) {
- uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
- uint64_t ethertype_mpls = 0x8847;
-
- uint64_t label0 = req->data.l2.mpls.labels[0];
- uint64_t label1 = req->data.l2.mpls.labels[1];
- uint64_t label2 = req->data.l2.mpls.labels[2];
- uint64_t label3 = req->data.l2.mpls.labels[3];
- uint32_t n_labels = req->data.l2.mpls.n_labels;
-
- switch (n_labels) {
- case 1:
- entry_arp1.slab[0] = 0;
- entry_arp1.slab_offset[0] =
- p_rt->params.ip_hdr_offset - 8;
-
- entry_arp1.slab[1] = rte_bswap64(
- MPLS_LABEL(label0, 0, 1, 0));
- entry_arp1.slab_offset[1] =
- p_rt->params.ip_hdr_offset - 8;
- break;
-
- case 2:
- entry_arp1.slab[0] = 0;
- entry_arp1.slab_offset[0] =
- p_rt->params.ip_hdr_offset - 8;
-
- entry_arp1.slab[1] = rte_bswap64(
- (MPLS_LABEL(label0, 0, 0, 0) << 32) |
- MPLS_LABEL(label1, 0, 1, 0));
- entry_arp1.slab_offset[1] =
- p_rt->params.ip_hdr_offset - 8;
- break;
-
- case 3:
- entry_arp1.slab[0] = rte_bswap64(
- (MPLS_LABEL(label1, 0, 0, 0) << 32) |
- MPLS_LABEL(label2, 0, 1, 0));
- entry_arp1.slab_offset[0] =
- p_rt->params.ip_hdr_offset - 8;
-
- entry_arp1.slab[1] = rte_bswap64(
- MPLS_LABEL(label0, 0, 0, 0));
- entry_arp1.slab_offset[1] =
- p_rt->params.ip_hdr_offset - 2 * 8;
- break;
-
- case 4:
- entry_arp1.slab[0] = rte_bswap64(
- (MPLS_LABEL(label2, 0, 0, 0) << 32) |
- MPLS_LABEL(label3, 0, 1, 0));
- entry_arp1.slab_offset[0] =
- p_rt->params.ip_hdr_offset - 8;
-
- entry_arp1.slab[1] = rte_bswap64(
- (MPLS_LABEL(label0, 0, 0, 0) << 32) |
- MPLS_LABEL(label1, 0, 0, 0));
- entry_arp1.slab_offset[1] =
- p_rt->params.ip_hdr_offset - 2 * 8;
- break;
-
- default:
- rsp->status = -1;
- return rsp;
- }
-
- entry_arp1.slab[2] =
- SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_mpls);
- entry_arp1.slab_offset[2] = p_rt->params.ip_hdr_offset -
- (n_labels * 4 + 8);
-
- entry_arp1.data_offset = entry_arp1.slab_offset[2] - 6
- - sizeof(struct rte_mbuf);
- entry_arp1.ether_l2_length = n_labels * 4 + 14;
- }
-
- rsp->status = rte_pipeline_table_entry_add(p->p,
- p->table_id[0],
- &key,
- entry,
- &rsp->key_found,
- (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
-
- return rsp;
-}
-
-void *
-pipeline_routing_msg_req_route_del_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_routing_route_delete_msg_req *req = msg;
- struct pipeline_routing_route_delete_msg_rsp *rsp = msg;
-
- struct rte_table_lpm_key key = {
- .ip = req->key.key.ipv4.ip,
- .depth = req->key.key.ipv4.depth,
- };
-
- if (req->key.type != PIPELINE_ROUTING_ROUTE_IPV4) {
- rsp->status = -1;
- return rsp;
- }
-
- rsp->status = rte_pipeline_table_entry_delete(p->p,
- p->table_id[0],
- &key,
- &rsp->key_found,
- NULL);
-
- return rsp;
-}
-
-void *
-pipeline_routing_msg_req_route_add_default_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_routing_route_add_default_msg_req *req = msg;
- struct pipeline_routing_route_add_default_msg_rsp *rsp = msg;
-
- struct routing_table_entry default_entry = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[req->port_id]},
- },
-
- .flags = 0,
- .port_id = 0,
- .ip = 0,
- };
-
- rsp->status = rte_pipeline_table_default_entry_add(p->p,
- p->table_id[0],
- (struct rte_pipeline_table_entry *) &default_entry,
- (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
-
- return rsp;
-}
-
-void *
-pipeline_routing_msg_req_route_del_default_handler(struct pipeline *p,
- void *msg)
-{
- struct pipeline_routing_route_delete_default_msg_rsp *rsp = msg;
-
- rsp->status = rte_pipeline_table_default_entry_delete(p->p,
- p->table_id[0],
- NULL);
-
- return rsp;
-}
-
-void *
-pipeline_routing_msg_req_arp_add_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_routing_arp_add_msg_req *req = msg;
- struct pipeline_routing_arp_add_msg_rsp *rsp = msg;
-
- struct pipeline_routing_arp_key_ipv4 key = {
- .port_id = req->key.key.ipv4.port_id,
- .ip = rte_bswap32(req->key.key.ipv4.ip),
- };
-
- struct arp_table_entry entry = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[req->key.key.ipv4.port_id]},
- },
-
- .macaddr = 0, /* set below */
- };
-
- if (req->key.type != PIPELINE_ROUTING_ARP_IPV4) {
- rsp->status = -1;
- return rsp;
- }
-
- entry.macaddr = *((uint64_t *)&(req->macaddr));
- entry.macaddr = entry.macaddr << 16;
-
- rsp->status = rte_pipeline_table_entry_add(p->p,
- p->table_id[1],
- &key,
- (struct rte_pipeline_table_entry *) &entry,
- &rsp->key_found,
- (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
-
- return rsp;
-}
-
-void *
-pipeline_routing_msg_req_arp_del_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_routing_arp_delete_msg_req *req = msg;
- struct pipeline_routing_arp_delete_msg_rsp *rsp = msg;
-
- struct pipeline_routing_arp_key_ipv4 key = {
- .port_id = req->key.key.ipv4.port_id,
- .ip = rte_bswap32(req->key.key.ipv4.ip),
- };
-
- if (req->key.type != PIPELINE_ROUTING_ARP_IPV4) {
- rsp->status = -1;
- return rsp;
- }
-
- rsp->status = rte_pipeline_table_entry_delete(p->p,
- p->table_id[1],
- &key,
- &rsp->key_found,
- NULL);
-
- return rsp;
-}
-
-void *
-pipeline_routing_msg_req_arp_add_default_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_routing_arp_add_default_msg_req *req = msg;
- struct pipeline_routing_arp_add_default_msg_rsp *rsp = msg;
-
- struct arp_table_entry default_entry = {
- .head = {
- .action = RTE_PIPELINE_ACTION_PORT,
- {.port_id = p->port_out_id[req->port_id]},
- },
-
- .macaddr = 0,
- };
-
- rsp->status = rte_pipeline_table_default_entry_add(p->p,
- p->table_id[1],
- (struct rte_pipeline_table_entry *) &default_entry,
- (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
-
- return rsp;
-}
-
-void *
-pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_routing_arp_delete_default_msg_rsp *rsp = msg;
-
- rsp->status = rte_pipeline_table_default_entry_delete(p->p,
- p->table_id[1],
- NULL);
-
- return rsp;
-}
-
-void *
-pipeline_routing_msg_req_set_macaddr_handler(struct pipeline *p, void *msg)
-{
- struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
- struct pipeline_routing_set_macaddr_msg_req *req = msg;
- struct pipeline_routing_set_macaddr_msg_rsp *rsp = msg;
- uint32_t port_id;
-
- for (port_id = 0; port_id < p->n_ports_out; port_id++)
- p_rt->macaddr[port_id] = req->macaddr[port_id];
-
- rsp->status = 0;
-
- return rsp;
-}
-
-struct pipeline_be_ops pipeline_routing_be_ops = {
- .f_init = pipeline_routing_init,
- .f_free = pipeline_routing_free,
- .f_run = NULL,
- .f_timer = pipeline_routing_timer,
-};
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.h b/examples/ip_pipeline/pipeline/pipeline_routing_be.h
deleted file mode 100644
index 7140ee40..00000000
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.h
+++ /dev/null
@@ -1,283 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_ROUTING_BE_H__
-#define __INCLUDE_PIPELINE_ROUTING_BE_H__
-
-#include <rte_ether.h>
-
-#include "pipeline_common_be.h"
-
-/*
- * Pipeline argument parsing
- */
-#ifndef PIPELINE_ROUTING_N_ROUTES_DEFAULT
-#define PIPELINE_ROUTING_N_ROUTES_DEFAULT 4096
-#endif
-
-enum pipeline_routing_encap {
- PIPELINE_ROUTING_ENCAP_ETHERNET = 0,
- PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ,
- PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS,
-};
-
-struct pipeline_routing_params {
- /* routing */
- uint32_t n_routes;
- uint32_t port_local_dest;
-
- /* routing packet encapsulation */
- enum pipeline_routing_encap encap;
- uint32_t qinq_sched;
- uint32_t mpls_color_mark;
-
- /* arp */
- uint32_t n_arp_entries;
-
- /* packet buffer offsets */
- uint32_t ip_hdr_offset;
- uint32_t arp_key_offset;
- uint32_t color_offset;
-
- /* debug */
- uint32_t dbg_ah_disable;
-};
-
-int
-pipeline_routing_parse_args(struct pipeline_routing_params *p,
- struct pipeline_params *params);
-
-/*
- * Route
- */
-enum pipeline_routing_route_key_type {
- PIPELINE_ROUTING_ROUTE_IPV4,
-};
-
-struct pipeline_routing_route_key_ipv4 {
- uint32_t ip;
- uint32_t depth;
-};
-
-struct pipeline_routing_route_key {
- enum pipeline_routing_route_key_type type;
- union {
- struct pipeline_routing_route_key_ipv4 ipv4;
- } key;
-};
-
-enum pipeline_routing_route_flags {
- PIPELINE_ROUTING_ROUTE_LOCAL = 1 << 0, /* 0 = remote; 1 = local */
- PIPELINE_ROUTING_ROUTE_ARP = 1 << 1, /* 0 = ARP OFF; 1 = ARP ON */
- PIPELINE_ROUTING_ROUTE_QINQ = 1 << 2, /* 0 = QINQ OFF; 1 = QINQ ON */
- PIPELINE_ROUTING_ROUTE_MPLS = 1 << 3, /* 0 = MPLS OFF; 1 = MPLS ON */
-};
-
-#define PIPELINE_ROUTING_MPLS_LABELS_MAX 4
-
-struct pipeline_routing_route_data {
- uint32_t flags;
- uint32_t port_id; /* Output port ID */
-
- union {
- /* Next hop IP (valid only when ARP is enabled) */
- uint32_t ip;
-
-		/* Next hop MAC address (valid only when ARP is disabled) */
- struct ether_addr macaddr;
- } ethernet;
-
- union {
- struct {
- uint16_t svlan;
- uint16_t cvlan;
- } qinq;
-
- struct {
- uint32_t labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
- uint32_t n_labels;
- } mpls;
- } l2;
-};
-
-/*
- * ARP
- */
-enum pipeline_routing_arp_key_type {
- PIPELINE_ROUTING_ARP_IPV4,
-};
-
-struct pipeline_routing_arp_key_ipv4 {
- uint32_t port_id;
- uint32_t ip;
-};
-
-struct pipeline_routing_arp_key {
- enum pipeline_routing_arp_key_type type;
- union {
- struct pipeline_routing_arp_key_ipv4 ipv4;
- } key;
-};
-
-/*
- * Messages
- */
-enum pipeline_routing_msg_req_type {
- PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD,
- PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL,
- PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT,
- PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT,
- PIPELINE_ROUTING_MSG_REQ_ARP_ADD,
- PIPELINE_ROUTING_MSG_REQ_ARP_DEL,
- PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT,
- PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT,
- PIPELINE_ROUTING_MSG_REQ_SET_MACADDR,
- PIPELINE_ROUTING_MSG_REQS
-};
-
-/*
- * MSG ROUTE ADD
- */
-struct pipeline_routing_route_add_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-
- /* key */
- struct pipeline_routing_route_key key;
-
- /* data */
- struct pipeline_routing_route_data data;
-};
-
-struct pipeline_routing_route_add_msg_rsp {
- int status;
- int key_found;
- void *entry_ptr;
-};
-
-/*
- * MSG ROUTE DELETE
- */
-struct pipeline_routing_route_delete_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-
- /* key */
- struct pipeline_routing_route_key key;
-};
-
-struct pipeline_routing_route_delete_msg_rsp {
- int status;
- int key_found;
-};
-
-/*
- * MSG ROUTE ADD DEFAULT
- */
-struct pipeline_routing_route_add_default_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-
- /* data */
- uint32_t port_id;
-};
-
-struct pipeline_routing_route_add_default_msg_rsp {
- int status;
- void *entry_ptr;
-};
-
-/*
- * MSG ROUTE DELETE DEFAULT
- */
-struct pipeline_routing_route_delete_default_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-};
-
-struct pipeline_routing_route_delete_default_msg_rsp {
- int status;
-};
-
-/*
- * MSG ARP ADD
- */
-struct pipeline_routing_arp_add_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-
- /* key */
- struct pipeline_routing_arp_key key;
-
- /* data */
- struct ether_addr macaddr;
-};
-
-struct pipeline_routing_arp_add_msg_rsp {
- int status;
- int key_found;
- void *entry_ptr;
-};
-
-/*
- * MSG ARP DELETE
- */
-struct pipeline_routing_arp_delete_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-
- /* key */
- struct pipeline_routing_arp_key key;
-};
-
-struct pipeline_routing_arp_delete_msg_rsp {
- int status;
- int key_found;
-};
-
-/*
- * MSG ARP ADD DEFAULT
- */
-struct pipeline_routing_arp_add_default_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-
- /* data */
- uint32_t port_id;
-};
-
-struct pipeline_routing_arp_add_default_msg_rsp {
- int status;
- void *entry_ptr;
-};
-
-/*
- * MSG ARP DELETE DEFAULT
- */
-struct pipeline_routing_arp_delete_default_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-};
-
-struct pipeline_routing_arp_delete_default_msg_rsp {
- int status;
-};
-
-/*
- * MSG SET MACADDR
- */
-struct pipeline_routing_set_macaddr_msg_req {
- enum pipeline_msg_req_type type;
- enum pipeline_routing_msg_req_type subtype;
-
- uint64_t macaddr[PIPELINE_MAX_PORT_OUT];
-};
-
-struct pipeline_routing_set_macaddr_msg_rsp {
- int status;
-};
-
-extern struct pipeline_be_ops pipeline_routing_be_ops;
-
-#endif
diff --git a/examples/ip_pipeline/pipeline_be.h b/examples/ip_pipeline/pipeline_be.h
deleted file mode 100644
index 6c0c97a4..00000000
--- a/examples/ip_pipeline/pipeline_be.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-
-#ifndef __INCLUDE_PIPELINE_BE_H__
-#define __INCLUDE_PIPELINE_BE_H__
-
-#include <rte_port_ethdev.h>
-#include <rte_port_ring.h>
-#include <rte_port_frag.h>
-#include <rte_port_ras.h>
-#include <rte_port_sched.h>
-#include <rte_port_fd.h>
-#include <rte_port_source_sink.h>
-#ifdef RTE_LIBRTE_KNI
-#include <rte_port_kni.h>
-#endif
-#include <rte_pipeline.h>
-
-enum pipeline_port_in_type {
- PIPELINE_PORT_IN_ETHDEV_READER,
- PIPELINE_PORT_IN_RING_READER,
- PIPELINE_PORT_IN_RING_MULTI_READER,
- PIPELINE_PORT_IN_RING_READER_IPV4_FRAG,
- PIPELINE_PORT_IN_RING_READER_IPV6_FRAG,
- PIPELINE_PORT_IN_SCHED_READER,
- PIPELINE_PORT_IN_FD_READER,
- PIPELINE_PORT_IN_KNI_READER,
- PIPELINE_PORT_IN_SOURCE,
-};
-
-struct pipeline_port_in_params {
- enum pipeline_port_in_type type;
- union {
- struct rte_port_ethdev_reader_params ethdev;
- struct rte_port_ring_reader_params ring;
- struct rte_port_ring_multi_reader_params ring_multi;
- struct rte_port_ring_reader_ipv4_frag_params ring_ipv4_frag;
- struct rte_port_ring_reader_ipv6_frag_params ring_ipv6_frag;
- struct rte_port_sched_reader_params sched;
- struct rte_port_fd_reader_params fd;
-#ifdef RTE_LIBRTE_KNI
- struct rte_port_kni_reader_params kni;
-#endif
- struct rte_port_source_params source;
- } params;
- uint32_t burst_size;
-};
-
-static inline void *
-pipeline_port_in_params_convert(struct pipeline_port_in_params *p)
-{
- switch (p->type) {
- case PIPELINE_PORT_IN_ETHDEV_READER:
- return (void *) &p->params.ethdev;
- case PIPELINE_PORT_IN_RING_READER:
- return (void *) &p->params.ring;
- case PIPELINE_PORT_IN_RING_MULTI_READER:
- return (void *) &p->params.ring_multi;
- case PIPELINE_PORT_IN_RING_READER_IPV4_FRAG:
- return (void *) &p->params.ring_ipv4_frag;
- case PIPELINE_PORT_IN_RING_READER_IPV6_FRAG:
- return (void *) &p->params.ring_ipv6_frag;
- case PIPELINE_PORT_IN_SCHED_READER:
- return (void *) &p->params.sched;
- case PIPELINE_PORT_IN_FD_READER:
- return (void *) &p->params.fd;
-#ifdef RTE_LIBRTE_KNI
- case PIPELINE_PORT_IN_KNI_READER:
- return (void *) &p->params.kni;
-#endif
- case PIPELINE_PORT_IN_SOURCE:
- return (void *) &p->params.source;
- default:
- return NULL;
- }
-}
-
-static inline struct rte_port_in_ops *
-pipeline_port_in_params_get_ops(struct pipeline_port_in_params *p)
-{
- switch (p->type) {
- case PIPELINE_PORT_IN_ETHDEV_READER:
- return &rte_port_ethdev_reader_ops;
- case PIPELINE_PORT_IN_RING_READER:
- return &rte_port_ring_reader_ops;
- case PIPELINE_PORT_IN_RING_MULTI_READER:
- return &rte_port_ring_multi_reader_ops;
- case PIPELINE_PORT_IN_RING_READER_IPV4_FRAG:
- return &rte_port_ring_reader_ipv4_frag_ops;
- case PIPELINE_PORT_IN_RING_READER_IPV6_FRAG:
- return &rte_port_ring_reader_ipv6_frag_ops;
- case PIPELINE_PORT_IN_SCHED_READER:
- return &rte_port_sched_reader_ops;
- case PIPELINE_PORT_IN_FD_READER:
- return &rte_port_fd_reader_ops;
-#ifdef RTE_LIBRTE_KNI
- case PIPELINE_PORT_IN_KNI_READER:
- return &rte_port_kni_reader_ops;
-#endif
- case PIPELINE_PORT_IN_SOURCE:
- return &rte_port_source_ops;
- default:
- return NULL;
- }
-}
-
-enum pipeline_port_out_type {
- PIPELINE_PORT_OUT_ETHDEV_WRITER,
- PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP,
- PIPELINE_PORT_OUT_RING_WRITER,
- PIPELINE_PORT_OUT_RING_MULTI_WRITER,
- PIPELINE_PORT_OUT_RING_WRITER_NODROP,
- PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP,
- PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS,
- PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS,
- PIPELINE_PORT_OUT_SCHED_WRITER,
- PIPELINE_PORT_OUT_FD_WRITER,
- PIPELINE_PORT_OUT_KNI_WRITER,
- PIPELINE_PORT_OUT_KNI_WRITER_NODROP,
- PIPELINE_PORT_OUT_SINK,
-};
-
-struct pipeline_port_out_params {
- enum pipeline_port_out_type type;
- union {
- struct rte_port_ethdev_writer_params ethdev;
- struct rte_port_ethdev_writer_nodrop_params ethdev_nodrop;
- struct rte_port_ring_writer_params ring;
- struct rte_port_ring_multi_writer_params ring_multi;
- struct rte_port_ring_writer_nodrop_params ring_nodrop;
- struct rte_port_ring_multi_writer_nodrop_params ring_multi_nodrop;
- struct rte_port_ring_writer_ipv4_ras_params ring_ipv4_ras;
- struct rte_port_ring_writer_ipv6_ras_params ring_ipv6_ras;
- struct rte_port_sched_writer_params sched;
- struct rte_port_fd_writer_params fd;
-#ifdef RTE_LIBRTE_KNI
- struct rte_port_kni_writer_params kni;
- struct rte_port_kni_writer_nodrop_params kni_nodrop;
-#endif
- struct rte_port_sink_params sink;
- } params;
-};
-
-static inline void *
-pipeline_port_out_params_convert(struct pipeline_port_out_params *p)
-{
- switch (p->type) {
- case PIPELINE_PORT_OUT_ETHDEV_WRITER:
- return (void *) &p->params.ethdev;
- case PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP:
- return (void *) &p->params.ethdev_nodrop;
- case PIPELINE_PORT_OUT_RING_WRITER:
- return (void *) &p->params.ring;
- case PIPELINE_PORT_OUT_RING_MULTI_WRITER:
- return (void *) &p->params.ring_multi;
- case PIPELINE_PORT_OUT_RING_WRITER_NODROP:
- return (void *) &p->params.ring_nodrop;
- case PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP:
- return (void *) &p->params.ring_multi_nodrop;
- case PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS:
- return (void *) &p->params.ring_ipv4_ras;
- case PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS:
- return (void *) &p->params.ring_ipv6_ras;
- case PIPELINE_PORT_OUT_SCHED_WRITER:
- return (void *) &p->params.sched;
- case PIPELINE_PORT_OUT_FD_WRITER:
- return (void *) &p->params.fd;
-#ifdef RTE_LIBRTE_KNI
- case PIPELINE_PORT_OUT_KNI_WRITER:
- return (void *) &p->params.kni;
- case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
- return (void *) &p->params.kni_nodrop;
-#endif
- case PIPELINE_PORT_OUT_SINK:
- return (void *) &p->params.sink;
- default:
- return NULL;
- }
-}
-
-static inline void *
-pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
-{
- switch (p->type) {
- case PIPELINE_PORT_OUT_ETHDEV_WRITER:
- return &rte_port_ethdev_writer_ops;
- case PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP:
- return &rte_port_ethdev_writer_nodrop_ops;
- case PIPELINE_PORT_OUT_RING_WRITER:
- return &rte_port_ring_writer_ops;
- case PIPELINE_PORT_OUT_RING_MULTI_WRITER:
- return &rte_port_ring_multi_writer_ops;
- case PIPELINE_PORT_OUT_RING_WRITER_NODROP:
- return &rte_port_ring_writer_nodrop_ops;
- case PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP:
- return &rte_port_ring_multi_writer_nodrop_ops;
- case PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS:
- return &rte_port_ring_writer_ipv4_ras_ops;
- case PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS:
- return &rte_port_ring_writer_ipv6_ras_ops;
- case PIPELINE_PORT_OUT_SCHED_WRITER:
- return &rte_port_sched_writer_ops;
- case PIPELINE_PORT_OUT_FD_WRITER:
- return &rte_port_fd_writer_ops;
-#ifdef RTE_LIBRTE_KNI
- case PIPELINE_PORT_OUT_KNI_WRITER:
- return &rte_port_kni_writer_ops;
- case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
- return &rte_port_kni_writer_nodrop_ops;
-#endif
- case PIPELINE_PORT_OUT_SINK:
- return &rte_port_sink_ops;
- default:
- return NULL;
- }
-}
-
-#ifndef PIPELINE_NAME_SIZE
-#define PIPELINE_NAME_SIZE 64
-#endif
-
-#ifndef PIPELINE_TYPE_SIZE
-#define PIPELINE_TYPE_SIZE 64
-#endif
-
-#ifndef PIPELINE_MAX_PORT_IN
-#define PIPELINE_MAX_PORT_IN 64
-#endif
-
-#ifndef PIPELINE_MAX_PORT_OUT
-#define PIPELINE_MAX_PORT_OUT 64
-#endif
-
-#ifndef PIPELINE_MAX_TABLES
-#define PIPELINE_MAX_TABLES 16
-#endif
-
-#ifndef PIPELINE_MAX_MSGQ_IN
-#define PIPELINE_MAX_MSGQ_IN 16
-#endif
-
-#ifndef PIPELINE_MAX_MSGQ_OUT
-#define PIPELINE_MAX_MSGQ_OUT 16
-#endif
-
-#ifndef PIPELINE_MAX_ARGS
-#define PIPELINE_MAX_ARGS 64
-#endif
-
-struct pipeline_params {
- char name[PIPELINE_NAME_SIZE];
- char type[PIPELINE_TYPE_SIZE];
-
- struct pipeline_port_in_params port_in[PIPELINE_MAX_PORT_IN];
- struct pipeline_port_out_params port_out[PIPELINE_MAX_PORT_OUT];
- struct rte_ring *msgq_in[PIPELINE_MAX_MSGQ_IN];
- struct rte_ring *msgq_out[PIPELINE_MAX_MSGQ_OUT];
-
- uint32_t n_ports_in;
- uint32_t n_ports_out;
- uint32_t n_msgq;
-
- int socket_id;
-
- char *args_name[PIPELINE_MAX_ARGS];
- char *args_value[PIPELINE_MAX_ARGS];
- uint32_t n_args;
-
- uint32_t log_level;
-};
-
-/*
- * Pipeline type back-end operations
- */
-
-typedef void* (*pipeline_be_op_init)(struct pipeline_params *params,
- void *arg);
-
-typedef int (*pipeline_be_op_free)(void *pipeline);
-
-typedef int (*pipeline_be_op_run)(void *pipeline);
-
-typedef int (*pipeline_be_op_timer)(void *pipeline);
-
-struct pipeline_be_ops {
- pipeline_be_op_init f_init;
- pipeline_be_op_free f_free;
- pipeline_be_op_run f_run;
- pipeline_be_op_timer f_timer;
-};
-
-/* Pipeline specific config parse error messages */
-#define PIPELINE_ARG_CHECK(exp, fmt, ...) \
-do { \
- if (!(exp)) { \
- fprintf(stderr, fmt "\n", ## __VA_ARGS__); \
- return -1; \
- } \
-} while (0)
-
-#define PIPELINE_PARSE_ERR_INV_VAL(exp, section, entry, val) \
-PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": entry \"%s\" " \
- "has invalid value (\"%s\")", section, entry, val)
-
-#define PIPELINE_PARSE_ERR_OUT_RNG(exp, section, entry, val) \
-PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": entry \"%s\" " \
- "value is out of range (\"%s\")", section, entry, val)
-
-#define PIPELINE_PARSE_ERR_DUPLICATE(exp, section, entry) \
-PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": duplicated " \
- "entry \"%s\"", section, entry)
-
-#define PIPELINE_PARSE_ERR_INV_ENT(exp, section, entry) \
-PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": invalid entry " \
- "\"%s\"", section, entry)
-
-#define PIPELINE_PARSE_ERR_MANDATORY(exp, section, entry) \
-PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": mandatory " \
- "entry \"%s\" is missing", section, entry)
-
-#endif
diff --git a/examples/ip_pipeline/swq.c b/examples/ip_pipeline/swq.c
new file mode 100644
index 00000000..7e54a1db
--- /dev/null
+++ b/examples/ip_pipeline/swq.c
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+
+#include "swq.h"
+
+static struct swq_list swq_list;
+
+int
+swq_init(void)
+{
+ TAILQ_INIT(&swq_list);
+
+ return 0;
+}
+
+struct swq *
+swq_find(const char *name)
+{
+ struct swq *swq;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(swq, &swq_list, node)
+ if (strcmp(swq->name, name) == 0)
+ return swq;
+
+ return NULL;
+}
+
+struct swq *
+swq_create(const char *name, struct swq_params *params)
+{
+ struct swq *swq;
+ struct rte_ring *r;
+ unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ swq_find(name) ||
+ (params == NULL) ||
+ (params->size == 0))
+ return NULL;
+
+ /* Resource create */
+ r = rte_ring_create(
+ name,
+ params->size,
+ params->cpu_id,
+ flags);
+
+ if (r == NULL)
+ return NULL;
+
+ /* Node allocation */
+ swq = calloc(1, sizeof(struct swq));
+ if (swq == NULL) {
+ rte_ring_free(r);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(swq->name, name, sizeof(swq->name));
+ swq->r = r;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&swq_list, swq, node);
+
+ return swq;
+}
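
For orientation, a minimal, hypothetical sketch of how this swq API might be driven (the queue name, size, and CPU id below are invented for illustration, and the DPDK EAL is assumed to be initialized so that rte_ring_create() can allocate memory):

#include <stddef.h>

#include "swq.h"

/* Hypothetical driver: create one software queue and look it up by name.
 * All parameter values are illustrative, not taken from the patch. */
static int
swq_demo(void)
{
	struct swq_params params = {
		.size = 4096,	/* must be non-zero; rte_ring sizes are powers of 2 */
		.cpu_id = 0,	/* NUMA socket id passed to rte_ring_create() */
	};

	if (swq_init() < 0)
		return -1;

	if (swq_create("SWQ0", &params) == NULL)
		return -1;

	/* A second create with the same name would fail, because swq_find() hits. */
	return (swq_find("SWQ0") != NULL) ? 0 : -1;
}
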
diff --git a/examples/ip_pipeline/swq.h b/examples/ip_pipeline/swq.h
new file mode 100644
index 00000000..c8440ee3
--- /dev/null
+++ b/examples/ip_pipeline/swq.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_SWQ_H_
+#define _INCLUDE_SWQ_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_ring.h>
+
+#include "common.h"
+
+struct swq {
+ TAILQ_ENTRY(swq) node;
+ char name[NAME_SIZE];
+ struct rte_ring *r;
+};
+
+TAILQ_HEAD(swq_list, swq);
+
+int
+swq_init(void);
+
+struct swq *
+swq_find(const char *name);
+
+struct swq_params {
+ uint32_t size;
+ uint32_t cpu_id;
+};
+
+struct swq *
+swq_create(const char *name, struct swq_params *params);
+
+#endif /* _INCLUDE_SWQ_H_ */
diff --git a/examples/ip_pipeline/tap.c b/examples/ip_pipeline/tap.c
new file mode 100644
index 00000000..11e4ad20
--- /dev/null
+++ b/examples/ip_pipeline/tap.c
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <netinet/in.h>
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#endif
+#include <sys/ioctl.h>
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "tap.h"
+
+#define TAP_DEV "/dev/net/tun"
+
+static struct tap_list tap_list;
+
+int
+tap_init(void)
+{
+ TAILQ_INIT(&tap_list);
+
+ return 0;
+}
+
+struct tap *
+tap_find(const char *name)
+{
+ struct tap *tap;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(tap, &tap_list, node)
+ if (strcmp(tap->name, name) == 0)
+ return tap;
+
+ return NULL;
+}
+
+#ifndef RTE_EXEC_ENV_LINUXAPP
+
+struct tap *
+tap_create(const char *name __rte_unused)
+{
+ return NULL;
+}
+
+#else
+
+struct tap *
+tap_create(const char *name)
+{
+ struct tap *tap;
+ struct ifreq ifr;
+ int fd, status;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ tap_find(name))
+ return NULL;
+
+ /* Resource create */
+ fd = open(TAP_DEV, O_RDWR | O_NONBLOCK);
+ if (fd < 0)
+ return NULL;
+
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);
+
+ status = ioctl(fd, TUNSETIFF, (void *) &ifr);
+ if (status < 0) {
+ close(fd);
+ return NULL;
+ }
+
+ /* Node allocation */
+ tap = calloc(1, sizeof(struct tap));
+ if (tap == NULL) {
+ close(fd);
+ return NULL;
+ }
+ /* Node fill in */
+ strlcpy(tap->name, name, sizeof(tap->name));
+ tap->fd = fd;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&tap_list, tap, node);
+
+ return tap;
+}
+
+#endif
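
Similarly, a hedged sketch of the TAP API in use (the interface name is hypothetical; this path only exists on a Linux build, since the non-Linux stub above always returns NULL, and opening /dev/net/tun requires CAP_NET_ADMIN):

#include <stdint.h>
#include <unistd.h>

#include "tap.h"

/* Hypothetical driver: create a TAP device and push one dummy frame into it. */
static int
tap_demo(void)
{
	static const uint8_t frame[60]; /* zero-filled minimum-size Ethernet frame */
	struct tap *tap;

	if (tap_init() < 0)
		return -1;

	tap = tap_create("mytap0");
	if (tap == NULL)
		return -1;

	/* The fd was opened O_NONBLOCK, so write() may fail with EAGAIN. */
	return (write(tap->fd, frame, sizeof(frame)) < 0) ? -1 : 0;
}
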
diff --git a/examples/ip_pipeline/tap.h b/examples/ip_pipeline/tap.h
new file mode 100644
index 00000000..0dce72fe
--- /dev/null
+++ b/examples/ip_pipeline/tap.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_TAP_H_
+#define _INCLUDE_TAP_H_
+
+#include <sys/queue.h>
+
+#include "common.h"
+
+struct tap {
+ TAILQ_ENTRY(tap) node;
+ char name[NAME_SIZE];
+ int fd;
+};
+
+TAILQ_HEAD(tap_list, tap);
+
+int
+tap_init(void);
+
+struct tap *
+tap_find(const char *name);
+
+struct tap *
+tap_create(const char *name);
+
+#endif /* _INCLUDE_TAP_H_ */
diff --git a/examples/ip_pipeline/thread.c b/examples/ip_pipeline/thread.c
index 9013afd1..7fc03332 100644
--- a/examples/ip_pipeline/thread.c
+++ b/examples/ip_pipeline/thread.c
@@ -1,291 +1,2968 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
+#include <stdlib.h>
+
#include <rte_common.h>
#include <rte_cycles.h>
-#include <rte_pipeline.h>
+#include <rte_lcore.h>
+#include <rte_ring.h>
+
+#include <rte_table_acl.h>
+#include <rte_table_array.h>
+#include <rte_table_hash.h>
+#include <rte_table_lpm.h>
+#include <rte_table_lpm_ipv6.h>
-#include "pipeline_common_be.h"
-#include "app.h"
+#include "common.h"
#include "thread.h"
+#include "pipeline.h"
-#if APP_THREAD_HEADROOM_STATS_COLLECT
-
-#define PIPELINE_RUN_REGULAR(thread, pipeline) \
-do { \
- uint64_t t0 = rte_rdtsc_precise(); \
- int n_pkts = rte_pipeline_run(pipeline->p); \
- \
- if (n_pkts == 0) { \
- uint64_t t1 = rte_rdtsc_precise(); \
- \
- thread->headroom_cycles += t1 - t0; \
- } \
-} while (0)
-
-
-#define PIPELINE_RUN_CUSTOM(thread, data) \
-do { \
- uint64_t t0 = rte_rdtsc_precise(); \
- int n_pkts = data->f_run(data->be); \
- \
- if (n_pkts == 0) { \
- uint64_t t1 = rte_rdtsc_precise(); \
- \
- thread->headroom_cycles += t1 - t0; \
- } \
-} while (0)
-
-#else
-
-#define PIPELINE_RUN_REGULAR(thread, pipeline) \
- rte_pipeline_run(pipeline->p)
-
-#define PIPELINE_RUN_CUSTOM(thread, data) \
- data->f_run(data->be)
+#ifndef THREAD_PIPELINES_MAX
+#define THREAD_PIPELINES_MAX 256
+#endif
+#ifndef THREAD_MSGQ_SIZE
+#define THREAD_MSGQ_SIZE 64
#endif
-static inline void *
-thread_msg_recv(struct rte_ring *r)
+#ifndef THREAD_TIMER_PERIOD_MS
+#define THREAD_TIMER_PERIOD_MS 100
+#endif
+
+/**
+ * Master thread: data plane thread context
+ */
+struct thread {
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+
+ uint32_t enabled;
+};
+
+static struct thread thread[RTE_MAX_LCORE];
+
+/**
+ * Data plane threads: context
+ */
+struct table_data {
+ struct rte_table_action *a;
+};
+
+struct pipeline_data {
+ struct rte_pipeline *p;
+ struct table_data table_data[RTE_PIPELINE_TABLE_MAX];
+ uint32_t n_tables;
+
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint64_t timer_period; /* Measured in CPU cycles. */
+ uint64_t time_next;
+
+ uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX];
+};
+
+struct thread_data {
+ struct rte_pipeline *p[THREAD_PIPELINES_MAX];
+ uint32_t n_pipelines;
+
+ struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX];
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint64_t timer_period; /* Measured in CPU cycles. */
+ uint64_t time_next;
+ uint64_t time_next_min;
+} __rte_cache_aligned;
+
+static struct thread_data thread_data[RTE_MAX_LCORE];
+
+/**
+ * Master thread: data plane thread init
+ */
+static void
+thread_free(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ struct thread *t = &thread[i];
+
+ if (!rte_lcore_is_enabled(i))
+ continue;
+
+ /* MSGQs */
+ if (t->msgq_req)
+ rte_ring_free(t->msgq_req);
+
+ if (t->msgq_rsp)
+ rte_ring_free(t->msgq_rsp);
+ }
+}
+
+int
+thread_init(void)
+{
+ uint32_t i;
+
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ char name[NAME_MAX];
+ struct rte_ring *msgq_req, *msgq_rsp;
+ struct thread *t = &thread[i];
+ struct thread_data *t_data = &thread_data[i];
+ uint32_t cpu_id = rte_lcore_to_socket_id(i);
+
+ /* MSGQs */
+ snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-REQ", i);
+
+ msgq_req = rte_ring_create(name,
+ THREAD_MSGQ_SIZE,
+ cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (msgq_req == NULL) {
+ thread_free();
+ return -1;
+ }
+
+ snprintf(name, sizeof(name), "THREAD-%04x-MSGQ-RSP", i);
+
+ msgq_rsp = rte_ring_create(name,
+ THREAD_MSGQ_SIZE,
+ cpu_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (msgq_rsp == NULL) {
+ thread_free();
+ return -1;
+ }
+
+ /* Master thread records */
+ t->msgq_req = msgq_req;
+ t->msgq_rsp = msgq_rsp;
+ t->enabled = 1;
+
+ /* Data plane thread records */
+ t_data->n_pipelines = 0;
+ t_data->msgq_req = msgq_req;
+ t_data->msgq_rsp = msgq_rsp;
+ t_data->timer_period =
+ (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000;
+ t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period;
+ t_data->time_next_min = t_data->time_next;
+ }
+
+ return 0;
+}
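
As a worked example of the timer arithmetic above: on a machine whose TSC runs at a hypothetical 2.5 GHz, rte_get_tsc_hz() returns 2500000000, so with the default THREAD_TIMER_PERIOD_MS of 100 the computed timer_period is 2500000000 * 100 / 1000 = 250000000 cycles, i.e. the data plane thread services its message queues and timers once every 100 ms of wall-clock time.
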
+
+static inline int
+thread_is_running(uint32_t thread_id)
+{
+ enum rte_lcore_state_t thread_state;
+
+ thread_state = rte_eal_get_lcore_state(thread_id);
+ return (thread_state == RUNNING) ? 1 : 0;
+}
+
+/**
+ * Pipeline is running when:
+ * (A) Pipeline is mapped to a data plane thread AND
+ * (B) Its data plane thread is in RUNNING state.
+ */
+static inline int
+pipeline_is_running(struct pipeline *p)
+{
+ if (p->enabled == 0)
+ return 0;
+
+ return thread_is_running(p->thread_id);
+}
+
+/**
+ * Master thread & data plane threads: message passing
+ */
+enum thread_req_type {
+ THREAD_REQ_PIPELINE_ENABLE = 0,
+ THREAD_REQ_PIPELINE_DISABLE,
+ THREAD_REQ_MAX
+};
+
+struct thread_msg_req {
+ enum thread_req_type type;
+
+ union {
+ struct {
+ struct rte_pipeline *p;
+ struct {
+ struct rte_table_action *a;
+ } table[RTE_PIPELINE_TABLE_MAX];
+ struct rte_ring *msgq_req;
+ struct rte_ring *msgq_rsp;
+ uint32_t timer_period_ms;
+ uint32_t n_tables;
+ } pipeline_enable;
+
+ struct {
+ struct rte_pipeline *p;
+ } pipeline_disable;
+ };
+};
+
+struct thread_msg_rsp {
+ int status;
+};
+
+/**
+ * Master thread
+ */
+static struct thread_msg_req *
+thread_msg_alloc(void)
+{
+ size_t size = RTE_MAX(sizeof(struct thread_msg_req),
+ sizeof(struct thread_msg_rsp));
+
+ return calloc(1, size);
+}
+
+static void
+thread_msg_free(struct thread_msg_rsp *rsp)
+{
+ free(rsp);
+}
+
+static struct thread_msg_rsp *
+thread_msg_send_recv(uint32_t thread_id,
+ struct thread_msg_req *req)
+{
+ struct thread *t = &thread[thread_id];
+ struct rte_ring *msgq_req = t->msgq_req;
+ struct rte_ring *msgq_rsp = t->msgq_rsp;
+ struct thread_msg_rsp *rsp;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(msgq_req, req);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ do {
+ status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
+ } while (status != 0);
+
+ return rsp;
+}
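
Note that thread_msg_send_recv() is a blocking rendezvous built from two busy-wait loops: the enqueue retries while the request ring (THREAD_MSGQ_SIZE = 64 entries) is full, and the dequeue polls until the data plane thread posts its response, so the master thread stalls until the request has been fully serviced.
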
+
+int
+thread_pipeline_enable(uint32_t thread_id,
+ const char *pipeline_name)
+{
+ struct pipeline *p = pipeline_find(pipeline_name);
+ struct thread *t;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+ uint32_t i;
+ int status;
+
+ /* Check input params */
+ if ((thread_id >= RTE_MAX_LCORE) ||
+ (p == NULL) ||
+ (p->n_ports_in == 0) ||
+ (p->n_ports_out == 0) ||
+ (p->n_tables == 0))
+ return -1;
+
+ t = &thread[thread_id];
+ if ((t->enabled == 0) ||
+ p->enabled)
+ return -1;
+
+ if (!thread_is_running(thread_id)) {
+ struct thread_data *td = &thread_data[thread_id];
+ struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines];
+
+ if (td->n_pipelines >= THREAD_PIPELINES_MAX)
+ return -1;
+
+ /* Data plane thread */
+ td->p[td->n_pipelines] = p->p;
+
+ tdp->p = p->p;
+ for (i = 0; i < p->n_tables; i++)
+ tdp->table_data[i].a = p->table[i].a;
+
+ tdp->n_tables = p->n_tables;
+
+ tdp->msgq_req = p->msgq_req;
+ tdp->msgq_rsp = p->msgq_rsp;
+ tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000;
+ tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period;
+
+ td->n_pipelines++;
+
+ /* Pipeline */
+ p->thread_id = thread_id;
+ p->enabled = 1;
+
+ return 0;
+ }
+
+ /* Allocate request */
+ req = thread_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = THREAD_REQ_PIPELINE_ENABLE;
+ req->pipeline_enable.p = p->p;
+ for (i = 0; i < p->n_tables; i++)
+ req->pipeline_enable.table[i].a =
+ p->table[i].a;
+ req->pipeline_enable.msgq_req = p->msgq_req;
+ req->pipeline_enable.msgq_rsp = p->msgq_rsp;
+ req->pipeline_enable.timer_period_ms = p->timer_period_ms;
+ req->pipeline_enable.n_tables = p->n_tables;
+
+ /* Send request and wait for response */
+ rsp = thread_msg_send_recv(thread_id, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ thread_msg_free(rsp);
+
+ /* Request completion */
+ if (status)
+ return status;
+
+ p->thread_id = thread_id;
+ p->enabled = 1;
+
+ return 0;
+}
+
+int
+thread_pipeline_disable(uint32_t thread_id,
+ const char *pipeline_name)
+{
+ struct pipeline *p = pipeline_find(pipeline_name);
+ struct thread *t;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((thread_id >= RTE_MAX_LCORE) ||
+ (p == NULL))
+ return -1;
+
+ t = &thread[thread_id];
+ if (t->enabled == 0)
+ return -1;
+
+ if (p->enabled == 0)
+ return 0;
+
+ if (p->thread_id != thread_id)
+ return -1;
+
+ if (!thread_is_running(thread_id)) {
+ struct thread_data *td = &thread_data[thread_id];
+ uint32_t i;
+
+ for (i = 0; i < td->n_pipelines; i++) {
+ struct pipeline_data *tdp = &td->pipeline_data[i];
+
+ if (tdp->p != p->p)
+ continue;
+
+ /* Data plane thread */
+ if (i < td->n_pipelines - 1) {
+ struct rte_pipeline *pipeline_last =
+ td->p[td->n_pipelines - 1];
+ struct pipeline_data *tdp_last =
+ &td->pipeline_data[td->n_pipelines - 1];
+
+ td->p[i] = pipeline_last;
+ memcpy(tdp, tdp_last, sizeof(*tdp));
+ }
+
+ td->n_pipelines--;
+
+ /* Pipeline */
+ p->enabled = 0;
+
+ break;
+ }
+
+ return 0;
+ }
+
+ /* Allocate request */
+ req = thread_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = THREAD_REQ_PIPELINE_DISABLE;
+ req->pipeline_disable.p = p->p;
+
+ /* Send request and wait for response */
+ rsp = thread_msg_send_recv(thread_id, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ thread_msg_free(rsp);
+
+ /* Request completion */
+ if (status)
+ return status;
+
+ p->enabled = 0;
+
+ return 0;
+}
+
+/**
+ * Data plane threads: message handling
+ */
+static inline struct thread_msg_req *
+thread_msg_recv(struct rte_ring *msgq_req)
{
- void *msg;
- int status = rte_ring_sc_dequeue(r, &msg);
+ struct thread_msg_req *req;
+
+ int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
if (status != 0)
return NULL;
- return msg;
+ return req;
}
static inline void
-thread_msg_send(struct rte_ring *r,
- void *msg)
+thread_msg_send(struct rte_ring *msgq_rsp,
+ struct thread_msg_rsp *rsp)
{
int status;
do {
- status = rte_ring_sp_enqueue(r, msg);
+ status = rte_ring_sp_enqueue(msgq_rsp, rsp);
} while (status == -ENOBUFS);
}
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_enable(struct thread_data *t,
+ struct thread_msg_req *req)
+{
+ struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
+ struct pipeline_data *p = &t->pipeline_data[t->n_pipelines];
+ uint32_t i;
+
+ /* Request */
+ if (t->n_pipelines >= THREAD_PIPELINES_MAX) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ t->p[t->n_pipelines] = req->pipeline_enable.p;
+
+ p->p = req->pipeline_enable.p;
+ for (i = 0; i < req->pipeline_enable.n_tables; i++)
+ p->table_data[i].a =
+ req->pipeline_enable.table[i].a;
+
+ p->n_tables = req->pipeline_enable.n_tables;
+
+ p->msgq_req = req->pipeline_enable.msgq_req;
+ p->msgq_rsp = req->pipeline_enable.msgq_rsp;
+ p->timer_period =
+ (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000;
+ p->time_next = rte_get_tsc_cycles() + p->timer_period;
+
+ t->n_pipelines++;
+
+ /* Response */
+ rsp->status = 0;
+ return rsp;
+}
+
+static struct thread_msg_rsp *
+thread_msg_handle_pipeline_disable(struct thread_data *t,
+ struct thread_msg_req *req)
+{
+ struct thread_msg_rsp *rsp = (struct thread_msg_rsp *) req;
+ uint32_t n_pipelines = t->n_pipelines;
+ struct rte_pipeline *pipeline = req->pipeline_disable.p;
+ uint32_t i;
+
+ /* find pipeline */
+ for (i = 0; i < n_pipelines; i++) {
+ struct pipeline_data *p = &t->pipeline_data[i];
+
+ if (p->p != pipeline)
+ continue;
+
+ if (i < n_pipelines - 1) {
+ struct rte_pipeline *pipeline_last =
+ t->p[n_pipelines - 1];
+ struct pipeline_data *p_last =
+ &t->pipeline_data[n_pipelines - 1];
+
+ t->p[i] = pipeline_last;
+ memcpy(p, p_last, sizeof(*p));
+ }
+
+ t->n_pipelines--;
+
+ rsp->status = 0;
+ return rsp;
+ }
+
+ /* should not get here */
+ rsp->status = 0;
+ return rsp;
+}
+
+static void
+thread_msg_handle(struct thread_data *t)
+{
+ for ( ; ; ) {
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+
+ req = thread_msg_recv(t->msgq_req);
+ if (req == NULL)
+ break;
+
+ switch (req->type) {
+ case THREAD_REQ_PIPELINE_ENABLE:
+ rsp = thread_msg_handle_pipeline_enable(t, req);
+ break;
+
+ case THREAD_REQ_PIPELINE_DISABLE:
+ rsp = thread_msg_handle_pipeline_disable(t, req);
+ break;
+
+ default:
+ rsp = (struct thread_msg_rsp *) req;
+ rsp->status = -1;
+ }
+
+ thread_msg_send(t->msgq_rsp, rsp);
+ }
+}
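
One detail worth noting in the handlers above: each response is written in place over its request (rsp simply aliases req), which is why thread_msg_alloc() sizes the buffer as RTE_MAX() of the two struct sizes. Ownership of that single allocation travels with the message, and the master thread releases it with thread_msg_free() after reading the status.
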
+
+/**
+ * Master thread & data plane threads: message passing
+ */
+enum pipeline_req_type {
+ /* Port IN */
+ PIPELINE_REQ_PORT_IN_STATS_READ,
+ PIPELINE_REQ_PORT_IN_ENABLE,
+ PIPELINE_REQ_PORT_IN_DISABLE,
+
+ /* Port OUT */
+ PIPELINE_REQ_PORT_OUT_STATS_READ,
+
+ /* Table */
+ PIPELINE_REQ_TABLE_STATS_READ,
+ PIPELINE_REQ_TABLE_RULE_ADD,
+ PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT,
+ PIPELINE_REQ_TABLE_RULE_ADD_BULK,
+ PIPELINE_REQ_TABLE_RULE_DELETE,
+ PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT,
+ PIPELINE_REQ_TABLE_RULE_STATS_READ,
+ PIPELINE_REQ_TABLE_MTR_PROFILE_ADD,
+ PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE,
+ PIPELINE_REQ_TABLE_RULE_MTR_READ,
+ PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE,
+ PIPELINE_REQ_TABLE_RULE_TTL_READ,
+ PIPELINE_REQ_MAX
+};
+
+struct pipeline_msg_req_port_in_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_port_out_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_table_stats_read {
+ int clear;
+};
+
+struct pipeline_msg_req_table_rule_add {
+ struct table_rule_match match;
+ struct table_rule_action action;
+};
+
+struct pipeline_msg_req_table_rule_add_default {
+ struct table_rule_action action;
+};
+
+struct pipeline_msg_req_table_rule_add_bulk {
+ struct table_rule_match *match;
+ struct table_rule_action *action;
+ void **data;
+ uint32_t n_rules;
+ int bulk;
+};
+
+struct pipeline_msg_req_table_rule_delete {
+ struct table_rule_match match;
+};
+
+struct pipeline_msg_req_table_rule_stats_read {
+ void *data;
+ int clear;
+};
+
+struct pipeline_msg_req_table_mtr_profile_add {
+ uint32_t meter_profile_id;
+ struct rte_table_action_meter_profile profile;
+};
+
+struct pipeline_msg_req_table_mtr_profile_delete {
+ uint32_t meter_profile_id;
+};
+
+struct pipeline_msg_req_table_rule_mtr_read {
+ void *data;
+ uint32_t tc_mask;
+ int clear;
+};
+
+struct pipeline_msg_req_table_dscp_table_update {
+ uint64_t dscp_mask;
+ struct rte_table_action_dscp_table dscp_table;
+};
+
+struct pipeline_msg_req_table_rule_ttl_read {
+ void *data;
+ int clear;
+};
+
+struct pipeline_msg_req {
+ enum pipeline_req_type type;
+ uint32_t id; /* Port IN, port OUT or table ID */
+
+ RTE_STD_C11
+ union {
+ struct pipeline_msg_req_port_in_stats_read port_in_stats_read;
+ struct pipeline_msg_req_port_out_stats_read port_out_stats_read;
+ struct pipeline_msg_req_table_stats_read table_stats_read;
+ struct pipeline_msg_req_table_rule_add table_rule_add;
+ struct pipeline_msg_req_table_rule_add_default table_rule_add_default;
+ struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk;
+ struct pipeline_msg_req_table_rule_delete table_rule_delete;
+ struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read;
+ struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add;
+ struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete;
+ struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read;
+ struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update;
+ struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read;
+ };
+};
+
+struct pipeline_msg_rsp_port_in_stats_read {
+ struct rte_pipeline_port_in_stats stats;
+};
+
+struct pipeline_msg_rsp_port_out_stats_read {
+ struct rte_pipeline_port_out_stats stats;
+};
+
+struct pipeline_msg_rsp_table_stats_read {
+ struct rte_pipeline_table_stats stats;
+};
+
+struct pipeline_msg_rsp_table_rule_add {
+ void *data;
+};
+
+struct pipeline_msg_rsp_table_rule_add_default {
+ void *data;
+};
+
+struct pipeline_msg_rsp_table_rule_add_bulk {
+ uint32_t n_rules;
+};
+
+struct pipeline_msg_rsp_table_rule_stats_read {
+ struct rte_table_action_stats_counters stats;
+};
+
+struct pipeline_msg_rsp_table_rule_mtr_read {
+ struct rte_table_action_mtr_counters stats;
+};
+
+struct pipeline_msg_rsp_table_rule_ttl_read {
+ struct rte_table_action_ttl_counters stats;
+};
+
+struct pipeline_msg_rsp {
+ int status;
+
+ RTE_STD_C11
+ union {
+ struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read;
+ struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read;
+ struct pipeline_msg_rsp_table_stats_read table_stats_read;
+ struct pipeline_msg_rsp_table_rule_add table_rule_add;
+ struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default;
+ struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk;
+ struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read;
+ struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read;
+ struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read;
+ };
+};
+
+/**
+ * Master thread
+ */
+static struct pipeline_msg_req *
+pipeline_msg_alloc(void)
+{
+ size_t size = RTE_MAX(sizeof(struct pipeline_msg_req),
+ sizeof(struct pipeline_msg_rsp));
+
+ return calloc(1, size);
+}
+
+static void
+pipeline_msg_free(struct pipeline_msg_rsp *rsp)
+{
+ free(rsp);
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_send_recv(struct pipeline *p,
+ struct pipeline_msg_req *req)
+{
+ struct rte_ring *msgq_req = p->msgq_req;
+ struct rte_ring *msgq_rsp = p->msgq_rsp;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(msgq_req, req);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ do {
+ status = rte_ring_sc_dequeue(msgq_rsp, (void **) &rsp);
+ } while (status != 0);
+
+ return rsp;
+}
+
+int
+pipeline_port_in_stats_read(const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (port_id >= p->n_ports_in))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_STATS_READ;
+ req->id = port_id;
+ req->port_in_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+ memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_port_in_enable(const char *pipeline_name,
+ uint32_t port_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (port_id >= p->n_ports_in))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_enable(p->p, port_id);
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_ENABLE;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_port_in_disable(const char *pipeline_name,
+ uint32_t port_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (port_id >= p->n_ports_in))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_in_disable(p->p, port_id);
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_IN_DISABLE;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_port_out_stats_read(const char *pipeline_name,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (port_id >= p->n_ports_out))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_PORT_OUT_STATS_READ;
+ req->id = port_id;
+ req->port_out_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+ memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_table_stats_read(const char *pipeline_name,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_table_stats_read(p->p,
+ table_id,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_STATS_READ;
+ req->id = table_id;
+ req->table_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+ memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+static int
+match_check(struct table_rule_match *match,
+ struct pipeline *p,
+ uint32_t table_id)
+{
+ struct table *table;
+
+ if ((match == NULL) ||
+ (p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ table = &p->table[table_id];
+ if (match->match_type != table->params.match_type)
+ return -1;
+
+ switch (match->match_type) {
+ case TABLE_ACL:
+ {
+ struct table_acl_params *t = &table->params.match.acl;
+ struct table_rule_match_acl *r = &match->match.acl;
+
+ if ((r->ip_version && (t->ip_version == 0)) ||
+ ((r->ip_version == 0) && t->ip_version))
+ return -1;
+
+ if (r->ip_version) {
+ if ((r->sa_depth > 32) ||
+ (r->da_depth > 32))
+ return -1;
+ } else {
+ if ((r->sa_depth > 128) ||
+ (r->da_depth > 128))
+ return -1;
+ }
+ return 0;
+ }
+
+ case TABLE_ARRAY:
+ return 0;
+
+ case TABLE_HASH:
+ return 0;
+
+ case TABLE_LPM:
+ {
+ struct table_lpm_params *t = &table->params.match.lpm;
+ struct table_rule_match_lpm *r = &match->match.lpm;
+
+ if ((r->ip_version && (t->key_size != 4)) ||
+ ((r->ip_version == 0) && (t->key_size != 16)))
+ return -1;
+
+ if (r->ip_version) {
+ if (r->depth > 32)
+ return -1;
+ } else {
+ if (r->depth > 128)
+ return -1;
+ }
+ return 0;
+ }
+
+ case TABLE_STUB:
+ return -1;
+
+ default:
+ return -1;
+ }
+}
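
To make the LPM branch of match_check() concrete, here is a hypothetical rule match that would pass it against a table created with key_size == 4 (only the fields that match_check() inspects are shown; the address bytes themselves live in table_rule_match_lpm, defined elsewhere, and are left zero-initialized):

/* Hypothetical IPv4 LPM match: ip_version set, depth within the 32-bit limit. */
struct table_rule_match m = {
	.match_type = TABLE_LPM,
	.match.lpm = {
		.ip_version = 1,	/* IPv4, so depth must be <= 32 */
		.depth = 24,		/* a /24 prefix */
	},
};
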
+
static int
-thread_pipeline_enable(struct app_thread_data *t,
- struct thread_pipeline_enable_msg_req *req)
+action_check(struct table_rule_action *action,
+ struct pipeline *p,
+ uint32_t table_id)
{
- struct app_thread_pipeline_data *p;
+ struct table_action_profile *ap;
+
+ if ((action == NULL) ||
+ (p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ ap = p->table[table_id].ap;
+ if (action->action_mask != ap->params.action_mask)
+ return -1;
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
+ (action->fwd.id >= p->n_ports_out))
+ return -1;
- if (req->f_run == NULL) {
- if (t->n_regular >= APP_MAX_THREAD_PIPELINES)
+ if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
+ (action->fwd.id >= p->n_tables))
return -1;
- } else {
- if (t->n_custom >= APP_MAX_THREAD_PIPELINES)
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1;
+ uint32_t tc_mask1 = action->mtr.tc_mask;
+
+ if (tc_mask1 != tc_mask0)
return -1;
}
- p = (req->f_run == NULL) ?
- &t->regular[t->n_regular] :
- &t->custom[t->n_custom];
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ uint32_t n_subports_per_port =
+ ap->params.tm.n_subports_per_port;
+ uint32_t n_pipes_per_subport =
+ ap->params.tm.n_pipes_per_subport;
+ uint32_t subport_id = action->tm.subport_id;
+ uint32_t pipe_id = action->tm.pipe_id;
- p->pipeline_id = req->pipeline_id;
- p->be = req->be;
- p->f_run = req->f_run;
- p->f_timer = req->f_timer;
- p->timer_period = req->timer_period;
- p->deadline = 0;
+ if ((subport_id >= n_subports_per_port) ||
+ (pipe_id >= n_pipes_per_subport))
+ return -1;
+ }
- if (req->f_run == NULL)
- t->n_regular++;
- else
- t->n_custom++;
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ uint64_t encap_mask = ap->params.encap.encap_mask;
+ enum rte_table_action_encap_type type = action->encap.type;
+
+ if ((encap_mask & (1LLU << type)) == 0)
+ return -1;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ int ip_version0 = ap->params.common.ip_version;
+ int ip_version1 = action->nat.ip_version;
+
+ if ((ip_version1 && (ip_version0 == 0)) ||
+ ((ip_version1 == 0) && ip_version0))
+ return -1;
+ }
return 0;
}
static int
-thread_pipeline_disable(struct app_thread_data *t,
- struct thread_pipeline_disable_msg_req *req)
+action_default_check(struct table_rule_action *action,
+ struct pipeline *p,
+ uint32_t table_id)
{
- uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
- uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
- uint32_t i;
+ if ((action == NULL) ||
+ (action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD)) ||
+ (p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
- /* search regular pipelines of current thread */
- for (i = 0; i < n_regular; i++) {
- if (t->regular[i].pipeline_id != req->pipeline_id)
- continue;
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ if ((action->fwd.action == RTE_PIPELINE_ACTION_PORT) &&
+ (action->fwd.id >= p->n_ports_out))
+ return -1;
+
+ if ((action->fwd.action == RTE_PIPELINE_ACTION_TABLE) &&
+ (action->fwd.id >= p->n_tables))
+ return -1;
+ }
- if (i < n_regular - 1)
- memcpy(&t->regular[i],
- &t->regular[i+1],
- (n_regular - 1 - i) * sizeof(struct app_thread_pipeline_data));
+ return 0;
+}
- n_regular--;
- t->n_regular = n_regular;
+union table_rule_match_low_level {
+ struct rte_table_acl_rule_add_params acl_add;
+ struct rte_table_acl_rule_delete_params acl_delete;
+ struct rte_table_array_key array;
+ uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX];
+ struct rte_table_lpm_key lpm_ipv4;
+ struct rte_table_lpm_ipv6_key lpm_ipv6;
+};
+static int
+match_convert(struct table_rule_match *mh,
+ union table_rule_match_low_level *ml,
+ int add);
+
+static int
+action_convert(struct rte_table_action *a,
+ struct table_rule_action *action,
+ struct rte_pipeline_table_entry *data);
+
+int
+pipeline_table_rule_add(const char *pipeline_name,
+ uint32_t table_id,
+ struct table_rule_match *match,
+ struct table_rule_action *action,
+ void **data)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (match == NULL) ||
+ (action == NULL) ||
+ (data == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables) ||
+ match_check(match, p, table_id) ||
+ action_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+ union table_rule_match_low_level match_ll;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ int key_found;
+ uint8_t *buffer;
+
+ buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
+ if (buffer == NULL)
+ return -1;
+
+ /* Table match-action rule conversion */
+ data_in = (struct rte_pipeline_table_entry *)buffer;
+
+ status = match_convert(match, &match_ll, 1);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ status = action_convert(a, action, data_in);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Add rule (match, action) to table */
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ &match_ll,
+ data_in,
+ &key_found,
+ &data_out);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+ /* Write Response */
+ *data = data_out;
+
+ free(buffer);
return 0;
}
- /* search custom pipelines of current thread */
- for (i = 0; i < n_custom; i++) {
- if (t->custom[i].pipeline_id != req->pipeline_id)
- continue;
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD;
+ req->id = table_id;
+ memcpy(&req->table_rule_add.match, match, sizeof(*match));
+ memcpy(&req->table_rule_add.action, action, sizeof(*action));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
- if (i < n_custom - 1)
- memcpy(&t->custom[i],
- &t->custom[i+1],
- (n_custom - 1 - i) * sizeof(struct app_thread_pipeline_data));
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *data = rsp->table_rule_add.data;
- n_custom--;
- t->n_custom = n_custom;
+ /* Free response */
+ pipeline_msg_free(rsp);
+ return status;
+}
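+
+/*
+ * Minimal usage sketch for the rule add API above (hypothetical pipeline
+ * name and field values; only the call sequence comes from this file):
+ *
+ *   struct table_rule_match match = {
+ *       .match_type = TABLE_ARRAY,
+ *       .match.array.pos = 0,
+ *   };
+ *   struct table_rule_action action = {
+ *       .action_mask = 1LLU << RTE_TABLE_ACTION_FWD,
+ *       .fwd = { .action = RTE_PIPELINE_ACTION_PORT, .id = 0 },
+ *   };
+ *   void *entry;
+ *
+ *   if (pipeline_table_rule_add("PIPELINE0", 0, &match, &action, &entry))
+ *       printf("rule add failed\n");
+ *
+ * When the pipeline is stopped the rule is added inline; otherwise the same
+ * request travels over the message queue to the data plane thread.
+ */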
+
+int
+pipeline_table_rule_add_default(const char *pipeline_name,
+ uint32_t table_id,
+ struct table_rule_action *action,
+ void **data)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (action == NULL) ||
+ (data == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables) ||
+ action_default_check(action, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint8_t *buffer;
+
+ buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t));
+ if (buffer == NULL)
+ return -1;
+
+ /* Apply actions */
+ data_in = (struct rte_pipeline_table_entry *)buffer;
+
+ data_in->action = action->fwd.action;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
+ data_in->port_id = action->fwd.id;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
+ data_in->table_id = action->fwd.id;
+
+ /* Add default rule to table */
+ status = rte_pipeline_table_default_entry_add(p->p,
+ table_id,
+ data_in,
+ &data_out);
+ if (status) {
+ free(buffer);
+ return -1;
+ }
+
+		/* Write response */
+ *data = data_out;
+
+ free(buffer);
return 0;
}
- /* return if pipeline not found */
- return -1;
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT;
+ req->id = table_id;
+ memcpy(&req->table_rule_add_default.action, action, sizeof(*action));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *data = rsp->table_rule_add_default.data;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
}
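+
+/*
+ * The default-entry path above applies only the forward action: data_in is
+ * filled straight from action->fwd and no rte_table_action_apply()
+ * conversion runs. Presumably action_default_check() restricts default
+ * rules to fwd-only action masks on the caller side.
+ */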
-static int
-thread_msg_req_handle(struct app_thread_data *t)
+int
+pipeline_table_rule_add_bulk(const char *pipeline_name,
+ uint32_t table_id,
+ struct table_rule_match *match,
+ struct table_rule_action *action,
+ void **data,
+ uint32_t *n_rules)
{
- void *msg_ptr;
- struct thread_msg_req *req;
- struct thread_msg_rsp *rsp;
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ uint32_t i;
+ int status;
- msg_ptr = thread_msg_recv(t->msgq_in);
- req = msg_ptr;
- rsp = msg_ptr;
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (match == NULL) ||
+ (action == NULL) ||
+ (data == NULL) ||
+ (n_rules == NULL) ||
+ (*n_rules == 0))
+ return -1;
- if (req != NULL)
- switch (req->type) {
- case THREAD_MSG_REQ_PIPELINE_ENABLE: {
- rsp->status = thread_pipeline_enable(t,
- (struct thread_pipeline_enable_msg_req *) req);
- thread_msg_send(t->msgq_out, rsp);
- break;
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+	for (i = 0; i < *n_rules; i++)
+		if (match_check(&match[i], p, table_id) ||
+			action_check(&action[i], p, table_id))
+			return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+ union table_rule_match_low_level *match_ll;
+ uint8_t *action_ll;
+ void **match_ll_ptr;
+ struct rte_pipeline_table_entry **action_ll_ptr;
+ struct rte_pipeline_table_entry **entries_ptr =
+ (struct rte_pipeline_table_entry **)data;
+ uint32_t bulk =
+ (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
+ int *found;
+
+ /* Memory allocation */
+ match_ll = calloc(*n_rules, sizeof(union table_rule_match_low_level));
+ action_ll = calloc(*n_rules, TABLE_RULE_ACTION_SIZE_MAX);
+ match_ll_ptr = calloc(*n_rules, sizeof(void *));
+ action_ll_ptr =
+ calloc(*n_rules, sizeof(struct rte_pipeline_table_entry *));
+ found = calloc(*n_rules, sizeof(int));
+
+ if (match_ll == NULL ||
+ action_ll == NULL ||
+ match_ll_ptr == NULL ||
+ action_ll_ptr == NULL ||
+ found == NULL)
+ goto fail;
+
+ for (i = 0; i < *n_rules; i++) {
+ match_ll_ptr[i] = (void *)&match_ll[i];
+ action_ll_ptr[i] =
+ (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
}
- case THREAD_MSG_REQ_PIPELINE_DISABLE: {
- rsp->status = thread_pipeline_disable(t,
- (struct thread_pipeline_disable_msg_req *) req);
- thread_msg_send(t->msgq_out, rsp);
- break;
+ /* Rule match conversion */
+ for (i = 0; i < *n_rules; i++) {
+ status = match_convert(&match[i], match_ll_ptr[i], 1);
+ if (status)
+ goto fail;
}
- case THREAD_MSG_REQ_HEADROOM_READ: {
- struct thread_headroom_read_msg_rsp *rsp =
- (struct thread_headroom_read_msg_rsp *)
- req;
+ /* Rule action conversion */
+ for (i = 0; i < *n_rules; i++) {
+ status = action_convert(a, &action[i], action_ll_ptr[i]);
+ if (status)
+ goto fail;
+ }
- rsp->headroom_ratio = t->headroom_ratio;
- rsp->status = 0;
- thread_msg_send(t->msgq_out, rsp);
- break;
+ /* Add rule (match, action) to table */
+ if (bulk) {
+ status = rte_pipeline_table_entry_add_bulk(p->p,
+ table_id,
+ match_ll_ptr,
+ action_ll_ptr,
+ *n_rules,
+ found,
+ entries_ptr);
+ if (status)
+ *n_rules = 0;
+ } else {
+ for (i = 0; i < *n_rules; i++) {
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ match_ll_ptr[i],
+ action_ll_ptr[i],
+ &found[i],
+ &entries_ptr[i]);
+ if (status) {
+ *n_rules = i;
+ break;
+ }
+ }
}
- default:
- break;
+
+ /* Free */
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ return status;
+
+fail:
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ *n_rules = 0;
+ return -1;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK;
+ req->id = table_id;
+ req->table_rule_add_bulk.match = match;
+ req->table_rule_add_bulk.action = action;
+ req->table_rule_add_bulk.data = data;
+ req->table_rule_add_bulk.n_rules = *n_rules;
+ req->table_rule_add_bulk.bulk =
+ (p->table[table_id].params.match_type == TABLE_ACL) ? 1 : 0;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+ if (status == 0)
+ *n_rules = rsp->table_rule_add_bulk.n_rules;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
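+
+/*
+ * Bulk semantics note: only ACL tables take the true bulk path
+ * (rte_pipeline_table_entry_add_bulk); every other table type falls back to
+ * a per-rule loop. On a partial failure the number of rules actually added
+ * is stored back into *n_rules before the error is returned, so the caller
+ * can tell how far the bulk insert got.
+ */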
+
+int
+pipeline_table_rule_delete(const char *pipeline_name,
+ uint32_t table_id,
+ struct table_rule_match *match)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (match == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables) ||
+ match_check(match, p, table_id))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ union table_rule_match_low_level match_ll;
+ int key_found;
+
+ status = match_convert(match, &match_ll, 0);
+ if (status)
+ return -1;
+
+ status = rte_pipeline_table_entry_delete(p->p,
+ table_id,
+ &match_ll,
+ &key_found,
+ NULL);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_DELETE;
+ req->id = table_id;
+ memcpy(&req->table_rule_delete.match, match, sizeof(*match));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_table_rule_delete_default(const char *pipeline_name,
+ uint32_t table_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ status = rte_pipeline_table_default_entry_delete(p->p,
+ table_id,
+ NULL);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT;
+ req->id = table_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_table_rule_stats_read(const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (data == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_stats_read(a,
+ data,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ;
+ req->id = table_id;
+ req->table_rule_stats_read.data = data;
+ req->table_rule_stats_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+		memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_table_mtr_profile_add(const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (profile == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_profile_add(a,
+ meter_profile_id,
+ profile);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
+ req->id = table_id;
+ req->table_mtr_profile_add.meter_profile_id = meter_profile_id;
+ memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_table_mtr_profile_delete(const char *pipeline_name,
+ uint32_t table_id,
+ uint32_t meter_profile_id)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if (pipeline_name == NULL)
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_profile_delete(a,
+ meter_profile_id);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE;
+ req->id = table_id;
+ req->table_mtr_profile_delete.meter_profile_id = meter_profile_id;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_table_rule_mtr_read(const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (data == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_meter_read(a,
+ data,
+ tc_mask,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ;
+ req->id = table_id;
+ req->table_rule_mtr_read.data = data;
+ req->table_rule_mtr_read.tc_mask = tc_mask;
+ req->table_rule_mtr_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+		memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_table_dscp_table_update(const char *pipeline_name,
+ uint32_t table_id,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *dscp_table)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (dscp_table == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_dscp_table_update(a,
+ dscp_mask,
+ dscp_table);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE;
+ req->id = table_id;
+ req->table_dscp_table_update.dscp_mask = dscp_mask;
+ memcpy(&req->table_dscp_table_update.dscp_table,
+ dscp_table, sizeof(*dscp_table));
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+int
+pipeline_table_rule_ttl_read(const char *pipeline_name,
+ uint32_t table_id,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear)
+{
+ struct pipeline *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status;
+
+ /* Check input params */
+ if ((pipeline_name == NULL) ||
+ (data == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ p = pipeline_find(pipeline_name);
+ if ((p == NULL) ||
+ (table_id >= p->n_tables))
+ return -1;
+
+ if (!pipeline_is_running(p)) {
+ struct rte_table_action *a = p->table[table_id].a;
+
+ status = rte_table_action_ttl_read(a,
+ data,
+ stats,
+ clear);
+
+ return status;
+ }
+
+ /* Allocate request */
+ req = pipeline_msg_alloc();
+ if (req == NULL)
+ return -1;
+
+ /* Write request */
+ req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ;
+ req->id = table_id;
+ req->table_rule_ttl_read.data = data;
+ req->table_rule_ttl_read.clear = clear;
+
+ /* Send request and wait for response */
+ rsp = pipeline_msg_send_recv(p, req);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ status = rsp->status;
+	if (status == 0)
+		memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats));
+
+ /* Free response */
+ pipeline_msg_free(rsp);
+
+ return status;
+}
+
+/**
+ * Data plane threads: message handling
+ */
+static inline struct pipeline_msg_req *
+pipeline_msg_recv(struct rte_ring *msgq_req)
+{
+ struct pipeline_msg_req *req;
+
+ int status = rte_ring_sc_dequeue(msgq_req, (void **) &req);
+
+ if (status != 0)
+ return NULL;
+
+ return req;
+}
+
+static inline void
+pipeline_msg_send(struct rte_ring *msgq_rsp,
+ struct pipeline_msg_rsp *rsp)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(msgq_rsp, rsp);
+ } while (status == -ENOBUFS);
+}
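+
+/*
+ * Note that requests and responses share one buffer: each handler below
+ * casts the request pointer to a response pointer and fills the status in
+ * place, so the data plane never allocates while handling messages. The
+ * recurring pattern is:
+ *
+ *   struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ *   rsp->status = ...;
+ *   return rsp;
+ */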
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t port_id = req->id;
+ int clear = req->port_in_stats_read.clear;
+
+ rsp->status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ &rsp->port_in_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_enable(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t port_id = req->id;
+
+ rsp->status = rte_pipeline_port_in_enable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_in_disable(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t port_id = req->id;
+
+ rsp->status = rte_pipeline_port_in_disable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t port_id = req->id;
+ int clear = req->port_out_stats_read.clear;
+
+ rsp->status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ &rsp->port_out_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t port_id = req->id;
+ int clear = req->table_stats_read.clear;
+
+ rsp->status = rte_pipeline_table_stats_read(p->p,
+ port_id,
+ &rsp->table_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static int
+match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32)
+{
+ if (depth > 128)
+ return -1;
+
+ switch (depth / 32) {
+ case 0:
+ depth32[0] = depth;
+ depth32[1] = 0;
+ depth32[2] = 0;
+ depth32[3] = 0;
+ return 0;
+
+ case 1:
+ depth32[0] = 32;
+ depth32[1] = depth - 32;
+ depth32[2] = 0;
+ depth32[3] = 0;
+ return 0;
+
+ case 2:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = depth - 64;
+ depth32[3] = 0;
+ return 0;
+
+ case 3:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = 32;
+ depth32[3] = depth - 96;
+ return 0;
+
+ case 4:
+ depth32[0] = 32;
+ depth32[1] = 32;
+ depth32[2] = 32;
+ depth32[3] = 32;
+ return 0;
+
+ default:
+ return -1;
+ }
+}
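+
+/*
+ * Worked example for the helper above: depth 70 splits into the per-field
+ * depths {32, 32, 6, 0}, i.e. the first two 32-bit ACL fields are matched
+ * in full, the third on its top 6 bits, and the fourth is a wildcard.
+ * depth 128 hits "case 4" and yields {32, 32, 32, 32}; anything above 128
+ * is rejected.
+ */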
+
+static int
+match_convert(struct table_rule_match *mh,
+ union table_rule_match_low_level *ml,
+ int add)
+{
+ memset(ml, 0, sizeof(*ml));
+
+ switch (mh->match_type) {
+ case TABLE_ACL:
+ if (mh->match.acl.ip_version)
+ if (add) {
+ ml->acl_add.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_add.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_add.field_value[1].value.u32 =
+ mh->match.acl.ipv4.sa;
+ ml->acl_add.field_value[1].mask_range.u32 =
+ mh->match.acl.sa_depth;
+
+ ml->acl_add.field_value[2].value.u32 =
+ mh->match.acl.ipv4.da;
+ ml->acl_add.field_value[2].mask_range.u32 =
+ mh->match.acl.da_depth;
+
+ ml->acl_add.field_value[3].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_add.field_value[3].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_add.field_value[4].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_add.field_value[4].mask_range.u16 =
+ mh->match.acl.dp1;
+
+ ml->acl_add.priority =
+ (int32_t) mh->match.acl.priority;
+ } else {
+ ml->acl_delete.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_delete.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_delete.field_value[1].value.u32 =
+ mh->match.acl.ipv4.sa;
+ ml->acl_delete.field_value[1].mask_range.u32 =
+ mh->match.acl.sa_depth;
+
+ ml->acl_delete.field_value[2].value.u32 =
+ mh->match.acl.ipv4.da;
+ ml->acl_delete.field_value[2].mask_range.u32 =
+ mh->match.acl.da_depth;
+
+ ml->acl_delete.field_value[3].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_delete.field_value[3].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_delete.field_value[4].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_delete.field_value[4].mask_range.u16 =
+ mh->match.acl.dp1;
+ }
+ else
+ if (add) {
+ uint32_t *sa32 =
+ (uint32_t *) mh->match.acl.ipv6.sa;
+ uint32_t *da32 =
+ (uint32_t *) mh->match.acl.ipv6.da;
+ uint32_t sa32_depth[4], da32_depth[4];
+ int status;
+
+ status = match_convert_ipv6_depth(
+ mh->match.acl.sa_depth,
+ sa32_depth);
+ if (status)
+ return status;
+
+ status = match_convert_ipv6_depth(
+ mh->match.acl.da_depth,
+ da32_depth);
+ if (status)
+ return status;
+
+ ml->acl_add.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_add.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_add.field_value[1].value.u32 = sa32[0];
+ ml->acl_add.field_value[1].mask_range.u32 =
+ sa32_depth[0];
+ ml->acl_add.field_value[2].value.u32 = sa32[1];
+ ml->acl_add.field_value[2].mask_range.u32 =
+ sa32_depth[1];
+ ml->acl_add.field_value[3].value.u32 = sa32[2];
+ ml->acl_add.field_value[3].mask_range.u32 =
+ sa32_depth[2];
+ ml->acl_add.field_value[4].value.u32 = sa32[3];
+ ml->acl_add.field_value[4].mask_range.u32 =
+ sa32_depth[3];
+
+ ml->acl_add.field_value[5].value.u32 = da32[0];
+ ml->acl_add.field_value[5].mask_range.u32 =
+ da32_depth[0];
+ ml->acl_add.field_value[6].value.u32 = da32[1];
+ ml->acl_add.field_value[6].mask_range.u32 =
+ da32_depth[1];
+ ml->acl_add.field_value[7].value.u32 = da32[2];
+ ml->acl_add.field_value[7].mask_range.u32 =
+ da32_depth[2];
+ ml->acl_add.field_value[8].value.u32 = da32[3];
+ ml->acl_add.field_value[8].mask_range.u32 =
+ da32_depth[3];
+
+ ml->acl_add.field_value[9].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_add.field_value[9].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_add.field_value[10].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_add.field_value[10].mask_range.u16 =
+ mh->match.acl.dp1;
+
+ ml->acl_add.priority =
+ (int32_t) mh->match.acl.priority;
+ } else {
+ uint32_t *sa32 =
+ (uint32_t *) mh->match.acl.ipv6.sa;
+ uint32_t *da32 =
+ (uint32_t *) mh->match.acl.ipv6.da;
+ uint32_t sa32_depth[4], da32_depth[4];
+ int status;
+
+ status = match_convert_ipv6_depth(
+ mh->match.acl.sa_depth,
+ sa32_depth);
+ if (status)
+ return status;
+
+ status = match_convert_ipv6_depth(
+ mh->match.acl.da_depth,
+ da32_depth);
+ if (status)
+ return status;
+
+ ml->acl_delete.field_value[0].value.u8 =
+ mh->match.acl.proto;
+ ml->acl_delete.field_value[0].mask_range.u8 =
+ mh->match.acl.proto_mask;
+
+ ml->acl_delete.field_value[1].value.u32 =
+ sa32[0];
+ ml->acl_delete.field_value[1].mask_range.u32 =
+ sa32_depth[0];
+ ml->acl_delete.field_value[2].value.u32 =
+ sa32[1];
+ ml->acl_delete.field_value[2].mask_range.u32 =
+ sa32_depth[1];
+ ml->acl_delete.field_value[3].value.u32 =
+ sa32[2];
+ ml->acl_delete.field_value[3].mask_range.u32 =
+ sa32_depth[2];
+ ml->acl_delete.field_value[4].value.u32 =
+ sa32[3];
+ ml->acl_delete.field_value[4].mask_range.u32 =
+ sa32_depth[3];
+
+ ml->acl_delete.field_value[5].value.u32 =
+ da32[0];
+ ml->acl_delete.field_value[5].mask_range.u32 =
+ da32_depth[0];
+ ml->acl_delete.field_value[6].value.u32 =
+ da32[1];
+ ml->acl_delete.field_value[6].mask_range.u32 =
+ da32_depth[1];
+ ml->acl_delete.field_value[7].value.u32 =
+ da32[2];
+ ml->acl_delete.field_value[7].mask_range.u32 =
+ da32_depth[2];
+ ml->acl_delete.field_value[8].value.u32 =
+ da32[3];
+ ml->acl_delete.field_value[8].mask_range.u32 =
+ da32_depth[3];
+
+ ml->acl_delete.field_value[9].value.u16 =
+ mh->match.acl.sp0;
+ ml->acl_delete.field_value[9].mask_range.u16 =
+ mh->match.acl.sp1;
+
+ ml->acl_delete.field_value[10].value.u16 =
+ mh->match.acl.dp0;
+ ml->acl_delete.field_value[10].mask_range.u16 =
+ mh->match.acl.dp1;
+ }
+ return 0;
+
+ case TABLE_ARRAY:
+ ml->array.pos = mh->match.array.pos;
+ return 0;
+
+ case TABLE_HASH:
+ memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash));
+ return 0;
+
+ case TABLE_LPM:
+ if (mh->match.lpm.ip_version) {
+ ml->lpm_ipv4.ip = mh->match.lpm.ipv4;
+ ml->lpm_ipv4.depth = mh->match.lpm.depth;
+ } else {
+ memcpy(ml->lpm_ipv6.ip,
+ mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip));
+ ml->lpm_ipv6.depth = mh->match.lpm.depth;
}
+ return 0;
+
+ default:
+ return -1;
+ }
+}
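+
+/*
+ * For TABLE_HASH the conversion copies sizeof(ml->hash) bytes, i.e. the
+ * full TABLE_RULE_MATCH_SIZE_MAX buffer, regardless of the table's actual
+ * key size; this assumes mh->match.hash.key is dimensioned to the same
+ * maximum, with the table layer ignoring bytes beyond the real key width.
+ */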
+
+static int
+action_convert(struct rte_table_action *a,
+ struct table_rule_action *action,
+ struct rte_pipeline_table_entry *data)
+{
+ int status;
+
+ /* Apply actions */
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_FWD,
+ &action->fwd);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_LB,
+ &action->lb);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_MTR,
+ &action->mtr);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TM,
+ &action->tm);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_ENCAP,
+ &action->encap);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_NAT,
+ &action->nat);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TTL,
+ &action->ttl);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_STATS,
+ &action->stats);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TIME,
+ &action->time);
+
+ if (status)
+ return status;
+ }
+
return 0;
}
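+
+/*
+ * Each bit set in action_mask selects one rte_table_action_apply() call and
+ * the first failure aborts the conversion. A caller composes actions as a
+ * bitmask, e.g. (sketch, action values assumed):
+ *
+ *   action->action_mask = (1LLU << RTE_TABLE_ACTION_FWD) |
+ *                         (1LLU << RTE_TABLE_ACTION_STATS);
+ */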
-static void
-thread_headroom_update(struct app_thread_data *t, uint64_t time)
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
{
- uint64_t time_diff = time - t->headroom_time;
+ union table_rule_match_low_level match_ll;
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ struct table_rule_match *match = &req->table_rule_add.match;
+ struct table_rule_action *action = &req->table_rule_add.action;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint32_t table_id = req->id;
+ int key_found, status;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ /* Apply actions */
+ memset(p->buffer, 0, sizeof(p->buffer));
+ data_in = (struct rte_pipeline_table_entry *) p->buffer;
+
+ status = match_convert(match, &match_ll, 1);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ status = action_convert(a, action, data_in);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ &match_ll,
+ data_in,
+ &key_found,
+ &data_out);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
- t->headroom_ratio =
- ((double) t->headroom_cycles) / ((double) time_diff);
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add.data = data_out;
- t->headroom_cycles = 0;
- t->headroom_time = rte_rdtsc_precise();
+ return rsp;
}
-int
-app_thread(void *arg)
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
{
- struct app_params *app = (struct app_params *) arg;
- uint32_t core_id = rte_lcore_id(), i, j;
- struct app_thread_data *t = &app->thread_data[core_id];
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ struct table_rule_action *action = &req->table_rule_add_default.action;
+ struct rte_pipeline_table_entry *data_in, *data_out;
+ uint32_t table_id = req->id;
+ int status;
- for (i = 0; ; i++) {
- uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
- uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
+ /* Apply actions */
+ memset(p->buffer, 0, sizeof(p->buffer));
+ data_in = (struct rte_pipeline_table_entry *) p->buffer;
+
+ data_in->action = action->fwd.action;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_PORT)
+ data_in->port_id = action->fwd.id;
+ if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE)
+ data_in->table_id = action->fwd.id;
+
+ /* Add default rule to table */
+ status = rte_pipeline_table_default_entry_add(p->p,
+ table_id,
+ data_in,
+ &data_out);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
- /* Run regular pipelines */
- for (j = 0; j < n_regular; j++) {
- struct app_thread_pipeline_data *data = &t->regular[j];
- struct pipeline *p = data->be;
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add_default.data = data_out;
- PIPELINE_RUN_REGULAR(t, p);
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+
+ uint32_t table_id = req->id;
+ struct table_rule_match *match = req->table_rule_add_bulk.match;
+ struct table_rule_action *action = req->table_rule_add_bulk.action;
+ struct rte_pipeline_table_entry **data =
+ (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data;
+ uint32_t n_rules = req->table_rule_add_bulk.n_rules;
+ uint32_t bulk = req->table_rule_add_bulk.bulk;
+
+ struct rte_table_action *a = p->table_data[table_id].a;
+ union table_rule_match_low_level *match_ll;
+ uint8_t *action_ll;
+ void **match_ll_ptr;
+ struct rte_pipeline_table_entry **action_ll_ptr;
+ int *found, status;
+ uint32_t i;
+
+ /* Memory allocation */
+ match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level));
+ action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX);
+ match_ll_ptr = calloc(n_rules, sizeof(void *));
+ action_ll_ptr =
+ calloc(n_rules, sizeof(struct rte_pipeline_table_entry *));
+ found = calloc(n_rules, sizeof(int));
+
+ if ((match_ll == NULL) ||
+ (action_ll == NULL) ||
+ (match_ll_ptr == NULL) ||
+ (action_ll_ptr == NULL) ||
+ (found == NULL))
+ goto fail;
+
+ for (i = 0; i < n_rules; i++) {
+ match_ll_ptr[i] = (void *)&match_ll[i];
+ action_ll_ptr[i] =
+ (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX];
+ }
+
+ /* Rule match conversion */
+ for (i = 0; i < n_rules; i++) {
+ status = match_convert(&match[i], match_ll_ptr[i], 1);
+ if (status)
+ goto fail;
+ }
+
+ /* Rule action conversion */
+ for (i = 0; i < n_rules; i++) {
+ status = action_convert(a, &action[i], action_ll_ptr[i]);
+ if (status)
+ goto fail;
+ }
+
+ /* Add rule (match, action) to table */
+ if (bulk) {
+ status = rte_pipeline_table_entry_add_bulk(p->p,
+ table_id,
+ match_ll_ptr,
+ action_ll_ptr,
+ n_rules,
+ found,
+ data);
+ if (status)
+ n_rules = 0;
+ } else
+ for (i = 0; i < n_rules; i++) {
+ status = rte_pipeline_table_entry_add(p->p,
+ table_id,
+ match_ll_ptr[i],
+ action_ll_ptr[i],
+ &found[i],
+ &data[i]);
+ if (status) {
+ n_rules = i;
+ break;
+ }
}
- /* Run custom pipelines */
- for (j = 0; j < n_custom; j++) {
- struct app_thread_pipeline_data *data = &t->custom[j];
+ /* Write response */
+ rsp->status = 0;
+ rsp->table_rule_add_bulk.n_rules = n_rules;
+
+ /* Free */
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ return rsp;
+
+fail:
+ free(found);
+ free(action_ll_ptr);
+ free(match_ll_ptr);
+ free(action_ll);
+ free(match_ll);
+
+ rsp->status = -1;
+ rsp->table_rule_add_bulk.n_rules = 0;
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_delete(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ union table_rule_match_low_level match_ll;
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ struct table_rule_match *match = &req->table_rule_delete.match;
+ uint32_t table_id = req->id;
+ int key_found, status;
+
+ status = match_convert(match, &match_ll, 0);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ table_id,
+ &match_ll,
+ &key_found,
+ NULL);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t table_id = req->id;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ table_id,
+ NULL);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_stats_read.data;
+ int clear = req->table_rule_stats_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_stats_read(a,
+ data,
+ &rsp->table_rule_stats_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t table_id = req->id;
+ uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id;
+ struct rte_table_action_meter_profile *profile =
+ &req->table_mtr_profile_add.profile;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_profile_add(a,
+ meter_profile_id,
+ profile);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t table_id = req->id;
+ uint32_t meter_profile_id =
+ req->table_mtr_profile_delete.meter_profile_id;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_profile_delete(a,
+ meter_profile_id);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_mtr_read.data;
+ uint32_t tc_mask = req->table_rule_mtr_read.tc_mask;
+ int clear = req->table_rule_mtr_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_meter_read(a,
+ data,
+ tc_mask,
+ &rsp->table_rule_mtr_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t table_id = req->id;
+ uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask;
+ struct rte_table_action_dscp_table *dscp_table =
+ &req->table_dscp_table_update.dscp_table;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_dscp_table_update(a,
+ dscp_mask,
+ dscp_table);
+
+ return rsp;
+}
+
+static struct pipeline_msg_rsp *
+pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p,
+ struct pipeline_msg_req *req)
+{
+ struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *) req;
+ uint32_t table_id = req->id;
+ void *data = req->table_rule_ttl_read.data;
+ int clear = req->table_rule_ttl_read.clear;
+ struct rte_table_action *a = p->table_data[table_id].a;
+
+ rsp->status = rte_table_action_ttl_read(a,
+ data,
+ &rsp->table_rule_ttl_read.stats,
+ clear);
+
+ return rsp;
+}
+
+static void
+pipeline_msg_handle(struct pipeline_data *p)
+{
+ for ( ; ; ) {
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
- PIPELINE_RUN_CUSTOM(t, data);
+ req = pipeline_msg_recv(p->msgq_req);
+ if (req == NULL)
+ break;
+
+ switch (req->type) {
+ case PIPELINE_REQ_PORT_IN_STATS_READ:
+ rsp = pipeline_msg_handle_port_in_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_IN_ENABLE:
+ rsp = pipeline_msg_handle_port_in_enable(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_IN_DISABLE:
+ rsp = pipeline_msg_handle_port_in_disable(p, req);
+ break;
+
+ case PIPELINE_REQ_PORT_OUT_STATS_READ:
+ rsp = pipeline_msg_handle_port_out_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_STATS_READ:
+ rsp = pipeline_msg_handle_table_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD:
+ rsp = pipeline_msg_handle_table_rule_add(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT:
+ rsp = pipeline_msg_handle_table_rule_add_default(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_ADD_BULK:
+ rsp = pipeline_msg_handle_table_rule_add_bulk(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_DELETE:
+ rsp = pipeline_msg_handle_table_rule_delete(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT:
+ rsp = pipeline_msg_handle_table_rule_delete_default(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_STATS_READ:
+ rsp = pipeline_msg_handle_table_rule_stats_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD:
+ rsp = pipeline_msg_handle_table_mtr_profile_add(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE:
+ rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_MTR_READ:
+ rsp = pipeline_msg_handle_table_rule_mtr_read(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE:
+ rsp = pipeline_msg_handle_table_dscp_table_update(p, req);
+ break;
+
+ case PIPELINE_REQ_TABLE_RULE_TTL_READ:
+ rsp = pipeline_msg_handle_table_rule_ttl_read(p, req);
+ break;
+
+ default:
+ rsp = (struct pipeline_msg_rsp *) req;
+ rsp->status = -1;
}
- /* Timer */
- if ((i & 0xF) == 0) {
- uint64_t time = rte_get_tsc_cycles();
- uint64_t t_deadline = UINT64_MAX;
+ pipeline_msg_send(p->msgq_rsp, rsp);
+ }
+}
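+
+/*
+ * pipeline_msg_handle() drains the request ring completely on each call and
+ * echoes unknown request types back with status -1 instead of dropping
+ * them, so a control-plane caller blocked in pipeline_msg_send_recv() is
+ * always unblocked.
+ */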
- if (time < t->deadline)
- continue;
+/**
+ * Data plane threads: main
+ */
+int
+thread_main(void *arg __rte_unused)
+{
+ struct thread_data *t;
+ uint32_t thread_id, i;
- /* Timer for regular pipelines */
- for (j = 0; j < n_regular; j++) {
- struct app_thread_pipeline_data *data =
- &t->regular[j];
- uint64_t p_deadline = data->deadline;
+ thread_id = rte_lcore_id();
+ t = &thread_data[thread_id];
- if (p_deadline <= time) {
- data->f_timer(data->be);
- p_deadline = time + data->timer_period;
- data->deadline = p_deadline;
- }
+ /* Dispatch loop */
+ for (i = 0; ; i++) {
+ uint32_t j;
- if (p_deadline < t_deadline)
- t_deadline = p_deadline;
- }
+ /* Data Plane */
+ for (j = 0; j < t->n_pipelines; j++)
+ rte_pipeline_run(t->p[j]);
- /* Timer for custom pipelines */
- for (j = 0; j < n_custom; j++) {
- struct app_thread_pipeline_data *data =
- &t->custom[j];
- uint64_t p_deadline = data->deadline;
+ /* Control Plane */
+ if ((i & 0xF) == 0) {
+ uint64_t time = rte_get_tsc_cycles();
+ uint64_t time_next_min = UINT64_MAX;
+
+ if (time < t->time_next_min)
+ continue;
+
+ /* Pipeline message queues */
+ for (j = 0; j < t->n_pipelines; j++) {
+ struct pipeline_data *p =
+ &t->pipeline_data[j];
+ uint64_t time_next = p->time_next;
- if (p_deadline <= time) {
- data->f_timer(data->be);
- p_deadline = time + data->timer_period;
- data->deadline = p_deadline;
+ if (time_next <= time) {
+ pipeline_msg_handle(p);
+ rte_pipeline_flush(p->p);
+ time_next = time + p->timer_period;
+ p->time_next = time_next;
}
- if (p_deadline < t_deadline)
- t_deadline = p_deadline;
+ if (time_next < time_next_min)
+ time_next_min = time_next;
}
- /* Timer for thread message request */
+ /* Thread message queues */
{
- uint64_t deadline = t->thread_req_deadline;
+ uint64_t time_next = t->time_next;
- if (deadline <= time) {
- thread_msg_req_handle(t);
- thread_headroom_update(t, time);
- deadline = time + t->timer_period;
- t->thread_req_deadline = deadline;
+ if (time_next <= time) {
+ thread_msg_handle(t);
+ time_next = time + t->timer_period;
+ t->time_next = time_next;
}
- if (deadline < t_deadline)
- t_deadline = deadline;
+ if (time_next < time_next_min)
+ time_next_min = time_next;
}
-
- t->deadline = t_deadline;
+ t->time_next_min = time_next_min;
}
}
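+
+/*
+ * Scheduling note for the dispatch loop above: control plane work runs at
+ * most once every 16 iterations ((i & 0xF) == 0) and only once the earliest
+ * recorded deadline (t->time_next_min) has passed, so message handling
+ * steals almost no cycles from rte_pipeline_run() in steady state.
+ */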
diff --git a/examples/ip_pipeline/thread.h b/examples/ip_pipeline/thread.h
index 2c4fb6ae..facdf004 100644
--- a/examples/ip_pipeline/thread.h
+++ b/examples/ip_pipeline/thread.h
@@ -1,69 +1,24 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
-#ifndef THREAD_H_
-#define THREAD_H_
+#ifndef _INCLUDE_THREAD_H_
+#define _INCLUDE_THREAD_H_
-#include "app.h"
-#include "pipeline_be.h"
+#include <stdint.h>
-enum thread_msg_req_type {
- THREAD_MSG_REQ_PIPELINE_ENABLE = 0,
- THREAD_MSG_REQ_PIPELINE_DISABLE,
- THREAD_MSG_REQ_HEADROOM_READ,
- THREAD_MSG_REQS
-};
+int
+thread_pipeline_enable(uint32_t thread_id,
+ const char *pipeline_name);
-struct thread_msg_req {
- enum thread_msg_req_type type;
-};
+int
+thread_pipeline_disable(uint32_t thread_id,
+ const char *pipeline_name);
-struct thread_msg_rsp {
- int status;
-};
+int
+thread_init(void);
-/*
- * PIPELINE ENABLE
- */
-struct thread_pipeline_enable_msg_req {
- enum thread_msg_req_type type;
-
- uint32_t pipeline_id;
- void *be;
- pipeline_be_op_run f_run;
- pipeline_be_op_timer f_timer;
- uint64_t timer_period;
-};
-
-struct thread_pipeline_enable_msg_rsp {
- int status;
-};
-
-/*
- * PIPELINE DISABLE
- */
-struct thread_pipeline_disable_msg_req {
- enum thread_msg_req_type type;
-
- uint32_t pipeline_id;
-};
-
-struct thread_pipeline_disable_msg_rsp {
- int status;
-};
-
-/*
- * THREAD HEADROOM
- */
-struct thread_headroom_read_msg_req {
- enum thread_msg_req_type type;
-};
-
-struct thread_headroom_read_msg_rsp {
- int status;
-
- double headroom_ratio;
-};
+int
+thread_main(void *arg);
-#endif /* THREAD_H_ */
+#endif /* _INCLUDE_THREAD_H_ */
diff --git a/examples/ip_pipeline/thread_fe.c b/examples/ip_pipeline/thread_fe.c
deleted file mode 100644
index 4590c2b5..00000000
--- a/examples/ip_pipeline/thread_fe.c
+++ /dev/null
@@ -1,457 +0,0 @@
-#include <rte_common.h>
-#include <rte_ring.h>
-#include <rte_malloc.h>
-#include <cmdline_rdline.h>
-#include <cmdline_parse.h>
-#include <cmdline_parse_num.h>
-#include <cmdline_parse_string.h>
-
-#include "thread.h"
-#include "thread_fe.h"
-#include "pipeline.h"
-#include "pipeline_common_fe.h"
-#include "app.h"
-
-static inline void *
-thread_msg_send_recv(struct app_params *app,
- uint32_t socket_id, uint32_t core_id, uint32_t ht_id,
- void *msg,
- uint32_t timeout_ms)
-{
- struct rte_ring *r_req = app_thread_msgq_in_get(app,
- socket_id, core_id, ht_id);
- struct rte_ring *r_rsp = app_thread_msgq_out_get(app,
- socket_id, core_id, ht_id);
- uint64_t hz = rte_get_tsc_hz();
- void *msg_recv;
- uint64_t deadline;
- int status;
-
- /* send */
- do {
- status = rte_ring_sp_enqueue(r_req, (void *) msg);
- } while (status == -ENOBUFS);
-
- /* recv */
- deadline = (timeout_ms) ?
- (rte_rdtsc() + ((hz * timeout_ms) / 1000)) :
- UINT64_MAX;
-
- do {
- if (rte_rdtsc() > deadline)
- return NULL;
-
- status = rte_ring_sc_dequeue(r_rsp, &msg_recv);
- } while (status != 0);
-
- return msg_recv;
-}
-
-int
-app_pipeline_enable(struct app_params *app,
- uint32_t socket_id,
- uint32_t core_id,
- uint32_t hyper_th_id,
- uint32_t pipeline_id)
-{
- struct thread_pipeline_enable_msg_req *req;
- struct thread_pipeline_enable_msg_rsp *rsp;
- int thread_id;
- struct app_pipeline_data *p;
- struct app_pipeline_params *p_params;
- struct pipeline_type *p_type;
- int status;
-
- if (app == NULL)
- return -1;
-
- thread_id = cpu_core_map_get_lcore_id(app->core_map,
- socket_id,
- core_id,
- hyper_th_id);
-
- if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
- return -1;
-
- if (app_pipeline_data(app, pipeline_id) == NULL)
- return -1;
-
- p = &app->pipeline_data[pipeline_id];
- p_params = &app->pipeline_params[pipeline_id];
- p_type = app_pipeline_type_find(app, p_params->type);
-
- if (p_type == NULL)
- return -1;
-
- if (p->enabled == 1)
- return -1;
-
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = THREAD_MSG_REQ_PIPELINE_ENABLE;
- req->pipeline_id = pipeline_id;
- req->be = p->be;
- req->f_run = p_type->be_ops->f_run;
- req->f_timer = p_type->be_ops->f_timer;
- req->timer_period = p->timer_period;
-
- rsp = thread_msg_send_recv(app,
- socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
- if (rsp == NULL)
- return -1;
-
- status = rsp->status;
- app_msg_free(app, rsp);
-
- if (status != 0)
- return -1;
-
- p->enabled = 1;
- return 0;
-}
-
-int
-app_pipeline_disable(struct app_params *app,
- uint32_t socket_id,
- uint32_t core_id,
- uint32_t hyper_th_id,
- uint32_t pipeline_id)
-{
- struct thread_pipeline_disable_msg_req *req;
- struct thread_pipeline_disable_msg_rsp *rsp;
- int thread_id;
- struct app_pipeline_data *p;
- int status;
-
- if (app == NULL)
- return -1;
-
- thread_id = cpu_core_map_get_lcore_id(app->core_map,
- socket_id,
- core_id,
- hyper_th_id);
-
- if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
- return -1;
-
- if (app_pipeline_data(app, pipeline_id) == NULL)
- return -1;
-
- p = &app->pipeline_data[pipeline_id];
-
- if (p->enabled == 0)
- return -1;
-
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = THREAD_MSG_REQ_PIPELINE_DISABLE;
- req->pipeline_id = pipeline_id;
-
- rsp = thread_msg_send_recv(app,
- socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
-
- if (rsp == NULL)
- return -1;
-
- status = rsp->status;
- app_msg_free(app, rsp);
-
- if (status != 0)
- return -1;
-
- p->enabled = 0;
- return 0;
-}
-
-int
-app_thread_headroom(struct app_params *app,
- uint32_t socket_id,
- uint32_t core_id,
- uint32_t hyper_th_id)
-{
- struct thread_headroom_read_msg_req *req;
- struct thread_headroom_read_msg_rsp *rsp;
- int thread_id;
- int status;
-
- if (app == NULL)
- return -1;
-
- thread_id = cpu_core_map_get_lcore_id(app->core_map,
- socket_id,
- core_id,
- hyper_th_id);
-
- if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
- return -1;
-
- req = app_msg_alloc(app);
- if (req == NULL)
- return -1;
-
- req->type = THREAD_MSG_REQ_HEADROOM_READ;
-
- rsp = thread_msg_send_recv(app,
- socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
-
- if (rsp == NULL)
- return -1;
-
- status = rsp->status;
-
- if (status != 0)
- return -1;
-
- printf("%.3f%%\n", rsp->headroom_ratio * 100);
-
-
- app_msg_free(app, rsp);
-
- return 0;
-}
-
-/*
- * pipeline enable
- */
-
-struct cmd_pipeline_enable_result {
- cmdline_fixed_string_t t_string;
- cmdline_fixed_string_t t_id_string;
- cmdline_fixed_string_t pipeline_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t enable_string;
-};
-
-static void
-cmd_pipeline_enable_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_pipeline_enable_result *params = parsed_result;
- struct app_params *app = data;
- int status;
- uint32_t core_id, socket_id, hyper_th_id;
-
- if (parse_pipeline_core(&socket_id,
- &core_id,
- &hyper_th_id,
- params->t_id_string) != 0) {
- printf("Command failed\n");
- return;
- }
-
- status = app_pipeline_enable(app,
- socket_id,
- core_id,
- hyper_th_id,
- params->pipeline_id);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-static cmdline_parse_token_string_t cmd_pipeline_enable_t_string =
- TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_string, "t");
-
-static cmdline_parse_token_string_t cmd_pipeline_enable_t_id_string =
- TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_id_string,
- NULL);
-
-static cmdline_parse_token_string_t cmd_pipeline_enable_pipeline_string =
- TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_string,
- "pipeline");
-
-static cmdline_parse_token_num_t cmd_pipeline_enable_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_pipeline_enable_enable_string =
- TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, enable_string,
- "enable");
-
-static cmdline_parse_inst_t cmd_pipeline_enable = {
- .f = cmd_pipeline_enable_parsed,
- .data = NULL,
- .help_str = "Enable pipeline on specified core",
- .tokens = {
- (void *)&cmd_pipeline_enable_t_string,
- (void *)&cmd_pipeline_enable_t_id_string,
- (void *)&cmd_pipeline_enable_pipeline_string,
- (void *)&cmd_pipeline_enable_pipeline_id,
- (void *)&cmd_pipeline_enable_enable_string,
- NULL,
- },
-};
-
-/*
- * pipeline disable
- */
-
-struct cmd_pipeline_disable_result {
- cmdline_fixed_string_t t_string;
- cmdline_fixed_string_t t_id_string;
- cmdline_fixed_string_t pipeline_string;
- uint32_t pipeline_id;
- cmdline_fixed_string_t disable_string;
-};
-
-static void
-cmd_pipeline_disable_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_pipeline_disable_result *params = parsed_result;
- struct app_params *app = data;
- int status;
- uint32_t core_id, socket_id, hyper_th_id;
-
- if (parse_pipeline_core(&socket_id,
- &core_id,
- &hyper_th_id,
- params->t_id_string) != 0) {
- printf("Command failed\n");
- return;
- }
-
- status = app_pipeline_disable(app,
- socket_id,
- core_id,
- hyper_th_id,
- params->pipeline_id);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-static cmdline_parse_token_string_t cmd_pipeline_disable_t_string =
- TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_string, "t");
-
-static cmdline_parse_token_string_t cmd_pipeline_disable_t_id_string =
- TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_id_string,
- NULL);
-
-static cmdline_parse_token_string_t cmd_pipeline_disable_pipeline_string =
- TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result,
- pipeline_string, "pipeline");
-
-static cmdline_parse_token_num_t cmd_pipeline_disable_pipeline_id =
- TOKEN_NUM_INITIALIZER(struct cmd_pipeline_disable_result, pipeline_id,
- UINT32);
-
-static cmdline_parse_token_string_t cmd_pipeline_disable_disable_string =
- TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, disable_string,
- "disable");
-
-static cmdline_parse_inst_t cmd_pipeline_disable = {
- .f = cmd_pipeline_disable_parsed,
- .data = NULL,
- .help_str = "Disable pipeline on specified core",
- .tokens = {
- (void *)&cmd_pipeline_disable_t_string,
- (void *)&cmd_pipeline_disable_t_id_string,
- (void *)&cmd_pipeline_disable_pipeline_string,
- (void *)&cmd_pipeline_disable_pipeline_id,
- (void *)&cmd_pipeline_disable_disable_string,
- NULL,
- },
-};
-
-
-/*
- * thread headroom
- */
-
-struct cmd_thread_headroom_result {
- cmdline_fixed_string_t t_string;
- cmdline_fixed_string_t t_id_string;
- cmdline_fixed_string_t headroom_string;
-};
-
-static void
-cmd_thread_headroom_parsed(
- void *parsed_result,
- __rte_unused struct cmdline *cl,
- void *data)
-{
- struct cmd_thread_headroom_result *params = parsed_result;
- struct app_params *app = data;
- int status;
- uint32_t core_id, socket_id, hyper_th_id;
-
- if (parse_pipeline_core(&socket_id,
- &core_id,
- &hyper_th_id,
- params->t_id_string) != 0) {
- printf("Command failed\n");
- return;
- }
-
- status = app_thread_headroom(app,
- socket_id,
- core_id,
- hyper_th_id);
-
- if (status != 0)
- printf("Command failed\n");
-}
-
-static cmdline_parse_token_string_t cmd_thread_headroom_t_string =
- TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
- t_string, "t");
-
-static cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
- TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
- t_id_string, NULL);
-
-static cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
- TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
- headroom_string, "headroom");
-
-static cmdline_parse_inst_t cmd_thread_headroom = {
- .f = cmd_thread_headroom_parsed,
- .data = NULL,
- .help_str = "Display thread headroom",
- .tokens = {
- (void *)&cmd_thread_headroom_t_string,
- (void *)&cmd_thread_headroom_t_id_string,
- (void *)&cmd_thread_headroom_headroom_string,
- NULL,
- },
-};
-
-
-static cmdline_parse_ctx_t thread_cmds[] = {
- (cmdline_parse_inst_t *) &cmd_pipeline_enable,
- (cmdline_parse_inst_t *) &cmd_pipeline_disable,
- (cmdline_parse_inst_t *) &cmd_thread_headroom,
- NULL,
-};
-
-int
-app_pipeline_thread_cmd_push(struct app_params *app)
-{
- uint32_t n_cmds, i;
-
- /* Check for available slots in the application commands array */
- n_cmds = RTE_DIM(thread_cmds) - 1;
- if (n_cmds > APP_MAX_CMDS - app->n_cmds)
- return -ENOMEM;
-
- /* Push thread commands into the application */
- memcpy(&app->cmds[app->n_cmds], thread_cmds,
- n_cmds * sizeof(cmdline_parse_ctx_t));
-
- for (i = 0; i < n_cmds; i++)
- app->cmds[app->n_cmds + i]->data = app;
-
- app->n_cmds += n_cmds;
- app->cmds[app->n_cmds] = NULL;
-
- return 0;
-}
diff --git a/examples/ip_pipeline/thread_fe.h b/examples/ip_pipeline/thread_fe.h
deleted file mode 100644
index 056a5e8a..00000000
--- a/examples/ip_pipeline/thread_fe.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef THREAD_FE_H_
-#define THREAD_FE_H_
-
-static inline struct rte_ring *
-app_thread_msgq_in_get(struct app_params *app,
- uint32_t socket_id, uint32_t core_id, uint32_t ht_id)
-{
- char msgq_name[32];
- ssize_t param_idx;
-
- snprintf(msgq_name, sizeof(msgq_name),
- "MSGQ-REQ-CORE-s%" PRIu32 "c%" PRIu32 "%s",
- socket_id,
- core_id,
- (ht_id) ? "h" : "");
- param_idx = APP_PARAM_FIND(app->msgq_params, msgq_name);
-
- if (param_idx < 0)
- return NULL;
-
- return app->msgq[param_idx];
-}
-
-static inline struct rte_ring *
-app_thread_msgq_out_get(struct app_params *app,
- uint32_t socket_id, uint32_t core_id, uint32_t ht_id)
-{
- char msgq_name[32];
- ssize_t param_idx;
-
- snprintf(msgq_name, sizeof(msgq_name),
- "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
- socket_id,
- core_id,
- (ht_id) ? "h" : "");
- param_idx = APP_PARAM_FIND(app->msgq_params, msgq_name);
-
- if (param_idx < 0)
- return NULL;
-
- return app->msgq[param_idx];
-
-}
-
-int
-app_pipeline_thread_cmd_push(struct app_params *app);
-
-int
-app_pipeline_enable(struct app_params *app,
- uint32_t core_id,
- uint32_t socket_id,
- uint32_t hyper_th_id,
- uint32_t pipeline_id);
-
-int
-app_pipeline_disable(struct app_params *app,
- uint32_t core_id,
- uint32_t socket_id,
- uint32_t hyper_th_id,
- uint32_t pipeline_id);
-
-int
-app_thread_headroom(struct app_params *app,
- uint32_t core_id,
- uint32_t socket_id,
- uint32_t hyper_th_id);
-
-#endif /* THREAD_FE_H_ */
diff --git a/examples/ip_pipeline/tmgr.c b/examples/ip_pipeline/tmgr.c
new file mode 100644
index 00000000..40cbf1d0
--- /dev/null
+++ b/examples/ip_pipeline/tmgr.c
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+
+#include <rte_string_fns.h>
+
+#include "tmgr.h"
+
+static struct rte_sched_subport_params
+ subport_profile[TMGR_SUBPORT_PROFILE_MAX];
+
+static uint32_t n_subport_profiles;
+
+static struct rte_sched_pipe_params
+ pipe_profile[TMGR_PIPE_PROFILE_MAX];
+
+static uint32_t n_pipe_profiles;
+
+static struct tmgr_port_list tmgr_port_list;
+
+int
+tmgr_init(void)
+{
+ TAILQ_INIT(&tmgr_port_list);
+
+ return 0;
+}
+
+struct tmgr_port *
+tmgr_port_find(const char *name)
+{
+ struct tmgr_port *tmgr_port;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(tmgr_port, &tmgr_port_list, node)
+ if (strcmp(tmgr_port->name, name) == 0)
+ return tmgr_port;
+
+ return NULL;
+}
+
+int
+tmgr_subport_profile_add(struct rte_sched_subport_params *p)
+{
+ /* Check input params */
+	if ((p == NULL) ||
+		(n_subport_profiles >= TMGR_SUBPORT_PROFILE_MAX))
+		return -1;
+
+ /* Save profile */
+ memcpy(&subport_profile[n_subport_profiles],
+ p,
+ sizeof(*p));
+
+ n_subport_profiles++;
+
+ return 0;
+}
+
+int
+tmgr_pipe_profile_add(struct rte_sched_pipe_params *p)
+{
+ /* Check input params */
+	if ((p == NULL) ||
+		(n_pipe_profiles >= TMGR_PIPE_PROFILE_MAX))
+		return -1;
+
+ /* Save profile */
+ memcpy(&pipe_profile[n_pipe_profiles],
+ p,
+ sizeof(*p));
+
+ n_pipe_profiles++;
+
+ return 0;
+}
+
+struct tmgr_port *
+tmgr_port_create(const char *name, struct tmgr_port_params *params)
+{
+ struct rte_sched_port_params p;
+ struct tmgr_port *tmgr_port;
+ struct rte_sched_port *s;
+ uint32_t i, j;
+
+ /* Check input params */
+ if ((name == NULL) ||
+ tmgr_port_find(name) ||
+ (params == NULL) ||
+ (params->n_subports_per_port == 0) ||
+ (params->n_pipes_per_subport == 0) ||
+ (params->cpu_id >= RTE_MAX_NUMA_NODES) ||
+ (n_subport_profiles == 0) ||
+ (n_pipe_profiles == 0))
+ return NULL;
+
+ /* Resource create */
+ p.name = name;
+ p.socket = (int) params->cpu_id;
+ p.rate = params->rate;
+ p.mtu = params->mtu;
+ p.frame_overhead = params->frame_overhead;
+ p.n_subports_per_port = params->n_subports_per_port;
+ p.n_pipes_per_subport = params->n_pipes_per_subport;
+
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ p.qsize[i] = params->qsize[i];
+
+ p.pipe_profiles = pipe_profile;
+ p.n_pipe_profiles = n_pipe_profiles;
+
+ s = rte_sched_port_config(&p);
+ if (s == NULL)
+ return NULL;
+
+ for (i = 0; i < params->n_subports_per_port; i++) {
+ int status;
+
+ status = rte_sched_subport_config(
+ s,
+ i,
+ &subport_profile[0]);
+
+ if (status) {
+ rte_sched_port_free(s);
+ return NULL;
+ }
+
+ for (j = 0; j < params->n_pipes_per_subport; j++) {
+ status = rte_sched_pipe_config(
+ s,
+ i,
+ j,
+ 0);
+
+ if (status) {
+ rte_sched_port_free(s);
+ return NULL;
+ }
+ }
+ }
+
+ /* Node allocation */
+ tmgr_port = calloc(1, sizeof(struct tmgr_port));
+ if (tmgr_port == NULL) {
+ rte_sched_port_free(s);
+ return NULL;
+ }
+
+ /* Node fill in */
+ strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
+ tmgr_port->s = s;
+ tmgr_port->n_subports_per_port = params->n_subports_per_port;
+ tmgr_port->n_pipes_per_subport = params->n_pipes_per_subport;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&tmgr_port_list, tmgr_port, node);
+
+ return tmgr_port;
+}
+
+int
+tmgr_subport_config(const char *port_name,
+ uint32_t subport_id,
+ uint32_t subport_profile_id)
+{
+ struct tmgr_port *port;
+ int status;
+
+ /* Check input params */
+ if (port_name == NULL)
+ return -1;
+
+ port = tmgr_port_find(port_name);
+ if ((port == NULL) ||
+ (subport_id >= port->n_subports_per_port) ||
+ (subport_profile_id >= n_subport_profiles))
+ return -1;
+
+ /* Resource config */
+ status = rte_sched_subport_config(
+ port->s,
+ subport_id,
+ &subport_profile[subport_profile_id]);
+
+ return status;
+}
+
+int
+tmgr_pipe_config(const char *port_name,
+ uint32_t subport_id,
+ uint32_t pipe_id_first,
+ uint32_t pipe_id_last,
+ uint32_t pipe_profile_id)
+{
+ struct tmgr_port *port;
+ uint32_t i;
+
+ /* Check input params */
+ if (port_name == NULL)
+ return -1;
+
+ port = tmgr_port_find(port_name);
+ if ((port == NULL) ||
+ (subport_id >= port->n_subports_per_port) ||
+ (pipe_id_first >= port->n_pipes_per_subport) ||
+ (pipe_id_last >= port->n_pipes_per_subport) ||
+ (pipe_id_first > pipe_id_last) ||
+ (pipe_profile_id >= n_pipe_profiles))
+ return -1;
+
+ /* Resource config */
+ for (i = pipe_id_first; i <= pipe_id_last; i++) {
+ int status;
+
+ status = rte_sched_pipe_config(
+ port->s,
+ subport_id,
+ i,
+ (int) pipe_profile_id);
+
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
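
The tmgr_port_create() routine above encodes the configuration order that rte_sched requires: port first, then every subport, then every pipe within each subport, with rte_sched_port_free() as the single cleanup path on any failure. A minimal sketch of that order against the 18.08 rte_sched API follows; sched_hierarchy_init() and its parameters are illustrative, not part of the example.

#include <stdint.h>
#include <rte_sched.h>

/*
 * Sketch: configure a scheduler hierarchy in the required order.
 * Pipe profile 0 is assumed to exist, as in tmgr_port_create() above.
 */
static struct rte_sched_port *
sched_hierarchy_init(struct rte_sched_port_params *pp,
        struct rte_sched_subport_params *sp,
        uint32_t n_subports, uint32_t n_pipes)
{
        struct rte_sched_port *port = rte_sched_port_config(pp);
        uint32_t i, j;

        if (port == NULL)
                return NULL;

        for (i = 0; i < n_subports; i++) {
                if (rte_sched_subport_config(port, i, sp) != 0)
                        goto fail;

                for (j = 0; j < n_pipes; j++)
                        if (rte_sched_pipe_config(port, i, j, 0) != 0)
                                goto fail;
        }

        return port;

fail:
        rte_sched_port_free(port);
        return NULL;
}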
diff --git a/examples/ip_pipeline/tmgr.h b/examples/ip_pipeline/tmgr.h
new file mode 100644
index 00000000..0b497e79
--- /dev/null
+++ b/examples/ip_pipeline/tmgr.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _INCLUDE_TMGR_H_
+#define _INCLUDE_TMGR_H_
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_sched.h>
+
+#include "common.h"
+
+#ifndef TMGR_SUBPORT_PROFILE_MAX
+#define TMGR_SUBPORT_PROFILE_MAX 256
+#endif
+
+#ifndef TMGR_PIPE_PROFILE_MAX
+#define TMGR_PIPE_PROFILE_MAX 256
+#endif
+
+struct tmgr_port {
+ TAILQ_ENTRY(tmgr_port) node;
+ char name[NAME_SIZE];
+ struct rte_sched_port *s;
+ uint32_t n_subports_per_port;
+ uint32_t n_pipes_per_subport;
+};
+
+TAILQ_HEAD(tmgr_port_list, tmgr_port);
+
+int
+tmgr_init(void);
+
+struct tmgr_port *
+tmgr_port_find(const char *name);
+
+struct tmgr_port_params {
+ uint32_t rate;
+ uint32_t n_subports_per_port;
+ uint32_t n_pipes_per_subport;
+ uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+ uint32_t frame_overhead;
+ uint32_t mtu;
+ uint32_t cpu_id;
+};
+
+int
+tmgr_subport_profile_add(struct rte_sched_subport_params *p);
+
+int
+tmgr_pipe_profile_add(struct rte_sched_pipe_params *p);
+
+struct tmgr_port *
+tmgr_port_create(const char *name, struct tmgr_port_params *params);
+
+int
+tmgr_subport_config(const char *port_name,
+ uint32_t subport_id,
+ uint32_t subport_profile_id);
+
+int
+tmgr_pipe_config(const char *port_name,
+ uint32_t subport_id,
+ uint32_t pipe_id_first,
+ uint32_t pipe_id_last,
+ uint32_t pipe_profile_id);
+
+#endif /* _INCLUDE_TMGR_H_ */
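
For reference, a hypothetical caller of this API could look as follows. All numeric values are illustrative placeholders; the real application fills the rte_sched_*_params structures from its CLI configuration, and tmgr_example() is not part of the example.

#include "tmgr.h"

static int
tmgr_example(void)
{
        struct rte_sched_subport_params sp = {
                .tb_rate = 1250000000, .tb_size = 1000000,
                .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
                .tc_period = 10,
        };
        struct rte_sched_pipe_params pp = {
                .tb_rate = 305175, .tb_size = 1000000,
                .tc_rate = {305175, 305175, 305175, 305175},
                .tc_period = 40,
                .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1,
                                1, 1, 1, 1, 1, 1, 1, 1},
        };
        struct tmgr_port_params p = {
                .rate = 1250000000,
                .n_subports_per_port = 1,
                .n_pipes_per_subport = 4096,
                .qsize = {64, 64, 64, 64},
                .frame_overhead = 24,
                .mtu = 1522,
                .cpu_id = 0,
        };

        tmgr_init();

        /* profiles must exist before the port is created */
        if (tmgr_subport_profile_add(&sp) || tmgr_pipe_profile_add(&pp))
                return -1;

        return (tmgr_port_create("TMGR0", &p) == NULL) ? -1 : 0;
}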
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index 350a9739..b830f67a 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -164,7 +164,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP),
@@ -702,7 +701,7 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -714,7 +713,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -1008,7 +1007,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No ports found!\n");
@@ -1023,7 +1022,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Non-existent ports in portmask!\n");
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_rxconf rxq_conf;
struct rte_eth_conf local_port_conf = port_conf;
@@ -1083,6 +1082,18 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
&local_port_conf);
if (ret < 0) {
@@ -1121,7 +1132,6 @@ main(int argc, char **argv)
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
@@ -1141,7 +1151,7 @@ main(int argc, char **argv)
printf("\n");
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0) {
continue;
}
@@ -1157,7 +1167,7 @@ main(int argc, char **argv)
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status(nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
signal(SIGUSR1, signal_handler);
signal(SIGTERM, signal_handler);
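
Like the other examples updated in this release, this file moves from dense index loops over rte_eth_dev_count() to RTE_ETH_FOREACH_DEV() and rte_eth_dev_count_avail(), since port ids are no longer guaranteed to form a contiguous 0..N-1 range once hotplug and device ownership are in play. The pattern in isolation, as a sketch (count_enabled_ports() is illustrative):

#include <stdio.h>
#include <rte_ethdev.h>

static void
count_enabled_ports(uint32_t port_mask)
{
        uint16_t portid, n = 0;

        /* walks only valid port ids, which may be sparse */
        RTE_ETH_FOREACH_DEV(portid) {
                if ((port_mask & (1u << portid)) == 0)
                        continue; /* not selected by the user */
                n++;
        }

        printf("%u of %u available ports enabled\n",
                (unsigned)n, (unsigned)rte_eth_dev_count_avail());
}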
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 3a8562ee..b45b87bd 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -40,6 +40,7 @@
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_cryptodev.h>
+#include <rte_security.h>
#include "ipsec.h"
#include "parser.h"
@@ -58,10 +59,6 @@
#define CDEV_MP_CACHE_SZ 64
#define MAX_QUEUE_PAIRS 1
-#define OPTION_CONFIG "config"
-#define OPTION_SINGLE_SA "single-sa"
-#define OPTION_CRYPTODEV_MASK "cryptodev_mask"
-
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 4
@@ -124,6 +121,29 @@ struct ethaddr_info ethaddr_tbl[RTE_MAX_ETHPORTS] = {
{ 0, ETHADDR(0x00, 0x16, 0x3e, 0x49, 0x9e, 0xdd) }
};
+#define CMD_LINE_OPT_CONFIG "config"
+#define CMD_LINE_OPT_SINGLE_SA "single-sa"
+#define CMD_LINE_OPT_CRYPTODEV_MASK "cryptodev_mask"
+
+enum {
+ /* long options mapped to a short option */
+
+ /* first long-only option value must be >= 256, so that it won't
+ * conflict with short options
+ */
+ CMD_LINE_OPT_MIN_NUM = 256,
+ CMD_LINE_OPT_CONFIG_NUM,
+ CMD_LINE_OPT_SINGLE_SA_NUM,
+ CMD_LINE_OPT_CRYPTODEV_MASK_NUM,
+};
+
+static const struct option lgopts[] = {
+ {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
+ {CMD_LINE_OPT_SINGLE_SA, 1, 0, CMD_LINE_OPT_SINGLE_SA_NUM},
+ {CMD_LINE_OPT_CRYPTODEV_MASK, 1, 0, CMD_LINE_OPT_CRYPTODEV_MASK_NUM},
+ {NULL, 0, 0, 0}
+};
+
/* mask of enabled ports */
static uint32_t enabled_port_mask;
static uint64_t enabled_cryptodev_mask = UINT64_MAX;
@@ -179,7 +199,6 @@ static struct rte_eth_conf port_conf = {
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
@@ -311,6 +330,7 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
pkt->l3_len = sizeof(struct ip);
pkt->l2_len = ETHER_HDR_LEN;
+ ip->ip_sum = 0;
ethhdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
} else {
pkt->ol_flags |= PKT_TX_IPV6;
@@ -490,11 +510,13 @@ outbound_sp(struct sp_ctx *sp, struct traffic_type *ip,
sa_idx = ip->res[i] & PROTECT_MASK;
if (ip->res[i] & DISCARD)
rte_pktmbuf_free(m);
+ else if (ip->res[i] & BYPASS)
+ ip->pkts[j++] = m;
else if (sa_idx < IPSEC_SA_MAX_ENTRIES) {
ipsec->res[ipsec->num] = sa_idx;
ipsec->pkts[ipsec->num++] = m;
- } else /* BYPASS */
- ip->pkts[j++] = m;
+ } else /* invalid SA idx */
+ rte_pktmbuf_free(m);
}
ip->num = j;
}
@@ -848,7 +870,7 @@ static int32_t
check_params(void)
{
uint8_t lcore;
- uint16_t portid, nb_ports;
+ uint16_t portid;
uint16_t i;
int32_t socket_id;
@@ -857,8 +879,6 @@ check_params(void)
return -1;
}
- nb_ports = rte_eth_dev_count();
-
for (i = 0; i < nb_lcore_params; ++i) {
lcore = lcore_params[i].lcore_id;
if (!rte_lcore_is_enabled(lcore)) {
@@ -877,7 +897,7 @@ check_params(void)
printf("port %u is not enabled in port mask\n", portid);
return -1;
}
- if (portid >= nb_ports) {
+ if (!rte_eth_dev_is_valid_port(portid)) {
printf("port %u is not present on the board\n", portid);
return -1;
}
@@ -926,20 +946,28 @@ init_lcore_rx_queues(void)
static void
print_usage(const char *prgname)
{
- printf("%s [EAL options] -- -p PORTMASK -P -u PORTMASK"
- " --"OPTION_CONFIG" (port,queue,lcore)[,(port,queue,lcore]"
- " --single-sa SAIDX -f CONFIG_FILE\n"
- " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
- " -P : enable promiscuous mode\n"
- " -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
- " -j FRAMESIZE: jumbo frame maximum size\n"
- " --"OPTION_CONFIG": (port,queue,lcore): "
- "rx queues configuration\n"
- " --single-sa SAIDX: use single SA index for outbound, "
- "bypassing the SP\n"
- " --cryptodev_mask MASK: hexadecimal bitmask of the "
- "crypto devices to configure\n"
- " -f CONFIG_FILE: Configuration file path\n",
+ fprintf(stderr, "%s [EAL options] --"
+ " -p PORTMASK"
+ " [-P]"
+ " [-u PORTMASK]"
+ " [-j FRAMESIZE]"
+ " -f CONFIG_FILE"
+ " --config (port,queue,lcore)[,(port,queue,lcore)]"
+ " [--single-sa SAIDX]"
+ " [--cryptodev_mask MASK]"
+ "\n\n"
+ " -p PORTMASK: Hexadecimal bitmask of ports to configure\n"
+ " -P : Enable promiscuous mode\n"
+ " -u PORTMASK: Hexadecimal bitmask of unprotected ports\n"
+ " -j FRAMESIZE: Enable jumbo frame with 'FRAMESIZE' as maximum\n"
+ " packet size\n"
+ " -f CONFIG_FILE: Configuration file\n"
+ " --config (port,queue,lcore): Rx queue configuration\n"
+ " --single-sa SAIDX: Use single SA index for outbound traffic,\n"
+ " bypassing the SP\n"
+ " --cryptodev_mask MASK: Hexadecimal bitmask of the crypto\n"
+ " devices to configure\n"
+ "\n",
prgname);
}
@@ -1029,42 +1057,6 @@ parse_config(const char *q_arg)
return 0;
}
-#define __STRNCMP(name, opt) (!strncmp(name, opt, sizeof(opt)))
-static int32_t
-parse_args_long_options(struct option *lgopts, int32_t option_index)
-{
- int32_t ret = -1;
- const char *optname = lgopts[option_index].name;
-
- if (__STRNCMP(optname, OPTION_CONFIG)) {
- ret = parse_config(optarg);
- if (ret)
- printf("invalid config\n");
- }
-
- if (__STRNCMP(optname, OPTION_SINGLE_SA)) {
- ret = parse_decimal(optarg);
- if (ret != -1) {
- single_sa = 1;
- single_sa_idx = ret;
- printf("Configured with single SA index %u\n",
- single_sa_idx);
- ret = 0;
- }
- }
-
- if (__STRNCMP(optname, OPTION_CRYPTODEV_MASK)) {
- ret = parse_portmask(optarg);
- if (ret != -1) {
- enabled_cryptodev_mask = ret;
- ret = 0;
- }
- }
-
- return ret;
-}
-#undef __STRNCMP
-
static int32_t
parse_args(int32_t argc, char **argv)
{
@@ -1072,12 +1064,6 @@ parse_args(int32_t argc, char **argv)
char **argvopt;
int32_t option_index;
char *prgname = argv[0];
- static struct option lgopts[] = {
- {OPTION_CONFIG, 1, 0, 0},
- {OPTION_SINGLE_SA, 1, 0, 0},
- {OPTION_CRYPTODEV_MASK, 1, 0, 0},
- {NULL, 0, 0, 0}
- };
int32_t f_present = 0;
argvopt = argv;
@@ -1138,11 +1124,38 @@ parse_args(int32_t argc, char **argv)
}
printf("Enabled jumbo frames size %u\n", frame_size);
break;
- case 0:
- if (parse_args_long_options(lgopts, option_index)) {
+ case CMD_LINE_OPT_CONFIG_NUM:
+ ret = parse_config(optarg);
+ if (ret) {
+ printf("Invalid config\n");
+ print_usage(prgname);
+ return -1;
+ }
+ break;
+ case CMD_LINE_OPT_SINGLE_SA_NUM:
+ ret = parse_decimal(optarg);
+ if (ret == -1) {
+ printf("Invalid argument[sa_idx]\n");
print_usage(prgname);
return -1;
}
+
+ /* else */
+ single_sa = 1;
+ single_sa_idx = ret;
+ printf("Configured with single SA index %u\n",
+ single_sa_idx);
+ break;
+ case CMD_LINE_OPT_CRYPTODEV_MASK_NUM:
+ ret = parse_portmask(optarg);
+ if (ret == -1) {
+ printf("Invalid argument[portmask]\n");
+ print_usage(prgname);
+ return -1;
+ }
+
+ /* else */
+ enabled_cryptodev_mask = ret;
break;
default:
print_usage(prgname);
@@ -1173,7 +1186,7 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -1185,7 +1198,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -1379,11 +1392,11 @@ cryptodevs_init(void)
uint32_t max_sess_sz = 0, sess_sz;
for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
- sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
+ sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
if (sess_sz > max_sess_sz)
max_sess_sz = sess_sz;
}
- for (port_id = 0; port_id < rte_eth_dev_count(); port_id++) {
+ RTE_ETH_FOREACH_DEV(port_id) {
void *sec_ctx;
if ((enabled_port_mask & (1 << port_id)) == 0)
@@ -1429,6 +1442,12 @@ cryptodevs_init(void)
dev_conf.socket_id = rte_cryptodev_socket_id(cdev_id);
dev_conf.nb_queue_pairs = qp;
+ uint32_t dev_max_sess = cdev_info.sym.max_nb_sessions;
+ if (dev_max_sess != 0 && dev_max_sess < (CDEV_MP_NB_OBJS / 2))
+ rte_exit(EXIT_FAILURE,
+ "Device does not support at least %u "
+ "sessions", CDEV_MP_NB_OBJS / 2);
+
if (!socket_ctx[dev_conf.socket_id].session_pool) {
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
@@ -1470,7 +1489,7 @@ cryptodevs_init(void)
}
/* create session pools for eth devices that implement security */
- for (port_id = 0; port_id < rte_eth_dev_count(); port_id++) {
+ RTE_ETH_FOREACH_DEV(port_id) {
if ((enabled_port_mask & (1 << port_id)) &&
rte_eth_dev_get_sec_ctx(port_id)) {
int socket_id = rte_eth_dev_socket_id(port_id);
@@ -1555,6 +1574,18 @@ port_init(uint16_t portid)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
&local_port_conf);
if (ret < 0)
@@ -1581,7 +1612,6 @@ port_init(uint16_t portid)
printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
@@ -1640,13 +1670,68 @@ pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
printf("Allocated mbuf pool on socket %d\n", socket_id);
}
+static inline int
+inline_ipsec_event_esn_overflow(struct rte_security_ctx *ctx, uint64_t md)
+{
+ struct ipsec_sa *sa;
+
+ /* For inline protocol processing, the metadata in the event will
+ * uniquely identify the security session which raised the event.
+ * Application would then need the userdata it had registered with the
+ * security session to process the event.
+ */
+
+ sa = (struct ipsec_sa *)rte_security_get_userdata(ctx, md);
+
+ if (sa == NULL) {
+ /* userdata could not be retrieved */
+ return -1;
+ }
+
+ /* Sequence number overflow. SA needs to be re-established */
+ RTE_SET_USED(sa);
+ return 0;
+}
+
+static int
+inline_ipsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+ void *param, void *ret_param)
+{
+ uint64_t md;
+ struct rte_eth_event_ipsec_desc *event_desc = NULL;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(port_id);
+
+ RTE_SET_USED(param);
+
+ if (type != RTE_ETH_EVENT_IPSEC)
+ return -1;
+
+ event_desc = ret_param;
+ if (event_desc == NULL) {
+ printf("Event descriptor not set\n");
+ return -1;
+ }
+
+ md = event_desc->metadata;
+
+ if (event_desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
+ return inline_ipsec_event_esn_overflow(ctx, md);
+ else if (event_desc->subtype >= RTE_ETH_EVENT_IPSEC_MAX) {
+ printf("Invalid IPsec event reported\n");
+ return -1;
+ }
+
+ return -1;
+}
+
int32_t
main(int32_t argc, char **argv)
{
int32_t ret;
uint32_t lcore_id;
uint8_t socket_id;
- uint16_t portid, nb_ports;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1665,8 +1750,6 @@ main(int32_t argc, char **argv)
rte_exit(EXIT_FAILURE, "Invalid unprotected portmask 0x%x\n",
unprotected_port_mask);
- nb_ports = rte_eth_dev_count();
-
if (check_params() < 0)
rte_exit(EXIT_FAILURE, "check_params failed\n");
@@ -1700,7 +1783,7 @@ main(int32_t argc, char **argv)
pool_init(&socket_ctx[socket_id], socket_id, NB_MBUF);
}
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -1710,7 +1793,7 @@ main(int32_t argc, char **argv)
cryptodevs_init();
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -1727,9 +1810,12 @@ main(int32_t argc, char **argv)
*/
if (promiscuous_on)
rte_eth_promiscuous_enable(portid);
+
+ rte_eth_dev_callback_register(portid,
+ RTE_ETH_EVENT_IPSEC, inline_ipsec_event_callback, NULL);
}
- check_all_ports_link_status(nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
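
The option-parsing rework above drops string comparisons on long-option names: each long-only option now maps to an enum value starting at 256 (past the char range), so getopt_long() returns it straight into the same switch as the short options. A reduced sketch of the pattern, with a hypothetical --foo option standing in for --config, --single-sa and --cryptodev_mask:

#include <getopt.h>
#include <stdio.h>

enum {
        OPT_FOO_NUM = 256,      /* hypothetical long-only option */
};

static const struct option opts[] = {
        {"foo", required_argument, NULL, OPT_FOO_NUM},
        {NULL, 0, NULL, 0}
};

static int
parse(int argc, char **argv)
{
        int opt;

        while ((opt = getopt_long(argc, argv, "p:", opts, NULL)) != EOF) {
                switch (opt) {
                case 'p':               /* short option */
                        printf("-p %s\n", optarg);
                        break;
                case OPT_FOO_NUM:       /* long option, no string compare */
                        printf("--foo %s\n", optarg);
                        break;
                default:
                        return -1;
                }
        }

        return 0;
}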
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 5fb5bc16..3d415f1a 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -36,6 +36,7 @@ set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec)
}
/* TODO support for Transport and IPV6 tunnel */
}
+ ipsec->esn_soft_limit = IPSEC_OFFLOAD_ESN_SOFTLIMIT;
}
static inline int
@@ -186,14 +187,8 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
.rss_key_len = 40,
};
struct rte_eth_dev *eth_dev;
- union {
- struct rte_flow_action_rss rss;
- struct {
- const struct rte_eth_rss_conf *rss_conf;
- uint16_t num;
- uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
- } local;
- } action_rss;
+ uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+ struct rte_flow_action_rss action_rss;
unsigned int i;
unsigned int j;
@@ -207,9 +202,14 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
for (i = 0, j = 0;
i < eth_dev->data->nb_rx_queues; ++i)
if (eth_dev->data->rx_queues[i])
- action_rss.local.queue[j++] = i;
- action_rss.local.num = j;
- action_rss.local.rss_conf = &rss_conf;
+ queue[j++] = i;
+ action_rss = (struct rte_flow_action_rss){
+ .types = rss_conf.rss_hf,
+ .key_len = rss_conf.rss_key_len,
+ .queue_num = j,
+ .key = rss_key,
+ .queue = queue,
+ };
ret = rte_flow_validate(sa->portid, &sa->attr,
sa->pattern, sa->action,
&err);
@@ -270,11 +270,14 @@ flow_create_failure:
* the packet is received, this userdata will be
* retrieved using the metadata from the packet.
*
- * This is required only for inbound SAs.
+ * The PMD is expected to set similar metadata for other
+ * operations, like rte_eth_event, that are tied to the
+ * security session. In such cases, the userdata can be
+ * used to uniquely identify the security parameters
+ * involved.
*/
- if (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
- sess_conf.userdata = (void *) sa;
+ sess_conf.userdata = (void *) sa;
sa->sec_session = rte_security_session_create(ctx,
&sess_conf, ipsec_ctx->session_pool);
@@ -324,18 +327,6 @@ flow_create_failure:
rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
&cdev_info);
- if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
- ret = rte_cryptodev_queue_pair_attach_sym_session(
- ipsec_ctx->tbl[cdev_id_qp].id,
- ipsec_ctx->tbl[cdev_id_qp].qp,
- sa->crypto_session);
- if (ret < 0) {
- RTE_LOG(ERR, IPSEC,
- "Session cannot be attached to qp %u\n",
- ipsec_ctx->tbl[cdev_id_qp].qp);
- return -1;
- }
- }
}
sa->cdev_id_qp = cdev_id_qp;
@@ -345,13 +336,19 @@ flow_create_failure:
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
- int32_t ret, i;
+ int32_t ret = 0, i;
cqp->buf[cqp->len++] = cop;
if (cqp->len == MAX_PKT_BURST) {
- ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
- cqp->buf, cqp->len);
+ int enq_size = cqp->len;
+ if ((cqp->in_flight + enq_size) > MAX_INFLIGHT)
+ enq_size -=
+ (int)((cqp->in_flight + enq_size) - MAX_INFLIGHT);
+
+ if (enq_size > 0)
+ ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
+ cqp->buf, enq_size);
if (ret < cqp->len) {
RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
" enqueued %u crypto ops out of %u\n",
@@ -486,9 +483,12 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
struct ipsec_sa *sa;
struct rte_mbuf *pkt;
- for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts;) {
+ for (i = 0; i < ipsec_ctx->nb_qps && nb_pkts < max_pkts; i++) {
struct cdev_qp *cqp;
- cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp];
+
+ cqp = &ipsec_ctx->tbl[ipsec_ctx->last_qp++];
+ if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
+ ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
@@ -503,13 +503,8 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
pkts[nb_pkts++] = pkt;
}
- if (cqp->in_flight == 0) {
- ipsec_ctx->last_qp++;
- if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
- ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
- i++;
+ if (cqp->in_flight == 0)
continue;
- }
nb_cops = rte_cryptodev_dequeue_burst(cqp->id, cqp->qp,
cops, max_pkts - nb_pkts);
@@ -533,12 +528,6 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
}
}
pkts[nb_pkts++] = pkt;
- if (cqp->in_flight < max_pkts) {
- ipsec_ctx->last_qp++;
- if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
- ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
- i++;
- }
}
}
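
Two behavioral changes above are easy to miss: enqueue_cop() now trims each burst so a queue pair never carries more than MAX_INFLIGHT outstanding operations, and ipsec_dequeue() advances last_qp on every loop iteration, polling the queue pairs round-robin instead of parking on one until it drains. The trimming arithmetic in isolation, as a sketch (capped_enq_size() is illustrative):

#include <stdint.h>

/* never let in_flight + enqueued exceed the configured ceiling */
static inline int
capped_enq_size(uint32_t in_flight, int want, uint32_t max_inflight)
{
        if (in_flight + (uint32_t)want > max_inflight)
                want -= (int)(in_flight + want - max_inflight);

        return (want > 0) ? want : 0;
}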
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index 6059f6cc..c998c807 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -17,10 +17,13 @@
#define RTE_LOGTYPE_IPSEC_IPIP RTE_LOGTYPE_USER3
#define MAX_PKT_BURST 32
+#define MAX_INFLIGHT 128
#define MAX_QP_PER_LCORE 256
#define MAX_DIGEST_SIZE 32 /* Bytes -- 256 bits */
+#define IPSEC_OFFLOAD_ESN_SOFTLIMIT 0xffffff00
+
#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
sizeof(struct rte_crypto_sym_op))
@@ -188,7 +191,7 @@ ipsec_metadata_size(void)
static inline struct ipsec_mbuf_metadata *
get_priv(struct rte_mbuf *m)
{
- return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
+ return rte_mbuf_to_priv(m);
}
static inline void *
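
get_priv() previously computed the private-area address by hand; rte_mbuf_to_priv(), new in 18.08, names the same layout. A sketch of the equivalence, assuming the standard mbuf layout of [struct rte_mbuf][private area][headroom][packet data] (priv_accessor_check() is illustrative):

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_mbuf.h>

static inline void
priv_accessor_check(struct rte_mbuf *m)
{
        /* both expressions point just past the mbuf header */
        RTE_ASSERT(rte_mbuf_to_priv(m) ==
                RTE_PTR_ADD(m, sizeof(struct rte_mbuf)));
}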
diff --git a/examples/ipsec-secgw/parser.c b/examples/ipsec-secgw/parser.c
index 2403b564..91282ca9 100644
--- a/examples/ipsec-secgw/parser.c
+++ b/examples/ipsec-secgw/parser.c
@@ -3,6 +3,7 @@
*/
#include <rte_common.h>
#include <rte_crypto.h>
+#include <rte_string_fns.h>
#include <cmdline_parse_string.h>
#include <cmdline_parse_num.h>
@@ -207,23 +208,24 @@ inet_pton6(const char *src, unsigned char *dst)
int
parse_ipv4_addr(const char *token, struct in_addr *ipv4, uint32_t *mask)
{
- char ip_str[256] = {0};
+ char ip_str[INET_ADDRSTRLEN] = {0};
char *pch;
pch = strchr(token, '/');
if (pch != NULL) {
- strncpy(ip_str, token, pch - token);
+ strlcpy(ip_str, token,
+ RTE_MIN((unsigned long)(pch - token + 1),
+ sizeof(ip_str)));
pch += 1;
if (is_str_num(pch) != 0)
return -EINVAL;
if (mask)
*mask = atoi(pch);
} else {
- strncpy(ip_str, token, sizeof(ip_str) - 1);
+ strlcpy(ip_str, token, sizeof(ip_str));
if (mask)
*mask = 0;
}
-
if (strlen(ip_str) >= INET_ADDRSTRLEN)
return -EINVAL;
@@ -241,14 +243,16 @@ parse_ipv6_addr(const char *token, struct in6_addr *ipv6, uint32_t *mask)
pch = strchr(token, '/');
if (pch != NULL) {
- strncpy(ip_str, token, pch - token);
+ strlcpy(ip_str, token,
+ RTE_MIN((unsigned long)(pch - token + 1),
+ sizeof(ip_str)));
pch += 1;
if (is_str_num(pch) != 0)
return -EINVAL;
if (mask)
*mask = atoi(pch);
} else {
- strncpy(ip_str, token, sizeof(ip_str) - 1);
+ strlcpy(ip_str, token, sizeof(ip_str));
if (mask)
*mask = 0;
}
@@ -515,9 +519,7 @@ parse_cfg_file(const char *cfg_filename)
goto error_exit;
}
- strncpy(str + strlen(str), oneline,
- strlen(oneline));
-
+ strcpy(str + strlen(str), oneline);
continue;
}
@@ -528,8 +530,7 @@ parse_cfg_file(const char *cfg_filename)
cfg_filename, line_num);
goto error_exit;
}
- strncpy(str + strlen(str), oneline,
- strlen(oneline));
+ strcpy(str + strlen(str), oneline);
str[strlen(str)] = '\n';
if (cmdline_parse(cl, str) < 0) {
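
The parser hunks above replace strncpy(), which does not NUL-terminate on truncation, with strlcpy(), which always does. Copying the address portion in front of the '/' therefore passes pch - token + 1 as the size: strlcpy's size argument counts the terminator, so this copies exactly the prefix. The idiom as a standalone sketch (copy_addr_prefix() is illustrative):

#include <string.h>
#include <rte_common.h>
#include <rte_string_fns.h>

static void
copy_addr_prefix(char *dst, size_t dst_sz, const char *token)
{
        const char *pch = strchr(token, '/');

        if (pch != NULL)
                /* +1 because strlcpy's size includes the NUL */
                strlcpy(dst, token,
                        RTE_MIN((size_t)(pch - token + 1), dst_sz));
        else
                strlcpy(dst, token, dst_sz);
}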
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index d9dcc0e0..4ab8e098 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -939,7 +939,7 @@ inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
struct ipsec_mbuf_metadata *priv;
- priv = RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
+ priv = get_priv(m);
return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
}
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index c3bcf2c8..331c32e7 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -109,7 +109,6 @@ static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP),
},
@@ -578,7 +577,7 @@ init_mcast_hash(void)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -590,7 +589,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -674,7 +673,7 @@ main(int argc, char **argv)
if (clone_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init clone mbuf pool\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No physical ports!\n");
if (nb_ports > MAX_PORTS)
@@ -683,7 +682,7 @@ main(int argc, char **argv)
nb_lcores = rte_lcore_count();
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_rxconf rxq_conf;
struct rte_eth_conf local_port_conf = port_conf;
@@ -764,7 +763,6 @@ main(int argc, char **argv)
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
rte_lcore_to_socket_id(lcore_id), txconf);
@@ -786,7 +784,7 @@ main(int argc, char **argv)
printf("done:\n");
}
- check_all_ports_link_status(nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
/* initialize the multicast hash */
int retval = init_mcast_hash();
diff --git a/examples/kni/Makefile b/examples/kni/Makefile
index 562dc274..7e19d2e2 100644
--- a/examples/kni/Makefile
+++ b/examples/kni/Makefile
@@ -48,7 +48,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(error This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
endif
diff --git a/examples/kni/main.c b/examples/kni/main.c
index 0d9980ee..81336087 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -95,7 +95,6 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
.rxmode = {
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -275,7 +274,7 @@ kni_egress(struct kni_port_params *p)
static int
main_loop(__rte_unused void *arg)
{
- uint8_t i, nb_ports = rte_eth_dev_count();
+ uint16_t i;
int32_t f_stop;
const unsigned lcore_id = rte_lcore_id();
enum lcore_rxtx {
@@ -286,7 +285,7 @@ main_loop(__rte_unused void *arg)
};
enum lcore_rxtx flag = LCORE_NONE;
- for (i = 0; i < nb_ports; i++) {
+ RTE_ETH_FOREACH_DEV(i) {
if (!kni_port_params_array[i])
continue;
if (kni_port_params_array[i]->lcore_rx == (uint8_t)lcore_id) {
@@ -607,7 +606,6 @@ init_port(uint16_t port)
"port%u (%d)\n", (unsigned)port, ret);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port), &txq_conf);
@@ -626,7 +624,7 @@ init_port(uint16_t port)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -638,7 +636,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -689,7 +687,7 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf rxq_conf;
- if (port_id >= rte_eth_dev_count()) {
+ if (!rte_eth_dev_is_valid_port(port_id)) {
RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
return -EINVAL;
}
@@ -748,7 +746,7 @@ kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
int ret = 0;
- if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
+ if (!rte_eth_dev_is_valid_port(port_id)) {
RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
return -EINVAL;
}
@@ -782,7 +780,7 @@ kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
int ret = 0;
- if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
+ if (!rte_eth_dev_is_valid_port(port_id)) {
RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
return -EINVAL;
}
@@ -834,13 +832,18 @@ kni_alloc(uint16_t port_id)
if (i == 0) {
struct rte_kni_ops ops;
struct rte_eth_dev_info dev_info;
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus = NULL;
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
- if (dev_info.pci_dev) {
- conf.addr = dev_info.pci_dev->addr;
- conf.id = dev_info.pci_dev->id;
+ if (dev_info.device)
+ bus = rte_bus_find_by_device(dev_info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(dev_info.device);
+ conf.addr = pci_dev->addr;
+ conf.id = pci_dev->id;
}
/* Get the interface default mac address */
rte_eth_macaddr_get(port_id,
@@ -921,13 +924,13 @@ main(int argc, char** argv)
}
/* Get number of ports found in scan */
- nb_sys_ports = rte_eth_dev_count();
+ nb_sys_ports = rte_eth_dev_count_avail();
if (nb_sys_ports == 0)
rte_exit(EXIT_FAILURE, "No supported Ethernet device found\n");
/* Check if the configured port ID is valid */
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
- if (kni_port_params_array[i] && i >= nb_sys_ports)
+ if (kni_port_params_array[i] && !rte_eth_dev_is_valid_port(i))
rte_exit(EXIT_FAILURE, "Configured invalid "
"port ID %u\n", i);
@@ -935,7 +938,7 @@ main(int argc, char** argv)
init_kni();
/* Initialise each port */
- for (port = 0; port < nb_sys_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Skip ports that are not enabled */
if (!(ports_mask & (1 << port)))
continue;
@@ -947,7 +950,7 @@ main(int argc, char** argv)
kni_alloc(port);
}
- check_all_ports_link_status(nb_sys_ports, ports_mask);
+ check_all_ports_link_status(ports_mask);
/* Launch per-lcore function on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
@@ -957,7 +960,7 @@ main(int argc, char** argv)
}
/* Release resources */
- for (port = 0; port < nb_sys_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
if (!(ports_mask & (1 << port)))
continue;
kni_free_kni(port);
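
kni_alloc() can no longer read dev_info.pci_dev, which was removed from rte_eth_dev_info; the generic device handle must be resolved through its bus and downcast with RTE_DEV_TO_PCI() only when that bus really is "pci". The lookup as a sketch against the 18.08 bus API (port_to_pci() is illustrative):

#include <string.h>
#include <rte_bus.h>
#include <rte_bus_pci.h>
#include <rte_ethdev.h>

static const struct rte_pci_device *
port_to_pci(uint16_t port_id)
{
        struct rte_eth_dev_info dev_info;
        const struct rte_bus *bus;

        rte_eth_dev_info_get(port_id, &dev_info);
        if (dev_info.device == NULL)
                return NULL;

        bus = rte_bus_find_by_device(dev_info.device);
        if (bus == NULL || strcmp(bus->name, "pci") != 0)
                return NULL; /* e.g. a vdev port */

        return RTE_DEV_TO_PCI(dev_info.device);
}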
diff --git a/examples/kni/meson.build b/examples/kni/meson.build
index c39aead6..79131639 100644
--- a/examples/kni/meson.build
+++ b/examples/kni/meson.build
@@ -6,6 +6,8 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
+# this app can be built if and only if the KNI library is buildable
+build = dpdk_conf.has('RTE_LIBRTE_KNI')
deps += ['kni', 'bus_pci']
sources = files(
'main.c'
diff --git a/examples/l2fwd-cat/l2fwd-cat.c b/examples/l2fwd-cat/l2fwd-cat.c
index 69339cca..0e6078aa 100644
--- a/examples/l2fwd-cat/l2fwd-cat.c
+++ b/examples/l2fwd-cat/l2fwd-cat.c
@@ -39,7 +39,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
uint16_t nb_rxd = RX_RING_SIZE;
uint16_t nb_txd = TX_RING_SIZE;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
/* Configure the Ethernet device. */
@@ -95,14 +95,13 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint16_t nb_ports = rte_eth_dev_count();
uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
* for best performance.
*/
- for (port = 0; port < nb_ports; port++)
+ RTE_ETH_FOREACH_DEV(port)
if (rte_eth_dev_socket_id(port) > 0 &&
rte_eth_dev_socket_id(port) !=
(int)rte_socket_id())
@@ -119,7 +118,7 @@ lcore_main(void)
* Receive packets on a port and forward them on the paired
* port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
*/
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Get burst of RX packets, from first port of pair. */
struct rte_mbuf *bufs[BURST_SIZE];
@@ -174,7 +173,7 @@ main(int argc, char *argv[])
argv += ret;
/* Check that there is an even number of ports to send/receive on. */
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports < 2 || (nb_ports & 1))
rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
@@ -186,7 +185,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* Initialize all ports. */
- for (portid = 0; portid < nb_ports; portid++)
+ RTE_ETH_FOREACH_DEV(portid)
if (port_init(portid, mbuf_pool) != 0)
rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
diff --git a/examples/l2fwd-cat/meson.build b/examples/l2fwd-cat/meson.build
index b6deabc9..1234e7b5 100644
--- a/examples/l2fwd-cat/meson.build
+++ b/examples/l2fwd-cat/meson.build
@@ -6,7 +6,9 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
-ext_deps += cc.find_library('pqos')
+pqos = cc.find_library('pqos', required: false)
+build = pqos.found()
+ext_deps += pqos
cflags += '-D_GNU_SOURCE'
cflags += '-I/usr/local/include' # assume pqos lib installed in /usr/local
sources = files(
diff --git a/examples/l2fwd-crypto/Makefile b/examples/l2fwd-crypto/Makefile
index a67f087b..6658fd0d 100644
--- a/examples/l2fwd-crypto/Makefile
+++ b/examples/l2fwd-crypto/Makefile
@@ -51,5 +51,11 @@ include $(RTE_SDK)/mk/rte.vars.mk
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y)
+LDLIBS += -lrte_pmd_crypto_scheduler
+endif
+endif
+
include $(RTE_SDK)/mk/rte.extapp.mk
endif
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 4d8341e2..6061b751 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -42,6 +42,9 @@
#include <rte_prefetch.h>
#include <rte_random.h>
#include <rte_hexdump.h>
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#include <rte_cryptodev_scheduler.h>
+#endif
enum cdev_type {
CDEV_TYPE_ANY,
@@ -59,7 +62,6 @@ enum cdev_type {
#define MAX_AAD_SIZE 65535
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-#define MAX_SESSIONS 32
#define SESSION_POOL_CACHE_SIZE 0
#define MAXIMUM_IV_LENGTH 16
@@ -211,7 +213,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -407,7 +408,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
/* Zero pad data to be crypto'd so it is block aligned */
data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
- if (cparams->do_hash && cparams->hash_verify)
+ if ((cparams->do_hash || cparams->do_aead) && cparams->hash_verify)
data_len -= cparams->digest_length;
if (cparams->do_cipher) {
@@ -1474,8 +1475,8 @@ l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
options->aead_iv_random_size = -1;
options->aead_iv.length = 0;
- options->auth_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
- options->auth_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ options->aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ options->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
options->aad_param = 0;
options->aad_random_size = -1;
@@ -1721,7 +1722,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -1733,7 +1734,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -1930,21 +1931,19 @@ check_supported_size(uint16_t length, uint16_t min, uint16_t max,
static int
check_iv_param(const struct rte_crypto_param_range *iv_range_size,
unsigned int iv_param, int iv_random_size,
- uint16_t *iv_length)
+ uint16_t iv_length)
{
/*
* Check if length of provided IV is supported
* by the algorithm chosen.
*/
if (iv_param) {
- if (check_supported_size(*iv_length,
+ if (check_supported_size(iv_length,
iv_range_size->min,
iv_range_size->max,
iv_range_size->increment)
- != 0) {
- printf("Unsupported IV length\n");
+ != 0)
return -1;
- }
/*
* Check if length of IV to be randomly generated
* is supported by the algorithm chosen.
@@ -1954,14 +1953,250 @@ check_iv_param(const struct rte_crypto_param_range *iv_range_size,
iv_range_size->min,
iv_range_size->max,
iv_range_size->increment)
- != 0) {
- printf("Unsupported IV length\n");
+ != 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id)
+{
+ struct rte_cryptodev_info dev_info;
+ const struct rte_cryptodev_capabilities *cap;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+ /* Set AEAD parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
+ /* Check if device supports AEAD algo */
+ cap = check_device_support_aead_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
+ return -1;
+
+ if (check_iv_param(&cap->sym.aead.iv_size,
+ options->aead_iv_param,
+ options->aead_iv_random_size,
+ options->aead_iv.length) != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support IV length\n",
+ cdev_id);
return -1;
}
- *iv_length = iv_random_size;
- /* No size provided, use minimum size. */
- } else
- *iv_length = iv_range_size->min;
+
+ /*
+ * Check if length of provided AEAD key is supported
+ * by the algorithm chosen.
+ */
+ if (options->aead_key_param) {
+ if (check_supported_size(
+ options->aead_xform.aead.key.length,
+ cap->sym.aead.key_size.min,
+ cap->sym.aead.key_size.max,
+ cap->sym.aead.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "AEAD key length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of the aead key to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->aead_key_random_size != -1) {
+ if (check_supported_size(options->aead_key_random_size,
+ cap->sym.aead.key_size.min,
+ cap->sym.aead.key_size.max,
+ cap->sym.aead.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "AEAD key length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+
+ /*
+ * Check if length of provided AAD is supported
+ * by the algorithm chosen.
+ */
+ if (options->aad_param) {
+ if (check_supported_size(options->aad.length,
+ cap->sym.aead.aad_size.min,
+ cap->sym.aead.aad_size.max,
+ cap->sym.aead.aad_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "AAD length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of AAD to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->aad_random_size != -1) {
+ if (check_supported_size(options->aad_random_size,
+ cap->sym.aead.aad_size.min,
+ cap->sym.aead.aad_size.max,
+ cap->sym.aead.aad_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "AAD length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+
+ /* Check if digest size is supported by the algorithm. */
+ if (options->digest_size != -1) {
+ if (check_supported_size(options->digest_size,
+ cap->sym.aead.digest_size.min,
+ cap->sym.aead.digest_size.max,
+ cap->sym.aead.digest_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "digest length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+ }
+
+ /* Set cipher parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
+ options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
+ /* Check if device supports cipher algo */
+ cap = check_device_support_cipher_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
+ return -1;
+
+ if (check_iv_param(&cap->sym.cipher.iv_size,
+ options->cipher_iv_param,
+ options->cipher_iv_random_size,
+ options->cipher_iv.length) != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support IV length\n",
+ cdev_id);
+ return -1;
+ }
+
+ /*
+ * Check if length of provided cipher key is supported
+ * by the algorithm chosen.
+ */
+ if (options->ckey_param) {
+ if (check_supported_size(
+ options->cipher_xform.cipher.key.length,
+ cap->sym.cipher.key_size.min,
+ cap->sym.cipher.key_size.max,
+ cap->sym.cipher.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support cipher "
+ "key length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of the cipher key to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->ckey_random_size != -1) {
+ if (check_supported_size(options->ckey_random_size,
+ cap->sym.cipher.key_size.min,
+ cap->sym.cipher.key_size.max,
+ cap->sym.cipher.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support cipher "
+ "key length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+ }
+
+ /* Set auth parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
+ /* Check if device supports auth algo */
+ cap = check_device_support_auth_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
+ return -1;
+
+ if (check_iv_param(&cap->sym.auth.iv_size,
+ options->auth_iv_param,
+ options->auth_iv_random_size,
+ options->auth_iv.length) != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support IV length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of provided auth key is supported
+ * by the algorithm chosen.
+ */
+ if (options->akey_param) {
+ if (check_supported_size(
+ options->auth_xform.auth.key.length,
+ cap->sym.auth.key_size.min,
+ cap->sym.auth.key_size.max,
+ cap->sym.auth.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support auth "
+ "key length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of the auth key to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->akey_random_size != -1) {
+ if (check_supported_size(options->akey_random_size,
+ cap->sym.auth.key_size.min,
+ cap->sym.auth.key_size.max,
+ cap->sym.auth.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support auth "
+ "key length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+
+ /* Check if digest size is supported by the algorithm. */
+ if (options->digest_size != -1) {
+ if (check_supported_size(options->digest_size,
+ cap->sym.auth.digest_size.min,
+ cap->sym.auth.digest_size.max,
+ cap->sym.auth.digest_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "digest length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+ }
return 0;
}
@@ -1970,9 +2205,10 @@ static int
initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
uint8_t *enabled_cdevs)
{
- unsigned int cdev_id, cdev_count, enabled_cdev_count = 0;
+ uint8_t cdev_id, cdev_count, enabled_cdev_count = 0;
const struct rte_cryptodev_capabilities *cap;
unsigned int sess_sz, max_sess_sz = 0;
+ uint32_t sessions_needed = 0;
int retval;
cdev_count = rte_cryptodev_count();
@@ -1981,16 +2217,31 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
return -1;
}
- for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
- sess_sz = rte_cryptodev_get_private_session_size(cdev_id);
+ for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
+ cdev_id++) {
+ if (check_cryptodev_mask(options, cdev_id) < 0)
+ continue;
+
+ if (check_capabilities(options, cdev_id) < 0)
+ continue;
+
+ sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
if (sess_sz > max_sess_sz)
max_sess_sz = sess_sz;
+
+ l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
+
+ enabled_cdevs[cdev_id] = 1;
+ enabled_cdev_count++;
}
- for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
- cdev_id++) {
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
struct rte_cryptodev_qp_conf qp_conf;
struct rte_cryptodev_info dev_info;
+
+ if (enabled_cdevs[cdev_id] == 0)
+ continue;
+
retval = rte_cryptodev_socket_id(cdev_id);
if (retval < 0) {
@@ -2005,11 +2256,23 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
.socket_id = socket_id,
};
- if (check_cryptodev_mask(options, (uint8_t)cdev_id))
- continue;
-
rte_cryptodev_info_get(cdev_id, &dev_info);
+ /*
+ * Two session objects are required for each session
+ * (one for the header, one for the private data)
+ */
+ if (!strcmp(dev_info.driver_name, "crypto_scheduler")) {
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ uint32_t nb_slaves =
+ rte_cryptodev_scheduler_slaves_get(cdev_id,
+ NULL);
+
+ sessions_needed = 2 * enabled_cdev_count * nb_slaves;
+#endif
+ } else
+ sessions_needed = 2 * enabled_cdev_count;
+
if (session_pool_socket[socket_id] == NULL) {
char mp_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *sess_mp;
@@ -2022,7 +2285,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
* device private data
*/
sess_mp = rte_mempool_create(mp_name,
- MAX_SESSIONS * 2,
+ sessions_needed,
max_sess_sz,
SESSION_POOL_CACHE_SIZE,
0, NULL, NULL, NULL,
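
The pool sizing above follows from two facts stated in the comment: each symmetric session consumes two mempool objects (one header, one private data), and the crypto_scheduler PMD instantiates the session on every slave, multiplying the requirement accordingly. As a sketch (session_pool_objs() is illustrative):

#include <stdint.h>

static uint32_t
session_pool_objs(uint32_t n_sessions, uint32_t n_scheduler_slaves)
{
        uint32_t per_session = 2; /* header + private data */

        if (n_scheduler_slaves > 0)
                per_session *= n_scheduler_slaves;

        return n_sessions * per_session;
}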
@@ -2041,106 +2304,57 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
/* Set AEAD parameters */
if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
- /* Check if device supports AEAD algo */
cap = check_device_support_aead_algo(options, &dev_info,
cdev_id);
- if (cap == NULL)
- continue;
options->block_size = cap->sym.aead.block_size;
- check_iv_param(&cap->sym.aead.iv_size,
- options->aead_iv_param,
- options->aead_iv_random_size,
- &options->aead_iv.length);
+ /* Set IV if not provided from command line */
+ if (options->aead_iv_param == 0) {
+ if (options->aead_iv_random_size != -1)
+ options->aead_iv.length =
+ options->aead_iv_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->aead_iv.length =
+ cap->sym.aead.iv_size.min;
+ }
- /*
- * Check if length of provided AEAD key is supported
- * by the algorithm chosen.
- */
- if (options->aead_key_param) {
- if (check_supported_size(
- options->aead_xform.aead.key.length,
- cap->sym.aead.key_size.min,
- cap->sym.aead.key_size.max,
- cap->sym.aead.key_size.increment)
- != 0) {
- printf("Unsupported aead key length\n");
- return -1;
- }
- /*
- * Check if length of the aead key to be randomly generated
- * is supported by the algorithm chosen.
- */
- } else if (options->aead_key_random_size != -1) {
- if (check_supported_size(options->aead_key_random_size,
- cap->sym.aead.key_size.min,
- cap->sym.aead.key_size.max,
- cap->sym.aead.key_size.increment)
- != 0) {
- printf("Unsupported aead key length\n");
- return -1;
- }
- options->aead_xform.aead.key.length =
- options->aead_key_random_size;
- /* No size provided, use minimum size. */
- } else
- options->aead_xform.aead.key.length =
+ /* Set key if not provided from command line */
+ if (options->aead_key_param == 0) {
+ if (options->aead_key_random_size != -1)
+ options->aead_xform.aead.key.length =
+ options->aead_key_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->aead_xform.aead.key.length =
cap->sym.aead.key_size.min;
- if (!options->aead_key_param)
generate_random_key(
options->aead_xform.aead.key.data,
options->aead_xform.aead.key.length);
+ }
- /*
- * Check if length of provided AAD is supported
- * by the algorithm chosen.
- */
- if (options->aad_param) {
- if (check_supported_size(options->aad.length,
- cap->sym.aead.aad_size.min,
- cap->sym.aead.aad_size.max,
- cap->sym.aead.aad_size.increment)
- != 0) {
- printf("Unsupported AAD length\n");
- return -1;
- }
- /*
- * Check if length of AAD to be randomly generated
- * is supported by the algorithm chosen.
- */
- } else if (options->aad_random_size != -1) {
- if (check_supported_size(options->aad_random_size,
- cap->sym.aead.aad_size.min,
- cap->sym.aead.aad_size.max,
- cap->sym.aead.aad_size.increment)
- != 0) {
- printf("Unsupported AAD length\n");
- return -1;
- }
- options->aad.length = options->aad_random_size;
- /* No size provided, use minimum size. */
- } else
- options->aad.length = cap->sym.auth.aad_size.min;
+ /* Set AAD if not provided from command line */
+ if (options->aad_param == 0) {
+ if (options->aad_random_size != -1)
+ options->aad.length =
+ options->aad_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->aad.length =
+ cap->sym.auth.aad_size.min;
+ }
options->aead_xform.aead.aad_length =
options->aad.length;
- /* Check if digest size is supported by the algorithm. */
- if (options->digest_size != -1) {
- if (check_supported_size(options->digest_size,
- cap->sym.aead.digest_size.min,
- cap->sym.aead.digest_size.max,
- cap->sym.aead.digest_size.increment)
- != 0) {
- printf("Unsupported digest length\n");
- return -1;
- }
+ /* Set digest size if not provided from command line */
+ if (options->digest_size != -1)
options->aead_xform.aead.digest_length =
options->digest_size;
- /* No size provided, use minimum size. */
- } else
+ /* No size provided, use minimum size. */
+ else
options->aead_xform.aead.digest_length =
cap->sym.aead.digest_size.min;
}
@@ -2149,127 +2363,76 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
- /* Check if device supports cipher algo */
cap = check_device_support_cipher_algo(options, &dev_info,
cdev_id);
- if (cap == NULL)
- continue;
-
options->block_size = cap->sym.cipher.block_size;
- check_iv_param(&cap->sym.cipher.iv_size,
- options->cipher_iv_param,
- options->cipher_iv_random_size,
- &options->cipher_iv.length);
+ /* Set IV if not provided from command line */
+ if (options->cipher_iv_param == 0) {
+ if (options->cipher_iv_random_size != -1)
+ options->cipher_iv.length =
+ options->cipher_iv_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->cipher_iv.length =
+ cap->sym.cipher.iv_size.min;
+ }
- /*
- * Check if length of provided cipher key is supported
- * by the algorithm chosen.
- */
- if (options->ckey_param) {
- if (check_supported_size(
- options->cipher_xform.cipher.key.length,
- cap->sym.cipher.key_size.min,
- cap->sym.cipher.key_size.max,
- cap->sym.cipher.key_size.increment)
- != 0) {
- printf("Unsupported cipher key length\n");
- return -1;
- }
- /*
- * Check if length of the cipher key to be randomly generated
- * is supported by the algorithm chosen.
- */
- } else if (options->ckey_random_size != -1) {
- if (check_supported_size(options->ckey_random_size,
- cap->sym.cipher.key_size.min,
- cap->sym.cipher.key_size.max,
- cap->sym.cipher.key_size.increment)
- != 0) {
- printf("Unsupported cipher key length\n");
- return -1;
- }
- options->cipher_xform.cipher.key.length =
- options->ckey_random_size;
- /* No size provided, use minimum size. */
- } else
- options->cipher_xform.cipher.key.length =
+ /* Set key if not provided from command line */
+ if (options->ckey_param == 0) {
+ if (options->ckey_random_size != -1)
+ options->cipher_xform.cipher.key.length =
+ options->ckey_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->cipher_xform.cipher.key.length =
cap->sym.cipher.key_size.min;
- if (!options->ckey_param)
generate_random_key(
options->cipher_xform.cipher.key.data,
options->cipher_xform.cipher.key.length);
-
+ }
}
/* Set auth parameters */
if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
- /* Check if device supports auth algo */
cap = check_device_support_auth_algo(options, &dev_info,
cdev_id);
- if (cap == NULL)
- continue;
- check_iv_param(&cap->sym.auth.iv_size,
- options->auth_iv_param,
- options->auth_iv_random_size,
- &options->auth_iv.length);
- /*
- * Check if length of provided auth key is supported
- * by the algorithm chosen.
- */
- if (options->akey_param) {
- if (check_supported_size(
- options->auth_xform.auth.key.length,
- cap->sym.auth.key_size.min,
- cap->sym.auth.key_size.max,
- cap->sym.auth.key_size.increment)
- != 0) {
- printf("Unsupported auth key length\n");
- return -1;
- }
- /*
- * Check if length of the auth key to be randomly generated
- * is supported by the algorithm chosen.
- */
- } else if (options->akey_random_size != -1) {
- if (check_supported_size(options->akey_random_size,
- cap->sym.auth.key_size.min,
- cap->sym.auth.key_size.max,
- cap->sym.auth.key_size.increment)
- != 0) {
- printf("Unsupported auth key length\n");
- return -1;
- }
- options->auth_xform.auth.key.length =
- options->akey_random_size;
- /* No size provided, use minimum size. */
- } else
- options->auth_xform.auth.key.length =
+ /* Set IV if not provided from command line */
+ if (options->auth_iv_param == 0) {
+ if (options->auth_iv_random_size != -1)
+ options->auth_iv.length =
+ options->auth_iv_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->auth_iv.length =
+ cap->sym.auth.iv_size.min;
+ }
+
+ /* Set key if not provided from command line */
+ if (options->akey_param == 0) {
+ if (options->akey_random_size != -1)
+ options->auth_xform.auth.key.length =
+ options->akey_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->auth_xform.auth.key.length =
cap->sym.auth.key_size.min;
- if (!options->akey_param)
generate_random_key(
options->auth_xform.auth.key.data,
options->auth_xform.auth.key.length);
+ }
- /* Check if digest size is supported by the algorithm. */
- if (options->digest_size != -1) {
- if (check_supported_size(options->digest_size,
- cap->sym.auth.digest_size.min,
- cap->sym.auth.digest_size.max,
- cap->sym.auth.digest_size.increment)
- != 0) {
- printf("Unsupported digest length\n");
- return -1;
- }
+ /* Set digest size if not provided from command line */
+ if (options->digest_size != -1)
options->auth_xform.auth.digest_length =
options->digest_size;
- /* No size provided, use minimum size. */
- } else
+ /* No size provided, use minimum size. */
+ else
options->auth_xform.auth.digest_length =
cap->sym.auth.digest_size.min;
}
@@ -2296,11 +2459,6 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
cdev_id, retval);
return -1;
}
-
- l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
-
- enabled_cdevs[cdev_id] = 1;
- enabled_cdev_count++;
}
return enabled_cdev_count;
@@ -2309,9 +2467,9 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
static int
initialize_ports(struct l2fwd_crypto_options *options)
{
- uint16_t last_portid, portid;
+ uint16_t last_portid = 0, portid;
unsigned enabled_portcount = 0;
- unsigned nb_ports = rte_eth_dev_count();
+ unsigned nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0) {
printf("No Ethernet ports - bye\n");
@@ -2322,7 +2480,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
l2fwd_dst_ports[portid] = 0;
- for (last_portid = 0, portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
int retval;
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf rxq_conf;
@@ -2371,7 +2529,6 @@ initialize_ports(struct l2fwd_crypto_options *options)
/* init one TX queue on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
@@ -2426,7 +2583,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
return -1;
}
- check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
+ check_all_ports_link_status(l2fwd_enabled_port_mask);
return enabled_portcount;
}
@@ -2470,12 +2627,12 @@ reserve_key_memory(struct l2fwd_crypto_options *options)
int
main(int argc, char **argv)
{
- struct lcore_queue_conf *qconf;
+ struct lcore_queue_conf *qconf = NULL;
struct l2fwd_crypto_options options;
uint8_t nb_cryptodevs, cdev_id;
- uint16_t nb_ports, portid;
- unsigned lcore_id, rx_lcore_id;
+ uint16_t portid;
+ unsigned lcore_id, rx_lcore_id = 0;
int ret, enabled_cdevcount, enabled_portcount;
uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0};
@@ -2516,10 +2673,8 @@ main(int argc, char **argv)
if (enabled_portcount < 1)
rte_exit(EXIT_FAILURE, "Failed to initial Ethernet ports\n");
- nb_ports = rte_eth_dev_count();
/* Initialize the port/queue configuration of each logical core */
- for (rx_lcore_id = 0, qconf = NULL, portid = 0;
- portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((options.portmask & (1 << portid)) == 0)
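
The pattern repeated throughout this patch replaces bounded index loops with RTE_ETH_FOREACH_DEV(): once hot-plug and port ownership are in play, ethdev port ids may be sparse, so iterating 0..nb_ports-1 is no longer safe. A minimal sketch of the idiom:

    #include <rte_ethdev.h>

    /* Sketch: visit every valid, attached port and skip those the user
     * excluded via the port mask, as the examples above now do. */
    static void
    visit_enabled_ports(uint32_t port_mask)
    {
    	uint16_t portid;

    	RTE_ETH_FOREACH_DEV(portid) {
    		if ((port_mask & (1 << portid)) == 0)
    			continue; /* not enabled on the command line */
    		/* per-port work goes here */
    	}
    }
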
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 2554f448..af542338 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -90,7 +90,6 @@ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -680,7 +679,7 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -692,7 +691,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -742,7 +741,7 @@ main(int argc, char **argv)
int ret;
char name[RTE_JOBSTATS_NAMESIZE];
uint16_t nb_ports;
- uint16_t nb_ports_available;
+ uint16_t nb_ports_available = 0;
uint16_t portid, last_port;
uint8_t i;
@@ -770,7 +769,7 @@ main(int argc, char **argv)
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
@@ -782,7 +781,7 @@ main(int argc, char **argv)
/*
* Each logical core is assigned a dedicated TX queue on each port.
*/
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -804,7 +803,7 @@ main(int argc, char **argv)
qconf = NULL;
/* Initialize the port/queue configuration of each logical core */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -827,10 +826,8 @@ main(int argc, char **argv)
printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
- nb_ports_available = nb_ports;
-
/* Initialise each port */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf rxq_conf;
struct rte_eth_txconf txq_conf;
@@ -839,9 +836,10 @@ main(int argc, char **argv)
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
printf("Skipping disabled port %u\n", portid);
- nb_ports_available--;
continue;
}
+ nb_ports_available++;
+
/* init port */
printf("Initializing port %u... ", portid);
fflush(stdout);
@@ -877,7 +875,6 @@ main(int argc, char **argv)
/* init one TX queue on each port */
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
@@ -934,7 +931,7 @@ main(int argc, char **argv)
"All available ports are disabled. Please set portmask.\n");
}
- check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
+ check_all_ports_link_status(l2fwd_enabled_port_mask);
drain_tsc = (hz + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 39b8c328..2d8b4d1c 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -81,7 +81,6 @@ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -445,7 +444,7 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -457,7 +456,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -526,7 +525,7 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
int ret;
uint16_t nb_ports;
- uint16_t nb_ports_available;
+ uint16_t nb_ports_available = 0;
uint16_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
@@ -561,7 +560,7 @@ main(int argc, char **argv)
if (l2fwd_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
@@ -573,7 +572,7 @@ main(int argc, char **argv)
/*
* Each logical core is assigned a dedicated TX queue on each port.
*/
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -595,7 +594,7 @@ main(int argc, char **argv)
qconf = NULL;
/* Initialize the port/queue configuration of each logical core */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -619,10 +618,8 @@ main(int argc, char **argv)
rx_lcore_id, portid);
}
- nb_ports_available = nb_ports;
-
/* Initialise each port */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf rxq_conf;
struct rte_eth_txconf txq_conf;
@@ -631,9 +628,10 @@ main(int argc, char **argv)
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
printf("Skipping disabled port %u\n", portid);
- nb_ports_available--;
continue;
}
+ nb_ports_available++;
+
/* init port */
printf("Initializing port %u... ", portid);
fflush(stdout);
@@ -672,7 +670,6 @@ main(int argc, char **argv)
/* init one TX queue on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
@@ -728,7 +725,7 @@ main(int argc, char **argv)
"All available ports are disabled. Please set portmask.\n");
}
- check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
+ check_all_ports_link_status(l2fwd_enabled_port_mask);
struct rte_timer hb_timer, stats_timer;
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index e4a4a7c3..9bb4c5bc 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -82,7 +82,6 @@ static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -444,7 +443,7 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -458,7 +457,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
if (force_quit)
return;
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (force_quit)
return;
if ((port_mask & (1 << portid)) == 0)
@@ -517,7 +516,7 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
int ret;
uint16_t nb_ports;
- uint16_t nb_ports_available;
+ uint16_t nb_ports_available = 0;
uint16_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
@@ -545,7 +544,7 @@ main(int argc, char **argv)
/* convert to number of cycles */
timer_period *= rte_get_timer_hz();
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
@@ -562,7 +561,7 @@ main(int argc, char **argv)
/*
* Each logical core is assigned a dedicated TX queue on each port.
*/
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -585,7 +584,7 @@ main(int argc, char **argv)
qconf = NULL;
/* Initialize the port/queue configuration of each logical core */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -610,8 +609,6 @@ main(int argc, char **argv)
printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
- nb_ports_available = nb_ports;
-
nb_mbufs = RTE_MAX(nb_ports * (nb_rxd + nb_txd + MAX_PKT_BURST +
nb_lcores * MEMPOOL_CACHE_SIZE), 8192U);
@@ -623,7 +620,7 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
/* Initialise each port */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_rxconf rxq_conf;
struct rte_eth_txconf txq_conf;
struct rte_eth_conf local_port_conf = port_conf;
@@ -632,9 +629,10 @@ main(int argc, char **argv)
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
printf("Skipping disabled port %u\n", portid);
- nb_ports_available--;
continue;
}
+ nb_ports_available++;
+
/* init port */
printf("Initializing port %u... ", portid);
fflush(stdout);
@@ -671,7 +669,6 @@ main(int argc, char **argv)
/* init one TX queue on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
@@ -726,7 +723,7 @@ main(int argc, char **argv)
"All available ports are disabled. Please set portmask.\n");
}
- check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
+ check_all_ports_link_status(l2fwd_enabled_port_mask);
ret = 0;
/* launch per-lcore init on every lcore */
@@ -738,7 +735,7 @@ main(int argc, char **argv)
}
}
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
continue;
printf("Closing port %d...", portid);
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 9aebb331..7c063a8d 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -127,7 +127,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_CHECKSUM),
},
@@ -1452,7 +1451,7 @@ check_lcore_params(void)
}
static int
-check_port_config(const unsigned nb_ports)
+check_port_config(void)
{
unsigned portid;
uint16_t i;
@@ -1464,7 +1463,7 @@ check_port_config(const unsigned nb_ports)
printf("port %u is not enabled in port mask\n", portid);
return -1;
}
- if (portid >= nb_ports) {
+ if (!rte_eth_dev_is_valid_port(portid)) {
printf("port %u is not present on the board\n", portid);
return -1;
}
@@ -1805,7 +1804,7 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -1817,7 +1816,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -1891,9 +1890,9 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
- if (check_port_config(nb_ports) < 0)
+ if (check_port_config() < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
/* Add ACL rules and route entries, build trie */
@@ -1903,7 +1902,7 @@ main(int argc, char **argv)
nb_lcores = rte_lcore_count();
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
/* skip ports that are not enabled */
@@ -1926,6 +1925,18 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(portid, nb_rx_queue,
(uint16_t)n_tx_queue, &local_port_conf);
if (ret < 0)
@@ -1982,7 +1993,6 @@ main(int argc, char **argv)
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, txconf);
@@ -2043,7 +2053,7 @@ main(int argc, char **argv)
printf("\n");
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -2064,7 +2074,7 @@ main(int argc, char **argv)
rte_eth_promiscuous_enable(portid);
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
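
The RSS hunk added to each l3fwd variant masks the requested hash functions against what the device actually supports, so rte_eth_dev_configure() does not fail on NICs with a narrower flow_type_rss_offloads set. A stand-alone sketch of that adjustment (helper name hypothetical):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    /* Sketch: clamp conf->rx_adv_conf.rss_conf.rss_hf to the device's
     * capabilities and report anything that was dropped. */
    static void
    clamp_rss_hf(uint16_t portid, struct rte_eth_conf *conf,
    		uint64_t requested)
    {
    	struct rte_eth_dev_info dev_info;

    	rte_eth_dev_info_get(portid, &dev_info);
    	conf->rx_adv_conf.rss_conf.rss_hf =
    		requested & dev_info.flow_type_rss_offloads;
    	if (conf->rx_adv_conf.rss_conf.rss_hf != requested)
    		printf("Port %u: RSS hash functions reduced to %#"PRIx64
    			" (requested %#"PRIx64")\n", portid,
    			conf->rx_adv_conf.rss_conf.rss_hf, requested);
    }
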
diff --git a/examples/l3fwd-power/Makefile b/examples/l3fwd-power/Makefile
index 390b7d6b..d7e39a34 100644
--- a/examples/l3fwd-power/Makefile
+++ b/examples/l3fwd-power/Makefile
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2010-2014 Intel Corporation
+# Copyright(c) 2010-2018 Intel Corporation
# binary name
APP = l3fwd-power
# all source are stored in SRCS-y
-SRCS-y := main.c
+SRCS-y := main.c perf_core.c
# Build using pkg-config variables if possible
$(shell pkg-config --exists libdpdk)
@@ -48,7 +48,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index b2a7c79e..d15cd520 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
#include <stdio.h>
@@ -44,6 +44,9 @@
#include <rte_power.h>
#include <rte_spinlock.h>
+#include "perf_core.h"
+#include "main.h"
+
#define RTE_LOGTYPE_L3FWD_POWER RTE_LOGTYPE_USER1
#define MAX_PKT_BURST 32
@@ -155,14 +158,7 @@ struct lcore_rx_queue {
#define MAX_RX_QUEUE_INTERRUPT_PER_PORT 16
-#define MAX_LCORE_PARAMS 1024
-struct lcore_params {
- uint16_t port_id;
- uint8_t queue_id;
- uint8_t lcore_id;
-} __rte_cache_aligned;
-
-static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
+struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
static struct lcore_params lcore_params_array_default[] = {
{0, 0, 2},
{0, 1, 2},
@@ -175,8 +171,8 @@ static struct lcore_params lcore_params_array_default[] = {
{3, 1, 3},
};
-static struct lcore_params * lcore_params = lcore_params_array_default;
-static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
+struct lcore_params *lcore_params = lcore_params_array_default;
+uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
sizeof(lcore_params_array_default[0]);
static struct rte_eth_conf port_conf = {
@@ -184,7 +180,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_CHECKSUM),
},
@@ -341,7 +336,7 @@ static void
signal_exit_now(int sigtype)
{
unsigned lcore_id;
- unsigned int portid, nb_ports;
+ unsigned int portid;
int ret;
if (sigtype == SIGINT) {
@@ -357,8 +352,7 @@ signal_exit_now(int sigtype)
"core%u\n", lcore_id);
}
- nb_ports = rte_eth_dev_count();
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -1057,7 +1051,7 @@ check_lcore_params(void)
}
static int
-check_port_config(const unsigned nb_ports)
+check_port_config(void)
{
unsigned portid;
uint16_t i;
@@ -1069,7 +1063,7 @@ check_port_config(const unsigned nb_ports)
portid);
return -1;
}
- if (portid >= nb_ports) {
+ if (!rte_eth_dev_is_valid_port(portid)) {
printf("port %u is not present on the board\n",
portid);
return -1;
@@ -1122,10 +1116,15 @@ print_usage(const char *prgname)
{
printf ("%s [EAL options] -- -p PORTMASK -P"
" [--config (port,queue,lcore)[,(port,queue,lcore]]"
+ " [--high-perf-cores CORELIST"
+ " [--perf-config (port,queue,hi_perf,lcore_index)[,(port,queue,hi_perf,lcore_index]]"
" [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
" -P : enable promiscuous mode\n"
" --config (port,queue,lcore): rx queues configuration\n"
+ " --high-perf-cores CORELIST: list of high performance cores\n"
+ " --perf-config: similar as config, cores specified as indices"
+ " for bins containing high or regular performance cores\n"
" --no-numa: optional, disable numa awareness\n"
" --enable-jumbo: enable jumbo frame"
" which max packet len is PKTLEN in decimal (64-9600)\n"
@@ -1235,6 +1234,8 @@ parse_args(int argc, char **argv)
char *prgname = argv[0];
static struct option lgopts[] = {
{"config", 1, 0, 0},
+ {"perf-config", 1, 0, 0},
+ {"high-perf-cores", 1, 0, 0},
{"no-numa", 0, 0, 0},
{"enable-jumbo", 0, 0, 0},
{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
@@ -1273,6 +1274,26 @@ parse_args(int argc, char **argv)
}
if (!strncmp(lgopts[option_index].name,
+ "perf-config", 11)) {
+ ret = parse_perf_config(optarg);
+ if (ret) {
+ printf("invalid perf-config\n");
+ print_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (!strncmp(lgopts[option_index].name,
+ "high-perf-cores", 15)) {
+ ret = parse_perf_core_list(optarg);
+ if (ret) {
+ printf("invalid high-perf-cores\n");
+ print_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (!strncmp(lgopts[option_index].name,
"no-numa", 7)) {
printf("numa is disabled \n");
numa_on = 0;
@@ -1512,7 +1533,7 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -1524,7 +1545,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -1610,6 +1631,23 @@ static int check_ptype(uint16_t portid)
}
+static int
+init_power_library(void)
+{
+ int ret = 0, lcore_id;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (rte_lcore_is_enabled(lcore_id)) {
+ /* init power management library */
+ ret = rte_power_init(lcore_id);
+ if (ret)
+ RTE_LOG(ERR, POWER,
+ "Library initialization failed on core %u\n",
+ lcore_id);
+ }
+ }
+ return ret;
+}
+
int
main(int argc, char **argv)
{
@@ -1644,6 +1682,12 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
+ if (init_power_library())
+ rte_exit(EXIT_FAILURE, "init_power_library failed\n");
+
+ if (update_lcore_params() < 0)
+ rte_exit(EXIT_FAILURE, "update_lcore_params failed\n");
+
if (check_lcore_params() < 0)
rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
@@ -1651,15 +1695,15 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
- if (check_port_config(nb_ports) < 0)
+ if (check_port_config() < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
nb_lcores = rte_lcore_count();
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
/* skip ports that are not enabled */
@@ -1694,6 +1738,18 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(portid, nb_rx_queue,
(uint16_t)n_tx_queue, &local_port_conf);
if (ret < 0)
@@ -1751,7 +1807,6 @@ main(int argc, char **argv)
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, txconf);
@@ -1774,12 +1829,6 @@ main(int argc, char **argv)
if (rte_lcore_is_enabled(lcore_id) == 0)
continue;
- /* init power management library */
- ret = rte_power_init(lcore_id);
- if (ret)
- RTE_LOG(ERR, POWER,
- "Library initialization failed on core %u\n", lcore_id);
-
/* init timer structures for each enabled lcore */
rte_timer_init(&power_timers[lcore_id]);
hz = rte_get_timer_hz();
@@ -1834,7 +1883,7 @@ main(int argc, char **argv)
printf("\n");
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0) {
continue;
}
@@ -1855,7 +1904,7 @@ main(int argc, char **argv)
rte_spinlock_init(&(locks[portid]));
}
- check_all_ports_link_status(nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
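
Hoisting rte_power_init() into init_power_library() means a per-lcore failure now surfaces before any port is configured, instead of only being logged from inside the timer-setup loop. An application that also tears the library down could mirror the helper; a sketch, assuming rte_power_exit() as the counterpart of rte_power_init():

    #include <rte_lcore.h>
    #include <rte_power.h>

    /* Sketch: symmetric cleanup for init_power_library() above. */
    static void
    exit_power_library(void)
    {
    	unsigned int lcore_id;

    	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
    		if (rte_lcore_is_enabled(lcore_id))
    			rte_power_exit(lcore_id);
    }
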
diff --git a/examples/l3fwd-power/main.h b/examples/l3fwd-power/main.h
new file mode 100644
index 00000000..258de98f
--- /dev/null
+++ b/examples/l3fwd-power/main.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+
+#define MAX_LCORE_PARAMS 1024
+struct lcore_params {
+ uint16_t port_id;
+ uint8_t queue_id;
+ uint8_t lcore_id;
+} __rte_cache_aligned;
+
+extern struct lcore_params *lcore_params;
+extern uint16_t nb_lcore_params;
+extern struct lcore_params lcore_params_array[];
+
+#endif /* _MAIN_H_ */
diff --git a/examples/l3fwd-power/meson.build b/examples/l3fwd-power/meson.build
index 61e8daa9..20c80543 100644
--- a/examples/l3fwd-power/meson.build
+++ b/examples/l3fwd-power/meson.build
@@ -1,12 +1,15 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2018 Intel Corporation
# meson file, for building this example as part of a main DPDK build.
#
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
+if host_machine.system() != 'linux'
+ build = false
+endif
deps += ['power', 'timer', 'lpm', 'hash']
sources = files(
- 'main.c'
+ 'main.c', 'perf_core.c'
)
diff --git a/examples/l3fwd-power/perf_core.c b/examples/l3fwd-power/perf_core.c
new file mode 100644
index 00000000..83948ea2
--- /dev/null
+++ b/examples/l3fwd-power/perf_core.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_power.h>
+#include <rte_string_fns.h>
+
+#include "perf_core.h"
+#include "main.h"
+
+
+static uint16_t hp_lcores[RTE_MAX_LCORE];
+static uint16_t nb_hp_lcores;
+
+struct perf_lcore_params {
+ uint16_t port_id;
+ uint8_t queue_id;
+ uint8_t high_perf;
+ uint8_t lcore_idx;
+} __rte_cache_aligned;
+
+static struct perf_lcore_params prf_lc_prms[MAX_LCORE_PARAMS];
+static uint16_t nb_prf_lc_prms;
+
+int
+update_lcore_params(void)
+{
+ uint8_t non_perf_lcores[RTE_MAX_LCORE];
+ uint16_t nb_non_perf_lcores = 0;
+ int i, j, ret;
+
+ /* if perf-config option was not used, do nothing */
+ if (nb_prf_lc_prms == 0)
+ return 0;
+
+ /* if high-perf-cores option was not used, query every available core */
+ if (nb_hp_lcores == 0) {
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (rte_lcore_is_enabled(i)) {
+ struct rte_power_core_capabilities caps;
+ ret = rte_power_get_capabilities(i, &caps);
+ if (ret == 0 && caps.turbo) {
+ hp_lcores[nb_hp_lcores] = i;
+ nb_hp_lcores++;
+ }
+ }
+ }
+ }
+
+ /* create a list of non-high-performance cores */
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (rte_lcore_is_enabled(i)) {
+ int hp = 0;
+ for (j = 0; j < nb_hp_lcores; j++) {
+ if (hp_lcores[j] == i) {
+ hp = 1;
+ break;
+ }
+ }
+ if (!hp)
+ non_perf_lcores[nb_non_perf_lcores++] = i;
+ }
+ }
+
+ /* update the lcore config */
+ for (i = 0; i < nb_prf_lc_prms; i++) {
+ int lcore = -1;
+ if (prf_lc_prms[i].high_perf) {
+ if (prf_lc_prms[i].lcore_idx < nb_hp_lcores)
+ lcore = hp_lcores[prf_lc_prms[i].lcore_idx];
+ } else {
+ if (prf_lc_prms[i].lcore_idx < nb_non_perf_lcores)
+ lcore =
+ non_perf_lcores[prf_lc_prms[i].lcore_idx];
+ }
+
+ if (lcore < 0) {
+ printf("Performance cores configuration error\n");
+ return -1;
+ }
+
+ lcore_params_array[i].lcore_id = lcore;
+ lcore_params_array[i].queue_id = prf_lc_prms[i].queue_id;
+ lcore_params_array[i].port_id = prf_lc_prms[i].port_id;
+ }
+
+ lcore_params = lcore_params_array;
+ nb_lcore_params = nb_prf_lc_prms;
+
+ printf("Updated performance core configuration\n");
+ for (i = 0; i < nb_prf_lc_prms; i++)
+ printf("\t(%d,%d,%d)\n", lcore_params[i].port_id,
+ lcore_params[i].queue_id,
+ lcore_params[i].lcore_id);
+
+ return 0;
+}
+
+int
+parse_perf_config(const char *q_arg)
+{
+ char s[256];
+ const char *p, *p0 = q_arg;
+ char *end;
+ enum fieldnames {
+ FLD_PORT = 0,
+ FLD_QUEUE,
+ FLD_LCORE_HP,
+ FLD_LCORE_IDX,
+ _NUM_FLD
+ };
+ unsigned long int_fld[_NUM_FLD];
+ char *str_fld[_NUM_FLD];
+ int i;
+ unsigned int size;
+
+ nb_prf_lc_prms = 0;
+
+ while ((p = strchr(p0, '(')) != NULL) {
+ ++p;
+ p0 = strchr(p, ')');
+ if (p0 == NULL)
+ return -1;
+
+ size = p0 - p;
+ if (size >= sizeof(s))
+ return -1;
+
+ snprintf(s, sizeof(s), "%.*s", size, p);
+ if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
+ _NUM_FLD)
+ return -1;
+ for (i = 0; i < _NUM_FLD; i++) {
+ errno = 0;
+ int_fld[i] = strtoul(str_fld[i], &end, 0);
+ if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+ return -1;
+ }
+ if (nb_prf_lc_prms >= MAX_LCORE_PARAMS) {
+ printf("exceeded max number of lcore params: %hu\n",
+ nb_prf_lc_prms);
+ return -1;
+ }
+ prf_lc_prms[nb_prf_lc_prms].port_id =
+ (uint8_t)int_fld[FLD_PORT];
+ prf_lc_prms[nb_prf_lc_prms].queue_id =
+ (uint8_t)int_fld[FLD_QUEUE];
+ prf_lc_prms[nb_prf_lc_prms].high_perf =
+ !!(uint8_t)int_fld[FLD_LCORE_HP];
+ prf_lc_prms[nb_prf_lc_prms].lcore_idx =
+ (uint8_t)int_fld[FLD_LCORE_IDX];
+ ++nb_prf_lc_prms;
+ }
+
+ return 0;
+}
+
+int
+parse_perf_core_list(const char *corelist)
+{
+ int i, idx = 0;
+ unsigned int count = 0;
+ char *end = NULL;
+ int min, max;
+
+ if (corelist == NULL) {
+ printf("invalid core list\n");
+ return -1;
+ }
+
+
+ /* Trim leading and trailing blank characters */
+ while (isblank(*corelist))
+ corelist++;
+ i = strlen(corelist);
+ while ((i > 0) && isblank(corelist[i - 1]))
+ i--;
+
+ /* Get list of cores */
+ min = RTE_MAX_LCORE;
+ do {
+ while (isblank(*corelist))
+ corelist++;
+ if (*corelist == '\0')
+ return -1;
+ errno = 0;
+ idx = strtoul(corelist, &end, 10);
+ if (errno || end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ for (idx = min; idx <= max; idx++) {
+ hp_lcores[count] = idx;
+ count++;
+ }
+ min = RTE_MAX_LCORE;
+ } else {
+ printf("invalid core list\n");
+ return -1;
+ }
+ corelist = end + 1;
+ } while (*end != '\0');
+
+ if (count == 0) {
+ printf("invalid core list\n");
+ return -1;
+ }
+
+ nb_hp_lcores = count;
+
+ printf("Configured %d high performance cores\n", nb_hp_lcores);
+ for (i = 0; i < nb_hp_lcores; i++)
+ printf("\tHigh performance core %d %d\n",
+ i, hp_lcores[i]);
+
+ return 0;
+}
+
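
parse_perf_config() above accepts a comma-separated list of (port,queue,hi_perf,lcore_index) tuples, where hi_perf selects the high-performance or regular core bin and lcore_index indexes into that bin. A hypothetical stand-alone harness, assuming only the declarations from perf_core.h, shows the expected input shape:

    #include <stdio.h>
    #include "perf_core.h"

    /* Hypothetical check: queue 0 of port 0 on high-perf core index 0,
     * queue 1 of port 0 on regular core index 0. */
    int
    main(void)
    {
    	if (parse_perf_config("(0,0,1,0),(0,1,0,0)") != 0) {
    		printf("parse failed\n");
    		return 1;
    	}
    	printf("parsed OK\n");
    	return 0;
    }
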
diff --git a/examples/l3fwd-power/perf_core.h b/examples/l3fwd-power/perf_core.h
new file mode 100644
index 00000000..7b7b747b
--- /dev/null
+++ b/examples/l3fwd-power/perf_core.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#ifndef _PERF_CORE_H_
+#define _PERF_CORE_H_
+
+int parse_perf_config(const char *q_arg);
+int parse_perf_core_list(const char *corelist);
+int update_lcore_params(void);
+
+#endif /* _PERF_CORE_H_ */
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index c69bd62f..5edd91a7 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -161,7 +161,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_CHECKSUM),
},
@@ -575,7 +574,7 @@ check_lcore_params(void)
}
static int
-check_port_config(const unsigned nb_ports)
+check_port_config(void)
{
unsigned portid;
uint16_t i;
@@ -586,7 +585,7 @@ check_port_config(const unsigned nb_ports)
printf("port %u is not enabled in port mask\n", portid);
return -1;
}
- if (portid >= nb_ports) {
+ if (!rte_eth_dev_is_valid_port(portid)) {
printf("port %u is not present on the board\n", portid);
return -1;
}
@@ -648,11 +647,10 @@ static void
signal_handler(int signum)
{
uint16_t portid;
- uint16_t nb_ports = rte_eth_dev_count();
/* When we receive a SIGINT signal */
if (signum == SIGINT) {
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -950,15 +948,15 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
- if (check_port_config(nb_ports) < 0)
+ if (check_port_config() < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
nb_lcores = rte_lcore_count();
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
/* skip ports that are not enabled */
@@ -982,6 +980,18 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(portid, nb_rx_queue,
n_tx_queue, &local_port_conf);
if (ret < 0)
@@ -1010,7 +1020,6 @@ main(int argc, char **argv)
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
socketid, txconf);
@@ -1063,7 +1072,7 @@ main(int argc, char **argv)
printf("\n");
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0) {
continue;
}
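
check_port_config() in each example now asks the ethdev layer whether a port id is real instead of comparing it against a count; with sparse ids, the comparison could accept holes. A sketch of the validated check (helper name hypothetical):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: a port must be both enabled in the mask and backed by a
     * real device, per the rte_eth_dev_is_valid_port() change above. */
    static int
    check_one_port(uint16_t portid, uint32_t enabled_mask)
    {
    	if ((enabled_mask & (1 << portid)) == 0) {
    		printf("port %u is not enabled in port mask\n", portid);
    		return -1;
    	}
    	if (!rte_eth_dev_is_valid_port(portid)) {
    		printf("port %u is not present on the board\n", portid);
    		return -1;
    	}
    	return 0;
    }
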
diff --git a/examples/l3fwd/l3fwd_common.h b/examples/l3fwd/l3fwd_common.h
index 7002a43a..0f4fcb42 100644
--- a/examples/l3fwd/l3fwd_common.h
+++ b/examples/l3fwd/l3fwd_common.h
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * Copyright(c) 2017, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(c) 2017-2018 Linaro Limited.
*/
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 9dc3b8c4..fa8f82be 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -18,7 +18,6 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_mempool.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h
index 9d7afe05..22c26dce 100644
--- a/examples/l3fwd/l3fwd_em_hlm.h
+++ b/examples/l3fwd/l3fwd_em_hlm.h
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * Copyright(c) 2017, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(c) 2017-2018 Linaro Limited.
*/
#ifndef __L3FWD_EM_HLM_H__
diff --git a/examples/l3fwd/l3fwd_em_hlm_neon.h b/examples/l3fwd/l3fwd_em_hlm_neon.h
index dae1acfb..16c8b04a 100644
--- a/examples/l3fwd/l3fwd_em_hlm_neon.h
+++ b/examples/l3fwd/l3fwd_em_hlm_neon.h
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * Copyright(c) 2017, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(c) 2017-2018 Linaro Limited.
*/
#ifndef __L3FWD_EM_HLM_NEON_H__
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index a747126a..b1dc195a 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -17,7 +17,6 @@
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_mempool.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h
index 85f314d1..02ec0d80 100644
--- a/examples/l3fwd/l3fwd_lpm_neon.h
+++ b/examples/l3fwd/l3fwd_lpm_neon.h
@@ -1,35 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * Copyright(c) 2017, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation.
+ * Copyright(c) 2017-2018 Linaro Limited.
*/
#ifndef __L3FWD_LPM_NEON_H__
diff --git a/examples/l3fwd/l3fwd_neon.h b/examples/l3fwd/l3fwd_neon.h
index b319b5a9..76ccdfa7 100644
--- a/examples/l3fwd/l3fwd_neon.h
+++ b/examples/l3fwd/l3fwd_neon.h
@@ -1,38 +1,8 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
- * Copyright(c) 2017, Linaro Limited
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright(c) 2017-2018 Linaro Limited.
*/
-
#ifndef _L3FWD_NEON_H_
#define _L3FWD_NEON_H_
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index e7111fa1..ab019b9e 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -120,7 +120,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_CHECKSUM),
},
@@ -210,7 +209,7 @@ check_lcore_params(void)
}
static int
-check_port_config(const unsigned nb_ports)
+check_port_config(void)
{
uint16_t portid;
uint16_t i;
@@ -221,7 +220,7 @@ check_port_config(const unsigned nb_ports)
printf("port %u is not enabled in port mask\n", portid);
return -1;
}
- if (portid >= nb_ports) {
+ if (!rte_eth_dev_is_valid_port(portid)) {
printf("port %u is not present on the board\n", portid);
return -1;
}
@@ -694,7 +693,7 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -708,7 +707,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
if (force_quit)
return;
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (force_quit)
return;
if ((port_mask & (1 << portid)) == 0)
@@ -826,9 +825,9 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
- if (check_port_config(nb_ports) < 0)
+ if (check_port_config() < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
nb_lcores = rte_lcore_count();
@@ -837,7 +836,7 @@ main(int argc, char **argv)
setup_l3fwd_lookup_tables();
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
/* skip ports that are not enabled */
@@ -861,6 +860,18 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(portid, nb_rx_queue,
(uint16_t)n_tx_queue, &local_port_conf);
if (ret < 0)
@@ -909,7 +920,6 @@ main(int argc, char **argv)
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, txconf);
@@ -971,7 +981,7 @@ main(int argc, char **argv)
printf("\n");
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0) {
continue;
}
@@ -1007,7 +1017,7 @@ main(int argc, char **argv)
}
- check_all_ports_link_status(nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
ret = 0;
/* launch per-lcore init on every lcore */
@@ -1020,7 +1030,7 @@ main(int argc, char **argv)
}
/* stop ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
printf("Closing port %d...", portid);
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index ad0dd390..3b732076 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -79,7 +79,6 @@ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -542,7 +541,7 @@ main(int argc, char **argv)
if (lsi_pktmbuf_pool == NULL)
rte_panic("Cannot init mbuf pool\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_panic("No Ethernet port - bye\n");
@@ -653,7 +652,6 @@ main(int argc, char **argv)
/* init one TX queue logical core on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
diff --git a/examples/load_balancer/config.c b/examples/load_balancer/config.c
index b5b66368..972c85c5 100644
--- a/examples/load_balancer/config.c
+++ b/examples/load_balancer/config.c
@@ -121,7 +121,7 @@ str_to_unsigned_array(
int i, num_splits = 0;
/* copy s so we don't modify original string */
- snprintf(str, sizeof(str), "%s", s);
+ strlcpy(str, s, sizeof(str));
num_splits = rte_strsplit(str, sizeof(str), splits, num_vals, separator);
errno = 0;
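
The snprintf(dst, size, "%s", src) idiom is swapped for strlcpy(), which always NUL-terminates and skips format processing; rte_string_fns.h supplies strlcpy() on platforms whose libc lacks it. A minimal sketch:

    #include <stddef.h>
    #include <rte_string_fns.h>

    /* Sketch: bounded, NUL-terminated copy as now used in config.c. */
    static void
    copy_bounded(char *dst, size_t dst_size, const char *src)
    {
    	strlcpy(dst, src, dst_size);
    }
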
diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c
index 8d8dbe61..f2045f23 100644
--- a/examples/load_balancer/init.c
+++ b/examples/load_balancer/init.c
@@ -45,7 +45,6 @@ static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP),
},
@@ -417,6 +416,18 @@ app_init_nics(void)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(
port,
(uint8_t) n_rx_queues,
@@ -466,7 +477,6 @@ app_init_nics(void)
}
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
/* Init TX queues */
if (app.nic_tx_port_mask[port] == 1) {
diff --git a/examples/meson.build b/examples/meson.build
index 2c6b3f88..4ee7a111 100644
--- a/examples/meson.build
+++ b/examples/meson.build
@@ -7,27 +7,54 @@ if get_option('default_library') == 'static'
endif
execinfo = cc.find_library('execinfo', required: false)
-foreach example: get_option('examples').split(',')
+
+allow_skips = true # don't flag an error if we can't build an app
+
+if get_option('examples').to_lower() == 'all'
+ dirs = run_command('sh', '-c',
+ 'cd $MESON_SOURCE_ROOT/$MESON_SUBDIR && for d in * ; do if [ -d $d ] ; then echo $d ; fi ; done')
+ examples = dirs.stdout().split()
+else
+ examples = get_option('examples').split(',')
+ allow_skips = false # error out if we can't build a requested app
+endif
+default_cflags = machine_args
+if cc.has_argument('-Wno-format-truncation')
+ default_cflags += '-Wno-format-truncation'
+endif
+foreach example: examples
name = example
+ build = true
sources = []
allow_experimental_apis = false
- cflags = machine_args
+ cflags = default_cflags
+
ext_deps = [execinfo]
includes = [include_directories(example)]
deps = ['eal', 'mempool', 'net', 'mbuf', 'ethdev', 'cmdline']
subdir(example)
- dep_objs = ext_deps
- foreach d:deps
- dep_objs += [get_variable(get_option('default_library') + '_rte_' + d)]
- endforeach
- if allow_experimental_apis
- cflags += '-DALLOW_EXPERIMENTAL_API'
+ if build
+ dep_objs = ext_deps
+ foreach d:deps
+ var_name = get_option('default_library') + '_rte_' + d
+ if not is_variable(var_name)
+ error('Missing dependency "@0@" for example "@1@"'.format(d, name))
+ endif
+ dep_objs += [get_variable(var_name)]
+ endforeach
+ if allow_experimental_apis
+ cflags += '-DALLOW_EXPERIMENTAL_API'
+ endif
+ executable('dpdk-' + name, sources,
+ include_directories: includes,
+ link_whole: driver_libs,
+ link_args: dpdk_extra_ldflags,
+ c_args: cflags,
+ dependencies: dep_objs)
+ elif not allow_skips
+ error('Cannot build requested example "' + name + '"')
+ else
+ message('Skipping example "' + name + '"')
endif
- executable('dpdk-' + name, sources,
- include_directories: includes,
- link_whole: driver_libs,
- link_args: dpdk_extra_ldflags,
- c_args: cflags,
- dependencies: dep_objs)
endforeach
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index 92955e97..c23dd3f3 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -220,7 +220,7 @@ main(int argc, char *argv[])
if (parse_app_args(argc, argv) < 0)
rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
- if (rte_eth_dev_count() == 0)
+ if (rte_eth_dev_count_avail() == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
diff --git a/examples/multi_process/client_server_mp/mp_server/Makefile b/examples/multi_process/client_server_mp/mp_server/Makefile
index 3e244e28..9c1caae7 100644
--- a/examples/multi_process/client_server_mp/mp_server/Makefile
+++ b/examples/multi_process/client_server_mp/mp_server/Makefile
@@ -10,7 +10,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(error This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
endif
@@ -23,6 +23,7 @@ SRCS-y := main.c init.c args.c
INC := $(sort $(wildcard *.h))
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += $(WERROR_FLAGS) -O3
CFLAGS += -I$(SRCDIR)/../shared
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index 1c465ccb..30c8e44b 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -243,7 +243,7 @@ init(int argc, char *argv[])
argv += retval;
/* get total number of ports */
- total_ports = rte_eth_dev_count();
+ total_ports = rte_eth_dev_count_total();
/* set up array for port data */
mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
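The deprecated rte_eth_dev_count() is split into two explicit counters here: the shared server wants the total number of attached devices (including ports owned elsewhere), while clients only iterate ports they can use. A short sketch of the distinction, assuming 18.08 headers; RTE_ETH_FOREACH_DEV visits exactly the ports counted by the "avail" variant:

    uint16_t total = rte_eth_dev_count_total(); /* every attached device */
    uint16_t avail = rte_eth_dev_count_avail(); /* valid and unowned only */
    uint16_t pid;

    RTE_ETH_FOREACH_DEV(pid)
        printf("usable port %u (of %u avail, %u total)\n", pid, avail, total);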
diff --git a/examples/multi_process/l2fwd_fork/flib.c b/examples/multi_process/l2fwd_fork/flib.c
deleted file mode 100644
index 52c6152b..00000000
--- a/examples/multi_process/l2fwd_fork/flib.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-#include <unistd.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <sys/queue.h>
-#include <sys/wait.h>
-#include <sys/prctl.h>
-#include <netinet/in.h>
-#include <setjmp.h>
-#include <stdarg.h>
-#include <ctype.h>
-#include <errno.h>
-#include <getopt.h>
-#include <dirent.h>
-#include <signal.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_malloc.h>
-#include <rte_memory.h>
-#include <rte_memcpy.h>
-#include <rte_eal.h>
-#include <rte_launch.h>
-#include <rte_atomic.h>
-#include <rte_cycles.h>
-#include <rte_prefetch.h>
-#include <rte_lcore.h>
-#include <rte_per_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_interrupts.h>
-#include <rte_random.h>
-#include <rte_debug.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_string_fns.h>
-
-#include "flib.h"
-
-#define SIG_PARENT_EXIT SIGUSR1
-
-struct lcore_stat {
- pid_t pid; /**< process identifier of the slave */
- lcore_function_t *f; /**< function to call */
- void *arg; /**< argument of function */
- slave_exit_notify *cb_fn;
-} __rte_cache_aligned;
-
-
-static struct lcore_stat *core_cfg;
-static uint16_t *lcore_cfg = NULL;
-
-/* signal handler invoked after the parent exits */
-static void
-sighand_parent_exit(int sig)
-{
- printf("lcore = %u : Find parent leaves, sig=%d\n", rte_lcore_id(),
- sig);
- printf("Child leaving\n");
- exit(0);
-
- return;
-}
-
-/**
- * Real entry function, run in the slave process
- **/
-static int
-slave_proc_func(void)
-{
- struct rte_config *config;
- unsigned slave_id = rte_lcore_id();
- struct lcore_stat *cfg = &core_cfg[slave_id];
-
- if (prctl(PR_SET_PDEATHSIG, SIG_PARENT_EXIT, 0, 0, 0, 0) != 0)
- printf("Warning: Slave can't register for being notified in"
- "case master process exited\n");
- else {
- struct sigaction act;
- memset(&act, 0 , sizeof(act));
- act.sa_handler = sighand_parent_exit;
- if (sigaction(SIG_PARENT_EXIT, &act, NULL) != 0)
- printf("Fail to register signal handler:%d\n", SIG_PARENT_EXIT);
- }
-
- /* Set slave process to SECONDARY to avoid operations like dev_start/stop etc. */
- config = rte_eal_get_configuration();
- if (NULL == config)
- printf("Warning:Can't get rte_config\n");
- else
- config->process_type = RTE_PROC_SECONDARY;
-
- printf("Core %u is ready (pid=%d)\n", slave_id, (int)cfg->pid);
-
- exit(cfg->f(cfg->arg));
-}
-
-/**
- * Entry function run in the master thread; it spawns the slave process and
- * waits until the specific slave exits.
- **/
-static int
-lcore_func(void *arg __attribute__((unused)))
-{
- unsigned slave_id = rte_lcore_id();
- struct lcore_stat *cfg = &core_cfg[slave_id];
- int pid, stat;
-
- if (rte_get_master_lcore() == slave_id)
- return cfg->f(cfg->arg);
-
- /* fork a slave process */
- pid = fork();
-
- if (pid == -1) {
- printf("Failed to fork\n");
- return -1;
- } else if (pid == 0) /* child */
- return slave_proc_func();
- else { /* parent */
- cfg->pid = pid;
-
- waitpid(pid, &stat, 0);
-
- cfg->pid = 0;
- cfg->f = NULL;
- cfg->arg = NULL;
- /* Notify slave's exit if applicable */
- if(cfg->cb_fn)
- cfg->cb_fn(slave_id, stat);
- return stat;
- }
-}
-
-static int
-lcore_id_init(void)
-{
- int i;
- /* Setup lcore ID allocation map */
- lcore_cfg = rte_zmalloc("LCORE_ID_MAP",
- sizeof(uint16_t) * RTE_MAX_LCORE,
- RTE_CACHE_LINE_SIZE);
-
- if(lcore_cfg == NULL)
- rte_panic("Failed to malloc\n");
-
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- if (rte_lcore_is_enabled(i))
- lcore_cfg[i] = 1;
- }
- return 0;
-}
-
-int
-flib_assign_lcore_id(void)
-{
- unsigned i;
- int ret;
-
- /**
- * the thread was assigned an lcore id previously, or is a slave thread. A bug
- * remains here: if the core mask includes core 0, and that core calls this
- * function, it can still get a new lcore id.
- **/
- if (rte_lcore_id() != 0)
- return -1;
-
- do {
- /* Find an lcore id not yet in use; avoid lcore ID 0 */
- for (i = 1; i < RTE_MAX_LCORE; i++) {
- if (lcore_cfg[i] == 0)
- break;
- }
- if (i == RTE_MAX_LCORE)
- return -1;
-
- /* Assign new lcore id to this thread */
-
- ret = rte_atomic16_cmpset(&lcore_cfg[i], 0, 1);
- } while (unlikely(ret == 0));
-
- RTE_PER_LCORE(_lcore_id) = i;
- return i;
-}
-
-void
-flib_free_lcore_id(unsigned lcore_id)
-{
- /* id is not valid or belongs to pinned core id */
- if (lcore_id >= RTE_MAX_LCORE || lcore_id == 0 ||
- rte_lcore_is_enabled(lcore_id))
- return;
-
- lcore_cfg[lcore_id] = 0;
-}
-
-int
-flib_register_slave_exit_notify(unsigned slave_id,
- slave_exit_notify *cb)
-{
- if (cb == NULL)
- return -EFAULT;
-
- if (!rte_lcore_is_enabled(slave_id))
- return -ENOENT;
-
- core_cfg[slave_id].cb_fn = cb;
-
- return 0;
-}
-
-enum slave_stat
-flib_query_slave_status(unsigned slave_id)
-{
- if (!rte_lcore_is_enabled(slave_id))
- return ST_FREEZE;
- /* pid is only set once the slave process has been spawned */
- if (core_cfg[slave_id].pid != 0)
- return ST_RUN;
- else
- return ST_IDLE;
-}
-
-int
-flib_remote_launch(lcore_function_t *f,
- void *arg, unsigned slave_id)
-{
- if (f == NULL)
- return -1;
-
- if (!rte_lcore_is_enabled(slave_id))
- return -1;
-
- /* Wait until the specified lcore's state changes to WAIT */
- rte_eal_wait_lcore(slave_id);
-
- core_cfg[slave_id].f = f;
- core_cfg[slave_id].arg = arg;
-
- return rte_eal_remote_launch(lcore_func, NULL, slave_id);
-}
-
-int
-flib_mp_remote_launch(lcore_function_t *f, void *arg,
- enum rte_rmt_call_master_t call_master)
-{
- int i;
-
- RTE_LCORE_FOREACH_SLAVE(i) {
- core_cfg[i].arg = arg;
- core_cfg[i].f = f;
- }
-
- return rte_eal_mp_remote_launch(lcore_func, NULL, call_master);
-}
-
-int
-flib_init(void)
-{
- if ((core_cfg = rte_zmalloc("core_cfg",
- sizeof(struct lcore_stat) * RTE_MAX_LCORE,
- RTE_CACHE_LINE_SIZE)) == NULL ) {
- printf("rte_zmalloc failed\n");
- return -1;
- }
-
- if (lcore_id_init() != 0) {
- printf("lcore_id_init failed\n");
- return -1;
- }
-
- return 0;
-}
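The core trick in the deleted flib.c is prctl(PR_SET_PDEATHSIG, ...): each forked slave asks the kernel to deliver a signal when its parent dies, so orphaned slaves tear themselves down. A minimal standalone sketch of that Linux-specific mechanism; SIGUSR1 and the handler body mirror the removed code, and printf in a handler is not async-signal-safe, kept only for illustration:

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/prctl.h>
    #include <unistd.h>

    static void on_parent_exit(int sig)
    {
        printf("parent exited (sig=%d), child leaving\n", sig);
        exit(0);
    }

    int main(void)
    {
        if (fork() == 0) { /* child */
            struct sigaction act;

            memset(&act, 0, sizeof(act));
            act.sa_handler = on_parent_exit;
            sigaction(SIGUSR1, &act, NULL);
            /* ask the kernel to send SIGUSR1 to this child when the parent dies */
            prctl(PR_SET_PDEATHSIG, SIGUSR1);
            pause();
        }
        sleep(1); /* parent exits here; the child is then signalled */
        return 0;
    }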
diff --git a/examples/multi_process/l2fwd_fork/flib.h b/examples/multi_process/l2fwd_fork/flib.h
deleted file mode 100644
index 8bc13a1f..00000000
--- a/examples/multi_process/l2fwd_fork/flib.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
- */
-
-#ifndef __FLIB_H
-#define __FLIB_H
-
-/* callback function pointer when specific slave leaves */
-typedef void (slave_exit_notify)(unsigned slaveid, int stat);
-
-enum slave_stat{
- ST_FREEZE = 1,
- ST_IDLE,
- ST_RUN,
- ST_ZOMBIE, /* Not implemented yet */
-};
-
-/**
- * Initialize the fork lib.
- *
- * @return
- * - 0 : fork lib initialized successfully
- * - -1 : fork lib initialized failed
- */
-int flib_init(void);
-
-/**
- * Check that all SLAVE lcores are in the WAIT state, then call
- * flib_remote_launch() for all of them. If call_master is true
- * (set to CALL_MASTER), also call the function on the master lcore.
- *
- * @param f:
- * function pointer to run
- * @param arg:
- * argument passed to f
- * @param call_master
- * - SKIP_MASTER : only launch function on slave lcores
- * - CALL_MASTER : launch function on master and slave lcores
- * @return
- * - 0 : function executed successfully
- * - -1 : function execution failed
- */
-int flib_mp_remote_launch(lcore_function_t *f,
- void *arg, enum rte_rmt_call_master_t call_master);
-
-/**
- * Send a message to a slave lcore identified by slave_id to call a
- * function f with argument arg.
- *
- * @param f:
- * function pointer to run
- * @param arg:
- * argument passed to f
- * @param slave_id
- * slave lcore id to run on
- * @return
- * - 0 : function executed successfully
- * - -1 : function execution failed
- */
-int flib_remote_launch(lcore_function_t *f,
- void *arg, unsigned slave_id);
-
-/**
- * Query the running state of a specific slave; does not work with the master id
- *
- * @param slave_id:
- * lcore id which should not be master id
- * @return
- * - ST_FREEZE : lcore is not in enabled core mask
- * - ST_IDLE : lcore is idle
- * - ST_RUN : lcore is running something
- */
-enum slave_stat
-flib_query_slave_status(unsigned slave_id);
-
-/**
- * Register a callback function to be notified when a specific slave exits.
- *
- * @param slave_id:
- * lcore id which should not be master id
- * @param cb:
- * callback pointer to register
- * @return
- * - 0 : function executed successfully
- * - -EFAULT : argument error
- * - -ENOENT : slave_id is not valid
- */
-int flib_register_slave_exit_notify(unsigned slave_id,
- slave_exit_notify *cb);
-
-/**
- * Assign an lcore ID to a non-slave thread, i.e. a thread that was not created
- * by rte_eal_remote_launch or rte_eal_mp_remote_launch.
- * Such threads can either be bound to an lcore or float among different lcores.
- * The lcore ID is unique within a multi-thread or multi-process DPDK
- * environment, so the thread can benefit from the per-lcore cache mechanism
- * provided by the mempool library.
- * After a successful call, use rte_lcore_id() to get the assigned lcore ID; other
- * lcore functions are not guaranteed to work correctly.
- *
- * @return
- * - -1 : can't assign an lcore id, for one of three reasons:
- * - the caller is not a non-slave thread.
- * - the thread was already assigned an lcore id.
- * - no free lcore ids remain.
- * - > 0 : the assigned lcore id.
- */
-int flib_assign_lcore_id(void);
-
-/**
- * Free an lcore_id that was assigned by flib_assign_lcore_id().
- * Call it when a non-slave thread is exiting or has exited.
- *
- * @param lcore_id
- * The identifier of the lcore, which MUST be between 1 and
- * RTE_MAX_LCORE-1.
- */
-void flib_free_lcore_id(unsigned lcore_id);
-
-#endif /* __FLIB_H */
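For historical context, the typical call sequence for this now-removed API was roughly the following sketch. It no longer builds against 18.08, and lcore_main, my_exit_cb and slave_id stand in for application code:

    if (flib_init() != 0)
        rte_exit(EXIT_FAILURE, "flib init error\n");

    /* my_exit_cb: user callback invoked when the slave identified by
     * slave_id exits (both names are illustrative) */
    if (flib_register_slave_exit_notify(slave_id, my_exit_cb) != 0)
        rte_exit(EXIT_FAILURE, "register exit notify failed\n");

    /* fork one slave per lcore and run lcore_main in each, skipping the master */
    flib_mp_remote_launch(lcore_main, NULL, SKIP_MASTER);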
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
deleted file mode 100644
index bc9ceb5c..00000000
--- a/examples/multi_process/l2fwd_fork/main.c
+++ /dev/null
@@ -1,1261 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
- */
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <stdint.h>
-#include <sched.h>
-#include <inttypes.h>
-#include <sys/types.h>
-#include <sys/queue.h>
-#include <netinet/in.h>
-#include <setjmp.h>
-#include <stdarg.h>
-#include <ctype.h>
-#include <errno.h>
-#include <getopt.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memcpy.h>
-#include <rte_eal.h>
-#include <rte_launch.h>
-#include <rte_atomic.h>
-#include <rte_spinlock.h>
-#include <rte_cycles.h>
-#include <rte_prefetch.h>
-#include <rte_lcore.h>
-#include <rte_per_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_interrupts.h>
-#include <rte_random.h>
-#include <rte_debug.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_malloc.h>
-
-#include "flib.h"
-
-#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
-#define MBUF_NAME "mbuf_pool_%d"
-#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
-#define NB_MBUF 8192
-#define RING_MASTER_NAME "l2fwd_ring_m2s_"
-#define RING_SLAVE_NAME "l2fwd_ring_s2m_"
-#define MAX_NAME_LEN 32
-/* RECREATE flag indicates resources must be re-initialized and the slave core launched again */
-#define SLAVE_RECREATE_FLAG 0x1
-/* RESTART flag indicates the port must be restarted and the START command sent again */
-#define SLAVE_RESTART_FLAG 0x2
-#define INVALID_MAPPING_ID ((unsigned)LCORE_ID_ANY)
-/* Maximum message buffer per slave */
-#define NB_CORE_MSGBUF 32
-enum l2fwd_cmd{
- CMD_START,
- CMD_STOP,
-};
-
-#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
-/*
- * Configurable number of RX/TX ring descriptors
- */
-#define RTE_TEST_RX_DESC_DEFAULT 1024
-#define RTE_TEST_TX_DESC_DEFAULT 1024
-static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
-static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
-
-/* ethernet addresses of ports */
-static struct ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
-
-/* mask of enabled ports */
-static uint32_t l2fwd_enabled_port_mask = 0;
-
-/* list of enabled ports */
-static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
-
-static unsigned int l2fwd_rx_queue_per_lcore = 1;
-
-struct mbuf_table {
- unsigned len;
- struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
-#define MAX_RX_QUEUE_PER_LCORE 16
-#define MAX_TX_QUEUE_PER_PORT 16
-struct lcore_queue_conf {
- unsigned n_rx_port;
- unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
-} __rte_cache_aligned;
-struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
-
-struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
-
-struct lcore_resource_struct {
- int enabled; /* Only set if this lcore is involved in packet forwarding */
- int flags; /* Set only when the slave needs a restart or recreate */
- unsigned lcore_id; /* lcore ID */
- unsigned pair_id; /* dependency lcore ID on port */
- char ring_name[2][MAX_NAME_LEN];
- /* ring[0] for master send cmd, slave read */
- /* ring[1] for slave send ack, master read */
- struct rte_ring *ring[2];
- int port_num; /* Total port numbers */
- /* Port id for that lcore to receive packets */
- uint16_t port[RTE_MAX_ETHPORTS];
-}__attribute__((packed)) __rte_cache_aligned;
-
-static struct lcore_resource_struct lcore_resource[RTE_MAX_LCORE];
-static struct rte_mempool *message_pool;
-static rte_spinlock_t res_lock = RTE_SPINLOCK_INITIALIZER;
-/* use floating processes */
-static int float_proc = 0;
-/* Save original cpu affinity */
-struct cpu_aff_arg{
- cpu_set_t set;
- size_t size;
-}cpu_aff;
-
-static const struct rte_eth_conf port_conf = {
- .rxmode = {
- .split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- },
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
-};
-
-static struct rte_mempool * l2fwd_pktmbuf_pool[RTE_MAX_ETHPORTS];
-
-/* Per-port statistics struct */
-struct l2fwd_port_statistics {
- uint64_t tx;
- uint64_t rx;
- uint64_t dropped;
-} __rte_cache_aligned;
-struct l2fwd_port_statistics *port_statistics;
-/**
- * pointer to the lcore ID mapping array, used to return an lcore id when a slave
- * process exits unexpectedly; used only when the floating process option is applied
- **/
-unsigned *mapping_id;
-
-/* A tsc-based timer responsible for triggering statistics printout */
-#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
-#define MAX_TIMER_PERIOD 86400 /* 1 day max */
-static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
-
-static int l2fwd_launch_one_lcore(void *dummy);
-
-/* Print out statistics on packets dropped */
-static void
-print_stats(void)
-{
- uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
- unsigned portid;
-
- total_packets_dropped = 0;
- total_packets_tx = 0;
- total_packets_rx = 0;
-
- const char clr[] = { 27, '[', '2', 'J', '\0' };
- const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
-
- /* Clear screen and move to top left */
- printf("%s%s", clr, topLeft);
-
- printf("\nPort statistics ====================================");
-
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
- /* skip disabled ports */
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
- printf("\nStatistics for port %u ------------------------------"
- "\nPackets sent: %24"PRIu64
- "\nPackets received: %20"PRIu64
- "\nPackets dropped: %21"PRIu64,
- portid,
- port_statistics[portid].tx,
- port_statistics[portid].rx,
- port_statistics[portid].dropped);
-
- total_packets_dropped += port_statistics[portid].dropped;
- total_packets_tx += port_statistics[portid].tx;
- total_packets_rx += port_statistics[portid].rx;
- }
- printf("\nAggregate statistics ==============================="
- "\nTotal packets sent: %18"PRIu64
- "\nTotal packets received: %14"PRIu64
- "\nTotal packets dropped: %15"PRIu64,
- total_packets_tx,
- total_packets_rx,
- total_packets_dropped);
- printf("\n====================================================\n");
-}
-
-static int
-clear_cpu_affinity(void)
-{
- int s;
-
- s = sched_setaffinity(0, cpu_aff.size, &cpu_aff.set);
- if (s != 0) {
- printf("sched_setaffinity failed:%s\n", strerror(errno));
- return -1;
- }
-
- return 0;
-}
-
-static int
-get_cpu_affinity(void)
-{
- int s;
-
- cpu_aff.size = sizeof(cpu_set_t);
- CPU_ZERO(&cpu_aff.set);
-
- s = sched_getaffinity(0, cpu_aff.size, &cpu_aff.set);
- if (s != 0) {
- printf("sched_getaffinity failed:%s\n", strerror(errno));
- return -1;
- }
-
- return 0;
-}
-
-/**
- * This function demonstrates how to create a ring in the first instance,
- * or re-attach an existing ring in a later instance.
- **/
-static struct rte_ring *
-create_ring(const char *name, unsigned count,
- int socket_id,unsigned flags)
-{
- struct rte_ring *ring;
-
- if (name == NULL)
- return NULL;
-
- /* If already created, just attach to it */
- if (likely((ring = rte_ring_lookup(name)) != NULL))
- return ring;
-
- /* First call; create one */
- return rte_ring_create(name, count, socket_id, flags);
-}
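This lookup-before-create idiom is the standard way DPDK multi-process applications share a named ring: the first process creates it, and restarted or secondary processes simply re-attach by name. A usage sketch; the ring name and size are illustrative, and ring sizes must be a power of two:

    struct rte_ring *r = rte_ring_lookup("m2s_ring_0");

    if (r == NULL) /* not created yet: we are the first instance */
        r = rte_ring_create("m2s_ring_0", 32, rte_socket_id(),
                RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (r == NULL)
        rte_exit(EXIT_FAILURE, "cannot create or attach ring\n");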
-
-/* Allocate, with rte_malloc, structures that are shared by master and slave */
-static int
-l2fwd_malloc_shared_struct(void)
-{
- port_statistics = rte_zmalloc("port_stat",
- sizeof(struct l2fwd_port_statistics) * RTE_MAX_ETHPORTS,
- 0);
- if (port_statistics == NULL)
- return -1;
-
- /* allocate mapping_id array */
- if (float_proc) {
- int i;
- mapping_id = rte_malloc("mapping_id", sizeof(unsigned) * RTE_MAX_LCORE,
- 0);
-
- if (mapping_id == NULL)
- return -1;
-
- for (i = 0 ;i < RTE_MAX_LCORE; i++)
- mapping_id[i] = INVALID_MAPPING_ID;
- }
- return 0;
-}
-
-/* Create the rings used for communication between master and slave */
-static int
-create_ms_ring(unsigned slaveid)
-{
- unsigned flag = RING_F_SP_ENQ | RING_F_SC_DEQ;
- struct lcore_resource_struct *res = &lcore_resource[slaveid];
- unsigned socketid = rte_socket_id();
-
- /* Always create the ring on the master's socket_id */
- /* By default only create a ring of size 32 */
- snprintf(res->ring_name[0], MAX_NAME_LEN, "%s%u",
- RING_MASTER_NAME, slaveid);
- if ((res->ring[0] = create_ring(res->ring_name[0], NB_CORE_MSGBUF,
- socketid, flag)) == NULL) {
- printf("Create m2s ring %s failed\n", res->ring_name[0]);
- return -1;
- }
-
- snprintf(res->ring_name[1], MAX_NAME_LEN, "%s%u",
- RING_SLAVE_NAME, slaveid);
- if ((res->ring[1] = create_ring(res->ring_name[1], NB_CORE_MSGBUF,
- socketid, flag)) == NULL) {
- printf("Create s2m ring %s failed\n", res->ring_name[1]);
- return -1;
- }
-
- return 0;
-}
-
-/* Send a command to the peer over the paired master/slave ring */
-static inline int
-sendcmd(unsigned slaveid, enum l2fwd_cmd cmd, int is_master)
-{
- struct lcore_resource_struct *res = &lcore_resource[slaveid];
- void *msg;
- int fd = !is_master;
-
- /* Only check the master side; a slave must already be enabled and running */
- if (is_master && !res->enabled)
- return -1;
-
- if (res->ring[fd] == NULL)
- return -1;
-
- if (rte_mempool_get(message_pool, &msg) < 0) {
- printf("Error to get message buffer\n");
- return -1;
- }
-
- *(enum l2fwd_cmd *)msg = cmd;
-
- if (rte_ring_enqueue(res->ring[fd], msg) != 0) {
- printf("Enqueue error\n");
- rte_mempool_put(message_pool, msg);
- return -1;
- }
-
- return 0;
-}
-
-/* Get a command from the peer over the paired master/slave ring */
-static inline int
-getcmd(unsigned slaveid, enum l2fwd_cmd *cmd, int is_master)
-{
- struct lcore_resource_struct *res = &lcore_resource[slaveid];
- void *msg;
- int fd = !!is_master;
- int ret;
- /* Only check the master side; a slave must already be enabled and running */
- if (is_master && (!res->enabled))
- return -1;
-
- if (res->ring[fd] == NULL)
- return -1;
-
- ret = rte_ring_dequeue(res->ring[fd], &msg);
-
- if (ret == 0) {
- *cmd = *(enum l2fwd_cmd *)msg;
- rte_mempool_put(message_pool, msg);
- }
- return ret;
-}
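sendcmd()/getcmd() implement a tiny message channel: command buffers live in a shared mempool and ownership moves across an rte_ring. Condensed to one direction as a sketch, where cmd_ring stands in for the res->ring[] entry selected by the fd logic above:

    void *msg;

    /* sender: take a buffer from the pool, fill it, hand it over */
    if (rte_mempool_get(message_pool, &msg) == 0) {
        *(enum l2fwd_cmd *)msg = CMD_STOP;
        if (rte_ring_enqueue(cmd_ring, msg) != 0)
            rte_mempool_put(message_pool, msg); /* queue full: recycle */
    }

    /* receiver: the buffer returns to the pool once the command is read */
    if (rte_ring_dequeue(cmd_ring, &msg) == 0) {
        enum l2fwd_cmd cmd = *(enum l2fwd_cmd *)msg;
        rte_mempool_put(message_pool, msg);
        (void)cmd;
    }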
-
-/* Master sends a command to the slave and waits until an ack is received or an error occurs */
-static int
-master_sendcmd_with_ack(unsigned slaveid, enum l2fwd_cmd cmd)
-{
- enum l2fwd_cmd ack_cmd;
- int ret = -1;
-
- if (sendcmd(slaveid, cmd, 1) != 0)
- rte_exit(EXIT_FAILURE, "Failed to send message\n");
-
- /* Get ack */
- while (1) {
- ret = getcmd(slaveid, &ack_cmd, 1);
- if (ret == 0 && cmd == ack_cmd)
- break;
-
- /* If slave not running yet, return an error */
- if (flib_query_slave_status(slaveid) != ST_RUN) {
- ret = -ENOENT;
- break;
- }
- }
-
- return ret;
-}
-
-/* Restart all ports assigned to that slave lcore */
-static int
-reset_slave_all_ports(unsigned slaveid)
-{
- struct lcore_resource_struct *slave = &lcore_resource[slaveid];
- int i, ret = 0;
-
- /* stop/start port */
- for (i = 0; i < slave->port_num; i++) {
- char buf_name[RTE_MEMPOOL_NAMESIZE];
- struct rte_mempool *pool;
- printf("Stop port :%d\n", slave->port[i]);
- rte_eth_dev_stop(slave->port[i]);
- snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, slave->port[i]);
- pool = rte_mempool_lookup(buf_name);
- if (pool)
- printf("Port %d mempool free object is %u(%u)\n", slave->port[i],
- rte_mempool_avail_count(pool),
- (unsigned int)NB_MBUF);
- else
- printf("Can't find mempool %s\n", buf_name);
-
- printf("Start port :%d\n", slave->port[i]);
- ret = rte_eth_dev_start(slave->port[i]);
- if (ret != 0)
- break;
- }
- return ret;
-}
-
-static int
-reset_shared_structures(unsigned slaveid)
-{
- int ret;
- /* Only ports are shared resources here */
- ret = reset_slave_all_ports(slaveid);
-
- return ret;
-}
-
-/**
- * Call this function to re-create the resources needed by a slave process
- * that exited in the last instance
- **/
-static int
-init_slave_res(unsigned slaveid)
-{
- struct lcore_resource_struct *slave = &lcore_resource[slaveid];
- enum l2fwd_cmd cmd;
-
- if (!slave->enabled) {
- printf("Something wrong with lcore=%u enabled=%d\n",slaveid,
- slave->enabled);
- return -1;
- }
-
- /* Initialize ring */
- if (create_ms_ring(slaveid) != 0)
- rte_exit(EXIT_FAILURE, "failed to create ring for slave %u\n",
- slaveid);
-
- /* drain any unread buffers */
- while (getcmd(slaveid, &cmd, 1) == 0);
- while (getcmd(slaveid, &cmd, 0) == 0);
-
- return 0;
-}
-
-static int
-recreate_one_slave(unsigned slaveid)
-{
- int ret = 0;
- /* Re-initialize resource for stalled slave */
- if ((ret = init_slave_res(slaveid)) != 0) {
- printf("Init slave=%u failed\n", slaveid);
- return ret;
- }
-
- if ((ret = flib_remote_launch(l2fwd_launch_one_lcore, NULL, slaveid))
- != 0)
- printf("Launch slave %u failed\n", slaveid);
-
- return ret;
-}
-
-/**
- * Remap the resources belonging to slave_id to the new lcore obtained from
- * flib_assign_lcore_id(); used only when the floating process option is applied.
- *
- * @param slaveid
- * original lcore_id to remap
- */
-static void
-remapping_slave_resource(unsigned slaveid, unsigned map_id)
-{
-
- /* remapping lcore_resource */
- memcpy(&lcore_resource[map_id], &lcore_resource[slaveid],
- sizeof(struct lcore_resource_struct));
-
- /* remapping lcore_queue_conf */
- memcpy(&lcore_queue_conf[map_id], &lcore_queue_conf[slaveid],
- sizeof(struct lcore_queue_conf));
-}
-
-static int
-reset_pair(unsigned slaveid, unsigned pairid)
-{
- int ret;
- if ((ret = reset_shared_structures(slaveid)) != 0)
- goto back;
-
- if((ret = reset_shared_structures(pairid)) != 0)
- goto back;
-
- if (float_proc) {
- unsigned map_id = mapping_id[slaveid];
-
- if (map_id != INVALID_MAPPING_ID) {
- printf("%u return mapping id %u\n", slaveid, map_id);
- flib_free_lcore_id(map_id);
- mapping_id[slaveid] = INVALID_MAPPING_ID;
- }
-
- map_id = mapping_id[pairid];
- if (map_id != INVALID_MAPPING_ID) {
- printf("%u return mapping id %u\n", pairid, map_id);
- flib_free_lcore_id(map_id);
- mapping_id[pairid] = INVALID_MAPPING_ID;
- }
- }
-
- if((ret = recreate_one_slave(slaveid)) != 0)
- goto back;
-
- ret = recreate_one_slave(pairid);
-
-back:
- return ret;
-}
-
-static void
-slave_exit_cb(unsigned slaveid, __attribute__((unused))int stat)
-{
- struct lcore_resource_struct *slave = &lcore_resource[slaveid];
-
- printf("Get slave %u leave info\n", slaveid);
- if (!slave->enabled) {
- printf("Lcore=%u not registered for it's exit\n", slaveid);
- return;
- }
- rte_spinlock_lock(&res_lock);
-
- /* Change the state and wait for the master to restart it */
- slave->flags = SLAVE_RECREATE_FLAG;
-
- rte_spinlock_unlock(&res_lock);
-}
-
-static void
-l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
-{
- struct ether_hdr *eth;
- void *tmp;
- unsigned dst_port;
- int sent;
- struct rte_eth_dev_tx_buffer *buffer;
-
- dst_port = l2fwd_dst_ports[portid];
- eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
-
- /* 02:00:00:00:00:xx */
- tmp = &eth->d_addr.addr_bytes[0];
- *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
-
- /* src addr */
- ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
-
- buffer = tx_buffer[dst_port];
- sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
- if (sent)
- port_statistics[dst_port].tx += sent;
-}
-
-/* main processing loop */
-static void
-l2fwd_main_loop(void)
-{
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct rte_mbuf *m;
- int sent;
- unsigned lcore_id;
- uint64_t prev_tsc, diff_tsc, cur_tsc;
- unsigned i, j, portid, nb_rx;
- struct lcore_queue_conf *qconf;
- const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
- BURST_TX_DRAIN_US;
- struct rte_eth_dev_tx_buffer *buffer;
-
- prev_tsc = 0;
-
- lcore_id = rte_lcore_id();
-
- qconf = &lcore_queue_conf[lcore_id];
-
- if (qconf->n_rx_port == 0) {
- RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
- return;
- }
-
- RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
-
- for (i = 0; i < qconf->n_rx_port; i++) {
- portid = qconf->rx_port_list[i];
- RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
- portid);
- }
-
- while (1) {
- enum l2fwd_cmd cmd;
- cur_tsc = rte_rdtsc();
-
- if (unlikely(getcmd(lcore_id, &cmd, 0) == 0)) {
- sendcmd(lcore_id, cmd, 0);
-
- /* If get stop command, stop forwarding and exit */
- if (cmd == CMD_STOP) {
- return;
- }
- }
-
- /*
- * TX burst queue drain
- */
- diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
-
- for (i = 0; i < qconf->n_rx_port; i++) {
-
- portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
- buffer = tx_buffer[portid];
-
- sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
- if (sent)
- port_statistics[portid].tx += sent;
-
- }
-
- prev_tsc = cur_tsc;
- }
-
- /*
- * Read packet from RX queues
- */
- for (i = 0; i < qconf->n_rx_port; i++) {
-
- portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
- pkts_burst, MAX_PKT_BURST);
-
- port_statistics[portid].rx += nb_rx;
-
- for (j = 0; j < nb_rx; j++) {
- m = pkts_burst[j];
- rte_prefetch0(rte_pktmbuf_mtod(m, void *));
- l2fwd_simple_forward(m, portid);
- }
- }
- }
-}
-
-static int
-l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
-{
- unsigned lcore_id = rte_lcore_id();
-
- if (float_proc) {
- unsigned flcore_id;
-
- /* Change it to a floating process; also change its lcore_id */
- clear_cpu_affinity();
- RTE_PER_LCORE(_lcore_id) = 0;
- /* Get a lcore_id */
- if (flib_assign_lcore_id() < 0 ) {
- printf("flib_assign_lcore_id failed\n");
- return -1;
- }
- flcore_id = rte_lcore_id();
- /* Save the mapping id so the master can reclaim it after the slave exits */
- mapping_id[lcore_id] = flcore_id;
- printf("Org lcore_id = %u, cur lcore_id = %u\n",
- lcore_id, flcore_id);
- remapping_slave_resource(lcore_id, flcore_id);
- }
-
- l2fwd_main_loop();
-
- /* release the lcore_id before returning */
- if (float_proc) {
- flib_free_lcore_id(rte_lcore_id());
- mapping_id[lcore_id] = INVALID_MAPPING_ID;
- }
- return 0;
-}
-
-/* display usage */
-static void
-l2fwd_usage(const char *prgname)
-{
- printf("%s [EAL options] -- -p PORTMASK -s COREMASK [-q NQ] -f\n"
- " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
- " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
- " -f use floating process which won't bind to any core to run\n"
- " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
- prgname);
-}
-
-static int
-l2fwd_parse_portmask(const char *portmask)
-{
- char *end = NULL;
- unsigned long pm;
-
- /* parse hexadecimal string */
- pm = strtoul(portmask, &end, 16);
- if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
- return -1;
-
- if (pm == 0)
- return -1;
-
- return pm;
-}
-
-static unsigned int
-l2fwd_parse_nqueue(const char *q_arg)
-{
- char *end = NULL;
- unsigned long n;
-
- /* parse decimal string */
- n = strtoul(q_arg, &end, 10);
- if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
- return 0;
- if (n == 0)
- return 0;
- if (n >= MAX_RX_QUEUE_PER_LCORE)
- return 0;
-
- return n;
-}
-
-static int
-l2fwd_parse_timer_period(const char *q_arg)
-{
- char *end = NULL;
- int n;
-
- /* parse number string */
- n = strtol(q_arg, &end, 10);
- if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
- return -1;
- if (n >= MAX_TIMER_PERIOD)
- return -1;
-
- return n;
-}
-
-/* Parse the argument given in the command line of the application */
-static int
-l2fwd_parse_args(int argc, char **argv)
-{
- int opt, ret;
- char **argvopt;
- int option_index;
- char *prgname = argv[0];
- static struct option lgopts[] = {
- {NULL, 0, 0, 0}
- };
- int has_pmask = 0;
-
- argvopt = argv;
-
- while ((opt = getopt_long(argc, argvopt, "p:q:T:f",
- lgopts, &option_index)) != EOF) {
-
- switch (opt) {
- /* portmask */
- case 'p':
- l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
- if (l2fwd_enabled_port_mask == 0) {
- printf("invalid portmask\n");
- l2fwd_usage(prgname);
- return -1;
- }
- has_pmask = 1;
- break;
-
- /* nqueue */
- case 'q':
- l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
- if (l2fwd_rx_queue_per_lcore == 0) {
- printf("invalid queue number\n");
- l2fwd_usage(prgname);
- return -1;
- }
- break;
-
- /* timer period */
- case 'T':
- timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
- if (timer_period < 0) {
- printf("invalid timer period\n");
- l2fwd_usage(prgname);
- return -1;
- }
- break;
-
- /* use floating process */
- case 'f':
- float_proc = 1;
- break;
-
- /* long options */
- case 0:
- l2fwd_usage(prgname);
- return -1;
-
- default:
- l2fwd_usage(prgname);
- return -1;
- }
- }
-
- if (optind >= 0)
- argv[optind-1] = prgname;
-
- if (!has_pmask) {
- l2fwd_usage(prgname);
- return -1;
- }
- ret = optind-1;
- optind = 1; /* reset getopt lib */
- return ret;
-}
-
-/* Check the link status of all ports in up to 9s, and print them finally */
-static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
-{
-#define CHECK_INTERVAL 100 /* 100ms */
-#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint16_t portid;
- uint8_t count, all_ports_up, print_flag = 0;
- struct rte_eth_link link;
-
- printf("\nChecking link status");
- fflush(stdout);
- for (count = 0; count <= MAX_CHECK_TIME; count++) {
- all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
- if ((port_mask & (1 << portid)) == 0)
- continue;
- memset(&link, 0, sizeof(link));
- rte_eth_link_get_nowait(portid, &link);
- /* print link status if flag set */
- if (print_flag == 1) {
- if (link.link_status)
- printf(
- "Port%d Link Up- speed %u Mbps- %s\n",
- portid, link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex\n"));
- else
- printf("Port %d Link Down\n", portid);
- continue;
- }
- /* clear all_ports_up flag if any link down */
- if (link.link_status == ETH_LINK_DOWN) {
- all_ports_up = 0;
- break;
- }
- }
- /* after finally printing all link status, get out */
- if (print_flag == 1)
- break;
-
- if (all_ports_up == 0) {
- printf(".");
- fflush(stdout);
- rte_delay_ms(CHECK_INTERVAL);
- }
-
- /* set the print_flag if all ports up or timeout */
- if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
- print_flag = 1;
- printf("done\n");
- }
- }
-}
-
-int
-main(int argc, char **argv)
-{
- struct lcore_queue_conf *qconf;
- int ret;
- uint16_t nb_ports;
- uint16_t nb_ports_available;
- uint16_t portid, last_port;
- unsigned rx_lcore_id;
- unsigned nb_ports_in_mask = 0;
- unsigned i;
- uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
-
- /* Save the cpu affinity first; restore it when the floating process option is used */
- if (get_cpu_affinity() != 0)
- rte_exit(EXIT_FAILURE, "get_cpu_affinity error\n");
-
- /* Also try to set cpu affinity, to detect whether it would fail in a child process */
- if(clear_cpu_affinity() != 0)
- rte_exit(EXIT_FAILURE, "clear_cpu_affinity error\n");
-
- /* init EAL */
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
- argc -= ret;
- argv += ret;
-
- /* parse application arguments (after the EAL ones) */
- ret = l2fwd_parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
-
- /* flib init */
- if (flib_init() != 0)
- rte_exit(EXIT_FAILURE, "flib init error");
-
- /**
- * Allocate structures that slave lcores will modify. For data that slaves only
- * read, there is no need to share via malloc: global or static variables are
- * fine, since a slave inherits everything the master initialized.
- **/
- if (l2fwd_malloc_shared_struct() != 0)
- rte_exit(EXIT_FAILURE, "malloc mem failed\n");
-
- /* Initialize lcore_resource structures */
- memset(lcore_resource, 0, sizeof(lcore_resource));
- for (i = 0; i < RTE_MAX_LCORE; i++)
- lcore_resource[i].lcore_id = i;
-
- nb_ports = rte_eth_dev_count();
- if (nb_ports == 0)
- rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
-
- /* create the mbuf pool */
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
- char buf_name[RTE_MEMPOOL_NAMESIZE];
- snprintf(buf_name, RTE_MEMPOOL_NAMESIZE, MBUF_NAME, portid);
- l2fwd_pktmbuf_pool[portid] =
- rte_pktmbuf_pool_create(buf_name, NB_MBUF, 32,
- 0, MBUF_DATA_SIZE, rte_socket_id());
- if (l2fwd_pktmbuf_pool[portid] == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-
- printf("Create mbuf %s\n", buf_name);
- }
-
- /* reset l2fwd_dst_ports */
- for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
- l2fwd_dst_ports[portid] = 0;
- last_port = 0;
-
- /*
- * Each logical core is assigned a dedicated TX queue on each port.
- */
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- if (nb_ports_in_mask % 2) {
- l2fwd_dst_ports[portid] = last_port;
- l2fwd_dst_ports[last_port] = portid;
- }
- else
- last_port = portid;
-
- nb_ports_in_mask++;
- }
- if (nb_ports_in_mask % 2) {
- printf("Notice: odd number of ports in portmask.\n");
- l2fwd_dst_ports[last_port] = last_port;
- }
-
- rx_lcore_id = 0;
- qconf = NULL;
-
- /* Initialize the port/queue configuration of each logical core */
- for (portid = 0; portid < nb_ports; portid++) {
- struct lcore_resource_struct *res;
- /* skip ports that are not enabled */
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- /* get the lcore_id for this port */
- /* skip master lcore */
- while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
- rte_get_master_lcore() == rx_lcore_id ||
- lcore_queue_conf[rx_lcore_id].n_rx_port ==
- l2fwd_rx_queue_per_lcore) {
-
- rx_lcore_id++;
- if (rx_lcore_id >= RTE_MAX_LCORE)
- rte_exit(EXIT_FAILURE, "Not enough cores\n");
- }
-
- if (qconf != &lcore_queue_conf[rx_lcore_id])
- /* Assigned a new logical core in the loop above. */
- qconf = &lcore_queue_conf[rx_lcore_id];
-
- qconf->rx_port_list[qconf->n_rx_port] = portid;
- qconf->n_rx_port++;
-
- /* Save the port resource info into lcore_resource structures */
- res = &lcore_resource[rx_lcore_id];
- res->enabled = 1;
- res->port[res->port_num++] = portid;
-
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
- }
-
- nb_ports_available = nb_ports;
-
- /* Initialise each port */
- for (portid = 0; portid < nb_ports; portid++) {
- struct rte_eth_rxconf rxq_conf;
- struct rte_eth_txconf txq_conf;
- struct rte_eth_dev_info dev_info;
- struct rte_eth_conf local_port_conf = port_conf;
-
- /* skip ports that are not enabled */
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n", (unsigned) portid);
- nb_ports_available--;
- continue;
- }
- /* init port */
- printf("Initializing port %u... ", (unsigned) portid);
- fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
- ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
-
- ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
- &nb_txd);
- if (ret < 0)
- rte_exit(EXIT_FAILURE,
- "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d, port=%u\n",
- ret, (unsigned) portid);
-
- rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
-
- /* init one RX queue */
- fflush(stdout);
- rxq_conf = dev_info.default_rxconf;
- rxq_conf.offloads = local_port_conf.rxmode.offloads;
- ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- rte_eth_dev_socket_id(portid),
- &rxq_conf,
- l2fwd_pktmbuf_pool[portid]);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
-
- /* init one TX queue on each port */
- fflush(stdout);
- txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
- txq_conf.offloads = local_port_conf.txmode.offloads;
- ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
- rte_eth_dev_socket_id(portid),
- &txq_conf);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
-
- /* Initialize TX buffers */
- tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
- RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
- rte_eth_dev_socket_id(portid));
- if (tx_buffer[portid] == NULL)
- rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
-
- rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
-
- ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
- rte_eth_tx_buffer_count_callback,
- &port_statistics[portid].dropped);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
-
- /* Start device */
- ret = rte_eth_dev_start(portid);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
-
- printf("done: \n");
-
- rte_eth_promiscuous_enable(portid);
-
- printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
- l2fwd_ports_eth_addr[portid].addr_bytes[0],
- l2fwd_ports_eth_addr[portid].addr_bytes[1],
- l2fwd_ports_eth_addr[portid].addr_bytes[2],
- l2fwd_ports_eth_addr[portid].addr_bytes[3],
- l2fwd_ports_eth_addr[portid].addr_bytes[4],
- l2fwd_ports_eth_addr[portid].addr_bytes[5]);
-
- /* initialize port stats */
- //memset(&port_statistics, 0, sizeof(port_statistics));
- }
-
- if (!nb_ports_available) {
- rte_exit(EXIT_FAILURE,
- "All available ports are disabled. Please set portmask.\n");
- }
-
- check_all_ports_link_status(nb_ports, l2fwd_enabled_port_mask);
-
- /* Record pair lcore */
- /**
- * The l2fwd example pairs neighbouring ports: port 0 receives and forwards to
- * port 1, and vice versa, so the two ports depend on each other. If one port
- * stops working (killed, for example), it must be stopped and started again;
- * during that time the other port has to wait until the stop/start procedure
- * completes. So, record the pair relationship for the lcores working on
- * those ports.
- **/
- for (portid = 0; portid < nb_ports; portid++) {
- uint32_t pair_port;
- unsigned lcore = 0, pair_lcore = 0;
- unsigned j, find_lcore, find_pair_lcore;
- /* skip ports that are not enabled */
- if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
- continue;
-
- /* Find pair ports' lcores */
- find_lcore = find_pair_lcore = 0;
- pair_port = l2fwd_dst_ports[portid];
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- if (!rte_lcore_is_enabled(i))
- continue;
- for (j = 0; j < lcore_queue_conf[i].n_rx_port;j++) {
- if (lcore_queue_conf[i].rx_port_list[j] == portid) {
- lcore = i;
- find_lcore = 1;
- break;
- }
- if (lcore_queue_conf[i].rx_port_list[j] == pair_port) {
- pair_lcore = i;
- find_pair_lcore = 1;
- break;
- }
- }
- if (find_lcore && find_pair_lcore)
- break;
- }
- if (!find_lcore || !find_pair_lcore)
- rte_exit(EXIT_FAILURE, "Not find port=%d pair\n", portid);
-
- printf("lcore %u and %u paired\n", lcore, pair_lcore);
- lcore_resource[lcore].pair_id = pair_lcore;
- lcore_resource[pair_lcore].pair_id = lcore;
- }
-
- /* Create message buffers for the master and all slaves */
- message_pool = rte_mempool_create("ms_msg_pool",
- NB_CORE_MSGBUF * RTE_MAX_LCORE,
- sizeof(enum l2fwd_cmd), NB_CORE_MSGBUF / 2,
- 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
-
- if (message_pool == NULL)
- rte_exit(EXIT_FAILURE, "Create msg mempool failed\n");
-
- /* Create a ring for each master/slave pair, and register a callback for slave exit */
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- /**
- * Only create the ring and register the slave_exit callback if that core is
- * involved in packet forwarding
- **/
- if (lcore_resource[i].enabled) {
- /* Create ring for master and slave communication */
- ret = create_ms_ring(i);
- if (ret != 0)
- rte_exit(EXIT_FAILURE, "Create ring for lcore=%u failed",
- i);
-
- if (flib_register_slave_exit_notify(i,
- slave_exit_cb) != 0)
- rte_exit(EXIT_FAILURE,
- "Register master_trace_slave_exit failed");
- }
- }
-
- /* launch per-lcore init on every lcore except master */
- flib_mp_remote_launch(l2fwd_launch_one_lcore, NULL, SKIP_MASTER);
-
- /* print statistics every 10 seconds */
- prev_tsc = cur_tsc = rte_rdtsc();
- timer_tsc = 0;
- while (1) {
- sleep(1);
- cur_tsc = rte_rdtsc();
- diff_tsc = cur_tsc - prev_tsc;
- /* if timer is enabled */
- if (timer_period > 0) {
-
- /* advance the timer */
- timer_tsc += diff_tsc;
-
- /* if timer has reached its timeout */
- if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
-
- print_stats();
- /* reset the timer */
- timer_tsc = 0;
- }
- }
-
- prev_tsc = cur_tsc;
-
- /* Check whether any slave needs a restart or recreate */
- rte_spinlock_lock(&res_lock);
- for (i = 0; i < RTE_MAX_LCORE; i++) {
- struct lcore_resource_struct *res = &lcore_resource[i];
- struct lcore_resource_struct *pair = &lcore_resource[res->pair_id];
-
- /* If a slave has exited, try to reset the pair */
- if (res->enabled && res->flags && pair->enabled) {
- if (!pair->flags) {
- master_sendcmd_with_ack(pair->lcore_id, CMD_STOP);
- rte_spinlock_unlock(&res_lock);
- sleep(1);
- rte_spinlock_lock(&res_lock);
- if (pair->flags)
- continue;
- }
- if (reset_pair(res->lcore_id, pair->lcore_id) != 0)
- rte_exit(EXIT_FAILURE, "failed to reset slave");
- res->flags = 0;
- pair->flags = 0;
- }
- }
- rte_spinlock_unlock(&res_lock);
- }
-
-}
diff --git a/examples/multi_process/meson.build b/examples/multi_process/meson.build
new file mode 100644
index 00000000..c370d747
--- /dev/null
+++ b/examples/multi_process/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Example app currently unsupported by meson build
+build = false
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index 1ada4ef5..c6c6a537 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -115,7 +115,7 @@ smp_parse_args(int argc, char **argv)
int opt, ret;
char **argvopt;
int option_index;
- unsigned i, port_mask = 0;
+ uint16_t i, port_mask = 0;
char *prgname = argv[0];
static struct option lgopts[] = {
{PARAM_NUM_PROCS, 1, 0, 0},
@@ -156,7 +156,7 @@ smp_parse_args(int argc, char **argv)
smp_usage(prgname, "Invalid or missing port mask\n");
/* get the port numbers from the port mask */
- for(i = 0; i < rte_eth_dev_count(); i++)
+ RTE_ETH_FOREACH_DEV(i)
if(port_mask & (1 << i))
ports[num_ports++] = (uint8_t)i;
@@ -178,7 +178,6 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP),
},
@@ -200,11 +199,12 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
uint16_t q;
uint16_t nb_rxd = RX_RING_SIZE;
uint16_t nb_txd = TX_RING_SIZE;
+ uint64_t rss_hf_tmp;
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
printf("# Initialising port %u... ", port);
@@ -216,6 +216,17 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
if (info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
+ port_conf.rx_adv_conf.rss_conf.rss_hf &= info.flow_type_rss_offloads;
+ if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port,
+ rss_hf_tmp,
+ port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval < 0)
return retval;
@@ -236,7 +247,6 @@ smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
}
txq_conf = info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
@@ -418,7 +428,7 @@ main(int argc, char **argv)
argv += ret;
/* determine the NIC devices available */
- if (rte_eth_dev_count() == 0)
+ if (rte_eth_dev_count_avail() == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
/* parse application arguments (those after the EAL ones) */
diff --git a/examples/netmap_compat/bridge/Makefile b/examples/netmap_compat/bridge/Makefile
index a7c9c14a..856c847b 100644
--- a/examples/netmap_compat/bridge/Makefile
+++ b/examples/netmap_compat/bridge/Makefile
@@ -10,7 +10,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
diff --git a/examples/netmap_compat/bridge/bridge.c b/examples/netmap_compat/bridge/bridge.c
index 59c5e436..7afca28c 100644
--- a/examples/netmap_compat/bridge/bridge.c
+++ b/examples/netmap_compat/bridge/bridge.c
@@ -26,7 +26,6 @@
struct rte_eth_conf eth_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -236,7 +235,7 @@ int main(int argc, char *argv[])
if (ports.num == 0)
rte_exit(EXIT_FAILURE, "no ports specified\n");
- if (rte_eth_dev_count() < 1)
+ if (rte_eth_dev_count_avail() < 1)
rte_exit(EXIT_FAILURE, "Not enough ethernet ports available\n");
pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
diff --git a/examples/netmap_compat/lib/compat_netmap.c b/examples/netmap_compat/lib/compat_netmap.c
index af3dd223..0be0663e 100644
--- a/examples/netmap_compat/lib/compat_netmap.c
+++ b/examples/netmap_compat/lib/compat_netmap.c
@@ -708,7 +708,6 @@ rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf)
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = conf->eth_conf->rxmode.offloads;
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = conf->eth_conf->txmode.offloads;
for (i = 0; i < conf->nr_tx_rings; i++) {
ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
diff --git a/examples/netmap_compat/meson.build b/examples/netmap_compat/meson.build
new file mode 100644
index 00000000..c370d747
--- /dev/null
+++ b/examples/netmap_compat/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Example app currently unsupported by meson build
+build = false
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 97a58dda..149bfdd0 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -35,11 +35,7 @@ volatile uint8_t quit_signal;
static struct rte_mempool *mbuf_pool;
-static struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .ignore_offload_bitfield = 1,
- },
-};
+static struct rte_eth_conf port_conf_default;
struct worker_thread_args {
struct rte_ring *ring_in;
@@ -211,11 +207,10 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
static inline int
free_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[]) {
- const uint8_t nb_ports = rte_eth_dev_count();
- unsigned port_id;
+ uint16_t port_id;
/* initialize buffers for all ports */
- for (port_id = 0; port_id < nb_ports; port_id++) {
+ RTE_ETH_FOREACH_DEV(port_id) {
/* skip ports that are not enabled */
if ((portmask & (1 << port_id)) == 0)
continue;
@@ -228,12 +223,11 @@ free_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[]) {
static inline int
configure_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
{
- const uint8_t nb_ports = rte_eth_dev_count();
- unsigned port_id;
+ uint16_t port_id;
int ret;
/* initialize buffers for all ports */
- for (port_id = 0; port_id < nb_ports; port_id++) {
+ RTE_ETH_FOREACH_DEV(port_id) {
/* skip ports that are not enabled */
if ((portmask & (1 << port_id)) == 0)
continue;
@@ -263,7 +257,6 @@ configure_eth_port(uint16_t port_id)
{
struct ether_addr addr;
const uint16_t rxRings = 1, txRings = 1;
- const uint8_t nb_ports = rte_eth_dev_count();
int ret;
uint16_t q;
uint16_t nb_rxd = RX_DESC_PER_QUEUE;
@@ -272,7 +265,7 @@ configure_eth_port(uint16_t port_id)
struct rte_eth_txconf txconf;
struct rte_eth_conf port_conf = port_conf_default;
- if (port_id > nb_ports)
+ if (!rte_eth_dev_is_valid_port(port_id))
return -1;
rte_eth_dev_info_get(port_id, &dev_info);
@@ -296,7 +289,6 @@ configure_eth_port(uint16_t port_id)
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
for (q = 0; q < txRings; q++) {
ret = rte_eth_tx_queue_setup(port_id, q, nb_txd,
@@ -325,8 +317,7 @@ configure_eth_port(uint16_t port_id)
static void
print_stats(void)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- unsigned i;
+ uint16_t i;
struct rte_eth_stats eth_stats;
printf("\nRX thread stats:\n");
@@ -355,7 +346,7 @@ print_stats(void)
printf(" - Pkts tx failed w/o reorder: %"PRIu64"\n",
app_stats.tx.early_pkts_tx_failed_woro);
- for (i = 0; i < nb_ports; i++) {
+ RTE_ETH_FOREACH_DEV(i) {
rte_eth_stats_get(i, &eth_stats);
printf("\nPort %u stats:\n", i);
printf(" - Pkts in: %"PRIu64"\n", eth_stats.ipackets);
@@ -383,7 +374,6 @@ int_handler(int sig_num)
static int
rx_thread(struct rte_ring *ring_out)
{
- const uint8_t nb_ports = rte_eth_dev_count();
uint32_t seqn = 0;
uint16_t i, ret = 0;
uint16_t nb_rx_pkts;
@@ -395,7 +385,7 @@ rx_thread(struct rte_ring *ring_out)
while (!quit_signal) {
- for (port_id = 0; port_id < nb_ports; port_id++) {
+ RTE_ETH_FOREACH_DEV(port_id) {
if ((portmask & (1 << port_id)) != 0) {
/* receive packets */
@@ -435,7 +425,7 @@ rx_thread(struct rte_ring *ring_out)
static int
worker_thread(void *args_ptr)
{
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count_avail();
uint16_t i, ret = 0;
uint16_t burst_size = 0;
struct worker_thread_args *args;
@@ -649,7 +639,7 @@ main(int argc, char **argv)
"1 lcore for packet TX\n"
"and at least 1 lcore for worker threads\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
rte_exit(EXIT_FAILURE, "Error: no ethernet ports detected\n");
if (nb_ports != 1 && (nb_ports & 1))
@@ -665,7 +655,7 @@ main(int argc, char **argv)
nb_ports_available = nb_ports;
/* initialize all ports */
- for (port_id = 0; port_id < nb_ports; port_id++) {
+ RTE_ETH_FOREACH_DEV(port_id) {
/* skip ports that are not enabled */
if ((portmask & (1 << port_id)) == 0) {
printf("\nSkipping disabled port %d\n", port_id);
diff --git a/examples/performance-thread/common/arch/x86/ctx.c b/examples/performance-thread/common/arch/x86/ctx.c
index 1e8e2717..d63fd9fc 100644
--- a/examples/performance-thread/common/arch/x86/ctx.c
+++ b/examples/performance-thread/common/arch/x86/ctx.c
@@ -1,65 +1,9 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * https://github.com/halayli/lthread which carries the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
-
-
#if defined(__x86_64__)
__asm__ (
".text\n"
diff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c
index 7d76c8c4..3f1f48db 100644
--- a/examples/performance-thread/common/lthread.c
+++ b/examples/performance-thread/common/lthread.c
@@ -1,62 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * Some portions of this software is derived from the
- * https://github.com/halayli/lthread which carrys the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
#define RTE_MEM 1
@@ -320,13 +265,14 @@ struct lthread *lthread_current(void)
/*
* Tasklet to cancel a thread
*/
-static void
+static void *
_cancel(void *arg)
{
struct lthread *lt = (struct lthread *) arg;
lt->state |= BIT(ST_LT_CANCELLED);
lthread_detach();
+ return NULL;
}
diff --git a/examples/performance-thread/common/lthread.h b/examples/performance-thread/common/lthread.h
index 0cde5919..4c945cf7 100644
--- a/examples/performance-thread/common/lthread.h
+++ b/examples/performance-thread/common/lthread.h
@@ -1,64 +1,8 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * Some portions of this software is derived from the
- * https://github.com/halayli/lthread which carrys the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
-
#ifndef LTHREAD_H_
#define LTHREAD_H_
diff --git a/examples/performance-thread/common/lthread_api.h b/examples/performance-thread/common/lthread_api.h
index ff245a08..995deb4d 100644
--- a/examples/performance-thread/common/lthread_api.h
+++ b/examples/performance-thread/common/lthread_api.h
@@ -1,64 +1,8 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * Some portions of this software may have been derived from the
- * https://github.com/halayli/lthread which carrys the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
-
/**
* @file lthread_api.h
*
@@ -143,7 +87,7 @@ struct lthread_mutex;
struct lthread_condattr;
struct lthread_mutexattr;
-typedef void (*lthread_func_t) (void *);
+typedef void *(*lthread_func_t) (void *);
/*
* Define the size of stack for an lthread
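Changing lthread_func_t from void (*)(void *) to void *(*)(void *) aligns lthread entry points with the pthread start-routine type, which is what lets the pthread shim later in this patch pass func through uncast; it is also why every lthread body in these examples gains a return NULL. A minimal sketch of an entry point under the new signature (the -1 "current lcore" argument in the usage comment follows the examples' convention and is an assumption here):

#include <stdio.h>
/* lthread_api.h is the example-local header patched above. */

/* Entry points now return void *, mirroring pthread start routines. */
static void *
hello_lthread(void *arg)
{
	printf("hello from lthread %s\n", (const char *)arg);
	lthread_exit(NULL);	/* pass a (null) return value to any joiner */
	return NULL;		/* unreached, but required by lthread_func_t */
}

/* Spawning no longer needs a function-pointer cast:
 *
 *	struct lthread *lt;
 *	lthread_create(&lt, -1, hello_lthread, "demo");
 */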
diff --git a/examples/performance-thread/common/lthread_cond.c b/examples/performance-thread/common/lthread_cond.c
index 96fcce04..cdcc7a7b 100644
--- a/examples/performance-thread/common/lthread_cond.c
+++ b/examples/performance-thread/common/lthread_cond.c
@@ -1,62 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * Some portions of this software may have been derived from the
- * https://github.com/halayli/lthread which carrys the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
#include <stdio.h>
diff --git a/examples/performance-thread/common/lthread_cond.h b/examples/performance-thread/common/lthread_cond.h
index 5e5f14be..616a55c4 100644
--- a/examples/performance-thread/common/lthread_cond.h
+++ b/examples/performance-thread/common/lthread_cond.h
@@ -1,62 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * Some portions of this software may have been derived from the
- * https://github.com/halayli/lthread which carrys the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
#ifndef LTHREAD_COND_H_
diff --git a/examples/performance-thread/common/lthread_int.h b/examples/performance-thread/common/lthread_int.h
index e1da2462..a352f13b 100644
--- a/examples/performance-thread/common/lthread_int.h
+++ b/examples/performance-thread/common/lthread_int.h
@@ -1,62 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * Some portions of this software may have been derived from the
- * https://github.com/halayli/lthread which carrys the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
#ifndef LTHREAD_INT_H
#define LTHREAD_INT_H
diff --git a/examples/performance-thread/common/lthread_pool.h b/examples/performance-thread/common/lthread_pool.h
index 315a2e21..6f93775f 100644
--- a/examples/performance-thread/common/lthread_pool.h
+++ b/examples/performance-thread/common/lthread_pool.h
@@ -1,69 +1,7 @@
/*
- *-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Some portions of this software is derived from the producer
- * consumer queues described by Dmitry Vyukov and published here
- * http://www.1024cores.net
- *
- * Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
- * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2010-2011 Dmitry Vyukov
*/
#ifndef LTHREAD_POOL_H_
diff --git a/examples/performance-thread/common/lthread_queue.h b/examples/performance-thread/common/lthread_queue.h
index 833ed92b..5b63ba22 100644
--- a/examples/performance-thread/common/lthread_queue.h
+++ b/examples/performance-thread/common/lthread_queue.h
@@ -1,69 +1,7 @@
/*
- *-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Some portions of this software is derived from the producer
- * consumer queues described by Dmitry Vyukov and published here
- * http://www.1024cores.net
- *
- * Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
- * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * The views and conclusions contained in the software and documentation are
- * those of the authors and should not be interpreted as representing official
- * policies, either expressed or implied, of Dmitry Vyukov.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2010-2011 Dmitry Vyukov
*/
#ifndef LTHREAD_QUEUE_H_
diff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c
index 779aeb17..38ca0c45 100644
--- a/examples/performance-thread/common/lthread_sched.c
+++ b/examples/performance-thread/common/lthread_sched.c
@@ -1,65 +1,9 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * Some portions of this software is derived from the
- * https://github.com/halayli/lthread which carrys the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
-
#define RTE_MEM 1
#include <stdio.h>
diff --git a/examples/performance-thread/common/lthread_sched.h b/examples/performance-thread/common/lthread_sched.h
index aa2f0c48..d14bec1c 100644
--- a/examples/performance-thread/common/lthread_sched.h
+++ b/examples/performance-thread/common/lthread_sched.h
@@ -1,62 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
/*
- * Some portions of this software is derived from the
- * https://github.com/halayli/lthread which carrys the following license.
- *
- * Copyright (C) 2012, Hasan Alayli <halayli@gmail.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2015 Intel Corporation.
+ * Copyright 2012 Hasan Alayli <halayli@gmail.com>
*/
#ifndef LTHREAD_SCHED_H_
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index c04294cf..5392fcea 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -306,7 +306,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP),
},
@@ -1990,17 +1989,18 @@ cpu_load_collector(__rte_unused void *arg) {
*
* This loop is used to start empty scheduler on lcore.
*/
-static void
+static void *
lthread_null(__rte_unused void *args)
{
int lcore_id = rte_lcore_id();
RTE_LOG(INFO, L3FWD, "Starting scheduler on lcore %d.\n", lcore_id);
lthread_exit(NULL);
+ return NULL;
}
/* main processing loop */
-static void
+static void *
lthread_tx_per_ring(void *dummy)
{
int nb_rx;
@@ -2045,6 +2045,7 @@ lthread_tx_per_ring(void *dummy)
lthread_cond_wait(ready, 0);
}
+ return NULL;
}
/*
@@ -2053,7 +2054,7 @@ lthread_tx_per_ring(void *dummy)
* This lthread is used to spawn one new lthread per ring from producers.
*
*/
-static void
+static void *
lthread_tx(void *args)
{
struct lthread *lt;
@@ -2098,9 +2099,10 @@ lthread_tx(void *args)
}
}
+ return NULL;
}
-static void
+static void *
lthread_rx(void *dummy)
{
int ret;
@@ -2124,7 +2126,7 @@ lthread_rx(void *dummy)
if (rx_conf->n_rx_queue == 0) {
RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", rte_lcore_id());
- return;
+ return NULL;
}
RTE_LOG(INFO, L3FWD, "Entering main Rx loop on lcore %u\n", rte_lcore_id());
@@ -2196,6 +2198,7 @@ lthread_rx(void *dummy)
lthread_yield();
}
}
+ return NULL;
}
/*
@@ -2204,8 +2207,9 @@ lthread_rx(void *dummy)
* This lthread loop spawns all rx and tx lthreads on master lcore
*/
-static void
-lthread_spawner(__rte_unused void *arg) {
+static void *
+lthread_spawner(__rte_unused void *arg)
+{
struct lthread *lt[MAX_THREAD];
int i;
int n_thread = 0;
@@ -2246,6 +2250,7 @@ lthread_spawner(__rte_unused void *arg) {
for (i = 0; i < n_thread; i++)
lthread_join(lt[i], NULL);
+ return NULL;
}
/*
@@ -2491,7 +2496,7 @@ check_lcore_params(void)
}
static int
-check_port_config(const unsigned nb_ports)
+check_port_config(void)
{
unsigned portid;
uint16_t i;
@@ -2502,7 +2507,7 @@ check_port_config(const unsigned nb_ports)
printf("port %u is not enabled in port mask\n", portid);
return -1;
}
- if (portid >= nb_ports) {
+ if (!rte_eth_dev_is_valid_port(portid)) {
printf("port %u is not present on the board\n", portid);
return -1;
}
@@ -3411,7 +3416,7 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -3423,7 +3428,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
fflush(stdout);
for (count = 0; count <= MAX_CHECK_TIME; count++) {
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
@@ -3514,15 +3519,15 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "init_rx_rings failed\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
- if (check_port_config(nb_ports) < 0)
+ if (check_port_config() < 0)
rte_exit(EXIT_FAILURE, "check_port_config failed\n");
nb_lcores = rte_lcore_count();
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
struct rte_eth_conf local_port_conf = port_conf;
/* skip ports that are not enabled */
@@ -3545,6 +3550,18 @@ main(int argc, char **argv)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ portid,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(portid, nb_rx_queue,
(uint16_t)n_tx_queue, &local_port_conf);
if (ret < 0)
@@ -3591,7 +3608,6 @@ main(int argc, char **argv)
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, txconf);
@@ -3654,7 +3670,7 @@ main(int argc, char **argv)
printf("\n");
/* start ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((enabled_port_mask & (1 << portid)) == 0)
continue;
@@ -3699,7 +3715,7 @@ main(int argc, char **argv)
}
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(enabled_port_mask);
if (lthreads_on) {
printf("Starting L-Threading Model\n");
diff --git a/examples/performance-thread/meson.build b/examples/performance-thread/meson.build
new file mode 100644
index 00000000..c370d747
--- /dev/null
+++ b/examples/performance-thread/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Example app currently unsupported by meson build
+build = false
diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c
index 964ea252..7d0d5811 100644
--- a/examples/performance-thread/pthread_shim/main.c
+++ b/examples/performance-thread/pthread_shim/main.c
@@ -119,8 +119,7 @@ void *helloworld_pthread(void *arg)
*/
__thread pthread_t tid[HELLOW_WORLD_MAX_LTHREADS];
-static void initial_lthread(void *args);
-static void initial_lthread(void *args __attribute__((unused)))
+static void *initial_lthread(void *args __attribute__((unused)))
{
int lcore = (int) rte_lcore_id();
/*
@@ -195,6 +194,7 @@ static void initial_lthread(void *args __attribute__((unused)))
/* shutdown the lthread scheduler */
lthread_scheduler_shutdown(rte_lcore_id());
lthread_detach();
+ return NULL;
}
@@ -205,8 +205,6 @@ static void initial_lthread(void *args __attribute__((unused)))
* in the core mask
*/
static int
-lthread_scheduler(void *args);
-static int
lthread_scheduler(void *args __attribute__((unused)))
{
/* create initial thread */
diff --git a/examples/performance-thread/pthread_shim/pthread_shim.c b/examples/performance-thread/pthread_shim/pthread_shim.c
index c31de4e9..53f12437 100644
--- a/examples/performance-thread/pthread_shim/pthread_shim.c
+++ b/examples/performance-thread/pthread_shim/pthread_shim.c
@@ -365,7 +365,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
int
pthread_create(pthread_t *__restrict tid,
const pthread_attr_t *__restrict attr,
- void *(func) (void *),
+ lthread_func_t func,
void *__restrict arg)
{
if (override) {
@@ -390,7 +390,7 @@ pthread_create(pthread_t *__restrict tid,
}
}
return lthread_create((struct lthread **)tid, lcore,
- (void (*)(void *))func, arg);
+ func, arg);
}
return _sys_pthread_funcs.f_pthread_create(tid, attr, func, arg);
}
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index 83821eb8..82ae71c1 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -50,7 +50,6 @@ static uint8_t ptp_enabled_ports[RTE_MAX_ETHPORTS];
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
@@ -187,7 +186,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
uint16_t nb_rxd = RX_RING_SIZE;
uint16_t nb_txd = TX_RING_SIZE;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rte_eth_dev_info_get(port, &dev_info);
@@ -217,11 +216,9 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
- /* Setup txq_flags */
struct rte_eth_txconf *txconf;
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
@@ -727,7 +724,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Error with PTP initialization\n");
/* Check that there is an even number of ports to send/receive on. */
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
/* Creates a new mempool in memory to hold the mbufs. */
mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
@@ -737,7 +734,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* Initialize all ports. */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if ((ptp_enabled_port_mask & (1 << portid)) != 0) {
if (port_init(portid, mbuf_pool) == 0) {
ptp_enabled_ports[ptp_enabled_port_nb] = portid;
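The ptpclient hunks show the completed offload API migration in miniature: rxmode.ignore_offload_bitfield and txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE disappear, and TX queue setup simply inherits the offloads negotiated at configure time. A standalone sketch of the resulting setup helper (an assumed shape, not code from the example):

#include <rte_ethdev.h>

/* TX queue setup after the offload rework: no txq_flags; copy the
 * port-level TX offloads into the device's default txconf. */
static int
setup_tx_queue(uint16_t port, uint16_t queue, uint16_t nb_txd,
	       const struct rte_eth_conf *port_conf)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port, &dev_info);
	txconf = dev_info.default_txconf;
	txconf.offloads = port_conf->txmode.offloads;
	return rte_eth_tx_queue_setup(port, queue, nb_txd,
				      rte_eth_dev_socket_id(port), &txconf);
}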
diff --git a/examples/qos_meter/Makefile b/examples/qos_meter/Makefile
index 69ac6617..46341b1a 100644
--- a/examples/qos_meter/Makefile
+++ b/examples/qos_meter/Makefile
@@ -23,6 +23,8 @@ CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index f0f9bcaf..5cf4e9df 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -56,7 +56,6 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP),
},
@@ -90,14 +89,23 @@ static uint16_t port_tx;
static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX];
struct rte_eth_dev_tx_buffer *tx_buffer;
-struct rte_meter_srtcm_params app_srtcm_params[] = {
- {.cir = 1000000 * 46, .cbs = 2048, .ebs = 2048},
+struct rte_meter_srtcm_params app_srtcm_params = {
+ .cir = 1000000 * 46,
+ .cbs = 2048,
+ .ebs = 2048
};
-struct rte_meter_trtcm_params app_trtcm_params[] = {
- {.cir = 1000000 * 46, .pir = 1500000 * 46, .cbs = 2048, .pbs = 2048},
+struct rte_meter_srtcm_profile app_srtcm_profile;
+
+struct rte_meter_trtcm_params app_trtcm_params = {
+ .cir = 1000000 * 46,
+ .pir = 1500000 * 46,
+ .cbs = 2048,
+ .pbs = 2048
};
+struct rte_meter_trtcm_profile app_trtcm_profile;
+
#define APP_FLOWS_MAX 256
FLOW_METER app_flows[APP_FLOWS_MAX];
@@ -105,12 +113,21 @@ FLOW_METER app_flows[APP_FLOWS_MAX];
static int
app_configure_flow_table(void)
{
- uint32_t i, j;
+ uint32_t i;
int ret;
- for (i = 0, j = 0; i < APP_FLOWS_MAX;
- i ++, j = (j + 1) % RTE_DIM(PARAMS)) {
- ret = FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
+ ret = rte_meter_srtcm_profile_config(&app_srtcm_profile,
+ &app_srtcm_params);
+ if (ret)
+ return ret;
+
+ ret = rte_meter_trtcm_profile_config(&app_trtcm_profile,
+ &app_trtcm_params);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < APP_FLOWS_MAX; i++) {
+ ret = FUNC_CONFIG(&app_flows[i], &PROFILE);
if (ret)
return ret;
}
@@ -135,7 +152,10 @@ app_pkt_handle(struct rte_mbuf *pkt, uint64_t time)
enum policer_action action;
/* color input is not used for blind modes */
- output_color = (uint8_t) FUNC_METER(&app_flows[flow_id], time, pkt_len,
+ output_color = (uint8_t) FUNC_METER(&app_flows[flow_id],
+ &PROFILE,
+ time,
+ pkt_len,
(enum rte_meter_color) input_color);
/* Apply policing and set the output color */
@@ -312,6 +332,17 @@ main(int argc, char **argv)
rte_eth_dev_info_get(port_rx, &dev_info);
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
+ if (conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port_rx,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(port_rx, 1, 1, &conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
@@ -330,7 +361,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, nb_txd,
rte_eth_dev_socket_id(port_rx),
@@ -342,6 +372,17 @@ main(int argc, char **argv)
rte_eth_dev_info_get(port_tx, &dev_info);
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
+ if (conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port_tx,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
ret = rte_eth_dev_configure(port_tx, 1, 1, &conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
@@ -362,7 +403,6 @@ main(int argc, char **argv)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, nb_txd,
rte_eth_dev_socket_id(port_tx),
diff --git a/examples/qos_meter/main.h b/examples/qos_meter/main.h
index b27e8eb8..f51eb664 100644
--- a/examples/qos_meter/main.h
+++ b/examples/qos_meter/main.h
@@ -21,44 +21,52 @@ enum policer_action policer_table[e_RTE_METER_COLORS][e_RTE_METER_COLORS] =
#if APP_MODE == APP_MODE_FWD
-#define FUNC_METER(a,b,c,d) color, flow_id=flow_id, pkt_len=pkt_len, time=time
+#define FUNC_METER(m, p, time, pkt_len, pkt_color) \
+({ \
+ void *mp = m; \
+ void *pp = p; \
+ mp = mp; \
+ pp = pp; \
+ time = time; \
+ pkt_len = pkt_len; \
+ pkt_color; \
+})
#define FUNC_CONFIG(a, b) 0
-#define PARAMS app_srtcm_params
#define FLOW_METER int
+#define PROFILE app_srtcm_profile
#elif APP_MODE == APP_MODE_SRTCM_COLOR_BLIND
-#define FUNC_METER(a,b,c,d) rte_meter_srtcm_color_blind_check(a,b,c)
+#define FUNC_METER(m, p, time, pkt_len, pkt_color) \
+ rte_meter_srtcm_color_blind_check(m, p, time, pkt_len)
#define FUNC_CONFIG rte_meter_srtcm_config
-#define PARAMS app_srtcm_params
#define FLOW_METER struct rte_meter_srtcm
+#define PROFILE app_srtcm_profile
#elif (APP_MODE == APP_MODE_SRTCM_COLOR_AWARE)
#define FUNC_METER rte_meter_srtcm_color_aware_check
#define FUNC_CONFIG rte_meter_srtcm_config
-#define PARAMS app_srtcm_params
#define FLOW_METER struct rte_meter_srtcm
+#define PROFILE app_srtcm_profile
#elif (APP_MODE == APP_MODE_TRTCM_COLOR_BLIND)
-#define FUNC_METER(a,b,c,d) rte_meter_trtcm_color_blind_check(a,b,c)
+#define FUNC_METER(m, p, time, pkt_len, pkt_color) \
+ rte_meter_trtcm_color_blind_check(m, p, time, pkt_len)
#define FUNC_CONFIG rte_meter_trtcm_config
-#define PARAMS app_trtcm_params
#define FLOW_METER struct rte_meter_trtcm
+#define PROFILE app_trtcm_profile
#elif (APP_MODE == APP_MODE_TRTCM_COLOR_AWARE)
-#define FUNC_METER rte_meter_trtcm_color_aware_check
+#define FUNC_METER rte_meter_trtcm_color_aware_check
#define FUNC_CONFIG rte_meter_trtcm_config
-#define PARAMS app_trtcm_params
#define FLOW_METER struct rte_meter_trtcm
+#define PROFILE app_trtcm_profile
#else
#error Invalid value for APP_MODE
#endif
-
-
-
#endif /* _MAIN_H_ */
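The macro rework above mirrors the split meter API (still experimental at this point, which is why the qos_meter Makefile hunk adds -DALLOW_EXPERIMENTAL_API): the parameter set is compiled once into a shared profile, each per-flow meter binds to that profile, and the per-packet check takes the profile as an explicit argument. The srtcm color-blind case, sketched outside the macros:

#include <rte_meter.h>

static struct rte_meter_srtcm_params srtcm_params = {
	.cir = 1000000 * 46, .cbs = 2048, .ebs = 2048
};
static struct rte_meter_srtcm_profile srtcm_profile;
static struct rte_meter_srtcm flow_meter;

static int
meter_init(void)
{
	int ret;

	/* Compile the parameters once into a shared, read-only profile. */
	ret = rte_meter_srtcm_profile_config(&srtcm_profile, &srtcm_params);
	if (ret)
		return ret;
	/* Bind this flow's run-time bucket state to the profile. */
	return rte_meter_srtcm_config(&flow_meter, &srtcm_profile);
}

static enum rte_meter_color
meter_packet(uint64_t time, uint32_t pkt_len)
{
	/* The check functions now take the profile as a second argument. */
	return rte_meter_srtcm_color_blind_check(&flow_meter, &srtcm_profile,
						 time, pkt_len);
}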
diff --git a/examples/qos_sched/Makefile b/examples/qos_sched/Makefile
index 0f0a31ff..a7ecf978 100644
--- a/examples/qos_sched/Makefile
+++ b/examples/qos_sched/Makefile
@@ -48,7 +48,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index 8914f766..94cbb26f 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -59,7 +59,6 @@ static struct rte_eth_conf port_conf = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -96,7 +95,6 @@ app_init_port(uint16_t portid, struct rte_mempool *mp)
tx_conf.tx_free_thresh = 0;
tx_conf.tx_rs_thresh = 0;
tx_conf.tx_deferred_start = 0;
- tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
/* init port */
RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
@@ -298,7 +296,7 @@ int app_init(void)
char ring_name[MAX_NAME_LEN];
char pool_name[MAX_NAME_LEN];
- if (rte_eth_dev_count() == 0)
+ if (rte_eth_dev_count_avail() == 0)
rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");
/* load configuration profile */
diff --git a/examples/quota_watermark/meson.build b/examples/quota_watermark/meson.build
new file mode 100644
index 00000000..c370d747
--- /dev/null
+++ b/examples/quota_watermark/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Example app currently unsupported by meson build
+build = false
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index d4a69183..19164385 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -24,7 +24,6 @@
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -82,7 +81,6 @@ void configure_eth_port(uint16_t port_id)
/* Initialize the port's TX queue */
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
rte_eth_dev_socket_id(port_id),
@@ -112,7 +110,7 @@ void configure_eth_port(uint16_t port_id)
void
init_dpdk(void)
{
- if (rte_eth_dev_count() < 2)
+ if (rte_eth_dev_count_avail() < 2)
rte_exit(EXIT_FAILURE, "Not enough ethernet port available\n");
}
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index 313216f3..c55d3874 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -181,7 +181,7 @@ receive_stage(__attribute__((unused)) void *args)
}
}
-static void
+static int
pipeline_stage(__attribute__((unused)) void *args)
{
int i, ret;
@@ -243,9 +243,11 @@ pipeline_stage(__attribute__((unused)) void *args)
}
}
}
+
+ return 0;
}
-static void
+static int
send_stage(__attribute__((unused)) void *args)
{
uint16_t nb_dq_pkts;
@@ -287,6 +289,8 @@ send_stage(__attribute__((unused)) void *args)
/* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
}
}
+
+ return 0;
}
int
@@ -346,15 +350,13 @@ main(int argc, char **argv)
if (is_bit_set(port_id, portmask))
init_ring(lcore_id, port_id);
- /* typecast is a workaround for GCC 4.3 bug */
- rte_eal_remote_launch((int (*)(void *))pipeline_stage,
+ rte_eal_remote_launch(pipeline_stage,
NULL, lcore_id);
}
}
/* Start send_stage() on the last slave core */
- /* typecast is a workaround for GCC 4.3 bug */
- rte_eal_remote_launch((int (*)(void *))send_stage, NULL, last_lcore_id);
+ rte_eal_remote_launch(send_stage, NULL, last_lcore_id);
/* Start receive_stage() on the master core */
receive_stage(NULL);
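
Note: rte_eal_remote_launch() takes an lcore_function_t, i.e. int (*)(void *), which is why pipeline_stage() and send_stage() now return int and the old function-pointer casts can go. A minimal sketch of a correctly typed lcore worker:

#include <rte_launch.h>
#include <rte_lcore.h>

/* Sketch: a worker with the exact lcore_function_t signature, so no
 * cast is needed at the launch site. */
static int
worker(__attribute__((unused)) void *arg)
{
	/* per-lcore work loop would go here */
	return 0; /* return value is collected by rte_eal_wait_lcore() */
}

static void
launch_workers(void)
{
	unsigned int lcore_id;

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(worker, NULL, lcore_id);
}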
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index d2e5e19e..2058be62 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -20,12 +20,9 @@
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
-static unsigned nb_ports;
-
static struct {
uint64_t total_cycles;
uint64_t total_pkts;
@@ -82,7 +79,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txconf;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rte_eth_dev_info_get(port, &dev_info);
@@ -106,7 +103,6 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q++) {
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
@@ -145,7 +141,7 @@ lcore_main(void)
{
uint16_t port;
- for (port = 0; port < nb_ports; port++)
+ RTE_ETH_FOREACH_DEV(port)
if (rte_eth_dev_socket_id(port) > 0 &&
rte_eth_dev_socket_id(port) !=
(int)rte_socket_id())
@@ -156,7 +152,7 @@ lcore_main(void)
printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
rte_lcore_id());
for (;;) {
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
struct rte_mbuf *bufs[BURST_SIZE];
const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
bufs, BURST_SIZE);
@@ -179,6 +175,7 @@ int
main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
+ uint16_t nb_ports;
uint16_t portid;
/* init EAL */
@@ -189,7 +186,7 @@ main(int argc, char *argv[])
argc -= ret;
argv += ret;
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports < 2 || (nb_ports & 1))
rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
@@ -200,7 +197,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++)
+ RTE_ETH_FOREACH_DEV(portid)
if (port_init(portid, mbuf_pool) != 0)
rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8"\n",
portid);
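
Note: the same migration replaces bound checks like `port >= rte_eth_dev_count()` with rte_eth_dev_is_valid_port(), since with hot-pluggable devices a port ID below the count can still refer to a detached device. A minimal sketch:

#include <rte_ethdev.h>

/* Sketch: ask ethdev directly whether the ID refers to an attached
 * device instead of comparing against the device count. */
static int
port_init_check(uint16_t port)
{
	if (!rte_eth_dev_is_valid_port(port))
		return -1; /* ID unused, or device detached */
	return 0;
}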
diff --git a/examples/server_node_efd/meson.build b/examples/server_node_efd/meson.build
new file mode 100644
index 00000000..c370d747
--- /dev/null
+++ b/examples/server_node_efd/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Example app currently unsupported by meson build
+build = false
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index 84f7bcff..3b97fbd4 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -320,7 +320,7 @@ main(int argc, char *argv[])
if (parse_app_args(argc, argv) < 0)
rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
- if (rte_eth_dev_count() == 0)
+ if (rte_eth_dev_count_avail() == 0)
rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
rx_ring = rte_ring_lookup(get_rx_queue_name(node_id));
diff --git a/examples/server_node_efd/server/Makefile b/examples/server_node_efd/server/Makefile
index cbb91ebe..df6614c6 100644
--- a/examples/server_node_efd/server/Makefile
+++ b/examples/server_node_efd/server/Makefile
@@ -10,7 +10,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV), "linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(error This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
endif
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index 07b6882f..af5a18e2 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -97,7 +97,6 @@ init_port(uint16_t port_num)
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .ignore_offload_bitfield = 1,
},
};
const uint16_t rx_rings = 1, tx_rings = num_nodes;
@@ -139,7 +138,6 @@ init_port(uint16_t port_num)
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q++) {
retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
@@ -310,7 +308,7 @@ init(int argc, char *argv[])
argv += retval;
/* get total number of ports */
- total_ports = rte_eth_dev_count();
+ total_ports = rte_eth_dev_count_avail();
/* set up array for port data */
mz = rte_memzone_reserve(MZ_SHARED_INFO, sizeof(*info),
diff --git a/examples/service_cores/Makefile b/examples/service_cores/Makefile
index 3156e35d..a4d6b7b4 100644
--- a/examples/service_cores/Makefile
+++ b/examples/service_cores/Makefile
@@ -23,8 +23,6 @@ CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
-CFLAGS += -DALLOW_EXPERIMENTAL_API
-
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
@@ -50,7 +48,6 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += $(WERROR_FLAGS)
# workaround for a gcc bug with noreturn attribute
diff --git a/examples/service_cores/meson.build b/examples/service_cores/meson.build
index 2b0a2503..c34e11e3 100644
--- a/examples/service_cores/meson.build
+++ b/examples/service_cores/meson.build
@@ -6,7 +6,6 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
-allow_experimental_apis = true
sources = files(
'main.c'
)
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index e62cc0a5..4aba1dc3 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -20,7 +20,6 @@
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
@@ -42,7 +41,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txconf;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rte_eth_dev_info_get(port, &dev_info);
@@ -68,7 +67,6 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
@@ -106,14 +104,13 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint16_t nb_ports = rte_eth_dev_count();
uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
* for best performance.
*/
- for (port = 0; port < nb_ports; port++)
+ RTE_ETH_FOREACH_DEV(port)
if (rte_eth_dev_socket_id(port) > 0 &&
rte_eth_dev_socket_id(port) !=
(int)rte_socket_id())
@@ -130,7 +127,7 @@ lcore_main(void)
* Receive packets on a port and forward them on the paired
* port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
*/
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
/* Get burst of RX packets, from first port of pair. */
struct rte_mbuf *bufs[BURST_SIZE];
@@ -174,7 +171,7 @@ main(int argc, char *argv[])
argv += ret;
/* Check that there is an even number of ports to send/receive on. */
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports < 2 || (nb_ports & 1))
rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
@@ -186,7 +183,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* Initialize all ports. */
- for (portid = 0; portid < nb_ports; portid++)
+ RTE_ETH_FOREACH_DEV(portid)
if (port_init(portid, mbuf_pool) != 0)
rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
diff --git a/examples/tep_termination/Makefile b/examples/tep_termination/Makefile
index d2c357a1..8ec1a38e 100644
--- a/examples/tep_termination/Makefile
+++ b/examples/tep_termination/Makefile
@@ -25,6 +25,8 @@ CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
@@ -50,11 +52,12 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(error This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
endif
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -D_GNU_SOURCE
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index 27073913..7795d089 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -515,11 +515,10 @@ check_ports_num(unsigned max_nb_ports)
}
for (portid = 0; portid < nb_ports; portid++) {
- if (ports[portid] >= max_nb_ports) {
+ if (!rte_eth_dev_is_valid_port(ports[portid])) {
RTE_LOG(INFO, VHOST_PORT,
- "\nSpecified port ID(%u) exceeds max "
- " system port ID(%u)\n",
- ports[portid], (max_nb_ports - 1));
+ "\nSpecified port ID(%u) is not valid\n",
+ ports[portid]);
ports[portid] = INVALID_PORT_ID;
valid_nb_ports--;
}
@@ -1062,8 +1061,8 @@ static const struct vhost_device_ops virtio_net_device_ops = {
 * This is a thread that will wake up periodically to print stats if the user
 * has enabled them.
*/
-static void
-print_stats(void)
+static void *
+print_stats(__rte_unused void *arg)
{
struct virtio_net_data_ll *dev_ll;
uint64_t tx_dropped, rx_dropped;
@@ -1120,6 +1119,8 @@ print_stats(void)
}
printf("\n================================================\n");
}
+
+ return NULL;
}
/**
@@ -1135,7 +1136,6 @@ main(int argc, char *argv[])
uint16_t portid;
uint16_t queue_id;
static pthread_t tid;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1157,7 +1157,7 @@ main(int argc, char *argv[])
nb_switching_cores = rte_lcore_count()-1;
/* Get the number of physical ports. */
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
/*
* Update the global var NB_PORTS and global array PORTS
@@ -1185,7 +1185,7 @@ main(int argc, char *argv[])
vpool_array[queue_id].pool = mbuf_pool;
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
RTE_LOG(INFO, VHOST_PORT,
@@ -1206,13 +1206,10 @@ main(int argc, char *argv[])
/* Enable stats if the user option is set. */
if (enable_stats) {
- ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
- if (ret != 0)
+ ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
+ print_stats, NULL);
+ if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot create print-stats thread\n");
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
- ret = rte_thread_setname(tid, thread_name);
- if (ret != 0)
- RTE_LOG(DEBUG, VHOST_CONFIG, "Cannot set print-stats name\n");
}
/* Launch all data cores. */
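
Note: the stats printer is now started with rte_ctrl_thread_create() (added in 18.05, experimental), which creates and names the control thread in one call and keeps it consistent with the EAL's thread handling, replacing the pthread_create() plus rte_thread_setname() pair. A minimal sketch (the thread body is illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_debug.h>

static void *
stats_loop(__rte_unused void *arg)
{
	/* periodic stats printing would go here */
	return NULL;
}

/* Sketch: one call creates the pthread and gives it a name. */
static void
start_stats_thread(void)
{
	static pthread_t tid;

	if (rte_ctrl_thread_create(&tid, "print-stats", NULL,
			stats_loop, NULL) < 0)
		rte_exit(EXIT_FAILURE, "Cannot create print-stats thread\n");
}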
diff --git a/examples/tep_termination/meson.build b/examples/tep_termination/meson.build
index 68c940aa..6d363624 100644
--- a/examples/tep_termination/meson.build
+++ b/examples/tep_termination/meson.build
@@ -6,7 +6,11 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
+if host_machine.system() != 'linux'
+ build = false
+endif
deps += ['hash', 'vhost']
+allow_experimental_apis = true
sources = files(
'main.c', 'vxlan.c', 'vxlan_setup.c'
)
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index ba7d92aa..b99ab97d 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -69,7 +69,6 @@ uint8_t tep_filter_type[] = {RTE_TUNNEL_FILTER_IMAC_TENID,
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
@@ -131,9 +130,8 @@ vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool)
rxconf = &dev_info.default_rxconf;
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rx_rings = nb_devices;
diff --git a/examples/vhost/Makefile b/examples/vhost/Makefile
index 2dc62ebf..a2ea97a0 100644
--- a/examples/vhost/Makefile
+++ b/examples/vhost/Makefile
@@ -25,6 +25,8 @@ CFLAGS += -O3 $(shell pkg-config --cflags libdpdk)
LDFLAGS_SHARED = $(shell pkg-config --libs libdpdk)
LDFLAGS_STATIC = -Wl,-Bstatic $(shell pkg-config --static --libs libdpdk)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
$(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
@@ -50,12 +52,13 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
else
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -D_GNU_SOURCE
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 60d862b4..2175c118 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -116,7 +116,6 @@ static struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_VMDQ_ONLY,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
/*
* VLAN strip is necessary for 1G NIC such as I350,
* this fixes bug of ipv4 forwarding in guest can't
@@ -256,7 +255,6 @@ port_init(uint16_t port)
rxconf = &dev_info.default_rxconf;
txconf = &dev_info.default_txconf;
rxconf->rx_drop_en = 1;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
/*configure the number of supported virtio devices based on VMDQ limits */
num_devices = dev_info.max_vmdq_pools;
@@ -294,7 +292,8 @@ port_init(uint16_t port)
printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
num_pf_queues, num_devices, queues_per_pool);
- if (port >= rte_eth_dev_count()) return -1;
+ if (!rte_eth_dev_is_valid_port(port))
+ return -1;
rx_rings = (uint16_t)dev_info.max_rx_queues;
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
@@ -667,9 +666,10 @@ static unsigned check_ports_num(unsigned nb_ports)
}
for (portid = 0; portid < num_ports; portid ++) {
- if (ports[portid] >= nb_ports) {
- RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
- ports[portid], (nb_ports - 1));
+ if (!rte_eth_dev_is_valid_port(ports[portid])) {
+ RTE_LOG(INFO, VHOST_PORT,
+ "\nSpecified port ID(%u) is not valid\n",
+ ports[portid]);
ports[portid] = INVALID_PORT_ID;
valid_num_ports--;
}
@@ -1290,8 +1290,8 @@ static const struct vhost_device_ops virtio_net_device_ops =
 * This is a thread that will wake up periodically to print stats if the user
 * has enabled them.
*/
-static void
-print_stats(void)
+static void *
+print_stats(__rte_unused void *arg)
{
struct vhost_dev *vdev;
uint64_t tx_dropped, rx_dropped;
@@ -1330,6 +1330,8 @@ print_stats(void)
printf("===================================================\n");
}
+
+ return NULL;
}
static void
@@ -1418,7 +1420,6 @@ main(int argc, char *argv[])
int ret, i;
uint16_t portid;
static pthread_t tid;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
uint64_t flags = 0;
signal(SIGINT, sigint_handler);
@@ -1446,7 +1447,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE,"Not enough cores\n");
/* Get the number of physical ports. */
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
/*
* Update the global var NUM_PORTS and global array PORTS
@@ -1477,7 +1478,7 @@ main(int argc, char *argv[])
}
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
RTE_LOG(INFO, VHOST_PORT,
@@ -1491,17 +1492,11 @@ main(int argc, char *argv[])
/* Enable stats if the user option is set. */
if (enable_stats) {
- ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
- if (ret != 0)
+ ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
+ print_stats, NULL);
+ if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot create print-stats thread\n");
-
- /* Set thread_name for aid in debugging. */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
- ret = rte_thread_setname(tid, thread_name);
- if (ret != 0)
- RTE_LOG(DEBUG, VHOST_CONFIG,
- "Cannot set print-stats name\n");
}
/* Launch all data cores. */
diff --git a/examples/vhost/meson.build b/examples/vhost/meson.build
index 3e6e6904..7b498076 100644
--- a/examples/vhost/meson.build
+++ b/examples/vhost/meson.build
@@ -6,7 +6,11 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
+if host_machine.system() != 'linux'
+ build = false
+endif
deps += 'vhost'
+allow_experimental_apis = true
sources = files(
'main.c', 'virtio_net.c'
)
diff --git a/examples/vhost/virtio_net.c b/examples/vhost/virtio_net.c
index f6e00674..8ea6b36d 100644
--- a/examples/vhost/virtio_net.c
+++ b/examples/vhost/virtio_net.c
@@ -56,16 +56,20 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
struct rte_mbuf *m, uint16_t desc_idx)
{
uint32_t desc_avail, desc_offset;
+ uint64_t desc_chunck_len;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
struct vring_desc *desc;
- uint64_t desc_addr;
+ uint64_t desc_addr, desc_gaddr;
struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
/* A counter to avoid desc dead loop chain */
uint16_t nr_desc = 1;
desc = &vr->desc[desc_idx];
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = rte_vhost_va_from_guest_pa(
+ dev->mem, desc_gaddr, &desc_chunck_len);
/*
* Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
* performance issue with some versions of gcc (4.8.4 and 5.3.0) which
@@ -77,9 +81,42 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
rte_prefetch0((void *)(uintptr_t)desc_addr);
/* write virtio-net header */
- *(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
+ if (likely(desc_chunck_len >= dev->hdr_len)) {
+ *(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
+ desc_offset = dev->hdr_len;
+ } else {
+ uint64_t len;
+ uint64_t remain = dev->hdr_len;
+ uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst;
+ uint64_t guest_addr = desc_gaddr;
+
+ while (remain) {
+ len = remain;
+ dst = rte_vhost_va_from_guest_pa(dev->mem,
+ guest_addr, &len);
+ if (unlikely(!dst || !len))
+ return -1;
+
+ rte_memcpy((void *)(uintptr_t)dst,
+ (void *)(uintptr_t)src,
+ len);
+
+ remain -= len;
+ guest_addr += len;
+ src += len;
+ }
+
+ desc_chunck_len = desc->len - dev->hdr_len;
+ desc_gaddr += dev->hdr_len;
+ desc_addr = rte_vhost_va_from_guest_pa(
+ dev->mem, desc_gaddr,
+ &desc_chunck_len);
+ if (unlikely(!desc_addr))
+ return -1;
+
+ desc_offset = 0;
+ }
- desc_offset = dev->hdr_len;
desc_avail = desc->len - dev->hdr_len;
mbuf_avail = rte_pktmbuf_data_len(m);
@@ -104,15 +141,28 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
return -1;
desc = &vr->desc[desc->next];
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = rte_vhost_va_from_guest_pa(
+ dev->mem, desc_gaddr, &desc_chunck_len);
if (unlikely(!desc_addr))
return -1;
desc_offset = 0;
desc_avail = desc->len;
+ } else if (unlikely(desc_chunck_len == 0)) {
+ desc_chunck_len = desc_avail;
+ desc_gaddr += desc_offset;
+ desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
+ desc_gaddr,
+ &desc_chunck_len);
+ if (unlikely(!desc_addr))
+ return -1;
+
+ desc_offset = 0;
}
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
@@ -121,6 +171,7 @@ enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
mbuf_offset += cpy_len;
desc_avail -= cpy_len;
desc_offset += cpy_len;
+ desc_chunck_len -= cpy_len;
}
return 0;
@@ -189,8 +240,9 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
struct rte_mempool *mbuf_pool)
{
struct vring_desc *desc;
- uint64_t desc_addr;
+ uint64_t desc_addr, desc_gaddr;
uint32_t desc_avail, desc_offset;
+ uint64_t desc_chunck_len;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
struct rte_mbuf *cur = m, *prev = m;
@@ -202,7 +254,10 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
(desc->flags & VRING_DESC_F_INDIRECT))
return -1;
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = rte_vhost_va_from_guest_pa(
+ dev->mem, desc_gaddr, &desc_chunck_len);
if (unlikely(!desc_addr))
return -1;
@@ -216,7 +271,10 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
* header.
*/
desc = &vr->desc[desc->next];
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = rte_vhost_va_from_guest_pa(
+ dev->mem, desc_gaddr, &desc_chunck_len);
if (unlikely(!desc_addr))
return -1;
rte_prefetch0((void *)(uintptr_t)desc_addr);
@@ -228,7 +286,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
mbuf_offset = 0;
mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
while (1) {
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
mbuf_offset),
(void *)((uintptr_t)(desc_addr + desc_offset)),
@@ -238,6 +296,7 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
mbuf_offset += cpy_len;
desc_avail -= cpy_len;
desc_offset += cpy_len;
+ desc_chunck_len -= cpy_len;
/* This desc reaches to its end, get the next one */
if (desc_avail == 0) {
@@ -249,13 +308,26 @@ dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
return -1;
desc = &vr->desc[desc->next];
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = rte_vhost_va_from_guest_pa(
+ dev->mem, desc_gaddr, &desc_chunck_len);
if (unlikely(!desc_addr))
return -1;
rte_prefetch0((void *)(uintptr_t)desc_addr);
desc_offset = 0;
desc_avail = desc->len;
+ } else if (unlikely(desc_chunck_len == 0)) {
+ desc_chunck_len = desc_avail;
+ desc_gaddr += desc_offset;
+ desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
+ desc_gaddr,
+ &desc_chunck_len);
+ if (unlikely(!desc_addr))
+ return -1;
+
+ desc_offset = 0;
}
/*
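
Note: rte_vhost_gpa_to_vva() assumed a guest-physically-contiguous descriptor maps to one contiguous host range. rte_vhost_va_from_guest_pa() additionally returns, through its len argument, how many bytes are actually contiguous at the translated address, which is why the copy loops above re-translate whenever a chunk is exhausted. A minimal sketch of the pattern, assuming `mem` is the registered guest memory (the helper name is illustrative):

#include <stdint.h>
#include <rte_vhost.h>
#include <rte_memcpy.h>

/* Sketch: copy 'size' bytes from guest physical address 'gpa' into
 * 'dst', re-translating whenever the contiguous chunk runs out. */
static int
copy_from_guest(struct rte_vhost_memory *mem, void *dst,
		uint64_t gpa, uint64_t size)
{
	while (size) {
		uint64_t chunk = size;
		uint64_t vva = rte_vhost_va_from_guest_pa(mem, gpa, &chunk);

		if (!vva || !chunk)
			return -1; /* unmapped guest address */

		rte_memcpy(dst, (void *)(uintptr_t)vva, chunk);
		dst = (uint8_t *)dst + chunk;
		gpa += chunk;
		size -= chunk;
	}
	return 0;
}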
diff --git a/examples/multi_process/l2fwd_fork/Makefile b/examples/vhost_crypto/Makefile
index b65582ef..83d33101 100644
--- a/examples/multi_process/l2fwd_fork/Makefile
+++ b/examples/vhost_crypto/Makefile
@@ -1,11 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2010-2014 Intel Corporation
-
-# binary name
-APP = l2fwd-fork
-
-# all source are stored in SRCS-y
-SRCS-y := main.c flib.c
+# Copyright(c) 2017-2018 Intel Corporation
ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
@@ -16,7 +10,23 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-CFLAGS += -O3
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+$(info This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+else
+
+# binary name
+APP = vhost-crypto
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -D_GNU_SOURCE
include $(RTE_SDK)/mk/rte.extapp.mk
+
+endif
diff --git a/examples/vhost_crypto/main.c b/examples/vhost_crypto/main.c
new file mode 100644
index 00000000..f334d712
--- /dev/null
+++ b/examples/vhost_crypto/main.c
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <getopt.h>
+
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_vhost.h>
+#include <rte_cryptodev.h>
+#include <rte_vhost_crypto.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
+#include <cmdline.h>
+
+#define NB_VIRTIO_QUEUES (1)
+#define MAX_PKT_BURST (64)
+#define MAX_IV_LEN (32)
+#define NB_MEMPOOL_OBJS (8192)
+#define NB_CRYPTO_DESCRIPTORS (4096)
+#define NB_CACHE_OBJS (128)
+#define SESSION_MAP_ENTRIES (1024)
+#define REFRESH_TIME_SEC (3)
+
+#define MAX_NB_SOCKETS (32)
+#define DEF_SOCKET_FILE "/tmp/vhost_crypto1.socket"
+
+struct vhost_crypto_options {
+ char *socket_files[MAX_NB_SOCKETS];
+ uint32_t nb_sockets;
+ uint8_t cid;
+ uint16_t qid;
+ uint32_t zero_copy;
+ uint32_t guest_polling;
+} options;
+
+struct vhost_crypto_info {
+ int vids[MAX_NB_SOCKETS];
+ struct rte_mempool *sess_pool;
+ struct rte_mempool *cop_pool;
+ uint32_t lcore_id;
+ uint8_t cid;
+ uint32_t qid;
+ uint32_t nb_vids;
+ volatile uint32_t initialized[MAX_NB_SOCKETS];
+
+} info;
+
+#define SOCKET_FILE_KEYWORD "socket-file"
+#define CRYPTODEV_ID_KEYWORD "cdev-id"
+#define CRYPTODEV_QUEUE_KEYWORD "cdev-queue-id"
+#define ZERO_COPY_KEYWORD "zero-copy"
+#define POLLING_KEYWORD "guest-polling"
+
+uint64_t vhost_cycles[2], last_v_cycles[2];
+uint64_t outpkt_amount;
+
+/** support *SOCKET_FILE_PATH:CRYPTODEV_ID* format */
+static int
+parse_socket_arg(char *arg)
+{
+ uint32_t nb_sockets = options.nb_sockets;
+ size_t len = strlen(arg);
+
+ if (nb_sockets >= MAX_NB_SOCKETS) {
+ RTE_LOG(ERR, USER1, "Too many socket files!\n");
+ return -ENOMEM;
+ }
+
+ options.socket_files[nb_sockets] = rte_malloc(NULL, len + 1, 0);
+ if (!options.socket_files[nb_sockets]) {
+ RTE_LOG(ERR, USER1, "Insufficient memory\n");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(options.socket_files[nb_sockets], arg, len + 1);
+
+ options.nb_sockets++;
+
+ return 0;
+}
+
+static int
+parse_cryptodev_id(const char *q_arg)
+{
+ char *end = NULL;
+ uint64_t pm;
+
+ /* parse decimal string */
+ pm = strtoul(q_arg, &end, 10);
+ if (pm >= rte_cryptodev_count()) {
+ RTE_LOG(ERR, USER1, "Invalid Cryptodev ID %s\n", q_arg);
+ return -1;
+ }
+
+ options.cid = (uint8_t)pm;
+
+ return 0;
+}
+
+static int
+parse_cdev_queue_id(const char *q_arg)
+{
+ char *end = NULL;
+ uint64_t pm;
+
+ /* parse decimal string */
+ pm = strtoul(q_arg, &end, 10);
+ if (pm == UINT64_MAX) {
+ RTE_LOG(ERR, USER1, "Invalid Cryptodev Queue ID %s\n", q_arg);
+ return -1;
+ }
+
+ options.qid = (uint16_t)pm;
+
+ return 0;
+}
+
+static void
+vhost_crypto_usage(const char *prgname)
+{
+ printf("%s [EAL options] --\n"
+ " --%s SOCKET-FILE-PATH\n"
+ " --%s CRYPTODEV_ID: crypto device id\n"
+ " --%s CDEV_QUEUE_ID: crypto device queue id\n"
+ " --%s: zero copy\n"
+ " --%s: guest polling\n",
+ prgname, SOCKET_FILE_KEYWORD, CRYPTODEV_ID_KEYWORD,
+ CRYPTODEV_QUEUE_KEYWORD, ZERO_COPY_KEYWORD, POLLING_KEYWORD);
+}
+
+static int
+vhost_crypto_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char *prgname = argv[0];
+ char **argvopt;
+ int option_index;
+ struct option lgopts[] = {
+ {SOCKET_FILE_KEYWORD, required_argument, 0, 0},
+ {CRYPTODEV_ID_KEYWORD, required_argument, 0, 0},
+ {CRYPTODEV_QUEUE_KEYWORD, required_argument, 0, 0},
+ {ZERO_COPY_KEYWORD, no_argument, 0, 0},
+ {POLLING_KEYWORD, no_argument, 0, 0},
+ {NULL, 0, 0, 0}
+ };
+
+ options.cid = 0;
+ options.qid = 0;
+ options.nb_sockets = 0;
+ options.guest_polling = 0;
+ options.zero_copy = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
+
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "s:",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ case 0:
+ if (strcmp(lgopts[option_index].name,
+ SOCKET_FILE_KEYWORD) == 0) {
+ ret = parse_socket_arg(optarg);
+ if (ret < 0) {
+ vhost_crypto_usage(prgname);
+ return ret;
+ }
+ } else if (strcmp(lgopts[option_index].name,
+ CRYPTODEV_ID_KEYWORD) == 0) {
+ ret = parse_cryptodev_id(optarg);
+ if (ret < 0) {
+ vhost_crypto_usage(prgname);
+ return ret;
+ }
+ } else if (strcmp(lgopts[option_index].name,
+ CRYPTODEV_QUEUE_KEYWORD) == 0) {
+ ret = parse_cdev_queue_id(optarg);
+ if (ret < 0) {
+ vhost_crypto_usage(prgname);
+ return ret;
+ }
+ } else if (strcmp(lgopts[option_index].name,
+ ZERO_COPY_KEYWORD) == 0) {
+ options.zero_copy =
+ RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE;
+ } else if (strcmp(lgopts[option_index].name,
+ POLLING_KEYWORD) == 0) {
+ options.guest_polling = 1;
+ } else {
+ vhost_crypto_usage(prgname);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ if (options.nb_sockets == 0) {
+ options.socket_files[0] = strdup(DEF_SOCKET_FILE);
+ options.nb_sockets = 1;
+ RTE_LOG(INFO, USER1,
+ "VHOST-CRYPTO: use default socket file %s\n",
+ DEF_SOCKET_FILE);
+ }
+
+ return 0;
+}
+
+static int
+new_device(int vid)
+{
+ char path[PATH_MAX];
+ uint32_t idx, i;
+ int ret;
+
+ ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Cannot find matched socket\n");
+ return ret;
+ }
+
+ for (idx = 0; idx < options.nb_sockets; idx++) {
+ if (strcmp(path, options.socket_files[idx]) == 0)
+ break;
+ }
+
+ if (idx == options.nb_sockets) {
+ RTE_LOG(ERR, USER1, "Cannot find recorded socket\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; i < 2; i++) {
+ vhost_cycles[i] = 0;
+ last_v_cycles[i] = 0;
+ }
+
+ ret = rte_vhost_crypto_create(vid, info.cid, info.sess_pool,
+ rte_lcore_to_socket_id(info.lcore_id));
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Cannot create vhost crypto\n");
+ return ret;
+ }
+
+ ret = rte_vhost_crypto_set_zero_copy(vid, options.zero_copy);
+ if (ret) {
+ RTE_LOG(ERR, USER1, "Cannot %s zero copy feature\n",
+ options.zero_copy == 1 ? "enable" : "disable");
+ return ret;
+ }
+
+ info.vids[idx] = vid;
+ info.initialized[idx] = 1;
+
+ rte_wmb();
+
+ RTE_LOG(INFO, USER1, "New Vhost-crypto Device %s, Device ID %d\n", path,
+ vid);
+ return 0;
+}
+
+static void
+destroy_device(int vid)
+{
+ uint32_t i;
+
+ for (i = 0; i < info.nb_vids; i++) {
+ if (vid == info.vids[i])
+ break;
+ }
+
+ if (i == info.nb_vids) {
+ RTE_LOG(ERR, USER1, "Cannot find socket file from list\n");
+ return;
+ }
+
+ info.initialized[i] = 0;
+
+ rte_wmb();
+
+ rte_vhost_crypto_free(vid);
+
+ RTE_LOG(INFO, USER1, "Vhost Crypto Device %i Removed\n", vid);
+}
+
+static const struct vhost_device_ops virtio_crypto_device_ops = {
+ .new_device = new_device,
+ .destroy_device = destroy_device,
+};
+
+__attribute__((unused))
+static void clrscr(void)
+{
+ system("@cls||clear");
+}
+
+static int
+vhost_crypto_worker(__rte_unused void *arg)
+{
+ struct rte_crypto_op *ops[NB_VIRTIO_QUEUES][MAX_PKT_BURST + 1];
+ struct rte_crypto_op *ops_deq[NB_VIRTIO_QUEUES][MAX_PKT_BURST + 1];
+ uint32_t nb_inflight_ops = 0;
+ uint16_t nb_callfds;
+ int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
+ uint32_t lcore_id = rte_lcore_id();
+ uint32_t burst_size = MAX_PKT_BURST;
+ uint32_t i, j, k;
+ uint32_t to_fetch, fetched;
+ uint64_t t_start, t_end, interval;
+
+ int ret = 0;
+
+ RTE_LOG(INFO, USER1, "Processing on Core %u started\n", lcore_id);
+
+ for (i = 0; i < NB_VIRTIO_QUEUES; i++) {
+ if (rte_crypto_op_bulk_alloc(info.cop_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops[i],
+ burst_size) < burst_size) {
+ RTE_LOG(ERR, USER1, "Failed to alloc cops\n");
+ ret = -1;
+ goto exit;
+ }
+ }
+
+ while (1) {
+ for (i = 0; i < info.nb_vids; i++) {
+ if (unlikely(info.initialized[i] == 0))
+ continue;
+
+ for (j = 0; j < NB_VIRTIO_QUEUES; j++) {
+ t_start = rte_rdtsc_precise();
+
+ to_fetch = RTE_MIN(burst_size,
+ (NB_CRYPTO_DESCRIPTORS -
+ nb_inflight_ops));
+ fetched = rte_vhost_crypto_fetch_requests(
+ info.vids[i], j, ops[j],
+ to_fetch);
+ nb_inflight_ops += rte_cryptodev_enqueue_burst(
+ info.cid, info.qid, ops[j],
+ fetched);
+ if (unlikely(rte_crypto_op_bulk_alloc(
+ info.cop_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops[j], fetched) < fetched)) {
+ RTE_LOG(ERR, USER1, "Failed realloc\n");
+ return -1;
+ }
+ t_end = rte_rdtsc_precise();
+ interval = t_end - t_start;
+
+ vhost_cycles[fetched > 0] += interval;
+
+ t_start = t_end;
+ fetched = rte_cryptodev_dequeue_burst(
+ info.cid, info.qid,
+ ops_deq[j], RTE_MIN(burst_size,
+ nb_inflight_ops));
+ fetched = rte_vhost_crypto_finalize_requests(
+ ops_deq[j], fetched, callfds,
+ &nb_callfds);
+
+ nb_inflight_ops -= fetched;
+ outpkt_amount += fetched;
+
+ if (!options.guest_polling) {
+ for (k = 0; k < nb_callfds; k++)
+ eventfd_write(callfds[k],
+ (eventfd_t)1);
+ }
+
+ rte_mempool_put_bulk(info.cop_pool,
+ (void **)ops_deq[j], fetched);
+ interval = rte_rdtsc_precise() - t_start;
+
+ vhost_cycles[fetched > 0] += interval;
+ }
+ }
+ }
+exit:
+ return ret;
+}
+
+
+static void
+unregister_drivers(int socket_num)
+{
+ int ret;
+
+ ret = rte_vhost_driver_unregister(options.socket_files[socket_num]);
+ if (ret != 0)
+ RTE_LOG(ERR, USER1,
+ "Fail to unregister vhost driver for %s.\n",
+ options.socket_files[socket_num]);
+}
+
+int
+main(int argc, char *argv[])
+{
+ struct rte_cryptodev_qp_conf qp_conf = {NB_CRYPTO_DESCRIPTORS};
+ struct rte_cryptodev_config config;
+ struct rte_cryptodev_info dev_info;
+ uint32_t cryptodev_id;
+ uint32_t worker_lcore;
+ char name[128];
+ uint32_t i = 0;
+ int ret;
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ return -1;
+ argc -= ret;
+ argv += ret;
+
+ ret = vhost_crypto_parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Failed to parse arguments!\n");
+
+ info.cid = options.cid;
+ info.qid = options.qid;
+
+ worker_lcore = rte_get_next_lcore(0, 1, 0);
+ if (worker_lcore == RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE, "Not enough lcore\n");
+
+ cryptodev_id = info.cid;
+ rte_cryptodev_info_get(cryptodev_id, &dev_info);
+ if (dev_info.max_nb_queue_pairs < info.qid + 1) {
+ RTE_LOG(ERR, USER1, "Number of queues cannot over %u",
+ dev_info.max_nb_queue_pairs);
+ goto error_exit;
+ }
+
+ config.nb_queue_pairs = dev_info.max_nb_queue_pairs;
+ config.socket_id = rte_lcore_to_socket_id(worker_lcore);
+
+ ret = rte_cryptodev_configure(cryptodev_id, &config);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "Failed to configure cryptodev %u",
+ cryptodev_id);
+ goto error_exit;
+ }
+
+ snprintf(name, 127, "SESS_POOL_%u", worker_lcore);
+ info.sess_pool = rte_mempool_create(name, SESSION_MAP_ENTRIES,
+ rte_cryptodev_sym_get_private_session_size(
+ cryptodev_id), 64, 0, NULL, NULL, NULL, NULL,
+ rte_lcore_to_socket_id(worker_lcore), 0);
+ if (!info.sess_pool) {
+ RTE_LOG(ERR, USER1, "Failed to create mempool");
+ goto error_exit;
+ }
+
+ snprintf(name, 127, "COPPOOL_%u", worker_lcore);
+ info.cop_pool = rte_crypto_op_pool_create(name,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MEMPOOL_OBJS,
+ NB_CACHE_OBJS, 0, rte_lcore_to_socket_id(worker_lcore));
+
+ if (!info.cop_pool) {
+ RTE_LOG(ERR, USER1, "Lcore %u failed to create crypto pool",
+ worker_lcore);
+ ret = -1;
+ goto error_exit;
+ }
+
+ info.nb_vids = options.nb_sockets;
+ for (i = 0; i < MAX_NB_SOCKETS; i++)
+ info.vids[i] = -1;
+
+ for (i = 0; i < dev_info.max_nb_queue_pairs; i++) {
+ ret = rte_cryptodev_queue_pair_setup(cryptodev_id, i,
+ &qp_conf, rte_lcore_to_socket_id(worker_lcore),
+ info.sess_pool);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "Failed to configure qp %u\n",
+ info.cid);
+ goto error_exit;
+ }
+ }
+
+ ret = rte_cryptodev_start(cryptodev_id);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1, "Failed to start cryptodev %u\n", info.cid);
+ goto error_exit;
+ }
+
+ info.cid = cryptodev_id;
+ info.lcore_id = worker_lcore;
+
+ if (rte_eal_remote_launch(vhost_crypto_worker, NULL, worker_lcore)
+ < 0) {
+ RTE_LOG(ERR, USER1, "Failed to start worker lcore");
+ goto error_exit;
+ }
+
+ for (i = 0; i < options.nb_sockets; i++) {
+ if (rte_vhost_driver_register(options.socket_files[i],
+ RTE_VHOST_USER_DEQUEUE_ZERO_COPY) < 0) {
+ RTE_LOG(ERR, USER1, "socket %s already exists\n",
+ options.socket_files[i]);
+ goto error_exit;
+ }
+
+ rte_vhost_driver_callback_register(options.socket_files[i],
+ &virtio_crypto_device_ops);
+
+ if (rte_vhost_driver_start(options.socket_files[i]) < 0) {
+ RTE_LOG(ERR, USER1, "failed to start vhost driver.\n");
+ goto error_exit;
+ }
+ }
+
+ RTE_LCORE_FOREACH(worker_lcore)
+ rte_eal_wait_lcore(worker_lcore);
+
+ rte_mempool_free(info.sess_pool);
+ rte_mempool_free(info.cop_pool);
+
+ return 0;
+
+error_exit:
+ for (i = 0; i < options.nb_sockets; i++)
+ unregister_drivers(i);
+
+ rte_mempool_free(info.cop_pool);
+ rte_mempool_free(info.sess_pool);
+
+ return -1;
+}
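
Note: for reference, a typical invocation of the new example, based on the option keywords defined in main.c above. The EAL core list and the crypto vdev are illustrative; any cryptodev available in the build would do:

./build/vhost-crypto -l 0-1 --vdev crypto_aesni_mb -- \
	--socket-file /tmp/vhost_crypto1.socket \
	--cdev-id 0 --cdev-queue-id 0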
diff --git a/examples/vhost_crypto/meson.build b/examples/vhost_crypto/meson.build
new file mode 100644
index 00000000..0f4876f0
--- /dev/null
+++ b/examples/vhost_crypto/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+allow_experimental_apis = true
+deps += ['vhost', 'cryptodev']
+cflags += ['-D_GNU_SOURCE','-D_FILE_OFFSET_BITS=64']
+sources = files(
+ 'main.c'
+)
diff --git a/examples/vhost_scsi/Makefile b/examples/vhost_scsi/Makefile
index 31bd2563..fa0cf727 100644
--- a/examples/vhost_scsi/Makefile
+++ b/examples/vhost_scsi/Makefile
@@ -51,7 +51,7 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+ifneq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
$(info This application can only operate in a linuxapp environment, \
please change the definition of the RTE_TARGET environment variable)
all:
diff --git a/examples/vhost_scsi/meson.build b/examples/vhost_scsi/meson.build
index bd78e84b..5f92370f 100644
--- a/examples/vhost_scsi/meson.build
+++ b/examples/vhost_scsi/meson.build
@@ -6,6 +6,9 @@
# To build this example as a standalone application with an already-installed
# DPDK instance, use 'make'
+if host_machine.system() != 'linux'
+ build = false
+endif
deps += 'vhost'
cflags += ['-D_GNU_SOURCE','-D_FILE_OFFSET_BITS=64']
sources = files(
diff --git a/examples/vhost_scsi/scsi.c b/examples/vhost_scsi/scsi.c
index 2a034bb9..0c2fa3e6 100644
--- a/examples/vhost_scsi/scsi.c
+++ b/examples/vhost_scsi/scsi.c
@@ -20,6 +20,7 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
+#include <rte_string_fns.h>
#include "vhost_scsi.h"
#include "scsi_spec.h"
@@ -181,7 +182,8 @@ vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
break;
case SPC_VPD_UNIT_SERIAL_NUMBER:
hlen = 4;
- strncpy((char *)vpage->params, bdev->name, 32);
+ strlcpy((char *)vpage->params, bdev->name,
+ sizeof(vpage->params));
vpage->alloc_len = rte_cpu_to_be_16(32);
break;
case SPC_VPD_DEVICE_IDENTIFICATION:
@@ -215,10 +217,10 @@ vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
desig->piv = 1;
desig->reserved1 = 0;
desig->len = 8 + 16 + 32;
- strncpy((char *)desig->desig, "INTEL", 8);
+ strlcpy((char *)desig->desig, "INTEL", 8);
vhost_strcpy_pad((char *)&desig->desig[8],
bdev->product_name, 16, ' ');
- strncpy((char *)&desig->desig[24], bdev->name, 32);
+ strlcpy((char *)&desig->desig[24], bdev->name, 32);
len += sizeof(struct scsi_desig_desc) + 8 + 16 + 32;
buf += sizeof(struct scsi_desig_desc) + desig->len;
@@ -275,7 +277,8 @@ vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
inqdata->flags3 = 0x2;
/* T10 VENDOR IDENTIFICATION */
- strncpy((char *)inqdata->t10_vendor_id, "INTEL", 8);
+ strlcpy((char *)inqdata->t10_vendor_id, "INTEL",
+ sizeof(inqdata->t10_vendor_id));
/* PRODUCT IDENTIFICATION */
snprintf((char *)inqdata->product_id,
@@ -283,7 +286,8 @@ vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
bdev->product_name);
/* PRODUCT REVISION LEVEL */
- strncpy((char *)inqdata->product_rev, "0001", 4);
+ strlcpy((char *)inqdata->product_rev, "0001",
+ sizeof(inqdata->product_rev));
/* Standard inquiry data ends here. Only populate
* remaining fields if alloc_len indicates enough
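
Note: the strncpy()-to-strlcpy() conversions matter because strncpy() does not NUL-terminate when the source fills the buffer, while strlcpy() (from rte_string_fns.h) always terminates and makes truncation detectable. A minimal sketch:

#include <stdio.h>
#include <rte_string_fns.h>

/* Sketch: strlcpy always NUL-terminates; a return value >= the buffer
 * size signals that the source string was truncated. */
static void
set_vendor_id(char *dst, size_t dstlen)
{
	if (strlcpy(dst, "INTEL", dstlen) >= dstlen)
		fprintf(stderr, "vendor id truncated\n");
}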
diff --git a/examples/vhost_scsi/scsi_spec.h b/examples/vhost_scsi/scsi_spec.h
index 5c7a894b..27be0268 100644
--- a/examples/vhost_scsi/scsi_spec.h
+++ b/examples/vhost_scsi/scsi_spec.h
@@ -367,7 +367,7 @@ struct scsi_vpd_page {
uint8_t peripheral;
uint8_t page_code;
uint16_t alloc_len;
- uint8_t params[];
+ uint8_t params[32];
};
#define SCSI_VEXT_REF_CHK 0x01
diff --git a/examples/vhost_scsi/vhost_scsi.c b/examples/vhost_scsi/vhost_scsi.c
index 3cb4383e..2908ff68 100644
--- a/examples/vhost_scsi/vhost_scsi.c
+++ b/examples/vhost_scsi/vhost_scsi.c
@@ -38,7 +38,7 @@ vhost_scsi_ctrlr_find(__rte_unused const char *ctrlr_name)
return g_vhost_ctrlr;
}
-static uint64_t gpa_to_vva(int vid, uint64_t gpa)
+static uint64_t gpa_to_vva(int vid, uint64_t gpa, uint64_t *len)
{
char path[PATH_MAX];
struct vhost_scsi_ctrlr *ctrlr;
@@ -58,7 +58,7 @@ static uint64_t gpa_to_vva(int vid, uint64_t gpa)
assert(ctrlr->mem != NULL);
- return rte_vhost_gpa_to_vva(ctrlr->mem, gpa);
+ return rte_vhost_va_from_guest_pa(ctrlr->mem, gpa, len);
}
static struct vring_desc *
@@ -108,15 +108,29 @@ static void
vhost_process_read_payload_chain(struct vhost_scsi_task *task)
{
void *data;
+ uint64_t chunck_len;
task->iovs_cnt = 0;
+ chunck_len = task->desc->len;
task->resp = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
- task->desc->addr);
+ task->desc->addr,
+ &chunck_len);
+ if (!task->resp || chunck_len != task->desc->len) {
+ fprintf(stderr, "failed to translate desc address.\n");
+ return;
+ }
while (descriptor_has_next(task->desc)) {
task->desc = descriptor_get_next(task->vq->desc, task->desc);
+ chunck_len = task->desc->len;
data = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
- task->desc->addr);
+ task->desc->addr,
+ &chunck_len);
+ if (!data || chunck_len != task->desc->len) {
+ fprintf(stderr, "failed to translate desc address.\n");
+ return;
+ }
+
task->iovs[task->iovs_cnt].iov_base = data;
task->iovs[task->iovs_cnt].iov_len = task->desc->len;
task->data_len += task->desc->len;
@@ -128,12 +142,20 @@ static void
vhost_process_write_payload_chain(struct vhost_scsi_task *task)
{
void *data;
+ uint64_t chunck_len;
task->iovs_cnt = 0;
do {
+ chunck_len = task->desc->len;
data = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
- task->desc->addr);
+ task->desc->addr,
+ &chunck_len);
+ if (!data || chunck_len != task->desc->len) {
+ fprintf(stderr, "failed to translate desc address.\n");
+ return;
+ }
+
task->iovs[task->iovs_cnt].iov_base = data;
task->iovs[task->iovs_cnt].iov_len = task->desc->len;
task->data_len += task->desc->len;
@@ -141,8 +163,12 @@ vhost_process_write_payload_chain(struct vhost_scsi_task *task)
task->desc = descriptor_get_next(task->vq->desc, task->desc);
} while (descriptor_has_next(task->desc));
+ chunck_len = task->desc->len;
task->resp = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
- task->desc->addr);
+ task->desc->addr,
+ &chunck_len);
+ if (!task->resp || chunck_len != task->desc->len)
+ fprintf(stderr, "failed to translate desc address.\n");
}
static struct vhost_block_dev *
@@ -188,6 +214,7 @@ process_requestq(struct vhost_scsi_ctrlr *ctrlr, uint32_t q_idx)
int req_idx;
uint16_t last_idx;
struct vhost_scsi_task *task;
+ uint64_t chunck_len;
last_idx = scsi_vq->last_used_idx & (vq->size - 1);
req_idx = vq->avail->ring[last_idx];
@@ -205,16 +232,27 @@ process_requestq(struct vhost_scsi_ctrlr *ctrlr, uint32_t q_idx)
assert((task->desc->flags & VRING_DESC_F_INDIRECT) == 0);
scsi_vq->last_used_idx++;
+ chunck_len = task->desc->len;
task->req = (void *)(uintptr_t)gpa_to_vva(task->bdev->vid,
- task->desc->addr);
+ task->desc->addr,
+ &chunck_len);
+ if (!task->req || chunck_len != task->desc->len) {
+ fprintf(stderr, "failed to translate desc address.\n");
+ return;
+ }
task->desc = descriptor_get_next(task->vq->desc, task->desc);
if (!descriptor_has_next(task->desc)) {
task->dxfer_dir = SCSI_DIR_NONE;
+ chunck_len = task->desc->len;
task->resp = (void *)(uintptr_t)
gpa_to_vva(task->bdev->vid,
- task->desc->addr);
-
+ task->desc->addr,
+ &chunck_len);
+ if (!task->resp || chunck_len != task->desc->len) {
+ fprintf(stderr, "failed to translate desc address.\n");
+ return;
+ }
} else if (!descriptor_is_wr(task->desc)) {
task->dxfer_dir = SCSI_DIR_TO_DEV;
vhost_process_write_payload_chain(task);
diff --git a/examples/vm_power_manager/Makefile b/examples/vm_power_manager/Makefile
index ef2a9f95..13a5205b 100644
--- a/examples/vm_power_manager/Makefile
+++ b/examples/vm_power_manager/Makefile
@@ -19,7 +19,12 @@ APP = vm_power_mgr
# all source are stored in SRCS-y
SRCS-y := main.c vm_power_cli.c power_manager.c channel_manager.c
-SRCS-y += channel_monitor.c
+SRCS-y += channel_monitor.c parse.c
+ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+SRCS-y += oob_monitor_x86.c
+else
+SRCS-y += oob_monitor_nop.c
+endif
CFLAGS += -O3 -I$(RTE_SDK)/lib/librte_power/
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index 1c7b6eb2..7fa47ba9 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -27,6 +27,7 @@
#include "channel_commands.h"
#include "channel_manager.h"
#include "power_manager.h"
+#include "oob_monitor.h"
#define RTE_LOGTYPE_CHANNEL_MONITOR RTE_LOGTYPE_USER1
@@ -92,6 +93,10 @@ get_pcpu_to_control(struct policy *pol)
struct vm_info info;
int pcpu, count;
uint64_t mask_u64b;
+ struct core_info *ci;
+ int ret;
+
+ ci = get_core_info();
RTE_LOG(INFO, CHANNEL_MONITOR, "Looking for pcpu for %s\n",
pol->pkt.vm_name);
@@ -100,8 +105,22 @@ get_pcpu_to_control(struct policy *pol)
for (count = 0; count < pol->pkt.num_vcpu; count++) {
mask_u64b = info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
for (pcpu = 0; mask_u64b; mask_u64b &= ~(1ULL << pcpu++)) {
- if ((mask_u64b >> pcpu) & 1)
- pol->core_share[count].pcpu = pcpu;
+ if ((mask_u64b >> pcpu) & 1) {
+ if (pol->pkt.policy_to_use == BRANCH_RATIO) {
+ ci->cd[pcpu].oob_enabled = 1;
+ ret = add_core_to_monitor(pcpu);
+ if (ret == 0)
+ printf("Monitoring pcpu %d via Branch Ratio\n",
+ pcpu);
+ else
+ printf("Failed to start OOB Monitoring pcpu %d\n",
+ pcpu);
+
+ } else {
+ pol->core_share[count].pcpu = pcpu;
+ printf("Monitoring pcpu %d\n", pcpu);
+ }
+ }
}
}
}
@@ -110,12 +129,11 @@ static int
get_pfid(struct policy *pol)
{
- int i, x, ret = 0, nb_ports;
+ int i, x, ret = 0;
- nb_ports = rte_eth_dev_count();
for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {
- for (x = 0; x < nb_ports; x++) {
+ RTE_ETH_FOREACH_DEV(x) {
ret = rte_pmd_i40e_query_vfid_by_mac(x,
(struct ether_addr *)&(pol->pkt.vfid[i]));
if (ret != -EINVAL) {
diff --git a/examples/vm_power_manager/guest_cli/Makefile b/examples/vm_power_manager/guest_cli/Makefile
index d710e22d..8b1db861 100644
--- a/examples/vm_power_manager/guest_cli/Makefile
+++ b/examples/vm_power_manager/guest_cli/Makefile
@@ -14,7 +14,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
APP = guest_vm_power_mgr
# all source are stored in SRCS-y
-SRCS-y := main.c vm_power_cli_guest.c
+SRCS-y := main.c vm_power_cli_guest.c parse.c
CFLAGS += -O3 -I$(RTE_SDK)/lib/librte_power/
CFLAGS += $(WERROR_FLAGS)
diff --git a/examples/vm_power_manager/guest_cli/main.c b/examples/vm_power_manager/guest_cli/main.c
index b17936d6..36365b12 100644
--- a/examples/vm_power_manager/guest_cli/main.c
+++ b/examples/vm_power_manager/guest_cli/main.c
@@ -2,23 +2,20 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
-/*
#include <stdio.h>
-#include <string.h>
-#include <stdint.h>
-#include <sys/epoll.h>
-#include <fcntl.h>
-#include <unistd.h>
#include <stdlib.h>
-#include <errno.h>
-*/
#include <signal.h>
+#include <getopt.h>
+#include <string.h>
#include <rte_lcore.h>
#include <rte_power.h>
#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_log.h>
#include "vm_power_cli_guest.h"
+#include "parse.h"
static void
sig_handler(int signo)
@@ -32,6 +29,136 @@ sig_handler(int signo)
}
+#define MAX_HOURS 24
+
+/* Parse the argument given in the command line of the application */
+static int
+parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ const struct option lgopts[] = {
+ { "vm-name", required_argument, 0, 'n'},
+ { "busy-hours", required_argument, 0, 'b'},
+ { "quiet-hours", required_argument, 0, 'q'},
+ { "port-list", required_argument, 0, 'p'},
+ { "vcpu-list", required_argument, 0, 'l'},
+ { "policy", required_argument, 0, 'o'},
+ {NULL, 0, 0, 0}
+ };
+ struct channel_packet *policy;
+ unsigned short int hours[MAX_HOURS];
+ unsigned short int cores[MAX_VCPU_PER_VM];
+ unsigned short int ports[MAX_VCPU_PER_VM];
+ int i, cnt, idx;
+
+ policy = get_policy();
+ set_policy_defaults(policy);
+
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "n:b:q:p:",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'n':
+ strcpy(policy->vm_name, optarg);
+ printf("Setting VM Name to [%s]\n", policy->vm_name);
+ break;
+ case 'b':
+ case 'q':
+ cnt = parse_set(optarg, hours, MAX_HOURS);
+ if (cnt < 0) {
+ printf("Invalid value passed to quiet/busy hours - [%s]\n",
+ optarg);
+ break;
+ }
+ idx = 0;
+ for (i = 0; i < MAX_HOURS; i++) {
+ if (hours[i]) {
+ if (opt == 'b') {
+ printf("***Busy Hour %d\n", i);
+ policy->timer_policy.busy_hours
+ [idx++] = i;
+ } else {
+ printf("***Quiet Hour %d\n", i);
+ policy->timer_policy.quiet_hours
+ [idx++] = i;
+ }
+ }
+ }
+ break;
+ case 'l':
+ cnt = parse_set(optarg, cores, MAX_VCPU_PER_VM);
+ if (cnt < 0) {
+ printf("Invalid value passed to vcpu-list - [%s]\n",
+ optarg);
+ break;
+ }
+ idx = 0;
+ for (i = 0; i < MAX_VCPU_PER_VM; i++) {
+ if (cores[i]) {
+ printf("***Using core %d\n", i);
+ policy->vcpu_to_control[idx++] = i;
+ }
+ }
+ policy->num_vcpu = idx;
+ printf("Total cores: %d\n", idx);
+ break;
+ case 'p':
+ cnt = parse_set(optarg, ports, MAX_VCPU_PER_VM);
+ if (cnt < 0) {
+ printf("Invalid value passed to port-list - [%s]\n",
+ optarg);
+ break;
+ }
+ idx = 0;
+ for (i = 0; i < MAX_VCPU_PER_VM; i++) {
+ if (ports[i]) {
+ printf("***Using port %d\n", i);
+ set_policy_mac(i, idx++);
+ }
+ }
+ policy->nb_mac_to_monitor = idx;
+ printf("Total Ports: %d\n", idx);
+ break;
+ case 'o':
+ if (!strcmp(optarg, "TRAFFIC"))
+ policy->policy_to_use = TRAFFIC;
+ else if (!strcmp(optarg, "TIME"))
+ policy->policy_to_use = TIME;
+ else if (!strcmp(optarg, "WORKLOAD"))
+ policy->policy_to_use = WORKLOAD;
+ else if (!strcmp(optarg, "BRANCH_RATIO"))
+ policy->policy_to_use = BRANCH_RATIO;
+ else {
+ printf("Invalid policy specified: %s\n",
+ optarg);
+ return -1;
+ }
+ break;
+ /* long options */
+
+ case 0:
+ break;
+
+ default:
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 0; /* reset getopt lib */
+ return ret;
+}
+
int
main(int argc, char **argv)
{
@@ -45,6 +172,14 @@ main(int argc, char **argv)
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid arguments\n");
+
rte_power_set_env(PM_ENV_KVM_VM);
RTE_LCORE_FOREACH(lcore_id) {
rte_power_init(lcore_id);
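
Note: putting the new guest CLI options together, an illustrative run (the EAL arguments are placeholders; the policy channel itself is provided by vm_power_mgr on the host):

./build/guest_vm_power_mgr -l 0-3 -n 4 -- \
	--vm-name ubuntu2 --busy-hours 3-5 --quiet-hours 11-13 \
	--vcpu-list 0-1 --policy TIME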
diff --git a/examples/vm_power_manager/guest_cli/parse.c b/examples/vm_power_manager/guest_cli/parse.c
new file mode 100644
index 00000000..528df6d6
--- /dev/null
+++ b/examples/vm_power_manager/guest_cli/parse.c
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <rte_log.h>
+#include <rte_common.h>
+#include "parse.h"
+
+/*
+ * Parse an element; an element can be a single number, a range, or a group:
+ * 1) A single-number element is just a digit, e.g. 9
+ * 2) A range element is two digits with a '-' between them, e.g. 2-6
+ * 3) A group element combines multiple 1) or 2), e.g. 0,2-4,6
+ * Within a group, '-' is the range separator and ',' separates entries.
+ */
+int
+parse_set(const char *input, uint16_t set[], unsigned int num)
+{
+ unsigned int idx;
+ const char *str = input;
+ char *end = NULL;
+ unsigned int min, max;
+
+ memset(set, 0, num * sizeof(uint16_t));
+
+ while (isblank(*str))
+ str++;
+
+ /* only a digit qualifies as a start point */
+ if (!isdigit(*str) || *str == '\0')
+ return -1;
+
+ while (isblank(*str))
+ str++;
+ if (*str == '\0')
+ return -1;
+
+ min = num;
+ do {
+
+ /* go ahead to the first digit */
+ while (isblank(*str))
+ str++;
+ if (!isdigit(*str))
+ return -1;
+
+ /* get the digit value */
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || end == NULL || idx >= num)
+ return -1;
+
+ /* go ahead to separator '-' and ',' */
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ if (min == num)
+ min = idx;
+ else /* avoid consecutive '-' */
+ return -1;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+
+ if (min == num)
+ min = idx;
+
+ for (idx = RTE_MIN(min, max);
+ idx <= RTE_MAX(min, max); idx++) {
+ set[idx] = 1;
+ }
+ min = num;
+ } else
+ return -1;
+
+ str = end + 1;
+ } while (*end != '\0');
+
+ return str - input;
+}
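
Note: a minimal sketch of how parse_set() is used; it marks each selected index in the output array and returns a negative value on malformed input:

#include <stdio.h>
#include <stdint.h>
#include "parse.h"

int
main(void)
{
	uint16_t hours[24];
	int i;

	/* "2-4,7" marks indices 2, 3, 4 and 7 */
	if (parse_set("2-4,7", hours, 24) < 0)
		return 1;

	for (i = 0; i < 24; i++)
		if (hours[i])
			printf("hour %d selected\n", i);
	return 0;
}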
diff --git a/examples/vm_power_manager/guest_cli/parse.h b/examples/vm_power_manager/guest_cli/parse.h
new file mode 100644
index 00000000..c8aa0ea5
--- /dev/null
+++ b/examples/vm_power_manager/guest_cli/parse.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef PARSE_H_
+#define PARSE_H_
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int
+parse_set(const char *, uint16_t [], unsigned int);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PARSE_H_ */
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 43bdeace..0db1b804 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -33,6 +33,71 @@ struct cmd_quit_result {
cmdline_fixed_string_t quit;
};
+union PFID {
+ struct ether_addr addr;
+ uint64_t pfid;
+};
+
+static struct channel_packet policy;
+
+struct channel_packet *
+get_policy(void)
+{
+ return &policy;
+}
+
+int
+set_policy_mac(int port, int idx)
+{
+ struct channel_packet *policy;
+ union PFID pfid;
+
+ /* Use port MAC address as the vfid */
+ rte_eth_macaddr_get(port, &pfid.addr);
+
+ printf("Port %u MAC: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
+ "%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
+ port,
+ pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
+ pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
+ pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);
+ policy = get_policy();
+ policy->vfid[idx] = pfid.pfid;
+ return 0;
+}
+
+void
+set_policy_defaults(struct channel_packet *pkt)
+{
+ set_policy_mac(0, 0);
+ pkt->nb_mac_to_monitor = 1;
+
+ pkt->t_boost_status.tbEnabled = false;
+
+ pkt->vcpu_to_control[0] = 0;
+ pkt->vcpu_to_control[1] = 1;
+ pkt->num_vcpu = 2;
+ /* Dummy Population. */
+ pkt->traffic_policy.min_packet_thresh = 96000;
+ pkt->traffic_policy.avg_max_packet_thresh = 1800000;
+ pkt->traffic_policy.max_max_packet_thresh = 2000000;
+
+ pkt->timer_policy.busy_hours[0] = 3;
+ pkt->timer_policy.busy_hours[1] = 4;
+ pkt->timer_policy.busy_hours[2] = 5;
+ pkt->timer_policy.quiet_hours[0] = 11;
+ pkt->timer_policy.quiet_hours[1] = 12;
+ pkt->timer_policy.quiet_hours[2] = 13;
+
+ pkt->timer_policy.hours_to_use_traffic_profile[0] = 8;
+ pkt->timer_policy.hours_to_use_traffic_profile[1] = 10;
+
+ pkt->workload = LOW;
+ pkt->policy_to_use = TIME;
+ pkt->command = PKT_POLICY;
+ strcpy(pkt->vm_name, "ubuntu2");
+}
+
static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
__attribute__((unused)) struct cmdline *cl,
__attribute__((unused)) void *data)
@@ -118,54 +183,12 @@ struct cmd_send_policy_result {
cmdline_fixed_string_t cmd;
};
-union PFID {
- struct ether_addr addr;
- uint64_t pfid;
-};
-
static inline int
-send_policy(void)
+send_policy(struct channel_packet *pkt)
{
- struct channel_packet pkt;
int ret;
- union PFID pfid;
- /* Use port MAC address as the vfid */
- rte_eth_macaddr_get(0, &pfid.addr);
- printf("Port %u MAC: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
- "%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
- 1,
- pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
- pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
- pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);
- pkt.vfid[0] = pfid.pfid;
-
- pkt.nb_mac_to_monitor = 1;
- pkt.t_boost_status.tbEnabled = false;
-
- pkt.vcpu_to_control[0] = 0;
- pkt.vcpu_to_control[1] = 1;
- pkt.num_vcpu = 2;
- /* Dummy Population. */
- pkt.traffic_policy.min_packet_thresh = 96000;
- pkt.traffic_policy.avg_max_packet_thresh = 1800000;
- pkt.traffic_policy.max_max_packet_thresh = 2000000;
-
- pkt.timer_policy.busy_hours[0] = 3;
- pkt.timer_policy.busy_hours[1] = 4;
- pkt.timer_policy.busy_hours[2] = 5;
- pkt.timer_policy.quiet_hours[0] = 11;
- pkt.timer_policy.quiet_hours[1] = 12;
- pkt.timer_policy.quiet_hours[2] = 13;
-
- pkt.timer_policy.hours_to_use_traffic_profile[0] = 8;
- pkt.timer_policy.hours_to_use_traffic_profile[1] = 10;
-
- pkt.workload = LOW;
- pkt.policy_to_use = TIME;
- pkt.command = PKT_POLICY;
- strcpy(pkt.vm_name, "ubuntu2");
- ret = rte_power_guest_channel_send_msg(&pkt, 1);
+ ret = rte_power_guest_channel_send_msg(pkt, 1);
if (ret == 0)
return 1;
RTE_LOG(DEBUG, POWER, "Error sending message: %s\n",
@@ -182,7 +205,7 @@ cmd_send_policy_parsed(void *parsed_result, struct cmdline *cl,
if (!strcmp(res->cmd, "now")) {
printf("Sending Policy down now!\n");
- ret = send_policy();
+ ret = send_policy(&policy);
}
if (ret != 1)
cmdline_printf(cl, "Error sending message: %s\n",
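
The union PFID introduced above packs the 6-byte port MAC address into the low bytes of a 64-bit vfid. A standalone sketch of the same aliasing trick, with a stand-in ether_addr type and an invented MAC; unlike the patch, it zeroes the union first so the top two bytes are well defined:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ether_addr {		/* stand-in for DPDK's 6-byte ether_addr */
	uint8_t addr_bytes[6];
};

union PFID {
	struct ether_addr addr;
	uint64_t pfid;
};

int main(void)
{
	union PFID pfid;
	const uint8_t mac[6] = { 0xe0, 0xe0, 0xe0, 0xe0, 0xf0, 0xf1 };

	memset(&pfid, 0, sizeof(pfid));	/* MAC fills only 6 of the 8 bytes */
	memcpy(pfid.addr.addr_bytes, mac, sizeof(mac));
	printf("vfid = 0x%016" PRIx64 "\n", pfid.pfid);
	return 0;
}
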
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
index 75a26296..fd77f6a6 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
@@ -11,6 +11,12 @@ extern "C" {
#include "channel_commands.h"
+struct channel_packet *get_policy(void);
+
+int set_policy_mac(int port, int idx);
+
+void set_policy_defaults(struct channel_packet *pkt);
+
void run_cli(__attribute__((unused)) void *arg);
#ifdef __cplusplus
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index 8a1e95bd..58c5fa45 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -29,6 +29,8 @@
#include "channel_monitor.h"
#include "power_manager.h"
#include "vm_power_cli.h"
+#include "oob_monitor.h"
+#include "parse.h"
#include <rte_pmd_ixgbe.h>
#include <rte_pmd_i40e.h>
#include <rte_pmd_bnxt.h>
@@ -47,7 +49,6 @@ static volatile bool force_quit;
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
@@ -61,7 +62,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txq_conf;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
rte_eth_dev_info_get(port, &dev_info);
@@ -83,7 +84,6 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
}
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = port_conf.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
@@ -135,18 +135,25 @@ parse_portmask(const char *portmask)
static int
parse_args(int argc, char **argv)
{
- int opt, ret;
+ int opt, ret, cnt, i;
char **argvopt;
+ uint16_t *oob_enable;
int option_index;
char *prgname = argv[0];
+ struct core_info *ci;
+ float branch_ratio;
static struct option lgopts[] = {
{ "mac-updating", no_argument, 0, 1},
{ "no-mac-updating", no_argument, 0, 0},
+ { "core-list", optional_argument, 0, 'l'},
+ { "port-list", optional_argument, 0, 'p'},
+ { "branch-ratio", optional_argument, 0, 'b'},
{NULL, 0, 0, 0}
};
argvopt = argv;
+ ci = get_core_info();
- while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+ while ((opt = getopt_long(argc, argvopt, "l:p:q:T:b:",
lgopts, &option_index)) != EOF) {
switch (opt) {
@@ -158,6 +165,39 @@ parse_args(int argc, char **argv)
return -1;
}
break;
+ case 'l':
+ oob_enable = malloc(ci->core_count * sizeof(uint16_t));
+ if (oob_enable == NULL) {
+ printf("Error - Unable to allocate memory\n");
+ return -1;
+ }
+ cnt = parse_set(optarg, oob_enable, ci->core_count);
+ if (cnt < 0) {
+ printf("Invalid core-list - [%s]\n",
+ optarg);
+ break;
+ }
+ for (i = 0; i < ci->core_count; i++) {
+ if (oob_enable[i]) {
+ printf("***Using core %d\n", i);
+ ci->cd[i].oob_enabled = 1;
+ ci->cd[i].global_enabled_cpus = 1;
+ }
+ }
+ free(oob_enable);
+ break;
+ case 'b':
+ branch_ratio = 0.0;
+ if (strlen(optarg))
+ branch_ratio = atof(optarg);
+ if (branch_ratio <= 0.0) {
+ printf("invalid branch ratio specified\n");
+ return -1;
+ }
+ ci->branch_ratio_threshold = branch_ratio;
+ printf("***Setting branch ratio to %f\n",
+ branch_ratio);
+ break;
/* long options */
case 0:
break;
@@ -176,7 +216,7 @@ parse_args(int argc, char **argv)
}
static void
-check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
@@ -189,7 +229,7 @@ check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
if (force_quit)
return;
all_ports_up = 1;
- for (portid = 0; portid < port_num; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (force_quit)
return;
if ((port_mask & (1 << portid)) == 0)
@@ -243,6 +283,17 @@ run_monitor(__attribute__((unused)) void *arg)
return 0;
}
+static int
+run_core_monitor(__attribute__((unused)) void *arg)
+{
+ if (branch_monitor_init() < 0) {
+ printf("Unable to initialize core monitor\n");
+ return -1;
+ }
+ run_branch_monitor();
+ return 0;
+}
+
static void
sig_handler(int signo)
{
@@ -261,7 +312,14 @@ main(int argc, char **argv)
unsigned int nb_ports;
struct rte_mempool *mbuf_pool;
uint16_t portid;
+ struct core_info *ci;
+
+
+ ret = core_info_init();
+ if (ret < 0)
+ rte_panic("Cannot allocate core info\n");
+ ci = get_core_info();
ret = rte_eal_init(argc, argv);
if (ret < 0)
@@ -278,53 +336,58 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid arguments\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
- MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ if (nb_ports > 0) {
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
+ NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
- /* Initialize ports. */
- for (portid = 0; portid < nb_ports; portid++) {
- struct ether_addr eth;
- int w, j;
- int ret;
+ /* Initialize ports. */
+ RTE_ETH_FOREACH_DEV(portid) {
+ struct ether_addr eth;
+ int w, j;
+ int ret;
- if ((enabled_port_mask & (1 << portid)) == 0)
- continue;
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
- eth.addr_bytes[0] = 0xe0;
- eth.addr_bytes[1] = 0xe0;
- eth.addr_bytes[2] = 0xe0;
- eth.addr_bytes[3] = 0xe0;
- eth.addr_bytes[4] = portid + 0xf0;
+ eth.addr_bytes[0] = 0xe0;
+ eth.addr_bytes[1] = 0xe0;
+ eth.addr_bytes[2] = 0xe0;
+ eth.addr_bytes[3] = 0xe0;
+ eth.addr_bytes[4] = portid + 0xf0;
- if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ if (port_init(portid, mbuf_pool) != 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot init port %"PRIu8 "\n",
portid);
- for (w = 0; w < MAX_VFS; w++) {
- eth.addr_bytes[5] = w + 0xf0;
-
- ret = rte_pmd_ixgbe_set_vf_mac_addr(portid,
- w, &eth);
- if (ret == -ENOTSUP)
- ret = rte_pmd_i40e_set_vf_mac_addr(portid,
- w, &eth);
- if (ret == -ENOTSUP)
- ret = rte_pmd_bnxt_set_vf_mac_addr(portid,
- w, &eth);
-
- switch (ret) {
- case 0:
- printf("Port %d VF %d MAC: ",
- portid, w);
- for (j = 0; j < 6; j++) {
- printf("%02x", eth.addr_bytes[j]);
- if (j < 5)
- printf(":");
+ for (w = 0; w < MAX_VFS; w++) {
+ eth.addr_bytes[5] = w + 0xf0;
+
+ ret = rte_pmd_ixgbe_set_vf_mac_addr(portid,
+ w, &eth);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_mac_addr(
+ portid, w, &eth);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_mac_addr(
+ portid, w, &eth);
+
+ switch (ret) {
+ case 0:
+ printf("Port %d VF %d MAC: ",
+ portid, w);
+ for (j = 0; j < 5; j++) {
+ printf("%02x:",
+ eth.addr_bytes[j]);
+ }
+ printf("%02x\n", eth.addr_bytes[5]);
+ break;
}
printf("\n");
break;
@@ -332,16 +395,23 @@ main(int argc, char **argv)
}
}
+ check_all_ports_link_status(enabled_port_mask);
+
lcore_id = rte_get_next_lcore(-1, 1, 0);
if (lcore_id == RTE_MAX_LCORE) {
- RTE_LOG(ERR, EAL, "A minimum of two cores are required to run "
+ RTE_LOG(ERR, EAL, "A minimum of three cores are required to run "
"application\n");
return 0;
}
-
- check_all_ports_link_status(nb_ports, enabled_port_mask);
+ printf("Running channel monitor on lcore id %d\n", lcore_id);
rte_eal_remote_launch(run_monitor, NULL, lcore_id);
+ lcore_id = rte_get_next_lcore(lcore_id, 1, 0);
+ if (lcore_id == RTE_MAX_LCORE) {
+ RTE_LOG(ERR, EAL, "A minimum of three cores are required to run "
+ "application\n");
+ return 0;
+ }
if (power_manager_init() < 0) {
printf("Unable to initialize power manager\n");
return -1;
@@ -350,8 +420,17 @@ main(int argc, char **argv)
printf("Unable to initialize channel manager\n");
return -1;
}
+
+ printf("Running core monitor on lcore id %d\n", lcore_id);
+ rte_eal_remote_launch(run_core_monitor, NULL, lcore_id);
+
run_cli(NULL);
+ branch_monitor_exit();
+
rte_eal_mp_wait_lcore();
+
+ free(ci->cd);
+
return 0;
}
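
With the options added above, a hypothetical invocation of the host manager (the binary path and EAL arguments are assumptions; application options follow the usual '--' separator) could look like:

    ./build/vm_power_mgr -l 0-3 -n 4 -- -p 0x1 -l 10-12 -b 0.3

where the application-side -l selects the cores to monitor out-of-band, -p is the port mask, and -b overrides the default branch-ratio threshold.
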
diff --git a/examples/vm_power_manager/meson.build b/examples/vm_power_manager/meson.build
new file mode 100644
index 00000000..c370d747
--- /dev/null
+++ b/examples/vm_power_manager/meson.build
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Example app currently unsupported by meson build
+build = false
diff --git a/examples/vm_power_manager/oob_monitor.h b/examples/vm_power_manager/oob_monitor.h
new file mode 100644
index 00000000..b96e08df
--- /dev/null
+++ b/examples/vm_power_manager/oob_monitor.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef OOB_MONITOR_H_
+#define OOB_MONITOR_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Set up the branch monitor resources required to initialize epoll.
+ * Must be called before any other branch monitor function.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int branch_monitor_init(void);
+
+/**
+ * Run the OOB branch monitor; loops forever on epoll_wait.
+ *
+ *
+ * @return
+ * None
+ */
+void run_branch_monitor(void);
+
+/**
+ * Exit the OOB Branch Monitor.
+ *
+ * @return
+ * None
+ */
+void branch_monitor_exit(void);
+
+/**
+ * Add a core to the list of cores to monitor.
+ *
+ * @param core
+ * Core Number
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int add_core_to_monitor(int core);
+
+/**
+ * Remove a previously added core from core list.
+ *
+ * @param core
+ * Core Number
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int remove_core_from_monitor(int core);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* OOB_MONITOR_H_ */
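
A sketch of the intended call sequence for this API, mirroring how main.c drives it on a dedicated lcore (error handling trimmed; the monitored core number is invented):

#include "oob_monitor.h"

static int oob_thread(void *arg __attribute__((unused)))
{
	if (branch_monitor_init() < 0)
		return -1;
	add_core_to_monitor(2);		/* watch core 2 */
	run_branch_monitor();		/* blocks until branch_monitor_exit() */
	remove_core_from_monitor(2);
	return 0;
}
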
diff --git a/examples/vm_power_manager/oob_monitor_nop.c b/examples/vm_power_manager/oob_monitor_nop.c
new file mode 100644
index 00000000..7e7b8bc1
--- /dev/null
+++ b/examples/vm_power_manager/oob_monitor_nop.c
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include "oob_monitor.h"
+
+void branch_monitor_exit(void)
+{
+}
+
+__attribute__((unused)) static float
+apply_policy(__attribute__((unused)) int core)
+{
+ return 0.0;
+}
+
+int
+add_core_to_monitor(__attribute__((unused)) int core)
+{
+ return 0;
+}
+
+int
+remove_core_from_monitor(__attribute__((unused)) int core)
+{
+ return 0;
+}
+
+int
+branch_monitor_init(void)
+{
+ return 0;
+}
+
+void
+run_branch_monitor(void)
+{
+}
diff --git a/examples/vm_power_manager/oob_monitor_x86.c b/examples/vm_power_manager/oob_monitor_x86.c
new file mode 100644
index 00000000..589c604e
--- /dev/null
+++ b/examples/vm_power_manager/oob_monitor_x86.c
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <rte_log.h>
+
+#include "oob_monitor.h"
+#include "power_manager.h"
+#include "channel_manager.h"
+
+static volatile unsigned run_loop = 1;
+static uint64_t g_branches, g_branch_misses;
+static int g_active;
+
+void branch_monitor_exit(void)
+{
+ run_loop = 0;
+}
+
+/* Number of microseconds between each poll */
+#define INTERVAL 100
+#define PRINT_LOOP_COUNT (1000000/INTERVAL)
+#define IA32_PERFEVTSEL0 0x186
+#define IA32_PERFEVTSEL1 0x187
+#define IA32_PERFCTR0 0xc1
+#define IA32_PERFCTR1 0xc2
+#define IA32_PERFEVT_BRANCH_HITS 0x05300c4
+#define IA32_PERFEVT_BRANCH_MISS 0x05300c5
+
+static float
+apply_policy(int core)
+{
+ struct core_info *ci;
+ uint64_t counter;
+ uint64_t branches, branch_misses;
+ uint32_t last_branches, last_branch_misses;
+ int hits_diff, miss_diff;
+ float ratio;
+ int ret;
+
+ g_active = 0;
+ ci = get_core_info();
+
+ last_branches = ci->cd[core].last_branches;
+ last_branch_misses = ci->cd[core].last_branch_misses;
+
+ ret = pread(ci->cd[core].msr_fd, &counter,
+ sizeof(counter), IA32_PERFCTR0);
+ if (ret < 0)
+ RTE_LOG(ERR, POWER_MANAGER,
+ "unable to read counter for core %u\n",
+ core);
+ branches = counter;
+
+ ret = pread(ci->cd[core].msr_fd, &counter,
+ sizeof(counter), IA32_PERFCTR1);
+ if (ret < 0)
+ RTE_LOG(ERR, POWER_MANAGER,
+ "unable to read counter for core %u\n",
+ core);
+ branch_misses = counter;
+
+
+ ci->cd[core].last_branches = branches;
+ ci->cd[core].last_branch_misses = branch_misses;
+
+ hits_diff = (int)branches - (int)last_branches;
+ if (hits_diff <= 0) {
+ /* Likely a counter overflow condition, skip this round */
+ return -1.0;
+ }
+
+ miss_diff = (int)branch_misses - (int)last_branch_misses;
+ if (miss_diff <= 0) {
+ /* Likely a counter overflow condition, skip this round */
+ return -1.0;
+ }
+
+ g_branches = hits_diff;
+ g_branch_misses = miss_diff;
+
+ if (hits_diff < (INTERVAL*100)) {
+ /* Likely no workload running on this core. Skip. */
+ return -1.0;
+ }
+
+ ratio = (float)miss_diff * (float)100 / (float)hits_diff;
+
+ if (ratio < ci->branch_ratio_threshold)
+ power_manager_scale_core_min(core);
+ else
+ power_manager_scale_core_max(core);
+
+ g_active = 1;
+ return ratio;
+}
+
+int
+add_core_to_monitor(int core)
+{
+ struct core_info *ci;
+ char proc_file[UNIX_PATH_MAX];
+ int ret;
+
+ ci = get_core_info();
+
+ if (core < ci->core_count) {
+ long setup;
+
+ snprintf(proc_file, UNIX_PATH_MAX, "/dev/cpu/%d/msr", core);
+ ci->cd[core].msr_fd = open(proc_file, O_RDWR | O_SYNC);
+ if (ci->cd[core].msr_fd < 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Error opening MSR file for core %d "
+ "(is msr kernel module loaded?)\n",
+ core);
+ return -1;
+ }
+ /*
+ * Set up branch counters
+ */
+ setup = IA32_PERFEVT_BRANCH_HITS;
+ ret = pwrite(ci->cd[core].msr_fd, &setup,
+ sizeof(setup), IA32_PERFEVTSEL0);
+ if (ret < 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "unable to set counter for core %u\n",
+ core);
+ return ret;
+ }
+ setup = IA32_PERFEVT_BRANCH_MISS;
+ ret = pwrite(ci->cd[core].msr_fd, &setup,
+ sizeof(setup), IA32_PERFEVTSEL1);
+ if (ret < 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "unable to set counter for core %u\n",
+ core);
+ return ret;
+ }
+ /*
+ * Close the file and re-open as read only so
+ * as not to hog the resource
+ */
+ close(ci->cd[core].msr_fd);
+ ci->cd[core].msr_fd = open(proc_file, O_RDONLY);
+ if (ci->cd[core].msr_fd < 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Error opening MSR file for core %d "
+ "(is msr kernel module loaded?)\n",
+ core);
+ return -1;
+ }
+ ci->cd[core].oob_enabled = 1;
+ }
+ return 0;
+}
+
+int
+remove_core_from_monitor(int core)
+{
+ struct core_info *ci;
+ char proc_file[UNIX_PATH_MAX];
+ int ret;
+
+ ci = get_core_info();
+
+ if (ci->cd[core].oob_enabled) {
+ long setup;
+
+ /*
+ * close the msr file, then reopen rw so we can
+ * disable the counters
+ */
+ if (ci->cd[core].msr_fd != 0)
+ close(ci->cd[core].msr_fd);
+ snprintf(proc_file, UNIX_PATH_MAX, "/dev/cpu/%d/msr", core);
+ ci->cd[core].msr_fd = open(proc_file, O_RDWR | O_SYNC);
+ if (ci->cd[core].msr_fd < 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Error opening MSR file for core %d "
+ "(is msr kernel module loaded?)\n",
+ core);
+ return -1;
+ }
+ setup = 0x0; /* clear event */
+ ret = pwrite(ci->cd[core].msr_fd, &setup,
+ sizeof(setup), IA32_PERFEVTSEL0);
+ if (ret < 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "unable to set counter for core %u\n",
+ core);
+ return ret;
+ }
+ setup = 0x0; /* clear event */
+ ret = pwrite(ci->cd[core].msr_fd, &setup,
+ sizeof(setup), IA32_PERFEVTSEL1);
+ if (ret < 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "unable to set counter for core %u\n",
+ core);
+ return ret;
+ }
+
+ close(ci->cd[core].msr_fd);
+ ci->cd[core].msr_fd = 0;
+ ci->cd[core].oob_enabled = 0;
+ }
+ return 0;
+}
+
+int
+branch_monitor_init(void)
+{
+ return 0;
+}
+
+void
+run_branch_monitor(void)
+{
+ struct core_info *ci;
+ int print = 0;
+ float ratio;
+ int printed;
+ int reads = 0;
+
+ ci = get_core_info();
+
+ while (run_loop) {
+
+ if (!run_loop)
+ break;
+ usleep(INTERVAL);
+ int j;
+ print++;
+ printed = 0;
+ for (j = 0; j < ci->core_count; j++) {
+ if (ci->cd[j].oob_enabled) {
+ ratio = apply_policy(j);
+ if ((print > PRINT_LOOP_COUNT) && (g_active)) {
+ printf(" %d: %.4f {%lu} {%d}", j,
+ ratio, g_branches,
+ reads);
+ printed = 1;
+ reads = 0;
+ } else {
+ reads++;
+ }
+ }
+ }
+ if (print > PRINT_LOOP_COUNT) {
+ if (printed)
+ printf("\n");
+ print = 0;
+ }
+ }
+}
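
To make the policy in apply_policy() concrete, here is a worked example with invented counter deltas. INTERVAL is 100 us, so a core retiring fewer than INTERVAL * 100 = 10000 branches per poll is treated as idle and skipped:

#include <stdio.h>

int main(void)
{
	float hits_diff = 500000;	/* branches retired since last poll */
	float miss_diff = 2000;		/* branch misses since last poll */
	float ratio = miss_diff * 100 / hits_diff;

	/* 0.4 >= the default BRANCH_RATIO_THRESHOLD (0.1): scale to max;
	 * below the threshold the core would be scaled to min instead. */
	printf("ratio = %.4f\n", ratio);
	return 0;
}
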
diff --git a/examples/vm_power_manager/parse.c b/examples/vm_power_manager/parse.c
new file mode 100644
index 00000000..8231533b
--- /dev/null
+++ b/examples/vm_power_manager/parse.c
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright(c) 2014 6WIND S.A.
+ */
+
+#include <string.h>
+#include <rte_log.h>
+#include "parse.h"
+
+/*
+ * Parse an element; an element can be a single number, a range or a group:
+ * 1) A single number element is just a digit, e.g. 9
+ * 2) A range element is two numbers separated by '-', e.g. 2-6
+ * 3) A group element combines several of 1) or 2), e.g. 0,2-4,6
+ * Within a group, '-' is the range separator and
+ * ',' separates single numbers.
+ */
+int
+parse_set(const char *input, uint16_t set[], unsigned int num)
+{
+ unsigned int idx;
+ const char *str = input;
+ char *end = NULL;
+ unsigned int min, max;
+
+ memset(set, 0, num * sizeof(uint16_t));
+
+ while (isblank(*str))
+ str++;
+
+ /* only a digit qualifies as a start point */
+ if (!isdigit(*str) || *str == '\0')
+ return -1;
+
+ while (isblank(*str))
+ str++;
+ if (*str == '\0')
+ return -1;
+
+ min = num;
+ do {
+
+ /* go ahead to the first digit */
+ while (isblank(*str))
+ str++;
+ if (!isdigit(*str))
+ return -1;
+
+ /* get the digit value */
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || end == NULL || idx >= num)
+ return -1;
+
+ /* go ahead to separator '-' and ',' */
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ if (min == num)
+ min = idx;
+ else /* avoid continuous '-' */
+ return -1;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+
+ if (min == num)
+ min = idx;
+
+ for (idx = RTE_MIN(min, max);
+ idx <= RTE_MAX(min, max); idx++) {
+ set[idx] = 1;
+ }
+ min = num;
+ } else
+ return -1;
+
+ str = end + 1;
+ } while (*end != '\0');
+
+ return str - input;
+}
diff --git a/examples/vm_power_manager/parse.h b/examples/vm_power_manager/parse.h
new file mode 100644
index 00000000..a5971e9a
--- /dev/null
+++ b/examples/vm_power_manager/parse.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef PARSE_H_
+#define PARSE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int
+parse_set(const char *, uint16_t [], unsigned int);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* PARSE_H_ */
diff --git a/examples/vm_power_manager/power_manager.c b/examples/vm_power_manager/power_manager.c
index 35db2559..b7769c3c 100644
--- a/examples/vm_power_manager/power_manager.c
+++ b/examples/vm_power_manager/power_manager.c
@@ -12,20 +12,21 @@
#include <dirent.h>
#include <errno.h>
+#include <sys/sysinfo.h>
#include <sys/types.h>
#include <rte_log.h>
#include <rte_power.h>
#include <rte_spinlock.h>
+#include "channel_manager.h"
#include "power_manager.h"
-
-#define RTE_LOGTYPE_POWER_MANAGER RTE_LOGTYPE_USER1
+#include "oob_monitor.h"
#define POWER_SCALE_CORE(DIRECTION, core_num , ret) do { \
- if (core_num >= POWER_MGR_MAX_CPUS) \
+ if (core_num >= ci.core_count) \
return -1; \
- if (!(global_enabled_cpus & (1ULL << core_num))) \
+ if (!(ci.cd[core_num].global_enabled_cpus)) \
return -1; \
rte_spinlock_lock(&global_core_freq_info[core_num].power_sl); \
ret = rte_power_freq_##DIRECTION(core_num); \
@@ -36,7 +37,7 @@
int i; \
for (i = 0; core_mask; core_mask &= ~(1 << i++)) { \
if ((core_mask >> i) & 1) { \
- if (!(global_enabled_cpus & (1ULL << i))) \
+ if (!(ci.cd[i].global_enabled_cpus)) \
continue; \
rte_spinlock_lock(&global_core_freq_info[i].power_sl); \
if (rte_power_freq_##DIRECTION(i) != 1) \
@@ -54,63 +55,82 @@ struct freq_info {
static struct freq_info global_core_freq_info[POWER_MGR_MAX_CPUS];
-static uint64_t global_enabled_cpus;
+struct core_info ci;
#define SYSFS_CPU_PATH "/sys/devices/system/cpu/cpu%u/topology/core_id"
-static unsigned
-set_host_cpus_mask(void)
+struct core_info *
+get_core_info(void)
{
- char path[PATH_MAX];
- unsigned i;
- unsigned num_cpus = 0;
-
- for (i = 0; i < POWER_MGR_MAX_CPUS; i++) {
- snprintf(path, sizeof(path), SYSFS_CPU_PATH, i);
- if (access(path, F_OK) == 0) {
- global_enabled_cpus |= 1ULL << i;
- num_cpus++;
- } else
- return num_cpus;
+ return &ci;
+}
+
+int
+core_info_init(void)
+{
+ struct core_info *ci;
+ int i;
+
+ ci = get_core_info();
+
+ ci->core_count = get_nprocs_conf();
+ ci->branch_ratio_threshold = BRANCH_RATIO_THRESHOLD;
+ ci->cd = malloc(ci->core_count * sizeof(struct core_details));
+ if (!ci->cd) {
+ RTE_LOG(ERR, POWER_MANAGER, "Failed to allocate memory for core info.");
+ return -1;
}
- return num_cpus;
+ for (i = 0; i < ci->core_count; i++) {
+ ci->cd[i].global_enabled_cpus = 1;
+ ci->cd[i].oob_enabled = 0;
+ ci->cd[i].msr_fd = 0;
+ }
+ printf("%d cores in system\n", ci->core_count);
+ return 0;
}
int
power_manager_init(void)
{
- unsigned int i, num_cpus, num_freqs;
- uint64_t cpu_mask;
+ unsigned int i, num_cpus = 0, num_freqs = 0;
int ret = 0;
+ struct core_info *ci;
+
+ rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
- num_cpus = set_host_cpus_mask();
- if (num_cpus == 0) {
- RTE_LOG(ERR, POWER_MANAGER, "Unable to detected host CPUs, please "
- "ensure that sufficient privileges exist to inspect sysfs\n");
+ ci = get_core_info();
+ if (!ci) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Failed to get core info!\n");
return -1;
}
- rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
- cpu_mask = global_enabled_cpus;
- for (i = 0; cpu_mask; cpu_mask &= ~(1 << i++)) {
- if (rte_power_init(i) < 0)
- RTE_LOG(ERR, POWER_MANAGER,
- "Unable to initialize power manager "
- "for core %u\n", i);
- num_freqs = rte_power_freqs(i, global_core_freq_info[i].freqs,
+
+ for (i = 0; i < ci->core_count; i++) {
+ if (ci->cd[i].global_enabled_cpus) {
+ if (rte_power_init(i) < 0)
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Unable to initialize power manager "
+ "for core %u\n", i);
+ num_cpus++;
+ num_freqs = rte_power_freqs(i,
+ global_core_freq_info[i].freqs,
RTE_MAX_LCORE_FREQS);
- if (num_freqs == 0) {
- RTE_LOG(ERR, POWER_MANAGER,
- "Unable to get frequency list for core %u\n",
- i);
- global_enabled_cpus &= ~(1 << i);
- num_cpus--;
- ret = -1;
+ if (num_freqs == 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Unable to get frequency list for core %u\n",
+ i);
+ ci->cd[i].oob_enabled = 0;
+ ret = -1;
+ }
+ global_core_freq_info[i].num_freqs = num_freqs;
+
+ rte_spinlock_init(&global_core_freq_info[i].power_sl);
}
- global_core_freq_info[i].num_freqs = num_freqs;
- rte_spinlock_init(&global_core_freq_info[i].power_sl);
+ if (ci->cd[i].oob_enabled)
+ add_core_to_monitor(i);
}
- RTE_LOG(INFO, POWER_MANAGER, "Detected %u host CPUs , enabled core mask:"
- " 0x%"PRIx64"\n", num_cpus, global_enabled_cpus);
+ RTE_LOG(INFO, POWER_MANAGER, "Managing %u cores out of %u available host cores\n",
+ num_cpus, ci->core_count);
return ret;
}
@@ -125,7 +145,7 @@ power_manager_get_current_frequency(unsigned core_num)
core_num, POWER_MGR_MAX_CPUS-1);
return -1;
}
- if (!(global_enabled_cpus & (1ULL << core_num)))
+ if (!(ci.cd[core_num].global_enabled_cpus))
return 0;
rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
@@ -144,15 +164,26 @@ power_manager_exit(void)
{
unsigned int i;
int ret = 0;
+ struct core_info *ci;
+
+ ci = get_core_info();
+ if (!ci) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Failed to get core info!\n");
+ return -1;
+ }
- for (i = 0; global_enabled_cpus; global_enabled_cpus &= ~(1 << i++)) {
- if (rte_power_exit(i) < 0) {
- RTE_LOG(ERR, POWER_MANAGER, "Unable to shutdown power manager "
- "for core %u\n", i);
- ret = -1;
+ for (i = 0; i < ci->core_count; i++) {
+ if (ci->cd[i].global_enabled_cpus) {
+ if (rte_power_exit(i) < 0) {
+ RTE_LOG(ERR, POWER_MANAGER, "Unable to shutdown power manager "
+ "for core %u\n", i);
+ ret = -1;
+ }
+ ci->cd[i].global_enabled_cpus = 0;
}
+ remove_core_from_monitor(i);
}
- global_enabled_cpus = 0;
return ret;
}
@@ -268,10 +299,12 @@ int
power_manager_scale_core_med(unsigned int core_num)
{
int ret = 0;
+ struct core_info *ci;
+ ci = get_core_info();
if (core_num >= POWER_MGR_MAX_CPUS)
return -1;
- if (!(global_enabled_cpus & (1ULL << core_num)))
+ if (!(ci->cd[core_num].global_enabled_cpus))
return -1;
rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
ret = rte_power_set_freq(core_num,
diff --git a/examples/vm_power_manager/power_manager.h b/examples/vm_power_manager/power_manager.h
index 8a8a84aa..605b3c8f 100644
--- a/examples/vm_power_manager/power_manager.h
+++ b/examples/vm_power_manager/power_manager.h
@@ -8,6 +8,29 @@
#ifdef __cplusplus
extern "C" {
#endif
+struct core_details {
+ uint64_t last_branches;
+ uint64_t last_branch_misses;
+ uint16_t global_enabled_cpus;
+ uint16_t oob_enabled;
+ int msr_fd;
+};
+
+struct core_info {
+ uint16_t core_count;
+ struct core_details *cd;
+ float branch_ratio_threshold;
+};
+
+#define BRANCH_RATIO_THRESHOLD 0.1
+
+struct core_info *
+get_core_info(void);
+
+int
+core_info_init(void);
+
+#define RTE_LOGTYPE_POWER_MANAGER RTE_LOGTYPE_USER1
/* Maximum number of CPUS to manage */
#define POWER_MGR_MAX_CPUS 64
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index d94a1831..627a5da4 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -65,7 +65,6 @@ static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_VMDQ_ONLY,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
},
.txmode = {
@@ -201,7 +200,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
num_pf_queues, num_pools, queues_per_pool);
printf("vmdq queue base: %d pool base %d\n",
vmdq_queue_base, vmdq_pool_base);
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
/*
@@ -237,7 +236,6 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
rxconf = &dev_info.default_rxconf;
rxconf->rx_drop_en = 1;
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = port_conf.txmode.offloads;
for (q = 0; q < rxRings; q++) {
retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
@@ -539,9 +537,9 @@ static unsigned check_ports_num(unsigned nb_ports)
}
for (portid = 0; portid < num_ports; portid++) {
- if (ports[portid] >= nb_ports) {
- printf("\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
- ports[portid], (nb_ports - 1));
+ if (!rte_eth_dev_is_valid_port(ports[portid])) {
+ printf("\nSpecified port ID(%u) is not valid\n",
+ ports[portid]);
ports[portid] = INVALID_PORT_ID;
valid_num_ports--;
}
@@ -580,7 +578,7 @@ main(int argc, char *argv[])
if (rte_lcore_count() > RTE_MAX_LCORE)
rte_exit(EXIT_FAILURE, "Not enough cores\n");
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
/*
* Update the global var NUM_PORTS and global array PORTS
@@ -600,7 +598,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
printf("\nSkipping disabled port %d\n", portid);
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index bfe72f8c..64636839 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -71,7 +71,6 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_VMDQ_DCB,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
},
.txmode = {
.mq_mode = ETH_MQ_TX_VMDQ_DCB,
@@ -197,6 +196,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
uint16_t queues_per_pool;
uint32_t max_nb_pools;
struct rte_eth_txconf txq_conf;
+ uint64_t rss_hf_tmp;
/*
* The max pool number from dev_info will be used to validate the pool
@@ -246,7 +246,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
num_pools, queues_per_pool);
}
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
retval = get_eth_conf(&port_conf);
@@ -257,6 +257,18 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+ rss_hf_tmp = port_conf.rx_adv_conf.rss_conf.rss_hf;
+ port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (port_conf.rx_adv_conf.rss_conf.rss_hf != rss_hf_tmp) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ port,
+ rss_hf_tmp,
+ port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
/*
* Though in this example, all queues including pf queues are setup.
* This is because VMDQ queues doesn't always start from zero, and the
@@ -290,7 +302,6 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
}
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = port_conf.txmode.offloads;
for (q = 0; q < num_queues; q++) {
retval = rte_eth_tx_queue_setup(port, q, txRingSize,
@@ -599,9 +610,9 @@ static unsigned check_ports_num(unsigned nb_ports)
}
for (portid = 0; portid < num_ports; portid++) {
- if (ports[portid] >= nb_ports) {
- printf("\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
- ports[portid], (nb_ports - 1));
+ if (!rte_eth_dev_is_valid_port(ports[portid])) {
+ printf("\nSpecified port ID(%u) is not valid\n",
+ ports[portid]);
ports[portid] = INVALID_PORT_ID;
valid_num_ports--;
}
@@ -642,7 +653,7 @@ main(int argc, char *argv[])
" number of cores(1-%d)\n\n", RTE_MAX_LCORE);
}
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
/*
* Update the global var NUM_PORTS and global array PORTS
@@ -662,7 +673,7 @@ main(int argc, char *argv[])
rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
/* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
printf("\nSkipping disabled port %d\n", portid);
diff --git a/kernel/Makefile b/kernel/Makefile
new file mode 100644
index 00000000..8948d042
--- /dev/null
+++ b/kernel/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += linux
+DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += freebsd
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/kernel/freebsd/BSDmakefile.meson b/kernel/freebsd/BSDmakefile.meson
new file mode 100644
index 00000000..6839ac01
--- /dev/null
+++ b/kernel/freebsd/BSDmakefile.meson
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+# makefile for building kernel modules using meson
+# takes parameters from the environment
+
+# source file is passed via KMOD_SRC as relative path, we only use final
+# (tail) component of it (:T), as VPATH is used to find actual file. The
+# VPATH is similarly extracted from the non-final (head) portion of the
+# path (:H) converted to absolute path (:tA). This use of VPATH is to have
+# the .o files placed in the build, not source directory
+
+VPATH := ${KMOD_SRC:H:tA}
+SRCS := ${KMOD_SRC:T} device_if.h bus_if.h pci_if.h
+CFLAGS += $(KMOD_CFLAGS)
+.OBJDIR: ${KMOD_OBJDIR}
+
+.include <bsd.kmod.mk>
diff --git a/kernel/freebsd/Makefile b/kernel/freebsd/Makefile
new file mode 100644
index 00000000..c93d7a62
--- /dev/null
+++ b/kernel/freebsd/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += contigmem
+DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += nic_uio
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/bsdapp/contigmem/BSDmakefile b/kernel/freebsd/contigmem/BSDmakefile
index 33ce83ee..33ce83ee 100644
--- a/lib/librte_eal/bsdapp/contigmem/BSDmakefile
+++ b/kernel/freebsd/contigmem/BSDmakefile
diff --git a/lib/librte_eal/bsdapp/contigmem/Makefile b/kernel/freebsd/contigmem/Makefile
index 428a7ede..428a7ede 100644
--- a/lib/librte_eal/bsdapp/contigmem/Makefile
+++ b/kernel/freebsd/contigmem/Makefile
diff --git a/lib/librte_eal/bsdapp/contigmem/contigmem.c b/kernel/freebsd/contigmem/contigmem.c
index 1715b5dc..1715b5dc 100644
--- a/lib/librte_eal/bsdapp/contigmem/contigmem.c
+++ b/kernel/freebsd/contigmem/contigmem.c
diff --git a/lib/librte_eal/bsdapp/contigmem/meson.build b/kernel/freebsd/contigmem/meson.build
index 8fb2ab78..8fb2ab78 100644
--- a/lib/librte_eal/bsdapp/contigmem/meson.build
+++ b/kernel/freebsd/contigmem/meson.build
diff --git a/kernel/freebsd/meson.build b/kernel/freebsd/meson.build
new file mode 100644
index 00000000..336083c4
--- /dev/null
+++ b/kernel/freebsd/meson.build
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+kmods = ['contigmem', 'nic_uio']
+
+# for building kernel modules, we use the kernel build system via make, as
+# on Linux. We have a skeleton BSDmakefile, which pulls many of its
+# values from the environment. Each module only has a single source file
+# right now, which allows us to simplify things. We pull in the source
+# files from the individual meson.build files, and then use a custom
+# target to call make, passing in the values as env parameters.
+kmod_cflags = ['-I' + meson.build_root(),
+ '-I' + join_paths(meson.source_root(), 'config'),
+ '-include rte_config.h']
+
+# to avoid warnings due to race conditions with creating the device_if.h, etc.
+# files, serialize the kernel module builds. Each module will depend on
+# previous ones
+built_kmods = []
+foreach k:kmods
+ subdir(k)
+ built_kmods += custom_target(k,
+ input: [files('BSDmakefile.meson'), sources],
+ output: k + '.ko',
+ command: ['make', '-f', '@INPUT0@',
+ 'KMOD_OBJDIR=@OUTDIR@',
+ 'KMOD_SRC=@INPUT1@',
+ 'KMOD=' + k,
+ 'KMOD_CFLAGS=' + ' '.join(kmod_cflags)],
+ depends: built_kmods, # make each module depend on prev
+ build_by_default: get_option('enable_kmods'))
+endforeach
diff --git a/lib/librte_eal/bsdapp/nic_uio/BSDmakefile b/kernel/freebsd/nic_uio/BSDmakefile
index b6f92d55..b6f92d55 100644
--- a/lib/librte_eal/bsdapp/nic_uio/BSDmakefile
+++ b/kernel/freebsd/nic_uio/BSDmakefile
diff --git a/lib/librte_eal/bsdapp/nic_uio/Makefile b/kernel/freebsd/nic_uio/Makefile
index 376ef3a3..376ef3a3 100644
--- a/lib/librte_eal/bsdapp/nic_uio/Makefile
+++ b/kernel/freebsd/nic_uio/Makefile
diff --git a/lib/librte_eal/bsdapp/nic_uio/meson.build b/kernel/freebsd/nic_uio/meson.build
index 4bdaf969..4bdaf969 100644
--- a/lib/librte_eal/bsdapp/nic_uio/meson.build
+++ b/kernel/freebsd/nic_uio/meson.build
diff --git a/lib/librte_eal/bsdapp/nic_uio/nic_uio.c b/kernel/freebsd/nic_uio/nic_uio.c
index 401b487e..401b487e 100644
--- a/lib/librte_eal/bsdapp/nic_uio/nic_uio.c
+++ b/kernel/freebsd/nic_uio/nic_uio.c
diff --git a/kernel/linux/Makefile b/kernel/linux/Makefile
new file mode 100644
index 00000000..c2c45a3e
--- /dev/null
+++ b/kernel/linux/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_EAL_IGB_UIO) += igb_uio
+DIRS-$(CONFIG_RTE_KNI_KMOD) += kni
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/kernel/linux/igb_uio/Kbuild b/kernel/linux/igb_uio/Kbuild
new file mode 100644
index 00000000..3ab85c41
--- /dev/null
+++ b/kernel/linux/igb_uio/Kbuild
@@ -0,0 +1,2 @@
+ccflags-y := $(MODULE_CFLAGS)
+obj-m := igb_uio.o
diff --git a/lib/librte_eal/linuxapp/igb_uio/Makefile b/kernel/linux/igb_uio/Makefile
index f83bcc7c..f83bcc7c 100644
--- a/lib/librte_eal/linuxapp/igb_uio/Makefile
+++ b/kernel/linux/igb_uio/Makefile
diff --git a/lib/librte_eal/linuxapp/igb_uio/compat.h b/kernel/linux/igb_uio/compat.h
index ce456d4b..8dbb896a 100644
--- a/lib/librte_eal/linuxapp/igb_uio/compat.h
+++ b/kernel/linux/igb_uio/compat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Minimal wrappers to allow compiling igb_uio on older kernels.
*/
@@ -121,8 +122,8 @@ static bool pci_check_and_mask_intx(struct pci_dev *pdev)
#endif /* < 3.3.0 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
-#define HAVE_ALLOC_IRQ_VECTORS 1
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+#define HAVE_PCI_IS_BRIDGE_API 1
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
@@ -132,3 +133,22 @@ static bool pci_check_and_mask_intx(struct pci_dev *pdev)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
#define HAVE_PCI_MSI_MASK_IRQ 1
#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+#define HAVE_ALLOC_IRQ_VECTORS 1
+#endif
+
+static inline bool igbuio_kernel_is_locked_down(void)
+{
+#ifdef CONFIG_LOCK_DOWN_KERNEL
+#ifdef CONFIG_LOCK_DOWN_IN_EFI_SECURE_BOOT
+ return kernel_is_locked_down(NULL);
+#elif defined(CONFIG_EFI_SECURE_BOOT_LOCK_DOWN)
+ return kernel_is_locked_down();
+#else
+ return false;
+#endif
+#else
+ return false;
+#endif
+}
diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/kernel/linux/igb_uio/igb_uio.c
index 4cae4dd2..3398eacd 100644
--- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
+++ b/kernel/linux/igb_uio/igb_uio.c
@@ -30,6 +30,7 @@ struct rte_uio_pci_dev {
int refcnt;
};
+static int wc_activate;
static char *intr_mode;
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
/* sriov sysfs */
@@ -375,9 +376,13 @@ igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
len = pci_resource_len(dev, pci_bar);
if (addr == 0 || len == 0)
return -1;
- internal_addr = ioremap(addr, len);
- if (internal_addr == NULL)
- return -1;
+ if (wc_activate == 0) {
+ internal_addr = ioremap(addr, len);
+ if (internal_addr == NULL)
+ return -1;
+ } else {
+ internal_addr = NULL;
+ }
info->mem[n].name = name;
info->mem[n].addr = addr;
info->mem[n].internal_addr = internal_addr;
@@ -473,6 +478,13 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
void *map_addr;
int err;
+#ifdef HAVE_PCI_IS_BRIDGE_API
+ if (pci_is_bridge(dev)) {
+ dev_warn(&dev->dev, "Ignoring PCI bridge device\n");
+ return -ENODEV;
+ }
+#endif
+
udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
if (!udev)
return -ENOMEM;
@@ -614,6 +626,14 @@ igbuio_pci_init_module(void)
{
int ret;
+ if (igbuio_kernel_is_locked_down()) {
+ pr_err("Not able to use module, kernel lock down is enabled\n");
+ return -EINVAL;
+ }
+
+ if (wc_activate != 0)
+ pr_info("wc_activate is set\n");
+
ret = igbuio_config_intr_mode(intr_mode);
if (ret < 0)
return ret;
@@ -638,6 +658,12 @@ MODULE_PARM_DESC(intr_mode,
" " RTE_INTR_MODE_LEGACY_NAME " Use Legacy interrupt\n"
"\n");
+module_param(wc_activate, int, 0);
+MODULE_PARM_DESC(wc_activate,
+"Activate support for write combining (WC) (default=0)\n"
+" 0 - disable\n"
+" other - enable\n");
+
MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
diff --git a/lib/librte_eal/linuxapp/igb_uio/meson.build b/kernel/linux/igb_uio/meson.build
index 257ef631..71ed2e7a 100644
--- a/lib/librte_eal/linuxapp/igb_uio/meson.build
+++ b/kernel/linux/igb_uio/meson.build
@@ -1,12 +1,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-kernel_dir = get_option('kernel_dir')
-if kernel_dir == ''
- kernel_version = run_command('uname', '-r').stdout().strip()
- kernel_dir = '/lib/modules/' + kernel_version + '/build'
-endif
-
mkfile = custom_target('igb_uio_makefile',
output: 'Makefile',
command: ['touch', '@OUTPUT@'])
@@ -18,7 +12,7 @@ custom_target('igb_uio',
'M=' + meson.current_build_dir(),
'src=' + meson.current_source_dir(),
'EXTRA_CFLAGS=-I' + meson.current_source_dir() +
- '/../../common/include',
+ '/../../../lib/librte_eal/common/include',
'modules'],
depends: mkfile,
build_by_default: get_option('enable_kmods'))
diff --git a/lib/librte_eal/linuxapp/kni/Makefile b/kernel/linux/kni/Makefile
index 282be7b6..282be7b6 100644
--- a/lib/librte_eal/linuxapp/kni/Makefile
+++ b/kernel/linux/kni/Makefile
diff --git a/lib/librte_eal/linuxapp/kni/compat.h b/kernel/linux/kni/compat.h
index 3f8c0bc8..5aadebbc 100644
--- a/lib/librte_eal/linuxapp/kni/compat.h
+++ b/kernel/linux/kni/compat.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Minimal wrappers to allow compiling kni on older kernels.
*/
@@ -101,6 +102,11 @@
#undef NET_NAME_UNKNOWN
#endif
+#if (defined(RHEL_RELEASE_CODE) && \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5)))
+#define ndo_change_mtu ndo_change_mtu_rh74
+#endif
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
#define HAVE_SIGNAL_FUNCTIONS_OWN_HEADER
#endif
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/README b/kernel/linux/kni/ethtool/README
index af36738a..af36738a 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/README
+++ b/kernel/linux/kni/ethtool/README
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c b/kernel/linux/kni/ethtool/igb/e1000_82575.c
index 98346709..98346709 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.c
+++ b/kernel/linux/kni/ethtool/igb/e1000_82575.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h b/kernel/linux/kni/ethtool/igb/e1000_82575.h
index 2e0dbb2f..2e0dbb2f 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_82575.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_82575.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c b/kernel/linux/kni/ethtool/igb/e1000_api.c
index 3e54e50e..3e54e50e 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.c
+++ b/kernel/linux/kni/ethtool/igb/e1000_api.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h b/kernel/linux/kni/ethtool/igb/e1000_api.h
index 0bc00acd..0bc00acd 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_api.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_api.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h b/kernel/linux/kni/ethtool/igb/e1000_defines.h
index b39aaf80..b39aaf80 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_defines.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_defines.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h b/kernel/linux/kni/ethtool/igb/e1000_hw.h
index ed43ef5a..ed43ef5a 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_hw.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_hw.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c b/kernel/linux/kni/ethtool/igb/e1000_i210.c
index a4fabc3a..a4fabc3a 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.c
+++ b/kernel/linux/kni/ethtool/igb/e1000_i210.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h b/kernel/linux/kni/ethtool/igb/e1000_i210.h
index 9df7c203..9df7c203 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_i210.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_i210.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c b/kernel/linux/kni/ethtool/igb/e1000_mac.c
index 13a42267..13a42267 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.c
+++ b/kernel/linux/kni/ethtool/igb/e1000_mac.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h b/kernel/linux/kni/ethtool/igb/e1000_mac.h
index a3e78498..a3e78498 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mac.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_mac.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c b/kernel/linux/kni/ethtool/igb/e1000_manage.c
index 2f75bc35..2f75bc35 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.c
+++ b/kernel/linux/kni/ethtool/igb/e1000_manage.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h b/kernel/linux/kni/ethtool/igb/e1000_manage.h
index 9f27b934..9f27b934 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_manage.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_manage.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c b/kernel/linux/kni/ethtool/igb/e1000_mbx.c
index 1be44349..1be44349 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.c
+++ b/kernel/linux/kni/ethtool/igb/e1000_mbx.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h b/kernel/linux/kni/ethtool/igb/e1000_mbx.h
index 5951f18f..5951f18f 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_mbx.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_mbx.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c b/kernel/linux/kni/ethtool/igb/e1000_nvm.c
index 78c3fc0e..78c3fc0e 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.c
+++ b/kernel/linux/kni/ethtool/igb/e1000_nvm.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h b/kernel/linux/kni/ethtool/igb/e1000_nvm.h
index e27b1c0a..e27b1c0a 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_nvm.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_nvm.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h b/kernel/linux/kni/ethtool/igb/e1000_osdep.h
index 3228100e..3228100e 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_osdep.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_osdep.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c b/kernel/linux/kni/ethtool/igb/e1000_phy.c
index 1934a309..1934a309 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.c
+++ b/kernel/linux/kni/ethtool/igb/e1000_phy.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h b/kernel/linux/kni/ethtool/igb/e1000_phy.h
index 67e9ba77..67e9ba77 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_phy.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_phy.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h b/kernel/linux/kni/ethtool/igb/e1000_regs.h
index f5c7e031..f5c7e031 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/e1000_regs.h
+++ b/kernel/linux/kni/ethtool/igb/e1000_regs.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h b/kernel/linux/kni/ethtool/igb/igb.h
index 8aa2a308..8aa2a308 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h
+++ b/kernel/linux/kni/ethtool/igb/igb.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c b/kernel/linux/kni/ethtool/igb/igb_ethtool.c
index 064528bc..002f75c4 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
+++ b/kernel/linux/kni/ethtool/igb/igb_ethtool.c
@@ -811,9 +811,10 @@ static void igb_get_drvinfo(struct net_device *netdev,
strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
strncpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version) - 1);
- strncpy(drvinfo->fw_version, adapter->fw_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info) -1);
+ strlcpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
drvinfo->regdump_len = igb_get_regs_len(netdev);
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c b/kernel/linux/kni/ethtool/igb/igb_main.c
index af378d2f..af378d2f 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_main.c
+++ b/kernel/linux/kni/ethtool/igb/igb_main.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c b/kernel/linux/kni/ethtool/igb/igb_param.c
index 98209a10..98209a10 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_param.c
+++ b/kernel/linux/kni/ethtool/igb/igb_param.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h b/kernel/linux/kni/ethtool/igb/igb_regtest.h
index ec2b86a0..ec2b86a0 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_regtest.h
+++ b/kernel/linux/kni/ethtool/igb/igb_regtest.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c b/kernel/linux/kni/ethtool/igb/igb_vmdq.c
index cdd807b9..cdd807b9 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.c
+++ b/kernel/linux/kni/ethtool/igb/igb_vmdq.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h b/kernel/linux/kni/ethtool/igb/igb_vmdq.h
index e68c48cf..e68c48cf 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_vmdq.h
+++ b/kernel/linux/kni/ethtool/igb/igb_vmdq.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/kernel/linux/kni/ethtool/igb/kcompat.h
index fd3175b5..ae1b5309 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h
+++ b/kernel/linux/kni/ethtool/igb/kcompat.h
@@ -3900,7 +3900,8 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
#endif /* >= 4.1.0 */
-#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) )
+#if (( LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) ) \
+ || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4) ))
/* ndo_bridge_getlink adds new filter_mask and vlan_fill parameters */
#define HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL
#endif /* >= 4.2.0 */
@@ -3918,9 +3919,20 @@ skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
#endif
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) || \
- (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)))
+ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)) || \
+ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)))
#define HAVE_VF_VLAN_PROTO
-#endif /* >= 4.9.0, >= SLES12SP3 */
+#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
+/* In RHEL/Centos 7.4, the "new" version of ndo_set_vf_vlan
+ * is in the struct net_device_ops_extended */
+#define ndo_set_vf_vlan extended.ndo_set_vf_vlan
+#endif
+#endif
+
+#if (defined(RHEL_RELEASE_CODE) && \
+ (RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE))
+#define ndo_change_mtu ndo_change_mtu_rh74
+#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
#define HAVE_PCI_ENABLE_MSIX
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe.h
index 6ff94133..6ff94133 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c b/kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.c
index 242de671..242de671 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.c
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.h
index 9a8c670a..9a8c670a 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82598.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_82598.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c b/kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.c
index 3f159123..3f159123 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.c
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.h
index 0305ed73..0305ed73 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_82599.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_82599.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c b/kernel/linux/kni/ethtool/ixgbe/ixgbe_api.c
index 1be4c64f..1be4c64f 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.c
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_api.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_api.h
index 11247a0b..11247a0b 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_api.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_api.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c b/kernel/linux/kni/ethtool/ixgbe/ixgbe_common.c
index e9b9529a..e9b9529a 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.c
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_common.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_common.h
index 2989a80b..2989a80b 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_common.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_common.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_dcb.h
index e9a099d5..e9a099d5 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_dcb.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_dcb.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c b/kernel/linux/kni/ethtool/ixgbe/ixgbe_ethtool.c
index 44cdc9f2..44cdc9f2 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_ethtool.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_fcoe.h
index eec86cbb..eec86cbb 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_fcoe.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_fcoe.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c b/kernel/linux/kni/ethtool/ixgbe/ixgbe_main.c
index a5acf19c..a5acf19c 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_main.c
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_main.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_mbx.h
index 53ace941..53ace941 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_mbx.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_mbx.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_osdep.h
index 7b3f8c51..7b3f8c51 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_osdep.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_osdep.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c b/kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.c
index a47a2ff8..a47a2ff8 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.c
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.h
index 6baa9acb..6baa9acb 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_phy.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_phy.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_type.h
index 0689590e..0689590e 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_type.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_type.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c b/kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.c
index 07b219a1..07b219a1 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.c
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h b/kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.h
index 96020911..96020911 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_x540.h
+++ b/kernel/linux/kni/ethtool/ixgbe/ixgbe_x540.h
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c b/kernel/linux/kni/ethtool/ixgbe/kcompat.c
index 6c994576..6c994576 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.c
+++ b/kernel/linux/kni/ethtool/ixgbe/kcompat.c
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h b/kernel/linux/kni/ethtool/ixgbe/kcompat.h
index 7c7d6c31..7c7d6c31 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/kcompat.h
+++ b/kernel/linux/kni/ethtool/ixgbe/kcompat.h
diff --git a/lib/librte_eal/linuxapp/kni/kni_dev.h b/kernel/linux/kni/kni_dev.h
index c9393d89..6275ef27 100644
--- a/lib/librte_eal/linuxapp/kni/kni_dev.h
+++ b/kernel/linux/kni/kni_dev.h
@@ -92,6 +92,7 @@ struct kni_dev {
void *alloc_va[MBUF_BURST_SZ];
};
+void kni_net_release_fifo_phy(struct kni_dev *kni);
void kni_net_rx(struct kni_dev *kni);
void kni_net_init(struct net_device *dev);
void kni_net_config_lo_mode(char *lo_str);
diff --git a/lib/librte_eal/linuxapp/kni/kni_ethtool.c b/kernel/linux/kni/kni_ethtool.c
index a44e7d94..a44e7d94 100644
--- a/lib/librte_eal/linuxapp/kni/kni_ethtool.c
+++ b/kernel/linux/kni/kni_ethtool.c
diff --git a/lib/librte_eal/linuxapp/kni/kni_fifo.h b/kernel/linux/kni/kni_fifo.h
index 9a4762de..9a4762de 100644
--- a/lib/librte_eal/linuxapp/kni/kni_fifo.h
+++ b/kernel/linux/kni/kni_fifo.h
diff --git a/lib/librte_eal/linuxapp/kni/kni_misc.c b/kernel/linux/kni/kni_misc.c
index 01574ecf..fa69f8e6 100644
--- a/lib/librte_eal/linuxapp/kni/kni_misc.c
+++ b/kernel/linux/kni/kni_misc.c
@@ -192,6 +192,8 @@ kni_dev_remove(struct kni_dev *dev)
free_netdev(dev->net_dev);
}
+ kni_net_release_fifo_phy(dev);
+
return 0;
}
diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/kernel/linux/kni/kni_net.c
index 9f9b798c..7fcfa106 100644
--- a/lib/librte_eal/linuxapp/kni/kni_net.c
+++ b/kernel/linux/kni/kni_net.c
@@ -163,6 +163,46 @@ kni_net_release(struct net_device *dev)
return (ret == 0) ? req.result : ret;
}
+static void
+kni_fifo_trans_pa2va(struct kni_dev *kni,
+ struct rte_kni_fifo *src_pa, struct rte_kni_fifo *dst_va)
+{
+ uint32_t ret, i, num_dst, num_rx;
+ void *kva;
+ do {
+ num_dst = kni_fifo_free_count(dst_va);
+ if (num_dst == 0)
+ return;
+
+ num_rx = min_t(uint32_t, num_dst, MBUF_BURST_SZ);
+
+ num_rx = kni_fifo_get(src_pa, kni->pa, num_rx);
+ if (num_rx == 0)
+ return;
+
+ for (i = 0; i < num_rx; i++) {
+ kva = pa2kva(kni->pa[i]);
+ kni->va[i] = pa2va(kni->pa[i], kva);
+ }
+
+ ret = kni_fifo_put(dst_va, kni->va, num_rx);
+ if (ret != num_rx) {
+ /* Failure should not happen */
+ pr_err("Fail to enqueue entries into dst_va\n");
+ return;
+ }
+ } while (1);
+}
+
+/* Try to release mbufs when the KNI device is released */
+void kni_net_release_fifo_phy(struct kni_dev *kni)
+{
+ /* release rx_q first, because it can't be released in userspace */
+ kni_fifo_trans_pa2va(kni, kni->rx_q, kni->free_q);
+ /* release alloc_q to speed up KNI release in userspace */
+ kni_fifo_trans_pa2va(kni, kni->alloc_q, kni->free_q);
+}
+
/*
* Configuration changes (passed on by ifconfig)
*/
diff --git a/kernel/linux/meson.build b/kernel/linux/meson.build
new file mode 100644
index 00000000..a924c7b6
--- /dev/null
+++ b/kernel/linux/meson.build
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+subdirs = ['igb_uio']
+
+WARN_CROSS_COMPILE='Need "kernel_dir" option for kmod compilation when cross-compiling'
+WARN_NO_HEADERS='Cannot compile kernel modules as requested - are kernel headers installed?'
+
+# if we are cross-compiling we need kernel_dir specified
+# NOTE: the warning() function is only available from Meson 0.44 onwards
+if get_option('kernel_dir') == '' and meson.is_cross_build()
+ if meson.version().version_compare('>=0.44')
+ warning(WARN_CROSS_COMPILE)
+ else
+ message('WARNING: ' + WARN_CROSS_COMPILE)
+ endif
+else
+
+ kernel_dir = get_option('kernel_dir')
+ if kernel_dir == ''
+ # use default path for native builds
+ kernel_version = run_command('uname', '-r').stdout().strip()
+ kernel_dir = '/lib/modules/' + kernel_version + '/build'
+ endif
+
+ # test running make in kernel directory, using "make kernelversion"
+ make_returncode = run_command('make', '-sC', kernel_dir,
+ 'kernelversion').returncode()
+ if make_returncode != 0
+ if meson.version().version_compare('>=0.44')
+ warning(WARN_NO_HEADERS)
+ else
+ message('WARNING: ' + WARN_NO_HEADERS)
+ endif
+ else # returncode == 0
+
+ # do the actual module building
+ foreach d:subdirs
+ subdir(d)
+ endforeach
+ endif
+endif
diff --git a/kernel/meson.build b/kernel/meson.build
new file mode 100644
index 00000000..2c8fa76d
--- /dev/null
+++ b/kernel/meson.build
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+subdir(host_machine.system())
diff --git a/lib/Makefile b/lib/Makefile
index ec965a60..afa604e2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -4,7 +4,10 @@
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-y += librte_compat
+DIRS-$(CONFIG_RTE_LIBRTE_KVARGS) += librte_kvargs
+DEPDIRS-librte_kvargs := librte_compat
DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
+DEPDIRS-librte_eal := librte_kvargs
DIRS-$(CONFIG_RTE_LIBRTE_PCI) += librte_pci
DEPDIRS-librte_pci := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
@@ -18,9 +21,10 @@ DEPDIRS-librte_timer := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_CFGFILE) += librte_cfgfile
DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
DEPDIRS-librte_cmdline := librte_eal
-DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
-DEPDIRS-librte_ether := librte_net librte_eal librte_mempool librte_ring
-DEPDIRS-librte_ether += librte_mbuf
+DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ethdev
+DEPDIRS-librte_ethdev := librte_net librte_eal librte_mempool librte_ring
+DEPDIRS-librte_ethdev += librte_mbuf
+DEPDIRS-librte_ethdev += librte_kvargs
DIRS-$(CONFIG_RTE_LIBRTE_BBDEV) += librte_bbdev
DEPDIRS-librte_bbdev := librte_eal librte_mempool librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
@@ -28,14 +32,18 @@ DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf
DEPDIRS-librte_cryptodev += librte_kvargs
DIRS-$(CONFIG_RTE_LIBRTE_SECURITY) += librte_security
DEPDIRS-librte_security := librte_eal librte_mempool librte_ring librte_mbuf
-DEPDIRS-librte_security += librte_ether
+DEPDIRS-librte_security += librte_ethdev
DEPDIRS-librte_security += librte_cryptodev
+DIRS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += librte_compressdev
+DEPDIRS-librte_compressdev := librte_eal librte_mempool librte_ring librte_mbuf
+DEPDIRS-librte_compressdev += librte_kvargs
DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev
-DEPDIRS-librte_eventdev := librte_eal librte_ring librte_ether librte_hash
+DEPDIRS-librte_eventdev := librte_eal librte_ring librte_ethdev librte_hash \
+ librte_mempool librte_timer librte_cryptodev
DIRS-$(CONFIG_RTE_LIBRTE_RAWDEV) += librte_rawdev
-DEPDIRS-librte_rawdev := librte_eal librte_ether
+DEPDIRS-librte_rawdev := librte_eal librte_ethdev
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost
-DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ether \
+DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ethdev \
librte_net
DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
DEPDIRS-librte_hash := librte_eal librte_ring
@@ -50,18 +58,18 @@ DEPDIRS-librte_member := librte_eal librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_NET) += librte_net
DEPDIRS-librte_net := librte_mbuf librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += librte_ip_frag
-DEPDIRS-librte_ip_frag := librte_eal librte_mempool librte_mbuf librte_ether
+DEPDIRS-librte_ip_frag := librte_eal librte_mempool librte_mbuf librte_ethdev
DEPDIRS-librte_ip_frag += librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_GRO) += librte_gro
-DEPDIRS-librte_gro := librte_eal librte_mbuf librte_ether librte_net
+DEPDIRS-librte_gro := librte_eal librte_mbuf librte_ethdev librte_net
DIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += librte_jobstats
DEPDIRS-librte_jobstats := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_METRICS) += librte_metrics
DEPDIRS-librte_metrics := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_BITRATE) += librte_bitratestats
-DEPDIRS-librte_bitratestats := librte_eal librte_metrics librte_ether
+DEPDIRS-librte_bitratestats := librte_eal librte_metrics librte_ethdev
DIRS-$(CONFIG_RTE_LIBRTE_LATENCY_STATS) += librte_latencystats
-DEPDIRS-librte_latencystats := librte_eal librte_metrics librte_ether librte_mbuf
+DEPDIRS-librte_latencystats := librte_eal librte_metrics librte_ethdev librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_POWER) += librte_power
DEPDIRS-librte_power := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_METER) += librte_meter
@@ -71,12 +79,10 @@ DEPDIRS-librte_flow_classify := librte_net librte_table librte_acl
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += librte_sched
DEPDIRS-librte_sched := librte_eal librte_mempool librte_mbuf librte_net
DEPDIRS-librte_sched += librte_timer
-DIRS-$(CONFIG_RTE_LIBRTE_KVARGS) += librte_kvargs
-DEPDIRS-librte_kvargs := librte_eal
DIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += librte_distributor
-DEPDIRS-librte_distributor := librte_eal librte_mbuf librte_ether
+DEPDIRS-librte_distributor := librte_eal librte_mbuf librte_ethdev
DIRS-$(CONFIG_RTE_LIBRTE_PORT) += librte_port
-DEPDIRS-librte_port := librte_eal librte_mempool librte_mbuf librte_ether
+DEPDIRS-librte_port := librte_eal librte_mempool librte_mbuf librte_ethdev
DEPDIRS-librte_port += librte_ip_frag librte_sched
ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
DEPDIRS-librte_port += librte_kni
@@ -93,15 +99,17 @@ DEPDIRS-librte_pipeline += librte_table librte_port
DIRS-$(CONFIG_RTE_LIBRTE_REORDER) += librte_reorder
DEPDIRS-librte_reorder := librte_eal librte_mempool librte_mbuf
DIRS-$(CONFIG_RTE_LIBRTE_PDUMP) += librte_pdump
-DEPDIRS-librte_pdump := librte_eal librte_mempool librte_mbuf librte_ether
+DEPDIRS-librte_pdump := librte_eal librte_mempool librte_mbuf librte_ethdev
DIRS-$(CONFIG_RTE_LIBRTE_GSO) += librte_gso
-DEPDIRS-librte_gso := librte_eal librte_mbuf librte_ether librte_net
+DEPDIRS-librte_gso := librte_eal librte_mbuf librte_ethdev librte_net
DEPDIRS-librte_gso += librte_mempool
+DIRS-$(CONFIG_RTE_LIBRTE_BPF) += librte_bpf
+DEPDIRS-librte_bpf := librte_eal librte_mempool librte_mbuf librte_ethdev
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
DIRS-$(CONFIG_RTE_LIBRTE_KNI) += librte_kni
endif
-DEPDIRS-librte_kni := librte_eal librte_mempool librte_mbuf librte_ether
+DEPDIRS-librte_kni := librte_eal librte_mempool librte_mbuf librte_ethdev
DEPDIRS-librte_kni += librte_pci
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_bbdev/rte_bbdev.c b/lib/librte_bbdev/rte_bbdev.c
index 74ecc490..c4cc18d9 100644
--- a/lib/librte_bbdev/rte_bbdev.c
+++ b/lib/librte_bbdev/rte_bbdev.c
@@ -495,11 +495,20 @@ rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
conf->queue_size, queue_id, dev_id);
return -EINVAL;
}
- if (conf->priority > dev_info.max_queue_priority) {
+ if (conf->op_type == RTE_BBDEV_OP_TURBO_DEC &&
+ conf->priority > dev_info.max_ul_queue_priority) {
rte_bbdev_log(ERR,
"Priority (%u) of queue %u of bdev %u must be <= %u",
conf->priority, queue_id, dev_id,
- dev_info.max_queue_priority);
+ dev_info.max_ul_queue_priority);
+ return -EINVAL;
+ }
+ if (conf->op_type == RTE_BBDEV_OP_TURBO_ENC &&
+ conf->priority > dev_info.max_dl_queue_priority) {
+ rte_bbdev_log(ERR,
+ "Priority (%u) of queue %u of bdev %u must be <= %u",
+ conf->priority, queue_id, dev_id,
+ dev_info.max_dl_queue_priority);
return -EINVAL;
}
}
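With the split limits, an application queries the per-direction maximum before configuring a queue. A minimal sketch, assuming the rte_bbdev_queue_conf field names from rte_bbdev.h (socket, queue_size, priority, op_type):

static int
configure_ul_queue(uint16_t dev_id, uint16_t queue_id)
{
        struct rte_bbdev_info info;
        struct rte_bbdev_queue_conf qconf = {0};

        rte_bbdev_info_get(dev_id, &info);
        qconf.socket = rte_socket_id();
        qconf.queue_size = info.drv.queue_size_lim;
        qconf.op_type = RTE_BBDEV_OP_TURBO_DEC;
        /* decode (UL) queues are validated against max_ul_queue_priority */
        qconf.priority = info.drv.max_ul_queue_priority;

        /* a priority above the per-direction limit now yields -EINVAL */
        return rte_bbdev_queue_configure(dev_id, queue_id, &qconf);
}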
@@ -1116,9 +1125,7 @@ rte_bbdev_op_type_str(enum rte_bbdev_op_type op_type)
return NULL;
}
-RTE_INIT(rte_bbdev_init_log);
-static void
-rte_bbdev_init_log(void)
+RTE_INIT(rte_bbdev_init_log)
{
bbdev_logtype = rte_log_register("lib.bbdev");
if (bbdev_logtype >= 0)
diff --git a/lib/librte_bbdev/rte_bbdev.h b/lib/librte_bbdev/rte_bbdev.h
index 5e7e4954..25ef409f 100644
--- a/lib/librte_bbdev/rte_bbdev.h
+++ b/lib/librte_bbdev/rte_bbdev.h
@@ -239,6 +239,8 @@ struct rte_bbdev_stats {
uint64_t enqueue_err_count;
/** Total error count on operations dequeued */
uint64_t dequeue_err_count;
+ /** Offload time */
+ uint64_t offload_time;
};
/**
@@ -279,8 +281,10 @@ struct rte_bbdev_driver_info {
uint32_t queue_size_lim;
/** Set if device off-loads operation to hardware */
bool hardware_accelerated;
- /** Max value supported by queue priority */
- uint8_t max_queue_priority;
+ /** Max value supported by queue priority for DL */
+ uint8_t max_dl_queue_priority;
+ /** Max value supported by queue priority for UL */
+ uint8_t max_ul_queue_priority;
/** Set if device supports per-queue interrupts */
bool queue_intr_supported;
/** Minimum alignment of buffers, in bytes */
diff --git a/lib/librte_bbdev/rte_bbdev_op.h b/lib/librte_bbdev/rte_bbdev_op.h
index 9a80c64a..83f62c2d 100644
--- a/lib/librte_bbdev/rte_bbdev_op.h
+++ b/lib/librte_bbdev/rte_bbdev_op.h
@@ -25,7 +25,23 @@ extern "C" {
#include <rte_memory.h>
#include <rte_mempool.h>
-#define RTE_BBDEV_MAX_CODE_BLOCKS 64
+/* Number of columns in sub-block interleaver (36.212, section 5.1.4.1.1) */
+#define RTE_BBDEV_C_SUBBLOCK (32)
+/* Maximum size of Transport Block (36.213, Table 7.1.7.2.5-1) */
+#define RTE_BBDEV_MAX_TB_SIZE (391656)
+/* Maximum size of Code Block (36.212, Table 5.1.3-3) */
+#define RTE_BBDEV_MAX_CB_SIZE (6144)
+/* Minimum size of Code Block (36.212, Table 5.1.3-3) */
+#define RTE_BBDEV_MIN_CB_SIZE (40)
+/* Maximum size of circular buffer */
+#define RTE_BBDEV_MAX_KW (18528)
+/*
+ * Maximum number of Code Blocks in a Transport Block. It is calculated from
+ * the maximum sizes of one Code Block and one Transport Block (considering
+ * CRC24A and CRC24B):
+ * (391656 + 24) / (6144 - 24) = 64
+ */
+#define RTE_BBDEV_MAX_CODE_BLOCKS (64)
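The 64-block bound just restates the arithmetic in the comment; a compile-time check along these lines (not part of the patch, assumes a C11 compiler) would catch any drift between the constants:

/* (391656 + 24) / (6144 - 24) == 64, using integer division */
_Static_assert((RTE_BBDEV_MAX_TB_SIZE + 24) / (RTE_BBDEV_MAX_CB_SIZE - 24) ==
        RTE_BBDEV_MAX_CODE_BLOCKS, "RTE_BBDEV_MAX_CODE_BLOCKS derivation");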
/** Flags for turbo decoder operation and capability structure */
enum rte_bbdev_op_td_flag_bitmasks {
@@ -86,7 +102,11 @@ enum rte_bbdev_op_td_flag_bitmasks {
*/
RTE_BBDEV_TURBO_MAP_DEC = (1ULL << 14),
/**< Set if a device supports scatter-gather functionality */
- RTE_BBDEV_TURBO_DEC_SCATTER_GATHER = (1ULL << 15)
+ RTE_BBDEV_TURBO_DEC_SCATTER_GATHER = (1ULL << 15),
+ /**< Set to keep CRC24B bits appended while decoding. Only usable when
+ * decoding Transport Blocks (code_block_mode = 0).
+ */
+ RTE_BBDEV_TURBO_DEC_TB_CRC_24B_KEEP = (1ULL << 16)
};
/** Flags for turbo encoder operation and capability structure */
@@ -363,6 +383,10 @@ struct rte_bbdev_op_turbo_enc {
struct rte_bbdev_op_cap_turbo_dec {
/**< Flags from rte_bbdev_op_td_flag_bitmasks */
uint32_t capability_flags;
+ /** Maximal LLR absolute value. Acceptable LLR values lie in range
+ * [-max_llr_modulus, max_llr_modulus].
+ */
+ int8_t max_llr_modulus;
uint8_t num_buffers_src; /**< Num input code block buffers */
/**< Num hard output code block buffers */
uint8_t num_buffers_hard_out;
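The new max_llr_modulus field lets an application saturate its soft-decision inputs to the range the driver accepts before enqueueing decode operations. A minimal sketch (the helper name is ours, not part of the API):

static inline int8_t
llr_saturate(int8_t v, int8_t max_llr_modulus)
{
        /* clamp into [-max_llr_modulus, max_llr_modulus] */
        if (v > max_llr_modulus)
                return max_llr_modulus;
        if (v < -max_llr_modulus)
                return -max_llr_modulus;
        return v;
}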
diff --git a/lib/librte_bitratestats/meson.build b/lib/librte_bitratestats/meson.build
index ede7e0a5..c35b62b3 100644
--- a/lib/librte_bitratestats/meson.build
+++ b/lib/librte_bitratestats/meson.build
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
sources = files('rte_bitrate.c')
headers = files('rte_bitrate.h')
deps += ['ethdev', 'metrics']
diff --git a/lib/librte_bitratestats/rte_bitrate.c b/lib/librte_bitratestats/rte_bitrate.c
index 964e3c39..c4b28f62 100644
--- a/lib/librte_bitratestats/rte_bitrate.c
+++ b/lib/librte_bitratestats/rte_bitrate.c
@@ -47,6 +47,9 @@ rte_stats_bitrate_reg(struct rte_stats_bitrates *bitrate_data)
};
int return_value;
+ if (bitrate_data == NULL)
+ return -EINVAL;
+
return_value = rte_metrics_reg_names(&names[0], ARRAY_SIZE(names));
if (return_value >= 0)
bitrate_data->id_stats_set = return_value;
@@ -65,6 +68,9 @@ rte_stats_bitrate_calc(struct rte_stats_bitrates *bitrate_data,
const int64_t alpha_percent = 20;
uint64_t values[6];
+ if (bitrate_data == NULL)
+ return -EINVAL;
+
ret_code = rte_eth_stats_get(port_id, &eth_stats);
if (ret_code != 0)
return ret_code;
diff --git a/lib/librte_bpf/Makefile b/lib/librte_bpf/Makefile
new file mode 100644
index 00000000..c0e8aaa6
--- /dev/null
+++ b/lib/librte_bpf/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_bpf.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDLIBS += -lrte_net -lrte_eal
+LDLIBS += -lrte_mempool -lrte_ring
+LDLIBS += -lrte_mbuf -lrte_ethdev
+ifeq ($(CONFIG_RTE_LIBRTE_BPF_ELF),y)
+LDLIBS += -lelf
+endif
+
+EXPORT_MAP := rte_bpf_version.map
+
+LIBABIVER := 1
+
+# all sources are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf.c
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_exec.c
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_load.c
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_pkt.c
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_validate.c
+ifeq ($(CONFIG_RTE_LIBRTE_BPF_ELF),y)
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_load_elf.c
+endif
+ifeq ($(CONFIG_RTE_ARCH_X86_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_jit_x86.c
+endif
+
+# install header files
+SYMLINK-$(CONFIG_RTE_LIBRTE_BPF)-include += bpf_def.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_BPF)-include += rte_bpf.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_BPF)-include += rte_bpf_ethdev.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_bpf/bpf.c b/lib/librte_bpf/bpf.c
new file mode 100644
index 00000000..f590c8c3
--- /dev/null
+++ b/lib/librte_bpf/bpf.c
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_eal.h>
+
+#include "bpf_impl.h"
+
+int rte_bpf_logtype;
+
+__rte_experimental void
+rte_bpf_destroy(struct rte_bpf *bpf)
+{
+ if (bpf != NULL) {
+ if (bpf->jit.func != NULL)
+ munmap(bpf->jit.func, bpf->jit.sz);
+ munmap(bpf, bpf->sz);
+ }
+}
+
+__rte_experimental int
+rte_bpf_get_jit(const struct rte_bpf *bpf, struct rte_bpf_jit *jit)
+{
+ if (bpf == NULL || jit == NULL)
+ return -EINVAL;
+
+ jit[0] = bpf->jit;
+ return 0;
+}
+
+int
+bpf_jit(struct rte_bpf *bpf)
+{
+ int32_t rc;
+
+#ifdef RTE_ARCH_X86_64
+ rc = bpf_jit_x86(bpf);
+#else
+ rc = -ENOTSUP;
+#endif
+
+ if (rc != 0)
+ RTE_BPF_LOG(WARNING, "%s(%p) failed, error code: %d;\n",
+ __func__, bpf, rc);
+ return rc;
+}
+
+RTE_INIT(rte_bpf_init_log)
+{
+ rte_bpf_logtype = rte_log_register("lib.bpf");
+ if (rte_bpf_logtype >= 0)
+ rte_log_set_level(rte_bpf_logtype, RTE_LOG_INFO);
+}
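When bpf_jit() has produced native code, a caller can invoke it directly instead of going through the interpreter. A minimal sketch, assuming a successfully loaded struct rte_bpf * (creation is handled in bpf_load.c, not shown here) and the rte_bpf_jit layout from rte_bpf.h (a func pointer plus its size):

struct rte_bpf_jit jit;
uint64_t rc;

if (rte_bpf_get_jit(bpf, &jit) == 0 && jit.func != NULL)
        rc = jit.func(ctx);             /* run the JIT-ed native code */
else
        rc = rte_bpf_exec(bpf, ctx);    /* fall back to the interpreter */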
diff --git a/lib/librte_bpf/bpf_def.h b/lib/librte_bpf/bpf_def.h
new file mode 100644
index 00000000..c10f3aec
--- /dev/null
+++ b/lib/librte_bpf/bpf_def.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 1982, 1986, 1990, 1993
+ * The Regents of the University of California.
+ * Copyright(c) 2018 Intel Corporation.
+ */
+
+#ifndef _RTE_BPF_DEF_H_
+#define _RTE_BPF_DEF_H_
+
+/**
+ * @file
+ *
+ * classic BPF (cBPF) and extended BPF (eBPF) related defines.
+ * For more information regarding cBPF and eBPF ISA and their differences,
+ * please refer to:
+ * https://www.kernel.org/doc/Documentation/networking/filter.txt.
+ * As a rule of thumb for this file:
+ * all definitions used by both cBPF and eBPF start with the bpf(BPF)_ prefix,
+ * while eBPF-only ones start with the ebpf(EBPF)_ prefix.
+ */
+
+#include <stdint.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The instruction encodings.
+ */
+
+/* Instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define BPF_LD 0x00
+#define BPF_LDX 0x01
+#define BPF_ST 0x02
+#define BPF_STX 0x03
+#define BPF_ALU 0x04
+#define BPF_JMP 0x05
+#define BPF_RET 0x06
+#define BPF_MISC 0x07
+
+#define EBPF_ALU64 0x07
+
+/* ld/ldx fields */
+#define BPF_SIZE(code) ((code) & 0x18)
+#define BPF_W 0x00
+#define BPF_H 0x08
+#define BPF_B 0x10
+#define EBPF_DW 0x18
+
+#define BPF_MODE(code) ((code) & 0xe0)
+#define BPF_IMM 0x00
+#define BPF_ABS 0x20
+#define BPF_IND 0x40
+#define BPF_MEM 0x60
+#define BPF_LEN 0x80
+#define BPF_MSH 0xa0
+
+#define EBPF_XADD 0xc0
+
+/* alu/jmp fields */
+#define BPF_OP(code) ((code) & 0xf0)
+#define BPF_ADD 0x00
+#define BPF_SUB 0x10
+#define BPF_MUL 0x20
+#define BPF_DIV 0x30
+#define BPF_OR 0x40
+#define BPF_AND 0x50
+#define BPF_LSH 0x60
+#define BPF_RSH 0x70
+#define BPF_NEG 0x80
+#define BPF_MOD 0x90
+#define BPF_XOR 0xa0
+
+#define EBPF_MOV 0xb0
+#define EBPF_ARSH 0xc0
+#define EBPF_END 0xd0
+
+#define BPF_JA 0x00
+#define BPF_JEQ 0x10
+#define BPF_JGT 0x20
+#define BPF_JGE 0x30
+#define BPF_JSET 0x40
+
+#define EBPF_JNE 0x50
+#define EBPF_JSGT 0x60
+#define EBPF_JSGE 0x70
+#define EBPF_CALL 0x80
+#define EBPF_EXIT 0x90
+#define EBPF_JLT 0xa0
+#define EBPF_JLE 0xb0
+#define EBPF_JSLT 0xc0
+#define EBPF_JSLE 0xd0
+
+#define BPF_SRC(code) ((code) & 0x08)
+#define BPF_K 0x00
+#define BPF_X 0x08
+
+/* if BPF_OP(code) == EBPF_END */
+#define EBPF_TO_LE 0x00 /* convert to little-endian */
+#define EBPF_TO_BE 0x08 /* convert to big-endian */
+
+/*
+ * eBPF registers
+ */
+enum {
+ EBPF_REG_0, /* return value from internal function/eBPF program */
+ EBPF_REG_1, /* 1st argument to internal function */
+ EBPF_REG_2, /* 2nd argument to internal function */
+ EBPF_REG_3, /* 3rd argument to internal function */
+ EBPF_REG_4, /* 4th argument to internal function */
+ EBPF_REG_5, /* 5th argument to internal function */
+ EBPF_REG_6, /* callee saved register */
+ EBPF_REG_7, /* callee saved register */
+ EBPF_REG_8, /* callee saved register */
+ EBPF_REG_9, /* callee saved register */
+ EBPF_REG_10, /* stack pointer (read-only) */
+ EBPF_REG_NUM,
+};
+
+/*
+ * eBPF instruction format
+ */
+struct ebpf_insn {
+ uint8_t code;
+ uint8_t dst_reg:4;
+ uint8_t src_reg:4;
+ int16_t off;
+ int32_t imm;
+};
+
+/*
+ * eBPF allows functions with R1-R5 as arguments.
+ */
+#define EBPF_FUNC_MAX_ARGS (EBPF_REG_6 - EBPF_REG_1)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BPF_DEF_H_ */
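As an illustration of these encodings, a complete two-instruction eBPF program that returns its first argument (r0 = r1; exit) can be written directly with the defines above; this sketch is ours, not part of the patch:

static const struct ebpf_insn ret_arg_prog[] = {
        {
                /* r0 = r1: 64-bit register-to-register move */
                .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
                .dst_reg = EBPF_REG_0,
                .src_reg = EBPF_REG_1,
        },
        {
                /* return r0 */
                .code = (BPF_JMP | EBPF_EXIT),
        },
};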
diff --git a/lib/librte_bpf/bpf_exec.c b/lib/librte_bpf/bpf_exec.c
new file mode 100644
index 00000000..6a79139c
--- /dev/null
+++ b/lib/librte_bpf/bpf_exec.c
@@ -0,0 +1,453 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+
+#include "bpf_impl.h"
+
+#define BPF_JMP_UNC(ins) ((ins) += (ins)->off)
+
+#define BPF_JMP_CND_REG(reg, ins, op, type) \
+ ((ins) += \
+ ((type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg]) ? \
+ (ins)->off : 0)
+
+#define BPF_JMP_CND_IMM(reg, ins, op, type) \
+ ((ins) += \
+ ((type)(reg)[(ins)->dst_reg] op (type)(ins)->imm) ? \
+ (ins)->off : 0)
+
+#define BPF_NEG_ALU(reg, ins, type) \
+ ((reg)[(ins)->dst_reg] = (type)(-(reg)[(ins)->dst_reg]))
+
+#define EBPF_MOV_ALU_REG(reg, ins, type) \
+ ((reg)[(ins)->dst_reg] = (type)(reg)[(ins)->src_reg])
+
+#define BPF_OP_ALU_REG(reg, ins, op, type) \
+ ((reg)[(ins)->dst_reg] = \
+ (type)(reg)[(ins)->dst_reg] op (type)(reg)[(ins)->src_reg])
+
+#define EBPF_MOV_ALU_IMM(reg, ins, type) \
+ ((reg)[(ins)->dst_reg] = (type)(ins)->imm)
+
+#define BPF_OP_ALU_IMM(reg, ins, op, type) \
+ ((reg)[(ins)->dst_reg] = \
+ (type)(reg)[(ins)->dst_reg] op (type)(ins)->imm)
+
+#define BPF_DIV_ZERO_CHECK(bpf, reg, ins, type) do { \
+ if ((type)(reg)[(ins)->src_reg] == 0) { \
+ RTE_BPF_LOG(ERR, \
+ "%s(%p): division by 0 at pc: %#zx;\n", \
+ __func__, bpf, \
+ (uintptr_t)(ins) - (uintptr_t)(bpf)->prm.ins); \
+ return 0; \
+ } \
+} while (0)
+
+#define BPF_LD_REG(reg, ins, type) \
+ ((reg)[(ins)->dst_reg] = \
+ *(type *)(uintptr_t)((reg)[(ins)->src_reg] + (ins)->off))
+
+#define BPF_ST_IMM(reg, ins, type) \
+ (*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
+ (type)(ins)->imm)
+
+#define BPF_ST_REG(reg, ins, type) \
+ (*(type *)(uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off) = \
+ (type)(reg)[(ins)->src_reg])
+
+#define BPF_ST_XADD_REG(reg, ins, tp) \
+ (rte_atomic##tp##_add((rte_atomic##tp##_t *) \
+ (uintptr_t)((reg)[(ins)->dst_reg] + (ins)->off), \
+ reg[ins->src_reg]))
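Each case in the interpreter below expands one of these macros into a single statement over the register file. For reference, the hand expansion of the 32-bit add-immediate case (the helper name is ours, for illustration only):

static inline void
alu32_add_imm(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
{
        /* BPF_OP_ALU_IMM(reg, ins, +, uint32_t): the uint32_t result is
         * zero-extended by the store into the uint64_t register, which
         * matches eBPF 32-bit ALU semantics */
        reg[ins->dst_reg] = (uint32_t)reg[ins->dst_reg] + (uint32_t)ins->imm;
}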
+
+static inline void
+bpf_alu_be(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
+{
+ uint64_t *v;
+
+ v = reg + ins->dst_reg;
+ switch (ins->imm) {
+ case 16:
+ *v = rte_cpu_to_be_16(*v);
+ break;
+ case 32:
+ *v = rte_cpu_to_be_32(*v);
+ break;
+ case 64:
+ *v = rte_cpu_to_be_64(*v);
+ break;
+ }
+}
+
+static inline void
+bpf_alu_le(uint64_t reg[EBPF_REG_NUM], const struct ebpf_insn *ins)
+{
+ uint64_t *v;
+
+ v = reg + ins->dst_reg;
+ switch (ins->imm) {
+ case 16:
+ *v = rte_cpu_to_le_16(*v);
+ break;
+ case 32:
+ *v = rte_cpu_to_le_32(*v);
+ break;
+ case 64:
+ *v = rte_cpu_to_le_64(*v);
+ break;
+ }
+}
+
+static inline uint64_t
+bpf_exec(const struct rte_bpf *bpf, uint64_t reg[EBPF_REG_NUM])
+{
+ const struct ebpf_insn *ins;
+
+ for (ins = bpf->prm.ins; ; ins++) {
+ switch (ins->code) {
+ /* 32 bit ALU IMM operations */
+ case (BPF_ALU | BPF_ADD | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, +, uint32_t);
+ break;
+ case (BPF_ALU | BPF_SUB | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, -, uint32_t);
+ break;
+ case (BPF_ALU | BPF_AND | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, &, uint32_t);
+ break;
+ case (BPF_ALU | BPF_OR | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, |, uint32_t);
+ break;
+ case (BPF_ALU | BPF_LSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, <<, uint32_t);
+ break;
+ case (BPF_ALU | BPF_RSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, >>, uint32_t);
+ break;
+ case (BPF_ALU | BPF_XOR | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, ^, uint32_t);
+ break;
+ case (BPF_ALU | BPF_MUL | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, *, uint32_t);
+ break;
+ case (BPF_ALU | BPF_DIV | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, /, uint32_t);
+ break;
+ case (BPF_ALU | BPF_MOD | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, %, uint32_t);
+ break;
+ case (BPF_ALU | EBPF_MOV | BPF_K):
+ EBPF_MOV_ALU_IMM(reg, ins, uint32_t);
+ break;
+ /* 32 bit ALU REG operations */
+ case (BPF_ALU | BPF_ADD | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, +, uint32_t);
+ break;
+ case (BPF_ALU | BPF_SUB | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, -, uint32_t);
+ break;
+ case (BPF_ALU | BPF_AND | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, &, uint32_t);
+ break;
+ case (BPF_ALU | BPF_OR | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, |, uint32_t);
+ break;
+ case (BPF_ALU | BPF_LSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, <<, uint32_t);
+ break;
+ case (BPF_ALU | BPF_RSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, >>, uint32_t);
+ break;
+ case (BPF_ALU | BPF_XOR | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, ^, uint32_t);
+ break;
+ case (BPF_ALU | BPF_MUL | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, *, uint32_t);
+ break;
+ case (BPF_ALU | BPF_DIV | BPF_X):
+ BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
+ BPF_OP_ALU_REG(reg, ins, /, uint32_t);
+ break;
+ case (BPF_ALU | BPF_MOD | BPF_X):
+ BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint32_t);
+ BPF_OP_ALU_REG(reg, ins, %, uint32_t);
+ break;
+ case (BPF_ALU | EBPF_MOV | BPF_X):
+ EBPF_MOV_ALU_REG(reg, ins, uint32_t);
+ break;
+ case (BPF_ALU | BPF_NEG):
+ BPF_NEG_ALU(reg, ins, uint32_t);
+ break;
+ case (BPF_ALU | EBPF_END | EBPF_TO_BE):
+ bpf_alu_be(reg, ins);
+ break;
+ case (BPF_ALU | EBPF_END | EBPF_TO_LE):
+ bpf_alu_le(reg, ins);
+ break;
+ /* 64 bit ALU IMM operations */
+ case (EBPF_ALU64 | BPF_ADD | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, +, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_SUB | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, -, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_AND | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, &, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_OR | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, |, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_LSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, <<, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_RSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, >>, uint64_t);
+ break;
+ case (EBPF_ALU64 | EBPF_ARSH | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, >>, int64_t);
+ break;
+ case (EBPF_ALU64 | BPF_XOR | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, ^, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_MUL | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, *, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_DIV | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, /, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_MOD | BPF_K):
+ BPF_OP_ALU_IMM(reg, ins, %, uint64_t);
+ break;
+ case (EBPF_ALU64 | EBPF_MOV | BPF_K):
+ EBPF_MOV_ALU_IMM(reg, ins, uint64_t);
+ break;
+ /* 64 bit ALU REG operations */
+ case (EBPF_ALU64 | BPF_ADD | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, +, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_SUB | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, -, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_AND | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, &, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_OR | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, |, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_LSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, <<, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_RSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, >>, uint64_t);
+ break;
+ case (EBPF_ALU64 | EBPF_ARSH | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, >>, int64_t);
+ break;
+ case (EBPF_ALU64 | BPF_XOR | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, ^, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_MUL | BPF_X):
+ BPF_OP_ALU_REG(reg, ins, *, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_DIV | BPF_X):
+ BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
+ BPF_OP_ALU_REG(reg, ins, /, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_MOD | BPF_X):
+ BPF_DIV_ZERO_CHECK(bpf, reg, ins, uint64_t);
+ BPF_OP_ALU_REG(reg, ins, %, uint64_t);
+ break;
+ case (EBPF_ALU64 | EBPF_MOV | BPF_X):
+ EBPF_MOV_ALU_REG(reg, ins, uint64_t);
+ break;
+ case (EBPF_ALU64 | BPF_NEG):
+ BPF_NEG_ALU(reg, ins, uint64_t);
+ break;
+ /* load instructions */
+ case (BPF_LDX | BPF_MEM | BPF_B):
+ BPF_LD_REG(reg, ins, uint8_t);
+ break;
+ case (BPF_LDX | BPF_MEM | BPF_H):
+ BPF_LD_REG(reg, ins, uint16_t);
+ break;
+ case (BPF_LDX | BPF_MEM | BPF_W):
+ BPF_LD_REG(reg, ins, uint32_t);
+ break;
+ case (BPF_LDX | BPF_MEM | EBPF_DW):
+ BPF_LD_REG(reg, ins, uint64_t);
+ break;
+ /* load 64 bit immediate value */
+ case (BPF_LD | BPF_IMM | EBPF_DW):
+ reg[ins->dst_reg] = (uint32_t)ins[0].imm |
+ (uint64_t)(uint32_t)ins[1].imm << 32;
+ ins++;
+ break;
+ /* store instructions */
+ case (BPF_STX | BPF_MEM | BPF_B):
+ BPF_ST_REG(reg, ins, uint8_t);
+ break;
+ case (BPF_STX | BPF_MEM | BPF_H):
+ BPF_ST_REG(reg, ins, uint16_t);
+ break;
+ case (BPF_STX | BPF_MEM | BPF_W):
+ BPF_ST_REG(reg, ins, uint32_t);
+ break;
+ case (BPF_STX | BPF_MEM | EBPF_DW):
+ BPF_ST_REG(reg, ins, uint64_t);
+ break;
+ case (BPF_ST | BPF_MEM | BPF_B):
+ BPF_ST_IMM(reg, ins, uint8_t);
+ break;
+ case (BPF_ST | BPF_MEM | BPF_H):
+ BPF_ST_IMM(reg, ins, uint16_t);
+ break;
+ case (BPF_ST | BPF_MEM | BPF_W):
+ BPF_ST_IMM(reg, ins, uint32_t);
+ break;
+ case (BPF_ST | BPF_MEM | EBPF_DW):
+ BPF_ST_IMM(reg, ins, uint64_t);
+ break;
+ /* atomic add instructions */
+ case (BPF_STX | EBPF_XADD | BPF_W):
+ BPF_ST_XADD_REG(reg, ins, 32);
+ break;
+ case (BPF_STX | EBPF_XADD | EBPF_DW):
+ BPF_ST_XADD_REG(reg, ins, 64);
+ break;
+ /* jump instructions */
+ case (BPF_JMP | BPF_JA):
+ BPF_JMP_UNC(ins);
+ break;
+ /* jump IMM instructions */
+ case (BPF_JMP | BPF_JEQ | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, ==, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JNE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, !=, uint64_t);
+ break;
+ case (BPF_JMP | BPF_JGT | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, >, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JLT | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, <, uint64_t);
+ break;
+ case (BPF_JMP | BPF_JGE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, >=, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JLE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, <=, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JSGT | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, >, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSLT | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, <, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSGE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, >=, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSLE | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, <=, int64_t);
+ break;
+ case (BPF_JMP | BPF_JSET | BPF_K):
+ BPF_JMP_CND_IMM(reg, ins, &, uint64_t);
+ break;
+ /* jump REG instructions */
+ case (BPF_JMP | BPF_JEQ | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, ==, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JNE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, !=, uint64_t);
+ break;
+ case (BPF_JMP | BPF_JGT | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, >, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JLT | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, <, uint64_t);
+ break;
+ case (BPF_JMP | BPF_JGE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, >=, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JLE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, <=, uint64_t);
+ break;
+ case (BPF_JMP | EBPF_JSGT | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, >, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSLT | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, <, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSGE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, >=, int64_t);
+ break;
+ case (BPF_JMP | EBPF_JSLE | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, <=, int64_t);
+ break;
+ case (BPF_JMP | BPF_JSET | BPF_X):
+ BPF_JMP_CND_REG(reg, ins, &, uint64_t);
+ break;
+ /* call instructions */
+ case (BPF_JMP | EBPF_CALL):
+ reg[EBPF_REG_0] = bpf->prm.xsym[ins->imm].func.val(
+ reg[EBPF_REG_1], reg[EBPF_REG_2],
+ reg[EBPF_REG_3], reg[EBPF_REG_4],
+ reg[EBPF_REG_5]);
+ break;
+ /* return instruction */
+ case (BPF_JMP | EBPF_EXIT):
+ return reg[EBPF_REG_0];
+ default:
+ RTE_BPF_LOG(ERR,
+ "%s(%p): invalid opcode %#x at pc: %#zx;\n",
+ __func__, bpf, ins->code,
+ (uintptr_t)ins - (uintptr_t)bpf->prm.ins);
+ return 0;
+ }
+ }
+
+ /* should never be reached */
+ RTE_VERIFY(0);
+ return 0;
+}
+
+__rte_experimental uint32_t
+rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
+ uint32_t num)
+{
+ uint32_t i;
+ uint64_t reg[EBPF_REG_NUM];
+ uint64_t stack[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
+
+ for (i = 0; i != num; i++) {
+
+ reg[EBPF_REG_1] = (uintptr_t)ctx[i];
+ reg[EBPF_REG_10] = (uintptr_t)(stack + RTE_DIM(stack));
+
+ rc[i] = bpf_exec(bpf, reg);
+ }
+
+ return i;
+}
+
+__rte_experimental uint64_t
+rte_bpf_exec(const struct rte_bpf *bpf, void *ctx)
+{
+ uint64_t rc;
+
+ rte_bpf_exec_burst(bpf, &ctx, &rc, 1);
+ return rc;
+}
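End to end, loading and running a program goes through rte_bpf_load() and the execution entry points above. A sketch, assuming the rte_bpf_prm/rte_bpf_load() API declared in rte_bpf.h (not shown in this hunk) and the two-instruction ret_arg_prog from the bpf_def.h example:

struct rte_bpf_prm prm = {
        .ins = ret_arg_prog,
        .nb_ins = RTE_DIM(ret_arg_prog),
        .prog_arg = {
                .type = RTE_BPF_ARG_RAW,        /* ctx passed through as-is */
                .size = sizeof(uint64_t),
        },
};
uint64_t val = 42;
void *ctx = &val;
uint64_t rc;

struct rte_bpf *bpf = rte_bpf_load(&prm);       /* validates, then tries JIT */
if (bpf != NULL) {
        rte_bpf_exec_burst(bpf, &ctx, &rc, 1);  /* rc receives r0, here the ctx pointer */
        rte_bpf_destroy(bpf);
}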
diff --git a/lib/librte_bpf/bpf_impl.h b/lib/librte_bpf/bpf_impl.h
new file mode 100644
index 00000000..b577e2cb
--- /dev/null
+++ b/lib/librte_bpf/bpf_impl.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _BPF_H_
+#define _BPF_H_
+
+#include <rte_bpf.h>
+#include <sys/mman.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_BPF_STACK_SIZE 0x200
+
+struct rte_bpf {
+ struct rte_bpf_prm prm;
+ struct rte_bpf_jit jit;
+ size_t sz;
+ uint32_t stack_sz;
+};
+
+extern int bpf_validate(struct rte_bpf *bpf);
+
+extern int bpf_jit(struct rte_bpf *bpf);
+
+#ifdef RTE_ARCH_X86_64
+extern int bpf_jit_x86(struct rte_bpf *);
+#endif
+
+extern int rte_bpf_logtype;
+
+#define RTE_BPF_LOG(lvl, fmt, args...) \
+ rte_log(RTE_LOG_## lvl, rte_bpf_logtype, fmt, ##args)
+
+static inline size_t
+bpf_size(uint32_t bpf_op_sz)
+{
+ if (bpf_op_sz == BPF_B)
+ return sizeof(uint8_t);
+ else if (bpf_op_sz == BPF_H)
+ return sizeof(uint16_t);
+ else if (bpf_op_sz == BPF_W)
+ return sizeof(uint32_t);
+ else if (bpf_op_sz == EBPF_DW)
+ return sizeof(uint64_t);
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BPF_H_ */
diff --git a/lib/librte_bpf/bpf_jit_x86.c b/lib/librte_bpf/bpf_jit_x86.c
new file mode 100644
index 00000000..68ea389f
--- /dev/null
+++ b/lib/librte_bpf/bpf_jit_x86.c
@@ -0,0 +1,1356 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+
+#include "bpf_impl.h"
+
+#define GET_BPF_OP(op) (BPF_OP(op) >> 4)
+
+enum {
+ RAX = 0, /* scratch, return value */
+ RCX = 1, /* scratch, 4th arg */
+ RDX = 2, /* scratch, 3rd arg */
+ RBX = 3, /* callee saved */
+ RSP = 4, /* stack pointer */
+ RBP = 5, /* frame pointer, callee saved */
+ RSI = 6, /* scratch, 2nd arg */
+ RDI = 7, /* scratch, 1st arg */
+ R8 = 8, /* scratch, 5th arg */
+ R9 = 9, /* scratch, 6th arg */
+ R10 = 10, /* scratch */
+ R11 = 11, /* scratch */
+ R12 = 12, /* callee saved */
+ R13 = 13, /* callee saved */
+ R14 = 14, /* callee saved */
+ R15 = 15, /* callee saved */
+};
+
+#define IS_EXT_REG(r) ((r) >= R8)
+
+enum {
+ REX_PREFIX = 0x40, /* fixed value 0100 */
+ REX_W = 0x8, /* 64bit operand size */
+ REX_R = 0x4, /* extension of the ModRM.reg field */
+ REX_X = 0x2, /* extension of the SIB.index field */
+ REX_B = 0x1, /* extension of the ModRM.rm field */
+};
+
+enum {
+ MOD_INDIRECT = 0,
+ MOD_IDISP8 = 1,
+ MOD_IDISP32 = 2,
+ MOD_DIRECT = 3,
+};
+
+enum {
+ SIB_SCALE_1 = 0,
+ SIB_SCALE_2 = 1,
+ SIB_SCALE_4 = 2,
+ SIB_SCALE_8 = 3,
+};
+
+/*
+ * eBPF to x86_64 register mappings.
+ */
+static const uint32_t ebpf2x86[] = {
+ [EBPF_REG_0] = RAX,
+ [EBPF_REG_1] = RDI,
+ [EBPF_REG_2] = RSI,
+ [EBPF_REG_3] = RDX,
+ [EBPF_REG_4] = RCX,
+ [EBPF_REG_5] = R8,
+ [EBPF_REG_6] = RBX,
+ [EBPF_REG_7] = R13,
+ [EBPF_REG_8] = R14,
+ [EBPF_REG_9] = R15,
+ [EBPF_REG_10] = RBP,
+};
+
+/*
+ * r10 and r11 are used as scratch temporary registers.
+ */
+enum {
+ REG_DIV_IMM = R9,
+ REG_TMP0 = R11,
+ REG_TMP1 = R10,
+};
+
+/*
+ * list of callee-saved registers.
+ * keep RBP as the last one.
+ */
+static const uint32_t save_regs[] = {RBX, R12, R13, R14, R15, RBP};
+
+struct bpf_jit_state {
+ uint32_t idx;
+ size_t sz;
+ struct {
+ uint32_t num;
+ int32_t off;
+ } exit;
+ uint32_t reguse;
+ int32_t *off;
+ uint8_t *ins;
+};
+
+#define INUSE(v, r) (((v) >> (r)) & 1)
+#define USED(v, r) ((v) |= 1 << (r))
+
+union bpf_jit_imm {
+ uint32_t u32;
+ uint8_t u8[4];
+};
+
+/*
+ * For immediates that fit in 8 bits we can produce shorter code.
+ */
+static size_t
+imm_size(int32_t v)
+{
+ if (v == (int8_t)v)
+ return sizeof(int8_t);
+ return sizeof(int32_t);
+}
+
+static void
+emit_bytes(struct bpf_jit_state *st, const uint8_t ins[], uint32_t sz)
+{
+ uint32_t i;
+
+ if (st->ins != NULL) {
+ for (i = 0; i != sz; i++)
+ st->ins[st->sz + i] = ins[i];
+ }
+ st->sz += sz;
+}
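Because emit_bytes() only writes when st->ins is non-NULL, the same emitter sequence can run twice: a dry pass to measure instruction sizes (and hence jump offsets), then a second pass into the real buffer. A schematic of that pattern (the actual driver loop lives later in this file and may differ in detail):

static size_t
jit_two_pass(struct bpf_jit_state *st, uint8_t *buf)
{
        /* pass 1: st->ins == NULL, so emit_bytes() only advances st->sz */
        st->ins = NULL;
        st->sz = 0;
        /* ... run the full emit_*() sequence here to size the code ... */

        /* pass 2: replay the identical sequence, now writing into buf */
        st->ins = buf;
        st->sz = 0;
        /* ... same emit_*() sequence ... */
        return st->sz;
}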
+
+static void
+emit_imm(struct bpf_jit_state *st, const uint32_t imm, uint32_t sz)
+{
+ union bpf_jit_imm v;
+
+ v.u32 = imm;
+ emit_bytes(st, v.u8, sz);
+}
+
+/*
+ * emit REX byte
+ */
+static void
+emit_rex(struct bpf_jit_state *st, uint32_t op, uint32_t reg, uint32_t rm)
+{
+ uint8_t rex;
+
+ /* mark operand registers as used */
+ USED(st->reguse, reg);
+ USED(st->reguse, rm);
+
+ rex = 0;
+ if (BPF_CLASS(op) == EBPF_ALU64 ||
+ op == (BPF_ST | BPF_MEM | EBPF_DW) ||
+ op == (BPF_STX | BPF_MEM | EBPF_DW) ||
+ op == (BPF_STX | EBPF_XADD | EBPF_DW) ||
+ op == (BPF_LD | BPF_IMM | EBPF_DW) ||
+ (BPF_CLASS(op) == BPF_LDX &&
+ BPF_MODE(op) == BPF_MEM &&
+ BPF_SIZE(op) != BPF_W))
+ rex |= REX_W;
+
+ if (IS_EXT_REG(reg))
+ rex |= REX_R;
+
+ if (IS_EXT_REG(rm))
+ rex |= REX_B;
+
+ /* byte stores from %rsi/%rdi need a REX prefix to select SIL/DIL */
+ if (op == (BPF_STX | BPF_MEM | BPF_B) && (reg == RDI || reg == RSI))
+ rex |= REX_PREFIX;
+
+ if (rex != 0) {
+ rex |= REX_PREFIX;
+ emit_bytes(st, &rex, sizeof(rex));
+ }
+}
+
+/*
+ * emit MODRegRM byte
+ */
+static void
+emit_modregrm(struct bpf_jit_state *st, uint32_t mod, uint32_t reg, uint32_t rm)
+{
+ uint8_t v;
+
+ v = mod << 6 | (reg & 7) << 3 | (rm & 7);
+ emit_bytes(st, &v, sizeof(v));
+}
+
+/*
+ * emit SIB byte
+ */
+static void
+emit_sib(struct bpf_jit_state *st, uint32_t scale, uint32_t idx, uint32_t base)
+{
+ uint8_t v;
+
+ v = scale << 6 | (idx & 7) << 3 | (base & 7);
+ emit_bytes(st, &v, sizeof(v));
+}
+
+/*
+ * emit xchg %<sreg>, %<dreg>
+ */
+static void
+emit_xchg_reg(struct bpf_jit_state *st, uint32_t sreg, uint32_t dreg)
+{
+ const uint8_t ops = 0x87;
+
+ emit_rex(st, EBPF_ALU64, sreg, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, sreg, dreg);
+}
+
+/*
+ * emit neg %<dreg>
+ */
+static void
+emit_neg(struct bpf_jit_state *st, uint32_t op, uint32_t dreg)
+{
+ const uint8_t ops = 0xF7;
+ const uint8_t mods = 3;
+
+ emit_rex(st, op, 0, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, mods, dreg);
+}
+
+/*
+ * emit mov %<sreg>, %<dreg>
+ */
+static void
+emit_mov_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg)
+{
+ const uint8_t ops = 0x89;
+
+ /* a 32-bit mov zero-extends, so emit it even when sreg == dreg */
+ if (sreg != dreg || BPF_CLASS(op) == BPF_ALU) {
+ emit_rex(st, op, sreg, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, sreg, dreg);
+ }
+}
+
+/*
+ * emit movzwl %<sreg>, %<dreg>
+ */
+static void
+emit_movzwl(struct bpf_jit_state *st, uint32_t sreg, uint32_t dreg)
+{
+ static const uint8_t ops[] = {0x0F, 0xB7};
+
+ emit_rex(st, BPF_ALU, sreg, dreg);
+ emit_bytes(st, ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, sreg, dreg);
+}
+
+/*
+ * emit ror <imm8>, %<dreg>
+ */
+static void
+emit_ror_imm(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm)
+{
+ const uint8_t prfx = 0x66;
+ const uint8_t ops = 0xC1;
+ const uint8_t mods = 1;
+
+ emit_bytes(st, &prfx, sizeof(prfx));
+ emit_rex(st, BPF_ALU, 0, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, mods, dreg);
+ emit_imm(st, imm, imm_size(imm));
+}
+
+/*
+ * emit bswap %<dreg>
+ */
+static void
+emit_be2le_48(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm)
+{
+ uint32_t rop;
+
+ const uint8_t ops = 0x0F;
+ const uint8_t mods = 1;
+
+ rop = (imm == 64) ? EBPF_ALU64 : BPF_ALU;
+ emit_rex(st, rop, 0, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, mods, dreg);
+}
+
+static void
+emit_be2le(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm)
+{
+ if (imm == 16) {
+ emit_ror_imm(st, dreg, 8);
+ emit_movzwl(st, dreg, dreg);
+ } else
+ emit_be2le_48(st, dreg, imm);
+}
+
+/*
+ * On little-endian x86 this is generally a NOP;
+ * just clear the upper bits where required.
+ */
+static void
+emit_le2be(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm)
+{
+ if (imm == 16)
+ emit_movzwl(st, dreg, dreg);
+ else if (imm == 32)
+ emit_mov_reg(st, BPF_ALU | EBPF_MOV | BPF_X, dreg, dreg);
+}
+
+/*
+ * emit one of:
+ * add <imm>, %<dreg>
+ * and <imm>, %<dreg>
+ * or <imm>, %<dreg>
+ * sub <imm>, %<dreg>
+ * xor <imm>, %<dreg>
+ */
+static void
+emit_alu_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
+{
+ uint8_t mod, opcode;
+ uint32_t bop, imsz;
+
+ const uint8_t op8 = 0x83;
+ const uint8_t op32 = 0x81;
+ static const uint8_t mods[] = {
+ [GET_BPF_OP(BPF_ADD)] = 0,
+ [GET_BPF_OP(BPF_AND)] = 4,
+ [GET_BPF_OP(BPF_OR)] = 1,
+ [GET_BPF_OP(BPF_SUB)] = 5,
+ [GET_BPF_OP(BPF_XOR)] = 6,
+ };
+
+ bop = GET_BPF_OP(op);
+ mod = mods[bop];
+
+ imsz = imm_size(imm);
+ opcode = (imsz == 1) ? op8 : op32;
+
+ emit_rex(st, op, 0, dreg);
+ emit_bytes(st, &opcode, sizeof(opcode));
+ emit_modregrm(st, MOD_DIRECT, mod, dreg);
+ emit_imm(st, imm, imsz);
+}
+
+/*
+ * emit one of:
+ * add %<sreg>, %<dreg>
+ * and %<sreg>, %<dreg>
+ * or %<sreg>, %<dreg>
+ * sub %<sreg>, %<dreg>
+ * xor %<sreg>, %<dreg>
+ */
+static void
+emit_alu_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg)
+{
+ uint32_t bop;
+
+ static const uint8_t ops[] = {
+ [GET_BPF_OP(BPF_ADD)] = 0x01,
+ [GET_BPF_OP(BPF_AND)] = 0x21,
+ [GET_BPF_OP(BPF_OR)] = 0x09,
+ [GET_BPF_OP(BPF_SUB)] = 0x29,
+ [GET_BPF_OP(BPF_XOR)] = 0x31,
+ };
+
+ bop = GET_BPF_OP(op);
+
+ emit_rex(st, op, sreg, dreg);
+ emit_bytes(st, &ops[bop], sizeof(ops[bop]));
+ emit_modregrm(st, MOD_DIRECT, sreg, dreg);
+}
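For a concrete byte sequence, emit_alu_reg(st, EBPF_ALU64 | BPF_ADD | BPF_X, R13, RBX) produces the three bytes of add %r13, %rbx; restating the encoding by hand (the function name is ours, for illustration only):

static void
example_add_r13_rbx(struct bpf_jit_state *st)
{
        const uint8_t rex = REX_PREFIX | REX_W | REX_R; /* 0x4C: 64-bit op, extended reg field */
        const uint8_t opcode = 0x01;                    /* add r/m64, r64 */
        const uint8_t modrm = (MOD_DIRECT << 6) |
                ((R13 & 7) << 3) | (RBX & 7);           /* 0xEB */

        emit_bytes(st, &rex, sizeof(rex));
        emit_bytes(st, &opcode, sizeof(opcode));
        emit_bytes(st, &modrm, sizeof(modrm));          /* emits: 4C 01 EB */
}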
+
+static void
+emit_shift(struct bpf_jit_state *st, uint32_t op, uint32_t dreg)
+{
+ uint8_t mod;
+ uint32_t bop, opx;
+
+ static const uint8_t ops[] = {0xC1, 0xD3};
+ static const uint8_t mods[] = {
+ [GET_BPF_OP(BPF_LSH)] = 4,
+ [GET_BPF_OP(BPF_RSH)] = 5,
+ [GET_BPF_OP(EBPF_ARSH)] = 7,
+ };
+
+ bop = GET_BPF_OP(op);
+ mod = mods[bop];
+ opx = (BPF_SRC(op) == BPF_X);
+
+ emit_rex(st, op, 0, dreg);
+ emit_bytes(st, &ops[opx], sizeof(ops[opx]));
+ emit_modregrm(st, MOD_DIRECT, mod, dreg);
+}
+
+/*
+ * emit one of:
+ * shl <imm>, %<dreg>
+ * shr <imm>, %<dreg>
+ * sar <imm>, %<dreg>
+ */
+static void
+emit_shift_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg,
+ uint32_t imm)
+{
+ emit_shift(st, op, dreg);
+ emit_imm(st, imm, imm_size(imm));
+}
+
+/*
+ * emit one of:
+ * shl %<dreg>
+ * shr %<dreg>
+ * sar %<dreg>
+ * note that rcx is implicitly used as the shift-count register, so a few
+ * extra instructions for register spilling might be necessary.
+ */
+static void
+emit_shift_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg)
+{
+ if (sreg != RCX)
+ emit_xchg_reg(st, RCX, sreg);
+
+ emit_shift(st, op, (dreg == RCX) ? sreg : dreg);
+
+ if (sreg != RCX)
+ emit_xchg_reg(st, RCX, sreg);
+}
+
+/*
+ * emit mov <imm>, %<dreg>
+ */
+static void
+emit_mov_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
+{
+ const uint8_t ops = 0xC7;
+
+ if (imm == 0) {
+ /* replace 'mov 0, %<dst>' with 'xor %<dst>, %<dst>' */
+ op = BPF_CLASS(op) | BPF_XOR | BPF_X;
+ emit_alu_reg(st, op, dreg, dreg);
+ return;
+ }
+
+ emit_rex(st, op, 0, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, 0, dreg);
+ emit_imm(st, imm, sizeof(imm));
+}
+
+/*
+ * emit mov <imm64>, %<dreg>
+ */
+static void
+emit_ld_imm64(struct bpf_jit_state *st, uint32_t dreg, uint32_t imm0,
+ uint32_t imm1)
+{
+ const uint8_t ops = 0xB8;
+
+ if (imm1 == 0) {
+ emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, dreg, imm0);
+ return;
+ }
+
+ emit_rex(st, EBPF_ALU64, 0, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, 0, dreg);
+
+ emit_imm(st, imm0, sizeof(imm0));
+ emit_imm(st, imm1, sizeof(imm1));
+}
+
+/*
+ * note that rax:rdx are implicitly used as source/destination registers,
+ * so some register spilling is necessary.
+ * emit:
+ * mov %rax, %r11
+ * mov %rdx, %r10
+ * mov %<dreg>, %rax
+ * either:
+ * mov %<sreg>, %rdx
+ * OR
+ * mov <imm>, %rdx
+ * mul %rdx
+ * mov %r10, %rdx
+ * mov %rax, %<dreg>
+ * mov %r11, %rax
+ */
+static void
+emit_mul(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
+ uint32_t imm)
+{
+ const uint8_t ops = 0xF7;
+ const uint8_t mods = 4;
+
+ /* save rax & rdx */
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, REG_TMP0);
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RDX, REG_TMP1);
+
+ /* rax = dreg */
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, dreg, RAX);
+
+ if (BPF_SRC(op) == BPF_X)
+ /* rdx = sreg */
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X,
+ sreg == RAX ? REG_TMP0 : sreg, RDX);
+ else
+ /* rdx = imm */
+ emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, RDX, imm);
+
+ emit_rex(st, op, RAX, RDX);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, mods, RDX);
+
+ if (dreg != RDX)
+ /* restore rdx */
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP1, RDX);
+
+ if (dreg != RAX) {
+ /* dreg = rax */
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, dreg);
+ /* restore rax */
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP0, RAX);
+ }
+}
+
+/*
+ * emit mov <ofs>(%<sreg>), %<dreg>
+ * note that for non 64-bit ops, higher bits have to be cleared.
+ */
+static void
+emit_ld_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
+ int32_t ofs)
+{
+ uint32_t mods, opsz;
+ const uint8_t op32 = 0x8B;
+ const uint8_t op16[] = {0x0F, 0xB7};
+ const uint8_t op8[] = {0x0F, 0xB6};
+
+ emit_rex(st, op, dreg, sreg);
+
+ opsz = BPF_SIZE(op);
+ if (opsz == BPF_B)
+ emit_bytes(st, op8, sizeof(op8));
+ else if (opsz == BPF_H)
+ emit_bytes(st, op16, sizeof(op16));
+ else
+ emit_bytes(st, &op32, sizeof(op32));
+
+ mods = (imm_size(ofs) == 1) ? MOD_IDISP8 : MOD_IDISP32;
+
+ emit_modregrm(st, mods, dreg, sreg);
+ if (sreg == RSP || sreg == R12)
+ emit_sib(st, SIB_SCALE_1, sreg, sreg);
+ emit_imm(st, ofs, imm_size(ofs));
+}
+
+/*
+ * emit one of:
+ * mov %<sreg>, <ofs>(%<dreg>)
+ * mov <imm>, <ofs>(%<dreg>)
+ */
+static void
+emit_st_common(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg, uint32_t imm, int32_t ofs)
+{
+ uint32_t mods, imsz, opsz, opx;
+ const uint8_t prfx16 = 0x66;
+
+ /* 8 bit instruction opcodes */
+ static const uint8_t op8[] = {0xC6, 0x88};
+
+ /* 16/32/64 bit instruction opcodes */
+ static const uint8_t ops[] = {0xC7, 0x89};
+
+ /* does the instruction take an immediate value or a src reg? */
+ opx = (BPF_CLASS(op) == BPF_STX);
+
+ opsz = BPF_SIZE(op);
+ if (opsz == BPF_H)
+ emit_bytes(st, &prfx16, sizeof(prfx16));
+
+ emit_rex(st, op, sreg, dreg);
+
+ if (opsz == BPF_B)
+ emit_bytes(st, &op8[opx], sizeof(op8[opx]));
+ else
+ emit_bytes(st, &ops[opx], sizeof(ops[opx]));
+
+ imsz = imm_size(ofs);
+ mods = (imsz == 1) ? MOD_IDISP8 : MOD_IDISP32;
+
+ emit_modregrm(st, mods, sreg, dreg);
+
+ if (dreg == RSP || dreg == R12)
+ emit_sib(st, SIB_SCALE_1, dreg, dreg);
+
+ emit_imm(st, ofs, imsz);
+
+ if (opx == 0) {
+ imsz = RTE_MIN(bpf_size(opsz), sizeof(imm));
+ emit_imm(st, imm, imsz);
+ }
+}
+
+static void
+emit_st_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm,
+ int32_t ofs)
+{
+ emit_st_common(st, op, 0, dreg, imm, ofs);
+}
+
+static void
+emit_st_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
+ int32_t ofs)
+{
+ emit_st_common(st, op, sreg, dreg, 0, ofs);
+}
+
+/*
+ * emit lock add %<sreg>, <ofs>(%<dreg>)
+ */
+static void
+emit_st_xadd(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg, int32_t ofs)
+{
+ uint32_t imsz, mods;
+
+ const uint8_t lck = 0xF0; /* lock prefix */
+ const uint8_t ops = 0x01; /* add opcode */
+
+ imsz = imm_size(ofs);
+ mods = (imsz == 1) ? MOD_IDISP8 : MOD_IDISP32;
+
+ emit_bytes(st, &lck, sizeof(lck));
+ emit_rex(st, op, sreg, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, mods, sreg, dreg);
+ emit_imm(st, ofs, imsz);
+}
+
+/*
+ * emit:
+ * mov <imm64>, %rax
+ * call *%rax
+ */
+static void
+emit_call(struct bpf_jit_state *st, uintptr_t trg)
+{
+ const uint8_t ops = 0xFF;
+ const uint8_t mods = 2;
+
+ emit_ld_imm64(st, RAX, trg, trg >> 32);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, mods, RAX);
+}
+
+/*
+ * emit jmp <ofs>
+ * where 'ofs' is the target offset for the native code.
+ */
+static void
+emit_abs_jmp(struct bpf_jit_state *st, int32_t ofs)
+{
+ int32_t joff;
+ uint32_t imsz;
+
+ const uint8_t op8 = 0xEB;
+ const uint8_t op32 = 0xE9;
+
+ const int32_t sz8 = sizeof(op8) + sizeof(uint8_t);
+ const int32_t sz32 = sizeof(op32) + sizeof(uint32_t);
+
+ /* max possible jmp instruction size */
+ const int32_t iszm = RTE_MAX(sz8, sz32);
+
+ joff = ofs - st->sz;
+ imsz = RTE_MAX(imm_size(joff), imm_size(joff + iszm));
+
+ if (imsz == 1) {
+ emit_bytes(st, &op8, sizeof(op8));
+ joff -= sz8;
+ } else {
+ emit_bytes(st, &op32, sizeof(op32));
+ joff -= sz32;
+ }
+
+ emit_imm(st, joff, imsz);
+}
+
+/*
+ * emit jmp <ofs>
+ * where 'ofs' is the target offset for the BPF bytecode.
+ */
+static void
+emit_jmp(struct bpf_jit_state *st, int32_t ofs)
+{
+ emit_abs_jmp(st, st->off[st->idx + ofs]);
+}
+
+/*
+ * emit one of:
+ * cmovz %<sreg>, <%dreg>
+ * cmovne %<sreg>, <%dreg>
+ * cmova %<sreg>, <%dreg>
+ * cmovb %<sreg>, <%dreg>
+ * cmovae %<sreg>, <%dreg>
+ * cmovbe %<sreg>, <%dreg>
+ * cmovg %<sreg>, <%dreg>
+ * cmovl %<sreg>, <%dreg>
+ * cmovge %<sreg>, <%dreg>
+ * cmovle %<sreg>, <%dreg>
+ */
+static void
+emit_movcc_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg)
+{
+ uint32_t bop;
+
+ static const uint8_t ops[][2] = {
+ [GET_BPF_OP(BPF_JEQ)] = {0x0F, 0x44}, /* CMOVZ */
+ [GET_BPF_OP(EBPF_JNE)] = {0x0F, 0x45}, /* CMOVNE */
+ [GET_BPF_OP(BPF_JGT)] = {0x0F, 0x47}, /* CMOVA */
+ [GET_BPF_OP(EBPF_JLT)] = {0x0F, 0x42}, /* CMOVB */
+ [GET_BPF_OP(BPF_JGE)] = {0x0F, 0x43}, /* CMOVAE */
+ [GET_BPF_OP(EBPF_JLE)] = {0x0F, 0x46}, /* CMOVBE */
+ [GET_BPF_OP(EBPF_JSGT)] = {0x0F, 0x4F}, /* CMOVG */
+ [GET_BPF_OP(EBPF_JSLT)] = {0x0F, 0x4C}, /* CMOVL */
+ [GET_BPF_OP(EBPF_JSGE)] = {0x0F, 0x4D}, /* CMOVGE */
+ [GET_BPF_OP(EBPF_JSLE)] = {0x0F, 0x4E}, /* CMOVLE */
+ [GET_BPF_OP(BPF_JSET)] = {0x0F, 0x45}, /* CMOVNE */
+ };
+
+ bop = GET_BPF_OP(op);
+
+ emit_rex(st, op, dreg, sreg);
+ emit_bytes(st, ops[bop], sizeof(ops[bop]));
+ emit_modregrm(st, MOD_DIRECT, dreg, sreg);
+}
+
+/*
+ * emit one of:
+ * je <ofs>
+ * jne <ofs>
+ * ja <ofs>
+ * jb <ofs>
+ * jae <ofs>
+ * jbe <ofs>
+ * jg <ofs>
+ * jl <ofs>
+ * jge <ofs>
+ * jle <ofs>
+ * where 'ofs' is the target offset for the native code.
+ */
+static void
+emit_abs_jcc(struct bpf_jit_state *st, uint32_t op, int32_t ofs)
+{
+ uint32_t bop, imsz;
+ int32_t joff;
+
+ static const uint8_t op8[] = {
+ [GET_BPF_OP(BPF_JEQ)] = 0x74, /* JE */
+ [GET_BPF_OP(EBPF_JNE)] = 0x75, /* JNE */
+ [GET_BPF_OP(BPF_JGT)] = 0x77, /* JA */
+ [GET_BPF_OP(EBPF_JLT)] = 0x72, /* JB */
+ [GET_BPF_OP(BPF_JGE)] = 0x73, /* JAE */
+ [GET_BPF_OP(EBPF_JLE)] = 0x76, /* JBE */
+ [GET_BPF_OP(EBPF_JSGT)] = 0x7F, /* JG */
+ [GET_BPF_OP(EBPF_JSLT)] = 0x7C, /* JL */
+ [GET_BPF_OP(EBPF_JSGE)] = 0x7D, /* JGE */
+ [GET_BPF_OP(EBPF_JSLE)] = 0x7E, /* JLE */
+ [GET_BPF_OP(BPF_JSET)] = 0x75, /* JNE */
+ };
+
+ static const uint8_t op32[][2] = {
+ [GET_BPF_OP(BPF_JEQ)] = {0x0F, 0x84}, /* JE */
+ [GET_BPF_OP(EBPF_JNE)] = {0x0F, 0x85}, /* JNE */
+ [GET_BPF_OP(BPF_JGT)] = {0x0F, 0x87}, /* JA */
+ [GET_BPF_OP(EBPF_JLT)] = {0x0F, 0x82}, /* JB */
+ [GET_BPF_OP(BPF_JGE)] = {0x0F, 0x83}, /* JAE */
+ [GET_BPF_OP(EBPF_JLE)] = {0x0F, 0x86}, /* JBE */
+ [GET_BPF_OP(EBPF_JSGT)] = {0x0F, 0x8F}, /* JG */
+ [GET_BPF_OP(EBPF_JSLT)] = {0x0F, 0x8C}, /* JL */
+ [GET_BPF_OP(EBPF_JSGE)] = {0x0F, 0x8D}, /* JGE */
+ [GET_BPF_OP(EBPF_JSLE)] = {0x0F, 0x8E}, /* JLE */
+ [GET_BPF_OP(BPF_JSET)] = {0x0F, 0x85}, /* JNE */
+ };
+
+ const int32_t sz8 = sizeof(op8[0]) + sizeof(uint8_t);
+ const int32_t sz32 = sizeof(op32[0]) + sizeof(uint32_t);
+
+ /* max possible jcc instruction size */
+ const int32_t iszm = RTE_MAX(sz8, sz32);
+
+ joff = ofs - st->sz;
+ imsz = RTE_MAX(imm_size(joff), imm_size(joff + iszm));
+
+ bop = GET_BPF_OP(op);
+
+ if (imsz == 1) {
+ emit_bytes(st, &op8[bop], sizeof(op8[bop]));
+ joff -= sz8;
+ } else {
+ emit_bytes(st, op32[bop], sizeof(op32[bop]));
+ joff -= sz32;
+ }
+
+ emit_imm(st, joff, imsz);
+}
+
+/*
+ * emit one of:
+ * je <ofs>
+ * jne <ofs>
+ * ja <ofs>
+ * jb <ofs>
+ * jae <ofs>
+ * jbe <ofs>
+ * jg <ofs>
+ * jl <ofs>
+ * jge <ofs>
+ * jle <ofs>
+ * where 'ofs' is the target offset for the BPF bytecode.
+ */
+static void
+emit_jcc(struct bpf_jit_state *st, uint32_t op, int32_t ofs)
+{
+ emit_abs_jcc(st, op, st->off[st->idx + ofs]);
+}
+
+/*
+ * emit cmp <imm>, %<dreg>
+ */
+static void
+emit_cmp_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
+{
+ uint8_t ops;
+ uint32_t imsz;
+
+ const uint8_t op8 = 0x83;
+ const uint8_t op32 = 0x81;
+ const uint8_t mods = 7;
+
+ imsz = imm_size(imm);
+ ops = (imsz == 1) ? op8 : op32;
+
+ emit_rex(st, op, 0, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, mods, dreg);
+ emit_imm(st, imm, imsz);
+}
+
+/*
+ * emit test <imm>, %<dreg>
+ */
+static void
+emit_tst_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg, uint32_t imm)
+{
+ const uint8_t ops = 0xF7;
+ const uint8_t mods = 0;
+
+ emit_rex(st, op, 0, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, mods, dreg);
+ emit_imm(st, imm, imm_size(imm));
+}
+
+static void
+emit_jcc_imm(struct bpf_jit_state *st, uint32_t op, uint32_t dreg,
+ uint32_t imm, int32_t ofs)
+{
+ if (BPF_OP(op) == BPF_JSET)
+ emit_tst_imm(st, EBPF_ALU64, dreg, imm);
+ else
+ emit_cmp_imm(st, EBPF_ALU64, dreg, imm);
+
+ emit_jcc(st, op, ofs);
+}
+
+/*
+ * emit test %<sreg>, %<dreg>
+ */
+static void
+emit_tst_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg)
+{
+ const uint8_t ops = 0x85;
+
+ emit_rex(st, op, sreg, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, sreg, dreg);
+}
+
+/*
+ * emit cmp %<sreg>, %<dreg>
+ */
+static void
+emit_cmp_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg)
+{
+ const uint8_t ops = 0x39;
+
+ emit_rex(st, op, sreg, dreg);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, sreg, dreg);
+}
+
+static void
+emit_jcc_reg(struct bpf_jit_state *st, uint32_t op, uint32_t sreg,
+ uint32_t dreg, int32_t ofs)
+{
+ if (BPF_OP(op) == BPF_JSET)
+ emit_tst_reg(st, EBPF_ALU64, sreg, dreg);
+ else
+ emit_cmp_reg(st, EBPF_ALU64, sreg, dreg);
+
+ emit_jcc(st, op, ofs);
+}
+
+/*
+ * note that rax:rdx are implicitly used as source/destination registers,
+ * so some register spilling is necessary.
+ * emit:
+ * mov %rax, %r11
+ * mov %rdx, %r10
+ * mov %<dreg>, %rax
+ * xor %rdx, %rdx
+ * for divisor as immediate value:
+ * mov <imm>, %r9
+ * div %<divisor_reg>
+ * either:
+ * mov %rax, %<dreg>
+ * OR
+ * mov %rdx, %<dreg>
+ * mov %r11, %rax
+ * mov %r10, %rdx
+ */
+static void
+emit_div(struct bpf_jit_state *st, uint32_t op, uint32_t sreg, uint32_t dreg,
+ uint32_t imm)
+{
+ uint32_t sr;
+
+ const uint8_t ops = 0xF7;
+ const uint8_t mods = 6;
+
+ if (BPF_SRC(op) == BPF_X) {
+
+ /* check that src divisor is not zero */
+ emit_tst_reg(st, BPF_CLASS(op), sreg, sreg);
+
+ /* exit with return value zero */
+ emit_movcc_reg(st, BPF_CLASS(op) | BPF_JEQ | BPF_X, sreg, RAX);
+ emit_abs_jcc(st, BPF_JMP | BPF_JEQ | BPF_K, st->exit.off);
+ }
+
+ /* save rax & rdx */
+ if (dreg != RAX)
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, REG_TMP0);
+ if (dreg != RDX)
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RDX, REG_TMP1);
+
+ /* fill rax & rdx */
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, dreg, RAX);
+ emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, RDX, 0);
+
+ if (BPF_SRC(op) == BPF_X) {
+ sr = sreg;
+ if (sr == RAX)
+ sr = REG_TMP0;
+ else if (sr == RDX)
+ sr = REG_TMP1;
+ } else {
+ sr = REG_DIV_IMM;
+ emit_mov_imm(st, EBPF_ALU64 | EBPF_MOV | BPF_K, sr, imm);
+ }
+
+ emit_rex(st, op, 0, sr);
+ emit_bytes(st, &ops, sizeof(ops));
+ emit_modregrm(st, MOD_DIRECT, mods, sr);
+
+ if (BPF_OP(op) == BPF_DIV)
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RAX, dreg);
+ else
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RDX, dreg);
+
+ if (dreg != RAX)
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP0, RAX);
+ if (dreg != RDX)
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, REG_TMP1, RDX);
+}
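+
+/*
+ * Illustrative expansion (not emitted verbatim) of the zero-divisor
+ * guard above for a register divisor, e.g. 'r2 /= r3':
+ *   test %r3, %r3      ; ZF set when the divisor is zero
+ *   cmovz %r3, %rax    ; rax = 0 (r3 is known to be zero here)
+ *   je <epilog>        ; return 0 instead of raising #DE
+ */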
+
+static void
+emit_prolog(struct bpf_jit_state *st, int32_t stack_size)
+{
+ uint32_t i;
+ int32_t spil, ofs;
+
+ spil = 0;
+ for (i = 0; i != RTE_DIM(save_regs); i++)
+ spil += INUSE(st->reguse, save_regs[i]);
+
+ /* we can avoid touching the stack at all */
+ if (spil == 0)
+ return;
+
+ emit_alu_imm(st, EBPF_ALU64 | BPF_SUB | BPF_K, RSP,
+ spil * sizeof(uint64_t));
+
+ ofs = 0;
+ for (i = 0; i != RTE_DIM(save_regs); i++) {
+ if (INUSE(st->reguse, save_regs[i]) != 0) {
+ emit_st_reg(st, BPF_STX | BPF_MEM | EBPF_DW,
+ save_regs[i], RSP, ofs);
+ ofs += sizeof(uint64_t);
+ }
+ }
+
+ if (INUSE(st->reguse, RBP) != 0) {
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X, RSP, RBP);
+ emit_alu_imm(st, EBPF_ALU64 | BPF_SUB | BPF_K, RSP, stack_size);
+ }
+}
+
+/*
+ * emit ret
+ */
+static void
+emit_ret(struct bpf_jit_state *st)
+{
+ const uint8_t ops = 0xC3;
+
+ emit_bytes(st, &ops, sizeof(ops));
+}
+
+static void
+emit_epilog(struct bpf_jit_state *st)
+{
+ uint32_t i;
+ int32_t spil, ofs;
+
+ /* if we already have an epilog, generate a jump to it */
+ if (st->exit.num++ != 0) {
+ emit_abs_jmp(st, st->exit.off);
+ return;
+ }
+
+ /* store offset of epilog block */
+ st->exit.off = st->sz;
+
+ spil = 0;
+ for (i = 0; i != RTE_DIM(save_regs); i++)
+ spil += INUSE(st->reguse, save_regs[i]);
+
+ if (spil != 0) {
+
+ if (INUSE(st->reguse, RBP) != 0)
+ emit_mov_reg(st, EBPF_ALU64 | EBPF_MOV | BPF_X,
+ RBP, RSP);
+
+ ofs = 0;
+ for (i = 0; i != RTE_DIM(save_regs); i++) {
+ if (INUSE(st->reguse, save_regs[i]) != 0) {
+ emit_ld_reg(st, BPF_LDX | BPF_MEM | EBPF_DW,
+ RSP, save_regs[i], ofs);
+ ofs += sizeof(uint64_t);
+ }
+ }
+
+ emit_alu_imm(st, EBPF_ALU64 | BPF_ADD | BPF_K, RSP,
+ spil * sizeof(uint64_t));
+ }
+
+ emit_ret(st);
+}
+
+/*
+ * walk through the BPF code and translate it into x86_64 instructions.
+ */
+static int
+emit(struct bpf_jit_state *st, const struct rte_bpf *bpf)
+{
+ uint32_t i, dr, op, sr;
+ const struct ebpf_insn *ins;
+
+ /* reset state fields */
+ st->sz = 0;
+ st->exit.num = 0;
+
+ emit_prolog(st, bpf->stack_sz);
+
+ for (i = 0; i != bpf->prm.nb_ins; i++) {
+
+ st->idx = i;
+ st->off[i] = st->sz;
+
+ ins = bpf->prm.ins + i;
+
+ dr = ebpf2x86[ins->dst_reg];
+ sr = ebpf2x86[ins->src_reg];
+ op = ins->code;
+
+ switch (op) {
+ /* 32 bit ALU IMM operations */
+ case (BPF_ALU | BPF_ADD | BPF_K):
+ case (BPF_ALU | BPF_SUB | BPF_K):
+ case (BPF_ALU | BPF_AND | BPF_K):
+ case (BPF_ALU | BPF_OR | BPF_K):
+ case (BPF_ALU | BPF_XOR | BPF_K):
+ emit_alu_imm(st, op, dr, ins->imm);
+ break;
+ case (BPF_ALU | BPF_LSH | BPF_K):
+ case (BPF_ALU | BPF_RSH | BPF_K):
+ emit_shift_imm(st, op, dr, ins->imm);
+ break;
+ case (BPF_ALU | EBPF_MOV | BPF_K):
+ emit_mov_imm(st, op, dr, ins->imm);
+ break;
+ /* 32 bit ALU REG operations */
+ case (BPF_ALU | BPF_ADD | BPF_X):
+ case (BPF_ALU | BPF_SUB | BPF_X):
+ case (BPF_ALU | BPF_AND | BPF_X):
+ case (BPF_ALU | BPF_OR | BPF_X):
+ case (BPF_ALU | BPF_XOR | BPF_X):
+ emit_alu_reg(st, op, sr, dr);
+ break;
+ case (BPF_ALU | BPF_LSH | BPF_X):
+ case (BPF_ALU | BPF_RSH | BPF_X):
+ emit_shift_reg(st, op, sr, dr);
+ break;
+ case (BPF_ALU | EBPF_MOV | BPF_X):
+ emit_mov_reg(st, op, sr, dr);
+ break;
+ case (BPF_ALU | BPF_NEG):
+ emit_neg(st, op, dr);
+ break;
+ case (BPF_ALU | EBPF_END | EBPF_TO_BE):
+ emit_be2le(st, dr, ins->imm);
+ break;
+ case (BPF_ALU | EBPF_END | EBPF_TO_LE):
+ emit_le2be(st, dr, ins->imm);
+ break;
+ /* 64 bit ALU IMM operations */
+ case (EBPF_ALU64 | BPF_ADD | BPF_K):
+ case (EBPF_ALU64 | BPF_SUB | BPF_K):
+ case (EBPF_ALU64 | BPF_AND | BPF_K):
+ case (EBPF_ALU64 | BPF_OR | BPF_K):
+ case (EBPF_ALU64 | BPF_XOR | BPF_K):
+ emit_alu_imm(st, op, dr, ins->imm);
+ break;
+ case (EBPF_ALU64 | BPF_LSH | BPF_K):
+ case (EBPF_ALU64 | BPF_RSH | BPF_K):
+ case (EBPF_ALU64 | EBPF_ARSH | BPF_K):
+ emit_shift_imm(st, op, dr, ins->imm);
+ break;
+ case (EBPF_ALU64 | EBPF_MOV | BPF_K):
+ emit_mov_imm(st, op, dr, ins->imm);
+ break;
+ /* 64 bit ALU REG operations */
+ case (EBPF_ALU64 | BPF_ADD | BPF_X):
+ case (EBPF_ALU64 | BPF_SUB | BPF_X):
+ case (EBPF_ALU64 | BPF_AND | BPF_X):
+ case (EBPF_ALU64 | BPF_OR | BPF_X):
+ case (EBPF_ALU64 | BPF_XOR | BPF_X):
+ emit_alu_reg(st, op, sr, dr);
+ break;
+ case (EBPF_ALU64 | BPF_LSH | BPF_X):
+ case (EBPF_ALU64 | BPF_RSH | BPF_X):
+ case (EBPF_ALU64 | EBPF_ARSH | BPF_X):
+ emit_shift_reg(st, op, sr, dr);
+ break;
+ case (EBPF_ALU64 | EBPF_MOV | BPF_X):
+ emit_mov_reg(st, op, sr, dr);
+ break;
+ case (EBPF_ALU64 | BPF_NEG):
+ emit_neg(st, op, dr);
+ break;
+ /* multiply instructions */
+ case (BPF_ALU | BPF_MUL | BPF_K):
+ case (BPF_ALU | BPF_MUL | BPF_X):
+ case (EBPF_ALU64 | BPF_MUL | BPF_K):
+ case (EBPF_ALU64 | BPF_MUL | BPF_X):
+ emit_mul(st, op, sr, dr, ins->imm);
+ break;
+ /* divide instructions */
+ case (BPF_ALU | BPF_DIV | BPF_K):
+ case (BPF_ALU | BPF_MOD | BPF_K):
+ case (BPF_ALU | BPF_DIV | BPF_X):
+ case (BPF_ALU | BPF_MOD | BPF_X):
+ case (EBPF_ALU64 | BPF_DIV | BPF_K):
+ case (EBPF_ALU64 | BPF_MOD | BPF_K):
+ case (EBPF_ALU64 | BPF_DIV | BPF_X):
+ case (EBPF_ALU64 | BPF_MOD | BPF_X):
+ emit_div(st, op, sr, dr, ins->imm);
+ break;
+ /* load instructions */
+ case (BPF_LDX | BPF_MEM | BPF_B):
+ case (BPF_LDX | BPF_MEM | BPF_H):
+ case (BPF_LDX | BPF_MEM | BPF_W):
+ case (BPF_LDX | BPF_MEM | EBPF_DW):
+ emit_ld_reg(st, op, sr, dr, ins->off);
+ break;
+ /* load 64 bit immediate value */
+ case (BPF_LD | BPF_IMM | EBPF_DW):
+ emit_ld_imm64(st, dr, ins[0].imm, ins[1].imm);
+ i++;
+ break;
+ /* store instructions */
+ case (BPF_STX | BPF_MEM | BPF_B):
+ case (BPF_STX | BPF_MEM | BPF_H):
+ case (BPF_STX | BPF_MEM | BPF_W):
+ case (BPF_STX | BPF_MEM | EBPF_DW):
+ emit_st_reg(st, op, sr, dr, ins->off);
+ break;
+ case (BPF_ST | BPF_MEM | BPF_B):
+ case (BPF_ST | BPF_MEM | BPF_H):
+ case (BPF_ST | BPF_MEM | BPF_W):
+ case (BPF_ST | BPF_MEM | EBPF_DW):
+ emit_st_imm(st, op, dr, ins->imm, ins->off);
+ break;
+ /* atomic add instructions */
+ case (BPF_STX | EBPF_XADD | BPF_W):
+ case (BPF_STX | EBPF_XADD | EBPF_DW):
+ emit_st_xadd(st, op, sr, dr, ins->off);
+ break;
+ /* jump instructions */
+ case (BPF_JMP | BPF_JA):
+ emit_jmp(st, ins->off + 1);
+ break;
+ /* jump IMM instructions */
+ case (BPF_JMP | BPF_JEQ | BPF_K):
+ case (BPF_JMP | EBPF_JNE | BPF_K):
+ case (BPF_JMP | BPF_JGT | BPF_K):
+ case (BPF_JMP | EBPF_JLT | BPF_K):
+ case (BPF_JMP | BPF_JGE | BPF_K):
+ case (BPF_JMP | EBPF_JLE | BPF_K):
+ case (BPF_JMP | EBPF_JSGT | BPF_K):
+ case (BPF_JMP | EBPF_JSLT | BPF_K):
+ case (BPF_JMP | EBPF_JSGE | BPF_K):
+ case (BPF_JMP | EBPF_JSLE | BPF_K):
+ case (BPF_JMP | BPF_JSET | BPF_K):
+ emit_jcc_imm(st, op, dr, ins->imm, ins->off + 1);
+ break;
+ /* jump REG instructions */
+ case (BPF_JMP | BPF_JEQ | BPF_X):
+ case (BPF_JMP | EBPF_JNE | BPF_X):
+ case (BPF_JMP | BPF_JGT | BPF_X):
+ case (BPF_JMP | EBPF_JLT | BPF_X):
+ case (BPF_JMP | BPF_JGE | BPF_X):
+ case (BPF_JMP | EBPF_JLE | BPF_X):
+ case (BPF_JMP | EBPF_JSGT | BPF_X):
+ case (BPF_JMP | EBPF_JSLT | BPF_X):
+ case (BPF_JMP | EBPF_JSGE | BPF_X):
+ case (BPF_JMP | EBPF_JSLE | BPF_X):
+ case (BPF_JMP | BPF_JSET | BPF_X):
+ emit_jcc_reg(st, op, sr, dr, ins->off + 1);
+ break;
+ /* call instructions */
+ case (BPF_JMP | EBPF_CALL):
+ emit_call(st,
+ (uintptr_t)bpf->prm.xsym[ins->imm].func.val);
+ break;
+ /* return instruction */
+ case (BPF_JMP | EBPF_EXIT):
+ emit_epilog(st);
+ break;
+ default:
+ RTE_BPF_LOG(ERR,
+ "%s(%p): invalid opcode %#x at pc: %u;\n",
+ __func__, bpf, ins->code, i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * produce a native ISA version of the given BPF code.
+ */
+int
+bpf_jit_x86(struct rte_bpf *bpf)
+{
+ int32_t rc;
+ uint32_t i;
+ size_t sz;
+ struct bpf_jit_state st;
+
+ /* init state */
+ memset(&st, 0, sizeof(st));
+ st.off = malloc(bpf->prm.nb_ins * sizeof(st.off[0]));
+ if (st.off == NULL)
+ return -ENOMEM;
+
+ /* fill with fake offsets */
+ st.exit.off = INT32_MAX;
+ for (i = 0; i != bpf->prm.nb_ins; i++)
+ st.off[i] = INT32_MAX;
+
+ /*
+ * dry runs, used to calculate the total code size and valid jump offsets.
+ * stop once we converge to the minimal possible size.
+ */
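+ /*
+ * Convergence sketch (illustrative): on the first pass every target
+ * in st.off[] is INT32_MAX, so all jumps are emitted in their long
+ * form and st.sz is an upper bound.  Subsequent passes see real
+ * offsets, some jumps shrink to their short form, and the loop stops
+ * once two consecutive passes produce the same size.
+ */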
+ do {
+ sz = st.sz;
+ rc = emit(&st, bpf);
+ } while (rc == 0 && sz != st.sz);
+
+ if (rc == 0) {
+
+ /* allocate memory needed */
+ st.ins = mmap(NULL, st.sz, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (st.ins == MAP_FAILED)
+ rc = -ENOMEM;
+ else
+ /* generate code */
+ rc = emit(&st, bpf);
+ }
+
+ if (rc == 0 && mprotect(st.ins, st.sz, PROT_READ | PROT_EXEC) != 0)
+ rc = -ENOMEM;
+
+ if (rc != 0)
+ munmap(st.ins, st.sz);
+ else {
+ bpf->jit.func = (void *)st.ins;
+ bpf->jit.sz = st.sz;
+ }
+
+ free(st.off);
+ return rc;
+}
diff --git a/lib/librte_bpf/bpf_load.c b/lib/librte_bpf/bpf_load.c
new file mode 100644
index 00000000..2b84fe72
--- /dev/null
+++ b/lib/librte_bpf/bpf_load.c
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+
+#include "bpf_impl.h"
+
+static struct rte_bpf *
+bpf_load(const struct rte_bpf_prm *prm)
+{
+ uint8_t *buf;
+ struct rte_bpf *bpf;
+ size_t sz, bsz, insz, xsz;
+
+ xsz = prm->nb_xsym * sizeof(prm->xsym[0]);
+ insz = prm->nb_ins * sizeof(prm->ins[0]);
+ bsz = sizeof(bpf[0]);
+ sz = insz + xsz + bsz;
+
+ buf = mmap(NULL, sz, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (buf == MAP_FAILED)
+ return NULL;
+
+ bpf = (void *)buf;
+ bpf->sz = sz;
+
+ memcpy(&bpf->prm, prm, sizeof(bpf->prm));
+
+ memcpy(buf + bsz, prm->xsym, xsz);
+ memcpy(buf + bsz + xsz, prm->ins, insz);
+
+ bpf->prm.xsym = (void *)(buf + bsz);
+ bpf->prm.ins = (void *)(buf + bsz + xsz);
+
+ return bpf;
+}
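+
+/*
+ * Layout of the single mmap()'d region built above (a sketch):
+ *
+ *   +-----------------+ <- bpf (returned pointer)
+ *   | struct rte_bpf  |
+ *   +-----------------+ <- bpf->prm.xsym
+ *   | xsym[nb_xsym]   |
+ *   +-----------------+ <- bpf->prm.ins
+ *   | ins[nb_ins]     |
+ *   +-----------------+
+ */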
+
+/*
+ * Check that the user-provided external symbol is valid.
+ */
+static int
+bpf_check_xsym(const struct rte_bpf_xsym *xsym)
+{
+ uint32_t i;
+
+ if (xsym->name == NULL)
+ return -EINVAL;
+
+ if (xsym->type == RTE_BPF_XTYPE_VAR) {
+ if (xsym->var.desc.type == RTE_BPF_ARG_UNDEF)
+ return -EINVAL;
+ } else if (xsym->type == RTE_BPF_XTYPE_FUNC) {
+
+ if (xsym->func.nb_args > EBPF_FUNC_MAX_ARGS)
+ return -EINVAL;
+
+ /* check function arguments */
+ for (i = 0; i != xsym->func.nb_args; i++) {
+ if (xsym->func.args[i].type == RTE_BPF_ARG_UNDEF)
+ return -EINVAL;
+ }
+
+ /* check return value info */
+ if (xsym->func.ret.type != RTE_BPF_ARG_UNDEF &&
+ xsym->func.ret.size == 0)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ return 0;
+}
+
+__rte_experimental struct rte_bpf *
+rte_bpf_load(const struct rte_bpf_prm *prm)
+{
+ struct rte_bpf *bpf;
+ int32_t rc;
+ uint32_t i;
+
+ if (prm == NULL || prm->ins == NULL ||
+ (prm->nb_xsym != 0 && prm->xsym == NULL)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ rc = 0;
+ for (i = 0; i != prm->nb_xsym && rc == 0; i++)
+ rc = bpf_check_xsym(prm->xsym + i);
+
+ if (rc != 0) {
+ rte_errno = -rc;
+ RTE_BPF_LOG(ERR, "%s: %d-th xsym is invalid\n", __func__, i);
+ return NULL;
+ }
+
+ bpf = bpf_load(prm);
+ if (bpf == NULL) {
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ rc = bpf_validate(bpf);
+ if (rc == 0) {
+ bpf_jit(bpf);
+ if (mprotect(bpf, bpf->sz, PROT_READ) != 0)
+ rc = -ENOMEM;
+ }
+
+ if (rc != 0) {
+ rte_bpf_destroy(bpf);
+ rte_errno = -rc;
+ return NULL;
+ }
+
+ return bpf;
+}
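+
+/*
+ * Minimal usage sketch (hypothetical program, for illustration only):
+ *
+ *   static const struct ebpf_insn ins[] = {
+ *           { .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ *                   .dst_reg = EBPF_REG_0, .imm = 1, },
+ *           { .code = (BPF_JMP | EBPF_EXIT), },
+ *   };
+ *   const struct rte_bpf_prm prm = {
+ *           .ins = ins,
+ *           .nb_ins = RTE_DIM(ins),
+ *           .prog_arg = { .type = RTE_BPF_ARG_RAW,
+ *                   .size = sizeof(uint64_t), },
+ *   };
+ *   struct rte_bpf *bpf = rte_bpf_load(&prm);
+ *   if (bpf == NULL)
+ *           rte_panic("load failed, rte_errno=%d\n", rte_errno);
+ */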
+
+__rte_experimental __attribute__ ((weak)) struct rte_bpf *
+rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
+ const char *sname)
+{
+ if (prm == NULL || fname == NULL || sname == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ RTE_BPF_LOG(ERR, "%s() is not supported with current config\n"
+ "rebuild with libelf installed\n",
+ __func__);
+ rte_errno = ENOTSUP;
+ return NULL;
+}
diff --git a/lib/librte_bpf/bpf_load_elf.c b/lib/librte_bpf/bpf_load_elf.c
new file mode 100644
index 00000000..96d3630f
--- /dev/null
+++ b/lib/librte_bpf/bpf_load_elf.c
@@ -0,0 +1,322 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/queue.h>
+#include <fcntl.h>
+
+#include <libelf.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+
+#include "bpf_impl.h"
+
+/* To overcome a compatibility issue with old libelf headers lacking EM_BPF */
+#ifndef EM_BPF
+#define EM_BPF 247
+#endif
+
+static uint32_t
+bpf_find_xsym(const char *sn, enum rte_bpf_xtype type,
+ const struct rte_bpf_xsym fp[], uint32_t fn)
+{
+ uint32_t i;
+
+ if (sn == NULL || fp == NULL)
+ return UINT32_MAX;
+
+ for (i = 0; i != fn; i++) {
+ if (fp[i].type == type && strcmp(sn, fp[i].name) == 0)
+ break;
+ }
+
+ return (i != fn) ? i : UINT32_MAX;
+}
+
+/*
+ * update BPF code at offset *ofs* with the proper address (or index) for
+ * the external symbol *sn*
+ */
+static int
+resolve_xsym(const char *sn, size_t ofs, struct ebpf_insn *ins, size_t ins_sz,
+ const struct rte_bpf_prm *prm)
+{
+ uint32_t idx, fidx;
+ enum rte_bpf_xtype type;
+
+ if (ofs % sizeof(ins[0]) != 0 || ofs >= ins_sz)
+ return -EINVAL;
+
+ idx = ofs / sizeof(ins[0]);
+ if (ins[idx].code == (BPF_JMP | EBPF_CALL))
+ type = RTE_BPF_XTYPE_FUNC;
+ else if (ins[idx].code == (BPF_LD | BPF_IMM | EBPF_DW) &&
+ ofs < ins_sz - sizeof(ins[idx]))
+ type = RTE_BPF_XTYPE_VAR;
+ else
+ return -EINVAL;
+
+ fidx = bpf_find_xsym(sn, type, prm->xsym, prm->nb_xsym);
+ if (fidx == UINT32_MAX)
+ return -ENOENT;
+
+ /* for function we just need an index in our xsym table */
+ if (type == RTE_BPF_XTYPE_FUNC)
+ ins[idx].imm = fidx;
+ /* for variable we need to store its absolute address */
+ else {
+ ins[idx].imm = (uintptr_t)prm->xsym[fidx].var.val;
+ ins[idx + 1].imm =
+ (uint64_t)(uintptr_t)prm->xsym[fidx].var.val >> 32;
+ }
+
+ return 0;
+}
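+
+/*
+ * Example (illustrative): for a 'call foo' instruction at byte offset 8
+ * of the code section, the relocation entry gives ofs == 8, so
+ * idx == 1 (instructions are 8 bytes each).  If 'foo' is found at slot
+ * 3 of prm->xsym, ins[1].imm is rewritten to 3 and execution later
+ * dispatches through xsym[3].func.val.
+ */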
+
+static int
+check_elf_header(const Elf64_Ehdr *eh)
+{
+ const char *err;
+
+ err = NULL;
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ if (eh->e_ident[EI_DATA] != ELFDATA2LSB)
+#else
+ if (eh->e_ident[EI_DATA] != ELFDATA2MSB)
+#endif
+ err = "not native byte order";
+ else if (eh->e_ident[EI_OSABI] != ELFOSABI_NONE)
+ err = "unexpected OS ABI";
+ else if (eh->e_type != ET_REL)
+ err = "unexpected ELF type";
+ else if (eh->e_machine != EM_NONE && eh->e_machine != EM_BPF)
+ err = "unexpected machine type";
+
+ if (err != NULL) {
+ RTE_BPF_LOG(ERR, "%s(): %s\n", __func__, err);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * helper function, find executable section by name.
+ */
+static int
+find_elf_code(Elf *elf, const char *section, Elf_Data **psd, size_t *pidx)
+{
+ Elf_Scn *sc;
+ const Elf64_Ehdr *eh;
+ const Elf64_Shdr *sh;
+ Elf_Data *sd;
+ const char *sn;
+ int32_t rc;
+
+ eh = elf64_getehdr(elf);
+ if (eh == NULL) {
+ rc = elf_errno();
+ RTE_BPF_LOG(ERR, "%s(%p, %s) error code: %d(%s)\n",
+ __func__, elf, section, rc, elf_errmsg(rc));
+ return -EINVAL;
+ }
+
+ if (check_elf_header(eh) != 0)
+ return -EINVAL;
+
+ /* find given section by name */
+ for (sc = elf_nextscn(elf, NULL); sc != NULL;
+ sc = elf_nextscn(elf, sc)) {
+ sh = elf64_getshdr(sc);
+ sn = elf_strptr(elf, eh->e_shstrndx, sh->sh_name);
+ if (sn != NULL && strcmp(section, sn) == 0 &&
+ sh->sh_type == SHT_PROGBITS &&
+ sh->sh_flags == (SHF_ALLOC | SHF_EXECINSTR))
+ break;
+ }
+
+ sd = elf_getdata(sc, NULL);
+ if (sd == NULL || sd->d_size == 0 ||
+ sd->d_size % sizeof(struct ebpf_insn) != 0) {
+ rc = elf_errno();
+ RTE_BPF_LOG(ERR, "%s(%p, %s) error code: %d(%s)\n",
+ __func__, elf, section, rc, elf_errmsg(rc));
+ return -EINVAL;
+ }
+
+ *psd = sd;
+ *pidx = elf_ndxscn(sc);
+ return 0;
+}
+
+/*
+ * helper function to process data from relocation table.
+ */
+static int
+process_reloc(Elf *elf, size_t sym_idx, Elf64_Rel *re, size_t re_sz,
+ struct ebpf_insn *ins, size_t ins_sz, const struct rte_bpf_prm *prm)
+{
+ int32_t rc;
+ uint32_t i, n;
+ size_t ofs, sym;
+ const char *sn;
+ const Elf64_Ehdr *eh;
+ Elf_Scn *sc;
+ const Elf_Data *sd;
+ Elf64_Sym *sm;
+
+ eh = elf64_getehdr(elf);
+
+ /* get symtable by section index */
+ sc = elf_getscn(elf, sym_idx);
+ sd = elf_getdata(sc, NULL);
+ if (sd == NULL)
+ return -EINVAL;
+ sm = sd->d_buf;
+
+ n = re_sz / sizeof(re[0]);
+ for (i = 0; i != n; i++) {
+
+ ofs = re[i].r_offset;
+
+ /* retrieve index in the symtable */
+ sym = ELF64_R_SYM(re[i].r_info);
+ if (sym * sizeof(sm[0]) >= sd->d_size)
+ return -EINVAL;
+
+ sn = elf_strptr(elf, eh->e_shstrndx, sm[sym].st_name);
+
+ rc = resolve_xsym(sn, ofs, ins, ins_sz, prm);
+ if (rc != 0) {
+ RTE_BPF_LOG(ERR,
+ "resolve_xsym(%s, %zu) error code: %d\n",
+ sn, ofs, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * helper function, find relocation information (if any)
+ * and update bpf code.
+ */
+static int
+elf_reloc_code(Elf *elf, Elf_Data *ed, size_t sidx,
+ const struct rte_bpf_prm *prm)
+{
+ Elf64_Rel *re;
+ Elf_Scn *sc;
+ const Elf64_Shdr *sh;
+ const Elf_Data *sd;
+ int32_t rc;
+
+ rc = 0;
+
+ /* walk through all sections */
+ for (sc = elf_nextscn(elf, NULL); sc != NULL && rc == 0;
+ sc = elf_nextscn(elf, sc)) {
+
+ sh = elf64_getshdr(sc);
+
+ /* relocation data for our code section */
+ if (sh->sh_type == SHT_REL && sh->sh_info == sidx) {
+ sd = elf_getdata(sc, NULL);
+ if (sd == NULL || sd->d_size == 0 ||
+ sd->d_size % sizeof(re[0]) != 0)
+ return -EINVAL;
+ rc = process_reloc(elf, sh->sh_link,
+ sd->d_buf, sd->d_size, ed->d_buf, ed->d_size,
+ prm);
+ }
+ }
+
+ return rc;
+}
+
+static struct rte_bpf *
+bpf_load_elf(const struct rte_bpf_prm *prm, int32_t fd, const char *section)
+{
+ Elf *elf;
+ Elf_Data *sd;
+ size_t sidx;
+ int32_t rc;
+ struct rte_bpf *bpf;
+ struct rte_bpf_prm np;
+
+ elf_version(EV_CURRENT);
+ elf = elf_begin(fd, ELF_C_READ, NULL);
+
+ rc = find_elf_code(elf, section, &sd, &sidx);
+ if (rc == 0)
+ rc = elf_reloc_code(elf, sd, sidx, prm);
+
+ if (rc == 0) {
+ np = prm[0];
+ np.ins = sd->d_buf;
+ np.nb_ins = sd->d_size / sizeof(struct ebpf_insn);
+ bpf = rte_bpf_load(&np);
+ } else {
+ bpf = NULL;
+ rte_errno = -rc;
+ }
+
+ elf_end(elf);
+ return bpf;
+}
+
+__rte_experimental struct rte_bpf *
+rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
+ const char *sname)
+{
+ int32_t fd, rc;
+ struct rte_bpf *bpf;
+
+ if (prm == NULL || fname == NULL || sname == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ fd = open(fname, O_RDONLY);
+ if (fd < 0) {
+ rc = errno;
+ RTE_BPF_LOG(ERR, "%s(%s) error code: %d(%s)\n",
+ __func__, fname, rc, strerror(rc));
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ bpf = bpf_load_elf(prm, fd, sname);
+ close(fd);
+
+ if (bpf == NULL) {
+ RTE_BPF_LOG(ERR,
+ "%s(fname=\"%s\", sname=\"%s\") failed, "
+ "error code: %d\n",
+ __func__, fname, sname, rte_errno);
+ return NULL;
+ }
+
+ RTE_BPF_LOG(INFO, "%s(fname=\"%s\", sname=\"%s\") "
+ "successfully creates %p(jit={.func=%p,.sz=%zu});\n",
+ __func__, fname, sname, bpf, bpf->jit.func, bpf->jit.sz);
+ return bpf;
+}
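+
+/*
+ * Usage sketch (hypothetical file and section names):
+ *
+ *   struct rte_bpf_prm prm = {
+ *           .prog_arg = { .type = RTE_BPF_ARG_PTR, .size = 64, },
+ *   };
+ *   struct rte_bpf *bpf = rte_bpf_elf_load(&prm, "flt.o", ".text");
+ *
+ * Note that prm.ins/prm.nb_ins are ignored here: bpf_load_elf()
+ * overwrites them with the instructions found in the requested section.
+ */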
diff --git a/lib/librte_bpf/bpf_pkt.c b/lib/librte_bpf/bpf_pkt.c
new file mode 100644
index 00000000..ab9daa52
--- /dev/null
+++ b/lib/librte_bpf/bpf_pkt.c
@@ -0,0 +1,605 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+
+#include <rte_bpf_ethdev.h>
+#include "bpf_impl.h"
+
+/*
+ * information about installed BPF rx/tx callback
+ */
+
+struct bpf_eth_cbi {
+ /* used by both data & control path */
+ uint32_t use; /* usage counter */
+ const struct rte_eth_rxtx_callback *cb; /* callback handle */
+ struct rte_bpf *bpf;
+ struct rte_bpf_jit jit;
+ /* used by control path only */
+ LIST_ENTRY(bpf_eth_cbi) link;
+ uint16_t port;
+ uint16_t queue;
+} __rte_cache_aligned;
+
+/*
+ * Odd number means that callback is used by datapath.
+ * Even number means that callback is not used by datapath.
+ */
+#define BPF_ETH_CBI_INUSE 1
+
+/*
+ * List to manage RX/TX installed callbacks.
+ */
+LIST_HEAD(bpf_eth_cbi_list, bpf_eth_cbi);
+
+enum {
+ BPF_ETH_RX,
+ BPF_ETH_TX,
+ BPF_ETH_NUM,
+};
+
+/*
+ * information about all installed BPF rx/tx callbacks
+ */
+struct bpf_eth_cbh {
+ rte_spinlock_t lock;
+ struct bpf_eth_cbi_list list;
+ uint32_t type;
+};
+
+static struct bpf_eth_cbh rx_cbh = {
+ .lock = RTE_SPINLOCK_INITIALIZER,
+ .list = LIST_HEAD_INITIALIZER(list),
+ .type = BPF_ETH_RX,
+};
+
+static struct bpf_eth_cbh tx_cbh = {
+ .lock = RTE_SPINLOCK_INITIALIZER,
+ .list = LIST_HEAD_INITIALIZER(list),
+ .type = BPF_ETH_TX,
+};
+
+/*
+ * Marks given callback as used by datapath.
+ */
+static __rte_always_inline void
+bpf_eth_cbi_inuse(struct bpf_eth_cbi *cbi)
+{
+ cbi->use++;
+ /* make sure no store/load reordering could happen */
+ rte_smp_mb();
+}
+
+/*
+ * Marks the given callback as not used by the datapath.
+ */
+static __rte_always_inline void
+bpf_eth_cbi_unuse(struct bpf_eth_cbi *cbi)
+{
+ /* make sure all previous loads are completed */
+ rte_smp_rmb();
+ cbi->use++;
+}
+
+/*
+ * Waits till the datapath has finished using the given callback.
+ */
+static void
+bpf_eth_cbi_wait(const struct bpf_eth_cbi *cbi)
+{
+ uint32_t nuse, puse;
+
+ /* make sure all previous loads and stores are completed */
+ rte_smp_mb();
+
+ puse = cbi->use;
+
+ /* in use, busy wait till current RX/TX iteration is finished */
+ if ((puse & BPF_ETH_CBI_INUSE) != 0) {
+ do {
+ rte_pause();
+ rte_compiler_barrier();
+ nuse = cbi->use;
+ } while (nuse == puse);
+ }
+}
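+
+/*
+ * Counter protocol sketch: 'use' starts even (idle).  The datapath
+ * increments it on entry (odd == busy) and again on exit (even ==
+ * idle), so bpf_eth_cbi_wait() only spins while the sampled value is
+ * odd, and any change of the counter means the iteration it sampled
+ * has completed.
+ */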
+
+static void
+bpf_eth_cbi_cleanup(struct bpf_eth_cbi *bc)
+{
+ bc->bpf = NULL;
+ memset(&bc->jit, 0, sizeof(bc->jit));
+}
+
+static struct bpf_eth_cbi *
+bpf_eth_cbh_find(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)
+{
+ struct bpf_eth_cbi *cbi;
+
+ LIST_FOREACH(cbi, &cbh->list, link) {
+ if (cbi->port == port && cbi->queue == queue)
+ break;
+ }
+ return cbi;
+}
+
+static struct bpf_eth_cbi *
+bpf_eth_cbh_add(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)
+{
+ struct bpf_eth_cbi *cbi;
+
+ /* return an existing one */
+ cbi = bpf_eth_cbh_find(cbh, port, queue);
+ if (cbi != NULL)
+ return cbi;
+
+ cbi = rte_zmalloc(NULL, sizeof(*cbi), RTE_CACHE_LINE_SIZE);
+ if (cbi != NULL) {
+ cbi->port = port;
+ cbi->queue = queue;
+ LIST_INSERT_HEAD(&cbh->list, cbi, link);
+ }
+ return cbi;
+}
+
+/*
+ * BPF packet processing routines.
+ */
+
+static inline uint32_t
+apply_filter(struct rte_mbuf *mb[], const uint64_t rc[], uint32_t num,
+ uint32_t drop)
+{
+ uint32_t i, j, k;
+ struct rte_mbuf *dr[num];
+
+ for (i = 0, j = 0, k = 0; i != num; i++) {
+
+ /* filter matches */
+ if (rc[i] != 0)
+ mb[j++] = mb[i];
+ /* no match */
+ else
+ dr[k++] = mb[i];
+ }
+
+ if (drop != 0) {
+ /* free filtered out mbufs */
+ for (i = 0; i != k; i++)
+ rte_pktmbuf_free(dr[i]);
+ } else {
+ /* copy filtered out mbufs beyond good ones */
+ for (i = 0; i != k; i++)
+ mb[j + i] = dr[i];
+ }
+
+ return j;
+}
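+
+/*
+ * Example (illustrative): num == 4, rc == {1, 0, 1, 0}.  On return
+ * j == 2 and mb[0..1] hold the two matching packets; with drop == 0
+ * the two rejected packets are kept in mb[2..3], with drop != 0 they
+ * are freed instead.
+ */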
+
+static inline uint32_t
+pkt_filter_vm(const struct rte_bpf *bpf, struct rte_mbuf *mb[], uint32_t num,
+ uint32_t drop)
+{
+ uint32_t i;
+ void *dp[num];
+ uint64_t rc[num];
+
+ for (i = 0; i != num; i++)
+ dp[i] = rte_pktmbuf_mtod(mb[i], void *);
+
+ rte_bpf_exec_burst(bpf, dp, rc, num);
+ return apply_filter(mb, rc, num, drop);
+}
+
+static inline uint32_t
+pkt_filter_jit(const struct rte_bpf_jit *jit, struct rte_mbuf *mb[],
+ uint32_t num, uint32_t drop)
+{
+ uint32_t i, n;
+ void *dp;
+ uint64_t rc[num];
+
+ n = 0;
+ for (i = 0; i != num; i++) {
+ dp = rte_pktmbuf_mtod(mb[i], void *);
+ rc[i] = jit->func(dp);
+ n += (rc[i] == 0);
+ }
+
+ if (n != 0)
+ num = apply_filter(mb, rc, num, drop);
+
+ return num;
+}
+
+static inline uint32_t
+pkt_filter_mb_vm(const struct rte_bpf *bpf, struct rte_mbuf *mb[], uint32_t num,
+ uint32_t drop)
+{
+ uint64_t rc[num];
+
+ rte_bpf_exec_burst(bpf, (void **)mb, rc, num);
+ return apply_filter(mb, rc, num, drop);
+}
+
+static inline uint32_t
+pkt_filter_mb_jit(const struct rte_bpf_jit *jit, struct rte_mbuf *mb[],
+ uint32_t num, uint32_t drop)
+{
+ uint32_t i, n;
+ uint64_t rc[num];
+
+ n = 0;
+ for (i = 0; i != num; i++) {
+ rc[i] = jit->func(mb[i]);
+ n += (rc[i] == 0);
+ }
+
+ if (n != 0)
+ num = apply_filter(mb, rc, num, drop);
+
+ return num;
+}
+
+/*
+ * RX/TX callbacks for raw data bpf.
+ */
+
+static uint16_t
+bpf_rx_callback_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, void *user_param)
+{
+ struct bpf_eth_cbi *cbi;
+ uint16_t rc;
+
+ cbi = user_param;
+ bpf_eth_cbi_inuse(cbi);
+ rc = (cbi->cb != NULL) ?
+ pkt_filter_vm(cbi->bpf, pkt, nb_pkts, 1) :
+ nb_pkts;
+ bpf_eth_cbi_unuse(cbi);
+ return rc;
+}
+
+static uint16_t
+bpf_rx_callback_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, void *user_param)
+{
+ struct bpf_eth_cbi *cbi;
+ uint16_t rc;
+
+ cbi = user_param;
+ bpf_eth_cbi_inuse(cbi);
+ rc = (cbi->cb != NULL) ?
+ pkt_filter_jit(&cbi->jit, pkt, nb_pkts, 1) :
+ nb_pkts;
+ bpf_eth_cbi_unuse(cbi);
+ return rc;
+}
+
+static uint16_t
+bpf_tx_callback_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)
+{
+ struct bpf_eth_cbi *cbi;
+ uint16_t rc;
+
+ cbi = user_param;
+ bpf_eth_cbi_inuse(cbi);
+ rc = (cbi->cb != NULL) ?
+ pkt_filter_vm(cbi->bpf, pkt, nb_pkts, 0) :
+ nb_pkts;
+ bpf_eth_cbi_unuse(cbi);
+ return rc;
+}
+
+static uint16_t
+bpf_tx_callback_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)
+{
+ struct bpf_eth_cbi *cbi;
+ uint16_t rc;
+
+ cbi = user_param;
+ bpf_eth_cbi_inuse(cbi);
+ rc = (cbi->cb != NULL) ?
+ pkt_filter_jit(&cbi->jit, pkt, nb_pkts, 0) :
+ nb_pkts;
+ bpf_eth_cbi_unuse(cbi);
+ return rc;
+}
+
+/*
+ * RX/TX callbacks for mbuf.
+ */
+
+static uint16_t
+bpf_rx_callback_mb_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, void *user_param)
+{
+ struct bpf_eth_cbi *cbi;
+ uint16_t rc;
+
+ cbi = user_param;
+ bpf_eth_cbi_inuse(cbi);
+ rc = (cbi->cb != NULL) ?
+ pkt_filter_mb_vm(cbi->bpf, pkt, nb_pkts, 1) :
+ nb_pkts;
+ bpf_eth_cbi_unuse(cbi);
+ return rc;
+}
+
+static uint16_t
+bpf_rx_callback_mb_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts,
+ __rte_unused uint16_t max_pkts, void *user_param)
+{
+ struct bpf_eth_cbi *cbi;
+ uint16_t rc;
+
+ cbi = user_param;
+ bpf_eth_cbi_inuse(cbi);
+ rc = (cbi->cb != NULL) ?
+ pkt_filter_mb_jit(&cbi->jit, pkt, nb_pkts, 1) :
+ nb_pkts;
+ bpf_eth_cbi_unuse(cbi);
+ return rc;
+}
+
+static uint16_t
+bpf_tx_callback_mb_vm(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)
+{
+ struct bpf_eth_cbi *cbi;
+ uint16_t rc;
+
+ cbi = user_param;
+ bpf_eth_cbi_inuse(cbi);
+ rc = (cbi->cb != NULL) ?
+ pkt_filter_mb_vm(cbi->bpf, pkt, nb_pkts, 0) :
+ nb_pkts;
+ bpf_eth_cbi_unuse(cbi);
+ return rc;
+}
+
+static uint16_t
+bpf_tx_callback_mb_jit(__rte_unused uint16_t port, __rte_unused uint16_t queue,
+ struct rte_mbuf *pkt[], uint16_t nb_pkts, void *user_param)
+{
+ struct bpf_eth_cbi *cbi;
+ uint16_t rc;
+
+ cbi = user_param;
+ bpf_eth_cbi_inuse(cbi);
+ rc = (cbi->cb != NULL) ?
+ pkt_filter_mb_jit(&cbi->jit, pkt, nb_pkts, 0) :
+ nb_pkts;
+ bpf_eth_cbi_unuse(cbi);
+ return rc;
+}
+
+static rte_rx_callback_fn
+select_rx_callback(enum rte_bpf_arg_type type, uint32_t flags)
+{
+ if (flags & RTE_BPF_ETH_F_JIT) {
+ if (type == RTE_BPF_ARG_PTR)
+ return bpf_rx_callback_jit;
+ else if (type == RTE_BPF_ARG_PTR_MBUF)
+ return bpf_rx_callback_mb_jit;
+ } else if (type == RTE_BPF_ARG_PTR)
+ return bpf_rx_callback_vm;
+ else if (type == RTE_BPF_ARG_PTR_MBUF)
+ return bpf_rx_callback_mb_vm;
+
+ return NULL;
+}
+
+static rte_tx_callback_fn
+select_tx_callback(enum rte_bpf_arg_type type, uint32_t flags)
+{
+ if (flags & RTE_BPF_ETH_F_JIT) {
+ if (type == RTE_BPF_ARG_PTR)
+ return bpf_tx_callback_jit;
+ else if (type == RTE_BPF_ARG_PTR_MBUF)
+ return bpf_tx_callback_mb_jit;
+ } else if (type == RTE_BPF_ARG_PTR)
+ return bpf_tx_callback_vm;
+ else if (type == RTE_BPF_ARG_PTR_MBUF)
+ return bpf_tx_callback_mb_vm;
+
+ return NULL;
+}
+
+/*
+ * helper function to perform BPF unload for the given port/queue.
+ * We have to introduce extra complexity (and a possible slowdown) here,
+ * as right now there is no safe generic way to remove an RX/TX callback
+ * while IO is active.
+ * We also don't free the memory allocated for the callback handle itself;
+ * again, right now there is no safe way to do that without first stopping
+ * RX/TX on the given port/queue.
+ */
+static void
+bpf_eth_cbi_unload(struct bpf_eth_cbi *bc)
+{
+ /* mark this cbi as empty */
+ bc->cb = NULL;
+ rte_smp_mb();
+
+ /* make sure datapath doesn't use bpf anymore, then destroy bpf */
+ bpf_eth_cbi_wait(bc);
+ rte_bpf_destroy(bc->bpf);
+ bpf_eth_cbi_cleanup(bc);
+}
+
+static void
+bpf_eth_unload(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)
+{
+ struct bpf_eth_cbi *bc;
+
+ bc = bpf_eth_cbh_find(cbh, port, queue);
+ if (bc == NULL || bc->cb == NULL)
+ return;
+
+ if (cbh->type == BPF_ETH_RX)
+ rte_eth_remove_rx_callback(port, queue, bc->cb);
+ else
+ rte_eth_remove_tx_callback(port, queue, bc->cb);
+
+ bpf_eth_cbi_unload(bc);
+}
+
+
+__rte_experimental void
+rte_bpf_eth_rx_unload(uint16_t port, uint16_t queue)
+{
+ struct bpf_eth_cbh *cbh;
+
+ cbh = &rx_cbh;
+ rte_spinlock_lock(&cbh->lock);
+ bpf_eth_unload(cbh, port, queue);
+ rte_spinlock_unlock(&cbh->lock);
+}
+
+__rte_experimental void
+rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue)
+{
+ struct bpf_eth_cbh *cbh;
+
+ cbh = &tx_cbh;
+ rte_spinlock_lock(&cbh->lock);
+ bpf_eth_unload(cbh, port, queue);
+ rte_spinlock_unlock(&cbh->lock);
+}
+
+static int
+bpf_eth_elf_load(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue,
+ const struct rte_bpf_prm *prm, const char *fname, const char *sname,
+ uint32_t flags)
+{
+ int32_t rc;
+ struct bpf_eth_cbi *bc;
+ struct rte_bpf *bpf;
+ rte_rx_callback_fn frx;
+ rte_tx_callback_fn ftx;
+ struct rte_bpf_jit jit;
+
+ frx = NULL;
+ ftx = NULL;
+
+ if (prm == NULL || rte_eth_dev_is_valid_port(port) == 0 ||
+ queue >= RTE_MAX_QUEUES_PER_PORT)
+ return -EINVAL;
+
+ if (cbh->type == BPF_ETH_RX)
+ frx = select_rx_callback(prm->prog_arg.type, flags);
+ else
+ ftx = select_tx_callback(prm->prog_arg.type, flags);
+
+ if (frx == NULL && ftx == NULL) {
+ RTE_BPF_LOG(ERR, "%s(%u, %u): no callback selected;\n",
+ __func__, port, queue);
+ return -EINVAL;
+ }
+
+ bpf = rte_bpf_elf_load(prm, fname, sname);
+ if (bpf == NULL)
+ return -rte_errno;
+
+ rte_bpf_get_jit(bpf, &jit);
+
+ if ((flags & RTE_BPF_ETH_F_JIT) != 0 && jit.func == NULL) {
+ RTE_BPF_LOG(ERR, "%s(%u, %u): no JIT generated;\n",
+ __func__, port, queue);
+ rte_bpf_destroy(bpf);
+ return -ENOTSUP;
+ }
+
+ /* setup/update global callback info */
+ bc = bpf_eth_cbh_add(cbh, port, queue);
+ if (bc == NULL)
+ return -ENOMEM;
+
+ /* remove old one, if any */
+ if (bc->cb != NULL)
+ bpf_eth_unload(cbh, port, queue);
+
+ bc->bpf = bpf;
+ bc->jit = jit;
+
+ if (cbh->type == BPF_ETH_RX)
+ bc->cb = rte_eth_add_rx_callback(port, queue, frx, bc);
+ else
+ bc->cb = rte_eth_add_tx_callback(port, queue, ftx, bc);
+
+ if (bc->cb == NULL) {
+ rc = -rte_errno;
+ rte_bpf_destroy(bpf);
+ bpf_eth_cbi_cleanup(bc);
+ } else
+ rc = 0;
+
+ return rc;
+}
+
+__rte_experimental int
+rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,
+ const struct rte_bpf_prm *prm, const char *fname, const char *sname,
+ uint32_t flags)
+{
+ int32_t rc;
+ struct bpf_eth_cbh *cbh;
+
+ cbh = &rx_cbh;
+ rte_spinlock_lock(&cbh->lock);
+ rc = bpf_eth_elf_load(cbh, port, queue, prm, fname, sname, flags);
+ rte_spinlock_unlock(&cbh->lock);
+
+ return rc;
+}
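+
+/*
+ * Usage sketch (hypothetical object file):
+ *
+ *   struct rte_bpf_prm prm = {
+ *           .prog_arg = { .type = RTE_BPF_ARG_PTR,
+ *                   .size = RTE_MBUF_DEFAULT_BUF_SIZE, },
+ *   };
+ *   rte_bpf_eth_rx_elf_load(port, queue, &prm, "flt.o", ".text",
+ *           RTE_BPF_ETH_F_JIT);
+ *
+ * From then on every rte_eth_rx_burst(port, queue, ...) runs the
+ * filter and drops the packets for which it returned 0.
+ */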
+
+__rte_experimental int
+rte_bpf_eth_tx_elf_load(uint16_t port, uint16_t queue,
+ const struct rte_bpf_prm *prm, const char *fname, const char *sname,
+ uint32_t flags)
+{
+ int32_t rc;
+ struct bpf_eth_cbh *cbh;
+
+ cbh = &tx_cbh;
+ rte_spinlock_lock(&cbh->lock);
+ rc = bpf_eth_elf_load(cbh, port, queue, prm, fname, sname, flags);
+ rte_spinlock_unlock(&cbh->lock);
+
+ return rc;
+}
diff --git a/lib/librte_bpf/bpf_validate.c b/lib/librte_bpf/bpf_validate.c
new file mode 100644
index 00000000..83983efc
--- /dev/null
+++ b/lib/librte_bpf/bpf_validate.c
@@ -0,0 +1,2248 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+
+#include "bpf_impl.h"
+
+struct bpf_reg_val {
+ struct rte_bpf_arg v;
+ uint64_t mask;
+ struct {
+ int64_t min;
+ int64_t max;
+ } s;
+ struct {
+ uint64_t min;
+ uint64_t max;
+ } u;
+};
+
+struct bpf_eval_state {
+ struct bpf_reg_val rv[EBPF_REG_NUM];
+ struct bpf_reg_val sv[MAX_BPF_STACK_SIZE / sizeof(uint64_t)];
+};
+
+/* possible instruction node colour */
+enum {
+ WHITE,
+ GREY,
+ BLACK,
+ MAX_NODE_COLOUR
+};
+
+/* possible edge types */
+enum {
+ UNKNOWN_EDGE,
+ TREE_EDGE,
+ BACK_EDGE,
+ CROSS_EDGE,
+ MAX_EDGE_TYPE
+};
+
+#define MAX_EDGES 2
+
+struct inst_node {
+ uint8_t colour;
+ uint8_t nb_edge:4;
+ uint8_t cur_edge:4;
+ uint8_t edge_type[MAX_EDGES];
+ uint32_t edge_dest[MAX_EDGES];
+ uint32_t prev_node;
+ struct bpf_eval_state *evst;
+};
+
+struct bpf_verifier {
+ const struct rte_bpf_prm *prm;
+ struct inst_node *in;
+ uint64_t stack_sz;
+ uint32_t nb_nodes;
+ uint32_t nb_jcc_nodes;
+ uint32_t node_colour[MAX_NODE_COLOUR];
+ uint32_t edge_type[MAX_EDGE_TYPE];
+ struct bpf_eval_state *evst;
+ struct inst_node *evin;
+ struct {
+ uint32_t num;
+ uint32_t cur;
+ struct bpf_eval_state *ent;
+ } evst_pool;
+};
+
+struct bpf_ins_check {
+ struct {
+ uint16_t dreg;
+ uint16_t sreg;
+ } mask;
+ struct {
+ uint16_t min;
+ uint16_t max;
+ } off;
+ struct {
+ uint32_t min;
+ uint32_t max;
+ } imm;
+ const char * (*check)(const struct ebpf_insn *);
+ const char * (*eval)(struct bpf_verifier *, const struct ebpf_insn *);
+};
+
+#define ALL_REGS RTE_LEN2MASK(EBPF_REG_NUM, uint16_t)
+#define WRT_REGS RTE_LEN2MASK(EBPF_REG_10, uint16_t)
+#define ZERO_REG RTE_LEN2MASK(EBPF_REG_1, uint16_t)
+
+/*
+ * check and evaluate functions for particular instruction types.
+ */
+
+static const char *
+check_alu_bele(const struct ebpf_insn *ins)
+{
+ if (ins->imm != 16 && ins->imm != 32 && ins->imm != 64)
+ return "invalid imm field";
+ return NULL;
+}
+
+static const char *
+eval_exit(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+ RTE_SET_USED(ins);
+ if (bvf->evst->rv[EBPF_REG_0].v.type == RTE_BPF_ARG_UNDEF)
+ return "undefined return value";
+ return NULL;
+}
+
+/* set up the max possible bounds for this mask */
+static void
+eval_umax_bound(struct bpf_reg_val *rv, uint64_t mask)
+{
+ rv->u.max = mask;
+ rv->u.min = 0;
+}
+
+static void
+eval_smax_bound(struct bpf_reg_val *rv, uint64_t mask)
+{
+ rv->s.max = mask >> 1;
+ rv->s.min = rv->s.max ^ UINT64_MAX;
+}
+
+static void
+eval_max_bound(struct bpf_reg_val *rv, uint64_t mask)
+{
+ eval_umax_bound(rv, mask);
+ eval_smax_bound(rv, mask);
+}
+
+static void
+eval_fill_max_bound(struct bpf_reg_val *rv, uint64_t mask)
+{
+ eval_max_bound(rv, mask);
+ rv->v.type = RTE_BPF_ARG_RAW;
+ rv->mask = mask;
+}
+
+static void
+eval_fill_imm64(struct bpf_reg_val *rv, uint64_t mask, uint64_t val)
+{
+ rv->mask = mask;
+ rv->s.min = val;
+ rv->s.max = val;
+ rv->u.min = val;
+ rv->u.max = val;
+}
+
+static void
+eval_fill_imm(struct bpf_reg_val *rv, uint64_t mask, int32_t imm)
+{
+ uint64_t v;
+
+ v = (uint64_t)imm & mask;
+
+ rv->v.type = RTE_BPF_ARG_RAW;
+ eval_fill_imm64(rv, mask, v);
+}
+
+static const char *
+eval_ld_imm64(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+ uint32_t i;
+ uint64_t val;
+ struct bpf_reg_val *rd;
+
+ val = (uint32_t)ins[0].imm | (uint64_t)(uint32_t)ins[1].imm << 32;
+
+ rd = bvf->evst->rv + ins->dst_reg;
+ rd->v.type = RTE_BPF_ARG_RAW;
+ eval_fill_imm64(rd, UINT64_MAX, val);
+
+ for (i = 0; i != bvf->prm->nb_xsym; i++) {
+
+ /* load of external variable */
+ if (bvf->prm->xsym[i].type == RTE_BPF_XTYPE_VAR &&
+ (uintptr_t)bvf->prm->xsym[i].var.val == val) {
+ rd->v = bvf->prm->xsym[i].var.desc;
+ eval_fill_imm64(rd, UINT64_MAX, 0);
+ break;
+ }
+ }
+
+ return NULL;
+}
+
+static void
+eval_apply_mask(struct bpf_reg_val *rv, uint64_t mask)
+{
+ struct bpf_reg_val rt;
+
+ rt.u.min = rv->u.min & mask;
+ rt.u.max = rv->u.max & mask;
+ if (rt.u.min != rv->u.min || rt.u.max != rv->u.max) {
+ rv->u.max = RTE_MAX(rt.u.max, mask);
+ rv->u.min = 0;
+ }
+
+ eval_smax_bound(&rt, mask);
+ rv->s.max = RTE_MIN(rt.s.max, rv->s.max);
+ rv->s.min = RTE_MAX(rt.s.min, rv->s.min);
+
+ rv->mask = mask;
+}
+
+static void
+eval_add(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
+{
+ struct bpf_reg_val rv;
+
+ rv.u.min = (rd->u.min + rs->u.min) & msk;
+ rv.u.max = (rd->u.max + rs->u.max) & msk;
+ rv.s.min = (rd->s.min + rs->s.min) & msk;
+ rv.s.max = (rd->s.max + rs->s.max) & msk;
+
+ /*
+ * if at least one of the operands is not constant,
+ * then check for overflow
+ */
+ if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
+ (rv.u.min < rd->u.min || rv.u.max < rd->u.max))
+ eval_umax_bound(&rv, msk);
+
+ if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
+ (((rs->s.min < 0 && rv.s.min > rd->s.min) ||
+ rv.s.min < rd->s.min) ||
+ ((rs->s.max < 0 && rv.s.max > rd->s.max) ||
+ rv.s.max < rd->s.max)))
+ eval_smax_bound(&rv, msk);
+
+ rd->s = rv.s;
+ rd->u = rv.u;
+}
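+
+/*
+ * Example (illustrative, msk == UINT64_MAX): rd in [10, 20] plus
+ * rs in [1, 5] yields rd in [11, 25].  If the sum of the upper bounds
+ * could wrap around the mask, the overflow check above widens the
+ * bound to the full [0, msk] range instead.
+ */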
+
+static void
+eval_sub(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, uint64_t msk)
+{
+ struct bpf_reg_val rv;
+
+ rv.u.min = (rd->u.min - rs->u.max) & msk;
+ rv.u.max = (rd->u.max - rs->u.min) & msk;
+ rv.s.min = (rd->s.min - rs->s.max) & msk;
+ rv.s.max = (rd->s.max - rs->s.min) & msk;
+
+ /*
+ * if at least one of the operands is not constant,
+ * then check for overflow
+ */
+ if ((rd->u.min != rd->u.max || rs->u.min != rs->u.max) &&
+ (rv.u.min > rd->u.min || rv.u.max > rd->u.max))
+ eval_umax_bound(&rv, msk);
+
+ if ((rd->s.min != rd->s.max || rs->s.min != rs->s.max) &&
+ (((rs->s.min < 0 && rv.s.min < rd->s.min) ||
+ rv.s.min > rd->s.min) ||
+ ((rs->s.max < 0 && rv.s.max < rd->s.max) ||
+ rv.s.max > rd->s.max)))
+ eval_smax_bound(&rv, msk);
+
+ rd->s = rv.s;
+ rd->u = rv.u;
+}
+
+static void
+eval_lsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+ uint64_t msk)
+{
+ /* check if the shift value is less than the max result bits */
+ if (rs->u.max >= opsz) {
+ eval_max_bound(rd, msk);
+ return;
+ }
+
+ /* check for overflow */
+ if (rd->u.max > RTE_LEN2MASK(opsz - rs->u.max, uint64_t))
+ eval_umax_bound(rd, msk);
+ else {
+ rd->u.max <<= rs->u.max;
+ rd->u.min <<= rs->u.min;
+ }
+
+ /* check that dreg values are, and will remain, non-negative */
+ if ((uint64_t)rd->s.min >> (opsz - 1) != 0 || rd->s.max >=
+ RTE_LEN2MASK(opsz - rs->u.max - 1, int64_t))
+ eval_smax_bound(rd, msk);
+ else {
+ rd->s.max <<= rs->u.max;
+ rd->s.min <<= rs->u.min;
+ }
+}
+
+static void
+eval_rsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+ uint64_t msk)
+{
+ /* check if the shift value is less than the max result bits */
+ if (rs->u.max >= opsz) {
+ eval_max_bound(rd, msk);
+ return;
+ }
+
+ rd->u.max >>= rs->u.min;
+ rd->u.min >>= rs->u.max;
+
+ /* check that dreg values are always non-negative */
+ if ((uint64_t)rd->s.min >> (opsz - 1) != 0)
+ eval_smax_bound(rd, msk);
+ else {
+ rd->s.max >>= rs->u.min;
+ rd->s.min >>= rs->u.max;
+ }
+}
+
+static void
+eval_arsh(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+ uint64_t msk)
+{
+ uint32_t shv;
+
+ /* check if the shift value is less than the max result bits */
+ if (rs->u.max >= opsz) {
+ eval_max_bound(rd, msk);
+ return;
+ }
+
+ rd->u.max = (int64_t)rd->u.max >> rs->u.min;
+ rd->u.min = (int64_t)rd->u.min >> rs->u.max;
+
+ /* if we have 32-bit values - extend them to 64-bit */
+ if (opsz == sizeof(uint32_t) * CHAR_BIT) {
+ rd->s.min <<= opsz;
+ rd->s.max <<= opsz;
+ shv = opsz;
+ } else
+ shv = 0;
+
+ if (rd->s.min < 0)
+ rd->s.min = (rd->s.min >> (rs->u.min + shv)) & msk;
+ else
+ rd->s.min = (rd->s.min >> (rs->u.max + shv)) & msk;
+
+ if (rd->s.max < 0)
+ rd->s.max = (rd->s.max >> (rs->u.max + shv)) & msk;
+ else
+ rd->s.max = (rd->s.max >> (rs->u.min + shv)) & msk;
+}
+
+static uint64_t
+eval_umax_bits(uint64_t v, size_t opsz)
+{
+ if (v == 0)
+ return 0;
+
+ v = __builtin_clzll(v);
+ return RTE_LEN2MASK(opsz - v, uint64_t);
+}
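+
+/*
+ * Example (illustrative): eval_umax_bits(0x13, 64) == 0x1f -- the
+ * highest set bit of the bound is bit 4, so every value not exceeding
+ * it fits under a 5-bit mask.
+ */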
+
+/* estimate max possible value for (v1 & v2) */
+static uint64_t
+eval_uand_max(uint64_t v1, uint64_t v2, size_t opsz)
+{
+ v1 = eval_umax_bits(v1, opsz);
+ v2 = eval_umax_bits(v2, opsz);
+ return (v1 & v2);
+}
+
+/* estimate max possible value for (v1 | v2) */
+static uint64_t
+eval_uor_max(uint64_t v1, uint64_t v2, size_t opsz)
+{
+ v1 = eval_umax_bits(v1, opsz);
+ v2 = eval_umax_bits(v2, opsz);
+ return (v1 | v2);
+}
+
+static void
+eval_and(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+ uint64_t msk)
+{
+ /* both operands are constants */
+ if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+ rd->u.min &= rs->u.min;
+ rd->u.max &= rs->u.max;
+ } else {
+ rd->u.max = eval_uand_max(rd->u.max, rs->u.max, opsz);
+ rd->u.min &= rs->u.min;
+ }
+
+ /* both operands are constants */
+ if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+ rd->s.min &= rs->s.min;
+ rd->s.max &= rs->s.max;
+ /* at least one of operand is non-negative */
+ } else if (rd->s.min >= 0 || rs->s.min >= 0) {
+ rd->s.max = eval_uand_max(rd->s.max & (msk >> 1),
+ rs->s.max & (msk >> 1), opsz);
+ rd->s.min &= rs->s.min;
+ } else
+ eval_smax_bound(rd, msk);
+}
+
+static void
+eval_or(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+ uint64_t msk)
+{
+ /* both operands are constants */
+ if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+ rd->u.min |= rs->u.min;
+ rd->u.max |= rs->u.max;
+ } else {
+ rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
+ rd->u.min |= rs->u.min;
+ }
+
+ /* both operands are constants */
+ if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+ rd->s.min |= rs->s.min;
+ rd->s.max |= rs->s.max;
+
+ /* both operands are non-negative */
+ } else if (rd->s.min >= 0 || rs->s.min >= 0) {
+ rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
+ rd->s.min |= rs->s.min;
+ } else
+ eval_smax_bound(rd, msk);
+}
+
+static void
+eval_xor(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+ uint64_t msk)
+{
+ /* both operands are constants */
+ if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+ rd->u.min ^= rs->u.min;
+ rd->u.max ^= rs->u.max;
+ } else {
+ rd->u.max = eval_uor_max(rd->u.max, rs->u.max, opsz);
+ rd->u.min = 0;
+ }
+
+ /* both operands are constants */
+ if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+ rd->s.min ^= rs->s.min;
+ rd->s.max ^= rs->s.max;
+
+ /* both operands are non-negative */
+ } else if (rd->s.min >= 0 || rs->s.min >= 0) {
+ rd->s.max = eval_uor_max(rd->s.max, rs->s.max, opsz);
+ rd->s.min = 0;
+ } else
+ eval_smax_bound(rd, msk);
+}
+
+static void
+eval_mul(struct bpf_reg_val *rd, const struct bpf_reg_val *rs, size_t opsz,
+ uint64_t msk)
+{
+ /* both operands are constants */
+ if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+ rd->u.min = (rd->u.min * rs->u.min) & msk;
+ rd->u.max = (rd->u.max * rs->u.max) & msk;
+ /* check for overflow */
+ } else if (rd->u.max <= msk >> opsz / 2 && rs->u.max <= msk >> opsz / 2) {
+ rd->u.max *= rs->u.max;
+ rd->u.min *= rs->u.min;
+ } else
+ eval_umax_bound(rd, msk);
+
+ /* both operands are constants */
+ if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+ rd->s.min = (rd->s.min * rs->s.min) & msk;
+ rd->s.max = (rd->s.max * rs->s.max) & msk;
+ /* check that both operands are positive and no overflow */
+ } else if (rd->s.min >= 0 && rs->s.min >= 0) {
+ rd->s.max *= rs->s.max;
+ rd->s.min *= rs->s.min;
+ } else
+ eval_smax_bound(rd, msk);
+}
+
+static const char *
+eval_divmod(uint32_t op, struct bpf_reg_val *rd, struct bpf_reg_val *rs,
+ size_t opsz, uint64_t msk)
+{
+ /* both operands are constants */
+ if (rd->u.min == rd->u.max && rs->u.min == rs->u.max) {
+ if (rs->u.max == 0)
+ return "division by 0";
+ if (op == BPF_DIV) {
+ rd->u.min /= rs->u.min;
+ rd->u.max /= rs->u.max;
+ } else {
+ rd->u.min %= rs->u.min;
+ rd->u.max %= rs->u.max;
+ }
+ } else {
+ if (op == BPF_MOD)
+ rd->u.max = RTE_MIN(rd->u.max, rs->u.max - 1);
+ /* for DIV the current upper bound remains valid */
+ rd->u.min = 0;
+ }
+
+ /* if we have 32-bit values - extend them to 64-bit */
+ if (opsz == sizeof(uint32_t) * CHAR_BIT) {
+ rd->s.min = (int32_t)rd->s.min;
+ rd->s.max = (int32_t)rd->s.max;
+ rs->s.min = (int32_t)rs->s.min;
+ rs->s.max = (int32_t)rs->s.max;
+ }
+
+ /* both operands are constants */
+ if (rd->s.min == rd->s.max && rs->s.min == rs->s.max) {
+ if (rs->s.max == 0)
+ return "division by 0";
+ if (op == BPF_DIV) {
+ rd->s.min /= rs->s.min;
+ rd->s.max /= rs->s.max;
+ } else {
+ rd->s.min %= rs->s.min;
+ rd->s.max %= rs->s.max;
+ }
+ } else if (op == BPF_MOD) {
+ rd->s.max = RTE_MAX(rd->s.max, 0);
+ rd->s.min = RTE_MIN(rd->s.min, 0);
+ } else
+ eval_smax_bound(rd, msk);
+
+ rd->s.max &= msk;
+ rd->s.min &= msk;
+
+ return NULL;
+}
+
+static void
+eval_neg(struct bpf_reg_val *rd, size_t opsz, uint64_t msk)
+{
+ uint64_t ux, uy;
+ int64_t sx, sy;
+
+ /* if we have 32-bit values - extend them to 64-bit */
+ if (opsz == sizeof(uint32_t) * CHAR_BIT) {
+ rd->u.min = (int32_t)rd->u.min;
+ rd->u.max = (int32_t)rd->u.max;
+ }
+
+ ux = -(int64_t)rd->u.min & msk;
+ uy = -(int64_t)rd->u.max & msk;
+
+ rd->u.max = RTE_MAX(ux, uy);
+ rd->u.min = RTE_MIN(ux, uy);
+
+ /* if we have 32-bit values - extend them to 64-bit */
+ if (opsz == sizeof(uint32_t) * CHAR_BIT) {
+ rd->s.min = (int32_t)rd->s.min;
+ rd->s.max = (int32_t)rd->s.max;
+ }
+
+ sx = -rd->s.min & msk;
+ sy = -rd->s.max & msk;
+
+ rd->s.max = RTE_MAX(sx, sy);
+ rd->s.min = RTE_MIN(sx, sy);
+}
+
+/*
+ * check that the destination and source operands are in a defined state.
+ */
+static const char *
+eval_defined(const struct bpf_reg_val *dst, const struct bpf_reg_val *src)
+{
+ if (dst != NULL && dst->v.type == RTE_BPF_ARG_UNDEF)
+ return "dest reg value is undefined";
+ if (src != NULL && src->v.type == RTE_BPF_ARG_UNDEF)
+ return "src reg value is undefined";
+ return NULL;
+}
+
+static const char *
+eval_alu(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+ uint64_t msk;
+ uint32_t op;
+ size_t opsz;
+ const char *err;
+ struct bpf_eval_state *st;
+ struct bpf_reg_val *rd, rs;
+
+ opsz = (BPF_CLASS(ins->code) == BPF_ALU) ?
+ sizeof(uint32_t) : sizeof(uint64_t);
+ opsz = opsz * CHAR_BIT;
+ msk = RTE_LEN2MASK(opsz, uint64_t);
+
+ st = bvf->evst;
+ rd = st->rv + ins->dst_reg;
+
+ if (BPF_SRC(ins->code) == BPF_X) {
+ rs = st->rv[ins->src_reg];
+ eval_apply_mask(&rs, msk);
+ } else
+ eval_fill_imm(&rs, msk, ins->imm);
+
+ eval_apply_mask(rd, msk);
+
+ op = BPF_OP(ins->code);
+
+ err = eval_defined((op != EBPF_MOV) ? rd : NULL,
+ (op != BPF_NEG) ? &rs : NULL);
+ if (err != NULL)
+ return err;
+
+ if (op == BPF_ADD)
+ eval_add(rd, &rs, msk);
+ else if (op == BPF_SUB)
+ eval_sub(rd, &rs, msk);
+ else if (op == BPF_LSH)
+ eval_lsh(rd, &rs, opsz, msk);
+ else if (op == BPF_RSH)
+ eval_rsh(rd, &rs, opsz, msk);
+ else if (op == EBPF_ARSH)
+ eval_arsh(rd, &rs, opsz, msk);
+ else if (op == BPF_AND)
+ eval_and(rd, &rs, opsz, msk);
+ else if (op == BPF_OR)
+ eval_or(rd, &rs, opsz, msk);
+ else if (op == BPF_XOR)
+ eval_xor(rd, &rs, opsz, msk);
+ else if (op == BPF_MUL)
+ eval_mul(rd, &rs, opsz, msk);
+ else if (op == BPF_DIV || op == BPF_MOD)
+ err = eval_divmod(op, rd, &rs, opsz, msk);
+ else if (op == BPF_NEG)
+ eval_neg(rd, opsz, msk);
+ else if (op == EBPF_MOV)
+ *rd = rs;
+ else
+ eval_max_bound(rd, msk);
+
+ return err;
+}
+
+static const char *
+eval_bele(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+ uint64_t msk;
+ struct bpf_eval_state *st;
+ struct bpf_reg_val *rd;
+ const char *err;
+
+ msk = RTE_LEN2MASK(ins->imm, uint64_t);
+
+ st = bvf->evst;
+ rd = st->rv + ins->dst_reg;
+
+ err = eval_defined(rd, NULL);
+ if (err != NULL)
+ return err;
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_BE))
+ eval_max_bound(rd, msk);
+ else
+ eval_apply_mask(rd, msk);
+#else
+ if (ins->code == (BPF_ALU | EBPF_END | EBPF_TO_LE))
+ eval_max_bound(rd, msk);
+ else
+ eval_apply_mask(rd, msk);
+#endif
+
+ return NULL;
+}
+
+static const char *
+eval_ptr(struct bpf_verifier *bvf, struct bpf_reg_val *rm, uint32_t opsz,
+ uint32_t align, int16_t off)
+{
+ struct bpf_reg_val rv;
+
+ /* calculate reg + offset */
+ eval_fill_imm(&rv, rm->mask, off);
+ eval_add(rm, &rv, rm->mask);
+
+ if (RTE_BPF_ARG_PTR_TYPE(rm->v.type) == 0)
+ return "destination is not a pointer";
+
+ if (rm->mask != UINT64_MAX)
+ return "pointer truncation";
+
+ if (rm->u.max + opsz > rm->v.size ||
+ (uint64_t)rm->s.max + opsz > rm->v.size ||
+ rm->s.min < 0)
+ return "memory boundary violation";
+
+ if (rm->u.max % align != 0)
+ return "unaligned memory access";
+
+ if (rm->v.type == RTE_BPF_ARG_PTR_STACK) {
+
+ if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
+ rm->u.max != (uint64_t)rm->s.max)
+ return "stack access with variable offset";
+
+ bvf->stack_sz = RTE_MAX(bvf->stack_sz, rm->v.size - rm->u.max);
+
+ /* pointer to mbuf */
+ } else if (rm->v.type == RTE_BPF_ARG_PTR_MBUF) {
+
+ if (rm->u.max != rm->u.min || rm->s.max != rm->s.min ||
+ rm->u.max != (uint64_t)rm->s.max)
+ return "mbuf access with variable offset";
+ }
+
+ return NULL;
+}
+
+static void
+eval_max_load(struct bpf_reg_val *rv, uint64_t mask)
+{
+ eval_umax_bound(rv, mask);
+
+ /* full 64-bit load */
+ if (mask == UINT64_MAX)
+ eval_smax_bound(rv, mask);
+
+ /* zero-extend load */
+ rv->s.min = rv->u.min;
+ rv->s.max = rv->u.max;
+}
+
+static const char *
+eval_load(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+ uint32_t opsz;
+ uint64_t msk;
+ const char *err;
+ struct bpf_eval_state *st;
+ struct bpf_reg_val *rd, rs;
+ const struct bpf_reg_val *sv;
+
+ st = bvf->evst;
+ rd = st->rv + ins->dst_reg;
+ rs = st->rv[ins->src_reg];
+ opsz = bpf_size(BPF_SIZE(ins->code));
+ msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
+
+ err = eval_ptr(bvf, &rs, opsz, 1, ins->off);
+ if (err != NULL)
+ return err;
+
+ if (rs.v.type == RTE_BPF_ARG_PTR_STACK) {
+
+ sv = st->sv + rs.u.max / sizeof(uint64_t);
+ if (sv->v.type == RTE_BPF_ARG_UNDEF || sv->mask < msk)
+ return "undefined value on the stack";
+
+ *rd = *sv;
+
+ /* pointer to mbuf */
+ } else if (rs.v.type == RTE_BPF_ARG_PTR_MBUF) {
+
+ if (rs.u.max == offsetof(struct rte_mbuf, next)) {
+ eval_fill_imm(rd, msk, 0);
+ rd->v = rs.v;
+ } else if (rs.u.max == offsetof(struct rte_mbuf, buf_addr)) {
+ eval_fill_imm(rd, msk, 0);
+ rd->v.type = RTE_BPF_ARG_PTR;
+ rd->v.size = rs.v.buf_size;
+ } else if (rs.u.max == offsetof(struct rte_mbuf, data_off)) {
+ eval_fill_imm(rd, msk, RTE_PKTMBUF_HEADROOM);
+ rd->v.type = RTE_BPF_ARG_RAW;
+ } else {
+ eval_max_load(rd, msk);
+ rd->v.type = RTE_BPF_ARG_RAW;
+ }
+
+ /* pointer to raw data */
+ } else {
+ eval_max_load(rd, msk);
+ rd->v.type = RTE_BPF_ARG_RAW;
+ }
+
+ return NULL;
+}
+
+static const char *
+eval_mbuf_store(const struct bpf_reg_val *rv, uint32_t opsz)
+{
+ uint32_t i;
+
+ static const struct {
+ size_t off;
+ size_t sz;
+ } mbuf_ro_fields[] = {
+ { .off = offsetof(struct rte_mbuf, buf_addr), },
+ { .off = offsetof(struct rte_mbuf, refcnt), },
+ { .off = offsetof(struct rte_mbuf, nb_segs), },
+ { .off = offsetof(struct rte_mbuf, buf_len), },
+ { .off = offsetof(struct rte_mbuf, pool), },
+ { .off = offsetof(struct rte_mbuf, next), },
+ { .off = offsetof(struct rte_mbuf, priv_size), },
+ };
+
+ for (i = 0; i != RTE_DIM(mbuf_ro_fields) &&
+ (mbuf_ro_fields[i].off + mbuf_ro_fields[i].sz <=
+ rv->u.max || rv->u.max + opsz <= mbuf_ro_fields[i].off);
+ i++)
+ ;
+
+ if (i != RTE_DIM(mbuf_ro_fields))
+ return "store to the read-only mbuf field";
+
+ return NULL;
+}
+
+static const char *
+eval_store(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+ uint32_t opsz;
+ uint64_t msk;
+ const char *err;
+ struct bpf_eval_state *st;
+ struct bpf_reg_val rd, rs, *sv;
+
+ opsz = bpf_size(BPF_SIZE(ins->code));
+ msk = RTE_LEN2MASK(opsz * CHAR_BIT, uint64_t);
+
+ st = bvf->evst;
+ rd = st->rv[ins->dst_reg];
+
+ if (BPF_CLASS(ins->code) == BPF_STX) {
+ rs = st->rv[ins->src_reg];
+ eval_apply_mask(&rs, msk);
+ } else
+ eval_fill_imm(&rs, msk, ins->imm);
+
+ err = eval_defined(NULL, &rs);
+ if (err != NULL)
+ return err;
+
+ err = eval_ptr(bvf, &rd, opsz, 1, ins->off);
+ if (err != NULL)
+ return err;
+
+ if (rd.v.type == RTE_BPF_ARG_PTR_STACK) {
+
+ sv = st->sv + rd.u.max / sizeof(uint64_t);
+ if (BPF_CLASS(ins->code) == BPF_STX &&
+ BPF_MODE(ins->code) == EBPF_XADD)
+ eval_max_bound(sv, msk);
+ else
+ *sv = rs;
+
+ /* pointer to mbuf */
+ } else if (rd.v.type == RTE_BPF_ARG_PTR_MBUF) {
+ err = eval_mbuf_store(&rd, opsz);
+ if (err != NULL)
+ return err;
+ }
+
+ return NULL;
+}
+
+static const char *
+eval_func_arg(struct bpf_verifier *bvf, const struct rte_bpf_arg *arg,
+ struct bpf_reg_val *rv)
+{
+ uint32_t i, n;
+ struct bpf_eval_state *st;
+ const char *err;
+
+ st = bvf->evst;
+
+ if (rv->v.type == RTE_BPF_ARG_UNDEF)
+ return "Undefined argument type";
+
+ if (arg->type != rv->v.type &&
+ arg->type != RTE_BPF_ARG_RAW &&
+ (arg->type != RTE_BPF_ARG_PTR ||
+ RTE_BPF_ARG_PTR_TYPE(rv->v.type) == 0))
+ return "Invalid argument type";
+
+ err = NULL;
+
+ /* argument is a pointer */
+ if (RTE_BPF_ARG_PTR_TYPE(arg->type) != 0) {
+
+ err = eval_ptr(bvf, rv, arg->size, 1, 0);
+
+ /*
+	 * a pointer to a variable on the stack is passed as an
+	 * argument; mark the stack space it occupies as initialized.
+ */
+ if (err == NULL && rv->v.type == RTE_BPF_ARG_PTR_STACK) {
+
+ i = rv->u.max / sizeof(uint64_t);
+ n = i + arg->size / sizeof(uint64_t);
+ while (i != n) {
+ eval_fill_max_bound(st->sv + i, UINT64_MAX);
+ i++;
+		}
+ }
+ }
+
+ return err;
+}
+
+static const char *
+eval_call(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+ uint64_t msk;
+ uint32_t i, idx;
+ struct bpf_reg_val *rv;
+ const struct rte_bpf_xsym *xsym;
+ const char *err;
+
+ idx = ins->imm;
+
+ if (idx >= bvf->prm->nb_xsym ||
+ bvf->prm->xsym[idx].type != RTE_BPF_XTYPE_FUNC)
+ return "invalid external function index";
+
+	/* for now, don't support function calls on 32-bit platforms */
+ if (sizeof(uint64_t) != sizeof(uintptr_t))
+ return "function calls are supported only for 64 bit apps";
+
+ xsym = bvf->prm->xsym + idx;
+
+ /* evaluate function arguments */
+ err = NULL;
+ for (i = 0; i != xsym->func.nb_args && err == NULL; i++) {
+ err = eval_func_arg(bvf, xsym->func.args + i,
+ bvf->evst->rv + EBPF_REG_1 + i);
+ }
+
+ /* R1-R5 argument/scratch registers */
+ for (i = EBPF_REG_1; i != EBPF_REG_6; i++)
+ bvf->evst->rv[i].v.type = RTE_BPF_ARG_UNDEF;
+
+	/* update return value */
+	rv = bvf->evst->rv + EBPF_REG_0;
+ rv->v = xsym->func.ret;
+ msk = (rv->v.type == RTE_BPF_ARG_RAW) ?
+ RTE_LEN2MASK(rv->v.size * CHAR_BIT, uint64_t) : UINTPTR_MAX;
+ eval_max_bound(rv, msk);
+ rv->mask = msk;
+
+ return err;
+}
+
+static void
+eval_jeq_jne(struct bpf_reg_val *trd, struct bpf_reg_val *trs)
+{
+ /* sreg is constant */
+ if (trs->u.min == trs->u.max) {
+ trd->u = trs->u;
+ /* dreg is constant */
+ } else if (trd->u.min == trd->u.max) {
+ trs->u = trd->u;
+ } else {
+ trd->u.max = RTE_MIN(trd->u.max, trs->u.max);
+ trd->u.min = RTE_MAX(trd->u.min, trs->u.min);
+ trs->u = trd->u;
+ }
+
+ /* sreg is constant */
+ if (trs->s.min == trs->s.max) {
+ trd->s = trs->s;
+ /* dreg is constant */
+ } else if (trd->s.min == trd->s.max) {
+ trs->s = trd->s;
+ } else {
+ trd->s.max = RTE_MIN(trd->s.max, trs->s.max);
+ trd->s.min = RTE_MAX(trd->s.min, trs->s.min);
+ trs->s = trd->s;
+ }
+}
+
+static void
+eval_jgt_jle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
+ struct bpf_reg_val *frd, struct bpf_reg_val *frs)
+{
+ frd->u.max = RTE_MIN(frd->u.max, frs->u.min);
+ trd->u.min = RTE_MAX(trd->u.min, trs->u.min + 1);
+}
+
+static void
+eval_jlt_jge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
+ struct bpf_reg_val *frd, struct bpf_reg_val *frs)
+{
+ frd->u.min = RTE_MAX(frd->u.min, frs->u.min);
+ trd->u.max = RTE_MIN(trd->u.max, trs->u.max - 1);
+}
+
+static void
+eval_jsgt_jsle(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
+ struct bpf_reg_val *frd, struct bpf_reg_val *frs)
+{
+ frd->s.max = RTE_MIN(frd->s.max, frs->s.min);
+ trd->s.min = RTE_MAX(trd->s.min, trs->s.min + 1);
+}
+
+static void
+eval_jslt_jsge(struct bpf_reg_val *trd, struct bpf_reg_val *trs,
+ struct bpf_reg_val *frd, struct bpf_reg_val *frs)
+{
+ frd->s.min = RTE_MAX(frd->s.min, frs->s.min);
+ trd->s.max = RTE_MIN(trd->s.max, trs->s.max - 1);
+}
+
+static const char *
+eval_jcc(struct bpf_verifier *bvf, const struct ebpf_insn *ins)
+{
+ uint32_t op;
+ const char *err;
+ struct bpf_eval_state *fst, *tst;
+ struct bpf_reg_val *frd, *frs, *trd, *trs;
+ struct bpf_reg_val rvf, rvt;
+
+ tst = bvf->evst;
+ fst = bvf->evin->evst;
+
+ frd = fst->rv + ins->dst_reg;
+ trd = tst->rv + ins->dst_reg;
+
+ if (BPF_SRC(ins->code) == BPF_X) {
+ frs = fst->rv + ins->src_reg;
+ trs = tst->rv + ins->src_reg;
+ } else {
+ frs = &rvf;
+ trs = &rvt;
+ eval_fill_imm(frs, UINT64_MAX, ins->imm);
+ eval_fill_imm(trs, UINT64_MAX, ins->imm);
+ }
+
+ err = eval_defined(trd, trs);
+ if (err != NULL)
+ return err;
+
+ op = BPF_OP(ins->code);
+
+ if (op == BPF_JEQ)
+ eval_jeq_jne(trd, trs);
+ else if (op == EBPF_JNE)
+ eval_jeq_jne(frd, frs);
+ else if (op == BPF_JGT)
+ eval_jgt_jle(trd, trs, frd, frs);
+ else if (op == EBPF_JLE)
+ eval_jgt_jle(frd, frs, trd, trs);
+ else if (op == EBPF_JLT)
+ eval_jlt_jge(trd, trs, frd, frs);
+ else if (op == BPF_JGE)
+ eval_jlt_jge(frd, frs, trd, trs);
+ else if (op == EBPF_JSGT)
+ eval_jsgt_jsle(trd, trs, frd, frs);
+ else if (op == EBPF_JSLE)
+ eval_jsgt_jsle(frd, frs, trd, trs);
+	else if (op == EBPF_JSLT)
+ eval_jslt_jsge(trd, trs, frd, frs);
+ else if (op == EBPF_JSGE)
+ eval_jslt_jsge(frd, frs, trd, trs);
+
+ return NULL;
+}
+
+/*
+ * validate parameters for each instruction type.
+ */
+static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {
+ /* ALU IMM 32-bit instructions */
+ [(BPF_ALU | BPF_ADD | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_SUB | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_AND | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_OR | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_LSH | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_RSH | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_XOR | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_MUL | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | EBPF_MOV | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_DIV | BPF_K)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 1, .max = UINT32_MAX},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_MOD | BPF_K)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 1, .max = UINT32_MAX},
+ .eval = eval_alu,
+ },
+ /* ALU IMM 64-bit instructions */
+ [(EBPF_ALU64 | BPF_ADD | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_SUB | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_AND | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_OR | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_LSH | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_RSH | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | EBPF_ARSH | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_XOR | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_MUL | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | EBPF_MOV | BPF_K)] = {
+ .mask = {.dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX,},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_DIV | BPF_K)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 1, .max = UINT32_MAX},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_MOD | BPF_K)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 1, .max = UINT32_MAX},
+ .eval = eval_alu,
+ },
+ /* ALU REG 32-bit instructions */
+ [(BPF_ALU | BPF_ADD | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_SUB | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_AND | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_OR | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_LSH | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_RSH | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_XOR | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_MUL | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_DIV | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_MOD | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | EBPF_MOV | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | BPF_NEG)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(BPF_ALU | EBPF_END | EBPF_TO_BE)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 16, .max = 64},
+ .check = check_alu_bele,
+ .eval = eval_bele,
+ },
+ [(BPF_ALU | EBPF_END | EBPF_TO_LE)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 16, .max = 64},
+ .check = check_alu_bele,
+ .eval = eval_bele,
+ },
+ /* ALU REG 64-bit instructions */
+ [(EBPF_ALU64 | BPF_ADD | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_SUB | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_AND | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_OR | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_LSH | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_RSH | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | EBPF_ARSH | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_XOR | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_MUL | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_DIV | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_MOD | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | EBPF_MOV | BPF_X)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ [(EBPF_ALU64 | BPF_NEG)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_alu,
+ },
+ /* load instructions */
+ [(BPF_LDX | BPF_MEM | BPF_B)] = {
+ .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_load,
+ },
+ [(BPF_LDX | BPF_MEM | BPF_H)] = {
+ .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_load,
+ },
+ [(BPF_LDX | BPF_MEM | BPF_W)] = {
+ .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_load,
+ },
+ [(BPF_LDX | BPF_MEM | EBPF_DW)] = {
+ .mask = {. dreg = WRT_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_load,
+ },
+ /* load 64 bit immediate value */
+ [(BPF_LD | BPF_IMM | EBPF_DW)] = {
+ .mask = { .dreg = WRT_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_ld_imm64,
+ },
+ /* store REG instructions */
+ [(BPF_STX | BPF_MEM | BPF_B)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_store,
+ },
+ [(BPF_STX | BPF_MEM | BPF_H)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_store,
+ },
+ [(BPF_STX | BPF_MEM | BPF_W)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_store,
+ },
+ [(BPF_STX | BPF_MEM | EBPF_DW)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_store,
+ },
+ /* atomic add instructions */
+ [(BPF_STX | EBPF_XADD | BPF_W)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_store,
+ },
+ [(BPF_STX | EBPF_XADD | EBPF_DW)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_store,
+ },
+ /* store IMM instructions */
+ [(BPF_ST | BPF_MEM | BPF_B)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_store,
+ },
+ [(BPF_ST | BPF_MEM | BPF_H)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_store,
+ },
+ [(BPF_ST | BPF_MEM | BPF_W)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_store,
+ },
+ [(BPF_ST | BPF_MEM | EBPF_DW)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_store,
+ },
+ /* jump instruction */
+ [(BPF_JMP | BPF_JA)] = {
+ .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ },
+ /* jcc IMM instructions */
+ [(BPF_JMP | BPF_JEQ | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JNE | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | BPF_JGT | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JLT | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | BPF_JGE | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JLE | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JSGT | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JSLT | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JSGE | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JSLE | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | BPF_JSET | BPF_K)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_jcc,
+ },
+ /* jcc REG instructions */
+ [(BPF_JMP | BPF_JEQ | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JNE | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | BPF_JGT | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JLT | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | BPF_JGE | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JLE | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JSGT | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JSLT | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+		.eval = eval_jcc,
+	},
+ [(BPF_JMP | EBPF_JSGE | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | EBPF_JSLE | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ [(BPF_JMP | BPF_JSET | BPF_X)] = {
+ .mask = { .dreg = ALL_REGS, .sreg = ALL_REGS},
+ .off = { .min = 0, .max = UINT16_MAX},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_jcc,
+ },
+ /* call instruction */
+ [(BPF_JMP | EBPF_CALL)] = {
+ .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = UINT32_MAX},
+ .eval = eval_call,
+ },
+ /* ret instruction */
+ [(BPF_JMP | EBPF_EXIT)] = {
+ .mask = { .dreg = ZERO_REG, .sreg = ZERO_REG},
+ .off = { .min = 0, .max = 0},
+ .imm = { .min = 0, .max = 0},
+ .eval = eval_exit,
+ },
+};
+
+/*
+ * make sure that instruction syntax is valid,
+ * and its fields don't violate particular instruction type restrictions.
+ */
+static const char *
+check_syntax(const struct ebpf_insn *ins)
+{
+ uint8_t op;
+ uint16_t off;
+ uint32_t imm;
+
+ op = ins->code;
+
+ if (ins_chk[op].mask.dreg == 0)
+ return "invalid opcode";
+
+ if ((ins_chk[op].mask.dreg & 1 << ins->dst_reg) == 0)
+ return "invalid dst-reg field";
+
+ if ((ins_chk[op].mask.sreg & 1 << ins->src_reg) == 0)
+ return "invalid src-reg field";
+
+ off = ins->off;
+ if (ins_chk[op].off.min > off || ins_chk[op].off.max < off)
+ return "invalid off field";
+
+ imm = ins->imm;
+ if (ins_chk[op].imm.min > imm || ins_chk[op].imm.max < imm)
+ return "invalid imm field";
+
+ if (ins_chk[op].check != NULL)
+ return ins_chk[op].check(ins);
+
+ return NULL;
+}
+
+/*
+ * helper function, return instruction index for the given node.
+ */
+static uint32_t
+get_node_idx(const struct bpf_verifier *bvf, const struct inst_node *node)
+{
+ return node - bvf->in;
+}
+
+/*
+ * helper function, used to walk through constructed CFG.
+ */
+static struct inst_node *
+get_next_node(struct bpf_verifier *bvf, struct inst_node *node)
+{
+ uint32_t ce, ne, dst;
+
+ ne = node->nb_edge;
+ ce = node->cur_edge;
+ if (ce == ne)
+ return NULL;
+
+ node->cur_edge++;
+ dst = node->edge_dest[ce];
+ return bvf->in + dst;
+}
+
+static void
+set_node_colour(struct bpf_verifier *bvf, struct inst_node *node,
+ uint32_t new)
+{
+ uint32_t prev;
+
+ prev = node->colour;
+ node->colour = new;
+
+ bvf->node_colour[prev]--;
+ bvf->node_colour[new]++;
+}
+
+/*
+ * helper function, add new edge between two nodes.
+ */
+static int
+add_edge(struct bpf_verifier *bvf, struct inst_node *node, uint32_t nidx)
+{
+ uint32_t ne;
+
+	if (nidx >= bvf->prm->nb_ins) {
+ RTE_BPF_LOG(ERR, "%s: program boundary violation at pc: %u, "
+ "next pc: %u\n",
+ __func__, get_node_idx(bvf, node), nidx);
+ return -EINVAL;
+ }
+
+ ne = node->nb_edge;
+ if (ne >= RTE_DIM(node->edge_dest)) {
+ RTE_BPF_LOG(ERR, "%s: internal error at pc: %u\n",
+ __func__, get_node_idx(bvf, node));
+ return -EINVAL;
+ }
+
+ node->edge_dest[ne] = nidx;
+ node->nb_edge = ne + 1;
+ return 0;
+}
+
+/*
+ * helper function, determine type of edge between two nodes.
+ */
+static void
+set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
+ const struct inst_node *next)
+{
+ uint32_t ce, clr, type;
+
+ ce = node->cur_edge - 1;
+ clr = next->colour;
+
+ type = UNKNOWN_EDGE;
+
+ if (clr == WHITE)
+ type = TREE_EDGE;
+ else if (clr == GREY)
+ type = BACK_EDGE;
+ else if (clr == BLACK)
+ /*
+			 * in fact it could be either a forward or a cross edge,
+ * but for now, we don't need to distinguish between them.
+ */
+ type = CROSS_EDGE;
+
+ node->edge_type[ce] = type;
+ bvf->edge_type[type]++;
+}
+
+static struct inst_node *
+get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
+{
+ return bvf->in + node->prev_node;
+}
+
+/*
+ * Depth-First Search (DFS) through previously constructed
+ * Control Flow Graph (CFG).
+ * Information collected during this pass is used later
+ * to determine whether there are any loops and/or unreachable instructions.
+ */
+static void
+dfs(struct bpf_verifier *bvf)
+{
+ struct inst_node *next, *node;
+
+ node = bvf->in;
+ while (node != NULL) {
+
+ if (node->colour == WHITE)
+ set_node_colour(bvf, node, GREY);
+
+ if (node->colour == GREY) {
+
+ /* find next unprocessed child node */
+ do {
+ next = get_next_node(bvf, node);
+ if (next == NULL)
+ break;
+ set_edge_type(bvf, node, next);
+ } while (next->colour != WHITE);
+
+ if (next != NULL) {
+ /* proceed with next child */
+ next->prev_node = get_node_idx(bvf, node);
+ node = next;
+ } else {
+ /*
+				 * finished with current node and all its kids,
+ * proceed with parent
+ */
+ set_node_colour(bvf, node, BLACK);
+ node->cur_edge = 0;
+ node = get_prev_node(bvf, node);
+ }
+ } else
+ node = NULL;
+ }
+}
+
+/*
+ * report unreachable instructions.
+ */
+static void
+log_unreachable(const struct bpf_verifier *bvf)
+{
+ uint32_t i;
+ struct inst_node *node;
+ const struct ebpf_insn *ins;
+
+ for (i = 0; i != bvf->prm->nb_ins; i++) {
+
+ node = bvf->in + i;
+ ins = bvf->prm->ins + i;
+
+ if (node->colour == WHITE &&
+ ins->code != (BPF_LD | BPF_IMM | EBPF_DW))
+ RTE_BPF_LOG(ERR, "unreachable code at pc: %u;\n", i);
+ }
+}
+
+/*
+ * report loops detected.
+ */
+static void
+log_loop(const struct bpf_verifier *bvf)
+{
+ uint32_t i, j;
+ struct inst_node *node;
+
+ for (i = 0; i != bvf->prm->nb_ins; i++) {
+
+ node = bvf->in + i;
+ if (node->colour != BLACK)
+ continue;
+
+ for (j = 0; j != node->nb_edge; j++) {
+ if (node->edge_type[j] == BACK_EDGE)
+ RTE_BPF_LOG(ERR,
+ "loop at pc:%u --> pc:%u;\n",
+ i, node->edge_dest[j]);
+ }
+ }
+}
+
+/*
+ * First pass goes through all instructions in the set, checks that each
+ * instruction is a valid one (correct syntax, valid field values, etc.)
+ * and constructs the control flow graph (CFG).
+ * Then a depth-first search is performed over the constructed graph.
+ * Programs with unreachable instructions and/or loops will be rejected
+ * (see the illustrative sketch after this function).
+ */
+static int
+validate(struct bpf_verifier *bvf)
+{
+ int32_t rc;
+ uint32_t i;
+ struct inst_node *node;
+ const struct ebpf_insn *ins;
+ const char *err;
+
+ rc = 0;
+ for (i = 0; i < bvf->prm->nb_ins; i++) {
+
+ ins = bvf->prm->ins + i;
+ node = bvf->in + i;
+
+ err = check_syntax(ins);
+		if (err != NULL) {
+ RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
+ __func__, err, i);
+ rc |= -EINVAL;
+ }
+
+ /*
+		 * construct CFG: jcc nodes have two outgoing edges,
+		 * 'exit' nodes have none, all other nodes have exactly one
+		 * outgoing edge.
+ */
+ switch (ins->code) {
+ case (BPF_JMP | EBPF_EXIT):
+ break;
+ case (BPF_JMP | BPF_JEQ | BPF_K):
+ case (BPF_JMP | EBPF_JNE | BPF_K):
+ case (BPF_JMP | BPF_JGT | BPF_K):
+ case (BPF_JMP | EBPF_JLT | BPF_K):
+ case (BPF_JMP | BPF_JGE | BPF_K):
+ case (BPF_JMP | EBPF_JLE | BPF_K):
+ case (BPF_JMP | EBPF_JSGT | BPF_K):
+ case (BPF_JMP | EBPF_JSLT | BPF_K):
+ case (BPF_JMP | EBPF_JSGE | BPF_K):
+ case (BPF_JMP | EBPF_JSLE | BPF_K):
+ case (BPF_JMP | BPF_JSET | BPF_K):
+ case (BPF_JMP | BPF_JEQ | BPF_X):
+ case (BPF_JMP | EBPF_JNE | BPF_X):
+ case (BPF_JMP | BPF_JGT | BPF_X):
+ case (BPF_JMP | EBPF_JLT | BPF_X):
+ case (BPF_JMP | BPF_JGE | BPF_X):
+ case (BPF_JMP | EBPF_JLE | BPF_X):
+ case (BPF_JMP | EBPF_JSGT | BPF_X):
+ case (BPF_JMP | EBPF_JSLT | BPF_X):
+ case (BPF_JMP | EBPF_JSGE | BPF_X):
+ case (BPF_JMP | EBPF_JSLE | BPF_X):
+ case (BPF_JMP | BPF_JSET | BPF_X):
+ rc |= add_edge(bvf, node, i + ins->off + 1);
+ rc |= add_edge(bvf, node, i + 1);
+ bvf->nb_jcc_nodes++;
+ break;
+ case (BPF_JMP | BPF_JA):
+ rc |= add_edge(bvf, node, i + ins->off + 1);
+ break;
+ /* load 64 bit immediate value */
+ case (BPF_LD | BPF_IMM | EBPF_DW):
+ rc |= add_edge(bvf, node, i + 2);
+ i++;
+ break;
+ default:
+ rc |= add_edge(bvf, node, i + 1);
+ break;
+ }
+
+ bvf->nb_nodes++;
+ bvf->node_colour[WHITE]++;
+ }
+
+ if (rc != 0)
+ return rc;
+
+ dfs(bvf);
+
+ RTE_BPF_LOG(DEBUG, "%s(%p) stats:\n"
+ "nb_nodes=%u;\n"
+ "nb_jcc_nodes=%u;\n"
+		"node_color={[WHITE]=%u, [GREY]=%u, [BLACK]=%u};\n"
+ "edge_type={[UNKNOWN]=%u, [TREE]=%u, [BACK]=%u, [CROSS]=%u};\n",
+ __func__, bvf,
+ bvf->nb_nodes,
+ bvf->nb_jcc_nodes,
+ bvf->node_colour[WHITE], bvf->node_colour[GREY],
+ bvf->node_colour[BLACK],
+ bvf->edge_type[UNKNOWN_EDGE], bvf->edge_type[TREE_EDGE],
+ bvf->edge_type[BACK_EDGE], bvf->edge_type[CROSS_EDGE]);
+
+ if (bvf->node_colour[BLACK] != bvf->nb_nodes) {
+ RTE_BPF_LOG(ERR, "%s(%p) unreachable instructions;\n",
+ __func__, bvf);
+ log_unreachable(bvf);
+ return -EINVAL;
+ }
+
+ if (bvf->node_colour[GREY] != 0 || bvf->node_colour[WHITE] != 0 ||
+ bvf->edge_type[UNKNOWN_EDGE] != 0) {
+ RTE_BPF_LOG(ERR, "%s(%p) DFS internal error;\n",
+ __func__, bvf);
+ return -EINVAL;
+ }
+
+ if (bvf->edge_type[BACK_EDGE] != 0) {
+ RTE_BPF_LOG(ERR, "%s(%p) loops detected;\n",
+ __func__, bvf);
+ log_loop(bvf);
+ return -EINVAL;
+ }
+
+ return 0;
+}
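+
+/*
+ * Illustrative sketch (hypothetical two-instruction program, not part of
+ * the build): a backward BPF_JA passes check_syntax() - its negative
+ * offset wraps to a large uint16_t and stays inside the allowed range -
+ * but produces a BACK_EDGE that dfs() detects and validate() rejects:
+ *
+ *	pc 0: (EBPF_ALU64 | EBPF_MOV | BPF_K), .imm = 0	r0 = 0
+ *	pc 1: (BPF_JMP | BPF_JA), .off = -2		goto pc 0
+ *
+ * add_edge() records pc 1 -> pc 0 (i + ins->off + 1 == 0), dfs() finds
+ * pc 0 still GREY when that edge is examined, so validate() logs
+ * "loop at pc:1 --> pc:0;" and returns -EINVAL.
+ */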
+
+/*
+ * helper functions to get/free eval states.
+ */
+static struct bpf_eval_state *
+pull_eval_state(struct bpf_verifier *bvf)
+{
+ uint32_t n;
+
+ n = bvf->evst_pool.cur;
+ if (n == bvf->evst_pool.num)
+ return NULL;
+
+ bvf->evst_pool.cur = n + 1;
+ return bvf->evst_pool.ent + n;
+}
+
+static void
+push_eval_state(struct bpf_verifier *bvf)
+{
+ bvf->evst_pool.cur--;
+}
+
+static void
+evst_pool_fini(struct bpf_verifier *bvf)
+{
+ bvf->evst = NULL;
+ free(bvf->evst_pool.ent);
+ memset(&bvf->evst_pool, 0, sizeof(bvf->evst_pool));
+}
+
+static int
+evst_pool_init(struct bpf_verifier *bvf)
+{
+ uint32_t n;
+
+ n = bvf->nb_jcc_nodes + 1;
+
+ bvf->evst_pool.ent = calloc(n, sizeof(bvf->evst_pool.ent[0]));
+ if (bvf->evst_pool.ent == NULL)
+ return -ENOMEM;
+
+ bvf->evst_pool.num = n;
+ bvf->evst_pool.cur = 0;
+
+ bvf->evst = pull_eval_state(bvf);
+ return 0;
+}
+
+/*
+ * Save current eval state.
+ */
+static int
+save_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
+{
+ struct bpf_eval_state *st;
+
+ /* get new eval_state for this node */
+ st = pull_eval_state(bvf);
+ if (st == NULL) {
+ RTE_BPF_LOG(ERR,
+ "%s: internal error (out of space) at pc: %u\n",
+ __func__, get_node_idx(bvf, node));
+ return -ENOMEM;
+ }
+
+ /* make a copy of current state */
+ memcpy(st, bvf->evst, sizeof(*st));
+
+ /* swap current state with new one */
+ node->evst = bvf->evst;
+ bvf->evst = st;
+
+ RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
+ __func__, bvf, get_node_idx(bvf, node), node->evst, bvf->evst);
+
+ return 0;
+}
+
+/*
+ * Restore previous eval state and mark current eval state as free.
+ */
+static void
+restore_eval_state(struct bpf_verifier *bvf, struct inst_node *node)
+{
+ RTE_BPF_LOG(DEBUG, "%s(bvf=%p,node=%u) old/new states: %p/%p;\n",
+ __func__, bvf, get_node_idx(bvf, node), bvf->evst, node->evst);
+
+ bvf->evst = node->evst;
+ node->evst = NULL;
+ push_eval_state(bvf);
+}
+
+static void
+log_eval_state(const struct bpf_verifier *bvf, const struct ebpf_insn *ins,
+ uint32_t pc, int32_t loglvl)
+{
+ const struct bpf_eval_state *st;
+ const struct bpf_reg_val *rv;
+
+ rte_log(loglvl, rte_bpf_logtype, "%s(pc=%u):\n", __func__, pc);
+
+ st = bvf->evst;
+ rv = st->rv + ins->dst_reg;
+
+ rte_log(loglvl, rte_bpf_logtype,
+ "r%u={\n"
+ "\tv={type=%u, size=%zu},\n"
+ "\tmask=0x%" PRIx64 ",\n"
+ "\tu={min=0x%" PRIx64 ", max=0x%" PRIx64 "},\n"
+ "\ts={min=%" PRId64 ", max=%" PRId64 "},\n"
+ "};\n",
+ ins->dst_reg,
+ rv->v.type, rv->v.size,
+ rv->mask,
+ rv->u.min, rv->u.max,
+ rv->s.min, rv->s.max);
+}
+
+/*
+ * Do a second pass through the CFG and try to evaluate instructions
+ * along each possible path (see the illustrative sketch after this
+ * function).
+ * Right now the evaluation functionality is quite limited.
+ * Extra checks still need to be added for:
+ * - use/return of uninitialized registers.
+ * - use of uninitialized data from the stack.
+ * - memory boundary violations.
+ */
+static int
+evaluate(struct bpf_verifier *bvf)
+{
+ int32_t rc;
+ uint32_t idx, op;
+ const char *err;
+ const struct ebpf_insn *ins;
+ struct inst_node *next, *node;
+
+ /* initial state of frame pointer */
+ static const struct bpf_reg_val rvfp = {
+ .v = {
+ .type = RTE_BPF_ARG_PTR_STACK,
+ .size = MAX_BPF_STACK_SIZE,
+ },
+ .mask = UINT64_MAX,
+ .u = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
+ .s = {.min = MAX_BPF_STACK_SIZE, .max = MAX_BPF_STACK_SIZE},
+ };
+
+ bvf->evst->rv[EBPF_REG_1].v = bvf->prm->prog_arg;
+ bvf->evst->rv[EBPF_REG_1].mask = UINT64_MAX;
+ if (bvf->prm->prog_arg.type == RTE_BPF_ARG_RAW)
+ eval_max_bound(bvf->evst->rv + EBPF_REG_1, UINT64_MAX);
+
+ bvf->evst->rv[EBPF_REG_10] = rvfp;
+
+ ins = bvf->prm->ins;
+ node = bvf->in;
+ next = node;
+ rc = 0;
+
+ while (node != NULL && rc == 0) {
+
+ /*
+ * current node evaluation, make sure we evaluate
+ * each node only once.
+ */
+ if (next != NULL) {
+
+ bvf->evin = node;
+ idx = get_node_idx(bvf, node);
+ op = ins[idx].code;
+
+			/* for jcc node make a copy of evaluation state */
+ if (node->nb_edge > 1)
+ rc |= save_eval_state(bvf, node);
+
+ if (ins_chk[op].eval != NULL && rc == 0) {
+ err = ins_chk[op].eval(bvf, ins + idx);
+ if (err != NULL) {
+ RTE_BPF_LOG(ERR, "%s: %s at pc: %u\n",
+ __func__, err, idx);
+ rc = -EINVAL;
+ }
+ }
+
+ log_eval_state(bvf, ins + idx, idx, RTE_LOG_DEBUG);
+ bvf->evin = NULL;
+ }
+
+ /* proceed through CFG */
+ next = get_next_node(bvf, node);
+ if (next != NULL) {
+
+ /* proceed with next child */
+ if (node->cur_edge == node->nb_edge &&
+ node->evst != NULL)
+ restore_eval_state(bvf, node);
+
+ next->prev_node = get_node_idx(bvf, node);
+ node = next;
+ } else {
+ /*
+			 * finished with current node and all its kids,
+ * proceed with parent
+ */
+ node->cur_edge = 0;
+ node = get_prev_node(bvf, node);
+
+ /* finished */
+ if (node == bvf->in)
+ node = NULL;
+ }
+ }
+
+ return rc;
+}
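+
+/*
+ * Illustrative sketch (hypothetical program): a load from a stack slot
+ * that was never written is caught during this pass - eval_load() finds
+ * that slot's eval state still equal to RTE_BPF_ARG_UNDEF:
+ *
+ *	pc 0: r0 = *(uint64_t *)(r10 - 8)  -> "undefined value on the stack"
+ *	pc 1: exit
+ */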
+
+int
+bpf_validate(struct rte_bpf *bpf)
+{
+ int32_t rc;
+ struct bpf_verifier bvf;
+
+ /* check input argument type, don't allow mbuf ptr on 32-bit */
+ if (bpf->prm.prog_arg.type != RTE_BPF_ARG_RAW &&
+ bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR &&
+ (sizeof(uint64_t) != sizeof(uintptr_t) ||
+ bpf->prm.prog_arg.type != RTE_BPF_ARG_PTR_MBUF)) {
+ RTE_BPF_LOG(ERR, "%s: unsupported argument type\n", __func__);
+ return -ENOTSUP;
+ }
+
+ memset(&bvf, 0, sizeof(bvf));
+ bvf.prm = &bpf->prm;
+ bvf.in = calloc(bpf->prm.nb_ins, sizeof(bvf.in[0]));
+ if (bvf.in == NULL)
+ return -ENOMEM;
+
+ rc = validate(&bvf);
+
+ if (rc == 0) {
+ rc = evst_pool_init(&bvf);
+ if (rc == 0)
+ rc = evaluate(&bvf);
+ evst_pool_fini(&bvf);
+ }
+
+ free(bvf.in);
+
+ /* copy collected info */
+ if (rc == 0)
+ bpf->stack_sz = bvf.stack_sz;
+
+ return rc;
+}
diff --git a/lib/librte_bpf/meson.build b/lib/librte_bpf/meson.build
new file mode 100644
index 00000000..bc0cd78f
--- /dev/null
+++ b/lib/librte_bpf/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('bpf.c',
+ 'bpf_exec.c',
+ 'bpf_load.c',
+ 'bpf_pkt.c',
+ 'bpf_validate.c')
+
+if arch_subdir == 'x86' and cc.sizeof('void *') == 8
+ sources += files('bpf_jit_x86.c')
+endif
+
+install_headers = files('bpf_def.h',
+ 'rte_bpf.h',
+ 'rte_bpf_ethdev.h')
+
+deps += ['mbuf', 'net', 'ethdev']
+
+dep = cc.find_library('elf', required: false)
+if dep.found() and cc.has_header('libelf.h', dependencies: dep)
+ sources += files('bpf_load_elf.c')
+ ext_deps += dep
+endif
diff --git a/lib/librte_bpf/rte_bpf.h b/lib/librte_bpf/rte_bpf.h
new file mode 100644
index 00000000..ad62ef2c
--- /dev/null
+++ b/lib/librte_bpf/rte_bpf.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_BPF_H_
+#define _RTE_BPF_H_
+
+/**
+ * @file rte_bpf.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * RTE BPF support.
+ * librte_bpf provides a framework to load and execute eBPF bytecode
+ * inside user-space DPDK-based applications.
+ * It supports a basic set of features from the eBPF spec
+ * (https://www.kernel.org/doc/Documentation/networking/filter.txt).
+ */
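+
+/*
+ * Minimal usage sketch (illustrative only: 'ins', 'nb' and 'struct my_ctx'
+ * are hypothetical, caller-provided names; error handling is elided):
+ *
+ *	struct my_ctx ctx;
+ *	struct rte_bpf_prm prm = {
+ *		.ins = ins,
+ *		.nb_ins = nb,
+ *		.prog_arg = {
+ *			.type = RTE_BPF_ARG_PTR,
+ *			.size = sizeof(ctx),
+ *		},
+ *	};
+ *	struct rte_bpf *bpf = rte_bpf_load(&prm);
+ *	uint64_t rc = rte_bpf_exec(bpf, &ctx);
+ *	rte_bpf_destroy(bpf);
+ */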
+
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <bpf_def.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Possible types for function/BPF program arguments.
+ */
+enum rte_bpf_arg_type {
+ RTE_BPF_ARG_UNDEF, /**< undefined */
+ RTE_BPF_ARG_RAW, /**< scalar value */
+ RTE_BPF_ARG_PTR = 0x10, /**< pointer to data buffer */
+ RTE_BPF_ARG_PTR_MBUF, /**< pointer to rte_mbuf */
+ RTE_BPF_ARG_PTR_STACK,
+};
+
+/**
+ * function argument information
+ */
+struct rte_bpf_arg {
+ enum rte_bpf_arg_type type;
+ /**
+ * for ptr type - max size of data buffer it points to
+ * for raw type - the size (in bytes) of the value
+ */
+ size_t size;
+ size_t buf_size;
+ /**< for mbuf ptr type, max size of rte_mbuf data buffer */
+};
+
+/**
+ * determine whether the argument is a pointer
+ */
+#define RTE_BPF_ARG_PTR_TYPE(x) ((x) & RTE_BPF_ARG_PTR)
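+
+/* e.g. RTE_BPF_ARG_PTR_TYPE(RTE_BPF_ARG_PTR_MBUF) != 0,
+ * while RTE_BPF_ARG_PTR_TYPE(RTE_BPF_ARG_RAW) == 0.
+ */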
+
+/**
+ * Possible types for external symbols.
+ */
+enum rte_bpf_xtype {
+ RTE_BPF_XTYPE_FUNC, /**< function */
+ RTE_BPF_XTYPE_VAR, /**< variable */
+ RTE_BPF_XTYPE_NUM
+};
+
+/**
+ * Definition for external symbols available in the BPF program.
+ */
+struct rte_bpf_xsym {
+ const char *name; /**< name */
+ enum rte_bpf_xtype type; /**< type */
+ union {
+ struct {
+ uint64_t (*val)(uint64_t, uint64_t, uint64_t,
+ uint64_t, uint64_t);
+ uint32_t nb_args;
+ struct rte_bpf_arg args[EBPF_FUNC_MAX_ARGS];
+			/**< Function argument descriptions. */
+ struct rte_bpf_arg ret; /**< function return value. */
+ } func;
+ struct {
+ void *val; /**< actual memory location */
+ struct rte_bpf_arg desc; /**< type, size, etc. */
+ } var; /**< external variable */
+ };
+};
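+
+/*
+ * Example (hypothetical helper; illustrative only): exposing a C function
+ * to eBPF code as an external symbol, so the program can reference it by
+ * name and invoke it via a CALL instruction:
+ *
+ *	static uint64_t
+ *	my_xor(uint64_t a, uint64_t b, uint64_t c, uint64_t d, uint64_t e)
+ *	{
+ *		(void)c; (void)d; (void)e;
+ *		return a ^ b;
+ *	}
+ *
+ *	static const struct rte_bpf_xsym xsym[] = { {
+ *		.name = "my_xor",
+ *		.type = RTE_BPF_XTYPE_FUNC,
+ *		.func = {
+ *			.val = my_xor,
+ *			.nb_args = 2,
+ *			.args = {
+ *				[0] = { .type = RTE_BPF_ARG_RAW,
+ *					.size = sizeof(uint64_t), },
+ *				[1] = { .type = RTE_BPF_ARG_RAW,
+ *					.size = sizeof(uint64_t), },
+ *			},
+ *			.ret = { .type = RTE_BPF_ARG_RAW,
+ *				.size = sizeof(uint64_t), },
+ *		},
+ *	} };
+ */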
+
+/**
+ * Input parameters for loading eBPF code.
+ */
+struct rte_bpf_prm {
+ const struct ebpf_insn *ins; /**< array of eBPF instructions */
+ uint32_t nb_ins; /**< number of instructions in ins */
+ const struct rte_bpf_xsym *xsym;
+ /**< array of external symbols that eBPF code is allowed to reference */
+ uint32_t nb_xsym; /**< number of elements in xsym */
+ struct rte_bpf_arg prog_arg; /**< eBPF program input arg description */
+};
+
+/**
+ * Information about eBPF code compiled into the native ISA.
+ */
+struct rte_bpf_jit {
+ uint64_t (*func)(void *); /**< JIT-ed native code */
+ size_t sz; /**< size of JIT-ed code */
+};
+
+struct rte_bpf;
+
+/**
+ * De-allocate all memory used by this eBPF execution context.
+ *
+ * @param bpf
+ * BPF handle to destroy.
+ */
+void __rte_experimental
+rte_bpf_destroy(struct rte_bpf *bpf);
+
+/**
+ * Create a new eBPF execution context and load given BPF code into it.
+ *
+ * @param prm
+ *   Parameters used to create and initialise the BPF execution context.
+ * @return
+ * BPF handle that is used in future BPF operations,
+ * or NULL on error, with error code set in rte_errno.
+ * Possible rte_errno errors include:
+ * - EINVAL - invalid parameter passed to function
+ * - ENOMEM - can't reserve enough memory
+ */
+struct rte_bpf * __rte_experimental
+rte_bpf_load(const struct rte_bpf_prm *prm);
+
+/**
+ * Create a new eBPF execution context and load BPF code from given ELF
+ * file into it.
+ *
+ * @param prm
+ *   Parameters used to create and initialise the BPF execution context.
+ * @param fname
+ *   Pathname for an ELF file.
+ * @param sname
+ * Name of the executable section within the file to load.
+ * @return
+ * BPF handle that is used in future BPF operations,
+ * or NULL on error, with error code set in rte_errno.
+ * Possible rte_errno errors include:
+ * - EINVAL - invalid parameter passed to function
+ * - ENOMEM - can't reserve enough memory
+ */
+struct rte_bpf * __rte_experimental
+rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
+	const char *sname);
+
+/**
+ * Execute given BPF bytecode.
+ *
+ * @param bpf
+ * handle for the BPF code to execute.
+ * @param ctx
+ * pointer to input context.
+ * @return
+ * BPF execution return value.
+ */
+uint64_t __rte_experimental
+rte_bpf_exec(const struct rte_bpf *bpf, void *ctx);
+
+/**
+ * Execute given BPF bytecode over a set of input contexts.
+ *
+ * @param bpf
+ * handle for the BPF code to execute.
+ * @param ctx
+ * array of pointers to the input contexts.
+ * @param rc
+ * array of return values (one per input).
+ * @param num
+ * number of elements in ctx[] (and rc[]).
+ * @return
+ * number of successfully processed inputs.
+ */
+uint32_t __rte_experimental
+rte_bpf_exec_burst(const struct rte_bpf *bpf, void *ctx[], uint64_t rc[],
+ uint32_t num);
+
+/**
+ * Provide information about natively compiled code for given BPF handle.
+ *
+ * @param bpf
+ * handle for the BPF code.
+ * @param jit
+ * pointer to the rte_bpf_jit structure to be filled with related data.
+ * @return
+ * - -EINVAL if the parameters are invalid.
+ * - Zero if operation completed successfully.
+ */
+int __rte_experimental
+rte_bpf_get_jit(const struct rte_bpf *bpf, struct rte_bpf_jit *jit);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BPF_H_ */
diff --git a/lib/librte_bpf/rte_bpf_ethdev.h b/lib/librte_bpf/rte_bpf_ethdev.h
new file mode 100644
index 00000000..31731e7a
--- /dev/null
+++ b/lib/librte_bpf/rte_bpf_ethdev.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_BPF_ETHDEV_H_
+#define _RTE_BPF_ETHDEV_H_
+
+/**
+ * @file rte_bpf_ethdev.h
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * API to install a BPF filter as an RX/TX callback for eth devices.
+ * Note that right now:
+ * - it is not MT safe, i.e. it is not allowed to do load/unload for the
+ *   same port/queue from different threads in parallel.
+ * - though it does allow load/unload at runtime
+ *   (while RX/TX is ongoing on the given port/queue).
+ * - it allows only one BPF program per port/queue, i.e. a new load will
+ *   replace the BPF program previously loaded for that port/queue.
+ * Filter behaviour - if the BPF program returns a zero value for a given
+ * packet:
+ *   on RX - the packet will be dropped inside the callback and no further
+ *   processing for that packet will happen;
+ *   on TX - the packet will remain unsent, and it is the responsibility
+ *   of the user to handle such a situation (drop, try to send again, etc.).
+ */
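+
+/*
+ * Minimal usage sketch (illustrative only: 'port'/'queue' ids and the
+ * "filter.o"/".text" file/section names are hypothetical; error handling
+ * is elided):
+ *
+ *	struct rte_bpf_prm prm = {
+ *		.prog_arg = {
+ *			.type = RTE_BPF_ARG_PTR_MBUF,
+ *			.size = sizeof(struct rte_mbuf),
+ *			.buf_size = RTE_MBUF_DEFAULT_BUF_SIZE,
+ *		},
+ *	};
+ *	rte_bpf_eth_rx_elf_load(port, queue, &prm, "filter.o", ".text",
+ *		RTE_BPF_ETH_F_JIT);
+ *	...
+ *	rte_bpf_eth_rx_unload(port, queue);
+ */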
+
+#include <rte_bpf.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum {
+ RTE_BPF_ETH_F_NONE = 0,
+	RTE_BPF_ETH_F_JIT = 0x1, /**< use code compiled into native ISA */
+};
+
+/**
+ * Unload previously loaded BPF program (if any) from given RX port/queue
+ * and remove appropriate RX port/queue callback.
+ *
+ * @param port
+ * The identifier of the ethernet port
+ * @param queue
+ * The identifier of the RX queue on the given port
+ */
+void __rte_experimental
+rte_bpf_eth_rx_unload(uint16_t port, uint16_t queue);
+
+/**
+ * Unload previously loaded BPF program (if any) from given TX port/queue
+ * and remove appropriate TX port/queue callback.
+ *
+ * @param port
+ * The identifier of the ethernet port
+ * @param queue
+ * The identifier of the TX queue on the given port
+ */
+void __rte_experimental
+rte_bpf_eth_tx_unload(uint16_t port, uint16_t queue);
+
+/**
+ * Load BPF program from the ELF file and install callback to execute it
+ * on given RX port/queue.
+ *
+ * @param port
+ * The identifier of the ethernet port
+ * @param queue
+ * The identifier of the RX queue on the given port
+ * @param fname
+ *   Pathname for an ELF file.
+ * @param sname
+ * Name of the executable section within the file to load.
+ * @param prm
+ *   Parameters used to create and initialise the BPF execution context.
+ * @param flags
+ *   Flags that define the expected behavior of the loaded filter
+ * (i.e. jited/non-jited version to use).
+ * @return
+ * Zero on successful completion or negative error code otherwise.
+ */
+int __rte_experimental
+rte_bpf_eth_rx_elf_load(uint16_t port, uint16_t queue,
+ const struct rte_bpf_prm *prm, const char *fname, const char *sname,
+ uint32_t flags);
+
+/**
+ * Load BPF program from the ELF file and install callback to execute it
+ * on given TX port/queue.
+ *
+ * @param port
+ * The identifier of the ethernet port
+ * @param queue
+ * The identifier of the TX queue on the given port
+ * @param fname
+ *   Pathname for an ELF file.
+ * @param sname
+ * Name of the executable section within the file to load.
+ * @param prm
+ *   Parameters used to create and initialise the BPF execution context.
+ * @param flags
+ *   Flags that define the expected behavior of the loaded filter
+ * (i.e. jited/non-jited version to use).
+ * @return
+ * Zero on successful completion or negative error code otherwise.
+ */
+int __rte_experimental
+rte_bpf_eth_tx_elf_load(uint16_t port, uint16_t queue,
+ const struct rte_bpf_prm *prm, const char *fname, const char *sname,
+ uint32_t flags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BPF_ETHDEV_H_ */
diff --git a/lib/librte_bpf/rte_bpf_version.map b/lib/librte_bpf/rte_bpf_version.map
new file mode 100644
index 00000000..a203e088
--- /dev/null
+++ b/lib/librte_bpf/rte_bpf_version.map
@@ -0,0 +1,16 @@
+EXPERIMENTAL {
+ global:
+
+ rte_bpf_destroy;
+ rte_bpf_elf_load;
+ rte_bpf_eth_rx_elf_load;
+ rte_bpf_eth_rx_unload;
+ rte_bpf_eth_tx_elf_load;
+ rte_bpf_eth_tx_unload;
+ rte_bpf_exec;
+ rte_bpf_exec_burst;
+ rte_bpf_get_jit;
+ rte_bpf_load;
+
+ local: *;
+};
diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c
index e88e4e11..9666e90c 100644
--- a/lib/librte_cmdline/cmdline_parse.c
+++ b/lib/librte_cmdline/cmdline_parse.c
@@ -208,9 +208,6 @@ cmdline_parse(struct cmdline *cl, const char * buf)
int err = CMDLINE_PARSE_NOMATCH;
int tok;
cmdline_parse_ctx_t *ctx;
-#ifdef RTE_LIBRTE_CMDLINE_DEBUG
- char debug_buf[BUFSIZ];
-#endif
char *result_buf = result.buf;
if (!cl || !buf)
@@ -250,10 +247,8 @@ cmdline_parse(struct cmdline *cl, const char * buf)
return linelen;
}
-#ifdef RTE_LIBRTE_CMDLINE_DEBUG
- snprintf(debug_buf, (linelen>64 ? 64 : linelen), "%s", buf);
- debug_printf("Parse line : len=%d, <%s>\n", linelen, debug_buf);
-#endif
+ debug_printf("Parse line : len=%d, <%.*s>\n",
+ linelen, linelen > 64 ? 64 : linelen, buf);
/* parse it !! */
inst = ctx[inst_num];
@@ -436,7 +431,7 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
if ((unsigned)(comp_len + 1) > size)
return 0;
- snprintf(dst, size, "%s", comp_buf);
+ strlcpy(dst, comp_buf, size);
dst[comp_len] = 0;
return 2;
}
@@ -513,7 +508,7 @@ cmdline_complete(struct cmdline *cl, const char *buf, int *state,
continue;
}
(*state)++;
- l=snprintf(dst, size, "%s", tmpbuf);
+ l=strlcpy(dst, tmpbuf, size);
if (l>=0 && token_hdr.ops->get_help) {
token_hdr.ops->get_help(token_p, tmpbuf,
sizeof(tmpbuf));
diff --git a/lib/librte_cmdline/cmdline_parse_etheraddr.c b/lib/librte_cmdline/cmdline_parse_etheraddr.c
index 8d281192..24e04755 100644
--- a/lib/librte_cmdline/cmdline_parse_etheraddr.c
+++ b/lib/librte_cmdline/cmdline_parse_etheraddr.c
@@ -102,7 +102,7 @@ cmdline_parse_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
(token_len != ETHER_ADDRSTRLENSHORT - 1))
return -1;
- snprintf(ether_str, token_len+1, "%s", buf);
+ strlcpy(ether_str, buf, token_len + 1);
tmp = my_ether_aton(ether_str);
if (tmp == NULL)
diff --git a/lib/librte_cmdline/cmdline_parse_ipaddr.c b/lib/librte_cmdline/cmdline_parse_ipaddr.c
index ae6ea100..6647f569 100644
--- a/lib/librte_cmdline/cmdline_parse_ipaddr.c
+++ b/lib/librte_cmdline/cmdline_parse_ipaddr.c
@@ -4,26 +4,6 @@
* All rights reserved.
*/
-/*
- * For inet_ntop() functions:
- *
- * Copyright (c) 1996 by Internet Software Consortium.
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
- * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
- * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
- * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
- * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
- * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
- * SOFTWARE.
- */
-
-
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
@@ -31,6 +11,7 @@
#include <ctype.h>
#include <string.h>
#include <errno.h>
+#include <arpa/inet.h>
#include <netinet/in.h>
#ifndef __linux__
#ifndef __FreeBSD__
@@ -52,205 +33,9 @@ struct cmdline_token_ops cmdline_token_ipaddr_ops = {
.get_help = cmdline_get_help_ipaddr,
};
-#define INADDRSZ 4
-#define IN6ADDRSZ 16
#define PREFIXMAX 128
#define V4PREFIXMAX 32
-/*
- * WARNING: Don't even consider trying to compile this on a system where
- * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX.
- */
-
-static int inet_pton4(const char *src, unsigned char *dst);
-static int inet_pton6(const char *src, unsigned char *dst);
-
-/* int
- * inet_pton(af, src, dst)
- * convert from presentation format (which usually means ASCII printable)
- * to network format (which is usually some kind of binary format).
- * return:
- * 1 if the address was valid for the specified address family
- * 0 if the address wasn't valid (`dst' is untouched in this case)
- * -1 if some other error occurred (`dst' is untouched in this case, too)
- * author:
- * Paul Vixie, 1996.
- */
-static int
-my_inet_pton(int af, const char *src, void *dst)
-{
- switch (af) {
- case AF_INET:
- return inet_pton4(src, dst);
- case AF_INET6:
- return inet_pton6(src, dst);
- default:
- errno = EAFNOSUPPORT;
- return -1;
- }
- /* NOTREACHED */
-}
-
-/* int
- * inet_pton4(src, dst)
- * like inet_aton() but without all the hexadecimal and shorthand.
- * return:
- * 1 if `src' is a valid dotted quad, else 0.
- * notice:
- * does not touch `dst' unless it's returning 1.
- * author:
- * Paul Vixie, 1996.
- */
-static int
-inet_pton4(const char *src, unsigned char *dst)
-{
- static const char digits[] = "0123456789";
- int saw_digit, octets, ch;
- unsigned char tmp[INADDRSZ], *tp;
-
- saw_digit = 0;
- octets = 0;
- *(tp = tmp) = 0;
- while ((ch = *src++) != '\0') {
- const char *pch;
-
- if ((pch = strchr(digits, ch)) != NULL) {
- unsigned int new = *tp * 10 + (pch - digits);
-
- if (new > 255)
- return 0;
- if (! saw_digit) {
- if (++octets > 4)
- return 0;
- saw_digit = 1;
- }
- *tp = (unsigned char)new;
- } else if (ch == '.' && saw_digit) {
- if (octets == 4)
- return 0;
- *++tp = 0;
- saw_digit = 0;
- } else
- return 0;
- }
- if (octets < 4)
- return 0;
-
- memcpy(dst, tmp, INADDRSZ);
- return 1;
-}
-
-/* int
- * inet_pton6(src, dst)
- * convert presentation level address to network order binary form.
- * return:
- * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
- * notice:
- * (1) does not touch `dst' unless it's returning 1.
- * (2) :: in a full address is silently ignored.
- * credit:
- * inspired by Mark Andrews.
- * author:
- * Paul Vixie, 1996.
- */
-static int
-inet_pton6(const char *src, unsigned char *dst)
-{
- static const char xdigits_l[] = "0123456789abcdef",
- xdigits_u[] = "0123456789ABCDEF";
- unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
- const char *xdigits = 0, *curtok = 0;
- int ch = 0, saw_xdigit = 0, count_xdigit = 0;
- unsigned int val = 0;
- unsigned dbloct_count = 0;
-
- memset((tp = tmp), '\0', IN6ADDRSZ);
- endp = tp + IN6ADDRSZ;
- colonp = NULL;
- /* Leading :: requires some special handling. */
- if (*src == ':')
- if (*++src != ':')
- return 0;
- curtok = src;
- saw_xdigit = count_xdigit = 0;
- val = 0;
-
- while ((ch = *src++) != '\0') {
- const char *pch;
-
- if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
- pch = strchr((xdigits = xdigits_u), ch);
- if (pch != NULL) {
- if (count_xdigit >= 4)
- return 0;
- val <<= 4;
- val |= (pch - xdigits);
- if (val > 0xffff)
- return 0;
- saw_xdigit = 1;
- count_xdigit++;
- continue;
- }
- if (ch == ':') {
- curtok = src;
- if (!saw_xdigit) {
- if (colonp)
- return 0;
- colonp = tp;
- continue;
- } else if (*src == '\0') {
- return 0;
- }
- if (tp + sizeof(int16_t) > endp)
- return 0;
- *tp++ = (unsigned char) ((val >> 8) & 0xff);
- *tp++ = (unsigned char) (val & 0xff);
- saw_xdigit = 0;
- count_xdigit = 0;
- val = 0;
- dbloct_count++;
- continue;
- }
- if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
- inet_pton4(curtok, tp) > 0) {
- tp += INADDRSZ;
- saw_xdigit = 0;
- dbloct_count += 2;
- break; /* '\0' was seen by inet_pton4(). */
- }
- return 0;
- }
- if (saw_xdigit) {
- if (tp + sizeof(int16_t) > endp)
- return 0;
- *tp++ = (unsigned char) ((val >> 8) & 0xff);
- *tp++ = (unsigned char) (val & 0xff);
- dbloct_count++;
- }
- if (colonp != NULL) {
- /* if we already have 8 double octets, having a colon means error */
- if (dbloct_count == 8)
- return 0;
-
- /*
- * Since some memmove()'s erroneously fail to handle
- * overlapping regions, we'll do the shift by hand.
- */
- const int n = tp - colonp;
- int i;
-
- for (i = 1; i <= n; i++) {
- endp[- i] = colonp[n - i];
- colonp[n - i] = 0;
- }
- tp = endp;
- }
- if (tp != endp)
- return 0;
- memcpy(dst, tmp, IN6ADDRSZ);
- return 1;
-}
-
int
cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
unsigned ressize)
@@ -277,7 +62,7 @@ cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
if (token_len >= INET6_ADDRSTRLEN+4)
return -1;
- snprintf(ip_str, token_len+1, "%s", buf);
+ strlcpy(ip_str, buf, token_len + 1);
/* convert the network prefix */
if (tk2->ipaddr_data.flags & CMDLINE_IPADDR_NETWORK) {
@@ -299,7 +84,7 @@ cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
/* convert the IP addr */
if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V4) &&
- my_inet_pton(AF_INET, ip_str, &ipaddr.addr.ipv4) == 1 &&
+ inet_pton(AF_INET, ip_str, &ipaddr.addr.ipv4) == 1 &&
prefixlen <= V4PREFIXMAX) {
ipaddr.family = AF_INET;
if (res)
@@ -307,7 +92,7 @@ cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
return token_len;
}
if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V6) &&
- my_inet_pton(AF_INET6, ip_str, &ipaddr.addr.ipv6) == 1) {
+ inet_pton(AF_INET6, ip_str, &ipaddr.addr.ipv6) == 1) {
ipaddr.family = AF_INET6;
if (res)
memcpy(res, &ipaddr, sizeof(ipaddr));
diff --git a/lib/librte_cmdline/cmdline_parse_portlist.c b/lib/librte_cmdline/cmdline_parse_portlist.c
index 5952f343..ad43b522 100644
--- a/lib/librte_cmdline/cmdline_parse_portlist.c
+++ b/lib/librte_cmdline/cmdline_parse_portlist.c
@@ -94,7 +94,7 @@ cmdline_parse_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
if (token_len >= PORTLIST_TOKEN_SIZE)
return -1;
- snprintf(portlist_str, token_len+1, "%s", buf);
+ strlcpy(portlist_str, buf, token_len + 1);
if (pl) {
pl->map = 0;
diff --git a/lib/librte_cmdline/cmdline_parse_string.c b/lib/librte_cmdline/cmdline_parse_string.c
index abde0412..9cf41d0f 100644
--- a/lib/librte_cmdline/cmdline_parse_string.c
+++ b/lib/librte_cmdline/cmdline_parse_string.c
@@ -125,10 +125,10 @@ cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *buf, void *res,
if (res) {
if ((sd->str != NULL) && (strcmp(sd->str, TOKEN_STRING_MULTI) == 0))
/* we are sure that token_len is < STR_MULTI_TOKEN_SIZE-1 */
- snprintf(res, STR_MULTI_TOKEN_SIZE, "%s", buf);
+ strlcpy(res, buf, STR_MULTI_TOKEN_SIZE);
else
/* we are sure that token_len is < STR_TOKEN_SIZE-1 */
- snprintf(res, STR_TOKEN_SIZE, "%s", buf);
+ strlcpy(res, buf, STR_TOKEN_SIZE);
*((char *)res + token_len) = 0;
}
diff --git a/lib/librte_compat/Makefile b/lib/librte_compat/Makefile
index 0c57533c..61089fe7 100644
--- a/lib/librte_compat/Makefile
+++ b/lib/librte_compat/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2013 Neil Horman <nhorman@tuxdriver.com>
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2013 Neil Horman <nhorman@tuxdriver.com>
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/lib/librte_compressdev/Makefile b/lib/librte_compressdev/Makefile
new file mode 100644
index 00000000..7ef89e61
--- /dev/null
+++ b/lib/librte_compressdev/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_compressdev.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDLIBS += -lrte_eal -lrte_mempool -lrte_kvargs
+
+# library source files
+SRCS-y += rte_compressdev.c rte_compressdev_pmd.c rte_comp.c
+
+# export include files
+SYMLINK-y-include += rte_comp.h
+SYMLINK-y-include += rte_compressdev.h
+# export include files (for PMDs)
+SYMLINK-y-include += rte_compressdev_pmd.h
+SYMLINK-y-include += rte_compressdev_internal.h
+
+# versioning export map
+EXPORT_MAP := rte_compressdev_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_compressdev/meson.build b/lib/librte_compressdev/meson.build
new file mode 100644
index 00000000..5416571c
--- /dev/null
+++ b/lib/librte_compressdev/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('rte_compressdev.c',
+ 'rte_compressdev_pmd.c',
+ 'rte_comp.c')
+headers = files('rte_compressdev.h',
+ 'rte_compressdev_pmd.h',
+ 'rte_compressdev_internal.h',
+ 'rte_comp.h')
+deps += ['kvargs', 'mbuf']
diff --git a/lib/librte_compressdev/rte_comp.c b/lib/librte_compressdev/rte_comp.c
new file mode 100644
index 00000000..98ad0cfd
--- /dev/null
+++ b/lib/librte_compressdev/rte_comp.c
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include "rte_comp.h"
+#include "rte_compressdev.h"
+#include "rte_compressdev_internal.h"
+
+const char * __rte_experimental
+rte_comp_get_feature_name(uint64_t flag)
+{
+ switch (flag) {
+ case RTE_COMP_FF_STATEFUL_COMPRESSION:
+ return "STATEFUL_COMPRESSION";
+ case RTE_COMP_FF_STATEFUL_DECOMPRESSION:
+ return "STATEFUL_DECOMPRESSION";
+ case RTE_COMP_FF_OOP_SGL_IN_SGL_OUT:
+ return "OOP_SGL_IN_SGL_OUT";
+ case RTE_COMP_FF_OOP_SGL_IN_LB_OUT:
+ return "OOP_SGL_IN_LB_OUT";
+ case RTE_COMP_FF_OOP_LB_IN_SGL_OUT:
+ return "OOP_LB_IN_SGL_OUT";
+ case RTE_COMP_FF_MULTI_PKT_CHECKSUM:
+ return "MULTI_PKT_CHECKSUM";
+ case RTE_COMP_FF_ADLER32_CHECKSUM:
+ return "ADLER32_CHECKSUM";
+ case RTE_COMP_FF_CRC32_CHECKSUM:
+ return "CRC32_CHECKSUM";
+ case RTE_COMP_FF_CRC32_ADLER32_CHECKSUM:
+ return "CRC32_ADLER32_CHECKSUM";
+ case RTE_COMP_FF_NONCOMPRESSED_BLOCKS:
+ return "NONCOMPRESSED_BLOCKS";
+ case RTE_COMP_FF_SHA1_HASH:
+ return "SHA1_HASH";
+ case RTE_COMP_FF_SHA2_SHA256_HASH:
+ return "SHA2_SHA256_HASH";
+ case RTE_COMP_FF_SHAREABLE_PRIV_XFORM:
+ return "SHAREABLE_PRIV_XFORM";
+ case RTE_COMP_FF_HUFFMAN_FIXED:
+ return "HUFFMAN_FIXED";
+ case RTE_COMP_FF_HUFFMAN_DYNAMIC:
+ return "HUFFMAN_DYNAMIC";
+ default:
+ return NULL;
+ }
+}
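+
+/*
+ * Editor's note: illustrative sketch only, not part of the upstream
+ * sources. It shows how an application might decode a feature-flag
+ * bitmask with rte_comp_get_feature_name(); the helper name and the
+ * use of printf() (which needs <stdio.h>) are example choices.
+ */
+static inline void
+example_dump_comp_features(uint64_t feature_flags)
+{
+	unsigned int i;
+
+	/* probe each bit; flags without a name return NULL */
+	for (i = 0; i < 64; i++) {
+		uint64_t flag = 1ULL << i;
+		const char *name = rte_comp_get_feature_name(flag);
+
+		if ((feature_flags & flag) && name != NULL)
+			printf("%s\n", name);
+	}
+}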
+
+/**
+ * Reset the fields of an operation to their default values.
+ *
+ * @note The private data associated with the operation is not zeroed.
+ *
+ * @param op
+ * The operation to be reset
+ */
+static inline void
+rte_comp_op_reset(struct rte_comp_op *op)
+{
+ struct rte_mempool *tmp_mp = op->mempool;
+ rte_iova_t tmp_iova_addr = op->iova_addr;
+
+ memset(op, 0, sizeof(struct rte_comp_op));
+ op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
+ op->iova_addr = tmp_iova_addr;
+ op->mempool = tmp_mp;
+}
+
+/**
+ * Private data structure belonging to an operation pool.
+ */
+struct rte_comp_op_pool_private {
+ uint16_t user_size;
+ /**< Size of private user data with each operation. */
+};
+
+/**
+ * Bulk allocate raw elements from the mempool and return them as comp operations
+ *
+ * @param mempool
+ * Compress operation mempool
+ * @param ops
+ * Array to place allocated operations
+ * @param nb_ops
+ * Number of operations to allocate
+ * @return
+ *  - nb_ops: Success, all nb_ops were allocated
+ *  - 0: Not enough entries in the mempool; no ops are retrieved.
+ */
+static inline int
+rte_comp_op_raw_bulk_alloc(struct rte_mempool *mempool,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+ return nb_ops;
+
+ return 0;
+}
+
+/** Initialise rte_comp_op mempool element */
+static void
+rte_comp_op_init(struct rte_mempool *mempool,
+ __rte_unused void *opaque_arg,
+ void *_op_data,
+ __rte_unused unsigned int i)
+{
+ struct rte_comp_op *op = _op_data;
+
+ memset(_op_data, 0, mempool->elt_size);
+
+ op->status = RTE_COMP_OP_STATUS_NOT_PROCESSED;
+ op->iova_addr = rte_mem_virt2iova(_op_data);
+ op->mempool = mempool;
+}
+
+struct rte_mempool * __rte_experimental
+rte_comp_op_pool_create(const char *name,
+ unsigned int nb_elts, unsigned int cache_size,
+ uint16_t user_size, int socket_id)
+{
+ struct rte_comp_op_pool_private *priv;
+
+ unsigned int elt_size = sizeof(struct rte_comp_op) + user_size;
+
+ /* lookup mempool in case already allocated */
+ struct rte_mempool *mp = rte_mempool_lookup(name);
+
+ if (mp != NULL) {
+ priv = (struct rte_comp_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ if (mp->elt_size != elt_size ||
+ mp->cache_size < cache_size ||
+ mp->size < nb_elts ||
+ priv->user_size < user_size) {
+ mp = NULL;
+ COMPRESSDEV_LOG(ERR,
+ "Mempool %s already exists but with incompatible parameters",
+ name);
+ return NULL;
+ }
+ return mp;
+ }
+
+ mp = rte_mempool_create(
+ name,
+ nb_elts,
+ elt_size,
+ cache_size,
+ sizeof(struct rte_comp_op_pool_private),
+ NULL,
+ NULL,
+ rte_comp_op_init,
+ NULL,
+ socket_id,
+ 0);
+
+ if (mp == NULL) {
+ COMPRESSDEV_LOG(ERR, "Failed to create mempool %s", name);
+ return NULL;
+ }
+
+ priv = (struct rte_comp_op_pool_private *)
+ rte_mempool_get_priv(mp);
+
+ priv->user_size = user_size;
+
+ return mp;
+}
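+
+/*
+ * Editor's note: illustrative usage sketch, not part of the upstream
+ * sources. Creates (or looks up) an op pool on the caller's socket;
+ * the pool name and sizes are arbitrary example values.
+ */
+static inline struct rte_mempool *
+example_comp_op_pool(void)
+{
+	/* 8192 ops, 256 cached per lcore, no per-op user data */
+	return rte_comp_op_pool_create("example_comp_pool", 8192, 256, 0,
+			rte_socket_id());
+}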
+
+struct rte_comp_op * __rte_experimental
+rte_comp_op_alloc(struct rte_mempool *mempool)
+{
+ struct rte_comp_op *op = NULL;
+ int retval;
+
+ retval = rte_comp_op_raw_bulk_alloc(mempool, &op, 1);
+ if (unlikely(retval < 0))
+ return NULL;
+
+ rte_comp_op_reset(op);
+
+ return op;
+}
+
+int __rte_experimental
+rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ int ret;
+ uint16_t i;
+
+ ret = rte_comp_op_raw_bulk_alloc(mempool, ops, nb_ops);
+ if (unlikely(ret < nb_ops))
+ return ret;
+
+ for (i = 0; i < nb_ops; i++)
+ rte_comp_op_reset(ops[i]);
+
+ return nb_ops;
+}
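+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources.
+ * Bulk allocation is all-or-nothing: either all nb_ops ops are returned
+ * or none are. The burst size of 32 is an arbitrary example value.
+ */
+static inline void
+example_bulk_alloc_and_free(struct rte_mempool *pool)
+{
+	struct rte_comp_op *burst[32];
+	uint16_t i;
+
+	if (rte_comp_op_bulk_alloc(pool, burst, 32) != 32)
+		return;	/* pool exhausted, nothing was allocated */
+
+	/* ... fill in and enqueue the ops here ... */
+
+	for (i = 0; i < 32; i++)
+		rte_comp_op_free(burst[i]);
+}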
+
+/**
+ * Free an operation structure.
+ * If the operation has been allocated from an rte_mempool, it will
+ * be returned to the mempool.
+ *
+ * @param op
+ * Compress operation
+ */
+void __rte_experimental
+rte_comp_op_free(struct rte_comp_op *op)
+{
+ if (op != NULL && op->mempool != NULL)
+ rte_mempool_put(op->mempool, op);
+}
diff --git a/lib/librte_compressdev/rte_comp.h b/lib/librte_compressdev/rte_comp.h
new file mode 100644
index 00000000..ee9056ea
--- /dev/null
+++ b/lib/librte_compressdev/rte_comp.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef _RTE_COMP_H_
+#define _RTE_COMP_H_
+
+/**
+ * @file rte_comp.h
+ *
+ * RTE definitions for Data Compression Service
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+/**
+ * compression service feature flags
+ *
+ * @note New feature flags should be added to the end of the list
+ *
+ * Keep these flags synchronised with rte_comp_get_feature_name()
+ */
+#define RTE_COMP_FF_STATEFUL_COMPRESSION (1ULL << 0)
+/**< Stateful compression is supported */
+#define RTE_COMP_FF_STATEFUL_DECOMPRESSION (1ULL << 1)
+/**< Stateful decompression is supported */
+#define RTE_COMP_FF_OOP_SGL_IN_SGL_OUT (1ULL << 2)
+/**< Out-of-place Scatter-gather (SGL) buffers,
+ * with multiple segments, are supported in input and output
+ */
+#define RTE_COMP_FF_OOP_SGL_IN_LB_OUT (1ULL << 3)
+/**< Out-of-place Scatter-gather (SGL) buffers are supported
+ * in input, combined with linear buffers (LB), with a
+ * single segment, in output
+ */
+#define RTE_COMP_FF_OOP_LB_IN_SGL_OUT (1ULL << 4)
+/**< Out-of-place Scatter-gather (SGL) buffers are supported
+ * in output, combined with linear buffers (LB) in input
+ */
+#define RTE_COMP_FF_ADLER32_CHECKSUM (1ULL << 5)
+/**< Adler-32 Checksum is supported */
+#define RTE_COMP_FF_CRC32_CHECKSUM (1ULL << 6)
+/**< CRC32 Checksum is supported */
+#define RTE_COMP_FF_CRC32_ADLER32_CHECKSUM (1ULL << 7)
+/**< Adler-32/CRC32 Checksum is supported */
+#define RTE_COMP_FF_MULTI_PKT_CHECKSUM (1ULL << 8)
+/**< Generation of checksum across multiple stateless packets is supported */
+#define RTE_COMP_FF_SHA1_HASH (1ULL << 9)
+/**< SHA1 Hash is supported */
+#define RTE_COMP_FF_SHA2_SHA256_HASH (1ULL << 10)
+/**< SHA256 Hash of SHA2 family is supported */
+#define RTE_COMP_FF_NONCOMPRESSED_BLOCKS (1ULL << 11)
+/**< Creation of non-compressed blocks using RTE_COMP_LEVEL_NONE is supported */
+#define RTE_COMP_FF_SHAREABLE_PRIV_XFORM (1ULL << 12)
+/**< Private xforms created by the PMD can be shared
+ * across multiple stateless operations. If not set, then app needs
+ * to create as many priv_xforms as it expects to have stateless
+ * operations in-flight.
+ */
+#define RTE_COMP_FF_HUFFMAN_FIXED (1ULL << 13)
+/**< Fixed huffman encoding is supported */
+#define RTE_COMP_FF_HUFFMAN_DYNAMIC (1ULL << 14)
+/**< Dynamic huffman encoding is supported */
+
+/** Status of comp operation */
+enum rte_comp_op_status {
+ RTE_COMP_OP_STATUS_SUCCESS = 0,
+ /**< Operation completed successfully */
+ RTE_COMP_OP_STATUS_NOT_PROCESSED,
+ /**< Operation has not yet been processed by the device */
+ RTE_COMP_OP_STATUS_INVALID_ARGS,
+ /**< Operation failed due to invalid arguments in request */
+ RTE_COMP_OP_STATUS_ERROR,
+	/**< Error in processing the operation */
+	RTE_COMP_OP_STATUS_INVALID_STATE,
+	/**< Operation was invoked in an invalid state */
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED,
+ /**< Output buffer ran out of space before operation completed.
+ * Error case. Application must resubmit all data with a larger
+ * output buffer.
+ */
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE,
+ /**< Output buffer ran out of space before operation completed, but this
+ * is not an error case. Output data up to op.produced can be used and
+ * next op in the stream should continue on from op.consumed+1.
+ */
+};
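+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream header.
+ * A small helper an application might keep for logging; the two
+ * out-of-space statuses are the ones needing special handling
+ * (see the notes on rte_compressdev_dequeue_burst()).
+ */
+static inline const char *
+example_comp_status_str(enum rte_comp_op_status status)
+{
+	switch (status) {
+	case RTE_COMP_OP_STATUS_SUCCESS:
+		return "success";
+	case RTE_COMP_OP_STATUS_NOT_PROCESSED:
+		return "not processed yet";
+	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+		return "out of space (fatal, resubmit with larger buffer)";
+	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_RECOVERABLE:
+		return "out of space (recoverable, continue the stream)";
+	default:
+		return "error";
+	}
+}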
+
+/** Compression Algorithms */
+enum rte_comp_algorithm {
+ RTE_COMP_ALGO_UNSPECIFIED = 0,
+	/**< No compression algorithm specified */
+ RTE_COMP_ALGO_NULL,
+ /**< No compression.
+ * Pass-through, data is copied unchanged from source buffer to
+ * destination buffer.
+ */
+ RTE_COMP_ALGO_DEFLATE,
+ /**< DEFLATE compression algorithm
+ * https://tools.ietf.org/html/rfc1951
+ */
+ RTE_COMP_ALGO_LZS,
+ /**< LZS compression algorithm
+ * https://tools.ietf.org/html/rfc2395
+ */
+ RTE_COMP_ALGO_LIST_END
+};
+
+/** Compression Hash Algorithms */
+enum rte_comp_hash_algorithm {
+ RTE_COMP_HASH_ALGO_NONE = 0,
+ /**< No hash */
+ RTE_COMP_HASH_ALGO_SHA1,
+ /**< SHA1 hash algorithm */
+ RTE_COMP_HASH_ALGO_SHA2_256,
+ /**< SHA256 hash algorithm of SHA2 family */
+ RTE_COMP_HASH_ALGO_LIST_END
+};
+
+/**
+ * Compression Level.
+ * The number is interpreted by each PMD differently. However, lower numbers
+ * give the fastest compression at the expense of compression ratio, while
+ * higher numbers may give better compression ratios but are likely slower.
+ */
+#define RTE_COMP_LEVEL_PMD_DEFAULT (-1)
+/** Use PMD Default */
+#define RTE_COMP_LEVEL_NONE (0)
+/** Output uncompressed blocks if supported by the specified algorithm */
+#define RTE_COMP_LEVEL_MIN (1)
+/** Use minimum compression level supported by the PMD */
+#define RTE_COMP_LEVEL_MAX (9)
+/** Use maximum compression level supported by the PMD */
+
+/** Compression checksum types */
+enum rte_comp_checksum_type {
+ RTE_COMP_CHECKSUM_NONE,
+ /**< No checksum generated */
+ RTE_COMP_CHECKSUM_CRC32,
+ /**< Generates a CRC32 checksum, as used by gzip */
+ RTE_COMP_CHECKSUM_ADLER32,
+ /**< Generates an Adler-32 checksum, as used by zlib */
+ RTE_COMP_CHECKSUM_CRC32_ADLER32,
+ /**< Generates both Adler-32 and CRC32 checksums, concatenated.
+ * CRC32 is in the lower 32bits, Adler-32 in the upper 32 bits.
+	 * CRC32 is in the lower 32 bits, Adler-32 in the upper 32 bits.
+};
+
+
+/** Compression Huffman Type - used by DEFLATE algorithm */
+enum rte_comp_huffman {
+ RTE_COMP_HUFFMAN_DEFAULT,
+ /**< PMD may choose which Huffman codes to use */
+ RTE_COMP_HUFFMAN_FIXED,
+ /**< Use Fixed Huffman codes */
+ RTE_COMP_HUFFMAN_DYNAMIC,
+ /**< Use Dynamic Huffman codes */
+};
+
+/** Compression flush flags */
+enum rte_comp_flush_flag {
+ RTE_COMP_FLUSH_NONE,
+ /**< Data is not flushed. Output may remain in the compressor and be
+ * processed during a following op. It may not be possible to decompress
+ * output until a later op with some other flush flag has been sent.
+ */
+ RTE_COMP_FLUSH_SYNC,
+ /**< All data should be flushed to output buffer. Output data can be
+	 * decompressed. However, state and history are not cleared, so future
+ * operations may use history from this operation.
+ */
+ RTE_COMP_FLUSH_FULL,
+ /**< All data should be flushed to output buffer. Output data can be
+ * decompressed. State and history data is cleared, so future
+ * ops will be independent of ops processed before this.
+ */
+ RTE_COMP_FLUSH_FINAL
+ /**< Same as RTE_COMP_FLUSH_FULL but if op.algo is RTE_COMP_ALGO_DEFLATE
+ * then bfinal bit is set in the last block.
+ */
+};
+
+/** Compression transform types */
+enum rte_comp_xform_type {
+ RTE_COMP_COMPRESS,
+ /**< Compression service - compress */
+ RTE_COMP_DECOMPRESS,
+ /**< Compression service - decompress */
+};
+
+/** Compression operation type */
+enum rte_comp_op_type {
+ RTE_COMP_OP_STATELESS,
+ /**< All data to be processed is submitted in the op, no state or
+ * history from previous ops is used and none will be stored for future
+ * ops. Flush flag must be set to either FLUSH_FULL or FLUSH_FINAL.
+ */
+ RTE_COMP_OP_STATEFUL
+ /**< There may be more data to be processed after this op, it's part of
+ * a stream of data. State and history from previous ops can be used
+ * and resulting state and history can be stored for future ops,
+ * depending on flush flag.
+ */
+};
+
+
+/** Parameters specific to the deflate algorithm */
+struct rte_comp_deflate_params {
+ enum rte_comp_huffman huffman;
+ /**< Compression huffman encoding type */
+};
+
+/** Setup Data for compression */
+struct rte_comp_compress_xform {
+ enum rte_comp_algorithm algo;
+ /**< Algorithm to use for compress operation */
+ union {
+ struct rte_comp_deflate_params deflate;
+ /**< Parameters specific to the deflate algorithm */
+ }; /**< Algorithm specific parameters */
+ int level;
+ /**< Compression level */
+ uint8_t window_size;
+ /**< Base two log value of sliding window to be used. If window size
+ * can't be supported by the PMD then it may fall back to a smaller
+ * size. This is likely to result in a worse compression ratio.
+ */
+ enum rte_comp_checksum_type chksum;
+ /**< Type of checksum to generate on the uncompressed data */
+ enum rte_comp_hash_algorithm hash_algo;
+ /**< Hash algorithm to be used with compress operation. Hash is always
+ * done on plaintext.
+ */
+};
+
+/**
+ * Setup Data for decompression.
+ */
+struct rte_comp_decompress_xform {
+ enum rte_comp_algorithm algo;
+ /**< Algorithm to use for decompression */
+ enum rte_comp_checksum_type chksum;
+ /**< Type of checksum to generate on the decompressed data */
+ uint8_t window_size;
+ /**< Base two log value of sliding window which was used to generate
+ * compressed data. If window size can't be supported by the PMD then
+ * setup of stream or private_xform should fail.
+ */
+ enum rte_comp_hash_algorithm hash_algo;
+ /**< Hash algorithm to be used with decompress operation. Hash is always
+ * done on plaintext.
+ */
+};
+
+/**
+ * Compression transform structure.
+ *
+ * This is used to specify the compression transforms required.
+ * Each transform structure can hold a single transform, the type field is
+ * used to specify which transform is contained within the union.
+ */
+struct rte_comp_xform {
+ enum rte_comp_xform_type type;
+ /**< xform type */
+ union {
+ struct rte_comp_compress_xform compress;
+ /**< xform for compress operation */
+ struct rte_comp_decompress_xform decompress;
+ /**< decompress xform */
+ };
+};
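+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream header.
+ * Fills in a compress xform for DEFLATE with dynamic Huffman and a
+ * CRC32 checksum; all values are example choices only.
+ */
+static inline void
+example_fill_deflate_xform(struct rte_comp_xform *xform)
+{
+	xform->type = RTE_COMP_COMPRESS;
+	xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
+	xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
+	xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
+	xform->compress.window_size = 15;	/* 2^15 byte window */
+	xform->compress.chksum = RTE_COMP_CHECKSUM_CRC32;
+	xform->compress.hash_algo = RTE_COMP_HASH_ALGO_NONE;
+}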
+
+/**
+ * Compression Operation.
+ *
+ * This structure contains data relating to performing a compression
+ * operation on the referenced mbuf data buffers.
+ *
+ * Comp operations are enqueued and dequeued in comp PMDs using the
+ * rte_compressdev_enqueue_burst() / rte_compressdev_dequeue_burst() APIs
+ */
+struct rte_comp_op {
+ enum rte_comp_op_type op_type;
+ union {
+ void *private_xform;
+ /**< Stateless private PMD data derived from an rte_comp_xform.
+ * A handle returned by rte_compressdev_private_xform_create()
+		 * must be attached to operations of op_type RTE_COMP_OP_STATELESS.
+ */
+ void *stream;
+ /**< Private PMD data derived initially from an rte_comp_xform,
+ * which holds state and history data and evolves as operations
+ * are processed. rte_compressdev_stream_create() must be called
+ * on a device for all STATEFUL data streams and the resulting
+ * stream attached to the one or more operations associated
+ * with the data stream.
+ * All operations in a stream must be sent to the same device.
+ */
+ };
+
+ struct rte_mempool *mempool;
+ /**< Pool from which operation is allocated */
+ rte_iova_t iova_addr;
+ /**< IOVA address of this operation */
+ struct rte_mbuf *m_src;
+ /**< source mbuf
+ * The total size of the input buffer(s) can be retrieved using
+	 * rte_pktmbuf_pkt_len(m_src). The max data size which can fit in a
+ * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
+ * If the input data is bigger than this it can be passed to the PMD in
+ * a chain of mbufs if the PMD's capabilities indicate it supports this.
+ */
+ struct rte_mbuf *m_dst;
+ /**< destination mbuf
+ * The total size of the output buffer(s) can be retrieved using
+	 * rte_pktmbuf_pkt_len(m_dst). The max data size which can fit in a
+ * single mbuf is limited by the uint16_t rte_mbuf.data_len to 64k-1.
+ * If the output data is expected to be bigger than this a chain of
+ * mbufs can be passed to the PMD if the PMD's capabilities indicate
+ * it supports this.
+ */
+
+ struct {
+ uint32_t offset;
+ /**< Starting point for compression or decompression,
+ * specified as number of bytes from start of packet in
+ * source buffer.
+ * This offset starts from the first segment
+ * of the buffer, in case the m_src is a chain of mbufs.
+ * Starting point for checksum generation in compress direction.
+ */
+ uint32_t length;
+ /**< The length, in bytes, of the data in source buffer
+ * to be compressed or decompressed.
+ * Also the length of the data over which the checksum
+ * should be generated in compress direction
+ */
+ } src;
+ struct {
+ uint32_t offset;
+ /**< Starting point for writing output data, specified as
+ * number of bytes from start of packet in dest
+ * buffer.
+ * This offset starts from the first segment
+ * of the buffer, in case the m_dst is a chain of mbufs.
+ * Starting point for checksum generation in
+ * decompress direction.
+ */
+ } dst;
+ struct {
+ uint8_t *digest;
+ /**< Output buffer to store hash output, if enabled in xform.
+ * Buffer would contain valid value only after an op with
+ * flush flag = RTE_COMP_FLUSH_FULL/FLUSH_FINAL is processed
+ * successfully.
+ *
+		 * The buffer should be contiguous and large enough to
+		 * accommodate the digest produced by the specific hash algo.
+ */
+ rte_iova_t iova_addr;
+ /**< IO address of the buffer */
+ } hash;
+ enum rte_comp_flush_flag flush_flag;
+ /**< Defines flush characteristics for the output data.
+ * Only applicable in compress direction
+ */
+ uint64_t input_chksum;
+ /**< An input checksum can be provided to generate a
+ * cumulative checksum across sequential blocks in a STATELESS stream.
+ * Checksum type is as specified in xform chksum_type
+ */
+ uint64_t output_chksum;
+ /**< If a checksum is generated it will be written in here.
+ * Checksum type is as specified in xform chksum_type.
+ */
+ uint32_t consumed;
+ /**< The number of bytes from the source buffer
+ * which were compressed/decompressed.
+ */
+ uint32_t produced;
+ /**< The number of bytes written to the destination buffer
+ * which were compressed/decompressed.
+ */
+ uint64_t debug_status;
+ /**<
+	 * The status of the operation is returned in the status field;
+	 * this field allows the PMD to pass back extra
+	 * PMD-specific debug information. Values are not defined by the API.
+ */
+ uint8_t status;
+ /**<
+	 * Operation status - use values from enum rte_comp_op_status.
+	 * This is reset to
+	 * RTE_COMP_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+	 * will be set to RTE_COMP_OP_STATUS_SUCCESS after the operation
+	 * is successfully processed by a PMD.
+ */
+} __rte_cache_aligned;
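+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream header.
+ * Fills the data-path fields of a stateless compress op. 'priv_xform'
+ * is assumed to come from rte_compressdev_private_xform_create()
+ * (declared in rte_compressdev.h).
+ */
+static inline void
+example_fill_stateless_op(struct rte_comp_op *op, void *priv_xform,
+		struct rte_mbuf *src, struct rte_mbuf *dst)
+{
+	op->op_type = RTE_COMP_OP_STATELESS;
+	op->private_xform = priv_xform;
+	op->m_src = src;
+	op->m_dst = dst;
+	op->src.offset = 0;
+	op->src.length = rte_pktmbuf_pkt_len(src);
+	op->dst.offset = 0;
+	/* stateless ops must use FLUSH_FULL or FLUSH_FINAL */
+	op->flush_flag = RTE_COMP_FLUSH_FINAL;
+}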
+
+/**
+ * Creates an operation pool
+ *
+ * @param name
+ * Compress pool name
+ * @param nb_elts
+ * Number of elements in pool
+ * @param cache_size
+ * Number of elements to cache on lcore, see
+ * *rte_mempool_create* for further details about cache size
+ * @param user_size
+ * Size of private data to allocate for user with each operation
+ * @param socket_id
+ *   Socket identifier to allocate memory on
+ * @return
+ * - On success pointer to mempool
+ * - On failure NULL
+ */
+struct rte_mempool * __rte_experimental
+rte_comp_op_pool_create(const char *name,
+ unsigned int nb_elts, unsigned int cache_size,
+ uint16_t user_size, int socket_id);
+
+/**
+ * Allocate an operation from a mempool with default parameters set
+ *
+ * @param mempool
+ * Compress operation mempool
+ *
+ * @return
+ * - On success returns a valid rte_comp_op structure
+ * - On failure returns NULL
+ */
+struct rte_comp_op * __rte_experimental
+rte_comp_op_alloc(struct rte_mempool *mempool);
+
+/**
+ * Bulk allocate operations from a mempool with default parameters set
+ *
+ * @param mempool
+ * Compress operation mempool
+ * @param ops
+ * Array to place allocated operations
+ * @param nb_ops
+ * Number of operations to allocate
+ * @return
+ *   - nb_ops: Success, all requested ops were allocated
+ *   - 0: Not enough entries in the mempool; no ops are retrieved.
+ */
+int __rte_experimental
+rte_comp_op_bulk_alloc(struct rte_mempool *mempool,
+ struct rte_comp_op **ops, uint16_t nb_ops);
+
+/**
+ * Free an operation structure.
+ * If the operation has been allocated from an rte_mempool, it will
+ * be returned to the mempool.
+ *
+ * @param op
+ * Compress operation
+ */
+void __rte_experimental
+rte_comp_op_free(struct rte_comp_op *op);
+
+/**
+ * Get the name of a compress service feature flag
+ *
+ * @param flag
+ * The mask describing the flag
+ *
+ * @return
+ * The name of this flag, or NULL if it's not a valid feature flag.
+ */
+const char * __rte_experimental
+rte_comp_get_feature_name(uint64_t flag);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMP_H_ */
diff --git a/lib/librte_compressdev/rte_compressdev.c b/lib/librte_compressdev/rte_compressdev.c
new file mode 100644
index 00000000..9091dd6e
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev.c
@@ -0,0 +1,772 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+#include <rte_eal.h>
+#include <rte_memzone.h>
+
+#include "rte_compressdev.h"
+#include "rte_compressdev_internal.h"
+#include "rte_compressdev_pmd.h"
+
+#define RTE_COMPRESSDEV_DETACHED (0)
+#define RTE_COMPRESSDEV_ATTACHED (1)
+
+struct rte_compressdev rte_comp_devices[RTE_COMPRESS_MAX_DEVS];
+
+struct rte_compressdev *rte_compressdevs = &rte_comp_devices[0];
+
+static struct rte_compressdev_global compressdev_globals = {
+ .devs = &rte_comp_devices[0],
+ .data = { NULL },
+ .nb_devs = 0,
+ .max_devs = RTE_COMPRESS_MAX_DEVS
+};
+
+struct rte_compressdev_global *rte_compressdev_globals = &compressdev_globals;
+
+const struct rte_compressdev_capabilities * __rte_experimental
+rte_compressdev_capability_get(uint8_t dev_id,
+ enum rte_comp_algorithm algo)
+{
+ const struct rte_compressdev_capabilities *capability;
+ struct rte_compressdev_info dev_info;
+ int i = 0;
+
+ if (dev_id >= compressdev_globals.nb_devs) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
+ return NULL;
+ }
+ rte_compressdev_info_get(dev_id, &dev_info);
+
+ while ((capability = &dev_info.capabilities[i++])->algo !=
+			RTE_COMP_ALGO_UNSPECIFIED) {
+ if (capability->algo == algo)
+ return capability;
+ }
+
+ return NULL;
+}
+
+const char * __rte_experimental
+rte_compressdev_get_feature_name(uint64_t flag)
+{
+ switch (flag) {
+ case RTE_COMPDEV_FF_HW_ACCELERATED:
+ return "HW_ACCELERATED";
+ case RTE_COMPDEV_FF_CPU_SSE:
+ return "CPU_SSE";
+ case RTE_COMPDEV_FF_CPU_AVX:
+ return "CPU_AVX";
+ case RTE_COMPDEV_FF_CPU_AVX2:
+ return "CPU_AVX2";
+ case RTE_COMPDEV_FF_CPU_AVX512:
+ return "CPU_AVX512";
+ case RTE_COMPDEV_FF_CPU_NEON:
+ return "CPU_NEON";
+ default:
+ return NULL;
+ }
+}
+
+static struct rte_compressdev *
+rte_compressdev_get_dev(uint8_t dev_id)
+{
+ return &rte_compressdev_globals->devs[dev_id];
+}
+
+struct rte_compressdev * __rte_experimental
+rte_compressdev_pmd_get_named_dev(const char *name)
+{
+ struct rte_compressdev *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < rte_compressdev_globals->max_devs; i++) {
+ dev = &rte_compressdev_globals->devs[i];
+
+ if ((dev->attached == RTE_COMPRESSDEV_ATTACHED) &&
+ (strcmp(dev->data->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+static unsigned int
+rte_compressdev_is_valid_dev(uint8_t dev_id)
+{
+ struct rte_compressdev *dev = NULL;
+
+ if (dev_id >= rte_compressdev_globals->nb_devs)
+ return 0;
+
+ dev = rte_compressdev_get_dev(dev_id);
+ if (dev->attached != RTE_COMPRESSDEV_ATTACHED)
+ return 0;
+ else
+ return 1;
+}
+
+
+int __rte_experimental
+rte_compressdev_get_dev_id(const char *name)
+{
+ unsigned int i;
+
+ if (name == NULL)
+ return -1;
+
+ for (i = 0; i < rte_compressdev_globals->nb_devs; i++)
+ if ((strcmp(rte_compressdev_globals->devs[i].data->name, name)
+ == 0) &&
+ (rte_compressdev_globals->devs[i].attached ==
+ RTE_COMPRESSDEV_ATTACHED))
+ return i;
+
+ return -1;
+}
+
+uint8_t __rte_experimental
+rte_compressdev_count(void)
+{
+ return rte_compressdev_globals->nb_devs;
+}
+
+uint8_t __rte_experimental
+rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
+ uint8_t nb_devices)
+{
+ uint8_t i, count = 0;
+ struct rte_compressdev *devs = rte_compressdev_globals->devs;
+ uint8_t max_devs = rte_compressdev_globals->max_devs;
+
+ for (i = 0; i < max_devs && count < nb_devices; i++) {
+
+ if (devs[i].attached == RTE_COMPRESSDEV_ATTACHED) {
+ int cmp;
+
+ cmp = strncmp(devs[i].device->driver->name,
+ driver_name,
+ strlen(driver_name));
+
+ if (cmp == 0)
+ devices[count++] = devs[i].data->dev_id;
+ }
+ }
+
+ return count;
+}
+
+int __rte_experimental
+rte_compressdev_socket_id(uint8_t dev_id)
+{
+ struct rte_compressdev *dev;
+
+ if (!rte_compressdev_is_valid_dev(dev_id))
+ return -1;
+
+ dev = rte_compressdev_get_dev(dev_id);
+
+ return dev->data->socket_id;
+}
+
+static inline int
+rte_compressdev_data_alloc(uint8_t dev_id, struct rte_compressdev_data **data,
+ int socket_id)
+{
+ char mz_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+ int n;
+
+ /* generate memzone name */
+ n = snprintf(mz_name, sizeof(mz_name),
+ "rte_compressdev_data_%u", dev_id);
+ if (n >= (int)sizeof(mz_name))
+ return -EINVAL;
+
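+	/*
+	 * Editor's note (explanatory comment): only the primary process
+	 * reserves the memzone; secondary processes look up the same zone
+	 * so that the device data is shared across processes.
+	 */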
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_compressdev_data),
+ socket_id, 0);
+ } else
+ mz = rte_memzone_lookup(mz_name);
+
+ if (mz == NULL)
+ return -ENOMEM;
+
+ *data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(*data, 0, sizeof(struct rte_compressdev_data));
+
+ return 0;
+}
+
+static uint8_t
+rte_compressdev_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_COMPRESS_MAX_DEVS; dev_id++) {
+ if (rte_comp_devices[dev_id].attached ==
+ RTE_COMPRESSDEV_DETACHED)
+ return dev_id;
+ }
+ return RTE_COMPRESS_MAX_DEVS;
+}
+
+struct rte_compressdev * __rte_experimental
+rte_compressdev_pmd_allocate(const char *name, int socket_id)
+{
+ struct rte_compressdev *compressdev;
+ uint8_t dev_id;
+
+ if (rte_compressdev_pmd_get_named_dev(name) != NULL) {
+ COMPRESSDEV_LOG(ERR,
+ "comp device with name %s already allocated!", name);
+ return NULL;
+ }
+
+ dev_id = rte_compressdev_find_free_device_index();
+ if (dev_id == RTE_COMPRESS_MAX_DEVS) {
+ COMPRESSDEV_LOG(ERR, "Reached maximum number of comp devices");
+ return NULL;
+ }
+ compressdev = rte_compressdev_get_dev(dev_id);
+
+ if (compressdev->data == NULL) {
+ struct rte_compressdev_data *compressdev_data =
+ compressdev_globals.data[dev_id];
+
+ int retval = rte_compressdev_data_alloc(dev_id,
+ &compressdev_data, socket_id);
+
+ if (retval < 0 || compressdev_data == NULL)
+ return NULL;
+
+ compressdev->data = compressdev_data;
+
+ snprintf(compressdev->data->name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+ "%s", name);
+
+ compressdev->data->dev_id = dev_id;
+ compressdev->data->socket_id = socket_id;
+ compressdev->data->dev_started = 0;
+
+ compressdev->attached = RTE_COMPRESSDEV_ATTACHED;
+
+ compressdev_globals.nb_devs++;
+ }
+
+ return compressdev;
+}
+
+int __rte_experimental
+rte_compressdev_pmd_release_device(struct rte_compressdev *compressdev)
+{
+ int ret;
+
+ if (compressdev == NULL)
+ return -EINVAL;
+
+ /* Close device only if device operations have been set */
+ if (compressdev->dev_ops) {
+ ret = rte_compressdev_close(compressdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ compressdev->attached = RTE_COMPRESSDEV_DETACHED;
+ compressdev_globals.nb_devs--;
+ return 0;
+}
+
+uint16_t __rte_experimental
+rte_compressdev_queue_pair_count(uint8_t dev_id)
+{
+ struct rte_compressdev *dev;
+
+ dev = &rte_comp_devices[dev_id];
+ return dev->data->nb_queue_pairs;
+}
+
+static int
+rte_compressdev_queue_pairs_config(struct rte_compressdev *dev,
+ uint16_t nb_qpairs, int socket_id)
+{
+ struct rte_compressdev_info dev_info;
+ void **qp;
+ unsigned int i;
+
+ if ((dev == NULL) || (nb_qpairs < 1)) {
+ COMPRESSDEV_LOG(ERR, "invalid param: dev %p, nb_queues %u",
+ dev, nb_qpairs);
+ return -EINVAL;
+ }
+
+	COMPRESSDEV_LOG(DEBUG, "Setup %d queue pairs on device %u",
+ nb_qpairs, dev->data->dev_id);
+
+ memset(&dev_info, 0, sizeof(struct rte_compressdev_info));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+
+ if ((dev_info.max_nb_queue_pairs != 0) &&
+ (nb_qpairs > dev_info.max_nb_queue_pairs)) {
+ COMPRESSDEV_LOG(ERR, "Invalid num queue_pairs (%u) for dev %u",
+ nb_qpairs, dev->data->dev_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->queue_pairs == NULL) { /* first time configuration */
+ dev->data->queue_pairs = rte_zmalloc_socket(
+ "compressdev->queue_pairs",
+ sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (dev->data->queue_pairs == NULL) {
+ dev->data->nb_queue_pairs = 0;
+ COMPRESSDEV_LOG(ERR,
+ "failed to get memory for qp meta data, nb_queues %u",
+ nb_qpairs);
+ return -(ENOMEM);
+ }
+ } else { /* re-configure */
+ int ret;
+ uint16_t old_nb_queues = dev->data->nb_queue_pairs;
+
+ qp = dev->data->queue_pairs;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
+ -ENOTSUP);
+
+ for (i = nb_qpairs; i < old_nb_queues; i++) {
+ ret = (*dev->dev_ops->queue_pair_release)(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
+ RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ COMPRESSDEV_LOG(ERR,
+ "failed to realloc qp meta data, nb_queues %u",
+ nb_qpairs);
+ return -(ENOMEM);
+ }
+
+ if (nb_qpairs > old_nb_queues) {
+ uint16_t new_qs = nb_qpairs - old_nb_queues;
+
+ memset(qp + old_nb_queues, 0,
+ sizeof(qp[0]) * new_qs);
+ }
+
+ dev->data->queue_pairs = qp;
+
+ }
+ dev->data->nb_queue_pairs = nb_qpairs;
+ return 0;
+}
+
+static int
+rte_compressdev_queue_pairs_release(struct rte_compressdev *dev)
+{
+ uint16_t num_qps, i;
+ int ret;
+
+ if (dev == NULL) {
+ COMPRESSDEV_LOG(ERR, "invalid param: dev %p", dev);
+ return -EINVAL;
+ }
+
+ num_qps = dev->data->nb_queue_pairs;
+
+ if (num_qps == 0)
+ return 0;
+
+	COMPRESSDEV_LOG(DEBUG, "Free %d queue pairs on device %u",
+ dev->data->nb_queue_pairs, dev->data->dev_id);
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
+ -ENOTSUP);
+
+ for (i = 0; i < num_qps; i++) {
+ ret = (*dev->dev_ops->queue_pair_release)(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (dev->data->queue_pairs != NULL)
+ rte_free(dev->data->queue_pairs);
+ dev->data->queue_pairs = NULL;
+ dev->data->nb_queue_pairs = 0;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_compressdev_configure(uint8_t dev_id, struct rte_compressdev_config *config)
+{
+ struct rte_compressdev *dev;
+ int diag;
+
+ if (!rte_compressdev_is_valid_dev(dev_id)) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_comp_devices[dev_id];
+
+ if (dev->data->dev_started) {
+ COMPRESSDEV_LOG(ERR,
+ "device %d must be stopped to allow configuration", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+ /* Setup new number of queue pairs and reconfigure device. */
+ diag = rte_compressdev_queue_pairs_config(dev, config->nb_queue_pairs,
+ config->socket_id);
+ if (diag != 0) {
+ COMPRESSDEV_LOG(ERR,
+ "dev%d rte_comp_dev_queue_pairs_config = %d",
+ dev_id, diag);
+ return diag;
+ }
+
+ return (*dev->dev_ops->dev_configure)(dev, config);
+}
+
+int __rte_experimental
+rte_compressdev_start(uint8_t dev_id)
+{
+ struct rte_compressdev *dev;
+ int diag;
+
+ COMPRESSDEV_LOG(DEBUG, "Start dev_id=%" PRIu8, dev_id);
+
+ if (!rte_compressdev_is_valid_dev(dev_id)) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_comp_devices[dev_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+ if (dev->data->dev_started != 0) {
+ COMPRESSDEV_LOG(ERR,
+ "Device with dev_id=%" PRIu8 " already started", dev_id);
+ return 0;
+ }
+
+ diag = (*dev->dev_ops->dev_start)(dev);
+ if (diag == 0)
+ dev->data->dev_started = 1;
+ else
+ return diag;
+
+ return 0;
+}
+
+void __rte_experimental
+rte_compressdev_stop(uint8_t dev_id)
+{
+ struct rte_compressdev *dev;
+
+ if (!rte_compressdev_is_valid_dev(dev_id)) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
+ return;
+ }
+
+ dev = &rte_comp_devices[dev_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+ if (dev->data->dev_started == 0) {
+ COMPRESSDEV_LOG(ERR,
+ "Device with dev_id=%" PRIu8 " already stopped", dev_id);
+ return;
+ }
+
+ (*dev->dev_ops->dev_stop)(dev);
+ dev->data->dev_started = 0;
+}
+
+int __rte_experimental
+rte_compressdev_close(uint8_t dev_id)
+{
+ struct rte_compressdev *dev;
+ int retval;
+
+ if (!rte_compressdev_is_valid_dev(dev_id)) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
+ return -1;
+ }
+
+ dev = &rte_comp_devices[dev_id];
+
+ /* Device must be stopped before it can be closed */
+ if (dev->data->dev_started == 1) {
+ COMPRESSDEV_LOG(ERR, "Device %u must be stopped before closing",
+ dev_id);
+ return -EBUSY;
+ }
+
+ /* Free queue pairs memory */
+ retval = rte_compressdev_queue_pairs_release(dev);
+
+ if (retval < 0)
+ return retval;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+ retval = (*dev->dev_ops->dev_close)(dev);
+
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct rte_compressdev *dev;
+
+ if (!rte_compressdev_is_valid_dev(dev_id)) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
+ return -EINVAL;
+ }
+
+ dev = &rte_comp_devices[dev_id];
+ if (queue_pair_id >= dev->data->nb_queue_pairs) {
+ COMPRESSDEV_LOG(ERR, "Invalid queue_pair_id=%d", queue_pair_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ COMPRESSDEV_LOG(ERR,
+ "device %d must be stopped to allow configuration", dev_id);
+ return -EBUSY;
+ }
+
+ if (max_inflight_ops == 0) {
+ COMPRESSDEV_LOG(ERR,
+ "Invalid maximum number of inflight operations");
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP);
+
+ return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id,
+ max_inflight_ops, socket_id);
+}
+
+uint16_t __rte_experimental
+rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct rte_compressdev *dev = &rte_compressdevs[dev_id];
+
+ nb_ops = (*dev->dequeue_burst)
+ (dev->data->queue_pairs[qp_id], ops, nb_ops);
+
+ return nb_ops;
+}
+
+uint16_t __rte_experimental
+rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct rte_compressdev *dev = &rte_compressdevs[dev_id];
+
+ return (*dev->enqueue_burst)(
+ dev->data->queue_pairs[qp_id], ops, nb_ops);
+}
+
+int __rte_experimental
+rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats)
+{
+ struct rte_compressdev *dev;
+
+ if (!rte_compressdev_is_valid_dev(dev_id)) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
+ return -ENODEV;
+ }
+
+ if (stats == NULL) {
+ COMPRESSDEV_LOG(ERR, "Invalid stats ptr");
+ return -EINVAL;
+ }
+
+ dev = &rte_comp_devices[dev_id];
+ memset(stats, 0, sizeof(*stats));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+ (*dev->dev_ops->stats_get)(dev, stats);
+ return 0;
+}
+
+void __rte_experimental
+rte_compressdev_stats_reset(uint8_t dev_id)
+{
+ struct rte_compressdev *dev;
+
+ if (!rte_compressdev_is_valid_dev(dev_id)) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%" PRIu8, dev_id);
+ return;
+ }
+
+ dev = &rte_comp_devices[dev_id];
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+ (*dev->dev_ops->stats_reset)(dev);
+}
+
+
+void __rte_experimental
+rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info)
+{
+ struct rte_compressdev *dev;
+
+ if (dev_id >= compressdev_globals.nb_devs) {
+ COMPRESSDEV_LOG(ERR, "Invalid dev_id=%d", dev_id);
+ return;
+ }
+
+ dev = &rte_comp_devices[dev_id];
+
+ memset(dev_info, 0, sizeof(struct rte_compressdev_info));
+
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+ (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+ dev_info->driver_name = dev->device->driver->name;
+}
+
+int __rte_experimental
+rte_compressdev_private_xform_create(uint8_t dev_id,
+ const struct rte_comp_xform *xform,
+ void **priv_xform)
+{
+ struct rte_compressdev *dev;
+ int ret;
+
+ dev = rte_compressdev_get_dev(dev_id);
+
+ if (xform == NULL || priv_xform == NULL || dev == NULL)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_create, -ENOTSUP);
+ ret = (*dev->dev_ops->private_xform_create)(dev, xform, priv_xform);
+ if (ret < 0) {
+ COMPRESSDEV_LOG(ERR,
+ "dev_id %d failed to create private_xform: err=%d",
+ dev_id, ret);
+ return ret;
+	}
+
+ return 0;
+}
+
+int __rte_experimental
+rte_compressdev_private_xform_free(uint8_t dev_id, void *priv_xform)
+{
+ struct rte_compressdev *dev;
+ int ret;
+
+ dev = rte_compressdev_get_dev(dev_id);
+
+ if (dev == NULL || priv_xform == NULL)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->private_xform_free, -ENOTSUP);
+ ret = dev->dev_ops->private_xform_free(dev, priv_xform);
+ if (ret < 0) {
+ COMPRESSDEV_LOG(ERR,
+ "dev_id %d failed to free private xform: err=%d",
+ dev_id, ret);
+ return ret;
+	}
+
+ return 0;
+}
+
+int __rte_experimental
+rte_compressdev_stream_create(uint8_t dev_id,
+ const struct rte_comp_xform *xform,
+ void **stream)
+{
+ struct rte_compressdev *dev;
+ int ret;
+
+ dev = rte_compressdev_get_dev(dev_id);
+
+ if (xform == NULL || dev == NULL || stream == NULL)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_create, -ENOTSUP);
+ ret = (*dev->dev_ops->stream_create)(dev, xform, stream);
+ if (ret < 0) {
+ COMPRESSDEV_LOG(ERR,
+ "dev_id %d failed to create stream: err=%d",
+ dev_id, ret);
+ return ret;
+	}
+
+ return 0;
+}
+
+
+int __rte_experimental
+rte_compressdev_stream_free(uint8_t dev_id, void *stream)
+{
+ struct rte_compressdev *dev;
+ int ret;
+
+ dev = rte_compressdev_get_dev(dev_id);
+
+ if (dev == NULL || stream == NULL)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stream_free, -ENOTSUP);
+ ret = dev->dev_ops->stream_free(dev, stream);
+ if (ret < 0) {
+ COMPRESSDEV_LOG(ERR,
+ "dev_id %d failed to free stream: err=%d",
+ dev_id, ret);
+ return ret;
+	}
+
+ return 0;
+}
+
+const char * __rte_experimental
+rte_compressdev_name_get(uint8_t dev_id)
+{
+ struct rte_compressdev *dev = rte_compressdev_get_dev(dev_id);
+
+ if (dev == NULL)
+ return NULL;
+
+ return dev->data->name;
+}
+
+RTE_INIT(rte_compressdev_log)
+{
+ compressdev_logtype = rte_log_register("lib.compressdev");
+ if (compressdev_logtype >= 0)
+ rte_log_set_level(compressdev_logtype, RTE_LOG_NOTICE);
+}
diff --git a/lib/librte_compressdev/rte_compressdev.h b/lib/librte_compressdev/rte_compressdev.h
new file mode 100644
index 00000000..5b4fca4d
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev.h
@@ -0,0 +1,540 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef _RTE_COMPRESSDEV_H_
+#define _RTE_COMPRESSDEV_H_
+
+/**
+ * @file rte_compressdev.h
+ *
+ * RTE Compression Device APIs
+ *
+ * Defines comp device APIs for the provisioning of compression operations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#include "rte_comp.h"
+
+/**
+ * Parameter log base 2 range description.
+ * Final value will be 2^value.
+ */
+struct rte_param_log2_range {
+ uint8_t min; /**< Minimum log2 value */
+ uint8_t max; /**< Maximum log2 value */
+ uint8_t increment;
+	/**< If a range of sizes is supported,
+	 * this parameter indicates the
+	 * increments in base-2 log byte value
+	 * that are supported between the minimum and maximum.
+	 */
+};
+
+/** Structure used to capture a capability of a comp device */
+struct rte_compressdev_capabilities {
+ enum rte_comp_algorithm algo;
+	/**< Compression algorithm */
+ uint64_t comp_feature_flags;
+ /**< Bitmask of flags for compression service features */
+ struct rte_param_log2_range window_size;
+ /**< Window size range in base two log byte values */
+};
+
+/** Macro used at end of comp PMD list */
+#define RTE_COMP_END_OF_CAPABILITIES_LIST() \
+ { RTE_COMP_ALGO_UNSPECIFIED }
+
+const struct rte_compressdev_capabilities * __rte_experimental
+rte_compressdev_capability_get(uint8_t dev_id,
+ enum rte_comp_algorithm algo);
+
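+/*
+ * Editor's note: illustrative sketch, not part of the upstream header.
+ * Checks whether a device supports DEFLATE at all and, if so, whether
+ * it can do dynamic Huffman encoding.
+ */
+static inline int
+example_supports_dynamic_huffman(uint8_t dev_id)
+{
+	const struct rte_compressdev_capabilities *cap;
+
+	cap = rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);
+	if (cap == NULL)
+		return 0;	/* DEFLATE not supported */
+
+	return (cap->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) != 0;
+}
+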
+/**
+ * compression device supported feature flags
+ *
+ * @note New feature flags should be added to the end of the list
+ *
+ * Keep these flags synchronised with rte_compressdev_get_feature_name()
+ */
+#define RTE_COMPDEV_FF_HW_ACCELERATED (1ULL << 0)
+/**< Operations are off-loaded to an external hardware accelerator */
+#define RTE_COMPDEV_FF_CPU_SSE (1ULL << 1)
+/**< Utilises CPU SIMD SSE instructions */
+#define RTE_COMPDEV_FF_CPU_AVX (1ULL << 2)
+/**< Utilises CPU SIMD AVX instructions */
+#define RTE_COMPDEV_FF_CPU_AVX2 (1ULL << 3)
+/**< Utilises CPU SIMD AVX2 instructions */
+#define RTE_COMPDEV_FF_CPU_AVX512 (1ULL << 4)
+/**< Utilises CPU SIMD AVX512 instructions */
+#define RTE_COMPDEV_FF_CPU_NEON (1ULL << 5)
+/**< Utilises CPU NEON instructions */
+
+/**
+ * Get the name of a compress device feature flag.
+ *
+ * @param flag
+ * The mask describing the flag
+ *
+ * @return
+ * The name of this flag, or NULL if it's not a valid feature flag.
+ */
+const char * __rte_experimental
+rte_compressdev_get_feature_name(uint64_t flag);
+
+/** comp device information */
+struct rte_compressdev_info {
+ const char *driver_name; /**< Driver name. */
+ uint64_t feature_flags; /**< Feature flags */
+ const struct rte_compressdev_capabilities *capabilities;
+	/**< Array of the device's supported capabilities */
+	uint16_t max_nb_queue_pairs;
+	/**< Maximum number of queue pairs supported by the device.
+	 * (If 0, there is no limit on the maximum number of queue pairs)
+ */
+};
+
+/** comp device statistics */
+struct rte_compressdev_stats {
+ uint64_t enqueued_count;
+ /**< Count of all operations enqueued */
+ uint64_t dequeued_count;
+ /**< Count of all operations dequeued */
+
+ uint64_t enqueue_err_count;
+ /**< Total error count on operations enqueued */
+ uint64_t dequeue_err_count;
+ /**< Total error count on operations dequeued */
+};
+
+
+/**
+ * Get the device identifier for the named compress device.
+ *
+ * @param name
+ * Device name to select the device structure
+ * @return
+ * - Returns compress device identifier on success.
+ *   - Returns -1 on failure to find named compress device.
+ */
+int __rte_experimental
+rte_compressdev_get_dev_id(const char *name);
+
+/**
+ * Get the compress device name given a device identifier.
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @return
+ * - Returns compress device name.
+ * - Returns NULL if compress device is not present.
+ */
+const char * __rte_experimental
+rte_compressdev_name_get(uint8_t dev_id);
+
+/**
+ * Get the total number of compress devices that have been successfully
+ * initialised.
+ *
+ * @return
+ * - The total number of usable compress devices.
+ */
+uint8_t __rte_experimental
+rte_compressdev_count(void);
+
+/**
+ * Get number and identifiers of attached comp devices that
+ * use the same compress driver.
+ *
+ * @param driver_name
+ * Driver name
+ * @param devices
+ * Output devices identifiers
+ * @param nb_devices
+ * Maximal number of devices
+ *
+ * @return
+ * Returns number of attached compress devices.
+ */
+uint8_t __rte_experimental
+rte_compressdev_devices_get(const char *driver_name, uint8_t *devices,
+ uint8_t nb_devices);
+
+/**
+ * Return the NUMA socket to which a device is connected.
+ *
+ * @param dev_id
+ *   Compress device identifier
+ * @return
+ *   The NUMA socket id to which the device is connected or
+ *   a default of zero if the socket could not be determined.
+ *   -1 is returned if the dev_id value is out of range.
+ */
+int __rte_experimental
+rte_compressdev_socket_id(uint8_t dev_id);
+
+/** Compress device configuration structure */
+struct rte_compressdev_config {
+ int socket_id;
+ /**< Socket on which to allocate resources */
+ uint16_t nb_queue_pairs;
+ /**< Total number of queue pairs to configure on a device */
+ uint16_t max_nb_priv_xforms;
+ /**< Max number of private_xforms which will be created on the device */
+ uint16_t max_nb_streams;
+ /**< Max number of streams which will be created on the device */
+};
+
+/**
+ * Configure a device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @param config
+ * The compress device configuration
+ * @return
+ * - 0: Success, device configured.
+ * - <0: Error code returned by the driver configuration function.
+ */
+int __rte_experimental
+rte_compressdev_configure(uint8_t dev_id,
+ struct rte_compressdev_config *config);
+
+/**
+ * Start a device.
+ *
+ * The device start step is called after configuring the device and setting up
+ * its queue pairs.
+ * On success, data-path functions exported by the API (enqueue/dequeue, etc)
+ * can be invoked.
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @return
+ * - 0: Success, device started.
+ * - <0: Error code of the driver device start function.
+ */
+int __rte_experimental
+rte_compressdev_start(uint8_t dev_id);
+
+/**
+ * Stop a device. The device can be restarted with a call to
+ * rte_compressdev_start()
+ *
+ * @param dev_id
+ * Compress device identifier
+ */
+void __rte_experimental
+rte_compressdev_stop(uint8_t dev_id);
+
+/**
+ * Close a device.
+ * The memory allocated in the device is freed.
+ * After calling this function, the device must be
+ * configured again before it can be used.
+ *
+ * @param dev_id
+ * Compress device identifier
+ *
+ * @return
+ * - 0 on successfully closing device
+ * - <0 on failure to close device
+ */
+int __rte_experimental
+rte_compressdev_close(uint8_t dev_id);
+
+/**
+ * Allocate and set up a receive queue pair for a device.
+ * This should only be called when the device is stopped.
+ *
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @param queue_pair_id
+ *   The index of the queue pair to set up. The
+ *   value must be in the range [0, nb_queue_pairs - 1]
+ * previously supplied to rte_compressdev_configure()
+ * @param max_inflight_ops
+ * Max number of ops which the qp will have to
+ * accommodate simultaneously
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier
+ * in case of NUMA. The value can be *SOCKET_ID_ANY*
+ * if there is no NUMA constraint for the DMA memory
+ * allocated for the receive queue pair
+ * @return
+ * - 0: Success, queue pair correctly set up.
+ * - <0: Queue pair configuration failed
+ */
+int __rte_experimental
+rte_compressdev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ uint32_t max_inflight_ops, int socket_id);
+
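+/*
+ * Editor's note: illustrative bring-up sketch, not part of the upstream
+ * header. The queue-pair count, in-flight limit and xform/stream limits
+ * are arbitrary example values.
+ */
+static inline int
+example_compressdev_setup(uint8_t dev_id)
+{
+	struct rte_compressdev_config cfg = {
+		.socket_id = rte_compressdev_socket_id(dev_id),
+		.nb_queue_pairs = 1,
+		.max_nb_priv_xforms = 16,
+		.max_nb_streams = 0,
+	};
+
+	if (rte_compressdev_configure(dev_id, &cfg) < 0)
+		return -1;
+	/* one queue pair, up to 512 ops in flight */
+	if (rte_compressdev_queue_pair_setup(dev_id, 0, 512,
+			cfg.socket_id) < 0)
+		return -1;
+	return rte_compressdev_start(dev_id);
+}
+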
+/**
+ * Get the number of queue pairs on a specific comp device
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @return
+ * - The number of configured queue pairs.
+ */
+uint16_t __rte_experimental
+rte_compressdev_queue_pair_count(uint8_t dev_id);
+
+
+/**
+ * Retrieve the general I/O statistics of a device.
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @param stats
+ * A pointer to a structure of type
+ * *rte_compressdev_stats* to be filled with the
+ * values of device counters
+ * @return
+ * - Zero if successful.
+ * - Non-zero otherwise.
+ */
+int __rte_experimental
+rte_compressdev_stats_get(uint8_t dev_id, struct rte_compressdev_stats *stats);
+
+/**
+ * Reset the general I/O statistics of a device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ */
+void __rte_experimental
+rte_compressdev_stats_reset(uint8_t dev_id);
+
+/**
+ * Retrieve the contextual information of a device.
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @param dev_info
+ * A pointer to a structure of type *rte_compressdev_info*
+ * to be filled with the contextual information of the device
+ *
+ * @note The capabilities field of dev_info is set to point to the first
+ * element of an array of struct rte_compressdev_capabilities.
+ * The element after the last valid element has its algo field set to
+ * RTE_COMP_ALGO_LIST_END.
+ */
+void __rte_experimental
+rte_compressdev_info_get(uint8_t dev_id, struct rte_compressdev_info *dev_info);
+
+/**
+ *
+ * Dequeue a burst of processed compression operations from a queue on the comp
+ * device. The dequeued operations are stored in *rte_comp_op* structures
+ * whose pointers are supplied in the *ops* array.
+ *
+ * The rte_compressdev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_comp_op* data structures
+ * effectively supplied into the *ops* array.
+ *
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the device's output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_compressdev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
+ *
+ * The rte_compressdev_dequeue_burst() function does not provide any error
+ * notification to avoid the corresponding overhead.
+ *
+ * @note: operation ordering is not maintained within the queue pair.
+ *
+ * @note: In case op status = OUT_OF_SPACE_TERMINATED, op.consumed=0 and the
+ * op must be resubmitted with the same input data and a larger output buffer.
+ * op.produced is usually 0, but in decompression cases a PMD may return > 0
+ * and the application may find it useful to inspect that data.
+ * This status is only returned on STATELESS ops.
+ *
+ * @note: In case op status = OUT_OF_SPACE_RECOVERABLE, op.produced can be used
+ * and next op in stream should continue on from op.consumed+1 with a fresh
+ * output buffer.
+ * Consumed=0, produced=0 is an unusual but allowed case. There may be useful
+ * state/history stored in the PMD, even though no output was produced yet.
+ *
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @param qp_id
+ * The index of the queue pair from which to retrieve
+ * processed operations. The value must be in the range
+ * [0, nb_queue_pair - 1] previously supplied to
+ * rte_compressdev_configure()
+ * @param ops
+ * The address of an array of pointers to
+ * *rte_comp_op* structures that must be
+ * large enough to store *nb_ops* pointers in it
+ * @param nb_ops
+ * The maximum number of operations to dequeue
+ * @return
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_comp_op* structures effectively supplied to the
+ * *ops* array.
+ */
+uint16_t __rte_experimental
+rte_compressdev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_comp_op **ops, uint16_t nb_ops);
+
+/**
+ * Enqueue a burst of operations for processing on a compression device.
+ *
+ * The rte_compressdev_enqueue_burst() function is invoked to place
+ * comp operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
+ *
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_comp_op* structures.
+ *
+ * The rte_compressdev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
+ *
+ * @note All compression operations are Out-of-place (OOP) operations,
+ * as the size of the output data differs from the size of the input data.
+ *
+ * @note The flush flag only applies to operations which return SUCCESS.
+ * In OUT_OF_SPACE cases, whether STATEFUL or STATELESS, the data in the dest
+ * buffer is as if the flush flag was FLUSH_NONE.
+ * @note The flush flag only applies in the compression direction. It has no
+ * meaning for decompression.
+ * @note: operation ordering is not maintained within the queue pair.
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @param qp_id
+ * The index of the queue pair on which operations
+ * are to be enqueued for processing. The value
+ * must be in the range [0, nb_queue_pairs - 1]
+ * previously supplied to *rte_compressdev_configure*
+ * @param ops
+ * The address of an array of *nb_ops* pointers
+ * to *rte_comp_op* structures which contain
+ * the operations to be processed
+ * @param nb_ops
+ * The number of operations to process
+ * @return
+ * The number of operations actually enqueued on the device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * comp device's queue is full or if invalid parameters are specified in
+ * a *rte_comp_op*.
+ */
+uint16_t __rte_experimental
+rte_compressdev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_comp_op **ops, uint16_t nb_ops);
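
As a hedged sketch of handling a partial enqueue (a return value below
*nb_ops*), assuming ops[] holds nb_ops prepared operations; a real
application would bound the retries and dequeue completions between
attempts so the device queue can drain:

    uint16_t nb_enq = 0;

    while (nb_enq < nb_ops)
        nb_enq += rte_compressdev_enqueue_burst(dev_id, qp_id,
                &ops[nb_enq], nb_ops - nb_enq);
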
+
+/**
+ * This should allocate a stream from the device's mempool and initialise it.
+ * The application should call this API when setting up for the stateful
+ * processing of a set of data on a device. The API can be called multiple
+ * times to set up a stream for each data set. The handle returned is only for
+ * use with ops of op_type STATEFUL and must be passed to the PMD
+ * with every op in the data stream.
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @param xform
+ * xform data describing the (de)compression transform for this stream
+ * @param stream
+ * Pointer to where PMD's private stream handle should be stored
+ *
+ * @return
+ * - 0 if successful and valid stream handle
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private stream could not be allocated.
+ *
+ */
+int __rte_experimental
+rte_compressdev_stream_create(uint8_t dev_id,
+ const struct rte_comp_xform *xform,
+ void **stream);
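
A sketch of the stateful setup this API enables, assuming a
stateful-capable device and an xform prepared by the caller; op is a
hypothetical *rte_comp_op* belonging to the data stream:

    void *stream = NULL;
    int ret = rte_compressdev_stream_create(dev_id, &xform, &stream);

    if (ret < 0)
        return ret;          /* -EINVAL/-ENOTSUP/-ENOMEM as listed above */
    op->op_type = RTE_COMP_OP_STATEFUL;
    op->stream = stream;     /* attached to every op in the stream */
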
+
+/**
+ * This should clear the stream and return it to the device's mempool.
+ *
+ * @param dev_id
+ * Compress device identifier
+ *
+ * @param stream
+ * PMD's private stream data
+ *
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ * - Returns -EBUSY if the stream cannot be freed due to inflight operations
+ */
+int __rte_experimental
+rte_compressdev_stream_free(uint8_t dev_id, void *stream);
+
+/**
+ * This should allocate a private_xform from the device's mempool and
+ * initialise it. The application should call this API when setting up for
+ * stateless processing on a device. If the device reports the private_xform
+ * as non-shareable, the application cannot share this handle across multiple
+ * in-flight ops and should call this API again to get a separate handle for
+ * every in-flight op.
+ * The handle returned is only valid for use with ops of op_type STATELESS.
+ *
+ * @param dev_id
+ * Compress device identifier
+ * @param xform
+ * xform data describing the transform to apply on stateless ops
+ * @param private_xform
+ * Pointer to where PMD's private_xform handle should be stored
+ *
+ * @return
+ * - 0 if successful, with a valid private_xform handle stored in
+ * *private_xform*
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private_xform could not be allocated.
+ */
+int __rte_experimental
+rte_compressdev_private_xform_create(uint8_t dev_id,
+ const struct rte_comp_xform *xform,
+ void **private_xform);
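
The stateless counterpart, sketched under the same assumptions (xform and
op prepared by the caller); if the device reports private_xforms as
non-shareable, one handle per in-flight op is needed:

    void *priv_xform = NULL;

    if (rte_compressdev_private_xform_create(dev_id, &xform,
            &priv_xform) < 0)
        return -1;
    op->op_type = RTE_COMP_OP_STATELESS;
    op->private_xform = priv_xform;
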
+
+/**
+ * This should clear the private_xform and return it to the device's mempool.
+ * It is the application's responsibility to ensure that private_xform data
+ * is not cleared while there are still in-flight operations using it.
+ *
+ * @param dev_id
+ * Compress device identifier
+ *
+ * @param private_xform
+ * PMD's private_xform data
+ *
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ */
+int __rte_experimental
+rte_compressdev_private_xform_free(uint8_t dev_id, void *private_xform);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMPRESSDEV_H_ */
diff --git a/lib/librte_compressdev/rte_compressdev_internal.h b/lib/librte_compressdev/rte_compressdev_internal.h
new file mode 100644
index 00000000..22ceac66
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev_internal.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef _RTE_COMPRESSDEV_INTERNAL_H_
+#define _RTE_COMPRESSDEV_INTERNAL_H_
+
+/* rte_compressdev_internal.h
+ * This file holds Compressdev private data structures.
+ */
+#include <rte_log.h>
+
+#include "rte_comp.h"
+
+#define RTE_COMPRESSDEV_NAME_MAX_LEN (64)
+/**< Max length of name of comp PMD */
+
+/* Logging Macros */
+extern int compressdev_logtype;
+#define COMPRESSDEV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, compressdev_logtype, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+/**
+ * Dequeue processed operations from a queue pair of a device.
+ *
+ * @param qp
+ * The queue pair from which to retrieve
+ * processed operations.
+ * @param ops
+ * The address of an array of pointers to
+ * *rte_comp_op* structures that must be
+ * large enough to store *nb_ops* pointers in it
+ * @param nb_ops
+ * The maximum number of operations to dequeue
+ * @return
+ * - The number of operations actually dequeued, which is the number
+ * of pointers to *rte_comp_op* structures effectively supplied to the
+ * *ops* array.
+ */
+typedef uint16_t (*compressdev_dequeue_pkt_burst_t)(void *qp,
+ struct rte_comp_op **ops, uint16_t nb_ops);
+
+/**
+ * Enqueue a burst of operations for processing.
+ *
+ * @param qp
+ * The queue pair on which operations
+ * are to be enqueued for processing
+ * @param ops
+ * The address of an array of *nb_ops* pointers
+ * to *rte_comp_op* structures which contain
+ * the operations to be processed
+ * @param nb_ops
+ * The number of operations to process
+ * @return
+ * The number of operations actually enqueued on the device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * comp devices queue is full or if invalid parameters are specified in
+ * a *rte_comp_op*.
+ */
+
+typedef uint16_t (*compressdev_enqueue_pkt_burst_t)(void *qp,
+ struct rte_comp_op **ops, uint16_t nb_ops);
+
+/** The data structure associated with each comp device. */
+struct rte_compressdev {
+ compressdev_dequeue_pkt_burst_t dequeue_burst;
+ /**< Pointer to PMD dequeue function */
+ compressdev_enqueue_pkt_burst_t enqueue_burst;
+ /**< Pointer to PMD enqueue function */
+
+ struct rte_compressdev_data *data;
+ /**< Pointer to device data */
+ struct rte_compressdev_ops *dev_ops;
+ /**< Functions exported by PMD */
+ uint64_t feature_flags;
+ /**< Supported features */
+ struct rte_device *device;
+ /**< Backing device */
+
+ __extension__
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+} __rte_cache_aligned;
+
+/**
+ *
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_compressdev_data {
+ uint8_t dev_id;
+ /**< Compress device identifier */
+ uint8_t socket_id;
+ /**< Socket identifier where memory is allocated */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /**< Unique identifier name */
+
+ __extension__
+ uint8_t dev_started : 1;
+ /**< Device state: STARTED(1)/STOPPED(0) */
+
+ void **queue_pairs;
+ /**< Array of pointers to queue pairs. */
+ uint16_t nb_queue_pairs;
+ /**< Number of device queue pairs */
+
+ void *dev_private;
+ /**< PMD-specific private data */
+} __rte_cache_aligned;
+#endif
diff --git a/lib/librte_compressdev/rte_compressdev_pmd.c b/lib/librte_compressdev/rte_compressdev_pmd.c
new file mode 100644
index 00000000..7de4f339
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev_pmd.c
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <rte_malloc.h>
+#include <rte_kvargs.h>
+#include <rte_eal.h>
+
+#include "rte_compressdev_internal.h"
+#include "rte_compressdev_pmd.h"
+
+int compressdev_logtype;
+
+/**
+ * Parse name from argument
+ */
+static int
+rte_compressdev_pmd_parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_compressdev_pmd_init_params *params = extra_args;
+ int n;
+
+ n = snprintf(params->name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s", value);
+ if (n >= RTE_COMPRESSDEV_NAME_MAX_LEN)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * Parse unsigned integer from argument
+ */
+static int
+rte_compressdev_pmd_parse_uint_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ long i;
+ char *end;
+
+ errno = 0;
+ i = strtol(value, &end, 10);
+ if (*end != 0 || errno != 0 || i < 0 || i > UINT32_MAX)
+ return -EINVAL;
+
+ *((uint32_t *)extra_args) = i;
+ return 0;
+}
+
+int __rte_experimental
+rte_compressdev_pmd_parse_input_args(
+ struct rte_compressdev_pmd_init_params *params,
+ const char *args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (args) {
+ kvlist = rte_kvargs_parse(args, compressdev_pmd_valid_params);
+ if (kvlist == NULL)
+ return -EINVAL;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG,
+ &rte_compressdev_pmd_parse_uint_arg,
+ &params->socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_COMPRESSDEV_PMD_NAME_ARG,
+ &rte_compressdev_pmd_parse_name_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+struct rte_compressdev * __rte_experimental
+rte_compressdev_pmd_create(const char *name,
+ struct rte_device *device,
+ size_t private_data_size,
+ struct rte_compressdev_pmd_init_params *params)
+{
+ struct rte_compressdev *compressdev;
+
+ if (params->name[0] != '\0') {
+ COMPRESSDEV_LOG(INFO, "[%s] User specified device name = %s\n",
+ device->driver->name, params->name);
+ name = params->name;
+ }
+
+ COMPRESSDEV_LOG(INFO, "[%s] - Creating compressdev %s\n",
+ device->driver->name, name);
+
+ COMPRESSDEV_LOG(INFO,
+ "[%s] - Init parameters - name: %s, socket id: %d",
+ device->driver->name, name,
+ params->socket_id);
+
+ /* allocate device structure */
+ compressdev = rte_compressdev_pmd_allocate(name, params->socket_id);
+ if (compressdev == NULL) {
+ COMPRESSDEV_LOG(ERR, "[%s] Failed to allocate comp device %s",
+ device->driver->name, name);
+ return NULL;
+ }
+
+ /* allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ compressdev->data->dev_private =
+ rte_zmalloc_socket("compressdev device private",
+ private_data_size,
+ RTE_CACHE_LINE_SIZE,
+ params->socket_id);
+
+ if (compressdev->data->dev_private == NULL) {
+ COMPRESSDEV_LOG(ERR,
+ "[%s] Cannot allocate memory for compressdev %s private data",
+ device->driver->name, name);
+
+ rte_compressdev_pmd_release_device(compressdev);
+ return NULL;
+ }
+ }
+
+ compressdev->device = device;
+
+ return compressdev;
+}
+
+int __rte_experimental
+rte_compressdev_pmd_destroy(struct rte_compressdev *compressdev)
+{
+ int retval;
+
+ COMPRESSDEV_LOG(INFO, "[%s] Closing comp device %s",
+ compressdev->device->driver->name,
+ compressdev->device->name);
+
+ /* free comp device */
+ retval = rte_compressdev_pmd_release_device(compressdev);
+ if (retval)
+ return retval;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(compressdev->data->dev_private);
+
+ compressdev->device = NULL;
+ compressdev->data = NULL;
+
+ return 0;
+}
diff --git a/lib/librte_compressdev/rte_compressdev_pmd.h b/lib/librte_compressdev/rte_compressdev_pmd.h
new file mode 100644
index 00000000..38e9ea02
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev_pmd.h
@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef _RTE_COMPRESSDEV_PMD_H_
+#define _RTE_COMPRESSDEV_PMD_H_
+
+/** @file
+ * RTE comp PMD APIs
+ *
+ * @note
+ * These APIs are for comp PMDs only and user applications should not call
+ * them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_common.h>
+
+#include "rte_compressdev.h"
+#include "rte_compressdev_internal.h"
+
+#define RTE_COMPRESSDEV_PMD_NAME_ARG ("name")
+#define RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG ("socket_id")
+
+static const char * const compressdev_pmd_valid_params[] = {
+ RTE_COMPRESSDEV_PMD_NAME_ARG,
+ RTE_COMPRESSDEV_PMD_SOCKET_ID_ARG
+};
+
+/**
+ * @internal
+ * Initialisation parameters for comp devices
+ */
+struct rte_compressdev_pmd_init_params {
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ int socket_id;
+};
+
+/** Global structure used for maintaining state of allocated comp devices */
+struct rte_compressdev_global {
+ struct rte_compressdev *devs; /**< Device information array */
+ struct rte_compressdev_data *data[RTE_COMPRESS_MAX_DEVS];
+ /**< Device private data */
+ uint8_t nb_devs; /**< Number of devices found */
+ uint8_t max_devs; /**< Max number of devices */
+};
+
+/** Pointer to global array of comp devices */
+extern struct rte_compressdev *rte_compressdevs;
+/** Pointer to global comp devices data structure */
+extern struct rte_compressdev_global *rte_compressdev_globals;
+
+/**
+ * Get the rte_compressdev structure device pointer for the named device.
+ *
+ * @param name
+ * Compress device name
+ * @return
+ * - The rte_compressdev structure pointer for the given device identifier.
+ */
+struct rte_compressdev * __rte_experimental
+rte_compressdev_pmd_get_named_dev(const char *name);
+
+/**
+ * Definitions of all functions exported by a driver through the
+ * generic structure of type *comp_dev_ops* supplied in the
+ * *rte_compressdev* structure associated with a device.
+ */
+
+/**
+ * Function used to configure device.
+ *
+ * @param dev
+ * Compress device
+ * @param config
+ * Compress device configurations
+ * @return
+ * Returns 0 on success
+ */
+typedef int (*compressdev_configure_t)(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config);
+
+/**
+ * Function used to start a configured device.
+ *
+ * @param dev
+ * Compress device
+ * @return
+ * Returns 0 on success
+ */
+typedef int (*compressdev_start_t)(struct rte_compressdev *dev);
+
+/**
+ * Function used to stop a configured device.
+ *
+ * @param dev
+ * Compress device
+ */
+typedef void (*compressdev_stop_t)(struct rte_compressdev *dev);
+
+/**
+ * Function used to close a configured device.
+ *
+ * @param dev
+ * Compress device
+ * @return
+ * - 0 on success.
+ * - EAGAIN if the device is busy and cannot be closed
+ */
+typedef int (*compressdev_close_t)(struct rte_compressdev *dev);
+
+
+/**
+ * Function used to get statistics of a device.
+ *
+ * @param dev
+ * Compress device
+ * @param stats
+ * Compress device stats to populate
+ */
+typedef void (*compressdev_stats_get_t)(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats);
+
+
+/**
+ * Function used to reset statistics of a device.
+ *
+ * @param dev
+ * Compress device
+ */
+typedef void (*compressdev_stats_reset_t)(struct rte_compressdev *dev);
+
+
+/**
+ * Function used to get specific information of a device.
+ *
+ * @param dev
+ * Compress device
+ * @param dev_info
+ * Compress device info structure to populate
+ */
+typedef void (*compressdev_info_get_t)(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info);
+
+/**
+ * Setup a queue pair for a device.
+ *
+ * @param dev
+ * Compress device
+ * @param qp_id
+ * Queue pair identifier
+ * @param max_inflight_ops
+ * Max inflight ops which qp must accommodate
+ * @param socket_id
+ * Socket identifier
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*compressdev_queue_pair_setup_t)(struct rte_compressdev *dev,
+ uint16_t qp_id, uint32_t max_inflight_ops, int socket_id);
+
+/**
+ * Release memory resources allocated by given queue pair.
+ *
+ * @param dev
+ * Compress device
+ * @param qp_id
+ * Queue pair identifier
+ * @return
+ * - 0 on success.
+ * - EAGAIN if the queue pair is busy and cannot be released
+ */
+typedef int (*compressdev_queue_pair_release_t)(struct rte_compressdev *dev,
+ uint16_t qp_id);
+
+/**
+ * Get number of available queue pairs of a device.
+ *
+ * @param dev
+ * Compress device
+ * @return
+ * Returns number of queue pairs on success.
+ */
+typedef uint32_t (*compressdev_queue_pair_count_t)(struct rte_compressdev *dev);
+
+/**
+ * Create driver private stream data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param xform
+ * xform data
+ * @param stream
+ * Pointer to where the handle of the PMD's private stream data should be stored
+ * @return
+ * - Returns 0 if private stream structure has been created successfully.
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private stream could not be allocated.
+ */
+typedef int (*compressdev_stream_create_t)(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **stream);
+
+/**
+ * Free driver private stream data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param stream
+ * handle of pmd's private stream data
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support STATEFUL operations.
+ * - Returns -EBUSY if the stream cannot be freed due to inflight operations
+ */
+typedef int (*compressdev_stream_free_t)(struct rte_compressdev *dev,
+ void *stream);
+
+/**
+ * Create driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param xform
+ * xform data
+ * @param private_xform
+ * Pointer to where the handle of the PMD's private_xform data should be stored
+ * @return
+ * - if successful returns 0
+ * and valid private_xform handle
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private_xform could not be allocated.
+ */
+typedef int (*compressdev_private_xform_create_t)(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **private_xform);
+
+/**
+ * Free driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param private_xform
+ * handle of pmd's private_xform data
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -EBUSY if can't free private_xform due to inflight operations
+ */
+typedef int (*compressdev_private_xform_free_t)(struct rte_compressdev *dev,
+ void *private_xform);
+
+/** comp device operations function pointer table */
+struct rte_compressdev_ops {
+ compressdev_configure_t dev_configure; /**< Configure device. */
+ compressdev_start_t dev_start; /**< Start device. */
+ compressdev_stop_t dev_stop; /**< Stop device. */
+ compressdev_close_t dev_close; /**< Close device. */
+
+ compressdev_info_get_t dev_infos_get; /**< Get device info. */
+
+ compressdev_stats_get_t stats_get;
+ /**< Get device statistics. */
+ compressdev_stats_reset_t stats_reset;
+ /**< Reset device statistics. */
+
+ compressdev_queue_pair_setup_t queue_pair_setup;
+ /**< Set up a device queue pair. */
+ compressdev_queue_pair_release_t queue_pair_release;
+ /**< Release a queue pair. */
+
+ compressdev_stream_create_t stream_create;
+ /**< Create a comp stream and initialise its private data. */
+ compressdev_stream_free_t stream_free;
+ /**< Free a comp stream's private data. */
+
+ compressdev_private_xform_create_t private_xform_create;
+ /**< Create a comp private_xform and initialise its private data. */
+ compressdev_private_xform_free_t private_xform_free;
+ /**< Free a comp private_xform's data. */
+};
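
A hypothetical PMD might instantiate this table as sketched below; every
my_pmd_* callback is an illustrative name, and the NULL entries assume a
stateless-only device that leaves the stream callbacks unimplemented:

    static struct rte_compressdev_ops my_pmd_ops = {
        .dev_configure        = my_pmd_config,
        .dev_start            = my_pmd_start,
        .dev_stop             = my_pmd_stop,
        .dev_close            = my_pmd_close,
        .dev_infos_get        = my_pmd_info_get,
        .stats_get            = my_pmd_stats_get,
        .stats_reset          = my_pmd_stats_reset,
        .queue_pair_setup     = my_pmd_qp_setup,
        .queue_pair_release   = my_pmd_qp_release,
        .stream_create        = NULL,   /* stateless-only device */
        .stream_free          = NULL,
        .private_xform_create = my_pmd_xform_create,
        .private_xform_free   = my_pmd_xform_free,
    };
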
+
+/**
+ * @internal
+ *
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Allocates a new compressdev slot for a comp device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ * Unique identifier name for each device
+ * @param socket_id
+ * Socket to allocate resources on
+ * @return
+ * - Slot in the rte_compressdevs array for the new device
+ */
+struct rte_compressdev * __rte_experimental
+rte_compressdev_pmd_allocate(const char *name, int socket_id);
+
+/**
+ * @internal
+ *
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Release the specified compressdev device.
+ *
+ * @param dev
+ * Compress device
+ * @return
+ * - 0 on success, negative on error
+ */
+int __rte_experimental
+rte_compressdev_pmd_release_device(struct rte_compressdev *dev);
+
+
+/**
+ * @internal
+ *
+ * PMD assist function to parse initialisation arguments for a comp driver
+ * when creating a new comp PMD device instance.
+ *
+ * The PMD should set default values in *params* before calling this
+ * function; these defaults are overwritten with any values successfully
+ * parsed from the args string (see the sketch after this declaration).
+ *
+ * @param params
+ * Parsed PMD initialisation parameters
+ * @param args
+ * Input argument string to parse
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int __rte_experimental
+rte_compressdev_pmd_parse_input_args(
+ struct rte_compressdev_pmd_init_params *params,
+ const char *args);
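
The sketch referenced above, a minimal example assuming args came from the
device bus (e.g. rte_vdev_device_args()); the defaults are set first and
survive unless the args string overrides them:

    struct rte_compressdev_pmd_init_params params = {
        .name = "",                    /* default: keep probe-time name */
        .socket_id = rte_socket_id(),  /* default: caller's socket */
    };

    if (rte_compressdev_pmd_parse_input_args(&params, args) < 0)
        return -EINVAL;
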
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a comp driver to create
+ * and allocate resources for a new comp PMD device instance.
+ *
+ * @param name
+ * Compress device name
+ * @param device
+ * Base device instance
+ * @param params
+ * PMD initialisation parameters
+ * @return
+ * - comp device instance on success
+ * - NULL on creation failure
+ */
+struct rte_compressdev * __rte_experimental
+rte_compressdev_pmd_create(const char *name,
+ struct rte_device *device,
+ size_t private_data_size,
+ struct rte_compressdev_pmd_init_params *params);
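
Continuing the hypothetical probe flow from the previous sketch:
my_pmd_private and my_pmd_ops are illustrative names, vdev is the probed
rte_vdev_device, and params holds the parsed init parameters:

    struct rte_compressdev *dev;

    dev = rte_compressdev_pmd_create(name, &vdev->device,
            sizeof(struct my_pmd_private), &params);
    if (dev == NULL)
        return -ENODEV;
    dev->dev_ops = &my_pmd_ops;
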
+
+/**
+ * @internal
+ *
+ * PMD assist function to provide boilerplate code for a comp driver to
+ * destroy and free resources associated with a comp PMD device instance.
+ *
+ * @param dev
+ * Compress device
+ * @return
+ * - 0 on success
+ * - errno on failure
+ */
+int __rte_experimental
+rte_compressdev_pmd_destroy(struct rte_compressdev *dev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_COMPRESSDEV_PMD_H_ */
diff --git a/lib/librte_compressdev/rte_compressdev_version.map b/lib/librte_compressdev/rte_compressdev_version.map
new file mode 100644
index 00000000..6f900b67
--- /dev/null
+++ b/lib/librte_compressdev/rte_compressdev_version.map
@@ -0,0 +1,39 @@
+EXPERIMENTAL {
+ global:
+
+ rte_compressdev_capability_get;
+ rte_compressdev_close;
+ rte_compressdev_configure;
+ rte_compressdev_count;
+ rte_compressdev_dequeue_burst;
+ rte_compressdev_devices_get;
+ rte_compressdev_enqueue_burst;
+ rte_compressdev_get_dev_id;
+ rte_compressdev_get_feature_name;
+ rte_compressdev_info_get;
+ rte_compressdev_name_get;
+ rte_compressdev_pmd_allocate;
+ rte_compressdev_pmd_create;
+ rte_compressdev_pmd_destroy;
+ rte_compressdev_pmd_get_named_dev;
+ rte_compressdev_pmd_parse_input_args;
+ rte_compressdev_pmd_release_device;
+ rte_compressdev_private_xform_create;
+ rte_compressdev_private_xform_free;
+ rte_compressdev_queue_pair_count;
+ rte_compressdev_queue_pair_setup;
+ rte_compressdev_socket_id;
+ rte_compressdev_start;
+ rte_compressdev_stats_get;
+ rte_compressdev_stats_reset;
+ rte_compressdev_stop;
+ rte_compressdev_stream_create;
+ rte_compressdev_stream_free;
+ rte_comp_get_feature_name;
+ rte_comp_op_alloc;
+ rte_comp_op_bulk_alloc;
+ rte_comp_op_free;
+ rte_comp_op_pool_create;
+
+ local: *;
+};
diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
index bba8dee9..c1148887 100644
--- a/lib/librte_cryptodev/Makefile
+++ b/lib/librte_cryptodev/Makefile
@@ -23,6 +23,7 @@ SYMLINK-y-include += rte_crypto.h
SYMLINK-y-include += rte_crypto_sym.h
SYMLINK-y-include += rte_cryptodev.h
SYMLINK-y-include += rte_cryptodev_pmd.h
+SYMLINK-y-include += rte_crypto_asym.h
# versioning export map
EXPORT_MAP := rte_cryptodev_version.map
diff --git a/lib/librte_cryptodev/meson.build b/lib/librte_cryptodev/meson.build
index 234da323..295f509e 100644
--- a/lib/librte_cryptodev/meson.build
+++ b/lib/librte_cryptodev/meson.build
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-version = 3
+version = 4
sources = files('rte_cryptodev.c', 'rte_cryptodev_pmd.c')
headers = files('rte_cryptodev.h',
'rte_cryptodev_pmd.h',
'rte_crypto.h',
- 'rte_crypto_sym.h')
+ 'rte_crypto_sym.h',
+ 'rte_crypto_asym.h')
deps += ['kvargs', 'mbuf']
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index 95cf8615..fd5ef3a8 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -23,6 +23,7 @@ extern "C" {
#include <rte_common.h>
#include "rte_crypto_sym.h"
+#include "rte_crypto_asym.h"
/** Crypto operation types */
enum rte_crypto_op_type {
@@ -30,6 +31,8 @@ enum rte_crypto_op_type {
/**< Undefined operation type */
RTE_CRYPTO_OP_TYPE_SYMMETRIC,
/**< Symmetric operation */
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC
+ /**< Asymmetric operation */
};
/** Status of crypto operation */
@@ -73,20 +76,37 @@ enum rte_crypto_op_sess_type {
* rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst() .
*/
struct rte_crypto_op {
- uint8_t type;
- /**< operation type */
- uint8_t status;
- /**<
- * operation status - this is reset to
- * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
- * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
- * is successfully processed by a crypto PMD
- */
- uint8_t sess_type;
- /**< operation session type */
-
- uint8_t reserved[5];
- /**< Reserved bytes to fill 64 bits for future additions */
+ __extension__
+ union {
+ uint64_t raw;
+ __extension__
+ struct {
+ uint8_t type;
+ /**< operation type */
+ uint8_t status;
+ /**<
+ * operation status - this is reset to
+ * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation
+ * from mempool and will be set to
+ * RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+ * is successfully processed by a crypto PMD
+ */
+ uint8_t sess_type;
+ /**< operation session type */
+ uint8_t reserved[3];
+ /**< Reserved bytes to fill 64 bits for
+ * future additions
+ */
+ uint16_t private_data_offset;
+ /**< Offset to indicate start of private data (if any).
+ * The offset is counted from the start of the
+ * rte_crypto_op including IV.
+ * The private data may be used by the application
+ * to store information which should remain untouched
+ * in the library/driver
+ */
+ };
+ };
struct rte_mempool *mempool;
/**< crypto operation mempool which operation is allocated from */
@@ -97,6 +117,10 @@ struct rte_crypto_op {
union {
struct rte_crypto_sym_op sym[0];
/**< Symmetric operation parameters */
+
+ struct rte_crypto_asym_op asym[0];
+ /**< Asymmetric operation parameters */
+
}; /**< operation specific parameters */
};
@@ -117,6 +141,9 @@ __rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
__rte_crypto_sym_op_reset(op->sym);
break;
+ case RTE_CRYPTO_OP_TYPE_ASYMMETRIC:
+ memset(op->asym, 0, sizeof(struct rte_crypto_asym_op));
+ break;
case RTE_CRYPTO_OP_TYPE_UNDEFINED:
default:
break;
@@ -283,9 +310,14 @@ __rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
if (likely(op->mempool != NULL)) {
priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
- if (likely(priv_size >= size))
- return (void *)((uint8_t *)(op + 1) +
+ if (likely(priv_size >= size)) {
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ return (void *)((uint8_t *)(op + 1) +
sizeof(struct rte_crypto_sym_op));
+ if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
+ return (void *)((uint8_t *)(op + 1) +
+ sizeof(struct rte_crypto_asym_op));
+ }
}
return NULL;
@@ -388,6 +420,24 @@ rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
}
+/**
+ * Attach an asymmetric session to a crypto operation
+ *
+ * @param op crypto operation, must be of type asymmetric
+ * @param sess cryptodev asymmetric session
+ * @return
+ *  - 0 on success
+ *  - -1 if the op type is not asymmetric
+ */
+static inline int
+rte_crypto_op_attach_asym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_asym_session *sess)
+{
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_ASYMMETRIC))
+ return -1;
+
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->asym->session = sess;
+ return 0;
+}
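
A hedged allocation-and-attach sketch; asym_op_pool is assumed to have
been created with type RTE_CRYPTO_OP_TYPE_ASYMMETRIC and asym_sess
initialised beforehand:

    struct rte_crypto_op *op;

    op = rte_crypto_op_alloc(asym_op_pool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
    if (op == NULL ||
        rte_crypto_op_attach_asym_session(op, asym_sess) != 0)
        return -1;      /* pool empty, or op is not asymmetric */
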
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_crypto_asym.h b/lib/librte_cryptodev/rte_crypto_asym.h
new file mode 100644
index 00000000..5e185b2d
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_asym.h
@@ -0,0 +1,496 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef _RTE_CRYPTO_ASYM_H_
+#define _RTE_CRYPTO_ASYM_H_
+
+/**
+ * @file rte_crypto_asym.h
+ *
+ * RTE Definitions for Asymmetric Cryptography
+ *
+ * Defines asymmetric algorithms and modes, as well as supported
+ * asymmetric crypto operations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+#include <stdint.h>
+
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_common.h>
+
+typedef struct rte_crypto_param_t {
+ uint8_t *data;
+ /**< pointer to buffer holding data */
+ rte_iova_t iova;
+ /**< IO address of data buffer */
+ size_t length;
+ /**< length of data in bytes */
+} rte_crypto_param;
+
+/** asym xform type name strings */
+extern const char *
+rte_crypto_asym_xform_strings[];
+
+/** asym operations type name strings */
+extern const char *
+rte_crypto_asym_op_strings[];
+
+/**
+ * Asymmetric crypto transformation types.
+ * Each xform type maps to one asymmetric algorithm
+ * performing specific operation
+ *
+ */
+enum rte_crypto_asym_xform_type {
+ RTE_CRYPTO_ASYM_XFORM_UNSPECIFIED = 0,
+ /**< Invalid xform. */
+ RTE_CRYPTO_ASYM_XFORM_NONE,
+ /**< Xform type None.
+ * May be supported by PMD to support
+ * passthrough op for debugging purpose.
+ * If xform_type is none, op_type is disregarded.
+ */
+ RTE_CRYPTO_ASYM_XFORM_RSA,
+ /**< RSA. Performs Encrypt, Decrypt, Sign and Verify.
+ * Refer to rte_crypto_asym_op_type
+ */
+ RTE_CRYPTO_ASYM_XFORM_DH,
+ /**< Diffie-Hellman.
+ * Performs Key Generate and Shared Secret Compute.
+ * Refer to rte_crypto_asym_op_type
+ */
+ RTE_CRYPTO_ASYM_XFORM_DSA,
+ /**< Digital Signature Algorithm
+ * Performs Signature Generation and Verification.
+ * Refer to rte_crypto_asym_op_type
+ */
+ RTE_CRYPTO_ASYM_XFORM_MODINV,
+ /**< Modular Inverse
+ * Performs modular inverse b^(-1) mod n
+ */
+ RTE_CRYPTO_ASYM_XFORM_MODEX,
+ /**< Modular Exponentiation
+ * Performs modular exponentiation b^e mod n
+ */
+ RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
+ /**< End of list */
+};
+
+/**
+ * Asymmetric crypto operation type variants
+ */
+enum rte_crypto_asym_op_type {
+ RTE_CRYPTO_ASYM_OP_ENCRYPT,
+ /**< Asymmetric Encrypt operation */
+ RTE_CRYPTO_ASYM_OP_DECRYPT,
+ /**< Asymmetric Decrypt operation */
+ RTE_CRYPTO_ASYM_OP_SIGN,
+ /**< Signature Generation operation */
+ RTE_CRYPTO_ASYM_OP_VERIFY,
+ /**< Signature Verification operation */
+ RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE,
+ /**< DH Private Key generation operation */
+ RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE,
+ /**< DH Public Key generation operation */
+ RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE,
+ /**< DH Shared Secret compute operation */
+ RTE_CRYPTO_ASYM_OP_LIST_END
+};
+
+/**
+ * Padding types for RSA signature.
+ */
+enum rte_crypto_rsa_padding_type {
+ RTE_CRYPTO_RSA_PADDING_NONE = 0,
+ /**< RSA no padding scheme */
+ RTE_CRYPTO_RSA_PKCS1_V1_5_BT0,
+ /**< RSA PKCS#1 V1.5 Block Type 0 padding scheme
+ * as described in RFC 2313
+ */
+ RTE_CRYPTO_RSA_PKCS1_V1_5_BT1,
+ /**< RSA PKCS#1 V1.5 Block Type 01 padding scheme
+ * as described in RFC 2313
+ */
+ RTE_CRYPTO_RSA_PKCS1_V1_5_BT2,
+ /**< RSA PKCS#1 V1.5 Block Type 02 padding scheme
+ * as described in RFC 2313
+ */
+ RTE_CRYPTO_RSA_PADDING_OAEP,
+ /**< RSA PKCS#1 OAEP padding scheme */
+ RTE_CRYPTO_RSA_PADDING_PSS,
+ /**< RSA PKCS#1 PSS padding scheme */
+ RTE_CRYPTO_RSA_PADDING_TYPE_LIST_END
+};
+
+/**
+ * RSA private key type enumeration
+ *
+ * Enumerates the private key format required to perform an RSA crypto
+ * transform.
+ *
+ */
+enum rte_crypto_rsa_priv_key_type {
+ RTE_RSA_KEY_TYPE_EXP,
+ /**< RSA private key is an exponent */
+ RTE_RSA_KET_TYPE_QT,
+ /**< RSA private key is in quintuple format
+ * See rte_crypto_rsa_priv_key_qt
+ */
+};
+
+/**
+ * Structure describing RSA private key in quintuple format.
+ * See the PKCS #1 V1.5 RSA Cryptography Standard.
+ */
+struct rte_crypto_rsa_priv_key_qt {
+ rte_crypto_param p;
+ /**< p - Private key component P
+ * Private key component of RSA parameter required for CRT method
+ * of private key operations in Octet-string network byte order
+ * format.
+ */
+
+ rte_crypto_param q;
+ /**< q - Private key component Q
+ * Private key component of RSA parameter required for CRT method
+ * of private key operations in Octet-string network byte order
+ * format.
+ */
+
+ rte_crypto_param dP;
+ /**< dP - Private CRT component
+ * Private CRT component of RSA parameter required for CRT method
+ * RSA private key operations in Octet-string network byte order
+ * format.
+ * dP = d mod ( p - 1 )
+ */
+
+ rte_crypto_param dQ;
+ /**< dQ - Private CRT component
+ * Private CRT component of RSA parameter required for CRT method
+ * RSA private key operations in Octet-string network byte order
+ * format.
+ * dQ = d mod ( q - 1 )
+ */
+
+ rte_crypto_param qInv;
+ /**< qInv - Private CRT component
+ * Private CRT component of RSA parameter required for CRT method
+ * RSA private key operations in Octet-string network byte order
+ * format.
+ * qInv = q^(-1) mod p
+ */
+};
+
+/**
+ * Asymmetric RSA transform data
+ *
+ * Structure describing RSA xform params
+ *
+ */
+struct rte_crypto_rsa_xform {
+ rte_crypto_param n;
+ /**< n - Modulus
+ * Modulus data of RSA operation in Octet-string network
+ * byte order format.
+ */
+
+ rte_crypto_param e;
+ /**< e - Public key exponent
+ * Public key exponent used for RSA public key operations in Octet-
+ * string network byte order format.
+ */
+
+ enum rte_crypto_rsa_priv_key_type key_type;
+
+ __extension__
+ union {
+ rte_crypto_param d;
+ /**< d - Private key exponent
+ * Private key exponent used for RSA
+ * private key operations in
+ * Octet-string network byte order format.
+ */
+
+ struct rte_crypto_rsa_priv_key_qt qt;
+ /**< qt - Private key in quintuple format */
+ };
+};
+
+/**
+ * Asymmetric Modular exponentiation transform data
+ *
+ * Structure describing modular exponentiation xform params
+ *
+ */
+struct rte_crypto_modex_xform {
+ rte_crypto_param modulus;
+ /**< modulus
+ * Modulus of the modexp transform operation in octet-string
+ * network byte order format.
+ */
+
+ rte_crypto_param exponent;
+ /**< exponent
+ * Private exponent of the modexp transform operation in
+ * octet-string network byte order format.
+ */
+};
+
+/**
+ * Asymmetric modular inverse transform operation
+ *
+ * Structure describing modular inverse xform params
+ *
+ */
+struct rte_crypto_modinv_xform {
+ rte_crypto_param modulus;
+ /**<
+ * Pointer to the prime modulus data for modular
+ * inverse operation in octet-string network byte
+ * order format.
+ */
+};
+
+/**
+ * Asymmetric DH transform data
+ *
+ * Structure describing Diffie-Hellman xform params
+ *
+ */
+struct rte_crypto_dh_xform {
+ enum rte_crypto_asym_op_type type;
+ /**< Setup xform for key generate or shared secret compute */
+
+ rte_crypto_param p;
+ /**< p : Prime modulus data
+ * DH prime modulus data in octet-string network byte order format.
+ *
+ */
+
+ rte_crypto_param g;
+ /**< g : Generator
+ * DH group generator data in octet-string network byte order
+ * format.
+ *
+ */
+};
+
+/**
+ * Asymmetric Digital Signature transform operation
+ *
+ * Structure describing DSA xform params
+ *
+ */
+struct rte_crypto_dsa_xform {
+ rte_crypto_param p;
+ /**< p - Prime modulus
+ * Prime modulus data for DSA operation in Octet-string network byte
+ * order format.
+ */
+ rte_crypto_param q;
+ /**< q : Order of the subgroup.
+ * Order of the subgroup data in Octet-string network byte order
+ * format.
+ * (p-1) % q = 0
+ */
+ rte_crypto_param g;
+ /**< g: Generator of the subgroup
+ * Generator data in Octet-string network byte order format.
+ */
+ rte_crypto_param x;
+ /**< x: Private key of the signer in octet-string network
+ * byte order format.
+ * Used when app has pre-defined private key.
+ * Valid only when xform chain is DSA ONLY.
+ * If the xform chain is DH private key generate + DSA, then DSA sign
+ * compute will use internally generated key.
+ */
+};
+
+/**
+ * Operations params for modular operations:
+ * exponentiation and invert
+ *
+ */
+struct rte_crypto_mod_op_param {
+ rte_crypto_param base;
+ /**<
+ * Pointer to base of modular exponentiation/inversion data in
+ * Octet-string network byte order format.
+ */
+};
+
+/**
+ * Asymmetric crypto transform data
+ *
+ * Structure describing asym xforms.
+ */
+struct rte_crypto_asym_xform {
+ struct rte_crypto_asym_xform *next;
+ /**< Pointer to next xform to set up xform chain.*/
+ enum rte_crypto_asym_xform_type xform_type;
+ /**< Asymmetric crypto transform */
+
+ __extension__
+ union {
+ struct rte_crypto_rsa_xform rsa;
+ /**< RSA xform parameters */
+
+ struct rte_crypto_modex_xform modex;
+ /**< Modular Exponentiation xform parameters */
+
+ struct rte_crypto_modinv_xform modinv;
+ /**< Modulus Inverse xform parameters */
+
+ struct rte_crypto_dh_xform dh;
+ /**< DH xform parameters */
+
+ struct rte_crypto_dsa_xform dsa;
+ /**< DSA xform parameters */
+ };
+};
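
A sketch of filling this structure for modular exponentiation; the mod/exp
buffers, their IOVAs and lengths are assumed to be provided by the
application in big-endian (octet-string) form:

    struct rte_crypto_asym_xform xform = {
        .next = NULL,
        .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
        .modex = {
            .modulus  = { .data = mod, .iova = mod_iova,
                          .length = mod_len },
            .exponent = { .data = exp, .iova = exp_iova,
                          .length = exp_len },
        },
    };
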
+
+struct rte_cryptodev_asym_session;
+
+/**
+ * RSA operation params
+ *
+ */
+struct rte_crypto_rsa_op_param {
+ enum rte_crypto_asym_op_type op_type;
+ /**< Type of RSA operation for transform */
+
+ rte_crypto_param message;
+ /**<
+ * Pointer to data
+ * - to be encrypted for RSA public encrypt.
+ * - to be decrypted for RSA private decrypt.
+ * - to be signed for RSA sign generation.
+ * - to be authenticated for RSA sign verification.
+ */
+
+ rte_crypto_param sign;
+ /**<
+ * Pointer to RSA signature data. If operation is RSA
+ * sign @ref RTE_CRYPTO_ASYM_OP_SIGN, buffer will be
+ * over-written with generated signature.
+ *
+ * Length of the signature data will be equal to the
+ * RSA modulus length.
+ */
+
+ enum rte_crypto_rsa_padding_type pad;
+ /**< RSA padding scheme to be used for transform */
+
+ enum rte_crypto_auth_algorithm md;
+ /**< Hash algorithm to be used for data hash if padding
+ * scheme is either OAEP or PSS. Valid hash algorithms
+ * are:
+ * MD5, SHA1, SHA224, SHA256, SHA384, SHA512
+ */
+
+ enum rte_crypto_auth_algorithm mgf1md;
+ /**<
+ * Hash algorithm to be used for mask generation if
+ * padding scheme is either OAEP or PSS. If padding
+ * scheme is unspecified, the data hash algorithm is used
+ * for mask generation. Valid hash algorithms are:
+ * MD5, SHA1, SHA224, SHA256, SHA384, SHA512
+ */
+};
+
+/**
+ * Diffie-Hellman operation params
+ */
+struct rte_crypto_dh_op_param {
+ rte_crypto_param pub_key;
+ /**<
+ * Output: generated public key, when the xform op type is
+ * PUBLIC_KEY_GENERATE.
+ * Input: the peer's public key, when the xform op type is
+ * SHARED_SECRET_COMPUTE.
+ * pub_key is in octet-string network byte order format.
+ *
+ */
+
+ rte_crypto_param priv_key;
+ /**<
+ * Output: generated private key, when the xform op type is
+ * PRIVATE_KEY_GENERATE.
+ * Input: the local private key, when the xform op type is
+ * SHARED_SECRET_COMPUTE.
+ * priv_key is in octet-string network byte order format.
+ *
+ */
+
+ rte_crypto_param shared_secret;
+ /**<
+ * Output: the computed shared secret, when the DH xform is
+ * set up with op type SHARED_SECRET_COMPUTE.
+ * shared_secret is in octet-string network byte order format.
+ *
+ */
+};
+
+/**
+ * DSA Operations params
+ *
+ */
+struct rte_crypto_dsa_op_param {
+ enum rte_crypto_asym_op_type op_type;
+ /**< Signature Generation or Verification */
+ rte_crypto_param message;
+ /**< input message to be signed or verified */
+ rte_crypto_param r;
+ /**< dsa sign component 'r' value
+ *
+ * output if op_type = sign generate,
+ * input if op_type = sign verify
+ */
+ rte_crypto_param s;
+ /**< dsa sign component 's' value
+ *
+ * output if op_type = sign generate,
+ * input if op_type = sign verify
+ */
+ rte_crypto_param y;
+ /**< y : Public key of the signer.
+ * Public key data of the signer in Octet-string network byte order
+ * format.
+ * y = g^x mod p
+ */
+};
+
+/**
+ * Asymmetric Cryptographic Operation.
+ *
+ * Structure describing asymmetric crypto operation params.
+ *
+ */
+struct rte_crypto_asym_op {
+ struct rte_cryptodev_asym_session *session;
+ /**< Handle for the initialised session context */
+
+ __extension__
+ union {
+ struct rte_crypto_rsa_op_param rsa;
+ struct rte_crypto_mod_op_param modex;
+ struct rte_crypto_mod_op_param modinv;
+ struct rte_crypto_dh_op_param dh;
+ struct rte_crypto_dsa_op_param dsa;
+ };
+} __rte_cache_aligned;
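
Filling the op for the modex xform above might look as follows (a sketch;
base is assumed to be an rte_malloc()'d big-endian buffer, and how the PMD
returns the result is left to the driver in this API revision):

    op->asym->modex.base.data = base;
    op->asym->modex.base.iova = rte_malloc_virt2iova(base);
    op->asym->modex.base.length = base_len;
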
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_ASYM_H_ */
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index 60797e9c..eb5afc5e 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -245,6 +245,23 @@ enum rte_crypto_auth_algorithm {
RTE_CRYPTO_AUTH_ZUC_EIA3,
/**< ZUC algorithm in EIA3 mode */
+ RTE_CRYPTO_AUTH_SHA3_224,
+ /**< 224 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_224_HMAC,
+ /**< HMAC using 224 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_256,
+ /**< 256 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_256_HMAC,
+ /**< HMAC using 256 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_384,
+ /**< 384 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_384_HMAC,
+ /**< HMAC using 384 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_512,
+ /**< 512 bit SHA3 algorithm. */
+ RTE_CRYPTO_AUTH_SHA3_512_HMAC,
+ /**< HMAC using 512 bit SHA3 algorithm. */
+
RTE_CRYPTO_AUTH_LIST_END
};
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 8745b6b0..63ae23f0 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -166,6 +166,31 @@ rte_crypto_aead_operation_strings[] = {
[RTE_CRYPTO_AEAD_OP_DECRYPT] = "decrypt"
};
+/**
+ * Asymmetric crypto transform operation strings identifiers.
+ */
+const char *rte_crypto_asym_xform_strings[] = {
+ [RTE_CRYPTO_ASYM_XFORM_NONE] = "none",
+ [RTE_CRYPTO_ASYM_XFORM_RSA] = "rsa",
+ [RTE_CRYPTO_ASYM_XFORM_MODEX] = "modexp",
+ [RTE_CRYPTO_ASYM_XFORM_MODINV] = "modinv",
+ [RTE_CRYPTO_ASYM_XFORM_DH] = "dh",
+ [RTE_CRYPTO_ASYM_XFORM_DSA] = "dsa",
+};
+
+/**
+ * Asymmetric crypto operation strings identifiers.
+ */
+const char *rte_crypto_asym_op_strings[] = {
+ [RTE_CRYPTO_ASYM_OP_ENCRYPT] = "encrypt",
+ [RTE_CRYPTO_ASYM_OP_DECRYPT] = "decrypt",
+ [RTE_CRYPTO_ASYM_OP_SIGN] = "sign",
+ [RTE_CRYPTO_ASYM_OP_VERIFY] = "verify",
+ [RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE] = "priv_key_generate",
+ [RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE] = "pub_key_generate",
+ [RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE] = "sharedsecret_compute",
+};
+
int
rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum,
const char *algo_string)
@@ -217,6 +242,24 @@ rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
return -1;
}
+int __rte_experimental
+rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
+ const char *xform_string)
+{
+ unsigned int i;
+
+ for (i = 1; i < RTE_DIM(rte_crypto_asym_xform_strings); i++) {
+ if (strcmp(xform_string,
+ rte_crypto_asym_xform_strings[i]) == 0) {
+ *xform_enum = (enum rte_crypto_asym_xform_type) i;
+ return 0;
+ }
+ }
+
+ /* Invalid string */
+ return -1;
+}
+
/**
* The crypto auth operation strings identifiers.
* It could be used in application command line.
@@ -262,19 +305,62 @@ rte_cryptodev_sym_capability_get(uint8_t dev_id,
}
-#define param_range_check(x, y) \
- (((x < y.min) || (x > y.max)) || \
- (y.increment != 0 && (x % y.increment) != 0))
+static int
+param_range_check(uint16_t size, const struct rte_crypto_param_range *range)
+{
+ unsigned int next_size;
+
+ /* Check lower/upper bounds */
+ if (size < range->min)
+ return -1;
+
+ if (size > range->max)
+ return -1;
+
+ /* If range is actually only one value, size is correct */
+ if (range->increment == 0)
+ return 0;
+
+ /* Check if value is one of the supported sizes */
+ for (next_size = range->min; next_size <= range->max;
+ next_size += range->increment)
+ if (size == next_size)
+ return 0;
+
+ return -1;
+}
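
To illustrate the range semantics with concrete numbers: a capability
advertising min = 16, max = 32, increment = 8 accepts sizes 16, 24 and 32
only, while increment = 0 collapses the range to the single value
min == max. For example:

    /* with cipher.key_size = {.min = 16, .max = 32, .increment = 8},
     * a 24-byte key passes and a 20-byte key is rejected (a 16-byte IV
     * is assumed to be within the IV range as well) */
    if (rte_cryptodev_sym_capability_check_cipher(capability, 24, 16) == 0)
        /* supported */;
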
+
+const struct rte_cryptodev_asymmetric_xform_capability * __rte_experimental
+rte_cryptodev_asym_capability_get(uint8_t dev_id,
+ const struct rte_cryptodev_asym_capability_idx *idx)
+{
+ const struct rte_cryptodev_capabilities *capability;
+ struct rte_cryptodev_info dev_info;
+ unsigned int i = 0;
+
+ memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
+ rte_cryptodev_info_get(dev_id, &dev_info);
+
+ while ((capability = &dev_info.capabilities[i++])->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_ASYMMETRIC)
+ continue;
+
+ if (capability->asym.xform_capa.xform_type == idx->type)
+ return &capability->asym.xform_capa;
+ }
+ return NULL;
+};
int
rte_cryptodev_sym_capability_check_cipher(
const struct rte_cryptodev_symmetric_capability *capability,
uint16_t key_size, uint16_t iv_size)
{
- if (param_range_check(key_size, capability->cipher.key_size))
+ if (param_range_check(key_size, &capability->cipher.key_size) != 0)
return -1;
- if (param_range_check(iv_size, capability->cipher.iv_size))
+ if (param_range_check(iv_size, &capability->cipher.iv_size) != 0)
return -1;
return 0;
@@ -285,13 +371,13 @@ rte_cryptodev_sym_capability_check_auth(
const struct rte_cryptodev_symmetric_capability *capability,
uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
{
- if (param_range_check(key_size, capability->auth.key_size))
+ if (param_range_check(key_size, &capability->auth.key_size) != 0)
return -1;
- if (param_range_check(digest_size, capability->auth.digest_size))
+ if (param_range_check(digest_size, &capability->auth.digest_size) != 0)
return -1;
- if (param_range_check(iv_size, capability->auth.iv_size))
+ if (param_range_check(iv_size, &capability->auth.iv_size) != 0)
return -1;
return 0;
@@ -303,20 +389,56 @@ rte_cryptodev_sym_capability_check_aead(
uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
uint16_t iv_size)
{
- if (param_range_check(key_size, capability->aead.key_size))
+ if (param_range_check(key_size, &capability->aead.key_size) != 0)
return -1;
- if (param_range_check(digest_size, capability->aead.digest_size))
+ if (param_range_check(digest_size, &capability->aead.digest_size) != 0)
return -1;
- if (param_range_check(aad_size, capability->aead.aad_size))
+ if (param_range_check(aad_size, &capability->aead.aad_size) != 0)
return -1;
- if (param_range_check(iv_size, capability->aead.iv_size))
+ if (param_range_check(iv_size, &capability->aead.iv_size) != 0)
return -1;
return 0;
}
+int __rte_experimental
+rte_cryptodev_asym_xform_capability_check_optype(
+ const struct rte_cryptodev_asymmetric_xform_capability *capability,
+ enum rte_crypto_asym_op_type op_type)
+{
+ if (capability->op_types & (1 << op_type))
+ return 1;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_cryptodev_asym_xform_capability_check_modlen(
+ const struct rte_cryptodev_asymmetric_xform_capability *capability,
+ uint16_t modlen)
+{
+ /* no need to check against limits if min or max is 0 */
+ if (capability->modlen.min != 0) {
+ if (modlen < capability->modlen.min)
+ return -1;
+ }
+
+ if (capability->modlen.max != 0) {
+ if (modlen > capability->modlen.max)
+ return -1;
+ }
+
+ /* in any case, check if the given modlen is a multiple of increment */
+ if (capability->modlen.increment != 0) {
+ if (modlen % (capability->modlen.increment))
+ return -1;
+ }
+
+ return 0;
+}
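
A hedged example of the intended call sequence, checking whether a device
can take a 256-byte (2048-bit) modulus before building a MODEX xform;
modlen is assumed to be expressed in bytes:

    struct rte_cryptodev_asym_capability_idx idx = {
        .type = RTE_CRYPTO_ASYM_XFORM_MODEX,
    };
    const struct rte_cryptodev_asymmetric_xform_capability *capa =
        rte_cryptodev_asym_capability_get(dev_id, &idx);

    if (capa == NULL ||
        rte_cryptodev_asym_xform_capability_check_modlen(capa, 256) != 0)
        return -ENOTSUP;
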
+
const char *
rte_cryptodev_get_feature_name(uint64_t flag)
@@ -340,12 +462,22 @@ rte_cryptodev_get_feature_name(uint64_t flag)
return "CPU_AESNI";
case RTE_CRYPTODEV_FF_HW_ACCELERATED:
return "HW_ACCELERATED";
- case RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER:
- return "MBUF_SCATTER_GATHER";
+ case RTE_CRYPTODEV_FF_IN_PLACE_SGL:
+ return "IN_PLACE_SGL";
+ case RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT:
+ return "OOP_SGL_IN_SGL_OUT";
+ case RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT:
+ return "OOP_SGL_IN_LB_OUT";
+ case RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT:
+ return "OOP_LB_IN_SGL_OUT";
+ case RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT:
+ return "OOP_LB_IN_LB_OUT";
case RTE_CRYPTODEV_FF_CPU_NEON:
return "CPU_NEON";
case RTE_CRYPTODEV_FF_CPU_ARM_CE:
return "CPU_ARM_CE";
+ case RTE_CRYPTODEV_FF_SECURITY:
+ return "SECURITY_PROTOCOL";
default:
return NULL;
}
@@ -680,50 +812,6 @@ rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
}
int
-rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
-{
- struct rte_cryptodev *dev;
-
- if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
- CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
- return -EINVAL;
- }
-
- dev = &rte_crypto_devices[dev_id];
- if (queue_pair_id >= dev->data->nb_queue_pairs) {
- CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
- return -EINVAL;
- }
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);
-
- return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
-
-}
-
-int
-rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
-{
- struct rte_cryptodev *dev;
-
- if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
- CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
- return -EINVAL;
- }
-
- dev = &rte_crypto_devices[dev_id];
- if (queue_pair_id >= dev->data->nb_queue_pairs) {
- CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
- return -EINVAL;
- }
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);
-
- return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
-
-}
-
-int
rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
{
struct rte_cryptodev *dev;
@@ -943,6 +1031,7 @@ rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->driver_name = dev->device->driver->name;
+ dev_info->device = dev->device;
}
@@ -1075,8 +1164,46 @@ rte_cryptodev_sym_session_init(uint8_t dev_id,
index = dev->driver_id;
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_configure, -ENOTSUP);
+
if (sess->sess_private_data[index] == NULL) {
- ret = dev->dev_ops->session_configure(dev, xforms, sess, mp);
+ ret = dev->dev_ops->sym_session_configure(dev, xforms,
+ sess, mp);
+ if (ret < 0) {
+ CDEV_LOG_ERR(
+ "dev_id %d failed to configure session details",
+ dev_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int __rte_experimental
+rte_cryptodev_asym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_crypto_asym_xform *xforms,
+ struct rte_mempool *mp)
+{
+ struct rte_cryptodev *dev;
+ uint8_t index;
+ int ret;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (sess == NULL || xforms == NULL || dev == NULL)
+ return -EINVAL;
+
+ index = dev->driver_id;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_configure,
+ -ENOTSUP);
+
+ if (sess->sess_private_data[index] == NULL) {
+ ret = dev->dev_ops->asym_session_configure(dev,
+ xforms,
+ sess, mp);
if (ret < 0) {
CDEV_LOG_ERR(
"dev_id %d failed to configure session details",
@@ -1099,69 +1226,54 @@ rte_cryptodev_sym_session_create(struct rte_mempool *mp)
return NULL;
}
- /* Clear device session pointer */
- memset(sess, 0, (sizeof(void *) * nb_drivers));
+ /* Clear device session pointer.
+ * Include the flag indicating presence of user data
+ */
+ memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
return sess;
}
-int
-rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id,
- struct rte_cryptodev_sym_session *sess)
+struct rte_cryptodev_asym_session * __rte_experimental
+rte_cryptodev_asym_session_create(struct rte_mempool *mp)
{
- struct rte_cryptodev *dev;
+ struct rte_cryptodev_asym_session *sess;
- if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
- CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
- return -EINVAL;
+ /* Allocate a session structure from the session pool */
+ if (rte_mempool_get(mp, (void **)&sess)) {
+ CDEV_LOG_ERR("couldn't get object from session mempool");
+ return NULL;
}
- dev = &rte_crypto_devices[dev_id];
-
- /* The API is optional, not returning error if driver do not suuport */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_attach_session, 0);
-
- void *sess_priv = get_session_private_data(sess, dev->driver_id);
-
- if (dev->dev_ops->qp_attach_session(dev, qp_id, sess_priv)) {
- CDEV_LOG_ERR("dev_id %d failed to attach qp: %d with session",
- dev_id, qp_id);
- return -EPERM;
- }
+ /* Clear device session pointer.
+ * Include the flag indicating presence of private data
+ */
+ memset(sess, 0, (sizeof(void *) * nb_drivers) + sizeof(uint8_t));
- return 0;
+ return sess;
}
int
-rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id,
+rte_cryptodev_sym_session_clear(uint8_t dev_id,
struct rte_cryptodev_sym_session *sess)
{
struct rte_cryptodev *dev;
- if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
- CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
- return -EINVAL;
- }
-
- dev = &rte_crypto_devices[dev_id];
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
- /* The API is optional, not returning error if driver do not suuport */
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->qp_detach_session, 0);
+ if (dev == NULL || sess == NULL)
+ return -EINVAL;
- void *sess_priv = get_session_private_data(sess, dev->driver_id);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->sym_session_clear, -ENOTSUP);
- if (dev->dev_ops->qp_detach_session(dev, qp_id, sess_priv)) {
- CDEV_LOG_ERR("dev_id %d failed to detach qp: %d from session",
- dev_id, qp_id);
- return -EPERM;
- }
+ dev->dev_ops->sym_session_clear(dev, sess);
return 0;
}
-int
-rte_cryptodev_sym_session_clear(uint8_t dev_id,
- struct rte_cryptodev_sym_session *sess)
+int __rte_experimental
+rte_cryptodev_asym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess)
{
struct rte_cryptodev *dev;
@@ -1170,7 +1282,9 @@ rte_cryptodev_sym_session_clear(uint8_t dev_id,
if (dev == NULL || sess == NULL)
return -EINVAL;
- dev->dev_ops->session_clear(dev, sess);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->asym_session_clear, -ENOTSUP);
+
+ dev->dev_ops->asym_session_clear(dev, sess);
return 0;
}
@@ -1187,7 +1301,7 @@ rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
/* Check that all device private data has been freed */
for (i = 0; i < nb_drivers; i++) {
- sess_priv = get_session_private_data(sess, i);
+ sess_priv = get_sym_session_private_data(sess, i);
if (sess_priv != NULL)
return -EBUSY;
}
@@ -1199,18 +1313,55 @@ rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess)
return 0;
}
+int __rte_experimental
+rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess)
+{
+ uint8_t i;
+ void *sess_priv;
+ struct rte_mempool *sess_mp;
+
+ if (sess == NULL)
+ return -EINVAL;
+
+ /* Check that all device private data has been freed */
+ for (i = 0; i < nb_drivers; i++) {
+ sess_priv = get_asym_session_private_data(sess, i);
+ if (sess_priv != NULL)
+ return -EBUSY;
+ }
+
+ /* Return session to mempool */
+ sess_mp = rte_mempool_from_obj(sess);
+ rte_mempool_put(sess_mp, sess);
+
+ return 0;
+}
+
+
unsigned int
-rte_cryptodev_get_header_session_size(void)
+rte_cryptodev_sym_get_header_session_size(void)
{
/*
* Header contains pointers to the private data
- * of all registered drivers
+ * of all registered drivers, and a flag which
+ * indicates presence of user data
*/
- return (sizeof(void *) * nb_drivers);
+ return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
+}
+
+unsigned int __rte_experimental
+rte_cryptodev_asym_get_header_session_size(void)
+{
+ /*
+ * Header contains pointers to the private data
+ * of all registered drivers, and a flag which
+ * indicates presence of private data
+ */
+ return ((sizeof(void *) * nb_drivers) + sizeof(uint8_t));
}
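
A minimal usage sketch (not part of this patch; pool name and sizes are
illustrative assumptions): the renamed accessor sizes the elements of a
session-header mempool.

	#include <rte_cryptodev.h>
	#include <rte_mempool.h>

	static struct rte_mempool *
	make_session_header_pool(int socket_id)
	{
		/* per-driver pointers plus the flag byte; add extra room here
		 * if rte_cryptodev_sym_session_set_user_data() will be used
		 */
		unsigned int elt_size =
			rte_cryptodev_sym_get_header_session_size();

		/* 2048 headers with a cache of 64 - arbitrary example values */
		return rte_mempool_create("sess_hdr_pool", 2048, elt_size, 64,
				0, NULL, NULL, NULL, NULL, socket_id, 0);
	}
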
unsigned int
-rte_cryptodev_get_private_session_size(uint8_t dev_id)
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
{
struct rte_cryptodev *dev;
unsigned int header_size = sizeof(void *) * nb_drivers;
@@ -1221,10 +1372,10 @@ rte_cryptodev_get_private_session_size(uint8_t dev_id)
dev = rte_cryptodev_pmd_get_dev(dev_id);
- if (*dev->dev_ops->session_get_size == NULL)
+ if (*dev->dev_ops->sym_session_get_size == NULL)
return 0;
- priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+ priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev);
/*
* If size is less than session header size,
@@ -1238,6 +1389,61 @@ rte_cryptodev_get_private_session_size(uint8_t dev_id)
}
+unsigned int __rte_experimental
+rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+ unsigned int header_size = sizeof(void *) * nb_drivers;
+ unsigned int priv_sess_size;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+ return 0;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (*dev->dev_ops->asym_session_get_size == NULL)
+ return 0;
+
+ priv_sess_size = (*dev->dev_ops->asym_session_get_size)(dev);
+ if (priv_sess_size < header_size)
+ return header_size;
+
+ return priv_sess_size;
+
+}
+
+int __rte_experimental
+rte_cryptodev_sym_session_set_user_data(
+ struct rte_cryptodev_sym_session *sess,
+ void *data,
+ uint16_t size)
+{
+ uint16_t off_set = sizeof(void *) * nb_drivers;
+ uint8_t *user_data_present = (uint8_t *)sess + off_set;
+
+ if (sess == NULL)
+ return -EINVAL;
+
+ *user_data_present = 1;
+ off_set += sizeof(uint8_t);
+ rte_memcpy((uint8_t *)sess + off_set, data, size);
+ return 0;
+}
+
+void * __rte_experimental
+rte_cryptodev_sym_session_get_user_data(
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint16_t off_set = sizeof(void *) * nb_drivers;
+ uint8_t *user_data_present = (uint8_t *)sess + off_set;
+
+ if (sess == NULL || !*user_data_present)
+ return NULL;
+
+ off_set += sizeof(uint8_t);
+ return (uint8_t *)sess + off_set;
+}
+
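
A hedged sketch of the new user-data accessors above; "struct app_ctx" is
invented for illustration, and the session mempool is assumed to have been
sized with room for the user data after the header.

	struct app_ctx { uint32_t flow_id; };

	static int
	tag_session(struct rte_cryptodev_sym_session *sess, struct app_ctx *ctx)
	{
		/* copies *ctx into the area after the driver pointers and flag */
		return rte_cryptodev_sym_session_set_user_data(sess, ctx,
				sizeof(*ctx));
	}

	static struct app_ctx *
	session_tag(struct rte_cryptodev_sym_session *sess)
	{
		/* returns NULL if no user data was ever set */
		return rte_cryptodev_sym_session_get_user_data(sess);
	}
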
/** Initialise rte_crypto_op mempool element */
static void
rte_crypto_op_init(struct rte_mempool *mempool,
@@ -1265,9 +1471,17 @@ rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
struct rte_crypto_op_pool_private *priv;
unsigned elt_size = sizeof(struct rte_crypto_op) +
- sizeof(struct rte_crypto_sym_op) +
priv_size;
+ if (type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ elt_size += sizeof(struct rte_crypto_sym_op);
+ } else if (type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+ elt_size += sizeof(struct rte_crypto_asym_op);
+ } else {
+ CDEV_LOG_ERR("Invalid op_type");
+ return NULL;
+ }
+
/* lookup mempool in case already allocated */
struct rte_mempool *mp = rte_mempool_lookup(name);
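
With the branch above, an asymmetric op pool is requested by passing the new
op type. A minimal sketch, with pool name and sizes as assumptions:

	static struct rte_mempool *
	make_asym_op_pool(int socket_id)
	{
		/* element counts are arbitrary example values */
		return rte_crypto_op_pool_create("asym_op_pool",
				RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
				1024 /* nb_elts */, 64 /* cache */,
				0 /* priv_size */, socket_id);
	}
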
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index c8fa6893..4099823f 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -1,32 +1,5 @@
-/*-
- *
- * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2017 Intel Corporation.
*/
#ifndef _RTE_CRYPTODEV_H_
@@ -65,7 +38,6 @@ extern const char **rte_cyptodev_names;
RTE_FMT(RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
RTE_FMT_TAIL(__VA_ARGS__,)))
-#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
#define CDEV_LOG_DEBUG(...) \
RTE_LOG(DEBUG, CRYPTODEV, \
RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
@@ -76,13 +48,6 @@ extern const char **rte_cyptodev_names;
RTE_FMT("[%s] %s: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
dev, __func__, RTE_FMT_TAIL(__VA_ARGS__,)))
-#else
-#define CDEV_LOG_DEBUG(...) (void)0
-#define CDEV_PMD_TRACE(...) (void)0
-#endif
-
-
-
/**
* A macro that points to an offset from the start
* of the crypto operation structure (rte_crypto_op)
@@ -178,6 +143,35 @@ struct rte_cryptodev_symmetric_capability {
};
};
+/**
+ * Asymmetric Xform Crypto Capability
+ *
+ */
+struct rte_cryptodev_asymmetric_xform_capability {
+ enum rte_crypto_asym_xform_type xform_type;
+ /**< Transform type: RSA/MODEXP/DH/DSA/MODINV */
+
+ uint32_t op_types;
+ /**< bitmask for supported rte_crypto_asym_op_type */
+
+ __extension__
+ union {
+ struct rte_crypto_param_range modlen;
+ /**< Range of modulus length supported by modulus based xform.
+ * Value 0 means implementation default
+ */
+ };
+};
+
+/**
+ * Asymmetric Crypto Capability
+ *
+ */
+struct rte_cryptodev_asymmetric_capability {
+ struct rte_cryptodev_asymmetric_xform_capability xform_capa;
+};
+
+
/** Structure used to capture a capability of a crypto device */
struct rte_cryptodev_capabilities {
enum rte_crypto_op_type op;
@@ -187,6 +181,8 @@ struct rte_cryptodev_capabilities {
union {
struct rte_cryptodev_symmetric_capability sym;
/**< Symmetric operation capability parameters */
+ struct rte_cryptodev_asymmetric_capability asym;
+ /**< Asymmetric operation capability parameters */
};
};
@@ -201,7 +197,17 @@ struct rte_cryptodev_sym_capability_idx {
};
/**
- * Provide capabilities available for defined device and algorithm
+ * Structure used to describe asymmetric crypto xforms.
+ * Each xform maps to one asym algorithm.
+ *
+ */
+struct rte_cryptodev_asym_capability_idx {
+ enum rte_crypto_asym_xform_type type;
+ /**< Asymmetric xform (algo) type */
+};
+
+/**
+ * Provide capabilities available for defined device and algorithm
*
* @param dev_id The identifier of the device.
* @param idx Description of crypto algorithms.
@@ -215,6 +221,20 @@ rte_cryptodev_sym_capability_get(uint8_t dev_id,
const struct rte_cryptodev_sym_capability_idx *idx);
/**
+ * Provide capabilities available for defined device and xform
+ *
+ * @param dev_id The identifier of the device.
+ * @param idx Description of asym crypto xform.
+ *
+ * @return
+ * - Return description of the asymmetric crypto capability if it exists.
+ * - Return NULL if the capability does not exist.
+ */
+const struct rte_cryptodev_asymmetric_xform_capability * __rte_experimental
+rte_cryptodev_asym_capability_get(uint8_t dev_id,
+ const struct rte_cryptodev_asym_capability_idx *idx);
+
+/**
* Check if key size and initial vector are supported
* in crypto cipher capability
*
@@ -270,6 +290,36 @@ rte_cryptodev_sym_capability_check_aead(
uint16_t iv_size);
/**
+ * Check if op type is supported
+ *
+ * @param capability Description of the asymmetric crypto capability.
+ * @param op_type op type
+ *
+ * @return
+ * - Return 1 if the op type is supported
+ * - Return 0 if unsupported
+ */
+int __rte_experimental
+rte_cryptodev_asym_xform_capability_check_optype(
+ const struct rte_cryptodev_asymmetric_xform_capability *capability,
+ enum rte_crypto_asym_op_type op_type);
+
+/**
+ * Check if modulus length is in supported range
+ *
+ * @param capability Description of the asymmetric crypto capability.
+ * @param modlen modulus length.
+ *
+ * @return
+ * - Return 0 if the parameters are in range of the capability.
+ * - Return -1 if the parameters are out of range of the capability.
+ */
+int __rte_experimental
+rte_cryptodev_asym_xform_capability_check_modlen(
+ const struct rte_cryptodev_asymmetric_xform_capability *capability,
+ uint16_t modlen);
+
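
A sketch of the query flow these declarations enable; the RSA xform and the
128-byte (1024-bit) modulus are example values only.

	static int
	rsa_sign_supported(uint8_t dev_id)
	{
		const struct rte_cryptodev_asymmetric_xform_capability *capa;
		struct rte_cryptodev_asym_capability_idx cap_idx = {
			.type = RTE_CRYPTO_ASYM_XFORM_RSA,
		};

		capa = rte_cryptodev_asym_capability_get(dev_id, &cap_idx);
		if (capa == NULL)
			return 0;

		/* check_optype returns 1 when supported;
		 * check_modlen returns 0 when the length is in range
		 */
		return rte_cryptodev_asym_xform_capability_check_optype(capa,
					RTE_CRYPTO_ASYM_OP_SIGN) &&
			rte_cryptodev_asym_xform_capability_check_modlen(capa,
					128) == 0;
	}
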
+/**
* Provide the cipher algorithm enum, given an algorithm string
*
* @param algo_enum A pointer to the cipher algorithm
@@ -314,6 +364,22 @@ int
rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
const char *algo_string);
+/**
+ * Provide the Asymmetric xform enum, given an xform string
+ *
+ * @param xform_enum A pointer to the xform type
+ * enum to be filled
+ * @param xform_string xform string
+ *
+ * @return
+ * - Return -1 if string is not valid
+ * - Return 0 if the string is valid
+ */
+int __rte_experimental
+rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
+ const char *xform_string);
+
+
/** Macro used at end of crypto PMD list */
#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
@@ -327,31 +393,50 @@ rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum,
*
* Keep these flags synchronised with rte_cryptodev_get_feature_name()
*/
-#define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
+#define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
/**< Symmetric crypto operations are supported */
-#define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
+#define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
/**< Asymmetric crypto operations are supported */
-#define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
+#define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
/**< Chaining symmetric crypto operations are supported */
-#define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
+#define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
/**< Utilises CPU SIMD SSE instructions */
-#define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
+#define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
/**< Utilises CPU SIMD AVX instructions */
-#define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
+#define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
/**< Utilises CPU SIMD AVX2 instructions */
-#define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
+#define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
/**< Utilises CPU AES-NI instructions */
-#define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
-/**< Operations are off-loaded to an external hardware accelerator */
-#define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
+#define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
+/**< Operations are off-loaded to an
+ * external hardware accelerator
+ */
+#define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
/**< Utilises CPU SIMD AVX512 instructions */
-#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER (1ULL << 9)
-/**< Scatter-gather mbufs are supported */
-#define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 10)
+#define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9)
+/**< In-place Scatter-gather (SGL) buffers, with multiple segments,
+ * are supported
+ */
+#define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10)
+/**< Out-of-place Scatter-gather (SGL) buffers are
+ * supported in input and output
+ */
+#define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11)
+/**< Out-of-place Scatter-gather (SGL) buffers are supported
+ * in input, combined with linear buffers (LB), with a
+ * single segment in output
+ */
+#define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12)
+/**< Out-of-place Scatter-gather (SGL) buffers are supported
+ * in output, combined with linear buffers (LB) in input
+ */
+#define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13)
+/**< Out-of-place linear buffers (LB) are supported in input and output */
+#define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14)
/**< Utilises CPU NEON instructions */
-#define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 11)
+#define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15)
/**< Utilises ARM CPU Cryptographic Extensions */
-#define RTE_CRYPTODEV_FF_SECURITY (1ULL << 12)
+#define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16)
/**< Support Security Protocol Processing */
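
A short sketch of how an application would test the reworked SGL bits at
runtime:

	static int
	dev_supports_sgl_in_lb_out(uint8_t dev_id)
	{
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(dev_id, &info);
		/* multi-segment input gathered into a linear output buffer */
		return !!(info.feature_flags &
				RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT);
	}
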
@@ -369,11 +454,12 @@ rte_cryptodev_get_feature_name(uint64_t flag);
/** Crypto device information */
struct rte_cryptodev_info {
- const char *driver_name; /**< Driver name. */
- uint8_t driver_id; /**< Driver identifier */
- struct rte_pci_device *pci_dev; /**< PCI information. */
+ const char *driver_name; /**< Driver name. */
+ uint8_t driver_id; /**< Driver identifier */
+ struct rte_device *device; /**< Generic device information. */
- uint64_t feature_flags; /**< Feature flags */
+ uint64_t feature_flags;
+ /**< Feature flags expose HW/SW features for the given device */
const struct rte_cryptodev_capabilities *capabilities;
/**< Array of devices supported capabilities */
@@ -381,12 +467,17 @@ struct rte_cryptodev_info {
unsigned max_nb_queue_pairs;
/**< Maximum number of queue pairs supported by device. */
+ uint16_t min_mbuf_headroom_req;
+ /**< Minimum mbuf headroom required by device */
+
+ uint16_t min_mbuf_tailroom_req;
+ /**< Minimum mbuf tailroom required by device */
+
struct {
unsigned max_nb_sessions;
- /**< Maximum number of sessions supported by device. */
- unsigned int max_nb_sessions_per_qp;
- /**< Maximum number of sessions per queue pair.
- * Default 0 for infinite sessions
+ /**< Maximum number of sessions supported by device.
+ * If 0, the device does not limit the number of
+ * sessions that can be used.
*/
} sym;
};
@@ -602,39 +693,6 @@ rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
struct rte_mempool *session_pool);
/**
- * Start a specified queue pair of a device. It is used
- * when deferred_start flag of the specified queue is true.
- *
- * @param dev_id The identifier of the device
- * @param queue_pair_id The index of the queue pair to start. The value
- * must be in the range [0, nb_queue_pair - 1]
- * previously supplied to
- * rte_crypto_dev_configure().
- * @return
- * - 0: Success, the transmit queue is correctly set up.
- * - -EINVAL: The dev_id or the queue_id out of range.
- * - -ENOTSUP: The function not supported in PMD driver.
- */
-extern int
-rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id);
-
-/**
- * Stop specified queue pair of a device
- *
- * @param dev_id The identifier of the device
- * @param queue_pair_id The index of the queue pair to stop. The value
- * must be in the range [0, nb_queue_pair - 1]
- * previously supplied to
- * rte_cryptodev_configure().
- * @return
- * - 0: Success, the transmit queue is correctly set up.
- * - -EINVAL: The dev_id or the queue_id out of range.
- * - -ENOTSUP: The function not supported in PMD driver.
- */
-extern int
-rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id);
-
-/**
* Get the number of queue pairs on a specific crypto device
*
* @param dev_id Crypto device identifier.
@@ -749,7 +807,7 @@ struct rte_cryptodev {
struct rte_cryptodev_ops *dev_ops;
/**< Functions exported by PMD */
uint64_t feature_flags;
- /**< Supported features */
+ /**< Feature flags expose HW/SW features for the given device */
struct rte_device *device;
/**< Backing device */
@@ -897,9 +955,14 @@ rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
*/
struct rte_cryptodev_sym_session {
__extension__ void *sess_private_data[0];
- /**< Private session material */
+ /**< Private symmetric session material */
};
+/** Cryptodev asymmetric crypto session */
+struct rte_cryptodev_asym_session {
+ __extension__ void *sess_private_data[0];
+ /**< Private asymmetric session material */
+};
/**
* Create symmetric crypto session header (generic with no private data)
@@ -914,6 +977,18 @@ struct rte_cryptodev_sym_session *
rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
/**
+ * Create asymmetric crypto session header (generic with no private data)
+ *
+ * @param mempool mempool to allocate asymmetric session
+ * objects from
+ * @return
+ * - On success return pointer to asym-session
+ * - On failure returns NULL
+ */
+struct rte_cryptodev_asym_session * __rte_experimental
+rte_cryptodev_asym_session_create(struct rte_mempool *mempool);
+
+/**
* Frees symmetric crypto session header, after checking that all
* the device private data has been freed, returning it
* to its original mempool.
@@ -929,6 +1004,21 @@ int
rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
/**
+ * Frees asymmetric crypto session header, after checking that all
+ * the device private data has been freed, returning it
+ * to its original mempool.
+ *
+ * @param sess Session header to be freed.
+ *
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if session is NULL.
+ * - -EBUSY if not all device private data has been freed.
+ */
+int __rte_experimental
+rte_cryptodev_asym_session_free(struct rte_cryptodev_asym_session *sess);
+
+/**
* Fill out private data for the device id, based on its device type.
*
* @param dev_id ID of device that we want the session to be used on
@@ -940,7 +1030,8 @@ rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
* @return
* - On success, zero.
* - -EINVAL if input parameters are invalid.
- * - -ENOTSUP if crypto device does not support the crypto transform.
+ * - -ENOTSUP if crypto device does not support the crypto transform or
+ * does not support symmetric operations.
* - -ENOMEM if the private session could not be allocated.
*/
int
@@ -950,8 +1041,31 @@ rte_cryptodev_sym_session_init(uint8_t dev_id,
struct rte_mempool *mempool);
/**
+ * Initialize asymmetric session on a device with specific asymmetric xform
+ *
+ * @param dev_id ID of device that we want the session to be used on
+ * @param sess Session to be set up on a device
+ * @param xforms Asymmetric crypto transform operations to apply on flow
+ * processed with this session
+ * @param mempool Mempool to be used for internal allocation.
+ *
+ * @return
+ * - On success, zero.
+ * - -EINVAL if input parameters are invalid.
+ * - -ENOTSUP if crypto device does not support the crypto transform.
+ * - -ENOMEM if the private session could not be allocated.
+ */
+int __rte_experimental
+rte_cryptodev_asym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_crypto_asym_xform *xforms,
+ struct rte_mempool *mempool);
+
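
A minimal lifecycle sketch for the experimental asymmetric session API
declared above; the mempools and xform are assumed to be set up by the
application.

	static int
	run_asym_session(uint8_t dev_id, struct rte_crypto_asym_xform *xform,
			struct rte_mempool *hdr_pool,
			struct rte_mempool *priv_pool)
	{
		struct rte_cryptodev_asym_session *sess;

		sess = rte_cryptodev_asym_session_create(hdr_pool);
		if (sess == NULL)
			return -ENOMEM;

		if (rte_cryptodev_asym_session_init(dev_id, sess, xform,
				priv_pool) != 0) {
			rte_cryptodev_asym_session_free(sess);
			return -1;
		}

		/* ... enqueue/dequeue asymmetric ops referencing sess ... */

		rte_cryptodev_asym_session_clear(dev_id, sess);
		return rte_cryptodev_asym_session_free(sess);
	}
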
+/**
* Frees private data for the device id, based on its device type,
- * returning it to its mempool.
+ * returning it to its mempool. It is the application's responsibility
+ * to ensure that private session data is not cleared while there are
+ * still in-flight operations using it.
*
* @param dev_id ID of device that uses the session.
* @param sess Session containing the reference to the private data
@@ -959,63 +1073,70 @@ rte_cryptodev_sym_session_init(uint8_t dev_id,
* @return
* - 0 if successful.
* - -EINVAL if device is invalid or session is NULL.
+ * - -ENOTSUP if crypto device does not support symmetric operations.
*/
int
rte_cryptodev_sym_session_clear(uint8_t dev_id,
struct rte_cryptodev_sym_session *sess);
/**
+ * Frees resources held by the asymmetric session that were allocated
+ * during rte_cryptodev_asym_session_init
+ *
+ * @param dev_id ID of device that uses the asymmetric session.
+ * @param sess Asymmetric session set up on a device using
+ * rte_cryptodev_asym_session_init
+ * @return
+ * - 0 if successful.
+ * - -EINVAL if device is invalid or session is NULL.
+ */
+int __rte_experimental
+rte_cryptodev_asym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_asym_session *sess);
+
+/**
* Get the size of the header session, for all registered drivers.
*
* @return
- * Size of the header session.
+ * Size of the symmetric header session.
*/
unsigned int
-rte_cryptodev_get_header_session_size(void);
+rte_cryptodev_sym_get_header_session_size(void);
/**
- * Get the size of the private session data for a device.
- *
- * @param dev_id The device identifier.
+ * Get the size of the asymmetric session header, for all registered drivers.
*
* @return
- * - Size of the private data, if successful
- * - 0 if device is invalid or does not have private session
+ * Size of the asymmetric header session.
*/
-unsigned int
-rte_cryptodev_get_private_session_size(uint8_t dev_id);
+unsigned int __rte_experimental
+rte_cryptodev_asym_get_header_session_size(void);
/**
- * Attach queue pair with sym session.
+ * Get the size of the private symmetric session data
+ * for a device.
*
- * @param dev_id Device to which the session will be attached.
- * @param qp_id Queue pair to which the session will be attached.
- * @param session Session pointer previously allocated by
- * *rte_cryptodev_sym_session_create*.
+ * @param dev_id The device identifier.
*
* @return
- * - On success, zero.
- * - On failure, a negative value.
+ * - Size of the private data, if successful
+ * - 0 if device is invalid or does not have private
+ * symmetric session
*/
-int
-rte_cryptodev_queue_pair_attach_sym_session(uint8_t dev_id, uint16_t qp_id,
- struct rte_cryptodev_sym_session *session);
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
/**
- * Detach queue pair with sym session.
+ * Get the size of the private data for an asymmetric session
+ * on a device.
*
- * @param dev_id Device to which the session is attached.
- * @param qp_id Queue pair to which the session is attached.
- * @param session Session pointer previously allocated by
- * *rte_cryptodev_sym_session_create*.
+ * @param dev_id The device identifier.
*
* @return
- * - On success, zero.
- * - On failure, a negative value.
+ * - Size of the asymmetric private data, if successful
+ * - 0 if device is invalid or does not have private session
*/
-int
-rte_cryptodev_queue_pair_detach_sym_session(uint8_t dev_id, uint16_t qp_id,
- struct rte_cryptodev_sym_session *session);
+unsigned int __rte_experimental
+rte_cryptodev_asym_get_private_session_size(uint8_t dev_id);
/**
* Provide driver identifier.
@@ -1037,6 +1158,38 @@ int rte_cryptodev_driver_id_get(const char *name);
*/
const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
+/**
+ * Store user data in a session.
+ *
+ * @param sess Session pointer allocated by
+ * *rte_cryptodev_sym_session_create*.
+ * @param data Pointer to the user data.
+ * @param size Size of the user data.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int __rte_experimental
+rte_cryptodev_sym_session_set_user_data(
+ struct rte_cryptodev_sym_session *sess,
+ void *data,
+ uint16_t size);
+
+/**
+ * Get user data stored in a session.
+ *
+ * @param sess Session pointer allocated by
+ * *rte_cryptodev_sym_session_create*.
+ *
+ * @return
+ * - On success return pointer to user data.
+ * - On failure returns NULL.
+ */
+void * __rte_experimental
+rte_cryptodev_sym_session_get_user_data(
+ struct rte_cryptodev_sym_session *sess);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.c b/lib/librte_cryptodev/rte_cryptodev_pmd.c
index f2aac24b..2088ac3f 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.c
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.c
@@ -66,13 +66,6 @@ rte_cryptodev_pmd_parse_input_args(
goto free_kvlist;
ret = rte_kvargs_process(kvlist,
- RTE_CRYPTODEV_PMD_MAX_NB_SESS_ARG,
- &rte_cryptodev_pmd_parse_uint_arg,
- &params->max_nb_sessions);
- if (ret < 0)
- goto free_kvlist;
-
- ret = rte_kvargs_process(kvlist,
RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
&rte_cryptodev_pmd_parse_uint_arg,
&params->socket_id);
@@ -109,10 +102,9 @@ rte_cryptodev_pmd_create(const char *name,
device->driver->name, name);
CDEV_LOG_INFO("[%s] - Initialisation parameters - name: %s,"
- "socket id: %d, max queue pairs: %u, max sessions: %u",
+ "socket id: %d, max queue pairs: %u",
device->driver->name, name,
- params->socket_id, params->max_nb_queue_pairs,
- params->max_nb_sessions);
+ params->socket_id, params->max_nb_queue_pairs);
/* allocate device structure */
cryptodev = rte_cryptodev_pmd_allocate(name, params->socket_id);
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 089848e0..6ff49d64 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -1,32 +1,5 @@
-/*-
- *
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2016 Intel Corporation.
*/
#ifndef _RTE_CRYPTODEV_PMD_H_
@@ -59,18 +32,15 @@ extern "C" {
#define RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS 8
-#define RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS 2048
#define RTE_CRYPTODEV_PMD_NAME_ARG ("name")
#define RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG ("max_nb_queue_pairs")
-#define RTE_CRYPTODEV_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
#define RTE_CRYPTODEV_PMD_SOCKET_ID_ARG ("socket_id")
static const char * const cryptodev_pmd_valid_params[] = {
RTE_CRYPTODEV_PMD_NAME_ARG,
RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
- RTE_CRYPTODEV_PMD_MAX_NB_SESS_ARG,
RTE_CRYPTODEV_PMD_SOCKET_ID_ARG
};
@@ -83,7 +53,6 @@ struct rte_cryptodev_pmd_init_params {
size_t private_data_size;
int socket_id;
unsigned int max_nb_queue_pairs;
- unsigned int max_nb_sessions;
};
/** Global structure used for maintaining state of allocated crypto devices */
@@ -216,28 +185,6 @@ typedef void (*cryptodev_info_get_t)(struct rte_cryptodev *dev,
struct rte_cryptodev_info *dev_info);
/**
- * Start queue pair of a device.
- *
- * @param dev Crypto device pointer
- * @param qp_id Queue Pair Index
- *
- * @return Returns 0 on success.
- */
-typedef int (*cryptodev_queue_pair_start_t)(struct rte_cryptodev *dev,
- uint16_t qp_id);
-
-/**
- * Stop queue pair of a device.
- *
- * @param dev Crypto device pointer
- * @param qp_id Queue Pair Index
- *
- * @return Returns 0 on success.
- */
-typedef int (*cryptodev_queue_pair_stop_t)(struct rte_cryptodev *dev,
- uint16_t qp_id);
-
-/**
* Setup a queue pair for a device.
*
* @param dev Crypto device pointer
@@ -302,6 +249,17 @@ typedef int (*cryptodev_sym_create_session_pool_t)(
*/
typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
struct rte_cryptodev *dev);
+/**
+ * Get the size of an asymmetric cryptodev session
+ *
+ * @param dev Crypto device pointer
+ *
+ * @return
+ * - On success returns the size of the session structure for device
+ * - On failure returns 0
+ */
+typedef unsigned int (*cryptodev_asym_get_session_private_size_t)(
+ struct rte_cryptodev *dev);
/**
* Configure a Crypto session on a device.
@@ -321,7 +279,24 @@ typedef int (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *session,
struct rte_mempool *mp);
-
+/**
+ * Configure a Crypto asymmetric session on a device.
+ *
+ * @param dev Crypto device pointer
+ * @param xform Single or chain of crypto xforms
+ * @param priv_sess Pointer to cryptodev's private session structure
+ * @param mp Mempool where the private session is allocated
+ *
+ * @return
+ * - Returns 0 if the private session structure has been created successfully.
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if crypto device does not support the crypto transform.
+ * - Returns -ENOMEM if the private session could not be allocated.
+ */
+typedef int (*cryptodev_asym_configure_session_t)(struct rte_cryptodev *dev,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *session,
+ struct rte_mempool *mp);
/**
* Free driver private session data.
*
@@ -330,32 +305,14 @@ typedef int (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
*/
typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess);
-
-/**
- * Optional API for drivers to attach sessions with queue pair.
- * @param dev Crypto device pointer
- * @param qp_id queue pair id for attaching session
- * @param priv_sess Pointer to cryptodev's private session structure
- * @return
- * - Return 0 on success
- */
-typedef int (*cryptodev_sym_queue_pair_attach_session_t)(
- struct rte_cryptodev *dev,
- uint16_t qp_id,
- void *session_private);
-
/**
- * Optional API for drivers to detach sessions from queue pair.
+ * Free asymmetric session private data.
+ *
* @param dev Crypto device pointer
- * @param qp_id queue pair id for detaching session
- * @param priv_sess Pointer to cryptodev's private session structure
- * @return
- * - Return 0 on success
+ * @param sess Cryptodev session structure
*/
-typedef int (*cryptodev_sym_queue_pair_detach_session_t)(
- struct rte_cryptodev *dev,
- uint16_t qp_id,
- void *session_private);
+typedef void (*cryptodev_asym_free_session_t)(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess);
/** Crypto device operations function pointer table */
struct rte_cryptodev_ops {
@@ -375,23 +332,21 @@ struct rte_cryptodev_ops {
/**< Set up a device queue pair. */
cryptodev_queue_pair_release_t queue_pair_release;
/**< Release a queue pair. */
- cryptodev_queue_pair_start_t queue_pair_start;
- /**< Start a queue pair. */
- cryptodev_queue_pair_stop_t queue_pair_stop;
- /**< Stop a queue pair. */
cryptodev_queue_pair_count_t queue_pair_count;
/**< Get count of the queue pairs. */
- cryptodev_sym_get_session_private_size_t session_get_size;
+ cryptodev_sym_get_session_private_size_t sym_session_get_size;
/**< Return private session. */
- cryptodev_sym_configure_session_t session_configure;
+ cryptodev_asym_get_session_private_size_t asym_session_get_size;
+ /**< Return asym session private size. */
+ cryptodev_sym_configure_session_t sym_session_configure;
/**< Configure a Crypto session. */
- cryptodev_sym_free_session_t session_clear;
+ cryptodev_asym_configure_session_t asym_session_configure;
+ /**< Configure asymmetric Crypto session. */
+ cryptodev_sym_free_session_t sym_session_clear;
+ /**< Clear a Crypto session's private data. */
+ cryptodev_asym_free_session_t asym_session_clear;
/**< Clear a Crypto sessions private data. */
- cryptodev_sym_queue_pair_attach_session_t qp_attach_session;
- /**< Attach session to queue pair. */
- cryptodev_sym_queue_pair_detach_session_t qp_detach_session;
- /**< Detach session from queue pair. */
};
@@ -516,20 +471,32 @@ uint8_t rte_cryptodev_allocate_driver(struct cryptodev_driver *crypto_drv,
#define RTE_PMD_REGISTER_CRYPTO_DRIVER(crypto_drv, drv, driver_id)\
-RTE_INIT(init_ ##driver_id);\
-static void init_ ##driver_id(void)\
+RTE_INIT(init_ ##driver_id)\
{\
- driver_id = rte_cryptodev_allocate_driver(&crypto_drv, &(drv).driver);\
+ driver_id = rte_cryptodev_allocate_driver(&crypto_drv, &(drv));\
+}
+
+static inline void *
+get_sym_session_private_data(const struct rte_cryptodev_sym_session *sess,
+ uint8_t driver_id) {
+ return sess->sess_private_data[driver_id];
+}
+
+static inline void
+set_sym_session_private_data(struct rte_cryptodev_sym_session *sess,
+ uint8_t driver_id, void *private_data)
+{
+ sess->sess_private_data[driver_id] = private_data;
}
static inline void *
-get_session_private_data(const struct rte_cryptodev_sym_session *sess,
+get_asym_session_private_data(const struct rte_cryptodev_asym_session *sess,
uint8_t driver_id) {
return sess->sess_private_data[driver_id];
}
static inline void
-set_session_private_data(struct rte_cryptodev_sym_session *sess,
+set_asym_session_private_data(struct rte_cryptodev_asym_session *sess,
uint8_t driver_id, void *private_data)
{
sess->sess_private_data[driver_id] = private_data;
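
A hypothetical PMD-side sketch pairing the renamed helper with the new
sym_session_configure op; the "my_pmd_" names are invented for illustration.

	static int
	my_pmd_sym_session_configure(struct rte_cryptodev *dev,
			struct rte_crypto_sym_xform *xform,
			struct rte_cryptodev_sym_session *sess,
			struct rte_mempool *mp)
	{
		void *priv;

		if (rte_mempool_get(mp, &priv) != 0)
			return -ENOMEM;

		/* ... parse xform into the driver's private layout ... */
		(void)xform;

		set_sym_session_private_data(sess, dev->driver_id, priv);
		return 0;
	}
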
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index eb47308b..7ca00735 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -22,8 +22,6 @@ DPDK_16.04 {
rte_cryptodev_stop;
rte_cryptodev_queue_pair_count;
rte_cryptodev_queue_pair_setup;
- rte_cryptodev_queue_pair_start;
- rte_cryptodev_queue_pair_stop;
rte_crypto_op_pool_create;
local: *;
@@ -52,8 +50,6 @@ DPDK_17.05 {
rte_cryptodev_get_auth_algo_enum;
rte_cryptodev_get_cipher_algo_enum;
- rte_cryptodev_queue_pair_attach_sym_session;
- rte_cryptodev_queue_pair_detach_sym_session;
} DPDK_17.02;
@@ -65,8 +61,6 @@ DPDK_17.08 {
rte_cryptodev_driver_id_get;
rte_cryptodev_driver_name_get;
rte_cryptodev_get_aead_algo_enum;
- rte_cryptodev_get_header_session_size;
- rte_cryptodev_get_private_session_size;
rte_cryptodev_sym_capability_check_aead;
rte_cryptodev_sym_session_init;
rte_cryptodev_sym_session_clear;
@@ -85,3 +79,30 @@ DPDK_17.11 {
rte_cryptodev_pmd_parse_input_args;
} DPDK_17.08;
+
+DPDK_18.05 {
+ global:
+
+ rte_cryptodev_sym_get_header_session_size;
+ rte_cryptodev_sym_get_private_session_size;
+
+} DPDK_17.11;
+
+EXPERIMENTAL {
+ global:
+
+ rte_cryptodev_asym_capability_get;
+ rte_cryptodev_asym_get_header_session_size;
+ rte_cryptodev_asym_get_private_session_size;
+ rte_cryptodev_asym_get_xform_enum;
+ rte_cryptodev_asym_session_clear;
+ rte_cryptodev_asym_session_create;
+ rte_cryptodev_asym_session_free;
+ rte_cryptodev_asym_session_init;
+ rte_cryptodev_asym_xform_capability_check_modlen;
+ rte_cryptodev_asym_xform_capability_check_optype;
+ rte_cryptodev_sym_session_get_user_data;
+ rte_cryptodev_sym_session_set_user_data;
+ rte_crypto_asym_op_strings;
+ rte_crypto_asym_xform_strings;
+};
diff --git a/lib/librte_eal/bsdapp/Makefile b/lib/librte_eal/bsdapp/Makefile
index 9d8e2477..5b06b216 100644
--- a/lib/librte_eal/bsdapp/Makefile
+++ b/lib/librte_eal/bsdapp/Makefile
@@ -4,7 +4,5 @@
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal
-DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += contigmem
-DIRS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += nic_uio
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/bsdapp/eal/Makefile b/lib/librte_eal/bsdapp/eal/Makefile
index dd455e67..d27da3d1 100644
--- a/lib/librte_eal/bsdapp/eal/Makefile
+++ b/lib/librte_eal/bsdapp/eal/Makefile
@@ -18,21 +18,25 @@ CFLAGS += $(WERROR_FLAGS) -O3
LDLIBS += -lexecinfo
LDLIBS += -lpthread
LDLIBS += -lgcc_s
+LDLIBS += -lrte_kvargs
EXPORT_MAP := ../../rte_eal_version.map
-LIBABIVER := 6
+LIBABIVER := 8
# specific to bsdapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) := eal.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_cpuflags.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_hugepage_info.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_thread.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_debug.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_memalloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_lcore.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_timer.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_interrupts.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_alarm.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_dev.c
# from common dir
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_lcore.c
@@ -40,6 +44,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_timer.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_memzone.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_log.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_memalloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_tailqs.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_errno.c
@@ -48,14 +53,18 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_hypervisor.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_string_fns.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_hexdump.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_devargs.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_class.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_bus.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_dev.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_options.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_thread.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_proc.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_fbarray.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += eal_common_uuid.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_malloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += malloc_elem.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += malloc_heap.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += malloc_mp.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_keepalive.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_service.c
SRCS-$(CONFIG_RTE_EXEC_ENV_BSDAPP) += rte_reciprocal.c
diff --git a/lib/librte_eal/bsdapp/eal/eal.c b/lib/librte_eal/bsdapp/eal/eal.c
index 4eafcb5a..d7ae9d68 100644
--- a/lib/librte_eal/bsdapp/eal/eal.c
+++ b/lib/librte_eal/bsdapp/eal/eal.c
@@ -18,6 +18,7 @@
#include <limits.h>
#include <sys/mman.h>
#include <sys/queue.h>
+#include <sys/stat.h>
#include <rte_compat.h>
#include <rte_common.h>
@@ -40,6 +41,7 @@
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_version.h>
+#include <rte_vfio.h>
#include <rte_atomic.h>
#include <malloc_heap.h>
@@ -64,8 +66,8 @@ static int mem_cfg_fd = -1;
static struct flock wr_lock = {
.l_type = F_WRLCK,
.l_whence = SEEK_SET,
- .l_start = offsetof(struct rte_mem_config, memseg),
- .l_len = sizeof(early_mem_config.memseg),
+ .l_start = offsetof(struct rte_mem_config, memsegs),
+ .l_len = sizeof(early_mem_config.memsegs),
};
/* Address of global and public configuration */
@@ -82,20 +84,72 @@ struct internal_config internal_config;
/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;
-/* Return user provided mbuf pool ops name */
-const char * __rte_experimental
-rte_eal_mbuf_user_pool_ops(void)
-{
- return internal_config.user_mbuf_pool_ops_name;
+/* platform-specific runtime dir */
+static char runtime_dir[PATH_MAX];
+
+static const char *default_runtime_dir = "/var/run";
+
+int
+eal_create_runtime_dir(void)
+{
+ const char *directory = default_runtime_dir;
+ const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
+ const char *fallback = "/tmp";
+ char tmp[PATH_MAX];
+ int ret;
+
+ if (getuid() != 0) {
+ /* try XDG path first, fall back to /tmp */
+ if (xdg_runtime_dir != NULL)
+ directory = xdg_runtime_dir;
+ else
+ directory = fallback;
+ }
+ /* create DPDK subdirectory under runtime dir */
+ ret = snprintf(tmp, sizeof(tmp), "%s/dpdk", directory);
+ if (ret < 0 || ret == sizeof(tmp)) {
+ RTE_LOG(ERR, EAL, "Error creating DPDK runtime path name\n");
+ return -1;
+ }
+
+ /* create prefix-specific subdirectory under DPDK runtime dir */
+ ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
+ tmp, internal_config.hugefile_prefix);
+ if (ret < 0 || ret == sizeof(runtime_dir)) {
+ RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
+ return -1;
+ }
+
+ /* create the path if it doesn't exist. no "mkdir -p" here, so do it
+ * step by step.
+ */
+ ret = mkdir(tmp, 0700);
+ if (ret < 0 && errno != EEXIST) {
+ RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
+ tmp, strerror(errno));
+ return -1;
+ }
+
+ ret = mkdir(runtime_dir, 0700);
+ if (ret < 0 && errno != EEXIST) {
+ RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
+ runtime_dir, strerror(errno));
+ return -1;
+ }
+
+ return 0;
}
-/* Return mbuf pool ops name */
const char *
-rte_eal_mbuf_default_mempool_ops(void)
+eal_get_runtime_dir(void)
{
- if (internal_config.user_mbuf_pool_ops_name == NULL)
- return RTE_MBUF_DEFAULT_MEMPOOL_OPS;
+ return runtime_dir;
+}
+/* Return user provided mbuf pool ops name */
+const char *
+rte_eal_mbuf_user_pool_ops(void)
+{
return internal_config.user_mbuf_pool_ops_name;
}
@@ -222,12 +276,17 @@ eal_proc_type_detect(void)
enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
const char *pathname = eal_runtime_config_path();
- /* if we can open the file but not get a write-lock we are a secondary
- * process. NOTE: if we get a file handle back, we keep that open
- * and don't close it to prevent a race condition between multiple opens */
- if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
- (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
- ptype = RTE_PROC_SECONDARY;
+ /* if there is no shared config, there can be no secondary processes */
+ if (!internal_config.no_shconf) {
+ /* if we can open the file but not get a write-lock we are a
+ * secondary process. NOTE: if we get a file handle back, we
+ * keep that open and don't close it to prevent a race condition
+ * between multiple opens.
+ */
+ if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
+ (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
+ ptype = RTE_PROC_SECONDARY;
+ }
RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
@@ -289,7 +348,7 @@ eal_get_hugepage_mem_size(void)
for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
struct hugepage_info *hpi = &internal_config.hugepage_info[i];
- if (hpi->hugedir != NULL) {
+ if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
size += hpi->hugepage_sz * hpi->num_pages[j];
}
@@ -379,7 +438,8 @@ eal_parse_args(int argc, char **argv)
switch (opt) {
case OPT_MBUF_POOL_OPS_NAME_NUM:
- internal_config.user_mbuf_pool_ops_name = optarg;
+ internal_config.user_mbuf_pool_ops_name =
+ strdup(optarg);
break;
case 'h':
eal_usage(prgname);
@@ -403,6 +463,14 @@ eal_parse_args(int argc, char **argv)
}
}
+ /* create runtime data directory */
+ if (internal_config.no_shconf == 0 &&
+ eal_create_runtime_dir() < 0) {
+ RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
+ ret = -1;
+ goto out;
+ }
+
if (eal_adjust_config(&internal_config) != 0) {
ret = -1;
goto out;
@@ -429,25 +497,29 @@ out:
return ret;
}
+static int
+check_socket(const struct rte_memseg_list *msl, void *arg)
+{
+ int *socket_id = arg;
+
+ if (msl->socket_id == *socket_id && msl->memseg_arr.count != 0)
+ return 1;
+
+ return 0;
+}
+
static void
eal_check_mem_on_local_socket(void)
{
- const struct rte_memseg *ms;
- int i, socket_id;
+ int socket_id;
socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
- ms = rte_eal_get_physmem_layout();
-
- for (i = 0; i < RTE_MAX_MEMSEG; i++)
- if (ms[i].socket_id == socket_id &&
- ms[i].len > 0)
- return;
-
- RTE_LOG(WARNING, EAL, "WARNING: Master core has no "
- "memory on local socket!\n");
+ if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
+ RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
}
+
static int
sync_func(__attribute__((unused)) void *arg)
{
@@ -531,6 +603,9 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ /* FreeBSD always uses legacy memory model */
+ internal_config.legacy_mem = true;
+
if (eal_plugins_init() < 0) {
rte_eal_init_alert("Cannot init plugins\n");
rte_errno = EINVAL;
@@ -544,6 +619,24 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ rte_config_init();
+
+ if (rte_eal_intr_init() < 0) {
+ rte_eal_init_alert("Cannot init interrupt-handling thread\n");
+ return -1;
+ }
+
+ /* Put mp channel init before bus scan so that we can init the vdev
+ * bus through mp channel in the secondary process before the bus scan.
+ */
+ if (rte_mp_channel_init() < 0) {
+ rte_eal_init_alert("failed to init mp channel\n");
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rte_errno = EFAULT;
+ return -1;
+ }
+ }
+
if (rte_bus_scan()) {
rte_eal_init_alert("Cannot scan the buses for devices\n");
rte_errno = ENODEV;
@@ -554,13 +647,17 @@ rte_eal_init(int argc, char **argv)
/* autodetect the iova mapping mode (default is iova_pa) */
rte_eal_get_configuration()->iova_mode = rte_bus_get_iommu_class();
- if (internal_config.no_hugetlbfs == 0 &&
- internal_config.process_type != RTE_PROC_SECONDARY &&
- eal_hugepage_info_init() < 0) {
- rte_eal_init_alert("Cannot get hugepage information.");
- rte_errno = EACCES;
- rte_atomic32_clear(&run_once);
- return -1;
+ if (internal_config.no_hugetlbfs == 0) {
+ /* rte_config isn't initialized yet */
+ ret = internal_config.process_type == RTE_PROC_PRIMARY ?
+ eal_hugepage_info_init() :
+ eal_hugepage_info_read();
+ if (ret < 0) {
+ rte_eal_init_alert("Cannot get hugepage information.");
+ rte_errno = EACCES;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
}
if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
@@ -583,14 +680,14 @@ rte_eal_init(int argc, char **argv)
rte_srand(rte_rdtsc());
- rte_config_init();
-
- if (rte_mp_channel_init() < 0) {
- rte_eal_init_alert("failed to init mp channel\n");
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- rte_errno = EFAULT;
- return -1;
- }
+ /* in secondary processes, memory init may allocate additional fbarrays
+ * not present in primary processes, so to avoid any potential issues,
+ * initialize memzones first.
+ */
+ if (rte_eal_memzone_init() < 0) {
+ rte_eal_init_alert("Cannot init memzone\n");
+ rte_errno = ENODEV;
+ return -1;
}
if (rte_eal_memory_init() < 0) {
@@ -599,8 +696,8 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- if (rte_eal_memzone_init() < 0) {
- rte_eal_init_alert("Cannot init memzone\n");
+ if (rte_eal_malloc_heap_init() < 0) {
+ rte_eal_init_alert("Cannot init malloc heap\n");
rte_errno = ENODEV;
return -1;
}
@@ -617,11 +714,6 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- if (rte_eal_intr_init() < 0) {
- rte_eal_init_alert("Cannot init interrupt-handling thread\n");
- return -1;
- }
-
if (rte_eal_timer_init() < 0) {
rte_eal_init_alert("Cannot init HPET or TSC timers\n");
rte_errno = ENOTSUP;
@@ -632,7 +724,7 @@ rte_eal_init(int argc, char **argv)
eal_thread_init_master(rte_config.master_lcore);
- ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+ ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
rte_config.master_lcore, thread_id, cpuset,
@@ -658,7 +750,7 @@ rte_eal_init(int argc, char **argv)
rte_panic("Cannot create thread\n");
/* Set thread_name for aid in debugging. */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
+ snprintf(thread_name, sizeof(thread_name),
"lcore-slave-%d", i);
rte_thread_setname(lcore_config[i].thread_id, thread_name);
}
@@ -735,18 +827,6 @@ rte_eal_vfio_intr_mode(void)
return RTE_INTR_MODE_NONE;
}
-/* dummy forward declaration. */
-struct vfio_device_info;
-
-/* dummy prototypes. */
-int rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
- int *vfio_dev_fd, struct vfio_device_info *device_info);
-int rte_vfio_release_device(const char *sysfs_base, const char *dev_addr, int fd);
-int rte_vfio_enable(const char *modname);
-int rte_vfio_is_enabled(const char *modname);
-int rte_vfio_noiommu_is_enabled(void);
-int rte_vfio_clear_group(int vfio_group_fd);
-
int rte_vfio_setup_device(__rte_unused const char *sysfs_base,
__rte_unused const char *dev_addr,
__rte_unused int *vfio_dev_fd,
@@ -781,3 +861,81 @@ int rte_vfio_clear_group(__rte_unused int vfio_group_fd)
{
return 0;
}
+
+int
+rte_vfio_dma_map(uint64_t __rte_unused vaddr, __rte_unused uint64_t iova,
+ __rte_unused uint64_t len)
+{
+ return -1;
+}
+
+int
+rte_vfio_dma_unmap(uint64_t __rte_unused vaddr, uint64_t __rte_unused iova,
+ __rte_unused uint64_t len)
+{
+ return -1;
+}
+
+int
+rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
+ __rte_unused const char *dev_addr,
+ __rte_unused int *iommu_group_num)
+{
+ return -1;
+}
+
+int
+rte_vfio_get_container_fd(void)
+{
+ return -1;
+}
+
+int
+rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_create(void)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_destroy(__rte_unused int container_fd)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_group_bind(__rte_unused int container_fd,
+ __rte_unused int iommu_group_num)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_group_unbind(__rte_unused int container_fd,
+ __rte_unused int iommu_group_num)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_dma_map(__rte_unused int container_fd,
+ __rte_unused uint64_t vaddr,
+ __rte_unused uint64_t iova,
+ __rte_unused uint64_t len)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_dma_unmap(__rte_unused int container_fd,
+ __rte_unused uint64_t vaddr,
+ __rte_unused uint64_t iova,
+ __rte_unused uint64_t len)
+{
+ return -1;
+}
diff --git a/lib/librte_eal/bsdapp/eal/eal_alarm.c b/lib/librte_eal/bsdapp/eal/eal_alarm.c
index eb3913c9..51ea4b8c 100644
--- a/lib/librte_eal/bsdapp/eal/eal_alarm.c
+++ b/lib/librte_eal/bsdapp/eal/eal_alarm.c
@@ -1,31 +1,314 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
+#include <time.h>
#include <errno.h>
#include <rte_alarm.h>
+#include <rte_cycles.h>
#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_interrupts.h>
+#include <rte_spinlock.h>
+
#include "eal_private.h"
+#include "eal_alarm_private.h"
+
+#define NS_PER_US 1000
+
+#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
+#define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
+#else
+#define CLOCK_TYPE_ID CLOCK_MONOTONIC
+#endif
+
+struct alarm_entry {
+ LIST_ENTRY(alarm_entry) next;
+ struct rte_intr_handle handle;
+ struct timespec time;
+ rte_eal_alarm_callback cb_fn;
+ void *cb_arg;
+ volatile uint8_t executing;
+ volatile pthread_t executing_id;
+};
+
+static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER();
+static rte_spinlock_t alarm_list_lk = RTE_SPINLOCK_INITIALIZER;
+
+static struct rte_intr_handle intr_handle = {.fd = -1 };
+static void eal_alarm_callback(void *arg);
int
rte_eal_alarm_init(void)
{
+ intr_handle.type = RTE_INTR_HANDLE_ALARM;
+
+ /* on FreeBSD, timers don't use fd's, and their identifiers are stored
+ * in a separate namespace from fd's, so using any value is OK. However,
+ * the EAL interrupt handler expects fd's to be unique, so use an actual fd
+ * to guarantee a unique timer identifier.
+ */
+ intr_handle.fd = open("/dev/zero", O_RDONLY);
+
+ return 0;
+}
+
+static inline int
+timespec_cmp(const struct timespec *now, const struct timespec *at)
+{
+ if (now->tv_sec < at->tv_sec)
+ return -1;
+ if (now->tv_sec > at->tv_sec)
+ return 1;
+ if (now->tv_nsec < at->tv_nsec)
+ return -1;
+ if (now->tv_nsec > at->tv_nsec)
+ return 1;
return 0;
}
+static inline uint64_t
+diff_ns(struct timespec *now, struct timespec *at)
+{
+ uint64_t now_ns, at_ns;
+
+ if (timespec_cmp(now, at) >= 0)
+ return 0;
+
+ now_ns = now->tv_sec * NS_PER_S + now->tv_nsec;
+ at_ns = at->tv_sec * NS_PER_S + at->tv_nsec;
+
+ return at_ns - now_ns;
+}
int
-rte_eal_alarm_set(uint64_t us __rte_unused,
- rte_eal_alarm_callback cb_fn __rte_unused,
- void *cb_arg __rte_unused)
+eal_alarm_get_timeout_ns(uint64_t *val)
{
- return -ENOTSUP;
+ struct alarm_entry *ap;
+ struct timespec now;
+
+ if (clock_gettime(CLOCK_TYPE_ID, &now) < 0)
+ return -1;
+
+ if (LIST_EMPTY(&alarm_list))
+ return -1;
+
+ ap = LIST_FIRST(&alarm_list);
+
+ *val = diff_ns(&now, &ap->time);
+
+ return 0;
+}
+
+static int
+unregister_current_callback(void)
+{
+ struct alarm_entry *ap;
+ int ret = 0;
+
+ if (!LIST_EMPTY(&alarm_list)) {
+ ap = LIST_FIRST(&alarm_list);
+
+ do {
+ ret = rte_intr_callback_unregister(&intr_handle,
+ eal_alarm_callback, &ap->time);
+ } while (ret == -EAGAIN);
+ }
+
+ return ret;
}
+static int
+register_first_callback(void)
+{
+ struct alarm_entry *ap;
+ int ret = 0;
+
+ if (!LIST_EMPTY(&alarm_list)) {
+ ap = LIST_FIRST(&alarm_list);
+
+ /* register a new callback */
+ ret = rte_intr_callback_register(&intr_handle,
+ eal_alarm_callback, &ap->time);
+ }
+ return ret;
+}
+
+static void
+eal_alarm_callback(void *arg __rte_unused)
+{
+ struct timespec now;
+ struct alarm_entry *ap;
+
+ rte_spinlock_lock(&alarm_list_lk);
+ ap = LIST_FIRST(&alarm_list);
+
+ if (clock_gettime(CLOCK_TYPE_ID, &now) < 0)
+ return;
+
+ while (ap != NULL && timespec_cmp(&now, &ap->time) >= 0) {
+ ap->executing = 1;
+ ap->executing_id = pthread_self();
+ rte_spinlock_unlock(&alarm_list_lk);
+
+ ap->cb_fn(ap->cb_arg);
+
+ rte_spinlock_lock(&alarm_list_lk);
+
+ LIST_REMOVE(ap, next);
+ free(ap);
+
+ ap = LIST_FIRST(&alarm_list);
+ }
+
+ /* timer has been deleted from the kqueue, so recreate it if needed */
+ register_first_callback();
+
+ rte_spinlock_unlock(&alarm_list_lk);
+}
+
+
int
-rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn __rte_unused,
- void *cb_arg __rte_unused)
+rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg)
{
- return -ENOTSUP;
+ struct alarm_entry *ap, *new_alarm;
+ struct timespec now;
+ uint64_t ns;
+ int ret = 0;
+
+ /* check parameters, also ensure us won't cause a uint64_t overflow */
+ if (us < 1 || us > (UINT64_MAX - US_PER_S) || cb_fn == NULL)
+ return -EINVAL;
+
+ new_alarm = calloc(1, sizeof(*new_alarm));
+ if (new_alarm == NULL)
+ return -ENOMEM;
+
+ /* use current time to calculate absolute time of alarm */
+ clock_gettime(CLOCK_TYPE_ID, &now);
+
+ ns = us * NS_PER_US;
+
+ new_alarm->cb_fn = cb_fn;
+ new_alarm->cb_arg = cb_arg;
+ new_alarm->time.tv_nsec = (now.tv_nsec + ns) % NS_PER_S;
+ new_alarm->time.tv_sec = now.tv_sec + ((now.tv_nsec + ns) / NS_PER_S);
+
+ rte_spinlock_lock(&alarm_list_lk);
+
+ if (LIST_EMPTY(&alarm_list))
+ LIST_INSERT_HEAD(&alarm_list, new_alarm, next);
+ else {
+ LIST_FOREACH(ap, &alarm_list, next) {
+ if (timespec_cmp(&new_alarm->time, &ap->time) < 0) {
+ LIST_INSERT_BEFORE(ap, new_alarm, next);
+ break;
+ }
+ if (LIST_NEXT(ap, next) == NULL) {
+ LIST_INSERT_AFTER(ap, new_alarm, next);
+ break;
+ }
+ }
+ }
+
+ /* re-register first callback just in case */
+ register_first_callback();
+
+ rte_spinlock_unlock(&alarm_list_lk);
+
+ return ret;
+}
+
+int
+rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
+{
+ struct alarm_entry *ap, *ap_prev;
+ int count = 0;
+ int err = 0;
+ int executing;
+
+ if (!cb_fn) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ do {
+ executing = 0;
+ rte_spinlock_lock(&alarm_list_lk);
+ /* remove any matches at the start of the list */
+ while (1) {
+ ap = LIST_FIRST(&alarm_list);
+ if (ap == NULL)
+ break;
+ if (cb_fn != ap->cb_fn)
+ break;
+ if (cb_arg != ap->cb_arg && cb_arg != (void *) -1)
+ break;
+ if (ap->executing == 0) {
+ LIST_REMOVE(ap, next);
+ free(ap);
+ count++;
+ } else {
+ /* If calling from another context, mark that the
+ * alarm is executing so the loop can spin until it
+ * finishes. Otherwise we are trying to cancel
+ * ourselves - mark it with EINPROGRESS.
+ */
+ if (pthread_equal(ap->executing_id,
+ pthread_self()) == 0)
+ executing++;
+ else
+ err = EINPROGRESS;
+
+ break;
+ }
+ }
+ ap_prev = ap;
+
+ /* now go through list, removing entries not at start */
+ LIST_FOREACH(ap, &alarm_list, next) {
+ /* this won't be true first time through */
+ if (cb_fn == ap->cb_fn &&
+ (cb_arg == (void *)-1 ||
+ cb_arg == ap->cb_arg)) {
+ if (ap->executing == 0) {
+ LIST_REMOVE(ap, next);
+ free(ap);
+ count++;
+ ap = ap_prev;
+ } else if (pthread_equal(ap->executing_id,
+ pthread_self()) == 0) {
+ executing++;
+ } else {
+ err = EINPROGRESS;
+ }
+ }
+ ap_prev = ap;
+ }
+ rte_spinlock_unlock(&alarm_list_lk);
+ } while (executing != 0);
+
+ if (count == 0 && err == 0)
+ rte_errno = ENOENT;
+ else if (err)
+ rte_errno = err;
+
+ rte_spinlock_lock(&alarm_list_lk);
+
+ /* unregister if no alarms left, otherwise re-register first */
+ if (LIST_EMPTY(&alarm_list))
+ unregister_current_callback();
+ else
+ register_first_callback();
+
+ rte_spinlock_unlock(&alarm_list_lk);
+
+ return count;
}
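
For context, a minimal sketch of how an application drives the alarm API implemented above. The callback, counter, and 10 ms period are illustrative; rte_eal_alarm_set() and rte_eal_alarm_cancel() are the public entry points from rte_alarm.h.

#include <stdio.h>
#include <rte_alarm.h>

static int counter;

static void
my_alarm_cb(void *arg)
{
	int *cnt = arg;

	/* alarms are one-shot: re-arm from the callback if periodic
	 * behaviour is wanted
	 */
	if (++(*cnt) < 5)
		rte_eal_alarm_set(10 * 1000 /* us */, my_alarm_cb, arg);
	else
		printf("alarm fired %d times\n", *cnt);
}

static void
start_alarm(void)
{
	/* arm the first alarm 10 ms from now; cancel all pending
	 * instances later with rte_eal_alarm_cancel(my_alarm_cb,
	 * (void *)-1)
	 */
	if (rte_eal_alarm_set(10 * 1000, my_alarm_cb, &counter) < 0)
		printf("failed to arm alarm\n");
}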
diff --git a/lib/librte_eal/bsdapp/eal/eal_alarm_private.h b/lib/librte_eal/bsdapp/eal/eal_alarm_private.h
new file mode 100644
index 00000000..65c71151
--- /dev/null
+++ b/lib/librte_eal/bsdapp/eal/eal_alarm_private.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef EAL_ALARM_PRIVATE_H
+#define EAL_ALARM_PRIVATE_H
+
+#include <inttypes.h>
+
+/*
+ * FreeBSD needs a back-channel communication mechanism between interrupt and
+ * alarm thread, because on FreeBSD, timer period is set up inside the interrupt
+ * API and not inside alarm API like on Linux.
+ */
+
+int
+eal_alarm_get_timeout_ns(uint64_t *val);
+
+#endif /* EAL_ALARM_PRIVATE_H */
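
To illustrate the contract of this back-channel, here is a sketch of what the alarm-side implementation of eal_alarm_get_timeout_ns() could look like. The shipped code lives in eal_alarm.c above; this version assumes the same alarm_list/alarm_list_lk internals and is only a plausible shape, not the actual implementation.

/* illustrative sketch only, not the shipped eal_alarm.c code */
int
eal_alarm_get_timeout_ns(uint64_t *val)
{
	struct alarm_entry *ap;
	struct timespec now;

	if (clock_gettime(CLOCK_TYPE_ID, &now) < 0)
		return -1;

	rte_spinlock_lock(&alarm_list_lk);
	ap = LIST_FIRST(&alarm_list);
	if (ap == NULL) {
		/* nothing pending - interrupt thread must not arm a timer */
		rte_spinlock_unlock(&alarm_list_lk);
		return -1;
	}
	/* nanoseconds until the soonest alarm expires; a production
	 * version would clamp negative (already expired) values to 0
	 */
	*val = (ap->time.tv_sec - now.tv_sec) * NS_PER_S +
			(ap->time.tv_nsec - now.tv_nsec);
	rte_spinlock_unlock(&alarm_list_lk);
	return 0;
}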
diff --git a/lib/librte_eal/bsdapp/eal/eal_cpuflags.c b/lib/librte_eal/bsdapp/eal/eal_cpuflags.c
new file mode 100644
index 00000000..69b161ea
--- /dev/null
+++ b/lib/librte_eal/bsdapp/eal/eal_cpuflags.c
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
+
+#include <rte_common.h>
+#include <rte_cpuflags.h>
+
+unsigned long
+rte_cpu_getauxval(unsigned long type __rte_unused)
+{
+ /* not implemented */
+ return 0;
+}
+
+int
+rte_cpu_strcmp_auxval(unsigned long type __rte_unused,
+ const char *str __rte_unused)
+{
+ /* not implemented */
+ return -1;
+}
diff --git a/lib/librte_eal/bsdapp/eal/eal_dev.c b/lib/librte_eal/bsdapp/eal/eal_dev.c
new file mode 100644
index 00000000..1c6c51bd
--- /dev/null
+++ b/lib/librte_eal/bsdapp/eal/eal_dev.c
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_log.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+
+int __rte_experimental
+rte_dev_event_monitor_start(void)
+{
+ RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
+ return -1;
+}
+
+int __rte_experimental
+rte_dev_event_monitor_stop(void)
+{
+ RTE_LOG(ERR, EAL, "Device event is not supported for FreeBSD\n");
+ return -1;
+}
diff --git a/lib/librte_eal/bsdapp/eal/eal_hugepage_info.c b/lib/librte_eal/bsdapp/eal/eal_hugepage_info.c
index be2dbf0e..1e8f5df2 100644
--- a/lib/librte_eal/bsdapp/eal/eal_hugepage_info.c
+++ b/lib/librte_eal/bsdapp/eal/eal_hugepage_info.c
@@ -19,10 +19,10 @@
* Used in this file to store the hugepage file map on disk
*/
static void *
-create_shared_memory(const char *filename, const size_t mem_size)
+map_shared_memory(const char *filename, const size_t mem_size, int flags)
{
void *retval;
- int fd = open(filename, O_CREAT | O_RDWR, 0666);
+ int fd = open(filename, flags, 0666);
if (fd < 0)
return NULL;
if (ftruncate(fd, mem_size) < 0) {
@@ -34,6 +34,18 @@ create_shared_memory(const char *filename, const size_t mem_size)
return retval;
}
+static void *
+open_shared_memory(const char *filename, const size_t mem_size)
+{
+ return map_shared_memory(filename, mem_size, O_RDWR);
+}
+
+static void *
+create_shared_memory(const char *filename, const size_t mem_size)
+{
+ return map_shared_memory(filename, mem_size, O_RDWR | O_CREAT);
+}
+
/*
* No hugepage support on freebsd, but we dummy it, using contigmem driver
*/
@@ -46,13 +58,16 @@ eal_hugepage_info_init(void)
/* re-use the linux "internal config" structure for our memory data */
struct hugepage_info *hpi = &internal_config.hugepage_info[0];
struct hugepage_info *tmp_hpi;
+ unsigned int i;
+
+ internal_config.num_hugepage_sizes = 1;
sysctl_size = sizeof(num_buffers);
error = sysctlbyname("hw.contigmem.num_buffers", &num_buffers,
&sysctl_size, NULL, 0);
if (error != 0) {
- RTE_LOG(ERR, EAL, "could not read sysctl hw.contigmem.num_buffers");
+ RTE_LOG(ERR, EAL, "could not read sysctl hw.contigmem.num_buffers\n");
return -1;
}
@@ -61,7 +76,7 @@ eal_hugepage_info_init(void)
&sysctl_size, NULL, 0);
if (error != 0) {
- RTE_LOG(ERR, EAL, "could not read sysctl hw.contigmem.buffer_size");
+ RTE_LOG(ERR, EAL, "could not read sysctl hw.contigmem.buffer_size\n");
return -1;
}
@@ -81,25 +96,61 @@ eal_hugepage_info_init(void)
RTE_LOG(INFO, EAL, "Contigmem driver has %d buffers, each of size %dKB\n",
num_buffers, (int)(buffer_size>>10));
- internal_config.num_hugepage_sizes = 1;
- hpi->hugedir = CONTIGMEM_DEV;
+ strlcpy(hpi->hugedir, CONTIGMEM_DEV, sizeof(hpi->hugedir));
hpi->hugepage_sz = buffer_size;
hpi->num_pages[0] = num_buffers;
hpi->lock_descriptor = fd;
+ /* for no shared files mode, do not create shared memory config */
+ if (internal_config.no_shconf)
+ return 0;
+
tmp_hpi = create_shared_memory(eal_hugepage_info_path(),
- sizeof(struct hugepage_info));
+ sizeof(internal_config.hugepage_info));
if (tmp_hpi == NULL ) {
RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
return -1;
}
- memcpy(tmp_hpi, hpi, sizeof(struct hugepage_info));
+ memcpy(tmp_hpi, hpi, sizeof(internal_config.hugepage_info));
+
+ /* we've copied file descriptors along with everything else, but they
+ * will be invalid in the secondary process, so overwrite them
+ */
+ for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+ struct hugepage_info *tmp = &tmp_hpi[i];
+ tmp->lock_descriptor = -1;
+ }
- if ( munmap(tmp_hpi, sizeof(struct hugepage_info)) < 0) {
+ if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
return -1;
}
return 0;
}
+
+/* copy stuff from shared info into internal config */
+int
+eal_hugepage_info_read(void)
+{
+ struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+ struct hugepage_info *tmp_hpi;
+
+ internal_config.num_hugepage_sizes = 1;
+
+ tmp_hpi = open_shared_memory(eal_hugepage_info_path(),
+ sizeof(internal_config.hugepage_info));
+ if (tmp_hpi == NULL) {
+ RTE_LOG(ERR, EAL, "Failed to open shared memory!\n");
+ return -1;
+ }
+
+ memcpy(hpi, tmp_hpi, sizeof(internal_config.hugepage_info));
+
+ if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
+ return -1;
+ }
+ return 0;
+}
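
Taken together, the create/open split above gives the primary and secondary processes a simple hand-off. A sketch of the resulting call pattern, assuming the usual EAL init order:

/* primary: write the shared hugepage config (lock fds blanked to -1);
 * secondary: map the same file and copy it into internal_config
 */
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
	ret = eal_hugepage_info_init();
else
	ret = eal_hugepage_info_read();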
diff --git a/lib/librte_eal/bsdapp/eal/eal_interrupts.c b/lib/librte_eal/bsdapp/eal/eal_interrupts.c
index 290d53ab..2feee2d5 100644
--- a/lib/librte_eal/bsdapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/bsdapp/eal/eal_interrupts.c
@@ -1,51 +1,479 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
+#include <string.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/queue.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
#include <rte_common.h>
#include <rte_interrupts.h>
+
#include "eal_private.h"
+#include "eal_alarm_private.h"
+
+#define MAX_INTR_EVENTS 16
+
+/**
+ * union buffer for reading on different devices
+ */
+union rte_intr_read_buffer {
+ char charbuf[16]; /* for others */
+};
+
+TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
+TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
+
+struct rte_intr_callback {
+ TAILQ_ENTRY(rte_intr_callback) next;
+ rte_intr_callback_fn cb_fn; /**< callback address */
+ void *cb_arg; /**< parameter for callback */
+};
+
+struct rte_intr_source {
+ TAILQ_ENTRY(rte_intr_source) next;
+ struct rte_intr_handle intr_handle; /**< interrupt handle */
+ struct rte_intr_cb_list callbacks; /**< user callbacks */
+ uint32_t active;
+};
+
+/* global spinlock for interrupt data operation */
+static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* interrupt sources list */
+static struct rte_intr_source_list intr_sources;
+
+/* interrupt handling thread */
+static pthread_t intr_thread;
+
+static volatile int kq = -1;
+
+static int
+intr_source_to_kevent(const struct rte_intr_handle *ih, struct kevent *ke)
+{
+ /* alarm callbacks are special case */
+ if (ih->type == RTE_INTR_HANDLE_ALARM) {
+ uint64_t timeout_ns;
+
+ /* get soonest alarm timeout */
+ if (eal_alarm_get_timeout_ns(&timeout_ns) < 0)
+ return -1;
+
+ ke->filter = EVFILT_TIMER;
+ /* timers are one shot */
+ ke->flags |= EV_ONESHOT;
+ ke->fflags = NOTE_NSECONDS;
+ ke->data = timeout_ns;
+ } else {
+ ke->filter = EVFILT_READ;
+ }
+ ke->ident = ih->fd;
+
+ return 0;
+}
int
rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
- rte_intr_callback_fn cb,
- void *cb_arg)
+ rte_intr_callback_fn cb, void *cb_arg)
{
- RTE_SET_USED(intr_handle);
- RTE_SET_USED(cb);
- RTE_SET_USED(cb_arg);
+ struct rte_intr_callback *callback = NULL;
+ struct rte_intr_source *src = NULL;
+ /* initialise add_event so it is never read before being set */
+ int ret, add_event = 0;
- return -ENOTSUP;
+ /* first do parameter checking */
+ if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
+ RTE_LOG(ERR, EAL,
+ "Registering with invalid input parameter\n");
+ return -EINVAL;
+ }
+ if (kq < 0) {
+ RTE_LOG(ERR, EAL, "Kqueue is not active: %d\n", kq);
+ return -ENODEV;
+ }
+
+ /* allocate a new interrupt callback entity */
+ callback = calloc(1, sizeof(*callback));
+ if (callback == NULL) {
+ RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+ return -ENOMEM;
+ }
+ callback->cb_fn = cb;
+ callback->cb_arg = cb_arg;
+
+ rte_spinlock_lock(&intr_lock);
+
+ /* check if there is at least one callback registered for the fd */
+ TAILQ_FOREACH(src, &intr_sources, next) {
+ if (src->intr_handle.fd == intr_handle->fd) {
+ /* we had no interrupts for this */
+ if (TAILQ_EMPTY(&src->callbacks))
+ add_event = 1;
+
+ TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+ ret = 0;
+ break;
+ }
+ }
+
+ /* no existing callbacks for this - add new source */
+ if (src == NULL) {
+ src = calloc(1, sizeof(*src));
+ if (src == NULL) {
+ RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+ ret = -ENOMEM;
+ goto fail;
+ } else {
+ src->intr_handle = *intr_handle;
+ TAILQ_INIT(&src->callbacks);
+ TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+ TAILQ_INSERT_TAIL(&intr_sources, src, next);
+ add_event = 1;
+ ret = 0;
+ }
+ }
+
+ /* add events to the queue. timer events are special as we need to
+ * re-set the timer.
+ */
+ if (add_event || src->intr_handle.type == RTE_INTR_HANDLE_ALARM) {
+ struct kevent ke;
+
+ memset(&ke, 0, sizeof(ke));
+ ke.flags = EV_ADD; /* mark for addition to the queue */
+
+ if (intr_source_to_kevent(intr_handle, &ke) < 0) {
+ RTE_LOG(ERR, EAL, "Cannot convert interrupt handle to kevent\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /**
+ * add the intr file descriptor into wait list.
+ */
+ if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
+ /* currently, nic_uio does not support interrupts, so
+ * this error will always be triggered and shown to the
+ * user. Therefore, don't output it unless the debug log
+ * level is set.
+ */
+ if (errno == ENODEV)
+ RTE_LOG(DEBUG, EAL, "Interrupt handle %d not supported\n",
+ src->intr_handle.fd);
+ else
+ RTE_LOG(ERR, EAL, "Error adding fd %d "
+ "kevent, %s\n",
+ src->intr_handle.fd,
+ strerror(errno));
+ ret = -errno;
+ goto fail;
+ }
+ }
+ rte_spinlock_unlock(&intr_lock);
+
+ return ret;
+fail:
+ /* clean up */
+ if (src != NULL) {
+ TAILQ_REMOVE(&(src->callbacks), callback, next);
+ if (TAILQ_EMPTY(&(src->callbacks))) {
+ TAILQ_REMOVE(&intr_sources, src, next);
+ free(src);
+ }
+ }
+ free(callback);
+ rte_spinlock_unlock(&intr_lock);
+ return ret;
}
int
rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
- rte_intr_callback_fn cb,
- void *cb_arg)
+ rte_intr_callback_fn cb_fn, void *cb_arg)
{
- RTE_SET_USED(intr_handle);
- RTE_SET_USED(cb);
- RTE_SET_USED(cb_arg);
+ int ret;
+ struct rte_intr_source *src;
+ struct rte_intr_callback *cb, *next;
- return -ENOTSUP;
+ /* do parameter checking first */
+ if (intr_handle == NULL || intr_handle->fd < 0) {
+ RTE_LOG(ERR, EAL,
+ "Unregistering with invalid input parameter\n");
+ return -EINVAL;
+ }
+ if (kq < 0) {
+ RTE_LOG(ERR, EAL, "Kqueue is not active\n");
+ return -ENODEV;
+ }
+
+ rte_spinlock_lock(&intr_lock);
+
+ /* check if an interrupt source exists for this fd */
+ TAILQ_FOREACH(src, &intr_sources, next)
+ if (src->intr_handle.fd == intr_handle->fd)
+ break;
+
+ /* No interrupt source registered for the fd */
+ if (src == NULL) {
+ ret = -ENOENT;
+
+ /* interrupt source has some active callbacks right now. */
+ } else if (src->active != 0) {
+ ret = -EAGAIN;
+
+ /* ok to remove. */
+ } else {
+ struct kevent ke;
+
+ ret = 0;
+
+ /* remove it from the kqueue */
+ memset(&ke, 0, sizeof(ke));
+ ke.flags = EV_DELETE; /* mark for deletion from the queue */
+
+ if (intr_source_to_kevent(intr_handle, &ke) < 0) {
+ RTE_LOG(ERR, EAL, "Cannot convert to kevent\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /**
+ * remove intr file descriptor from wait list.
+ */
+ if (kevent(kq, &ke, 1, NULL, 0, NULL) < 0) {
+ RTE_LOG(ERR, EAL, "Error removing fd %d kevent, %s\n",
+ src->intr_handle.fd, strerror(errno));
+ /* removing a non-existent event is an expected condition
+ * in some circumstances (e.g. oneshot events).
+ */
+ }
+
+ /* walk through the callbacks and remove all that match. */
+ for (cb = TAILQ_FIRST(&src->callbacks); cb != NULL; cb = next) {
+ next = TAILQ_NEXT(cb, next);
+ if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
+ cb->cb_arg == cb_arg)) {
+ TAILQ_REMOVE(&src->callbacks, cb, next);
+ free(cb);
+ ret++;
+ }
+ }
+
+ /* all callbacks for that source are removed. */
+ if (TAILQ_EMPTY(&src->callbacks)) {
+ TAILQ_REMOVE(&intr_sources, src, next);
+ free(src);
+ }
+ }
+out:
+ rte_spinlock_unlock(&intr_lock);
+
+ return ret;
}
int
-rte_intr_enable(const struct rte_intr_handle *intr_handle __rte_unused)
+rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
- return -ENOTSUP;
+ if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ return 0;
+
+ if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+ return -1;
+
+ switch (intr_handle->type) {
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_ALARM:
+ return -1;
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_DEV_EVENT:
+ return -1;
+ /* unknown handle type */
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ return 0;
}
int
-rte_intr_disable(const struct rte_intr_handle *intr_handle __rte_unused)
+rte_intr_disable(const struct rte_intr_handle *intr_handle)
{
- return -ENOTSUP;
+ if (intr_handle && intr_handle->type == RTE_INTR_HANDLE_VDEV)
+ return 0;
+
+ if (!intr_handle || intr_handle->fd < 0 || intr_handle->uio_cfg_fd < 0)
+ return -1;
+
+ switch (intr_handle->type) {
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_ALARM:
+ return -1;
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_DEV_EVENT:
+ return -1;
+ /* unknown handle type */
+ default:
+ RTE_LOG(ERR, EAL,
+ "Unknown handle type of fd %d\n",
+ intr_handle->fd);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+eal_intr_process_interrupts(struct kevent *events, int nfds)
+{
+ struct rte_intr_callback active_cb;
+ union rte_intr_read_buffer buf;
+ struct rte_intr_callback *cb;
+ struct rte_intr_source *src;
+ int n, bytes_read;
+
+ for (n = 0; n < nfds; n++) {
+ /* reset per event so a previous fd's result cannot leak */
+ bool call = false;
+ int event_fd = events[n].ident;
+
+ rte_spinlock_lock(&intr_lock);
+ TAILQ_FOREACH(src, &intr_sources, next)
+ if (src->intr_handle.fd == event_fd)
+ break;
+ if (src == NULL) {
+ rte_spinlock_unlock(&intr_lock);
+ continue;
+ }
+
+ /* mark this interrupt source as active and release the lock. */
+ src->active = 1;
+ rte_spinlock_unlock(&intr_lock);
+
+ /* set the length to be read for different handle types */
+ switch (src->intr_handle.type) {
+ case RTE_INTR_HANDLE_ALARM:
+ bytes_read = 0;
+ call = true;
+ break;
+ case RTE_INTR_HANDLE_VDEV:
+ case RTE_INTR_HANDLE_EXT:
+ bytes_read = 0;
+ call = true;
+ break;
+ case RTE_INTR_HANDLE_DEV_EVENT:
+ bytes_read = 0;
+ call = true;
+ break;
+ default:
+ bytes_read = 1;
+ break;
+ }
+
+ if (bytes_read > 0) {
+ /**
+ * read out to clear the ready-to-be-read flag
+ * for the next kevent wait.
+ */
+ bytes_read = read(event_fd, &buf, bytes_read);
+ if (bytes_read < 0) {
+ if (errno == EINTR || errno == EWOULDBLOCK)
+ continue;
+
+ RTE_LOG(ERR, EAL, "Error reading from file "
+ "descriptor %d: %s\n",
+ event_fd,
+ strerror(errno));
+ } else if (bytes_read == 0)
+ RTE_LOG(ERR, EAL, "Read nothing from file "
+ "descriptor %d\n", event_fd);
+ else
+ call = true;
+ }
+
+ /* grab a lock, again to call callbacks and update status. */
+ rte_spinlock_lock(&intr_lock);
+
+ if (call) {
+ /* Finally, call all callbacks. */
+ TAILQ_FOREACH(cb, &src->callbacks, next) {
+
+ /* make a copy and unlock. */
+ active_cb = *cb;
+ rte_spinlock_unlock(&intr_lock);
+
+ /* call the actual callback */
+ active_cb.cb_fn(active_cb.cb_arg);
+
+ /* get the lock back. */
+ rte_spinlock_lock(&intr_lock);
+ }
+ }
+
+ /* we are done with this interrupt source, release it. */
+ src->active = 0;
+ rte_spinlock_unlock(&intr_lock);
+ }
+}
+
+static void *
+eal_intr_thread_main(void *arg __rte_unused)
+{
+ struct kevent events[MAX_INTR_EVENTS];
+ int nfds;
+
+ /* host thread, never break out */
+ for (;;) {
+ /* do not change anything, just wait */
+ nfds = kevent(kq, NULL, 0, events, MAX_INTR_EVENTS, NULL);
+
+ /* kevent fail */
+ if (nfds < 0) {
+ if (errno == EINTR)
+ continue;
+ RTE_LOG(ERR, EAL,
+ "kevent returns with fail\n");
+ break;
+ }
+ /* kevent timeout, will never happen here */
+ else if (nfds == 0)
+ continue;
+
+ /* kevent has at least one fd ready to read */
+ eal_intr_process_interrupts(events, nfds);
+ }
+ close(kq);
+ kq = -1;
+ return NULL;
}
int
rte_eal_intr_init(void)
{
- return 0;
+ int ret = 0;
+
+ /* init the global interrupt source head */
+ TAILQ_INIT(&intr_sources);
+
+ kq = kqueue();
+ if (kq < 0) {
+ RTE_LOG(ERR, EAL, "Cannot create kqueue instance\n");
+ return -1;
+ }
+
+ /* create the host thread to wait/handle the interrupt */
+ ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
+ eal_intr_thread_main, NULL);
+ if (ret != 0) {
+ rte_errno = -ret;
+ RTE_LOG(ERR, EAL,
+ "Failed to create thread for interrupt handling\n");
+ }
+
+ return ret;
}
int
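
As a usage sketch for the kqueue-backed implementation above: registering a callback for a readable file descriptor ends up as an EVFILT_READ kevent serviced by the eal-intr-thread. The handle type choice and the helper below are illustrative, not part of this patch.

#include <string.h>
#include <rte_common.h>
#include <rte_interrupts.h>

static void
my_intr_cb(void *arg __rte_unused)
{
	/* runs in the eal-intr-thread context */
}

static int
watch_fd(int fd)
{
	struct rte_intr_handle ih;

	memset(&ih, 0, sizeof(ih));
	ih.fd = fd;
	/* EXT handles are dispatched without a draining read() */
	ih.type = RTE_INTR_HANDLE_EXT;

	return rte_intr_callback_register(&ih, my_intr_cb, NULL);
}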
diff --git a/lib/librte_eal/bsdapp/eal/eal_memalloc.c b/lib/librte_eal/bsdapp/eal/eal_memalloc.c
new file mode 100644
index 00000000..f7f07abd
--- /dev/null
+++ b/lib/librte_eal/bsdapp/eal/eal_memalloc.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <inttypes.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+
+#include "eal_memalloc.h"
+
+int
+eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms __rte_unused,
+ int __rte_unused n_segs, size_t __rte_unused page_sz,
+ int __rte_unused socket, bool __rte_unused exact)
+{
+ RTE_LOG(ERR, EAL, "Memory hotplug not supported on FreeBSD\n");
+ return -1;
+}
+
+struct rte_memseg *
+eal_memalloc_alloc_seg(size_t __rte_unused page_sz, int __rte_unused socket)
+{
+ RTE_LOG(ERR, EAL, "Memory hotplug not supported on FreeBSD\n");
+ return NULL;
+}
+
+int
+eal_memalloc_free_seg(struct rte_memseg *ms __rte_unused)
+{
+ RTE_LOG(ERR, EAL, "Memory hotplug not supported on FreeBSD\n");
+ return -1;
+}
+
+int
+eal_memalloc_free_seg_bulk(struct rte_memseg **ms __rte_unused,
+ int n_segs __rte_unused)
+{
+ RTE_LOG(ERR, EAL, "Memory hotplug not supported on FreeBSD\n");
+ return -1;
+}
+
+int
+eal_memalloc_sync_with_primary(void)
+{
+ RTE_LOG(ERR, EAL, "Memory hotplug not supported on FreeBSD\n");
+ return -1;
+}
+
+int
+eal_memalloc_init(void)
+{
+ return 0;
+}
diff --git a/lib/librte_eal/bsdapp/eal/eal_memory.c b/lib/librte_eal/bsdapp/eal/eal_memory.c
index bdfb8828..16d2bc7c 100644
--- a/lib/librte_eal/bsdapp/eal/eal_memory.c
+++ b/lib/librte_eal/bsdapp/eal/eal_memory.c
@@ -6,10 +6,13 @@
#include <sys/types.h>
#include <sys/sysctl.h>
#include <inttypes.h>
+#include <errno.h>
+#include <string.h>
#include <fcntl.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include "eal_private.h"
@@ -41,129 +44,253 @@ rte_eal_hugepage_init(void)
struct rte_mem_config *mcfg;
uint64_t total_mem = 0;
void *addr;
- unsigned i, j, seg_idx = 0;
+ unsigned int i, j, seg_idx = 0;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
/* for debug purposes, hugetlbfs can be disabled */
if (internal_config.no_hugetlbfs) {
- addr = malloc(internal_config.memory);
- mcfg->memseg[0].iova = (rte_iova_t)(uintptr_t)addr;
- mcfg->memseg[0].addr = addr;
- mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
- mcfg->memseg[0].len = internal_config.memory;
- mcfg->memseg[0].socket_id = 0;
+ struct rte_memseg_list *msl;
+ struct rte_fbarray *arr;
+ struct rte_memseg *ms;
+ uint64_t page_sz;
+ int n_segs, cur_seg;
+
+ /* create a memseg list */
+ msl = &mcfg->memsegs[0];
+
+ page_sz = RTE_PGSIZE_4K;
+ n_segs = internal_config.memory / page_sz;
+
+ if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
+ sizeof(struct rte_memseg))) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+ return -1;
+ }
+
+ addr = mmap(NULL, internal_config.memory,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
+ strerror(errno));
+ return -1;
+ }
+ msl->base_va = addr;
+ msl->page_sz = page_sz;
+ msl->socket_id = 0;
+
+ /* populate memsegs. each memseg is 1 page long */
+ for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
+ arr = &msl->memseg_arr;
+
+ ms = rte_fbarray_get(arr, cur_seg);
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ ms->iova = (uintptr_t)addr;
+ else
+ ms->iova = RTE_BAD_IOVA;
+ ms->addr = addr;
+ ms->hugepage_sz = page_sz;
+ ms->len = page_sz;
+ ms->socket_id = 0;
+
+ rte_fbarray_set_used(arr, cur_seg);
+
+ addr = RTE_PTR_ADD(addr, page_sz);
+ }
return 0;
}
/* map all hugepages and sort them */
for (i = 0; i < internal_config.num_hugepage_sizes; i ++){
struct hugepage_info *hpi;
+ rte_iova_t prev_end = 0;
+ int prev_ms_idx = -1;
+ uint64_t page_sz, mem_needed;
+ unsigned int n_pages, max_pages;
hpi = &internal_config.hugepage_info[i];
- for (j = 0; j < hpi->num_pages[0]; j++) {
+ page_sz = hpi->hugepage_sz;
+ max_pages = hpi->num_pages[0];
+ mem_needed = RTE_ALIGN_CEIL(internal_config.memory - total_mem,
+ page_sz);
+
+ n_pages = RTE_MIN(mem_needed / page_sz, max_pages);
+
+ for (j = 0; j < n_pages; j++) {
+ struct rte_memseg_list *msl;
+ struct rte_fbarray *arr;
struct rte_memseg *seg;
+ int msl_idx, ms_idx;
rte_iova_t physaddr;
int error;
size_t sysctl_size = sizeof(physaddr);
char physaddr_str[64];
+ bool is_adjacent;
- addr = mmap(NULL, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
- MAP_SHARED, hpi->lock_descriptor,
- j * EAL_PAGE_SIZE);
- if (addr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
- j, hpi->hugedir);
- return -1;
- }
-
- snprintf(physaddr_str, sizeof(physaddr_str), "hw.contigmem"
- ".physaddr.%d", j);
- error = sysctlbyname(physaddr_str, &physaddr, &sysctl_size,
- NULL, 0);
+ /* first, check if this segment is IOVA-adjacent to
+ * the previous one.
+ */
+ snprintf(physaddr_str, sizeof(physaddr_str),
+ "hw.contigmem.physaddr.%d", j);
+ error = sysctlbyname(physaddr_str, &physaddr,
+ &sysctl_size, NULL, 0);
if (error < 0) {
RTE_LOG(ERR, EAL, "Failed to get physical addr for buffer %u "
"from %s\n", j, hpi->hugedir);
return -1;
}
- seg = &mcfg->memseg[seg_idx++];
+ is_adjacent = prev_end != 0 && physaddr == prev_end;
+ prev_end = physaddr + hpi->hugepage_sz;
+
+ for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
+ msl_idx++) {
+ bool empty, need_hole;
+ msl = &mcfg->memsegs[msl_idx];
+ arr = &msl->memseg_arr;
+
+ if (msl->page_sz != page_sz)
+ continue;
+
+ empty = arr->count == 0;
+
+ /* we need a hole if this isn't an empty memseg
+ * list, and if previous segment was not
+ * adjacent to current one.
+ */
+ need_hole = !empty && !is_adjacent;
+
+ /* we need 1, plus hole if not adjacent */
+ ms_idx = rte_fbarray_find_next_n_free(arr,
+ 0, 1 + (need_hole ? 1 : 0));
+
+ /* memseg list is full? */
+ if (ms_idx < 0)
+ continue;
+
+ if (need_hole && prev_ms_idx == ms_idx - 1)
+ ms_idx++;
+ prev_ms_idx = ms_idx;
+
+ break;
+ }
+ if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
+ RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
+ RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
+ RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
+ return -1;
+ }
+ arr = &msl->memseg_arr;
+ seg = rte_fbarray_get(arr, ms_idx);
+
+ addr = RTE_PTR_ADD(msl->base_va,
+ (size_t)msl->page_sz * ms_idx);
+
+ /* address is already mapped in memseg list, so using
+ * MAP_FIXED here is safe.
+ */
+ addr = mmap(addr, page_sz, PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_FIXED,
+ hpi->lock_descriptor,
+ j * EAL_PAGE_SIZE);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
+ j, hpi->hugedir);
+ return -1;
+ }
+
seg->addr = addr;
seg->iova = physaddr;
- seg->hugepage_sz = hpi->hugepage_sz;
- seg->len = hpi->hugepage_sz;
+ seg->hugepage_sz = page_sz;
+ seg->len = page_sz;
seg->nchannel = mcfg->nchannel;
seg->nrank = mcfg->nrank;
seg->socket_id = 0;
+ rte_fbarray_set_used(arr, ms_idx);
+
RTE_LOG(INFO, EAL, "Mapped memory segment %u @ %p: physaddr:0x%"
PRIx64", len %zu\n",
- seg_idx, addr, physaddr, hpi->hugepage_sz);
- if (total_mem >= internal_config.memory ||
- seg_idx >= RTE_MAX_MEMSEG)
- break;
+ seg_idx++, addr, physaddr, page_sz);
+
+ total_mem += seg->len;
}
+ if (total_mem >= internal_config.memory)
+ break;
+ }
+ if (total_mem < internal_config.memory) {
+ RTE_LOG(ERR, EAL, "Couldn't reserve requested memory, "
+ "requested: %" PRIu64 "M "
+ "available: %" PRIu64 "M\n",
+ internal_config.memory >> 20, total_mem >> 20);
+ return -1;
}
return 0;
}
+struct attach_walk_args {
+ int fd_hugepage;
+ int seg_idx;
+};
+static int
+attach_segment(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ struct attach_walk_args *wa = arg;
+ void *addr;
+
+ addr = mmap(ms->addr, ms->len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, wa->fd_hugepage,
+ wa->seg_idx * EAL_PAGE_SIZE);
+ if (addr == MAP_FAILED || addr != ms->addr)
+ return -1;
+ wa->seg_idx++;
+
+ return 0;
+}
+
int
rte_eal_hugepage_attach(void)
{
const struct hugepage_info *hpi;
- int fd_hugepage_info, fd_hugepage = -1;
- unsigned i = 0;
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int fd_hugepage = -1;
+ unsigned int i;
- /* Obtain a file descriptor for hugepage_info */
- fd_hugepage_info = open(eal_hugepage_info_path(), O_RDONLY);
- if (fd_hugepage_info < 0) {
- RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
- return -1;
- }
+ hpi = &internal_config.hugepage_info[0];
- /* Map the shared hugepage_info into the process address spaces */
- hpi = mmap(NULL, sizeof(struct hugepage_info), PROT_READ, MAP_PRIVATE,
- fd_hugepage_info, 0);
- if (hpi == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
- goto error;
- }
-
- /* Obtain a file descriptor for contiguous memory */
- fd_hugepage = open(hpi->hugedir, O_RDWR);
- if (fd_hugepage < 0) {
- RTE_LOG(ERR, EAL, "Could not open %s\n", hpi->hugedir);
- goto error;
- }
+ for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
+ const struct hugepage_info *cur_hpi = &hpi[i];
+ struct attach_walk_args wa;
- /* Map the contiguous memory into each memory segment */
- for (i = 0; i < hpi->num_pages[0]; i++) {
+ memset(&wa, 0, sizeof(wa));
- void *addr;
- struct rte_memseg *seg = &mcfg->memseg[i];
+ /* Obtain a file descriptor for contiguous memory */
+ fd_hugepage = open(cur_hpi->hugedir, O_RDWR);
+ if (fd_hugepage < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s\n",
+ cur_hpi->hugedir);
+ goto error;
+ }
+ wa.fd_hugepage = fd_hugepage;
+ wa.seg_idx = 0;
- addr = mmap(seg->addr, hpi->hugepage_sz, PROT_READ|PROT_WRITE,
- MAP_SHARED|MAP_FIXED, fd_hugepage,
- i * EAL_PAGE_SIZE);
- if (addr == MAP_FAILED || addr != seg->addr) {
+ /* Map the contiguous memory into each memory segment */
+ if (rte_memseg_walk(attach_segment, &wa) < 0) {
RTE_LOG(ERR, EAL, "Failed to mmap buffer %u from %s\n",
- i, hpi->hugedir);
+ wa.seg_idx, cur_hpi->hugedir);
goto error;
}
+ close(fd_hugepage);
+ fd_hugepage = -1;
}
/* hugepage_info is no longer required */
- munmap((void *)(uintptr_t)hpi, sizeof(struct hugepage_info));
- close(fd_hugepage_info);
- close(fd_hugepage);
return 0;
error:
- if (fd_hugepage_info >= 0)
- close(fd_hugepage_info);
if (fd_hugepage >= 0)
close(fd_hugepage);
return -1;
@@ -174,3 +301,217 @@ rte_eal_using_phys_addrs(void)
{
return 0;
}
+
+static uint64_t
+get_mem_amount(uint64_t page_sz, uint64_t max_mem)
+{
+ uint64_t area_sz, max_pages;
+
+ /* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
+ max_pages = RTE_MAX_MEMSEG_PER_LIST;
+ max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
+
+ area_sz = RTE_MIN(page_sz * max_pages, max_mem);
+
+ /* make sure the list isn't smaller than the page size */
+ area_sz = RTE_MAX(area_sz, page_sz);
+
+ return RTE_ALIGN(area_sz, page_sz);
+}
+
+#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
+static int
+alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+ int n_segs, int socket_id, int type_msl_idx)
+{
+ char name[RTE_FBARRAY_NAME_LEN];
+
+ snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
+ type_msl_idx);
+ if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
+ sizeof(struct rte_memseg))) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
+ rte_strerror(rte_errno));
+ return -1;
+ }
+
+ msl->page_sz = page_sz;
+ msl->socket_id = socket_id;
+ msl->base_va = NULL;
+
+ RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
+ (size_t)page_sz >> 10, socket_id);
+
+ return 0;
+}
+
+static int
+alloc_va_space(struct rte_memseg_list *msl)
+{
+ uint64_t page_sz;
+ size_t mem_sz;
+ void *addr;
+ int flags = 0;
+
+#ifdef RTE_ARCH_PPC_64
+ flags |= MAP_HUGETLB;
+#endif
+
+ page_sz = msl->page_sz;
+ mem_sz = page_sz * msl->memseg_arr.len;
+
+ addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
+ if (addr == NULL) {
+ if (rte_errno == EADDRNOTAVAIL)
+ RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
+ (unsigned long long)mem_sz, msl->base_va);
+ else
+ RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
+ return -1;
+ }
+ msl->base_va = addr;
+
+ return 0;
+}
+
+
+static int
+memseg_primary_init(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int hpi_idx, msl_idx = 0;
+ struct rte_memseg_list *msl;
+ uint64_t max_mem, total_mem;
+
+ /* no-huge does not need this at all */
+ if (internal_config.no_hugetlbfs)
+ return 0;
+
+ /* FreeBSD has an issue where a core dump will dump the entire memory
+ * contents, including anonymous zero-page memory. Therefore, while we
+ * will be limiting the total amount of memory to RTE_MAX_MEM_MB, we
+ * will also further limit the total memory amount to whatever memory
+ * is available to us through the contigmem driver (plus spacing
+ * blocks).
+ *
+ * So, at each stage, we check how much memory we are preallocating
+ * and adjust all the values accordingly.
+ */
+
+ max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
+ total_mem = 0;
+
+ /* create memseg lists */
+ for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
+ hpi_idx++) {
+ uint64_t max_type_mem, total_type_mem = 0;
+ uint64_t avail_mem;
+ int type_msl_idx, max_segs, avail_segs, total_segs = 0;
+ struct hugepage_info *hpi;
+ uint64_t hugepage_sz;
+
+ hpi = &internal_config.hugepage_info[hpi_idx];
+ hugepage_sz = hpi->hugepage_sz;
+
+ /* no NUMA support on FreeBSD */
+
+ /* check if we've already exceeded total memory amount */
+ if (total_mem >= max_mem)
+ break;
+
+ /* first, calculate theoretical limits according to config */
+ max_type_mem = RTE_MIN(max_mem - total_mem,
+ (uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);
+ max_segs = RTE_MAX_MEMSEG_PER_TYPE;
+
+ /* now, limit all of that to whatever will actually be
+ * available to us, because without dynamic allocation support,
+ * all of that extra memory will be sitting there being useless
+ * and slowing down core dumps in case of a crash.
+ *
+ * we need (N*2)-1 segments because we cannot guarantee that
+ * each segment will be IOVA-contiguous with the previous one,
+ * so we will allocate more and put spaces in between segments
+ * that are non-contiguous.
+ */
+ avail_segs = (hpi->num_pages[0] * 2) - 1;
+ avail_mem = avail_segs * hugepage_sz;
+
+ max_type_mem = RTE_MIN(avail_mem, max_type_mem);
+ max_segs = RTE_MIN(avail_segs, max_segs);
+
+ type_msl_idx = 0;
+ while (total_type_mem < max_type_mem &&
+ total_segs < max_segs) {
+ uint64_t cur_max_mem, cur_mem;
+ unsigned int n_segs;
+
+ if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
+ RTE_LOG(ERR, EAL,
+ "No more space in memseg lists, please increase %s\n",
+ RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
+ return -1;
+ }
+
+ msl = &mcfg->memsegs[msl_idx++];
+
+ cur_max_mem = max_type_mem - total_type_mem;
+
+ cur_mem = get_mem_amount(hugepage_sz,
+ cur_max_mem);
+ n_segs = cur_mem / hugepage_sz;
+
+ if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+ 0, type_msl_idx))
+ return -1;
+
+ total_segs += msl->memseg_arr.len;
+ total_type_mem = total_segs * hugepage_sz;
+ type_msl_idx++;
+
+ if (alloc_va_space(msl)) {
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
+ return -1;
+ }
+ }
+ total_mem += total_type_mem;
+ }
+ return 0;
+}
+
+static int
+memseg_secondary_init(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int msl_idx = 0;
+ struct rte_memseg_list *msl;
+
+ for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+
+ msl = &mcfg->memsegs[msl_idx];
+
+ /* skip empty memseg lists */
+ if (msl->memseg_arr.len == 0)
+ continue;
+
+ if (rte_fbarray_attach(&msl->memseg_arr)) {
+ RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
+ return -1;
+ }
+
+ /* preallocate VA space */
+ if (alloc_va_space(msl)) {
+ RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_eal_memseg_init(void)
+{
+ return rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ memseg_primary_init() :
+ memseg_secondary_init();
+}
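
To make the "(N*2)-1 segments" sizing above concrete, a worked example with illustrative contigmem values:

#include <stdint.h>

static void
sizing_example(void)
{
	/* hw.contigmem.num_buffers = 4, each buffer 1 GB (example values) */
	unsigned int n_pages = 4;
	uint64_t hugepage_sz = 1ULL << 30;

	int avail_segs = (n_pages * 2) - 1;            /* 7 segments */
	uint64_t avail_mem = avail_segs * hugepage_sz; /* 7 GB of VA */

	/* worst case no two buffers are IOVA-adjacent, so the memseg
	 * list layout becomes P H P H P H P (P = page, H = hole):
	 * 4 pages still fit with a hole reserved between each pair.
	 */
	(void)avail_mem;
}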
diff --git a/lib/librte_eal/bsdapp/eal/eal_thread.c b/lib/librte_eal/bsdapp/eal/eal_thread.c
index d602daf8..309b5872 100644
--- a/lib/librte_eal/bsdapp/eal/eal_thread.c
+++ b/lib/librte_eal/bsdapp/eal/eal_thread.c
@@ -119,7 +119,7 @@ eal_thread_loop(__attribute__((unused)) void *arg)
if (eal_thread_set_affinity() < 0)
rte_panic("cannot set affinity\n");
- ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+ ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%p;cpuset=[%s%s])\n",
lcore_id, thread_id, cpuset, ret == 0 ? "" : "...");
diff --git a/lib/librte_eal/bsdapp/eal/meson.build b/lib/librte_eal/bsdapp/eal/meson.build
index e83fc919..3945b529 100644
--- a/lib/librte_eal/bsdapp/eal/meson.build
+++ b/lib/librte_eal/bsdapp/eal/meson.build
@@ -4,12 +4,17 @@
env_objs = []
env_headers = []
env_sources = files('eal_alarm.c',
+ 'eal_cpuflags.c',
'eal_debug.c',
'eal_hugepage_info.c',
'eal_interrupts.c',
'eal_lcore.c',
+ 'eal_memalloc.c',
'eal_thread.c',
'eal_timer.c',
'eal.c',
'eal_memory.c',
+ 'eal_dev.c'
)
+
+deps += ['kvargs']
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
index ea824a3a..cca68826 100644
--- a/lib/librte_eal/common/Makefile
+++ b/lib/librte_eal/common/Makefile
@@ -11,12 +11,12 @@ INC += rte_per_lcore.h rte_random.h
INC += rte_tailq.h rte_interrupts.h rte_alarm.h
INC += rte_string_fns.h rte_version.h
INC += rte_eal_memconfig.h rte_malloc_heap.h
-INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h
+INC += rte_hexdump.h rte_devargs.h rte_bus.h rte_dev.h rte_class.h
INC += rte_pci_dev_feature_defs.h rte_pci_dev_features.h
INC += rte_malloc.h rte_keepalive.h rte_time.h
INC += rte_service.h rte_service_component.h
INC += rte_bitmap.h rte_vfio.h rte_hypervisor.h rte_test.h
-INC += rte_reciprocal.h
+INC += rte_reciprocal.h rte_fbarray.h rte_uuid.h
GENERIC_INC := rte_atomic.h rte_byteorder.h rte_cycles.h rte_prefetch.h
GENERIC_INC += rte_spinlock.h rte_memcpy.h rte_cpuflags.h rte_rwlock.h
diff --git a/lib/librte_eal/common/arch/arm/rte_cpuflags.c b/lib/librte_eal/common/arch/arm/rte_cpuflags.c
index 88f1cbe3..caf3dc83 100644
--- a/lib/librte_eal/common/arch/arm/rte_cpuflags.c
+++ b/lib/librte_eal/common/arch/arm/rte_cpuflags.c
@@ -1,34 +1,6 @@
-/*
- * BSD LICENSE
- *
- * Copyright (C) Cavium, Inc. 2015.
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) Cavium, Inc. 2015.
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#include "rte_cpuflags.h"
@@ -133,22 +105,10 @@ const struct feature_entry rte_cpu_feature_table[] = {
static void
rte_cpu_get_features(hwcap_registers_t out)
{
- int auxv_fd;
- _Elfx_auxv_t auxv;
-
- auxv_fd = open("/proc/self/auxv", O_RDONLY);
- assert(auxv_fd != -1);
- while (read(auxv_fd, &auxv, sizeof(auxv)) == sizeof(auxv)) {
- if (auxv.a_type == AT_HWCAP) {
- out[REG_HWCAP] = auxv.a_un.a_val;
- } else if (auxv.a_type == AT_HWCAP2) {
- out[REG_HWCAP2] = auxv.a_un.a_val;
- } else if (auxv.a_type == AT_PLATFORM) {
- if (!strcmp((const char *)auxv.a_un.a_val, PLATFORM_STR))
- out[REG_PLATFORM] = 0x0001;
- }
- }
- close(auxv_fd);
+ out[REG_HWCAP] = rte_cpu_getauxval(AT_HWCAP);
+ out[REG_HWCAP2] = rte_cpu_getauxval(AT_HWCAP2);
+ if (!rte_cpu_strcmp_auxval(AT_PLATFORM, PLATFORM_STR))
+ out[REG_PLATFORM] = 0x0001;
}
/*
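
The hunk above replaces the open-coded /proc/self/auxv walk with the new rte_cpu_getauxval() helper. On Linux with glibc that helper can be a thin wrapper, sketched below under the assumption that getauxval(3) is available; the FreeBSD stub earlier in this patch simply returns 0.

/* illustrative Linux-side sketch of rte_cpu_getauxval() */
#include <sys/auxv.h>

unsigned long
rte_cpu_getauxval(unsigned long type)
{
	return getauxval(type);
}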
diff --git a/lib/librte_eal/common/arch/arm/rte_hypervisor.c b/lib/librte_eal/common/arch/arm/rte_hypervisor.c
index 3792fe2c..08a1c97d 100644
--- a/lib/librte_eal/common/arch/arm/rte_hypervisor.c
+++ b/lib/librte_eal/common/arch/arm/rte_hypervisor.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include "rte_hypervisor.h"
diff --git a/lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c b/lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c
index 970a61c5..e7a82452 100644
--- a/lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c
+++ b/lib/librte_eal/common/arch/ppc_64/rte_cpuflags.c
@@ -104,19 +104,8 @@ const struct feature_entry rte_cpu_feature_table[] = {
static void
rte_cpu_get_features(hwcap_registers_t out)
{
- int auxv_fd;
- Elf64_auxv_t auxv;
-
- auxv_fd = open("/proc/self/auxv", O_RDONLY);
- assert(auxv_fd != -1);
- while (read(auxv_fd, &auxv,
- sizeof(Elf64_auxv_t)) == sizeof(Elf64_auxv_t)) {
- if (auxv.a_type == AT_HWCAP)
- out[REG_HWCAP] = auxv.a_un.a_val;
- else if (auxv.a_type == AT_HWCAP2)
- out[REG_HWCAP2] = auxv.a_un.a_val;
- }
- close(auxv_fd);
+ out[REG_HWCAP] = rte_cpu_getauxval(AT_HWCAP);
+ out[REG_HWCAP2] = rte_cpu_getauxval(AT_HWCAP2);
}
/*
diff --git a/lib/librte_eal/common/arch/ppc_64/rte_hypervisor.c b/lib/librte_eal/common/arch/ppc_64/rte_hypervisor.c
index 3792fe2c..08a1c97d 100644
--- a/lib/librte_eal/common/arch/ppc_64/rte_hypervisor.c
+++ b/lib/librte_eal/common/arch/ppc_64/rte_hypervisor.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include "rte_hypervisor.h"
diff --git a/lib/librte_eal/common/arch/x86/rte_hypervisor.c b/lib/librte_eal/common/arch/x86/rte_hypervisor.c
index edf07be1..c38cfc09 100644
--- a/lib/librte_eal/common/arch/x86/rte_hypervisor.c
+++ b/lib/librte_eal/common/arch/x86/rte_hypervisor.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include "rte_hypervisor.h"
diff --git a/lib/librte_eal/common/eal_common_bus.c b/lib/librte_eal/common/eal_common_bus.c
index 3e022d51..0943851c 100644
--- a/lib/librte_eal/common/eal_common_bus.c
+++ b/lib/librte_eal/common/eal_common_bus.c
@@ -36,6 +36,7 @@
#include <rte_bus.h>
#include <rte_debug.h>
+#include <rte_string_fns.h>
#include "eal_private.h"
@@ -212,7 +213,7 @@ rte_bus_find_by_device_name(const char *str)
char name[RTE_DEV_NAME_MAX_LEN];
char *c;
- snprintf(name, sizeof(name), "%s", str);
+ strlcpy(name, str, sizeof(name));
c = strchr(name, ',');
if (c != NULL)
c[0] = '\0';
diff --git a/lib/librte_eal/common/eal_common_class.c b/lib/librte_eal/common/eal_common_class.c
new file mode 100644
index 00000000..404a9065
--- /dev/null
+++ b/lib/librte_eal/common/eal_common_class.c
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Gaëtan Rivet
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_class.h>
+#include <rte_debug.h>
+
+struct rte_class_list rte_class_list =
+ TAILQ_HEAD_INITIALIZER(rte_class_list);
+
+__rte_experimental void
+rte_class_register(struct rte_class *class)
+{
+ RTE_VERIFY(class);
+ RTE_VERIFY(class->name && strlen(class->name));
+
+ TAILQ_INSERT_TAIL(&rte_class_list, class, next);
+ RTE_LOG(DEBUG, EAL, "Registered [%s] device class.\n", class->name);
+}
+
+__rte_experimental void
+rte_class_unregister(struct rte_class *class)
+{
+ TAILQ_REMOVE(&rte_class_list, class, next);
+ RTE_LOG(DEBUG, EAL, "Unregistered [%s] device class.\n", class->name);
+}
+
+__rte_experimental
+struct rte_class *
+rte_class_find(const struct rte_class *start, rte_class_cmp_t cmp,
+ const void *data)
+{
+ struct rte_class *cls;
+
+ if (start != NULL)
+ cls = TAILQ_NEXT(start, next);
+ else
+ cls = TAILQ_FIRST(&rte_class_list);
+ while (cls != NULL) {
+ if (cmp(cls, data) == 0)
+ break;
+ cls = TAILQ_NEXT(cls, next);
+ }
+ return cls;
+}
+
+static int
+cmp_class_name(const struct rte_class *class, const void *_name)
+{
+ const char *name = _name;
+
+ return strcmp(class->name, name);
+}
+
+__rte_experimental
+struct rte_class *
+rte_class_find_by_name(const char *name)
+{
+ return rte_class_find(NULL, cmp_class_name, (const void *)name);
+}
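
A usage sketch for the new class registry: a subsystem declares a struct rte_class, registers it at init time, and lookups by name then resolve to it (rte_class.h also ships an RTE_REGISTER_CLASS constructor macro for this). The names below are hypothetical.

#include <rte_class.h>

static struct rte_class my_class = {
	.name = "my_class",
	/* .dev_iterate = my_iterate_cb, if iteration is supported */
};

/* called at init time, e.g. from a constructor */
static void
register_my_class(void)
{
	rte_class_register(&my_class);
}

static void
lookup_example(void)
{
	/* after registration, lookups by name resolve to &my_class */
	struct rte_class *cls = rte_class_find_by_name("my_class");
	(void)cls;
}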
diff --git a/lib/librte_eal/common/eal_common_dev.c b/lib/librte_eal/common/eal_common_dev.c
index cd071442..678dbcac 100644
--- a/lib/librte_eal/common/eal_common_dev.c
+++ b/lib/librte_eal/common/eal_common_dev.c
@@ -10,24 +10,62 @@
#include <rte_compat.h>
#include <rte_bus.h>
+#include <rte_class.h>
#include <rte_dev.h>
#include <rte_devargs.h>
#include <rte_debug.h>
+#include <rte_errno.h>
+#include <rte_kvargs.h>
#include <rte_log.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
#include "eal_private.h"
-static int cmp_detached_dev_name(const struct rte_device *dev,
- const void *_name)
-{
- const char *name = _name;
+/**
+ * The device event callback description.
+ *
+ * It contains callback address to be registered by user application,
+ * the pointer to the parameters for callback, and the device name.
+ */
+struct dev_event_callback {
+ TAILQ_ENTRY(dev_event_callback) next; /**< Callbacks list */
+ rte_dev_event_cb_fn cb_fn; /**< Callback address */
+ void *cb_arg; /**< Callback parameter */
+ char *dev_name; /**< Callback device name; NULL means all devices */
+ uint32_t active; /**< Callback is executing */
+};
- /* skip attached devices */
- if (dev->driver != NULL)
- return 1;
+/** @internal Structure to keep track of registered callbacks */
+TAILQ_HEAD(dev_event_cb_list, dev_event_callback);
- return strcmp(dev->name, name);
-}
+/* The device event callback list for all registered callbacks. */
+static struct dev_event_cb_list dev_event_cbs;
+
+/* spinlock for device callbacks */
+static rte_spinlock_t dev_event_lock = RTE_SPINLOCK_INITIALIZER;
+
+struct dev_next_ctx {
+ struct rte_dev_iterator *it;
+ const char *bus_str;
+ const char *cls_str;
+};
+
+#define CTX(it, bus_str, cls_str) \
+ (&(const struct dev_next_ctx){ \
+ .it = it, \
+ .bus_str = bus_str, \
+ .cls_str = cls_str, \
+ })
+
+#define ITCTX(ptr) \
+ (((struct dev_next_ctx *)(intptr_t)ptr)->it)
+
+#define BUSCTX(ptr) \
+ (((struct dev_next_ctx *)(intptr_t)ptr)->bus_str)
+
+#define CLSCTX(ptr) \
+ (((struct dev_next_ctx *)(intptr_t)ptr)->cls_str)
static int cmp_dev_name(const struct rte_device *dev, const void *_name)
{
@@ -89,29 +127,12 @@ int rte_eal_dev_detach(struct rte_device *dev)
return ret;
}
-static char *
-full_dev_name(const char *bus, const char *dev, const char *args)
-{
- char *name;
- size_t len;
-
- len = snprintf(NULL, 0, "%s:%s,%s", bus, dev, args) + 1;
- name = calloc(1, len);
- if (name == NULL) {
- RTE_LOG(ERR, EAL, "Could not allocate full device name\n");
- return NULL;
- }
- snprintf(name, len, "%s:%s,%s", bus, dev, args);
- return name;
-}
-
int __rte_experimental rte_eal_hotplug_add(const char *busname, const char *devname,
const char *devargs)
{
struct rte_bus *bus;
struct rte_device *dev;
struct rte_devargs *da;
- char *name;
int ret;
bus = rte_bus_find_by_name(busname);
@@ -126,21 +147,16 @@ int __rte_experimental rte_eal_hotplug_add(const char *busname, const char *devn
return -ENOTSUP;
}
- name = full_dev_name(busname, devname, devargs);
- if (name == NULL)
- return -ENOMEM;
-
da = calloc(1, sizeof(*da));
- if (da == NULL) {
- ret = -ENOMEM;
- goto err_name;
- }
+ if (da == NULL)
+ return -ENOMEM;
- ret = rte_eal_devargs_parse(name, da);
+ ret = rte_devargs_parsef(da, "%s:%s,%s",
+ busname, devname, devargs);
if (ret)
goto err_devarg;
- ret = rte_eal_devargs_insert(da);
+ ret = rte_devargs_insert(da);
if (ret)
goto err_devarg;
@@ -148,30 +164,32 @@ int __rte_experimental rte_eal_hotplug_add(const char *busname, const char *devn
if (ret)
goto err_devarg;
- dev = bus->find_device(NULL, cmp_detached_dev_name, devname);
+ dev = bus->find_device(NULL, cmp_dev_name, devname);
if (dev == NULL) {
- RTE_LOG(ERR, EAL, "Cannot find unplugged device (%s)\n",
+ RTE_LOG(ERR, EAL, "Cannot find device (%s)\n",
devname);
ret = -ENODEV;
goto err_devarg;
}
+ if (dev->driver != NULL) {
+ RTE_LOG(ERR, EAL, "Device is already plugged\n");
+ return -EEXIST;
+ }
+
ret = bus->plug(dev);
if (ret) {
RTE_LOG(ERR, EAL, "Driver cannot attach the device (%s)\n",
dev->name);
goto err_devarg;
}
- free(name);
return 0;
err_devarg:
- if (rte_eal_devargs_remove(busname, devname)) {
+ if (rte_devargs_remove(busname, devname)) {
free(da->args);
free(da);
}
-err_name:
- free(name);
return ret;
}
@@ -200,10 +218,349 @@ rte_eal_hotplug_remove(const char *busname, const char *devname)
return -EINVAL;
}
+ if (dev->driver == NULL) {
+ RTE_LOG(ERR, EAL, "Device is already unplugged\n");
+ return -ENOENT;
+ }
+
ret = bus->unplug(dev);
if (ret)
RTE_LOG(ERR, EAL, "Driver cannot detach the device (%s)\n",
dev->name);
- rte_eal_devargs_remove(busname, devname);
+ rte_devargs_remove(busname, devname);
+ return ret;
+}
+
+int __rte_experimental
+rte_dev_event_callback_register(const char *device_name,
+ rte_dev_event_cb_fn cb_fn,
+ void *cb_arg)
+{
+ struct dev_event_callback *event_cb;
+ int ret;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ rte_spinlock_lock(&dev_event_lock);
+
+ if (TAILQ_EMPTY(&dev_event_cbs))
+ TAILQ_INIT(&dev_event_cbs);
+
+ TAILQ_FOREACH(event_cb, &dev_event_cbs, next) {
+ if (event_cb->cb_fn == cb_fn && event_cb->cb_arg == cb_arg) {
+ if (device_name == NULL && event_cb->dev_name == NULL)
+ break;
+ if (device_name == NULL || event_cb->dev_name == NULL)
+ continue;
+ if (!strcmp(event_cb->dev_name, device_name))
+ break;
+ }
+ }
+
+ /* create a new callback. */
+ if (event_cb == NULL) {
+ event_cb = malloc(sizeof(struct dev_event_callback));
+ if (event_cb != NULL) {
+ event_cb->cb_fn = cb_fn;
+ event_cb->cb_arg = cb_arg;
+ event_cb->active = 0;
+ if (!device_name) {
+ event_cb->dev_name = NULL;
+ } else {
+ event_cb->dev_name = strdup(device_name);
+ if (event_cb->dev_name == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+ TAILQ_INSERT_TAIL(&dev_event_cbs, event_cb, next);
+ } else {
+ RTE_LOG(ERR, EAL,
+ "Failed to allocate memory for device "
+ "event callback.");
+ ret = -ENOMEM;
+ goto error;
+ }
+ } else {
+ RTE_LOG(ERR, EAL,
+ "The callback is already exist, no need "
+ "to register again.\n");
+ ret = -EEXIST;
+ }
+
+ rte_spinlock_unlock(&dev_event_lock);
+ return 0;
+error:
+ free(event_cb);
+ rte_spinlock_unlock(&dev_event_lock);
+ return ret;
+}
+
+int __rte_experimental
+rte_dev_event_callback_unregister(const char *device_name,
+ rte_dev_event_cb_fn cb_fn,
+ void *cb_arg)
+{
+ int ret = 0;
+ struct dev_event_callback *event_cb, *next;
+
+ if (!cb_fn)
+ return -EINVAL;
+
+ rte_spinlock_lock(&dev_event_lock);
+ /* walk through the callbacks and remove all that match. */
+ for (event_cb = TAILQ_FIRST(&dev_event_cbs); event_cb != NULL;
+ event_cb = next) {
+
+ next = TAILQ_NEXT(event_cb, next);
+
+ if (device_name != NULL && event_cb->dev_name != NULL) {
+ if (!strcmp(event_cb->dev_name, device_name)) {
+ if (event_cb->cb_fn != cb_fn ||
+ (cb_arg != (void *)-1 &&
+ event_cb->cb_arg != cb_arg))
+ continue;
+ }
+ } else if (device_name != NULL) {
+ continue;
+ }
+
+ /*
+ * if this callback is not executing right now,
+ * then remove it.
+ */
+ if (event_cb->active == 0) {
+ TAILQ_REMOVE(&dev_event_cbs, event_cb, next);
+ free(event_cb);
+ ret++;
+ } else {
+ continue;
+ }
+ }
+ rte_spinlock_unlock(&dev_event_lock);
return ret;
}
+
+void
+dev_callback_process(char *device_name, enum rte_dev_event_type event)
+{
+ struct dev_event_callback *cb_lst;
+
+ if (device_name == NULL)
+ return;
+
+ rte_spinlock_lock(&dev_event_lock);
+
+ TAILQ_FOREACH(cb_lst, &dev_event_cbs, next) {
+ if (cb_lst->dev_name) {
+ if (strcmp(cb_lst->dev_name, device_name))
+ continue;
+ }
+ cb_lst->active = 1;
+ rte_spinlock_unlock(&dev_event_lock);
+ cb_lst->cb_fn(device_name, event,
+ cb_lst->cb_arg);
+ rte_spinlock_lock(&dev_event_lock);
+ cb_lst->active = 0;
+ }
+ rte_spinlock_unlock(&dev_event_lock);
+}
+
+__rte_experimental
+int
+rte_dev_iterator_init(struct rte_dev_iterator *it,
+ const char *dev_str)
+{
+ struct rte_devargs devargs;
+ struct rte_class *cls = NULL;
+ struct rte_bus *bus = NULL;
+
+ /* Having both bus_str and cls_str NULL is illegal,
+ * so mark this iterator as invalid; it only becomes
+ * valid if everything goes well.
+ */
+ it->bus_str = NULL;
+ it->cls_str = NULL;
+
+ devargs.data = dev_str;
+ if (rte_devargs_layers_parse(&devargs, dev_str))
+ goto get_out;
+
+ bus = devargs.bus;
+ cls = devargs.cls;
+ /* The string should have at least
+ * one layer specified.
+ */
+ if (bus == NULL && cls == NULL) {
+ RTE_LOG(ERR, EAL,
+ "Either bus or class must be specified.\n");
+ rte_errno = EINVAL;
+ goto get_out;
+ }
+ if (bus != NULL && bus->dev_iterate == NULL) {
+ RTE_LOG(ERR, EAL, "Bus %s not supported\n", bus->name);
+ rte_errno = ENOTSUP;
+ goto get_out;
+ }
+ if (cls != NULL && cls->dev_iterate == NULL) {
+ RTE_LOG(ERR, EAL, "Class %s not supported\n", cls->name);
+ rte_errno = ENOTSUP;
+ goto get_out;
+ }
+ it->bus_str = devargs.bus_str;
+ it->cls_str = devargs.cls_str;
+ it->dev_str = dev_str;
+ it->bus = bus;
+ it->cls = cls;
+ it->device = NULL;
+ it->class_device = NULL;
+get_out:
+ return -rte_errno;
+}
+
+static char *
+dev_str_sane_copy(const char *str)
+{
+ size_t end;
+ char *copy;
+
+ end = strcspn(str, ",/");
+ if (str[end] == ',') {
+ copy = strdup(&str[end + 1]);
+ } else {
+ /* '/' or '\0' */
+ copy = strdup("");
+ }
+ if (copy == NULL) {
+ rte_errno = ENOMEM;
+ } else {
+ char *slash;
+
+ slash = strchr(copy, '/');
+ if (slash != NULL)
+ slash[0] = '\0';
+ }
+ return copy;
+}
+
+static int
+class_next_dev_cmp(const struct rte_class *cls,
+ const void *ctx)
+{
+ struct rte_dev_iterator *it;
+ const char *cls_str = NULL;
+ void *dev;
+
+ if (cls->dev_iterate == NULL)
+ return 1;
+ it = ITCTX(ctx);
+ cls_str = CLSCTX(ctx);
+ dev = it->class_device;
+ /* it->cls_str != NULL means a class
+ * was specified in the devstr.
+ */
+ if (it->cls_str != NULL && cls != it->cls)
+ return 1;
+ /* If an error occurred previously,
+ * no need to test further.
+ */
+ if (rte_errno != 0)
+ return -1;
+ dev = cls->dev_iterate(dev, cls_str, it);
+ it->class_device = dev;
+ return dev == NULL;
+}
+
+static int
+bus_next_dev_cmp(const struct rte_bus *bus,
+ const void *ctx)
+{
+ struct rte_device *dev = NULL;
+ struct rte_class *cls = NULL;
+ struct rte_dev_iterator *it;
+ const char *bus_str = NULL;
+
+ if (bus->dev_iterate == NULL)
+ return 1;
+ it = ITCTX(ctx);
+ bus_str = BUSCTX(ctx);
+ dev = it->device;
+ /* it->bus_str != NULL means a bus
+ * was specified in the devstr.
+ */
+ if (it->bus_str != NULL && bus != it->bus)
+ return 1;
+ /* If an error occurred previously,
+ * no need to test further.
+ */
+ if (rte_errno != 0)
+ return -1;
+ if (it->cls_str == NULL) {
+ dev = bus->dev_iterate(dev, bus_str, it);
+ goto end;
+ }
+ /* cls_str != NULL */
+ if (dev == NULL) {
+next_dev_on_bus:
+ dev = bus->dev_iterate(dev, bus_str, it);
+ it->device = dev;
+ }
+ if (dev == NULL)
+ return 1;
+ if (it->cls != NULL)
+ cls = TAILQ_PREV(it->cls, rte_class_list, next);
+ cls = rte_class_find(cls, class_next_dev_cmp, ctx);
+ if (cls != NULL) {
+ it->cls = cls;
+ goto end;
+ }
+ goto next_dev_on_bus;
+end:
+ it->device = dev;
+ return dev == NULL;
+}
+__rte_experimental
+struct rte_device *
+rte_dev_iterator_next(struct rte_dev_iterator *it)
+{
+ struct rte_bus *bus = NULL;
+ int old_errno = rte_errno;
+ char *bus_str = NULL;
+ char *cls_str = NULL;
+
+ rte_errno = 0;
+ if (it->bus_str == NULL && it->cls_str == NULL) {
+ /* Invalid iterator. */
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ if (it->bus != NULL)
+ bus = TAILQ_PREV(it->bus, rte_bus_list, next);
+ if (it->bus_str != NULL) {
+ bus_str = dev_str_sane_copy(it->bus_str);
+ if (bus_str == NULL)
+ goto out;
+ }
+ if (it->cls_str != NULL) {
+ cls_str = dev_str_sane_copy(it->cls_str);
+ if (cls_str == NULL)
+ goto out;
+ }
+ while ((bus = rte_bus_find(bus, bus_next_dev_cmp,
+ CTX(it, bus_str, cls_str)))) {
+ if (it->device != NULL) {
+ it->bus = bus;
+ goto out;
+ }
+ if (it->bus_str != NULL ||
+ rte_errno != 0)
+ break;
+ }
+ if (rte_errno == 0)
+ rte_errno = old_errno;
+out:
+ free(bus_str);
+ free(cls_str);
+ return it->device;
+}
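
A usage sketch for the iterator pair above: initialize with a layered device string, then pull matching devices one at a time. The "bus=pci" filter is an example; any bus or class that implements dev_iterate works.

#include <stdio.h>
#include <rte_dev.h>

static void
list_pci_devices(void)
{
	struct rte_dev_iterator it;
	struct rte_device *dev;

	if (rte_dev_iterator_init(&it, "bus=pci") < 0)
		return;

	while ((dev = rte_dev_iterator_next(&it)) != NULL)
		printf("found device %s\n", dev->name);
}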
diff --git a/lib/librte_eal/common/eal_common_devargs.c b/lib/librte_eal/common/eal_common_devargs.c
index 810b3e18..dac2402a 100644
--- a/lib/librte_eal/common/eal_common_devargs.c
+++ b/lib/librte_eal/common/eal_common_devargs.c
@@ -11,13 +11,22 @@
#include <stdio.h>
#include <string.h>
+#include <stdarg.h>
+#include <rte_bus.h>
+#include <rte_class.h>
#include <rte_compat.h>
#include <rte_dev.h>
#include <rte_devargs.h>
+#include <rte_errno.h>
+#include <rte_kvargs.h>
+#include <rte_log.h>
#include <rte_tailq.h>
#include "eal_private.h"
+/** user device double-linked queue type definition */
+TAILQ_HEAD(rte_devargs_list, rte_devargs);
+
/** Global list of user devices */
struct rte_devargs_list devargs_list =
TAILQ_HEAD_INITIALIZER(devargs_list);
@@ -52,22 +61,164 @@ rte_eal_parse_devargs_str(const char *devargs_str,
return 0;
}
+static size_t
+devargs_layer_count(const char *s)
+{
+ size_t i = s ? 1 : 0;
+
+ while (s != NULL && s[0] != '\0') {
+ i += s[0] == '/';
+ s++;
+ }
+ return i;
+}
+
+int
+rte_devargs_layers_parse(struct rte_devargs *devargs,
+ const char *devstr)
+{
+ struct {
+ const char *key;
+ const char *str;
+ struct rte_kvargs *kvlist;
+ } layers[] = {
+ { "bus=", NULL, NULL, },
+ { "class=", NULL, NULL, },
+ { "driver=", NULL, NULL, },
+ };
+ struct rte_kvargs_pair *kv = NULL;
+ struct rte_class *cls = NULL;
+ struct rte_bus *bus = NULL;
+ const char *s = devstr;
+ size_t nblayer;
+ size_t i = 0;
+ int ret = 0;
+
+ /* Split the devstr into layers. */
+ nblayer = devargs_layer_count(devstr);
+ if (nblayer > RTE_DIM(layers)) {
+ RTE_LOG(ERR, EAL, "Invalid format: too many layers (%zu)\n",
+ nblayer);
+ ret = -E2BIG;
+ goto get_out;
+ }
+
+ /* If the devargs already points to devstr
+ * as its source data, do not allocate
+ * anything; keep referring to it directly.
+ */
+ if (devargs->data != devstr) {
+ devargs->data = strdup(devstr);
+ if (devargs->data == NULL) {
+ RTE_LOG(ERR, EAL, "OOM\n");
+ ret = -ENOMEM;
+ goto get_out;
+ }
+ s = devargs->data;
+ }
+
+ while (s != NULL) {
+ if (i >= RTE_DIM(layers)) {
+ RTE_LOG(ERR, EAL, "Unrecognized layer %s\n", s);
+ ret = -EINVAL;
+ goto get_out;
+ }
+ /*
+ * The last layer is free-form.
+ * The "driver" key is not required (but accepted).
+ */
+ if (strncmp(layers[i].key, s, strlen(layers[i].key)) &&
+ i != RTE_DIM(layers) - 1)
+ goto next_layer;
+ layers[i].str = s;
+ layers[i].kvlist = rte_kvargs_parse_delim(s, NULL, "/");
+ if (layers[i].kvlist == NULL) {
+ RTE_LOG(ERR, EAL, "Could not parse %s\n", s);
+ ret = -EINVAL;
+ goto get_out;
+ }
+ s = strchr(s, '/');
+ if (s != NULL)
+ s++;
+next_layer:
+ i++;
+ }
+
+ /* Parse each sub-list. */
+ for (i = 0; i < RTE_DIM(layers); i++) {
+ if (layers[i].kvlist == NULL)
+ continue;
+ kv = &layers[i].kvlist->pairs[0];
+ if (strcmp(kv->key, "bus") == 0) {
+ bus = rte_bus_find_by_name(kv->value);
+ if (bus == NULL) {
+ RTE_LOG(ERR, EAL, "Could not find bus \"%s\"\n",
+ kv->value);
+ ret = -EFAULT;
+ goto get_out;
+ }
+ } else if (strcmp(kv->key, "class") == 0) {
+ cls = rte_class_find_by_name(kv->value);
+ if (cls == NULL) {
+ RTE_LOG(ERR, EAL, "Could not find class \"%s\"\n",
+ kv->value);
+ ret = -EFAULT;
+ goto get_out;
+ }
+ } else if (strcmp(kv->key, "driver") == 0) {
+ /* Ignore */
+ continue;
+ }
+ }
+
+ /* Fill devargs fields. */
+ devargs->bus_str = layers[0].str;
+ devargs->cls_str = layers[1].str;
+ devargs->drv_str = layers[2].str;
+ devargs->bus = bus;
+ devargs->cls = cls;
+
+ /* If we own the data, split the layer
+ * strings in place (turning each '/' into
+ * a NUL) to ease their parsing afterward.
+ */
+ if (devargs->data != devstr) {
+ char *s = (void *)(intptr_t)(devargs->data);
+
+ while ((s = strchr(s, '/'))) {
+ *s = '\0';
+ s++;
+ }
+ }
+
+get_out:
+ for (i = 0; i < RTE_DIM(layers); i++) {
+ if (layers[i].kvlist)
+ rte_kvargs_free(layers[i].kvlist);
+ }
+ if (ret != 0)
+ rte_errno = -ret;
+ return ret;
+}
+
static int
bus_name_cmp(const struct rte_bus *bus, const void *name)
{
return strncmp(bus->name, name, strlen(bus->name));
}
-int __rte_experimental
-rte_eal_devargs_parse(const char *dev, struct rte_devargs *da)
+__rte_experimental
+int
+rte_devargs_parse(struct rte_devargs *da, const char *dev)
{
struct rte_bus *bus = NULL;
const char *devname;
const size_t maxlen = sizeof(da->name);
size_t i;
- if (dev == NULL || da == NULL)
+ if (da == NULL)
return -EINVAL;
+
/* Retrieve eventual bus info */
do {
devname = dev;
@@ -84,7 +235,7 @@ rte_eal_devargs_parse(const char *dev, struct rte_devargs *da)
da->name[i] = devname[i];
i++;
if (i == maxlen) {
- fprintf(stderr, "WARNING: Parsing \"%s\": device name should be shorter than %zu\n",
+ RTE_LOG(WARNING, EAL, "Parsing \"%s\": device name should be shorter than %zu\n",
dev, maxlen);
da->name[i - 1] = '\0';
return -EINVAL;
@@ -94,7 +245,7 @@ rte_eal_devargs_parse(const char *dev, struct rte_devargs *da)
if (bus == NULL) {
bus = rte_bus_find_by_device_name(da->name);
if (bus == NULL) {
- fprintf(stderr, "ERROR: failed to parse device \"%s\"\n",
+ RTE_LOG(ERR, EAL, "failed to parse device \"%s\"\n",
da->name);
return -EFAULT;
}
@@ -106,18 +257,46 @@ rte_eal_devargs_parse(const char *dev, struct rte_devargs *da)
else
da->args = strdup("");
if (da->args == NULL) {
- fprintf(stderr, "ERROR: not enough memory to parse arguments\n");
+ RTE_LOG(ERR, EAL, "not enough memory to parse arguments\n");
return -ENOMEM;
}
return 0;
}
+__rte_experimental
+int
+rte_devargs_parsef(struct rte_devargs *da, const char *format, ...)
+{
+ va_list ap;
+ size_t len;
+ char *dev;
+ int ret;
+
+ if (da == NULL)
+ return -EINVAL;
+
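+ /* two-pass formatting: the first vsnprintf() call, with a NULL
+ * buffer, only computes the required length; the second call
+ * fills the freshly allocated buffer.
+ */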
+ va_start(ap, format);
+ len = vsnprintf(NULL, 0, format, ap);
+ va_end(ap);
+
+ dev = calloc(1, len + 1);
+ if (dev == NULL) {
+ RTE_LOG(ERR, EAL, "not enough memory to parse device\n");
+ return -ENOMEM;
+ }
+
+ va_start(ap, format);
+ vsnprintf(dev, len + 1, format, ap);
+ va_end(ap);
+
+ /* rte_devargs_parse() copies what it needs from dev */
+ ret = rte_devargs_parse(da, dev);
+ free(dev);
+ return ret;
+}
+
int __rte_experimental
-rte_eal_devargs_insert(struct rte_devargs *da)
+rte_devargs_insert(struct rte_devargs *da)
{
int ret;
- ret = rte_eal_devargs_remove(da->bus->name, da->name);
+ ret = rte_devargs_remove(da->bus->name, da->name);
if (ret < 0)
return ret;
TAILQ_INSERT_TAIL(&devargs_list, da, next);
@@ -125,8 +304,9 @@ rte_eal_devargs_insert(struct rte_devargs *da)
}
/* store a whitelist parameter for later parsing */
+__rte_experimental
int
-rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str)
+rte_devargs_add(enum rte_devtype devtype, const char *devargs_str)
{
struct rte_devargs *devargs = NULL;
struct rte_bus *bus = NULL;
@@ -137,7 +317,7 @@ rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str)
if (devargs == NULL)
goto fail;
- if (rte_eal_devargs_parse(dev, devargs))
+ if (rte_devargs_parse(devargs, dev))
goto fail;
devargs->type = devtype;
bus = devargs->bus;
@@ -162,7 +342,7 @@ fail:
}
int __rte_experimental
-rte_eal_devargs_remove(const char *busname, const char *devname)
+rte_devargs_remove(const char *busname, const char *devname)
{
struct rte_devargs *d;
void *tmp;
@@ -180,8 +360,9 @@ rte_eal_devargs_remove(const char *busname, const char *devname)
}
/* count the number of devices of a specified type */
+__rte_experimental
unsigned int
-rte_eal_devargs_type_count(enum rte_devtype devtype)
+rte_devargs_type_count(enum rte_devtype devtype)
{
struct rte_devargs *devargs;
unsigned int count = 0;
@@ -195,8 +376,9 @@ rte_eal_devargs_type_count(enum rte_devtype devtype)
}
/* dump the user devices on the console */
+__rte_experimental
void
-rte_eal_devargs_dump(FILE *f)
+rte_devargs_dump(FILE *f)
{
struct rte_devargs *devargs;
@@ -207,3 +389,23 @@ rte_eal_devargs_dump(FILE *f)
devargs->name, devargs->args);
}
}
+
+/* bus-aware rte_devargs iterator. */
+__rte_experimental
+struct rte_devargs *
+rte_devargs_next(const char *busname, const struct rte_devargs *start)
+{
+ struct rte_devargs *da;
+
+ if (start != NULL)
+ da = TAILQ_NEXT(start, next);
+ else
+ da = TAILQ_FIRST(&devargs_list);
+ while (da != NULL) {
+ if (busname == NULL ||
+ (strcmp(busname, da->bus->name) == 0))
+ return da;
+ da = TAILQ_NEXT(da, next);
+ }
+ return NULL;
+}
diff --git a/lib/librte_eal/common/eal_common_fbarray.c b/lib/librte_eal/common/eal_common_fbarray.c
new file mode 100644
index 00000000..43caf3ce
--- /dev/null
+++ b/lib/librte_eal/common/eal_common_fbarray.c
@@ -0,0 +1,1239 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <limits.h>
+#include <sys/mman.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/file.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+#include <rte_tailq.h>
+
+#include "eal_filesystem.h"
+#include "eal_private.h"
+
+#include "rte_fbarray.h"
+
+#define MASK_SHIFT 6ULL
+#define MASK_ALIGN (1ULL << MASK_SHIFT)
+#define MASK_LEN_TO_IDX(x) ((x) >> MASK_SHIFT)
+#define MASK_LEN_TO_MOD(x) ((x) - RTE_ALIGN_FLOOR(x, MASK_ALIGN))
+#define MASK_GET_IDX(idx, mod) ((idx << MASK_SHIFT) + mod)
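+/* With MASK_SHIFT == 6, each mask word covers 64 entries; e.g. index 70
+ * lives in word MASK_LEN_TO_IDX(70) == 1 at bit MASK_LEN_TO_MOD(70) == 6,
+ * and MASK_GET_IDX(1, 6) maps back to 70.
+ */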
+
+/*
+ * This is a mask that is always stored at the end of the array, to provide a
+ * fast way of finding free/used spots without looping through each element.
+ */
+
+struct used_mask {
+ unsigned int n_masks;
+ uint64_t data[];
+};
+
+static size_t
+calc_mask_size(unsigned int len)
+{
+ /* mask must be a multiple of MASK_ALIGN, even though the length of the
+ * array itself may not be aligned on that boundary.
+ */
+ len = RTE_ALIGN_CEIL(len, MASK_ALIGN);
+ return sizeof(struct used_mask) +
+ sizeof(uint64_t) * MASK_LEN_TO_IDX(len);
+}
+
+static size_t
+calc_data_size(size_t page_sz, unsigned int elt_sz, unsigned int len)
+{
+ size_t data_sz = elt_sz * len;
+ size_t msk_sz = calc_mask_size(len);
+ return RTE_ALIGN_CEIL(data_sz + msk_sz, page_sz);
+}
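+/* e.g. with illustrative values len = 100, elt_sz = 64, page_sz = 4096:
+ * the mask needs MASK_LEN_TO_IDX(128) == 2 words, so 6400 bytes of data
+ * plus the mask header round up to two 4 KiB pages.
+ */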
+
+static struct used_mask *
+get_used_mask(void *data, unsigned int elt_sz, unsigned int len)
+{
+ return (struct used_mask *) RTE_PTR_ADD(data, elt_sz * len);
+}
+
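+/* Grow the backing file to len bytes and map it MAP_FIXED over the
+ * previously reserved virtual area at addr, so that the area becomes
+ * file-backed at a fixed address.
+ */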
+static int
+resize_and_map(int fd, void *addr, size_t len)
+{
+ void *map_addr;
+
+ if (ftruncate(fd, len)) {
+ RTE_LOG(ERR, EAL, "Cannot truncate fd %i: %s\n",
+ fd, strerror(errno));
+ /* pass errno up the chain */
+ rte_errno = errno;
+ return -1;
+ }
+
+ map_addr = mmap(addr, len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, fd, 0);
+ if (map_addr != addr) {
+ RTE_LOG(ERR, EAL, "mmap() failed: %s\n", strerror(errno));
+ /* pass errno up the chain */
+ rte_errno = errno;
+ return -1;
+ }
+ return 0;
+}
+
+static int
+find_next_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+ bool used)
+{
+ const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+ arr->len);
+ unsigned int msk_idx, lookahead_idx, first, first_mod;
+ unsigned int last, last_mod;
+ uint64_t last_msk, ignore_msk;
+
+ /*
+ * mask only has granularity of MASK_ALIGN, but start may not be aligned
+ * on that boundary, so construct a special mask to exclude anything we
+ * don't want to see to avoid confusing ctz.
+ */
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+ ignore_msk = ~((1ULL << first_mod) - 1);
+
+ /* array length may not be aligned, so calculate ignore mask for last
+ * mask index.
+ */
+ last = MASK_LEN_TO_IDX(arr->len);
+ last_mod = MASK_LEN_TO_MOD(arr->len);
+ last_msk = ~(-1ULL << last_mod);
+
+ for (msk_idx = first; msk_idx < msk->n_masks; msk_idx++) {
+ uint64_t cur_msk, lookahead_msk;
+ unsigned int run_start, clz, left;
+ bool found = false;
+ /*
+ * The process of getting n consecutive bits for arbitrary n is
+ * a bit involved, but here it is in a nutshell:
+ *
+ * 1. let n be the number of consecutive bits we're looking for
+ * 2. check if n can fit in one mask, and if so, do n-1
+ * rshift-ands to see if there is an appropriate run inside
+ * our current mask
+ * 2a. if we found a run, bail out early
+ * 2b. if we didn't find a run, proceed
+ * 3. invert the mask and count leading zeroes (that is, count
+ * how many consecutive set bits we had starting from the
+ * end of current mask) as k
+ * 3a. if k is 0, continue to next mask
+ * 3b. if k is not 0, we have a potential run
+ * 4. to satisfy our requirements, next mask must have n-k
+ * consecutive set bits right at the start, so we will do
+ * (n-k-1) rshift-ands and check if first bit is set.
+ *
+ * Step 4 will need to be repeated if (n-k) > MASK_ALIGN until
+ * we either run out of masks, lose the run, or find what we
+ * were looking for.
+ */
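+ /*
+ * Worked example with illustrative values: for n = 3 and
+ * cur_msk = 0b0111000, two rshift-ands leave 0b0001000; ctz of
+ * that is 3, i.e. the run of 3 set bits starts at bit 3 of this
+ * mask word.
+ */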
+ cur_msk = msk->data[msk_idx];
+ left = n;
+
+ /* if we're looking for free spaces, invert the mask */
+ if (!used)
+ cur_msk = ~cur_msk;
+
+ /* combine current ignore mask with last index ignore mask */
+ if (msk_idx == last)
+ ignore_msk |= last_msk;
+
+ /* if we have an ignore mask, apply it once and clear it */
+ if (ignore_msk) {
+ cur_msk &= ignore_msk;
+ ignore_msk = 0;
+ }
+
+ /* if n can fit within a single mask, do a search */
+ if (n <= MASK_ALIGN) {
+ uint64_t tmp_msk = cur_msk;
+ unsigned int s_idx;
+ for (s_idx = 0; s_idx < n - 1; s_idx++)
+ tmp_msk &= tmp_msk >> 1ULL;
+ /* we found what we were looking for */
+ if (tmp_msk != 0) {
+ run_start = __builtin_ctzll(tmp_msk);
+ return MASK_GET_IDX(msk_idx, run_start);
+ }
+ }
+
+ /*
+ * we didn't find our run within the mask, or n > MASK_ALIGN,
+ * so we're going for plan B.
+ */
+
+ /* count leading zeroes on inverted mask */
+ if (~cur_msk == 0)
+ clz = sizeof(cur_msk) * 8;
+ else
+ clz = __builtin_clzll(~cur_msk);
+
+ /* if there aren't any runs at the end either, just continue */
+ if (clz == 0)
+ continue;
+
+ /* we have a partial run at the end, so try looking ahead */
+ run_start = MASK_ALIGN - clz;
+ left -= clz;
+
+ for (lookahead_idx = msk_idx + 1; lookahead_idx < msk->n_masks;
+ lookahead_idx++) {
+ unsigned int s_idx, need;
+ lookahead_msk = msk->data[lookahead_idx];
+
+ /* if we're looking for free space, invert the mask */
+ if (!used)
+ lookahead_msk = ~lookahead_msk;
+
+ /* figure out how many consecutive bits we need here */
+ need = RTE_MIN(left, MASK_ALIGN);
+
+ for (s_idx = 0; s_idx < need - 1; s_idx++)
+ lookahead_msk &= lookahead_msk >> 1ULL;
+
+ /* if first bit is not set, we've lost the run */
+ if ((lookahead_msk & 1) == 0) {
+ /*
+ * we've scanned this far, so we know there are
+ * no runs in the space we've lookahead-scanned
+ * as well, so skip that on next iteration.
+ */
+ ignore_msk = ~((1ULL << need) - 1);
+ msk_idx = lookahead_idx;
+ break;
+ }
+
+ left -= need;
+
+ /* check if we've found what we were looking for */
+ if (left == 0) {
+ found = true;
+ break;
+ }
+ }
+
+ /* we didn't find anything, so continue */
+ if (!found)
+ continue;
+
+ return MASK_GET_IDX(msk_idx, run_start);
+ }
+ /* we didn't find anything */
+ rte_errno = used ? ENOENT : ENOSPC;
+ return -1;
+}
+
+static int
+find_next(const struct rte_fbarray *arr, unsigned int start, bool used)
+{
+ const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+ arr->len);
+ unsigned int idx, first, first_mod;
+ unsigned int last, last_mod;
+ uint64_t last_msk, ignore_msk;
+
+ /*
+ * mask only has granularity of MASK_ALIGN, but start may not be aligned
+ * on that boundary, so construct a special mask to exclude anything we
+ * don't want to see to avoid confusing ctz.
+ */
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+ ignore_msk = ~((1ULL << first_mod) - 1ULL);
+
+ /* array length may not be aligned, so calculate ignore mask for last
+ * mask index.
+ */
+ last = MASK_LEN_TO_IDX(arr->len);
+ last_mod = MASK_LEN_TO_MOD(arr->len);
+ last_msk = ~(-(1ULL) << last_mod);
+
+ for (idx = first; idx < msk->n_masks; idx++) {
+ uint64_t cur = msk->data[idx];
+ int found;
+
+ /* if we're looking for free entries, invert mask */
+ if (!used)
+ cur = ~cur;
+
+ if (idx == last)
+ cur &= last_msk;
+
+ /* ignore everything before start on first iteration */
+ if (idx == first)
+ cur &= ignore_msk;
+
+ /* check if we have any entries */
+ if (cur == 0)
+ continue;
+
+ /*
+ * find first set bit - that will correspond to whatever it is
+ * that we're looking for.
+ */
+ found = __builtin_ctzll(cur);
+ return MASK_GET_IDX(idx, found);
+ }
+ /* we didn't find anything */
+ rte_errno = used ? ENOENT : ENOSPC;
+ return -1;
+}
+
+static int
+find_contig(const struct rte_fbarray *arr, unsigned int start, bool used)
+{
+ const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+ arr->len);
+ unsigned int idx, first, first_mod;
+ unsigned int last, last_mod;
+ uint64_t last_msk;
+ unsigned int need_len, result = 0;
+
+ /* array length may not be aligned, so calculate ignore mask for last
+ * mask index.
+ */
+ last = MASK_LEN_TO_IDX(arr->len);
+ last_mod = MASK_LEN_TO_MOD(arr->len);
+ last_msk = ~(-(1ULL) << last_mod);
+
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+ for (idx = first; idx < msk->n_masks; idx++, result += need_len) {
+ uint64_t cur = msk->data[idx];
+ unsigned int run_len;
+
+ need_len = MASK_ALIGN;
+
+ /* if we're looking for free entries, invert mask */
+ if (!used)
+ cur = ~cur;
+
+ /* if this is last mask, ignore everything after last bit */
+ if (idx == last)
+ cur &= last_msk;
+
+ /* ignore everything before start on first iteration */
+ if (idx == first) {
+ cur >>= first_mod;
+ /* at the start, we don't need the full mask len */
+ need_len -= first_mod;
+ }
+
+ /* we will be looking for zeroes, so invert the mask */
+ cur = ~cur;
+
+ /* if mask is zero, we have a complete run */
+ if (cur == 0)
+ continue;
+
+ /*
+ * see if current run ends before mask end.
+ */
+ run_len = __builtin_ctzll(cur);
+
+ /* add however many zeroes we've had in the last run and quit */
+ if (run_len < need_len) {
+ result += run_len;
+ break;
+ }
+ }
+ return result;
+}
+
+static int
+find_prev_n(const struct rte_fbarray *arr, unsigned int start, unsigned int n,
+ bool used)
+{
+ const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+ arr->len);
+ unsigned int msk_idx, lookbehind_idx, first, first_mod;
+ uint64_t ignore_msk;
+
+ /*
+ * mask only has granularity of MASK_ALIGN, but start may not be aligned
+ * on that boundary, so construct a special mask to exclude anything we
+ * don't want to see to avoid confusing ctz.
+ */
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+ /* we're going backwards, so mask must start from the top */
+ ignore_msk = first_mod == MASK_ALIGN - 1 ?
+ -1ULL : /* prevent overflow */
+ ~(-1ULL << (first_mod + 1));
+
+ /* go backwards, include zero */
+ msk_idx = first;
+ do {
+ uint64_t cur_msk, lookbehind_msk;
+ unsigned int run_start, run_end, ctz, left;
+ bool found = false;
+ /*
+ * The process of getting n consecutive bits from the top for
+ * arbitrary n is a bit involved, but here it is in a nutshell:
+ *
+ * 1. let n be the number of consecutive bits we're looking for
+ * 2. check if n can fit in one mask, and if so, do n-1
+ * lshift-ands to see if there is an appropriate run inside
+ * our current mask
+ * 2a. if we found a run, bail out early
+ * 2b. if we didn't find a run, proceed
+ * 3. invert the mask and count trailing zeroes (that is, count
+ * how many consecutive set bits we had starting from the
+ * start of current mask) as k
+ * 3a. if k is 0, continue to next mask
+ * 3b. if k is not 0, we have a potential run
+ * 4. to satisfy our requirements, next mask must have n-k
+ * consecutive set bits at the end, so we will do (n-k-1)
+ * lshift-ands and check if last bit is set.
+ *
+ * Step 4 will need to be repeated if (n-k) > MASK_ALIGN until
+ * we either run out of masks, lose the run, or find what we
+ * were looking for.
+ */
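+ /*
+ * Worked example with illustrative values: for n = 2 and
+ * cur_msk = 0b0110, one lshift-and leaves 0b0100; for a 64-bit
+ * mask clz is then 61, so run_start = MASK_ALIGN - clz - n = 1.
+ */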
+ cur_msk = msk->data[msk_idx];
+ left = n;
+
+ /* if we're looking for free spaces, invert the mask */
+ if (!used)
+ cur_msk = ~cur_msk;
+
+ /* if we have an ignore mask, apply it once and clear it */
+ if (ignore_msk) {
+ cur_msk &= ignore_msk;
+ ignore_msk = 0;
+ }
+
+ /* if n can fit within a single mask, do a search */
+ if (n <= MASK_ALIGN) {
+ uint64_t tmp_msk = cur_msk;
+ unsigned int s_idx;
+ for (s_idx = 0; s_idx < n - 1; s_idx++)
+ tmp_msk &= tmp_msk << 1ULL;
+ /* we found what we were looking for */
+ if (tmp_msk != 0) {
+ /* clz will give us offset from end of mask, and
+ * we only get the end of our run, not start,
+ * so adjust result to point to where start
+ * would have been.
+ */
+ run_start = MASK_ALIGN -
+ __builtin_clzll(tmp_msk) - n;
+ return MASK_GET_IDX(msk_idx, run_start);
+ }
+ }
+
+ /*
+ * we didn't find our run within the mask, or n > MASK_ALIGN,
+ * so we're going for plan B.
+ */
+
+ /* count trailing zeroes on inverted mask */
+ if (~cur_msk == 0)
+ ctz = sizeof(cur_msk) * 8;
+ else
+ ctz = __builtin_ctzll(~cur_msk);
+
+ /* if there aren't any runs at the start either, just
+ * continue
+ */
+ if (ctz == 0)
+ continue;
+
+ /* we have a partial run at the start, so try looking behind */
+ run_end = MASK_GET_IDX(msk_idx, ctz);
+ left -= ctz;
+
+ /* go backwards, include zero */
+ lookbehind_idx = msk_idx - 1;
+
+ /* we can't lookbehind as we've run out of masks, so stop */
+ if (msk_idx == 0)
+ break;
+
+ do {
+ const uint64_t last_bit = 1ULL << (MASK_ALIGN - 1);
+ unsigned int s_idx, need;
+
+ lookbehind_msk = msk->data[lookbehind_idx];
+
+ /* if we're looking for free space, invert the mask */
+ if (!used)
+ lookbehind_msk = ~lookbehind_msk;
+
+ /* figure out how many consecutive bits we need here */
+ need = RTE_MIN(left, MASK_ALIGN);
+
+ for (s_idx = 0; s_idx < need - 1; s_idx++)
+ lookbehind_msk &= lookbehind_msk << 1ULL;
+
+ /* if last bit is not set, we've lost the run */
+ if ((lookbehind_msk & last_bit) == 0) {
+ /*
+ * we've scanned this far, so we know there are
+ * no runs in the space we've lookbehind-scanned
+ * as well, so skip that on next iteration.
+ */
+ ignore_msk = -1ULL << need;
+ msk_idx = lookbehind_idx;
+ break;
+ }
+
+ left -= need;
+
+ /* check if we've found what we were looking for */
+ if (left == 0) {
+ found = true;
+ break;
+ }
+ } while ((lookbehind_idx--) != 0); /* decrement after check to
+ * include zero
+ */
+
+ /* we didn't find anything, so continue */
+ if (!found)
+ continue;
+
+ /* we've found what we were looking for, but we only know where
+ * the run ended, so calculate start position.
+ */
+ return run_end - n;
+ } while (msk_idx-- != 0); /* decrement after check to include zero */
+ /* we didn't find anything */
+ rte_errno = used ? ENOENT : ENOSPC;
+ return -1;
+}
+
+static int
+find_prev(const struct rte_fbarray *arr, unsigned int start, bool used)
+{
+ const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+ arr->len);
+ unsigned int idx, first, first_mod;
+ uint64_t ignore_msk;
+
+ /*
+ * mask only has granularity of MASK_ALIGN, but start may not be aligned
+ * on that boundary, so construct a special mask to exclude anything we
+ * don't want to see to avoid confusing clz.
+ */
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+ /* we're going backwards, so mask must start from the top */
+ ignore_msk = first_mod == MASK_ALIGN - 1 ?
+ -1ULL : /* prevent overflow */
+ ~(-1ULL << (first_mod + 1));
+
+ /* go backwards, include zero */
+ idx = first;
+ do {
+ uint64_t cur = msk->data[idx];
+ int found;
+
+ /* if we're looking for free entries, invert mask */
+ if (!used)
+ cur = ~cur;
+
+ /* ignore everything before start on first iteration */
+ if (idx == first)
+ cur &= ignore_msk;
+
+ /* check if we have any entries */
+ if (cur == 0)
+ continue;
+
+ /*
+ * find last set bit - that will correspond to whatever it is
+ * that we're looking for. we're counting trailing zeroes, thus
+ * the value we get is counted from end of mask, so calculate
+ * position from start of mask.
+ */
+ found = MASK_ALIGN - __builtin_clzll(cur) - 1;
+
+ return MASK_GET_IDX(idx, found);
+ } while (idx-- != 0); /* decrement after check to include zero*/
+
+ /* we didn't find anything */
+ rte_errno = used ? ENOENT : ENOSPC;
+ return -1;
+}
+
+static int
+find_rev_contig(const struct rte_fbarray *arr, unsigned int start, bool used)
+{
+ const struct used_mask *msk = get_used_mask(arr->data, arr->elt_sz,
+ arr->len);
+ unsigned int idx, first, first_mod;
+ unsigned int need_len, result = 0;
+
+ first = MASK_LEN_TO_IDX(start);
+ first_mod = MASK_LEN_TO_MOD(start);
+
+ /* go backwards, include zero */
+ idx = first;
+ do {
+ uint64_t cur = msk->data[idx];
+ unsigned int run_len;
+
+ need_len = MASK_ALIGN;
+
+ /* if we're looking for free entries, invert mask */
+ if (!used)
+ cur = ~cur;
+
+ /* ignore everything after start on first iteration */
+ if (idx == first) {
+ unsigned int end_len = MASK_ALIGN - first_mod - 1;
+ cur <<= end_len;
+ /* at the start, we don't need the full mask len */
+ need_len -= end_len;
+ }
+
+ /* we will be looking for zeroes, so invert the mask */
+ cur = ~cur;
+
+ /* if mask is zero, we have a complete run */
+ if (cur == 0)
+ goto endloop;
+
+ /*
+ * see where run ends, starting from the end.
+ */
+ run_len = __builtin_clzll(cur);
+
+ /* add however many zeroes we've had in the last run and quit */
+ if (run_len < need_len) {
+ result += run_len;
+ break;
+ }
+endloop:
+ result += need_len;
+ } while (idx-- != 0); /* decrement after check to include zero */
+ return result;
+}
+
+static int
+set_used(struct rte_fbarray *arr, unsigned int idx, bool used)
+{
+ struct used_mask *msk;
+ uint64_t msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
+ unsigned int msk_idx = MASK_LEN_TO_IDX(idx);
+ bool already_used;
+ int ret = -1;
+
+ if (arr == NULL || idx >= arr->len) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+ ret = 0;
+
+ /* prevent array from changing under us */
+ rte_rwlock_write_lock(&arr->rwlock);
+
+ already_used = (msk->data[msk_idx] & msk_bit) != 0;
+
+ /* nothing to be done */
+ if (used == already_used)
+ goto out;
+
+ if (used) {
+ msk->data[msk_idx] |= msk_bit;
+ arr->count++;
+ } else {
+ msk->data[msk_idx] &= ~msk_bit;
+ arr->count--;
+ }
+out:
+ rte_rwlock_write_unlock(&arr->rwlock);
+
+ return ret;
+}
+
+static int
+fully_validate(const char *name, unsigned int elt_sz, unsigned int len)
+{
+ if (name == NULL || elt_sz == 0 || len == 0 || len > INT_MAX) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ if (strnlen(name, RTE_FBARRAY_NAME_LEN) == RTE_FBARRAY_NAME_LEN) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
+ return 0;
+}
+
+int __rte_experimental
+rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
+ unsigned int elt_sz)
+{
+ size_t page_sz, mmap_len;
+ char path[PATH_MAX];
+ struct used_mask *msk;
+ void *data = NULL;
+ int fd = -1;
+
+ if (arr == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ if (fully_validate(name, elt_sz, len))
+ return -1;
+
+ page_sz = sysconf(_SC_PAGESIZE);
+ if (page_sz == (size_t)-1)
+ goto fail;
+
+ /* calculate our memory limits */
+ mmap_len = calc_data_size(page_sz, elt_sz, len);
+
+ data = eal_get_virtual_area(NULL, &mmap_len, page_sz, 0, 0);
+ if (data == NULL)
+ goto fail;
+
+ if (internal_config.no_shconf) {
+ /* remap virtual area as writable */
+ void *new_data = mmap(data, mmap_len, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (new_data == MAP_FAILED) {
+ RTE_LOG(DEBUG, EAL, "%s(): couldn't remap anonymous memory: %s\n",
+ __func__, strerror(errno));
+ goto fail;
+ }
+ } else {
+ eal_get_fbarray_path(path, sizeof(path), name);
+
+ /*
+ * Each fbarray is unique to process namespace, i.e. the
+ * filename depends on process prefix. Try to take out a lock
+ * and see if we succeed. If we don't, someone else is using it
+ * already.
+ */
+ fd = open(path, O_CREAT | O_RDWR, 0600);
+ if (fd < 0) {
+ RTE_LOG(DEBUG, EAL, "%s(): couldn't open %s: %s\n",
+ __func__, path, strerror(errno));
+ rte_errno = errno;
+ goto fail;
+ } else if (flock(fd, LOCK_EX | LOCK_NB)) {
+ RTE_LOG(DEBUG, EAL, "%s(): couldn't lock %s: %s\n",
+ __func__, path, strerror(errno));
+ rte_errno = EBUSY;
+ goto fail;
+ }
+
+ /* take out a non-exclusive lock, so that other processes could
+ * still attach to it, but no other process could reinitialize
+ * it.
+ */
+ if (flock(fd, LOCK_SH | LOCK_NB)) {
+ rte_errno = errno;
+ goto fail;
+ }
+
+ if (resize_and_map(fd, data, mmap_len))
+ goto fail;
+
+ /* we've mmap'ed the file, we can now close the fd */
+ close(fd);
+ }
+
+ /* initialize the data */
+ memset(data, 0, mmap_len);
+
+ /* populate data structure */
+ strlcpy(arr->name, name, sizeof(arr->name));
+ arr->data = data;
+ arr->len = len;
+ arr->elt_sz = elt_sz;
+ arr->count = 0;
+
+ msk = get_used_mask(data, elt_sz, len);
+ msk->n_masks = MASK_LEN_TO_IDX(RTE_ALIGN_CEIL(len, MASK_ALIGN));
+
+ rte_rwlock_init(&arr->rwlock);
+
+ return 0;
+fail:
+ if (data)
+ munmap(data, mmap_len);
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+int __rte_experimental
+rte_fbarray_attach(struct rte_fbarray *arr)
+{
+ size_t page_sz, mmap_len;
+ char path[PATH_MAX];
+ void *data = NULL;
+ int fd = -1;
+
+ if (arr == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ /*
+ * we don't need to synchronize attach as the two values we need (element
+ * size and array length) are constant for the lifetime of the array, so
+ * the parts we care about will not race.
+ */
+
+ if (fully_validate(arr->name, arr->elt_sz, arr->len))
+ return -1;
+
+ page_sz = sysconf(_SC_PAGESIZE);
+ if (page_sz == (size_t)-1)
+ goto fail;
+
+ mmap_len = calc_data_size(page_sz, arr->elt_sz, arr->len);
+
+ data = eal_get_virtual_area(arr->data, &mmap_len, page_sz, 0, 0);
+ if (data == NULL)
+ goto fail;
+
+ eal_get_fbarray_path(path, sizeof(path), arr->name);
+
+ fd = open(path, O_RDWR);
+ if (fd < 0) {
+ rte_errno = errno;
+ goto fail;
+ }
+
+ /* lock the file, to let others know we're using it */
+ if (flock(fd, LOCK_SH | LOCK_NB)) {
+ rte_errno = errno;
+ goto fail;
+ }
+
+ if (resize_and_map(fd, data, mmap_len))
+ goto fail;
+
+ close(fd);
+
+ /* we're done */
+
+ return 0;
+fail:
+ if (data)
+ munmap(data, mmap_len);
+ if (fd >= 0)
+ close(fd);
+ return -1;
+}
+
+int __rte_experimental
+rte_fbarray_detach(struct rte_fbarray *arr)
+{
+ if (arr == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ /*
+ * we don't need to synchronize detach as the two values we need (element
+ * size and total capacity) are constant for the lifetime of the array,
+ * so the parts we care about will not race. If the user is detaching
+ * while doing something else in the same process, we can't really do
+ * anything about it; things will blow up either way.
+ */
+
+ size_t page_sz = sysconf(_SC_PAGESIZE);
+
+ if (page_sz == (size_t)-1)
+ return -1;
+
+ /* this may already be unmapped (e.g. repeated call from a previously
+ * failed destroy()), but that is on the user; we can't (easily) know
+ * whether this is still mapped.
+ */
+ munmap(arr->data, calc_data_size(page_sz, arr->elt_sz, arr->len));
+
+ return 0;
+}
+
+int __rte_experimental
+rte_fbarray_destroy(struct rte_fbarray *arr)
+{
+ int fd, ret;
+ char path[PATH_MAX];
+
+ ret = rte_fbarray_detach(arr);
+ if (ret)
+ return ret;
+
+ /* try deleting the file */
+ eal_get_fbarray_path(path, sizeof(path), arr->name);
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open fbarray file: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ if (flock(fd, LOCK_EX | LOCK_NB)) {
+ RTE_LOG(DEBUG, EAL, "Cannot destroy fbarray - another process is using it\n");
+ rte_errno = EBUSY;
+ ret = -1;
+ } else {
+ ret = 0;
+ unlink(path);
+ memset(arr, 0, sizeof(*arr));
+ }
+ close(fd);
+
+ return ret;
+}
+
+void * __rte_experimental
+rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx)
+{
+ void *ret = NULL;
+ if (arr == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ if (idx >= arr->len) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ ret = RTE_PTR_ADD(arr->data, idx * arr->elt_sz);
+
+ return ret;
+}
+
+int __rte_experimental
+rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx)
+{
+ return set_used(arr, idx, true);
+}
+
+int __rte_experimental
+rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx)
+{
+ return set_used(arr, idx, false);
+}
+
+int __rte_experimental
+rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx)
+{
+ struct used_mask *msk;
+ int msk_idx;
+ uint64_t msk_bit;
+ int ret = -1;
+
+ if (arr == NULL || idx >= arr->len) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ /* prevent array from changing under us */
+ rte_rwlock_read_lock(&arr->rwlock);
+
+ msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+ msk_idx = MASK_LEN_TO_IDX(idx);
+ msk_bit = 1ULL << MASK_LEN_TO_MOD(idx);
+
+ ret = (msk->data[msk_idx] & msk_bit) != 0;
+
+ rte_rwlock_read_unlock(&arr->rwlock);
+
+ return ret;
+}
+
+static int
+fbarray_find(struct rte_fbarray *arr, unsigned int start, bool next, bool used)
+{
+ int ret = -1;
+
+ if (arr == NULL || start >= arr->len) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ /* prevent array from changing under us */
+ rte_rwlock_read_lock(&arr->rwlock);
+
+ /* cheap checks to prevent doing useless work */
+ if (!used) {
+ if (arr->len == arr->count) {
+ rte_errno = ENOSPC;
+ goto out;
+ }
+ if (arr->count == 0) {
+ ret = start;
+ goto out;
+ }
+ } else {
+ if (arr->count == 0) {
+ rte_errno = ENOENT;
+ goto out;
+ }
+ if (arr->len == arr->count) {
+ ret = start;
+ goto out;
+ }
+ }
+ if (next)
+ ret = find_next(arr, start, used);
+ else
+ ret = find_prev(arr, start, used);
+out:
+ rte_rwlock_read_unlock(&arr->rwlock);
+ return ret;
+}
+
+int __rte_experimental
+rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start)
+{
+ return fbarray_find(arr, start, true, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start)
+{
+ return fbarray_find(arr, start, true, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start)
+{
+ return fbarray_find(arr, start, false, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start)
+{
+ return fbarray_find(arr, start, false, true);
+}
+
+static int
+fbarray_find_n(struct rte_fbarray *arr, unsigned int start, unsigned int n,
+ bool next, bool used)
+{
+ int ret = -1;
+
+ if (arr == NULL || start >= arr->len || n > arr->len || n == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ if (next && (arr->len - start) < n) {
+ rte_errno = used ? ENOENT : ENOSPC;
+ return -1;
+ }
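+ /* going backwards, a run of n entries ending at start occupies
+ * indices start - n + 1 .. start, so start must be >= n - 1.
+ */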
+ if (!next && start < (n - 1)) {
+ rte_errno = used ? ENOENT : ENOSPC;
+ return -1;
+ }
+
+ /* prevent array from changing under us */
+ rte_rwlock_read_lock(&arr->rwlock);
+
+ /* cheap checks to prevent doing useless work */
+ if (!used) {
+ if (arr->len == arr->count || arr->len - arr->count < n) {
+ rte_errno = ENOSPC;
+ goto out;
+ }
+ if (arr->count == 0) {
+ ret = next ? start : start - n + 1;
+ goto out;
+ }
+ } else {
+ if (arr->count < n) {
+ rte_errno = ENOENT;
+ goto out;
+ }
+ if (arr->count == arr->len) {
+ ret = next ? start : start - n + 1;
+ goto out;
+ }
+ }
+
+ if (next)
+ ret = find_next_n(arr, start, n, used);
+ else
+ ret = find_prev_n(arr, start, n, used);
+out:
+ rte_rwlock_read_unlock(&arr->rwlock);
+ return ret;
+}
+
+int __rte_experimental
+rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start,
+ unsigned int n)
+{
+ return fbarray_find_n(arr, start, n, true, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start,
+ unsigned int n)
+{
+ return fbarray_find_n(arr, start, n, true, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start,
+ unsigned int n)
+{
+ return fbarray_find_n(arr, start, n, false, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start,
+ unsigned int n)
+{
+ return fbarray_find_n(arr, start, n, false, true);
+}
+
+static int
+fbarray_find_contig(struct rte_fbarray *arr, unsigned int start, bool next,
+ bool used)
+{
+ int ret = -1;
+
+ if (arr == NULL || start >= arr->len) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ /* prevent array from changing under us */
+ rte_rwlock_read_lock(&arr->rwlock);
+
+ /* cheap checks to prevent doing useless work */
+ if (used) {
+ if (arr->count == 0) {
+ ret = 0;
+ goto out;
+ }
+ if (next && arr->count == arr->len) {
+ ret = arr->len - start;
+ goto out;
+ }
+ if (!next && arr->count == arr->len) {
+ ret = start + 1;
+ goto out;
+ }
+ } else {
+ if (arr->len == arr->count) {
+ ret = 0;
+ goto out;
+ }
+ if (next && arr->count == 0) {
+ ret = arr->len - start;
+ goto out;
+ }
+ if (!next && arr->count == 0) {
+ ret = start + 1;
+ goto out;
+ }
+ }
+
+ if (next)
+ ret = find_contig(arr, start, used);
+ else
+ ret = find_rev_contig(arr, start, used);
+out:
+ rte_rwlock_read_unlock(&arr->rwlock);
+ return ret;
+}
+
+int __rte_experimental
+rte_fbarray_find_contig_free(struct rte_fbarray *arr, unsigned int start)
+{
+ return fbarray_find_contig(arr, start, true, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start)
+{
+ return fbarray_find_contig(arr, start, true, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr, unsigned int start)
+{
+ return fbarray_find_contig(arr, start, false, false);
+}
+
+int __rte_experimental
+rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start)
+{
+ return fbarray_find_contig(arr, start, false, true);
+}
+
+int __rte_experimental
+rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt)
+{
+ void *end;
+ int ret = -1;
+
+ /*
+ * no need to synchronize as it doesn't matter if underlying data
+ * changes - we're doing pointer arithmetic here.
+ */
+
+ if (arr == NULL || elt == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ end = RTE_PTR_ADD(arr->data, arr->elt_sz * arr->len);
+ if (elt < arr->data || elt >= end) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ ret = RTE_PTR_DIFF(elt, arr->data) / arr->elt_sz;
+
+ return ret;
+}
+
+void __rte_experimental
+rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f)
+{
+ struct used_mask *msk;
+ unsigned int i;
+
+ if (arr == NULL || f == NULL) {
+ rte_errno = EINVAL;
+ return;
+ }
+
+ if (fully_validate(arr->name, arr->elt_sz, arr->len)) {
+ fprintf(f, "Invalid file-backed array\n");
+ goto out;
+ }
+
+ /* prevent array from changing under us */
+ rte_rwlock_read_lock(&arr->rwlock);
+
+ fprintf(f, "File-backed array: %s\n", arr->name);
+ fprintf(f, "size: %i occupied: %i elt_sz: %i\n",
+ arr->len, arr->count, arr->elt_sz);
+
+ msk = get_used_mask(arr->data, arr->elt_sz, arr->len);
+
+ for (i = 0; i < msk->n_masks; i++)
+ fprintf(f, "msk idx %i: 0x%016" PRIx64 "\n", i, msk->data[i]);
+out:
+ rte_rwlock_read_unlock(&arr->rwlock);
+}
diff --git a/lib/librte_eal/common/eal_common_hypervisor.c b/lib/librte_eal/common/eal_common_hypervisor.c
index c3b4c621..5388b81a 100644
--- a/lib/librte_eal/common/eal_common_hypervisor.c
+++ b/lib/librte_eal/common/eal_common_hypervisor.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#include "rte_hypervisor.h"
diff --git a/lib/librte_eal/common/eal_common_lcore.c b/lib/librte_eal/common/eal_common_lcore.c
index 7724fa43..3167e9d7 100644
--- a/lib/librte_eal/common/eal_common_lcore.c
+++ b/lib/librte_eal/common/eal_common_lcore.c
@@ -7,6 +7,7 @@
#include <string.h>
#include <dirent.h>
+#include <rte_errno.h>
#include <rte_log.h>
#include <rte_eal.h>
#include <rte_lcore.h>
@@ -16,6 +17,19 @@
#include "eal_private.h"
#include "eal_thread.h"
+static int
+socket_id_cmp(const void *a, const void *b)
+{
+ const int *lcore_id_a = a;
+ const int *lcore_id_b = b;
+
+ if (*lcore_id_a < *lcore_id_b)
+ return -1;
+ if (*lcore_id_a > *lcore_id_b)
+ return 1;
+ return 0;
+}
+
/*
* Parse /sys/devices/system/cpu to get the number of physical and logical
* processors on the machine. The function will fill the cpu_info
@@ -28,6 +42,8 @@ rte_eal_cpu_init(void)
struct rte_config *config = rte_eal_get_configuration();
unsigned lcore_id;
unsigned count = 0;
+ unsigned int socket_id, prev_socket_id;
+ int lcore_to_socket_id[RTE_MAX_LCORE];
/*
* Parse the maximum set of logical cores, detect the subset of running
@@ -39,6 +55,19 @@ rte_eal_cpu_init(void)
/* init cpuset for per lcore config */
CPU_ZERO(&lcore_config[lcore_id].cpuset);
+ /* find socket first */
+ socket_id = eal_cpu_socket_id(lcore_id);
+ if (socket_id >= RTE_MAX_NUMA_NODES) {
+#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
+ socket_id = 0;
+#else
+ RTE_LOG(ERR, EAL, "Socket ID (%u) is greater than RTE_MAX_NUMA_NODES (%d)\n",
+ socket_id, RTE_MAX_NUMA_NODES);
+ return -1;
+#endif
+ }
+ lcore_to_socket_id[lcore_id] = socket_id;
+
/* in 1:1 mapping, record related cpu detected state */
lcore_config[lcore_id].detected = eal_cpu_detected(lcore_id);
if (lcore_config[lcore_id].detected == 0) {
@@ -54,18 +83,7 @@ rte_eal_cpu_init(void)
config->lcore_role[lcore_id] = ROLE_RTE;
lcore_config[lcore_id].core_role = ROLE_RTE;
lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
- lcore_config[lcore_id].socket_id = eal_cpu_socket_id(lcore_id);
- if (lcore_config[lcore_id].socket_id >= RTE_MAX_NUMA_NODES) {
-#ifdef RTE_EAL_ALLOW_INV_SOCKET_ID
- lcore_config[lcore_id].socket_id = 0;
-#else
- RTE_LOG(ERR, EAL, "Socket ID (%u) is greater than "
- "RTE_MAX_NUMA_NODES (%d)\n",
- lcore_config[lcore_id].socket_id,
- RTE_MAX_NUMA_NODES);
- return -1;
-#endif
- }
+ lcore_config[lcore_id].socket_id = socket_id;
RTE_LOG(DEBUG, EAL, "Detected lcore %u as "
"core %u on socket %u\n",
lcore_id, lcore_config[lcore_id].core_id,
@@ -79,5 +97,38 @@ rte_eal_cpu_init(void)
RTE_MAX_LCORE);
RTE_LOG(INFO, EAL, "Detected %u lcore(s)\n", config->lcore_count);
+ /* sort all socket ids in ascending order */
+ qsort(lcore_to_socket_id, RTE_DIM(lcore_to_socket_id),
+ sizeof(lcore_to_socket_id[0]), socket_id_cmp);
+
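+ /* after sorting, equal socket ids are adjacent, so a single pass
+ * collapses duplicates into the unique list of NUMA nodes.
+ */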
+ prev_socket_id = -1;
+ config->numa_node_count = 0;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ socket_id = lcore_to_socket_id[lcore_id];
+ if (socket_id != prev_socket_id)
+ config->numa_nodes[config->numa_node_count++] =
+ socket_id;
+ prev_socket_id = socket_id;
+ }
+ RTE_LOG(INFO, EAL, "Detected %u NUMA nodes\n", config->numa_node_count);
+
return 0;
}
+
+unsigned int __rte_experimental
+rte_socket_count(void)
+{
+ const struct rte_config *config = rte_eal_get_configuration();
+ return config->numa_node_count;
+}
+
+int __rte_experimental
+rte_socket_id_by_idx(unsigned int idx)
+{
+ const struct rte_config *config = rte_eal_get_configuration();
+ if (idx >= config->numa_node_count) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ return config->numa_nodes[idx];
+}
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
index 37b2e20e..c714a4bd 100644
--- a/lib/librte_eal/common/eal_common_log.c
+++ b/lib/librte_eal/common/eal_common_log.c
@@ -9,6 +9,7 @@
#include <string.h>
#include <errno.h>
#include <regex.h>
+#include <fnmatch.h>
#include <rte_eal.h>
#include <rte_log.h>
@@ -23,6 +24,23 @@ struct rte_logs rte_logs = {
.file = NULL,
};
+struct rte_eal_opt_loglevel {
+ /** Next list entry */
+ TAILQ_ENTRY(rte_eal_opt_loglevel) next;
+ /** Compiled regular expression obtained from the option */
+ regex_t re_match;
+ /** Glob match string option */
+ char *pattern;
+ /** Log level value obtained from the option */
+ uint32_t level;
+};
+
+TAILQ_HEAD(rte_eal_opt_loglevel_list, rte_eal_opt_loglevel);
+
+/** List of valid EAL log level options */
+static struct rte_eal_opt_loglevel_list opt_loglevel_list =
+ TAILQ_HEAD_INITIALIZER(opt_loglevel_list);
+
/* Stream to use for logging if rte_logs.file is NULL */
static FILE *default_log_stream;
@@ -89,9 +107,9 @@ rte_log_set_level(uint32_t type, uint32_t level)
return 0;
}
-/* set level */
+/* set log level by regular expression */
int
-rte_log_set_level_regexp(const char *pattern, uint32_t level)
+rte_log_set_level_regexp(const char *regex, uint32_t level)
{
regex_t r;
size_t i;
@@ -99,7 +117,7 @@ rte_log_set_level_regexp(const char *pattern, uint32_t level)
if (level > RTE_LOG_DEBUG)
return -1;
- if (regcomp(&r, pattern, 0) != 0)
+ if (regcomp(&r, regex, 0) != 0)
return -1;
for (i = 0; i < rte_logs.dynamic_types_len; i++) {
@@ -115,6 +133,69 @@ rte_log_set_level_regexp(const char *pattern, uint32_t level)
return 0;
}
+/*
+ * Save the pattern (regex or glob) and the log level for dynamic
+ * logtypes that may register later.
+ */
+static int rte_log_save_level(int priority,
+ const char *regex, const char *pattern)
+{
+ struct rte_eal_opt_loglevel *opt_ll = NULL;
+
+ opt_ll = malloc(sizeof(*opt_ll));
+ if (opt_ll == NULL)
+ goto fail;
+
+ opt_ll->level = priority;
+
+ if (regex) {
+ opt_ll->pattern = NULL;
+ if (regcomp(&opt_ll->re_match, regex, 0) != 0)
+ goto fail;
+ } else if (pattern) {
+ opt_ll->pattern = strdup(pattern);
+ if (opt_ll->pattern == NULL)
+ goto fail;
+ } else
+ goto fail;
+
+ TAILQ_INSERT_HEAD(&opt_loglevel_list, opt_ll, next);
+ return 0;
+fail:
+ free(opt_ll);
+ return -1;
+}
+
+int rte_log_save_regexp(const char *regex, int tmp)
+{
+ return rte_log_save_level(tmp, regex, NULL);
+}
+
+/* set log level based on glob (file match) pattern */
+int
+rte_log_set_level_pattern(const char *pattern, uint32_t level)
+{
+ size_t i;
+
+ if (level > RTE_LOG_DEBUG)
+ return -1;
+
+ for (i = 0; i < rte_logs.dynamic_types_len; i++) {
+ if (rte_logs.dynamic_types[i].name == NULL)
+ continue;
+
+ if (fnmatch(pattern, rte_logs.dynamic_types[i].name, 0) == 0)
+ rte_logs.dynamic_types[i].loglevel = level;
+ }
+
+ return 0;
+}
+
+int rte_log_save_pattern(const char *pattern, int priority)
+{
+ return rte_log_save_level(priority, NULL, pattern);
+}
+
/* get the current loglevel for the message being processed */
int rte_log_cur_msg_loglevel(void)
{
@@ -186,6 +267,36 @@ rte_log_register(const char *name)
return ret;
}
+/* Register an extended log type and try to pick its level from EAL options */
+int __rte_experimental
+rte_log_register_type_and_pick_level(const char *name, uint32_t level_def)
+{
+ struct rte_eal_opt_loglevel *opt_ll;
+ uint32_t level = level_def;
+ int type;
+
+ type = rte_log_register(name);
+ if (type < 0)
+ return type;
+
+ TAILQ_FOREACH(opt_ll, &opt_loglevel_list, next) {
+ if (opt_ll->level > RTE_LOG_DEBUG)
+ continue;
+
+ if (opt_ll->pattern) {
+ /* fnmatch() returns 0 on a match */
+ if (fnmatch(opt_ll->pattern, name, 0) == 0)
+ level = opt_ll->level;
+ } else {
+ if (regexec(&opt_ll->re_match, name, 0, NULL, 0) == 0)
+ level = opt_ll->level;
+ }
+ }
+
+ rte_logs.dynamic_types[type].loglevel = level;
+
+ return type;
+}
+
struct logtype {
uint32_t log_id;
const char *logtype;
@@ -224,9 +335,7 @@ static const struct logtype logtype_strings[] = {
};
/* Logging should be first initializer (before drivers and bus) */
-RTE_INIT_PRIO(rte_log_init, 101);
-static void
-rte_log_init(void)
+RTE_INIT_PRIO(rte_log_init, LOG)
{
uint32_t i;
diff --git a/lib/librte_eal/common/eal_common_memalloc.c b/lib/librte_eal/common/eal_common_memalloc.c
new file mode 100644
index 00000000..1d41ea11
--- /dev/null
+++ b/lib/librte_eal/common/eal_common_memalloc.c
@@ -0,0 +1,364 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#include <string.h>
+
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_fbarray.h>
+#include <rte_memzone.h>
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+#include <rte_string_fns.h>
+#include <rte_rwlock.h>
+
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+#include "eal_memalloc.h"
+
+struct mem_event_callback_entry {
+ TAILQ_ENTRY(mem_event_callback_entry) next;
+ char name[RTE_MEM_EVENT_CALLBACK_NAME_LEN];
+ rte_mem_event_callback_t clb;
+ void *arg;
+};
+
+struct mem_alloc_validator_entry {
+ TAILQ_ENTRY(mem_alloc_validator_entry) next;
+ char name[RTE_MEM_ALLOC_VALIDATOR_NAME_LEN];
+ rte_mem_alloc_validator_t clb;
+ int socket_id;
+ size_t limit;
+};
+
+/** Doubly-linked lists of callbacks and validators. */
+TAILQ_HEAD(mem_event_callback_entry_list, mem_event_callback_entry);
+TAILQ_HEAD(mem_alloc_validator_entry_list, mem_alloc_validator_entry);
+
+static struct mem_event_callback_entry_list mem_event_callback_list =
+ TAILQ_HEAD_INITIALIZER(mem_event_callback_list);
+static rte_rwlock_t mem_event_rwlock = RTE_RWLOCK_INITIALIZER;
+
+static struct mem_alloc_validator_entry_list mem_alloc_validator_list =
+ TAILQ_HEAD_INITIALIZER(mem_alloc_validator_list);
+static rte_rwlock_t mem_alloc_validator_rwlock = RTE_RWLOCK_INITIALIZER;
+
+static struct mem_event_callback_entry *
+find_mem_event_callback(const char *name, void *arg)
+{
+ struct mem_event_callback_entry *r;
+
+ TAILQ_FOREACH(r, &mem_event_callback_list, next) {
+ if (!strcmp(r->name, name) && r->arg == arg)
+ break;
+ }
+ return r;
+}
+
+static struct mem_alloc_validator_entry *
+find_mem_alloc_validator(const char *name, int socket_id)
+{
+ struct mem_alloc_validator_entry *r;
+
+ TAILQ_FOREACH(r, &mem_alloc_validator_list, next) {
+ if (!strcmp(r->name, name) && r->socket_id == socket_id)
+ break;
+ }
+ return r;
+}
+
+bool
+eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
+ size_t len)
+{
+ void *end, *aligned_start, *aligned_end;
+ size_t pgsz = (size_t)msl->page_sz;
+ const struct rte_memseg *ms;
+
+ /* for IOVA_VA, it's always contiguous */
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ return true;
+
+ /* for legacy memory, it's always contiguous */
+ if (internal_config.legacy_mem)
+ return true;
+
+ end = RTE_PTR_ADD(start, len);
+
+ /* for nohuge, we check pagemap, otherwise check memseg */
+ if (!rte_eal_has_hugepages()) {
+ rte_iova_t cur, expected;
+
+ aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
+ aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);
+
+ /* if start and end are on the same page, bail out early */
+ if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
+ return true;
+
+ /* skip first iteration */
+ cur = rte_mem_virt2iova(aligned_start);
+ expected = cur + pgsz;
+ aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
+
+ while (aligned_start < aligned_end) {
+ cur = rte_mem_virt2iova(aligned_start);
+ if (cur != expected)
+ return false;
+ aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
+ expected += pgsz;
+ }
+ } else {
+ int start_seg, end_seg, cur_seg;
+ rte_iova_t cur, expected;
+
+ aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
+ aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);
+
+ start_seg = RTE_PTR_DIFF(aligned_start, msl->base_va) /
+ pgsz;
+ end_seg = RTE_PTR_DIFF(aligned_end, msl->base_va) /
+ pgsz;
+
+ /* if start and end are on the same page, bail out early */
+ if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
+ return true;
+
+ /* skip first iteration */
+ ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
+ cur = ms->iova;
+ expected = cur + pgsz;
+
+ /* if we can't access IOVA addresses, assume non-contiguous */
+ if (cur == RTE_BAD_IOVA)
+ return false;
+
+ for (cur_seg = start_seg + 1; cur_seg < end_seg;
+ cur_seg++, expected += pgsz) {
+ ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);
+
+ if (ms->iova != expected)
+ return false;
+ }
+ }
+ return true;
+}
+
+int
+eal_memalloc_mem_event_callback_register(const char *name,
+ rte_mem_event_callback_t clb, void *arg)
+{
+ struct mem_event_callback_entry *entry;
+ int ret, len;
+ if (name == NULL || clb == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
+ if (len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ } else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
+ rte_rwlock_write_lock(&mem_event_rwlock);
+
+ entry = find_mem_event_callback(name, arg);
+ if (entry != NULL) {
+ rte_errno = EEXIST;
+ ret = -1;
+ goto unlock;
+ }
+
+ entry = malloc(sizeof(*entry));
+ if (entry == NULL) {
+ rte_errno = ENOMEM;
+ ret = -1;
+ goto unlock;
+ }
+
+ /* callback successfully created and is valid, add it to the list */
+ entry->clb = clb;
+ entry->arg = arg;
+ strlcpy(entry->name, name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
+ TAILQ_INSERT_TAIL(&mem_event_callback_list, entry, next);
+
+ ret = 0;
+
+ RTE_LOG(DEBUG, EAL, "Mem event callback '%s:%p' registered\n",
+ name, arg);
+
+unlock:
+ rte_rwlock_write_unlock(&mem_event_rwlock);
+ return ret;
+}
+
+int
+eal_memalloc_mem_event_callback_unregister(const char *name, void *arg)
+{
+ struct mem_event_callback_entry *entry;
+ int ret, len;
+
+ if (name == NULL) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
+ if (len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ } else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
+ rte_rwlock_write_lock(&mem_event_rwlock);
+
+ entry = find_mem_event_callback(name, arg);
+ if (entry == NULL) {
+ rte_errno = ENOENT;
+ ret = -1;
+ goto unlock;
+ }
+ TAILQ_REMOVE(&mem_event_callback_list, entry, next);
+ free(entry);
+
+ ret = 0;
+
+ RTE_LOG(DEBUG, EAL, "Mem event callback '%s:%p' unregistered\n",
+ name, arg);
+
+unlock:
+ rte_rwlock_write_unlock(&mem_event_rwlock);
+ return ret;
+}
+
+void
+eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
+ size_t len)
+{
+ struct mem_event_callback_entry *entry;
+
+ rte_rwlock_read_lock(&mem_event_rwlock);
+
+ TAILQ_FOREACH(entry, &mem_event_callback_list, next) {
+ RTE_LOG(DEBUG, EAL, "Calling mem event callback '%s:%p'\n",
+ entry->name, entry->arg);
+ entry->clb(event, start, len, entry->arg);
+ }
+
+ rte_rwlock_read_unlock(&mem_event_rwlock);
+}
+
+int
+eal_memalloc_mem_alloc_validator_register(const char *name,
+ rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
+{
+ struct mem_alloc_validator_entry *entry;
+ int ret, len;
+ if (name == NULL || clb == NULL || socket_id < 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
+ if (len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ } else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
+ rte_rwlock_write_lock(&mem_alloc_validator_rwlock);
+
+ entry = find_mem_alloc_validator(name, socket_id);
+ if (entry != NULL) {
+ rte_errno = EEXIST;
+ ret = -1;
+ goto unlock;
+ }
+
+ entry = malloc(sizeof(*entry));
+ if (entry == NULL) {
+ rte_errno = ENOMEM;
+ ret = -1;
+ goto unlock;
+ }
+
+ /* callback successfully created and is valid, add it to the list */
+ entry->clb = clb;
+ entry->socket_id = socket_id;
+ entry->limit = limit;
+ strlcpy(entry->name, name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
+ TAILQ_INSERT_TAIL(&mem_alloc_validator_list, entry, next);
+
+ ret = 0;
+
+ RTE_LOG(DEBUG, EAL, "Mem alloc validator '%s' on socket %i with limit %zu registered\n",
+ name, socket_id, limit);
+
+unlock:
+ rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
+ return ret;
+}
+
+int
+eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id)
+{
+ struct mem_alloc_validator_entry *entry;
+ int ret, len;
+
+ if (name == NULL || socket_id < 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+ len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
+ if (len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ } else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
+ rte_errno = ENAMETOOLONG;
+ return -1;
+ }
+ rte_rwlock_write_lock(&mem_alloc_validator_rwlock);
+
+ entry = find_mem_alloc_validator(name, socket_id);
+ if (entry == NULL) {
+ rte_errno = ENOENT;
+ ret = -1;
+ goto unlock;
+ }
+ TAILQ_REMOVE(&mem_alloc_validator_list, entry, next);
+ free(entry);
+
+ ret = 0;
+
+ RTE_LOG(DEBUG, EAL, "Mem alloc validator '%s' on socket %i unregistered\n",
+ name, socket_id);
+
+unlock:
+ rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
+ return ret;
+}
+
+int
+eal_memalloc_mem_alloc_validate(int socket_id, size_t new_len)
+{
+ struct mem_alloc_validator_entry *entry;
+ int ret = 0;
+
+ rte_rwlock_read_lock(&mem_alloc_validator_rwlock);
+
+ TAILQ_FOREACH(entry, &mem_alloc_validator_list, next) {
+ if (entry->socket_id != socket_id || entry->limit > new_len)
+ continue;
+ RTE_LOG(DEBUG, EAL, "Calling mem alloc validator '%s' on socket %i\n",
+ entry->name, entry->socket_id);
+ if (entry->clb(socket_id, entry->limit, new_len) < 0)
+ ret = -1;
+ }
+
+ rte_rwlock_read_unlock(&mem_alloc_validator_rwlock);
+
+ return ret;
+}
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
index 852f3bb9..fbfb1b05 100644
--- a/lib/librte_eal/common/eal_common_memory.c
+++ b/lib/librte_eal/common/eal_common_memory.c
@@ -2,82 +2,385 @@
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdarg.h>
+#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/queue.h>
+#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
+#include <rte_errno.h>
#include <rte_log.h>
+#include "eal_memalloc.h"
#include "eal_private.h"
#include "eal_internal_cfg.h"
/*
- * Return a pointer to a read-only table of struct rte_physmem_desc
- * elements, containing the layout of all addressable physical
- * memory. The last element of the table contains a NULL address.
+ * Try to reserve a virtual area of *size bytes with an anonymous mmap.
+ * If it is successful, return the pointer to the area and keep *size
+ * unmodified. Else, if shrinking is allowed, retry with a smaller area:
+ * decrease *size by page_sz until it reaches 0, and return NULL in that
+ * case. Note: this function returns an address which is a multiple of
+ * the requested page size.
*/
-const struct rte_memseg *
-rte_eal_get_physmem_layout(void)
+
+#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
+
+static void *next_baseaddr;
+static uint64_t system_page_sz;
+
+void *
+eal_get_virtual_area(void *requested_addr, size_t *size,
+ size_t page_sz, int flags, int mmap_flags)
+{
+ bool addr_is_hint, allow_shrink, unmap, no_align;
+ uint64_t map_sz;
+ void *mapped_addr, *aligned_addr;
+
+ if (system_page_sz == 0)
+ system_page_sz = sysconf(_SC_PAGESIZE);
+
+ mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+
+ RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
+
+ addr_is_hint = (flags & EAL_VIRTUAL_AREA_ADDR_IS_HINT) > 0;
+ allow_shrink = (flags & EAL_VIRTUAL_AREA_ALLOW_SHRINK) > 0;
+ unmap = (flags & EAL_VIRTUAL_AREA_UNMAP) > 0;
+
+ if (next_baseaddr == NULL && internal_config.base_virtaddr != 0 &&
+ rte_eal_process_type() == RTE_PROC_PRIMARY)
+ next_baseaddr = (void *) internal_config.base_virtaddr;
+
+ if (requested_addr == NULL && next_baseaddr != NULL) {
+ requested_addr = next_baseaddr;
+ requested_addr = RTE_PTR_ALIGN(requested_addr, page_sz);
+ addr_is_hint = true;
+ }
+
+ /* we don't need alignment of resulting pointer in the following cases:
+ *
+ * 1. page size is equal to the system page size
+ * 2. we have a requested address, and it is page-aligned, and we will
+ * be discarding the address if we get a different one.
+ *
+ * for all other cases, alignment is potentially necessary.
+ */
+ no_align = (requested_addr != NULL &&
+ requested_addr == RTE_PTR_ALIGN(requested_addr, page_sz) &&
+ !addr_is_hint) ||
+ page_sz == system_page_sz;
+
+ do {
+ map_sz = no_align ? *size : *size + page_sz;
+ if (map_sz > SIZE_MAX) {
+ RTE_LOG(ERR, EAL, "Map size too big\n");
+ rte_errno = E2BIG;
+ return NULL;
+ }
+
+ mapped_addr = mmap(requested_addr, (size_t)map_sz, PROT_READ,
+ mmap_flags, -1, 0);
+ if (mapped_addr == MAP_FAILED && allow_shrink)
+ *size -= page_sz;
+ } while (allow_shrink && mapped_addr == MAP_FAILED && *size > 0);
+
+ /* align resulting address - if map failed, we will ignore the value
+ * anyway, so no need to add additional checks.
+ */
+ aligned_addr = no_align ? mapped_addr :
+ RTE_PTR_ALIGN(mapped_addr, page_sz);
+
+ if (*size == 0) {
+ RTE_LOG(ERR, EAL, "Cannot get a virtual area of any size: %s\n",
+ strerror(errno));
+ rte_errno = errno;
+ return NULL;
+ } else if (mapped_addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
+ strerror(errno));
+ /* pass errno up the call chain */
+ rte_errno = errno;
+ return NULL;
+ } else if (requested_addr != NULL && !addr_is_hint &&
+ aligned_addr != requested_addr) {
+ RTE_LOG(ERR, EAL, "Cannot get a virtual area at requested address: %p (got %p)\n",
+ requested_addr, aligned_addr);
+ munmap(mapped_addr, map_sz);
+ rte_errno = EADDRNOTAVAIL;
+ return NULL;
+ } else if (requested_addr != NULL && addr_is_hint &&
+ aligned_addr != requested_addr) {
+ RTE_LOG(WARNING, EAL, "WARNING! Base virtual address hint (%p != %p) not respected!\n",
+ requested_addr, aligned_addr);
+ RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory into secondary processes\n");
+ } else if (next_baseaddr != NULL) {
+ next_baseaddr = RTE_PTR_ADD(aligned_addr, *size);
+ }
+
+ RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
+ aligned_addr, *size);
+
+ if (unmap) {
+ munmap(mapped_addr, map_sz);
+ } else if (!no_align) {
+ void *map_end, *aligned_end;
+ size_t before_len, after_len;
+
+ /* when we reserve space with alignment, we add alignment to
+ * mapping size. On 32-bit, if 1GB alignment was requested, this
+ * would waste 1GB of address space, which is a luxury we cannot
+ * afford. so, if alignment was performed, check if any unneeded
+ * address space can be unmapped back.
+ */
+
+ map_end = RTE_PTR_ADD(mapped_addr, (size_t)map_sz);
+ aligned_end = RTE_PTR_ADD(aligned_addr, *size);
+
+ /* unmap space before aligned mmap address */
+ before_len = RTE_PTR_DIFF(aligned_addr, mapped_addr);
+ if (before_len > 0)
+ munmap(mapped_addr, before_len);
+
+ /* unmap space after aligned end mmap address */
+ after_len = RTE_PTR_DIFF(map_end, aligned_end);
+ if (after_len > 0)
+ munmap(aligned_end, after_len);
+ }
+
+ return aligned_addr;
+}
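For illustration, a minimal sketch of how this internal helper might be called (not part of the patch; the flag names and shrink-on-failure semantics are taken from eal_get_virtual_area() above):

/* reserve a 2M-aligned range, shrinking the request on mmap failure */
size_t reserve_sz = RTE_PGSIZE_1G;
void *va = eal_get_virtual_area(NULL, &reserve_sz, RTE_PGSIZE_2M,
		EAL_VIRTUAL_AREA_ALLOW_SHRINK, 0);
if (va == NULL)
	return -1; /* rte_errno holds the mmap() failure reason */
/* reserve_sz now holds the size that was actually reserved */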
+
+static struct rte_memseg *
+virt2memseg(const void *addr, const struct rte_memseg_list *msl)
+{
+ const struct rte_fbarray *arr;
+ void *start, *end;
+ int ms_idx;
+
+ if (msl == NULL)
+ return NULL;
+
+ /* a memseg list was specified, check if it's the right one */
+ start = msl->base_va;
+ end = RTE_PTR_ADD(start, (size_t)msl->page_sz * msl->memseg_arr.len);
+
+ if (addr < start || addr >= end)
+ return NULL;
+
+ /* now, calculate index */
+ arr = &msl->memseg_arr;
+ ms_idx = RTE_PTR_DIFF(addr, msl->base_va) / msl->page_sz;
+ return rte_fbarray_get(arr, ms_idx);
+}
+
+static struct rte_memseg_list *
+virt2memseg_list(const void *addr)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *msl;
+ int msl_idx;
+
+ for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+ void *start, *end;
+ msl = &mcfg->memsegs[msl_idx];
+
+ start = msl->base_va;
+ end = RTE_PTR_ADD(start,
+ (size_t)msl->page_sz * msl->memseg_arr.len);
+ if (addr >= start && addr < end)
+ break;
+ }
+ /* if we didn't find our memseg list */
+ if (msl_idx == RTE_MAX_MEMSEG_LISTS)
+ return NULL;
+ return msl;
+}
+
+__rte_experimental struct rte_memseg_list *
+rte_mem_virt2memseg_list(const void *addr)
+{
+ return virt2memseg_list(addr);
+}
+
+struct virtiova {
+ rte_iova_t iova;
+ void *virt;
+};
+static int
+find_virt(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ struct virtiova *vi = arg;
+ if (vi->iova >= ms->iova && vi->iova < (ms->iova + ms->len)) {
+ size_t offset = vi->iova - ms->iova;
+ vi->virt = RTE_PTR_ADD(ms->addr, offset);
+ /* stop the walk */
+ return 1;
+ }
+ return 0;
+}
+static int
+find_virt_legacy(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, size_t len, void *arg)
{
- return rte_eal_get_configuration()->mem_config->memseg;
+ struct virtiova *vi = arg;
+ if (vi->iova >= ms->iova && vi->iova < (ms->iova + len)) {
+ size_t offset = vi->iova - ms->iova;
+ vi->virt = RTE_PTR_ADD(ms->addr, offset);
+ /* stop the walk */
+ return 1;
+ }
+ return 0;
}
+__rte_experimental void *
+rte_mem_iova2virt(rte_iova_t iova)
+{
+ struct virtiova vi;
+
+ memset(&vi, 0, sizeof(vi));
+
+ vi.iova = iova;
+ /* for legacy mem, we can get away with scanning VA-contiguous segments,
+ * as we know they are PA-contiguous as well
+ */
+ if (internal_config.legacy_mem)
+ rte_memseg_contig_walk(find_virt_legacy, &vi);
+ else
+ rte_memseg_walk(find_virt, &vi);
+
+ return vi.virt;
+}
+
+__rte_experimental struct rte_memseg *
+rte_mem_virt2memseg(const void *addr, const struct rte_memseg_list *msl)
+{
+ return virt2memseg(addr, msl != NULL ? msl :
+ rte_mem_virt2memseg_list(addr));
+}
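A minimal usage sketch of the two translation APIs added above (illustrative only, not part of the patch):

/* translate an IOVA back to a VA, then look up the page backing it */
void *virt = rte_mem_iova2virt(iova); /* NULL if not DPDK memory */
if (virt != NULL) {
	/* passing NULL for msl makes the API locate the list itself */
	struct rte_memseg *ms = rte_mem_virt2memseg(virt, NULL);
	if (ms != NULL)
		printf("page size: %" PRIu64 "\n", ms->hugepage_sz);
}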
+
+static int
+physmem_size(const struct rte_memseg_list *msl, void *arg)
+{
+ uint64_t *total_len = arg;
+
+ *total_len += msl->memseg_arr.count * msl->page_sz;
+
+ return 0;
+}
/* get the total size of memory */
uint64_t
rte_eal_get_physmem_size(void)
{
- const struct rte_mem_config *mcfg;
- unsigned i = 0;
uint64_t total_len = 0;
- /* get pointer to global configuration */
- mcfg = rte_eal_get_configuration()->mem_config;
+ rte_memseg_list_walk(physmem_size, &total_len);
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (mcfg->memseg[i].addr == NULL)
- break;
+ return total_len;
+}
- total_len += mcfg->memseg[i].len;
- }
+static int
+dump_memseg(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
+ void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int msl_idx, ms_idx;
+ FILE *f = arg;
- return total_len;
+ msl_idx = msl - mcfg->memsegs;
+ if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
+ return -1;
+
+ ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
+ if (ms_idx < 0)
+ return -1;
+
+ fprintf(f, "Segment %i-%i: IOVA:0x%"PRIx64", len:%zu, "
+ "virt:%p, socket_id:%"PRId32", "
+ "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
+ "nrank:%"PRIx32"\n",
+ msl_idx, ms_idx,
+ ms->iova,
+ ms->len,
+ ms->addr,
+ ms->socket_id,
+ ms->hugepage_sz,
+ ms->nchannel,
+ ms->nrank);
+
+ return 0;
}
-/* Dump the physical memory layout on console */
-void
-rte_dump_physmem_layout(FILE *f)
+/*
+ * Defined here because it is declared in rte_memory.h, but the actual
+ * implementation is in eal_common_memalloc.c, like all other memalloc internals.
+ */
+int __rte_experimental
+rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
+ void *arg)
{
- const struct rte_mem_config *mcfg;
- unsigned i = 0;
+ /* FreeBSD boots with legacy mem enabled by default */
+ if (internal_config.legacy_mem) {
+ RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+ return eal_memalloc_mem_event_callback_register(name, clb, arg);
+}
- /* get pointer to global configuration */
- mcfg = rte_eal_get_configuration()->mem_config;
+int __rte_experimental
+rte_mem_event_callback_unregister(const char *name, void *arg)
+{
+ /* FreeBSD boots with legacy mem enabled by default */
+ if (internal_config.legacy_mem) {
+ RTE_LOG(DEBUG, EAL, "Registering mem event callbacks not supported\n");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+ return eal_memalloc_mem_event_callback_unregister(name, arg);
+}
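For illustration, a sketch of a callback that could be registered through this wrapper (not part of the patch; the callback arguments are assumed from the rte_mem_event_callback_t declaration in rte_memory.h):

static void
my_mem_event_cb(enum rte_mem_event event_type, const void *addr,
		size_t len, void *arg __rte_unused)
{
	/* invoked on every hugepage mapping and unmapping event */
	if (event_type == RTE_MEM_EVENT_ALLOC)
		printf("mapped %zu bytes at %p\n", len, addr);
	else
		printf("unmapped %zu bytes at %p\n", len, addr);
}

/* fails with rte_errno == ENOTSUP when legacy mem is enabled */
rte_mem_event_callback_register("my-cb", my_mem_event_cb, NULL);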
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (mcfg->memseg[i].addr == NULL)
- break;
+int __rte_experimental
+rte_mem_alloc_validator_register(const char *name,
+ rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
+{
+ /* FreeBSD boots with legacy mem enabled by default */
+ if (internal_config.legacy_mem) {
+ RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+ return eal_memalloc_mem_alloc_validator_register(name, clb, socket_id,
+ limit);
+}
- fprintf(f, "Segment %u: IOVA:0x%"PRIx64", len:%zu, "
- "virt:%p, socket_id:%"PRId32", "
- "hugepage_sz:%"PRIu64", nchannel:%"PRIx32", "
- "nrank:%"PRIx32"\n", i,
- mcfg->memseg[i].iova,
- mcfg->memseg[i].len,
- mcfg->memseg[i].addr,
- mcfg->memseg[i].socket_id,
- mcfg->memseg[i].hugepage_sz,
- mcfg->memseg[i].nchannel,
- mcfg->memseg[i].nrank);
+int __rte_experimental
+rte_mem_alloc_validator_unregister(const char *name, int socket_id)
+{
+ /* FreeBSD boots with legacy mem enabled by default */
+ if (internal_config.legacy_mem) {
+ RTE_LOG(DEBUG, EAL, "Registering mem alloc validators not supported\n");
+ rte_errno = ENOTSUP;
+ return -1;
}
+ return eal_memalloc_mem_alloc_validator_unregister(name, socket_id);
+}
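A sketch of an allocation validator registered through this wrapper (illustrative; the callback arguments follow the invocation in eal_memalloc_mem_alloc_validate() earlier in this patch):

/* veto any allocation growing socket 0 past the registered limit */
static int
my_alloc_validator(int socket_id, size_t limit, size_t new_len)
{
	RTE_LOG(WARNING, EAL,
		"socket %i would grow to %zu bytes (limit %zu)\n",
		socket_id, new_len, limit);
	return -1; /* a negative return rejects the allocation */
}

rte_mem_alloc_validator_register("my-limit", my_alloc_validator,
		0, (size_t)1 << 30);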
+
+/* Dump the physical memory layout on console */
+void
+rte_dump_physmem_layout(FILE *f)
+{
+ rte_memseg_walk(dump_memseg, f);
}
/* return the number of memory channels */
@@ -117,20 +420,165 @@ rte_mem_lock_page(const void *virt)
return mlock((void *)aligned, page_size);
}
+int __rte_experimental
+rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int i, ms_idx, ret = 0;
+
+ for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+ struct rte_memseg_list *msl = &mcfg->memsegs[i];
+ const struct rte_memseg *ms;
+ struct rte_fbarray *arr;
+
+ if (msl->memseg_arr.count == 0)
+ continue;
+
+ arr = &msl->memseg_arr;
+
+ ms_idx = rte_fbarray_find_next_used(arr, 0);
+ while (ms_idx >= 0) {
+ int n_segs;
+ size_t len;
+
+ ms = rte_fbarray_get(arr, ms_idx);
+
+ /* find how many more segments there are, starting with
+ * this one.
+ */
+ n_segs = rte_fbarray_find_contig_used(arr, ms_idx);
+ len = n_segs * msl->page_sz;
+
+ ret = func(msl, ms, len, arg);
+ if (ret)
+ return ret;
+ ms_idx = rte_fbarray_find_next_used(arr,
+ ms_idx + n_segs);
+ }
+ }
+ return 0;
+}
+
+int __rte_experimental
+rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int ret = 0;
+
+ /* do not allow allocations/frees/init while we iterate */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ ret = rte_memseg_contig_walk_thread_unsafe(func, arg);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
+ return ret;
+}
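For illustration, a contiguous-walk callback (not part of the patch); the extra len argument is the length of the VA-contiguous run starting at ms, as computed above:

static int
biggest_contig(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms __rte_unused, size_t len, void *arg)
{
	size_t *biggest = arg; /* track the largest contiguous run */

	if (len > *biggest)
		*biggest = len;
	return 0; /* a non-zero return would stop the walk */
}

size_t biggest = 0;
rte_memseg_contig_walk(biggest_contig, &biggest);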
+
+int __rte_experimental
+rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int i, ms_idx, ret = 0;
+
+ for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+ struct rte_memseg_list *msl = &mcfg->memsegs[i];
+ const struct rte_memseg *ms;
+ struct rte_fbarray *arr;
+
+ if (msl->memseg_arr.count == 0)
+ continue;
+
+ arr = &msl->memseg_arr;
+
+ ms_idx = rte_fbarray_find_next_used(arr, 0);
+ while (ms_idx >= 0) {
+ ms = rte_fbarray_get(arr, ms_idx);
+ ret = func(msl, ms, arg);
+ if (ret)
+ return ret;
+ ms_idx = rte_fbarray_find_next_used(arr, ms_idx + 1);
+ }
+ }
+ return 0;
+}
+
+int __rte_experimental
+rte_memseg_walk(rte_memseg_walk_t func, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int ret = 0;
+
+ /* do not allow allocations/frees/init while we iterate */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ ret = rte_memseg_walk_thread_unsafe(func, arg);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
+ return ret;
+}
+
+int __rte_experimental
+rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int i, ret = 0;
+
+ for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+ struct rte_memseg_list *msl = &mcfg->memsegs[i];
+
+ if (msl->base_va == NULL)
+ continue;
+
+ ret = func(msl, arg);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int __rte_experimental
+rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int ret = 0;
+
+ /* do not allow allocations/frees/init while we iterate */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+ ret = rte_memseg_list_walk_thread_unsafe(func, arg);
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
+ return ret;
+}
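For illustration, a per-socket accounting callback for rte_memseg_walk() (not part of the patch). The _thread_unsafe variants above exist for callers that already hold the hotplug lock, such as code running inside another walk:

struct socket_mem { int socket_id; uint64_t total; };

static int
count_socket_mem(const struct rte_memseg_list *msl __rte_unused,
		const struct rte_memseg *ms, void *arg)
{
	struct socket_mem *sm = arg;

	if (ms->socket_id == sm->socket_id)
		sm->total += ms->len;
	return 0;
}

struct socket_mem sm = { .socket_id = 0, .total = 0 };
rte_memseg_walk(count_socket_mem, &sm);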
+
/* init memory subsystem */
int
rte_eal_memory_init(void)
{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int retval;
RTE_LOG(DEBUG, EAL, "Setting up physically contiguous memory...\n");
- const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
+ if (!mcfg)
+ return -1;
+
+ /* lock mem hotplug here, to prevent races while we init */
+ rte_rwlock_read_lock(&mcfg->memory_hotplug_lock);
+
+ if (rte_eal_memseg_init() < 0)
+ goto fail;
+
+ if (eal_memalloc_init() < 0)
+ goto fail;
+
+ retval = rte_eal_process_type() == RTE_PROC_PRIMARY ?
rte_eal_hugepage_init() :
rte_eal_hugepage_attach();
if (retval < 0)
- return -1;
+ goto fail;
if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
- return -1;
+ goto fail;
return 0;
+fail:
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+ return -1;
}
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
index 1ab3ade2..7300fe05 100644
--- a/lib/librte_eal/common/eal_common_memzone.c
+++ b/lib/librte_eal/common/eal_common_memzone.c
@@ -28,88 +28,49 @@
static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
- const struct rte_mem_config *mcfg;
+ struct rte_mem_config *mcfg;
+ struct rte_fbarray *arr;
const struct rte_memzone *mz;
- unsigned i = 0;
+ int i = 0;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
+ arr = &mcfg->memzones;
/*
* the algorithm is not optimal (linear), but there are few
* zones and this function should be called at init only
*/
- for (i = 0; i < RTE_MAX_MEMZONE; i++) {
- mz = &mcfg->memzone[i];
- if (mz->addr != NULL && !strncmp(name, mz->name, RTE_MEMZONE_NAMESIZE))
- return &mcfg->memzone[i];
+ i = rte_fbarray_find_next_used(arr, 0);
+ while (i >= 0) {
+ mz = rte_fbarray_get(arr, i);
+ if (mz->addr != NULL &&
+ !strncmp(name, mz->name, RTE_MEMZONE_NAMESIZE))
+ return mz;
+ i = rte_fbarray_find_next_used(arr, i + 1);
}
-
- return NULL;
-}
-
-static inline struct rte_memzone *
-get_next_free_memzone(void)
-{
- struct rte_mem_config *mcfg;
- unsigned i = 0;
-
- /* get pointer to global configuration */
- mcfg = rte_eal_get_configuration()->mem_config;
-
- for (i = 0; i < RTE_MAX_MEMZONE; i++) {
- if (mcfg->memzone[i].addr == NULL)
- return &mcfg->memzone[i];
- }
-
return NULL;
}
-/* This function will return the greatest free block if a heap has been
- * specified. If no heap has been specified, it will return the heap and
- * length of the greatest free block available in all heaps */
-static size_t
-find_heap_max_free_elem(int *s, unsigned align)
-{
- struct rte_mem_config *mcfg;
- struct rte_malloc_socket_stats stats;
- int i, socket = *s;
- size_t len = 0;
-
- /* get pointer to global configuration */
- mcfg = rte_eal_get_configuration()->mem_config;
-
- for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
- if ((socket != SOCKET_ID_ANY) && (socket != i))
- continue;
-
- malloc_heap_get_stats(&mcfg->malloc_heaps[i], &stats);
- if (stats.greatest_free_size > len) {
- len = stats.greatest_free_size;
- *s = i;
- }
- }
-
- if (len < MALLOC_ELEM_OVERHEAD + align)
- return 0;
-
- return len - MALLOC_ELEM_OVERHEAD - align;
-}
-
static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
- int socket_id, unsigned flags, unsigned align, unsigned bound)
+ int socket_id, unsigned int flags, unsigned int align,
+ unsigned int bound)
{
struct rte_memzone *mz;
struct rte_mem_config *mcfg;
+ struct rte_fbarray *arr;
+ void *mz_addr;
size_t requested_len;
- int socket, i;
+ int mz_idx;
+ bool contig;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
+ arr = &mcfg->memzones;
/* no more room in config */
- if (mcfg->memzone_cnt >= RTE_MAX_MEMZONE) {
+ if (arr->count >= arr->len) {
RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
rte_errno = ENOSPC;
return NULL;
@@ -148,8 +109,7 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
return NULL;
}
- len += RTE_CACHE_LINE_MASK;
- len &= ~((size_t) RTE_CACHE_LINE_MASK);
+ len = RTE_ALIGN_CEIL(len, RTE_CACHE_LINE_SIZE);
/* save minimal requested length */
requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);
@@ -169,40 +129,22 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
if (!rte_eal_has_hugepages())
socket_id = SOCKET_ID_ANY;
- if (len == 0) {
- if (bound != 0)
- requested_len = bound;
- else {
- requested_len = find_heap_max_free_elem(&socket_id, align);
- if (requested_len == 0) {
- rte_errno = ENOMEM;
- return NULL;
- }
- }
- }
+ contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
+ /* malloc only cares about size flags, remove contig flag from flags */
+ flags &= ~RTE_MEMZONE_IOVA_CONTIG;
- if (socket_id == SOCKET_ID_ANY)
- socket = malloc_get_numa_socket();
- else
- socket = socket_id;
-
- /* allocate memory on heap */
- void *mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[socket], NULL,
- requested_len, flags, align, bound);
-
- if ((mz_addr == NULL) && (socket_id == SOCKET_ID_ANY)) {
- /* try other heaps */
- for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
- if (socket == i)
- continue;
-
- mz_addr = malloc_heap_alloc(&mcfg->malloc_heaps[i],
- NULL, requested_len, flags, align, bound);
- if (mz_addr != NULL)
- break;
- }
+ if (len == 0 && bound == 0) {
+ /* no size constraints were placed, so use malloc elem len */
+ requested_len = 0;
+ mz_addr = malloc_heap_alloc_biggest(NULL, socket_id, flags,
+ align, contig);
+ } else {
+ if (len == 0)
+ requested_len = bound;
+ /* allocate memory on heap */
+ mz_addr = malloc_heap_alloc(NULL, requested_len, socket_id,
+ flags, align, bound, contig);
}
-
if (mz_addr == NULL) {
rte_errno = ENOMEM;
return NULL;
@@ -211,33 +153,38 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
struct malloc_elem *elem = malloc_elem_from_data(mz_addr);
/* fill the zone in config */
- mz = get_next_free_memzone();
+ mz_idx = rte_fbarray_find_next_free(arr, 0);
+
+ if (mz_idx < 0) {
+ mz = NULL;
+ } else {
+ rte_fbarray_set_used(arr, mz_idx);
+ mz = rte_fbarray_get(arr, mz_idx);
+ }
if (mz == NULL) {
- RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone but there is room "
- "in config!\n", __func__);
- malloc_elem_free(elem);
+ RTE_LOG(ERR, EAL, "%s(): Cannot find free memzone\n", __func__);
+ malloc_heap_free(elem);
rte_errno = ENOSPC;
return NULL;
}
- mcfg->memzone_cnt++;
snprintf(mz->name, sizeof(mz->name), "%s", name);
mz->iova = rte_malloc_virt2iova(mz_addr);
mz->addr = mz_addr;
- mz->len = (requested_len == 0 ? elem->size : requested_len);
- mz->hugepage_sz = elem->ms->hugepage_sz;
- mz->socket_id = elem->ms->socket_id;
+ mz->len = requested_len == 0 ?
+ elem->size - elem->pad - MALLOC_ELEM_OVERHEAD :
+ requested_len;
+ mz->hugepage_sz = elem->msl->page_sz;
+ mz->socket_id = elem->msl->socket_id;
mz->flags = 0;
- mz->memseg_id = elem->ms - rte_eal_get_configuration()->mem_config->memseg;
return mz;
}
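Since memory is no longer guaranteed to be physically contiguous, callers that need IOVA-contiguous zones must now request them explicitly. A minimal sketch (illustrative, not part of the patch):

const struct rte_memzone *mz;

mz = rte_memzone_reserve("my_zone", 1 << 20, SOCKET_ID_ANY,
		RTE_MEMZONE_IOVA_CONTIG);
if (mz == NULL)
	return -1; /* rte_errno is ENOMEM if no contiguous run exists */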
static const struct rte_memzone *
-rte_memzone_reserve_thread_safe(const char *name, size_t len,
- int socket_id, unsigned flags, unsigned align,
- unsigned bound)
+rte_memzone_reserve_thread_safe(const char *name, size_t len, int socket_id,
+ unsigned int flags, unsigned int align, unsigned int bound)
{
struct rte_mem_config *mcfg;
const struct rte_memzone *mz = NULL;
@@ -296,34 +243,38 @@ int
rte_memzone_free(const struct rte_memzone *mz)
{
struct rte_mem_config *mcfg;
+ struct rte_fbarray *arr;
+ struct rte_memzone *found_mz;
int ret = 0;
- void *addr;
+ void *addr = NULL;
unsigned idx;
if (mz == NULL)
return -EINVAL;
mcfg = rte_eal_get_configuration()->mem_config;
+ arr = &mcfg->memzones;
rte_rwlock_write_lock(&mcfg->mlock);
- idx = ((uintptr_t)mz - (uintptr_t)mcfg->memzone);
- idx = idx / sizeof(struct rte_memzone);
+ idx = rte_fbarray_find_idx(arr, mz);
+ found_mz = rte_fbarray_get(arr, idx);
- addr = mcfg->memzone[idx].addr;
- if (addr == NULL)
+ if (found_mz == NULL) {
+ ret = -EINVAL;
+ } else if (found_mz->addr == NULL) {
+ RTE_LOG(ERR, EAL, "Memzone is not allocated\n");
ret = -EINVAL;
- else if (mcfg->memzone_cnt == 0) {
- rte_panic("%s(): memzone address not NULL but memzone_cnt is 0!\n",
- __func__);
} else {
- memset(&mcfg->memzone[idx], 0, sizeof(mcfg->memzone[idx]));
- mcfg->memzone_cnt--;
+ addr = found_mz->addr;
+ memset(found_mz, 0, sizeof(*found_mz));
+ rte_fbarray_set_free(arr, idx);
}
rte_rwlock_write_unlock(&mcfg->mlock);
- rte_free(addr);
+ if (addr != NULL)
+ rte_free(addr);
return ret;
}
@@ -348,31 +299,61 @@ rte_memzone_lookup(const char *name)
return memzone;
}
+static void
+dump_memzone(const struct rte_memzone *mz, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *msl = NULL;
+ void *cur_addr, *mz_end;
+ struct rte_memseg *ms;
+ int mz_idx, ms_idx;
+ size_t page_sz;
+ FILE *f = arg;
+
+ mz_idx = rte_fbarray_find_idx(&mcfg->memzones, mz);
+
+ fprintf(f, "Zone %u: name:<%s>, len:0x%zx, virt:%p, "
+ "socket_id:%"PRId32", flags:%"PRIx32"\n",
+ mz_idx,
+ mz->name,
+ mz->len,
+ mz->addr,
+ mz->socket_id,
+ mz->flags);
+
+ /* go through each page occupied by this memzone */
+ msl = rte_mem_virt2memseg_list(mz->addr);
+ if (!msl) {
+ RTE_LOG(DEBUG, EAL, "Skipping bad memzone\n");
+ return;
+ }
+ page_sz = (size_t)mz->hugepage_sz;
+ cur_addr = RTE_PTR_ALIGN_FLOOR(mz->addr, page_sz);
+ mz_end = RTE_PTR_ADD(cur_addr, mz->len);
+
+ fprintf(f, "physical segments used:\n");
+ ms_idx = RTE_PTR_DIFF(mz->addr, msl->base_va) / page_sz;
+ ms = rte_fbarray_get(&msl->memseg_arr, ms_idx);
+
+ do {
+ fprintf(f, " addr: %p iova: 0x%" PRIx64 " "
+ "len: 0x%zx "
+ "pagesz: 0x%zx\n",
+ cur_addr, ms->iova, ms->len, page_sz);
+
+ /* advance VA to next page */
+ cur_addr = RTE_PTR_ADD(cur_addr, page_sz);
+
+ /* memzones occupy contiguous segments */
+ ++ms;
+ } while (cur_addr < mz_end);
+}
+
/* Dump all reserved memory zones on console */
void
rte_memzone_dump(FILE *f)
{
- struct rte_mem_config *mcfg;
- unsigned i = 0;
-
- /* get pointer to global configuration */
- mcfg = rte_eal_get_configuration()->mem_config;
-
- rte_rwlock_read_lock(&mcfg->mlock);
- /* dump all zones */
- for (i=0; i<RTE_MAX_MEMZONE; i++) {
- if (mcfg->memzone[i].addr == NULL)
- break;
- fprintf(f, "Zone %u: name:<%s>, IO:0x%"PRIx64", len:0x%zx"
- ", virt:%p, socket_id:%"PRId32", flags:%"PRIx32"\n", i,
- mcfg->memzone[i].name,
- mcfg->memzone[i].iova,
- mcfg->memzone[i].len,
- mcfg->memzone[i].addr,
- mcfg->memzone[i].socket_id,
- mcfg->memzone[i].flags);
- }
- rte_rwlock_read_unlock(&mcfg->mlock);
+ rte_memzone_walk(dump_memzone, f);
}
/*
@@ -382,30 +363,27 @@ int
rte_eal_memzone_init(void)
{
struct rte_mem_config *mcfg;
- const struct rte_memseg *memseg;
/* get pointer to global configuration */
mcfg = rte_eal_get_configuration()->mem_config;
- /* secondary processes don't need to initialise anything */
- if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- return 0;
+ rte_rwlock_write_lock(&mcfg->mlock);
- memseg = rte_eal_get_physmem_layout();
- if (memseg == NULL) {
- RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
+ rte_fbarray_init(&mcfg->memzones, "memzone",
+ RTE_MAX_MEMZONE, sizeof(struct rte_memzone))) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memzone list\n");
+ return -1;
+ } else if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+ rte_fbarray_attach(&mcfg->memzones)) {
+ RTE_LOG(ERR, EAL, "Cannot attach to memzone list\n");
+ rte_rwlock_write_unlock(&mcfg->mlock);
return -1;
}
- rte_rwlock_write_lock(&mcfg->mlock);
-
- /* delete all zones */
- mcfg->memzone_cnt = 0;
- memset(mcfg->memzone, 0, sizeof(mcfg->memzone));
-
rte_rwlock_write_unlock(&mcfg->mlock);
- return rte_eal_malloc_heap_init();
+ return 0;
}
/* Walk all reserved memory zones */
@@ -413,14 +391,18 @@ void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
void *arg)
{
struct rte_mem_config *mcfg;
- unsigned i;
+ struct rte_fbarray *arr;
+ int i;
mcfg = rte_eal_get_configuration()->mem_config;
+ arr = &mcfg->memzones;
rte_rwlock_read_lock(&mcfg->mlock);
- for (i=0; i<RTE_MAX_MEMZONE; i++) {
- if (mcfg->memzone[i].addr != NULL)
- (*func)(&mcfg->memzone[i], arg);
+ i = rte_fbarray_find_next_used(arr, 0);
+ while (i >= 0) {
+ struct rte_memzone *mz = rte_fbarray_get(arr, i);
+ (*func)(mz, arg);
+ i = rte_fbarray_find_next_used(arr, i + 1);
}
rte_rwlock_read_unlock(&mcfg->mlock);
}
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index 9f2f8d25..dd5f9740 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -27,6 +27,7 @@
#include "eal_internal_cfg.h"
#include "eal_options.h"
#include "eal_filesystem.h"
+#include "eal_private.h"
#define BITS_PER_HEX 4
#define LCORE_OPT_LST 1
@@ -65,14 +66,18 @@ eal_long_options[] = {
{OPT_NO_HUGE, 0, NULL, OPT_NO_HUGE_NUM },
{OPT_NO_PCI, 0, NULL, OPT_NO_PCI_NUM },
{OPT_NO_SHCONF, 0, NULL, OPT_NO_SHCONF_NUM },
+ {OPT_IN_MEMORY, 0, NULL, OPT_IN_MEMORY_NUM },
{OPT_PCI_BLACKLIST, 1, NULL, OPT_PCI_BLACKLIST_NUM },
{OPT_PCI_WHITELIST, 1, NULL, OPT_PCI_WHITELIST_NUM },
{OPT_PROC_TYPE, 1, NULL, OPT_PROC_TYPE_NUM },
{OPT_SOCKET_MEM, 1, NULL, OPT_SOCKET_MEM_NUM },
+ {OPT_SOCKET_LIMIT, 1, NULL, OPT_SOCKET_LIMIT_NUM },
{OPT_SYSLOG, 1, NULL, OPT_SYSLOG_NUM },
{OPT_VDEV, 1, NULL, OPT_VDEV_NUM },
{OPT_VFIO_INTR, 1, NULL, OPT_VFIO_INTR_NUM },
{OPT_VMWARE_TSC_MAP, 0, NULL, OPT_VMWARE_TSC_MAP_NUM },
+ {OPT_LEGACY_MEM, 0, NULL, OPT_LEGACY_MEM_NUM },
+ {OPT_SINGLE_FILE_SEGMENTS, 0, NULL, OPT_SINGLE_FILE_SEGMENTS_NUM},
{0, 0, NULL, 0 }
};
@@ -151,7 +156,7 @@ eal_option_device_parse(void)
TAILQ_FOREACH_SAFE(devopt, &devopt_list, next, tmp) {
if (ret == 0) {
- ret = rte_eal_devargs_add(devopt->type, devopt->arg);
+ ret = rte_devargs_add(devopt->type, devopt->arg);
if (ret)
RTE_LOG(ERR, EAL, "Unable to parse device '%s'\n",
devopt->arg);
@@ -176,9 +181,16 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
/* zero out the NUMA config */
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
internal_cfg->socket_mem[i] = 0;
+ internal_cfg->force_socket_limits = 0;
+ /* zero out the NUMA limits config */
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
+ internal_cfg->socket_limit[i] = 0;
/* zero out hugedir descriptors */
- for (i = 0; i < MAX_HUGEPAGE_SIZES; i++)
+ for (i = 0; i < MAX_HUGEPAGE_SIZES; i++) {
+ memset(&internal_cfg->hugepage_info[i], 0,
+ sizeof(internal_cfg->hugepage_info[0]));
internal_cfg->hugepage_info[i].lock_descriptor = -1;
+ }
internal_cfg->base_virtaddr = 0;
internal_cfg->syslog_facility = LOG_DAEMON;
@@ -194,6 +206,7 @@ eal_reset_internal_config(struct internal_config *internal_cfg)
internal_cfg->vmware_tsc_map = 0;
internal_cfg->create_uio_dev = 0;
internal_cfg->user_mbuf_pool_ops_name = NULL;
+ internal_cfg->init_complete = 0;
}
static int
@@ -308,6 +321,7 @@ eal_parse_service_coremask(const char *coremask)
unsigned int count = 0;
char c;
int val;
+ uint32_t taken_lcore_count = 0;
if (coremask == NULL)
return -1;
@@ -341,7 +355,7 @@ eal_parse_service_coremask(const char *coremask)
if (master_lcore_parsed &&
cfg->master_lcore == lcore) {
RTE_LOG(ERR, EAL,
- "Error: lcore %u is master lcore, cannot use as service core\n",
+ "lcore %u is master lcore, cannot use as service core\n",
idx);
return -1;
}
@@ -351,6 +365,10 @@ eal_parse_service_coremask(const char *coremask)
"lcore %u unavailable\n", idx);
return -1;
}
+
+ if (cfg->lcore_role[idx] == ROLE_RTE)
+ taken_lcore_count++;
+
lcore_config[idx].core_role = ROLE_SERVICE;
count++;
}
@@ -367,11 +385,28 @@ eal_parse_service_coremask(const char *coremask)
if (count == 0)
return -1;
+ if (core_parsed && taken_lcore_count != count) {
+ RTE_LOG(WARNING, EAL,
+ "Not all service cores are in the coremask. "
+ "Please ensure -c or -l includes service cores\n");
+ }
+
cfg->service_lcore_count = count;
return 0;
}
static int
+eal_service_cores_parsed(void)
+{
+ int idx;
+ for (idx = 0; idx < RTE_MAX_LCORE; idx++) {
+ if (lcore_config[idx].core_role == ROLE_SERVICE)
+ return 1;
+ }
+ return 0;
+}
+
+static int
eal_parse_coremask(const char *coremask)
{
struct rte_config *cfg = rte_eal_get_configuration();
@@ -380,6 +415,11 @@ eal_parse_coremask(const char *coremask)
char c;
int val;
+ if (eal_service_cores_parsed())
+ RTE_LOG(WARNING, EAL,
+ "Service cores parsed before dataplane cores. "
+ "Please ensure -c is before -s or -S\n");
+
if (coremask == NULL)
return -1;
/* Remove all blank characters ahead and after .
@@ -411,6 +451,7 @@ eal_parse_coremask(const char *coremask)
"unavailable\n", idx);
return -1;
}
+
cfg->lcore_role[idx] = ROLE_RTE;
lcore_config[idx].core_index = count;
count++;
@@ -442,6 +483,7 @@ eal_parse_service_corelist(const char *corelist)
unsigned count = 0;
char *end = NULL;
int min, max;
+ uint32_t taken_lcore_count = 0;
if (corelist == NULL)
return -1;
@@ -483,6 +525,9 @@ eal_parse_service_corelist(const char *corelist)
idx);
return -1;
}
+ if (cfg->lcore_role[idx] == ROLE_RTE)
+ taken_lcore_count++;
+
lcore_config[idx].core_role =
ROLE_SERVICE;
count++;
@@ -497,6 +542,12 @@ eal_parse_service_corelist(const char *corelist)
if (count == 0)
return -1;
+ if (core_parsed && taken_lcore_count != count) {
+ RTE_LOG(WARNING, EAL,
+ "Not all service cores were in the coremask. "
+ "Please ensure -c or -l includes service cores\n");
+ }
+
return 0;
}
@@ -509,6 +560,11 @@ eal_parse_corelist(const char *corelist)
char *end = NULL;
int min, max;
+ if (eal_service_cores_parsed())
+ RTE_LOG(WARNING, EAL,
+ "Service cores parsed before dataplane cores. "
+ "Please ensure -l is before -s or -S\n");
+
if (corelist == NULL)
return -1;
@@ -583,7 +639,8 @@ eal_parse_master_lcore(const char *arg)
/* ensure master core is not used as service core */
if (lcore_config[cfg->master_lcore].core_role == ROLE_SERVICE) {
- RTE_LOG(ERR, EAL, "Error: Master lcore is used as a service core.\n");
+ RTE_LOG(ERR, EAL,
+ "Error: Master lcore is used as a service core\n");
return -1;
}
@@ -875,7 +932,7 @@ static int
eal_parse_syslog(const char *facility, struct internal_config *conf)
{
int i;
- static struct {
+ static const struct {
const char *name;
int value;
} map[] = {
@@ -911,43 +968,92 @@ eal_parse_syslog(const char *facility, struct internal_config *conf)
}
static int
-eal_parse_log_level(const char *arg)
+eal_parse_log_priority(const char *level)
{
- char *end, *str, *type, *level;
+ static const char * const levels[] = {
+ [RTE_LOG_EMERG] = "emergency",
+ [RTE_LOG_ALERT] = "alert",
+ [RTE_LOG_CRIT] = "critical",
+ [RTE_LOG_ERR] = "error",
+ [RTE_LOG_WARNING] = "warning",
+ [RTE_LOG_NOTICE] = "notice",
+ [RTE_LOG_INFO] = "info",
+ [RTE_LOG_DEBUG] = "debug",
+ };
+ size_t len = strlen(level);
unsigned long tmp;
+ char *end;
+ unsigned int i;
- str = strdup(arg);
- if (str == NULL)
+ if (len == 0)
return -1;
- if (strchr(str, ',') == NULL) {
- type = NULL;
- level = str;
- } else {
- type = strsep(&str, ",");
- level = strsep(&str, ",");
+ /* look for named values, skip 0 which is not a valid level */
+ for (i = 1; i < RTE_DIM(levels); i++) {
+ if (strncmp(levels[i], level, len) == 0)
+ return i;
}
+ /* not a string, maybe it is numeric */
errno = 0;
tmp = strtoul(level, &end, 0);
/* check for errors */
- if ((errno != 0) || (level[0] == '\0') ||
- end == NULL || (*end != '\0'))
- goto fail;
+ if (errno != 0 || end == NULL || *end != '\0' ||
+ tmp >= UINT32_MAX)
+ return -1;
- /* log_level is a uint32_t */
- if (tmp >= UINT32_MAX)
- goto fail;
+ return tmp;
+}
+
+static int
+eal_parse_log_level(const char *arg)
+{
+ const char *pattern = NULL;
+ const char *regex = NULL;
+ char *str, *level;
+ int priority;
+
+ str = strdup(arg);
+ if (str == NULL)
+ return -1;
- if (type == NULL) {
- rte_log_set_global_level(tmp);
- } else if (rte_log_set_level_regexp(type, tmp) < 0) {
- printf("cannot set log level %s,%lu\n",
- type, tmp);
+ if ((level = strchr(str, ','))) {
+ regex = str;
+ *level++ = '\0';
+ } else if ((level = strchr(str, ':'))) {
+ pattern = str;
+ *level++ = '\0';
+ } else {
+ level = str;
+ }
+
+ priority = eal_parse_log_priority(level);
+ if (priority < 0) {
+ fprintf(stderr, "invalid log priority: %s\n", level);
goto fail;
}
+ if (regex) {
+ if (rte_log_set_level_regexp(regex, priority) < 0) {
+ fprintf(stderr, "cannot set log level %s,%d\n",
+ pattern, priority);
+ goto fail;
+ }
+ if (rte_log_save_regexp(regex, priority) < 0)
+ goto fail;
+ } else if (pattern) {
+ if (rte_log_set_level_pattern(pattern, priority) < 0) {
+ fprintf(stderr, "cannot set log level %s:%d\n",
+ pattern, priority);
+ goto fail;
+ }
+ if (rte_log_save_pattern(pattern, priority) < 0)
+ goto fail;
+ } else {
+ rte_log_set_global_level(priority);
+ }
+
free(str);
return 0;
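After this change the option accepts three forms; for illustration (the component names are hypothetical):

  --log-level=7                  global numeric level
  --log-level='lib\.eal.*',8     regexp match, comma separator
  --log-level=lib.eal*:debug     globbing pattern, colon separator

Named levels are matched by prefix, so "debug" and "deb" are equivalent.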
@@ -1089,6 +1195,8 @@ eal_parse_common_option(int opt, const char *optarg,
case OPT_NO_HUGE_NUM:
conf->no_hugetlbfs = 1;
+ /* no-huge is legacy mem */
+ conf->legacy_mem = 1;
break;
case OPT_NO_PCI_NUM:
@@ -1107,6 +1215,13 @@ eal_parse_common_option(int opt, const char *optarg,
conf->no_shconf = 1;
break;
+ case OPT_IN_MEMORY_NUM:
+ conf->in_memory = 1;
+ /* in-memory is a superset of noshconf and huge-unlink */
+ conf->no_shconf = 1;
+ conf->hugepage_unlink = 1;
+ break;
+
case OPT_PROC_TYPE_NUM:
conf->process_type = eal_parse_proc_type(optarg);
break;
@@ -1160,6 +1275,12 @@ eal_parse_common_option(int opt, const char *optarg,
core_parsed = LCORE_OPT_MAP;
break;
+ case OPT_LEGACY_MEM_NUM:
+ conf->legacy_mem = 1;
+ break;
+ case OPT_SINGLE_FILE_SEGMENTS_NUM:
+ conf->single_file_segments = 1;
+ break;
/* don't know what to do, leave this to caller */
default:
@@ -1252,12 +1373,23 @@ eal_check_common_options(struct internal_config *internal_cfg)
"be specified together with --"OPT_NO_HUGE"\n");
return -1;
}
-
- if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink) {
+ if (internal_cfg->no_hugetlbfs && internal_cfg->hugepage_unlink &&
+ !internal_cfg->in_memory) {
RTE_LOG(ERR, EAL, "Option --"OPT_HUGE_UNLINK" cannot "
"be specified together with --"OPT_NO_HUGE"\n");
return -1;
}
+ if (internal_config.force_socket_limits && internal_config.legacy_mem) {
+ RTE_LOG(ERR, EAL, "Option --"OPT_SOCKET_LIMIT
+ " is only supported in non-legacy memory mode\n");
+ }
+ if (internal_cfg->single_file_segments &&
+ internal_cfg->hugepage_unlink) {
+ RTE_LOG(ERR, EAL, "Option --"OPT_SINGLE_FILE_SEGMENTS" is "
+ "not compatible with neither --"OPT_IN_MEMORY" nor "
+ "--"OPT_HUGE_UNLINK"\n");
+ return -1;
+ }
return 0;
}
@@ -1302,10 +1434,12 @@ eal_common_usage(void)
" --"OPT_PROC_TYPE" Type of this process (primary|secondary|auto)\n"
" --"OPT_SYSLOG" Set syslog facility\n"
" --"OPT_LOG_LEVEL"=<int> Set global log level\n"
- " --"OPT_LOG_LEVEL"=<type-regexp>,<int>\n"
+ " --"OPT_LOG_LEVEL"=<type-match>:<int>\n"
" Set specific log level\n"
" -v Display version information on startup\n"
" -h, --help This help\n"
+ " --"OPT_IN_MEMORY" Operate entirely in memory. This will\n"
+ " disable secondary process support\n"
"\nEAL options for DEBUG use only:\n"
" --"OPT_HUGE_UNLINK" Unlink hugepage files after init\n"
" --"OPT_NO_HUGE" Use malloc instead of hugetlbfs\n"
diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c
index caa8774a..9fcb9121 100644
--- a/lib/librte_eal/common/eal_common_proc.c
+++ b/lib/librte_eal/common/eal_common_proc.c
@@ -13,18 +13,21 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/file.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
+#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
+#include <rte_tailq.h>
#include "eal_private.h"
#include "eal_filesystem.h"
@@ -51,6 +54,7 @@ enum mp_type {
MP_MSG, /* Share message with peers, will not block */
MP_REQ, /* Request for information, Will block for a reply */
MP_REP, /* Response to previously-received request */
+ MP_IGN, /* Response telling requester to ignore this response */
};
struct mp_msg_internal {
@@ -58,31 +62,66 @@ struct mp_msg_internal {
struct rte_mp_msg msg;
};
-struct sync_request {
- TAILQ_ENTRY(sync_request) next;
- int reply_received;
+struct async_request_param {
+ rte_mp_async_reply_t clb;
+ struct rte_mp_reply user_reply;
+ struct timespec end;
+ int n_responses_processed;
+};
+
+struct pending_request {
+ TAILQ_ENTRY(pending_request) next;
+ enum {
+ REQUEST_TYPE_SYNC,
+ REQUEST_TYPE_ASYNC
+ } type;
char dst[PATH_MAX];
struct rte_mp_msg *request;
struct rte_mp_msg *reply;
- pthread_cond_t cond;
+ int reply_received;
+ RTE_STD_C11
+ union {
+ struct {
+ struct async_request_param *param;
+ } async;
+ struct {
+ pthread_cond_t cond;
+ } sync;
+ };
};
-TAILQ_HEAD(sync_request_list, sync_request);
+TAILQ_HEAD(pending_request_list, pending_request);
static struct {
- struct sync_request_list requests;
+ struct pending_request_list requests;
pthread_mutex_t lock;
-} sync_requests = {
- .requests = TAILQ_HEAD_INITIALIZER(sync_requests.requests),
- .lock = PTHREAD_MUTEX_INITIALIZER
+} pending_requests = {
+ .requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+ /**< used in async requests only */
};
-static struct sync_request *
-find_sync_request(const char *dst, const char *act_name)
+/* forward declarations */
+static int
+mp_send(struct rte_mp_msg *msg, const char *peer, int type);
+
+/* for use with alarm callback */
+static void
+async_reply_handle(void *arg);
+
+/* for use with process_msg */
+static struct pending_request *
+async_reply_handle_thread_unsafe(void *arg);
+
+static void
+trigger_async_action(struct pending_request *req);
+
+static struct pending_request *
+find_pending_request(const char *dst, const char *act_name)
{
- struct sync_request *r;
+ struct pending_request *r;
- TAILQ_FOREACH(r, &sync_requests.requests, next) {
+ TAILQ_FOREACH(r, &pending_requests.requests, next) {
if (!strcmp(r->dst, dst) &&
!strcmp(r->request->name, act_name))
break;
@@ -91,6 +130,17 @@ find_sync_request(const char *dst, const char *act_name)
return r;
}
+static void
+create_socket_path(const char *name, char *buf, int len)
+{
+ const char *prefix = eal_mp_socket_path();
+
+ if (strlen(name) > 0)
+ snprintf(buf, len, "%s_%s", prefix, name);
+ else
+ strlcpy(buf, prefix, len);
+}
+
int
rte_eal_primary_proc_alive(const char *config_file_path)
{
@@ -159,7 +209,7 @@ rte_mp_action_register(const char *name, rte_mp_t action)
rte_errno = ENOMEM;
return -1;
}
- strcpy(entry->action_name, name);
+ strlcpy(entry->action_name, name, sizeof(entry->action_name));
entry->action = action;
pthread_mutex_lock(&mp_mutex_action);
@@ -241,23 +291,35 @@ read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
static void
process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
- struct sync_request *sync_req;
+ struct pending_request *pending_req;
struct action_entry *entry;
struct rte_mp_msg *msg = &m->msg;
rte_mp_t action = NULL;
RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);
- if (m->type == MP_REP) {
- pthread_mutex_lock(&sync_requests.lock);
- sync_req = find_sync_request(s->sun_path, msg->name);
- if (sync_req) {
- memcpy(sync_req->reply, msg, sizeof(*msg));
- sync_req->reply_received = 1;
- pthread_cond_signal(&sync_req->cond);
+ if (m->type == MP_REP || m->type == MP_IGN) {
+ struct pending_request *req = NULL;
+
+ pthread_mutex_lock(&pending_requests.lock);
+ pending_req = find_pending_request(s->sun_path, msg->name);
+ if (pending_req) {
+ memcpy(pending_req->reply, msg, sizeof(*msg));
+ /* -1 indicates that we've been asked to ignore */
+ pending_req->reply_received =
+ m->type == MP_REP ? 1 : -1;
+
+ if (pending_req->type == REQUEST_TYPE_SYNC)
+ pthread_cond_signal(&pending_req->sync.cond);
+ else if (pending_req->type == REQUEST_TYPE_ASYNC)
+ req = async_reply_handle_thread_unsafe(
+ pending_req);
} else
RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
- pthread_mutex_unlock(&sync_requests.lock);
+ pthread_mutex_unlock(&pending_requests.lock);
+
+ if (req != NULL)
+ trigger_async_action(req);
return;
}
@@ -267,10 +329,25 @@ process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
action = entry->action;
pthread_mutex_unlock(&mp_mutex_action);
- if (!action)
- RTE_LOG(ERR, EAL, "Cannot find action: %s\n", msg->name);
- else if (action(msg, s->sun_path) < 0)
+ if (!action) {
+ if (m->type == MP_REQ && !internal_config.init_complete) {
+ /* if this is a request, and init is not yet complete,
+ * and callback wasn't registered, we should tell the
+ * requester to ignore our existence because we're not
+ * yet ready to process this request.
+ */
+ struct rte_mp_msg dummy;
+
+ memset(&dummy, 0, sizeof(dummy));
+ strlcpy(dummy.name, msg->name, sizeof(dummy.name));
+ mp_send(&dummy, s->sun_path, MP_IGN);
+ } else {
+ RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
+ msg->name);
+ }
+ } else if (action(msg, s->sun_path) < 0) {
RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name);
+ }
}
static void *
@@ -288,10 +365,158 @@ mp_handle(void *arg __rte_unused)
}
static int
+timespec_cmp(const struct timespec *a, const struct timespec *b)
+{
+ if (a->tv_sec < b->tv_sec)
+ return -1;
+ if (a->tv_sec > b->tv_sec)
+ return 1;
+ if (a->tv_nsec < b->tv_nsec)
+ return -1;
+ if (a->tv_nsec > b->tv_nsec)
+ return 1;
+ return 0;
+}
+
+enum async_action {
+ ACTION_FREE, /**< free the action entry, but don't trigger callback */
+ ACTION_TRIGGER /**< trigger callback, then free action entry */
+};
+
+static enum async_action
+process_async_request(struct pending_request *sr, const struct timespec *now)
+{
+ struct async_request_param *param;
+ struct rte_mp_reply *reply;
+ bool timeout, last_msg;
+
+ param = sr->async.param;
+ reply = &param->user_reply;
+
+ /* did we timeout? */
+ timeout = timespec_cmp(&param->end, now) <= 0;
+
+ /* if we received a response, adjust relevant data and copy message. */
+ if (sr->reply_received == 1 && sr->reply) {
+ struct rte_mp_msg *msg, *user_msgs, *tmp;
+
+ msg = sr->reply;
+ user_msgs = reply->msgs;
+
+ tmp = realloc(user_msgs, sizeof(*msg) *
+ (reply->nb_received + 1));
+ if (!tmp) {
+ RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
+ sr->dst, sr->request->name);
+ /* this entry is going to be removed and its message
+ * dropped, but we don't want to leak memory, so
+ * continue.
+ */
+ } else {
+ user_msgs = tmp;
+ reply->msgs = user_msgs;
+ memcpy(&user_msgs[reply->nb_received],
+ msg, sizeof(*msg));
+ reply->nb_received++;
+ }
+
+ /* mark this request as processed */
+ param->n_responses_processed++;
+ } else if (sr->reply_received == -1) {
+ /* we were asked to ignore this process */
+ reply->nb_sent--;
+ } else if (timeout) {
+ /* count it as processed response, but don't increment
+ * nb_received.
+ */
+ param->n_responses_processed++;
+ }
+
+ free(sr->reply);
+
+ last_msg = param->n_responses_processed == reply->nb_sent;
+
+ return last_msg ? ACTION_TRIGGER : ACTION_FREE;
+}
+
+static void
+trigger_async_action(struct pending_request *sr)
+{
+ struct async_request_param *param;
+ struct rte_mp_reply *reply;
+
+ param = sr->async.param;
+ reply = &param->user_reply;
+
+ param->clb(sr->request, reply);
+
+ /* clean up */
+ free(sr->async.param->user_reply.msgs);
+ free(sr->async.param);
+ free(sr->request);
+ free(sr);
+}
+
+static struct pending_request *
+async_reply_handle_thread_unsafe(void *arg)
+{
+ struct pending_request *req = (struct pending_request *)arg;
+ enum async_action action;
+ struct timespec ts_now;
+ struct timeval now;
+
+ if (gettimeofday(&now, NULL) < 0) {
+ RTE_LOG(ERR, EAL, "Cannot get current time\n");
+ goto no_trigger;
+ }
+ ts_now.tv_nsec = now.tv_usec * 1000;
+ ts_now.tv_sec = now.tv_sec;
+
+ action = process_async_request(req, &ts_now);
+
+ TAILQ_REMOVE(&pending_requests.requests, req, next);
+
+ if (rte_eal_alarm_cancel(async_reply_handle, req) < 0) {
+ /* if we failed to cancel the alarm because it's already in
+ * progress, don't proceed because otherwise we will end up
+ * handling the same message twice.
+ */
+ if (rte_errno == EINPROGRESS) {
+ RTE_LOG(DEBUG, EAL, "Request handling is already in progress\n");
+ goto no_trigger;
+ }
+ RTE_LOG(ERR, EAL, "Failed to cancel alarm\n");
+ }
+
+ if (action == ACTION_TRIGGER)
+ return req;
+no_trigger:
+ free(req);
+ return NULL;
+}
+
+static void
+async_reply_handle(void *arg)
+{
+ struct pending_request *req;
+
+ pthread_mutex_lock(&pending_requests.lock);
+ req = async_reply_handle_thread_unsafe(arg);
+ pthread_mutex_unlock(&pending_requests.lock);
+
+ if (req != NULL)
+ trigger_async_action(req);
+}
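A minimal sketch of the asynchronous request path this code serves (illustrative, not part of the patch; "my_action" is a hypothetical action name):

static int
my_reply_cb(const struct rte_mp_msg *request __rte_unused,
		const struct rte_mp_reply *reply)
{
	/* runs from the IPC/alarm thread once all replies have arrived
	 * or the timeout has expired; reply->msgs is freed after return
	 */
	RTE_LOG(INFO, EAL, "got %d of %d replies\n",
		reply->nb_received, reply->nb_sent);
	return 0;
}

struct rte_mp_msg req;
struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

memset(&req, 0, sizeof(req));
strlcpy(req.name, "my_action", sizeof(req.name));
rte_mp_request_async(&req, &ts, my_reply_cb);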
+
+static int
open_socket_fd(void)
{
+ char peer_name[PATH_MAX] = {0};
struct sockaddr_un un;
- const char *prefix = eal_mp_socket_path();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ snprintf(peer_name, sizeof(peer_name),
+ "%d_%"PRIx64, getpid(), rte_rdtsc());
mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
if (mp_fd < 0) {
@@ -301,13 +526,11 @@ open_socket_fd(void)
memset(&un, 0, sizeof(un));
un.sun_family = AF_UNIX;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- snprintf(un.sun_path, sizeof(un.sun_path), "%s", prefix);
- else {
- snprintf(un.sun_path, sizeof(un.sun_path), "%s_%d_%"PRIx64,
- prefix, getpid(), rte_rdtsc());
- }
+
+ create_socket_path(peer_name, un.sun_path, sizeof(un.sun_path));
+
unlink(un.sun_path); /* May still exist since last run */
+
if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
un.sun_path, strerror(errno));
@@ -342,54 +565,70 @@ unlink_sockets(const char *filter)
return 0;
}
-static void
-unlink_socket_by_path(const char *path)
-{
- char *filename;
- char *fullpath = strdup(path);
-
- if (!fullpath)
- return;
- filename = basename(fullpath);
- unlink_sockets(filename);
- free(fullpath);
- RTE_LOG(INFO, EAL, "Remove socket %s\n", path);
-}
-
int
rte_mp_channel_init(void)
{
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
- char *path;
- pthread_t tid;
+ char path[PATH_MAX];
+ int dir_fd;
+ pthread_t mp_handle_tid;
+
+ /* in no shared files mode, we do not have secondary processes support,
+ * so no need to initialize IPC.
+ */
+ if (internal_config.no_shconf) {
+ RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");
+ return 0;
+ }
- snprintf(mp_filter, PATH_MAX, ".%s_unix_*",
- internal_config.hugefile_prefix);
+ /* create filter path */
+ create_socket_path("*", path, sizeof(path));
+ strlcpy(mp_filter, basename(path), sizeof(mp_filter));
- path = strdup(eal_mp_socket_path());
- snprintf(mp_dir_path, PATH_MAX, "%s", dirname(path));
- free(path);
+ /* path may have been modified, so recreate it */
+ create_socket_path("*", path, sizeof(path));
+ strlcpy(mp_dir_path, dirname(path), sizeof(mp_dir_path));
+
+ /* lock the directory */
+ dir_fd = open(mp_dir_path, O_RDONLY);
+ if (dir_fd < 0) {
+ RTE_LOG(ERR, EAL, "failed to open %s: %s\n",
+ mp_dir_path, strerror(errno));
+ return -1;
+ }
+
+ if (flock(dir_fd, LOCK_EX)) {
+ RTE_LOG(ERR, EAL, "failed to lock %s: %s\n",
+ mp_dir_path, strerror(errno));
+ close(dir_fd);
+ return -1;
+ }
if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
- unlink_sockets(mp_filter)) {
+ unlink_sockets(mp_filter)) {
RTE_LOG(ERR, EAL, "failed to unlink mp sockets\n");
+ close(dir_fd);
return -1;
}
- if (open_socket_fd() < 0)
+ if (open_socket_fd() < 0) {
+ close(dir_fd);
return -1;
+ }
- if (pthread_create(&tid, NULL, mp_handle, NULL) < 0) {
+ if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle",
+ NULL, mp_handle, NULL) < 0) {
RTE_LOG(ERR, EAL, "failed to create mp thead: %s\n",
strerror(errno));
close(mp_fd);
+ close(dir_fd);
mp_fd = -1;
return -1;
}
- /* try best to set thread name */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "rte_mp_handle");
- rte_thread_setname(tid, thread_name);
+ /* unlock the directory */
+ flock(dir_fd, LOCK_UN);
+ close(dir_fd);
+
return 0;
}
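The directory lock serializes initialization against in-flight sends: init takes LOCK_EX while it unlinks stale sockets and binds its own, and senders below take LOCK_SH so they can run concurrently with each other but not with a process starting up. A condensed sketch of the pattern (illustrative):

int dir_fd = open(mp_dir_path, O_RDONLY);

if (dir_fd < 0)
	return -1;
if (flock(dir_fd, LOCK_EX) == 0) {
	/* no peer can bind a new socket while the lock is held */
	flock(dir_fd, LOCK_UN);
}
close(dir_fd);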
@@ -416,7 +655,7 @@ send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
memset(&dst, 0, sizeof(dst));
dst.sun_family = AF_UNIX;
- snprintf(dst.sun_path, sizeof(dst.sun_path), "%s", dst_path);
+ strlcpy(dst.sun_path, dst_path, sizeof(dst.sun_path));
memset(&msgh, 0, sizeof(msgh));
memset(control, 0, sizeof(control));
@@ -444,13 +683,12 @@ send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
if (snd < 0) {
rte_errno = errno;
/* Check if it caused by peer process exits */
- if (errno == -ECONNREFUSED) {
- /* We don't unlink the primary's socket here */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- unlink_socket_by_path(dst_path);
+ if (errno == ECONNREFUSED &&
+ rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ unlink(dst_path);
return 0;
}
- if (errno == -ENOBUFS) {
+ if (errno == ENOBUFS) {
RTE_LOG(ERR, EAL, "Peer cannot receive message %s\n",
dst_path);
return 0;
@@ -466,7 +704,7 @@ send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type)
{
- int ret = 0;
+ int dir_fd, ret = 0;
DIR *mp_dir;
struct dirent *ent;
@@ -488,14 +726,32 @@ mp_send(struct rte_mp_msg *msg, const char *peer, int type)
rte_errno = errno;
return -1;
}
+
+ dir_fd = dirfd(mp_dir);
+ /* lock the directory to prevent processes spinning up while we send */
+ if (flock(dir_fd, LOCK_SH)) {
+ RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
+ mp_dir_path);
+ rte_errno = errno;
+ closedir(mp_dir);
+ return -1;
+ }
+
while ((ent = readdir(mp_dir))) {
+ char path[PATH_MAX];
+
if (fnmatch(mp_filter, ent->d_name, 0) != 0)
continue;
- if (send_msg(ent->d_name, msg, type) < 0)
+ snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
+ ent->d_name);
+ if (send_msg(path, msg, type) < 0)
ret = -1;
}
+ /* unlock the dir */
+ flock(dir_fd, LOCK_UN);
+ /* dir_fd automatically closed on closedir */
closedir(mp_dir);
return ret;
}
@@ -539,25 +795,82 @@ rte_mp_sendmsg(struct rte_mp_msg *msg)
}
static int
-mp_request_one(const char *dst, struct rte_mp_msg *req,
+mp_request_async(const char *dst, struct rte_mp_msg *req,
+ struct async_request_param *param, const struct timespec *ts)
+{
+ struct rte_mp_msg *reply_msg;
+ struct pending_request *pending_req, *exist;
+ int ret;
+
+ pending_req = calloc(1, sizeof(*pending_req));
+ reply_msg = calloc(1, sizeof(*reply_msg));
+ if (pending_req == NULL || reply_msg == NULL) {
+ RTE_LOG(ERR, EAL, "Could not allocate space for sync request\n");
+ rte_errno = ENOMEM;
+ ret = -1;
+ goto fail;
+ }
+
+ pending_req->type = REQUEST_TYPE_ASYNC;
+ strlcpy(pending_req->dst, dst, sizeof(pending_req->dst));
+ pending_req->request = req;
+ pending_req->reply = reply_msg;
+ pending_req->async.param = param;
+
+ /* queue already locked by caller */
+
+ exist = find_pending_request(dst, req->name);
+ if (exist) {
+ RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
+ rte_errno = EEXIST;
+ ret = -1;
+ goto fail;
+ }
+
+ ret = send_msg(dst, req, MP_REQ);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
+ dst, req->name);
+ ret = -1;
+ goto fail;
+ } else if (ret == 0) {
+ ret = 0;
+ goto fail;
+ }
+ TAILQ_INSERT_TAIL(&pending_requests.requests, pending_req, next);
+
+ param->user_reply.nb_sent++;
+
+ if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
+ async_reply_handle, pending_req) < 0) {
+ RTE_LOG(ERR, EAL, "Fail to set alarm for request %s:%s\n",
+ dst, req->name);
+ rte_panic("Fix the above shit to properly free all memory\n");
+ }
+
+ return 0;
+fail:
+ free(pending_req);
+ free(reply_msg);
+ return ret;
+}
+
+static int
+mp_request_sync(const char *dst, struct rte_mp_msg *req,
struct rte_mp_reply *reply, const struct timespec *ts)
{
int ret;
- struct timeval now;
struct rte_mp_msg msg, *tmp;
- struct sync_request sync_req, *exist;
-
- sync_req.reply_received = 0;
- strcpy(sync_req.dst, dst);
- sync_req.request = req;
- sync_req.reply = &msg;
- pthread_cond_init(&sync_req.cond, NULL);
-
- pthread_mutex_lock(&sync_requests.lock);
- exist = find_sync_request(dst, req->name);
- if (!exist)
- TAILQ_INSERT_TAIL(&sync_requests.requests, &sync_req, next);
- pthread_mutex_unlock(&sync_requests.lock);
+ struct pending_request pending_req, *exist;
+
+ pending_req.type = REQUEST_TYPE_SYNC;
+ pending_req.reply_received = 0;
+ strlcpy(pending_req.dst, dst, sizeof(pending_req.dst));
+ pending_req.request = req;
+ pending_req.reply = &msg;
+ pthread_cond_init(&pending_req.sync.cond, NULL);
+
+ exist = find_pending_request(dst, req->name);
if (exist) {
RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
rte_errno = EEXIST;
@@ -572,33 +885,31 @@ mp_request_one(const char *dst, struct rte_mp_msg *req,
} else if (ret == 0)
return 0;
+ TAILQ_INSERT_TAIL(&pending_requests.requests, &pending_req, next);
+
reply->nb_sent++;
- pthread_mutex_lock(&sync_requests.lock);
do {
- pthread_cond_timedwait(&sync_req.cond, &sync_requests.lock, ts);
- /* Check spurious wakeups */
- if (sync_req.reply_received == 1)
- break;
- /* Check if time is out */
- if (gettimeofday(&now, NULL) < 0)
- break;
- if (now.tv_sec < ts->tv_sec)
- break;
- else if (now.tv_sec == ts->tv_sec &&
- now.tv_usec * 1000 < ts->tv_nsec)
- break;
- } while (1);
- /* We got the lock now */
- TAILQ_REMOVE(&sync_requests.requests, &sync_req, next);
- pthread_mutex_unlock(&sync_requests.lock);
+ ret = pthread_cond_timedwait(&pending_req.sync.cond,
+ &pending_requests.lock, ts);
+ } while (ret != 0 && ret != ETIMEDOUT);
+
+ TAILQ_REMOVE(&pending_requests.requests, &pending_req, next);
- if (sync_req.reply_received == 0) {
+ if (pending_req.reply_received == 0) {
RTE_LOG(ERR, EAL, "Fail to recv reply for request %s:%s\n",
dst, req->name);
rte_errno = ETIMEDOUT;
return -1;
}
+ if (pending_req.reply_received == -1) {
+ RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
+ /* not receiving this message is not an error, so decrement
+ * number of sent messages
+ */
+ reply->nb_sent--;
+ return 0;
+ }
tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
if (!tmp) {
@@ -614,10 +925,10 @@ mp_request_one(const char *dst, struct rte_mp_msg *req,
}
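A minimal caller-side sketch of a synchronous request (illustrative, not part of the patch; "my_action" is a hypothetical action name). The caller owns reply.msgs and must free() it; nb_sent can exceed nb_received when peers time out or ask to be ignored:

struct rte_mp_msg req;
struct rte_mp_reply reply;
struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

memset(&req, 0, sizeof(req));
memset(&reply, 0, sizeof(reply));
strlcpy(req.name, "my_action", sizeof(req.name));

if (rte_mp_request_sync(&req, &reply, &ts) == 0) {
	int i;

	for (i = 0; i < reply.nb_received; i++)
		RTE_LOG(INFO, EAL, "received reply %d\n", i);
	free(reply.msgs);
}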
int __rte_experimental
-rte_mp_request(struct rte_mp_msg *req, struct rte_mp_reply *reply,
+rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
const struct timespec *ts)
{
- int ret = 0;
+ int dir_fd, ret = 0;
DIR *mp_dir;
struct dirent *ent;
struct timeval now;
@@ -627,6 +938,12 @@ rte_mp_request(struct rte_mp_msg *req, struct rte_mp_reply *reply,
if (check_input(req) == false)
return -1;
+
+ if (internal_config.no_shconf) {
+ RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
+ return 0;
+ }
+
if (gettimeofday(&now, NULL) < 0) {
RTE_LOG(ERR, EAL, "Faile to get current time\n");
rte_errno = errno;
@@ -642,8 +959,12 @@ rte_mp_request(struct rte_mp_msg *req, struct rte_mp_reply *reply,
reply->msgs = NULL;
/* for secondary process, send request to the primary process only */
- if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- return mp_request_one(eal_mp_socket_path(), req, reply, &end);
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ pthread_mutex_lock(&pending_requests.lock);
+ ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
+ pthread_mutex_unlock(&pending_requests.lock);
+ return ret;
+ }
/* for primary process, broadcast request, and collect reply 1 by 1 */
mp_dir = opendir(mp_dir_path);
@@ -653,22 +974,193 @@ rte_mp_request(struct rte_mp_msg *req, struct rte_mp_reply *reply,
return -1;
}
+ dir_fd = dirfd(mp_dir);
+ /* lock the directory to prevent processes spinning up while we send */
+ if (flock(dir_fd, LOCK_SH)) {
+ RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
+ mp_dir_path);
+ closedir(mp_dir);
+ rte_errno = errno;
+ return -1;
+ }
+
+ pthread_mutex_lock(&pending_requests.lock);
while ((ent = readdir(mp_dir))) {
+ char path[PATH_MAX];
+
if (fnmatch(mp_filter, ent->d_name, 0) != 0)
continue;
- if (mp_request_one(ent->d_name, req, reply, &end))
+ snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
+ ent->d_name);
+
+ /* mp_request_sync() drops the mutex while waiting for the
+ * response and re-acquires it once the reply is received
+ */
+ if (mp_request_sync(path, req, reply, &end))
ret = -1;
}
+ pthread_mutex_unlock(&pending_requests.lock);
+ /* unlock the directory */
+ flock(dir_fd, LOCK_UN);
+ /* dir_fd automatically closed on closedir */
closedir(mp_dir);
return ret;
}
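For reference, a minimal caller-side sketch of the synchronous request path above (hedged: the rte_mp_msg/rte_mp_reply field names are taken from rte_eal.h, and the peer is assumed to have registered a handler for "example_action" via rte_mp_action_register()):

    #include <stdlib.h>
    #include <string.h>
    #include <time.h>
    #include <rte_eal.h>
    #include <rte_string_fns.h>

    static int
    mp_sync_example(void)
    {
        struct rte_mp_msg msg;
        struct rte_mp_reply reply;
        struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 }; /* 5s timeout */
        int i;

        memset(&msg, 0, sizeof(msg));
        strlcpy(msg.name, "example_action", sizeof(msg.name));

        if (rte_mp_request_sync(&msg, &reply, &ts) < 0)
            return -1;
        for (i = 0; i < reply.nb_received; i++)
            ; /* inspect reply.msgs[i] here */
        free(reply.msgs); /* the reply array is allocated for the caller */
        return 0;
    }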
int __rte_experimental
-rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
+rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
+ rte_mp_async_reply_t clb)
{
+ struct rte_mp_msg *copy;
+ struct pending_request *dummy;
+ struct async_request_param *param;
+ struct rte_mp_reply *reply;
+ int dir_fd, ret = 0;
+ DIR *mp_dir;
+ struct dirent *ent;
+ struct timeval now;
+ struct timespec *end;
+ bool dummy_used = false;
+
+ RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
+
+ if (check_input(req) == false)
+ return -1;
+
+ if (internal_config.no_shconf) {
+ RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
+ return 0;
+ }
+
+ if (gettimeofday(&now, NULL) < 0) {
+ RTE_LOG(ERR, EAL, "Faile to get current time\n");
+ rte_errno = errno;
+ return -1;
+ }
+ copy = calloc(1, sizeof(*copy));
+ dummy = calloc(1, sizeof(*dummy));
+ param = calloc(1, sizeof(*param));
+ if (copy == NULL || dummy == NULL || param == NULL) {
+ RTE_LOG(ERR, EAL, "Failed to allocate memory for async reply\n");
+ rte_errno = ENOMEM;
+ goto fail;
+ }
+
+ /* copy message */
+ memcpy(copy, req, sizeof(*copy));
+
+ param->n_responses_processed = 0;
+ param->clb = clb;
+ end = &param->end;
+ reply = &param->user_reply;
+
+ end->tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
+ end->tv_sec = now.tv_sec + ts->tv_sec +
+ (now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;
+ reply->nb_sent = 0;
+ reply->nb_received = 0;
+ reply->msgs = NULL;
+
+ /* we have to lock the request queue here, as we will be adding a bunch
+ * of requests to the queue at once, and some of the replies may arrive
+ * before we add all of the requests to the queue.
+ */
+ pthread_mutex_lock(&pending_requests.lock);
+
+ /* we have to ensure the callback gets triggered even if we don't send
+ * anything, which is why we allocated a dummy request earlier. fill it
+ * in, and put it on the queue if we end up sending no requests.
+ */
+ dummy->type = REQUEST_TYPE_ASYNC;
+ dummy->request = copy;
+ dummy->reply = NULL;
+ dummy->async.param = param;
+ dummy->reply_received = 1; /* short-circuit the timeout */
+
+ /* for secondary process, send request to the primary process only */
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ ret = mp_request_async(eal_mp_socket_path(), copy, param, ts);
+
+ /* if we didn't send anything, put dummy request on the queue */
+ if (ret == 0 && reply->nb_sent == 0) {
+ TAILQ_INSERT_TAIL(&pending_requests.requests, dummy,
+ next);
+ dummy_used = true;
+ }
+
+ pthread_mutex_unlock(&pending_requests.lock);
+
+ /* if we couldn't send anything, clean up */
+ if (ret != 0)
+ goto fail;
+ return 0;
+ }
+
+ /* for primary process, broadcast request */
+ mp_dir = opendir(mp_dir_path);
+ if (!mp_dir) {
+ RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
+ rte_errno = errno;
+ goto unlock_fail;
+ }
+ dir_fd = dirfd(mp_dir);
+ /* lock the directory to prevent processes spinning up while we send */
+ if (flock(dir_fd, LOCK_SH)) {
+ RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
+ mp_dir_path);
+ rte_errno = errno;
+ goto closedir_fail;
+ }
+
+ while ((ent = readdir(mp_dir))) {
+ char path[PATH_MAX];
+
+ if (fnmatch(mp_filter, ent->d_name, 0) != 0)
+ continue;
+
+ snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
+ ent->d_name);
+
+ if (mp_request_async(path, copy, param, ts))
+ ret = -1;
+ }
+ /* if we didn't send anything, put dummy request on the queue */
+ if (ret == 0 && reply->nb_sent == 0) {
+ TAILQ_INSERT_HEAD(&pending_requests.requests, dummy, next);
+ dummy_used = true;
+ }
+
+ /* finally, unlock the queue */
+ pthread_mutex_unlock(&pending_requests.lock);
+
+ /* unlock the directory */
+ flock(dir_fd, LOCK_UN);
+
+ /* dir_fd automatically closed on closedir */
+ closedir(mp_dir);
+
+ /* if dummy was unused, free it */
+ if (!dummy_used)
+ free(dummy);
+
+ return ret;
+closedir_fail:
+ closedir(mp_dir);
+unlock_fail:
+ pthread_mutex_unlock(&pending_requests.lock);
+fail:
+ free(dummy);
+ free(param);
+ free(copy);
+ return -1;
+}
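A hedged usage sketch for the asynchronous variant (the rte_mp_async_reply_t callback signature is an assumption based on rte_eal.h; the callback fires later, from the IPC handling thread, once all replies have arrived or the timeout expires):

    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <rte_eal.h>
    #include <rte_string_fns.h>

    static int
    async_reply_cb(const struct rte_mp_msg *request,
            const struct rte_mp_reply *reply)
    {
        /* reply->nb_received of reply->nb_sent peers answered in time */
        printf("%s: %d/%d replies\n", request->name,
                reply->nb_received, reply->nb_sent);
        return 0;
    }

    static int
    mp_async_example(void)
    {
        struct rte_mp_msg msg;
        struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

        memset(&msg, 0, sizeof(msg));
        strlcpy(msg.name, "example_action", sizeof(msg.name));
        return rte_mp_request_async(&msg, &ts, async_reply_cb);
    }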
+
+int __rte_experimental
+rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
+{
RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
if (check_input(msg) == false)
@@ -680,5 +1172,10 @@ rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
return -1;
}
+ if (internal_config.no_shconf) {
+ RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
+ return 0;
+ }
+
return mp_send(msg, peer, MP_REP);
}
diff --git a/lib/librte_eal/common/eal_common_thread.c b/lib/librte_eal/common/eal_common_thread.c
index 40902e49..48ef4d6d 100644
--- a/lib/librte_eal/common/eal_common_thread.c
+++ b/lib/librte_eal/common/eal_common_thread.c
@@ -7,6 +7,7 @@
#include <stdint.h>
#include <unistd.h>
#include <pthread.h>
+#include <signal.h>
#include <sched.h>
#include <assert.h>
#include <string.h>
@@ -15,6 +16,7 @@
#include <rte_memory.h>
#include <rte_log.h>
+#include "eal_private.h"
#include "eal_thread.h"
RTE_DECLARE_PER_LCORE(unsigned, _socket_id);
@@ -32,10 +34,7 @@ rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role)
if (lcore_id >= RTE_MAX_LCORE)
return -EINVAL;
- if (cfg->lcore_role[lcore_id] == role)
- return 0;
-
- return -EINVAL;
+ return cfg->lcore_role[lcore_id] == role;
}
int eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
@@ -140,3 +139,94 @@ exit:
return ret;
}
+
+
+struct rte_thread_ctrl_params {
+ void *(*start_routine)(void *);
+ void *arg;
+ pthread_barrier_t configured;
+};
+
+static void *rte_thread_init(void *arg)
+{
+ int ret;
+ struct rte_thread_ctrl_params *params = arg;
+ void *(*start_routine)(void *) = params->start_routine;
+ void *routine_arg = params->arg;
+
+ ret = pthread_barrier_wait(&params->configured);
+ if (ret == PTHREAD_BARRIER_SERIAL_THREAD) {
+ pthread_barrier_destroy(&params->configured);
+ free(params);
+ }
+
+ return start_routine(routine_arg);
+}
+
+__rte_experimental int
+rte_ctrl_thread_create(pthread_t *thread, const char *name,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *), void *arg)
+{
+ struct rte_thread_ctrl_params *params;
+ unsigned int lcore_id;
+ rte_cpuset_t cpuset;
+ int cpu_found, ret;
+
+ params = malloc(sizeof(*params));
+ if (!params)
+ return -ENOMEM;
+
+ params->start_routine = start_routine;
+ params->arg = arg;
+
+ pthread_barrier_init(&params->configured, NULL, 2);
+
+ ret = pthread_create(thread, attr, rte_thread_init, (void *)params);
+ if (ret != 0) {
+ free(params);
+ return -ret;
+ }
+
+ if (name != NULL) {
+ ret = rte_thread_setname(*thread, name);
+ if (ret < 0)
+ RTE_LOG(DEBUG, EAL,
+ "Cannot set name for ctrl thread\n");
+ }
+
+ cpu_found = 0;
+ CPU_ZERO(&cpuset);
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ if (eal_cpu_detected(lcore_id) &&
+ rte_lcore_has_role(lcore_id, ROLE_OFF)) {
+ CPU_SET(lcore_id, &cpuset);
+ cpu_found = 1;
+ }
+ }
+ /* if no detected cpu is off, use master core */
+ if (!cpu_found)
+ CPU_SET(rte_get_master_lcore(), &cpuset);
+
+ ret = pthread_setaffinity_np(*thread, sizeof(cpuset), &cpuset);
+ if (ret < 0)
+ goto fail;
+
+ ret = pthread_barrier_wait(&params->configured);
+ if (ret == PTHREAD_BARRIER_SERIAL_THREAD) {
+ pthread_barrier_destroy(&params->configured);
+ free(params);
+ }
+
+ return 0;
+
+fail:
+ if (PTHREAD_BARRIER_SERIAL_THREAD ==
+ pthread_barrier_wait(&params->configured)) {
+ pthread_barrier_destroy(&params->configured);
+ free(params);
+ }
+ pthread_cancel(*thread);
+ pthread_join(*thread, NULL);
+ return -ret;
+}
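A brief usage sketch for the new control-thread helper (hedged: monitor() and its loop are hypothetical, and the rte_lcore.h include is an assumption; only rte_ctrl_thread_create() itself comes from this patch). The helper pins the thread to cores with ROLE_OFF, falls back to the master core, and returns a negative errno value on failure:

    #include <pthread.h>
    #include <unistd.h>
    #include <rte_lcore.h>

    static void *
    monitor(void *arg)
    {
        (void)arg;
        for (;;)
            sleep(1); /* housekeeping kept off the data-path lcores */
        return NULL;
    }

    static int
    spawn_monitor(void)
    {
        pthread_t tid;

        /* name must fit the 16-byte pthread name limit */
        return rte_ctrl_thread_create(&tid, "dpdk-monitor", NULL,
                monitor, NULL);
    }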
diff --git a/lib/librte_eal/common/eal_common_uuid.c b/lib/librte_eal/common/eal_common_uuid.c
new file mode 100644
index 00000000..1b93c5b3
--- /dev/null
+++ b/lib/librte_eal/common/eal_common_uuid.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 1996, 1997 Theodore Ts'o.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <ctype.h>
+
+#include <rte_uuid.h>
+
+/* UUID packed form */
+struct uuid {
+ uint32_t time_low;
+ uint16_t time_mid;
+ uint16_t time_hi_and_version;
+ uint16_t clock_seq;
+ uint8_t node[6];
+};
+
+static void uuid_pack(const struct uuid *uu, rte_uuid_t ptr)
+{
+ uint32_t tmp;
+ uint8_t *out = ptr;
+
+ tmp = uu->time_low;
+ out[3] = (uint8_t) tmp;
+ tmp >>= 8;
+ out[2] = (uint8_t) tmp;
+ tmp >>= 8;
+ out[1] = (uint8_t) tmp;
+ tmp >>= 8;
+ out[0] = (uint8_t) tmp;
+
+ tmp = uu->time_mid;
+ out[5] = (uint8_t) tmp;
+ tmp >>= 8;
+ out[4] = (uint8_t) tmp;
+
+ tmp = uu->time_hi_and_version;
+ out[7] = (uint8_t) tmp;
+ tmp >>= 8;
+ out[6] = (uint8_t) tmp;
+
+ tmp = uu->clock_seq;
+ out[9] = (uint8_t) tmp;
+ tmp >>= 8;
+ out[8] = (uint8_t) tmp;
+
+ memcpy(out+10, uu->node, 6);
+}
+
+static void uuid_unpack(const rte_uuid_t in, struct uuid *uu)
+{
+ const uint8_t *ptr = in;
+ uint32_t tmp;
+
+ tmp = *ptr++;
+ tmp = (tmp << 8) | *ptr++;
+ tmp = (tmp << 8) | *ptr++;
+ tmp = (tmp << 8) | *ptr++;
+ uu->time_low = tmp;
+
+ tmp = *ptr++;
+ tmp = (tmp << 8) | *ptr++;
+ uu->time_mid = tmp;
+
+ tmp = *ptr++;
+ tmp = (tmp << 8) | *ptr++;
+ uu->time_hi_and_version = tmp;
+
+ tmp = *ptr++;
+ tmp = (tmp << 8) | *ptr++;
+ uu->clock_seq = tmp;
+
+ memcpy(uu->node, ptr, 6);
+}
+
+bool rte_uuid_is_null(const rte_uuid_t uu)
+{
+ const uint8_t *cp = uu;
+ int i;
+
+ for (i = 0; i < 16; i++)
+ if (*cp++)
+ return false;
+ return true;
+}
+
+/*
+ * rte_uuid_compare() - compare two UUIDs.
+ */
+int rte_uuid_compare(const rte_uuid_t uu1, const rte_uuid_t uu2)
+{
+ struct uuid uuid1, uuid2;
+
+ uuid_unpack(uu1, &uuid1);
+ uuid_unpack(uu2, &uuid2);
+
+#define UUCMP(u1, u2) \
+ do { if (u1 != u2) return (u1 < u2) ? -1 : 1; } while (0)
+
+ UUCMP(uuid1.time_low, uuid2.time_low);
+ UUCMP(uuid1.time_mid, uuid2.time_mid);
+ UUCMP(uuid1.time_hi_and_version, uuid2.time_hi_and_version);
+ UUCMP(uuid1.clock_seq, uuid2.clock_seq);
+#undef UUCMP
+
+ return memcmp(uuid1.node, uuid2.node, 6);
+}
+
+int rte_uuid_parse(const char *in, rte_uuid_t uu)
+{
+ struct uuid uuid;
+ int i;
+ const char *cp;
+ char buf[3];
+
+ if (strlen(in) != 36)
+ return -1;
+
+ for (i = 0, cp = in; i <= 36; i++, cp++) {
+ if ((i == 8) || (i == 13) || (i == 18) ||
+ (i == 23)) {
+ if (*cp == '-')
+ continue;
+ else
+ return -1;
+ }
+ if (i == 36)
+ if (*cp == 0)
+ continue;
+ if (!isxdigit(*cp))
+ return -1;
+ }
+
+ uuid.time_low = strtoul(in, NULL, 16);
+ uuid.time_mid = strtoul(in+9, NULL, 16);
+ uuid.time_hi_and_version = strtoul(in+14, NULL, 16);
+ uuid.clock_seq = strtoul(in+19, NULL, 16);
+ cp = in+24;
+ buf[2] = 0;
+
+ for (i = 0; i < 6; i++) {
+ buf[0] = *cp++;
+ buf[1] = *cp++;
+ uuid.node[i] = strtoul(buf, NULL, 16);
+ }
+
+ uuid_pack(&uuid, uu);
+ return 0;
+}
+
+void rte_uuid_unparse(const rte_uuid_t uu, char *out, size_t len)
+{
+ struct uuid uuid;
+
+ uuid_unpack(uu, &uuid);
+
+ snprintf(out, len,
+ "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
+ uuid.time_low, uuid.time_mid, uuid.time_hi_and_version,
+ uuid.clock_seq >> 8, uuid.clock_seq & 0xFF,
+ uuid.node[0], uuid.node[1], uuid.node[2],
+ uuid.node[3], uuid.node[4], uuid.node[5]);
+}
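A round-trip sketch for the UUID helpers above (the 37-byte output buffer follows from the 36-character canonical form plus the terminating NUL):

    #include <stdio.h>
    #include <rte_uuid.h>

    int
    main(void)
    {
        rte_uuid_t uu;
        char out[37]; /* 36 characters + NUL */

        if (rte_uuid_parse("123e4567-e89b-12d3-a456-426655440000", uu) != 0)
            return 1;
        rte_uuid_unparse(uu, out, sizeof(out));
        puts(out); /* prints the same canonical string back */
        return rte_uuid_is_null(uu); /* 0: a parsed UUID is non-null */
    }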
diff --git a/lib/librte_eal/common/eal_filesystem.h b/lib/librte_eal/common/eal_filesystem.h
index 4708dd54..de05febf 100644
--- a/lib/librte_eal/common/eal_filesystem.h
+++ b/lib/librte_eal/common/eal_filesystem.h
@@ -12,7 +12,6 @@
#define EAL_FILESYSTEM_H
/** Path of rte config file. */
-#define RUNTIME_CONFIG_FMT "%s/.%s_config"
#include <stdint.h>
#include <limits.h>
@@ -22,60 +21,70 @@
#include <rte_string_fns.h>
#include "eal_internal_cfg.h"
-static const char *default_config_dir = "/var/run";
+/* sets up platform-specific runtime data dir */
+int
+eal_create_runtime_dir(void);
+/* returns runtime dir */
+const char *
+eal_get_runtime_dir(void);
+
+#define RUNTIME_CONFIG_FNAME "config"
static inline const char *
eal_runtime_config_path(void)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- const char *directory = default_config_dir;
- const char *home_dir = getenv("HOME");
- if (getuid() != 0 && home_dir != NULL)
- directory = home_dir;
- snprintf(buffer, sizeof(buffer) - 1, RUNTIME_CONFIG_FMT, directory,
- internal_config.hugefile_prefix);
+ snprintf(buffer, sizeof(buffer) - 1, "%s/%s", eal_get_runtime_dir(),
+ RUNTIME_CONFIG_FNAME);
return buffer;
}
/** Path of primary/secondary communication unix socket file. */
-#define MP_SOCKET_PATH_FMT "%s/.%s_unix"
+#define MP_SOCKET_FNAME "mp_socket"
static inline const char *
eal_mp_socket_path(void)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- const char *directory = default_config_dir;
- const char *home_dir = getenv("HOME");
- if (getuid() != 0 && home_dir != NULL)
- directory = home_dir;
- snprintf(buffer, sizeof(buffer) - 1, MP_SOCKET_PATH_FMT,
- directory, internal_config.hugefile_prefix);
+ snprintf(buffer, sizeof(buffer) - 1, "%s/%s", eal_get_runtime_dir(),
+ MP_SOCKET_FNAME);
+ return buffer;
+}
+#define FBARRAY_NAME_FMT "%s/fbarray_%s"
+static inline const char *
+eal_get_fbarray_path(char *buffer, size_t buflen, const char *name)
+{
+ snprintf(buffer, buflen, FBARRAY_NAME_FMT, eal_get_runtime_dir(), name);
return buffer;
}
/** Path of hugepage info file. */
-#define HUGEPAGE_INFO_FMT "%s/.%s_hugepage_info"
-
+#define HUGEPAGE_INFO_FNAME "hugepage_info"
static inline const char *
eal_hugepage_info_path(void)
{
static char buffer[PATH_MAX]; /* static so auto-zeroed */
- const char *directory = default_config_dir;
- const char *home_dir = getenv("HOME");
- if (getuid() != 0 && home_dir != NULL)
- directory = home_dir;
- snprintf(buffer, sizeof(buffer) - 1, HUGEPAGE_INFO_FMT, directory,
- internal_config.hugefile_prefix);
+ snprintf(buffer, sizeof(buffer) - 1, "%s/%s", eal_get_runtime_dir(),
+ HUGEPAGE_INFO_FNAME);
+ return buffer;
+}
+
+/** Path of hugepage data file. */
+#define HUGEPAGE_DATA_FNAME "hugepage_data"
+static inline const char *
+eal_hugepage_data_path(void)
+{
+ static char buffer[PATH_MAX]; /* static so auto-zeroed */
+
+ snprintf(buffer, sizeof(buffer) - 1, "%s/%s", eal_get_runtime_dir(),
+ HUGEPAGE_DATA_FNAME);
return buffer;
}
/** String format for hugepage map files. */
#define HUGEFILE_FMT "%s/%smap_%d"
-#define TEMP_HUGEFILE_FMT "%s/%smap_temp_%d"
-
static inline const char *
eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id)
{
@@ -85,6 +94,17 @@ eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id
return buffer;
}
+/** String format for hugepage map lock files. */
+#define HUGEFILE_LOCK_FMT "%s/map_%d.lock"
+static inline const char *
+eal_get_hugefile_lock_path(char *buffer, size_t buflen, int f_id)
+{
+ snprintf(buffer, buflen, HUGEFILE_LOCK_FMT, eal_get_runtime_dir(),
+ f_id);
+ buffer[buflen - 1] = '\0';
+ return buffer;
+}
+
/** define the default filename prefix for the %s values above */
#define HUGEFILE_PREFIX_DEFAULT "rte"
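Taken together, the helpers above now resolve every runtime file under a single per-prefix directory instead of scattering dot-files across /var/run or $HOME. Illustrative layout only (assuming the default "rte" prefix, a root user, and the /var/run base picked by eal_create_runtime_dir()):

    eal_runtime_config_path()  -> /var/run/dpdk/rte/config
    eal_mp_socket_path()       -> /var/run/dpdk/rte/mp_socket
    eal_hugepage_info_path()   -> /var/run/dpdk/rte/hugepage_info
    eal_hugepage_data_path()   -> /var/run/dpdk/rte/hugepage_data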
diff --git a/lib/librte_eal/common/eal_hugepages.h b/lib/librte_eal/common/eal_hugepages.h
index 1d519bbb..4582f19c 100644
--- a/lib/librte_eal/common/eal_hugepages.h
+++ b/lib/librte_eal/common/eal_hugepages.h
@@ -22,14 +22,19 @@ struct hugepage_file {
size_t size; /**< the page size */
int socket_id; /**< NUMA socket ID */
int file_id; /**< the '%d' in HUGEFILE_FMT */
- int memseg_id; /**< the memory segment to which page belongs */
char filepath[MAX_HUGEPAGE_PATH]; /**< path to backing file on filesystem */
};
/**
- * Read the information from linux on what hugepages are available
- * for the EAL to use
+ * Read the information on what hugepages are available for the EAL to use,
+ * clearing out any unused ones.
*/
int eal_hugepage_info_init(void);
+/**
+ * Read the hugepage information that the primary process has shared into
+ * the secondary process.
+ */
+int eal_hugepage_info_read(void);
+
#endif /* EAL_HUGEPAGES_H */
diff --git a/lib/librte_eal/common/eal_internal_cfg.h b/lib/librte_eal/common/eal_internal_cfg.h
index 1169fcc3..00ee6e06 100644
--- a/lib/librte_eal/common/eal_internal_cfg.h
+++ b/lib/librte_eal/common/eal_internal_cfg.h
@@ -21,9 +21,9 @@
*/
struct hugepage_info {
uint64_t hugepage_sz; /**< size of a huge page */
- const char *hugedir; /**< dir where hugetlbfs is mounted */
+ char hugedir[PATH_MAX]; /**< dir where hugetlbfs is mounted */
uint32_t num_pages[RTE_MAX_NUMA_NODES];
- /**< number of hugepages of that size on each socket */
+ /**< number of hugepages of that size on each socket */
int lock_descriptor; /**< file descriptor for hugepage dir */
};
@@ -41,12 +41,26 @@ struct internal_config {
volatile unsigned vmware_tsc_map; /**< true to use VMware TSC mapping
* instead of native TSC */
volatile unsigned no_shconf; /**< true if there is no shared config */
+ volatile unsigned in_memory;
+ /**< true if DPDK should operate entirely in-memory and not create any
+ * shared files or runtime data.
+ */
volatile unsigned create_uio_dev; /**< true to create /dev/uioX devices */
volatile enum rte_proc_type_t process_type; /**< multi-process proc type */
/** true to try allocating memory on specific sockets */
volatile unsigned force_sockets;
volatile uint64_t socket_mem[RTE_MAX_NUMA_NODES]; /**< amount of memory per socket */
+ volatile unsigned force_socket_limits;
+ volatile uint64_t socket_limit[RTE_MAX_NUMA_NODES]; /**< limit amount of memory per socket */
uintptr_t base_virtaddr; /**< base address to try and reserve memory from */
+ volatile unsigned legacy_mem;
+ /**< true to enable legacy memory behavior (no dynamic allocation,
+ * IOVA-contiguous segments).
+ */
+ volatile unsigned single_file_segments;
+ /**< true if storing all pages within single files (per page size,
+ * per NUMA node); non-legacy mode only.
+ */
volatile int syslog_facility; /**< facility passed to openlog() */
/** default interrupt mode for VFIO */
volatile enum rte_intr_mode vfio_intr_mode;
@@ -56,6 +70,8 @@ struct internal_config {
/**< user defined mbuf pool ops name */
unsigned num_hugepage_sizes; /**< how many sizes on this system */
struct hugepage_info hugepage_info[MAX_HUGEPAGE_SIZES];
+ volatile unsigned int init_complete;
+ /**< indicates whether EAL has completed initialization */
};
extern struct internal_config internal_config; /**< Global EAL configuration. */
diff --git a/lib/librte_eal/common/eal_memalloc.h b/lib/librte_eal/common/eal_memalloc.h
new file mode 100644
index 00000000..36bb1a02
--- /dev/null
+++ b/lib/librte_eal/common/eal_memalloc.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef EAL_MEMALLOC_H
+#define EAL_MEMALLOC_H
+
+#include <stdbool.h>
+
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+
+/*
+ * Allocate segment of specified page size.
+ */
+struct rte_memseg *
+eal_memalloc_alloc_seg(size_t page_sz, int socket);
+
+/*
+ * Allocate `n_segs` segments.
+ *
+ * Note: `ms` can be NULL.
+ *
+ * Note: it is possible to request best-effort allocation by setting `exact` to
+ * `false`, in which case the allocator will return however many pages it
+ * managed to allocate successfully.
+ */
+int
+eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
+ int socket, bool exact);
+
+/*
+ * Deallocate segment
+ */
+int
+eal_memalloc_free_seg(struct rte_memseg *ms);
+
+/*
+ * Deallocate `n_segs` segments. Returns 0 if all segments were successfully
+ * deallocated, -1 on error. Any segments that can be deallocated will be
+ * deallocated even in case of error.
+ */
+int
+eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs);
+
+/*
+ * Check if memory pointed to by `start` and of `length` that resides in
+ * memseg list `msl` is IOVA-contiguous.
+ */
+bool
+eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
+ size_t len);
+
+/* synchronize local memory map to primary process */
+int
+eal_memalloc_sync_with_primary(void);
+
+int
+eal_memalloc_mem_event_callback_register(const char *name,
+ rte_mem_event_callback_t clb, void *arg);
+
+int
+eal_memalloc_mem_event_callback_unregister(const char *name, void *arg);
+
+void
+eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
+ size_t len);
+
+int
+eal_memalloc_mem_alloc_validator_register(const char *name,
+ rte_mem_alloc_validator_t clb, int socket_id, size_t limit);
+
+int
+eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id);
+
+int
+eal_memalloc_mem_alloc_validate(int socket_id, size_t new_len);
+
+int
+eal_memalloc_init(void);
+
+#endif /* EAL_MEMALLOC_H */
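A sketch of the best-effort bulk path described above (internal EAL API; the "returns the number of pages obtained when exact == false" behavior is taken from the comment, and RTE_PGSIZE_2M is assumed from rte_memory.h):

    struct rte_memseg *ms[8];
    int got;

    /* best effort: ask for eight 2M pages on socket 0, accept fewer */
    got = eal_memalloc_alloc_seg_bulk(ms, 8, RTE_PGSIZE_2M, 0, false);
    if (got > 0)
        eal_memalloc_free_seg_bulk(ms, got); /* hand them back */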
diff --git a/lib/librte_eal/common/eal_options.h b/lib/librte_eal/common/eal_options.h
index e86c7114..96e16678 100644
--- a/lib/librte_eal/common/eal_options.h
+++ b/lib/librte_eal/common/eal_options.h
@@ -45,8 +45,12 @@ enum {
OPT_NO_PCI_NUM,
#define OPT_NO_SHCONF "no-shconf"
OPT_NO_SHCONF_NUM,
+#define OPT_IN_MEMORY "in-memory"
+ OPT_IN_MEMORY_NUM,
#define OPT_SOCKET_MEM "socket-mem"
OPT_SOCKET_MEM_NUM,
+#define OPT_SOCKET_LIMIT "socket-limit"
+ OPT_SOCKET_LIMIT_NUM,
#define OPT_SYSLOG "syslog"
OPT_SYSLOG_NUM,
#define OPT_VDEV "vdev"
@@ -55,6 +59,10 @@ enum {
OPT_VFIO_INTR_NUM,
#define OPT_VMWARE_TSC_MAP "vmware-tsc-map"
OPT_VMWARE_TSC_MAP_NUM,
+#define OPT_LEGACY_MEM "legacy-mem"
+ OPT_LEGACY_MEM_NUM,
+#define OPT_SINGLE_FILE_SEGMENTS "single-file-segments"
+ OPT_SINGLE_FILE_SEGMENTS_NUM,
OPT_LONG_MAX_NUM
};
diff --git a/lib/librte_eal/common/eal_private.h b/lib/librte_eal/common/eal_private.h
index 0b287700..4f809a83 100644
--- a/lib/librte_eal/common/eal_private.h
+++ b/lib/librte_eal/common/eal_private.h
@@ -9,6 +9,8 @@
#include <stdint.h>
#include <stdio.h>
+#include <rte_dev.h>
+
/**
* Initialize the memzone subsystem (private to eal).
*
@@ -45,6 +47,18 @@ void eal_log_set_default(FILE *default_log);
int rte_eal_cpu_init(void);
/**
+ * Create memseg lists
+ *
+ * This function is private to EAL.
+ *
+ * Preallocate virtual memory.
+ *
+ * @return
+ * 0 on success, negative on error
+ */
+int rte_eal_memseg_init(void);
+
+/**
* Map memory
*
* This function is private to EAL.
@@ -81,6 +95,12 @@ int rte_eal_timer_init(void);
int rte_eal_log_init(const char *id, int facility);
/**
+ * Save the log regexp for later
+ */
+int rte_log_save_regexp(const char *type, int priority);
+int rte_log_save_pattern(const char *pattern, int priority);
+
+/**
* Init tail queues for non-EAL library structures. This is to allow
* the rings, mempools, etc. lists to be shared among multiple processes
*
@@ -127,6 +147,39 @@ int rte_eal_alarm_init(void);
int rte_eal_check_module(const char *module_name);
/**
+ * Get virtual area of specified size from the OS.
+ *
+ * This function is private to the EAL.
+ *
+ * @param requested_addr
+ * Address where to request address space.
+ * @param size
+ * Size of requested area.
+ * @param page_sz
+ * Page size on which to align requested virtual area.
+ * @param flags
+ * EAL_VIRTUAL_AREA_* flags.
+ * @param mmap_flags
+ * Extra flags passed directly to mmap().
+ *
+ * @return
+ * Virtual area address if successful.
+ * NULL if unsuccessful.
+ */
+
+#define EAL_VIRTUAL_AREA_ADDR_IS_HINT (1 << 0)
+/**< don't fail if cannot get exact requested address. */
+#define EAL_VIRTUAL_AREA_ALLOW_SHRINK (1 << 1)
+/**< try getting smaller sized (decrement by page size) virtual areas if cannot
+ * get area of requested size.
+ */
+#define EAL_VIRTUAL_AREA_UNMAP (1 << 2)
+/**< immediately unmap reserved virtual area. */
+void *
+eal_get_virtual_area(void *requested_addr, size_t *size,
+ size_t page_sz, int flags, int mmap_flags);
+
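A hedged sketch of how the reservation helper above might be called, with flag semantics as documented (the RTE_PGSIZE_* constants are assumed from rte_memory.h):

    size_t size = RTE_PGSIZE_1G; /* desired span of virtual address space */
    void *va;

    /* allow a smaller area if 1G is unavailable; unmap immediately,
     * so this call only probes for a usable address range */
    va = eal_get_virtual_area(NULL, &size, RTE_PGSIZE_2M,
            EAL_VIRTUAL_AREA_ALLOW_SHRINK | EAL_VIRTUAL_AREA_UNMAP, 0);
    if (va != NULL)
        printf("found %zu bytes of VA space at %p\n", size, va);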
+/**
* Get cpu core_id.
*
* This function is private to the EAL.
@@ -205,4 +258,50 @@ struct rte_bus *rte_bus_find_by_device_name(const char *str);
int rte_mp_channel_init(void);
+/**
+ * @internal
+ * Execute all the callbacks registered by user applications for the
+ * specific device. This is for DPDK internal use only; user applications
+ * should not call it directly.
+ *
+ * @param device_name
+ * The device name.
+ * @param event
+ * The device event type.
+ */
+void dev_callback_process(char *device_name, enum rte_dev_event_type event);
+
+/**
+ * @internal
+ * Parse a device string and store its information in an
+ * rte_devargs structure.
+ *
+ * A device description is split by layers of abstraction of the device:
+ * bus, class and driver. Each layer will offer a set of properties that
+ * can be applied either to configure or recognize a device.
+ *
+ * This function will parse those properties and prepare the rte_devargs
+ * to be given to each layer for processing.
+ *
+ * Note: if the "data" field of the devargs points to devstr,
+ * then no dynamic allocation is performed and the rte_devargs
+ * can be safely discarded.
+ *
+ * Otherwise ``data`` will hold a working copy of devstr that will be
+ * used by the layer descriptors within rte_devargs. In this case,
+ * any rte_devargs should be cleaned up before being freed.
+ *
+ * @param da
+ * rte_devargs structure to fill.
+ *
+ * @param devstr
+ * Device string.
+ *
+ * @return
+ * 0 on success.
+ * Negative errno values on error (rte_errno is set).
+ */
+int
+rte_devargs_layers_parse(struct rte_devargs *devargs,
+ const char *devstr);
+
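An illustrative call for the layered parser above (hedged: the '/'-separated bus/class layer syntax follows the description, but the property names used here are assumptions, not normative):

    struct rte_devargs da;

    memset(&da, 0, sizeof(da));
    if (rte_devargs_layers_parse(&da, "bus=pci,addr=00:02.0/class=eth") == 0) {
        /* da now carries the per-layer settings for bus and class */
    }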
#endif /* _EAL_PRIVATE_H_ */
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic.h b/lib/librte_eal/common/include/arch/arm/rte_atomic.h
index f3f3b6e3..40e14e56 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_atomic.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_ATOMIC_ARM_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h b/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h
index d2b7fa20..859562e5 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_atomic_32.h
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_ATOMIC_ARM32_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_byteorder.h b/lib/librte_eal/common/include/arch/arm/rte_byteorder.h
index 8af0a39a..9ec4a975 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_byteorder.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_byteorder.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_BYTEORDER_ARM_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cpuflags.h b/lib/librte_eal/common/include/arch/arm/rte_cpuflags.h
index b8f62889..022e7da5 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_cpuflags.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_cpuflags.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_CPUFLAGS_ARM_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h b/lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h
index eb02d9b9..b5347be1 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_cpuflags_32.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_CPUFLAGS_ARM32_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cycles.h b/lib/librte_eal/common/include/arch/arm/rte_cycles.h
index a8009a06..e8ffa894 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_cycles.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_cycles.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_CYCLES_ARM_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h b/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h
index 9c1be71e..c4f974fe 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_cycles_32.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_CYCLES_ARM32_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy.h
index 1d562c3f..47dea9a8 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_MEMCPY_ARM_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
index e4dafda1..eb02c3b4 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_memcpy_32.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_MEMCPY_ARM32_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_prefetch.h b/lib/librte_eal/common/include/arch/arm/rte_prefetch.h
index aa37de57..27870c2a 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_prefetch.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_prefetch.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_PREFETCH_ARM_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_prefetch_32.h b/lib/librte_eal/common/include/arch/arm/rte_prefetch_32.h
index 43cde172..e53420a0 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_prefetch_32.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_prefetch_32.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_PREFETCH_ARM32_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_rwlock.h b/lib/librte_eal/common/include/arch/arm/rte_rwlock.h
index 664bec88..18bb37b0 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_rwlock.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_rwlock.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ */
/* copied from ppc_64 */
#ifndef _RTE_RWLOCK_ARM_H_
diff --git a/lib/librte_eal/common/include/arch/arm/rte_spinlock.h b/lib/librte_eal/common/include/arch/arm/rte_spinlock.h
index 396a42e8..1a6916b6 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_spinlock.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_spinlock.h
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright(c) 2015 RehiveTech. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015 RehiveTech. All rights reserved.
*/
#ifndef _RTE_SPINLOCK_ARM_H_
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
index 39fce7b9..ce38350b 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h
@@ -55,7 +55,7 @@ extern "C" {
* Guarantees that the LOAD and STORE operations generated before the
* barrier occur before the LOAD and STORE operations generated after.
*/
-#define rte_mb() {asm volatile("sync" : : : "memory"); }
+#define rte_mb() asm volatile("sync" : : : "memory")
/**
* Write memory barrier.
@@ -136,6 +136,12 @@ static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
}
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+ return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+}
+
/*------------------------- 32 bit atomic operations -------------------------*/
static inline int
@@ -237,6 +243,13 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
return ret == 0;
}
+
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+ return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+}
+
/*------------------------- 64 bit atomic operations -------------------------*/
static inline int
@@ -431,7 +444,6 @@ static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}
-
/**
* Atomically set a 64-bit counter to 0.
*
@@ -442,6 +454,13 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
v->cnt = 0;
}
+
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+ return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+}
+
#endif
#ifdef __cplusplus
diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_rwlock.h b/lib/librte_eal/common/include/arch/ppc_64/rte_rwlock.h
index de8af19e..9fadc040 100644
--- a/lib/librte_eal/common/include/arch/ppc_64/rte_rwlock.h
+++ b/lib/librte_eal/common/include/arch/ppc_64/rte_rwlock.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ */
#ifndef _RTE_RWLOCK_PPC_64_H_
#define _RTE_RWLOCK_PPC_64_H_
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic.h b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
index 5cfd3832..148398f5 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic.h
@@ -104,6 +104,18 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
return res;
}
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+ asm volatile(
+ MPLOCKED
+ "xchgw %0, %1;"
+ : "=r" (val), "=m" (*dst)
+ : "0" (val), "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return val;
+}
+
static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
{
return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
@@ -178,6 +190,18 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
return res;
}
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+ asm volatile(
+ MPLOCKED
+ "xchgl %0, %1;"
+ : "=r" (val), "=m" (*dst)
+ : "0" (val), "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return val;
+}
+
static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
{
return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h b/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
index fb3abf18..a932f354 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic_32.h
@@ -98,6 +98,18 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
return res;
}
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dest, uint64_t val)
+{
+ uint64_t old;
+
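+ /* i686 has no 8-byte xchg: emulate the exchange with a cmpset retry loop */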
+ do {
+ old = *dest;
+ } while (rte_atomic64_cmpset(dest, old, val) == 0);
+
+ return old;
+}
+
static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
diff --git a/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h b/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
index 1a53a766..fd2ec9c5 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_atomic_64.h
@@ -71,6 +71,18 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
return res;
}
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+ asm volatile(
+ MPLOCKED
+ "xchgq %0, %1;"
+ : "=r" (val), "=m" (*dst)
+ : "0" (val), "m" (*dst)
+ : "memory"); /* no-clobber list */
+ return val;
+}
+
static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
index cc140ecc..7b758094 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -52,7 +52,7 @@ rte_memcpy(void *dst, const void *src, size_t n);
* Copy 16 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
__m128i xmm0;
@@ -65,7 +65,7 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
* Copy 32 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
__m256i ymm0;
@@ -78,7 +78,7 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
* Copy 64 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
__m512i zmm0;
@@ -91,7 +91,7 @@ rte_mov64(uint8_t *dst, const uint8_t *src)
* Copy 128 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov128(uint8_t *dst, const uint8_t *src)
{
rte_mov64(dst + 0 * 64, src + 0 * 64);
@@ -102,7 +102,7 @@ rte_mov128(uint8_t *dst, const uint8_t *src)
* Copy 256 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov256(uint8_t *dst, const uint8_t *src)
{
rte_mov64(dst + 0 * 64, src + 0 * 64);
@@ -293,7 +293,7 @@ COPY_BLOCK_128_BACK63:
* Copy 16 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
__m128i xmm0;
@@ -306,7 +306,7 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
* Copy 32 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
__m256i ymm0;
@@ -319,7 +319,7 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
* Copy 64 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
rte_mov32((uint8_t *)dst + 0 * 32, (const uint8_t *)src + 0 * 32);
@@ -486,7 +486,7 @@ COPY_BLOCK_128_BACK31:
* Copy 16 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov16(uint8_t *dst, const uint8_t *src)
{
__m128i xmm0;
@@ -499,7 +499,7 @@ rte_mov16(uint8_t *dst, const uint8_t *src)
* Copy 32 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov32(uint8_t *dst, const uint8_t *src)
{
rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
@@ -510,7 +510,7 @@ rte_mov32(uint8_t *dst, const uint8_t *src)
* Copy 64 bytes from one location to another,
* locations should not overlap.
*/
-static inline void
+static __rte_always_inline void
rte_mov64(uint8_t *dst, const uint8_t *src)
{
rte_mov16((uint8_t *)dst + 0 * 16, (const uint8_t *)src + 0 * 16);
@@ -574,7 +574,7 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
*/
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
__extension__ ({ \
- int tmp; \
+ size_t tmp; \
while (len >= 128 + 16 - offset) { \
xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
len -= 128; \
diff --git a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
index 4b16887e..60321da0 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
@@ -76,10 +76,12 @@ static inline int rte_tm_supported(void)
static inline int
rte_try_tm(volatile int *lock)
{
+ int retries;
+
if (!rte_rtm_supported)
return 0;
- int retries = RTE_RTM_MAX_RETRIES;
+ retries = RTE_RTM_MAX_RETRIES;
while (likely(retries--)) {
diff --git a/lib/librte_eal/common/include/generic/rte_atomic.h b/lib/librte_eal/common/include/generic/rte_atomic.h
index 50e1b8a4..b99ba468 100644
--- a/lib/librte_eal/common/include/generic/rte_atomic.h
+++ b/lib/librte_eal/common/include/generic/rte_atomic.h
@@ -191,6 +191,36 @@ rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
#endif
/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ *   ret = *dst;
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint16_t
+rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val)
+{
+#if defined(RTE_ARCH_ARM64) && defined(RTE_TOOLCHAIN_CLANG)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
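The new exchange primitives atomically store a value and return the previous contents. A minimal usage sketch, assuming an EAL-enabled build (the flag variable and helper names are illustrative):

#include <rte_atomic.h>

static volatile uint16_t flag; /* 0 = free, 1 = taken */

/* Try to take the flag; returns 1 on success, 0 if already taken. */
static int
take_flag(void)
{
	/* Atomically write 1 and fetch whatever was there before. */
	return rte_atomic16_exchange(&flag, 1) == 0;
}

static void
release_flag(void)
{
	rte_atomic16_exchange(&flag, 0);
}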
+/**
* The atomic counter structure.
*/
typedef struct {
@@ -444,6 +474,36 @@ rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
#endif
/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ *   ret = *dst;
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint32_t
+rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val)
+{
+#if defined(RTE_ARCH_ARM64) && defined(RTE_TOOLCHAIN_CLANG)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
+/**
* The atomic counter structure.
*/
typedef struct {
@@ -696,6 +756,36 @@ rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
#endif
/**
+ * Atomic exchange.
+ *
+ * (atomic) equivalent to:
+ *   ret = *dst;
+ * *dst = val;
+ * return ret;
+ *
+ * @param dst
+ * The destination location into which the value will be written.
+ * @param val
+ * The new value.
+ * @return
+ * The original value at that location
+ */
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val);
+
+#ifdef RTE_FORCE_INTRINSICS
+static inline uint64_t
+rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
+{
+#if defined(RTE_ARCH_ARM64) && defined(RTE_TOOLCHAIN_CLANG)
+ return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST);
+#else
+ return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST);
+#endif
+}
+#endif
+
+/**
* The atomic counter structure.
*/
typedef struct {
diff --git a/lib/librte_eal/common/include/generic/rte_byteorder.h b/lib/librte_eal/common/include/generic/rte_byteorder.h
index 9bed85cc..7d9a1463 100644
--- a/lib/librte_eal/common/include/generic/rte_byteorder.h
+++ b/lib/librte_eal/common/include/generic/rte_byteorder.h
@@ -123,7 +123,7 @@ typedef uint64_t rte_le64_t; /**< 64-bit little-endian value. */
static inline uint16_t
rte_constant_bswap16(uint16_t x)
{
- return RTE_STATIC_BSWAP16(x);
+ return (uint16_t)RTE_STATIC_BSWAP16(x);
}
/*
@@ -135,7 +135,7 @@ rte_constant_bswap16(uint16_t x)
static inline uint32_t
rte_constant_bswap32(uint32_t x)
{
- return RTE_STATIC_BSWAP32(x);
+ return (uint32_t)RTE_STATIC_BSWAP32(x);
}
/*
@@ -147,7 +147,7 @@ rte_constant_bswap32(uint32_t x)
static inline uint64_t
rte_constant_bswap64(uint64_t x)
{
- return RTE_STATIC_BSWAP64(x);
+ return (uint64_t)RTE_STATIC_BSWAP64(x);
}
diff --git a/lib/librte_eal/common/include/generic/rte_cpuflags.h b/lib/librte_eal/common/include/generic/rte_cpuflags.h
index 8d31687d..156ea002 100644
--- a/lib/librte_eal/common/include/generic/rte_cpuflags.h
+++ b/lib/librte_eal/common/include/generic/rte_cpuflags.h
@@ -64,4 +64,25 @@ rte_cpu_check_supported(void);
int
rte_cpu_is_supported(void);
+/**
+ * This function attempts to retrieve a value from the auxiliary vector.
+ * If it is unsuccessful, the result will be 0, and errno will be set.
+ *
+ * @return A value from the auxiliary vector. When the value is 0, check
+ * errno to determine if an error occurred.
+ */
+unsigned long
+rte_cpu_getauxval(unsigned long type);
+
+/**
+ * This function retrieves a value from the auxiliary vector and compares it,
+ * as a string, against the string passed as the second argument.
+ *
+ * @return The result of calling strcmp() between the string retrieved from
+ * the auxiliary vector and str. When the result is 0 (meaning a match was
+ * found), check errno to determine whether an error occurred during retrieval.
+ */
+int
+rte_cpu_strcmp_auxval(unsigned long type, const char *str);
+
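A sketch of how the auxval helpers might be used; AT_HWCAP and AT_PLATFORM come from <sys/auxv.h> and are illustrative choices of key:

#include <errno.h>
#include <stdio.h>
#include <sys/auxv.h>
#include <rte_cpuflags.h>

static void
probe_auxv(void)
{
	unsigned long hwcap;

	errno = 0;
	hwcap = rte_cpu_getauxval(AT_HWCAP);
	if (hwcap == 0 && errno != 0)
		perror("rte_cpu_getauxval");
	else
		printf("AT_HWCAP: %#lx\n", hwcap);

	/* strcmp()-style comparison against an auxiliary vector string. */
	errno = 0;
	if (rte_cpu_strcmp_auxval(AT_PLATFORM, "x86_64") == 0 && errno == 0)
		printf("running on an x86_64 platform\n");
}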
#endif /* _RTE_CPUFLAGS_H_ */
diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h
index 899e9bc4..5751a0e6 100644
--- a/lib/librte_eal/common/include/generic/rte_rwlock.h
+++ b/lib/librte_eal/common/include/generic/rte_rwlock.h
@@ -71,7 +71,7 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
continue;
}
success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
- x, x + 1);
+ (uint32_t)x, (uint32_t)(x + 1));
}
}
@@ -107,7 +107,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
continue;
}
success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
- 0, -1);
+ 0, (uint32_t)-1);
}
}
diff --git a/lib/librte_eal/common/include/rte_bitmap.h b/lib/librte_eal/common/include/rte_bitmap.h
index 7d4935fc..d9facc64 100644
--- a/lib/librte_eal/common/include/rte_bitmap.h
+++ b/lib/librte_eal/common/include/rte_bitmap.h
@@ -198,12 +198,12 @@ rte_bitmap_get_memory_footprint(uint32_t n_bits) {
/**
* Bitmap initialization
*
- * @param mem_size
- * Minimum expected size of bitmap.
+ * @param n_bits
+ * Number of pre-allocated bits in array2.
* @param mem
* Base address of array1 and array2.
- * @param n_bits
- * Number of pre-allocated bits in array2. Must be non-zero and multiple of 512.
+ * @param mem_size
+ * Minimum expected size of bitmap.
* @return
* Handle to bitmap instance.
*/
diff --git a/lib/librte_eal/common/include/rte_bus.h b/lib/librte_eal/common/include/rte_bus.h
index 6fb08341..b7b5b084 100644
--- a/lib/librte_eal/common/include/rte_bus.h
+++ b/lib/librte_eal/common/include/rte_bus.h
@@ -211,6 +211,7 @@ struct rte_bus {
rte_bus_parse_t parse; /**< Parse a device name */
struct rte_bus_conf conf; /**< Bus configuration */
rte_bus_get_iommu_class_t get_iommu_class; /**< Get iommu class */
+ rte_dev_iterate_t dev_iterate; /**< Device iterator. */
};
/**
@@ -325,8 +326,7 @@ enum rte_iova_mode rte_bus_get_iommu_class(void);
* The constructor has higher priority than PMD constructors.
*/
#define RTE_REGISTER_BUS(nm, bus) \
-RTE_INIT_PRIO(businitfn_ ##nm, 110); \
-static void businitfn_ ##nm(void) \
+RTE_INIT_PRIO(businitfn_ ##nm, BUS) \
{\
(bus).name = RTE_STR(nm);\
rte_bus_register(&bus); \
diff --git a/lib/librte_eal/common/include/rte_class.h b/lib/librte_eal/common/include/rte_class.h
new file mode 100644
index 00000000..276c91e9
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_class.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Gaëtan Rivet
+ */
+
+#ifndef _RTE_CLASS_H_
+#define _RTE_CLASS_H_
+
+/**
+ * @file
+ *
+ * DPDK device class interface.
+ *
+ * This file describes the interface of the device class
+ * abstraction layer.
+ *
+ * A device class defines the type of function a device
+ * will be used for e.g.: Ethernet adapter (eth),
+ * cryptographic coprocessor (crypto), etc.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/queue.h>
+
+#include <rte_dev.h>
+
+/** Double linked list of classes */
+TAILQ_HEAD(rte_class_list, rte_class);
+
+/**
+ * A structure describing a generic device class.
+ */
+struct rte_class {
+ TAILQ_ENTRY(rte_class) next; /**< Next device class in linked list */
+ const char *name; /**< Name of the class */
+ rte_dev_iterate_t dev_iterate; /**< Device iterator. */
+};
+
+/**
+ * Class comparison function.
+ *
+ * @param cls
+ * Class under test.
+ *
+ * @param data
+ * Data to compare against.
+ *
+ * @return
+ * 0 if the class matches the data.
+ * !0 if the class does not match.
+ * <0 if ordering is possible and the class is lower than the data.
+ * >0 if ordering is possible and the class is greater than the data.
+ */
+typedef int (*rte_class_cmp_t)(const struct rte_class *cls, const void *data);
+
+/**
+ * Class iterator to find a particular class.
+ *
+ * This function compares each registered class to find one that matches
+ * the data passed as parameter.
+ *
+ * If the comparison function returns zero, this function will stop iterating
+ * over any more classes. To continue a search, the class of a previous search
+ * can be passed via the start parameter.
+ *
+ * @param start
+ * Starting point for the iteration.
+ *
+ * @param cmp
+ * Comparison function.
+ *
+ * @param data
+ * Data to pass to comparison function.
+ *
+ * @return
+ * A pointer to a rte_class structure or NULL in case no class matches
+ */
+__rte_experimental
+struct rte_class *
+rte_class_find(const struct rte_class *start, rte_class_cmp_t cmp,
+ const void *data);
+
+/**
+ * Find the registered class for a given name.
+ */
+__rte_experimental
+struct rte_class *
+rte_class_find_by_name(const char *name);
+
+/**
+ * Register a Class handle.
+ *
+ * @param cls
+ * A pointer to a rte_class structure describing the class
+ * to be registered.
+ */
+__rte_experimental
+void rte_class_register(struct rte_class *cls);
+
+/**
+ * Unregister a Class handle.
+ *
+ * @param cls
+ * A pointer to a rte_class structure describing the class
+ * to be unregistered.
+ */
+__rte_experimental
+void rte_class_unregister(struct rte_class *cls);
+
+/**
+ * Helper for Class registration.
+ * The constructor has lower priority than Bus constructors.
+ * The constructor has higher priority than PMD constructors.
+ */
+#define RTE_REGISTER_CLASS(nm, cls) \
+RTE_INIT_PRIO(classinitfn_ ##nm, CLASS) \
+{\
+ (cls).name = RTE_STR(nm); \
+ rte_class_register(&cls); \
+}
+
+#define RTE_UNREGISTER_CLASS(nm, cls) \
+RTE_FINI_PRIO(classfinifn_ ##nm, CLASS) \
+{ \
+ rte_class_unregister(&cls); \
+}
+
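A sketch of class registration using the helpers above; the "demo" class and its empty iterator are hypothetical:

#include <stddef.h>
#include <rte_class.h>

static void *
demo_dev_iterate(const void *start, const char *str,
		 const struct rte_dev_iterator *it)
{
	(void)start; (void)str; (void)it;
	return NULL; /* a real class would walk its device list here */
}

static struct rte_class rte_class_demo = {
	.dev_iterate = demo_dev_iterate,
};

/* Expands to constructors/destructors running between bus and PMD init. */
RTE_REGISTER_CLASS(demo, rte_class_demo)
RTE_UNREGISTER_CLASS(demo, rte_class_demo)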
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CLASS_H_ */
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index c7803e41..069c13ec 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -81,6 +81,26 @@ typedef uint16_t unaligned_uint16_t;
*/
#define RTE_SET_USED(x) (void)(x)
+#define RTE_PRIORITY_LOG 101
+#define RTE_PRIORITY_BUS 110
+#define RTE_PRIORITY_CLASS 120
+#define RTE_PRIORITY_LAST 65535
+
+#define RTE_PRIO(prio) \
+ RTE_PRIORITY_ ## prio
+
+/**
+ * Run function before main() with high priority.
+ *
+ * @param func
+ * Constructor function.
+ * @param prio
+ * Priority number must be above 100.
+ * Lowest number is the first to run.
+ */
+#define RTE_INIT_PRIO(func, prio) \
+static void __attribute__((constructor(RTE_PRIO(prio)), used)) func(void)
+
/**
* Run function before main() with low priority.
*
@@ -90,19 +110,30 @@ typedef uint16_t unaligned_uint16_t;
* Constructor function.
*/
#define RTE_INIT(func) \
-static void __attribute__((constructor, used)) func(void)
+ RTE_INIT_PRIO(func, LAST)
/**
- * Run function before main() with high priority.
+ * Run after main() with low priority.
*
* @param func
- * Constructor function.
+ * Destructor function name.
* @param prio
* Priority number must be above 100.
- * Lowest number is the first to run.
+ * Lowest number is the last to run.
*/
-#define RTE_INIT_PRIO(func, prio) \
-static void __attribute__((constructor(prio), used)) func(void)
+#define RTE_FINI_PRIO(func, prio) \
+static void __attribute__((destructor(RTE_PRIO(prio)), used)) func(void)
+
+/**
+ * Run after main() with high priority.
+ *
+ * The destructor will be run *before* prioritized destructors.
+ *
+ * @param func
+ * Destructor function name.
+ */
+#define RTE_FINI(func) \
+ RTE_FINI_PRIO(func, LAST)
/**
* Force a function to be inlined
@@ -117,7 +148,7 @@ static void __attribute__((constructor(prio), used)) func(void)
/*********** Macros for pointer arithmetic ********/
/**
- * add a byte-value offset from a pointer
+ * add a byte-value offset to a pointer
*/
#define RTE_PTR_ADD(ptr, x) ((void*)((uintptr_t)(ptr) + (x)))
@@ -191,6 +222,22 @@ static void __attribute__((constructor(prio), used)) func(void)
#define RTE_ALIGN(val, align) RTE_ALIGN_CEIL(val, align)
/**
+ * Macro to align a value to the multiple of given value. The resultant
+ * value will be of the same type as the first parameter and will be no lower
+ * than the first parameter.
+ */
+#define RTE_ALIGN_MUL_CEIL(v, mul) \
+	((((v) + (typeof(v))(mul) - 1) / ((typeof(v))(mul))) * (typeof(v))(mul))
+
+/**
+ * Macro to align a value to the multiple of given value. The resultant
+ * value will be of the same type as the first parameter and will be no higher
+ * than the first parameter.
+ */
+#define RTE_ALIGN_MUL_FLOOR(v, mul) \
+	(((v) / ((typeof(v))(mul))) * (typeof(v))(mul))
+
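Unlike RTE_ALIGN_CEIL()/RTE_ALIGN_FLOOR(), these macros accept any multiple, not only powers of two. A few worked values as a sketch:

#include <assert.h>
#include <rte_common.h>

static void
align_mul_examples(void)
{
	/* Round 13 up/down to a multiple of 5. */
	assert(RTE_ALIGN_MUL_CEIL(13U, 5) == 15U);
	assert(RTE_ALIGN_MUL_FLOOR(13U, 5) == 10U);
	/* A value already on the boundary is unchanged. */
	assert(RTE_ALIGN_MUL_CEIL(15U, 5) == 15U);
}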
+/**
* Checks if a pointer is aligned to a given power-of-two value
*
* @param ptr
@@ -223,9 +270,59 @@ extern int RTE_BUILD_BUG_ON_detected_error;
} while(0)
#endif
+/**
+ * Combines a 32b input's most significant set bits into the least
+ * significant bits to construct a value with the same MSBs as x
+ * but with all 1's under it.
+ *
+ * @param x
+ * The integer whose MSBs need to be combined with its LSBs
+ * @return
+ * The combined value.
+ */
+static inline uint32_t
+rte_combine32ms1b(register uint32_t x)
+{
+ x |= x >> 1;
+ x |= x >> 2;
+ x |= x >> 4;
+ x |= x >> 8;
+ x |= x >> 16;
+
+ return x;
+}
+
+/**
+ * Combines a 64b input's most significant set bits into the least
+ * significant bits to construct a value with the same MSBs as v
+ * but with all 1's under it.
+ *
+ * @param v
+ * The integer whose MSBs need to be combined with its LSBs
+ * @return
+ * The combined value.
+ */
+static inline uint64_t
+rte_combine64ms1b(register uint64_t v)
+{
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v |= v >> 32;
+
+ return v;
+}
+
/*********** Macros to work with powers of 2 ********/
/**
+ * Macro to return 1 if n is a power of 2, 0 otherwise
+ */
+#define RTE_IS_POWER_OF_2(n) ((n) && !(((n) - 1) & (n)))
+
+/**
* Returns true if n is a power of 2
* @param n
* Number to check
@@ -250,16 +347,29 @@ static inline uint32_t
rte_align32pow2(uint32_t x)
{
x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
+ x = rte_combine32ms1b(x);
return x + 1;
}
/**
+ * Aligns input parameter to the previous power of 2
+ *
+ * @param x
+ *   The integer value to align
+ *
+ * @return
+ * Input parameter aligned to the previous power of 2
+ */
+static inline uint32_t
+rte_align32prevpow2(uint32_t x)
+{
+ x = rte_combine32ms1b(x);
+
+ return x - (x >> 1);
+}
+
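For contrast with rte_align32pow2(), a short sketch of both rounding directions:

#include <assert.h>
#include <rte_common.h>

static void
pow2_align_examples(void)
{
	/* Next power of two at or above the input. */
	assert(rte_align32pow2(33U) == 64U);
	/* Previous power of two at or below the input. */
	assert(rte_align32prevpow2(33U) == 32U);
	/* Exact powers of two map to themselves in both directions. */
	assert(rte_align32pow2(32U) == 32U);
	assert(rte_align32prevpow2(32U) == 32U);
}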
+/**
* Aligns 64b input parameter to the next power of 2
*
* @param v
@@ -272,16 +382,28 @@ static inline uint64_t
rte_align64pow2(uint64_t v)
{
v--;
- v |= v >> 1;
- v |= v >> 2;
- v |= v >> 4;
- v |= v >> 8;
- v |= v >> 16;
- v |= v >> 32;
+ v = rte_combine64ms1b(v);
return v + 1;
}
+/**
+ * Aligns 64b input parameter to the previous power of 2
+ *
+ * @param v
+ * The 64b value to align
+ *
+ * @return
+ * Input parameter aligned to the previous power of 2
+ */
+static inline uint64_t
+rte_align64prevpow2(uint64_t v)
+{
+ v = rte_combine64ms1b(v);
+
+ return v - (v >> 1);
+}
+
/*********** Macros for calculating min and max **********/
/**
@@ -320,7 +442,7 @@ rte_align64pow2(uint64_t v)
static inline uint32_t
rte_bsf32(uint32_t v)
{
- return __builtin_ctz(v);
+ return (uint32_t)__builtin_ctz(v);
}
/**
diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h
index b688f1ef..b80a8059 100644
--- a/lib/librte_eal/common/include/rte_dev.h
+++ b/lib/librte_eal/common/include/rte_dev.h
@@ -24,6 +24,25 @@ extern "C" {
#include <rte_compat.h>
#include <rte_log.h>
+/**
+ * The device event type.
+ */
+enum rte_dev_event_type {
+ RTE_DEV_EVENT_ADD, /**< device being added */
+ RTE_DEV_EVENT_REMOVE, /**< device being removed */
+ RTE_DEV_EVENT_MAX /**< max value of this enum */
+};
+
+struct rte_dev_event {
+ enum rte_dev_event_type type; /**< device event type */
+ int subsystem; /**< subsystem id */
+ char *devname; /**< device name */
+};
+
+typedef void (*rte_dev_event_cb_fn)(char *device_name,
+ enum rte_dev_event_type event,
+ void *cb_arg);
+
__attribute__((format(printf, 2, 0)))
static inline void
rte_pmd_debug_trace(const char *func_name, const char *fmt, ...)
@@ -32,24 +51,25 @@ rte_pmd_debug_trace(const char *func_name, const char *fmt, ...)
va_start(ap, fmt);
- char buffer[vsnprintf(NULL, 0, fmt, ap) + 1];
+ {
+ char buffer[vsnprintf(NULL, 0, fmt, ap) + 1];
- va_end(ap);
+ va_end(ap);
- va_start(ap, fmt);
- vsnprintf(buffer, sizeof(buffer), fmt, ap);
- va_end(ap);
+ va_start(ap, fmt);
+ vsnprintf(buffer, sizeof(buffer), fmt, ap);
+ va_end(ap);
- rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, "%s: %s", func_name, buffer);
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, "%s: %s",
+ func_name, buffer);
+ }
}
/*
* Enable RTE_PMD_DEBUG_TRACE() when at least one component relying on the
* RTE_*_RET() macros defined below is compiled in debug mode.
*/
-#if defined(RTE_LIBRTE_ETHDEV_DEBUG) || \
- defined(RTE_LIBRTE_CRYPTODEV_DEBUG) || \
- defined(RTE_LIBRTE_EVENTDEV_DEBUG)
+#if defined(RTE_LIBRTE_EVENTDEV_DEBUG)
#define RTE_PMD_DEBUG_TRACE(...) \
rte_pmd_debug_trace(__func__, __VA_ARGS__)
#else
@@ -154,6 +174,7 @@ struct rte_device {
* @return
* 0 on success, negative on error.
*/
+__rte_deprecated
int rte_eal_dev_attach(const char *name, const char *devargs);
/**
@@ -164,6 +185,7 @@ int rte_eal_dev_attach(const char *name, const char *devargs);
* @return
* 0 on success, negative on error.
*/
+__rte_deprecated
int rte_eal_dev_detach(struct rte_device *dev);
/**
@@ -263,8 +285,179 @@ __attribute__((used)) = str
static const char DRV_EXP_TAG(name, kmod_dep_export)[] \
__attribute__((used)) = str
+/**
+ * Iteration context.
+ *
+ * This context carries over the current iteration state.
+ */
+struct rte_dev_iterator {
+ const char *dev_str; /**< device string. */
+ const char *bus_str; /**< bus-related part of device string. */
+ const char *cls_str; /**< class-related part of device string. */
+ struct rte_bus *bus; /**< bus handle. */
+ struct rte_class *cls; /**< class handle. */
+ struct rte_device *device; /**< current position. */
+ void *class_device; /**< additional specialized context. */
+};
+
+/**
+ * Device iteration function.
+ *
+ * Find the next device matching properties passed in parameters.
+ * The function takes an additional ``start`` parameter, that is
+ * used as starting context when relevant.
+ *
+ * The function returns the current element in the iteration.
+ * This return value will potentially be used as a start parameter
+ * in subsequent calls to the function.
+ *
+ * The additional iterator parameter is only there if a specific
+ * implementation needs additional context. It must not be modified by
+ * the iteration function itself.
+ *
+ * @param start
+ * Starting iteration context.
+ *
+ * @param devstr
+ * Device description string.
+ *
+ * @param it
+ * Device iterator.
+ *
+ * @return
+ * The address of the current element matching the device description
+ * string.
+ */
+typedef void *(*rte_dev_iterate_t)(const void *start,
+ const char *devstr,
+ const struct rte_dev_iterator *it);
+
+/**
+ * Initializes a device iterator.
+ *
+ * This iterator allows accessing a list of devices matching a criteria.
+ * The device matching is made among all buses and classes currently registered,
+ * filtered by the device description given as parameter.
+ *
+ * This function will not allocate any memory. It is safe to stop the
+ * iteration at any moment and let the iterator go out of context.
+ *
+ * @param it
+ * Device iterator handle.
+ *
+ * @param str
+ * Device description string.
+ *
+ * @return
+ * 0 on successful initialization.
+ * <0 on error.
+ */
+__rte_experimental
+int
+rte_dev_iterator_init(struct rte_dev_iterator *it, const char *str);
+
+/**
+ * Iterates on a device iterator.
+ *
+ * Generates a new rte_device handle corresponding to the next element
+ * in the list described by the iterator.
+ *
+ * The next object is returned, and the iterator is updated.
+ *
+ * @param it
+ * Device iterator handle.
+ *
+ * @return
+ * An rte_device handle if found.
+ * NULL if an error occurred (rte_errno is set).
+ * NULL if no device could be found (rte_errno is not set).
+ */
+__rte_experimental
+struct rte_device *
+rte_dev_iterator_next(struct rte_dev_iterator *it);
+
+#define RTE_DEV_FOREACH(dev, devstr, it) \
+ for (rte_dev_iterator_init(it, devstr), \
+ dev = rte_dev_iterator_next(it); \
+ dev != NULL; \
+ dev = rte_dev_iterator_next(it))
+
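A sketch of walking devices with the iterator; the "bus=vdev" filter string is illustrative:

#include <stdio.h>
#include <rte_dev.h>

static void
list_matching_devices(void)
{
	struct rte_dev_iterator it;
	struct rte_device *dev;

	RTE_DEV_FOREACH(dev, "bus=vdev", &it)
		printf("found device %s\n", dev->name);
}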
#ifdef __cplusplus
}
#endif
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a callback for a specific device.
+ * Multiple callbacks can be registered at the same time.
+ *
+ * @param device_name
+ *  The device name, that is, the name field of struct rte_device;
+ *  a NULL value means all devices.
+ * @param cb_fn
+ *  Callback address.
+ * @param cb_arg
+ *  Address of the parameter for the callback.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int __rte_experimental
+rte_dev_event_callback_register(const char *device_name,
+ rte_dev_event_cb_fn cb_fn,
+ void *cb_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Unregister the callback for the specified device.
+ *
+ * @param device_name
+ *  The device name, that is, the name field of struct rte_device;
+ *  a NULL value means all devices and their callbacks.
+ * @param cb_fn
+ *  Callback address.
+ * @param cb_arg
+ *  Address of the parameter for the callback; (void *)-1 means to remove
+ *  all registered callbacks with the same callback address.
+ *
+ * @return
+ * - On success, return the number of callback entities removed.
+ * - On failure, a negative value.
+ */
+int __rte_experimental
+rte_dev_event_callback_unregister(const char *device_name,
+ rte_dev_event_cb_fn cb_fn,
+ void *cb_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start the device event monitoring.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int __rte_experimental
+rte_dev_event_monitor_start(void);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop the device event monitoring.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int __rte_experimental
+rte_dev_event_monitor_stop(void);
+
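A sketch combining the callback and monitor APIs above; the callback body is illustrative:

#include <stdio.h>
#include <rte_dev.h>

static void
on_dev_event(char *device_name, enum rte_dev_event_type event, void *cb_arg)
{
	(void)cb_arg;
	printf("device %s %s\n", device_name,
	       event == RTE_DEV_EVENT_ADD ? "added" : "removed");
}

static int
start_hotplug_monitor(void)
{
	/* NULL device name: receive events for all devices. */
	if (rte_dev_event_callback_register(NULL, on_dev_event, NULL) < 0)
		return -1;
	return rte_dev_event_monitor_start();
}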
#endif /* _RTE_DEV_H_ */
diff --git a/lib/librte_eal/common/include/rte_devargs.h b/lib/librte_eal/common/include/rte_devargs.h
index 84e5e23c..097a4ce7 100644
--- a/lib/librte_eal/common/include/rte_devargs.h
+++ b/lib/librte_eal/common/include/rte_devargs.h
@@ -51,21 +51,23 @@ struct rte_devargs {
enum rte_devtype type;
/** Device policy. */
enum rte_dev_policy policy;
- /** Bus handle for the device. */
- struct rte_bus *bus;
/** Name of the device. */
char name[RTE_DEV_NAME_MAX_LEN];
+ RTE_STD_C11
+ union {
/** Arguments string as given by user or "" for no argument. */
- char *args;
+ char *args;
+ const char *drv_str;
+ };
+ struct rte_bus *bus; /**< bus handle. */
+ struct rte_class *cls; /**< class handle. */
+ const char *bus_str; /**< bus-related part of device string. */
+ const char *cls_str; /**< class-related part of device string. */
+ const char *data; /**< Device string storage. */
};
-/** user device double-linked queue type definition */
-TAILQ_HEAD(rte_devargs_list, rte_devargs);
-
-/** Global list of user devices */
-extern struct rte_devargs_list devargs_list;
-
/**
+ * @deprecated
* Parse a devargs string.
*
* For PCI devices, the format of arguments string is "PCI_ADDR" or
@@ -90,6 +92,7 @@ extern struct rte_devargs_list devargs_list;
* - 0 on success
* - A negative value on error
*/
+__rte_deprecated
int rte_eal_parse_devargs_str(const char *devargs_str,
char **drvname, char **drvargs);
@@ -100,18 +103,73 @@ int rte_eal_parse_devargs_str(const char *devargs_str,
* in argument. Store which bus will handle the device, its name
* and the eventual device parameters.
*
+ * The syntax is:
+ *
+ * bus:device_identifier,arg1=val1,arg2=val2
+ *
+ * where "bus:" is the bus name followed by any character separator.
+ * The bus name is optional. If no bus name is specified, each bus
+ * will attempt to recognize the device identifier. The first one
+ * to succeed will be used.
+ *
+ * Examples:
+ *
+ *     pci:0000:05:00.0,arg=val
+ *     05:00.0,arg=val
+ * vdev:net_ring0
+ *
+ * @param da
+ * The devargs structure holding the device information.
+ *
* @param dev
- * The device declaration string.
+ * String describing a device.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative errno on error.
+ */
+__rte_experimental
+int
+rte_devargs_parse(struct rte_devargs *da, const char *dev);
+
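A sketch of parsing a device string with the new API (the PCI address and argument are illustrative):

#include <string.h>
#include <rte_devargs.h>

static int
parse_one_device(void)
{
	struct rte_devargs da;

	memset(&da, 0, sizeof(da));
	/* Explicit "pci" bus; driver arguments follow the comma. */
	if (rte_devargs_parse(&da, "pci:0000:05:00.0,arg=val") < 0)
		return -1;
	/* da.bus, da.name and the argument string are now filled in. */
	return 0;
}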
+/**
+ * Parse a device string.
+ *
+ * Verify that a bus is capable of handling the device passed
+ * in argument. Store which bus will handle the device, its name
+ * and the eventual device parameters.
+ *
+ * The device string is built with a printf-like syntax.
+ *
+ * The syntax is:
+ *
+ * bus:device_identifier,arg1=val1,arg2=val2
+ *
+ * where "bus:" is the bus name followed by any character separator.
+ * The bus name is optional. If no bus name is specified, each bus
+ * will attempt to recognize the device identifier. The first one
+ * to succeed will be used.
+ *
+ * Examples:
+ *
+ *     pci:0000:05:00.0,arg=val
+ *     05:00.0,arg=val
+ * vdev:net_ring0
+ *
* @param da
* The devargs structure holding the device information.
+ * @param format
+ * Format string describing a device.
*
* @return
* - 0 on success.
* - Negative errno on error.
*/
-int __rte_experimental
-rte_eal_devargs_parse(const char *dev,
- struct rte_devargs *da);
+__rte_experimental
+int
+rte_devargs_parsef(struct rte_devargs *da,
+ const char *format, ...)
+__attribute__((format(printf, 2, 3)));
/**
* Insert an rte_devargs in the global list.
@@ -123,21 +181,30 @@ rte_eal_devargs_parse(const char *dev,
* - 0 on success
* - Negative on error.
*/
-int __rte_experimental
-rte_eal_devargs_insert(struct rte_devargs *da);
+__rte_experimental
+int
+rte_devargs_insert(struct rte_devargs *da);
/**
* Add a device to the user device list
+ * See rte_devargs_parse() for details.
*
- * For PCI devices, the format of arguments string is "PCI_ADDR" or
- * "PCI_ADDR,key=val,key2=val2,...". Examples: "08:00.1", "0000:5:00.0",
- * "04:00.0,arg=val".
+ * @param devtype
+ * The type of the device.
+ * @param devargs_str
+ * The arguments as given by the user.
*
- * For virtual devices, the format of arguments string is "DRIVER_NAME*"
- * or "DRIVER_NAME*,key=val,key2=val2,...". Examples: "net_ring",
- * "net_ring0", "net_pmdAnything,arg=0:arg2=1". The validity of the
- * driver name is not checked by this function, it is done when probing
- * the drivers.
+ * @return
+ * - 0 on success
+ * - A negative value on error
+ */
+__rte_experimental
+int rte_devargs_add(enum rte_devtype devtype, const char *devargs_str);
+
+/**
+ * @deprecated
+ * Add a device to the user device list
+ * See rte_devargs_parse() for details.
*
* @param devtype
* The type of the device.
@@ -148,6 +215,7 @@ rte_eal_devargs_insert(struct rte_devargs *da);
* - 0 on success
* - A negative value on error
*/
+__rte_deprecated
int rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str);
/**
@@ -166,10 +234,25 @@ int rte_eal_devargs_add(enum rte_devtype devtype, const char *devargs_str);
* <0 on error.
* >0 if the devargs was not within the user device list.
*/
-int __rte_experimental rte_eal_devargs_remove(const char *busname,
- const char *devname);
+__rte_experimental
+int rte_devargs_remove(const char *busname,
+ const char *devname);
+
+/**
+ * Count the number of user devices of a specified type
+ *
+ * @param devtype
+ *   The type of the devices to be counted.
+ *
+ * @return
+ * The number of devices.
+ */
+__rte_experimental
+unsigned int
+rte_devargs_type_count(enum rte_devtype devtype);
/**
+ * @deprecated
* Count the number of user devices of a specified type
*
* @param devtype
@@ -178,6 +261,7 @@ int __rte_experimental rte_eal_devargs_remove(const char *busname,
* @return
* The number of devices.
*/
+__rte_deprecated
unsigned int
rte_eal_devargs_type_count(enum rte_devtype devtype);
@@ -187,8 +271,47 @@ rte_eal_devargs_type_count(enum rte_devtype devtype);
* @param f
* A pointer to a file for output
*/
+__rte_experimental
+void rte_devargs_dump(FILE *f);
+
+/**
+ * @deprecated
+ * This function dumps the list of user device and their arguments.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+__rte_deprecated
void rte_eal_devargs_dump(FILE *f);
+/**
+ * Find next rte_devargs matching the provided bus name.
+ *
+ * @param busname
+ * Limit the iteration to devargs related to buses
+ * matching this name.
+ *   If NULL, the next rte_devargs of any bus is returned.
+ *
+ * @param start
+ * Starting iteration point. The iteration will start at
+ * the first rte_devargs if NULL.
+ *
+ * @return
+ * Next rte_devargs entry matching the requested bus,
+ * NULL if there is none.
+ */
+__rte_experimental
+struct rte_devargs *
+rte_devargs_next(const char *busname, const struct rte_devargs *start);
+
+/**
+ * Iterate over all rte_devargs for a specific bus.
+ */
+#define RTE_EAL_DEVARGS_FOREACH(busname, da) \
+ for (da = rte_devargs_next(busname, NULL); \
+ da != NULL; \
+ da = rte_devargs_next(busname, da)) \
+
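A sketch of the iteration macro for one bus:

#include <stdio.h>
#include <rte_devargs.h>

static void
dump_pci_devargs(void)
{
	struct rte_devargs *da;

	/* Walk every stored devargs entry attached to the "pci" bus. */
	RTE_EAL_DEVARGS_FOREACH("pci", da)
		printf("%s: %s\n", da->name, da->args);
}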
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
index 044474e6..e114dcbd 100644
--- a/lib/librte_eal/common/include/rte_eal.h
+++ b/lib/librte_eal/common/include/rte_eal.h
@@ -57,6 +57,8 @@ enum rte_proc_type_t {
struct rte_config {
uint32_t master_lcore; /**< Id of the master lcore */
uint32_t lcore_count; /**< Number of available logical cores. */
+ uint32_t numa_node_count; /**< Number of detected NUMA nodes. */
+ uint32_t numa_nodes[RTE_MAX_NUMA_NODES]; /**< List of detected NUMA nodes. */
uint32_t service_lcore_count;/**< Number of available service cores. */
enum rte_lcore_role_t lcore_role[RTE_MAX_LCORE]; /**< State of cores. */
@@ -230,6 +232,16 @@ struct rte_mp_reply {
typedef int (*rte_mp_t)(const struct rte_mp_msg *msg, const void *peer);
/**
+ * Asynchronous reply function typedef used by other components.
+ *
+ * As we create a socket channel for primary/secondary communication, use
+ * this function typedef to register an action for incoming responses to
+ * asynchronous requests.
+ */
+typedef int (*rte_mp_async_reply_t)(const struct rte_mp_msg *request,
+ const struct rte_mp_reply *reply);
+
+/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
@@ -314,13 +326,39 @@ rte_mp_sendmsg(struct rte_mp_msg *msg);
* - On failure, return -1, and the reason will be stored in rte_errno.
*/
int __rte_experimental
-rte_mp_request(struct rte_mp_msg *req, struct rte_mp_reply *reply,
+rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
const struct timespec *ts);
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
+ * Send a request to the peer process and expect a reply in a separate callback.
+ *
+ * This function sends a request message to the peer process, and will not
+ * block. Instead, the reply will be received in a separate callback.
+ *
+ * @param req
+ * The req argument contains the customized request message.
+ *
+ * @param ts
+ * The ts argument specifies how long we can wait for the peer(s) to reply.
+ *
+ * @param clb
+ * The callback to trigger when all responses for this request have arrived.
+ *
+ * @return
+ * - On success, return 0.
+ * - On failure, return -1, and the reason will be stored in rte_errno.
+ */
+int __rte_experimental
+rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
+ rte_mp_async_reply_t clb);
+
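A sketch of the asynchronous request flow; the message name and timeout are illustrative:

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <rte_eal.h>

static int
on_reply(const struct rte_mp_msg *request, const struct rte_mp_reply *reply)
{
	(void)request;
	/* reply->nb_received of reply->nb_sent peers have answered. */
	return reply->nb_received == reply->nb_sent ? 0 : -1;
}

static int
send_async_request(void)
{
	struct rte_mp_msg msg;
	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

	memset(&msg, 0, sizeof(msg));
	snprintf(msg.name, sizeof(msg.name), "demo_request");
	return rte_mp_request_async(&msg, &ts, on_reply);
}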
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
* Send a reply to the peer process.
*
* This function will send a reply message in response to a request message
@@ -452,25 +490,13 @@ static inline int rte_gettid(void)
enum rte_iova_mode rte_eal_iova_mode(void);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Get user provided pool ops name for mbuf
*
* @return
* returns user provided pool ops name.
*/
-const char * __rte_experimental
-rte_eal_mbuf_user_pool_ops(void);
-
-/**
- * Get default pool ops name for mbuf
- *
- * @return
- * returns default pool ops name.
- */
const char *
-rte_eal_mbuf_default_mempool_ops(void);
+rte_eal_mbuf_user_pool_ops(void);
#ifdef __cplusplus
}
diff --git a/lib/librte_eal/common/include/rte_eal_interrupts.h b/lib/librte_eal/common/include/rte_eal_interrupts.h
index 3f792a97..6eb49327 100644
--- a/lib/librte_eal/common/include/rte_eal_interrupts.h
+++ b/lib/librte_eal/common/include/rte_eal_interrupts.h
@@ -34,6 +34,7 @@ enum rte_intr_handle_type {
RTE_INTR_HANDLE_ALARM, /**< alarm handle */
RTE_INTR_HANDLE_EXT, /**< external handler */
RTE_INTR_HANDLE_VDEV, /**< virtual device */
+ RTE_INTR_HANDLE_DEV_EVENT, /**< device event handle */
RTE_INTR_HANDLE_MAX /**< count of elements */
};
diff --git a/lib/librte_eal/common/include/rte_eal_memconfig.h b/lib/librte_eal/common/include/rte_eal_memconfig.h
index 29fa0b60..aff0688d 100644
--- a/lib/librte_eal/common/include/rte_eal_memconfig.h
+++ b/lib/librte_eal/common/include/rte_eal_memconfig.h
@@ -12,12 +12,31 @@
#include <rte_malloc_heap.h>
#include <rte_rwlock.h>
#include <rte_pause.h>
+#include <rte_fbarray.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
+ * memseg list is a special case as we need to store a bunch of other data
+ * together with the array itself.
+ */
+struct rte_memseg_list {
+ RTE_STD_C11
+ union {
+ void *base_va;
+ /**< Base virtual address for this memseg list. */
+ uint64_t addr_64;
+ /**< Makes sure addr is always 64-bits */
+ };
+ int socket_id; /**< Socket ID for all memsegs in this list. */
+ uint64_t page_sz; /**< Page size for all memsegs in this list. */
+ volatile uint32_t version; /**< version number for multiprocess sync. */
+ struct rte_fbarray memseg_arr;
+};
+
+/**
* the structure for the memory configuration for the RTE.
* Used by the rte_config structure. It is separated out, as for multi-process
* support, the memory details should be shared across instances
@@ -40,11 +59,14 @@ struct rte_mem_config {
rte_rwlock_t qlock; /**< used for tailq operation for thread safe. */
rte_rwlock_t mplock; /**< only used by mempool LIB for thread-safe. */
- uint32_t memzone_cnt; /**< Number of allocated memzones */
+ rte_rwlock_t memory_hotplug_lock;
+ /**< indicates whether memory hotplug request is in progress. */
/* memory segments and zones */
- struct rte_memseg memseg[RTE_MAX_MEMSEG]; /**< Physmem descriptors. */
- struct rte_memzone memzone[RTE_MAX_MEMZONE]; /**< Memzone descriptors. */
+ struct rte_fbarray memzones; /**< Memzone descriptors. */
+
+ struct rte_memseg_list memsegs[RTE_MAX_MEMSEG_LISTS];
+ /**< list of dynamic arrays holding memsegs */
struct rte_tailq_head tailq_head[RTE_MAX_TAILQ]; /**< Tailqs for objects */
diff --git a/lib/librte_eal/common/include/rte_fbarray.h b/lib/librte_eal/common/include/rte_fbarray.h
new file mode 100644
index 00000000..5d880551
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_fbarray.h
@@ -0,0 +1,470 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef RTE_FBARRAY_H
+#define RTE_FBARRAY_H
+
+/**
+ * @file
+ *
+ * File-backed shared indexed array for DPDK.
+ *
+ * Basic workflow is expected to be the following:
+ * 1) Allocate array either using ``rte_fbarray_init()`` or
+ * ``rte_fbarray_attach()`` (depending on whether it's shared between
+ * multiple DPDK processes)
+ * 2) find free spots using ``rte_fbarray_find_next_free()``
+ * 3) get pointer to data in the free spot using ``rte_fbarray_get()``, and
+ * copy data into the pointer (element size is fixed)
+ * 4) mark entry as used using ``rte_fbarray_set_used()``
+ *
+ * Calls to ``rte_fbarray_init()`` and ``rte_fbarray_destroy()`` will have
+ * consequences for all processes, while calls to ``rte_fbarray_attach()`` and
+ * ``rte_fbarray_detach()`` will only have consequences within a single process.
+ * Therefore, it is safe to call ``rte_fbarray_attach()`` or
+ * ``rte_fbarray_detach()`` while another process is using ``rte_fbarray``,
+ * provided no other thread within the same process will try to use
+ * ``rte_fbarray`` before attaching or after detaching. It is not safe to call
+ * ``rte_fbarray_init()`` or ``rte_fbarray_destroy()`` while another thread or
+ * another process is using ``rte_fbarray``.
+ */
+
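A sketch of the workflow described above; the array name, length, and element type are illustrative:

#include <rte_fbarray.h>

struct demo_elt { int value; };

static int
fbarray_example(struct rte_fbarray *arr)
{
	struct demo_elt *elt;
	int idx;

	/* 1) allocate backing storage for 64 fixed-size elements */
	if (rte_fbarray_init(arr, "demo_fbarray", 64, sizeof(*elt)) < 0)
		return -1;
	/* 2) find a free slot */
	idx = rte_fbarray_find_next_free(arr, 0);
	if (idx < 0)
		return -1;
	/* 3) write data through the returned pointer */
	elt = rte_fbarray_get(arr, (unsigned int)idx);
	elt->value = 42;
	/* 4) mark the slot as used */
	return rte_fbarray_set_used(arr, (unsigned int)idx);
}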
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#include <rte_compat.h>
+#include <rte_rwlock.h>
+
+#define RTE_FBARRAY_NAME_LEN 64
+
+struct rte_fbarray {
+ char name[RTE_FBARRAY_NAME_LEN]; /**< name associated with an array */
+ unsigned int count; /**< number of entries stored */
+ unsigned int len; /**< current length of the array */
+ unsigned int elt_sz; /**< size of each element */
+ void *data; /**< data pointer */
+ rte_rwlock_t rwlock; /**< multiprocess lock */
+};
+
+/**
+ * Set up ``rte_fbarray`` structure and allocate underlying resources.
+ *
+ * Call this function to correctly set up ``rte_fbarray`` and allocate
+ * underlying files that will be backing the data in the current process. Note
+ * that in order to use and share ``rte_fbarray`` between multiple processes,
+ * data pointed to by ``arr`` pointer must itself be allocated in shared memory.
+ *
+ * @param arr
+ * Valid pointer to allocated ``rte_fbarray`` structure.
+ *
+ * @param name
+ * Unique name to be assigned to this array.
+ *
+ * @param len
+ * Number of elements initially available in the array.
+ *
+ * @param elt_sz
+ * Size of each element.
+ *
+ * @return
+ * - 0 on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_init(struct rte_fbarray *arr, const char *name, unsigned int len,
+ unsigned int elt_sz);
+
+
+/**
+ * Attach to a file backing an already allocated and correctly set up
+ * ``rte_fbarray`` structure.
+ *
+ * Call this function to attach to file that will be backing the data in the
+ * current process. The structure must have been previously correctly set up
+ * with a call to ``rte_fbarray_init()``. Calls to ``rte_fbarray_attach()`` are
+ * usually meant to be performed in a multiprocessing scenario, with data
+ * pointed to by ``arr`` pointer allocated in shared memory.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up rte_fbarray structure.
+ *
+ * @return
+ * - 0 on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_attach(struct rte_fbarray *arr);
+
+
+/**
+ * Deallocate resources for an already allocated and correctly set up
+ * ``rte_fbarray`` structure, and remove the underlying file.
+ *
+ * Call this function to deallocate all resources associated with an
+ * ``rte_fbarray`` structure within the current process. This will also
+ * zero-fill data pointed to by ``arr`` pointer and remove the underlying file
+ * backing the data, so it is expected that by the time this function is called,
+ * all other processes have detached from this ``rte_fbarray``.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @return
+ * - 0 on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_destroy(struct rte_fbarray *arr);
+
+
+/**
+ * Deallocate resources for an already allocated and correctly set up
+ * ``rte_fbarray`` structure.
+ *
+ * Call this function to deallocate all resources associated with an
+ * ``rte_fbarray`` structure within current process.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @return
+ * - 0 on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_detach(struct rte_fbarray *arr);
+
+
+/**
+ * Get pointer to element residing at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param idx
+ * Index of an element to get a pointer to.
+ *
+ * @return
+ * - non-NULL pointer on success.
+ * - NULL on failure, with ``rte_errno`` indicating reason for failure.
+ */
+void * __rte_experimental
+rte_fbarray_get(const struct rte_fbarray *arr, unsigned int idx);
+
+
+/**
+ * Find index of a specified element within the array.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param elt
+ * Pointer to element to find index to.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_idx(const struct rte_fbarray *arr, const void *elt);
+
+
+/**
+ * Mark specified element as used.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param idx
+ * Element index to mark as used.
+ *
+ * @return
+ * - 0 on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_set_used(struct rte_fbarray *arr, unsigned int idx);
+
+
+/**
+ * Mark specified element as free.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param idx
+ * Element index to mark as free.
+ *
+ * @return
+ * - 0 on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_set_free(struct rte_fbarray *arr, unsigned int idx);
+
+
+/**
+ * Check whether element at specified index is marked as used.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param idx
+ * Element index to check as used.
+ *
+ * @return
+ * - 1 if element is used.
+ * - 0 if element is unused.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_is_used(struct rte_fbarray *arr, unsigned int idx);
+
+
+/**
+ * Find index of next free element, starting at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_next_free(struct rte_fbarray *arr, unsigned int start);
+
+
+/**
+ * Find index of next used element, starting at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_next_used(struct rte_fbarray *arr, unsigned int start);
+
+
+/**
+ * Find index of next chunk of ``n`` free elements, starting at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @param n
+ * Number of free elements to look for.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_next_n_free(struct rte_fbarray *arr, unsigned int start,
+ unsigned int n);
+
+
+/**
+ * Find index of next chunk of ``n`` used elements, starting at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @param n
+ * Number of used elements to look for.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_next_n_used(struct rte_fbarray *arr, unsigned int start,
+ unsigned int n);
+
+
+/**
+ * Find how many more free entries there are, starting at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_contig_free(struct rte_fbarray *arr,
+ unsigned int start);
+
+
+/**
+ * Find how many more used entries there are, starting at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_contig_used(struct rte_fbarray *arr, unsigned int start);
+
+/**
+ * Find index of previous free element, starting at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_prev_free(struct rte_fbarray *arr, unsigned int start);
+
+
+/**
+ * Find index of previous used element, starting at specified index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_prev_used(struct rte_fbarray *arr, unsigned int start);
+
+
+/**
+ * Find lowest start index of chunk of ``n`` free elements, down from specified
+ * index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @param n
+ * Number of free elements to look for.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_prev_n_free(struct rte_fbarray *arr, unsigned int start,
+ unsigned int n);
+
+
+/**
+ * Find lowest start index of chunk of ``n`` used elements, down from specified
+ * index.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @param n
+ * Number of used elements to look for.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_prev_n_used(struct rte_fbarray *arr, unsigned int start,
+ unsigned int n);
+
+
+/**
+ * Find how many more free entries there are before specified index (like
+ * ``rte_fbarray_find_contig_free`` but going in reverse).
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_rev_contig_free(struct rte_fbarray *arr,
+ unsigned int start);
+
+
+/**
+ * Find how many more used entries there are before specified index (like
+ * ``rte_fbarray_find_contig_used`` but going in reverse).
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param start
+ * Element index to start search from.
+ *
+ * @return
+ * - non-negative integer on success.
+ * - -1 on failure, with ``rte_errno`` indicating reason for failure.
+ */
+int __rte_experimental
+rte_fbarray_find_rev_contig_used(struct rte_fbarray *arr, unsigned int start);
+
+
+/**
+ * Dump ``rte_fbarray`` metadata.
+ *
+ * @param arr
+ * Valid pointer to allocated and correctly set up ``rte_fbarray`` structure.
+ *
+ * @param f
+ * File object to dump information into.
+ */
+void __rte_experimental
+rte_fbarray_dump_metadata(struct rte_fbarray *arr, FILE *f);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_FBARRAY_H */
diff --git a/lib/librte_eal/common/include/rte_hypervisor.h b/lib/librte_eal/common/include/rte_hypervisor.h
index 8d8aac74..5fe719c1 100644
--- a/lib/librte_eal/common/include/rte_hypervisor.h
+++ b/lib/librte_eal/common/include/rte_hypervisor.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2017 Mellanox Technologies, Ltd.
+ * Copyright 2017 Mellanox Technologies, Ltd
*/
#ifndef RTE_HYPERVISOR_H
diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h
index 04722203..6e09d918 100644
--- a/lib/librte_eal/common/include/rte_lcore.h
+++ b/lib/librte_eal/common/include/rte_lcore.h
@@ -119,7 +119,7 @@ rte_lcore_index(int lcore_id)
if (lcore_id >= RTE_MAX_LCORE)
return -1;
if (lcore_id < 0)
- lcore_id = rte_lcore_id();
+ lcore_id = (int)rte_lcore_id();
return lcore_config[lcore_id].core_index;
}
@@ -132,6 +132,36 @@ rte_lcore_index(int lcore_id)
unsigned rte_socket_id(void);
/**
+ * Return number of physical sockets detected on the system.
+ *
+ * Note that the number of nodes may not correspond to their physical IDs:
+ * for example, a system may report two sockets, whose actual physical IDs
+ * may be 0 and 8.
+ *
+ * @return
+ * the number of physical sockets as recognized by EAL
+ */
+unsigned int __rte_experimental
+rte_socket_count(void);
+
+/**
+ * Return socket id with a particular index.
+ *
+ * This will return the socket ID at a particular position in the list of all
+ * detected physical socket IDs. For example, on a machine with sockets [0, 8],
+ * passing 1 as a parameter will return 8.
+ *
+ * @param idx
+ * index of physical socket id to return
+ *
+ * @return
+ * - physical socket id as recognized by EAL
+ * - -1 on error, with errno set to EINVAL
+ */
+int __rte_experimental
+rte_socket_id_by_idx(unsigned int idx);
+
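A sketch enumerating the detected sockets with the two new helpers:

#include <stdio.h>
#include <rte_lcore.h>

static void
print_sockets(void)
{
	unsigned int i, n = rte_socket_count();

	/* Physical IDs may be sparse, e.g. [0, 8] on a two-socket machine. */
	for (i = 0; i < n; i++)
		printf("socket index %u -> physical id %d\n",
		       i, rte_socket_id_by_idx(i));
}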
+/**
* Get the ID of the physical socket of the specified lcore
*
* @param lcore_id
@@ -247,6 +277,32 @@ void rte_thread_get_affinity(rte_cpuset_t *cpusetp);
int rte_thread_setname(pthread_t id, const char *name);
/**
+ * Create a control thread.
+ *
+ * Wrapper to pthread_create(), pthread_setname_np() and
+ * pthread_setaffinity_np(). The dataplane and service lcores are
+ * excluded from the affinity of the new thread.
+ *
+ * @param thread
+ * Filled with the thread id of the new created thread.
+ * @param name
+ * The name of the control thread (max 16 characters including '\0').
+ * @param attr
+ * Attributes for the new thread.
+ * @param start_routine
+ * Function to be executed by the new thread.
+ * @param arg
+ * Argument passed to start_routine.
+ * @return
+ * On success, returns 0; on error, it returns a negative value
+ * corresponding to the error number.
+ */
+__rte_experimental int
+rte_ctrl_thread_create(pthread_t *thread, const char *name,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *), void *arg);
+
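A sketch of spawning a control thread kept off the datapath cores; the thread body is illustrative:

#include <pthread.h>
#include <rte_lcore.h>

static void *
monitor_loop(void *arg)
{
	(void)arg;
	/* housekeeping work; never scheduled on datapath lcores */
	return NULL;
}

static int
spawn_monitor(void)
{
	pthread_t tid;

	/* Name is capped at 16 characters including the terminating '\0'. */
	return rte_ctrl_thread_create(&tid, "demo-monitor", NULL,
				      monitor_loop, NULL);
}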
+/**
* Test if the core supplied has a specific role
*
* @param lcore_id
@@ -255,7 +311,7 @@ int rte_thread_setname(pthread_t id, const char *name);
* @param role
* The role to be checked against.
* @return
- * On success, return 0; otherwise return a negative value.
+ *   Boolean value: positive if the test is true; otherwise 0.
*/
int
rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role);
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
index 9029c785..2f789cb9 100644
--- a/lib/librte_eal/common/include/rte_log.h
+++ b/lib/librte_eal/common/include/rte_log.h
@@ -20,6 +20,7 @@ extern "C" {
#include <stdint.h>
#include <stdio.h>
#include <stdarg.h>
+#include <sys/queue.h>
#include <rte_common.h>
#include <rte_config.h>
@@ -129,16 +130,28 @@ uint32_t rte_log_get_global_level(void);
int rte_log_get_level(uint32_t logtype);
/**
- * Set the log level for a given type.
+ * Set the log level for a given type based on a shell pattern.
*
* @param pattern
- * The regexp identifying the log type.
+ * The match pattern identifying the log type.
+ * @param level
+ * The level to be set.
+ * @return
+ * 0 on success, a negative value if level is invalid.
+ */
+int rte_log_set_level_pattern(const char *pattern, uint32_t level);
+
+/**
+ * Set the log level for a given type based on a regular expression.
+ *
+ * @param regex
+ * The regular expression identifying the log type.
* @param level
* The level to be set.
* @return
* 0 on success, a negative value if level is invalid.
*/
-int rte_log_set_level_regexp(const char *pattern, uint32_t level);
+int rte_log_set_level_regexp(const char *regex, uint32_t level);
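
Editorial aside: a sketch contrasting the two matching styles introduced above. "pmd.net.*" is a shell-style glob for the pattern variant, while "^pmd\.net\..*" is a POSIX regexp for the regexp variant; the log type names are illustrative.

#include <rte_log.h>

static void
quiet_net_pmds(void)
{
	/* both calls return 0 on success, negative if the level is invalid */
	rte_log_set_level_pattern("pmd.net.*", RTE_LOG_NOTICE);
	rte_log_set_level_regexp("^pmd\\.net\\..*", RTE_LOG_NOTICE);
}
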
/**
* Set the log level for a given type.
@@ -195,6 +208,27 @@ int rte_log_cur_msg_logtype(void);
int rte_log_register(const char *name);
/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a dynamic log type and try to pick its level from EAL options
+ *
+ * rte_log_register() is called internally. If successful, the function
+ * searches for a matching regexp in the list of EAL log level options and
+ * picks the level from the last matching entry. If nothing in the list
+ * matches, the level is set to the user-defined default value.
+ *
+ * @param name
+ * Name for the log type to be registered
+ * @param level_def
+ * Fallback level to be set if the global list has no matching options
+ * @return
+ * - >=0: the newly registered log type
+ * - <0: rte_log_register() error value
+ */
+int rte_log_register_type_and_pick_level(const char *name, uint32_t level_def);
+
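
Editorial aside: a sketch of registering a driver log type that honors any --log-level EAL options already given, falling back to INFO. The type name "pmd.net.mydrv" is made up; RTE_INIT is used in the brace form this release introduces (see the rte_tailq.h change later in this patch).

#include <rte_common.h>
#include <rte_log.h>

static int my_logtype;

RTE_INIT(mydrv_log_init)
{
	my_logtype = rte_log_register_type_and_pick_level("pmd.net.mydrv",
							  RTE_LOG_INFO);
	if (my_logtype < 0)
		my_logtype = 0; /* fall back to RTE_LOGTYPE_EAL */
}
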
+/**
* Dump log information.
*
* Dump the global level and the registered log types.
diff --git a/lib/librte_eal/common/include/rte_malloc.h b/lib/librte_eal/common/include/rte_malloc.h
index f02a8ba1..a9fb7e45 100644
--- a/lib/librte_eal/common/include/rte_malloc.h
+++ b/lib/librte_eal/common/include/rte_malloc.h
@@ -13,6 +13,7 @@
#include <stdio.h>
#include <stddef.h>
+#include <rte_compat.h>
#include <rte_memory.h>
#ifdef __cplusplus
@@ -278,6 +279,15 @@ void
rte_malloc_dump_stats(FILE *f, const char *type);
/**
+ * Dump contents of all malloc heaps to a file.
+ *
+ * @param f
+ * A pointer to a file for output
+ */
+void __rte_experimental
+rte_malloc_dump_heaps(FILE *f);
+
+/**
* Set the maximum amount of allocated memory for this type.
*
* This is not yet implemented
diff --git a/lib/librte_eal/common/include/rte_malloc_heap.h b/lib/librte_eal/common/include/rte_malloc_heap.h
index ba99ed90..d43fa909 100644
--- a/lib/librte_eal/common/include/rte_malloc_heap.h
+++ b/lib/librte_eal/common/include/rte_malloc_heap.h
@@ -13,12 +13,18 @@
/* Number of free lists per heap, grouped by size. */
#define RTE_HEAP_NUM_FREELISTS 13
+/* dummy definition, for pointers */
+struct malloc_elem;
+
/**
* Structure to hold malloc heap
*/
struct malloc_heap {
rte_spinlock_t lock;
LIST_HEAD(, malloc_elem) free_head[RTE_HEAP_NUM_FREELISTS];
+ struct malloc_elem *volatile first;
+ struct malloc_elem *volatile last;
+
unsigned alloc_count;
size_t total_size;
} __rte_cache_aligned;
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
index 302f865b..c4b7f4cf 100644
--- a/lib/librte_eal/common/include/rte_memory.h
+++ b/lib/librte_eal/common/include/rte_memory.h
@@ -20,8 +20,12 @@ extern "C" {
#endif
#include <rte_common.h>
+#include <rte_compat.h>
#include <rte_config.h>
+/* forward declaration for pointers */
+struct rte_memseg_list;
+
__extension__
enum rte_page_sizes {
RTE_PGSIZE_4K = 1ULL << 12,
@@ -79,6 +83,8 @@ typedef uint64_t rte_iova_t;
/**
* Physical memory segment descriptor.
*/
+#define RTE_MEMSEG_FLAG_DO_NOT_FREE (1 << 0)
+/**< Prevent this segment from being freed back to the OS. */
struct rte_memseg {
RTE_STD_C11
union {
@@ -95,6 +101,7 @@ struct rte_memseg {
int32_t socket_id; /**< NUMA socket ID. */
uint32_t nchannel; /**< Number of channels. */
uint32_t nrank; /**< Number of ranks. */
+ uint32_t flags; /**< Memseg-specific flags */
} __rte_packed;
/**
@@ -130,25 +137,192 @@ phys_addr_t rte_mem_virt2phy(const void *virt);
rte_iova_t rte_mem_virt2iova(const void *virt);
/**
- * Get the layout of the available physical memory.
+ * Get virtual memory address corresponding to iova address.
+ *
+ * @note This function read-locks the memory hotplug subsystem, and thus cannot
+ * be used within memory-related callback functions.
+ *
+ * @param iova
+ * The iova address.
+ * @return
+ * Virtual address corresponding to iova address (or NULL if address does not
+ * exist within DPDK memory map).
+ */
+__rte_experimental void *
+rte_mem_iova2virt(rte_iova_t iova);
+
+/**
+ * Get memseg to which a particular virtual address belongs.
+ *
+ * @param virt
+ * The virtual address.
+ * @param msl
+ * The memseg list in which to look up based on ``virt`` address
+ * (can be NULL).
+ * @return
+ * Memseg pointer on success, or NULL on error.
+ */
+__rte_experimental struct rte_memseg *
+rte_mem_virt2memseg(const void *virt, const struct rte_memseg_list *msl);
+
+/**
+ * Get memseg list corresponding to virtual memory address.
+ *
+ * @param virt
+ * The virtual address.
+ * @return
+ * Memseg list to which this virtual address belongs to.
+ */
+__rte_experimental struct rte_memseg_list *
+rte_mem_virt2memseg_list(const void *virt);
+
+/**
+ * Memseg walk function prototype.
+ *
+ * Returning 0 will continue walk
+ * Returning 1 will stop the walk
+ * Returning -1 will stop the walk and report error
+ */
+typedef int (*rte_memseg_walk_t)(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, void *arg);
+
+/**
+ * Memseg contig walk function prototype. This will trigger a callback on every
+ * VA-contiguous area starting at memseg ``ms``, so the total valid VA space at
+ * each callback call will be [``ms->addr``, ``ms->addr + len``).
+ *
+ * Returning 0 will continue walk
+ * Returning 1 will stop the walk
+ * Returning -1 will stop the walk and report error
+ */
+typedef int (*rte_memseg_contig_walk_t)(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg);
+
+/**
+ * Memseg list walk function prototype. This will trigger a callback on every
+ * allocated memseg list.
+ *
+ * Returning 0 will continue walk
+ * Returning 1 will stop the walk
+ * Returning -1 will stop the walk and report error
+ */
+typedef int (*rte_memseg_list_walk_t)(const struct rte_memseg_list *msl,
+ void *arg);
+
+/**
+ * Walk list of all memsegs.
+ *
+ * @note This function read-locks the memory hotplug subsystem, and thus cannot
+ * be used within memory-related callback functions.
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ * @return
+ * 0 if walked over the entire list
+ * 1 if stopped by the user
+ * -1 if user function reported error
+ */
+int __rte_experimental
+rte_memseg_walk(rte_memseg_walk_t func, void *arg);
+
+/**
+ * Walk each VA-contiguous area.
+ *
+ * @note This function read-locks the memory hotplug subsystem, and thus cannot
+ * be used within memory-related callback functions.
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ * @return
+ * 0 if walked over the entire list
+ * 1 if stopped by the user
+ * -1 if user function reported error
+ */
+int __rte_experimental
+rte_memseg_contig_walk(rte_memseg_contig_walk_t func, void *arg);
+
+/**
+ * Walk each allocated memseg list.
+ *
+ * @note This function read-locks the memory hotplug subsystem, and thus cannot
+ * be used within memory-related callback functions.
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ * @return
+ * 0 if walked over the entire list
+ * 1 if stopped by the user
+ * -1 if user function reported error
+ */
+int __rte_experimental
+rte_memseg_list_walk(rte_memseg_list_walk_t func, void *arg);
+
+/**
+ * Walk list of all memsegs without performing any locking.
+ *
+ * @note This function does not perform any locking, and is only safe to call
+ * from within memory-related callback functions.
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ * @return
+ * 0 if walked over the entire list
+ * 1 if stopped by the user
+ * -1 if user function reported error
+ */
+int __rte_experimental
+rte_memseg_walk_thread_unsafe(rte_memseg_walk_t func, void *arg);
+
+/**
+ * Walk each VA-contiguous area without performing any locking.
*
- * It can be useful for an application to have the full physical
- * memory layout to decide the size of a memory zone to reserve. This
- * table is stored in rte_config (see rte_eal_get_configuration()).
+ * @note This function does not perform any locking, and is only safe to call
+ * from within memory-related callback functions.
*
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
* @return
- * - On success, return a pointer to a read-only table of struct
- * rte_physmem_desc elements, containing the layout of all
- * addressable physical memory. The last element of the table
- * contains a NULL address.
- * - On error, return NULL. This should not happen since it is a fatal
- * error that will probably cause the entire system to panic.
+ * 0 if walked over the entire list
+ * 1 if stopped by the user
+ * -1 if user function reported error
*/
-const struct rte_memseg *rte_eal_get_physmem_layout(void);
+int __rte_experimental
+rte_memseg_contig_walk_thread_unsafe(rte_memseg_contig_walk_t func, void *arg);
+
+/**
+ * Walk each allocated memseg list without performing any locking.
+ *
+ * @note This function does not perform any locking, and is only safe to call
+ * from within memory-related callback functions.
+ *
+ * @param func
+ * Iterator function
+ * @param arg
+ * Argument passed to iterator
+ * @return
+ * 0 if walked over the entire list
+ * 1 if stopped by the user
+ * -1 if user function reported error
+ */
+int __rte_experimental
+rte_memseg_list_walk_thread_unsafe(rte_memseg_list_walk_t func, void *arg);
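
Editorial aside: a sketch of a walk callback that totals DPDK memory, illustrating the 0 / 1 / -1 return convention documented above; the helper names are invented.

#include <rte_memory.h>

static int
sum_memseg(const struct rte_memseg_list *msl __rte_unused,
	   const struct rte_memseg *ms, void *arg)
{
	size_t *total = arg;

	*total += ms->len;
	return 0; /* 0 = keep walking; 1 = stop; -1 = stop with error */
}

static size_t
dpdk_mem_total(void)
{
	size_t total = 0;

	/* takes the hotplug read lock: do not call from a memory callback */
	if (rte_memseg_walk(sum_memseg, &total) < 0)
		return 0;
	return total;
}
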
/**
* Dump the physical memory layout to a file.
*
+ * @note This function read-locks the memory hotplug subsystem, and thus cannot
+ * be used within memory-related callback functions.
+ *
* @param f
* A pointer to a file for output
*/
@@ -157,6 +331,9 @@ void rte_dump_physmem_layout(FILE *f);
/**
* Get the total amount of available physical memory.
*
+ * @note This function read-locks the memory hotplug subsystem, and thus cannot
+ * be used within memory-related callback functions.
+ *
* @return
* The total amount of available physical memory in bytes.
*/
@@ -191,6 +368,137 @@ unsigned rte_memory_get_nrank(void);
*/
int rte_eal_using_phys_addrs(void);
+
+/**
+ * Enum indicating which kind of memory event has happened. Used by callbacks to
+ * distinguish between memory allocations and deallocations.
+ */
+enum rte_mem_event {
+ RTE_MEM_EVENT_ALLOC = 0, /**< Allocation event. */
+ RTE_MEM_EVENT_FREE, /**< Deallocation event. */
+};
+#define RTE_MEM_EVENT_CALLBACK_NAME_LEN 64
+/**< maximum length of callback name */
+
+/**
+ * Function typedef used to register callbacks for memory events.
+ */
+typedef void (*rte_mem_event_callback_t)(enum rte_mem_event event_type,
+ const void *addr, size_t len, void *arg);
+
+/**
+ * Function used to register callbacks for memory events.
+ *
+ * @note callbacks will happen while memory hotplug subsystem is write-locked,
+ * therefore some functions (e.g. `rte_memseg_walk()`) will cause a
+ * deadlock when called from within such callbacks.
+ *
+ * @note mem event callbacks not being supported is an expected error condition,
+ * so user code needs to handle this situation. In these cases, the return
+ * value will be -1, and rte_errno will be set to ENOTSUP.
+ *
+ * @param name
+ * Name associated with specified callback to be added to the list.
+ *
+ * @param clb
+ * Callback function pointer.
+ *
+ * @param arg
+ * Argument to pass to the callback.
+ *
+ * @return
+ * 0 on successful callback register
+ * -1 on unsuccessful callback register, with rte_errno value indicating
+ * reason for failure.
+ */
+int __rte_experimental
+rte_mem_event_callback_register(const char *name, rte_mem_event_callback_t clb,
+ void *arg);
+
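
Editorial aside: a minimal event callback sketch. Since it may run under the hotplug write lock, it must not call rte_memseg_walk() and friends; only the thread-unsafe walk variants are legal here. The callback name and registration string are illustrative.

#include <rte_log.h>
#include <rte_memory.h>

static void
mem_event(enum rte_mem_event event_type, const void *addr, size_t len,
	  void *arg __rte_unused)
{
	RTE_LOG(INFO, EAL, "%s: %p, %zu bytes\n",
		event_type == RTE_MEM_EVENT_ALLOC ? "alloc" : "free",
		addr, len);
}

/* registered via rte_mem_event_callback_register("my-app", mem_event, NULL);
 * expect -1 with rte_errno == ENOTSUP when callbacks are unsupported.
 */
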
+/**
+ * Function used to unregister callbacks for memory events.
+ *
+ * @param name
+ * Name associated with specified callback to be removed from the list.
+ *
+ * @param arg
+ * Argument to look for among callbacks with specified callback name.
+ *
+ * @return
+ * 0 on successful callback unregister
+ * -1 on unsuccessful callback unregister, with rte_errno value indicating
+ * reason for failure.
+ */
+int __rte_experimental
+rte_mem_event_callback_unregister(const char *name, void *arg);
+
+
+#define RTE_MEM_ALLOC_VALIDATOR_NAME_LEN 64
+/**< maximum length of alloc validator name */
+/**
+ * Function typedef used to register memory allocation validation callbacks.
+ *
+ * Returning 0 will allow allocation attempt to continue. Returning -1 will
+ * prevent allocation from succeeding.
+ */
+typedef int (*rte_mem_alloc_validator_t)(int socket_id,
+ size_t cur_limit, size_t new_len);
+
+/**
+ * @brief Register validator callback for memory allocations.
+ *
+ * Callbacks registered by this function will be called right before the memory
+ * allocator triggers allocation of more pages from the system, if that
+ * allocation would bring total memory usage above the specified limit on the
+ * specified socket. The user can cancel the pending allocation by having the
+ * callback return -1.
+ *
+ * @note callbacks will happen while memory hotplug subsystem is write-locked,
+ * therefore some functions (e.g. `rte_memseg_walk()`) will cause a
+ * deadlock when called from within such callbacks.
+ *
+ * @note validator callbacks not being supported is an expected error condition,
+ * so user code needs to handle this situation. In these cases, the return
+ * value will be -1, and rte_errno will be set to ENOTSUP.
+ *
+ * @param name
+ * Name associated with specified callback to be added to the list.
+ *
+ * @param clb
+ * Callback function pointer.
+ *
+ * @param socket_id
+ * Socket ID on which to watch for allocations.
+ *
+ * @param limit
+ * Limit above which to trigger callbacks.
+ *
+ * @return
+ * 0 on successful callback register
+ * -1 on unsuccessful callback register, with rte_errno value indicating
+ * reason for failure.
+ */
+int __rte_experimental
+rte_mem_alloc_validator_register(const char *name,
+ rte_mem_alloc_validator_t clb, int socket_id, size_t limit);
+
+/**
+ * @brief Unregister validator callback for memory allocations.
+ *
+ * @param name
+ * Name associated with specified callback to be removed from the list.
+ *
+ * @param socket_id
+ * Socket ID on which to watch for allocations.
+ *
+ * @return
+ * 0 on successful callback unregister
+ * -1 on unsuccessful callback unregister, with rte_errno value indicating
+ * reason for failure.
+ */
+int __rte_experimental
+rte_mem_alloc_validator_unregister(const char *name, int socket_id);
+
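
Editorial aside: a sketch of a validator vetoing growth beyond the registered limit on socket 0, under the semantics documented above; the names and the 1 GiB figure are illustrative. It would be registered with rte_mem_alloc_validator_register("cap", limit_socket0, 0, 1 << 30);

#include <rte_memory.h>

static int
limit_socket0(int socket_id, size_t cur_limit, size_t new_len)
{
	/* returning -1 vetoes the allocation, 0 lets it proceed */
	if (socket_id == 0 && new_len > cur_limit)
		return -1;
	return 0;
}
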
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/rte_memzone.h b/lib/librte_eal/common/include/rte_memzone.h
index 2bfb2731..f478fa9e 100644
--- a/lib/librte_eal/common/include/rte_memzone.h
+++ b/lib/librte_eal/common/include/rte_memzone.h
@@ -23,6 +23,7 @@
*/
#include <stdio.h>
+#include <rte_compat.h>
#include <rte_memory.h>
#include <rte_common.h>
@@ -39,6 +40,7 @@ extern "C" {
#define RTE_MEMZONE_512MB 0x00040000 /**< Use 512MB pages. */
#define RTE_MEMZONE_4GB 0x00080000 /**< Use 4GB pages. */
#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004 /**< Use available page size */
+#define RTE_MEMZONE_IOVA_CONTIG 0x00100000 /**< Ask for IOVA-contiguous memzone. */
/**
* A structure describing a memzone, which is a contiguous portion of
@@ -66,7 +68,6 @@ struct rte_memzone {
int32_t socket_id; /**< NUMA socket ID. */
uint32_t flags; /**< Characteristics of this memzone. */
- uint32_t memseg_id; /**< Memseg it belongs. */
} __attribute__((__packed__));
/**
@@ -76,6 +77,17 @@ struct rte_memzone {
* correctly filled memzone descriptor. If the allocation cannot be
* done, return NULL.
*
+ * @note Reserving memzones with len set to 0 will only attempt to allocate
+ * memzones from memory that is already available. It will not trigger any
+ * new allocations.
+ *
+ * @note When reserving memzones with len set to 0, it is preferable to also
+ * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but
+ * will likely not yield expected results. Specifically, the resulting memzone
+ * may not necessarily be the biggest memzone available, but rather the biggest
+ * memzone available on the socket ID corresponding to the lcore from which the
+ * reservation was called.
+ *
* @param name
* The name of the memzone. If it already exists, the function will
* fail and return NULL.
@@ -102,6 +114,9 @@ struct rte_memzone {
* If this flag is not set, the function
* will return error on an unavailable size
* request.
+ * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ * This option should be used when allocating
+ * memory intended for hardware rings etc.
* @return
* A pointer to a correctly-filled read-only memzone descriptor, or NULL
* on error.
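
Editorial aside: a sketch of the intended use of the new RTE_MEMZONE_IOVA_CONTIG flag, reserving a physically contiguous zone for a hardware ring; the zone name and size are illustrative.

#include <rte_lcore.h>
#include <rte_memzone.h>

static const struct rte_memzone *
ring_zone(void)
{
	/* IOVA-contiguous memory is what hardware descriptor rings need */
	return rte_memzone_reserve("hw_ring", 16384, (int)rte_socket_id(),
				   RTE_MEMZONE_IOVA_CONTIG);
}
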
@@ -126,6 +141,17 @@ const struct rte_memzone *rte_memzone_reserve(const char *name,
* descriptor. If the allocation cannot be done or if the alignment
* is not a power of 2, returns NULL.
*
+ * @note Reserving memzones with len set to 0 will only attempt to allocate
+ * memzones from memory that is already available. It will not trigger any
+ * new allocations.
+ *
+ * @note When reserving memzones with len set to 0, it is preferable to also
+ * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but
+ * will likely not yield expected results. Specifically, the resulting memzone
+ * may not necessarily be the biggest memzone available, but rather the biggest
+ * memzone available on the socket ID corresponding to the lcore from which the
+ * reservation was called.
+ *
* @param name
* The name of the memzone. If it already exists, the function will
* fail and return NULL.
@@ -152,6 +178,9 @@ const struct rte_memzone *rte_memzone_reserve(const char *name,
* If this flag is not set, the function
* will return error on an unavailable size
* request.
+ * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ * This option should be used when allocating
+ * memory intended for hardware rings etc.
* @param align
* Alignment for resulting memzone. Must be a power of 2.
* @return
@@ -181,6 +210,17 @@ const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
 * boundary. That implies that the requested length should be less than
 * or equal to the boundary.
*
+ * @note Reserving memzones with len set to 0 will only attempt to allocate
+ * memzones from memory that is already available. It will not trigger any
+ * new allocations.
+ *
+ * @note When reserving memzones with len set to 0, it is preferable to also
+ * set a valid socket_id. Setting socket_id to SOCKET_ID_ANY is supported, but
+ * will likely not yield expected results. Specifically, the resulting memzone
+ * may not necessarily be the biggest memzone available, but rather the biggest
+ * memzone available on the socket ID corresponding to the lcore from which the
+ * reservation was called.
+ *
* @param name
* The name of the memzone. If it already exists, the function will
* fail and return NULL.
@@ -207,6 +247,9 @@ const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
* If this flag is not set, the function
* will return error on an unavailable size
* request.
+ * - RTE_MEMZONE_IOVA_CONTIG - Ensure reserved memzone is IOVA-contiguous.
+ * This option should be used when allocating
+ * memory intended for hardware rings etc.
* @param align
* Alignment for resulting memzone. Must be a power of 2.
* @param bound
diff --git a/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h b/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h
index 08222510..e12c2208 100644
--- a/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h
+++ b/lib/librte_eal/common/include/rte_pci_dev_feature_defs.h
@@ -1,59 +1,5 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _RTE_PCI_DEV_DEFS_H_
diff --git a/lib/librte_eal/common/include/rte_pci_dev_features.h b/lib/librte_eal/common/include/rte_pci_dev_features.h
index 67b986a6..6104123d 100644
--- a/lib/librte_eal/common/include/rte_pci_dev_features.h
+++ b/lib/librte_eal/common/include/rte_pci_dev_features.h
@@ -1,59 +1,5 @@
-/*-
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _RTE_PCI_DEV_FEATURES_H
diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h
index 63bb2808..b2ca1c20 100644
--- a/lib/librte_eal/common/include/rte_random.h
+++ b/lib/librte_eal/common/include/rte_random.h
@@ -31,7 +31,7 @@ extern "C" {
static inline void
rte_srand(uint64_t seedval)
{
- srand48((long unsigned int)seedval);
+ srand48((long)seedval);
}
/**
@@ -48,9 +48,9 @@ static inline uint64_t
rte_rand(void)
{
uint64_t val;
- val = lrand48();
+ val = (uint64_t)lrand48();
val <<= 32;
- val += lrand48();
+ val += (uint64_t)lrand48();
return val;
}
diff --git a/lib/librte_eal/common/include/rte_service.h b/lib/librte_eal/common/include/rte_service.h
index 211eb376..34b41aff 100644
--- a/lib/librte_eal/common/include/rte_service.h
+++ b/lib/librte_eal/common/include/rte_service.h
@@ -47,9 +47,6 @@ extern "C" {
#define RTE_SERVICE_CAP_MT_SAFE (1 << 0)
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Return the number of services registered.
*
* The number of services registered can be passed to *rte_service_get_by_id*,
@@ -57,12 +54,9 @@ extern "C" {
*
* @return The number of services registered.
*/
-uint32_t __rte_experimental rte_service_get_count(void);
+uint32_t rte_service_get_count(void);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Return the id of a service by name.
*
* This function provides the id of the service using the service name as
@@ -84,24 +78,17 @@ uint32_t __rte_experimental rte_service_get_count(void);
* @retval -EINVAL Null *service_id* pointer provided
* @retval -ENODEV No such service registered
*/
-int32_t __rte_experimental rte_service_get_by_name(const char *name,
- uint32_t *service_id);
+int32_t rte_service_get_by_name(const char *name, uint32_t *service_id);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Return the name of the service.
*
* @return A pointer to the name of the service. The returned pointer remains
* in ownership of the service, and the application must not free it.
*/
-const char __rte_experimental *rte_service_get_name(uint32_t id);
+const char *rte_service_get_name(uint32_t id);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Check if a service has a specific capability.
*
 * This function returns whether *service* implements *capability*.
@@ -109,13 +96,9 @@ const char __rte_experimental *rte_service_get_name(uint32_t id);
* @retval 1 Capability supported by this service instance
* @retval 0 Capability not supported by this service instance
*/
-int32_t __rte_experimental rte_service_probe_capability(uint32_t id,
- uint32_t capability);
+int32_t rte_service_probe_capability(uint32_t id, uint32_t capability);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Map or unmap a lcore to a service.
*
* Each core can be added or removed from running a specific service. This
@@ -134,13 +117,10 @@ int32_t __rte_experimental rte_service_probe_capability(uint32_t id,
* @retval 0 lcore map updated successfully
* @retval -EINVAL An invalid service or lcore was provided.
*/
-int32_t __rte_experimental rte_service_map_lcore_set(uint32_t service_id,
- uint32_t lcore, uint32_t enable);
+int32_t rte_service_map_lcore_set(uint32_t service_id, uint32_t lcore,
+ uint32_t enable);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Retrieve the mapping of an lcore to a service.
*
* @param service_id the service to apply the lcore to
@@ -150,13 +130,9 @@ int32_t __rte_experimental rte_service_map_lcore_set(uint32_t service_id,
* @retval 0 lcore is not mapped to service
* @retval -EINVAL An invalid service or lcore was provided.
*/
-int32_t __rte_experimental rte_service_map_lcore_get(uint32_t service_id,
- uint32_t lcore);
+int32_t rte_service_map_lcore_get(uint32_t service_id, uint32_t lcore);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Set the runstate of the service.
*
* Each service is either running or stopped. Setting a non-zero runstate
@@ -168,12 +144,9 @@ int32_t __rte_experimental rte_service_map_lcore_get(uint32_t service_id,
* @retval 0 The service was successfully started
* @retval -EINVAL Invalid service id
*/
-int32_t __rte_experimental rte_service_runstate_set(uint32_t id, uint32_t runstate);
+int32_t rte_service_runstate_set(uint32_t id, uint32_t runstate);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Get the runstate for the service with *id*. See *rte_service_runstate_set*
* for details of runstates. A service can call this function to ensure that
* the application has indicated that it will receive CPU cycles. Either a
@@ -186,12 +159,29 @@ int32_t __rte_experimental rte_service_runstate_set(uint32_t id, uint32_t runsta
* @retval 0 Service is stopped
* @retval -EINVAL Invalid service id
*/
-int32_t __rte_experimental rte_service_runstate_get(uint32_t id);
+int32_t rte_service_runstate_get(uint32_t id);
/**
* @warning
- * @b EXPERIMENTAL: this API may change without prior notice
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * This function returns whether the service may be currently executing on
+ * at least one lcore, or definitely is not. This function can be used to
+ * determine if, after setting the service runstate to stopped, the service
+ * is still executing on a service lcore.
*
+ * Care must be taken if calling this function when the service runstate is
+ * running, since the result of this function may be incorrect by the time the
+ * function returns due to service cores running in parallel.
+ *
+ * @retval 1 Service may be running on one or more lcores
+ * @retval 0 Service is not running on any lcore
+ * @retval -EINVAL Invalid service id
+ */
+int32_t __rte_experimental
+rte_service_may_be_active(uint32_t id);
+
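
Editorial aside: a sketch of the stop-then-drain pattern rte_service_may_be_active() enables; the helper name is invented.

#include <rte_pause.h>
#include <rte_service.h>

static void
stop_and_drain(uint32_t id)
{
	rte_service_runstate_set(id, 0);
	/* runstate is now stopped, but a core may still be mid-iteration */
	while (rte_service_may_be_active(id) == 1)
		rte_pause();
}
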
+/**
* Enable or disable the check for a service-core being mapped to the service.
 * An application can disable the check when it takes the responsibility to run a
* service itself using *rte_service_run_iter_on_app_lcore*.
@@ -202,13 +192,9 @@ int32_t __rte_experimental rte_service_runstate_get(uint32_t id);
* @retval 0 Success
* @retval -EINVAL Invalid service ID
*/
-int32_t __rte_experimental rte_service_set_runstate_mapped_check(uint32_t id,
- int32_t enable);
+int32_t rte_service_set_runstate_mapped_check(uint32_t id, int32_t enable);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* This function runs a service callback from a non-service lcore.
*
* This function is designed to enable gradual porting to service cores, and
@@ -241,13 +227,10 @@ int32_t __rte_experimental rte_service_set_runstate_mapped_check(uint32_t id,
* @retval -ENOEXEC Service is not in a run-able state
* @retval -EINVAL Invalid service id
*/
-int32_t __rte_experimental rte_service_run_iter_on_app_lcore(uint32_t id,
+int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
uint32_t serialize_multithread_unsafe);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Start a service core.
*
* Starting a core makes the core begin polling. Any services assigned to it
@@ -259,12 +242,9 @@ int32_t __rte_experimental rte_service_run_iter_on_app_lcore(uint32_t id,
* @retval -EINVAL Failed to start core. The *lcore_id* passed in is not
* currently assigned to be a service core.
*/
-int32_t __rte_experimental rte_service_lcore_start(uint32_t lcore_id);
+int32_t rte_service_lcore_start(uint32_t lcore_id);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Stop a service core.
*
* Stopping a core makes the core become idle, but remains assigned as a
@@ -278,12 +258,9 @@ int32_t __rte_experimental rte_service_lcore_start(uint32_t lcore_id);
* The application must stop the service first, and then stop the
* lcore.
*/
-int32_t __rte_experimental rte_service_lcore_stop(uint32_t lcore_id);
+int32_t rte_service_lcore_stop(uint32_t lcore_id);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Adds lcore to the list of service cores.
*
* This functions can be used at runtime in order to modify the service core
@@ -294,12 +271,9 @@ int32_t __rte_experimental rte_service_lcore_stop(uint32_t lcore_id);
* @retval -EALREADY lcore is already added to the service core list
* @retval -EINVAL Invalid lcore provided
*/
-int32_t __rte_experimental rte_service_lcore_add(uint32_t lcore);
+int32_t rte_service_lcore_add(uint32_t lcore);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Removes lcore from the list of service cores.
*
* This can fail if the core is not stopped, see *rte_service_core_stop*.
@@ -308,12 +282,9 @@ int32_t __rte_experimental rte_service_lcore_add(uint32_t lcore);
* @retval -EBUSY Lcore is not stopped, stop service core before removing.
* @retval -EINVAL failed to add lcore to service core mask.
*/
-int32_t __rte_experimental rte_service_lcore_del(uint32_t lcore);
+int32_t rte_service_lcore_del(uint32_t lcore);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Retrieve the number of service cores currently available.
*
* This function returns the integer count of service cores available. The
@@ -325,24 +296,18 @@ int32_t __rte_experimental rte_service_lcore_del(uint32_t lcore);
*
* @return The number of service cores currently configured.
*/
-int32_t __rte_experimental rte_service_lcore_count(void);
+int32_t rte_service_lcore_count(void);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Resets all service core mappings. This does not remove the service cores
* from duty, just unmaps all services / cores, and stops() the service cores.
* The runstate of services is not modified.
*
* @retval 0 Success
*/
-int32_t __rte_experimental rte_service_lcore_reset_all(void);
+int32_t rte_service_lcore_reset_all(void);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Enable or disable statistics collection for *service*.
*
* This function enables per core, per-service cycle count collection.
@@ -351,13 +316,9 @@ int32_t __rte_experimental rte_service_lcore_reset_all(void);
* @retval 0 Success
* @retval -EINVAL Invalid service pointer passed
*/
-int32_t __rte_experimental rte_service_set_stats_enable(uint32_t id,
- int32_t enable);
+int32_t rte_service_set_stats_enable(uint32_t id, int32_t enable);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Retrieve the list of currently enabled service cores.
*
* This function fills in an application supplied array, with each element
@@ -373,12 +334,9 @@ int32_t __rte_experimental rte_service_set_stats_enable(uint32_t id,
* service core list. No items have been populated, call this function
* with a size of at least *rte_service_core_count* items.
*/
-int32_t __rte_experimental rte_service_lcore_list(uint32_t array[], uint32_t n);
+int32_t rte_service_lcore_list(uint32_t array[], uint32_t n);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
 * Get the number of services running on the supplied lcore.
*
* @param lcore Id of the service core.
@@ -386,19 +344,16 @@ int32_t __rte_experimental rte_service_lcore_list(uint32_t array[], uint32_t n);
* @retval -EINVAL Invalid lcore provided
* @retval -ENOTSUP The provided lcore is not a service core.
*/
-int32_t __rte_experimental rte_service_lcore_count_services(uint32_t lcore);
+int32_t rte_service_lcore_count_services(uint32_t lcore);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Dumps any information available about the service. When id is UINT32_MAX,
* this function dumps info for all services.
*
* @retval 0 Statistics have been successfully dumped
* @retval -EINVAL Invalid service id provided
*/
-int32_t __rte_experimental rte_service_dump(FILE *f, uint32_t id);
+int32_t rte_service_dump(FILE *f, uint32_t id);
/**
* Returns the number of cycles that this service has consumed
@@ -411,28 +366,58 @@ int32_t __rte_experimental rte_service_dump(FILE *f, uint32_t id);
#define RTE_SERVICE_ATTR_CALL_COUNT 1
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Get an attribute from a service.
*
* @retval 0 Success, the attribute value has been written to *attr_value*.
* -EINVAL Invalid id, attr_id or attr_value was NULL.
*/
-int32_t __rte_experimental rte_service_attr_get(uint32_t id, uint32_t attr_id,
+int32_t rte_service_attr_get(uint32_t id, uint32_t attr_id,
uint32_t *attr_value);
/**
+ * Reset all attribute values of a service.
+ *
+ * @param id The service to reset all statistics of
+ * @retval 0 Successfully reset attributes
+ * -EINVAL Invalid service id provided
+ */
+int32_t rte_service_attr_reset_all(uint32_t id);
+
+/**
+ * Returns the number of times the service runner has looped.
+ */
+#define RTE_SERVICE_LCORE_ATTR_LOOPS 0
+
+/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
- * Reset all attribute values of a service.
+ * Get an attribute from a service core.
*
- * @param id The service to reset all statistics of
+ * @param lcore Id of the service core.
+ * @param attr_id Id of the attribute to be retrieved.
+ * @param [out] attr_value Pointer to storage in which to write retrieved value.
+ * @retval 0 Success, the attribute value has been written to *attr_value*.
+ * -EINVAL Invalid lcore, attr_id or attr_value was NULL.
+ * -ENOTSUP lcore is not a service core.
+ */
+int32_t __rte_experimental
+rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
+ uint64_t *attr_value);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset all attribute values of a service core.
+ *
+ * @param lcore The service core to reset all the statistics of
* @retval 0 Successfully reset attributes
* -EINVAL Invalid service id provided
+ * -ENOTSUP lcore is not a service core.
*/
-int32_t __rte_experimental rte_service_attr_reset_all(uint32_t id);
+int32_t __rte_experimental
+rte_service_lcore_attr_reset_all(uint32_t lcore);
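
Editorial aside: a sketch reading the new per-core loop counter via the attribute API above; the helper name is invented.

#include <rte_service.h>

static uint64_t
lcore_loops(uint32_t lcore)
{
	uint64_t loops = 0;

	if (rte_service_lcore_attr_get(lcore, RTE_SERVICE_LCORE_ATTR_LOOPS,
				       &loops) != 0)
		return 0; /* -EINVAL, or -ENOTSUP if not a service core */
	return loops;
}
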
#ifdef __cplusplus
}
diff --git a/lib/librte_eal/common/include/rte_service_component.h b/lib/librte_eal/common/include/rte_service_component.h
index 9ba4aa29..c12adbc2 100644
--- a/lib/librte_eal/common/include/rte_service_component.h
+++ b/lib/librte_eal/common/include/rte_service_component.h
@@ -13,17 +13,11 @@
#include <rte_service.h>
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Signature of callback function to run a service.
*/
typedef int32_t (*rte_service_func)(void *args);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* The specification of a service.
*
* This struct contains metadata about the service itself, the callback
@@ -47,9 +41,6 @@ struct rte_service_spec {
};
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Register a new service.
*
 * A service represents a component that requires CPU time periodically to
@@ -73,14 +64,10 @@ struct rte_service_spec {
* -EINVAL Attempted to register an invalid service (eg, no callback
* set)
*/
-int32_t __rte_experimental
-rte_service_component_register(const struct rte_service_spec *spec,
- uint32_t *service_id);
+int32_t rte_service_component_register(const struct rte_service_spec *spec,
+ uint32_t *service_id);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Unregister a service component.
*
* The service being removed must be stopped before calling this function.
@@ -89,12 +76,9 @@ rte_service_component_register(const struct rte_service_spec *spec,
* @retval -EBUSY The service is currently running, stop the service before
* calling unregister. No action has been taken.
*/
-int32_t __rte_experimental rte_service_component_unregister(uint32_t id);
+int32_t rte_service_component_unregister(uint32_t id);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
 * Private function to allow EAL to initialize default mappings.
 *
 * This function iterates over all the services, and maps them to the available
@@ -107,12 +91,9 @@ int32_t __rte_experimental rte_service_component_unregister(uint32_t id);
* @retval -ENODEV Error in enabling service lcore on a service
* @retval -ENOEXEC Error when starting services
*/
-int32_t __rte_experimental rte_service_start_with_defaults(void);
+int32_t rte_service_start_with_defaults(void);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Set the backend runstate of a component.
*
* This function allows services to be registered at startup, but not yet
@@ -124,13 +105,9 @@ int32_t __rte_experimental rte_service_start_with_defaults(void);
*
* @retval 0 Success
*/
-int32_t __rte_experimental rte_service_component_runstate_set(uint32_t id,
- uint32_t runstate);
+int32_t rte_service_component_runstate_set(uint32_t id, uint32_t runstate);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* Initialize the service library.
*
* In order to use the service library, it must be initialized. EAL initializes
@@ -142,14 +119,11 @@ int32_t __rte_experimental rte_service_component_runstate_set(uint32_t id,
int32_t rte_service_init(void);
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
* @internal Free up the memory that has been initialized.
* This routine is to be invoked prior to process termination.
*
* @retval None
*/
-void __rte_experimental rte_service_finalize(void);
+void rte_service_finalize(void);
#endif /* _RTE_SERVICE_PRIVATE_H_ */
diff --git a/lib/librte_eal/common/include/rte_string_fns.h b/lib/librte_eal/common/include/rte_string_fns.h
index e97047a4..97597a14 100644
--- a/lib/librte_eal/common/include/rte_string_fns.h
+++ b/lib/librte_eal/common/include/rte_string_fns.h
@@ -15,6 +15,8 @@
extern "C" {
#endif
+#include <stdio.h>
+
/**
* Takes string "string" parameter and splits it at character "delim"
* up to maxtokens-1 times - to give "maxtokens" resulting tokens. Like
@@ -45,6 +47,35 @@ int
rte_strsplit(char *string, int stringlen,
char **tokens, int maxtokens, char delim);
+/**
+ * @internal
+ * DPDK-specific version of strlcpy for systems without
+ * libc or libbsd copies of the function
+ */
+static inline size_t
+rte_strlcpy(char *dst, const char *src, size_t size)
+{
+ return (size_t)snprintf(dst, size, "%s", src);
+}
+
+/* pull in a strlcpy function */
+#ifdef RTE_EXEC_ENV_BSDAPP
+#include <string.h>
+#ifndef __BSD_VISIBLE /* non-standard functions are hidden */
+#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size)
+#endif
+
+
+#else /* non-BSD platforms */
+#ifdef RTE_USE_LIBBSD
+#include <bsd/string.h>
+
+#else /* no BSD header files, create own */
+#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size)
+
+#endif /* RTE_USE_LIBBSD */
+#endif /* BSDAPP */
+
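
Editorial aside: whichever strlcpy the selection block above picks, the call site looks the same and the destination is always NUL-terminated; the helper name is invented.

#include <stdio.h>
#include <rte_string_fns.h>

static void
copy_name(char *dst, size_t dst_sz, const char *src)
{
	/* the return value is the full strlen(src); >= dst_sz means truncation */
	if (strlcpy(dst, src, dst_sz) >= dst_sz)
		printf("name truncated\n");
}
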
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/rte_tailq.h b/lib/librte_eal/common/include/rte_tailq.h
index 8dccaefc..9b01abb2 100644
--- a/lib/librte_eal/common/include/rte_tailq.h
+++ b/lib/librte_eal/common/include/rte_tailq.h
@@ -119,8 +119,7 @@ struct rte_tailq_head *rte_eal_tailq_lookup(const char *name);
int rte_eal_tailq_register(struct rte_tailq_elem *t);
#define EAL_REGISTER_TAILQ(t) \
-RTE_INIT(tailqinitfn_ ##t); \
-static void tailqinitfn_ ##t(void) \
+RTE_INIT(tailqinitfn_ ##t) \
{ \
if (rte_eal_tailq_register(&t) < 0) \
rte_panic("Cannot initialize tailq: %s\n", t.name); \
diff --git a/lib/librte_eal/common/include/rte_uuid.h b/lib/librte_eal/common/include/rte_uuid.h
new file mode 100644
index 00000000..2c846b5f
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_uuid.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 1996, 1997, 1998 Theodore Ts'o.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+/**
+ * @file
+ *
+ * UUID related functions originally from libuuid
+ */
+
+#ifndef _RTE_UUID_H_
+#define _RTE_UUID_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdbool.h>
+
+/**
+ * Struct describing a Universally Unique Identifier
+ */
+typedef unsigned char rte_uuid_t[16];
+
+/**
+ * Helper for defining UUID values for id tables.
+ */
+#define RTE_UUID_INIT(a, b, c, d, e) { \
+ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, \
+ ((a) >> 8) & 0xff, (a) & 0xff, \
+ ((b) >> 8) & 0xff, (b) & 0xff, \
+ ((c) >> 8) & 0xff, (c) & 0xff, \
+ ((d) >> 8) & 0xff, (d) & 0xff, \
+ ((e) >> 40) & 0xff, ((e) >> 32) & 0xff, \
+ ((e) >> 24) & 0xff, ((e) >> 16) & 0xff, \
+ ((e) >> 8) & 0xff, (e) & 0xff \
+}
+
+/**
+ * Test if UUID is all zeros.
+ *
+ * @param uu
+ * The uuid to check.
+ * @return
+ * true if uuid is NULL value, false otherwise
+ */
+bool rte_uuid_is_null(const rte_uuid_t uu);
+
+/**
+ * Copy uuid.
+ *
+ * @param dst
+ * Destination uuid
+ * @param src
+ * Source uuid
+ */
+static inline void rte_uuid_copy(rte_uuid_t dst, const rte_uuid_t src)
+{
+ memcpy(dst, src, sizeof(rte_uuid_t));
+}
+
+/**
+ * Compare two UUID's
+ *
+ * @param a
+ * A UUID to compare
+ * @param b
+ * A UUID to compare
+ * @return
+ * returns an integer less than, equal to, or greater than zero if UUID a
+ * is less than, equal to, or greater than UUID b.
+ */
+int rte_uuid_compare(const rte_uuid_t a, const rte_uuid_t b);
+
+/**
+ * Extract UUID from string
+ *
+ * @param in
+ * Pointer to string of characters to convert
+ * @param uu
+ * Destination UUID
+ * @return
+ * Returns 0 on success, and -1 if the string is not a valid UUID.
+ */
+int rte_uuid_parse(const char *in, rte_uuid_t uu);
+
+/**
+ * Convert UUID to string
+ *
+ * @param uu
+ * UUID to format
+ * @param out
+ * Resulting string buffer
+ * @param len
+ * Size of the available string buffer
+ */
+#define RTE_UUID_STRLEN (36 + 1)
+void rte_uuid_unparse(const rte_uuid_t uu, char *out, size_t len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* RTE_UUID_H */
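
Editorial aside: a sketch round-tripping a UUID through the new helpers; the sample UUID string is arbitrary.

#include <stdio.h>
#include <rte_uuid.h>

static void
uuid_demo(void)
{
	rte_uuid_t uu;
	char buf[RTE_UUID_STRLEN];

	if (rte_uuid_parse("12345678-1234-5678-1234-567812345678", uu) != 0)
		return; /* -1: not a valid UUID string */
	rte_uuid_unparse(uu, buf, sizeof(buf));
	printf("%s null=%d\n", buf, rte_uuid_is_null(uu));
}
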
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index 8173802b..7c6714a2 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -32,7 +32,7 @@ extern "C" {
/**
* Minor version/month number i.e. the mm in yy.mm.z
*/
-#define RTE_VER_MONTH 02
+#define RTE_VER_MONTH 8
/**
* Patch level number i.e. the z in yy.mm.z
diff --git a/lib/librte_eal/common/include/rte_vfio.h b/lib/librte_eal/common/include/rte_vfio.h
index e981a622..5ca13fcc 100644
--- a/lib/librte_eal/common/include/rte_vfio.h
+++ b/lib/librte_eal/common/include/rte_vfio.h
@@ -5,6 +5,15 @@
#ifndef _RTE_VFIO_H_
#define _RTE_VFIO_H_
+/**
+ * @file
+ * RTE VFIO. This library provides various VFIO related utility functions.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/*
* determine if VFIO is present on the system
*/
@@ -28,6 +37,20 @@
#define VFIO_NOIOMMU_MODE \
"/sys/module/vfio/parameters/enable_unsafe_noiommu_mode"
+/* NOIOMMU is defined from kernel version 4.5 onwards */
+#ifdef VFIO_NOIOMMU_IOMMU
+#define RTE_VFIO_NOIOMMU VFIO_NOIOMMU_IOMMU
+#else
+#define RTE_VFIO_NOIOMMU 8
+#endif
+
+#else /* not VFIO_PRESENT */
+
+/* we don't need an actual definition, only pointer is used */
+struct vfio_device_info;
+
+#endif /* VFIO_PRESENT */
+
/**
* Setup vfio_cfg for the device identified by its address.
* It discovers the configured I/O MMU groups or sets a new one for the device.
@@ -119,10 +142,226 @@ int rte_vfio_is_enabled(const char *modname);
*/
int rte_vfio_noiommu_is_enabled(void);
-/* remove group fd from internal VFIO group fd array */
+/**
+ * Remove group fd from internal VFIO group fd array.
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param vfio_group_fd
+ *   VFIO group fd.
+ *
+ * @return
+ * 0 on success.
+ * <0 on failure.
+ */
int
rte_vfio_clear_group(int vfio_group_fd);
-#endif /* VFIO_PRESENT */
+/**
+ * Map memory region for use with VFIO.
+ *
+ * @note Requires at least one device to be attached at the time of
+ *       mapping. DMA maps done via this API will only apply to the default
+ *       container and will not apply to any of the containers created
+ *       via rte_vfio_container_create().
+ *
+ * @param vaddr
+ * Starting virtual address of memory to be mapped.
+ *
+ * @param iova
+ * Starting IOVA address of memory to be mapped.
+ *
+ * @param len
+ * Length of memory segment being mapped.
+ *
+ * @return
+ * 0 if success.
+ * -1 on error.
+ */
+int
+rte_vfio_dma_map(uint64_t vaddr, uint64_t iova, uint64_t len);
+
+
+/**
+ * Unmap memory region from VFIO.
+ *
+ * @param vaddr
+ * Starting virtual address of memory to be unmapped.
+ *
+ * @param iova
+ * Starting IOVA address of memory to be unmapped.
+ *
+ * @param len
+ * Length of memory segment being unmapped.
+ *
+ * @return
+ * 0 if success.
+ * -1 on error.
+ */
+
+int
+rte_vfio_dma_unmap(uint64_t vaddr, uint64_t iova, uint64_t len);
+/**
+ * Parse IOMMU group number for a device
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param sysfs_base
+ * sysfs path prefix.
+ *
+ * @param dev_addr
+ * device location.
+ *
+ * @param iommu_group_num
+ * iommu group number
+ *
+ * @return
+ * >0 on success
+ * 0 for non-existent group or VFIO
+ * <0 for errors
+ */
+int
+rte_vfio_get_group_num(const char *sysfs_base,
+ const char *dev_addr, int *iommu_group_num);
+
+/**
+ * Open VFIO container fd or get an existing one
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @return
+ * > 0 container fd
+ * < 0 for errors
+ */
+int
+rte_vfio_get_container_fd(void);
+
+/**
+ * Open VFIO group fd or get an existing one
+ *
+ * This function is only relevant to Linux and will return
+ * an error on BSD.
+ *
+ * @param iommu_group_num
+ * iommu group number
+ *
+ * @return
+ * > 0 group fd
+ * < 0 for errors
+ */
+int
+rte_vfio_get_group_fd(int iommu_group_num);
+
+/**
+ * Create a new container for device binding.
+ *
+ * @note Any newly allocated DPDK memory will not be mapped into these
+ *       containers by default; the user needs to manage DMA mappings for
+ *       any container created by this API.
+ *
+ * @return
+ * the container fd if successful
+ * <0 if failed
+ */
+int
+rte_vfio_container_create(void);
+
+/**
+ * Destroy the container, unbind all vfio groups within it.
+ *
+ * @param container_fd
+ * the container fd to destroy
+ *
+ * @return
+ * 0 if successful
+ * <0 if failed
+ */
+int
+rte_vfio_container_destroy(int container_fd);
+
+/**
+ * Bind an IOMMU group to a container.
+ *
+ * @param container_fd
+ * the container's fd
+ *
+ * @param iommu_group_num
+ * the iommu group number to bind to container
+ *
+ * @return
+ * group fd if successful
+ * <0 if failed
+ */
+int
+rte_vfio_container_group_bind(int container_fd, int iommu_group_num);
+
+/**
+ * Unbind an IOMMU group from a container.
+ *
+ * @param container_fd
+ * the container fd of container
+ *
+ * @param iommu_group_num
+ * the iommu group number to delete from container
+ *
+ * @return
+ * 0 if successful
+ * <0 if failed
+ */
+int
+rte_vfio_container_group_unbind(int container_fd, int iommu_group_num);
+
+/**
+ * Perform DMA mapping for devices in a container.
+ *
+ * @param container_fd
+ * the specified container fd
+ *
+ * @param vaddr
+ * Starting virtual address of memory to be mapped.
+ *
+ * @param iova
+ * Starting IOVA address of memory to be mapped.
+ *
+ * @param len
+ * Length of memory segment being mapped.
+ *
+ * @return
+ * 0 if successful
+ * <0 if failed
+ */
+int
+rte_vfio_container_dma_map(int container_fd, uint64_t vaddr,
+ uint64_t iova, uint64_t len);
+
+/**
+ * Perform DMA unmapping for devices in a container.
+ *
+ * @param container_fd
+ * the specified container fd
+ *
+ * @param vaddr
+ * Starting virtual address of memory to be unmapped.
+ *
+ * @param iova
+ * Starting IOVA address of memory to be unmapped.
+ *
+ * @param len
+ * Length of memory segment being unmapped.
+ *
+ * @return
+ * 0 if successful
+ * <0 if failed
+ */
+int
+rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr,
+ uint64_t iova, uint64_t len);
+
+#ifdef __cplusplus
+}
+#endif
#endif /* _RTE_VFIO_H_ */
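
Editorial aside: a sketch of the typical container lifecycle with the new APIs above -- create a container, bind a device's IOMMU group, then map a DMA region explicitly (containers created this way get no automatic mappings). The sysfs base path and helper name are illustrative.

#include <rte_vfio.h>

static int
setup_container(const char *dev_addr, uint64_t vaddr, uint64_t iova,
		uint64_t len)
{
	int cfd, group_num;

	cfd = rte_vfio_container_create();
	if (cfd < 0)
		return -1;
	/* returns >0 on success, 0 if the group does not exist, <0 on error */
	if (rte_vfio_get_group_num("/sys/bus/pci/devices", dev_addr,
				   &group_num) <= 0)
		goto err;
	if (rte_vfio_container_group_bind(cfd, group_num) < 0)
		goto err;
	if (rte_vfio_container_dma_map(cfd, vaddr, iova, len) < 0)
		goto err;
	return cfd;
err:
	rte_vfio_container_destroy(cfd);
	return -1;
}
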
diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
index 0cadc8af..e0a8ed15 100644
--- a/lib/librte_eal/common/malloc_elem.c
+++ b/lib/librte_eal/common/malloc_elem.c
@@ -1,10 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
*/
+#include <inttypes.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
+#include <unistd.h>
#include <sys/queue.h>
#include <rte_memory.h>
@@ -16,21 +18,100 @@
#include <rte_common.h>
#include <rte_spinlock.h>
+#include "eal_internal_cfg.h"
+#include "eal_memalloc.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
-#define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)
+size_t
+malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
+{
+ void *cur_page, *contig_seg_start, *page_end, *cur_seg_end;
+ void *data_start, *data_end;
+ rte_iova_t expected_iova;
+ struct rte_memseg *ms;
+ size_t page_sz, cur, max;
+
+ page_sz = (size_t)elem->msl->page_sz;
+ data_start = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
+ data_end = RTE_PTR_ADD(elem, elem->size - MALLOC_ELEM_TRAILER_LEN);
+ /* segment must start after header and with specified alignment */
+ contig_seg_start = RTE_PTR_ALIGN_CEIL(data_start, align);
+
+ /* if we're in IOVA as VA mode, or if we're in legacy mode with
+ * hugepages, all elements are IOVA-contiguous.
+ */
+ if (rte_eal_iova_mode() == RTE_IOVA_VA ||
+ (internal_config.legacy_mem && rte_eal_has_hugepages()))
+ return RTE_PTR_DIFF(data_end, contig_seg_start);
+
+ cur_page = RTE_PTR_ALIGN_FLOOR(contig_seg_start, page_sz);
+ ms = rte_mem_virt2memseg(cur_page, elem->msl);
+
+ /* do first iteration outside the loop */
+ page_end = RTE_PTR_ADD(cur_page, page_sz);
+ cur_seg_end = RTE_MIN(page_end, data_end);
+ cur = RTE_PTR_DIFF(cur_seg_end, contig_seg_start) -
+ MALLOC_ELEM_TRAILER_LEN;
+ max = cur;
+ expected_iova = ms->iova + page_sz;
+ /* memsegs are contiguous in memory */
+ ms++;
+
+ cur_page = RTE_PTR_ADD(cur_page, page_sz);
+
+ while (cur_page < data_end) {
+ page_end = RTE_PTR_ADD(cur_page, page_sz);
+ cur_seg_end = RTE_MIN(page_end, data_end);
+
+ /* reset start of contiguous segment if unexpected iova */
+ if (ms->iova != expected_iova) {
+ /* next contiguous segment must start at specified
+ * alignment.
+ */
+ contig_seg_start = RTE_PTR_ALIGN(cur_page, align);
+ /* new segment start may be on a different page, so find
+ * the page and skip to next iteration to make sure
+ * we're not blowing past data end.
+ */
+ ms = rte_mem_virt2memseg(contig_seg_start, elem->msl);
+ cur_page = ms->addr;
+ /* don't trigger another recalculation */
+ expected_iova = ms->iova;
+ continue;
+ }
+ /* cur_seg_end ends on a page boundary or on data end. if we're
+ * looking at data end, then malloc trailer is already included
+ * in the calculations. if we're looking at page end, then we
+ * know there's more data past this page and thus there's space
+ * for malloc element trailer, so don't count it here.
+ */
+ cur = RTE_PTR_DIFF(cur_seg_end, contig_seg_start);
+ /* update max if cur value is bigger */
+ if (cur > max)
+ max = cur;
+
+ /* move to next page */
+ cur_page = page_end;
+ expected_iova = ms->iova + page_sz;
+ /* memsegs are contiguous in memory */
+ ms++;
+ }
+
+ return max;
+}
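+
+/*
+ * Illustrative walk (made-up numbers): with 4K pages, an element covering
+ * four VA pages whose IOVAs are 0x1000, 0x2000, 0x3000 and 0x9000 has two
+ * IOVA-contiguous runs (pages 1-3 and page 4); the size of the bigger run,
+ * adjusted for the requested alignment and the element trailer, is what
+ * gets returned.
+ */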
/*
* Initialize a general malloc_elem header structure
*/
void
-malloc_elem_init(struct malloc_elem *elem,
- struct malloc_heap *heap, const struct rte_memseg *ms, size_t size)
+malloc_elem_init(struct malloc_elem *elem, struct malloc_heap *heap,
+ struct rte_memseg_list *msl, size_t size)
{
elem->heap = heap;
- elem->ms = ms;
+ elem->msl = msl;
elem->prev = NULL;
+ elem->next = NULL;
memset(&elem->free_list, 0, sizeof(elem->free_list));
elem->state = ELEM_FREE;
elem->size = size;
@@ -39,15 +120,74 @@ malloc_elem_init(struct malloc_elem *elem,
set_trailer(elem);
}
+void
+malloc_elem_insert(struct malloc_elem *elem)
+{
+ struct malloc_elem *prev_elem, *next_elem;
+ struct malloc_heap *heap = elem->heap;
+
+ /* first and last elements must be both NULL or both non-NULL */
+ if ((heap->first == NULL) != (heap->last == NULL)) {
+ RTE_LOG(ERR, EAL, "Heap is probably corrupt\n");
+ return;
+ }
+
+ if (heap->first == NULL && heap->last == NULL) {
+ /* if empty heap */
+ heap->first = elem;
+ heap->last = elem;
+ prev_elem = NULL;
+ next_elem = NULL;
+ } else if (elem < heap->first) {
+ /* if lower than start */
+ prev_elem = NULL;
+ next_elem = heap->first;
+ heap->first = elem;
+ } else if (elem > heap->last) {
+ /* if higher than end */
+ prev_elem = heap->last;
+ next_elem = NULL;
+ heap->last = elem;
+ } else {
+ /* the new memory is somewhere inbetween start and end */
+ uint64_t dist_from_start, dist_from_end;
+
+ dist_from_end = RTE_PTR_DIFF(heap->last, elem);
+ dist_from_start = RTE_PTR_DIFF(elem, heap->first);
+
+ /* check which is closer, and find closest list entries */
+ if (dist_from_start < dist_from_end) {
+ prev_elem = heap->first;
+ while (prev_elem->next < elem)
+ prev_elem = prev_elem->next;
+ next_elem = prev_elem->next;
+ } else {
+ next_elem = heap->last;
+ while (next_elem->prev > elem)
+ next_elem = next_elem->prev;
+ prev_elem = next_elem->prev;
+ }
+ }
+
+ /* insert new element */
+ elem->prev = prev_elem;
+ elem->next = next_elem;
+ if (prev_elem)
+ prev_elem->next = elem;
+ if (next_elem)
+ next_elem->prev = elem;
+}
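+
+/*
+ * Note on the scan above (illustrative): the new element's address is
+ * compared against both ends of the heap, and the element list is walked
+ * from whichever end is closer; both directions find the same pair of
+ * neighbours - scanning from the closer end is purely a shortcut.
+ */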
+
/*
- * Initialize a dummy malloc_elem header for the end-of-memseg marker
+ * Attempt to find enough physically contiguous memory in this block to store
+ * our data. Assume the element has at least enough space to fit the data,
+ * so we just check the page addresses.
*/
-void
-malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
+static bool
+elem_check_phys_contig(const struct rte_memseg_list *msl,
+ void *start, size_t size)
{
- malloc_elem_init(elem, prev->heap, prev->ms, 0);
- elem->prev = prev;
- elem->state = ELEM_BUSY; /* mark busy so its never merged */
+ return eal_memalloc_is_contig(msl, start, size);
}
/*
@@ -57,27 +197,59 @@ malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
*/
static void *
elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
- size_t bound)
+ size_t bound, bool contig)
{
- const size_t bmask = ~(bound - 1);
- uintptr_t end_pt = (uintptr_t)elem +
- elem->size - MALLOC_ELEM_TRAILER_LEN;
- uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
- uintptr_t new_elem_start;
-
- /* check boundary */
- if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
- end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
- new_data_start = RTE_ALIGN_FLOOR((end_pt - size), align);
- end_pt = new_data_start + size;
- if (((end_pt - 1) & bmask) != (new_data_start & bmask))
- return NULL;
- }
+ size_t elem_size = elem->size;
+
+ /*
+ * we're allocating from the end, so adjust the size of element by
+ * alignment size.
+ */
+ while (elem_size >= size) {
+ const size_t bmask = ~(bound - 1);
+ uintptr_t end_pt = (uintptr_t)elem +
+ elem_size - MALLOC_ELEM_TRAILER_LEN;
+ uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
+ align);
+ uintptr_t new_elem_start;
+
+ /* check boundary */
+ if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
+ end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
+ new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
+ align);
+ end_pt = new_data_start + size;
+
+ if (((end_pt - 1) & bmask) != (new_data_start & bmask))
+ return NULL;
+ }
+
+ new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;
- new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;
+		/* if the new start point is before the existing start,
+ * it won't fit
+ */
+ if (new_elem_start < (uintptr_t)elem)
+ return NULL;
- /* if the new start point is before the exist start, it won't fit */
- return (new_elem_start < (uintptr_t)elem) ? NULL : (void *)new_elem_start;
+ if (contig) {
+ size_t new_data_size = end_pt - new_data_start;
+
+ /*
+ * if physical contiguousness was requested and we
+ * couldn't fit all data into one physically contiguous
+ * block, try again with lower addresses.
+ */
+ if (!elem_check_phys_contig(elem->msl,
+ (void *)new_data_start,
+ new_data_size)) {
+ elem_size -= align;
+ continue;
+ }
+ }
+ return (void *)new_elem_start;
+ }
+ return NULL;
}
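+
+/*
+ * Worked example for the boundary check above (illustrative numbers): with
+ * bound = 0x100, bmask = ~0xff. A candidate with new_data_start = 0x10f8
+ * and end_pt = 0x1108 would straddle a boundary (0x1000 != 0x1100), so
+ * end_pt is floored to 0x1100, the data start is recomputed below it, and
+ * the check is repeated before the candidate is accepted.
+ */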
/*
@@ -86,9 +258,9 @@ elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
*/
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align,
- size_t bound)
+ size_t bound, bool contig)
{
- return elem_start_pt(elem, size, align, bound) != NULL;
+ return elem_start_pt(elem, size, align, bound, contig) != NULL;
}
/*
@@ -98,18 +270,58 @@ malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align,
static void
split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
{
- struct malloc_elem *next_elem = RTE_PTR_ADD(elem, elem->size);
+ struct malloc_elem *next_elem = elem->next;
const size_t old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
const size_t new_elem_size = elem->size - old_elem_size;
- malloc_elem_init(split_pt, elem->heap, elem->ms, new_elem_size);
+ malloc_elem_init(split_pt, elem->heap, elem->msl, new_elem_size);
split_pt->prev = elem;
- next_elem->prev = split_pt;
+ split_pt->next = next_elem;
+ if (next_elem)
+ next_elem->prev = split_pt;
+ else
+ elem->heap->last = split_pt;
+ elem->next = split_pt;
elem->size = old_elem_size;
set_trailer(elem);
}
/*
+ * our malloc heap is a doubly linked list, so unlink our element on both sides.
+ */
+static void __rte_unused
+remove_elem(struct malloc_elem *elem)
+{
+ struct malloc_elem *next, *prev;
+ next = elem->next;
+ prev = elem->prev;
+
+ if (next)
+ next->prev = prev;
+ else
+ elem->heap->last = prev;
+ if (prev)
+ prev->next = next;
+ else
+ elem->heap->first = next;
+
+ elem->prev = NULL;
+ elem->next = NULL;
+}
+
+static int
+next_elem_is_adjacent(struct malloc_elem *elem)
+{
+ return elem->next == RTE_PTR_ADD(elem, elem->size);
+}
+
+static int
+prev_elem_is_adjacent(struct malloc_elem *elem)
+{
+ return elem == RTE_PTR_ADD(elem->prev, elem->prev->size);
+}
+
+/*
* Given an element size, compute its freelist index.
* We free an element into the freelist containing similarly-sized elements.
* We try to allocate elements starting with the freelist containing
@@ -162,8 +374,8 @@ malloc_elem_free_list_insert(struct malloc_elem *elem)
/*
* Remove the specified element from its heap's free list.
*/
-static void
-elem_free_list_remove(struct malloc_elem *elem)
+void
+malloc_elem_free_list_remove(struct malloc_elem *elem)
{
LIST_REMOVE(elem, free_list);
}
@@ -176,14 +388,15 @@ elem_free_list_remove(struct malloc_elem *elem)
*/
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
- size_t bound)
+ size_t bound, bool contig)
{
- struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound);
+ struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound,
+ contig);
const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
const size_t trailer_size = elem->size - old_elem_size - size -
MALLOC_ELEM_OVERHEAD;
- elem_free_list_remove(elem);
+ malloc_elem_free_list_remove(elem);
if (trailer_size > MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
/* split it, too much free space after elem */
@@ -192,6 +405,9 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
split_elem(elem, new_free_elem);
malloc_elem_free_list_insert(new_free_elem);
+
+ if (elem == elem->heap->last)
+ elem->heap->last = new_free_elem;
}
if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
@@ -230,9 +446,66 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
static inline void
join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
{
- struct malloc_elem *next = RTE_PTR_ADD(elem2, elem2->size);
+ struct malloc_elem *next = elem2->next;
elem1->size += elem2->size;
- next->prev = elem1;
+ if (next)
+ next->prev = elem1;
+ else
+ elem1->heap->last = elem1;
+ elem1->next = next;
+}
+
+struct malloc_elem *
+malloc_elem_join_adjacent_free(struct malloc_elem *elem)
+{
+ /*
+ * check if next element exists, is adjacent and is free, if so join
+ * with it, need to remove from free list.
+ */
+ if (elem->next != NULL && elem->next->state == ELEM_FREE &&
+ next_elem_is_adjacent(elem)) {
+ void *erase;
+ size_t erase_len;
+
+ /* we will want to erase the trailer and header */
+ erase = RTE_PTR_SUB(elem->next, MALLOC_ELEM_TRAILER_LEN);
+ erase_len = MALLOC_ELEM_OVERHEAD + elem->next->pad;
+
+ /* remove from free list, join to this one */
+ malloc_elem_free_list_remove(elem->next);
+ join_elem(elem, elem->next);
+
+ /* erase header, trailer and pad */
+ memset(erase, 0, erase_len);
+ }
+
+ /*
+ * check if prev element exists, is adjacent and is free, if so join
+ * with it, need to remove from free list.
+ */
+ if (elem->prev != NULL && elem->prev->state == ELEM_FREE &&
+ prev_elem_is_adjacent(elem)) {
+ struct malloc_elem *new_elem;
+ void *erase;
+ size_t erase_len;
+
+ /* we will want to erase trailer and header */
+ erase = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
+ erase_len = MALLOC_ELEM_OVERHEAD + elem->pad;
+
+ /* remove from free list, join to this one */
+ malloc_elem_free_list_remove(elem->prev);
+
+ new_elem = elem->prev;
+ join_elem(new_elem, elem);
+
+ /* erase header, trailer and pad */
+ memset(erase, 0, erase_len);
+
+ elem = new_elem;
+ }
+
+ return elem;
}
/*
@@ -240,43 +513,74 @@ join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
* blocks either immediately before or immediately after newly freed block
* are also free, the blocks are merged together.
*/
-int
+struct malloc_elem *
malloc_elem_free(struct malloc_elem *elem)
{
- if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
- return -1;
+ void *ptr;
+ size_t data_len;
- rte_spinlock_lock(&(elem->heap->lock));
- size_t sz = elem->size - sizeof(*elem) - MALLOC_ELEM_TRAILER_LEN;
- uint8_t *ptr = (uint8_t *)&elem[1];
- struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
- if (next->state == ELEM_FREE){
- /* remove from free list, join to this one */
- elem_free_list_remove(next);
- join_elem(elem, next);
- sz += (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
- }
+ ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
+ data_len = elem->size - MALLOC_ELEM_OVERHEAD;
+
+ elem = malloc_elem_join_adjacent_free(elem);
- /* check if previous element is free, if so join with it and return,
- * need to re-insert in free list, as that element's size is changing
- */
- if (elem->prev != NULL && elem->prev->state == ELEM_FREE) {
- elem_free_list_remove(elem->prev);
- join_elem(elem->prev, elem);
- sz += (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
- ptr -= (sizeof(*elem) + MALLOC_ELEM_TRAILER_LEN);
- elem = elem->prev;
- }
malloc_elem_free_list_insert(elem);
+ elem->pad = 0;
+
/* decrease heap's count of allocated elements */
elem->heap->alloc_count--;
- memset(ptr, 0, sz);
+ memset(ptr, 0, data_len);
- rte_spinlock_unlock(&(elem->heap->lock));
+ return elem;
+}
- return 0;
+/* assume all checks were already done */
+void
+malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
+{
+ struct malloc_elem *hide_start, *hide_end, *prev, *next;
+ size_t len_before, len_after;
+
+ hide_start = start;
+ hide_end = RTE_PTR_ADD(start, len);
+
+ prev = elem->prev;
+ next = elem->next;
+
+ /* we cannot do anything with non-adjacent elements */
+ if (next && next_elem_is_adjacent(elem)) {
+ len_after = RTE_PTR_DIFF(next, hide_end);
+ if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ /* split after */
+ split_elem(elem, hide_end);
+
+ malloc_elem_free_list_insert(hide_end);
+ } else if (len_after > 0) {
+ RTE_LOG(ERR, EAL, "Unaligned element, heap is probably corrupt\n");
+ return;
+ }
+ }
+
+ /* we cannot do anything with non-adjacent elements */
+ if (prev && prev_elem_is_adjacent(elem)) {
+ len_before = RTE_PTR_DIFF(hide_start, elem);
+ if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ /* split before */
+ split_elem(elem, hide_start);
+
+ prev = elem;
+ elem = hide_start;
+
+ malloc_elem_free_list_insert(prev);
+ } else if (len_before > 0) {
+ RTE_LOG(ERR, EAL, "Unaligned element, heap is probably corrupt\n");
+ return;
+ }
+ }
+
+ remove_elem(elem);
}
/*
@@ -287,22 +591,23 @@ int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
+
/* if we request a smaller size, then always return ok */
if (elem->size >= new_size)
return 0;
- struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
- rte_spinlock_lock(&elem->heap->lock);
- if (next ->state != ELEM_FREE)
- goto err_return;
- if (elem->size + next->size < new_size)
- goto err_return;
+ /* check if there is a next element, it's free and adjacent */
+ if (!elem->next || elem->next->state != ELEM_FREE ||
+ !next_elem_is_adjacent(elem))
+ return -1;
+ if (elem->size + elem->next->size < new_size)
+ return -1;
/* we now know the element fits, so remove from free list,
* join the two
*/
- elem_free_list_remove(next);
- join_elem(elem, next);
+ malloc_elem_free_list_remove(elem->next);
+ join_elem(elem, elem->next);
if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
/* now we have a big block together. Lets cut it down a bit, by splitting */
@@ -311,10 +616,28 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
split_elem(elem, split_pt);
malloc_elem_free_list_insert(split_pt);
}
- rte_spinlock_unlock(&elem->heap->lock);
return 0;
+}
-err_return:
- rte_spinlock_unlock(&elem->heap->lock);
- return -1;
+static inline const char *
+elem_state_to_str(enum elem_state state)
+{
+ switch (state) {
+ case ELEM_PAD:
+ return "PAD";
+ case ELEM_BUSY:
+ return "BUSY";
+ case ELEM_FREE:
+ return "FREE";
+ }
+ return "ERROR";
+}
+
+void
+malloc_elem_dump(const struct malloc_elem *elem, FILE *f)
+{
+ fprintf(f, "Malloc element at %p (%s)\n", elem,
+ elem_state_to_str(elem->state));
+ fprintf(f, " len: 0x%zx pad: 0x%" PRIx32 "\n", elem->size, elem->pad);
+ fprintf(f, " prev: %p next: %p\n", elem->prev, elem->next);
}
diff --git a/lib/librte_eal/common/malloc_elem.h b/lib/librte_eal/common/malloc_elem.h
index f4c1c7a9..e2bda4c0 100644
--- a/lib/librte_eal/common/malloc_elem.h
+++ b/lib/librte_eal/common/malloc_elem.h
@@ -5,7 +5,11 @@
#ifndef MALLOC_ELEM_H_
#define MALLOC_ELEM_H_
-#include <rte_memory.h>
+#include <stdbool.h>
+
+#include <rte_eal_memconfig.h>
+
+#define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)
/* dummy definition of struct so we can use pointers to it in malloc_elem struct */
struct malloc_heap;
@@ -18,9 +22,13 @@ enum elem_state {
struct malloc_elem {
struct malloc_heap *heap;
- struct malloc_elem *volatile prev; /* points to prev elem in memseg */
- LIST_ENTRY(malloc_elem) free_list; /* list of free elements in heap */
- const struct rte_memseg *ms;
+ struct malloc_elem *volatile prev;
+ /**< points to prev elem in memseg */
+ struct malloc_elem *volatile next;
+ /**< points to next elem in memseg */
+ LIST_ENTRY(malloc_elem) free_list;
+ /**< list of free elements in heap */
+ struct rte_memseg_list *msl;
volatile enum elem_state state;
uint32_t pad;
size_t size;
@@ -107,15 +115,11 @@ malloc_elem_from_data(const void *data)
void
malloc_elem_init(struct malloc_elem *elem,
struct malloc_heap *heap,
- const struct rte_memseg *ms,
+ struct rte_memseg_list *msl,
size_t size);
-/*
- * initialise a dummy malloc_elem header for the end-of-memseg marker
- */
void
-malloc_elem_mkend(struct malloc_elem *elem,
- struct malloc_elem *prev_free);
+malloc_elem_insert(struct malloc_elem *elem);
/*
* return true if the current malloc_elem can hold a block of data
@@ -123,7 +127,7 @@ malloc_elem_mkend(struct malloc_elem *elem,
*/
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size,
- unsigned align, size_t bound);
+ unsigned int align, size_t bound, bool contig);
/*
* reserve a block of data in an existing malloc_elem. If the malloc_elem
@@ -131,16 +135,19 @@ malloc_elem_can_hold(struct malloc_elem *elem, size_t size,
*/
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size,
- unsigned align, size_t bound);
+ unsigned int align, size_t bound, bool contig);
/*
* free a malloc_elem block by adding it to the free list. If the
* blocks either immediately before or immediately after newly freed block
* are also free, the blocks are merged together.
*/
-int
+struct malloc_elem *
malloc_elem_free(struct malloc_elem *elem);
+struct malloc_elem *
+malloc_elem_join_adjacent_free(struct malloc_elem *elem);
+
/*
* attempt to resize a malloc_elem by expanding into any free space
* immediately after it in memory.
@@ -148,6 +155,18 @@ malloc_elem_free(struct malloc_elem *elem);
int
malloc_elem_resize(struct malloc_elem *elem, size_t size);
+void
+malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len);
+
+void
+malloc_elem_free_list_remove(struct malloc_elem *elem);
+
+/*
+ * dump contents of malloc elem to a file.
+ */
+void
+malloc_elem_dump(const struct malloc_elem *elem, FILE *f);
+
/*
* Given an element size, compute its freelist index.
*/
@@ -160,4 +179,10 @@ malloc_elem_free_list_index(size_t size);
void
malloc_elem_free_list_insert(struct malloc_elem *elem);
+/*
+ * Find biggest IOVA-contiguous zone within an element with specified alignment.
+ */
+size_t
+malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align);
+
#endif /* MALLOC_ELEM_H_ */
diff --git a/lib/librte_eal/common/malloc_heap.c b/lib/librte_eal/common/malloc_heap.c
index 7aafc880..12aaf2d7 100644
--- a/lib/librte_eal/common/malloc_heap.c
+++ b/lib/librte_eal/common/malloc_heap.c
@@ -10,6 +10,7 @@
#include <sys/queue.h>
#include <rte_memory.h>
+#include <rte_errno.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_launch.h>
@@ -20,9 +21,13 @@
#include <rte_spinlock.h>
#include <rte_memcpy.h>
#include <rte_atomic.h>
+#include <rte_fbarray.h>
+#include "eal_internal_cfg.h"
+#include "eal_memalloc.h"
#include "malloc_elem.h"
#include "malloc_heap.h"
+#include "malloc_mp.h"
static unsigned
check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
@@ -62,26 +67,51 @@ check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
}
/*
- * Expand the heap with a memseg.
- * This reserves the zone and sets a dummy malloc_elem header at the end
- * to prevent overflow. The rest of the zone is added to free list as a single
- * large free block
+ * Expand the heap with a memory area.
*/
-static void
-malloc_heap_add_memseg(struct malloc_heap *heap, struct rte_memseg *ms)
+static struct malloc_elem *
+malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
+ void *start, size_t len)
+{
+ struct malloc_elem *elem = start;
+
+ malloc_elem_init(elem, heap, msl, len);
+
+ malloc_elem_insert(elem);
+
+ elem = malloc_elem_join_adjacent_free(elem);
+
+ malloc_elem_free_list_insert(elem);
+
+ return elem;
+}
+
+static int
+malloc_add_seg(const struct rte_memseg_list *msl,
+ const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
{
- /* allocate the memory block headers, one at end, one at start */
- struct malloc_elem *start_elem = (struct malloc_elem *)ms->addr;
- struct malloc_elem *end_elem = RTE_PTR_ADD(ms->addr,
- ms->len - MALLOC_ELEM_OVERHEAD);
- end_elem = RTE_PTR_ALIGN_FLOOR(end_elem, RTE_CACHE_LINE_SIZE);
- const size_t elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *found_msl;
+ struct malloc_heap *heap;
+ int msl_idx;
+
+ heap = &mcfg->malloc_heaps[msl->socket_id];
+
+	/* msl is const, so find the mutable msl in the mem config by index */
+ msl_idx = msl - mcfg->memsegs;
+
+ if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
+ return -1;
- malloc_elem_init(start_elem, heap, ms, elem_size);
- malloc_elem_mkend(end_elem, start_elem);
- malloc_elem_free_list_insert(start_elem);
+ found_msl = &mcfg->memsegs[msl_idx];
- heap->total_size += elem_size;
+ malloc_heap_add_memory(heap, found_msl, ms->addr, len);
+
+ heap->total_size += len;
+
+ RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
+ msl->socket_id);
+ return 0;
}
/*
@@ -92,7 +122,7 @@ malloc_heap_add_memseg(struct malloc_heap *heap, struct rte_memseg *ms)
*/
static struct malloc_elem *
find_suitable_element(struct malloc_heap *heap, size_t size,
- unsigned flags, size_t align, size_t bound)
+ unsigned int flags, size_t align, size_t bound, bool contig)
{
size_t idx;
struct malloc_elem *elem, *alt_elem = NULL;
@@ -101,8 +131,10 @@ find_suitable_element(struct malloc_heap *heap, size_t size,
idx < RTE_HEAP_NUM_FREELISTS; idx++) {
for (elem = LIST_FIRST(&heap->free_head[idx]);
!!elem; elem = LIST_NEXT(elem, free_list)) {
- if (malloc_elem_can_hold(elem, size, align, bound)) {
- if (check_hugepage_sz(flags, elem->ms->hugepage_sz))
+ if (malloc_elem_can_hold(elem, size, align, bound,
+ contig)) {
+ if (check_hugepage_sz(flags,
+ elem->msl->page_sz))
return elem;
if (alt_elem == NULL)
alt_elem = elem;
@@ -117,34 +149,770 @@ find_suitable_element(struct malloc_heap *heap, size_t size,
}
/*
+ * Iterates through a heap's freelists to find the biggest free element that
+ * satisfies the requested alignment, and sets *size to the size of the
+ * element that was found.
+ * Returns NULL on failure, or a pointer to the element on success.
+ */
+static struct malloc_elem *
+find_biggest_element(struct malloc_heap *heap, size_t *size,
+ unsigned int flags, size_t align, bool contig)
+{
+ struct malloc_elem *elem, *max_elem = NULL;
+ size_t idx, max_size = 0;
+
+ for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
+ for (elem = LIST_FIRST(&heap->free_head[idx]);
+ !!elem; elem = LIST_NEXT(elem, free_list)) {
+ size_t cur_size;
+ if (!check_hugepage_sz(flags, elem->msl->page_sz))
+ continue;
+ if (contig) {
+ cur_size =
+ malloc_elem_find_max_iova_contig(elem,
+ align);
+ } else {
+ void *data_start = RTE_PTR_ADD(elem,
+ MALLOC_ELEM_HEADER_LEN);
+ void *data_end = RTE_PTR_ADD(elem, elem->size -
+ MALLOC_ELEM_TRAILER_LEN);
+ void *aligned = RTE_PTR_ALIGN_CEIL(data_start,
+ align);
+ /* check if aligned data start is beyond end */
+ if (aligned >= data_end)
+ continue;
+ cur_size = RTE_PTR_DIFF(data_end, aligned);
+ }
+ if (cur_size > max_size) {
+ max_size = cur_size;
+ max_elem = elem;
+ }
+ }
+ }
+
+ *size = max_size;
+ return max_elem;
+}
+
+/*
* Main function to allocate a block of memory from the heap.
* It locks the free list, scans it, and adds a new memseg if the
* scan fails. Once the new memseg is added, it re-scans and should return
* the new element after releasing the lock.
*/
-void *
-malloc_heap_alloc(struct malloc_heap *heap,
- const char *type __attribute__((unused)), size_t size, unsigned flags,
- size_t align, size_t bound)
+static void *
+heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
+ unsigned int flags, size_t align, size_t bound, bool contig)
{
struct malloc_elem *elem;
size = RTE_CACHE_LINE_ROUNDUP(size);
align = RTE_CACHE_LINE_ROUNDUP(align);
- rte_spinlock_lock(&heap->lock);
+ elem = find_suitable_element(heap, size, flags, align, bound, contig);
+ if (elem != NULL) {
+ elem = malloc_elem_alloc(elem, size, align, bound, contig);
+
+ /* increase heap's count of allocated elements */
+ heap->alloc_count++;
+ }
- elem = find_suitable_element(heap, size, flags, align, bound);
+ return elem == NULL ? NULL : (void *)(&elem[1]);
+}
+
+static void *
+heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
+ unsigned int flags, size_t align, bool contig)
+{
+ struct malloc_elem *elem;
+ size_t size;
+
+ align = RTE_CACHE_LINE_ROUNDUP(align);
+
+ elem = find_biggest_element(heap, &size, flags, align, contig);
if (elem != NULL) {
- elem = malloc_elem_alloc(elem, size, align, bound);
+ elem = malloc_elem_alloc(elem, size, align, 0, contig);
+
/* increase heap's count of allocated elements */
heap->alloc_count++;
}
- rte_spinlock_unlock(&heap->lock);
return elem == NULL ? NULL : (void *)(&elem[1]);
}
+/* this function is exposed in malloc_mp.h */
+void
+rollback_expand_heap(struct rte_memseg **ms, int n_segs,
+ struct malloc_elem *elem, void *map_addr, size_t map_len)
+{
+ if (elem != NULL) {
+ malloc_elem_free_list_remove(elem);
+ malloc_elem_hide_region(elem, map_addr, map_len);
+ }
+
+ eal_memalloc_free_seg_bulk(ms, n_segs);
+}
+
+/* this function is exposed in malloc_mp.h */
+struct malloc_elem *
+alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
+ int socket, unsigned int flags, size_t align, size_t bound,
+ bool contig, struct rte_memseg **ms, int n_segs)
+{
+ struct rte_memseg_list *msl;
+ struct malloc_elem *elem = NULL;
+ size_t alloc_sz;
+ int allocd_pages;
+ void *ret, *map_addr;
+
+ alloc_sz = (size_t)pg_sz * n_segs;
+
+ /* first, check if we're allowed to allocate this memory */
+ if (eal_memalloc_mem_alloc_validate(socket,
+ heap->total_size + alloc_sz) < 0) {
+ RTE_LOG(DEBUG, EAL, "User has disallowed allocation\n");
+ return NULL;
+ }
+
+ allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
+ socket, true);
+
+ /* make sure we've allocated our pages... */
+ if (allocd_pages < 0)
+ return NULL;
+
+ map_addr = ms[0]->addr;
+ msl = rte_mem_virt2memseg_list(map_addr);
+
+ /* check if we wanted contiguous memory but didn't get it */
+ if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
+ RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
+ __func__);
+ goto fail;
+ }
+
+ /* add newly minted memsegs to malloc heap */
+ elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz);
+
+ /* try once more, as now we have allocated new memory */
+ ret = find_suitable_element(heap, elt_size, flags, align, bound,
+ contig);
+
+ if (ret == NULL)
+ goto fail;
+
+ return elem;
+
+fail:
+ rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
+ return NULL;
+}
+
+static int
+try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
+ size_t elt_size, int socket, unsigned int flags, size_t align,
+ size_t bound, bool contig)
+{
+ struct malloc_elem *elem;
+ struct rte_memseg **ms;
+ void *map_addr;
+ size_t alloc_sz;
+ int n_segs;
+ bool callback_triggered = false;
+
+ alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
+ MALLOC_ELEM_TRAILER_LEN, pg_sz);
+ n_segs = alloc_sz / pg_sz;
+
+ /* we can't know in advance how many pages we'll need, so we malloc */
+	ms = malloc(sizeof(*ms) * n_segs);
+	/* check for allocation failure before zeroing the array */
+	if (ms == NULL)
+		return -1;
+
+	memset(ms, 0, sizeof(*ms) * n_segs);
+
+ elem = alloc_pages_on_heap(heap, pg_sz, elt_size, socket, flags, align,
+ bound, contig, ms, n_segs);
+
+ if (elem == NULL)
+ goto free_ms;
+
+ map_addr = ms[0]->addr;
+
+ /* notify user about changes in memory map */
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);
+
+ /* notify other processes that this has happened */
+ if (request_sync()) {
+ /* we couldn't ensure all processes have mapped memory,
+ * so free it back and notify everyone that it's been
+ * freed back.
+ *
+ * technically, we could've avoided adding memory addresses to
+ * the map, but that would've led to inconsistent behavior
+ * between primary and secondary processes, as those get
+ * callbacks during sync. therefore, force primary process to
+ * do alloc-and-rollback syncs as well.
+ */
+ callback_triggered = true;
+ goto free_elem;
+ }
+ heap->total_size += alloc_sz;
+
+	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zuMB\n",
+ socket, alloc_sz >> 20ULL);
+
+ free(ms);
+
+ return 0;
+
+free_elem:
+ if (callback_triggered)
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+ map_addr, alloc_sz);
+
+ rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
+
+ request_sync();
+free_ms:
+ free(ms);
+
+ return -1;
+}
+
+static int
+try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
+ size_t elt_size, int socket, unsigned int flags, size_t align,
+ size_t bound, bool contig)
+{
+ struct malloc_mp_req req;
+ int req_result;
+
+ memset(&req, 0, sizeof(req));
+
+ req.t = REQ_TYPE_ALLOC;
+ req.alloc_req.align = align;
+ req.alloc_req.bound = bound;
+ req.alloc_req.contig = contig;
+ req.alloc_req.flags = flags;
+ req.alloc_req.elt_size = elt_size;
+ req.alloc_req.page_sz = pg_sz;
+ req.alloc_req.socket = socket;
+ req.alloc_req.heap = heap; /* it's in shared memory */
+
+ req_result = request_to_primary(&req);
+
+ if (req_result != 0)
+ return -1;
+
+ if (req.result != REQ_RESULT_SUCCESS)
+ return -1;
+
+ return 0;
+}
+
+static int
+try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
+ int socket, unsigned int flags, size_t align, size_t bound,
+ bool contig)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int ret;
+
+ rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
+ flags, align, bound, contig);
+ } else {
+ ret = try_expand_heap_secondary(heap, pg_sz, elt_size, socket,
+ flags, align, bound, contig);
+ }
+
+ rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+ return ret;
+}
+
+static int
+compare_pagesz(const void *a, const void *b)
+{
+ const struct rte_memseg_list * const*mpa = a;
+ const struct rte_memseg_list * const*mpb = b;
+ const struct rte_memseg_list *msla = *mpa;
+ const struct rte_memseg_list *mslb = *mpb;
+ uint64_t pg_sz_a = msla->page_sz;
+ uint64_t pg_sz_b = mslb->page_sz;
+
+ if (pg_sz_a < pg_sz_b)
+ return -1;
+ if (pg_sz_a > pg_sz_b)
+ return 1;
+ return 0;
+}
+
+static int
+alloc_more_mem_on_socket(struct malloc_heap *heap, size_t size, int socket,
+ unsigned int flags, size_t align, size_t bound, bool contig)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
+ struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
+ uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
+ uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
+ uint64_t prev_pg_sz;
+ int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
+ bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
+ unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
+ void *ret;
+
+ memset(requested_msls, 0, sizeof(requested_msls));
+ memset(other_msls, 0, sizeof(other_msls));
+ memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
+ memset(other_pg_sz, 0, sizeof(other_pg_sz));
+
+ /*
+	 * go through the memseg lists and take note of all the page sizes
+	 * available, and whether any of them were specifically requested by
+	 * the user.
+ */
+ n_requested_msls = 0;
+ n_other_msls = 0;
+ for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+ struct rte_memseg_list *msl = &mcfg->memsegs[i];
+
+ if (msl->socket_id != socket)
+ continue;
+
+ if (msl->base_va == NULL)
+ continue;
+
+ /* if pages of specific size were requested */
+ if (size_flags != 0 && check_hugepage_sz(size_flags,
+ msl->page_sz))
+ requested_msls[n_requested_msls++] = msl;
+ else if (size_flags == 0 || size_hint)
+ other_msls[n_other_msls++] = msl;
+ }
+
+ /* sort the lists, smallest first */
+ qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
+ compare_pagesz);
+ qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
+ compare_pagesz);
+
+ /* now, extract page sizes we are supposed to try */
+ prev_pg_sz = 0;
+ n_requested_pg_sz = 0;
+ for (i = 0; i < n_requested_msls; i++) {
+ uint64_t pg_sz = requested_msls[i]->page_sz;
+
+ if (prev_pg_sz != pg_sz) {
+ requested_pg_sz[n_requested_pg_sz++] = pg_sz;
+ prev_pg_sz = pg_sz;
+ }
+ }
+ prev_pg_sz = 0;
+ n_other_pg_sz = 0;
+ for (i = 0; i < n_other_msls; i++) {
+ uint64_t pg_sz = other_msls[i]->page_sz;
+
+ if (prev_pg_sz != pg_sz) {
+ other_pg_sz[n_other_pg_sz++] = pg_sz;
+ prev_pg_sz = pg_sz;
+ }
+ }
+
+ /* finally, try allocating memory of specified page sizes, starting from
+ * the smallest sizes
+ */
+ for (i = 0; i < n_requested_pg_sz; i++) {
+ uint64_t pg_sz = requested_pg_sz[i];
+
+ /*
+ * do not pass the size hint here, as user expects other page
+ * sizes first, before resorting to best effort allocation.
+ */
+ if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
+ align, bound, contig))
+ return 0;
+ }
+ if (n_other_pg_sz == 0)
+ return -1;
+
+ /* now, check if we can reserve anything with size hint */
+ ret = find_suitable_element(heap, size, flags, align, bound, contig);
+ if (ret != NULL)
+ return 0;
+
+ /*
+ * we still couldn't reserve memory, so try expanding heap with other
+ * page sizes, if there are any
+ */
+ for (i = 0; i < n_other_pg_sz; i++) {
+ uint64_t pg_sz = other_pg_sz[i];
+
+ if (!try_expand_heap(heap, pg_sz, size, socket, flags,
+ align, bound, contig))
+ return 0;
+ }
+ return -1;
+}
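+
+/*
+ * Illustrative scenario for the two passes above: on a socket with both 2M
+ * and 1G memseg lists, a request carrying the 1G flag plus
+ * RTE_MEMZONE_SIZE_HINT_ONLY first tries to expand the heap with 1G pages
+ * (the "requested" list); only if that fails does it fall back to 2M pages
+ * (the "other" list), smallest page sizes first.
+ */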
+
+/* this will try lower page sizes first */
+static void *
+heap_alloc_on_socket(const char *type, size_t size, int socket,
+ unsigned int flags, size_t align, size_t bound, bool contig)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
+ unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
+ void *ret;
+
+ rte_spinlock_lock(&(heap->lock));
+
+ align = align == 0 ? 1 : align;
+
+ /* for legacy mode, try once and with all flags */
+ if (internal_config.legacy_mem) {
+ ret = heap_alloc(heap, type, size, flags, align, bound, contig);
+ goto alloc_unlock;
+ }
+
+ /*
+ * we do not pass the size hint here, because even if allocation fails,
+	 * we may still be able to allocate memory from appropriate page sizes;
+	 * we just need to request more memory first.
+ */
+ ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
+ if (ret != NULL)
+ goto alloc_unlock;
+
+ if (!alloc_more_mem_on_socket(heap, size, socket, flags, align, bound,
+ contig)) {
+ ret = heap_alloc(heap, type, size, flags, align, bound, contig);
+
+ /* this should have succeeded */
+ if (ret == NULL)
+ RTE_LOG(ERR, EAL, "Error allocating from heap\n");
+ }
+alloc_unlock:
+ rte_spinlock_unlock(&(heap->lock));
+ return ret;
+}
+
+void *
+malloc_heap_alloc(const char *type, size_t size, int socket_arg,
+ unsigned int flags, size_t align, size_t bound, bool contig)
+{
+ int socket, i, cur_socket;
+ void *ret;
+
+ /* return NULL if size is 0 or alignment is not power-of-2 */
+ if (size == 0 || (align && !rte_is_power_of_2(align)))
+ return NULL;
+
+ if (!rte_eal_has_hugepages())
+ socket_arg = SOCKET_ID_ANY;
+
+ if (socket_arg == SOCKET_ID_ANY)
+ socket = malloc_get_numa_socket();
+ else
+ socket = socket_arg;
+
+ /* Check socket parameter */
+ if (socket >= RTE_MAX_NUMA_NODES)
+ return NULL;
+
+ ret = heap_alloc_on_socket(type, size, socket, flags, align, bound,
+ contig);
+ if (ret != NULL || socket_arg != SOCKET_ID_ANY)
+ return ret;
+
+ /* try other heaps */
+ for (i = 0; i < (int) rte_socket_count(); i++) {
+ cur_socket = rte_socket_id_by_idx(i);
+ if (cur_socket == socket)
+ continue;
+ ret = heap_alloc_on_socket(type, size, cur_socket, flags,
+ align, bound, contig);
+ if (ret != NULL)
+ return ret;
+ }
+ return NULL;
+}
+
+static void *
+heap_alloc_biggest_on_socket(const char *type, int socket, unsigned int flags,
+ size_t align, bool contig)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct malloc_heap *heap = &mcfg->malloc_heaps[socket];
+ void *ret;
+
+ rte_spinlock_lock(&(heap->lock));
+
+ align = align == 0 ? 1 : align;
+
+ ret = heap_alloc_biggest(heap, type, flags, align, contig);
+
+ rte_spinlock_unlock(&(heap->lock));
+
+ return ret;
+}
+
+void *
+malloc_heap_alloc_biggest(const char *type, int socket_arg, unsigned int flags,
+ size_t align, bool contig)
+{
+ int socket, i, cur_socket;
+ void *ret;
+
+ /* return NULL if align is not power-of-2 */
+ if ((align && !rte_is_power_of_2(align)))
+ return NULL;
+
+ if (!rte_eal_has_hugepages())
+ socket_arg = SOCKET_ID_ANY;
+
+ if (socket_arg == SOCKET_ID_ANY)
+ socket = malloc_get_numa_socket();
+ else
+ socket = socket_arg;
+
+ /* Check socket parameter */
+ if (socket >= RTE_MAX_NUMA_NODES)
+ return NULL;
+
+ ret = heap_alloc_biggest_on_socket(type, socket, flags, align,
+ contig);
+ if (ret != NULL || socket_arg != SOCKET_ID_ANY)
+ return ret;
+
+ /* try other heaps */
+ for (i = 0; i < (int) rte_socket_count(); i++) {
+ cur_socket = rte_socket_id_by_idx(i);
+ if (cur_socket == socket)
+ continue;
+ ret = heap_alloc_biggest_on_socket(type, cur_socket, flags,
+ align, contig);
+ if (ret != NULL)
+ return ret;
+ }
+ return NULL;
+}
+
+/* this function is exposed in malloc_mp.h */
+int
+malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
+{
+ int n_segs, seg_idx, max_seg_idx;
+ struct rte_memseg_list *msl;
+ size_t page_sz;
+
+ msl = rte_mem_virt2memseg_list(aligned_start);
+ if (msl == NULL)
+ return -1;
+
+ page_sz = (size_t)msl->page_sz;
+ n_segs = aligned_len / page_sz;
+ seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
+ max_seg_idx = seg_idx + n_segs;
+
+ for (; seg_idx < max_seg_idx; seg_idx++) {
+ struct rte_memseg *ms;
+
+ ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
+ eal_memalloc_free_seg(ms);
+ }
+ return 0;
+}
+
+int
+malloc_heap_free(struct malloc_elem *elem)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct malloc_heap *heap;
+ void *start, *aligned_start, *end, *aligned_end;
+ size_t len, aligned_len, page_sz;
+ struct rte_memseg_list *msl;
+ unsigned int i, n_segs, before_space, after_space;
+ int ret;
+
+ if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
+ return -1;
+
+ /* elem may be merged with previous element, so keep heap address */
+ heap = elem->heap;
+ msl = elem->msl;
+ page_sz = (size_t)msl->page_sz;
+
+ rte_spinlock_lock(&(heap->lock));
+
+ /* mark element as free */
+ elem->state = ELEM_FREE;
+
+ elem = malloc_elem_free(elem);
+
+ /* anything after this is a bonus */
+ ret = 0;
+
+	/* ...which we cannot take advantage of in legacy mode */
+ if (internal_config.legacy_mem)
+ goto free_unlock;
+
+ /* check if we can free any memory back to the system */
+ if (elem->size < page_sz)
+ goto free_unlock;
+
+	/* probably, but let's make sure, as we may not be using up a full page */
+ start = elem;
+ len = elem->size;
+ aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
+ end = RTE_PTR_ADD(elem, len);
+ aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);
+
+ aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
+
+ /* can't free anything */
+ if (aligned_len < page_sz)
+ goto free_unlock;
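+
+	/* illustrative example: with 2M pages, an element spanning 0x7f0000
+	 * to 0xc10000 gives aligned_start = 0x800000 and aligned_end =
+	 * 0xc00000, i.e. two whole pages that are candidates for being
+	 * returned to the system.
+	 */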
+
+ /* we can free something. however, some of these pages may be marked as
+ * unfreeable, so also check that as well
+ */
+ n_segs = aligned_len / page_sz;
+ for (i = 0; i < n_segs; i++) {
+ const struct rte_memseg *tmp =
+ rte_mem_virt2memseg(aligned_start, msl);
+
+ if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
+ /* this is an unfreeable segment, so move start */
+ aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
+ }
+ }
+
+ /* recalculate length and number of segments */
+ aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
+ n_segs = aligned_len / page_sz;
+
+ /* check if we can still free some pages */
+ if (n_segs == 0)
+ goto free_unlock;
+
+ /* We're not done yet. We also have to check if by freeing space we will
+ * be leaving free elements that are too small to store new elements.
+ * Check if we have enough space in the beginning and at the end, or if
+ * start/end are exactly page aligned.
+ */
+ before_space = RTE_PTR_DIFF(aligned_start, elem);
+ after_space = RTE_PTR_DIFF(end, aligned_end);
+ if (before_space != 0 &&
+ before_space < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ /* There is not enough space before start, but we may be able to
+ * move the start forward by one page.
+ */
+ if (n_segs == 1)
+ goto free_unlock;
+
+ /* move start */
+ aligned_start = RTE_PTR_ADD(aligned_start, page_sz);
+ aligned_len -= page_sz;
+ n_segs--;
+ }
+ if (after_space != 0 && after_space <
+ MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
+ /* There is not enough space after end, but we may be able to
+ * move the end backwards by one page.
+ */
+ if (n_segs == 1)
+ goto free_unlock;
+
+ /* move end */
+ aligned_end = RTE_PTR_SUB(aligned_end, page_sz);
+ aligned_len -= page_sz;
+ n_segs--;
+ }
+
+ /* now we can finally free us some pages */
+
+ rte_rwlock_write_lock(&mcfg->memory_hotplug_lock);
+
+ /*
+ * we allow secondary processes to clear the heap of this allocated
+	 * memory because it is safe to do so: even if notifications about
+	 * unmapped pages don't make it to other processes, the heap is shared
+	 * across all processes and will become empty of this memory anyway.
+	 * nothing can allocate it back unless the primary process is able to
+	 * deliver the allocation message to every single running process.
+ */
+
+ malloc_elem_free_list_remove(elem);
+
+ malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);
+
+ heap->total_size -= aligned_len;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* notify user about changes in memory map */
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+ aligned_start, aligned_len);
+
+ /* don't care if any of this fails */
+ malloc_heap_free_pages(aligned_start, aligned_len);
+
+ request_sync();
+ } else {
+ struct malloc_mp_req req;
+
+ memset(&req, 0, sizeof(req));
+
+ req.t = REQ_TYPE_FREE;
+ req.free_req.addr = aligned_start;
+ req.free_req.len = aligned_len;
+
+ /*
+		 * we ask the primary to deallocate the pages, but we don't do
+		 * it in this thread: we only notify the primary, and this
+		 * process will receive another request (in parallel) that
+		 * performs the deallocation for us on another thread.
+ *
+ * we also don't really care if this succeeds - the data is
+ * already removed from the heap, so it is, for all intents and
+ * purposes, hidden from the rest of DPDK even if some other
+ * process (including this one) may have these pages mapped.
+ *
+ * notifications about deallocated memory happen during sync.
+ */
+ request_to_primary(&req);
+ }
+
+	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zuMB\n",
+ msl->socket_id, aligned_len >> 20ULL);
+
+ rte_rwlock_write_unlock(&mcfg->memory_hotplug_lock);
+free_unlock:
+ rte_spinlock_unlock(&(heap->lock));
+ return ret;
+}
+
+int
+malloc_heap_resize(struct malloc_elem *elem, size_t size)
+{
+ int ret;
+
+ if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
+ return -1;
+
+ rte_spinlock_lock(&(elem->heap->lock));
+
+ ret = malloc_elem_resize(elem, size);
+
+ rte_spinlock_unlock(&(elem->heap->lock));
+
+ return ret;
+}
+
/*
* Function to retrieve data for heap on given socket
*/
@@ -183,21 +951,49 @@ malloc_heap_get_stats(struct malloc_heap *heap,
return 0;
}
+/*
+ * Function to dump contents of a heap to a file
+ */
+void
+malloc_heap_dump(struct malloc_heap *heap, FILE *f)
+{
+ struct malloc_elem *elem;
+
+ rte_spinlock_lock(&heap->lock);
+
+ fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
+ fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);
+
+ elem = heap->first;
+ while (elem) {
+ malloc_elem_dump(elem, f);
+ elem = elem->next;
+ }
+
+ rte_spinlock_unlock(&heap->lock);
+}
+
int
rte_eal_malloc_heap_init(void)
{
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- unsigned ms_cnt;
- struct rte_memseg *ms;
- if (mcfg == NULL)
+ if (register_mp_requests()) {
+ RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
return -1;
-
- for (ms = &mcfg->memseg[0], ms_cnt = 0;
- (ms_cnt < RTE_MAX_MEMSEG) && (ms->len > 0);
- ms_cnt++, ms++) {
- malloc_heap_add_memseg(&mcfg->malloc_heaps[ms->socket_id], ms);
}
- return 0;
+ /* unlock mem hotplug here. it's safe for primary as no requests can
+ * even come before primary itself is fully initialized, and secondaries
+ * do not need to initialize the heap.
+ */
+ rte_rwlock_read_unlock(&mcfg->memory_hotplug_lock);
+
+ /* secondary process does not need to initialize anything */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* add all IOVA-contiguous areas to the heap */
+ return rte_memseg_contig_walk(malloc_add_seg, NULL);
}
diff --git a/lib/librte_eal/common/malloc_heap.h b/lib/librte_eal/common/malloc_heap.h
index e0defa70..f52cb555 100644
--- a/lib/librte_eal/common/malloc_heap.h
+++ b/lib/librte_eal/common/malloc_heap.h
@@ -5,6 +5,8 @@
#ifndef MALLOC_HEAP_H_
#define MALLOC_HEAP_H_
+#include <stdbool.h>
+
#include <rte_malloc.h>
#include <rte_malloc_heap.h>
@@ -24,13 +26,26 @@ malloc_get_numa_socket(void)
}
void *
-malloc_heap_alloc(struct malloc_heap *heap, const char *type, size_t size,
- unsigned flags, size_t align, size_t bound);
+malloc_heap_alloc(const char *type, size_t size, int socket, unsigned int flags,
+ size_t align, size_t bound, bool contig);
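+
+/*
+ * Illustrative call (a sketch, not prescribed usage): allocate 1M of
+ * IOVA-contiguous, cache line aligned memory on any socket:
+ *
+ *	void *p = malloc_heap_alloc(NULL, 1 << 20, SOCKET_ID_ANY, 0,
+ *			RTE_CACHE_LINE_SIZE, 0, true);
+ */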
+
+void *
+malloc_heap_alloc_biggest(const char *type, int socket, unsigned int flags,
+ size_t align, bool contig);
+
+int
+malloc_heap_free(struct malloc_elem *elem);
+
+int
+malloc_heap_resize(struct malloc_elem *elem, size_t size);
int
malloc_heap_get_stats(struct malloc_heap *heap,
struct rte_malloc_socket_stats *socket_stats);
+void
+malloc_heap_dump(struct malloc_heap *heap, FILE *f);
+
int
rte_eal_malloc_heap_init(void);
diff --git a/lib/librte_eal/common/malloc_mp.c b/lib/librte_eal/common/malloc_mp.c
new file mode 100644
index 00000000..931c14bc
--- /dev/null
+++ b/lib/librte_eal/common/malloc_mp.c
@@ -0,0 +1,743 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <sys/time.h>
+
+#include <rte_alarm.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "eal_memalloc.h"
+
+#include "malloc_elem.h"
+#include "malloc_mp.h"
+
+#define MP_ACTION_SYNC "mp_malloc_sync"
+/**< request sent by primary process to notify of changes in memory map */
+#define MP_ACTION_ROLLBACK "mp_malloc_rollback"
+/**< request sent by primary process to notify of changes in memory map. this is
+ * essentially a regular sync request, but we cannot send sync requests while
+ * another one is in progress, and we might have to - therefore, we do this as
+ * a separate callback.
+ */
+#define MP_ACTION_REQUEST "mp_malloc_request"
+/**< request sent by secondary process to ask for allocation/deallocation */
+#define MP_ACTION_RESPONSE "mp_malloc_response"
+/**< response sent to secondary process to indicate result of request */
+
+/* forward declarations */
+static int
+handle_sync_response(const struct rte_mp_msg *request,
+ const struct rte_mp_reply *reply);
+static int
+handle_rollback_response(const struct rte_mp_msg *request,
+ const struct rte_mp_reply *reply);
+
+#define MP_TIMEOUT_S 5 /**< 5 second timeout */
+
+/* when we're allocating, we need to store some state to ensure that we can
+ * roll back later
+ */
+struct primary_alloc_req_state {
+ struct malloc_heap *heap;
+ struct rte_memseg **ms;
+ int ms_len;
+ struct malloc_elem *elem;
+ void *map_addr;
+ size_t map_len;
+};
+
+enum req_state {
+ REQ_STATE_INACTIVE = 0,
+ REQ_STATE_ACTIVE,
+ REQ_STATE_COMPLETE
+};
+
+struct mp_request {
+ TAILQ_ENTRY(mp_request) next;
+ struct malloc_mp_req user_req; /**< contents of request */
+ pthread_cond_t cond; /**< variable we use to time out on this request */
+ enum req_state state; /**< indicate status of this request */
+ struct primary_alloc_req_state alloc_state;
+};
+
+/*
+ * We could've used just a single request, but it may be possible for
+ * secondaries to time out earlier than the primary, and send a new request while
+ * primary is still expecting replies to the old one. Therefore, each new
+ * request will get assigned a new ID, which is how we will distinguish between
+ * expected and unexpected messages.
+ */
+TAILQ_HEAD(mp_request_list, mp_request);
+static struct {
+ struct mp_request_list list;
+ pthread_mutex_t lock;
+} mp_request_list = {
+ .list = TAILQ_HEAD_INITIALIZER(mp_request_list.list),
+ .lock = PTHREAD_MUTEX_INITIALIZER
+};
+
+/**
+ * General workflow is the following:
+ *
+ * Allocation:
+ * S: send request to primary
+ * P: attempt to allocate memory
+ * if failed, sendmsg failure
+ * if success, send sync request
+ * S: if received msg of failure, quit
+ * if received sync request, synchronize memory map and reply with result
+ * P: if received sync request result
+ * if success, sendmsg success
+ * if failure, roll back allocation and send a rollback request
+ * S: if received msg of success, quit
+ * if received rollback request, synchronize memory map and reply with result
+ * P: if received sync request result
+ * sendmsg sync request result
+ * S: if received msg, quit
+ *
+ * Aside from timeouts, there are three points where we can quit:
+ * - if allocation failed straight away
+ * - if allocation and sync request succeeded
+ * - if allocation succeeded, sync request failed, allocation rolled back and
+ * rollback request received (irrespective of whether it succeeded or failed)
+ *
+ * Deallocation:
+ * S: send request to primary
+ * P: attempt to deallocate memory
+ * if failed, sendmsg failure
+ * if success, send sync request
+ * S: if received msg of failure, quit
+ * if received sync request, synchronize memory map and reply with result
+ * P: if received sync request result
+ * sendmsg sync request result
+ * S: if received msg, quit
+ *
+ * There is no "rollback" from deallocation, as it's safe to have some memory
+ * mapped in some processes - it's absent from the heap, so it won't get used.
+ */
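+
+/*
+ * Sketch of how the flow above maps onto the message names: a secondary
+ * sends MP_ACTION_REQUEST; the primary issues asynchronous MP_ACTION_SYNC
+ * (and, when an allocation must be undone after a failed sync,
+ * MP_ACTION_ROLLBACK) requests to all processes; the final result reaches
+ * the requestor as MP_ACTION_RESPONSE.
+ */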
+
+static struct mp_request *
+find_request_by_id(uint64_t id)
+{
+ struct mp_request *req;
+ TAILQ_FOREACH(req, &mp_request_list.list, next) {
+ if (req->user_req.id == id)
+ break;
+ }
+ return req;
+}
+
+/* this ID is, like, totally guaranteed to be absolutely unique. pinky swear. */
+static uint64_t
+get_unique_id(void)
+{
+ uint64_t id;
+ do {
+ id = rte_rand();
+ } while (find_request_by_id(id) != NULL);
+ return id;
+}
+
+/* secondary will respond to sync requests thusly */
+static int
+handle_sync(const struct rte_mp_msg *msg, const void *peer)
+{
+ struct rte_mp_msg reply;
+ const struct malloc_mp_req *req =
+ (const struct malloc_mp_req *)msg->param;
+ struct malloc_mp_req *resp =
+ (struct malloc_mp_req *)reply.param;
+ int ret;
+
+ if (req->t != REQ_TYPE_SYNC) {
+ RTE_LOG(ERR, EAL, "Unexpected request from primary\n");
+ return -1;
+ }
+
+ memset(&reply, 0, sizeof(reply));
+
+ reply.num_fds = 0;
+ strlcpy(reply.name, msg->name, sizeof(reply.name));
+ reply.len_param = sizeof(*resp);
+
+ ret = eal_memalloc_sync_with_primary();
+
+ resp->t = REQ_TYPE_SYNC;
+ resp->id = req->id;
+ resp->result = ret == 0 ? REQ_RESULT_SUCCESS : REQ_RESULT_FAIL;
+
+ rte_mp_reply(&reply, peer);
+
+ return 0;
+}
+
+static int
+handle_alloc_request(const struct malloc_mp_req *m,
+ struct mp_request *req)
+{
+ const struct malloc_req_alloc *ar = &m->alloc_req;
+ struct malloc_heap *heap;
+ struct malloc_elem *elem;
+ struct rte_memseg **ms;
+ size_t alloc_sz;
+ int n_segs;
+ void *map_addr;
+
+ alloc_sz = RTE_ALIGN_CEIL(ar->align + ar->elt_size +
+ MALLOC_ELEM_TRAILER_LEN, ar->page_sz);
+ n_segs = alloc_sz / ar->page_sz;
+
+ heap = ar->heap;
+
+ /* we can't know in advance how many pages we'll need, so we malloc */
+	ms = malloc(sizeof(*ms) * n_segs);
+	/* check for allocation failure before zeroing the array */
+	if (ms == NULL) {
+		RTE_LOG(ERR, EAL, "Couldn't allocate memory for request state\n");
+		goto fail;
+	}
+
+	memset(ms, 0, sizeof(*ms) * n_segs);
+
+ elem = alloc_pages_on_heap(heap, ar->page_sz, ar->elt_size, ar->socket,
+ ar->flags, ar->align, ar->bound, ar->contig, ms,
+ n_segs);
+
+ if (elem == NULL)
+ goto fail;
+
+ map_addr = ms[0]->addr;
+
+ /* we have succeeded in allocating memory, but we still need to sync
+ * with other processes. however, since DPDK IPC is single-threaded, we
+ * send an asynchronous request and exit this callback.
+ */
+
+ req->alloc_state.ms = ms;
+ req->alloc_state.ms_len = n_segs;
+ req->alloc_state.map_addr = map_addr;
+ req->alloc_state.map_len = alloc_sz;
+ req->alloc_state.elem = elem;
+ req->alloc_state.heap = heap;
+
+ return 0;
+fail:
+ free(ms);
+ return -1;
+}
+
+/* first stage of primary handling requests from secondary */
+static int
+handle_request(const struct rte_mp_msg *msg, const void *peer __rte_unused)
+{
+ const struct malloc_mp_req *m =
+ (const struct malloc_mp_req *)msg->param;
+ struct mp_request *entry;
+ int ret;
+
+ /* lock access to request */
+ pthread_mutex_lock(&mp_request_list.lock);
+
+	/* make sure it's not a dupe. note that an entry found here still
+	 * belongs to the request list, so it must not be freed via the common
+	 * failure path below.
+	 */
+	if (find_request_by_id(m->id) != NULL) {
+		RTE_LOG(ERR, EAL, "Duplicate request id\n");
+		pthread_mutex_unlock(&mp_request_list.lock);
+		return -1;
+	}
+
+ entry = malloc(sizeof(*entry));
+ if (entry == NULL) {
+ RTE_LOG(ERR, EAL, "Unable to allocate memory for request\n");
+ goto fail;
+ }
+
+ /* erase all data */
+ memset(entry, 0, sizeof(*entry));
+
+ if (m->t == REQ_TYPE_ALLOC) {
+ ret = handle_alloc_request(m, entry);
+ } else if (m->t == REQ_TYPE_FREE) {
+ ret = malloc_heap_free_pages(m->free_req.addr,
+ m->free_req.len);
+ } else {
+ RTE_LOG(ERR, EAL, "Unexpected request from secondary\n");
+ goto fail;
+ }
+
+ if (ret != 0) {
+ struct rte_mp_msg resp_msg;
+ struct malloc_mp_req *resp =
+ (struct malloc_mp_req *)resp_msg.param;
+
+ /* send failure message straight away */
+ resp_msg.num_fds = 0;
+ resp_msg.len_param = sizeof(*resp);
+ strlcpy(resp_msg.name, MP_ACTION_RESPONSE,
+ sizeof(resp_msg.name));
+
+ resp->t = m->t;
+ resp->result = REQ_RESULT_FAIL;
+ resp->id = m->id;
+
+ if (rte_mp_sendmsg(&resp_msg)) {
+ RTE_LOG(ERR, EAL, "Couldn't send response\n");
+ goto fail;
+ }
+ /* we did not modify the request */
+ free(entry);
+ } else {
+ struct rte_mp_msg sr_msg;
+ struct malloc_mp_req *sr =
+ (struct malloc_mp_req *)sr_msg.param;
+ struct timespec ts;
+
+ memset(&sr_msg, 0, sizeof(sr_msg));
+
+		/* the request was handled locally, so ask all secondaries to
+		 * sync their memory maps via an asynchronous sync request
+		 */
+ sr_msg.num_fds = 0;
+ sr_msg.len_param = sizeof(*sr);
+ strlcpy(sr_msg.name, MP_ACTION_SYNC, sizeof(sr_msg.name));
+
+ ts.tv_nsec = 0;
+ ts.tv_sec = MP_TIMEOUT_S;
+
+ /* sync requests carry no data */
+ sr->t = REQ_TYPE_SYNC;
+ sr->id = m->id;
+
+		/* there may be a stray timeout still waiting */
+ do {
+ ret = rte_mp_request_async(&sr_msg, &ts,
+ handle_sync_response);
+ } while (ret != 0 && rte_errno == EEXIST);
+ if (ret != 0) {
+ RTE_LOG(ERR, EAL, "Couldn't send sync request\n");
+ if (m->t == REQ_TYPE_ALLOC)
+ free(entry->alloc_state.ms);
+ goto fail;
+ }
+
+ /* mark request as in progress */
+ memcpy(&entry->user_req, m, sizeof(*m));
+ entry->state = REQ_STATE_ACTIVE;
+
+ TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);
+ }
+ pthread_mutex_unlock(&mp_request_list.lock);
+ return 0;
+fail:
+ pthread_mutex_unlock(&mp_request_list.lock);
+ free(entry);
+ return -1;
+}
+
+/* callback for asynchronous sync requests in the primary. this will either
+ * send the result via sendmsg, or trigger a rollback request.
+ */
+static int
+handle_sync_response(const struct rte_mp_msg *request,
+ const struct rte_mp_reply *reply)
+{
+ enum malloc_req_result result;
+ struct mp_request *entry;
+ const struct malloc_mp_req *mpreq =
+ (const struct malloc_mp_req *)request->param;
+ int i;
+
+ /* lock the request */
+ pthread_mutex_lock(&mp_request_list.lock);
+
+ entry = find_request_by_id(mpreq->id);
+ if (entry == NULL) {
+ RTE_LOG(ERR, EAL, "Wrong request ID\n");
+ goto fail;
+ }
+
+ result = REQ_RESULT_SUCCESS;
+
+ if (reply->nb_received != reply->nb_sent)
+ result = REQ_RESULT_FAIL;
+
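+	/* the sync succeeds only if every secondary replied with a matching,
+	 * successful sync response
+	 */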
+ for (i = 0; i < reply->nb_received; i++) {
+ struct malloc_mp_req *resp =
+ (struct malloc_mp_req *)reply->msgs[i].param;
+
+ if (resp->t != REQ_TYPE_SYNC) {
+ RTE_LOG(ERR, EAL, "Unexpected response to sync request\n");
+ result = REQ_RESULT_FAIL;
+ break;
+ }
+ if (resp->id != entry->user_req.id) {
+ RTE_LOG(ERR, EAL, "Response to wrong sync request\n");
+ result = REQ_RESULT_FAIL;
+ break;
+ }
+ if (resp->result == REQ_RESULT_FAIL) {
+ result = REQ_RESULT_FAIL;
+ break;
+ }
+ }
+
+ if (entry->user_req.t == REQ_TYPE_FREE) {
+ struct rte_mp_msg msg;
+ struct malloc_mp_req *resp = (struct malloc_mp_req *)msg.param;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* this is a free request, just sendmsg result */
+ resp->t = REQ_TYPE_FREE;
+ resp->result = result;
+ resp->id = entry->user_req.id;
+ msg.num_fds = 0;
+ msg.len_param = sizeof(*resp);
+ strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));
+
+ if (rte_mp_sendmsg(&msg))
+ RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");
+
+ TAILQ_REMOVE(&mp_request_list.list, entry, next);
+ free(entry);
+ } else if (entry->user_req.t == REQ_TYPE_ALLOC &&
+ result == REQ_RESULT_SUCCESS) {
+ struct malloc_heap *heap = entry->alloc_state.heap;
+ struct rte_mp_msg msg;
+ struct malloc_mp_req *resp =
+ (struct malloc_mp_req *)msg.param;
+
+ memset(&msg, 0, sizeof(msg));
+
+ heap->total_size += entry->alloc_state.map_len;
+
+ /* result is success, so just notify secondary about this */
+ resp->t = REQ_TYPE_ALLOC;
+ resp->result = result;
+ resp->id = entry->user_req.id;
+ msg.num_fds = 0;
+ msg.len_param = sizeof(*resp);
+ strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));
+
+ if (rte_mp_sendmsg(&msg))
+ RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");
+
+ TAILQ_REMOVE(&mp_request_list.list, entry, next);
+ free(entry->alloc_state.ms);
+ free(entry);
+ } else if (entry->user_req.t == REQ_TYPE_ALLOC &&
+ result == REQ_RESULT_FAIL) {
+ struct rte_mp_msg rb_msg;
+ struct malloc_mp_req *rb =
+ (struct malloc_mp_req *)rb_msg.param;
+ struct timespec ts;
+ struct primary_alloc_req_state *state =
+ &entry->alloc_state;
+ int ret;
+
+ memset(&rb_msg, 0, sizeof(rb_msg));
+
+ /* we've failed to sync, so do a rollback */
+ rollback_expand_heap(state->ms, state->ms_len, state->elem,
+ state->map_addr, state->map_len);
+
+ /* send rollback request */
+ rb_msg.num_fds = 0;
+ rb_msg.len_param = sizeof(*rb);
+ strlcpy(rb_msg.name, MP_ACTION_ROLLBACK, sizeof(rb_msg.name));
+
+ ts.tv_nsec = 0;
+ ts.tv_sec = MP_TIMEOUT_S;
+
+ /* sync requests carry no data */
+ rb->t = REQ_TYPE_SYNC;
+ rb->id = entry->user_req.id;
+
+		/* there may be a stray timeout still waiting */
+ do {
+ ret = rte_mp_request_async(&rb_msg, &ts,
+ handle_rollback_response);
+ } while (ret != 0 && rte_errno == EEXIST);
+ if (ret != 0) {
+ RTE_LOG(ERR, EAL, "Could not send rollback request to secondary process\n");
+
+ /* we couldn't send rollback request, but that's OK -
+ * secondary will time out, and memory has been removed
+ * from heap anyway.
+ */
+ TAILQ_REMOVE(&mp_request_list.list, entry, next);
+ free(state->ms);
+ free(entry);
+ goto fail;
+ }
+ } else {
+		RTE_LOG(ERR, EAL, "Unexpected response to sync request of unknown type\n");
+ goto fail;
+ }
+
+ pthread_mutex_unlock(&mp_request_list.lock);
+ return 0;
+fail:
+ pthread_mutex_unlock(&mp_request_list.lock);
+ return -1;
+}
+
+static int
+handle_rollback_response(const struct rte_mp_msg *request,
+ const struct rte_mp_reply *reply __rte_unused)
+{
+ struct rte_mp_msg msg;
+ struct malloc_mp_req *resp = (struct malloc_mp_req *)msg.param;
+ const struct malloc_mp_req *mpreq =
+ (const struct malloc_mp_req *)request->param;
+ struct mp_request *entry;
+
+ /* lock the request */
+ pthread_mutex_lock(&mp_request_list.lock);
+
+	memset(&msg, 0, sizeof(msg));
+
+ entry = find_request_by_id(mpreq->id);
+ if (entry == NULL) {
+ RTE_LOG(ERR, EAL, "Wrong request ID\n");
+ goto fail;
+ }
+
+ if (entry->user_req.t != REQ_TYPE_ALLOC) {
+ RTE_LOG(ERR, EAL, "Unexpected active request\n");
+ goto fail;
+ }
+
+ /* we don't care if rollback succeeded, request still failed */
+ resp->t = REQ_TYPE_ALLOC;
+ resp->result = REQ_RESULT_FAIL;
+ resp->id = mpreq->id;
+ msg.num_fds = 0;
+ msg.len_param = sizeof(*resp);
+ strlcpy(msg.name, MP_ACTION_RESPONSE, sizeof(msg.name));
+
+ if (rte_mp_sendmsg(&msg))
+ RTE_LOG(ERR, EAL, "Could not send message to secondary process\n");
+
+ /* clean up */
+ TAILQ_REMOVE(&mp_request_list.list, entry, next);
+ free(entry->alloc_state.ms);
+ free(entry);
+
+ pthread_mutex_unlock(&mp_request_list.lock);
+ return 0;
+fail:
+ pthread_mutex_unlock(&mp_request_list.lock);
+ return -1;
+}
+
+/* final stage of the request from secondary */
+static int
+handle_response(const struct rte_mp_msg *msg, const void *peer __rte_unused)
+{
+ const struct malloc_mp_req *m =
+ (const struct malloc_mp_req *)msg->param;
+ struct mp_request *entry;
+
+ pthread_mutex_lock(&mp_request_list.lock);
+
+ entry = find_request_by_id(m->id);
+ if (entry != NULL) {
+ /* update request status */
+ entry->user_req.result = m->result;
+
+ entry->state = REQ_STATE_COMPLETE;
+
+ /* trigger thread wakeup */
+ pthread_cond_signal(&entry->cond);
+ }
+
+ pthread_mutex_unlock(&mp_request_list.lock);
+
+ return 0;
+}
+
+/* synchronously request a memory map sync; this is only called when the
+ * primary process initiates an allocation.
+ */
+int
+request_sync(void)
+{
+ struct rte_mp_msg msg;
+ struct rte_mp_reply reply;
+ struct malloc_mp_req *req = (struct malloc_mp_req *)msg.param;
+ struct timespec ts;
+ int i, ret;
+
+ memset(&msg, 0, sizeof(msg));
+ memset(&reply, 0, sizeof(reply));
+
+ /* no need to create tailq entries as this is entirely synchronous */
+
+ msg.num_fds = 0;
+ msg.len_param = sizeof(*req);
+ strlcpy(msg.name, MP_ACTION_SYNC, sizeof(msg.name));
+
+ /* sync request carries no data */
+ req->t = REQ_TYPE_SYNC;
+ req->id = get_unique_id();
+
+ ts.tv_nsec = 0;
+ ts.tv_sec = MP_TIMEOUT_S;
+
+	/* there may be a stray timeout still waiting */
+ do {
+ ret = rte_mp_request_sync(&msg, &reply, &ts);
+ } while (ret != 0 && rte_errno == EEXIST);
+ if (ret != 0) {
+ RTE_LOG(ERR, EAL, "Could not send sync request to secondary process\n");
+ ret = -1;
+ goto out;
+ }
+
+ if (reply.nb_received != reply.nb_sent) {
+ RTE_LOG(ERR, EAL, "Not all secondaries have responded\n");
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < reply.nb_received; i++) {
+ struct malloc_mp_req *resp =
+ (struct malloc_mp_req *)reply.msgs[i].param;
+ if (resp->t != REQ_TYPE_SYNC) {
+ RTE_LOG(ERR, EAL, "Unexpected response from secondary\n");
+ ret = -1;
+ goto out;
+ }
+ if (resp->id != req->id) {
+ RTE_LOG(ERR, EAL, "Wrong request ID\n");
+ ret = -1;
+ goto out;
+ }
+ if (resp->result != REQ_RESULT_SUCCESS) {
+ RTE_LOG(ERR, EAL, "Secondary process failed to synchronize\n");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ free(reply.msgs);
+ return ret;
+}
+
+/* this is a synchronous wrapper around a set of asynchronous requests to the
+ * primary process. it initiates a request and waits until the response comes.
+ */
+int
+request_to_primary(struct malloc_mp_req *user_req)
+{
+ struct rte_mp_msg msg;
+ struct malloc_mp_req *msg_req = (struct malloc_mp_req *)msg.param;
+ struct mp_request *entry;
+ struct timespec ts;
+ struct timeval now;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ memset(&ts, 0, sizeof(ts));
+
+ pthread_mutex_lock(&mp_request_list.lock);
+
+ entry = malloc(sizeof(*entry));
+ if (entry == NULL) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memory for request\n");
+ goto fail;
+ }
+
+ memset(entry, 0, sizeof(*entry));
+
+ if (gettimeofday(&now, NULL) < 0) {
+ RTE_LOG(ERR, EAL, "Cannot get current time\n");
+ goto fail;
+ }
+
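+	/* pthread_cond_timedwait() takes an absolute deadline, so compute it
+	 * as the current time plus the request timeout
+	 */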
+ ts.tv_nsec = (now.tv_usec * 1000) % 1000000000;
+ ts.tv_sec = now.tv_sec + MP_TIMEOUT_S +
+ (now.tv_usec * 1000) / 1000000000;
+
+ /* initialize the request */
+ pthread_cond_init(&entry->cond, NULL);
+
+ msg.num_fds = 0;
+ msg.len_param = sizeof(*msg_req);
+ strlcpy(msg.name, MP_ACTION_REQUEST, sizeof(msg.name));
+
+ /* (attempt to) get a unique id */
+ user_req->id = get_unique_id();
+
+ /* copy contents of user request into the message */
+ memcpy(msg_req, user_req, sizeof(*msg_req));
+
+ if (rte_mp_sendmsg(&msg)) {
+ RTE_LOG(ERR, EAL, "Cannot send message to primary\n");
+ goto fail;
+ }
+
+ /* copy contents of user request into active request */
+ memcpy(&entry->user_req, user_req, sizeof(*user_req));
+
+ /* mark request as in progress */
+ entry->state = REQ_STATE_ACTIVE;
+
+ TAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);
+
+ /* finally, wait on timeout */
+ do {
+ ret = pthread_cond_timedwait(&entry->cond,
+ &mp_request_list.lock, &ts);
+ } while (ret != 0 && ret != ETIMEDOUT);
+
+ if (entry->state != REQ_STATE_COMPLETE) {
+ RTE_LOG(ERR, EAL, "Request timed out\n");
+ ret = -1;
+ } else {
+ ret = 0;
+ user_req->result = entry->user_req.result;
+ }
+ TAILQ_REMOVE(&mp_request_list.list, entry, next);
+ free(entry);
+
+ pthread_mutex_unlock(&mp_request_list.lock);
+ return ret;
+fail:
+ pthread_mutex_unlock(&mp_request_list.lock);
+ free(entry);
+ return -1;
+}
+
+int
+register_mp_requests(void)
+{
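+	/* the primary handles allocation/free requests from secondaries, while
+	 * secondaries handle sync/rollback probes and final responses
+	 */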
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (rte_mp_action_register(MP_ACTION_REQUEST, handle_request)) {
+ RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
+ MP_ACTION_REQUEST);
+ return -1;
+ }
+ } else {
+ if (rte_mp_action_register(MP_ACTION_SYNC, handle_sync)) {
+ RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
+ MP_ACTION_SYNC);
+ return -1;
+ }
+ if (rte_mp_action_register(MP_ACTION_ROLLBACK, handle_sync)) {
+ RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
+				MP_ACTION_ROLLBACK);
+ return -1;
+ }
+ if (rte_mp_action_register(MP_ACTION_RESPONSE,
+ handle_response)) {
+ RTE_LOG(ERR, EAL, "Couldn't register '%s' action\n",
+ MP_ACTION_RESPONSE);
+ return -1;
+ }
+ }
+ return 0;
+}
diff --git a/lib/librte_eal/common/malloc_mp.h b/lib/librte_eal/common/malloc_mp.h
new file mode 100644
index 00000000..2b86b76f
--- /dev/null
+++ b/lib/librte_eal/common/malloc_mp.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef MALLOC_MP_H
+#define MALLOC_MP_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_random.h>
+#include <rte_spinlock.h>
+#include <rte_tailq.h>
+
+/* forward declarations */
+struct malloc_heap;
+struct rte_memseg;
+
+/* multiprocess synchronization structures for malloc */
+enum malloc_req_type {
+ REQ_TYPE_ALLOC, /**< ask primary to allocate */
+ REQ_TYPE_FREE, /**< ask primary to free */
+ REQ_TYPE_SYNC /**< ask secondary to synchronize its memory map */
+};
+
+enum malloc_req_result {
+ REQ_RESULT_SUCCESS,
+ REQ_RESULT_FAIL
+};
+
+struct malloc_req_alloc {
+ struct malloc_heap *heap;
+ uint64_t page_sz;
+ size_t elt_size;
+ int socket;
+ unsigned int flags;
+ size_t align;
+ size_t bound;
+ bool contig;
+};
+
+struct malloc_req_free {
+ RTE_STD_C11
+ union {
+ void *addr;
+ uint64_t addr_64;
+ };
+ uint64_t len;
+};
+
+struct malloc_mp_req {
+ enum malloc_req_type t;
+ RTE_STD_C11
+ union {
+ struct malloc_req_alloc alloc_req;
+ struct malloc_req_free free_req;
+ };
+ uint64_t id; /**< not to be populated by caller */
+ enum malloc_req_result result;
+};
+
+int
+register_mp_requests(void);
+
+int
+request_to_primary(struct malloc_mp_req *req);
+
+/* synchronous memory map sync request */
+int
+request_sync(void);
+
+/* functions from malloc_heap exposed here */
+int
+malloc_heap_free_pages(void *aligned_start, size_t aligned_len);
+
+struct malloc_elem *
+alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
+ int socket, unsigned int flags, size_t align, size_t bound,
+ bool contig, struct rte_memseg **ms, int n_segs);
+
+void
+rollback_expand_heap(struct rte_memseg **ms, int n_segs,
+ struct malloc_elem *elem, void *map_addr, size_t map_len);
+
+#endif /* MALLOC_MP_H */
diff --git a/lib/librte_eal/common/meson.build b/lib/librte_eal/common/meson.build
index 82b8910f..56005bea 100644
--- a/lib/librte_eal/common/meson.build
+++ b/lib/librte_eal/common/meson.build
@@ -8,13 +8,16 @@ common_objs = []
common_sources = files(
'eal_common_bus.c',
'eal_common_cpuflags.c',
+ 'eal_common_class.c',
'eal_common_devargs.c',
'eal_common_dev.c',
'eal_common_errno.c',
+ 'eal_common_fbarray.c',
'eal_common_hexdump.c',
'eal_common_launch.c',
'eal_common_lcore.c',
'eal_common_log.c',
+ 'eal_common_memalloc.c',
'eal_common_memory.c',
'eal_common_memzone.c',
'eal_common_options.c',
@@ -23,8 +26,10 @@ common_sources = files(
'eal_common_tailqs.c',
'eal_common_thread.c',
'eal_common_timer.c',
+ 'eal_common_uuid.c',
'malloc_elem.c',
'malloc_heap.c',
+ 'malloc_mp.c',
'rte_keepalive.c',
'rte_malloc.c',
'rte_reciprocal.c',
@@ -43,6 +48,7 @@ common_headers = files(
'include/rte_branch_prediction.h',
'include/rte_bus.h',
'include/rte_bitmap.h',
+ 'include/rte_class.h',
'include/rte_common.h',
'include/rte_debug.h',
'include/rte_devargs.h',
@@ -51,6 +57,7 @@ common_headers = files(
'include/rte_eal_memconfig.h',
'include/rte_eal_interrupts.h',
'include/rte_errno.h',
+ 'include/rte_fbarray.h',
'include/rte_hexdump.h',
'include/rte_interrupts.h',
'include/rte_keepalive.h',
@@ -71,6 +78,7 @@ common_headers = files(
'include/rte_string_fns.h',
'include/rte_tailq.h',
'include/rte_time.h',
+ 'include/rte_uuid.h',
'include/rte_version.h')
# special case install the generic headers, since they go in a subdir
diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c
index e0e0d0b3..b51a6d11 100644
--- a/lib/librte_eal/common/rte_malloc.c
+++ b/lib/librte_eal/common/rte_malloc.c
@@ -29,20 +29,17 @@
void rte_free(void *addr)
{
if (addr == NULL) return;
- if (malloc_elem_free(malloc_elem_from_data(addr)) < 0)
- rte_panic("Fatal error: Invalid memory\n");
+ if (malloc_heap_free(malloc_elem_from_data(addr)) < 0)
+ RTE_LOG(ERR, EAL, "Error: Invalid memory\n");
}
/*
* Allocate memory on specified heap.
*/
void *
-rte_malloc_socket(const char *type, size_t size, unsigned align, int socket_arg)
+rte_malloc_socket(const char *type, size_t size, unsigned int align,
+ int socket_arg)
{
- struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
- int socket, i;
- void *ret;
-
/* return NULL if size is 0 or alignment is not power-of-2 */
if (size == 0 || (align && !rte_is_power_of_2(align)))
return NULL;
@@ -50,33 +47,12 @@ rte_malloc_socket(const char *type, size_t size, unsigned align, int socket_arg)
if (!rte_eal_has_hugepages())
socket_arg = SOCKET_ID_ANY;
- if (socket_arg == SOCKET_ID_ANY)
- socket = malloc_get_numa_socket();
- else
- socket = socket_arg;
-
/* Check socket parameter */
- if (socket >= RTE_MAX_NUMA_NODES)
+ if (socket_arg >= RTE_MAX_NUMA_NODES)
return NULL;
- ret = malloc_heap_alloc(&mcfg->malloc_heaps[socket], type,
- size, 0, align == 0 ? 1 : align, 0);
- if (ret != NULL || socket_arg != SOCKET_ID_ANY)
- return ret;
-
- /* try other heaps */
- for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
- /* we already tried this one */
- if (i == socket)
- continue;
-
- ret = malloc_heap_alloc(&mcfg->malloc_heaps[i], type,
- size, 0, align == 0 ? 1 : align, 0);
- if (ret != NULL)
- return ret;
- }
-
- return NULL;
+ return malloc_heap_alloc(type, size, socket_arg, 0,
+ align == 0 ? 1 : align, 0, false);
}
/*
@@ -134,13 +110,15 @@ rte_realloc(void *ptr, size_t size, unsigned align)
return rte_malloc(NULL, size, align);
struct malloc_elem *elem = malloc_elem_from_data(ptr);
- if (elem == NULL)
- rte_panic("Fatal error: memory corruption detected\n");
+ if (elem == NULL) {
+ RTE_LOG(ERR, EAL, "Error: memory corruption detected\n");
+ return NULL;
+ }
size = RTE_CACHE_LINE_ROUNDUP(size), align = RTE_CACHE_LINE_ROUNDUP(align);
/* check alignment matches first, and if ok, see if we can resize block */
if (RTE_PTR_ALIGN(ptr,align) == ptr &&
- malloc_elem_resize(elem, size) == 0)
+ malloc_heap_resize(elem, size) == 0)
return ptr;
/* either alignment is off, or we have no room to expand,
@@ -182,6 +160,23 @@ rte_malloc_get_socket_stats(int socket,
}
/*
+ * Function to dump contents of all heaps
+ */
+void __rte_experimental
+rte_malloc_dump_heaps(FILE *f)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ unsigned int idx;
+
+ for (idx = 0; idx < rte_socket_count(); idx++) {
+ unsigned int socket = rte_socket_id_by_idx(idx);
+		fprintf(f, "Heap on socket %u:\n", socket);
+ malloc_heap_dump(&mcfg->malloc_heaps[socket], f);
+ }
+}
+
+/*
* Print stats on memory type. If type is NULL, info on all types is printed
*/
void
@@ -222,17 +217,21 @@ rte_malloc_set_limit(__rte_unused const char *type,
rte_iova_t
rte_malloc_virt2iova(const void *addr)
{
- rte_iova_t iova;
- const struct malloc_elem *elem = malloc_elem_from_data(addr);
+ const struct rte_memseg *ms;
+ struct malloc_elem *elem = malloc_elem_from_data(addr);
+
if (elem == NULL)
return RTE_BAD_IOVA;
- if (elem->ms->iova == RTE_BAD_IOVA)
- return RTE_BAD_IOVA;
if (rte_eal_iova_mode() == RTE_IOVA_VA)
- iova = (uintptr_t)addr;
- else
- iova = elem->ms->iova +
- RTE_PTR_DIFF(addr, elem->ms->addr);
- return iova;
+ return (uintptr_t) addr;
+
+ ms = rte_mem_virt2memseg(addr, elem->msl);
+ if (ms == NULL)
+ return RTE_BAD_IOVA;
+
+ if (ms->iova == RTE_BAD_IOVA)
+ return RTE_BAD_IOVA;
+
+ return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
}
diff --git a/lib/librte_eal/common/rte_service.c b/lib/librte_eal/common/rte_service.c
index be9b5e6d..8767c722 100644
--- a/lib/librte_eal/common/rte_service.c
+++ b/lib/librte_eal/common/rte_service.c
@@ -52,6 +52,7 @@ struct rte_service_spec_impl {
rte_atomic32_t num_mapped_cores;
uint64_t calls;
uint64_t cycles_spent;
+ uint8_t active_on_lcore[RTE_MAX_LCORE];
} __rte_cache_aligned;
/* the internal values of a service core */
@@ -61,7 +62,7 @@ struct core_state {
uint8_t runstate; /* running or stopped */
uint8_t is_service_core; /* set if core is currently a service core */
- /* extreme statistics */
+ uint64_t loops;
uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
} __rte_cache_aligned;
@@ -115,7 +116,7 @@ fail_mem:
return -ENOMEM;
}
-void __rte_experimental
+void
rte_service_finalize(void)
{
if (!rte_service_library_initialized)
@@ -161,7 +162,7 @@ service_mt_safe(struct rte_service_spec_impl *s)
return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}
-int32_t __rte_experimental
+int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
struct rte_service_spec_impl *s;
@@ -175,7 +176,7 @@ rte_service_set_stats_enable(uint32_t id, int32_t enabled)
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
struct rte_service_spec_impl *s;
@@ -189,13 +190,13 @@ rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
return 0;
}
-uint32_t __rte_experimental
+uint32_t
rte_service_get_count(void)
{
return rte_service_count;
}
-int32_t __rte_experimental
+int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
if (!service_id)
@@ -213,7 +214,7 @@ rte_service_get_by_name(const char *name, uint32_t *service_id)
return -ENODEV;
}
-const char * __rte_experimental
+const char *
rte_service_get_name(uint32_t id)
{
struct rte_service_spec_impl *s;
@@ -221,7 +222,7 @@ rte_service_get_name(uint32_t id)
return s->spec.name;
}
-int32_t __rte_experimental
+int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
struct rte_service_spec_impl *s;
@@ -229,7 +230,7 @@ rte_service_probe_capability(uint32_t id, uint32_t capability)
return !!(s->spec.capabilities & capability);
}
-int32_t __rte_experimental
+int32_t
rte_service_component_register(const struct rte_service_spec *spec,
uint32_t *id_ptr)
{
@@ -262,7 +263,7 @@ rte_service_component_register(const struct rte_service_spec *spec,
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_component_unregister(uint32_t id)
{
uint32_t i;
@@ -283,7 +284,7 @@ rte_service_component_unregister(uint32_t id)
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
struct rte_service_spec_impl *s;
@@ -298,7 +299,7 @@ rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
struct rte_service_spec_impl *s;
@@ -313,7 +314,7 @@ rte_service_runstate_set(uint32_t id, uint32_t runstate)
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_runstate_get(uint32_t id)
{
struct rte_service_spec_impl *s;
@@ -347,15 +348,19 @@ rte_service_runner_do_callback(struct rte_service_spec_impl *s,
static inline int32_t
-service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
+service_run(uint32_t i, int lcore, struct core_state *cs, uint64_t service_mask)
{
if (!service_valid(i))
return -EINVAL;
struct rte_service_spec_impl *s = &rte_services[i];
if (s->comp_runstate != RUNSTATE_RUNNING ||
s->app_runstate != RUNSTATE_RUNNING ||
- !(service_mask & (UINT64_C(1) << i)))
+ !(service_mask & (UINT64_C(1) << i))) {
+ s->active_on_lcore[lcore] = 0;
return -ENOEXEC;
+ }
+
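+	/* mark the service as active on this lcore, so that
+	 * rte_service_may_be_active() can report it
+	 */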
+ s->active_on_lcore[lcore] = 1;
/* check do we need cmpset, if MT safe or <= 1 core
* mapped, atomic ops are not required.
@@ -374,7 +379,26 @@ service_run(uint32_t i, struct core_state *cs, uint64_t service_mask)
return 0;
}
-int32_t __rte_experimental rte_service_run_iter_on_app_lcore(uint32_t id,
+int32_t __rte_experimental
+rte_service_may_be_active(uint32_t id)
+{
+ uint32_t ids[RTE_MAX_LCORE] = {0};
+ struct rte_service_spec_impl *s = &rte_services[id];
+ int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
+ int i;
+
+ if (!service_valid(id))
+ return -EINVAL;
+
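+	/* the service counts as active if it is marked active on any
+	 * service lcore
+	 */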
+ for (i = 0; i < lcore_count; i++) {
+ if (s->active_on_lcore[ids[i]])
+ return 1;
+ }
+
+ return 0;
+}
+
+int32_t rte_service_run_iter_on_app_lcore(uint32_t id,
uint32_t serialize_mt_unsafe)
{
/* run service on calling core, using all-ones as the service mask */
@@ -398,7 +422,7 @@ int32_t __rte_experimental rte_service_run_iter_on_app_lcore(uint32_t id,
return -EBUSY;
}
- int ret = service_run(id, cs, UINT64_MAX);
+ int ret = service_run(id, rte_lcore_id(), cs, UINT64_MAX);
if (serialize_mt_unsafe)
rte_atomic32_dec(&s->num_mapped_cores);
@@ -419,9 +443,11 @@ rte_service_runner_func(void *arg)
for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
/* return value ignored as no change to code flow */
- service_run(i, cs, service_mask);
+ service_run(i, lcore, cs, service_mask);
}
+ cs->loops++;
+
rte_smp_rmb();
}
@@ -430,7 +456,7 @@ rte_service_runner_func(void *arg)
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_count(void)
{
int32_t count = 0;
@@ -440,7 +466,7 @@ rte_service_lcore_count(void)
return count;
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
uint32_t count = rte_service_lcore_count();
@@ -463,7 +489,7 @@ rte_service_lcore_list(uint32_t array[], uint32_t n)
return count;
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
if (lcore >= RTE_MAX_LCORE)
@@ -476,7 +502,7 @@ rte_service_lcore_count_services(uint32_t lcore)
return __builtin_popcountll(cs->service_mask);
}
-int32_t __rte_experimental
+int32_t
rte_service_start_with_defaults(void)
{
/* create a default mapping from cores to services, then start the
@@ -562,7 +588,7 @@ service_update(struct rte_service_spec *service, uint32_t lcore,
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
struct rte_service_spec_impl *s;
@@ -571,7 +597,7 @@ rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
return service_update(&s->spec, lcore, &on, 0);
}
-int32_t __rte_experimental
+int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
struct rte_service_spec_impl *s;
@@ -597,7 +623,7 @@ set_lcore_state(uint32_t lcore, int32_t state)
lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_reset_all(void)
{
/* loop over cores, reset all to mask 0 */
@@ -617,7 +643,7 @@ rte_service_lcore_reset_all(void)
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_add(uint32_t lcore)
{
if (lcore >= RTE_MAX_LCORE)
@@ -636,7 +662,7 @@ rte_service_lcore_add(uint32_t lcore)
return rte_eal_wait_lcore(lcore);
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_del(uint32_t lcore)
{
if (lcore >= RTE_MAX_LCORE)
@@ -655,7 +681,7 @@ rte_service_lcore_del(uint32_t lcore)
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_start(uint32_t lcore)
{
if (lcore >= RTE_MAX_LCORE)
@@ -678,7 +704,7 @@ rte_service_lcore_start(uint32_t lcore)
return ret;
}
-int32_t __rte_experimental
+int32_t
rte_service_lcore_stop(uint32_t lcore)
{
if (lcore >= RTE_MAX_LCORE)
@@ -708,7 +734,7 @@ rte_service_lcore_stop(uint32_t lcore)
return 0;
}
-int32_t __rte_experimental
+int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint32_t *attr_value)
{
struct rte_service_spec_impl *s;
@@ -729,6 +755,28 @@ rte_service_attr_get(uint32_t id, uint32_t attr_id, uint32_t *attr_value)
}
}
+int32_t __rte_experimental
+rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
+ uint64_t *attr_value)
+{
+ struct core_state *cs;
+
+ if (lcore >= RTE_MAX_LCORE || !attr_value)
+ return -EINVAL;
+
+ cs = &lcore_states[lcore];
+ if (!cs->is_service_core)
+ return -ENOTSUP;
+
+ switch (attr_id) {
+ case RTE_SERVICE_LCORE_ATTR_LOOPS:
+ *attr_value = cs->loops;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static void
rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
uint64_t all_cycles, uint32_t reset)
@@ -753,7 +801,7 @@ rte_service_dump_one(FILE *f, struct rte_service_spec_impl *s,
s->cycles_spent, s->cycles_spent / calls);
}
-int32_t __rte_experimental
+int32_t
rte_service_attr_reset_all(uint32_t id)
{
struct rte_service_spec_impl *s;
@@ -764,6 +812,23 @@ rte_service_attr_reset_all(uint32_t id)
return 0;
}
+int32_t __rte_experimental
+rte_service_lcore_attr_reset_all(uint32_t lcore)
+{
+ struct core_state *cs;
+
+ if (lcore >= RTE_MAX_LCORE)
+ return -EINVAL;
+
+ cs = &lcore_states[lcore];
+ if (!cs->is_service_core)
+ return -ENOTSUP;
+
+ cs->loops = 0;
+
+ return 0;
+}
+
static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
{
@@ -781,7 +846,8 @@ service_dump_calls_per_lcore(FILE *f, uint32_t lcore, uint32_t reset)
fprintf(f, "\n");
}
-int32_t __rte_experimental rte_service_dump(FILE *f, uint32_t id)
+int32_t
+rte_service_dump(FILE *f, uint32_t id)
{
uint32_t i;
int print_one = (id != UINT32_MAX);
diff --git a/lib/librte_eal/linuxapp/Makefile b/lib/librte_eal/linuxapp/Makefile
index aa52a01e..a0fffa98 100644
--- a/lib/librte_eal/linuxapp/Makefile
+++ b/lib/librte_eal/linuxapp/Makefile
@@ -4,8 +4,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal
-DIRS-$(CONFIG_RTE_EAL_IGB_UIO) += igb_uio
-DIRS-$(CONFIG_RTE_KNI_KMOD) += kni
DEPDIRS-kni := eal
CFLAGS += -DALLOW_EXPERIMENTAL_API
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
index 7e5bbe88..fd92c75c 100644
--- a/lib/librte_eal/linuxapp/eal/Makefile
+++ b/lib/librte_eal/linuxapp/eal/Makefile
@@ -10,7 +10,7 @@ ARCH_DIR ?= $(RTE_ARCH)
EXPORT_MAP := ../../rte_eal_version.map
VPATH += $(RTE_SDK)/lib/librte_eal/common/arch/$(ARCH_DIR)
-LIBABIVER := 6
+LIBABIVER := 8
VPATH += $(RTE_SDK)/lib/librte_eal/common
@@ -24,23 +24,27 @@ LDLIBS += -ldl
LDLIBS += -lpthread
LDLIBS += -lgcc_s
LDLIBS += -lrt
+LDLIBS += -lrte_kvargs
ifeq ($(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),y)
LDLIBS += -lnuma
endif
# specific to linuxapp exec-env
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) := eal.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_cpuflags.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_hugepage_info.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_thread.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_log.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio_mp_sync.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_memalloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_debug.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_lcore.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_timer.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_interrupts.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_alarm.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_dev.c
# from common dir
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_lcore.c
@@ -48,6 +52,7 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_timer.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_memzone.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_log.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_memalloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_memory.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_tailqs.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_errno.c
@@ -56,14 +61,18 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_hypervisor.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_string_fns.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_hexdump.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_devargs.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_class.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_bus.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_dev.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_options.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_thread.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_proc.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_fbarray.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_common_uuid.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_malloc.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_elem.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_heap.c
+SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += malloc_mp.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_keepalive.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_service.c
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += rte_reciprocal.c
@@ -81,6 +90,7 @@ CFLAGS_eal_interrupts.o := -D_GNU_SOURCE
CFLAGS_eal_vfio_mp_sync.o := -D_GNU_SOURCE
CFLAGS_eal_timer.o := -D_GNU_SOURCE
CFLAGS_eal_lcore.o := -D_GNU_SOURCE
+CFLAGS_eal_memalloc.o := -D_GNU_SOURCE
CFLAGS_eal_thread.o := -D_GNU_SOURCE
CFLAGS_eal_log.o := -D_GNU_SOURCE
CFLAGS_eal_common_log.o := -D_GNU_SOURCE
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 38306bf5..e59ac657 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -74,8 +74,8 @@ static int mem_cfg_fd = -1;
static struct flock wr_lock = {
.l_type = F_WRLCK,
.l_whence = SEEK_SET,
- .l_start = offsetof(struct rte_mem_config, memseg),
- .l_len = sizeof(early_mem_config.memseg),
+ .l_start = offsetof(struct rte_mem_config, memsegs),
+ .l_len = sizeof(early_mem_config.memsegs),
};
/* Address of global and public configuration */
@@ -92,20 +92,72 @@ struct internal_config internal_config;
/* used by rte_rdtsc() */
int rte_cycles_vmware_tsc_map;
-/* Return user provided mbuf pool ops name */
-const char * __rte_experimental
-rte_eal_mbuf_user_pool_ops(void)
+/* platform-specific runtime dir */
+static char runtime_dir[PATH_MAX];
+
+static const char *default_runtime_dir = "/var/run";
+
+int
+eal_create_runtime_dir(void)
{
- return internal_config.user_mbuf_pool_ops_name;
+ const char *directory = default_runtime_dir;
+ const char *xdg_runtime_dir = getenv("XDG_RUNTIME_DIR");
+ const char *fallback = "/tmp";
+ char tmp[PATH_MAX];
+ int ret;
+
+ if (getuid() != 0) {
+ /* try XDG path first, fall back to /tmp */
+ if (xdg_runtime_dir != NULL)
+ directory = xdg_runtime_dir;
+ else
+ directory = fallback;
+ }
+ /* create DPDK subdirectory under runtime dir */
+ ret = snprintf(tmp, sizeof(tmp), "%s/dpdk", directory);
+	if (ret < 0 || ret >= (int)sizeof(tmp)) {
+ RTE_LOG(ERR, EAL, "Error creating DPDK runtime path name\n");
+ return -1;
+ }
+
+ /* create prefix-specific subdirectory under DPDK runtime dir */
+ ret = snprintf(runtime_dir, sizeof(runtime_dir), "%s/%s",
+ tmp, internal_config.hugefile_prefix);
+	if (ret < 0 || ret >= (int)sizeof(runtime_dir)) {
+ RTE_LOG(ERR, EAL, "Error creating prefix-specific runtime path name\n");
+ return -1;
+ }
+
+ /* create the path if it doesn't exist. no "mkdir -p" here, so do it
+ * step by step.
+ */
+ ret = mkdir(tmp, 0700);
+ if (ret < 0 && errno != EEXIST) {
+ RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
+ tmp, strerror(errno));
+ return -1;
+ }
+
+ ret = mkdir(runtime_dir, 0700);
+ if (ret < 0 && errno != EEXIST) {
+ RTE_LOG(ERR, EAL, "Error creating '%s': %s\n",
+ runtime_dir, strerror(errno));
+ return -1;
+ }
+
+ return 0;
}
-/* Return mbuf pool ops name */
const char *
-rte_eal_mbuf_default_mempool_ops(void)
+eal_get_runtime_dir(void)
{
- if (internal_config.user_mbuf_pool_ops_name == NULL)
- return RTE_MBUF_DEFAULT_MEMPOOL_OPS;
+ return runtime_dir;
+}
+/* Return user provided mbuf pool ops name */
+const char *
+rte_eal_mbuf_user_pool_ops(void)
+{
return internal_config.user_mbuf_pool_ops_name;
}
@@ -282,12 +334,17 @@ eal_proc_type_detect(void)
enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
const char *pathname = eal_runtime_config_path();
- /* if we can open the file but not get a write-lock we are a secondary
- * process. NOTE: if we get a file handle back, we keep that open
- * and don't close it to prevent a race condition between multiple opens */
- if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
- (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
- ptype = RTE_PROC_SECONDARY;
+	/* if there is no shared config, there can be no secondary processes */
+ if (!internal_config.no_shconf) {
+ /* if we can open the file but not get a write-lock we are a
+ * secondary process. NOTE: if we get a file handle back, we
+ * keep that open and don't close it to prevent a race condition
+ * between multiple opens.
+ */
+ if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
+ (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
+ ptype = RTE_PROC_SECONDARY;
+ }
RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
@@ -343,11 +400,14 @@ eal_usage(const char *prgname)
eal_common_usage();
printf("EAL Linux options:\n"
" --"OPT_SOCKET_MEM" Memory to allocate on sockets (comma separated values)\n"
+ " --"OPT_SOCKET_LIMIT" Limit memory allocation on sockets (comma separated values)\n"
" --"OPT_HUGE_DIR" Directory where hugetlbfs is mounted\n"
" --"OPT_FILE_PREFIX" Prefix for hugepage filenames\n"
" --"OPT_BASE_VIRTADDR" Base virtual address\n"
" --"OPT_CREATE_UIO_DEV" Create /dev/uioX (usually done by hotplug)\n"
" --"OPT_VFIO_INTR" Interrupt mode for VFIO (legacy|msi|msix)\n"
+ " --"OPT_LEGACY_MEM" Legacy memory mode (no dynamic allocation, contiguous segments)\n"
+ " --"OPT_SINGLE_FILE_SEGMENTS" Put all hugepage memory in single files\n"
"\n");
/* Allow the application to print its usage message too if hook is set */
if ( rte_application_usage_hook ) {
@@ -370,46 +430,45 @@ rte_set_application_usage_hook( rte_usage_hook_t usage_func )
}
static int
-eal_parse_socket_mem(char *socket_mem)
+eal_parse_socket_arg(char *strval, volatile uint64_t *socket_arg)
{
char * arg[RTE_MAX_NUMA_NODES];
char *end;
int arg_num, i, len;
uint64_t total_mem = 0;
- len = strnlen(socket_mem, SOCKET_MEM_STRLEN);
+ len = strnlen(strval, SOCKET_MEM_STRLEN);
if (len == SOCKET_MEM_STRLEN) {
RTE_LOG(ERR, EAL, "--socket-mem is too long\n");
return -1;
}
/* all other error cases will be caught later */
- if (!isdigit(socket_mem[len-1]))
+ if (!isdigit(strval[len-1]))
return -1;
/* split the optarg into separate socket values */
- arg_num = rte_strsplit(socket_mem, len,
+ arg_num = rte_strsplit(strval, len,
arg, RTE_MAX_NUMA_NODES, ',');
/* if split failed, or 0 arguments */
if (arg_num <= 0)
return -1;
- internal_config.force_sockets = 1;
-
/* parse each defined socket option */
errno = 0;
for (i = 0; i < arg_num; i++) {
+ uint64_t val;
end = NULL;
- internal_config.socket_mem[i] = strtoull(arg[i], &end, 10);
+ val = strtoull(arg[i], &end, 10);
/* check for invalid input */
if ((errno != 0) ||
(arg[i][0] == '\0') || (end == NULL) || (*end != '\0'))
return -1;
- internal_config.socket_mem[i] *= 1024ULL;
- internal_config.socket_mem[i] *= 1024ULL;
- total_mem += internal_config.socket_mem[i];
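+		/* values are specified in megabytes, convert to bytes */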
+ val <<= 20;
+ total_mem += val;
+ socket_arg[i] = val;
}
/* check if we have a positive amount of total memory */
@@ -557,13 +616,27 @@ eal_parse_args(int argc, char **argv)
break;
case OPT_SOCKET_MEM_NUM:
- if (eal_parse_socket_mem(optarg) < 0) {
+ if (eal_parse_socket_arg(optarg,
+ internal_config.socket_mem) < 0) {
RTE_LOG(ERR, EAL, "invalid parameters for --"
OPT_SOCKET_MEM "\n");
eal_usage(prgname);
ret = -1;
goto out;
}
+ internal_config.force_sockets = 1;
+ break;
+
+ case OPT_SOCKET_LIMIT_NUM:
+ if (eal_parse_socket_arg(optarg,
+ internal_config.socket_limit) < 0) {
+ RTE_LOG(ERR, EAL, "invalid parameters for --"
+ OPT_SOCKET_LIMIT "\n");
+ eal_usage(prgname);
+ ret = -1;
+ goto out;
+ }
+ internal_config.force_socket_limits = 1;
break;
case OPT_BASE_VIRTADDR_NUM:
@@ -591,7 +664,8 @@ eal_parse_args(int argc, char **argv)
break;
case OPT_MBUF_POOL_OPS_NAME_NUM:
- internal_config.user_mbuf_pool_ops_name = optarg;
+ internal_config.user_mbuf_pool_ops_name =
+ strdup(optarg);
break;
default:
@@ -613,6 +687,14 @@ eal_parse_args(int argc, char **argv)
}
}
+ /* create runtime data directory */
+ if (internal_config.no_shconf == 0 &&
+ eal_create_runtime_dir() < 0) {
+ RTE_LOG(ERR, EAL, "Cannot create runtime directory\n");
+ ret = -1;
+ goto out;
+ }
+
if (eal_adjust_config(&internal_config) != 0) {
ret = -1;
goto out;
@@ -638,23 +720,23 @@ out:
return ret;
}
+static int
+check_socket(const struct rte_memseg_list *msl, void *arg)
+{
+ int *socket_id = arg;
+
+ return *socket_id == msl->socket_id;
+}
+
static void
eal_check_mem_on_local_socket(void)
{
- const struct rte_memseg *ms;
- int i, socket_id;
+ int socket_id;
socket_id = rte_lcore_to_socket_id(rte_config.master_lcore);
- ms = rte_eal_get_physmem_layout();
-
- for (i = 0; i < RTE_MAX_MEMSEG; i++)
- if (ms[i].socket_id == socket_id &&
- ms[i].len > 0)
- return;
-
- RTE_LOG(WARNING, EAL, "WARNING: Master core has no "
- "memory on local socket!\n");
+ if (rte_memseg_list_walk(check_socket, &socket_id) == 0)
+ RTE_LOG(WARNING, EAL, "WARNING: Master core has no memory on local socket!\n");
}
static int
@@ -669,6 +751,8 @@ rte_eal_mcfg_complete(void)
/* ALL shared mem_config related INIT DONE */
if (rte_config.process_type == RTE_PROC_PRIMARY)
rte_config.mem_config->magic = RTE_MAGIC;
+
+ internal_config.init_complete = 1;
}
/*
@@ -689,24 +773,8 @@ rte_eal_iopl_init(void)
#ifdef VFIO_PRESENT
static int rte_eal_vfio_setup(void)
{
- int vfio_enabled = 0;
-
if (rte_vfio_enable("vfio"))
return -1;
- vfio_enabled = rte_vfio_is_enabled("vfio");
-
- if (vfio_enabled) {
-
- /* if we are primary process, create a thread to communicate with
- * secondary processes. the thread will use a socket to wait for
- * requests from secondary process to send open file descriptors,
- * because VFIO does not allow multiple open descriptors on a group or
- * VFIO container.
- */
- if (internal_config.process_type == RTE_PROC_PRIMARY &&
- vfio_mp_sync_setup() < 0)
- return -1;
- }
return 0;
}
@@ -779,6 +847,24 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+ rte_config_init();
+
+ if (rte_eal_intr_init() < 0) {
+ rte_eal_init_alert("Cannot init interrupt-handling thread\n");
+ return -1;
+ }
+
+ /* Put mp channel init before bus scan so that we can init the vdev
+ * bus through mp channel in the secondary process before the bus scan.
+ */
+ if (rte_mp_channel_init() < 0) {
+ rte_eal_init_alert("failed to init mp channel\n");
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rte_errno = EFAULT;
+ return -1;
+ }
+ }
+
if (rte_bus_scan()) {
rte_eal_init_alert("Cannot scan the buses for devices\n");
rte_errno = ENODEV;
@@ -798,13 +884,17 @@ rte_eal_init(int argc, char **argv)
"KNI module inserted\n");
}
- if (internal_config.no_hugetlbfs == 0 &&
- internal_config.process_type != RTE_PROC_SECONDARY &&
- eal_hugepage_info_init() < 0) {
- rte_eal_init_alert("Cannot get hugepage information.");
- rte_errno = EACCES;
- rte_atomic32_clear(&run_once);
- return -1;
+ if (internal_config.no_hugetlbfs == 0) {
+ /* rte_config isn't initialized yet */
+ ret = internal_config.process_type == RTE_PROC_PRIMARY ?
+ eal_hugepage_info_init() :
+ eal_hugepage_info_read();
+ if (ret < 0) {
+ rte_eal_init_alert("Cannot get hugepage information.");
+ rte_errno = EACCES;
+ rte_atomic32_clear(&run_once);
+ return -1;
+ }
}
if (internal_config.memory == 0 && internal_config.force_sockets == 0) {
@@ -825,8 +915,6 @@ rte_eal_init(int argc, char **argv)
rte_srand(rte_rdtsc());
- rte_config_init();
-
if (rte_eal_log_init(logid, internal_config.syslog_facility) < 0) {
rte_eal_init_alert("Cannot init logging.");
rte_errno = ENOMEM;
@@ -834,14 +922,6 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- if (rte_mp_channel_init() < 0) {
- rte_eal_init_alert("failed to init mp channel\n");
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- rte_errno = EFAULT;
- return -1;
- }
- }
-
#ifdef VFIO_PRESENT
if (rte_eal_vfio_setup() < 0) {
rte_eal_init_alert("Cannot init VFIO\n");
@@ -850,6 +930,15 @@ rte_eal_init(int argc, char **argv)
return -1;
}
#endif
+ /* in secondary processes, memory init may allocate additional fbarrays
+ * not present in primary processes, so to avoid any potential issues,
+ * initialize memzones first.
+ */
+ if (rte_eal_memzone_init() < 0) {
+ rte_eal_init_alert("Cannot init memzone\n");
+ rte_errno = ENODEV;
+ return -1;
+ }
if (rte_eal_memory_init() < 0) {
rte_eal_init_alert("Cannot init memory\n");
@@ -860,8 +949,8 @@ rte_eal_init(int argc, char **argv)
/* the directories are locked during eal_hugepage_info_init */
eal_hugedirs_unlock();
- if (rte_eal_memzone_init() < 0) {
- rte_eal_init_alert("Cannot init memzone\n");
+ if (rte_eal_malloc_heap_init() < 0) {
+ rte_eal_init_alert("Cannot init malloc heap\n");
rte_errno = ENODEV;
return -1;
}
@@ -888,17 +977,12 @@ rte_eal_init(int argc, char **argv)
eal_thread_init_master(rte_config.master_lcore);
- ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+ ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "Master lcore %u is ready (tid=%x;cpuset=[%s%s])\n",
rte_config.master_lcore, (int)thread_id, cpuset,
ret == 0 ? "" : "...");
- if (rte_eal_intr_init() < 0) {
- rte_eal_init_alert("Cannot init interrupt-handling thread\n");
- return -1;
- }
-
RTE_LCORE_FOREACH_SLAVE(i) {
/*
@@ -919,7 +1003,7 @@ rte_eal_init(int argc, char **argv)
rte_panic("Cannot create thread\n");
/* Set thread_name for aid in debugging. */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
+ snprintf(thread_name, sizeof(thread_name),
"lcore-slave-%d", i);
ret = rte_thread_setname(lcore_config[i].thread_id,
thread_name);
@@ -950,6 +1034,12 @@ rte_eal_init(int argc, char **argv)
return -1;
}
+#ifdef VFIO_PRESENT
+ /* Register mp action after probe() so that we got enough info */
+ if (rte_vfio_is_enabled("vfio") && vfio_mp_sync_setup() < 0)
+ return -1;
+#endif
+
/* initialize default service/lcore mappings and start running. Ignore
* -ENOTSUP, as it indicates no service coremask passed to EAL.
*/
@@ -964,9 +1054,26 @@ rte_eal_init(int argc, char **argv)
return fctret;
}
+static int
+mark_freeable(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
+ void *arg __rte_unused)
+{
+ /* ms is const, so find this memseg */
+ struct rte_memseg *found = rte_mem_virt2memseg(ms->addr, msl);
+
+ found->flags &= ~RTE_MEMSEG_FLAG_DO_NOT_FREE;
+
+ return 0;
+}
+
int __rte_experimental
rte_eal_cleanup(void)
{
+ /* if we're in a primary process, we need to mark hugepages as freeable
+ * so that finalization can release them back to the system.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_memseg_walk(mark_freeable, NULL);
rte_service_finalize();
return 0;
}
diff --git a/lib/librte_eal/linuxapp/eal/eal_alarm.c b/lib/librte_eal/linuxapp/eal/eal_alarm.c
index c115e823..391d2a65 100644
--- a/lib/librte_eal/linuxapp/eal/eal_alarm.c
+++ b/lib/librte_eal/linuxapp/eal/eal_alarm.c
@@ -19,7 +19,6 @@
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_errno.h>
-#include <rte_malloc.h>
#include <rte_spinlock.h>
#include <eal_private.h>
@@ -91,7 +90,7 @@ eal_alarm_callback(void *arg __rte_unused)
rte_spinlock_lock(&alarm_list_lk);
LIST_REMOVE(ap, next);
- rte_free(ap);
+ free(ap);
}
if (!LIST_EMPTY(&alarm_list)) {
@@ -122,7 +121,7 @@ rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg)
if (us < 1 || us > (UINT64_MAX - US_PER_S) || cb_fn == NULL)
return -EINVAL;
- new_alarm = rte_zmalloc(NULL, sizeof(*new_alarm), 0);
+ new_alarm = calloc(1, sizeof(*new_alarm));
if (new_alarm == NULL)
return -ENOMEM;
@@ -196,7 +195,7 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
if (ap->executing == 0) {
LIST_REMOVE(ap, next);
- rte_free(ap);
+ free(ap);
count++;
} else {
/* If calling from other context, mark that alarm is executing
@@ -220,7 +219,7 @@ rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
if (ap->executing == 0) {
LIST_REMOVE(ap, next);
- rte_free(ap);
+ free(ap);
count++;
ap = ap_prev;
} else if (pthread_equal(ap->executing_id, pthread_self()) == 0)
diff --git a/lib/librte_eal/linuxapp/eal/eal_cpuflags.c b/lib/librte_eal/linuxapp/eal/eal_cpuflags.c
new file mode 100644
index 00000000..d38296e1
--- /dev/null
+++ b/lib/librte_eal/linuxapp/eal/eal_cpuflags.c
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Red Hat, Inc.
+ */
+
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
+#if __GLIBC_PREREQ(2, 16)
+#include <sys/auxv.h>
+#define HAS_AUXV 1
+#endif
+#endif
+
+#include <rte_cpuflags.h>
+
+#ifndef HAS_AUXV
+static unsigned long
+getauxval(unsigned long type __rte_unused)
+{
+ errno = ENOTSUP;
+ return 0;
+}
+#endif
+
+#ifdef RTE_ARCH_64
+typedef Elf64_auxv_t Internal_Elfx_auxv_t;
+#else
+typedef Elf32_auxv_t Internal_Elfx_auxv_t;
+#endif
+
+/**
+ * Provides a method for retrieving values from the auxiliary vector and
+ * possibly running a string comparison.
+ *
+ * @return Always returns a result. When the result is 0, check errno
+ * to see if an error occurred during processing.
+ */
+static unsigned long
+_rte_cpu_getauxval(unsigned long type, const char *str)
+{
+ unsigned long val;
+
+ errno = 0;
+ val = getauxval(type);
+
+ if (!val && (errno == ENOTSUP || errno == ENOENT)) {
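+		/* getauxval() is unavailable or has no such entry, so fall
+		 * back to reading the auxiliary vector from /proc/self/auxv
+		 */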
+ int auxv_fd = open("/proc/self/auxv", O_RDONLY);
+ Internal_Elfx_auxv_t auxv;
+
+ if (auxv_fd == -1)
+ return 0;
+
+ errno = ENOENT;
+ while (read(auxv_fd, &auxv, sizeof(auxv)) == sizeof(auxv)) {
+ if (auxv.a_type == type) {
+ errno = 0;
+ val = auxv.a_un.a_val;
+ if (str)
+ val = strcmp((const char *)val, str);
+ break;
+ }
+ }
+ close(auxv_fd);
+ }
+
+ return val;
+}
+
+unsigned long
+rte_cpu_getauxval(unsigned long type)
+{
+ return _rte_cpu_getauxval(type, NULL);
+}
+
+int
+rte_cpu_strcmp_auxval(unsigned long type, const char *str)
+{
+ return _rte_cpu_getauxval(type, str);
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_dev.c b/lib/librte_eal/linuxapp/eal/eal_dev.c
new file mode 100644
index 00000000..1cf6aebf
--- /dev/null
+++ b/lib/librte_eal/linuxapp/eal/eal_dev.c
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <linux/netlink.h>
+
+#include <rte_string_fns.h>
+#include <rte_log.h>
+#include <rte_compat.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_interrupts.h>
+#include <rte_alarm.h>
+
+#include "eal_private.h"
+
+static struct rte_intr_handle intr_handle = {.fd = -1 };
+static bool monitor_started;
+
+#define EAL_UEV_MSG_LEN 4096
+#define EAL_UEV_MSG_ELEM_LEN 128
+
+static void dev_uev_handler(__rte_unused void *param);
+
+/* identify the system layer which reports this event. */
+enum eal_dev_event_subsystem {
+ EAL_DEV_EVENT_SUBSYSTEM_PCI, /* PCI bus device event */
+ EAL_DEV_EVENT_SUBSYSTEM_UIO, /* UIO driver device event */
+ EAL_DEV_EVENT_SUBSYSTEM_VFIO, /* VFIO driver device event */
+ EAL_DEV_EVENT_SUBSYSTEM_MAX
+};
+
+static int
+dev_uev_socket_fd_create(void)
+{
+ struct sockaddr_nl addr;
+ int ret;
+
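+	/* open a netlink socket subscribed to all uevent multicast groups, so
+	 * that kernel device events from every subsystem arrive on it
+	 */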
+ intr_handle.fd = socket(PF_NETLINK, SOCK_RAW | SOCK_CLOEXEC |
+ SOCK_NONBLOCK,
+ NETLINK_KOBJECT_UEVENT);
+ if (intr_handle.fd < 0) {
+		RTE_LOG(ERR, EAL, "Failed to create uevent fd.\n");
+ return -1;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.nl_family = AF_NETLINK;
+ addr.nl_pid = 0;
+ addr.nl_groups = 0xffffffff;
+
+ ret = bind(intr_handle.fd, (struct sockaddr *) &addr, sizeof(addr));
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Failed to bind uevent socket.\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ close(intr_handle.fd);
+ intr_handle.fd = -1;
+ return ret;
+}
+
+static int
+dev_uev_parse(const char *buf, struct rte_dev_event *event, int length)
+{
+ char action[EAL_UEV_MSG_ELEM_LEN];
+ char subsystem[EAL_UEV_MSG_ELEM_LEN];
+ char pci_slot_name[EAL_UEV_MSG_ELEM_LEN];
+ int i = 0;
+
+ memset(action, 0, EAL_UEV_MSG_ELEM_LEN);
+ memset(subsystem, 0, EAL_UEV_MSG_ELEM_LEN);
+ memset(pci_slot_name, 0, EAL_UEV_MSG_ELEM_LEN);
+
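+	/* the uevent payload is a sequence of NUL-separated "KEY=value"
+	 * strings; scan it one token at a time
+	 */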
+ while (i < length) {
+ for (; i < length; i++) {
+ if (*buf)
+ break;
+ buf++;
+ }
+		/*
+		 * only handle device uevents coming from the kernel; messages
+		 * originating from udev are ignored.
+		 */
+ if (!strncmp(buf, "libudev", 7)) {
+ buf += 7;
+ i += 7;
+ return -1;
+ }
+ if (!strncmp(buf, "ACTION=", 7)) {
+ buf += 7;
+ i += 7;
+ strlcpy(action, buf, sizeof(action));
+ } else if (!strncmp(buf, "SUBSYSTEM=", 10)) {
+ buf += 10;
+ i += 10;
+ strlcpy(subsystem, buf, sizeof(subsystem));
+ } else if (!strncmp(buf, "PCI_SLOT_NAME=", 14)) {
+ buf += 14;
+ i += 14;
+			strlcpy(pci_slot_name, buf, sizeof(pci_slot_name));
+ event->devname = strdup(pci_slot_name);
+ }
+ for (; i < length; i++) {
+ if (*buf == '\0')
+ break;
+ buf++;
+ }
+ }
+
+ /* parse the subsystem layer */
+ if (!strncmp(subsystem, "uio", 3))
+ event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_UIO;
+ else if (!strncmp(subsystem, "pci", 3))
+ event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_PCI;
+ else if (!strncmp(subsystem, "vfio", 4))
+ event->subsystem = EAL_DEV_EVENT_SUBSYSTEM_VFIO;
+ else
+ return -1;
+
+ /* parse the action type */
+ if (!strncmp(action, "add", 3))
+ event->type = RTE_DEV_EVENT_ADD;
+ else if (!strncmp(action, "remove", 6))
+ event->type = RTE_DEV_EVENT_REMOVE;
+ else
+ return -1;
+ return 0;
+}
+
+static void
+dev_delayed_unregister(void *param)
+{
+ rte_intr_callback_unregister(&intr_handle, dev_uev_handler, param);
+ close(intr_handle.fd);
+ intr_handle.fd = -1;
+}
+
+static void
+dev_uev_handler(__rte_unused void *param)
+{
+ struct rte_dev_event uevent;
+ int ret;
+ char buf[EAL_UEV_MSG_LEN];
+
+ memset(&uevent, 0, sizeof(struct rte_dev_event));
+ memset(buf, 0, EAL_UEV_MSG_LEN);
+
+ ret = recv(intr_handle.fd, buf, EAL_UEV_MSG_LEN, MSG_DONTWAIT);
+ if (ret < 0 && errno == EAGAIN)
+ return;
+ else if (ret <= 0) {
+		/* connection is closed or broken and cannot be recovered */
+ RTE_LOG(ERR, EAL, "uevent socket connection is broken.\n");
+ rte_eal_alarm_set(1, dev_delayed_unregister, NULL);
+ return;
+ }
+
+ ret = dev_uev_parse(buf, &uevent, EAL_UEV_MSG_LEN);
+ if (ret < 0) {
+		RTE_LOG(DEBUG, EAL, "Not a valid event that needs to be handled.\n");
+ return;
+ }
+
+	RTE_LOG(DEBUG, EAL, "received uevent (name:%s, type:%d, subsystem:%d)\n",
+ uevent.devname, uevent.type, uevent.subsystem);
+
+ if (uevent.devname)
+ dev_callback_process(uevent.devname, uevent.type);
+}
+
+int __rte_experimental
+rte_dev_event_monitor_start(void)
+{
+ int ret;
+
+ if (monitor_started)
+ return 0;
+
+ ret = dev_uev_socket_fd_create();
+ if (ret) {
+		RTE_LOG(ERR, EAL, "Failed to create device event fd.\n");
+ return -1;
+ }
+
+ intr_handle.type = RTE_INTR_HANDLE_DEV_EVENT;
+ ret = rte_intr_callback_register(&intr_handle, dev_uev_handler, NULL);
+
+ if (ret) {
+		RTE_LOG(ERR, EAL, "Failed to register uevent callback.\n");
+ return -1;
+ }
+
+ monitor_started = true;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_dev_event_monitor_stop(void)
+{
+ int ret;
+
+ if (!monitor_started)
+ return 0;
+
+ ret = rte_intr_callback_unregister(&intr_handle, dev_uev_handler,
+ (void *)-1);
+ if (ret < 0) {
+		RTE_LOG(ERR, EAL, "Failed to unregister uevent callback.\n");
+ return ret;
+ }
+
+ close(intr_handle.fd);
+ intr_handle.fd = -1;
+ monitor_started = false;
+ return 0;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
index 8bbf771a..3a7d4b22 100644
--- a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
+++ b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
@@ -14,7 +14,11 @@
#include <stdarg.h>
#include <unistd.h>
#include <errno.h>
+#include <sys/mman.h>
#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <linux/mman.h> /* for hugetlb-related flags */
#include <rte_memory.h>
#include <rte_eal.h>
@@ -30,6 +34,40 @@
#include "eal_filesystem.h"
static const char sys_dir_path[] = "/sys/kernel/mm/hugepages";
+static const char sys_pages_numa_dir_path[] = "/sys/devices/system/node";
+
+/*
+ * Uses mmap to create a shared memory area for storage of data
+ * Used in this file to store the hugepage file map on disk
+ */
+static void *
+map_shared_memory(const char *filename, const size_t mem_size, int flags)
+{
+ void *retval;
+ int fd = open(filename, flags, 0666);
+ if (fd < 0)
+ return NULL;
+ if (ftruncate(fd, mem_size) < 0) {
+ close(fd);
+ return NULL;
+ }
+ retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ close(fd);
+ return retval;
+}
+
+static void *
+open_shared_memory(const char *filename, const size_t mem_size)
+{
+ return map_shared_memory(filename, mem_size, O_RDWR);
+}
+
+static void *
+create_shared_memory(const char *filename, const size_t mem_size)
+{
+ return map_shared_memory(filename, mem_size, O_RDWR | O_CREAT);
+}
/* this function is only called from eal_hugepage_info_init which itself
* is only called from a primary process */
@@ -70,6 +108,45 @@ get_num_hugepages(const char *subdir)
return num_pages;
}
+static uint32_t
+get_num_hugepages_on_node(const char *subdir, unsigned int socket)
+{
+ char path[PATH_MAX], socketpath[PATH_MAX];
+ DIR *socketdir;
+ unsigned long num_pages = 0;
+ const char *nr_hp_file = "free_hugepages";
+
+ snprintf(socketpath, sizeof(socketpath), "%s/node%u/hugepages",
+ sys_pages_numa_dir_path, socket);
+
+ socketdir = opendir(socketpath);
+ if (socketdir) {
+ /* Keep calm and carry on */
+ closedir(socketdir);
+ } else {
+ /* Can't find socket dir, so ignore it */
+ return 0;
+ }
+
+ snprintf(path, sizeof(path), "%s/%s/%s",
+ socketpath, subdir, nr_hp_file);
+ if (eal_parse_sysfs_value(path, &num_pages) < 0)
+ return 0;
+
+ if (num_pages == 0)
+ RTE_LOG(WARNING, EAL, "No free hugepages reported in %s\n",
+ subdir);
+
+ /*
+ * we want to return a uint32_t and more than this looks suspicious
+ * anyway ...
+ */
+ if (num_pages > UINT32_MAX)
+ num_pages = UINT32_MAX;
+
+ return num_pages;
+}
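+
+/* e.g. for 2 MB pages on node 0 this reads
+ * /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
+ */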
+
static uint64_t
get_default_hp_size(void)
{
@@ -94,8 +171,8 @@ get_default_hp_size(void)
return size;
}
-static const char *
-get_hugepage_dir(uint64_t hugepage_sz)
+static int
+get_hugepage_dir(uint64_t hugepage_sz, char *hugedir, int len)
{
enum proc_mount_fieldnames {
DEVICE = 0,
@@ -113,7 +190,7 @@ get_hugepage_dir(uint64_t hugepage_sz)
const char split_tok = ' ';
char *splitstr[_FIELDNAME_MAX];
char buf[BUFSIZ];
- char *retval = NULL;
+ int retval = -1;
FILE *fd = fopen(proc_mounts, "r");
if (fd == NULL)
@@ -140,7 +217,8 @@ get_hugepage_dir(uint64_t hugepage_sz)
/* if no explicit page size, the default page size is compared */
if (pagesz_str == NULL){
if (hugepage_sz == default_size){
- retval = strdup(splitstr[MOUNTPT]);
+ strlcpy(hugedir, splitstr[MOUNTPT], len);
+ retval = 0;
break;
}
}
@@ -148,7 +226,8 @@ get_hugepage_dir(uint64_t hugepage_sz)
else {
uint64_t pagesz = rte_str_to_size(&pagesz_str[pagesize_opt_len]);
if (pagesz == hugepage_sz) {
- retval = strdup(splitstr[MOUNTPT]);
+ strlcpy(hugedir, splitstr[MOUNTPT], len);
+ retval = 0;
break;
}
}
@@ -207,11 +286,9 @@ clear_hugedir(const char * hugedir)
/* non-blocking lock */
lck_result = flock(fd, LOCK_EX | LOCK_NB);
- /* if lock succeeds, unlock and remove the file */
- if (lck_result != -1) {
- flock(fd, LOCK_UN);
+ /* if lock succeeds, remove the file */
+ if (lck_result != -1)
unlinkat(dir_fd, dirent->d_name, 0);
- }
close (fd);
dirent = readdir(dir);
}
@@ -238,17 +315,49 @@ compare_hpi(const void *a, const void *b)
return hpi_b->hugepage_sz - hpi_a->hugepage_sz;
}
-/*
- * when we initialize the hugepage info, everything goes
- * to socket 0 by default. it will later get sorted by memory
- * initialization procedure.
- */
-int
-eal_hugepage_info_init(void)
+static void
+calc_num_pages(struct hugepage_info *hpi, struct dirent *dirent)
{
- const char dirent_start_text[] = "hugepages-";
+ uint64_t total_pages = 0;
+ unsigned int i;
+
+ /*
+ * first, try to put all hugepages into relevant sockets, but
+ * if the first attempt fails, fall back to collecting all pages
+ * in one socket and sorting them later
+ */
+ total_pages = 0;
+ /* we also don't want to do this for legacy init */
+ if (!internal_config.legacy_mem)
+ for (i = 0; i < rte_socket_count(); i++) {
+ int socket = rte_socket_id_by_idx(i);
+ unsigned int num_pages =
+ get_num_hugepages_on_node(
+ dirent->d_name, socket);
+ hpi->num_pages[socket] = num_pages;
+ total_pages += num_pages;
+ }
+ /*
+ * we failed to sort memory from the get-go, so fall
+ * back to the old way
+ */
+ if (total_pages == 0) {
+ hpi->num_pages[0] = get_num_hugepages(dirent->d_name);
+
+#ifndef RTE_ARCH_64
+ /* for 32-bit systems, limit number of hugepages to
+ * 1GB per page size */
+ hpi->num_pages[0] = RTE_MIN(hpi->num_pages[0],
+ RTE_PGSIZE_1G / hpi->hugepage_sz);
+#endif
+ }
+}
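+
+/* e.g. on a 32-bit build with 2 MB pages, the clamp above allows at most
+ * RTE_PGSIZE_1G / RTE_PGSIZE_2M = 512 pages (1 GB) of that size.
+ */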
+
+static int
+hugepage_info_init(void)
+{
+ const char dirent_start_text[] = "hugepages-";
const size_t dirent_start_len = sizeof(dirent_start_text) - 1;
- unsigned i, num_sizes = 0;
+ unsigned int i, num_sizes = 0;
DIR *dir;
struct dirent *dirent;
@@ -273,10 +382,10 @@ eal_hugepage_info_init(void)
hpi = &internal_config.hugepage_info[num_sizes];
hpi->hugepage_sz =
rte_str_to_size(&dirent->d_name[dirent_start_len]);
- hpi->hugedir = get_hugepage_dir(hpi->hugepage_sz);
/* first, check if we have a mountpoint */
- if (hpi->hugedir == NULL) {
+ if (get_hugepage_dir(hpi->hugepage_sz,
+ hpi->hugedir, sizeof(hpi->hugedir)) < 0) {
uint32_t num_pages;
num_pages = get_num_hugepages(dirent->d_name);
@@ -286,6 +395,22 @@ eal_hugepage_info_init(void)
"%" PRIu64 " reserved, but no mounted "
"hugetlbfs found for that size\n",
num_pages, hpi->hugepage_sz);
+ /* if we have kernel support for reserving hugepages
+ * through mmap, and we're in in-memory mode, treat this
+ * page size as valid. we cannot be in legacy mode at
+ * this point because we've checked this earlier in the
+ * init process.
+ */
+#ifdef MAP_HUGE_SHIFT
+ if (internal_config.in_memory) {
+ RTE_LOG(DEBUG, EAL, "In-memory mode enabled, "
+ "hugepages of size %" PRIu64 " bytes "
+ "will be allocated anonymously\n",
+ hpi->hugepage_sz);
+ calc_num_pages(hpi, dirent);
+ num_sizes++;
+ }
+#endif
continue;
}
@@ -302,16 +427,7 @@ eal_hugepage_info_init(void)
if (clear_hugedir(hpi->hugedir) == -1)
break;
- /* for now, put all pages into socket 0,
- * later they will be sorted */
- hpi->num_pages[0] = get_num_hugepages(dirent->d_name);
-
-#ifndef RTE_ARCH_64
- /* for 32-bit systems, limit number of hugepages to
- * 1GB per page size */
- hpi->num_pages[0] = RTE_MIN(hpi->num_pages[0],
- RTE_PGSIZE_1G / hpi->hugepage_sz);
-#endif
+ calc_num_pages(hpi, dirent);
num_sizes++;
}
@@ -328,11 +444,82 @@ eal_hugepage_info_init(void)
sizeof(internal_config.hugepage_info[0]), compare_hpi);
/* now we have all info, check we have at least one valid size */
- for (i = 0; i < num_sizes; i++)
- if (internal_config.hugepage_info[i].hugedir != NULL &&
- internal_config.hugepage_info[i].num_pages[0] > 0)
+ for (i = 0; i < num_sizes; i++) {
+ /* pages may no longer all be on socket 0, so check all */
+ unsigned int j, num_pages = 0;
+ struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+
+ for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
+ num_pages += hpi->num_pages[j];
+ if (num_pages > 0)
return 0;
+ }
/* no valid hugepage mounts available, return error */
return -1;
}
+
+/*
+ * when we initialize the hugepage info, everything goes
+ * to socket 0 by default. it will later get sorted by the memory
+ * initialization procedure.
+ */
+int
+eal_hugepage_info_init(void)
+{
+ struct hugepage_info *hpi, *tmp_hpi;
+ unsigned int i;
+
+ if (hugepage_info_init() < 0)
+ return -1;
+
+ /* for no shared files mode, we're done */
+ if (internal_config.no_shconf)
+ return 0;
+
+ hpi = &internal_config.hugepage_info[0];
+
+ tmp_hpi = create_shared_memory(eal_hugepage_info_path(),
+ sizeof(internal_config.hugepage_info));
+ if (tmp_hpi == NULL) {
+ RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
+ return -1;
+ }
+
+ memcpy(tmp_hpi, hpi, sizeof(internal_config.hugepage_info));
+
+ /* we've copied file descriptors along with everything else, but they
+ * will be invalid in a secondary process, so overwrite them
+ */
+ for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+ struct hugepage_info *tmp = &tmp_hpi[i];
+ tmp->lock_descriptor = -1;
+ }
+
+ if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
+ return -1;
+ }
+ return 0;
+}
+
+int eal_hugepage_info_read(void)
+{
+ struct hugepage_info *hpi = &internal_config.hugepage_info[0];
+ struct hugepage_info *tmp_hpi;
+
+ tmp_hpi = open_shared_memory(eal_hugepage_info_path(),
+ sizeof(internal_config.hugepage_info));
+ if (tmp_hpi == NULL) {
+ RTE_LOG(ERR, EAL, "Failed to open shared memory!\n");
+ return -1;
+ }
+
+ memcpy(hpi, tmp_hpi, sizeof(internal_config.hugepage_info));
+
+ if (munmap(tmp_hpi, sizeof(internal_config.hugepage_info)) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to unmap shared memory!\n");
+ return -1;
+ }
+ return 0;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index f86f22f7..4076c6d6 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -30,7 +30,6 @@
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_log.h>
-#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_pause.h>
@@ -405,8 +404,7 @@ rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
}
/* allocate a new interrupt callback entity */
- callback = rte_zmalloc("interrupt callback list",
- sizeof(*callback), 0);
+ callback = calloc(1, sizeof(*callback));
if (callback == NULL) {
RTE_LOG(ERR, EAL, "Can not allocate memory\n");
return -ENOMEM;
@@ -420,7 +418,7 @@ rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
TAILQ_FOREACH(src, &intr_sources, next) {
if (src->intr_handle.fd == intr_handle->fd) {
/* we had no interrupts for this */
- if TAILQ_EMPTY(&src->callbacks)
+ if (TAILQ_EMPTY(&src->callbacks))
wake_thread = 1;
TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
@@ -431,10 +429,10 @@ rte_intr_callback_register(const struct rte_intr_handle *intr_handle,
/* no existing callbacks for this - add new source */
if (src == NULL) {
- if ((src = rte_zmalloc("interrupt source list",
- sizeof(*src), 0)) == NULL) {
+ src = calloc(1, sizeof(*src));
+ if (src == NULL) {
RTE_LOG(ERR, EAL, "Can not allocate memory\n");
- rte_free(callback);
+ free(callback);
ret = -ENOMEM;
} else {
src->intr_handle = *intr_handle;
@@ -501,7 +499,7 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
if (cb->cb_fn == cb_fn && (cb_arg == (void *)-1 ||
cb->cb_arg == cb_arg)) {
TAILQ_REMOVE(&src->callbacks, cb, next);
- rte_free(cb);
+ free(cb);
ret++;
}
}
@@ -509,7 +507,7 @@ rte_intr_callback_unregister(const struct rte_intr_handle *intr_handle,
/* all callbacks for that source are removed. */
if (TAILQ_EMPTY(&src->callbacks)) {
TAILQ_REMOVE(&intr_sources, src, next);
- rte_free(src);
+ free(src);
}
}
@@ -559,6 +557,9 @@ rte_intr_enable(const struct rte_intr_handle *intr_handle)
return -1;
break;
#endif
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_DEV_EVENT:
+ return -1;
/* unknown handle type */
default:
RTE_LOG(ERR, EAL,
@@ -606,6 +607,9 @@ rte_intr_disable(const struct rte_intr_handle *intr_handle)
return -1;
break;
#endif
+ /* not used at this moment */
+ case RTE_INTR_HANDLE_DEV_EVENT:
+ return -1;
/* unknown handle type */
default:
RTE_LOG(ERR, EAL,
@@ -674,7 +678,10 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
bytes_read = 0;
call = true;
break;
-
+ case RTE_INTR_HANDLE_DEV_EVENT:
+ bytes_read = 0;
+ call = true;
+ break;
default:
bytes_read = 1;
break;
@@ -844,8 +851,7 @@ eal_intr_thread_main(__rte_unused void *arg)
int
rte_eal_intr_init(void)
{
- int ret = 0, ret_1 = 0;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
+ int ret = 0;
/* init the global interrupt source head */
TAILQ_INIT(&intr_sources);
@@ -860,23 +866,15 @@ rte_eal_intr_init(void)
}
/* create the host thread to wait/handle the interrupt */
- ret = pthread_create(&intr_thread, NULL,
+ ret = rte_ctrl_thread_create(&intr_thread, "eal-intr-thread", NULL,
eal_intr_thread_main, NULL);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
RTE_LOG(ERR, EAL,
"Failed to create thread for interrupt handling\n");
- } else {
- /* Set thread_name for aid in debugging. */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
- "eal-intr-thread");
- ret_1 = rte_thread_setname(intr_thread, thread_name);
- if (ret_1 != 0)
- RTE_LOG(DEBUG, EAL,
- "Failed to set thread name for interrupt handling\n");
}
- return -ret;
+ return ret;
}
static void
diff --git a/lib/librte_eal/linuxapp/eal/eal_log.c b/lib/librte_eal/linuxapp/eal/eal_log.c
index ff145884..9d02dddb 100644
--- a/lib/librte_eal/linuxapp/eal/eal_log.c
+++ b/lib/librte_eal/linuxapp/eal/eal_log.c
@@ -25,25 +25,14 @@
static ssize_t
console_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
{
- char copybuf[BUFSIZ + 1];
ssize_t ret;
- uint32_t loglevel;
/* write on stdout */
ret = fwrite(buf, 1, size, stdout);
fflush(stdout);
- /* truncate message if too big (should not happen) */
- if (size > BUFSIZ)
- size = BUFSIZ;
-
/* Syslog error levels are from 0 to 7, so subtract 1 to convert */
- loglevel = rte_log_cur_msg_loglevel() - 1;
- memcpy(copybuf, buf, size);
- copybuf[size] = '\0';
-
- /* write on syslog too */
- syslog(loglevel, "%s", copybuf);
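+ /* "%.*s" limits the write to 'size' bytes, so no bounded copy of the
+ * (not NUL-terminated) buf is needed.
+ */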
+ syslog(rte_log_cur_msg_loglevel() - 1, "%.*s", (int)size, buf);
return ret;
}
diff --git a/lib/librte_eal/linuxapp/eal/eal_memalloc.c b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
new file mode 100644
index 00000000..aa95551a
--- /dev/null
+++ b/lib/librte_eal/linuxapp/eal/eal_memalloc.c
@@ -0,0 +1,1363 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#define _FILE_OFFSET_BITS 64
+#include <errno.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/queue.h>
+#include <sys/file.h>
+#include <unistd.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <signal.h>
+#include <setjmp.h>
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+#include <numa.h>
+#include <numaif.h>
+#endif
+#include <linux/falloc.h>
+#include <linux/mman.h> /* for hugetlb-related mmap flags */
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_eal_memconfig.h>
+#include <rte_eal.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+
+#include "eal_filesystem.h"
+#include "eal_internal_cfg.h"
+#include "eal_memalloc.h"
+#include "eal_private.h"
+
+const int anonymous_hugepages_supported =
+#ifdef MAP_HUGE_SHIFT
+ 1;
+#define RTE_MAP_HUGE_SHIFT MAP_HUGE_SHIFT
+#else
+ 0;
+#define RTE_MAP_HUGE_SHIFT 26
+#endif
+
+/*
+ * not all kernel versions support fallocate on hugetlbfs, so fall back to
+ * ftruncate and disallow deallocation if fallocate is not supported.
+ */
+static int fallocate_supported = -1; /* unknown */
+
+/* for single-file segments, we need some kind of mechanism to keep track of
+ * which hugepages can be freed back to the system, and which cannot. we cannot
+ * use flock() because it doesn't allow locking parts of a file, and we cannot
+ * use fcntl() due to issues with its semantics, so we will have to rely on a
+ * bunch of lockfiles for each page.
+ *
+ * we cannot know how many pages a system will have in advance, but we do know
+ * that they come in lists, and we know lengths of these lists. so, simply store
+ * a malloc'd array of fd's indexed by list and segment index.
+ *
+ * they will be initialized at startup, and filled as we allocate/deallocate
+ * segments. also, use this to track memseg list proper fd.
+ */
+static struct {
+ int *fds; /**< dynamically allocated array of segment lock fd's */
+ int memseg_list_fd; /**< memseg list fd */
+ int len; /**< total length of the array */
+ int count; /**< entries used in an array */
+} lock_fds[RTE_MAX_MEMSEG_LISTS];
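+
+/* e.g. the lock fd for segment s of memseg list l, if one is open, is
+ * lock_fds[l].fds[s]; a value of -1 means no lockfile is open for it.
+ */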
+
+/** local copy of a memory map, used to synchronize memory hotplug in MP */
+static struct rte_memseg_list local_memsegs[RTE_MAX_MEMSEG_LISTS];
+
+static sigjmp_buf huge_jmpenv;
+
+static void __rte_unused huge_sigbus_handler(int signo __rte_unused)
+{
+ siglongjmp(huge_jmpenv, 1);
+}
+
+/* Put sigsetjmp into a wrapper function to avoid a compiler error. Any non-volatile,
+ * non-static local variable in the stack frame calling sigsetjmp might be
+ * clobbered by a call to longjmp.
+ */
+static int __rte_unused huge_wrap_sigsetjmp(void)
+{
+ return sigsetjmp(huge_jmpenv, 1);
+}
+
+static struct sigaction huge_action_old;
+static int huge_need_recover;
+
+static void __rte_unused
+huge_register_sigbus(void)
+{
+ sigset_t mask;
+ struct sigaction action;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGBUS);
+ action.sa_flags = 0;
+ action.sa_mask = mask;
+ action.sa_handler = huge_sigbus_handler;
+
+ huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
+}
+
+static void __rte_unused
+huge_recover_sigbus(void)
+{
+ if (huge_need_recover) {
+ sigaction(SIGBUS, &huge_action_old, NULL);
+ huge_need_recover = 0;
+ }
+}
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+static bool
+check_numa(void)
+{
+ bool ret = true;
+ /* Check if kernel supports NUMA. */
+ if (numa_available() != 0) {
+ RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
+ ret = false;
+ }
+ return ret;
+}
+
+static void
+prepare_numa(int *oldpolicy, struct bitmask *oldmask, int socket_id)
+{
+ RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ if (get_mempolicy(oldpolicy, oldmask->maskp,
+ oldmask->size + 1, 0, 0) < 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to get current mempolicy: %s. "
+ "Assuming MPOL_DEFAULT.\n", strerror(errno));
+ *oldpolicy = MPOL_DEFAULT;
+ }
+ RTE_LOG(DEBUG, EAL,
+ "Setting policy MPOL_PREFERRED for socket %d\n",
+ socket_id);
+ numa_set_preferred(socket_id);
+}
+
+static void
+restore_numa(int *oldpolicy, struct bitmask *oldmask)
+{
+ RTE_LOG(DEBUG, EAL,
+ "Restoring previous memory policy: %d\n", *oldpolicy);
+ if (*oldpolicy == MPOL_DEFAULT) {
+ numa_set_localalloc();
+ } else if (set_mempolicy(*oldpolicy, oldmask->maskp,
+ oldmask->size + 1) < 0) {
+ RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
+ strerror(errno));
+ numa_set_localalloc();
+ }
+ numa_free_cpumask(oldmask);
+}
+#endif
+
+/*
+ * uses fstat to report the size of a file on disk
+ */
+static off_t
+get_file_size(int fd)
+{
+ struct stat st;
+ if (fstat(fd, &st) < 0)
+ return 0;
+ return st.st_size;
+}
+
+/* returns 1 on successful lock, 0 on unsuccessful lock, -1 on error */
+static int lock(int fd, int type)
+{
+ int ret;
+
+ /* flock may be interrupted */
+ do {
+ ret = flock(fd, type | LOCK_NB);
+ } while (ret && errno == EINTR);
+
+ if (ret && errno == EWOULDBLOCK) {
+ /* couldn't lock */
+ return 0;
+ } else if (ret) {
+ RTE_LOG(ERR, EAL, "%s(): error calling flock(): %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+ /* lock was successful */
+ return 1;
+}
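+
+/* example of interpreting the result:
+ *	ret = lock(fd, LOCK_EX);
+ *	ret == 1  - we now hold the exclusive lock
+ *	ret == 0  - another process holds a conflicting lock
+ *	ret == -1 - flock() itself failed
+ */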
+
+static int get_segment_lock_fd(int list_idx, int seg_idx)
+{
+ char path[PATH_MAX] = {0};
+ int fd;
+
+ if (list_idx < 0 || list_idx >= (int)RTE_DIM(lock_fds))
+ return -1;
+ if (seg_idx < 0 || seg_idx >= lock_fds[list_idx].len)
+ return -1;
+
+ fd = lock_fds[list_idx].fds[seg_idx];
+ /* does this lock already exist? */
+ if (fd >= 0)
+ return fd;
+
+ eal_get_hugefile_lock_path(path, sizeof(path),
+ list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+
+ fd = open(path, O_CREAT | O_RDWR, 0660);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): error creating lockfile '%s': %s\n",
+ __func__, path, strerror(errno));
+ return -1;
+ }
+ /* take out a read lock */
+ if (lock(fd, LOCK_SH) != 1) {
+ RTE_LOG(ERR, EAL, "%s(): failed to take out a readlock on '%s': %s\n",
+ __func__, path, strerror(errno));
+ close(fd);
+ return -1;
+ }
+ /* store it for future reference */
+ lock_fds[list_idx].fds[seg_idx] = fd;
+ lock_fds[list_idx].count++;
+ return fd;
+}
+
+static int unlock_segment(int list_idx, int seg_idx)
+{
+ int fd, ret;
+
+ if (list_idx < 0 || list_idx >= (int)RTE_DIM(lock_fds))
+ return -1;
+ if (seg_idx < 0 || seg_idx >= lock_fds[list_idx].len)
+ return -1;
+
+ fd = lock_fds[list_idx].fds[seg_idx];
+
+ /* upgrade lock to exclusive to see if we can remove the lockfile */
+ ret = lock(fd, LOCK_EX);
+ if (ret == 1) {
+ /* we've succeeded in taking exclusive lock, this lockfile may
+ * be removed.
+ */
+ char path[PATH_MAX] = {0};
+ eal_get_hugefile_lock_path(path, sizeof(path),
+ list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+ if (unlink(path)) {
+ RTE_LOG(ERR, EAL, "%s(): error removing lockfile '%s': %s\n",
+ __func__, path, strerror(errno));
+ }
+ }
+ /* we don't want to leak the fd, so even if we fail to lock, close fd
+ * and remove it from list anyway.
+ */
+ close(fd);
+ lock_fds[list_idx].fds[seg_idx] = -1;
+ lock_fds[list_idx].count--;
+
+ if (ret < 0)
+ return -1;
+ return 0;
+}
+
+static int
+get_seg_fd(char *path, int buflen, struct hugepage_info *hi,
+ unsigned int list_idx, unsigned int seg_idx)
+{
+ int fd;
+
+ if (internal_config.single_file_segments) {
+ /* create a hugepage file path */
+ eal_get_hugefile_path(path, buflen, hi->hugedir, list_idx);
+
+ fd = lock_fds[list_idx].memseg_list_fd;
+
+ if (fd < 0) {
+ fd = open(path, O_CREAT | O_RDWR, 0600);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): open failed: %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+ /* take out a read lock and keep it indefinitely */
+ if (lock(fd, LOCK_SH) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
+ __func__, strerror(errno));
+ close(fd);
+ return -1;
+ }
+ lock_fds[list_idx].memseg_list_fd = fd;
+ }
+ } else {
+ /* create a hugepage file path */
+ eal_get_hugefile_path(path, buflen, hi->hugedir,
+ list_idx * RTE_MAX_MEMSEG_PER_LIST + seg_idx);
+ fd = open(path, O_CREAT | O_RDWR, 0600);
+ if (fd < 0) {
+ RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
+ strerror(errno));
+ return -1;
+ }
+ /* take out a read lock */
+ if (lock(fd, LOCK_SH) < 0) {
+ RTE_LOG(ERR, EAL, "%s(): lock failed: %s\n",
+ __func__, strerror(errno));
+ close(fd);
+ return -1;
+ }
+ }
+ return fd;
+}
+
+static int
+resize_hugefile(int fd, char *path, int list_idx, int seg_idx,
+ uint64_t fa_offset, uint64_t page_sz, bool grow)
+{
+ bool again = false;
+ do {
+ if (fallocate_supported == 0) {
+ /* we cannot deallocate memory if fallocate() is not
+ * supported, and hugepage file is already locked at
+ * creation, so no further synchronization needed.
+ */
+
+ if (!grow) {
+ RTE_LOG(DEBUG, EAL, "%s(): fallocate not supported, not freeing page back to the system\n",
+ __func__);
+ return -1;
+ }
+ uint64_t new_size = fa_offset + page_sz;
+ uint64_t cur_size = get_file_size(fd);
+
+ /* fallocate isn't supported, fall back to ftruncate */
+ if (new_size > cur_size &&
+ ftruncate(fd, new_size) < 0) {
+ RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
+ __func__, strerror(errno));
+ return -1;
+ }
+ } else {
+ int flags = grow ? 0 : FALLOC_FL_PUNCH_HOLE |
+ FALLOC_FL_KEEP_SIZE;
+ int ret, lock_fd;
+
+ /* if fallocate() is supported, we need to take out a
+ * read lock on allocate (to prevent other processes
+ * from deallocating this page), and take out a write
+ * lock on deallocate (to ensure nobody else is using
+ * this page).
+ *
+ * read locks on page itself are already taken out at
+ * file creation, in get_seg_fd().
+ *
+ * we cannot rely on simple use of flock() call, because
+ * we need to be able to lock a section of the file,
+ * and we cannot use fcntl() locks, because of numerous
+ * problems with their semantics, so we will use
+ * deterministically named lock files for each section
+ * of the file.
+ *
+ * if we're shrinking the file, we want to upgrade our
+ * lock from shared to exclusive.
+ *
+ * lock_fd is an fd for a lockfile, not for the segment
+ * list.
+ */
+ lock_fd = get_segment_lock_fd(list_idx, seg_idx);
+
+ if (!grow) {
+ /* we are using this lockfile to determine
+ * whether this particular page is locked, as we
+ * are in single file segments mode and thus
+ * cannot use regular flock() to get this info.
+ *
+ * we want to try and take out an exclusive lock
+ * on the lock file to determine if we're the
+ * last ones using this page, and if not, we
+ * won't be shrinking it, and will instead exit
+ * prematurely.
+ */
+ ret = lock(lock_fd, LOCK_EX);
+
+ /* drop the lock on the lockfile, so that even
+ * if we couldn't shrink the file ourselves, we
+ * are signalling to other processes that we're
+ * no longer using this page.
+ */
+ if (unlock_segment(list_idx, seg_idx))
+ RTE_LOG(ERR, EAL, "Could not unlock segment\n");
+
+ /* additionally, if this was the last lock on
+ * this segment list, we can safely close the
+ * page file fd, so that one of the processes
+ * could then delete the file after shrinking.
+ */
+ if (ret < 1 && lock_fds[list_idx].count == 0) {
+ close(fd);
+ lock_fds[list_idx].memseg_list_fd = -1;
+ }
+
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Could not lock segment\n");
+ return -1;
+ }
+ if (ret == 0)
+ /* failed to lock, not an error. */
+ return 0;
+ }
+
+ /* grow or shrink the file */
+ ret = fallocate(fd, flags, fa_offset, page_sz);
+
+ if (ret < 0) {
+ if (fallocate_supported == -1 &&
+ errno == ENOTSUP) {
+ RTE_LOG(ERR, EAL, "%s(): fallocate() not supported, hugepage deallocation will be disabled\n",
+ __func__);
+ again = true;
+ fallocate_supported = 0;
+ } else {
+ RTE_LOG(DEBUG, EAL, "%s(): fallocate() failed: %s\n",
+ __func__,
+ strerror(errno));
+ return -1;
+ }
+ } else {
+ fallocate_supported = 1;
+
+ /* we've grown/shrunk the file, and we hold an
+ * exclusive lock now. check if there are no
+ * more segments active in this segment list,
+ * and remove the file if there aren't.
+ */
+ if (lock_fds[list_idx].count == 0) {
+ if (unlink(path))
+ RTE_LOG(ERR, EAL, "%s(): unlinking '%s' failed: %s\n",
+ __func__, path,
+ strerror(errno));
+ close(fd);
+ lock_fds[list_idx].memseg_list_fd = -1;
+ }
+ }
+ }
+ } while (again);
+ return 0;
+}
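+
+/* in single-file-segments mode, page seg_idx lives at file offset
+ * seg_idx * page_sz: growing allocates that range with fallocate(), and
+ * shrinking punches a hole in it with FALLOC_FL_PUNCH_HOLE |
+ * FALLOC_FL_KEEP_SIZE, so other pages in the file are unaffected.
+ */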
+
+static int
+alloc_seg(struct rte_memseg *ms, void *addr, int socket_id,
+ struct hugepage_info *hi, unsigned int list_idx,
+ unsigned int seg_idx)
+{
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ int cur_socket_id = 0;
+#endif
+ uint64_t map_offset;
+ rte_iova_t iova;
+ void *va;
+ char path[PATH_MAX];
+ int ret = 0;
+ int fd;
+ size_t alloc_sz;
+ int flags;
+ void *new_addr;
+
+ alloc_sz = hi->hugepage_sz;
+ if (!internal_config.single_file_segments &&
+ internal_config.in_memory &&
+ anonymous_hugepages_supported) {
+ int log2, flags;
+
+ log2 = rte_log2_u32(alloc_sz);
+ /* as per the mmap() manpage, the desired huge page size is passed as
+ * log2(page size) shifted left by MAP_HUGE_SHIFT
+ */
+ flags = (log2 << RTE_MAP_HUGE_SHIFT) | MAP_HUGETLB | MAP_FIXED |
+ MAP_PRIVATE | MAP_ANONYMOUS;
+ fd = -1;
+ va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE, flags, -1, 0);
+
+ /* the single-file segments codepath will never be active here,
+ * because in-memory mode is incompatible with it and the
+ * combination is rejected during EAL initialization. however, the
+ * compiler doesn't know that and complains about map_offset being
+ * used uninitialized on failure codepaths when in-memory mode is
+ * enabled, so assign a value here.
+ */
+ map_offset = 0;
+ } else {
+ /* takes out a read lock on segment or segment list */
+ fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Couldn't get fd on hugepage file\n");
+ return -1;
+ }
+
+ if (internal_config.single_file_segments) {
+ map_offset = seg_idx * alloc_sz;
+ ret = resize_hugefile(fd, path, list_idx, seg_idx,
+ map_offset, alloc_sz, true);
+ if (ret < 0)
+ goto resized;
+ } else {
+ map_offset = 0;
+ if (ftruncate(fd, alloc_sz) < 0) {
+ RTE_LOG(DEBUG, EAL, "%s(): ftruncate() failed: %s\n",
+ __func__, strerror(errno));
+ goto resized;
+ }
+ if (internal_config.hugepage_unlink) {
+ if (unlink(path)) {
+ RTE_LOG(DEBUG, EAL, "%s(): unlink() failed: %s\n",
+ __func__, strerror(errno));
+ goto resized;
+ }
+ }
+ }
+
+ /*
+ * map the segment, and populate page tables, the kernel fills
+ * this segment with zeros if it's a new page.
+ */
+ va = mmap(addr, alloc_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd,
+ map_offset);
+ }
+
+ if (va == MAP_FAILED) {
+ RTE_LOG(DEBUG, EAL, "%s(): mmap() failed: %s\n", __func__,
+ strerror(errno));
+ /* mmap failed, but the previous region might have been
+ * unmapped anyway. try to remap it
+ */
+ goto unmapped;
+ }
+ if (va != addr) {
+ RTE_LOG(DEBUG, EAL, "%s(): wrong mmap() address\n", __func__);
+ munmap(va, alloc_sz);
+ goto resized;
+ }
+
+ /* In Linux, hugetlb limitations, like cgroup, are
+ * enforced at fault time instead of mmap(), even
+ * with the option of MAP_POPULATE. The kernel will send
+ * a SIGBUS signal. To avoid being killed, save the stack
+ * environment here; if SIGBUS happens, we can jump
+ * back to it.
+ */
+ if (huge_wrap_sigsetjmp()) {
+ RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more hugepages of size %uMB\n",
+ (unsigned int)(alloc_sz >> 20));
+ goto mapped;
+ }
+
+ /* we need to trigger a write to the page to enforce page fault and
+ * ensure that the page is accessible to us, but we can't overwrite the
+ * value that is already there, so read the old value and write it back.
+ * the kernel populates the page with zeroes initially.
+ */
+ *(volatile int *)addr = *(volatile int *)addr;
+
+ iova = rte_mem_virt2iova(addr);
+ if (iova == RTE_BAD_PHYS_ADDR) {
+ RTE_LOG(DEBUG, EAL, "%s(): can't get IOVA addr\n",
+ __func__);
+ goto mapped;
+ }
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ move_pages(getpid(), 1, &addr, NULL, &cur_socket_id, 0);
+
+ if (cur_socket_id != socket_id) {
+ RTE_LOG(DEBUG, EAL,
+ "%s(): allocation happened on wrong socket (wanted %d, got %d)\n",
+ __func__, socket_id, cur_socket_id);
+ goto mapped;
+ }
+#endif
+ /* for non-single file segments that aren't in-memory, we can close fd
+ * here */
+ if (!internal_config.single_file_segments && !internal_config.in_memory)
+ close(fd);
+
+ ms->addr = addr;
+ ms->hugepage_sz = alloc_sz;
+ ms->len = alloc_sz;
+ ms->nchannel = rte_memory_get_nchannel();
+ ms->nrank = rte_memory_get_nrank();
+ ms->iova = iova;
+ ms->socket_id = socket_id;
+
+ return 0;
+
+mapped:
+ munmap(addr, alloc_sz);
+unmapped:
+ flags = MAP_FIXED;
+#ifdef RTE_ARCH_PPC_64
+ flags |= MAP_HUGETLB;
+#endif
+ new_addr = eal_get_virtual_area(addr, &alloc_sz, alloc_sz, 0, flags);
+ if (new_addr != addr) {
+ if (new_addr != NULL)
+ munmap(new_addr, alloc_sz);
+ /* we're leaving a hole in our virtual address space. if
+ * somebody else maps this hole now, we could accidentally
+ * overwrite it in the future.
+ */
+ RTE_LOG(CRIT, EAL, "Can't mmap holes in our virtual address space\n");
+ }
+resized:
+ /* in-memory mode will never be single-file-segments mode */
+ if (internal_config.single_file_segments) {
+ resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
+ alloc_sz, false);
+ /* ignore failure, can't make it any worse */
+ } else {
+ /* only remove file if we can take out a write lock */
+ if (internal_config.hugepage_unlink == 0 &&
+ internal_config.in_memory == 0 &&
+ lock(fd, LOCK_EX) == 1)
+ unlink(path);
+ close(fd);
+ }
+ return -1;
+}
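+
+/* error-path summary for alloc_seg() (see labels above): "mapped" unmaps the
+ * page we touched, "unmapped" tries to re-reserve the VA hole left behind,
+ * and "resized" rolls back the hugepage file resize or removes the file.
+ */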
+
+static int
+free_seg(struct rte_memseg *ms, struct hugepage_info *hi,
+ unsigned int list_idx, unsigned int seg_idx)
+{
+ uint64_t map_offset;
+ char path[PATH_MAX];
+ int fd, ret;
+
+ /* erase page data */
+ memset(ms->addr, 0, ms->len);
+
+ if (mmap(ms->addr, ms->len, PROT_READ,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) ==
+ MAP_FAILED) {
+ RTE_LOG(DEBUG, EAL, "couldn't unmap page\n");
+ return -1;
+ }
+
+ /* if we've already unlinked the page, nothing needs to be done */
+ if (internal_config.hugepage_unlink) {
+ memset(ms, 0, sizeof(*ms));
+ return 0;
+ }
+
+ /* if we are not in single file segments mode, we're going to unmap the
+ * segment and thus drop the lock on the original fd, but the hugepage dir is
+ * now locked so we can take out another one without races.
+ */
+ fd = get_seg_fd(path, sizeof(path), hi, list_idx, seg_idx);
+ if (fd < 0)
+ return -1;
+
+ if (internal_config.single_file_segments) {
+ map_offset = seg_idx * ms->len;
+ if (resize_hugefile(fd, path, list_idx, seg_idx, map_offset,
+ ms->len, false))
+ return -1;
+ ret = 0;
+ } else {
+ /* if we're able to take out a write lock, we're the last one
+ * holding onto this page.
+ */
+ ret = lock(fd, LOCK_EX);
+ if (ret >= 0) {
+ /* no one else is using this page */
+ if (ret == 1)
+ unlink(path);
+ }
+ /* closing fd will drop the lock */
+ close(fd);
+ }
+
+ memset(ms, 0, sizeof(*ms));
+
+ return ret < 0 ? -1 : 0;
+}
+
+struct alloc_walk_param {
+ struct hugepage_info *hi;
+ struct rte_memseg **ms;
+ size_t page_sz;
+ unsigned int segs_allocated;
+ unsigned int n_segs;
+ int socket;
+ bool exact;
+};
+static int
+alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct alloc_walk_param *wa = arg;
+ struct rte_memseg_list *cur_msl;
+ size_t page_sz;
+ int cur_idx, start_idx, j, dir_fd = -1;
+ unsigned int msl_idx, need, i;
+
+ if (msl->page_sz != wa->page_sz)
+ return 0;
+ if (msl->socket_id != wa->socket)
+ return 0;
+
+ page_sz = (size_t)msl->page_sz;
+
+ msl_idx = msl - mcfg->memsegs;
+ cur_msl = &mcfg->memsegs[msl_idx];
+
+ need = wa->n_segs;
+
+ /* try finding space in memseg list */
+ cur_idx = rte_fbarray_find_next_n_free(&cur_msl->memseg_arr, 0, need);
+ if (cur_idx < 0)
+ return 0;
+ start_idx = cur_idx;
+
+ /* do not allow any page allocations during the time we're allocating,
+ * because file creation and locking operations are not atomic,
+ * and we might be the first or the last ones to use a particular page,
+ * so we need to ensure atomicity of every operation.
+ *
+ * during init, we already hold a write lock, so don't try to take out
+ * another one.
+ */
+ if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
+ dir_fd = open(wa->hi->hugedir, O_RDONLY);
+ if (dir_fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
+ __func__, wa->hi->hugedir, strerror(errno));
+ return -1;
+ }
+ /* blocking writelock */
+ if (flock(dir_fd, LOCK_EX)) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
+ __func__, wa->hi->hugedir, strerror(errno));
+ close(dir_fd);
+ return -1;
+ }
+ }
+
+ for (i = 0; i < need; i++, cur_idx++) {
+ struct rte_memseg *cur;
+ void *map_addr;
+
+ cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
+ map_addr = RTE_PTR_ADD(cur_msl->base_va,
+ cur_idx * page_sz);
+
+ if (alloc_seg(cur, map_addr, wa->socket, wa->hi,
+ msl_idx, cur_idx)) {
+ RTE_LOG(DEBUG, EAL, "attempted to allocate %i segments, but only %i were allocated\n",
+ need, i);
+
+ /* if exact number wasn't requested, stop */
+ if (!wa->exact)
+ goto out;
+
+ /* clean up */
+ for (j = start_idx; j < cur_idx; j++) {
+ struct rte_memseg *tmp;
+ struct rte_fbarray *arr =
+ &cur_msl->memseg_arr;
+
+ tmp = rte_fbarray_get(arr, j);
+ rte_fbarray_set_free(arr, j);
+
+ /* free_seg may attempt to create a file, which
+ * may fail.
+ */
+ if (free_seg(tmp, wa->hi, msl_idx, j))
+ RTE_LOG(DEBUG, EAL, "Cannot free page\n");
+ }
+ /* clear the list */
+ if (wa->ms)
+ memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);
+
+ if (dir_fd >= 0)
+ close(dir_fd);
+ return -1;
+ }
+ if (wa->ms)
+ wa->ms[i] = cur;
+
+ rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
+ }
+out:
+ wa->segs_allocated = i;
+ if (i > 0)
+ cur_msl->version++;
+ if (dir_fd >= 0)
+ close(dir_fd);
+ return 1;
+}
+
+struct free_walk_param {
+ struct hugepage_info *hi;
+ struct rte_memseg *ms;
+};
+static int
+free_seg_walk(const struct rte_memseg_list *msl, void *arg)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *found_msl;
+ struct free_walk_param *wa = arg;
+ uintptr_t start_addr, end_addr;
+ int msl_idx, seg_idx, ret, dir_fd = -1;
+
+ start_addr = (uintptr_t) msl->base_va;
+ end_addr = start_addr + msl->memseg_arr.len * (size_t)msl->page_sz;
+
+ if ((uintptr_t)wa->ms->addr < start_addr ||
+ (uintptr_t)wa->ms->addr >= end_addr)
+ return 0;
+
+ msl_idx = msl - mcfg->memsegs;
+ seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;
+
+ /* msl is const */
+ found_msl = &mcfg->memsegs[msl_idx];
+
+ /* do not allow any page allocations during the time we're freeing,
+ * because file creation and locking operations are not atomic,
+ * and we might be the first or the last ones to use a particular page,
+ * so we need to ensure atomicity of every operation.
+ *
+ * during init, we already hold a write lock, so don't try to take out
+ * another one.
+ */
+ if (wa->hi->lock_descriptor == -1 && !internal_config.in_memory) {
+ dir_fd = open(wa->hi->hugedir, O_RDONLY);
+ if (dir_fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n",
+ __func__, wa->hi->hugedir, strerror(errno));
+ return -1;
+ }
+ /* blocking writelock */
+ if (flock(dir_fd, LOCK_EX)) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n",
+ __func__, wa->hi->hugedir, strerror(errno));
+ close(dir_fd);
+ return -1;
+ }
+ }
+
+ found_msl->version++;
+
+ rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);
+
+ ret = free_seg(wa->ms, wa->hi, msl_idx, seg_idx);
+
+ if (dir_fd >= 0)
+ close(dir_fd);
+
+ if (ret < 0)
+ return -1;
+
+ return 1;
+}
+
+int
+eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs, size_t page_sz,
+ int socket, bool exact)
+{
+ int i, ret = -1;
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ bool have_numa = false;
+ int oldpolicy;
+ struct bitmask *oldmask;
+#endif
+ struct alloc_walk_param wa;
+ struct hugepage_info *hi = NULL;
+
+ memset(&wa, 0, sizeof(wa));
+
+ /* dynamic allocation not supported in legacy mode */
+ if (internal_config.legacy_mem)
+ return -1;
+
+ for (i = 0; i < (int) RTE_DIM(internal_config.hugepage_info); i++) {
+ if (page_sz ==
+ internal_config.hugepage_info[i].hugepage_sz) {
+ hi = &internal_config.hugepage_info[i];
+ break;
+ }
+ }
+ if (!hi) {
+ RTE_LOG(ERR, EAL, "%s(): can't find relevant hugepage_info entry\n",
+ __func__);
+ return -1;
+ }
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (check_numa()) {
+ oldmask = numa_allocate_nodemask();
+ prepare_numa(&oldpolicy, oldmask, socket);
+ have_numa = true;
+ }
+#endif
+
+ wa.exact = exact;
+ wa.hi = hi;
+ wa.ms = ms;
+ wa.n_segs = n_segs;
+ wa.page_sz = page_sz;
+ wa.socket = socket;
+ wa.segs_allocated = 0;
+
+ /* memalloc is locked, so it's safe to use thread-unsafe version */
+ ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
+ if (ret == 0) {
+ RTE_LOG(ERR, EAL, "%s(): couldn't find suitable memseg_list\n",
+ __func__);
+ ret = -1;
+ } else if (ret > 0) {
+ ret = (int)wa.segs_allocated;
+ }
+
+#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (have_numa)
+ restore_numa(&oldpolicy, oldmask);
+#endif
+ return ret;
+}
+
+struct rte_memseg *
+eal_memalloc_alloc_seg(size_t page_sz, int socket)
+{
+ struct rte_memseg *ms;
+ if (eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true) < 0)
+ return NULL;
+ /* return pointer to newly allocated memseg */
+ return ms;
+}
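+
+/* hypothetical usage: grab a single 2 MB page on socket 0, then release it:
+ *
+ *	struct rte_memseg *ms = eal_memalloc_alloc_seg(RTE_PGSIZE_2M, 0);
+ *	if (ms != NULL)
+ *		eal_memalloc_free_seg(ms);
+ */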
+
+int
+eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
+{
+ int seg, ret = 0;
+
+ /* dynamic free not supported in legacy mode */
+ if (internal_config.legacy_mem)
+ return -1;
+
+ for (seg = 0; seg < n_segs; seg++) {
+ struct rte_memseg *cur = ms[seg];
+ struct hugepage_info *hi = NULL;
+ struct free_walk_param wa;
+ int i, walk_res;
+
+ /* if this page is marked as unfreeable, fail */
+ if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
+ RTE_LOG(DEBUG, EAL, "Page is not allowed to be freed\n");
+ ret = -1;
+ continue;
+ }
+
+ memset(&wa, 0, sizeof(wa));
+
+ for (i = 0; i < (int)RTE_DIM(internal_config.hugepage_info);
+ i++) {
+ hi = &internal_config.hugepage_info[i];
+ if (cur->hugepage_sz == hi->hugepage_sz)
+ break;
+ }
+ if (i == (int)RTE_DIM(internal_config.hugepage_info)) {
+ RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
+ ret = -1;
+ continue;
+ }
+
+ wa.ms = cur;
+ wa.hi = hi;
+
+ /* memalloc is locked, so it's safe to use thread-unsafe version
+ */
+ walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
+ &wa);
+ if (walk_res == 1)
+ continue;
+ if (walk_res == 0)
+ RTE_LOG(ERR, EAL, "Couldn't find memseg list\n");
+ ret = -1;
+ }
+ return ret;
+}
+
+int
+eal_memalloc_free_seg(struct rte_memseg *ms)
+{
+ /* dynamic free not supported in legacy mode */
+ if (internal_config.legacy_mem)
+ return -1;
+
+ return eal_memalloc_free_seg_bulk(&ms, 1);
+}
+
+static int
+sync_chunk(struct rte_memseg_list *primary_msl,
+ struct rte_memseg_list *local_msl, struct hugepage_info *hi,
+ unsigned int msl_idx, bool used, int start, int end)
+{
+ struct rte_fbarray *l_arr, *p_arr;
+ int i, ret, chunk_len, diff_len;
+
+ l_arr = &local_msl->memseg_arr;
+ p_arr = &primary_msl->memseg_arr;
+
+ /* we need to aggregate allocations/deallocations into bigger chunks,
+ * as we don't want to spam the user with per-page callbacks.
+ *
+ * to avoid any potential issues, we also want to trigger
+ * deallocation callbacks *before* we actually deallocate
+ * memory, so that the user application could wrap up its use
+ * before it goes away.
+ */
+
+ chunk_len = end - start;
+
+ /* find how many contiguous pages we can map/unmap for this chunk */
+ diff_len = used ?
+ rte_fbarray_find_contig_free(l_arr, start) :
+ rte_fbarray_find_contig_used(l_arr, start);
+
+ /* has to be at least one page */
+ if (diff_len < 1)
+ return -1;
+
+ diff_len = RTE_MIN(chunk_len, diff_len);
+
+ /* if we are freeing memory, notify the application */
+ if (!used) {
+ struct rte_memseg *ms;
+ void *start_va;
+ size_t len, page_sz;
+
+ ms = rte_fbarray_get(l_arr, start);
+ start_va = ms->addr;
+ page_sz = (size_t)primary_msl->page_sz;
+ len = page_sz * diff_len;
+
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
+ start_va, len);
+ }
+
+ for (i = 0; i < diff_len; i++) {
+ struct rte_memseg *p_ms, *l_ms;
+ int seg_idx = start + i;
+
+ l_ms = rte_fbarray_get(l_arr, seg_idx);
+ p_ms = rte_fbarray_get(p_arr, seg_idx);
+
+ if (l_ms == NULL || p_ms == NULL)
+ return -1;
+
+ if (used) {
+ ret = alloc_seg(l_ms, p_ms->addr,
+ p_ms->socket_id, hi,
+ msl_idx, seg_idx);
+ if (ret < 0)
+ return -1;
+ rte_fbarray_set_used(l_arr, seg_idx);
+ } else {
+ ret = free_seg(l_ms, hi, msl_idx, seg_idx);
+ rte_fbarray_set_free(l_arr, seg_idx);
+ if (ret < 0)
+ return -1;
+ }
+ }
+
+ /* if we just allocated memory, notify the application */
+ if (used) {
+ struct rte_memseg *ms;
+ void *start_va;
+ size_t len, page_sz;
+
+ ms = rte_fbarray_get(l_arr, start);
+ start_va = ms->addr;
+ page_sz = (size_t)primary_msl->page_sz;
+ len = page_sz * diff_len;
+
+ eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
+ start_va, len);
+ }
+
+ /* calculate how much we can advance until next chunk */
+ diff_len = used ?
+ rte_fbarray_find_contig_used(l_arr, start) :
+ rte_fbarray_find_contig_free(l_arr, start);
+ ret = RTE_MIN(chunk_len, diff_len);
+
+ return ret;
+}
+
+static int
+sync_status(struct rte_memseg_list *primary_msl,
+ struct rte_memseg_list *local_msl, struct hugepage_info *hi,
+ unsigned int msl_idx, bool used)
+{
+ struct rte_fbarray *l_arr, *p_arr;
+ int p_idx, l_chunk_len, p_chunk_len, ret;
+ int start, end;
+
+ /* this is a little bit tricky, but the basic idea is - walk both lists
+ * and spot any places where there are discrepancies. walking both lists
+ * and noting discrepancies in a single go is a hard problem, so we do
+ * it in two passes - first we spot any places where allocated segments
+ * mismatch (i.e. ensure that everything that's allocated in the primary
+ * is also allocated in the secondary), and then we do it by looking at
+ * free segments instead.
+ *
+ * we also need to aggregate changes into chunks, as we have to call
+ * callbacks per allocation, not per page.
+ */
+ l_arr = &local_msl->memseg_arr;
+ p_arr = &primary_msl->memseg_arr;
+
+ if (used)
+ p_idx = rte_fbarray_find_next_used(p_arr, 0);
+ else
+ p_idx = rte_fbarray_find_next_free(p_arr, 0);
+
+ while (p_idx >= 0) {
+ int next_chunk_search_idx;
+
+ if (used) {
+ p_chunk_len = rte_fbarray_find_contig_used(p_arr,
+ p_idx);
+ l_chunk_len = rte_fbarray_find_contig_used(l_arr,
+ p_idx);
+ } else {
+ p_chunk_len = rte_fbarray_find_contig_free(p_arr,
+ p_idx);
+ l_chunk_len = rte_fbarray_find_contig_free(l_arr,
+ p_idx);
+ }
+ /* best case scenario - no differences (or bigger, which will be
+ * fixed during next iteration), look for next chunk
+ */
+ if (l_chunk_len >= p_chunk_len) {
+ next_chunk_search_idx = p_idx + p_chunk_len;
+ goto next_chunk;
+ }
+
+ /* if both chunks start at the same point, skip parts we know
+ * are identical, and sync the rest. each call to sync_chunk
+ * will only sync contiguous segments, so we need to call this
+ * until we are sure there are no more differences in this
+ * chunk.
+ */
+ start = p_idx + l_chunk_len;
+ end = p_idx + p_chunk_len;
+ do {
+ ret = sync_chunk(primary_msl, local_msl, hi, msl_idx,
+ used, start, end);
+ start += ret;
+ } while (start < end && ret >= 0);
+ /* if ret is negative, something went wrong */
+ if (ret < 0)
+ return -1;
+
+ next_chunk_search_idx = p_idx + p_chunk_len;
+next_chunk:
+ /* skip to end of this chunk */
+ if (used) {
+ p_idx = rte_fbarray_find_next_used(p_arr,
+ next_chunk_search_idx);
+ } else {
+ p_idx = rte_fbarray_find_next_free(p_arr,
+ next_chunk_search_idx);
+ }
+ }
+ return 0;
+}
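+
+/* worked example (illustrative): if the primary has segments 0-3 used but
+ * the secondary only has 0-1, the "used" pass sees p_chunk_len == 4 and
+ * l_chunk_len == 2, so sync_chunk() is called over [2, 4) and alloc_seg()s
+ * the two missing pages; the "free" pass then finds no discrepancies left.
+ */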
+
+static int
+sync_existing(struct rte_memseg_list *primary_msl,
+ struct rte_memseg_list *local_msl, struct hugepage_info *hi,
+ unsigned int msl_idx)
+{
+ int ret, dir_fd;
+
+ /* do not allow any page allocations during the time we're allocating,
+ * because file creation and locking operations are not atomic,
+ * and we might be the first or the last ones to use a particular page,
+ * so we need to ensure atomicity of every operation.
+ */
+ dir_fd = open(hi->hugedir, O_RDONLY);
+ if (dir_fd < 0) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot open '%s': %s\n", __func__,
+ hi->hugedir, strerror(errno));
+ return -1;
+ }
+ /* blocking writelock */
+ if (flock(dir_fd, LOCK_EX)) {
+ RTE_LOG(ERR, EAL, "%s(): Cannot lock '%s': %s\n", __func__,
+ hi->hugedir, strerror(errno));
+ close(dir_fd);
+ return -1;
+ }
+
+ /* ensure all allocated space is the same in both lists */
+ ret = sync_status(primary_msl, local_msl, hi, msl_idx, true);
+ if (ret < 0)
+ goto fail;
+
+ /* ensure all unallocated space is the same in both lists */
+ ret = sync_status(primary_msl, local_msl, hi, msl_idx, false);
+ if (ret < 0)
+ goto fail;
+
+ /* update version number */
+ local_msl->version = primary_msl->version;
+
+ close(dir_fd);
+
+ return 0;
+fail:
+ close(dir_fd);
+ return -1;
+}
+
+static int
+sync_walk(const struct rte_memseg_list *msl, void *arg __rte_unused)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *primary_msl, *local_msl;
+ struct hugepage_info *hi = NULL;
+ unsigned int i;
+ int msl_idx;
+
+ msl_idx = msl - mcfg->memsegs;
+ primary_msl = &mcfg->memsegs[msl_idx];
+ local_msl = &local_memsegs[msl_idx];
+
+ for (i = 0; i < RTE_DIM(internal_config.hugepage_info); i++) {
+ uint64_t cur_sz =
+ internal_config.hugepage_info[i].hugepage_sz;
+ uint64_t msl_sz = primary_msl->page_sz;
+ if (msl_sz == cur_sz) {
+ hi = &internal_config.hugepage_info[i];
+ break;
+ }
+ }
+ if (!hi) {
+ RTE_LOG(ERR, EAL, "Can't find relevant hugepage_info entry\n");
+ return -1;
+ }
+
+ /* if versions don't match, synchronize everything */
+ if (local_msl->version != primary_msl->version &&
+ sync_existing(primary_msl, local_msl, hi, msl_idx))
+ return -1;
+ return 0;
+}
+
+
+int
+eal_memalloc_sync_with_primary(void)
+{
+ /* nothing to be done in primary */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return 0;
+
+ /* memalloc is locked, so it's safe to call thread-unsafe version */
+ if (rte_memseg_list_walk_thread_unsafe(sync_walk, NULL))
+ return -1;
+ return 0;
+}
+
+static int
+secondary_msl_create_walk(const struct rte_memseg_list *msl,
+ void *arg __rte_unused)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *primary_msl, *local_msl;
+ char name[PATH_MAX];
+ int msl_idx, ret;
+
+ msl_idx = msl - mcfg->memsegs;
+ primary_msl = &mcfg->memsegs[msl_idx];
+ local_msl = &local_memsegs[msl_idx];
+
+ /* create distinct fbarrays for each secondary */
+ snprintf(name, RTE_FBARRAY_NAME_LEN, "%s_%i",
+ primary_msl->memseg_arr.name, getpid());
+
+ ret = rte_fbarray_init(&local_msl->memseg_arr, name,
+ primary_msl->memseg_arr.len,
+ primary_msl->memseg_arr.elt_sz);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL, "Cannot initialize local memory map\n");
+ return -1;
+ }
+ local_msl->base_va = primary_msl->base_va;
+
+ return 0;
+}
+
+static int
+secondary_lock_list_create_walk(const struct rte_memseg_list *msl,
+ void *arg __rte_unused)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ unsigned int i, len;
+ int msl_idx;
+ int *data;
+
+ msl_idx = msl - mcfg->memsegs;
+ len = msl->memseg_arr.len;
+
+ /* ensure we have space to store a lock fd for each possible segment */
+ data = malloc(sizeof(int) * len);
+ if (data == NULL) {
+ RTE_LOG(ERR, EAL, "Unable to allocate space for lock descriptors\n");
+ return -1;
+ }
+ /* set all fd's as invalid */
+ for (i = 0; i < len; i++)
+ data[i] = -1;
+
+ lock_fds[msl_idx].fds = data;
+ lock_fds[msl_idx].len = len;
+ lock_fds[msl_idx].count = 0;
+ lock_fds[msl_idx].memseg_list_fd = -1;
+
+ return 0;
+}
+
+int
+eal_memalloc_init(void)
+{
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ if (rte_memseg_list_walk(secondary_msl_create_walk, NULL) < 0)
+ return -1;
+
+ /* initialize all of the lock fd lists */
+ if (internal_config.single_file_segments)
+ if (rte_memseg_list_walk(secondary_lock_list_create_walk, NULL))
+ return -1;
+ return 0;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index 38853b75..dbf19499 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -28,6 +28,7 @@
#include <numaif.h>
#endif
+#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
@@ -39,6 +40,7 @@
#include <rte_string_fns.h>
#include "eal_private.h"
+#include "eal_memalloc.h"
#include "eal_internal_cfg.h"
#include "eal_filesystem.h"
#include "eal_hugepages.h"
@@ -57,8 +59,6 @@
* zone as well as a physical contiguous zone.
*/
-static uint64_t baseaddr_offset;
-
static bool phys_addrs_available = true;
#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
@@ -66,7 +66,7 @@ static bool phys_addrs_available = true;
static void
test_phys_addrs_available(void)
{
- uint64_t tmp;
+ uint64_t tmp = 0;
phys_addr_t physaddr;
if (!rte_eal_has_hugepages()) {
@@ -221,82 +221,6 @@ aslr_enabled(void)
}
}
-/*
- * Try to mmap *size bytes in /dev/zero. If it is successful, return the
- * pointer to the mmap'd area and keep *size unmodified. Else, retry
- * with a smaller zone: decrease *size by hugepage_sz until it reaches
- * 0. In this case, return NULL. Note: this function returns an address
- * which is a multiple of hugepage size.
- */
-static void *
-get_virtual_area(size_t *size, size_t hugepage_sz)
-{
- void *addr;
- void *addr_hint;
- int fd;
- long aligned_addr;
-
- if (internal_config.base_virtaddr != 0) {
- int page_size = sysconf(_SC_PAGE_SIZE);
- addr_hint = (void *) (uintptr_t)
- (internal_config.base_virtaddr + baseaddr_offset);
- addr_hint = RTE_PTR_ALIGN_FLOOR(addr_hint, page_size);
- } else {
- addr_hint = NULL;
- }
-
- RTE_LOG(DEBUG, EAL, "Ask a virtual area of 0x%zx bytes\n", *size);
-
-
- fd = open("/dev/zero", O_RDONLY);
- if (fd < 0){
- RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
- return NULL;
- }
- do {
- addr = mmap(addr_hint, (*size) + hugepage_sz, PROT_READ,
-#ifdef RTE_ARCH_PPC_64
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-#else
- MAP_PRIVATE,
-#endif
- fd, 0);
- if (addr == MAP_FAILED) {
- *size -= hugepage_sz;
- } else if (addr_hint != NULL && addr != addr_hint) {
- RTE_LOG(WARNING, EAL, "WARNING! Base virtual address "
- "hint (%p != %p) not respected!\n",
- addr_hint, addr);
- RTE_LOG(WARNING, EAL, " This may cause issues with "
- "mapping memory into secondary processes\n");
- }
- } while (addr == MAP_FAILED && *size > 0);
-
- if (addr == MAP_FAILED) {
- close(fd);
- RTE_LOG(ERR, EAL, "Cannot get a virtual area: %s\n",
- strerror(errno));
- return NULL;
- }
-
- munmap(addr, (*size) + hugepage_sz);
- close(fd);
-
- /* align addr to a huge page size boundary */
- aligned_addr = (long)addr;
- aligned_addr += (hugepage_sz - 1);
- aligned_addr &= (~(hugepage_sz - 1));
- addr = (void *)(aligned_addr);
-
- RTE_LOG(DEBUG, EAL, "Virtual area found at %p (size = 0x%zx)\n",
- addr, *size);
-
- /* increment offset */
- baseaddr_offset += *size;
-
- return addr;
-}
-
static sigjmp_buf huge_jmpenv;
static void huge_sigbus_handler(int signo __rte_unused)
@@ -330,13 +254,11 @@ void numa_error(char *where)
*/
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
- uint64_t *essential_memory __rte_unused, int orig)
+ uint64_t *essential_memory __rte_unused)
{
int fd;
unsigned i;
void *virtaddr;
- void *vma_addr = NULL;
- size_t vma_len = 0;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
int node_id = -1;
int essential_prev = 0;
@@ -351,7 +273,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
have_numa = false;
}
- if (orig && have_numa) {
+ if (have_numa) {
RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
if (get_mempolicy(&oldpolicy, oldmask->maskp,
oldmask->size + 1, 0, 0) < 0) {
@@ -367,6 +289,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
+ struct hugepage_file *hf = &hugepg_tbl[i];
uint64_t hugepage_sz = hpi->hugepage_sz;
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
@@ -401,57 +324,14 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
}
#endif
- if (orig) {
- hugepg_tbl[i].file_id = i;
- hugepg_tbl[i].size = hugepage_sz;
- eal_get_hugefile_path(hugepg_tbl[i].filepath,
- sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
- hugepg_tbl[i].file_id);
- hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
- }
-#ifndef RTE_ARCH_64
- /* for 32-bit systems, don't remap 1G and 16G pages, just reuse
- * original map address as final map address.
- */
- else if ((hugepage_sz == RTE_PGSIZE_1G)
- || (hugepage_sz == RTE_PGSIZE_16G)) {
- hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
- hugepg_tbl[i].orig_va = NULL;
- continue;
- }
-#endif
- else if (vma_len == 0) {
- unsigned j, num_pages;
-
- /* reserve a virtual area for next contiguous
- * physical block: count the number of
- * contiguous physical pages. */
- for (j = i+1; j < hpi->num_pages[0] ; j++) {
-#ifdef RTE_ARCH_PPC_64
- /* The physical addresses are sorted in
- * descending order on PPC64 */
- if (hugepg_tbl[j].physaddr !=
- hugepg_tbl[j-1].physaddr - hugepage_sz)
- break;
-#else
- if (hugepg_tbl[j].physaddr !=
- hugepg_tbl[j-1].physaddr + hugepage_sz)
- break;
-#endif
- }
- num_pages = j - i;
- vma_len = num_pages * hugepage_sz;
-
- /* get the biggest virtual memory area up to
- * vma_len. If it fails, vma_addr is NULL, so
- * let the kernel provide the address. */
- vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
- if (vma_addr == NULL)
- vma_len = hugepage_sz;
- }
+ hf->file_id = i;
+ hf->size = hugepage_sz;
+ eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
+ hpi->hugedir, hf->file_id);
+ hf->filepath[sizeof(hf->filepath) - 1] = '\0';
/* try to create hugepage file */
- fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0600);
+ fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
if (fd < 0) {
RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
strerror(errno));
@@ -459,8 +339,11 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
}
/* map the segment, and populate page tables,
- * the kernel fills this segment with zeros */
- virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
+ * the kernel fills this segment with zeros. we don't care where
+ * this gets mapped - we already have contiguous memory areas
+ * ready for us to map into.
+ */
+ virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, 0);
if (virtaddr == MAP_FAILED) {
RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
@@ -469,41 +352,33 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
goto out;
}
- if (orig) {
- hugepg_tbl[i].orig_va = virtaddr;
- }
- else {
- hugepg_tbl[i].final_va = virtaddr;
- }
+ hf->orig_va = virtaddr;
- if (orig) {
- /* In linux, hugetlb limitations, like cgroup, are
- * enforced at fault time instead of mmap(), even
- * with the option of MAP_POPULATE. Kernel will send
- * a SIGBUS signal. To avoid to be killed, save stack
- * environment here, if SIGBUS happens, we can jump
- * back here.
- */
- if (huge_wrap_sigsetjmp()) {
- RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
- "hugepages of size %u MB\n",
- (unsigned)(hugepage_sz / 0x100000));
- munmap(virtaddr, hugepage_sz);
- close(fd);
- unlink(hugepg_tbl[i].filepath);
+ /* In Linux, hugetlb limitations, like cgroup, are
+ * enforced at fault time instead of mmap(), even
+ * with the option of MAP_POPULATE. The kernel will send
+ * a SIGBUS signal. To avoid being killed, save the stack
+ * environment here; if SIGBUS happens, we can jump
+ * back to it.
+ */
+ if (huge_wrap_sigsetjmp()) {
+ RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
+ "hugepages of size %u MB\n",
+ (unsigned int)(hugepage_sz / 0x100000));
+ munmap(virtaddr, hugepage_sz);
+ close(fd);
+ unlink(hugepg_tbl[i].filepath);
#ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
- if (maxnode)
- essential_memory[node_id] =
- essential_prev;
+ if (maxnode)
+ essential_memory[node_id] =
+ essential_prev;
#endif
- goto out;
- }
- *(int *)virtaddr = 0;
+ goto out;
}
+ *(int *)virtaddr = 0;
-
- /* set shared flock on the file. */
- if (flock(fd, LOCK_SH | LOCK_NB) == -1) {
+ /* set shared lock on the file. */
+ if (flock(fd, LOCK_SH) < 0) {
RTE_LOG(DEBUG, EAL, "%s(): Locking file failed:%s \n",
__func__, strerror(errno));
close(fd);
@@ -511,9 +386,6 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
}
close(fd);
-
- vma_addr = (char *)vma_addr + hugepage_sz;
- vma_len -= hugepage_sz;
}
out:
@@ -535,20 +407,6 @@ out:
return i;
}
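Worth noting for the hunk above: MAP_POPULATE alone does not guarantee the pages are usable, since hugetlb accounting (cgroup and friends) is enforced at fault time, which is why the code keeps the SIGBUS guard around the first touch. A minimal sketch of the same pattern, assuming a plain sigsetjmp/SIGBUS handler pair rather than DPDK's internal huge_wrap_sigsetjmp() helper (guard_env and touch_page are illustrative names):

#include <setjmp.h>
#include <signal.h>
#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

static sigjmp_buf guard_env; /* illustrative name, not DPDK API */

static void on_sigbus(int sig)
{
	(void)sig;
	siglongjmp(guard_env, 1); /* resume at the sigsetjmp() below */
}

static int touch_page(void *addr, size_t len)
{
	struct sigaction sa, old;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_sigbus;
	if (sigaction(SIGBUS, &sa, &old) < 0)
		return -1;

	if (sigsetjmp(guard_env, 1)) {
		/* hugetlb accounting rejected the page at fault time */
		sigaction(SIGBUS, &old, NULL);
		munmap(addr, len);
		return -1;
	}
	*(volatile int *)addr = 0; /* first touch faults the page in */
	sigaction(SIGBUS, &old, NULL);
	return 0;
}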
-/* Unmap all hugepages from original mapping */
-static int
-unmap_all_hugepages_orig(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
-{
- unsigned i;
- for (i = 0; i < hpi->num_pages[0]; i++) {
- if (hugepg_tbl[i].orig_va) {
- munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
- hugepg_tbl[i].orig_va = NULL;
- }
- }
- return 0;
-}
-
/*
* Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
* page.
@@ -663,7 +521,18 @@ static void *
create_shared_memory(const char *filename, const size_t mem_size)
{
void *retval;
- int fd = open(filename, O_CREAT | O_RDWR, 0666);
+ int fd;
+
+ /* if no shared files mode is used, create anonymous memory instead */
+ if (internal_config.no_shconf) {
+ retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (retval == MAP_FAILED)
+ return NULL;
+ return retval;
+ }
+
+ fd = open(filename, O_CREAT | O_RDWR, 0666);
if (fd < 0)
return NULL;
if (ftruncate(fd, mem_size) < 0) {
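The no_shconf branch added above swaps the file-backed shared mapping for plain anonymous memory. Reduced to a standalone helper, the pattern looks like this; a sketch only, and map_table() is a hypothetical name rather than a DPDK API:

#include <fcntl.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
map_table(const char *path, size_t sz, bool no_shared_files)
{
	void *va;
	int fd;

	if (no_shared_files) {
		/* nothing to share: anonymous private memory is enough */
		va = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		return va == MAP_FAILED ? NULL : va;
	}

	fd = open(path, O_CREAT | O_RDWR, 0666);
	if (fd < 0)
		return NULL;
	if (ftruncate(fd, sz) < 0) {
		close(fd);
		return NULL;
	}
	va = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd); /* the mapping keeps the file's pages referenced */
	return va == MAP_FAILED ? NULL : va;
}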
@@ -688,7 +557,7 @@ copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
int src_pos, dst_pos = 0;
for (src_pos = 0; src_pos < src_size; src_pos++) {
- if (src[src_pos].final_va != NULL) {
+ if (src[src_pos].orig_va != NULL) {
/* error on overflow attempt */
if (dst_pos == dest_size)
return -1;
@@ -759,9 +628,10 @@ unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
unmap_len = hp->size;
/* get start addr and len of the remaining segment */
- munmap(hp->final_va, (size_t) unmap_len);
+ munmap(hp->orig_va,
+ (size_t)unmap_len);
- hp->final_va = NULL;
+ hp->orig_va = NULL;
if (unlink(hp->filepath) == -1) {
RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
__func__, hp->filepath, strerror(errno));
@@ -780,6 +650,436 @@ unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
return 0;
}
+static int
+remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ struct rte_memseg_list *msl;
+ struct rte_fbarray *arr;
+ int cur_page, seg_len;
+ unsigned int msl_idx;
+ int ms_idx;
+ uint64_t page_sz;
+ size_t memseg_len;
+ int socket_id;
+
+ page_sz = hugepages[seg_start].size;
+ socket_id = hugepages[seg_start].socket_id;
+ seg_len = seg_end - seg_start;
+
+ RTE_LOG(DEBUG, EAL, "Attempting to map %" PRIu64 "M on socket %i\n",
+ (seg_len * page_sz) >> 20ULL, socket_id);
+
+ /* find free space in memseg lists */
+ for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+ bool empty;
+ msl = &mcfg->memsegs[msl_idx];
+ arr = &msl->memseg_arr;
+
+ if (msl->page_sz != page_sz)
+ continue;
+ if (msl->socket_id != socket_id)
+ continue;
+
+ /* leave space for a hole if array is not empty */
+ empty = arr->count == 0;
+ ms_idx = rte_fbarray_find_next_n_free(arr, 0,
+ seg_len + (empty ? 0 : 1));
+
+ /* memseg list is full? */
+ if (ms_idx < 0)
+ continue;
+
+ /* leave some space between memsegs, they are not IOVA
+ * contiguous, so they shouldn't be VA contiguous either.
+ */
+ if (!empty)
+ ms_idx++;
+ break;
+ }
+ if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
+ RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
+ RTE_STR(CONFIG_RTE_MAX_MEMSEG_PER_TYPE),
+ RTE_STR(CONFIG_RTE_MAX_MEM_PER_TYPE));
+ return -1;
+ }
+
+#ifdef RTE_ARCH_PPC64
+ /* for PPC64 we go through the list backwards */
+ for (cur_page = seg_end - 1; cur_page >= seg_start;
+ cur_page--, ms_idx++) {
+#else
+ for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
+#endif
+ struct hugepage_file *hfile = &hugepages[cur_page];
+ struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
+ void *addr;
+ int fd;
+
+ fd = open(hfile->filepath, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open '%s': %s\n",
+ hfile->filepath, strerror(errno));
+ return -1;
+ }
+ /* set shared lock on the file. */
+ if (flock(fd, LOCK_SH) < 0) {
+ RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
+ hfile->filepath, strerror(errno));
+ close(fd);
+ return -1;
+ }
+ memseg_len = (size_t)page_sz;
+ addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);
+
+ /* we know this address is already mmapped by memseg list, so
+ * using MAP_FIXED here is safe
+ */
+ addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Couldn't remap '%s': %s\n",
+ hfile->filepath, strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ /* we have a new address, so unmap previous one */
+#ifndef RTE_ARCH_64
+ /* in 32-bit legacy mode, we have already unmapped the page */
+ if (!internal_config.legacy_mem)
+ munmap(hfile->orig_va, page_sz);
+#else
+ munmap(hfile->orig_va, page_sz);
+#endif
+
+ hfile->orig_va = NULL;
+ hfile->final_va = addr;
+
+ /* rewrite physical addresses in IOVA as VA mode */
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ hfile->physaddr = (uintptr_t)addr;
+
+ /* set up memseg data */
+ ms->addr = addr;
+ ms->hugepage_sz = page_sz;
+ ms->len = memseg_len;
+ ms->iova = hfile->physaddr;
+ ms->socket_id = hfile->socket_id;
+ ms->nchannel = rte_memory_get_nchannel();
+ ms->nrank = rte_memory_get_nrank();
+
+ rte_fbarray_set_used(arr, ms_idx);
+
+ close(fd);
+ }
+ RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
+ (seg_len * page_sz) >> 20, socket_id);
+ return 0;
+}
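remap_segment() deliberately leaves a one-slot hole between runs of segments so that non-IOVA-contiguous memory never becomes VA-contiguous either. The slot search can be isolated as follows; a sketch assuming arr is an initialized rte_fbarray, with find_slots_with_hole() being an illustrative name:

#include <stdbool.h>
#include <rte_fbarray.h>

/* find room for n segments, keeping a one-slot hole after existing runs */
static int
find_slots_with_hole(struct rte_fbarray *arr, int n)
{
	bool empty = arr->count == 0;
	int idx = rte_fbarray_find_next_n_free(arr, 0,
			n + (empty ? 0 : 1));

	if (idx < 0)
		return -1; /* list is full */
	return empty ? idx : idx + 1; /* skip the separator slot */
}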
+
+static uint64_t
+get_mem_amount(uint64_t page_sz, uint64_t max_mem)
+{
+ uint64_t area_sz, max_pages;
+
+ /* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
+ max_pages = RTE_MAX_MEMSEG_PER_LIST;
+ max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
+
+ area_sz = RTE_MIN(page_sz * max_pages, max_mem);
+
+ /* make sure the list isn't smaller than the page size */
+ area_sz = RTE_MAX(area_sz, page_sz);
+
+ return RTE_ALIGN(area_sz, page_sz);
+}
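The clamping in get_mem_amount() is easier to sanity-check in isolation. Below is a hedged standalone version with the build-time limits passed as parameters; the sample figures assume the default config values RTE_MAX_MEMSEG_PER_LIST = 8192 and RTE_MAX_MEM_MB_PER_LIST = 32768:

#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define ALIGN(v, a) (((v) + (a) - 1) / (a) * (a))

/* same clamping as get_mem_amount(), with the limits as parameters */
static uint64_t
mem_amount(uint64_t page_sz, uint64_t max_mem,
		uint64_t max_pages, uint64_t list_limit_bytes)
{
	uint64_t area = MIN(page_sz * max_pages,
			MIN(list_limit_bytes, max_mem));

	return ALIGN(MAX(area, page_sz), page_sz);
}

/* mem_amount(2 << 20, UINT64_MAX, 8192, 32768ULL << 20) == 16 GB
 * (8192 x 2 MB pages fit under the 32 GB list limit), while
 * mem_amount(1 << 30, UINT64_MAX, 8192, 32768ULL << 20) == 32 GB
 * (1 GB pages are capped at the list limit, i.e. 32 pages).
 */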
+
+static int
+free_memseg_list(struct rte_memseg_list *msl)
+{
+ if (rte_fbarray_destroy(&msl->memseg_arr)) {
+ RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
+ return -1;
+ }
+ memset(msl, 0, sizeof(*msl));
+ return 0;
+}
+
+#define MEMSEG_LIST_FMT "memseg-%" PRIu64 "k-%i-%i"
+static int
+alloc_memseg_list(struct rte_memseg_list *msl, uint64_t page_sz,
+ int n_segs, int socket_id, int type_msl_idx)
+{
+ char name[RTE_FBARRAY_NAME_LEN];
+
+ snprintf(name, sizeof(name), MEMSEG_LIST_FMT, page_sz >> 10, socket_id,
+ type_msl_idx);
+ if (rte_fbarray_init(&msl->memseg_arr, name, n_segs,
+ sizeof(struct rte_memseg))) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list: %s\n",
+ rte_strerror(rte_errno));
+ return -1;
+ }
+
+ msl->page_sz = page_sz;
+ msl->socket_id = socket_id;
+ msl->base_va = NULL;
+
+ RTE_LOG(DEBUG, EAL, "Memseg list allocated: 0x%zxkB at socket %i\n",
+ (size_t)page_sz >> 10, socket_id);
+
+ return 0;
+}
+
+static int
+alloc_va_space(struct rte_memseg_list *msl)
+{
+ uint64_t page_sz;
+ size_t mem_sz;
+ void *addr;
+ int flags = 0;
+
+#ifdef RTE_ARCH_PPC_64
+ flags |= MAP_HUGETLB;
+#endif
+
+ page_sz = msl->page_sz;
+ mem_sz = page_sz * msl->memseg_arr.len;
+
+ addr = eal_get_virtual_area(msl->base_va, &mem_sz, page_sz, 0, flags);
+ if (addr == NULL) {
+ if (rte_errno == EADDRNOTAVAIL)
+ RTE_LOG(ERR, EAL, "Could not mmap %llu bytes at [%p] - please use '--base-virtaddr' option\n",
+ (unsigned long long)mem_sz, msl->base_va);
+ else
+ RTE_LOG(ERR, EAL, "Cannot reserve memory\n");
+ return -1;
+ }
+ msl->base_va = addr;
+
+ return 0;
+}
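alloc_va_space() only reserves address space; no memory is committed until pages are remapped in with MAP_FIXED. A conceptual stand-in for what eal_get_virtual_area() provides here (not the EAL implementation, which also handles alignment and the --base-virtaddr hint):

#include <stddef.h>
#include <sys/mman.h>

/* reserve, but do not commit, a VA region: PROT_NONE anonymous memory
 * keeps the range out of the way until real pages replace it.
 */
static void *
reserve_va(void *hint, size_t sz)
{
	void *va = mmap(hint, sz, PROT_NONE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return va == MAP_FAILED ? NULL : va;
}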
+
+/*
+ * Our VA space is not preallocated yet, so preallocate it here. We need to know
+ * how many segments there are in order to map all pages into one address space,
+ * and leave appropriate holes between segments so that rte_malloc does not
+ * concatenate them into one big segment.
+ *
+ * We also need to unmap the original pages to free up address space.
+ */
+static int __rte_unused
+prealloc_segments(struct hugepage_file *hugepages, int n_pages)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int cur_page, seg_start_page, end_seg, new_memseg;
+ unsigned int hpi_idx, socket, i;
+ int n_contig_segs, n_segs;
+ int msl_idx;
+
+ /* before we preallocate segments, we need to free up our VA space.
+ * we're not removing files, and we already have information about
+ * PA-contiguousness, so it is safe to unmap everything.
+ */
+ for (cur_page = 0; cur_page < n_pages; cur_page++) {
+ struct hugepage_file *hpi = &hugepages[cur_page];
+ munmap(hpi->orig_va, hpi->size);
+ hpi->orig_va = NULL;
+ }
+
+	/* we do not know in advance how many page sizes and sockets were
+	 * discovered, so loop over all of them
+ */
+ for (hpi_idx = 0; hpi_idx < internal_config.num_hugepage_sizes;
+ hpi_idx++) {
+ uint64_t page_sz =
+ internal_config.hugepage_info[hpi_idx].hugepage_sz;
+
+ for (i = 0; i < rte_socket_count(); i++) {
+ struct rte_memseg_list *msl;
+
+ socket = rte_socket_id_by_idx(i);
+ n_contig_segs = 0;
+ n_segs = 0;
+ seg_start_page = -1;
+
+ for (cur_page = 0; cur_page < n_pages; cur_page++) {
+ struct hugepage_file *prev, *cur;
+ int prev_seg_start_page = -1;
+
+ cur = &hugepages[cur_page];
+ prev = cur_page == 0 ? NULL :
+ &hugepages[cur_page - 1];
+
+ new_memseg = 0;
+ end_seg = 0;
+
+ if (cur->size == 0)
+ end_seg = 1;
+ else if (cur->socket_id != (int) socket)
+ end_seg = 1;
+ else if (cur->size != page_sz)
+ end_seg = 1;
+ else if (cur_page == 0)
+ new_memseg = 1;
+#ifdef RTE_ARCH_PPC_64
+				/* On the PPC64 architecture, mmap always starts
+				 * from higher address to lower address. Here,
+ * physical addresses are in descending order.
+ */
+ else if ((prev->physaddr - cur->physaddr) !=
+ cur->size)
+ new_memseg = 1;
+#else
+ else if ((cur->physaddr - prev->physaddr) !=
+ cur->size)
+ new_memseg = 1;
+#endif
+ if (new_memseg) {
+ /* if we're already inside a segment,
+ * new segment means end of current one
+ */
+ if (seg_start_page != -1) {
+ end_seg = 1;
+ prev_seg_start_page =
+ seg_start_page;
+ }
+ seg_start_page = cur_page;
+ }
+
+ if (end_seg) {
+ if (prev_seg_start_page != -1) {
+ /* we've found a new segment */
+ n_contig_segs++;
+ n_segs += cur_page -
+ prev_seg_start_page;
+ } else if (seg_start_page != -1) {
+ /* we didn't find new segment,
+ * but did end current one
+ */
+ n_contig_segs++;
+ n_segs += cur_page -
+ seg_start_page;
+ seg_start_page = -1;
+ continue;
+ } else {
+ /* we're skipping this page */
+ continue;
+ }
+ }
+ /* segment continues */
+ }
+ /* check if we missed last segment */
+ if (seg_start_page != -1) {
+ n_contig_segs++;
+ n_segs += cur_page - seg_start_page;
+ }
+
+ /* if no segments were found, do not preallocate */
+ if (n_segs == 0)
+ continue;
+
+ /* we now have total number of pages that we will
+ * allocate for this segment list. add separator pages
+ * to the total count, and preallocate VA space.
+ */
+ n_segs += n_contig_segs - 1;
+
+ /* now, preallocate VA space for these segments */
+
+ /* first, find suitable memseg list for this */
+ for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
+ msl_idx++) {
+ msl = &mcfg->memsegs[msl_idx];
+
+ if (msl->base_va != NULL)
+ continue;
+ break;
+ }
+ if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
+ RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
+ RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
+ return -1;
+ }
+
+ /* now, allocate fbarray itself */
+ if (alloc_memseg_list(msl, page_sz, n_segs, socket,
+ msl_idx) < 0)
+ return -1;
+
+ /* finally, allocate VA space */
+ if (alloc_va_space(msl) < 0)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * We cannot reallocate memseg lists on the fly because PPC64 stores pages
+ * backwards; therefore, we have to process an entire segment before
+ * remapping it into memseg list VA space.
+ */
+static int
+remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
+{
+ int cur_page, seg_start_page, new_memseg, ret;
+
+ seg_start_page = 0;
+ for (cur_page = 0; cur_page < n_pages; cur_page++) {
+ struct hugepage_file *prev, *cur;
+
+ new_memseg = 0;
+
+ cur = &hugepages[cur_page];
+ prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];
+
+ /* if size is zero, no more pages left */
+ if (cur->size == 0)
+ break;
+
+ if (cur_page == 0)
+ new_memseg = 1;
+ else if (cur->socket_id != prev->socket_id)
+ new_memseg = 1;
+ else if (cur->size != prev->size)
+ new_memseg = 1;
+#ifdef RTE_ARCH_PPC_64
+	/* On the PPC64 architecture, mmap always starts from higher
+	 * address to lower address. Here, physical addresses are in
+ * descending order.
+ */
+ else if ((prev->physaddr - cur->physaddr) != cur->size)
+ new_memseg = 1;
+#else
+ else if ((cur->physaddr - prev->physaddr) != cur->size)
+ new_memseg = 1;
+#endif
+
+ if (new_memseg) {
+ /* if this isn't the first time, remap segment */
+ if (cur_page != 0) {
+ ret = remap_segment(hugepages, seg_start_page,
+ cur_page);
+ if (ret != 0)
+ return -1;
+ }
+ /* remember where we started */
+ seg_start_page = cur_page;
+ }
+ /* continuation of previous memseg */
+ }
+	/* the loop has ended, but the last segment was not remapped - do it now */
+ if (cur_page != 0) {
+ ret = remap_segment(hugepages, seg_start_page,
+ cur_page);
+ if (ret != 0)
+ return -1;
+ }
+ return 0;
+}
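prealloc_segments() and remap_needed_hugepages() share the same physical-contiguity test, including the inverted delta on PPC64. Isolated as a predicate, under the assumption that the direction is passed in explicitly rather than selected by #ifdef:

#include <stdint.h>

/* two pages belong to the same segment if their physical addresses are
 * exactly one page apart; on PPC64 pages arrive in descending PA order,
 * so the delta is inverted.
 */
static int
pages_contiguous(uint64_t prev_pa, uint64_t cur_pa, uint64_t page_sz,
		int ppc64)
{
	return ppc64 ? (prev_pa - cur_pa) == page_sz
		     : (cur_pa - prev_pa) == page_sz;
}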
+
static inline uint64_t
get_socket_mem_size(int socket)
{
@@ -788,8 +1088,7 @@ get_socket_mem_size(int socket)
for (i = 0; i < internal_config.num_hugepage_sizes; i++){
struct hugepage_info *hpi = &internal_config.hugepage_info[i];
- if (hpi->hugedir != NULL)
- size += hpi->hugepage_sz * hpi->num_pages[socket];
+ size += hpi->hugepage_sz * hpi->num_pages[socket];
}
return size;
@@ -818,8 +1117,10 @@ calc_num_pages_per_socket(uint64_t * memory,
/* if specific memory amounts per socket weren't requested */
if (internal_config.force_sockets == 0) {
+ size_t total_size;
+#ifdef RTE_ARCH_64
int cpu_per_socket[RTE_MAX_NUMA_NODES];
- size_t default_size, total_size;
+ size_t default_size;
unsigned lcore_id;
/* Compute number of cores per socket */
@@ -837,7 +1138,7 @@ calc_num_pages_per_socket(uint64_t * memory,
/* Set memory amount per socket */
default_size = (internal_config.memory * cpu_per_socket[socket])
- / rte_lcore_count();
+ / rte_lcore_count();
/* Limit to maximum available memory on socket */
default_size = RTE_MIN(default_size, get_socket_mem_size(socket));
@@ -854,18 +1155,40 @@ calc_num_pages_per_socket(uint64_t * memory,
for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0; socket++) {
/* take whatever is available */
default_size = RTE_MIN(get_socket_mem_size(socket) - memory[socket],
- total_size);
+ total_size);
/* Update sizes */
memory[socket] += default_size;
total_size -= default_size;
}
+#else
+ /* in 32-bit mode, allocate all of the memory only on master
+ * lcore socket
+ */
+ total_size = internal_config.memory;
+ for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_size != 0;
+ socket++) {
+ struct rte_config *cfg = rte_eal_get_configuration();
+ unsigned int master_lcore_socket;
+
+ master_lcore_socket =
+ rte_lcore_to_socket_id(cfg->master_lcore);
+
+ if (master_lcore_socket != socket)
+ continue;
+
+ /* Update sizes */
+ memory[socket] = total_size;
+ break;
+ }
+#endif
}
for (socket = 0; socket < RTE_MAX_NUMA_NODES && total_mem != 0; socket++) {
/* skips if the memory on specific socket wasn't requested */
for (i = 0; i < num_hp_info && memory[socket] != 0; i++){
- hp_used[i].hugedir = hp_info[i].hugedir;
+ strlcpy(hp_used[i].hugedir, hp_info[i].hugedir,
+ sizeof(hp_used[i].hugedir));
hp_used[i].num_pages[socket] = RTE_MIN(
memory[socket] / hp_info[i].hugepage_sz,
hp_info[i].num_pages[socket]);
@@ -907,7 +1230,8 @@ calc_num_pages_per_socket(uint64_t * memory,
}
}
/* if we didn't satisfy all memory requirements per socket */
- if (memory[socket] > 0) {
+ if (memory[socket] > 0 &&
+ internal_config.socket_mem[socket] != 0) {
/* to prevent icc errors */
requested = (unsigned) (internal_config.socket_mem[socket] /
0x100000);
@@ -939,7 +1263,7 @@ eal_get_hugepage_mem_size(void)
for (i = 0; i < internal_config.num_hugepage_sizes; i++) {
struct hugepage_info *hpi = &internal_config.hugepage_info[i];
- if (hpi->hugedir != NULL) {
+ if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
size += hpi->hugepage_sz * hpi->num_pages[j];
}
@@ -987,17 +1311,19 @@ huge_recover_sigbus(void)
* 6. unmap the first mapping
* 7. fill memsegs in configuration with contiguous zones
*/
-int
-rte_eal_hugepage_init(void)
+static int
+eal_legacy_hugepage_init(void)
{
struct rte_mem_config *mcfg;
struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
+ struct rte_fbarray *arr;
+ struct rte_memseg *ms;
uint64_t memory[RTE_MAX_NUMA_NODES];
unsigned hp_offset;
- int i, j, new_memseg;
+ int i, j;
int nr_hugefiles, nr_hugepages = 0;
void *addr;
@@ -1010,21 +1336,54 @@ rte_eal_hugepage_init(void)
/* hugetlbfs can be disabled */
if (internal_config.no_hugetlbfs) {
+ struct rte_memseg_list *msl;
+ uint64_t page_sz;
+ int n_segs, cur_seg;
+
+ /* nohuge mode is legacy mode */
+ internal_config.legacy_mem = 1;
+
+ /* create a memseg list */
+ msl = &mcfg->memsegs[0];
+
+ page_sz = RTE_PGSIZE_4K;
+ n_segs = internal_config.memory / page_sz;
+
+ if (rte_fbarray_init(&msl->memseg_arr, "nohugemem", n_segs,
+ sizeof(struct rte_memseg))) {
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+ return -1;
+ }
+
addr = mmap(NULL, internal_config.memory, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
strerror(errno));
return -1;
}
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- mcfg->memseg[0].iova = (uintptr_t)addr;
- else
- mcfg->memseg[0].iova = RTE_BAD_IOVA;
- mcfg->memseg[0].addr = addr;
- mcfg->memseg[0].hugepage_sz = RTE_PGSIZE_4K;
- mcfg->memseg[0].len = internal_config.memory;
- mcfg->memseg[0].socket_id = 0;
+ msl->base_va = addr;
+ msl->page_sz = page_sz;
+ msl->socket_id = 0;
+
+ /* populate memsegs. each memseg is one page long */
+ for (cur_seg = 0; cur_seg < n_segs; cur_seg++) {
+ arr = &msl->memseg_arr;
+
+ ms = rte_fbarray_get(arr, cur_seg);
+ if (rte_eal_iova_mode() == RTE_IOVA_VA)
+ ms->iova = (uintptr_t)addr;
+ else
+ ms->iova = RTE_BAD_IOVA;
+ ms->addr = addr;
+ ms->hugepage_sz = page_sz;
+ ms->socket_id = 0;
+ ms->len = page_sz;
+
+ rte_fbarray_set_used(arr, cur_seg);
+
+ addr = RTE_PTR_ADD(addr, (size_t)page_sz);
+ }
return 0;
}
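After the loop above, every 4K segment is marked used in the fbarray. Entries can later be walked with the fbarray iteration API; a usage sketch, assuming msl points at the list populated above and dump_segs() is an illustrative name:

#include <stdio.h>
#include <rte_eal_memconfig.h>
#include <rte_fbarray.h>
#include <rte_memory.h>

/* walk every used entry in a memseg list and print it */
static void
dump_segs(struct rte_memseg_list *msl)
{
	int idx = rte_fbarray_find_next_used(&msl->memseg_arr, 0);

	while (idx >= 0) {
		struct rte_memseg *ms =
			rte_fbarray_get(&msl->memseg_arr, idx);

		printf("seg %d: va=%p len=%zu\n", idx, ms->addr, ms->len);
		idx = rte_fbarray_find_next_used(&msl->memseg_arr,
				idx + 1);
	}
}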
@@ -1057,7 +1416,6 @@ rte_eal_hugepage_init(void)
for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
memory[i] = internal_config.socket_mem[i];
-
/* map all hugepages and sort them */
for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
unsigned pages_old, pages_new;
@@ -1075,8 +1433,7 @@ rte_eal_hugepage_init(void)
/* map all hugepages available */
pages_old = hpi->num_pages[0];
- pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi,
- memory, 1);
+ pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory);
if (pages_new < pages_old) {
RTE_LOG(DEBUG, EAL,
"%d not %d hugepages of size %u MB allocated\n",
@@ -1091,7 +1448,8 @@ rte_eal_hugepage_init(void)
continue;
}
- if (phys_addrs_available) {
+ if (phys_addrs_available &&
+ rte_eal_iova_mode() != RTE_IOVA_VA) {
/* find physical addresses for each hugepage */
if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
@@ -1118,18 +1476,6 @@ rte_eal_hugepage_init(void)
qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
sizeof(struct hugepage_file), cmp_physaddr);
- /* remap all hugepages */
- if (map_all_hugepages(&tmp_hp[hp_offset], hpi, NULL, 0) !=
- hpi->num_pages[0]) {
- RTE_LOG(ERR, EAL, "Failed to remap %u MB pages\n",
- (unsigned)(hpi->hugepage_sz / 0x100000));
- goto fail;
- }
-
- /* unmap original mappings */
- if (unmap_all_hugepages_orig(&tmp_hp[hp_offset], hpi) < 0)
- goto fail;
-
/* we have processed a num of hugepages of this size, so inc offset */
hp_offset += hpi->num_pages[0];
}
@@ -1191,7 +1537,7 @@ rte_eal_hugepage_init(void)
}
/* create shared memory */
- hugepage = create_shared_memory(eal_hugepage_info_path(),
+ hugepage = create_shared_memory(eal_hugepage_data_path(),
nr_hugefiles * sizeof(struct hugepage_file));
if (hugepage == NULL) {
@@ -1212,7 +1558,7 @@ rte_eal_hugepage_init(void)
/*
* copy stuff from malloc'd hugepage* to the actual shared memory.
- * this procedure only copies those hugepages that have final_va
+ * this procedure only copies those hugepages that have orig_va
* not NULL. has overflow protection.
*/
if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
@@ -1221,6 +1567,23 @@ rte_eal_hugepage_init(void)
goto fail;
}
+#ifndef RTE_ARCH_64
+ /* for legacy 32-bit mode, we did not preallocate VA space, so do it */
+ if (internal_config.legacy_mem &&
+ prealloc_segments(hugepage, nr_hugefiles)) {
+ RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
+ goto fail;
+ }
+#endif
+
+ /* remap all pages we do need into memseg list VA space, so that those
+	 * pages become first-class citizens in the DPDK memory subsystem
+ */
+ if (remap_needed_hugepages(hugepage, nr_hugefiles)) {
+ RTE_LOG(ERR, EAL, "Couldn't remap hugepage files into memseg lists\n");
+ goto fail;
+ }
+
/* free the hugepage backing files */
if (internal_config.hugepage_unlink &&
unlink_hugepage_files(tmp_hp, internal_config.num_hugepage_sizes) < 0) {
@@ -1232,75 +1595,30 @@ rte_eal_hugepage_init(void)
free(tmp_hp);
tmp_hp = NULL;
- /* first memseg index shall be 0 after incrementing it below */
- j = -1;
- for (i = 0; i < nr_hugefiles; i++) {
- new_memseg = 0;
-
- /* if this is a new section, create a new memseg */
- if (i == 0)
- new_memseg = 1;
- else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
- new_memseg = 1;
- else if (hugepage[i].size != hugepage[i-1].size)
- new_memseg = 1;
-
-#ifdef RTE_ARCH_PPC_64
- /* On PPC64 architecture, the mmap always start from higher
- * virtual address to lower address. Here, both the physical
- * address and virtual address are in descending order */
- else if ((hugepage[i-1].physaddr - hugepage[i].physaddr) !=
- hugepage[i].size)
- new_memseg = 1;
- else if (((unsigned long)hugepage[i-1].final_va -
- (unsigned long)hugepage[i].final_va) != hugepage[i].size)
- new_memseg = 1;
-#else
- else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
- hugepage[i].size)
- new_memseg = 1;
- else if (((unsigned long)hugepage[i].final_va -
- (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
- new_memseg = 1;
-#endif
+ munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
- if (new_memseg) {
- j += 1;
- if (j == RTE_MAX_MEMSEG)
- break;
+ /* we're not going to allocate more pages, so release VA space for
+ * unused memseg lists
+ */
+ for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
+ struct rte_memseg_list *msl = &mcfg->memsegs[i];
+ size_t mem_sz;
- mcfg->memseg[j].iova = hugepage[i].physaddr;
- mcfg->memseg[j].addr = hugepage[i].final_va;
- mcfg->memseg[j].len = hugepage[i].size;
- mcfg->memseg[j].socket_id = hugepage[i].socket_id;
- mcfg->memseg[j].hugepage_sz = hugepage[i].size;
- }
- /* continuation of previous memseg */
- else {
-#ifdef RTE_ARCH_PPC_64
- /* Use the phy and virt address of the last page as segment
- * address for IBM Power architecture */
- mcfg->memseg[j].iova = hugepage[i].physaddr;
- mcfg->memseg[j].addr = hugepage[i].final_va;
-#endif
- mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
- }
- hugepage[i].memseg_id = j;
- }
+ /* skip inactive lists */
+ if (msl->base_va == NULL)
+ continue;
+ /* skip lists where there is at least one page allocated */
+ if (msl->memseg_arr.count > 0)
+ continue;
+ /* this is an unused list, deallocate it */
+ mem_sz = (size_t)msl->page_sz * msl->memseg_arr.len;
+ munmap(msl->base_va, mem_sz);
+ msl->base_va = NULL;
- if (i < nr_hugefiles) {
- RTE_LOG(ERR, EAL, "Can only reserve %d pages "
- "from %d requested\n"
- "Current %s=%d is not enough\n"
- "Please either increase it or request less amount "
- "of memory.\n",
- i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG),
- RTE_MAX_MEMSEG);
- goto fail;
+ /* destroy backing fbarray */
+ rte_fbarray_destroy(&msl->memseg_arr);
}
- munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
-
return 0;
fail:
@@ -1312,6 +1630,125 @@ fail:
return -1;
}
+static int __rte_unused
+hugepage_count_walk(const struct rte_memseg_list *msl, void *arg)
+{
+ struct hugepage_info *hpi = arg;
+
+ if (msl->page_sz != hpi->hugepage_sz)
+ return 0;
+
+ hpi->num_pages[msl->socket_id] += msl->memseg_arr.len;
+ return 0;
+}
+
+static int
+limits_callback(int socket_id, size_t cur_limit, size_t new_len)
+{
+ RTE_SET_USED(socket_id);
+ RTE_SET_USED(cur_limit);
+ RTE_SET_USED(new_len);
+ return -1;
+}
+
+static int
+eal_hugepage_init(void)
+{
+ struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
+ uint64_t memory[RTE_MAX_NUMA_NODES];
+ int hp_sz_idx, socket_id;
+
+ test_phys_addrs_available();
+
+ memset(used_hp, 0, sizeof(used_hp));
+
+ for (hp_sz_idx = 0;
+ hp_sz_idx < (int) internal_config.num_hugepage_sizes;
+ hp_sz_idx++) {
+#ifndef RTE_ARCH_64
+ struct hugepage_info dummy;
+ unsigned int i;
+#endif
+		/* also initialize hugepage sizes in used_hp */
+ struct hugepage_info *hpi;
+ hpi = &internal_config.hugepage_info[hp_sz_idx];
+ used_hp[hp_sz_idx].hugepage_sz = hpi->hugepage_sz;
+
+#ifndef RTE_ARCH_64
+ /* for 32-bit, limit number of pages on socket to whatever we've
+ * preallocated, as we cannot allocate more.
+ */
+ memset(&dummy, 0, sizeof(dummy));
+ dummy.hugepage_sz = hpi->hugepage_sz;
+ if (rte_memseg_list_walk(hugepage_count_walk, &dummy) < 0)
+ return -1;
+
+ for (i = 0; i < RTE_DIM(dummy.num_pages); i++) {
+ hpi->num_pages[i] = RTE_MIN(hpi->num_pages[i],
+ dummy.num_pages[i]);
+ }
+#endif
+ }
+
+ /* make a copy of socket_mem, needed for balanced allocation. */
+ for (hp_sz_idx = 0; hp_sz_idx < RTE_MAX_NUMA_NODES; hp_sz_idx++)
+ memory[hp_sz_idx] = internal_config.socket_mem[hp_sz_idx];
+
+ /* calculate final number of pages */
+ if (calc_num_pages_per_socket(memory,
+ internal_config.hugepage_info, used_hp,
+ internal_config.num_hugepage_sizes) < 0)
+ return -1;
+
+ for (hp_sz_idx = 0;
+ hp_sz_idx < (int)internal_config.num_hugepage_sizes;
+ hp_sz_idx++) {
+ for (socket_id = 0; socket_id < RTE_MAX_NUMA_NODES;
+ socket_id++) {
+ struct rte_memseg **pages;
+ struct hugepage_info *hpi = &used_hp[hp_sz_idx];
+ unsigned int num_pages = hpi->num_pages[socket_id];
+ int num_pages_alloc, i;
+
+ if (num_pages == 0)
+ continue;
+
+			pages = malloc(sizeof(*pages) * num_pages);
+			if (pages == NULL)
+				return -1;
+
+ RTE_LOG(DEBUG, EAL, "Allocating %u pages of size %" PRIu64 "M on socket %i\n",
+ num_pages, hpi->hugepage_sz >> 20, socket_id);
+
+ num_pages_alloc = eal_memalloc_alloc_seg_bulk(pages,
+ num_pages, hpi->hugepage_sz,
+ socket_id, true);
+ if (num_pages_alloc < 0) {
+ free(pages);
+ return -1;
+ }
+
+ /* mark preallocated pages as unfreeable */
+ for (i = 0; i < num_pages_alloc; i++) {
+ struct rte_memseg *ms = pages[i];
+ ms->flags |= RTE_MEMSEG_FLAG_DO_NOT_FREE;
+ }
+ free(pages);
+ }
+ }
+ /* if socket limits were specified, set them */
+ if (internal_config.force_socket_limits) {
+ unsigned int i;
+ for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
+ uint64_t limit = internal_config.socket_limit[i];
+ if (limit == 0)
+ continue;
+ if (rte_mem_alloc_validator_register("socket-limit",
+ limits_callback, i, limit))
+ RTE_LOG(ERR, EAL, "Failed to register socket limits validator callback\n");
+ }
+ }
+ return 0;
+}
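limits_callback() returns -1 unconditionally because the EAL invokes a registered validator only when an allocation would take the socket past the registered limit, so a plain refusal is enough to implement a hard cap. A usage sketch of the same hook; hard_cap() and apply_cap() are illustrative names:

#include <stddef.h>
#include <rte_common.h>
#include <rte_memory.h>

/* deny any allocation that would exceed the registered limit */
static int
hard_cap(int socket_id, size_t cur_limit, size_t new_len)
{
	RTE_SET_USED(socket_id);
	RTE_SET_USED(cur_limit);
	RTE_SET_USED(new_len);
	return -1;
}

static int
apply_cap(int socket_id, size_t limit_bytes)
{
	/* the callback fires only once limit_bytes would be exceeded */
	return rte_mem_alloc_validator_register("hard-cap", hard_cap,
			socket_id, limit_bytes);
}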
+
/*
* uses fstat to report the size of a file on disk
*/
@@ -1330,16 +1767,15 @@ getFileSize(int fd)
* configuration and finds the hugepages which form that segment, mapping them
* in order to form a contiguous block in the virtual memory space
*/
-int
-rte_eal_hugepage_attach(void)
+static int
+eal_legacy_hugepage_attach(void)
{
- const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
struct hugepage_file *hp = NULL;
- unsigned num_hp = 0;
- unsigned i, s = 0; /* s used to track the segment number */
- unsigned max_seg = RTE_MAX_MEMSEG;
+ unsigned int num_hp = 0;
+ unsigned int i = 0;
+ unsigned int cur_seg;
off_t size = 0;
- int fd, fd_zero = -1, fd_hugepage = -1;
+ int fd, fd_hugepage = -1;
if (aslr_enabled() > 0) {
RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
@@ -1350,139 +1786,429 @@ rte_eal_hugepage_attach(void)
test_phys_addrs_available();
- fd_zero = open("/dev/zero", O_RDONLY);
- if (fd_zero < 0) {
- RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
- goto error;
- }
- fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
+ fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
if (fd_hugepage < 0) {
- RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
+ RTE_LOG(ERR, EAL, "Could not open %s\n",
+ eal_hugepage_data_path());
goto error;
}
- /* map all segments into memory to make sure we get the addrs */
- for (s = 0; s < RTE_MAX_MEMSEG; ++s) {
- void *base_addr;
-
- /*
- * the first memory segment with len==0 is the one that
- * follows the last valid segment.
- */
- if (mcfg->memseg[s].len == 0)
- break;
-
- /*
- * fdzero is mmapped to get a contiguous block of virtual
- * addresses of the appropriate memseg size.
- * use mmap to get identical addresses as the primary process.
- */
- base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
- PROT_READ,
-#ifdef RTE_ARCH_PPC_64
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
-#else
- MAP_PRIVATE,
-#endif
- fd_zero, 0);
- if (base_addr == MAP_FAILED ||
- base_addr != mcfg->memseg[s].addr) {
- max_seg = s;
- if (base_addr != MAP_FAILED) {
- /* errno is stale, don't use */
- RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
- "in /dev/zero at [%p], got [%p] - "
- "please use '--base-virtaddr' option\n",
- (unsigned long long)mcfg->memseg[s].len,
- mcfg->memseg[s].addr, base_addr);
- munmap(base_addr, mcfg->memseg[s].len);
- } else {
- RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
- "in /dev/zero at [%p]: '%s'\n",
- (unsigned long long)mcfg->memseg[s].len,
- mcfg->memseg[s].addr, strerror(errno));
- }
- if (aslr_enabled() > 0) {
- RTE_LOG(ERR, EAL, "It is recommended to "
- "disable ASLR in the kernel "
- "and retry running both primary "
- "and secondary processes\n");
- }
- goto error;
- }
- }
-
size = getFileSize(fd_hugepage);
hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
if (hp == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
+ RTE_LOG(ERR, EAL, "Could not mmap %s\n",
+ eal_hugepage_data_path());
goto error;
}
num_hp = size / sizeof(struct hugepage_file);
RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
- s = 0;
- while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
- void *addr, *base_addr;
- uintptr_t offset = 0;
- size_t mapping_size;
- /*
- * free previously mapped memory so we can map the
- * hugepages into the space
- */
- base_addr = mcfg->memseg[s].addr;
- munmap(base_addr, mcfg->memseg[s].len);
-
- /* find the hugepages for this segment and map them
- * we don't need to worry about order, as the server sorted the
- * entries before it did the second mmap of them */
- for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){
- if (hp[i].memseg_id == (int)s){
- fd = open(hp[i].filepath, O_RDWR);
- if (fd < 0) {
- RTE_LOG(ERR, EAL, "Could not open %s\n",
- hp[i].filepath);
- goto error;
- }
- mapping_size = hp[i].size;
- addr = mmap(RTE_PTR_ADD(base_addr, offset),
- mapping_size, PROT_READ | PROT_WRITE,
- MAP_SHARED, fd, 0);
- close(fd); /* close file both on success and on failure */
- if (addr == MAP_FAILED ||
- addr != RTE_PTR_ADD(base_addr, offset)) {
- RTE_LOG(ERR, EAL, "Could not mmap %s\n",
- hp[i].filepath);
- goto error;
- }
- offset+=mapping_size;
- }
+	/* map all segments into memory to make sure we get the addrs. the
+	 * segments themselves are already in the memseg list (which is shared
+	 * and has its VA space already preallocated), so we just need to map
+	 * everything at the correct addresses.
+ */
+ for (i = 0; i < num_hp; i++) {
+ struct hugepage_file *hf = &hp[i];
+ size_t map_sz = hf->size;
+ void *map_addr = hf->final_va;
+
+ /* if size is zero, no more pages left */
+ if (map_sz == 0)
+ break;
+
+ fd = open(hf->filepath, O_RDWR);
+ if (fd < 0) {
+ RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
+ hf->filepath, strerror(errno));
+ goto error;
+ }
+
+ map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_FIXED, fd, 0);
+ if (map_addr == MAP_FAILED) {
+ RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
+ hf->filepath, strerror(errno));
+ close(fd);
+ goto error;
+ }
+
+ /* set shared lock on the file. */
+ if (flock(fd, LOCK_SH) < 0) {
+ RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
+ __func__, strerror(errno));
+ close(fd);
+ goto error;
}
- RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
- (unsigned long long)mcfg->memseg[s].len);
- s++;
+
+ close(fd);
}
/* unmap the hugepage config file, since we are done using it */
munmap(hp, size);
- close(fd_zero);
close(fd_hugepage);
return 0;
error:
- for (i = 0; i < max_seg && mcfg->memseg[i].len > 0; i++)
- munmap(mcfg->memseg[i].addr, mcfg->memseg[i].len);
+	/* unmap whatever we mapped before the failure */
+	for (cur_seg = 0; cur_seg < i; cur_seg++) {
+		struct hugepage_file *hf = &hp[cur_seg];
+ size_t map_sz = hf->size;
+ void *map_addr = hf->final_va;
+
+ munmap(map_addr, map_sz);
+ }
if (hp != NULL && hp != MAP_FAILED)
munmap(hp, size);
- if (fd_zero >= 0)
- close(fd_zero);
if (fd_hugepage >= 0)
close(fd_hugepage);
return -1;
}
+static int
+eal_hugepage_attach(void)
+{
+ if (eal_memalloc_sync_with_primary()) {
+ RTE_LOG(ERR, EAL, "Could not map memory from primary process\n");
+ if (aslr_enabled() > 0)
+ RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes\n");
+ return -1;
+ }
+ return 0;
+}
+
+int
+rte_eal_hugepage_init(void)
+{
+ return internal_config.legacy_mem ?
+ eal_legacy_hugepage_init() :
+ eal_hugepage_init();
+}
+
+int
+rte_eal_hugepage_attach(void)
+{
+ return internal_config.legacy_mem ?
+ eal_legacy_hugepage_attach() :
+ eal_hugepage_attach();
+}
+
int
rte_eal_using_phys_addrs(void)
{
return phys_addrs_available;
}
+
+static int __rte_unused
+memseg_primary_init_32(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int active_sockets, hpi_idx, msl_idx = 0;
+ unsigned int socket_id, i;
+ struct rte_memseg_list *msl;
+ uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
+ uint64_t max_mem;
+
+ /* no-huge does not need this at all */
+ if (internal_config.no_hugetlbfs)
+ return 0;
+
+ /* this is a giant hack, but desperate times call for desperate
+ * measures. in legacy 32-bit mode, we cannot preallocate VA space,
+ * because having upwards of 2 gigabytes of VA space already mapped will
+ * interfere with our ability to map and sort hugepages.
+ *
+ * therefore, in legacy 32-bit mode, we will be initializing memseg
+ * lists much later - in eal_memory.c, right after we unmap all the
+ * unneeded pages. this will not affect secondary processes, as those
+ * should be able to mmap the space without (too many) problems.
+ */
+ if (internal_config.legacy_mem)
+ return 0;
+
+ /* 32-bit mode is a very special case. we cannot know in advance where
+ * the user will want to allocate their memory, so we have to do some
+ * heuristics.
+ */
+ active_sockets = 0;
+ total_requested_mem = 0;
+ if (internal_config.force_sockets)
+ for (i = 0; i < rte_socket_count(); i++) {
+ uint64_t mem;
+
+ socket_id = rte_socket_id_by_idx(i);
+ mem = internal_config.socket_mem[socket_id];
+
+ if (mem == 0)
+ continue;
+
+ active_sockets++;
+ total_requested_mem += mem;
+ }
+ else
+ total_requested_mem = internal_config.memory;
+
+ max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
+ if (total_requested_mem > max_mem) {
+ RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
+ (unsigned int)(max_mem >> 20));
+ return -1;
+ }
+ total_extra_mem = max_mem - total_requested_mem;
+ extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
+ total_extra_mem / active_sockets;
+
+ /* the allocation logic is a little bit convoluted, but here's how it
+ * works, in a nutshell:
+ * - if user hasn't specified on which sockets to allocate memory via
+ * --socket-mem, we allocate all of our memory on master core socket.
+ * - if user has specified sockets to allocate memory on, there may be
+ * some "unused" memory left (e.g. if user has specified --socket-mem
+ * such that not all memory adds up to 2 gigabytes), so add it to all
+ * sockets that are in use equally.
+ *
+ * page sizes are sorted by size in descending order, so we can safely
+ * assume that we dispense with bigger page sizes first.
+ */
+
+ /* create memseg lists */
+ for (i = 0; i < rte_socket_count(); i++) {
+ int hp_sizes = (int) internal_config.num_hugepage_sizes;
+ uint64_t max_socket_mem, cur_socket_mem;
+ unsigned int master_lcore_socket;
+ struct rte_config *cfg = rte_eal_get_configuration();
+ bool skip;
+
+ socket_id = rte_socket_id_by_idx(i);
+
+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (socket_id > 0)
+ break;
+#endif
+
+ /* if we didn't specifically request memory on this socket */
+ skip = active_sockets != 0 &&
+ internal_config.socket_mem[socket_id] == 0;
+ /* ...or if we didn't specifically request memory on *any*
+ * socket, and this is not master lcore
+ */
+ master_lcore_socket = rte_lcore_to_socket_id(cfg->master_lcore);
+ skip |= active_sockets == 0 && socket_id != master_lcore_socket;
+
+ if (skip) {
+ RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
+ socket_id);
+ continue;
+ }
+
+ /* max amount of memory on this socket */
+ max_socket_mem = (active_sockets != 0 ?
+ internal_config.socket_mem[socket_id] :
+ internal_config.memory) +
+ extra_mem_per_socket;
+ cur_socket_mem = 0;
+
+ for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
+ uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
+ uint64_t hugepage_sz;
+ struct hugepage_info *hpi;
+ int type_msl_idx, max_segs, total_segs = 0;
+
+ hpi = &internal_config.hugepage_info[hpi_idx];
+ hugepage_sz = hpi->hugepage_sz;
+
+ /* check if pages are actually available */
+ if (hpi->num_pages[socket_id] == 0)
+ continue;
+
+ max_segs = RTE_MAX_MEMSEG_PER_TYPE;
+ max_pagesz_mem = max_socket_mem - cur_socket_mem;
+
+ /* make it multiple of page size */
+ max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
+ hugepage_sz);
+
+ RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
+ "%" PRIu64 "M on socket %i\n",
+ max_pagesz_mem >> 20, socket_id);
+
+ type_msl_idx = 0;
+ while (cur_pagesz_mem < max_pagesz_mem &&
+ total_segs < max_segs) {
+ uint64_t cur_mem;
+ unsigned int n_segs;
+
+ if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
+ RTE_LOG(ERR, EAL,
+ "No more space in memseg lists, please increase %s\n",
+ RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
+ return -1;
+ }
+
+ msl = &mcfg->memsegs[msl_idx];
+
+ cur_mem = get_mem_amount(hugepage_sz,
+ max_pagesz_mem);
+ n_segs = cur_mem / hugepage_sz;
+
+ if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+ socket_id, type_msl_idx)) {
+ /* failing to allocate a memseg list is
+ * a serious error.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
+ return -1;
+ }
+
+ if (alloc_va_space(msl)) {
+ /* if we couldn't allocate VA space, we
+ * can try with smaller page sizes.
+ */
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
+ /* deallocate memseg list */
+ if (free_memseg_list(msl))
+ return -1;
+ break;
+ }
+
+ total_segs += msl->memseg_arr.len;
+ cur_pagesz_mem = total_segs * hugepage_sz;
+ type_msl_idx++;
+ msl_idx++;
+ }
+ cur_socket_mem += cur_pagesz_mem;
+ }
+ if (cur_socket_mem == 0) {
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
+ socket_id);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int __rte_unused
+memseg_primary_init(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int i, socket_id, hpi_idx, msl_idx = 0;
+ struct rte_memseg_list *msl;
+ uint64_t max_mem, total_mem;
+
+ /* no-huge does not need this at all */
+ if (internal_config.no_hugetlbfs)
+ return 0;
+
+ max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
+ total_mem = 0;
+
+ /* create memseg lists */
+ for (hpi_idx = 0; hpi_idx < (int) internal_config.num_hugepage_sizes;
+ hpi_idx++) {
+ struct hugepage_info *hpi;
+ uint64_t hugepage_sz;
+
+ hpi = &internal_config.hugepage_info[hpi_idx];
+ hugepage_sz = hpi->hugepage_sz;
+
+ for (i = 0; i < (int) rte_socket_count(); i++) {
+ uint64_t max_type_mem, total_type_mem = 0;
+ int type_msl_idx, max_segs, total_segs = 0;
+
+ socket_id = rte_socket_id_by_idx(i);
+
+#ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
+ if (socket_id > 0)
+ break;
+#endif
+
+ if (total_mem >= max_mem)
+ break;
+
+ max_type_mem = RTE_MIN(max_mem - total_mem,
+ (uint64_t)RTE_MAX_MEM_MB_PER_TYPE << 20);
+ max_segs = RTE_MAX_MEMSEG_PER_TYPE;
+
+ type_msl_idx = 0;
+ while (total_type_mem < max_type_mem &&
+ total_segs < max_segs) {
+ uint64_t cur_max_mem, cur_mem;
+ unsigned int n_segs;
+
+ if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
+ RTE_LOG(ERR, EAL,
+ "No more space in memseg lists, please increase %s\n",
+ RTE_STR(CONFIG_RTE_MAX_MEMSEG_LISTS));
+ return -1;
+ }
+
+ msl = &mcfg->memsegs[msl_idx++];
+
+ cur_max_mem = max_type_mem - total_type_mem;
+
+ cur_mem = get_mem_amount(hugepage_sz,
+ cur_max_mem);
+ n_segs = cur_mem / hugepage_sz;
+
+ if (alloc_memseg_list(msl, hugepage_sz, n_segs,
+ socket_id, type_msl_idx))
+ return -1;
+
+ total_segs += msl->memseg_arr.len;
+ total_type_mem = total_segs * hugepage_sz;
+ type_msl_idx++;
+
+ if (alloc_va_space(msl)) {
+ RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list\n");
+ return -1;
+ }
+ }
+ total_mem += total_type_mem;
+ }
+ }
+ return 0;
+}
+
+static int
+memseg_secondary_init(void)
+{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ int msl_idx = 0;
+ struct rte_memseg_list *msl;
+
+ for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
+
+ msl = &mcfg->memsegs[msl_idx];
+
+ /* skip empty memseg lists */
+ if (msl->memseg_arr.len == 0)
+ continue;
+
+ if (rte_fbarray_attach(&msl->memseg_arr)) {
+ RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
+ return -1;
+ }
+
+ /* preallocate VA space */
+ if (alloc_va_space(msl)) {
+ RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_eal_memseg_init(void)
+{
+ return rte_eal_process_type() == RTE_PROC_PRIMARY ?
+#ifndef RTE_ARCH_64
+ memseg_primary_init_32() :
+#else
+ memseg_primary_init() :
+#endif
+ memseg_secondary_init();
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
index 08e150b7..b496fc71 100644
--- a/lib/librte_eal/linuxapp/eal/eal_thread.c
+++ b/lib/librte_eal/linuxapp/eal/eal_thread.c
@@ -119,7 +119,7 @@ eal_thread_loop(__attribute__((unused)) void *arg)
if (eal_thread_set_affinity() < 0)
rte_panic("cannot set affinity\n");
- ret = eal_thread_dump_affinity(cpuset, RTE_CPU_AFFINITY_STR_LEN);
+ ret = eal_thread_dump_affinity(cpuset, sizeof(cpuset));
RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%x;cpuset=[%s%s])\n",
lcore_id, (int)thread_id, cpuset, ret == 0 ? "" : "...");
@@ -176,7 +176,7 @@ int rte_sys_gettid(void)
int rte_thread_setname(pthread_t id, const char *name)
{
- int ret = -1;
+ int ret = ENOSYS;
#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#if __GLIBC_PREREQ(2, 12)
ret = pthread_setname_np(id, name);
@@ -184,5 +184,5 @@ int rte_thread_setname(pthread_t id, const char *name)
#endif
RTE_SET_USED(id);
RTE_SET_USED(name);
- return ret;
+ return -ret;
}
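With this change rte_thread_setname() reports -ENOSYS when pthread_setname_np() is unavailable instead of a bare -1, so callers can tell "unsupported" apart from other failures. A short usage sketch under that assumption:

#include <errno.h>
#include <pthread.h>
#include <rte_lcore.h> /* rte_thread_setname() */
#include <rte_log.h>

static void
name_thread(pthread_t id)
{
	int ret = rte_thread_setname(id, "my-worker");

	if (ret == -ENOSYS)
		RTE_LOG(DEBUG, EAL, "thread naming not supported here\n");
	else if (ret != 0)
		RTE_LOG(DEBUG, EAL, "failed to set thread name\n");
}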
diff --git a/lib/librte_eal/linuxapp/eal/eal_timer.c b/lib/librte_eal/linuxapp/eal/eal_timer.c
index 161322f2..2766bd78 100644
--- a/lib/librte_eal/linuxapp/eal/eal_timer.c
+++ b/lib/librte_eal/linuxapp/eal/eal_timer.c
@@ -137,7 +137,6 @@ int
rte_eal_hpet_init(int make_default)
{
int fd, ret;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
if (internal_config.no_hpet) {
RTE_LOG(NOTICE, EAL, "HPET is disabled\n");
@@ -178,7 +177,7 @@ rte_eal_hpet_init(int make_default)
/* create a thread that will increment a global variable for
* msb (hpet is 32 bits by default under linux) */
- ret = pthread_create(&msb_inc_thread_id, NULL,
+ ret = rte_ctrl_thread_create(&msb_inc_thread_id, "hpet-msb-inc", NULL,
(void *(*)(void *))hpet_msb_inc, NULL);
if (ret != 0) {
RTE_LOG(ERR, EAL, "ERROR: Cannot create HPET timer thread!\n");
@@ -186,15 +185,6 @@ rte_eal_hpet_init(int make_default)
return -1;
}
- /*
- * Set thread_name for aid in debugging.
- */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "hpet-msb-inc");
- ret = rte_thread_setname(msb_inc_thread_id, thread_name);
- if (ret != 0)
- RTE_LOG(DEBUG, EAL,
- "Cannot set HPET timer thread name!\n");
-
if (make_default)
eal_timer_source = EAL_TIMER_HPET;
return 0;
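rte_ctrl_thread_create() bundles thread creation, naming, and control-thread affinity handling, which is why the manual rte_thread_setname() sequence deleted above is no longer needed. A usage sketch; monitor_loop() and start_monitor() are illustrative names:

#include <pthread.h>
#include <rte_lcore.h> /* rte_ctrl_thread_create() */

static void *
monitor_loop(void *arg)
{
	(void)arg;
	/* ... periodic control-path work ... */
	return NULL;
}

static int
start_monitor(pthread_t *tid)
{
	/* NULL attr keeps default attributes; the thread is named and
	 * kept off the data-path lcores automatically.
	 */
	return rte_ctrl_thread_create(tid, "monitor", NULL,
			monitor_loop, NULL);
}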
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c
index e44ae4d0..c68dc38e 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
+#include <inttypes.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
+#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>
@@ -18,59 +20,258 @@
#ifdef VFIO_PRESENT
+#define VFIO_MEM_EVENT_CLB_NAME "vfio_mem_event_clb"
+
+/* hot plug/unplug of VFIO groups may cause all DMA maps to be dropped. we can
+ * recreate the mappings for DPDK segments, but we cannot do so for memory that
+ * was registered by the user themselves, so we need to store the user mappings
+ * somewhere, to recreate them later.
+ */
+#define VFIO_MAX_USER_MEM_MAPS 256
+struct user_mem_map {
+ uint64_t addr;
+ uint64_t iova;
+ uint64_t len;
+};
+
+struct user_mem_maps {
+ rte_spinlock_recursive_t lock;
+ int n_maps;
+ struct user_mem_map maps[VFIO_MAX_USER_MEM_MAPS];
+};
+
+struct vfio_config {
+ int vfio_enabled;
+ int vfio_container_fd;
+ int vfio_active_groups;
+ const struct vfio_iommu_type *vfio_iommu_type;
+ struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
+ struct user_mem_maps mem_maps;
+};
+
/* per-process VFIO config */
-static struct vfio_config vfio_cfg;
+static struct vfio_config vfio_cfgs[VFIO_MAX_CONTAINERS];
+static struct vfio_config *default_vfio_cfg = &vfio_cfgs[0];
static int vfio_type1_dma_map(int);
+static int vfio_type1_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
static int vfio_spapr_dma_map(int);
+static int vfio_spapr_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
static int vfio_noiommu_dma_map(int);
+static int vfio_noiommu_dma_mem_map(int, uint64_t, uint64_t, uint64_t, int);
+static int vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr,
+ uint64_t iova, uint64_t len, int do_map);
/* IOMMU types we support */
static const struct vfio_iommu_type iommu_types[] = {
/* x86 IOMMU, otherwise known as type 1 */
- { RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map},
+ {
+ .type_id = RTE_VFIO_TYPE1,
+ .name = "Type 1",
+ .dma_map_func = &vfio_type1_dma_map,
+ .dma_user_map_func = &vfio_type1_dma_mem_map
+ },
/* ppc64 IOMMU, otherwise known as spapr */
- { RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map},
+ {
+ .type_id = RTE_VFIO_SPAPR,
+ .name = "sPAPR",
+ .dma_map_func = &vfio_spapr_dma_map,
+ .dma_user_map_func = &vfio_spapr_dma_mem_map
+ },
/* IOMMU-less mode */
- { RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map},
+ {
+ .type_id = RTE_VFIO_NOIOMMU,
+ .name = "No-IOMMU",
+ .dma_map_func = &vfio_noiommu_dma_map,
+ .dma_user_map_func = &vfio_noiommu_dma_mem_map
+ },
};
-int
-vfio_get_group_fd(int iommu_group_no)
+static int
+is_null_map(const struct user_mem_map *map)
{
- int i;
- int vfio_group_fd;
- char filename[PATH_MAX];
- struct vfio_group *cur_grp;
+ return map->addr == 0 && map->iova == 0 && map->len == 0;
+}
- /* check if we already have the group descriptor open */
- for (i = 0; i < VFIO_MAX_GROUPS; i++)
- if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no)
- return vfio_cfg.vfio_groups[i].fd;
+/* we may need to merge user mem maps together in case of user mapping/unmapping
+ * chunks of memory, so we'll need a comparator function to sort segments.
+ */
+static int
+user_mem_map_cmp(const void *a, const void *b)
+{
+ const struct user_mem_map *umm_a = a;
+ const struct user_mem_map *umm_b = b;
- /* Lets see first if there is room for a new group */
- if (vfio_cfg.vfio_active_groups == VFIO_MAX_GROUPS) {
- RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
+ /* move null entries to end */
+ if (is_null_map(umm_a))
+ return 1;
+ if (is_null_map(umm_b))
return -1;
- }
- /* Now lets get an index for the new group */
- for (i = 0; i < VFIO_MAX_GROUPS; i++)
- if (vfio_cfg.vfio_groups[i].group_no == -1) {
- cur_grp = &vfio_cfg.vfio_groups[i];
- break;
- }
+ /* sort by iova first */
+ if (umm_a->iova < umm_b->iova)
+ return -1;
+ if (umm_a->iova > umm_b->iova)
+ return 1;
- /* This should not happen */
- if (i == VFIO_MAX_GROUPS) {
- RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
+ if (umm_a->addr < umm_b->addr)
return -1;
+ if (umm_a->addr > umm_b->addr)
+ return 1;
+
+ if (umm_a->len < umm_b->len)
+ return -1;
+ if (umm_a->len > umm_b->len)
+ return 1;
+
+ return 0;
+}
+
+/* adjust a user map entry. this may result in shortening of the existing map,
+ * or in splitting the existing map into two pieces.
+ */
+static void
+adjust_map(struct user_mem_map *src, struct user_mem_map *end,
+ uint64_t remove_va_start, uint64_t remove_len)
+{
+ /* if va start is same as start address, we're simply moving start */
+ if (remove_va_start == src->addr) {
+ src->addr += remove_len;
+ src->iova += remove_len;
+ src->len -= remove_len;
+ } else if (remove_va_start + remove_len == src->addr + src->len) {
+ /* we're shrinking mapping from the end */
+ src->len -= remove_len;
+ } else {
+ /* we're blowing a hole in the middle */
+ struct user_mem_map tmp;
+ uint64_t total_len = src->len;
+
+ /* adjust source segment length */
+ src->len = remove_va_start - src->addr;
+
+ /* create temporary segment in the middle */
+ tmp.addr = src->addr + src->len;
+ tmp.iova = src->iova + src->len;
+ tmp.len = remove_len;
+
+ /* populate end segment - this one we will be keeping */
+ end->addr = tmp.addr + tmp.len;
+ end->iova = tmp.iova + tmp.len;
+ end->len = total_len - src->len - tmp.len;
+ }
+}
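The three branches of adjust_map() (trim the front, trim the back, punch a hole) are easiest to follow with concrete numbers; the worked example below traces the middle-hole case through the code above:

/* before:  src = { addr = 0x1000, iova = 0x1000, len = 0x4000 }
 * remove:  [0x2000, 0x3000), i.e. remove_va_start = 0x2000,
 *          remove_len = 0x1000 (neither edge matches src, so the
 *          middle-hole branch is taken)
 *
 * after adjust_map():
 *   src = { addr = 0x1000, iova = 0x1000, len = 0x1000 }  (left piece)
 *   end = { addr = 0x3000, iova = 0x3000, len = 0x2000 }  (right piece)
 * the temporary middle segment [0x2000, 0x3000) is exactly the part
 * being removed and is discarded.
 */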
+
+/* try merging two maps into one, return 1 if succeeded */
+static int
+merge_map(struct user_mem_map *left, struct user_mem_map *right)
+{
+ if (left->addr + left->len != right->addr)
+ return 0;
+ if (left->iova + left->len != right->iova)
+ return 0;
+
+ left->len += right->len;
+
+ memset(right, 0, sizeof(*right));
+
+ return 1;
+}
+
+static struct user_mem_map *
+find_user_mem_map(struct user_mem_maps *user_mem_maps, uint64_t addr,
+ uint64_t iova, uint64_t len)
+{
+ uint64_t va_end = addr + len;
+ uint64_t iova_end = iova + len;
+ int i;
+
+ for (i = 0; i < user_mem_maps->n_maps; i++) {
+ struct user_mem_map *map = &user_mem_maps->maps[i];
+ uint64_t map_va_end = map->addr + map->len;
+ uint64_t map_iova_end = map->iova + map->len;
+
+ /* check start VA */
+ if (addr < map->addr || addr >= map_va_end)
+ continue;
+ /* check if VA end is within boundaries */
+ if (va_end <= map->addr || va_end > map_va_end)
+ continue;
+
+ /* check start IOVA */
+ if (iova < map->iova || iova >= map_iova_end)
+ continue;
+ /* check if IOVA end is within boundaries */
+ if (iova_end <= map->iova || iova_end > map_iova_end)
+ continue;
+
+ /* we've found our map */
+ return map;
}
+ return NULL;
+}
+
+/* this will sort all user maps, and merge/compact any adjacent maps */
+static void
+compact_user_maps(struct user_mem_maps *user_mem_maps)
+{
+ int i, n_merged, cur_idx;
+
+ qsort(user_mem_maps->maps, user_mem_maps->n_maps,
+ sizeof(user_mem_maps->maps[0]), user_mem_map_cmp);
+
+ /* we'll go over the list backwards when merging */
+ n_merged = 0;
+ for (i = user_mem_maps->n_maps - 2; i >= 0; i--) {
+ struct user_mem_map *l, *r;
+
+ l = &user_mem_maps->maps[i];
+ r = &user_mem_maps->maps[i + 1];
+
+ if (is_null_map(l) || is_null_map(r))
+ continue;
+
+ if (merge_map(l, r))
+ n_merged++;
+ }
+
+ /* the entries are still sorted, but now they have holes in them, so
+ * walk through the list and remove the holes
+ */
+ if (n_merged > 0) {
+ cur_idx = 0;
+ for (i = 0; i < user_mem_maps->n_maps; i++) {
+ if (!is_null_map(&user_mem_maps->maps[i])) {
+ struct user_mem_map *src, *dst;
+
+ src = &user_mem_maps->maps[i];
+ dst = &user_mem_maps->maps[cur_idx++];
+
+ if (src != dst) {
+ memcpy(dst, src, sizeof(*src));
+ memset(src, 0, sizeof(*src));
+ }
+ }
+ }
+ user_mem_maps->n_maps = cur_idx;
+ }
+}
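merge_map() only coalesces neighbours whose VA and IOVA ranges are both adjacent, and compact_user_maps() then squeezes out the zeroed entries. A small standalone demonstration of the merge rule, using the same field layout (mem_map and try_merge are illustrative names):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mem_map { uint64_t addr, iova, len; };

/* same rule as merge_map(): both VA and IOVA must be adjacent */
static int
try_merge(struct mem_map *l, struct mem_map *r)
{
	if (l->addr + l->len != r->addr || l->iova + l->len != r->iova)
		return 0;
	l->len += r->len;
	memset(r, 0, sizeof(*r)); /* becomes a null entry to compact away */
	return 1;
}

int main(void)
{
	struct mem_map a = { 0x1000, 0x1000, 0x1000 };
	struct mem_map b = { 0x2000, 0x2000, 0x1000 };

	printf("merged: %d, new len: 0x%" PRIx64 "\n",
			try_merge(&a, &b), a.len); /* merged: 1, 0x2000 */
	return 0;
}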
+
+static int
+vfio_open_group_fd(int iommu_group_num)
+{
+ int vfio_group_fd;
+ char filename[PATH_MAX];
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply;
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
+
/* if primary, try to open the group */
if (internal_config.process_type == RTE_PROC_PRIMARY) {
/* try regular group format */
snprintf(filename, sizeof(filename),
- VFIO_GROUP_FMT, iommu_group_no);
+ VFIO_GROUP_FMT, iommu_group_num);
vfio_group_fd = open(filename, O_RDWR);
if (vfio_group_fd < 0) {
/* if file not found, it's not an error */
@@ -82,7 +283,8 @@ vfio_get_group_fd(int iommu_group_no)
/* special case: try no-IOMMU path as well */
snprintf(filename, sizeof(filename),
- VFIO_NOIOMMU_GROUP_FMT, iommu_group_no);
+ VFIO_NOIOMMU_GROUP_FMT,
+ iommu_group_num);
vfio_group_fd = open(filename, O_RDWR);
if (vfio_group_fd < 0) {
if (errno != ENOENT) {
@@ -95,178 +297,285 @@ vfio_get_group_fd(int iommu_group_no)
/* noiommu group found */
}
- cur_grp->group_no = iommu_group_no;
- cur_grp->fd = vfio_group_fd;
- vfio_cfg.vfio_active_groups++;
return vfio_group_fd;
}
/* if we're in a secondary process, request group fd from the primary
- * process via our socket
+ * process via mp channel.
*/
- else {
- int socket_fd, ret;
+ p->req = SOCKET_REQ_GROUP;
+ p->group_num = iommu_group_num;
+ strcpy(mp_req.name, EAL_VFIO_MP);
+ mp_req.len_param = sizeof(*p);
+ mp_req.num_fds = 0;
+
+ vfio_group_fd = -1;
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ p = (struct vfio_mp_param *)mp_rep->param;
+ if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
+ vfio_group_fd = mp_rep->fds[0];
+ } else if (p->result == SOCKET_NO_FD) {
+ RTE_LOG(ERR, EAL, " bad VFIO group fd\n");
+ vfio_group_fd = 0;
+ }
+ free(mp_reply.msgs);
+ }
- socket_fd = vfio_mp_sync_connect_to_primary();
+ if (vfio_group_fd < 0)
+ RTE_LOG(ERR, EAL, " cannot request group fd\n");
+ return vfio_group_fd;
+}
- if (socket_fd < 0) {
- RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
- return -1;
- }
- if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_GROUP) < 0) {
- RTE_LOG(ERR, EAL, " cannot request container fd!\n");
- close(socket_fd);
- return -1;
- }
- if (vfio_mp_sync_send_request(socket_fd, iommu_group_no) < 0) {
- RTE_LOG(ERR, EAL, " cannot send group number!\n");
- close(socket_fd);
- return -1;
- }
- ret = vfio_mp_sync_receive_request(socket_fd);
- switch (ret) {
- case SOCKET_NO_FD:
- close(socket_fd);
- return 0;
- case SOCKET_OK:
- vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd);
- /* if we got the fd, store it and return it */
- if (vfio_group_fd > 0) {
- close(socket_fd);
- cur_grp->group_no = iommu_group_no;
- cur_grp->fd = vfio_group_fd;
- vfio_cfg.vfio_active_groups++;
- return vfio_group_fd;
- }
- /* fall-through on error */
- default:
- RTE_LOG(ERR, EAL, " cannot get container fd!\n");
- close(socket_fd);
- return -1;
+static struct vfio_config *
+get_vfio_cfg_by_group_num(int iommu_group_num)
+{
+ struct vfio_config *vfio_cfg;
+ int i, j;
+
+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
+ vfio_cfg = &vfio_cfgs[i];
+ for (j = 0; j < VFIO_MAX_GROUPS; j++) {
+ if (vfio_cfg->vfio_groups[j].group_num ==
+ iommu_group_num)
+ return vfio_cfg;
}
}
- return -1;
+
+ return NULL;
}
+static struct vfio_config *
+get_vfio_cfg_by_group_fd(int vfio_group_fd)
+{
+ struct vfio_config *vfio_cfg;
+ int i, j;
+
+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
+ vfio_cfg = &vfio_cfgs[i];
+ for (j = 0; j < VFIO_MAX_GROUPS; j++)
+ if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
+ return vfio_cfg;
+ }
-static int
-get_vfio_group_idx(int vfio_group_fd)
+ return NULL;
+}
+
+static struct vfio_config *
+get_vfio_cfg_by_container_fd(int container_fd)
+{
+ int i;
+
+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
+ if (vfio_cfgs[i].vfio_container_fd == container_fd)
+ return &vfio_cfgs[i];
+ }
+
+ return NULL;
+}
+
+int
+rte_vfio_get_group_fd(int iommu_group_num)
{
int i;
+ int vfio_group_fd;
+ struct vfio_group *cur_grp;
+ struct vfio_config *vfio_cfg;
+
+ /* get the vfio_config it belongs to */
+ vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
+ vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
+
+ /* check if we already have the group descriptor open */
for (i = 0; i < VFIO_MAX_GROUPS; i++)
- if (vfio_cfg.vfio_groups[i].fd == vfio_group_fd)
- return i;
+ if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num)
+ return vfio_cfg->vfio_groups[i].fd;
+
+ /* Let's first see if there is room for a new group */
+ if (vfio_cfg->vfio_active_groups == VFIO_MAX_GROUPS) {
+ RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
+ return -1;
+ }
+
+ /* Now let's get an index for the new group */
+ for (i = 0; i < VFIO_MAX_GROUPS; i++)
+ if (vfio_cfg->vfio_groups[i].group_num == -1) {
+ cur_grp = &vfio_cfg->vfio_groups[i];
+ break;
+ }
+
+ /* This should not happen */
+ if (i == VFIO_MAX_GROUPS) {
+ RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
+ return -1;
+ }
+
+ vfio_group_fd = vfio_open_group_fd(iommu_group_num);
+ if (vfio_group_fd < 0) {
+ RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
+ return -1;
+ }
+
+ cur_grp->group_num = iommu_group_num;
+ cur_grp->fd = vfio_group_fd;
+ vfio_cfg->vfio_active_groups++;
+
+ return vfio_group_fd;
+}
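
A typical caller chains rte_vfio_get_group_num() and rte_vfio_get_group_fd(), as rte_vfio_setup_device() does below. A sketch with an assumed PCI sysfs base and device address:

	#include <rte_vfio.h>

	/* hypothetical helper: open the VFIO group fd for one PCI device */
	static int
	open_group_for_device(const char *pci_addr)
	{
		int iommu_group_num;
		int ret;

		/* e.g. pci_addr = "0000:06:00.0" under the PCI sysfs base */
		ret = rte_vfio_get_group_num("/sys/bus/pci/devices", pci_addr,
				&iommu_group_num);
		if (ret <= 0)
			return -1; /* 0: not bound to VFIO, <0: error */

		/* opens (or reuses) the group fd and tracks it in vfio_cfg */
		return rte_vfio_get_group_fd(iommu_group_num);
	}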
+
+static int
+get_vfio_group_idx(int vfio_group_fd)
+{
+ struct vfio_config *vfio_cfg;
+ int i, j;
+
+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
+ vfio_cfg = &vfio_cfgs[i];
+ for (j = 0; j < VFIO_MAX_GROUPS; j++)
+ if (vfio_cfg->vfio_groups[j].fd == vfio_group_fd)
+ return j;
+ }
+
return -1;
}
static void
vfio_group_device_get(int vfio_group_fd)
{
+ struct vfio_config *vfio_cfg;
int i;
+ vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, " invalid group fd!\n");
+ return;
+ }
+
i = get_vfio_group_idx(vfio_group_fd);
if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
else
- vfio_cfg.vfio_groups[i].devices++;
+ vfio_cfg->vfio_groups[i].devices++;
}
static void
vfio_group_device_put(int vfio_group_fd)
{
+ struct vfio_config *vfio_cfg;
int i;
+ vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, " invalid group fd!\n");
+ return;
+ }
+
i = get_vfio_group_idx(vfio_group_fd);
if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
else
- vfio_cfg.vfio_groups[i].devices--;
+ vfio_cfg->vfio_groups[i].devices--;
}
static int
vfio_group_device_count(int vfio_group_fd)
{
+ struct vfio_config *vfio_cfg;
int i;
+ vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, " invalid group fd!\n");
+ return -1;
+ }
+
i = get_vfio_group_idx(vfio_group_fd);
if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
return -1;
}
- return vfio_cfg.vfio_groups[i].devices;
+ return vfio_cfg->vfio_groups[i].devices;
}
-int
-rte_vfio_clear_group(int vfio_group_fd)
+static void
+vfio_mem_event_callback(enum rte_mem_event type, const void *addr, size_t len,
+ void *arg __rte_unused)
{
- int i;
- int socket_fd, ret;
-
- if (internal_config.process_type == RTE_PROC_PRIMARY) {
-
- i = get_vfio_group_idx(vfio_group_fd);
- if (i < 0)
- return -1;
- vfio_cfg.vfio_groups[i].group_no = -1;
- vfio_cfg.vfio_groups[i].fd = -1;
- vfio_cfg.vfio_groups[i].devices = 0;
- vfio_cfg.vfio_active_groups--;
- return 0;
+ struct rte_memseg_list *msl;
+ struct rte_memseg *ms;
+ size_t cur_len = 0;
+
+ msl = rte_mem_virt2memseg_list(addr);
+
+ /* for IOVA as VA mode, no need to care for IOVA addresses */
+ if (rte_eal_iova_mode() == RTE_IOVA_VA) {
+ uint64_t vfio_va = (uint64_t)(uintptr_t)addr;
+ if (type == RTE_MEM_EVENT_ALLOC)
+ vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
+ len, 1);
+ else
+ vfio_dma_mem_map(default_vfio_cfg, vfio_va, vfio_va,
+ len, 0);
+ return;
}
- /* This is just for SECONDARY processes */
- socket_fd = vfio_mp_sync_connect_to_primary();
+ /* memsegs are contiguous in memory */
+ ms = rte_mem_virt2memseg(addr, msl);
+ while (cur_len < len) {
+ if (type == RTE_MEM_EVENT_ALLOC)
+ vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
+ ms->iova, ms->len, 1);
+ else
+ vfio_dma_mem_map(default_vfio_cfg, ms->addr_64,
+ ms->iova, ms->len, 0);
- if (socket_fd < 0) {
- RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
- return -1;
+ cur_len += ms->len;
+ ++ms;
}
+}
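
vfio_mem_event_callback() above is invoked by the memory hotplug subsystem whenever segments are allocated or freed. Registration is one call; a minimal sketch with an illustrative callback and name:

	#include <stddef.h>

	#include <rte_common.h>
	#include <rte_log.h>
	#include <rte_memory.h>

	/* illustrative callback: log every hotplug allocation and free */
	static void
	demo_mem_event_cb(enum rte_mem_event type, const void *addr, size_t len,
			void *arg __rte_unused)
	{
		/* runs under the memory hotplug lock, so keep it short */
		RTE_LOG(DEBUG, EAL, "mem event %s: %p, len %zu\n",
			type == RTE_MEM_EVENT_ALLOC ? "alloc" : "free", addr, len);
	}

	/* register once at init; "demo_mem_event" is an arbitrary name */
	static int
	demo_register_mem_event_cb(void)
	{
		return rte_mem_event_callback_register("demo_mem_event",
				demo_mem_event_cb, NULL);
	}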
- if (vfio_mp_sync_send_request(socket_fd, SOCKET_CLR_GROUP) < 0) {
- RTE_LOG(ERR, EAL, " cannot request container fd!\n");
- close(socket_fd);
+int
+rte_vfio_clear_group(int vfio_group_fd)
+{
+ int i;
+ struct vfio_config *vfio_cfg;
+
+ vfio_cfg = get_vfio_cfg_by_group_fd(vfio_group_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, " invalid group fd!\n");
return -1;
}
- if (vfio_mp_sync_send_request(socket_fd, vfio_group_fd) < 0) {
- RTE_LOG(ERR, EAL, " cannot send group fd!\n");
- close(socket_fd);
+ i = get_vfio_group_idx(vfio_group_fd);
+ if (i < 0)
return -1;
- }
+ vfio_cfg->vfio_groups[i].group_num = -1;
+ vfio_cfg->vfio_groups[i].fd = -1;
+ vfio_cfg->vfio_groups[i].devices = 0;
+ vfio_cfg->vfio_active_groups--;
- ret = vfio_mp_sync_receive_request(socket_fd);
- switch (ret) {
- case SOCKET_NO_FD:
- RTE_LOG(ERR, EAL, " BAD VFIO group fd!\n");
- close(socket_fd);
- break;
- case SOCKET_OK:
- close(socket_fd);
- return 0;
- case SOCKET_ERR:
- RTE_LOG(ERR, EAL, " Socket error\n");
- close(socket_fd);
- break;
- default:
- RTE_LOG(ERR, EAL, " UNKNOWN reply, %d\n", ret);
- close(socket_fd);
- }
- return -1;
+ return 0;
}
int
rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
int *vfio_dev_fd, struct vfio_device_info *device_info)
{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
struct vfio_group_status group_status = {
.argsz = sizeof(group_status)
};
+ struct vfio_config *vfio_cfg;
+ struct user_mem_maps *user_mem_maps;
+ int vfio_container_fd;
int vfio_group_fd;
- int iommu_group_no;
- int ret;
+ int iommu_group_num;
+ int i, ret;
/* get group number */
- ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
+ ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
if (ret == 0) {
RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n",
dev_addr);
@@ -278,7 +587,7 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
return -1;
/* get the actual group fd */
- vfio_group_fd = vfio_get_group_fd(iommu_group_no);
+ vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
if (vfio_group_fd < 0)
return -1;
@@ -309,12 +618,18 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
return -1;
}
+ /* get the vfio_config it belongs to */
+ vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
+ vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
+ vfio_container_fd = vfio_cfg->vfio_container_fd;
+ user_mem_maps = &vfio_cfg->mem_maps;
+
/* check if group does not have a container yet */
if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
/* add group to a container */
ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
- &vfio_cfg.vfio_container_fd);
+ &vfio_container_fd);
if (ret) {
RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
"error %i (%s)\n", dev_addr, errno, strerror(errno));
@@ -332,10 +647,12 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
* functionality.
*/
if (internal_config.process_type == RTE_PROC_PRIMARY &&
- vfio_cfg.vfio_active_groups == 1) {
+ vfio_cfg->vfio_active_groups == 1 &&
+ vfio_group_device_count(vfio_group_fd) == 0) {
+ const struct vfio_iommu_type *t;
+
/* select an IOMMU type which we will be using */
- const struct vfio_iommu_type *t =
- vfio_set_iommu_type(vfio_cfg.vfio_container_fd);
+ t = vfio_set_iommu_type(vfio_container_fd);
if (!t) {
RTE_LOG(ERR, EAL,
" %s failed to select IOMMU type\n",
@@ -344,15 +661,75 @@ rte_vfio_setup_device(const char *sysfs_base, const char *dev_addr,
rte_vfio_clear_group(vfio_group_fd);
return -1;
}
- ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
+ /* lock memory hotplug before mapping and release it
+ * after registering callback, to prevent races
+ */
+ rte_rwlock_read_lock(mem_lock);
+ if (vfio_cfg == default_vfio_cfg)
+ ret = t->dma_map_func(vfio_container_fd);
+ else
+ ret = 0;
if (ret) {
RTE_LOG(ERR, EAL,
" %s DMA remapping failed, error %i (%s)\n",
dev_addr, errno, strerror(errno));
close(vfio_group_fd);
rte_vfio_clear_group(vfio_group_fd);
+ rte_rwlock_read_unlock(mem_lock);
return -1;
}
+
+ vfio_cfg->vfio_iommu_type = t;
+
+ /* re-map all user-mapped segments */
+ rte_spinlock_recursive_lock(&user_mem_maps->lock);
+
+ /* this IOMMU type may not support DMA mapping, but
+ * if we have mappings in the list, that means we have
+ * previously mapped something successfully, so we can
+ * be sure that DMA mapping is supported.
+ */
+ for (i = 0; i < user_mem_maps->n_maps; i++) {
+ struct user_mem_map *map;
+ map = &user_mem_maps->maps[i];
+
+ ret = t->dma_user_map_func(
+ vfio_container_fd,
+ map->addr, map->iova, map->len,
+ 1);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "Couldn't map user memory for DMA: "
+ "va: 0x%" PRIx64 " "
+ "iova: 0x%" PRIx64 " "
+ "len: 0x%" PRIu64 "\n",
+ map->addr, map->iova,
+ map->len);
+ rte_spinlock_recursive_unlock(
+ &user_mem_maps->lock);
+ rte_rwlock_read_unlock(mem_lock);
+ return -1;
+ }
+ }
+ rte_spinlock_recursive_unlock(&user_mem_maps->lock);
+
+ /* register callback for mem events */
+ if (vfio_cfg == default_vfio_cfg)
+ ret = rte_mem_event_callback_register(
+ VFIO_MEM_EVENT_CLB_NAME,
+ vfio_mem_event_callback, NULL);
+ else
+ ret = 0;
+ /* unlock memory hotplug */
+ rte_rwlock_read_unlock(mem_lock);
+
+ if (ret && rte_errno != ENOTSUP) {
+ RTE_LOG(ERR, EAL, "Could not install memory event callback for VFIO\n");
+ return -1;
+ }
+ if (ret)
+ RTE_LOG(DEBUG, EAL, "Memory event callbacks not supported\n");
+ else
+ RTE_LOG(DEBUG, EAL, "Installed memory event callback for VFIO\n");
}
}
@@ -390,30 +767,45 @@ int
rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
int vfio_dev_fd)
{
+ struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+ rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;
struct vfio_group_status group_status = {
.argsz = sizeof(group_status)
};
+ struct vfio_config *vfio_cfg;
int vfio_group_fd;
- int iommu_group_no;
+ int iommu_group_num;
int ret;
+ /* we don't want any DMA mapping messages to come while we're detaching
+ * the VFIO device, because this might be the last device and we might need
+ * to unregister the callback.
+ */
+ rte_rwlock_read_lock(mem_lock);
+
/* get group number */
- ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
+ ret = rte_vfio_get_group_num(sysfs_base, dev_addr, &iommu_group_num);
if (ret <= 0) {
RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
dev_addr);
/* This is an error at this point. */
- return -1;
+ ret = -1;
+ goto out;
}
/* get the actual group fd */
- vfio_group_fd = vfio_get_group_fd(iommu_group_no);
+ vfio_group_fd = rte_vfio_get_group_fd(iommu_group_num);
if (vfio_group_fd <= 0) {
- RTE_LOG(INFO, EAL, "vfio_get_group_fd failed for %s\n",
+ RTE_LOG(INFO, EAL, "rte_vfio_get_group_fd failed for %s\n",
dev_addr);
- return -1;
+ ret = -1;
+ goto out;
}
+ /* get the vfio_config it belongs to */
+ vfio_cfg = get_vfio_cfg_by_group_num(iommu_group_num);
+ vfio_cfg = vfio_cfg ? vfio_cfg : default_vfio_cfg;
+
/* At this point we got an active group. Closing it will make the
* container detachment. If this is the last active group, VFIO kernel
* code will unset the container and the IOMMU mappings.
@@ -423,7 +815,8 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
if (close(vfio_dev_fd) < 0) {
RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
dev_addr);
- return -1;
+ ret = -1;
+ goto out;
}
/* A VFIO group can have several devices attached. Only when there is
@@ -435,30 +828,53 @@ rte_vfio_release_device(const char *sysfs_base, const char *dev_addr,
if (close(vfio_group_fd) < 0) {
RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
dev_addr);
- return -1;
+ ret = -1;
+ goto out;
}
if (rte_vfio_clear_group(vfio_group_fd) < 0) {
RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
dev_addr);
- return -1;
+ ret = -1;
+ goto out;
}
}
- return 0;
+ /* if there are no active device groups, unregister the callback to
+ * avoid spurious attempts to map/unmap memory from VFIO.
+ */
+ if (vfio_cfg == default_vfio_cfg && vfio_cfg->vfio_active_groups == 0)
+ rte_mem_event_callback_unregister(VFIO_MEM_EVENT_CLB_NAME,
+ NULL);
+
+ /* success */
+ ret = 0;
+
+out:
+ rte_rwlock_read_unlock(mem_lock);
+ return ret;
}
int
rte_vfio_enable(const char *modname)
{
/* initialize group list */
- int i;
+ int i, j;
int vfio_available;
- for (i = 0; i < VFIO_MAX_GROUPS; i++) {
- vfio_cfg.vfio_groups[i].fd = -1;
- vfio_cfg.vfio_groups[i].group_no = -1;
- vfio_cfg.vfio_groups[i].devices = 0;
+ rte_spinlock_recursive_t lock = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
+
+ for (i = 0; i < VFIO_MAX_CONTAINERS; i++) {
+ vfio_cfgs[i].vfio_container_fd = -1;
+ vfio_cfgs[i].vfio_active_groups = 0;
+ vfio_cfgs[i].vfio_iommu_type = NULL;
+ vfio_cfgs[i].mem_maps.lock = lock;
+
+ for (j = 0; j < VFIO_MAX_GROUPS; j++) {
+ vfio_cfgs[i].vfio_groups[j].fd = -1;
+ vfio_cfgs[i].vfio_groups[j].group_num = -1;
+ vfio_cfgs[i].vfio_groups[j].devices = 0;
+ }
}
/* inform the user that we are probing for VFIO */
@@ -480,12 +896,12 @@ rte_vfio_enable(const char *modname)
return 0;
}
- vfio_cfg.vfio_container_fd = vfio_get_container_fd();
+ default_vfio_cfg->vfio_container_fd = rte_vfio_get_container_fd();
/* check if we have VFIO driver enabled */
- if (vfio_cfg.vfio_container_fd != -1) {
+ if (default_vfio_cfg->vfio_container_fd != -1) {
RTE_LOG(NOTICE, EAL, "VFIO support initialized\n");
- vfio_cfg.vfio_enabled = 1;
+ default_vfio_cfg->vfio_enabled = 1;
} else {
RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
}
@@ -497,7 +913,7 @@ int
rte_vfio_is_enabled(const char *modname)
{
const int mod_available = rte_eal_check_module(modname) > 0;
- return vfio_cfg.vfio_enabled && mod_available;
+ return default_vfio_cfg->vfio_enabled && mod_available;
}
const struct vfio_iommu_type *
@@ -558,9 +974,14 @@ vfio_has_supported_extensions(int vfio_container_fd)
}
int
-vfio_get_container_fd(void)
+rte_vfio_get_container_fd(void)
{
int ret, vfio_container_fd;
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply;
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ struct vfio_mp_param *p = (struct vfio_mp_param *)mp_req.param;
+
/* if we're in a primary process, try to open the container */
if (internal_config.process_type == RTE_PROC_PRIMARY) {
@@ -591,39 +1012,35 @@ vfio_get_container_fd(void)
}
return vfio_container_fd;
- } else {
- /*
- * if we're in a secondary process, request container fd from the
- * primary process via our socket
- */
- int socket_fd;
-
- socket_fd = vfio_mp_sync_connect_to_primary();
- if (socket_fd < 0) {
- RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
- return -1;
- }
- if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_CONTAINER) < 0) {
- RTE_LOG(ERR, EAL, " cannot request container fd!\n");
- close(socket_fd);
- return -1;
- }
- vfio_container_fd = vfio_mp_sync_receive_fd(socket_fd);
- if (vfio_container_fd < 0) {
- RTE_LOG(ERR, EAL, " cannot get container fd!\n");
- close(socket_fd);
- return -1;
+ }
+ /*
+ * if we're in a secondary process, request container fd from the
+ * primary process via mp channel
+ */
+ p->req = SOCKET_REQ_CONTAINER;
+ strcpy(mp_req.name, EAL_VFIO_MP);
+ mp_req.len_param = sizeof(*p);
+ mp_req.num_fds = 0;
+
+ vfio_container_fd = -1;
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0 &&
+ mp_reply.nb_received == 1) {
+ mp_rep = &mp_reply.msgs[0];
+ p = (struct vfio_mp_param *)mp_rep->param;
+ if (p->result == SOCKET_OK && mp_rep->num_fds == 1) {
+ free(mp_reply.msgs);
+ return mp_rep->fds[0];
}
- close(socket_fd);
- return vfio_container_fd;
+ free(mp_reply.msgs);
}
+ RTE_LOG(ERR, EAL, " cannot request container fd\n");
return -1;
}
int
-vfio_get_group_no(const char *sysfs_base,
- const char *dev_addr, int *iommu_group_no)
+rte_vfio_get_group_num(const char *sysfs_base,
+ const char *dev_addr, int *iommu_group_num)
{
char linkname[PATH_MAX];
char filename[PATH_MAX];
@@ -655,7 +1072,7 @@ vfio_get_group_no(const char *sysfs_base,
errno = 0;
group_tok = tok[ret - 1];
end = group_tok;
- *iommu_group_no = strtol(group_tok, &end, 10);
+ *iommu_group_num = strtol(group_tok, &end, 10);
if ((end != group_tok && *end != '\0') || errno != 0) {
RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr);
return -1;
@@ -665,34 +1082,49 @@ vfio_get_group_no(const char *sysfs_base,
}
static int
-vfio_type1_dma_map(int vfio_container_fd)
+type1_map(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- int i, ret;
+ int *vfio_container_fd = arg;
- /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- struct vfio_iommu_type1_dma_map dma_map;
+ return vfio_type1_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
+ ms->len, 1);
+}
- if (ms[i].addr == NULL)
- break;
+static int
+vfio_type1_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
+ uint64_t len, int do_map)
+{
+ struct vfio_iommu_type1_dma_map dma_map;
+ struct vfio_iommu_type1_dma_unmap dma_unmap;
+ int ret;
+ if (do_map != 0) {
memset(&dma_map, 0, sizeof(dma_map));
dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
- dma_map.vaddr = ms[i].addr_64;
- dma_map.size = ms[i].len;
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- dma_map.iova = dma_map.vaddr;
- else
- dma_map.iova = ms[i].iova;
- dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
+ dma_map.vaddr = vaddr;
+ dma_map.size = len;
+ dma_map.iova = iova;
+ dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
+ VFIO_DMA_MAP_FLAG_WRITE;
ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+ } else {
+ memset(&dma_unmap, 0, sizeof(dma_unmap));
+ dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
+ dma_unmap.size = len;
+ dma_unmap.iova = iova;
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
+ &dma_unmap);
if (ret) {
- RTE_LOG(ERR, EAL, " cannot set up DMA remapping, "
- "error %i (%s)\n", errno,
- strerror(errno));
+ RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
+ errno, strerror(errno));
return -1;
}
}
@@ -701,24 +1133,107 @@ vfio_type1_dma_map(int vfio_container_fd)
}
static int
-vfio_spapr_dma_map(int vfio_container_fd)
+vfio_type1_dma_map(int vfio_container_fd)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- int i, ret;
+ return rte_memseg_walk(type1_map, &vfio_container_fd);
+}
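
type1_map() above is an rte_memseg_walk() callback: the walk visits each allocated memseg and stops early if the callback returns non-zero, replacing the fixed RTE_MAX_MEMSEG array iteration in the removed code. A sketch of the same pattern that merely totals mapped length:

	#include <stdint.h>

	#include <rte_common.h>
	#include <rte_memory.h>

	/* illustrative walk callback: total up the length of all memsegs */
	static int
	sum_len_walk(const struct rte_memseg_list *msl __rte_unused,
			const struct rte_memseg *ms, void *arg)
	{
		uint64_t *total = arg;

		*total += ms->len;
		return 0; /* 0 continues the walk, non-zero stops it */
	}

	static uint64_t
	total_mapped_len(void)
	{
		uint64_t total = 0;

		rte_memseg_walk(sum_len_walk, &total);
		return total;
	}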
+
+static int
+vfio_spapr_dma_do_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
+ uint64_t len, int do_map)
+{
+ struct vfio_iommu_type1_dma_map dma_map;
+ struct vfio_iommu_type1_dma_unmap dma_unmap;
+ int ret;
+
+ if (do_map != 0) {
+ memset(&dma_map, 0, sizeof(dma_map));
+ dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
+ dma_map.vaddr = vaddr;
+ dma_map.size = len;
+ dma_map.iova = iova;
+ dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
+ VFIO_DMA_MAP_FLAG_WRITE;
+
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot set up DMA remapping, error %i (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+
+ } else {
+ struct vfio_iommu_spapr_register_memory reg = {
+ .argsz = sizeof(reg),
+ .flags = 0
+ };
+ reg.vaddr = (uintptr_t) vaddr;
+ reg.size = len;
+
+ ret = ioctl(vfio_container_fd,
+ VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot unregister vaddr for IOMMU, error %i (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
- struct vfio_iommu_spapr_register_memory reg = {
- .argsz = sizeof(reg),
- .flags = 0
+ memset(&dma_unmap, 0, sizeof(dma_unmap));
+ dma_unmap.argsz = sizeof(struct vfio_iommu_type1_dma_unmap);
+ dma_unmap.size = len;
+ dma_unmap.iova = iova;
+
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_UNMAP_DMA,
+ &dma_unmap);
+ if (ret) {
+ RTE_LOG(ERR, EAL, " cannot clear DMA remapping, error %i (%s)\n",
+ errno, strerror(errno));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+vfio_spapr_map_walk(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ int *vfio_container_fd = arg;
+
+ return vfio_spapr_dma_mem_map(*vfio_container_fd, ms->addr_64, ms->iova,
+ ms->len, 1);
+}
+
+struct spapr_walk_param {
+ uint64_t window_size;
+ uint64_t hugepage_sz;
+};
+static int
+vfio_spapr_window_size_walk(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg)
+{
+ struct spapr_walk_param *param = arg;
+ uint64_t max = ms->iova + ms->len;
+
+ if (max > param->window_size) {
+ param->hugepage_sz = ms->hugepage_sz;
+ param->window_size = max;
+ }
+
+ return 0;
+}
+
+static int
+vfio_spapr_create_new_dma_window(int vfio_container_fd,
+ struct vfio_iommu_spapr_tce_create *create) {
+ struct vfio_iommu_spapr_tce_remove remove = {
+ .argsz = sizeof(remove),
};
struct vfio_iommu_spapr_tce_info info = {
.argsz = sizeof(info),
};
- struct vfio_iommu_spapr_tce_create create = {
- .argsz = sizeof(create),
- };
- struct vfio_iommu_spapr_tce_remove remove = {
- .argsz = sizeof(remove),
- };
+ int ret;
/* query spapr iommu info */
ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
@@ -737,70 +1252,159 @@ vfio_spapr_dma_map(int vfio_container_fd)
return -1;
}
- /* create DMA window from 0 to max(phys_addr + len) */
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (ms[i].addr == NULL)
- break;
-
- create.window_size = RTE_MAX(create.window_size,
- ms[i].iova + ms[i].len);
- }
-
- /* sPAPR requires window size to be a power of 2 */
- create.window_size = rte_align64pow2(create.window_size);
- create.page_shift = __builtin_ctzll(ms->hugepage_sz);
- create.levels = 1;
-
- ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
+ /* create new DMA window */
+ ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, create);
if (ret) {
RTE_LOG(ERR, EAL, " cannot create new DMA window, "
"error %i (%s)\n", errno, strerror(errno));
return -1;
}
- if (create.start_addr != 0) {
+ if (create->start_addr != 0) {
RTE_LOG(ERR, EAL, " DMA window start address != 0\n");
return -1;
}
- /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- struct vfio_iommu_type1_dma_map dma_map;
+ return 0;
+}
- if (ms[i].addr == NULL)
- break;
+static int
+vfio_spapr_dma_mem_map(int vfio_container_fd, uint64_t vaddr, uint64_t iova,
+ uint64_t len, int do_map)
+{
+ struct spapr_walk_param param;
+ struct vfio_iommu_spapr_tce_create create = {
+ .argsz = sizeof(create),
+ };
+ struct vfio_config *vfio_cfg;
+ struct user_mem_maps *user_mem_maps;
+ int i, ret = 0;
- reg.vaddr = (uintptr_t) ms[i].addr;
- reg.size = ms[i].len;
- ret = ioctl(vfio_container_fd,
- VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
- if (ret) {
- RTE_LOG(ERR, EAL, " cannot register vaddr for IOMMU, "
- "error %i (%s)\n", errno, strerror(errno));
- return -1;
- }
+ vfio_cfg = get_vfio_cfg_by_container_fd(vfio_container_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, " invalid container fd!\n");
+ return -1;
+ }
- memset(&dma_map, 0, sizeof(dma_map));
- dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
- dma_map.vaddr = ms[i].addr_64;
- dma_map.size = ms[i].len;
- if (rte_eal_iova_mode() == RTE_IOVA_VA)
- dma_map.iova = dma_map.vaddr;
- else
- dma_map.iova = ms[i].iova;
- dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
- VFIO_DMA_MAP_FLAG_WRITE;
+ user_mem_maps = &vfio_cfg->mem_maps;
+ rte_spinlock_recursive_lock(&user_mem_maps->lock);
- ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
+ /* check if window size needs to be adjusted */
+ memset(&param, 0, sizeof(param));
- if (ret) {
- RTE_LOG(ERR, EAL, " cannot set up DMA remapping, "
- "error %i (%s)\n", errno, strerror(errno));
- return -1;
+ /* we're inside a callback so use thread-unsafe version */
+ if (rte_memseg_walk_thread_unsafe(vfio_spapr_window_size_walk,
+ &param) < 0) {
+ RTE_LOG(ERR, EAL, "Could not get window size\n");
+ ret = -1;
+ goto out;
+ }
+
+ /* also check user maps */
+ for (i = 0; i < user_mem_maps->n_maps; i++) {
+ uint64_t max = user_mem_maps->maps[i].iova +
+ user_mem_maps->maps[i].len;
+ param.window_size = RTE_MAX(param.window_size, max);
+ }
+
+ /* sPAPR requires window size to be a power of 2 */
+ create.window_size = rte_align64pow2(param.window_size);
+ create.page_shift = __builtin_ctzll(param.hugepage_sz);
+ create.levels = 1;
+
+ if (do_map) {
+ void *addr;
+ /* re-create window and remap the entire memory */
+ if (iova > create.window_size) {
+ if (vfio_spapr_create_new_dma_window(vfio_container_fd,
+ &create) < 0) {
+ RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
+ ret = -1;
+ goto out;
+ }
+ /* we're inside a callback, so use thread-unsafe version
+ */
+ if (rte_memseg_walk_thread_unsafe(vfio_spapr_map_walk,
+ &vfio_container_fd) < 0) {
+ RTE_LOG(ERR, EAL, "Could not recreate DMA maps\n");
+ ret = -1;
+ goto out;
+ }
+ /* remap all user maps */
+ for (i = 0; i < user_mem_maps->n_maps; i++) {
+ struct user_mem_map *map =
+ &user_mem_maps->maps[i];
+ if (vfio_spapr_dma_do_map(vfio_container_fd,
+ map->addr, map->iova, map->len,
+ 1)) {
+ RTE_LOG(ERR, EAL, "Could not recreate user DMA maps\n");
+ ret = -1;
+ goto out;
+ }
+ }
}
+ /* now that we've remapped all of the memory that was present
+ * before, map the segment that we were requested to map.
+ *
+ * however, if we were called by the callback, the memory we
+ * were called with was already in the memseg list, so previous
+ * mapping should've mapped that segment already.
+ *
+ * virt2memseg_list is a relatively cheap check, so use that. if
+ * memory is within any memseg list, it's a memseg, so it's
+ * already mapped.
+ */
+ addr = (void *)(uintptr_t)vaddr;
+ if (rte_mem_virt2memseg_list(addr) == NULL &&
+ vfio_spapr_dma_do_map(vfio_container_fd,
+ vaddr, iova, len, 1) < 0) {
+ RTE_LOG(ERR, EAL, "Could not map segment\n");
+ ret = -1;
+ goto out;
+ }
+ } else {
+ /* for unmap, check if iova within DMA window */
+ if (iova > create.window_size) {
+ RTE_LOG(ERR, EAL, "iova beyond DMA window for unmap");
+ ret = -1;
+ goto out;
+ }
+
+ vfio_spapr_dma_do_map(vfio_container_fd, vaddr, iova, len, 0);
+ }
+out:
+ rte_spinlock_recursive_unlock(&user_mem_maps->lock);
+ return ret;
+}
+
+static int
+vfio_spapr_dma_map(int vfio_container_fd)
+{
+ struct vfio_iommu_spapr_tce_create create = {
+ .argsz = sizeof(create),
+ };
+ struct spapr_walk_param param;
+
+ memset(&param, 0, sizeof(param));
+
+ /* create DMA window from 0 to max(phys_addr + len) */
+ rte_memseg_walk(vfio_spapr_window_size_walk, &param);
+
+ /* sPAPR requires window size to be a power of 2 */
+ create.window_size = rte_align64pow2(param.window_size);
+ create.page_shift = __builtin_ctzll(param.hugepage_sz);
+ create.levels = 1;
+
+ if (vfio_spapr_create_new_dma_window(vfio_container_fd, &create) < 0) {
+ RTE_LOG(ERR, EAL, "Could not create new DMA window\n");
+ return -1;
}
+ /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */
+ if (rte_memseg_walk(vfio_spapr_map_walk, &vfio_container_fd) < 0)
+ return -1;
+
return 0;
}
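
The window sizing above is mechanical: sPAPR needs a power-of-two DMA window covering the highest IOVA in use, and page_shift is log2 of the hugepage size. A worked sketch with assumed values:

	#include <stdint.h>

	#include <rte_common.h>
	#include <rte_memory.h>

	static void
	spapr_window_example(void)
	{
		/* assume the walk saw a highest iova + len of 6 GiB */
		uint64_t max_iova = 0x180000000ULL;
		/* power-of-two window: 6 GiB rounds up to 8 GiB */
		uint64_t window_size = rte_align64pow2(max_iova);
		/* 2 MiB hugepages give page_shift = 21 */
		uint32_t page_shift = __builtin_ctzll(RTE_PGSIZE_2M);

		RTE_SET_USED(window_size);
		RTE_SET_USED(page_shift);
	}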
@@ -811,6 +1415,175 @@ vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
return 0;
}
+static int
+vfio_noiommu_dma_mem_map(int __rte_unused vfio_container_fd,
+ uint64_t __rte_unused vaddr,
+ uint64_t __rte_unused iova, uint64_t __rte_unused len,
+ int __rte_unused do_map)
+{
+ /* No-IOMMU mode does not need DMA mapping */
+ return 0;
+}
+
+static int
+vfio_dma_mem_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
+ uint64_t len, int do_map)
+{
+ const struct vfio_iommu_type *t = vfio_cfg->vfio_iommu_type;
+
+ if (!t) {
+ RTE_LOG(ERR, EAL, " VFIO support not initialized\n");
+ rte_errno = ENODEV;
+ return -1;
+ }
+
+ if (!t->dma_user_map_func) {
+ RTE_LOG(ERR, EAL,
+ " VFIO custom DMA region maping not supported by IOMMU %s\n",
+ t->name);
+ rte_errno = ENOTSUP;
+ return -1;
+ }
+
+ return t->dma_user_map_func(vfio_cfg->vfio_container_fd, vaddr, iova,
+ len, do_map);
+}
+
+static int
+container_dma_map(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
+ uint64_t len)
+{
+ struct user_mem_map *new_map;
+ struct user_mem_maps *user_mem_maps;
+ int ret = 0;
+
+ user_mem_maps = &vfio_cfg->mem_maps;
+ rte_spinlock_recursive_lock(&user_mem_maps->lock);
+ if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
+ RTE_LOG(ERR, EAL, "No more space for user mem maps\n");
+ rte_errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+ /* map the entry */
+ if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 1)) {
+ /* technically, this will fail if there are currently no devices
+ * plugged in, even though the mapping might have succeeded if a
+ * device were added later. however, since we cannot verify whether
+ * a mapping is valid without having a device attached, consider
+ * this to be unsupported, because we can't just store any old
+ * mapping and pollute the list of active mappings willy-nilly.
+ */
+ RTE_LOG(ERR, EAL, "Couldn't map new region for DMA\n");
+ ret = -1;
+ goto out;
+ }
+ /* create new user mem map entry */
+ new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
+ new_map->addr = vaddr;
+ new_map->iova = iova;
+ new_map->len = len;
+
+ compact_user_maps(user_mem_maps);
+out:
+ rte_spinlock_recursive_unlock(&user_mem_maps->lock);
+ return ret;
+}
+
+static int
+container_dma_unmap(struct vfio_config *vfio_cfg, uint64_t vaddr, uint64_t iova,
+ uint64_t len)
+{
+ struct user_mem_map *map, *new_map = NULL;
+ struct user_mem_maps *user_mem_maps;
+ int ret = 0;
+
+ user_mem_maps = &vfio_cfg->mem_maps;
+ rte_spinlock_recursive_lock(&user_mem_maps->lock);
+
+ /* find our mapping */
+ map = find_user_mem_map(user_mem_maps, vaddr, iova, len);
+ if (!map) {
+ RTE_LOG(ERR, EAL, "Couldn't find previously mapped region\n");
+ rte_errno = EINVAL;
+ ret = -1;
+ goto out;
+ }
+ if (map->addr != vaddr || map->iova != iova || map->len != len) {
+ /* we're partially unmapping a previously mapped region, so we
+ * need to split entry into two.
+ */
+ if (user_mem_maps->n_maps == VFIO_MAX_USER_MEM_MAPS) {
+ RTE_LOG(ERR, EAL, "Not enough space to store partial mapping\n");
+ rte_errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+ new_map = &user_mem_maps->maps[user_mem_maps->n_maps++];
+ }
+
+ /* unmap the entry */
+ if (vfio_dma_mem_map(vfio_cfg, vaddr, iova, len, 0)) {
+ /* there may not be any devices plugged in, so unmapping will
+ * fail with ENODEV/ENOTSUP rte_errno values, but that doesn't
+ * stop us from removing the mapping, as the assumption is we
+ * won't be needing this memory any more and thus will want to
+ * prevent it from being remapped again on hotplug. so, only
+ * fail if we indeed failed to unmap (e.g. if the mapping was
+ * within our mapped range but had invalid alignment).
+ */
+ if (rte_errno != ENODEV && rte_errno != ENOTSUP) {
+ RTE_LOG(ERR, EAL, "Couldn't unmap region for DMA\n");
+ ret = -1;
+ goto out;
+ } else {
+ RTE_LOG(DEBUG, EAL, "DMA unmapping failed, but removing mappings anyway\n");
+ }
+ }
+ /* remove map from the list of active mappings */
+ if (new_map != NULL) {
+ adjust_map(map, new_map, vaddr, len);
+
+ /* if we've created a new map by splitting, sort everything */
+ if (!is_null_map(new_map)) {
+ compact_user_maps(user_mem_maps);
+ } else {
+ /* we've created a new mapping, but it was unused */
+ user_mem_maps->n_maps--;
+ }
+ } else {
+ memset(map, 0, sizeof(*map));
+ compact_user_maps(user_mem_maps);
+ user_mem_maps->n_maps--;
+ }
+
+out:
+ rte_spinlock_recursive_unlock(&user_mem_maps->lock);
+ return ret;
+}
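
For intuition on the split path above: unmapping the middle range [0x2000, 0x3000) from an entry covering [0x1000, 0x4000) leaves two entries, [0x1000, 0x2000) and [0x3000, 0x4000); adjust_map() performs that bookkeeping and compact_user_maps() keeps the list sorted afterwards.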
+
+int
+rte_vfio_dma_map(uint64_t vaddr, uint64_t iova, uint64_t len)
+{
+ if (len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return container_dma_map(default_vfio_cfg, vaddr, iova, len);
+}
+
+int
+rte_vfio_dma_unmap(uint64_t vaddr, uint64_t iova, uint64_t len)
+{
+ if (len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ return container_dma_unmap(default_vfio_cfg, vaddr, iova, len);
+}
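
rte_vfio_dma_map()/rte_vfio_dma_unmap() are the new public entry points for DMA-mapping externally allocated memory into the default container. A sketch assuming RTE_IOVA_VA mode and a hugepage-backed buffer (addresses and sizes are illustrative):

	#include <stdint.h>
	#include <sys/mman.h>

	#include <rte_vfio.h>

	/* illustrative: map one anonymous 2 MiB hugepage for device DMA */
	static int
	map_external_buf(void)
	{
		size_t len = 2 * 1024 * 1024;
		void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		uint64_t addr;

		if (va == MAP_FAILED)
			return -1;

		/* in RTE_IOVA_VA mode the VA doubles as the IOVA */
		addr = (uint64_t)(uintptr_t)va;
		if (rte_vfio_dma_map(addr, addr, len) < 0) {
			munmap(va, len);
			return -1;
		}
		/* ... hand the buffer to the device ... */
		rte_vfio_dma_unmap(addr, addr, len);
		munmap(va, len);
		return 0;
	}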
+
int
rte_vfio_noiommu_is_enabled(void)
{
@@ -843,4 +1616,299 @@ rte_vfio_noiommu_is_enabled(void)
return c == 'Y';
}
-#endif
+int
+rte_vfio_container_create(void)
+{
+ int i;
+
+ /* Find an empty slot to store new vfio config */
+ for (i = 1; i < VFIO_MAX_CONTAINERS; i++) {
+ if (vfio_cfgs[i].vfio_container_fd == -1)
+ break;
+ }
+
+ if (i == VFIO_MAX_CONTAINERS) {
+ RTE_LOG(ERR, EAL, "exceed max vfio container limit\n");
+ return -1;
+ }
+
+ vfio_cfgs[i].vfio_container_fd = rte_vfio_get_container_fd();
+ if (vfio_cfgs[i].vfio_container_fd < 0) {
+ RTE_LOG(NOTICE, EAL, "fail to create a new container\n");
+ return -1;
+ }
+
+ return vfio_cfgs[i].vfio_container_fd;
+}
+
+int __rte_experimental
+rte_vfio_container_destroy(int container_fd)
+{
+ struct vfio_config *vfio_cfg;
+ int i;
+
+ vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, "Invalid container fd\n");
+ return -1;
+ }
+
+ for (i = 0; i < VFIO_MAX_GROUPS; i++)
+ if (vfio_cfg->vfio_groups[i].group_num != -1)
+ rte_vfio_container_group_unbind(container_fd,
+ vfio_cfg->vfio_groups[i].group_num);
+
+ close(container_fd);
+ vfio_cfg->vfio_container_fd = -1;
+ vfio_cfg->vfio_active_groups = 0;
+ vfio_cfg->vfio_iommu_type = NULL;
+
+ return 0;
+}
+
+int
+rte_vfio_container_group_bind(int container_fd, int iommu_group_num)
+{
+ struct vfio_config *vfio_cfg;
+ struct vfio_group *cur_grp;
+ int vfio_group_fd;
+ int i;
+
+ vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, "Invalid container fd\n");
+ return -1;
+ }
+
+ /* Check room for new group */
+ if (vfio_cfg->vfio_active_groups == VFIO_MAX_GROUPS) {
+ RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
+ return -1;
+ }
+
+ /* Get an index for the new group */
+ for (i = 0; i < VFIO_MAX_GROUPS; i++)
+ if (vfio_cfg->vfio_groups[i].group_num == -1) {
+ cur_grp = &vfio_cfg->vfio_groups[i];
+ break;
+ }
+
+ /* This should not happen */
+ if (i == VFIO_MAX_GROUPS) {
+ RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
+ return -1;
+ }
+
+ vfio_group_fd = vfio_open_group_fd(iommu_group_num);
+ if (vfio_group_fd < 0) {
+ RTE_LOG(ERR, EAL, "Failed to open group %d\n", iommu_group_num);
+ return -1;
+ }
+ cur_grp->group_num = iommu_group_num;
+ cur_grp->fd = vfio_group_fd;
+ cur_grp->devices = 0;
+ vfio_cfg->vfio_active_groups++;
+
+ return vfio_group_fd;
+}
+
+int
+rte_vfio_container_group_unbind(int container_fd, int iommu_group_num)
+{
+ struct vfio_config *vfio_cfg;
+ struct vfio_group *cur_grp = NULL;
+ int i;
+
+ vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, "Invalid container fd\n");
+ return -1;
+ }
+
+ for (i = 0; i < VFIO_MAX_GROUPS; i++) {
+ if (vfio_cfg->vfio_groups[i].group_num == iommu_group_num) {
+ cur_grp = &vfio_cfg->vfio_groups[i];
+ break;
+ }
+ }
+
+ /* This should not happen */
+ if (i == VFIO_MAX_GROUPS || cur_grp == NULL) {
+ RTE_LOG(ERR, EAL, "Specified group number not found\n");
+ return -1;
+ }
+
+ if (cur_grp->fd >= 0 && close(cur_grp->fd) < 0) {
+ RTE_LOG(ERR, EAL, "Error when closing vfio_group_fd for"
+ " iommu_group_num %d\n", iommu_group_num);
+ return -1;
+ }
+ cur_grp->group_num = -1;
+ cur_grp->fd = -1;
+ cur_grp->devices = 0;
+ vfio_cfg->vfio_active_groups--;
+
+ return 0;
+}
+
+int
+rte_vfio_container_dma_map(int container_fd, uint64_t vaddr, uint64_t iova,
+ uint64_t len)
+{
+ struct vfio_config *vfio_cfg;
+
+ if (len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, "Invalid container fd\n");
+ return -1;
+ }
+
+ return container_dma_map(vfio_cfg, vaddr, iova, len);
+}
+
+int
+rte_vfio_container_dma_unmap(int container_fd, uint64_t vaddr, uint64_t iova,
+ uint64_t len)
+{
+ struct vfio_config *vfio_cfg;
+
+ if (len == 0) {
+ rte_errno = EINVAL;
+ return -1;
+ }
+
+ vfio_cfg = get_vfio_cfg_by_container_fd(container_fd);
+ if (vfio_cfg == NULL) {
+ RTE_LOG(ERR, EAL, "Invalid container fd\n");
+ return -1;
+ }
+
+ return container_dma_unmap(vfio_cfg, vaddr, iova, len);
+}
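
Taken together, the container API added here supports fully isolated per-application containers. A sketch of the intended call sequence, assuming the device's IOMMU group number is already known:

	#include <stdint.h>

	#include <rte_vfio.h>

	/* illustrative sequence: private container, one group, one mapping */
	static int
	setup_custom_container(int iommu_group_num, uint64_t va, uint64_t iova,
			uint64_t len)
	{
		int container_fd = rte_vfio_container_create();

		if (container_fd < 0)
			return -1;

		/* bind the device's IOMMU group to our private container */
		if (rte_vfio_container_group_bind(container_fd, iommu_group_num) < 0)
			goto err;

		/* DMA-map memory into this container only */
		if (rte_vfio_container_dma_map(container_fd, va, iova, len) < 0)
			goto err;

		return container_fd;
	err:
		rte_vfio_container_destroy(container_fd);
		return -1;
	}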
+
+#else
+
+int
+rte_vfio_dma_map(uint64_t __rte_unused vaddr, __rte_unused uint64_t iova,
+ __rte_unused uint64_t len)
+{
+ return -1;
+}
+
+int
+rte_vfio_dma_unmap(uint64_t __rte_unused vaddr, uint64_t __rte_unused iova,
+ __rte_unused uint64_t len)
+{
+ return -1;
+}
+
+int
+rte_vfio_setup_device(__rte_unused const char *sysfs_base,
+ __rte_unused const char *dev_addr,
+ __rte_unused int *vfio_dev_fd,
+ __rte_unused struct vfio_device_info *device_info)
+{
+ return -1;
+}
+
+int
+rte_vfio_release_device(__rte_unused const char *sysfs_base,
+ __rte_unused const char *dev_addr, __rte_unused int fd)
+{
+ return -1;
+}
+
+int
+rte_vfio_enable(__rte_unused const char *modname)
+{
+ return -1;
+}
+
+int
+rte_vfio_is_enabled(__rte_unused const char *modname)
+{
+ return -1;
+}
+
+int
+rte_vfio_noiommu_is_enabled(void)
+{
+ return -1;
+}
+
+int
+rte_vfio_clear_group(__rte_unused int vfio_group_fd)
+{
+ return -1;
+}
+
+int
+rte_vfio_get_group_num(__rte_unused const char *sysfs_base,
+ __rte_unused const char *dev_addr,
+ __rte_unused int *iommu_group_num)
+{
+ return -1;
+}
+
+int
+rte_vfio_get_container_fd(void)
+{
+ return -1;
+}
+
+int
+rte_vfio_get_group_fd(__rte_unused int iommu_group_num)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_create(void)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_destroy(__rte_unused int container_fd)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_group_bind(__rte_unused int container_fd,
+ __rte_unused int iommu_group_num)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_group_unbind(__rte_unused int container_fd,
+ __rte_unused int iommu_group_num)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_dma_map(__rte_unused int container_fd,
+ __rte_unused uint64_t vaddr,
+ __rte_unused uint64_t iova,
+ __rte_unused uint64_t len)
+{
+ return -1;
+}
+
+int
+rte_vfio_container_dma_unmap(__rte_unused int container_fd,
+ __rte_unused uint64_t vaddr,
+ __rte_unused uint64_t iova,
+ __rte_unused uint64_t len)
+{
+ return -1;
+}
+
+#endif /* VFIO_PRESENT */
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.h b/lib/librte_eal/linuxapp/eal/eal_vfio.h
index 80595773..68d4750a 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio.h
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio.h
@@ -19,6 +19,7 @@
#ifdef VFIO_PRESENT
+#include <stdint.h>
#include <linux/vfio.h>
#define RTE_VFIO_TYPE1 VFIO_TYPE1_IOMMU
@@ -26,6 +27,7 @@
#ifndef VFIO_SPAPR_TCE_v2_IOMMU
#define RTE_VFIO_SPAPR 7
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
+#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 18)
#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
@@ -79,49 +81,37 @@ struct vfio_iommu_spapr_tce_info {
#define RTE_VFIO_SPAPR VFIO_SPAPR_TCE_v2_IOMMU
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
-#define RTE_VFIO_NOIOMMU 8
-#else
-#define RTE_VFIO_NOIOMMU VFIO_NOIOMMU_IOMMU
-#endif
-
#define VFIO_MAX_GROUPS RTE_MAX_VFIO_GROUPS
-
-/*
- * Function prototypes for VFIO multiprocess sync functions
- */
-int vfio_mp_sync_send_request(int socket, int req);
-int vfio_mp_sync_receive_request(int socket);
-int vfio_mp_sync_send_fd(int socket, int fd);
-int vfio_mp_sync_receive_fd(int socket);
-int vfio_mp_sync_connect_to_primary(void);
+#define VFIO_MAX_CONTAINERS RTE_MAX_VFIO_CONTAINERS
/*
* we don't need to store device fd's anywhere since they can be obtained from
* the group fd via an ioctl() call.
*/
struct vfio_group {
- int group_no;
+ int group_num;
int fd;
int devices;
};
-struct vfio_config {
- int vfio_enabled;
- int vfio_container_fd;
- int vfio_active_groups;
- struct vfio_group vfio_groups[VFIO_MAX_GROUPS];
-};
-
/* DMA mapping function prototype.
* Takes VFIO container fd as a parameter.
* Returns 0 on success, -1 on error.
* */
typedef int (*vfio_dma_func_t)(int);
+/* Custom memory region DMA mapping function prototype.
+ * Takes VFIO container fd, virtual address, physical address, length and
+ * operation type (0 to unmap, 1 to map) as parameters.
+ * Returns 0 on success, -1 on error.
+ */
+typedef int (*vfio_dma_user_func_t)(int fd, uint64_t vaddr, uint64_t iova,
+ uint64_t len, int do_map);
+
struct vfio_iommu_type {
int type_id;
const char *name;
+ vfio_dma_user_func_t dma_user_map_func;
vfio_dma_func_t dma_map_func;
};
@@ -133,30 +123,22 @@ vfio_set_iommu_type(int vfio_container_fd);
int
vfio_has_supported_extensions(int vfio_container_fd);
-/* open container fd or get an existing one */
-int
-vfio_get_container_fd(void);
-
-/* parse IOMMU group number for a device
- * returns 1 on success, -1 for errors, 0 for non-existent group
- */
-int
-vfio_get_group_no(const char *sysfs_base,
- const char *dev_addr, int *iommu_group_no);
-
-/* open group fd or get an existing one */
-int
-vfio_get_group_fd(int iommu_group_no);
-
int vfio_mp_sync_setup(void);
+#define EAL_VFIO_MP "eal_vfio_mp_sync"
+
#define SOCKET_REQ_CONTAINER 0x100
#define SOCKET_REQ_GROUP 0x200
-#define SOCKET_CLR_GROUP 0x300
#define SOCKET_OK 0x0
#define SOCKET_NO_FD 0x1
#define SOCKET_ERR 0xFF
+struct vfio_mp_param {
+ int req;
+ int result;
+ int group_num;
+};
+
#endif /* VFIO_PRESENT */
#endif /* EAL_VFIO_H_ */
diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
index 7cc3c152..680a24aa 100644
--- a/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
+++ b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c
@@ -1,32 +1,16 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
+#include <unistd.h>
#include <string.h>
-#include <fcntl.h>
-#include <sys/socket.h>
-#include <pthread.h>
-
-/* sys/un.h with __USE_MISC uses strlen, which is unsafe */
-#ifdef __USE_MISC
-#define REMOVED_USE_MISC
-#undef __USE_MISC
-#endif
-#include <sys/un.h>
-/* make sure we redefine __USE_MISC only if it was previously undefined */
-#ifdef REMOVED_USE_MISC
-#define __USE_MISC
-#undef REMOVED_USE_MISC
-#endif
+#include <rte_compat.h>
#include <rte_log.h>
-#include <rte_eal_memconfig.h>
-#include <rte_malloc.h>
#include <rte_vfio.h>
+#include <rte_eal.h>
-#include "eal_filesystem.h"
#include "eal_vfio.h"
-#include "eal_thread.h"
/**
* @file
@@ -37,358 +21,70 @@
#ifdef VFIO_PRESENT
-#define SOCKET_PATH_FMT "%s/.%s_mp_socket"
-#define CMSGLEN (CMSG_LEN(sizeof(int)))
-#define FD_TO_CMSGHDR(fd, chdr) \
- do {\
- (chdr).cmsg_len = CMSGLEN;\
- (chdr).cmsg_level = SOL_SOCKET;\
- (chdr).cmsg_type = SCM_RIGHTS;\
- memcpy((chdr).__cmsg_data, &(fd), sizeof(fd));\
- } while (0)
-#define CMSGHDR_TO_FD(chdr, fd) \
- memcpy(&(fd), (chdr).__cmsg_data, sizeof(fd))
-
-static pthread_t socket_thread;
-static int mp_socket_fd;
-
-
-/* get socket path (/var/run if root, $HOME otherwise) */
-static void
-get_socket_path(char *buffer, int bufsz)
-{
- const char *dir = "/var/run";
- const char *home_dir = getenv("HOME");
-
- if (getuid() != 0 && home_dir != NULL)
- dir = home_dir;
-
- /* use current prefix as file path */
- snprintf(buffer, bufsz, SOCKET_PATH_FMT, dir,
- internal_config.hugefile_prefix);
-}
-
-
-
-/*
- * data flow for socket comm protocol:
- * 1. client sends SOCKET_REQ_CONTAINER or SOCKET_REQ_GROUP
- * 1a. in case of SOCKET_REQ_GROUP, client also then sends group number
- * 2. server receives message
- * 2a. in case of invalid group, SOCKET_ERR is sent back to client
- * 2b. in case of unbound group, SOCKET_NO_FD is sent back to client
- * 2c. in case of valid group, SOCKET_OK is sent and immediately followed by fd
- *
- * in case of any error, socket is closed.
- */
-
-/* send a request, return -1 on error */
-int
-vfio_mp_sync_send_request(int socket, int req)
-{
- struct msghdr hdr;
- struct iovec iov;
- int buf;
- int ret;
-
- memset(&hdr, 0, sizeof(hdr));
-
- buf = req;
-
- hdr.msg_iov = &iov;
- hdr.msg_iovlen = 1;
- iov.iov_base = (char *) &buf;
- iov.iov_len = sizeof(buf);
-
- ret = sendmsg(socket, &hdr, 0);
- if (ret < 0)
- return -1;
- return 0;
-}
-
-/* receive a request and return it */
-int
-vfio_mp_sync_receive_request(int socket)
-{
- int buf;
- struct msghdr hdr;
- struct iovec iov;
- int ret, req;
-
- memset(&hdr, 0, sizeof(hdr));
-
- buf = SOCKET_ERR;
-
- hdr.msg_iov = &iov;
- hdr.msg_iovlen = 1;
- iov.iov_base = (char *) &buf;
- iov.iov_len = sizeof(buf);
-
- ret = recvmsg(socket, &hdr, 0);
- if (ret < 0)
- return -1;
-
- req = buf;
-
- return req;
-}
-
-/* send OK in message, fd in control message */
-int
-vfio_mp_sync_send_fd(int socket, int fd)
+static int
+vfio_mp_primary(const struct rte_mp_msg *msg, const void *peer)
{
- int buf;
- struct msghdr hdr;
- struct cmsghdr *chdr;
- char chdr_buf[CMSGLEN];
- struct iovec iov;
+ int fd = -1;
int ret;
+ struct rte_mp_msg reply;
+ struct vfio_mp_param *r = (struct vfio_mp_param *)reply.param;
+ const struct vfio_mp_param *m =
+ (const struct vfio_mp_param *)msg->param;
- chdr = (struct cmsghdr *) chdr_buf;
- memset(chdr, 0, sizeof(chdr_buf));
- memset(&hdr, 0, sizeof(hdr));
-
- hdr.msg_iov = &iov;
- hdr.msg_iovlen = 1;
- iov.iov_base = (char *) &buf;
- iov.iov_len = sizeof(buf);
- hdr.msg_control = chdr;
- hdr.msg_controllen = CMSGLEN;
-
- buf = SOCKET_OK;
- FD_TO_CMSGHDR(fd, *chdr);
-
- ret = sendmsg(socket, &hdr, 0);
- if (ret < 0)
- return -1;
- return 0;
-}
-
-/* receive OK in message, fd in control message */
-int
-vfio_mp_sync_receive_fd(int socket)
-{
- int buf;
- struct msghdr hdr;
- struct cmsghdr *chdr;
- char chdr_buf[CMSGLEN];
- struct iovec iov;
- int ret, req, fd;
-
- buf = SOCKET_ERR;
-
- chdr = (struct cmsghdr *) chdr_buf;
- memset(chdr, 0, sizeof(chdr_buf));
- memset(&hdr, 0, sizeof(hdr));
-
- hdr.msg_iov = &iov;
- hdr.msg_iovlen = 1;
- iov.iov_base = (char *) &buf;
- iov.iov_len = sizeof(buf);
- hdr.msg_control = chdr;
- hdr.msg_controllen = CMSGLEN;
-
- ret = recvmsg(socket, &hdr, 0);
- if (ret < 0)
- return -1;
-
- req = buf;
-
- if (req != SOCKET_OK)
- return -1;
-
- CMSGHDR_TO_FD(*chdr, fd);
-
- return fd;
-}
-
-/* connect socket_fd in secondary process to the primary process's socket */
-int
-vfio_mp_sync_connect_to_primary(void)
-{
- struct sockaddr_un addr;
- socklen_t sockaddr_len;
- int socket_fd;
-
- /* set up a socket */
- socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
- if (socket_fd < 0) {
- RTE_LOG(ERR, EAL, "Failed to create socket!\n");
+ if (msg->len_param != sizeof(*m)) {
+ RTE_LOG(ERR, EAL, "vfio received invalid message!\n");
return -1;
}
- get_socket_path(addr.sun_path, sizeof(addr.sun_path));
- addr.sun_family = AF_UNIX;
-
- sockaddr_len = sizeof(struct sockaddr_un);
-
- if (connect(socket_fd, (struct sockaddr *) &addr, sockaddr_len) == 0)
- return socket_fd;
-
- /* if connect failed */
- close(socket_fd);
- return -1;
-}
-
+ memset(&reply, 0, sizeof(reply));
-
-/*
- * socket listening thread for primary process
- */
-static __attribute__((noreturn)) void *
-vfio_mp_sync_thread(void __rte_unused * arg)
-{
- int ret, fd, vfio_data;
-
- /* wait for requests on the socket */
- for (;;) {
- int conn_sock;
- struct sockaddr_un addr;
- socklen_t sockaddr_len = sizeof(addr);
-
- /* this is a blocking call */
- conn_sock = accept(mp_socket_fd, (struct sockaddr *) &addr,
- &sockaddr_len);
-
- /* just restart on error */
- if (conn_sock == -1)
- continue;
-
- /* set socket to linger after close */
- struct linger l;
- l.l_onoff = 1;
- l.l_linger = 60;
-
- if (setsockopt(conn_sock, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) < 0)
- RTE_LOG(WARNING, EAL, "Cannot set SO_LINGER option "
- "on listen socket (%s)\n", strerror(errno));
-
- ret = vfio_mp_sync_receive_request(conn_sock);
-
- switch (ret) {
- case SOCKET_REQ_CONTAINER:
- fd = vfio_get_container_fd();
- if (fd < 0)
- vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
- else
- vfio_mp_sync_send_fd(conn_sock, fd);
- if (fd >= 0)
- close(fd);
- break;
- case SOCKET_REQ_GROUP:
- /* wait for group number */
- vfio_data = vfio_mp_sync_receive_request(conn_sock);
- if (vfio_data < 0) {
- close(conn_sock);
- continue;
- }
-
- fd = vfio_get_group_fd(vfio_data);
-
- if (fd < 0)
- vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
+ switch (m->req) {
+ case SOCKET_REQ_GROUP:
+ r->req = SOCKET_REQ_GROUP;
+ r->group_num = m->group_num;
+ fd = rte_vfio_get_group_fd(m->group_num);
+ if (fd < 0)
+ r->result = SOCKET_ERR;
+ else if (fd == 0)
/* if VFIO group exists but isn't bound to VFIO driver */
- else if (fd == 0)
- vfio_mp_sync_send_request(conn_sock, SOCKET_NO_FD);
+ r->result = SOCKET_NO_FD;
+ else {
/* if group exists and is bound to VFIO driver */
- else {
- vfio_mp_sync_send_request(conn_sock, SOCKET_OK);
- vfio_mp_sync_send_fd(conn_sock, fd);
- }
- break;
- case SOCKET_CLR_GROUP:
- /* wait for group fd */
- vfio_data = vfio_mp_sync_receive_request(conn_sock);
- if (vfio_data < 0) {
- close(conn_sock);
- continue;
- }
-
- ret = rte_vfio_clear_group(vfio_data);
-
- if (ret < 0)
- vfio_mp_sync_send_request(conn_sock, SOCKET_NO_FD);
- else
- vfio_mp_sync_send_request(conn_sock, SOCKET_OK);
- break;
- default:
- vfio_mp_sync_send_request(conn_sock, SOCKET_ERR);
- break;
+ r->result = SOCKET_OK;
+ reply.num_fds = 1;
+ reply.fds[0] = fd;
}
- close(conn_sock);
- }
-}
-
-static int
-vfio_mp_sync_socket_setup(void)
-{
- int ret, socket_fd;
- struct sockaddr_un addr;
- socklen_t sockaddr_len;
-
- /* set up a socket */
- socket_fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);
- if (socket_fd < 0) {
- RTE_LOG(ERR, EAL, "Failed to create socket!\n");
- return -1;
- }
-
- get_socket_path(addr.sun_path, sizeof(addr.sun_path));
- addr.sun_family = AF_UNIX;
-
- sockaddr_len = sizeof(struct sockaddr_un);
-
- unlink(addr.sun_path);
-
- ret = bind(socket_fd, (struct sockaddr *) &addr, sockaddr_len);
- if (ret) {
- RTE_LOG(ERR, EAL, "Failed to bind socket: %s!\n", strerror(errno));
- close(socket_fd);
- return -1;
- }
-
- ret = listen(socket_fd, 50);
- if (ret) {
- RTE_LOG(ERR, EAL, "Failed to listen: %s!\n", strerror(errno));
- close(socket_fd);
+ break;
+ case SOCKET_REQ_CONTAINER:
+ r->req = SOCKET_REQ_CONTAINER;
+ fd = rte_vfio_get_container_fd();
+ if (fd < 0)
+ r->result = SOCKET_ERR;
+ else {
+ r->result = SOCKET_OK;
+ reply.num_fds = 1;
+ reply.fds[0] = fd;
+ }
+ break;
+ default:
+ RTE_LOG(ERR, EAL, "vfio received invalid message!\n");
return -1;
}
- /* save the socket in local configuration */
- mp_socket_fd = socket_fd;
+ strcpy(reply.name, EAL_VFIO_MP);
+ reply.len_param = sizeof(*r);
- return 0;
+ ret = rte_mp_reply(&reply, peer);
+ if (m->req == SOCKET_REQ_CONTAINER && fd >= 0)
+ close(fd);
+ return ret;
}
-/*
- * set up a local socket and tell it to listen for incoming connections
- */
int
vfio_mp_sync_setup(void)
{
- int ret;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
-
- if (vfio_mp_sync_socket_setup() < 0) {
- RTE_LOG(ERR, EAL, "Failed to set up local socket!\n");
- return -1;
- }
-
- ret = pthread_create(&socket_thread, NULL,
- vfio_mp_sync_thread, NULL);
- if (ret) {
- RTE_LOG(ERR, EAL,
- "Failed to create thread for communication with secondary processes!\n");
- close(mp_socket_fd);
- return -1;
- }
-
- /* Set thread_name for aid in debugging. */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "vfio-sync");
- ret = rte_thread_setname(socket_thread, thread_name);
- if (ret)
- RTE_LOG(DEBUG, EAL,
- "Failed to set thread name for secondary processes!\n");
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ return rte_mp_action_register(EAL_VFIO_MP, vfio_mp_primary);
return 0;
}
diff --git a/lib/librte_eal/linuxapp/eal/meson.build b/lib/librte_eal/linuxapp/eal/meson.build
index 03974ff2..6e31c2aa 100644
--- a/lib/librte_eal/linuxapp/eal/meson.build
+++ b/lib/librte_eal/linuxapp/eal/meson.build
@@ -7,9 +7,11 @@ install_subdir('include/exec-env', install_dir: get_option('includedir'))
env_objs = []
env_headers = []
env_sources = files('eal_alarm.c',
+ 'eal_cpuflags.c',
'eal_debug.c',
'eal_hugepage_info.c',
'eal_interrupts.c',
+ 'eal_memalloc.c',
'eal_lcore.c',
'eal_log.c',
'eal_thread.c',
@@ -18,8 +20,10 @@ env_sources = files('eal_alarm.c',
'eal_vfio_mp_sync.c',
'eal.c',
'eal_memory.c',
+ 'eal_dev.c',
)
+deps += ['kvargs']
if has_libnuma == 1
dpdk_conf.set10('RTE_EAL_NUMA_AWARE_HUGEPAGES', true)
endif
diff --git a/lib/librte_eal/linuxapp/igb_uio/Kbuild b/lib/librte_eal/linuxapp/igb_uio/Kbuild
deleted file mode 100644
index 98c98fe5..00000000
--- a/lib/librte_eal/linuxapp/igb_uio/Kbuild
+++ /dev/null
@@ -1 +0,0 @@
-obj-m := igb_uio.o
diff --git a/lib/librte_eal/meson.build b/lib/librte_eal/meson.build
index d9ba3853..e1fde15d 100644
--- a/lib/librte_eal/meson.build
+++ b/lib/librte_eal/meson.build
@@ -12,40 +12,19 @@ subdir('common') # defines common_sources, common_objs, etc.
if host_machine.system() == 'linux'
dpdk_conf.set('RTE_EXEC_ENV_LINUXAPP', 1)
subdir('linuxapp/eal')
- subdir('linuxapp/igb_uio')
elif host_machine.system() == 'freebsd'
dpdk_conf.set('RTE_EXEC_ENV_BSDAPP', 1)
subdir('bsdapp/eal')
- kmods = ['contigmem', 'nic_uio']
- # for building kernel modules, we use kernel build system using make, as
- # with Linux. We have a skeleton BSDmakefile, which pulls many of its
- # values from the environment. Each module only has a single source file
- # right now, which allows us to simplify things. We pull in the sourcer
- # files from the individual meson.build files, and then use a custom
- # target to call make, passing in the values as env parameters.
- kmod_cflags = ['-I' + meson.build_root(),
- '-I' + join_paths(meson.source_root(), 'config'),
- '-include rte_config.h']
- foreach k:kmods
- subdir(join_paths('bsdapp', k))
- custom_target(k,
- input: [files('bsdapp/BSDmakefile.meson'), sources],
- output: k + '.ko',
- command: ['make', '-f', '@INPUT0@',
- 'KMOD_SRC=@INPUT1@',
- 'KMOD=' + k,
- 'KMOD_CFLAGS=' + ' '.join(kmod_cflags)],
- build_by_default: get_option('enable_kmods'))
- endforeach
else
- error('unsupported system type @0@'.format(hostmachine.system()))
+ error('unsupported system type "@0@"'.format(host_machine.system()))
endif
-version = 6 # the version of the EAL API
+version = 8 # the version of the EAL API
allow_experimental_apis = true
deps += 'compat'
+deps += 'kvargs'
cflags += '-D_GNU_SOURCE'
sources = common_sources + env_sources
objs = common_objs + env_objs
diff --git a/lib/librte_eal/rte_eal_version.map b/lib/librte_eal/rte_eal_version.map
index d1236023..344a43d3 100644
--- a/lib/librte_eal/rte_eal_version.map
+++ b/lib/librte_eal/rte_eal_version.map
@@ -2,7 +2,6 @@ DPDK_2.0 {
global:
__rte_panic;
- devargs_list;
eal_parse_sysfs_value;
eal_timer_source;
lcore_config;
@@ -25,7 +24,6 @@ DPDK_2.0 {
rte_eal_devargs_type_count;
rte_eal_get_configuration;
rte_eal_get_lcore_state;
- rte_eal_get_physmem_layout;
rte_eal_get_physmem_size;
rte_eal_has_hugepages;
rte_eal_hpet_init;
@@ -163,9 +161,6 @@ DPDK_17.05 {
rte_log_set_global_level;
rte_log_set_level;
rte_log_set_level_regexp;
- vfio_get_container_fd;
- vfio_get_group_fd;
- vfio_get_group_no;
} DPDK_17.02;
@@ -186,7 +181,6 @@ DPDK_17.11 {
rte_bus_get_iommu_class;
rte_eal_has_pci;
rte_eal_iova_mode;
- rte_eal_mbuf_default_mempool_ops;
rte_eal_using_phys_addrs;
rte_eal_vfio_intr_mode;
rte_lcore_has_role;
@@ -211,26 +205,15 @@ DPDK_18.02 {
} DPDK_17.11;
-EXPERIMENTAL {
+DPDK_18.05 {
global:
- rte_eal_cleanup;
- rte_eal_devargs_insert;
- rte_eal_devargs_parse;
- rte_eal_devargs_remove;
- rte_eal_hotplug_add;
- rte_eal_hotplug_remove;
- rte_eal_mbuf_user_pool_ops;
- rte_mp_action_register;
- rte_mp_action_unregister;
- rte_mp_sendmsg;
- rte_mp_request;
- rte_mp_reply;
+ rte_log_set_level_pattern;
rte_service_attr_get;
rte_service_attr_reset_all;
rte_service_component_register;
- rte_service_component_unregister;
rte_service_component_runstate_set;
+ rte_service_component_unregister;
rte_service_dump;
rte_service_finalize;
rte_service_get_by_id;
@@ -256,3 +239,100 @@ EXPERIMENTAL {
rte_service_start_with_defaults;
} DPDK_18.02;
+
+DPDK_18.08 {
+ global:
+
+ rte_eal_mbuf_user_pool_ops;
+ rte_uuid_compare;
+ rte_uuid_is_null;
+ rte_uuid_parse;
+ rte_uuid_unparse;
+ rte_vfio_container_create;
+ rte_vfio_container_destroy;
+ rte_vfio_container_dma_map;
+ rte_vfio_container_dma_unmap;
+ rte_vfio_container_group_bind;
+ rte_vfio_container_group_unbind;
+ rte_vfio_dma_map;
+ rte_vfio_dma_unmap;
+ rte_vfio_get_container_fd;
+ rte_vfio_get_group_fd;
+ rte_vfio_get_group_num;
+
+} DPDK_18.05;
+
+EXPERIMENTAL {
+ global:
+
+ rte_class_find;
+ rte_class_find_by_name;
+ rte_class_register;
+ rte_class_unregister;
+ rte_ctrl_thread_create;
+ rte_dev_event_callback_register;
+ rte_dev_event_callback_unregister;
+ rte_dev_event_monitor_start;
+ rte_dev_event_monitor_stop;
+ rte_dev_iterator_init;
+ rte_dev_iterator_next;
+ rte_devargs_add;
+ rte_devargs_dump;
+ rte_devargs_insert;
+ rte_devargs_next;
+ rte_devargs_parse;
+ rte_devargs_parsef;
+ rte_devargs_remove;
+ rte_devargs_type_count;
+ rte_eal_cleanup;
+ rte_eal_hotplug_add;
+ rte_eal_hotplug_remove;
+ rte_fbarray_attach;
+ rte_fbarray_destroy;
+ rte_fbarray_detach;
+ rte_fbarray_dump_metadata;
+ rte_fbarray_find_idx;
+ rte_fbarray_find_next_free;
+ rte_fbarray_find_next_used;
+ rte_fbarray_find_next_n_free;
+ rte_fbarray_find_next_n_used;
+ rte_fbarray_find_prev_free;
+ rte_fbarray_find_prev_used;
+ rte_fbarray_find_prev_n_free;
+ rte_fbarray_find_prev_n_used;
+ rte_fbarray_find_contig_free;
+ rte_fbarray_find_contig_used;
+ rte_fbarray_find_rev_contig_free;
+ rte_fbarray_find_rev_contig_used;
+ rte_fbarray_get;
+ rte_fbarray_init;
+ rte_fbarray_is_used;
+ rte_fbarray_set_free;
+ rte_fbarray_set_used;
+ rte_log_register_type_and_pick_level;
+ rte_malloc_dump_heaps;
+ rte_mem_alloc_validator_register;
+ rte_mem_alloc_validator_unregister;
+ rte_mem_event_callback_register;
+ rte_mem_event_callback_unregister;
+ rte_mem_iova2virt;
+ rte_mem_virt2memseg;
+ rte_mem_virt2memseg_list;
+ rte_memseg_contig_walk;
+ rte_memseg_contig_walk_thread_unsafe;
+ rte_memseg_list_walk;
+ rte_memseg_list_walk_thread_unsafe;
+ rte_memseg_walk;
+ rte_memseg_walk_thread_unsafe;
+ rte_mp_action_register;
+ rte_mp_action_unregister;
+ rte_mp_reply;
+ rte_mp_request_sync;
+ rte_mp_request_async;
+ rte_mp_sendmsg;
+ rte_service_lcore_attr_get;
+ rte_service_lcore_attr_reset_all;
+ rte_service_may_be_active;
+ rte_socket_count;
+ rte_socket_id_by_idx;
+};
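A minimal sketch (not part of the change set) of what the EXPERIMENTAL node above means for consumers: symbols listed there, such as rte_dev_event_monitor_start(), trigger a build-time deprecation warning (an error under -Werror) unless the application opts in, normally via -DALLOW_EXPERIMENTAL_API. The wrapper function below is hypothetical; the API names are from the 18.08 headers.

#define ALLOW_EXPERIMENTAL_API 1	/* usually set on the compiler command line */
#include <rte_dev.h>

/* Hypothetical helper: start the hotplug event monitor, an EXPERIMENTAL
 * symbol in the map above, so it carries no ABI stability guarantee. */
static int enable_hotplug_events(void)
{
	return rte_dev_event_monitor_start();
}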
diff --git a/lib/librte_ether/Makefile b/lib/librte_ethdev/Makefile
index 3ca5782b..0935a275 100644
--- a/lib/librte_ether/Makefile
+++ b/lib/librte_ethdev/Makefile
@@ -16,7 +16,7 @@ LDLIBS += -lrte_mbuf
EXPORT_MAP := rte_ethdev_version.map
-LIBABIVER := 8
+LIBABIVER := 10
SRCS-y += rte_ethdev.c
SRCS-y += rte_flow.c
diff --git a/lib/librte_ether/ethdev_profile.c b/lib/librte_ethdev/ethdev_profile.c
index 0d1dcda3..0d1dcda3 100644
--- a/lib/librte_ether/ethdev_profile.c
+++ b/lib/librte_ethdev/ethdev_profile.c
diff --git a/lib/librte_ether/ethdev_profile.h b/lib/librte_ethdev/ethdev_profile.h
index e5ea3682..e5ea3682 100644
--- a/lib/librte_ether/ethdev_profile.h
+++ b/lib/librte_ethdev/ethdev_profile.h
diff --git a/lib/librte_ether/meson.build b/lib/librte_ethdev/meson.build
index 7fed8605..596cd0f3 100644
--- a/lib/librte_ether/meson.build
+++ b/lib/librte_ethdev/meson.build
@@ -2,7 +2,7 @@
# Copyright(c) 2017 Intel Corporation
name = 'ethdev'
-version = 8
+version = 10
allow_experimental_apis = true
sources = files('ethdev_profile.c',
'rte_ethdev.c',
@@ -24,4 +24,4 @@ headers = files('rte_ethdev.h',
'rte_tm.h',
'rte_tm_driver.h')
-deps += ['net']
+deps += ['net', 'kvargs']
diff --git a/lib/librte_ether/rte_dev_info.h b/lib/librte_ethdev/rte_dev_info.h
index 6b68584d..fea5da88 100644
--- a/lib/librte_ether/rte_dev_info.h
+++ b/lib/librte_ethdev/rte_dev_info.h
@@ -28,4 +28,22 @@ struct rte_dev_eeprom_info {
uint32_t magic; /**< Device-specific key, such as device-id */
};
+/**
+ * Placeholder for accessing plug-in module EEPROM
+ */
+struct rte_eth_dev_module_info {
+ uint32_t type; /**< Type of plug-in module EEPROM */
+ uint32_t eeprom_len; /**< Length of plug-in module EEPROM */
+};
+
+/* EEPROM standards for plug-in modules */
+#define RTE_ETH_MODULE_SFF_8079 0x1
+#define RTE_ETH_MODULE_SFF_8079_LEN 256
+#define RTE_ETH_MODULE_SFF_8472 0x2
+#define RTE_ETH_MODULE_SFF_8472_LEN 512
+#define RTE_ETH_MODULE_SFF_8636 0x3
+#define RTE_ETH_MODULE_SFF_8636_LEN 256
+#define RTE_ETH_MODULE_SFF_8436 0x4
+#define RTE_ETH_MODULE_SFF_8436_LEN 256
+
#endif /* _RTE_DEV_INFO_H_ */
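A minimal usage sketch for the new structure (not part of the change set), assuming the given port carries a pluggable SFP/QSFP module and the experimental module-EEPROM API of this release, rte_eth_dev_get_module_info() / rte_eth_dev_get_module_eeprom(), is available:

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

/* Query the module type first, then read the raw EEPROM contents;
 * minfo.eeprom_len will be one of the RTE_ETH_MODULE_SFF_*_LEN sizes. */
static int dump_module_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &minfo);
	if (ret != 0)
		return ret;

	memset(&einfo, 0, sizeof(einfo));
	einfo.offset = 0;
	einfo.length = minfo.eeprom_len;
	einfo.data = calloc(1, minfo.eeprom_len);
	if (einfo.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_module_eeprom(port_id, &einfo);
	/* ... parse per SFF-8472/SFF-8636 here ... */
	free(einfo.data);
	return ret;
}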
diff --git a/lib/librte_ether/rte_eth_ctrl.h b/lib/librte_ethdev/rte_eth_ctrl.h
index 668f59ac..5ea8ae24 100644
--- a/lib/librte_ether/rte_eth_ctrl.h
+++ b/lib/librte_ethdev/rte_eth_ctrl.h
@@ -54,7 +54,8 @@ extern "C" {
#define RTE_ETH_FLOW_VXLAN 19 /**< VXLAN protocol based flow */
#define RTE_ETH_FLOW_GENEVE 20 /**< GENEVE protocol based flow */
#define RTE_ETH_FLOW_NVGRE 21 /**< NVGRE protocol based flow */
-#define RTE_ETH_FLOW_MAX 22
+#define RTE_ETH_FLOW_VXLAN_GPE 22 /**< VXLAN-GPE protocol based flow */
+#define RTE_ETH_FLOW_MAX 23
/**
* Feature filter types
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 0590f0c1..4c320250 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -10,6 +10,7 @@
#include <string.h>
#include <stdarg.h>
#include <errno.h>
+#include <stdbool.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>
@@ -34,16 +35,18 @@
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
-#include <rte_compat.h>
+#include <rte_kvargs.h>
#include "rte_ether.h"
#include "rte_ethdev.h"
#include "rte_ethdev_driver.h"
#include "ethdev_profile.h"
+int rte_eth_dev_logtype;
+
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
-static uint8_t eth_dev_last_created_port;
+static uint16_t eth_dev_last_created_port;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -123,6 +126,7 @@ static const struct {
RTE_RX_OFFLOAD_BIT2STR(SCATTER),
RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
RTE_RX_OFFLOAD_BIT2STR(SECURITY),
+ RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
};
#undef RTE_RX_OFFLOAD_BIT2STR
@@ -222,19 +226,41 @@ rte_eth_dev_shared_data_prepare(void)
rte_spinlock_unlock(&rte_eth_shared_data_lock);
}
-struct rte_eth_dev *
-rte_eth_dev_allocated(const char *name)
+static bool
+is_allocated(const struct rte_eth_dev *ethdev)
+{
+ return ethdev->data->name[0] != '\0';
+}
+
+static struct rte_eth_dev *
+_rte_eth_dev_allocated(const char *name)
{
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
+ if (rte_eth_devices[i].data != NULL &&
strcmp(rte_eth_devices[i].data->name, name) == 0)
return &rte_eth_devices[i];
}
return NULL;
}
+struct rte_eth_dev *
+rte_eth_dev_allocated(const char *name)
+{
+ struct rte_eth_dev *ethdev;
+
+ rte_eth_dev_shared_data_prepare();
+
+ rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
+
+ ethdev = _rte_eth_dev_allocated(name);
+
+ rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+
+ return ethdev;
+}
+
static uint16_t
rte_eth_dev_find_free_port(void)
{
@@ -257,7 +283,6 @@ eth_dev_get(uint16_t port_id)
struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
- eth_dev->state = RTE_ETH_DEV_ATTACHED;
eth_dev_last_created_port = port_id;
@@ -275,15 +300,17 @@ rte_eth_dev_allocate(const char *name)
/* Synchronize port creation between primary and secondary threads. */
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
- port_id = rte_eth_dev_find_free_port();
- if (port_id == RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, EAL, "Reached maximum number of Ethernet ports\n");
+ if (_rte_eth_dev_allocated(name) != NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethernet device with name %s already allocated\n",
+ name);
goto unlock;
}
- if (rte_eth_dev_allocated(name) != NULL) {
- RTE_LOG(ERR, EAL, "Ethernet Device with name %s already allocated!\n",
- name);
+ port_id = rte_eth_dev_find_free_port();
+ if (port_id == RTE_MAX_ETHPORTS) {
+ RTE_ETHDEV_LOG(ERR,
+ "Reached maximum number of Ethernet ports\n");
goto unlock;
}
@@ -295,9 +322,6 @@ rte_eth_dev_allocate(const char *name)
unlock:
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
- if (eth_dev != NULL)
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_NEW, NULL);
-
return eth_dev;
}
@@ -322,8 +346,8 @@ rte_eth_dev_attach_secondary(const char *name)
break;
}
if (i == RTE_MAX_ETHPORTS) {
- RTE_PMD_DEBUG_TRACE(
- "device %s is not driven by the primary process\n",
+ RTE_ETHDEV_LOG(ERR,
+ "Device %s is not driven by the primary process\n",
name);
} else {
eth_dev = eth_dev_get(i);
@@ -342,6 +366,8 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
rte_eth_dev_shared_data_prepare();
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
+
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
eth_dev->state = RTE_ETH_DEV_UNUSED;
@@ -350,8 +376,6 @@ rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_DESTROY, NULL);
-
return 0;
}
@@ -370,13 +394,14 @@ rte_eth_is_valid_owner_id(uint64_t owner_id)
{
if (owner_id == RTE_ETH_DEV_NO_OWNER ||
rte_eth_dev_shared_data->next_owner_id <= owner_id) {
- RTE_PMD_DEBUG_TRACE("Invalid owner_id=%016lX.\n", owner_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid owner_id=%016"PRIx64"\n",
+ owner_id);
return 0;
}
return 1;
}
-uint64_t __rte_experimental
+uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
while (port_id < RTE_MAX_ETHPORTS &&
@@ -408,10 +433,15 @@ static int
_rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
const struct rte_eth_dev_owner *new_owner)
{
+ struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
struct rte_eth_dev_owner *port_owner;
int sret;
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
+ RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
+ port_id);
+ return -ENODEV;
+ }
if (!rte_eth_is_valid_owner_id(new_owner->id) &&
!rte_eth_is_valid_owner_id(old_owner_id))
@@ -419,22 +449,22 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
port_owner = &rte_eth_devices[port_id].data->owner;
if (port_owner->id != old_owner_id) {
- RTE_PMD_DEBUG_TRACE("Cannot set owner to port %d already owned"
- " by %s_%016lX.\n", port_id,
- port_owner->name, port_owner->id);
+ RTE_ETHDEV_LOG(ERR,
+ "Cannot set owner to port %u already owned by %s_%016"PRIX64"\n",
+ port_id, port_owner->name, port_owner->id);
return -EPERM;
}
sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
new_owner->name);
if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
- RTE_PMD_DEBUG_TRACE("Port %d owner name was truncated.\n",
- port_id);
+ RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
+ port_id);
port_owner->id = new_owner->id;
- RTE_PMD_DEBUG_TRACE("Port %d owner is %s_%016lX.\n", port_id,
- new_owner->name, new_owner->id);
+ RTE_ETHDEV_LOG(DEBUG, "Port %u owner is %s_%016"PRIx64"\n",
+ port_id, new_owner->name, new_owner->id);
return 0;
}
@@ -482,11 +512,13 @@ rte_eth_dev_owner_delete(const uint64_t owner_id)
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
if (rte_eth_is_valid_owner_id(owner_id)) {
- RTE_ETH_FOREACH_DEV_OWNED_BY(port_id, owner_id)
- memset(&rte_eth_devices[port_id].data->owner, 0,
- sizeof(struct rte_eth_dev_owner));
- RTE_PMD_DEBUG_TRACE("All port owners owned by %016X identifier"
- " have removed.\n", owner_id);
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
+ if (rte_eth_devices[port_id].data->owner.id == owner_id)
+ memset(&rte_eth_devices[port_id].data->owner, 0,
+ sizeof(struct rte_eth_dev_owner));
+ RTE_ETHDEV_LOG(ERR,
+ "All port owners owned by %016"PRIx64" identifier have been removed\n",
+ owner_id);
}
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
@@ -496,17 +528,18 @@ int __rte_experimental
rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
{
int ret = 0;
+ struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
rte_eth_dev_shared_data_prepare();
rte_spinlock_lock(&rte_eth_dev_shared_data->ownership_lock);
- if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
+ RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
+ port_id);
ret = -ENODEV;
} else {
- rte_memcpy(owner, &rte_eth_devices[port_id].data->owner,
- sizeof(*owner));
+ rte_memcpy(owner, &ethdev->data->owner, sizeof(*owner));
}
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
@@ -521,7 +554,7 @@ rte_eth_dev_socket_id(uint16_t port_id)
}
void *
-rte_eth_dev_get_sec_ctx(uint8_t port_id)
+rte_eth_dev_get_sec_ctx(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
return rte_eth_devices[port_id].security_ctx;
@@ -530,6 +563,12 @@ rte_eth_dev_get_sec_ctx(uint8_t port_id)
uint16_t
rte_eth_dev_count(void)
{
+ return rte_eth_dev_count_avail();
+}
+
+uint16_t
+rte_eth_dev_count_avail(void)
+{
uint16_t p;
uint16_t count;
@@ -541,6 +580,18 @@ rte_eth_dev_count(void)
return count;
}
+uint16_t __rte_experimental
+rte_eth_dev_count_total(void)
+{
+ uint16_t port, count = 0;
+
+ for (port = 0; port < RTE_MAX_ETHPORTS; port++)
+ if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
+ count++;
+
+ return count;
+}
+
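The split above separates the two counting semantics; a short sketch (not part of the change set) of how an application would use them, with RTE_ETH_FOREACH_DEV covering the "available" set:

#include <stdio.h>
#include <rte_ethdev.h>

static void report_ports(void)
{
	uint16_t pid;

	/* count_avail: ports ready for use; count_total (experimental in
	 * this release): every allocated port, including owned ones. */
	printf("available=%u total=%u\n",
		rte_eth_dev_count_avail(), rte_eth_dev_count_total());

	RTE_ETH_FOREACH_DEV(pid)
		printf("usable port %u\n", pid);
}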
int
rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
{
@@ -549,7 +600,7 @@ rte_eth_dev_get_name_by_port(uint16_t port_id, char *name)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
if (name == NULL) {
- RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
+ RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
return -EINVAL;
}
@@ -566,14 +617,13 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
uint32_t pid;
if (name == NULL) {
- RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
+ RTE_ETHDEV_LOG(ERR, "Null pointer is specified\n");
return -EINVAL;
}
for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
- !strncmp(name, rte_eth_dev_shared_data->data[pid].name,
- strlen(name))) {
+ !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
*port_id = pid;
return 0;
}
@@ -596,35 +646,37 @@ eth_err(uint16_t port_id, int ret)
int
rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
{
+ int current = rte_eth_dev_count_total();
+ struct rte_devargs da;
int ret = -1;
- int current = rte_eth_dev_count();
- char *name = NULL;
- char *args = NULL;
+
+ memset(&da, 0, sizeof(da));
if ((devargs == NULL) || (port_id == NULL)) {
ret = -EINVAL;
goto err;
}
- /* parse devargs, then retrieve device name and args */
- if (rte_eal_parse_devargs_str(devargs, &name, &args))
+ /* parse devargs */
+ if (rte_devargs_parse(&da, devargs))
goto err;
- ret = rte_eal_dev_attach(name, args);
+ ret = rte_eal_hotplug_add(da.bus->name, da.name, da.args);
if (ret < 0)
goto err;
/* no point looking at the port count if no port exists */
- if (!rte_eth_dev_count()) {
- RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
+ if (!rte_eth_dev_count_total()) {
+ RTE_ETHDEV_LOG(ERR, "No port found for device (%s)\n", da.name);
ret = -1;
goto err;
}
/* if nothing happened, there is a bug here, since some driver told us
* it did attach a device, but did not create a port.
+ * FIXME: race condition in case of plug-out of another device
*/
- if (current == rte_eth_dev_count()) {
+ if (current == rte_eth_dev_count_total()) {
ret = -1;
goto err;
}
@@ -633,45 +685,42 @@ rte_eth_dev_attach(const char *devargs, uint16_t *port_id)
ret = 0;
err:
- free(name);
- free(args);
+ free(da.args);
return ret;
}
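With the rework, the devargs string is parsed into bus, name and driver arguments before rte_eal_hotplug_add() is invoked. A usage sketch (not part of the change set; the device string is illustrative and depends on which PMDs are built):

#include <stdio.h>
#include <rte_ethdev.h>

static void attach_null_port(void)
{
	uint16_t port_id;

	/* "net_null0" targets the software null PMD on the vdev bus,
	 * assuming that driver was compiled in. */
	if (rte_eth_dev_attach("net_null0", &port_id) == 0)
		printf("attached as port %u\n", port_id);
}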
/* detach the device, then store the name of the device */
int
-rte_eth_dev_detach(uint16_t port_id, char *name)
+rte_eth_dev_detach(uint16_t port_id, char *name __rte_unused)
{
+ struct rte_device *dev;
+ struct rte_bus *bus;
uint32_t dev_flags;
int ret = -1;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
- if (name == NULL) {
- ret = -EINVAL;
- goto err;
- }
-
dev_flags = rte_eth_devices[port_id].data->dev_flags;
if (dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
- RTE_LOG(ERR, EAL, "Port %" PRIu16 " is bonded, cannot detach\n",
- port_id);
- ret = -ENOTSUP;
- goto err;
+ RTE_ETHDEV_LOG(ERR,
+ "Port %"PRIu16" is bonded, cannot detach\n", port_id);
+ return -ENOTSUP;
}
- snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
- "%s", rte_eth_devices[port_id].data->name);
+ dev = rte_eth_devices[port_id].device;
+ if (dev == NULL)
+ return -EINVAL;
+
+ bus = rte_bus_find_by_device(dev);
+ if (bus == NULL)
+ return -ENOENT;
- ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
+ ret = rte_eal_hotplug_remove(bus->name, dev->name);
if (ret < 0)
- goto err;
+ return ret;
rte_eth_dev_release_port(&rte_eth_devices[port_id]);
return 0;
-
-err:
- return ret;
}
static int
@@ -732,16 +781,23 @@ rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (!dev->data->dev_started) {
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u must be started before starting any queue\n",
+ port_id);
+ return -EINVAL;
+ }
+
if (rx_queue_id >= dev->data->nb_rx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
- RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
- " already started\n",
+ RTE_ETHDEV_LOG(INFO,
+ "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
rx_queue_id, port_id);
return 0;
}
@@ -760,15 +816,15 @@ rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
- RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
- " already stopped\n",
+ RTE_ETHDEV_LOG(INFO,
+ "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
rx_queue_id, port_id);
return 0;
}
@@ -785,23 +841,28 @@ rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (!dev->data->dev_started) {
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u must be started before starting any queue\n",
+ port_id);
+ return -EINVAL;
+ }
+
if (tx_queue_id >= dev->data->nb_tx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
- RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
- " already started\n",
+ RTE_ETHDEV_LOG(INFO,
+ "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
tx_queue_id, port_id);
return 0;
}
- return eth_err(port_id, dev->dev_ops->tx_queue_start(dev,
- tx_queue_id));
-
+ return eth_err(port_id, dev->dev_ops->tx_queue_start(dev, tx_queue_id));
}
int
@@ -813,15 +874,15 @@ rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
dev = &rte_eth_devices[port_id];
if (tx_queue_id >= dev->data->nb_tx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
- RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
- " already stopped\n",
+ RTE_ETHDEV_LOG(INFO,
+ "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
tx_queue_id, port_id);
return 0;
}
@@ -913,95 +974,6 @@ rte_eth_speed_bitflag(uint32_t speed, int duplex)
}
}
-/**
- * A conversion function from rxmode bitfield API.
- */
-static void
-rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
- uint64_t *rx_offloads)
-{
- uint64_t offloads = 0;
-
- if (rxmode->header_split == 1)
- offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
- if (rxmode->hw_ip_checksum == 1)
- offloads |= DEV_RX_OFFLOAD_CHECKSUM;
- if (rxmode->hw_vlan_filter == 1)
- offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
- if (rxmode->hw_vlan_strip == 1)
- offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
- if (rxmode->hw_vlan_extend == 1)
- offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
- if (rxmode->jumbo_frame == 1)
- offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- if (rxmode->hw_strip_crc == 1)
- offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- if (rxmode->enable_scatter == 1)
- offloads |= DEV_RX_OFFLOAD_SCATTER;
- if (rxmode->enable_lro == 1)
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
- if (rxmode->hw_timestamp == 1)
- offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
- if (rxmode->security == 1)
- offloads |= DEV_RX_OFFLOAD_SECURITY;
-
- *rx_offloads = offloads;
-}
-
-/**
- * A conversion function from rxmode offloads API.
- */
-static void
-rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
- struct rte_eth_rxmode *rxmode)
-{
-
- if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
- rxmode->header_split = 1;
- else
- rxmode->header_split = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
- rxmode->hw_ip_checksum = 1;
- else
- rxmode->hw_ip_checksum = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- rxmode->hw_vlan_filter = 1;
- else
- rxmode->hw_vlan_filter = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- rxmode->hw_vlan_strip = 1;
- else
- rxmode->hw_vlan_strip = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
- rxmode->hw_vlan_extend = 1;
- else
- rxmode->hw_vlan_extend = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- rxmode->jumbo_frame = 1;
- else
- rxmode->jumbo_frame = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
- rxmode->hw_strip_crc = 1;
- else
- rxmode->hw_strip_crc = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
- rxmode->enable_scatter = 1;
- else
- rxmode->enable_scatter = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
- rxmode->enable_lro = 1;
- else
- rxmode->enable_lro = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
- rxmode->hw_timestamp = 1;
- else
- rxmode->hw_timestamp = 0;
- if (rx_offloads & DEV_RX_OFFLOAD_SECURITY)
- rxmode->security = 1;
- else
- rxmode->security = 0;
-}
-
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
@@ -1045,43 +1017,49 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ /* If number of queues specified by application for both Rx and Tx is
+ * zero, use driver preferred values. This cannot be done individually
+ * as it is valid for either Tx or Rx (but not both) to be zero.
+ * If the driver does not provide any preferred values, fall back on
+ * EAL defaults.
+ */
+ if (nb_rx_q == 0 && nb_tx_q == 0) {
+ nb_rx_q = dev_info.default_rxportconf.nb_queues;
+ if (nb_rx_q == 0)
+ nb_rx_q = RTE_ETH_DEV_FALLBACK_RX_NBQUEUES;
+ nb_tx_q = dev_info.default_txportconf.nb_queues;
+ if (nb_tx_q == 0)
+ nb_tx_q = RTE_ETH_DEV_FALLBACK_TX_NBQUEUES;
+ }
+
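A minimal sketch (not part of the change set) of the new default path described in the comment above: passing zero for both queue counts picks the driver's preferred counts, or RTE_ETH_DEV_FALLBACK_RX_NBQUEUES / RTE_ETH_DEV_FALLBACK_TX_NBQUEUES (one queue each in this release) when the driver states no preference:

#include <string.h>
#include <rte_ethdev.h>

static int configure_with_defaults(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* 0/0 queue counts: defer to driver/EAL defaults */
	return rte_eth_dev_configure(port_id, 0, 0, &conf);
}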
if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
- RTE_PMD_DEBUG_TRACE(
+ RTE_ETHDEV_LOG(ERR,
"Number of RX queues requested (%u) is greater than max supported(%d)\n",
nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
return -EINVAL;
}
if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
- RTE_PMD_DEBUG_TRACE(
+ RTE_ETHDEV_LOG(ERR,
"Number of TX queues requested (%u) is greater than max supported(%d)\n",
nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
return -EINVAL;
}
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
-
if (dev->data->dev_started) {
- RTE_PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration\n", port_id);
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u must be stopped to allow configuration\n",
+ port_id);
return -EBUSY;
}
- /*
- * Convert between the offloads API to enable PMDs to support
- * only one of them.
- */
- if (dev_conf->rxmode.ignore_offload_bitfield == 0) {
- rte_eth_convert_rx_offload_bitfield(
- &dev_conf->rxmode, &local_conf.rxmode.offloads);
- } else {
- rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
- &local_conf.rxmode);
- }
-
/* Copy the dev_conf parameter into the dev structure */
memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
@@ -1090,36 +1068,29 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
* than the maximum number of RX and TX queues supported by the
* configured device.
*/
- (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
-
- if (nb_rx_q == 0 && nb_tx_q == 0) {
- RTE_PMD_DEBUG_TRACE("ethdev port_id=%d both rx and tx queue cannot be 0\n", port_id);
- return -EINVAL;
- }
-
if (nb_rx_q > dev_info.max_rx_queues) {
- RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
- port_id, nb_rx_q, dev_info.max_rx_queues);
+ RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
+ port_id, nb_rx_q, dev_info.max_rx_queues);
return -EINVAL;
}
if (nb_tx_q > dev_info.max_tx_queues) {
- RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
- port_id, nb_tx_q, dev_info.max_tx_queues);
+ RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
+ port_id, nb_tx_q, dev_info.max_tx_queues);
return -EINVAL;
}
/* Check that the device supports requested interrupts */
if ((dev_conf->intr_conf.lsc == 1) &&
- (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
- RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
- dev->device->driver->name);
- return -EINVAL;
+ (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
+ RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
+ dev->device->driver->name);
+ return -EINVAL;
}
if ((dev_conf->intr_conf.rmv == 1) &&
- (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
- RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
- dev->device->driver->name);
+ (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
+ RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
+ dev->device->driver->name);
return -EINVAL;
}
@@ -1128,19 +1099,16 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
* length is supported by the configured device.
*/
if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (dev_conf->rxmode.max_rx_pkt_len >
- dev_info.max_rx_pktlen) {
- RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
- " > max valid value %u\n",
- port_id,
- (unsigned)dev_conf->rxmode.max_rx_pkt_len,
- (unsigned)dev_info.max_rx_pktlen);
+ if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
+ port_id, dev_conf->rxmode.max_rx_pkt_len,
+ dev_info.max_rx_pktlen);
return -EINVAL;
} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
- RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
- " < min valid value %u\n",
- port_id,
- (unsigned)dev_conf->rxmode.max_rx_pkt_len,
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
+ port_id, dev_conf->rxmode.max_rx_pkt_len,
(unsigned)ETHER_MIN_LEN);
return -EINVAL;
}
@@ -1152,28 +1120,71 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
ETHER_MAX_LEN;
}
+ /* Any requested offloading must be within its device capabilities */
+ if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
+ local_conf.rxmode.offloads) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
+ "capabilities 0x%"PRIx64" in %s()\n",
+ port_id, local_conf.rxmode.offloads,
+ dev_info.rx_offload_capa,
+ __func__);
+ return -EINVAL;
+ }
+ if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
+ local_conf.txmode.offloads) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
+ "capabilities 0x%"PRIx64" in %s()\n",
+ port_id, local_conf.txmode.offloads,
+ dev_info.tx_offload_capa,
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((local_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP) &&
+ (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)) {
+ RTE_ETHDEV_LOG(ERR,
+ "Port id=%u not allowed to set both CRC STRIP and KEEP CRC offload flags\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ /* Check that device supports requested rss hash functions. */
+ if ((dev_info.flow_type_rss_offloads |
+ dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
+ dev_info.flow_type_rss_offloads) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
+ port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
+ dev_info.flow_type_rss_offloads);
+ return -EINVAL;
+ }
+
/*
* Setup new number of RX/TX queues and reconfigure device.
*/
diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
if (diag != 0) {
- RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
- port_id, diag);
+ RTE_ETHDEV_LOG(ERR,
+ "Port%u rte_eth_dev_rx_queue_config = %d\n",
+ port_id, diag);
return diag;
}
diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
if (diag != 0) {
- RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
- port_id, diag);
+ RTE_ETHDEV_LOG(ERR,
+ "Port%u rte_eth_dev_tx_queue_config = %d\n",
+ port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
return diag;
}
diag = (*dev->dev_ops->dev_configure)(dev);
if (diag != 0) {
- RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
- port_id, diag);
+ RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
+ port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
return eth_err(port_id, diag);
@@ -1182,8 +1193,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
/* Initialize Rx profiling if enabled at compilation time. */
diag = __rte_eth_profile_rx_init(port_id, dev);
if (diag != 0) {
- RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
- port_id, diag);
+ RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_profile_rx_init = %d\n",
+ port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
return eth_err(port_id, diag);
@@ -1196,8 +1207,7 @@ void
_rte_eth_dev_reset(struct rte_eth_dev *dev)
{
if (dev->data->dev_started) {
- RTE_PMD_DEBUG_TRACE(
- "port %d must be stopped to allow reset\n",
+ RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
dev->data->port_id);
return;
}
@@ -1276,8 +1286,8 @@ rte_eth_dev_start(uint16_t port_id)
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
if (dev->data->dev_started != 0) {
- RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
- " already started\n",
+ RTE_ETHDEV_LOG(INFO,
+ "Device with port_id=%"PRIu16" already started\n",
port_id);
return 0;
}
@@ -1308,8 +1318,8 @@ rte_eth_dev_stop(uint16_t port_id)
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
if (dev->data->dev_started == 0) {
- RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu16
- " already stopped\n",
+ RTE_ETHDEV_LOG(INFO,
+ "Device with port_id=%"PRIu16" already stopped\n",
port_id);
return;
}
@@ -1421,16 +1431,10 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
return -EINVAL;
}
- if (dev->data->dev_started) {
- RTE_PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration\n", port_id);
- return -EBUSY;
- }
-
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
@@ -1441,39 +1445,53 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
*/
rte_eth_dev_info_get(port_id, &dev_info);
if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
- RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
- mp->name, (int) mp->private_data_size,
- (int) sizeof(struct rte_pktmbuf_pool_private));
+ RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
+ mp->name, (int)mp->private_data_size,
+ (int)sizeof(struct rte_pktmbuf_pool_private));
return -ENOSPC;
}
mbp_buf_size = rte_pktmbuf_data_room_size(mp);
if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
- RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
- "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
- "=%d)\n",
- mp->name,
- (int)mbp_buf_size,
- (int)(RTE_PKTMBUF_HEADROOM +
- dev_info.min_rx_bufsize),
- (int)RTE_PKTMBUF_HEADROOM,
- (int)dev_info.min_rx_bufsize);
+ RTE_ETHDEV_LOG(ERR,
+ "%s mbuf_data_room_size %d < %d (RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)=%d)\n",
+ mp->name, (int)mbp_buf_size,
+ (int)(RTE_PKTMBUF_HEADROOM + dev_info.min_rx_bufsize),
+ (int)RTE_PKTMBUF_HEADROOM,
+ (int)dev_info.min_rx_bufsize);
return -EINVAL;
}
+ /* Use default specified by driver, if nb_rx_desc is zero */
+ if (nb_rx_desc == 0) {
+ nb_rx_desc = dev_info.default_rxportconf.ring_size;
+ /* If driver default is also zero, fall back on EAL default */
+ if (nb_rx_desc == 0)
+ nb_rx_desc = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;
+ }
+
if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
- RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
- "should be: <= %hu, >= %hu, and a product of %hu\n",
- nb_rx_desc,
- dev_info.rx_desc_lim.nb_max,
+ RTE_ETHDEV_LOG(ERR,
+ "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
+ nb_rx_desc, dev_info.rx_desc_lim.nb_max,
dev_info.rx_desc_lim.nb_min,
dev_info.rx_desc_lim.nb_align);
return -EINVAL;
}
+ if (dev->data->dev_started &&
+ !(dev_info.dev_capa &
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP))
+ return -EBUSY;
+
+ if (dev->data->dev_started &&
+ (dev->data->rx_queue_state[rx_queue_id] !=
+ RTE_ETH_QUEUE_STATE_STOPPED))
+ return -EBUSY;
+
rxq = dev->data->rx_queues;
if (rxq[rx_queue_id]) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
@@ -1486,13 +1504,34 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
rx_conf = &dev_info.default_rxconf;
local_conf = *rx_conf;
- if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
- /**
- * Reflect port offloads to queue offloads in order for
- * offloads to not be discarded.
- */
- rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
- &local_conf.offloads);
+
+ /*
+ * If an offload has already been enabled in
+ * rte_eth_dev_configure(), it has been enabled on all queues,
+ * so there is no need to enable it on this queue again.
+ * The local_conf.offloads input to the underlying PMD carries
+ * only those offloads that are enabled on this queue alone,
+ * not those enabled on all queues.
+ */
+ local_conf.offloads &= ~dev->data->dev_conf.rxmode.offloads;
+
+ /*
+ * Offloads newly added for this queue are those not enabled in
+ * rte_eth_dev_configure(), and they must be of a per-queue type.
+ * A pure per-port offload can't be enabled on a queue while
+ * disabled on another queue, and it can't be newly enabled on a
+ * queue if it hasn't already been enabled in
+ * rte_eth_dev_configure().
+ */
+ if ((local_conf.offloads & dev_info.rx_queue_offload_capa) !=
+ local_conf.offloads) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%d rx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
+ "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
+ port_id, rx_queue_id, local_conf.offloads,
+ dev_info.rx_queue_offload_capa,
+ __func__);
+ return -EINVAL;
}
ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
@@ -1506,55 +1545,6 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
return eth_err(port_id, ret);
}
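The per-port/per-queue offload rules above (mirrored for Tx further down) translate into the following application-side sketch (not part of the change set; the offload choices are illustrative):

#include <string.h>
#include <rte_ethdev.h>

static int setup_port_and_queue(uint16_t port, struct rte_mempool *mp)
{
	struct rte_eth_conf pconf;
	struct rte_eth_dev_info info;
	struct rte_eth_rxconf qconf;
	int ret;

	memset(&pconf, 0, sizeof(pconf));
	/* port-wide offload: enabled on every queue via configure() */
	pconf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
	ret = rte_eth_dev_configure(port, 1, 1, &pconf);
	if (ret != 0)
		return ret;

	rte_eth_dev_info_get(port, &info);
	qconf = info.default_rxconf;
	/* queue-local addition: only legal if reported as per-queue capable */
	if (info.rx_queue_offload_capa & DEV_RX_OFFLOAD_SCATTER)
		qconf.offloads |= DEV_RX_OFFLOAD_SCATTER;

	/* nb_rx_desc of 0 now also defers to the driver default */
	return rte_eth_rx_queue_setup(port, 0, 0,
			rte_eth_dev_socket_id(port), &qconf, mp);
}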
-/**
- * A conversion function from txq_flags API.
- */
-static void
-rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
-{
- uint64_t offloads = 0;
-
- if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
- offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
- offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
- if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
- offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
- if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
- offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
- if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
- offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
- if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
- (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
- offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
-
- *tx_offloads = offloads;
-}
-
-/**
- * A conversion function from offloads API.
- */
-static void
-rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
-{
- uint32_t flags = 0;
-
- if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
- flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
- if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
- flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
- if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
- if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
- if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
- if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- flags |= (ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP);
-
- *txq_flags = flags;
-}
-
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
@@ -1569,33 +1559,43 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
dev = &rte_eth_devices[port_id];
if (tx_queue_id >= dev->data->nb_tx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
return -EINVAL;
}
- if (dev->data->dev_started) {
- RTE_PMD_DEBUG_TRACE(
- "port %d must be stopped to allow configuration\n", port_id);
- return -EBUSY;
- }
-
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
rte_eth_dev_info_get(port_id, &dev_info);
+ /* Use default specified by driver, if nb_tx_desc is zero */
+ if (nb_tx_desc == 0) {
+ nb_tx_desc = dev_info.default_txportconf.ring_size;
+ /* If driver default is zero, fall back on EAL default */
+ if (nb_tx_desc == 0)
+ nb_tx_desc = RTE_ETH_DEV_FALLBACK_TX_RINGSIZE;
+ }
if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
- RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
- "should be: <= %hu, >= %hu, and a product of %hu\n",
- nb_tx_desc,
- dev_info.tx_desc_lim.nb_max,
- dev_info.tx_desc_lim.nb_min,
- dev_info.tx_desc_lim.nb_align);
+ RTE_ETHDEV_LOG(ERR,
+ "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
+ nb_tx_desc, dev_info.tx_desc_lim.nb_max,
+ dev_info.tx_desc_lim.nb_min,
+ dev_info.tx_desc_lim.nb_align);
return -EINVAL;
}
+ if (dev->data->dev_started &&
+ !(dev_info.dev_capa &
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP))
+ return -EBUSY;
+
+ if (dev->data->dev_started &&
+ (dev->data->tx_queue_state[tx_queue_id] !=
+ RTE_ETH_QUEUE_STATE_STOPPED))
+ return -EBUSY;
+
txq = dev->data->tx_queues;
if (txq[tx_queue_id]) {
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
@@ -1607,19 +1607,35 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
if (tx_conf == NULL)
tx_conf = &dev_info.default_txconf;
+ local_conf = *tx_conf;
+
/*
- * Convert between the offloads API to enable PMDs to support
- * only one of them.
+ * If an offload has already been enabled in
+ * rte_eth_dev_configure(), it has been enabled on all queues,
+ * so there is no need to enable it on this queue again.
+ * The local_conf.offloads input to the underlying PMD carries
+ * only those offloads that are enabled on this queue alone,
+ * not those enabled on all queues.
*/
- local_conf = *tx_conf;
- if (tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE) {
- rte_eth_convert_txq_offloads(tx_conf->offloads,
- &local_conf.txq_flags);
- /* Keep the ignore flag. */
- local_conf.txq_flags |= ETH_TXQ_FLAGS_IGNORE;
- } else {
- rte_eth_convert_txq_flags(tx_conf->txq_flags,
- &local_conf.offloads);
+ local_conf.offloads &= ~dev->data->dev_conf.txmode.offloads;
+
+ /*
+ * Offloads newly added for this queue are those not enabled in
+ * rte_eth_dev_configure(), and they must be of a per-queue type.
+ * A pure per-port offload can't be enabled on a queue while
+ * disabled on another queue, and it can't be newly enabled on a
+ * queue if it hasn't already been enabled in
+ * rte_eth_dev_configure().
+ */
+ if ((local_conf.offloads & dev_info.tx_queue_offload_capa) !=
+ local_conf.offloads) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%d tx_queue_id=%d, newly added offloads 0x%"PRIx64" must be "
+ "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
+ port_id, tx_queue_id, local_conf.offloads,
+ dev_info.tx_queue_offload_capa,
+ __func__);
+ return -EINVAL;
}
return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
@@ -1765,20 +1781,6 @@ rte_eth_allmulticast_get(uint16_t port_id)
return dev->data->all_multicast;
}
-static inline int
-rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
void
rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
{
@@ -1787,8 +1789,9 @@ rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ if (dev->data->dev_conf.intr_conf.lsc &&
+ dev->data->dev_started)
+ rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
(*dev->dev_ops->link_update)(dev, 1);
@@ -1804,8 +1807,9 @@ rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.intr_conf.lsc != 0)
- rte_eth_dev_atomic_read_link_status(dev, eth_link);
+ if (dev->data->dev_conf.intr_conf.lsc &&
+ dev->data->dev_started)
+ rte_eth_linkstatus_get(dev, eth_link);
else {
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
(*dev->dev_ops->link_update)(dev, 0);
@@ -1895,19 +1899,19 @@ rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (!id) {
- RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
+ RTE_ETHDEV_LOG(ERR, "Id pointer is NULL\n");
return -ENOMEM;
}
if (!xstat_name) {
- RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
+ RTE_ETHDEV_LOG(ERR, "xstat_name pointer is NULL\n");
return -ENOMEM;
}
/* Get count */
cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
if (cnt_xstats < 0) {
- RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
+ RTE_ETHDEV_LOG(ERR, "Cannot get count of xstats\n");
return -ENODEV;
}
@@ -1916,7 +1920,7 @@ rte_eth_xstats_get_id_by_name(uint16_t port_id, const char *xstat_name,
if (cnt_xstats != rte_eth_xstats_get_names_by_id(
port_id, xstats_names, cnt_xstats, NULL)) {
- RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
+ RTE_ETHDEV_LOG(ERR, "Cannot get xstats lookup\n");
return -1;
}
@@ -2039,7 +2043,7 @@ rte_eth_xstats_get_names_by_id(uint16_t port_id,
sizeof(struct rte_eth_xstat_name));
if (!xstats_names_copy) {
- RTE_PMD_DEBUG_TRACE("ERROR: can't allocate memory");
+ RTE_ETHDEV_LOG(ERR, "Can't allocate memory\n");
return -ENOMEM;
}
@@ -2067,7 +2071,7 @@ rte_eth_xstats_get_names_by_id(uint16_t port_id,
/* Filter stats */
for (i = 0; i < size; i++) {
if (ids[i] >= expected_entries) {
- RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
+ RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
free(xstats_names_copy);
return -1;
}
@@ -2252,7 +2256,7 @@ rte_eth_xstats_get_by_id(uint16_t port_id, const uint64_t *ids,
/* Filter stats */
for (i = 0; i < size; i++) {
if (ids[i] >= expected_entries) {
- RTE_PMD_DEBUG_TRACE("ERROR: id value isn't valid\n");
+ RTE_ETHDEV_LOG(ERR, "Id value isn't valid\n");
return -1;
}
values[i] = xstats[ids[i]].value;
@@ -2342,6 +2346,16 @@ set_queue_stats_mapping(uint16_t port_id, uint16_t queue_id, uint8_t stat_idx,
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
+
+ if (is_rx && (queue_id >= dev->data->nb_rx_queues))
+ return -EINVAL;
+
+ if (!is_rx && (queue_id >= dev->data->nb_tx_queues))
+ return -EINVAL;
+
+ if (stat_idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ return -EINVAL;
+
return (*dev->dev_ops->queue_stats_mapping_set)
(dev, queue_id, stat_idx, is_rx);
}
@@ -2393,12 +2407,15 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
dev_info->rx_desc_lim = lim;
dev_info->tx_desc_lim = lim;
+ dev_info->device = dev->device;
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
+
+ dev_info->dev_flags = &dev->data->dev_flags;
}
int
@@ -2477,13 +2494,14 @@ rte_eth_dev_vlan_filter(uint16_t port_id, uint16_t vlan_id, int on)
dev = &rte_eth_devices[port_id];
if (!(dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_VLAN_FILTER)) {
- RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
+ RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
+ port_id);
return -ENOSYS;
}
if (vlan_id > 4095) {
- RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
- port_id, (unsigned) vlan_id);
+ RTE_ETHDEV_LOG(ERR, "Port_id=%u invalid vlan_id=%u > 4095\n",
+ port_id, vlan_id);
return -EINVAL;
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
@@ -2516,7 +2534,7 @@ rte_eth_dev_set_vlan_strip_on_queue(uint16_t port_id, uint16_t rx_queue_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", port_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid rx_queue_id=%u\n", rx_queue_id);
return -EINVAL;
}
@@ -2601,19 +2619,10 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
-
- /*
- * Convert to the offload bitfield API just in case the underlying PMD
- * still supporting it.
- */
- rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
- &dev->data->dev_conf.rxmode);
ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
if (ret) {
/* hit an error restore original values */
dev->data->dev_conf.rxmode.offloads = orig_offloads;
- rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
- &dev->data->dev_conf.rxmode);
}
return eth_err(port_id, ret);
@@ -2674,7 +2683,7 @@ rte_eth_dev_flow_ctrl_set(uint16_t port_id, struct rte_eth_fc_conf *fc_conf)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
- RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid send_xon, only 0/1 allowed\n");
return -EINVAL;
}
@@ -2691,7 +2700,7 @@ rte_eth_dev_priority_flow_ctrl_set(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
- RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
return -EINVAL;
}
@@ -2732,7 +2741,7 @@ rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
return -EINVAL;
if (max_rxq == 0) {
- RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
+ RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
return -EINVAL;
}
@@ -2741,8 +2750,9 @@ rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
shift = i % RTE_RETA_GROUP_SIZE;
if ((reta_conf[idx].mask & (1ULL << shift)) &&
(reta_conf[idx].reta[shift] >= max_rxq)) {
- RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
- "the maximum rxq index: %u\n", idx, shift,
+ RTE_ETHDEV_LOG(ERR,
+ "reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
+ idx, shift,
reta_conf[idx].reta[shift], max_rxq);
return -EINVAL;
}
@@ -2804,9 +2814,19 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
struct rte_eth_rss_conf *rss_conf)
{
struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
+ dev_info.flow_type_rss_offloads) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
+ port_id, rss_conf->rss_hf,
+ dev_info.flow_type_rss_offloads);
+ return -EINVAL;
+ }
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
return eth_err(port_id, (*dev->dev_ops->rss_hash_update)(dev,
rss_conf));
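Since rss_hf bits outside the device's flow_type_rss_offloads are now rejected with -EINVAL, an application can mask its request against the advertised capabilities first; a sketch (not part of the change set):

#include <string.h>
#include <rte_ethdev.h>

static int request_rss(uint16_t port_id, uint64_t wanted_hf)
{
	struct rte_eth_dev_info info;
	struct rte_eth_rss_conf rss;

	memset(&rss, 0, sizeof(rss));	/* NULL rss_key: keep the current key */
	rte_eth_dev_info_get(port_id, &info);
	rss.rss_hf = wanted_hf & info.flow_type_rss_offloads;
	return rte_eth_dev_rss_hash_update(port_id, &rss);
}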
@@ -2833,12 +2853,12 @@ rte_eth_dev_udp_tunnel_port_add(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (udp_tunnel == NULL) {
- RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
return -EINVAL;
}
if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
- RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
return -EINVAL;
}
@@ -2858,12 +2878,12 @@ rte_eth_dev_udp_tunnel_port_delete(uint16_t port_id,
dev = &rte_eth_devices[port_id];
if (udp_tunnel == NULL) {
- RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid udp_tunnel parameter\n");
return -EINVAL;
}
if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
- RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
return -EINVAL;
}
@@ -2931,12 +2951,12 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
if (is_zero_ether_addr(addr)) {
- RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
port_id);
return -EINVAL;
}
if (pool >= ETH_64_POOLS) {
- RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
+ RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
return -EINVAL;
}
@@ -2944,7 +2964,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
if (index < 0) {
index = get_mac_addr_index(port_id, &null_mac_addr);
if (index < 0) {
- RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
+ RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
port_id);
return -ENOSPC;
}
@@ -2982,7 +3002,9 @@ rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
index = get_mac_addr_index(port_id, addr);
if (index == 0) {
- RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u: Cannot remove default MAC address\n",
+ port_id);
return -EADDRINUSE;
} else if (index < 0)
return 0; /* Do nothing if address wasn't found */
@@ -3003,6 +3025,7 @@ int
rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
{
struct rte_eth_dev *dev;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
@@ -3012,11 +3035,13 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
+ ret = (*dev->dev_ops->mac_addr_set)(dev, addr);
+ if (ret < 0)
+ return ret;
+
/* Update default address in NIC data structure */
ether_addr_copy(addr, &dev->data->mac_addrs[0]);
- (*dev->dev_ops->mac_addr_set)(dev, addr);
-
return 0;
}
@@ -3056,7 +3081,7 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
dev = &rte_eth_devices[port_id];
if (is_zero_ether_addr(addr)) {
- RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
+ RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
port_id);
return -EINVAL;
}
@@ -3068,15 +3093,16 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
if (index < 0) {
if (!on) {
- RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
- "set in UTA\n", port_id);
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u: the MAC address was not set in UTA\n",
+ port_id);
return -EINVAL;
}
index = get_hash_mac_addr_index(port_id, &null_mac_addr);
if (index < 0) {
- RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
- port_id);
+ RTE_ETHDEV_LOG(ERR, "Port %u: MAC address array full\n",
+ port_id);
return -ENOSPC;
}
}
@@ -3124,14 +3150,15 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
link = dev->data->dev_link;
if (queue_idx > dev_info.max_tx_queues) {
- RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
- "invalid queue id=%d\n", port_id, queue_idx);
+ RTE_ETHDEV_LOG(ERR,
+ "Set queue rate limit: port %u: invalid queue id=%u\n",
+ port_id, queue_idx);
return -EINVAL;
}
if (tx_rate > link.link_speed) {
- RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
- "bigger than link speed= %d\n",
+ RTE_ETHDEV_LOG(ERR,
+ "Set queue rate limit: invalid tx_rate=%u, bigger than link speed=%u\n",
tx_rate, link.link_speed);
return -EINVAL;
}
@@ -3150,26 +3177,28 @@ rte_eth_mirror_rule_set(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (mirror_conf->rule_type == 0) {
- RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
+ RTE_ETHDEV_LOG(ERR, "Mirror rule type can not be 0\n");
return -EINVAL;
}
if (mirror_conf->dst_pool >= ETH_64_POOLS) {
- RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
- ETH_64_POOLS - 1);
+ RTE_ETHDEV_LOG(ERR, "Invalid dst pool, pool id must be 0-%d\n",
+ ETH_64_POOLS - 1);
return -EINVAL;
}
if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
(mirror_conf->pool_mask == 0)) {
- RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
+ RTE_ETHDEV_LOG(ERR,
+ "Invalid mirror pool, pool mask can not be 0\n");
return -EINVAL;
}
if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
mirror_conf->vlan.vlan_mask == 0) {
- RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
+ RTE_ETHDEV_LOG(ERR,
+ "Invalid vlan mask, vlan mask can not be 0\n");
return -EINVAL;
}
@@ -3216,7 +3245,7 @@ rte_eth_dev_callback_register(uint16_t port_id,
return -EINVAL;
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
- RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
return -EINVAL;
}
@@ -3279,7 +3308,7 @@ rte_eth_dev_callback_unregister(uint16_t port_id,
return -EINVAL;
if (!rte_eth_dev_is_valid_port(port_id) && port_id != RTE_ETH_ALL) {
- RTE_LOG(ERR, EAL, "Invalid port_id=%d\n", port_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid port_id=%d\n", port_id);
return -EINVAL;
}
@@ -3348,6 +3377,17 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
return rc;
}
+void
+rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
+{
+ if (dev == NULL)
+ return;
+
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
+
+ dev->state = RTE_ETH_DEV_ATTACHED;
+}
+
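rte_eth_dev_probing_finish() moves the RTE_ETH_EVENT_NEW notification from allocation time to the end of probing, so a registered callback now observes a fully set-up port. A sketch of such a callback (not part of the change set; the names are hypothetical):

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static int on_new_port(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg, void *ret_param)
{
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	printf("port %u finished probing\n", port_id);
	return 0;
}

/* registration, e.g. early in main():
 * rte_eth_dev_callback_register(RTE_ETH_ALL, RTE_ETH_EVENT_NEW,
 *                               on_new_port, NULL);
 */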
int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
@@ -3362,13 +3402,13 @@ rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
dev = &rte_eth_devices[port_id];
if (!dev->intr_handle) {
- RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
+ RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
return -ENOTSUP;
}
intr_handle = dev->intr_handle;
if (!intr_handle->intr_vec) {
- RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
+ RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
return -EPERM;
}
@@ -3376,9 +3416,9 @@ rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
vec = intr_handle->intr_vec[qid];
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
if (rc && rc != -EEXIST) {
- RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
- " op %d epfd %d vec %u\n",
- port_id, qid, op, epfd, vec);
+ RTE_ETHDEV_LOG(ERR,
+ "p %u q %u rx ctl error op %d epfd %d vec %u\n",
+ port_id, qid, op, epfd, vec);
}
}
@@ -3401,7 +3441,103 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
+}
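With the RTE_MEMZONE_IOVA_CONTIG flag the ring memory is now guaranteed to be
IOVA-contiguous. A typical caller is a PMD's queue setup path; a minimal
sketch (struct my_tx_desc, nb_desc, queue_idx and socket_id are placeholders,
not part of this patch):

	const struct rte_memzone *mz;

	/* reuses an existing zone on reconfigure, otherwise reserves one */
	mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
			sizeof(struct my_tx_desc) * nb_desc,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (mz == NULL)
		return -ENOMEM;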
+
+int __rte_experimental
+rte_eth_dev_create(struct rte_device *device, const char *name,
+ size_t priv_data_size,
+ ethdev_bus_specific_init ethdev_bus_specific_init,
+ void *bus_init_params,
+ ethdev_init_t ethdev_init, void *init_params)
+{
+ struct rte_eth_dev *ethdev;
+ int retval;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ ethdev = rte_eth_dev_allocate(name);
+ if (!ethdev) {
+ retval = -ENODEV;
+ goto probe_failed;
+ }
+
+ if (priv_data_size) {
+ ethdev->data->dev_private = rte_zmalloc_socket(
+ name, priv_data_size, RTE_CACHE_LINE_SIZE,
+ device->numa_node);
+
+ if (!ethdev->data->dev_private) {
+ RTE_LOG(ERR, EAL, "failed to allocate private data");
+ retval = -ENOMEM;
+ goto probe_failed;
+ }
+ }
+ } else {
+ ethdev = rte_eth_dev_attach_secondary(name);
+ if (!ethdev) {
+ RTE_LOG(ERR, EAL, "secondary process attach failed, "
+ "ethdev doesn't exist");
+ retval = -ENODEV;
+ goto probe_failed;
+ }
+ }
+
+ ethdev->device = device;
+
+ if (ethdev_bus_specific_init) {
+ retval = ethdev_bus_specific_init(ethdev, bus_init_params);
+ if (retval) {
+ RTE_LOG(ERR, EAL,
+ "ethdev bus specific initialisation failed\n");
+ goto probe_failed;
+ }
+ }
+
+ retval = ethdev_init(ethdev, init_params);
+ if (retval) {
+ RTE_LOG(ERR, EAL, "ethdev initialisation failed");
+ goto probe_failed;
+ }
+
+ rte_eth_dev_probing_finish(ethdev);
+
+ return retval;
+probe_failed:
+ /* free ports private data if primary process */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(ethdev->data->dev_private);
+
+ rte_eth_dev_release_port(ethdev);
+
+ return retval;
+}
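A sketch of how a PCI driver's probe callback might use this helper. The
names my_priv, my_dev_ops and my_ethdev_init are hypothetical;
eth_dev_pci_specific_init is the PCI helper added later in this patch:

static int
my_ethdev_init(struct rte_eth_dev *ethdev, void *init_params)
{
	(void)init_params;
	/* device-specific setup: ops, burst functions, MAC address... */
	ethdev->dev_ops = &my_dev_ops;
	return 0;
}

static int
my_pci_probe(struct rte_pci_driver *drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	/* allocates the port (or attaches in a secondary process), runs
	 * the init callbacks and finally emits RTE_ETH_EVENT_NEW via
	 * rte_eth_dev_probing_finish() */
	return rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
			sizeof(struct my_priv), eth_dev_pci_specific_init,
			pci_dev, my_ethdev_init, NULL);
}

The matching remove callback would look up the port with
rte_eth_dev_allocated() and hand it to rte_eth_dev_destroy() together with a
device-specific uninit callback.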
+
+int __rte_experimental
+rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
+ ethdev_uninit_t ethdev_uninit)
+{
+ int ret;
+
+ ethdev = rte_eth_dev_allocated(ethdev->data->name);
+ if (!ethdev)
+ return -ENODEV;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
+ if (ethdev_uninit) {
+ ret = ethdev_uninit(ethdev);
+ if (ret)
+ return ret;
+ }
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(ethdev->data->dev_private);
+
+ ethdev->data->dev_private = NULL;
+
+ return rte_eth_dev_release_port(ethdev);
}
int
@@ -3417,27 +3553,27 @@ rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
return -EINVAL;
}
if (!dev->intr_handle) {
- RTE_PMD_DEBUG_TRACE("RX Intr handle unset\n");
+ RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
return -ENOTSUP;
}
intr_handle = dev->intr_handle;
if (!intr_handle->intr_vec) {
- RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
+ RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
return -EPERM;
}
vec = intr_handle->intr_vec[queue_id];
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
if (rc && rc != -EEXIST) {
- RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
- " op %d epfd %d vec %u\n",
- port_id, queue_id, op, epfd, vec);
+ RTE_ETHDEV_LOG(ERR,
+ "p %u q %u rx ctl error op %d epfd %d vec %u\n",
+ port_id, queue_id, op, epfd, vec);
return rc;
}
@@ -3490,153 +3626,8 @@ rte_eth_dev_filter_supported(uint16_t port_id,
}
int
-rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
-
-int
-rte_eth_dev_filter_ctrl_v22(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
-{
- struct rte_eth_fdir_info_v22 {
- enum rte_fdir_mode mode;
- struct rte_eth_fdir_masks mask;
- struct rte_eth_fdir_flex_conf flex_conf;
- uint32_t guarant_spc;
- uint32_t best_spc;
- uint32_t flow_types_mask[1];
- uint32_t max_flexpayload;
- uint32_t flex_payload_unit;
- uint32_t max_flex_payload_segment_num;
- uint16_t flex_payload_limit;
- uint32_t flex_bitmask_unit;
- uint32_t max_flex_bitmask_num;
- };
-
- struct rte_eth_hash_global_conf_v22 {
- enum rte_eth_hash_function hash_func;
- uint32_t sym_hash_enable_mask[1];
- uint32_t valid_bit_mask[1];
- };
-
- struct rte_eth_hash_filter_info_v22 {
- enum rte_eth_hash_filter_info_type info_type;
- union {
- uint8_t enable;
- struct rte_eth_hash_global_conf_v22 global_conf;
- struct rte_eth_input_set_conf input_set_conf;
- } info;
- };
-
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
- if (filter_op == RTE_ETH_FILTER_INFO) {
- int retval;
- struct rte_eth_fdir_info_v22 *fdir_info_v22;
- struct rte_eth_fdir_info fdir_info;
-
- fdir_info_v22 = (struct rte_eth_fdir_info_v22 *)arg;
-
- retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
- filter_op, (void *)&fdir_info);
- fdir_info_v22->mode = fdir_info.mode;
- fdir_info_v22->mask = fdir_info.mask;
- fdir_info_v22->flex_conf = fdir_info.flex_conf;
- fdir_info_v22->guarant_spc = fdir_info.guarant_spc;
- fdir_info_v22->best_spc = fdir_info.best_spc;
- fdir_info_v22->flow_types_mask[0] =
- (uint32_t)fdir_info.flow_types_mask[0];
- fdir_info_v22->max_flexpayload = fdir_info.max_flexpayload;
- fdir_info_v22->flex_payload_unit = fdir_info.flex_payload_unit;
- fdir_info_v22->max_flex_payload_segment_num =
- fdir_info.max_flex_payload_segment_num;
- fdir_info_v22->flex_payload_limit =
- fdir_info.flex_payload_limit;
- fdir_info_v22->flex_bitmask_unit = fdir_info.flex_bitmask_unit;
- fdir_info_v22->max_flex_bitmask_num =
- fdir_info.max_flex_bitmask_num;
- return retval;
- } else if (filter_op == RTE_ETH_FILTER_GET) {
- int retval;
- struct rte_eth_hash_filter_info f_info;
- struct rte_eth_hash_filter_info_v22 *f_info_v22 =
- (struct rte_eth_hash_filter_info_v22 *)arg;
-
- f_info.info_type = f_info_v22->info_type;
- retval = (*dev->dev_ops->filter_ctrl)(dev, filter_type,
- filter_op, (void *)&f_info);
-
- switch (f_info_v22->info_type) {
- case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
- f_info_v22->info.enable = f_info.info.enable;
- break;
- case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
- f_info_v22->info.global_conf.hash_func =
- f_info.info.global_conf.hash_func;
- f_info_v22->info.global_conf.sym_hash_enable_mask[0] =
- (uint32_t)
- f_info.info.global_conf.sym_hash_enable_mask[0];
- f_info_v22->info.global_conf.valid_bit_mask[0] =
- (uint32_t)
- f_info.info.global_conf.valid_bit_mask[0];
- break;
- case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
- f_info_v22->info.input_set_conf =
- f_info.info.input_set_conf;
- break;
- default:
- break;
- }
- return retval;
- } else if (filter_op == RTE_ETH_FILTER_SET) {
- struct rte_eth_hash_filter_info f_info;
- struct rte_eth_hash_filter_info_v22 *f_v22 =
- (struct rte_eth_hash_filter_info_v22 *)arg;
-
- f_info.info_type = f_v22->info_type;
- switch (f_v22->info_type) {
- case RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT:
- f_info.info.enable = f_v22->info.enable;
- break;
- case RTE_ETH_HASH_FILTER_GLOBAL_CONFIG:
- f_info.info.global_conf.hash_func =
- f_v22->info.global_conf.hash_func;
- f_info.info.global_conf.sym_hash_enable_mask[0] =
- (uint32_t)
- f_v22->info.global_conf.sym_hash_enable_mask[0];
- f_info.info.global_conf.valid_bit_mask[0] =
- (uint32_t)
- f_v22->info.global_conf.valid_bit_mask[0];
- break;
- case RTE_ETH_HASH_FILTER_INPUT_SET_SELECT:
- f_info.info.input_set_conf =
- f_v22->info.input_set_conf;
- break;
- default:
- break;
- }
- return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
- (void *)&f_info);
- } else
- return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op,
- arg);
-}
-VERSION_SYMBOL(rte_eth_dev_filter_ctrl, _v22, 2.2);
-
-int
-rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg);
-
-int
-rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg)
+rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
{
struct rte_eth_dev *dev;
@@ -3647,13 +3638,8 @@ rte_eth_dev_filter_ctrl_v1802(uint16_t port_id,
return eth_err(port_id, (*dev->dev_ops->filter_ctrl)(dev, filter_type,
filter_op, arg));
}
-BIND_DEFAULT_SYMBOL(rte_eth_dev_filter_ctrl, _v1802, 18.02);
-MAP_STATIC_SYMBOL(int rte_eth_dev_filter_ctrl(uint16_t port_id,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op, void *arg),
- rte_eth_dev_filter_ctrl_v1802);
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
@@ -3695,7 +3681,7 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
return cb;
}
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param)
{
@@ -3730,7 +3716,7 @@ rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
return cb;
}
-void *
+const struct rte_eth_rxtx_callback *
rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param)
{
@@ -3775,7 +3761,7 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
int
rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_rxtx_callback *user_cb)
+ const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
return -ENOTSUP;
@@ -3809,7 +3795,7 @@ rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
int
rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_rxtx_callback *user_cb)
+ const struct rte_eth_rxtx_callback *user_cb)
{
#ifndef RTE_ETHDEV_RXTX_CALLBACKS
return -ENOTSUP;
@@ -3854,7 +3840,7 @@ rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
return -EINVAL;
}
@@ -3878,7 +3864,7 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_tx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
return -EINVAL;
}
@@ -3886,6 +3872,7 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
+
return 0;
}
@@ -4043,6 +4030,32 @@ rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
}
+int __rte_experimental
+rte_eth_dev_get_module_info(uint16_t port_id,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_info, -ENOTSUP);
+ return (*dev->dev_ops->get_module_info)(dev, modinfo);
+}
+
+int __rte_experimental
+rte_eth_dev_get_module_eeprom(uint16_t port_id,
+ struct rte_dev_eeprom_info *info)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_module_eeprom, -ENOTSUP);
+ return (*dev->dev_ops->get_module_eeprom)(dev, info);
+}
+
int
rte_eth_dev_get_dcb_info(uint16_t port_id,
struct rte_eth_dcb_info *dcb_info)
@@ -4066,12 +4079,12 @@ rte_eth_dev_l2_tunnel_eth_type_conf(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (l2_tunnel == NULL) {
- RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
return -EINVAL;
}
if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
- RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
return -EINVAL;
}
@@ -4093,17 +4106,17 @@ rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (l2_tunnel == NULL) {
- RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid l2_tunnel parameter\n");
return -EINVAL;
}
if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
- RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
+ RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
return -EINVAL;
}
if (mask == 0) {
- RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
+ RTE_ETHDEV_LOG(ERR, "Mask should have a value\n");
return -EINVAL;
}
@@ -4168,3 +4181,245 @@ rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
+
+/**
+ * A set of values to describe the possible states of a switch domain.
+ */
+enum rte_eth_switch_domain_state {
+ RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
+ RTE_ETH_SWITCH_DOMAIN_ALLOCATED
+};
+
+/**
+ * Array of switch domains available for allocation. Array is sized to
+ * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
+ * ethdev ports in a single process.
+ */
+struct rte_eth_dev_switch {
+ enum rte_eth_switch_domain_state state;
+} rte_eth_switch_domains[RTE_MAX_ETHPORTS];
+
+int __rte_experimental
+rte_eth_switch_domain_alloc(uint16_t *domain_id)
+{
+ unsigned int i;
+
+ *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
+
+ for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
+ i < RTE_MAX_ETHPORTS; i++) {
+ if (rte_eth_switch_domains[i].state ==
+ RTE_ETH_SWITCH_DOMAIN_UNUSED) {
+ rte_eth_switch_domains[i].state =
+ RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
+ *domain_id = i;
+ return 0;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+int __rte_experimental
+rte_eth_switch_domain_free(uint16_t domain_id)
+{
+ if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
+ domain_id >= RTE_MAX_ETHPORTS)
+ return -EINVAL;
+
+ if (rte_eth_switch_domains[domain_id].state !=
+ RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
+ return -EINVAL;
+
+ rte_eth_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
+
+ return 0;
+}
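A sketch of the intended use: a PF driver allocates one domain id at probe
time and reports it through rte_eth_dev_info.switch_info for itself and all
of its representors, freeing it when the last port of the domain goes away:

	uint16_t domain_id;

	if (rte_eth_switch_domain_alloc(&domain_id) != 0)
		return -ENOSPC; /* all RTE_MAX_ETHPORTS ids are in use */
	/* ... store domain_id in the shared PF/representor private data ... */

	/* on teardown of the last port of the domain: */
	rte_eth_switch_domain_free(domain_id);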
+
+typedef int (*rte_eth_devargs_callback_t)(char *str, void *data);
+
+static int
+rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
+{
+ int state;
+ struct rte_kvargs_pair *pair;
+ char *letter;
+
+ arglist->str = strdup(str_in);
+ if (arglist->str == NULL)
+ return -ENOMEM;
+
+ letter = arglist->str;
+ state = 0;
+ arglist->count = 0;
+ pair = &arglist->pairs[0];
+ while (1) {
+ switch (state) {
+ case 0: /* Initial */
+ if (*letter == '=')
+ return -EINVAL;
+ else if (*letter == '\0')
+ return 0;
+
+ state = 1;
+ pair->key = letter;
+ /* fall-thru */
+
+ case 1: /* Parsing key */
+ if (*letter == '=') {
+ *letter = '\0';
+ pair->value = letter + 1;
+ state = 2;
+ } else if (*letter == ',' || *letter == '\0')
+ return -EINVAL;
+ break;
+
+ case 2: /* Parsing value */
+ if (*letter == '[')
+ state = 3;
+ else if (*letter == ',') {
+ *letter = '\0';
+ arglist->count++;
+ pair = &arglist->pairs[arglist->count];
+ state = 0;
+ } else if (*letter == '\0') {
+ letter--;
+ arglist->count++;
+ pair = &arglist->pairs[arglist->count];
+ state = 0;
+ }
+ break;
+
+ case 3: /* Parsing list */
+ if (*letter == ']')
+ state = 2;
+ else if (*letter == '\0')
+ return -EINVAL;
+ break;
+ }
+ letter++;
+ }
+}
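A worked example of the state machine above, on an illustrative input (state
3 keeps a bracketed value intact, commas included, and the strdup'd copy in
args->str is split in place):

	struct rte_kvargs args;

	rte_eth_devargs_tokenise(&args, "representor=[0,2-4],foo=bar");
	/* args.count == 2 on return:
	 *   args.pairs[0] = { "representor", "[0,2-4]" }
	 *   args.pairs[1] = { "foo", "bar" }
	 * the caller owns args.str and must free() it */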
+
+static int
+rte_eth_devargs_parse_list(char *str, rte_eth_devargs_callback_t callback,
+ void *data)
+{
+ char *str_start;
+ int state;
+ int result;
+
+ if (*str != '[')
+ /* Single element, not a list */
+ return callback(str, data);
+
+ /* Sanity check, then strip the brackets */
+ str_start = &str[strlen(str) - 1];
+ if (*str_start != ']') {
+ RTE_LOG(ERR, EAL, "(%s): List does not end with ']'", str);
+ return -EINVAL;
+ }
+ str++;
+ *str_start = '\0';
+
+ /* Process list elements */
+ state = 0;
+ while (1) {
+ if (state == 0) {
+ if (*str == '\0')
+ break;
+ if (*str != ',') {
+ str_start = str;
+ state = 1;
+ }
+ } else if (state == 1) {
+ if (*str == ',' || *str == '\0') {
+ if (str > str_start) {
+ /* Non-empty string fragment */
+ *str = '\0';
+ result = callback(str_start, data);
+ if (result < 0)
+ return result;
+ }
+ state = 0;
+ }
+ }
+ str++;
+ }
+ return 0;
+}
+
+static int
+rte_eth_devargs_process_range(char *str, uint16_t *list, uint16_t *len_list,
+ const uint16_t max_list)
+{
+ uint16_t lo, hi, val;
+ int result;
+
+ result = sscanf(str, "%hu-%hu", &lo, &hi);
+ if (result == 1) {
+ if (*len_list >= max_list)
+ return -ENOMEM;
+ list[(*len_list)++] = lo;
+ } else if (result == 2) {
+ if (lo >= hi || lo > RTE_MAX_ETHPORTS || hi > RTE_MAX_ETHPORTS)
+ return -EINVAL;
+ for (val = lo; val <= hi; val++) {
+ if (*len_list >= max_list)
+ return -ENOMEM;
+ list[(*len_list)++] = val;
+ }
+ } else
+ return -EINVAL;
+ return 0;
+}
+
+static int
+rte_eth_devargs_parse_representor_ports(char *str, void *data)
+{
+ struct rte_eth_devargs *eth_da = data;
+
+ return rte_eth_devargs_process_range(str, eth_da->representor_ports,
+ &eth_da->nb_representor_ports, RTE_MAX_ETHPORTS);
+}
+
+int __rte_experimental
+rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
+{
+ struct rte_kvargs args;
+ struct rte_kvargs_pair *pair;
+ unsigned int i;
+ int result = 0;
+
+ memset(eth_da, 0, sizeof(*eth_da));
+
+ result = rte_eth_devargs_tokenise(&args, dargs);
+ if (result < 0)
+ goto parse_cleanup;
+
+ for (i = 0; i < args.count; i++) {
+ pair = &args.pairs[i];
+ if (strcmp("representor", pair->key) == 0) {
+ result = rte_eth_devargs_parse_list(pair->value,
+ rte_eth_devargs_parse_representor_ports,
+ eth_da);
+ if (result < 0)
+ goto parse_cleanup;
+ }
+ }
+
+parse_cleanup:
+ if (args.str)
+ free(args.str);
+
+ return result;
+}
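Putting the pieces together, a bus driver could use the parser like this
(create_representor() is a hypothetical callback;
rte_eth_devargs_process_range() expands "2-4" into 2, 3 and 4):

	struct rte_eth_devargs eth_da;
	uint16_t i;

	if (rte_eth_devargs_parse("representor=[0,2-4]", &eth_da) == 0) {
		for (i = 0; i < eth_da.nb_representor_ports; i++)
			create_representor(eth_da.representor_ports[i]);
	}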
+
+RTE_INIT(ethdev_init_log)
+{
+ rte_eth_dev_logtype = rte_log_register("lib.ethdev");
+ if (rte_eth_dev_logtype >= 0)
+ rte_log_set_level(rte_eth_dev_logtype, RTE_LOG_INFO);
+}
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ethdev/rte_ethdev.h
index 03615330..7070e9ab 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ethdev/rte_ethdev.h
@@ -159,6 +159,11 @@ extern "C" {
#include "rte_eth_ctrl.h"
#include "rte_dev_info.h"
+extern int rte_eth_dev_logtype;
+
+#define RTE_ETHDEV_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
+
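The empty literal in "" __VA_ARGS__ is a small compile-time guard:
string-literal concatenation only works if the first variadic argument is
itself a literal, so a non-literal format string fails to build. Typical use,
as throughout this patch:

	RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id);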
struct rte_mbuf;
/**
@@ -321,7 +326,7 @@ enum rte_eth_tx_mq_mode {
struct rte_eth_rxmode {
/** The multi-queue packet distribution mode to be used, e.g. RSS. */
enum rte_eth_rx_mq_mode mq_mode;
- uint32_t max_rx_pkt_len; /**< Only used if jumbo_frame enabled. */
+ uint32_t max_rx_pkt_len; /**< Only used if JUMBO_FRAME enabled. */
uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/
/**
* Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
@@ -329,33 +334,6 @@ struct rte_eth_rxmode {
* structure are allowed to be set.
*/
uint64_t offloads;
- __extension__
- /**
- * Below bitfield API is obsolete. Application should
- * enable per-port offloads using the offload field
- * above.
- */
- uint16_t header_split : 1, /**< Header Split enable. */
- hw_ip_checksum : 1, /**< IP/UDP/TCP checksum offload enable. */
- hw_vlan_filter : 1, /**< VLAN filter enable. */
- hw_vlan_strip : 1, /**< VLAN strip enable. */
- hw_vlan_extend : 1, /**< Extended VLAN enable. */
- jumbo_frame : 1, /**< Jumbo Frame Receipt enable. */
- hw_strip_crc : 1, /**< Enable CRC stripping by hardware. */
- enable_scatter : 1, /**< Enable scatter packets rx handler */
- enable_lro : 1, /**< Enable LRO */
- hw_timestamp : 1, /**< Enable HW timestamp */
- security : 1, /**< Enable rte_security offloads */
- /**
- * When set the offload bitfield should be ignored.
- * Instead per-port Rx offloads should be set on offloads
- * field above.
- * Per-queue offloads shuold be set on rte_eth_rxq_conf
- * structure.
- * This bit is temporary till rxmode bitfield offloads API will
- * be deprecated.
- */
- ignore_offload_bitfield : 1;
};
/**
@@ -702,28 +680,6 @@ struct rte_eth_rxconf {
uint64_t offloads;
};
-#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
-#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< refcnt can be ignored */
-#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< all bufs come from same mempool */
-#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< disable VLAN offload */
-#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< disable SCTP checksum offload */
-#define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400 /**< disable UDP checksum offload */
-#define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800 /**< disable TCP checksum offload */
-#define ETH_TXQ_FLAGS_NOOFFLOADS \
- (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
- ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
-#define ETH_TXQ_FLAGS_NOXSUMS \
- (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
- ETH_TXQ_FLAGS_NOXSUMTCP)
-/**
- * When set the txq_flags should be ignored,
- * instead per-queue Tx offloads will be set on offloads field
- * located on rte_eth_txq_conf struct.
- * This flag is temporary till the rte_eth_txq_conf.txq_flags
- * API will be deprecated.
- */
-#define ETH_TXQ_FLAGS_IGNORE 0x8000
-
/**
* A structure used to configure a TX ring of an Ethernet port.
*/
@@ -733,7 +689,6 @@ struct rte_eth_txconf {
uint16_t tx_free_thresh; /**< Start freeing TX buffers if there are
less free descriptors than this value. */
- uint32_t txq_flags; /**< Set flags for the Tx queue */
uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
/**
* Per-queue Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
@@ -939,6 +894,12 @@ struct rte_eth_conf {
#define DEV_RX_OFFLOAD_SCATTER 0x00002000
#define DEV_RX_OFFLOAD_TIMESTAMP 0x00004000
#define DEV_RX_OFFLOAD_SECURITY 0x00008000
+
+/**
+ * Invalid to set both DEV_RX_OFFLOAD_CRC_STRIP and DEV_RX_OFFLOAD_KEEP_CRC
+ * No DEV_RX_OFFLOAD_CRC_STRIP flag means keep CRC
+ */
+#define DEV_RX_OFFLOAD_KEEP_CRC 0x00010000
#define DEV_RX_OFFLOAD_CHECKSUM (DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM)
@@ -980,22 +941,81 @@ struct rte_eth_conf {
* the same mempool and has refcnt = 1.
*/
#define DEV_TX_OFFLOAD_SECURITY 0x00020000
+/**
+ * Device supports generic UDP tunneled packet TSO.
+ * Application must set PKT_TX_TUNNEL_UDP and other mbuf fields required
+ * for tunnel TSO.
+ */
+#define DEV_TX_OFFLOAD_UDP_TNL_TSO 0x00040000
+/**
+ * Device supports generic IP tunneled packet TSO.
+ * Application must set PKT_TX_TUNNEL_IP and other mbuf fields required
+ * for tunnel TSO.
+ */
+#define DEV_TX_OFFLOAD_IP_TNL_TSO 0x00080000
+
+#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP 0x00000001
+/**< Device supports Rx queue setup after device started. */
+#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP 0x00000002
+/**< Device supports Tx queue setup after device started. */
/*
* If new Tx offload capabilities are defined, they also must be
* mentioned in rte_tx_offload_names in rte_ethdev.c file.
*/
-struct rte_pci_device;
+/*
+ * Fallback default preferred Rx/Tx port parameters.
+ * These are used if an application requests default parameters
+ * but the PMD does not provide preferred values.
+ */
+#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
+#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
+#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
+#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1
+
+/**
+ * Preferred Rx/Tx port parameters.
+ * There are separate instances of this structure for transmission
+ * and reception respectively.
+ */
+struct rte_eth_dev_portconf {
+ uint16_t burst_size; /**< Device-preferred burst size */
+ uint16_t ring_size; /**< Device-preferred size of queue rings */
+ uint16_t nb_queues; /**< Device-preferred number of queues */
+};
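A zero in any field means the PMD has no preference. A sketch of the
application side, falling back to the documented defaults (the queue setup
paths are expected to apply similar fallback logic when passed a zero
descriptor count):

	struct rte_eth_dev_info dev_info;
	uint16_t ring_size;

	rte_eth_dev_info_get(port_id, &dev_info);
	ring_size = dev_info.default_rxportconf.ring_size;
	if (ring_size == 0)
		ring_size = RTE_ETH_DEV_FALLBACK_RX_RINGSIZE;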
+
+/**
+ * Default values for switch domain id when ethdev does not support switch
+ * domain definitions.
+ */
+#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (0)
+
+/**
+ * Ethernet device associated switch information
+ */
+struct rte_eth_switch_info {
+ const char *name; /**< switch name */
+ uint16_t domain_id; /**< switch domain id */
+ uint16_t port_id;
+ /**<
+ * mapping to the device's physical switch port as enumerated from the
+ * perspective of the embedded interconnect/switch. For an SR-IOV enabled
+ * device this may correspond to the VF_ID of each virtual function,
+ * but each driver should explicitly define the mapping of switch
+ * port identifier to that physical interconnect/switch
+ */
+};
/**
* Ethernet device information
*/
struct rte_eth_dev_info {
- struct rte_pci_device *pci_dev; /**< Device PCI information. */
+ struct rte_device *device; /**< Generic device information. */
const char *driver_name; /**< Device Driver name. */
unsigned int if_index; /**< Index to bound host interface, or 0 if none.
Use if_indextoname() to translate into an interface name. */
+ const uint32_t *dev_flags; /**< Device flags */
uint32_t min_rx_bufsize; /**< Minimum size of RX buffer. */
uint32_t max_rx_pktlen; /**< Maximum configurable length of RX pkt. */
uint16_t max_rx_queues; /**< Maximum number of RX queues. */
@@ -1006,13 +1026,13 @@ struct rte_eth_dev_info {
uint16_t max_vfs; /**< Maximum number of VFs. */
uint16_t max_vmdq_pools; /**< Maximum number of VMDq pools. */
uint64_t rx_offload_capa;
- /**< Device per port RX offload capabilities. */
+ /**< All RX offload capabilities including all per-queue ones */
uint64_t tx_offload_capa;
- /**< Device per port TX offload capabilities. */
+ /**< All TX offload capabilities including all per-queue ones */
uint64_t rx_queue_offload_capa;
- /**< Device per queue RX offload capabilities. */
+ /**< Device per-queue RX offload capabilities. */
uint64_t tx_queue_offload_capa;
- /**< Device per queue TX offload capabilities. */
+ /**< Device per-queue TX offload capabilities. */
uint16_t reta_size;
/**< Device redirection table size, the total number of entries. */
uint8_t hash_key_size; /**< Hash key size in bytes */
@@ -1029,6 +1049,17 @@ struct rte_eth_dev_info {
/** Configured number of rx/tx queues */
uint16_t nb_rx_queues; /**< Number of RX queues. */
uint16_t nb_tx_queues; /**< Number of TX queues. */
+ /** Rx parameter recommendations */
+ struct rte_eth_dev_portconf default_rxportconf;
+ /** Tx parameter recommendations */
+ struct rte_eth_dev_portconf default_txportconf;
+ /** Generic device capabilities (RTE_ETH_DEV_CAPA_). */
+ uint64_t dev_capa;
+ /**
+ * Switching information for ports on a device with an
+ * embedded managed interconnect/switch.
+ */
+ struct rte_eth_switch_info switch_info;
};
/**
@@ -1117,21 +1148,19 @@ struct rte_eth_dcb_info {
#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1
-struct rte_eth_dev;
-
#define RTE_ETH_ALL RTE_MAX_ETHPORTS
/* Macros to check for valid port */
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
if (!rte_eth_dev_is_valid_port(port_id)) { \
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
+ RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
return retval; \
} \
} while (0)
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
if (!rte_eth_dev_is_valid_port(port_id)) { \
- RTE_PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); \
+ RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
return; \
} \
} while (0)
@@ -1199,12 +1228,16 @@ typedef uint16_t (*rte_tx_callback_fn)(uint16_t port_id, uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);
/**
- * A set of values to describe the possible states of an eth device.
+ * Possible states of an ethdev port.
*/
enum rte_eth_dev_state {
+ /** Device is unused before being probed. */
RTE_ETH_DEV_UNUSED = 0,
+ /** Device is attached when allocated in probing. */
RTE_ETH_DEV_ATTACHED,
+ /** The deferred state is useless and replaced by ownership. */
RTE_ETH_DEV_DEFERRED,
+ /** Device is in removed state when plug-out is detected. */
RTE_ETH_DEV_REMOVED,
};
@@ -1233,11 +1266,10 @@ struct rte_eth_dev_owner {
#define RTE_ETH_DEV_BONDED_SLAVE 0x0004
/** Device supports device removal interrupt */
#define RTE_ETH_DEV_INTR_RMV 0x0008
+/** Device is port representor */
+#define RTE_ETH_DEV_REPRESENTOR 0x0010
/**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
* Iterates over valid ethdev ports owned by a specific owner.
*
* @param port_id
@@ -1248,7 +1280,7 @@ struct rte_eth_dev_owner {
* @return
* Next valid port id owned by owner_id, RTE_MAX_ETHPORTS if there is none.
*/
-uint64_t __rte_experimental rte_eth_find_next_owned_by(uint16_t port_id,
+uint64_t rte_eth_find_next_owned_by(uint16_t port_id,
const uint64_t owner_id);
/**
@@ -1362,9 +1394,32 @@ int __rte_experimental rte_eth_dev_owner_get(const uint16_t port_id,
* @return
* - The total number of usable Ethernet devices.
*/
+__rte_deprecated
uint16_t rte_eth_dev_count(void);
/**
+ * Get the number of ports which are usable for the application.
+ *
+ * These devices must be iterated by using the macro
+ * ``RTE_ETH_FOREACH_DEV`` or ``RTE_ETH_FOREACH_DEV_OWNED_BY``
+ * to deal with non-contiguous ranges of devices.
+ *
+ * @return
+ * The count of available Ethernet devices.
+ */
+uint16_t rte_eth_dev_count_avail(void);
+
+/**
+ * Get the total number of ports which are allocated.
+ *
+ * Some devices may not be available for the application.
+ *
+ * @return
+ * The total count of Ethernet devices.
+ */
+uint16_t __rte_experimental rte_eth_dev_count_total(void);
+
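Because port ids may now be non-contiguous (ownership, hotplug), iteration
should use the macros rather than looping from 0 to the count; a minimal
sketch (init_port() is a placeholder):

	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		/* port_id is a valid, attached, application-usable port */
		init_port(port_id);
	}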
+/**
* Attach a new Ethernet device specified by arguments.
*
* @param devargs
@@ -1376,6 +1431,7 @@ uint16_t rte_eth_dev_count(void);
* @return
* 0 on success and port_id is filled, negative on error
*/
+__rte_deprecated
int rte_eth_dev_attach(const char *devargs, uint16_t *port_id);
/**
@@ -1391,6 +1447,7 @@ int rte_eth_dev_attach(const char *devargs, uint16_t *port_id);
* @return
* 0 on success and devname is filled, negative on error
*/
+__rte_deprecated
int rte_eth_dev_detach(uint16_t port_id, char *devname);
/**
@@ -1454,8 +1511,15 @@ const char * __rte_experimental rte_eth_dev_tx_offload_name(uint64_t offload);
* The Rx offload bitfield API is obsolete and will be deprecated.
* Applications should set the ignore_bitfield_offloads bit on *rxmode*
* structure and use offloads field to set per-port offloads instead.
- * - the Receive Side Scaling (RSS) configuration when using multiple RX
- * queues per port.
+ * - Any offload set in eth_conf->[rt]xmode.offloads must be within
+ * the [rt]x_offload_capa returned by rte_eth_dev_infos_get().
+ * Any device-supported offload set in eth_conf->[rt]xmode.offloads
+ * passed to rte_eth_dev_configure() is enabled on all queues and
+ * cannot be disabled in rte_eth_[rt]x_queue_setup().
+ * - the Receive Side Scaling (RSS) configuration when using multiple RX
+ * queues per port. Any RSS hash function set in eth_conf->rss_conf.rss_hf
+ * must be within the flow_type_rss_offloads provided by drivers via
+ * the rte_eth_dev_infos_get() API.
*
* Embedding all configuration information in a single data structure
* is the more flexible method that allows the addition of new features
@@ -1510,6 +1574,13 @@ rte_eth_dev_is_removed(uint16_t port_id);
* ring.
* In addition it contains the hardware offloads features to activate using
* the DEV_RX_OFFLOAD_* flags.
+ * If an offload set in rx_conf->offloads
+ * hasn't been set in eth_conf->rxmode.offloads passed
+ * to rte_eth_dev_configure(), it is a newly added offload: it must be
+ * a per-queue offload and it is enabled only for this queue.
+ * There is no need to repeat any bit in rx_conf->offloads which has
+ * already been enabled in rte_eth_dev_configure() at port level; an
+ * offload enabled at port level can't be disabled at queue level.
* @param mb_pool
* The pointer to the memory pool from which to allocate *rte_mbuf* network
* memory buffers to populate each descriptor of the receive ring.
@@ -1561,14 +1632,14 @@ int rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
* The *tx_rs_thresh* value should be less or equal then
* *tx_free_thresh* value, and both of them should be less then
* *nb_tx_desc* - 3.
- * - The *txq_flags* member contains flags to pass to the TX queue setup
- * function to configure the behavior of the TX queue. This should be set
- * to 0 if no special configuration is required.
- * This API is obsolete and will be deprecated. Applications
- * should set it to ETH_TXQ_FLAGS_IGNORE and use
- * the offloads field below.
* - The *offloads* member contains Tx offloads to be enabled.
- * Offloads which are not set cannot be used on the datapath.
+ * If an offload set in tx_conf->offloads
+ * hasn't been set in eth_conf->txmode.offloads passed
+ * to rte_eth_dev_configure(), it is a newly added offload: it must be
+ * a per-queue offload and it is enabled only for this queue.
+ * There is no need to repeat any bit in tx_conf->offloads which has
+ * already been enabled in rte_eth_dev_configure() at port level; an
+ * offload enabled at port level can't be disabled at queue level.
*
* Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
* the transmit function to use default values.
@@ -1889,6 +1960,15 @@ int rte_eth_stats_reset(uint16_t port_id);
/**
* Retrieve names of extended statistics of an Ethernet device.
*
+ * The 'xstats_names' and 'xstats' arrays are assumed to be matched
+ * by array index:
+ * xstats_names[i].name => xstats[i].value
+ *
+ * And the array index is the same as the id field of 'struct rte_eth_xstat':
+ * xstats[i].id == i
+ *
+ * This assumption makes key-value pair matching less flexible but simpler.
+ *
* @param port_id
* The port identifier of the Ethernet device.
* @param xstats_names
@@ -1913,13 +1993,20 @@ int rte_eth_xstats_get_names(uint16_t port_id,
/**
* Retrieve extended statistics of an Ethernet device.
*
+ * The 'xstats_names' and 'xstats' arrays are assumed to be matched
+ * by array index:
+ * xstats_names[i].name => xstats[i].value
+ *
+ * And the array index is the same as the id field of 'struct rte_eth_xstat':
+ * xstats[i].id == i
+ *
+ * This assumption makes key-value pair matching less flexible but simpler.
+ *
* @param port_id
* The port identifier of the Ethernet device.
* @param xstats
* A pointer to a table of structure of type *rte_eth_xstat*
- * to be filled with device statistics ids and values: id is the
- * index of the name string in xstats_names (see rte_eth_xstats_get_names()),
- * and value is the statistic counter.
+ * to be filled with device statistics ids and values.
* This parameter can be set to NULL if n is 0.
* @param n
* The size of the xstats array (number of elements).
@@ -2030,7 +2117,7 @@ void rte_eth_xstats_reset(uint16_t port_id);
* @param stat_idx
* The per-queue packet statistics functionality number that the transmit
* queue is to be assigned.
- * The value must be in the range [0, RTE_MAX_ETHPORT_QUEUE_STATS_MAPS - 1].
+ * The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
* @return
* Zero if successful. Non-zero otherwise.
*/
@@ -2050,7 +2137,7 @@ int rte_eth_dev_set_tx_queue_stats_mapping(uint16_t port_id,
* @param stat_idx
* The per-queue packet statistics functionality number that the receive
* queue is to be assigned.
- * The value must be in the range [0, RTE_MAX_ETHPORT_QUEUE_STATS_MAPS - 1].
+ * The value must be in the range [0, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1].
* @return
* Zero if successful. Non-zero otherwise.
*/
@@ -2438,6 +2525,46 @@ int
rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt);
/**
+ * Subtypes for IPsec offload event(@ref RTE_ETH_EVENT_IPSEC) raised by
+ * eth device.
+ */
+enum rte_eth_event_ipsec_subtype {
+ RTE_ETH_EVENT_IPSEC_UNKNOWN = 0,
+ /**< Unknown event type */
+ RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW,
+ /**< Sequence number overflow */
+ RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY,
+ /**< Soft time expiry of SA */
+ RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY,
+ /**< Soft byte expiry of SA */
+ RTE_ETH_EVENT_IPSEC_MAX
+ /**< Max value of this enum */
+};
+
+/**
+ * Descriptor for @ref RTE_ETH_EVENT_IPSEC event. Used by eth dev to send extra
+ * information of the IPsec offload event.
+ */
+struct rte_eth_event_ipsec_desc {
+ enum rte_eth_event_ipsec_subtype subtype;
+ /**< Type of RTE_ETH_EVENT_IPSEC_* event */
+ uint64_t metadata;
+ /**< Event specific metadata
+ *
+ * For the following events, *userdata* registered
+ * with the *rte_security_session* would be returned
+ * as metadata,
+ *
+ * - @ref RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW
+ * - @ref RTE_ETH_EVENT_IPSEC_SA_TIME_EXPIRY
+ * - @ref RTE_ETH_EVENT_IPSEC_SA_BYTE_EXPIRY
+ *
+ * @see struct rte_security_session_conf
+ *
+ */
+};
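A sketch of an application-side handler, assuming the driver delivers the
descriptor through the callback's ret_param argument (handle_esn_overflow()
is a placeholder):

static int
ipsec_event_cb(uint16_t port_id, enum rte_eth_event_type event,
		void *cb_arg __rte_unused, void *ret_param)
{
	const struct rte_eth_event_ipsec_desc *desc = ret_param;

	if (event != RTE_ETH_EVENT_IPSEC || desc == NULL)
		return 0;
	if (desc->subtype == RTE_ETH_EVENT_IPSEC_ESN_OVERFLOW)
		handle_esn_overflow(port_id, desc->metadata);
	return 0;
}

/* registered once per port:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_IPSEC,
 *				 ipsec_event_cb, NULL);
 */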
+
+/**
* The eth device event type for interrupt, and maybe others in the future.
*/
enum rte_eth_event_type {
@@ -2452,6 +2579,7 @@ enum rte_eth_event_type {
RTE_ETH_EVENT_INTR_RMV, /**< device removal event */
RTE_ETH_EVENT_NEW, /**< port is probed */
RTE_ETH_EVENT_DESTROY, /**< port is released */
+ RTE_ETH_EVENT_IPSEC, /**< IPsec offload related event */
RTE_ETH_EVENT_MAX /**< max value of this enum */
};
@@ -3004,6 +3132,8 @@ int rte_eth_dev_filter_ctrl(uint16_t port_id, enum rte_filter_type filter_type,
int rte_eth_dev_get_dcb_info(uint16_t port_id,
struct rte_eth_dcb_info *dcb_info);
+struct rte_eth_rxtx_callback;
+
/**
* Add a callback to be called on packet RX on a given port and queue.
*
@@ -3028,7 +3158,8 @@ int rte_eth_dev_get_dcb_info(uint16_t port_id,
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
+const struct rte_eth_rxtx_callback *
+rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
/**
@@ -3056,7 +3187,8 @@ void *rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
+const struct rte_eth_rxtx_callback *
+rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
rte_rx_callback_fn fn, void *user_param);
/**
@@ -3083,11 +3215,10 @@ void *rte_eth_add_first_rx_callback(uint16_t port_id, uint16_t queue_id,
* NULL on error.
* On success, a pointer value which can later be used to remove the callback.
*/
-void *rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
+const struct rte_eth_rxtx_callback *
+rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
rte_tx_callback_fn fn, void *user_param);
-struct rte_eth_rxtx_callback;
-
/**
* Remove an RX packet callback from a given port and queue.
*
@@ -3119,7 +3250,7 @@ struct rte_eth_rxtx_callback;
* is NULL or not found for the port/queue.
*/
int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_rxtx_callback *user_cb);
+ const struct rte_eth_rxtx_callback *user_cb);
/**
* Remove a TX packet callback from a given port and queue.
@@ -3152,7 +3283,7 @@ int rte_eth_remove_rx_callback(uint16_t port_id, uint16_t queue_id,
* is NULL or not found for the port/queue.
*/
int rte_eth_remove_tx_callback(uint16_t port_id, uint16_t queue_id,
- struct rte_eth_rxtx_callback *user_cb);
+ const struct rte_eth_rxtx_callback *user_cb);
/**
* Retrieve information about given port's RX queue.
@@ -3262,6 +3393,49 @@ int rte_eth_dev_get_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
int rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info);
/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve the type and size of plugin module EEPROM
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param modinfo
+ * The type and size of plugin module EEPROM.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
+ * - others depend on the specific operation's implementation.
+ */
+int __rte_experimental
+rte_eth_dev_get_module_info(uint16_t port_id,
+ struct rte_eth_dev_module_info *modinfo);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Retrieve the data of plugin module EEPROM
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param info
+ * The template includes the plugin module EEPROM attributes, and the
+ * buffer for return plugin module EEPROM data.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EIO) if device is removed.
+ * - others depend on the specific operation's implementation.
+ */
+int __rte_experimental
+rte_eth_dev_get_module_eeprom(uint16_t port_id,
+ struct rte_dev_eeprom_info *info);
+
+/**
* Set the list of multicast addresses to filter on an Ethernet device.
*
* @param port_id
@@ -3455,8 +3629,11 @@ rte_eth_dev_l2_tunnel_offload_set(uint16_t port_id,
uint8_t en);
/**
-* Get the port id from pci address or device name
-* Ex: 0000:2:00.0 or vdev name net_pcap0
+* Get the port id from the device name. The device name should be specified
+* in one of the following forms:
+* - PCIe address (Domain:Bus:Device.Function), for example: 0000:2:00.0
+* - SoC device name, for example: fsl-gmac0
+* - vdev dpdk name, for example: net_[pcap0|null0|tap0]
*
* @param name
* pci address or name of the device
@@ -3470,12 +3647,15 @@ int
rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id);
/**
-* Get the device name from port id
+* Get the device name from a port id. The device name takes one of the
+* following forms:
+* - PCIe address (Domain:Bus:Device.Function), for example: 0000:02:00.0
+* - SoC device name, for example: fsl-gmac0
+* - vdev dpdk name, for example: net_[pcap0|null0|tun0|tap0]
*
* @param port_id
-* pointer to port identifier of the device
+* Port identifier of the device.
* @param name
-* pci address or name of the device
+* Buffer of size RTE_ETH_NAME_MAX_LEN to store the name.
* @return
* - (0) if successful.
* - (-EINVAL) on failure.
@@ -3530,7 +3710,7 @@ rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool);
* - pointer to security context on success.
*/
void *
-rte_eth_dev_get_sec_ctx(uint8_t port_id);
+rte_eth_dev_get_sec_ctx(uint16_t port_id);
#include <rte_ethdev_core.h>
@@ -3622,23 +3802,25 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ uint16_t nb_rx;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
return 0;
}
#endif
- int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
- rx_pkts, nb_pkts);
+ nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
+ rx_pkts, nb_pkts);
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
+ if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
+ struct rte_eth_rxtx_callback *cb =
+ dev->post_rx_burst_cbs[queue_id];
- if (unlikely(cb != NULL)) {
do {
nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
nb_pkts, cb->param);
@@ -3673,7 +3855,7 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
if (queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+ return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
/**
@@ -3860,7 +4042,10 @@ static inline int rte_eth_tx_descriptor_status(uint16_t port_id,
*
* If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
* invoke this function concurrently on the same tx queue without SW lock.
- * @see rte_eth_dev_info_get, struct rte_eth_txconf::txq_flags
+ * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
+ *
+ * @see rte_eth_tx_prepare to perform some prior checks or adjustments
+ * for offloads.
*
* @param port_id
* The port identifier of the Ethernet device.
@@ -3890,7 +4075,7 @@ rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
RTE_FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
if (queue_id >= dev->data->nb_tx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
return 0;
}
#endif
@@ -3976,7 +4161,7 @@ rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
if (!rte_eth_dev_is_valid_port(port_id)) {
- RTE_PMD_DEBUG_TRACE("Invalid TX port_id=%d\n", port_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid TX port_id=%u\n", port_id);
rte_errno = -EINVAL;
return 0;
}
@@ -3986,7 +4171,7 @@ rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
if (queue_id >= dev->data->nb_tx_queues) {
- RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
rte_errno = -EINVAL;
return 0;
}
@@ -4058,8 +4243,9 @@ rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
/* All packets sent, or to be dealt with by callback below */
if (unlikely(sent != to_send))
- buffer->error_callback(&buffer->pkts[sent], to_send - sent,
- buffer->error_userdata);
+ buffer->error_callback(&buffer->pkts[sent],
+ (uint16_t)(to_send - sent),
+ buffer->error_userdata);
return sent;
}
diff --git a/lib/librte_ether/rte_ethdev_core.h b/lib/librte_ethdev/rte_ethdev_core.h
index e5681e46..33d12b3a 100644
--- a/lib/librte_ether/rte_ethdev_core.h
+++ b/lib/librte_ethdev/rte_ethdev_core.h
@@ -255,7 +255,7 @@ typedef int (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
uint32_t vmdq);
/**< @internal Set a MAC address into Receive Address Address Register */
-typedef void (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
+typedef int (*eth_mac_addr_set_t)(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
/**< @internal Set a MAC address into Receive Address Address Register */
@@ -337,6 +337,14 @@ typedef int (*eth_set_eeprom_t)(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *info);
/**< @internal Program eeprom data */
+typedef int (*eth_get_module_info_t)(struct rte_eth_dev *dev,
+ struct rte_eth_dev_module_info *modinfo);
+/**< @internal Retrieve type and size of plugin module eeprom */
+
+typedef int (*eth_get_module_eeprom_t)(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *info);
+/**< @internal Retrieve plugin module eeprom data */
+
typedef int (*eth_l2_tunnel_eth_type_conf_t)
(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
/**< @internal config l2 tunnel ether type */
@@ -467,6 +475,10 @@ struct eth_dev_ops {
eth_get_eeprom_t get_eeprom; /**< Get eeprom data. */
eth_set_eeprom_t set_eeprom; /**< Set eeprom. */
+ eth_get_module_info_t get_module_info;
+ /** Get plugin module eeprom attribute. */
+ eth_get_module_eeprom_t get_module_eeprom;
+ /** Get plugin module eeprom data. */
eth_filter_ctrl_t filter_ctrl; /**< common filter control. */
diff --git a/lib/librte_ethdev/rte_ethdev_driver.h b/lib/librte_ethdev/rte_ethdev_driver.h
new file mode 100644
index 00000000..c6d9bc1a
--- /dev/null
+++ b/lib/librte_ethdev/rte_ethdev_driver.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_ETHDEV_DRIVER_H_
+#define _RTE_ETHDEV_DRIVER_H_
+
+/**
+ * @file
+ *
+ * RTE Ethernet Device PMD API
+ *
+ * These APIs are for use by Ethernet drivers; user applications shouldn't
+ * use them.
+ *
+ */
+
+#include <rte_ethdev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal
+ * Returns an ethdev slot specified by the unique identifier name.
+ *
+ * @param name
+ * The pointer to the Unique identifier name for each Ethernet device
+ * @return
+ * - The pointer to the ethdev slot, on success. NULL on error
+ */
+struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
+
+/**
+ * @internal
+ * Allocates a new ethdev slot for an ethernet device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name Unique identifier name for each Ethernet device
+ * @return
+ * - Slot in the rte_dev_devices array for a new device;
+ */
+struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
+
+/**
+ * @internal
+ * Attach to the ethdev already initialized by the primary
+ * process.
+ *
+ * @param name Ethernet device's name.
+ * @return
+ * - Success: Slot in the rte_dev_devices array for attached
+ * device.
+ * - Error: Null pointer.
+ */
+struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name);
+
+/**
+ * @internal
+ * Release the specified ethdev port.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
+
+/**
+ * @internal
+ * Release a device's queues and clear its configuration to force the user
+ * application to reconfigure it. It is for internal use only.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * void
+ */
+void _rte_eth_dev_reset(struct rte_eth_dev *dev);
+
+/**
+ * @internal Executes all the user application registered callbacks for
+ * the specific device. It is for DPDK internal use only. User
+ * applications should not call it directly.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param event
+ * Eth device interrupt event type.
+ * @param ret_param
+ * To pass data back to user application.
+ * This allows the user application to decide if a particular function
+ * is permitted or not.
+ *
+ * @return
+ * int
+ */
+int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event, void *ret_param);
+
+/**
+ * @internal
+ * This is the last step of device probing.
+ * It must be called after a port is allocated and initialized successfully.
+ *
+ * The notification RTE_ETH_EVENT_NEW is sent to other entities
+ * (libraries and applications).
+ * The state is set as RTE_ETH_DEV_ATTACHED.
+ *
+ * @param dev
+ * New ethdev port.
+ */
+void rte_eth_dev_probing_finish(struct rte_eth_dev *dev);
+
+/**
+ * Create memzone for HW rings.
+ * malloc can't be used as the physical address is needed.
+ * If the memzone has already been created, this function returns a pointer
+ * to the existing one.
+ *
+ * @param eth_dev
+ * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ * @param name
+ * The name of the memory zone
+ * @param queue_id
+ * The index of the queue to add to name
+ * @param size
+ * The size of the memory area
+ * @param align
+ * Alignment for resulting memzone. Must be a power of 2.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of NUMA.
+ */
+const struct rte_memzone *
+rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
+ uint16_t queue_id, size_t size,
+ unsigned align, int socket_id);
+
+/**
+ * @internal
+ * Atomically set the link status for the specific device.
+ * It is for use by DPDK device drivers only.
+ * User applications should not call it.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param link
+ * New link status value.
+ * @return
+ * Same convention as eth_link_update operation.
+ * 0 if link up status has changed
+ * -1 if link up status was unchanged
+ */
+static inline int
+rte_eth_linkstatus_set(struct rte_eth_dev *dev,
+ const struct rte_eth_link *new_link)
+{
+ volatile uint64_t *dev_link
+ = (volatile uint64_t *)&(dev->data->dev_link);
+ union {
+ uint64_t val64;
+ struct rte_eth_link link;
+ } orig;
+
+ RTE_BUILD_BUG_ON(sizeof(*new_link) != sizeof(uint64_t));
+
+ orig.val64 = rte_atomic64_exchange(dev_link,
+ *(const uint64_t *)new_link);
+
+ return (orig.link.link_status == new_link->link_status) ? -1 : 0;
+}
+
+/**
+ * @internal
+ * Atomically get the link speed and status.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ * @param link
+ * link status value.
+ */
+static inline void
+rte_eth_linkstatus_get(const struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ volatile uint64_t *src = (uint64_t *)&(dev->data->dev_link);
+ uint64_t *dst = (uint64_t *)link;
+
+ RTE_BUILD_BUG_ON(sizeof(*link) != sizeof(uint64_t));
+
+#ifdef __LP64__
+ /* if the CPU arch has a 64 bit unsigned long then the copy is implicitly atomic */
+ *dst = *src;
+#else
+ /* can't use rte_atomic64_read because it returns signed int */
+ do {
+ *dst = *src;
+ } while (!rte_atomic64_cmpset(src, *dst, *dst));
+#endif
+}
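Together these two helpers let PMDs drop their hand-rolled link-status
locking. A sketch of a driver's link_update callback built on them
(my_hw_link_up() is a placeholder for the hardware read):

static int
my_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status = my_hw_link_up(dev) ?
			ETH_LINK_UP : ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	/* 0 if the status changed, -1 if it was already up to date */
	return rte_eth_linkstatus_set(dev, &link);
}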
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Allocate a unique switch domain identifier.
+ *
+ * A pool of switch domain identifiers which can be allocated on request. This
+ * enables devices which support the concept of switch domains to request
+ * a switch domain id which is guaranteed to be unique from other devices
+ * running in the same process.
+ *
+ * @param domain_id
+ * switch domain identifier parameter to pass back to application
+ *
+ * @return
+ * Negative errno value on error, 0 on success.
+ */
+int __rte_experimental
+rte_eth_switch_domain_alloc(uint16_t *domain_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Free switch domain.
+ *
+ * Return a switch domain identifier to the pool of free identifiers after it
+ * is no longer in use by a device.
+ *
+ * @param domain_id
+ * switch domain identifier to free
+ *
+ * @return
+ * Negative errno value on error, 0 on success.
+ */
+int __rte_experimental
+rte_eth_switch_domain_free(uint16_t domain_id);
+
+/** Generic Ethernet device arguments */
+struct rte_eth_devargs {
+ uint16_t ports[RTE_MAX_ETHPORTS];
+ /** port(s) to enable on a multi-port single function device */
+ uint16_t nb_ports;
+ /** number of ports in ports field */
+ uint16_t representor_ports[RTE_MAX_ETHPORTS];
+ /** representor port(s) identifier to enable on the device */
+ uint16_t nb_representor_ports;
+ /** number of ports in representor port field */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * PMD helper function to parse ethdev arguments
+ *
+ * @param devargs
+ * device arguments
+ * @param eth_devargs
+ * parsed ethdev specific arguments.
+ *
+ * @return
+ * Negative errno value on error, 0 on success.
+ */
+int __rte_experimental
+rte_eth_devargs_parse(const char *devargs, struct rte_eth_devargs *eth_devargs);
+
+
+typedef int (*ethdev_init_t)(struct rte_eth_dev *ethdev, void *init_params);
+typedef int (*ethdev_bus_specific_init)(struct rte_eth_dev *ethdev,
+ void *bus_specific_init_params);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * PMD helper function for the creation of new ethdev ports.
+ *
+ * @param device
+ * rte_device handle.
+ * @param name
+ * port name.
+ * @param priv_data_size
+ * size of private data required for port.
+ * @param bus_specific_init
+ * port bus specific initialisation callback function
+ * @param bus_init_params
+ * port bus specific initialisation parameters
+ * @param ethdev_init
+ * device specific port initialization callback function
+ * @param init_params
+ * port initialisation parameters
+ *
+ * @return
+ * Negative errno value on error, 0 on success.
+ */
+int __rte_experimental
+rte_eth_dev_create(struct rte_device *device, const char *name,
+ size_t priv_data_size,
+ ethdev_bus_specific_init bus_specific_init, void *bus_init_params,
+ ethdev_init_t ethdev_init, void *init_params);
+
+
+typedef int (*ethdev_uninit_t)(struct rte_eth_dev *ethdev);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * PMD helper function for cleaning up the resources of an ethdev port on its
+ * destruction.
+ *
+ * @param ethdev
+ * ethdev handle of port.
+ * @param ethdev_uninit
+ * device specific port un-initialise callback function
+ *
+ * @return
+ * Negative errno value on error, 0 on success.
+ */
+int __rte_experimental
+rte_eth_dev_destroy(struct rte_eth_dev *ethdev, ethdev_uninit_t ethdev_uninit);
+
+/**
+ * PMD helper function to check if keeping CRC is requested
+ *
+ * @note
+ * When the CRC_STRIP offload flag is removed and the default behavior
+ * switches to stripping the CRC, as planned, this helper function will no
+ * longer be useful and will be removed. In PMDs this function will be
+ * replaced with the check:
+ * if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ *
+ * @param rx_offloads
+ * offload bits to be applied
+ *
+ * @return
+ * Return positive if keeping CRC is requested,
+ * zero if stripping CRC is requested
+ */
+static inline int
+rte_eth_dev_must_keep_crc(uint64_t rx_offloads)
+{
+ if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
+ return 0;
+
+ /* no KEEP_CRC or CRC_STRIP offload flags means keep CRC */
+ return 1;
+}
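A sketch of the intended PMD-side use during device configuration (the
my_hw_*() calls stand in for device register writes):

	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if (rte_eth_dev_must_keep_crc(rx_offloads))
		my_hw_disable_crc_strip(dev);
	else
		my_hw_enable_crc_strip(dev);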
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ETHDEV_DRIVER_H_ */
diff --git a/lib/librte_ether/rte_ethdev_pci.h b/lib/librte_ethdev/rte_ethdev_pci.h
index 897ce5b4..f652596f 100644
--- a/lib/librte_ether/rte_ethdev_pci.h
+++ b/lib/librte_ethdev/rte_ethdev_pci.h
@@ -53,8 +53,8 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
struct rte_pci_device *pci_dev)
{
if ((eth_dev == NULL) || (pci_dev == NULL)) {
- RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
- eth_dev, pci_dev);
+ RTE_ETHDEV_LOG(ERR, "NULL pointer eth_dev=%p pci_dev=%p",
+ (void *)eth_dev, (void *)pci_dev);
return;
}
@@ -70,6 +70,18 @@ rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev,
eth_dev->data->numa_node = pci_dev->device.numa_node;
}
+static inline int
+eth_dev_pci_specific_init(struct rte_eth_dev *eth_dev, void *bus_device) {
+ struct rte_pci_device *pci_dev = bus_device;
+
+ if (!pci_dev)
+ return -ENODEV;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ return 0;
+}
+
/**
* @internal
* Allocates a new ethdev slot for an ethernet device and returns the pointer
@@ -123,9 +135,6 @@ rte_eth_dev_pci_allocate(struct rte_pci_device *dev, size_t private_data_size)
static inline void
rte_eth_dev_pci_release(struct rte_eth_dev *eth_dev)
{
- /* free ether device */
- rte_eth_dev_release_port(eth_dev);
-
if (rte_eal_process_type() == RTE_PROC_PRIMARY)
rte_free(eth_dev->data->dev_private);
@@ -139,6 +148,9 @@ rte_eth_dev_pci_release(struct rte_eth_dev *eth_dev)
eth_dev->device = NULL;
eth_dev->intr_handle = NULL;
+
+ /* free ether device */
+ rte_eth_dev_release_port(eth_dev);
}
typedef int (*eth_dev_pci_callback_t)(struct rte_eth_dev *eth_dev);
@@ -163,6 +175,8 @@ rte_eth_dev_pci_generic_probe(struct rte_pci_device *pci_dev,
ret = dev_init(eth_dev);
if (ret)
rte_eth_dev_pci_release(eth_dev);
+ else
+ rte_eth_dev_probing_finish(eth_dev);
return ret;
}
diff --git a/lib/librte_ether/rte_ethdev_vdev.h b/lib/librte_ethdev/rte_ethdev_vdev.h
index 259feda3..259feda3 100644
--- a/lib/librte_ether/rte_ethdev_vdev.h
+++ b/lib/librte_ethdev/rte_ethdev_vdev.h
diff --git a/lib/librte_ether/rte_ethdev_version.map b/lib/librte_ethdev/rte_ethdev_version.map
index 87f02fb7..38f117f0 100644
--- a/lib/librte_ether/rte_ethdev_version.map
+++ b/lib/librte_ethdev/rte_ethdev_version.map
@@ -16,7 +16,6 @@ DPDK_2.2 {
rte_eth_dev_count;
rte_eth_dev_default_mac_addr_set;
rte_eth_dev_detach;
- rte_eth_dev_filter_ctrl;
rte_eth_dev_filter_supported;
rte_eth_dev_flow_ctrl_get;
rte_eth_dev_flow_ctrl_set;
@@ -128,11 +127,6 @@ DPDK_17.02 {
_rte_eth_dev_reset;
rte_eth_dev_fw_version_get;
- rte_flow_create;
- rte_flow_destroy;
- rte_flow_flush;
- rte_flow_query;
- rte_flow_validate;
} DPDK_16.07;
@@ -153,9 +147,8 @@ DPDK_17.08 {
_rte_eth_dev_callback_process;
rte_eth_dev_adjust_nb_rx_tx_desc;
- rte_flow_copy;
- rte_flow_isolate;
rte_tm_capabilities_get;
+ rte_tm_get_number_of_leaf_nodes;
rte_tm_hierarchy_commit;
rte_tm_level_capabilities_get;
rte_tm_mark_ip_dscp;
@@ -193,7 +186,6 @@ DPDK_17.11 {
rte_eth_dev_get_sec_ctx;
rte_eth_dev_pool_ops_supported;
rte_eth_dev_reset;
- rte_flow_error_set;
} DPDK_17.08;
@@ -204,9 +196,39 @@ DPDK_18.02 {
} DPDK_17.11;
+DPDK_18.05 {
+ global:
+
+ rte_eth_dev_count_avail;
+ rte_eth_dev_probing_finish;
+ rte_eth_find_next_owned_by;
+ rte_flow_copy;
+ rte_flow_create;
+ rte_flow_destroy;
+ rte_flow_error_set;
+ rte_flow_flush;
+ rte_flow_isolate;
+ rte_flow_query;
+ rte_flow_validate;
+
+} DPDK_18.02;
+
+DPDK_18.08 {
+ global:
+
+ rte_eth_dev_logtype;
+
+} DPDK_18.05;
+
EXPERIMENTAL {
global:
+ rte_eth_devargs_parse;
+ rte_eth_dev_count_total;
+ rte_eth_dev_create;
+ rte_eth_dev_destroy;
+ rte_eth_dev_get_module_eeprom;
+ rte_eth_dev_get_module_info;
rte_eth_dev_is_removed;
rte_eth_dev_owner_delete;
rte_eth_dev_owner_get;
@@ -215,7 +237,9 @@ EXPERIMENTAL {
rte_eth_dev_owner_unset;
rte_eth_dev_rx_offload_name;
rte_eth_dev_tx_offload_name;
- rte_eth_find_next_owned_by;
+ rte_eth_switch_domain_alloc;
+ rte_eth_switch_domain_free;
+ rte_flow_expand_rss;
rte_mtr_capabilities_get;
rte_mtr_create;
rte_mtr_destroy;
@@ -228,5 +252,4 @@ EXPERIMENTAL {
rte_mtr_policer_actions_update;
rte_mtr_stats_read;
rte_mtr_stats_update;
-
-} DPDK_17.11;
+};
diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ethdev/rte_flow.c
index 38f2d27b..cff4b520 100644
--- a/lib/librte_ether/rte_flow.c
+++ b/lib/librte_ethdev/rte_flow.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#include <errno.h>
@@ -38,8 +38,9 @@ static const struct rte_flow_desc_data rte_flow_desc_item[] = {
MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
MK_FLOW_ITEM(PF, 0),
MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
- MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
- MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
+ MK_FLOW_ITEM(PHY_PORT, sizeof(struct rte_flow_item_phy_port)),
+ MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
+ MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
@@ -54,6 +55,17 @@ static const struct rte_flow_desc_data rte_flow_desc_item[] = {
MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
+ MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
+ MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
+ MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
+ MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
+ MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
+ MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
+ MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
+ sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
};
/** Generate flow_action[] entry. */
@@ -72,11 +84,31 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = {
MK_FLOW_ACTION(FLAG, 0),
MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
MK_FLOW_ACTION(DROP, 0),
- MK_FLOW_ACTION(COUNT, 0),
- MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
- MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
+ MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
+ MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
MK_FLOW_ACTION(PF, 0),
MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
+ MK_FLOW_ACTION(PHY_PORT, sizeof(struct rte_flow_action_phy_port)),
+ MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
+ MK_FLOW_ACTION(OF_SET_MPLS_TTL,
+ sizeof(struct rte_flow_action_of_set_mpls_ttl)),
+ MK_FLOW_ACTION(OF_DEC_MPLS_TTL, 0),
+ MK_FLOW_ACTION(OF_SET_NW_TTL,
+ sizeof(struct rte_flow_action_of_set_nw_ttl)),
+ MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
+ MK_FLOW_ACTION(OF_COPY_TTL_OUT, 0),
+ MK_FLOW_ACTION(OF_COPY_TTL_IN, 0),
+ MK_FLOW_ACTION(OF_POP_VLAN, 0),
+ MK_FLOW_ACTION(OF_PUSH_VLAN,
+ sizeof(struct rte_flow_action_of_push_vlan)),
+ MK_FLOW_ACTION(OF_SET_VLAN_VID,
+ sizeof(struct rte_flow_action_of_set_vlan_vid)),
+ MK_FLOW_ACTION(OF_SET_VLAN_PCP,
+ sizeof(struct rte_flow_action_of_set_vlan_pcp)),
+ MK_FLOW_ACTION(OF_POP_MPLS,
+ sizeof(struct rte_flow_action_of_pop_mpls)),
+ MK_FLOW_ACTION(OF_PUSH_MPLS,
+ sizeof(struct rte_flow_action_of_push_mpls)),
};
static int
@@ -201,7 +233,7 @@ rte_flow_flush(uint16_t port_id,
int
rte_flow_query(uint16_t port_id,
struct rte_flow *flow,
- enum rte_flow_action_type action,
+ const struct rte_flow_action *action,
void *data,
struct rte_flow_error *error)
{
@@ -255,60 +287,136 @@ rte_flow_error_set(struct rte_flow_error *error,
return -code;
}
-/** Compute storage space needed by item specification. */
-static void
-flow_item_spec_size(const struct rte_flow_item *item,
- size_t *size, size_t *pad)
+/** Pattern item specification types. */
+enum item_spec_type {
+ ITEM_SPEC,
+ ITEM_LAST,
+ ITEM_MASK,
+};
+
+/** Compute storage space needed by item specification and copy it. */
+static size_t
+flow_item_spec_copy(void *buf, const struct rte_flow_item *item,
+ enum item_spec_type type)
{
- if (!item->spec) {
- *size = 0;
+ size_t size = 0;
+ const void *data =
+ type == ITEM_SPEC ? item->spec :
+ type == ITEM_LAST ? item->last :
+ type == ITEM_MASK ? item->mask :
+ NULL;
+
+ if (!item->spec || !data)
goto empty;
- }
switch (item->type) {
union {
const struct rte_flow_item_raw *raw;
} spec;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } last;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } mask;
+ union {
+ const struct rte_flow_item_raw *raw;
+ } src;
+ union {
+ struct rte_flow_item_raw *raw;
+ } dst;
+ size_t off;
- /* Not a fall-through */
case RTE_FLOW_ITEM_TYPE_RAW:
spec.raw = item->spec;
- *size = offsetof(struct rte_flow_item_raw, pattern) +
- spec.raw->length * sizeof(*spec.raw->pattern);
+ last.raw = item->last ? item->last : item->spec;
+ mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
+ src.raw = data;
+ dst.raw = buf;
+ off = RTE_ALIGN_CEIL(sizeof(struct rte_flow_item_raw),
+ sizeof(*src.raw->pattern));
+ if (type == ITEM_SPEC ||
+ (type == ITEM_MASK &&
+ ((spec.raw->length & mask.raw->length) >=
+ (last.raw->length & mask.raw->length))))
+ size = spec.raw->length & mask.raw->length;
+ else
+ size = last.raw->length & mask.raw->length;
+ size = off + size * sizeof(*src.raw->pattern);
+ if (dst.raw) {
+ memcpy(dst.raw, src.raw, sizeof(*src.raw));
+ dst.raw->pattern = memcpy((uint8_t *)dst.raw + off,
+ src.raw->pattern,
+ size - off);
+ }
break;
default:
- *size = rte_flow_desc_item[item->type].size;
+ size = rte_flow_desc_item[item->type].size;
+ if (buf)
+ memcpy(buf, data, size);
break;
}
empty:
- *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+ return RTE_ALIGN_CEIL(size, sizeof(double));
}
-/** Compute storage space needed by action configuration. */
-static void
-flow_action_conf_size(const struct rte_flow_action *action,
- size_t *size, size_t *pad)
+/** Compute storage space needed by action configuration and copy it. */
+static size_t
+flow_action_conf_copy(void *buf, const struct rte_flow_action *action)
{
- if (!action->conf) {
- *size = 0;
+ size_t size = 0;
+
+ if (!action->conf)
goto empty;
- }
switch (action->type) {
union {
const struct rte_flow_action_rss *rss;
- } conf;
+ } src;
+ union {
+ struct rte_flow_action_rss *rss;
+ } dst;
+ size_t off;
- /* Not a fall-through. */
case RTE_FLOW_ACTION_TYPE_RSS:
- conf.rss = action->conf;
- *size = offsetof(struct rte_flow_action_rss, queue) +
- conf.rss->num * sizeof(*conf.rss->queue);
+ src.rss = action->conf;
+ dst.rss = buf;
+ off = 0;
+ if (dst.rss)
+ *dst.rss = (struct rte_flow_action_rss){
+ .func = src.rss->func,
+ .level = src.rss->level,
+ .types = src.rss->types,
+ .key_len = src.rss->key_len,
+ .queue_num = src.rss->queue_num,
+ };
+ off += sizeof(*src.rss);
+ if (src.rss->key_len) {
+ off = RTE_ALIGN_CEIL(off, sizeof(double));
+ size = sizeof(*src.rss->key) * src.rss->key_len;
+ if (dst.rss)
+ dst.rss->key = memcpy
+ ((void *)((uintptr_t)dst.rss + off),
+ src.rss->key, size);
+ off += size;
+ }
+ if (src.rss->queue_num) {
+ off = RTE_ALIGN_CEIL(off, sizeof(double));
+ size = sizeof(*src.rss->queue) * src.rss->queue_num;
+ if (dst.rss)
+ dst.rss->queue = memcpy
+ ((void *)((uintptr_t)dst.rss + off),
+ src.rss->queue, size);
+ off += size;
+ }
+ size = off;
break;
default:
- *size = rte_flow_desc_action[action->type].size;
+ size = rte_flow_desc_action[action->type].size;
+ if (buf)
+ memcpy(buf, action->conf, size);
break;
}
empty:
- *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
+ return RTE_ALIGN_CEIL(size, sizeof(double));
}
/** Store a full rte_flow description. */
@@ -320,7 +428,6 @@ rte_flow_copy(struct rte_flow_desc *desc, size_t len,
{
struct rte_flow_desc *fd = NULL;
size_t tmp;
- size_t pad;
size_t off1 = 0;
size_t off2 = 0;
size_t size = 0;
@@ -345,24 +452,26 @@ store:
dst = memcpy(fd->data + off1, item,
sizeof(*item));
off1 += sizeof(*item);
- flow_item_spec_size(item, &tmp, &pad);
if (item->spec) {
if (fd)
- dst->spec = memcpy(fd->data + off2,
- item->spec, tmp);
- off2 += tmp + pad;
+ dst->spec = fd->data + off2;
+ off2 += flow_item_spec_copy
+ (fd ? fd->data + off2 : NULL, item,
+ ITEM_SPEC);
}
if (item->last) {
if (fd)
- dst->last = memcpy(fd->data + off2,
- item->last, tmp);
- off2 += tmp + pad;
+ dst->last = fd->data + off2;
+ off2 += flow_item_spec_copy
+ (fd ? fd->data + off2 : NULL, item,
+ ITEM_LAST);
}
if (item->mask) {
if (fd)
- dst->mask = memcpy(fd->data + off2,
- item->mask, tmp);
- off2 += tmp + pad;
+ dst->mask = fd->data + off2;
+ off2 += flow_item_spec_copy
+ (fd ? fd->data + off2 : NULL, item,
+ ITEM_MASK);
}
off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
@@ -387,12 +496,11 @@ store:
dst = memcpy(fd->data + off1, action,
sizeof(*action));
off1 += sizeof(*action);
- flow_action_conf_size(action, &tmp, &pad);
if (action->conf) {
if (fd)
- dst->conf = memcpy(fd->data + off2,
- action->conf, tmp);
- off2 += tmp + pad;
+ dst->conf = fd->data + off2;
+ off2 += flow_action_conf_copy
+ (fd ? fd->data + off2 : NULL, action);
}
off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
@@ -418,3 +526,110 @@ store:
}
return 0;
}
+
+/**
+ * Expand RSS flows into several possible flows according to the RSS hash
+ * fields requested and the driver capabilities.
+ */
+int __rte_experimental
+rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+ const struct rte_flow_item *pattern, uint64_t types,
+ const struct rte_flow_expand_node graph[],
+ int graph_root_index)
+{
+ const int elt_n = 8;
+ const struct rte_flow_item *item;
+ const struct rte_flow_expand_node *node = &graph[graph_root_index];
+ const int *next_node;
+ const int *stack[elt_n];
+ int stack_pos = 0;
+ struct rte_flow_item flow_items[elt_n];
+ unsigned int i;
+ size_t lsize;
+ size_t user_pattern_size = 0;
+ void *addr = NULL;
+
+ lsize = offsetof(struct rte_flow_expand_rss, entry) +
+ elt_n * sizeof(buf->entry[0]);
+ if (lsize <= size) {
+ buf->entry[0].priority = 0;
+ buf->entry[0].pattern = (void *)&buf->entry[elt_n];
+ buf->entries = 0;
+ addr = buf->entry[0].pattern;
+ }
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ const struct rte_flow_expand_node *next = NULL;
+
+ for (i = 0; node->next && node->next[i]; ++i) {
+ next = &graph[node->next[i]];
+ if (next->type == item->type)
+ break;
+ }
+ if (next)
+ node = next;
+ user_pattern_size += sizeof(*item);
+ }
+ user_pattern_size += sizeof(*item); /* Handle END item. */
+ lsize += user_pattern_size;
+ /* Copy the user pattern in the first entry of the buffer. */
+ if (lsize <= size) {
+ rte_memcpy(addr, pattern, user_pattern_size);
+ addr = (void *)(((uintptr_t)addr) + user_pattern_size);
+ buf->entries = 1;
+ }
+ /* Start expanding. */
+ memset(flow_items, 0, sizeof(flow_items));
+ user_pattern_size -= sizeof(*item);
+ next_node = node->next;
+ stack[stack_pos] = next_node;
+ node = next_node ? &graph[*next_node] : NULL;
+ while (node) {
+ flow_items[stack_pos].type = node->type;
+ if (node->rss_types & types) {
+ /*
+ * Compute the number of pattern items to copy from the
+ * expansion and copy them.
+ * When stack_pos is 0, there is one element in the
+ * expansion, plus the additional END item.
+ */
+ int elt = stack_pos + 2;
+
+ flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
+ lsize += elt * sizeof(*item) + user_pattern_size;
+ if (lsize <= size) {
+ size_t n = elt * sizeof(*item);
+
+ buf->entry[buf->entries].priority =
+ stack_pos + 1;
+ buf->entry[buf->entries].pattern = addr;
+ buf->entries++;
+ rte_memcpy(addr, buf->entry[0].pattern,
+ user_pattern_size);
+ addr = (void *)(((uintptr_t)addr) +
+ user_pattern_size);
+ rte_memcpy(addr, flow_items, n);
+ addr = (void *)(((uintptr_t)addr) + n);
+ }
+ }
+ /* Go deeper. */
+ if (node->next) {
+ next_node = node->next;
+ if (stack_pos++ == elt_n) {
+ rte_errno = E2BIG;
+ return -rte_errno;
+ }
+ stack[stack_pos] = next_node;
+ } else if (*(next_node + 1)) {
+ /* Follow up with the next possibility. */
+ ++next_node;
+ } else {
+ /* Move to the next path. */
+ if (stack_pos)
+ next_node = stack[--stack_pos];
+ next_node++;
+ stack[stack_pos] = next_node;
+ }
+ node = *next_node ? &graph[*next_node] : NULL;
+ };
+ return lsize;
+}
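
Callers are expected to use the returned size for a two-pass invocation, along the lines of this hypothetical sketch (error handling abbreviated):

#include <stdlib.h>

static struct rte_flow_expand_rss *
example_expand(const struct rte_flow_item *pattern, uint64_t rss_types,
	       const struct rte_flow_expand_node graph[], int root)
{
	struct rte_flow_expand_rss *buf;
	int ret;

	/* First pass: no buffer, only compute the required size. */
	ret = rte_flow_expand_rss(NULL, 0, pattern, rss_types, graph, root);
	if (ret < 0)
		return NULL;
	buf = malloc(ret);
	if (buf == NULL)
		return NULL;
	/* Second pass: fill buf->entry[0..entries-1] with the expansions. */
	ret = rte_flow_expand_rss(buf, ret, pattern, rss_types, graph, root);
	if (ret < 0) {
		free(buf);
		return NULL;
	}
	return buf;
}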
diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ethdev/rte_flow.h
index 13e42021..f8ba71cd 100644
--- a/lib/librte_ether/rte_flow.h
+++ b/lib/librte_ethdev/rte_flow.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#ifndef RTE_FLOW_H_
@@ -14,8 +14,12 @@
* associated actions in hardware through flow rules.
*/
+#include <stddef.h>
+#include <stdint.h>
+
#include <rte_arp.h>
#include <rte_ether.h>
+#include <rte_eth_ctrl.h>
#include <rte_icmp.h>
#include <rte_ip.h>
#include <rte_sctp.h>
@@ -31,18 +35,20 @@ extern "C" {
/**
* Flow rule attributes.
*
- * Priorities are set on two levels: per group and per rule within groups.
+ * Priorities are set on a per-rule basis within groups.
*
- * Lower values denote higher priority, the highest priority for both levels
- * is 0, so that a rule with priority 0 in group 8 is always matched after a
- * rule with priority 8 in group 0.
+ * Lower values denote higher priority; the highest priority for a flow rule
+ * is 0. When a flow matches more than one rule, the rule with the lowest
+ * priority value is always the one applied.
*
* Although optional, applications are encouraged to group similar rules as
* much as possible to fully take advantage of hardware capabilities
* (e.g. optimized matching) and work around limitations (e.g. a single
- * pattern type possibly allowed in a given group).
+ * pattern type possibly allowed in a given group). Applications should be
+ * aware that groups are not linked by default, and that they must be
+ * explicitly linked by the application using the JUMP action.
*
- * Group and priority levels are arbitrary and up to the application, they
+ * Priority levels are arbitrary and up to the application; they
* do not need to be contiguous nor start from 0, however the maximum number
* varies between devices and may be affected by existing flow rules.
*
@@ -65,10 +71,29 @@ extern "C" {
*/
struct rte_flow_attr {
uint32_t group; /**< Priority group. */
- uint32_t priority; /**< Priority level within group. */
+ uint32_t priority; /**< Rule priority level within group. */
uint32_t ingress:1; /**< Rule applies to ingress traffic. */
uint32_t egress:1; /**< Rule applies to egress traffic. */
- uint32_t reserved:30; /**< Reserved, must be zero. */
+ /**
+ * Instead of simply matching the properties of traffic as it would
+ * appear on a given DPDK port ID, enabling this attribute transfers
+ * a flow rule to the lowest possible level of any device endpoints
+ * found in the pattern.
+ *
+ * When supported, this effectively enables an application to
+ * re-route traffic not necessarily intended for it (e.g. coming
+ * from or addressed to different physical ports, VFs or
+ * applications) at the device level.
+ *
+ * It complements the behavior of some pattern items such as
+ * RTE_FLOW_ITEM_TYPE_PHY_PORT and is meaningless without them.
+ *
+ * When transferring flow rules, ingress and egress attributes keep
+ * their original meaning, as if processing traffic emitted or
+ * received by the application.
+ */
+ uint32_t transfer:1;
+ uint32_t reserved:29; /**< Reserved, must be zero. */
};
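
For example, an application wanting a rule offloaded at the lowest possible device level (e.g. an embedded switch) would combine this bit with the usual direction attributes; a minimal sketch:

static const struct rte_flow_attr attr = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
	.transfer = 1, /* offload below the DPDK port, when supported */
};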
/**
@@ -76,15 +101,13 @@ struct rte_flow_attr {
*
* Pattern items fall in two categories:
*
- * - Matching protocol headers and packet data (ANY, RAW, ETH, VLAN, IPV4,
- * IPV6, ICMP, UDP, TCP, SCTP, VXLAN and so on), usually associated with a
+ * - Matching protocol headers and packet data, usually associated with a
* specification structure. These must be stacked in the same order as the
- * protocol layers to match, starting from the lowest.
+ * protocol layers to match inside packets, starting from the lowest.
*
- * - Matching meta-data or affecting pattern processing (END, VOID, INVERT,
- * PF, VF, PORT and so on), often without a specification structure. Since
- * they do not match packet contents, these can be specified anywhere
- * within item lists without affecting others.
+ * - Matching meta-data or affecting pattern processing, often without a
+ * specification structure. Since they do not match packet contents, their
+ * position in the list is usually not relevant.
*
* See the description of individual types for more information. Those
* marked with [META] fall into the second category.
@@ -131,13 +154,8 @@ enum rte_flow_item_type {
/**
* [META]
*
- * Matches packets addressed to the physical function of the device.
- *
- * If the underlying device function differs from the one that would
- * normally receive the matched traffic, specifying this item
- * prevents it from reaching that device unless the flow rule
- * contains a PF action. Packets are not duplicated between device
- * instances by default.
+ * Matches traffic originating from (ingress) or going to (egress)
+ * the physical function of the current device.
*
* No associated specification structure.
*/
@@ -146,13 +164,8 @@ enum rte_flow_item_type {
/**
* [META]
*
- * Matches packets addressed to a virtual function ID of the device.
- *
- * If the underlying device function differs from the one that would
- * normally receive the matched traffic, specifying this item
- * prevents it from reaching that device unless the flow rule
- * contains a VF action. Packets are not duplicated between device
- * instances by default.
+ * Matches traffic originating from (ingress) or going to (egress) a
+ * given virtual function of the current device.
*
* See struct rte_flow_item_vf.
*/
@@ -161,17 +174,22 @@ enum rte_flow_item_type {
/**
* [META]
*
- * Matches packets coming from the specified physical port of the
- * underlying device.
+ * Matches traffic originating from (ingress) or going to (egress) a
+ * physical port of the underlying device.
*
- * The first PORT item overrides the physical port normally
- * associated with the specified DPDK input port (port_id). This
- * item can be provided several times to match additional physical
- * ports.
+ * See struct rte_flow_item_phy_port.
+ */
+ RTE_FLOW_ITEM_TYPE_PHY_PORT,
+
+ /**
+ * [META]
+ *
+ * Matches traffic originating from (ingress) or going to (egress) a
+ * given DPDK port ID.
*
- * See struct rte_flow_item_port.
+ * See struct rte_flow_item_port_id.
*/
- RTE_FLOW_ITEM_TYPE_PORT,
+ RTE_FLOW_ITEM_TYPE_PORT_ID,
/**
* Matches a byte string of a given length at a given offset.
@@ -323,6 +341,78 @@ enum rte_flow_item_type {
* See struct rte_flow_item_geneve.
*/
RTE_FLOW_ITEM_TYPE_GENEVE,
+
+ /**
+ * Matches a VXLAN-GPE header.
+ *
+ * See struct rte_flow_item_vxlan_gpe.
+ */
+ RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+
+ /**
+ * Matches an ARP header for Ethernet/IPv4.
+ *
+ * See struct rte_flow_item_arp_eth_ipv4.
+ */
+ RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
+
+ /**
+ * Matches the presence of any IPv6 extension header.
+ *
+ * See struct rte_flow_item_ipv6_ext.
+ */
+ RTE_FLOW_ITEM_TYPE_IPV6_EXT,
+
+ /**
+ * Matches any ICMPv6 header.
+ *
+ * See struct rte_flow_item_icmp6.
+ */
+ RTE_FLOW_ITEM_TYPE_ICMP6,
+
+ /**
+ * Matches an ICMPv6 neighbor discovery solicitation.
+ *
+ * See struct rte_flow_item_icmp6_nd_ns.
+ */
+ RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS,
+
+ /**
+ * Matches an ICMPv6 neighbor discovery advertisement.
+ *
+ * See struct rte_flow_item_icmp6_nd_na.
+ */
+ RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA,
+
+ /**
+ * Matches the presence of any ICMPv6 neighbor discovery option.
+ *
+ * See struct rte_flow_item_icmp6_nd_opt.
+ */
+ RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT,
+
+ /**
+ * Matches an ICMPv6 neighbor discovery source Ethernet link-layer
+ * address option.
+ *
+ * See struct rte_flow_item_icmp6_nd_opt_sla_eth.
+ */
+ RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH,
+
+ /**
+ * Matches an ICMPv6 neighbor discovery target Ethernet link-layer
+ * address option.
+ *
+ * See struct rte_flow_item_icmp6_nd_opt_tla_eth.
+ */
+ RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH,
+
+ /**
+ * Matches specified mark field.
+ *
+ * See struct rte_flow_item_mark.
+ */
+ RTE_FLOW_ITEM_TYPE_MARK,
};
/**
@@ -350,15 +440,15 @@ static const struct rte_flow_item_any rte_flow_item_any_mask = {
/**
* RTE_FLOW_ITEM_TYPE_VF
*
- * Matches packets addressed to a virtual function ID of the device.
+ * Matches traffic originating from (ingress) or going to (egress) a given
+ * virtual function of the current device.
+ *
+ * If supported, should work even if the virtual function is not managed by
+ * the application and thus not associated with a DPDK port ID.
*
- * If the underlying device function differs from the one that would
- * normally receive the matched traffic, specifying this item prevents it
- * from reaching that device unless the flow rule contains a VF
- * action. Packets are not duplicated between device instances by default.
+ * Note this pattern item does not match VF representor traffic which, as
+ * separate entities, should be addressed through their own DPDK port IDs.
*
- * - Likely to return an error or never match any traffic if this causes a
- * VF device to match traffic addressed to a different VF.
* - Can be specified multiple times to match traffic addressed to several
* VF IDs.
* - Can be combined with a PF item to match both PF and VF traffic.
@@ -366,7 +456,7 @@ static const struct rte_flow_item_any rte_flow_item_any_mask = {
* A zeroed mask can be used to match any VF ID.
*/
struct rte_flow_item_vf {
- uint32_t id; /**< Destination VF ID. */
+ uint32_t id; /**< VF ID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_VF. */
@@ -377,13 +467,13 @@ static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
#endif
/**
- * RTE_FLOW_ITEM_TYPE_PORT
+ * RTE_FLOW_ITEM_TYPE_PHY_PORT
*
- * Matches packets coming from the specified physical port of the underlying
- * device.
+ * Matches traffic originating from (ingress) or going to (egress) a
+ * physical port of the underlying device.
*
- * The first PORT item overrides the physical port normally associated with
- * the specified DPDK input port (port_id). This item can be provided
+ * The first PHY_PORT item overrides the physical port normally associated
+ * with the specified DPDK input port (port_id). This item can be provided
* several times to match additional physical ports.
*
* Note that physical ports are not necessarily tied to DPDK input ports
@@ -396,18 +486,44 @@ static const struct rte_flow_item_vf rte_flow_item_vf_mask = {
*
* A zeroed mask can be used to match any port index.
*/
-struct rte_flow_item_port {
+struct rte_flow_item_phy_port {
uint32_t index; /**< Physical port index. */
};
-/** Default mask for RTE_FLOW_ITEM_TYPE_PORT. */
+/** Default mask for RTE_FLOW_ITEM_TYPE_PHY_PORT. */
#ifndef __cplusplus
-static const struct rte_flow_item_port rte_flow_item_port_mask = {
+static const struct rte_flow_item_phy_port rte_flow_item_phy_port_mask = {
.index = 0x00000000,
};
#endif
/**
+ * RTE_FLOW_ITEM_TYPE_PORT_ID
+ *
+ * Matches traffic originating from (ingress) or going to (egress) a given
+ * DPDK port ID.
+ *
+ * Normally only supported if the port ID in question is known by the
+ * underlying PMD and related to the device the flow rule is created
+ * against.
+ *
+ * This must not be confused with @p PHY_PORT which refers to the physical
+ * port of a device, whereas @p PORT_ID refers to a struct rte_eth_dev
+ * object on the application side (also known as "port representor"
+ * depending on the kind of underlying device).
+ */
+struct rte_flow_item_port_id {
+ uint32_t id; /**< DPDK port ID. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_PORT_ID. */
+#ifndef __cplusplus
+static const struct rte_flow_item_port_id rte_flow_item_port_id_mask = {
+ .id = 0xffffffff,
+};
+#endif
+
+/**
* RTE_FLOW_ITEM_TYPE_RAW
*
* Matches a byte string of a given length at a given offset.
@@ -432,7 +548,7 @@ struct rte_flow_item_raw {
int32_t offset; /**< Absolute or relative offset for pattern. */
uint16_t limit; /**< Search area limit for start of pattern. */
uint16_t length; /**< Pattern length. */
- uint8_t pattern[]; /**< Byte string to look for. */
+ const uint8_t *pattern; /**< Byte string to look for. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_RAW. */
@@ -444,6 +560,7 @@ static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
.offset = 0xffffffff,
.limit = 0xffff,
.length = 0xffff,
+ .pattern = NULL,
};
#endif
@@ -451,11 +568,17 @@ static const struct rte_flow_item_raw rte_flow_item_raw_mask = {
* RTE_FLOW_ITEM_TYPE_ETH
*
* Matches an Ethernet header.
+ *
+ * The @p type field either stands for "EtherType" or "TPID" when followed
+ * by so-called layer 2.5 pattern items such as RTE_FLOW_ITEM_TYPE_VLAN. In
+ * the latter case, @p type refers to that of the outer header, with the
+ * inner EtherType/TPID provided by the subsequent pattern item. This is the
+ * same order as on the wire.
*/
struct rte_flow_item_eth {
struct ether_addr dst; /**< Destination MAC. */
struct ether_addr src; /**< Source MAC. */
- rte_be16_t type; /**< EtherType. */
+ rte_be16_t type; /**< EtherType or TPID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_ETH. */
@@ -472,19 +595,20 @@ static const struct rte_flow_item_eth rte_flow_item_eth_mask = {
*
* Matches an 802.1Q/ad VLAN tag.
*
- * This type normally follows either RTE_FLOW_ITEM_TYPE_ETH or
- * RTE_FLOW_ITEM_TYPE_VLAN.
+ * The corresponding standard outer EtherType (TPID) values are
+ * ETHER_TYPE_VLAN or ETHER_TYPE_QINQ. It can be overridden by the preceding
+ * pattern item.
*/
struct rte_flow_item_vlan {
- rte_be16_t tpid; /**< Tag protocol identifier. */
rte_be16_t tci; /**< Tag control information. */
+ rte_be16_t inner_type; /**< Inner EtherType or TPID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_VLAN. */
#ifndef __cplusplus
static const struct rte_flow_item_vlan rte_flow_item_vlan_mask = {
- .tpid = RTE_BE16(0x0000),
- .tci = RTE_BE16(0xffff),
+ .tci = RTE_BE16(0x0fff),
+ .inner_type = RTE_BE16(0x0000),
};
#endif
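
With the TPID moved to the preceding ETH item, matching 802.1Q traffic now looks like the following sketch; note the default ETH mask ignores the type field (and the default VLAN mask ignores inner_type), so explicit masks are needed to actually match those fields:

struct rte_flow_item_eth eth_spec = {
	.type = RTE_BE16(ETHER_TYPE_VLAN), /* outer TPID now lives here */
};
struct rte_flow_item_eth eth_mask = {
	.type = RTE_BE16(0xffff),
};
struct rte_flow_item_vlan vlan_spec = {
	.tci = RTE_BE16(0x0123),                 /* PCP/DEI/VID to match */
	.inner_type = RTE_BE16(ETHER_TYPE_IPv4), /* encapsulated protocol */
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &eth_spec, .mask = &eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};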
@@ -514,7 +638,8 @@ static const struct rte_flow_item_ipv4 rte_flow_item_ipv4_mask = {
*
* Matches an IPv6 header.
*
- * Note: IPv6 options are handled by dedicated pattern items.
+ * Note: IPv6 options are handled by dedicated pattern items, see
+ * RTE_FLOW_ITEM_TYPE_IPV6_EXT.
*/
struct rte_flow_item_ipv6 {
struct ipv6_hdr hdr; /**< IPv6 header definition. */
@@ -633,9 +758,11 @@ static const struct rte_flow_item_vxlan rte_flow_item_vxlan_mask = {
* RTE_FLOW_ITEM_TYPE_E_TAG.
*
* Matches a E-tag header.
+ *
+ * The corresponding standard outer EtherType (TPID) value is
+ * ETHER_TYPE_ETAG. It can be overridden by the preceding pattern item.
*/
struct rte_flow_item_e_tag {
- rte_be16_t tpid; /**< Tag protocol identifier (0x893F). */
/**
* E-Tag control information (E-TCI).
* E-PCP (3b), E-DEI (1b), ingress E-CID base (12b).
@@ -645,6 +772,7 @@ struct rte_flow_item_e_tag {
rte_be16_t rsvd_grp_ecid_b;
uint8_t in_ecid_e; /**< Ingress E-CID ext. */
uint8_t ecid_e; /**< E-CID ext. */
+ rte_be16_t inner_type; /**< Inner EtherType or TPID. */
};
/** Default mask for RTE_FLOW_ITEM_TYPE_E_TAG. */
@@ -815,6 +943,241 @@ static const struct rte_flow_item_geneve rte_flow_item_geneve_mask = {
#endif
/**
+ * RTE_FLOW_ITEM_TYPE_VXLAN_GPE (draft-ietf-nvo3-vxlan-gpe-05).
+ *
+ * Matches a VXLAN-GPE header.
+ */
+struct rte_flow_item_vxlan_gpe {
+ uint8_t flags; /**< Normally 0x0c (I and P flags). */
+ uint8_t rsvd0[2]; /**< Reserved, normally 0x0000. */
+ uint8_t protocol; /**< Protocol type. */
+ uint8_t vni[3]; /**< VXLAN identifier. */
+ uint8_t rsvd1; /**< Reserved, normally 0x00. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_VXLAN_GPE. */
+#ifndef __cplusplus
+static const struct rte_flow_item_vxlan_gpe rte_flow_item_vxlan_gpe_mask = {
+ .vni = "\xff\xff\xff",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4
+ *
+ * Matches an ARP header for Ethernet/IPv4.
+ */
+struct rte_flow_item_arp_eth_ipv4 {
+ rte_be16_t hrd; /**< Hardware type, normally 1. */
+ rte_be16_t pro; /**< Protocol type, normally 0x0800. */
+ uint8_t hln; /**< Hardware address length, normally 6. */
+ uint8_t pln; /**< Protocol address length, normally 4. */
+ rte_be16_t op; /**< Opcode (1 for request, 2 for reply). */
+ struct ether_addr sha; /**< Sender hardware address. */
+ rte_be32_t spa; /**< Sender IPv4 address. */
+ struct ether_addr tha; /**< Target hardware address. */
+ rte_be32_t tpa; /**< Target IPv4 address. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4. */
+#ifndef __cplusplus
+static const struct rte_flow_item_arp_eth_ipv4
+rte_flow_item_arp_eth_ipv4_mask = {
+ .sha.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .spa = RTE_BE32(0xffffffff),
+ .tha.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .tpa = RTE_BE32(0xffffffff),
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_IPV6_EXT
+ *
+ * Matches the presence of any IPv6 extension header.
+ *
+ * Normally preceded by any of:
+ *
+ * - RTE_FLOW_ITEM_TYPE_IPV6
+ * - RTE_FLOW_ITEM_TYPE_IPV6_EXT
+ */
+struct rte_flow_item_ipv6_ext {
+ uint8_t next_hdr; /**< Next header. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_IPV6_EXT. */
+#ifndef __cplusplus
+static const
+struct rte_flow_item_ipv6_ext rte_flow_item_ipv6_ext_mask = {
+ .next_hdr = 0xff,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ICMP6
+ *
+ * Matches any ICMPv6 header.
+ */
+struct rte_flow_item_icmp6 {
+ uint8_t type; /**< ICMPv6 type. */
+ uint8_t code; /**< ICMPv6 code. */
+ uint16_t checksum; /**< ICMPv6 checksum. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6. */
+#ifndef __cplusplus
+static const struct rte_flow_item_icmp6 rte_flow_item_icmp6_mask = {
+ .type = 0xff,
+ .code = 0xff,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS
+ *
+ * Matches an ICMPv6 neighbor discovery solicitation.
+ */
+struct rte_flow_item_icmp6_nd_ns {
+ uint8_t type; /**< ICMPv6 type, normally 135. */
+ uint8_t code; /**< ICMPv6 code, normally 0. */
+ rte_be16_t checksum; /**< ICMPv6 checksum. */
+ rte_be32_t reserved; /**< Reserved, normally 0. */
+ uint8_t target_addr[16]; /**< Target address. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS. */
+#ifndef __cplusplus
+static const
+struct rte_flow_item_icmp6_nd_ns rte_flow_item_icmp6_nd_ns_mask = {
+ .target_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA
+ *
+ * Matches an ICMPv6 neighbor discovery advertisement.
+ */
+struct rte_flow_item_icmp6_nd_na {
+ uint8_t type; /**< ICMPv6 type, normally 136. */
+ uint8_t code; /**< ICMPv6 code, normally 0. */
+ rte_be16_t checksum; /**< ICMPv6 checksum. */
+ /**
+ * Route flag (1b), solicited flag (1b), override flag (1b),
+ * reserved (29b).
+ */
+ rte_be32_t rso_reserved;
+ uint8_t target_addr[16]; /**< Target address. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA. */
+#ifndef __cplusplus
+static const
+struct rte_flow_item_icmp6_nd_na rte_flow_item_icmp6_nd_na_mask = {
+ .target_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
+ *
+ * Matches the presence of any ICMPv6 neighbor discovery option.
+ *
+ * Normally preceded by any of:
+ *
+ * - RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA
+ * - RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS
+ * - RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
+ */
+struct rte_flow_item_icmp6_nd_opt {
+ uint8_t type; /**< ND option type. */
+ uint8_t length; /**< ND option length. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT. */
+#ifndef __cplusplus
+static const struct rte_flow_item_icmp6_nd_opt
+rte_flow_item_icmp6_nd_opt_mask = {
+ .type = 0xff,
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH
+ *
+ * Matches an ICMPv6 neighbor discovery source Ethernet link-layer address
+ * option.
+ *
+ * Normally preceded by any of:
+ *
+ * - RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA
+ * - RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
+ */
+struct rte_flow_item_icmp6_nd_opt_sla_eth {
+ uint8_t type; /**< ND option type, normally 1. */
+ uint8_t length; /**< ND option length, normally 1. */
+ struct ether_addr sla; /**< Source Ethernet LLA. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH. */
+#ifndef __cplusplus
+static const struct rte_flow_item_icmp6_nd_opt_sla_eth
+rte_flow_item_icmp6_nd_opt_sla_eth_mask = {
+ .sla.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+};
+#endif
+
+/**
+ * RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH
+ *
+ * Matches an ICMPv6 neighbor discovery target Ethernet link-layer address
+ * option.
+ *
+ * Normally preceded by any of:
+ *
+ * - RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS
+ * - RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT
+ */
+struct rte_flow_item_icmp6_nd_opt_tla_eth {
+ uint8_t type; /**< ND option type, normally 2. */
+ uint8_t length; /**< ND option length, normally 1. */
+ struct ether_addr tla; /**< Target Ethernet LLA. */
+};
+
+/** Default mask for RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH. */
+#ifndef __cplusplus
+static const struct rte_flow_item_icmp6_nd_opt_tla_eth
+rte_flow_item_icmp6_nd_opt_tla_eth_mask = {
+ .tla.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+};
+#endif
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * RTE_FLOW_ITEM_TYPE_MARK
+ *
+ * Matches an arbitrary integer value which was set using the ``MARK`` action
+ * in a previously matched rule.
+ *
+ * This item can only be specified once as a match criterion as the ``MARK``
+ * action can only be specified once in a flow action.
+ *
+ * This value is arbitrary and application-defined. The maximum allowed value
+ * depends on the underlying implementation.
+ *
+ * Depending on the underlying implementation, the MARK item may be supported
+ * on the physical device, with virtual groups in the PMD, or not at all.
+ */
+struct rte_flow_item_mark {
+ uint32_t id; /**< Integer value to match against. */
+};
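
A rule matching packets previously marked by another rule could be sketched as (the id value is arbitrary):

struct rte_flow_item_mark mark_spec = {
	.id = 0x2a, /* value set by a MARK action in an earlier rule */
};
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_MARK, .spec = &mark_spec },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};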
+
+/**
* Matching pattern item definition.
*
* A pattern is formed by stacking items starting from the lowest protocol
@@ -859,33 +1222,28 @@ struct rte_flow_item {
*
* Each possible action is represented by a type. Some have associated
* configuration structures. Several actions combined in a list can be
- * affected to a flow rule. That list is not ordered.
+ * assigned to a flow rule and are performed in order.
*
* They fall in three categories:
*
- * - Terminating actions (such as QUEUE, DROP, RSS, PF, VF) that prevent
- * processing matched packets by subsequent flow rules, unless overridden
- * with PASSTHRU.
- *
- * - Non terminating actions (PASSTHRU, DUP) that leave matched packets up
- * for additional processing by subsequent flow rules.
+ * - Actions that modify the fate of matching traffic, for instance by
+ * dropping or assigning it a specific destination.
*
- * - Other non terminating meta actions that do not affect the fate of
- * packets (END, VOID, MARK, FLAG, COUNT).
+ * - Actions that modify matching traffic contents or its properties. This
+ * includes adding/removing encapsulation, encryption, compression and
+ * marks.
*
- * When several actions are combined in a flow rule, they should all have
- * different types (e.g. dropping a packet twice is not possible).
+ * - Actions related to the flow rule itself, such as updating counters or
+ * making it non-terminating.
*
- * Only the last action of a given type is taken into account. PMDs still
- * perform error checking on the entire list.
+ * Flow rules are terminating by default; not specifying any action of the
+ * fate kind results in undefined behavior. This applies to both ingress and
+ * egress.
*
- * Note that PASSTHRU is the only action able to override a terminating
- * rule.
+ * PASSTHRU, when supported, makes a flow rule non-terminating.
*/
enum rte_flow_action_type {
/**
- * [META]
- *
* End marker for action lists. Prevents further processing of
* actions, thereby ending the list.
*
@@ -894,8 +1252,6 @@ enum rte_flow_action_type {
RTE_FLOW_ACTION_TYPE_END,
/**
- * [META]
- *
* Used as a placeholder for convenience. It is ignored and simply
* discarded by PMDs.
*
@@ -904,18 +1260,23 @@ enum rte_flow_action_type {
RTE_FLOW_ACTION_TYPE_VOID,
/**
- * Leaves packets up for additional processing by subsequent flow
- * rules. This is the default when a rule does not contain a
- * terminating action, but can be specified to force a rule to
- * become non-terminating.
+ * Leaves traffic up for additional processing by subsequent flow
+ * rules; makes a flow rule non-terminating.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_PASSTHRU,
/**
- * [META]
+ * RTE_FLOW_ACTION_TYPE_JUMP
+ *
+ * Redirects packets to a group on the current device.
*
+ * See struct rte_flow_action_jump.
+ */
+ RTE_FLOW_ACTION_TYPE_JUMP,
+
+ /**
* Attaches an integer value to packets and sets PKT_RX_FDIR and
* PKT_RX_FDIR_ID mbuf flags.
*
@@ -924,8 +1285,6 @@ enum rte_flow_action_type {
RTE_FLOW_ACTION_TYPE_MARK,
/**
- * [META]
- *
* Flags packets. Similar to MARK without a specific value; only
* sets the PKT_RX_FDIR mbuf flag.
*
@@ -950,28 +1309,16 @@ enum rte_flow_action_type {
RTE_FLOW_ACTION_TYPE_DROP,
/**
- * [META]
- *
- * Enables counters for this rule.
+ * Enables counters for this flow rule.
*
* These counters can be retrieved and reset through rte_flow_query(),
* see struct rte_flow_query_count.
*
- * No associated configuration structure.
+ * See struct rte_flow_action_count.
*/
RTE_FLOW_ACTION_TYPE_COUNT,
/**
- * Duplicates packets to a given queue index.
- *
- * This is normally combined with QUEUE, however when used alone, it
- * is actually similar to QUEUE + PASSTHRU.
- *
- * See struct rte_flow_action_dup.
- */
- RTE_FLOW_ACTION_TYPE_DUP,
-
- /**
* Similar to QUEUE, except RSS is additionally performed on packets
* to spread them among several queues according to the provided
* parameters.
@@ -981,22 +1328,37 @@ enum rte_flow_action_type {
RTE_FLOW_ACTION_TYPE_RSS,
/**
- * Redirects packets to the physical function (PF) of the current
- * device.
+ * Directs matching traffic to the physical function (PF) of the
+ * current device.
*
* No associated configuration structure.
*/
RTE_FLOW_ACTION_TYPE_PF,
/**
- * Redirects packets to the virtual function (VF) of the current
- * device with the specified ID.
+ * Directs matching traffic to a given virtual function of the
+ * current device.
*
* See struct rte_flow_action_vf.
*/
RTE_FLOW_ACTION_TYPE_VF,
/**
+ * Directs packets to a given physical port index of the underlying
+ * device.
+ *
+ * See struct rte_flow_action_phy_port.
+ */
+ RTE_FLOW_ACTION_TYPE_PHY_PORT,
+
+ /**
+ * Directs matching traffic to a given DPDK port ID.
+ *
+ * See struct rte_flow_action_port_id.
+ */
+ RTE_FLOW_ACTION_TYPE_PORT_ID,
+
+ /**
* Traffic metering and policing (MTR).
*
* See struct rte_flow_action_meter.
@@ -1010,7 +1372,139 @@ enum rte_flow_action_type {
*
* See struct rte_flow_action_security.
*/
- RTE_FLOW_ACTION_TYPE_SECURITY
+ RTE_FLOW_ACTION_TYPE_SECURITY,
+
+ /**
+ * Implements OFPAT_SET_MPLS_TTL ("MPLS TTL") as defined by the
+ * OpenFlow Switch Specification.
+ *
+ * See struct rte_flow_action_of_set_mpls_ttl.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_SET_MPLS_TTL,
+
+ /**
+ * Implements OFPAT_DEC_MPLS_TTL ("decrement MPLS TTL") as defined
+ * by the OpenFlow Switch Specification.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_DEC_MPLS_TTL,
+
+ /**
+ * Implements OFPAT_SET_NW_TTL ("IP TTL") as defined by the OpenFlow
+ * Switch Specification.
+ *
+ * See struct rte_flow_action_of_set_nw_ttl.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_SET_NW_TTL,
+
+ /**
+ * Implements OFPAT_DEC_NW_TTL ("decrement IP TTL") as defined by
+ * the OpenFlow Switch Specification.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL,
+
+ /**
+ * Implements OFPAT_COPY_TTL_OUT ("copy TTL "outwards" -- from
+ * next-to-outermost to outermost") as defined by the OpenFlow
+ * Switch Specification.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_OUT,
+
+ /**
+ * Implements OFPAT_COPY_TTL_IN ("copy TTL "inwards" -- from
+ * outermost to next-to-outermost") as defined by the OpenFlow
+ * Switch Specification.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_COPY_TTL_IN,
+
+ /**
+ * Implements OFPAT_POP_VLAN ("pop the outer VLAN tag") as defined
+ * by the OpenFlow Switch Specification.
+ *
+ * No associated configuration structure.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_POP_VLAN,
+
+ /**
+ * Implements OFPAT_PUSH_VLAN ("push a new VLAN tag") as defined by
+ * the OpenFlow Switch Specification.
+ *
+ * See struct rte_flow_action_of_push_vlan.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
+
+ /**
+ * Implements OFPAT_SET_VLAN_VID ("set the 802.1q VLAN id") as
+ * defined by the OpenFlow Switch Specification.
+ *
+ * See struct rte_flow_action_of_set_vlan_vid.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
+
+ /**
+ * Implements OFPAT_SET_VLAN_PCP ("set the 802.1q priority") as
+ * defined by the OpenFlow Switch Specification.
+ *
+ * See struct rte_flow_action_of_set_vlan_pcp.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
+
+ /**
+ * Implements OFPAT_POP_MPLS ("pop the outer MPLS tag") as defined
+ * by the OpenFlow Switch Specification.
+ *
+ * See struct rte_flow_action_of_pop_mpls.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_POP_MPLS,
+
+ /**
+ * Implements OFPAT_PUSH_MPLS ("push a new MPLS tag") as defined by
+ * the OpenFlow Switch Specification.
+ *
+ * See struct rte_flow_action_of_push_mpls.
+ */
+ RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS,
+
+ /**
+ * Encapsulate flow in VXLAN tunnel as defined in
+ * rte_flow_action_vxlan_encap action structure.
+ *
+ * See struct rte_flow_action_vxlan_encap.
+ */
+ RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
+
+ /**
+ * Decapsulate outer most VXLAN tunnel from matched flow.
+ *
+ * If the flow pattern does not define a valid VXLAN tunnel (as specified by
+ * RFC 7348), the PMD should return an RTE_FLOW_ERROR_TYPE_ACTION
+ * error.
+ */
+ RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
+
+ /**
+ * Encapsulate flow in NVGRE tunnel defined in the
+ * rte_flow_action_nvgre_encap action structure.
+ *
+ * See struct rte_flow_action_nvgre_encap.
+ */
+ RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP,
+
+ /**
+ * Decapsulate outer most NVGRE tunnel from matched flow.
+ *
+ * If the flow pattern does not define a valid NVGRE tunnel (as specified by
+ * RFC 7637), the PMD should return an RTE_FLOW_ERROR_TYPE_ACTION
+ * error.
+ */
+ RTE_FLOW_ACTION_TYPE_NVGRE_DECAP,
};
/**
@@ -1028,16 +1522,62 @@ struct rte_flow_action_mark {
};
/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * RTE_FLOW_ACTION_TYPE_JUMP
+ *
+ * Redirects packets to a group on the current device.
+ *
+ * In a hierarchy of groups, which can be used to represent physical or logical
+ * flow tables on the device, this action allows a rule to redirect matching
+ * traffic to a group on that device.
+ */
+struct rte_flow_action_jump {
+ uint32_t group;
+};
+
+/**
* RTE_FLOW_ACTION_TYPE_QUEUE
*
* Assign packets to a given queue index.
- *
- * Terminating by default.
*/
struct rte_flow_action_queue {
uint16_t index; /**< Queue index to use. */
};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * RTE_FLOW_ACTION_TYPE_COUNT
+ *
+ * Adds a counter action to a matched flow.
+ *
+ * If more than one count action is specified in a single flow rule, then each
+ * action must specify a unique id.
+ *
+ * Counters can be retrieved and reset through ``rte_flow_query()``, see
+ * ``struct rte_flow_query_count``.
+ *
+ * The shared flag indicates whether the counter is unique to the flow rule the
+ * action is specified with, or whether it is a shared counter.
+ *
+ * For a count action with the shared flag set, a global device namespace is
+ * assumed for the counter id, so that any matched flow rules using a count
+ * action with the same counter id on the same port will contribute to that
+ * counter.
+ *
+ * For ports within the same switch domain, the counter id namespace extends
+ * to all ports within that switch domain.
+ */
+struct rte_flow_action_count {
+ uint32_t shared:1; /**< Share counter ID with other flow rules. */
+ uint32_t reserved:31; /**< Reserved, must be zero. */
+ uint32_t id; /**< Counter ID. */
+};
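
A sketch of sharing one counter across rules (the id value is arbitrary; the QUEUE action is only there to give the rule a fate):

static const struct rte_flow_action_count shared_count = {
	.shared = 1,
	.id = 42, /* all rules on this port using id 42 share one counter */
};
/* Used in the action list of any number of rules on the same port: */
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &shared_count },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
	  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};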
+
/**
* RTE_FLOW_ACTION_TYPE_COUNT (query)
*
@@ -1053,54 +1593,99 @@ struct rte_flow_query_count {
};
/**
- * RTE_FLOW_ACTION_TYPE_DUP
- *
- * Duplicates packets to a given queue index.
- *
- * This is normally combined with QUEUE, however when used alone, it is
- * actually similar to QUEUE + PASSTHRU.
- *
- * Non-terminating by default.
- */
-struct rte_flow_action_dup {
- uint16_t index; /**< Queue index to duplicate packets to. */
-};
-
-/**
* RTE_FLOW_ACTION_TYPE_RSS
*
* Similar to QUEUE, except RSS is additionally performed on packets to
* spread them among several queues according to the provided parameters.
*
+ * Unlike global RSS settings used by other DPDK APIs, unsetting the
+ * @p types field does not disable RSS in a flow rule. Doing so instead
+ * requests safe unspecified "best-effort" settings from the underlying PMD,
+ * which depending on the flow rule, may result in anything ranging from
+ * empty (single queue) to all-inclusive RSS.
+ *
* Note: RSS hash result is stored in the hash.rss mbuf field which overlaps
* hash.fdir.lo. Since the MARK action sets the hash.fdir.hi field only,
* both can be requested simultaneously.
- *
- * Terminating by default.
*/
struct rte_flow_action_rss {
- const struct rte_eth_rss_conf *rss_conf; /**< RSS parameters. */
- uint16_t num; /**< Number of entries in queue[]. */
- uint16_t queue[]; /**< Queues indices to use. */
+ enum rte_eth_hash_function func; /**< RSS hash function to apply. */
+ /**
+ * Packet encapsulation level RSS hash @p types apply to.
+ *
+ * - @p 0 requests the default behavior. Depending on the packet
+ * type, it can mean outermost, innermost, anything in between or
+ * even no RSS.
+ *
+ * It basically stands for the innermost encapsulation level RSS
+ * can be performed on according to PMD and device capabilities.
+ *
+ * - @p 1 requests RSS to be performed on the outermost packet
+ * encapsulation level.
+ *
+ * - @p 2 and subsequent values request RSS to be performed on the
+ * specified inner packet encapsulation level, from outermost to
+ * innermost (lower to higher values).
+ *
+ * Values other than @p 0 are not necessarily supported.
+ *
+ * Requesting a specific RSS level on unrecognized traffic results
+ * in undefined behavior. For predictable results, it is recommended
+ * to make the flow rule pattern match packet headers up to the
+ * requested encapsulation level so that only matching traffic goes
+ * through.
+ */
+ uint32_t level;
+ uint64_t types; /**< Specific RSS hash types (see ETH_RSS_*). */
+ uint32_t key_len; /**< Hash key length in bytes. */
+ uint32_t queue_num; /**< Number of entries in @p queue. */
+ const uint8_t *key; /**< Hash key. */
+ const uint16_t *queue; /**< Queue indices to use. */
};
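
Filling the reworked structure might look like this sketch (the key and queue arrays are illustrative, not device requirements):

static const uint8_t rss_key[40] = { /* device-appropriate hash key */ };
static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss rss_conf = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0, /* default encapsulation level */
	.types = ETH_RSS_IP | ETH_RSS_UDP,
	.key_len = sizeof(rss_key),
	.queue_num = RTE_DIM(rss_queues),
	.key = rss_key,
	.queue = rss_queues,
};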
/**
* RTE_FLOW_ACTION_TYPE_VF
*
- * Redirects packets to a virtual function (VF) of the current device.
+ * Directs matching traffic to a given virtual function of the current
+ * device.
*
* Packets matched by a VF pattern item can be redirected to their original
* VF ID instead of the specified one. This parameter may not be available
* and is not guaranteed to work properly if the VF part is matched by a
* prior flow rule or if packets are not addressed to a VF in the first
* place.
- *
- * Terminating by default.
*/
struct rte_flow_action_vf {
uint32_t original:1; /**< Use original VF ID if possible. */
uint32_t reserved:31; /**< Reserved, must be zero. */
- uint32_t id; /**< VF ID to redirect packets to. */
+ uint32_t id; /**< VF ID. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_PHY_PORT
+ *
+ * Directs packets to a given physical port index of the underlying
+ * device.
+ *
+ * @see RTE_FLOW_ITEM_TYPE_PHY_PORT
+ */
+struct rte_flow_action_phy_port {
+ uint32_t original:1; /**< Use original port index if possible. */
+ uint32_t reserved:31; /**< Reserved, must be zero. */
+ uint32_t index; /**< Physical port index. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_PORT_ID
+ *
+ * Directs matching traffic to a given DPDK port ID.
+ *
+ * @see RTE_FLOW_ITEM_TYPE_PORT_ID
+ */
+struct rte_flow_action_port_id {
+ uint32_t original:1; /**< Use original DPDK port ID if possible. */
+ uint32_t reserved:31; /**< Reserved, must be zero. */
+ uint32_t id; /**< DPDK port ID. */
};
/**
@@ -1110,8 +1695,6 @@ struct rte_flow_action_vf {
*
* Packets matched by items of this type can be either dropped or passed to the
* next item with their color set by the MTR object.
- *
- * Non-terminating by default.
*/
struct rte_flow_action_meter {
uint32_t mtr_id; /**< MTR object ID created with rte_mtr_create(). */
@@ -1141,14 +1724,151 @@ struct rte_flow_action_meter {
* direction.
*
* Multiple flows can be configured to use the same security session.
- *
- * Non-terminating by default.
*/
struct rte_flow_action_security {
void *security_session; /**< Pointer to security session structure. */
};
/**
+ * RTE_FLOW_ACTION_TYPE_OF_SET_MPLS_TTL
+ *
+ * Implements OFPAT_SET_MPLS_TTL ("MPLS TTL") as defined by the OpenFlow
+ * Switch Specification.
+ */
+struct rte_flow_action_of_set_mpls_ttl {
+ uint8_t mpls_ttl; /**< MPLS TTL. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_OF_SET_NW_TTL
+ *
+ * Implements OFPAT_SET_NW_TTL ("IP TTL") as defined by the OpenFlow Switch
+ * Specification.
+ */
+struct rte_flow_action_of_set_nw_ttl {
+ uint8_t nw_ttl; /**< IP TTL. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN
+ *
+ * Implements OFPAT_PUSH_VLAN ("push a new VLAN tag") as defined by the
+ * OpenFlow Switch Specification.
+ */
+struct rte_flow_action_of_push_vlan {
+ rte_be16_t ethertype; /**< EtherType. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID
+ *
+ * Implements OFPAT_SET_VLAN_VID ("set the 802.1q VLAN id") as defined by
+ * the OpenFlow Switch Specification.
+ */
+struct rte_flow_action_of_set_vlan_vid {
+ rte_be16_t vlan_vid; /**< VLAN id. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP
+ *
+ * Implements OFPAT_SET_VLAN_PCP ("set the 802.1q priority") as defined by
+ * the OpenFlow Switch Specification.
+ */
+struct rte_flow_action_of_set_vlan_pcp {
+ uint8_t vlan_pcp; /**< VLAN priority. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_OF_POP_MPLS
+ *
+ * Implements OFPAT_POP_MPLS ("pop the outer MPLS tag") as defined by the
+ * OpenFlow Switch Specification.
+ */
+struct rte_flow_action_of_pop_mpls {
+ rte_be16_t ethertype; /**< EtherType. */
+};
+
+/**
+ * RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS
+ *
+ * Implements OFPAT_PUSH_MPLS ("push a new MPLS tag") as defined by the
+ * OpenFlow Switch Specification.
+ */
+struct rte_flow_action_of_push_mpls {
+ rte_be16_t ethertype; /**< EtherType. */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+ *
+ * VXLAN tunnel end-point encapsulation data definition
+ *
+ * The tunnel definition is provided through the flow item pattern; the
+ * provided pattern must conform to RFC 7348 for the tunnel specified. The
+ * flow definition must be provided in order, from the RTE_FLOW_ITEM_TYPE_ETH
+ * definition up to the end item, which is specified by RTE_FLOW_ITEM_TYPE_END.
+ *
+ * The mask field allows the user to specify which fields in the flow item
+ * definitions can be ignored and which contain valid data that can be used
+ * verbatim.
+ *
+ * Note: the last field is not used in the definition of a tunnel and can be
+ * ignored.
+ *
+ * Valid flow definitions for RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP include:
+ *
+ * - ETH / IPV4 / UDP / VXLAN / END
+ * - ETH / IPV6 / UDP / VXLAN / END
+ * - ETH / VLAN / IPV4 / UDP / VXLAN / END
+ *
+ */
+struct rte_flow_action_vxlan_encap {
+ /**
+ * Encapsulating vxlan tunnel definition
+ * (terminated by the END pattern item).
+ */
+ struct rte_flow_item *definition;
+};
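
Building the definition pattern for this action could be sketched as follows (MAC/IP addresses and the VNI are placeholders):

struct rte_flow_item_eth enc_eth = { /* outer MAC addresses */ };
struct rte_flow_item_ipv4 enc_ipv4 = { /* outer IPv4 addresses */ };
struct rte_flow_item_udp enc_udp = {
	.hdr.dst_port = RTE_BE16(4789), /* IANA-assigned VXLAN port */
};
struct rte_flow_item_vxlan enc_vxlan = { .vni = "\x00\x00\x2a" };
struct rte_flow_item enc_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &enc_eth },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &enc_ipv4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &enc_udp },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &enc_vxlan },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
struct rte_flow_action_vxlan_encap encap = { .definition = enc_pattern };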
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP
+ *
+ * NVGRE tunnel end-point encapsulation data definition
+ *
+ * The tunnel definition is provided through the flow item pattern; the
+ * provided pattern must conform to RFC 7637. The flow definition must be
+ * provided in order, from the RTE_FLOW_ITEM_TYPE_ETH definition up to the end
+ * item, which is specified by RTE_FLOW_ITEM_TYPE_END.
+ *
+ * The mask field allows the user to specify which fields in the flow item
+ * definitions can be ignored and which contain valid data that can be used
+ * verbatim.
+ *
+ * Note: the last field is not used in the definition of a tunnel and can be
+ * ignored.
+ *
+ * Valid flow definitions for RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP include:
+ *
+ * - ETH / IPV4 / NVGRE / END
+ * - ETH / VLAN / IPV6 / NVGRE / END
+ *
+ */
+struct rte_flow_action_nvgre_encap {
+ /**
+ * Encapsulating NVGRE tunnel definition
+ * (terminated by the END pattern item).
+ */
+ struct rte_flow_item *definition;
+};
+
+/*
* Definition of a single action.
*
* A list of actions is terminated by a END action.
@@ -1182,10 +1902,15 @@ enum rte_flow_error_type {
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, /**< Priority field. */
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, /**< Ingress field. */
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, /**< Egress field. */
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, /**< Transfer field. */
RTE_FLOW_ERROR_TYPE_ATTR, /**< Attributes structure. */
RTE_FLOW_ERROR_TYPE_ITEM_NUM, /**< Pattern length. */
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, /**< Item specification. */
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST, /**< Item specification range. */
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, /**< Item specification mask. */
RTE_FLOW_ERROR_TYPE_ITEM, /**< Specific pattern item. */
RTE_FLOW_ERROR_TYPE_ACTION_NUM, /**< Number of actions. */
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, /**< Action configuration. */
RTE_FLOW_ERROR_TYPE_ACTION, /**< Specific action. */
};
@@ -1351,7 +2076,7 @@ rte_flow_flush(uint16_t port_id,
* @param flow
* Flow rule handle to query.
* @param action
- * Action type to query.
+ * Action definition as defined in original flow rule.
* @param[in, out] data
* Pointer to storage for the associated query data type.
* @param[out] error
@@ -1364,7 +2089,7 @@ rte_flow_flush(uint16_t port_id,
int
rte_flow_query(uint16_t port_id,
struct rte_flow *flow,
- enum rte_flow_action_type action,
+ const struct rte_flow_action *action,
void *data,
struct rte_flow_error *error);
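For illustration (not part of the patch): with this change the caller passes the action definition itself rather than just its type, e.g. to query a COUNT action. The flow and port_id variables are assumed to exist; <inttypes.h> provides PRIu64.

	struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT, /* matches the action in the rule */
	};
	struct rte_flow_query_count counters = { .reset = 1 };
	struct rte_flow_error error;
	int ret = rte_flow_query(port_id, flow, &count_action, &counters, &error);

	if (ret == 0 && counters.hits_set)
		printf("hits: %" PRIu64 "\n", counters.hits);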
diff --git a/lib/librte_ether/rte_flow_driver.h b/lib/librte_ethdev/rte_flow_driver.h
index 7778c8e0..688f7230 100644
--- a/lib/librte_ether/rte_flow_driver.h
+++ b/lib/librte_ethdev/rte_flow_driver.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#ifndef RTE_FLOW_DRIVER_H_
@@ -88,7 +88,7 @@ struct rte_flow_ops {
int (*query)
(struct rte_eth_dev *,
struct rte_flow *,
- enum rte_flow_action_type,
+ const struct rte_flow_action *,
void *,
struct rte_flow_error *);
/** See rte_flow_isolate(). */
@@ -114,6 +114,69 @@ struct rte_flow_ops {
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error);
+/** Helper macro to build input graph for rte_flow_expand_rss(). */
+#define RTE_FLOW_EXPAND_RSS_NEXT(...) \
+ (const int []){ \
+ __VA_ARGS__, 0, \
+ }
+
+/** Node object of input graph for rte_flow_expand_rss(). */
+struct rte_flow_expand_node {
+ const int *const next;
+ /**<
+ * List of next node indexes. An index value of 0 terminates the list.
+ */
+ const enum rte_flow_item_type type;
+ /**< Pattern item type of current node. */
+ uint64_t rss_types;
+ /**<
+ * RSS types bit-field associated with this node
+ * (see ETH_RSS_* definitions).
+ */
+};
+
+/** Object returned by rte_flow_expand_rss(). */
+struct rte_flow_expand_rss {
+ uint32_t entries;
+ /**< Number of valid entries in @p entry[]. */
+ struct {
+ struct rte_flow_item *pattern; /**< Expanded pattern array. */
+ uint32_t priority; /**< Priority offset for each expansion. */
+ } entry[];
+};
+
+/**
+ * Expand RSS flows into several possible flows according to the RSS hash
+ * fields requested and the driver capabilities.
+ *
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * @param[out] buf
+ * Buffer to store the result expansion.
+ * @param[in] size
+ * Buffer size in bytes. If 0, @p buf can be NULL.
+ * @param[in] pattern
+ * User flow pattern.
+ * @param[in] types
+ * RSS types to expand (see ETH_RSS_* definitions).
+ * @param[in] graph
+ * Input graph to expand @p pattern according to @p types.
+ * @param[in] graph_root_index
+ * Index of root node in @p graph, typically 0.
+ *
+ * @return
+ * A positive value representing the size of @p buf in bytes regardless of
+ * @p size on success; a negative errno value otherwise, in which case
+ * rte_errno is set. The following errors are defined:
+ *
+ * -E2BIG: graph depth of @p graph is too deep.
+ */
+int __rte_experimental
+rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
+ const struct rte_flow_item *pattern, uint64_t types,
+ const struct rte_flow_expand_node graph[],
+ int graph_root_index);
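For illustration (not part of the patch): a hypothetical three-node input graph with the root at index 0, followed by an expansion call. The buf, size and pattern variables are assumed to be prepared as documented above.

	enum { ROOT, ETH_NODE, IPV4_NODE }; /* hypothetical node indexes */

	static const struct rte_flow_expand_node graph[] = {
		[ROOT] = {
			.next = RTE_FLOW_EXPAND_RSS_NEXT(ETH_NODE),
			.type = RTE_FLOW_ITEM_TYPE_END,
		},
		[ETH_NODE] = {
			.next = RTE_FLOW_EXPAND_RSS_NEXT(IPV4_NODE),
			.type = RTE_FLOW_ITEM_TYPE_ETH,
		},
		[IPV4_NODE] = {
			.type = RTE_FLOW_ITEM_TYPE_IPV4,
			.rss_types = ETH_RSS_IPV4,
		},
	};

	int ret = rte_flow_expand_rss(buf, size, pattern, ETH_RSS_IPV4,
				      graph, ROOT);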
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_ether/rte_mtr.c b/lib/librte_ethdev/rte_mtr.c
index 1046cb5f..1046cb5f 100644
--- a/lib/librte_ether/rte_mtr.c
+++ b/lib/librte_ethdev/rte_mtr.c
diff --git a/lib/librte_ether/rte_mtr.h b/lib/librte_ethdev/rte_mtr.h
index c4819b27..c4819b27 100644
--- a/lib/librte_ether/rte_mtr.h
+++ b/lib/librte_ethdev/rte_mtr.h
diff --git a/lib/librte_ether/rte_mtr_driver.h b/lib/librte_ethdev/rte_mtr_driver.h
index c9a6d7c3..c9a6d7c3 100644
--- a/lib/librte_ether/rte_mtr_driver.h
+++ b/lib/librte_ethdev/rte_mtr_driver.h
diff --git a/lib/librte_ether/rte_tm.c b/lib/librte_ethdev/rte_tm.c
index 9709454f..9709454f 100644
--- a/lib/librte_ether/rte_tm.c
+++ b/lib/librte_ethdev/rte_tm.c
diff --git a/lib/librte_ether/rte_tm.h b/lib/librte_ethdev/rte_tm.h
index 2b25a871..955f02ff 100644
--- a/lib/librte_ether/rte_tm.h
+++ b/lib/librte_ethdev/rte_tm.h
@@ -377,6 +377,22 @@ struct rte_tm_capabilities {
*/
uint32_t sched_wfq_weight_max;
+ /** WRED packet mode support. When non-zero, this parameter indicates
+ * that there is at least one leaf node that supports the WRED packet
+ * mode, which might not be true for all the leaf nodes. In packet
+ * mode, the WRED thresholds specify the queue length in packets, as
+ * opposed to bytes.
+ */
+ int cman_wred_packet_mode_supported;
+
+ /** WRED byte mode support. When non-zero, this parameter indicates that
+ * there is at least one leaf node that supports the WRED byte mode,
+ * which might not be true for all the leaf nodes. In byte mode, the
+ * WRED thresholds specify the queue length in bytes, as opposed to
+ * packets.
+ */
+ int cman_wred_byte_mode_supported;
+
/** Head drop algorithm support. When non-zero, this parameter
* indicates that there is at least one leaf node that supports the head
* drop algorithm, which might not be true for all the leaf nodes.
@@ -628,6 +644,24 @@ struct rte_tm_level_capabilities {
*/
uint32_t shaper_shared_n_max;
+ /** WRED packet mode support. When non-zero, this
+ * parameter indicates that there is at least one leaf
+ * node on this level that supports the WRED packet
+ * mode, which might not be true for all the leaf
+ * nodes. In packet mode, the WRED thresholds specify
+ * the queue length in packets, as opposed to bytes.
+ */
+ int cman_wred_packet_mode_supported;
+
+ /** WRED byte mode support. When non-zero, this
+ * parameter indicates that there is at least one leaf
+ * node on this level that supports the WRED byte mode,
+ * which might not be true for all the leaf nodes. In
+ * byte mode, the WRED thresholds specify the queue
+ * length in bytes, as opposed to packets.
+ */
+ int cman_wred_byte_mode_supported;
+
/** Head drop algorithm support. When non-zero, this
* parameter indicates that there is at least one leaf
* node on this level that supports the head drop
@@ -743,6 +777,12 @@ struct rte_tm_node_capabilities {
/** Items valid only for leaf nodes. */
struct {
+ /** WRED packet mode support for current node. */
+ int cman_wred_packet_mode_supported;
+
+ /** WRED byte mode support for current node. */
+ int cman_wred_byte_mode_supported;
+
/** Head drop algorithm support for current node. */
int cman_head_drop_supported;
@@ -791,10 +831,10 @@ enum rte_tm_cman_mode {
*/
struct rte_tm_red_params {
/** Minimum queue threshold */
- uint16_t min_th;
+ uint32_t min_th;
/** Maximum queue threshold */
- uint16_t max_th;
+ uint32_t max_th;
/** Inverse of packet marking probability maximum value (maxp), i.e.
* maxp_inv = 1 / maxp
@@ -815,10 +855,19 @@ struct rte_tm_red_params {
* WRED context is used to perform congestion management for a single leaf
* node, while a shared WRED context is used to perform congestion management
* for a group of leaf nodes.
+ *
+ * @see struct rte_tm_capabilities::cman_wred_packet_mode_supported
+ * @see struct rte_tm_capabilities::cman_wred_byte_mode_supported
*/
struct rte_tm_wred_params {
/** One set of RED parameters per packet color */
struct rte_tm_red_params red_params[RTE_TM_COLORS];
+
+ /** When non-zero, the *min_th* and *max_th* thresholds are specified
+ * in packets (WRED packet mode). When zero, the *min_th* and *max_th*
+ * thresholds are specified in bytes (WRED byte mode).
+ */
+ int packet_mode;
};
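For illustration (not part of the patch): a sketch of WRED parameters using the new packet mode, with all thresholds counted in packets rather than bytes.

	struct rte_tm_wred_params wred = {
		.red_params = {
			[RTE_TM_GREEN]  = { .min_th = 48, .max_th = 96,
					    .maxp_inv = 10, .wq_log2 = 9 },
			[RTE_TM_YELLOW] = { .min_th = 32, .max_th = 64,
					    .maxp_inv = 10, .wq_log2 = 9 },
			[RTE_TM_RED]    = { .min_th = 16, .max_th = 32,
					    .maxp_inv = 10, .wq_log2 = 9 },
		},
		.packet_mode = 1, /* thresholds above are packet counts */
	};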
/**
@@ -1520,6 +1569,10 @@ rte_tm_hierarchy_commit(uint16_t port_id,
/**
* Traffic manager node parent update
*
+ * This function may be used to move a node and its children to a different
+ * parent. Additionally, if the new parent is the same as the current parent,
+ * this function will update the priority/weight of an existing node.
+ *
* Restriction for root node: its parent cannot be changed.
*
* This function can only be called after the rte_tm_hierarchy_commit()
@@ -1643,7 +1696,7 @@ rte_tm_node_stats_update(uint16_t port_id,
* @param[in] port_id
* The port identifier of the Ethernet device.
* @param[in] node_id
- * Node ID. Needs to be valid leaf node ID.
+ * Node ID. Needs to be valid non-leaf node ID.
* @param[in] wfq_weight_mode
* WFQ weight mode for each SP priority. When NULL, it indicates that WFQ is
* to be used for all priorities. When non-NULL, it points to a pre-allocated
diff --git a/lib/librte_ether/rte_tm_driver.h b/lib/librte_ethdev/rte_tm_driver.h
index 90114ff5..90114ff5 100644
--- a/lib/librte_ether/rte_tm_driver.h
+++ b/lib/librte_ethdev/rte_tm_driver.h
diff --git a/lib/librte_ether/rte_ethdev_driver.h b/lib/librte_ether/rte_ethdev_driver.h
deleted file mode 100644
index 45f08c65..00000000
--- a/lib/librte_ether/rte_ethdev_driver.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
- */
-
-#ifndef _RTE_ETHDEV_DRIVER_H_
-#define _RTE_ETHDEV_DRIVER_H_
-
-/**
- * @file
- *
- * RTE Ethernet Device PMD API
- *
- * These APIs for the use from Ethernet drivers, user applications shouldn't
- * use them.
- *
- */
-
-#include <rte_ethdev.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @internal
- * Returns a ethdev slot specified by the unique identifier name.
- *
- * @param name
- * The pointer to the Unique identifier name for each Ethernet device
- * @return
- * - The pointer to the ethdev slot, on success. NULL on error
- */
-struct rte_eth_dev *rte_eth_dev_allocated(const char *name);
-
-/**
- * @internal
- * Allocates a new ethdev slot for an ethernet device and returns the pointer
- * to that slot for the driver to use.
- *
- * @param name Unique identifier name for each Ethernet device
- * @param type Device type of this Ethernet device
- * @return
- * - Slot in the rte_dev_devices array for a new device;
- */
-struct rte_eth_dev *rte_eth_dev_allocate(const char *name);
-
-/**
- * @internal
- * Attach to the ethdev already initialized by the primary
- * process.
- *
- * @param name Ethernet device's name.
- * @return
- * - Success: Slot in the rte_dev_devices array for attached
- * device.
- * - Error: Null pointer.
- */
-struct rte_eth_dev *rte_eth_dev_attach_secondary(const char *name);
-
-/**
- * @internal
- * Release the specified ethdev port.
- *
- * @param eth_dev
- * The *eth_dev* pointer is the address of the *rte_eth_dev* structure.
- * @return
- * - 0 on success, negative on error
- */
-int rte_eth_dev_release_port(struct rte_eth_dev *eth_dev);
-
-/**
- * @internal
- * Release device queues and clear its configuration to force the user
- * application to reconfigure it. It is for internal use only.
- *
- * @param dev
- * Pointer to struct rte_eth_dev.
- *
- * @return
- * void
- */
-void _rte_eth_dev_reset(struct rte_eth_dev *dev);
-
-/**
- * @internal Executes all the user application registered callbacks for
- * the specific device. It is for DPDK internal user only. User
- * application should not call it directly.
- *
- * @param dev
- * Pointer to struct rte_eth_dev.
- * @param event
- * Eth device interrupt event type.
- * @param ret_param
- * To pass data back to user application.
- * This allows the user application to decide if a particular function
- * is permitted or not.
- *
- * @return
- * int
- */
-int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *ret_param);
-
-/**
- * Create memzone for HW rings.
- * malloc can't be used as the physical address is needed.
- * If the memzone is already created, then this function returns a ptr
- * to the old one.
- *
- * @param eth_dev
- * The *eth_dev* pointer is the address of the *rte_eth_dev* structure
- * @param name
- * The name of the memory zone
- * @param queue_id
- * The index of the queue to add to name
- * @param size
- * The sizeof of the memory area
- * @param align
- * Alignment for resulting memzone. Must be a power of 2.
- * @param socket_id
- * The *socket_id* argument is the socket identifier in case of NUMA.
- */
-const struct rte_memzone *
-rte_eth_dma_zone_reserve(const struct rte_eth_dev *eth_dev, const char *name,
- uint16_t queue_id, size_t size,
- unsigned align, int socket_id);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _RTE_ETHDEV_DRIVER_H_ */
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
index d27dd070..47f599a6 100644
--- a/lib/librte_eventdev/Makefile
+++ b/lib/librte_eventdev/Makefile
@@ -8,18 +8,26 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_eventdev.a
# library version
-LIBABIVER := 3
+LIBABIVER := 5
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-LDLIBS += -lrte_eal -lrte_ring -lrte_ethdev -lrte_hash
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+CFLAGS += -DLINUX
+else
+CFLAGS += -DBSD
+endif
+LDLIBS += -lrte_eal -lrte_ring -lrte_ethdev -lrte_hash -lrte_mempool -lrte_timer
+LDLIBS += -lrte_mbuf -lrte_cryptodev -lpthread
# library source files
SRCS-y += rte_eventdev.c
SRCS-y += rte_event_ring.c
SRCS-y += rte_event_eth_rx_adapter.c
+SRCS-y += rte_event_timer_adapter.c
+SRCS-y += rte_event_crypto_adapter.c
# export include files
SYMLINK-y-include += rte_eventdev.h
@@ -28,6 +36,9 @@ SYMLINK-y-include += rte_eventdev_pmd_pci.h
SYMLINK-y-include += rte_eventdev_pmd_vdev.h
SYMLINK-y-include += rte_event_ring.h
SYMLINK-y-include += rte_event_eth_rx_adapter.h
+SYMLINK-y-include += rte_event_timer_adapter.h
+SYMLINK-y-include += rte_event_timer_adapter_pmd.h
+SYMLINK-y-include += rte_event_crypto_adapter.h
# versioning export map
EXPORT_MAP := rte_eventdev_version.map
diff --git a/lib/librte_eventdev/meson.build b/lib/librte_eventdev/meson.build
index d1a99602..3cbaf298 100644
--- a/lib/librte_eventdev/meson.build
+++ b/lib/librte_eventdev/meson.build
@@ -1,14 +1,27 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 5
allow_experimental_apis = true
+
+if host_machine.system() == 'linux'
+ cflags += '-DLINUX'
+else
+ cflags += '-DBSD'
+endif
+
sources = files('rte_eventdev.c',
'rte_event_ring.c',
- 'rte_event_eth_rx_adapter.c')
+ 'rte_event_eth_rx_adapter.c',
+ 'rte_event_timer_adapter.c',
+ 'rte_event_crypto_adapter.c')
headers = files('rte_eventdev.h',
'rte_eventdev_pmd.h',
'rte_eventdev_pmd_pci.h',
'rte_eventdev_pmd_vdev.h',
'rte_event_ring.h',
- 'rte_event_eth_rx_adapter.h')
-deps += ['ring', 'ethdev', 'hash']
+ 'rte_event_eth_rx_adapter.h',
+ 'rte_event_timer_adapter.h',
+ 'rte_event_timer_adapter_pmd.h',
+ 'rte_event_crypto_adapter.h')
+deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/lib/librte_eventdev/rte_event_crypto_adapter.c b/lib/librte_eventdev/rte_event_crypto_adapter.c
new file mode 100644
index 00000000..11b28ca9
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_crypto_adapter.c
@@ -0,0 +1,1128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <stdbool.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_service_component.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_crypto_adapter.h"
+
+#define BATCH_SIZE 32
+#define DEFAULT_MAX_NB 128
+#define CRYPTO_ADAPTER_NAME_LEN 32
+#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
+#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
+
+/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
+ * iterations of eca_crypto_adapter_enq_run()
+ */
+#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
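Worth noting (not part of the patch): eca_crypto_adapter_enq_run() below applies this value as a mask (CRYPTO_ENQ_FLUSH_THRESHOLD - 1), so it must remain a power of two.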
+
+struct rte_event_crypto_adapter {
+ /* Event device identifier */
+ uint8_t eventdev_id;
+ /* Event port identifier */
+ uint8_t event_port_id;
+ /* Store event device's implicit release capability */
+ uint8_t implicit_release_disabled;
+ /* Max crypto ops processed in any service function invocation */
+ uint32_t max_nb;
+ /* Lock to serialize config updates with service function */
+ rte_spinlock_t lock;
+ /* Next crypto device to be processed */
+ uint16_t next_cdev_id;
+ /* Per crypto device structure */
+ struct crypto_device_info *cdevs;
+ /* Loop counter to flush crypto ops */
+ uint16_t transmit_loop_count;
+ /* Per instance stats structure */
+ struct rte_event_crypto_adapter_stats crypto_stats;
+ /* Configuration callback for rte_service configuration */
+ rte_event_crypto_adapter_conf_cb conf_cb;
+ /* Configuration callback argument */
+ void *conf_arg;
+ /* Set if default_cb is being used */
+ int default_cb_arg;
+ /* Service initialization state */
+ uint8_t service_inited;
+ /* Memory allocation name */
+ char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
+ /* Socket identifier cached from eventdev */
+ int socket_id;
+ /* Per adapter EAL service */
+ uint32_t service_id;
+ /* No. of queue pairs configured */
+ uint16_t nb_qps;
+ /* Adapter mode */
+ enum rte_event_crypto_adapter_mode mode;
+} __rte_cache_aligned;
+
+/* Per crypto device information */
+struct crypto_device_info {
+ /* Pointer to cryptodev */
+ struct rte_cryptodev *dev;
+ /* Pointer to queue pair info */
+ struct crypto_queue_pair_info *qpairs;
+ /* Next queue pair to be processed */
+ uint16_t next_queue_pair_id;
+ /* Set to indicate cryptodev->eventdev packet
+ * transfer uses a hardware mechanism
+ */
+ uint8_t internal_event_port;
+ /* Set to indicate processing has been started */
+ uint8_t dev_started;
+ /* If num_qpairs > 0, the start callback will
+ * be invoked if not already invoked
+ */
+ uint16_t num_qpairs;
+} __rte_cache_aligned;
+
+/* Per queue pair information */
+struct crypto_queue_pair_info {
+ /* Set to indicate queue pair is enabled */
+ bool qp_enabled;
+ /* Pointer to hold rte_crypto_ops for batching */
+ struct rte_crypto_op **op_buffer;
+ /* No of crypto ops accumulated */
+ uint8_t len;
+} __rte_cache_aligned;
+
+static struct rte_event_crypto_adapter **event_crypto_adapter;
+
+/* Macros to check for valid adapter */
+#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
+ if (!eca_valid_id(id)) { \
+ RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
+ return retval; \
+ } \
+} while (0)
+
+static inline int
+eca_valid_id(uint8_t id)
+{
+ return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
+}
+
+static int
+eca_init(void)
+{
+ const char *name = "crypto_adapter_array";
+ const struct rte_memzone *mz;
+ unsigned int sz;
+
+ sz = sizeof(*event_crypto_adapter) *
+ RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
+ RTE_CACHE_LINE_SIZE);
+ if (mz == NULL) {
+ RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
+ PRId32, rte_errno);
+ return -rte_errno;
+ }
+ }
+
+ event_crypto_adapter = mz->addr;
+ return 0;
+}
+
+static inline struct rte_event_crypto_adapter *
+eca_id_to_adapter(uint8_t id)
+{
+ return event_crypto_adapter ?
+ event_crypto_adapter[id] : NULL;
+}
+
+static int
+eca_default_config_cb(uint8_t id, uint8_t dev_id,
+ struct rte_event_crypto_adapter_conf *conf, void *arg)
+{
+ struct rte_event_dev_config dev_conf;
+ struct rte_eventdev *dev;
+ uint8_t port_id;
+ int started;
+ int ret;
+ struct rte_event_port_conf *port_conf = arg;
+ struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
+ if (started) {
+ if (rte_event_dev_start(dev_id))
+ return -EIO;
+ }
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
+ return ret;
+ }
+
+ conf->event_port_id = port_id;
+ conf->max_nb = DEFAULT_MAX_NB;
+ if (started)
+ ret = rte_event_dev_start(dev_id);
+
+ adapter->default_cb_arg = 1;
+ return ret;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_crypto_adapter_conf_cb conf_cb,
+ enum rte_event_crypto_adapter_mode mode,
+ void *conf_arg)
+{
+ struct rte_event_crypto_adapter *adapter;
+ char mem_name[CRYPTO_ADAPTER_NAME_LEN];
+ struct rte_event_dev_info dev_info;
+ int socket_id;
+ uint8_t i;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (conf_cb == NULL)
+ return -EINVAL;
+
+ if (event_crypto_adapter == NULL) {
+ ret = eca_init();
+ if (ret)
+ return ret;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter != NULL) {
+ RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
+ return -EEXIST;
+ }
+
+ socket_id = rte_event_dev_socket_id(dev_id);
+ snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
+ "rte_event_crypto_adapter_%d", id);
+
+ adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (adapter == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
+ return -ENOMEM;
+ }
+
+ ret = rte_event_dev_info_get(dev_id, &dev_info);
+ if (ret < 0) {
+ RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
+ dev_id, dev_info.driver_name);
+ return ret;
+ }
+
+ adapter->implicit_release_disabled = (dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+ adapter->eventdev_id = dev_id;
+ adapter->socket_id = socket_id;
+ adapter->conf_cb = conf_cb;
+ adapter->conf_arg = conf_arg;
+ adapter->mode = mode;
+ strcpy(adapter->mem_name, mem_name);
+ adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
+ rte_cryptodev_count() *
+ sizeof(struct crypto_device_info), 0,
+ socket_id);
+ if (adapter->cdevs == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
+ rte_free(adapter);
+ return -ENOMEM;
+ }
+
+ rte_spinlock_init(&adapter->lock);
+ for (i = 0; i < rte_cryptodev_count(); i++)
+ adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
+
+ event_crypto_adapter[id] = adapter;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config,
+ enum rte_event_crypto_adapter_mode mode)
+{
+ struct rte_event_port_conf *pc;
+ int ret;
+
+ if (port_config == NULL)
+ return -EINVAL;
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ pc = rte_malloc(NULL, sizeof(*pc), 0);
+ if (pc == NULL)
+ return -ENOMEM;
+ *pc = *port_config;
+ ret = rte_event_crypto_adapter_create_ext(id, dev_id,
+ eca_default_config_cb,
+ mode,
+ pc);
+ if (ret)
+ rte_free(pc);
+
+ return ret;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_free(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ if (adapter->nb_qps) {
+ RTE_EDEV_LOG_ERR("%" PRIu16 "Queue pairs not deleted",
+ adapter->nb_qps);
+ return -EBUSY;
+ }
+
+ if (adapter->default_cb_arg)
+ rte_free(adapter->conf_arg);
+ rte_free(adapter->cdevs);
+ rte_free(adapter);
+ event_crypto_adapter[id] = NULL;
+
+ return 0;
+}
+
+static inline unsigned int
+eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
+ struct rte_event *ev, unsigned int cnt)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ union rte_event_crypto_metadata *m_data = NULL;
+ struct crypto_queue_pair_info *qp_info = NULL;
+ struct rte_crypto_op *crypto_op;
+ unsigned int i, n;
+ uint16_t qp_id, len, ret;
+ uint8_t cdev_id;
+
+ len = 0;
+ ret = 0;
+ n = 0;
+ stats->event_deq_count += cnt;
+
+ for (i = 0; i < cnt; i++) {
+ crypto_op = ev[i].event_ptr;
+ if (crypto_op == NULL)
+ continue;
+ if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ m_data = rte_cryptodev_sym_session_get_user_data(
+ crypto_op->sym->session);
+ if (m_data == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+
+ cdev_id = m_data->request_info.cdev_id;
+ qp_id = m_data->request_info.queue_pair_id;
+ qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
+ if (qp_info == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+ len = qp_info->len;
+ qp_info->op_buffer[len] = crypto_op;
+ len++;
+ } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+ crypto_op->private_data_offset) {
+ m_data = (union rte_event_crypto_metadata *)
+ ((uint8_t *)crypto_op +
+ crypto_op->private_data_offset);
+ cdev_id = m_data->request_info.cdev_id;
+ qp_id = m_data->request_info.queue_pair_id;
+ qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
+ if (qp_info == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+ len = qp_info->len;
+ qp_info->op_buffer[len] = crypto_op;
+ len++;
+ } else {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+
+ if (len == BATCH_SIZE) {
+ struct rte_crypto_op **op_buffer = qp_info->op_buffer;
+ ret = rte_cryptodev_enqueue_burst(cdev_id,
+ qp_id,
+ op_buffer,
+ BATCH_SIZE);
+
+ stats->crypto_enq_count += ret;
+
+ while (ret < len) {
+ struct rte_crypto_op *op;
+ op = op_buffer[ret++];
+ stats->crypto_enq_fail++;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+
+ len = 0;
+ }
+
+ if (qp_info)
+ qp_info->len = len;
+ n += ret;
+ }
+
+ return n;
+}
+
+static unsigned int
+eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct crypto_device_info *curr_dev;
+ struct crypto_queue_pair_info *curr_queue;
+ struct rte_crypto_op **op_buffer;
+ struct rte_cryptodev *dev;
+ uint8_t cdev_id;
+ uint16_t qp;
+ uint16_t ret;
+ uint16_t num_cdev = rte_cryptodev_count();
+
+ ret = 0;
+ for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
+ curr_dev = &adapter->cdevs[cdev_id];
+ if (curr_dev == NULL)
+ continue;
+ dev = curr_dev->dev;
+
+ for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
+
+ curr_queue = &curr_dev->qpairs[qp];
+ if (!curr_queue->qp_enabled)
+ continue;
+
+ op_buffer = curr_queue->op_buffer;
+ ret = rte_cryptodev_enqueue_burst(cdev_id,
+ qp,
+ op_buffer,
+ curr_queue->len);
+ stats->crypto_enq_count += ret;
+
+ while (ret < curr_queue->len) {
+ struct rte_crypto_op *op;
+ op = op_buffer[ret++];
+ stats->crypto_enq_fail++;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+ curr_queue->len = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int
+eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_enq)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct rte_event ev[BATCH_SIZE];
+ unsigned int nb_enq, nb_enqueued;
+ uint16_t n;
+ uint8_t event_dev_id = adapter->eventdev_id;
+ uint8_t event_port_id = adapter->event_port_id;
+
+ nb_enqueued = 0;
+ if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ return 0;
+
+ for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
+ stats->event_poll_count++;
+ n = rte_event_dequeue_burst(event_dev_id,
+ event_port_id, ev, BATCH_SIZE, 0);
+
+ if (!n)
+ break;
+
+ nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
+ }
+
+ if ((++adapter->transmit_loop_count &
+ (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
+ nb_enqueued += eca_crypto_enq_flush(adapter);
+ }
+
+ return nb_enqueued;
+}
+
+static inline void
+eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
+ struct rte_crypto_op **ops, uint16_t num)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ union rte_event_crypto_metadata *m_data = NULL;
+ uint8_t event_dev_id = adapter->eventdev_id;
+ uint8_t event_port_id = adapter->event_port_id;
+ struct rte_event events[BATCH_SIZE];
+ uint16_t nb_enqueued, nb_ev;
+ uint8_t retry;
+ uint8_t i;
+
+ nb_ev = 0;
+ retry = 0;
+ nb_enqueued = 0;
+ num = RTE_MIN(num, BATCH_SIZE);
+ for (i = 0; i < num; i++) {
+ struct rte_event *ev = &events[nb_ev++];
+ if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ m_data = rte_cryptodev_sym_session_get_user_data(
+ ops[i]->sym->session);
+ } else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+ ops[i]->private_data_offset) {
+ m_data = (union rte_event_crypto_metadata *)
+ ((uint8_t *)ops[i] +
+ ops[i]->private_data_offset);
+ }
+
+ if (unlikely(m_data == NULL)) {
+ rte_pktmbuf_free(ops[i]->sym->m_src);
+ rte_crypto_op_free(ops[i]);
+ continue;
+ }
+
+ nb_ev++;
+ rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
+ ev->event_ptr = ops[i];
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ if (adapter->implicit_release_disabled)
+ ev->op = RTE_EVENT_OP_FORWARD;
+ else
+ ev->op = RTE_EVENT_OP_NEW;
+ }
+
+ do {
+ nb_enqueued += rte_event_enqueue_burst(event_dev_id,
+ event_port_id,
+ &events[nb_enqueued],
+ nb_ev - nb_enqueued);
+ } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
+ nb_enqueued < nb_ev);
+
+ /* Free mbufs and rte_crypto_ops for failed events */
+ for (i = nb_enqueued; i < nb_ev; i++) {
+ struct rte_crypto_op *op = events[i].event_ptr;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+
+ stats->event_enq_fail_count += nb_ev - nb_enqueued;
+ stats->event_enq_count += nb_enqueued;
+ stats->event_enq_retry_count += retry - 1;
+}
+
+static inline unsigned int
+eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_deq)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct crypto_device_info *curr_dev;
+ struct crypto_queue_pair_info *curr_queue;
+ struct rte_crypto_op *ops[BATCH_SIZE];
+ uint16_t n, nb_deq;
+ struct rte_cryptodev *dev;
+ uint8_t cdev_id;
+ uint16_t qp, dev_qps;
+ bool done;
+ uint16_t num_cdev = rte_cryptodev_count();
+
+ nb_deq = 0;
+ do {
+ uint16_t queues = 0;
+ done = true;
+
+ for (cdev_id = adapter->next_cdev_id;
+ cdev_id < num_cdev; cdev_id++) {
+ curr_dev = &adapter->cdevs[cdev_id];
+ if (curr_dev == NULL)
+ continue;
+ dev = curr_dev->dev;
+ dev_qps = dev->data->nb_queue_pairs;
+
+ for (qp = curr_dev->next_queue_pair_id;
+ queues < dev_qps; qp = (qp + 1) % dev_qps,
+ queues++) {
+
+ curr_queue = &curr_dev->qpairs[qp];
+ if (!curr_queue->qp_enabled)
+ continue;
+
+ n = rte_cryptodev_dequeue_burst(cdev_id, qp,
+ ops, BATCH_SIZE);
+ if (!n)
+ continue;
+
+ done = false;
+ stats->crypto_deq_count += n;
+ eca_ops_enqueue_burst(adapter, ops, n);
+ nb_deq += n;
+
+ if (nb_deq > max_deq) {
+ if ((qp + 1) == dev_qps) {
+ adapter->next_cdev_id =
+ (cdev_id + 1)
+ % num_cdev;
+ }
+ curr_dev->next_queue_pair_id = (qp + 1)
+ % dev->data->nb_queue_pairs;
+
+ return nb_deq;
+ }
+ }
+ }
+ } while (done == false);
+ return nb_deq;
+}
+
+static void
+eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_ops)
+{
+ while (max_ops) {
+ unsigned int e_cnt, d_cnt;
+
+ e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
+ max_ops -= RTE_MIN(max_ops, e_cnt);
+
+ d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
+ max_ops -= RTE_MIN(max_ops, d_cnt);
+
+ if (e_cnt == 0 && d_cnt == 0)
+ break;
+
+ }
+}
+
+static int
+eca_service_func(void *args)
+{
+ struct rte_event_crypto_adapter *adapter = args;
+
+ if (rte_spinlock_trylock(&adapter->lock) == 0)
+ return 0;
+ eca_crypto_adapter_run(adapter, adapter->max_nb);
+ rte_spinlock_unlock(&adapter->lock);
+
+ return 0;
+}
+
+static int
+eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+{
+ struct rte_event_crypto_adapter_conf adapter_conf;
+ struct rte_service_spec service;
+ int ret;
+
+ if (adapter->service_inited)
+ return 0;
+
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
+ "rte_event_crypto_adapter_%d", id);
+ service.socket_id = adapter->socket_id;
+ service.callback = eca_service_func;
+ service.callback_userdata = adapter;
+ /* Service function handles locking for queue add/del updates */
+ service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
+ ret = rte_service_component_register(&service, &adapter->service_id);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
+ service.name, ret);
+ return ret;
+ }
+
+ ret = adapter->conf_cb(id, adapter->eventdev_id,
+ &adapter_conf, adapter->conf_arg);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
+ ret);
+ return ret;
+ }
+
+ adapter->max_nb = adapter_conf.max_nb;
+ adapter->event_port_id = adapter_conf.event_port_id;
+ adapter->service_inited = 1;
+
+ return ret;
+}
+
+static void
+eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
+ struct crypto_device_info *dev_info,
+ int32_t queue_pair_id,
+ uint8_t add)
+{
+ struct crypto_queue_pair_info *qp_info;
+ int enabled;
+ uint16_t i;
+
+ if (dev_info->qpairs == NULL)
+ return;
+
+ if (queue_pair_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
+ eca_update_qp_info(adapter, dev_info, i, add);
+ } else {
+ qp_info = &dev_info->qpairs[queue_pair_id];
+ enabled = qp_info->qp_enabled;
+ if (add) {
+ adapter->nb_qps += !enabled;
+ dev_info->num_qpairs += !enabled;
+ } else {
+ adapter->nb_qps -= enabled;
+ dev_info->num_qpairs -= enabled;
+ }
+ qp_info->qp_enabled = !!add;
+ }
+}
+
+static int
+eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
+ uint8_t cdev_id,
+ int queue_pair_id)
+{
+ struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
+ struct crypto_queue_pair_info *qpairs;
+ uint32_t i;
+
+ if (dev_info->qpairs == NULL) {
+ dev_info->qpairs =
+ rte_zmalloc_socket(adapter->mem_name,
+ dev_info->dev->data->nb_queue_pairs *
+ sizeof(struct crypto_queue_pair_info),
+ 0, adapter->socket_id);
+ if (dev_info->qpairs == NULL)
+ return -ENOMEM;
+
+ qpairs = dev_info->qpairs;
+ qpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,
+ BATCH_SIZE *
+ sizeof(struct rte_crypto_op *),
+ 0, adapter->socket_id);
+ if (!qpairs->op_buffer) {
+ rte_free(qpairs);
+ return -ENOMEM;
+ }
+ }
+
+ if (queue_pair_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
+ eca_update_qp_info(adapter, dev_info, i, 1);
+ } else
+ eca_update_qp_info(adapter, dev_info,
+ (uint16_t)queue_pair_id, 1);
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_add(uint8_t id,
+ uint8_t cdev_id,
+ int32_t queue_pair_id,
+ const struct rte_event *event)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct rte_eventdev *dev;
+ struct crypto_device_info *dev_info;
+ uint32_t cap;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
+ return -EINVAL;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
+ cdev_id,
+ &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
+ " cdev %" PRIu8, id, cdev_id);
+ return ret;
+ }
+
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
+ (event == NULL)) {
+ RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
+ cdev_id);
+ return -EINVAL;
+ }
+
+ dev_info = &adapter->cdevs[cdev_id];
+
+ if (queue_pair_id != -1 &&
+ (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
+ RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
+ (uint16_t)queue_pair_id);
+ return -EINVAL;
+ }
+
+ /* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
+ * no service core is needed as the HW supports event forwarding.
+ */
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
+ RTE_FUNC_PTR_OR_ERR_RET(
+ *dev->dev_ops->crypto_adapter_queue_pair_add,
+ -ENOTSUP);
+ if (dev_info->qpairs == NULL) {
+ dev_info->qpairs =
+ rte_zmalloc_socket(adapter->mem_name,
+ dev_info->dev->data->nb_queue_pairs *
+ sizeof(struct crypto_queue_pair_info),
+ 0, adapter->socket_id);
+ if (dev_info->qpairs == NULL)
+ return -ENOMEM;
+ }
+
+ ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
+ dev_info->dev,
+ queue_pair_id,
+ event);
+ if (ret)
+ return ret;
+
+ eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
+ queue_pair_id, 1);
+ }
+
+ /* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
+ * or a SW adapter is used, initiate the service so the application can
+ * choose whichever way it wants to use the adapter.
+ * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
+ * The application may want to use one of the two modes below:
+ * a. OP_FORWARD mode -> HW dequeue + SW enqueue
+ * b. OP_NEW mode -> HW dequeue
+ * Case 2: No HW caps, use the SW adapter
+ * a. OP_FORWARD mode -> SW enqueue & dequeue
+ * b. OP_NEW mode -> SW dequeue
+ */
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
+ (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
+ rte_spinlock_lock(&adapter->lock);
+ ret = eca_init_service(adapter, id);
+ if (ret == 0)
+ ret = eca_add_queue_pair(adapter, cdev_id,
+ queue_pair_id);
+ rte_spinlock_unlock(&adapter->lock);
+
+ if (ret)
+ return ret;
+
+ rte_service_component_runstate_set(adapter->service_id, 1);
+ }
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
+ int32_t queue_pair_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ int ret;
+ uint32_t cap;
+ uint16_t i;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
+ return -EINVAL;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
+ cdev_id,
+ &cap);
+ if (ret)
+ return ret;
+
+ dev_info = &adapter->cdevs[cdev_id];
+
+ if (queue_pair_id != -1 &&
+ (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
+ RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
+ (uint16_t)queue_pair_id);
+ return -EINVAL;
+ }
+
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
+ RTE_FUNC_PTR_OR_ERR_RET(
+ *dev->dev_ops->crypto_adapter_queue_pair_del,
+ -ENOTSUP);
+ ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
+ dev_info->dev,
+ queue_pair_id);
+ if (ret == 0) {
+ eca_update_qp_info(adapter,
+ &adapter->cdevs[cdev_id],
+ queue_pair_id,
+ 0);
+ if (dev_info->num_qpairs == 0) {
+ rte_free(dev_info->qpairs);
+ dev_info->qpairs = NULL;
+ }
+ }
+ } else {
+ if (adapter->nb_qps == 0)
+ return 0;
+
+ rte_spinlock_lock(&adapter->lock);
+ if (queue_pair_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
+ i++)
+ eca_update_qp_info(adapter, dev_info,
+ i, 0);
+ } else {
+ eca_update_qp_info(adapter, dev_info,
+ (uint16_t)queue_pair_id, 0);
+ }
+
+ if (dev_info->num_qpairs == 0) {
+ rte_free(dev_info->qpairs);
+ dev_info->qpairs = NULL;
+ }
+
+ rte_spinlock_unlock(&adapter->lock);
+ rte_service_component_runstate_set(adapter->service_id,
+ adapter->nb_qps);
+ }
+
+ return ret;
+}
+
+static int
+eca_adapter_ctrl(uint8_t id, int start)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ uint32_t i;
+ int use_service;
+ int stop = !start;
+
+ use_service = 0;
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ /* if start check for num queue pairs */
+ if (start && !dev_info->num_qpairs)
+ continue;
+ /* if stop check if dev has been started */
+ if (stop && !dev_info->dev_started)
+ continue;
+ use_service |= !dev_info->internal_event_port;
+ dev_info->dev_started = start;
+ if (dev_info->internal_event_port == 0)
+ continue;
+ start ? (*dev->dev_ops->crypto_adapter_start)(dev,
+ &dev_info->dev[i]) :
+ (*dev->dev_ops->crypto_adapter_stop)(dev,
+ &dev_info->dev[i]);
+ }
+
+ if (use_service)
+ rte_service_runstate_set(adapter->service_id, start);
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_start(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ return eca_adapter_ctrl(id, 1);
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stop(uint8_t id)
+{
+ return eca_adapter_ctrl(id, 0);
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stats_get(uint8_t id,
+ struct rte_event_crypto_adapter_stats *stats)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
+ struct rte_event_crypto_adapter_stats dev_stats;
+ struct rte_eventdev *dev;
+ struct crypto_device_info *dev_info;
+ uint32_t i;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || stats == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->crypto_adapter_stats_get == NULL)
+ continue;
+ ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
+ dev_info->dev,
+ &dev_stats);
+ if (ret)
+ continue;
+
+ dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
+ dev_stats_sum.event_enq_count +=
+ dev_stats.event_enq_count;
+ }
+
+ if (adapter->service_inited)
+ *stats = adapter->crypto_stats;
+
+ stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
+ stats->event_enq_count += dev_stats_sum.event_enq_count;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stats_reset(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ uint32_t i;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->crypto_adapter_stats_reset == NULL)
+ continue;
+ (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
+ dev_info->dev);
+ }
+
+ memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || service_id == NULL)
+ return -EINVAL;
+
+ if (adapter->service_inited)
+ *service_id = adapter->service_id;
+
+ return adapter->service_inited ? 0 : -ESRCH;
+}
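For illustration (not part of the patch): when the adapter uses a SW service, the application is expected to map that service to a service lcore and enable it; service_lcore is assumed to have been added via rte_service_lcore_add() beforehand.

	uint32_t service_id;

	if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0) {
		rte_service_map_lcore_set(service_id, service_lcore, 1);
		rte_service_runstate_set(service_id, 1);
	}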
+
+int __rte_experimental
+rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || event_port_id == NULL)
+ return -EINVAL;
+
+ *event_port_id = adapter->event_port_id;
+
+ return 0;
+}
diff --git a/lib/librte_eventdev/rte_event_crypto_adapter.h b/lib/librte_eventdev/rte_event_crypto_adapter.h
new file mode 100644
index 00000000..d367309c
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_crypto_adapter.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENT_CRYPTO_ADAPTER_
+#define _RTE_EVENT_CRYPTO_ADAPTER_
+
+/**
+ * @file
+ *
+ * RTE Event crypto adapter
+ *
+ * The eventdev library provides adapters to bridge between various
+ * components and provide new event sources. The event crypto adapter is
+ * one of those adapters; it is intended to bridge between event devices
+ * and crypto devices.
+ *
+ * The crypto adapter adds support to enqueue/dequeue crypto operations to/
+ * from the event device. The packet flow between the crypto device and the
+ * event device can be accomplished using both SW and HW based transfer
+ * mechanisms.
+ * The adapter uses an EAL service core function for SW based packet transfer
+ * and uses the eventdev PMD functions to configure HW based packet transfer
+ * between the crypto device and the event device.
+ *
+ * The application can choose to submit a crypto operation directly to
+ * crypto device or send it to the crypto adapter via eventdev based on
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
+ * The first mode is known as the event new(RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ * mode and the second as the event forward(RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD)
+ * mode. The choice of mode can be specified while creating the adapter.
+ * In the former mode, it is the application's responsibility to enable
+ * ingress packet ordering. In the latter mode, it is the adapter's
+ * responsibility to enable ingress packet ordering.
+ *
+ *
+ * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode:
+ *
+ * +--------------+ +--------------+
+ * | | | Crypto stage |
+ * | Application |---[2]-->| + enqueue to |
+ * | | | cryptodev |
+ * +--------------+ +--------------+
+ * ^ ^ |
+ * | | [3]
+ * [6] [1] |
+ * | | |
+ * +--------------+ |
+ * | | |
+ * | Event device | |
+ * | | |
+ * +--------------+ |
+ * ^ |
+ * | |
+ * [5] |
+ * | v
+ * +--------------+ +--------------+
+ * | | | |
+ * |Crypto adapter|<--[4]---| Cryptodev |
+ * | | | |
+ * +--------------+ +--------------+
+ *
+ *
+ * [1] Application dequeues events from the previous stage.
+ * [2] Application prepares the crypto operations.
+ * [3] Crypto operations are submitted to cryptodev by application.
+ * [4] Crypto adapter dequeues crypto completions from cryptodev.
+ * [5] Crypto adapter enqueues events to the eventdev.
+ * [6] Application dequeues from eventdev and prepares for further
+ * processing.
+ *
+ * In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, the application submits crypto
+ * operations directly to the crypto device. The adapter then dequeues crypto
+ * completions from the crypto device and enqueues events to the event device.
+ * This mode does not ensure ingress ordering if the application directly
+ * enqueues to the cryptodev without going through a crypto/atomic stage,
+ * i.e. removing items [1] and [2].
+ * Events dequeued from the adapter will be treated as new events.
+ * In this mode, the application needs to specify event information (response
+ * information) which is needed to enqueue an event after the crypto operation
+ * is completed.
+ *
+ *
+ * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode:
+ *
+ * +--------------+ +--------------+
+ * --[1]-->| |---[2]-->| Application |
+ * | Event device | | in |
+ * <--[8]--| |<--[3]---| Ordered stage|
+ * +--------------+ +--------------+
+ * ^ |
+ * | [4]
+ * [7] |
+ * | v
+ * +----------------+ +--------------+
+ * | |--[5]->| |
+ * | Crypto adapter | | Cryptodev |
+ * | |<-[6]--| |
+ * +----------------+ +--------------+
+ *
+ *
+ * [1] Events from the previous stage.
+ * [2] Application in ordered stage dequeues events from eventdev.
+ * [3] Application enqueues crypto operations as events to eventdev.
+ * [4] Crypto adapter dequeues event from eventdev.
+ * [5] Crypto adapter submits crypto operations to cryptodev
+ * (Atomic stage).
+ * [6] Crypto adapter dequeues crypto completions from cryptodev.
+ * [7] Crypto adapter enqueues events to the eventdev.
+ * [8] Events to the next stage.
+ *
+ * In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, if the HW supports the
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability, the
+ * application can directly submit the crypto operations to the cryptodev.
+ * If not, the application retrieves the crypto adapter's event port using
+ * the rte_event_crypto_adapter_event_port_get() API, then links its event
+ * queue to this port and starts enqueuing crypto operations as events
+ * to the eventdev. The adapter then dequeues the events and submits the
+ * crypto operations to the cryptodev. After the crypto completions, the
+ * adapter enqueues events to the event device.
+ * The application can use this mode when ingress packet ordering is needed.
+ * Events dequeued from the adapter will be treated as forwarded events.
+ * In this mode, the application needs to specify the cryptodev ID
+ * and queue pair ID (request information) needed to enqueue a crypto
+ * operation in addition to the event information (response information)
+ * needed to enqueue an event after the crypto operation has completed.
+ *
+ *
+ * The event crypto adapter provides common APIs to configure the packet flow
+ * from the crypto device to event devices for both SW and HW based transfers.
+ * The event crypto adapter's functions are:
+ * - rte_event_crypto_adapter_create_ext()
+ * - rte_event_crypto_adapter_create()
+ * - rte_event_crypto_adapter_free()
+ * - rte_event_crypto_adapter_queue_pair_add()
+ * - rte_event_crypto_adapter_queue_pair_del()
+ * - rte_event_crypto_adapter_start()
+ * - rte_event_crypto_adapter_stop()
+ * - rte_event_crypto_adapter_stats_get()
+ * - rte_event_crypto_adapter_stats_reset()
+ *
+ * The application creates an instance using rte_event_crypto_adapter_create()
+ * or rte_event_crypto_adapter_create_ext().
+ *
+ * Cryptodev queue pair addition/deletion is done using the
+ * rte_event_crypto_adapter_queue_pair_xxx() APIs. If HW supports
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability, event
+ * information must be passed to the add API.
+ *
+ * The SW adapter or HW PMD uses rte_crypto_op::sess_type to decide whether
+ * request/response (private) data is located in the crypto/security session
+ * or at an offset in the rte_crypto_op.
+ *
+ * For session-based operations, the set and get API provides a mechanism for
+ * an application to store and retrieve the data information stored
+ * along with the crypto session.
+ * The RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA capability indicates
+ * whether HW or SW supports this feature.
+ *
+ * For session-less mode, the adapter gets the private data information placed
+ * along with the ``struct rte_crypto_op``.
+ * The rte_crypto_op::private_data_offset provides an offset to locate the
+ * request/response information in the rte_crypto_op. This offset is counted
+ * from the start of the rte_crypto_op including initialization vector (IV).
+ */
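For illustration (not part of the patch): a minimal setup sketch using adapter id 0, under the assumption that the event device (dev_id) and crypto device (cdev_id) are already configured.

	struct rte_event_port_conf port_conf;
	int ret;

	ret = rte_event_port_default_conf_get(dev_id, 0, &port_conf);
	if (ret == 0)
		ret = rte_event_crypto_adapter_create(0, dev_id, &port_conf,
					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
	if (ret == 0) /* -1 adds all pre-configured queue pairs */
		ret = rte_event_crypto_adapter_queue_pair_add(0, cdev_id, -1,
							      NULL);
	if (ret == 0)
		ret = rte_event_crypto_adapter_start(0);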
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include "rte_eventdev.h"
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this enum may change without prior notice
+ *
+ * Crypto event adapter mode
+ */
+enum rte_event_crypto_adapter_mode {
+ RTE_EVENT_CRYPTO_ADAPTER_OP_NEW,
+ /**< Start the crypto adapter in event new mode.
+ * @see RTE_EVENT_OP_NEW.
+ * Application submits crypto operations to the cryptodev.
+ * Adapter only dequeues the crypto completions from cryptodev
+ * and enqueue events to the eventdev.
+ */
+ RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD,
+ /**< Start the crypto adapter in event forward mode.
+ * @see RTE_EVENT_OP_FORWARD.
+ * Application submits crypto requests as events to the crypto
+ * adapter or crypto device based on
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
+ * Crypto completions are enqueued back to the eventdev by
+ * crypto adapter.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Crypto event request structure, filled in by the application to
+ * provide event request information to the adapter.
+ */
+struct rte_event_crypto_request {
+ uint8_t resv[8];
+ /**< Overlaps with first 8 bytes of struct rte_event
+ * that encode the response event information. Application
+ * is expected to fill in struct rte_event response_info.
+ */
+ uint16_t cdev_id;
+ /**< cryptodev ID to be used */
+ uint16_t queue_pair_id;
+ /**< cryptodev queue pair ID to be used */
+ uint32_t resv1;
+ /**< Reserved bits */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Crypto event metadata structure, filled in by the application
+ * to provide crypto request and event response information.
+ *
+ * If crypto events are enqueued using a HW mechanism, the cryptodev
+ * PMD will use the event response information to set up the event
+ * that is enqueued back to eventdev after completion of the crypto
+ * operation. If the transfer is done by SW, event response information
+ * will be used by the adapter.
+ */
+union rte_event_crypto_metadata {
+ struct rte_event_crypto_request request_info;
+ /**< Request information to be filled in by application
+ * for RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+ struct rte_event response_info;
+ /**< Response information to be filled in by application
+ * for RTE_EVENT_CRYPTO_ADAPTER_OP_NEW and
+ * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+};
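For illustration (not part of the patch): a sketch of filling the metadata for a session-based operation used in FORWARD mode; sess, cdev_id, qp_id and ev_qid are assumed to exist. Since the request fields overlap the rte_event payload, both halves can be filled in the same union.

	union rte_event_crypto_metadata m_data;

	memset(&m_data, 0, sizeof(m_data));
	/* Response event used to enqueue the completion back to the eventdev. */
	m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
	m_data.response_info.queue_id = ev_qid;
	/* Request side: where the adapter should submit the crypto op. */
	m_data.request_info.cdev_id = cdev_id;
	m_data.request_info.queue_pair_id = qp_id;
	rte_cryptodev_sym_session_set_user_data(sess, &m_data, sizeof(m_data));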
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Adapter configuration structure that the adapter configuration callback
+ * function is expected to fill out
+ * @see rte_event_crypto_adapter_conf_cb
+ */
+struct rte_event_crypto_adapter_conf {
+ uint8_t event_port_id;
+ /**< Event port identifier, the adapter enqueues events to this
+ * port and dequeues crypto request events in
+ * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+ uint32_t max_nb;
+ /**< The adapter can return early if it has processed at least
+ * max_nb crypto ops. This isn't treated as a requirement; batching
+ * may cause the adapter to process more than max_nb crypto ops.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Function type used for adapter configuration callback. The callback is
+ * used to fill in members of the struct rte_event_crypto_adapter_conf; this
+ * callback is invoked when creating a SW service for packet transfer from a
+ * cryptodev queue pair to the event device. The SW service is created within
+ * the rte_event_crypto_adapter_queue_pair_add() function if SW based packet
+ * transfers from cryptodev queue pair to the event device are required.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param conf
+ * Structure that needs to be populated by this callback.
+ *
+ * @param arg
+ * Argument to the callback. This is the same as the conf_arg passed to the
+ * rte_event_crypto_adapter_create_ext().
+ */
+typedef int (*rte_event_crypto_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
+ struct rte_event_crypto_adapter_conf *conf,
+ void *arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * A structure used to retrieve statistics for an event crypto adapter
+ * instance.
+ */
+struct rte_event_crypto_adapter_stats {
+ uint64_t event_poll_count;
+ /**< Event port poll count */
+ uint64_t event_deq_count;
+ /**< Event dequeue count */
+ uint64_t crypto_enq_count;
+ /**< Cryptodev enqueue count */
+ uint64_t crypto_enq_fail;
+ /**< Cryptodev enqueue failed count */
+ uint64_t crypto_deq_count;
+ /**< Cryptodev dequeue count */
+ uint64_t event_enq_count;
+ /**< Event enqueue count */
+ uint64_t event_enq_retry_count;
+ /**< Event enqueue retry count */
+ uint64_t event_enq_fail_count;
+ /**< Event enqueue fail count */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new event crypto adapter with the specified identifier.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param conf_cb
+ * Callback function that fills in members of the
+ * struct rte_event_crypto_adapter_conf passed into it.
+ *
+ * @param mode
+ * Flag to indicate the mode of the adapter.
+ * @see rte_event_crypto_adapter_mode
+ *
+ * @param conf_arg
+ * Argument that is passed to the conf_cb function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int __rte_experimental
+rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_crypto_adapter_conf_cb conf_cb,
+ enum rte_event_crypto_adapter_mode mode,
+ void *conf_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new event crypto adapter with the specified identifier.
+ * This function uses an internal configuration function that creates an event
+ * port. This default function reconfigures the event device with an
+ * additional event port and sets up the event port using the port_config
+ * parameter passed into this function. If the application needs more
+ * control over the configuration of the service, it should use the
+ * rte_event_crypto_adapter_create_ext() version.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param port_config
+ * Argument of type *rte_event_port_conf* that is passed to the conf_cb
+ * function.
+ *
+ * @param mode
+ * Flag to indicate the mode of the adapter.
+ * @see rte_event_crypto_adapter_mode
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int __rte_experimental
+rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config,
+ enum rte_event_crypto_adapter_mode mode);
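+
+/* Typical usage sketch (illustrative): create the adapter, bind all
+ * pre-configured queue pairs of a cryptodev and start it. Error handling
+ * is omitted; id, dev_id, port_conf and cdev_id are assumed to be set up
+ * by the application.
+ *
+ *	rte_event_crypto_adapter_create(id, dev_id, &port_conf,
+ *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
+ *	rte_event_crypto_adapter_queue_pair_add(id, cdev_id, -1, NULL);
+ *	rte_event_crypto_adapter_start(id);
+ */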
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure; if the adapter still has queue pairs
+ * added to it, the function returns -EBUSY.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_free(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Add a queue pair to an event crypto adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param cdev_id
+ * Cryptodev identifier.
+ *
+ * @param queue_pair_id
+ * Cryptodev queue pair identifier. If queue_pair_id is set to -1,
+ * the adapter adds all the pre-configured queue pairs to the instance.
+ *
+ * @param event
+ * If HW supports cryptodev queue pair to event queue binding, the
+ * application is expected to fill in event information; otherwise it
+ * should be NULL.
+ * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+ *
+ * @return
+ * - 0: Success, queue pair added correctly.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_add(uint8_t id,
+ uint8_t cdev_id,
+ int32_t queue_pair_id,
+ const struct rte_event *event);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Delete a queue pair from an event crypto adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param cdev_id
+ * Cryptodev identifier.
+ *
+ * @param queue_pair_id
+ * Cryptodev queue pair identifier.
+ *
+ * @return
+ * - 0: Success, queue pair deleted successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
+ int32_t queue_pair_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ *
+ * @return
+ * - 0: Success, adapter started successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_start(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, adapter stopped successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stop(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] stats
+ * A pointer to structure used to retrieve statistics for an adapter.
+ *
+ * @return
+ * - 0: Success, retrieved successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stats_get(uint8_t id,
+ struct rte_event_crypto_adapter_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, statistics reset successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stats_reset(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the service ID of an adapter. If the adapter doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure; -ESRCH if the adapter does not use a
+ * rte_service function.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
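+
+/* Sketch (illustrative): when the adapter uses a SW service, the
+ * application maps the service to a service lcore before starting it;
+ * lcore_id is assumed to be a configured service lcore.
+ *
+ *	uint32_t service_id;
+ *
+ *	if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0)
+ *		rte_service_map_lcore_set(service_id, lcore_id, 1);
+ */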
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the event port of an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] event_port_id
+ * A pointer to a uint8_t, to be filled in with the adapter's event port
+ * identifier; the application links its event queue to this port, which
+ * is used in RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_EVENT_CRYPTO_ADAPTER_ */
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index 9aece9f8..f5e5a0b5 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -1,3 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ */
+#if defined(LINUX)
+#include <sys/epoll.h>
+#endif
+#include <unistd.h>
+
#include <rte_cycles.h>
#include <rte_common.h>
#include <rte_dev.h>
@@ -7,6 +16,7 @@
#include <rte_malloc.h>
#include <rte_service_component.h>
#include <rte_thash.h>
+#include <rte_interrupts.h>
#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
@@ -20,6 +30,22 @@
#define ETH_RX_ADAPTER_MEM_NAME_LEN 32
#define RSS_KEY_SIZE 40
+/* value written to intr thread pipe to signal thread exit */
+#define ETH_BRIDGE_INTR_THREAD_EXIT 1
+/* Sentinel value to detect an uninitialized file descriptor */
+#define INIT_FD -1
+
+/*
+ * Used to store port and queue ID of interrupting Rx queue
+ */
+union queue_data {
+ RTE_STD_C11
+ void *ptr;
+ struct {
+ uint16_t port;
+ uint16_t queue;
+ };
+};
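+
+/* Illustrative example (not part of this patch): the <port, queue> pair
+ * travels through the epoll user data pointer with no allocation; the
+ * names below are illustrative.
+ *
+ *	union queue_data qd;
+ *
+ *	qd.port = eth_dev_id;
+ *	qd.queue = rx_queue_id;
+ *	rte_eth_dev_rx_intr_ctl_q(qd.port, qd.queue, epd,
+ *				  RTE_INTR_EVENT_ADD, qd.ptr);
+ */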
/*
* There is an instance of this struct per polled Rx queue added to the
@@ -27,7 +53,7 @@
*/
struct eth_rx_poll_entry {
/* Eth port to poll */
- uint8_t eth_dev_id;
+ uint16_t eth_dev_id;
/* Eth rx queue to poll */
uint16_t eth_rx_qid;
};
@@ -71,6 +97,30 @@ struct rte_event_eth_rx_adapter {
uint16_t enq_block_count;
/* Block start ts */
uint64_t rx_enq_block_start_ts;
+ /* epoll fd used to wait for Rx interrupts */
+ int epd;
+ /* Number of interrupt-driven Rx queues */
+ uint32_t num_rx_intr;
+ /* Used to send <dev id, queue id> of interrupting Rx queues from
+ * the interrupt thread to the Rx thread
+ */
+ struct rte_ring *intr_ring;
+ /* Rx Queue data (dev id, queue id) for the last non-empty
+ * queue polled
+ */
+ union queue_data qd;
+ /* queue_data is valid */
+ int qd_valid;
+ /* Interrupt ring lock, synchronizes Rx thread
+ * and interrupt thread
+ */
+ rte_spinlock_t intr_ring_lock;
+ /* event array passed to rte_epoll_wait */
+ struct rte_epoll_event *epoll_events;
+ /* Count of interrupt vectors in use */
+ uint32_t num_intr_vec;
+ /* Thread blocked on Rx interrupts */
+ pthread_t rx_intr_thread;
/* Configuration callback for rte_service configuration */
rte_event_eth_rx_adapter_conf_cb conf_cb;
/* Configuration callback argument */
@@ -87,12 +137,20 @@ struct rte_event_eth_rx_adapter {
int socket_id;
/* Per adapter EAL service */
uint32_t service_id;
+ /* Adapter started flag */
+ uint8_t rxa_started;
+ /* Adapter ID */
+ uint8_t id;
} __rte_cache_aligned;
/* Per eth device */
struct eth_device_info {
struct rte_eth_dev *dev;
struct eth_rx_queue_info *rx_queue;
+ /* Rx callback */
+ rte_event_eth_rx_adapter_cb_fn cb_fn;
+ /* Rx callback argument */
+ void *cb_arg;
/* Set if ethdev->eventdev packet transfer uses a
* hardware mechanism
*/
@@ -103,15 +161,42 @@ struct eth_device_info {
* rx_adapter_stop callback needs to be invoked
*/
uint8_t dev_rx_started;
- /* If nb_dev_queues > 0, the start callback will
+ /* Number of queues added for this device */
+ uint16_t nb_dev_queues;
+ /* Number of poll based queues
+ * If nb_rx_poll > 0, the start callback will
* be invoked if not already invoked
*/
- uint16_t nb_dev_queues;
+ uint16_t nb_rx_poll;
+ /* Number of interrupt based queues
+ * If nb_rx_intr > 0, the start callback will
+ * be invoked if not already invoked.
+ */
+ uint16_t nb_rx_intr;
+ /* Number of queues that use the shared interrupt */
+ uint16_t nb_shared_intr;
+ /* sum(wrr(q)) for all queues within the device
+ * useful when deleting all device queues
+ */
+ uint32_t wrr_len;
+ /* Intr based queue index to start polling from, this is used
+ * if the number of shared interrupts is non-zero
+ */
+ uint16_t next_q_idx;
+ /* Intr based queue indices */
+ uint16_t *intr_queue;
+ /* device generates a per Rx queue interrupt for queue
+ * indices < RTE_MAX_RXTX_INTR_VEC_ID - 1
+ */
+ int multi_intr_cap;
+ /* shared interrupt enabled */
+ int shared_intr_enabled;
};
/* Per Rx queue */
struct eth_rx_queue_info {
int queue_enabled; /* True if added */
+ int intr_enabled;
uint16_t wt; /* Polling weight */
uint8_t event_queue_id; /* Event queue to enqueue packets to */
uint8_t sched_type; /* Sched type for events */
@@ -123,30 +208,30 @@ struct eth_rx_queue_info {
static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
static inline int
-valid_id(uint8_t id)
+rxa_validate_id(uint8_t id)
{
return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
}
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
- if (!valid_id(id)) { \
+ if (!rxa_validate_id(id)) { \
RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
return retval; \
} \
} while (0)
static inline int
-sw_rx_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
{
- return rx_adapter->num_rx_polled;
+ return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
}
/* Greatest common divisor */
-static uint16_t gcd_u16(uint16_t a, uint16_t b)
+static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
{
uint16_t r = a % b;
- return r ? gcd_u16(b, r) : b;
+ return r ? rxa_gcd_u16(b, r) : b;
}
/* Returns the next queue in the polling sequence
@@ -154,7 +239,7 @@ static uint16_t gcd_u16(uint16_t a, uint16_t b)
* http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
*/
static int
-wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
unsigned int n, int *cw,
struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
uint16_t gcd, int prev)
@@ -164,7 +249,7 @@ wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
while (1) {
uint16_t q;
- uint8_t d;
+ uint16_t d;
i = (i + 1) % n;
if (i == 0) {
@@ -182,13 +267,298 @@ wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
}
}
-/* Precalculate WRR polling sequence for all queues in rx_adapter */
+static inline int
+rxa_shared_intr(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ int multi_intr_cap;
+
+ if (dev_info->dev->intr_handle == NULL)
+ return 0;
+
+ multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
+ return !multi_intr_cap ||
+ rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
+}
+
+static inline int
+rxa_intr_queue(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ struct eth_rx_queue_info *queue_info;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ return dev_info->rx_queue &&
+ !dev_info->internal_event_port &&
+ queue_info->queue_enabled && queue_info->wt == 0;
+}
+
+static inline int
+rxa_polled_queue(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ struct eth_rx_queue_info *queue_info;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ return !dev_info->internal_event_port &&
+ dev_info->rx_queue &&
+ queue_info->queue_enabled && queue_info->wt != 0;
+}
+
+/* Calculate change in number of vectors after Rx queue ID is added/deleted */
static int
-eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
{
- uint8_t d;
+ uint16_t i;
+ int n, s;
+ uint16_t nbq;
+
+ nbq = dev_info->dev->data->nb_rx_queues;
+ n = 0; /* non shared count */
+ s = 0; /* shared count */
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < nbq; i++) {
+ if (!rxa_shared_intr(dev_info, i))
+ n += add ? !rxa_intr_queue(dev_info, i) :
+ rxa_intr_queue(dev_info, i);
+ else
+ s += add ? !rxa_intr_queue(dev_info, i) :
+ rxa_intr_queue(dev_info, i);
+ }
+
+ if (s > 0) {
+ if ((add && dev_info->nb_shared_intr == 0) ||
+ (!add && dev_info->nb_shared_intr))
+ n += 1;
+ }
+ } else {
+ if (!rxa_shared_intr(dev_info, rx_queue_id))
+ n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
+ rxa_intr_queue(dev_info, rx_queue_id);
+ else
+ n = add ? !dev_info->nb_shared_intr :
+ dev_info->nb_shared_intr == 1;
+ }
+
+ return add ? n : -n;
+}
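+
+/* Worked example (illustrative): for a device with 3 Rx queues, none yet
+ * in interrupt mode, each with a dedicated (non-shared) interrupt, adding
+ * all queues (rx_queue_id == -1, add == 1) gives n = 3, s = 0, i.e. 3 new
+ * vectors. If the 3 queues instead share one interrupt, s = 3 and the
+ * shared vector is counted once, so the function returns 1.
+ */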
+
+/* Calculate nb_rx_intr after deleting interrupt mode rx queues
+ */
+static void
+rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_intr)
+{
+ uint32_t intr_diff;
+
+ if (rx_queue_id == -1)
+ intr_diff = dev_info->nb_rx_intr;
+ else
+ intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
+
+ *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
+}
+
+/* Calculate nb_rx_* after adding interrupt mode Rx queues; the newly
+ * added interrupt queues could currently be poll mode Rx queues
+ */
+static void
+rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ uint32_t intr_diff;
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ intr_diff = dev_info->dev->data->nb_rx_queues -
+ dev_info->nb_rx_intr;
+ poll_diff = dev_info->nb_rx_poll;
+ wrr_len_diff = dev_info->wrr_len;
+ } else {
+ intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
+ poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
+ 0;
+ }
+
+ *nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
+ *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
+ *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
+}
+
+/* Calculate size of the eth_rx_poll and wrr_sched arrays
+ * after deleting poll mode rx queues
+ */
+static void
+rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_wrr)
+{
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ poll_diff = dev_info->nb_rx_poll;
+ wrr_len_diff = dev_info->wrr_len;
+ } else {
+ poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
+ 0;
+ }
+
+ *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
+ *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
+}
+
+/* Calculate nb_rx_* after adding poll mode rx queues
+ */
+static void
+rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint16_t wt,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ uint32_t intr_diff;
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ intr_diff = dev_info->nb_rx_intr;
+ poll_diff = dev_info->dev->data->nb_rx_queues -
+ dev_info->nb_rx_poll;
+ wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
+ - dev_info->wrr_len;
+ } else {
+ intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
+ poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
+ wt - dev_info->rx_queue[rx_queue_id].wt :
+ wt;
+ }
+
+ *nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
+ *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
+ *nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
+}
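+
+/* Worked example (illustrative): a device with 4 Rx queues, 2 of them
+ * already polled with wrr_len = 5 and none in interrupt mode; adding all
+ * queues (rx_queue_id == -1) with wt = 3 gives poll_diff = 4 - 2 = 2 and
+ * wrr_len_diff = 3 * 4 - 5 = 7, so nb_rx_poll grows by 2, nb_wrr by 7
+ * and nb_rx_intr is unchanged.
+ */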
+
+/* Calculate nb_rx_* after adding rx_queue_id */
+static void
+rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint16_t wt,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ if (wt != 0)
+ rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
+ wt, nb_rx_poll, nb_rx_intr, nb_wrr);
+ else
+ rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
+ nb_rx_poll, nb_rx_intr, nb_wrr);
+}
+
+/* Calculate nb_rx_* after deleting rx_queue_id */
+static void
+rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
+ nb_wrr);
+ rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
+ nb_rx_intr);
+}
+
+/*
+ * Allocate the rx_poll array
+ */
+static struct eth_rx_poll_entry *
+rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t num_rx_polled)
+{
+ size_t len;
+
+ len = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
+ RTE_CACHE_LINE_SIZE);
+ return rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+}
+
+/*
+ * Allocate the WRR array
+ */
+static uint32_t *
+rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+{
+ size_t len;
+
+ len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
+ RTE_CACHE_LINE_SIZE);
+ return rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+}
+
+static int
+rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t nb_poll,
+ uint32_t nb_wrr,
+ struct eth_rx_poll_entry **rx_poll,
+ uint32_t **wrr_sched)
+{
+
+ if (nb_poll == 0) {
+ *rx_poll = NULL;
+ *wrr_sched = NULL;
+ return 0;
+ }
+
+ *rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
+ if (*rx_poll == NULL) {
+ *wrr_sched = NULL;
+ return -ENOMEM;
+ }
+
+ *wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
+ if (*wrr_sched == NULL) {
+ rte_free(*rx_poll);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Precalculate WRR polling sequence for all queues in rx_adapter */
+static void
+rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_rx_poll_entry *rx_poll,
+ uint32_t *rx_wrr)
+{
+ uint16_t d;
uint16_t q;
unsigned int i;
+ int prev = -1;
+ int cw = -1;
/* Initialize variables for calculation of wrr schedule */
uint16_t max_wrr_pos = 0;
@@ -196,79 +566,52 @@ eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
uint16_t max_wt = 0;
uint16_t gcd = 0;
- struct eth_rx_poll_entry *rx_poll = NULL;
- uint32_t *rx_wrr = NULL;
+ if (rx_poll == NULL)
+ return;
- if (rx_adapter->num_rx_polled) {
- size_t len = RTE_ALIGN(rx_adapter->num_rx_polled *
- sizeof(*rx_adapter->eth_rx_poll),
- RTE_CACHE_LINE_SIZE);
- rx_poll = rte_zmalloc_socket(rx_adapter->mem_name,
- len,
- RTE_CACHE_LINE_SIZE,
- rx_adapter->socket_id);
- if (rx_poll == NULL)
- return -ENOMEM;
+ /* Generate array of all queues to poll, the size of this
+ * array is poll_q
+ */
+ RTE_ETH_FOREACH_DEV(d) {
+ uint16_t nb_rx_queues;
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[d];
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ if (dev_info->rx_queue == NULL)
+ continue;
+ if (dev_info->internal_event_port)
+ continue;
+ dev_info->wrr_len = 0;
+ for (q = 0; q < nb_rx_queues; q++) {
+ struct eth_rx_queue_info *queue_info =
+ &dev_info->rx_queue[q];
+ uint16_t wt;
- /* Generate array of all queues to poll, the size of this
- * array is poll_q
- */
- for (d = 0; d < rte_eth_dev_count(); d++) {
- uint16_t nb_rx_queues;
- struct eth_device_info *dev_info =
- &rx_adapter->eth_devices[d];
- nb_rx_queues = dev_info->dev->data->nb_rx_queues;
- if (dev_info->rx_queue == NULL)
+ if (!rxa_polled_queue(dev_info, q))
continue;
- for (q = 0; q < nb_rx_queues; q++) {
- struct eth_rx_queue_info *queue_info =
- &dev_info->rx_queue[q];
- if (queue_info->queue_enabled == 0)
- continue;
-
- uint16_t wt = queue_info->wt;
- rx_poll[poll_q].eth_dev_id = d;
- rx_poll[poll_q].eth_rx_qid = q;
- max_wrr_pos += wt;
- max_wt = RTE_MAX(max_wt, wt);
- gcd = (gcd) ? gcd_u16(gcd, wt) : wt;
- poll_q++;
- }
- }
-
- len = RTE_ALIGN(max_wrr_pos * sizeof(*rx_wrr),
- RTE_CACHE_LINE_SIZE);
- rx_wrr = rte_zmalloc_socket(rx_adapter->mem_name,
- len,
- RTE_CACHE_LINE_SIZE,
- rx_adapter->socket_id);
- if (rx_wrr == NULL) {
- rte_free(rx_poll);
- return -ENOMEM;
- }
-
- /* Generate polling sequence based on weights */
- int prev = -1;
- int cw = -1;
- for (i = 0; i < max_wrr_pos; i++) {
- rx_wrr[i] = wrr_next(rx_adapter, poll_q, &cw,
- rx_poll, max_wt, gcd, prev);
- prev = rx_wrr[i];
+ wt = queue_info->wt;
+ rx_poll[poll_q].eth_dev_id = d;
+ rx_poll[poll_q].eth_rx_qid = q;
+ max_wrr_pos += wt;
+ dev_info->wrr_len += wt;
+ max_wt = RTE_MAX(max_wt, wt);
+ gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
+ poll_q++;
}
}
- rte_free(rx_adapter->eth_rx_poll);
- rte_free(rx_adapter->wrr_sched);
-
- rx_adapter->eth_rx_poll = rx_poll;
- rx_adapter->wrr_sched = rx_wrr;
- rx_adapter->wrr_len = max_wrr_pos;
-
- return 0;
+ /* Generate polling sequence based on weights */
+ prev = -1;
+ cw = -1;
+ for (i = 0; i < max_wrr_pos; i++) {
+ rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
+ rx_poll, max_wt, gcd, prev);
+ prev = rx_wrr[i];
+ }
}
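+
+/* Example (illustrative): two polled queues with weights wt(q0) = 2 and
+ * wt(q1) = 1 give max_wt = 2, gcd = 1 and max_wrr_pos = 3; the generated
+ * WRR sequence is q0, q0, q1, so q0 gets twice the polling share of q1.
+ */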
static inline void
-mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
+rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
struct ipv6_hdr **ipv6_hdr)
{
struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -307,7 +650,7 @@ mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
/* Calculate RSS hash for IPv4/6 */
static inline uint32_t
-do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
+rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
{
uint32_t input_len;
void *tuple;
@@ -316,7 +659,7 @@ do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
- mtoip(m, &ipv4_hdr, &ipv6_hdr);
+ rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
if (ipv4_hdr) {
ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
@@ -335,13 +678,13 @@ do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
}
static inline int
-rx_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
{
return !!rx_adapter->enq_block_count;
}
static inline void
-rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
{
if (rx_adapter->rx_enq_block_start_ts)
return;
@@ -354,13 +697,13 @@ rx_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
}
static inline void
-rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
struct rte_event_eth_rx_adapter_stats *stats)
{
if (unlikely(!stats->rx_enq_start_ts))
stats->rx_enq_start_ts = rte_get_tsc_cycles();
- if (likely(!rx_enq_blocked(rx_adapter)))
+ if (likely(!rxa_enq_blocked(rx_adapter)))
return;
rx_adapter->enq_block_count = 0;
@@ -376,8 +719,8 @@ rx_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
* this function
*/
static inline void
-buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
- struct rte_event *ev)
+rxa_buffer_event(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_event *ev)
{
struct rte_eth_event_enqueue_buffer *buf =
&rx_adapter->event_enqueue_buffer;
@@ -386,7 +729,7 @@ buf_event_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
/* Enqueue buffered events to event device */
static inline uint16_t
-flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
{
struct rte_eth_event_enqueue_buffer *buf =
&rx_adapter->event_enqueue_buffer;
@@ -403,8 +746,8 @@ flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
stats->rx_enq_retry++;
}
- n ? rx_enq_block_end_ts(rx_adapter, stats) :
- rx_enq_block_start_ts(rx_adapter);
+ n ? rxa_enq_block_end_ts(rx_adapter, stats) :
+ rxa_enq_block_start_ts(rx_adapter);
buf->count -= n;
stats->rx_enq_count += n;
@@ -413,18 +756,19 @@ flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
}
static inline void
-fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
- uint8_t dev_id,
- uint16_t rx_queue_id,
- struct rte_mbuf **mbufs,
- uint16_t num)
+rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t eth_dev_id,
+ uint16_t rx_queue_id,
+ struct rte_mbuf **mbufs,
+ uint16_t num)
{
uint32_t i;
- struct eth_device_info *eth_device_info =
- &rx_adapter->eth_devices[dev_id];
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[eth_dev_id];
struct eth_rx_queue_info *eth_rx_queue_info =
- &eth_device_info->rx_queue[rx_queue_id];
-
+ &dev_info->rx_queue[rx_queue_id];
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
int32_t qid = eth_rx_queue_info->event_queue_id;
uint8_t sched_type = eth_rx_queue_info->sched_type;
uint8_t priority = eth_rx_queue_info->priority;
@@ -434,22 +778,48 @@ fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
uint32_t rss_mask;
uint32_t rss;
int do_rss;
+ uint64_t ts;
+ struct rte_mbuf *cb_mbufs[BATCH_SIZE];
+ uint16_t nb_cb;
/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
+ if ((m->ol_flags & PKT_RX_TIMESTAMP) == 0) {
+ ts = rte_get_tsc_cycles();
+ for (i = 0; i < num; i++) {
+ m = mbufs[i];
+
+ m->timestamp = ts;
+ m->ol_flags |= PKT_RX_TIMESTAMP;
+ }
+ }
+
+ nb_cb = dev_info->cb_fn ? dev_info->cb_fn(eth_dev_id, rx_queue_id,
+ ETH_EVENT_BUFFER_SIZE,
+ buf->count, mbufs,
+ num,
+ dev_info->cb_arg,
+ cb_mbufs) :
+ num;
+ if (nb_cb < num) {
+ mbufs = cb_mbufs;
+ num = nb_cb;
+ }
+
for (i = 0; i < num; i++) {
m = mbufs[i];
struct rte_event *ev = &events[i];
rss = do_rss ?
- do_softrss(m, rx_adapter->rss_key_be) : m->hash.rss;
+ rxa_do_softrss(m, rx_adapter->rss_key_be) :
+ m->hash.rss;
flow_id =
eth_rx_queue_info->flow_id &
eth_rx_queue_info->flow_id_mask;
flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
-
ev->flow_id = flow_id;
ev->op = RTE_EVENT_OP_NEW;
ev->sched_type = sched_type;
@@ -459,8 +829,275 @@ fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
ev->priority = priority;
ev->mbuf = m;
- buf_event_enqueue(rx_adapter, ev);
+ rxa_buffer_event(rx_adapter, ev);
+ }
+}
+
+/* Enqueue packets from <port, q> to event buffer */
+static inline uint32_t
+rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t port_id,
+ uint16_t queue_id,
+ uint32_t rx_count,
+ uint32_t max_rx,
+ int *rxq_empty)
+{
+ struct rte_mbuf *mbufs[BATCH_SIZE];
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats =
+ &rx_adapter->stats;
+ uint16_t n;
+ uint32_t nb_rx = 0;
+
+ if (rxq_empty)
+ *rxq_empty = 0;
+ /* Don't do a batch dequeue from the rx queue if there isn't
+ * enough space in the enqueue buffer.
+ */
+ while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ stats->rx_poll_count++;
+ n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
+ if (unlikely(!n)) {
+ if (rxq_empty)
+ *rxq_empty = 1;
+ break;
+ }
+ rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
+ nb_rx += n;
+ if (rx_count + nb_rx > max_rx)
+ break;
+ }
+
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ return nb_rx;
+}
+
+static inline void
+rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
+ void *data)
+{
+ uint16_t port_id;
+ uint16_t queue;
+ int err;
+ union queue_data qd;
+ struct eth_device_info *dev_info;
+ struct eth_rx_queue_info *queue_info;
+ int *intr_enabled;
+
+ qd.ptr = data;
+ port_id = qd.port;
+ queue = qd.queue;
+
+ dev_info = &rx_adapter->eth_devices[port_id];
+ queue_info = &dev_info->rx_queue[queue];
+ rte_spinlock_lock(&rx_adapter->intr_ring_lock);
+ if (rxa_shared_intr(dev_info, queue))
+ intr_enabled = &dev_info->shared_intr_enabled;
+ else
+ intr_enabled = &queue_info->intr_enabled;
+
+ if (*intr_enabled) {
+ *intr_enabled = 0;
+ err = rte_ring_enqueue(rx_adapter->intr_ring, data);
+ /* Entry should always be available.
+ * The ring size equals the maximum number of interrupt
+ * vectors supported (an interrupt vector is shared in
+ * case of shared interrupts)
+ */
+ if (err)
+ RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
+ " to ring: %s", strerror(err));
+ else
+ rte_eth_dev_rx_intr_disable(port_id, queue);
+ }
+ rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
+}
+
+static int
+rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t num_intr_vec)
+{
+ if (rx_adapter->num_intr_vec + num_intr_vec >
+ RTE_EVENT_ETH_INTR_RING_SIZE) {
+ RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
+ " %d needed %d limit %d", rx_adapter->num_intr_vec,
+ num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/* Delete entries for (dev, queue) from the interrupt ring */
+static void
+rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int i, n;
+ union queue_data qd;
+
+ rte_spinlock_lock(&rx_adapter->intr_ring_lock);
+
+ n = rte_ring_count(rx_adapter->intr_ring);
+ for (i = 0; i < n; i++) {
+ rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
+ if (!rxa_shared_intr(dev_info, rx_queue_id)) {
+ if (qd.port == dev_info->dev->data->port_id &&
+ qd.queue == rx_queue_id)
+ continue;
+ } else {
+ if (qd.port == dev_info->dev->data->port_id)
+ continue;
+ }
+ rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
+ }
+
+ rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
+}
+
+/* pthread callback handling interrupt mode receive queues
+ * After receiving an Rx interrupt, it enqueues the port id and queue id of the
+ * interrupting queue to the adapter's ring buffer for interrupt events.
+ * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
+ * the adapter service function.
+ */
+static void *
+rxa_intr_thread(void *arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter = arg;
+ struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
+ int n, i;
+
+ while (1) {
+ n = rte_epoll_wait(rx_adapter->epd, epoll_events,
+ RTE_EVENT_ETH_INTR_RING_SIZE, -1);
+ if (unlikely(n < 0))
+ RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
+ n);
+ for (i = 0; i < n; i++) {
+ rxa_intr_ring_enqueue(rx_adapter,
+ epoll_events[i].epdata.data);
+ }
+ }
+
+ return NULL;
+}
+
+/* Dequeue <port, q> from interrupt ring and enqueue received
+ * mbufs to eventdev
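+ *
+ * Note (illustrative): with a shared interrupt the adapter cannot tell
+ * which queue raised it, so it scans all interrupt mode queues of the
+ * device starting at next_q_idx; if the event buffer fills mid-scan,
+ * next_q_idx records the queue to resume from on the next service call.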
+ */
+static inline uint32_t
+rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ uint32_t n;
+ uint32_t nb_rx = 0;
+ int rxq_empty;
+ struct rte_eth_event_enqueue_buffer *buf;
+ rte_spinlock_t *ring_lock;
+ uint8_t max_done = 0;
+
+ if (rx_adapter->num_rx_intr == 0)
+ return 0;
+
+ if (rte_ring_count(rx_adapter->intr_ring) == 0
+ && !rx_adapter->qd_valid)
+ return 0;
+
+ buf = &rx_adapter->event_enqueue_buffer;
+ ring_lock = &rx_adapter->intr_ring_lock;
+
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {
+ struct eth_device_info *dev_info;
+ uint16_t port;
+ uint16_t queue;
+ union queue_data qd = rx_adapter->qd;
+ int err;
+
+ if (!rx_adapter->qd_valid) {
+ struct eth_rx_queue_info *queue_info;
+
+ rte_spinlock_lock(ring_lock);
+ err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
+ if (err) {
+ rte_spinlock_unlock(ring_lock);
+ break;
+ }
+
+ port = qd.port;
+ queue = qd.queue;
+ rx_adapter->qd = qd;
+ rx_adapter->qd_valid = 1;
+ dev_info = &rx_adapter->eth_devices[port];
+ if (rxa_shared_intr(dev_info, queue))
+ dev_info->shared_intr_enabled = 1;
+ else {
+ queue_info = &dev_info->rx_queue[queue];
+ queue_info->intr_enabled = 1;
+ }
+ rte_eth_dev_rx_intr_enable(port, queue);
+ rte_spinlock_unlock(ring_lock);
+ } else {
+ port = qd.port;
+ queue = qd.queue;
+
+ dev_info = &rx_adapter->eth_devices[port];
+ }
+
+ if (rxa_shared_intr(dev_info, queue)) {
+ uint16_t i;
+ uint16_t nb_queues;
+
+ nb_queues = dev_info->dev->data->nb_rx_queues;
+ n = 0;
+ for (i = dev_info->next_q_idx; i < nb_queues; i++) {
+ uint8_t enq_buffer_full;
+
+ if (!rxa_intr_queue(dev_info, i))
+ continue;
+ n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
+ rx_adapter->max_nb_rx,
+ &rxq_empty);
+ nb_rx += n;
+
+ enq_buffer_full = !rxq_empty && n == 0;
+ max_done = nb_rx > rx_adapter->max_nb_rx;
+
+ if (enq_buffer_full || max_done) {
+ dev_info->next_q_idx = i;
+ goto done;
+ }
+ }
+
+ rx_adapter->qd_valid = 0;
+
+ /* Reinitialize for next interrupt */
+ dev_info->next_q_idx = dev_info->multi_intr_cap ?
+ RTE_MAX_RXTX_INTR_VEC_ID - 1 :
+ 0;
+ } else {
+ n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
+ rx_adapter->max_nb_rx,
+ &rxq_empty);
+ rx_adapter->qd_valid = !rxq_empty;
+ nb_rx += n;
+ if (nb_rx > rx_adapter->max_nb_rx)
+ break;
+ }
}
+
+done:
+ rx_adapter->stats.rx_intr_packets += nb_rx;
+ return nb_rx;
}
/*
@@ -477,12 +1114,10 @@ fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
* it.
*/
static inline uint32_t
-eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
{
uint32_t num_queue;
- uint16_t n;
uint32_t nb_rx = 0;
- struct rte_mbuf *mbufs[BATCH_SIZE];
struct rte_eth_event_enqueue_buffer *buf;
uint32_t wrr_pos;
uint32_t max_nb_rx;
@@ -490,57 +1125,54 @@ eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
wrr_pos = rx_adapter->wrr_pos;
max_nb_rx = rx_adapter->max_nb_rx;
buf = &rx_adapter->event_enqueue_buffer;
- struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
+ stats = &rx_adapter->stats;
/* Iterate through a WRR sequence */
for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
- uint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
+ uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
/* Don't do a batch dequeue from the rx queue if there isn't
* enough space in the enqueue buffer.
*/
if (buf->count >= BATCH_SIZE)
- flush_event_buffer(rx_adapter);
- if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))
- break;
-
- stats->rx_poll_count++;
- n = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);
+ rxa_flush_event_buffer(rx_adapter);
+ if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {
+ rx_adapter->wrr_pos = wrr_pos;
+ return nb_rx;
+ }
- if (n) {
- stats->rx_packets += n;
- /* The check before rte_eth_rx_burst() ensures that
- * all n mbufs can be buffered
- */
- fill_event_buffer(rx_adapter, d, qid, mbufs, n);
- nb_rx += n;
- if (nb_rx > max_nb_rx) {
- rx_adapter->wrr_pos =
+ nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
+ NULL);
+ if (nb_rx > max_nb_rx) {
+ rx_adapter->wrr_pos =
(wrr_pos + 1) % rx_adapter->wrr_len;
- return nb_rx;
- }
+ break;
}
if (++wrr_pos == rx_adapter->wrr_len)
wrr_pos = 0;
}
-
return nb_rx;
}
static int
-event_eth_rx_adapter_service_func(void *args)
+rxa_service_func(void *args)
{
struct rte_event_eth_rx_adapter *rx_adapter = args;
- struct rte_eth_event_enqueue_buffer *buf;
+ struct rte_event_eth_rx_adapter_stats *stats;
- buf = &rx_adapter->event_enqueue_buffer;
if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
return 0;
- if (eth_rx_poll(rx_adapter) == 0 && buf->count)
- flush_event_buffer(rx_adapter);
+ if (!rx_adapter->rxa_started) {
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ return 0;
+ }
+
+ stats = &rx_adapter->stats;
+ stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
+ stats->rx_packets += rxa_poll(rx_adapter);
rte_spinlock_unlock(&rx_adapter->rx_lock);
return 0;
}
@@ -572,14 +1204,14 @@ rte_event_eth_rx_adapter_init(void)
}
static inline struct rte_event_eth_rx_adapter *
-id_to_rx_adapter(uint8_t id)
+rxa_id_to_adapter(uint8_t id)
{
return event_eth_rx_adapter ?
event_eth_rx_adapter[id] : NULL;
}
static int
-default_conf_cb(uint8_t id, uint8_t dev_id,
+rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
struct rte_event_eth_rx_adapter_conf *conf, void *arg)
{
int ret;
@@ -588,7 +1220,7 @@ default_conf_cb(uint8_t id, uint8_t dev_id,
int started;
uint8_t port_id;
struct rte_event_port_conf *port_conf = arg;
- struct rte_event_eth_rx_adapter *rx_adapter = id_to_rx_adapter(id);
+ struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
dev = &rte_eventdevs[rx_adapter->eventdev_id];
dev_conf = dev->data->dev_conf;
@@ -625,7 +1257,351 @@ default_conf_cb(uint8_t id, uint8_t dev_id,
}
static int
-init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+rxa_epoll_create1(void)
+{
+#if defined(LINUX)
+ int fd;
+ fd = epoll_create1(EPOLL_CLOEXEC);
+ return fd < 0 ? -errno : fd;
+#elif defined(BSD)
+ return -ENOTSUP;
+#endif
+}
+
+static int
+rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ if (rx_adapter->epd != INIT_FD)
+ return 0;
+
+ rx_adapter->epd = rxa_epoll_create1();
+ if (rx_adapter->epd < 0) {
+ int err = rx_adapter->epd;
+ rx_adapter->epd = INIT_FD;
+ RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int err;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
+
+ if (rx_adapter->intr_ring)
+ return 0;
+
+ rx_adapter->intr_ring = rte_ring_create("intr_ring",
+ RTE_EVENT_ETH_INTR_RING_SIZE,
+ rte_socket_id(), 0);
+ if (!rx_adapter->intr_ring)
+ return -ENOMEM;
+
+ rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
+ RTE_EVENT_ETH_INTR_RING_SIZE *
+ sizeof(struct rte_epoll_event),
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+ if (!rx_adapter->epoll_events) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ rte_spinlock_init(&rx_adapter->intr_ring_lock);
+
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
+ "rx-intr-thread-%d", rx_adapter->id);
+
+ err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
+ NULL, rxa_intr_thread, rx_adapter);
+ if (!err) {
+ rte_thread_setname(rx_adapter->rx_intr_thread, thread_name);
+ return 0;
+ }
+
+ RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
+error:
+ rte_ring_free(rx_adapter->intr_ring);
+ rx_adapter->intr_ring = NULL;
+ rx_adapter->epoll_events = NULL;
+ return err;
+}
+
+static int
+rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int err;
+
+ err = pthread_cancel(rx_adapter->rx_intr_thread);
+ if (err)
+ RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
+ err);
+
+ err = pthread_join(rx_adapter->rx_intr_thread, NULL);
+ if (err)
+ RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
+
+ rte_free(rx_adapter->epoll_events);
+ rte_ring_free(rx_adapter->intr_ring);
+ rx_adapter->intr_ring = NULL;
+ rx_adapter->epoll_events = NULL;
+ return 0;
+}
+
+static int
+rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int ret;
+
+ if (rx_adapter->num_rx_intr == 0)
+ return 0;
+
+ ret = rxa_destroy_intr_thread(rx_adapter);
+ if (ret)
+ return ret;
+
+ close(rx_adapter->epd);
+ rx_adapter->epd = INIT_FD;
+
+ return ret;
+}
+
+static int
+rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int err;
+ uint16_t eth_dev_id = dev_info->dev->data->port_id;
+ int sintr = rxa_shared_intr(dev_info, rx_queue_id);
+
+ err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
+ rx_queue_id);
+ return err;
+ }
+
+ err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_DEL,
+ 0);
+ if (err)
+ RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
+
+ if (sintr)
+ dev_info->shared_intr_enabled = 0;
+ else
+ dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
+ return err;
+}
+
+static int
+rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ int err;
+ int i;
+ int s;
+
+ if (dev_info->nb_rx_intr == 0)
+ return 0;
+
+ err = 0;
+ if (rx_queue_id == -1) {
+ s = dev_info->nb_shared_intr;
+ for (i = 0; i < dev_info->nb_rx_intr; i++) {
+ int sintr;
+ uint16_t q;
+
+ q = dev_info->intr_queue[i];
+ sintr = rxa_shared_intr(dev_info, q);
+ s -= sintr;
+
+ if (!sintr || s == 0) {
+
+ err = rxa_disable_intr(rx_adapter, dev_info,
+ q);
+ if (err)
+ return err;
+ rxa_intr_ring_del_entries(rx_adapter, dev_info,
+ q);
+ }
+ }
+ } else {
+ if (!rxa_intr_queue(dev_info, rx_queue_id))
+ return 0;
+ if (!rxa_shared_intr(dev_info, rx_queue_id) ||
+ dev_info->nb_shared_intr == 1) {
+ err = rxa_disable_intr(rx_adapter, dev_info,
+ rx_queue_id);
+ if (err)
+ return err;
+ rxa_intr_ring_del_entries(rx_adapter, dev_info,
+ rx_queue_id);
+ }
+
+ for (i = 0; i < dev_info->nb_rx_intr; i++) {
+ if (dev_info->intr_queue[i] == rx_queue_id) {
+ for (; i < dev_info->nb_rx_intr - 1; i++)
+ dev_info->intr_queue[i] =
+ dev_info->intr_queue[i + 1];
+ break;
+ }
+ }
+ }
+
+ return err;
+}
+
+static int
+rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int err, err1;
+ uint16_t eth_dev_id = dev_info->dev->data->port_id;
+ union queue_data qd;
+ int init_fd;
+ uint16_t *intr_queue;
+ int sintr = rxa_shared_intr(dev_info, rx_queue_id);
+
+ if (rxa_intr_queue(dev_info, rx_queue_id))
+ return 0;
+
+ intr_queue = dev_info->intr_queue;
+ if (dev_info->intr_queue == NULL) {
+ size_t len =
+ dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
+ dev_info->intr_queue =
+ rte_zmalloc_socket(
+ rx_adapter->mem_name,
+ len,
+ 0,
+ rx_adapter->socket_id);
+ if (dev_info->intr_queue == NULL)
+ return -ENOMEM;
+ }
+
+ init_fd = rx_adapter->epd;
+ err = rxa_init_epd(rx_adapter);
+ if (err)
+ goto err_free_queue;
+
+ qd.port = eth_dev_id;
+ qd.queue = rx_queue_id;
+
+ err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_ADD,
+ qd.ptr);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+ goto err_del_fd;
+ }
+
+ err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Could not enable interrupt for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+
+ goto err_del_event;
+ }
+
+ err = rxa_create_intr_thread(rx_adapter);
+ if (!err) {
+ if (sintr)
+ dev_info->shared_intr_enabled = 1;
+ else
+ dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
+ return 0;
+ }
+
+ err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
+ if (err)
+ RTE_EDEV_LOG_ERR("Could not disable interrupt for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+err_del_event:
+ err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_DEL,
+ 0);
+ if (err1) {
+ RTE_EDEV_LOG_ERR("Could not delete event for"
+ " Rx Queue %u err %d", rx_queue_id, err1);
+ }
+err_del_fd:
+ if (init_fd == INIT_FD) {
+ close(rx_adapter->epd);
+ rx_adapter->epd = INIT_FD;
+ }
+err_free_queue:
+ if (intr_queue == NULL)
+ rte_free(dev_info->intr_queue);
+
+ return err;
+}
+
+static int
+rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ int i, j, err;
+ int si = -1;
+ int shared_done = (dev_info->nb_shared_intr > 0);
+
+ if (rx_queue_id != -1) {
+ if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
+ return 0;
+ return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
+ }
+
+ err = 0;
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
+
+ if (rxa_shared_intr(dev_info, i) && shared_done)
+ continue;
+
+ err = rxa_config_intr(rx_adapter, dev_info, i);
+
+ shared_done = err == 0 && rxa_shared_intr(dev_info, i);
+ if (shared_done) {
+ si = i;
+ dev_info->shared_intr_enabled = 1;
+ }
+ if (err)
+ break;
+ }
+
+ if (err == 0)
+ return 0;
+
+ shared_done = (dev_info->nb_shared_intr > 0);
+ for (j = 0; j < i; j++) {
+ if (rxa_intr_queue(dev_info, j))
+ continue;
+ if (rxa_shared_intr(dev_info, j) && si != j)
+ continue;
+ err = rxa_disable_intr(rx_adapter, dev_info, j);
+ if (err)
+ break;
+
+ }
+
+ return err;
+}
+
+static int
+rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
{
int ret;
struct rte_service_spec service;
@@ -638,7 +1614,7 @@ init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
"rte_event_eth_rx_adapter_%d", id);
service.socket_id = rx_adapter->socket_id;
- service.callback = event_eth_rx_adapter_service_func;
+ service.callback = rxa_service_func;
service.callback_userdata = rx_adapter;
/* Service function handles locking for queue add/del updates */
service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
@@ -659,6 +1635,7 @@ init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
rx_adapter->service_inited = 1;
+ rx_adapter->epd = INIT_FD;
return 0;
err_done:
@@ -666,9 +1643,8 @@ err_done:
return ret;
}
-
static void
-update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
+rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
struct eth_device_info *dev_info,
int32_t rx_queue_id,
uint8_t add)
@@ -682,7 +1658,7 @@ update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
if (rx_queue_id == -1) {
for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
- update_queue_info(rx_adapter, dev_info, i, add);
+ rxa_update_queue(rx_adapter, dev_info, i, add);
} else {
queue_info = &dev_info->rx_queue[rx_queue_id];
enabled = queue_info->queue_enabled;
@@ -697,31 +1673,65 @@ update_queue_info(struct rte_event_eth_rx_adapter *rx_adapter,
}
}
-static int
-event_eth_rx_adapter_queue_del(struct rte_event_eth_rx_adapter *rx_adapter,
- struct eth_device_info *dev_info,
- uint16_t rx_queue_id)
+static void
+rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id)
{
- struct eth_rx_queue_info *queue_info;
+ int pollq;
+ int intrq;
+ int sintrq;
+
if (rx_adapter->nb_queues == 0)
- return 0;
+ return;
- queue_info = &dev_info->rx_queue[rx_queue_id];
- rx_adapter->num_rx_polled -= queue_info->queue_enabled;
- update_queue_info(rx_adapter, dev_info, rx_queue_id, 0);
- return 0;
+ if (rx_queue_id == -1) {
+ uint16_t nb_rx_queues;
+ uint16_t i;
+
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ for (i = 0; i < nb_rx_queues; i++)
+ rxa_sw_del(rx_adapter, dev_info, i);
+ return;
+ }
+
+ pollq = rxa_polled_queue(dev_info, rx_queue_id);
+ intrq = rxa_intr_queue(dev_info, rx_queue_id);
+ sintrq = rxa_shared_intr(dev_info, rx_queue_id);
+ rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
+ rx_adapter->num_rx_polled -= pollq;
+ dev_info->nb_rx_poll -= pollq;
+ rx_adapter->num_rx_intr -= intrq;
+ dev_info->nb_rx_intr -= intrq;
+ dev_info->nb_shared_intr -= intrq && sintrq;
}
static void
-event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,
- struct eth_device_info *dev_info,
- uint16_t rx_queue_id,
- const struct rte_event_eth_rx_adapter_queue_conf *conf)
-
+rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *conf)
{
struct eth_rx_queue_info *queue_info;
const struct rte_event *ev = &conf->ev;
+ int pollq;
+ int intrq;
+ int sintrq;
+
+ if (rx_queue_id == -1) {
+ uint16_t nb_rx_queues;
+ uint16_t i;
+
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ for (i = 0; i < nb_rx_queues; i++)
+ rxa_add_queue(rx_adapter, dev_info, i, conf);
+ return;
+ }
+
+ pollq = rxa_polled_queue(dev_info, rx_queue_id);
+ intrq = rxa_intr_queue(dev_info, rx_queue_id);
+ sintrq = rxa_shared_intr(dev_info, rx_queue_id);
queue_info = &dev_info->rx_queue[rx_queue_id];
queue_info->event_queue_id = ev->queue_id;
@@ -735,69 +1745,162 @@ event_eth_rx_adapter_queue_add(struct rte_event_eth_rx_adapter *rx_adapter,
queue_info->flow_id_mask = ~0;
}
- /* The same queue can be added more than once */
- rx_adapter->num_rx_polled += !queue_info->queue_enabled;
- update_queue_info(rx_adapter, dev_info, rx_queue_id, 1);
+ rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
+ if (rxa_polled_queue(dev_info, rx_queue_id)) {
+ rx_adapter->num_rx_polled += !pollq;
+ dev_info->nb_rx_poll += !pollq;
+ rx_adapter->num_rx_intr -= intrq;
+ dev_info->nb_rx_intr -= intrq;
+ dev_info->nb_shared_intr -= intrq && sintrq;
+ }
+
+ if (rxa_intr_queue(dev_info, rx_queue_id)) {
+ rx_adapter->num_rx_polled -= pollq;
+ dev_info->nb_rx_poll -= pollq;
+ rx_adapter->num_rx_intr += !intrq;
+ dev_info->nb_rx_intr += !intrq;
+ dev_info->nb_shared_intr += !intrq && sintrq;
+ if (dev_info->nb_shared_intr == 1) {
+ if (dev_info->multi_intr_cap)
+ dev_info->next_q_idx =
+ RTE_MAX_RXTX_INTR_VEC_ID - 1;
+ else
+ dev_info->next_q_idx = 0;
+ }
+ }
}
-static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
- uint8_t eth_dev_id,
+static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t eth_dev_id,
int rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
struct rte_event_eth_rx_adapter_queue_conf temp_conf;
- uint32_t i;
int ret;
+ struct eth_rx_poll_entry *rx_poll;
+ struct eth_rx_queue_info *rx_queue;
+ uint32_t *rx_wrr;
+ uint16_t nb_rx_queues;
+ uint32_t nb_rx_poll, nb_wrr;
+ uint32_t nb_rx_intr;
+ int num_intr_vec;
+ uint16_t wt;
if (queue_conf->servicing_weight == 0) {
-
struct rte_eth_dev_data *data = dev_info->dev->data;
- if (data->dev_conf.intr_conf.rxq) {
- RTE_EDEV_LOG_ERR("Interrupt driven queues"
- " not supported");
- return -ENOTSUP;
- }
- temp_conf = *queue_conf;
- /* If Rx interrupts are disabled set wt = 1 */
- temp_conf.servicing_weight = 1;
+ temp_conf = *queue_conf;
+ if (!data->dev_conf.intr_conf.rxq) {
+ /* If Rx interrupts are disabled set wt = 1 */
+ temp_conf.servicing_weight = 1;
+ }
queue_conf = &temp_conf;
}
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ rx_queue = dev_info->rx_queue;
+ wt = queue_conf->servicing_weight;
+
if (dev_info->rx_queue == NULL) {
dev_info->rx_queue =
rte_zmalloc_socket(rx_adapter->mem_name,
- dev_info->dev->data->nb_rx_queues *
+ nb_rx_queues *
sizeof(struct eth_rx_queue_info), 0,
rx_adapter->socket_id);
if (dev_info->rx_queue == NULL)
return -ENOMEM;
}
+ rx_wrr = NULL;
+ rx_poll = NULL;
- if (rx_queue_id == -1) {
- for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
- event_eth_rx_adapter_queue_add(rx_adapter,
- dev_info, i,
- queue_conf);
+ rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
+ queue_conf->servicing_weight,
+ &nb_rx_poll, &nb_rx_intr, &nb_wrr);
+
+ if (dev_info->dev->intr_handle)
+ dev_info->multi_intr_cap =
+ rte_intr_cap_multiple(dev_info->dev->intr_handle);
+
+ ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
+ &rx_poll, &rx_wrr);
+ if (ret)
+ goto err_free_rxqueue;
+
+ if (wt == 0) {
+ num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
+
+ ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
+ if (ret)
+ goto err_free_rxqueue;
+
+ ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
+ if (ret)
+ goto err_free_rxqueue;
} else {
- event_eth_rx_adapter_queue_add(rx_adapter, dev_info,
- (uint16_t)rx_queue_id,
- queue_conf);
+
+ num_intr_vec = 0;
+ if (rx_adapter->num_rx_intr > nb_rx_intr) {
+ num_intr_vec = rxa_nb_intr_vect(dev_info,
+ rx_queue_id, 0);
+ /* interrupt based queues are being converted to
+ * poll mode queues, delete the interrupt configuration
+ * for those.
+ */
+ ret = rxa_del_intr_queue(rx_adapter,
+ dev_info, rx_queue_id);
+ if (ret)
+ goto err_free_rxqueue;
+ }
}
- ret = eth_poll_wrr_calc(rx_adapter);
- if (ret) {
- event_eth_rx_adapter_queue_del(rx_adapter,
- dev_info, rx_queue_id);
- return ret;
+ if (nb_rx_intr == 0) {
+ ret = rxa_free_intr_resources(rx_adapter);
+ if (ret)
+ goto err_free_rxqueue;
}
- return ret;
+ if (wt == 0) {
+ uint16_t i;
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ dev_info->intr_queue[i] = i;
+ } else {
+ if (!rxa_intr_queue(dev_info, rx_queue_id))
+ dev_info->intr_queue[nb_rx_intr - 1] =
+ rx_queue_id;
+ }
+ }
+
+ rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+ rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
+
+ rte_free(rx_adapter->eth_rx_poll);
+ rte_free(rx_adapter->wrr_sched);
+
+ rx_adapter->eth_rx_poll = rx_poll;
+ rx_adapter->wrr_sched = rx_wrr;
+ rx_adapter->wrr_len = nb_wrr;
+ rx_adapter->num_intr_vec += num_intr_vec;
+ return 0;
+
+err_free_rxqueue:
+ if (rx_queue == NULL) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+
+ rte_free(rx_poll);
+ rte_free(rx_wrr);
+
+ return ret;
}
static int
-rx_adapter_ctrl(uint8_t id, int start)
+rxa_ctrl(uint8_t id, int start)
{
struct rte_event_eth_rx_adapter *rx_adapter;
struct rte_eventdev *dev;
@@ -807,13 +1910,13 @@ rx_adapter_ctrl(uint8_t id, int start)
int stop = !start;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
return -EINVAL;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
/* if start check for num dev queues */
if (start && !dev_info->nb_dev_queues)
@@ -831,8 +1934,12 @@ rx_adapter_ctrl(uint8_t id, int start)
&rte_eth_devices[i]);
}
- if (use_service)
+ if (use_service) {
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ rx_adapter->rxa_started = start;
rte_service_runstate_set(rx_adapter->service_id, start);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ }
return 0;
}
@@ -845,7 +1952,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
struct rte_event_eth_rx_adapter *rx_adapter;
int ret;
int socket_id;
- uint8_t i;
+ uint16_t i;
char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
const uint8_t default_rss_key[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
@@ -866,7 +1973,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
return ret;
}
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter != NULL) {
RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
return -EEXIST;
@@ -888,9 +1995,11 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
rx_adapter->socket_id = socket_id;
rx_adapter->conf_cb = conf_cb;
rx_adapter->conf_arg = conf_arg;
+ rx_adapter->id = id;
strcpy(rx_adapter->mem_name, mem_name);
rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
- rte_eth_dev_count() *
+ /* FIXME: incompatible with hotplug */
+ rte_eth_dev_count_total() *
sizeof(struct eth_device_info), 0,
socket_id);
rte_convert_rss_key((const uint32_t *)default_rss_key,
@@ -903,11 +2012,11 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
return -ENOMEM;
}
rte_spinlock_init(&rx_adapter->rx_lock);
- for (i = 0; i < rte_eth_dev_count(); i++)
+ RTE_ETH_FOREACH_DEV(i)
rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
event_eth_rx_adapter[id] = rx_adapter;
- if (conf_cb == default_conf_cb)
+ if (conf_cb == rxa_default_conf_cb)
rx_adapter->default_cb_arg = 1;
return 0;
}
@@ -928,7 +2037,7 @@ rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
return -ENOMEM;
*pc = *port_config;
ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
- default_conf_cb,
+ rxa_default_conf_cb,
pc);
if (ret)
rte_free(pc);
@@ -942,7 +2051,7 @@ rte_event_eth_rx_adapter_free(uint8_t id)
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
return -EINVAL;
@@ -963,7 +2072,7 @@ rte_event_eth_rx_adapter_free(uint8_t id)
int
rte_event_eth_rx_adapter_queue_add(uint8_t id,
- uint8_t eth_dev_id,
+ uint16_t eth_dev_id,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
@@ -972,12 +2081,11 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
struct rte_event_eth_rx_adapter *rx_adapter;
struct rte_eventdev *dev;
struct eth_device_info *dev_info;
- int start_service;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if ((rx_adapter == NULL) || (queue_conf == NULL))
return -EINVAL;
@@ -987,7 +2095,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
&cap);
if (ret) {
RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
- "eth port %" PRIu8, id, eth_dev_id);
+ "eth port %" PRIu16, id, eth_dev_id);
return ret;
}
@@ -995,7 +2103,7 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
&& (queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
- " eth port: %" PRIu8 " adapter id: %" PRIu8,
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
@@ -1003,7 +2111,8 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
(rx_queue_id != -1)) {
RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
- "event queue id %u eth port %u", id, eth_dev_id);
+ "event queue, eth port: %" PRIu16 " adapter id: %"
+ PRIu8, eth_dev_id, id);
return -EINVAL;
}
@@ -1014,7 +2123,6 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
return -EINVAL;
}
- start_service = 0;
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
@@ -1034,33 +2142,34 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
&rte_eth_devices[eth_dev_id],
rx_queue_id, queue_conf);
if (ret == 0) {
- update_queue_info(rx_adapter,
+ dev_info->internal_event_port = 1;
+ rxa_update_queue(rx_adapter,
&rx_adapter->eth_devices[eth_dev_id],
rx_queue_id,
1);
}
} else {
rte_spinlock_lock(&rx_adapter->rx_lock);
- ret = init_service(rx_adapter, id);
- if (ret == 0)
- ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,
+ dev_info->internal_event_port = 0;
+ ret = rxa_init_service(rx_adapter, id);
+ if (ret == 0) {
+ uint32_t service_id = rx_adapter->service_id;
+ ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
queue_conf);
+ rte_service_component_runstate_set(service_id,
+ rxa_sw_adapter_queue_count(rx_adapter));
+ }
rte_spinlock_unlock(&rx_adapter->rx_lock);
- if (ret == 0)
- start_service = !!sw_rx_adapter_queue_count(rx_adapter);
}
if (ret)
return ret;
- if (start_service)
- rte_service_component_runstate_set(rx_adapter->service_id, 1);
-
return 0;
}
int
-rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
int32_t rx_queue_id)
{
int ret = 0;
@@ -1068,12 +2177,17 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
struct rte_event_eth_rx_adapter *rx_adapter;
struct eth_device_info *dev_info;
uint32_t cap;
- uint16_t i;
+ uint32_t nb_rx_poll = 0;
+ uint32_t nb_wrr = 0;
+ uint32_t nb_rx_intr;
+ struct eth_rx_poll_entry *rx_poll = NULL;
+ uint32_t *rx_wrr = NULL;
+ int num_intr_vec;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
return -EINVAL;
@@ -1100,7 +2214,7 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
&rte_eth_devices[eth_dev_id],
rx_queue_id);
if (ret == 0) {
- update_queue_info(rx_adapter,
+ rxa_update_queue(rx_adapter,
&rx_adapter->eth_devices[eth_dev_id],
rx_queue_id,
0);
@@ -1110,48 +2224,78 @@ rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
}
}
} else {
- int rc;
+ rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
+ &nb_rx_poll, &nb_rx_intr, &nb_wrr);
+
+ ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
+ &rx_poll, &rx_wrr);
+ if (ret)
+ return ret;
+
rte_spinlock_lock(&rx_adapter->rx_lock);
- if (rx_queue_id == -1) {
- for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
- event_eth_rx_adapter_queue_del(rx_adapter,
- dev_info,
- i);
- } else {
- event_eth_rx_adapter_queue_del(rx_adapter,
- dev_info,
- (uint16_t)rx_queue_id);
+
+ num_intr_vec = 0;
+ if (rx_adapter->num_rx_intr > nb_rx_intr) {
+
+ num_intr_vec = rxa_nb_intr_vect(dev_info,
+ rx_queue_id, 0);
+ ret = rxa_del_intr_queue(rx_adapter, dev_info,
+ rx_queue_id);
+ if (ret)
+ goto unlock_ret;
+ }
+
+ if (nb_rx_intr == 0) {
+ ret = rxa_free_intr_resources(rx_adapter);
+ if (ret)
+ goto unlock_ret;
+ }
+
+ rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
+ rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
+
+ rte_free(rx_adapter->eth_rx_poll);
+ rte_free(rx_adapter->wrr_sched);
+
+ if (nb_rx_intr == 0) {
+ rte_free(dev_info->intr_queue);
+ dev_info->intr_queue = NULL;
}
- rc = eth_poll_wrr_calc(rx_adapter);
- if (rc)
- RTE_EDEV_LOG_ERR("WRR recalculation failed %" PRId32,
- rc);
+ rx_adapter->eth_rx_poll = rx_poll;
+ rx_adapter->wrr_sched = rx_wrr;
+ rx_adapter->wrr_len = nb_wrr;
+ rx_adapter->num_intr_vec += num_intr_vec;
if (dev_info->nb_dev_queues == 0) {
rte_free(dev_info->rx_queue);
dev_info->rx_queue = NULL;
}
-
+unlock_ret:
rte_spinlock_unlock(&rx_adapter->rx_lock);
+ if (ret) {
+ rte_free(rx_poll);
+ rte_free(rx_wrr);
+ return ret;
+ }
+
rte_service_component_runstate_set(rx_adapter->service_id,
- sw_rx_adapter_queue_count(rx_adapter));
+ rxa_sw_adapter_queue_count(rx_adapter));
}
return ret;
}
-
int
rte_event_eth_rx_adapter_start(uint8_t id)
{
- return rx_adapter_ctrl(id, 1);
+ return rxa_ctrl(id, 1);
}
int
rte_event_eth_rx_adapter_stop(uint8_t id)
{
- return rx_adapter_ctrl(id, 0);
+ return rxa_ctrl(id, 0);
}
int
@@ -1168,13 +2312,13 @@ rte_event_eth_rx_adapter_stats_get(uint8_t id,
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL || stats == NULL)
return -EINVAL;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
memset(stats, 0, sizeof(*stats));
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
if (dev_info->internal_event_port == 0 ||
dev->dev_ops->eth_rx_adapter_stats_get == NULL)
@@ -1206,12 +2350,12 @@ rte_event_eth_rx_adapter_stats_reset(uint8_t id)
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL)
return -EINVAL;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
if (dev_info->internal_event_port == 0 ||
dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
@@ -1231,7 +2375,7 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
- rx_adapter = id_to_rx_adapter(id);
+ rx_adapter = rxa_id_to_adapter(id);
if (rx_adapter == NULL || service_id == NULL)
return -EINVAL;
@@ -1240,3 +2384,47 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
return rx_adapter->service_inited ? 0 : -ESRCH;
}
+
+int rte_event_eth_rx_adapter_cb_register(uint8_t id,
+ uint16_t eth_dev_id,
+ rte_event_eth_rx_adapter_cb_fn cb_fn,
+ void *cb_arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct eth_device_info *dev_info;
+ uint32_t cap;
+ int ret;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+ if (dev_info->rx_queue == NULL)
+ return -EINVAL;
+
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+ "eth port %" PRIu16, id, eth_dev_id);
+ return ret;
+ }
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
+ PRIu16, eth_dev_id);
+ return -EINVAL;
+ }
+
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ dev_info->cb_fn = cb_fn;
+ dev_info->cb_arg = cb_arg;
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+
+ return 0;
+}
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
index c20507b2..332ee216 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
@@ -1,32 +1,6 @@
-/*
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
*/
#ifndef _RTE_EVENT_ETH_RX_ADAPTER_
@@ -47,7 +21,11 @@
*
* The adapter uses a EAL service core function for SW based packet transfer
* and uses the eventdev PMD functions to configure HW based packet transfer
- * between the ethernet device and the event device.
+ * between the ethernet device and the event device. For SW based packet
+ * transfer, if the mbuf does not have a timestamp set, the adapter adds a
+ * timestamp to the mbuf using rte_get_tsc_cycles(); this is more accurate
+ * than a timestamp set by the application, since it avoids the event device
+ * scheduling latency.
*
* The ethernet Rx event adapter's functions are:
* - rte_event_eth_rx_adapter_create_ext()
@@ -85,7 +63,23 @@
* rte_event_eth_rx_adapter_service_id_get() function can be used to retrieve
* the service function ID of the adapter in this case.
*
- * Note: Interrupt driven receive queues are currently unimplemented.
+ * For SW based packet transfers, i.e., when the
+ * RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is not set in the adapter's
+ * capabilities flags for a particular ethernet device, the service function
+ * temporarily enqueues mbufs to an event buffer before batch enqueueing these
+ * to the event device. If the buffer fills up, the service function stops
+ * dequeueing packets from the ethernet device. The application may want to
+ * monitor the buffer fill level and instruct the service function to
+ * selectively buffer packets. The application may also use some other
+ * criteria to decide which packets should enter the event device even when
+ * the event buffer fill level is low. The
+ * rte_event_eth_rx_adapter_cb_register() function allows the
+ * application to register a callback that selects which packets to enqueue
+ * to the event device.
+ *
+ * Note:
+ * 1) Ethernet devices created after an instance of the Rx adapter has been
+ * created with rte_event_eth_rx_adapter_create() must be added to a new
+ * instance of the adapter.
*/
#ifdef __cplusplus
@@ -218,12 +212,55 @@ struct rte_event_eth_rx_adapter_stats {
* block cycles can be used to compute the percentage of
* cycles the service is blocked by the event device.
*/
+ uint64_t rx_intr_packets;
+ /**< Received packet count for interrupt mode Rx queues */
};
/**
* @warning
* @b EXPERIMENTAL: this API may change without prior notice
*
+ * Callback function invoked by the SW adapter before it continues
+ * to process packets. The callback is passed the size of the enqueue
+ * buffer in the SW adapter and the occupancy of the buffer. The
+ * callback can use these values to decide which mbufs should be
+ * enqueued to the event device. If the return value of the callback
+ * is less than nb_mbuf then the SW adapter uses the return value to
+ * enqueue enq_mbuf[] to the event device.
+ *
+ * @param eth_dev_id
+ * Port identifier of the Ethernet device.
+ * @param queue_id
+ * Receive queue index.
+ * @param enqueue_buf_size
+ * Total enqueue buffer size.
+ * @param enqueue_buf_count
+ * mbuf count in enqueue buffer.
+ * @param mbuf
+ * mbuf array.
+ * @param nb_mbuf
+ * mbuf count.
+ * @param cb_arg
+ * Callback argument.
+ * @param[out] enq_buf
+ *  The adapter enqueues enq_buf[] if the return value of the
+ *  callback is less than nb_mbuf.
+ * @return
+ *  The number of mbufs that should be enqueued to the event device.
+ */
+typedef uint16_t (*rte_event_eth_rx_adapter_cb_fn)(uint16_t eth_dev_id,
+ uint16_t queue_id,
+ uint32_t enqueue_buf_size,
+ uint32_t enqueue_buf_count,
+ struct rte_mbuf **mbuf,
+ uint16_t nb_mbuf,
+ void *cb_arg,
+ struct rte_mbuf **enq_buf);
+
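As an illustration of the contract described above, a minimal callback sketch (not part of the patch; the drop-on-congestion policy is arbitrary, and it assumes the callback owns any mbufs it filters out, which should be verified against the adapter implementation):

#include <rte_common.h>
#include <rte_mbuf.h>

static uint16_t
app_rx_cb(uint16_t eth_dev_id, uint16_t queue_id,
	  uint32_t enqueue_buf_size, uint32_t enqueue_buf_count,
	  struct rte_mbuf **mbuf, uint16_t nb_mbuf,
	  void *cb_arg, struct rte_mbuf **enq_buf)
{
	uint16_t i;

	RTE_SET_USED(eth_dev_id);
	RTE_SET_USED(queue_id);
	RTE_SET_USED(cb_arg);

	/* Arbitrary policy for illustration: if the adapter's enqueue
	 * buffer is more than half full, drop the whole burst (assuming
	 * the callback owns the mbufs it filters out).
	 */
	if (enqueue_buf_count > enqueue_buf_size / 2) {
		for (i = 0; i < nb_mbuf; i++)
			rte_pktmbuf_free(mbuf[i]);
		return 0;
	}

	/* Otherwise pass every mbuf through unchanged */
	for (i = 0; i < nb_mbuf; i++)
		enq_buf[i] = mbuf[i];

	return nb_mbuf;
}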
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
* Create a new ethernet Rx event adapter with the specified identifier.
*
* @param id
@@ -329,7 +366,7 @@ int rte_event_eth_rx_adapter_free(uint8_t id);
* combination of the two error codes.
*/
int rte_event_eth_rx_adapter_queue_add(uint8_t id,
- uint8_t eth_dev_id,
+ uint16_t eth_dev_id,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *conf);
@@ -357,7 +394,7 @@ int rte_event_eth_rx_adapter_queue_add(uint8_t id,
* - 0: Success, Receive queue deleted correctly.
* - <0: Error code on failure.
*/
-int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
int32_t rx_queue_id);
/**
@@ -444,6 +481,32 @@ int rte_event_eth_rx_adapter_stats_reset(uint8_t id);
*/
int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a callback to process Rx packets. This is supported only for
+ * SW based packet transfers.
+ * @see rte_event_eth_rx_adapter_cb_fn
+ *
+ * @param id
+ * Adapter identifier.
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ * @param cb_fn
+ * Callback function.
+ * @param cb_arg
+ * Callback arg.
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_eth_rx_adapter_cb_register(uint8_t id,
+ uint16_t eth_dev_id,
+ rte_event_eth_rx_adapter_cb_fn cb_fn,
+ void *cb_arg);
+
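Registering the callback sketched earlier could then look like this (adapter id 0 is arbitrary; port_id and my_ctx are placeholders for application state):

int err = rte_event_eth_rx_adapter_cb_register(0 /* adapter id */,
					       port_id, app_rx_cb, my_ctx);
if (err != 0)
	rte_panic("cb_register failed: %d\n", err);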
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eventdev/rte_event_ring.c b/lib/librte_eventdev/rte_event_ring.c
index eb67751d..16d02a95 100644
--- a/lib/librte_eventdev/rte_event_ring.c
+++ b/lib/librte_eventdev/rte_event_ring.c
@@ -82,11 +82,16 @@ rte_event_ring_create(const char *name, unsigned int count, int socket_id,
mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
if (mz != NULL) {
r = mz->addr;
- /*
- * no need to check return value here, we already checked the
- * arguments above
- */
- rte_event_ring_init(r, name, requested_count, flags);
+ /* Check return value in case rte_ring_init() fails on size */
+ int err = rte_event_ring_init(r, name, requested_count, flags);
+ if (err) {
+ RTE_LOG(ERR, RING, "Ring init failed\n");
+ if (rte_memzone_free(mz) != 0)
+ RTE_LOG(ERR, RING, "Cannot free memzone\n");
+ rte_free(te);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return NULL;
+ }
te->data = (void *) r;
r->r.memzone = mz;
diff --git a/lib/librte_eventdev/rte_event_ring.h b/lib/librte_eventdev/rte_event_ring.h
index 29d4228a..827a3209 100644
--- a/lib/librte_eventdev/rte_event_ring.h
+++ b/lib/librte_eventdev/rte_event_ring.h
@@ -99,7 +99,7 @@ rte_event_ring_enqueue_burst(struct rte_event_ring *r,
ENQUEUE_PTRS(&r->r, &r[1], prod_head, events, n, struct rte_event);
- update_tail(&r->r.prod, prod_head, prod_next, 1, 1);
+ update_tail(&r->r.prod, prod_head, prod_next, r->r.prod.single, 1);
end:
if (free_space != NULL)
*free_space = free_entries - n;
@@ -140,7 +140,7 @@ rte_event_ring_dequeue_burst(struct rte_event_ring *r,
DEQUEUE_PTRS(&r->r, &r[1], cons_head, events, n, struct rte_event);
- update_tail(&r->r.cons, cons_head, cons_next, 1, 0);
+ update_tail(&r->r.cons, cons_head, cons_next, r->r.cons.single, 0);
end:
if (available != NULL)
diff --git a/lib/librte_eventdev/rte_event_timer_adapter.c b/lib/librte_eventdev/rte_event_timer_adapter.c
new file mode 100644
index 00000000..79070d48
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_timer_adapter.c
@@ -0,0 +1,1299 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_memzone.h>
+#include <rte_memory.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_common.h>
+#include <rte_timer.h>
+#include <rte_service_component.h>
+#include <rte_cycles.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_timer_adapter.h"
+#include "rte_event_timer_adapter_pmd.h"
+
+#define DATA_MZ_NAME_MAX_LEN 64
+#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
+
+static int evtim_logtype;
+static int evtim_svc_logtype;
+static int evtim_buffer_logtype;
+
+static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
+
+static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops;
+
+#define EVTIM_LOG(level, logtype, ...) \
+ rte_log(RTE_LOG_ ## level, logtype, \
+ RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
+ "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+#define EVTIM_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
+#define EVTIM_BUF_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
+#define EVTIM_SVC_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
+#else
+#define EVTIM_LOG_DBG(...) (void)0
+#define EVTIM_BUF_LOG_DBG(...) (void)0
+#define EVTIM_SVC_LOG_DBG(...) (void)0
+#endif
+
+static int
+default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
+ void *conf_arg)
+{
+ struct rte_event_timer_adapter *adapter;
+ struct rte_eventdev *dev;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_port_conf *port_conf, def_port_conf = {0};
+ int started;
+ uint8_t port_id;
+ uint8_t dev_id;
+ int ret;
+
+ RTE_SET_USED(event_dev_id);
+
+ adapter = &adapters[id];
+ dev = &rte_eventdevs[adapter->data->event_dev_id];
+ dev_id = dev->data->dev_id;
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret < 0) {
+		EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
+ if (started)
+ if (rte_event_dev_start(dev_id))
+ return -EIO;
+
+ return ret;
+ }
+
+ if (conf_arg != NULL)
+ port_conf = conf_arg;
+ else {
+ port_conf = &def_port_conf;
+ ret = rte_event_port_default_conf_get(dev_id, port_id,
+ port_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ if (ret < 0) {
+		EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
+ port_id, dev_id);
+ return ret;
+ }
+
+ *event_port_id = port_id;
+
+ if (started)
+ ret = rte_event_dev_start(dev_id);
+
+ return ret;
+}
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
+{
+ return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
+ NULL);
+}
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create_ext(
+ const struct rte_event_timer_adapter_conf *conf,
+ rte_event_timer_adapter_port_conf_cb_t conf_cb,
+ void *conf_arg)
+{
+ uint16_t adapter_id;
+ struct rte_event_timer_adapter *adapter;
+ const struct rte_memzone *mz;
+ char mz_name[DATA_MZ_NAME_MAX_LEN];
+ int n, ret;
+ struct rte_eventdev *dev;
+
+ if (conf == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Check eventdev ID */
+ if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ dev = &rte_eventdevs[conf->event_dev_id];
+
+ adapter_id = conf->timer_adapter_id;
+
+ /* Check that adapter_id is in range */
+ if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Check adapter ID not already allocated */
+ adapter = &adapters[adapter_id];
+ if (adapter->allocated) {
+ rte_errno = EEXIST;
+ return NULL;
+ }
+
+ /* Create shared data area. */
+ n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
+ if (n >= (int)sizeof(mz_name)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_event_timer_adapter_data),
+ conf->socket_id, 0);
+ if (mz == NULL)
+ /* rte_errno set by rte_memzone_reserve */
+ return NULL;
+
+ adapter->data = mz->addr;
+ memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
+
+ adapter->data->mz = mz;
+ adapter->data->event_dev_id = conf->event_dev_id;
+ adapter->data->id = adapter_id;
+ adapter->data->socket_id = conf->socket_id;
+ adapter->data->conf = *conf; /* copy conf structure */
+
+ /* Query eventdev PMD for timer adapter capabilities and ops */
+ ret = dev->dev_ops->timer_adapter_caps_get(dev,
+ adapter->data->conf.flags,
+ &adapter->data->caps,
+ &adapter->ops);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+
+ if (!(adapter->data->caps &
+ RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+ FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, -EINVAL);
+ ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
+ &adapter->data->event_port_id, conf_arg);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+ }
+
+ /* If eventdev PMD did not provide ops, use default software
+ * implementation.
+ */
+ if (adapter->ops == NULL)
+ adapter->ops = &sw_event_adapter_timer_ops;
+
+ /* Allow driver to do some setup */
+ FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, -ENOTSUP);
+ ret = adapter->ops->init(adapter);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+
+ /* Set fast-path function pointers */
+ adapter->arm_burst = adapter->ops->arm_burst;
+ adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
+ adapter->cancel_burst = adapter->ops->cancel_burst;
+
+ adapter->allocated = 1;
+
+ return adapter;
+
+free_memzone:
+ rte_memzone_free(adapter->data->mz);
+ return NULL;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+
+ if (adapter->ops->get_info)
+ /* let driver set values it knows */
+ adapter->ops->get_info(adapter, adapter_info);
+
+ /* Set common values */
+ adapter_info->conf = adapter->data->conf;
+ adapter_info->event_dev_port_id = adapter->data->event_port_id;
+ adapter_info->caps = adapter->data->caps;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
+
+ ret = adapter->ops->start(adapter);
+ if (ret < 0)
+ return ret;
+
+ adapter->data->started = 1;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
+
+ if (adapter->data->started == 0) {
+ EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
+ adapter->data->id);
+ return 0;
+ }
+
+ ret = adapter->ops->stop(adapter);
+ if (ret < 0)
+ return ret;
+
+ adapter->data->started = 0;
+
+ return 0;
+}
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_lookup(uint16_t adapter_id)
+{
+ char name[DATA_MZ_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+ struct rte_event_timer_adapter_data *data;
+ struct rte_event_timer_adapter *adapter;
+ int ret;
+ struct rte_eventdev *dev;
+
+ if (adapters[adapter_id].allocated)
+ return &adapters[adapter_id]; /* Adapter is already loaded */
+
+ snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ data = mz->addr;
+
+ adapter = &adapters[data->id];
+ adapter->data = data;
+
+ dev = &rte_eventdevs[adapter->data->event_dev_id];
+
+ /* Query eventdev PMD for timer adapter capabilities and ops */
+ ret = dev->dev_ops->timer_adapter_caps_get(dev,
+ adapter->data->conf.flags,
+ &adapter->data->caps,
+ &adapter->ops);
+ if (ret < 0) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* If eventdev PMD did not provide ops, use default software
+ * implementation.
+ */
+ if (adapter->ops == NULL)
+ adapter->ops = &sw_event_adapter_timer_ops;
+
+ /* Set fast-path function pointers */
+ adapter->arm_burst = adapter->ops->arm_burst;
+ adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
+ adapter->cancel_burst = adapter->ops->cancel_burst;
+
+ adapter->allocated = 1;
+
+ return adapter;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
+
+ if (adapter->data->started == 1) {
+ EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
+ "before freeing", adapter->data->id);
+ return -EBUSY;
+ }
+
+ /* free impl priv data */
+ ret = adapter->ops->uninit(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* free shared data area */
+ ret = rte_memzone_free(adapter->data->mz);
+ if (ret < 0)
+ return ret;
+
+ adapter->data = NULL;
+ adapter->allocated = 0;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
+ uint32_t *service_id)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+
+ if (adapter->data->service_inited && service_id != NULL)
+ *service_id = adapter->data->service_id;
+
+ return adapter->data->service_inited ? 0 : -ESRCH;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
+ if (stats == NULL)
+ return -EINVAL;
+
+ return adapter->ops->stats_get(adapter, stats);
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
+ return adapter->ops->stats_reset(adapter);
+}
+
+/*
+ * Software event timer adapter buffer helper functions
+ */
+
+#define NSECPERSEC 1E9
+
+/* Optimizations used to index into the buffer require that the buffer size
+ * be a power of 2.
+ */
+#define EVENT_BUFFER_SZ 4096
+#define EVENT_BUFFER_BATCHSZ 32
+#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
+
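A standalone sanity check of the masking arithmetic described above (illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t head = 4097;                   /* free-running counter */
	/* mask == modulus whenever the size is a power of 2 */
	assert((head & 4095u) == head % 4096u);
	/* uint16_t wraps at 65536, a multiple of 4096, so the masked
	 * indices stay consistent across wraparound: 65535 + 2 -> 1.
	 */
	head = 65535;
	head += 2;
	assert((head & 4095u) == 1);
	return 0;
}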
+struct event_buffer {
+ uint16_t head;
+ uint16_t tail;
+ struct rte_event events[EVENT_BUFFER_SZ];
+} __rte_cache_aligned;
+
+static inline bool
+event_buffer_full(struct event_buffer *bufp)
+{
+ return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
+}
+
+static inline bool
+event_buffer_batch_ready(struct event_buffer *bufp)
+{
+ return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
+}
+
+static void
+event_buffer_init(struct event_buffer *bufp)
+{
+ bufp->head = bufp->tail = 0;
+ memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
+}
+
+static int
+event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
+{
+ uint16_t head_idx;
+ struct rte_event *buf_eventp;
+
+ if (event_buffer_full(bufp))
+ return -1;
+
+ /* Instead of modulus, bitwise AND with mask to get head_idx. */
+ head_idx = bufp->head & EVENT_BUFFER_MASK;
+ buf_eventp = &bufp->events[head_idx];
+ rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
+
+ /* Wrap automatically when overflow occurs. */
+ bufp->head++;
+
+ return 0;
+}
+
+static void
+event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
+ uint16_t *nb_events_flushed,
+ uint16_t *nb_events_inv)
+{
+ uint16_t head_idx, tail_idx, n = 0;
+ struct rte_event *events = bufp->events;
+
+ /* Instead of modulus, bitwise AND with mask to get index. */
+ head_idx = bufp->head & EVENT_BUFFER_MASK;
+ tail_idx = bufp->tail & EVENT_BUFFER_MASK;
+
+	/* Determine the largest contiguous run we can attempt to enqueue to the
+ * event device.
+ */
+ if (head_idx > tail_idx)
+ n = head_idx - tail_idx;
+ else if (head_idx < tail_idx)
+ n = EVENT_BUFFER_SZ - tail_idx;
+ else {
+ *nb_events_flushed = 0;
+ return;
+ }
+
+ *nb_events_inv = 0;
+ *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
+ &events[tail_idx], n);
+ if (*nb_events_flushed != n && rte_errno == -EINVAL) {
+ EVTIM_LOG_ERR("failed to enqueue invalid event - dropping it");
+ (*nb_events_inv)++;
+ }
+
+ bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
+}
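To make the wrap handling above concrete, a worked example with hypothetical index values:

/* Suppose head = 4098 and tail = 4094 (4 events buffered). Then
 * head_idx = 4098 & 4095 = 2 and tail_idx = 4094; since head_idx is less
 * than tail_idx, n = 4096 - 4094 = 2, so slots 4094..4095 are enqueued
 * first. Once tail advances past the wrap point, the next flush picks up
 * the remaining slots 0..1.
 */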
+
+/*
+ * Software event timer adapter implementation
+ */
+
+struct rte_event_timer_adapter_sw_data {
+ /* List of messages for outstanding timers */
+ TAILQ_HEAD(, msg) msgs_tailq_head;
+ /* Lock to guard tailq and armed count */
+ rte_spinlock_t msgs_tailq_sl;
+ /* Identifier of service executing timer management logic. */
+ uint32_t service_id;
+ /* The cycle count at which the adapter should next tick */
+ uint64_t next_tick_cycles;
+ /* Incremented as the service moves through phases of an iteration */
+ volatile int service_phase;
+	/* The tick resolution used by the adapter instance. May have been
+	 * adjusted from what the user requested.
+	 */
+ uint64_t timer_tick_ns;
+ /* Maximum timeout in nanoseconds allowed by adapter instance. */
+ uint64_t max_tmo_ns;
+ /* Ring containing messages to arm or cancel event timers */
+ struct rte_ring *msg_ring;
+ /* Mempool containing msg objects */
+ struct rte_mempool *msg_pool;
+ /* Buffered timer expiry events to be enqueued to an event device. */
+ struct event_buffer buffer;
+ /* Statistics */
+ struct rte_event_timer_adapter_stats stats;
+ /* The number of threads currently adding to the message ring */
+ rte_atomic16_t message_producer_count;
+};
+
+enum msg_type {MSG_TYPE_ARM, MSG_TYPE_CANCEL};
+
+struct msg {
+ enum msg_type type;
+ struct rte_event_timer *evtim;
+ struct rte_timer tim;
+ TAILQ_ENTRY(msg) msgs;
+};
+
+static void
+sw_event_timer_cb(struct rte_timer *tim, void *arg)
+{
+ int ret;
+ uint16_t nb_evs_flushed = 0;
+ uint16_t nb_evs_invalid = 0;
+ uint64_t opaque;
+ struct rte_event_timer *evtim;
+ struct rte_event_timer_adapter *adapter;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ evtim = arg;
+ opaque = evtim->impl_opaque[1];
+ adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
+ sw_data = adapter->data->adapter_priv;
+
+ ret = event_buffer_add(&sw_data->buffer, &evtim->ev);
+ if (ret < 0) {
+ /* If event buffer is full, put timer back in list with
+ * immediate expiry value, so that we process it again on the
+ * next iteration.
+ */
+ rte_timer_reset_sync(tim, 0, SINGLE, rte_lcore_id(),
+ sw_event_timer_cb, evtim);
+
+ sw_data->stats.evtim_retry_count++;
+ EVTIM_LOG_DBG("event buffer full, resetting rte_timer with "
+ "immediate expiry value");
+ } else {
+ struct msg *m = container_of(tim, struct msg, tim);
+ TAILQ_REMOVE(&sw_data->msgs_tailq_head, m, msgs);
+ EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
+ evtim->state = RTE_EVENT_TIMER_NOT_ARMED;
+
+ /* Free the msg object containing the rte_timer now that
+ * we've buffered its event successfully.
+ */
+ rte_mempool_put(sw_data->msg_pool, m);
+
+ /* Bump the count when we successfully add an expiry event to
+ * the buffer.
+ */
+ sw_data->stats.evtim_exp_count++;
+ }
+
+ if (event_buffer_batch_ready(&sw_data->buffer)) {
+ event_buffer_flush(&sw_data->buffer,
+ adapter->data->event_dev_id,
+ adapter->data->event_port_id,
+ &nb_evs_flushed,
+ &nb_evs_invalid);
+
+ sw_data->stats.ev_enq_count += nb_evs_flushed;
+ sw_data->stats.ev_inv_count += nb_evs_invalid;
+ }
+}
+
+static __rte_always_inline uint64_t
+get_timeout_cycles(struct rte_event_timer *evtim,
+ struct rte_event_timer_adapter *adapter)
+{
+ uint64_t timeout_ns;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ timeout_ns = evtim->timeout_ticks * sw_data->timer_tick_ns;
+	return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
+}
+
+/* This function returns true if one or more (adapter) ticks have occurred since
+ * the last time it was called.
+ */
+static inline bool
+adapter_did_tick(struct rte_event_timer_adapter *adapter)
+{
+ uint64_t cycles_per_adapter_tick, start_cycles;
+ uint64_t *next_tick_cyclesp;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ next_tick_cyclesp = &sw_data->next_tick_cycles;
+
+ cycles_per_adapter_tick = sw_data->timer_tick_ns *
+ (rte_get_timer_hz() / NSECPERSEC);
+
+ start_cycles = rte_get_timer_cycles();
+
+ /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
+ * execute, and set things going.
+ */
+
+ if (start_cycles >= *next_tick_cyclesp) {
+ /* Snap the current cycle count to the preceding adapter tick
+ * boundary.
+ */
+ start_cycles -= start_cycles % cycles_per_adapter_tick;
+
+ *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
+
+ return true;
+ }
+
+ return false;
+}
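A worked example of the tick snapping (hypothetical clock numbers):

/* With timer_tick_ns = 10 ms and rte_get_timer_hz() = 2 GHz,
 * cycles_per_adapter_tick = 20,000,000. A start_cycles of 50,123,456
 * snaps down to 40,000,000, *next_tick_cyclesp becomes 60,000,000, and
 * no further tick is reported until the cycle counter reaches that value.
 */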
+
+/* Check that event timer timeout value is in range */
+static __rte_always_inline int
+check_timeout(struct rte_event_timer *evtim,
+ const struct rte_event_timer_adapter *adapter)
+{
+ uint64_t tmo_nsec;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ tmo_nsec = evtim->timeout_ticks * sw_data->timer_tick_ns;
+
+ if (tmo_nsec > sw_data->max_tmo_ns)
+ return -1;
+
+ if (tmo_nsec < sw_data->timer_tick_ns)
+ return -2;
+
+ return 0;
+}
+
+/* Check that event timer event queue sched type matches destination event queue
+ * sched type
+ */
+static __rte_always_inline int
+check_destination_event_queue(struct rte_event_timer *evtim,
+ const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ uint32_t sched_type;
+
+ ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
+ evtim->ev.queue_id,
+ RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
+ &sched_type);
+
+ if ((ret < 0 && ret != -EOVERFLOW) ||
+ evtim->ev.sched_type != sched_type)
+ return -1;
+
+ return 0;
+}
+
+#define NB_OBJS 32
+static int
+sw_event_timer_adapter_service_func(void *arg)
+{
+ int i, num_msgs;
+ uint64_t cycles, opaque;
+ uint16_t nb_evs_flushed = 0;
+ uint16_t nb_evs_invalid = 0;
+ struct rte_event_timer_adapter *adapter;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_timer *tim = NULL;
+ struct msg *msg, *msgs[NB_OBJS];
+
+ adapter = arg;
+ sw_data = adapter->data->adapter_priv;
+
+ sw_data->service_phase = 1;
+ rte_smp_wmb();
+
+ while (rte_atomic16_read(&sw_data->message_producer_count) > 0 ||
+ !rte_ring_empty(sw_data->msg_ring)) {
+
+ num_msgs = rte_ring_dequeue_burst(sw_data->msg_ring,
+ (void **)msgs, NB_OBJS, NULL);
+
+ for (i = 0; i < num_msgs; i++) {
+ int ret = 0;
+
+ RTE_SET_USED(ret);
+
+ msg = msgs[i];
+ evtim = msg->evtim;
+
+ switch (msg->type) {
+ case MSG_TYPE_ARM:
+ EVTIM_SVC_LOG_DBG("dequeued ARM message from "
+ "ring");
+ tim = &msg->tim;
+ rte_timer_init(tim);
+ cycles = get_timeout_cycles(evtim,
+ adapter);
+ ret = rte_timer_reset(tim, cycles, SINGLE,
+ rte_lcore_id(),
+ sw_event_timer_cb,
+ evtim);
+ RTE_ASSERT(ret == 0);
+
+ evtim->impl_opaque[0] = (uintptr_t)tim;
+ evtim->impl_opaque[1] = (uintptr_t)adapter;
+
+ TAILQ_INSERT_TAIL(&sw_data->msgs_tailq_head,
+ msg,
+ msgs);
+ break;
+ case MSG_TYPE_CANCEL:
+ EVTIM_SVC_LOG_DBG("dequeued CANCEL message "
+ "from ring");
+ opaque = evtim->impl_opaque[0];
+ tim = (struct rte_timer *)(uintptr_t)opaque;
+ RTE_ASSERT(tim != NULL);
+
+ ret = rte_timer_stop(tim);
+ RTE_ASSERT(ret == 0);
+
+ /* Free the msg object for the original arm
+ * request.
+ */
+ struct msg *m;
+ m = container_of(tim, struct msg, tim);
+ TAILQ_REMOVE(&sw_data->msgs_tailq_head, m,
+ msgs);
+ rte_mempool_put(sw_data->msg_pool, m);
+
+ /* Free the msg object for the current msg */
+ rte_mempool_put(sw_data->msg_pool, msg);
+
+ evtim->impl_opaque[0] = 0;
+ evtim->impl_opaque[1] = 0;
+
+ break;
+ }
+ }
+ }
+
+ sw_data->service_phase = 2;
+ rte_smp_wmb();
+
+ if (adapter_did_tick(adapter)) {
+ rte_timer_manage();
+
+ event_buffer_flush(&sw_data->buffer,
+ adapter->data->event_dev_id,
+ adapter->data->event_port_id,
+ &nb_evs_flushed, &nb_evs_invalid);
+
+ sw_data->stats.ev_enq_count += nb_evs_flushed;
+ sw_data->stats.ev_inv_count += nb_evs_invalid;
+ sw_data->stats.adapter_tick_count++;
+ }
+
+ sw_data->service_phase = 0;
+ rte_smp_wmb();
+
+ return 0;
+}
+
+/* The adapter initialization function rounds the mempool size up to the next
+ * power of 2, so we can take the difference between that value and what the
+ * user requested, and use the space for caches. This avoids a scenario where a
+ * user can't arm the number of timers the adapter was configured with because
+ * mempool objects have been lost to caches.
+ *
+ * nb_actual should always be a power of 2, so we can iterate over the powers
+ * of 2 to find the largest cache size we can use.
+ */
+static int
+compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
+{
+ int i;
+ int size;
+ int cache_size = 0;
+
+ for (i = 0; ; i++) {
+ size = 1 << i;
+
+ if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
+ size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
+ size <= nb_actual / 1.5)
+ cache_size = size;
+ else
+ break;
+ }
+
+ return cache_size;
+}
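A worked example of the cache sizing (assuming RTE_MAX_LCORE is 128; the actual value is build-configuration dependent):

uint64_t nb_requested = 40000;
uint64_t nb_actual = rte_align64pow2(nb_requested);	/* 65536 */
/* Slack is 65536 - 40000 = 25536 objects. The loop accepts size = 128
 * (128 * 128 = 16384 < 25536) but stops at size = 256
 * (128 * 256 = 32768 >= 25536), so the function returns 128 here.
 */
int cache_size = compute_msg_mempool_cache_size(nb_requested, nb_actual);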
+
+#define SW_MIN_INTERVAL 1E5
+
+static int
+sw_event_timer_adapter_init(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ uint64_t nb_timers;
+ unsigned int flags;
+ struct rte_service_spec service;
+	static bool timer_subsystem_inited; /* static, so initialized to false */
+
+ /* Allocate storage for SW implementation data */
+ char priv_data_name[RTE_RING_NAMESIZE];
+ snprintf(priv_data_name, RTE_RING_NAMESIZE, "sw_evtim_adap_priv_%"PRIu8,
+ adapter->data->id);
+ adapter->data->adapter_priv = rte_zmalloc_socket(
+ priv_data_name,
+ sizeof(struct rte_event_timer_adapter_sw_data),
+ RTE_CACHE_LINE_SIZE,
+ adapter->data->socket_id);
+ if (adapter->data->adapter_priv == NULL) {
+ EVTIM_LOG_ERR("failed to allocate space for private data");
+ rte_errno = ENOMEM;
+ return -1;
+ }
+
+ if (adapter->data->conf.timer_tick_ns < SW_MIN_INTERVAL) {
+ EVTIM_LOG_ERR("failed to create adapter with requested tick "
+ "interval");
+ rte_errno = EINVAL;
+		/* Avoid leaking the private data allocated above */
+		rte_free(adapter->data->adapter_priv);
+		adapter->data->adapter_priv = NULL;
+		return -1;
+ }
+
+ sw_data = adapter->data->adapter_priv;
+
+ sw_data->timer_tick_ns = adapter->data->conf.timer_tick_ns;
+ sw_data->max_tmo_ns = adapter->data->conf.max_tmo_ns;
+
+ TAILQ_INIT(&sw_data->msgs_tailq_head);
+ rte_spinlock_init(&sw_data->msgs_tailq_sl);
+ rte_atomic16_init(&sw_data->message_producer_count);
+
+ /* Rings require power of 2, so round up to next such value */
+ nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
+
+ char msg_ring_name[RTE_RING_NAMESIZE];
+ snprintf(msg_ring_name, RTE_RING_NAMESIZE,
+ "sw_evtim_adap_msg_ring_%"PRIu8, adapter->data->id);
+ flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
+ RING_F_SP_ENQ | RING_F_SC_DEQ :
+ RING_F_SC_DEQ;
+ sw_data->msg_ring = rte_ring_create(msg_ring_name, nb_timers,
+ adapter->data->socket_id, flags);
+ if (sw_data->msg_ring == NULL) {
+ EVTIM_LOG_ERR("failed to create message ring");
+ rte_errno = ENOMEM;
+ goto free_priv_data;
+ }
+
+ char pool_name[RTE_RING_NAMESIZE];
+ snprintf(pool_name, RTE_RING_NAMESIZE, "sw_evtim_adap_msg_pool_%"PRIu8,
+ adapter->data->id);
+
+ /* Both the arming/canceling thread and the service thread will do puts
+ * to the mempool, but if the SP_PUT flag is enabled, we can specify
+ * single-consumer get for the mempool.
+ */
+ flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
+ MEMPOOL_F_SC_GET : 0;
+
+ /* The usable size of a ring is count - 1, so subtract one here to
+ * make the counts agree.
+ */
+ int pool_size = nb_timers - 1;
+ int cache_size = compute_msg_mempool_cache_size(
+ adapter->data->conf.nb_timers, nb_timers);
+ sw_data->msg_pool = rte_mempool_create(pool_name, pool_size,
+ sizeof(struct msg), cache_size,
+ 0, NULL, NULL, NULL, NULL,
+ adapter->data->socket_id, flags);
+ if (sw_data->msg_pool == NULL) {
+ EVTIM_LOG_ERR("failed to create message object mempool");
+ rte_errno = ENOMEM;
+ goto free_msg_ring;
+ }
+
+ event_buffer_init(&sw_data->buffer);
+
+ /* Register a service component to run adapter logic */
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, RTE_SERVICE_NAME_MAX,
+		 "sw_evtim_adap_svc_%"PRIu8, adapter->data->id);
+ service.socket_id = adapter->data->socket_id;
+ service.callback = sw_event_timer_adapter_service_func;
+ service.callback_userdata = adapter;
+ service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
+ ret = rte_service_component_register(&service, &sw_data->service_id);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
+ ": err = %d", service.name, sw_data->service_id,
+ ret);
+
+ rte_errno = ENOSPC;
+ goto free_msg_pool;
+ }
+
+ EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
+ sw_data->service_id);
+
+ adapter->data->service_id = sw_data->service_id;
+ adapter->data->service_inited = 1;
+
+ if (!timer_subsystem_inited) {
+ rte_timer_subsystem_init();
+ timer_subsystem_inited = true;
+ }
+
+ return 0;
+
+free_msg_pool:
+ rte_mempool_free(sw_data->msg_pool);
+free_msg_ring:
+ rte_ring_free(sw_data->msg_ring);
+free_priv_data:
+ rte_free(sw_data);
+ return -1;
+}
+
+static int
+sw_event_timer_adapter_uninit(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct msg *m1, *m2;
+ struct rte_event_timer_adapter_sw_data *sw_data =
+ adapter->data->adapter_priv;
+
+ rte_spinlock_lock(&sw_data->msgs_tailq_sl);
+
+ /* Cancel outstanding rte_timers and free msg objects */
+ m1 = TAILQ_FIRST(&sw_data->msgs_tailq_head);
+ while (m1 != NULL) {
+ EVTIM_LOG_DBG("freeing outstanding timer");
+ m2 = TAILQ_NEXT(m1, msgs);
+
+ rte_timer_stop_sync(&m1->tim);
+ rte_mempool_put(sw_data->msg_pool, m1);
+
+ m1 = m2;
+ }
+
+ rte_spinlock_unlock(&sw_data->msgs_tailq_sl);
+
+ ret = rte_service_component_unregister(sw_data->service_id);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to unregister service component");
+ return ret;
+ }
+
+ rte_ring_free(sw_data->msg_ring);
+ rte_mempool_free(sw_data->msg_pool);
+ rte_free(adapter->data->adapter_priv);
+
+ return 0;
+}
+
+static inline int32_t
+get_mapped_count_for_service(uint32_t service_id)
+{
+ int32_t core_count, i, mapped_count = 0;
+ uint32_t lcore_arr[RTE_MAX_LCORE];
+
+ core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
+
+ for (i = 0; i < core_count; i++)
+ if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
+ mapped_count++;
+
+ return mapped_count;
+}
+
+static int
+sw_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
+{
+ int mapped_count;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+
+ /* Mapping the service to more than one service core can introduce
+ * delays while one thread is waiting to acquire a lock, so only allow
+ * one core to be mapped to the service.
+ */
+ mapped_count = get_mapped_count_for_service(sw_data->service_id);
+
+ if (mapped_count == 1)
+ return rte_service_component_runstate_set(sw_data->service_id,
+ 1);
+
+ return mapped_count < 1 ? -ENOENT : -ENOTSUP;
+}
+
+static int
+sw_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data =
+ adapter->data->adapter_priv;
+
+ ret = rte_service_component_runstate_set(sw_data->service_id, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the service to complete its final iteration before
+ * stopping.
+ */
+ while (sw_data->service_phase != 0)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ return 0;
+}
+
+static void
+sw_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+
+ adapter_info->min_resolution_ns = sw_data->timer_tick_ns;
+ adapter_info->max_tmo_ns = sw_data->max_tmo_ns;
+}
+
+static int
+sw_event_timer_adapter_stats_get(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+ *stats = sw_data->stats;
+ return 0;
+}
+
+static int
+sw_event_timer_adapter_stats_reset(
+ const struct rte_event_timer_adapter *adapter)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+ memset(&sw_data->stats, 0, sizeof(sw_data->stats));
+ return 0;
+}
+
+static __rte_always_inline uint16_t
+__sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ uint16_t i;
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct msg *msgs[nb_evtims];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ /* Check that the service is running. */
+ if (rte_service_runstate_get(adapter->data->service_id) != 1) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+#endif
+
+ sw_data = adapter->data->adapter_priv;
+
+ ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
+ if (ret < 0) {
+ rte_errno = ENOSPC;
+ return 0;
+ }
+
+ /* Let the service know we're producing messages for it to process */
+ rte_atomic16_inc(&sw_data->message_producer_count);
+
+ /* If the service is managing timers, wait for it to finish */
+ while (sw_data->service_phase == 2)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ for (i = 0; i < nb_evtims; i++) {
+ /* Don't modify the event timer state in these cases */
+ if (evtims[i]->state == RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EALREADY;
+ break;
+ } else if (!(evtims[i]->state == RTE_EVENT_TIMER_NOT_ARMED ||
+ evtims[i]->state == RTE_EVENT_TIMER_CANCELED)) {
+ rte_errno = EINVAL;
+ break;
+ }
+
+ ret = check_timeout(evtims[i], adapter);
+ if (ret == -1) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
+ rte_errno = EINVAL;
+ break;
+ }
+ if (ret == -2) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ rte_errno = EINVAL;
+ break;
+ }
+
+ if (check_destination_event_queue(evtims[i], adapter) < 0) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR;
+ rte_errno = EINVAL;
+ break;
+ }
+
+ /* Checks passed, set up a message to enqueue */
+ msgs[i]->type = MSG_TYPE_ARM;
+ msgs[i]->evtim = evtims[i];
+
+ /* Set the payload pointer if not set. */
+ if (evtims[i]->ev.event_ptr == NULL)
+ evtims[i]->ev.event_ptr = evtims[i];
+
+ /* msg objects that get enqueued successfully will be freed
+ * either by a future cancel operation or by the timer
+ * expiration callback.
+ */
+ if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
+ rte_errno = ENOSPC;
+ break;
+ }
+
+ EVTIM_LOG_DBG("enqueued ARM message to ring");
+
+ evtims[i]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ /* Let the service know we're done producing messages */
+ rte_atomic16_dec(&sw_data->message_producer_count);
+
+ if (i < nb_evtims)
+ rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
+ nb_evtims - i);
+
+ return i;
+}
+
+static uint16_t
+sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
+}
+
+static uint16_t
+sw_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ uint16_t i;
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct msg *msgs[nb_evtims];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ /* Check that the service is running. */
+ if (rte_service_runstate_get(adapter->data->service_id) != 1) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+#endif
+
+ sw_data = adapter->data->adapter_priv;
+
+ ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
+ if (ret < 0) {
+ rte_errno = ENOSPC;
+ return 0;
+ }
+
+ /* Let the service know we're producing messages for it to process */
+ rte_atomic16_inc(&sw_data->message_producer_count);
+
+ /* If the service could be modifying event timer states, wait */
+ while (sw_data->service_phase == 2)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ for (i = 0; i < nb_evtims; i++) {
+ /* Don't modify the event timer state in these cases */
+ if (evtims[i]->state == RTE_EVENT_TIMER_CANCELED) {
+ rte_errno = EALREADY;
+ break;
+ } else if (evtims[i]->state != RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EINVAL;
+ break;
+ }
+
+ msgs[i]->type = MSG_TYPE_CANCEL;
+ msgs[i]->evtim = evtims[i];
+
+ if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
+ rte_errno = ENOSPC;
+ break;
+ }
+
+ EVTIM_LOG_DBG("enqueued CANCEL message to ring");
+
+ evtims[i]->state = RTE_EVENT_TIMER_CANCELED;
+ }
+
+ /* Let the service know we're done producing messages */
+ rte_atomic16_dec(&sw_data->message_producer_count);
+
+ if (i < nb_evtims)
+ rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
+ nb_evtims - i);
+
+ return i;
+}
+
+static uint16_t
+sw_event_timer_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint64_t timeout_ticks,
+ uint16_t nb_evtims)
+{
+ int i;
+
+ for (i = 0; i < nb_evtims; i++)
+ evtims[i]->timeout_ticks = timeout_ticks;
+
+ return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
+}
+
+static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops = {
+ .init = sw_event_timer_adapter_init,
+ .uninit = sw_event_timer_adapter_uninit,
+ .start = sw_event_timer_adapter_start,
+ .stop = sw_event_timer_adapter_stop,
+ .get_info = sw_event_timer_adapter_get_info,
+ .stats_get = sw_event_timer_adapter_stats_get,
+ .stats_reset = sw_event_timer_adapter_stats_reset,
+ .arm_burst = sw_event_timer_arm_burst,
+ .arm_tmo_tick_burst = sw_event_timer_arm_tmo_tick_burst,
+ .cancel_burst = sw_event_timer_cancel_burst,
+};
+
+RTE_INIT(event_timer_adapter_init_log)
+{
+ evtim_logtype = rte_log_register("lib.eventdev.adapter.timer");
+ if (evtim_logtype >= 0)
+ rte_log_set_level(evtim_logtype, RTE_LOG_NOTICE);
+
+ evtim_buffer_logtype = rte_log_register("lib.eventdev.adapter.timer."
+ "buffer");
+ if (evtim_buffer_logtype >= 0)
+ rte_log_set_level(evtim_buffer_logtype, RTE_LOG_NOTICE);
+
+ evtim_svc_logtype = rte_log_register("lib.eventdev.adapter.timer.svc");
+ if (evtim_svc_logtype >= 0)
+ rte_log_set_level(evtim_svc_logtype, RTE_LOG_NOTICE);
+}
diff --git a/lib/librte_eventdev/rte_event_timer_adapter.h b/lib/librte_eventdev/rte_event_timer_adapter.h
new file mode 100644
index 00000000..d4ea6f17
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_timer_adapter.h
@@ -0,0 +1,766 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc.
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef __RTE_EVENT_TIMER_ADAPTER_H__
+#define __RTE_EVENT_TIMER_ADAPTER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Timer Adapter
+ *
+ * An event timer adapter has the following abstract working model:
+ *
+ * timer_tick_ns
+ * +
+ * +-------+ |
+ * | | |
+ * +-------+ bkt 0 +----v---+
+ * | | | |
+ * | +-------+ |
+ * +---+---+ +---+---+ +---+---+---+---+
+ * | | | | | | | | |
+ * | bkt n | | bkt 1 |<-> t0| t1| t2| tn|
+ * | | | | | | | | |
+ * +---+---+ +---+---+ +---+---+---+---+
+ * | Timer adapter |
+ * +---+---+ +---+---+
+ * | | | |
+ * | bkt 4 | | bkt 2 |<--- Current bucket
+ * | | | |
+ * +---+---+ +---+---+
+ * | +-------+ |
+ * | | | |
+ * +------+ bkt 3 +-------+
+ * | |
+ * +-------+
+ *
+ * - It has a virtual monotonically increasing 64-bit timer adapter clock based
+ * on *enum rte_event_timer_adapter_clk_src* clock source. The clock source
+ * could be a CPU clock, or a platform dependent external clock.
+ *
+ * - The application creates a timer adapter instance given the clock
+ *   source, the total number of event timers, and a resolution (expressed in
+ *   ns) used to traverse between the buckets.
+ *
+ * - Each timer adapter may have 0 to n buckets based on the configured
+ *   max timeout (max_tmo_ns) and resolution (timer_tick_ns). Upon starting the
+ * timer adapter, the adapter starts ticking at *timer_tick_ns* resolution.
+ *
+ * - The application arms an event timer that will expire after a specified
+ *   number of *timer_tick_ns* ticks from now.
+ *
+ * - The application can cancel an armed timer and no timer expiry event will be
+ * generated.
+ *
+ * - If a timer expires then the library injects the timer expiry event in
+ * the designated event queue.
+ *
+ * - The timer expiry event will be received through *rte_event_dequeue_burst*.
+ *
+ * - The application frees the timer adapter instance.
+ *
+ * Multiple timer adapters can be created with a varying level of resolution
+ * for various expiry use cases that run in parallel.
+ *
+ * Before using the timer adapter, the application has to create and configure
+ * an event device along with the event port. Based on the event device
+ * capability it might require creating an additional event port to be used
+ * by the timer adapter.
+ *
+ * The application creates the event timer adapter using
+ * ``rte_event_timer_adapter_create()``. The event device id is passed to this
+ * function; inside it, the event device capability is checked, and if an
+ * in-built port is absent, a default callback is used to create a new
+ * producer port.
+ *
+ * The application may also use the function
+ * ``rte_event_timer_adapter_create_ext()`` to have granular control over
+ * producer port creation in a case where the in-built port is absent.
+ *
+ * After creating the timer adapter, the application has to start it
+ * using ``rte_event_timer_adapter_start()``. The buckets are traversed from
+ * 0 to n; when the adapter ticks, the next bucket is visited. Each time,
+ * the list per bucket is processed, and timer expiry events are sent to the
+ * designated event queue.
+ *
+ * The application can arm one or more event timers using
+ * ``rte_event_timer_arm_burst()``. The *timeout_ticks* value represents the
+ * number of *timer_tick_ns* intervals after which the timer has to expire.
+ * The expiry timeout can either be common to a whole burst of timers or
+ * independent for each event timer instance;
+ * ``rte_event_timer_arm_tmo_tick_burst()`` addresses the former case and
+ * ``rte_event_timer_arm_burst()`` the latter.
+ *
+ * The application can prevent armed timers from expiring by canceling them
+ * with ``rte_event_timer_cancel_burst()``.
+ *
+ * In a secondary process, ``rte_event_timer_adapter_lookup()`` can be used
+ * to get the timer adapter pointer from its id, which can then be used to
+ * invoke fastpath operations such as arm and cancel.
+ *
+ * Typical use cases of the event timer adapter include Beacon Timers,
+ * Generic SW Timeout, Wireless MAC Scheduling, 3G Frame Protocols,
+ * Packet Scheduling, Protocol Retransmission Timers, and Supervision Timers.
+ * All these use cases require high resolution and low time drift.
+ */
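For illustration only, a minimal create-and-start sketch against this API (a hedged sketch, assuming event device 0 is already configured, the usual DPDK/EAL headers are included, and the numeric values are arbitrary):

    /* Configure a 1 ms resolution adapter with a 100 ms max timeout. */
    struct rte_event_timer_adapter_conf conf = {
            .event_dev_id = 0,
            .timer_adapter_id = 0,
            .socket_id = rte_socket_id(),
            .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
            .timer_tick_ns = 1000000,       /* 1 ms resolution */
            .max_tmo_ns = 100000000,        /* 100 ms maximum timeout */
            .nb_timers = 1024,
            .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
    };
    struct rte_event_timer_adapter *adapter =
            rte_event_timer_adapter_create(&conf);

    if (adapter == NULL || rte_event_timer_adapter_start(adapter) < 0)
            rte_exit(EXIT_FAILURE, "timer adapter setup failed\n");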
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_spinlock.h>
+#include <rte_memory.h>
+
+#include "rte_eventdev.h"
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this enum may change without prior notice
+ *
+ * Timer adapter clock source
+ */
+enum rte_event_timer_adapter_clk_src {
+ RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ /**< Use CPU clock as the clock source. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
+ /**< Platform dependent external clock source 0. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
+ /**< Platform dependent external clock source 1. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+ /**< Platform dependent external clock source 2. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK3,
+ /**< Platform dependent external clock source 3. */
+};
+
+#define RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES (1ULL << 0)
+/**< The event timer adapter implementation may have constraints on the
+ * resolution (timer_tick_ns) and maximum timer expiry timeout (max_tmo_ns)
+ * based on the given timer adapter or system. If this flag is set, the
+ * implementation adjusts the resolution and maximum timeout to the best
+ * possible configuration. On successful timer adapter creation, the
+ * application can get the configured resolution and max timeout with
+ * ``rte_event_timer_adapter_get_info()``.
+ *
+ * @see struct rte_event_timer_adapter_info::min_resolution_ns
+ * @see struct rte_event_timer_adapter_info::max_tmo_ns
+ */
+#define RTE_EVENT_TIMER_ADAPTER_F_SP_PUT (1ULL << 1)
+/**< ``rte_event_timer_arm_burst()`` API to be used in single producer mode.
+ *
+ * @see struct rte_event_timer_adapter_conf::flags
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Timer adapter configuration structure
+ */
+struct rte_event_timer_adapter_conf {
+ uint8_t event_dev_id;
+ /**< Event device identifier */
+ uint16_t timer_adapter_id;
+ /**< Event timer adapter identifier */
+ uint32_t socket_id;
+ /**< Identifier of socket from which to allocate memory for adapter */
+ enum rte_event_timer_adapter_clk_src clk_src;
+ /**< Clock source for timer adapter */
+ uint64_t timer_tick_ns;
+ /**< Timer adapter resolution in ns */
+ uint64_t max_tmo_ns;
+ /**< Maximum timer timeout (expiry) in ns */
+ uint64_t nb_timers;
+ /**< Total number of timers per adapter */
+ uint64_t flags;
+ /**< Timer adapter config flags (RTE_EVENT_TIMER_ADAPTER_F_*) */
+};
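As a worked example of these fields: with *timer_tick_ns* = 1 ms and *max_tmo_ns* = 100 ms, a bucket-based implementation would maintain on the order of max_tmo_ns / timer_tick_ns = 100 buckets, and an armed timer with *timeout_ticks* = 50 would expire roughly 50 ms after arming.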
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Event timer adapter stats structure
+ */
+struct rte_event_timer_adapter_stats {
+ uint64_t evtim_exp_count;
+ /**< Number of event timers that have expired. */
+ uint64_t ev_enq_count;
+ /**< Eventdev enqueue count */
+ uint64_t ev_inv_count;
+ /**< Invalid expiry event count */
+ uint64_t evtim_retry_count;
+ /**< Event timer retry count */
+ uint64_t adapter_tick_count;
+ /**< Tick count for the adapter, at its resolution */
+};
+
+struct rte_event_timer_adapter;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Callback function type for producer port creation.
+ */
+typedef int (*rte_event_timer_adapter_port_conf_cb_t)(uint16_t id,
+ uint8_t event_dev_id,
+ uint8_t *event_port_id,
+ void *conf_arg);
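A hedged sketch of such a callback, assuming the application reserved a spare event port when configuring the device and passes a filled-in struct rte_event_port_conf through *conf_arg*; RESERVED_PORT_ID and the setup policy are illustrative, not part of this API:

    static int
    port_conf_cb(uint16_t id, uint8_t event_dev_id,
                 uint8_t *event_port_id, void *conf_arg)
    {
            struct rte_event_port_conf *port_conf = conf_arg;
            uint8_t port_id = RESERVED_PORT_ID; /* hypothetical spare port */

            RTE_SET_USED(id);
            /* Set up the reserved port and hand it back as the producer. */
            if (rte_event_port_setup(event_dev_id, port_id, port_conf) < 0)
                    return -1;

            *event_port_id = port_id;
            return 0;
    }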
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create an event timer adapter.
+ *
+ * This function must be invoked first before any other function in the API.
+ *
+ * @param conf
+ * The event timer adapter configuration structure.
+ *
+ * @return
+ * A pointer to the new allocated event timer adapter on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ERANGE: timer_tick_ns is not in supported range.
+ * - ENOMEM: unable to allocate sufficient memory for adapter instances
+ * - EINVAL: invalid event device identifier specified in config
+ * - ENOSPC: maximum number of adapters already created
+ * - EIO: event device reconfiguration and restart error. The adapter
+ * reconfigures the event device with an additional port by default if it is
+ * required to use a service to manage timers. If the device had been started
+ * before this call, this error code indicates that the device failed to
+ * restart after being reconfigured.
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a timer adapter with the supplied callback.
+ *
+ * This function can be used to have more granular control over timer adapter
+ * creation. If a built-in port is absent, the function uses the supplied
+ * callback to create a port and obtain the port id to be used as a producer
+ * port.
+ *
+ * @param conf
+ * The timer adapter configuration structure
+ * @param conf_cb
+ * The port config callback function.
+ * @param conf_arg
+ * Opaque pointer to the argument for the callback function
+ *
+ * @return
+ * A pointer to the new allocated event timer adapter on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ERANGE: timer_tick_ns is not in supported range.
+ * - ENOMEM: unable to allocate sufficient memory for adapter instances
+ * - EINVAL: invalid event device identifier specified in config
+ * - ENOSPC: maximum number of adapters already created
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create_ext(
+ const struct rte_event_timer_adapter_conf *conf,
+ rte_event_timer_adapter_port_conf_cb_t conf_cb,
+ void *conf_arg);
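Continuing the sketch above, the callback would be wired in as follows (again assuming *conf* from the earlier sketch and the illustrative *port_conf* passed as the callback argument):

    struct rte_event_timer_adapter *adapter =
            rte_event_timer_adapter_create_ext(&conf, port_conf_cb,
                                               &port_conf);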
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Timer adapter info structure.
+ */
+struct rte_event_timer_adapter_info {
+ uint64_t min_resolution_ns;
+ /**< Minimum timer adapter resolution in ns */
+ uint64_t max_tmo_ns;
+ /**< Maximum timer timeout (expiry) in ns */
+ struct rte_event_timer_adapter_conf conf;
+ /**< Configured timer adapter attributes */
+ uint32_t caps;
+ /**< Event timer adapter capabilities */
+ int16_t event_dev_port_id;
+ /**< Event device port ID, if applicable */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the contextual information of an event timer adapter.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @param[out] adapter_info
+ * A pointer to a structure of type *rte_event_timer_adapter_info* to be
+ * filled with the contextual information of the adapter.
+ *
+ * @return
+ * - 0: Success, driver updates the contextual information of the
+ * timer adapter
+ * - <0: Error code returned by the driver info get function.
+ * - -EINVAL: adapter identifier invalid
+ *
+ * @see RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
+ * struct rte_event_timer_adapter_info
+ *
+ */
+int __rte_experimental
+rte_event_timer_adapter_get_info(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start a timer adapter.
+ *
+ * The adapter start step is the last one in setup and consists of setting the
+ * timer adapter to start accepting timers and scheduling expiry events to
+ * event queues.
+ *
+ * On success, all basic functions exported by the API (timer arm,
+ * timer cancel and so on) can be invoked.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @return
+ * - 0: Success, adapter started.
+ * - <0: Error code returned by the driver start function.
+ * - -EINVAL if adapter identifier invalid
+ * - -ENOENT if software adapter but no service core mapped
+ * - -ENOTSUP if software adapter and more than one service core mapped
+ */
+int __rte_experimental
+rte_event_timer_adapter_start(
+ const struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop an event timer adapter.
+ *
+ * The adapter can be restarted with a call to
+ * ``rte_event_timer_adapter_start()``.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @return
+ * - 0: Success, adapter stopped.
+ * - <0: Error code returned by the driver stop function.
+ * - -EINVAL if adapter identifier invalid
+ */
+int __rte_experimental
+rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Lookup an event timer adapter using its identifier.
+ *
+ * If an event timer adapter was created in another process with the same
+ * identifier, this function will locate its state and set up access to it
+ * so that it can be used in this process.
+ *
+ * @param adapter_id
+ * The event timer adapter identifier.
+ *
+ * @return
+ * A pointer to the event timer adapter matching the identifier on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ENOENT - requested entry not available to return.
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_lookup(uint16_t adapter_id);
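A sketch of the secondary-process flow, assuming the primary process created adapter id 0:

    struct rte_event_timer_adapter *adapter =
            rte_event_timer_adapter_lookup(0);
    if (adapter == NULL) {
            if (rte_errno == ENOENT)
                    ; /* the primary has not created adapter 0 yet */
    } else {
            /* adapter is now usable for arm/cancel in this process */
    }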
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event timer adapter.
+ *
+ * Destroy an event timer adapter, freeing all resources.
+ *
+ * Before invoking this function, the application must wait for all the
+ * armed timers to expire or cancel the outstanding armed timers.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ *
+ * @return
+ * - 0: Successfully freed the event timer adapter resources.
+ * - <0: Failed to free the event timer adapter resources.
+ * - -EAGAIN: adapter is busy; timers outstanding
+ * - -EBUSY: stop hasn't been called for this adapter yet
+ * - -EINVAL: adapter id invalid, or adapter invalid
+ */
+int __rte_experimental
+rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the service ID of the event timer adapter. If the adapter doesn't
+ * use an rte_service function, this function returns -ESRCH.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ * - -ESRCH: the adapter does not require a service to operate
+ */
+int __rte_experimental
+rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
+ uint32_t *service_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an event timer adapter instance.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param[out] stats
+ * A pointer to a structure to fill with statistics.
+ *
+ * @return
+ * - 0: Successfully retrieved.
+ * - <0: Failure; error code returned.
+ */
+int __rte_experimental
+rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats);
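For example, a periodic stats dump might look like this (sketch; assumes *adapter* from a prior create and the usual stdio.h/inttypes.h includes):

    struct rte_event_timer_adapter_stats stats;
    if (rte_event_timer_adapter_stats_get(adapter, &stats) == 0)
            printf("expired=%" PRIu64 " enqueued=%" PRIu64 " ticks=%" PRIu64 "\n",
                   stats.evtim_exp_count, stats.ev_enq_count,
                   stats.adapter_tick_count);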
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an event timer adapter instance.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ *
+ * @return
+ * - 0: Successfully reset;
+ * - <0: Failure; error code returned.
+ */
+int __rte_experimental rte_event_timer_adapter_stats_reset(
+ struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Event timer state.
+ */
+enum rte_event_timer_state {
+ RTE_EVENT_TIMER_NOT_ARMED = 0,
+ /**< Event timer not armed. */
+ RTE_EVENT_TIMER_ARMED = 1,
+ /**< Event timer successfully armed. */
+ RTE_EVENT_TIMER_CANCELED = 2,
+ /**< Event timer successfully canceled. */
+ RTE_EVENT_TIMER_ERROR = -1,
+ /**< Generic event timer error. */
+ RTE_EVENT_TIMER_ERROR_TOOEARLY = -2,
+ /**< Event timer timeout tick value is too small for the adapter to
+ * handle, given its configured resolution.
+ */
+ RTE_EVENT_TIMER_ERROR_TOOLATE = -3,
+ /**< Event timer timeout tick is greater than the maximum timeout.*/
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * The generic *rte_event_timer* structure to hold the event timer attributes
+ * for arm and cancel operations.
+ */
+RTE_STD_C11
+struct rte_event_timer {
+ struct rte_event ev;
+ /**<
+ * Expiry event attributes. On successful event timer timeout,
+ * the following attributes will be used to inject the expiry event to
+ * the eventdev:
+ * - event_queue_id: Targeted event queue id for expiry events.
+ * - event_priority: Event priority of the expiry event in the event
+ * queue relative to other events.
+ * - sched_type: Scheduling type of the expiry event.
+ * - flow_id: Flow id of the expiry event.
+ * - op: RTE_EVENT_OP_NEW
+ * - event_type: RTE_EVENT_TYPE_TIMER
+ */
+ volatile enum rte_event_timer_state state;
+ /**< State of the event timer. */
+ uint64_t timeout_ticks;
+ /**< Expiry timer ticks expressed in number of *timer_tick_ns* from
+ * now.
+ * @see struct rte_event_timer_adapter_info::conf::timer_tick_ns
+ */
+ uint64_t impl_opaque[2];
+ /**< Implementation-specific opaque data.
+ * An event timer adapter implementation uses this field to hold
+ * implementation-specific values shared between the arm and cancel
+ * operations. The application should not modify this field.
+ */
+ uint8_t user_meta[0];
+ /**< Memory to store user specific metadata.
+ * The event timer adapter implementation should not modify this area.
+ */
+} __rte_cache_aligned;
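A sketch of initializing one of these structures before arming, following the attribute list above (queue id 0 and the tick count are arbitrary assumptions):

    struct rte_event_timer tim = {
            .ev.op = RTE_EVENT_OP_NEW,
            .ev.queue_id = 0,
            .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
            .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
            .ev.event_type = RTE_EVENT_TYPE_TIMER,
            .state = RTE_EVENT_TIMER_NOT_ARMED,
            .timeout_ticks = 50,    /* 50 * timer_tick_ns from now */
    };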
+
+typedef uint16_t (*rte_event_timer_arm_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint16_t nb_tims);
+/**< @internal Enable event timers to enqueue timer events upon expiry */
+typedef uint16_t (*rte_event_timer_arm_tmo_tick_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint64_t timeout_tick,
+ uint16_t nb_tims);
+/**< @internal Enable event timers with common expiration time */
+typedef uint16_t (*rte_event_timer_cancel_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint16_t nb_tims);
+/**< @internal Prevent event timers from enqueuing timer events */
+
+/**
+ * @internal Data structure associated with each event timer adapter.
+ */
+struct rte_event_timer_adapter {
+ rte_event_timer_arm_burst_t arm_burst;
+ /**< Pointer to driver arm_burst function. */
+ rte_event_timer_arm_tmo_tick_burst_t arm_tmo_tick_burst;
+ /**< Pointer to driver arm_tmo_tick_burst function. */
+ rte_event_timer_cancel_burst_t cancel_burst;
+ /**< Pointer to driver cancel function. */
+ struct rte_event_timer_adapter_data *data;
+ /**< Pointer to shared adapter data */
+ const struct rte_event_timer_adapter_ops *ops;
+ /**< Functions exported by adapter driver */
+
+ RTE_STD_C11
+ uint8_t allocated : 1;
+ /**< Flag to indicate that this adapter has been allocated */
+} __rte_cache_aligned;
+
+#define ADAPTER_VALID_OR_ERR_RET(adapter, retval) do { \
+ if (adapter == NULL || !adapter->allocated) \
+ return retval; \
+} while (0)
+
+#define FUNC_PTR_OR_ERR_RET(func, errval) do { \
+ if ((func) == NULL) \
+ return errval; \
+} while (0)
+
+#define FUNC_PTR_OR_NULL_RET_WITH_ERRNO(func, errval) do { \
+ if ((func) == NULL) { \
+ rte_errno = errval; \
+ return NULL; \
+ } \
+} while (0)
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Arm a burst of event timers with separate expiration timeout tick for each
+ * event timer.
+ *
+ * Before calling this function, the application allocates
+ * ``struct rte_event_timer`` objects from a mempool or from huge-page backed
+ * application buffers of the desired size. On successful allocation, the
+ * application updates the ``struct rte_event_timer`` attributes such as the
+ * expiry event attributes and the timeout ticks from now.
+ * This function submits the event timer arm requests to the event timer
+ * adapter, and on expiry the events will be injected to the designated event
+ * queue.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Pointer to an array of objects of type *rte_event_timer* structure.
+ * @param nb_evtims
+ * Number of event timers in the supplied array.
+ *
+ * @return
+ * The number of successfully armed event timers. The return value can be less
+ * than the value of the *nb_evtims* parameter. If the return value is less
+ * than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them; rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter, expiry event queue ID is invalid, or an
+ * expiry event's sched type doesn't match the capabilities of the
+ * destination event queue.
+ * - EAGAIN Specified timer adapter is not running
+ * - EALREADY A timer was encountered that was already armed
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->arm_burst, -EINVAL);
+#endif
+ return adapter->arm_burst(adapter, evtims, nb_evtims);
+}
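Tying the pieces together, arming the timer initialized earlier might look like this (sketch; *adapter* from the create step):

    struct rte_event_timer *evtims[] = { &tim };
    if (rte_event_timer_arm_burst(adapter, evtims, 1) != 1)
            ; /* inspect rte_errno and tim.state for the failure reason */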
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Arm a burst of event timers with same expiration timeout tick.
+ *
+ * Provides the same functionality as ``rte_event_timer_arm_burst()``, except
+ * that the application can use this API when all the event timers share the
+ * same expiration tick. This specialized function gives the adapter
+ * implementation an additional hint it can use to optimize if possible.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Points to an array of objects of type *rte_event_timer* structure.
+ * @param timeout_ticks
+ * The number of ticks in which the timers should expire.
+ * @param nb_evtims
+ * Number of event timers in the supplied array.
+ *
+ * @return
+ * The number of successfully armed event timers. The return value can be less
+ * than the value of the *nb_evtims* parameter. If the return value is less
+ * than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them; rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter, expiry event queue ID is invalid, or an
+ * expiry event's sched type doesn't match the capabilities of the
+ * destination event queue.
+ * - EAGAIN Specified event timer adapter is not running
+ * - EALREADY A timer was encountered that was already armed
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_arm_tmo_tick_burst(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ const uint64_t timeout_ticks,
+ const uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->arm_tmo_tick_burst, -EINVAL);
+#endif
+ return adapter->arm_tmo_tick_burst(adapter, evtims, timeout_ticks,
+ nb_evtims);
+}
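For a batch sharing one deadline, a sketch (assuming *evtims* holds *nb* initialized, un-armed timers):

    uint16_t armed = rte_event_timer_arm_tmo_tick_burst(adapter, evtims,
                                                        100, nb);
    if (armed < nb)
            ; /* evtims[armed..nb-1] were not consumed; see rte_errno */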
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Cancel a burst of event timers from being scheduled to the event device.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Points to an array of objects of type *rte_event_timer* structure
+ * @param nb_evtims
+ * Number of event timer instances in the supplied array.
+ *
+ * @return
+ * The number of successfully canceled event timers. The return value can be
+ * less than the value of the *nb_evtims* parameter. If the return value is
+ * less than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them; rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter identifier
+ * - EAGAIN Specified timer adapter is not running
+ * - EALREADY A timer was encountered that was already canceled
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->cancel_burst, -EINVAL);
+#endif
+ return adapter->cancel_burst(adapter, evtims, nb_evtims);
+}
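And a matching cancel sketch for a timer that has not yet expired (same *adapter* and *evtims* as above):

    if (rte_event_timer_cancel_burst(adapter, evtims, 1) == 1)
            ; /* tim.state is now RTE_EVENT_TIMER_CANCELED */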
+
+#endif /* __RTE_EVENT_TIMER_ADAPTER_H__ */
diff --git a/lib/librte_eventdev/rte_event_timer_adapter_pmd.h b/lib/librte_eventdev/rte_event_timer_adapter_pmd.h
new file mode 100644
index 00000000..cf3509dc
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_timer_adapter_pmd.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+
+/**
+ * @file
+ * RTE Event Timer Adapter API (PMD Side)
+ *
+ * @note
+ * This file provides implementation helpers for internal use by PMDs. They
+ * are not intended to be exposed to applications and are not subject to ABI
+ * versioning.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_event_timer_adapter.h"
+
+/*
+ * Definitions of functions exported by an event timer adapter implementation
+ * through *rte_event_timer_adapter_ops* structure supplied in the
+ * *rte_event_timer_adapter* structure associated with an event timer adapter.
+ */
+
+typedef int (*rte_event_timer_adapter_init_t)(
+ struct rte_event_timer_adapter *adapter);
+/**< @internal Event timer adapter implementation setup */
+typedef int (*rte_event_timer_adapter_uninit_t)(
+ struct rte_event_timer_adapter *adapter);
+/**< @internal Event timer adapter implementation teardown */
+typedef int (*rte_event_timer_adapter_start_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Start running event timer adapter */
+typedef int (*rte_event_timer_adapter_stop_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Stop running event timer adapter */
+typedef void (*rte_event_timer_adapter_get_info_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info);
+/**< @internal Get contextual information for event timer adapter */
+typedef int (*rte_event_timer_adapter_stats_get_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats);
+/**< @internal Get statistics for event timer adapter */
+typedef int (*rte_event_timer_adapter_stats_reset_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Reset statistics for event timer adapter */
+
+/**
+ * @internal Structure containing the functions exported by an event timer
+ * adapter implementation.
+ */
+struct rte_event_timer_adapter_ops {
+ rte_event_timer_adapter_init_t init; /**< Set up adapter */
+ rte_event_timer_adapter_uninit_t uninit;/**< Tear down adapter */
+ rte_event_timer_adapter_start_t start; /**< Start adapter */
+ rte_event_timer_adapter_stop_t stop; /**< Stop adapter */
+ rte_event_timer_adapter_get_info_t get_info;
+ /**< Get info from driver */
+ rte_event_timer_adapter_stats_get_t stats_get;
+ /**< Get adapter statistics */
+ rte_event_timer_adapter_stats_reset_t stats_reset;
+ /**< Reset adapter statistics */
+ rte_event_timer_arm_burst_t arm_burst;
+ /**< Arm one or more event timers */
+ rte_event_timer_arm_tmo_tick_burst_t arm_tmo_tick_burst;
+ /**< Arm event timers with same expiration time */
+ rte_event_timer_cancel_burst_t cancel_burst;
+ /**< Cancel one or more event timers */
+};
+
+/**
+ * @internal Adapter data; structure to be placed in shared memory to be
+ * accessible by various processes in a multi-process configuration.
+ */
+struct rte_event_timer_adapter_data {
+ uint8_t id;
+ /**< Event timer adapter ID */
+ uint8_t event_dev_id;
+ /**< Event device ID */
+ uint32_t socket_id;
+ /**< Socket ID where memory is allocated */
+ uint8_t event_port_id;
+ /**< Optional: event port ID used when the inbuilt port is absent */
+ const struct rte_memzone *mz;
+ /**< Event timer adapter memzone pointer */
+ struct rte_event_timer_adapter_conf conf;
+ /**< Configuration used to configure the adapter. */
+ uint32_t caps;
+ /**< Adapter capabilities */
+ void *adapter_priv;
+ /**< Timer adapter private data */
+ uint8_t service_inited;
+ /**< Service initialization state */
+ uint32_t service_id;
+ /**< Service ID */
+
+ RTE_STD_C11
+ uint8_t started : 1;
+ /**< Flag to indicate adapter started. */
+} __rte_cache_aligned;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index 851a1190..801810ed 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -29,6 +29,8 @@
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
@@ -55,16 +57,21 @@ int
rte_event_dev_get_dev_id(const char *name)
{
int i;
+ uint8_t cmp;
if (!name)
return -EINVAL;
- for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
- if ((strcmp(rte_event_devices[i].data->name, name)
- == 0) &&
- (rte_event_devices[i].attached ==
- RTE_EVENTDEV_ATTACHED))
+ for (i = 0; i < rte_eventdev_globals->nb_devs; i++) {
+ cmp = (strncmp(rte_event_devices[i].data->name, name,
+ RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
+ (rte_event_devices[i].dev ? (strncmp(
+ rte_event_devices[i].dev->driver->name, name,
+ RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
+ if (cmp && (rte_event_devices[i].attached ==
+ RTE_EVENTDEV_ATTACHED))
return i;
+ }
return -ENODEV;
}
@@ -123,6 +130,51 @@ rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
: 0;
}
+int __rte_experimental
+rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+ const struct rte_event_timer_adapter_ops *ops;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+ dev = &rte_eventdevs[dev_id];
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->timer_adapter_caps_get ?
+ (*dev->dev_ops->timer_adapter_caps_get)(dev,
+ 0,
+ caps,
+ &ops)
+ : 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
+ uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+ struct rte_cryptodev *cdev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
+ return -EINVAL;
+
+ dev = &rte_eventdevs[dev_id];
+ cdev = rte_cryptodev_pmd_get_dev(cdev_id);
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->crypto_adapter_caps_get ?
+ (*dev->dev_ops->crypto_adapter_caps_get)
+ (dev, cdev, caps) : -ENOTSUP;
+}
+
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
@@ -1123,6 +1175,23 @@ rte_event_dev_start(uint8_t dev_id)
return 0;
}
+int
+rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
+ eventdev_stop_flush_t callback, void *userdata)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ dev->dev_ops->dev_stop_flush = callback;
+ dev->data->dev_stop_flush_arg = userdata;
+
+ return 0;
+}
+
void
rte_event_dev_stop(uint8_t dev_id)
{
diff --git a/lib/librte_eventdev/rte_eventdev.h b/lib/librte_eventdev/rte_eventdev.h
index b21c2717..b6fd6ee7 100644
--- a/lib/librte_eventdev/rte_eventdev.h
+++ b/lib/librte_eventdev/rte_eventdev.h
@@ -1,35 +1,8 @@
-/*
- * BSD LICENSE
- *
- * Copyright 2016 Cavium, Inc.
- * Copyright 2016 Intel Corporation.
- * Copyright 2016 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Cavium, Inc nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc.
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright 2016 NXP
+ * All rights reserved.
*/
#ifndef _RTE_EVENTDEV_H_
@@ -244,6 +217,7 @@ extern "C" {
#include <rte_errno.h>
struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
+struct rte_event;
/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
@@ -835,15 +809,60 @@ int
rte_event_dev_start(uint8_t dev_id);
/**
- * Stop an event device. The device can be restarted with a call to
- * rte_event_dev_start()
+ * Stop an event device.
+ *
+ * This function causes all queued events to be drained, including those
+ * residing in event ports. While draining events out of the device, this
+ * function calls the user-provided flush callback (if one was registered) once
+ * per event.
+ *
+ * The device can be restarted with a call to rte_event_dev_start(). Threads
+ * that continue to enqueue/dequeue while the device is stopped, or being
+ * stopped, will result in undefined behavior. This includes event adapters,
+ * which must be stopped prior to stopping the eventdev.
*
* @param dev_id
* Event device identifier.
+ *
+ * @see rte_event_dev_stop_flush_callback_register()
*/
void
rte_event_dev_stop(uint8_t dev_id);
+typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
+ void *arg);
+/**< Callback function called during rte_event_dev_stop(), invoked once per
+ * flushed event.
+ */
+
+/**
+ * Registers a callback function to be invoked during rte_event_dev_stop() for
+ * each flushed event. This function can be used to properly dispose of queued
+ * events, for example events containing memory pointers.
+ *
+ * The callback function is only registered for the calling process. The
+ * callback function must be registered in every process that can call
+ * rte_event_dev_stop().
+ *
+ * To unregister a callback, call this function with a NULL callback pointer.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param callback
+ * Callback function invoked once per flushed event.
+ * @param userdata
+ * Argument supplied to callback.
+ *
+ * @return
+ * - 0 on success.
+ * - -EINVAL if *dev_id* is invalid
+ *
+ * @see rte_event_dev_stop()
+ */
+int
+rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
+ eventdev_stop_flush_t callback, void *userdata);
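A sketch of such a callback for events that carry mbufs (an assumption; it presumes the application's events use the *mbuf* field of struct rte_event):

    static void
    flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
    {
            RTE_SET_USED(dev_id);
            RTE_SET_USED(arg);
            /* Dispose of the mbuf referenced by the drained event. */
            rte_pktmbuf_free(event.mbuf);
    }

    /* Register before the eventual rte_event_dev_stop(dev_id). */
    rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);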
+
/**
* Close an event device. The device cannot be restarted!
*
@@ -923,8 +942,8 @@ rte_event_dev_close(uint8_t dev_id);
/**< The event generated from ethdev subsystem */
#define RTE_EVENT_TYPE_CRYPTODEV 0x1
/**< The event generated from cryptodev subsystem */
-#define RTE_EVENT_TYPE_TIMERDEV 0x2
-/**< The event generated from timerdev subsystem */
+#define RTE_EVENT_TYPE_TIMER 0x2
+/**< The event generated from event timer adapter */
#define RTE_EVENT_TYPE_CPU 0x3
/**< The event generated from cpu for pipelining.
* Application may use *sub_event_type* to further classify the event
@@ -1096,7 +1115,77 @@ int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
uint32_t *caps);
-struct rte_eventdev_driver;
+#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
+/**< This flag is set when the timer mechanism is in HW. */
+
+/**
+ * Retrieve the event device's timer adapter capabilities.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param[out] caps
+ * A pointer to memory to be filled with event timer adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provided event timer adapter capabilities.
+ * - <0: Error code returned by the driver function.
+ */
+int __rte_experimental
+rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
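For example, a caller could use this to decide whether a service core is needed (sketch):

    uint32_t caps = 0;
    if (rte_event_timer_adapter_caps_get(dev_id, &caps) == 0 &&
        (caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
            ; /* timers are handled in HW; no SW service required */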
+
+/* Crypto adapter capability bitmap flag */
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
+/**< Flag indicates HW is capable of generating events in
+ * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
+ * packets to the event device as new events using an internal
+ * event port.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
+/**< Flag indicates HW is capable of generating events in
+ * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
+ * packets to the event device as forwarded event using an
+ * internal event port.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
+/**< Flag indicates HW is capable of mapping crypto queue pair to
+ * event queue.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
+/**< Flag indicates HW/SW supports a mechanism to store and retrieve
+ * the private data information along with the crypto session.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the event device's crypto adapter capabilities for the
+ * specified cryptodev device
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param cdev_id
+ * The identifier of the cryptodev device.
+ *
+ * @param[out] caps
+ * A pointer to memory filled with event adapter capabilities.
+ * It is expected to be pre-allocated & initialized by the caller.
+ *
+ * @return
+ * - 0: Success, driver provides event adapter capabilities for the
+ * cryptodev device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+int __rte_experimental
+rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
+ uint32_t *caps);
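A similar probing sketch for the crypto adapter capabilities (assuming valid *dev_id* and *cdev_id*):

    uint32_t caps = 0;
    if (rte_event_crypto_adapter_caps_get(dev_id, cdev_id, &caps) == 0 &&
        !(caps & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
            ; /* completions must be forwarded via a SW service */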
+
struct rte_eventdev_ops;
struct rte_eventdev;
@@ -1152,6 +1241,8 @@ struct rte_eventdev_data {
/* Service initialization state */
uint32_t service_id;
/* Service ID*/
+ void *dev_stop_flush_arg;
+ /**< User-provided argument for event flush function */
RTE_STD_C11
uint8_t dev_started : 1;
@@ -1178,7 +1269,7 @@ struct rte_eventdev {
struct rte_eventdev_data *data;
/**< Pointer to device data */
- const struct rte_eventdev_ops *dev_ops;
+ struct rte_eventdev_ops *dev_ops;
/**< Functions exported by PMD */
struct rte_device *dev;
/**< Device info. supplied by probing */
diff --git a/lib/librte_eventdev/rte_eventdev_pmd.h b/lib/librte_eventdev/rte_eventdev_pmd.h
index 31343b51..3fbb4d2b 100644
--- a/lib/librte_eventdev/rte_eventdev_pmd.h
+++ b/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -26,6 +26,7 @@ extern "C" {
#include <rte_malloc.h>
#include "rte_eventdev.h"
+#include "rte_event_timer_adapter_pmd.h"
/* Logging Macros */
#define RTE_EDEV_LOG_ERR(...) \
@@ -69,6 +70,9 @@ extern "C" {
((RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) | \
(RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ))
+#define RTE_EVENT_CRYPTO_ADAPTER_SW_CAP \
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA
+
/**< Ethernet Rx adapter cap to return if the packet transfers from
* the ethdev to eventdev use a SW service function
*/
@@ -449,6 +453,37 @@ typedef int (*eventdev_eth_rx_adapter_caps_get_t)
struct rte_event_eth_rx_adapter_queue_conf *queue_conf;
/**
+ * Retrieve the event device's timer adapter capabilities, as well as the ops
+ * structure that an event timer adapter should call through to enter the
+ * driver
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param flags
+ * Flags that can be used to determine how to select an event timer
+ * adapter ops structure
+ *
+ * @param[out] caps
+ * A pointer to memory filled with timer adapter capabilities.
+ *
+ * @param[out] ops
+ * A pointer to the ops pointer to set with the address of the desired ops
+ * structure
+ *
+ * @return
+ * - 0: Success, driver provides timer adapter capabilities for the
+ * event device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_timer_adapter_caps_get_t)(
+ const struct rte_eventdev *dev,
+ uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops);
+
+/**
* Add ethernet Rx queues to event device. This callback is invoked if
* the caps returned from rte_eventdev_eth_rx_adapter_caps_get(, eth_port_id)
* has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
@@ -585,6 +620,175 @@ typedef int (*eventdev_eth_rx_adapter_stats_reset)
*/
typedef int (*eventdev_selftest)(void);
+
+struct rte_cryptodev;
+
+/**
+ * This API may change without prior notice
+ *
+ * Retrieve the event device's crypto adapter capabilities for the
+ * specified cryptodev
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param[out] caps
+ * A pointer to memory filled with event adapter capabilities.
+ * It is expected to be pre-allocated & initialized by the caller.
+ *
+ * @return
+ * - 0: Success, driver provides event adapter capabilities for the
+ * cryptodev.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_caps_get_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps);
+
+/**
+ * This API may change without prior notice
+ *
+ * Add crypto queue pair to event device. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(.., cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param queue_pair_id
+ * cryptodev queue pair identifier.
+ *
+ * @param event
+ * Event information required for binding cryptodev queue pair to event queue.
+ * This structure will have a valid value for only those HW PMDs supporting
+ * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability.
+ *
+ * @return
+ * - 0: Success, cryptodev queue pair added successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id,
+ const struct rte_event *event);
+
+
+/**
+ * This API may change without prior notice
+ *
+ * Delete crypto queue pair from event device. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(.., cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param queue_pair_id
+ * cryptodev queue pair identifier.
+ *
+ * @return
+ * - 0: Success, cryptodev queue pair deleted successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_queue_pair_del_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id);
+
+/**
+ * Start crypto adapter. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(.., cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set and queue pairs
+ * from cdev_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * - 0: Success, crypto adapter started successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_start_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
+/**
+ * Stop crypto adapter. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(.., cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set and queue pairs
+ * from cdev_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * - 0: Success, crypto adapter stopped successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_stop_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
+struct rte_event_crypto_adapter_stats;
+
+/**
+ * Retrieve crypto adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @param[out] stats
+ * Pointer to stats structure
+ *
+ * @return
+ * Return 0 on success.
+ */
+
+typedef int (*eventdev_crypto_adapter_stats_get)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ struct rte_event_crypto_adapter_stats *stats);
+
+/**
+ * Reset crypto adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * Return 0 on success.
+ */
+
+typedef int (*eventdev_crypto_adapter_stats_reset)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
/** Event device operations function pointer table */
struct rte_eventdev_ops {
eventdev_info_get_t dev_infos_get; /**< Get device info. */
@@ -640,8 +844,29 @@ struct rte_eventdev_ops {
eventdev_eth_rx_adapter_stats_reset eth_rx_adapter_stats_reset;
/**< Reset ethernet Rx stats */
+ eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
+ /**< Get timer adapter capabilities */
+
+ eventdev_crypto_adapter_caps_get_t crypto_adapter_caps_get;
+ /**< Get crypto adapter capabilities */
+ eventdev_crypto_adapter_queue_pair_add_t crypto_adapter_queue_pair_add;
+ /**< Add queue pair to crypto adapter */
+ eventdev_crypto_adapter_queue_pair_del_t crypto_adapter_queue_pair_del;
+ /**< Delete queue pair from crypto adapter */
+ eventdev_crypto_adapter_start_t crypto_adapter_start;
+ /**< Start crypto adapter */
+ eventdev_crypto_adapter_stop_t crypto_adapter_stop;
+ /**< Stop crypto adapter */
+ eventdev_crypto_adapter_stats_get crypto_adapter_stats_get;
+ /**< Get crypto stats */
+ eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
+ /**< Reset crypto stats */
+
eventdev_selftest dev_selftest;
/**< Start eventdev Selftest */
+
+ eventdev_stop_flush_t dev_stop_flush;
+ /**< User-provided event flush function */
};
/**
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 2aef470b..12835e9f 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -66,7 +66,6 @@ DPDK_17.11 {
rte_event_eth_rx_adapter_stats_get;
rte_event_eth_rx_adapter_stats_reset;
rte_event_eth_rx_adapter_stop;
-
} DPDK_17.08;
DPDK_18.02 {
@@ -74,3 +73,41 @@ DPDK_18.02 {
rte_event_dev_selftest;
} DPDK_17.11;
+
+DPDK_18.05 {
+ global:
+
+ rte_event_dev_stop_flush_callback_register;
+} DPDK_18.02;
+
+EXPERIMENTAL {
+ global:
+
+ rte_event_crypto_adapter_caps_get;
+ rte_event_crypto_adapter_create;
+ rte_event_crypto_adapter_create_ext;
+ rte_event_crypto_adapter_event_port_get;
+ rte_event_crypto_adapter_free;
+ rte_event_crypto_adapter_queue_pair_add;
+ rte_event_crypto_adapter_queue_pair_del;
+ rte_event_crypto_adapter_service_id_get;
+ rte_event_crypto_adapter_start;
+ rte_event_crypto_adapter_stats_get;
+ rte_event_crypto_adapter_stats_reset;
+ rte_event_crypto_adapter_stop;
+ rte_event_eth_rx_adapter_cb_register;
+ rte_event_timer_adapter_caps_get;
+ rte_event_timer_adapter_create;
+ rte_event_timer_adapter_create_ext;
+ rte_event_timer_adapter_free;
+ rte_event_timer_adapter_get_info;
+ rte_event_timer_adapter_lookup;
+ rte_event_timer_adapter_service_id_get;
+ rte_event_timer_adapter_start;
+ rte_event_timer_adapter_stats_get;
+ rte_event_timer_adapter_stats_reset;
+ rte_event_timer_adapter_stop;
+ rte_event_timer_arm_burst;
+ rte_event_timer_arm_tmo_tick_burst;
+ rte_event_timer_cancel_burst;
+};
diff --git a/lib/librte_flow_classify/rte_flow_classify.c b/lib/librte_flow_classify/rte_flow_classify.c
index 7edb2f15..4c3469da 100644
--- a/lib/librte_flow_classify/rte_flow_classify.c
+++ b/lib/librte_flow_classify/rte_flow_classify.c
@@ -635,9 +635,7 @@ action_apply(struct rte_flow_classifier *cls,
}
if (count) {
ret = 0;
- ntuple_stats =
- (struct rte_flow_classify_ipv4_5tuple_stats *)
- stats->stats;
+ ntuple_stats = stats->stats;
ntuple_stats->counter1 = count;
ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
}
@@ -675,10 +673,7 @@ rte_flow_classifier_query(struct rte_flow_classifier *cls,
return ret;
}
-RTE_INIT(librte_flow_classify_init_log);
-
-static void
-librte_flow_classify_init_log(void)
+RTE_INIT(librte_flow_classify_init_log)
{
librte_flow_classify_logtype =
rte_log_register("lib.flow_classify");
diff --git a/lib/librte_flow_classify/rte_flow_classify_parse.c b/lib/librte_flow_classify/rte_flow_classify_parse.c
index 10eaf043..f65ceaf7 100644
--- a/lib/librte_flow_classify/rte_flow_classify_parse.c
+++ b/lib/librte_flow_classify/rte_flow_classify_parse.c
@@ -279,7 +279,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
- ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
/**
* Only support src & dst addresses, protocol,
* others should be masked.
@@ -301,7 +301,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
filter->proto_mask = ipv4_mask->hdr.next_proto_id;
- ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
filter->dst_ip = ipv4_spec->hdr.dst_addr;
filter->src_ip = ipv4_spec->hdr.src_addr;
filter->proto = ipv4_spec->hdr.next_proto_id;
@@ -339,7 +339,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
}
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
/**
* Only support src & dst ports, tcp flags,
@@ -373,12 +373,12 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
return -EINVAL;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -397,11 +397,11 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
} else {
- sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
/**
* Only support src & dst ports,
@@ -420,7 +420,7 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
filter->dst_port_mask = sctp_mask->hdr.dst_port;
filter->src_port_mask = sctp_mask->hdr.src_port;
- sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
}
@@ -480,12 +480,12 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
- count = (const struct rte_flow_action_count *)act->conf;
+ count = act->conf;
memcpy(&action.act.counter, count, sizeof(action.act.counter));
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
- mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ mark_spec = act->conf;
memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
break;
default:
@@ -502,12 +502,12 @@ classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_COUNT:
action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
- count = (const struct rte_flow_action_count *)act->conf;
+ count = act->conf;
memcpy(&action.act.counter, count, sizeof(action.act.counter));
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
- mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ mark_spec = act->conf;
memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
break;
case RTE_FLOW_ACTION_TYPE_END:
diff --git a/lib/librte_gso/Makefile b/lib/librte_gso/Makefile
index 3648ec09..1fac53a8 100644
--- a/lib/librte_gso/Makefile
+++ b/lib/librte_gso/Makefile
@@ -19,6 +19,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_GSO) += rte_gso.c
SRCS-$(CONFIG_RTE_LIBRTE_GSO) += gso_common.c
SRCS-$(CONFIG_RTE_LIBRTE_GSO) += gso_tcp4.c
SRCS-$(CONFIG_RTE_LIBRTE_GSO) += gso_tunnel_tcp4.c
+SRCS-$(CONFIG_RTE_LIBRTE_GSO) += gso_udp4.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_GSO)-include += rte_gso.h
diff --git a/lib/librte_gso/gso_common.h b/lib/librte_gso/gso_common.h
index 5ca59745..6cd764ff 100644
--- a/lib/librte_gso/gso_common.h
+++ b/lib/librte_gso/gso_common.h
@@ -31,6 +31,9 @@
(PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
PKT_TX_TUNNEL_GRE))
+#define IS_IPV4_UDP(flag) (((flag) & (PKT_TX_UDP_SEG | PKT_TX_IPV4)) == \
+ (PKT_TX_UDP_SEG | PKT_TX_IPV4))
+
/**
* Internal function which updates the UDP header of a packet, following
* segmentation. This is required to update the header's datagram length field.
diff --git a/lib/librte_gso/gso_udp4.c b/lib/librte_gso/gso_udp4.c
new file mode 100644
index 00000000..927dee12
--- /dev/null
+++ b/lib/librte_gso/gso_udp4.c
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "gso_common.h"
+#include "gso_udp4.h"
+
+#define IPV4_HDR_MF_BIT (1U << 13)
+
+static inline void
+update_ipv4_udp_headers(struct rte_mbuf *pkt, struct rte_mbuf **segs,
+ uint16_t nb_segs)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ uint16_t frag_offset = 0, is_mf;
+ uint16_t l2_hdrlen = pkt->l2_len, l3_hdrlen = pkt->l3_len;
+ uint16_t tail_idx = nb_segs - 1, length, i;
+
+ /*
+ * Update IP header fields for output segments. Specifically,
+ * keep the same IP id, update fragment offset and total
+ * length.
+ */
+ for (i = 0; i < nb_segs; i++) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(segs[i], struct ipv4_hdr *,
+ l2_hdrlen);
+ length = segs[i]->pkt_len - l2_hdrlen;
+ ipv4_hdr->total_length = rte_cpu_to_be_16(length);
+
+ is_mf = i < tail_idx ? IPV4_HDR_MF_BIT : 0;
+ ipv4_hdr->fragment_offset =
+ rte_cpu_to_be_16(frag_offset | is_mf);
+ frag_offset += ((length - l3_hdrlen) >> 3);
+ }
+}
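Note the >> 3 above: the IPv4 fragment offset field counts 8-byte units, so a segment carrying, say, 1400 payload bytes advances frag_offset by 1400 / 8 = 175 units.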
+
+int
+gso_udp4_segment(struct rte_mbuf *pkt,
+ uint16_t gso_size,
+ struct rte_mempool *direct_pool,
+ struct rte_mempool *indirect_pool,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ uint16_t pyld_unit_size, hdr_offset;
+ uint16_t frag_off;
+ int ret;
+
+ /* Don't process the fragmented packet */
+ ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct ipv4_hdr *,
+ pkt->l2_len);
+ frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
+ if (unlikely(IS_FRAGMENTED(frag_off))) {
+ pkts_out[0] = pkt;
+ return 1;
+ }
+
+ /*
+ * UDP fragmentation is the same as IP fragmentation.
+ * Except for the first one, the output packets have only l2
+ * and l3 headers.
+ */
+ hdr_offset = pkt->l2_len + pkt->l3_len;
+
+ /* Don't process the packet without data. */
+ if (unlikely(hdr_offset + pkt->l4_len >= pkt->pkt_len)) {
+ pkts_out[0] = pkt;
+ return 1;
+ }
+
+ pyld_unit_size = gso_size - hdr_offset;
+
+ /* Segment the payload */
+ ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool,
+ indirect_pool, pkts_out, nb_pkts_out);
+ if (ret > 1)
+ update_ipv4_udp_headers(pkt, pkts_out, ret);
+
+ return ret;
+}
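This path is reached through rte_gso_segment(); a hedged caller sketch (assumes *gso_ctx* was set up with DEV_TX_OFFLOAD_UDP_TSO in gso_types and that *pkt* has PKT_TX_UDP_SEG | PKT_TX_IPV4 set with l2_len/l3_len/l4_len filled in):

    struct rte_mbuf *segs[64];
    int nb = rte_gso_segment(pkt, &gso_ctx, segs, RTE_DIM(segs));
    if (nb < 0)
            ; /* -EINVAL (bad parameters) or -ENOMEM (pool exhausted) */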
diff --git a/lib/librte_gso/gso_udp4.h b/lib/librte_gso/gso_udp4.h
new file mode 100644
index 00000000..b2a2908e
--- /dev/null
+++ b/lib/librte_gso/gso_udp4.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _GSO_UDP4_H_
+#define _GSO_UDP4_H_
+
+#include <stdint.h>
+#include <rte_mbuf.h>
+
+/**
+ * Segment a UDP/IPv4 packet. This function doesn't check if the input
+ * packet has correct checksums, and doesn't update checksums for output
+ * GSO segments. Furthermore, it doesn't process IP fragment packets.
+ *
+ * @param pkt
+ * The packet mbuf to segment.
+ * @param gso_size
+ * The max length of a GSO segment, measured in bytes.
+ * @param direct_pool
+ * MBUF pool used for allocating direct buffers for output segments.
+ * @param indirect_pool
+ * MBUF pool used for allocating indirect buffers for output segments.
+ * @param pkts_out
+ * Pointer array used to store the MBUF addresses of output GSO
+ * segments, when the function succeeds. If the memory space in
+ * pkts_out is insufficient, it fails and returns -EINVAL.
+ * @param nb_pkts_out
+ * The max number of items that 'pkts_out' can keep.
+ *
+ * @return
+ * - The number of GSO segments filled in pkts_out on success.
+ *  - Return -ENOMEM if the MBUF pools run out of memory.
+ * - Return -EINVAL for invalid parameters.
+ */
+int gso_udp4_segment(struct rte_mbuf *pkt,
+ uint16_t gso_size,
+ struct rte_mempool *direct_pool,
+ struct rte_mempool *indirect_pool,
+ struct rte_mbuf **pkts_out,
+ uint16_t nb_pkts_out);
+#endif
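A sketch of driving this path through the public API; the pool handles, gso_size value and helper name are illustrative, the struct field names follow rte_gso.h, and error handling is trimmed. The mbuf must carry PKT_TX_IPV4 | PKT_TX_UDP_SEG and valid l2/l3/l4 lengths for rte_gso_segment() to dispatch to gso_udp4_segment():

#include <rte_ethdev.h>
#include <rte_gso.h>
#include <rte_mbuf.h>

/* Segment one UDP/IPv4 mbuf into at most nb_out GSO segments. */
static int
segment_udp_pkt(struct rte_mbuf *pkt, struct rte_mempool *direct_pool,
		struct rte_mempool *indirect_pool,
		struct rte_mbuf **out, uint16_t nb_out)
{
	struct rte_gso_ctx ctx = {
		.direct_pool = direct_pool,
		.indirect_pool = indirect_pool,
		.flag = 0,
		.gso_types = DEV_TX_OFFLOAD_UDP_TSO,
		.gso_size = 1400,	/* illustrative segment size */
	};

	/* The caller must have filled pkt->l2_len/l3_len/l4_len. */
	pkt->ol_flags |= PKT_TX_IPV4 | PKT_TX_UDP_SEG;

	return rte_gso_segment(pkt, &ctx, out, nb_out);
}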
diff --git a/lib/librte_gso/meson.build b/lib/librte_gso/meson.build
index 056534fb..ad8dd858 100644
--- a/lib/librte_gso/meson.build
+++ b/lib/librte_gso/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-sources = files('gso_common.c', 'gso_tcp4.c',
+sources = files('gso_common.c', 'gso_tcp4.c', 'gso_udp4.c',
'gso_tunnel_tcp4.c', 'rte_gso.c')
headers = files('rte_gso.h')
deps += ['ethdev']
diff --git a/lib/librte_gso/rte_gso.c b/lib/librte_gso/rte_gso.c
index a44e3d43..751b5b62 100644
--- a/lib/librte_gso/rte_gso.c
+++ b/lib/librte_gso/rte_gso.c
@@ -11,6 +11,17 @@
#include "gso_common.h"
#include "gso_tcp4.h"
#include "gso_tunnel_tcp4.h"
+#include "gso_udp4.h"
+
+#define ILLEGAL_UDP_GSO_CTX(ctx) \
+ ((((ctx)->gso_types & DEV_TX_OFFLOAD_UDP_TSO) == 0) || \
+ (ctx)->gso_size < RTE_GSO_UDP_SEG_SIZE_MIN)
+
+#define ILLEGAL_TCP_GSO_CTX(ctx) \
+ ((((ctx)->gso_types & (DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0) || \
+ (ctx)->gso_size < RTE_GSO_SEG_SIZE_MIN)
int
rte_gso_segment(struct rte_mbuf *pkt,
@@ -27,14 +38,12 @@ rte_gso_segment(struct rte_mbuf *pkt,
if (pkt == NULL || pkts_out == NULL || gso_ctx == NULL ||
nb_pkts_out < 1 ||
- gso_ctx->gso_size < RTE_GSO_SEG_SIZE_MIN ||
- ((gso_ctx->gso_types & (DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0))
+ (ILLEGAL_UDP_GSO_CTX(gso_ctx) &&
+ ILLEGAL_TCP_GSO_CTX(gso_ctx)))
return -EINVAL;
if (gso_ctx->gso_size >= pkt->pkt_len) {
- pkt->ol_flags &= (~PKT_TX_TCP_SEG);
+ pkt->ol_flags &= (~(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG));
pkts_out[0] = pkt;
return 1;
}
@@ -59,6 +68,11 @@ rte_gso_segment(struct rte_mbuf *pkt,
ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
direct_pool, indirect_pool,
pkts_out, nb_pkts_out);
+ } else if (IS_IPV4_UDP(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+ pkt->ol_flags &= (~PKT_TX_UDP_SEG);
+ ret = gso_udp4_segment(pkt, gso_size, direct_pool,
+ indirect_pool, pkts_out, nb_pkts_out);
} else {
/* unsupported packet, skip */
pkts_out[0] = pkt;
diff --git a/lib/librte_gso/rte_gso.h b/lib/librte_gso/rte_gso.h
index f4abd61c..a626a11e 100644
--- a/lib/librte_gso/rte_gso.h
+++ b/lib/librte_gso/rte_gso.h
@@ -17,10 +17,14 @@ extern "C" {
#include <stdint.h>
#include <rte_mbuf.h>
-/* Minimum GSO segment size. */
+/* Minimum GSO segment size for TCP based packets. */
#define RTE_GSO_SEG_SIZE_MIN (sizeof(struct ether_hdr) + \
sizeof(struct ipv4_hdr) + sizeof(struct tcp_hdr) + 1)
+/* Minimum GSO segment size for UDP based packets. */
+#define RTE_GSO_UDP_SEG_SIZE_MIN (sizeof(struct ether_hdr) + \
+ sizeof(struct ipv4_hdr) + sizeof(struct udp_hdr) + 1)
+
/* GSO flags for rte_gso_ctx. */
#define RTE_GSO_FLAG_IPID_FIXED (1ULL << 0)
/**< Use fixed IP ids for output GSO segments. Setting
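With the usual header sizes (14-byte Ethernet, 20-byte IPv4, 20-byte TCP, 8-byte UDP) the two minima evaluate to 55 and 43 bytes respectively, i.e. one payload byte on top of the mandatory headers. A compile-time sanity check, assuming the standard DPDK header struct layouts:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_gso.h>

/* 14 + 20 + 20 + 1 for TCP, 14 + 20 + 8 + 1 for UDP */
_Static_assert(RTE_GSO_SEG_SIZE_MIN == 55, "unexpected TCP GSO minimum");
_Static_assert(RTE_GSO_UDP_SEG_SIZE_MIN == 43, "unexpected UDP GSO minimum");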
diff --git a/lib/librte_hash/meson.build b/lib/librte_hash/meson.build
index e139e1d7..efc06ede 100644
--- a/lib/librte_hash/meson.build
+++ b/lib/librte_hash/meson.build
@@ -6,7 +6,6 @@ headers = files('rte_cmp_arm64.h',
'rte_cmp_x86.h',
'rte_crc_arm64.h',
'rte_cuckoo_hash.h',
- 'rte_cuckoo_hash_x86.h',
'rte_fbk_hash.h',
'rte_hash_crc.h',
'rte_hash.h',
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 9b1387b5..f7b86c8c 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -31,9 +31,6 @@
#include "rte_hash.h"
#include "rte_cuckoo_hash.h"
-#if defined(RTE_ARCH_X86)
-#include "rte_cuckoo_hash_x86.h"
-#endif
TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
@@ -93,8 +90,10 @@ rte_hash_create(const struct rte_hash_parameters *params)
void *buckets = NULL;
char ring_name[RTE_RING_NAMESIZE];
unsigned num_key_slots;
- unsigned hw_trans_mem_support = 0;
unsigned i;
+ unsigned int hw_trans_mem_support = 0, multi_writer_support = 0;
+ unsigned int readwrite_concur_support = 0;
+
rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
@@ -107,7 +106,6 @@ rte_hash_create(const struct rte_hash_parameters *params)
/* Check for valid parameters */
if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
- !rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
(params->key_len == 0)) {
rte_errno = EINVAL;
RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
@@ -118,21 +116,29 @@ rte_hash_create(const struct rte_hash_parameters *params)
if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
hw_trans_mem_support = 1;
+ if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD)
+ multi_writer_support = 1;
+
+ if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
+ readwrite_concur_support = 1;
+ multi_writer_support = 1;
+ }
+
/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
- if (hw_trans_mem_support)
+ if (multi_writer_support)
/*
* Increase number of slots by total number of indices
* that can be stored in the lcore caches
* except for the first cache
*/
num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
- LCORE_CACHE_SIZE + 1;
+ (LCORE_CACHE_SIZE - 1) + 1;
else
num_key_slots = params->entries + 1;
snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
/* Create ring (Dummy slot index is not enqueued) */
- r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots - 1),
+ r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
params->socket_id, 0);
if (r == NULL) {
RTE_LOG(ERR, HASH, "memory allocation failed\n");
@@ -233,7 +239,7 @@ rte_hash_create(const struct rte_hash_parameters *params)
h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif
- if (hw_trans_mem_support) {
+ if (multi_writer_support) {
h->local_free_slots = rte_zmalloc_socket(NULL,
sizeof(struct lcore_cache) * RTE_MAX_LCORE,
RTE_CACHE_LINE_SIZE, params->socket_id);
@@ -261,6 +267,8 @@ rte_hash_create(const struct rte_hash_parameters *params)
h->key_store = k;
h->free_slots = r;
h->hw_trans_mem_support = hw_trans_mem_support;
+ h->multi_writer_support = multi_writer_support;
+ h->readwrite_concur_support = readwrite_concur_support;
#if defined(RTE_ARCH_X86)
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
@@ -271,24 +279,20 @@ rte_hash_create(const struct rte_hash_parameters *params)
#endif
h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
- /* Turn on multi-writer only with explicit flat from user and TM
+ /* Turn on multi-writer only with explicit flag from user and TM
* support.
*/
- if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
- if (h->hw_trans_mem_support) {
- h->add_key = ADD_KEY_MULTIWRITER_TM;
- } else {
- h->add_key = ADD_KEY_MULTIWRITER;
- h->multiwriter_lock = rte_malloc(NULL,
- sizeof(rte_spinlock_t),
- LCORE_CACHE_SIZE);
- rte_spinlock_init(h->multiwriter_lock);
- }
- } else
- h->add_key = ADD_KEY_SINGLEWRITER;
+ if (h->multi_writer_support) {
+ h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
+ RTE_CACHE_LINE_SIZE);
+ if (h->readwrite_lock == NULL)
+ goto err_unlock;
+
+ rte_rwlock_init(h->readwrite_lock);
+ }
/* Populate free slots ring. Entry zero is reserved for key misses. */
- for (i = 1; i < params->entries + 1; i++)
+ for (i = 1; i < num_key_slots; i++)
rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
te->data = (void *) h;
@@ -335,11 +339,10 @@ rte_hash_free(struct rte_hash *h)
rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
- if (h->hw_trans_mem_support)
+ if (h->multi_writer_support) {
rte_free(h->local_free_slots);
-
- if (h->add_key == ADD_KEY_MULTIWRITER)
- rte_free(h->multiwriter_lock);
+ rte_free(h->readwrite_lock);
+ }
rte_ring_free(h->free_slots);
rte_free(h->key_store);
rte_free(h->buckets);
@@ -366,15 +369,78 @@ rte_hash_secondary_hash(const hash_sig_t primary_hash)
return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
+int32_t
+rte_hash_count(const struct rte_hash *h)
+{
+ uint32_t tot_ring_cnt, cached_cnt = 0;
+ uint32_t i, ret;
+
+ if (h == NULL)
+ return -EINVAL;
+
+ if (h->multi_writer_support) {
+ tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
+ (LCORE_CACHE_SIZE - 1);
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ cached_cnt += h->local_free_slots[i].len;
+
+ ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
+ cached_cnt;
+ } else {
+ tot_ring_cnt = h->entries;
+ ret = tot_ring_cnt - rte_ring_count(h->free_slots);
+ }
+ return ret;
+}
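rte_hash_count() subtracts both the free-slot ring occupancy and, in multi-writer mode, the per-lcore caches, so it reports keys actually in use rather than raw ring state. A hypothetical caller:

#include <stdio.h>
#include <stdint.h>
#include <rte_hash.h>

static void
report_occupancy(const struct rte_hash *h, uint32_t capacity)
{
	int32_t used = rte_hash_count(h);

	if (used < 0)	/* -EINVAL on a NULL handle */
		return;
	printf("hash table: %d/%u keys in use\n", used, capacity);
}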
+
+/* Read-write locks implemented using rte_rwlock */
+static inline void
+__hash_rw_writer_lock(const struct rte_hash *h)
+{
+ if (h->multi_writer_support && h->hw_trans_mem_support)
+ rte_rwlock_write_lock_tm(h->readwrite_lock);
+ else if (h->multi_writer_support)
+ rte_rwlock_write_lock(h->readwrite_lock);
+}
+
+
+static inline void
+__hash_rw_reader_lock(const struct rte_hash *h)
+{
+ if (h->readwrite_concur_support && h->hw_trans_mem_support)
+ rte_rwlock_read_lock_tm(h->readwrite_lock);
+ else if (h->readwrite_concur_support)
+ rte_rwlock_read_lock(h->readwrite_lock);
+}
+
+static inline void
+__hash_rw_writer_unlock(const struct rte_hash *h)
+{
+ if (h->multi_writer_support && h->hw_trans_mem_support)
+ rte_rwlock_write_unlock_tm(h->readwrite_lock);
+ else if (h->multi_writer_support)
+ rte_rwlock_write_unlock(h->readwrite_lock);
+}
+
+static inline void
+__hash_rw_reader_unlock(const struct rte_hash *h)
+{
+ if (h->readwrite_concur_support && h->hw_trans_mem_support)
+ rte_rwlock_read_unlock_tm(h->readwrite_lock);
+ else if (h->readwrite_concur_support)
+ rte_rwlock_read_unlock(h->readwrite_lock);
+}
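Each helper takes its lock only when the corresponding support flag was set at creation time, so single-writer tables keep a lock-free fast path, and tables on TM-capable CPUs use the lock-elision variants. A sketch of creating a table with the new flag; the parameter values are illustrative:

#include <stdint.h>
#include <rte_hash.h>
#include <rte_jhash.h>

static struct rte_hash *
create_rw_safe_table(void)
{
	struct rte_hash_parameters params = {
		.name = "rw_demo",
		.entries = 1024,
		.key_len = sizeof(uint32_t),
		.hash_func = rte_jhash,
		.hash_func_init_val = 0,
		.socket_id = 0,
		/* RW concurrency implies multi-writer support as well */
		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
	};

	return rte_hash_create(&params);
}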
+
void
rte_hash_reset(struct rte_hash *h)
{
void *ptr;
- unsigned i;
+ uint32_t tot_ring_cnt, i;
if (h == NULL)
return;
+ __hash_rw_writer_lock(h);
memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
@@ -383,97 +449,260 @@ rte_hash_reset(struct rte_hash *h)
rte_pause();
/* Repopulate the free slots ring. Entry zero is reserved for key misses */
- for (i = 1; i < h->entries + 1; i++)
+ if (h->multi_writer_support)
+ tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
+ (LCORE_CACHE_SIZE - 1);
+ else
+ tot_ring_cnt = h->entries;
+
+ for (i = 1; i < tot_ring_cnt + 1; i++)
rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
- if (h->hw_trans_mem_support) {
+ if (h->multi_writer_support) {
/* Reset local caches per lcore */
for (i = 0; i < RTE_MAX_LCORE; i++)
h->local_free_slots[i].len = 0;
}
+ __hash_rw_writer_unlock(h);
}
-/* Search for an entry that can be pushed to its alternative location */
-static inline int
-make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt,
- unsigned int *nr_pushes)
+/*
+ * Function called to enqueue an index back into the cache/ring,
+ * as the slot has not been used and can be reused in the
+ * next addition attempt.
+ */
+static inline void
+enqueue_slot_back(const struct rte_hash *h,
+ struct lcore_cache *cached_free_slots,
+ void *slot_id)
{
- unsigned i, j;
- int ret;
- uint32_t next_bucket_idx;
- struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];
+ if (h->multi_writer_support) {
+ cached_free_slots->objs[cached_free_slots->len] = slot_id;
+ cached_free_slots->len++;
+ } else
+ rte_ring_sp_enqueue(h->free_slots, slot_id);
+}
+
+/* Search for a key in the bucket and update its data */
+static inline int32_t
+search_and_update(const struct rte_hash *h, void *data, const void *key,
+ struct rte_hash_bucket *bkt, hash_sig_t sig, hash_sig_t alt_hash)
+{
+ int i;
+ struct rte_hash_key *k, *keys = h->key_store;
- /*
- * Push existing item (search for bucket with space in
- * alternative locations) to its alternative location
- */
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- /* Search for space in alternative locations */
- next_bucket_idx = bkt->sig_alt[i] & h->bucket_bitmask;
- next_bkt[i] = &h->buckets[next_bucket_idx];
- for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
- if (next_bkt[i]->key_idx[j] == EMPTY_SLOT)
- break;
+ if (bkt->sig_current[i] == sig &&
+ bkt->sig_alt[i] == alt_hash) {
+ k = (struct rte_hash_key *) ((char *)keys +
+ bkt->key_idx[i] * h->key_entry_size);
+ if (rte_hash_cmp_eq(key, k->key, h) == 0) {
+ /* Update data */
+ k->pdata = data;
+ /*
+ * Return index where key is stored,
+ * subtracting the first dummy index
+ */
+ return bkt->key_idx[i] - 1;
+ }
}
-
- if (j != RTE_HASH_BUCKET_ENTRIES)
- break;
}
+ return -1;
+}
- /* Alternative location has spare room (end of recursive function) */
- if (i != RTE_HASH_BUCKET_ENTRIES) {
- next_bkt[i]->sig_alt[j] = bkt->sig_current[i];
- next_bkt[i]->sig_current[j] = bkt->sig_alt[i];
- next_bkt[i]->key_idx[j] = bkt->key_idx[i];
- return i;
+/* Only tries to insert at one bucket (@prim_bkt) without trying to push
+ * entries around.
+ * Return 1 if a matching key already exists, 0 on success, and -1 if no
+ * empty entry is available.
+ */
+static inline int32_t
+rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
+ struct rte_hash_bucket *prim_bkt,
+ struct rte_hash_bucket *sec_bkt,
+ const struct rte_hash_key *key, void *data,
+ hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
+ int32_t *ret_val)
+{
+ unsigned int i;
+ struct rte_hash_bucket *cur_bkt = prim_bkt;
+ int32_t ret;
+
+ __hash_rw_writer_lock(h);
+	/* Check whether the key was inserted after the last check but before
+	 * entering this protected region, to avoid adding a duplicated key.
+ */
+ ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
+ if (ret != -1) {
+ __hash_rw_writer_unlock(h);
+ *ret_val = ret;
+ return 1;
+ }
+ ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
+ if (ret != -1) {
+ __hash_rw_writer_unlock(h);
+ *ret_val = ret;
+ return 1;
}
- /* Pick entry that has not been pushed yet */
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
- if (bkt->flag[i] == 0)
+ /* Insert new entry if there is room in the primary
+ * bucket.
+ */
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ /* Check if slot is available */
+ if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
+ prim_bkt->sig_current[i] = sig;
+ prim_bkt->sig_alt[i] = alt_hash;
+ prim_bkt->key_idx[i] = new_idx;
break;
+ }
+ }
+ __hash_rw_writer_unlock(h);
- /* All entries have been pushed, so entry cannot be added */
- if (i == RTE_HASH_BUCKET_ENTRIES || ++(*nr_pushes) > RTE_HASH_MAX_PUSHES)
- return -ENOSPC;
+ if (i != RTE_HASH_BUCKET_ENTRIES)
+ return 0;
- /* Set flag to indicate that this entry is going to be pushed */
- bkt->flag[i] = 1;
+ /* no empty entry */
+ return -1;
+}
- /* Need room in alternative bucket to insert the pushed entry */
- ret = make_space_bucket(h, next_bkt[i], nr_pushes);
- /*
- * After recursive function.
- * Clear flags and insert the pushed entry
- * in its alternative location if successful,
- * or return error
+/* Shift entries along the provided cuckoo path (@leaf and @leaf_slot) and
+ * fill the path head with the new entry (sig, alt_hash, new_idx).
+ * Return 1 if a matching key is found, -1 if the cuckoo path has been
+ * invalidated, and 0 on success.
+ */
+static inline int
+rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
+ struct rte_hash_bucket *bkt,
+ struct rte_hash_bucket *alt_bkt,
+ const struct rte_hash_key *key, void *data,
+ struct queue_node *leaf, uint32_t leaf_slot,
+ hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
+ int32_t *ret_val)
+{
+ uint32_t prev_alt_bkt_idx;
+ struct rte_hash_bucket *cur_bkt = bkt;
+ struct queue_node *prev_node, *curr_node = leaf;
+ struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
+ uint32_t prev_slot, curr_slot = leaf_slot;
+ int32_t ret;
+
+ __hash_rw_writer_lock(h);
+
+	/* In case the empty slot was taken before entering the protected region */
+ if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
+ __hash_rw_writer_unlock(h);
+ return -1;
+ }
+
+	/* Check whether the key was inserted after the last check but
+	 * before entering this protected region.
*/
- bkt->flag[i] = 0;
- if (ret >= 0) {
- next_bkt[i]->sig_alt[ret] = bkt->sig_current[i];
- next_bkt[i]->sig_current[ret] = bkt->sig_alt[i];
- next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
- return i;
- } else
- return ret;
+ ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
+ if (ret != -1) {
+ __hash_rw_writer_unlock(h);
+ *ret_val = ret;
+ return 1;
+ }
+
+ ret = search_and_update(h, data, key, alt_bkt, alt_hash, sig);
+ if (ret != -1) {
+ __hash_rw_writer_unlock(h);
+ *ret_val = ret;
+ return 1;
+ }
+
+ while (likely(curr_node->prev != NULL)) {
+ prev_node = curr_node->prev;
+ prev_bkt = prev_node->bkt;
+ prev_slot = curr_node->prev_slot;
+
+ prev_alt_bkt_idx =
+ prev_bkt->sig_alt[prev_slot] & h->bucket_bitmask;
+
+ if (unlikely(&h->buckets[prev_alt_bkt_idx]
+ != curr_bkt)) {
+			/* revert it to empty; otherwise we would leave duplicated keys */
+ curr_bkt->key_idx[curr_slot] = EMPTY_SLOT;
+ __hash_rw_writer_unlock(h);
+ return -1;
+ }
+
+		/* Swap the current/alt signatures so that a later
+		 * cuckoo insert can move the element back to its
+		 * primary bucket if space becomes available
+ */
+ curr_bkt->sig_alt[curr_slot] =
+ prev_bkt->sig_current[prev_slot];
+ curr_bkt->sig_current[curr_slot] =
+ prev_bkt->sig_alt[prev_slot];
+ curr_bkt->key_idx[curr_slot] =
+ prev_bkt->key_idx[prev_slot];
+
+ curr_slot = prev_slot;
+ curr_node = prev_node;
+ curr_bkt = curr_node->bkt;
+ }
+
+ curr_bkt->sig_current[curr_slot] = sig;
+ curr_bkt->sig_alt[curr_slot] = alt_hash;
+ curr_bkt->key_idx[curr_slot] = new_idx;
+
+ __hash_rw_writer_unlock(h);
+
+ return 0;
}
/*
- * Function called to enqueue back an index in the cache/ring,
- * as slot has not being used and it can be used in the
- * next addition attempt.
+ * Make space for a new key, using a BFS cuckoo search; safe for
+ * multi-writer use.
*/
-static inline void
-enqueue_slot_back(const struct rte_hash *h,
- struct lcore_cache *cached_free_slots,
- void *slot_id)
+static inline int
+rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
+ struct rte_hash_bucket *bkt,
+ struct rte_hash_bucket *sec_bkt,
+ const struct rte_hash_key *key, void *data,
+ hash_sig_t sig, hash_sig_t alt_hash,
+ uint32_t new_idx, int32_t *ret_val)
{
- if (h->hw_trans_mem_support) {
- cached_free_slots->objs[cached_free_slots->len] = slot_id;
- cached_free_slots->len++;
- } else
- rte_ring_sp_enqueue(h->free_slots, slot_id);
+ unsigned int i;
+ struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
+ struct queue_node *tail, *head;
+ struct rte_hash_bucket *curr_bkt, *alt_bkt;
+
+ tail = queue;
+ head = queue + 1;
+ tail->bkt = bkt;
+ tail->prev = NULL;
+ tail->prev_slot = -1;
+
+	/* Cuckoo BFS search */
+ while (likely(tail != head && head <
+ queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
+ RTE_HASH_BUCKET_ENTRIES)) {
+ curr_bkt = tail->bkt;
+ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
+ if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
+ int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
+ bkt, sec_bkt, key, data,
+ tail, i, sig, alt_hash,
+ new_idx, ret_val);
+ if (likely(ret != -1))
+ return ret;
+ }
+
+ /* Enqueue new node and keep prev node info */
+ alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
+ & h->bucket_bitmask]);
+ head->bkt = alt_bkt;
+ head->prev = tail;
+ head->prev_slot = i;
+ head++;
+ }
+ tail++;
+ }
+
+ return -ENOSPC;
}
static inline int32_t
@@ -482,19 +711,15 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
{
hash_sig_t alt_hash;
uint32_t prim_bucket_idx, sec_bucket_idx;
- unsigned i;
struct rte_hash_bucket *prim_bkt, *sec_bkt;
- struct rte_hash_key *new_k, *k, *keys = h->key_store;
+ struct rte_hash_key *new_k, *keys = h->key_store;
void *slot_id = NULL;
uint32_t new_idx;
int ret;
unsigned n_slots;
unsigned lcore_id;
struct lcore_cache *cached_free_slots = NULL;
- unsigned int nr_pushes = 0;
-
- if (h->add_key == ADD_KEY_MULTIWRITER)
- rte_spinlock_lock(h->multiwriter_lock);
+ int32_t ret_val;
prim_bucket_idx = sig & h->bucket_bitmask;
prim_bkt = &h->buckets[prim_bucket_idx];
@@ -505,8 +730,24 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
sec_bkt = &h->buckets[sec_bucket_idx];
rte_prefetch0(sec_bkt);
- /* Get a new slot for storing the new key */
- if (h->hw_trans_mem_support) {
+ /* Check if key is already inserted in primary location */
+ __hash_rw_writer_lock(h);
+ ret = search_and_update(h, data, key, prim_bkt, sig, alt_hash);
+ if (ret != -1) {
+ __hash_rw_writer_unlock(h);
+ return ret;
+ }
+
+ /* Check if key is already inserted in secondary location */
+ ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
+ if (ret != -1) {
+ __hash_rw_writer_unlock(h);
+ return ret;
+ }
+ __hash_rw_writer_unlock(h);
+
+ /* Did not find a match, so get a new slot for storing the new key */
+ if (h->multi_writer_support) {
lcore_id = rte_lcore_id();
cached_free_slots = &h->local_free_slots[lcore_id];
/* Try to get a free slot from the local cache */
@@ -516,8 +757,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
cached_free_slots->objs,
LCORE_CACHE_SIZE, NULL);
if (n_slots == 0) {
- ret = -ENOSPC;
- goto failure;
+ return -ENOSPC;
}
cached_free_slots->len += n_slots;
@@ -528,122 +768,50 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
slot_id = cached_free_slots->objs[cached_free_slots->len];
} else {
if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
- ret = -ENOSPC;
- goto failure;
+ return -ENOSPC;
}
}
new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
- rte_prefetch0(new_k);
new_idx = (uint32_t)((uintptr_t) slot_id);
-
- /* Check if key is already inserted in primary location */
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- if (prim_bkt->sig_current[i] == sig &&
- prim_bkt->sig_alt[i] == alt_hash) {
- k = (struct rte_hash_key *) ((char *)keys +
- prim_bkt->key_idx[i] * h->key_entry_size);
- if (rte_hash_cmp_eq(key, k->key, h) == 0) {
- /* Enqueue index of free slot back in the ring. */
- enqueue_slot_back(h, cached_free_slots, slot_id);
- /* Update data */
- k->pdata = data;
- /*
- * Return index where key is stored,
- * subtracting the first dummy index
- */
- return prim_bkt->key_idx[i] - 1;
- }
- }
- }
-
- /* Check if key is already inserted in secondary location */
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- if (sec_bkt->sig_alt[i] == sig &&
- sec_bkt->sig_current[i] == alt_hash) {
- k = (struct rte_hash_key *) ((char *)keys +
- sec_bkt->key_idx[i] * h->key_entry_size);
- if (rte_hash_cmp_eq(key, k->key, h) == 0) {
- /* Enqueue index of free slot back in the ring. */
- enqueue_slot_back(h, cached_free_slots, slot_id);
- /* Update data */
- k->pdata = data;
- /*
- * Return index where key is stored,
- * subtracting the first dummy index
- */
- return sec_bkt->key_idx[i] - 1;
- }
- }
- }
-
/* Copy key */
rte_memcpy(new_k->key, key, h->key_len);
new_k->pdata = data;
-#if defined(RTE_ARCH_X86) /* currently only x86 support HTM */
- if (h->add_key == ADD_KEY_MULTIWRITER_TM) {
- ret = rte_hash_cuckoo_insert_mw_tm(prim_bkt,
- sig, alt_hash, new_idx);
- if (ret >= 0)
- return new_idx - 1;
- /* Primary bucket full, need to make space for new entry */
- ret = rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig,
- alt_hash, new_idx);
+ /* Find an empty slot and insert */
+ ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
+ sig, alt_hash, new_idx, &ret_val);
+ if (ret == 0)
+ return new_idx - 1;
+ else if (ret == 1) {
+ enqueue_slot_back(h, cached_free_slots, slot_id);
+ return ret_val;
+ }
- if (ret >= 0)
- return new_idx - 1;
+ /* Primary bucket full, need to make space for new entry */
+ ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
+ sig, alt_hash, new_idx, &ret_val);
+ if (ret == 0)
+ return new_idx - 1;
+ else if (ret == 1) {
+ enqueue_slot_back(h, cached_free_slots, slot_id);
+ return ret_val;
+ }
- /* Also search secondary bucket to get better occupancy */
- ret = rte_hash_cuckoo_make_space_mw_tm(h, sec_bkt, sig,
- alt_hash, new_idx);
+ /* Also search secondary bucket to get better occupancy */
+ ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
+ alt_hash, sig, new_idx, &ret_val);
- if (ret >= 0)
- return new_idx - 1;
+ if (ret == 0)
+ return new_idx - 1;
+ else if (ret == 1) {
+ enqueue_slot_back(h, cached_free_slots, slot_id);
+ return ret_val;
} else {
-#endif
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- /* Check if slot is available */
- if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
- prim_bkt->sig_current[i] = sig;
- prim_bkt->sig_alt[i] = alt_hash;
- prim_bkt->key_idx[i] = new_idx;
- break;
- }
- }
-
- if (i != RTE_HASH_BUCKET_ENTRIES) {
- if (h->add_key == ADD_KEY_MULTIWRITER)
- rte_spinlock_unlock(h->multiwriter_lock);
- return new_idx - 1;
- }
-
- /* Primary bucket full, need to make space for new entry
- * After recursive function.
- * Insert the new entry in the position of the pushed entry
- * if successful or return error and
- * store the new slot back in the ring
- */
- ret = make_space_bucket(h, prim_bkt, &nr_pushes);
- if (ret >= 0) {
- prim_bkt->sig_current[ret] = sig;
- prim_bkt->sig_alt[ret] = alt_hash;
- prim_bkt->key_idx[ret] = new_idx;
- if (h->add_key == ADD_KEY_MULTIWRITER)
- rte_spinlock_unlock(h->multiwriter_lock);
- return new_idx - 1;
- }
-#if defined(RTE_ARCH_X86)
+ enqueue_slot_back(h, cached_free_slots, slot_id);
+ return ret;
}
-#endif
- /* Error in addition, store new slot back in the ring and return error */
- enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));
-
-failure:
- if (h->add_key == ADD_KEY_MULTIWRITER)
- rte_spinlock_unlock(h->multiwriter_lock);
- return ret;
}
int32_t
@@ -688,20 +856,15 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
else
return ret;
}
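The rework leaves the public contract of add/lookup/delete untouched; adding an existing key still updates the stored value in place. A hedged round-trip sketch, assuming the table was created with 4-byte keys (storing a pointer to a stack variable is for illustration only):

#include <stdio.h>
#include <stdint.h>
#include <rte_hash.h>

static void
add_lookup_delete(const struct rte_hash *h)
{
	uint32_t key = 42;
	int v = 7;	/* hypothetical user datum */
	void *data;

	/* Returns 0 on success, -ENOSPC/-EINVAL on failure. */
	if (rte_hash_add_key_data(h, &key, &v) < 0)
		return;

	if (rte_hash_lookup_data(h, &key, &data) >= 0)
		printf("value: %d\n", *(int *)data);

	rte_hash_del_key(h, &key);
}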
+
+/* Search one bucket for the matching key */
static inline int32_t
-__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
- hash_sig_t sig, void **data)
+search_one_bucket(const struct rte_hash *h, const void *key, hash_sig_t sig,
+ void **data, const struct rte_hash_bucket *bkt)
{
- uint32_t bucket_idx;
- hash_sig_t alt_hash;
- unsigned i;
- struct rte_hash_bucket *bkt;
+ int i;
struct rte_hash_key *k, *keys = h->key_store;
- bucket_idx = sig & h->bucket_bitmask;
- bkt = &h->buckets[bucket_idx];
-
- /* Check if key is in primary location */
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
if (bkt->sig_current[i] == sig &&
bkt->key_idx[i] != EMPTY_SLOT) {
@@ -718,30 +881,41 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
}
}
}
+ return -1;
+}
+
+static inline int32_t
+__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig, void **data)
+{
+ uint32_t bucket_idx;
+ hash_sig_t alt_hash;
+ struct rte_hash_bucket *bkt;
+ int ret;
+
+ bucket_idx = sig & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+ __hash_rw_reader_lock(h);
+
+ /* Check if key is in primary location */
+ ret = search_one_bucket(h, key, sig, data, bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
+ }
/* Calculate secondary hash */
alt_hash = rte_hash_secondary_hash(sig);
bucket_idx = alt_hash & h->bucket_bitmask;
bkt = &h->buckets[bucket_idx];
/* Check if key is in secondary location */
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- if (bkt->sig_current[i] == alt_hash &&
- bkt->sig_alt[i] == sig) {
- k = (struct rte_hash_key *) ((char *)keys +
- bkt->key_idx[i] * h->key_entry_size);
- if (rte_hash_cmp_eq(key, k->key, h) == 0) {
- if (data != NULL)
- *data = k->pdata;
- /*
- * Return index where key is stored,
- * subtracting the first dummy index
- */
- return bkt->key_idx[i] - 1;
- }
- }
+ ret = search_one_bucket(h, key, alt_hash, data, bkt);
+ if (ret != -1) {
+ __hash_rw_reader_unlock(h);
+ return ret;
}
-
+ __hash_rw_reader_unlock(h);
return -ENOENT;
}
@@ -783,7 +957,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
bkt->sig_current[i] = NULL_SIGNATURE;
bkt->sig_alt[i] = NULL_SIGNATURE;
- if (h->hw_trans_mem_support) {
+ if (h->multi_writer_support) {
lcore_id = rte_lcore_id();
cached_free_slots = &h->local_free_slots[lcore_id];
/* Cache full, need to free it. */
@@ -804,20 +978,15 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
}
}
+/* Search one bucket and remove the matching key */
static inline int32_t
-__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
- hash_sig_t sig)
+search_and_remove(const struct rte_hash *h, const void *key,
+ struct rte_hash_bucket *bkt, hash_sig_t sig)
{
- uint32_t bucket_idx;
- hash_sig_t alt_hash;
- unsigned i;
- struct rte_hash_bucket *bkt;
struct rte_hash_key *k, *keys = h->key_store;
+ unsigned int i;
int32_t ret;
- bucket_idx = sig & h->bucket_bitmask;
- bkt = &h->buckets[bucket_idx];
-
/* Check if key is in primary location */
for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
if (bkt->sig_current[i] == sig &&
@@ -837,32 +1006,42 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
}
}
}
+ return -1;
+}
+
+static inline int32_t
+__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
+ hash_sig_t sig)
+{
+ uint32_t bucket_idx;
+ hash_sig_t alt_hash;
+ struct rte_hash_bucket *bkt;
+ int32_t ret;
+
+ bucket_idx = sig & h->bucket_bitmask;
+ bkt = &h->buckets[bucket_idx];
+
+ __hash_rw_writer_lock(h);
+ /* look for key in primary bucket */
+ ret = search_and_remove(h, key, bkt, sig);
+ if (ret != -1) {
+ __hash_rw_writer_unlock(h);
+ return ret;
+ }
/* Calculate secondary hash */
alt_hash = rte_hash_secondary_hash(sig);
bucket_idx = alt_hash & h->bucket_bitmask;
bkt = &h->buckets[bucket_idx];
- /* Check if key is in secondary location */
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- if (bkt->sig_current[i] == alt_hash &&
- bkt->key_idx[i] != EMPTY_SLOT) {
- k = (struct rte_hash_key *) ((char *)keys +
- bkt->key_idx[i] * h->key_entry_size);
- if (rte_hash_cmp_eq(key, k->key, h) == 0) {
- remove_entry(h, bkt, i);
-
- /*
- * Return index where key is stored,
- * subtracting the first dummy index
- */
- ret = bkt->key_idx[i] - 1;
- bkt->key_idx[i] = EMPTY_SLOT;
- return ret;
- }
- }
+ /* look for key in secondary bucket */
+ ret = search_and_remove(h, key, bkt, alt_hash);
+ if (ret != -1) {
+ __hash_rw_writer_unlock(h);
+ return ret;
}
+ __hash_rw_writer_unlock(h);
return -ENOENT;
}
@@ -1004,6 +1183,7 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
rte_prefetch0(secondary_bkt[i]);
}
+ __hash_rw_reader_lock(h);
/* Compare signatures and prefetch key slot of first hit */
for (i = 0; i < num_keys; i++) {
compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
@@ -1086,6 +1266,8 @@ next_key:
continue;
}
+ __hash_rw_reader_unlock(h);
+
if (hit_mask != NULL)
*hit_mask = hits;
}
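Bulk lookup now holds the reader lock across the whole batch, so a concurrent writer cannot expose a half-updated bucket mid-scan. Caller-side sketch; the batch size and printf are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <rte_hash.h>

#define BATCH 8

static void
bulk_demo(const struct rte_hash *h, const void *keys[BATCH])
{
	uint64_t hit_mask = 0;
	void *data[BATCH];
	int i;

	if (rte_hash_lookup_bulk_data(h, keys, BATCH, &hit_mask, data) < 0)
		return;

	for (i = 0; i < BATCH; i++)
		if (hit_mask & (1ULL << i))
			printf("key %d -> %p\n", i, data[i]);
}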
@@ -1144,7 +1326,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
idx = *next % RTE_HASH_BUCKET_ENTRIES;
}
-
+ __hash_rw_reader_lock(h);
/* Get position of entry in key table */
position = h->buckets[bucket_idx].key_idx[idx];
next_key = (struct rte_hash_key *) ((char *)h->key_store +
@@ -1153,6 +1335,8 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32
*key = next_key->key;
*data = next_key->pdata;
+ __hash_rw_reader_unlock(h);
+
/* Increment iterator */
(*next)++;
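rte_hash_iterate() now brackets each entry access with the reader lock; the iteration pattern itself is unchanged. Sketch:

#include <stdint.h>
#include <rte_hash.h>

static void
walk_table(const struct rte_hash *h)
{
	const void *key;
	void *data;
	uint32_t next = 0;

	/* Returns the key position (>= 0), or -ENOENT at the end. */
	while (rte_hash_iterate(h, &key, &data, &next) >= 0) {
		/* key/data point into the table; use them before deleting */
	}
}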
diff --git a/lib/librte_hash/rte_cuckoo_hash.h b/lib/librte_hash/rte_cuckoo_hash.h
index 7a54e555..b43f467d 100644
--- a/lib/librte_hash/rte_cuckoo_hash.h
+++ b/lib/librte_hash/rte_cuckoo_hash.h
@@ -88,15 +88,14 @@ const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
#endif
-enum add_key_case {
- ADD_KEY_SINGLEWRITER = 0,
- ADD_KEY_MULTIWRITER,
- ADD_KEY_MULTIWRITER_TM,
-};
/** Number of items per bucket. */
#define RTE_HASH_BUCKET_ENTRIES 8
+#if !RTE_IS_POWER_OF_2(RTE_HASH_BUCKET_ENTRIES)
+#error RTE_HASH_BUCKET_ENTRIES must be a power of 2
+#endif
+
#define NULL_SIGNATURE 0
#define EMPTY_SLOT 0
@@ -155,18 +154,20 @@ struct rte_hash {
struct rte_ring *free_slots;
/**< Ring that stores all indexes of the free slots in the key table */
- uint8_t hw_trans_mem_support;
- /**< Hardware transactional memory support */
+
struct lcore_cache *local_free_slots;
/**< Local cache per lcore, storing some indexes of the free slots */
- enum add_key_case add_key; /**< Multi-writer hash add behavior */
-
- rte_spinlock_t *multiwriter_lock; /**< Multi-writer spinlock for w/o TM */
/* Fields used in lookup */
uint32_t key_len __rte_cache_aligned;
/**< Length of hash key. */
+ uint8_t hw_trans_mem_support;
+ /**< If hardware transactional memory is used. */
+ uint8_t multi_writer_support;
+ /**< If multi-writer support is enabled. */
+ uint8_t readwrite_concur_support;
+	/**< If read-write concurrency support is enabled. */
rte_hash_function hash_func; /**< Function used to calculate hash. */
uint32_t hash_func_init_val; /**< Init value used by hash_func. */
rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
@@ -184,6 +185,7 @@ struct rte_hash {
/**< Table with buckets storing all the hash values and key indexes
* to the key table.
*/
+	rte_rwlock_t *readwrite_lock; /**< Read-write lock for thread safety. */
} __rte_cache_aligned;
struct queue_node {
diff --git a/lib/librte_hash/rte_cuckoo_hash_x86.h b/lib/librte_hash/rte_cuckoo_hash_x86.h
deleted file mode 100644
index 2c5b017e..00000000
--- a/lib/librte_hash/rte_cuckoo_hash_x86.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Intel Corporation
- */
-
-/* rte_cuckoo_hash_x86.h
- * This file holds all x86 specific Cuckoo Hash functions
- */
-
-/* Only tries to insert at one bucket (@prim_bkt) without trying to push
- * buckets around
- */
-static inline unsigned
-rte_hash_cuckoo_insert_mw_tm(struct rte_hash_bucket *prim_bkt,
- hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
-{
- unsigned i, status;
- unsigned try = 0;
-
- while (try < RTE_HASH_TSX_MAX_RETRY) {
- status = rte_xbegin();
- if (likely(status == RTE_XBEGIN_STARTED)) {
- /* Insert new entry if there is room in the primary
- * bucket.
- */
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- /* Check if slot is available */
- if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
- prim_bkt->sig_current[i] = sig;
- prim_bkt->sig_alt[i] = alt_hash;
- prim_bkt->key_idx[i] = new_idx;
- break;
- }
- }
- rte_xend();
-
- if (i != RTE_HASH_BUCKET_ENTRIES)
- return 0;
-
- break; /* break off try loop if transaction commits */
- } else {
- /* If we abort we give up this cuckoo path. */
- try++;
- rte_pause();
- }
- }
-
- return -1;
-}
-
-/* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill
- * the path head with new entry (sig, alt_hash, new_idx)
- */
-static inline int
-rte_hash_cuckoo_move_insert_mw_tm(const struct rte_hash *h,
- struct queue_node *leaf, uint32_t leaf_slot,
- hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx)
-{
- unsigned try = 0;
- unsigned status;
- uint32_t prev_alt_bkt_idx;
-
- struct queue_node *prev_node, *curr_node = leaf;
- struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
- uint32_t prev_slot, curr_slot = leaf_slot;
-
- while (try < RTE_HASH_TSX_MAX_RETRY) {
- status = rte_xbegin();
- if (likely(status == RTE_XBEGIN_STARTED)) {
- while (likely(curr_node->prev != NULL)) {
- prev_node = curr_node->prev;
- prev_bkt = prev_node->bkt;
- prev_slot = curr_node->prev_slot;
-
- prev_alt_bkt_idx
- = prev_bkt->sig_alt[prev_slot]
- & h->bucket_bitmask;
-
- if (unlikely(&h->buckets[prev_alt_bkt_idx]
- != curr_bkt)) {
- rte_xabort(RTE_XABORT_CUCKOO_PATH_INVALIDED);
- }
-
- /* Need to swap current/alt sig to allow later
- * Cuckoo insert to move elements back to its
- * primary bucket if available
- */
- curr_bkt->sig_alt[curr_slot] =
- prev_bkt->sig_current[prev_slot];
- curr_bkt->sig_current[curr_slot] =
- prev_bkt->sig_alt[prev_slot];
- curr_bkt->key_idx[curr_slot]
- = prev_bkt->key_idx[prev_slot];
-
- curr_slot = prev_slot;
- curr_node = prev_node;
- curr_bkt = curr_node->bkt;
- }
-
- curr_bkt->sig_current[curr_slot] = sig;
- curr_bkt->sig_alt[curr_slot] = alt_hash;
- curr_bkt->key_idx[curr_slot] = new_idx;
-
- rte_xend();
-
- return 0;
- }
-
- /* If we abort we give up this cuckoo path, since most likely it's
- * no longer valid as TSX detected data conflict
- */
- try++;
- rte_pause();
- }
-
- return -1;
-}
-
-/*
- * Make space for new key, using bfs Cuckoo Search and Multi-Writer safe
- * Cuckoo
- */
-static inline int
-rte_hash_cuckoo_make_space_mw_tm(const struct rte_hash *h,
- struct rte_hash_bucket *bkt,
- hash_sig_t sig, hash_sig_t alt_hash,
- uint32_t new_idx)
-{
- unsigned i;
- struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
- struct queue_node *tail, *head;
- struct rte_hash_bucket *curr_bkt, *alt_bkt;
-
- tail = queue;
- head = queue + 1;
- tail->bkt = bkt;
- tail->prev = NULL;
- tail->prev_slot = -1;
-
- /* Cuckoo bfs Search */
- while (likely(tail != head && head <
- queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
- RTE_HASH_BUCKET_ENTRIES)) {
- curr_bkt = tail->bkt;
- for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
- if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
- if (likely(rte_hash_cuckoo_move_insert_mw_tm(h,
- tail, i, sig,
- alt_hash, new_idx) == 0))
- return 0;
- }
-
- /* Enqueue new node and keep prev node info */
- alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
- & h->bucket_bitmask]);
- head->bkt = alt_bkt;
- head->prev = tail;
- head->prev_slot = i;
- head++;
- }
- tail++;
- }
-
- return -ENOSPC;
-}
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index 3beaca71..9e7d9315 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -34,6 +34,9 @@ extern "C" {
/** Default behavior of insertion, single writer/multi writer */
#define RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD 0x02
+/** Flag to support reader-writer concurrency */
+#define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY 0x04
+
/** Signature of key that is stored internally. */
typedef uint32_t hash_sig_t;
@@ -124,9 +127,22 @@ void
rte_hash_reset(struct rte_hash *h);
/**
+ * Return the number of keys in the hash table.
+ * @param h
+ *  Hash table to query.
+ * @return
+ *   - -EINVAL if parameters are invalid.
+ *   - A value indicating how many keys are currently stored in the table.
+ */
+int32_t
+rte_hash_count(const struct rte_hash *h);
+
+/**
* Add a key-value pair to an existing hash table.
* This operation is not multi-thread safe
- * and should only be called from one thread.
+ * and should only be called from one thread by default.
+ * Thread safety can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to add the key to.
@@ -146,7 +162,9 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data);
* Add a key-value pair with a pre-computed hash value
* to an existing hash table.
* This operation is not multi-thread safe
- * and should only be called from one thread.
+ * and should only be called from one thread by default.
+ * Thread safety can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to add the key to.
@@ -167,7 +185,9 @@ rte_hash_add_key_with_hash_data(const struct rte_hash *h, const void *key,
/**
* Add a key to an existing hash table. This operation is not multi-thread safe
- * and should only be called from one thread.
+ * and should only be called from one thread by default.
+ * Thread safety can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to add the key to.
@@ -185,7 +205,9 @@ rte_hash_add_key(const struct rte_hash *h, const void *key);
/**
* Add a key to an existing hash table.
* This operation is not multi-thread safe
- * and should only be called from one thread.
+ * and should only be called from one thread by default.
+ * Thread safety can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to add the key to.
@@ -205,7 +227,9 @@ rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t
/**
* Remove a key from an existing hash table.
* This operation is not multi-thread safe
- * and should only be called from one thread.
+ * and should only be called from one thread by default.
+ * Thread safety can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to remove the key from.
@@ -224,7 +248,9 @@ rte_hash_del_key(const struct rte_hash *h, const void *key);
/**
* Remove a key from an existing hash table.
* This operation is not multi-thread safe
- * and should only be called from one thread.
+ * and should only be called from one thread by default.
+ * Thread safety can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to remove the key from.
@@ -244,7 +270,9 @@ rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t
/**
* Find a key in the hash table given the position.
- * This operation is multi-thread safe.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to get the key from.
@@ -254,8 +282,8 @@ rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t
* Output containing a pointer to the key
* @return
* - 0 if retrieved successfully
- * - EINVAL if the parameters are invalid.
- * - ENOENT if no valid key is found in the given position.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if no valid key is found in the given position.
*/
int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
@@ -263,7 +291,9 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
/**
* Find a key-value pair in the hash table.
- * This operation is multi-thread safe.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to look in.
@@ -272,9 +302,11 @@ rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
* @param data
* Output with pointer to data returned from the hash table.
* @return
- * 0 if successful lookup
- * - EINVAL if the parameters are invalid.
- * - ENOENT if the key is not found.
+ * - A positive value that can be used by the caller as an offset into an
+ * array of user data. This value is unique for this key, and is the same
+ * value that was returned when the key was added.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if the key is not found.
*/
int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data);
@@ -282,7 +314,9 @@ rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data);
/**
* Find a key-value pair with a pre-computed hash value
* to an existing hash table.
- * This operation is multi-thread safe.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to look in.
@@ -293,9 +327,11 @@ rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data);
* @param data
* Output with pointer to data returned from the hash table.
* @return
- * 0 if successful lookup
- * - EINVAL if the parameters are invalid.
- * - ENOENT if the key is not found.
+ * - A positive value that can be used by the caller as an offset into an
+ * array of user data. This value is unique for this key, and is the same
+ * value that was returned when the key was added.
+ * - -EINVAL if the parameters are invalid.
+ * - -ENOENT if the key is not found.
*/
int
rte_hash_lookup_with_hash_data(const struct rte_hash *h, const void *key,
@@ -303,7 +339,9 @@ rte_hash_lookup_with_hash_data(const struct rte_hash *h, const void *key,
/**
* Find a key in the hash table.
- * This operation is multi-thread safe.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to look in.
@@ -321,14 +359,16 @@ rte_hash_lookup(const struct rte_hash *h, const void *key);
/**
* Find a key in the hash table.
- * This operation is multi-thread safe.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to look in.
* @param key
* Key to find.
* @param sig
- * Hash value to remove from the hash table.
+ * Precomputed hash value for 'key'.
* @return
* - -EINVAL if the parameters are invalid.
* - -ENOENT if the key is not found.
@@ -356,7 +396,9 @@ rte_hash_hash(const struct rte_hash *h, const void *key);
/**
* Find multiple keys in the hash table.
- * This operation is multi-thread safe.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to look in.
@@ -377,7 +419,9 @@ rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
/**
* Find multiple keys in the hash table.
- * This operation is multi-thread safe.
+ * This operation is multi-thread safe with regard to other lookup threads.
+ * Read-write concurrency can be enabled by setting a flag during
+ * table creation.
*
* @param h
* Hash table to look in.
diff --git a/lib/librte_hash/rte_hash_crc.h b/lib/librte_hash/rte_hash_crc.h
index 479f84b1..cf28031b 100644
--- a/lib/librte_hash/rte_hash_crc.h
+++ b/lib/librte_hash/rte_hash_crc.h
@@ -338,14 +338,13 @@ crc32c_1word(uint32_t data, uint32_t init_val)
static inline uint32_t
crc32c_2words(uint64_t data, uint32_t init_val)
{
+ uint32_t crc, term1, term2;
union {
uint64_t u64;
uint32_t u32[2];
} d;
d.u64 = data;
- uint32_t crc, term1, term2;
-
crc = init_val;
crc ^= d.u32[0];
@@ -399,9 +398,9 @@ crc32c_sse42_u64_mimic(uint64_t data, uint64_t init_val)
} d;
d.u64 = data;
- init_val = crc32c_sse42_u32(d.u32[0], init_val);
- init_val = crc32c_sse42_u32(d.u32[1], init_val);
- return init_val;
+ init_val = crc32c_sse42_u32(d.u32[0], (uint32_t)init_val);
+ init_val = crc32c_sse42_u32(d.u32[1], (uint32_t)init_val);
+ return (uint32_t)init_val;
}
#endif
@@ -413,7 +412,7 @@ crc32c_sse42_u64(uint64_t data, uint64_t init_val)
"crc32q %[data], %[init_val];"
: [init_val] "+r" (init_val)
: [data] "rm" (data));
- return init_val;
+ return (uint32_t)init_val;
}
#endif
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map
index 52a2576f..e216ac8e 100644
--- a/lib/librte_hash/rte_hash_version.map
+++ b/lib/librte_hash/rte_hash_version.map
@@ -45,3 +45,11 @@ DPDK_16.07 {
rte_hash_get_key_with_position;
} DPDK_2.2;
+
+
+DPDK_18.08 {
+ global:
+
+ rte_hash_count;
+
+} DPDK_16.07;
diff --git a/lib/librte_ip_frag/ip_frag_internal.c b/lib/librte_ip_frag/ip_frag_internal.c
index 7397aa69..2560c771 100644
--- a/lib/librte_ip_frag/ip_frag_internal.c
+++ b/lib/librte_ip_frag/ip_frag_internal.c
@@ -152,7 +152,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
fp->frags[IP_LAST_FRAG_IDX].len);
else
IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
- "ipv4_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
+ "ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
"total_size: %u, frag_size: %u, last_idx: %u\n"
"first fragment: ofs: %u, len: %u\n"
"last fragment: ofs: %u, len: %u\n\n",
@@ -210,7 +210,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
fp->frags[IP_LAST_FRAG_IDX].len);
else
IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
- "ipv4_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
+ "ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
"total_size: %u, frag_size: %u, last_idx: %u\n"
"first fragment: ofs: %u, len: %u\n"
"last fragment: ofs: %u, len: %u\n\n",
@@ -331,7 +331,7 @@ ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
if (p1->key.key_len == IPV4_KEYLEN)
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv6_frag_pkt line0: %p, index: %u from %u\n"
+ "ipv4_frag_pkt line0: %p, index: %u from %u\n"
"key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
__func__, __LINE__,
tbl, tbl->max_entries, tbl->use_entries,
@@ -357,7 +357,7 @@ ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
if (p2->key.key_len == IPV4_KEYLEN)
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv6_frag_pkt line1: %p, index: %u from %u\n"
+ "ipv4_frag_pkt line1: %p, index: %u from %u\n"
"key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
__func__, __LINE__,
tbl, tbl->max_entries, tbl->use_entries,
diff --git a/lib/librte_ip_frag/rte_ipv4_reassembly.c b/lib/librte_ip_frag/rte_ipv4_reassembly.c
index 82e831ca..4956b99e 100644
--- a/lib/librte_ip_frag/rte_ipv4_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv4_reassembly.c
@@ -59,7 +59,9 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)
/* chain with the first fragment. */
rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len));
rte_pktmbuf_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
+ fp->frags[curr_idx].mb = NULL;
m = fp->frags[IP_FIRST_FRAG_IDX].mb;
+ fp->frags[IP_FIRST_FRAG_IDX].mb = NULL;
/* update mbuf fields for reassembled packet. */
m->ol_flags |= PKT_TX_IP_CKSUM;
diff --git a/lib/librte_ip_frag/rte_ipv6_reassembly.c b/lib/librte_ip_frag/rte_ipv6_reassembly.c
index 3479fabb..db249fe6 100644
--- a/lib/librte_ip_frag/rte_ipv6_reassembly.c
+++ b/lib/librte_ip_frag/rte_ipv6_reassembly.c
@@ -82,7 +82,9 @@ ipv6_frag_reassemble(struct ip_frag_pkt *fp)
/* chain with the first fragment. */
rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len));
rte_pktmbuf_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
+ fp->frags[curr_idx].mb = NULL;
m = fp->frags[IP_FIRST_FRAG_IDX].mb;
+ fp->frags[IP_FIRST_FRAG_IDX].mb = NULL;
/* update mbuf fields for reassembled packet. */
m->ol_flags |= PKT_TX_IP_CKSUM;
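Both reassembly fixes apply the same idiom: once a fragment mbuf has been chained into the reassembled packet, the fragment-table entry must give up its reference so a later free of the table entries cannot touch the mbuf a second time. A generic move-ownership sketch in plain C, independent of DPDK:

#include <stdlib.h>

struct slot {
	void *buf;
};

/* Transfer ownership out of the slot and clear the source pointer. */
static void *
take(struct slot *s)
{
	void *b = s->buf;

	s->buf = NULL;	/* the slot no longer owns the buffer */
	return b;
}

/* Safe even after take(): free(NULL) is a no-op. */
static void
cleanup(struct slot *s)
{
	free(s->buf);
	s->buf = NULL;
}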
diff --git a/lib/librte_kni/meson.build b/lib/librte_kni/meson.build
index c4b21961..a738a033 100644
--- a/lib/librte_kni/meson.build
+++ b/lib/librte_kni/meson.build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-if host_machine.system() != 'linux'
+if host_machine.system() != 'linux' or cc.sizeof('void *') == 4
build = false
endif
version = 2
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 28674115..65f6a2b0 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -510,7 +510,7 @@ kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
int ret = 0;
- if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
+ if (!rte_eth_dev_is_valid_port(port_id)) {
RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
return -EINVAL;
}
@@ -530,7 +530,7 @@ kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
static int
kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
{
- if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
+ if (!rte_eth_dev_is_valid_port(port_id)) {
RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
return -EINVAL;
}
@@ -715,6 +715,9 @@ rte_kni_get(const char *name)
struct rte_kni_memzone_slot *it;
struct rte_kni *kni;
+ if (name == NULL || name[0] == '\0')
+ return NULL;
+
/* Note: could be improved perf-wise if necessary */
for (i = 0; i < kni_memzone_pool.max_ifaces; i++) {
it = &kni_memzone_pool.slots[i];
diff --git a/lib/librte_kvargs/Makefile b/lib/librte_kvargs/Makefile
index 4eaa9334..87593954 100644
--- a/lib/librte_kvargs/Makefile
+++ b/lib/librte_kvargs/Makefile
@@ -1,35 +1,5 @@
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2014 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# - Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# - Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# - Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-# OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -37,7 +7,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_kvargs.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
-LDLIBS += -lrte_eal
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
EXPORT_MAP := rte_kvargs_version.map
diff --git a/lib/librte_kvargs/meson.build b/lib/librte_kvargs/meson.build
index 0c5b9cb2..acd3e543 100644
--- a/lib/librte_kvargs/meson.build
+++ b/lib/librte_kvargs/meson.build
@@ -1,6 +1,11 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+includes = [global_inc]
+includes += include_directories('../librte_eal/common/include')
+
version = 1
sources = files('rte_kvargs.c')
headers = files('rte_kvargs.h')
+
+deps += 'compat'
diff --git a/lib/librte_kvargs/rte_kvargs.c b/lib/librte_kvargs/rte_kvargs.c
index 9662375e..a28f7694 100644
--- a/lib/librte_kvargs/rte_kvargs.c
+++ b/lib/librte_kvargs/rte_kvargs.c
@@ -6,14 +6,13 @@
#include <string.h>
#include <stdlib.h>
-#include <rte_log.h>
#include <rte_string_fns.h>
#include "rte_kvargs.h"
/*
* Receive a string with a list of arguments following the pattern
- * key=value;key=value;... and insert them into the list.
+ * key=value,key=value,... and insert them into the list.
* strtok() is used so the params string will be copied to be modified.
*/
static int
@@ -28,29 +27,22 @@ rte_kvargs_tokenize(struct rte_kvargs *kvlist, const char *params)
* to pass to rte_strsplit
*/
kvlist->str = strdup(params);
- if (kvlist->str == NULL) {
- RTE_LOG(ERR, PMD, "Cannot parse arguments: not enough memory\n");
+ if (kvlist->str == NULL)
return -1;
- }
/* browse each key/value pair and add it in kvlist */
str = kvlist->str;
while ((str = strtok_r(str, RTE_KVARGS_PAIRS_DELIM, &ctx1)) != NULL) {
i = kvlist->count;
- if (i >= RTE_KVARGS_MAX) {
- RTE_LOG(ERR, PMD, "Cannot parse arguments: list full\n");
+ if (i >= RTE_KVARGS_MAX)
return -1;
- }
kvlist->pairs[i].key = strtok_r(str, RTE_KVARGS_KV_DELIM, &ctx2);
kvlist->pairs[i].value = strtok_r(NULL, RTE_KVARGS_KV_DELIM, &ctx2);
- if (kvlist->pairs[i].key == NULL || kvlist->pairs[i].value == NULL) {
- RTE_LOG(ERR, PMD,
- "Cannot parse arguments: wrong key or value\n"
- "params=<%s>\n", params);
+ if (kvlist->pairs[i].key == NULL ||
+ kvlist->pairs[i].value == NULL)
return -1;
- }
kvlist->count++;
str = NULL;
@@ -89,12 +81,8 @@ check_for_valid_keys(struct rte_kvargs *kvlist,
for (i = 0; i < kvlist->count; i++) {
pair = &kvlist->pairs[i];
ret = is_valid_key(valid, pair->key);
- if (!ret) {
- RTE_LOG(ERR, PMD,
- "Error parsing device, invalid key <%s>\n",
- pair->key);
+ if (!ret)
return -1;
- }
}
return 0;
}
@@ -154,7 +142,7 @@ rte_kvargs_free(struct rte_kvargs *kvlist)
}
/*
- * Parse the arguments "key=value;key=value;..." string and return
+ * Parse the arguments "key=value,key=value,..." string and return
* an allocated structure that contains a key/value list. Also
* check if only valid keys were used.
*/
@@ -180,3 +168,38 @@ rte_kvargs_parse(const char *args, const char * const valid_keys[])
return kvlist;
}
+
+__rte_experimental
+struct rte_kvargs *
+rte_kvargs_parse_delim(const char *args, const char * const valid_keys[],
+ const char *valid_ends)
+{
+ struct rte_kvargs *kvlist = NULL;
+ char *copy;
+ size_t len;
+
+ if (valid_ends == NULL)
+ return rte_kvargs_parse(args, valid_keys);
+
+ copy = strdup(args);
+ if (copy == NULL)
+ return NULL;
+
+ len = strcspn(copy, valid_ends);
+ copy[len] = '\0';
+
+ kvlist = rte_kvargs_parse(copy, valid_keys);
+
+ free(copy);
+ return kvlist;
+}
+
+__rte_experimental
+int
+rte_kvargs_strcmp(const char *key __rte_unused,
+ const char *value, void *opaque)
+{
+ const char *str = opaque;
+
+ return -abs(strcmp(str, value));
+}
diff --git a/lib/librte_kvargs/rte_kvargs.h b/lib/librte_kvargs/rte_kvargs.h
index 51b8120b..fc041956 100644
--- a/lib/librte_kvargs/rte_kvargs.h
+++ b/lib/librte_kvargs/rte_kvargs.h
@@ -25,6 +25,8 @@
extern "C" {
#endif
+#include <rte_compat.h>
+
/** Maximum number of key/value associations */
#define RTE_KVARGS_MAX 32
@@ -72,6 +74,36 @@ struct rte_kvargs *rte_kvargs_parse(const char *args,
const char *const valid_keys[]);
/**
+ * Allocate a rte_kvargs and store key/value associations from a string.
+ * This version will consider any byte from valid_ends as a possible
+ * terminating character, and will not parse beyond the first such occurrence.
+ *
+ * The function allocates and fills an rte_kvargs structure from a given
+ * string whose format is key1=value1,key2=value2,...
+ *
+ * The structure can be freed with rte_kvargs_free().
+ *
+ * @param args
+ * The input string containing the key/value associations
+ *
+ * @param valid_keys
+ * A list of valid keys (table of const char *, the last must be NULL).
+ * This argument is ignored if NULL
+ *
+ * @param valid_ends
+ * Acceptable terminating characters.
+ * If NULL, the behavior is the same as ``rte_kvargs_parse``.
+ *
+ * @return
+ * - A pointer to an allocated rte_kvargs structure on success
+ * - NULL on error
+ */
+__rte_experimental
+struct rte_kvargs *rte_kvargs_parse_delim(const char *args,
+ const char *const valid_keys[],
+ const char *valid_ends);
+
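
For reference, a minimal sketch of how a caller might use the new function; the device string, key names and delimiter below are assumptions for illustration, not taken from this patch:

    static const char * const keys[] = { "class", "key", NULL };
    struct rte_kvargs *kvlist;

    /* stop parsing at the first '/': only "class=eth,key=val" is seen */
    kvlist = rte_kvargs_parse_delim("class=eth,key=val/extra", keys, "/");
    if (kvlist != NULL)
            rte_kvargs_free(kvlist);
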
+/**
* Free a rte_kvargs structure
*
* Free a rte_kvargs structure previously allocated with
@@ -121,6 +153,32 @@ int rte_kvargs_process(const struct rte_kvargs *kvlist,
unsigned rte_kvargs_count(const struct rte_kvargs *kvlist,
const char *key_match);
+/**
+ * Generic kvarg handler for string comparison.
+ *
+ * This function can be used for a generic string comparison processing
+ * on a list of kvargs.
+ *
+ * @param key
+ * kvarg pair key.
+ *
+ * @param value
+ * kvarg pair value.
+ *
+ * @param opaque
+ * Opaque pointer to a string.
+ *
+ * @return
+ * 0 if the strings match.
+ * !0 otherwise or on error.
+ *
+ * Unlike strcmp, comparison ordering is not preserved.
+ * In order for rte_kvargs_process to stop processing on match error,
+ * a negative value is returned even if strcmp had returned a positive one.
+ */
+__rte_experimental
+int rte_kvargs_strcmp(const char *key, const char *value, void *opaque);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_kvargs/rte_kvargs_version.map b/lib/librte_kvargs/rte_kvargs_version.map
index 2030ec46..8f4b4e3f 100644
--- a/lib/librte_kvargs/rte_kvargs_version.map
+++ b/lib/librte_kvargs/rte_kvargs_version.map
@@ -8,3 +8,11 @@ DPDK_2.0 {
local: *;
};
+
+EXPERIMENTAL {
+ global:
+
+ rte_kvargs_parse_delim;
+ rte_kvargs_strcmp;
+
+} DPDK_2.0;
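
Taken together, a hedged sketch of the intended usage pattern (kvlist and the "class"/"eth" names are assumptions): rte_kvargs_strcmp() deliberately returns a negative value on mismatch so that rte_kvargs_process() stops early and returns -1.

    char cls[] = "eth";

    /* returns 0 only if every "class" value in kvlist equals "eth" */
    if (rte_kvargs_process(kvlist, "class", rte_kvargs_strcmp, cls) == 0)
            accept_device();        /* hypothetical helper */
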
diff --git a/lib/librte_latencystats/rte_latencystats.c b/lib/librte_latencystats/rte_latencystats.c
index 66330203..1fdec68e 100644
--- a/lib/librte_latencystats/rte_latencystats.c
+++ b/lib/librte_latencystats/rte_latencystats.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
+ * Copyright(c) 2018 Intel Corporation
*/
#include <unistd.h>
@@ -46,7 +46,7 @@ struct rte_latency_stats {
static struct rte_latency_stats *glob_stats;
struct rxtx_cbs {
- struct rte_eth_rxtx_callback *cb;
+ const struct rte_eth_rxtx_callback *cb;
};
static struct rxtx_cbs rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
@@ -201,7 +201,6 @@ rte_latencystats_init(uint64_t app_samp_intvl,
uint16_t pid;
uint16_t qid;
struct rxtx_cbs *cbs = NULL;
- const uint16_t nb_ports = rte_eth_dev_count();
const char *ptr_strings[NUM_LATENCY_STATS] = {0};
const struct rte_memzone *mz = NULL;
const unsigned int flags = 0;
@@ -234,7 +233,7 @@ rte_latencystats_init(uint64_t app_samp_intvl,
}
/** Register Rx/Tx callbacks */
- for (pid = 0; pid < nb_ports; pid++) {
+ RTE_ETH_FOREACH_DEV(pid) {
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(pid, &dev_info);
for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
@@ -266,10 +265,10 @@ rte_latencystats_uninit(void)
uint16_t qid;
int ret = 0;
struct rxtx_cbs *cbs = NULL;
- const uint16_t nb_ports = rte_eth_dev_count();
+ const struct rte_memzone *mz = NULL;
/** De register Rx/Tx callbacks */
- for (pid = 0; pid < nb_ports; pid++) {
+ RTE_ETH_FOREACH_DEV(pid) {
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(pid, &dev_info);
for (qid = 0; qid < dev_info.nb_rx_queues; qid++) {
@@ -290,6 +289,11 @@ rte_latencystats_uninit(void)
}
}
+ /* free up the memzone */
+ mz = rte_memzone_lookup(MZ_RTE_LATENCY_STATS);
+ if (mz)
+ rte_memzone_free(mz);
+
return 0;
}
diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile
index 367568ae..e2b98a25 100644
--- a/lib/librte_mbuf/Makefile
+++ b/lib/librte_mbuf/Makefile
@@ -6,13 +6,12 @@ include $(RTE_SDK)/mk/rte.vars.mk
# library name
LIB = librte_mbuf.a
-CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
LDLIBS += -lrte_eal -lrte_mempool
EXPORT_MAP := rte_mbuf_version.map
-LIBABIVER := 3
+LIBABIVER := 4
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c rte_mbuf_ptype.c rte_mbuf_pool_ops.c
diff --git a/lib/librte_mbuf/meson.build b/lib/librte_mbuf/meson.build
index 869c17c1..45ffb0db 100644
--- a/lib/librte_mbuf/meson.build
+++ b/lib/librte_mbuf/meson.build
@@ -2,7 +2,6 @@
# Copyright(c) 2017 Intel Corporation
version = 3
-allow_experimental_apis = true
sources = files('rte_mbuf.c', 'rte_mbuf_ptype.c', 'rte_mbuf_pool_ops.c')
headers = files('rte_mbuf.h', 'rte_mbuf_ptype.h', 'rte_mbuf_pool_ops.h')
deps += ['mempool']
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
index 091d388d..e714c5a5 100644
--- a/lib/librte_mbuf/rte_mbuf.c
+++ b/lib/librte_mbuf/rte_mbuf.c
@@ -33,21 +33,6 @@
#include <rte_memcpy.h>
/*
- * ctrlmbuf constructor, given as a callback function to
- * rte_mempool_obj_iter() or rte_mempool_create()
- */
-void
-rte_ctrlmbuf_init(struct rte_mempool *mp,
- __attribute__((unused)) void *opaque_arg,
- void *_m,
- __attribute__((unused)) unsigned i)
-{
- struct rte_mbuf *m = _m;
- rte_pktmbuf_init(mp, opaque_arg, _m, i);
- m->ol_flags |= CTRL_MBUF_FLAG;
-}
-
-/*
* pktmbuf pool constructor, given as a callback function to
* rte_mempool_create(), or called directly if using
* rte_mempool_create_empty()/rte_mempool_populate()
@@ -122,7 +107,7 @@ rte_pktmbuf_init(struct rte_mempool *mp,
}
/* Helper to create a mbuf pool with given mempool ops name*/
-struct rte_mempool * __rte_experimental
+struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
int socket_id, const char *ops_name)
@@ -405,6 +390,9 @@ const char *rte_get_tx_ol_flag_name(uint64_t mask)
case PKT_TX_TUNNEL_IPIP: return "PKT_TX_TUNNEL_IPIP";
case PKT_TX_TUNNEL_GENEVE: return "PKT_TX_TUNNEL_GENEVE";
case PKT_TX_TUNNEL_MPLSINUDP: return "PKT_TX_TUNNEL_MPLSINUDP";
+ case PKT_TX_TUNNEL_VXLAN_GPE: return "PKT_TX_TUNNEL_VXLAN_GPE";
+ case PKT_TX_TUNNEL_IP: return "PKT_TX_TUNNEL_IP";
+ case PKT_TX_TUNNEL_UDP: return "PKT_TX_TUNNEL_UDP";
case PKT_TX_MACSEC: return "PKT_TX_MACSEC";
case PKT_TX_SEC_OFFLOAD: return "PKT_TX_SEC_OFFLOAD";
default: return NULL;
@@ -439,6 +427,12 @@ rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
"PKT_TX_TUNNEL_NONE" },
{ PKT_TX_TUNNEL_MPLSINUDP, PKT_TX_TUNNEL_MASK,
"PKT_TX_TUNNEL_NONE" },
+ { PKT_TX_TUNNEL_VXLAN_GPE, PKT_TX_TUNNEL_MASK,
+ "PKT_TX_TUNNEL_NONE" },
+ { PKT_TX_TUNNEL_IP, PKT_TX_TUNNEL_MASK,
+ "PKT_TX_TUNNEL_NONE" },
+ { PKT_TX_TUNNEL_UDP, PKT_TX_TUNNEL_MASK,
+ "PKT_TX_TUNNEL_NONE" },
{ PKT_TX_MACSEC, PKT_TX_MACSEC, NULL },
{ PKT_TX_SEC_OFFLOAD, PKT_TX_SEC_OFFLOAD, NULL },
};
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 62740254..9ce5d76d 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -201,15 +201,44 @@ extern "C" {
/**
* Bits 45:48 used for the tunnel type.
- * When doing Tx offload like TSO or checksum, the HW needs to configure the
- * tunnel type into the HW descriptors.
+ * The tunnel type must be specified for TSO or checksum on the inner part
+ * of tunnel packets.
+ * These flags can be used with PKT_TX_TCP_SEG for TSO, or PKT_TX_xxx_CKSUM.
+ * The mbuf fields for inner and outer header lengths are required:
+ * outer_l2_len, outer_l3_len, l2_len, l3_len, l4_len and tso_segsz for TSO.
*/
#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
-/**< TX packet with MPLS-in-UDP RFC 7510 header. */
+/** TX packet with MPLS-in-UDP RFC 7510 header. */
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
+#define PKT_TX_TUNNEL_VXLAN_GPE (0x6ULL << 45)
+/**
+ * Generic IP encapsulated tunnel type, used for TSO and checksum offload.
+ * It can be used for tunnels which are not standards or listed above.
+ * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_GRE
+ * or PKT_TX_TUNNEL_IPIP if possible.
+ * The ethdev must be configured with DEV_TX_OFFLOAD_IP_TNL_TSO.
+ * Outer and inner checksums are done according to the existing flags like
+ * PKT_TX_xxx_CKSUM.
+ * Specific tunnel headers that contain payload length, sequence id
+ * or checksum are not expected to be updated.
+ */
+#define PKT_TX_TUNNEL_IP (0xDULL << 45)
+/**
+ * Generic UDP encapsulated tunnel type, used for TSO and checksum offload.
+ * UDP tunnel type implies outer IP layer.
+ * It can be used for tunnels which are not standards or listed above.
+ * It is preferred to use specific tunnel flags like PKT_TX_TUNNEL_VXLAN
+ * if possible.
+ * The ethdev must be configured with DEV_TX_OFFLOAD_UDP_TNL_TSO.
+ * Outer and inner checksums are done according to the existing flags like
+ * PKT_TX_xxx_CKSUM.
+ * Specific tunnel headers that contain payload length, sequence id
+ * or checksum are not expected to be updated.
+ */
+#define PKT_TX_TUNNEL_UDP (0xEULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
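
As a sketch of what the doc above asks for, an application requesting inner TSO on a VXLAN-in-IPv4 packet would fill the length fields and combine the flags roughly as follows; the header lengths are illustrative, not mandated by this patch:

    m->outer_l2_len = 14;            /* outer Ethernet */
    m->outer_l3_len = 20;            /* outer IPv4 */
    m->l2_len = 8 + 8 + 14;          /* outer UDP + VXLAN + inner Ethernet */
    m->l3_len = 20;                  /* inner IPv4 */
    m->l4_len = 20;                  /* inner TCP */
    m->tso_segsz = 1400;
    m->ol_flags |= PKT_TX_TUNNEL_VXLAN | PKT_TX_TCP_SEG | PKT_TX_IPV4 |
                   PKT_TX_IP_CKSUM | PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM;
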
@@ -226,12 +255,8 @@ extern "C" {
* - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
* PKT_TX_TCP_CKSUM)
* - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
- * - if it's IPv4, set the PKT_TX_IP_CKSUM flag and write the IP checksum
- * to 0 in the packet
+ * - if it's IPv4, set the PKT_TX_IP_CKSUM flag
* - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
- * - calculate the pseudo header checksum without taking ip_len in account,
- * and set it in the TCP header. Refer to rte_ipv4_phdr_cksum() and
- * rte_ipv6_phdr_cksum() that can be used as helpers.
*/
#define PKT_TX_TCP_SEG (1ULL << 50)
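
A matching sketch for the plain (non-tunnel) TSO steps listed above, again with illustrative header lengths:

    m->l2_len = 14;                  /* Ethernet */
    m->l3_len = 20;                  /* IPv4 */
    m->l4_len = 20;                  /* TCP */
    m->tso_segsz = 1460;
    m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
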
@@ -244,9 +269,6 @@ extern "C" {
* - fill l2_len and l3_len in mbuf
* - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
* - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
- * - calculate the pseudo header checksum and set it in the L4 header (only
- * for TCP or UDP). See rte_ipv4_phdr_cksum() and rte_ipv6_phdr_cksum().
- * For SCTP, set the crc field to 0.
*/
#define PKT_TX_L4_NO_CKSUM (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
#define PKT_TX_TCP_CKSUM (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */
@@ -258,7 +280,6 @@ extern "C" {
* Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
* also be set by the application, although a PMD will only check
* PKT_TX_IP_CKSUM.
- * - set the IP checksum field in the packet to 0
* - fill the mbuf offload information: l2_len, l3_len
*/
#define PKT_TX_IP_CKSUM (1ULL << 54)
@@ -288,10 +309,8 @@ extern "C" {
/**
* Offload the IP checksum of an external header in the hardware. The
- * flag PKT_TX_OUTER_IPV4 should also be set by the application, alto ugh
- * a PMD will only check PKT_TX_IP_CKSUM. The IP checksum field in the
- * packet must be set to 0.
- * - set the outer IP checksum field in the packet to 0
+ * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
+ * a PMD will only check PKT_TX_OUTER_IP_CKSUM.
* - fill the mbuf offload information: outer_l2_len, outer_l3_len
*/
#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
@@ -326,13 +345,13 @@ extern "C" {
PKT_TX_MACSEC | \
PKT_TX_SEC_OFFLOAD)
-#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
+/**
+ * Mbuf having an external buffer attached. shinfo in mbuf must be filled.
+ */
+#define EXT_ATTACHED_MBUF (1ULL << 61)
#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
-/* Use final bit of flags to indicate a control mbuf */
-#define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
-
/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8
@@ -569,8 +588,27 @@ struct rte_mbuf {
/** Sequence number. See also rte_reorder_insert(). */
uint32_t seqn;
+ /** Shared data for external buffer attached to mbuf. See
+ * rte_pktmbuf_attach_extbuf().
+ */
+ struct rte_mbuf_ext_shared_info *shinfo;
+
} __rte_cache_aligned;
+/**
+ * Function typedef of callback to free externally attached buffer.
+ */
+typedef void (*rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque);
+
+/**
+ * Shared data at the end of an external buffer.
+ */
+struct rte_mbuf_ext_shared_info {
+ rte_mbuf_extbuf_free_callback_t free_cb; /**< Free callback function */
+ void *fcb_opaque; /**< Free callback argument */
+ rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
+};
+
/**< Maximum number of nb_segs allowed. */
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
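
A free callback matching the new typedef might look like the following sketch; it assumes the buffer came from rte_malloc(), which this patch does not require:

    static void
    ext_buf_free_cb(void *addr, void *opaque __rte_unused)
    {
            /* called once the last referencing mbuf has been detached */
            rte_free(addr);
    }
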
@@ -691,14 +729,53 @@ rte_mbuf_to_baddr(struct rte_mbuf *md)
}
/**
- * Returns TRUE if given mbuf is indirect, or FALSE otherwise.
+ * Return the starting address of the private data area embedded in
+ * the given mbuf.
+ *
+ * Note that no check is made to ensure that a private data area
+ * actually exists in the supplied mbuf.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ * @return
+ * The starting address of the private data area of the given mbuf.
+ */
+static inline void * __rte_experimental
+rte_mbuf_to_priv(struct rte_mbuf *m)
+{
+ return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
+}
+
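
A short usage sketch for the experimental helper above; struct app_meta is a hypothetical application-defined type, valid only if the pool was created with priv_size >= sizeof(struct app_meta):

    struct app_meta *meta = rte_mbuf_to_priv(m);

    meta->rx_tsc = rte_rdtsc();     /* stamp the packet on reception */
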
+/**
+ * Returns TRUE if given mbuf is cloned by mbuf indirection, or FALSE
+ * otherwise.
+ *
+ * If a mbuf has its data in another mbuf and references it by mbuf
+ * indirection, this mbuf can be defined as a cloned mbuf.
+ */
+#define RTE_MBUF_CLONED(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
+
+/**
+ * Deprecated.
+ * Use RTE_MBUF_CLONED().
+ */
+#define RTE_MBUF_INDIRECT(mb) RTE_MBUF_CLONED(mb)
+
+/**
+ * Returns TRUE if given mbuf has an external buffer, or FALSE otherwise.
+ *
+ * External buffer is a user-provided anonymous buffer.
*/
-#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
+#define RTE_MBUF_HAS_EXTBUF(mb) ((mb)->ol_flags & EXT_ATTACHED_MBUF)
/**
* Returns TRUE if given mbuf is direct, or FALSE otherwise.
+ *
+ * If a mbuf embeds its own data after the rte_mbuf structure, this mbuf
+ * can be defined as a direct mbuf.
*/
-#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))
+#define RTE_MBUF_DIRECT(mb) \
+ (!((mb)->ol_flags & (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF)))
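
With this change the three cases are mutually exclusive; a classification sketch:

    if (RTE_MBUF_DIRECT(m)) {
            /* data embedded right after the mbuf (and private area) */
    } else if (RTE_MBUF_HAS_EXTBUF(m)) {
            /* data lives in a user-attached external buffer, see m->shinfo */
    } else {
            /* RTE_MBUF_CLONED(m): data borrowed from another, direct mbuf */
    }
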
/**
* Private data in case of pktmbuf pool.
@@ -748,7 +825,7 @@ rte_mbuf_refcnt_read(const struct rte_mbuf *m)
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
- rte_atomic16_set(&m->refcnt_atomic, new_value);
+ rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
}
/* internal */
@@ -778,8 +855,9 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
* reference counter can occur.
*/
if (likely(rte_mbuf_refcnt_read(m) == 1)) {
- rte_mbuf_refcnt_set(m, 1 + value);
- return 1 + value;
+ ++value;
+ rte_mbuf_refcnt_set(m, (uint16_t)value);
+ return (uint16_t)value;
}
return __rte_mbuf_refcnt_update(m, value);
@@ -824,6 +902,59 @@ rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
#endif /* RTE_MBUF_REFCNT_ATOMIC */
+/**
+ * Reads the refcnt of an external buffer.
+ *
+ * @param shinfo
+ * Shared data of the external buffer.
+ * @return
+ * Reference count number.
+ */
+static inline uint16_t
+rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
+{
+ return (uint16_t)(rte_atomic16_read(&shinfo->refcnt_atomic));
+}
+
+/**
+ * Set refcnt of an external buffer.
+ *
+ * @param shinfo
+ * Shared data of the external buffer.
+ * @param new_value
+ * Value set
+ */
+static inline void
+rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
+ uint16_t new_value)
+{
+ rte_atomic16_set(&shinfo->refcnt_atomic, (int16_t)new_value);
+}
+
+/**
+ * Add given value to refcnt of an external buffer and return its new
+ * value.
+ *
+ * @param shinfo
+ * Shared data of the external buffer.
+ * @param value
+ * Value to add/subtract
+ * @return
+ * Updated value
+ */
+static inline uint16_t
+rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
+ int16_t value)
+{
+ if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
+ ++value;
+ rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
+ return (uint16_t)value;
+ }
+
+ return (uint16_t)rte_atomic16_add_return(&shinfo->refcnt_atomic, value);
+}
+
/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
if ((m) != NULL) \
@@ -915,89 +1046,6 @@ __rte_mbuf_raw_free(struct rte_mbuf *m)
rte_mbuf_raw_free(m);
}
-/* Operations on ctrl mbuf */
-
-/**
- * The control mbuf constructor.
- *
- * This function initializes some fields in an mbuf structure that are
- * not modified by the user once created (mbuf type, origin pool, buffer
- * start address, and so on). This function is given as a callback function
- * to rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
- *
- * @param mp
- * The mempool from which the mbuf is allocated.
- * @param opaque_arg
- * A pointer that can be used by the user to retrieve useful information
- * for mbuf initialization. This pointer is the opaque argument passed to
- * rte_mempool_obj_iter() or rte_mempool_create().
- * @param m
- * The mbuf to initialize.
- * @param i
- * The index of the mbuf in the pool table.
- */
-void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
- void *m, unsigned i);
-
-/**
- * Allocate a new mbuf (type is ctrl) from mempool *mp*.
- *
- * This new mbuf is initialized with data pointing to the beginning of
- * buffer, and with a length of zero.
- *
- * @param mp
- * The mempool from which the mbuf is allocated.
- * @return
- * - The pointer to the new mbuf on success.
- * - NULL if allocation failed.
- */
-#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)
-
-/**
- * Free a control mbuf back into its original mempool.
- *
- * @param m
- * The control mbuf to be freed.
- */
-#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)
-
-/**
- * A macro that returns the pointer to the carried data.
- *
- * The value that can be read or assigned.
- *
- * @param m
- * The control mbuf.
- */
-#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)
-
-/**
- * A macro that returns the length of the carried data.
- *
- * The value that can be read or assigned.
- *
- * @param m
- * The control mbuf.
- */
-#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)
-
-/**
- * Tests if an mbuf is a control mbuf
- *
- * @param m
- * The mbuf to be tested
- * @return
- * - True (1) if the mbuf is a control mbuf
- * - False(0) otherwise
- */
-static inline int
-rte_is_ctrlmbuf(struct rte_mbuf *m)
-{
- return !!(m->ol_flags & CTRL_MBUF_FLAG);
-}
-
-/* Operations on pkt mbuf */
-
/**
* The packet mbuf constructor.
*
@@ -1116,7 +1164,7 @@ rte_pktmbuf_pool_create(const char *name, unsigned n,
* - EEXIST - a memzone with the same name already exists
* - ENOMEM - no appropriate memory area found in which to create memzone
*/
-struct rte_mempool * __rte_experimental
+struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
int socket_id, const char *ops_name);
@@ -1172,7 +1220,8 @@ rte_pktmbuf_priv_size(struct rte_mempool *mp)
*/
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
- m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
+ m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
+ (uint16_t)m->buf_len);
}
/**
@@ -1281,11 +1330,161 @@ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
}
/**
+ * Initialize shared data at the end of an external buffer before attaching
+ * to a mbuf by ``rte_pktmbuf_attach_extbuf()``. This is not a mandatory
+ * initialization but a helper function to simply spare a few bytes at the
+ * end of the buffer for shared data. If shared data is allocated
+ * separately, this should not be called and the application has to properly
+ * initialize the shared data according to its need.
+ *
+ * Free callback and its argument is saved and the refcnt is set to 1.
+ *
+ * @warning
+ * The value of buf_len will be reduced to RTE_PTR_DIFF(shinfo, buf_addr)
+ * after this initialization. This shall be used for
+ * ``rte_pktmbuf_attach_extbuf()``
+ *
+ * @param buf_addr
+ * The pointer to the external buffer.
+ * @param [in,out] buf_len
+ * The pointer to length of the external buffer. Input value must be
+ * larger than the size of ``struct rte_mbuf_ext_shared_info`` and
+ * padding for alignment. If not enough, this function will return NULL.
+ * Adjusted buffer length will be returned through this pointer.
+ * @param free_cb
+ * Free callback function to call when the external buffer needs to be
+ * freed.
+ * @param fcb_opaque
+ * Argument for the free callback function.
+ *
+ * @return
+ * A pointer to the initialized shared data on success, return NULL
+ * otherwise.
+ */
+static inline struct rte_mbuf_ext_shared_info *
+rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
+ rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
+{
+ struct rte_mbuf_ext_shared_info *shinfo;
+ void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
+ void *addr;
+
+ addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
+ sizeof(uintptr_t));
+ if (addr <= buf_addr)
+ return NULL;
+
+ shinfo = (struct rte_mbuf_ext_shared_info *)addr;
+ shinfo->free_cb = free_cb;
+ shinfo->fcb_opaque = fcb_opaque;
+ rte_mbuf_ext_refcnt_set(shinfo, 1);
+
+ *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
+ return shinfo;
+}
+
+/**
+ * Attach an external buffer to a mbuf.
+ *
+ * A user-managed anonymous buffer can be attached to an mbuf. When attaching
+ * it, the corresponding free callback function and its argument should be
+ * provided via shinfo. This callback function will be called once all the
+ * mbufs are detached from the buffer (refcnt becomes zero).
+ *
+ * The headroom for the attaching mbuf will be set to zero and this can be
+ * properly adjusted after attachment. For example, ``rte_pktmbuf_adj()``
+ * or ``rte_pktmbuf_reset_headroom()`` might be used.
+ *
+ * More mbufs can be attached to the same external buffer by
+ * ``rte_pktmbuf_attach()`` once the external buffer has been attached by
+ * this API.
+ *
+ * Detachment can be done by either ``rte_pktmbuf_detach_extbuf()`` or
+ * ``rte_pktmbuf_detach()``.
+ *
+ * Memory for shared data must be provided and the user must initialize all
+ * of the content properly, especially the free callback and refcnt. The pointer
+ * of shared data will be stored in m->shinfo.
+ * ``rte_pktmbuf_ext_shinfo_init_helper`` can help to simply spare a few
+ * bytes at the end of buffer for the shared data, store free callback and
+ * its argument and set the refcnt to 1. The following is an example:
+ *
+ * struct rte_mbuf_ext_shared_info *shinfo =
+ * rte_pktmbuf_ext_shinfo_init_helper(buf_addr, &buf_len,
+ * free_cb, fcb_arg);
+ * rte_pktmbuf_attach_extbuf(m, buf_addr, buf_iova, buf_len, shinfo);
+ * rte_pktmbuf_reset_headroom(m);
+ * rte_pktmbuf_adj(m, data_len);
+ *
+ * Attaching an external buffer is quite similar to mbuf indirection in
+ * replacing buffer addresses and length of a mbuf, but with a few differences:
+ * - When an indirect mbuf is attached, refcnt of the direct mbuf would be
+ * 2 as long as the direct mbuf itself isn't freed after the attachment.
+ * In such cases, the buffer area of a direct mbuf must be read-only. But
+ * external buffer has its own refcnt and it starts from 1. Unless
+ * multiple mbufs are attached to a mbuf having an external buffer, the
+ * external buffer is writable.
+ * - There's no need to allocate buffer from a mempool. Any buffer can be
+ * attached with appropriate free callback and its IO address.
+ * - Smaller metadata is required to maintain shared data such as refcnt.
+ *
+ * @warning
+ * @b EXPERIMENTAL: This API may change without prior notice.
+ * Once external buffer is enabled by allowing experimental API,
+ * ``RTE_MBUF_DIRECT()`` and ``RTE_MBUF_INDIRECT()`` are no longer
+ * exclusive. A mbuf can be considered direct if it is neither indirect nor
+ * attached to an external buffer.
+ *
+ * @param m
+ * The pointer to the mbuf.
+ * @param buf_addr
+ * The pointer to the external buffer.
+ * @param buf_iova
+ * IO address of the external buffer.
+ * @param buf_len
+ * The size of the external buffer.
+ * @param shinfo
+ * User-provided memory for shared data of the external buffer.
+ */
+static inline void __rte_experimental
+rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
+ rte_iova_t buf_iova, uint16_t buf_len,
+ struct rte_mbuf_ext_shared_info *shinfo)
+{
+ /* mbuf should not be read-only */
+ RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
+ RTE_ASSERT(shinfo->free_cb != NULL);
+
+ m->buf_addr = buf_addr;
+ m->buf_iova = buf_iova;
+ m->buf_len = buf_len;
+
+ m->data_len = 0;
+ m->data_off = 0;
+
+ m->ol_flags |= EXT_ATTACHED_MBUF;
+ m->shinfo = shinfo;
+}
+
+/**
+ * Detach the external buffer attached to a mbuf, same as
+ * ``rte_pktmbuf_detach()``
+ *
+ * @param m
+ * The mbuf having external buffer.
+ */
+#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
+
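
Tying the new calls together, a hedged end-to-end sketch; the buffer source, sizes and ext_buf_free_cb (the callback sketched earlier) are assumptions, and the calls need rte_malloc.h plus a build with ALLOW_EXPERIMENTAL_API:

    uint16_t buf_len = 4096;
    void *ext_buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
    struct rte_mbuf_ext_shared_info *shinfo;

    shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf, &buf_len,
                    ext_buf_free_cb, ext_buf);
    if (shinfo == NULL)
            rte_panic("external buffer too small for shared info\n");

    /* buf_len now excludes the bytes reserved for shinfo at the tail */
    rte_pktmbuf_attach_extbuf(m, ext_buf, rte_malloc_virt2iova(ext_buf),
                    buf_len, shinfo);
    rte_pktmbuf_reset_headroom(m);
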
+/**
* Attach packet mbuf to another packet mbuf.
*
- * After attachment we refer the mbuf we attached as 'indirect',
- * while mbuf we attached to as 'direct'.
- * The direct mbuf's reference counter is incremented.
+ * If the mbuf we are attaching to isn't a direct buffer and is attached to
+ * an external buffer, the mbuf being attached will be attached to the
+ * external buffer instead of mbuf indirection.
+ *
+ * Otherwise, the mbuf will be indirectly attached. After attachment we
+ * refer to the mbuf we attached as 'indirect', and to the mbuf we attached
+ * to as 'direct'. The direct mbuf's reference counter is incremented.
*
* Right now, not supported:
* - attachment for already indirect mbuf (e.g. - mi has to be direct).
@@ -1299,19 +1498,20 @@ static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
*/
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
- struct rte_mbuf *md;
-
RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
rte_mbuf_refcnt_read(mi) == 1);
- /* if m is not direct, get the mbuf that embeds the data */
- if (RTE_MBUF_DIRECT(m))
- md = m;
- else
- md = rte_mbuf_from_indirect(m);
+ if (RTE_MBUF_HAS_EXTBUF(m)) {
+ rte_mbuf_ext_refcnt_update(m->shinfo, 1);
+ mi->ol_flags = m->ol_flags;
+ mi->shinfo = m->shinfo;
+ } else {
+ /* if m is not direct, get the mbuf that embeds the data */
+ rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
+ mi->priv_size = m->priv_size;
+ mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
+ }
- rte_mbuf_refcnt_update(md, 1);
- mi->priv_size = m->priv_size;
mi->buf_iova = m->buf_iova;
mi->buf_addr = m->buf_addr;
mi->buf_len = m->buf_len;
@@ -1327,7 +1527,6 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
mi->next = NULL;
mi->pkt_len = mi->data_len;
mi->nb_segs = 1;
- mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
mi->packet_type = m->packet_type;
mi->timestamp = m->timestamp;
@@ -1336,12 +1535,52 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
}
/**
- * Detach an indirect packet mbuf.
+ * @internal used by rte_pktmbuf_detach().
+ *
+ * Decrement the reference counter of the external buffer. When the
+ * reference counter becomes 0, the buffer is freed by pre-registered
+ * callback.
+ */
+static inline void
+__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
+{
+ RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
+ RTE_ASSERT(m->shinfo != NULL);
+
+ if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
+ m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
+}
+
+/**
+ * @internal used by rte_pktmbuf_detach().
+ *
+ * Decrement the direct mbuf's reference counter. When the reference
+ * counter becomes 0, the direct mbuf is freed.
+ */
+static inline void
+__rte_pktmbuf_free_direct(struct rte_mbuf *m)
+{
+ struct rte_mbuf *md;
+
+ RTE_ASSERT(RTE_MBUF_INDIRECT(m));
+
+ md = rte_mbuf_from_indirect(m);
+
+ if (rte_mbuf_refcnt_update(md, -1) == 0) {
+ md->next = NULL;
+ md->nb_segs = 1;
+ rte_mbuf_refcnt_set(md, 1);
+ rte_mbuf_raw_free(md);
+ }
+}
+
+/**
+ * Detach a packet mbuf from external buffer or direct buffer.
*
+ * - decrement refcnt and free the external/direct buffer if refcnt
+ * becomes zero.
* - restore original mbuf address and length values.
* - reset pktmbuf data and data_len to their default values.
- * - decrement the direct mbuf's reference counter. When the
- * reference counter becomes 0, the direct mbuf is freed.
*
* All other fields of the given packet mbuf will be left intact.
*
@@ -1350,12 +1589,17 @@ static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
*/
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
- struct rte_mbuf *md = rte_mbuf_from_indirect(m);
struct rte_mempool *mp = m->pool;
- uint32_t mbuf_size, buf_len, priv_size;
+ uint32_t mbuf_size, buf_len;
+ uint16_t priv_size;
+
+ if (RTE_MBUF_HAS_EXTBUF(m))
+ __rte_pktmbuf_free_extbuf(m);
+ else
+ __rte_pktmbuf_free_direct(m);
priv_size = rte_pktmbuf_priv_size(mp);
- mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
buf_len = rte_pktmbuf_data_room_size(mp);
m->priv_size = priv_size;
@@ -1365,13 +1609,6 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
rte_pktmbuf_reset_headroom(m);
m->data_len = 0;
m->ol_flags = 0;
-
- if (rte_mbuf_refcnt_update(md, -1) == 0) {
- md->next = NULL;
- md->nb_segs = 1;
- rte_mbuf_refcnt_set(md, 1);
- rte_mbuf_raw_free(md);
- }
}
/**
@@ -1395,7 +1632,7 @@ rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
if (likely(rte_mbuf_refcnt_read(m) == 1)) {
- if (RTE_MBUF_INDIRECT(m))
+ if (!RTE_MBUF_DIRECT(m))
rte_pktmbuf_detach(m);
if (m->next != NULL) {
@@ -1407,7 +1644,7 @@ rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
- if (RTE_MBUF_INDIRECT(m))
+ if (!RTE_MBUF_DIRECT(m))
rte_pktmbuf_detach(m);
if (m->next != NULL) {
@@ -1690,7 +1927,10 @@ static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
if (unlikely(len > rte_pktmbuf_headroom(m)))
return NULL;
- m->data_off -= len;
+ /* NB: elaborating the subtraction like this instead of using
+ * -= allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
+ m->data_off = (uint16_t)(m->data_off - len);
m->data_len = (uint16_t)(m->data_len + len);
m->pkt_len = (m->pkt_len + len);
@@ -1750,8 +1990,11 @@ static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
if (unlikely(len > m->data_len))
return NULL;
+ /* NB: elaborating the addition like this instead of using
+ * += allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
m->data_len = (uint16_t)(m->data_len - len);
- m->data_off += len;
+ m->data_off = (uint16_t)(m->data_off + len);
m->pkt_len = (m->pkt_len - len);
return (char *)m->buf_addr + m->data_off;
}
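
The casts added above work around C integer promotion: both uint16_t operands are promoted to int, and assigning the int result back narrows implicitly, which gcc 8.1 flags under -Wconversion with -Werror. A standalone sketch of the difference:

    uint16_t off = 128, len = 64;

    /* off -= len;                 would warn: int narrowed to uint16_t */
    off = (uint16_t)(off - len);   /* explicit cast keeps gcc 8.1 quiet */
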
@@ -1863,8 +2106,11 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail
cur_tail = rte_pktmbuf_lastseg(head);
cur_tail->next = tail;
- /* accumulate number of segments and total length. */
- head->nb_segs += tail->nb_segs;
+ /* accumulate number of segments and total length.
+ * NB: elaborating the addition like this instead of using
+ * += allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
+ head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
head->pkt_len += tail->pkt_len;
/* pkt_len is only set in the head */
@@ -1894,7 +2140,11 @@ rte_validate_tx_offload(const struct rte_mbuf *m)
return 0;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
+ /* NB: elaborating the addition like this instead of using
+ * += gives the result uint64_t type instead of int,
+ * avoiding compiler warnings on gcc 8.1 at least */
+ inner_l3_offset = inner_l3_offset + m->outer_l2_len +
+ m->outer_l3_len;
/* Headers are fragmented */
if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
@@ -1939,7 +2189,7 @@ rte_validate_tx_offload(const struct rte_mbuf *m)
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
- int seg_len, copy_len;
+ size_t seg_len, copy_len;
struct rte_mbuf *m;
struct rte_mbuf *m_next;
char *buffer;
diff --git a/lib/librte_mbuf/rte_mbuf_pool_ops.c b/lib/librte_mbuf/rte_mbuf_pool_ops.c
index 48cc3420..5722976f 100644
--- a/lib/librte_mbuf/rte_mbuf_pool_ops.c
+++ b/lib/librte_mbuf/rte_mbuf_pool_ops.c
@@ -9,7 +9,7 @@
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
-int __rte_experimental
+int
rte_mbuf_set_platform_mempool_ops(const char *ops_name)
{
const struct rte_memzone *mz;
@@ -23,7 +23,7 @@ rte_mbuf_set_platform_mempool_ops(const char *ops_name)
RTE_MEMPOOL_OPS_NAMESIZE, SOCKET_ID_ANY, 0);
if (mz == NULL)
return -rte_errno;
- strncpy(mz->addr, ops_name, strlen(ops_name));
+ strcpy(mz->addr, ops_name);
return 0;
} else if (strcmp(mz->addr, ops_name) == 0) {
return 0;
@@ -35,7 +35,7 @@ rte_mbuf_set_platform_mempool_ops(const char *ops_name)
return -EEXIST;
}
-const char * __rte_experimental
+const char *
rte_mbuf_platform_mempool_ops(void)
{
const struct rte_memzone *mz;
@@ -46,7 +46,7 @@ rte_mbuf_platform_mempool_ops(void)
return mz->addr;
}
-int __rte_experimental
+int
rte_mbuf_set_user_mempool_ops(const char *ops_name)
{
const struct rte_memzone *mz;
@@ -62,12 +62,12 @@ rte_mbuf_set_user_mempool_ops(const char *ops_name)
return -rte_errno;
}
- strncpy(mz->addr, ops_name, strlen(ops_name));
+ strcpy(mz->addr, ops_name);
return 0;
}
-const char * __rte_experimental
+const char *
rte_mbuf_user_mempool_ops(void)
{
const struct rte_memzone *mz;
@@ -79,7 +79,7 @@ rte_mbuf_user_mempool_ops(void)
}
/* Return mbuf pool ops name */
-const char * __rte_experimental
+const char *
rte_mbuf_best_mempool_ops(void)
{
/* User defined mempool ops takes the priority */
diff --git a/lib/librte_mbuf/rte_mbuf_pool_ops.h b/lib/librte_mbuf/rte_mbuf_pool_ops.h
index ebf5bf0f..7ed95a49 100644
--- a/lib/librte_mbuf/rte_mbuf_pool_ops.h
+++ b/lib/librte_mbuf/rte_mbuf_pool_ops.h
@@ -12,9 +12,6 @@
* These APIs are for configuring the mbuf pool ops names to be largely used by
* rte_pktmbuf_pool_create(). However, this can also be used to set and inquire
* the best mempool ops available.
- *
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
*/
#include <rte_compat.h>
@@ -34,7 +31,7 @@ extern "C" {
* - On success, zero.
* - On failure, a negative value.
*/
-int __rte_experimental
+int
rte_mbuf_set_platform_mempool_ops(const char *ops_name);
/**
@@ -46,7 +43,7 @@ rte_mbuf_set_platform_mempool_ops(const char *ops_name);
* - On success, platform pool ops name.
* - On failure, NULL.
*/
-const char * __rte_experimental
+const char *
rte_mbuf_platform_mempool_ops(void);
/**
@@ -60,7 +57,7 @@ rte_mbuf_platform_mempool_ops(void);
* - On success, zero.
* - On failure, a negative value.
*/
-int __rte_experimental
+int
rte_mbuf_set_user_mempool_ops(const char *ops_name);
/**
@@ -72,7 +69,7 @@ rte_mbuf_set_user_mempool_ops(const char *ops_name);
* - On success, user pool ops name.
* - On failure, NULL.
*/
-const char * __rte_experimental
+const char *
rte_mbuf_user_mempool_ops(void);
/**
@@ -87,7 +84,7 @@ rte_mbuf_user_mempool_ops(void);
* @return
* returns preferred mbuf pool ops name
*/
-const char * __rte_experimental
+const char *
rte_mbuf_best_mempool_ops(void);
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.c b/lib/librte_mbuf/rte_mbuf_ptype.c
index 1feefacc..d7835e28 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.c
+++ b/lib/librte_mbuf/rte_mbuf_ptype.c
@@ -65,6 +65,9 @@ const char *rte_get_ptype_tunnel_name(uint32_t ptype)
case RTE_PTYPE_TUNNEL_GTPU: return "TUNNEL_GTPU";
case RTE_PTYPE_TUNNEL_ESP: return "TUNNEL_ESP";
case RTE_PTYPE_TUNNEL_L2TP: return "TUNNEL_L2TP";
+ case RTE_PTYPE_TUNNEL_VXLAN_GPE: return "TUNNEL_VXLAN_GPE";
+ case RTE_PTYPE_TUNNEL_MPLS_IN_UDP: return "TUNNEL_MPLS_IN_UDP";
+ case RTE_PTYPE_TUNNEL_MPLS_IN_GRE: return "TUNNEL_MPLS_IN_GRE";
default: return "TUNNEL_UNKNOWN";
}
}
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index b9a33811..01acc66e 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -423,6 +423,53 @@ extern "C" {
*/
#define RTE_PTYPE_TUNNEL_L2TP 0x0000a000
/**
+ * VXLAN-GPE (VXLAN Generic Protocol Extension) tunneling packet type.
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=4790>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=4790>
+ */
+#define RTE_PTYPE_TUNNEL_VXLAN_GPE 0x0000b000
+/**
+ * MPLS-in-GRE tunneling packet type (RFC 4023).
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=47
+ * | 'protocol'=0x8847>
+ * or,
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=47
+ * | 'protocol'=0x8848>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=47
+ * | 'protocol'=0x8847>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=47
+ * | 'protocol'=0x8848>
+ */
+#define RTE_PTYPE_TUNNEL_MPLS_IN_GRE 0x0000c000
+/**
+ * MPLS-in-UDP tunneling packet type (RFC 7510).
+ *
+ * Packet format:
+ * <'ether type'=0x0800
+ * | 'version'=4, 'protocol'=17
+ * | 'destination port'=6635>
+ * or,
+ * <'ether type'=0x86DD
+ * | 'version'=6, 'next header'=17
+ * | 'destination port'=6635>
+ */
+#define RTE_PTYPE_TUNNEL_MPLS_IN_UDP 0x0000d000
+/**
* Mask of tunneling packet types.
*/
#define RTE_PTYPE_TUNNEL_MASK 0x0000f000
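
A sketch of matching the newly added classifications on receive; handle_tunnel() is a hypothetical application helper:

    switch (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
    case RTE_PTYPE_TUNNEL_VXLAN_GPE:
    case RTE_PTYPE_TUNNEL_MPLS_IN_UDP:
    case RTE_PTYPE_TUNNEL_MPLS_IN_GRE:
            handle_tunnel(m);
            break;
    default:
            break;
    }
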
@@ -606,9 +653,9 @@ extern "C" {
#define RTE_ETH_IS_IPV4_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV4)
/**
- * Check if the (outer) L3 header is IPv4. To avoid comparing IPv4 types one by
- * one, bit 6 is selected to be used for IPv4 only. Then checking bit 6 can
- * determine if it is an IPV4 packet.
+ * Check if the (outer) L3 header is IPv6. To avoid comparing IPv6 types one by
+ * one, bit 6 is selected to be used for IPv6 only. Then checking bit 6 can
+ * determine if it is an IPV6 packet.
*/
#define RTE_ETH_IS_IPV6_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV6)
diff --git a/lib/librte_mbuf/rte_mbuf_version.map b/lib/librte_mbuf/rte_mbuf_version.map
index d418dcb8..cae68db8 100644
--- a/lib/librte_mbuf/rte_mbuf_version.map
+++ b/lib/librte_mbuf/rte_mbuf_version.map
@@ -1,7 +1,6 @@
DPDK_2.0 {
global:
- rte_ctrlmbuf_init;
rte_get_rx_ol_flag_name;
rte_get_tx_ol_flag_name;
rte_mbuf_sanity_check;
@@ -36,7 +35,7 @@ DPDK_16.11 {
} DPDK_2.1;
-EXPERIMENTAL {
+DPDK_18.08 {
global:
rte_mbuf_best_mempool_ops;
@@ -45,5 +44,4 @@ EXPERIMENTAL {
rte_mbuf_set_user_mempool_ops;
rte_mbuf_user_mempool_ops;
rte_pktmbuf_pool_create_by_ops;
-
} DPDK_16.11;
diff --git a/lib/librte_member/rte_member.c b/lib/librte_member/rte_member.c
index e147dd1f..702c01d3 100644
--- a/lib/librte_member/rte_member.c
+++ b/lib/librte_member/rte_member.c
@@ -297,10 +297,7 @@ rte_member_reset(const struct rte_member_setsum *setsum)
}
}
-RTE_INIT(librte_member_init_log);
-
-static void
-librte_member_init_log(void)
+RTE_INIT(librte_member_init_log)
{
librte_member_logtype = rte_log_register("lib.member");
if (librte_member_logtype >= 0)
diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
index 24e735a3..20bf63fb 100644
--- a/lib/librte_mempool/Makefile
+++ b/lib/librte_mempool/Makefile
@@ -7,15 +7,20 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_mempool.a
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+CFLAGS += -DALLOW_EXPERIMENTAL_API
LDLIBS += -lrte_eal -lrte_ring
EXPORT_MAP := rte_mempool_version.map
-LIBABIVER := 3
+LIBABIVER := 5
+
+# memseg walk is not yet part of stable API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool.c
SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += rte_mempool_ops_default.c
# install includes
SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
diff --git a/lib/librte_mempool/meson.build b/lib/librte_mempool/meson.build
index 7a4f3dae..38d7ae89 100644
--- a/lib/librte_mempool/meson.build
+++ b/lib/librte_mempool/meson.build
@@ -1,7 +1,21 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-version = 2
-sources = files('rte_mempool.c', 'rte_mempool_ops.c')
+allow_experimental_apis = true
+
+extra_flags = []
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+version = 5
+sources = files('rte_mempool.c', 'rte_mempool_ops.c',
+ 'rte_mempool_ops_default.c')
headers = files('rte_mempool.h')
deps += ['ring']
+
+# memseg walk is not yet part of stable API
+allow_experimental_apis = true
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index 54f7f4ba..03e6b5f7 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -3,6 +3,7 @@
* Copyright(c) 2016 6WIND S.A.
*/
+#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
@@ -98,8 +99,31 @@ static unsigned optimize_object_size(unsigned obj_size)
return new_obj_size * RTE_MEMPOOL_ALIGN;
}
+static int
+find_min_pagesz(const struct rte_memseg_list *msl, void *arg)
+{
+ size_t *min = arg;
+
+ if (msl->page_sz < *min)
+ *min = msl->page_sz;
+
+ return 0;
+}
+
+static size_t
+get_min_page_size(void)
+{
+ size_t min_pagesz = SIZE_MAX;
+
+ rte_memseg_list_walk(find_min_pagesz, &min_pagesz);
+
+ return min_pagesz == SIZE_MAX ? (size_t) getpagesize() : min_pagesz;
+}
+
+
static void
-mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova)
+mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
+ void *obj, rte_iova_t iova)
{
struct rte_mempool_objhdr *hdr;
struct rte_mempool_objtlr *tlr __rte_unused;
@@ -116,9 +140,6 @@ mempool_add_elem(struct rte_mempool *mp, void *obj, rte_iova_t iova)
tlr = __mempool_get_trailer(obj);
tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
#endif
-
- /* enqueue in ring */
- rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
}
/* call obj_cb() for each mempool element */
@@ -204,92 +225,6 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
return sz->total_size;
}
-
-/*
- * Calculate maximum amount of memory required to store given number of objects.
- */
-size_t
-rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
- unsigned int flags)
-{
- size_t obj_per_page, pg_num, pg_sz;
- unsigned int mask;
-
- mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
- if ((flags & mask) == mask)
- /* alignment need one additional object */
- elt_num += 1;
-
- if (total_elt_sz == 0)
- return 0;
-
- if (pg_shift == 0)
- return total_elt_sz * elt_num;
-
- pg_sz = (size_t)1 << pg_shift;
- obj_per_page = pg_sz / total_elt_sz;
- if (obj_per_page == 0)
- return RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * elt_num;
-
- pg_num = (elt_num + obj_per_page - 1) / obj_per_page;
- return pg_num << pg_shift;
-}
-
-/*
- * Calculate how much memory would be actually required with the
- * given memory footprint to store required number of elements.
- */
-ssize_t
-rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
- size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
- uint32_t pg_shift, unsigned int flags)
-{
- uint32_t elt_cnt = 0;
- rte_iova_t start, end;
- uint32_t iova_idx;
- size_t pg_sz = (size_t)1 << pg_shift;
- unsigned int mask;
-
- mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
- if ((flags & mask) == mask)
- /* alignment need one additional object */
- elt_num += 1;
-
- /* if iova is NULL, assume contiguous memory */
- if (iova == NULL) {
- start = 0;
- end = pg_sz * pg_num;
- iova_idx = pg_num;
- } else {
- start = iova[0];
- end = iova[0] + pg_sz;
- iova_idx = 1;
- }
- while (elt_cnt < elt_num) {
-
- if (end - start >= total_elt_sz) {
- /* enough contiguous memory, add an object */
- start += total_elt_sz;
- elt_cnt++;
- } else if (iova_idx < pg_num) {
- /* no room to store one obj, add a page */
- if (end == iova[iova_idx]) {
- end += pg_sz;
- } else {
- start = iova[iova_idx];
- end = iova[iova_idx] + pg_sz;
- }
- iova_idx++;
-
- } else {
- /* no more page, return how many elements fit */
- return -(size_t)elt_cnt;
- }
- }
-
- return (size_t)iova_idx << pg_shift;
-}
-
/* free a memchunk allocated with rte_memzone_reserve() */
static void
rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
@@ -323,6 +258,21 @@ rte_mempool_free_memchunks(struct rte_mempool *mp)
}
}
+static int
+mempool_ops_alloc_once(struct rte_mempool *mp)
+{
+ int ret;
+
+ /* create the internal ring if not already done */
+ if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
+ ret = rte_mempool_ops_alloc(mp);
+ if (ret != 0)
+ return ret;
+ mp->flags |= MEMPOOL_F_POOL_CREATED;
+ }
+ return 0;
+}
+
/* Add objects in the pool, using a physically contiguous memory
* zone. Return the number of objects added, or a negative value
* on error.
@@ -332,51 +282,19 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
- unsigned total_elt_sz;
- unsigned int mp_capa_flags;
unsigned i = 0;
size_t off;
struct rte_mempool_memhdr *memhdr;
int ret;
- /* create the internal ring if not already done */
- if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
- ret = rte_mempool_ops_alloc(mp);
- if (ret != 0)
- return ret;
- mp->flags |= MEMPOOL_F_POOL_CREATED;
- }
-
- /* Notify memory area to mempool */
- ret = rte_mempool_ops_register_memory_area(mp, vaddr, iova, len);
- if (ret != -ENOTSUP && ret < 0)
+ ret = mempool_ops_alloc_once(mp);
+ if (ret != 0)
return ret;
/* mempool is already populated */
if (mp->populated_size >= mp->size)
return -ENOSPC;
- total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
-
- /* Get mempool capabilities */
- mp_capa_flags = 0;
- ret = rte_mempool_ops_get_capabilities(mp, &mp_capa_flags);
- if ((ret < 0) && (ret != -ENOTSUP))
- return ret;
-
- /* update mempool capabilities */
- mp->flags |= mp_capa_flags;
-
- /* Detect pool area has sufficient space for elements */
- if (mp_capa_flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
- if (len < total_elt_sz * mp->size) {
- RTE_LOG(ERR, MEMPOOL,
- "pool area %" PRIx64 " not enough\n",
- (uint64_t)len);
- return -ENOSPC;
- }
- }
-
memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
if (memhdr == NULL)
return -ENOMEM;
@@ -388,89 +306,34 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
memhdr->free_cb = free_cb;
memhdr->opaque = opaque;
- if (mp_capa_flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS)
- /* align object start address to a multiple of total_elt_sz */
- off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
- else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
else
off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
- while (off + total_elt_sz <= len && mp->populated_size < mp->size) {
- off += mp->header_size;
- if (iova == RTE_BAD_IOVA)
- mempool_add_elem(mp, (char *)vaddr + off,
- RTE_BAD_IOVA);
- else
- mempool_add_elem(mp, (char *)vaddr + off, iova + off);
- off += mp->elt_size + mp->trailer_size;
- i++;
+ if (off > len) {
+ ret = -EINVAL;
+ goto fail;
}
+ i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
+ (char *)vaddr + off,
+ (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
+ len - off, mempool_add_elem, NULL);
+
/* not enough room to store one object */
- if (i == 0)
- return -EINVAL;
+ if (i == 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
mp->nb_mem_chunks++;
return i;
-}
-
-int
-rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
- phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
- void *opaque)
-{
- return rte_mempool_populate_iova(mp, vaddr, paddr, len, free_cb, opaque);
-}
-
-/* Add objects in the pool, using a table of physical pages. Return the
- * number of objects added, or a negative value on error.
- */
-int
-rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
- const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
- rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
-{
- uint32_t i, n;
- int ret, cnt = 0;
- size_t pg_sz = (size_t)1 << pg_shift;
-
- /* mempool must not be populated */
- if (mp->nb_mem_chunks != 0)
- return -EEXIST;
-
- if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
- return rte_mempool_populate_iova(mp, vaddr, RTE_BAD_IOVA,
- pg_num * pg_sz, free_cb, opaque);
-
- for (i = 0; i < pg_num && mp->populated_size < mp->size; i += n) {
-
- /* populate with the largest group of contiguous pages */
- for (n = 1; (i + n) < pg_num &&
- iova[i + n - 1] + pg_sz == iova[i + n]; n++)
- ;
-
- ret = rte_mempool_populate_iova(mp, vaddr + i * pg_sz,
- iova[i], n * pg_sz, free_cb, opaque);
- if (ret < 0) {
- rte_mempool_free_memchunks(mp);
- return ret;
- }
- /* no need to call the free callback for next chunks */
- free_cb = NULL;
- cnt += ret;
- }
- return cnt;
-}
-int
-rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
- rte_mempool_memchunk_free_cb_t *free_cb, void *opaque)
-{
- return rte_mempool_populate_iova_tab(mp, vaddr, paddr, pg_num, pg_shift,
- free_cb, opaque);
+fail:
+ rte_free(memhdr);
+ return ret;
}
/* Populate the mempool with a virtual area. Return the number of
@@ -485,16 +348,13 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
size_t off, phys_len;
int ret, cnt = 0;
- /* mempool must not be populated */
- if (mp->nb_mem_chunks != 0)
- return -EEXIST;
/* address and len must be page-aligned */
if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
return -EINVAL;
if (RTE_ALIGN_CEIL(len, pg_sz) != len)
return -EINVAL;
- if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+ if (mp->flags & MEMPOOL_F_NO_IOVA_CONTIG)
return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
len, free_cb, opaque);
@@ -544,39 +404,94 @@ rte_mempool_populate_default(struct rte_mempool *mp)
unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
char mz_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
- size_t size, total_elt_sz, align, pg_sz, pg_shift;
+ ssize_t mem_size;
+ size_t align, pg_sz, pg_shift;
rte_iova_t iova;
unsigned mz_id, n;
- unsigned int mp_flags;
int ret;
+ bool no_contig, try_contig, no_pageshift;
+
+ ret = mempool_ops_alloc_once(mp);
+ if (ret != 0)
+ return ret;
/* mempool must not be populated */
if (mp->nb_mem_chunks != 0)
return -EEXIST;
- /* Get mempool capabilities */
- mp_flags = 0;
- ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
- if ((ret < 0) && (ret != -ENOTSUP))
- return ret;
+ no_contig = mp->flags & MEMPOOL_F_NO_IOVA_CONTIG;
- /* update mempool capabilities */
- mp->flags |= mp_flags;
+ /*
+ * the following section calculates page shift and page size values.
+ *
+ * these values impact the result of calc_mem_size operation, which
+ * returns the amount of memory that should be allocated to store the
+ * desired number of objects. when not zero, it allocates more memory
+ * for the padding between objects, to ensure that an object does not
+ * cross a page boundary. in other words, page size/shift are to be set
+ * to zero if mempool elements won't care about page boundaries.
+ * there are several considerations for page size and page shift here.
+ *
+ * if we don't need our mempools to have physically contiguous objects,
+ * then just set page shift and page size to 0, because the user has
+ * indicated that there's no need to care about anything.
+ *
+ * if we do need contiguous objects, there is also an option to reserve
+ * the entire mempool memory as one contiguous block of memory, in
+ * which case the page shift and alignment wouldn't matter as well.
+ *
+ * if we require contiguous objects, but not necessarily the entire
+ * mempool reserved space to be contiguous, then there are two options.
+ *
+ * if our IO addresses are virtual, not actual physical (IOVA as VA
+ * case), then no page shift needed - our memory allocation will give us
+ * contiguous IO memory as far as the hardware is concerned, so
+ * act as if we're getting contiguous memory.
+ *
+ * if our IO addresses are physical, we may get memory from bigger
+ * pages, or we might get memory from smaller pages, and how much of it
+ * we require depends on whether we want bigger or smaller pages.
+ * However, requesting each and every memory size is too much work, so
+ * what we'll do instead is walk through the page sizes available, pick
+ * the smallest one and set up page shift to match that one. We will be
+ * wasting some space this way, but it's much nicer than looping around
+ * trying to reserve each and every page size.
+ *
+ * However, since size calculation will produce page-aligned sizes, it
+ * makes sense to first try and see if we can reserve the entire memzone
+ * in one contiguous chunk as well (otherwise we might end up wasting a
+ * 1G page on a 10MB memzone). If we fail to get enough contiguous
+ * memory, then we'll go and reserve space page-by-page.
+ */
+ no_pageshift = no_contig || rte_eal_iova_mode() == RTE_IOVA_VA;
+ try_contig = !no_contig && !no_pageshift && rte_eal_has_hugepages();
- if (rte_eal_has_hugepages()) {
- pg_shift = 0; /* not needed, zone is physically contiguous */
+ if (no_pageshift) {
pg_sz = 0;
- align = RTE_CACHE_LINE_SIZE;
+ pg_shift = 0;
+ } else if (try_contig) {
+ pg_sz = get_min_page_size();
+ pg_shift = rte_bsf32(pg_sz);
} else {
pg_sz = getpagesize();
pg_shift = rte_bsf32(pg_sz);
- align = pg_sz;
}
- total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
- size = rte_mempool_xmem_size(n, total_elt_sz, pg_shift,
- mp->flags);
+ size_t min_chunk_size;
+ unsigned int flags;
+
+ if (try_contig || no_pageshift)
+ mem_size = rte_mempool_ops_calc_mem_size(mp, n,
+ 0, &min_chunk_size, &align);
+ else
+ mem_size = rte_mempool_ops_calc_mem_size(mp, n,
+ pg_shift, &min_chunk_size, &align);
+
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto fail;
+ }
ret = snprintf(mz_name, sizeof(mz_name),
RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
@@ -585,30 +500,70 @@ rte_mempool_populate_default(struct rte_mempool *mp)
goto fail;
}
- mz = rte_memzone_reserve_aligned(mz_name, size,
- mp->socket_id, mz_flags, align);
- /* not enough memory, retry with the biggest zone we have */
- if (mz == NULL)
+ flags = mz_flags;
+
+ /* if we're trying to reserve contiguous memory, add appropriate
+ * memzone flag.
+ */
+ if (try_contig)
+ flags |= RTE_MEMZONE_IOVA_CONTIG;
+
+ mz = rte_memzone_reserve_aligned(mz_name, mem_size,
+ mp->socket_id, flags, align);
+
+ /* if we were trying to allocate contiguous memory, failed and
+ * minimum required contiguous chunk fits minimum page, adjust
+ * memzone size to the page size, and try again.
+ */
+ if (mz == NULL && try_contig && min_chunk_size <= pg_sz) {
+ try_contig = false;
+ flags &= ~RTE_MEMZONE_IOVA_CONTIG;
+
+ mem_size = rte_mempool_ops_calc_mem_size(mp, n,
+ pg_shift, &min_chunk_size, &align);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto fail;
+ }
+
+ mz = rte_memzone_reserve_aligned(mz_name, mem_size,
+ mp->socket_id, flags, align);
+ }
+ /* don't try reserving with 0 size if we were asked to reserve
+ * IOVA-contiguous memory.
+ */
+ if (min_chunk_size < (size_t)mem_size && mz == NULL) {
+ /* not enough memory, retry with the biggest zone we
+ * have
+ */
mz = rte_memzone_reserve_aligned(mz_name, 0,
- mp->socket_id, mz_flags, align);
+ mp->socket_id, flags,
+ RTE_MAX(pg_sz, align));
+ }
if (mz == NULL) {
ret = -rte_errno;
goto fail;
}
- if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG)
+ if (mz->len < min_chunk_size) {
+ rte_memzone_free(mz);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (no_contig)
iova = RTE_BAD_IOVA;
else
iova = mz->iova;
- if (rte_eal_has_hugepages())
+ if (no_pageshift || try_contig)
ret = rte_mempool_populate_iova(mp, mz->addr,
iova, mz->len,
rte_mempool_memchunk_mz_free,
(void *)(uintptr_t)mz);
else
ret = rte_mempool_populate_virt(mp, mz->addr,
- mz->len, pg_sz,
+ RTE_ALIGN_FLOOR(mz->len, pg_sz), pg_sz,
rte_mempool_memchunk_mz_free,
(void *)(uintptr_t)mz);
if (ret < 0) {
@@ -625,16 +580,18 @@ rte_mempool_populate_default(struct rte_mempool *mp)
}
/* return the memory size required for mempool objects in anonymous mem */
-static size_t
+static ssize_t
get_anon_size(const struct rte_mempool *mp)
{
- size_t size, total_elt_sz, pg_sz, pg_shift;
+ ssize_t size;
+ size_t pg_sz, pg_shift;
+ size_t min_chunk_size;
+ size_t align;
pg_sz = getpagesize();
pg_shift = rte_bsf32(pg_sz);
- total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
- size = rte_mempool_xmem_size(mp->size, total_elt_sz, pg_shift,
- mp->flags);
+ size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
+ &min_chunk_size, &align);
return size;
}
@@ -644,25 +601,45 @@ static void
rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
void *opaque)
{
- munmap(opaque, get_anon_size(memhdr->mp));
+ ssize_t size;
+
+ /*
+ * Recalculate the size here: memhdr->len holds the length of one
+ * contiguous chunk, which may be smaller than the whole mapping if
+ * the anonymous map was split into several contiguous chunks. The
+ * result must match what was calculated at populate time.
+ */
+ size = get_anon_size(memhdr->mp);
+ if (size < 0)
+ return;
+
+ munmap(opaque, size);
}
/* populate the mempool with an anonymous mapping */
int
rte_mempool_populate_anon(struct rte_mempool *mp)
{
- size_t size;
+ ssize_t size;
int ret;
char *addr;
/* mempool is already populated, error */
- if (!STAILQ_EMPTY(&mp->mem_list)) {
+ if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
rte_errno = EINVAL;
return 0;
}
- /* get chunk of virtually continuous memory */
+ ret = mempool_ops_alloc_once(mp);
+ if (ret != 0)
+ return ret;
+
size = get_anon_size(mp);
+ if (size < 0) {
+ rte_errno = -size;
+ return 0;
+ }
+
+ /* get a chunk of virtually contiguous memory */
addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
@@ -795,6 +772,12 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
+ /* asked for zero items */
+ if (n == 0) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
/* asked cache too big */
if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
@@ -944,66 +927,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
return NULL;
}
-/*
- * Create the mempool over already allocated chunk of memory.
- * That external memory buffer can consists of physically disjoint pages.
- * Setting vaddr to NULL, makes mempool to fallback to rte_mempool_create()
- * behavior.
- */
-struct rte_mempool *
-rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags, void *vaddr,
- const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift)
-{
- struct rte_mempool *mp = NULL;
- int ret;
-
- /* no virtual address supplied, use rte_mempool_create() */
- if (vaddr == NULL)
- return rte_mempool_create(name, n, elt_size, cache_size,
- private_data_size, mp_init, mp_init_arg,
- obj_init, obj_init_arg, socket_id, flags);
-
- /* check that we have both VA and PA */
- if (iova == NULL) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- /* Check that pg_shift parameter is valid. */
- if (pg_shift > MEMPOOL_PG_SHIFT_MAX) {
- rte_errno = EINVAL;
- return NULL;
- }
-
- mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
- private_data_size, socket_id, flags);
- if (mp == NULL)
- return NULL;
-
- /* call the mempool priv initializer */
- if (mp_init)
- mp_init(mp, mp_init_arg);
-
- ret = rte_mempool_populate_iova_tab(mp, vaddr, iova, pg_num, pg_shift,
- NULL, NULL);
- if (ret < 0 || ret != (int)mp->size)
- goto fail;
-
- /* call the object initializers */
- if (obj_init)
- rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
-
- return mp;
-
- fail:
- rte_mempool_free(mp);
- return NULL;
-}
-
/* Return the number of entries in the mempool */
unsigned int
rte_mempool_avail_count(const struct rte_mempool *mp)
@@ -1132,6 +1055,36 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
#endif
}
+void
+rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
+ void * const *first_obj_table_const, unsigned int n, int free)
+{
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_info info;
+ const size_t total_elt_sz =
+ mp->header_size + mp->elt_size + mp->trailer_size;
+ unsigned int i, j;
+
+ rte_mempool_ops_get_info(mp, &info);
+
+ for (i = 0; i < n; ++i) {
+ void *first_obj = first_obj_table_const[i];
+
+ for (j = 0; j < info.contig_block_size; ++j) {
+ void *obj;
+
+ obj = (void *)((uintptr_t)first_obj + j * total_elt_sz);
+ rte_mempool_check_cookies(mp, &obj, 1, free);
+ }
+ }
+#else
+ RTE_SET_USED(mp);
+ RTE_SET_USED(first_obj_table_const);
+ RTE_SET_USED(n);
+ RTE_SET_USED(free);
+#endif
+}
+
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
static void
mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
@@ -1197,6 +1150,7 @@ void
rte_mempool_dump(FILE *f, struct rte_mempool *mp)
{
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_info info;
struct rte_mempool_debug_stats sum;
unsigned lcore_id;
#endif
@@ -1238,6 +1192,7 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
/* sum and dump statistics */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_ops_get_info(mp, &info);
memset(&sum, 0, sizeof(sum));
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
sum.put_bulk += mp->stats[lcore_id].put_bulk;
@@ -1246,6 +1201,8 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
+ sum.get_success_blks += mp->stats[lcore_id].get_success_blks;
+ sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks;
}
fprintf(f, " stats:\n");
fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk);
@@ -1254,6 +1211,11 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp)
fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs);
fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
fprintf(f, " get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
+ if (info.contig_block_size > 0) {
+ fprintf(f, " get_success_blks=%"PRIu64"\n",
+ sum.get_success_blks);
+ fprintf(f, " get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
+ }
#else
fprintf(f, " no statistics available\n");
#endif
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 8b1b7f7e..7c9cd9a2 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -70,6 +70,10 @@ struct rte_mempool_debug_stats {
uint64_t get_success_objs; /**< Objects successfully allocated. */
uint64_t get_fail_bulk; /**< Failed allocation number. */
uint64_t get_fail_objs; /**< Objects that failed to be allocated. */
+ /** Successful allocation number of contiguous blocks. */
+ uint64_t get_success_blks;
+ /** Failed allocation number of contiguous blocks. */
+ uint64_t get_fail_blks;
} __rte_cache_aligned;
#endif
@@ -190,6 +194,20 @@ struct rte_mempool_memhdr {
};
/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Additional information about the mempool
+ *
+ * The structure is cache-line aligned so that small additions do not
+ * change its size and break the ABI.
+ */
+struct rte_mempool_info {
+ /** Number of objects in the contiguous block */
+ unsigned int contig_block_size;
+} __rte_cache_aligned;
+
+/**
* The RTE mempool structure.
*/
struct rte_mempool {
@@ -244,25 +262,8 @@ struct rte_mempool {
#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
-#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
-/**
- * This capability flag is advertised by a mempool handler, if the whole
- * memory area containing the objects must be physically contiguous.
- * Note: This flag should not be passed by application.
- */
-#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040
-/**
- * This capability flag is advertised by a mempool handler. Used for a case
- * where mempool driver wants object start address(vaddr) aligned to block
- * size(/ total element size).
- *
- * Note:
- * - This flag should not be passed by application.
- * Flag used for mempool driver only.
- * - Mempool driver must also set MEMPOOL_F_CAPA_PHYS_CONTIG flag along with
- * MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS.
- */
-#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
+#define MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /**< Don't need IOVA contiguous objs. */
+#define MEMPOOL_F_NO_PHYS_CONTIG MEMPOOL_F_NO_IOVA_CONTIG /* deprecated */
/**
* @internal When debug is enabled, store some statistics.
@@ -282,8 +283,16 @@ struct rte_mempool {
mp->stats[__lcore_id].name##_bulk += 1; \
} \
} while(0)
+#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do { \
+ unsigned int __lcore_id = rte_lcore_id(); \
+ if (__lcore_id < RTE_MAX_LCORE) { \
+ mp->stats[__lcore_id].name##_blks += n; \
+ mp->stats[__lcore_id].name##_bulk += 1; \
+ } \
+ } while (0)
#else
#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#define __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) do {} while (0)
#endif
/**
@@ -351,6 +360,38 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * @internal Check contiguous object blocks and update cookies or panic.
+ *
+ * @param mp
+ * Pointer to the memory pool.
+ * @param first_obj_table_const
+ * Pointer to a table of void * pointers (first object of the contiguous
+ * object blocks).
+ * @param n
+ * Number of contiguous object blocks.
+ * @param free
+ * - 0: object is supposed to be allocated, mark it as free
+ * - 1: object is supposed to be free, mark it as allocated
+ * - 2: just check that cookie is valid (free or allocated)
+ */
+void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
+ void * const *first_obj_table_const, unsigned int n, int free);
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+ free) \
+ rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+ free)
+#else
+#define __mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \
+ free) \
+ do {} while (0)
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
#define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */
/**
@@ -383,21 +424,135 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
void **obj_table, unsigned int n);
/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Dequeue a number of contiguous object blocks from the external pool.
+ */
+typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
+ void **first_obj_table, unsigned int n);
+
+/**
* Return the number of available objects in the external pool.
*/
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
/**
- * Get the mempool capabilities.
+ * Calculate memory size required to store given number of objects.
+ *
+ * If mempool objects are not required to be IOVA-contiguous
+ * (the flag MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines
+ * the size of a virtually contiguous chunk. Otherwise, if mempool
+ * objects must be IOVA-contiguous (the flag MEMPOOL_F_NO_IOVA_CONTIG
+ * is clear), min_chunk_size defines the size of an IOVA-contiguous
+ * chunk.
+ *
+ * @param[in] mp
+ * Pointer to the memory pool.
+ * @param[in] obj_num
+ * Number of objects.
+ * @param[in] pg_shift
+ * LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
+ * @return
+ * Required memory size aligned at page boundary.
+ */
+typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
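As a rough illustration of this contract, the following is a minimal sketch of a driver-side callback for hardware that needs the whole pool in a single IOVA-contiguous chunk. The name my_pmd_calc_mem_size is hypothetical, and delegating the arithmetic to the default op is only one possible strategy, not something this patch prescribes:

static ssize_t
my_pmd_calc_mem_size(const struct rte_mempool *mp, uint32_t obj_num,
		uint32_t pg_shift, size_t *min_chunk_size, size_t *align)
{
	ssize_t mem_size;

	RTE_SET_USED(pg_shift); /* page boundaries are irrelevant here */

	/* reuse the default arithmetic with pg_shift == 0 */
	mem_size = rte_mempool_op_calc_mem_size_default(mp, obj_num, 0,
			min_chunk_size, align);
	if (mem_size < 0)
		return mem_size;

	/* the (assumed) hardware wants everything in one chunk */
	*min_chunk_size = mem_size;
	return mem_size;
}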
+
+/**
+ * Default way to calculate memory size required to store given number of
+ * objects.
+ *
+ * If page boundaries may be ignored, the result is simply the total
+ * object size (header, element and trailer) multiplied by the number
+ * of objects. Otherwise, it is the number of pages required to store
+ * the given number of objects without any object crossing a page
+ * boundary.
+ *
+ * Note that if object size is bigger than page size, then it assumes
+ * that pages are grouped in subsets of physically contiguous pages big
+ * enough to store at least one object.
+ *
+ * Minimum size of memory chunk is a maximum of the page size and total
+ * element size.
+ *
+ * Required memory chunk alignment is a maximum of page size and cache
+ * line size.
*/
-typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
- unsigned int *flags);
+ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
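As a concrete worked example of the default arithmetic, with assumed numbers that do not come from this patch:

/*
 * Assume total_elt_sz = 2176 bytes (header + element + trailer),
 * pg_shift = 12 (4 KiB pages) and obj_num = 1000. Only one object
 * fits per page, so:
 *
 *   obj_per_page = 4096 / 2176        = 1
 *   pg_num       = (1000 + 1 - 1) / 1 = 1000
 *   mem_size     = 1000 << 12         = 4096000 bytes
 *
 *   *min_chunk_size = max(4096, 2176)                = 4096
 *   *align          = max(RTE_CACHE_LINE_SIZE, 4096) = 4096
 */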
/**
- * Notify new memory area to mempool.
+ * Function to be called for each populated object.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] opaque
+ * An opaque pointer passed to iterator.
+ * @param[in] vaddr
+ * Object virtual address.
+ * @param[in] iova
+ * IO virtual address of the object, or RTE_BAD_IOVA if unknown.
*/
-typedef int (*rte_mempool_ops_register_memory_area_t)
-(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
+typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp,
+ void *opaque, void *vaddr, rte_iova_t iova);
+
+/**
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * Populated objects should be enqueued to the pool, e.g. using
+ * rte_mempool_ops_enqueue_bulk().
+ *
+ * If the given IO address is unknown (iova = RTE_BAD_IOVA),
+ * the chunk doesn't need to be physically contiguous (only virtually),
+ * and allocated objects may span two pages.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] max_objs
+ * Maximum number of objects to be populated.
+ * @param[in] vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ * The IO address corresponding to vaddr, or RTE_BAD_IOVA if unknown.
+ * @param[in] len
+ * The length of memory in bytes.
+ * @param[in] obj_cb
+ * Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * The number of objects added on success.
+ * On error, no objects are populated and a negative errno is returned.
+ */
+typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp,
+ unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
+
+/**
+ * Default way to populate memory pool object using provided memory
+ * chunk: just slice objects one by one.
+ */
+int rte_mempool_op_populate_default(struct rte_mempool *mp,
+ unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
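To show how the two default ops are meant to compose, here is a hedged sketch of a driver populate callback that first registers the chunk with its hardware (roughly what the removed register_memory_area callback used to do) and then defers to the default; my_pmd_register_memory is an assumed helper, not a DPDK API:

static int
my_pmd_populate(struct rte_mempool *mp, unsigned int max_objs, void *vaddr,
		rte_iova_t iova, size_t len,
		rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
	int ret;

	/* assumed helper: make the new memory area known to the HW */
	ret = my_pmd_register_memory(mp, vaddr, iova, len);
	if (ret != 0)
		return ret;

	/* then slice and enqueue the objects the default way */
	return rte_mempool_op_populate_default(mp, max_objs, vaddr, iova,
			len, obj_cb, obj_cb_arg);
}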
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get some additional information about a mempool.
+ */
+typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp,
+ struct rte_mempool_info *info);
+
/** Structure defining mempool operations structure */
struct rte_mempool_ops {
@@ -408,13 +563,23 @@ struct rte_mempool_ops {
rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
rte_mempool_get_count get_count; /**< Get qty of available objs. */
/**
- * Get the mempool capabilities
+ * Optional callback to calculate memory size required to
+ * store specified number of objects.
+ */
+ rte_mempool_calc_mem_size_t calc_mem_size;
+ /**
+ * Optional callback to populate mempool objects using
+ * provided memory chunk.
+ */
+ rte_mempool_populate_t populate;
+ /**
+ * Get mempool info
*/
- rte_mempool_get_capabilities_t get_capabilities;
+ rte_mempool_get_info_t get_info;
/**
- * Notify new memory area to mempool
+ * Dequeue a number of contiguous object blocks.
*/
- rte_mempool_ops_register_memory_area_t register_memory_area;
+ rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} __rte_cache_aligned;
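For orientation, a sketch of how a driver might fill the reworked ops structure; all my_pmd_* callbacks are illustrative names, not part of this patch:

static const struct rte_mempool_ops my_pmd_ops = {
	.name = "my_pmd",
	.alloc = my_pmd_alloc,
	.free = my_pmd_free,
	.enqueue = my_pmd_enqueue,
	.dequeue = my_pmd_dequeue,
	.get_count = my_pmd_get_count,
	/* optional: the defaults are used when these are left NULL */
	.calc_mem_size = my_pmd_calc_mem_size,
	.populate = my_pmd_populate,
	/* optional: only needed for contiguous block dequeue support */
	.get_info = my_pmd_get_info,
	.dequeue_contig_blocks = my_pmd_dequeue_contig_blocks,
};

MEMPOOL_REGISTER_OPS(my_pmd_ops);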
#define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */
@@ -493,6 +658,30 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp,
}
/**
+ * @internal Wrapper for mempool_ops dequeue_contig_blocks callback.
+ *
+ * @param[in] mp
+ * Pointer to the memory pool.
+ * @param[out] first_obj_table
+ * Pointer to a table of void * pointers (first objects).
+ * @param[in] n
+ * Number of blocks to get.
+ * @return
+ * - 0: Success; got n objects.
+ * - <0: Error; code of dequeue function.
+ */
+static inline int
+rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp,
+ void **first_obj_table, unsigned int n)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+ RTE_ASSERT(ops->dequeue_contig_blocks != NULL);
+ return ops->dequeue_contig_blocks(mp, first_obj_table, n);
+}
+
+/**
* @internal wrapper for mempool_ops enqueue callback.
*
* @param mp
@@ -527,41 +716,74 @@ unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
/**
- * @internal wrapper for mempool_ops get_capabilities callback.
+ * @internal wrapper for mempool_ops calc_mem_size callback.
+ * API to calculate the size of memory required to store the specified
+ * number of objects.
*
- * @param mp [in]
+ * @param[in] mp
* Pointer to the memory pool.
- * @param flags [out]
- * Pointer to the mempool flags.
+ * @param[in] obj_num
+ * Number of objects.
+ * @param[in] pg_shift
+ * LOG2 of the physical page size. If set to 0, ignore page boundaries.
+ * @param[out] min_chunk_size
+ * Location for minimum size of the memory chunk which may be used to
+ * store memory pool objects.
+ * @param[out] align
+ * Location for required memory chunk alignment.
* @return
- * - 0: Success; The mempool driver has advertised his pool capabilities in
- * flags param.
- * - -ENOTSUP - doesn't support get_capabilities ops (valid case).
- * - Otherwise, pool create fails.
+ * Required memory size aligned at page boundary.
*/
-int
-rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags);
+ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align);
+
/**
- * @internal wrapper for mempool_ops register_memory_area callback.
- * API to notify the mempool handler when a new memory area is added to pool.
+ * @internal wrapper for mempool_ops populate callback.
*
- * @param mp
+ * Populate memory pool objects using provided memory chunk.
+ *
+ * @param[in] mp
+ * A pointer to the mempool structure.
+ * @param[in] max_objs
+ * Maximum number of objects to be populated.
+ * @param[in] vaddr
+ * The virtual address of memory that should be used to store objects.
+ * @param[in] iova
+ * The IO address corresponding to vaddr, or RTE_BAD_IOVA if unknown.
+ * @param[in] len
+ * The length of memory in bytes.
+ * @param[in] obj_cb
+ * Callback function to be executed for each populated object.
+ * @param[in] obj_cb_arg
+ * An opaque pointer passed to the callback function.
+ * @return
+ * The number of objects added on success.
+ * On error, no objects are populated and a negative errno is returned.
+ */
+int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb,
+ void *obj_cb_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Wrapper for mempool_ops get_info callback.
+ *
+ * @param[in] mp
* Pointer to the memory pool.
- * @param vaddr
- * Pointer to the buffer virtual address.
- * @param iova
- * Pointer to the buffer IO address.
- * @param len
- * Pool size.
+ * @param[out] info
+ * Pointer to the rte_mempool_info structure
* @return
- * - 0: Success;
- * - -ENOTSUP - doesn't support register_memory_area ops (valid error case).
- * - Otherwise, rte_mempool_populate_phys fails thus pool create fails.
+ * - 0: Success; the mempool driver supports retrieving supplementary
+ * mempool information.
+ * - -ENOTSUP: the driver does not support the get_info op (valid case).
*/
-int
-rte_mempool_ops_register_memory_area(const struct rte_mempool *mp,
- char *vaddr, rte_iova_t iova, size_t len);
+__rte_experimental
+int rte_mempool_ops_get_info(const struct rte_mempool *mp,
+ struct rte_mempool_info *info);
/**
* @internal wrapper for mempool_ops free callback.
@@ -710,8 +932,8 @@ typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
* - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
* when using rte_mempool_get() or rte_mempool_get_bulk() is
* "single-consumer". Otherwise, it is "multi-consumers".
- * - MEMPOOL_F_NO_PHYS_CONTIG: If set, allocated objects won't
- * necessarily be contiguous in physical memory.
+ * - MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't
+ * necessarily be contiguous in IO memory.
* @return
* The pointer to the new allocated mempool, on success. NULL on error
* with rte_errno set appropriately. Possible rte_errno values include:
@@ -730,72 +952,6 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
int socket_id, unsigned flags);
/**
- * Create a new mempool named *name* in memory.
- *
- * The pool contains n elements of elt_size. Its size is set to n.
- * This function uses ``memzone_reserve()`` to allocate the mempool header
- * (and the objects if vaddr is NULL).
- * Depending on the input parameters, mempool elements can be either allocated
- * together with the mempool header, or an externally provided memory buffer
- * could be used to store mempool objects. In later case, that external
- * memory buffer can consist of set of disjoint physical pages.
- *
- * @param name
- * The name of the mempool.
- * @param n
- * The number of elements in the mempool. The optimum size (in terms of
- * memory usage) for a mempool is when n is a power of two minus one:
- * n = (2^q - 1).
- * @param elt_size
- * The size of each element.
- * @param cache_size
- * Size of the cache. See rte_mempool_create() for details.
- * @param private_data_size
- * The size of the private data appended after the mempool
- * structure. This is useful for storing some private data after the
- * mempool structure, as is done for rte_mbuf_pool for example.
- * @param mp_init
- * A function pointer that is called for initialization of the pool,
- * before object initialization. The user can initialize the private
- * data in this function if needed. This parameter can be NULL if
- * not needed.
- * @param mp_init_arg
- * An opaque pointer to data that can be used in the mempool
- * constructor function.
- * @param obj_init
- * A function called for each object at initialization of the pool.
- * See rte_mempool_create() for details.
- * @param obj_init_arg
- * An opaque pointer passed to the object constructor function.
- * @param socket_id
- * The *socket_id* argument is the socket identifier in the case of
- * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
- * constraint for the reserved zone.
- * @param flags
- * Flags controlling the behavior of the mempool. See
- * rte_mempool_create() for details.
- * @param vaddr
- * Virtual address of the externally allocated memory buffer.
- * Will be used to store mempool objects.
- * @param iova
- * Array of IO addresses of the pages that comprises given memory buffer.
- * @param pg_num
- * Number of elements in the iova array.
- * @param pg_shift
- * LOG2 of the physical pages size.
- * @return
- * The pointer to the new allocated mempool, on success. NULL on error
- * with rte_errno set appropriately. See rte_mempool_create() for details.
- */
-struct rte_mempool *
-rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
- unsigned cache_size, unsigned private_data_size,
- rte_mempool_ctor_t *mp_init, void *mp_init_arg,
- rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
- int socket_id, unsigned flags, void *vaddr,
- const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift);
-
-/**
* Create an empty mempool
*
* The mempool is allocated and initialized, but it is not populated: no
@@ -877,46 +1033,6 @@ int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque);
-__rte_deprecated
-int rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
- phys_addr_t paddr, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
- void *opaque);
-
-/**
- * Add physical memory for objects in the pool at init
- *
- * Add a virtually contiguous memory chunk in the pool where objects can
- * be instantiated. The IO addresses corresponding to the virtual
- * area are described in iova[], pg_num, pg_shift.
- *
- * @param mp
- * A pointer to the mempool structure.
- * @param vaddr
- * The virtual address of memory that should be used to store objects.
- * @param iova
- * An array of IO addresses of each page composing the virtual area.
- * @param pg_num
- * Number of elements in the iova array.
- * @param pg_shift
- * LOG2 of the physical pages size.
- * @param free_cb
- * The callback used to free this chunk when destroying the mempool.
- * @param opaque
- * An opaque argument passed to free_cb.
- * @return
- * The number of objects added on success.
- * On error, the chunks are not added in the memory list of the
- * mempool and a negative errno is returned.
- */
-int rte_mempool_populate_iova_tab(struct rte_mempool *mp, char *vaddr,
- const rte_iova_t iova[], uint32_t pg_num, uint32_t pg_shift,
- rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
-
-__rte_deprecated
-int rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
- const phys_addr_t paddr[], uint32_t pg_num, uint32_t pg_shift,
- rte_mempool_memchunk_free_cb_t *free_cb, void *opaque);
-
/**
* Add virtually contiguous memory for objects in the pool at init
*
@@ -1049,22 +1165,6 @@ void
rte_mempool_cache_free(struct rte_mempool_cache *cache);
/**
- * Flush a user-owned mempool cache to the specified mempool.
- *
- * @param cache
- * A pointer to the mempool cache.
- * @param mp
- * A pointer to the mempool.
- */
-static __rte_always_inline void
-rte_mempool_cache_flush(struct rte_mempool_cache *cache,
- struct rte_mempool *mp)
-{
- rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
- cache->len = 0;
-}
-
-/**
* Get a pointer to the per-lcore default mempool cache.
*
* @param mp
@@ -1087,6 +1187,26 @@ rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id)
}
/**
+ * Flush a user-owned mempool cache to the specified mempool.
+ *
+ * @param cache
+ * A pointer to the mempool cache.
+ * @param mp
+ * A pointer to the mempool.
+ */
+static __rte_always_inline void
+rte_mempool_cache_flush(struct rte_mempool_cache *cache,
+ struct rte_mempool *mp)
+{
+ if (cache == NULL)
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ if (cache == NULL || cache->len == 0)
+ return;
+ rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len);
+ cache->len = 0;
+}
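A short usage sketch of the hardened flush path, assuming a pool mp already exists; error handling is omitted:

struct rte_mempool_cache *cache;

cache = rte_mempool_cache_create(256, SOCKET_ID_ANY);
/* ... rte_mempool_generic_get()/rte_mempool_generic_put() with cache ... */

/* a NULL cache now resolves to the per-lcore default cache, and an
 * empty cache is a no-op
 */
rte_mempool_cache_flush(cache, mp);
rte_mempool_cache_free(cache);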
+
+/**
* @internal Put several objects back in the mempool; used internally.
* @param mp
* A pointer to the mempool structure.
@@ -1366,6 +1486,49 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
}
/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Get contiguous blocks of objects from the mempool.
+ *
+ * If a cache is enabled, consider flushing it first so that objects
+ * are reused as soon as possible.
+ *
+ * The application should check that the driver supports the operation
+ * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
+ * is not zero.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param first_obj_table
+ * A pointer to a pointer to the first object in each block.
+ * @param n
+ * The number of blocks to get from the mempool.
+ * @return
+ * - 0: Success; blocks taken.
+ * - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
+ * - -EOPNOTSUPP: The mempool driver does not support block dequeue
+ */
+static __rte_always_inline int
+__rte_experimental
+rte_mempool_get_contig_blocks(struct rte_mempool *mp,
+ void **first_obj_table, unsigned int n)
+{
+ int ret;
+
+ ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
+ if (ret == 0) {
+ __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_success, n);
+ __mempool_contig_blocks_check_cookies(mp, first_obj_table, n,
+ 1);
+ } else {
+ __MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, get_fail, n);
+ }
+
+ return ret;
+}
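A usage sketch combining the two experimental calls, assuming a pool mp whose driver supports block dequeue; use_blocks is a hypothetical consumer:

struct rte_mempool_info info;
void *blocks[8];

if (rte_mempool_ops_get_info(mp, &info) == 0 &&
		info.contig_block_size > 0 &&
		rte_mempool_get_contig_blocks(mp, blocks, 8) == 0) {
	/* each blocks[i] points at the first of info.contig_block_size
	 * contiguous objects
	 */
	use_blocks(blocks, 8, info.contig_block_size);
}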
+
+/**
* Return the number of entries in the mempool.
*
* When cache is enabled, this function has to browse the length of
@@ -1439,7 +1602,7 @@ rte_mempool_empty(const struct rte_mempool *mp)
* A pointer (virtual address) to the element of the pool.
* @return
* The IO address of the elt element.
- * If the mempool was created with MEMPOOL_F_NO_PHYS_CONTIG, the
+ * If the mempool was created with MEMPOOL_F_NO_IOVA_CONTIG, the
* returned value is RTE_BAD_IOVA.
*/
static inline rte_iova_t
@@ -1451,13 +1614,6 @@ rte_mempool_virt2iova(const void *elt)
return hdr->iova;
}
-__rte_deprecated
-static inline phys_addr_t
-rte_mempool_virt2phy(__rte_unused const struct rte_mempool *mp, const void *elt)
-{
- return rte_mempool_virt2iova(elt);
-}
-
/**
* Check the consistency of mempool objects.
*
@@ -1527,64 +1683,6 @@ uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
struct rte_mempool_objsz *sz);
/**
- * Get the size of memory required to store mempool elements.
- *
- * Calculate the maximum amount of memory required to store given number
- * of objects. Assume that the memory buffer will be aligned at page
- * boundary.
- *
- * Note that if object size is bigger then page size, then it assumes
- * that pages are grouped in subsets of physically continuous pages big
- * enough to store at least one object.
- *
- * @param elt_num
- * Number of elements.
- * @param total_elt_sz
- * The size of each element, including header and trailer, as returned
- * by rte_mempool_calc_obj_size().
- * @param pg_shift
- * LOG2 of the physical pages size. If set to 0, ignore page boundaries.
- * @param flags
- * The mempool flags.
- * @return
- * Required memory size aligned at page boundary.
- */
-size_t rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz,
- uint32_t pg_shift, unsigned int flags);
-
-/**
- * Get the size of memory required to store mempool elements.
- *
- * Calculate how much memory would be actually required with the given
- * memory footprint to store required number of objects.
- *
- * @param vaddr
- * Virtual address of the externally allocated memory buffer.
- * Will be used to store mempool objects.
- * @param elt_num
- * Number of elements.
- * @param total_elt_sz
- * The size of each element, including header and trailer, as returned
- * by rte_mempool_calc_obj_size().
- * @param iova
- * Array of IO addresses of the pages that comprises given memory buffer.
- * @param pg_num
- * Number of elements in the iova array.
- * @param pg_shift
- * LOG2 of the physical pages size.
- * @param flags
- * The mempool flags.
- * @return
- * On success, the number of bytes needed to store given number of
- * objects, aligned to the given page size. If the provided memory
- * buffer is too small, return a negative value whose absolute value
- * is the actual number of elements that can be stored in that buffer.
- */
-ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num,
- size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
- uint32_t pg_shift, unsigned int flags);
-
-/**
* Walk list of all memory pools
*
* @param func
diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
index 0732255c..a27e1fa5 100644
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -57,8 +57,10 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
ops->enqueue = h->enqueue;
ops->dequeue = h->dequeue;
ops->get_count = h->get_count;
- ops->get_capabilities = h->get_capabilities;
- ops->register_memory_area = h->register_memory_area;
+ ops->calc_mem_size = h->calc_mem_size;
+ ops->populate = h->populate;
+ ops->get_info = h->get_info;
+ ops->dequeue_contig_blocks = h->dequeue_contig_blocks;
rte_spinlock_unlock(&rte_mempool_ops_table.sl);
@@ -97,32 +99,57 @@ rte_mempool_ops_get_count(const struct rte_mempool *mp)
return ops->get_count(mp);
}
-/* wrapper to get external mempool capabilities. */
+/* wrapper to calculate the memory size required to store given number of objects */
+ssize_t
+rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ struct rte_mempool_ops *ops;
+
+ ops = rte_mempool_get_ops(mp->ops_index);
+
+ if (ops->calc_mem_size == NULL)
+ return rte_mempool_op_calc_mem_size_default(mp, obj_num,
+ pg_shift, min_chunk_size, align);
+
+ return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
+}
+
+/* wrapper to populate memory pool objects using provided memory chunk */
int
-rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags)
+rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb,
+ void *obj_cb_arg)
{
struct rte_mempool_ops *ops;
ops = rte_mempool_get_ops(mp->ops_index);
- RTE_FUNC_PTR_OR_ERR_RET(ops->get_capabilities, -ENOTSUP);
- return ops->get_capabilities(mp, flags);
+ if (ops->populate == NULL)
+ return rte_mempool_op_populate_default(mp, max_objs, vaddr,
+ iova, len, obj_cb,
+ obj_cb_arg);
+
+ return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb,
+ obj_cb_arg);
}
-/* wrapper to notify new memory area to external mempool */
+/* wrapper to get additional mempool info */
int
-rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
- rte_iova_t iova, size_t len)
+rte_mempool_ops_get_info(const struct rte_mempool *mp,
+ struct rte_mempool_info *info)
{
struct rte_mempool_ops *ops;
ops = rte_mempool_get_ops(mp->ops_index);
- RTE_FUNC_PTR_OR_ERR_RET(ops->register_memory_area, -ENOTSUP);
- return ops->register_memory_area(mp, vaddr, iova, len);
+ RTE_FUNC_PTR_OR_ERR_RET(ops->get_info, -ENOTSUP);
+ return ops->get_info(mp, info);
}
+
/* sets mempool ops previously registered by rte_mempool_register_ops. */
int
rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
diff --git a/lib/librte_mempool/rte_mempool_ops_default.c b/lib/librte_mempool/rte_mempool_ops_default.c
new file mode 100644
index 00000000..4e2bfc82
--- /dev/null
+++ b/lib/librte_mempool/rte_mempool_ops_default.c
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ * Copyright(c) 2018 Solarflare Communications Inc.
+ */
+
+#include <rte_mempool.h>
+
+ssize_t
+rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp,
+ uint32_t obj_num, uint32_t pg_shift,
+ size_t *min_chunk_size, size_t *align)
+{
+ size_t total_elt_sz;
+ size_t obj_per_page, pg_num, pg_sz;
+ size_t mem_size;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+ if (total_elt_sz == 0) {
+ mem_size = 0;
+ } else if (pg_shift == 0) {
+ mem_size = total_elt_sz * obj_num;
+ } else {
+ pg_sz = (size_t)1 << pg_shift;
+ obj_per_page = pg_sz / total_elt_sz;
+ if (obj_per_page == 0) {
+ /*
+ * Note that if object size is bigger than page size,
+ * then it is assumed that pages are grouped in subsets
+ * of physically contiguous pages big enough to store
+ * at least one object.
+ */
+ mem_size =
+ RTE_ALIGN_CEIL(total_elt_sz, pg_sz) * obj_num;
+ } else {
+ pg_num = (obj_num + obj_per_page - 1) / obj_per_page;
+ mem_size = pg_num << pg_shift;
+ }
+ }
+
+ *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
+
+ *align = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, (size_t)1 << pg_shift);
+
+ return mem_size;
+}
+
+int
+rte_mempool_op_populate_default(struct rte_mempool *mp, unsigned int max_objs,
+ void *vaddr, rte_iova_t iova, size_t len,
+ rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
+{
+ size_t total_elt_sz;
+ size_t off;
+ unsigned int i;
+ void *obj;
+
+ total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
+
+ for (off = 0, i = 0; off + total_elt_sz <= len && i < max_objs; i++) {
+ off += mp->header_size;
+ obj = (char *)vaddr + off;
+ obj_cb(mp, obj_cb_arg, obj,
+ (iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off));
+ rte_mempool_ops_enqueue_bulk(mp, &obj, 1);
+ off += mp->elt_size + mp->trailer_size;
+ }
+
+ return i;
+}
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 62b76f91..17cbca46 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -8,9 +8,6 @@ DPDK_2.0 {
rte_mempool_list_dump;
rte_mempool_lookup;
rte_mempool_walk;
- rte_mempool_xmem_create;
- rte_mempool_xmem_size;
- rte_mempool_xmem_usage;
local: *;
};
@@ -34,8 +31,6 @@ DPDK_16.07 {
rte_mempool_ops_table;
rte_mempool_populate_anon;
rte_mempool_populate_default;
- rte_mempool_populate_phys;
- rte_mempool_populate_phys_tab;
rte_mempool_populate_virt;
rte_mempool_register_ops;
rte_mempool_set_ops_byname;
@@ -45,9 +40,21 @@ DPDK_16.07 {
DPDK_17.11 {
global:
- rte_mempool_ops_get_capabilities;
- rte_mempool_ops_register_memory_area;
rte_mempool_populate_iova;
- rte_mempool_populate_iova_tab;
} DPDK_16.07;
+
+DPDK_18.05 {
+ global:
+
+ rte_mempool_contig_blocks_check_cookies;
+ rte_mempool_op_calc_mem_size_default;
+ rte_mempool_op_populate_default;
+
+} DPDK_17.11;
+
+EXPERIMENTAL {
+ global:
+
+ rte_mempool_ops_get_info;
+};
diff --git a/lib/librte_meter/Makefile b/lib/librte_meter/Makefile
index 3b80c6ac..2dc071e8 100644
--- a/lib/librte_meter/Makefile
+++ b/lib/librte_meter/Makefile
@@ -16,7 +16,7 @@ LDLIBS += -lrte_eal
EXPORT_MAP := rte_meter_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
diff --git a/lib/librte_meter/meson.build b/lib/librte_meter/meson.build
index 646fd4d4..947bc19e 100644
--- a/lib/librte_meter/meson.build
+++ b/lib/librte_meter/meson.build
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
sources = files('rte_meter.c')
headers = files('rte_meter.h')
diff --git a/lib/librte_meter/rte_meter.c b/lib/librte_meter/rte_meter.c
index 332c5190..473f69ab 100644
--- a/lib/librte_meter/rte_meter.c
+++ b/lib/librte_meter/rte_meter.c
@@ -31,61 +31,82 @@ rte_meter_get_tb_params(uint64_t hz, uint64_t rate, uint64_t *tb_period, uint64_
}
int
-rte_meter_srtcm_config(struct rte_meter_srtcm *m, struct rte_meter_srtcm_params *params)
+rte_meter_srtcm_profile_config(struct rte_meter_srtcm_profile *p,
+ struct rte_meter_srtcm_params *params)
{
- uint64_t hz;
+ uint64_t hz = rte_get_tsc_hz();
/* Check input parameters */
- if ((m == NULL) || (params == NULL)) {
- return -1;
- }
+ if ((p == NULL) ||
+ (params == NULL) ||
+ (params->cir == 0) ||
+ ((params->cbs == 0) && (params->ebs == 0)))
+ return -EINVAL;
- if ((params->cir == 0) || ((params->cbs == 0) && (params->ebs == 0))) {
- return -2;
- }
+ /* Initialize srTCM profile structure */
+ p->cbs = params->cbs;
+ p->ebs = params->ebs;
+ rte_meter_get_tb_params(hz, params->cir, &p->cir_period,
+ &p->cir_bytes_per_period);
+
+ return 0;
+}
+
+int
+rte_meter_srtcm_config(struct rte_meter_srtcm *m,
+ struct rte_meter_srtcm_profile *p)
+{
+ /* Check input parameters */
+ if ((m == NULL) || (p == NULL))
+ return -EINVAL;
/* Initialize srTCM run-time structure */
- hz = rte_get_tsc_hz();
m->time = rte_get_tsc_cycles();
- m->tc = m->cbs = params->cbs;
- m->te = m->ebs = params->ebs;
- rte_meter_get_tb_params(hz, params->cir, &m->cir_period, &m->cir_bytes_per_period);
-
- RTE_LOG(INFO, METER, "Low level srTCM config: \n"
- "\tCIR period = %" PRIu64 ", CIR bytes per period = %" PRIu64 "\n",
- m->cir_period, m->cir_bytes_per_period);
+ m->tc = p->cbs;
+ m->te = p->ebs;
return 0;
}
int
-rte_meter_trtcm_config(struct rte_meter_trtcm *m, struct rte_meter_trtcm_params *params)
+rte_meter_trtcm_profile_config(struct rte_meter_trtcm_profile *p,
+ struct rte_meter_trtcm_params *params)
{
- uint64_t hz;
+ uint64_t hz = rte_get_tsc_hz();
/* Check input parameters */
- if ((m == NULL) || (params == NULL)) {
- return -1;
- }
+ if ((p == NULL) ||
+ (params == NULL) ||
+ (params->cir == 0) ||
+ (params->pir == 0) ||
+ (params->pir < params->cir) ||
+ (params->cbs == 0) ||
+ (params->pbs == 0))
+ return -EINVAL;
- if ((params->cir == 0) || (params->pir == 0) || (params->pir < params->cir) ||
- (params->cbs == 0) || (params->pbs == 0)) {
- return -2;
- }
+ /* Initialize trTCM profile structure */
+ p->cbs = params->cbs;
+ p->pbs = params->pbs;
+ rte_meter_get_tb_params(hz, params->cir, &p->cir_period,
+ &p->cir_bytes_per_period);
+ rte_meter_get_tb_params(hz, params->pir, &p->pir_period,
+ &p->pir_bytes_per_period);
+
+ return 0;
+}
+
+int
+rte_meter_trtcm_config(struct rte_meter_trtcm *m,
+ struct rte_meter_trtcm_profile *p)
+{
+ /* Check input parameters */
+ if ((m == NULL) || (p == NULL))
+ return -EINVAL;
/* Initialize trTCM run-time structure */
- hz = rte_get_tsc_hz();
m->time_tc = m->time_tp = rte_get_tsc_cycles();
- m->tc = m->cbs = params->cbs;
- m->tp = m->pbs = params->pbs;
- rte_meter_get_tb_params(hz, params->cir, &m->cir_period, &m->cir_bytes_per_period);
- rte_meter_get_tb_params(hz, params->pir, &m->pir_period, &m->pir_bytes_per_period);
-
- RTE_LOG(INFO, METER, "Low level trTCM config: \n"
- "\tCIR period = %" PRIu64 ", CIR bytes per period = %" PRIu64 "\n"
- "\tPIR period = %" PRIu64 ", PIR bytes per period = %" PRIu64 "\n",
- m->cir_period, m->cir_bytes_per_period,
- m->pir_period, m->pir_bytes_per_period);
+ m->tc = p->cbs;
+ m->tp = p->pbs;
return 0;
}
diff --git a/lib/librte_meter/rte_meter.h b/lib/librte_meter/rte_meter.h
index ebdc453f..58a05158 100644
--- a/lib/librte_meter/rte_meter.h
+++ b/lib/librte_meter/rte_meter.h
@@ -53,6 +53,18 @@ struct rte_meter_trtcm_params {
uint64_t pbs; /**< Peak Burst Size (PBS). Measured in bytes. */
};
+/**
+ * Internal data structure storing the srTCM configuration profile. Typically
+ * shared by multiple srTCM objects.
+ */
+struct rte_meter_srtcm_profile;
+
+/**
+ * Internal data structure storing the trTCM configuration profile. Typically
+ * shared by multiple trTCM objects.
+ */
+struct rte_meter_trtcm_profile;
+
/** Internal data structure storing the srTCM run-time context per metered traffic flow. */
struct rte_meter_srtcm;
@@ -60,38 +72,68 @@ struct rte_meter_srtcm;
struct rte_meter_trtcm;
/**
+ * srTCM profile configuration
+ *
+ * @param p
+ * Pointer to pre-allocated srTCM profile data structure
+ * @param params
+ * srTCM profile parameters
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int
+rte_meter_srtcm_profile_config(struct rte_meter_srtcm_profile *p,
+ struct rte_meter_srtcm_params *params);
+
+/**
+ * trTCM profile configuration
+ *
+ * @param p
+ * Pointer to pre-allocated trTCM profile data structure
+ * @param params
+ * trTCM profile parameters
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int
+rte_meter_trtcm_profile_config(struct rte_meter_trtcm_profile *p,
+ struct rte_meter_trtcm_params *params);
+
+/**
* srTCM configuration per metered traffic flow
*
* @param m
* Pointer to pre-allocated srTCM data structure
- * @param params
- * User parameters per srTCM metered traffic flow
+ * @param p
+ * srTCM profile. Needs to be valid.
* @return
* 0 upon success, error code otherwise
*/
int
rte_meter_srtcm_config(struct rte_meter_srtcm *m,
- struct rte_meter_srtcm_params *params);
+ struct rte_meter_srtcm_profile *p);
/**
* trTCM configuration per metered traffic flow
*
* @param m
* Pointer to pre-allocated trTCM data structure
- * @param params
- * User parameters per trTCM metered traffic flow
+ * @param p
+ * trTCM profile. Needs to be valid.
* @return
* 0 upon success, error code otherwise
*/
int
rte_meter_trtcm_config(struct rte_meter_trtcm *m,
- struct rte_meter_trtcm_params *params);
+ struct rte_meter_trtcm_profile *p);
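A sketch of the two-step setup this change introduces, assuming a committed rate of 1250000 bytes/s (10 Mbit/s); error handling is omitted:

struct rte_meter_srtcm_profile profile;	/* shared by many flows */
struct rte_meter_srtcm meter;		/* one per metered flow */
struct rte_meter_srtcm_params params = {
	.cir = 1250000,	/* committed rate, bytes/second */
	.cbs = 2048,	/* committed burst size, bytes */
	.ebs = 2048,	/* excess burst size, bytes */
};
uint32_t pkt_len = 64;
enum rte_meter_color color;

rte_meter_srtcm_profile_config(&profile, &params);
rte_meter_srtcm_config(&meter, &profile);

/* the profile must now be passed on every check */
color = rte_meter_srtcm_color_blind_check(&meter, &profile,
		rte_get_tsc_cycles(), pkt_len);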
/**
* srTCM color blind traffic metering
*
* @param m
* Handle to srTCM instance
+ * @param p
+ * srTCM profile specified at srTCM object creation time
* @param time
* Current CPU time stamp (measured in CPU cycles)
* @param pkt_len
@@ -101,6 +143,7 @@ rte_meter_trtcm_config(struct rte_meter_trtcm *m,
*/
static inline enum rte_meter_color
rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
+ struct rte_meter_srtcm_profile *p,
uint64_t time,
uint32_t pkt_len);
@@ -109,6 +152,8 @@ rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
*
* @param m
* Handle to srTCM instance
+ * @param p
+ * srTCM profile specified at srTCM object creation time
* @param time
* Current CPU time stamp (measured in CPU cycles)
* @param pkt_len
@@ -120,6 +165,7 @@ rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
*/
static inline enum rte_meter_color
rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
+ struct rte_meter_srtcm_profile *p,
uint64_t time,
uint32_t pkt_len,
enum rte_meter_color pkt_color);
@@ -129,6 +175,8 @@ rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
*
* @param m
* Handle to trTCM instance
+ * @param p
+ * trTCM profile specified at trTCM object creation time
* @param time
* Current CPU time stamp (measured in CPU cycles)
* @param pkt_len
@@ -138,6 +186,7 @@ rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
*/
static inline enum rte_meter_color
rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
+ struct rte_meter_trtcm_profile *p,
uint64_t time,
uint32_t pkt_len);
@@ -146,6 +195,8 @@ rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
*
* @param m
* Handle to trTCM instance
+ * @param p
+ * trTCM profile specified at trTCM object creation time
* @param time
* Current CPU time stamp (measured in CPU cycles)
* @param pkt_len
@@ -157,6 +208,7 @@ rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
*/
static inline enum rte_meter_color
rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m,
+ struct rte_meter_trtcm_profile *p,
uint64_t time,
uint32_t pkt_len,
enum rte_meter_color pkt_color);
@@ -166,33 +218,57 @@ rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m,
*
***/
+struct rte_meter_srtcm_profile {
+ uint64_t cbs;
+ /**< Upper limit for C token bucket */
+ uint64_t ebs;
+ /**< Upper limit for E token bucket */
+ uint64_t cir_period;
+ /**< Number of CPU cycles for each update of C and E token buckets */
+ uint64_t cir_bytes_per_period;
+ /**< Number of bytes to add to C and E token buckets on each update */
+};
+
/* Internal data structure storing the srTCM run-time context per metered traffic flow. */
struct rte_meter_srtcm {
uint64_t time; /* Time of latest update of C and E token buckets */
uint64_t tc; /* Number of bytes currently available in the committed (C) token bucket */
uint64_t te; /* Number of bytes currently available in the excess (E) token bucket */
- uint64_t cbs; /* Upper limit for C token bucket */
- uint64_t ebs; /* Upper limit for E token bucket */
- uint64_t cir_period; /* Number of CPU cycles for one update of C and E token buckets */
- uint64_t cir_bytes_per_period; /* Number of bytes to add to C and E token buckets on each update */
};
-/* Internal data structure storing the trTCM run-time context per metered traffic flow. */
+struct rte_meter_trtcm_profile {
+ uint64_t cbs;
+ /**< Upper limit for C token bucket */
+ uint64_t pbs;
+ /**< Upper limit for P token bucket */
+ uint64_t cir_period;
+ /**< Number of CPU cycles for one update of C token bucket */
+ uint64_t cir_bytes_per_period;
+ /**< Number of bytes to add to C token bucket on each update */
+ uint64_t pir_period;
+ /**< Number of CPU cycles for one update of P token bucket */
+ uint64_t pir_bytes_per_period;
+ /**< Number of bytes to add to P token bucket on each update */
+};
+
+/**
+ * Internal data structure storing the trTCM run-time context per metered
+ * traffic flow.
+ */
struct rte_meter_trtcm {
- uint64_t time_tc; /* Time of latest update of C token bucket */
- uint64_t time_tp; /* Time of latest update of E token bucket */
- uint64_t tc; /* Number of bytes currently available in the committed (C) token bucket */
- uint64_t tp; /* Number of bytes currently available in the peak (P) token bucket */
- uint64_t cbs; /* Upper limit for C token bucket */
- uint64_t pbs; /* Upper limit for P token bucket */
- uint64_t cir_period; /* Number of CPU cycles for one update of C token bucket */
- uint64_t cir_bytes_per_period; /* Number of bytes to add to C token bucket on each update */
- uint64_t pir_period; /* Number of CPU cycles for one update of P token bucket */
- uint64_t pir_bytes_per_period; /* Number of bytes to add to P token bucket on each update */
+ uint64_t time_tc;
+ /**< Time of latest update of C token bucket */
+ uint64_t time_tp;
+ /**< Time of latest update of P token bucket */
+ uint64_t tc;
+ /**< Number of bytes currently available in committed (C) token bucket */
+ uint64_t tp;
+ /**< Number of bytes currently available in the peak (P) token bucket */
};
static inline enum rte_meter_color
rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
+ struct rte_meter_srtcm_profile *p,
uint64_t time,
uint32_t pkt_len)
{
@@ -200,17 +276,17 @@ rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
/* Bucket update */
time_diff = time - m->time;
- n_periods = time_diff / m->cir_period;
- m->time += n_periods * m->cir_period;
+ n_periods = time_diff / p->cir_period;
+ m->time += n_periods * p->cir_period;
/* Put the tokens overflowing from tc into te bucket */
- tc = m->tc + n_periods * m->cir_bytes_per_period;
+ tc = m->tc + n_periods * p->cir_bytes_per_period;
te = m->te;
- if (tc > m->cbs) {
- te += (tc - m->cbs);
- if (te > m->ebs)
- te = m->ebs;
- tc = m->cbs;
+ if (tc > p->cbs) {
+ te += (tc - p->cbs);
+ if (te > p->ebs)
+ te = p->ebs;
+ tc = p->cbs;
}
/* Color logic */
@@ -233,6 +309,7 @@ rte_meter_srtcm_color_blind_check(struct rte_meter_srtcm *m,
static inline enum rte_meter_color
rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
+ struct rte_meter_srtcm_profile *p,
uint64_t time,
uint32_t pkt_len,
enum rte_meter_color pkt_color)
@@ -241,17 +318,17 @@ rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
/* Bucket update */
time_diff = time - m->time;
- n_periods = time_diff / m->cir_period;
- m->time += n_periods * m->cir_period;
+ n_periods = time_diff / p->cir_period;
+ m->time += n_periods * p->cir_period;
/* Put the tokens overflowing from tc into te bucket */
- tc = m->tc + n_periods * m->cir_bytes_per_period;
+ tc = m->tc + n_periods * p->cir_bytes_per_period;
te = m->te;
- if (tc > m->cbs) {
- te += (tc - m->cbs);
- if (te > m->ebs)
- te = m->ebs;
- tc = m->cbs;
+ if (tc > p->cbs) {
+ te += (tc - p->cbs);
+ if (te > p->ebs)
+ te = p->ebs;
+ tc = p->cbs;
}
/* Color logic */
@@ -274,6 +351,7 @@ rte_meter_srtcm_color_aware_check(struct rte_meter_srtcm *m,
static inline enum rte_meter_color
rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
+ struct rte_meter_trtcm_profile *p,
uint64_t time,
uint32_t pkt_len)
{
@@ -282,18 +360,18 @@ rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
/* Bucket update */
time_diff_tc = time - m->time_tc;
time_diff_tp = time - m->time_tp;
- n_periods_tc = time_diff_tc / m->cir_period;
- n_periods_tp = time_diff_tp / m->pir_period;
- m->time_tc += n_periods_tc * m->cir_period;
- m->time_tp += n_periods_tp * m->pir_period;
+ n_periods_tc = time_diff_tc / p->cir_period;
+ n_periods_tp = time_diff_tp / p->pir_period;
+ m->time_tc += n_periods_tc * p->cir_period;
+ m->time_tp += n_periods_tp * p->pir_period;
- tc = m->tc + n_periods_tc * m->cir_bytes_per_period;
- if (tc > m->cbs)
- tc = m->cbs;
+ tc = m->tc + n_periods_tc * p->cir_bytes_per_period;
+ if (tc > p->cbs)
+ tc = p->cbs;
- tp = m->tp + n_periods_tp * m->pir_bytes_per_period;
- if (tp > m->pbs)
- tp = m->pbs;
+ tp = m->tp + n_periods_tp * p->pir_bytes_per_period;
+ if (tp > p->pbs)
+ tp = p->pbs;
/* Color logic */
if (tp < pkt_len) {
@@ -315,6 +393,7 @@ rte_meter_trtcm_color_blind_check(struct rte_meter_trtcm *m,
static inline enum rte_meter_color
rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m,
+ struct rte_meter_trtcm_profile *p,
uint64_t time,
uint32_t pkt_len,
enum rte_meter_color pkt_color)
@@ -324,18 +403,18 @@ rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m,
/* Bucket update */
time_diff_tc = time - m->time_tc;
time_diff_tp = time - m->time_tp;
- n_periods_tc = time_diff_tc / m->cir_period;
- n_periods_tp = time_diff_tp / m->pir_period;
- m->time_tc += n_periods_tc * m->cir_period;
- m->time_tp += n_periods_tp * m->pir_period;
-
- tc = m->tc + n_periods_tc * m->cir_bytes_per_period;
- if (tc > m->cbs)
- tc = m->cbs;
-
- tp = m->tp + n_periods_tp * m->pir_bytes_per_period;
- if (tp > m->pbs)
- tp = m->pbs;
+ n_periods_tc = time_diff_tc / p->cir_period;
+ n_periods_tp = time_diff_tp / p->pir_period;
+ m->time_tc += n_periods_tc * p->cir_period;
+ m->time_tp += n_periods_tp * p->pir_period;
+
+ tc = m->tc + n_periods_tc * p->cir_bytes_per_period;
+ if (tc > p->cbs)
+ tc = p->cbs;
+
+ tp = m->tp + n_periods_tp * p->pir_bytes_per_period;
+ if (tp > p->pbs)
+ tp = p->pbs;
/* Color logic */
if ((pkt_color == e_RTE_METER_RED) || (tp < pkt_len)) {
diff --git a/lib/librte_meter/rte_meter_version.map b/lib/librte_meter/rte_meter_version.map
index 2fd647c6..cb79f0c2 100644
--- a/lib/librte_meter/rte_meter_version.map
+++ b/lib/librte_meter/rte_meter_version.map
@@ -10,3 +10,10 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_18.08 {
+ global:
+
+ rte_meter_srtcm_profile_config;
+ rte_meter_trtcm_profile_config;
+};
diff --git a/lib/librte_metrics/rte_metrics.c b/lib/librte_metrics/rte_metrics.c
index 556ae1ba..99a96b65 100644
--- a/lib/librte_metrics/rte_metrics.c
+++ b/lib/librte_metrics/rte_metrics.c
@@ -6,6 +6,7 @@
#include <sys/queue.h>
#include <rte_common.h>
+#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_metrics.h>
#include <rte_lcore.h>
@@ -95,6 +96,9 @@ rte_metrics_reg_names(const char * const *names, uint16_t cnt_names)
/* Some sanity checks */
if (cnt_names < 1 || names == NULL)
return -EINVAL;
+ for (idx_name = 0; idx_name < cnt_names; idx_name++)
+ if (names[idx_name] == NULL)
+ return -EINVAL;
memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
if (memzone == NULL)
@@ -113,10 +117,7 @@ rte_metrics_reg_names(const char * const *names, uint16_t cnt_names)
for (idx_name = 0; idx_name < cnt_names; idx_name++) {
entry = &stats->metadata[idx_name + stats->cnt_stats];
- strncpy(entry->name, names[idx_name],
- RTE_METRICS_MAX_NAME_LEN);
- /* Enforce NULL-termination */
- entry->name[RTE_METRICS_MAX_NAME_LEN - 1] = '\0';
+ strlcpy(entry->name, names[idx_name], RTE_METRICS_MAX_NAME_LEN);
memset(entry->value, 0, sizeof(entry->value));
entry->idx_next_stat = idx_name + stats->cnt_stats + 1;
}
@@ -161,6 +162,11 @@ rte_metrics_update_values(int port_id,
stats = memzone->addr;
rte_spinlock_lock(&stats->lock);
+
+ if (key >= stats->cnt_stats) {
+ rte_spinlock_unlock(&stats->lock);
+ return -EINVAL;
+ }
idx_metric = key;
cnt_setsize = 1;
while (idx_metric < stats->cnt_stats) {
@@ -202,9 +208,8 @@ rte_metrics_get_names(struct rte_metric_name *names,
int return_value;
memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
- /* If not allocated, fail silently */
if (memzone == NULL)
- return 0;
+ return -EIO;
stats = memzone->addr;
rte_spinlock_lock(&stats->lock);
@@ -215,7 +220,7 @@ rte_metrics_get_names(struct rte_metric_name *names,
return return_value;
}
for (idx_name = 0; idx_name < stats->cnt_stats; idx_name++)
- strncpy(names[idx_name].name,
+ strlcpy(names[idx_name].name,
stats->metadata[idx_name].name,
RTE_METRICS_MAX_NAME_LEN);
}
@@ -240,9 +245,9 @@ rte_metrics_get_values(int port_id,
return -EINVAL;
memzone = rte_memzone_lookup(RTE_METRICS_MEMZONE_NAME);
- /* If not allocated, fail silently */
if (memzone == NULL)
- return 0;
+ return -EIO;
+
stats = memzone->addr;
rte_spinlock_lock(&stats->lock);
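
Editor's note: taken together, the rte_metrics changes above reject NULL name pointers, bound the update key under the lock, and report a missing shared memzone as -EIO instead of silently returning 0. A sketch of the registration/update path these checks guard (the metric names are hypothetical):

    #include <rte_lcore.h>
    #include <rte_metrics.h>

    static const char * const names[] = { "rx_good", "rx_errors" };

    static int
    metrics_setup(int port_id)
    {
        uint64_t values[2] = { 0, 0 };
        int key;

        rte_metrics_init(rte_socket_id()); /* primary process, once */

        /* Now fails with -EINVAL if any names[] entry is NULL. */
        key = rte_metrics_reg_names(names, 2);
        if (key < 0)
            return key;

        /* Out-of-range keys are now caught under the lock. */
        return rte_metrics_update_values(port_id, (uint16_t)key, values, 2);
    }
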
diff --git a/lib/librte_net/Makefile b/lib/librte_net/Makefile
index 95ff5490..85e403f4 100644
--- a/lib/librte_net/Makefile
+++ b/lib/librte_net/Makefile
@@ -5,6 +5,7 @@ include $(RTE_SDK)/mk/rte.vars.mk
LIB = librte_net.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
LDLIBS += -lrte_mbuf -lrte_eal -lrte_mempool
diff --git a/lib/librte_net/meson.build b/lib/librte_net/meson.build
index 78c0f03e..d3ea1feb 100644
--- a/lib/librte_net/meson.build
+++ b/lib/librte_net/meson.build
@@ -2,6 +2,7 @@
# Copyright(c) 2017 Intel Corporation
version = 1
+allow_experimental_apis = true
headers = files('rte_ip.h',
'rte_tcp.h',
'rte_udp.h',
diff --git a/lib/librte_net/rte_esp.h b/lib/librte_net/rte_esp.h
index 0fc99ac6..f77ec2eb 100644
--- a/lib/librte_net/rte_esp.h
+++ b/lib/librte_net/rte_esp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2017, Mellanox Technologies.
+ * Copyright 2016 Mellanox Technologies, Ltd
*/
#ifndef _RTE_ESP_H_
diff --git a/lib/librte_net/rte_ether.h b/lib/librte_net/rte_ether.h
index 45daa911..bee2b34f 100644
--- a/lib/librte_net/rte_ether.h
+++ b/lib/librte_net/rte_ether.h
@@ -210,7 +210,7 @@ static inline void eth_random_addr(uint8_t *addr)
uint8_t *p = (uint8_t *)&rand;
rte_memcpy(addr, p, ETHER_ADDR_LEN);
- addr[0] &= ~ETHER_GROUP_ADDR; /* clear multicast bit */
+ addr[0] &= (uint8_t)~ETHER_GROUP_ADDR; /* clear multicast bit */
addr[0] |= ETHER_LOCAL_ADMIN_ADDR; /* set local assignment bit */
}
@@ -301,6 +301,7 @@ struct vxlan_hdr {
#define ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. */
#define ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */
#define ETHER_TYPE_QINQ 0x88A8 /**< IEEE 802.1ad QinQ tagging. */
+#define ETHER_TYPE_ETAG 0x893F /**< IEEE 802.1BR E-Tag. */
#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
#define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
#define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
@@ -310,6 +311,31 @@ struct vxlan_hdr {
/**< VXLAN tunnel header length. */
/**
+ * VXLAN-GPE protocol header (draft-ietf-nvo3-vxlan-gpe-05).
+ * Contains the 8-bit flag, 8-bit next-protocol, 24-bit VXLAN Network
+ * Identifier and Reserved fields (16 bits and 8 bits).
+ */
+struct vxlan_gpe_hdr {
+ uint8_t vx_flags; /**< flag (8). */
+ uint8_t reserved[2]; /**< Reserved (16). */
+ uint8_t proto; /**< next-protocol (8). */
+ uint32_t vx_vni; /**< VNI (24) + Reserved (8). */
+} __attribute__((__packed__));
+
+/* VXLAN-GPE next protocol types */
+#define VXLAN_GPE_TYPE_IPV4 1 /**< IPv4 Protocol. */
+#define VXLAN_GPE_TYPE_IPV6 2 /**< IPv6 Protocol. */
+#define VXLAN_GPE_TYPE_ETH 3 /**< Ethernet Protocol. */
+#define VXLAN_GPE_TYPE_NSH 4 /**< NSH Protocol. */
+#define VXLAN_GPE_TYPE_MPLS 5 /**< MPLS Protocol. */
+#define VXLAN_GPE_TYPE_GBP 6 /**< GBP Protocol. */
+#define VXLAN_GPE_TYPE_VBNG 7 /**< vBNG Protocol. */
+
+#define ETHER_VXLAN_GPE_HLEN (sizeof(struct udp_hdr) + \
+ sizeof(struct vxlan_gpe_hdr))
+/**< VXLAN-GPE tunnel header length. */
+
+/**
* Extract VLAN tag information into mbuf
*
* Software version of VLAN stripping
@@ -324,11 +350,12 @@ static inline int rte_vlan_strip(struct rte_mbuf *m)
{
struct ether_hdr *eh
= rte_pktmbuf_mtod(m, struct ether_hdr *);
+ struct vlan_hdr *vh;
if (eh->ether_type != rte_cpu_to_be_16(ETHER_TYPE_VLAN))
return -1;
- struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
+ vh = (struct vlan_hdr *)(eh + 1);
m->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
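
Editor's note: the new struct vxlan_gpe_hdr and VXLAN_GPE_TYPE_* constants above describe the draft-ietf-nvo3-vxlan-gpe-05 header. A minimal decode sketch, assuming the caller has already verified that a full header is present in contiguous packet data:

    #include <rte_byteorder.h>
    #include <rte_ether.h>

    static int
    vxlan_gpe_payload_is_eth(const struct vxlan_gpe_hdr *hdr, uint32_t *vni)
    {
        /* The 24-bit VNI sits in the top bits of vx_vni (big endian). */
        *vni = rte_be_to_cpu_32(hdr->vx_vni) >> 8;
        return hdr->proto == VXLAN_GPE_TYPE_ETH;
    }
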
diff --git a/lib/librte_net/rte_ip.h b/lib/librte_net/rte_ip.h
index f32684c6..f2a8904a 100644
--- a/lib/librte_net/rte_ip.h
+++ b/lib/librte_net/rte_ip.h
@@ -108,25 +108,25 @@ __rte_raw_cksum(const void *buf, size_t len, uint32_t sum)
/* workaround gcc strict-aliasing warning */
uintptr_t ptr = (uintptr_t)buf;
typedef uint16_t __attribute__((__may_alias__)) u16_p;
- const u16_p *u16 = (const u16_p *)ptr;
-
- while (len >= (sizeof(*u16) * 4)) {
- sum += u16[0];
- sum += u16[1];
- sum += u16[2];
- sum += u16[3];
- len -= sizeof(*u16) * 4;
- u16 += 4;
+ const u16_p *u16_buf = (const u16_p *)ptr;
+
+ while (len >= (sizeof(*u16_buf) * 4)) {
+ sum += u16_buf[0];
+ sum += u16_buf[1];
+ sum += u16_buf[2];
+ sum += u16_buf[3];
+ len -= sizeof(*u16_buf) * 4;
+ u16_buf += 4;
}
- while (len >= sizeof(*u16)) {
- sum += *u16;
- len -= sizeof(*u16);
- u16 += 1;
+ while (len >= sizeof(*u16_buf)) {
+ sum += *u16_buf;
+ len -= sizeof(*u16_buf);
+ u16_buf += 1;
}
/* if length is in odd bytes */
if (len == 1)
- sum += *((const uint8_t *)u16);
+ sum += *((const uint8_t *)u16_buf);
return sum;
}
@@ -222,7 +222,7 @@ rte_raw_cksum_mbuf(const struct rte_mbuf *m, uint32_t off, uint32_t len,
for (;;) {
tmp = __rte_raw_cksum(buf, seglen, 0);
if (done & 1)
- tmp = rte_bswap16(tmp);
+ tmp = rte_bswap16((uint16_t)tmp);
sum += tmp;
done += seglen;
if (done == len)
@@ -253,7 +253,7 @@ rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr)
{
uint16_t cksum;
cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr));
- return (cksum == 0xffff) ? cksum : ~cksum;
+ return (cksum == 0xffff) ? cksum : (uint16_t)~cksum;
}
/**
@@ -318,8 +318,8 @@ rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr)
uint32_t cksum;
uint32_t l4_len;
- l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) -
- sizeof(struct ipv4_hdr);
+ l4_len = (uint32_t)(rte_be_to_cpu_16(ipv4_hdr->total_length) -
+ sizeof(struct ipv4_hdr));
cksum = rte_raw_cksum(l4_hdr, l4_len);
cksum += rte_ipv4_phdr_cksum(ipv4_hdr, 0);
@@ -329,7 +329,7 @@ rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr)
if (cksum == 0)
cksum = 0xffff;
- return cksum;
+ return (uint16_t)cksum;
}
/**
@@ -375,7 +375,7 @@ rte_ipv6_phdr_cksum(const struct ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
uint32_t proto; /* L4 protocol - top 3 bytes must be zero */
} psd_hdr;
- psd_hdr.proto = (ipv6_hdr->proto << 24);
+ psd_hdr.proto = (uint32_t)(ipv6_hdr->proto << 24);
if (ol_flags & PKT_TX_TCP_SEG) {
psd_hdr.len = 0;
} else {
@@ -418,7 +418,7 @@ rte_ipv6_udptcp_cksum(const struct ipv6_hdr *ipv6_hdr, const void *l4_hdr)
if (cksum == 0)
cksum = 0xffff;
- return cksum;
+ return (uint16_t)cksum;
}
#ifdef __cplusplus
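
Editor's note: the casts added above silence integer-conversion warnings without changing results; the one's-complement sums still fold to 16 bits. Typical use of the IPv4 helpers touched here, as a sketch:

    #include <rte_ip.h>

    static uint16_t
    ipv4_l4_cksum(struct ipv4_hdr *ip, const void *l4)
    {
        ip->hdr_checksum = 0; /* must be zero while the sum is computed */
        ip->hdr_checksum = rte_ipv4_cksum(ip);

        /* Pseudo-header + payload checksum for the TCP/UDP header. */
        return rte_ipv4_udptcp_cksum(ip, l4);
    }
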
diff --git a/lib/librte_net/rte_net.c b/lib/librte_net/rte_net.c
index 56a13e3c..9eb7c743 100644
--- a/lib/librte_net/rte_net.c
+++ b/lib/librte_net/rte_net.c
@@ -178,8 +178,8 @@ ip4_hlen(const struct ipv4_hdr *hdr)
}
/* parse ipv6 extended headers, update offset and return next proto */
-static uint16_t
-skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
+int __rte_experimental
+rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
int *frag)
{
struct ext_hdr {
@@ -201,7 +201,7 @@ skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
&xh_copy);
if (xh == NULL)
- return 0;
+ return -1;
*off += (xh->len + 1) * 8;
proto = xh->next_hdr;
break;
@@ -209,7 +209,7 @@ skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
xh = rte_pktmbuf_read(m, *off, sizeof(*xh),
&xh_copy);
if (xh == NULL)
- return 0;
+ return -1;
*off += 8;
proto = xh->next_hdr;
*frag = 1;
@@ -220,7 +220,7 @@ skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
return proto;
}
}
- return 0;
+ return -1;
}
/* parse mbuf data to get packet type */
@@ -233,6 +233,7 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
uint32_t pkt_type = RTE_PTYPE_L2_ETHER;
uint32_t off = 0;
uint16_t proto;
+ int ret;
if (hdr_lens == NULL)
hdr_lens = &local_hdr_lens;
@@ -316,7 +317,10 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
off += hdr_lens->l3_len;
pkt_type |= ptype_l3_ip6(proto);
if ((pkt_type & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV6_EXT) {
- proto = skip_ip6_ext(proto, m, &off, &frag);
+ ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
+ if (ret < 0)
+ return pkt_type;
+ proto = ret;
hdr_lens->l3_len = off - hdr_lens->l2_len;
}
if (proto == 0)
@@ -449,7 +453,10 @@ uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
uint32_t prev_off;
prev_off = off;
- proto = skip_ip6_ext(proto, m, &off, &frag);
+ ret = rte_net_skip_ip6_ext(proto, m, &off, &frag);
+ if (ret < 0)
+ return pkt_type;
+ proto = ret;
hdr_lens->inner_l3_len += off - prev_off;
}
if (proto == 0)
diff --git a/lib/librte_net/rte_net.h b/lib/librte_net/rte_net.h
index 0e97901f..b6ab6e1d 100644
--- a/lib/librte_net/rte_net.h
+++ b/lib/librte_net/rte_net.h
@@ -29,6 +29,33 @@ struct rte_net_hdr_lens {
};
/**
+ * Skip IPv6 header extensions.
+ *
+ * This function skips all IPv6 extensions, returning the size of the
+ * complete header (including options) and the final protocol value.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * @param proto
+ * Protocol field of IPv6 header.
+ * @param m
+ * The packet mbuf to be parsed.
+ * @param off
+ *   On input, must contain the offset of the first byte following the
+ *   IPv6 header; on output, contains the offset of the first byte of
+ *   the next layer (after any IPv6 extension headers).
+ * @param frag
+ *   Set to 1 on output if the packet is an IPv6 fragment.
+ * @return
+ *   The protocol that follows the IPv6 header, or
+ *   -1 if an error occurs during mbuf parsing.
+ */
+int __rte_experimental
+rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
+ int *frag);
+
+/**
* Parse an Ethernet packet to get its packet type.
*
* This function parses the network headers in mbuf data and return its
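
Editor's note: rte_net_skip_ip6_ext() replaces the static skip_ip6_ext() and now distinguishes a parse failure (-1) from a legitimate zero next-header value. A usage sketch, assuming *off already points just past the fixed IPv6 header:

    #include <rte_mbuf.h>
    #include <rte_net.h>

    static int
    ipv6_l4_proto(const struct rte_mbuf *m, uint16_t proto, uint32_t *off)
    {
        int frag = 0;
        int ret = rte_net_skip_ip6_ext(proto, m, off, &frag);

        if (ret < 0)
            return -1;  /* truncated or unreadable extension header */
        return ret;     /* protocol following all extensions */
    }
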
diff --git a/lib/librte_net/rte_net_version.map b/lib/librte_net/rte_net_version.map
index 213e6fd3..26c06e7c 100644
--- a/lib/librte_net/rte_net_version.map
+++ b/lib/librte_net/rte_net_version.map
@@ -17,4 +17,5 @@ EXPERIMENTAL {
global:
rte_net_make_rarp_packet;
-} DPDK_17.05;
+ rte_net_skip_ip6_ext;
+};
diff --git a/lib/librte_pci/Makefile b/lib/librte_pci/Makefile
index fe213ea6..94a63267 100644
--- a/lib/librte_pci/Makefile
+++ b/lib/librte_pci/Makefile
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 6WIND S.A.
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/lib/librte_pci/rte_pci.c b/lib/librte_pci/rte_pci.c
index 1a6d7485..530738db 100644
--- a/lib/librte_pci/rte_pci.c
+++ b/lib/librte_pci/rte_pci.c
@@ -155,9 +155,10 @@ pci_map_resource(void *requested_addr, int fd, off_t offset, size_t size,
mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
MAP_SHARED | additional_flags, fd, offset);
if (mapaddr == MAP_FAILED) {
- RTE_LOG(ERR, EAL, "%s(): cannot mmap(%d, %p, 0x%lx, 0x%lx): %s (%p)\n",
- __func__, fd, requested_addr,
- (unsigned long)size, (unsigned long)offset,
+ RTE_LOG(ERR, EAL,
+ "%s(): cannot mmap(%d, %p, 0x%zx, 0x%llx): %s (%p)\n",
+ __func__, fd, requested_addr, size,
+ (unsigned long long)offset,
strerror(errno), mapaddr);
} else
RTE_LOG(DEBUG, EAL, " PCI memory mapped at %p\n", mapaddr);
@@ -174,8 +175,8 @@ pci_unmap_resource(void *requested_addr, size_t size)
/* Unmap the PCI memory resource of device */
if (munmap(requested_addr, size)) {
- RTE_LOG(ERR, EAL, "%s(): cannot munmap(%p, 0x%lx): %s\n",
- __func__, requested_addr, (unsigned long)size,
+ RTE_LOG(ERR, EAL, "%s(): cannot munmap(%p, %#zx): %s\n",
+ __func__, requested_addr, size,
strerror(errno));
} else
RTE_LOG(DEBUG, EAL, " PCI memory unmapped at %p\n",
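
Editor's note: the reworked log lines use the dedicated %zx length modifier for size_t and widen off_t explicitly, since off_t has no standard printf modifier. The same pattern in isolation, as a sketch:

    #include <stdio.h>
    #include <sys/types.h>

    static void
    log_mapping(size_t size, off_t offset)
    {
        printf("size=%#zx offset=%#llx\n",
               size, (unsigned long long)offset);
    }
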
diff --git a/lib/librte_pci/rte_pci_version.map b/lib/librte_pci/rte_pci_version.map
index 15d93d95..c0280277 100644
--- a/lib/librte_pci/rte_pci_version.map
+++ b/lib/librte_pci/rte_pci_version.map
@@ -3,12 +3,11 @@ DPDK_17.11 {
eal_parse_pci_BDF;
eal_parse_pci_DomBDF;
- rte_pci_addr_cmp;
- rte_pci_addr_parse;
- rte_pci_device_name;
pci_map_resource;
pci_unmap_resource;
rte_eal_compare_pci_addr;
+ rte_pci_addr_cmp;
+ rte_pci_addr_parse;
rte_pci_device_name;
local: *;
diff --git a/lib/librte_pdump/Makefile b/lib/librte_pdump/Makefile
index 98fa752e..0ee0fa1a 100644
--- a/lib/librte_pdump/Makefile
+++ b/lib/librte_pdump/Makefile
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2016 Intel Corporation
+# Copyright(c) 2016-2018 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
# library name
LIB = librte_pdump.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
CFLAGS += -D_GNU_SOURCE
LDLIBS += -lpthread
diff --git a/lib/librte_pdump/meson.build b/lib/librte_pdump/meson.build
index 3a95eabd..be80904b 100644
--- a/lib/librte_pdump/meson.build
+++ b/lib/librte_pdump/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
+version = 2
sources = files('rte_pdump.c')
headers = files('rte_pdump.h')
+allow_experimental_apis = true
deps += ['ethdev']
diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c
index ec8a5d84..6c3a8858 100644
--- a/lib/librte_pdump/rte_pdump.c
+++ b/lib/librte_pdump/rte_pdump.c
@@ -1,35 +1,24 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
*/
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <pthread.h>
-#include <stdbool.h>
-#include <stdio.h>
-
#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_errno.h>
+#include <rte_string_fns.h>
#include "rte_pdump.h"
-#define SOCKET_PATH_VAR_RUN "/var/run"
-#define SOCKET_PATH_HOME "HOME"
-#define DPDK_DIR "/.dpdk"
-#define SOCKET_DIR "/pdump_sockets"
-#define SERVER_SOCKET "%s/pdump_server_socket"
-#define CLIENT_SOCKET "%s/pdump_client_socket_%d_%u"
#define DEVICE_ID_SIZE 64
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_PDUMP RTE_LOGTYPE_USER1
+/* Used for the multi-process communication */
+#define PDUMP_MP "mp_pdump"
+
enum pdump_operation {
DISABLE = 1,
ENABLE = 2
@@ -39,11 +28,6 @@ enum pdump_version {
V1 = 1
};
-static pthread_t pdump_thread;
-static int pdump_socket_fd;
-static char server_socket_dir[PATH_MAX];
-static char client_socket_dir[PATH_MAX];
-
struct pdump_request {
uint16_t ver;
uint16_t op;
@@ -75,7 +59,7 @@ struct pdump_response {
static struct pdump_rxtx_cbs {
struct rte_ring *ring;
struct rte_mempool *mp;
- struct rte_eth_rxtx_callback *cb;
+ const struct rte_eth_rxtx_callback *cb;
void *filter;
} rx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT],
tx_cbs[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
@@ -307,7 +291,7 @@ pdump_register_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
}
static int
-set_pdump_rxtx_cbs(struct pdump_request *p)
+set_pdump_rxtx_cbs(const struct pdump_request *p)
{
uint16_t nb_rx_q = 0, nb_tx_q = 0, end_q, queue;
uint16_t port;
@@ -391,313 +375,51 @@ set_pdump_rxtx_cbs(struct pdump_request *p)
return ret;
}
-/* get socket path (/var/run if root, $HOME otherwise) */
static int
-pdump_get_socket_path(char *buffer, int bufsz, enum rte_pdump_socktype type)
+pdump_server(const struct rte_mp_msg *mp_msg, const void *peer)
{
- char dpdk_dir[PATH_MAX] = {0};
- char dir[PATH_MAX] = {0};
- char *dir_home = NULL;
- int ret = 0;
-
- if (type == RTE_PDUMP_SOCKET_SERVER && server_socket_dir[0] != 0)
- snprintf(dir, sizeof(dir), "%s", server_socket_dir);
- else if (type == RTE_PDUMP_SOCKET_CLIENT && client_socket_dir[0] != 0)
- snprintf(dir, sizeof(dir), "%s", client_socket_dir);
- else {
- if (getuid() != 0) {
- dir_home = getenv(SOCKET_PATH_HOME);
- if (!dir_home) {
- RTE_LOG(ERR, PDUMP,
- "Failed to get environment variable"
- " value for %s, %s:%d\n",
- SOCKET_PATH_HOME, __func__, __LINE__);
- return -1;
- }
- snprintf(dpdk_dir, sizeof(dpdk_dir), "%s%s",
- dir_home, DPDK_DIR);
- } else
- snprintf(dpdk_dir, sizeof(dpdk_dir), "%s%s",
- SOCKET_PATH_VAR_RUN, DPDK_DIR);
-
- mkdir(dpdk_dir, 0700);
- snprintf(dir, sizeof(dir), "%s%s",
- dpdk_dir, SOCKET_DIR);
- }
-
- ret = mkdir(dir, 0700);
- /* if user passed socket path is invalid, return immediately */
- if (ret < 0 && errno != EEXIST) {
- RTE_LOG(ERR, PDUMP,
- "Failed to create dir:%s:%s\n", dir,
- strerror(errno));
- rte_errno = errno;
- return -1;
- }
-
- if (type == RTE_PDUMP_SOCKET_SERVER)
- snprintf(buffer, bufsz, SERVER_SOCKET, dir);
- else
- snprintf(buffer, bufsz, CLIENT_SOCKET, dir, getpid(),
- rte_sys_gettid());
-
- return 0;
-}
-
-static int
-pdump_create_server_socket(void)
-{
- int ret, socket_fd;
- struct sockaddr_un addr;
- socklen_t addr_len;
-
- ret = pdump_get_socket_path(addr.sun_path, sizeof(addr.sun_path),
- RTE_PDUMP_SOCKET_SERVER);
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to get server socket path: %s:%d\n",
- __func__, __LINE__);
- return -1;
- }
- addr.sun_family = AF_UNIX;
-
- /* remove if file already exists */
- unlink(addr.sun_path);
-
- /* set up a server socket */
- socket_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
- if (socket_fd < 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to create server socket: %s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- return -1;
- }
-
- addr_len = sizeof(struct sockaddr_un);
- ret = bind(socket_fd, (struct sockaddr *) &addr, addr_len);
- if (ret) {
- RTE_LOG(ERR, PDUMP,
- "Failed to bind to server socket: %s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- close(socket_fd);
+ struct rte_mp_msg mp_resp;
+ const struct pdump_request *cli_req;
+ struct pdump_response *resp = (struct pdump_response *)&mp_resp.param;
+
+ /* recv client requests */
+ if (mp_msg->len_param != sizeof(*cli_req)) {
+ RTE_LOG(ERR, PDUMP, "failed to recv from client\n");
+ resp->err_value = -EINVAL;
+ } else {
+ cli_req = (const struct pdump_request *)mp_msg->param;
+ resp->ver = cli_req->ver;
+ resp->res_op = cli_req->op;
+ resp->err_value = set_pdump_rxtx_cbs(cli_req);
+ }
+
+ strlcpy(mp_resp.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
+ mp_resp.len_param = sizeof(*resp);
+ mp_resp.num_fds = 0;
+ if (rte_mp_reply(&mp_resp, peer) < 0) {
+ RTE_LOG(ERR, PDUMP, "failed to send to client:%s, %s:%d\n",
+ strerror(rte_errno), __func__, __LINE__);
return -1;
}
- /* save the socket in local configuration */
- pdump_socket_fd = socket_fd;
-
return 0;
}
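
Editor's note: pdump_server() above follows the generic rte_mp action pattern: validate len_param, build a reply whose name matches the registered action, and answer the peer with rte_mp_reply(). The same skeleton for a hypothetical action (the action name and payload handling are illustrative):

    #include <string.h>
    #include <rte_eal.h>
    #include <rte_string_fns.h>

    #define EXAMPLE_MP "mp_example" /* hypothetical action name */

    static int
    example_server(const struct rte_mp_msg *msg, const void *peer)
    {
        struct rte_mp_msg resp;

        memset(&resp, 0, sizeof(resp));
        strlcpy(resp.name, EXAMPLE_MP, RTE_MP_MAX_NAME_LEN);
        resp.len_param = msg->len_param; /* echo the payload size */
        return rte_mp_reply(&resp, peer);
    }

    /* In the primary process, once EAL is up:
     *     rte_mp_action_register(EXAMPLE_MP, example_server);
     */
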
-static __attribute__((noreturn)) void *
-pdump_thread_main(__rte_unused void *arg)
-{
- struct sockaddr_un cli_addr;
- socklen_t cli_len;
- struct pdump_request cli_req;
- struct pdump_response resp;
- int n;
- int ret = 0;
-
- /* host thread, never break out */
- for (;;) {
- /* recv client requests */
- cli_len = sizeof(cli_addr);
- n = recvfrom(pdump_socket_fd, &cli_req,
- sizeof(struct pdump_request), 0,
- (struct sockaddr *)&cli_addr, &cli_len);
- if (n < 0) {
- RTE_LOG(ERR, PDUMP,
- "failed to recv from client:%s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- continue;
- }
-
- ret = set_pdump_rxtx_cbs(&cli_req);
-
- resp.ver = cli_req.ver;
- resp.res_op = cli_req.op;
- resp.err_value = ret;
- n = sendto(pdump_socket_fd, &resp,
- sizeof(struct pdump_response),
- 0, (struct sockaddr *)&cli_addr, cli_len);
- if (n < 0) {
- RTE_LOG(ERR, PDUMP,
- "failed to send to client:%s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- }
- }
-}
-
int
-rte_pdump_init(const char *path)
+rte_pdump_init(const char *path __rte_unused)
{
- int ret = 0;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
-
- ret = rte_pdump_set_socket_dir(path, RTE_PDUMP_SOCKET_SERVER);
- if (ret != 0)
- return -1;
-
- ret = pdump_create_server_socket();
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to create server socket:%s:%d\n",
- __func__, __LINE__);
- return -1;
- }
-
- /* create the host thread to wait/handle pdump requests */
- ret = pthread_create(&pdump_thread, NULL, pdump_thread_main, NULL);
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to create the pdump thread:%s, %s:%d\n",
- strerror(ret), __func__, __LINE__);
- return -1;
- }
- /* Set thread_name for aid in debugging. */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "pdump-thread");
- ret = rte_thread_setname(pdump_thread, thread_name);
- if (ret != 0) {
- RTE_LOG(DEBUG, PDUMP,
- "Failed to set thread name for pdump handling\n");
- }
-
- return 0;
+ return rte_mp_action_register(PDUMP_MP, pdump_server);
}
int
rte_pdump_uninit(void)
{
- int ret;
-
- ret = pthread_cancel(pdump_thread);
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to cancel the pdump thread:%s, %s:%d\n",
- strerror(ret), __func__, __LINE__);
- return -1;
- }
-
- ret = close(pdump_socket_fd);
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to close server socket: %s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- return -1;
- }
-
- struct sockaddr_un addr;
-
- ret = pdump_get_socket_path(addr.sun_path, sizeof(addr.sun_path),
- RTE_PDUMP_SOCKET_SERVER);
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to get server socket path: %s:%d\n",
- __func__, __LINE__);
- return -1;
- }
- ret = unlink(addr.sun_path);
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to remove server socket addr: %s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- return -1;
- }
+ rte_mp_action_unregister(PDUMP_MP);
return 0;
}
static int
-pdump_create_client_socket(struct pdump_request *p)
-{
- int ret, socket_fd;
- int pid;
- int n;
- struct pdump_response server_resp;
- struct sockaddr_un addr, serv_addr, from;
- socklen_t addr_len, serv_len;
-
- pid = getpid();
-
- socket_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
- if (socket_fd < 0) {
- RTE_LOG(ERR, PDUMP,
- "client socket(): %s:pid(%d):tid(%u), %s:%d\n",
- strerror(errno), pid, rte_sys_gettid(),
- __func__, __LINE__);
- rte_errno = errno;
- return -1;
- }
-
- ret = pdump_get_socket_path(addr.sun_path, sizeof(addr.sun_path),
- RTE_PDUMP_SOCKET_CLIENT);
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to get client socket path: %s:%d\n",
- __func__, __LINE__);
- rte_errno = errno;
- goto exit;
- }
- addr.sun_family = AF_UNIX;
- addr_len = sizeof(struct sockaddr_un);
-
- do {
- ret = bind(socket_fd, (struct sockaddr *) &addr, addr_len);
- if (ret) {
- RTE_LOG(ERR, PDUMP,
- "client bind(): %s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- rte_errno = errno;
- break;
- }
-
- serv_len = sizeof(struct sockaddr_un);
- memset(&serv_addr, 0, sizeof(serv_addr));
- ret = pdump_get_socket_path(serv_addr.sun_path,
- sizeof(serv_addr.sun_path),
- RTE_PDUMP_SOCKET_SERVER);
- if (ret != 0) {
- RTE_LOG(ERR, PDUMP,
- "Failed to get server socket path: %s:%d\n",
- __func__, __LINE__);
- rte_errno = errno;
- break;
- }
- serv_addr.sun_family = AF_UNIX;
-
- n = sendto(socket_fd, p, sizeof(struct pdump_request), 0,
- (struct sockaddr *)&serv_addr, serv_len);
- if (n < 0) {
- RTE_LOG(ERR, PDUMP,
- "failed to send to server:%s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- rte_errno = errno;
- ret = -1;
- break;
- }
-
- n = recvfrom(socket_fd, &server_resp,
- sizeof(struct pdump_response), 0,
- (struct sockaddr *)&from, &serv_len);
- if (n < 0) {
- RTE_LOG(ERR, PDUMP,
- "failed to recv from server:%s, %s:%d\n",
- strerror(errno), __func__, __LINE__);
- rte_errno = errno;
- ret = -1;
- break;
- }
- ret = server_resp.err_value;
- } while (0);
-
-exit:
- close(socket_fd);
- unlink(addr.sun_path);
- return ret;
-}
-
-static int
pdump_validate_ring_mp(struct rte_ring *ring, struct rte_mempool *mp)
{
if (ring == NULL || mp == NULL) {
@@ -768,36 +490,48 @@ pdump_prepare_client_request(char *device, uint16_t queue,
struct rte_mempool *mp,
void *filter)
{
- int ret;
- struct pdump_request req = {.ver = 1,};
-
- req.flags = flags;
- req.op = operation;
+ int ret = -1;
+ struct rte_mp_msg mp_req, *mp_rep;
+ struct rte_mp_reply mp_reply;
+ struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
+ struct pdump_request *req = (struct pdump_request *)mp_req.param;
+ struct pdump_response *resp;
+
+ req->ver = 1;
+ req->flags = flags;
+ req->op = operation;
if ((operation & ENABLE) != 0) {
- snprintf(req.data.en_v1.device, sizeof(req.data.en_v1.device),
- "%s", device);
- req.data.en_v1.queue = queue;
- req.data.en_v1.ring = ring;
- req.data.en_v1.mp = mp;
- req.data.en_v1.filter = filter;
+ snprintf(req->data.en_v1.device,
+ sizeof(req->data.en_v1.device), "%s", device);
+ req->data.en_v1.queue = queue;
+ req->data.en_v1.ring = ring;
+ req->data.en_v1.mp = mp;
+ req->data.en_v1.filter = filter;
} else {
- snprintf(req.data.dis_v1.device, sizeof(req.data.dis_v1.device),
- "%s", device);
- req.data.dis_v1.queue = queue;
- req.data.dis_v1.ring = NULL;
- req.data.dis_v1.mp = NULL;
- req.data.dis_v1.filter = NULL;
+ snprintf(req->data.dis_v1.device,
+ sizeof(req->data.dis_v1.device), "%s", device);
+ req->data.dis_v1.queue = queue;
+ req->data.dis_v1.ring = NULL;
+ req->data.dis_v1.mp = NULL;
+ req->data.dis_v1.filter = NULL;
+ }
+
+ strlcpy(mp_req.name, PDUMP_MP, RTE_MP_MAX_NAME_LEN);
+ mp_req.len_param = sizeof(*req);
+ mp_req.num_fds = 0;
+ if (rte_mp_request_sync(&mp_req, &mp_reply, &ts) == 0) {
+ mp_rep = &mp_reply.msgs[0];
+ resp = (struct pdump_response *)mp_rep->param;
+ rte_errno = resp->err_value;
+ if (!resp->err_value)
+ ret = 0;
+ free(mp_reply.msgs);
}
- ret = pdump_create_client_socket(&req);
- if (ret < 0) {
+ if (ret < 0)
RTE_LOG(ERR, PDUMP,
"client request for pdump enable/disable failed\n");
- rte_errno = ret;
- return -1;
- }
-
- return 0;
+ return ret;
}
int
@@ -884,30 +618,8 @@ rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
}
int
-rte_pdump_set_socket_dir(const char *path, enum rte_pdump_socktype type)
+rte_pdump_set_socket_dir(const char *path __rte_unused,
+ enum rte_pdump_socktype type __rte_unused)
{
- int ret, count;
-
- if (path != NULL) {
- if (type == RTE_PDUMP_SOCKET_SERVER) {
- count = sizeof(server_socket_dir);
- ret = snprintf(server_socket_dir, count, "%s", path);
- } else {
- count = sizeof(client_socket_dir);
- ret = snprintf(client_socket_dir, count, "%s", path);
- }
-
- if (ret < 0 || ret >= count) {
- RTE_LOG(ERR, PDUMP,
- "Invalid socket path:%s:%d\n",
- __func__, __LINE__);
- if (type == RTE_PDUMP_SOCKET_SERVER)
- server_socket_dir[0] = 0;
- else
- client_socket_dir[0] = 0;
- return -EINVAL;
- }
- }
-
return 0;
}
diff --git a/lib/librte_pdump/rte_pdump.h b/lib/librte_pdump/rte_pdump.h
index a7e83727..673a2b07 100644
--- a/lib/librte_pdump/rte_pdump.h
+++ b/lib/librte_pdump/rte_pdump.h
@@ -37,10 +37,10 @@ enum rte_pdump_socktype {
/**
* Initialize packet capturing handling
*
- * Creates pthread and server socket for handling clients
- * requests to enable/disable rxtx callbacks.
+ * Register the IPC action for communication with the target (primary) process.
*
* @param path
+ * This parameter is going to be deprecated; it was used for specifying the
* directory path for server socket.
*
* @return
@@ -52,7 +52,7 @@ rte_pdump_init(const char *path);
/**
 * Uninitialize packet capturing handling
*
- * Cancels pthread, close server socket, removes server socket address.
+ * Unregister the IPC action for communication with the target (primary) process.
*
* @return
* 0 on success, -1 on error
@@ -163,6 +163,7 @@ rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
uint32_t flags);
/**
+ * @deprecated
* Allows applications to set server and client socket paths.
 * If the specified path is null, the default path will be selected, i.e.
 * "/var/run/" for the root user and "$HOME" for non-root users.
@@ -181,7 +182,7 @@ rte_pdump_disable_by_deviceid(char *device_id, uint16_t queue,
* 0 on success, -EINVAL on error
*
*/
-int
+__rte_deprecated int
rte_pdump_set_socket_dir(const char *path, enum rte_pdump_socktype type);
#ifdef __cplusplus
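
Editor's note: with the move to EAL IPC, initialization reduces to registering the action; the path argument is now ignored and deprecated. A usage sketch (port/queue numbers are illustrative; ring and mempool creation is elided, and the two calls normally run in different processes, as the comments note):

    #include <rte_pdump.h>

    static int
    capture_start(struct rte_ring *ring, struct rte_mempool *pool)
    {
        if (rte_pdump_init(NULL) < 0) /* in the primary process */
            return -1;

        /* From the capturing (secondary) process: */
        return rte_pdump_enable(0 /* port */, 0 /* queue */,
                                RTE_PDUMP_FLAG_RXTX, ring, pool, NULL);
    }
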
diff --git a/lib/librte_pipeline/Makefile b/lib/librte_pipeline/Makefile
index e94fbc02..84afe98c 100644
--- a/lib/librte_pipeline/Makefile
+++ b/lib/librte_pipeline/Makefile
@@ -8,10 +8,11 @@ include $(RTE_SDK)/mk/rte.vars.mk
#
LIB = librte_pipeline.a
+CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_table
-LDLIBS += -lrte_port
+LDLIBS += -lrte_port -lrte_meter -lrte_sched
EXPORT_MAP := rte_pipeline_version.map
@@ -21,8 +22,10 @@ LIBABIVER := 3
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := rte_pipeline.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += rte_port_in_action.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += rte_table_action.c
# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_PIPELINE)-include += rte_pipeline.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_PIPELINE)-include += rte_pipeline.h rte_port_in_action.h rte_table_action.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pipeline/meson.build b/lib/librte_pipeline/meson.build
index a35b6220..dc16ab42 100644
--- a/lib/librte_pipeline/meson.build
+++ b/lib/librte_pipeline/meson.build
@@ -2,6 +2,7 @@
# Copyright(c) 2017 Intel Corporation
version = 3
-sources = files('rte_pipeline.c')
-headers = files('rte_pipeline.h')
-deps += ['port', 'table']
+allow_experimental_apis = true
+sources = files('rte_pipeline.c', 'rte_port_in_action.c', 'rte_table_action.c')
+headers = files('rte_pipeline.h', 'rte_port_in_action.h', 'rte_table_action.h')
+deps += ['port', 'table', 'meter', 'sched']
diff --git a/lib/librte_pipeline/rte_pipeline_version.map b/lib/librte_pipeline/rte_pipeline_version.map
index e4ee154f..d820b22f 100644
--- a/lib/librte_pipeline/rte_pipeline_version.map
+++ b/lib/librte_pipeline/rte_pipeline_version.map
@@ -45,3 +45,31 @@ DPDK_16.04 {
rte_pipeline_ah_packet_drop;
} DPDK_2.2;
+
+EXPERIMENTAL {
+ global:
+
+ rte_port_in_action_apply;
+ rte_port_in_action_create;
+ rte_port_in_action_free;
+ rte_port_in_action_params_get;
+ rte_port_in_action_profile_action_register;
+ rte_port_in_action_profile_create;
+ rte_port_in_action_profile_free;
+ rte_port_in_action_profile_freeze;
+ rte_table_action_apply;
+ rte_table_action_create;
+ rte_table_action_dscp_table_update;
+ rte_table_action_free;
+ rte_table_action_meter_profile_add;
+ rte_table_action_meter_profile_delete;
+ rte_table_action_meter_read;
+ rte_table_action_profile_action_register;
+ rte_table_action_profile_create;
+ rte_table_action_profile_free;
+ rte_table_action_profile_freeze;
+ rte_table_action_table_params_get;
+ rte_table_action_stats_read;
+ rte_table_action_time_read;
+ rte_table_action_ttl_read;
+};
diff --git a/lib/librte_pipeline/rte_port_in_action.c b/lib/librte_pipeline/rte_port_in_action.c
new file mode 100644
index 00000000..e3b00df8
--- /dev/null
+++ b/lib/librte_pipeline/rte_port_in_action.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include "rte_port_in_action.h"
+
+/**
+ * RTE_PORT_IN_ACTION_FLTR
+ */
+static int
+fltr_cfg_check(struct rte_port_in_action_fltr_config *cfg)
+{
+ if (cfg == NULL)
+ return -1;
+
+ return 0;
+}
+
+struct fltr_data {
+ uint32_t port_id;
+};
+
+static void
+fltr_init(struct fltr_data *data,
+ struct rte_port_in_action_fltr_config *cfg)
+{
+ data->port_id = cfg->port_id;
+}
+
+static int
+fltr_apply(struct fltr_data *data,
+ struct rte_port_in_action_fltr_params *p)
+{
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ data->port_id = p->port_id;
+
+ return 0;
+}
+
+/**
+ * RTE_PORT_IN_ACTION_LB
+ */
+static int
+lb_cfg_check(struct rte_port_in_action_lb_config *cfg)
+{
+ if ((cfg == NULL) ||
+ (cfg->key_size < RTE_PORT_IN_ACTION_LB_KEY_SIZE_MIN) ||
+ (cfg->key_size > RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX) ||
+ (!rte_is_power_of_2(cfg->key_size)) ||
+ (cfg->f_hash == NULL))
+ return -1;
+
+ return 0;
+}
+
+struct lb_data {
+ uint32_t port_id[RTE_PORT_IN_ACTION_LB_TABLE_SIZE];
+};
+
+static void
+lb_init(struct lb_data *data,
+ struct rte_port_in_action_lb_config *cfg)
+{
+ memcpy(data->port_id, cfg->port_id, sizeof(cfg->port_id));
+}
+
+static int
+lb_apply(struct lb_data *data,
+ struct rte_port_in_action_lb_params *p)
+{
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ memcpy(data->port_id, p->port_id, sizeof(p->port_id));
+
+ return 0;
+}
+
+/**
+ * Action profile
+ */
+static int
+action_valid(enum rte_port_in_action_type action)
+{
+ switch (action) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ case RTE_PORT_IN_ACTION_LB:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+#define RTE_PORT_IN_ACTION_MAX 64
+
+struct ap_config {
+ uint64_t action_mask;
+ struct rte_port_in_action_fltr_config fltr;
+ struct rte_port_in_action_lb_config lb;
+};
+
+static size_t
+action_cfg_size(enum rte_port_in_action_type action)
+{
+ switch (action) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ return sizeof(struct rte_port_in_action_fltr_config);
+ case RTE_PORT_IN_ACTION_LB:
+ return sizeof(struct rte_port_in_action_lb_config);
+ default:
+ return 0;
+ }
+}
+
+static void*
+action_cfg_get(struct ap_config *ap_config,
+ enum rte_port_in_action_type type)
+{
+ switch (type) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ return &ap_config->fltr;
+
+ case RTE_PORT_IN_ACTION_LB:
+ return &ap_config->lb;
+
+ default:
+ return NULL;
+ }
+}
+
+static void
+action_cfg_set(struct ap_config *ap_config,
+ enum rte_port_in_action_type type,
+ void *action_cfg)
+{
+ void *dst = action_cfg_get(ap_config, type);
+
+ if (dst)
+ memcpy(dst, action_cfg, action_cfg_size(type));
+
+ ap_config->action_mask |= 1LLU << type;
+}
+
+struct ap_data {
+ size_t offset[RTE_PORT_IN_ACTION_MAX];
+ size_t total_size;
+};
+
+static size_t
+action_data_size(enum rte_port_in_action_type action,
+ struct ap_config *ap_config __rte_unused)
+{
+ switch (action) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ return sizeof(struct fltr_data);
+
+ case RTE_PORT_IN_ACTION_LB:
+ return sizeof(struct lb_data);
+
+ default:
+ return 0;
+ }
+}
+
+static void
+action_data_offset_set(struct ap_data *ap_data,
+ struct ap_config *ap_config)
+{
+ uint64_t action_mask = ap_config->action_mask;
+ size_t offset;
+ uint32_t action;
+
+ memset(ap_data->offset, 0, sizeof(ap_data->offset));
+
+ offset = 0;
+ for (action = 0; action < RTE_PORT_IN_ACTION_MAX; action++)
+ if (action_mask & (1LLU << action)) {
+ ap_data->offset[action] = offset;
+ offset += action_data_size((enum rte_port_in_action_type)action,
+ ap_config);
+ }
+
+ ap_data->total_size = offset;
+}
+
+struct rte_port_in_action_profile {
+ struct ap_config cfg;
+ struct ap_data data;
+ int frozen;
+};
+
+struct rte_port_in_action_profile *
+rte_port_in_action_profile_create(uint32_t socket_id)
+{
+ struct rte_port_in_action_profile *ap;
+
+ /* Memory allocation */
+ ap = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_port_in_action_profile),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (ap == NULL)
+ return NULL;
+
+ return ap;
+}
+
+int
+rte_port_in_action_profile_action_register(struct rte_port_in_action_profile *profile,
+ enum rte_port_in_action_type type,
+ void *action_config)
+{
+ int status;
+
+ /* Check input arguments */
+ if ((profile == NULL) ||
+ profile->frozen ||
+ (action_valid(type) == 0) ||
+ (profile->cfg.action_mask & (1LLU << type)) ||
+ ((action_cfg_size(type) == 0) && action_config) ||
+ (action_cfg_size(type) && (action_config == NULL)))
+ return -EINVAL;
+
+ switch (type) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ status = fltr_cfg_check(action_config);
+ break;
+
+ case RTE_PORT_IN_ACTION_LB:
+ status = lb_cfg_check(action_config);
+ break;
+
+ default:
+ status = 0;
+ break;
+ }
+
+ if (status)
+ return status;
+
+ /* Action enable */
+ action_cfg_set(&profile->cfg, type, action_config);
+
+ return 0;
+}
+
+int
+rte_port_in_action_profile_freeze(struct rte_port_in_action_profile *profile)
+{
+ if (profile->frozen)
+ return -EBUSY;
+
+ action_data_offset_set(&profile->data, &profile->cfg);
+ profile->frozen = 1;
+
+ return 0;
+}
+
+int
+rte_port_in_action_profile_free(struct rte_port_in_action_profile *profile)
+{
+ if (profile == NULL)
+ return 0;
+
+ free(profile);
+ return 0;
+}
+
+/**
+ * Action
+ */
+struct rte_port_in_action {
+ struct ap_config cfg;
+ struct ap_data data;
+ uint8_t memory[0] __rte_cache_aligned;
+};
+
+static __rte_always_inline void *
+action_data_get(struct rte_port_in_action *action,
+ enum rte_port_in_action_type type)
+{
+ size_t offset = action->data.offset[type];
+
+ return &action->memory[offset];
+}
+
+static void
+action_data_init(struct rte_port_in_action *action,
+ enum rte_port_in_action_type type)
+{
+ void *data = action_data_get(action, type);
+
+ switch (type) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ fltr_init(data, &action->cfg.fltr);
+ return;
+
+ case RTE_PORT_IN_ACTION_LB:
+ lb_init(data, &action->cfg.lb);
+ return;
+
+ default:
+ return;
+ }
+}
+
+struct rte_port_in_action *
+rte_port_in_action_create(struct rte_port_in_action_profile *profile,
+ uint32_t socket_id)
+{
+ struct rte_port_in_action *action;
+ size_t size;
+ uint32_t i;
+
+ /* Check input arguments */
+ if ((profile == NULL) ||
+ (profile->frozen == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = sizeof(struct rte_port_in_action) + profile->data.total_size;
+ size = RTE_CACHE_LINE_ROUNDUP(size);
+
+ action = rte_zmalloc_socket(NULL,
+ size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (action == NULL)
+ return NULL;
+
+ /* Initialization */
+ memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
+ memcpy(&action->data, &profile->data, sizeof(profile->data));
+
+ for (i = 0; i < RTE_PORT_IN_ACTION_MAX; i++)
+ if (action->cfg.action_mask & (1LLU << i))
+ action_data_init(action,
+ (enum rte_port_in_action_type)i);
+
+ return action;
+}
+
+int
+rte_port_in_action_apply(struct rte_port_in_action *action,
+ enum rte_port_in_action_type type,
+ void *action_params)
+{
+ void *action_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ (action_valid(type) == 0) ||
+ ((action->cfg.action_mask & (1LLU << type)) == 0) ||
+ (action_params == NULL))
+ return -EINVAL;
+
+ /* Data update */
+ action_data = action_data_get(action, type);
+
+ switch (type) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ return fltr_apply(action_data,
+ action_params);
+
+ case RTE_PORT_IN_ACTION_LB:
+ return lb_apply(action_data,
+ action_params);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+ah_filter_on_match(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts,
+ void *arg)
+{
+ struct rte_port_in_action *action = arg;
+ struct rte_port_in_action_fltr_config *cfg = &action->cfg.fltr;
+ uint64_t *key_mask = (uint64_t *) cfg->key_mask;
+ uint64_t *key = (uint64_t *) cfg->key;
+ uint32_t key_offset = cfg->key_offset;
+ struct fltr_data *data = action_data_get(action,
+ RTE_PORT_IN_ACTION_FLTR);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ key_offset);
+
+ uint64_t xor0 = (pkt_key[0] & key_mask[0]) ^ key[0];
+ uint64_t xor1 = (pkt_key[1] & key_mask[1]) ^ key[1];
+ uint64_t or = xor0 | xor1;
+
+ if (or == 0) {
+ rte_pipeline_ah_packet_hijack(p, 1LLU << i);
+ rte_pipeline_port_out_packet_insert(p,
+ data->port_id, pkt);
+ }
+ }
+
+ return 0;
+}
+
+static int
+ah_filter_on_mismatch(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts,
+ void *arg)
+{
+ struct rte_port_in_action *action = arg;
+ struct rte_port_in_action_fltr_config *cfg = &action->cfg.fltr;
+ uint64_t *key_mask = (uint64_t *) cfg->key_mask;
+ uint64_t *key = (uint64_t *) cfg->key;
+ uint32_t key_offset = cfg->key_offset;
+ struct fltr_data *data = action_data_get(action,
+ RTE_PORT_IN_ACTION_FLTR);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ key_offset);
+
+ uint64_t xor0 = (pkt_key[0] & key_mask[0]) ^ key[0];
+ uint64_t xor1 = (pkt_key[1] & key_mask[1]) ^ key[1];
+ uint64_t or = xor0 | xor1;
+
+ if (or) {
+ rte_pipeline_ah_packet_hijack(p, 1LLU << i);
+ rte_pipeline_port_out_packet_insert(p,
+ data->port_id, pkt);
+ }
+ }
+
+ return 0;
+}
+
+static int
+ah_lb(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts,
+ void *arg)
+{
+ struct rte_port_in_action *action = arg;
+ struct rte_port_in_action_lb_config *cfg = &action->cfg.lb;
+ struct lb_data *data = action_data_get(action, RTE_PORT_IN_ACTION_LB);
+ uint64_t pkt_mask = RTE_LEN2MASK(n_pkts, uint64_t);
+ uint32_t i;
+
+ rte_pipeline_ah_packet_hijack(p, pkt_mask);
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ cfg->key_offset);
+
+ uint64_t digest = cfg->f_hash(pkt_key,
+ cfg->key_mask,
+ cfg->key_size,
+ cfg->seed);
+ uint64_t pos = digest & (RTE_PORT_IN_ACTION_LB_TABLE_SIZE - 1);
+ uint32_t port_id = data->port_id[pos];
+
+ rte_pipeline_port_out_packet_insert(p, port_id, pkt);
+ }
+
+ return 0;
+}
+
+static rte_pipeline_port_in_action_handler
+ah_selector(struct rte_port_in_action *action)
+{
+ if (action->cfg.action_mask == 0)
+ return NULL;
+
+ if (action->cfg.action_mask == 1LLU << RTE_PORT_IN_ACTION_FLTR)
+ return (action->cfg.fltr.filter_on_match) ?
+ ah_filter_on_match : ah_filter_on_mismatch;
+
+ if (action->cfg.action_mask == 1LLU << RTE_PORT_IN_ACTION_LB)
+ return ah_lb;
+
+ return NULL;
+}
+
+int
+rte_port_in_action_params_get(struct rte_port_in_action *action,
+ struct rte_pipeline_port_in_params *params)
+{
+ rte_pipeline_port_in_action_handler f_action;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ (params == NULL))
+ return -EINVAL;
+
+ f_action = ah_selector(action);
+
+ /* Fill in params */
+ params->f_action = f_action;
+ params->arg_ah = (f_action) ? action : NULL;
+
+ return 0;
+}
+
+int
+rte_port_in_action_free(struct rte_port_in_action *action)
+{
+ if (action == NULL)
+ return 0;
+
+ rte_free(action);
+
+ return 0;
+}
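
Editor's note: as documented in the header that follows, port weights for the load-balance action are expressed by repetition in the 16-slot table. A sketch giving output port 1 three quarters of the hash buckets and port 2 the rest:

    #include <rte_port_in_action.h>

    static int
    lb_reweight(struct rte_port_in_action *action)
    {
        struct rte_port_in_action_lb_params p;
        uint32_t i;

        /* 12/16 of hash buckets -> port 1, 4/16 -> port 2. */
        for (i = 0; i < RTE_PORT_IN_ACTION_LB_TABLE_SIZE; i++)
            p.port_id[i] = (i < 12) ? 1 : 2;

        return rte_port_in_action_apply(action, RTE_PORT_IN_ACTION_LB, &p);
    }
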
diff --git a/lib/librte_pipeline/rte_port_in_action.h b/lib/librte_pipeline/rte_port_in_action.h
new file mode 100644
index 00000000..0a85e4e0
--- /dev/null
+++ b/lib/librte_pipeline/rte_port_in_action.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_RTE_PORT_IN_ACTION_H__
+#define __INCLUDE_RTE_PORT_IN_ACTION_H__
+
+/**
+ * @file
+ * RTE Pipeline Input Port Actions
+ *
+ * This API provides a common set of actions for pipeline input ports to speed
+ * up application development.
+ *
+ * Each pipeline input port can be assigned an action handler to be executed
+ * on every input packet during the pipeline execution. The pipeline library
+ * allows the user to define their own input port actions by providing a
+ * customized input port action handler. While that is still possible, this
+ * API is intended to provide a quicker development alternative for a set of
+ * predefined actions.
+ *
+ * The typical steps to use this API are:
+ * - Define an input port action profile. This is a configuration template that
+ * can potentially be shared by multiple input ports from the same or
+ * different pipelines, with different input ports from the same pipeline
+ * able to use different action profiles. For every input port using a given
+ * action profile, the profile defines the set of actions and the action
+ * configuration to be executed by the input port. API functions:
+ * rte_port_in_action_profile_create(),
+ * rte_port_in_action_profile_action_register(),
+ * rte_port_in_action_profile_freeze().
+ *
+ * - Instantiate the input port action profile to create input port action
+ * objects. Each pipeline input port has its own action object.
+ * API functions: rte_port_in_action_create().
+ *
+ * - Use the input port action object to generate the input port action handler
+ * invoked by the pipeline. API functions:
+ * rte_port_in_action_params_get().
+ *
+ * - Use the input port action object to generate the internal data structures
+ * used by the input port action handler based on given action parameters.
+ * API functions: rte_port_in_action_apply().
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+#include <rte_table_hash.h>
+
+#include "rte_pipeline.h"
+
+/** Input port actions. */
+enum rte_port_in_action_type {
+ /** Filter selected input packets. */
+ RTE_PORT_IN_ACTION_FLTR = 0,
+
+ /** Load balance. */
+ RTE_PORT_IN_ACTION_LB,
+};
+
+/**
+ * RTE_PORT_IN_ACTION_FLTR
+ */
+/** Filter key size (number of bytes) */
+#define RTE_PORT_IN_ACTION_FLTR_KEY_SIZE 16
+
+/** Filter action configuration (per action profile). */
+struct rte_port_in_action_fltr_config {
+ /** Key offset within the input packet buffer. Offset 0 points to the
+ * first byte of the MBUF structure.
+ */
+ uint32_t key_offset;
+
+ /** Key mask. */
+ uint8_t key_mask[RTE_PORT_IN_ACTION_FLTR_KEY_SIZE];
+
+ /** Key value. */
+ uint8_t key[RTE_PORT_IN_ACTION_FLTR_KEY_SIZE];
+
+ /** When non-zero, all the input packets that match the *key* (with the
+ * *key_mask* applied) are sent to the pipeline output port *port_id*.
+ * When zero, all the input packets that do NOT match the *key* (with
+ * *key_mask* applied) are sent to the pipeline output port *port_id*.
+ */
+ int filter_on_match;
+
+ /** Pipeline output port ID to send the filtered input packets to.
+ * Can be updated later.
+ *
+ * @see struct rte_port_in_action_fltr_params
+ */
+ uint32_t port_id;
+};
+
+/** Filter action parameters (per action). */
+struct rte_port_in_action_fltr_params {
+ /** Pipeline output port ID to send the filtered input packets to. */
+ uint32_t port_id;
+};
+
+/**
+ * RTE_PORT_IN_ACTION_LB
+ */
+/** Load balance key size min (number of bytes). */
+#define RTE_PORT_IN_ACTION_LB_KEY_SIZE_MIN 8
+
+/** Load balance key size max (number of bytes). */
+#define RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX 64
+
+/** Load balance table size. */
+#define RTE_PORT_IN_ACTION_LB_TABLE_SIZE 16
+
+/** Load balance action configuration (per action profile). */
+struct rte_port_in_action_lb_config {
+ /** Key size (number of bytes). */
+ uint32_t key_size;
+
+ /** Key offset within the input packet buffer. Offset 0 points to the
+ * first byte of the MBUF structure.
+ */
+ uint32_t key_offset;
+
+	/** Key mask (*key_size* bytes are valid). */
+ uint8_t key_mask[RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX];
+
+ /** Hash function. */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed value for *f_hash*. */
+ uint64_t seed;
+
+ /** Table defining the weight of each pipeline output port. The weights
+ * are set in 1/RTE_PORT_IN_ACTION_LB_TABLE_SIZE increments. To assign a
+ * weight of N/RTE_PORT_IN_ACTION_LB_TABLE_SIZE to a given output port
+ * (0 <= N <= RTE_PORT_IN_ACTION_LB_TABLE_SIZE), the output port needs
+ * to show up exactly N times in this table. Can be updated later.
+ *
+ * @see struct rte_port_in_action_lb_params
+ */
+ uint32_t port_id[RTE_PORT_IN_ACTION_LB_TABLE_SIZE];
+};
+
+/** Load balance action parameters (per action). */
+struct rte_port_in_action_lb_params {
+ /** Table defining the weight of each pipeline output port. The weights
+ * are set in 1/RTE_PORT_IN_ACTION_LB_TABLE_SIZE increments. To assign a
+ * weight of N/RTE_PORT_IN_ACTION_LB_TABLE_SIZE to a given output port
+ * (0 <= N <= RTE_PORT_IN_ACTION_LB_TABLE_SIZE), the output port needs
+ * to show up exactly N times in this table.
+ */
+ uint32_t port_id[RTE_PORT_IN_ACTION_LB_TABLE_SIZE];
+};
+
+/**
+ * Input port action profile.
+ */
+struct rte_port_in_action_profile;
+
+/**
+ * Input port action profile create.
+ *
+ * @param[in] socket_id
+ * CPU socket ID for the internal data structures memory allocation.
+ * @return
+ * Input port action profile handle on success, NULL otherwise.
+ */
+struct rte_port_in_action_profile * __rte_experimental
+rte_port_in_action_profile_create(uint32_t socket_id);
+
+/**
+ * Input port action profile free.
+ *
+ * @param[in] profile
+ * Input port action profile handle (needs to be valid).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_profile_free(struct rte_port_in_action_profile *profile);
+
+/**
+ * Input port action profile action register.
+ *
+ * @param[in] profile
+ * Input port action profile handle (needs to be valid and not in frozen
+ * state).
+ * @param[in] type
+ * Specific input port action to be registered for *profile*.
+ * @param[in] action_config
+ * Configuration for the *type* action.
+ * If struct rte_port_in_action_*type*_config is defined, it needs to point to
+ * a valid instance of this structure, otherwise it needs to be set to NULL.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_profile_action_register(
+ struct rte_port_in_action_profile *profile,
+ enum rte_port_in_action_type type,
+ void *action_config);
+
+/**
+ * Input port action profile freeze.
+ *
+ * Once this function is called successfully, the given profile enters the
+ * frozen state with the following immediate effects: no more actions can be
+ * registered for this profile, and the profile can now be instantiated to
+ * create input port action objects.
+ *
+ * @param[in] profile
+ * Input port profile action handle (needs to be valid and not in frozen
+ * state).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ *
+ * @see rte_port_in_action_create()
+ */
+int __rte_experimental
+rte_port_in_action_profile_freeze(struct rte_port_in_action_profile *profile);
+
+/**
+ * Input port action.
+ */
+struct rte_port_in_action;
+
+/**
+ * Input port action create.
+ *
+ * Instantiates the given input port action profile to create an input port
+ * action object.
+ *
+ * @param[in] profile
+ * Input port profile action handle (needs to be valid and in frozen state).
+ * @param[in] socket_id
+ * CPU socket ID where the internal data structures required by the new input
+ * port action object should be allocated.
+ * @return
+ * Handle to input port action object on success, NULL on error.
+ */
+struct rte_port_in_action * __rte_experimental
+rte_port_in_action_create(struct rte_port_in_action_profile *profile,
+ uint32_t socket_id);
+
+/**
+ * Input port action free.
+ *
+ * @param[in] action
+ * Handle to input port action object (needs to be valid).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_free(struct rte_port_in_action *action);
+
+/**
+ * Input port params get.
+ *
+ * @param[in] action
+ * Handle to input port action object (needs to be valid).
+ * @param[inout] params
+ * Pipeline input port parameters (needs to be pre-allocated).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_params_get(struct rte_port_in_action *action,
+ struct rte_pipeline_port_in_params *params);
+
+/**
+ * Input port action apply.
+ *
+ * @param[in] action
+ * Handle to input port action object (needs to be valid).
+ * @param[in] type
+ * Specific input port action previously registered for the input port action
+ * profile of the *action* object.
+ * @param[in] action_params
+ * Parameters for the *type* action.
+ * If struct rte_port_in_action_*type*_params is defined, it needs to point to
+ * a valid instance of this structure, otherwise it needs to be set to NULL.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_apply(struct rte_port_in_action *action,
+ enum rte_port_in_action_type type,
+ void *action_params);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_PORT_IN_ACTION_H__ */
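
Editor's note: putting together the four steps listed at the top of this header, a sketch of building a load-balance input port action; my_hash is a hypothetical callback with the rte_table_hash_op_hash signature, and the key offset/size are illustrative:

    #include <string.h>
    #include <rte_port_in_action.h>

    extern uint64_t my_hash(void *key, void *key_mask,
                            uint32_t key_size, uint64_t seed);

    static struct rte_port_in_action *
    lb_action_build(uint32_t socket_id)
    {
        struct rte_port_in_action_profile *prof;
        struct rte_port_in_action_lb_config lb;
        uint32_t i;

        memset(&lb, 0, sizeof(lb));
        lb.key_size = 16;       /* power of 2 within [8, 64] */
        lb.key_offset = 128;    /* illustrative mbuf metadata offset */
        memset(lb.key_mask, 0xFF, lb.key_size);
        lb.f_hash = my_hash;
        for (i = 0; i < RTE_PORT_IN_ACTION_LB_TABLE_SIZE; i++)
            lb.port_id[i] = i % 4; /* even spread over 4 output ports */

        prof = rte_port_in_action_profile_create(socket_id);
        if (prof == NULL)
            return NULL;
        if (rte_port_in_action_profile_action_register(prof,
                RTE_PORT_IN_ACTION_LB, &lb) ||
            rte_port_in_action_profile_freeze(prof))
            return NULL;        /* profile cleanup elided for brevity */

        /* create() copies the frozen profile into the action object. */
        return rte_port_in_action_create(prof, socket_id);
    }

The resulting object is then passed to rte_port_in_action_params_get() to fill struct rte_pipeline_port_in_params before the pipeline port is created.
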
diff --git a/lib/librte_pipeline/rte_table_action.c b/lib/librte_pipeline/rte_table_action.c
new file mode 100644
index 00000000..83ffa5de
--- /dev/null
+++ b/lib/librte_pipeline/rte_table_action.c
@@ -0,0 +1,2386 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_esp.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+
+#include "rte_table_action.h"
+
+#define rte_htons rte_cpu_to_be_16
+#define rte_htonl rte_cpu_to_be_32
+
+#define rte_ntohs rte_be_to_cpu_16
+#define rte_ntohl rte_be_to_cpu_32
+
+/**
+ * RTE_TABLE_ACTION_FWD
+ */
+#define fwd_data rte_pipeline_table_entry
+
+static int
+fwd_apply(struct fwd_data *data,
+ struct rte_table_action_fwd_params *p)
+{
+ data->action = p->action;
+
+ if (p->action == RTE_PIPELINE_ACTION_PORT)
+ data->port_id = p->id;
+
+ if (p->action == RTE_PIPELINE_ACTION_TABLE)
+ data->table_id = p->id;
+
+ return 0;
+}
+
+/**
+ * RTE_TABLE_ACTION_LB
+ */
+static int
+lb_cfg_check(struct rte_table_action_lb_config *cfg)
+{
+ if ((cfg == NULL) ||
+ (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
+ (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
+ (!rte_is_power_of_2(cfg->key_size)) ||
+ (cfg->f_hash == NULL))
+ return -1;
+
+ return 0;
+}
+
+struct lb_data {
+ uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
+} __attribute__((__packed__));
+
+static int
+lb_apply(struct lb_data *data,
+ struct rte_table_action_lb_params *p)
+{
+ memcpy(data->out, p->out, sizeof(data->out));
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_lb(struct rte_mbuf *mbuf,
+ struct lb_data *data,
+ struct rte_table_action_lb_config *cfg)
+{
+ uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
+ uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
+ uint64_t digest, pos;
+ uint32_t out_val;
+
+ digest = cfg->f_hash(pkt_key,
+ cfg->key_mask,
+ cfg->key_size,
+ cfg->seed);
+ pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
+ out_val = data->out[pos];
+
+ *out = out_val;
+}
+
+/**
+ * RTE_TABLE_ACTION_MTR
+ */
+static int
+mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
+{
+ if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
+ ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
+ (mtr->n_bytes_enabled != 0))
+ return -ENOTSUP;
+ return 0;
+}
+
+#define MBUF_SCHED_QUEUE_TC_COLOR(queue, tc, color) \
+ ((uint16_t)((((uint64_t)(queue)) & 0x3) | \
+ ((((uint64_t)(tc)) & 0x3) << 2) | \
+ ((((uint64_t)(color)) & 0x3) << 4)))
+
+#define MBUF_SCHED_COLOR(sched, color) \
+ (((sched) & (~0x30LLU)) | ((color) << 4))
+
+struct mtr_trtcm_data {
+ struct rte_meter_trtcm trtcm;
+ uint64_t stats[e_RTE_METER_COLORS];
+} __attribute__((__packed__));
+
+#define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
+ (((data)->stats[e_RTE_METER_GREEN] & 0xF8LLU) >> 3)
+
+static void
+mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
+ uint32_t profile_id)
+{
+ data->stats[e_RTE_METER_GREEN] &= ~0xF8LLU;
+ data->stats[e_RTE_METER_GREEN] |= (profile_id % 32) << 3;
+}
+
+#define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
+ (((data)->stats[(color)] & 4LLU) >> 2)
+
+#define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
+ ((enum rte_meter_color)((data)->stats[(color)] & 3LLU))
+
+static void
+mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
+ enum rte_meter_color color,
+ enum rte_table_action_policer action)
+{
+ if (action == RTE_TABLE_ACTION_POLICER_DROP) {
+ data->stats[color] |= 4LLU;
+ } else {
+ data->stats[color] &= ~7LLU;
+ data->stats[color] |= color & 3LLU;
+ }
+}
+
+static uint64_t
+mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
+ enum rte_meter_color color)
+{
+ return data->stats[color] >> 8;
+}
+
+static void
+mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
+ enum rte_meter_color color)
+{
+ data->stats[color] &= 0xFFLU;
+}
+
+#define MTR_TRTCM_DATA_STATS_INC(data, color) \
+ ((data)->stats[(color)] += (1LLU << 8))
+
+static size_t
+mtr_data_size(struct rte_table_action_mtr_config *mtr)
+{
+ return mtr->n_tc * sizeof(struct mtr_trtcm_data);
+}
+
+struct dscp_table_entry_data {
+ enum rte_meter_color color;
+ uint16_t tc;
+ uint16_t queue_tc_color;
+};
+
+struct dscp_table_data {
+ struct dscp_table_entry_data entry[64];
+};
+
+struct meter_profile_data {
+ struct rte_meter_trtcm_profile profile;
+ uint32_t profile_id;
+ int valid;
+};
+
+static struct meter_profile_data *
+meter_profile_data_find(struct meter_profile_data *mp,
+ uint32_t mp_size,
+ uint32_t profile_id)
+{
+ uint32_t i;
+
+ for (i = 0; i < mp_size; i++) {
+ struct meter_profile_data *mp_data = &mp[i];
+
+ if (mp_data->valid && (mp_data->profile_id == profile_id))
+ return mp_data;
+ }
+
+ return NULL;
+}
+
+static struct meter_profile_data *
+meter_profile_data_find_unused(struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+
+ for (i = 0; i < mp_size; i++) {
+ struct meter_profile_data *mp_data = &mp[i];
+
+ if (!mp_data->valid)
+ return mp_data;
+ }
+
+ return NULL;
+}
+
+static int
+mtr_apply_check(struct rte_table_action_mtr_params *p,
+ struct rte_table_action_mtr_config *cfg,
+ struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+
+ if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
+ return -EINVAL;
+
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
+ struct meter_profile_data *mp_data;
+
+ if ((p->tc_mask & (1LLU << i)) == 0)
+ continue;
+
+ mp_data = meter_profile_data_find(mp,
+ mp_size,
+ p_tc->meter_profile_id);
+ if (!mp_data)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mtr_apply(struct mtr_trtcm_data *data,
+ struct rte_table_action_mtr_params *p,
+ struct rte_table_action_mtr_config *cfg,
+ struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+ int status;
+
+ /* Check input arguments */
+ status = mtr_apply_check(p, cfg, mp, mp_size);
+ if (status)
+ return status;
+
+ /* Apply */
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
+ struct mtr_trtcm_data *data_tc = &data[i];
+ struct meter_profile_data *mp_data;
+
+ if ((p->tc_mask & (1LLU << i)) == 0)
+ continue;
+
+ /* Find profile */
+ mp_data = meter_profile_data_find(mp,
+ mp_size,
+ p_tc->meter_profile_id);
+ if (!mp_data)
+ return -EINVAL;
+
+ memset(data_tc, 0, sizeof(*data_tc));
+
+ /* Meter object */
+ status = rte_meter_trtcm_config(&data_tc->trtcm,
+ &mp_data->profile);
+ if (status)
+ return status;
+
+ /* Meter profile */
+ mtr_trtcm_data_meter_profile_id_set(data_tc,
+ mp_data - mp);
+
+ /* Policer actions */
+ mtr_trtcm_data_policer_action_set(data_tc,
+ e_RTE_METER_GREEN,
+ p_tc->policer[e_RTE_METER_GREEN]);
+
+ mtr_trtcm_data_policer_action_set(data_tc,
+ e_RTE_METER_YELLOW,
+ p_tc->policer[e_RTE_METER_YELLOW]);
+
+ mtr_trtcm_data_policer_action_set(data_tc,
+ e_RTE_METER_RED,
+ p_tc->policer[e_RTE_METER_RED]);
+ }
+
+ return 0;
+}
+
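+/*
+ * Per-packet meter work: resolve the traffic class and input color from
+ * the DSCP table, run the color-aware trTCM check, bump the per-color
+ * packet counter, then apply the policer verdict (drop mask and/or
+ * re-colored mbuf scheduler field).
+ */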
+static __rte_always_inline uint64_t
+pkt_work_mtr(struct rte_mbuf *mbuf,
+ struct mtr_trtcm_data *data,
+ struct dscp_table_data *dscp_table,
+ struct meter_profile_data *mp,
+ uint64_t time,
+ uint32_t dscp,
+ uint16_t total_length)
+{
+ uint64_t drop_mask, sched;
+ uint64_t *sched_ptr = (uint64_t *) &mbuf->hash.sched;
+ struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
+ enum rte_meter_color color_in, color_meter, color_policer;
+ uint32_t tc, mp_id;
+
+ tc = dscp_entry->tc;
+ color_in = dscp_entry->color;
+ data += tc;
+ mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
+ sched = *sched_ptr;
+
+ /* Meter */
+ color_meter = rte_meter_trtcm_color_aware_check(
+ &data->trtcm,
+ &mp[mp_id].profile,
+ time,
+ total_length,
+ color_in);
+
+ /* Stats */
+ MTR_TRTCM_DATA_STATS_INC(data, color_meter);
+
+ /* Police */
+ drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
+ color_policer =
+ MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
+ *sched_ptr = MBUF_SCHED_COLOR(sched, color_policer);
+
+ return drop_mask;
+}
+
+/**
+ * RTE_TABLE_ACTION_TM
+ */
+static int
+tm_cfg_check(struct rte_table_action_tm_config *tm)
+{
+ if ((tm->n_subports_per_port == 0) ||
+ (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
+ (tm->n_subports_per_port > UINT16_MAX) ||
+ (tm->n_pipes_per_subport == 0) ||
+ (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct tm_data {
+ uint16_t queue_tc_color;
+ uint16_t subport;
+ uint32_t pipe;
+} __attribute__((__packed__));
+
+static int
+tm_apply_check(struct rte_table_action_tm_params *p,
+ struct rte_table_action_tm_config *cfg)
+{
+ if ((p->subport_id >= cfg->n_subports_per_port) ||
+ (p->pipe_id >= cfg->n_pipes_per_subport))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+tm_apply(struct tm_data *data,
+ struct rte_table_action_tm_params *p,
+ struct rte_table_action_tm_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = tm_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
+ data->queue_tc_color = 0;
+ data->subport = (uint16_t) p->subport_id;
+ data->pipe = p->pipe_id;
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_tm(struct rte_mbuf *mbuf,
+ struct tm_data *data,
+ struct dscp_table_data *dscp_table,
+ uint32_t dscp)
+{
+ struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
+ struct tm_data *sched_ptr = (struct tm_data *) &mbuf->hash.sched;
+ struct tm_data sched;
+
+ sched = *data;
+ sched.queue_tc_color = dscp_entry->queue_tc_color;
+ *sched_ptr = sched;
+}
+
+/**
+ * RTE_TABLE_ACTION_ENCAP
+ */
+static int
+encap_valid(enum rte_table_action_encap_type encap)
+{
+ switch (encap) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int
+encap_cfg_check(struct rte_table_action_encap_config *encap)
+{
+ if ((encap->encap_mask == 0) ||
+ (__builtin_popcountll(encap->encap_mask) != 1))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct encap_ether_data {
+ struct ether_hdr ether;
+} __attribute__((__packed__));
+
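+/* Build the 802.1Q tag control info: PCP (3 bits), DEI (1 bit), VID (12 bits). */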
+#define VLAN(pcp, dei, vid) \
+ ((uint16_t)((((uint64_t)(pcp) & 0x7LLU) << 13) | \
+ (((uint64_t)(dei) & 0x1LLU) << 12) | \
+ ((uint64_t)(vid) & 0xFFFLLU)))
+
+struct encap_vlan_data {
+ struct ether_hdr ether;
+ struct vlan_hdr vlan;
+} __attribute__((__packed__));
+
+struct encap_qinq_data {
+ struct ether_hdr ether;
+ struct vlan_hdr svlan;
+ struct vlan_hdr cvlan;
+} __attribute__((__packed__));
+
+#define ETHER_TYPE_MPLS_UNICAST 0x8847
+
+#define ETHER_TYPE_MPLS_MULTICAST 0x8848
+
+#define MPLS(label, tc, s, ttl) \
+ ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
+ ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
+ ((((uint64_t)(s)) & 0x1LLU) << 8) | \
+ (((uint64_t)(ttl)) & 0xFFLLU)))
+
+struct encap_mpls_data {
+ struct ether_hdr ether;
+ uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
+ uint32_t mpls_count;
+} __attribute__((__packed__));
+
+#define ETHER_TYPE_PPPOE_SESSION 0x8864
+
+#define PPP_PROTOCOL_IP 0x0021
+
+struct pppoe_ppp_hdr {
+ uint16_t ver_type_code;
+ uint16_t session_id;
+ uint16_t length;
+ uint16_t protocol;
+} __attribute__((__packed__));
+
+struct encap_pppoe_data {
+ struct ether_hdr ether;
+ struct pppoe_ppp_hdr pppoe_ppp;
+} __attribute__((__packed__));
+
+static size_t
+encap_data_size(struct rte_table_action_encap_config *encap)
+{
+ switch (encap->encap_mask) {
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
+ return sizeof(struct encap_ether_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
+ return sizeof(struct encap_vlan_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
+ return sizeof(struct encap_qinq_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
+ return sizeof(struct encap_mpls_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return sizeof(struct encap_pppoe_data);
+
+ default:
+ return 0;
+ }
+}
+
+static int
+encap_apply_check(struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg)
+{
+ if ((encap_valid(p->type) == 0) ||
+ ((cfg->encap_mask & (1LLU << p->type)) == 0))
+ return -EINVAL;
+
+ switch (p->type) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ if ((p->mpls.mpls_count == 0) ||
+ (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
+ return -EINVAL;
+
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+encap_ether_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_ether_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ ETHER_TYPE_IPv4 :
+ ETHER_TYPE_IPv6;
+
+ /* Ethernet */
+ ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_vlan_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_vlan_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ ETHER_TYPE_IPv4 :
+ ETHER_TYPE_IPv6;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
+ p->vlan.vlan.dei,
+ p->vlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_qinq_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_qinq_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ ETHER_TYPE_IPv4 :
+ ETHER_TYPE_IPv6;
+
+ /* Ethernet */
+ ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);
+
+ /* SVLAN */
+ d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
+ p->qinq.svlan.dei,
+ p->qinq.svlan.vid));
+ d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
+
+ /* CVLAN */
+ d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
+ p->qinq.cvlan.dei,
+ p->qinq.cvlan.vid));
+ d->cvlan.eth_proto = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_mpls_apply(void *data,
+ struct rte_table_action_encap_params *p)
+{
+ struct encap_mpls_data *d = data;
+ uint16_t ethertype = (p->mpls.unicast) ?
+ ETHER_TYPE_MPLS_UNICAST :
+ ETHER_TYPE_MPLS_MULTICAST;
+ uint32_t i;
+
+ /* Ethernet */
+ ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ethertype);
+
+ /* MPLS */
+ for (i = 0; i < p->mpls.mpls_count - 1; i++)
+ d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
+ p->mpls.mpls[i].tc,
+ 0,
+ p->mpls.mpls[i].ttl));
+
+ d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
+ p->mpls.mpls[i].tc,
+ 1,
+ p->mpls.mpls[i].ttl));
+
+ d->mpls_count = p->mpls.mpls_count;
+ return 0;
+}
+
+static int
+encap_pppoe_apply(void *data,
+ struct rte_table_action_encap_params *p)
+{
+ struct encap_pppoe_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);
+
+ /* PPPoE and PPP */
+ d->pppoe_ppp.ver_type_code = rte_htons(0x1100); /* ver 1, type 1, code 0 */
+ d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
+ d->pppoe_ppp.length = 0; /* not pre-computed */
+ d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
+
+ return 0;
+}
+
+static int
+encap_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg,
+ struct rte_table_action_common_config *common_cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = encap_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ switch (p->type) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ return encap_ether_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ return encap_vlan_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ return encap_qinq_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ return encap_mpls_apply(data, p);
+
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return encap_pppoe_apply(data, p);
+
+ default:
+ return -EINVAL;
+ }
+}
+
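+/*
+ * Prepend an n-byte encapsulation header immediately before *dst* (the IP
+ * header) and return the new start of the packet.
+ */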
+static __rte_always_inline void *
+encap(void *dst, const void *src, size_t n)
+{
+ dst = ((uint8_t *) dst) - n;
+ return rte_memcpy(dst, src, n);
+}
+
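+/*
+ * Per-packet encap work: write the per-rule header in front of the IP
+ * header and pull the mbuf data start back accordingly. Note the data_off
+ * computation assumes a zero-size mbuf private area, i.e. the data buffer
+ * starts right after struct rte_mbuf.
+ */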
+static __rte_always_inline void
+pkt_work_encap(struct rte_mbuf *mbuf,
+ void *data,
+ struct rte_table_action_encap_config *cfg,
+ void *ip,
+ uint16_t total_length,
+ uint32_t ip_offset)
+{
+ switch (cfg->encap_mask) {
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
+ encap(ip, data, sizeof(struct encap_ether_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_ether_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_ether_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
+ encap(ip, data, sizeof(struct encap_vlan_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_vlan_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_vlan_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
+ encap(ip, data, sizeof(struct encap_qinq_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_qinq_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_qinq_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
+ {
+ struct encap_mpls_data *mpls = data;
+ size_t size = sizeof(struct ether_hdr) +
+ mpls->mpls_count * 4;
+
+ encap(ip, data, size);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
+ mbuf->pkt_len = mbuf->data_len = total_length + size;
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
+ {
+ struct encap_pppoe_data *pppoe =
+ encap(ip, data, sizeof(struct encap_pppoe_data));
+ pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_pppoe_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_pppoe_data);
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+/**
+ * RTE_TABLE_ACTION_NAT
+ */
+static int
+nat_cfg_check(struct rte_table_action_nat_config *nat)
+{
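+ /* Only TCP (0x06) and UDP (0x11) are supported. */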
+ if ((nat->proto != 0x06) &&
+ (nat->proto != 0x11))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct nat_ipv4_data {
+ uint32_t addr;
+ uint16_t port;
+} __attribute__((__packed__));
+
+struct nat_ipv6_data {
+ uint8_t addr[16];
+ uint16_t port;
+} __attribute__((__packed__));
+
+static size_t
+nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
+ struct rte_table_action_common_config *common)
+{
+ int ip_version = common->ip_version;
+
+ return (ip_version) ?
+ sizeof(struct nat_ipv4_data) :
+ sizeof(struct nat_ipv6_data);
+}
+
+static int
+nat_apply_check(struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ if ((p->ip_version && (cfg->ip_version == 0)) ||
+ ((p->ip_version == 0) && cfg->ip_version))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nat_apply(void *data,
+ struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = nat_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
+ if (p->ip_version) {
+ struct nat_ipv4_data *d = data;
+
+ d->addr = rte_htonl(p->addr.ipv4);
+ d->port = rte_htons(p->port);
+ } else {
+ struct nat_ipv6_data *d = data;
+
+ memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
+ d->port = rte_htons(p->port);
+ }
+
+ return 0;
+}
+
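+/*
+ * Incremental checksum update (RFC 1624): fold the old address/port out of
+ * the one's complement sum and fold the new one in, avoiding a full
+ * header re-checksum.
+ */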
+static __rte_always_inline uint16_t
+nat_ipv4_checksum_update(uint16_t cksum0,
+ uint32_t ip0,
+ uint32_t ip1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 (one's complement logic) */
+ cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 (one's complement logic) */
+ cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline uint16_t
+nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
+ uint32_t ip0,
+ uint32_t ip1,
+ uint16_t port0,
+ uint16_t port1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 and port0 (one's complement logic) */
+ cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 and port1 (one's complement logic) */
+ cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline uint16_t
+nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
+ uint16_t *ip0,
+ uint16_t *ip1,
+ uint16_t port0,
+ uint16_t port1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 and port0 (one's complement logic) */
+ cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
+ ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 and port1 (one's complement logic) */
+ cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
+ ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline void
+pkt_ipv4_work_nat(struct ipv4_hdr *ip,
+ struct nat_ipv4_data *data,
+ struct rte_table_action_nat_config *cfg)
+{
+ if (cfg->source_nat) {
+ if (cfg->proto == 0x6) {
+ struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ uint16_t ip_cksum, tcp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->src_addr,
+ data->addr);
+
+ tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
+ ip->src_addr,
+ data->addr,
+ tcp->src_port,
+ data->port);
+
+ ip->src_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ tcp->src_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ uint16_t ip_cksum, udp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->src_addr,
+ data->addr);
+
+ udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
+ ip->src_addr,
+ data->addr,
+ udp->src_port,
+ data->port);
+
+ ip->src_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ udp->src_port = data->port;
+ if (udp->dgram_cksum)
+ udp->dgram_cksum = udp_cksum;
+ }
+ } else {
+ if (cfg->proto == 0x6) {
+ struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ uint16_t ip_cksum, tcp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->dst_addr,
+ data->addr);
+
+ tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
+ ip->dst_addr,
+ data->addr,
+ tcp->dst_port,
+ data->port);
+
+ ip->dst_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ tcp->dst_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ uint16_t ip_cksum, udp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->dst_addr,
+ data->addr);
+
+ udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
+ ip->dst_addr,
+ data->addr,
+ udp->dst_port,
+ data->port);
+
+ ip->dst_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ udp->dst_port = data->port;
+ if (udp->dgram_cksum)
+ udp->dgram_cksum = udp_cksum;
+ }
+ }
+}
+
+static __rte_always_inline void
+pkt_ipv6_work_nat(struct ipv6_hdr *ip,
+ struct nat_ipv6_data *data,
+ struct rte_table_action_nat_config *cfg)
+{
+ if (cfg->source_nat) {
+ if (cfg->proto == 0x6) {
+ struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ uint16_t tcp_cksum;
+
+ tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
+ (uint16_t *)ip->src_addr,
+ (uint16_t *)data->addr,
+ tcp->src_port,
+ data->port);
+
+ rte_memcpy(ip->src_addr, data->addr, 16);
+ tcp->src_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ uint16_t udp_cksum;
+
+ udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
+ (uint16_t *)ip->src_addr,
+ (uint16_t *)data->addr,
+ udp->src_port,
+ data->port);
+
+ rte_memcpy(ip->src_addr, data->addr, 16);
+ udp->src_port = data->port;
+ udp->dgram_cksum = udp_cksum;
+ }
+ } else {
+ if (cfg->proto == 0x6) {
+ struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ uint16_t tcp_cksum;
+
+ tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
+ (uint16_t *)ip->dst_addr,
+ (uint16_t *)data->addr,
+ tcp->dst_port,
+ data->port);
+
+ rte_memcpy(ip->dst_addr, data->addr, 16);
+ tcp->dst_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ uint16_t udp_cksum;
+
+ udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
+ (uint16_t *)ip->dst_addr,
+ (uint16_t *)data->addr,
+ udp->dst_port,
+ data->port);
+
+ rte_memcpy(ip->dst_addr, data->addr, 16);
+ udp->dst_port = data->port;
+ udp->dgram_cksum = udp_cksum;
+ }
+ }
+}
+
+/**
+ * RTE_TABLE_ACTION_TTL
+ */
+static int
+ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
+{
+ if (ttl->drop == 0)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct ttl_data {
+ uint32_t n_packets;
+} __attribute__((__packed__));
+
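+/*
+ * The single 32-bit word packs configuration and stats:
+ * bit [0] = TTL decrement enabled, bits [31:1] = count of packets whose
+ * TTL reached zero.
+ */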
+#define TTL_INIT(data, decrement) \
+ ((data)->n_packets = (decrement) ? 1 : 0)
+
+#define TTL_DEC_GET(data) \
+ ((uint8_t)((data)->n_packets & 1))
+
+#define TTL_STATS_RESET(data) \
+ ((data)->n_packets = ((data)->n_packets & 1))
+
+#define TTL_STATS_READ(data) \
+ ((data)->n_packets >> 1)
+
+#define TTL_STATS_ADD(data, value) \
+ ((data)->n_packets = \
+ (((((data)->n_packets >> 1) + (value)) << 1) | \
+ ((data)->n_packets & 1)))
+
+static int
+ttl_apply(void *data,
+ struct rte_table_action_ttl_params *p)
+{
+ struct ttl_data *d = data;
+
+ TTL_INIT(d, p->decrement);
+
+ return 0;
+}
+
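+/*
+ * Decrementing the TTL byte lowers its 16-bit header word, so the one's
+ * complement checksum is patched by adding the same decrement back in the
+ * matching byte position (end-around carry is not propagated here).
+ */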
+static __rte_always_inline uint64_t
+pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint16_t cksum = ip->hdr_checksum;
+ uint8_t ttl = ip->time_to_live;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
+ cksum += ttl_diff;
+ ttl -= ttl_diff;
+
+ ip->hdr_checksum = cksum;
+ ip->time_to_live = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+static __rte_always_inline uint64_t
+pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint8_t ttl = ip->hop_limits;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
+ ttl -= ttl_diff;
+
+ ip->hop_limits = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+/**
+ * RTE_TABLE_ACTION_STATS
+ */
+static int
+stats_cfg_check(struct rte_table_action_stats_config *stats)
+{
+ if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+struct stats_data {
+ uint64_t n_packets;
+ uint64_t n_bytes;
+} __attribute__((__packed__));
+
+static int
+stats_apply(struct stats_data *data,
+ struct rte_table_action_stats_params *p)
+{
+ data->n_packets = p->n_packets;
+ data->n_bytes = p->n_bytes;
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_stats(struct stats_data *data,
+ uint16_t total_length)
+{
+ data->n_packets++;
+ data->n_bytes += total_length;
+}
+
+/**
+ * RTE_TABLE_ACTION_TIME
+ */
+struct time_data {
+ uint64_t time;
+} __attribute__((__packed__));
+
+static int
+time_apply(struct time_data *data,
+ struct rte_table_action_time_params *p)
+{
+ data->time = p->time;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_time(struct time_data *data,
+ uint64_t time)
+{
+ data->time = time;
+}
+
+/**
+ * Action profile
+ */
+static int
+action_valid(enum rte_table_action_type action)
+{
+ switch (action) {
+ case RTE_TABLE_ACTION_FWD:
+ case RTE_TABLE_ACTION_LB:
+ case RTE_TABLE_ACTION_MTR:
+ case RTE_TABLE_ACTION_TM:
+ case RTE_TABLE_ACTION_ENCAP:
+ case RTE_TABLE_ACTION_NAT:
+ case RTE_TABLE_ACTION_TTL:
+ case RTE_TABLE_ACTION_STATS:
+ case RTE_TABLE_ACTION_TIME:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+#define RTE_TABLE_ACTION_MAX 64
+
+struct ap_config {
+ uint64_t action_mask;
+ struct rte_table_action_common_config common;
+ struct rte_table_action_lb_config lb;
+ struct rte_table_action_mtr_config mtr;
+ struct rte_table_action_tm_config tm;
+ struct rte_table_action_encap_config encap;
+ struct rte_table_action_nat_config nat;
+ struct rte_table_action_ttl_config ttl;
+ struct rte_table_action_stats_config stats;
+};
+
+static size_t
+action_cfg_size(enum rte_table_action_type action)
+{
+ switch (action) {
+ case RTE_TABLE_ACTION_LB:
+ return sizeof(struct rte_table_action_lb_config);
+ case RTE_TABLE_ACTION_MTR:
+ return sizeof(struct rte_table_action_mtr_config);
+ case RTE_TABLE_ACTION_TM:
+ return sizeof(struct rte_table_action_tm_config);
+ case RTE_TABLE_ACTION_ENCAP:
+ return sizeof(struct rte_table_action_encap_config);
+ case RTE_TABLE_ACTION_NAT:
+ return sizeof(struct rte_table_action_nat_config);
+ case RTE_TABLE_ACTION_TTL:
+ return sizeof(struct rte_table_action_ttl_config);
+ case RTE_TABLE_ACTION_STATS:
+ return sizeof(struct rte_table_action_stats_config);
+ default:
+ return 0;
+ }
+}
+
+static void *
+action_cfg_get(struct ap_config *ap_config,
+ enum rte_table_action_type type)
+{
+ switch (type) {
+ case RTE_TABLE_ACTION_LB:
+ return &ap_config->lb;
+
+ case RTE_TABLE_ACTION_MTR:
+ return &ap_config->mtr;
+
+ case RTE_TABLE_ACTION_TM:
+ return &ap_config->tm;
+
+ case RTE_TABLE_ACTION_ENCAP:
+ return &ap_config->encap;
+
+ case RTE_TABLE_ACTION_NAT:
+ return &ap_config->nat;
+
+ case RTE_TABLE_ACTION_TTL:
+ return &ap_config->ttl;
+
+ case RTE_TABLE_ACTION_STATS:
+ return &ap_config->stats;
+
+ default:
+ return NULL;
+ }
+}
+
+static void
+action_cfg_set(struct ap_config *ap_config,
+ enum rte_table_action_type type,
+ void *action_cfg)
+{
+ void *dst = action_cfg_get(ap_config, type);
+
+ if (dst)
+ memcpy(dst, action_cfg, action_cfg_size(type));
+
+ ap_config->action_mask |= 1LLU << type;
+}
+
+struct ap_data {
+ size_t offset[RTE_TABLE_ACTION_MAX];
+ size_t total_size;
+};
+
+static size_t
+action_data_size(enum rte_table_action_type action,
+ struct ap_config *ap_config)
+{
+ switch (action) {
+ case RTE_TABLE_ACTION_FWD:
+ return sizeof(struct fwd_data);
+
+ case RTE_TABLE_ACTION_LB:
+ return sizeof(struct lb_data);
+
+ case RTE_TABLE_ACTION_MTR:
+ return mtr_data_size(&ap_config->mtr);
+
+ case RTE_TABLE_ACTION_TM:
+ return sizeof(struct tm_data);
+
+ case RTE_TABLE_ACTION_ENCAP:
+ return encap_data_size(&ap_config->encap);
+
+ case RTE_TABLE_ACTION_NAT:
+ return nat_data_size(&ap_config->nat,
+ &ap_config->common);
+
+ case RTE_TABLE_ACTION_TTL:
+ return sizeof(struct ttl_data);
+
+ case RTE_TABLE_ACTION_STATS:
+ return sizeof(struct stats_data);
+
+ case RTE_TABLE_ACTION_TIME:
+ return sizeof(struct time_data);
+
+ default:
+ return 0;
+ }
+}
+
+static void
+action_data_offset_set(struct ap_data *ap_data,
+ struct ap_config *ap_config)
+{
+ uint64_t action_mask = ap_config->action_mask;
+ size_t offset;
+ uint32_t action;
+
+ memset(ap_data->offset, 0, sizeof(ap_data->offset));
+
+ offset = 0;
+ for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
+ if (action_mask & (1LLU << action)) {
+ ap_data->offset[action] = offset;
+ offset += action_data_size((enum rte_table_action_type)action,
+ ap_config);
+ }
+
+ ap_data->total_size = offset;
+}
+
+struct rte_table_action_profile {
+ struct ap_config cfg;
+ struct ap_data data;
+ int frozen;
+};
+
+struct rte_table_action_profile *
+rte_table_action_profile_create(struct rte_table_action_common_config *common)
+{
+ struct rte_table_action_profile *ap;
+
+ /* Check input arguments */
+ if (common == NULL)
+ return NULL;
+
+ /* Memory allocation */
+ ap = calloc(1, sizeof(struct rte_table_action_profile));
+ if (ap == NULL)
+ return NULL;
+
+ /* Initialization */
+ memcpy(&ap->cfg.common, common, sizeof(*common));
+
+ return ap;
+}
+
+int
+rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
+ enum rte_table_action_type type,
+ void *action_config)
+{
+ int status;
+
+ /* Check input arguments */
+ if ((profile == NULL) ||
+ profile->frozen ||
+ (action_valid(type) == 0) ||
+ (profile->cfg.action_mask & (1LLU << type)) ||
+ ((action_cfg_size(type) == 0) && action_config) ||
+ (action_cfg_size(type) && (action_config == NULL)))
+ return -EINVAL;
+
+ switch (type) {
+ case RTE_TABLE_ACTION_LB:
+ status = lb_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_MTR:
+ status = mtr_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_TM:
+ status = tm_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_ENCAP:
+ status = encap_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_NAT:
+ status = nat_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_TTL:
+ status = ttl_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_STATS:
+ status = stats_cfg_check(action_config);
+ break;
+
+ default:
+ status = 0;
+ break;
+ }
+
+ if (status)
+ return status;
+
+ /* Action enable */
+ action_cfg_set(&profile->cfg, type, action_config);
+
+ return 0;
+}
+
+int
+rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
+{
+ if (profile->frozen)
+ return -EBUSY;
+
+ profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
+ action_data_offset_set(&profile->data, &profile->cfg);
+ profile->frozen = 1;
+
+ return 0;
+}
+
+int
+rte_table_action_profile_free(struct rte_table_action_profile *profile)
+{
+ if (profile == NULL)
+ return 0;
+
+ free(profile);
+ return 0;
+}
+
+/**
+ * Action
+ */
+#define METER_PROFILES_MAX 32
+
+struct rte_table_action {
+ struct ap_config cfg;
+ struct ap_data data;
+ struct dscp_table_data dscp_table;
+ struct meter_profile_data mp[METER_PROFILES_MAX];
+};
+
+struct rte_table_action *
+rte_table_action_create(struct rte_table_action_profile *profile,
+ uint32_t socket_id)
+{
+ struct rte_table_action *action;
+
+ /* Check input arguments */
+ if ((profile == NULL) ||
+ (profile->frozen == 0))
+ return NULL;
+
+ /* Memory allocation */
+ action = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_table_action),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (action == NULL)
+ return NULL;
+
+ /* Initialization */
+ memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
+ memcpy(&action->data, &profile->data, sizeof(profile->data));
+
+ return action;
+}
+
+static __rte_always_inline void *
+action_data_get(void *data,
+ struct rte_table_action *action,
+ enum rte_table_action_type type)
+{
+ size_t offset = action->data.offset[type];
+ uint8_t *data_bytes = data;
+
+ return &data_bytes[offset];
+}
+
+int
+rte_table_action_apply(struct rte_table_action *action,
+ void *data,
+ enum rte_table_action_type type,
+ void *action_params)
+{
+ void *action_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ (data == NULL) ||
+ (action_valid(type) == 0) ||
+ ((action->cfg.action_mask & (1LLU << type)) == 0) ||
+ (action_params == NULL))
+ return -EINVAL;
+
+ /* Data update */
+ action_data = action_data_get(data, action, type);
+
+ switch (type) {
+ case RTE_TABLE_ACTION_FWD:
+ return fwd_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_LB:
+ return lb_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_MTR:
+ return mtr_apply(action_data,
+ action_params,
+ &action->cfg.mtr,
+ action->mp,
+ RTE_DIM(action->mp));
+
+ case RTE_TABLE_ACTION_TM:
+ return tm_apply(action_data,
+ action_params,
+ &action->cfg.tm);
+
+ case RTE_TABLE_ACTION_ENCAP:
+ return encap_apply(action_data,
+ action_params,
+ &action->cfg.encap,
+ &action->cfg.common);
+
+ case RTE_TABLE_ACTION_NAT:
+ return nat_apply(action_data,
+ action_params,
+ &action->cfg.common);
+
+ case RTE_TABLE_ACTION_TTL:
+ return ttl_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_STATS:
+ return stats_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_TIME:
+ return time_apply(action_data,
+ action_params);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+rte_table_action_dscp_table_update(struct rte_table_action *action,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *table)
+{
+ uint32_t i;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
+ (1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
+ (dscp_mask == 0) ||
+ (table == NULL))
+ return -EINVAL;
+
+ for (i = 0; i < RTE_DIM(table->entry); i++) {
+ struct dscp_table_entry_data *data =
+ &action->dscp_table.entry[i];
+ struct rte_table_action_dscp_table_entry *entry =
+ &table->entry[i];
+ uint16_t queue_tc_color =
+ MBUF_SCHED_QUEUE_TC_COLOR(entry->tc_queue_id,
+ entry->tc_id,
+ entry->color);
+
+ if ((dscp_mask & (1LLU << i)) == 0)
+ continue;
+
+ data->color = entry->color;
+ data->tc = entry->tc_id;
+ data->queue_tc_color = queue_tc_color;
+ }
+
+ return 0;
+}
+
+int
+rte_table_action_meter_profile_add(struct rte_table_action *action,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile)
+{
+ struct meter_profile_data *mp_data;
+ int status;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
+ (profile == NULL))
+ return -EINVAL;
+
+ if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
+ return -ENOTSUP;
+
+ mp_data = meter_profile_data_find(action->mp,
+ RTE_DIM(action->mp),
+ meter_profile_id);
+ if (mp_data)
+ return -EEXIST;
+
+ mp_data = meter_profile_data_find_unused(action->mp,
+ RTE_DIM(action->mp));
+ if (!mp_data)
+ return -ENOSPC;
+
+ /* Install new profile */
+ status = rte_meter_trtcm_profile_config(&mp_data->profile,
+ &profile->trtcm);
+ if (status)
+ return status;
+
+ mp_data->profile_id = meter_profile_id;
+ mp_data->valid = 1;
+
+ return 0;
+}
+
+int
+rte_table_action_meter_profile_delete(struct rte_table_action *action,
+ uint32_t meter_profile_id)
+{
+ struct meter_profile_data *mp_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
+ return -EINVAL;
+
+ mp_data = meter_profile_data_find(action->mp,
+ RTE_DIM(action->mp),
+ meter_profile_id);
+ if (!mp_data)
+ return 0;
+
+ /* Uninstall profile */
+ mp_data->valid = 0;
+
+ return 0;
+}
+
+int
+rte_table_action_meter_read(struct rte_table_action *action,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear)
+{
+ struct mtr_trtcm_data *mtr_data;
+ uint32_t i;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
+ (data == NULL) ||
+ (tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
+ return -EINVAL;
+
+ mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
+
+ /* Read */
+ if (stats) {
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct rte_table_action_mtr_counters_tc *dst =
+ &stats->stats[i];
+ struct mtr_trtcm_data *src = &mtr_data[i];
+
+ if ((tc_mask & (1 << i)) == 0)
+ continue;
+
+ dst->n_packets[e_RTE_METER_GREEN] =
+ mtr_trtcm_data_stats_get(src, e_RTE_METER_GREEN);
+
+ dst->n_packets[e_RTE_METER_YELLOW] =
+ mtr_trtcm_data_stats_get(src, e_RTE_METER_YELLOW);
+
+ dst->n_packets[e_RTE_METER_RED] =
+ mtr_trtcm_data_stats_get(src, e_RTE_METER_RED);
+
+ dst->n_packets_valid = 1;
+ dst->n_bytes_valid = 0;
+ }
+
+ stats->tc_mask = tc_mask;
+ }
+
+ /* Clear */
+ if (clear)
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct mtr_trtcm_data *src = &mtr_data[i];
+
+ if ((tc_mask & (1 << i)) == 0)
+ continue;
+
+ mtr_trtcm_data_stats_reset(src, e_RTE_METER_GREEN);
+ mtr_trtcm_data_stats_reset(src, e_RTE_METER_YELLOW);
+ mtr_trtcm_data_stats_reset(src, e_RTE_METER_RED);
+ }
+
+ return 0;
+}
+
+int
+rte_table_action_ttl_read(struct rte_table_action *action,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear)
+{
+ struct ttl_data *ttl_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask &
+ (1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
+ (data == NULL))
+ return -EINVAL;
+
+ ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
+
+ /* Read */
+ if (stats)
+ stats->n_packets = TTL_STATS_READ(ttl_data);
+
+ /* Clear */
+ if (clear)
+ TTL_STATS_RESET(ttl_data);
+
+ return 0;
+}
+
+int
+rte_table_action_stats_read(struct rte_table_action *action,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear)
+{
+ struct stats_data *stats_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask &
+ (1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
+ (data == NULL))
+ return -EINVAL;
+
+ stats_data = action_data_get(data, action,
+ RTE_TABLE_ACTION_STATS);
+
+ /* Read */
+ if (stats) {
+ stats->n_packets = stats_data->n_packets;
+ stats->n_bytes = stats_data->n_bytes;
+ stats->n_packets_valid = 1;
+ stats->n_bytes_valid = 1;
+ }
+
+ /* Clear */
+ if (clear) {
+ stats_data->n_packets = 0;
+ stats_data->n_bytes = 0;
+ }
+
+ return 0;
+}
+
+int
+rte_table_action_time_read(struct rte_table_action *action,
+ void *data,
+ uint64_t *timestamp)
+{
+ struct time_data *time_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask &
+ (1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
+ (data == NULL) ||
+ (timestamp == NULL))
+ return -EINVAL;
+
+ time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
+
+ /* Read */
+ *timestamp = time_data->time;
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+pkt_work(struct rte_mbuf *mbuf,
+ struct rte_pipeline_table_entry *table_entry,
+ uint64_t time,
+ struct rte_table_action *action,
+ struct ap_config *cfg)
+{
+ uint64_t drop_mask = 0;
+
+ uint32_t ip_offset = action->cfg.common.ip_offset;
+ void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
+
+ uint32_t dscp;
+ uint16_t total_length;
+
+ if (cfg->common.ip_version) {
+ struct ipv4_hdr *hdr = ip;
+
+ dscp = hdr->type_of_service >> 2;
+ total_length = rte_ntohs(hdr->total_length);
+ } else {
+ struct ipv6_hdr *hdr = ip;
+
+ dscp = (rte_ntohl(hdr->vtc_flow) & 0x0FC00000) >> 22;
+ total_length =
+ rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
+
+ pkt_work_lb(mbuf,
+ data,
+ &cfg->lb);
+ }
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
+
+ drop_mask |= pkt_work_mtr(mbuf,
+ data,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp,
+ total_length);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
+
+ pkt_work_tm(mbuf,
+ data,
+ &action->dscp_table,
+ dscp);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
+
+ pkt_work_encap(mbuf,
+ data,
+ &cfg->encap,
+ ip,
+ total_length,
+ ip_offset);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
+
+ if (cfg->common.ip_version)
+ pkt_ipv4_work_nat(ip, data, &cfg->nat);
+ else
+ pkt_ipv6_work_nat(ip, data, &cfg->nat);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
+
+ if (cfg->common.ip_version)
+ drop_mask |= pkt_ipv4_work_ttl(ip, data);
+ else
+ drop_mask |= pkt_ipv6_work_ttl(ip, data);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
+
+ pkt_work_stats(data, total_length);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
+
+ pkt_work_time(data, time);
+ }
+
+ return drop_mask;
+}
+
+static __rte_always_inline uint64_t
+pkt4_work(struct rte_mbuf **mbufs,
+ struct rte_pipeline_table_entry **table_entries,
+ uint64_t time,
+ struct rte_table_action *action,
+ struct ap_config *cfg)
+{
+ uint64_t drop_mask0 = 0;
+ uint64_t drop_mask1 = 0;
+ uint64_t drop_mask2 = 0;
+ uint64_t drop_mask3 = 0;
+
+ struct rte_mbuf *mbuf0 = mbufs[0];
+ struct rte_mbuf *mbuf1 = mbufs[1];
+ struct rte_mbuf *mbuf2 = mbufs[2];
+ struct rte_mbuf *mbuf3 = mbufs[3];
+
+ struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
+ struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
+ struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
+ struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
+
+ uint32_t ip_offset = action->cfg.common.ip_offset;
+ void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
+ void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
+ void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
+ void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
+
+ uint32_t dscp0, dscp1, dscp2, dscp3;
+ uint16_t total_length0, total_length1, total_length2, total_length3;
+
+ if (cfg->common.ip_version) {
+ struct ipv4_hdr *hdr0 = ip0;
+ struct ipv4_hdr *hdr1 = ip1;
+ struct ipv4_hdr *hdr2 = ip2;
+ struct ipv4_hdr *hdr3 = ip3;
+
+ dscp0 = hdr0->type_of_service >> 2;
+ dscp1 = hdr1->type_of_service >> 2;
+ dscp2 = hdr2->type_of_service >> 2;
+ dscp3 = hdr3->type_of_service >> 2;
+
+ total_length0 = rte_ntohs(hdr0->total_length);
+ total_length1 = rte_ntohs(hdr1->total_length);
+ total_length2 = rte_ntohs(hdr2->total_length);
+ total_length3 = rte_ntohs(hdr3->total_length);
+ } else {
+ struct ipv6_hdr *hdr0 = ip0;
+ struct ipv6_hdr *hdr1 = ip1;
+ struct ipv6_hdr *hdr2 = ip2;
+ struct ipv6_hdr *hdr3 = ip3;
+
+ dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0FC00000) >> 22;
+ dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0FC00000) >> 22;
+ dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0FC00000) >> 22;
+ dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0FC00000) >> 22;
+
+ total_length0 =
+ rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
+ total_length1 =
+ rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
+ total_length2 =
+ rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
+ total_length3 =
+ rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
+
+ pkt_work_lb(mbuf0,
+ data0,
+ &cfg->lb);
+
+ pkt_work_lb(mbuf1,
+ data1,
+ &cfg->lb);
+
+ pkt_work_lb(mbuf2,
+ data2,
+ &cfg->lb);
+
+ pkt_work_lb(mbuf3,
+ data3,
+ &cfg->lb);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
+
+ drop_mask0 |= pkt_work_mtr(mbuf0,
+ data0,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp0,
+ total_length0);
+
+ drop_mask1 |= pkt_work_mtr(mbuf1,
+ data1,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp1,
+ total_length1);
+
+ drop_mask2 |= pkt_work_mtr(mbuf2,
+ data2,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp2,
+ total_length2);
+
+ drop_mask3 |= pkt_work_mtr(mbuf3,
+ data3,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp3,
+ total_length3);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
+
+ pkt_work_tm(mbuf0,
+ data0,
+ &action->dscp_table,
+ dscp0);
+
+ pkt_work_tm(mbuf1,
+ data1,
+ &action->dscp_table,
+ dscp1);
+
+ pkt_work_tm(mbuf2,
+ data2,
+ &action->dscp_table,
+ dscp2);
+
+ pkt_work_tm(mbuf3,
+ data3,
+ &action->dscp_table,
+ dscp3);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
+
+ pkt_work_encap(mbuf0,
+ data0,
+ &cfg->encap,
+ ip0,
+ total_length0,
+ ip_offset);
+
+ pkt_work_encap(mbuf1,
+ data1,
+ &cfg->encap,
+ ip1,
+ total_length1,
+ ip_offset);
+
+ pkt_work_encap(mbuf2,
+ data2,
+ &cfg->encap,
+ ip2,
+ total_length2,
+ ip_offset);
+
+ pkt_work_encap(mbuf3,
+ data3,
+ &cfg->encap,
+ ip3,
+ total_length3,
+ ip_offset);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
+
+ if (cfg->common.ip_version) {
+ pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
+ pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
+ pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
+ pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
+ } else {
+ pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
+ pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
+ pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
+ pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
+ }
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
+
+ if (cfg->common.ip_version) {
+ drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
+ drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
+ drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
+ drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
+ } else {
+ drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
+ drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
+ drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
+ drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
+ }
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
+
+ pkt_work_stats(data0, total_length0);
+ pkt_work_stats(data1, total_length1);
+ pkt_work_stats(data2, total_length2);
+ pkt_work_stats(data3, total_length3);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
+
+ pkt_work_time(data0, time);
+ pkt_work_time(data1, time);
+ pkt_work_time(data2, time);
+ pkt_work_time(data3, time);
+ }
+
+ return drop_mask0 |
+ (drop_mask1 << 1) |
+ (drop_mask2 << 2) |
+ (drop_mask3 << 3);
+}
+
+static __rte_always_inline int
+ah(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_pipeline_table_entry **entries,
+ struct rte_table_action *action,
+ struct ap_config *cfg)
+{
+ uint64_t pkts_drop_mask = 0;
+ uint64_t time = 0;
+
+ if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
+ (1LLU << RTE_TABLE_ACTION_TIME)))
+ time = rte_rdtsc();
+
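+ /* A contiguous pkts_mask (2^n - 1) enables the 4-packets-at-a-time path */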
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
+ uint64_t drop_mask;
+
+ drop_mask = pkt4_work(&pkts[i],
+ &entries[i],
+ time,
+ action,
+ cfg);
+
+ pkts_drop_mask |= drop_mask << i;
+ }
+
+ for ( ; i < n_pkts; i++) {
+ uint64_t drop_mask;
+
+ drop_mask = pkt_work(pkts[i],
+ entries[i],
+ time,
+ action,
+ cfg);
+
+ pkts_drop_mask |= drop_mask << i;
+ }
+ } else
+ for ( ; pkts_mask; ) {
+ uint32_t pos = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pos;
+ uint64_t drop_mask;
+
+ drop_mask = pkt_work(pkts[pos],
+ entries[pos],
+ time,
+ action,
+ cfg);
+
+ pkts_mask &= ~pkt_mask;
+ pkts_drop_mask |= drop_mask << pos;
+ }
+
+ rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
+
+ return 0;
+}
+
+static int
+ah_default(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_pipeline_table_entry **entries,
+ void *arg)
+{
+ struct rte_table_action *action = arg;
+
+ return ah(p,
+ pkts,
+ pkts_mask,
+ entries,
+ action,
+ &action->cfg);
+}
+
+static rte_pipeline_table_action_handler_hit
+ah_selector(struct rte_table_action *action)
+{
+ if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
+ return NULL;
+
+ return ah_default;
+}
+
+int
+rte_table_action_table_params_get(struct rte_table_action *action,
+ struct rte_pipeline_table_params *params)
+{
+ rte_pipeline_table_action_handler_hit f_action_hit;
+ uint32_t total_size;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ (params == NULL))
+ return -EINVAL;
+
+ f_action_hit = ah_selector(action);
+ total_size = rte_align32pow2(action->data.total_size);
+
+ /* Fill in params */
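+ /*
+ * The FWD action data aliases struct rte_pipeline_table_entry itself
+ * (see fwd_data), so only the remainder beyond the entry is reported
+ * as extra per-rule action data.
+ */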
+ params->f_action_hit = f_action_hit;
+ params->f_action_miss = NULL;
+ params->arg_ah = (f_action_hit) ? action : NULL;
+ params->action_data_size = total_size -
+ sizeof(struct rte_pipeline_table_entry);
+
+ return 0;
+}
+
+int
+rte_table_action_free(struct rte_table_action *action)
+{
+ if (action == NULL)
+ return 0;
+
+ rte_free(action);
+
+ return 0;
+}
diff --git a/lib/librte_pipeline/rte_table_action.h b/lib/librte_pipeline/rte_table_action.h
new file mode 100644
index 00000000..c7f751aa
--- /dev/null
+++ b/lib/librte_pipeline/rte_table_action.h
@@ -0,0 +1,905 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_ACTION_H__
+#define __INCLUDE_RTE_TABLE_ACTION_H__
+
+/**
+ * @file
+ * RTE Pipeline Table Actions
+ *
+ * This API provides a common set of actions for pipeline tables to speed up
+ * application development.
+ *
+ * Each match-action rule added to a pipeline table has associated data that
+ * stores the action context. This data is the input to the table action
+ * handler invoked, as part of the table lookup during pipeline execution, for
+ * every input packet that hits the rule. The pipeline library allows users to
+ * define their own table actions by providing customized table action
+ * handlers (table lookup) and complete freedom in setting the rules and their
+ * data (table rule add/delete). While that approach remains available, this
+ * API is intended as a quicker development alternative for a set of
+ * predefined actions.
+ *
+ * The typical steps to use this API are (a usage sketch follows this block):
+ * - Define a table action profile. This is a configuration template that can
+ * potentially be shared by multiple tables from the same or different
+ * pipelines, with different tables from the same pipeline likely to use
+ * different action profiles. For every table using a given action profile,
+ * the profile defines the set of actions and the action configuration to be
+ * implemented for all the table rules. API functions:
+ * rte_table_action_profile_create(),
+ * rte_table_action_profile_action_register(),
+ * rte_table_action_profile_freeze().
+ *
+ * - Instantiate the table action profile to create table action objects. Each
+ * pipeline table has its own table action object. API functions:
+ * rte_table_action_create().
+ *
+ * - Use the table action object to generate the pipeline table action handlers
+ * (invoked by the pipeline table lookup operation). API functions:
+ * rte_table_action_table_params_get().
+ *
+ * - Use the table action object to generate the rule data (for the pipeline
+ * table rule add operation) based on given action parameters. API functions:
+ * rte_table_action_apply().
+ *
+ * - Use the table action object to read action data (e.g. stats counters) for
+ * any given rule. API functions: rte_table_action_XYZ_read().
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+
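+/**
+ * Usage sketch (illustrative only, error handling omitted; *ip_hdr_offset*,
+ * *lb_cfg* and *socket_id* stand for application-provided values):
+ *
+ *    struct rte_table_action_common_config common = {
+ *        .ip_version = 1,
+ *        .ip_offset = ip_hdr_offset,
+ *    };
+ *    struct rte_table_action_profile *ap;
+ *    struct rte_table_action *act;
+ *
+ *    ap = rte_table_action_profile_create(&common);
+ *    rte_table_action_profile_action_register(ap, RTE_TABLE_ACTION_LB,
+ *        &lb_cfg);
+ *    rte_table_action_profile_freeze(ap);
+ *    act = rte_table_action_create(ap, socket_id);
+ */
+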
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+#include <rte_ether.h>
+#include <rte_meter.h>
+#include <rte_table_hash.h>
+
+#include "rte_pipeline.h"
+
+/** Table actions. */
+enum rte_table_action_type {
+ /** Forward to next pipeline table, output port or drop. */
+ RTE_TABLE_ACTION_FWD = 0,
+
+ /** Load balance. */
+ RTE_TABLE_ACTION_LB,
+
+ /** Traffic Metering and Policing. */
+ RTE_TABLE_ACTION_MTR,
+
+ /** Traffic Management. */
+ RTE_TABLE_ACTION_TM,
+
+ /** Packet encapsulations. */
+ RTE_TABLE_ACTION_ENCAP,
+
+ /** Network Address Translation (NAT). */
+ RTE_TABLE_ACTION_NAT,
+
+ /** Time to Live (TTL) update. */
+ RTE_TABLE_ACTION_TTL,
+
+ /** Statistics. */
+ RTE_TABLE_ACTION_STATS,
+
+ /** Timestamp. */
+ RTE_TABLE_ACTION_TIME,
+};
+
+/** Common action configuration (per table action profile). */
+struct rte_table_action_common_config {
+ /** Input packet Internet Protocol (IP) version. Non-zero for IPv4, zero
+ * for IPv6.
+ */
+ int ip_version;
+
+ /** IP header offset within the input packet buffer. Offset 0 points to
+ * the first byte of the MBUF structure.
+ */
+ uint32_t ip_offset;
+};
+
+/**
+ * RTE_TABLE_ACTION_FWD
+ */
+/** Forward action parameters (per table rule). */
+struct rte_table_action_fwd_params {
+ /** Forward action. */
+ enum rte_pipeline_action action;
+
+ /** Pipeline table ID or output port ID. */
+ uint32_t id;
+};
+
+/**
+ * RTE_TABLE_ACTION_LB
+ */
+/** Load balance key size min (number of bytes). */
+#define RTE_TABLE_ACTION_LB_KEY_SIZE_MIN 8
+
+/** Load balance key size max (number of bytes). */
+#define RTE_TABLE_ACTION_LB_KEY_SIZE_MAX 64
+
+/** Load balance table size. */
+#define RTE_TABLE_ACTION_LB_TABLE_SIZE 8
+
+/** Load balance action configuration (per table action profile). */
+struct rte_table_action_lb_config {
+ /** Key size (number of bytes). */
+ uint32_t key_size;
+
+ /** Key offset within the input packet buffer. Offset 0 points to the
+ * first byte of the MBUF structure.
+ */
+ uint32_t key_offset;
+
+ /** Key mask (*key_size* bytes are valid). */
+ uint8_t key_mask[RTE_TABLE_ACTION_LB_KEY_SIZE_MAX];
+
+ /** Hash function. */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed value for *f_hash*. */
+ uint64_t seed;
+
+ /** Output value offset within the input packet buffer. Offset 0 points
+ * to the first byte of the MBUF structure.
+ */
+ uint32_t out_offset;
+};
+
+/** Load balance action parameters (per table rule). */
+struct rte_table_action_lb_params {
+ /** Table defining the output values and their weights. The weights are
+ * set in 1/RTE_TABLE_ACTION_LB_TABLE_SIZE increments. To assign a
+ * weight of N/RTE_TABLE_ACTION_LB_TABLE_SIZE to a given output value
+ * (0 <= N <= RTE_TABLE_ACTION_LB_TABLE_SIZE), the same output value
+ * needs to show up exactly N times in this table.
+ */
+ uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
+};
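+
+/**
+ * Example (illustrative): with RTE_TABLE_ACTION_LB_TABLE_SIZE equal to 8,
+ * the table { 0, 0, 0, 0, 1, 1, 2, 3 } gives output value 0 a weight of
+ * 4/8, value 1 a weight of 2/8, and values 2 and 3 a weight of 1/8 each.
+ */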
+
+/**
+ * RTE_TABLE_ACTION_MTR
+ */
+/** Max number of traffic classes (TCs). */
+#define RTE_TABLE_ACTION_TC_MAX 4
+
+/** Max number of queues per traffic class. */
+#define RTE_TABLE_ACTION_TC_QUEUE_MAX 4
+
+/** Differentiated Services Code Point (DSCP) translation table entry. */
+struct rte_table_action_dscp_table_entry {
+ /** Traffic class. Used by the meter or the traffic management actions.
+ * Has to be strictly smaller than *RTE_TABLE_ACTION_TC_MAX*. Traffic
+ * class 0 is the highest priority.
+ */
+ uint32_t tc_id;
+
+ /** Traffic class queue. Used by the traffic management action. Has to
+ * be strictly smaller than *RTE_TABLE_ACTION_TC_QUEUE_MAX*.
+ */
+ uint32_t tc_queue_id;
+
+ /** Packet color. Used by the meter action as the packet input color
+ * for the color aware mode of the traffic metering algorithm.
+ */
+ enum rte_meter_color color;
+};
+
+/** DSCP translation table. */
+struct rte_table_action_dscp_table {
+ /** Array of DSCP table entries */
+ struct rte_table_action_dscp_table_entry entry[64];
+};
+
+/** Supported traffic metering algorithms. */
+enum rte_table_action_meter_algorithm {
+ /** Single Rate Three Color Marker (srTCM) - IETF RFC 2697. */
+ RTE_TABLE_ACTION_METER_SRTCM,
+
+ /** Two Rate Three Color Marker (trTCM) - IETF RFC 2698. */
+ RTE_TABLE_ACTION_METER_TRTCM,
+};
+
+/** Traffic metering profile (configuration template). */
+struct rte_table_action_meter_profile {
+ /** Traffic metering algorithm. */
+ enum rte_table_action_meter_algorithm alg;
+
+ RTE_STD_C11
+ union {
+ /** Only valid when *alg* is set to srTCM - IETF RFC 2697. */
+ struct rte_meter_srtcm_params srtcm;
+
+ /** Only valid when *alg* is set to trTCM - IETF RFC 2698. */
+ struct rte_meter_trtcm_params trtcm;
+ };
+};
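
A minimal sketch of an srTCM profile (the rate and burst sizes below are illustrative; see struct rte_meter_srtcm_params for the field semantics):

    struct rte_table_action_meter_profile mp = {
        .alg = RTE_TABLE_ACTION_METER_SRTCM,
        .srtcm = {
            .cir = 1250000, /* committed rate: bytes/second */
            .cbs = 2048,    /* committed burst size: bytes */
            .ebs = 2048,    /* excess burst size: bytes */
        },
    };
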
+
+/** Policer actions. */
+enum rte_table_action_policer {
+ /** Recolor the packet as green. */
+ RTE_TABLE_ACTION_POLICER_COLOR_GREEN = 0,
+
+ /** Recolor the packet as yellow. */
+ RTE_TABLE_ACTION_POLICER_COLOR_YELLOW,
+
+ /** Recolor the packet as red. */
+ RTE_TABLE_ACTION_POLICER_COLOR_RED,
+
+ /** Drop the packet. */
+ RTE_TABLE_ACTION_POLICER_DROP,
+
+ /** Number of policer actions. */
+ RTE_TABLE_ACTION_POLICER_MAX
+};
+
+/** Meter action configuration per traffic class. */
+struct rte_table_action_mtr_tc_params {
+ /** Meter profile ID. */
+ uint32_t meter_profile_id;
+
+ /** Policer actions. */
+ enum rte_table_action_policer policer[e_RTE_METER_COLORS];
+};
+
+/** Meter action statistics counters per traffic class. */
+struct rte_table_action_mtr_counters_tc {
+ /** Number of packets per color at the output of the traffic metering
+ * and before the policer actions are executed. Only valid when
+ * *n_packets_valid* is non-zero.
+ */
+ uint64_t n_packets[e_RTE_METER_COLORS];
+
+ /** Number of packet bytes per color at the output of the traffic
+ * metering and before the policer actions are executed. Only valid when
+ * *n_bytes_valid* is non-zero.
+ */
+ uint64_t n_bytes[e_RTE_METER_COLORS];
+
+ /** When non-zero, the *n_packets* field is valid. */
+ int n_packets_valid;
+
+ /** When non-zero, the *n_bytes* field is valid. */
+ int n_bytes_valid;
+};
+
+/** Meter action configuration (per table action profile). */
+struct rte_table_action_mtr_config {
+ /** Meter algorithm. */
+ enum rte_table_action_meter_algorithm alg;
+
+ /** Number of traffic classes. Each traffic class has its own traffic
+	 * meter and policer instances. Must be equal to either 1 or
+	 * *RTE_TABLE_ACTION_TC_MAX*.
+ */
+ uint32_t n_tc;
+
+ /** When non-zero, the *n_packets* meter stats counter is enabled,
+ * otherwise it is disabled.
+ *
+ * @see struct rte_table_action_mtr_counters_tc
+ */
+ int n_packets_enabled;
+
+ /** When non-zero, the *n_bytes* meter stats counter is enabled,
+ * otherwise it is disabled.
+ *
+ * @see struct rte_table_action_mtr_counters_tc
+ */
+ int n_bytes_enabled;
+};
+
+/** Meter action parameters (per table rule). */
+struct rte_table_action_mtr_params {
+ /** Traffic meter and policer parameters for each of the *tc_mask*
+ * traffic classes.
+ */
+ struct rte_table_action_mtr_tc_params mtr[RTE_TABLE_ACTION_TC_MAX];
+
+ /** Bit mask defining which traffic class parameters are valid in *mtr*.
+ * If bit N is set in *tc_mask*, then parameters for traffic class N are
+ * valid in *mtr*.
+ */
+ uint32_t tc_mask;
+};
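
As a sketch, metering only traffic class 0 against meter profile 0, keeping green and yellow packets and dropping red ones:

    struct rte_table_action_mtr_params mtr = {
        .mtr[0] = {
            .meter_profile_id = 0,
            .policer[e_RTE_METER_GREEN] =
                RTE_TABLE_ACTION_POLICER_COLOR_GREEN,
            .policer[e_RTE_METER_YELLOW] =
                RTE_TABLE_ACTION_POLICER_COLOR_YELLOW,
            .policer[e_RTE_METER_RED] =
                RTE_TABLE_ACTION_POLICER_DROP,
        },
        .tc_mask = 1, /* bit 0 set: only TC 0 is valid in mtr[] */
    };
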
+
+/** Meter action statistics counters (per table rule). */
+struct rte_table_action_mtr_counters {
+ /** Stats counters for each of the *tc_mask* traffic classes. */
+ struct rte_table_action_mtr_counters_tc stats[RTE_TABLE_ACTION_TC_MAX];
+
+	/** Bit mask defining which traffic class counters are valid in *stats*.
+	 * If bit N is set in *tc_mask*, then counters for traffic class N are
+	 * valid in *stats*.
+ */
+ uint32_t tc_mask;
+};
+
+/**
+ * RTE_TABLE_ACTION_TM
+ */
+/** Traffic management action configuration (per table action profile). */
+struct rte_table_action_tm_config {
+ /** Number of subports per port. */
+ uint32_t n_subports_per_port;
+
+ /** Number of pipes per subport. */
+ uint32_t n_pipes_per_subport;
+};
+
+/** Traffic management action parameters (per table rule). */
+struct rte_table_action_tm_params {
+ /** Subport ID. */
+ uint32_t subport_id;
+
+ /** Pipe ID. */
+ uint32_t pipe_id;
+};
+
+/**
+ * RTE_TABLE_ACTION_ENCAP
+ */
+/** Supported packet encapsulation types. */
+enum rte_table_action_encap_type {
+ /** IP -> { Ether | IP } */
+ RTE_TABLE_ACTION_ENCAP_ETHER = 0,
+
+ /** IP -> { Ether | VLAN | IP } */
+ RTE_TABLE_ACTION_ENCAP_VLAN,
+
+ /** IP -> { Ether | S-VLAN | C-VLAN | IP } */
+ RTE_TABLE_ACTION_ENCAP_QINQ,
+
+ /** IP -> { Ether | MPLS | IP } */
+ RTE_TABLE_ACTION_ENCAP_MPLS,
+
+ /** IP -> { Ether | PPPoE | PPP | IP } */
+ RTE_TABLE_ACTION_ENCAP_PPPOE,
+};
+
+/** Pre-computed Ethernet header fields for encapsulation action. */
+struct rte_table_action_ether_hdr {
+ struct ether_addr da; /**< Destination address. */
+ struct ether_addr sa; /**< Source address. */
+};
+
+/** Pre-computed VLAN header fields for encapsulation action. */
+struct rte_table_action_vlan_hdr {
+ uint8_t pcp; /**< Priority Code Point (PCP). */
+ uint8_t dei; /**< Drop Eligibility Indicator (DEI). */
+ uint16_t vid; /**< VLAN Identifier (VID). */
+};
+
+/** Pre-computed MPLS header fields for encapsulation action. */
+struct rte_table_action_mpls_hdr {
+ uint32_t label; /**< Label. */
+ uint8_t tc; /**< Traffic Class (TC). */
+ uint8_t ttl; /**< Time to Live (TTL). */
+};
+
+/** Pre-computed PPPoE header fields for encapsulation action. */
+struct rte_table_action_pppoe_hdr {
+ uint16_t session_id; /**< Session ID. */
+};
+
+/** Ether encap parameters. */
+struct rte_table_action_encap_ether_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+};
+
+/** VLAN encap parameters. */
+struct rte_table_action_encap_vlan_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+ struct rte_table_action_vlan_hdr vlan; /**< VLAN header. */
+};
+
+/** QinQ encap parameters. */
+struct rte_table_action_encap_qinq_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+ struct rte_table_action_vlan_hdr svlan; /**< Service VLAN header. */
+ struct rte_table_action_vlan_hdr cvlan; /**< Customer VLAN header. */
+};
+
+/** Max number of MPLS labels per output packet for MPLS encapsulation. */
+#ifndef RTE_TABLE_ACTION_MPLS_LABELS_MAX
+#define RTE_TABLE_ACTION_MPLS_LABELS_MAX 4
+#endif
+
+/** MPLS encap parameters. */
+struct rte_table_action_encap_mpls_params {
+ /** Ethernet header. */
+ struct rte_table_action_ether_hdr ether;
+
+ /** MPLS header. */
+ struct rte_table_action_mpls_hdr mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
+
+ /** Number of MPLS labels in MPLS header. */
+ uint32_t mpls_count;
+
+ /** Non-zero for MPLS unicast, zero for MPLS multicast. */
+ int unicast;
+};
+
+/** PPPoE encap parameters. */
+struct rte_table_action_encap_pppoe_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+ struct rte_table_action_pppoe_hdr pppoe; /**< PPPoE/PPP headers. */
+};
+
+/** Encap action configuration (per table action profile). */
+struct rte_table_action_encap_config {
+ /** Bit mask defining the set of packet encapsulations enabled for the
+	 * current table action profile. If bit N is set in *encap_mask*,
+ * then packet encapsulation N is enabled, otherwise it is disabled.
+ *
+ * @see enum rte_table_action_encap_type
+ */
+ uint64_t encap_mask;
+};
+
+/** Encap action parameters (per table rule). */
+struct rte_table_action_encap_params {
+ /** Encapsulation type. */
+ enum rte_table_action_encap_type type;
+
+ RTE_STD_C11
+ union {
+ /** Only valid when *type* is set to Ether. */
+ struct rte_table_action_encap_ether_params ether;
+
+ /** Only valid when *type* is set to VLAN. */
+ struct rte_table_action_encap_vlan_params vlan;
+
+ /** Only valid when *type* is set to QinQ. */
+ struct rte_table_action_encap_qinq_params qinq;
+
+ /** Only valid when *type* is set to MPLS. */
+ struct rte_table_action_encap_mpls_params mpls;
+
+ /** Only valid when *type* is set to PPPoE. */
+ struct rte_table_action_encap_pppoe_params pppoe;
+ };
+};
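
A sketch of QinQ encapsulation parameters (the addresses and VLAN IDs below are made up for illustration):

    struct rte_table_action_encap_params encap = {
        .type = RTE_TABLE_ACTION_ENCAP_QINQ,
        .qinq = {
            .ether = {
                .da.addr_bytes = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
                .sa.addr_bytes = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee},
            },
            .svlan = {.pcp = 0, .dei = 0, .vid = 100},
            .cvlan = {.pcp = 0, .dei = 0, .vid = 200},
        },
    };
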
+
+/**
+ * RTE_TABLE_ACTION_NAT
+ */
+/** NAT action configuration (per table action profile). */
+struct rte_table_action_nat_config {
+ /** When non-zero, the IP source address and L4 protocol source port are
+ * translated. When zero, the IP destination address and L4 protocol
+ * destination port are translated.
+ */
+ int source_nat;
+
+ /** Layer 4 protocol, for example TCP (0x06) or UDP (0x11). The checksum
+	 * field is computed differently and placed at a different header offset
+ * by each layer 4 protocol.
+ */
+ uint8_t proto;
+};
+
+/** NAT action parameters (per table rule). */
+struct rte_table_action_nat_params {
+ /** IP version for *addr*: non-zero for IPv4, zero for IPv6. */
+ int ip_version;
+
+ /** IP address. */
+ union {
+ /** IPv4 address; only valid when *ip_version* is non-zero. */
+ uint32_t ipv4;
+
+		/** IPv6 address; only valid when *ip_version* is zero. */
+ uint8_t ipv6[16];
+ } addr;
+
+ /** Port. */
+ uint16_t port;
+};
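
For example (illustrative values), a rule that rewrites the IPv4 destination address and TCP destination port, assuming the profile was configured with *source_nat* set to 0 and *proto* set to 0x06:

    struct rte_table_action_nat_params nat = {
        .ip_version = 1,
        .addr.ipv4 = 0xc0a8000a, /* 192.168.0.10 */
        .port = 8080,
    };
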
+
+/**
+ * RTE_TABLE_ACTION_TTL
+ */
+/** TTL action configuration (per table action profile). */
+struct rte_table_action_ttl_config {
+ /** When non-zero, the input packets whose updated IPv4 Time to Live
+ * (TTL) field or IPv6 Hop Limit (HL) field is zero are dropped.
+ * When zero, the input packets whose updated IPv4 TTL field or IPv6 HL
+	 * field is zero are forwarded as usual (typically for debugging
+	 * purposes).
+ */
+ int drop;
+
+ /** When non-zero, the *n_packets* stats counter for TTL action is
+ * enabled, otherwise disabled.
+ *
+ * @see struct rte_table_action_ttl_counters
+ */
+ int n_packets_enabled;
+};
+
+/** TTL action parameters (per table rule). */
+struct rte_table_action_ttl_params {
+ /** When non-zero, decrement the IPv4 TTL field and update the checksum
+ * field, or decrement the IPv6 HL field. When zero, the IPv4 TTL field
+ * or the IPv6 HL field is not changed.
+ */
+ int decrement;
+};
+
+/** TTL action statistics counters (per table rule). */
+struct rte_table_action_ttl_counters {
+ /** Number of IPv4 packets whose updated TTL field is zero or IPv6
+ * packets whose updated HL field is zero.
+ */
+ uint64_t n_packets;
+};
+
+/**
+ * RTE_TABLE_ACTION_STATS
+ */
+/** Stats action configuration (per table action profile). */
+struct rte_table_action_stats_config {
+ /** When non-zero, the *n_packets* stats counter is enabled, otherwise
+ * disabled.
+ *
+ * @see struct rte_table_action_stats_counters
+ */
+ int n_packets_enabled;
+
+ /** When non-zero, the *n_bytes* stats counter is enabled, otherwise
+ * disabled.
+ *
+ * @see struct rte_table_action_stats_counters
+ */
+ int n_bytes_enabled;
+};
+
+/** Stats action parameters (per table rule). */
+struct rte_table_action_stats_params {
+ /** Initial value for the *n_packets* stats counter. Typically set to 0.
+ *
+ * @see struct rte_table_action_stats_counters
+ */
+ uint64_t n_packets;
+
+ /** Initial value for the *n_bytes* stats counter. Typically set to 0.
+ *
+ * @see struct rte_table_action_stats_counters
+ */
+ uint64_t n_bytes;
+};
+
+/** Stats action counters (per table rule). */
+struct rte_table_action_stats_counters {
+ /** Number of packets. Valid only when *n_packets_valid* is non-zero. */
+ uint64_t n_packets;
+
+ /** Number of bytes. Valid only when *n_bytes_valid* is non-zero. */
+ uint64_t n_bytes;
+
+ /** When non-zero, the *n_packets* field is valid, otherwise invalid. */
+ int n_packets_valid;
+
+ /** When non-zero, the *n_bytes* field is valid, otherwise invalid. */
+ int n_bytes_valid;
+};
+
+/**
+ * RTE_TABLE_ACTION_TIME
+ */
+/** Timestamp action parameters (per table rule). */
+struct rte_table_action_time_params {
+ /** Initial timestamp value. Typically set to current time. */
+ uint64_t time;
+};
+
+/**
+ * Table action profile.
+ */
+struct rte_table_action_profile;
+
+/**
+ * Table action profile create.
+ *
+ * @param[in] common
+ * Common action configuration.
+ * @return
+ * Table action profile handle on success, NULL otherwise.
+ */
+struct rte_table_action_profile * __rte_experimental
+rte_table_action_profile_create(struct rte_table_action_common_config *common);
+
+/**
+ * Table action profile free.
+ *
+ * @param[in] profile
+ *   Table action profile handle (needs to be valid).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_profile_free(struct rte_table_action_profile *profile);
+
+/**
+ * Table action profile action register.
+ *
+ * @param[in] profile
+ *   Table action profile handle (needs to be valid and not in frozen state).
+ * @param[in] type
+ * Specific table action to be registered for *profile*.
+ * @param[in] action_config
+ * Configuration for the *type* action.
+ * If struct rte_table_action_*type*_config is defined by the Table Action
+ * API, it needs to point to a valid instance of this structure, otherwise it
+ * needs to be set to NULL.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
+ enum rte_table_action_type type,
+ void *action_config);
+
+/**
+ * Table action profile freeze.
+ *
+ * Once this function returns successfully, the given profile enters the
+ * frozen state: no more actions can be registered for it, and the profile
+ * can now be instantiated to create table action objects.
+ *
+ * @param[in] profile
+ *   Table action profile handle (needs to be valid and not in frozen state).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ *
+ * @see rte_table_action_create()
+ */
+int __rte_experimental
+rte_table_action_profile_freeze(struct rte_table_action_profile *profile);
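
A sketch of the profile life cycle described above (error handling omitted; the FWD action defines no per-profile config structure, so NULL is passed at registration, and the ip_offset arithmetic is an assumption about the mbuf layout):

    struct rte_table_action_common_config common = {
        .ip_version = 1,
        .ip_offset = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM +
            sizeof(struct ether_hdr),
    };
    struct rte_table_action_profile *profile;

    profile = rte_table_action_profile_create(&common);
    rte_table_action_profile_action_register(profile,
        RTE_TABLE_ACTION_FWD, NULL);
    rte_table_action_profile_freeze(profile); /* no more registrations */
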
+
+/**
+ * Table action.
+ */
+struct rte_table_action;
+
+/**
+ * Table action create.
+ *
+ * Instantiates the given table action profile to create a table action object.
+ *
+ * @param[in] profile
+ *   Table action profile handle (needs to be valid and in frozen state).
+ * @param[in] socket_id
+ * CPU socket ID where the internal data structures required by the new table
+ * action object should be allocated.
+ * @return
+ * Handle to table action object on success, NULL on error.
+ *
+ * @see rte_table_action_profile_freeze()
+ */
+struct rte_table_action * __rte_experimental
+rte_table_action_create(struct rte_table_action_profile *profile,
+ uint32_t socket_id);
+
+/**
+ * Table action free.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_free(struct rte_table_action *action);
+
+/**
+ * Table action table params get.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[inout] params
+ * Pipeline table parameters (needs to be pre-allocated).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_table_params_get(struct rte_table_action *action,
+ struct rte_pipeline_table_params *params);
+
+/**
+ * Table action apply.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) to apply action *type* on.
+ * @param[in] type
+ * Specific table action previously registered for the table action profile of
+ * the *action* object.
+ * @param[in] action_params
+ * Parameters for the *type* action.
+ * If struct rte_table_action_*type*_params is defined by the Table Action
+ * API, it needs to point to a valid instance of this structure, otherwise it
+ * needs to be set to NULL.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_apply(struct rte_table_action *action,
+ void *data,
+ enum rte_table_action_type type,
+ void *action_params);
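
As a sketch, applying the forward action to a rule data area. Here *action* is a previously created table action object, and the fixed-size buffer only stands in for the real action data area, whose layout comes from rte_table_action_table_params_get():

    uint8_t data[256]; /* stands in for a table rule's action data area */
    struct rte_table_action_fwd_params fwd = {
        .action = RTE_PIPELINE_ACTION_PORT,
        .id = 0, /* output port ID */
    };
    int status;

    status = rte_table_action_apply(action, data,
        RTE_TABLE_ACTION_FWD, &fwd);
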
+
+/**
+ * Table action DSCP table update.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] dscp_mask
+ * 64-bit mask defining the DSCP table entries to be updated. If bit N is set
+ * in this bit mask, then DSCP table entry N is to be updated, otherwise not.
+ * @param[in] table
+ * DSCP table.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_dscp_table_update(struct rte_table_action *action,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *table);
+
+/**
+ * Table action meter profile add.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] meter_profile_id
+ * Meter profile ID to be used for the *profile* once it is successfully added
+ * to the *action* object (needs to be unused by the set of meter profiles
+ * currently registered for the *action* object).
+ * @param[in] profile
+ * Meter profile to be added.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_meter_profile_add(struct rte_table_action *action,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile);
+
+/**
+ * Table action meter profile delete.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] meter_profile_id
+ * Meter profile ID of the meter profile to be deleted from the *action*
+ * object (needs to be valid for the *action* object).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_meter_profile_delete(struct rte_table_action *action,
+ uint32_t meter_profile_id);
+
+/**
+ * Table action meter read.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with meter action previously
+ * applied on it.
+ * @param[in] tc_mask
+ * Bit mask defining which traffic classes should have the meter stats
+ * counters read from *data* and stored into *stats*. If bit N is set in this
+ * bit mask, then traffic class N is part of this operation, otherwise it is
+ * not. If bit N is set in this bit mask, then traffic class N must be one of
+ * the traffic classes that are enabled for the meter action in the table
+ * action profile used by the *action* object.
+ * @param[inout] stats
+ * When non-NULL, it points to the area where the meter stats counters read
+ * from *data* are saved. Only the meter stats counters for the *tc_mask*
+ * traffic classes are read and stored to *stats*.
+ * @param[in] clear
+ * When non-zero, the meter stats counters are cleared (i.e. set to zero),
+ * otherwise the counters are not modified. When the read operation is enabled
+ * (*stats* is non-NULL), the clear operation is performed after the read
+ * operation is completed.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_meter_read(struct rte_table_action *action,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear);
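
A sketch of a read-and-clear of the traffic class 0 meter counters for one rule (*action* and *data* as in rte_table_action_apply()):

    struct rte_table_action_mtr_counters counters;
    int status;

    status = rte_table_action_meter_read(action, data,
        1 << 0,     /* tc_mask: traffic class 0 only */
        &counters,  /* read the counters ... */
        1);         /* ... then clear them */
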
+
+/**
+ * Table action TTL read.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with TTL action previously
+ * applied on it.
+ * @param[inout] stats
+ * When non-NULL, it points to the area where the TTL stats counters read from
+ * *data* are saved.
+ * @param[in] clear
+ * When non-zero, the TTL stats counters are cleared (i.e. set to zero),
+ * otherwise the counters are not modified. When the read operation is enabled
+ * (*stats* is non-NULL), the clear operation is performed after the read
+ * operation is completed.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_ttl_read(struct rte_table_action *action,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear);
+
+/**
+ * Table action stats read.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with stats action previously
+ * applied on it.
+ * @param[inout] stats
+ * When non-NULL, it points to the area where the stats counters read from
+ * *data* are saved.
+ * @param[in] clear
+ * When non-zero, the stats counters are cleared (i.e. set to zero), otherwise
+ * the counters are not modified. When the read operation is enabled (*stats*
+ * is non-NULL), the clear operation is performed after the read operation is
+ * completed.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_stats_read(struct rte_table_action *action,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear);
+
+/**
+ * Table action timestamp read.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with timestamp action
+ * previously applied on it.
+ * @param[inout] timestamp
+ * Pre-allocated memory where the timestamp read from *data* is saved (has to
+ * be non-NULL).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_time_read(struct rte_table_action *action,
+ void *data,
+ uint64_t *timestamp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_TABLE_ACTION_H__ */
diff --git a/lib/librte_port/meson.build b/lib/librte_port/meson.build
index debb5eb9..f3d8b443 100644
--- a/lib/librte_port/meson.build
+++ b/lib/librte_port/meson.build
@@ -5,23 +5,21 @@ version = 3
sources = files(
'rte_port_ethdev.c',
'rte_port_fd.c',
+ 'rte_port_frag.c',
+ 'rte_port_ras.c',
'rte_port_ring.c',
'rte_port_sched.c',
'rte_port_source_sink.c')
headers = files(
'rte_port_ethdev.h',
'rte_port_fd.h',
+ 'rte_port_frag.h',
+ 'rte_port_ras.h',
'rte_port.h',
'rte_port_ring.h',
'rte_port_sched.h',
'rte_port_source_sink.h')
-deps += ['ethdev', 'sched']
-
-if dpdk_conf.has('RTE_LIBRTE_IP_FRAG')
- sources += files('rte_port_frag.c', 'rte_port_ras.c')
- headers += files('rte_port_frag.h', 'rte_port_ras.h')
- deps += ['ip_frag']
-endif
+deps += ['ethdev', 'sched', 'ip_frag']
if dpdk_conf.has('RTE_LIBRTE_KNI')
sources += files('rte_port_kni.c')
diff --git a/lib/librte_power/channel_commands.h b/lib/librte_power/channel_commands.h
index 5e8b4ab5..ee638eef 100644
--- a/lib/librte_power/channel_commands.h
+++ b/lib/librte_power/channel_commands.h
@@ -48,7 +48,8 @@ enum workload {HIGH, MEDIUM, LOW};
enum policy_to_use {
TRAFFIC,
TIME,
- WORKLOAD
+ WORKLOAD,
+ BRANCH_RATIO
};
struct traffic {
diff --git a/lib/librte_power/power_acpi_cpufreq.c b/lib/librte_power/power_acpi_cpufreq.c
index bce933e9..cd5978d5 100644
--- a/lib/librte_power/power_acpi_cpufreq.c
+++ b/lib/librte_power/power_acpi_cpufreq.c
@@ -623,3 +623,24 @@ power_acpi_disable_turbo(unsigned int lcore_id)
return 0;
}
+
+int power_acpi_get_capabilities(unsigned int lcore_id,
+ struct rte_power_core_capabilities *caps)
+{
+ struct rte_power_info *pi;
+
+ if (lcore_id >= RTE_MAX_LCORE) {
+ RTE_LOG(ERR, POWER, "Invalid lcore ID\n");
+ return -1;
+ }
+ if (caps == NULL) {
+ RTE_LOG(ERR, POWER, "Invalid argument\n");
+ return -1;
+ }
+
+ pi = &lcore_power_info[lcore_id];
+ caps->capabilities = 0;
+ caps->turbo = !!(pi->turbo_available);
+
+ return 0;
+}
diff --git a/lib/librte_power/power_acpi_cpufreq.h b/lib/librte_power/power_acpi_cpufreq.h
index edeeb27a..1af74160 100644
--- a/lib/librte_power/power_acpi_cpufreq.h
+++ b/lib/librte_power/power_acpi_cpufreq.h
@@ -14,6 +14,7 @@
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_string_fns.h>
+#include "rte_power.h"
#ifdef __cplusplus
extern "C" {
@@ -196,6 +197,21 @@ int power_acpi_enable_turbo(unsigned int lcore_id);
*/
int power_acpi_disable_turbo(unsigned int lcore_id);
+/**
+ * Returns power capabilities for a specific lcore.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param caps
+ * pointer to rte_power_core_capabilities object.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int power_acpi_get_capabilities(unsigned int lcore_id,
+ struct rte_power_core_capabilities *caps);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_power/power_kvm_vm.c b/lib/librte_power/power_kvm_vm.c
index 38e9066f..20659b72 100644
--- a/lib/librte_power/power_kvm_vm.c
+++ b/lib/librte_power/power_kvm_vm.c
@@ -124,3 +124,11 @@ power_kvm_vm_disable_turbo(unsigned int lcore_id)
{
return send_msg(lcore_id, CPU_POWER_DISABLE_TURBO);
}
+
+struct rte_power_core_capabilities;
+int power_kvm_vm_get_capabilities(__rte_unused unsigned int lcore_id,
+ __rte_unused struct rte_power_core_capabilities *caps)
+{
+ RTE_LOG(ERR, POWER, "rte_power_get_capabilities is not implemented for Virtual Machine Power Management\n");
+ return -ENOTSUP;
+}
diff --git a/lib/librte_power/power_kvm_vm.h b/lib/librte_power/power_kvm_vm.h
index 446d6997..94d4aa12 100644
--- a/lib/librte_power/power_kvm_vm.h
+++ b/lib/librte_power/power_kvm_vm.h
@@ -14,6 +14,7 @@
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_string_fns.h>
+#include "rte_power.h"
#ifdef __cplusplus
extern "C" {
@@ -177,6 +178,22 @@ int power_kvm_vm_enable_turbo(unsigned int lcore_id);
* - Negative on error.
*/
int power_kvm_vm_disable_turbo(unsigned int lcore_id);
+
+/**
+ * Returns power capabilities for a specific lcore.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param caps
+ * pointer to rte_power_core_capabilities object.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+int power_kvm_vm_get_capabilities(unsigned int lcore_id,
+ struct rte_power_core_capabilities *caps);
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_power/rte_power.c b/lib/librte_power/rte_power.c
index 6c8fb403..208b7919 100644
--- a/lib/librte_power/rte_power.c
+++ b/lib/librte_power/rte_power.c
@@ -24,6 +24,7 @@ rte_power_freq_change_t rte_power_freq_min = NULL;
rte_power_freq_change_t rte_power_turbo_status;
rte_power_freq_change_t rte_power_freq_enable_turbo;
rte_power_freq_change_t rte_power_freq_disable_turbo;
+rte_power_get_capabilities_t rte_power_get_capabilities;
int
rte_power_set_env(enum power_management_env env)
@@ -42,6 +43,7 @@ rte_power_set_env(enum power_management_env env)
rte_power_turbo_status = power_acpi_turbo_status;
rte_power_freq_enable_turbo = power_acpi_enable_turbo;
rte_power_freq_disable_turbo = power_acpi_disable_turbo;
+ rte_power_get_capabilities = power_acpi_get_capabilities;
} else if (env == PM_ENV_KVM_VM) {
rte_power_freqs = power_kvm_vm_freqs;
rte_power_get_freq = power_kvm_vm_get_freq;
@@ -53,6 +55,7 @@ rte_power_set_env(enum power_management_env env)
rte_power_turbo_status = power_kvm_vm_turbo_status;
rte_power_freq_enable_turbo = power_kvm_vm_enable_turbo;
rte_power_freq_disable_turbo = power_kvm_vm_disable_turbo;
+ rte_power_get_capabilities = power_kvm_vm_get_capabilities;
} else {
RTE_LOG(ERR, POWER, "Invalid Power Management Environment(%d) set\n",
env);
diff --git a/lib/librte_power/rte_power.h b/lib/librte_power/rte_power.h
index b4b7357b..d70bc0b3 100644
--- a/lib/librte_power/rte_power.h
+++ b/lib/librte_power/rte_power.h
@@ -247,6 +247,38 @@ extern rte_power_freq_change_t rte_power_freq_enable_turbo;
*/
extern rte_power_freq_change_t rte_power_freq_disable_turbo;
+/**
+ * Power capabilities summary.
+ */
+struct rte_power_core_capabilities {
+ RTE_STD_C11
+ union {
+ uint64_t capabilities;
+ RTE_STD_C11
+ struct {
+ uint64_t turbo:1; /**< Turbo can be enabled. */
+ };
+ };
+};
+
+/**
+ * Returns power capabilities for a specific lcore.
+ * Function pointer definition. Review each environment's
+ * specific documentation for usage.
+ *
+ * @param lcore_id
+ * lcore id.
+ * @param caps
+ * pointer to rte_power_core_capabilities object.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative on error.
+ */
+typedef int (*rte_power_get_capabilities_t)(unsigned int lcore_id,
+ struct rte_power_core_capabilities *caps);
+
+extern rte_power_get_capabilities_t rte_power_get_capabilities;
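
A sketch of how an application might use this, assuming the power environment for *lcore_id* has already been initialized (e.g. via rte_power_init()):

    struct rte_power_core_capabilities caps;

    if (rte_power_get_capabilities(lcore_id, &caps) == 0 && caps.turbo)
        rte_power_freq_enable_turbo(lcore_id);
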
#ifdef __cplusplus
}
diff --git a/lib/librte_power/rte_power_version.map b/lib/librte_power/rte_power_version.map
index 96dc42ec..dd587dfb 100644
--- a/lib/librte_power/rte_power_version.map
+++ b/lib/librte_power/rte_power_version.map
@@ -25,4 +25,11 @@ DPDK_17.11 {
rte_power_freq_enable_turbo;
rte_power_turbo_status;
-} DPDK_2.0; \ No newline at end of file
+} DPDK_2.0;
+
+DPDK_18.08 {
+ global:
+
+ rte_power_get_capabilities;
+
+} DPDK_17.11;
diff --git a/lib/librte_rawdev/Makefile b/lib/librte_rawdev/Makefile
index b9105b06..addb288d 100644
--- a/lib/librte_rawdev/Makefile
+++ b/lib/librte_rawdev/Makefile
@@ -10,7 +10,6 @@ LIB = librte_rawdev.a
LIBABIVER := 1
# build flags
-CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_eal
diff --git a/lib/librte_rawdev/meson.build b/lib/librte_rawdev/meson.build
new file mode 100644
index 00000000..a20fbdc0
--- /dev/null
+++ b/lib/librte_rawdev/meson.build
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('rte_rawdev.c')
+headers = files('rte_rawdev.h', 'rte_rawdev_pmd.h')
diff --git a/lib/librte_rawdev/rte_rawdev.c b/lib/librte_rawdev/rte_rawdev.c
index d314ef96..62b6b97e 100644
--- a/lib/librte_rawdev/rte_rawdev.c
+++ b/lib/librte_rawdev/rte_rawdev.c
@@ -46,13 +46,13 @@ static struct rte_rawdev_global rawdev_globals = {
struct rte_rawdev_global *rte_rawdev_globals = &rawdev_globals;
/* Raw device, northbound API implementation */
-uint8_t __rte_experimental
+uint8_t
rte_rawdev_count(void)
{
return rte_rawdev_globals->nb_devs;
}
-uint16_t __rte_experimental
+uint16_t
rte_rawdev_get_dev_id(const char *name)
{
uint16_t i;
@@ -69,7 +69,7 @@ rte_rawdev_get_dev_id(const char *name)
return -ENODEV;
}
-int __rte_experimental
+int
rte_rawdev_socket_id(uint16_t dev_id)
{
struct rte_rawdev *dev;
@@ -80,7 +80,7 @@ rte_rawdev_socket_id(uint16_t dev_id)
return dev->socket_id;
}
-int __rte_experimental
+int
rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info)
{
struct rte_rawdev *rawdev;
@@ -88,9 +88,6 @@ rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info)
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
RTE_FUNC_PTR_OR_ERR_RET(dev_info, -EINVAL);
- if (dev_info == NULL)
- return -EINVAL;
-
rawdev = &rte_rawdevs[dev_id];
RTE_FUNC_PTR_OR_ERR_RET(*rawdev->dev_ops->dev_info_get, -ENOTSUP);
@@ -105,7 +102,7 @@ rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info)
return 0;
}
-int __rte_experimental
+int
rte_rawdev_configure(uint16_t dev_id, struct rte_rawdev_info *dev_conf)
{
struct rte_rawdev *dev;
@@ -134,7 +131,7 @@ rte_rawdev_configure(uint16_t dev_id, struct rte_rawdev_info *dev_conf)
return diag;
}
-int __rte_experimental
+int
rte_rawdev_queue_conf_get(uint16_t dev_id,
uint16_t queue_id,
rte_rawdev_obj_t queue_conf)
@@ -149,7 +146,7 @@ rte_rawdev_queue_conf_get(uint16_t dev_id,
return 0;
}
-int __rte_experimental
+int
rte_rawdev_queue_setup(uint16_t dev_id,
uint16_t queue_id,
rte_rawdev_obj_t queue_conf)
@@ -163,7 +160,7 @@ rte_rawdev_queue_setup(uint16_t dev_id,
return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}
-int __rte_experimental
+int
rte_rawdev_queue_release(uint16_t dev_id, uint16_t queue_id)
{
struct rte_rawdev *dev;
@@ -175,7 +172,19 @@ rte_rawdev_queue_release(uint16_t dev_id, uint16_t queue_id)
return (*dev->dev_ops->queue_release)(dev, queue_id);
}
-int __rte_experimental
+uint16_t
+rte_rawdev_queue_count(uint16_t dev_id)
+{
+ struct rte_rawdev *dev;
+
+ RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_rawdevs[dev_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_count, -ENOTSUP);
+ return (*dev->dev_ops->queue_count)(dev);
+}
+
+int
rte_rawdev_get_attr(uint16_t dev_id,
const char *attr_name,
uint64_t *attr_value)
@@ -189,7 +198,7 @@ rte_rawdev_get_attr(uint16_t dev_id,
return (*dev->dev_ops->attr_get)(dev, attr_name, attr_value);
}
-int __rte_experimental
+int
rte_rawdev_set_attr(uint16_t dev_id,
const char *attr_name,
const uint64_t attr_value)
@@ -203,7 +212,7 @@ rte_rawdev_set_attr(uint16_t dev_id,
return (*dev->dev_ops->attr_set)(dev, attr_name, attr_value);
}
-int __rte_experimental
+int
rte_rawdev_enqueue_buffers(uint16_t dev_id,
struct rte_rawdev_buf **buffers,
unsigned int count,
@@ -218,7 +227,7 @@ rte_rawdev_enqueue_buffers(uint16_t dev_id,
return (*dev->dev_ops->enqueue_bufs)(dev, buffers, count, context);
}
-int __rte_experimental
+int
rte_rawdev_dequeue_buffers(uint16_t dev_id,
struct rte_rawdev_buf **buffers,
unsigned int count,
@@ -233,7 +242,7 @@ rte_rawdev_dequeue_buffers(uint16_t dev_id,
return (*dev->dev_ops->dequeue_bufs)(dev, buffers, count, context);
}
-int __rte_experimental
+int
rte_rawdev_dump(uint16_t dev_id, FILE *f)
{
struct rte_rawdev *dev;
@@ -254,7 +263,7 @@ xstats_get_count(uint16_t dev_id)
return (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
}
-int __rte_experimental
+int
rte_rawdev_xstats_names_get(uint16_t dev_id,
struct rte_rawdev_xstats_name *xstats_names,
unsigned int size)
@@ -277,7 +286,7 @@ rte_rawdev_xstats_names_get(uint16_t dev_id,
}
/* retrieve rawdev extended statistics */
-int __rte_experimental
+int
rte_rawdev_xstats_get(uint16_t dev_id,
const unsigned int ids[],
uint64_t values[],
@@ -290,7 +299,7 @@ rte_rawdev_xstats_get(uint16_t dev_id,
return (*dev->dev_ops->xstats_get)(dev, ids, values, n);
}
-uint64_t __rte_experimental
+uint64_t
rte_rawdev_xstats_by_name_get(uint16_t dev_id,
const char *name,
unsigned int *id)
@@ -309,7 +318,7 @@ rte_rawdev_xstats_by_name_get(uint16_t dev_id,
return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
}
-int __rte_experimental
+int
rte_rawdev_xstats_reset(uint16_t dev_id,
const uint32_t ids[], uint32_t nb_ids)
{
@@ -320,7 +329,7 @@ rte_rawdev_xstats_reset(uint16_t dev_id,
return (*dev->dev_ops->xstats_reset)(dev, ids, nb_ids);
}
-int __rte_experimental
+int
rte_rawdev_firmware_status_get(uint16_t dev_id, rte_rawdev_obj_t status_info)
{
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -330,7 +339,7 @@ rte_rawdev_firmware_status_get(uint16_t dev_id, rte_rawdev_obj_t status_info)
return (*dev->dev_ops->firmware_status_get)(dev, status_info);
}
-int __rte_experimental
+int
rte_rawdev_firmware_version_get(uint16_t dev_id, rte_rawdev_obj_t version_info)
{
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -340,7 +349,7 @@ rte_rawdev_firmware_version_get(uint16_t dev_id, rte_rawdev_obj_t version_info)
return (*dev->dev_ops->firmware_version_get)(dev, version_info);
}
-int __rte_experimental
+int
rte_rawdev_firmware_load(uint16_t dev_id, rte_rawdev_obj_t firmware_image)
{
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -353,7 +362,7 @@ rte_rawdev_firmware_load(uint16_t dev_id, rte_rawdev_obj_t firmware_image)
return (*dev->dev_ops->firmware_load)(dev, firmware_image);
}
-int __rte_experimental
+int
rte_rawdev_firmware_unload(uint16_t dev_id)
{
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -363,7 +372,7 @@ rte_rawdev_firmware_unload(uint16_t dev_id)
return (*dev->dev_ops->firmware_unload)(dev);
}
-int __rte_experimental
+int
rte_rawdev_selftest(uint16_t dev_id)
{
RTE_RAWDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
@@ -373,7 +382,7 @@ rte_rawdev_selftest(uint16_t dev_id)
return (*dev->dev_ops->dev_selftest)();
}
-int __rte_experimental
+int
rte_rawdev_start(uint16_t dev_id)
{
struct rte_rawdev *dev;
@@ -400,7 +409,7 @@ rte_rawdev_start(uint16_t dev_id)
return 0;
}
-void __rte_experimental
+void
rte_rawdev_stop(uint16_t dev_id)
{
struct rte_rawdev *dev;
@@ -422,7 +431,7 @@ rte_rawdev_stop(uint16_t dev_id)
dev->started = 0;
}
-int __rte_experimental
+int
rte_rawdev_close(uint16_t dev_id)
{
struct rte_rawdev *dev;
@@ -441,7 +450,7 @@ rte_rawdev_close(uint16_t dev_id)
return (*dev->dev_ops->dev_close)(dev);
}
-int __rte_experimental
+int
rte_rawdev_reset(uint16_t dev_id)
{
struct rte_rawdev *dev;
@@ -468,7 +477,7 @@ rte_rawdev_find_free_device_index(void)
return RTE_RAWDEV_MAX_DEVS;
}
-struct rte_rawdev * __rte_experimental
+struct rte_rawdev *
rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
{
struct rte_rawdev *rawdev;
@@ -509,7 +518,7 @@ rte_rawdev_pmd_allocate(const char *name, size_t dev_priv_size, int socket_id)
return rawdev;
}
-int __rte_experimental
+int
rte_rawdev_pmd_release(struct rte_rawdev *rawdev)
{
int ret;
@@ -535,10 +544,7 @@ rte_rawdev_pmd_release(struct rte_rawdev *rawdev)
return 0;
}
-RTE_INIT(librawdev_init_log);
-
-static void
-librawdev_init_log(void)
+RTE_INIT(librawdev_init_log)
{
librawdev_logtype = rte_log_register("lib.rawdev");
if (librawdev_logtype >= 0)
diff --git a/lib/librte_rawdev/rte_rawdev.h b/lib/librte_rawdev/rte_rawdev.h
index 2e14919b..684bfdb8 100644
--- a/lib/librte_rawdev/rte_rawdev.h
+++ b/lib/librte_rawdev/rte_rawdev.h
@@ -35,7 +35,7 @@ typedef void *rte_rawdev_obj_t;
* @return
* The total number of usable raw devices.
*/
-uint8_t __rte_experimental
+uint8_t
rte_rawdev_count(void);
/**
@@ -48,7 +48,7 @@ rte_rawdev_count(void);
* Returns raw device identifier on success.
* - <0: Failure to find named raw device.
*/
-uint16_t __rte_experimental
+uint16_t
rte_rawdev_get_dev_id(const char *name);
/**
@@ -61,7 +61,7 @@ rte_rawdev_get_dev_id(const char *name);
* a default of zero if the socket could not be determined.
* -(-EINVAL) dev_id value is out of range.
*/
-int __rte_experimental
+int
rte_rawdev_socket_id(uint16_t dev_id);
/**
@@ -84,7 +84,7 @@ struct rte_rawdev_info;
* - <0: Error code returned by the driver info get function.
*
*/
-int __rte_experimental
+int
rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info);
/**
@@ -111,7 +111,7 @@ rte_rawdev_info_get(uint16_t dev_id, struct rte_rawdev_info *dev_info);
* - 0: Success, device configured.
* - <0: Error code returned by the driver configuration function.
*/
-int __rte_experimental
+int
rte_rawdev_configure(uint16_t dev_id, struct rte_rawdev_info *dev_conf);
@@ -137,7 +137,7 @@ rte_rawdev_configure(uint16_t dev_id, struct rte_rawdev_info *dev_conf);
* @see rte_raw_queue_setup()
*
*/
-int __rte_experimental
+int
rte_rawdev_queue_conf_get(uint16_t dev_id,
uint16_t queue_id,
rte_rawdev_obj_t queue_conf);
@@ -160,7 +160,7 @@ rte_rawdev_queue_conf_get(uint16_t dev_id,
* - 0: Success, raw queue correctly set up.
* - <0: raw queue configuration failed
*/
-int __rte_experimental
+int
rte_rawdev_queue_setup(uint16_t dev_id,
uint16_t queue_id,
rte_rawdev_obj_t queue_conf);
@@ -180,8 +180,9 @@ rte_rawdev_queue_setup(uint16_t dev_id,
* - 0: Success, raw queue released.
* - <0: raw queue configuration failed
*/
-int __rte_experimental
+int
rte_rawdev_queue_release(uint16_t dev_id, uint16_t queue_id);
+
/**
* Get the number of raw queues on a specific raw device
*
@@ -190,7 +191,7 @@ rte_rawdev_queue_release(uint16_t dev_id, uint16_t queue_id);
* @return
* - The number of configured raw queues
*/
-uint16_t __rte_experimental
+uint16_t
rte_rawdev_queue_count(uint16_t dev_id);
/**
@@ -208,7 +209,7 @@ rte_rawdev_queue_count(uint16_t dev_id);
* - 0: Success, device started.
* < 0: Failure
*/
-int __rte_experimental
+int
rte_rawdev_start(uint16_t dev_id);
/**
@@ -218,7 +219,7 @@ rte_rawdev_start(uint16_t dev_id);
* @param dev_id
* Raw device identifier.
*/
-void __rte_experimental
+void
rte_rawdev_stop(uint16_t dev_id);
/**
@@ -232,7 +233,7 @@ rte_rawdev_stop(uint16_t dev_id);
* - <0 on failure to close device
* - (-EAGAIN) if device is busy
*/
-int __rte_experimental
+int
rte_rawdev_close(uint16_t dev_id);
/**
@@ -246,7 +247,7 @@ rte_rawdev_close(uint16_t dev_id);
 *   0 for successful reset,
* !0 for failure in resetting
*/
-int __rte_experimental
+int
rte_rawdev_reset(uint16_t dev_id);
#define RTE_RAWDEV_NAME_MAX_LEN (64)
@@ -316,7 +317,7 @@ struct rte_rawdev_buf {
* - 0: on success
* - <0: on failure.
*/
-int __rte_experimental
+int
rte_rawdev_dump(uint16_t dev_id, FILE *f);
/**
@@ -338,7 +339,7 @@ rte_rawdev_dump(uint16_t dev_id, FILE *f);
* 0 for success
* !0 Error; attr_value remains untouched in case of error.
*/
-int __rte_experimental
+int
rte_rawdev_get_attr(uint16_t dev_id,
const char *attr_name,
uint64_t *attr_value);
@@ -357,7 +358,7 @@ rte_rawdev_get_attr(uint16_t dev_id,
* 0 for success
* !0 Error
*/
-int __rte_experimental
+int
rte_rawdev_set_attr(uint16_t dev_id,
const char *attr_name,
const uint64_t attr_value);
@@ -383,7 +384,7 @@ rte_rawdev_set_attr(uint16_t dev_id,
* Whether partial enqueue is failure or success is defined between app
* and driver implementation.
*/
-int __rte_experimental
+int
rte_rawdev_enqueue_buffers(uint16_t dev_id,
struct rte_rawdev_buf **buffers,
unsigned int count,
@@ -414,7 +415,7 @@ rte_rawdev_enqueue_buffers(uint16_t dev_id,
 *   Whether partial dequeue is failure or success is defined between app
* and driver implementation.
*/
-int __rte_experimental
+int
rte_rawdev_dequeue_buffers(uint16_t dev_id,
struct rte_rawdev_buf **buffers,
unsigned int count,
@@ -454,7 +455,7 @@ struct rte_rawdev_xstats_name {
* -ENODEV for invalid *dev_id*
* -ENOTSUP if the device doesn't support this function.
*/
-int __rte_experimental
+int
rte_rawdev_xstats_names_get(uint16_t dev_id,
struct rte_rawdev_xstats_name *xstats_names,
unsigned int size);
@@ -478,7 +479,7 @@ rte_rawdev_xstats_names_get(uint16_t dev_id,
* -ENODEV for invalid *dev_id*
* -ENOTSUP if the device doesn't support this function.
*/
-int __rte_experimental
+int
rte_rawdev_xstats_get(uint16_t dev_id,
const unsigned int ids[],
uint64_t values[],
@@ -500,7 +501,7 @@ rte_rawdev_xstats_get(uint16_t dev_id,
* - positive value or zero: the stat value
* - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
*/
-uint64_t __rte_experimental
+uint64_t
rte_rawdev_xstats_by_name_get(uint16_t dev_id,
const char *name,
unsigned int *id);
@@ -520,7 +521,7 @@ rte_rawdev_xstats_by_name_get(uint16_t dev_id,
* - zero: successfully reset the statistics to zero
* - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
*/
-int __rte_experimental
+int
rte_rawdev_xstats_reset(uint16_t dev_id,
const uint32_t ids[],
uint32_t nb_ids);
@@ -539,7 +540,7 @@ rte_rawdev_xstats_reset(uint16_t dev_id,
* 0 for success,
* !0 for failure, `status_info` argument state is undefined
*/
-int __rte_experimental
+int
rte_rawdev_firmware_status_get(uint16_t dev_id,
rte_rawdev_obj_t status_info);
@@ -557,7 +558,7 @@ rte_rawdev_firmware_status_get(uint16_t dev_id,
* 0 for success,
* !0 for failure, `version_info` argument state is undefined
*/
-int __rte_experimental
+int
rte_rawdev_firmware_version_get(uint16_t dev_id,
rte_rawdev_obj_t version_info);
@@ -574,7 +575,7 @@ rte_rawdev_firmware_version_get(uint16_t dev_id,
* 0 for successful load
* !0 for failure to load the provided image, or image incorrect.
*/
-int __rte_experimental
+int
rte_rawdev_firmware_load(uint16_t dev_id, rte_rawdev_obj_t firmware_image);
/**
@@ -586,7 +587,7 @@ rte_rawdev_firmware_load(uint16_t dev_id, rte_rawdev_obj_t firmware_image);
 *   0 for successful unload
* !0 for failure in unloading
*/
-int __rte_experimental
+int
rte_rawdev_firmware_unload(uint16_t dev_id);
/**
@@ -599,7 +600,7 @@ rte_rawdev_firmware_unload(uint16_t dev_id);
* - -ENOTSUP if the device doesn't support selftest
* - other values < 0 on failure.
*/
-int __rte_experimental
+int
rte_rawdev_selftest(uint16_t dev_id);
#ifdef __cplusplus
diff --git a/lib/librte_rawdev/rte_rawdev_pmd.h b/lib/librte_rawdev/rte_rawdev_pmd.h
index 408adf0f..bb9bbc35 100644
--- a/lib/librte_rawdev/rte_rawdev_pmd.h
+++ b/lib/librte_rawdev/rte_rawdev_pmd.h
@@ -251,6 +251,24 @@ typedef int (*rawdev_queue_release_t)(struct rte_rawdev *dev,
uint16_t queue_id);
/**
+ * Get the number of queues configured on this device.
+ *
+ * Another way to fetch this information is through the device configuration,
+ * but that assumes the configuration managed by the driver exposes it.
+ *
+ * This function returns the supported queue count independently of the
+ * device configuration, which helps, for example, when a queue iterator
+ * needs to be implemented.
+ *
+ * @param dev
+ * Raw device pointer
+ * @return
+ * Number of queues; 0 is assumed to be a valid response.
+ *
+ */
+typedef uint16_t (*rawdev_queue_count_t)(struct rte_rawdev *dev);
+
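
A sketch of a driver-side implementation, assuming the PMD keeps the configured queue count in its private data (all names below are hypothetical):

    static uint16_t
    my_rawdev_queue_count(struct rte_rawdev *dev)
    {
        /* hypothetical PMD private data holding nb_queues */
        struct my_rawdev_private *priv = dev->dev_private;

        return priv->nb_queues;
    }
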
+/**
* Enqueue an array of raw buffers to the device.
*
* Buffer being used is opaque - it can be obtained from mempool or from
@@ -506,6 +524,8 @@ struct rte_rawdev_ops {
rawdev_queue_setup_t queue_setup;
 	/**< Release a raw queue. */
rawdev_queue_release_t queue_release;
+ /**< Get the number of queues attached to the device */
+ rawdev_queue_count_t queue_count;
/**< Enqueue an array of raw buffers to device. */
rawdev_enqueue_bufs_t enqueue_bufs;
@@ -556,7 +576,7 @@ struct rte_rawdev_ops {
* @return
* - Slot in the rte_dev_devices array for a new device;
*/
-struct rte_rawdev * __rte_experimental
+struct rte_rawdev *
rte_rawdev_pmd_allocate(const char *name, size_t dev_private_size,
int socket_id);
@@ -568,7 +588,7 @@ rte_rawdev_pmd_allocate(const char *name, size_t dev_private_size,
* @return
* - 0 on success, negative on error
*/
-int __rte_experimental
+int
rte_rawdev_pmd_release(struct rte_rawdev *rawdev);
/**
@@ -585,7 +605,7 @@ rte_rawdev_pmd_release(struct rte_rawdev *rawdev);
* - Raw device pointer if device is successfully created.
* - NULL if device cannot be created.
*/
-struct rte_rawdev * __rte_experimental
+struct rte_rawdev *
rte_rawdev_pmd_init(const char *name, size_t dev_private_size,
int socket_id);
@@ -597,7 +617,7 @@ rte_rawdev_pmd_init(const char *name, size_t dev_private_size,
* @return
* - 0 on success, negative on error
*/
-int __rte_experimental
+int
rte_rawdev_pmd_uninit(const char *name);
#ifdef __cplusplus
diff --git a/lib/librte_rawdev/rte_rawdev_version.map b/lib/librte_rawdev/rte_rawdev_version.map
index af4465e2..b61dbff1 100644
--- a/lib/librte_rawdev/rte_rawdev_version.map
+++ b/lib/librte_rawdev/rte_rawdev_version.map
@@ -1,4 +1,4 @@
-EXPERIMENTAL {
+DPDK_18.08 {
global:
rte_rawdev_close;
@@ -16,6 +16,7 @@ EXPERIMENTAL {
rte_rawdev_pmd_allocate;
rte_rawdev_pmd_release;
rte_rawdev_queue_conf_get;
+ rte_rawdev_queue_count;
rte_rawdev_queue_setup;
rte_rawdev_queue_release;
rte_rawdev_reset;
diff --git a/lib/librte_ring/Makefile b/lib/librte_ring/Makefile
index bde8907d..21a36770 100644
--- a/lib/librte_ring/Makefile
+++ b/lib/librte_ring/Makefile
@@ -11,7 +11,7 @@ LDLIBS += -lrte_eal
EXPORT_MAP := rte_ring_version.map
-LIBABIVER := 1
+LIBABIVER := 2
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_RING) := rte_ring.c
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index 253cdc96..7a731d07 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -26,8 +26,9 @@
* - Bulk dequeue.
* - Bulk enqueue.
*
- * Note: the ring implementation is not preemptable. A lcore must not
- * be interrupted by another task that uses the same ring.
+ * Note: the ring implementation is not preemptible. Refer to Programmer's
+ * guide/Environment Abstraction Layer/Multiple pthread/Known Issues/rte_ring
+ * for more information.
*
*/
@@ -62,14 +63,6 @@ enum rte_ring_queue_behavior {
struct rte_memzone; /* forward declaration, so as not to require memzone.h */
-#if RTE_CACHE_LINE_SIZE < 128
-#define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2)
-#define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2)
-#else
-#define PROD_ALIGN RTE_CACHE_LINE_SIZE
-#define CONS_ALIGN RTE_CACHE_LINE_SIZE
-#endif
-
/* structure to hold a pair of head/tail values and other metadata */
struct rte_ring_headtail {
volatile uint32_t head; /**< Prod/consumer head. */
@@ -101,11 +94,15 @@ struct rte_ring {
uint32_t mask; /**< Mask (size-1) of ring. */
uint32_t capacity; /**< Usable size of ring */
+ char pad0 __rte_cache_aligned; /**< empty cache line */
+
/** Ring producer status. */
- struct rte_ring_headtail prod __rte_aligned(PROD_ALIGN);
+ struct rte_ring_headtail prod __rte_cache_aligned;
+ char pad1 __rte_cache_aligned; /**< empty cache line */
/** Ring consumer status. */
- struct rte_ring_headtail cons __rte_aligned(CONS_ALIGN);
+ struct rte_ring_headtail cons __rte_cache_aligned;
+ char pad2 __rte_cache_aligned; /**< empty cache line */
};
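
The explicit pads replace the previous double-cache-line alignment trick: for every cache line size, *prod* and *cons* now each own a full cache line with an empty line between them, so producer and consumer cores do not false-share. A build-time check along these lines (a sketch, placed inside any function; offsetof requires <stddef.h>) captures the intent:

    RTE_BUILD_BUG_ON(offsetof(struct rte_ring, cons) -
        offsetof(struct rte_ring, prod) < 2 * RTE_CACHE_LINE_SIZE);
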
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
@@ -339,7 +336,7 @@ void rte_ring_dump(FILE *f, const struct rte_ring *r);
static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- int is_sp, unsigned int *free_space)
+ unsigned int is_sp, unsigned int *free_space)
{
uint32_t prod_head, prod_next;
uint32_t free_entries;
@@ -381,12 +378,12 @@ end:
static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- int is_sc, unsigned int *available)
+ unsigned int is_sc, unsigned int *available)
{
uint32_t cons_head, cons_next;
uint32_t entries;
- n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
+ n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
&cons_head, &cons_next, &entries);
if (n == 0)
goto end;
diff --git a/lib/librte_ring/rte_ring_c11_mem.h b/lib/librte_ring/rte_ring_c11_mem.h
index 08825ea5..94df3c4a 100644
--- a/lib/librte_ring/rte_ring_c11_mem.h
+++ b/lib/librte_ring/rte_ring_c11_mem.h
@@ -51,7 +51,7 @@ update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
static __rte_always_inline unsigned int
-__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
uint32_t *free_entries)
@@ -66,14 +66,14 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
*old_head = __atomic_load_n(&r->prod.head,
__ATOMIC_ACQUIRE);
- const uint32_t cons_tail = r->cons.tail;
+
/*
* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* *old_head > cons_tail). So 'free_entries' is always between 0
* and capacity (which is < size).
*/
- *free_entries = (capacity + cons_tail - *old_head);
+ *free_entries = (capacity + r->cons.tail - *old_head);
/* check that we have enough room in ring */
if (unlikely(n > *free_entries))
@@ -133,13 +133,13 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
n = max;
*old_head = __atomic_load_n(&r->cons.head,
__ATOMIC_ACQUIRE);
- const uint32_t prod_tail = r->prod.tail;
+
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1.
*/
- *entries = (prod_tail - *old_head);
+ *entries = (r->prod.tail - *old_head);
/* Set the actual entries for dequeue */
if (n > *entries)
diff --git a/lib/librte_ring/rte_ring_generic.h b/lib/librte_ring/rte_ring_generic.h
index 5b110425..ea7dbe5b 100644
--- a/lib/librte_ring/rte_ring_generic.h
+++ b/lib/librte_ring/rte_ring_generic.h
@@ -53,7 +53,7 @@ update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
static __rte_always_inline unsigned int
-__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
uint32_t *free_entries)
@@ -73,14 +73,13 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
*/
rte_smp_rmb();
- const uint32_t cons_tail = r->cons.tail;
/*
* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* *old_head > cons_tail). So 'free_entries' is always between 0
* and capacity (which is < size).
*/
- *free_entries = (capacity + cons_tail - *old_head);
+ *free_entries = (capacity + r->cons.tail - *old_head);
/* check that we have enough room in ring */
if (unlikely(n > *free_entries))
@@ -124,7 +123,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
static __rte_always_inline unsigned int
-__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
uint32_t *entries)
@@ -144,13 +143,12 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
*/
rte_smp_rmb();
- const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1.
*/
- *entries = (prod_tail - *old_head);
+ *entries = (r->prod.tail - *old_head);
/* Set the actual entries for dequeue */
if (n > *entries)
diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 634486c8..9269e5c7 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -276,9 +276,54 @@ rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
}
static int
+pipe_profile_check(struct rte_sched_pipe_params *params,
+ uint32_t rate)
+{
+ uint32_t i;
+
+ /* Pipe parameters */
+ if (params == NULL)
+ return -10;
+
+ /* TB rate: non-zero, not greater than port rate */
+ if (params->tb_rate == 0 ||
+ params->tb_rate > rate)
+ return -11;
+
+ /* TB size: non-zero */
+ if (params->tb_size == 0)
+ return -12;
+
+ /* TC rate: non-zero, less than pipe rate */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ if (params->tc_rate[i] == 0 ||
+ params->tc_rate[i] > params->tb_rate)
+ return -13;
+ }
+
+ /* TC period: non-zero */
+ if (params->tc_period == 0)
+ return -14;
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ /* TC3 oversubscription weight: non-zero */
+ if (params->tc_ov_weight == 0)
+ return -15;
+#endif
+
+ /* Queue WRR weights: non-zero */
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) {
+ if (params->wrr_weights[i] == 0)
+ return -16;
+ }
+
+ return 0;
+}
+
+static int
rte_sched_port_check_params(struct rte_sched_port_params *params)
{
- uint32_t i, j;
+ uint32_t i;
if (params == NULL)
return -1;
@@ -324,36 +369,11 @@ rte_sched_port_check_params(struct rte_sched_port_params *params)
for (i = 0; i < params->n_pipe_profiles; i++) {
struct rte_sched_pipe_params *p = params->pipe_profiles + i;
+ int status;
- /* TB rate: non-zero, not greater than port rate */
- if (p->tb_rate == 0 || p->tb_rate > params->rate)
- return -10;
-
- /* TB size: non-zero */
- if (p->tb_size == 0)
- return -11;
-
- /* TC rate: non-zero, less than pipe rate */
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
- if (p->tc_rate[j] == 0 || p->tc_rate[j] > p->tb_rate)
- return -12;
- }
-
- /* TC period: non-zero */
- if (p->tc_period == 0)
- return -13;
-
-#ifdef RTE_SCHED_SUBPORT_TC_OV
- /* TC3 oversubscription weight: non-zero */
- if (p->tc_ov_weight == 0)
- return -14;
-#endif
-
- /* Queue WRR weights: non-zero */
- for (j = 0; j < RTE_SCHED_QUEUES_PER_PIPE; j++) {
- if (p->wrr_weights[j] == 0)
- return -15;
- }
+ status = pipe_profile_check(p, params->rate);
+ if (status != 0)
+ return status;
}
return 0;
@@ -514,69 +534,80 @@ rte_sched_time_ms_to_bytes(uint32_t time_ms, uint32_t rate)
}
static void
-rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte_sched_port_params *params)
+rte_sched_pipe_profile_convert(struct rte_sched_pipe_params *src,
+ struct rte_sched_pipe_profile *dst,
+ uint32_t rate)
{
- uint32_t i, j;
+ uint32_t i;
- for (i = 0; i < port->n_pipe_profiles; i++) {
- struct rte_sched_pipe_params *src = params->pipe_profiles + i;
- struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
+ /* Token Bucket */
+ if (src->tb_rate == rate) {
+ dst->tb_credits_per_period = 1;
+ dst->tb_period = 1;
+ } else {
+ double tb_rate = (double) src->tb_rate
+ / (double) rate;
+ double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
- /* Token Bucket */
- if (src->tb_rate == params->rate) {
- dst->tb_credits_per_period = 1;
- dst->tb_period = 1;
- } else {
- double tb_rate = (double) src->tb_rate
- / (double) params->rate;
- double d = RTE_SCHED_TB_RATE_CONFIG_ERR;
-
- rte_approx(tb_rate, d,
- &dst->tb_credits_per_period, &dst->tb_period);
- }
- dst->tb_size = src->tb_size;
+ rte_approx(tb_rate, d,
+ &dst->tb_credits_per_period, &dst->tb_period);
+ }
+
+ dst->tb_size = src->tb_size;
- /* Traffic Classes */
- dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
- params->rate);
+ /* Traffic Classes */
+ dst->tc_period = rte_sched_time_ms_to_bytes(src->tc_period,
+ rate);
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++)
- dst->tc_credits_per_period[j]
- = rte_sched_time_ms_to_bytes(src->tc_period,
- src->tc_rate[j]);
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+ dst->tc_credits_per_period[i]
+ = rte_sched_time_ms_to_bytes(src->tc_period,
+ src->tc_rate[i]);
#ifdef RTE_SCHED_SUBPORT_TC_OV
- dst->tc_ov_weight = src->tc_ov_weight;
+ dst->tc_ov_weight = src->tc_ov_weight;
#endif
- /* WRR */
- for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
- uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
- uint32_t lcd, lcd1, lcd2;
- uint32_t qindex;
-
- qindex = j * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
-
- wrr_cost[0] = src->wrr_weights[qindex];
- wrr_cost[1] = src->wrr_weights[qindex + 1];
- wrr_cost[2] = src->wrr_weights[qindex + 2];
- wrr_cost[3] = src->wrr_weights[qindex + 3];
-
- lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
- lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
- lcd = rte_get_lcd(lcd1, lcd2);
-
- wrr_cost[0] = lcd / wrr_cost[0];
- wrr_cost[1] = lcd / wrr_cost[1];
- wrr_cost[2] = lcd / wrr_cost[2];
- wrr_cost[3] = lcd / wrr_cost[3];
-
- dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
- dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
- dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
- dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
- }
+ /* WRR */
+ for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+ uint32_t wrr_cost[RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS];
+ uint32_t lcd, lcd1, lcd2;
+ uint32_t qindex;
+
+ qindex = i * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
+
+ wrr_cost[0] = src->wrr_weights[qindex];
+ wrr_cost[1] = src->wrr_weights[qindex + 1];
+ wrr_cost[2] = src->wrr_weights[qindex + 2];
+ wrr_cost[3] = src->wrr_weights[qindex + 3];
+
+ lcd1 = rte_get_lcd(wrr_cost[0], wrr_cost[1]);
+ lcd2 = rte_get_lcd(wrr_cost[2], wrr_cost[3]);
+ lcd = rte_get_lcd(lcd1, lcd2);
+
+ wrr_cost[0] = lcd / wrr_cost[0];
+ wrr_cost[1] = lcd / wrr_cost[1];
+ wrr_cost[2] = lcd / wrr_cost[2];
+ wrr_cost[3] = lcd / wrr_cost[3];
+
+ dst->wrr_cost[qindex] = (uint8_t) wrr_cost[0];
+ dst->wrr_cost[qindex + 1] = (uint8_t) wrr_cost[1];
+ dst->wrr_cost[qindex + 2] = (uint8_t) wrr_cost[2];
+ dst->wrr_cost[qindex + 3] = (uint8_t) wrr_cost[3];
+ }
+}
+
+static void
+rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port,
+ struct rte_sched_port_params *params)
+{
+ uint32_t i;
+
+ for (i = 0; i < port->n_pipe_profiles; i++) {
+ struct rte_sched_pipe_params *src = params->pipe_profiles + i;
+ struct rte_sched_pipe_profile *dst = port->pipe_profiles + i;
+ rte_sched_pipe_profile_convert(src, dst, params->rate);
rte_sched_port_log_pipe_profile(port, i);
}
@@ -932,6 +963,48 @@ rte_sched_pipe_config(struct rte_sched_port *port,
return 0;
}
+int __rte_experimental
+rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
+ struct rte_sched_pipe_params *params,
+ uint32_t *pipe_profile_id)
+{
+ struct rte_sched_pipe_profile *pp;
+ uint32_t i;
+ int status;
+
+ /* Port */
+ if (port == NULL)
+ return -1;
+
+ /* Number of pipe profiles must not exceed the max limit */
+ if (port->n_pipe_profiles >= RTE_SCHED_PIPE_PROFILES_PER_PORT)
+ return -2;
+
+ /* Pipe params */
+ status = pipe_profile_check(params, port->rate);
+ if (status != 0)
+ return status;
+
+ pp = &port->pipe_profiles[port->n_pipe_profiles];
+ rte_sched_pipe_profile_convert(params, pp, port->rate);
+
+ /* Pipe profile must not already exist */
+ for (i = 0; i < port->n_pipe_profiles; i++)
+ if (memcmp(port->pipe_profiles + i, pp, sizeof(*pp)) == 0)
+ return -3;
+
+ /* Pipe profile commit */
+ *pipe_profile_id = port->n_pipe_profiles;
+ port->n_pipe_profiles++;
+
+ if (port->pipe_tc3_rate_max < params->tc_rate[3])
+ port->pipe_tc3_rate_max = params->tc_rate[3];
+
+ rte_sched_port_log_pipe_profile(port, *pipe_profile_id);
+
+ return 0;
+}
+
void
rte_sched_port_pkt_write(struct rte_mbuf *pkt,
uint32_t subport, uint32_t pipe, uint32_t traffic_class,
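The refactoring above splits validation (pipe_profile_check) and conversion (rte_sched_pipe_profile_convert) out of the port-level paths precisely so single profiles can be added at runtime via the new rte_sched_port_pipe_profile_add(). A hedged usage sketch, assuming an already configured scheduler port; the parameter values are illustrative only:

#include <string.h>
#include <rte_sched.h>

static int
add_runtime_pipe_profile(struct rte_sched_port *port, uint32_t port_rate)
{
	struct rte_sched_pipe_params pp;
	uint32_t profile_id, i;

	memset(&pp, 0, sizeof(pp));
	pp.tb_rate = port_rate / 10;		/* 10% of the port rate */
	pp.tb_size = 1000000;
	pp.tc_period = 40;			/* ms */
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		pp.tc_rate[i] = pp.tb_rate;
	for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++)
		pp.wrr_weights[i] = 1;
#ifdef RTE_SCHED_SUBPORT_TC_OV
	pp.tc_ov_weight = 1;
#endif

	/* validated by pipe_profile_check(), converted, then committed */
	return rte_sched_port_pipe_profile_add(port, &pp, &profile_id);
}

Pipes can then be (re)configured with the returned profile_id via rte_sched_pipe_config().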
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 5d2a688d..84fa896d 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -57,6 +57,7 @@ extern "C" {
*/
#include <sys/types.h>
+#include <rte_compat.h>
#include <rte_mbuf.h>
#include <rte_meter.h>
@@ -234,6 +235,26 @@ void
rte_sched_port_free(struct rte_sched_port *port);
/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Hierarchical scheduler pipe profile add
+ *
+ * @param port
+ * Handle to port scheduler instance
+ * @param params
+ * Pipe profile parameters
+ * @param pipe_profile_id
+ * Set to valid profile id when profile is added successfully.
+ * @return
+ * 0 upon success, error code otherwise
+ */
+int __rte_experimental
+rte_sched_port_pipe_profile_add(struct rte_sched_port *port,
+ struct rte_sched_pipe_params *params,
+ uint32_t *pipe_profile_id);
+
+/**
* Hierarchical scheduler subport configuration
*
* @param port
diff --git a/lib/librte_sched/rte_sched_version.map b/lib/librte_sched/rte_sched_version.map
index 3aa159ab..72958879 100644
--- a/lib/librte_sched/rte_sched_version.map
+++ b/lib/librte_sched/rte_sched_version.map
@@ -29,3 +29,9 @@ DPDK_2.1 {
rte_sched_port_pkt_read_color;
} DPDK_2.0;
+
+EXPERIMENTAL {
+ global:
+
+ rte_sched_port_pipe_profile_add;
+};
diff --git a/lib/librte_security/rte_security.c b/lib/librte_security/rte_security.c
index 1e559c99..1954960a 100644
--- a/lib/librte_security/rte_security.c
+++ b/lib/librte_security/rte_security.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 NXP.
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP.
+ * Copyright(c) 2017 Intel Corporation.
*/
#include <rte_malloc.h>
@@ -91,7 +63,6 @@ rte_security_session_destroy(struct rte_security_ctx *instance,
struct rte_security_session *sess)
{
int ret;
- struct rte_mempool *mp = rte_mempool_from_obj(sess);
RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->session_destroy, -ENOTSUP);
@@ -100,7 +71,7 @@ rte_security_session_destroy(struct rte_security_ctx *instance,
ret = instance->ops->session_destroy(instance->device, sess);
if (!ret)
- rte_mempool_put(mp, (void *)sess);
+ rte_mempool_put(rte_mempool_from_obj(sess), (void *)sess);
return ret;
}
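The change above only moves the rte_mempool_from_obj() lookup after the driver confirms the destroy. The caller-side contract is unchanged; a small hedged sketch of it (function name is hypothetical):

#include <rte_security.h>

static int
teardown_session(struct rte_security_ctx *ctx,
		 struct rte_security_session **sess)
{
	int ret = rte_security_session_destroy(ctx, *sess);

	if (ret == 0)
		*sess = NULL;	/* object was returned to its mempool */

	return ret;
}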
diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h
index c75c1218..b0d1b97e 100644
--- a/lib/librte_security/rte_security.h
+++ b/lib/librte_security/rte_security.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright 2017 NXP.
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of NXP nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP.
+ * Copyright(c) 2017 Intel Corporation.
*/
#ifndef _RTE_SECURITY_H_
@@ -222,6 +194,8 @@ struct rte_security_ipsec_xform {
/**< IPsec SA Mode - transport/tunnel */
struct rte_security_ipsec_tunnel_param tunnel;
/**< Tunnel parameters, NULL for transport mode */
+ uint64_t esn_soft_limit;
+ /**< ESN for which the overflow event need to be raised */
};
/**
@@ -362,15 +336,17 @@ rte_security_set_pkt_metadata(struct rte_security_ctx *instance,
struct rte_mbuf *mb, void *params);
/**
- * Get userdata associated with the security session which processed the
- * packet. This userdata would be registered while creating the session, and
- * application can use this to identify the SA etc. Device-specific metadata
- * in the mbuf would be used for this.
+ * Get userdata associated with the security session. Device specific metadata
+ * provided would be used to uniquely identify the security session being
+ * referred to. This userdata would be registered while creating the session,
+ * and the application can use it to identify the SA, etc.
*
- * This is valid only for inline processed ingress packets.
+ * Device specific metadata would be set in mbuf for inline processed inbound
+ * packets. In addition, the same metadata would be set for IPsec events
+ * reported by rte_eth_event framework.
*
* @param instance security instance
- * @param md device-specific metadata set in mbuf
+ * @param md device-specific metadata
*
* @return
* - On success, userdata
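With the wording above, the userdata lookup is keyed purely on device-specific metadata rather than on an ingress packet. A hedged sketch of a lookup helper, assuming a PMD that stores its metadata in the mbuf's udata64 field (where the metadata lands is device specific):

#include <rte_mbuf.h>
#include <rte_security.h>

static void *
lookup_sa(struct rte_security_ctx *ctx, struct rte_mbuf *m)
{
	/* assumption: the PMD stored its device-specific metadata in
	 * udata64 for this inline-processed packet */
	return rte_security_get_userdata(ctx, m->udata64);
}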
diff --git a/lib/librte_security/rte_security_driver.h b/lib/librte_security/rte_security_driver.h
index 46239049..42f42ffe 100644
--- a/lib/librte_security/rte_security_driver.h
+++ b/lib/librte_security/rte_security_driver.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Intel Corporation. All rights reserved.
- * Copyright 2017 NXP.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP.
+ * Copyright(c) 2017 Intel Corporation.
*/
#ifndef _RTE_SECURITY_DRIVER_H_
@@ -134,9 +106,9 @@ typedef int (*security_set_pkt_metadata_t)(void *device,
void *params);
/**
- * Get application specific userdata associated with the security session which
- * processed the packet. This would be retrieved using the metadata obtained
- * from packet.
+ * Get application specific userdata associated with the security session.
+ * Device specific metadata provided would be used to uniquely identify
+ * the security session being referred to.
*
* @param device Crypto/eth device pointer
* @param md Metadata
diff --git a/lib/librte_table/Makefile b/lib/librte_table/Makefile
index c4a9acb0..276d476a 100644
--- a/lib/librte_table/Makefile
+++ b/lib/librte_table/Makefile
@@ -45,6 +45,7 @@ ifeq ($(CONFIG_RTE_LIBRTE_ACL),y)
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_acl.h
endif
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_hash.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_table_hash_cuckoo.h
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_lru.h
ifeq ($(CONFIG_RTE_ARCH_X86),y)
SYMLINK-$(CONFIG_RTE_LIBRTE_TABLE)-include += rte_lru_x86.h
diff --git a/lib/librte_table/meson.build b/lib/librte_table/meson.build
index 13e797b4..8b2f8413 100644
--- a/lib/librte_table/meson.build
+++ b/lib/librte_table/meson.build
@@ -1,24 +1,29 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-version = 2
-sources = files('rte_table_lpm.c', 'rte_table_lpm_ipv6.c',
- 'rte_table_hash_cuckoo.c', 'rte_table_hash_key8.c',
- 'rte_table_hash_key16.c', 'rte_table_hash_key32.c',
- 'rte_table_hash_ext.c', 'rte_table_hash_lru.c',
- 'rte_table_array.c', 'rte_table_stub.c')
-headers = files('rte_table.h', 'rte_table_lpm.h',
- 'rte_table_lpm_ipv6.h', 'rte_table_hash.h',
- 'rte_lru.h', 'rte_table_array.h',
+version = 3
+sources = files('rte_table_acl.c',
+ 'rte_table_lpm.c',
+ 'rte_table_lpm_ipv6.c',
+ 'rte_table_hash_cuckoo.c',
+ 'rte_table_hash_key8.c',
+ 'rte_table_hash_key16.c',
+ 'rte_table_hash_key32.c',
+ 'rte_table_hash_ext.c',
+ 'rte_table_hash_lru.c',
+ 'rte_table_array.c',
+ 'rte_table_stub.c')
+headers = files('rte_table.h',
+ 'rte_table_acl.h',
+ 'rte_table_lpm.h',
+ 'rte_table_lpm_ipv6.h',
+ 'rte_table_hash.h',
+ 'rte_table_hash_cuckoo.h',
+ 'rte_lru.h',
+ 'rte_table_array.h',
'rte_table_stub.h')
-deps += ['mbuf', 'port', 'lpm', 'hash']
+deps += ['mbuf', 'port', 'lpm', 'hash', 'acl']
if arch_subdir == 'x86'
headers += files('rte_lru_x86.h')
endif
-
-if dpdk_conf.has('RTE_LIBRTE_ACL')
- sources += files('rte_table_acl.c')
- headers += files('rte_table_acl.h')
- deps += ['acl']
-endif
diff --git a/lib/librte_table/rte_table_acl.c b/lib/librte_table/rte_table_acl.c
index 73d3910e..14d54019 100644
--- a/lib/librte_table/rte_table_acl.c
+++ b/lib/librte_table/rte_table_acl.c
@@ -472,12 +472,6 @@ rte_table_acl_entry_add_bulk(
return -EINVAL;
}
- if (entries_ptr[i] == NULL) {
- RTE_LOG(ERR, TABLE, "%s: entries_ptr[%" PRIu32 "] parameter is NULL\n",
- __func__, i);
- return -EINVAL;
- }
-
rule = keys[i];
if (rule->priority > RTE_ACL_MAX_PRIORITY) {
RTE_LOG(ERR, TABLE, "%s: Priority is too high\n", __func__);
diff --git a/lib/librte_table/rte_table_hash.h b/lib/librte_table/rte_table_hash.h
index 7aad84fa..6f55bd57 100644
--- a/lib/librte_table/rte_table_hash.h
+++ b/lib/librte_table/rte_table_hash.h
@@ -99,9 +99,6 @@ extern struct rte_table_ops rte_table_hash_key8_lru_ops;
extern struct rte_table_ops rte_table_hash_key16_lru_ops;
extern struct rte_table_ops rte_table_hash_key32_lru_ops;
-/** Cuckoo hash table operations */
-extern struct rte_table_ops rte_table_hash_cuckoo_ops;
-
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_table/rte_table_hash_cuckoo.c b/lib/librte_table/rte_table_hash_cuckoo.c
index dcb4fe97..f0243033 100644
--- a/lib/librte_table/rte_table_hash_cuckoo.c
+++ b/lib/librte_table/rte_table_hash_cuckoo.c
@@ -10,8 +10,7 @@
#include <rte_malloc.h>
#include <rte_log.h>
-#include <rte_hash.h>
-#include "rte_table_hash.h"
+#include "rte_table_hash_cuckoo.h"
#ifdef RTE_TABLE_STATS_COLLECT
@@ -35,7 +34,7 @@ struct rte_table_hash {
uint32_t key_size;
uint32_t entry_size;
uint32_t n_keys;
- rte_table_hash_op_hash f_hash;
+ rte_hash_function f_hash;
uint32_t seed;
uint32_t key_offset;
@@ -47,7 +46,7 @@ struct rte_table_hash {
};
static int
-check_params_create_hash_cuckoo(struct rte_table_hash_params *params)
+check_params_create_hash_cuckoo(struct rte_table_hash_cuckoo_params *params)
{
if (params == NULL) {
RTE_LOG(ERR, TABLE, "NULL Input Parameters.\n");
@@ -82,7 +81,7 @@ rte_table_hash_cuckoo_create(void *params,
int socket_id,
uint32_t entry_size)
{
- struct rte_table_hash_params *p = params;
+ struct rte_table_hash_cuckoo_params *p = params;
struct rte_hash *h_table;
struct rte_table_hash *t;
uint32_t total_size;
@@ -107,7 +106,7 @@ rte_table_hash_cuckoo_create(void *params,
struct rte_hash_parameters hash_cuckoo_params = {
.entries = p->n_keys,
.key_len = p->key_size,
- .hash_func = (rte_hash_function)(p->f_hash),
+ .hash_func = p->f_hash,
.hash_func_init_val = p->seed,
.socket_id = socket_id,
.name = p->name
diff --git a/lib/librte_table/rte_table_hash_cuckoo.h b/lib/librte_table/rte_table_hash_cuckoo.h
new file mode 100644
index 00000000..d9d43121
--- /dev/null
+++ b/lib/librte_table/rte_table_hash_cuckoo.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_HASH_CUCKOO_H__
+#define __INCLUDE_RTE_TABLE_HASH_CUCKOO_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Table Hash Cuckoo
+ *
+ */
+#include <stdint.h>
+
+#include <rte_hash.h>
+
+#include "rte_table.h"
+
+/** Hash table parameters */
+struct rte_table_hash_cuckoo_params {
+ /** Name */
+ const char *name;
+
+ /** Key size (number of bytes) */
+ uint32_t key_size;
+
+ /** Byte offset within packet meta-data where the key is located */
+ uint32_t key_offset;
+
+ /** Key mask */
+ uint8_t *key_mask;
+
+ /** Number of keys */
+ uint32_t n_keys;
+
+ /** Number of buckets */
+ uint32_t n_buckets;
+
+ /** Hash function */
+ rte_hash_function f_hash;
+
+ /** Seed value for the hash function */
+ uint32_t seed;
+};
+
+/** Cuckoo hash table operations */
+extern struct rte_table_ops rte_table_hash_cuckoo_ops;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
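Since the cuckoo table now takes an rte_hash_function directly, the new public parameter struct can be filled without casts. A minimal creation sketch; the table geometry and 8-byte entry size are arbitrary choices for illustration:

#include <rte_hash_crc.h>
#include <rte_table_hash_cuckoo.h>

static void *
create_cuckoo_table(int socket_id)
{
	struct rte_table_hash_cuckoo_params p = {
		.name = "cuckoo0",
		.key_size = 16,
		.key_offset = 0,
		.key_mask = NULL,
		.n_keys = 1 << 16,
		.n_buckets = 1 << 16,
		.f_hash = rte_hash_crc,	/* matches rte_hash_function now */
		.seed = 0,
	};

	/* 8-byte table entries; entry size is an arbitrary choice here */
	return rte_table_hash_cuckoo_ops.f_create(&p, socket_id, 8);
}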
diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
index 4bbcd067..590488c7 100644
--- a/lib/librte_timer/rte_timer.c
+++ b/lib/librte_timer/rte_timer.c
@@ -403,7 +403,7 @@ rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
!(rte_lcore_is_enabled(tim_lcore) ||
- rte_lcore_has_role(tim_lcore, ROLE_SERVICE) == 0)))
+ rte_lcore_has_role(tim_lcore, ROLE_SERVICE))))
return -1;
if (type == PERIODICAL)
diff --git a/lib/librte_vhost/Makefile b/lib/librte_vhost/Makefile
index 5d6c6aba..de431fbb 100644
--- a/lib/librte_vhost/Makefile
+++ b/lib/librte_vhost/Makefile
@@ -18,13 +18,20 @@ LDLIBS += -lpthread
ifeq ($(CONFIG_RTE_LIBRTE_VHOST_NUMA),y)
LDLIBS += -lnuma
endif
-LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_ethdev -lrte_net
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_ethdev -lrte_net \
+ -lrte_cryptodev -lrte_hash
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_VHOST) := fd_man.c iotlb.c socket.c vhost.c \
- vhost_user.c virtio_net.c
+ vhost_user.c virtio_net.c vdpa.c
# install includes
-SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_vhost.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_vhost.h rte_vdpa.h
+
+# only compile vhost crypto when cryptodev is enabled
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+SRCS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost_crypto.c
+SYMLINK-$(CONFIG_RTE_LIBRTE_VHOST)-include += rte_vhost_crypto.h
+endif
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_vhost/fd_man.c b/lib/librte_vhost/fd_man.c
index 181711c2..38347ab1 100644
--- a/lib/librte_vhost/fd_man.c
+++ b/lib/librte_vhost/fd_man.c
@@ -16,6 +16,9 @@
#include "fd_man.h"
+
+#define RTE_LOGTYPE_VHOST_FDMAN RTE_LOGTYPE_USER1
+
#define FDPOLLERR (POLLERR | POLLHUP | POLLNVAL)
static int
@@ -171,6 +174,38 @@ fdset_del(struct fdset *pfdset, int fd)
return dat;
}
+/**
+ * Unregister the fd from the fdset.
+ *
+ * Return -2 if the parameters are invalid.
+ * Return -1 if the fd is busy (a callback is running on it).
+ * Otherwise, delete the fd from the fdset and return 0.
+ */
+int
+fdset_try_del(struct fdset *pfdset, int fd)
+{
+ int i;
+
+ if (pfdset == NULL || fd == -1)
+ return -2;
+
+ pthread_mutex_lock(&pfdset->fd_mutex);
+ i = fdset_find_fd(pfdset, fd);
+ if (i != -1 && pfdset->fd[i].busy) {
+ pthread_mutex_unlock(&pfdset->fd_mutex);
+ return -1;
+ }
+
+ if (i != -1) {
+ pfdset->fd[i].fd = -1;
+ pfdset->fd[i].rcb = pfdset->fd[i].wcb = NULL;
+ pfdset->fd[i].dat = NULL;
+ }
+
+ pthread_mutex_unlock(&pfdset->fd_mutex);
+ return 0;
+}
/**
* This functions runs in infinite blocking loop until there is no fd in
@@ -258,7 +293,7 @@ fdset_event_dispatch(void *arg)
* because the fd is closed in the cb,
* the old fd val could be reused by when creates new
* listen fd in another thread, we couldn't call
- * fd_set_del.
+ * fdset_del.
*/
if (remove1 || remove2) {
pfdentry->fd = -1;
@@ -272,3 +307,64 @@ fdset_event_dispatch(void *arg)
return NULL;
}
+
+static void
+fdset_pipe_read_cb(int readfd, void *dat __rte_unused,
+ int *remove __rte_unused)
+{
+ char charbuf[16];
+ int r = read(readfd, charbuf, sizeof(charbuf));
+ /*
+ * The read is only a wakeup; we don't care if read() fails,
+ * so explicitly ignore its return value to keep the compiler
+ * happy
+ */
+ RTE_SET_USED(r);
+}
+
+void
+fdset_pipe_uninit(struct fdset *fdset)
+{
+ fdset_del(fdset, fdset->u.readfd);
+ close(fdset->u.readfd);
+ close(fdset->u.writefd);
+}
+
+int
+fdset_pipe_init(struct fdset *fdset)
+{
+ int ret;
+
+ if (pipe(fdset->u.pipefd) < 0) {
+ RTE_LOG(ERR, VHOST_FDMAN,
+ "failed to create pipe for vhost fdset\n");
+ return -1;
+ }
+
+ ret = fdset_add(fdset, fdset->u.readfd,
+ fdset_pipe_read_cb, NULL, NULL);
+
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_FDMAN,
+ "failed to add pipe readfd %d into vhost server fdset\n",
+ fdset->u.readfd);
+
+ fdset_pipe_uninit(fdset);
+ return -1;
+ }
+
+ return 0;
+}
+
+void
+fdset_pipe_notify(struct fdset *fdset)
+{
+ int r = write(fdset->u.writefd, "1", 1);
+ /*
+ * The write is only a wakeup; we don't care if write() fails,
+ * so explicitly ignore its return value to keep the compiler
+ * happy
+ */
+ RTE_SET_USED(r);
+}
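The pipe added above is the classic self-pipe trick: the read end lives inside the fdset that poll() waits on, and writing a single byte forces the dispatch loop to wake up and rebuild its pollfd array. A hedged sketch of how a control thread would use it (rcb is a hypothetical read callback):

#include "fd_man.h"

static void
register_fd(struct fdset *set, int fd, fd_cb rcb)
{
	if (fdset_add(set, fd, rcb, NULL, NULL) == 0)
		/* one byte down the pipe; poll() wakes and re-polls fd */
		fdset_pipe_notify(set);
}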
diff --git a/lib/librte_vhost/fd_man.h b/lib/librte_vhost/fd_man.h
index 3a9276c3..3331bcd9 100644
--- a/lib/librte_vhost/fd_man.h
+++ b/lib/librte_vhost/fd_man.h
@@ -25,6 +25,16 @@ struct fdset {
struct fdentry fd[MAX_FDS];
pthread_mutex_t fd_mutex;
int num; /* current fd number of this fdset */
+
+ union pipefds {
+ struct {
+ int pipefd[2];
+ };
+ struct {
+ int readfd;
+ int writefd;
+ };
+ } u;
};
@@ -34,7 +44,14 @@ int fdset_add(struct fdset *pfdset, int fd,
fd_cb rcb, fd_cb wcb, void *dat);
void *fdset_del(struct fdset *pfdset, int fd);
+int fdset_try_del(struct fdset *pfdset, int fd);
void *fdset_event_dispatch(void *arg);
+int fdset_pipe_init(struct fdset *fdset);
+
+void fdset_pipe_uninit(struct fdset *fdset);
+
+void fdset_pipe_notify(struct fdset *fdset);
+
#endif
diff --git a/lib/librte_vhost/iotlb.c b/lib/librte_vhost/iotlb.c
index c11ebcaa..c6354fef 100644
--- a/lib/librte_vhost/iotlb.c
+++ b/lib/librte_vhost/iotlb.c
@@ -303,6 +303,13 @@ out:
return vva;
}
+void
+vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq)
+{
+ vhost_user_iotlb_cache_remove_all(vq);
+ vhost_user_iotlb_pending_remove_all(vq);
+}
+
int
vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
{
@@ -315,8 +322,7 @@ vhost_user_iotlb_init(struct virtio_net *dev, int vq_index)
* The cache has already been initialized,
* just drop all cached and pending entries.
*/
- vhost_user_iotlb_cache_remove_all(vq);
- vhost_user_iotlb_pending_remove_all(vq);
+ vhost_user_iotlb_flush_all(vq);
}
#ifdef RTE_LIBRTE_VHOST_NUMA
diff --git a/lib/librte_vhost/iotlb.h b/lib/librte_vhost/iotlb.h
index e7083e37..60b9e4c5 100644
--- a/lib/librte_vhost/iotlb.h
+++ b/lib/librte_vhost/iotlb.h
@@ -73,7 +73,7 @@ void vhost_user_iotlb_pending_insert(struct vhost_virtqueue *vq, uint64_t iova,
uint8_t perm);
void vhost_user_iotlb_pending_remove(struct vhost_virtqueue *vq, uint64_t iova,
uint64_t size, uint8_t perm);
-
+void vhost_user_iotlb_flush_all(struct vhost_virtqueue *vq);
int vhost_user_iotlb_init(struct virtio_net *dev, int vq_index);
#endif /* _VHOST_IOTLB_H_ */
diff --git a/lib/librte_vhost/meson.build b/lib/librte_vhost/meson.build
index 9e8c0e76..bd62e0e3 100644
--- a/lib/librte_vhost/meson.build
+++ b/lib/librte_vhost/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2017-2018 Intel Corporation
if host_machine.system() != 'linux'
build = false
@@ -9,7 +9,8 @@ if has_libnuma == 1
endif
version = 4
allow_experimental_apis = true
-sources = files('fd_man.c', 'iotlb.c', 'socket.c', 'vhost.c', 'vhost_user.c',
- 'virtio_net.c')
-headers = files('rte_vhost.h')
-deps += ['ethdev']
+sources = files('fd_man.c', 'iotlb.c', 'socket.c', 'vdpa.c',
+ 'vhost.c', 'vhost_user.c',
+ 'virtio_net.c', 'vhost_crypto.c')
+headers = files('rte_vhost.h', 'rte_vdpa.h', 'rte_vhost_crypto.h')
+deps += ['ethdev', 'cryptodev', 'hash', 'pci']
diff --git a/lib/librte_vhost/rte_vdpa.h b/lib/librte_vhost/rte_vdpa.h
new file mode 100644
index 00000000..90465ca2
--- /dev/null
+++ b/lib/librte_vhost/rte_vdpa.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _RTE_VDPA_H_
+#define _RTE_VDPA_H_
+
+/**
+ * @file
+ *
+ * Device specific vhost lib
+ */
+
+#include <rte_pci.h>
+#include "rte_vhost.h"
+
+#define MAX_VDPA_NAME_LEN 128
+
+enum vdpa_addr_type {
+ PCI_ADDR,
+ VDPA_ADDR_MAX
+};
+
+struct rte_vdpa_dev_addr {
+ enum vdpa_addr_type type;
+ union {
+ uint8_t __dummy[64];
+ struct rte_pci_addr pci_addr;
+ };
+};
+
+struct rte_vdpa_dev_ops {
+ /* Get capabilities of this device */
+ int (*get_queue_num)(int did, uint32_t *queue_num);
+ int (*get_features)(int did, uint64_t *features);
+ int (*get_protocol_features)(int did, uint64_t *protocol_features);
+
+ /* Driver configure/close the device */
+ int (*dev_conf)(int vid);
+ int (*dev_close)(int vid);
+
+ /* Enable/disable this vring */
+ int (*set_vring_state)(int vid, int vring, int state);
+
+ /* Set features when changed */
+ int (*set_features)(int vid);
+
+ /* Destination operations when migration done */
+ int (*migration_done)(int vid);
+
+ /* Get the vfio group fd */
+ int (*get_vfio_group_fd)(int vid);
+
+ /* Get the vfio device fd */
+ int (*get_vfio_device_fd)(int vid);
+
+ /* Get the notify area info of the queue */
+ int (*get_notify_area)(int vid, int qid,
+ uint64_t *offset, uint64_t *size);
+
+ /* Reserved for future extension */
+ void *reserved[5];
+};
+
+struct rte_vdpa_device {
+ struct rte_vdpa_dev_addr addr;
+ struct rte_vdpa_dev_ops *ops;
+} __rte_cache_aligned;
+
+/* Register a vdpa device, return did if successful, -1 on failure */
+int __rte_experimental
+rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
+ struct rte_vdpa_dev_ops *ops);
+
+/* Unregister a vdpa device, return -1 on failure */
+int __rte_experimental
+rte_vdpa_unregister_device(int did);
+
+/* Find did of a vdpa device, return -1 on failure */
+int __rte_experimental
+rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr);
+
+/* Find a vdpa device based on did */
+struct rte_vdpa_device * __rte_experimental
+rte_vdpa_get_device(int did);
+
+#endif /* _RTE_VDPA_H_ */
diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index d3320699..b02673d4 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -29,6 +29,48 @@ extern "C" {
#define RTE_VHOST_USER_DEQUEUE_ZERO_COPY (1ULL << 2)
#define RTE_VHOST_USER_IOMMU_SUPPORT (1ULL << 3)
+/** Protocol features. */
+#ifndef VHOST_USER_PROTOCOL_F_MQ
+#define VHOST_USER_PROTOCOL_F_MQ 0
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_LOG_SHMFD
+#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_RARP
+#define VHOST_USER_PROTOCOL_F_RARP 2
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_REPLY_ACK
+#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_NET_MTU
+#define VHOST_USER_PROTOCOL_F_NET_MTU 4
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ
+#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
+#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
+#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
+#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
+#endif
+
+/** Indicate whether protocol features negotiation is supported. */
+#ifndef VHOST_USER_F_PROTOCOL_FEATURES
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#endif
+
/**
* Information relating to memory regions including offsets to
* addresses in QEMUs memory file.
@@ -90,6 +132,11 @@ struct vhost_device_ops {
/**
* Convert guest physical address to host virtual address
*
+ * This function is deprecated because it is unsafe.
+ * The new rte_vhost_va_from_guest_pa() should be used instead to
+ * ensure the requested guest physical range is fully and contiguously
+ * mapped into the process virtual address space.
+ *
* @param mem
* the guest memory regions
* @param gpa
@@ -97,6 +144,7 @@ struct vhost_device_ops {
* @return
* the host virtual address on success, 0 on failure
*/
+__rte_deprecated
static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
@@ -115,6 +163,46 @@ rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
return 0;
}
+/**
+ * Convert guest physical address to host virtual address safely
+ *
+ * This variant of rte_vhost_gpa_to_vva() ensures that the whole
+ * requested length is mapped and contiguous in the process address
+ * space.
+ *
+ * @param mem
+ * the guest memory regions
+ * @param gpa
+ * the guest physical address for querying
+ * @param len
+ * the size of the requested area to map, updated with actual size mapped
+ * @return
+ * the host virtual address on success, 0 on failure
+ */
+static __rte_always_inline uint64_t
+rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
+ uint64_t gpa, uint64_t *len)
+{
+ struct rte_vhost_mem_region *r;
+ uint32_t i;
+
+ for (i = 0; i < mem->nregions; i++) {
+ r = &mem->regions[i];
+ if (gpa >= r->guest_phys_addr &&
+ gpa < r->guest_phys_addr + r->size) {
+
+ if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
+ *len = r->guest_phys_addr + r->size - gpa;
+
+ return gpa - r->guest_phys_addr +
+ r->host_user_addr;
+ }
+ }
+ *len = 0;
+
+ return 0;
+}
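Because *len is clipped to the end of the containing region, a caller needing more than one region must loop. A hedged sketch of that pattern (the helper name is hypothetical):

#include <stdint.h>
#include <string.h>
#include <rte_vhost.h>

static int
copy_from_guest(struct rte_vhost_memory *mem, uint64_t gpa,
		uint64_t len, uint8_t *dst)
{
	while (len != 0) {
		uint64_t chunk = len;
		uint64_t vva = rte_vhost_va_from_guest_pa(mem, gpa, &chunk);

		if (vva == 0)
			return -1;	/* range not (fully) mapped */

		memcpy(dst, (void *)(uintptr_t)vva, chunk);
		gpa += chunk;
		dst += chunk;
		len -= chunk;
	}
	return 0;
}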
+
#define RTE_VHOST_NEED_LOG(features) ((features) & (1ULL << VHOST_F_LOG_ALL))
/**
@@ -170,6 +258,41 @@ int rte_vhost_driver_register(const char *path, uint64_t flags);
int rte_vhost_driver_unregister(const char *path);
/**
+ * Set the vdpa device id; this enforces a single connection per socket
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param did
+ * Device id
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_attach_vdpa_device(const char *path, int did);
+
+/**
+ * Unset the vdpa device id
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_detach_vdpa_device(const char *path);
+
+/**
+ * Get the device id
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @return
+ * Device id, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_get_vdpa_device_id(const char *path);
+
+/**
* Set the feature bits the vhost-user driver supports.
*
* @param path
@@ -225,6 +348,33 @@ int rte_vhost_driver_disable_features(const char *path, uint64_t features);
int rte_vhost_driver_get_features(const char *path, uint64_t *features);
/**
+ * Get the protocol feature bits before feature negotiation.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param protocol_features
+ * A pointer to store the queried protocol feature bits
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_get_protocol_features(const char *path,
+ uint64_t *protocol_features);
+
+/**
+ * Get the queue number bits before feature negotiation.
+ *
+ * @param path
+ * The vhost-user socket file path
+ * @param queue_num
+ * A pointer to store the queried queue number bits
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);
+
+/**
* Get the feature bits after negotiation
*
* @param vid
@@ -434,6 +584,68 @@ int rte_vhost_vring_call(int vid, uint16_t vring_idx);
*/
uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);
+/**
+ * Get log base and log size of the vhost device
+ *
+ * @param vid
+ * vhost device ID
+ * @param log_base
+ * vhost log base
+ * @param log_size
+ * vhost log size
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size);
+
+/**
+ * Get last_avail/used_idx of the vhost virtqueue
+ *
+ * @param vid
+ * vhost device ID
+ * @param queue_id
+ * vhost queue index
+ * @param last_avail_idx
+ * vhost last_avail_idx to get
+ * @param last_used_idx
+ * vhost last_used_idx to get
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+ uint16_t *last_avail_idx, uint16_t *last_used_idx);
+
+/**
+ * Set last_avail/used_idx of the vhost virtqueue
+ *
+ * @param vid
+ * vhost device ID
+ * @param queue_id
+ * vhost queue index
+ * @param last_avail_idx
+ * last_avail_idx to set
+ * @param last_used_idx
+ * last_used_idx to set
+ * @return
+ * 0 on success, -1 on failure
+ */
+int __rte_experimental
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+ uint16_t last_avail_idx, uint16_t last_used_idx);
+
+/**
+ * Get vdpa device id for vhost device.
+ *
+ * @param vid
+ * vhost device id
+ * @return
+ * device id
+ */
+int __rte_experimental
+rte_vhost_get_vdpa_device_id(int vid);
+
#ifdef __cplusplus
}
#endif
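The vring base getters/setters declared above are the building blocks a vDPA driver needs to checkpoint and resume a virtqueue. A minimal hedged sketch of that save/restore sequence:

#include <rte_vhost.h>

static int
checkpoint_vring(int vid, uint16_t qid)
{
	uint16_t avail_idx, used_idx;

	if (rte_vhost_get_vring_base(vid, qid, &avail_idx, &used_idx) != 0)
		return -1;

	/* ... stop the device, transfer state ... */

	return rte_vhost_set_vring_base(vid, qid, avail_idx, used_idx);
}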
diff --git a/lib/librte_vhost/rte_vhost_crypto.h b/lib/librte_vhost/rte_vhost_crypto.h
new file mode 100644
index 00000000..f9fbc054
--- /dev/null
+++ b/lib/librte_vhost/rte_vhost_crypto.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef _VHOST_CRYPTO_H_
+#define _VHOST_CRYPTO_H_
+
+#define VHOST_CRYPTO_MBUF_POOL_SIZE (8192)
+#define VHOST_CRYPTO_MAX_BURST_SIZE (64)
+#define VHOST_CRYPTO_SESSION_MAP_ENTRIES (1024) /**< Max nb sessions */
+/** Max number of virtual queues in a burst for finalizing */
+#define VIRTIO_CRYPTO_MAX_NUM_BURST_VQS (64)
+
+enum rte_vhost_crypto_zero_copy {
+ RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE = 0,
+ RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE = 1,
+ RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS
+};
+
+/**
+ * Create Vhost-crypto instance
+ *
+ * @param vid
+ * The identifier of the vhost device.
+ * @param cryptodev_id
+ * The identifier of DPDK Cryptodev, the same cryptodev_id can be assigned to
+ * multiple Vhost-crypto devices.
+ * @param sess_pool
+ * The pointer to the created cryptodev session pool, whose private data
+ * size matches the target DPDK Cryptodev.
+ * @param socket_id
+ * NUMA Socket ID to allocate resources on.
+ * @return
+ * 0 if the Vhost Crypto Instance is created successfully.
+ * Negative integer otherwise.
+ */
+int __rte_experimental
+rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+ struct rte_mempool *sess_pool, int socket_id);
+
+/**
+ * Free the Vhost-crypto instance
+ *
+ * @param vid
+ * The identifier of the vhost device.
+ * @return
+ * 0 if the Vhost Crypto Instance is freed successfully.
+ * Negative integer otherwise.
+ */
+int __rte_experimental
+rte_vhost_crypto_free(int vid);
+
+/**
+ * Enable or disable zero copy feature
+ *
+ * @param vid
+ * The identifier of the vhost device.
+ * @param option
+ * Flag of zero copy feature.
+ * @return
+ * 0 if completed successfully.
+ * Negative integer otherwise.
+ */
+int __rte_experimental
+rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option);
+
+/**
+ * Fetch a number of vring descriptors from virt-queue and translate to DPDK
+ * crypto operations. After this function is executed, the user can enqueue
+ * the processed ops to the target cryptodev.
+ *
+ * @param vid
+ * The identifier of the vhost device.
+ * @param qid
+ * Virtio queue index.
+ * @param ops
+ * The address of an array of pointers to *rte_crypto_op* structures that must
+ * be large enough to store *nb_ops* pointers in it.
+ * @param nb_ops
+ * The maximum number of operations to be fetched and translated.
+ * @return
+ * The number of fetched and processed vhost crypto request operations.
+ */
+uint16_t __rte_experimental
+rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+/**
+ * Finalize the dequeued crypto ops. After the translated crypto ops are
+ * dequeued from the cryptodev, this function shall be called to write the
+ * processed data back to the vring descriptor (if zero-copy is turned off).
+ *
+ * @param ops
+ * The address of an array of *rte_crypto_op* structure that was dequeued
+ * from cryptodev.
+ * @param nb_ops
+ * The number of operations contained in the array.
+ * @param callfds
+ * The callfd number(s) contained in this burst; this shall be an array
+ * with no fewer than VIRTIO_CRYPTO_MAX_NUM_BURST_VQS elements.
+ * @param nb_callfds
+ * The number of call_fd numbers contained in the callfds array.
+ * @return
+ * The number of ops processed.
+ */
+uint16_t __rte_experimental
+rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+ uint16_t nb_ops, int *callfds, uint16_t *nb_callfds);
+
+#endif /* _VHOST_CRYPTO_H_ */
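The intended data path pairs the two functions above around a cryptodev burst: fetch translates vring descriptors into crypto ops, the cryptodev processes them, and finalize writes the results back. A hedged polling sketch, assuming a single virtio queue (qid 0), cryptodev queue pair 0, and a pre-created op pool, with the caller pre-allocating the op array:

#include <rte_common.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_vhost_crypto.h>

static void
vhost_crypto_poll(int vid, uint8_t cdev_id, struct rte_mempool *cop_pool)
{
	struct rte_crypto_op *ops[VHOST_CRYPTO_MAX_BURST_SIZE];
	int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
	uint16_t nb, done, nb_callfds;

	if (rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
			ops, VHOST_CRYPTO_MAX_BURST_SIZE) == 0)
		return;

	/* translate vring descriptors into crypto ops (queue 0 assumed) */
	nb = rte_vhost_crypto_fetch_requests(vid, 0, ops,
			VHOST_CRYPTO_MAX_BURST_SIZE);
	nb = rte_cryptodev_enqueue_burst(cdev_id, 0, ops, nb);

	done = rte_cryptodev_dequeue_burst(cdev_id, 0, ops,
			VHOST_CRYPTO_MAX_BURST_SIZE);
	/* write results back to the vrings; returns the callfds to kick */
	done = rte_vhost_crypto_finalize_requests(ops, done, callfds,
			&nb_callfds);

	/* a real loop would eventfd_write() each callfd and free unused
	 * ops back to cop_pool */
	RTE_SET_USED(done);
	RTE_SET_USED(nb);
}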
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index df010312..da220dd0 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -59,3 +59,27 @@ DPDK_18.02 {
rte_vhost_vring_call;
} DPDK_17.08;
+
+EXPERIMENTAL {
+ global:
+
+ rte_vdpa_register_device;
+ rte_vdpa_unregister_device;
+ rte_vdpa_find_device_id;
+ rte_vdpa_get_device;
+ rte_vhost_driver_attach_vdpa_device;
+ rte_vhost_driver_detach_vdpa_device;
+ rte_vhost_driver_get_vdpa_device_id;
+ rte_vhost_get_vdpa_device_id;
+ rte_vhost_driver_get_protocol_features;
+ rte_vhost_driver_get_queue_num;
+ rte_vhost_get_log_base;
+ rte_vhost_get_vring_base;
+ rte_vhost_set_vring_base;
+ rte_vhost_crypto_create;
+ rte_vhost_crypto_free;
+ rte_vhost_crypto_fetch_requests;
+ rte_vhost_crypto_finalize_requests;
+ rte_vhost_crypto_set_zero_copy;
+ rte_vhost_va_from_guest_pa;
+};
diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 83befdce..d6303174 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -4,7 +4,6 @@
#include <stdint.h>
#include <stdio.h>
-#include <stdbool.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
@@ -52,6 +51,13 @@ struct vhost_user_socket {
uint64_t supported_features;
uint64_t features;
+ /*
+ * Device id to identify a specific backend device.
+ * It's set to -1 for the default software implementation.
+ * If valid, the socket can have only one connection.
+ */
+ int vdpa_dev_id;
+
struct vhost_device_ops const *notify_ops;
};
@@ -97,6 +103,7 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
size_t fdsize = fd_num * sizeof(int);
char control[CMSG_SPACE(fdsize)];
struct cmsghdr *cmsg;
+ int got_fds = 0;
int ret;
memset(&msgh, 0, sizeof(msgh));
@@ -123,11 +130,16 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
if ((cmsg->cmsg_level == SOL_SOCKET) &&
(cmsg->cmsg_type == SCM_RIGHTS)) {
- memcpy(fds, CMSG_DATA(cmsg), fdsize);
+ got_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
+ memcpy(fds, CMSG_DATA(cmsg), got_fds * sizeof(int));
break;
}
}
+ /* Clear out unused file descriptors */
+ while (got_fds < fd_num)
+ fds[got_fds++] = -1;
+
return ret;
}
@@ -153,6 +165,11 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
msgh.msg_control = control;
msgh.msg_controllen = sizeof(control);
cmsg = CMSG_FIRSTHDR(&msgh);
+ if (cmsg == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG, "cmsg == NULL\n");
+ errno = EINVAL;
+ return -1;
+ }
cmsg->cmsg_len = CMSG_LEN(fdsize);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
@@ -163,7 +180,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
}
do {
- ret = sendmsg(sockfd, &msgh, 0);
+ ret = sendmsg(sockfd, &msgh, MSG_NOSIGNAL);
} while (ret < 0 && errno == EINTR);
if (ret < 0) {
@@ -182,6 +199,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
struct vhost_user_connection *conn;
int ret;
+ if (vsocket == NULL)
+ return;
+
conn = malloc(sizeof(*conn));
if (conn == NULL) {
close(fd);
@@ -198,6 +218,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
+ vhost_attach_vdpa_device(vid, vsocket->vdpa_dev_id);
+
if (vsocket->dequeue_zero_copy)
vhost_enable_dequeue_zero_copy(vid);
@@ -232,6 +254,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
pthread_mutex_lock(&vsocket->conn_mutex);
TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
pthread_mutex_unlock(&vsocket->conn_mutex);
+
+ fdset_pipe_notify(&vhost_user.fdset);
return;
err:
@@ -318,6 +342,16 @@ vhost_user_start_server(struct vhost_user_socket *vsocket)
int fd = vsocket->socket_fd;
const char *path = vsocket->path;
+ /*
+ * bind() may fail if a socket file with the same name already
+ * exists. But the library obviously should not delete a file
+ * provided by the user, since we cannot be sure that it is not
+ * being used by other applications. Moreover, many applications form
+ * socket names based on user input, which is prone to errors.
+ *
+ * The user must ensure that the socket does not exist before
+ * registering the vhost driver in server mode.
+ */
ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
@@ -436,7 +470,6 @@ static int
vhost_user_reconnect_init(void)
{
int ret;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
ret = pthread_mutex_init(&reconn_list.mutex, NULL);
if (ret < 0) {
@@ -445,7 +478,7 @@ vhost_user_reconnect_init(void)
}
TAILQ_INIT(&reconn_list.head);
- ret = pthread_create(&reconn_tid, NULL,
+ ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL,
vhost_user_client_reconnect, NULL);
if (ret != 0) {
RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
@@ -453,14 +486,6 @@ vhost_user_reconnect_init(void)
RTE_LOG(ERR, VHOST_CONFIG,
"failed to destroy reconnect mutex");
}
- } else {
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
- "vhost-reconn");
-
- if (rte_thread_setname(reconn_tid, thread_name)) {
- RTE_LOG(DEBUG, VHOST_CONFIG,
- "failed to set reconnect thread name");
- }
}
return ret;
@@ -524,6 +549,52 @@ find_vhost_user_socket(const char *path)
}
int
+rte_vhost_driver_attach_vdpa_device(const char *path, int did)
+{
+ struct vhost_user_socket *vsocket;
+
+ if (rte_vdpa_get_device(did) == NULL)
+ return -1;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (vsocket)
+ vsocket->vdpa_dev_id = did;
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_detach_vdpa_device(const char *path)
+{
+ struct vhost_user_socket *vsocket;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (vsocket)
+ vsocket->vdpa_dev_id = -1;
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_get_vdpa_device_id(const char *path)
+{
+ struct vhost_user_socket *vsocket;
+ int did = -1;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (vsocket)
+ did = vsocket->vdpa_dev_id;
+ pthread_mutex_unlock(&vhost_user.mutex);
+
+ return did;
+}
+
+int
rte_vhost_driver_disable_features(const char *path, uint64_t features)
{
struct vhost_user_socket *vsocket;
@@ -591,19 +662,136 @@ int
rte_vhost_driver_get_features(const char *path, uint64_t *features)
{
struct vhost_user_socket *vsocket;
+ uint64_t vdpa_features;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
+ int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
- if (vsocket)
+ if (!vsocket) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "socket file %s is not registered yet.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ did = vsocket->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (!vdpa_dev || !vdpa_dev->ops->get_features) {
*features = vsocket->features;
+ goto unlock_exit;
+ }
+
+ if (vdpa_dev->ops->get_features(did, &vdpa_features) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to get vdpa features "
+ "for socket file %s.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ *features = vsocket->features & vdpa_features;
+
+unlock_exit:
pthread_mutex_unlock(&vhost_user.mutex);
+ return ret;
+}
+
+int
+rte_vhost_driver_get_protocol_features(const char *path,
+ uint64_t *protocol_features)
+{
+ struct vhost_user_socket *vsocket;
+ uint64_t vdpa_protocol_features;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
+ int ret = 0;
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
if (!vsocket) {
RTE_LOG(ERR, VHOST_CONFIG,
"socket file %s is not registered yet.\n", path);
- return -1;
- } else {
- return 0;
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ did = vsocket->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (!vdpa_dev || !vdpa_dev->ops->get_protocol_features) {
+ *protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+ goto unlock_exit;
+ }
+
+ if (vdpa_dev->ops->get_protocol_features(did,
+ &vdpa_protocol_features) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to get vdpa protocol features "
+ "for socket file %s.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ *protocol_features = VHOST_USER_PROTOCOL_FEATURES
+ & vdpa_protocol_features;
+
+unlock_exit:
+ pthread_mutex_unlock(&vhost_user.mutex);
+ return ret;
+}
+
+int
+rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
+{
+ struct vhost_user_socket *vsocket;
+ uint32_t vdpa_queue_num;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
+ int ret = 0;
+
+ pthread_mutex_lock(&vhost_user.mutex);
+ vsocket = find_vhost_user_socket(path);
+ if (!vsocket) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "socket file %s is not registered yet.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ did = vsocket->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (!vdpa_dev || !vdpa_dev->ops->get_queue_num) {
+ *queue_num = VHOST_MAX_QUEUE_PAIRS;
+ goto unlock_exit;
+ }
+
+ if (vdpa_dev->ops->get_queue_num(did, &vdpa_queue_num) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to get vdpa queue number "
+ "for socket file %s.\n", path);
+ ret = -1;
+ goto unlock_exit;
+ }
+
+ *queue_num = RTE_MIN((uint32_t)VHOST_MAX_QUEUE_PAIRS, vdpa_queue_num);
+
+unlock_exit:
+ pthread_mutex_unlock(&vhost_user.mutex);
+ return ret;
+}
+
+static void
+vhost_user_socket_mem_free(struct vhost_user_socket *vsocket)
+{
+ if (vsocket && vsocket->path) {
+ free(vsocket->path);
+ vsocket->path = NULL;
+ }
+
+ if (vsocket) {
+ free(vsocket);
+ vsocket = NULL;
}
}
@@ -637,7 +825,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
if (vsocket->path == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"error: failed to copy socket path string\n");
- free(vsocket);
+ vhost_user_socket_mem_free(vsocket);
goto out;
}
TAILQ_INIT(&vsocket->conn_list);
@@ -665,6 +853,12 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES;
vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES;
+ /* Dequeue zero copy can't guarantee descriptors are returned in order */
+ if (vsocket->dequeue_zero_copy) {
+ vsocket->supported_features &= ~(1ULL << VIRTIO_F_IN_ORDER);
+ vsocket->features &= ~(1ULL << VIRTIO_F_IN_ORDER);
+ }
+
if (!(flags & RTE_VHOST_USER_IOMMU_SUPPORT)) {
vsocket->supported_features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
vsocket->features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
@@ -695,8 +889,7 @@ out_mutex:
"error: failed to destroy connection mutex\n");
}
out_free:
- free(vsocket->path);
- free(vsocket);
+ vhost_user_socket_mem_free(vsocket);
out:
pthread_mutex_unlock(&vhost_user.mutex);
@@ -743,21 +936,25 @@ rte_vhost_driver_unregister(const char *path)
struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
if (!strcmp(vsocket->path, path)) {
- if (vsocket->is_server) {
- fdset_del(&vhost_user.fdset, vsocket->socket_fd);
- close(vsocket->socket_fd);
- unlink(path);
- } else if (vsocket->reconnect) {
- vhost_user_remove_reconnect(vsocket);
- }
-
+again:
pthread_mutex_lock(&vsocket->conn_mutex);
for (conn = TAILQ_FIRST(&vsocket->conn_list);
conn != NULL;
conn = next) {
next = TAILQ_NEXT(conn, next);
- fdset_del(&vhost_user.fdset, conn->connfd);
+ /*
+ * If the read/write callback is executing, release
+ * conn_mutex and try again, since the callback may
+ * itself take conn_mutex.
+ */
+ if (fdset_try_del(&vhost_user.fdset,
+ conn->connfd) == -1) {
+ pthread_mutex_unlock(
+ &vsocket->conn_mutex);
+ goto again;
+ }
+
RTE_LOG(INFO, VHOST_CONFIG,
"free connfd = %d for device '%s'\n",
conn->connfd, path);
@@ -768,9 +965,17 @@ rte_vhost_driver_unregister(const char *path)
}
pthread_mutex_unlock(&vsocket->conn_mutex);
+ if (vsocket->is_server) {
+ fdset_del(&vhost_user.fdset,
+ vsocket->socket_fd);
+ close(vsocket->socket_fd);
+ unlink(path);
+ } else if (vsocket->reconnect) {
+ vhost_user_remove_reconnect(vsocket);
+ }
+
pthread_mutex_destroy(&vsocket->conn_mutex);
- free(vsocket->path);
- free(vsocket);
+ vhost_user_socket_mem_free(vsocket);
count = --vhost_user.vsocket_cnt;
vhost_user.vsockets[i] = vhost_user.vsockets[count];
@@ -829,11 +1034,26 @@ rte_vhost_driver_start(const char *path)
return -1;
if (fdset_tid == 0) {
- int ret = pthread_create(&fdset_tid, NULL, fdset_event_dispatch,
- &vhost_user.fdset);
- if (ret != 0)
+ /**
+ * Create a pipe that poll() waits on; writing to it wakes
+ * poll() so the wait list can be rebuilt.
+ */
+ if (fdset_pipe_init(&vhost_user.fdset) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to create pipe for vhost fdset\n");
+ return -1;
+ }
+
+ int ret = rte_ctrl_thread_create(&fdset_tid,
+ "vhost-events", NULL, fdset_event_dispatch,
+ &vhost_user.fdset);
+ if (ret != 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to create fdset handling thread");
+
+ fdset_pipe_uninit(&vhost_user.fdset);
+ return -1;
+ }
}
if (vsocket->is_server)
diff --git a/lib/librte_vhost/vdpa.c b/lib/librte_vhost/vdpa.c
new file mode 100644
index 00000000..c82fd437
--- /dev/null
+++ b/lib/librte_vhost/vdpa.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/**
+ * @file
+ *
+ * Device specific vhost lib
+ */
+
+#include <stdbool.h>
+
+#include <rte_malloc.h>
+#include "rte_vdpa.h"
+#include "vhost.h"
+
+static struct rte_vdpa_device *vdpa_devices[MAX_VHOST_DEVICE];
+static uint32_t vdpa_device_num;
+
+static bool
+is_same_vdpa_device(struct rte_vdpa_dev_addr *a,
+ struct rte_vdpa_dev_addr *b)
+{
+ bool ret = true;
+
+ if (a->type != b->type)
+ return false;
+
+ switch (a->type) {
+ case PCI_ADDR:
+ if (a->pci_addr.domain != b->pci_addr.domain ||
+ a->pci_addr.bus != b->pci_addr.bus ||
+ a->pci_addr.devid != b->pci_addr.devid ||
+ a->pci_addr.function != b->pci_addr.function)
+ ret = false;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int
+rte_vdpa_register_device(struct rte_vdpa_dev_addr *addr,
+ struct rte_vdpa_dev_ops *ops)
+{
+ struct rte_vdpa_device *dev;
+ char device_name[MAX_VDPA_NAME_LEN];
+ int i;
+
+ if (vdpa_device_num >= MAX_VHOST_DEVICE)
+ return -1;
+
+ for (i = 0; i < MAX_VHOST_DEVICE; i++) {
+ dev = vdpa_devices[i];
+ if (dev && is_same_vdpa_device(&dev->addr, addr))
+ return -1;
+ }
+
+ for (i = 0; i < MAX_VHOST_DEVICE; i++) {
+ if (vdpa_devices[i] == NULL)
+ break;
+ }
+
+ sprintf(device_name, "vdpa-dev-%d", i);
+ dev = rte_zmalloc(device_name, sizeof(struct rte_vdpa_device),
+ RTE_CACHE_LINE_SIZE);
+ if (!dev)
+ return -1;
+
+ memcpy(&dev->addr, addr, sizeof(struct rte_vdpa_dev_addr));
+ dev->ops = ops;
+ vdpa_devices[i] = dev;
+ vdpa_device_num++;
+
+ return i;
+}
+
+int
+rte_vdpa_unregister_device(int did)
+{
+ if (did < 0 || did >= MAX_VHOST_DEVICE || vdpa_devices[did] == NULL)
+ return -1;
+
+ rte_free(vdpa_devices[did]);
+ vdpa_devices[did] = NULL;
+ vdpa_device_num--;
+
+ return did;
+}
+
+int
+rte_vdpa_find_device_id(struct rte_vdpa_dev_addr *addr)
+{
+ struct rte_vdpa_device *dev;
+ int i;
+
+ for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
+ dev = vdpa_devices[i];
+ if (dev && is_same_vdpa_device(&dev->addr, addr))
+ return i;
+ }
+
+ return -1;
+}
+
+struct rte_vdpa_device *
+rte_vdpa_get_device(int did)
+{
+ if (did < 0 || did >= MAX_VHOST_DEVICE)
+ return NULL;
+
+ return vdpa_devices[did];
+}
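Putting the registry above together with the socket-level attach API from rte_vhost.h: a hypothetical vDPA PMD registers its address and ops, then binds the returned device id to a vhost-user socket. A hedged sketch (my_ops and the helper are illustrative, not part of the patch):

#include <rte_vdpa.h>
#include <rte_vhost.h>

static struct rte_vdpa_dev_ops my_ops;	/* callbacks filled in by the driver */

static int
register_my_vdpa(const struct rte_pci_addr *pci, const char *socket_path)
{
	struct rte_vdpa_dev_addr addr = {
		.type = PCI_ADDR,
		.pci_addr = *pci,
	};
	int did = rte_vdpa_register_device(&addr, &my_ops);

	if (did < 0)
		return -1;

	/* bind the device to a socket; only one connection is then allowed */
	return rte_vhost_driver_attach_vdpa_device(socket_path, did);
}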
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index a407067e..3c9be10a 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2010-2017 Intel Corporation
*/
#include <linux/vhost.h>
@@ -29,17 +29,17 @@ struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t iova, uint64_t size, uint8_t perm)
+ uint64_t iova, uint64_t *size, uint8_t perm)
{
uint64_t vva, tmp_size;
- if (unlikely(!size))
+ if (unlikely(!*size))
return 0;
- tmp_size = size;
+ tmp_size = *size;
vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
- if (tmp_size == size)
+ if (tmp_size == *size)
return vva;
iova += tmp_size;
@@ -68,19 +68,6 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
return 0;
}
-struct virtio_net *
-get_device(int vid)
-{
- struct virtio_net *dev = vhost_devices[vid];
-
- if (unlikely(!dev)) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) device not found.\n", vid);
- }
-
- return dev;
-}
-
void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
@@ -106,9 +93,12 @@ cleanup_device(struct virtio_net *dev, int destroy)
}
void
-free_vq(struct vhost_virtqueue *vq)
+free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- rte_free(vq->shadow_used_ring);
+ if (vq_is_packed(dev))
+ rte_free(vq->shadow_used_packed);
+ else
+ rte_free(vq->shadow_used_split);
rte_free(vq->batch_copy_elems);
rte_mempool_free(vq->iotlb_pool);
rte_free(vq);
@@ -123,42 +113,95 @@ free_device(struct virtio_net *dev)
uint32_t i;
for (i = 0; i < dev->nr_vring; i++)
- free_vq(dev->virtqueue[i]);
+ free_vq(dev, dev->virtqueue[i]);
rte_free(dev);
}
-int
-vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+static int
+vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- uint64_t size;
+ uint64_t req_size, size;
- if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
- goto out;
-
- size = sizeof(struct vring_desc) * vq->size;
+ req_size = sizeof(struct vring_desc) * vq->size;
+ size = req_size;
vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.desc_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->desc)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->desc || size != req_size)
return -1;
- size = sizeof(struct vring_avail);
- size += sizeof(uint16_t) * vq->size;
+ req_size = sizeof(struct vring_avail);
+ req_size += sizeof(uint16_t) * vq->size;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ req_size += sizeof(uint16_t);
+ size = req_size;
vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.avail_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->avail)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->avail || size != req_size)
return -1;
- size = sizeof(struct vring_used);
- size += sizeof(struct vring_used_elem) * vq->size;
+ req_size = sizeof(struct vring_used);
+ req_size += sizeof(struct vring_used_elem) * vq->size;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ req_size += sizeof(uint16_t);
+ size = req_size;
vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
vq->ring_addrs.used_user_addr,
- size, VHOST_ACCESS_RW);
- if (!vq->used)
+ &size, VHOST_ACCESS_RW);
+ if (!vq->used || size != req_size)
+ return -1;
+
+ return 0;
+}
+
+static int
+vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ uint64_t req_size, size;
+
+ req_size = sizeof(struct vring_packed_desc) * vq->size;
+ size = req_size;
+ vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
+ vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
+ &size, VHOST_ACCESS_RW);
+ if (!vq->desc_packed || size != req_size)
return -1;
+ req_size = sizeof(struct vring_packed_desc_event);
+ size = req_size;
+ vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
+ vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
+ &size, VHOST_ACCESS_RW);
+ if (!vq->driver_event || size != req_size)
+ return -1;
+
+ req_size = sizeof(struct vring_packed_desc_event);
+ size = req_size;
+ vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
+ vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
+ &size, VHOST_ACCESS_RW);
+ if (!vq->device_event || size != req_size)
+ return -1;
+
+ return 0;
+}
+
+int
+vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+
+ if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ goto out;
+
+ if (vq_is_packed(dev)) {
+ if (vring_translate_packed(dev, vq) < 0)
+ return -1;
+ } else {
+ if (vring_translate_split(dev, vq) < 0)
+ return -1;
+ }
out:
vq->access_ok = 1;
@@ -240,6 +283,9 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
dev->virtqueue[vring_idx] = vq;
init_vring_queue(dev, vring_idx);
rte_spinlock_init(&vq->access_lock);
+ vq->avail_wrap_counter = 1;
+ vq->used_wrap_counter = 1;
+ vq->signalled_used_valid = false;
dev->nr_vring += 1;
@@ -274,21 +320,21 @@ vhost_new_device(void)
struct virtio_net *dev;
int i;
- dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
- if (dev == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "Failed to allocate memory for new dev.\n");
- return -1;
- }
-
for (i = 0; i < MAX_VHOST_DEVICE; i++) {
if (vhost_devices[i] == NULL)
break;
}
+
if (i == MAX_VHOST_DEVICE) {
RTE_LOG(ERR, VHOST_CONFIG,
"Failed to find a free slot for new device.\n");
- rte_free(dev);
+ return -1;
+ }
+
+ dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
+ if (dev == NULL) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to allocate memory for new dev.\n");
return -1;
}
@@ -296,10 +342,28 @@ vhost_new_device(void)
dev->vid = i;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
+ dev->vdpa_dev_id = -1;
+ rte_spinlock_init(&dev->slave_req_lock);
return i;
}
+void
+vhost_destroy_device_notify(struct virtio_net *dev)
+{
+ struct rte_vdpa_device *vdpa_dev;
+ int did;
+
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->dev_close)
+ vdpa_dev->ops->dev_close(dev->vid);
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
+}
+
/*
* Invoked when the vhost-user connection is broken (i.e. when
* the virtio device is being detached).
@@ -312,10 +376,7 @@ vhost_destroy_device(int vid)
if (dev == NULL)
return;
- if (dev->flags & VIRTIO_DEV_RUNNING) {
- dev->flags &= ~VIRTIO_DEV_RUNNING;
- dev->notify_ops->destroy_device(vid);
- }
+ vhost_destroy_device_notify(dev);
cleanup_device(dev, 1);
free_device(dev);
@@ -324,6 +385,33 @@ vhost_destroy_device(int vid)
}
void
+vhost_attach_vdpa_device(int vid, int did)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ if (rte_vdpa_get_device(did) == NULL)
+ return;
+
+ dev->vdpa_dev_id = did;
+}
+
+void
+vhost_detach_vdpa_device(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return;
+
+ vhost_user_host_notifier_ctrl(vid, false);
+
+ dev->vdpa_dev_id = -1;
+}
+
+void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
struct virtio_net *dev;
@@ -532,7 +620,11 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
if (!vq)
return -1;
- vhost_vring_call(dev, vq);
+ if (vq_is_packed(dev))
+ vhost_vring_call_packed(dev, vq);
+ else
+ vhost_vring_call_split(dev, vq);
+
return 0;
}
@@ -553,21 +645,52 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}
+static inline void
+vhost_enable_notify_split(struct vhost_virtqueue *vq, int enable)
+{
+ if (enable)
+ vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
+ else
+ vq->used->flags |= VRING_USED_F_NO_NOTIFY;
+}
+
+static inline void
+vhost_enable_notify_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, int enable)
+{
+ uint16_t flags;
+
+ if (!enable)
+ vq->device_event->flags = VRING_EVENT_F_DISABLE;
+
+ flags = VRING_EVENT_F_ENABLE;
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
+ flags = VRING_EVENT_F_DESC;
+ vq->device_event->off_wrap = vq->last_avail_idx |
+ vq->avail_wrap_counter << 15;
+ }
+
+ rte_smp_wmb();
+
+ vq->device_event->flags = flags;
+}
+
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
- if (dev == NULL)
+ if (!dev)
return -1;
- if (enable) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "guest notification isn't supported.\n");
- return -1;
- }
+ vq = dev->virtqueue[queue_id];
+
+ if (vq_is_packed(dev))
+ vhost_enable_notify_packed(dev, vq, enable);
+ else
+ vhost_enable_notify_split(vq, enable);
- dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
return 0;
}
@@ -627,3 +750,76 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}
+
+int rte_vhost_get_vdpa_device_id(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (dev == NULL)
+ return -1;
+
+ return dev->vdpa_dev_id;
+}
+
+int rte_vhost_get_log_base(int vid, uint64_t *log_base,
+ uint64_t *log_size)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ *log_base = dev->log_base;
+ *log_size = dev->log_size;
+
+ return 0;
+}
+
+int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+ uint16_t *last_avail_idx, uint16_t *last_used_idx)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ *last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
+ *last_used_idx = dev->virtqueue[queue_id]->last_used_idx;
+
+ return 0;
+}
+
+int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+ uint16_t last_avail_idx, uint16_t last_used_idx)
+{
+ struct virtio_net *dev = get_device(vid);
+
+ if (!dev)
+ return -1;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return -1;
+ }
+
+ dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
+ dev->virtqueue[queue_id]->last_used_idx = last_used_idx;
+
+ return 0;
+}
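
The get/set pair above exists chiefly so a vDPA driver can checkpoint and
restore ring state, e.g. around stopping and restarting a hardware queue
during live migration, while rte_vhost_get_log_base() exposes the dirty log
the driver must populate meanwhile. A hedged sketch, with vid and queue_id
assumed valid and error handling trimmed:

	uint16_t avail_idx, used_idx;

	/* Snapshot the ring indices before quiescing the HW datapath. */
	rte_vhost_get_vring_base(vid, queue_id, &avail_idx, &used_idx);

	/* Stop the hardware queue here; when resuming, feed them back: */
	rte_vhost_set_vring_base(vid, queue_id, avail_idx, used_idx);
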
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index d947bc9e..760a09c0 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
+#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
@@ -19,6 +20,7 @@
#include <rte_rwlock.h>
#include "rte_vhost.h"
+#include "rte_vdpa.h"
/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
@@ -26,17 +28,22 @@
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
+/* Used to indicate that the device has its own data path and is configured */
+#define VIRTIO_DEV_VDPA_CONFIGURED 8
/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
#define BUF_VECTOR_MAX 256
+#define VHOST_LOG_CACHE_NR 32
+
/**
* Structure contains buffer address, length and descriptor index
* from vring to do scatter RX.
*/
struct buf_vector {
+ uint64_t buf_iova;
uint64_t buf_addr;
uint32_t buf_len;
uint32_t desc_idx;
@@ -49,6 +56,7 @@ struct buf_vector {
struct zcopy_mbuf {
struct rte_mbuf *mbuf;
uint32_t desc_idx;
+ uint16_t desc_count;
uint16_t in_use;
TAILQ_ENTRY(zcopy_mbuf) next;
@@ -65,19 +73,43 @@ struct batch_copy_elem {
uint64_t log_addr;
};
+/*
+ * Structure that contains the info for batched dirty logging.
+ */
+struct log_cache_entry {
+ uint32_t offset;
+ unsigned long val;
+};
+
+struct vring_used_elem_packed {
+ uint16_t id;
+ uint32_t len;
+ uint32_t count;
+};
+
/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
struct vhost_virtqueue {
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
+ union {
+ struct vring_desc *desc;
+ struct vring_packed_desc *desc_packed;
+ };
+ union {
+ struct vring_avail *avail;
+ struct vring_packed_desc_event *driver_event;
+ };
+ union {
+ struct vring_used *used;
+ struct vring_packed_desc_event *device_event;
+ };
uint32_t size;
uint16_t last_avail_idx;
uint16_t last_used_idx;
/* Last used index we notify to front end. */
uint16_t signalled_used;
+ bool signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD (-2)
@@ -101,12 +133,20 @@ struct vhost_virtqueue {
struct zcopy_mbuf *zmbufs;
struct zcopy_mbuf_list zmbuf_list;
- struct vring_used_elem *shadow_used_ring;
+ union {
+ struct vring_used_elem *shadow_used_split;
+ struct vring_used_elem_packed *shadow_used_packed;
+ };
uint16_t shadow_used_idx;
struct vhost_vring_addr ring_addrs;
struct batch_copy_elem *batch_copy_elems;
uint16_t batch_copy_nb_elems;
+ bool used_wrap_counter;
+ bool avail_wrap_counter;
+
+ struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
+ uint16_t log_cache_nb_elem;
rte_rwlock_t iotlb_lock;
rte_rwlock_t iotlb_pending_lock;
@@ -174,7 +214,41 @@ struct vhost_msg {
#define VIRTIO_F_VERSION_1 32
#endif
-#define VHOST_USER_F_PROTOCOL_FEATURES 30
+/* Declare packed ring related bits for older kernels */
+#ifndef VIRTIO_F_RING_PACKED
+
+#define VIRTIO_F_RING_PACKED 34
+
+#define VRING_DESC_F_NEXT 1
+#define VRING_DESC_F_WRITE 2
+#define VRING_DESC_F_INDIRECT 4
+
+#define VRING_DESC_F_AVAIL (1ULL << 7)
+#define VRING_DESC_F_USED (1ULL << 15)
+
+struct vring_packed_desc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t id;
+ uint16_t flags;
+};
+
+#define VRING_EVENT_F_ENABLE 0x0
+#define VRING_EVENT_F_DISABLE 0x1
+#define VRING_EVENT_F_DESC 0x2
+
+struct vring_packed_desc_event {
+ uint16_t off_wrap;
+ uint16_t flags;
+};
+#endif
+
+/*
+ * Available and used descriptors are in the same order
+ */
+#ifndef VIRTIO_F_IN_ORDER
+#define VIRTIO_F_IN_ORDER 35
+#endif
/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
@@ -199,7 +273,8 @@ struct vhost_msg {
(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
- (1ULL << VIRTIO_NET_F_MTU) | \
+ (1ULL << VIRTIO_NET_F_MTU) | \
+ (1ULL << VIRTIO_F_IN_ORDER) | \
(1ULL << VIRTIO_F_IOMMU_PLATFORM))
@@ -210,6 +285,51 @@ struct guest_page {
};
/**
+ * Function prototype for the vhost backend to handle specific vhost-user
+ * messages prior to the master message handling
+ *
+ * @param vid
+ * vhost device id
+ * @param msg
+ * Message pointer.
+ * @param require_reply
+ * If the handler requires sending a reply, this variable shall be written 1,
+ * otherwise 0.
+ * @param skip_master
+ * If the handler requires skipping the master message handling, this variable
+ * shall be written 1, otherwise 0.
+ * @return
+ * 0 on success, -1 on failure
+ */
+typedef int (*vhost_msg_pre_handle)(int vid, void *msg,
+ uint32_t *require_reply, uint32_t *skip_master);
+
+/**
+ * Function prototype for the vhost backend to handle specific vhost-user
+ * messages after the master message handling is done
+ *
+ * @param vid
+ * vhost device id
+ * @param msg
+ * Message pointer.
+ * @param require_reply
+ * If the handler requires sending a reply, this variable shall be written 1,
+ * otherwise 0.
+ * @return
+ * 0 on success, -1 on failure
+ */
+typedef int (*vhost_msg_post_handle)(int vid, void *msg,
+ uint32_t *require_reply);
+
+/**
+ * pre and post vhost user message handlers
+ */
+struct vhost_user_extern_ops {
+ vhost_msg_pre_handle pre_msg_handle;
+ vhost_msg_post_handle post_msg_handle;
+};
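
These hooks let an external backend intercept vhost-user messages around the
built-in handling; vhost_crypto.c, added later in this patch, is the first
user and installs a post handler exactly this way. A hedged sketch of wiring
one up (my_post_handler and my_ctx are hypothetical names):

	static int
	my_post_handler(int vid, void *msg, uint32_t *require_reply)
	{
		/* Inspect or consume the message per the typedef contract
		 * above; this stub accepts everything and needs no reply. */
		(void)vid; (void)msg;
		*require_reply = 0;
		return 0;
	}

	/* In the backend's per-device init, with dev = get_device(vid): */
	dev->extern_data = my_ctx;
	dev->extern_ops.pre_msg_handle = NULL;
	dev->extern_ops.post_msg_handle = my_post_handler;
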
+
+/**
* Device structure contains all configuration information relating
* to the device.
*/
@@ -241,8 +361,32 @@ struct virtio_net {
struct guest_page *guest_pages;
int slave_req_fd;
+ rte_spinlock_t slave_req_lock;
+
+ /*
+ * Device id to identify a specific backend device.
+ * It's set to -1 for the default software implementation.
+ */
+ int vdpa_dev_id;
+
+ /* private data for virtio device */
+ void *extern_data;
+ /* pre and post vhost user message handlers for the device */
+ struct vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;
+static __rte_always_inline bool
+vq_is_packed(struct virtio_net *dev)
+{
+ return dev->features & (1ull << VIRTIO_F_RING_PACKED);
+}
+
+static inline bool
+desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
+{
+ return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
+ wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
+}
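
desc_is_avail() is the packed-ring ownership test: a descriptor belongs to
the device only while its AVAIL bit matches the expected wrap counter and its
USED bit does not. A worked trace using the flag bits declared above:

	/*
	 * With wrap_counter == 1 (first pass over the ring):
	 *   flags = VRING_DESC_F_AVAIL                     -> avail=1, used=0 -> available
	 *   flags = VRING_DESC_F_AVAIL | VRING_DESC_F_USED -> avail=1, used=1 -> not available
	 *   flags = 0                                      -> avail=0, used=0 -> not available
	 * Once the driver wraps, wrap_counter flips to 0 and the encodings
	 * invert, so descriptors left over from the previous pass can never
	 * satisfy the test by accident.
	 */
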
#define VHOST_LOG_PAGE 4096
@@ -252,7 +396,15 @@ struct virtio_net {
static __rte_always_inline void
vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
{
- __sync_fetch_and_or_8(addr, (1U << nr));
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+ /*
+ * __sync_ built-ins are deprecated, but __atomic_ ones
+ * are sub-optimized in older GCC versions.
+ */
+ __sync_fetch_and_or_1(addr, (1U << nr));
+#else
+ __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
+#endif
}
static __rte_always_inline void
@@ -284,6 +436,103 @@ vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
}
static __rte_always_inline void
+vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ unsigned long *log_base;
+ int i;
+
+ if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+ !dev->log_base))
+ return;
+
+ log_base = (unsigned long *)(uintptr_t)dev->log_base;
+
+ /*
+ * It is expected a write memory barrier has been issued
+ * before this function is called.
+ */
+
+ for (i = 0; i < vq->log_cache_nb_elem; i++) {
+ struct log_cache_entry *elem = vq->log_cache + i;
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+ /*
+ * '__sync' builtins are deprecated, but '__atomic' ones
+ * are sub-optimized in older GCC versions.
+ */
+ __sync_fetch_and_or(log_base + elem->offset, elem->val);
+#else
+ __atomic_fetch_or(log_base + elem->offset, elem->val,
+ __ATOMIC_RELAXED);
+#endif
+ }
+
+ rte_smp_wmb();
+
+ vq->log_cache_nb_elem = 0;
+}
+
+static __rte_always_inline void
+vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t page)
+{
+ uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
+ uint32_t offset = page / (sizeof(unsigned long) << 3);
+ int i;
+
+ for (i = 0; i < vq->log_cache_nb_elem; i++) {
+ struct log_cache_entry *elem = vq->log_cache + i;
+
+ if (elem->offset == offset) {
+ elem->val |= (1UL << bit_nr);
+ return;
+ }
+ }
+
+ if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
+ /*
+ * No more room for a new log cache entry,
+ * so write the dirty log map directly.
+ */
+ rte_smp_wmb();
+ vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+ return;
+ }
+
+ vq->log_cache[i].offset = offset;
+ vq->log_cache[i].val = (1UL << bit_nr);
+ vq->log_cache_nb_elem++;
+}
+
+static __rte_always_inline void
+vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t addr, uint64_t len)
+{
+ uint64_t page;
+
+ if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+ !dev->log_base || !len))
+ return;
+
+ if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+ return;
+
+ page = addr / VHOST_LOG_PAGE;
+ while (page * VHOST_LOG_PAGE < addr + len) {
+ vhost_log_cache_page(dev, vq, page);
+ page += 1;
+ }
+}
+
+static __rte_always_inline void
+vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t offset, uint64_t len)
+{
+ vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
+}
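
The log cache batches dirty-page bits per virtqueue so the shared log bitmap
is only touched with atomic ORs when the cache is flushed by
vhost_log_cache_sync() or overflows past VHOST_LOG_CACHE_NR entries. The
page-to-slot arithmetic in vhost_log_cache_page(), worked for a 64-bit
unsigned long (sizeof(unsigned long) << 3 == 64):

	/*
	 *   page = 70  ->  offset = 70 / 64 = 1,  bit_nr = 70 % 64 = 6
	 *
	 * so the entry caches bit 6 of the second word of the dirty log,
	 * and repeated writes within the same 64-page window merely OR into
	 * the cached val instead of costing one atomic per page.
	 */
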
+
+static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t offset, uint64_t len)
{
@@ -296,8 +545,8 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
+#define VHOST_LOG_DEBUG(log_type, fmt, args...) \
+ RTE_LOG(DEBUG, log_type, fmt, ##args)
#define PRINT_PACKET(device, addr, size, header) do { \
char *pkt_addr = (char *)(addr); \
unsigned int index; \
@@ -313,11 +562,10 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
} \
snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
\
- LOG_DEBUG(VHOST_DATA, "%s", packet); \
+ VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
+#define VHOST_LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
@@ -345,18 +593,33 @@ gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
return 0;
}
-struct virtio_net *get_device(int vid);
+static __rte_always_inline struct virtio_net *
+get_device(int vid)
+{
+ struct virtio_net *dev = vhost_devices[vid];
+
+ if (unlikely(!dev)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%d) device not found.\n", vid);
+ }
+
+ return dev;
+}
int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
+void vhost_destroy_device_notify(struct virtio_net *dev);
void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
-void free_vq(struct vhost_virtqueue *vq);
+void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
+void vhost_attach_vdpa_device(int vid, int did);
+void vhost_detach_vdpa_device(int vid);
+
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);
@@ -371,18 +634,18 @@ struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
void vhost_backend_cleanup(struct virtio_net *dev);
uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t iova, uint64_t size, uint8_t perm);
+ uint64_t iova, uint64_t *len, uint8_t perm);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t iova, uint64_t size, uint8_t perm)
+ uint64_t iova, uint64_t *len, uint8_t perm)
{
if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
- return rte_vhost_gpa_to_vva(dev->mem, iova);
+ return rte_vhost_va_from_guest_pa(dev->mem, iova, len);
- return __vhost_iova_to_vva(dev, vq, iova, size, perm);
+ return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
#define vhost_used_event(vr) \
@@ -401,17 +664,17 @@ vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
}
static __rte_always_inline void
-vhost_vring_call(struct virtio_net *dev, struct vhost_virtqueue *vq)
+vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
/* Flush used->idx update before we read avail->flags. */
- rte_mb();
+ rte_smp_mb();
/* Don't kick guest if we don't reach index specified by guest. */
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
uint16_t old = vq->signalled_used;
uint16_t new = vq->last_used_idx;
- LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
+ VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
__func__,
vhost_used_event(vq),
old, new);
@@ -428,4 +691,55 @@ vhost_vring_call(struct virtio_net *dev, struct vhost_virtqueue *vq)
}
}
+static __rte_always_inline void
+vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ uint16_t old, new, off, off_wrap;
+ bool signalled_used_valid, kick = false;
+
+ /* Flush used desc update. */
+ rte_smp_mb();
+
+ if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
+ if (vq->driver_event->flags !=
+ VRING_EVENT_F_DISABLE)
+ kick = true;
+ goto kick;
+ }
+
+ old = vq->signalled_used;
+ new = vq->last_used_idx;
+ vq->signalled_used = new;
+ signalled_used_valid = vq->signalled_used_valid;
+ vq->signalled_used_valid = true;
+
+ if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
+ if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
+ kick = true;
+ goto kick;
+ }
+
+ if (unlikely(!signalled_used_valid)) {
+ kick = true;
+ goto kick;
+ }
+
+ rte_smp_rmb();
+
+ off_wrap = vq->driver_event->off_wrap;
+ off = off_wrap & ~(1 << 15);
+
+ if (new <= old)
+ old -= vq->size;
+
+ if (vq->used_wrap_counter != off_wrap >> 15)
+ off -= vq->size;
+
+ if (vhost_need_event(off, new, old))
+ kick = true;
+kick:
+ if (kick)
+ eventfd_write(vq->callfd, (eventfd_t)1);
+}
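
The VRING_EVENT_F_DESC branch above folds the ring wrap into 16-bit modular
arithmetic before handing off to vhost_need_event(). A worked instance,
tracing the code with vq->size = 8:

	/*
	 *   signalled_used (old) = 6, last_used_idx (new) = 2 after a wrap
	 *     -> new <= old, so old -= 8          (old = 65534 as uint16_t)
	 *   driver published off = 7 with the pre-wrap counter bit
	 *     -> wrap counters differ, off -= 8   (off = 65535)
	 *   vhost_need_event(off, new, old):
	 *     (uint16_t)(new - off - 1) = 2  <  (uint16_t)(new - old) = 4
	 *   so the requested event index was crossed and the guest is kicked.
	 */
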
+
#endif /* _VHOST_NET_CDEV_H_ */
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
new file mode 100644
index 00000000..57341ef8
--- /dev/null
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -0,0 +1,1372 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+#include <rte_malloc.h>
+#include <rte_hash.h>
+#include <rte_jhash.h>
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+
+#include "rte_vhost_crypto.h"
+#include "vhost.h"
+#include "vhost_user.h"
+#include "virtio_crypto.h"
+
+#define INHDR_LEN (sizeof(struct virtio_crypto_inhdr))
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op))
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+#define VC_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n", \
+ "Vhost-Crypto", __func__, __LINE__, ## args)
+#define VC_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n", \
+ "Vhost-Crypto", __func__, __LINE__, ## args)
+
+#define VC_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n", \
+ "Vhost-Crypto", __func__, __LINE__, ## args)
+#else
+#define VC_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
+#define VC_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
+#define VC_LOG_DBG(fmt, args...)
+#endif
+
+#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) | \
+ (1 << VIRTIO_RING_F_INDIRECT_DESC) | \
+ (1 << VIRTIO_RING_F_EVENT_IDX) | \
+ (1 << VIRTIO_CRYPTO_SERVICE_CIPHER) | \
+ (1 << VIRTIO_CRYPTO_SERVICE_MAC) | \
+ (1 << VIRTIO_NET_F_CTRL_VQ))
+
+#define IOVA_TO_VVA(t, r, a, l, p) \
+ ((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
+
+static int
+cipher_algo_transform(uint32_t virtio_cipher_algo)
+{
+ int ret;
+
+ switch (virtio_cipher_algo) {
+ case VIRTIO_CRYPTO_CIPHER_AES_CBC:
+ ret = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_AES_CTR:
+ ret = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_DES_ECB:
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_DES_CBC:
+ ret = RTE_CRYPTO_CIPHER_DES_CBC;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
+ ret = RTE_CRYPTO_CIPHER_3DES_ECB;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
+ ret = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
+ ret = RTE_CRYPTO_CIPHER_3DES_CTR;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
+ ret = RTE_CRYPTO_CIPHER_KASUMI_F8;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
+ ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_AES_F8:
+ ret = RTE_CRYPTO_CIPHER_AES_F8;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_AES_XTS:
+ ret = RTE_CRYPTO_CIPHER_AES_XTS;
+ break;
+ case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
+ ret = RTE_CRYPTO_CIPHER_ZUC_EEA3;
+ break;
+ default:
+ ret = -VIRTIO_CRYPTO_BADMSG;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+auth_algo_transform(uint32_t virtio_auth_algo)
+{
+ int ret;
+
+ switch (virtio_auth_algo) {
+
+ case VIRTIO_CRYPTO_NO_MAC:
+ ret = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case VIRTIO_CRYPTO_MAC_HMAC_MD5:
+ ret = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
+ ret = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
+ ret = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
+ ret = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
+ ret = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
+ ret = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_CMAC_3DES:
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ break;
+ case VIRTIO_CRYPTO_MAC_CMAC_AES:
+ ret = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_KASUMI_F9:
+ ret = RTE_CRYPTO_AUTH_KASUMI_F9;
+ break;
+ case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
+ ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
+ break;
+ case VIRTIO_CRYPTO_MAC_GMAC_AES:
+ ret = RTE_CRYPTO_AUTH_AES_GMAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ break;
+ case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
+ ret = RTE_CRYPTO_AUTH_AES_CBC_MAC;
+ break;
+ case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
+ ret = -VIRTIO_CRYPTO_NOTSUPP;
+ break;
+ case VIRTIO_CRYPTO_MAC_XCBC_AES:
+ ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+ break;
+ default:
+ ret = -VIRTIO_CRYPTO_BADMSG;
+ break;
+ }
+
+ return ret;
+}
+
+static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
+{
+ int len;
+
+ switch (algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ len = 8;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ len = 8;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ len = 8;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ len = 16;
+ break;
+
+ /* TODO: add common algos */
+
+ default:
+ len = -1;
+ break;
+ }
+
+ return len;
+}
+
+/**
+ * vhost_crypto struct maintains a number of virtio_crypto devices and the
+ * single DPDK cryptodev that processes all of their crypto workloads. It is
+ * private to vhost_crypto.c.
+ */
+struct vhost_crypto {
+ /** Used to lookup DPDK Cryptodev Session based on VIRTIO crypto
+ * session ID.
+ */
+ struct rte_hash *session_map;
+ struct rte_mempool *mbuf_pool;
+ struct rte_mempool *sess_pool;
+
+ /** DPDK cryptodev ID */
+ uint8_t cid;
+ uint16_t nb_qps;
+
+ uint64_t last_session_id;
+
+ uint64_t cache_session_id;
+ struct rte_cryptodev_sym_session *cache_session;
+ /** socket id for the device */
+ int socket_id;
+
+ struct virtio_net *dev;
+
+ uint8_t option;
+} __rte_cache_aligned;
+
+struct vhost_crypto_data_req {
+ struct vring_desc *head;
+ struct virtio_net *dev;
+ struct virtio_crypto_inhdr *inhdr;
+ struct vhost_virtqueue *vq;
+ struct vring_desc *wb_desc;
+ uint16_t wb_len;
+ uint16_t desc_idx;
+ uint16_t len;
+ uint16_t zero_copy;
+};
+
+static int
+transform_cipher_param(struct rte_crypto_sym_xform *xform,
+ VhostUserCryptoSessionParam *param)
+{
+ int ret;
+
+ ret = cipher_algo_transform(param->cipher_algo);
+ if (unlikely(ret < 0))
+ return ret;
+
+ xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform->cipher.algo = (uint32_t)ret;
+ xform->cipher.key.length = param->cipher_key_len;
+ if (xform->cipher.key.length > 0)
+ xform->cipher.key.data = param->cipher_key_buf;
+ if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
+ xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
+ xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ else {
+ VC_LOG_DBG("Bad operation type");
+ return -VIRTIO_CRYPTO_BADMSG;
+ }
+
+ ret = get_iv_len(xform->cipher.algo);
+ if (unlikely(ret < 0))
+ return ret;
+ xform->cipher.iv.length = (uint16_t)ret;
+ xform->cipher.iv.offset = IV_OFFSET;
+ return 0;
+}
+
+static int
+transform_chain_param(struct rte_crypto_sym_xform *xforms,
+ VhostUserCryptoSessionParam *param)
+{
+ struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
+ int ret;
+
+ switch (param->chaining_dir) {
+ case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
+ xform_auth = xforms;
+ xform_cipher = xforms->next;
+ xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+ break;
+ case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
+ xform_cipher = xforms;
+ xform_auth = xforms->next;
+ xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ break;
+ default:
+ return -VIRTIO_CRYPTO_BADMSG;
+ }
+
+ /* cipher */
+ ret = cipher_algo_transform(param->cipher_algo);
+ if (unlikely(ret < 0))
+ return ret;
+ xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform_cipher->cipher.algo = (uint32_t)ret;
+ xform_cipher->cipher.key.length = param->cipher_key_len;
+ xform_cipher->cipher.key.data = param->cipher_key_buf;
+ ret = get_iv_len(xform_cipher->cipher.algo);
+ if (unlikely(ret < 0))
+ return ret;
+ xform_cipher->cipher.iv.length = (uint16_t)ret;
+ xform_cipher->cipher.iv.offset = IV_OFFSET;
+
+ /* auth */
+ xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ ret = auth_algo_transform(param->hash_algo);
+ if (unlikely(ret < 0))
+ return ret;
+ xform_auth->auth.algo = (uint32_t)ret;
+ xform_auth->auth.digest_length = param->digest_len;
+ xform_auth->auth.key.length = param->auth_key_len;
+ xform_auth->auth.key.data = param->auth_key_buf;
+
+ return 0;
+}
+
+static void
+vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
+ VhostUserCryptoSessionParam *sess_param)
+{
+ struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
+ struct rte_cryptodev_sym_session *session;
+ int ret;
+
+ switch (sess_param->op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_NONE:
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ ret = transform_cipher_param(&xform1, sess_param);
+ if (unlikely(ret)) {
+ VC_LOG_ERR("Error transform session msg (%i)", ret);
+ sess_param->session_id = ret;
+ return;
+ }
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ if (unlikely(sess_param->hash_mode !=
+ VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
+ sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
+ VC_LOG_ERR("Error transform session message (%i)",
+ -VIRTIO_CRYPTO_NOTSUPP);
+ return;
+ }
+
+ xform1.next = &xform2;
+
+ ret = transform_chain_param(&xform1, sess_param);
+ if (unlikely(ret)) {
+ VC_LOG_ERR("Error transform session message (%i)", ret);
+ sess_param->session_id = ret;
+ return;
+ }
+
+ break;
+ default:
+ VC_LOG_ERR("Algorithm not yet supported");
+ sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
+ return;
+ }
+
+ session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
+ if (!session) {
+ VC_LOG_ERR("Failed to create session");
+ sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+ return;
+ }
+
+ if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
+ vcrypto->sess_pool) < 0) {
+ VC_LOG_ERR("Failed to initialize session");
+ sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+ return;
+ }
+
+ /* insert hash to map */
+ if (rte_hash_add_key_data(vcrypto->session_map,
+ &vcrypto->last_session_id, session) < 0) {
+ VC_LOG_ERR("Failed to insert session to hash table");
+
+ if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
+ VC_LOG_ERR("Failed to clear session");
+ else {
+ if (rte_cryptodev_sym_session_free(session) < 0)
+ VC_LOG_ERR("Failed to free session");
+ }
+ sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+ return;
+ }
+
+ VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
+ vcrypto->last_session_id, vcrypto->dev->vid);
+
+ sess_param->session_id = vcrypto->last_session_id;
+ vcrypto->last_session_id++;
+}
+
+static int
+vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
+{
+ struct rte_cryptodev_sym_session *session;
+ uint64_t sess_id = session_id;
+ int ret;
+
+ ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
+ (void **)&session);
+
+ if (unlikely(ret < 0)) {
+ VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
+ return -VIRTIO_CRYPTO_INVSESS;
+ }
+
+ if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
+ VC_LOG_DBG("Failed to clear session");
+ return -VIRTIO_CRYPTO_ERR;
+ }
+
+ if (rte_cryptodev_sym_session_free(session) < 0) {
+ VC_LOG_DBG("Failed to free session");
+ return -VIRTIO_CRYPTO_ERR;
+ }
+
+ if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
+ VC_LOG_DBG("Failed to delete session from hash table.");
+ return -VIRTIO_CRYPTO_ERR;
+ }
+
+ VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
+ vcrypto->dev->vid);
+
+ return 0;
+}
+
+static int
+vhost_crypto_msg_post_handler(int vid, void *msg, uint32_t *require_reply)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_crypto *vcrypto;
+ VhostUserMsg *vmsg = msg;
+ int ret = 0;
+
+ if (dev == NULL || require_reply == NULL) {
+ VC_LOG_ERR("Invalid vid %i", vid);
+ return -EINVAL;
+ }
+
+ vcrypto = dev->extern_data;
+ if (vcrypto == NULL) {
+ VC_LOG_ERR("Cannot find required data, is it initialized?");
+ return -ENOENT;
+ }
+
+ *require_reply = 0;
+
+ if (vmsg->request.master == VHOST_USER_CRYPTO_CREATE_SESS) {
+ vhost_crypto_create_sess(vcrypto,
+ &vmsg->payload.crypto_session);
+ *require_reply = 1;
+ } else if (vmsg->request.master == VHOST_USER_CRYPTO_CLOSE_SESS)
+ ret = vhost_crypto_close_sess(vcrypto, vmsg->payload.u64);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static __rte_always_inline struct vring_desc *
+find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+{
+ if (desc->flags & VRING_DESC_F_WRITE)
+ return desc;
+
+ while (desc->flags & VRING_DESC_F_NEXT) {
+ desc = &head[desc->next];
+ if (desc->flags & VRING_DESC_F_WRITE)
+ return desc;
+ }
+
+ return NULL;
+}
+
+static struct virtio_crypto_inhdr *
+reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+{
+ uint64_t dlen;
+ struct virtio_crypto_inhdr *inhdr;
+
+ while (desc->flags & VRING_DESC_F_NEXT)
+ desc = &vc_req->head[desc->next];
+
+ dlen = desc->len;
+ inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
+ &dlen, VHOST_ACCESS_WO);
+ if (unlikely(!inhdr || dlen != desc->len))
+ return NULL;
+
+ return inhdr;
+}
+
+static __rte_always_inline int
+move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
+ uint32_t size)
+{
+ struct vring_desc *desc = *cur_desc;
+ int left = size;
+
+ rte_prefetch0(&head[desc->next]);
+ left -= desc->len;
+
+ while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+ desc = &head[desc->next];
+ rte_prefetch0(&head[desc->next]);
+ left -= desc->len;
+ }
+
+ if (unlikely(left > 0)) {
+ VC_LOG_ERR("Incorrect virtio descriptor");
+ return -1;
+ }
+
+ *cur_desc = &head[desc->next];
+ return 0;
+}
+
+static int
+copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+ struct vring_desc **cur_desc, uint32_t size)
+{
+ struct vring_desc *desc = *cur_desc;
+ uint64_t remain, addr, dlen, len;
+ uint32_t to_copy;
+ uint8_t *data = dst_data;
+ uint8_t *src;
+ int left = size;
+
+ rte_prefetch0(&vc_req->head[desc->next]);
+ to_copy = RTE_MIN(desc->len, (uint32_t)left);
+ dlen = to_copy;
+ src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
+ VHOST_ACCESS_RO);
+ if (unlikely(!src || !dlen)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
+ rte_memcpy((uint8_t *)data, src, dlen);
+ data += dlen;
+
+ if (unlikely(dlen < to_copy)) {
+ remain = to_copy - dlen;
+ addr = desc->addr + dlen;
+
+ while (remain) {
+ len = remain;
+ src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!src || !len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
+ rte_memcpy(data, src, len);
+ addr += len;
+ remain -= len;
+ data += len;
+ }
+ }
+
+ left -= to_copy;
+
+ while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+ desc = &vc_req->head[desc->next];
+ rte_prefetch0(&vc_req->head[desc->next]);
+ to_copy = RTE_MIN(desc->len, (uint32_t)left);
+ dlen = desc->len;
+ src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
+ VHOST_ACCESS_RO);
+ if (unlikely(!src || !dlen)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
+ rte_memcpy(data, src, dlen);
+ data += dlen;
+
+ if (unlikely(dlen < to_copy)) {
+ remain = to_copy - dlen;
+ addr = desc->addr + dlen;
+
+ while (remain) {
+ len = remain;
+ src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!src || !len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
+ rte_memcpy(data, src, len);
+ addr += len;
+ remain -= len;
+ data += len;
+ }
+ }
+
+ left -= to_copy;
+ }
+
+ if (unlikely(left > 0)) {
+ VC_LOG_ERR("Incorrect virtio descriptor");
+ return -1;
+ }
+
+ *cur_desc = &vc_req->head[desc->next];
+
+ return 0;
+}
+
+static __rte_always_inline void *
+get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc **cur_desc,
+ uint32_t size, uint8_t perm)
+{
+ void *data;
+ uint64_t dlen = (*cur_desc)->len;
+
+ data = IOVA_TO_VVA(void *, vc_req, (*cur_desc)->addr, &dlen, perm);
+ if (unlikely(!data || dlen != (*cur_desc)->len)) {
+ VC_LOG_ERR("Failed to map object");
+ return NULL;
+ }
+
+ if (unlikely(move_desc(vc_req->head, cur_desc, size) < 0))
+ return NULL;
+
+ return data;
+}
+
+static int
+write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req)
+{
+ struct rte_mbuf *mbuf = op->sym->m_dst;
+ struct vring_desc *head = vc_req->head;
+ struct vring_desc *desc = vc_req->wb_desc;
+ int left = vc_req->wb_len;
+ uint32_t to_write;
+ uint8_t *src_data = mbuf->buf_addr, *dst;
+ uint64_t dlen;
+
+ rte_prefetch0(&head[desc->next]);
+ to_write = RTE_MIN(desc->len, (uint32_t)left);
+ dlen = desc->len;
+ dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
+ VHOST_ACCESS_RW);
+ if (unlikely(!dst || dlen != desc->len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
+ rte_memcpy(dst, src_data, to_write);
+ left -= to_write;
+ src_data += to_write;
+
+ while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+ desc = &head[desc->next];
+ rte_prefetch0(&head[desc->next]);
+ to_write = RTE_MIN(desc->len, (uint32_t)left);
+ dlen = desc->len;
+ dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
+ VHOST_ACCESS_RW);
+ if (unlikely(!dst || dlen != desc->len)) {
+ VC_LOG_ERR("Failed to map descriptor");
+ return -1;
+ }
+
+ rte_memcpy(dst, src_data, to_write);
+ left -= to_write;
+ src_data += to_write;
+ }
+
+ if (unlikely(left < 0)) {
+ VC_LOG_ERR("Incorrect virtio descriptor");
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint8_t
+prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+ struct vhost_crypto_data_req *vc_req,
+ struct virtio_crypto_cipher_data_req *cipher,
+ struct vring_desc *cur_desc)
+{
+ struct vring_desc *desc = cur_desc;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
+ uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+ uint8_t ret = 0;
+
+ /* prepare */
+ /* iv */
+ if (unlikely(copy_data(iv_data, vc_req, &desc,
+ cipher->para.iv_len) < 0)) {
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ m_src->data_len = cipher->para.src_data_len;
+
+ switch (vcrypto->option) {
+ case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
+ cipher->para.src_data_len);
+ m_src->buf_addr = get_data_ptr(vc_req, &desc,
+ cipher->para.src_data_len, VHOST_ACCESS_RO);
+ if (unlikely(m_src->buf_iova == 0 ||
+ m_src->buf_addr == NULL)) {
+ VC_LOG_ERR("zero_copy may fail due to cross page data");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+ break;
+ case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+ if (unlikely(cipher->para.src_data_len >
+ RTE_MBUF_DEFAULT_BUF_SIZE)) {
+ VC_LOG_ERR("Not enough space to do data copy");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+ if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
+ vc_req, &desc, cipher->para.src_data_len)
+ < 0)) {
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+ break;
+ default:
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ /* dst */
+ desc = find_write_desc(vc_req->head, desc);
+ if (unlikely(!desc)) {
+ VC_LOG_ERR("Cannot find write location");
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ switch (vcrypto->option) {
+ case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
+ desc->addr, cipher->para.dst_data_len);
+ m_dst->buf_addr = get_data_ptr(vc_req, &desc,
+ cipher->para.dst_data_len, VHOST_ACCESS_RW);
+ if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
+ VC_LOG_ERR("zero_copy may fail due to cross page data");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+
+ m_dst->data_len = cipher->para.dst_data_len;
+ break;
+ case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+ vc_req->wb_desc = desc;
+ vc_req->wb_len = cipher->para.dst_data_len;
+ if (unlikely(move_desc(vc_req->head, &desc,
+ vc_req->wb_len) < 0)) {
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+ break;
+ default:
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ /* src data */
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+ op->sym->cipher.data.offset = 0;
+ op->sym->cipher.data.length = cipher->para.src_data_len;
+
+ vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
+ if (unlikely(vc_req->inhdr == NULL)) {
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
+ vc_req->len = cipher->para.dst_data_len + INHDR_LEN;
+
+ return 0;
+
+error_exit:
+ vc_req->len = INHDR_LEN;
+ return ret;
+}
+
+static uint8_t
+prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+ struct vhost_crypto_data_req *vc_req,
+ struct virtio_crypto_alg_chain_data_req *chain,
+ struct vring_desc *cur_desc)
+{
+ struct vring_desc *desc = cur_desc;
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
+ uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+ uint32_t digest_offset;
+ void *digest_addr;
+ uint8_t ret = 0;
+
+ /* prepare */
+ /* iv */
+ if (unlikely(copy_data(iv_data, vc_req, &desc,
+ chain->para.iv_len) < 0)) {
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ m_src->data_len = chain->para.src_data_len;
+ m_dst->data_len = chain->para.dst_data_len;
+
+ switch (vcrypto->option) {
+ case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
+ chain->para.src_data_len);
+ m_src->buf_addr = get_data_ptr(vc_req, &desc,
+ chain->para.src_data_len, VHOST_ACCESS_RO);
+ if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
+ VC_LOG_ERR("zero_copy may fail due to cross page data");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+ break;
+ case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+ if (unlikely(chain->para.src_data_len >
+ RTE_MBUF_DEFAULT_BUF_SIZE)) {
+ VC_LOG_ERR("Not enough space to do data copy");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
+				vc_req, &desc, chain->para.src_data_len) < 0)) {
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+ break;
+ default:
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ /* dst */
+ desc = find_write_desc(vc_req->head, desc);
+ if (unlikely(!desc)) {
+ VC_LOG_ERR("Cannot find write location");
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ switch (vcrypto->option) {
+ case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
+ desc->addr, chain->para.dst_data_len);
+ m_dst->buf_addr = get_data_ptr(vc_req, &desc,
+ chain->para.dst_data_len, VHOST_ACCESS_RW);
+ if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
+ VC_LOG_ERR("zero_copy may fail due to cross page data");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+
+ op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
+ desc->addr, chain->para.hash_result_len);
+ op->sym->auth.digest.data = get_data_ptr(vc_req, &desc,
+ chain->para.hash_result_len, VHOST_ACCESS_RW);
+ if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
+ VC_LOG_ERR("zero_copy may fail due to cross page data");
+ ret = VIRTIO_CRYPTO_ERR;
+ goto error_exit;
+ }
+ break;
+ case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+ digest_offset = m_dst->data_len;
+ digest_addr = rte_pktmbuf_mtod_offset(m_dst, void *,
+ digest_offset);
+
+ vc_req->wb_desc = desc;
+ vc_req->wb_len = m_dst->data_len + chain->para.hash_result_len;
+
+ if (unlikely(move_desc(vc_req->head, &desc,
+ chain->para.dst_data_len) < 0)) {
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+		if (unlikely(copy_data(digest_addr, vc_req, &desc,
+				chain->para.hash_result_len) < 0)) {
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ op->sym->auth.digest.data = digest_addr;
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst,
+ digest_offset);
+ break;
+ default:
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ /* record inhdr */
+ vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
+ if (unlikely(vc_req->inhdr == NULL)) {
+ ret = VIRTIO_CRYPTO_BADMSG;
+ goto error_exit;
+ }
+
+ vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
+
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+ op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
+ op->sym->cipher.data.length = chain->para.src_data_len -
+ chain->para.cipher_start_src_offset;
+
+ op->sym->auth.data.offset = chain->para.hash_start_src_offset;
+ op->sym->auth.data.length = chain->para.len_to_hash;
+
+ vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
+ INHDR_LEN;
+ return 0;
+
+error_exit:
+ vc_req->len = INHDR_LEN;
+ return ret;
+}
+
+/**
+ * Process one request on a descriptor chain
+ */
+static __rte_always_inline int
+vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+ struct vhost_virtqueue *vq, struct rte_crypto_op *op,
+ struct vring_desc *head, uint16_t desc_idx)
+{
+ struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
+ struct rte_cryptodev_sym_session *session;
+ struct virtio_crypto_op_data_req *req, tmp_req;
+ struct virtio_crypto_inhdr *inhdr;
+ struct vring_desc *desc = NULL;
+ uint64_t session_id;
+ uint64_t dlen;
+ int err = 0;
+
+ vc_req->desc_idx = desc_idx;
+ vc_req->dev = vcrypto->dev;
+ vc_req->vq = vq;
+
+ if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
+ dlen = head->len;
+ desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
+ &dlen, VHOST_ACCESS_RO);
+ if (unlikely(!desc || dlen != head->len))
+ return -1;
+ desc_idx = 0;
+ head = desc;
+ } else {
+ desc = head;
+ }
+
+ vc_req->head = head;
+ vc_req->zero_copy = vcrypto->option;
+
+ req = get_data_ptr(vc_req, &desc, sizeof(*req), VHOST_ACCESS_RO);
+ if (unlikely(req == NULL)) {
+ switch (vcrypto->option) {
+ case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+ err = VIRTIO_CRYPTO_BADMSG;
+ VC_LOG_ERR("Invalid descriptor");
+ goto error_exit;
+ case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+ req = &tmp_req;
+ if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
+ < 0)) {
+ err = VIRTIO_CRYPTO_BADMSG;
+ VC_LOG_ERR("Invalid descriptor");
+ goto error_exit;
+ }
+ break;
+ default:
+ err = VIRTIO_CRYPTO_ERR;
+ VC_LOG_ERR("Invalid option");
+ goto error_exit;
+ }
+ }
+
+ switch (req->header.opcode) {
+ case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
+ case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+ session_id = req->header.session_id;
+
+ /* one branch to avoid unnecessary table lookup */
+ if (vcrypto->cache_session_id != session_id) {
+ err = rte_hash_lookup_data(vcrypto->session_map,
+ &session_id, (void **)&session);
+ if (unlikely(err < 0)) {
+ err = VIRTIO_CRYPTO_ERR;
+ VC_LOG_ERR("Failed to find session %"PRIu64,
+ session_id);
+ goto error_exit;
+ }
+
+ vcrypto->cache_session = session;
+ vcrypto->cache_session_id = session_id;
+ }
+
+ session = vcrypto->cache_session;
+
+ err = rte_crypto_op_attach_sym_session(op, session);
+ if (unlikely(err < 0)) {
+ err = VIRTIO_CRYPTO_ERR;
+ VC_LOG_ERR("Failed to attach session to op");
+ goto error_exit;
+ }
+
+ switch (req->u.sym_req.op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_NONE:
+ err = VIRTIO_CRYPTO_NOTSUPP;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ err = prepare_sym_cipher_op(vcrypto, op, vc_req,
+ &req->u.sym_req.u.cipher, desc);
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ err = prepare_sym_chain_op(vcrypto, op, vc_req,
+ &req->u.sym_req.u.chain, desc);
+ break;
+ }
+ if (unlikely(err != 0)) {
+ VC_LOG_ERR("Failed to process sym request");
+ goto error_exit;
+ }
+ break;
+ default:
+ VC_LOG_ERR("Unsupported symmetric crypto request type %u",
+ req->header.opcode);
+ goto error_exit;
+ }
+
+ return 0;
+
+error_exit:
+
+ inhdr = reach_inhdr(vc_req, desc);
+ if (likely(inhdr != NULL))
+ inhdr->status = (uint8_t)err;
+
+ return -1;
+}
+
+static __rte_always_inline struct vhost_virtqueue *
+vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
+ struct vhost_virtqueue *old_vq)
+{
+ struct rte_mbuf *m_src = op->sym->m_src;
+ struct rte_mbuf *m_dst = op->sym->m_dst;
+ struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
+ uint16_t desc_idx;
+ int ret = 0;
+
+ if (unlikely(!vc_req)) {
+ VC_LOG_ERR("Failed to retrieve vc_req");
+ return NULL;
+ }
+
+ if (old_vq && (vc_req->vq != old_vq))
+ return vc_req->vq;
+
+ desc_idx = vc_req->desc_idx;
+
+ if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
+ vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
+ else {
+ if (vc_req->zero_copy == 0) {
+ ret = write_back_data(op, vc_req);
+ if (unlikely(ret != 0))
+ vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
+ }
+ }
+
+ vc_req->vq->used->ring[desc_idx].id = desc_idx;
+ vc_req->vq->used->ring[desc_idx].len = vc_req->len;
+
+ rte_mempool_put(m_dst->pool, (void *)m_dst);
+ rte_mempool_put(m_src->pool, (void *)m_src);
+
+ return vc_req->vq;
+}
+
+static __rte_always_inline uint16_t
+vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
+ uint16_t nb_ops, int *callfd)
+{
+ uint16_t processed = 1;
+ struct vhost_virtqueue *vq, *tmp_vq;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ vq = vhost_crypto_finalize_one_request(ops[0], NULL);
+ if (unlikely(vq == NULL))
+ return 0;
+ tmp_vq = vq;
+
+ while ((processed < nb_ops)) {
+ tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
+ tmp_vq);
+
+ if (unlikely(vq != tmp_vq))
+ break;
+
+ processed++;
+ }
+
+ *callfd = vq->callfd;
+
+ *(volatile uint16_t *)&vq->used->idx += processed;
+
+ return processed;
+}
+
+int __rte_experimental
+rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+ struct rte_mempool *sess_pool, int socket_id)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct rte_hash_parameters params = {0};
+ struct vhost_crypto *vcrypto;
+ char name[128];
+ int ret;
+
+ if (!dev) {
+ VC_LOG_ERR("Invalid vid %i", vid);
+ return -EINVAL;
+ }
+
+ ret = rte_vhost_driver_set_features(dev->ifname,
+ VIRTIO_CRYPTO_FEATURES);
+ if (ret < 0) {
+ VC_LOG_ERR("Error setting features");
+ return -1;
+ }
+
+ vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!vcrypto) {
+ VC_LOG_ERR("Insufficient memory");
+ return -ENOMEM;
+ }
+
+ vcrypto->sess_pool = sess_pool;
+ vcrypto->cid = cryptodev_id;
+ vcrypto->cache_session_id = UINT64_MAX;
+ vcrypto->last_session_id = 1;
+ vcrypto->dev = dev;
+ vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
+
+ snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
+ params.name = name;
+ params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
+ params.hash_func = rte_jhash;
+ params.key_len = sizeof(uint64_t);
+ params.socket_id = socket_id;
+ vcrypto->session_map = rte_hash_create(&params);
+ if (!vcrypto->session_map) {
+ VC_LOG_ERR("Failed to creath session map");
+ ret = -ENOMEM;
+ goto error_exit;
+ }
+
+ snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
+ vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
+ VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
+ sizeof(struct vhost_crypto_data_req),
+ RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
+ rte_socket_id());
+ if (!vcrypto->mbuf_pool) {
+ VC_LOG_ERR("Failed to creath mbuf pool");
+ ret = -ENOMEM;
+ goto error_exit;
+ }
+
+ dev->extern_data = vcrypto;
+ dev->extern_ops.pre_msg_handle = NULL;
+ dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;
+
+ return 0;
+
+error_exit:
+ if (vcrypto->session_map)
+ rte_hash_free(vcrypto->session_map);
+ if (vcrypto->mbuf_pool)
+ rte_mempool_free(vcrypto->mbuf_pool);
+
+ rte_free(vcrypto);
+
+ return ret;
+}
+
+int __rte_experimental
+rte_vhost_crypto_free(int vid)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_crypto *vcrypto;
+
+ if (unlikely(dev == NULL)) {
+ VC_LOG_ERR("Invalid vid %i", vid);
+ return -EINVAL;
+ }
+
+ vcrypto = dev->extern_data;
+ if (unlikely(vcrypto == NULL)) {
+ VC_LOG_ERR("Cannot find required data, is it initialized?");
+ return -ENOENT;
+ }
+
+ rte_hash_free(vcrypto->session_map);
+ rte_mempool_free(vcrypto->mbuf_pool);
+ rte_free(vcrypto);
+
+ dev->extern_data = NULL;
+ dev->extern_ops.pre_msg_handle = NULL;
+ dev->extern_ops.post_msg_handle = NULL;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_crypto *vcrypto;
+
+ if (unlikely(dev == NULL)) {
+ VC_LOG_ERR("Invalid vid %i", vid);
+ return -EINVAL;
+ }
+
+ if (unlikely((uint32_t)option >=
+ RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
+ VC_LOG_ERR("Invalid option %i", option);
+ return -EINVAL;
+ }
+
+ vcrypto = (struct vhost_crypto *)dev->extern_data;
+ if (unlikely(vcrypto == NULL)) {
+ VC_LOG_ERR("Cannot find required data, is it initialized?");
+ return -ENOENT;
+ }
+
+ if (vcrypto->option == (uint8_t)option)
+ return 0;
+
+ if (!(rte_mempool_full(vcrypto->mbuf_pool))) {
+ VC_LOG_ERR("Cannot update zero copy as mempool is not full");
+ return -EINVAL;
+ }
+
+ vcrypto->option = (uint8_t)option;
+
+ return 0;
+}
+
+uint16_t __rte_experimental
+rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_crypto *vcrypto;
+ struct vhost_virtqueue *vq;
+ uint16_t avail_idx;
+ uint16_t start_idx;
+ uint16_t required;
+ uint16_t count;
+ uint16_t i;
+
+ if (unlikely(dev == NULL)) {
+ VC_LOG_ERR("Invalid vid %i", vid);
+ return -EINVAL;
+ }
+
+ if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
+ VC_LOG_ERR("Invalid qid %u", qid);
+ return -EINVAL;
+ }
+
+ vcrypto = (struct vhost_crypto *)dev->extern_data;
+ if (unlikely(vcrypto == NULL)) {
+ VC_LOG_ERR("Cannot find required data, is it initialized?");
+ return -ENOENT;
+ }
+
+ vq = dev->virtqueue[qid];
+
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ start_idx = vq->last_used_idx;
+ count = avail_idx - start_idx;
+ count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
+ count = RTE_MIN(count, nb_ops);
+
+ if (unlikely(count == 0))
+ return 0;
+
+ /* For zero copy we need 2 empty mbufs for src and dst; otherwise
+ * we need only 1 mbuf for both src and dst.
+ */
+ required = count * 2;
+ if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs,
+ required) < 0)) {
+ VC_LOG_ERR("Insufficient memory");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ uint16_t used_idx = (start_idx + i) & (vq->size - 1);
+ uint16_t desc_idx = vq->avail->ring[used_idx];
+ struct vring_desc *head = &vq->desc[desc_idx];
+ struct rte_crypto_op *op = ops[i];
+
+ op->sym->m_src = mbufs[i * 2];
+ op->sym->m_dst = mbufs[i * 2 + 1];
+ op->sym->m_src->data_off = 0;
+ op->sym->m_dst->data_off = 0;
+
+ if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head,
+ desc_idx) < 0))
+ break;
+ }
+
+ vq->last_used_idx += i;
+
+ return i;
+}
+
+uint16_t __rte_experimental
+rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+ uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
+{
+ struct rte_crypto_op **tmp_ops = ops;
+ uint16_t count = 0, left = nb_ops;
+ int callfd;
+ uint16_t idx = 0;
+
+ while (left) {
+ count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
+ &callfd);
+ if (unlikely(count == 0))
+ break;
+
+ tmp_ops = &tmp_ops[count];
+ left -= count;
+
+ callfds[idx++] = callfd;
+
+ if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
+ VC_LOG_ERR("Too many vqs");
+ break;
+ }
+ }
+
+ *nb_callfds = idx;
+
+ return nb_ops - left;
+}
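
Together, fetch_requests() and finalize_requests() bracket the cryptodev burst API. A sketch of one polling iteration (error paths and the freeing of unconsumed/completed ops are omitted; MAX_BURST, MAX_VQS and the queue ids are assumptions, not values from this patch):

    #include <sys/eventfd.h>
    #include <rte_cryptodev.h>
    #include <rte_vhost_crypto.h>

    #define MAX_BURST 32
    #define MAX_VQS    8

    static void
    vhost_crypto_poll(int vid, uint32_t qid, uint8_t cdev_id,
                    struct rte_mempool *cop_pool)
    {
            struct rte_crypto_op *ops[MAX_BURST];
            struct rte_crypto_op *done[MAX_BURST];
            int callfds[MAX_VQS];
            uint16_t nb, nb_done, nb_fds, i;

            if (rte_crypto_op_bulk_alloc(cop_pool,
                            RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, MAX_BURST) == 0)
                    return;

            /* Guest descriptors -> rte_crypto ops -> cryptodev. */
            nb = rte_vhost_crypto_fetch_requests(vid, qid, ops, MAX_BURST);
            if (nb > 0)
                    rte_cryptodev_enqueue_burst(cdev_id, 0, ops, nb);

            /* Completed ops -> used ring, collecting one callfd per vq. */
            nb_done = rte_cryptodev_dequeue_burst(cdev_id, 0, done, MAX_BURST);
            nb_done = rte_vhost_crypto_finalize_requests(done, nb_done,
                            callfds, &nb_fds);

            /* Kick each virtqueue whose requests completed. */
            for (i = 0; i < nb_fds; i++)
                    eventfd_write(callfds[i], (eventfd_t)1);
    }
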
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 90ed2112..a2d4c9ff 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1,5 +1,22 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2016 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+/* Security model
+ * --------------
+ * The vhost-user protocol connection is an external interface, so it must be
+ * robust against invalid inputs.
+ *
+ * This is important because the vhost-user master is only one step removed
+ * from the guest. A malicious guest that has escaped its VM can then launch
+ * further attacks from the vhost-user master.
+ *
+ * Even in deployments where guests are trusted, a bug in the vhost-user master
+ * can still cause invalid messages to be sent. Such messages must not
+ * compromise the stability of the DPDK application by causing crashes, memory
+ * corruption, or other problematic behavior.
+ *
+ * Do not assume received VhostUserMsg fields contain sensible values!
*/
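
One concrete instance of this rule appears further down in this patch: vhost_user_set_mem_table() now bounds-checks the region count before touching the payload. As a standalone sketch of the pattern:

    /* Sketch: never index by or iterate over an unchecked wire value. */
    static int
    validate_mem_table(const struct VhostUserMsg *msg)
    {
            if (msg->payload.memory.nregions > VHOST_MEMORY_MAX_NREGIONS)
                    return -1;
            return 0;
    }
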
#include <stdint.h>
@@ -50,6 +67,8 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
[VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD",
[VHOST_USER_IOTLB_MSG] = "VHOST_USER_IOTLB_MSG",
+ [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
+ [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
};
static uint64_t
@@ -116,10 +135,7 @@ vhost_user_set_owner(void)
static int
vhost_user_reset_owner(struct virtio_net *dev)
{
- if (dev->flags & VIRTIO_DEV_RUNNING) {
- dev->flags &= ~VIRTIO_DEV_RUNNING;
- dev->notify_ops->destroy_device(dev->vid);
- }
+ vhost_destroy_device_notify(dev);
cleanup_device(dev, 0);
reset_device(dev);
@@ -139,12 +155,26 @@ vhost_user_get_features(struct virtio_net *dev)
}
/*
+ * The number of queues we support is requested.
+ */
+static uint32_t
+vhost_user_get_queue_num(struct virtio_net *dev)
+{
+ uint32_t queue_num = 0;
+
+ rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
+ return queue_num;
+}
+
+/*
 * We receive the negotiated features supported by both us and the virtio device.
*/
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
uint64_t vhost_features = 0;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
rte_vhost_driver_get_features(dev->ifname, &vhost_features);
if (features & ~vhost_features) {
@@ -181,7 +211,7 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
} else {
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
}
- LOG_DEBUG(VHOST_CONFIG,
+ VHOST_LOG_DEBUG(VHOST_CONFIG,
"(%d) mergeable RX buffers %s, virtio 1 %s\n",
dev->vid,
(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
@@ -203,10 +233,15 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features)
dev->virtqueue[dev->nr_vring] = NULL;
cleanup_vq(vq, 1);
- free_vq(vq);
+ free_vq(dev, vq);
}
}
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->set_features)
+ vdpa_dev->ops->set_features(dev->vid);
+
return 0;
}
@@ -221,6 +256,17 @@ vhost_user_set_vring_num(struct virtio_net *dev,
vq->size = msg->payload.state.num;
+ /* VIRTIO 1.0, 2.4 Virtqueues says:
+ *
+ * Queue Size value is always a power of 2. The maximum Queue Size
+ * value is 32768.
+ */
+ if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "invalid virtqueue size %u\n", vq->size);
+ return -1;
+ }
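
The test relies on the usual bit trick: n & (n - 1) clears the lowest set bit, so the expression is zero exactly when at most one bit is set. A self-contained sketch of the same predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* True for 1, 2, 4, ..., 32768. Note that 0 & (0 - 1) == 0, so a
     * size of zero would pass the bit test above; standalone users may
     * want to reject 0 explicitly, as done here. */
    static bool
    vq_size_valid(uint16_t n)
    {
            return n != 0 && (n & (n - 1)) == 0 && n <= 32768;
    }
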
+
if (dev->dequeue_zero_copy) {
vq->nr_zmbuf = 0;
vq->last_zmbuf_idx = 0;
@@ -236,13 +282,26 @@ vhost_user_set_vring_num(struct virtio_net *dev,
TAILQ_INIT(&vq->zmbuf_list);
}
- vq->shadow_used_ring = rte_malloc(NULL,
+ if (vq_is_packed(dev)) {
+ vq->shadow_used_packed = rte_malloc(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem_packed),
+ RTE_CACHE_LINE_SIZE);
+ if (!vq->shadow_used_packed) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to allocate memory for shadow used ring.\n");
+ return -1;
+ }
+
+ } else {
+ vq->shadow_used_split = rte_malloc(NULL,
vq->size * sizeof(struct vring_used_elem),
RTE_CACHE_LINE_SIZE);
- if (!vq->shadow_used_ring) {
- RTE_LOG(ERR, VHOST_CONFIG,
- "failed to allocate memory for shadow used ring.\n");
- return -1;
+ if (!vq->shadow_used_split) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to allocate memory for shadow used ring.\n");
+ return -1;
+ }
}
vq->batch_copy_elems = rte_malloc(NULL,
@@ -269,7 +328,8 @@ numa_realloc(struct virtio_net *dev, int index)
struct virtio_net *old_dev;
struct vhost_virtqueue *old_vq, *vq;
struct zcopy_mbuf *new_zmbuf;
- struct vring_used_elem *new_shadow_used_ring;
+ struct vring_used_elem *new_shadow_used_split;
+ struct vring_used_elem_packed *new_shadow_used_packed;
struct batch_copy_elem *new_batch_copy_elems;
int ret;
@@ -304,13 +364,26 @@ numa_realloc(struct virtio_net *dev, int index)
vq->zmbufs = new_zmbuf;
}
- new_shadow_used_ring = rte_malloc_socket(NULL,
- vq->size * sizeof(struct vring_used_elem),
- RTE_CACHE_LINE_SIZE,
- newnode);
- if (new_shadow_used_ring) {
- rte_free(vq->shadow_used_ring);
- vq->shadow_used_ring = new_shadow_used_ring;
+ if (vq_is_packed(dev)) {
+ new_shadow_used_packed = rte_malloc_socket(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem_packed),
+ RTE_CACHE_LINE_SIZE,
+ newnode);
+ if (new_shadow_used_packed) {
+ rte_free(vq->shadow_used_packed);
+ vq->shadow_used_packed = new_shadow_used_packed;
+ }
+ } else {
+ new_shadow_used_split = rte_malloc_socket(NULL,
+ vq->size *
+ sizeof(struct vring_used_elem),
+ RTE_CACHE_LINE_SIZE,
+ newnode);
+ if (new_shadow_used_split) {
+ rte_free(vq->shadow_used_split);
+ vq->shadow_used_split = new_shadow_used_split;
+ }
}
new_batch_copy_elems = rte_malloc_socket(NULL,
@@ -366,21 +439,26 @@ numa_realloc(struct virtio_net *dev, int index __rte_unused)
/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
-qva_to_vva(struct virtio_net *dev, uint64_t qva)
+qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
- struct rte_vhost_mem_region *reg;
+ struct rte_vhost_mem_region *r;
uint32_t i;
/* Find the region where the address lives. */
for (i = 0; i < dev->mem->nregions; i++) {
- reg = &dev->mem->regions[i];
+ r = &dev->mem->regions[i];
+
+ if (qva >= r->guest_user_addr &&
+ qva < r->guest_user_addr + r->size) {
- if (qva >= reg->guest_user_addr &&
- qva < reg->guest_user_addr + reg->size) {
- return qva - reg->guest_user_addr +
- reg->host_user_addr;
+ if (unlikely(*len > r->guest_user_addr + r->size - qva))
+ *len = r->guest_user_addr + r->size - qva;
+
+ return qva - r->guest_user_addr +
+ r->host_user_addr;
}
}
+ *len = 0;
return 0;
}
@@ -393,20 +471,20 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva)
*/
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t ra, uint64_t size)
+ uint64_t ra, uint64_t *size)
{
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
uint64_t vva;
vva = vhost_user_iotlb_cache_find(vq, ra,
- &size, VHOST_ACCESS_RW);
+ size, VHOST_ACCESS_RW);
if (!vva)
vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);
return vva;
}
- return qva_to_vva(dev, ra);
+ return qva_to_vva(dev, ra, size);
}
static struct virtio_net *
@@ -414,16 +492,63 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
struct vhost_vring_addr *addr = &vq->ring_addrs;
+ uint64_t len;
+
+ if (vq_is_packed(dev)) {
+ len = sizeof(struct vring_packed_desc) * vq->size;
+ vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
+ ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
+ vq->log_guest_addr = 0;
+ if (vq->desc_packed == NULL ||
+ len != sizeof(struct vring_packed_desc) *
+ vq->size) {
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "(%d) failed to map desc_packed ring.\n",
+ dev->vid);
+ return dev;
+ }
+
+ dev = numa_realloc(dev, vq_index);
+ vq = dev->virtqueue[vq_index];
+ addr = &vq->ring_addrs;
+
+ len = sizeof(struct vring_packed_desc_event);
+ vq->driver_event = (struct vring_packed_desc_event *)
+ (uintptr_t)ring_addr_to_vva(dev,
+ vq, addr->avail_user_addr, &len);
+ if (vq->driver_event == NULL ||
+ len != sizeof(struct vring_packed_desc_event)) {
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "(%d) failed to find driver area address.\n",
+ dev->vid);
+ return dev;
+ }
+
+ len = sizeof(struct vring_packed_desc_event);
+ vq->device_event = (struct vring_packed_desc_event *)
+ (uintptr_t)ring_addr_to_vva(dev,
+ vq, addr->used_user_addr, &len);
+ if (vq->device_event == NULL ||
+ len != sizeof(struct vring_packed_desc_event)) {
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ "(%d) failed to find device area address.\n",
+ dev->vid);
+ return dev;
+ }
+
+ return dev;
+ }
/* The addresses are converted from QEMU virtual to Vhost virtual. */
if (vq->desc && vq->avail && vq->used)
return dev;
+ len = sizeof(struct vring_desc) * vq->size;
vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
- vq, addr->desc_user_addr, sizeof(struct vring_desc));
- if (vq->desc == 0) {
+ vq, addr->desc_user_addr, &len);
+ if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
RTE_LOG(DEBUG, VHOST_CONFIG,
- "(%d) failed to find desc ring address.\n",
+ "(%d) failed to map desc ring.\n",
dev->vid);
return dev;
}
@@ -432,20 +557,26 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
vq = dev->virtqueue[vq_index];
addr = &vq->ring_addrs;
+ len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
- vq, addr->avail_user_addr, sizeof(struct vring_avail));
- if (vq->avail == 0) {
+ vq, addr->avail_user_addr, &len);
+ if (vq->avail == 0 ||
+ len != sizeof(struct vring_avail) +
+ sizeof(uint16_t) * vq->size) {
RTE_LOG(DEBUG, VHOST_CONFIG,
- "(%d) failed to find avail ring address.\n",
+ "(%d) failed to map avail ring.\n",
dev->vid);
return dev;
}
+ len = sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size;
vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
- vq, addr->used_user_addr, sizeof(struct vring_used));
- if (vq->used == 0) {
+ vq, addr->used_user_addr, &len);
+ if (vq->used == 0 || len != sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size) {
RTE_LOG(DEBUG, VHOST_CONFIG,
- "(%d) failed to find used ring address.\n",
+ "(%d) failed to map used ring.\n",
dev->vid);
return dev;
}
@@ -461,13 +592,13 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
vq->log_guest_addr = addr->log_guest_addr;
- LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
+ VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
dev->vid, vq->desc);
- LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
+ VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
dev->vid, vq->avail);
- LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
+ VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
dev->vid, vq->used);
- LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
+ VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
dev->vid, vq->log_guest_addr);
return dev;
@@ -500,7 +631,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
if (vq->enabled && (dev->features &
(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
- dev = translate_ring_addresses(dev, msg->payload.state.index);
+ dev = translate_ring_addresses(dev, msg->payload.addr.index);
if (!dev)
return -1;
@@ -525,7 +656,7 @@ vhost_user_set_vring_base(struct virtio_net *dev,
return 0;
}
-static void
+static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
uint64_t host_phys_addr, uint64_t size)
{
@@ -535,6 +666,10 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
dev->max_guest_pages *= 2;
dev->guest_pages = realloc(dev->guest_pages,
dev->max_guest_pages * sizeof(*page));
+ if (!dev->guest_pages) {
+ RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
+ return -1;
+ }
}
if (dev->nr_guest_pages > 0) {
@@ -543,7 +678,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
if (host_phys_addr == last_page->host_phys_addr +
last_page->size) {
last_page->size += size;
- return;
+ return 0;
}
}
@@ -551,9 +686,11 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
page->guest_phys_addr = guest_phys_addr;
page->host_phys_addr = host_phys_addr;
page->size = size;
+
+ return 0;
}
-static void
+static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
uint64_t page_size)
{
@@ -567,7 +704,9 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
size = page_size - (guest_phys_addr & (page_size - 1));
size = RTE_MIN(size, reg_size);
- add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
+ if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
+ return -1;
+
host_user_addr += size;
guest_phys_addr += size;
reg_size -= size;
@@ -576,12 +715,16 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
size = RTE_MIN(reg_size, page_size);
host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
host_user_addr);
- add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
+ if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
+ size) < 0)
+ return -1;
host_user_addr += size;
guest_phys_addr += size;
reg_size -= size;
}
+
+ return 0;
}
#ifdef RTE_LIBRTE_VHOST_DEBUG
@@ -635,8 +778,9 @@ vhost_memory_changed(struct VhostUserMemory *new,
}
static int
-vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
+vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
+ struct virtio_net *dev = *pdev;
struct VhostUserMemory memory = pmsg->payload.memory;
struct rte_vhost_mem_region *reg;
void *mmap_addr;
@@ -644,8 +788,15 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
uint64_t mmap_offset;
uint64_t alignment;
uint32_t i;
+ int populate;
int fd;
+ if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "too many memory regions (%u)\n", memory.nregions);
+ return -1;
+ }
+
if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
RTE_LOG(INFO, VHOST_CONFIG,
"(%d) memory regions not changed\n", dev->vid);
@@ -662,6 +813,11 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
dev->mem = NULL;
}
+ /* Flush IOTLB cache as previous HVAs are now invalid */
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ for (i = 0; i < dev->nr_vring; i++)
+ vhost_user_iotlb_flush_all(dev->virtqueue[i]);
+
dev->nr_guest_pages = 0;
if (!dev->guest_pages) {
dev->max_guest_pages = 8;
@@ -696,7 +852,17 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
reg->fd = fd;
mmap_offset = memory.regions[i].mmap_offset;
- mmap_size = reg->size + mmap_offset;
+
+ /* Check for memory_size + mmap_offset overflow */
+ if (mmap_offset >= -reg->size) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "mmap_offset (%#"PRIx64") and memory_size "
+ "(%#"PRIx64") overflow\n",
+ mmap_offset, reg->size);
+ goto err_mmap;
+ }
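
The check uses unsigned wraparound: for size != 0, (uint64_t)-size equals UINT64_MAX - size + 1, so "offset >= -size" holds exactly when offset + size would overflow 64 bits (as a side effect, the form above also rejects size == 0). Written out equivalently:

    #include <stdbool.h>
    #include <stdint.h>

    /* offset + size overflows uint64_t iff offset > UINT64_MAX - size. */
    static bool
    add_overflows_u64(uint64_t offset, uint64_t size)
    {
            return size != 0 && offset > UINT64_MAX - size;
    }
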
+
+ mmap_size = reg->size + mmap_offset;
/* mmap() without flag of MAP_ANONYMOUS, should be called
* with length argument aligned with hugepagesz at older
@@ -714,8 +880,9 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
}
mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
+ populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, fd, 0);
+ MAP_SHARED | populate, fd, 0);
if (mmap_addr == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG,
@@ -729,7 +896,12 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
mmap_offset;
if (dev->dequeue_zero_copy)
- add_guest_pages(dev, reg, alignment);
+ if (add_guest_pages(dev, reg, alignment) < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "adding guest pages to region %u failed.\n",
+ i);
+ goto err_mmap;
+ }
RTE_LOG(INFO, VHOST_CONFIG,
"guest memory region %u, size: 0x%" PRIx64 "\n"
@@ -750,6 +922,25 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
mmap_offset);
}
+ for (i = 0; i < dev->nr_vring; i++) {
+ struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+ if (vq->desc || vq->avail || vq->used) {
+ /*
+ * If the memory table got updated, the ring addresses
+ * need to be translated again as virtual addresses have
+ * changed.
+ */
+ vring_invalidate(dev, vq);
+
+ dev = translate_ring_addresses(dev, i);
+ if (!dev)
+ return -1;
+
+ *pdev = dev;
+ }
+ }
+
dump_guest_pages(dev);
return 0;
@@ -761,10 +952,20 @@ err_mmap:
return -1;
}
-static int
-vq_is_ready(struct vhost_virtqueue *vq)
+static bool
+vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- return vq && vq->desc && vq->avail && vq->used &&
+ bool rings_ok;
+
+ if (!vq)
+ return false;
+
+ if (vq_is_packed(dev))
+ rings_ok = !!vq->desc_packed;
+ else
+ rings_ok = vq->desc && vq->avail && vq->used;
+
+ return rings_ok &&
vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}
@@ -781,7 +982,7 @@ virtio_is_ready(struct virtio_net *dev)
for (i = 0; i < dev->nr_vring; i++) {
vq = dev->virtqueue[i];
- if (!vq_is_ready(vq))
+ if (!vq_is_ready(dev, vq))
return 0;
}
@@ -874,15 +1075,13 @@ vhost_user_get_vring_base(struct virtio_net *dev,
struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
/* We have to stop the queue (virtio) if it is running. */
- if (dev->flags & VIRTIO_DEV_RUNNING) {
- dev->flags &= ~VIRTIO_DEV_RUNNING;
- dev->notify_ops->destroy_device(dev->vid);
- }
+ vhost_destroy_device_notify(dev);
dev->flags &= ~VIRTIO_DEV_READY;
+ dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
- /* Here we are safe to get the last used index */
- msg->payload.state.num = vq->last_used_idx;
+ /* Here we are safe to get the last avail index */
+ msg->payload.state.num = vq->last_avail_idx;
RTE_LOG(INFO, VHOST_CONFIG,
"vring base idx:%d file:%d\n", msg->payload.state.index,
@@ -897,10 +1096,20 @@ vhost_user_get_vring_base(struct virtio_net *dev,
vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+ if (vq->callfd >= 0)
+ close(vq->callfd);
+
+ vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
+
if (dev->dequeue_zero_copy)
free_zmbufs(vq);
- rte_free(vq->shadow_used_ring);
- vq->shadow_used_ring = NULL;
+ if (vq_is_packed(dev)) {
+ rte_free(vq->shadow_used_packed);
+ vq->shadow_used_packed = NULL;
+ } else {
+ rte_free(vq->shadow_used_split);
+ vq->shadow_used_split = NULL;
+ }
rte_free(vq->batch_copy_elems);
vq->batch_copy_elems = NULL;
@@ -917,16 +1126,24 @@ vhost_user_set_vring_enable(struct virtio_net *dev,
VhostUserMsg *msg)
{
int enable = (int)msg->payload.state.num;
+ int index = (int)msg->payload.state.index;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
RTE_LOG(INFO, VHOST_CONFIG,
"set queue enable: %d to qp idx: %d\n",
- enable, msg->payload.state.index);
+ enable, index);
+
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->set_vring_state)
+ vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
if (dev->notify_ops->vring_state_changed)
dev->notify_ops->vring_state_changed(dev->vid,
- msg->payload.state.index, enable);
+ index, enable);
- dev->virtqueue[msg->payload.state.index]->enabled = enable;
+ dev->virtqueue[index]->enabled = enable;
return 0;
}
@@ -935,9 +1152,10 @@ static void
vhost_user_get_protocol_features(struct virtio_net *dev,
struct VhostUserMsg *msg)
{
- uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+ uint64_t features, protocol_features;
rte_vhost_driver_get_features(dev->ifname, &features);
+ rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
/*
* REPLY_ACK protocol feature is only mandatory for now
@@ -983,6 +1201,15 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
size = msg->payload.log.mmap_size;
off = msg->payload.log.mmap_offset;
+
+ /* Don't allow mmap_offset to point outside the mmap region */
+ if (off > size) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
+ off, size);
+ return -1;
+ }
+
RTE_LOG(INFO, VHOST_CONFIG,
"log mmap size: %"PRId64", offset: %"PRId64"\n",
size, off);
@@ -991,7 +1218,7 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
* mmap from 0 to workaround a hugepage mmap bug: mmap will
* fail when offset is not page size aligned.
*/
- addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
if (addr == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
@@ -1024,6 +1251,8 @@ static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
uint8_t *mac = (uint8_t *)&msg->payload.u64;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
RTE_LOG(DEBUG, VHOST_CONFIG,
":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1039,6 +1268,10 @@ vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
*/
rte_smp_wmb();
rte_atomic16_set(&dev->broadcast_rarp, 1);
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && vdpa_dev->ops->migration_done)
+ vdpa_dev->ops->migration_done(dev->vid);
return 0;
}
@@ -1131,11 +1364,12 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
struct virtio_net *dev = *pdev;
struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
uint16_t i;
- uint64_t vva;
+ uint64_t vva, len;
switch (imsg->type) {
case VHOST_IOTLB_UPDATE:
- vva = qva_to_vva(dev, imsg->uaddr);
+ len = imsg->size;
+ vva = qva_to_vva(dev, imsg->uaddr, &len);
if (!vva)
return -1;
@@ -1143,7 +1377,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
struct vhost_virtqueue *vq = dev->virtqueue[i];
vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
- imsg->size, imsg->perm);
+ len, imsg->perm);
if (is_vring_iotlb_update(vq, imsg))
*pdev = dev = translate_ring_addresses(dev, i);
@@ -1200,13 +1434,13 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg)
}
static int
-send_vhost_message(int sockfd, struct VhostUserMsg *msg)
+send_vhost_message(int sockfd, struct VhostUserMsg *msg, int *fds, int fd_num)
{
if (!msg)
return 0;
return send_fd_message(sockfd, (char *)msg,
- VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
+ VHOST_USER_HDR_SIZE + msg->size, fds, fd_num);
}
static int
@@ -1220,7 +1454,23 @@ send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
msg->flags |= VHOST_USER_VERSION;
msg->flags |= VHOST_USER_REPLY_MASK;
- return send_vhost_message(sockfd, msg);
+ return send_vhost_message(sockfd, msg, NULL, 0);
+}
+
+static int
+send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg,
+ int *fds, int fd_num)
+{
+ int ret;
+
+ if (msg->flags & VHOST_USER_NEED_REPLY)
+ rte_spinlock_lock(&dev->slave_req_lock);
+
+ ret = send_vhost_message(dev->slave_req_fd, msg, fds, fd_num);
+ if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
+ rte_spinlock_unlock(&dev->slave_req_lock);
+
+ return ret;
}
/*
@@ -1300,8 +1550,11 @@ vhost_user_msg_handler(int vid, int fd)
{
struct virtio_net *dev;
struct VhostUserMsg msg;
+ struct rte_vdpa_device *vdpa_dev;
+ int did = -1;
int ret;
int unlock_required = 0;
+ uint32_t skip_master = 0;
dev = get_device(vid);
if (dev == NULL)
@@ -1379,6 +1632,21 @@ vhost_user_msg_handler(int vid, int fd)
}
+ if (dev->extern_ops.pre_msg_handle) {
+ uint32_t need_reply;
+
+ ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
+ (void *)&msg, &need_reply, &skip_master);
+ if (ret < 0)
+ goto skip_to_reply;
+
+ if (need_reply)
+ send_vhost_reply(fd, &msg);
+
+ if (skip_master)
+ goto skip_to_post_handle;
+ }
+
switch (msg.request.master) {
case VHOST_USER_GET_FEATURES:
msg.payload.u64 = vhost_user_get_features(dev);
@@ -1407,7 +1675,7 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_SET_MEM_TABLE:
- ret = vhost_user_set_mem_table(dev, &msg);
+ ret = vhost_user_set_mem_table(&dev, &msg);
break;
case VHOST_USER_SET_LOG_BASE:
@@ -1452,7 +1720,7 @@ vhost_user_msg_handler(int vid, int fd)
break;
case VHOST_USER_GET_QUEUE_NUM:
- msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
+ msg.payload.u64 = (uint64_t)vhost_user_get_queue_num(dev);
msg.size = sizeof(msg.payload.u64);
send_vhost_reply(fd, &msg);
break;
@@ -1479,9 +1747,22 @@ vhost_user_msg_handler(int vid, int fd)
default:
ret = -1;
break;
+ }
+
+skip_to_post_handle:
+ if (dev->extern_ops.post_msg_handle) {
+ uint32_t need_reply;
+ ret = (*dev->extern_ops.post_msg_handle)(
+ dev->vid, (void *)&msg, &need_reply);
+ if (ret < 0)
+ goto skip_to_reply;
+
+ if (need_reply)
+ send_vhost_reply(fd, &msg);
}
+skip_to_reply:
if (unlock_required)
vhost_user_unlock_all_queue_pairs(dev);
@@ -1505,9 +1786,53 @@ vhost_user_msg_handler(int vid, int fd)
}
}
+ did = dev->vdpa_dev_id;
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (vdpa_dev && virtio_is_ready(dev) &&
+ !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
+ msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
+ if (vdpa_dev->ops->dev_conf)
+ vdpa_dev->ops->dev_conf(dev->vid);
+ dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
+ if (vhost_user_host_notifier_ctrl(dev->vid, true) != 0) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "(%d) software relay is used for vDPA, performance may be low.\n",
+ dev->vid);
+ }
+ }
+
return 0;
}
+static int process_slave_message_reply(struct virtio_net *dev,
+ const VhostUserMsg *msg)
+{
+ VhostUserMsg msg_reply;
+ int ret;
+
+ if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
+ return 0;
+
+ if (read_vhost_message(dev->slave_req_fd, &msg_reply) < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ if (msg_reply.request.slave != msg->request.slave) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Received unexpected msg type (%u), expected %u\n",
+ msg_reply.request.slave, msg->request.slave);
+ ret = -1;
+ goto out;
+ }
+
+ ret = msg_reply.payload.u64 ? -1 : 0;
+
+out:
+ rte_spinlock_unlock(&dev->slave_req_lock);
+ return ret;
+}
+
int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
@@ -1523,7 +1848,7 @@ vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
},
};
- ret = send_vhost_message(dev->slave_req_fd, &msg);
+ ret = send_vhost_message(dev->slave_req_fd, &msg, NULL, 0);
if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"Failed to send IOTLB miss message (%d)\n",
@@ -1533,3 +1858,101 @@ vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
return 0;
}
+
+static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
+ int index, int fd,
+ uint64_t offset,
+ uint64_t size)
+{
+ int *fdp = NULL;
+ size_t fd_num = 0;
+ int ret;
+ struct VhostUserMsg msg = {
+ .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
+ .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
+ .size = sizeof(msg.payload.area),
+ .payload.area = {
+ .u64 = index & VHOST_USER_VRING_IDX_MASK,
+ .size = size,
+ .offset = offset,
+ },
+ };
+
+ if (fd < 0)
+ msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
+ else {
+ fdp = &fd;
+ fd_num = 1;
+ }
+
+ ret = send_vhost_slave_message(dev, &msg, fdp, fd_num);
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to set host notifier (%d)\n", ret);
+ return ret;
+ }
+
+ return process_slave_message_reply(dev, &msg);
+}
+
+int vhost_user_host_notifier_ctrl(int vid, bool enable)
+{
+ struct virtio_net *dev;
+ struct rte_vdpa_device *vdpa_dev;
+ int vfio_device_fd, did, ret = 0;
+ uint64_t offset, size;
+ unsigned int i;
+
+ dev = get_device(vid);
+ if (!dev)
+ return -ENODEV;
+
+ did = dev->vdpa_dev_id;
+ if (did < 0)
+ return -EINVAL;
+
+ if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
+ !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
+ !(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
+ !(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
+ !(dev->protocol_features &
+ (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
+ return -ENOTSUP;
+
+ vdpa_dev = rte_vdpa_get_device(did);
+ if (!vdpa_dev)
+ return -ENODEV;
+
+ RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);
+
+ vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
+ if (vfio_device_fd < 0)
+ return -ENOTSUP;
+
+ if (enable) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
+ &size) < 0) {
+ ret = -ENOTSUP;
+ goto disable;
+ }
+
+ if (vhost_user_slave_set_vring_host_notifier(dev, i,
+ vfio_device_fd, offset, size) < 0) {
+ ret = -EFAULT;
+ goto disable;
+ }
+ }
+ } else {
+disable:
+ for (i = 0; i < dev->nr_vring; i++) {
+ vhost_user_slave_set_vring_host_notifier(dev, i, -1,
+ 0, 0);
+ }
+ }
+
+ return ret;
+}
diff --git a/lib/librte_vhost/vhost_user.h b/lib/librte_vhost/vhost_user.h
index d4bd604b..42166adf 100644
--- a/lib/librte_vhost/vhost_user.h
+++ b/lib/librte_vhost/vhost_user.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
#ifndef _VHOST_NET_USER_H
@@ -14,19 +14,15 @@
#define VHOST_MEMORY_MAX_NREGIONS 8
-#define VHOST_USER_PROTOCOL_F_MQ 0
-#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
-#define VHOST_USER_PROTOCOL_F_RARP 2
-#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
-#define VHOST_USER_PROTOCOL_F_NET_MTU 4
-#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
-
#define VHOST_USER_PROTOCOL_FEATURES ((1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
(1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |\
(1ULL << VHOST_USER_PROTOCOL_F_RARP) | \
(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK) | \
(1ULL << VHOST_USER_PROTOCOL_F_NET_MTU) | \
- (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ))
+ (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_CRYPTO_SESSION) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER))
typedef enum VhostUserRequest {
VHOST_USER_NONE = 0,
@@ -52,12 +48,15 @@ typedef enum VhostUserRequest {
VHOST_USER_NET_SET_MTU = 20,
VHOST_USER_SET_SLAVE_REQ_FD = 21,
VHOST_USER_IOTLB_MSG = 22,
- VHOST_USER_MAX
+ VHOST_USER_CRYPTO_CREATE_SESS = 26,
+ VHOST_USER_CRYPTO_CLOSE_SESS = 27,
+ VHOST_USER_MAX = 28
} VhostUserRequest;
typedef enum VhostUserSlaveRequest {
VHOST_USER_SLAVE_NONE = 0,
VHOST_USER_SLAVE_IOTLB_MSG = 1,
+ VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;
@@ -79,10 +78,40 @@ typedef struct VhostUserLog {
uint64_t mmap_offset;
} VhostUserLog;
+/* Comply with Cryptodev-Linux */
+#define VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH 512
+#define VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH 64
+
+/* Same structure as vhost-user backend session info */
+typedef struct VhostUserCryptoSessionParam {
+ int64_t session_id;
+ uint32_t op_code;
+ uint32_t cipher_algo;
+ uint32_t cipher_key_len;
+ uint32_t hash_algo;
+ uint32_t digest_len;
+ uint32_t auth_key_len;
+ uint32_t aad_len;
+ uint8_t op_type;
+ uint8_t dir;
+ uint8_t hash_mode;
+ uint8_t chaining_dir;
+ uint8_t *cipher_key;
+ uint8_t *auth_key;
+ uint8_t cipher_key_buf[VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH];
+ uint8_t auth_key_buf[VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH];
+} VhostUserCryptoSessionParam;
+
+typedef struct VhostUserVringArea {
+ uint64_t u64;
+ uint64_t size;
+ uint64_t offset;
+} VhostUserVringArea;
+
typedef struct VhostUserMsg {
union {
- VhostUserRequest master;
- VhostUserSlaveRequest slave;
+ uint32_t master; /* a VhostUserRequest value */
+ uint32_t slave; /* a VhostUserSlaveRequest value */
} request;
#define VHOST_USER_VERSION_MASK 0x3
@@ -99,6 +128,8 @@ typedef struct VhostUserMsg {
VhostUserMemory memory;
VhostUserLog log;
struct vhost_iotlb_msg iotlb;
+ VhostUserCryptoSessionParam crypto_session;
+ VhostUserVringArea area;
} payload;
int fds[VHOST_MEMORY_MAX_NREGIONS];
} __attribute((packed)) VhostUserMsg;
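
Storing the request as a raw uint32_t rather than an enum keeps the packed wire layout fixed (an enum's width is implementation-defined) and avoids assigning an out-of-range wire value to an enum object. The receive path is then expected to range-check before converting, as vhost_user_msg_handler does; a sketch, with a hypothetical helper name:

    #include <stdint.h>

    static int
    request_from_wire(uint32_t raw, VhostUserRequest *req)
    {
            if (raw >= VHOST_USER_MAX)
                    return -1;      /* unknown or garbage request */
            *req = (VhostUserRequest)raw;
            return 0;
    }
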
@@ -112,6 +143,7 @@ typedef struct VhostUserMsg {
/* vhost_user.c */
int vhost_user_msg_handler(int vid, int fd);
int vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm);
+int vhost_user_host_notifier_ctrl(int vid, bool enable);
/* socket.c */
int read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num);
diff --git a/lib/librte_vhost/virtio_crypto.h b/lib/librte_vhost/virtio_crypto.h
new file mode 100644
index 00000000..e3b93573
--- /dev/null
+++ b/lib/librte_vhost/virtio_crypto.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_H
+#define _VIRTIO_CRYPTO_H
+
+#define VIRTIO_CRYPTO_SERVICE_CIPHER 0
+#define VIRTIO_CRYPTO_SERVICE_HASH 1
+#define VIRTIO_CRYPTO_SERVICE_MAC 2
+#define VIRTIO_CRYPTO_SERVICE_AEAD 3
+
+#define VIRTIO_CRYPTO_OPCODE(service, op) (((service) << 8) | (op))
+
+struct virtio_crypto_ctrl_header {
+#define VIRTIO_CRYPTO_CIPHER_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x02)
+#define VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x03)
+#define VIRTIO_CRYPTO_HASH_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x02)
+#define VIRTIO_CRYPTO_HASH_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x03)
+#define VIRTIO_CRYPTO_MAC_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x02)
+#define VIRTIO_CRYPTO_MAC_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x03)
+#define VIRTIO_CRYPTO_AEAD_CREATE_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
+#define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+ uint32_t opcode;
+ uint32_t algo;
+ uint32_t flag;
+ /* data virtqueue id */
+ uint32_t queue_id;
+};
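
The opcode encoding puts the service in the high byte and the operation in the low byte, so the constants above expand to small fixed values. A compile-time spot check (assumes this header):

    #include <assert.h>

    static void
    opcode_spot_check(void)
    {
            assert(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION == 0x0002); /* (0<<8)|2 */
            assert(VIRTIO_CRYPTO_HASH_CREATE_SESSION   == 0x0102); /* (1<<8)|2 */
            assert(VIRTIO_CRYPTO_AEAD_DESTROY_SESSION  == 0x0303); /* (3<<8)|3 */
    }
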
+
+struct virtio_crypto_cipher_session_para {
+#define VIRTIO_CRYPTO_NO_CIPHER 0
+#define VIRTIO_CRYPTO_CIPHER_ARC4 1
+#define VIRTIO_CRYPTO_CIPHER_AES_ECB 2
+#define VIRTIO_CRYPTO_CIPHER_AES_CBC 3
+#define VIRTIO_CRYPTO_CIPHER_AES_CTR 4
+#define VIRTIO_CRYPTO_CIPHER_DES_ECB 5
+#define VIRTIO_CRYPTO_CIPHER_DES_CBC 6
+#define VIRTIO_CRYPTO_CIPHER_3DES_ECB 7
+#define VIRTIO_CRYPTO_CIPHER_3DES_CBC 8
+#define VIRTIO_CRYPTO_CIPHER_3DES_CTR 9
+#define VIRTIO_CRYPTO_CIPHER_KASUMI_F8 10
+#define VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2 11
+#define VIRTIO_CRYPTO_CIPHER_AES_F8 12
+#define VIRTIO_CRYPTO_CIPHER_AES_XTS 13
+#define VIRTIO_CRYPTO_CIPHER_ZUC_EEA3 14
+ uint32_t algo;
+ /* length of key */
+ uint32_t keylen;
+
+#define VIRTIO_CRYPTO_OP_ENCRYPT 1
+#define VIRTIO_CRYPTO_OP_DECRYPT 2
+ /* encrypt or decrypt */
+ uint32_t op;
+ uint32_t padding;
+};
+
+struct virtio_crypto_session_input {
+ /* Device-writable part */
+ uint64_t session_id;
+ uint32_t status;
+ uint32_t padding;
+};
+
+struct virtio_crypto_cipher_session_req {
+ struct virtio_crypto_cipher_session_para para;
+ uint8_t padding[32];
+};
+
+struct virtio_crypto_hash_session_para {
+#define VIRTIO_CRYPTO_NO_HASH 0
+#define VIRTIO_CRYPTO_HASH_MD5 1
+#define VIRTIO_CRYPTO_HASH_SHA1 2
+#define VIRTIO_CRYPTO_HASH_SHA_224 3
+#define VIRTIO_CRYPTO_HASH_SHA_256 4
+#define VIRTIO_CRYPTO_HASH_SHA_384 5
+#define VIRTIO_CRYPTO_HASH_SHA_512 6
+#define VIRTIO_CRYPTO_HASH_SHA3_224 7
+#define VIRTIO_CRYPTO_HASH_SHA3_256 8
+#define VIRTIO_CRYPTO_HASH_SHA3_384 9
+#define VIRTIO_CRYPTO_HASH_SHA3_512 10
+#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE128 11
+#define VIRTIO_CRYPTO_HASH_SHA3_SHAKE256 12
+ uint32_t algo;
+ /* hash result length */
+ uint32_t hash_result_len;
+ uint8_t padding[8];
+};
+
+struct virtio_crypto_hash_create_session_req {
+ struct virtio_crypto_hash_session_para para;
+ uint8_t padding[40];
+};
+
+struct virtio_crypto_mac_session_para {
+#define VIRTIO_CRYPTO_NO_MAC 0
+#define VIRTIO_CRYPTO_MAC_HMAC_MD5 1
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA1 2
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_224 3
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_256 4
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_384 5
+#define VIRTIO_CRYPTO_MAC_HMAC_SHA_512 6
+#define VIRTIO_CRYPTO_MAC_CMAC_3DES 25
+#define VIRTIO_CRYPTO_MAC_CMAC_AES 26
+#define VIRTIO_CRYPTO_MAC_KASUMI_F9 27
+#define VIRTIO_CRYPTO_MAC_SNOW3G_UIA2 28
+#define VIRTIO_CRYPTO_MAC_GMAC_AES 41
+#define VIRTIO_CRYPTO_MAC_GMAC_TWOFISH 42
+#define VIRTIO_CRYPTO_MAC_CBCMAC_AES 49
+#define VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9 50
+#define VIRTIO_CRYPTO_MAC_XCBC_AES 53
+ uint32_t algo;
+ /* hash result length */
+ uint32_t hash_result_len;
+ /* length of authenticated key */
+ uint32_t auth_key_len;
+ uint32_t padding;
+};
+
+struct virtio_crypto_mac_create_session_req {
+ struct virtio_crypto_mac_session_para para;
+ uint8_t padding[40];
+};
+
+struct virtio_crypto_aead_session_para {
+#define VIRTIO_CRYPTO_NO_AEAD 0
+#define VIRTIO_CRYPTO_AEAD_GCM 1
+#define VIRTIO_CRYPTO_AEAD_CCM 2
+#define VIRTIO_CRYPTO_AEAD_CHACHA20_POLY1305 3
+ uint32_t algo;
+ /* length of key */
+ uint32_t key_len;
+ /* hash result length */
+ uint32_t hash_result_len;
+ /* length of the additional authenticated data (AAD) in bytes */
+ uint32_t aad_len;
+ /* encrypt or decrypt, See above VIRTIO_CRYPTO_OP_* */
+ uint32_t op;
+ uint32_t padding;
+};
+
+struct virtio_crypto_aead_create_session_req {
+ struct virtio_crypto_aead_session_para para;
+ uint8_t padding[32];
+};
+
+struct virtio_crypto_alg_chain_session_para {
+#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER 1
+#define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH 2
+ uint32_t alg_chain_order;
+/* Plain hash */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN 1
+/* Authenticated hash (mac) */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH 2
+/* Nested hash */
+#define VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED 3
+ uint32_t hash_mode;
+ struct virtio_crypto_cipher_session_para cipher_param;
+ union {
+ struct virtio_crypto_hash_session_para hash_param;
+ struct virtio_crypto_mac_session_para mac_param;
+ uint8_t padding[16];
+ } u;
+ /* length of the additional authenticated data (AAD) in bytes */
+ uint32_t aad_len;
+ uint32_t padding;
+};
+
+struct virtio_crypto_alg_chain_session_req {
+ struct virtio_crypto_alg_chain_session_para para;
+};
+
+struct virtio_crypto_sym_create_session_req {
+ union {
+ struct virtio_crypto_cipher_session_req cipher;
+ struct virtio_crypto_alg_chain_session_req chain;
+ uint8_t padding[48];
+ } u;
+
+ /* Device-readable part */
+
+/* No operation */
+#define VIRTIO_CRYPTO_SYM_OP_NONE 0
+/* Cipher only operation on the data */
+#define VIRTIO_CRYPTO_SYM_OP_CIPHER 1
+/*
+ * Chain any cipher with any hash or mac operation. The order
+ * depends on the value of alg_chain_order param
+ */
+#define VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING 2
+ uint32_t op_type;
+ uint32_t padding;
+};
+
+struct virtio_crypto_destroy_session_req {
+ /* Device-readable part */
+ uint64_t session_id;
+ uint8_t padding[48];
+};
+
+/* The request carried by a control virtqueue packet */
+struct virtio_crypto_op_ctrl_req {
+ struct virtio_crypto_ctrl_header header;
+
+ union {
+ struct virtio_crypto_sym_create_session_req
+ sym_create_session;
+ struct virtio_crypto_hash_create_session_req
+ hash_create_session;
+ struct virtio_crypto_mac_create_session_req
+ mac_create_session;
+ struct virtio_crypto_aead_create_session_req
+ aead_create_session;
+ struct virtio_crypto_destroy_session_req
+ destroy_session;
+ uint8_t padding[56];
+ } u;
+};
+
+struct virtio_crypto_op_header {
+#define VIRTIO_CRYPTO_CIPHER_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x00)
+#define VIRTIO_CRYPTO_CIPHER_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_CIPHER, 0x01)
+#define VIRTIO_CRYPTO_HASH \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_HASH, 0x00)
+#define VIRTIO_CRYPTO_MAC \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_MAC, 0x00)
+#define VIRTIO_CRYPTO_AEAD_ENCRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
+#define VIRTIO_CRYPTO_AEAD_DECRYPT \
+ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+ uint32_t opcode;
+ /* algo should be one of the service-specific algorithms above */
+ uint32_t algo;
+ /* session_id identifies a session created via the control virtqueue */
+ uint64_t session_id;
+ /* control flag to control the request */
+ uint32_t flag;
+ uint32_t padding;
+};
+
+struct virtio_crypto_cipher_para {
+ /*
+ * Byte Length of valid IV/Counter
+ *
+ * For block ciphers in CBC or F8 mode, or for Kasumi in F8 mode, or for
+ * SNOW3G in UEA2 mode, this is the length of the IV (which
+ * must be the same as the block length of the cipher).
+ * For block ciphers in CTR mode, this is the length of the counter
+ * (which must be the same as the block length of the cipher).
+ * For AES-XTS, this is the 128bit tweak, i, from IEEE Std 1619-2007.
+ *
+ * The IV/Counter will be updated after every partial cryptographic
+ * operation.
+ */
+ uint32_t iv_len;
+ /* length of source data */
+ uint32_t src_data_len;
+ /* length of dst data */
+ uint32_t dst_data_len;
+ uint32_t padding;
+};
+
+struct virtio_crypto_hash_para {
+ /* length of source data */
+ uint32_t src_data_len;
+ /* hash result length */
+ uint32_t hash_result_len;
+};
+
+struct virtio_crypto_mac_para {
+ struct virtio_crypto_hash_para hash;
+};
+
+struct virtio_crypto_aead_para {
+ /*
+ * Byte Length of valid IV data pointed to by the below iv_addr
+ * parameter.
+ *
+ * For GCM mode, this is either 12 (for 96-bit IVs) or 16, in which
+ * case iv_addr points to J0.
+ * For CCM mode, this is the length of the nonce, which can be in the
+ * range 7 to 13 inclusive.
+ */
+ uint32_t iv_len;
+ /* length of additional auth data */
+ uint32_t aad_len;
+ /* length of source data */
+ uint32_t src_data_len;
+ /* length of dst data */
+ uint32_t dst_data_len;
+};
+
+struct virtio_crypto_cipher_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_cipher_para para;
+ uint8_t padding[24];
+};
+
+struct virtio_crypto_hash_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_hash_para para;
+ uint8_t padding[40];
+};
+
+struct virtio_crypto_mac_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_mac_para para;
+ uint8_t padding[40];
+};
+
+struct virtio_crypto_alg_chain_data_para {
+ uint32_t iv_len;
+ /* Length of source data */
+ uint32_t src_data_len;
+ /* Length of destination data */
+ uint32_t dst_data_len;
+ /* Starting point for cipher processing in source data */
+ uint32_t cipher_start_src_offset;
+ /* Length of the source data that the cipher will be computed on */
+ uint32_t len_to_cipher;
+ /* Starting point for hash processing in source data */
+ uint32_t hash_start_src_offset;
+ /* Length of the source data that the hash will be computed on */
+ uint32_t len_to_hash;
+ /* Length of the additional auth data */
+ uint32_t aad_len;
+ /* Length of the hash result */
+ uint32_t hash_result_len;
+ uint32_t reserved;
+};
+
+struct virtio_crypto_alg_chain_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_alg_chain_data_para para;
+};
+
+struct virtio_crypto_sym_data_req {
+ union {
+ struct virtio_crypto_cipher_data_req cipher;
+ struct virtio_crypto_alg_chain_data_req chain;
+ uint8_t padding[40];
+ } u;
+
+ /* See above VIRTIO_CRYPTO_SYM_OP_* */
+ uint32_t op_type;
+ uint32_t padding;
+};
+
+struct virtio_crypto_aead_data_req {
+ /* Device-readable part */
+ struct virtio_crypto_aead_para para;
+ uint8_t padding[32];
+};
+
+/* The request carried by a data virtqueue packet */
+struct virtio_crypto_op_data_req {
+ struct virtio_crypto_op_header header;
+
+ union {
+ struct virtio_crypto_sym_data_req sym_req;
+ struct virtio_crypto_hash_data_req hash_req;
+ struct virtio_crypto_mac_data_req mac_req;
+ struct virtio_crypto_aead_data_req aead_req;
+ uint8_t padding[48];
+ } u;
+};
+
+#define VIRTIO_CRYPTO_OK 0
+#define VIRTIO_CRYPTO_ERR 1
+#define VIRTIO_CRYPTO_BADMSG 2
+#define VIRTIO_CRYPTO_NOTSUPP 3
+#define VIRTIO_CRYPTO_INVSESS 4 /* Invalid session id */
+
+/* The accelerator hardware is ready */
+#define VIRTIO_CRYPTO_S_HW_READY (1 << 0)
+
+struct virtio_crypto_config {
+ /* See VIRTIO_CRYPTO_S_* above */
+ uint32_t status;
+
+ /*
+ * Maximum number of data virtqueues
+ */
+ uint32_t max_dataqueues;
+
+ /*
+ * Specifies the mask of services that the device supports,
+ * see VIRTIO_CRYPTO_SERVICE_* above
+ */
+ uint32_t crypto_services;
+
+ /* Detailed algorithms mask */
+ uint32_t cipher_algo_l;
+ uint32_t cipher_algo_h;
+ uint32_t hash_algo;
+ uint32_t mac_algo_l;
+ uint32_t mac_algo_h;
+ uint32_t aead_algo;
+ /* Maximum length of cipher key */
+ uint32_t max_cipher_key_len;
+ /* Maximum length of authenticated key */
+ uint32_t max_auth_key_len;
+ uint32_t reserve;
+ /* Maximum size of each crypto request's content */
+ uint64_t max_size;
+};
+
+struct virtio_crypto_inhdr {
+ /* See VIRTIO_CRYPTO_* above */
+ uint8_t status;
+};
+#endif /* _VIRTIO_CRYPTO_H */
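
To tie the header together, here is a sketch of a master-side control request that would create an AES-CBC encryption session using the structures above (values illustrative, not from this patch):

    #include <string.h>
    #include "virtio_crypto.h"

    static void
    build_cipher_create(struct virtio_crypto_op_ctrl_req *req,
                    uint32_t keylen)
    {
            memset(req, 0, sizeof(*req));
            req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
            req->header.algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
            req->header.queue_id = 0;       /* data virtqueue id */

            req->u.sym_create_session.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
            req->u.sym_create_session.u.cipher.para.algo =
                            VIRTIO_CRYPTO_CIPHER_AES_CBC;
            req->u.sym_create_session.u.cipher.para.keylen = keylen;
            req->u.sym_create_session.u.cipher.para.op =
                            VIRTIO_CRYPTO_OP_ENCRYPT;
    }
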
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 700aca7c..99c7afc8 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -16,6 +16,7 @@
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
+#include <rte_malloc.h>
#include "iotlb.h"
#include "vhost.h"
@@ -24,60 +25,174 @@
#define MAX_BATCH_LEN 256
+static __rte_always_inline bool
+rxvq_is_mergeable(struct virtio_net *dev)
+{
+ return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
+}
+
static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
+static __rte_always_inline void *
+alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t desc_addr, uint64_t desc_len)
+{
+ void *idesc;
+ uint64_t src, dst;
+ uint64_t len, remain = desc_len;
+
+ idesc = rte_malloc(__func__, desc_len, 0);
+ if (unlikely(!idesc))
+ return 0;
+
+ dst = (uint64_t)(uintptr_t)idesc;
+
+ while (remain) {
+ len = remain;
+ src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!src || !len)) {
+ rte_free(idesc);
+ return 0;
+ }
+
+ rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+ remain -= len;
+ dst += len;
+ desc_addr += len;
+ }
+
+ return idesc;
+}
+
static __rte_always_inline void
-do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint16_t to, uint16_t from, uint16_t size)
+free_ind_table(void *idesc)
+{
+ rte_free(idesc);
+}
+
+static __rte_always_inline void
+do_flush_shadow_used_ring_split(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint16_t to, uint16_t from, uint16_t size)
{
rte_memcpy(&vq->used->ring[to],
- &vq->shadow_used_ring[from],
+ &vq->shadow_used_split[from],
size * sizeof(struct vring_used_elem));
- vhost_log_used_vring(dev, vq,
+ vhost_log_cache_used_vring(dev, vq,
offsetof(struct vring_used, ring[to]),
size * sizeof(struct vring_used_elem));
}
static __rte_always_inline void
-flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
+flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
if (used_idx + vq->shadow_used_idx <= vq->size) {
- do_flush_shadow_used_ring(dev, vq, used_idx, 0,
+ do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
vq->shadow_used_idx);
} else {
uint16_t size;
/* update used ring interval [used_idx, vq->size] */
size = vq->size - used_idx;
- do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);
+ do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
/* update the left half used ring interval [0, left_size] */
- do_flush_shadow_used_ring(dev, vq, 0, size,
+ do_flush_shadow_used_ring_split(dev, vq, 0, size,
vq->shadow_used_idx - size);
}
vq->last_used_idx += vq->shadow_used_idx;
rte_smp_wmb();
+ vhost_log_cache_sync(dev, vq);
+
*(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
+ vq->shadow_used_idx = 0;
vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
}
static __rte_always_inline void
-update_shadow_used_ring(struct vhost_virtqueue *vq,
+update_shadow_used_ring_split(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint16_t len)
{
uint16_t i = vq->shadow_used_idx++;
- vq->shadow_used_ring[i].id = desc_idx;
- vq->shadow_used_ring[i].len = len;
+ vq->shadow_used_split[i].id = desc_idx;
+ vq->shadow_used_split[i].len = len;
+}
+
+static __rte_always_inline void
+flush_shadow_used_ring_packed(struct virtio_net *dev,
+ struct vhost_virtqueue *vq)
+{
+ int i;
+ uint16_t used_idx = vq->last_used_idx;
+
+ /* Split loop in two to save memory barriers */
+ for (i = 0; i < vq->shadow_used_idx; i++) {
+ vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
+ vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
+
+ used_idx += vq->shadow_used_packed[i].count;
+ if (used_idx >= vq->size)
+ used_idx -= vq->size;
+ }
+
+ rte_smp_wmb();
+
+ for (i = 0; i < vq->shadow_used_idx; i++) {
+ uint16_t flags;
+
+ if (vq->shadow_used_packed[i].len)
+ flags = VRING_DESC_F_WRITE;
+ else
+ flags = 0;
+
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
+ } else {
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
+ }
+
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+
+ vhost_log_cache_used_vring(dev, vq,
+ vq->last_used_idx *
+ sizeof(struct vring_packed_desc),
+ sizeof(struct vring_packed_desc));
+
+ vq->last_used_idx += vq->shadow_used_packed[i].count;
+ if (vq->last_used_idx >= vq->size) {
+ vq->used_wrap_counter ^= 1;
+ vq->last_used_idx -= vq->size;
+ }
+ }
+
+ rte_smp_wmb();
+ vq->shadow_used_idx = 0;
+ vhost_log_cache_sync(dev, vq);
+}
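
Packed rings have no separate avail/used rings: the two flag bits are compared against per-side wrap counters, and flipping both to match the device's counter (as done above) marks a descriptor used. The matching availability test, sketched with the flag values from the virtio 1.1 spec:

    #include <stdbool.h>
    #include <stdint.h>

    #define VRING_DESC_F_AVAIL (1 << 7)
    #define VRING_DESC_F_USED  (1 << 15)

    static inline bool
    desc_is_avail(uint16_t flags, bool wrap_counter)
    {
            bool avail = !!(flags & VRING_DESC_F_AVAIL);
            bool used = !!(flags & VRING_DESC_F_USED);

            /* Available: AVAIL matches the driver's wrap counter,
             * USED does not. */
            return avail == wrap_counter && used != wrap_counter;
    }
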
+
+static __rte_always_inline void
+update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
+ uint16_t desc_idx, uint16_t len, uint16_t count)
+{
+ uint16_t i = vq->shadow_used_idx++;
+
+ vq->shadow_used_packed[i].id = desc_idx;
+ vq->shadow_used_packed[i].len = len;
+ vq->shadow_used_packed[i].count = count;
}
static inline void
@@ -89,9 +204,11 @@ do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
for (i = 0; i < count; i++) {
rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
- vhost_log_write(dev, elem[i].log_addr, elem[i].len);
+ vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
}
+
+ vq->batch_copy_nb_elems = 0;
}
static inline void
@@ -103,6 +220,8 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq)
for (i = 0; i < count; i++)
rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
+
+ vq->batch_copy_nb_elems = 0;
}
/* avoid write operation when necessary, to lessen cache issues */
@@ -111,7 +230,7 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq)
(var) = (val); \
} while (0)
-static void
+static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
@@ -173,273 +292,268 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
}
static __rte_always_inline int
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct vring_desc *descs, struct rte_mbuf *m,
- uint16_t desc_idx, uint32_t size)
+map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct buf_vector *buf_vec, uint16_t *vec_idx,
+ uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
- uint32_t desc_avail, desc_offset;
- uint32_t mbuf_avail, mbuf_offset;
- uint32_t cpy_len;
- struct vring_desc *desc;
- uint64_t desc_addr;
- /* A counter to avoid desc dead loop chain */
- uint16_t nr_desc = 1;
- struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
- uint16_t copy_nb = vq->batch_copy_nb_elems;
- int error = 0;
+ uint16_t vec_id = *vec_idx;
- desc = &descs[desc_idx];
- desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
- desc->len, VHOST_ACCESS_RW);
- /*
- * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
- * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
- * otherwise stores offset on the stack instead of in a register.
- */
- if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) {
- error = -1;
- goto out;
+ while (desc_len) {
+ uint64_t desc_addr;
+ uint64_t desc_chunck_len = desc_len;
+
+ if (unlikely(vec_id >= BUF_VECTOR_MAX))
+ return -1;
+
+ desc_addr = vhost_iova_to_vva(dev, vq,
+ desc_iova,
+ &desc_chunck_len,
+ perm);
+ if (unlikely(!desc_addr))
+ return -1;
+
+ buf_vec[vec_id].buf_iova = desc_iova;
+ buf_vec[vec_id].buf_addr = desc_addr;
+ buf_vec[vec_id].buf_len = desc_chunck_len;
+
+ desc_len -= desc_chunck_len;
+ desc_iova += desc_chunck_len;
+ vec_id++;
}
+ *vec_idx = vec_id;
- rte_prefetch0((void *)(uintptr_t)desc_addr);
+ return 0;
+}
- virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr);
- vhost_log_write(dev, desc->addr, dev->vhost_hlen);
- PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
+static __rte_always_inline int
+fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint32_t avail_idx, uint16_t *vec_idx,
+ struct buf_vector *buf_vec, uint16_t *desc_chain_head,
+ uint16_t *desc_chain_len, uint8_t perm)
+{
+ uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
+ uint16_t vec_id = *vec_idx;
+ uint32_t len = 0;
+ uint64_t dlen;
+ struct vring_desc *descs = vq->desc;
+ struct vring_desc *idesc = NULL;
- desc_offset = dev->vhost_hlen;
- desc_avail = desc->len - dev->vhost_hlen;
+ *desc_chain_head = idx;
- mbuf_avail = rte_pktmbuf_data_len(m);
- mbuf_offset = 0;
- while (mbuf_avail != 0 || m->next != NULL) {
- /* done with current mbuf, fetch next */
- if (mbuf_avail == 0) {
- m = m->next;
+ if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+ dlen = vq->desc[idx].len;
+ descs = (struct vring_desc *)(uintptr_t)
+ vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
+ &dlen,
+ VHOST_ACCESS_RO);
+ if (unlikely(!descs))
+ return -1;
- mbuf_offset = 0;
- mbuf_avail = rte_pktmbuf_data_len(m);
- }
+ if (unlikely(dlen < vq->desc[idx].len)) {
+ /*
+ * The indirect desc table is not contiguous
+ * in process VA space, so we have to copy it.
+ */
+ idesc = alloc_copy_ind_table(dev, vq,
+ vq->desc[idx].addr, vq->desc[idx].len);
+ if (unlikely(!idesc))
+ return -1;
- /* done with current desc buf, fetch next */
- if (desc_avail == 0) {
- if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
- /* Room in vring buffer is not enough */
- error = -1;
- goto out;
- }
- if (unlikely(desc->next >= size || ++nr_desc > size)) {
- error = -1;
- goto out;
- }
+ descs = idesc;
+ }
- desc = &descs[desc->next];
- desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
- desc->len,
- VHOST_ACCESS_RW);
- if (unlikely(!desc_addr)) {
- error = -1;
- goto out;
- }
+ idx = 0;
+ }
- desc_offset = 0;
- desc_avail = desc->len;
+ while (1) {
+ if (unlikely(idx >= vq->size)) {
+ free_ind_table(idesc);
+ return -1;
}
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
- if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
- rte_memcpy((void *)((uintptr_t)(desc_addr +
- desc_offset)),
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
- cpy_len);
- vhost_log_write(dev, desc->addr + desc_offset, cpy_len);
- PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
- cpy_len, 0);
- } else {
- batch_copy[copy_nb].dst =
- (void *)((uintptr_t)(desc_addr + desc_offset));
- batch_copy[copy_nb].src =
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
- batch_copy[copy_nb].log_addr = desc->addr + desc_offset;
- batch_copy[copy_nb].len = cpy_len;
- copy_nb++;
+ len += descs[idx].len;
+
+ if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
+ descs[idx].addr, descs[idx].len,
+ perm))) {
+ free_ind_table(idesc);
+ return -1;
}
- mbuf_avail -= cpy_len;
- mbuf_offset += cpy_len;
- desc_avail -= cpy_len;
- desc_offset += cpy_len;
+ if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
+ break;
+
+ idx = descs[idx].next;
}
-out:
- vq->batch_copy_nb_elems = copy_nb;
+ *desc_chain_len = len;
+ *vec_idx = vec_id;
- return error;
+ if (unlikely(!!idesc))
+ free_ind_table(idesc);
+
+ return 0;
}
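
fill_vec_buf_split() walks a split-ring descriptor chain via the next field, and the idx >= vq->size check bounds the walk so a buggy or malicious guest cannot drive the loop forever. A self-contained sketch of the same bounded chain walk, under assumed toy types and flag values:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8
#define F_NEXT 0x1

struct desc { uint32_t len; uint16_t flags, next; };

/* Sum the chain length starting at head; -1 on a malformed chain. */
static int chain_len(const struct desc *ring, uint16_t head, uint32_t *len)
{
	uint16_t idx = head, seen = 0;

	*len = 0;
	while (1) {
		if (idx >= RING_SIZE || ++seen > RING_SIZE)
			return -1;        /* out of range or looping */
		*len += ring[idx].len;
		if (!(ring[idx].flags & F_NEXT))
			return 0;
		idx = ring[idx].next;
	}
}

int main(void)
{
	struct desc ring[RING_SIZE] = {
		[2] = { 100, F_NEXT, 5 },
		[5] = { 200, 0, 0 },
	};
	uint32_t len;

	if (chain_len(ring, 2, &len) == 0)
		printf("chain length = %u\n", len); /* 300 */
	return 0;
}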
-/**
- * This function adds buffers to the virtio devices RX virtqueue. Buffers can
- * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that are successfully
- * added to the RX queue. This function works when the mbuf is scattered, but
- * it doesn't support the mergeable feature.
+/*
+ * Returns -1 on fail, 0 on success
*/
-static __rte_always_inline uint32_t
-virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+static inline int
+reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint32_t size, struct buf_vector *buf_vec,
+ uint16_t *num_buffers, uint16_t avail_head,
+ uint16_t *nr_vec)
{
- struct vhost_virtqueue *vq;
- uint16_t avail_idx, free_entries, start_idx;
- uint16_t desc_indexes[MAX_PKT_BURST];
- struct vring_desc *descs;
- uint16_t used_idx;
- uint32_t i, sz;
+ uint16_t cur_idx;
+ uint16_t vec_idx = 0;
+ uint16_t max_tries, tries = 0;
- LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
+ uint16_t head_idx = 0;
+ uint16_t len = 0;
- vq = dev->virtqueue[queue_id];
+ *num_buffers = 0;
+ cur_idx = vq->last_avail_idx;
- rte_spinlock_lock(&vq->access_lock);
+ if (rxvq_is_mergeable(dev))
+ max_tries = vq->size - 1;
+ else
+ max_tries = 1;
- if (unlikely(vq->enabled == 0))
- goto out_access_unlock;
+ while (size > 0) {
+ if (unlikely(cur_idx == avail_head))
+ return -1;
+ /*
+		 * If we tried all available ring items and still
+		 * can't get enough buffers, something abnormal
+		 * has happened.
+ */
+ if (unlikely(++tries > max_tries))
+ return -1;
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_lock(vq);
+ if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
+ &vec_idx, buf_vec,
+ &head_idx, &len,
+ VHOST_ACCESS_RW) < 0))
+ return -1;
+ len = RTE_MIN(len, size);
+ update_shadow_used_ring_split(vq, head_idx, len);
+ size -= len;
- if (unlikely(vq->access_ok == 0)) {
- if (unlikely(vring_translate(dev, vq) < 0)) {
- count = 0;
- goto out;
- }
+ cur_idx++;
+ *num_buffers += 1;
}
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
- start_idx = vq->last_used_idx;
- free_entries = avail_idx - start_idx;
- count = RTE_MIN(count, free_entries);
- count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
- if (count == 0)
- goto out;
+ *nr_vec = vec_idx;
- LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
- dev->vid, start_idx, start_idx + count);
+ return 0;
+}
- vq->batch_copy_nb_elems = 0;
+static __rte_always_inline int
+fill_vec_buf_packed_indirect(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ struct vring_packed_desc *desc, uint16_t *vec_idx,
+ struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
+{
+ uint16_t i;
+ uint32_t nr_descs;
+ uint16_t vec_id = *vec_idx;
+ uint64_t dlen;
+ struct vring_packed_desc *descs, *idescs = NULL;
+
+ dlen = desc->len;
+ descs = (struct vring_packed_desc *)(uintptr_t)
+ vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
+ if (unlikely(!descs))
+ return -1;
+
+ if (unlikely(dlen < desc->len)) {
+ /*
+		 * The indirect desc table is not contiguous
+		 * in the process VA space, so we have to copy it.
+ */
+ idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
+ if (unlikely(!idescs))
+ return -1;
- /* Retrieve all of the desc indexes first to avoid caching issues. */
- rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]);
- for (i = 0; i < count; i++) {
- used_idx = (start_idx + i) & (vq->size - 1);
- desc_indexes[i] = vq->avail->ring[used_idx];
- vq->used->ring[used_idx].id = desc_indexes[i];
- vq->used->ring[used_idx].len = pkts[i]->pkt_len +
- dev->vhost_hlen;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[used_idx]),
- sizeof(vq->used->ring[used_idx]));
+ descs = idescs;
}
- rte_prefetch0(&vq->desc[desc_indexes[0]]);
- for (i = 0; i < count; i++) {
- uint16_t desc_idx = desc_indexes[i];
- int err;
-
- if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
- descs = (struct vring_desc *)(uintptr_t)
- vhost_iova_to_vva(dev,
- vq, vq->desc[desc_idx].addr,
- vq->desc[desc_idx].len,
- VHOST_ACCESS_RO);
- if (unlikely(!descs)) {
- count = i;
- break;
- }
-
- desc_idx = 0;
- sz = vq->desc[desc_idx].len / sizeof(*descs);
- } else {
- descs = vq->desc;
- sz = vq->size;
- }
+ nr_descs = desc->len / sizeof(struct vring_packed_desc);
+ if (unlikely(nr_descs >= vq->size)) {
+ free_ind_table(idescs);
+ return -1;
+ }
- err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz);
- if (unlikely(err)) {
- count = i;
- break;
+ for (i = 0; i < nr_descs; i++) {
+ if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
+ free_ind_table(idescs);
+ return -1;
}
- if (i + 1 < count)
- rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
+ *len += descs[i].len;
+ if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
+ descs[i].addr, descs[i].len,
+ perm)))
+ return -1;
}
+ *vec_idx = vec_id;
- do_data_copy_enqueue(dev, vq);
-
- rte_smp_wmb();
-
- *(volatile uint16_t *)&vq->used->idx += count;
- vq->last_used_idx += count;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, idx),
- sizeof(vq->used->idx));
-
- vhost_vring_call(dev, vq);
-out:
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_unlock(vq);
-
-out_access_unlock:
- rte_spinlock_unlock(&vq->access_lock);
+ if (unlikely(!!idescs))
+ free_ind_table(idescs);
- return count;
+ return 0;
}
static __rte_always_inline int
-fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint32_t avail_idx, uint32_t *vec_idx,
- struct buf_vector *buf_vec, uint16_t *desc_chain_head,
- uint16_t *desc_chain_len)
+fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint16_t avail_idx, uint16_t *desc_count,
+ struct buf_vector *buf_vec, uint16_t *vec_idx,
+ uint16_t *buf_id, uint16_t *len, uint8_t perm)
{
- uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
- uint32_t vec_id = *vec_idx;
- uint32_t len = 0;
- struct vring_desc *descs = vq->desc;
+ bool wrap_counter = vq->avail_wrap_counter;
+ struct vring_packed_desc *descs = vq->desc_packed;
+ uint16_t vec_id = *vec_idx;
- *desc_chain_head = idx;
+ if (avail_idx < vq->last_avail_idx)
+ wrap_counter ^= 1;
- if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
- descs = (struct vring_desc *)(uintptr_t)
- vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
- vq->desc[idx].len,
- VHOST_ACCESS_RO);
- if (unlikely(!descs))
- return -1;
+ if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
+ return -1;
- idx = 0;
- }
+ *desc_count = 0;
while (1) {
- if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
+ if (unlikely(vec_id >= BUF_VECTOR_MAX))
return -1;
- len += descs[idx].len;
- buf_vec[vec_id].buf_addr = descs[idx].addr;
- buf_vec[vec_id].buf_len = descs[idx].len;
- buf_vec[vec_id].desc_idx = idx;
- vec_id++;
+ *desc_count += 1;
+ *buf_id = descs[avail_idx].id;
- if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
+ if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
+ if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
+ &descs[avail_idx],
+ &vec_id, buf_vec,
+ len, perm) < 0))
+ return -1;
+ } else {
+ *len += descs[avail_idx].len;
+
+ if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
+ descs[avail_idx].addr,
+ descs[avail_idx].len,
+ perm)))
+ return -1;
+ }
+
+ if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
break;
- idx = descs[idx].next;
+ if (++avail_idx >= vq->size) {
+ avail_idx -= vq->size;
+ wrap_counter ^= 1;
+ }
}
- *desc_chain_len = len;
*vec_idx = vec_id;
return 0;
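
For packed rings, availability is not signalled through a separate avail ring but through two flag bits in the descriptor itself, compared against a wrap counter that flips on every ring wrap; that is why the code above toggles wrap_counter when avail_idx has wrapped past last_avail_idx. A sketch of the desc_is_avail() test, using the AVAIL/USED bit positions from the virtio 1.1 spec (assumed here; verify against the real header):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_AVAIL (1u << 7)   /* VIRTQ_DESC_F_AVAIL, per virtio 1.1 */
#define F_USED  (1u << 15)  /* VIRTQ_DESC_F_USED */

/* A descriptor is available when AVAIL matches the driver's wrap
 * counter and USED does not (i.e. the device hasn't consumed it). */
static bool desc_is_avail(uint16_t flags, bool wrap)
{
	return wrap == !!(flags & F_AVAIL) && wrap != !!(flags & F_USED);
}

int main(void)
{
	bool wrap = true;                     /* first pass over the ring */
	uint16_t fresh = F_AVAIL;             /* driver made it available */
	uint16_t consumed = F_AVAIL | F_USED; /* device marked it used   */

	printf("fresh: %d, consumed: %d\n",
	       desc_is_avail(fresh, wrap),
	       desc_is_avail(consumed, wrap)); /* 1, 0 */
	return 0;
}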
@@ -449,61 +563,74 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
* Returns -1 on fail, 0 on success
*/
static inline int
-reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t size, struct buf_vector *buf_vec,
- uint16_t *num_buffers, uint16_t avail_head)
+ uint16_t *nr_vec, uint16_t *num_buffers,
+ uint16_t *nr_descs)
{
- uint16_t cur_idx;
- uint32_t vec_idx = 0;
- uint16_t tries = 0;
+ uint16_t avail_idx;
+ uint16_t vec_idx = 0;
+ uint16_t max_tries, tries = 0;
- uint16_t head_idx = 0;
+ uint16_t buf_id = 0;
uint16_t len = 0;
+ uint16_t desc_count;
*num_buffers = 0;
- cur_idx = vq->last_avail_idx;
+ avail_idx = vq->last_avail_idx;
+
+ if (rxvq_is_mergeable(dev))
+ max_tries = vq->size - 1;
+ else
+ max_tries = 1;
while (size > 0) {
- if (unlikely(cur_idx == avail_head))
+ /*
+		 * If we tried all available ring items and still
+		 * can't get enough buffers, something abnormal
+		 * has happened.
+ */
+ if (unlikely(++tries > max_tries))
return -1;
- if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
- &head_idx, &len) < 0))
+ if (unlikely(fill_vec_buf_packed(dev, vq,
+ avail_idx, &desc_count,
+ buf_vec, &vec_idx,
+ &buf_id, &len,
+ VHOST_ACCESS_RO) < 0))
return -1;
+
len = RTE_MIN(len, size);
- update_shadow_used_ring(vq, head_idx, len);
+ update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
size -= len;
- cur_idx++;
- tries++;
- *num_buffers += 1;
+ avail_idx += desc_count;
+ if (avail_idx >= vq->size)
+ avail_idx -= vq->size;
- /*
- * if we tried all available ring items, and still
- * can't get enough buf, it means something abnormal
- * happened.
- */
- if (unlikely(tries >= vq->size))
- return -1;
+ *nr_descs += desc_count;
+ *num_buffers += 1;
}
+ *nr_vec = vec_idx;
+
return 0;
}
static __rte_always_inline int
-copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf *m, struct buf_vector *buf_vec,
- uint16_t num_buffers)
+ uint16_t nr_vec, uint16_t num_buffers)
{
uint32_t vec_idx = 0;
- uint64_t desc_addr;
uint32_t mbuf_offset, mbuf_avail;
- uint32_t desc_offset, desc_avail;
+ uint32_t buf_offset, buf_avail;
+ uint64_t buf_addr, buf_iova, buf_len;
uint32_t cpy_len;
- uint64_t hdr_addr, hdr_phys_addr;
+ uint64_t hdr_addr;
struct rte_mbuf *hdr_mbuf;
struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
- uint16_t copy_nb = vq->batch_copy_nb_elems;
+ struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
int error = 0;
if (unlikely(m == NULL)) {
@@ -511,45 +638,61 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
goto out;
}
- desc_addr = vhost_iova_to_vva(dev, vq, buf_vec[vec_idx].buf_addr,
- buf_vec[vec_idx].buf_len,
- VHOST_ACCESS_RW);
- if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) {
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+
+ if (nr_vec > 1)
+ rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
+
+ if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
error = -1;
goto out;
}
hdr_mbuf = m;
- hdr_addr = desc_addr;
- hdr_phys_addr = buf_vec[vec_idx].buf_addr;
- rte_prefetch0((void *)(uintptr_t)hdr_addr);
+ hdr_addr = buf_addr;
+ if (unlikely(buf_len < dev->vhost_hlen))
+ hdr = &tmp_hdr;
+ else
+ hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
- LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
dev->vid, num_buffers);
- desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
- desc_offset = dev->vhost_hlen;
+ if (unlikely(buf_len < dev->vhost_hlen)) {
+ buf_offset = dev->vhost_hlen - buf_len;
+ vec_idx++;
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+ buf_avail = buf_len - buf_offset;
+ } else {
+ buf_offset = dev->vhost_hlen;
+ buf_avail = buf_len - dev->vhost_hlen;
+ }
mbuf_avail = rte_pktmbuf_data_len(m);
mbuf_offset = 0;
while (mbuf_avail != 0 || m->next != NULL) {
- /* done with current desc buf, get the next one */
- if (desc_avail == 0) {
+ /* done with current buf, get the next one */
+ if (buf_avail == 0) {
vec_idx++;
- desc_addr =
- vhost_iova_to_vva(dev, vq,
- buf_vec[vec_idx].buf_addr,
- buf_vec[vec_idx].buf_len,
- VHOST_ACCESS_RW);
- if (unlikely(!desc_addr)) {
+ if (unlikely(vec_idx >= nr_vec)) {
error = -1;
goto out;
}
- /* Prefetch buffer address. */
- rte_prefetch0((void *)(uintptr_t)desc_addr);
- desc_offset = 0;
- desc_avail = buf_vec[vec_idx].buf_len;
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+
+ /* Prefetch next buffer address. */
+ if (vec_idx + 1 < nr_vec)
+ rte_prefetch0((void *)(uintptr_t)
+ buf_vec[vec_idx + 1].buf_addr);
+ buf_offset = 0;
+ buf_avail = buf_len;
}
/* done with current mbuf, get the next one */
@@ -561,129 +704,221 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
}
if (hdr_addr) {
- struct virtio_net_hdr_mrg_rxbuf *hdr;
-
- hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)
- hdr_addr;
virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
- ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers);
-
- vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
- PRINT_PACKET(dev, (uintptr_t)hdr_addr,
- dev->vhost_hlen, 0);
+ if (rxvq_is_mergeable(dev))
+ ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
+ num_buffers);
+
+ if (unlikely(hdr == &tmp_hdr)) {
+ uint64_t len;
+ uint64_t remain = dev->vhost_hlen;
+ uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
+ uint64_t iova = buf_vec[0].buf_iova;
+ uint16_t hdr_vec_idx = 0;
+
+ while (remain) {
+ len = RTE_MIN(remain,
+ buf_vec[hdr_vec_idx].buf_len);
+ dst = buf_vec[hdr_vec_idx].buf_addr;
+ rte_memcpy((void *)(uintptr_t)dst,
+ (void *)(uintptr_t)src,
+ len);
+
+ PRINT_PACKET(dev, (uintptr_t)dst,
+ (uint32_t)len, 0);
+ vhost_log_cache_write(dev, vq,
+ iova, len);
+
+ remain -= len;
+ iova += len;
+ src += len;
+ hdr_vec_idx++;
+ }
+ } else {
+ PRINT_PACKET(dev, (uintptr_t)hdr_addr,
+ dev->vhost_hlen, 0);
+ vhost_log_cache_write(dev, vq,
+ buf_vec[0].buf_iova,
+ dev->vhost_hlen);
+ }
hdr_addr = 0;
}
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
- rte_memcpy((void *)((uintptr_t)(desc_addr +
- desc_offset)),
+ if (likely(cpy_len > MAX_BATCH_LEN ||
+ vq->batch_copy_nb_elems >= vq->size)) {
+ rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
cpy_len);
- vhost_log_write(dev,
- buf_vec[vec_idx].buf_addr + desc_offset,
- cpy_len);
- PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+ vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
+ cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
cpy_len, 0);
} else {
- batch_copy[copy_nb].dst =
- (void *)((uintptr_t)(desc_addr + desc_offset));
- batch_copy[copy_nb].src =
+ batch_copy[vq->batch_copy_nb_elems].dst =
+ (void *)((uintptr_t)(buf_addr + buf_offset));
+ batch_copy[vq->batch_copy_nb_elems].src =
rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
- batch_copy[copy_nb].log_addr =
- buf_vec[vec_idx].buf_addr + desc_offset;
- batch_copy[copy_nb].len = cpy_len;
- copy_nb++;
+ batch_copy[vq->batch_copy_nb_elems].log_addr =
+ buf_iova + buf_offset;
+ batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
+ vq->batch_copy_nb_elems++;
}
mbuf_avail -= cpy_len;
mbuf_offset += cpy_len;
- desc_avail -= cpy_len;
- desc_offset += cpy_len;
+ buf_avail -= cpy_len;
+ buf_offset += cpy_len;
}
out:
- vq->batch_copy_nb_elems = copy_nb;
return error;
}
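
Throughout copy_mbuf_to_desc(), copies larger than MAX_BATCH_LEN go straight to rte_memcpy() while small ones are queued in vq->batch_copy_elems and flushed later by do_data_copy_enqueue(), amortizing per-copy bookkeeping. A standalone sketch of that deferred-copy pattern (names hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BATCH_LEN 64
#define BATCH_MAX     32

struct copy_elem { void *dst; const void *src; size_t len; };

static struct copy_elem batch[BATCH_MAX];
static unsigned int nb_elems;

/* Large copies happen immediately; small ones are deferred. */
static void queue_copy(void *dst, const void *src, size_t len)
{
	if (len > MAX_BATCH_LEN || nb_elems >= BATCH_MAX) {
		memcpy(dst, src, len);
		return;
	}
	batch[nb_elems++] = (struct copy_elem){ dst, src, len };
}

static void flush_copies(void)
{
	for (unsigned int i = 0; i < nb_elems; i++)
		memcpy(batch[i].dst, batch[i].src, batch[i].len);
	nb_elems = 0;
}

int main(void)
{
	char a[8], b[8];

	queue_copy(a, "hello", 6);
	queue_copy(b, "world", 6);
	flush_copies();
	printf("%s %s\n", a, b);
	return 0;
}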
static __rte_always_inline uint32_t
-virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
+virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mbuf **pkts, uint32_t count)
{
- struct vhost_virtqueue *vq;
uint32_t pkt_idx = 0;
uint16_t num_buffers;
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint16_t avail_head;
- LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
+ rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+ avail_head = *((volatile uint16_t *)&vq->avail->idx);
- vq = dev->virtqueue[queue_id];
+ for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+ uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+ uint16_t nr_vec = 0;
- rte_spinlock_lock(&vq->access_lock);
+ if (unlikely(reserve_avail_buf_split(dev, vq,
+ pkt_len, buf_vec, &num_buffers,
+ avail_head, &nr_vec) < 0)) {
+ VHOST_LOG_DEBUG(VHOST_DATA,
+ "(%d) failed to get enough desc from vring\n",
+ dev->vid);
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
- if (unlikely(vq->enabled == 0))
- goto out_access_unlock;
+ rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_lock(vq);
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+ dev->vid, vq->last_avail_idx,
+ vq->last_avail_idx + num_buffers);
- if (unlikely(vq->access_ok == 0))
- if (unlikely(vring_translate(dev, vq) < 0))
- goto out;
+ if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
+ buf_vec, nr_vec,
+ num_buffers) < 0) {
+ vq->shadow_used_idx -= num_buffers;
+ break;
+ }
- count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
- if (count == 0)
- goto out;
+ vq->last_avail_idx += num_buffers;
+ }
- vq->batch_copy_nb_elems = 0;
+ do_data_copy_enqueue(dev, vq);
- rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+ if (likely(vq->shadow_used_idx)) {
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
+ }
+
+ return pkt_idx;
+}
+
+static __rte_always_inline uint32_t
+virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ uint32_t pkt_idx = 0;
+ uint16_t num_buffers;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
- vq->shadow_used_idx = 0;
- avail_head = *((volatile uint16_t *)&vq->avail->idx);
for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+ uint16_t nr_vec = 0;
+ uint16_t nr_descs = 0;
- if (unlikely(reserve_avail_buf_mergeable(dev, vq,
- pkt_len, buf_vec, &num_buffers,
- avail_head) < 0)) {
- LOG_DEBUG(VHOST_DATA,
+ if (unlikely(reserve_avail_buf_packed(dev, vq,
+ pkt_len, buf_vec, &nr_vec,
+ &num_buffers, &nr_descs) < 0)) {
+ VHOST_LOG_DEBUG(VHOST_DATA,
"(%d) failed to get enough desc from vring\n",
dev->vid);
vq->shadow_used_idx -= num_buffers;
break;
}
- LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+ rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
+
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
dev->vid, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
- if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx],
- buf_vec, num_buffers) < 0) {
+ if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
+ buf_vec, nr_vec,
+ num_buffers) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
}
- vq->last_avail_idx += num_buffers;
+ vq->last_avail_idx += nr_descs;
+ if (vq->last_avail_idx >= vq->size) {
+ vq->last_avail_idx -= vq->size;
+ vq->avail_wrap_counter ^= 1;
+ }
}
do_data_copy_enqueue(dev, vq);
if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring(dev, vq);
- vhost_vring_call(dev, vq);
+ flush_shadow_used_ring_packed(dev, vq);
+ vhost_vring_call_packed(dev, vq);
}
+ return pkt_idx;
+}
+
+static __rte_always_inline uint32_t
+virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ if (unlikely(vq->enabled == 0))
+ goto out_access_unlock;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
+
+ count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+ if (count == 0)
+ goto out;
+
+ if (vq_is_packed(dev))
+ count = virtio_dev_rx_packed(dev, vq, pkts, count);
+ else
+ count = virtio_dev_rx_split(dev, vq, pkts, count);
+
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
@@ -691,7 +926,7 @@ out:
out_access_unlock:
rte_spinlock_unlock(&vq->access_lock);
- return pkt_idx;
+ return count;
}
uint16_t
@@ -710,10 +945,7 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
return 0;
}
- if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
- return virtio_dev_merge_rx(dev, queue_id, pkts, count);
- else
- return virtio_dev_rx(dev, queue_id, pkts, count);
+ return virtio_dev_rx(dev, queue_id, pkts, count);
}
static inline bool
@@ -838,42 +1070,62 @@ put_zmbuf(struct zcopy_mbuf *zmbuf)
static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct vring_desc *descs, uint16_t max_desc,
- struct rte_mbuf *m, uint16_t desc_idx,
- struct rte_mempool *mbuf_pool)
+ struct buf_vector *buf_vec, uint16_t nr_vec,
+ struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
{
- struct vring_desc *desc;
- uint64_t desc_addr;
- uint32_t desc_avail, desc_offset;
+ uint32_t buf_avail, buf_offset;
+ uint64_t buf_addr, buf_iova, buf_len;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
struct rte_mbuf *cur = m, *prev = m;
+ struct virtio_net_hdr tmp_hdr;
struct virtio_net_hdr *hdr = NULL;
/* A counter to avoid desc dead loop chain */
- uint32_t nr_desc = 1;
+ uint16_t vec_idx = 0;
struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
- uint16_t copy_nb = vq->batch_copy_nb_elems;
int error = 0;
- desc = &descs[desc_idx];
- if (unlikely((desc->len < dev->vhost_hlen)) ||
- (desc->flags & VRING_DESC_F_INDIRECT)) {
- error = -1;
- goto out;
- }
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
- desc_addr = vhost_iova_to_vva(dev,
- vq, desc->addr,
- desc->len,
- VHOST_ACCESS_RO);
- if (unlikely(!desc_addr)) {
+ if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
error = -1;
goto out;
}
+ if (likely(nr_vec > 1))
+ rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
+
if (virtio_net_with_host_offload(dev)) {
- hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
- rte_prefetch0(hdr);
+ if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
+ uint64_t len;
+ uint64_t remain = sizeof(struct virtio_net_hdr);
+ uint64_t src;
+ uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
+ uint16_t hdr_vec_idx = 0;
+
+ /*
+ * No luck, the virtio-net header doesn't fit
+ * in a contiguous virtual area.
+ */
+ while (remain) {
+ len = RTE_MIN(remain,
+ buf_vec[hdr_vec_idx].buf_len);
+ src = buf_vec[hdr_vec_idx].buf_addr;
+ rte_memcpy((void *)(uintptr_t)dst,
+ (void *)(uintptr_t)src, len);
+
+ remain -= len;
+ dst += len;
+ hdr_vec_idx++;
+ }
+
+ hdr = &tmp_hdr;
+ } else {
+ hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
+ rte_prefetch0(hdr);
+ }
}
/*
@@ -881,41 +1133,40 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
* for Tx: the first for storing the header, and others
* for storing the data.
*/
- if (likely((desc->len == dev->vhost_hlen) &&
- (desc->flags & VRING_DESC_F_NEXT) != 0)) {
- desc = &descs[desc->next];
- if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
- error = -1;
- goto out;
- }
-
- desc_addr = vhost_iova_to_vva(dev,
- vq, desc->addr,
- desc->len,
- VHOST_ACCESS_RO);
- if (unlikely(!desc_addr)) {
- error = -1;
+ if (unlikely(buf_len < dev->vhost_hlen)) {
+ buf_offset = dev->vhost_hlen - buf_len;
+ vec_idx++;
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
+ buf_avail = buf_len - buf_offset;
+ } else if (buf_len == dev->vhost_hlen) {
+ if (unlikely(++vec_idx >= nr_vec))
goto out;
- }
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
- desc_offset = 0;
- desc_avail = desc->len;
- nr_desc += 1;
+ buf_offset = 0;
+ buf_avail = buf_len;
} else {
- desc_avail = desc->len - dev->vhost_hlen;
- desc_offset = dev->vhost_hlen;
+ buf_offset = dev->vhost_hlen;
+ buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
}
- rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
+ rte_prefetch0((void *)(uintptr_t)
+ (buf_addr + buf_offset));
- PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
+ PRINT_PACKET(dev,
+ (uintptr_t)(buf_addr + buf_offset),
+ (uint32_t)buf_avail, 0);
mbuf_offset = 0;
mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
while (1) {
uint64_t hpa;
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ cpy_len = RTE_MIN(buf_avail, mbuf_avail);
/*
* A desc buf might across two host physical pages that are
@@ -923,11 +1174,11 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
* will be copied even though zero copy is enabled.
*/
if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
- desc->addr + desc_offset, cpy_len)))) {
+ buf_iova + buf_offset, cpy_len)))) {
cur->data_len = cpy_len;
cur->data_off = 0;
- cur->buf_addr = (void *)(uintptr_t)(desc_addr
- + desc_offset);
+ cur->buf_addr =
+ (void *)(uintptr_t)(buf_addr + buf_offset);
cur->buf_iova = hpa;
/*
@@ -937,61 +1188,53 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
mbuf_avail = cpy_len;
} else {
if (likely(cpy_len > MAX_BATCH_LEN ||
- copy_nb >= vq->size ||
+ vq->batch_copy_nb_elems >= vq->size ||
(hdr && cur == m))) {
rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
mbuf_offset),
- (void *)((uintptr_t)(desc_addr +
- desc_offset)),
+ (void *)((uintptr_t)(buf_addr +
+ buf_offset)),
cpy_len);
} else {
- batch_copy[copy_nb].dst =
+ batch_copy[vq->batch_copy_nb_elems].dst =
rte_pktmbuf_mtod_offset(cur, void *,
mbuf_offset);
- batch_copy[copy_nb].src =
- (void *)((uintptr_t)(desc_addr +
- desc_offset));
- batch_copy[copy_nb].len = cpy_len;
- copy_nb++;
+ batch_copy[vq->batch_copy_nb_elems].src =
+ (void *)((uintptr_t)(buf_addr +
+ buf_offset));
+ batch_copy[vq->batch_copy_nb_elems].len =
+ cpy_len;
+ vq->batch_copy_nb_elems++;
}
}
mbuf_avail -= cpy_len;
mbuf_offset += cpy_len;
- desc_avail -= cpy_len;
- desc_offset += cpy_len;
+ buf_avail -= cpy_len;
+ buf_offset += cpy_len;
- /* This desc reaches to its end, get the next one */
- if (desc_avail == 0) {
- if ((desc->flags & VRING_DESC_F_NEXT) == 0)
+		/* This buf reaches its end, get the next one */
+ if (buf_avail == 0) {
+ if (++vec_idx >= nr_vec)
break;
- if (unlikely(desc->next >= max_desc ||
- ++nr_desc > max_desc)) {
- error = -1;
- goto out;
- }
- desc = &descs[desc->next];
- if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
- error = -1;
- goto out;
- }
-
- desc_addr = vhost_iova_to_vva(dev,
- vq, desc->addr,
- desc->len,
- VHOST_ACCESS_RO);
- if (unlikely(!desc_addr)) {
- error = -1;
- goto out;
- }
+ buf_addr = buf_vec[vec_idx].buf_addr;
+ buf_iova = buf_vec[vec_idx].buf_iova;
+ buf_len = buf_vec[vec_idx].buf_len;
- rte_prefetch0((void *)(uintptr_t)desc_addr);
+ /*
+			 * Prefetch the desc n + 1 buffer while
+			 * the desc n buffer is being processed.
+ */
+ if (vec_idx + 1 < nr_vec)
+ rte_prefetch0((void *)(uintptr_t)
+ buf_vec[vec_idx + 1].buf_addr);
- desc_offset = 0;
- desc_avail = desc->len;
+ buf_offset = 0;
+ buf_avail = buf_len;
- PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
+ PRINT_PACKET(dev, (uintptr_t)buf_addr,
+ (uint32_t)buf_avail, 0);
}
/*
@@ -1027,38 +1270,10 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
vhost_dequeue_offload(hdr, m);
out:
- vq->batch_copy_nb_elems = copy_nb;
return error;
}
-static __rte_always_inline void
-update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint32_t used_idx, uint32_t desc_idx)
-{
- vq->used->ring[used_idx].id = desc_idx;
- vq->used->ring[used_idx].len = 0;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[used_idx]),
- sizeof(vq->used->ring[used_idx]));
-}
-
-static __rte_always_inline void
-update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint32_t count)
-{
- if (unlikely(count == 0))
- return;
-
- rte_smp_wmb();
- rte_smp_rmb();
-
- vq->used->idx += count;
- vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
- sizeof(vq->used->idx));
- vhost_vring_call(dev, vq);
-}
-
static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
@@ -1118,66 +1333,137 @@ restore_mbuf(struct rte_mbuf *m)
}
}
-uint16_t
-rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev;
- struct rte_mbuf *rarp_mbuf = NULL;
- struct vhost_virtqueue *vq;
- uint32_t desc_indexes[MAX_PKT_BURST];
- uint32_t used_idx;
- uint32_t i = 0;
+ uint16_t i;
uint16_t free_entries;
- uint16_t avail_idx;
- dev = get_device(vid);
- if (!dev)
- return 0;
+ if (unlikely(dev->dequeue_zero_copy)) {
+ struct zcopy_mbuf *zmbuf, *next;
- if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- RTE_LOG(ERR, VHOST_DATA,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
- return 0;
- }
+ for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+ zmbuf != NULL; zmbuf = next) {
+ next = TAILQ_NEXT(zmbuf, next);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
- RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
+ if (mbuf_is_consumed(zmbuf->mbuf)) {
+ update_shadow_used_ring_split(vq,
+ zmbuf->desc_idx, 0);
+ TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+ restore_mbuf(zmbuf->mbuf);
+ rte_pktmbuf_free(zmbuf->mbuf);
+ put_zmbuf(zmbuf);
+ vq->nr_zmbuf -= 1;
+ }
+ }
+
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
}
- vq = dev->virtqueue[queue_id];
+ rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
- if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+ free_entries = *((volatile uint16_t *)&vq->avail->idx) -
+ vq->last_avail_idx;
+ if (free_entries == 0)
return 0;
- if (unlikely(vq->enabled == 0))
- goto out_access_unlock;
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
- vq->batch_copy_nb_elems = 0;
+ count = RTE_MIN(count, MAX_PKT_BURST);
+ count = RTE_MIN(count, free_entries);
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
+ dev->vid, count);
- if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
- vhost_user_iotlb_rd_lock(vq);
+ for (i = 0; i < count; i++) {
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t head_idx, dummy_len;
+ uint16_t nr_vec = 0;
+ int err;
- if (unlikely(vq->access_ok == 0))
- if (unlikely(vring_translate(dev, vq) < 0))
- goto out;
+ if (unlikely(fill_vec_buf_split(dev, vq,
+ vq->last_avail_idx + i,
+ &nr_vec, buf_vec,
+ &head_idx, &dummy_len,
+ VHOST_ACCESS_RO) < 0))
+ break;
+
+ if (likely(dev->dequeue_zero_copy == 0))
+ update_shadow_used_ring_split(vq, head_idx, 0);
+
+ rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
+
+ pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
+ if (unlikely(pkts[i] == NULL)) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to allocate memory for mbuf.\n");
+ break;
+ }
+
+ err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
+ mbuf_pool);
+ if (unlikely(err)) {
+ rte_pktmbuf_free(pkts[i]);
+ break;
+ }
+
+ if (unlikely(dev->dequeue_zero_copy)) {
+ struct zcopy_mbuf *zmbuf;
+
+ zmbuf = get_zmbuf(vq);
+ if (!zmbuf) {
+ rte_pktmbuf_free(pkts[i]);
+ break;
+ }
+ zmbuf->mbuf = pkts[i];
+ zmbuf->desc_idx = head_idx;
+
+ /*
+			 * Pin the mbuf by taking an extra reference; we
+			 * will check later whether it has been freed
+			 * (i.e. we were the last user), in which case we
+			 * can then safely update the used ring.
+ */
+ rte_mbuf_refcnt_update(pkts[i], 1);
+
+ vq->nr_zmbuf += 1;
+ TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+ }
+ }
+ vq->last_avail_idx += i;
+
+ if (likely(dev->dequeue_zero_copy == 0)) {
+ do_data_copy_dequeue(vq);
+ if (unlikely(i < count))
+ vq->shadow_used_idx = i;
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
+ }
+
+ return i;
+}
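
In the dequeue zero-copy path above, the mbuf still points into guest memory when handed to the application, so the code takes an extra reference ("pins" it) and only returns the descriptor via the shadow used ring once mbuf_is_consumed() sees that the application has dropped its reference. The idea, reduced to a plain reference counter:

#include <stdbool.h>
#include <stdio.h>

struct mbuf { int refcnt; };

static void mbuf_get(struct mbuf *m) { m->refcnt++; }
static void mbuf_put(struct mbuf *m) { m->refcnt--; }

/* The guest buffer may be recycled once we hold the only reference. */
static bool mbuf_is_consumed(const struct mbuf *m)
{
	return m->refcnt == 1;
}

int main(void)
{
	struct mbuf m = { .refcnt = 1 }; /* our pin */

	mbuf_get(&m);                    /* handed to the application */
	printf("consumed? %d\n", mbuf_is_consumed(&m)); /* 0 */
	mbuf_put(&m);                    /* application frees it */
	printf("consumed? %d\n", mbuf_is_consumed(&m)); /* 1 */
	return 0;
}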
+
+static __rte_always_inline uint16_t
+virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+ uint16_t i;
+
+ rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
if (unlikely(dev->dequeue_zero_copy)) {
struct zcopy_mbuf *zmbuf, *next;
- int nr_updated = 0;
for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
zmbuf != NULL; zmbuf = next) {
next = TAILQ_NEXT(zmbuf, next);
if (mbuf_is_consumed(zmbuf->mbuf)) {
- used_idx = vq->last_used_idx++ & (vq->size - 1);
- update_used_ring(dev, vq, used_idx,
- zmbuf->desc_idx);
- nr_updated += 1;
+ update_shadow_used_ring_packed(vq,
+ zmbuf->desc_idx,
+ 0,
+ zmbuf->desc_count);
TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
restore_mbuf(zmbuf->mbuf);
@@ -1187,93 +1473,34 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
}
}
- update_used_idx(dev, vq, nr_updated);
+ flush_shadow_used_ring_packed(dev, vq);
+ vhost_vring_call_packed(dev, vq);
}
- /*
- * Construct a RARP broadcast packet, and inject it to the "pkts"
- * array, to looks like that guest actually send such packet.
- *
- * Check user_send_rarp() for more information.
- *
- * broadcast_rarp shares a cacheline in the virtio_net structure
- * with some fields that are accessed during enqueue and
- * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
- * result in false sharing between enqueue and dequeue.
- *
- * Prevent unnecessary false sharing by reading broadcast_rarp first
- * and only performing cmpset if the read indicates it is likely to
- * be set.
- */
-
- if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
- rte_atomic16_cmpset((volatile uint16_t *)
- &dev->broadcast_rarp.cnt, 1, 0))) {
-
- rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
- if (rarp_mbuf == NULL) {
- RTE_LOG(ERR, VHOST_DATA,
- "Failed to make RARP packet.\n");
- return 0;
- }
- count -= 1;
- }
-
- free_entries = *((volatile uint16_t *)&vq->avail->idx) -
- vq->last_avail_idx;
- if (free_entries == 0)
- goto out;
-
- LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-
- /* Prefetch available and used ring */
- avail_idx = vq->last_avail_idx & (vq->size - 1);
- used_idx = vq->last_used_idx & (vq->size - 1);
- rte_prefetch0(&vq->avail->ring[avail_idx]);
- rte_prefetch0(&vq->used->ring[used_idx]);
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
count = RTE_MIN(count, MAX_PKT_BURST);
- count = RTE_MIN(count, free_entries);
- LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
+ VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
dev->vid, count);
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (i = 0; i < count; i++) {
- avail_idx = (vq->last_avail_idx + i) & (vq->size - 1);
- used_idx = (vq->last_used_idx + i) & (vq->size - 1);
- desc_indexes[i] = vq->avail->ring[avail_idx];
-
- if (likely(dev->dequeue_zero_copy == 0))
- update_used_ring(dev, vq, used_idx, desc_indexes[i]);
- }
-
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[desc_indexes[0]]);
for (i = 0; i < count; i++) {
- struct vring_desc *desc;
- uint16_t sz, idx;
+ struct buf_vector buf_vec[BUF_VECTOR_MAX];
+ uint16_t buf_id, dummy_len;
+ uint16_t desc_count, nr_vec = 0;
int err;
- if (likely(i + 1 < count))
- rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
+ if (unlikely(fill_vec_buf_packed(dev, vq,
+ vq->last_avail_idx, &desc_count,
+ buf_vec, &nr_vec,
+ &buf_id, &dummy_len,
+ VHOST_ACCESS_RW) < 0))
+ break;
- if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
- desc = (struct vring_desc *)(uintptr_t)
- vhost_iova_to_vva(dev, vq,
- vq->desc[desc_indexes[i]].addr,
- sizeof(*desc),
- VHOST_ACCESS_RO);
- if (unlikely(!desc))
- break;
+ if (likely(dev->dequeue_zero_copy == 0))
+ update_shadow_used_ring_packed(vq, buf_id, 0,
+ desc_count);
- rte_prefetch0(desc);
- sz = vq->desc[desc_indexes[i]].len / sizeof(*desc);
- idx = 0;
- } else {
- desc = vq->desc;
- sz = vq->size;
- idx = desc_indexes[i];
- }
+ rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(pkts[i] == NULL)) {
@@ -1282,8 +1509,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
break;
}
- err = copy_desc_to_mbuf(dev, vq, desc, sz, pkts[i], idx,
- mbuf_pool);
+ err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
+ mbuf_pool);
if (unlikely(err)) {
rte_pktmbuf_free(pkts[i]);
break;
@@ -1298,7 +1525,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
break;
}
zmbuf->mbuf = pkts[i];
- zmbuf->desc_idx = desc_indexes[i];
+ zmbuf->desc_idx = buf_id;
+ zmbuf->desc_count = desc_count;
/*
* Pin lock the mbuf; we will check later to see
@@ -1311,15 +1539,103 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
vq->nr_zmbuf += 1;
TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
}
+
+ vq->last_avail_idx += desc_count;
+ if (vq->last_avail_idx >= vq->size) {
+ vq->last_avail_idx -= vq->size;
+ vq->avail_wrap_counter ^= 1;
+ }
}
- vq->last_avail_idx += i;
if (likely(dev->dequeue_zero_copy == 0)) {
do_data_copy_dequeue(vq);
- vq->last_used_idx += i;
- update_used_idx(dev, vq, i);
+ if (unlikely(i < count))
+ vq->shadow_used_idx = i;
+ flush_shadow_used_ring_packed(dev, vq);
+ vhost_vring_call_packed(dev, vq);
+ }
+
+ return i;
+}
+
+uint16_t
+rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
+ struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev;
+ struct rte_mbuf *rarp_mbuf = NULL;
+ struct vhost_virtqueue *vq;
+
+ dev = get_device(vid);
+ if (!dev)
+ return 0;
+
+ if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "(%d) %s: built-in vhost net backend is disabled.\n",
+ dev->vid, __func__);
+ return 0;
+ }
+
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
+ RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
+ return 0;
+
+ if (unlikely(vq->enabled == 0)) {
+ count = 0;
+ goto out_access_unlock;
+ }
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0)) {
+ count = 0;
+ goto out;
+ }
+
+ /*
+	 * Construct a RARP broadcast packet and inject it into the "pkts"
+	 * array, so that it looks like the guest actually sent such a packet.
+ *
+ * Check user_send_rarp() for more information.
+ *
+ * broadcast_rarp shares a cacheline in the virtio_net structure
+	 * with some fields that are accessed during enqueue, and
+	 * rte_atomic16_cmpset() causes a write when using cmpxchg. This could
+ * result in false sharing between enqueue and dequeue.
+ *
+ * Prevent unnecessary false sharing by reading broadcast_rarp first
+ * and only performing cmpset if the read indicates it is likely to
+ * be set.
+ */
+ if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
+ rte_atomic16_cmpset((volatile uint16_t *)
+ &dev->broadcast_rarp.cnt, 1, 0))) {
+
+ rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
+ if (rarp_mbuf == NULL) {
+ RTE_LOG(ERR, VHOST_DATA,
+ "Failed to make RARP packet.\n");
+ count = 0;
+ goto out;
+ }
+ count -= 1;
}
+ if (vq_is_packed(dev))
+ count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
+ else
+ count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
+
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_unlock(vq);
@@ -1332,10 +1648,10 @@ out_access_unlock:
* Inject it to the head of "pkts" array, so that switch's mac
* learning table will get updated first.
*/
- memmove(&pkts[1], pkts, i * sizeof(struct rte_mbuf *));
+ memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
pkts[0] = rarp_mbuf;
- i += 1;
+ count += 1;
}
- return i;
+ return count;
}
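
The tail of rte_vhost_dequeue_burst() prepends the injected RARP mbuf so a learning switch sees the guest's MAC before any data packet; the burst array is simply shifted one slot with memmove(). A toy illustration of that head insertion:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *pkts[4] = { "data0", "data1", "data2", NULL };
	unsigned int count = 3;

	/* Shift everything right by one and put the RARP packet first,
	 * mirroring the memmove(&pkts[1], pkts, ...) in the patch. */
	memmove(&pkts[1], pkts, count * sizeof(pkts[0]));
	pkts[0] = "rarp";
	count += 1;

	for (unsigned int i = 0; i < count; i++)
		printf("%s\n", pkts[i]);
	return 0;
}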
diff --git a/lib/meson.build b/lib/meson.build
index ef615917..eb91f100 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -9,22 +9,27 @@
# given as a dep, no need to mention ring. This is especially true for the
# core libs which are widely reused, so their deps are kept to a minimum.
libraries = [ 'compat', # just a header, used for versioning
- 'eal', 'ring', 'mempool', 'mbuf', 'net', 'ether', 'pci', # core
+ 'kvargs',
+ 'eal', 'ring', 'mempool', 'mbuf', 'net', 'ethdev', 'pci', # core
'metrics', # bitrate/latency stats depends on this
'hash', # efd depends on this
- 'kvargs', # cryptodev depends on this
+ 'timer', # eventdev depends on this
'acl', 'bbdev', 'bitratestats', 'cfgfile',
- 'cmdline', 'cryptodev',
+ 'cmdline', 'compressdev', 'cryptodev',
'distributor', 'efd', 'eventdev',
'gro', 'gso', 'ip_frag', 'jobstats',
'kni', 'latencystats', 'lpm', 'member',
- 'meter', 'power', 'pdump',
- 'reorder', 'sched', 'security', 'timer', 'vhost',
+ 'meter', 'power', 'pdump', 'rawdev',
+ 'reorder', 'sched', 'security', 'vhost',
# add pkt framework libs which use other libs from above
'port', 'table', 'pipeline',
# flow_classify lib depends on pkt framework table lib
- 'flow_classify']
+ 'flow_classify', 'bpf']
+default_cflags = machine_args
+if cc.has_argument('-Wno-format-truncation')
+ default_cflags += '-Wno-format-truncation'
+endif
foreach l:libraries
build = true
name = l
@@ -33,7 +38,7 @@ foreach l:libraries
sources = []
headers = []
includes = []
- cflags = machine_args
+ cflags = default_cflags
objs = [] # other object files to link against, used e.g. for
# instruction-set optimized versions of code
@@ -41,9 +46,12 @@ foreach l:libraries
# external package/library requirements
ext_deps = []
deps = ['eal'] # eal is standard dependency except for itself
- if l == 'eal'
+ if l == 'kvargs'
deps = []
endif
+ if l == 'eal'
+ deps = ['kvargs']
+ endif
dir_name = 'librte_' + l
subdir(dir_name)
@@ -63,6 +71,10 @@ foreach l:libraries
shared_deps = ext_deps
static_deps = ext_deps
foreach d:deps
+ if not is_variable('shared_rte_' + d)
+ error('Missing dependency ' + d +
+ ' for library ' + lib_name)
+ endif
shared_deps += [get_variable('shared_rte_' + d)]
static_deps += [get_variable('static_rte_' + d)]
endforeach
@@ -95,7 +107,7 @@ foreach l:libraries
# then use pre-build objects to build shared lib
sources = []
- objs += static_lib.extract_all_objects()
+ objs += static_lib.extract_all_objects(recursive: false)
version_map = '@0@/@1@/rte_@2@_version.map'.format(
meson.current_source_dir(), dir_name, name)
shared_lib = shared_library(libname,
diff --git a/meson.build b/meson.build
index 0d993138..e718972f 100644
--- a/meson.build
+++ b/meson.build
@@ -2,7 +2,7 @@
# Copyright(c) 2017 Intel Corporation
project('DPDK', 'C',
- version: '18.02.0',
+ version: '18.08.0',
license: 'BSD',
default_options: ['buildtype=release', 'default_library=static'],
meson_version: '>= 0.41'
@@ -39,6 +39,11 @@ if get_option('examples') != ''
subdir('examples')
endif
+# build kernel modules if enabled
+if get_option('enable_kmods')
+ subdir('kernel')
+endif
+
# write the build config
build_cfg = 'rte_build_config.h'
configure_file(output: build_cfg,
diff --git a/meson_options.txt b/meson_options.txt
index 1a674aa5..c8432785 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -8,6 +8,8 @@ option('include_subdir_arch', type: 'string', value: '',
description: 'subdirectory where to install arch-dependent headers')
option('kernel_dir', type: 'string', value: '',
description: 'path to the kernel for building kernel modules')
+option('lib_musdk_dir', type: 'string', value: '',
+ description: 'path to the MUSDK library installation directory')
option('machine', type: 'string', value: 'native',
description: 'set the target machine type')
option('max_lcores', type: 'string', value: '128',
diff --git a/mk/arch/arm/rte.vars.mk b/mk/arch/arm/rte.vars.mk
index 2f8cf7cb..27b11476 100644
--- a/mk/arch/arm/rte.vars.mk
+++ b/mk/arch/arm/rte.vars.mk
@@ -1,33 +1,5 @@
-# BSD LICENSE
-#
-# Copyright (C) 2015 RehiveTech. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of RehiveTech nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) 2015 RehiveTech. All rights reserved.
ARCH ?= arm
CROSS ?=
diff --git a/mk/machine/armv7a/rte.vars.mk b/mk/machine/armv7a/rte.vars.mk
index 41c4c408..44ebd68d 100644
--- a/mk/machine/armv7a/rte.vars.mk
+++ b/mk/machine/armv7a/rte.vars.mk
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright (C) 2015 RehiveTech. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of RehiveTech nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) 2015 RehiveTech. All rights reserved.
#
# machine:
diff --git a/mk/machine/dpaa/rte.vars.mk b/mk/machine/dpaa/rte.vars.mk
index bddcb800..75df626f 100644
--- a/mk/machine/dpaa/rte.vars.mk
+++ b/mk/machine/dpaa/rte.vars.mk
@@ -32,3 +32,6 @@ MACHINE_CFLAGS += -march=armv8-a+crc
ifdef CONFIG_RTE_ARCH_ARM_TUNE
MACHINE_CFLAGS += -mtune=$(CONFIG_RTE_ARCH_ARM_TUNE:"%"=%)
endif
+
+# To avoid a TLS corruption issue.
+MACHINE_CFLAGS += -mtls-dialect=trad
diff --git a/mk/machine/dpaa2/rte.vars.mk b/mk/machine/dpaa2/rte.vars.mk
index 2fd2eac8..aaa03c42 100644
--- a/mk/machine/dpaa2/rte.vars.mk
+++ b/mk/machine/dpaa2/rte.vars.mk
@@ -32,3 +32,6 @@ MACHINE_CFLAGS += -march=armv8-a+crc
ifdef CONFIG_RTE_ARCH_ARM_TUNE
MACHINE_CFLAGS += -mtune=$(CONFIG_RTE_ARCH_ARM_TUNE:"%"=%)
endif
+
+# To avoid a TLS corruption issue.
+MACHINE_CFLAGS += -mtls-dialect=trad
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 3eb41d17..de33883b 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -1,34 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
-# Copyright(c) 2014-2015 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2017 Intel Corporation.
+# Copyright(c) 2014-2015 6WIND S.A.
include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
@@ -59,15 +31,19 @@ _LDLIBS-y += -L$(RTE_SDK_BIN)/lib
# Order is important: from higher level to lower level
#
_LDLIBS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += -lrte_flow_classify
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE) += --whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE) += -lrte_pipeline
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PIPELINE) += --no-whole-archive
+_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += --whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += -lrte_table
+_LDLIBS-$(CONFIG_RTE_LIBRTE_TABLE) += --no-whole-archive
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += --whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += -lrte_port
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PORT) += --no-whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_PDUMP) += -lrte_pdump
_LDLIBS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += -lrte_distributor
_LDLIBS-$(CONFIG_RTE_LIBRTE_IP_FRAG) += -lrte_ip_frag
-_LDLIBS-$(CONFIG_RTE_LIBRTE_GRO) += -lrte_gro
-_LDLIBS-$(CONFIG_RTE_LIBRTE_GSO) += -lrte_gso
_LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lrte_meter
_LDLIBS-$(CONFIG_RTE_LIBRTE_LPM) += -lrte_lpm
# librte_acl needs --whole-archive because of weak functions
@@ -80,12 +56,17 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_BITRATE) += -lrte_bitratestats
_LDLIBS-$(CONFIG_RTE_LIBRTE_LATENCY_STATS) += -lrte_latencystats
_LDLIBS-$(CONFIG_RTE_LIBRTE_POWER) += -lrte_power
-_LDLIBS-$(CONFIG_RTE_LIBRTE_TIMER) += -lrte_timer
_LDLIBS-$(CONFIG_RTE_LIBRTE_EFD) += -lrte_efd
+_LDLIBS-$(CONFIG_RTE_LIBRTE_BPF) += -lrte_bpf
+ifeq ($(CONFIG_RTE_LIBRTE_BPF_ELF),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_BPF) += -lelf
+endif
_LDLIBS-y += --whole-archive
_LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE) += -lrte_cfgfile
+_LDLIBS-$(CONFIG_RTE_LIBRTE_GRO) += -lrte_gro
+_LDLIBS-$(CONFIG_RTE_LIBRTE_GSO) += -lrte_gso
_LDLIBS-$(CONFIG_RTE_LIBRTE_HASH) += -lrte_hash
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMBER) += -lrte_member
_LDLIBS-$(CONFIG_RTE_LIBRTE_VHOST) += -lrte_vhost
@@ -96,8 +77,10 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_ETHER) += -lrte_ethdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_BBDEV) += -lrte_bbdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += -lrte_cryptodev
_LDLIBS-$(CONFIG_RTE_LIBRTE_SECURITY) += -lrte_security
+_LDLIBS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += -lrte_compressdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += -lrte_eventdev
_LDLIBS-$(CONFIG_RTE_LIBRTE_RAWDEV) += -lrte_rawdev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_TIMER) += -lrte_timer
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += -lrte_mempool
_LDLIBS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += -lrte_mempool_ring
_LDLIBS-$(CONFIG_RTE_LIBRTE_RING) += -lrte_ring
@@ -111,28 +94,44 @@ ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_KNI) += -lrte_kni
endif
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL),yy)
+_LDLIBS-y += -lrte_common_octeontx
+endif
+
_LDLIBS-$(CONFIG_RTE_LIBRTE_PCI_BUS) += -lrte_bus_pci
_LDLIBS-$(CONFIG_RTE_LIBRTE_VDEV_BUS) += -lrte_bus_vdev
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += -lrte_bus_dpaa
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += -lrte_bus_fslmc
+endif
ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n)
# plugins (link only if static libraries)
+_LDLIBS-$(CONFIG_RTE_DRIVER_MEMPOOL_BUCKET) += -lrte_mempool_bucket
_LDLIBS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += -lrte_mempool_stack
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += -lrte_mempool_dpaa
+endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += -lrte_mempool_dpaa2
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += -lrte_pmd_af_packet
_LDLIBS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += -lrte_pmd_ark
_LDLIBS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += -lrte_pmd_avf
_LDLIBS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += -lrte_pmd_avp
+_LDLIBS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += -lrte_pmd_axgbe
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x -lz
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += -lrte_pmd_bnxt
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
_LDLIBS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += -lrte_pmd_cxgbe
ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_BUS) += -lrte_bus_dpaa
-_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += -lrte_mempool_dpaa
_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += -lrte_pmd_dpaa
endif
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_pmd_dpaa2
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += -lrte_pmd_e1000
_LDLIBS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += -lrte_pmd_ena
_LDLIBS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += -lrte_pmd_enic
@@ -150,11 +149,11 @@ else
_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += -lrte_pmd_mlx4 -libverbs -lmlx4
endif
ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -ldl
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -ldl -lmnl
else
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -libverbs -lmlx5
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += -lrte_pmd_mlx5 -libverbs -lmlx5 -lmnl
endif
-_LDLIBS-$(CONFIG_RTE_LIBRTE_MRVL_PMD) += -lrte_pmd_mrvl -L$(LIBMUSDK_PATH)/lib -lmusdk
+_LDLIBS-$(CONFIG_RTE_LIBRTE_MVPP2_PMD) += -lrte_pmd_mvpp2 -L$(LIBMUSDK_PATH)/lib -lmusdk
_LDLIBS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += -lrte_pmd_nfp
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += -lrte_pmd_null
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += -lrte_pmd_pcap -lpcap
@@ -171,9 +170,15 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD) += -lrte_pmd_vdev_netvsc
_LDLIBS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += -lrte_pmd_virtio
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += -lrte_pmd_vhost
+ifeq ($(CONFIG_RTE_EAL_VFIO),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_IFC_PMD) += -lrte_pmd_ifc
+endif # $(CONFIG_RTE_EAL_VFIO)
endif # $(CONFIG_RTE_LIBRTE_VHOST)
_LDLIBS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += -lrte_pmd_vmxnet3_uio
+_LDLIBS-$(CONFIG_RTE_LIBRTE_VMBUS) += -lrte_bus_vmbus
+_LDLIBS-$(CONFIG_RTE_LIBRTE_NETVSC_PMD) += -lrte_pmd_netvsc
+
ifeq ($(CONFIG_RTE_LIBRTE_BBDEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL) += -lrte_pmd_bbdev_null
@@ -188,12 +193,15 @@ endif # CONFIG_RTE_LIBRTE_BBDEV
ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lrte_pmd_aesni_mb
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += -lIPSec_MB
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lrte_pmd_aesni_gcm
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += -lIPSec_MB
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += -lrte_pmd_ccp -lcrypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += -lrte_pmd_openssl -lcrypto
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += -lrte_pmd_null_crypto
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += -lrte_pmd_qat -lcrypto
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT_SYM) += -lrte_pmd_qat -lcrypto
+endif # CONFIG_RTE_LIBRTE_PMD_QAT
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -lrte_pmd_snow3g
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += -lrte_pmd_kasumi
@@ -202,27 +210,40 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += -lrte_pmd_zuc
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += -lrte_pmd_armv8
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += -L$(LIBMUSDK_PATH)/lib -lrte_pmd_mrvl_crypto -lmusdk
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += -L$(LIBMUSDK_PATH)/lib -lrte_pmd_mvsam_crypto -lmusdk
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += -lrte_pmd_crypto_scheduler
-ifeq ($(CONFIG_RTE_LIBRTE_FSLMC_BUS),y)
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_pmd_dpaa2_sec
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_mempool_dpaa2
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += -lrte_bus_fslmc
endif # CONFIG_RTE_LIBRTE_FSLMC_BUS
-
ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += -lrte_bus_dpaa
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += -lrte_pmd_dpaa_sec
endif # CONFIG_RTE_LIBRTE_DPAA_BUS
-
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += -lrte_pmd_virtio_crypto
endif # CONFIG_RTE_LIBRTE_CRYPTODEV
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += -lrte_pmd_isal_comp
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += -lisal
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += -lrte_pmd_octeontx_zip
+# Link QAT driver if it has not been linked yet
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),n)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += -lrte_pmd_qat
+endif # CONFIG_RTE_LIBRTE_PMD_QAT_SYM
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += -lrte_pmd_zlib
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += -lz
+endif # CONFIG_RTE_LIBRTE_COMPRESSDEV
+
ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += -lrte_pmd_skeleton_event
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += -lrte_pmd_sw_event
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += -lrte_pmd_octeontx_ssovf
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV) += -lrte_pmd_dpaa_event
+endif # CONFIG_RTE_LIBRTE_DPAA_BUS
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV) += -lrte_pmd_dpaa2_event
+endif # CONFIG_RTE_LIBRTE_FSLMC_BUS
+
_LDLIBS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += -lrte_mempool_octeontx
_LDLIBS-$(CONFIG_RTE_LIBRTE_OCTEONTX_PMD) += -lrte_pmd_octeontx
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV) += -lrte_pmd_opdl_event
@@ -230,13 +251,16 @@ endif # CONFIG_RTE_LIBRTE_EVENTDEV
ifeq ($(CONFIG_RTE_LIBRTE_RAWDEV),y)
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV) += -lrte_pmd_skeleton_rawdev
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV) += -lrte_pmd_dpaa2_cmdif
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV) += -lrte_pmd_dpaa2_qdma
+endif # CONFIG_RTE_LIBRTE_FSLMC_BUS
+_LDLIBS-$(CONFIG_RTE_LIBRTE_IFPGA_BUS) += -lrte_bus_ifpga
+ifeq ($(CONFIG_RTE_LIBRTE_IFPGA_BUS),y)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV) += -lrte_pmd_ifpga_rawdev
+endif # CONFIG_RTE_LIBRTE_IFPGA_BUS
endif # CONFIG_RTE_LIBRTE_RAWDEV
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
-_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_bus_fslmc
-_LDLIBS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += -lrte_mempool_dpaa2
-endif # CONFIG_RTE_LIBRTE_DPAA2_PMD
-
endif # !CONFIG_RTE_BUILD_SHARED_LIBS
_LDLIBS-y += --no-whole-archive
@@ -248,6 +272,9 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
endif
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_USE_LIBBSD),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lbsd
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMBER) += -lm
diff --git a/mk/rte.extshared.mk b/mk/rte.extshared.mk
index 0d8abbe3..64269e1a 100644
--- a/mk/rte.extshared.mk
+++ b/mk/rte.extshared.mk
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2012-2013 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAKEFLAGS += --no-print-directory
diff --git a/mk/rte.extsubdir.mk b/mk/rte.extsubdir.mk
index e2ef0132..0f8ef94c 100644
--- a/mk/rte.extsubdir.mk
+++ b/mk/rte.extsubdir.mk
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014 6WIND S.A.
MAKEFLAGS += --no-print-directory
diff --git a/mk/rte.sdkbuild.mk b/mk/rte.sdkbuild.mk
index 53e0721e..5dc43e42 100644
--- a/mk/rte.sdkbuild.mk
+++ b/mk/rte.sdkbuild.mk
@@ -14,6 +14,7 @@ endif
-include $(RTE_SDK)/mk/exec-env/$(RTE_EXEC_ENV)/rte.custom.mk
buildtools: | lib
+kernel: | lib
drivers: | lib buildtools
app: | lib buildtools drivers
test: | lib buildtools drivers
diff --git a/mk/rte.sdkconfig.mk b/mk/rte.sdkconfig.mk
index 0664725e..d90d62cc 100644
--- a/mk/rte.sdkconfig.mk
+++ b/mk/rte.sdkconfig.mk
@@ -36,7 +36,6 @@ notemplate:
@echo "use T=template from the following list:"
@$(MAKE) -rR showconfigs | sed 's,^, ,'
-
.PHONY: defconfig
defconfig:
@$(MAKE) config T=$(shell \
@@ -47,15 +46,25 @@ defconfig:
print "arm-armv7a"} \
else if ($$0 == "ppc64") { \
print "ppc_64-power8"} \
+ else if ($$0 == "amd64") { \
+ print "x86_64-native"} \
else { \
- printf "%s-native", $$0} }')-$(shell \
+ printf "%s-native", $$0} }' \
+ )-$(shell \
uname | awk '{ \
if ($$0 == "Linux") { \
print "linuxapp"} \
else { \
- print "bsdapp"} }')-$(shell \
- ${CC} -v 2>&1 | \
- grep " version " | cut -d ' ' -f 1)
+ print "bsdapp"} }' \
+ )-$(shell \
+ ${CC} --version | grep -o 'cc\|gcc\|icc\|clang' | awk \
+ '{ \
+ if ($$1 == "cc") { \
+ print "gcc" } \
+ else { \
+ print $$1 } \
+ }' \
+ )
.PHONY: config
ifeq ($(RTE_CONFIG_TEMPLATE),)
diff --git a/mk/rte.sdkdoc.mk b/mk/rte.sdkdoc.mk
index bce57c5d..bd2e5763 100644
--- a/mk/rte.sdkdoc.mk
+++ b/mk/rte.sdkdoc.mk
@@ -1,34 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
-# Copyright(c) 2013-2015 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation.
+# Copyright(c) 2013-2015 6WIND S.A.
ifdef T
ifeq ("$(origin T)", "command line")
diff --git a/mk/rte.sdkexamples.mk b/mk/rte.sdkexamples.mk
index 111ce91d..5eeec364 100644
--- a/mk/rte.sdkexamples.mk
+++ b/mk/rte.sdkexamples.mk
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014 6WIND S.A.
# examples application are seen as external applications which are
# not part of SDK.
diff --git a/mk/rte.sdkinstall.mk b/mk/rte.sdkinstall.mk
index 4e97feff..8296e6db 100644
--- a/mk/rte.sdkinstall.mk
+++ b/mk/rte.sdkinstall.mk
@@ -1,34 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright 2015 6WIND S.A.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation.
+# Copyright 2015 6WIND S.A.
# Configuration, compilation and installation can be done at once
# with make install T=<config>
@@ -113,18 +85,22 @@ else
@echo Installation in $(DESTDIR)$(prefix)/ complete
endif
+# When installing we want recursive copies that preserve timestamps only,
+# with no preservation of user/group IDs or permissions.
+CP_FLAGS=-dR --preserve=timestamps
+TAR_X_FLAGS=--strip-components=1 --keep-newer-files --no-same-owner --no-same-permissions
+
install-runtime:
$(Q)$(call rte_mkdir, $(DESTDIR)$(libdir))
- $(Q)cp -a $O/lib/* $(DESTDIR)$(libdir)
+ $(Q)cp $(CP_FLAGS) $O/lib/* $(DESTDIR)$(libdir)
$(Q)$(call rte_mkdir, $(DESTDIR)$(bindir))
$(Q)tar -cf - -C $O --exclude 'app/*.map' \
--exclude app/dpdk-pmdinfogen \
--exclude 'app/cmdline*' --exclude app/test \
--exclude app/testacl --exclude app/testpipeline app | \
- tar -xf - -C $(DESTDIR)$(bindir) --strip-components=1 \
- --keep-newer-files
+ tar -xf - -C $(DESTDIR)$(bindir) $(TAR_X_FLAGS)
$(Q)$(call rte_mkdir, $(DESTDIR)$(datadir))
- $(Q)cp -a $(RTE_SDK)/usertools $(DESTDIR)$(datadir)
+ $(Q)cp $(CP_FLAGS) $(RTE_SDK)/usertools $(DESTDIR)$(datadir)
$(Q)$(call rte_mkdir, $(DESTDIR)$(sbindir))
$(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/usertools/dpdk-devbind.py, \
$(DESTDIR)$(sbindir)/dpdk-devbind)
@@ -132,30 +108,29 @@ install-runtime:
$(DESTDIR)$(bindir)/dpdk-pmdinfo)
ifneq ($(wildcard $O/doc/man/*/*.1),)
$(Q)$(call rte_mkdir, $(DESTDIR)$(mandir)/man1)
- $(Q)cp -a $O/doc/man/*/*.1 $(DESTDIR)$(mandir)/man1
+ $(Q)cp $(CP_FLAGS) $O/doc/man/*/*.1 $(DESTDIR)$(mandir)/man1
endif
ifneq ($(wildcard $O/doc/man/*/*.8),)
$(Q)$(call rte_mkdir, $(DESTDIR)$(mandir)/man8)
- $(Q)cp -a $O/doc/man/*/*.8 $(DESTDIR)$(mandir)/man8
+ $(Q)cp $(CP_FLAGS) $O/doc/man/*/*.8 $(DESTDIR)$(mandir)/man8
endif
install-kmod:
ifneq ($(wildcard $O/kmod/*),)
$(Q)$(call rte_mkdir, $(DESTDIR)$(kerneldir))
- $(Q)cp -a $O/kmod/* $(DESTDIR)$(kerneldir)
+ $(Q)cp $(CP_FLAGS) $O/kmod/* $(DESTDIR)$(kerneldir)
endif
install-sdk:
$(Q)$(call rte_mkdir, $(DESTDIR)$(includedir))
$(Q)tar -chf - -C $O include | \
- tar -xf - -C $(DESTDIR)$(includedir) --strip-components=1 \
- --keep-newer-files
+ tar -xf - -C $(DESTDIR)$(includedir) $(TAR_X_FLAGS)
$(Q)$(call rte_mkdir, $(DESTDIR)$(sdkdir))
- $(Q)cp -a $(RTE_SDK)/mk $(DESTDIR)$(sdkdir)
- $(Q)cp -a $(RTE_SDK)/buildtools $(DESTDIR)$(sdkdir)
+ $(Q)cp $(CP_FLAGS) $(RTE_SDK)/mk $(DESTDIR)$(sdkdir)
+ $(Q)cp $(CP_FLAGS) $(RTE_SDK)/buildtools $(DESTDIR)$(sdkdir)
$(Q)$(call rte_mkdir, $(DESTDIR)$(targetdir)/app)
- $(Q)cp -a $O/.config $(DESTDIR)$(targetdir)
- $(Q)cp -a $O/app/dpdk-pmdinfogen $(DESTDIR)$(targetdir)/app
+ $(Q)cp $(CP_FLAGS) $O/.config $(DESTDIR)$(targetdir)
+ $(Q)cp $(CP_FLAGS) $O/app/dpdk-pmdinfogen $(DESTDIR)$(targetdir)/app
$(Q)$(call rte_symlink, $(DESTDIR)$(includedir), $(DESTDIR)$(targetdir)/include)
$(Q)$(call rte_symlink, $(DESTDIR)$(libdir), $(DESTDIR)$(targetdir)/lib)
@@ -163,12 +138,11 @@ install-doc:
ifneq ($(wildcard $O/doc/html),)
$(Q)$(call rte_mkdir, $(DESTDIR)$(docdir))
$(Q)tar -cf - -C $O/doc --exclude 'html/guides/.*' html | \
- tar -xf - -C $(DESTDIR)$(docdir) --strip-components=1 \
- --keep-newer-files
+ tar -xf - -C $(DESTDIR)$(docdir) $(TAR_X_FLAGS)
endif
ifneq ($(wildcard $O/doc/*/*/*pdf),)
$(Q)$(call rte_mkdir, $(DESTDIR)$(docdir)/guides)
- $(Q)cp -a $O/doc/*/*/*pdf $(DESTDIR)$(docdir)/guides
+ $(Q)cp $(CP_FLAGS) $O/doc/*/*/*pdf $(DESTDIR)$(docdir)/guides
endif
$(Q)$(call rte_mkdir, $(DESTDIR)$(datadir))
- $(Q)cp -a $(RTE_SDK)/examples $(DESTDIR)$(datadir)
+ $(Q)cp $(CP_FLAGS) $(RTE_SDK)/examples $(DESTDIR)$(datadir)
diff --git a/mk/rte.sdkroot.mk b/mk/rte.sdkroot.mk
index f43cc782..18c88017 100644
--- a/mk/rte.sdkroot.mk
+++ b/mk/rte.sdkroot.mk
@@ -68,8 +68,8 @@ config defconfig showconfigs showversion showversionum:
cscope gtags tags etags:
$(Q)$(RTE_SDK)/devtools/build-tags.sh $@ $T
-.PHONY: test test-basic test-fast test-ring test-mempool test-perf coverage
-test test-basic test-fast test-ring test-mempool test-perf coverage:
+.PHONY: test test-fast test-perf coverage test-drivers test-dump
+test test-fast test-perf coverage test-drivers test-dump:
$(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktest.mk $@
test: test-build
diff --git a/mk/rte.sdktest.mk b/mk/rte.sdktest.mk
index ee1fe0c7..29559280 100644
--- a/mk/rte.sdktest.mk
+++ b/mk/rte.sdktest.mk
@@ -18,14 +18,34 @@ DIR := $(shell basename $(RTE_OUTPUT))
#
# test: launch auto-tests, very simple for now.
#
-.PHONY: test test-basic test-fast test-perf coverage
+.PHONY: test test-fast test-perf test-drivers test-dump coverage
-PERFLIST=ring_perf,mempool_perf,memcpy_perf,hash_perf,timer_perf
-coverage: BLACKLIST=-$(PERFLIST)
-test-fast: BLACKLIST=-$(PERFLIST)
-test-perf: WHITELIST=$(PERFLIST)
+PERFLIST=ring_perf,mempool_perf,memcpy_perf,hash_perf,timer_perf,\
+ reciprocal_division,reciprocal_division_perf,lpm_perf,red_all,\
+ barrier,hash_multiwriter,timer_racecond,efd,hash_functions,\
+ eventdev_selftest_sw,member_perf,efd_perf,lpm6_perf,red_perf,\
+	distributor_perf,ring_pmd_perf,pmd_perf
+DRIVERSLIST=link_bonding,link_bonding_mode4,link_bonding_rssconf,\
+ cryptodev_sw_mrvl,cryptodev_dpaa2_sec,cryptodev_dpaa_sec,\
+ cryptodev_qat,cryptodev_aesni_mb,cryptodev_openssl,\
+ cryptodev_scheduler,cryptodev_aesni_gcm,cryptodev_null,\
+ cryptodev_sw_snow3g,cryptodev_sw_kasumi,cryptodev_sw_zuc
+DUMPLIST=dump_struct_sizes,dump_mempool,dump_malloc_stats,dump_devargs,\
+ dump_log_types,dump_ring,dump_physmem,dump_memzone
-test test-basic test-fast test-perf:
+SPACESTR:=
+SPACESTR+=
+STRIPPED_PERFLIST=$(subst $(SPACESTR),,$(PERFLIST))
+STRIPPED_DRIVERSLIST=$(subst $(SPACESTR),,$(DRIVERSLIST))
+STRIPPED_DUMPLIST=$(subst $(SPACESTR),,$(DUMPLIST))
+
+coverage: BLACKLIST=-$(STRIPPED_PERFLIST)
+test-fast: BLACKLIST=-$(STRIPPED_PERFLIST),$(STRIPPED_DRIVERSLIST),$(STRIPPED_DUMPLIST)
+test-perf: WHITELIST=$(STRIPPED_PERFLIST)
+test-drivers: WHITELIST=$(STRIPPED_DRIVERSLIST)
+test-dump: WHITELIST=$(STRIPPED_DUMPLIST)
+
+test test-fast test-perf test-drivers test-dump:
@mkdir -p $(AUTOTEST_DIR) ; \
cd $(AUTOTEST_DIR) ; \
if [ -f $(RTE_OUTPUT)/app/test ]; then \
diff --git a/mk/rte.shared.mk b/mk/rte.shared.mk
index 4e680bc0..2b501ddb 100644
--- a/mk/rte.shared.mk
+++ b/mk/rte.shared.mk
@@ -1,32 +1,5 @@
-# BSD LICENSE
-#
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2012-2013 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
include $(RTE_SDK)/mk/internal/rte.install-pre.mk
diff --git a/mk/toolchain/gcc/rte.toolchain-compat.mk b/mk/toolchain/gcc/rte.toolchain-compat.mk
index 9f17131f..1e4434fa 100644
--- a/mk/toolchain/gcc/rte.toolchain-compat.mk
+++ b/mk/toolchain/gcc/rte.toolchain-compat.mk
@@ -12,8 +12,14 @@
GCC_MAJOR = $(shell echo __GNUC__ | $(CC) -E -x c - | tail -n 1)
GCC_MINOR = $(shell echo __GNUC_MINOR__ | $(CC) -E -x c - | tail -n 1)
+GCC_PATCHLEVEL = $(shell echo __GNUC_PATCHLEVEL__ | $(CC) -E -x c - | tail -n 1)
GCC_VERSION = $(GCC_MAJOR)$(GCC_MINOR)
+HOST_GCC_MAJOR = $(shell echo __GNUC__ | $(HOSTCC) -E -x c - | tail -n 1)
+HOST_GCC_MINOR = $(shell echo __GNUC_MINOR__ | $(HOSTCC) -E -x c - | tail -n 1)
+HOST_GCC_PATCHLEVEL = $(shell echo __GNUC_PATCHLEVEL__ | $(HOSTCC) -E -x c - | tail -n 1)
+HOST_GCC_VERSION = $(HOST_GCC_MAJOR)$(HOST_GCC_MINOR)
+
# if GCC is older than 4.x
ifeq ($(shell test $(GCC_VERSION) -lt 40 && echo 1), 1)
MACHINE_CFLAGS =
@@ -73,4 +79,11 @@ else
CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV=d
endif
+	# Disable the octeontx event, mempool and net PMDs for gcc < 4.8.6
+ ifeq ($(shell test $(GCC_VERSION)$(GCC_PATCHLEVEL) -lt 486 && echo 1), 1)
+ CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=d
+ CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=d
+ CONFIG_RTE_LIBRTE_OCTEONTX_PMD=d
+ endif
+
endif
diff --git a/mk/toolchain/gcc/rte.vars.mk b/mk/toolchain/gcc/rte.vars.mk
index 7e4531ba..d8b99faf 100644
--- a/mk/toolchain/gcc/rte.vars.mk
+++ b/mk/toolchain/gcc/rte.vars.mk
@@ -71,6 +71,15 @@ ifeq ($(shell test $(GCC_VERSION) -lt 47 && echo 1), 1)
WERROR_FLAGS += -Wno-uninitialized
endif
+HOST_WERROR_FLAGS := $(WERROR_FLAGS)
+
+ifeq ($(shell test $(HOST_GCC_VERSION) -gt 70 && echo 1), 1)
+# Tell GCC only to error for switch fallthroughs without a suitable comment
+HOST_WERROR_FLAGS += -Wimplicit-fallthrough=2
+# Ignore errors for snprintf truncation
+HOST_WERROR_FLAGS += -Wno-format-truncation
+endif
+
ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
# Tell GCC only to error for switch fallthroughs without a suitable comment
WERROR_FLAGS += -Wimplicit-fallthrough=2
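A minimal C sketch (hypothetical, not part of the patch) of what -Wimplicit-fallthrough=2 accepts: a fallthrough is tolerated only when the preceding comment matches the level-2 regex (e.g. "fall through"); an unannotated fallthrough becomes an error under -Werror.

static int
classify(int c)
{
	int score = 0;

	switch (c) {
	case 0:
		score++;
		/* fall through - comment matched by the level-2 regex */
	case 1:
		score++;
		break;
	default:
		score = -1;
		break;
	}
	return score;
}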
diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec
index 4d3b5745..4fd927b3 100644
--- a/pkg/dpdk.spec
+++ b/pkg/dpdk.spec
@@ -1,36 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2014 6WIND S.A.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# - Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# - Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# - Neither the name of 6WIND S.A. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-# OF THE POSSIBILITY OF SUCH DAMAGE.
Name: dpdk
-Version: 18.02
+Version: 18.08
Release: 1
Packager: packaging@6wind.com
URL: http://dpdk.org
diff --git a/test/bpf/dummy.c b/test/bpf/dummy.c
new file mode 100644
index 00000000..5851469e
--- /dev/null
+++ b/test/bpf/dummy.c
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * Does nothing; always returns success.
+ * Used to measure BPF infrastructure overhead.
+ * To compile:
+ * clang -O2 -target bpf -c dummy.c
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+
+uint64_t
+entry(void *arg)
+{
+ return 1;
+}
diff --git a/test/bpf/mbuf.h b/test/bpf/mbuf.h
new file mode 100644
index 00000000..f24f908d
--- /dev/null
+++ b/test/bpf/mbuf.h
@@ -0,0 +1,578 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation.
+ * Copyright 2014 6WIND S.A.
+ */
+
+/*
+ * Snippet from dpdk.org rte_mbuf.h.
+ * Used to provide BPF programs with information about the rte_mbuf layout.
+ */
+
+#ifndef _MBUF_H_
+#define _MBUF_H_
+
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Packet Offload Features Flags. It also carries packet type information.
+ * Critical resources. Both RX and TX share these bits. Be cautious about any change.
+ *
+ * - RX flags start at bit position zero, and get added to the left of previous
+ * flags.
+ * - The most-significant 3 bits are reserved for generic mbuf flags
+ * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags get
+ * added to the right of the previously defined flags i.e. they should count
+ * downwards, not upwards.
+ *
+ * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
+ * rte_get_tx_ol_flag_name().
+ */
+
+/**
+ * RX packet is an 802.1q VLAN packet. This flag was set by PMDs when
+ * the packet is recognized as a VLAN, but the behavior between PMDs
+ * was not the same. This flag is kept for some time to avoid breaking
+ * applications and should be replaced by PKT_RX_VLAN_STRIPPED.
+ */
+#define PKT_RX_VLAN_PKT (1ULL << 0)
+
+#define PKT_RX_RSS_HASH (1ULL << 1)
+/**< RX packet with RSS hash result. */
+#define PKT_RX_FDIR (1ULL << 2)
+/**< RX packet with FDIR match indicate. */
+
+/**
+ * Deprecated.
+ * Checking this flag alone is deprecated: check the 2 bits of
+ * PKT_RX_L4_CKSUM_MASK.
+ * This flag was set when the L4 checksum of a packet was detected as
+ * wrong by the hardware.
+ */
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+
+/**
+ * Deprecated.
+ * Checking this flag alone is deprecated: check the 2 bits of
+ * PKT_RX_IP_CKSUM_MASK.
+ * This flag was set when the IP checksum of a packet was detected as
+ * wrong by the hardware.
+ */
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+
+#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5)
+/**< External IP header checksum error. */
+
+/**
+ * A vlan has been stripped by the hardware and its tci is saved in
+ * mbuf->vlan_tci. This can only happen if vlan stripping is enabled
+ * in the RX configuration of the PMD.
+ */
+#define PKT_RX_VLAN_STRIPPED (1ULL << 6)
+
+/**
+ * Mask of bits used to determine the status of RX IP checksum.
+ * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
+ * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
+ * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
+ * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
+ * data, but the integrity of the IP header is verified.
+ */
+#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))
+
+#define PKT_RX_IP_CKSUM_UNKNOWN 0
+#define PKT_RX_IP_CKSUM_BAD (1ULL << 4)
+#define PKT_RX_IP_CKSUM_GOOD (1ULL << 7)
+#define PKT_RX_IP_CKSUM_NONE ((1ULL << 4) | (1ULL << 7))
+
+/**
+ * Mask of bits used to determine the status of RX L4 checksum.
+ * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
+ * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
+ * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
+ * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
+ * data, but the integrity of the L4 data is verified.
+ */
+#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))
+
+#define PKT_RX_L4_CKSUM_UNKNOWN 0
+#define PKT_RX_L4_CKSUM_BAD (1ULL << 3)
+#define PKT_RX_L4_CKSUM_GOOD (1ULL << 8)
+#define PKT_RX_L4_CKSUM_NONE ((1ULL << 3) | (1ULL << 8))
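Since testing PKT_RX_IP_CKSUM_BAD or PKT_RX_L4_CKSUM_BAD alone is deprecated (see the comments above), here is a minimal sketch of the intended 2-bit check; the helper is hypothetical and assumes the struct rte_mbuf defined later in this header:

static inline int
rx_cksum_ok(const struct rte_mbuf *m)
{
	/* compare the whole 2-bit status field, not a single bit */
	if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
		return 0;
	if ((m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
		return 0;
	return 1;
}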
+
+#define PKT_RX_IEEE1588_PTP (1ULL << 9)
+/**< RX IEEE1588 L2 Ethernet PT Packet. */
+#define PKT_RX_IEEE1588_TMST (1ULL << 10)
+/**< RX IEEE1588 L2/L4 timestamped packet.*/
+#define PKT_RX_FDIR_ID (1ULL << 13)
+/**< FD id reported if FDIR match. */
+#define PKT_RX_FDIR_FLX (1ULL << 14)
+/**< Flexible bytes reported if FDIR match. */
+
+/**
+ * The 2 vlans have been stripped by the hardware and their tci are
+ * saved in mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
+ * This can only happen if vlan stripping is enabled in the RX
+ * configuration of the PMD. If this flag is set, PKT_RX_VLAN_STRIPPED
+ * must also be set.
+ */
+#define PKT_RX_QINQ_STRIPPED (1ULL << 15)
+
+/**
+ * Deprecated.
+ * RX packet with double VLAN stripped.
+ * This flag is replaced by PKT_RX_QINQ_STRIPPED.
+ */
+#define PKT_RX_QINQ_PKT PKT_RX_QINQ_STRIPPED
+
+/**
+ * When packets are coalesced by a hardware or virtual driver, this flag
+ * can be set in the RX mbuf, meaning that the m->tso_segsz field is
+ * valid and is set to the segment size of original packets.
+ */
+#define PKT_RX_LRO (1ULL << 16)
+
+/**
+ * Indicate that the timestamp field in the mbuf is valid.
+ */
+#define PKT_RX_TIMESTAMP (1ULL << 17)
+
+/* add new RX flags here */
+
+/* add new TX flags here */
+
+/**
+ * Offload the MACsec. This flag must be set by the application to enable
+ * this offload feature for a packet to be transmitted.
+ */
+#define PKT_TX_MACSEC (1ULL << 44)
+
+/**
+ * Bits 45:48 used for the tunnel type.
+ * When doing Tx offload like TSO or checksum, the HW needs to configure the
+ * tunnel type into the HW descriptors.
+ */
+#define PKT_TX_TUNNEL_VXLAN (0x1ULL << 45)
+#define PKT_TX_TUNNEL_GRE (0x2ULL << 45)
+#define PKT_TX_TUNNEL_IPIP (0x3ULL << 45)
+#define PKT_TX_TUNNEL_GENEVE (0x4ULL << 45)
+/**< TX packet with MPLS-in-UDP RFC 7510 header. */
+#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
+/* add new TX TUNNEL type here */
+#define PKT_TX_TUNNEL_MASK (0xFULL << 45)
+
+/**
+ * Second VLAN insertion (QinQ) flag.
+ */
+#define PKT_TX_QINQ_PKT (1ULL << 49)
+/**< TX packet with double VLAN inserted. */
+
+/**
+ * TCP segmentation offload. To enable this offload feature for a
+ * packet to be transmitted on hardware supporting TSO:
+ * - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies
+ * PKT_TX_TCP_CKSUM)
+ * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ * - if it's IPv4, set the PKT_TX_IP_CKSUM flag and write the IP checksum
+ * to 0 in the packet
+ * - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz
+ * - calculate the pseudo header checksum without taking ip_len in account,
+ * and set it in the TCP header. Refer to rte_ipv4_phdr_cksum() and
+ * rte_ipv6_phdr_cksum() that can be used as helpers.
+ */
+#define PKT_TX_TCP_SEG (1ULL << 50)
+
+#define PKT_TX_IEEE1588_TMST (1ULL << 51)
+/**< TX IEEE1588 packet to timestamp. */
+
+/**
+ * Bits 52+53 used for L4 packet type with checksum enabled: 00: Reserved,
+ * 01: TCP checksum, 10: SCTP checksum, 11: UDP checksum. To use hardware
+ * L4 checksum offload, the user needs to:
+ * - fill l2_len and l3_len in mbuf
+ * - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM
+ * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6
+ * - calculate the pseudo header checksum and set it in the L4 header (only
+ * for TCP or UDP). See rte_ipv4_phdr_cksum() and rte_ipv6_phdr_cksum().
+ * For SCTP, set the crc field to 0.
+ */
+#define PKT_TX_L4_NO_CKSUM (0ULL << 52)
+/**< Disable L4 cksum of TX pkt. */
+#define PKT_TX_TCP_CKSUM (1ULL << 52)
+/**< TCP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_SCTP_CKSUM (2ULL << 52)
+/**< SCTP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_UDP_CKSUM (3ULL << 52)
+/**< UDP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_L4_MASK (3ULL << 52)
+/**< Mask for L4 cksum offload request. */
+
+/**
+ * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should
+ * also be set by the application, although a PMD will only check
+ * PKT_TX_IP_CKSUM.
+ * - set the IP checksum field in the packet to 0
+ * - fill the mbuf offload information: l2_len, l3_len
+ */
+#define PKT_TX_IP_CKSUM (1ULL << 54)
+
+/**
+ * Packet is IPv4. This flag must be set when using any offload feature
+ * (TSO, L3 or L4 checksum) to tell the NIC that the packet is an IPv4
+ * packet. If the packet is a tunneled packet, this flag is related to
+ * the inner headers.
+ */
+#define PKT_TX_IPV4 (1ULL << 55)
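A hedged sketch of the TSO recipe spelled out in the comments above, for an IPv4/TCP packet without TCP options. The helper name is hypothetical; it assumes struct ether_hdr, struct ipv4_hdr, struct tcp_hdr and rte_ipv4_phdr_cksum() from the regular DPDK headers (rte_ether.h, rte_ip.h, rte_tcp.h):

static inline void
prep_ipv4_tso(struct rte_mbuf *m, struct ipv4_hdr *ip, struct tcp_hdr *tcp,
	uint16_t mss)
{
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = sizeof(*ip);
	m->l4_len = sizeof(*tcp);	/* no TCP options assumed */
	m->tso_segsz = mss;
	ip->hdr_checksum = 0;		/* hardware fills in the IP checksum */
	/* pseudo-header checksum without ip_len, as PKT_TX_TCP_SEG is set */
	tcp->cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}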
+
+/**
+ * Packet is IPv6. This flag must be set when using an offload feature
+ * (TSO or L4 checksum) to tell the NIC that the packet is an IPv6
+ * packet. If the packet is a tunneled packet, this flag is related to
+ * the inner headers.
+ */
+#define PKT_TX_IPV6 (1ULL << 56)
+
+#define PKT_TX_VLAN_PKT (1ULL << 57)
+/**< TX packet is an 802.1q VLAN packet. */
+
+/**
+ * Offload the IP checksum of an external header in the hardware. The
+ * flag PKT_TX_OUTER_IPV4 should also be set by the application, although
+ * a PMD will only check PKT_TX_IP_CKSUM. The IP checksum field in the
+ * packet must be set to 0.
+ * - set the outer IP checksum field in the packet to 0
+ * - fill the mbuf offload information: outer_l2_len, outer_l3_len
+ */
+#define PKT_TX_OUTER_IP_CKSUM (1ULL << 58)
+
+/**
+ * Packet outer header is IPv4. This flag must be set when using any
+ * outer offload feature (L3 or L4 checksum) to tell the NIC that the
+ * outer header of the tunneled packet is an IPv4 packet.
+ */
+#define PKT_TX_OUTER_IPV4 (1ULL << 59)
+
+/**
+ * Packet outer header is IPv6. This flag must be set when using any
+ * outer offload feature (L4 checksum) to tell the NIC that the outer
+ * header of the tunneled packet is an IPv6 packet.
+ */
+#define PKT_TX_OUTER_IPV6 (1ULL << 60)
+
+/**
+ * Bitmask of all supported packet Tx offload features flags,
+ * which can be set for packet.
+ */
+#define PKT_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_IEEE1588_TMST | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK | \
+ PKT_TX_MACSEC)
+
+#define __RESERVED (1ULL << 61) /**< reserved for future mbuf use */
+
+#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */
+
+/* Use final bit of flags to indicate a control mbuf */
+#define CTRL_MBUF_FLAG (1ULL << 63) /**< Mbuf contains control data */
+
+/** Alignment constraint of mbuf private area. */
+#define RTE_MBUF_PRIV_ALIGN 8
+
+/**
+ * Get the name of a RX offload flag
+ *
+ * @param mask
+ * The mask describing the flag.
+ * @return
+ * The name of this flag, or NULL if it's not a valid RX flag.
+ */
+const char *rte_get_rx_ol_flag_name(uint64_t mask);
+
+/**
+ * Dump the list of RX offload flags in a buffer
+ *
+ * @param mask
+ * The mask describing the RX flags.
+ * @param buf
+ * The output buffer.
+ * @param buflen
+ * The length of the buffer.
+ * @return
+ * 0 on success, (-1) on error.
+ */
+int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
+/**
+ * Get the name of a TX offload flag
+ *
+ * @param mask
+ * The mask describing the flag. Usually only one bit must be set.
+ * Several bits can be given if they belong to the same mask.
+ * Ex: PKT_TX_L4_MASK.
+ * @return
+ * The name of this flag, or NULL if it's not a valid TX flag.
+ */
+const char *rte_get_tx_ol_flag_name(uint64_t mask);
+
+/**
+ * Dump the list of TX offload flags in a buffer
+ *
+ * @param mask
+ * The mask describing the TX flags.
+ * @param buf
+ * The output buffer.
+ * @param buflen
+ * The length of the buffer.
+ * @return
+ * 0 on success, (-1) on error.
+ */
+int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
+
+/**
+ * Some NICs need at least a 2KB buffer to RX a standard Ethernet frame
+ * without splitting it into multiple segments.
+ * So, for mbufs that are planned to be involved in RX/TX, the recommended
+ * minimal buffer length is 2KB + RTE_PKTMBUF_HEADROOM.
+ */
+#define RTE_MBUF_DEFAULT_DATAROOM 2048
+#define RTE_MBUF_DEFAULT_BUF_SIZE \
+ (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)
+
+/* define a set of marker types that can be used to refer to set points in the
+ * mbuf.
+ */
+__extension__
+typedef void *MARKER[0]; /**< generic marker for a point in a structure */
+__extension__
+typedef uint8_t MARKER8[0]; /**< generic marker with 1B alignment */
+__extension__
+typedef uint64_t MARKER64[0];
+/**< marker that allows us to overwrite 8 bytes with a single assignment */
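The zero-size markers above let code name a position in the struct without consuming space; for example, an RX path can reinitialize the whole 8-byte rearm region in one store (hypothetical helper, assuming the struct rte_mbuf below):

static inline void
mbuf_rearm(struct rte_mbuf *m, uint64_t rearm_template)
{
	/* single 8-byte store covering data_off, refcnt, nb_segs and port */
	*(uint64_t *)&m->rearm_data = rearm_template;
}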
+
+typedef struct {
+ volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * The generic rte_mbuf, containing a packet mbuf.
+ */
+struct rte_mbuf {
+ MARKER cacheline0;
+
+ void *buf_addr; /**< Virtual address of segment buffer. */
+ /**
+ * Physical address of segment buffer.
+ * Force alignment to 8-bytes, so as to ensure we have the exact
+ * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
+ * working on vector drivers easier.
+ */
+ phys_addr_t buf_physaddr __rte_aligned(sizeof(phys_addr_t));
+
+ /* next 8 bytes are initialised on RX descriptor rearm */
+ MARKER64 rearm_data;
+ uint16_t data_off;
+
+ /**
+	 * Reference counter. Its size should be at least equal to the size
+	 * of the port field (16 bits), to support zero-copy broadcast.
+ * It should only be accessed using the following functions:
+ * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
+ * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
+ * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC
+ * config option.
+ */
+ RTE_STD_C11
+ union {
+ rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
+ uint16_t refcnt;
+ /**< Non-atomically accessed refcnt */
+ };
+ uint16_t nb_segs; /**< Number of segments. */
+
+ /** Input port (16 bits to support more than 256 virtual ports). */
+ uint16_t port;
+
+ uint64_t ol_flags; /**< Offload features. */
+
+ /* remaining bytes are set on RX when pulling packet from descriptor */
+ MARKER rx_descriptor_fields1;
+
+ /*
+ * The packet type, which is the combination of outer/inner L2, L3, L4
+	 * and tunnel types. The packet_type describes the data actually present
+	 * in the mbuf. Example: if vlan stripping is enabled, a received vlan packet
+ * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
+ * vlan is stripped from the data.
+ */
+ RTE_STD_C11
+ union {
+ uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
+ struct {
+ uint32_t l2_type:4; /**< (Outer) L2 type. */
+ uint32_t l3_type:4; /**< (Outer) L3 type. */
+ uint32_t l4_type:4; /**< (Outer) L4 type. */
+ uint32_t tun_type:4; /**< Tunnel type. */
+ uint32_t inner_l2_type:4; /**< Inner L2 type. */
+ uint32_t inner_l3_type:4; /**< Inner L3 type. */
+ uint32_t inner_l4_type:4; /**< Inner L4 type. */
+ };
+ };
+
+ uint32_t pkt_len; /**< Total pkt len: sum of all segments. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+ /** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */
+ uint16_t vlan_tci;
+
+ union {
+ uint32_t rss; /**< RSS hash result if RSS enabled */
+ struct {
+ RTE_STD_C11
+ union {
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ };
+ uint32_t lo;
+ /**< Second 4 flexible bytes */
+ };
+ uint32_t hi;
+ /**< First 4 flexible bytes or FD ID, dependent on
+ * PKT_RX_FDIR_* flag in ol_flags.
+ */
+ } fdir; /**< Filter identifier if FDIR enabled */
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } sched; /**< Hierarchical scheduler */
+ uint32_t usr;
+ /**< User defined tags. See rte_distributor_process() */
+ } hash; /**< hash information */
+
+ /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
+ uint16_t vlan_tci_outer;
+
+ uint16_t buf_len; /**< Length of segment buffer. */
+
+ /** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference
+ * are not normalized but are always the same for a given port.
+ */
+ uint64_t timestamp;
+
+ /* second cache line - fields only used in slow path or on TX */
+ MARKER cacheline1 __rte_cache_min_aligned;
+
+ RTE_STD_C11
+ union {
+ void *userdata; /**< Can be used for external metadata */
+ uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
+ };
+
+ struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
+ struct rte_mbuf *next; /**< Next segment of scattered packet. */
+
+ /* fields to support TX offloads */
+ RTE_STD_C11
+ union {
+ uint64_t tx_offload; /**< combined for easy fetch */
+ __extension__
+ struct {
+ uint64_t l2_len:7;
+ /**< L2 (MAC) Header Length for non-tunneling pkt.
+ * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
+ */
+ uint64_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
+ uint64_t tso_segsz:16; /**< TCP TSO segment size */
+
+ /* fields for TX offloading of tunnels */
+ uint64_t outer_l3_len:9;
+ /**< Outer L3 (IP) Hdr Length. */
+ uint64_t outer_l2_len:7;
+ /**< Outer L2 (MAC) Hdr Length. */
+
+ /* uint64_t unused:8; */
+ };
+ };
+
+ /** Size of the application private data. In case of an indirect
+ * mbuf, it stores the direct mbuf private data size.
+ */
+ uint16_t priv_size;
+
+ /** Timesync flags for use with IEEE1588. */
+ uint16_t timesync;
+
+ /** Sequence number. See also rte_reorder_insert(). */
+ uint32_t seqn;
+
+} __rte_cache_aligned;
+
+
+/**
+ * Returns TRUE if given mbuf is indirect, or FALSE otherwise.
+ */
+#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)
+
+/**
+ * Returns TRUE if given mbuf is direct, or FALSE otherwise.
+ */
+#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))
+
+/**
+ * Private data in case of pktmbuf pool.
+ *
+ * A structure that contains some pktmbuf_pool-specific data that are
+ * appended after the mempool structure (in private data).
+ */
+struct rte_pktmbuf_pool_private {
+ uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
+ uint16_t mbuf_priv_size; /**< Size of private area in each mbuf. */
+};
+
+/**
+ * A macro that points to an offset into the data in the mbuf.
+ *
+ * The returned pointer is cast to type t. Before using this
+ * function, the user must ensure that the first segment is large
+ * enough to accommodate its data.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param o
+ * The offset into the mbuf data.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_pktmbuf_mtod_offset(m, t, o) \
+ ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))
+
+/**
+ * A macro that points to the start of the data in the mbuf.
+ *
+ * The returned pointer is cast to type t. Before using this
+ * function, the user must ensure that the first segment is large
+ * enough to accommodate its data.
+ *
+ * @param m
+ * The packet mbuf.
+ * @param t
+ * The type to cast the result into.
+ */
+#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)
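A short usage sketch for the two accessor macros above; mb is a hypothetical struct rte_mbuf pointer, net/ethernet.h is assumed to be included, and the first segment is assumed to hold the full Ethernet header:

struct ether_header *eth = rte_pktmbuf_mtod(mb, struct ether_header *);
uint8_t *l3 = rte_pktmbuf_mtod_offset(mb, uint8_t *, sizeof(*eth));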
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MBUF_H_ */
diff --git a/test/bpf/t1.c b/test/bpf/t1.c
new file mode 100644
index 00000000..60f9434a
--- /dev/null
+++ b/test/bpf/t1.c
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * Accepts a pointer to the packet data of the first segment as an input parameter.
+ * Analog of tcpdump -s 1 -d 'dst 1.2.3.4 && udp && dst port 5000'
+ * (000) ldh [12]
+ * (001) jeq #0x800 jt 2 jf 12
+ * (002) ld [30]
+ * (003) jeq #0x1020304 jt 4 jf 12
+ * (004) ldb [23]
+ * (005) jeq #0x11 jt 6 jf 12
+ * (006) ldh [20]
+ * (007) jset #0x1fff jt 12 jf 8
+ * (008) ldxb 4*([14]&0xf)
+ * (009) ldh [x + 16]
+ * (010) jeq #0x1388 jt 11 jf 12
+ * (011) ret #1
+ * (012) ret #0
+ *
+ * To compile:
+ * clang -O2 -target bpf -c t1.c
+ */
+
+#include <stdint.h>
+#include <net/ethernet.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+
+uint64_t
+entry(void *pkt)
+{
+ struct ether_header *ether_header = (void *)pkt;
+
+ if (ether_header->ether_type != __builtin_bswap16(0x0800))
+ return 0;
+
+ struct iphdr *iphdr = (void *)(ether_header + 1);
+	if (iphdr->protocol != 17 || (iphdr->frag_off & __builtin_bswap16(0x1fff)) != 0 ||
+ iphdr->daddr != __builtin_bswap32(0x1020304))
+ return 0;
+
+ int hlen = iphdr->ihl * 4;
+ struct udphdr *udphdr = (void *)iphdr + hlen;
+
+ if (udphdr->dest != __builtin_bswap16(5000))
+ return 0;
+
+ return 1;
+}
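A hedged sketch of how such a filter might be loaded and run through the experimental rte_bpf API added in DPDK 18.05 (librte_bpf built with ELF support is assumed; the object and section names are illustrative):

#include <rte_bpf.h>

static uint64_t
run_t1(void *pkt_data, uint32_t pkt_len)
{
	struct rte_bpf_prm prm = {
		.prog_arg = {
			.type = RTE_BPF_ARG_PTR, /* entry() takes a raw data pointer */
			.size = pkt_len,
		},
	};
	struct rte_bpf *bpf = rte_bpf_elf_load(&prm, "t1.o", ".text");
	uint64_t rc;

	if (bpf == NULL)
		return 0;
	rc = rte_bpf_exec(bpf, pkt_data); /* 1 == match, 0 == no match */
	rte_bpf_destroy(bpf);
	return rc;
}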
diff --git a/test/bpf/t2.c b/test/bpf/t2.c
new file mode 100644
index 00000000..69d7a4fe
--- /dev/null
+++ b/test/bpf/t2.c
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * Accepts a pointer to a struct rte_mbuf as an input parameter.
+ * Cleans up the mbuf's vlan_tci and all related RX flags
+ * (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED).
+ * Doesn't touch the contents of the packet data.
+ * To compile:
+ * clang -O2 -I${RTE_SDK}/${RTE_TARGET}/include \
+ * -target bpf -Wno-int-to-void-pointer-cast -c t2.c
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <rte_config.h>
+#include "mbuf.h"
+
+uint64_t
+entry(void *pkt)
+{
+ struct rte_mbuf *mb;
+
+ mb = pkt;
+ mb->vlan_tci = 0;
+ mb->ol_flags &= ~(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+
+ return 1;
+}
diff --git a/test/bpf/t3.c b/test/bpf/t3.c
new file mode 100644
index 00000000..531b9cb8
--- /dev/null
+++ b/test/bpf/t3.c
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+/*
+ * eBPF program sample.
+ * Accepts a pointer to a struct rte_mbuf as an input parameter.
+ * Dumps the mbuf to stdout if it is an ARP packet (aka tcpdump 'arp').
+ * To compile:
+ * clang -O2 -I${RTE_SDK}/${RTE_TARGET}/include \
+ * -target bpf -Wno-int-to-void-pointer-cast -c t3.c
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <net/ethernet.h>
+#include <rte_config.h>
+#include "mbuf.h"
+
+extern void rte_pktmbuf_dump(FILE *, const struct rte_mbuf *, unsigned int);
+
+uint64_t
+entry(const void *pkt)
+{
+ const struct rte_mbuf *mb;
+ const struct ether_header *eth;
+
+ mb = pkt;
+ eth = rte_pktmbuf_mtod(mb, const struct ether_header *);
+
+ if (eth->ether_type == __builtin_bswap16(ETHERTYPE_ARP))
+ rte_pktmbuf_dump(stdout, mb, 64);
+
+ return 1;
+}
diff --git a/test/test-pipeline/init.c b/test/test-pipeline/init.c
index 19cf04a6..f33216c9 100644
--- a/test/test-pipeline/init.c
+++ b/test/test-pipeline/init.c
@@ -70,11 +70,7 @@ struct app_params app = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .header_split = 0, /* Header Split disabled */
- .hw_ip_checksum = 1, /* IP checksum offload enabled */
- .hw_vlan_filter = 0, /* VLAN filtering disabled */
- .jumbo_frame = 0, /* Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /* CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
.rss_conf = {
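The per-feature rxmode bits are replaced here by the DEV_RX_OFFLOAD_* flag API. A hedged sketch of how an application can clamp the requested offloads to what a port supports (function name hypothetical; in this release rte_eth_dev_info_get() returns void):

static void
clamp_rx_offloads(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	uint64_t wanted = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_CRC_STRIP;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* keep only the offloads this port actually advertises */
	conf->rxmode.offloads = wanted & dev_info.rx_offload_capa;
}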
diff --git a/test/test-pipeline/main.h b/test/test-pipeline/main.h
index f844e941..59dcfddb 100644
--- a/test/test-pipeline/main.h
+++ b/test/test-pipeline/main.h
@@ -107,6 +107,10 @@ uint64_t test_hash(void *key,
uint32_t key_size,
uint64_t seed);
+uint32_t test_hash_cuckoo(const void *key,
+ uint32_t key_size,
+ uint32_t seed);
+
void app_main_loop_worker(void);
void app_main_loop_worker_pipeline_stub(void);
void app_main_loop_worker_pipeline_hash(void);
diff --git a/test/test-pipeline/pipeline_hash.c b/test/test-pipeline/pipeline_hash.c
index 11e2402d..c2014723 100644
--- a/test/test-pipeline/pipeline_hash.c
+++ b/test/test-pipeline/pipeline_hash.c
@@ -15,6 +15,7 @@
#include <rte_port_ring.h>
#include <rte_table_hash.h>
#include <rte_hash.h>
+#include <rte_table_hash_cuckoo.h>
#include <rte_pipeline.h>
#include "main.h"
@@ -151,6 +152,17 @@ app_main_loop_worker_pipeline_hash(void) {
.seed = 0,
};
+ struct rte_table_hash_cuckoo_params table_hash_cuckoo_params = {
+ .name = "TABLE",
+ .key_size = key_size,
+ .key_offset = APP_METADATA_OFFSET(32),
+ .key_mask = NULL,
+ .n_keys = 1 << 24,
+ .n_buckets = 1 << 22,
+ .f_hash = test_hash_cuckoo,
+ .seed = 0,
+ };
+
/* Table configuration */
switch (app.pipeline_type) {
case e_APP_PIPELINE_HASH_KEY8_EXT:
@@ -298,7 +310,7 @@ app_main_loop_worker_pipeline_hash(void) {
{
struct rte_pipeline_table_params table_params = {
.ops = &rte_table_hash_cuckoo_ops,
- .arg_create = &table_hash_params,
+ .arg_create = &table_hash_cuckoo_params,
.f_action_hit = NULL,
.f_action_miss = NULL,
.arg_ah = NULL,
@@ -379,6 +391,18 @@ uint64_t test_hash(
return signature;
}
+uint32_t test_hash_cuckoo(
+ const void *key,
+ __attribute__((unused)) uint32_t key_size,
+ __attribute__((unused)) uint32_t seed)
+{
+ const uint32_t *k32 = key;
+ uint32_t ip_dst = rte_be_to_cpu_32(k32[0]);
+ uint32_t signature = (ip_dst >> 2) | ((ip_dst & 0x3) << 30);
+
+ return signature;
+}
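The signature computed above is a 32-bit rotate right by two of the destination address; a quick worked check with a hypothetical value:

uint32_t ip_dst = 0x00000006;
uint32_t sig = (ip_dst >> 2) | ((ip_dst & 0x3) << 30);
/* sig == 0x80000001 */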
+
void
app_main_loop_rx_metadata(void) {
uint32_t i, j;
diff --git a/test/test/Makefile b/test/test/Makefile
index a88cc38b..e6967bab 100644
--- a/test/test/Makefile
+++ b/test/test/Makefile
@@ -70,6 +70,7 @@ SRCS-y += test_memzone.c
SRCS-y += test_bitmap.c
SRCS-y += test_reciprocal_division.c
SRCS-y += test_reciprocal_division_perf.c
+SRCS-y += test_fbarray.c
SRCS-y += test_ring.c
SRCS-y += test_ring_perf.c
@@ -113,6 +114,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_functions.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_scaling.c
SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_multiwriter.c
+SRCS-$(CONFIG_RTE_LIBRTE_HASH) += test_hash_readwrite.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm.c
SRCS-$(CONFIG_RTE_LIBRTE_LPM) += test_lpm_perf.c
@@ -161,7 +163,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += test_distributor_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_REORDER) += test_reorder.c
-SRCS-y += test_devargs.c
SRCS-y += virtual_pmd.c
SRCS-y += packet_burst_generator.c
SRCS-$(CONFIG_RTE_LIBRTE_ACL) += test_acl.c
@@ -180,11 +181,18 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += test_pmd_ring_perf.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_blockcipher.c
SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += test_cryptodev_asym.c
+
+ifeq ($(CONFIG_RTE_COMPRESSDEV_TEST),y)
+SRCS-$(CONFIG_RTE_LIBRTE_COMPRESSDEV) += test_compressdev.c
+endif
ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
SRCS-y += test_eventdev.c
SRCS-y += test_event_ring.c
SRCS-y += test_event_eth_rx_adapter.c
+SRCS-y += test_event_timer_adapter.c
+SRCS-y += test_event_crypto_adapter.c
endif
ifeq ($(CONFIG_RTE_LIBRTE_RAWDEV),y)
@@ -193,6 +201,8 @@ endif
SRCS-$(CONFIG_RTE_LIBRTE_KVARGS) += test_kvargs.c
+SRCS-$(CONFIG_RTE_LIBRTE_BPF) += test_bpf.c
+
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
@@ -201,6 +211,11 @@ CFLAGS += $(WERROR_FLAGS)
CFLAGS += -D_GNU_SOURCE
LDLIBS += -lm
+ifeq ($(CONFIG_RTE_COMPRESSDEV_TEST),y)
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+LDLIBS += -lz
+endif
+endif
# Disable VTA for memcpy test
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
@@ -211,6 +226,8 @@ CFLAGS_test_memcpy_perf.o += -fno-var-tracking-assignments
# designated initializers.
ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
CFLAGS_test_eventdev_sw.o += -Wno-missing-field-initializers
+CFLAGS_test_event_timer_adapter.o += -Wno-missing-field-initializers
+CFLAGS_test_event_crypto_adapter.o += -Wno-missing-field-initializers
endif
endif
endif
diff --git a/test/test/autotest.py b/test/test/autotest.py
index 1cfd8cf2..12997fdf 100644
--- a/test/test/autotest.py
+++ b/test/test/autotest.py
@@ -36,14 +36,15 @@ cmdline = "%s -c f -n 4" % (sys.argv[1])
print(cmdline)
-runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist,
- test_whitelist)
+# How many workers to run tests with. FreeBSD doesn't support multiple primary
+# processes, so use 1 there and 4 otherwise. Ignored for non-parallel tests.
+n_processes = 1 if "bsdapp" in target else 4
-for test_group in autotest_data.parallel_test_group_list:
- runner.add_parallel_test_group(test_group)
+runner = autotest_runner.AutotestRunner(cmdline, target, test_blacklist,
+ test_whitelist, n_processes)
-for test_group in autotest_data.non_parallel_test_group_list:
- runner.add_non_parallel_test_group(test_group)
+runner.parallel_tests = autotest_data.parallel_test_list[:]
+runner.non_parallel_tests = autotest_data.non_parallel_test_list[:]
num_fails = runner.run_all_tests()
diff --git a/test/test/autotest_data.py b/test/test/autotest_data.py
index aacfe0a6..f68d9b11 100644
--- a/test/test/autotest_data.py
+++ b/test/test/autotest_data.py
@@ -3,465 +3,662 @@
# Test data for autotests
-from glob import glob
from autotest_test_funcs import *
-
-# quick and dirty function to find out number of sockets
-def num_sockets():
- result = len(glob("/sys/devices/system/node/node*"))
- if result == 0:
- return 1
- return result
-
-
-# Assign given number to each socket
-# e.g. 32 becomes 32,32 or 32,32,32,32
-def per_sockets(num):
- return ",".join([str(num)] * num_sockets())
-
# groups of tests that can be run in parallel
# the grouping has been found largely empirically
-parallel_test_group_list = [
- {
- "Prefix": "group_1",
- "Memory": per_sockets(8),
- "Tests":
- [
- {
- "Name": "Cycles autotest",
- "Command": "cycles_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Timer autotest",
- "Command": "timer_autotest",
- "Func": timer_autotest,
- "Report": None,
- },
- {
- "Name": "Debug autotest",
- "Command": "debug_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Errno autotest",
- "Command": "errno_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Meter autotest",
- "Command": "meter_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Common autotest",
- "Command": "common_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Resource autotest",
- "Command": "resource_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_2",
- "Memory": "16",
- "Tests":
- [
- {
- "Name": "Memory autotest",
- "Command": "memory_autotest",
- "Func": memory_autotest,
- "Report": None,
- },
- {
- "Name": "Read/write lock autotest",
- "Command": "rwlock_autotest",
- "Func": rwlock_autotest,
- "Report": None,
- },
- {
- "Name": "Logs autotest",
- "Command": "logs_autotest",
- "Func": logs_autotest,
- "Report": None,
- },
- {
- "Name": "CPU flags autotest",
- "Command": "cpuflags_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Version autotest",
- "Command": "version_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "EAL filesystem autotest",
- "Command": "eal_fs_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "EAL flags autotest",
- "Command": "eal_flags_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Hash autotest",
- "Command": "hash_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ],
- },
- {
- "Prefix": "group_3",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "LPM autotest",
- "Command": "lpm_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "LPM6 autotest",
- "Command": "lpm6_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Memcpy autotest",
- "Command": "memcpy_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Memzone autotest",
- "Command": "memzone_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "String autotest",
- "Command": "string_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Alarm autotest",
- "Command": "alarm_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_4",
- "Memory": per_sockets(128),
- "Tests":
- [
- {
- "Name": "PCI autotest",
- "Command": "pci_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Malloc autotest",
- "Command": "malloc_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Multi-process autotest",
- "Command": "multiprocess_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Mbuf autotest",
- "Command": "mbuf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Per-lcore autotest",
- "Command": "per_lcore_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Ring autotest",
- "Command": "ring_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_5",
- "Memory": "32",
- "Tests":
- [
- {
- "Name": "Spinlock autotest",
- "Command": "spinlock_autotest",
- "Func": spinlock_autotest,
- "Report": None,
- },
- {
- "Name": "Byte order autotest",
- "Command": "byteorder_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "TAILQ autotest",
- "Command": "tailq_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Command-line autotest",
- "Command": "cmdline_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Interrupts autotest",
- "Command": "interrupt_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_6",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Function reentrancy autotest",
- "Command": "func_reentrancy_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Mempool autotest",
- "Command": "mempool_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Atomics autotest",
- "Command": "atomic_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Prefetch autotest",
- "Command": "prefetch_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Red autotest",
- "Command": "red_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "group_7",
- "Memory": "64",
- "Tests":
- [
- {
- "Name": "PMD ring autotest",
- "Command": "ring_pmd_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Access list control autotest",
- "Command": "acl_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- {
- "Name": "Sched autotest",
- "Command": "sched_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
+parallel_test_list = [
+ {
+ "Name": "Cycles autotest",
+ "Command": "cycles_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Timer autotest",
+ "Command": "timer_autotest",
+ "Func": timer_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Debug autotest",
+ "Command": "debug_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Errno autotest",
+ "Command": "errno_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Meter autotest",
+ "Command": "meter_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Common autotest",
+ "Command": "common_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Resource autotest",
+ "Command": "resource_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memory autotest",
+ "Command": "memory_autotest",
+ "Func": memory_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Read/write lock autotest",
+ "Command": "rwlock_autotest",
+ "Func": rwlock_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Logs autotest",
+ "Command": "logs_autotest",
+ "Func": logs_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "CPU flags autotest",
+ "Command": "cpuflags_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Version autotest",
+ "Command": "version_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "EAL filesystem autotest",
+ "Command": "eal_fs_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "EAL flags autotest",
+ "Command": "eal_flags_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash autotest",
+ "Command": "hash_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "LPM autotest",
+ "Command": "lpm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "LPM6 autotest",
+ "Command": "lpm6_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memcpy autotest",
+ "Command": "memcpy_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memzone autotest",
+ "Command": "memzone_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "String autotest",
+ "Command": "string_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Alarm autotest",
+ "Command": "alarm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Malloc autotest",
+ "Command": "malloc_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Multi-process autotest",
+ "Command": "multiprocess_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mbuf autotest",
+ "Command": "mbuf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Per-lcore autotest",
+ "Command": "per_lcore_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Ring autotest",
+ "Command": "ring_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Spinlock autotest",
+ "Command": "spinlock_autotest",
+ "Func": spinlock_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Byte order autotest",
+ "Command": "byteorder_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "TAILQ autotest",
+ "Command": "tailq_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Command-line autotest",
+ "Command": "cmdline_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Interrupts autotest",
+ "Command": "interrupt_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Function reentrancy autotest",
+ "Command": "func_reentrancy_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mempool autotest",
+ "Command": "mempool_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Atomics autotest",
+ "Command": "atomic_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Prefetch autotest",
+ "Command": "prefetch_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Red autotest",
+ "Command": "red_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "PMD ring autotest",
+ "Command": "ring_pmd_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Access list control autotest",
+ "Command": "acl_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Sched autotest",
+ "Command": "sched_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Eventdev selftest octeontx",
+ "Command": "eventdev_selftest_octeontx",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Event ring autotest",
+ "Command": "event_ring_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Table autotest",
+ "Command": "table_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Flow classify autotest",
+ "Command": "flow_classify_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Event eth rx adapter autotest",
+ "Command": "event_eth_rx_adapter_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "User delay",
+ "Command": "user_delay_us",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Rawdev autotest",
+ "Command": "rawdev_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Kvargs autotest",
+ "Command": "kvargs_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Devargs autotest",
+ "Command": "devargs_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Link bonding autotest",
+ "Command": "link_bonding_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Link bonding mode4 autotest",
+ "Command": "link_bonding_mode4_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Link bonding rssconf autotest",
+ "Command": "link_bonding_rssconf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Crc autotest",
+ "Command": "crc_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Distributor autotest",
+ "Command": "distributor_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Reorder autotest",
+ "Command": "reorder_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Barrier autotest",
+ "Command": "barrier_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Bitmap test",
+ "Command": "bitmap_test",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash scaling autotest",
+ "Command": "hash_scaling_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash multiwriter autotest",
+ "Command": "hash_multiwriter_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Service autotest",
+ "Command": "service_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Timer racecond autotest",
+ "Command": "timer_racecond_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Member autotest",
+ "Command": "member_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+        "Name": "Efd autotest",
+ "Command": "efd_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Thash autotest",
+ "Command": "thash_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash function autotest",
+ "Command": "hash_functions_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev sw mrvl autotest",
+ "Command": "cryptodev_sw_mrvl_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev dpaa2 sec autotest",
+ "Command": "cryptodev_dpaa2_sec_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev dpaa sec autotest",
+ "Command": "cryptodev_dpaa_sec_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev qat autotest",
+ "Command": "cryptodev_qat_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev aesni mb autotest",
+ "Command": "cryptodev_aesni_mb_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev openssl autotest",
+ "Command": "cryptodev_openssl_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev scheduler autotest",
+ "Command": "cryptodev_scheduler_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev aesni gcm autotest",
+ "Command": "cryptodev_aesni_gcm_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev null autotest",
+ "Command": "cryptodev_null_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev sw snow3g autotest",
+ "Command": "cryptodev_sw_snow3g_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Cryptodev sw kasumi autotest",
+ "Command": "cryptodev_sw_kasumi_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+        "Name": "Cryptodev sw zuc autotest",
+ "Command": "cryptodev_sw_zuc_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Reciprocal division",
+ "Command": "reciprocal_division",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Red all",
+ "Command": "red_all",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Fbarray autotest",
+ "Command": "fbarray_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ #
+    # Please always keep all dump tests at the end and together!
+ #
+ {
+ "Name": "Dump physmem",
+ "Command": "dump_physmem",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump memzone",
+ "Command": "dump_memzone",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump struct sizes",
+ "Command": "dump_struct_sizes",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump mempool",
+ "Command": "dump_mempool",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump malloc stats",
+ "Command": "dump_malloc_stats",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump devargs",
+ "Command": "dump_devargs",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Dump log types",
+ "Command": "dump_log_types",
+ "Func": dump_autotest,
+ "Report": None,
+ },
+ {
+        "Name": "Dump ring",
+ "Command": "dump_ring",
+ "Func": dump_autotest,
+ "Report": None,
},
]
# tests that should not be run when any other tests are running
-non_parallel_test_group_list = [
-
+non_parallel_test_list = [
+ {
+ "Name": "Eventdev common autotest",
+ "Command": "eventdev_common_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Eventdev selftest sw",
+ "Command": "eventdev_selftest_sw",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "KNI autotest",
+ "Command": "kni_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Mempool performance autotest",
+ "Command": "mempool_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Memcpy performance autotest",
+ "Command": "memcpy_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Hash performance autotest",
+ "Command": "hash_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Power autotest",
+ "Command": "power_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Power ACPI cpufreq autotest",
+ "Command": "power_acpi_cpufreq_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
{
- "Prefix": "eventdev",
- "Memory": "512",
- "Tests":
- [
- {
- "Name": "Eventdev common autotest",
- "Command": "eventdev_common_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "eventdev_sw",
- "Memory": "512",
- "Tests":
- [
- {
- "Name": "Eventdev sw autotest",
- "Command": "eventdev_sw_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "kni",
- "Memory": "512",
- "Tests":
- [
- {
- "Name": "KNI autotest",
- "Command": "kni_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "mempool_perf",
- "Memory": per_sockets(256),
- "Tests":
- [
- {
- "Name": "Mempool performance autotest",
- "Command": "mempool_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "memcpy_perf",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Memcpy performance autotest",
- "Command": "memcpy_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "hash_perf",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Hash performance autotest",
- "Command": "hash_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "power",
- "Memory": "16",
- "Tests":
- [
- {
- "Name": "Power autotest",
- "Command": "power_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "power_acpi_cpufreq",
- "Memory": "16",
- "Tests":
- [
- {
- "Name": "Power ACPI cpufreq autotest",
- "Command": "power_acpi_cpufreq_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "power_kvm_vm",
- "Memory": "16",
- "Tests":
- [
- {
- "Name": "Power KVM VM autotest",
- "Command": "power_kvm_vm_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
- },
- {
- "Prefix": "timer_perf",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Timer performance autotest",
- "Command": "timer_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
+ "Name": "Power KVM VM autotest",
+ "Command": "power_kvm_vm_autotest",
+ "Func": default_autotest,
+ "Report": None,
},
+ {
+ "Name": "Timer performance autotest",
+ "Command": "timer_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Pmd perf autotest",
+ "Command": "pmd_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Ring pmd perf autotest",
+ "Command": "ring_pmd_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Distributor perf autotest",
+ "Command": "distributor_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+        "Name": "Red perf",
+ "Command": "red_perf",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Lpm6 perf autotest",
+ "Command": "lpm6_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Lpm perf autotest",
+ "Command": "lpm_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Efd perf autotest",
+ "Command": "efd_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Member perf autotest",
+ "Command": "member_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
+ },
+ {
+ "Name": "Reciprocal division perf",
+ "Command": "reciprocal_division_perf",
+ "Func": default_autotest,
+ "Report": None,
+ },
#
# Please always make sure that ring_perf is the last test!
#
{
- "Prefix": "ring_perf",
- "Memory": per_sockets(512),
- "Tests":
- [
- {
- "Name": "Ring performance autotest",
- "Command": "ring_perf_autotest",
- "Func": default_autotest,
- "Report": None,
- },
- ]
+ "Name": "Ring performance autotest",
+ "Command": "ring_perf_autotest",
+ "Func": default_autotest,
+ "Report": None,
},
]
diff --git a/test/test/autotest_runner.py b/test/test/autotest_runner.py
index a692f069..36941a40 100644
--- a/test/test/autotest_runner.py
+++ b/test/test/autotest_runner.py
@@ -3,18 +3,19 @@
# The main logic behind running autotests in parallel
+from __future__ import print_function
import StringIO
import csv
-import multiprocessing
+from multiprocessing import Pool, Queue
import pexpect
import re
import subprocess
import sys
import time
+import glob
+import os
# wait for prompt
-
-
def wait_prompt(child):
try:
child.sendline()
@@ -27,143 +28,150 @@ def wait_prompt(child):
else:
return False
-# run a test group
-# each result tuple in results list consists of:
-# result value (0 or -1)
-# result string
-# test name
-# total test run time (double)
-# raw test log
-# test report (if not available, should be None)
-#
-# this function needs to be outside AutotestRunner class
-# because otherwise Pool won't work (or rather it will require
-# quite a bit of effort to make it work).
+# get all valid NUMA nodes
+def get_numa_nodes():
+ return [
+ int(
+ re.match(r"node(\d+)", os.path.basename(node))
+ .group(1)
+ )
+ for node in glob.glob("/sys/devices/system/node/node*")
+ ]
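+# illustrative: on a two-socket Linux box the glob above matches node0 and
+# node1 and this returns [0, 1]; on systems without NUMA sysfs entries
+# (e.g. FreeBSD) it returns an empty list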
-def run_test_group(cmdline, test_group):
- results = []
- child = None
+
+# find the first (or any, really) CPU on a particular node. this will be used
+# to spread processes around NUMA nodes to avoid exhausting memory on any
+# single node
+def first_cpu_on_node(node_nr):
+ cpu_path = glob.glob("/sys/devices/system/node/node%d/cpu*" % node_nr)[0]
+ cpu_name = os.path.basename(cpu_path)
+ m = re.match(r"cpu(\d+)", cpu_name)
+ return int(m.group(1))
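+# illustrative (values hypothetical): for node 1 the glob may match
+# /sys/devices/system/node/node1/cpu8 first, making this return 8; glob
+# order is not sorted, so "first" here really means "any CPU on that node"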
+
+
+pool_child = None # per-process child
+
+
+# each worker needs its own unique command-line arguments, but the Pool API
+# doesn't allow passing different initializer arguments to each worker.
+# instead, we initialize every worker with a shared queue, and have each
+# worker dequeue its own command-line arguments from that queue
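+#
+# illustrative flow: the parent enqueues one (cmdline, prefix) tuple per
+# worker, e.g. ("./test -c f -n 4", "test0"), and each worker's initializer
+# dequeues exactly one, so every worker ends up with a unique --file-prefix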
+def pool_init(queue, result_queue):
+ global pool_child
+
+ cmdline, prefix = queue.get()
start_time = time.time()
- startuplog = None
+ name = ("Start %s" % prefix) if prefix != "" else "Start"
+
+ # use default prefix if no prefix was specified
+ prefix_cmdline = "--file-prefix=%s" % prefix if prefix != "" else ""
+
+ # append prefix to cmdline
+ cmdline = "%s %s" % (cmdline, prefix_cmdline)
+
+ # prepare logging of init
+ startuplog = StringIO.StringIO()
# run test app
try:
- # prepare logging of init
- startuplog = StringIO.StringIO()
- print >>startuplog, "\n%s %s\n" % ("=" * 20, test_group["Prefix"])
- print >>startuplog, "\ncmdline=%s" % cmdline
+ print("\n%s %s\n" % ("=" * 20, prefix), file=startuplog)
+ print("\ncmdline=%s" % cmdline, file=startuplog)
- child = pexpect.spawn(cmdline, logfile=startuplog)
+ pool_child = pexpect.spawn(cmdline, logfile=startuplog)
# wait for target to boot
- if not wait_prompt(child):
- child.close()
+ if not wait_prompt(pool_child):
+ pool_child.close()
- results.append((-1,
+ result = tuple((-1,
"Fail [No prompt]",
- "Start %s" % test_group["Prefix"],
+ name,
+ time.time() - start_time,
+ startuplog.getvalue(),
+ None))
+ pool_child = None
+ else:
+ result = tuple((0,
+ "Success",
+ name,
time.time() - start_time,
startuplog.getvalue(),
None))
-
- # mark all tests as failed
- for test in test_group["Tests"]:
- results.append((-1, "Fail [No prompt]", test["Name"],
- time.time() - start_time, "", None))
- # exit test
- return results
-
except:
- results.append((-1,
+ result = tuple((-1,
"Fail [Can't run]",
- "Start %s" % test_group["Prefix"],
+ name,
time.time() - start_time,
startuplog.getvalue(),
None))
+ pool_child = None
- # mark all tests as failed
- for t in test_group["Tests"]:
- results.append((-1, "Fail [Can't run]", t["Name"],
- time.time() - start_time, "", None))
- # exit test
- return results
-
- # startup was successful
- results.append((0, "Success", "Start %s" % test_group["Prefix"],
- time.time() - start_time, startuplog.getvalue(), None))
+ result_queue.put(result)
- # parse the binary for available test commands
- binary = cmdline.split()[0]
- stripped = 'not stripped' not in subprocess.check_output(['file', binary])
- if not stripped:
- symbols = subprocess.check_output(['nm', binary]).decode('utf-8')
- avail_cmds = re.findall('test_register_(\w+)', symbols)
- # run all tests in test group
- for test in test_group["Tests"]:
+# run a test
+# each result tuple in results list consists of:
+# result value (0 or -1)
+# result string
+# test name
+# total test run time (double)
+# raw test log
+# test report (if not available, should be None)
+#
+# this function needs to be outside AutotestRunner class because otherwise Pool
+# won't work (or rather it will require quite a bit of effort to make it work).
+def run_test(target, test):
+ global pool_child
- # create log buffer for each test
- # in multiprocessing environment, the logging would be
- # interleaved and will create a mess, hence the buffering
- logfile = StringIO.StringIO()
- child.logfile = logfile
+ if pool_child is None:
+ return -1, "Fail [No test process]", test["Name"], 0, "", None
- result = ()
+ # create log buffer for each test
+    # in a multiprocessing environment, the logging would be
+    # interleaved and would create a mess, hence the buffering
+ logfile = StringIO.StringIO()
+ pool_child.logfile = logfile
- # make a note when the test started
- start_time = time.time()
+ # make a note when the test started
+ start_time = time.time()
- try:
- # print test name to log buffer
- print >>logfile, "\n%s %s\n" % ("-" * 20, test["Name"])
+ try:
+ # print test name to log buffer
+ print("\n%s %s\n" % ("-" * 20, test["Name"]), file=logfile)
- # run test function associated with the test
- if stripped or test["Command"] in avail_cmds:
- result = test["Func"](child, test["Command"])
- else:
- result = (0, "Skipped [Not Available]")
+ # run test function associated with the test
+ result = test["Func"](pool_child, test["Command"])
- # make a note when the test was finished
- end_time = time.time()
+ # make a note when the test was finished
+ end_time = time.time()
- # append test data to the result tuple
- result += (test["Name"], end_time - start_time,
- logfile.getvalue())
+ log = logfile.getvalue()
- # call report function, if any defined, and supply it with
- # target and complete log for test run
- if test["Report"]:
- report = test["Report"](self.target, log)
+ # append test data to the result tuple
+ result += (test["Name"], end_time - start_time, log)
- # append report to results tuple
- result += (report,)
- else:
- # report is None
- result += (None,)
- except:
- # make a note when the test crashed
- end_time = time.time()
-
- # mark test as failed
- result = (-1, "Fail [Crash]", test["Name"],
- end_time - start_time, logfile.getvalue(), None)
- finally:
- # append the results to the results list
- results.append(result)
+ # call report function, if any defined, and supply it with
+ # target and complete log for test run
+ if test["Report"]:
+ report = test["Report"](target, log)
- # regardless of whether test has crashed, try quitting it
- try:
- child.sendline("quit")
- child.close()
- # if the test crashed, just do nothing instead
+ # append report to results tuple
+ result += (report,)
+ else:
+ # report is None
+ result += (None,)
except:
- # nop
- pass
+ # make a note when the test crashed
+ end_time = time.time()
+
+ # mark test as failed
+ result = (-1, "Fail [Crash]", test["Name"],
+ end_time - start_time, logfile.getvalue(), None)
# return test results
- return results
+ return result
# class representing an instance of autotests run
@@ -181,11 +189,17 @@ class AutotestRunner:
blacklist = []
whitelist = []
- def __init__(self, cmdline, target, blacklist, whitelist):
+ def __init__(self, cmdline, target, blacklist, whitelist, n_processes):
self.cmdline = cmdline
self.target = target
+ self.binary = cmdline.split()[0]
self.blacklist = blacklist
self.whitelist = whitelist
+ self.skipped = []
+ self.parallel_tests = []
+ self.non_parallel_tests = []
+ self.n_processes = n_processes
+ self.active_processes = 0
# log file filename
logfile = "%s.log" % target
@@ -199,177 +213,196 @@ class AutotestRunner:
self.csvwriter.writerow(["test_name", "test_result", "result_str"])
# set up cmdline string
- def __get_cmdline(self, test):
- cmdline = self.cmdline
-
- # append memory limitations for each test
- # otherwise tests won't run in parallel
- if "i686" not in self.target:
- cmdline += " --socket-mem=%s" % test["Memory"]
- else:
- # affinitize startup so that tests don't fail on i686
- cmdline = "taskset 1 " + cmdline
- cmdline += " -m " + str(sum(map(int, test["Memory"].split(","))))
-
- # set group prefix for autotest group
- # otherwise they won't run in parallel
- cmdline += " --file-prefix=%s" % test["Prefix"]
+ def __get_cmdline(self, cpu_nr):
+ cmdline = ("taskset -c %i " % cpu_nr) + self.cmdline
return cmdline
- def add_parallel_test_group(self, test_group):
- self.parallel_test_groups.append(test_group)
+ def __process_result(self, result):
- def add_non_parallel_test_group(self, test_group):
- self.non_parallel_test_groups.append(test_group)
+ # unpack result tuple
+ test_result, result_str, test_name, \
+ test_time, log, report = result
- def __process_results(self, results):
- # this iterates over individual test results
- for i, result in enumerate(results):
+ # get total run time
+ cur_time = time.time()
+ total_time = int(cur_time - self.start)
- # increase total number of tests that were run
- # do not include "start" test
- if i > 0:
- self.n_tests += 1
+ # print results, test run time and total time since start
+ result = ("%s:" % test_name).ljust(30)
+ result += result_str.ljust(29)
+ result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
- # unpack result tuple
- test_result, result_str, test_name, \
- test_time, log, report = result
+ # don't print out total time every line, it's the same anyway
+ print(result + "[%02dm %02ds]" % (total_time / 60, total_time % 60))
- # get total run time
- cur_time = time.time()
- total_time = int(cur_time - self.start)
+ # if test failed and it wasn't a "start" test
+ if test_result < 0:
+ self.fails += 1
- # print results, test run time and total time since start
- result = ("%s:" % test_name).ljust(30)
- result += result_str.ljust(29)
- result += "[%02dm %02ds]" % (test_time / 60, test_time % 60)
+ # collect logs
+ self.log_buffers.append(log)
- # don't print out total time every line, it's the same anyway
- if i == len(results) - 1:
- print(result,
- "[%02dm %02ds]" % (total_time / 60, total_time % 60))
+ # create report if it exists
+ if report:
+ try:
+ f = open("%s_%s_report.rst" %
+ (self.target, test_name), "w")
+ except IOError:
+ print("Report for %s could not be created!" % test_name)
else:
- print(result)
-
- # if test failed and it wasn't a "start" test
- if test_result < 0 and not i == 0:
- self.fails += 1
-
- # collect logs
- self.log_buffers.append(log)
-
- # create report if it exists
- if report:
- try:
- f = open("%s_%s_report.rst" %
- (self.target, test_name), "w")
- except IOError:
- print("Report for %s could not be created!" % test_name)
- else:
- with f:
- f.write(report)
-
- # write test result to CSV file
- if i != 0:
- self.csvwriter.writerow([test_name, test_result, result_str])
+ with f:
+ f.write(report)
+
+ # write test result to CSV file
+ self.csvwriter.writerow([test_name, test_result, result_str])
+
+    # this function checks an individual test and decides whether it should be
+    # in the group by comparing it against the whitelist/blacklist. it also
+    # checks if the test is compiled into the binary, and marks it as skipped
+    # if necessary
+ def __filter_test(self, test):
+ test_cmd = test["Command"]
+ test_id = test_cmd
+
+ # dump tests are specified in full e.g. "Dump_mempool"
+ if "_autotest" in test_id:
+ test_id = test_id[:-len("_autotest")]
+
+ # filter out blacklisted/whitelisted tests
+ if self.blacklist and test_id in self.blacklist:
+ return False
+ if self.whitelist and test_id not in self.whitelist:
+ return False
+
+ # if test wasn't compiled in, remove it as well
+
+ # parse the binary for available test commands
+ stripped = 'not stripped' not in \
+ subprocess.check_output(['file', self.binary])
+ if not stripped:
+ symbols = subprocess.check_output(['nm',
+ self.binary]).decode('utf-8')
+ avail_cmds = re.findall('test_register_(\w+)', symbols)
+
+ if test_cmd not in avail_cmds:
+ # notify user
+ result = 0, "Skipped [Not compiled]", test_id, 0, "", None
+ self.skipped.append(tuple(result))
+ return False
- # this function iterates over test groups and removes each
- # test that is not in whitelist/blacklist
- def __filter_groups(self, test_groups):
- groups_to_remove = []
+ return True
- # filter out tests from parallel test groups
- for i, test_group in enumerate(test_groups):
+ def __run_test_group(self, test_group, worker_cmdlines):
+ group_queue = Queue()
+ init_result_queue = Queue()
+ for proc, cmdline in enumerate(worker_cmdlines):
+ prefix = "test%i" % proc if len(worker_cmdlines) > 1 else ""
+ group_queue.put(tuple((cmdline, prefix)))
- # iterate over a copy so that we could safely delete individual
- # tests
- for test in test_group["Tests"][:]:
- test_id = test["Command"]
+ # create a pool of worker threads
+        # we will initialize the child in the initializer, and we don't need
+        # to close the child explicitly, because destroying the pool worker
+        # also terminates its child process
+ pool = Pool(processes=len(worker_cmdlines),
+ initializer=pool_init,
+ initargs=(group_queue, init_result_queue))
+
+ results = []
- # dump tests are specified in full e.g. "Dump_mempool"
- if "_autotest" in test_id:
- test_id = test_id[:-len("_autotest")]
+ # process all initialization results
+ for _ in range(len(worker_cmdlines)):
+ self.__process_result(init_result_queue.get())
- # filter out blacklisted/whitelisted tests
- if self.blacklist and test_id in self.blacklist:
- test_group["Tests"].remove(test)
- continue
- if self.whitelist and test_id not in self.whitelist:
- test_group["Tests"].remove(test)
+ # run all tests asynchronously
+ for test in test_group:
+ result = pool.apply_async(run_test, (self.target, test))
+ results.append(result)
+
+ # tell the pool to stop all processes once done
+ pool.close()
+
+ # iterate while we have group execution results to get
+ while len(results) > 0:
+ # iterate over a copy to be able to safely delete results
+ # this iterates over a list of group results
+ for async_result in results[:]:
+ # if the thread hasn't finished yet, continue
+ if not async_result.ready():
continue
- # modify or remove original group
- if len(test_group["Tests"]) > 0:
- test_groups[i] = test_group
- else:
- # remember which groups should be deleted
- # put the numbers backwards so that we start
- # deleting from the end, not from the beginning
- groups_to_remove.insert(0, i)
+ res = async_result.get()
- # remove test groups that need to be removed
- for i in groups_to_remove:
- del test_groups[i]
+ self.__process_result(res)
- return test_groups
+ # remove result from results list once we're done with it
+ results.remove(async_result)
# iterate over test groups and run tests associated with them
def run_all_tests(self):
# filter groups
- self.parallel_test_groups = \
- self.__filter_groups(self.parallel_test_groups)
- self.non_parallel_test_groups = \
- self.__filter_groups(self.non_parallel_test_groups)
-
- # create a pool of worker threads
- pool = multiprocessing.Pool(processes=1)
-
- results = []
-
- # whatever happens, try to save as much logs as possible
- try:
-
- # create table header
- print("")
- print("Test name".ljust(30), "Test result".ljust(29),
- "Test".center(9), "Total".center(9))
- print("=" * 80)
-
- # make a note of tests start time
- self.start = time.time()
+ self.parallel_tests = list(
+ filter(self.__filter_test,
+ self.parallel_tests)
+ )
+ self.non_parallel_tests = list(
+ filter(self.__filter_test,
+ self.non_parallel_tests)
+ )
+
+ parallel_cmdlines = []
+ # FreeBSD doesn't have NUMA support
+ numa_nodes = get_numa_nodes()
+ if len(numa_nodes) > 0:
+ for proc in range(self.n_processes):
+ # spread cpu affinity between NUMA nodes to have less chance of
+ # running out of memory while running multiple test apps in
+ # parallel. to do that, alternate between NUMA nodes in a round
+ # robin fashion, and pick an arbitrary CPU from that node to
+ # taskset our execution to
+ numa_node = numa_nodes[self.active_processes % len(numa_nodes)]
+ cpu_nr = first_cpu_on_node(numa_node)
+ parallel_cmdlines += [self.__get_cmdline(cpu_nr)]
+ # increase number of active processes so that the next cmdline
+ # gets a different NUMA node
+ self.active_processes += 1
+ else:
+ parallel_cmdlines = [self.cmdline] * self.n_processes
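+        # illustrative: with n_processes = 4 and numa_nodes = [0, 1], the four
+        # workers get pinned to the first CPU of nodes 0, 1, 0 and 1 in turn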
- # assign worker threads to run test groups
- for test_group in self.parallel_test_groups:
- result = pool.apply_async(run_test_group,
- [self.__get_cmdline(test_group),
- test_group])
- results.append(result)
+ print("Running tests with %d workers" % self.n_processes)
- # iterate while we have group execution results to get
- while len(results) > 0:
+ # create table header
+ print("")
+ print("Test name".ljust(30) + "Test result".ljust(29) +
+ "Test".center(9) + "Total".center(9))
+ print("=" * 80)
- # iterate over a copy to be able to safely delete results
- # this iterates over a list of group results
- for group_result in results[:]:
+ if len(self.skipped):
+ print("Skipped autotests:")
- # if the thread hasn't finished yet, continue
- if not group_result.ready():
- continue
+ # print out any skipped tests
+ for result in self.skipped:
+ # unpack result tuple
+ test_result, result_str, test_name, _, _, _ = result
+ self.csvwriter.writerow([test_name, test_result, result_str])
- res = group_result.get()
+ t = ("%s:" % test_name).ljust(30)
+ t += result_str.ljust(29)
+ t += "[00m 00s]"
- self.__process_results(res)
+ print(t)
- # remove result from results list once we're done with it
- results.remove(group_result)
+ # make a note of tests start time
+ self.start = time.time()
- # run non_parallel tests. they are run one by one, synchronously
- for test_group in self.non_parallel_test_groups:
- group_result = run_test_group(
- self.__get_cmdline(test_group), test_group)
+ # whatever happens, try to save as much logs as possible
+ try:
+ if len(self.parallel_tests) > 0:
+ print("Parallel autotests:")
+ self.__run_test_group(self.parallel_tests, parallel_cmdlines)
- self.__process_results(group_result)
+ if len(self.non_parallel_tests) > 0:
+ print("Non-parallel autotests:")
+ self.__run_test_group(self.non_parallel_tests, [self.cmdline])
# get total run time
cur_time = time.time()
diff --git a/test/test/commands.c b/test/test/commands.c
index cf0b726b..94fbc310 100644
--- a/test/test/commands.c
+++ b/test/test/commands.c
@@ -132,11 +132,13 @@ static void cmd_dump_parsed(void *parsed_result,
else if (!strcmp(res->dump, "dump_mempool"))
rte_mempool_list_dump(stdout);
else if (!strcmp(res->dump, "dump_devargs"))
- rte_eal_devargs_dump(stdout);
+ rte_devargs_dump(stdout);
else if (!strcmp(res->dump, "dump_log_types"))
rte_log_dump(stdout);
else if (!strcmp(res->dump, "dump_malloc_stats"))
rte_malloc_dump_stats(stdout, NULL);
+ else if (!strcmp(res->dump, "dump_malloc_heaps"))
+ rte_malloc_dump_heaps(stdout);
}
cmdline_parse_token_string_t cmd_dump_dump =
@@ -147,6 +149,7 @@ cmdline_parse_token_string_t cmd_dump_dump =
"dump_ring#"
"dump_mempool#"
"dump_malloc_stats#"
+ "dump_malloc_heaps#"
"dump_devargs#"
"dump_log_types");
diff --git a/test/test/meson.build b/test/test/meson.build
index eb3d87a4..b1dd6eca 100644
--- a/test/test/meson.build
+++ b/test/test/meson.build
@@ -8,6 +8,7 @@ test_sources = files('commands.c',
'test_alarm.c',
'test_atomic.c',
'test_barrier.c',
+ 'test_bpf.c',
'test_byteorder.c',
'test_cmdline.c',
'test_cmdline_cirbuf.c',
@@ -21,10 +22,10 @@ test_sources = files('commands.c',
'test_cpuflags.c',
'test_crc.c',
'test_cryptodev.c',
+ 'test_cryptodev_asym.c',
'test_cryptodev_blockcipher.c',
'test_cycles.c',
'test_debug.c',
- 'test_devargs.c',
'test_distributor.c',
'test_distributor_perf.c',
'test_eal_flags.c',
@@ -98,6 +99,7 @@ test_sources = files('commands.c',
)
test_deps = ['acl',
+ 'bpf',
'cfgfile',
'cmdline',
'cryptodev',
@@ -129,13 +131,14 @@ test_names = [
'cryptodev_qat_autotest',
'cryptodev_aesni_mb_autotest',
'cryptodev_openssl_autotest',
+ 'cryptodev_openssl_asym_autotest',
'cryptodev_aesni_gcm_autotest',
'cryptodev_null_autotest',
'cryptodev_sw_snow3g_autotest',
'cryptodev_sw_kasumi_autotest',
'cryptodev_sw_zuc_autotest',
'cryptodev_sw_armv8_autotest',
- 'cryptodev_sw_mrvl_autotest',
+ 'cryptodev_sw_mvsam_autotest',
'cryptodev_dpaa2_sec_autotest',
'cryptodev_dpaa_sec_autotest',
'cycles_autotest',
@@ -234,7 +237,20 @@ if dpdk_conf.has('RTE_LIBRTE_KNI')
test_deps += 'kni'
endif
+cflags = machine_args
+if cc.has_argument('-Wno-format-truncation')
+ cflags += '-Wno-format-truncation'
+endif
+
test_dep_objs = []
+compress_test_dep = dependency('zlib', required: false)
+if compress_test_dep.found()
+ test_dep_objs += compress_test_dep
+ test_sources += 'test_compressdev.c'
+ test_deps += 'compressdev'
+ test_names += 'compressdev_autotest'
+endif
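+# illustrative: when zlib development files are present, the optional
+# dependency above is found and compressdev_autotest gets registered;
+# otherwise the compressdev test is simply skipped instead of breaking
+# the build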
+
foreach d:test_deps
def_lib = get_option('default_library')
test_dep_objs += get_variable(def_lib + '_rte_' + d)
@@ -251,7 +267,7 @@ if get_option('tests')
test_sources,
link_whole: link_libs,
dependencies: test_dep_objs,
- c_args: [machine_args, '-DALLOW_EXPERIMENTAL_API'],
+ c_args: [cflags, '-DALLOW_EXPERIMENTAL_API'],
install_rpath: driver_install_path,
install: false)
diff --git a/test/test/process.h b/test/test/process.h
index 11986d5c..ba3a1850 100644
--- a/test/test/process.h
+++ b/test/test/process.h
@@ -5,6 +5,11 @@
#ifndef _PROCESS_H_
#define _PROCESS_H_
+#include <limits.h> /* PATH_MAX */
+#include <libgen.h> /* basename et al */
+#include <stdlib.h> /* NULL */
+#include <unistd.h> /* readlink */
+
#ifdef RTE_EXEC_ENV_BSDAPP
#define self "curproc"
#define exe "file"
@@ -61,4 +66,28 @@ process_dup(const char *const argv[], int numargs, const char *env_value)
return status;
}
+/* FreeBSD doesn't support file prefixes, so force compile failures for any
+ * tests attempting to use this function on FreeBSD.
+ */
+#ifdef RTE_EXEC_ENV_LINUXAPP
+static char *
+get_current_prefix(char *prefix, int size)
+{
+ char path[PATH_MAX] = {0};
+ char buf[PATH_MAX] = {0};
+
+ /* get file for config (fd is always 3) */
+ snprintf(path, sizeof(path), "/proc/self/fd/%d", 3);
+
+ /* return NULL on error */
+ if (readlink(path, buf, sizeof(buf)) == -1)
+ return NULL;
+
+ /* get the prefix */
+ snprintf(prefix, size, "%s", basename(dirname(buf)));
+
+ return prefix;
+}
+#endif
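+
+/* Illustrative example for get_current_prefix() above (path hypothetical):
+ * if fd 3 points at /var/run/dpdk/rte/config, readlink() fills buf with
+ * that path and basename(dirname(buf)) yields the runtime prefix "rte".
+ */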
+
#endif /* _PROCESS_H_ */
diff --git a/test/test/resource.c b/test/test/resource.c
index 0e2b62cd..34465f16 100644
--- a/test/test/resource.c
+++ b/test/test/resource.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#include <stdio.h>
diff --git a/test/test/resource.h b/test/test/resource.h
index 1e961221..223fa22a 100644
--- a/test/test/resource.h
+++ b/test/test/resource.h
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#ifndef _RESOURCE_H_
diff --git a/test/test/test_bpf.c b/test/test/test_bpf.c
new file mode 100644
index 00000000..fa17c4f7
--- /dev/null
+++ b/test/test/test_bpf.c
@@ -0,0 +1,1926 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_memory.h>
+#include <rte_debug.h>
+#include <rte_hexdump.h>
+#include <rte_random.h>
+#include <rte_byteorder.h>
+#include <rte_errno.h>
+#include <rte_bpf.h>
+
+#include "test.h"
+
+/*
+ * Basic functional tests for librte_bpf.
+ * The main procedure: load an eBPF program, execute it and
+ * compare results with expected values.
+ */
+
+struct dummy_offset {
+ uint64_t u64;
+ uint32_t u32;
+ uint16_t u16;
+ uint8_t u8;
+};
+
+struct dummy_vect8 {
+ struct dummy_offset in[8];
+ struct dummy_offset out[8];
+};
+
+#define TEST_FILL_1 0xDEADBEEF
+
+#define TEST_MUL_1 21
+#define TEST_MUL_2 -100
+
+#define TEST_SHIFT_1 15
+#define TEST_SHIFT_2 33
+
+#define TEST_JCC_1 0
+#define TEST_JCC_2 -123
+#define TEST_JCC_3 5678
+#define TEST_JCC_4 TEST_FILL_1
+
+struct bpf_test {
+ const char *name;
+ size_t arg_sz;
+ struct rte_bpf_prm prm;
+ void (*prepare)(void *);
+ int (*check_result)(uint64_t, const void *);
+ uint32_t allow_fail;
+};
+
+/*
+ * Compare return value and result data with expected ones.
+ * Report a failure if they don't match.
+ */
+static int
+cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc,
+ const void *exp_res, const void *ret_res, size_t res_sz)
+{
+ int32_t ret;
+
+ ret = 0;
+ if (exp_rc != ret_rc) {
+ printf("%s@%d: invalid return value, expected: 0x%" PRIx64
+ ",result: 0x%" PRIx64 "\n",
+ func, __LINE__, exp_rc, ret_rc);
+ ret |= -1;
+ }
+
+ if (memcmp(exp_res, ret_res, res_sz) != 0) {
+ printf("%s: invalid value\n", func);
+ rte_memdump(stdout, "expected", exp_res, res_sz);
+ rte_memdump(stdout, "result", ret_res, res_sz);
+ ret |= -1;
+ }
+
+ return ret;
+}
+
+/* store immediate test-cases */
+static const struct ebpf_insn test_store1_prog[] = {
+ {
+ .code = (BPF_ST | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u8),
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_ST | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u16),
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_ST | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u32),
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_ST | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u64),
+ .imm = TEST_FILL_1,
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
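+
+/*
+ * Roughly equivalent C for the program above (illustrative sketch):
+ *	p->u8 = (uint8_t)TEST_FILL_1;
+ *	p->u16 = (uint16_t)TEST_FILL_1;
+ *	p->u32 = (uint32_t)TEST_FILL_1;
+ *	p->u64 = (int32_t)TEST_FILL_1; (the 64-bit store sign-extends imm)
+ *	return 1;
+ */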
+
+static void
+test_store1_prepare(void *arg)
+{
+ struct dummy_offset *df;
+
+ df = arg;
+ memset(df, 0, sizeof(*df));
+}
+
+static int
+test_store1_check(uint64_t rc, const void *arg)
+{
+ const struct dummy_offset *dft;
+ struct dummy_offset dfe;
+
+ dft = arg;
+
+ memset(&dfe, 0, sizeof(dfe));
+ dfe.u64 = (int32_t)TEST_FILL_1;
+ dfe.u32 = dfe.u64;
+ dfe.u16 = dfe.u64;
+ dfe.u8 = dfe.u64;
+
+ return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
+}
+
+/* store register test-cases */
+static const struct ebpf_insn test_store2_prog[] = {
+
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u8),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u16),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+/* load test-cases */
+static const struct ebpf_insn test_load1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u8),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u16),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ /* return sum */
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_4,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_3,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
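+
+/*
+ * Roughly equivalent C for the program above (illustrative sketch):
+ *	return p->u64 + p->u32 + p->u16 + p->u8;
+ */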
+
+static void
+test_load1_prepare(void *arg)
+{
+ struct dummy_offset *df;
+
+ df = arg;
+
+ memset(df, 0, sizeof(*df));
+ df->u64 = (int32_t)TEST_FILL_1;
+ df->u32 = df->u64;
+ df->u16 = df->u64;
+ df->u8 = df->u64;
+}
+
+static int
+test_load1_check(uint64_t rc, const void *arg)
+{
+ uint64_t v;
+ const struct dummy_offset *dft;
+
+ dft = arg;
+ v = dft->u64;
+ v += dft->u32;
+ v += dft->u16;
+ v += dft->u8;
+
+ return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft));
+}
+
+/* alu mul test-cases */
+static const struct ebpf_insn test_mul1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[2].u32),
+ },
+ {
+ .code = (BPF_ALU | BPF_MUL | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_MUL_1,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_MUL | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = TEST_MUL_2,
+ },
+ {
+ .code = (BPF_ALU | BPF_MUL | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_MUL | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_3,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[0].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[1].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[2].u64),
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
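+
+/*
+ * Roughly equivalent C (illustrative): BPF_ALU ops truncate to 32 bits,
+ * EBPF_ALU64 ops use the full 64 bits:
+ *	r2 = (uint32_t)(in[0].u32 * TEST_MUL_1);
+ *	r3 = in[1].u64 * (int64_t)TEST_MUL_2;
+ *	r4 = (uint32_t)(in[2].u32 * r2);
+ *	r4 *= r3;
+ */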
+
+static void
+test_mul1_prepare(void *arg)
+{
+ struct dummy_vect8 *dv;
+ uint64_t v;
+
+ dv = arg;
+
+ v = rte_rand();
+
+ memset(dv, 0, sizeof(*dv));
+ dv->in[0].u32 = v;
+ dv->in[1].u64 = v << 12 | v >> 6;
+ dv->in[2].u32 = -v;
+}
+
+static int
+test_mul1_check(uint64_t rc, const void *arg)
+{
+ uint64_t r2, r3, r4;
+ const struct dummy_vect8 *dvt;
+ struct dummy_vect8 dve;
+
+ dvt = arg;
+ memset(&dve, 0, sizeof(dve));
+
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[1].u64;
+ r4 = dvt->in[2].u32;
+
+ r2 = (uint32_t)r2 * TEST_MUL_1;
+ r3 *= TEST_MUL_2;
+ r4 = (uint32_t)(r4 * r2);
+ r4 *= r3;
+
+ dve.out[0].u64 = r2;
+ dve.out[1].u64 = r3;
+ dve.out[2].u64 = r4;
+
+ return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* alu shift test-cases */
+static const struct ebpf_insn test_shift1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[2].u32),
+ },
+ {
+ .code = (BPF_ALU | BPF_LSH | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_SHIFT_1,
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = TEST_SHIFT_2,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[0].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[1].u64),
+ },
+ {
+ .code = (BPF_ALU | BPF_RSH | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_4,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_LSH | BPF_X),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_4,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[2].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[3].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[2].u32),
+ },
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = sizeof(uint64_t) * CHAR_BIT - 1,
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = sizeof(uint32_t) * CHAR_BIT - 1,
+ },
+ {
+ .code = (BPF_ALU | BPF_LSH | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[4].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[5].u64),
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
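+
+/*
+ * The variable shift counts above are first masked with (width - 1),
+ * i.e. 63 for the 64-bit EBPF_ARSH and 31 for the 32-bit BPF_LSH,
+ * keeping the count inside the operand width. In C terms (a sketch):
+ *
+ *	r2 &= 63; r3 = (int64_t)r3 >> r2;
+ *	r2 &= 31; r4 = (uint32_t)r4 << r2;
+ */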
+
+static void
+test_shift1_prepare(void *arg)
+{
+ struct dummy_vect8 *dv;
+ uint64_t v;
+
+ dv = arg;
+
+ v = rte_rand();
+
+ memset(dv, 0, sizeof(*dv));
+ dv->in[0].u32 = v;
+ dv->in[1].u64 = v << 12 | v >> 6;
+ dv->in[2].u32 = (-v ^ 5);
+}
+
+static int
+test_shift1_check(uint64_t rc, const void *arg)
+{
+ uint64_t r2, r3, r4;
+ const struct dummy_vect8 *dvt;
+ struct dummy_vect8 dve;
+
+ dvt = arg;
+ memset(&dve, 0, sizeof(dve));
+
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[1].u64;
+ r4 = dvt->in[2].u32;
+
+ r2 = (uint32_t)r2 << TEST_SHIFT_1;
+ r3 = (int64_t)r3 >> TEST_SHIFT_2;
+
+ dve.out[0].u64 = r2;
+ dve.out[1].u64 = r3;
+
+ r2 = (uint32_t)r2 >> r4;
+ r3 <<= r4;
+
+ dve.out[2].u64 = r2;
+ dve.out[3].u64 = r3;
+
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[1].u64;
+ r4 = dvt->in[2].u32;
+
+ r2 &= sizeof(uint64_t) * CHAR_BIT - 1;
+ r3 = (int64_t)r3 >> r2;
+ r2 &= sizeof(uint32_t) * CHAR_BIT - 1;
+ r4 = (uint32_t)r4 << r2;
+
+ dve.out[4].u64 = r4;
+ dve.out[5].u64 = r3;
+
+ return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* jmp test-cases */
+static const struct ebpf_insn test_jump1_prog[] = {
+
+ [0] = {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0,
+ },
+ [1] = {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ [2] = {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u64),
+ },
+ [3] = {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u32),
+ },
+ [4] = {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_5,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ [5] = {
+ .code = (BPF_JMP | BPF_JEQ | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_JCC_1,
+ .off = 8,
+ },
+ [6] = {
+ .code = (BPF_JMP | EBPF_JSLE | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = TEST_JCC_2,
+ .off = 9,
+ },
+ [7] = {
+ .code = (BPF_JMP | BPF_JGT | BPF_K),
+ .dst_reg = EBPF_REG_4,
+ .imm = TEST_JCC_3,
+ .off = 10,
+ },
+ [8] = {
+ .code = (BPF_JMP | BPF_JSET | BPF_K),
+ .dst_reg = EBPF_REG_5,
+ .imm = TEST_JCC_4,
+ .off = 11,
+ },
+ [9] = {
+ .code = (BPF_JMP | EBPF_JNE | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_3,
+ .off = 12,
+ },
+ [10] = {
+ .code = (BPF_JMP | EBPF_JSGT | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_4,
+ .off = 13,
+ },
+ [11] = {
+ .code = (BPF_JMP | EBPF_JLE | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_5,
+ .off = 14,
+ },
+ [12] = {
+ .code = (BPF_JMP | BPF_JSET | BPF_X),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_5,
+ .off = 15,
+ },
+ [13] = {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+ [14] = {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0x1,
+ },
+ [15] = {
+ .code = (BPF_JMP | BPF_JA),
+ .off = -10,
+ },
+ [16] = {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0x2,
+ },
+ [17] = {
+ .code = (BPF_JMP | BPF_JA),
+ .off = -11,
+ },
+ [18] = {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0x4,
+ },
+ [19] = {
+ .code = (BPF_JMP | BPF_JA),
+ .off = -12,
+ },
+ [20] = {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0x8,
+ },
+ [21] = {
+ .code = (BPF_JMP | BPF_JA),
+ .off = -13,
+ },
+ [22] = {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0x10,
+ },
+ [23] = {
+ .code = (BPF_JMP | BPF_JA),
+ .off = -14,
+ },
+ [24] = {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0x20,
+ },
+ [25] = {
+ .code = (BPF_JMP | BPF_JA),
+ .off = -15,
+ },
+ [26] = {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0x40,
+ },
+ [27] = {
+ .code = (BPF_JMP | BPF_JA),
+ .off = -16,
+ },
+ [28] = {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 0x80,
+ },
+ [29] = {
+ .code = (BPF_JMP | BPF_JA),
+ .off = -17,
+ },
+};
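+
+/*
+ * eBPF branch offsets are counted in instructions, relative to the
+ * instruction that follows the branch: insn [5] with .off = 8 lands
+ * on insn [5 + 1 + 8] = [14], which ORs 0x1 into R0, and the BPF_JA
+ * at [15] with .off = -10 jumps back to insn [6]. Each taken branch
+ * thus sets one bit of the result, mirroring test_jump1_check().
+ */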
+
+static void
+test_jump1_prepare(void *arg)
+{
+ struct dummy_vect8 *dv;
+ uint64_t v1, v2;
+
+ dv = arg;
+
+ v1 = rte_rand();
+ v2 = rte_rand();
+
+ memset(dv, 0, sizeof(*dv));
+ dv->in[0].u64 = v1;
+ dv->in[1].u64 = v2;
+ dv->in[0].u32 = (v1 << 12) + (v2 >> 6);
+ dv->in[1].u32 = (v2 << 12) - (v1 >> 6);
+}
+
+static int
+test_jump1_check(uint64_t rc, const void *arg)
+{
+ uint64_t r2, r3, r4, r5, rv;
+ const struct dummy_vect8 *dvt;
+
+ dvt = arg;
+
+ rv = 0;
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[0].u64;
+ r4 = dvt->in[1].u32;
+ r5 = dvt->in[1].u64;
+
+ if (r2 == TEST_JCC_1)
+ rv |= 0x1;
+ if ((int64_t)r3 <= TEST_JCC_2)
+ rv |= 0x2;
+ if (r4 > TEST_JCC_3)
+ rv |= 0x4;
+ if (r5 & TEST_JCC_4)
+ rv |= 0x8;
+ if (r2 != r3)
+ rv |= 0x10;
+ if ((int64_t)r2 > (int64_t)r4)
+ rv |= 0x20;
+ if (r2 <= r5)
+ rv |= 0x40;
+ if (r3 & r5)
+ rv |= 0x80;
+
+ return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv));
+}
+
+/* alu (add, sub, and, or, xor, neg) test-cases */
+static const struct ebpf_insn test_alu1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_5,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ {
+ .code = (BPF_ALU | BPF_AND | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_ALU | BPF_XOR | BPF_K),
+ .dst_reg = EBPF_REG_4,
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+ .dst_reg = EBPF_REG_5,
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[0].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[1].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[2].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_5,
+ .off = offsetof(struct dummy_vect8, out[3].u64),
+ },
+ {
+ .code = (BPF_ALU | BPF_OR | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_3,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_XOR | BPF_X),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_4,
+ },
+ {
+ .code = (BPF_ALU | BPF_SUB | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_5,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_AND | BPF_X),
+ .dst_reg = EBPF_REG_5,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[4].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[5].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[6].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_5,
+ .off = offsetof(struct dummy_vect8, out[7].u64),
+ },
+ /* return (-r2 + (-r3)) */
+ {
+ .code = (BPF_ALU | BPF_NEG),
+ .dst_reg = EBPF_REG_2,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_NEG),
+ .dst_reg = EBPF_REG_3,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_3,
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
+
+static int
+test_alu1_check(uint64_t rc, const void *arg)
+{
+ uint64_t r2, r3, r4, r5, rv;
+ const struct dummy_vect8 *dvt;
+ struct dummy_vect8 dve;
+
+ dvt = arg;
+ memset(&dve, 0, sizeof(dve));
+
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[0].u64;
+ r4 = dvt->in[1].u32;
+ r5 = dvt->in[1].u64;
+
+ r2 = (uint32_t)r2 & TEST_FILL_1;
+	r3 |= (int32_t)TEST_FILL_1;
+ r4 = (uint32_t)r4 ^ TEST_FILL_1;
+ r5 += (int32_t)TEST_FILL_1;
+
+ dve.out[0].u64 = r2;
+ dve.out[1].u64 = r3;
+ dve.out[2].u64 = r4;
+ dve.out[3].u64 = r5;
+
+ r2 = (uint32_t)r2 | (uint32_t)r3;
+ r3 ^= r4;
+ r4 = (uint32_t)r4 - (uint32_t)r5;
+ r5 &= r2;
+
+ dve.out[4].u64 = r2;
+ dve.out[5].u64 = r3;
+ dve.out[6].u64 = r4;
+ dve.out[7].u64 = r5;
+
+ r2 = -(int32_t)r2;
+ rv = (uint32_t)r2;
+ r3 = -r3;
+ rv += r3;
+
+ return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* endianness conversions (BE->LE/LE->BE) test-cases */
+static const struct ebpf_insn test_bele1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u16),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u64),
+ },
+ {
+ .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
+ .dst_reg = EBPF_REG_2,
+ .imm = sizeof(uint16_t) * CHAR_BIT,
+ },
+ {
+ .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
+ .dst_reg = EBPF_REG_3,
+ .imm = sizeof(uint32_t) * CHAR_BIT,
+ },
+ {
+ .code = (BPF_ALU | EBPF_END | EBPF_TO_BE),
+ .dst_reg = EBPF_REG_4,
+ .imm = sizeof(uint64_t) * CHAR_BIT,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[0].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[1].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[2].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u16),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u64),
+ },
+ {
+ .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
+ .dst_reg = EBPF_REG_2,
+ .imm = sizeof(uint16_t) * CHAR_BIT,
+ },
+ {
+ .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
+ .dst_reg = EBPF_REG_3,
+ .imm = sizeof(uint32_t) * CHAR_BIT,
+ },
+ {
+ .code = (BPF_ALU | EBPF_END | EBPF_TO_LE),
+ .dst_reg = EBPF_REG_4,
+ .imm = sizeof(uint64_t) * CHAR_BIT,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[3].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[4].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[5].u64),
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
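+
+/*
+ * EBPF_END converts the low .imm bits of a register to the requested
+ * byte order (EBPF_TO_BE / EBPF_TO_LE) and zeroes the remaining upper
+ * bits; on a little-endian host the TO_LE forms are effectively
+ * truncating no-ops. The check below replays the conversions with the
+ * rte_cpu_to_be_*() and rte_cpu_to_le_*() helpers.
+ */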
+
+static void
+test_bele1_prepare(void *arg)
+{
+ struct dummy_vect8 *dv;
+
+ dv = arg;
+
+ memset(dv, 0, sizeof(*dv));
+ dv->in[0].u64 = rte_rand();
+ dv->in[0].u32 = dv->in[0].u64;
+ dv->in[0].u16 = dv->in[0].u64;
+}
+
+static int
+test_bele1_check(uint64_t rc, const void *arg)
+{
+ uint64_t r2, r3, r4;
+ const struct dummy_vect8 *dvt;
+ struct dummy_vect8 dve;
+
+ dvt = arg;
+ memset(&dve, 0, sizeof(dve));
+
+ r2 = dvt->in[0].u16;
+ r3 = dvt->in[0].u32;
+ r4 = dvt->in[0].u64;
+
+ r2 = rte_cpu_to_be_16(r2);
+ r3 = rte_cpu_to_be_32(r3);
+ r4 = rte_cpu_to_be_64(r4);
+
+ dve.out[0].u64 = r2;
+ dve.out[1].u64 = r3;
+ dve.out[2].u64 = r4;
+
+ r2 = dvt->in[0].u16;
+ r3 = dvt->in[0].u32;
+ r4 = dvt->in[0].u64;
+
+ r2 = rte_cpu_to_le_16(r2);
+ r3 = rte_cpu_to_le_32(r3);
+ r4 = rte_cpu_to_le_64(r4);
+
+ dve.out[3].u64 = r2;
+ dve.out[4].u64 = r3;
+ dve.out[5].u64 = r4;
+
+ return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* atomic add test-cases */
+static const struct ebpf_insn test_xadd1_prog[] = {
+
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = -1,
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_4,
+ .imm = TEST_FILL_1,
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_5,
+ .imm = TEST_MUL_1,
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_5,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_5,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_6,
+ .imm = TEST_MUL_2,
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_6,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_6,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_7,
+ .imm = TEST_JCC_2,
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_7,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_7,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_8,
+ .imm = TEST_JCC_3,
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | BPF_W),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_8,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_STX | EBPF_XADD | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_8,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
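+
+/*
+ * Each EBPF_XADD above is an atomic fetch-and-add on memory, roughly:
+ *
+ *	*(uint32_t *)(uintptr_t)(r1 + off) += (uint32_t)src;	(BPF_W)
+ *	*(uint64_t *)(uintptr_t)(r1 + off) += src;		(EBPF_DW)
+ *
+ * test_xadd1_check() replays the same sequence of additions with the
+ * rte_atomic32/64 APIs and compares the final memory contents.
+ */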
+
+static int
+test_xadd1_check(uint64_t rc, const void *arg)
+{
+ uint64_t rv;
+ const struct dummy_offset *dft;
+ struct dummy_offset dfe;
+
+ dft = arg;
+ memset(&dfe, 0, sizeof(dfe));
+
+ rv = 1;
+ rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+ rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+ rv = -1;
+ rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+ rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+ rv = (int32_t)TEST_FILL_1;
+ rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+ rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+ rv = TEST_MUL_1;
+ rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+ rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+ rv = TEST_MUL_2;
+ rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+ rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+ rv = TEST_JCC_2;
+ rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+ rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+ rv = TEST_JCC_3;
+ rte_atomic32_add((rte_atomic32_t *)&dfe.u32, rv);
+ rte_atomic64_add((rte_atomic64_t *)&dfe.u64, rv);
+
+ return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe));
+}
+
+/* alu div test-cases */
+static const struct ebpf_insn test_div1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[0].u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[1].u64),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[2].u32),
+ },
+ {
+ .code = (BPF_ALU | BPF_DIV | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = TEST_MUL_1,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_MOD | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = TEST_MUL_2,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = 1,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_OR | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_ALU | BPF_MOD | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_DIV | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_3,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_2,
+ .off = offsetof(struct dummy_vect8, out[0].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_3,
+ .off = offsetof(struct dummy_vect8, out[1].u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_4,
+ .off = offsetof(struct dummy_vect8, out[2].u64),
+ },
+ /* check that we can handle division by zero gracefully. */
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_vect8, in[3].u32),
+ },
+ {
+ .code = (BPF_ALU | BPF_DIV | BPF_X),
+ .dst_reg = EBPF_REG_4,
+ .src_reg = EBPF_REG_2,
+ },
+ /* return 1 */
+ {
+ .code = (BPF_ALU | EBPF_MOV | BPF_K),
+ .dst_reg = EBPF_REG_0,
+ .imm = 1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
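+
+/*
+ * The OR-with-1 instructions above make R2 and R3 odd (hence non-zero)
+ * before they are used as divisors, so only the final BPF_DIV by
+ * in[3].u32 (left zero by the prepare function) divides by zero; the
+ * eBPF runtime is expected to terminate the program at that point with
+ * a return value of 0, which is what test_div1_check() asserts.
+ */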
+
+static int
+test_div1_check(uint64_t rc, const void *arg)
+{
+ uint64_t r2, r3, r4;
+ const struct dummy_vect8 *dvt;
+ struct dummy_vect8 dve;
+
+ dvt = arg;
+ memset(&dve, 0, sizeof(dve));
+
+ r2 = dvt->in[0].u32;
+ r3 = dvt->in[1].u64;
+ r4 = dvt->in[2].u32;
+
+ r2 = (uint32_t)r2 / TEST_MUL_1;
+ r3 %= TEST_MUL_2;
+ r2 |= 1;
+ r3 |= 1;
+ r4 = (uint32_t)(r4 % r2);
+ r4 /= r3;
+
+ dve.out[0].u64 = r2;
+ dve.out[1].u64 = r3;
+ dve.out[2].u64 = r4;
+
+	/*
+	 * The test program above attempted to divide by zero,
+	 * so the program's return value should be 0.
+	 */
+ return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out));
+}
+
+/* call test-cases */
+static const struct ebpf_insn test_call1_prog[] = {
+
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u32),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_1,
+ .off = offsetof(struct dummy_offset, u64),
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_10,
+ .src_reg = EBPF_REG_2,
+ .off = -4,
+ },
+ {
+ .code = (BPF_STX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_10,
+ .src_reg = EBPF_REG_3,
+ .off = -16,
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_10,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = 4,
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_3,
+ .src_reg = EBPF_REG_10,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_SUB | BPF_K),
+ .dst_reg = EBPF_REG_3,
+ .imm = 16,
+ },
+ {
+ .code = (BPF_JMP | EBPF_CALL),
+ .imm = 0,
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_10,
+ .off = -4,
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_10,
+ .off = -16
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_2,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+};
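+
+/*
+ * The program above spills the loaded values into the BPF stack (R10
+ * is the read-only frame pointer; the slots sit at offsets -4 and
+ * -16), passes pointers to those slots to dummy_func1() via
+ * EBPF_CALL, then reloads the slots and returns their sum. Roughly
+ * (a sketch):
+ *
+ *	uint32_t v32 = p->u32;
+ *	uint64_t v64 = p->u64;
+ *	dummy_func1(p, &v32, &v64);
+ *	return v64 + v32;
+ */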
+
+static void
+dummy_func1(const void *p, uint32_t *v32, uint64_t *v64)
+{
+ const struct dummy_offset *dv;
+
+ dv = p;
+
+ v32[0] += dv->u16;
+ v64[0] += dv->u8;
+}
+
+static int
+test_call1_check(uint64_t rc, const void *arg)
+{
+ uint32_t v32;
+ uint64_t v64;
+ const struct dummy_offset *dv;
+
+ dv = arg;
+
+ v32 = dv->u32;
+ v64 = dv->u64;
+ dummy_func1(arg, &v32, &v64);
+ v64 += v32;
+
+ if (v64 != rc) {
+ printf("%s@%d: invalid return value "
+ "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
+ __func__, __LINE__, v64, rc);
+ return -1;
+ }
+ return 0;
+}
+
+static const struct rte_bpf_xsym test_call1_xsym[] = {
+ {
+ .name = RTE_STR(dummy_func1),
+ .type = RTE_BPF_XTYPE_FUNC,
+ .func = {
+ .val = (void *)dummy_func1,
+ .nb_args = 3,
+ .args = {
+ [0] = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ [1] = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(uint32_t),
+ },
+ [2] = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(uint64_t),
+ },
+ },
+ },
+ },
+};
+
+static const struct ebpf_insn test_call2_prog[] = {
+
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_10,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+ .dst_reg = EBPF_REG_1,
+ .imm = -(int32_t)sizeof(struct dummy_offset),
+ },
+ {
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .dst_reg = EBPF_REG_2,
+ .src_reg = EBPF_REG_10,
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_K),
+ .dst_reg = EBPF_REG_2,
+ .imm = -2 * (int32_t)sizeof(struct dummy_offset),
+ },
+ {
+ .code = (BPF_JMP | EBPF_CALL),
+ .imm = 0,
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_10,
+ .off = -(int32_t)(sizeof(struct dummy_offset) -
+ offsetof(struct dummy_offset, u64)),
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_W),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_10,
+ .off = -(int32_t)(sizeof(struct dummy_offset) -
+ offsetof(struct dummy_offset, u32)),
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_1,
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_H),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_10,
+ .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
+ offsetof(struct dummy_offset, u16)),
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_1,
+ },
+ {
+ .code = (BPF_LDX | BPF_MEM | BPF_B),
+ .dst_reg = EBPF_REG_1,
+ .src_reg = EBPF_REG_10,
+ .off = -(int32_t)(2 * sizeof(struct dummy_offset) -
+ offsetof(struct dummy_offset, u8)),
+ },
+ {
+ .code = (EBPF_ALU64 | BPF_ADD | BPF_X),
+ .dst_reg = EBPF_REG_0,
+ .src_reg = EBPF_REG_1,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+
+};
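+
+/*
+ * Here two struct dummy_offset instances live back to back on the BPF
+ * stack, at R10 - sizeof(struct dummy_offset) and
+ * R10 - 2 * sizeof(struct dummy_offset). dummy_func2() fills both,
+ * and the loads after the call pick out individual fields through
+ * offsetof() arithmetic, summing a.u64 + a.u32 + b.u16 + b.u8 just as
+ * test_call2_check() does.
+ */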
+
+static void
+dummy_func2(struct dummy_offset *a, struct dummy_offset *b)
+{
+ uint64_t v;
+
+ v = 0;
+ a->u64 = v++;
+ a->u32 = v++;
+ a->u16 = v++;
+ a->u8 = v++;
+ b->u64 = v++;
+ b->u32 = v++;
+ b->u16 = v++;
+ b->u8 = v++;
+}
+
+static int
+test_call2_check(uint64_t rc, const void *arg)
+{
+ uint64_t v;
+ struct dummy_offset a, b;
+
+ RTE_SET_USED(arg);
+
+ dummy_func2(&a, &b);
+ v = a.u64 + a.u32 + b.u16 + b.u8;
+
+ if (v != rc) {
+ printf("%s@%d: invalid return value "
+ "expected=0x%" PRIx64 ", actual=0x%" PRIx64 "\n",
+ __func__, __LINE__, v, rc);
+ return -1;
+ }
+ return 0;
+}
+
+static const struct rte_bpf_xsym test_call2_xsym[] = {
+ {
+ .name = RTE_STR(dummy_func2),
+ .type = RTE_BPF_XTYPE_FUNC,
+ .func = {
+ .val = (void *)dummy_func2,
+ .nb_args = 2,
+ .args = {
+ [0] = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ [1] = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ },
+ },
+ },
+};
+
+static const struct bpf_test tests[] = {
+ {
+ .name = "test_store1",
+ .arg_sz = sizeof(struct dummy_offset),
+ .prm = {
+ .ins = test_store1_prog,
+ .nb_ins = RTE_DIM(test_store1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ },
+ .prepare = test_store1_prepare,
+ .check_result = test_store1_check,
+ },
+ {
+ .name = "test_store2",
+ .arg_sz = sizeof(struct dummy_offset),
+ .prm = {
+ .ins = test_store2_prog,
+ .nb_ins = RTE_DIM(test_store2_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ },
+ .prepare = test_store1_prepare,
+ .check_result = test_store1_check,
+ },
+ {
+ .name = "test_load1",
+ .arg_sz = sizeof(struct dummy_offset),
+ .prm = {
+ .ins = test_load1_prog,
+ .nb_ins = RTE_DIM(test_load1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ },
+ .prepare = test_load1_prepare,
+ .check_result = test_load1_check,
+ },
+ {
+ .name = "test_mul1",
+ .arg_sz = sizeof(struct dummy_vect8),
+ .prm = {
+ .ins = test_mul1_prog,
+ .nb_ins = RTE_DIM(test_mul1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_vect8),
+ },
+ },
+ .prepare = test_mul1_prepare,
+ .check_result = test_mul1_check,
+ },
+ {
+ .name = "test_shift1",
+ .arg_sz = sizeof(struct dummy_vect8),
+ .prm = {
+ .ins = test_shift1_prog,
+ .nb_ins = RTE_DIM(test_shift1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_vect8),
+ },
+ },
+ .prepare = test_shift1_prepare,
+ .check_result = test_shift1_check,
+ },
+ {
+ .name = "test_jump1",
+ .arg_sz = sizeof(struct dummy_vect8),
+ .prm = {
+ .ins = test_jump1_prog,
+ .nb_ins = RTE_DIM(test_jump1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_vect8),
+ },
+ },
+ .prepare = test_jump1_prepare,
+ .check_result = test_jump1_check,
+ },
+ {
+ .name = "test_alu1",
+ .arg_sz = sizeof(struct dummy_vect8),
+ .prm = {
+ .ins = test_alu1_prog,
+ .nb_ins = RTE_DIM(test_alu1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_vect8),
+ },
+ },
+ .prepare = test_jump1_prepare,
+ .check_result = test_alu1_check,
+ },
+ {
+ .name = "test_bele1",
+ .arg_sz = sizeof(struct dummy_vect8),
+ .prm = {
+ .ins = test_bele1_prog,
+ .nb_ins = RTE_DIM(test_bele1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_vect8),
+ },
+ },
+ .prepare = test_bele1_prepare,
+ .check_result = test_bele1_check,
+ },
+ {
+ .name = "test_xadd1",
+ .arg_sz = sizeof(struct dummy_offset),
+ .prm = {
+ .ins = test_xadd1_prog,
+ .nb_ins = RTE_DIM(test_xadd1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ },
+ .prepare = test_store1_prepare,
+ .check_result = test_xadd1_check,
+ },
+ {
+ .name = "test_div1",
+ .arg_sz = sizeof(struct dummy_vect8),
+ .prm = {
+ .ins = test_div1_prog,
+ .nb_ins = RTE_DIM(test_div1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_vect8),
+ },
+ },
+ .prepare = test_mul1_prepare,
+ .check_result = test_div1_check,
+ },
+ {
+ .name = "test_call1",
+ .arg_sz = sizeof(struct dummy_offset),
+ .prm = {
+ .ins = test_call1_prog,
+ .nb_ins = RTE_DIM(test_call1_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ .xsym = test_call1_xsym,
+ .nb_xsym = RTE_DIM(test_call1_xsym),
+ },
+ .prepare = test_load1_prepare,
+ .check_result = test_call1_check,
+ /* for now don't support function calls on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
+ {
+ .name = "test_call2",
+ .arg_sz = sizeof(struct dummy_offset),
+ .prm = {
+ .ins = test_call2_prog,
+ .nb_ins = RTE_DIM(test_call2_prog),
+ .prog_arg = {
+ .type = RTE_BPF_ARG_PTR,
+ .size = sizeof(struct dummy_offset),
+ },
+ .xsym = test_call2_xsym,
+ .nb_xsym = RTE_DIM(test_call2_xsym),
+ },
+ .prepare = test_store1_prepare,
+ .check_result = test_call2_check,
+ /* for now don't support function calls on 32 bit platform */
+ .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)),
+ },
+};
+
+static int
+run_test(const struct bpf_test *tst)
+{
+ int32_t ret, rv;
+ int64_t rc;
+ struct rte_bpf *bpf;
+ struct rte_bpf_jit jit;
+ uint8_t tbuf[tst->arg_sz];
+
+ printf("%s(%s) start\n", __func__, tst->name);
+
+ bpf = rte_bpf_load(&tst->prm);
+ if (bpf == NULL) {
+ printf("%s@%d: failed to load bpf code, error=%d(%s);\n",
+ __func__, __LINE__, rte_errno, strerror(rte_errno));
+ return -1;
+ }
+
+ tst->prepare(tbuf);
+
+ rc = rte_bpf_exec(bpf, tbuf);
+ ret = tst->check_result(rc, tbuf);
+ if (ret != 0) {
+ printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
+ __func__, __LINE__, tst->name, ret, strerror(ret));
+ }
+
+ rte_bpf_get_jit(bpf, &jit);
+ if (jit.func == NULL)
+ return 0;
+
+ tst->prepare(tbuf);
+ rc = jit.func(tbuf);
+ rv = tst->check_result(rc, tbuf);
+ ret |= rv;
+ if (rv != 0) {
+ printf("%s@%d: check_result(%s) failed, error: %d(%s);\n",
+			__func__, __LINE__, tst->name, rv, strerror(rv));
+ }
+
+ rte_bpf_destroy(bpf);
+ return ret;
+}
+
+static int
+test_bpf(void)
+{
+ int32_t rc, rv;
+ uint32_t i;
+
+ rc = 0;
+ for (i = 0; i != RTE_DIM(tests); i++) {
+ rv = run_test(tests + i);
+ if (tests[i].allow_fail == 0)
+ rc |= rv;
+ }
+
+ return rc;
+}
+
+REGISTER_TEST_COMMAND(bpf_autotest, test_bpf);
diff --git a/test/test/test_cmdline_cirbuf.c b/test/test/test_cmdline_cirbuf.c
index e9193f66..8ac326cb 100644
--- a/test/test/test_cmdline_cirbuf.c
+++ b/test/test/test_cmdline_cirbuf.c
@@ -483,7 +483,7 @@ test_cirbuf_string_get_del_partial(void)
memset(tmp, 0, sizeof(tmp));
memset(tmp2, 0, sizeof(tmp));
- snprintf(tmp2, sizeof(tmp2), "%s", CIRBUF_STR_HEAD);
+ strlcpy(tmp2, CIRBUF_STR_HEAD, sizeof(tmp2));
/*
* initialize circular buffer
diff --git a/test/test/test_cmdline_ipaddr.c b/test/test/test_cmdline_ipaddr.c
index 2eb5a774..8ee7f628 100644
--- a/test/test/test_cmdline_ipaddr.c
+++ b/test/test/test_cmdline_ipaddr.c
@@ -87,8 +87,6 @@ const struct ipaddr_str ipaddr_valid_strs[] = {
CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK},
{"192.168.1.0/24", {AF_INET, {IP4(192,168,1,0)}, 24},
CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK},
- {"012.34.56.78/24", {AF_INET, {IP4(12,34,56,78)}, 24},
- CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK},
{"34.56.78.90/1", {AF_INET, {IP4(34,56,78,90)}, 1},
CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK},
{"::", {AF_INET6, {IP6(0,0,0,0,0,0,0,0)}, 0},
diff --git a/test/test/test_common.c b/test/test/test_common.c
index d0342430..7a67e458 100644
--- a/test/test/test_common.c
+++ b/test/test/test_common.c
@@ -3,6 +3,7 @@
*/
#include <stdio.h>
+#include <inttypes.h>
#include <string.h>
#include <math.h>
#include <rte_common.h>
@@ -70,6 +71,9 @@ test_align(void)
#define FAIL_ALIGN(x, i, p)\
{printf(x "() test failed: %u %u\n", i, p);\
return -1;}
+#define FAIL_ALIGN64(x, j, q)\
+ {printf(x "() test failed: %"PRIu64" %"PRIu64"\n", j, q);\
+ return -1; }
#define ERROR_FLOOR(res, i, pow) \
(res % pow) || /* check if not aligned */ \
((res / pow) != (i / pow)) /* check if correct alignment */
@@ -80,6 +84,7 @@ test_align(void)
val / pow != (i / pow) + 1) /* if not aligned, hence +1 */
uint32_t i, p, val;
+ uint64_t j, q;
for (i = 1, p = 1; i <= MAX_NUM; i ++) {
if (rte_align32pow2(i) != p)
@@ -88,6 +93,27 @@ test_align(void)
p <<= 1;
}
+ for (i = 1, p = 1; i <= MAX_NUM; i++) {
+ if (rte_align32prevpow2(i) != p)
+ FAIL_ALIGN("rte_align32prevpow2", i, p);
+ if (rte_is_power_of_2(i + 1))
+ p = i + 1;
+ }
+
+ for (j = 1, q = 1; j <= MAX_NUM ; j++) {
+ if (rte_align64pow2(j) != q)
+ FAIL_ALIGN64("rte_align64pow2", j, q);
+ if (j == q)
+ q <<= 1;
+ }
+
+ for (j = 1, q = 1; j <= MAX_NUM ; j++) {
+ if (rte_align64prevpow2(j) != q)
+ FAIL_ALIGN64("rte_align64prevpow2", j, q);
+ if (rte_is_power_of_2(j + 1))
+ q = j + 1;
+ }
+
for (p = 2; p <= MAX_NUM; p <<= 1) {
if (!rte_is_power_of_2(p))
@@ -128,6 +154,18 @@ test_align(void)
FAIL("rte_is_aligned");
}
}
+
+ for (p = 1; p <= MAX_NUM / 2; p++) {
+ for (i = 1; i <= MAX_NUM / 2; i++) {
+ val = RTE_ALIGN_MUL_CEIL(i, p);
+ if (val % p != 0 || val < i)
+ FAIL_ALIGN("RTE_ALIGN_MUL_CEIL", i, p);
+ val = RTE_ALIGN_MUL_FLOOR(i, p);
+ if (val % p != 0 || val > i)
+ FAIL_ALIGN("RTE_ALIGN_MUL_FLOOR", i, p);
+ }
+ }
+
return 0;
}
diff --git a/test/test/test_compressdev.c b/test/test/test_compressdev.c
new file mode 100644
index 00000000..86453882
--- /dev/null
+++ b/test/test/test_compressdev.c
@@ -0,0 +1,1486 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <string.h>
+#include <zlib.h>
+#include <math.h>
+
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_compressdev.h>
+
+#include "test_compressdev_test_buffer.h"
+#include "test.h"
+
+#define DIV_CEIL(a, b) ((a) / (b) + ((a) % (b) != 0))
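+/* e.g. DIV_CEIL(600, 256) == 3; used below to size SGL mbuf chains */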
+
+#define DEFAULT_WINDOW_SIZE 15
+#define DEFAULT_MEM_LEVEL 8
+#define MAX_DEQD_RETRIES 10
+#define DEQUEUE_WAIT_TIME 10000
+
+/*
+ * Allow 30% extra space for compressed data compared to the original
+ * data, in case the data size cannot be reduced and actually grows
+ * due to the compression block headers.
+ */
+#define COMPRESS_BUF_SIZE_RATIO 1.3
+#define NUM_LARGE_MBUFS 16
+#define SMALL_SEG_SIZE 256
+#define MAX_SEGS 16
+#define NUM_OPS 16
+#define NUM_MAX_XFORMS 16
+#define NUM_MAX_INFLIGHT_OPS 128
+#define CACHE_SIZE 0
+
+const char *
+huffman_type_strings[] = {
+ [RTE_COMP_HUFFMAN_DEFAULT] = "PMD default",
+ [RTE_COMP_HUFFMAN_FIXED] = "Fixed",
+ [RTE_COMP_HUFFMAN_DYNAMIC] = "Dynamic"
+};
+
+enum zlib_direction {
+ ZLIB_NONE,
+ ZLIB_COMPRESS,
+ ZLIB_DECOMPRESS,
+ ZLIB_ALL
+};
+
+struct priv_op_data {
+ uint16_t orig_idx;
+};
+
+struct comp_testsuite_params {
+ struct rte_mempool *large_mbuf_pool;
+ struct rte_mempool *small_mbuf_pool;
+ struct rte_mempool *op_pool;
+ struct rte_comp_xform *def_comp_xform;
+ struct rte_comp_xform *def_decomp_xform;
+};
+
+static struct comp_testsuite_params testsuite_params = { 0 };
+
+static void
+testsuite_teardown(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+
+ rte_mempool_free(ts_params->large_mbuf_pool);
+ rte_mempool_free(ts_params->small_mbuf_pool);
+ rte_mempool_free(ts_params->op_pool);
+ rte_free(ts_params->def_comp_xform);
+ rte_free(ts_params->def_decomp_xform);
+}
+
+static int
+testsuite_setup(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ uint32_t max_buf_size = 0;
+ unsigned int i;
+
+ if (rte_compressdev_count() == 0) {
+ RTE_LOG(ERR, USER1, "Need at least one compress device\n");
+ return TEST_FAILED;
+ }
+
+ RTE_LOG(NOTICE, USER1, "Running tests on device %s\n",
+ rte_compressdev_name_get(0));
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++)
+ max_buf_size = RTE_MAX(max_buf_size,
+ strlen(compress_test_bufs[i]) + 1);
+
+	/*
+	 * Buffers to be used in compression and decompression.
+	 * Since compressed data might be larger than the original
+	 * data (due to block headers), buffers should be big
+	 * enough for both cases.
+	 */
+ max_buf_size *= COMPRESS_BUF_SIZE_RATIO;
+ ts_params->large_mbuf_pool = rte_pktmbuf_pool_create("large_mbuf_pool",
+ NUM_LARGE_MBUFS,
+ CACHE_SIZE, 0,
+ max_buf_size + RTE_PKTMBUF_HEADROOM,
+ rte_socket_id());
+ if (ts_params->large_mbuf_pool == NULL) {
+ RTE_LOG(ERR, USER1, "Large mbuf pool could not be created\n");
+ return TEST_FAILED;
+ }
+
+ /* Create mempool with smaller buffers for SGL testing */
+ ts_params->small_mbuf_pool = rte_pktmbuf_pool_create("small_mbuf_pool",
+ NUM_LARGE_MBUFS * MAX_SEGS,
+ CACHE_SIZE, 0,
+ SMALL_SEG_SIZE + RTE_PKTMBUF_HEADROOM,
+ rte_socket_id());
+ if (ts_params->small_mbuf_pool == NULL) {
+ RTE_LOG(ERR, USER1, "Small mbuf pool could not be created\n");
+ goto exit;
+ }
+
+ ts_params->op_pool = rte_comp_op_pool_create("op_pool", NUM_OPS,
+ 0, sizeof(struct priv_op_data),
+ rte_socket_id());
+ if (ts_params->op_pool == NULL) {
+ RTE_LOG(ERR, USER1, "Operation pool could not be created\n");
+ goto exit;
+ }
+
+ ts_params->def_comp_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+ if (ts_params->def_comp_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Default compress xform could not be created\n");
+ goto exit;
+ }
+ ts_params->def_decomp_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+ if (ts_params->def_decomp_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Default decompress xform could not be created\n");
+ goto exit;
+ }
+
+	/* Initialize default values for compress/decompress xforms */
+ ts_params->def_comp_xform->type = RTE_COMP_COMPRESS;
+	ts_params->def_comp_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
+ ts_params->def_comp_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DEFAULT;
+ ts_params->def_comp_xform->compress.level = RTE_COMP_LEVEL_PMD_DEFAULT;
+ ts_params->def_comp_xform->compress.chksum = RTE_COMP_CHECKSUM_NONE;
+ ts_params->def_comp_xform->compress.window_size = DEFAULT_WINDOW_SIZE;
+
+ ts_params->def_decomp_xform->type = RTE_COMP_DECOMPRESS;
+	ts_params->def_decomp_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
+ ts_params->def_decomp_xform->decompress.chksum = RTE_COMP_CHECKSUM_NONE;
+ ts_params->def_decomp_xform->decompress.window_size = DEFAULT_WINDOW_SIZE;
+
+ return TEST_SUCCESS;
+
+exit:
+ testsuite_teardown();
+
+ return TEST_FAILED;
+}
+
+static int
+generic_ut_setup(void)
+{
+ /* Configure compressdev (one device, one queue pair) */
+ struct rte_compressdev_config config = {
+ .socket_id = rte_socket_id(),
+ .nb_queue_pairs = 1,
+ .max_nb_priv_xforms = NUM_MAX_XFORMS,
+ .max_nb_streams = 0
+ };
+
+ if (rte_compressdev_configure(0, &config) < 0) {
+ RTE_LOG(ERR, USER1, "Device configuration failed\n");
+ return -1;
+ }
+
+ if (rte_compressdev_queue_pair_setup(0, 0, NUM_MAX_INFLIGHT_OPS,
+ rte_socket_id()) < 0) {
+ RTE_LOG(ERR, USER1, "Queue pair setup failed\n");
+ return -1;
+ }
+
+ if (rte_compressdev_start(0) < 0) {
+ RTE_LOG(ERR, USER1, "Device could not be started\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+generic_ut_teardown(void)
+{
+ rte_compressdev_stop(0);
+ if (rte_compressdev_close(0) < 0)
+ RTE_LOG(ERR, USER1, "Device could not be closed\n");
+}
+
+static int
+test_compressdev_invalid_configuration(void)
+{
+ struct rte_compressdev_config invalid_config;
+ struct rte_compressdev_config valid_config = {
+ .socket_id = rte_socket_id(),
+ .nb_queue_pairs = 1,
+ .max_nb_priv_xforms = NUM_MAX_XFORMS,
+ .max_nb_streams = 0
+ };
+ struct rte_compressdev_info dev_info;
+
+ /* Invalid configuration with 0 queue pairs */
+ memcpy(&invalid_config, &valid_config,
+ sizeof(struct rte_compressdev_config));
+ invalid_config.nb_queue_pairs = 0;
+
+ TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
+ "Device configuration was successful "
+ "with no queue pairs (invalid)\n");
+
+ /*
+ * Invalid configuration with too many queue pairs
+ * (if there is an actual maximum number of queue pairs)
+ */
+ rte_compressdev_info_get(0, &dev_info);
+ if (dev_info.max_nb_queue_pairs != 0) {
+ memcpy(&invalid_config, &valid_config,
+ sizeof(struct rte_compressdev_config));
+ invalid_config.nb_queue_pairs = dev_info.max_nb_queue_pairs + 1;
+
+ TEST_ASSERT_FAIL(rte_compressdev_configure(0, &invalid_config),
+ "Device configuration was successful "
+ "with too many queue pairs (invalid)\n");
+ }
+
+ /* Invalid queue pair setup, with no number of queue pairs set */
+ TEST_ASSERT_FAIL(rte_compressdev_queue_pair_setup(0, 0,
+ NUM_MAX_INFLIGHT_OPS, rte_socket_id()),
+ "Queue pair setup was successful "
+ "with no queue pairs set (invalid)\n");
+
+ return TEST_SUCCESS;
+}
+
+static int
+compare_buffers(const char *buffer1, uint32_t buffer1_len,
+ const char *buffer2, uint32_t buffer2_len)
+{
+ if (buffer1_len != buffer2_len) {
+ RTE_LOG(ERR, USER1, "Buffer lengths are different\n");
+ return -1;
+ }
+
+ if (memcmp(buffer1, buffer2, buffer1_len) != 0) {
+ RTE_LOG(ERR, USER1, "Buffers are different\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Maps compressdev and Zlib flush flags
+ */
+static int
+map_zlib_flush_flag(enum rte_comp_flush_flag flag)
+{
+ switch (flag) {
+ case RTE_COMP_FLUSH_NONE:
+ return Z_NO_FLUSH;
+ case RTE_COMP_FLUSH_SYNC:
+ return Z_SYNC_FLUSH;
+ case RTE_COMP_FLUSH_FULL:
+ return Z_FULL_FLUSH;
+ case RTE_COMP_FLUSH_FINAL:
+ return Z_FINISH;
+ /*
+ * There should be only the values above,
+ * so this should never happen
+ */
+ default:
+ return -1;
+ }
+}
+
+static int
+compress_zlib(struct rte_comp_op *op,
+ const struct rte_comp_xform *xform, int mem_level)
+{
+ z_stream stream;
+ int zlib_flush;
+ int strategy, window_bits, comp_level;
+ int ret = TEST_FAILED;
+ uint8_t *single_src_buf = NULL;
+ uint8_t *single_dst_buf = NULL;
+
+ /* initialize zlib stream */
+ stream.zalloc = Z_NULL;
+ stream.zfree = Z_NULL;
+ stream.opaque = Z_NULL;
+
+ if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
+ strategy = Z_FIXED;
+ else
+ strategy = Z_DEFAULT_STRATEGY;
+
+ /*
+ * Window bits is the base two logarithm of the window size (in bytes).
+ * When doing raw DEFLATE, this number will be negative.
+ */
+ window_bits = -(xform->compress.window_size);
+
+ comp_level = xform->compress.level;
+
+ if (comp_level != RTE_COMP_LEVEL_NONE)
+ ret = deflateInit2(&stream, comp_level, Z_DEFLATED,
+ window_bits, mem_level, strategy);
+ else
+ ret = deflateInit(&stream, Z_NO_COMPRESSION);
+
+ if (ret != Z_OK) {
+ printf("Zlib deflate could not be initialized\n");
+ goto exit;
+ }
+
+ /* Assuming stateless operation */
+ /* SGL */
+ if (op->m_src->nb_segs > 1) {
+ single_src_buf = rte_malloc(NULL,
+ rte_pktmbuf_pkt_len(op->m_src), 0);
+ if (single_src_buf == NULL) {
+ RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
+ goto exit;
+ }
+ single_dst_buf = rte_malloc(NULL,
+ rte_pktmbuf_pkt_len(op->m_dst), 0);
+ if (single_dst_buf == NULL) {
+ RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
+ goto exit;
+ }
+ if (rte_pktmbuf_read(op->m_src, 0,
+ rte_pktmbuf_pkt_len(op->m_src),
+ single_src_buf) == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Buffer could not be read entirely\n");
+ goto exit;
+ }
+
+ stream.avail_in = op->src.length;
+ stream.next_in = single_src_buf;
+ stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
+ stream.next_out = single_dst_buf;
+
+ } else {
+ stream.avail_in = op->src.length;
+ stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
+ stream.avail_out = op->m_dst->data_len;
+ stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+ }
+	/* Stateless operation, the whole buffer is compressed in one go */
+ zlib_flush = map_zlib_flush_flag(op->flush_flag);
+ ret = deflate(&stream, zlib_flush);
+
+ if (stream.avail_in != 0) {
+ RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
+ goto exit;
+ }
+
+ if (ret != Z_STREAM_END)
+ goto exit;
+
+ /* Copy data to destination SGL */
+ if (op->m_src->nb_segs > 1) {
+ uint32_t remaining_data = stream.total_out;
+ uint8_t *src_data = single_dst_buf;
+ struct rte_mbuf *dst_buf = op->m_dst;
+
+ while (remaining_data > 0) {
+ uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
+ uint8_t *);
+ /* Last segment */
+ if (remaining_data < dst_buf->data_len) {
+ memcpy(dst_data, src_data, remaining_data);
+ remaining_data = 0;
+ } else {
+ memcpy(dst_data, src_data, dst_buf->data_len);
+ remaining_data -= dst_buf->data_len;
+ src_data += dst_buf->data_len;
+ dst_buf = dst_buf->next;
+ }
+ }
+ }
+
+ op->consumed = stream.total_in;
+ op->produced = stream.total_out;
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ deflateReset(&stream);
+
+ ret = 0;
+exit:
+ deflateEnd(&stream);
+ rte_free(single_src_buf);
+ rte_free(single_dst_buf);
+
+ return ret;
+}
+
+static int
+decompress_zlib(struct rte_comp_op *op,
+ const struct rte_comp_xform *xform)
+{
+ z_stream stream;
+ int window_bits;
+ int zlib_flush;
+ int ret = TEST_FAILED;
+ uint8_t *single_src_buf = NULL;
+ uint8_t *single_dst_buf = NULL;
+
+ /* initialize zlib stream */
+ stream.zalloc = Z_NULL;
+ stream.zfree = Z_NULL;
+ stream.opaque = Z_NULL;
+
+ /*
+ * Window bits is the base two logarithm of the window size (in bytes).
+ * When doing raw DEFLATE, this number will be negative.
+ */
+ window_bits = -(xform->decompress.window_size);
+
+ ret = inflateInit2(&stream, window_bits);
+
+ if (ret != Z_OK) {
+ printf("Zlib deflate could not be initialized\n");
+ goto exit;
+ }
+
+ /* Assuming stateless operation */
+ /* SGL */
+ if (op->m_src->nb_segs > 1) {
+ single_src_buf = rte_malloc(NULL,
+ rte_pktmbuf_pkt_len(op->m_src), 0);
+ if (single_src_buf == NULL) {
+ RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
+ goto exit;
+ }
+ single_dst_buf = rte_malloc(NULL,
+ rte_pktmbuf_pkt_len(op->m_dst), 0);
+ if (single_dst_buf == NULL) {
+ RTE_LOG(ERR, USER1, "Buffer could not be allocated\n");
+ goto exit;
+ }
+ if (rte_pktmbuf_read(op->m_src, 0,
+ rte_pktmbuf_pkt_len(op->m_src),
+ single_src_buf) == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Buffer could not be read entirely\n");
+ goto exit;
+ }
+
+ stream.avail_in = op->src.length;
+ stream.next_in = single_src_buf;
+ stream.avail_out = rte_pktmbuf_pkt_len(op->m_dst);
+ stream.next_out = single_dst_buf;
+
+ } else {
+ stream.avail_in = op->src.length;
+ stream.next_in = rte_pktmbuf_mtod(op->m_src, uint8_t *);
+ stream.avail_out = op->m_dst->data_len;
+ stream.next_out = rte_pktmbuf_mtod(op->m_dst, uint8_t *);
+ }
+
+	/* Stateless operation, the whole buffer is decompressed in one go */
+ zlib_flush = map_zlib_flush_flag(op->flush_flag);
+ ret = inflate(&stream, zlib_flush);
+
+ if (stream.avail_in != 0) {
+ RTE_LOG(ERR, USER1, "Buffer could not be read entirely\n");
+ goto exit;
+ }
+
+ if (ret != Z_STREAM_END)
+ goto exit;
+
+ if (op->m_src->nb_segs > 1) {
+ uint32_t remaining_data = stream.total_out;
+ uint8_t *src_data = single_dst_buf;
+ struct rte_mbuf *dst_buf = op->m_dst;
+
+ while (remaining_data > 0) {
+ uint8_t *dst_data = rte_pktmbuf_mtod(dst_buf,
+ uint8_t *);
+ /* Last segment */
+ if (remaining_data < dst_buf->data_len) {
+ memcpy(dst_data, src_data, remaining_data);
+ remaining_data = 0;
+ } else {
+ memcpy(dst_data, src_data, dst_buf->data_len);
+ remaining_data -= dst_buf->data_len;
+ src_data += dst_buf->data_len;
+ dst_buf = dst_buf->next;
+ }
+ }
+ }
+
+ op->consumed = stream.total_in;
+ op->produced = stream.total_out;
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ inflateReset(&stream);
+
+ ret = 0;
+exit:
+ inflateEnd(&stream);
+
+ return ret;
+}
+
+static int
+prepare_sgl_bufs(const char *test_buf, struct rte_mbuf *head_buf,
+ uint32_t total_data_size,
+ struct rte_mempool *small_mbuf_pool,
+ struct rte_mempool *large_mbuf_pool,
+ uint8_t limit_segs_in_sgl)
+{
+ uint32_t remaining_data = total_data_size;
+ uint16_t num_remaining_segs = DIV_CEIL(remaining_data, SMALL_SEG_SIZE);
+ struct rte_mempool *pool;
+ struct rte_mbuf *next_seg;
+ uint32_t data_size;
+ char *buf_ptr;
+ const char *data_ptr = test_buf;
+ uint16_t i;
+ int ret;
+
+ if (limit_segs_in_sgl != 0 && num_remaining_segs > limit_segs_in_sgl)
+ num_remaining_segs = limit_segs_in_sgl - 1;
+
+ /*
+ * Allocate data in the first segment (header) and
+ * copy data if test buffer is provided
+ */
+ if (remaining_data < SMALL_SEG_SIZE)
+ data_size = remaining_data;
+ else
+ data_size = SMALL_SEG_SIZE;
+ buf_ptr = rte_pktmbuf_append(head_buf, data_size);
+ if (buf_ptr == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Not enough space in the 1st buffer\n");
+ return -1;
+ }
+
+ if (data_ptr != NULL) {
+ /* Copy characters without NULL terminator */
+ strncpy(buf_ptr, data_ptr, data_size);
+ data_ptr += data_size;
+ }
+ remaining_data -= data_size;
+ num_remaining_segs--;
+
+ /*
+ * Allocate the rest of the segments,
+ * copy the rest of the data and chain the segments.
+ */
+ for (i = 0; i < num_remaining_segs; i++) {
+
+ if (i == (num_remaining_segs - 1)) {
+ /* last segment */
+ if (remaining_data > SMALL_SEG_SIZE)
+ pool = large_mbuf_pool;
+ else
+ pool = small_mbuf_pool;
+ data_size = remaining_data;
+ } else {
+ data_size = SMALL_SEG_SIZE;
+ pool = small_mbuf_pool;
+ }
+
+ next_seg = rte_pktmbuf_alloc(pool);
+ if (next_seg == NULL) {
+ RTE_LOG(ERR, USER1,
+ "New segment could not be allocated "
+ "from the mempool\n");
+ return -1;
+ }
+ buf_ptr = rte_pktmbuf_append(next_seg, data_size);
+ if (buf_ptr == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Not enough space in the buffer\n");
+ rte_pktmbuf_free(next_seg);
+ return -1;
+ }
+ if (data_ptr != NULL) {
+ /* Copy characters without NULL terminator */
+ strncpy(buf_ptr, data_ptr, data_size);
+ data_ptr += data_size;
+ }
+ remaining_data -= data_size;
+
+ ret = rte_pktmbuf_chain(head_buf, next_seg);
+ if (ret != 0) {
+ rte_pktmbuf_free(next_seg);
+ RTE_LOG(ERR, USER1,
+ "Segment could not chained\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
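+
+/*
+ * Resulting chain for, e.g., total_data_size = 600 with
+ * SMALL_SEG_SIZE = 256 (a sketch):
+ *
+ *	head_buf (256 B) -> seg (256 B) -> seg (88 B)
+ *
+ * All segments but possibly the last come from the small pool; the
+ * last one falls back to the large pool when the remaining data does
+ * not fit in a small segment.
+ */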
+
+/*
+ * Compresses and decompresses buffer with compressdev API and Zlib API
+ */
+static int
+test_deflate_comp_decomp(const char * const test_bufs[],
+ unsigned int num_bufs,
+ uint16_t buf_idx[],
+ struct rte_comp_xform *compress_xforms[],
+ struct rte_comp_xform *decompress_xforms[],
+ unsigned int num_xforms,
+ enum rte_comp_op_type state,
+ unsigned int sgl,
+ enum zlib_direction zlib_dir)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ int ret_status = -1;
+ int ret;
+ struct rte_mbuf *uncomp_bufs[num_bufs];
+ struct rte_mbuf *comp_bufs[num_bufs];
+ struct rte_comp_op *ops[num_bufs];
+ struct rte_comp_op *ops_processed[num_bufs];
+ void *priv_xforms[num_bufs];
+ uint16_t num_enqd, num_deqd, num_total_deqd;
+ uint16_t num_priv_xforms = 0;
+ unsigned int deqd_retries = 0;
+ struct priv_op_data *priv_data;
+ char *buf_ptr;
+ unsigned int i;
+ struct rte_mempool *buf_pool;
+ uint32_t data_size;
+ const struct rte_compressdev_capabilities *capa =
+ rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+ char *contig_buf = NULL;
+
+ /* Initialize all arrays to NULL */
+ memset(uncomp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
+ memset(comp_bufs, 0, sizeof(struct rte_mbuf *) * num_bufs);
+ memset(ops, 0, sizeof(struct rte_comp_op *) * num_bufs);
+ memset(ops_processed, 0, sizeof(struct rte_comp_op *) * num_bufs);
+ memset(priv_xforms, 0, sizeof(void *) * num_bufs);
+
+ if (sgl)
+ buf_pool = ts_params->small_mbuf_pool;
+ else
+ buf_pool = ts_params->large_mbuf_pool;
+
+ /* Prepare the source mbufs with the data */
+ ret = rte_pktmbuf_alloc_bulk(buf_pool,
+ uncomp_bufs, num_bufs);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Source mbufs could not be allocated "
+ "from the mempool\n");
+ goto exit;
+ }
+
+ if (sgl) {
+ for (i = 0; i < num_bufs; i++) {
+ data_size = strlen(test_bufs[i]) + 1;
+ if (prepare_sgl_bufs(test_bufs[i], uncomp_bufs[i],
+ data_size,
+ ts_params->small_mbuf_pool,
+ ts_params->large_mbuf_pool,
+ MAX_SEGS) < 0)
+ goto exit;
+ }
+ } else {
+ for (i = 0; i < num_bufs; i++) {
+ data_size = strlen(test_bufs[i]) + 1;
+ buf_ptr = rte_pktmbuf_append(uncomp_bufs[i], data_size);
+ snprintf(buf_ptr, data_size, "%s", test_bufs[i]);
+ }
+ }
+
+ /* Prepare the destination mbufs */
+ ret = rte_pktmbuf_alloc_bulk(buf_pool, comp_bufs, num_bufs);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Destination mbufs could not be allocated "
+ "from the mempool\n");
+ goto exit;
+ }
+
+ if (sgl) {
+ for (i = 0; i < num_bufs; i++) {
+ data_size = strlen(test_bufs[i]) *
+ COMPRESS_BUF_SIZE_RATIO;
+ if (prepare_sgl_bufs(NULL, comp_bufs[i],
+ data_size,
+ ts_params->small_mbuf_pool,
+ ts_params->large_mbuf_pool,
+ MAX_SEGS) < 0)
+ goto exit;
+ }
+
+ } else {
+ for (i = 0; i < num_bufs; i++) {
+ data_size = strlen(test_bufs[i]) *
+ COMPRESS_BUF_SIZE_RATIO;
+ rte_pktmbuf_append(comp_bufs[i], data_size);
+ }
+ }
+
+ /* Build the compression operations */
+ ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Compress operations could not be allocated "
+ "from the mempool\n");
+ goto exit;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ ops[i]->m_src = uncomp_bufs[i];
+ ops[i]->m_dst = comp_bufs[i];
+ ops[i]->src.offset = 0;
+ ops[i]->src.length = rte_pktmbuf_pkt_len(uncomp_bufs[i]);
+ ops[i]->dst.offset = 0;
+ if (state == RTE_COMP_OP_STATELESS) {
+ ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
+ } else {
+ RTE_LOG(ERR, USER1,
+ "Stateful operations are not supported "
+ "in these tests yet\n");
+ goto exit;
+ }
+ ops[i]->input_chksum = 0;
+		/*
+		 * Store the original operation index in private data:
+		 * ordering does not have to be maintained when dequeuing
+		 * from compressdev, so the results can still be matched
+		 * to their source buffers at the end of the test.
+		 */
+ priv_data = (struct priv_op_data *) (ops[i] + 1);
+ priv_data->orig_idx = i;
+ }
+
+	/* Compress data (either with Zlib API or compressdev API) */
+ if (zlib_dir == ZLIB_COMPRESS || zlib_dir == ZLIB_ALL) {
+ for (i = 0; i < num_bufs; i++) {
+ const struct rte_comp_xform *compress_xform =
+ compress_xforms[i % num_xforms];
+ ret = compress_zlib(ops[i], compress_xform,
+ DEFAULT_MEM_LEVEL);
+ if (ret < 0)
+ goto exit;
+
+ ops_processed[i] = ops[i];
+ }
+ } else {
+ /* Create compress private xform data */
+ for (i = 0; i < num_xforms; i++) {
+ ret = rte_compressdev_private_xform_create(0,
+ (const struct rte_comp_xform *)compress_xforms[i],
+ &priv_xforms[i]);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Compression private xform "
+ "could not be created\n");
+ goto exit;
+ }
+ num_priv_xforms++;
+ }
+
+ if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
+ /* Attach shareable private xform data to ops */
+ for (i = 0; i < num_bufs; i++)
+ ops[i]->private_xform = priv_xforms[i % num_xforms];
+ } else {
+ /* Create rest of the private xforms for the other ops */
+ for (i = num_xforms; i < num_bufs; i++) {
+ ret = rte_compressdev_private_xform_create(0,
+ compress_xforms[i % num_xforms],
+ &priv_xforms[i]);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Compression private xform "
+ "could not be created\n");
+ goto exit;
+ }
+ num_priv_xforms++;
+ }
+
+ /* Attach non shareable private xform data to ops */
+ for (i = 0; i < num_bufs; i++)
+ ops[i]->private_xform = priv_xforms[i];
+ }
+
+ /* Enqueue and dequeue all operations */
+ num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
+ if (num_enqd < num_bufs) {
+ RTE_LOG(ERR, USER1,
+ "The operations could not be enqueued\n");
+ goto exit;
+ }
+
+ num_total_deqd = 0;
+ do {
+ /*
+ * If retrying a dequeue call, wait for 10 ms to allow
+ * enough time to the driver to process the operations
+ */
+ if (deqd_retries != 0) {
+ /*
+ * Avoid infinite loop if not all the
+ * operations get out of the device
+ */
+ if (deqd_retries == MAX_DEQD_RETRIES) {
+ RTE_LOG(ERR, USER1,
+ "Not all operations could be "
+ "dequeued\n");
+ goto exit;
+ }
+ usleep(DEQUEUE_WAIT_TIME);
+ }
+ num_deqd = rte_compressdev_dequeue_burst(0, 0,
+ &ops_processed[num_total_deqd], num_bufs);
+ num_total_deqd += num_deqd;
+ deqd_retries++;
+ } while (num_total_deqd < num_enqd);
+
+ deqd_retries = 0;
+
+ /* Free compress private xforms */
+ for (i = 0; i < num_priv_xforms; i++) {
+ rte_compressdev_private_xform_free(0, priv_xforms[i]);
+ priv_xforms[i] = NULL;
+ }
+ num_priv_xforms = 0;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
+ uint16_t xform_idx = priv_data->orig_idx % num_xforms;
+ const struct rte_comp_compress_xform *compress_xform =
+ &compress_xforms[xform_idx]->compress;
+ enum rte_comp_huffman huffman_type =
+ compress_xform->deflate.huffman;
+ RTE_LOG(DEBUG, USER1, "Buffer %u compressed from %u to %u bytes "
+ "(level = %d, huffman = %s)\n",
+ buf_idx[priv_data->orig_idx],
+ ops_processed[i]->consumed, ops_processed[i]->produced,
+ compress_xform->level,
+ huffman_type_strings[huffman_type]);
+ RTE_LOG(DEBUG, USER1, "Compression ratio = %.2f %%\n",
+ (float)ops_processed[i]->produced /
+ ops_processed[i]->consumed * 100);
+ ops[i] = NULL;
+ }
+
+ /*
+ * Check operation status and free source mbufs (destination mbufs
+ * and compress operation information are needed for the
+ * decompression stage)
+ */
+ for (i = 0; i < num_bufs; i++) {
+ if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+ RTE_LOG(ERR, USER1,
+ "Some operations were not successful\n");
+ goto exit;
+ }
+ priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
+ rte_pktmbuf_free(uncomp_bufs[priv_data->orig_idx]);
+ uncomp_bufs[priv_data->orig_idx] = NULL;
+ }
+
+ /* Allocate buffers for decompressed data */
+ ret = rte_pktmbuf_alloc_bulk(buf_pool, uncomp_bufs, num_bufs);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Destination mbufs could not be allocated "
+ "from the mempool\n");
+ goto exit;
+ }
+
+ if (sgl) {
+ for (i = 0; i < num_bufs; i++) {
+ priv_data = (struct priv_op_data *)
+ (ops_processed[i] + 1);
+ data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
+ if (prepare_sgl_bufs(NULL, uncomp_bufs[i],
+ data_size,
+ ts_params->small_mbuf_pool,
+ ts_params->large_mbuf_pool,
+ MAX_SEGS) < 0)
+ goto exit;
+ }
+
+ } else {
+ for (i = 0; i < num_bufs; i++) {
+ priv_data = (struct priv_op_data *)
+ (ops_processed[i] + 1);
+ data_size = strlen(test_bufs[priv_data->orig_idx]) + 1;
+ rte_pktmbuf_append(uncomp_bufs[i], data_size);
+ }
+ }
+
+ /* Build the decompression operations */
+ ret = rte_comp_op_bulk_alloc(ts_params->op_pool, ops, num_bufs);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Decompress operations could not be allocated "
+ "from the mempool\n");
+ goto exit;
+ }
+
+ /* Source buffer is the compressed data from the previous operations */
+ for (i = 0; i < num_bufs; i++) {
+ ops[i]->m_src = ops_processed[i]->m_dst;
+ ops[i]->m_dst = uncomp_bufs[i];
+ ops[i]->src.offset = 0;
+ /*
+ * Set the length of the compressed data to the
+ * number of bytes that were produced in the previous stage
+ */
+ ops[i]->src.length = ops_processed[i]->produced;
+ ops[i]->dst.offset = 0;
+ if (state == RTE_COMP_OP_STATELESS) {
+ ops[i]->flush_flag = RTE_COMP_FLUSH_FINAL;
+ } else {
+ RTE_LOG(ERR, USER1,
+ "Stateful operations are not supported "
+ "in these tests yet\n");
+ goto exit;
+ }
+ ops[i]->input_chksum = 0;
+ /*
+ * Copy private data from the previous operation
+ * to keep track of the original buffer
+ */
+ memcpy(ops[i] + 1, ops_processed[i] + 1,
+ sizeof(struct priv_op_data));
+ }
+
+ /*
+ * Free the previous compress operations,
+ * as they are not needed anymore
+ */
+ for (i = 0; i < num_bufs; i++) {
+ rte_comp_op_free(ops_processed[i]);
+ ops_processed[i] = NULL;
+ }
+
+ /* Decompress data (either with Zlib API or compressdev API) */
+ if (zlib_dir == ZLIB_DECOMPRESS || zlib_dir == ZLIB_ALL) {
+ for (i = 0; i < num_bufs; i++) {
+ priv_data = (struct priv_op_data *)(ops[i] + 1);
+ uint16_t xform_idx = priv_data->orig_idx % num_xforms;
+ const struct rte_comp_xform *decompress_xform =
+ decompress_xforms[xform_idx];
+
+ ret = decompress_zlib(ops[i], decompress_xform);
+ if (ret < 0)
+ goto exit;
+
+ ops_processed[i] = ops[i];
+ }
+ } else {
+ /* Create decompress private xform data */
+ for (i = 0; i < num_xforms; i++) {
+ ret = rte_compressdev_private_xform_create(0,
+ (const struct rte_comp_xform *)decompress_xforms[i],
+ &priv_xforms[i]);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Decompression private xform "
+ "could not be created\n");
+ goto exit;
+ }
+ num_priv_xforms++;
+ }
+
+ if (capa->comp_feature_flags & RTE_COMP_FF_SHAREABLE_PRIV_XFORM) {
+ /* Attach shareable private xform data to ops */
+ for (i = 0; i < num_bufs; i++) {
+ priv_data = (struct priv_op_data *)(ops[i] + 1);
+ uint16_t xform_idx = priv_data->orig_idx %
+ num_xforms;
+ ops[i]->private_xform = priv_xforms[xform_idx];
+ }
+ } else {
+ /* Create rest of the private xforms for the other ops */
+ for (i = num_xforms; i < num_bufs; i++) {
+ ret = rte_compressdev_private_xform_create(0,
+ decompress_xforms[i % num_xforms],
+ &priv_xforms[i]);
+ if (ret < 0) {
+ RTE_LOG(ERR, USER1,
+ "Decompression private xform "
+ "could not be created\n");
+ goto exit;
+ }
+ num_priv_xforms++;
+ }
+
+ /* Attach non shareable private xform data to ops */
+ for (i = 0; i < num_bufs; i++) {
+ priv_data = (struct priv_op_data *)(ops[i] + 1);
+ uint16_t xform_idx = priv_data->orig_idx;
+ ops[i]->private_xform = priv_xforms[xform_idx];
+ }
+ }
+
+ /* Enqueue and dequeue all operations */
+ num_enqd = rte_compressdev_enqueue_burst(0, 0, ops, num_bufs);
+ if (num_enqd < num_bufs) {
+ RTE_LOG(ERR, USER1,
+ "The operations could not be enqueued\n");
+ goto exit;
+ }
+
+ num_total_deqd = 0;
+ do {
+ /*
+ * If retrying a dequeue call, wait for 10 ms to allow
+ * the driver enough time to process the operations
+ */
+ if (deqd_retries != 0) {
+ /*
+ * Avoid infinite loop if not all the
+ * operations get out of the device
+ */
+ if (deqd_retries == MAX_DEQD_RETRIES) {
+ RTE_LOG(ERR, USER1,
+ "Not all operations could be "
+ "dequeued\n");
+ goto exit;
+ }
+ usleep(DEQUEUE_WAIT_TIME);
+ }
+ num_deqd = rte_compressdev_dequeue_burst(0, 0,
+ &ops_processed[num_total_deqd], num_bufs);
+ num_total_deqd += num_deqd;
+ deqd_retries++;
+ } while (num_total_deqd < num_enqd);
+
+ deqd_retries = 0;
+ }
+
+ for (i = 0; i < num_bufs; i++) {
+ priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
+ RTE_LOG(DEBUG, USER1, "Buffer %u decompressed from %u to %u bytes\n",
+ buf_idx[priv_data->orig_idx],
+ ops_processed[i]->consumed, ops_processed[i]->produced);
+ ops[i] = NULL;
+ }
+
+ /*
+ * Check operation status and free source mbufs (destination mbufs
+ * and operation information are still needed for the comparison)
+ */
+ for (i = 0; i < num_bufs; i++) {
+ if (ops_processed[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+ RTE_LOG(ERR, USER1,
+ "Some operations were not successful\n");
+ goto exit;
+ }
+ priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
+ rte_pktmbuf_free(comp_bufs[priv_data->orig_idx]);
+ comp_bufs[priv_data->orig_idx] = NULL;
+ }
+
+ /*
+ * Compare the original stream with the decompressed stream
+ * (both size and content)
+ */
+ for (i = 0; i < num_bufs; i++) {
+ priv_data = (struct priv_op_data *)(ops_processed[i] + 1);
+ const char *buf1 = test_bufs[priv_data->orig_idx];
+ const char *buf2;
+ contig_buf = rte_malloc(NULL, ops_processed[i]->produced, 0);
+ if (contig_buf == NULL) {
+ RTE_LOG(ERR, USER1, "Contiguous buffer could not "
+ "be allocated\n");
+ goto exit;
+ }
+
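+ /*
+ * rte_pktmbuf_read() returns a pointer into the mbuf if the
+ * requested region is contiguous, and only copies into contig_buf
+ * otherwise, so buf2 may or may not alias contig_buf.
+ */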
+ buf2 = rte_pktmbuf_read(ops_processed[i]->m_dst, 0,
+ ops_processed[i]->produced, contig_buf);
+
+ if (compare_buffers(buf1, strlen(buf1) + 1,
+ buf2, ops_processed[i]->produced) < 0)
+ goto exit;
+
+ rte_free(contig_buf);
+ contig_buf = NULL;
+ }
+
+ ret_status = 0;
+
+exit:
+ /* Free resources */
+ for (i = 0; i < num_bufs; i++) {
+ rte_pktmbuf_free(uncomp_bufs[i]);
+ rte_pktmbuf_free(comp_bufs[i]);
+ rte_comp_op_free(ops[i]);
+ rte_comp_op_free(ops_processed[i]);
+ }
+ for (i = 0; i < num_priv_xforms; i++) {
+ if (priv_xforms[i] != NULL)
+ rte_compressdev_private_xform_free(0, priv_xforms[i]);
+ }
+ rte_free(contig_buf);
+
+ return ret_status;
+}
+
+static int
+test_compressdev_deflate_stateless_fixed(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ const char *test_buffer;
+ uint16_t i;
+ int ret;
+ const struct rte_compressdev_capabilities *capab;
+
+ capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+ TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
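+ /* Fixed Huffman support is optional, so skip rather than fail */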
+ if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0)
+ return -ENOTSUP;
+
+ struct rte_comp_xform *compress_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+
+ if (compress_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Compress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(compress_xform, ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+ compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_FIXED;
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
+ test_buffer = compress_test_bufs[i];
+
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ /* Compress with Zlib, decompress with compressdev */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_COMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+ }
+
+ ret = TEST_SUCCESS;
+
+exit:
+ rte_free(compress_xform);
+ return ret;
+}
+
+static int
+test_compressdev_deflate_stateless_dynamic(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ const char *test_buffer;
+ uint16_t i;
+ int ret;
+ const struct rte_compressdev_capabilities *capab;
+
+ capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+ TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+ if ((capab->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC) == 0)
+ return -ENOTSUP;
+
+ /* Allocate only after the capability checks, so the early returns
+ * above cannot leak the xform
+ */
+ struct rte_comp_xform *compress_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+
+ if (compress_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Compress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(compress_xform, ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+ compress_xform->compress.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC;
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
+ test_buffer = compress_test_bufs[i];
+
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ /* Compress with Zlib, decompress with compressdev */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_COMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+ }
+
+ ret = TEST_SUCCESS;
+
+exit:
+ rte_free(compress_xform);
+ return ret;
+}
+
+static int
+test_compressdev_deflate_stateless_multi_op(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ uint16_t num_bufs = RTE_DIM(compress_test_bufs);
+ uint16_t buf_idx[num_bufs];
+ uint16_t i;
+
+ for (i = 0; i < num_bufs; i++)
+ buf_idx[i] = i;
+
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
+ buf_idx,
+ &ts_params->def_comp_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0)
+ return TEST_FAILED;
+
+ /* Compress with Zlib, decompress with compressdev */
+ if (test_deflate_comp_decomp(compress_test_bufs, num_bufs,
+ buf_idx,
+ &ts_params->def_comp_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_COMPRESS) < 0)
+ return TEST_FAILED;
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_compressdev_deflate_stateless_multi_level(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ const char *test_buffer;
+ unsigned int level;
+ uint16_t i;
+ int ret;
+ struct rte_comp_xform *compress_xform =
+ rte_malloc(NULL, sizeof(struct rte_comp_xform), 0);
+
+ if (compress_xform == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Compress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(compress_xform, ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
+ test_buffer = compress_test_bufs[i];
+ for (level = RTE_COMP_LEVEL_MIN; level <= RTE_COMP_LEVEL_MAX;
+ level++) {
+ compress_xform->compress.level = level;
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &compress_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+ }
+ }
+
+ ret = TEST_SUCCESS;
+
+exit:
+ rte_free(compress_xform);
+ return ret;
+}
+
+#define NUM_XFORMS 3
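+/* Each xform in the multi-xform test is given a different compression level */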
+static int
+test_compressdev_deflate_stateless_multi_xform(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ uint16_t num_bufs = NUM_XFORMS;
+ struct rte_comp_xform *compress_xforms[NUM_XFORMS] = {NULL};
+ struct rte_comp_xform *decompress_xforms[NUM_XFORMS] = {NULL};
+ const char *test_buffers[NUM_XFORMS];
+ uint16_t i;
+ unsigned int level = RTE_COMP_LEVEL_MIN;
+ uint16_t buf_idx[num_bufs];
+
+ int ret;
+
+ /* Create multiple xforms with various levels */
+ for (i = 0; i < NUM_XFORMS; i++) {
+ compress_xforms[i] = rte_malloc(NULL,
+ sizeof(struct rte_comp_xform), 0);
+ if (compress_xforms[i] == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Compress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(compress_xforms[i], ts_params->def_comp_xform,
+ sizeof(struct rte_comp_xform));
+ compress_xforms[i]->compress.level = level;
+ level++;
+
+ decompress_xforms[i] = rte_malloc(NULL,
+ sizeof(struct rte_comp_xform), 0);
+ if (decompress_xforms[i] == NULL) {
+ RTE_LOG(ERR, USER1,
+ "Decompress xform could not be created\n");
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ memcpy(decompress_xforms[i], ts_params->def_decomp_xform,
+ sizeof(struct rte_comp_xform));
+ }
+
+ for (i = 0; i < NUM_XFORMS; i++) {
+ buf_idx[i] = 0;
+ /* Use the same buffer in all sessions */
+ test_buffers[i] = compress_test_bufs[0];
+ }
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(test_buffers, num_bufs,
+ buf_idx,
+ compress_xforms,
+ decompress_xforms,
+ NUM_XFORMS,
+ RTE_COMP_OP_STATELESS,
+ 0,
+ ZLIB_DECOMPRESS) < 0) {
+ ret = TEST_FAILED;
+ goto exit;
+ }
+
+ ret = TEST_SUCCESS;
+exit:
+ for (i = 0; i < NUM_XFORMS; i++) {
+ rte_free(compress_xforms[i]);
+ rte_free(decompress_xforms[i]);
+ }
+
+ return ret;
+}
+
+static int
+test_compressdev_deflate_stateless_sgl(void)
+{
+ struct comp_testsuite_params *ts_params = &testsuite_params;
+ uint16_t i;
+ const char *test_buffer;
+ const struct rte_compressdev_capabilities *capab;
+
+ capab = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
+ TEST_ASSERT(capab != NULL, "Failed to retrieve device capabilities");
+
+ if ((capab->comp_feature_flags & RTE_COMP_FF_OOP_SGL_IN_SGL_OUT) == 0)
+ return -ENOTSUP;
+
+ for (i = 0; i < RTE_DIM(compress_test_bufs); i++) {
+ test_buffer = compress_test_bufs[i];
+ /* Compress with compressdev, decompress with Zlib */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &ts_params->def_comp_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 1,
+ ZLIB_DECOMPRESS) < 0)
+ return TEST_FAILED;
+
+ /* Compress with Zlib, decompress with compressdev */
+ if (test_deflate_comp_decomp(&test_buffer, 1,
+ &i,
+ &ts_params->def_comp_xform,
+ &ts_params->def_decomp_xform,
+ 1,
+ RTE_COMP_OP_STATELESS,
+ 1,
+ ZLIB_COMPRESS) < 0)
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static struct unit_test_suite compressdev_testsuite = {
+ .suite_name = "compressdev unit test suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(NULL, NULL,
+ test_compressdev_invalid_configuration),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_fixed),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_dynamic),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_multi_op),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_multi_level),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_multi_xform),
+ TEST_CASE_ST(generic_ut_setup, generic_ut_teardown,
+ test_compressdev_deflate_stateless_sgl),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_compressdev(void)
+{
+ return unit_test_suite_runner(&compressdev_testsuite);
+}
+
+REGISTER_TEST_COMMAND(compressdev_autotest, test_compressdev);
diff --git a/test/test/test_compressdev_test_buffer.h b/test/test/test_compressdev_test_buffer.h
new file mode 100644
index 00000000..c0492f89
--- /dev/null
+++ b/test/test/test_compressdev_test_buffer.h
@@ -0,0 +1,295 @@
+#ifndef TEST_COMPRESSDEV_TEST_BUFFERS_H_
+#define TEST_COMPRESSDEV_TEST_BUFFERS_H_
+
+/*
+ * These test buffers are snippets obtained
+ * from the Canterbury and Calgary corpus
+ * collections.
+ */
+
+/* Snippet of Alice's Adventures in Wonderland */
+static const char test_buf_alice[] =
+ " Alice was beginning to get very tired of sitting by her sister\n"
+ "on the bank, and of having nothing to do: once or twice she had\n"
+ "peeped into the book her sister was reading, but it had no\n"
+ "pictures or conversations in it, `and what is the use of a book,'\n"
+ "thought Alice `without pictures or conversation?'\n\n"
+ " So she was considering in her own mind (as well as she could,\n"
+ "for the hot day made her feel very sleepy and stupid), whether\n"
+ "the pleasure of making a daisy-chain would be worth the trouble\n"
+ "of getting up and picking the daisies, when suddenly a White\n"
+ "Rabbit with pink eyes ran close by her.\n\n"
+ " There was nothing so VERY remarkable in that; nor did Alice\n"
+ "think it so VERY much out of the way to hear the Rabbit say to\n"
+ "itself, `Oh dear! Oh dear! I shall be late!' (when she thought\n"
+ "it over afterwards, it occurred to her that she ought to have\n"
+ "wondered at this, but at the time it all seemed quite natural);\n"
+ "but when the Rabbit actually TOOK A WATCH OUT OF ITS WAISTCOAT-\n"
+ "POCKET, and looked at it, and then hurried on, Alice started to\n"
+ "her feet, for it flashed across her mind that she had never\n"
+ "before seen a rabbit with either a waistcoat-pocket, or a watch to\n"
+ "take out of it, and burning with curiosity, she ran across the\n"
+ "field after it, and fortunately was just in time to see it pop\n"
+ "down a large rabbit-hole under the hedge.\n\n"
+ " In another moment down went Alice after it, never once\n"
+ "considering how in the world she was to get out again.\n\n"
+ " The rabbit-hole went straight on like a tunnel for some way,\n"
+ "and then dipped suddenly down, so suddenly that Alice had not a\n"
+ "moment to think about stopping herself before she found herself\n"
+ "falling down a very deep well.\n\n"
+ " Either the well was very deep, or she fell very slowly, for she\n"
+ "had plenty of time as she went down to look about her and to\n"
+ "wonder what was going to happen next. First, she tried to look\n"
+ "down and make out what she was coming to, but it was too dark to\n"
+ "see anything; then she looked at the sides of the well, and\n"
+ "noticed that they were filled with cupboards and book-shelves;\n"
+ "here and there she saw maps and pictures hung upon pegs. She\n"
+ "took down a jar from one of the shelves as she passed; it was\n"
+ "labelled `ORANGE MARMALADE', but to her great disappointment it\n"
+ "was empty: she did not like to drop the jar for fear of killing\n"
+ "somebody, so managed to put it into one of the cupboards as she\n"
+ "fell past it.\n\n"
+ " `Well!' thought Alice to herself, `after such a fall as this, I\n"
+ "shall think nothing of tumbling down stairs! How brave they'll\n"
+ "all think me at home! Why, I wouldn't say anything about it,\n"
+ "even if I fell off the top of the house!' (Which was very likely\n"
+ "true.)\n\n"
+ " Down, down, down. Would the fall NEVER come to an end! `I\n"
+ "wonder how many miles I've fallen by this time?' she said aloud.\n"
+ "`I must be getting somewhere near the centre of the earth. Let\n"
+ "me see: that would be four thousand miles down, I think--' (for,\n"
+ "you see, Alice had learnt several things of this sort in her\n"
+ "lessons in the schoolroom, and though this was not a VERY good\n"
+ "opportunity for showing off her knowledge, as there was no one to\n"
+ "listen to her, still it was good practice to say it over) `--yes,\n"
+ "that's about the right distance--but then I wonder what Latitude\n"
+ "or Longitude I've got to?' (Alice had no idea what Latitude was,\n"
+ "or Longitude either, but thought they were nice grand words to\n"
+ "say.)\n\n"
+ " Presently she began again. `I wonder if I shall fall right\n"
+ "THROUGH the earth! How funny it'll seem to come out among the\n"
+ "people that walk with their heads downward! The Antipathies, I\n"
+ "think--' (she was rather glad there WAS no one listening, this\n"
+ "time, as it didn't sound at all the right word) `--but I shall\n"
+ "have to ask them what the name of the country is, you know.\n"
+ "Please, Ma'am, is this New Zealand or Australia?' (and she tried\n"
+ "to curtsey as she spoke--fancy CURTSEYING as you're falling\n"
+ "through the air! Do you think you could manage it?) `And what\n"
+ "an ignorant little girl she'll think me for asking! No, it'll\n"
+ "never do to ask: perhaps I shall see it written up somewhere.'\n"
+ " Down, down, down. There was nothing else to do, so Alice soon\n"
+ "began talking again. `Dinah'll miss me very much to-night, I\n"
+ "should think!' (Dinah was the cat.) `I hope they'll remember\n"
+ "her saucer of milk at tea-time. Dinah my dear! I wish you were\n"
+ "down here with me! There are no mice in the air, I'm afraid, but\n"
+ "you might catch a bat, and that's very like a mouse, you know.\n"
+ "But do cats eat bats, I wonder?' And here Alice began to get\n"
+ "rather sleepy, and went on saying to herself, in a dreamy sort of\n"
+ "way, `Do cats eat bats? Do cats eat bats?' and sometimes, `Do\n"
+ "bats eat cats?' for, you see, as she couldn't answer either\n"
+ "question, it didn't much matter which way she put it. She felt\n"
+ "that she was dozing off, and had just begun to dream that she\n"
+ "was walking hand in hand with Dinah, and saying to her very\n"
+ "earnestly, `Now, Dinah, tell me the truth: did you ever eat a\n"
+ "bat?' when suddenly, thump! thump! down she came upon a heap of\n"
+ "sticks and dry leaves, and the fall was over.\n\n";
+
+/* Snippet of Shakespeare play */
+static const char test_buf_shakespeare[] =
+ "CHARLES wrestler to Frederick.\n"
+ "\n"
+ "\n"
+ "OLIVER |\n"
+ " |\n"
+ "JAQUES (JAQUES DE BOYS:) | sons of Sir Rowland de Boys.\n"
+ " |\n"
+ "ORLANDO |\n"
+ "\n"
+ "\n"
+ "ADAM |\n"
+ " | servants to Oliver.\n"
+ "DENNIS |\n"
+ "\n"
+ "\n"
+ "TOUCHSTONE a clown.\n"
+ "\n"
+ "SIR OLIVER MARTEXT a vicar.\n"
+ "\n"
+ "\n"
+ "CORIN |\n"
+ " | shepherds.\n"
+ "SILVIUS |\n"
+ "\n"
+ "\n"
+ "WILLIAM a country fellow in love with Audrey.\n"
+ "\n"
+ " A person representing HYMEN. (HYMEN:)\n"
+ "\n"
+ "ROSALIND daughter to the banished duke.\n"
+ "\n"
+ "CELIA daughter to Frederick.\n"
+ "\n"
+ "PHEBE a shepherdess.\n"
+ "\n"
+ "AUDREY a country wench.\n"
+ "\n"
+ " Lords, pages, and attendants, &c.\n"
+ " (Forester:)\n"
+ " (A Lord:)\n"
+ " (First Lord:)\n"
+ " (Second Lord:)\n"
+ " (First Page:)\n"
+ " (Second Page:)\n"
+ "\n"
+ "\n"
+ "SCENE Oliver's house; Duke Frederick's court; and the\n"
+ " Forest of Arden.\n"
+ "\n"
+ "\n"
+ "\n"
+ "\n"
+ " AS YOU LIKE IT\n"
+ "\n"
+ "\n"
+ "ACT I\n"
+ "\n"
+ "\n"
+ "\n"
+ "SCENE I Orchard of Oliver's house.\n"
+ "\n"
+ "\n"
+ " [Enter ORLANDO and ADAM]\n"
+ "\n"
+ "ORLANDO As I remember, Adam, it was upon this fashion\n"
+ " bequeathed me by will but poor a thousand crowns,\n"
+ " and, as thou sayest, charged my brother, on his\n"
+ " blessing, to breed me well: and there begins my\n"
+ " sadness. My brother Jaques he keeps at school, and\n"
+ " report speaks goldenly of his profit: for my part,\n"
+ " he keeps me rustically at home, or, to speak more\n"
+ " properly, stays me here at home unkept; for call you\n"
+ " that keeping for a gentleman of my birth, that\n"
+ " differs not from the stalling of an ox? His horses\n"
+ " are bred better; for, besides that they are fair\n"
+ " with their feeding, they are taught their manage,\n"
+ " and to that end riders dearly hired: but I, his\n"
+ " brother, gain nothing under him but growth; for the\n"
+ " which his animals on his dunghills are as much\n"
+ " bound to him as I. Besides this nothing that he so\n"
+ " plentifully gives me, the something that nature gave\n"
+ " me his countenance seems to take from me: he lets\n"
+ " me feed with his hinds, bars me the place of a\n"
+ " brother, and, as much as in him lies, mines my\n"
+ " gentility with my education. This is it, Adam, that\n"
+ " grieves me; and the spirit of my father, which I\n"
+ " think is within me, begins to mutiny against this\n"
+ " servitude: I will no longer endure it, though yet I\n"
+ " know no wise remedy how to avoid it.\n"
+ "\n"
+ "ADAM Yonder comes my master, your brother.\n"
+ "\n"
+ "ORLANDO Go apart, Adam, and thou shalt hear how he will\n";
+
+/* Snippet of source code in Pascal */
+static const char test_buf_pascal[] =
+ " Ptr = 1..DMem;\n"
+ " Loc = 1..IMem;\n"
+ " Loc0 = 0..IMem;\n"
+ " EdgeT = (hout,lin,hin,lout); {Warning this order is important in}\n"
+ " {predicates such as gtS,geS}\n"
+ " CardT = (finite,infinite);\n"
+ " ExpT = Minexp..Maxexp;\n"
+ " ManT = Mininf..Maxinf; \n"
+ " Pflag = (PNull,PSoln,PTrace,PPrint);\n"
+ " Sreal = record\n"
+ " edge:EdgeT;\n"
+ " cardinality:CardT;\n"
+ " exp:ExpT; {exponent}\n"
+ " mantissa:ManT;\n"
+ " end;\n"
+ " Int = record\n"
+ " hi:Sreal;\n"
+ " lo:Sreal;\n"
+ " end;\n"
+ " Instr = record\n"
+ " Code:OpType;\n"
+ " Pars: array[0..Par] of 0..DMem;\n"
+ " end;\n"
+ " DataMem= record\n"
+ " D :array [Ptr] of Int;\n"
+ " S :array [Loc] of State;\n"
+ " LastHalve:Loc;\n"
+ " RHalve :array [Loc] of real;\n"
+ " end;\n"
+ " DataFlags=record\n"
+ " PF :array [Ptr] of Pflag;\n"
+ " end;\n"
+ "var\n"
+ " Debug : (none,activity,post,trace,dump);\n"
+ " Cut : (once,all);\n"
+ " GlobalEnd,Verifiable:boolean;\n"
+ " HalveThreshold:real;\n"
+ " I : array [Loc] of Instr; {Memory holding instructions}\n"
+ " End : Loc; {last instruction in I}\n"
+ " ParN : array [OpType] of -1..Par; {number of parameters for each \n"
+ " opcode. -1 means no result}\n"
+ " ParIntersect : array [OpType] of boolean ;\n"
+ " DInit : DataMem; {initial memory which is cleared and \n"
+ " used in first call}\n"
+ " DF : DataFlags; {hold flags for variables, e.g. print/trace}\n"
+ " MaxDMem:0..DMem;\n"
+ " Shift : array[0..Digits] of 1..maxint;{array of constant multipliers}\n"
+ " {used for alignment etc.}\n"
+ " Dummy :Positive;\n"
+ " {constant intervals and Sreals}\n"
+ " PlusInfS,MinusInfS,PlusSmallS,MinusSmallS,ZeroS,\n"
+ " PlusFiniteS,MinusFiniteS:Sreal;\n"
+ " Zero,All,AllFinite:Int;\n"
+ "\n"
+ "procedure deblank;\n"
+ "var Ch:char;\n"
+ "begin\n"
+ " while (not eof) and (input^ in [' ',' ']) do read(Ch);\n"
+ "end;\n"
+ "\n"
+ "procedure InitialOptions;\n"
+ "\n"
+ "#include '/user/profs/cleary/bin/options.i';\n"
+ "\n"
+ " procedure Option;\n"
+ " begin\n"
+ " case Opt of\n"
+ " 'a','A':Debug:=activity;\n"
+ " 'd','D':Debug:=dump;\n"
+ " 'h','H':HalveThreshold:=StringNum/100;\n"
+ " 'n','N':Debug:=none;\n"
+ " 'p','P':Debug:=post;\n"
+ " 't','T':Debug:=trace;\n"
+ " 'v','V':Verifiable:=true;\n"
+ " end;\n"
+ " end;\n"
+ "\n"
+ "begin\n"
+ " Debug:=trace;\n"
+ " Verifiable:=false;\n"
+ " HalveThreshold:=67/100;\n"
+ " Options;\n"
+ " writeln(Debug);\n"
+ " writeln('Verifiable:',Verifiable);\n"
+ " writeln('Halve threshold',HalveThreshold);\n"
+ "end;{InitialOptions}\n"
+ "\n"
+ "procedure NormalizeUp(E,M:integer;var S:Sreal;var Closed:boolean);\n"
+ "begin\n"
+ "with S do\n"
+ "begin\n"
+ " if M=0 then S:=ZeroS else\n"
+ " if M>0 then\n";
+
+static const char * const compress_test_bufs[] = {
+ test_buf_alice,
+ test_buf_shakespeare,
+ test_buf_pascal
+};
+
+#endif /* TEST_COMPRESSDEV_TEST_BUFFERS_H_ */
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 1417482c..a6044b26 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -21,6 +21,8 @@
#include <rte_cryptodev_scheduler_operations.h>
#endif
+#include <rte_lcore.h>
+
#include "test.h"
#include "test_cryptodev.h"
@@ -36,6 +38,9 @@
#include "test_cryptodev_aead_test_vectors.h"
#include "test_cryptodev_hmac_test_vectors.h"
+#define VDEV_ARGS_SIZE 100
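+/* Number of sessions the tests create; a device must support at least this many */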
+#define MAX_NB_SESSIONS 4
+
static int gbl_driver_id;
struct crypto_testsuite_params {
@@ -316,40 +321,81 @@ testsuite_setup(void)
}
}
- /* Create a MRVL device if required */
+ /* Create an MVSAM device if required */
if (gbl_driver_id == rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_MRVL_PMD))) {
-#ifndef RTE_LIBRTE_PMD_MRVL_CRYPTO
- RTE_LOG(ERR, USER1, "CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO must be"
- " enabled in config file to run this testsuite.\n");
- return TEST_FAILED;
-#endif
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)));
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create "
+ "instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD));
+ }
+ }
+
+ /* Create a CCP device if required */
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
nb_devs = rte_cryptodev_device_count_by_driver(
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)));
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD),
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD),
NULL);
TEST_ASSERT(ret == 0, "Failed to create "
"instance of pmd : %s",
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
}
}
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ char vdev_args[VDEV_ARGS_SIZE] = {""};
+ char temp_str[VDEV_ARGS_SIZE] = {"mode=multi-core,"
+ "ordering=enable,name=cryptodev_test_scheduler,corelist="};
+ uint16_t slave_core_count = 0;
+ uint16_t socket_id = 0;
+
if (gbl_driver_id == rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
+ /* Identify the slave cores.
+ * Use two slave cores for the device args.
+ */
+ RTE_LCORE_FOREACH_SLAVE(i) {
+ if (slave_core_count > 1)
+ break;
+ snprintf(vdev_args, sizeof(vdev_args),
+ "%s%d", temp_str, i);
+ strcpy(temp_str, vdev_args);
+ strcat(temp_str, ";");
+ slave_core_count++;
+ socket_id = lcore_config[i].socket_id;
+ }
+ if (slave_core_count != 2) {
+ RTE_LOG(ERR, USER1,
+ "Cryptodev scheduler test requires at least "
+ "two slave cores to run. "
+ "Please use the correct coremask.\n");
+ return TEST_FAILED;
+ }
+ strcpy(temp_str, vdev_args);
+ snprintf(vdev_args, sizeof(vdev_args), "%s,socket_id=%d",
+ temp_str, socket_id);
+ RTE_LOG(DEBUG, USER1, "vdev_args: %s\n", vdev_args);
nb_devs = rte_cryptodev_device_count_by_driver(
rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD)));
if (nb_devs < 1) {
ret = rte_vdev_init(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
- NULL);
-
+ vdev_args);
TEST_ASSERT(ret == 0,
"Failed to create instance %u of"
" pmd : %s",
@@ -383,15 +429,24 @@ testsuite_setup(void)
ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
ts_params->conf.socket_id = SOCKET_ID_ANY;
- unsigned int session_size = rte_cryptodev_get_private_session_size(dev_id);
+ unsigned int session_size =
+ rte_cryptodev_sym_get_private_session_size(dev_id);
/*
* Create mempool with maximum number of sessions * 2,
* to include the session headers
*/
+ if (info.sym.max_nb_sessions != 0 &&
+ info.sym.max_nb_sessions < MAX_NB_SESSIONS) {
+ RTE_LOG(ERR, USER1, "Device does not support "
+ "at least %u sessions\n",
+ MAX_NB_SESSIONS);
+ return TEST_FAILED;
+ }
+
ts_params->session_mpool = rte_mempool_create(
"test_sess_mp",
- info.sym.max_nb_sessions * 2,
+ MAX_NB_SESSIONS * 2,
session_size,
0, 0, NULL, NULL, NULL,
NULL, SOCKET_ID_ANY,
@@ -539,7 +594,7 @@ test_device_configure_invalid_dev_id(void)
dev_id = ts_params->valid_devs[ts_params->valid_dev_count - 1];
/* Stop the device in case it's started so it can be configured */
- rte_cryptodev_stop(ts_params->valid_devs[dev_id]);
+ rte_cryptodev_stop(dev_id);
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id, &ts_params->conf),
"Failed test for rte_cryptodev_configure: "
@@ -1727,6 +1782,44 @@ test_AES_cipheronly_openssl_all(void)
}
static int
+test_AES_chain_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_AES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_AES_cipheronly_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_AES_chain_qat_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1765,6 +1858,25 @@ test_AES_cipheronly_qat_all(void)
}
static int
+test_AES_cipheronly_virtio_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD)),
+ BLKCIPHER_AES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_AES_chain_dpaa_sec_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1898,6 +2010,25 @@ test_authonly_openssl_all(void)
}
static int
+test_authonly_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_AES_chain_armv8_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -1927,7 +2058,7 @@ test_AES_chain_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_AES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1946,7 +2077,7 @@ test_AES_cipheronly_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_AES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1965,7 +2096,7 @@ test_authonly_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_AUTHONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -1984,7 +2115,7 @@ test_3DES_chain_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_3DES_CHAIN_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -2003,7 +2134,7 @@ test_3DES_cipheronly_mrvl_all(void)
ts_params->session_mpool,
ts_params->valid_devs[0],
rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD)),
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD)),
BLKCIPHER_3DES_CIPHERONLY_TYPE);
TEST_ASSERT_EQUAL(status, 0, "Test failed");
@@ -3029,8 +3160,11 @@ test_kasumi_encryption_sgl(const struct kasumi_test_data *tdata)
struct rte_cryptodev_info dev_info;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+
+ uint64_t feat_flags = dev_info.feature_flags;
+
+ if (!(feat_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) {
+ printf("Device doesn't support in-place scatter-gather. "
"Test Skipped.\n");
return 0;
}
@@ -3177,8 +3311,11 @@ test_kasumi_encryption_oop_sgl(const struct kasumi_test_data *tdata)
struct rte_cryptodev_info dev_info;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+
+ uint64_t feat_flags = dev_info.feature_flags;
+ if (!(feat_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)) {
+ printf("Device doesn't support out-of-place scatter-gather "
+ "in both input and output mbufs. "
"Test Skipped.\n");
return 0;
}
@@ -3528,8 +3665,12 @@ test_snow3g_encryption_oop_sgl(const struct snow3g_test_data *tdata)
struct rte_cryptodev_info dev_info;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+
+ uint64_t feat_flags = dev_info.feature_flags;
+
+ if (!(feat_flags & RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT)) {
+ printf("Device doesn't support out-of-place scatter-gather "
+ "in both input and output mbufs. "
"Test Skipped.\n");
return 0;
}
@@ -4362,10 +4503,13 @@ test_zuc_encryption_sgl(const struct wireless_test_data *tdata)
return -ENOTSUP;
rte_cryptodev_info_get(ts_params->valid_devs[0], &dev_info);
- if (!(dev_info.feature_flags & RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+
+ uint64_t feat_flags = dev_info.feature_flags;
+
+ if (!(feat_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) {
+ printf("Device doesn't support in-place scatter-gather. "
"Test Skipped.\n");
- return -ENOTSUP;
+ return 0;
}
plaintext_len = ceil_byte_length(tdata->plaintext.len);
@@ -4876,6 +5020,24 @@ test_DES_cipheronly_mb_all(void)
return TEST_SUCCESS;
}
+static int
+test_3DES_cipheronly_mb_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)),
+ BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
static int
test_DES_docsis_mb_all(void)
@@ -4973,6 +5135,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
}
static int
+test_3DES_chain_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_3DES_CHAIN_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_3DES_cipheronly_ccp_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool,
+ ts_params->session_mpool,
+ ts_params->valid_devs[0],
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
+ BLKCIPHER_3DES_CIPHERONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
+static int
test_3DES_cipheronly_qat_all(void)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
@@ -6339,10 +6539,10 @@ test_multi_session(void)
sessions = rte_malloc(NULL,
(sizeof(struct rte_cryptodev_sym_session *) *
- dev_info.sym.max_nb_sessions) + 1, 0);
+ MAX_NB_SESSIONS) + 1, 0);
/* Create multiple crypto sessions*/
- for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ for (i = 0; i < MAX_NB_SESSIONS; i++) {
sessions[i] = rte_cryptodev_sym_session_create(
ts_params->session_mpool);
@@ -6391,7 +6591,7 @@ test_multi_session(void)
TEST_ASSERT_NULL(sessions[i],
"Session creation succeeded unexpectedly!");
- for (i = 0; i < dev_info.sym.max_nb_sessions; i++) {
+ for (i = 0; i < MAX_NB_SESSIONS; i++) {
rte_cryptodev_sym_session_clear(ts_params->valid_devs[0],
sessions[i]);
rte_cryptodev_sym_session_free(sessions[i]);
@@ -6450,13 +6650,13 @@ test_multi_session_random_usage(void)
sessions = rte_malloc(NULL,
(sizeof(struct rte_cryptodev_sym_session *)
- * dev_info.sym.max_nb_sessions) + 1, 0);
+ * MAX_NB_SESSIONS) + 1, 0);
for (i = 0; i < MB_SESSION_NUMBER; i++) {
sessions[i] = rte_cryptodev_sym_session_create(
ts_params->session_mpool);
- rte_memcpy(&ut_paramz[i].ut_params, &testsuite_params,
+ rte_memcpy(&ut_paramz[i].ut_params, &unittest_params,
sizeof(struct crypto_unittest_params));
test_AES_CBC_HMAC_SHA512_decrypt_create_session_params(
@@ -8375,8 +8575,17 @@ test_scheduler_attach_slave_op(void)
rte_mempool_free(ts_params->session_mpool);
ts_params->session_mpool = NULL;
}
- unsigned int session_size = rte_cryptodev_get_private_session_size(i);
+ unsigned int session_size =
+ rte_cryptodev_sym_get_private_session_size(i);
+ if (info.sym.max_nb_sessions != 0 &&
+ info.sym.max_nb_sessions < MAX_NB_SESSIONS) {
+ RTE_LOG(ERR, USER1,
+ "Device does not support "
+ "at least %u sessions\n",
+ MAX_NB_SESSIONS);
+ return TEST_FAILED;
+ }
/*
* Create mempool with maximum number of sessions * 2,
* to include the session headers
@@ -8384,7 +8593,7 @@ test_scheduler_attach_slave_op(void)
if (ts_params->session_mpool == NULL) {
ts_params->session_mpool = rte_mempool_create(
"test_sess_mp",
- info.sym.max_nb_sessions * 2,
+ MAX_NB_SESSIONS * 2,
session_size,
0, 0, NULL, NULL, NULL,
NULL, SOCKET_ID_ANY,
@@ -8428,33 +8637,47 @@ test_scheduler_detach_slave_op(void)
}
static int
-test_scheduler_mode_op(void)
+test_scheduler_mode_op(enum rte_cryptodev_scheduler_mode scheduler_mode)
{
struct crypto_testsuite_params *ts_params = &testsuite_params;
uint8_t sched_id = ts_params->valid_devs[0];
- struct rte_cryptodev_scheduler_ops op = {0};
- struct rte_cryptodev_scheduler dummy_scheduler = {
- .description = "dummy scheduler to test mode",
- .name = "dummy scheduler",
- .mode = CDEV_SCHED_MODE_USERDEFINED,
- .ops = &op
- };
- int ret;
+ /* set mode */
+ return rte_cryptodev_scheduler_mode_set(sched_id,
+ scheduler_mode);
+}
- /* set user defined mode */
- ret = rte_cryptodev_scheduler_load_user_scheduler(sched_id,
- &dummy_scheduler);
- TEST_ASSERT(ret == 0,
- "Failed to set cdev %u to user defined mode", sched_id);
-
- /* set round robin mode */
- ret = rte_cryptodev_scheduler_mode_set(sched_id,
- CDEV_SCHED_MODE_ROUNDROBIN);
- TEST_ASSERT(ret == 0,
- "Failed to set cdev %u to round-robin mode", sched_id);
- TEST_ASSERT(rte_cryptodev_scheduler_mode_get(sched_id) ==
- CDEV_SCHED_MODE_ROUNDROBIN, "Scheduling Mode "
- "not match");
+static int
+test_scheduler_mode_roundrobin_op(void)
+{
+ TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_ROUNDROBIN) ==
+ 0, "Failed to set roundrobin mode");
+ return 0;
+
+}
+
+static int
+test_scheduler_mode_multicore_op(void)
+{
+ TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_MULTICORE) ==
+ 0, "Failed to set multicore mode");
+
+ return 0;
+}
+
+static int
+test_scheduler_mode_failover_op(void)
+{
+ TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_FAILOVER) ==
+ 0, "Failed to set failover mode");
+
+ return 0;
+}
+
+static int
+test_scheduler_mode_pkt_size_distr_op(void)
+{
+ TEST_ASSERT(test_scheduler_mode_op(CDEV_SCHED_MODE_PKT_SIZE_DISTR) ==
+ 0, "Failed to set pktsize mode");
return 0;
}
@@ -8464,8 +8687,20 @@ static struct unit_test_suite cryptodev_scheduler_testsuite = {
.setup = testsuite_setup,
.teardown = testsuite_teardown,
.unit_test_cases = {
+ /* Multi Core */
+ TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_multicore_op),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_scheduler_all),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+
+ /* Round Robin */
TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
- TEST_CASE_ST(NULL, NULL, test_scheduler_mode_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_roundrobin_op),
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_chain_scheduler_all),
TEST_CASE_ST(ut_setup, ut_teardown,
@@ -8473,6 +8708,29 @@ static struct unit_test_suite cryptodev_scheduler_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_authonly_scheduler_all),
TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+
+ /* Fail over */
+ TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_failover_op),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_scheduler_all),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+
+ /* PKT SIZE */
+ TEST_CASE_ST(NULL, NULL, test_scheduler_attach_slave_op),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_mode_pkt_size_distr_op),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_scheduler_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_scheduler_all),
+ TEST_CASE_ST(NULL, NULL, test_scheduler_detach_slave_op),
+
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
@@ -8767,6 +9025,18 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
}
};
+static struct unit_test_suite cryptodev_virtio_testsuite = {
+ .suite_name = "Crypto VIRTIO Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_virtio_all),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
.suite_name = "Crypto Device AESNI MB Unit Test Suite",
.setup = testsuite_setup,
@@ -8781,6 +9051,8 @@ static struct unit_test_suite cryptodev_aesni_mb_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown,
test_DES_docsis_mb_all),
TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_cipheronly_mb_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_CCM_authenticated_encryption_test_case_128_1),
TEST_CASE_ST(ut_setup, ut_teardown,
test_AES_CCM_authenticated_decryption_test_case_128_1),
@@ -9646,6 +9918,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite = {
}
};
+static struct unit_test_suite cryptodev_ccp_testsuite = {
+ .suite_name = "Crypto Device CCP Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_multi_session_random_usage),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_chain_ccp_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_AES_cipheronly_ccp_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_chain_ccp_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_3DES_cipheronly_ccp_all),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ test_authonly_ccp_all),
+
+ /** Negative tests */
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ authentication_verify_HMAC_SHA1_fail_data_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ authentication_verify_HMAC_SHA1_fail_tag_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
+ TEST_CASE_ST(ut_setup, ut_teardown,
+ auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
static int
test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
@@ -9654,9 +9958,9 @@ test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_QAT is enabled "
- "in config file to run this testsuite.\n");
+ RTE_LOG(ERR, USER1, "QAT PMD must be loaded. Check that both "
+ "CONFIG_RTE_LIBRTE_PMD_QAT and CONFIG_RTE_LIBRTE_PMD_QAT_SYM "
+ "are enabled in config file to run this testsuite.\n");
return TEST_SKIPPED;
}
@@ -9664,6 +9968,22 @@ test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
}
static int
+test_cryptodev_virtio(void /*argv __rte_unused, int argc __rte_unused*/)
+{
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "VIRTIO PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
+ return unit_test_suite_runner(&cryptodev_virtio_testsuite);
+}
+
+static int
test_cryptodev_aesni_mb(void /*argv __rte_unused, int argc __rte_unused*/)
{
gbl_driver_id = rte_cryptodev_driver_id_get(
@@ -9795,11 +10115,11 @@ static int
test_cryptodev_mrvl(void)
{
gbl_driver_id = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD));
if (gbl_driver_id == -1) {
- RTE_LOG(ERR, USER1, "MRVL PMD must be loaded. Check if "
- "CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO is enabled "
+ RTE_LOG(ERR, USER1, "MVSAM PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO is enabled "
"in config file to run this testsuite.\n");
return TEST_SKIPPED;
}
@@ -9867,6 +10187,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
}
+static int
+test_cryptodev_ccp(void)
+{
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_CCP is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
+ return unit_test_suite_runner(&cryptodev_ccp_testsuite);
+}
+
REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
@@ -9876,6 +10212,8 @@ REGISTER_TEST_COMMAND(cryptodev_sw_snow3g_autotest, test_cryptodev_sw_snow3g);
REGISTER_TEST_COMMAND(cryptodev_sw_kasumi_autotest, test_cryptodev_sw_kasumi);
REGISTER_TEST_COMMAND(cryptodev_sw_zuc_autotest, test_cryptodev_sw_zuc);
REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
-REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
+REGISTER_TEST_COMMAND(cryptodev_sw_mvsam_autotest, test_cryptodev_mrvl);
REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
+REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
+REGISTER_TEST_COMMAND(cryptodev_virtio_autotest, test_cryptodev_virtio);
diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
index 8cdc0874..1bd44dcd 100644
--- a/test/test/test_cryptodev.h
+++ b/test/test/test_cryptodev.h
@@ -58,9 +58,12 @@
#define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi
#define CRYPTODEV_NAME_ZUC_PMD crypto_zuc
#define CRYPTODEV_NAME_ARMV8_PMD crypto_armv8
+#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec
#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
#define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
-#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
+#define CRYPTODEV_NAME_MVSAM_PMD crypto_mvsam
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
/**
* Write (spread) data from buffer to mbuf data
diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
index 3577ef4b..1c4dc664 100644
--- a/test/test/test_cryptodev_aes_test_vectors.h
+++ b/test/test/test_cryptodev_aes_test_vectors.h
@@ -1171,7 +1171,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
@@ -1184,7 +1185,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-192-CTR XCBC Encryption Digest",
@@ -1223,7 +1225,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
@@ -1236,7 +1239,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
@@ -1249,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1257,7 +1262,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_13,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1285,7 +1290,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
@@ -1302,7 +1308,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_13,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest",
@@ -1315,7 +1321,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
@@ -1323,7 +1330,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_12,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1337,7 +1344,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
@@ -1345,7 +1353,7 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.test_data = &aes_test_data_12,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest",
@@ -1357,7 +1365,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1366,7 +1375,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
@@ -1390,7 +1400,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
@@ -1455,7 +1466,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
@@ -1467,7 +1479,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
@@ -1479,7 +1492,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
@@ -1492,7 +1506,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
@@ -1501,7 +1516,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr =
@@ -1511,7 +1527,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
};
@@ -1526,7 +1543,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-128-CBC Decryption",
@@ -1538,7 +1557,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-192-CBC Encryption",
@@ -1549,7 +1570,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-192-CBC Encryption Scater gather",
@@ -1570,7 +1593,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-192-CBC Decryption Scatter Gather",
@@ -1590,7 +1615,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-256-CBC Decryption",
@@ -1602,7 +1629,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-256-CBC OOP Encryption",
@@ -1612,7 +1641,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-256-CBC OOP Decryption",
@@ -1622,7 +1653,9 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO
},
{
.test_descr = "AES-128-CTR Encryption",
@@ -1634,7 +1667,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CTR Decryption",
@@ -1646,7 +1680,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-192-CTR Encryption",
@@ -1657,7 +1692,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-192-CTR Decryption",
@@ -1668,7 +1704,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-256-CTR Encryption",
@@ -1680,7 +1717,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-256-CTR Decryption",
@@ -1692,7 +1730,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "AES-128-CTR Encryption (12-byte IV)",
diff --git a/test/test/test_cryptodev_asym.c b/test/test/test_cryptodev_asym.c
new file mode 100644
index 00000000..2fdfc1df
--- /dev/null
+++ b/test/test/test_cryptodev_asym.c
@@ -0,0 +1,1369 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_pause.h>
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+
+#include "test_cryptodev.h"
+#include "test_cryptodev_dh_test_vectors.h"
+#include "test_cryptodev_dsa_test_vectors.h"
+#include "test_cryptodev_mod_test_vectors.h"
+#include "test_cryptodev_rsa_test_vectors.h"
+#include "test_cryptodev_asym_util.h"
+#include "test.h"
+
+#define TEST_NUM_BUFS 10
+#define TEST_NUM_SESSIONS 4
+
+static int gbl_driver_id;
+struct crypto_testsuite_params {
+ struct rte_mempool *op_mpool;
+ struct rte_mempool *session_mpool;
+ struct rte_cryptodev_config conf;
+ struct rte_cryptodev_qp_conf qp_conf;
+ uint8_t valid_devs[RTE_CRYPTO_MAX_DEVS];
+ uint8_t valid_dev_count;
+};
+
+struct crypto_unittest_params {
+ struct rte_cryptodev_asym_session *sess;
+ struct rte_crypto_op *op;
+};
+
+static struct crypto_testsuite_params testsuite_params = { NULL };
+
+static int
+test_rsa_sign_verify(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t output_buf[TEST_DATA_SIZE] = {0};
+ uint8_t input_buf[TEST_DATA_SIZE] = {0};
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+
+ if (!sess) {
+ RTE_LOG(ERR, USER1, "line %u "
+ "FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &rsa_xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+				__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__,
+ "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ asym_op = op->asym;
+ /* Compute sign on the test vector */
+ asym_op->rsa.op_type = RTE_CRYPTO_ASYM_OP_SIGN;
+
+	memcpy(input_buf, rsaplaintext.data,
+ rsaplaintext.len);
+ asym_op->rsa.message.data = input_buf;
+ asym_op->rsa.message.length = rsaplaintext.len;
+ asym_op->rsa.sign.data = output_buf;
+ asym_op->rsa.pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT1;
+
+ debug_hexdump(stdout, "message", asym_op->rsa.message.data,
+ asym_op->rsa.message.length);
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ debug_hexdump(stdout, "signed message", asym_op->rsa.sign.data,
+ asym_op->rsa.sign.length);
+ asym_op = result_op->asym;
+
+ /* Verify sign */
+ asym_op->rsa.op_type = RTE_CRYPTO_ASYM_OP_VERIFY;
+ asym_op->rsa.pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT2;
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ status = TEST_SUCCESS;
+ int ret = 0;
+ ret = rsa_verify(&rsaplaintext, result_op);
+ if (ret)
+ status = TEST_FAILED;
+
+error_exit:
+
+ if (sess) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+
+ if (op)
+ rte_crypto_op_free(op);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return status;
+}
+
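
(Editorial note: every test in this file repeats the same one-op
submit-and-poll sequence. A minimal sketch of that pattern as a shared
helper follows; the name process_asym_op() is hypothetical, not part of
this patch, and only headers already included above are assumed.)

static struct rte_crypto_op *
process_asym_op(uint8_t dev_id, struct rte_crypto_op *op)
{
	struct rte_crypto_op *result_op = NULL;

	/* submit exactly one operation on queue pair 0 */
	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1)
		return NULL;

	/* busy-poll until the PMD hands the operation back */
	while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
		rte_pause();

	return result_op;
}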
+static int
+test_rsa_enc_dec(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t input_buf[TEST_DATA_SIZE] = {0};
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+
+ if (!sess) {
+ RTE_LOG(ERR, USER1, "line %u "
+ "FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &rsa_xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+				__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__,
+ "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ asym_op = op->asym;
+	/* Compute encryption on the test vector */
+ asym_op->rsa.op_type = RTE_CRYPTO_ASYM_OP_ENCRYPT;
+
+ memcpy(input_buf, rsaplaintext.data,
+ rsaplaintext.len);
+ asym_op->rsa.message.data = input_buf;
+ asym_op->rsa.message.length = rsaplaintext.len;
+ asym_op->rsa.pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT2;
+
+ debug_hexdump(stdout, "message", asym_op->rsa.message.data,
+ asym_op->rsa.message.length);
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ debug_hexdump(stdout, "encrypted message", asym_op->rsa.message.data,
+ asym_op->rsa.message.length);
+	/* Use the resulting output as the decryption input vector */
+ asym_op = result_op->asym;
+ asym_op->rsa.op_type = RTE_CRYPTO_ASYM_OP_DECRYPT;
+ asym_op->rsa.pad = RTE_CRYPTO_RSA_PKCS1_V1_5_BT1;
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ status = TEST_SUCCESS;
+ int ret = 0;
+ ret = rsa_verify(&rsaplaintext, result_op);
+ if (ret)
+ status = TEST_FAILED;
+
+error_exit:
+
+ if (sess) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+
+ if (op)
+ rte_crypto_op_free(op);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return status;
+}
+
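
(Editorial note: both RSA tests above drive the same five-call session
lifecycle. A condensed sketch, assuming dev_id, sess_mpool, op and
rsa_xform as defined in this file, with error handling elided:)

struct rte_cryptodev_asym_session *s =
	rte_cryptodev_asym_session_create(sess_mpool);
rte_cryptodev_asym_session_init(dev_id, s, &rsa_xform, sess_mpool);
rte_crypto_op_attach_asym_session(op, s);
/* ... enqueue, dequeue and check the result ... */
rte_cryptodev_asym_session_clear(dev_id, s);
rte_cryptodev_asym_session_free(s);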
+static int
+testsuite_setup(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_cryptodev_info info;
+ uint32_t i = 0, nb_devs, dev_id;
+ int ret;
+ uint16_t qp_id;
+
+ memset(ts_params, 0, sizeof(*ts_params));
+
+ ts_params->op_mpool = rte_crypto_op_pool_create(
+ "CRYPTO_ASYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ TEST_NUM_BUFS, 0,
+ 0,
+ rte_socket_id());
+ if (ts_params->op_mpool == NULL) {
+ RTE_LOG(ERR, USER1, "Can't create ASYM_CRYPTO_OP_POOL\n");
+ return TEST_FAILED;
+ }
+
+ /* Create an OPENSSL device if required */
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD))) {
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD)));
+ if (nb_devs < 1) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
+ NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create "
+ "instance of pmd : %s",
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+ }
+ }
+
+ nb_devs = rte_cryptodev_count();
+ if (nb_devs < 1) {
+ RTE_LOG(ERR, USER1, "No crypto devices found?\n");
+ return TEST_FAILED;
+ }
+
+ /* Create list of valid crypto devs */
+ for (i = 0; i < nb_devs; i++) {
+ rte_cryptodev_info_get(i, &info);
+ if (info.driver_id == gbl_driver_id)
+ ts_params->valid_devs[ts_params->valid_dev_count++] = i;
+ }
+
+ if (ts_params->valid_dev_count < 1)
+ return TEST_FAILED;
+
+ /* Set up all the qps on the first of the valid devices found */
+
+ dev_id = ts_params->valid_devs[0];
+
+ rte_cryptodev_info_get(dev_id, &info);
+
+	/* check if the device supports asymmetric crypto; skip if not */
+	if (!(info.feature_flags &
+	      RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)) {
+		RTE_LOG(ERR, USER1, "Device doesn't support asymmetric "
+				"crypto. Test skipped.\n");
+ return TEST_FAILED;
+ }
+
+ /* configure device with num qp */
+ ts_params->conf.nb_queue_pairs = info.max_nb_queue_pairs;
+ ts_params->conf.socket_id = SOCKET_ID_ANY;
+ TEST_ASSERT_SUCCESS(rte_cryptodev_configure(dev_id,
+ &ts_params->conf),
+ "Failed to configure cryptodev %u with %u qps",
+ dev_id, ts_params->conf.nb_queue_pairs);
+
+ /* configure qp */
+ ts_params->qp_conf.nb_descriptors = DEFAULT_NUM_OPS_INFLIGHT;
+ for (qp_id = 0; qp_id < info.max_nb_queue_pairs; qp_id++) {
+ TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+ dev_id, qp_id, &ts_params->qp_conf,
+ rte_cryptodev_socket_id(dev_id),
+ ts_params->session_mpool),
+ "Failed to setup queue pair %u on cryptodev %u ASYM",
+ qp_id, dev_id);
+ }
+
+ /* setup asym session pool */
+ unsigned int session_size =
+ rte_cryptodev_asym_get_private_session_size(dev_id);
+	/*
+	 * Create the mempool with TEST_NUM_SESSIONS * 2 elements,
+	 * to account for the session headers as well
+	 */
+ ts_params->session_mpool = rte_mempool_create(
+ "test_asym_sess_mp",
+ TEST_NUM_SESSIONS * 2,
+ session_size,
+ 0, 0, NULL, NULL, NULL,
+ NULL, SOCKET_ID_ANY,
+ 0);
+
+ TEST_ASSERT_NOT_NULL(ts_params->session_mpool,
+ "session mempool allocation failed");
+
+ return TEST_SUCCESS;
+}
+
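
(Editorial note: rte_cryptodev_asym_session_create() takes the session
header from the same mempool that rte_cryptodev_asym_session_init()
later takes the per-device private data from, hence the factor of two
in the element count above. A sketch of the sizing, reusing the names
from testsuite_setup(); the pool name is illustrative:)

unsigned int sess_size =
	rte_cryptodev_asym_get_private_session_size(dev_id);
struct rte_mempool *mp = rte_mempool_create("asym_sess_sketch",
		TEST_NUM_SESSIONS * 2,	/* header + private data */
		sess_size, 0, 0,
		NULL, NULL, NULL, NULL,
		SOCKET_ID_ANY, 0);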
+static void
+testsuite_teardown(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+ if (ts_params->op_mpool != NULL) {
+ RTE_LOG(DEBUG, USER1, "CRYPTO_OP_POOL count %u\n",
+ rte_mempool_avail_count(ts_params->op_mpool));
+ }
+
+ /* Free session mempools */
+ if (ts_params->session_mpool != NULL) {
+ rte_mempool_free(ts_params->session_mpool);
+ ts_params->session_mpool = NULL;
+ }
+}
+
+static int
+ut_setup(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+ uint16_t qp_id;
+
+ /* Reconfigure device to default parameters */
+ ts_params->conf.socket_id = SOCKET_ID_ANY;
+
+ TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
+ &ts_params->conf),
+ "Failed to configure cryptodev %u",
+ ts_params->valid_devs[0]);
+
+ for (qp_id = 0; qp_id < ts_params->conf.nb_queue_pairs ; qp_id++) {
+ TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+ ts_params->valid_devs[0], qp_id,
+ &ts_params->qp_conf,
+ rte_cryptodev_socket_id(ts_params->valid_devs[0]),
+ ts_params->session_mpool),
+ "Failed to setup queue pair %u on cryptodev %u",
+ qp_id, ts_params->valid_devs[0]);
+ }
+
+ rte_cryptodev_stats_reset(ts_params->valid_devs[0]);
+
+ /* Start the device */
+ TEST_ASSERT_SUCCESS(rte_cryptodev_start(ts_params->valid_devs[0]),
+ "Failed to start cryptodev %u",
+ ts_params->valid_devs[0]);
+
+ return TEST_SUCCESS;
+}
+
+static void
+ut_teardown(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_cryptodev_stats stats;
+
+ rte_cryptodev_stats_get(ts_params->valid_devs[0], &stats);
+
+ /* Stop the device */
+ rte_cryptodev_stop(ts_params->valid_devs[0]);
+}
+
+static inline void print_asym_capa(
+ const struct rte_cryptodev_asymmetric_xform_capability *capa)
+{
+ int i = 0;
+
+ printf("\nxform type: %s\n===================\n",
+ rte_crypto_asym_xform_strings[capa->xform_type]);
+ printf("operation supported -");
+
+ for (i = 0; i < RTE_CRYPTO_ASYM_OP_LIST_END; i++) {
+ /* check supported operations */
+ if (rte_cryptodev_asym_xform_capability_check_optype(capa, i))
+ printf(" %s",
+ rte_crypto_asym_op_strings[i]);
+ }
+ switch (capa->xform_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ case RTE_CRYPTO_ASYM_XFORM_MODINV:
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ case RTE_CRYPTO_ASYM_XFORM_DH:
+ case RTE_CRYPTO_ASYM_XFORM_DSA:
+ printf(" modlen: min %d max %d increment %d\n",
+ capa->modlen.min,
+ capa->modlen.max,
+ capa->modlen.increment);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+test_capability(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_cryptodev_info dev_info;
+ const struct rte_cryptodev_capabilities *dev_capa;
+ int i = 0;
+ struct rte_cryptodev_asym_capability_idx idx;
+ const struct rte_cryptodev_asymmetric_xform_capability *capa;
+
+ rte_cryptodev_info_get(dev_id, &dev_info);
+ if (!(dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)) {
+		RTE_LOG(INFO, USER1,
+			"Device doesn't support asymmetric crypto. "
+			"Test skipped\n");
+ return TEST_SUCCESS;
+ }
+
+ /* print xform capability */
+ for (i = 0;
+ dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ i++) {
+ dev_capa = &(dev_info.capabilities[i]);
+ if (dev_info.capabilities[i].op ==
+ RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+ idx.type = dev_capa->asym.xform_capa.xform_type;
+
+ capa = rte_cryptodev_asym_capability_get(dev_id,
+ (const struct
+ rte_cryptodev_asym_capability_idx *) &idx);
+ print_asym_capa(capa);
+ }
+ }
+ return TEST_SUCCESS;
+}
+
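
(Editorial note: test_capability() walks the whole capability table; a
single xform can also be queried directly. A sketch, assuming dev_id
and a 128-byte modulus purely for illustration;
rte_cryptodev_asym_xform_capability_check_modlen() returns 0 when the
length is supported:)

struct rte_cryptodev_asym_capability_idx idx = {
	.type = RTE_CRYPTO_ASYM_XFORM_MODEX,
};
const struct rte_cryptodev_asymmetric_xform_capability *capa =
	rte_cryptodev_asym_capability_get(dev_id, &idx);
if (capa != NULL &&
		rte_cryptodev_asym_xform_capability_check_modlen(capa,
			128) == 0)
	printf("128-byte modulus supported\n");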
+static int
+test_dh_gen_shared_sec(struct rte_crypto_asym_xform *xfrm)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t output[TEST_DH_MOD_LEN];
+ struct rte_crypto_asym_xform xform = *xfrm;
+ uint8_t peer[] = "01234567890123456789012345678901234567890123456789";
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+ if (sess == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ asym_op = op->asym;
+
+ /* Setup a xform and op to generate private key only */
+ xform.dh.type = RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE;
+ xform.next = NULL;
+ asym_op->dh.priv_key.data = dh_test_params.priv_key.data;
+ asym_op->dh.priv_key.length = dh_test_params.priv_key.length;
+ asym_op->dh.pub_key.data = (uint8_t *)peer;
+ asym_op->dh.pub_key.length = sizeof(peer);
+ asym_op->dh.shared_secret.data = output;
+ asym_op->dh.shared_secret.length = sizeof(output);
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+			__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ debug_hexdump(stdout, "shared secret:",
+ asym_op->dh.shared_secret.data,
+ asym_op->dh.shared_secret.length);
+
+error_exit:
+ if (sess != NULL) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+ if (op != NULL)
+ rte_crypto_op_free(op);
+ return status;
+}
+
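
(Editorial note: the shared-secret operation above is plain finite-field
Diffie-Hellman. With local private key $x$, peer public key $y$ and the
group modulus $p$ carried in the xform, the PMD computes
$s = y^{x} \bmod p$; both sides derive the same $s$ because
$y = g^{x'} \bmod p$ for the peer's private key $x'$.)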
+static int
+test_dh_gen_priv_key(struct rte_crypto_asym_xform *xfrm)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t output[TEST_DH_MOD_LEN];
+ struct rte_crypto_asym_xform xform = *xfrm;
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+ if (sess == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ asym_op = op->asym;
+
+ /* Setup a xform and op to generate private key only */
+ xform.dh.type = RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE;
+ xform.next = NULL;
+ asym_op->dh.priv_key.data = output;
+ asym_op->dh.priv_key.length = sizeof(output);
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+			__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ debug_hexdump(stdout, "private key:",
+ asym_op->dh.priv_key.data,
+ asym_op->dh.priv_key.length);
+
+
+error_exit:
+ if (sess != NULL) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+ if (op != NULL)
+ rte_crypto_op_free(op);
+
+ return status;
+}
+
+
+static int
+test_dh_gen_pub_key(struct rte_crypto_asym_xform *xfrm)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t output[TEST_DH_MOD_LEN];
+ struct rte_crypto_asym_xform xform = *xfrm;
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+ if (sess == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ asym_op = op->asym;
+	/* Set up an xform chain to generate the public key
+	 * from the pre-defined test private key
+	 */
+ xform.dh.type = RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE;
+ xform.next = NULL;
+
+ asym_op->dh.pub_key.data = output;
+ asym_op->dh.pub_key.length = sizeof(output);
+	/* load the pre-defined test private key; assigning the whole
+	 * struct points the op at the test vector buffer directly
+	 */
+	asym_op->dh.priv_key = dh_test_params.priv_key;
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+			__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ debug_hexdump(stdout, "pub key:",
+ asym_op->dh.pub_key.data, asym_op->dh.pub_key.length);
+
+ debug_hexdump(stdout, "priv key:",
+ asym_op->dh.priv_key.data, asym_op->dh.priv_key.length);
+
+error_exit:
+ if (sess != NULL) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+ if (op != NULL)
+ rte_crypto_op_free(op);
+
+ return status;
+}
+
+static int
+test_dh_gen_kp(struct rte_crypto_asym_xform *xfrm)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t out_pub_key[TEST_DH_MOD_LEN];
+ uint8_t out_prv_key[TEST_DH_MOD_LEN];
+ struct rte_crypto_asym_xform pub_key_xform;
+ struct rte_crypto_asym_xform xform = *xfrm;
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+ if (sess == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ asym_op = op->asym;
+	/* Set up an xform chain that generates the private key first,
+	 * followed by the public key
+	 */
+	xform.dh.type = RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE;
+	pub_key_xform.xform_type = RTE_CRYPTO_ASYM_XFORM_DH;
+	pub_key_xform.dh.type = RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE;
+	pub_key_xform.next = NULL;
+	xform.next = &pub_key_xform;
+
+ asym_op->dh.pub_key.data = out_pub_key;
+ asym_op->dh.pub_key.length = sizeof(out_pub_key);
+ asym_op->dh.priv_key.data = out_prv_key;
+ asym_op->dh.priv_key.length = sizeof(out_prv_key);
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+			__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ debug_hexdump(stdout, "priv key:",
+ out_prv_key, asym_op->dh.priv_key.length);
+ debug_hexdump(stdout, "pub key:",
+ out_pub_key, asym_op->dh.pub_key.length);
+
+error_exit:
+ if (sess != NULL) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+ if (op != NULL)
+ rte_crypto_op_free(op);
+
+ return status;
+}
+
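
(Editorial note: the key-pair test above chains two xforms through the
.next pointer. A standalone sketch of that structure, with the DH
domain parameters omitted for brevity:)

struct rte_crypto_asym_xform pub_gen = {
	.xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
	.dh = { .type = RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE },
	.next = NULL,
};
struct rte_crypto_asym_xform priv_gen = {
	.xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
	.dh = { .type = RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE },
	.next = &pub_gen,	/* generated private key feeds this stage */
};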
+static int
+test_mod_inv(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ struct rte_cryptodev_asym_capability_idx cap_idx;
+ const struct rte_cryptodev_asymmetric_xform_capability *capability;
+ uint8_t input[TEST_DATA_SIZE] = {0};
+ int ret = 0;
+
+ if (rte_cryptodev_asym_get_xform_enum(
+ &modinv_xform.xform_type, "modinv") < 0) {
+ RTE_LOG(ERR, USER1,
+			"Invalid ASYM algorithm specified\n");
+ return -1;
+ }
+
+ cap_idx.type = modinv_xform.xform_type;
+ capability = rte_cryptodev_asym_capability_get(dev_id,
+ &cap_idx);
+
+ if (rte_cryptodev_asym_xform_capability_check_modlen(
+ capability,
+ modinv_xform.modinv.modulus.length)) {
+ RTE_LOG(ERR, USER1,
+			"Invalid modulus length specified\n");
+ return -1;
+ }
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+ if (!sess) {
+ RTE_LOG(ERR, USER1, "line %u "
+ "FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &modinv_xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+			__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* generate crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ asym_op = op->asym;
+ memcpy(input, base, sizeof(base));
+ asym_op->modinv.base.data = input;
+ asym_op->modinv.base.length = sizeof(base);
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ ret = verify_modinv(mod_inv, result_op);
+ if (ret) {
+ RTE_LOG(ERR, USER1,
+ "operation verification failed\n");
+ status = TEST_FAILED;
+ }
+
+error_exit:
+ if (sess) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+
+ if (op)
+ rte_crypto_op_free(op);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return status;
+}
+
+static int
+test_mod_exp(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ struct rte_cryptodev_asym_capability_idx cap_idx;
+ const struct rte_cryptodev_asymmetric_xform_capability *capability;
+ uint8_t input[TEST_DATA_SIZE] = {0};
+ int ret = 0;
+
+	if (rte_cryptodev_asym_get_xform_enum(&modex_xform.xform_type,
+			"modexp") < 0) {
+ RTE_LOG(ERR, USER1,
+			"Invalid ASYM algorithm specified\n");
+ return -1;
+ }
+
+ /* check for modlen capability */
+ cap_idx.type = modex_xform.xform_type;
+ capability = rte_cryptodev_asym_capability_get(dev_id, &cap_idx);
+
+ if (rte_cryptodev_asym_xform_capability_check_modlen(
+ capability, modex_xform.modex.modulus.length)) {
+ RTE_LOG(ERR, USER1,
+			"Invalid modulus length specified\n");
+ return -1;
+ }
+
+ /* generate crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+ if (!sess) {
+ RTE_LOG(ERR, USER1,
+ "line %u "
+ "FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &modex_xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+			__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ asym_op = op->asym;
+ memcpy(input, base, sizeof(base));
+ asym_op->modex.base.data = input;
+ asym_op->modex.base.length = sizeof(base);
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ ret = verify_modexp(mod_exp, result_op);
+ if (ret) {
+ RTE_LOG(ERR, USER1,
+ "operation verification failed\n");
+ status = TEST_FAILED;
+ }
+
+error_exit:
+ if (sess != NULL) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+
+ if (op != NULL)
+ rte_crypto_op_free(op);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return status;
+}
+
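
(Editorial note: for base $b$, exponent $e$ and modulus $m$, the two
operations verified above compute $r = b^{e} \bmod m$ for modexp and,
for modinv, the $r$ with $b \cdot r \equiv 1 \pmod{m}$, which exists
only when $\gcd(b, m) = 1$.)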
+static int
+test_dh_key_generation(void)
+{
+ int status;
+
+ debug_hexdump(stdout, "p:", dh_xform.dh.p.data, dh_xform.dh.p.length);
+ debug_hexdump(stdout, "g:", dh_xform.dh.g.data, dh_xform.dh.g.length);
+ debug_hexdump(stdout, "priv_key:", dh_test_params.priv_key.data,
+ dh_test_params.priv_key.length);
+
+ RTE_LOG(INFO, USER1,
+ "Test Public and Private key pair generation\n");
+
+ status = test_dh_gen_kp(&dh_xform);
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ RTE_LOG(INFO, USER1,
+ "Test Public Key Generation using pre-defined priv key\n");
+
+ status = test_dh_gen_pub_key(&dh_xform);
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ RTE_LOG(INFO, USER1,
+ "Test Private Key Generation only\n");
+
+ status = test_dh_gen_priv_key(&dh_xform);
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ RTE_LOG(INFO, USER1,
+ "Test shared secret compute\n");
+
+ status = test_dh_gen_shared_sec(&dh_xform);
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return status;
+}
+
+static int
+test_dsa_sign(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ struct rte_mempool *op_mpool = ts_params->op_mpool;
+ struct rte_mempool *sess_mpool = ts_params->session_mpool;
+ uint8_t dev_id = ts_params->valid_devs[0];
+ struct rte_crypto_asym_op *asym_op = NULL;
+ struct rte_crypto_op *op = NULL, *result_op = NULL;
+ struct rte_cryptodev_asym_session *sess = NULL;
+ int status = TEST_SUCCESS;
+ uint8_t r[TEST_DH_MOD_LEN];
+ uint8_t s[TEST_DH_MOD_LEN];
+ uint8_t dgst[] = "35d81554afaad2cf18f3a1770d5fedc4ea5be344";
+
+ sess = rte_cryptodev_asym_session_create(sess_mpool);
+ if (sess == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s", __LINE__,
+ "Session creation failed");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ /* set up crypto op data structure */
+ op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+ if (!op) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to allocate asymmetric crypto "
+ "operation struct");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+ asym_op = op->asym;
+
+ debug_hexdump(stdout, "p: ", dsa_xform.dsa.p.data,
+ dsa_xform.dsa.p.length);
+ debug_hexdump(stdout, "q: ", dsa_xform.dsa.q.data,
+ dsa_xform.dsa.q.length);
+ debug_hexdump(stdout, "g: ", dsa_xform.dsa.g.data,
+ dsa_xform.dsa.g.length);
+ debug_hexdump(stdout, "priv_key: ", dsa_xform.dsa.x.data,
+ dsa_xform.dsa.x.length);
+
+ if (rte_cryptodev_asym_session_init(dev_id, sess, &dsa_xform,
+ sess_mpool) < 0) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+			__LINE__, "unable to config asym session");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ /* attach asymmetric crypto session to crypto operations */
+ rte_crypto_op_attach_asym_session(op, sess);
+ asym_op->dsa.op_type = RTE_CRYPTO_ASYM_OP_SIGN;
+ asym_op->dsa.message.data = dgst;
+ asym_op->dsa.message.length = sizeof(dgst);
+ asym_op->dsa.r.length = sizeof(r);
+ asym_op->dsa.r.data = r;
+ asym_op->dsa.s.length = sizeof(s);
+ asym_op->dsa.s.data = s;
+
+ RTE_LOG(DEBUG, USER1, "Process ASYM operation");
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ asym_op = result_op->asym;
+
+ debug_hexdump(stdout, "r:",
+ asym_op->dsa.r.data, asym_op->dsa.r.length);
+ debug_hexdump(stdout, "s:",
+ asym_op->dsa.s.data, asym_op->dsa.s.length);
+
+ /* Test PMD DSA sign verification using signer public key */
+ asym_op->dsa.op_type = RTE_CRYPTO_ASYM_OP_VERIFY;
+
+ /* copy signer public key */
+ asym_op->dsa.y.data = dsa_test_params.y.data;
+ asym_op->dsa.y.length = dsa_test_params.y.length;
+
+ /* Process crypto operation */
+ if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Error sending packet for operation");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+ rte_pause();
+
+ if (result_op == NULL) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ goto error_exit;
+ }
+
+ if (result_op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ RTE_LOG(ERR, USER1,
+ "line %u FAILED: %s",
+ __LINE__, "Failed to process asym crypto op");
+ status = TEST_FAILED;
+ }
+error_exit:
+ if (sess != NULL) {
+ rte_cryptodev_asym_session_clear(dev_id, sess);
+ rte_cryptodev_asym_session_free(sess);
+ }
+ if (op != NULL)
+ rte_crypto_op_free(op);
+ return status;
+}
+
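
(Editorial note: the sign/verify pair above is textbook DSA. For digest
$H(m)$, per-message secret $k$, private key $x$ and public key
$y = g^{x} \bmod p$, signing computes $r = (g^{k} \bmod p) \bmod q$ and
$s = k^{-1}(H(m) + xr) \bmod q$; verification recomputes
$v = (g^{u_1} y^{u_2} \bmod p) \bmod q$ with
$u_1 = H(m)\,s^{-1} \bmod q$ and $u_2 = r\,s^{-1} \bmod q$, accepting
when $v = r$.)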
+static int
+test_dsa(void)
+{
+ int status;
+ status = test_dsa_sign();
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+ return status;
+}
+
+
+static struct unit_test_suite cryptodev_openssl_asym_testsuite = {
+ .suite_name = "Crypto Device OPENSSL ASYM Unit Test Suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(ut_setup, ut_teardown, test_capability),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_dsa),
+		TEST_CASE_ST(ut_setup, ut_teardown, test_dh_key_generation),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_rsa_enc_dec),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_rsa_sign_verify),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_mod_inv),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_mod_exp),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_cryptodev_openssl_asym(void)
+{
+ gbl_driver_id = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+
+ if (gbl_driver_id == -1) {
+ RTE_LOG(ERR, USER1, "OPENSSL PMD must be loaded. Check if "
+ "CONFIG_RTE_LIBRTE_PMD_OPENSSL is enabled "
+ "in config file to run this testsuite.\n");
+ return TEST_FAILED;
+ }
+
+ return unit_test_suite_runner(&cryptodev_openssl_asym_testsuite);
+}
+
+REGISTER_TEST_COMMAND(cryptodev_openssl_asym_autotest,
+ test_cryptodev_openssl_asym);
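
(Editorial note: a minimal way to exercise this suite from the DPDK test
binary, assuming the OpenSSL PMD was built; the binary path varies with
the build system, and crypto_openssl is the vdev name behind
CRYPTODEV_NAME_OPENSSL_PMD:)

$ ./build/app/test --vdev crypto_openssl
RTE>>cryptodev_openssl_asym_autotest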
diff --git a/test/test/test_cryptodev_asym_util.h b/test/test/test_cryptodev_asym_util.h
new file mode 100644
index 00000000..dff0c2ad
--- /dev/null
+++ b/test/test/test_cryptodev_asym_util.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_ASYM_TEST_UTIL_H__
+#define TEST_CRYPTODEV_ASYM_TEST_UTIL_H__
+
+/* The helpers below compare the resulting buffer to the original test vector */
+
+static inline int rsa_verify(struct rsa_test_data *rsa_param,
+ struct rte_crypto_op *result_op)
+{
+ if (memcmp(rsa_param->data,
+ result_op->asym->rsa.message.data,
+ result_op->asym->rsa.message.length))
+ return -1;
+ return 0;
+}
+
+static inline int verify_modinv(uint8_t *mod_inv,
+ struct rte_crypto_op *result_op)
+{
+ if (memcmp(mod_inv, result_op->asym->modinv.base.data,
+ result_op->asym->modinv.base.length))
+ return -1;
+ return 0;
+}
+
+static inline int verify_modexp(uint8_t *mod_exp,
+ struct rte_crypto_op *result_op)
+{
+ if (memcmp(mod_exp, result_op->asym->modex.base.data,
+ result_op->asym->modex.base.length))
+ return -1;
+ return 0;
+}
+
+#endif /* TEST_CRYPTODEV_ASYM_TEST_UTIL_H__ */
diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
index ed066180..f2701f8f 100644
--- a/test/test/test_cryptodev_blockcipher.c
+++ b/test/test/test_cryptodev_blockcipher.c
@@ -54,6 +54,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
int openssl_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+ int ccp_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
int scheduler_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
int armv8_pmd = rte_cryptodev_driver_id_get(
@@ -67,18 +69,34 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_DPAA_SEC_PMD));
int mrvl_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD));
+ int virtio_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD));
int nb_segs = 1;
+ rte_cryptodev_info_get(dev_id, &dev_info);
+
if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_SG) {
- rte_cryptodev_info_get(dev_id, &dev_info);
- if (!(dev_info.feature_flags &
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER)) {
- printf("Device doesn't support scatter-gather. "
+ uint64_t feat_flags = dev_info.feature_flags;
+ uint64_t oop_flag = RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT;
+
+		if (t->feature_mask & BLOCKCIPHER_TEST_FEATURE_OOP) {
+ if (!(feat_flags & oop_flag)) {
+ printf("Device doesn't support out-of-place "
+ "scatter-gather in input mbuf. "
+ "Test Skipped.\n");
+ return 0;
+ }
+ } else {
+ if (!(feat_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL)) {
+ printf("Device doesn't support in-place "
+ "scatter-gather mbufs. "
"Test Skipped.\n");
- return 0;
+ return 0;
+ }
}
+
nb_segs = 3;
}
@@ -94,7 +112,9 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
driver_id == qat_pmd ||
driver_id == openssl_pmd ||
driver_id == armv8_pmd ||
- driver_id == mrvl_pmd) { /* Fall through */
+ driver_id == mrvl_pmd ||
+ driver_id == ccp_pmd ||
+ driver_id == virtio_pmd) { /* Fall through */
digest_len = tdata->digest.len;
} else if (driver_id == aesni_mb_pmd ||
driver_id == scheduler_pmd) {
@@ -432,11 +452,34 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
uint8_t value;
uint32_t head_unchanged_len, changed_len = 0;
uint32_t i;
+ uint32_t hdroom_used = 0, tlroom_used = 0;
+ uint32_t hdroom = 0;
mbuf = sym_op->m_src;
+	/*
+	 * A crypto PMD specifies the headroom and tailroom it needs
+	 * when processing the crypto operation. The PMD is free to
+	 * modify this space, so the verification check must skip
+	 * that block.
+	 */
+ hdroom_used = dev_info.min_mbuf_headroom_req;
+ tlroom_used = dev_info.min_mbuf_tailroom_req;
+
+ /* Get headroom */
+ hdroom = rte_pktmbuf_headroom(mbuf);
+
head_unchanged_len = mbuf->buf_len;
for (i = 0; i < mbuf->buf_len; i++) {
+
+ /* Skip headroom used by PMD */
+ if (i == hdroom - hdroom_used)
+ i += hdroom_used;
+
+ /* Skip tailroom used by PMD */
+ if (i == (hdroom + mbuf->data_len))
+ i += tlroom_used;
+
value = *((uint8_t *)(mbuf->buf_addr)+i);
if (value != tmp_src_buf[i]) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
@@ -449,14 +492,13 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
mbuf = sym_op->m_dst;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH) {
- head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
- sym_op->auth.data.offset;
+ head_unchanged_len = hdroom + sym_op->auth.data.offset;
changed_len = sym_op->auth.data.length;
if (t->op_mask & BLOCKCIPHER_TEST_OP_AUTH_GEN)
changed_len += digest_len;
} else {
/* cipher-only */
- head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ head_unchanged_len = hdroom +
sym_op->cipher.data.offset;
changed_len = sym_op->cipher.data.length;
}
@@ -480,15 +522,30 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
uint8_t value;
uint32_t head_unchanged_len = 0, changed_len = 0;
uint32_t i;
+ uint32_t hdroom_used = 0, tlroom_used = 0;
+ uint32_t hdroom = 0;
+
+	/*
+	 * A crypto PMD specifies the headroom and tailroom it needs
+	 * when processing the crypto operation. The PMD is free to
+	 * modify this space, so the verification check must skip
+	 * that block.
+	 */
+ hdroom_used = dev_info.min_mbuf_headroom_req;
+ tlroom_used = dev_info.min_mbuf_tailroom_req;
mbuf = sym_op->m_src;
+
+ /* Get headroom */
+ hdroom = rte_pktmbuf_headroom(mbuf);
+
if (t->op_mask & BLOCKCIPHER_TEST_OP_CIPHER) {
- head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ head_unchanged_len = hdroom +
sym_op->cipher.data.offset;
changed_len = sym_op->cipher.data.length;
} else {
/* auth-only */
- head_unchanged_len = rte_pktmbuf_headroom(mbuf) +
+ head_unchanged_len = hdroom +
sym_op->auth.data.offset +
sym_op->auth.data.length;
changed_len = 0;
@@ -498,8 +555,18 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
changed_len += digest_len;
for (i = 0; i < mbuf->buf_len; i++) {
+
+ /* Skip headroom used by PMD */
+ if (i == hdroom - hdroom_used)
+ i += hdroom_used;
+
if (i == head_unchanged_len)
i += changed_len;
+
+ /* Skip tailroom used by PMD */
+ if (i == (hdroom + mbuf->data_len))
+ i += tlroom_used;
+
value = *((uint8_t *)(mbuf->buf_addr)+i);
if (value != tmp_src_buf[i]) {
snprintf(test_msg, BLOCKCIPHER_TEST_MSG_LEN,
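
(Editorial note: the skip logic in the two verification loops above
assumes the following mbuf layout; only the PMD-reserved slices and the
declared crypto region may legitimately differ from tmp_src_buf:)

/*
 * buf_addr                                                     buf_len
 * |<-------- hdroom -------->|<---- data_len ---->|
 * | untouched | hdroom_used  |   crypto region    | tlroom_used | untouched
 *               (PMD scratch)                       (PMD scratch)
 */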
@@ -555,6 +622,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
int openssl_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
+ int ccp_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
@@ -568,7 +637,9 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
int qat_pmd = rte_cryptodev_driver_id_get(
RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD));
int mrvl_pmd = rte_cryptodev_driver_id_get(
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD));
+ RTE_STR(CRYPTODEV_NAME_MVSAM_PMD));
+ int virtio_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_VIRTIO_PMD));
switch (test_type) {
case BLKCIPHER_AES_CHAIN_TYPE:
@@ -627,10 +698,14 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
else if (driver_id == dpaa2_sec_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
+ else if (driver_id == ccp_pmd)
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
else if (driver_id == dpaa_sec_pmd)
target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
else if (driver_id == mrvl_pmd)
- target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MRVL;
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MVSAM;
+ else if (driver_id == virtio_pmd)
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO;
else
TEST_ASSERT(0, "Unrecognized cryptodev type");
diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
index edbdaabe..6f7c8929 100644
--- a/test/test/test_cryptodev_blockcipher.h
+++ b/test/test/test_cryptodev_blockcipher.h
@@ -26,7 +26,9 @@
#define BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER 0x0010 /* Scheduler */
#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC 0x0020 /* DPAA2_SEC flag */
#define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC 0x0040 /* DPAA_SEC flag */
-#define BLOCKCIPHER_TEST_TARGET_PMD_MRVL 0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_MVSAM 0x0080 /* Marvell flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_CCP	0x0100 /* CCP flag */
+#define BLOCKCIPHER_TEST_TARGET_PMD_VIRTIO 0x0200 /* VIRTIO flag */
#define BLOCKCIPHER_TEST_OP_CIPHER (BLOCKCIPHER_TEST_OP_ENCRYPT | \
BLOCKCIPHER_TEST_OP_DECRYPT)
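
(Editorial note: these target masks are ORed into each test case's
pmd_mask and ANDed against the driver's mask at run time, which is why
every PMD needs a distinct bit: CCP takes 0x0100 and VIRTIO 0x0200
rather than reusing DPAA_SEC's 0x0040. The run-time check reduces to
the sketch below, where tc stands for one blockcipher_test_case:)

/* nonzero when the test case targets the PMD under test */
int applies = (tc->pmd_mask & target_pmd_mask) != 0;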
diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
index 0be809e3..10334560 100644
--- a/test/test/test_cryptodev_des_test_vectors.h
+++ b/test/test/test_cryptodev_des_test_vectors.h
@@ -792,6 +792,30 @@ triple_des192cbc_hmac_sha1_test_vector = {
.len = 20
}
};
+static const struct blockcipher_test_data
+triple_des64cbc_test_vector = {
+ .crypto_algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .cipher_key = {
+ .data = {
+ 0xE4, 0x23, 0x33, 0x8A, 0x35, 0x64, 0x61, 0xE2
+ },
+ .len = 8
+ },
+ .iv = {
+ .data = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ },
+ .len = 8
+ },
+ .plaintext = {
+ .data = plaintext_des,
+ .len = 512
+ },
+ .ciphertext = {
+ .data = ciphertext512_des,
+ .len = 512
+ },
+};
static const struct blockcipher_test_data
des_cbc_test_vector = {
@@ -826,7 +850,7 @@ static const struct blockcipher_test_case des_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "DES-CBC Decryption",
@@ -835,7 +859,7 @@ static const struct blockcipher_test_case des_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_MB |
BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
};
@@ -1044,7 +1068,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1053,19 +1078,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-128-CBC SHA1 Encryption Digest",
.test_data = &triple_des128cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify",
.test_data = &triple_des128cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest",
@@ -1075,7 +1103,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
@@ -1085,21 +1114,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-192-CBC SHA1 Encryption Digest",
.test_data = &triple_des192cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
.test_data = &triple_des192cbc_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
@@ -1180,7 +1212,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.test_data = &triple_des128cbc_hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
{
.test_descr =
@@ -1189,19 +1222,38 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
.test_data = &triple_des128cbc_hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
.feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP
},
};
static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
{
+ .test_descr = "3DES-64-CBC Encryption",
+ .test_data = &triple_des64cbc_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
+ .test_descr = "3DES-64-CBC Decryption",
+ .test_data = &triple_des64cbc_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_DECRYPT,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ },
+ {
.test_descr = "3DES-128-CBC Encryption",
.test_data = &triple_des128cbc_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_ENCRYPT,
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "3DES-128-CBC Decryption",
@@ -1210,7 +1262,9 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
.pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "3DES-192-CBC Encryption",
@@ -1220,7 +1274,9 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "3DES-192-CBC Decryption",
@@ -1230,7 +1286,9 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_QAT |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MB
},
{
.test_descr = "3DES-128-CTR Encryption",
diff --git a/test/test/test_cryptodev_dh_test_vectors.h b/test/test/test_cryptodev_dh_test_vectors.h
new file mode 100644
index 00000000..fe7510dc
--- /dev/null
+++ b/test/test/test_cryptodev_dh_test_vectors.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_DH_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_DH_TEST_VECTORS_H_
+
+#include "rte_crypto_asym.h"
+
+#define TEST_DATA_SIZE 4096
+#define TEST_DH_MOD_LEN 1024
+
+
+struct dh_test_param {
+ rte_crypto_param priv_key;
+};
+
+uint8_t dh_priv[] = {
+ 0x46, 0x3c, 0x7b, 0x43, 0xd1, 0xb8, 0xd4, 0x7a,
+ 0x56, 0x28, 0x85, 0x79, 0xcc, 0xd8, 0x90, 0x03,
+ 0x0c, 0x4b, 0xc6, 0xd3, 0x7f, 0xb3, 0x19, 0x84,
+ 0x8a, 0xc6, 0x0d, 0x24, 0x5e, 0xaa, 0x7e, 0x7a,
+ 0x73, 0x88, 0xa6, 0x47, 0x7c, 0x42, 0x78, 0x63,
+ 0x11, 0x12, 0xd3, 0xa0, 0xc5, 0xfe, 0xfd, 0xf2,
+ 0x9e, 0x17, 0x90, 0xe5, 0x6d, 0xcc, 0x20, 0x6f,
+ 0xe8, 0x82, 0x28, 0xbf, 0x5c, 0xe6, 0xd4, 0x86,
+ 0x5c, 0x35, 0x32, 0x97, 0xc2, 0x86, 0x1b, 0xc5,
+ 0x59, 0x1c, 0x0b, 0x1b, 0xec, 0x60, 0x3c, 0x1d,
+ 0x8d, 0x7f, 0xf0, 0xc7, 0x48, 0x3a, 0x51, 0x09,
+ 0xf2, 0x3e, 0x9e, 0x35, 0x74, 0x98, 0x4d, 0xad,
+ 0x39, 0xa7, 0xf2, 0xd2, 0xb4, 0x32, 0xd3, 0xc8,
+ 0xe9, 0x45, 0xbe, 0x56, 0xe7, 0x87, 0xe0, 0xa0,
+ 0x97, 0x6b, 0x5f, 0x99, 0x5e, 0x41, 0x59, 0x33,
+ 0x95, 0x64, 0x0d, 0xe9, 0x58, 0x5b, 0xa6, 0x38
+};
+
+uint8_t dh_p[] = {
+ 0xef, 0xee, 0x8c, 0x8b, 0x3f, 0x85, 0x95, 0xcd,
+ 0x4d, 0x68, 0x5d, 0x4a, 0x5d, 0x1f, 0x2a, 0x2e,
+ 0xdd, 0xcf, 0xef, 0x1b, 0x3b, 0xe9, 0x7b, 0x0c,
+ 0x13, 0xee, 0x76, 0xd5, 0x93, 0xca, 0x8b, 0xc8,
+ 0x0b, 0x97, 0x00, 0xec, 0x1f, 0x34, 0xa2, 0xce,
+ 0x83, 0x8d, 0x80, 0xea, 0xfe, 0x11, 0xed, 0x28,
+ 0xdd, 0x32, 0x22, 0x77, 0x96, 0x4e, 0xc5, 0xed,
+ 0xc8, 0x7a, 0x52, 0x10, 0x22, 0xcc, 0xb2, 0x4d,
+ 0xd3, 0xda, 0x03, 0xf5, 0x1e, 0xa8, 0x79, 0x23,
+ 0x8b, 0xe1, 0x78, 0x47, 0x07, 0x5b, 0x26, 0xbb,
+ 0x53, 0x46, 0x0b, 0x18, 0x5c, 0x07, 0x4e, 0xb6,
+ 0x76, 0xc9, 0xa8, 0xd5, 0x30, 0xa3, 0xbe, 0x8d,
+ 0xae, 0xcd, 0x34, 0x68, 0x62, 0x5f, 0xb9, 0x5c,
+ 0x34, 0x90, 0xf0, 0xda, 0x47, 0x86, 0x36, 0x04,
+ 0x28, 0xbc, 0x7d, 0xae, 0x9d, 0x4e, 0x61, 0x28,
+ 0x70, 0xdb, 0xa6, 0x55, 0x04, 0x46, 0x27, 0xe3
+};
+
+uint8_t dh_g[] = {0x02};
+
+struct dh_test_param dh_test_params = {
+ .priv_key = {
+ .data = dh_priv,
+ .length = sizeof(dh_priv)
+ }
+};
+
+struct rte_crypto_asym_xform dh_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DH,
+ .dh = {
+ .p = {
+ .data = dh_p,
+ .length = sizeof(dh_p)
+ },
+ .g = {
+ .data = dh_g,
+ .length = sizeof(dh_g)
+ },
+ }
+};
+
+#endif /* TEST_CRYPTODEV_DH_TEST_VECTORS_H_ */
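
Editor's note: these vectors feed the asymmetric-op flow of the test suite.
A hedged sketch of generating the public key from dh_test_params and
dh_xform; dev_id, op_mpool and session_mpool are assumed to exist, and the
op type living on the xform reflects this editor's reading of the 18.08
asymmetric API rather than a documented guarantee:

uint8_t out_pub[TEST_DH_MOD_LEN / 8];
struct rte_cryptodev_asym_session *sess;
struct rte_crypto_op *op, *done;

dh_xform.dh.type = RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE; /* assumed */

sess = rte_cryptodev_asym_session_create(session_mpool);
rte_cryptodev_asym_session_init(dev_id, sess, &dh_xform, session_mpool);

op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
op->asym->dh.priv_key = dh_test_params.priv_key;
op->asym->dh.pub_key.data = out_pub;
op->asym->dh.pub_key.length = sizeof(out_pub);
rte_crypto_op_attach_asym_session(op, sess);

rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1);
while (rte_cryptodev_dequeue_burst(dev_id, 0, &done, 1) == 0)
	rte_pause(); /* done->asym->dh.pub_key now holds g^x mod p */
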
diff --git a/test/test/test_cryptodev_dsa_test_vectors.h b/test/test/test_cryptodev_dsa_test_vectors.h
new file mode 100644
index 00000000..bbcb0d72
--- /dev/null
+++ b/test/test/test_cryptodev_dsa_test_vectors.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_DSA_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_DSA_TEST_VECTORS_H_
+
+#include "rte_crypto_asym.h"
+
+#define TEST_DATA_SIZE 4096
+
+
+struct dsa_test_param {
+ rte_crypto_param y;
+};
+
+static unsigned char dsa_x[] = {
+ 0xc5, 0x3e, 0xae, 0x6d, 0x45, 0x32, 0x31, 0x64,
+ 0xc7, 0xd0, 0x7a, 0xf5, 0x71, 0x57, 0x03, 0x74,
+ 0x4a, 0x63, 0xfc, 0x3a
+};
+
+uint8_t dsa_y[] = {
+ 0x31, 0x3f, 0xd9, 0xeb, 0xca, 0x91, 0x57, 0x4e,
+ 0x1c, 0x2e, 0xeb, 0xe1, 0x51, 0x7c, 0x57, 0xe0,
+ 0xc2, 0x1b, 0x02, 0x09, 0x87, 0x21, 0x40, 0xc5,
+ 0x32, 0x87, 0x61, 0xbb, 0xb2, 0x45, 0x0b, 0x33,
+ 0xf1, 0xb1, 0x8b, 0x40, 0x9c, 0xe9, 0xab, 0x7c,
+ 0x4c, 0xd8, 0xfd, 0xa3, 0x39, 0x1e, 0x8e, 0x34,
+ 0x86, 0x83, 0x57, 0xc1, 0x99, 0xe1, 0x6a, 0x6b,
+ 0x2e, 0xba, 0x06, 0xd6, 0x74, 0x9d, 0xef, 0x79,
+ 0x1d, 0x79, 0xe9, 0x5d, 0x3a, 0x4d, 0x09, 0xb2,
+ 0x4c, 0x39, 0x2a, 0xd8, 0x9d, 0xbf, 0x10, 0x09,
+ 0x95, 0xae, 0x19, 0xc0, 0x10, 0x62, 0x05, 0x6b,
+ 0xb1, 0x4b, 0xce, 0x00, 0x5e, 0x87, 0x31, 0xef,
+ 0xde, 0x17, 0x5f, 0x95, 0xb9, 0x75, 0x08, 0x9b,
+ 0xdc, 0xda, 0xea, 0x56, 0x2b, 0x32, 0x78, 0x6d,
+ 0x96, 0xf5, 0xa3, 0x1a, 0xed, 0xf7, 0x53, 0x64,
+ 0x00, 0x8a, 0xd4, 0xff, 0xfe, 0xbb, 0x97, 0x0b
+};
+
+static unsigned char dsa_p[] = {
+ 0xa8, 0xf9, 0xcd, 0x20, 0x1e, 0x5e, 0x35, 0xd8,
+ 0x92, 0xf8, 0x5f, 0x80, 0xe4, 0xdb, 0x25, 0x99,
+ 0xa5, 0x67, 0x6a, 0x3b, 0x1d, 0x4f, 0x19, 0x03,
+ 0x30, 0xed, 0x32, 0x56, 0xb2, 0x6d, 0x0e, 0x80,
+ 0xa0, 0xe4, 0x9a, 0x8f, 0xff, 0xaa, 0xad, 0x2a,
+ 0x24, 0xf4, 0x72, 0xd2, 0x57, 0x32, 0x41, 0xd4,
+ 0xd6, 0xd6, 0xc7, 0x48, 0x0c, 0x80, 0xb4, 0xc6,
+ 0x7b, 0xb4, 0x47, 0x9c, 0x15, 0xad, 0xa7, 0xea,
+ 0x84, 0x24, 0xd2, 0x50, 0x2f, 0xa0, 0x14, 0x72,
+ 0xe7, 0x60, 0x24, 0x17, 0x13, 0xda, 0xb0, 0x25,
+ 0xae, 0x1b, 0x02, 0xe1, 0x70, 0x3a, 0x14, 0x35,
+ 0xf6, 0x2d, 0xdf, 0x4e, 0xe4, 0xc1, 0xb6, 0x64,
+ 0x06, 0x6e, 0xb2, 0x2f, 0x2e, 0x3b, 0xf2, 0x8b,
+ 0xb7, 0x0a, 0x2a, 0x76, 0xe4, 0xfd, 0x5e, 0xbe,
+ 0x2d, 0x12, 0x29, 0x68, 0x1b, 0x5b, 0x06, 0x43,
+ 0x9a, 0xc9, 0xc7, 0xe9, 0xd8, 0xbd, 0xe2, 0x83
+};
+
+static unsigned char dsa_q[] = {
+ 0xf8, 0x5f, 0x0f, 0x83, 0xac, 0x4d, 0xf7, 0xea,
+ 0x0c, 0xdf, 0x8f, 0x46, 0x9b, 0xfe, 0xea, 0xea,
+ 0x14, 0x15, 0x64, 0x95
+};
+
+static unsigned char dsa_g[] = {
+ 0x2b, 0x31, 0x52, 0xff, 0x6c, 0x62, 0xf1, 0x46,
+ 0x22, 0xb8, 0xf4, 0x8e, 0x59, 0xf8, 0xaf, 0x46,
+ 0x88, 0x3b, 0x38, 0xe7, 0x9b, 0x8c, 0x74, 0xde,
+ 0xea, 0xe9, 0xdf, 0x13, 0x1f, 0x8b, 0x85, 0x6e,
+ 0x3a, 0xd6, 0xc8, 0x45, 0x5d, 0xab, 0x87, 0xcc,
+ 0x0d, 0xa8, 0xac, 0x97, 0x34, 0x17, 0xce, 0x4f,
+ 0x78, 0x78, 0x55, 0x7d, 0x6c, 0xdf, 0x40, 0xb3,
+ 0x5b, 0x4a, 0x0c, 0xa3, 0xeb, 0x31, 0x0c, 0x6a,
+ 0x95, 0xd6, 0x8c, 0xe2, 0x84, 0xad, 0x4e, 0x25,
+ 0xea, 0x28, 0x59, 0x16, 0x11, 0xee, 0x08, 0xb8,
+ 0x44, 0x4b, 0xd6, 0x4b, 0x25, 0xf3, 0xf7, 0xc5,
+ 0x72, 0x41, 0x0d, 0xdf, 0xb3, 0x9c, 0xc7, 0x28,
+ 0xb9, 0xc9, 0x36, 0xf8, 0x5f, 0x41, 0x91, 0x29,
+ 0x86, 0x99, 0x29, 0xcd, 0xb9, 0x09, 0xa6, 0xa3,
+ 0xa9, 0x9b, 0xbe, 0x08, 0x92, 0x16, 0x36, 0x81,
+ 0x71, 0xbd, 0x0b, 0xa8, 0x1d, 0xe4, 0xfe, 0x33
+};
+
+struct dsa_test_param dsa_test_params = {
+ .y = {
+ .data = dsa_y,
+ .length = sizeof(dsa_y)
+ }
+};
+
+struct rte_crypto_asym_xform dsa_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_DSA,
+ .dsa = {
+ .p = {
+ .data = dsa_p,
+ .length = sizeof(dsa_p)
+ },
+ .q = {
+ .data = dsa_q,
+ .length = sizeof(dsa_q)
+ },
+ .g = {
+ .data = dsa_g,
+ .length = sizeof(dsa_g)
+ },
+ .x = {
+ .data = dsa_x,
+ .length = sizeof(dsa_x)
+ }
+
+ }
+};
+
+#endif /* TEST_CRYPTODEV_DSA_TEST_VECTORS_H_ */
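
Editor's note: unlike p, q, g and the private key x, the public key y sits
in dsa_test_params because it is supplied per operation for verification. A
hedged sketch of a sign request; field names are this editor's reading of
the 18.08 rte_crypto_dsa_op_param, and msg, msg_len and op_mpool are
assumed:

uint8_t r_out[sizeof(dsa_q)], s_out[sizeof(dsa_q)];
struct rte_crypto_op *op = rte_crypto_op_alloc(op_mpool,
		RTE_CRYPTO_OP_TYPE_ASYMMETRIC);

op->asym->dsa.op_type = RTE_CRYPTO_ASYM_OP_SIGN;
op->asym->dsa.message.data = msg;          /* digest to be signed */
op->asym->dsa.message.length = msg_len;
op->asym->dsa.r.data = r_out;
op->asym->dsa.r.length = sizeof(r_out);
op->asym->dsa.s.data = s_out;
op->asym->dsa.s.length = sizeof(s_out);
/* for RTE_CRYPTO_ASYM_OP_VERIFY the public key would be supplied as
 * op->asym->dsa.y = dsa_test_params.y; (assumed field placement) */
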
diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
index 93dacb7b..cf86dbb1 100644
--- a/test/test/test_cryptodev_hash_test_vectors.h
+++ b/test/test/test_cryptodev_hash_test_vectors.h
@@ -319,18 +319,68 @@ hmac_sha512_test_vector = {
}
};
+static const struct blockcipher_test_data
+cmac_test_vector = {
+ .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .ciphertext = {
+ .data = plaintext_hash,
+ .len = 512
+ },
+ .auth_key = {
+ .data = {
+ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
+ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {
+ 0x4C, 0x77, 0x87, 0xA0, 0x78, 0x8E, 0xEA, 0x96,
+ 0xC1, 0xEB, 0x1E, 0x4E, 0x95, 0x8F, 0xED, 0x27
+ },
+ .len = 16,
+ .truncated_len = 16
+ }
+};
+
+static const struct blockcipher_test_data
+cmac_test_vector_12 = {
+ .auth_algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .ciphertext = {
+ .data = plaintext_hash,
+ .len = 512
+ },
+ .auth_key = {
+ .data = {
+ 0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6,
+ 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C
+ },
+ .len = 16
+ },
+ .digest = {
+ .data = {
+ 0x4C, 0x77, 0x87, 0xA0, 0x78, 0x8E, 0xEA, 0x96,
+ 0xC1, 0xEB, 0x1E, 0x4E, 0x95, 0x8F, 0xED, 0x27
+ },
+ .len = 12,
+ .truncated_len = 12
+ }
+};
+
static const struct blockcipher_test_case hash_test_cases[] = {
{
.test_descr = "MD5 Digest",
.test_data = &md5_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "MD5 Digest Verify",
.test_data = &md5_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-MD5 Digest",
@@ -341,7 +391,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-MD5 Digest Verify",
@@ -352,19 +403,24 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA1 Digest",
.test_data = &sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA1 Digest Verify",
.test_data = &sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA1 Digest",
@@ -375,7 +431,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA1 Digest Scatter Gather",
@@ -394,7 +452,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA1 Digest Verify Scatter Gather",
@@ -408,13 +468,17 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "SHA224 Digest",
.test_data = &sha224_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA224 Digest Verify",
.test_data = &sha224_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA224 Digest",
@@ -425,6 +489,7 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
@@ -436,19 +501,24 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA256 Digest",
.test_data = &sha256_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA256 Digest Verify",
.test_data = &sha256_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA256 Digest",
@@ -459,7 +529,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA256 Digest Verify",
@@ -470,19 +542,25 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA384 Digest",
.test_data = &sha384_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA384 Digest Verify",
.test_data = &sha384_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA384 Digest",
@@ -493,7 +571,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA384 Digest Verify",
@@ -504,19 +584,25 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA512 Digest",
.test_data = &sha512_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "SHA512 Digest Verify",
.test_data = &sha512_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA512 Digest",
@@ -527,7 +613,9 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
{
.test_descr = "HMAC-SHA512 Digest Verify",
@@ -538,8 +626,34 @@ static const struct blockcipher_test_case hash_test_cases[] = {
BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
- BLOCKCIPHER_TEST_TARGET_PMD_QAT
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_CCP |
+ BLOCKCIPHER_TEST_TARGET_PMD_MVSAM
},
+ {
+ .test_descr = "CMAC Digest 12B",
+ .test_data = &cmac_test_vector_12,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "CMAC Digest Verify 12B",
+ .test_data = &cmac_test_vector_12,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "CMAC Digest 16B",
+ .test_data = &cmac_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ },
+ {
+ .test_descr = "CMAC Digest Verify 16B",
+ .test_data = &cmac_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB
+ }
};
#endif /* TEST_CRYPTODEV_HASH_TEST_VECTORS_H_ */
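
Editor's note: the two CMAC vectors differ only in digest length (the full
16-byte tag versus a 12-byte truncation), which the harness feeds into the
auth transform. A sketch of the corresponding symmetric xform; cmac_key is
an assumed 16-byte buffer and the session/op plumbing is omitted:

struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_AES_CMAC,
		.key = {
			.data = cmac_key, /* 16-byte AES key */
			.length = 16,
		},
		.digest_length = 12, /* 16 for the full-tag vector */
	},
};
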
diff --git a/test/test/test_cryptodev_mod_test_vectors.h b/test/test/test_cryptodev_mod_test_vectors.h
new file mode 100644
index 00000000..a25c676a
--- /dev/null
+++ b/test/test/test_cryptodev_mod_test_vectors.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_MOD_TEST_VECTORS_H_
+#define TEST_CRYPTODEV_MOD_TEST_VECTORS_H_
+
+/* modular operation test data */
+uint8_t base[] = {
+ 0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
+ 0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
+ 0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
+};
+
+uint8_t mod_p[] = {
+ 0x00, 0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00,
+ 0x0a, 0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5,
+ 0xce, 0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a,
+ 0xa2, 0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde,
+ 0x0a, 0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a,
+ 0x3d, 0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63,
+ 0x6a, 0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27,
+ 0x6e, 0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa,
+ 0x72, 0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53,
+ 0x87, 0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a,
+ 0x62, 0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63,
+ 0x18, 0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33,
+ 0x4e, 0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3,
+ 0x03, 0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e,
+ 0xee, 0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb,
+ 0xa6, 0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde,
+ 0x55
+};
+
+uint8_t mod_e[] = {0x01, 0x00, 0x01};
+
+/* Precomputed modular exponentiation for verification */
+uint8_t mod_exp[] = {
+ 0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
+ 0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
+ 0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
+ 0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
+ 0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
+ 0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
+ 0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
+ 0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
+ 0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
+ 0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
+ 0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
+ 0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
+ 0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
+ 0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
+ 0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
+ 0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
+};
+
+/* Precomputed modular inverse for verification */
+uint8_t mod_inv[] = {
+ 0x52, 0xb1, 0xa3, 0x8c, 0xc5, 0x8a, 0xb9, 0x1f,
+ 0xb6, 0x82, 0xf5, 0x6a, 0x9a, 0xde, 0x8d, 0x2e,
+ 0x62, 0x4b, 0xac, 0x49, 0x21, 0x1d, 0x30, 0x4d,
+ 0x32, 0xac, 0x1f, 0x40, 0x6d, 0x52, 0xc7, 0x9b,
+ 0x6c, 0x0a, 0x82, 0x3a, 0x2c, 0xaf, 0x6b, 0x6d,
+ 0x17, 0xbe, 0x43, 0xed, 0x97, 0x78, 0xeb, 0x4c,
+ 0x92, 0x6f, 0xcf, 0xed, 0xb1, 0x09, 0xcb, 0x27,
+ 0xc2, 0xde, 0x62, 0xfd, 0x21, 0xe6, 0xbd, 0x4f,
+ 0xfe, 0x7a, 0x1b, 0x50, 0xfe, 0x10, 0x4a, 0xb0,
+ 0xb7, 0xcf, 0xdb, 0x7d, 0xca, 0xc2, 0xf0, 0x1c,
+ 0x39, 0x48, 0x6a, 0xb5, 0x4d, 0x8c, 0xfe, 0x63,
+ 0x91, 0x9c, 0x21, 0xc3, 0x0e, 0x76, 0xad, 0x44,
+ 0x8d, 0x54, 0x33, 0x99, 0xe1, 0x80, 0x19, 0xba,
+ 0xb5, 0xac, 0x7d, 0x9c, 0xce, 0x91, 0x2a, 0xd9,
+ 0x2c, 0xe1, 0x16, 0xd6, 0xd7, 0xcf, 0x9d, 0x05,
+ 0x9a, 0x66, 0x9a, 0x3a, 0xc1, 0xb8, 0x4b, 0xc3
+};
+
+struct rte_crypto_asym_xform modex_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .modex = {
+ .modulus = {
+ .data = mod_p,
+ .length = sizeof(mod_p)
+ },
+ .exponent = {
+ .data = mod_e,
+ .length = sizeof(mod_e)
+ }
+ }
+};
+
+struct rte_crypto_asym_xform modinv_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODINV,
+ .modinv = {
+ .modulus = {
+ .data = mod_p,
+ .length = sizeof(mod_p)
+ }
+ }
+};
+
+#endif /* TEST_CRYPTODEV_MOD_TEST_VECTORS_H_ */
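
Editor's note: mod_exp and mod_inv above are precomputed expected outputs
for base under modulus mod_p. A hedged sketch of a modular exponentiation
op; op_mpool is assumed, session attach/enqueue/dequeue are elided, and the
result being written back in place is this editor's reading of the 18.08
API, not a documented contract:

#include <string.h>

uint8_t input[sizeof(mod_p)];
struct rte_crypto_op *op = rte_crypto_op_alloc(op_mpool,
		RTE_CRYPTO_OP_TYPE_ASYMMETRIC);

memcpy(input, base, sizeof(base));
op->asym->modex.base.data = input;
op->asym->modex.base.length = sizeof(base);
/* ... attach a session built from modex_xform, enqueue, dequeue ... */
/* assumed: on completion op->asym->modex.base compares equal to mod_exp */
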
diff --git a/test/test/test_cryptodev_rsa_test_vectors.h b/test/test/test_cryptodev_rsa_test_vectors.h
new file mode 100644
index 00000000..3f8c41a6
--- /dev/null
+++ b/test/test/test_cryptodev_rsa_test_vectors.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef TEST_CRYPTODEV_RSA_TEST_VECTORS_H__
+#define TEST_CRYPTODEV_RSA_TEST_VECTORS_H__
+
+#include "rte_crypto_asym.h"
+
+#define TEST_DATA_SIZE 4096
+
+struct rsa_test_data {
+ uint8_t data[TEST_DATA_SIZE];
+ unsigned int len;
+};
+
+struct rsa_test_data rsaplaintext = {
+ .data = {
+ 0xf8, 0xba, 0x1a, 0x55, 0xd0, 0x2f, 0x85, 0xae,
+ 0x96, 0x7b, 0xb6, 0x2f, 0xb6, 0xcd, 0xa8, 0xeb,
+ 0x7e, 0x78, 0xa0, 0x50
+ },
+ .len = 20
+};
+
+uint8_t rsa_n[] = {
+ 0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00,
+ 0x0a, 0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5,
+ 0xce, 0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a,
+ 0xa2, 0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde,
+ 0x0a, 0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a,
+ 0x3d, 0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63,
+ 0x6a, 0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27,
+ 0x6e, 0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa,
+ 0x72, 0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53,
+ 0x87, 0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a,
+ 0x62, 0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63,
+ 0x18, 0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33,
+ 0x4e, 0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3,
+ 0x03, 0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e,
+ 0xee, 0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb,
+ 0xa6, 0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde,
+ 0x55
+};
+
+uint8_t rsa_d[] = {
+ 0x24, 0xd7, 0xea, 0xf4, 0x7f, 0xe0, 0xca, 0x31,
+ 0x4d, 0xee, 0xc4, 0xa1, 0xbe, 0xab, 0x06, 0x61,
+ 0x32, 0xe7, 0x51, 0x46, 0x27, 0xdf, 0x72, 0xe9,
+ 0x6f, 0xa8, 0x4c, 0xd1, 0x26, 0xef, 0x65, 0xeb,
+ 0x67, 0xff, 0x5f, 0xa7, 0x3b, 0x25, 0xb9, 0x08,
+ 0x8e, 0xa0, 0x47, 0x56, 0xe6, 0x8e, 0xf9, 0xd3,
+ 0x18, 0x06, 0x3d, 0xc6, 0xb1, 0xf8, 0xdc, 0x1b,
+ 0x8d, 0xe5, 0x30, 0x54, 0x26, 0xac, 0x16, 0x3b,
+ 0x7b, 0xad, 0x46, 0x9e, 0x21, 0x6a, 0x57, 0xe6,
+ 0x81, 0x56, 0x1d, 0x2a, 0xc4, 0x39, 0x63, 0x67,
+ 0x81, 0x2c, 0xca, 0xcc, 0xf8, 0x42, 0x04, 0xbe,
+ 0xcf, 0x8f, 0x6c, 0x5b, 0x81, 0x46, 0xb9, 0xc7,
+ 0x62, 0x90, 0x87, 0x35, 0x03, 0x9b, 0x89, 0xcb,
+ 0x37, 0xbd, 0xf1, 0x1b, 0x99, 0xa1, 0x9a, 0x78,
+ 0xd5, 0x4c, 0xdd, 0x3f, 0x41, 0x0c, 0xb7, 0x1a,
+ 0xd9, 0x7b, 0x87, 0x5f, 0xbe, 0xb1, 0x83, 0x41
+};
+
+uint8_t rsa_e[] = {0x01, 0x00, 0x01};
+
+/** rsa xform using exponent key */
+struct rte_crypto_asym_xform rsa_xform = {
+ .next = NULL,
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+ .rsa = {
+ .n = {
+ .data = rsa_n,
+ .length = sizeof(rsa_n)
+ },
+ .e = {
+ .data = rsa_e,
+ .length = sizeof(rsa_e)
+ },
+ .key_type = RTE_RSA_KEY_TYPE_EXP,
+ .d = {
+ .data = rsa_d,
+ .length = sizeof(rsa_d)
+ }
+ }
+};
+
+#endif /* TEST_CRYPTODEV_RSA_TEST_VECTORS_H__ */
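
Editor's note: with key_type RTE_RSA_KEY_TYPE_EXP the private exponent d is
carried directly in the xform, so a sign operation only has to point at the
message and an output buffer. A hedged sketch (op_mpool assumed; padding
selection and session plumbing elided, see rte_crypto_asym.h for the exact
op fields):

uint8_t sig[sizeof(rsa_n)];
struct rte_crypto_op *op = rte_crypto_op_alloc(op_mpool,
		RTE_CRYPTO_OP_TYPE_ASYMMETRIC);

op->asym->rsa.op_type = RTE_CRYPTO_ASYM_OP_SIGN;
op->asym->rsa.message.data = rsaplaintext.data;
op->asym->rsa.message.length = rsaplaintext.len;
op->asym->rsa.sign.data = sig;
op->asym->rsa.sign.length = sizeof(sig);
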
diff --git a/test/test/test_devargs.c b/test/test/test_devargs.c
deleted file mode 100644
index b8f3146f..00000000
--- a/test/test/test_devargs.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2014 6WIND S.A.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/queue.h>
-
-#include <rte_debug.h>
-#include <rte_devargs.h>
-
-#include "test.h"
-
-/* clear devargs list that was modified by the test */
-static void free_devargs_list(void)
-{
- struct rte_devargs *devargs;
-
- while (!TAILQ_EMPTY(&devargs_list)) {
- devargs = TAILQ_FIRST(&devargs_list);
- TAILQ_REMOVE(&devargs_list, devargs, next);
- free(devargs->args);
- free(devargs);
- }
-}
-
-static int
-test_devargs(void)
-{
- struct rte_devargs_list save_devargs_list;
- struct rte_devargs *devargs;
-
- /* save the real devargs_list, it is restored at the end of the test */
- save_devargs_list = devargs_list;
- TAILQ_INIT(&devargs_list);
-
- /* test valid cases */
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "08:00.1") < 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "0000:5:00.0") < 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI, "04:00.0,arg=val") < 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_BLACKLISTED_PCI, "0000:01:00.1") < 0)
- goto fail;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_WHITELISTED_PCI) != 2)
- goto fail;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_BLACKLISTED_PCI) != 2)
- goto fail;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_VIRTUAL) != 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring0") < 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring1,key=val,k2=val2") < 0)
- goto fail;
- if (rte_eal_devargs_type_count(RTE_DEVTYPE_VIRTUAL) != 2)
- goto fail;
- free_devargs_list();
-
- /* check virtual device with argument parsing */
- if (rte_eal_devargs_add(RTE_DEVTYPE_VIRTUAL, "net_ring1,k1=val,k2=val2") < 0)
- goto fail;
- devargs = TAILQ_FIRST(&devargs_list);
- if (strncmp(devargs->name, "net_ring1",
- sizeof(devargs->name)) != 0)
- goto fail;
- if (!devargs->args || strcmp(devargs->args, "k1=val,k2=val2") != 0)
- goto fail;
- free_devargs_list();
-
- /* check PCI device with empty argument parsing */
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "04:00.1") < 0)
- goto fail;
- devargs = TAILQ_FIRST(&devargs_list);
- if (strcmp(devargs->name, "04:00.1") != 0)
- goto fail;
- if (!devargs->args || strcmp(devargs->args, "") != 0)
- goto fail;
- free_devargs_list();
-
- /* test error case: bad PCI address */
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "08:1") == 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "00.1") == 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "foo") == 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, ",") == 0)
- goto fail;
- if (rte_eal_devargs_add(RTE_DEVTYPE_WHITELISTED_PCI, "000f:0:0") == 0)
- goto fail;
-
- devargs_list = save_devargs_list;
- return 0;
-
- fail:
- free_devargs_list();
- devargs_list = save_devargs_list;
- return -1;
-}
-
-REGISTER_TEST_COMMAND(devargs_autotest, test_devargs);
diff --git a/test/test/test_distributor_perf.c b/test/test/test_distributor_perf.c
index 557715e1..edf1998a 100644
--- a/test/test/test_distributor_perf.c
+++ b/test/test/test_distributor_perf.c
@@ -31,7 +31,7 @@ struct worker_stats worker_stats[RTE_MAX_LCORE];
* worker thread used for testing the time to do a round-trip of a cache
* line between two cores and back again
*/
-static void
+static int
flip_bit(volatile uint64_t *arg)
{
uint64_t old_val = 0;
@@ -41,6 +41,7 @@ flip_bit(volatile uint64_t *arg)
old_val = *arg;
*arg = 0;
}
+ return 0;
}
/*
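
Editor's note: the void-to-int change lets flip_bit be passed to
rte_eal_remote_launch() without a function-pointer cast, since
lcore_function_t is int (*)(void *). A minimal sketch of that launch
pattern (worker name, sync variable and worker_lcore_id are illustrative):

#include <stdint.h>
#include <rte_launch.h>
#include <rte_pause.h>

static volatile uint64_t sync_var;

static int
worker(void *arg) /* matches lcore_function_t: int (*)(void *) */
{
	volatile uint64_t *p = arg;

	while (*p == 0) /* wait for the main core to flip the bit */
		rte_pause();
	return 0; /* collected later via rte_eal_wait_lcore() */
}

/* on the main core:
 * rte_eal_remote_launch(worker, (void *)&sync_var, worker_lcore_id);
 */
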
diff --git a/test/test/test_eal_flags.c b/test/test/test_eal_flags.c
index 37c42efe..3a990766 100644
--- a/test/test/test_eal_flags.c
+++ b/test/test/test_eal_flags.c
@@ -33,7 +33,7 @@
#define memtest "memtest"
#define memtest1 "memtest1"
#define memtest2 "memtest2"
-#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10)
+#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 20)
#define launch_proc(ARGV) process_dup(ARGV, \
sizeof(ARGV)/(sizeof(ARGV[0])), __func__)
@@ -221,30 +221,6 @@ get_number_of_sockets(void)
}
#endif
-static char*
-get_current_prefix(char * prefix, int size)
-{
- char path[PATH_MAX] = {0};
- char buf[PATH_MAX] = {0};
-
- /* get file for config (fd is always 3) */
- snprintf(path, sizeof(path), "/proc/self/fd/%d", 3);
-
- /* return NULL on error */
- if (readlink(path, buf, sizeof(buf)) == -1)
- return NULL;
-
- /* get the basename */
- snprintf(buf, sizeof(buf), "%s", basename(buf));
-
- /* copy string all the way from second char up to start of _config */
- snprintf(prefix, size, "%.*s",
- (int)(strnlen(buf, sizeof(buf)) - sizeof("_config")),
- &buf[1]);
-
- return prefix;
-}
-
/*
* Test that the app doesn't run with invalid whitelist option.
* Final tests ensures it does run with valid options as sanity check (one
@@ -376,17 +352,17 @@ test_invalid_vdev_flag(void)
#endif
/* Test with invalid vdev option */
- const char *vdevinval[] = {prgname, prefix, "-n", "1",
+ const char *vdevinval[] = {prgname, prefix, no_huge, "-n", "1",
"-c", "1", vdev, "eth_dummy"};
/* Test with valid vdev option */
- const char *vdevval1[] = {prgname, prefix, "-n", "1",
+ const char *vdevval1[] = {prgname, prefix, no_huge, "-n", "1",
"-c", "1", vdev, "net_ring0"};
- const char *vdevval2[] = {prgname, prefix, "-n", "1",
+ const char *vdevval2[] = {prgname, prefix, no_huge, "-n", "1",
"-c", "1", vdev, "net_ring0,args=test"};
- const char *vdevval3[] = {prgname, prefix, "-n", "1",
+ const char *vdevval3[] = {prgname, prefix, no_huge, "-n", "1",
"-c", "1", vdev, "net_ring0,nodeaction=r1:0:CREATE"};
if (launch_proc(vdevinval) == 0) {
@@ -697,16 +673,18 @@ test_invalid_n_flag(void)
static int
test_no_hpet_flag(void)
{
- char prefix[PATH_MAX], tmp[PATH_MAX];
+ char prefix[PATH_MAX] = "";
#ifdef RTE_EXEC_ENV_BSDAPP
return 0;
-#endif
+#else
+ char tmp[PATH_MAX];
if (get_current_prefix(tmp, sizeof(tmp)) == NULL) {
printf("Error - unable to get current prefix!\n");
return -1;
}
snprintf(prefix, sizeof(prefix), "--file-prefix=%s", tmp);
+#endif
/* With --no-hpet */
const char *argv1[] = {prgname, prefix, mp_flag, no_hpet, "-c", "1", "-n", "2"};
@@ -849,13 +827,10 @@ test_misc_flags(void)
const char *argv4[] = {prgname, prefix, mp_flag, "-c", "1", "--syslog"};
/* With invalid --syslog */
const char *argv5[] = {prgname, prefix, mp_flag, "-c", "1", "--syslog", "error"};
- /* With no-sh-conf */
+ /* With no-sh-conf, also use no-huge to ensure this test runs on BSD */
const char *argv6[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
- no_shconf, nosh_prefix };
+ no_shconf, nosh_prefix, no_huge};
-#ifdef RTE_EXEC_ENV_BSDAPP
- return 0;
-#endif
/* With --huge-dir */
const char *argv7[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
"--file-prefix=hugedir", "--huge-dir", hugepath};
@@ -889,6 +864,7 @@ test_misc_flags(void)
const char *argv15[] = {prgname, "--file-prefix=intr",
"-c", "1", "-n", "2", "--vfio-intr=invalid"};
+ /* run all tests also applicable to FreeBSD first */
if (launch_proc(argv0) == 0) {
printf("Error - process ran ok with invalid flag\n");
@@ -902,6 +878,16 @@ test_misc_flags(void)
printf("Error - process did not run ok with -v flag\n");
return -1;
}
+ if (launch_proc(argv6) != 0) {
+ printf("Error - process did not run ok with --no-shconf flag\n");
+ return -1;
+ }
+
+#ifdef RTE_EXEC_ENV_BSDAPP
+ /* no more tests to be done on FreeBSD */
+ return 0;
+#endif
+
if (launch_proc(argv3) != 0) {
printf("Error - process did not run ok with --syslog flag\n");
return -1;
@@ -914,13 +900,6 @@ test_misc_flags(void)
printf("Error - process run ok with invalid --syslog flag\n");
return -1;
}
- if (launch_proc(argv6) != 0) {
- printf("Error - process did not run ok with --no-shconf flag\n");
- return -1;
- }
-#ifdef RTE_EXEC_ENV_BSDAPP
- return 0;
-#endif
if (launch_proc(argv7) != 0) {
printf("Error - process did not run ok with --huge-dir flag\n");
return -1;
@@ -977,9 +956,15 @@ test_file_prefix(void)
* 6. run a primary process with memtest2 prefix
* 7. check that only memtest2 hugefiles are present in the hugedir
*/
+ char prefix[PATH_MAX] = "";
#ifdef RTE_EXEC_ENV_BSDAPP
return 0;
+#else
+ if (get_current_prefix(prefix, sizeof(prefix)) == NULL) {
+ printf("Error - unable to get current prefix!\n");
+ return -1;
+ }
#endif
/* this should fail unless the test itself is run with "memtest" prefix */
@@ -994,12 +979,6 @@ test_file_prefix(void)
const char *argv2[] = {prgname, "-c", "1", "-n", "2", "-m", DEFAULT_MEM_SIZE,
"--file-prefix=" memtest2 };
- char prefix[32];
- if (get_current_prefix(prefix, sizeof(prefix)) == NULL) {
- printf("Error - unable to get current prefix!\n");
- return -1;
- }
-
/* check if files for current prefix are present */
if (process_hugefiles(prefix, HUGEPAGE_CHECK_EXISTS) != 1) {
printf("Error - hugepage files for %s were not created!\n", prefix);
@@ -1138,10 +1117,11 @@ test_memory_flags(void)
#ifdef RTE_EXEC_ENV_BSDAPP
int i, num_sockets = 1;
#else
- int i, num_sockets = get_number_of_sockets();
+ int i, num_sockets = RTE_MIN(get_number_of_sockets(),
+ RTE_MAX_NUMA_NODES);
#endif
- if (num_sockets <= 0 || num_sockets > RTE_MAX_NUMA_NODES) {
+ if (num_sockets <= 0) {
printf("Error - cannot get number of sockets!\n");
return -1;
}
@@ -1151,11 +1131,12 @@ test_memory_flags(void)
/* add one extra socket */
for (i = 0; i < num_sockets + 1; i++) {
snprintf(buf, sizeof(buf), "%s%s", invalid_socket_mem, DEFAULT_MEM_SIZE);
- snprintf(invalid_socket_mem, sizeof(invalid_socket_mem), "%s", buf);
+ strlcpy(invalid_socket_mem, buf, sizeof(invalid_socket_mem));
if (num_sockets + 1 - i > 1) {
snprintf(buf, sizeof(buf), "%s,", invalid_socket_mem);
- snprintf(invalid_socket_mem, sizeof(invalid_socket_mem), "%s", buf);
+ strlcpy(invalid_socket_mem, buf,
+ sizeof(invalid_socket_mem));
}
}
@@ -1167,11 +1148,12 @@ test_memory_flags(void)
/* add one extra socket */
for (i = 0; i < num_sockets; i++) {
snprintf(buf, sizeof(buf), "%s%s", valid_socket_mem, DEFAULT_MEM_SIZE);
- snprintf(valid_socket_mem, sizeof(valid_socket_mem), "%s", buf);
+ strlcpy(valid_socket_mem, buf, sizeof(valid_socket_mem));
if (num_sockets - i > 1) {
snprintf(buf, sizeof(buf), "%s,", valid_socket_mem);
- snprintf(valid_socket_mem, sizeof(valid_socket_mem), "%s", buf);
+ strlcpy(valid_socket_mem, buf,
+ sizeof(valid_socket_mem));
}
}
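
Editor's note: the strlcpy() calls above replace snprintf(dst, size, "%s",
src); strlcpy is the bounded-copy idiom DPDK now prefers, and its return
value exposes truncation. A sketch of the corrected append loop (buffer
sizes and the "256" memory size stand in for the test's own macros):

#include <stdio.h>
#include <rte_string_fns.h> /* DPDK's strlcpy() */

static void
build_socket_mem(char *sockmem, size_t size, int num_sockets)
{
	char buf[256];
	int i;

	sockmem[0] = '\0';
	for (i = 0; i < num_sockets; i++) {
		/* compose the next comma-separated value in a scratch buffer */
		snprintf(buf, sizeof(buf), "%s%s%s",
			 sockmem, i ? "," : "", "256");
		/* then copy back with a bounded, truncating copy */
		strlcpy(sockmem, buf, size);
	}
}
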
diff --git a/test/test/test_event_crypto_adapter.c b/test/test/test_event_crypto_adapter.c
new file mode 100644
index 00000000..de258c34
--- /dev/null
+++ b/test/test/test_event_crypto_adapter.c
@@ -0,0 +1,928 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+#include <rte_eventdev.h>
+#include <rte_bus_vdev.h>
+#include <rte_service.h>
+#include <rte_event_crypto_adapter.h>
+#include "test.h"
+
+#define PKT_TRACE 0
+#define NUM 1
+#define DEFAULT_NUM_XFORMS (2)
+#define NUM_MBUFS (8191)
+#define MBUF_CACHE_SIZE (256)
+#define MAXIMUM_IV_LENGTH (16)
+#define DEFAULT_NUM_OPS_INFLIGHT (128)
+#define MAX_NB_SESSIONS 4
+#define TEST_APP_PORT_ID 0
+#define TEST_APP_EV_QUEUE_ID 0
+#define TEST_APP_EV_PRIORITY 0
+#define TEST_APP_EV_FLOWID 0xAABB
+#define TEST_CRYPTO_EV_QUEUE_ID 1
+#define TEST_ADAPTER_ID 0
+#define TEST_CDEV_ID 0
+#define TEST_CDEV_QP_ID 0
+#define PACKET_LENGTH 64
+#define NB_TEST_PORTS 1
+#define NB_TEST_QUEUES 2
+#define NUM_CORES 1
+#define CRYPTODEV_NAME_NULL_PMD crypto_null
+
+#define MBUF_SIZE (sizeof(struct rte_mbuf) + \
+ RTE_PKTMBUF_HEADROOM + PACKET_LENGTH)
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op) + \
+ DEFAULT_NUM_XFORMS * \
+ sizeof(struct rte_crypto_sym_xform))
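+/*
+ * Editor's note, inferred from the offsets above: each op carries the
+ * rte_crypto_op and rte_crypto_sym_op headers, then two sym xforms, with
+ * the IV starting at IV_OFFSET. The sessionless paths below place the
+ * adapter metadata another MAXIMUM_IV_LENGTH plus two xforms further on,
+ * which is exactly the value they store in op->private_data_offset.
+ */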
+
+/* Handle log statements in same manner as test macros */
+#define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
+
+static const uint8_t text_64B[] = {
+ 0x05, 0x15, 0x77, 0x32, 0xc9, 0x66, 0x91, 0x50,
+ 0x93, 0x9f, 0xbb, 0x4e, 0x2e, 0x5a, 0x02, 0xd0,
+ 0x2d, 0x9d, 0x31, 0x5d, 0xc8, 0x9e, 0x86, 0x36,
+ 0x54, 0x5c, 0x50, 0xe8, 0x75, 0x54, 0x74, 0x5e,
+ 0xd5, 0xa2, 0x84, 0x21, 0x2d, 0xc5, 0xf8, 0x1c,
+ 0x55, 0x1a, 0xba, 0x91, 0xce, 0xb5, 0xa3, 0x1e,
+ 0x31, 0xbf, 0xe9, 0xa1, 0x97, 0x5c, 0x2b, 0xd6,
+ 0x57, 0xa5, 0x9f, 0xab, 0xbd, 0xb0, 0x9b, 0x9c
+};
+
+struct event_crypto_adapter_test_params {
+ struct rte_mempool *mbuf_pool;
+ struct rte_mempool *op_mpool;
+ struct rte_mempool *session_mpool;
+ struct rte_cryptodev_config *config;
+ uint8_t crypto_event_port_id;
+};
+
+struct rte_event response_info = {
+ .queue_id = TEST_APP_EV_QUEUE_ID,
+ .sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .flow_id = TEST_APP_EV_FLOWID,
+ .priority = TEST_APP_EV_PRIORITY
+};
+
+struct rte_event_crypto_request request_info = {
+ .cdev_id = TEST_CDEV_ID,
+ .queue_pair_id = TEST_CDEV_QP_ID
+};
+
+static struct event_crypto_adapter_test_params params;
+static uint8_t crypto_adapter_setup_done;
+static uint32_t slcore_id;
+static int evdev;
+
+static struct rte_mbuf *
+alloc_fill_mbuf(struct rte_mempool *mpool, const uint8_t *data,
+ size_t len, uint8_t blocksize)
+{
+ struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
+ size_t t_len = len - (blocksize ? (len % blocksize) : 0);
+
+ if (m) {
+ char *dst = rte_pktmbuf_append(m, t_len);
+
+ if (!dst) {
+ rte_pktmbuf_free(m);
+ return NULL;
+ }
+
+ rte_memcpy(dst, (const void *)data, t_len);
+ }
+ return m;
+}
+
+static int
+send_recv_ev(struct rte_event *ev)
+{
+ struct rte_crypto_op *op;
+ struct rte_event recv_ev;
+ int ret;
+
+ ret = rte_event_enqueue_burst(evdev, TEST_APP_PORT_ID, ev, NUM);
+ TEST_ASSERT_EQUAL(ret, NUM,
+ "Failed to send event to crypto adapter\n");
+
+ while (rte_event_dequeue_burst(evdev,
+ TEST_APP_PORT_ID, &recv_ev, NUM, 0) == 0)
+ rte_pause();
+
+ op = recv_ev.event_ptr;
+#if PKT_TRACE
+ struct rte_mbuf *m = op->sym->m_src;
+ rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_crypto_adapter_stats(void)
+{
+ struct rte_event_crypto_adapter_stats stats;
+
+ rte_event_crypto_adapter_stats_get(TEST_ADAPTER_ID, &stats);
+ printf(" +------------------------------------------------------+\n");
+ printf(" + Crypto adapter stats for instance %u:\n", TEST_ADAPTER_ID);
+ printf(" + Event port poll count %" PRIx64 "\n",
+ stats.event_poll_count);
+ printf(" + Event dequeue count %" PRIx64 "\n",
+ stats.event_deq_count);
+ printf(" + Cryptodev enqueue count %" PRIx64 "\n",
+ stats.crypto_enq_count);
+ printf(" + Cryptodev enqueue failed count %" PRIx64 "\n",
+ stats.crypto_enq_fail);
+ printf(" + Cryptodev dequeue count %" PRIx64 "\n",
+ stats.crypto_deq_count);
+ printf(" + Event enqueue count %" PRIx64 "\n",
+ stats.event_enq_count);
+ printf(" + Event enqueue retry count %" PRIx64 "\n",
+ stats.event_enq_retry_count);
+ printf(" + Event enqueue fail count %" PRIx64 "\n",
+ stats.event_enq_fail_count);
+ printf(" +------------------------------------------------------+\n");
+
+ rte_event_crypto_adapter_stats_reset(TEST_ADAPTER_ID);
+ return TEST_SUCCESS;
+}
+
+static int
+test_op_forward_mode(uint8_t session_less)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_cryptodev_sym_session *sess;
+ union rte_event_crypto_metadata m_data;
+ struct rte_crypto_sym_op *sym_op;
+ struct rte_crypto_op *op;
+ struct rte_mbuf *m;
+ struct rte_event ev;
+ uint32_t cap;
+ int ret;
+
+ memset(&m_data, 0, sizeof(m_data));
+
+ m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0);
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n");
+#if PKT_TRACE
+ rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ op = rte_crypto_op_alloc(params.op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(op,
+ "Failed to allocate symmetric crypto operation struct\n");
+
+ sym_op = op->sym;
+
+ if (!session_less) {
+ sess = rte_cryptodev_sym_session_create(params.session_mpool);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");
+
+ /* Create Crypto session*/
+ rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
+ &cipher_xform, params.session_mpool);
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
+ evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
+ /* Fill in private user data information */
+ rte_memcpy(&m_data.response_info, &response_info,
+ sizeof(response_info));
+ rte_memcpy(&m_data.request_info, &request_info,
+ sizeof(request_info));
+ rte_cryptodev_sym_session_set_user_data(sess,
+ &m_data, sizeof(m_data));
+ }
+
+ rte_crypto_op_attach_sym_session(op, sess);
+ } else {
+ struct rte_crypto_sym_xform *first_xform;
+
+ rte_crypto_op_sym_xforms_alloc(op, NUM);
+ op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
+ first_xform = &cipher_xform;
+ sym_op->xform = first_xform;
+ uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
+ (sizeof(struct rte_crypto_sym_xform) * 2);
+ op->private_data_offset = len;
+ /* Fill in private data information */
+ rte_memcpy(&m_data.response_info, &response_info,
+ sizeof(response_info));
+ rte_memcpy(&m_data.request_info, &request_info,
+ sizeof(request_info));
+ rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data));
+ }
+
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = PACKET_LENGTH;
+
+ /* Fill in event info and update event_ptr with rte_crypto_op */
+ memset(&ev, 0, sizeof(ev));
+ ev.queue_id = TEST_CRYPTO_EV_QUEUE_ID;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.flow_id = 0xAABB;
+ ev.event_ptr = op;
+
+ ret = send_recv_ev(&ev);
+ TEST_ASSERT_SUCCESS(ret, "Failed to send/receive event to "
+ "crypto adapter\n");
+
+ test_crypto_adapter_stats();
+
+ return TEST_SUCCESS;
+}
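+
+/*
+ * Editor's note: in OP_FORWARD mode the application never touches the
+ * cryptodev directly. The op is wrapped in an event and enqueued to
+ * TEST_CRYPTO_EV_QUEUE_ID; the adapter dequeues it, submits it to the
+ * cryptodev, and forwards the completed op back as an event, which
+ * send_recv_ev() collects on TEST_APP_PORT_ID.
+ */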
+
+static int
+map_adapter_service_core(void)
+{
+ uint32_t adapter_service_id;
+ int ret;
+
+ if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID,
+ &adapter_service_id) == 0) {
+ uint32_t core_list[NUM_CORES];
+
+ ret = rte_service_lcore_list(core_list, NUM_CORES);
+ TEST_ASSERT(ret >= 0, "Failed to get service core list!");
+
+ if (core_list[0] != slcore_id) {
+ TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
+ "Failed to add service core");
+ TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
+ "Failed to start service core");
+ }
+
+ TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
+ adapter_service_id, slcore_id, 1),
+ "Failed to map adapter service");
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_sessionless_with_op_forward_mode(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
+ map_adapter_service_core();
+
+ TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
+ "Failed to start event crypto adapter");
+
+ ret = test_op_forward_mode(1);
+ TEST_ASSERT_SUCCESS(ret, "Sessionless - FORWARD mode test failed\n");
+ return TEST_SUCCESS;
+}
+
+static int
+test_session_with_op_forward_mode(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))
+ map_adapter_service_core();
+
+ TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
+ "Failed to start event crypto adapter");
+
+ ret = test_op_forward_mode(0);
+ TEST_ASSERT_SUCCESS(ret, "Session based - FORWARD mode test failed\n");
+ return TEST_SUCCESS;
+}
+
+static int
+send_op_recv_ev(struct rte_crypto_op *op)
+{
+ struct rte_crypto_op *recv_op;
+ struct rte_event ev;
+ int ret;
+
+ ret = rte_cryptodev_enqueue_burst(TEST_CDEV_ID, TEST_CDEV_QP_ID,
+ &op, NUM);
+ TEST_ASSERT_EQUAL(ret, NUM, "Failed to enqueue to cryptodev\n");
+ memset(&ev, 0, sizeof(ev));
+
+ while (rte_event_dequeue_burst(evdev,
+ TEST_APP_PORT_ID, &ev, NUM, 0) == 0)
+ rte_pause();
+
+ recv_op = ev.event_ptr;
+#if PKT_TRACE
+ struct rte_mbuf *m = recv_op->sym->m_src;
+ rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+ rte_pktmbuf_free(recv_op->sym->m_src);
+ rte_crypto_op_free(recv_op);
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_op_new_mode(uint8_t session_less)
+{
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_cryptodev_sym_session *sess;
+ union rte_event_crypto_metadata m_data;
+ struct rte_crypto_sym_op *sym_op;
+ struct rte_crypto_op *op;
+ struct rte_mbuf *m;
+ uint32_t cap;
+ int ret;
+
+ memset(&m_data, 0, sizeof(m_data));
+
+ m = alloc_fill_mbuf(params.mbuf_pool, text_64B, PACKET_LENGTH, 0);
+ TEST_ASSERT_NOT_NULL(m, "Failed to allocate mbuf!\n");
+#if PKT_TRACE
+ rte_pktmbuf_dump(stdout, m, rte_pktmbuf_pkt_len(m));
+#endif
+ /* Setup Cipher Parameters */
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform.next = NULL;
+
+ cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ op = rte_crypto_op_alloc(params.op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ TEST_ASSERT_NOT_NULL(op, "Failed to allocate crypto_op!\n");
+
+ sym_op = op->sym;
+
+ if (!session_less) {
+ sess = rte_cryptodev_sym_session_create(params.session_mpool);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed\n");
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID,
+ evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
+ /* Fill in private user data information */
+ rte_memcpy(&m_data.response_info, &response_info,
+ sizeof(m_data));
+ rte_cryptodev_sym_session_set_user_data(sess,
+ &m_data, sizeof(m_data));
+ }
+ rte_cryptodev_sym_session_init(TEST_CDEV_ID, sess,
+ &cipher_xform, params.session_mpool);
+ rte_crypto_op_attach_sym_session(op, sess);
+ } else {
+ struct rte_crypto_sym_xform *first_xform;
+
+ rte_crypto_op_sym_xforms_alloc(op, NUM);
+ op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;
+ first_xform = &cipher_xform;
+ sym_op->xform = first_xform;
+ uint32_t len = IV_OFFSET + MAXIMUM_IV_LENGTH +
+ (sizeof(struct rte_crypto_sym_xform) * 2);
+ op->private_data_offset = len;
+ /* Fill in private data information */
+ rte_memcpy(&m_data.response_info, &response_info,
+ sizeof(m_data));
+ rte_memcpy((uint8_t *)op + len, &m_data, sizeof(m_data));
+ }
+
+ sym_op->m_src = m;
+ sym_op->cipher.data.offset = 0;
+ sym_op->cipher.data.length = PACKET_LENGTH;
+
+ ret = send_op_recv_ev(op);
+ TEST_ASSERT_SUCCESS(ret, "Failed to enqueue op to cryptodev\n");
+
+ test_crypto_adapter_stats();
+
+ return TEST_SUCCESS;
+}
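+
+/*
+ * Editor's note: in OP_NEW mode the application enqueues ops straight to
+ * the cryptodev queue pair (send_op_recv_ev()); the adapter only polls
+ * for completions and injects them into the eventdev as new events,
+ * using the response_info metadata attached above.
+ */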
+
+static int
+test_sessionless_with_op_new_mode(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
+ map_adapter_service_core();
+
+ /* start the event crypto adapter */
+ TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
+ "Failed to start event crypto adapter");
+
+ ret = test_op_new_mode(1);
+ TEST_ASSERT_SUCCESS(ret, "Sessionless - NEW mode test failed\n");
+ return TEST_SUCCESS;
+}
+
+static int
+test_session_with_op_new_mode(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW))
+ map_adapter_service_core();
+
+ TEST_ASSERT_SUCCESS(rte_event_crypto_adapter_start(TEST_ADAPTER_ID),
+ "Failed to start event crypto adapter");
+
+ ret = test_op_new_mode(0);
+ TEST_ASSERT_SUCCESS(ret, "Session based - NEW mode test failed\n");
+ return TEST_SUCCESS;
+}
+
+static int
+configure_cryptodev(void)
+{
+ struct rte_cryptodev_qp_conf qp_conf;
+ struct rte_cryptodev_config conf;
+ struct rte_cryptodev_info info;
+ unsigned int session_size;
+ uint8_t nb_devs;
+ int ret;
+
+ params.mbuf_pool = rte_pktmbuf_pool_create(
+ "CRYPTO_ADAPTER_MBUFPOOL",
+ NUM_MBUFS, MBUF_CACHE_SIZE, 0, MBUF_SIZE,
+ rte_socket_id());
+ if (params.mbuf_pool == NULL) {
+ RTE_LOG(ERR, USER1, "Can't create CRYPTO_MBUFPOOL\n");
+ return TEST_FAILED;
+ }
+
+ params.op_mpool = rte_crypto_op_pool_create(
+ "EVENT_CRYPTO_SYM_OP_POOL",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ NUM_MBUFS, MBUF_CACHE_SIZE,
+ DEFAULT_NUM_XFORMS *
+ sizeof(struct rte_crypto_sym_xform) +
+ MAXIMUM_IV_LENGTH,
+ rte_socket_id());
+ if (params.op_mpool == NULL) {
+ RTE_LOG(ERR, USER1, "Can't create CRYPTO_OP_POOL\n");
+ return TEST_FAILED;
+ }
+
+ /* Create a NULL crypto device */
+ nb_devs = rte_cryptodev_device_count_by_driver(
+ rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD)));
+ if (!nb_devs) {
+ ret = rte_vdev_init(
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL);
+
+ TEST_ASSERT(ret == 0, "Failed to create pmd:%s instance\n",
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD));
+ }
+
+ nb_devs = rte_cryptodev_count();
+ if (!nb_devs) {
+ RTE_LOG(ERR, USER1, "No crypto devices found!\n");
+ return TEST_FAILED;
+ }
+
+ /*
+ * Create mempool with maximum number of sessions * 2,
+ * to include the session headers & private data
+ */
+ session_size = rte_cryptodev_sym_get_private_session_size(TEST_CDEV_ID);
+ session_size += sizeof(union rte_event_crypto_metadata);
+
+ params.session_mpool = rte_mempool_create(
+ "CRYPTO_ADAPTER_SESSION_MP",
+ MAX_NB_SESSIONS * 2,
+ session_size,
+ 0, 0, NULL, NULL, NULL,
+ NULL, SOCKET_ID_ANY,
+ 0);
+
+ TEST_ASSERT_NOT_NULL(params.session_mpool,
+ "session mempool allocation failed\n");
+
+ rte_cryptodev_info_get(TEST_CDEV_ID, &info);
+ conf.nb_queue_pairs = info.max_nb_queue_pairs;
+ conf.socket_id = SOCKET_ID_ANY;
+
+ TEST_ASSERT_SUCCESS(rte_cryptodev_configure(TEST_CDEV_ID, &conf),
+ "Failed to configure cryptodev %u with %u qps\n",
+ TEST_CDEV_ID, conf.nb_queue_pairs);
+
+ qp_conf.nb_descriptors = DEFAULT_NUM_OPS_INFLIGHT;
+
+ TEST_ASSERT_SUCCESS(rte_cryptodev_queue_pair_setup(
+ TEST_CDEV_ID, TEST_CDEV_QP_ID, &qp_conf,
+ rte_cryptodev_socket_id(TEST_CDEV_ID),
+ params.session_mpool),
+ "Failed to setup queue pair %u on cryptodev %u\n",
+ TEST_CDEV_QP_ID, TEST_CDEV_ID);
+
+ return TEST_SUCCESS;
+}
+
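+/* Fill an eventdev config with the limits reported by the device,
+ * using the test's fixed port and queue counts.
+ */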
+static inline void
+evdev_set_conf_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+ dev_conf->nb_event_ports = NB_TEST_PORTS;
+ dev_conf->nb_event_queues = NB_TEST_QUEUES;
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
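+/* Set up the event device: one queue for the application, one
+ * single-link queue for crypto completions, and the application's
+ * dequeue port.
+ */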
+static int
+configure_eventdev(void)
+{
+ struct rte_event_queue_conf queue_conf;
+ struct rte_event_dev_config devconf;
+ struct rte_event_dev_info info;
+ uint32_t queue_count;
+ uint32_t port_count;
+ int ret;
+ uint8_t qid;
+
+ if (!rte_event_dev_count()) {
+ /* If there is no hardware eventdev, or no software vdev was
+ * specified on the command line, create an instance of
+ * event_sw.
+ */
+ LOG_DBG("Failed to find a valid event device... "
+ "testing with event_sw device\n");
+ TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
+ "Error creating eventdev");
+ evdev = rte_event_dev_get_dev_id("event_sw0");
+ }
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info\n");
+
+ evdev_set_conf_values(&devconf, &info);
+
+ ret = rte_event_dev_configure(evdev, &devconf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev\n");
+
+ /* Set up event queue */
+ ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
+ &queue_count);
+ TEST_ASSERT_SUCCESS(ret, "Queue count get failed\n");
+ TEST_ASSERT_EQUAL(queue_count, 2, "Unexpected queue count\n");
+
+ qid = TEST_APP_EV_QUEUE_ID;
+ ret = rte_event_queue_setup(evdev, qid, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d\n", qid);
+
+ queue_conf.nb_atomic_flows = info.max_event_queue_flows;
+ queue_conf.nb_atomic_order_sequences = 32;
+ queue_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+ queue_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+ queue_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+
+ qid = TEST_CRYPTO_EV_QUEUE_ID;
+ ret = rte_event_queue_setup(evdev, qid, &queue_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%u\n", qid);
+
+ /* Set up event port */
+ ret = rte_event_dev_attr_get(evdev, RTE_EVENT_DEV_ATTR_PORT_COUNT,
+ &port_count);
+ TEST_ASSERT_SUCCESS(ret, "Port count get failed\n");
+ TEST_ASSERT_EQUAL(port_count, 1, "Unexpected port count\n");
+
+ ret = rte_event_port_setup(evdev, TEST_APP_PORT_ID, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d\n",
+ TEST_APP_PORT_ID);
+
+ qid = TEST_APP_EV_QUEUE_ID;
+ ret = rte_event_port_link(evdev, TEST_APP_PORT_ID, &qid, NULL, 1);
+ TEST_ASSERT(ret >= 0, "Failed to link queue port=%d\n",
+ TEST_APP_PORT_ID);
+
+ return TEST_SUCCESS;
+}
+
+static void
+test_crypto_adapter_free(void)
+{
+ rte_event_crypto_adapter_free(TEST_ADAPTER_ID);
+}
+
+static int
+test_crypto_adapter_create(void)
+{
+ struct rte_event_port_conf conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+ int ret;
+
+ /* Create adapter with default port creation callback */
+ ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
+ TEST_CDEV_ID,
+ &conf, 0);
+ TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
+
+ return TEST_SUCCESS;
+}
+
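+/* Exercise queue pair add/delete; response_info is passed only when
+ * the PMD can bind an event to the queue pair internally.
+ */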
+static int
+test_crypto_adapter_qp_add_del(void)
+{
+ uint32_t cap;
+ int ret;
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+ ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+ TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+ } else {
+ ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+ TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
+ }
+
+ TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");
+
+ ret = rte_event_crypto_adapter_queue_pair_del(TEST_ADAPTER_ID,
+ TEST_CDEV_ID, TEST_CDEV_QP_ID);
+ TEST_ASSERT_SUCCESS(ret, "Failed to delete queue pair\n");
+
+ return TEST_SUCCESS;
+}
+
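+/* Create the adapter in the requested mode, bind the test queue pair
+ * and record the adapter's event port for later linking.
+ */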
+static int
+configure_event_crypto_adapter(enum rte_event_crypto_adapter_mode mode)
+{
+ struct rte_event_port_conf conf = {
+ .dequeue_depth = 8,
+ .enqueue_depth = 8,
+ .new_event_threshold = 1200,
+ };
+
+ uint32_t cap;
+ int ret;
+
+ /* Create adapter with default port creation callback */
+ ret = rte_event_crypto_adapter_create(TEST_ADAPTER_ID,
+ TEST_CDEV_ID,
+ &conf, mode);
+ TEST_ASSERT_SUCCESS(ret, "Failed to create event crypto adapter\n");
+
+ ret = rte_event_crypto_adapter_caps_get(TEST_ADAPTER_ID, evdev, &cap);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get adapter capabilities\n");
+
+ if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
+ ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+ TEST_CDEV_ID, TEST_CDEV_QP_ID, &response_info);
+ } else {
+ ret = rte_event_crypto_adapter_queue_pair_add(TEST_ADAPTER_ID,
+ TEST_CDEV_ID, TEST_CDEV_QP_ID, NULL);
+ }
+
+ TEST_ASSERT_SUCCESS(ret, "Failed to add queue pair\n");
+
+ ret = rte_event_crypto_adapter_event_port_get(TEST_ADAPTER_ID,
+ &params.crypto_event_port_id);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event port\n");
+
+ return TEST_SUCCESS;
+}
+
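+/* Stop the services mapped in test_crypto_adapter_conf() and release
+ * the service lcore before stopping the adapter and event device.
+ */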
+static void
+test_crypto_adapter_stop(void)
+{
+ uint32_t evdev_service_id, adapter_service_id;
+
+ /* retrieve service ids & stop services */
+ if (rte_event_crypto_adapter_service_id_get(TEST_ADAPTER_ID,
+ &adapter_service_id) == 0) {
+ rte_service_runstate_set(adapter_service_id, 0);
+ rte_service_lcore_stop(slcore_id);
+ rte_service_lcore_del(slcore_id);
+ rte_event_crypto_adapter_stop(TEST_ADAPTER_ID);
+ }
+
+ if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
+ rte_service_runstate_set(evdev_service_id, 0);
+ rte_service_lcore_stop(slcore_id);
+ rte_service_lcore_del(slcore_id);
+ rte_event_dev_stop(evdev);
+ }
+}
+
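+/* One-time adapter setup (guarded by crypto_adapter_setup_done):
+ * link the adapter's event port, map the eventdev service to a
+ * service core if needed, then start the event device.
+ */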
+static int
+test_crypto_adapter_conf(enum rte_event_crypto_adapter_mode mode)
+{
+ uint32_t evdev_service_id;
+ uint8_t qid;
+ int ret;
+
+ if (!crypto_adapter_setup_done) {
+ ret = configure_event_crypto_adapter(mode);
+ if (!ret) {
+ qid = TEST_CRYPTO_EV_QUEUE_ID;
+ ret = rte_event_port_link(evdev,
+ params.crypto_event_port_id, &qid, NULL, 1);
+ TEST_ASSERT(ret >= 0, "Failed to link queue %d "
+ "port=%u\n", qid,
+ params.crypto_event_port_id);
+ crypto_adapter_setup_done = 1;
+ }
+ }
+
+ /* retrieve service ids */
+ if (rte_event_dev_service_id_get(evdev, &evdev_service_id) == 0) {
+ /* add a service core and start it */
+ TEST_ASSERT_SUCCESS(rte_service_lcore_add(slcore_id),
+ "Failed to add service core");
+ TEST_ASSERT_SUCCESS(rte_service_lcore_start(slcore_id),
+ "Failed to start service core");
+
+ /* map services to it */
+ TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(evdev_service_id,
+ slcore_id, 1), "Failed to map evdev service");
+
+ /* set services to running */
+ TEST_ASSERT_SUCCESS(rte_service_runstate_set(evdev_service_id,
+ 1), "Failed to start evdev service");
+ }
+
+ /* start the eventdev */
+ TEST_ASSERT_SUCCESS(rte_event_dev_start(evdev),
+ "Failed to start event device");
+
+ return TEST_SUCCESS;
+}
+
+static int
+test_crypto_adapter_conf_op_forward_mode(void)
+{
+ enum rte_event_crypto_adapter_mode mode;
+
+ mode = RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD;
+
+ return test_crypto_adapter_conf(mode);
+}
+
+static int
+test_crypto_adapter_conf_op_new_mode(void)
+{
+ enum rte_event_crypto_adapter_mode mode;
+
+ mode = RTE_EVENT_CRYPTO_ADAPTER_OP_NEW;
+
+ return test_crypto_adapter_conf(mode);
+}
+
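+/* Reserve a service lcore, then bring up the event and crypto
+ * devices shared by all test cases.
+ */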
+static int
+testsuite_setup(void)
+{
+ int ret;
+
+ slcore_id = rte_get_next_lcore(-1, 1, 0);
+ TEST_ASSERT_NOT_EQUAL(slcore_id, RTE_MAX_LCORE, "At least 2 lcores "
+ "are required to run this autotest\n");
+
+ /* Setup and start event device. */
+ ret = configure_eventdev();
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup eventdev\n");
+
+ /* Setup and start crypto device. */
+ ret = configure_cryptodev();
+ TEST_ASSERT_SUCCESS(ret, "cryptodev initialization failed\n");
+
+ return TEST_SUCCESS;
+}
+
+static void
+crypto_teardown(void)
+{
+ /* Free mbuf mempool */
+ if (params.mbuf_pool != NULL) {
+ RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_MBUFPOOL count %u\n",
+ rte_mempool_avail_count(params.mbuf_pool));
+ rte_mempool_free(params.mbuf_pool);
+ params.mbuf_pool = NULL;
+ }
+
+ /* Free session mempool */
+ if (params.session_mpool != NULL) {
+ RTE_LOG(DEBUG, USER1, "CRYPTO_ADAPTER_SESSION_MP count %u\n",
+ rte_mempool_avail_count(params.session_mpool));
+ rte_mempool_free(params.session_mpool);
+ params.session_mpool = NULL;
+ }
+
+ /* Free ops mempool */
+ if (params.op_mpool != NULL) {
+ RTE_LOG(DEBUG, USER1, "EVENT_CRYPTO_SYM_OP_POOL count %u\n",
+ rte_mempool_avail_count(params.op_mpool));
+ rte_mempool_free(params.op_mpool);
+ params.op_mpool = NULL;
+ }
+}
+
+static void
+eventdev_teardown(void)
+{
+ rte_event_dev_stop(evdev);
+}
+
+static void
+testsuite_teardown(void)
+{
+ crypto_teardown();
+ eventdev_teardown();
+}
+
+static struct unit_test_suite functional_testsuite = {
+ .suite_name = "Event crypto adapter test suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+
+ TEST_CASE_ST(NULL, test_crypto_adapter_free,
+ test_crypto_adapter_create),
+
+ TEST_CASE_ST(test_crypto_adapter_create,
+ test_crypto_adapter_free,
+ test_crypto_adapter_qp_add_del),
+
+ TEST_CASE_ST(test_crypto_adapter_create,
+ test_crypto_adapter_free,
+ test_crypto_adapter_stats),
+
+ TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode,
+ test_crypto_adapter_stop,
+ test_session_with_op_forward_mode),
+
+ TEST_CASE_ST(test_crypto_adapter_conf_op_forward_mode,
+ test_crypto_adapter_stop,
+ test_sessionless_with_op_forward_mode),
+
+ TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode,
+ test_crypto_adapter_stop,
+ test_session_with_op_new_mode),
+
+ TEST_CASE_ST(test_crypto_adapter_conf_op_new_mode,
+ test_crypto_adapter_stop,
+ test_sessionless_with_op_new_mode),
+
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_event_crypto_adapter(void)
+{
+ return unit_test_suite_runner(&functional_testsuite);
+}
+
+REGISTER_TEST_COMMAND(event_crypto_adapter_autotest,
+ test_event_crypto_adapter);
diff --git a/test/test/test_event_eth_rx_adapter.c b/test/test/test_event_eth_rx_adapter.c
index 006ed314..2337e543 100644
--- a/test/test/test_event_eth_rx_adapter.c
+++ b/test/test/test_event_eth_rx_adapter.c
@@ -25,36 +25,25 @@ struct event_eth_rx_adapter_test_params {
struct rte_mempool *mp;
uint16_t rx_rings, tx_rings;
uint32_t caps;
+ int rx_intr_port_inited;
+ uint16_t rx_intr_port;
};
static struct event_eth_rx_adapter_test_params default_params;
static inline int
-port_init(uint8_t port, struct rte_mempool *mp)
+port_init_common(uint8_t port, const struct rte_eth_conf *port_conf,
+ struct rte_mempool *mp)
{
- static const struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = ETHER_MAX_LEN
- },
- .rx_adv_conf = {
- .rss_conf = {
- .rss_hf = ETH_RSS_IP |
- ETH_RSS_TCP |
- ETH_RSS_UDP,
- }
- }
- };
const uint16_t rx_ring_size = 512, tx_ring_size = 512;
- struct rte_eth_conf port_conf = port_conf_default;
int retval;
uint16_t q;
struct rte_eth_dev_info dev_info;
- if (port >= rte_eth_dev_count())
+ if (!rte_eth_dev_is_valid_port(port))
return -1;
- retval = rte_eth_dev_configure(port, 0, 0, &port_conf);
+ retval = rte_eth_dev_configure(port, 0, 0, port_conf);
rte_eth_dev_info_get(port, &dev_info);
@@ -64,7 +53,7 @@ port_init(uint8_t port, struct rte_mempool *mp)
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, default_params.rx_rings,
- default_params.tx_rings, &port_conf);
+ default_params.tx_rings, port_conf);
if (retval != 0)
return retval;
@@ -104,22 +93,99 @@ port_init(uint8_t port, struct rte_mempool *mp)
return 0;
}
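+/* Initialize a port with Rx queue interrupts enabled, for the
+ * interrupt-mode test suite below.
+ */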
+static inline int
+port_init_rx_intr(uint8_t port, struct rte_mempool *mp)
+{
+ static const struct rte_eth_conf port_conf_default = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
+ .max_rx_pkt_len = ETHER_MAX_LEN
+ },
+ .intr_conf = {
+ .rxq = 1,
+ },
+ };
+
+ return port_init_common(port, &port_conf_default, mp);
+}
+
+static inline int
+port_init(uint8_t port, struct rte_mempool *mp)
+{
+ static const struct rte_eth_conf port_conf_default = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
+ .max_rx_pkt_len = ETHER_MAX_LEN
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_hf = ETH_RSS_IP |
+ ETH_RSS_TCP |
+ ETH_RSS_UDP,
+ }
+ }
+ };
+
+ return port_init_common(port, &port_conf_default, mp);
+}
+
static int
-init_ports(int num_ports)
+init_port_rx_intr(int num_ports)
{
- uint8_t portid;
int retval;
+ uint16_t portid;
+ int err;
default_params.mp = rte_pktmbuf_pool_create("packet_pool",
+ NB_MBUFS,
+ MBUF_CACHE_SIZE,
+ MBUF_PRIV_SIZE,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ if (!default_params.mp)
+ return -ENOMEM;
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ retval = port_init_rx_intr(portid, default_params.mp);
+ if (retval)
+ continue;
+ err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, portid,
+ &default_params.caps);
+ if (err)
+ continue;
+ if (!(default_params.caps &
+ RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+ default_params.rx_intr_port_inited = 1;
+ default_params.rx_intr_port = portid;
+ return 0;
+ }
+ rte_eth_dev_stop(portid);
+ }
+ return 0;
+}
+
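+/* Create the packet mempool, or reuse it if it already exists, and
+ * initialize every probed ethdev in poll mode.
+ */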
+static int
+init_ports(int num_ports)
+{
+ uint16_t portid;
+ int retval;
+
+ struct rte_mempool *ptr = rte_mempool_lookup("packet_pool");
+
+ if (ptr == NULL)
+ default_params.mp = rte_pktmbuf_pool_create("packet_pool",
NB_MBUFS,
MBUF_CACHE_SIZE,
MBUF_PRIV_SIZE,
RTE_MBUF_DEFAULT_BUF_SIZE,
rte_socket_id());
+ else
+ default_params.mp = ptr;
+
if (!default_params.mp)
return -ENOMEM;
- for (portid = 0; portid < num_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
retval = port_init(portid, default_params.mp);
if (retval)
return retval;
@@ -164,7 +230,7 @@ testsuite_setup(void)
* so rte_eth_dev_start invokes rte_event_dev_start internally, so
* call init_ports after rte_event_dev_configure
*/
- err = init_ports(rte_eth_dev_count());
+ err = init_ports(rte_eth_dev_count_total());
TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
@@ -175,16 +241,77 @@ testsuite_setup(void)
return err;
}
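+/* Setup for the interrupt-mode suite: configure the eventdev, then
+ * look for a port that can be initialized with Rx interrupts.
+ */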
+static int
+testsuite_setup_rx_intr(void)
+{
+ int err;
+ uint8_t count;
+ struct rte_event_dev_info dev_info;
+
+ count = rte_event_dev_count();
+ if (!count) {
+ printf("Failed to find a valid event device,"
+ " testing with event_skeleton device\n");
+ rte_vdev_init("event_skeleton", NULL);
+ }
+
+ struct rte_event_dev_config config = {
+ .nb_event_queues = 1,
+ .nb_event_ports = 1,
+ };
+
+ err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+ config.nb_event_queue_flows = dev_info.max_event_queue_flows;
+ config.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+ config.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+ config.nb_events_limit =
+ dev_info.max_num_events;
+
+ err = rte_event_dev_configure(TEST_DEV_ID, &config);
+ TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
+ err);
+
+ /*
+ * eth devices like octeontx use event device to receive packets
+ * so rte_eth_dev_start invokes rte_event_dev_start internally, so
+ * call init_ports after rte_event_dev_configure
+ */
+ err = init_port_rx_intr(rte_eth_dev_count_total());
+ TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
+
+ if (!default_params.rx_intr_port_inited)
+ return 0;
+
+ err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID,
+ default_params.rx_intr_port,
+ &default_params.caps);
+ TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
+
+ return err;
+}
+
static void
testsuite_teardown(void)
{
uint32_t i;
- for (i = 0; i < rte_eth_dev_count(); i++)
+ RTE_ETH_FOREACH_DEV(i)
rte_eth_dev_stop(i);
rte_mempool_free(default_params.mp);
}
+static void
+testsuite_teardown_rx_intr(void)
+{
+ if (!default_params.rx_intr_port_inited)
+ return;
+
+ rte_eth_dev_stop(default_params.rx_intr_port);
+ rte_mempool_free(default_params.mp);
+}
+
static int
adapter_create(void)
{
@@ -273,7 +400,7 @@ adapter_queue_add_del(void)
queue_config.servicing_weight = 1;
err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
- rte_eth_dev_count(),
+ rte_eth_dev_count_total(),
-1, &queue_config);
TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
@@ -333,6 +460,151 @@ adapter_queue_add_del(void)
}
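+/* Fill the ethdev table up to RTE_MAX_ETHPORTS with net_null vdevs
+ * and verify queue add/del across all ports.
+ */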
static int
+adapter_multi_eth_add_del(void)
+{
+ int err;
+ struct rte_event ev;
+
+ uint16_t port_index, drv_id = 0;
+ char driver_name[50];
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_config;
+
+ ev.queue_id = 0;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.priority = 0;
+
+ queue_config.rx_queue_flags = 0;
+ queue_config.ev = ev;
+ queue_config.servicing_weight = 1;
+
+ /* stop the already-initialized eth devices */
+ port_index = 0;
+ for (; port_index < rte_eth_dev_count_total(); port_index += 1)
+ rte_eth_dev_stop(port_index);
+
+ /* create vdevs so the port count reaches RTE_MAX_ETHPORTS */
+ port_index = rte_eth_dev_count_total();
+ for (; port_index < RTE_MAX_ETHPORTS; port_index += 1) {
+ sprintf(driver_name, "%s%u", "net_null", drv_id);
+ err = rte_vdev_init(driver_name, NULL);
+ TEST_ASSERT(err == 0, "Failed driver %s got %d",
+ driver_name, err);
+ drv_id += 1;
+ }
+
+ err = init_ports(rte_eth_dev_count_total());
+ TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
+
+ /* create a new adapter instance covering the newly added eth devices */
+ adapter_create();
+
+ /* eth_rx_adapter_queue_add for n ports */
+ port_index = 0;
+ for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ port_index, 0,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+ }
+
+ /* eth_rx_adapter_queue_del n ports */
+ port_index = 0;
+ for (; port_index < rte_eth_dev_count_total(); port_index += 1) {
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ port_index, 0);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+ }
+
+ adapter_free();
+
+ return TEST_SUCCESS;
+}
+
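+/* Exercise interrupt-mode queue add/del (servicing_weight == 0) and
+ * switching a queue from interrupt to poll mode.
+ */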
+static int
+adapter_intr_queue_add_del(void)
+{
+ int err;
+ struct rte_event ev;
+ uint32_t cap;
+ uint16_t eth_port;
+ struct rte_event_eth_rx_adapter_queue_conf queue_config;
+
+ if (!default_params.rx_intr_port_inited)
+ return 0;
+
+ eth_port = default_params.rx_intr_port;
+ err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, eth_port, &cap);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ ev.queue_id = 0;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.priority = 0;
+
+ queue_config.rx_queue_flags = 0;
+ queue_config.ev = ev;
+
+ /* weight = 0 => interrupt mode */
+ queue_config.servicing_weight = 0;
+
+ /* add queue 0 */
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID, 0,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ /* add all queues */
+ queue_config.servicing_weight = 0;
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ -1,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ /* del queue 0 */
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ 0);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ /* del remaining queues */
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ -1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ /* add all queues */
+ queue_config.servicing_weight = 0;
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ -1,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ /* intr -> poll mode queue */
+ queue_config.servicing_weight = 1;
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ 0,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ -1,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ /* del queues */
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ -1);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ return TEST_SUCCESS;
+}
+
+static int
adapter_start_stop(void)
{
int err;
@@ -402,7 +674,7 @@ adapter_stats(void)
return TEST_SUCCESS;
}
-static struct unit_test_suite service_tests = {
+static struct unit_test_suite event_eth_rx_tests = {
.suite_name = "rx event eth adapter test suite",
.setup = testsuite_setup,
.teardown = testsuite_teardown,
@@ -410,17 +682,37 @@ static struct unit_test_suite service_tests = {
TEST_CASE_ST(NULL, NULL, adapter_create_free),
TEST_CASE_ST(adapter_create, adapter_free,
adapter_queue_add_del),
+ TEST_CASE_ST(NULL, NULL, adapter_multi_eth_add_del),
TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
+static struct unit_test_suite event_eth_rx_intr_tests = {
+ .suite_name = "rx event eth adapter interrupt test suite",
+ .setup = testsuite_setup_rx_intr,
+ .teardown = testsuite_teardown_rx_intr,
+ .unit_test_cases = {
+ TEST_CASE_ST(adapter_create, adapter_free,
+ adapter_intr_queue_add_del),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
static int
test_event_eth_rx_adapter_common(void)
{
- return unit_test_suite_runner(&service_tests);
+ return unit_test_suite_runner(&event_eth_rx_tests);
+}
+
+static int
+test_event_eth_rx_intr_adapter_common(void)
+{
+ return unit_test_suite_runner(&event_eth_rx_intr_tests);
}
REGISTER_TEST_COMMAND(event_eth_rx_adapter_autotest,
test_event_eth_rx_adapter_common);
+REGISTER_TEST_COMMAND(event_eth_rx_intr_adapter_autotest,
+ test_event_eth_rx_intr_adapter_common);
diff --git a/test/test/test_event_timer_adapter.c b/test/test/test_event_timer_adapter.c
new file mode 100644
index 00000000..93471db1
--- /dev/null
+++ b/test/test/test_event_timer_adapter.c
@@ -0,0 +1,1830 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ * Copyright(c) 2017-2018 Intel Corporation.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_timer_adapter.h>
+#include <rte_mempool.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_random.h>
+#include <rte_bus_vdev.h>
+#include <rte_service.h>
+#include <stdbool.h>
+
+#include "test.h"
+
+/* 4K timers corresponds to sw evdev max inflight events */
+#define MAX_TIMERS (4 * 1024)
+#define BKT_TCK_NSEC
+
+#define NSECPERSEC 1E9
+#define BATCH_SIZE 16
+/* Both the app lcore and adapter ports are linked to this queue */
+#define TEST_QUEUE_ID 0
+/* Port the application dequeues from */
+#define TEST_PORT_ID 0
+#define TEST_ADAPTER_ID 0
+
+/* Handle log statements in same manner as test macros */
+#define LOG_DBG(...) RTE_LOG(DEBUG, EAL, __VA_ARGS__)
+
+static int evdev;
+static struct rte_event_timer_adapter *timdev;
+static struct rte_mempool *eventdev_test_mempool;
+static struct rte_ring *timer_producer_ring;
+static uint64_t global_bkt_tck_ns;
+static volatile uint8_t arm_done;
+
+static bool using_services;
+static uint32_t test_lcore1;
+static uint32_t test_lcore2;
+static uint32_t test_lcore3;
+static uint32_t sw_evdev_slcore;
+static uint32_t sw_adptr_slcore;
+
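+/* Fill an eventdev config from the reported device limits; the timer
+ * tests use a single event queue and port.
+ */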
+static inline void
+devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf,
+ struct rte_event_dev_info *info)
+{
+ memset(dev_conf, 0, sizeof(struct rte_event_dev_config));
+ dev_conf->dequeue_timeout_ns = info->min_dequeue_timeout_ns;
+ dev_conf->nb_event_ports = 1;
+ dev_conf->nb_event_queues = 1;
+ dev_conf->nb_event_queue_flows = info->max_event_queue_flows;
+ dev_conf->nb_event_port_dequeue_depth =
+ info->max_event_port_dequeue_depth;
+ dev_conf->nb_event_port_enqueue_depth =
+ info->max_event_port_enqueue_depth;
+ dev_conf->nb_events_limit =
+ info->max_num_events;
+}
+
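+/* Configure and start the event device, mapping its service to a
+ * dedicated lcore when a software eventdev is in use.
+ */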
+static inline int
+eventdev_setup(void)
+{
+ int ret;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+ uint32_t service_id;
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ TEST_ASSERT_SUCCESS(ret, "Failed to get event dev info");
+ TEST_ASSERT(info.max_num_events >= (int32_t)MAX_TIMERS,
+ "ERROR max_num_events=%d < max_events=%d",
+ info.max_num_events, MAX_TIMERS);
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+ ret = rte_event_dev_configure(evdev, &dev_conf);
+ TEST_ASSERT_SUCCESS(ret, "Failed to configure eventdev");
+
+ ret = rte_event_queue_setup(evdev, 0, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup queue=%d", 0);
+
+ /* Configure event port */
+ ret = rte_event_port_setup(evdev, 0, NULL);
+ TEST_ASSERT_SUCCESS(ret, "Failed to setup port=%d", 0);
+ ret = rte_event_port_link(evdev, 0, NULL, NULL, 0);
+ TEST_ASSERT(ret >= 0, "Failed to link all queues port=%d", 0);
+
+ /* If this is a software event device, map and start its service */
+ if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
+ TEST_ASSERT_SUCCESS(rte_service_lcore_add(sw_evdev_slcore),
+ "Failed to add service core");
+ TEST_ASSERT_SUCCESS(rte_service_lcore_start(
+ sw_evdev_slcore),
+ "Failed to start service core");
+ TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(
+ service_id, sw_evdev_slcore, 1),
+ "Failed to map evdev service");
+ TEST_ASSERT_SUCCESS(rte_service_runstate_set(
+ service_id, 1),
+ "Failed to start evdev service");
+ }
+
+ ret = rte_event_dev_start(evdev);
+ TEST_ASSERT_SUCCESS(ret, "Failed to start device");
+
+ return TEST_SUCCESS;
+}
+
+static int
+testsuite_setup(void)
+{
+ /* Some of the multithreaded tests require 3 other lcores to run */
+ unsigned int required_lcore_count = 4;
+ uint32_t service_id;
+
+ /* To make it easier to map services later if needed, just reset
+ * service core state.
+ */
+ (void) rte_service_lcore_reset_all();
+
+ if (!rte_event_dev_count()) {
+ /* If there is no hardware eventdev, or no software vdev was
+ * specified on the command line, create an instance of
+ * event_sw.
+ */
+ LOG_DBG("Failed to find a valid event device... testing with"
+ " event_sw device\n");
+ TEST_ASSERT_SUCCESS(rte_vdev_init("event_sw0", NULL),
+ "Error creating eventdev");
+ evdev = rte_event_dev_get_dev_id("event_sw0");
+ }
+
+ if (rte_event_dev_service_id_get(evdev, &service_id) == 0) {
+ /* A software event device will use a software event timer
+ * adapter as well. 2 more cores required to convert to
+ * service cores.
+ */
+ required_lcore_count += 2;
+ using_services = true;
+ }
+
+ if (rte_lcore_count() < required_lcore_count) {
+ printf("%u lcores needed to run tests\n", required_lcore_count);
+ return TEST_FAILED;
+ }
+
+ /* Assign lcores for various tasks */
+ test_lcore1 = rte_get_next_lcore(-1, 1, 0);
+ test_lcore2 = rte_get_next_lcore(test_lcore1, 1, 0);
+ test_lcore3 = rte_get_next_lcore(test_lcore2, 1, 0);
+ if (using_services) {
+ sw_evdev_slcore = rte_get_next_lcore(test_lcore3, 1, 0);
+ sw_adptr_slcore = rte_get_next_lcore(sw_evdev_slcore, 1, 0);
+ }
+
+ return eventdev_setup();
+}
+
+static void
+testsuite_teardown(void)
+{
+ rte_event_dev_stop(evdev);
+ rte_event_dev_close(evdev);
+}
+
+static int
+setup_adapter_service(struct rte_event_timer_adapter *adptr)
+{
+ uint32_t adapter_service_id;
+ int ret;
+
+ /* retrieve service ids */
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_service_id_get(adptr,
+ &adapter_service_id), "Failed to get event timer "
+ "adapter service id");
+ /* add a service core and start it */
+ ret = rte_service_lcore_add(sw_adptr_slcore);
+ TEST_ASSERT(ret == 0 || ret == -EALREADY,
+ "Failed to add service core");
+ ret = rte_service_lcore_start(sw_adptr_slcore);
+ TEST_ASSERT(ret == 0 || ret == -EALREADY,
+ "Failed to start service core");
+
+ /* map services to it */
+ TEST_ASSERT_SUCCESS(rte_service_map_lcore_set(adapter_service_id,
+ sw_adptr_slcore, 1),
+ "Failed to map adapter service");
+
+ /* set services to running */
+ TEST_ASSERT_SUCCESS(rte_service_runstate_set(adapter_service_id, 1),
+ "Failed to start event timer adapter service");
+
+ return TEST_SUCCESS;
+}
+
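+/* Port-creation callback for rte_event_timer_adapter_create_ext():
+ * grows the device config by one port, reconfigures the (possibly
+ * running) eventdev, and caches the new port id for reuse.
+ */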
+static int
+test_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
+ void *conf_arg)
+{
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_dev_info info;
+ struct rte_event_port_conf *port_conf, def_port_conf = {0};
+ uint32_t started;
+ static int port_allocated;
+ static uint8_t port_id;
+ int ret;
+
+ if (port_allocated) {
+ *event_port_id = port_id;
+ return 0;
+ }
+
+ RTE_SET_USED(id);
+
+ ret = rte_event_dev_attr_get(event_dev_id, RTE_EVENT_DEV_ATTR_STARTED,
+ &started);
+ if (ret < 0)
+ return ret;
+
+ if (started)
+ rte_event_dev_stop(event_dev_id);
+
+ ret = rte_event_dev_info_get(evdev, &info);
+ if (ret < 0)
+ return ret;
+
+ devconf_set_default_sane_values(&dev_conf, &info);
+
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports++;
+
+ ret = rte_event_dev_configure(event_dev_id, &dev_conf);
+ if (ret < 0) {
+ if (started)
+ rte_event_dev_start(event_dev_id);
+ return ret;
+ }
+
+ if (conf_arg != NULL)
+ port_conf = conf_arg;
+ else {
+ port_conf = &def_port_conf;
+ ret = rte_event_port_default_conf_get(event_dev_id, port_id,
+ port_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rte_event_port_setup(event_dev_id, port_id, port_conf);
+ if (ret < 0)
+ return ret;
+
+ *event_port_id = port_id;
+
+ if (started)
+ rte_event_dev_start(event_dev_id);
+
+ /* Reuse this port number next time this is called */
+ port_allocated = 1;
+
+ return 0;
+}
+
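+/* Common adapter setup: create the adapter (the ext variant when the
+ * device lacks an internal port), start it, and allocate the event
+ * timer mempool.
+ */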
+static int
+_timdev_setup(uint64_t max_tmo_ns, uint64_t bkt_tck_ns)
+{
+ struct rte_event_timer_adapter_conf config = {
+ .event_dev_id = evdev,
+ .timer_adapter_id = TEST_ADAPTER_ID,
+ .timer_tick_ns = bkt_tck_ns,
+ .max_tmo_ns = max_tmo_ns,
+ .nb_timers = MAX_TIMERS * 10,
+ };
+ uint32_t caps = 0;
+ const char *pool_name = "timdev_test_pool";
+
+ global_bkt_tck_ns = bkt_tck_ns;
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
+ "failed to get adapter capabilities");
+ if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+ timdev = rte_event_timer_adapter_create_ext(&config,
+ test_port_conf_cb,
+ NULL);
+ setup_adapter_service(timdev);
+ using_services = true;
+ } else {
+ timdev = rte_event_timer_adapter_create(&config);
+ }
+
+ TEST_ASSERT_NOT_NULL(timdev,
+ "failed to create event timer ring");
+
+ TEST_ASSERT_EQUAL(rte_event_timer_adapter_start(timdev), 0,
+ "failed to Start event timer adapter");
+
+ /* Create event timer mempool */
+ eventdev_test_mempool = rte_mempool_create(pool_name,
+ MAX_TIMERS * 2,
+ sizeof(struct rte_event_timer), /* element size*/
+ 0, /* cache size*/
+ 0, NULL, NULL, NULL, NULL,
+ rte_socket_id(), 0);
+ if (!eventdev_test_mempool) {
+ printf("ERROR creating mempool\n");
+ return TEST_FAILED;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+timdev_setup_usec(void)
+{
+ return using_services ?
+ /* Max timeout is 10,000us and bucket interval is 100us */
+ _timdev_setup(1E7, 1E5) :
+ /* Max timeout is 100us and bucket interval is 1us */
+ _timdev_setup(1E5, 1E3);
+}
+
+static int
+timdev_setup_usec_multicore(void)
+{
+ return using_services ?
+ /* Max timeout is 10,000us and bucket interval is 100us */
+ _timdev_setup(1E7, 1E5) :
+ /* Max timeout is 100us and bucket interval is 1us */
+ _timdev_setup(1E5, 1E3);
+}
+
+static int
+timdev_setup_msec(void)
+{
+ /* Max timeout is 3 mins and bucket interval is 100 ms */
+ return _timdev_setup(180 * NSECPERSEC, NSECPERSEC / 10);
+}
+
+static int
+timdev_setup_sec(void)
+{
+ /* Max timeout is 100sec and bucket interval is 1sec */
+ return _timdev_setup(1E11, 1E9);
+}
+
+static int
+timdev_setup_sec_multicore(void)
+{
+ /* Max timeout is 100sec and bucket interval is 1sec */
+ return _timdev_setup(1E11, 1E9);
+}
+
+static void
+timdev_teardown(void)
+{
+ rte_event_timer_adapter_stop(timdev);
+ rte_event_timer_adapter_free(timdev);
+
+ rte_mempool_free(eventdev_test_mempool);
+}
+
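+/* Walk a timer through its states: TOOLATE on an over-long timeout,
+ * ARMED, expiry delivery, and CANCELED.
+ */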
+static inline int
+test_timer_state(void)
+{
+ struct rte_event_timer *ev_tim;
+ struct rte_event ev;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&ev_tim);
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+ ev_tim->timeout_ticks = 120;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 0,
+ "Armed timer exceeding max_timeout.");
+ TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
+ "Improper timer state set expected %d returned %d",
+ RTE_EVENT_TIMER_ERROR_TOOLATE, ev_tim->state);
+
+ ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
+ ev_tim->timeout_ticks = 10;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
+ "Failed to arm timer with proper timeout.");
+ TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
+ "Improper timer state set expected %d returned %d",
+ RTE_EVENT_TIMER_ARMED, ev_tim->state);
+
+ if (!using_services)
+ rte_delay_us(20);
+ else
+ rte_delay_us(1000 + 200);
+ TEST_ASSERT_EQUAL(rte_event_dequeue_burst(evdev, 0, &ev, 1, 0), 1,
+ "Armed timer failed to trigger.");
+
+ ev_tim->state = RTE_EVENT_TIMER_NOT_ARMED;
+ ev_tim->timeout_ticks = 90;
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim, 1), 1,
+ "Failed to arm timer with proper timeout.");
+ TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev, &ev_tim, 1),
+ 1, "Failed to cancel armed timer");
+ TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_CANCELED,
+ "Improper timer state set expected %d returned %d",
+ RTE_EVENT_TIMER_CANCELED, ev_tim->state);
+
+ rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+_arm_timers(uint64_t timeout_tcks, uint64_t timers)
+{
+ uint64_t i;
+ struct rte_event_timer *ev_tim;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = timeout_tcks,
+ };
+
+ for (i = 0; i < timers; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
+ (void **)&ev_tim),
+ "mempool alloc failed");
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
+ 1), 1, "Failed to arm timer %d",
+ rte_errno);
+ }
+
+ return TEST_SUCCESS;
+}
+
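+/* Dequeue expiry events until arm_count - cancel_count of them have
+ * been seen, failing the test if wait_sec elapses first.
+ */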
+static inline int
+_wait_timer_triggers(uint64_t wait_sec, uint64_t arm_count,
+ uint64_t cancel_count)
+{
+ uint8_t valid_event;
+ uint64_t events = 0;
+ uint64_t wait_start, max_wait;
+ struct rte_event ev;
+
+ max_wait = rte_get_timer_hz() * wait_sec;
+ wait_start = rte_get_timer_cycles();
+ while (1) {
+ if (rte_get_timer_cycles() - wait_start > max_wait) {
+ if (events + cancel_count != arm_count)
+ TEST_ASSERT_SUCCESS(max_wait,
+ "Max time limit for timers exceeded.");
+ break;
+ }
+
+ valid_event = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0);
+ if (!valid_event)
+ continue;
+
+ rte_mempool_put(eventdev_test_mempool, ev.event_ptr);
+ events++;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+test_timer_arm(void)
+{
+ TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
+ "Failed to arm timers");
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
+ "Timer triggered count doesn't match arm count");
+ return TEST_SUCCESS;
+}
+
+static int
+_arm_wrapper(void *arg)
+{
+ RTE_SET_USED(arg);
+
+ TEST_ASSERT_SUCCESS(_arm_timers(20, MAX_TIMERS),
+ "Failed to arm timers");
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+test_timer_arm_multicore(void)
+{
+ uint32_t lcore_1 = rte_get_next_lcore(-1, 1, 0);
+ uint32_t lcore_2 = rte_get_next_lcore(lcore_1, 1, 0);
+
+ rte_eal_remote_launch(_arm_wrapper, NULL, lcore_1);
+ rte_eal_remote_launch(_arm_wrapper, NULL, lcore_2);
+
+ rte_eal_mp_wait_lcore();
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+#define MAX_BURST 16
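+/* Burst variant of _arm_timers(): allocate and arm timers MAX_BURST
+ * at a time with rte_event_timer_arm_tmo_tick_burst().
+ */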
+static inline int
+_arm_timers_burst(uint64_t timeout_tcks, uint64_t timers)
+{
+ uint64_t i;
+ int j;
+ struct rte_event_timer *ev_tim[MAX_BURST];
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = timeout_tcks,
+ };
+
+ for (i = 0; i < timers / MAX_BURST; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
+ eventdev_test_mempool,
+ (void **)ev_tim, MAX_BURST),
+ "mempool alloc failed");
+
+ for (j = 0; j < MAX_BURST; j++) {
+ *ev_tim[j] = tim;
+ ev_tim[j]->ev.event_ptr = ev_tim[j];
+ }
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
+ ev_tim, tim.timeout_ticks, MAX_BURST),
+ MAX_BURST, "Failed to arm timer %d", rte_errno);
+ }
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+test_timer_arm_burst(void)
+{
+ TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
+ "Failed to arm timers");
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS, 0),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+static int
+_arm_wrapper_burst(void *arg)
+{
+ RTE_SET_USED(arg);
+
+ TEST_ASSERT_SUCCESS(_arm_timers_burst(20, MAX_TIMERS),
+ "Failed to arm timers");
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+test_timer_arm_burst_multicore(void)
+{
+ rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore1);
+ rte_eal_remote_launch(_arm_wrapper_burst, NULL, test_lcore2);
+
+ rte_eal_mp_wait_lcore();
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(10, MAX_TIMERS * 2, 0),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+test_timer_cancel(void)
+{
+ uint64_t i;
+ struct rte_event_timer *ev_tim;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 20,
+ };
+
+ for (i = 0; i < MAX_TIMERS; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
+ (void **)&ev_tim),
+ "mempool alloc failed");
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
+ 1), 1, "Failed to arm timer %d",
+ rte_errno);
+
+ rte_delay_us(100 + (i % 5000));
+
+ TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(timdev,
+ &ev_tim, 1), 1,
+ "Failed to cancel event timer %d", rte_errno);
+ rte_mempool_put(eventdev_test_mempool, ev_tim);
+ }
+
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
+ MAX_TIMERS),
+ "Timer triggered count doesn't match arm, cancel count");
+
+ return TEST_SUCCESS;
+}
+
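+/* Producer side of the multicore cancel tests: arm timers and hand
+ * them to the canceling thread through timer_producer_ring.
+ */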
+static int
+_cancel_producer(uint64_t timeout_tcks, uint64_t timers)
+{
+ uint64_t i;
+ struct rte_event_timer *ev_tim;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = timeout_tcks,
+ };
+
+ for (i = 0; i < timers; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
+ (void **)&ev_tim),
+ "mempool alloc failed");
+
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
+ 1), 1, "Failed to arm timer %d",
+ rte_errno);
+
+ TEST_ASSERT_EQUAL(ev_tim->state, RTE_EVENT_TIMER_ARMED,
+ "Failed to arm event timer");
+
+ while (rte_ring_enqueue(timer_producer_ring, ev_tim) != 0)
+ ;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+_cancel_producer_burst(uint64_t timeout_tcks, uint64_t timers)
+{
+ uint64_t i;
+ int j, ret;
+ struct rte_event_timer *ev_tim[MAX_BURST];
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = timeout_tcks,
+ };
+ int arm_count = 0;
+
+ for (i = 0; i < timers / MAX_BURST; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get_bulk(
+ eventdev_test_mempool,
+ (void **)ev_tim, MAX_BURST),
+ "mempool alloc failed");
+
+ for (j = 0; j < MAX_BURST; j++) {
+ *ev_tim[j] = tim;
+ ev_tim[j]->ev.event_ptr = ev_tim[j];
+ }
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_tmo_tick_burst(timdev,
+ ev_tim, tim.timeout_ticks, MAX_BURST),
+ MAX_BURST, "Failed to arm timer %d", rte_errno);
+
+ for (j = 0; j < MAX_BURST; j++)
+ TEST_ASSERT_EQUAL(ev_tim[j]->state,
+ RTE_EVENT_TIMER_ARMED,
+ "Event timer not armed, state = %d",
+ ev_tim[j]->state);
+
+ ret = rte_ring_enqueue_bulk(timer_producer_ring,
+ (void **)ev_tim, MAX_BURST, NULL);
+ TEST_ASSERT_EQUAL(ret, MAX_BURST,
+ "Failed to enqueue event timers to ring");
+ arm_count += ret;
+ }
+
+ TEST_ASSERT_EQUAL(arm_count, MAX_TIMERS,
+ "Failed to arm expected number of event timers");
+
+ return TEST_SUCCESS;
+}
+
+static int
+_cancel_producer_wrapper(void *args)
+{
+ RTE_SET_USED(args);
+
+ return _cancel_producer(20, MAX_TIMERS);
+}
+
+static int
+_cancel_producer_burst_wrapper(void *args)
+{
+ RTE_SET_USED(args);
+
+ return _cancel_producer_burst(100, MAX_TIMERS);
+}
+
+static int
+_cancel_thread(void *args)
+{
+ RTE_SET_USED(args);
+ struct rte_event_timer *ev_tim = NULL;
+ uint64_t cancel_count = 0;
+ uint16_t ret;
+
+ while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
+ if (rte_ring_dequeue(timer_producer_ring, (void **)&ev_tim))
+ continue;
+
+ ret = rte_event_timer_cancel_burst(timdev, &ev_tim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel timer");
+ rte_mempool_put(eventdev_test_mempool, (void *)ev_tim);
+ cancel_count++;
+ }
+
+ return TEST_SUCCESS;
+}
+
+static int
+_cancel_burst_thread(void *args)
+{
+ RTE_SET_USED(args);
+
+ int ret, i, n;
+ struct rte_event_timer *ev_tim[MAX_BURST];
+ uint64_t cancel_count = 0;
+ uint64_t dequeue_count = 0;
+
+ while (!arm_done || rte_ring_count(timer_producer_ring) > 0) {
+ n = rte_ring_dequeue_burst(timer_producer_ring,
+ (void **)ev_tim, MAX_BURST, NULL);
+ if (!n)
+ continue;
+
+ dequeue_count += n;
+
+ for (i = 0; i < n; i++)
+ TEST_ASSERT_EQUAL(ev_tim[i]->state,
+ RTE_EVENT_TIMER_ARMED,
+ "Event timer not armed, state = %d",
+ ev_tim[i]->state);
+
+ ret = rte_event_timer_cancel_burst(timdev, ev_tim, n);
+ TEST_ASSERT_EQUAL(n, ret, "Failed to cancel complete burst of "
+ "event timers");
+ rte_mempool_put_bulk(eventdev_test_mempool, (void **)ev_tim,
+ ret);
+
+ cancel_count += ret;
+ }
+
+ TEST_ASSERT_EQUAL(cancel_count, MAX_TIMERS,
+ "Failed to cancel expected number of timers: "
+ "expected = %d, cancel_count = %"PRIu64", "
+ "dequeue_count = %"PRIu64"\n", MAX_TIMERS,
+ cancel_count, dequeue_count);
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+test_timer_cancel_multicore(void)
+{
+ arm_done = 0;
+ timer_producer_ring = rte_ring_create("timer_cancel_queue",
+ MAX_TIMERS * 2, rte_socket_id(), 0);
+ TEST_ASSERT_NOT_NULL(timer_producer_ring,
+ "Unable to reserve memory for ring");
+
+ rte_eal_remote_launch(_cancel_thread, NULL, test_lcore3);
+ rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore1);
+ rte_eal_remote_launch(_cancel_producer_wrapper, NULL, test_lcore2);
+
+ rte_eal_wait_lcore(test_lcore1);
+ rte_eal_wait_lcore(test_lcore2);
+ arm_done = 1;
+ rte_eal_wait_lcore(test_lcore3);
+ rte_ring_free(timer_producer_ring);
+
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS * 2,
+ MAX_TIMERS * 2),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+test_timer_cancel_burst_multicore(void)
+{
+ arm_done = 0;
+ timer_producer_ring = rte_ring_create("timer_cancel_queue",
+ MAX_TIMERS * 2, rte_socket_id(), 0);
+ TEST_ASSERT_NOT_NULL(timer_producer_ring,
+ "Unable to reserve memory for ring");
+
+ rte_eal_remote_launch(_cancel_burst_thread, NULL, test_lcore2);
+ rte_eal_remote_launch(_cancel_producer_burst_wrapper, NULL,
+ test_lcore1);
+
+ rte_eal_wait_lcore(test_lcore1);
+ arm_done = 1;
+ rte_eal_wait_lcore(test_lcore2);
+ rte_ring_free(timer_producer_ring);
+
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
+ MAX_TIMERS),
+ "Timer triggered count doesn't match arm count");
+
+ return TEST_SUCCESS;
+}
+
+static inline int
+test_timer_cancel_random(void)
+{
+ uint64_t i;
+ uint64_t events_canceled = 0;
+ struct rte_event_timer *ev_tim;
+ const struct rte_event_timer tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = 0,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 20,
+ };
+
+ for (i = 0; i < MAX_TIMERS; i++) {
+ TEST_ASSERT_SUCCESS(rte_mempool_get(eventdev_test_mempool,
+ (void **)&ev_tim),
+ "mempool alloc failed");
+ *ev_tim = tim;
+ ev_tim->ev.event_ptr = ev_tim;
+
+ TEST_ASSERT_EQUAL(rte_event_timer_arm_burst(timdev, &ev_tim,
+ 1), 1, "Failed to arm timer %d",
+ rte_errno);
+
+ if (rte_rand() & 1) {
+ rte_delay_us(100 + (i % 5000));
+ TEST_ASSERT_EQUAL(rte_event_timer_cancel_burst(
+ timdev,
+ &ev_tim, 1), 1,
+ "Failed to cancel event timer %d", rte_errno);
+ rte_mempool_put(eventdev_test_mempool, ev_tim);
+ events_canceled++;
+ }
+ }
+
+ TEST_ASSERT_SUCCESS(_wait_timer_triggers(30, MAX_TIMERS,
+ events_canceled),
+ "Timer triggered count doesn't match arm, cancel count");
+
+ return TEST_SUCCESS;
+}
+
+/* Check that the adapter can be created correctly */
+static int
+adapter_create(void)
+{
+ int adapter_id = 0;
+ struct rte_event_timer_adapter *adapter, *adapter2;
+
+ struct rte_event_timer_adapter_conf conf = {
+ .event_dev_id = evdev + 1, // invalid event dev id
+ .timer_adapter_id = adapter_id,
+ .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ .timer_tick_ns = NSECPERSEC / 10,
+ .max_tmo_ns = 180 * NSECPERSEC,
+ .nb_timers = MAX_TIMERS,
+ .flags = 0,
+ };
+ uint32_t caps = 0;
+
+ /* Test invalid conf */
+ adapter = rte_event_timer_adapter_create(&conf);
+ TEST_ASSERT_NULL(adapter, "Created adapter with invalid "
+ "event device id");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Incorrect errno value for "
+ "invalid event device id");
+
+ /* Test valid conf */
+ conf.event_dev_id = evdev;
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_caps_get(evdev, &caps),
+ "failed to get adapter capabilities");
+ if (!(caps & RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT))
+ adapter = rte_event_timer_adapter_create_ext(&conf,
+ test_port_conf_cb,
+ NULL);
+ else
+ adapter = rte_event_timer_adapter_create(&conf);
+ TEST_ASSERT_NOT_NULL(adapter, "Failed to create adapter with valid "
+ "configuration");
+
+ /* Test existing id */
+ adapter2 = rte_event_timer_adapter_create(&conf);
+ TEST_ASSERT_NULL(adapter2, "Created adapter with in-use id");
+ TEST_ASSERT(rte_errno == EEXIST, "Incorrect errno value for existing "
+ "id");
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapter),
+ "Failed to free adapter");
+
+ rte_mempool_free(eventdev_test_mempool);
+
+ return TEST_SUCCESS;
+}
+
+/* Test that adapter can be freed correctly. */
+static int
+adapter_free(void)
+{
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
+ "Failed to stop adapter");
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
+ "Failed to free valid adapter");
+
+ /* Test free of already freed adapter */
+ TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
+ "Freed adapter that was already freed");
+
+ /* Test free of null adapter */
+ timdev = NULL;
+ TEST_ASSERT_FAIL(rte_event_timer_adapter_free(timdev),
+ "Freed null adapter");
+
+ rte_mempool_free(eventdev_test_mempool);
+
+ return TEST_SUCCESS;
+}
+
+/* Test that adapter info can be retrieved and is correct. */
+static int
+adapter_get_info(void)
+{
+ struct rte_event_timer_adapter_info info;
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_get_info(timdev, &info),
+ "Failed to get adapter info");
+
+ if (using_services)
+ TEST_ASSERT_EQUAL(info.event_dev_port_id, 1,
+ "Expected port id = 1, got port id = %d",
+ info.event_dev_port_id);
+
+ return TEST_SUCCESS;
+}
+
+/* Test adapter lookup via adapter ID. */
+static int
+adapter_lookup(void)
+{
+ struct rte_event_timer_adapter *adapter;
+
+ adapter = rte_event_timer_adapter_lookup(TEST_ADAPTER_ID);
+ TEST_ASSERT_NOT_NULL(adapter, "Failed to lookup adapter");
+
+ return TEST_SUCCESS;
+}
+
+static int
+adapter_start(void)
+{
+ TEST_ASSERT_SUCCESS(_timdev_setup(180 * NSECPERSEC,
+ NSECPERSEC / 10),
+ "Failed to start adapter");
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_start(timdev),
+ "Failed to repeatedly start adapter");
+
+ return TEST_SUCCESS;
+}
+
+/* Test that adapter stops correctly. */
+static int
+adapter_stop(void)
+{
+ struct rte_event_timer_adapter *l_adapter = NULL;
+
+ /* Test adapter stop */
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stop(timdev),
+ "Failed to stop event adapter");
+
+ TEST_ASSERT_FAIL(rte_event_timer_adapter_stop(l_adapter),
+ "Erroneously stopped null event adapter");
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(timdev),
+ "Failed to free adapter");
+
+ rte_mempool_free(eventdev_test_mempool);
+
+ return TEST_SUCCESS;
+}
+
+/* Test increment and reset of ev_enq_count stat */
+static int
+stat_inc_reset_ev_enq(void)
+{
+ int ret, i, n;
+ int num_evtims = MAX_TIMERS;
+ struct rte_event_timer *evtims[num_evtims];
+ struct rte_event evs[BATCH_SIZE];
+ struct rte_event_timer_adapter_stats stats;
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
+ num_evtims);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
+ ret);
+
+ for (i = 0; i < num_evtims; i++) {
+ *evtims[i] = init_tim;
+ evtims[i]->ev.event_ptr = evtims[i];
+ }
+
+ ret = rte_event_timer_adapter_stats_get(timdev, &stats);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
+ TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0, "Stats not clear at "
+ "startup");
+
+ /* Test with the max value for the adapter */
+ ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
+ TEST_ASSERT_EQUAL(ret, num_evtims,
+ "Failed to arm all event timers: attempted = %d, "
+ "succeeded = %d, rte_errno = %s",
+ num_evtims, ret, rte_strerror(rte_errno));
+
+ rte_delay_ms(1000);
+
+#define MAX_TRIES num_evtims
+ int sum = 0;
+ int tries = 0;
+ bool done = false;
+ while (!done) {
+ sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
+ RTE_DIM(evs), 10);
+ if (sum >= num_evtims || ++tries >= MAX_TRIES)
+ done = true;
+
+ rte_delay_ms(10);
+ }
+
+ TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
+ "got %d", num_evtims, sum);
+
+ TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");
+
+ rte_delay_ms(100);
+
+ /* Make sure the eventdev is still empty */
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
+ 10);
+
+ TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
+ "events from event device");
+
+ /* Check stats again */
+ ret = rte_event_timer_adapter_stats_get(timdev, &stats);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
+ TEST_ASSERT_EQUAL((int)stats.ev_enq_count, num_evtims,
+ "Expected enqueue stat = %d; got %d", num_evtims,
+ (int)stats.ev_enq_count);
+
+ /* Reset and check again */
+ ret = rte_event_timer_adapter_stats_reset(timdev);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to reset stats");
+
+ ret = rte_event_timer_adapter_stats_get(timdev, &stats);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get stats");
+ TEST_ASSERT_EQUAL((int)stats.ev_enq_count, 0,
+ "Expected enqueue stat = %d; got %d", 0,
+ (int)stats.ev_enq_count);
+
+ rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
+ num_evtims);
+
+ return TEST_SUCCESS;
+}
+
+/* Test various cases in arming timers */
+static int
+event_timer_arm(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer_adapter *adapter = timdev;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up a timer */
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+
+ /* Test single timer arm succeeds */
+ ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+ rte_strerror(rte_errno));
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event timer "
+ "in incorrect state");
+
+ /* Test arm of armed timer fails */
+ ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 0, "expected return value from "
+ "rte_event_timer_arm_burst: 0, got: %d", ret);
+ TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
+ "after arming already armed timer");
+
+ /* Let timer expire */
+ rte_delay_ms(1000);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
+ "events from event device");
+
+ rte_mempool_put(eventdev_test_mempool, evtim);
+
+ return TEST_SUCCESS;
+}
+
+/* This test checks that repeated references to the same event timer in the
+ * arm request work as expected; only the first one through should succeed.
+ */
+static int
+event_timer_arm_double(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer_adapter *adapter = timdev;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up a timer */
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+
+ struct rte_event_timer *evtim_arr[] = {evtim, evtim};
+ ret = rte_event_timer_arm_burst(adapter, evtim_arr, RTE_DIM(evtim_arr));
+ TEST_ASSERT_EQUAL(ret, 1, "Unexpected return value from "
+ "rte_event_timer_arm_burst");
+ TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
+ "after double-arm");
+
+ /* Let timer expire */
+ rte_delay_ms(600);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number of expiry events - "
+ "expected: 1, actual: %d", n);
+
+ rte_mempool_put(eventdev_test_mempool, evtim);
+
+ return TEST_SUCCESS;
+}
+
+/* Test the timer expiry event is generated at the expected time. */
+static int
+event_timer_arm_expiry(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer_adapter *adapter = timdev;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event_timer *evtim2 = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up an event timer */
+ *evtim = init_tim;
+ evtim->timeout_ticks = 30; // expire in 3 secs
+ evtim->ev.event_ptr = evtim;
+
+ ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s",
+ rte_strerror(rte_errno));
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED, "Event "
+ "timer in incorrect state");
+
+ rte_delay_ms(2999);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event");
+
+ /* Delay 100 ms to account for the adapter tick window - should let us
+ * dequeue one event
+ */
+ rte_delay_ms(100);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Dequeued incorrect number (%d) of timer "
+ "expiry events", n);
+ TEST_ASSERT_EQUAL(evs[0].event_type, RTE_EVENT_TYPE_TIMER,
+ "Dequeued unexpected type of event");
+
+ /* Check that we recover the original event timer and then free it */
+ evtim2 = evs[0].event_ptr;
+ TEST_ASSERT_EQUAL(evtim, evtim2,
+ "Failed to recover pointer to original event timer");
+ rte_mempool_put(eventdev_test_mempool, evtim2);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that rearming a timer works as expected. */
+static int
+event_timer_arm_rearm(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event_timer *evtim2 = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up a timer */
+ *evtim = init_tim;
+ evtim->timeout_ticks = 1; // expire in 0.1 sec
+ evtim->ev.event_ptr = evtim;
+
+ /* Arm it */
+ ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+ rte_strerror(rte_errno));
+
+ /* Add 100ms to account for the adapter tick window */
+ rte_delay_ms(100 + 100);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
+ "events from event device");
+
+ /* Recover the timer through the event that was dequeued. */
+ evtim2 = evs[0].event_ptr;
+ TEST_ASSERT_EQUAL(evtim, evtim2,
+ "Failed to recover pointer to original event timer");
+
+ /* Need to reset state in case implementation can't do it */
+ evtim2->state = RTE_EVENT_TIMER_NOT_ARMED;
+
+ /* Rearm it */
+ ret = rte_event_timer_arm_burst(timdev, &evtim2, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+ rte_strerror(rte_errno));
+
+ /* Add 100ms to account for the adapter tick window */
+ rte_delay_ms(100 + 100);
+
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 1, "Failed to dequeue expected number of expiry "
+ "events from event device");
+
+ /* Free it */
+ evtim2 = evs[0].event_ptr;
+ TEST_ASSERT_EQUAL(evtim, evtim2,
+ "Failed to recover pointer to original event timer");
+ rte_mempool_put(eventdev_test_mempool, evtim2);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that the adapter handles the max specified number of timers as
+ * expected.
+ */
+static int
+event_timer_arm_max(void)
+{
+ int ret, i, n;
+ int num_evtims = MAX_TIMERS;
+ struct rte_event_timer *evtims[num_evtims];
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ ret = rte_mempool_get_bulk(eventdev_test_mempool, (void **)evtims,
+ num_evtims);
+ TEST_ASSERT_EQUAL(ret, 0, "Failed to get array of timer objs: ret = %d",
+ ret);
+
+ for (i = 0; i < num_evtims; i++) {
+ *evtims[i] = init_tim;
+ evtims[i]->ev.event_ptr = evtims[i];
+ }
+
+ /* Test with the max value for the adapter */
+ ret = rte_event_timer_arm_burst(timdev, evtims, num_evtims);
+ TEST_ASSERT_EQUAL(ret, num_evtims,
+ "Failed to arm all event timers: attempted = %d, "
+ "succeeded = %d, rte_errno = %s",
+ num_evtims, ret, rte_strerror(rte_errno));
+
+ rte_delay_ms(1000);
+
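+ /* Drain all expiry events; bound the number of dequeue attempts so a
+ * stalled adapter cannot hang the test.
+ */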
+#define MAX_TRIES num_evtims
+ int sum = 0;
+ int tries = 0;
+ bool done = false;
+ while (!done) {
+ sum += rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs,
+ RTE_DIM(evs), 10);
+ if (sum >= num_evtims || ++tries >= MAX_TRIES)
+ done = true;
+
+ rte_delay_ms(10);
+ }
+
+ TEST_ASSERT_EQUAL(sum, num_evtims, "Expected %d timer expiry events, "
+ "got %d", num_evtims, sum);
+
+ TEST_ASSERT(tries < MAX_TRIES, "Exceeded max tries");
+
+ rte_delay_ms(100);
+
+ /* Make sure the eventdev is still empty */
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs),
+ 10);
+
+ TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected number of timer expiry "
+ "events from event device");
+
+ rte_mempool_put_bulk(eventdev_test_mempool, (void **)evtims,
+ num_evtims);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that arming an event timer with an incorrect event sched type fails. */
+static int
+event_timer_arm_invalid_sched_type(void)
+{
+ int ret;
+ struct rte_event_timer *evtim = NULL;
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ if (!using_services)
+ return -ENOTSUP;
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+ evtim->ev.sched_type = RTE_SCHED_TYPE_PARALLEL; // bad sched type
+
+ ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
+ "sched type, but didn't");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
+ " arm fail with invalid queue");
+
+ rte_mempool_put(eventdev_test_mempool, &evtim);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that arming an event timer with a timeout value that is too small or
+ * too large fails.
+ */
+static int
+event_timer_arm_invalid_timeout(void)
+{
+ int ret;
+ struct rte_event_timer *evtim = NULL;
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+ evtim->timeout_ticks = 0; // timeout too small
+
+ ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
+ "timeout, but didn't");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
+ " arm fail with invalid timeout");
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOEARLY,
+ "Unexpected event timer state");
+
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+ evtim->timeout_ticks = 1801; // timeout too big
+
+ ret = rte_event_timer_arm_burst(timdev, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 0, "Expected to fail timer arm with invalid "
+ "timeout, but didn't");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after"
+ " arm fail with invalid timeout");
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ERROR_TOOLATE,
+ "Unexpected event timer state");
+
+ rte_mempool_put(eventdev_test_mempool, evtim);
+
+ return TEST_SUCCESS;
+}
+
+static int
+event_timer_cancel(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer_adapter *adapter = timdev;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Check that cancelling an uninited timer fails */
+ ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
+ "uninited timer");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
+ "cancelling uninited timer");
+
+ /* Set up a timer */
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+ evtim->timeout_ticks = 30; // expire in 3 sec
+
+ /* Check that cancelling an inited but unarmed timer fails */
+ ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 0, "Succeeded unexpectedly in canceling "
+ "unarmed timer");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Unexpected rte_errno value after "
+ "cancelling unarmed timer");
+
+ ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+ rte_strerror(rte_errno));
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
+ "evtim in incorrect state");
+
+ /* Delay 1 sec */
+ rte_delay_ms(1000);
+
+ ret = rte_event_timer_cancel_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to cancel event_timer: %s\n",
+ rte_strerror(rte_errno));
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_CANCELED,
+ "evtim in incorrect state");
+
+ rte_delay_ms(3000);
+
+ /* Make sure that no expiry event was generated */
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");
+
+ rte_mempool_put(eventdev_test_mempool, evtim);
+
+ return TEST_SUCCESS;
+}
+
+static int
+event_timer_cancel_double(void)
+{
+ uint16_t n;
+ int ret;
+ struct rte_event_timer_adapter *adapter = timdev;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_event evs[BATCH_SIZE];
+ const struct rte_event_timer init_tim = {
+ .ev.op = RTE_EVENT_OP_NEW,
+ .ev.queue_id = TEST_QUEUE_ID,
+ .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .ev.event_type = RTE_EVENT_TYPE_TIMER,
+ .state = RTE_EVENT_TIMER_NOT_ARMED,
+ .timeout_ticks = 5, // expire in .5 sec
+ };
+
+ rte_mempool_get(eventdev_test_mempool, (void **)&evtim);
+ if (evtim == NULL) {
+ /* Failed to get an event timer object */
+ return TEST_FAILED;
+ }
+
+ /* Set up a timer */
+ *evtim = init_tim;
+ evtim->ev.event_ptr = evtim;
+ evtim->timeout_ticks = 30; // expire in 3 sec
+
+ ret = rte_event_timer_arm_burst(adapter, &evtim, 1);
+ TEST_ASSERT_EQUAL(ret, 1, "Failed to arm event timer: %s\n",
+ rte_strerror(rte_errno));
+ TEST_ASSERT_EQUAL(evtim->state, RTE_EVENT_TIMER_ARMED,
+ "timer in unexpected state");
+
+ /* Now, test that referencing the same timer twice in the same call
+ * fails
+ */
+ struct rte_event_timer *evtim_arr[] = {evtim, evtim};
+ ret = rte_event_timer_cancel_burst(adapter, evtim_arr,
+ RTE_DIM(evtim_arr));
+
+ /* Two requests to cancel same timer, only one should succeed */
+ TEST_ASSERT_EQUAL(ret, 1, "Succeeded unexpectedly in canceling timer "
+ "twice");
+
+ TEST_ASSERT_EQUAL(rte_errno, EALREADY, "Unexpected rte_errno value "
+ "after double-cancel: rte_errno = %d", rte_errno);
+
+ rte_delay_ms(3000);
+
+ /* Still make sure that no expiry event was generated */
+ n = rte_event_dequeue_burst(evdev, TEST_PORT_ID, evs, RTE_DIM(evs), 0);
+ TEST_ASSERT_EQUAL(n, 0, "Dequeued unexpected timer expiry event\n");
+
+ rte_mempool_put(eventdev_test_mempool, evtim);
+
+ return TEST_SUCCESS;
+}
+
+/* Check that event timer adapter tick resolution works as expected by testing
+ * the number of adapter ticks that occur within a particular time interval.
+ */
+static int
+adapter_tick_resolution(void)
+{
+ struct rte_event_timer_adapter_stats stats;
+ uint64_t adapter_tick_count;
+
+ /* Only run this test in the software driver case */
+ if (!using_services)
+ return -ENOTSUP;
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_reset(timdev),
+ "Failed to reset stats");
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
+ &stats), "Failed to get adapter stats");
+ TEST_ASSERT_EQUAL(stats.adapter_tick_count, 0, "Adapter tick count "
+ "not zeroed out");
+
+ /* Delay 1 second; should let at least 10 ticks occur with the default
+ * adapter configuration used by this test.
+ */
+ rte_delay_ms(1000);
+
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_stats_get(timdev,
+ &stats), "Failed to get adapter stats");
+
+ adapter_tick_count = stats.adapter_tick_count;
+ TEST_ASSERT(adapter_tick_count >= 10 && adapter_tick_count <= 12,
+ "Expected 10-12 adapter ticks, got %"PRIu64"\n",
+ adapter_tick_count);
+
+ return TEST_SUCCESS;
+}
+
+static int
+adapter_create_max(void)
+{
+ int i;
+ uint32_t svc_start_count, svc_end_count;
+ struct rte_event_timer_adapter *adapters[
+ RTE_EVENT_TIMER_ADAPTER_NUM_MAX + 1];
+
+ struct rte_event_timer_adapter_conf conf = {
+ .event_dev_id = evdev,
+ // timer_adapter_id set in loop
+ .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ .timer_tick_ns = NSECPERSEC / 10,
+ .max_tmo_ns = 180 * NSECPERSEC,
+ .nb_timers = MAX_TIMERS,
+ .flags = 0,
+ };
+
+ if (!using_services)
+ return -ENOTSUP;
+
+ svc_start_count = rte_service_get_count();
+
+ /* This test expects that there are sufficient service IDs available
+ * to be allocated. I.e., RTE_EVENT_TIMER_ADAPTER_NUM_MAX may need to
+ * be less than RTE_SERVICE_NUM_MAX if anything else uses a service
+ * (the SW event device, for example).
+ */
+ for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++) {
+ conf.timer_adapter_id = i;
+ adapters[i] = rte_event_timer_adapter_create_ext(&conf,
+ test_port_conf_cb, NULL);
+ TEST_ASSERT_NOT_NULL(adapters[i], "Failed to create adapter "
+ "%d", i);
+ }
+
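+ /* Attempt to create one more adapter than the maximum; it must fail */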
+ conf.timer_adapter_id = i;
+ adapters[i] = rte_event_timer_adapter_create(&conf);
+ TEST_ASSERT_NULL(adapters[i], "Created too many adapters");
+
+ /* Check that at least RTE_EVENT_TIMER_ADAPTER_NUM_MAX services
+ * have been created
+ */
+ svc_end_count = rte_service_get_count();
+ TEST_ASSERT_EQUAL(svc_end_count - svc_start_count,
+ RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
+ "Failed to create expected number of services");
+
+ for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
+ TEST_ASSERT_SUCCESS(rte_event_timer_adapter_free(adapters[i]),
+ "Failed to free adapter %d", i);
+
+ /* Check that service count is back to where it was at start */
+ svc_end_count = rte_service_get_count();
+ TEST_ASSERT_EQUAL(svc_start_count, svc_end_count, "Failed to release "
+ "correct number of services");
+
+ return TEST_SUCCESS;
+}
+
+static struct unit_test_suite event_timer_adptr_functional_testsuite = {
+ .suite_name = "event timer functional test suite",
+ .setup = testsuite_setup,
+ .teardown = testsuite_teardown,
+ .unit_test_cases = {
+ TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
+ test_timer_state),
+ TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
+ test_timer_arm),
+ TEST_CASE_ST(timdev_setup_usec, timdev_teardown,
+ test_timer_arm_burst),
+ TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
+ test_timer_cancel),
+ TEST_CASE_ST(timdev_setup_sec, timdev_teardown,
+ test_timer_cancel_random),
+ TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
+ test_timer_arm_multicore),
+ TEST_CASE_ST(timdev_setup_usec_multicore, timdev_teardown,
+ test_timer_arm_burst_multicore),
+ TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
+ test_timer_cancel_multicore),
+ TEST_CASE_ST(timdev_setup_sec_multicore, timdev_teardown,
+ test_timer_cancel_burst_multicore),
+ TEST_CASE(adapter_create),
+ TEST_CASE_ST(timdev_setup_msec, NULL, adapter_free),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ adapter_get_info),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ adapter_lookup),
+ TEST_CASE_ST(NULL, timdev_teardown,
+ adapter_start),
+ TEST_CASE_ST(timdev_setup_msec, NULL,
+ adapter_stop),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ stat_inc_reset_ev_enq),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_arm),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_arm_double),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_arm_expiry),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_arm_rearm),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_arm_max),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_arm_invalid_sched_type),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_arm_invalid_timeout),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_cancel),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ event_timer_cancel_double),
+ TEST_CASE_ST(timdev_setup_msec, timdev_teardown,
+ adapter_tick_resolution),
+ TEST_CASE(adapter_create_max),
+ TEST_CASES_END() /**< NULL terminate unit test array */
+ }
+};
+
+static int
+test_event_timer_adapter_func(void)
+{
+ return unit_test_suite_runner(&event_timer_adptr_functional_testsuite);
+}
+
+REGISTER_TEST_COMMAND(event_timer_adapter_test, test_event_timer_adapter_func);
diff --git a/test/test/test_fbarray.c b/test/test/test_fbarray.c
new file mode 100644
index 00000000..8c44e2c7
--- /dev/null
+++ b/test/test/test_fbarray.c
@@ -0,0 +1,576 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <limits.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_errno.h>
+#include <rte_fbarray.h>
+
+#include "test.h"
+
+struct fbarray_testsuite_params {
+ struct rte_fbarray arr;
+ int start;
+ int end;
+};
+
+static struct fbarray_testsuite_params param;
+
+#define FBARRAY_TEST_ARR_NAME "fbarray_autotest"
+#define FBARRAY_TEST_LEN 256
+#define FBARRAY_TEST_ELT_SZ (sizeof(int))
+
+static int autotest_setup(void)
+{
+ return rte_fbarray_init(&param.arr, FBARRAY_TEST_ARR_NAME,
+ FBARRAY_TEST_LEN, FBARRAY_TEST_ELT_SZ);
+}
+
+static void autotest_teardown(void)
+{
+ rte_fbarray_destroy(&param.arr);
+}
+
+static int init_array(void)
+{
+ int i;
+ for (i = param.start; i <= param.end; i++) {
+ if (rte_fbarray_set_used(&param.arr, i))
+ return -1;
+ }
+ return 0;
+}
+
+static void reset_array(void)
+{
+ int i;
+ for (i = 0; i < FBARRAY_TEST_LEN; i++)
+ rte_fbarray_set_free(&param.arr, i);
+}
+
+static int first_msk_test_setup(void)
+{
+ /* put all within first mask */
+ param.start = 3;
+ param.end = 10;
+ return init_array();
+}
+
+static int cross_msk_test_setup(void)
+{
+ /* put all within second and third mask */
+ param.start = 70;
+ param.end = 160;
+ return init_array();
+}
+
+static int multi_msk_test_setup(void)
+{
+ /* put all within first and last mask */
+ param.start = 3;
+ param.end = FBARRAY_TEST_LEN - 20;
+ return init_array();
+}
+
+static int last_msk_test_setup(void)
+{
+ /* put all within last mask */
+ param.start = FBARRAY_TEST_LEN - 20;
+ param.end = FBARRAY_TEST_LEN - 1;
+ return init_array();
+}
+
+static int full_msk_test_setup(void)
+{
+ /* fill entire mask */
+ param.start = 0;
+ param.end = FBARRAY_TEST_LEN - 1;
+ return init_array();
+}
+
+static int empty_msk_test_setup(void)
+{
+ /* do not fill anything in */
+ reset_array();
+ return 0;
+}
+
+static int test_invalid(void)
+{
+ struct rte_fbarray dummy;
+
+ /* invalid parameters */
+ TEST_ASSERT_FAIL(rte_fbarray_attach(NULL),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT_FAIL(rte_fbarray_detach(NULL),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT_FAIL(rte_fbarray_destroy(NULL),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno valuey\n");
+ TEST_ASSERT_FAIL(rte_fbarray_init(NULL, "fail", 16, 16),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT_FAIL(rte_fbarray_init(&dummy, NULL, 16, 16),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT_FAIL(rte_fbarray_init(&dummy, "fail", 0, 16),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT_FAIL(rte_fbarray_init(&dummy, "fail", 16, 0),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ /* len must not be greater than INT_MAX */
+ TEST_ASSERT_FAIL(rte_fbarray_init(&dummy, "fail", INT_MAX + 1U, 16),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT_NULL(rte_fbarray_get(NULL, 0),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_idx(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_set_free(NULL, 0),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_set_used(NULL, 0),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_contig_free(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_contig_used(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_rev_contig_free(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_rev_contig_used(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_free(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_used(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_free(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_used(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_n_free(NULL, 0, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_n_used(NULL, 0, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_n_free(NULL, 0, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_n_used(NULL, 0, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_is_used(NULL, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT_SUCCESS(rte_fbarray_init(&dummy, "success",
+ FBARRAY_TEST_LEN, 8),
+ "Failed to initialize valid fbarray\n");
+
+ /* test API for handling invalid parameters with a valid fbarray */
+ TEST_ASSERT_NULL(rte_fbarray_get(&dummy, FBARRAY_TEST_LEN),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_idx(&dummy, NULL) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_set_free(&dummy, FBARRAY_TEST_LEN),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_set_used(&dummy, FBARRAY_TEST_LEN),
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_contig_free(&dummy, FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_contig_used(&dummy, FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_rev_contig_free(&dummy,
+ FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_rev_contig_used(&dummy,
+ FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_next_free(&dummy, FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_next_used(&dummy, FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_prev_free(&dummy, FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_prev_used(&dummy, FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_next_n_free(&dummy,
+ FBARRAY_TEST_LEN, 1) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_n_free(&dummy, 0,
+ FBARRAY_TEST_LEN + 1) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_n_free(&dummy, 0, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_next_n_used(&dummy,
+ FBARRAY_TEST_LEN, 1) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_n_used(&dummy, 0,
+ FBARRAY_TEST_LEN + 1) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_n_used(&dummy, 0, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_prev_n_free(&dummy,
+ FBARRAY_TEST_LEN, 1) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_n_free(&dummy, 0,
+ FBARRAY_TEST_LEN + 1) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_n_free(&dummy, 0, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_find_prev_n_used(&dummy,
+ FBARRAY_TEST_LEN, 1) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_n_used(&dummy, 0,
+ FBARRAY_TEST_LEN + 1) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_n_used(&dummy, 0, 0) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT(rte_fbarray_is_used(&dummy, FBARRAY_TEST_LEN) < 0,
+ "Call succeeded with invalid parameters\n");
+ TEST_ASSERT_EQUAL(rte_errno, EINVAL, "Wrong errno value\n");
+
+ TEST_ASSERT_SUCCESS(rte_fbarray_destroy(&dummy),
+ "Failed to destroy valid fbarray\n");
+
+ return TEST_SUCCESS;
+}
+
+static int check_free(void)
+{
+ const int idx = 0;
+ const int last_idx = FBARRAY_TEST_LEN - 1;
+
+ /* ensure we can find a free spot */
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_free(&param.arr, idx), idx,
+ "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(&param.arr, idx, 1), idx,
+ "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(&param.arr, idx),
+ FBARRAY_TEST_LEN,
+ "Free space not found where expected\n");
+
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_free(&param.arr, idx), idx,
+ "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(&param.arr, idx, 1), idx,
+ "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(&param.arr, idx), 1,
+ "Free space not found where expected\n");
+
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_free(&param.arr, last_idx),
+ last_idx, "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(&param.arr, last_idx, 1),
+ last_idx, "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(&param.arr,
+ last_idx), FBARRAY_TEST_LEN,
+ "Free space not found where expected\n");
+
+ /* ensure we can't find any used spots */
+ TEST_ASSERT(rte_fbarray_find_next_used(&param.arr, idx) < 0,
+ "Used space found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_n_used(&param.arr, idx, 1) < 0,
+ "Used space found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(&param.arr, idx), 0,
+ "Used space found where none was expected\n");
+
+ TEST_ASSERT(rte_fbarray_find_prev_used(&param.arr, last_idx) < 0,
+ "Used space found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_prev_n_used(&param.arr, last_idx, 1) < 0,
+ "Used space found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(&param.arr,
+ last_idx), 0,
+ "Used space found where none was expected\n");
+
+ return 0;
+}
+
+static int check_used_one(void)
+{
+ const int idx = 0;
+ const int last_idx = FBARRAY_TEST_LEN - 1;
+
+ /* check that we can find used spots now */
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_used(&param.arr, idx), idx,
+ "Used space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_used(&param.arr, idx, 1), idx,
+ "Used space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(&param.arr, idx), 1,
+ "Used space not found where expected\n");
+
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_used(&param.arr, last_idx), idx,
+ "Used space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_used(&param.arr, last_idx, 1),
+ idx, "Used space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(&param.arr, idx), 1,
+ "Used space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(&param.arr,
+ last_idx), idx,
+ "Used space not found where expected\n");
+
+ /* check if further indices are still free */
+ TEST_ASSERT(rte_fbarray_find_next_used(&param.arr, idx + 1) < 0,
+ "Used space not found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+ TEST_ASSERT(rte_fbarray_find_next_n_used(&param.arr, idx + 1, 1) < 0,
+ "Used space not found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_errno, ENOENT, "Wrong errno value\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(&param.arr, idx + 1), 0,
+ "Used space not found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(&param.arr, idx + 1),
+ FBARRAY_TEST_LEN - 1,
+ "Used space not found where none was expected\n");
+
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_used(&param.arr, last_idx), 0,
+ "Used space not found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_used(&param.arr, last_idx, 1),
+ 0, "Used space not found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(&param.arr,
+ last_idx), 0,
+ "Used space not found where none was expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(&param.arr,
+ last_idx), FBARRAY_TEST_LEN - 1,
+ "Used space not found where none was expected\n");
+
+ return 0;
+}
+
+static int test_basic(void)
+{
+ const int idx = 0;
+ int i;
+
+ /* check array count */
+ TEST_ASSERT_EQUAL(param.arr.count, 0, "Wrong element count\n");
+
+ /* ensure we can find a free spot */
+ if (check_free())
+ return TEST_FAILED;
+
+ /* check if used */
+ TEST_ASSERT_EQUAL(rte_fbarray_is_used(&param.arr, idx), 0,
+ "Used space found where not expected\n");
+
+ /* mark as used */
+ TEST_ASSERT_SUCCESS(rte_fbarray_set_used(&param.arr, idx),
+ "Failed to set as used\n");
+
+ /* check if used again */
+ TEST_ASSERT_NOT_EQUAL(rte_fbarray_is_used(&param.arr, idx), 0,
+ "Used space not found where expected\n");
+
+ if (check_used_one())
+ return TEST_FAILED;
+
+ /* check array count */
+ TEST_ASSERT_EQUAL(param.arr.count, 1, "Wrong element count\n");
+
+ /* check if getting pointers works for every element */
+ for (i = 0; i < FBARRAY_TEST_LEN; i++) {
+ void *td = rte_fbarray_get(&param.arr, i);
+ TEST_ASSERT_NOT_NULL(td, "Invalid pointer returned\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_idx(&param.arr, td), i,
+ "Wrong index returned\n");
+ }
+
+ /* mark as free */
+ TEST_ASSERT_SUCCESS(rte_fbarray_set_free(&param.arr, idx),
+ "Failed to set as free\n");
+
+ /* check array count */
+ TEST_ASSERT_EQUAL(param.arr.count, 0, "Wrong element count\n");
+
+ /* check if used */
+ TEST_ASSERT_EQUAL(rte_fbarray_is_used(&param.arr, idx), 0,
+ "Used space found where not expected\n");
+
+ if (check_free())
+ return TEST_FAILED;
+
+ reset_array();
+
+ return TEST_SUCCESS;
+}
+
+static int ensure_correct(struct rte_fbarray *arr, int first, int last,
+ bool used)
+{
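+ /* For every index in [first, last], check that the forward and reverse
+ * contiguous-run lookups and the next/prev find functions all agree on
+ * the extent of the used (or free) range.
+ */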
+ int i, len = last - first + 1;
+ for (i = 0; i < len; i++) {
+ int cur = first + i;
+ int cur_len = len - i;
+
+ if (used) {
+ TEST_ASSERT_EQUAL(rte_fbarray_find_contig_used(arr,
+ cur), cur_len,
+ "Used space length is wrong\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(arr,
+ last), len,
+ "Used space length is wrong\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_used(arr,
+ cur), i + 1,
+ "Used space length is wrong\n");
+
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_used(arr, cur),
+ cur,
+ "Used space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_used(arr,
+ cur, 1), cur,
+ "Used space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_used(arr, cur,
+ cur_len), cur,
+ "Used space not found where expected\n");
+
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_used(arr, cur),
+ cur,
+ "Used space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_used(arr,
+ last, cur_len), cur,
+ "Used space not found where expected\n");
+ } else {
+ TEST_ASSERT_EQUAL(rte_fbarray_find_contig_free(arr,
+ cur), cur_len,
+ "Free space length is wrong\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(arr,
+ last), len,
+ "Free space length is wrong\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_rev_contig_free(arr,
+ cur), i + 1,
+ "Free space length is wrong\n");
+
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_free(arr, cur),
+ cur,
+ "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(arr, cur,
+ 1), cur,
+ "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_next_n_free(arr, cur,
+ cur_len), cur,
+ "Free space not found where expected\n");
+
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_free(arr, cur),
+ cur,
+ "Free space not found where expected\n");
+ TEST_ASSERT_EQUAL(rte_fbarray_find_prev_n_free(arr,
+ last, cur_len), cur,
+ "Free space not found where expected\n");
+ }
+ }
+ return 0;
+}
+
+static int test_find(void)
+{
+ TEST_ASSERT_EQUAL((int)param.arr.count, param.end - param.start + 1,
+ "Wrong element count\n");
+ /* ensure space is free before start */
+ if (ensure_correct(&param.arr, 0, param.start - 1, false))
+ return TEST_FAILED;
+ /* ensure space is occupied where it's supposed to be */
+ if (ensure_correct(&param.arr, param.start, param.end, true))
+ return TEST_FAILED;
+ /* ensure space after end is free as well */
+ if (ensure_correct(&param.arr, param.end + 1, FBARRAY_TEST_LEN - 1,
+ false))
+ return TEST_FAILED;
+ return TEST_SUCCESS;
+}
+
+static int test_empty(void)
+{
+ TEST_ASSERT_EQUAL((int)param.arr.count, 0, "Wrong element count\n");
+ /* ensure space is free */
+ if (ensure_correct(&param.arr, 0, FBARRAY_TEST_LEN - 1, false))
+ return TEST_FAILED;
+ return TEST_SUCCESS;
+}
+
+
+static struct unit_test_suite fbarray_test_suite = {
+ .suite_name = "fbarray autotest",
+ .setup = autotest_setup,
+ .teardown = autotest_teardown,
+ .unit_test_cases = {
+ TEST_CASE(test_invalid),
+ TEST_CASE(test_basic),
+ TEST_CASE_ST(first_msk_test_setup, reset_array, test_find),
+ TEST_CASE_ST(cross_msk_test_setup, reset_array, test_find),
+ TEST_CASE_ST(multi_msk_test_setup, reset_array, test_find),
+ TEST_CASE_ST(last_msk_test_setup, reset_array, test_find),
+ TEST_CASE_ST(full_msk_test_setup, reset_array, test_find),
+ TEST_CASE_ST(empty_msk_test_setup, reset_array, test_empty),
+ TEST_CASES_END()
+ }
+};
+
+static int
+test_fbarray(void)
+{
+ return unit_test_suite_runner(&fbarray_test_suite);
+}
+
+REGISTER_TEST_COMMAND(fbarray_autotest, test_fbarray);
diff --git a/test/test/test_flow_classify.c b/test/test/test_flow_classify.c
index fc83b69a..5f5beeee 100644
--- a/test/test/test_flow_classify.c
+++ b/test/test/test_flow_classify.c
@@ -871,32 +871,32 @@ test_flow_classify(void)
printf("Line %i: f_create has failed!\n", __LINE__);
rte_flow_classifier_free(cls->cls);
rte_free(cls);
- return -1;
+ return TEST_FAILED;
}
printf("Created table_acl for for IPv4 five tuple packets\n");
ret = init_mbufpool();
if (ret) {
printf("Line %i: init_mbufpool has failed!\n", __LINE__);
- return -1;
+ return TEST_FAILED;
}
if (test_invalid_parameters() < 0)
- return -1;
+ return TEST_FAILED;
if (test_valid_parameters() < 0)
- return -1;
+ return TEST_FAILED;
if (test_invalid_patterns() < 0)
- return -1;
+ return TEST_FAILED;
if (test_invalid_actions() < 0)
- return -1;
+ return TEST_FAILED;
if (test_query_udp() < 0)
- return -1;
+ return TEST_FAILED;
if (test_query_tcp() < 0)
- return -1;
+ return TEST_FAILED;
if (test_query_sctp() < 0)
- return -1;
+ return TEST_FAILED;
- return 0;
+ return TEST_SUCCESS;
}
REGISTER_TEST_COMMAND(flow_classify_autotest, test_flow_classify);
diff --git a/test/test/test_hash.c b/test/test/test_hash.c
index edf41f5a..b3db9fd1 100644
--- a/test/test/test_hash.c
+++ b/test/test/test_hash.c
@@ -1103,6 +1103,7 @@ static int test_average_table_utilization(void)
unsigned i, j;
unsigned added_keys, average_keys_added = 0;
int ret;
+ unsigned int cnt;
printf("\n# Running test to determine average utilization"
"\n before adding elements begins to fail\n");
@@ -1121,13 +1122,24 @@ static int test_average_table_utilization(void)
for (i = 0; i < ut_params.key_len; i++)
simple_key[i] = rte_rand() % 255;
ret = rte_hash_add_key(handle, simple_key);
+ if (ret < 0)
+ break;
}
+
if (ret != -ENOSPC) {
printf("Unexpected error when adding keys\n");
rte_hash_free(handle);
return -1;
}
+ cnt = rte_hash_count(handle);
+ if (cnt != added_keys) {
+ printf("rte_hash_count returned wrong value %u, %u,"
+ "%u\n", j, added_keys, cnt);
+ rte_hash_free(handle);
+ return -1;
+ }
+
average_keys_added += added_keys;
/* Reset the table */
diff --git a/test/test/test_hash_multiwriter.c b/test/test/test_hash_multiwriter.c
index ef5fce3d..6a3eb10b 100644
--- a/test/test/test_hash_multiwriter.c
+++ b/test/test/test_hash_multiwriter.c
@@ -48,18 +48,29 @@ static rte_atomic64_t ginsertions;
static int use_htm;
static int
-test_hash_multiwriter_worker(__attribute__((unused)) void *arg)
+test_hash_multiwriter_worker(void *arg)
{
uint64_t i, offset;
+ uint16_t pos_core;
uint32_t lcore_id = rte_lcore_id();
uint64_t begin, cycles;
+ uint16_t *enabled_core_ids = (uint16_t *)arg;
- offset = (lcore_id - rte_get_master_lcore())
- * tbl_multiwriter_test_params.nb_tsx_insertion;
+ for (pos_core = 0; pos_core < rte_lcore_count(); pos_core++) {
+ if (enabled_core_ids[pos_core] == lcore_id)
+ break;
+ }
+
+ /*
+ * Calculate the offset for this core's entries based on its position
+ * in the list of enabled cores (disabled cores are not counted)
+ */
+ offset = pos_core * tbl_multiwriter_test_params.nb_tsx_insertion;
printf("Core #%d inserting %d: %'"PRId64" - %'"PRId64"\n",
lcore_id, tbl_multiwriter_test_params.nb_tsx_insertion,
- offset, offset + tbl_multiwriter_test_params.nb_tsx_insertion);
+ offset,
+ offset + tbl_multiwriter_test_params.nb_tsx_insertion - 1);
begin = rte_rdtsc_precise();
@@ -88,6 +99,8 @@ test_hash_multiwriter(void)
{
unsigned int i, rounded_nb_total_tsx_insertion;
static unsigned calledCount = 1;
+ uint16_t enabled_core_ids[RTE_MAX_LCORE];
+ uint16_t core_id;
uint32_t *keys;
uint32_t *found;
@@ -116,6 +129,7 @@ test_hash_multiwriter(void)
uint32_t duplicated_keys = 0;
uint32_t lost_keys = 0;
+ uint32_t count;
snprintf(name, 32, "test%u", calledCount++);
hash_params.name = name;
@@ -140,16 +154,17 @@ test_hash_multiwriter(void)
goto err1;
}
+ for (i = 0; i < nb_entries; i++)
+ keys[i] = i;
+
+ tbl_multiwriter_test_params.keys = keys;
+
found = rte_zmalloc(NULL, sizeof(uint32_t) * nb_entries, 0);
if (found == NULL) {
printf("RTE_ZMALLOC failed\n");
goto err2;
}
- for (i = 0; i < nb_entries; i++)
- keys[i] = i;
-
- tbl_multiwriter_test_params.keys = keys;
tbl_multiwriter_test_params.found = found;
rte_atomic64_init(&gcycles);
@@ -158,11 +173,36 @@ test_hash_multiwriter(void)
rte_atomic64_init(&ginsertions);
rte_atomic64_clear(&ginsertions);
+ /* Get list of enabled cores */
+ i = 0;
+ for (core_id = 0; core_id < RTE_MAX_LCORE; core_id++) {
+ if (i == rte_lcore_count())
+ break;
+
+ if (rte_lcore_is_enabled(core_id)) {
+ enabled_core_ids[i] = core_id;
+ i++;
+ }
+ }
+
+ if (i != rte_lcore_count()) {
+ printf("Number of enabled cores in list is different from "
+ "number given by rte_lcore_count()\n");
+ goto err3;
+ }
+
/* Fire all threads. */
rte_eal_mp_remote_launch(test_hash_multiwriter_worker,
- NULL, CALL_MASTER);
+ enabled_core_ids, CALL_MASTER);
rte_eal_mp_wait_lcore();
+ count = rte_hash_count(handle);
+ if (count != rounded_nb_total_tsx_insertion) {
+ printf("rte_hash_count returned wrong value %u, %d\n",
+ rounded_nb_total_tsx_insertion, count);
+ goto err3;
+ }
+
while (rte_hash_iterate(handle, &next_key, &next_data, &iter) >= 0) {
/* Search for the key in the list of keys added. */
i = *(const uint32_t *)next_key;
diff --git a/test/test/test_hash_perf.c b/test/test/test_hash_perf.c
index a81d0c79..33dcb9fc 100644
--- a/test/test/test_hash_perf.c
+++ b/test/test/test_hash_perf.c
@@ -76,7 +76,8 @@ static struct rte_hash_parameters ut_params = {
};
static int
-create_table(unsigned with_data, unsigned table_index)
+create_table(unsigned int with_data, unsigned int table_index,
+ unsigned int with_locks)
{
char name[RTE_HASH_NAMESIZE];
@@ -86,6 +87,14 @@ create_table(unsigned with_data, unsigned table_index)
else
sprintf(name, "test_hash%d", hashtest_key_lens[table_index]);
+
+ if (with_locks)
+ ut_params.extra_flag =
+ RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT
+ | RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY;
+ else
+ ut_params.extra_flag = 0;
+
ut_params.name = name;
ut_params.key_len = hashtest_key_lens[table_index];
ut_params.socket_id = rte_socket_id();
@@ -459,7 +468,7 @@ reset_table(unsigned table_index)
}
static int
-run_all_tbl_perf_tests(unsigned with_pushes)
+run_all_tbl_perf_tests(unsigned int with_pushes, unsigned int with_locks)
{
unsigned i, j, with_data, with_hash;
@@ -468,7 +477,7 @@ run_all_tbl_perf_tests(unsigned with_pushes)
for (with_data = 0; with_data <= 1; with_data++) {
for (i = 0; i < NUM_KEYSIZES; i++) {
- if (create_table(with_data, i) < 0)
+ if (create_table(with_data, i, with_locks) < 0)
return -1;
if (get_input_keys(with_pushes, i) < 0)
@@ -611,15 +620,20 @@ fbk_hash_perf_test(void)
static int
test_hash_perf(void)
{
- unsigned with_pushes;
-
- for (with_pushes = 0; with_pushes <= 1; with_pushes++) {
- if (with_pushes == 0)
- printf("\nALL ELEMENTS IN PRIMARY LOCATION\n");
+ unsigned int with_pushes, with_locks;
+ for (with_locks = 0; with_locks <= 1; with_locks++) {
+ if (with_locks)
+ printf("\nWith locks in the code\n");
else
- printf("\nELEMENTS IN PRIMARY OR SECONDARY LOCATION\n");
- if (run_all_tbl_perf_tests(with_pushes) < 0)
- return -1;
+ printf("\nWithout locks in the code\n");
+ for (with_pushes = 0; with_pushes <= 1; with_pushes++) {
+ if (with_pushes == 0)
+ printf("\nALL ELEMENTS IN PRIMARY LOCATION\n");
+ else
+ printf("\nELEMENTS IN PRIMARY OR SECONDARY LOCATION\n");
+ if (run_all_tbl_perf_tests(with_pushes, with_locks) < 0)
+ return -1;
+ }
}
if (fbk_hash_perf_test() < 0)
return -1;
diff --git a/test/test/test_hash_readwrite.c b/test/test/test_hash_readwrite.c
new file mode 100644
index 00000000..55ae33d8
--- /dev/null
+++ b/test/test/test_hash_readwrite.c
@@ -0,0 +1,637 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <inttypes.h>
+#include <locale.h>
+
+#include <rte_cycles.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+#include <rte_jhash.h>
+#include <rte_launch.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_spinlock.h>
+
+#include "test.h"
+
+#define RTE_RWTEST_FAIL 0
+
+#define TOTAL_ENTRY (16*1024*1024)
+#define TOTAL_INSERT (15*1024*1024)
+
+#define NUM_TEST 3
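+/* numbers of reader/writer cores exercised in each test round */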
+unsigned int core_cnt[NUM_TEST] = {2, 4, 8};
+
+struct perf {
+ uint32_t single_read;
+ uint32_t single_write;
+ uint32_t read_only[NUM_TEST];
+ uint32_t write_only[NUM_TEST];
+ uint32_t read_write_r[NUM_TEST];
+ uint32_t read_write_w[NUM_TEST];
+};
+
+static struct perf htm_results, non_htm_results;
+
+struct {
+ uint32_t *keys;
+ uint32_t *found;
+ uint32_t num_insert;
+ uint32_t rounded_tot_insert;
+ struct rte_hash *h;
+} tbl_rw_test_param;
+
+static rte_atomic64_t gcycles;
+static rte_atomic64_t ginsertions;
+
+static rte_atomic64_t gread_cycles;
+static rte_atomic64_t gwrite_cycles;
+
+static rte_atomic64_t greads;
+static rte_atomic64_t gwrites;
+
+static int
+test_hash_readwrite_worker(__attribute__((unused)) void *arg)
+{
+ uint64_t i, offset;
+ uint32_t lcore_id = rte_lcore_id();
+ uint64_t begin, cycles;
+ int ret;
+
+ offset = (lcore_id - rte_get_master_lcore())
+ * tbl_rw_test_param.num_insert;
+
+ printf("Core #%d inserting and reading %d: %'"PRId64" - %'"PRId64"\n",
+ lcore_id, tbl_rw_test_param.num_insert,
+ offset, offset + tbl_rw_test_param.num_insert);
+
+ begin = rte_rdtsc_precise();
+
+ for (i = offset; i < offset + tbl_rw_test_param.num_insert; i++) {
+
+ if (rte_hash_lookup(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i) > 0)
+ break;
+
+ ret = rte_hash_add_key(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i);
+ if (ret < 0)
+ break;
+
+ if (rte_hash_lookup(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i) != ret)
+ break;
+ }
+
+ cycles = rte_rdtsc_precise() - begin;
+ rte_atomic64_add(&gcycles, cycles);
+ rte_atomic64_add(&ginsertions, i - offset);
+
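+ /* Mark the keys this worker did not insert so the later verification
+ * pass skips them.
+ */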
+ for (; i < offset + tbl_rw_test_param.num_insert; i++)
+ tbl_rw_test_param.keys[i] = RTE_RWTEST_FAIL;
+
+ return 0;
+}
+
+static int
+init_params(int use_htm, int use_jhash)
+{
+ unsigned int i;
+
+ uint32_t *keys = NULL;
+ uint32_t *found = NULL;
+ struct rte_hash *handle;
+
+ struct rte_hash_parameters hash_params = {
+ .entries = TOTAL_ENTRY,
+ .key_len = sizeof(uint32_t),
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+ if (use_jhash)
+ hash_params.hash_func = rte_jhash;
+ else
+ hash_params.hash_func = rte_hash_crc;
+
+ if (use_htm)
+ hash_params.extra_flag =
+ RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT |
+ RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY;
+ else
+ hash_params.extra_flag =
+ RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY;
+
+ hash_params.name = "tests";
+
+ handle = rte_hash_create(&hash_params);
+ if (handle == NULL) {
+ printf("hash creation failed");
+ return -1;
+ }
+
+ tbl_rw_test_param.h = handle;
+ keys = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_ENTRY, 0);
+
+ if (keys == NULL) {
+ printf("RTE_MALLOC failed\n");
+ goto err;
+ }
+
+ found = rte_zmalloc(NULL, sizeof(uint32_t) * TOTAL_ENTRY, 0);
+ if (found == NULL) {
+ printf("RTE_ZMALLOC failed\n");
+ goto err;
+ }
+
+ tbl_rw_test_param.keys = keys;
+ tbl_rw_test_param.found = found;
+
+ for (i = 0; i < TOTAL_ENTRY; i++)
+ keys[i] = i;
+
+ return 0;
+
+err:
+ rte_free(keys);
+ rte_hash_free(handle);
+
+ return -1;
+}
+
+static int
+test_hash_readwrite_functional(int use_htm)
+{
+ unsigned int i;
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+
+ uint32_t duplicated_keys = 0;
+ uint32_t lost_keys = 0;
+ int use_jhash = 1;
+
+ rte_atomic64_init(&gcycles);
+ rte_atomic64_clear(&gcycles);
+
+ rte_atomic64_init(&ginsertions);
+ rte_atomic64_clear(&ginsertions);
+
+ if (init_params(use_htm, use_jhash) != 0)
+ goto err;
+
+ tbl_rw_test_param.num_insert =
+ TOTAL_INSERT / rte_lcore_count();
+
+ tbl_rw_test_param.rounded_tot_insert =
+ tbl_rw_test_param.num_insert
+ * rte_lcore_count();
+
+ printf("++++++++Start function tests:+++++++++\n");
+
+ /* Fire all threads. */
+ rte_eal_mp_remote_launch(test_hash_readwrite_worker,
+ NULL, CALL_MASTER);
+ rte_eal_mp_wait_lcore();
+
+ while (rte_hash_iterate(tbl_rw_test_param.h, &next_key,
+ &next_data, &iter) >= 0) {
+ /* Search for the key in the list of keys added. */
+ i = *(const uint32_t *)next_key;
+ tbl_rw_test_param.found[i]++;
+ }
+
+ for (i = 0; i < tbl_rw_test_param.rounded_tot_insert; i++) {
+ if (tbl_rw_test_param.keys[i] != RTE_RWTEST_FAIL) {
+ if (tbl_rw_test_param.found[i] > 1) {
+ duplicated_keys++;
+ break;
+ }
+ if (tbl_rw_test_param.found[i] == 0) {
+ lost_keys++;
+ printf("key %d is lost\n", i);
+ break;
+ }
+ }
+ }
+
+ if (duplicated_keys > 0) {
+ printf("%d key duplicated\n", duplicated_keys);
+ goto err_free;
+ }
+
+ if (lost_keys > 0) {
+ printf("%d key lost\n", lost_keys);
+ goto err_free;
+ }
+
+ printf("No key corrupted during read-write test.\n");
+
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gcycles) /
+ rte_atomic64_read(&ginsertions);
+
+ printf("cycles per insertion and lookup: %llu\n", cycles_per_insertion);
+
+ rte_free(tbl_rw_test_param.found);
+ rte_free(tbl_rw_test_param.keys);
+ rte_hash_free(tbl_rw_test_param.h);
+ printf("+++++++++Complete function tests+++++++++\n");
+ return 0;
+
+err_free:
+ rte_free(tbl_rw_test_param.found);
+ rte_free(tbl_rw_test_param.keys);
+ rte_hash_free(tbl_rw_test_param.h);
+err:
+ return -1;
+}
+
+static int
+test_rw_reader(void *arg)
+{
+ uint64_t i;
+ uint64_t begin, cycles;
+ uint64_t read_cnt = (uint64_t)((uintptr_t)arg);
+
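+ /* Each key was stored with its own index as the data value, so a
+ * lookup is verified by comparing the returned data against i.
+ */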
+ begin = rte_rdtsc_precise();
+ for (i = 0; i < read_cnt; i++) {
+ void *data;
+ rte_hash_lookup_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ &data);
+ if (i != (uint64_t)(uintptr_t)data) {
+ printf("lookup find wrong value %"PRIu64","
+ "%"PRIu64"\n", i,
+ (uint64_t)(uintptr_t)data);
+ break;
+ }
+ }
+
+ cycles = rte_rdtsc_precise() - begin;
+ rte_atomic64_add(&gread_cycles, cycles);
+ rte_atomic64_add(&greads, i);
+ return 0;
+}
+
+static int
+test_rw_writer(void *arg)
+{
+ uint64_t i;
+ uint32_t lcore_id = rte_lcore_id();
+ uint64_t begin, cycles;
+ int ret;
+ uint64_t start_coreid = (uint64_t)(uintptr_t)arg;
+ uint64_t offset;
+
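+ /* Writer cores fill the second half of the key space, partitioned
+ * evenly starting from start_coreid.
+ */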
+ offset = TOTAL_INSERT / 2 + (lcore_id - start_coreid)
+ * tbl_rw_test_param.num_insert;
+ begin = rte_rdtsc_precise();
+ for (i = offset; i < offset + tbl_rw_test_param.num_insert; i++) {
+ ret = rte_hash_add_key_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ (void *)((uintptr_t)i));
+ if (ret < 0) {
+ printf("writer failed %"PRIu64"\n", i);
+ break;
+ }
+ }
+
+ cycles = rte_rdtsc_precise() - begin;
+ rte_atomic64_add(&gwrite_cycles, cycles);
+ rte_atomic64_add(&gwrites, tbl_rw_test_param.num_insert);
+ return 0;
+}
+
+static int
+test_hash_readwrite_perf(struct perf *perf_results, int use_htm,
+ int reader_faster)
+{
+ unsigned int n;
+ int ret;
+ int start_coreid;
+ uint64_t i, read_cnt;
+
+ const void *next_key;
+ void *next_data;
+ uint32_t iter = 0;
+ int use_jhash = 0;
+
+ uint32_t duplicated_keys = 0;
+ uint32_t lost_keys = 0;
+
+ uint64_t start = 0, end = 0;
+
+ rte_atomic64_init(&greads);
+ rte_atomic64_init(&gwrites);
+ rte_atomic64_clear(&gwrites);
+ rte_atomic64_clear(&greads);
+
+ rte_atomic64_init(&gread_cycles);
+ rte_atomic64_clear(&gread_cycles);
+ rte_atomic64_init(&gwrite_cycles);
+ rte_atomic64_clear(&gwrite_cycles);
+
+ if (init_params(use_htm, use_jhash) != 0)
+ goto err;
+
+ /*
+ * Run either a readers-finish-faster or a writers-finish-faster test.
+ * When readers finish faster, we time the readers; when writers finish
+ * faster, we time the writers.
+ * Dividing by 10 or 2 gives experimental values chosen to vary the
+ * workload of the readers.
+ */
+ if (reader_faster) {
+ printf("++++++Start perf test: reader++++++++\n");
+ read_cnt = TOTAL_INSERT / 10;
+ } else {
+ printf("++++++Start perf test: writer++++++++\n");
+ read_cnt = TOTAL_INSERT / 2;
+ }
+
+ /* We first test single thread performance */
+ start = rte_rdtsc_precise();
+ /* Insert half of the keys */
+ for (i = 0; i < TOTAL_INSERT / 2; i++) {
+ ret = rte_hash_add_key_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ (void *)((uintptr_t)i));
+ if (ret < 0) {
+ printf("Failed to insert half of keys\n");
+ goto err_free;
+ }
+ }
+ end = rte_rdtsc_precise() - start;
+ perf_results->single_write = end / i;
+
+ start = rte_rdtsc_precise();
+
+ for (i = 0; i < read_cnt; i++) {
+ void *data;
+ rte_hash_lookup_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ &data);
+ if (i != (uint64_t)(uintptr_t)data) {
+ printf("lookup find wrong value"
+ " %"PRIu64",%"PRIu64"\n", i,
+ (uint64_t)(uintptr_t)data);
+ break;
+ }
+ }
+ end = rte_rdtsc_precise() - start;
+ perf_results->single_read = end / i;
+
+ for (n = 0; n < NUM_TEST; n++) {
+ unsigned int tot_lcore = rte_lcore_count();
+ if (tot_lcore < core_cnt[n] * 2 + 1)
+ goto finish;
+
+ rte_atomic64_clear(&greads);
+ rte_atomic64_clear(&gread_cycles);
+ rte_atomic64_clear(&gwrites);
+ rte_atomic64_clear(&gwrite_cycles);
+
+ rte_hash_reset(tbl_rw_test_param.h);
+
+ tbl_rw_test_param.num_insert = TOTAL_INSERT / 2 / core_cnt[n];
+ tbl_rw_test_param.rounded_tot_insert = TOTAL_INSERT / 2 +
+ tbl_rw_test_param.num_insert *
+ core_cnt[n];
+
+ for (i = 0; i < TOTAL_INSERT / 2; i++) {
+ ret = rte_hash_add_key_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ (void *)((uintptr_t)i));
+ if (ret < 0) {
+ printf("Failed to insert half of keys\n");
+ goto err_free;
+ }
+ }
+
+ /* Then test multiple thread case but only all reads or
+ * all writes
+ */
+
+ /* Test only reader cases */
+ for (i = 1; i <= core_cnt[n]; i++)
+ rte_eal_remote_launch(test_rw_reader,
+ (void *)(uintptr_t)read_cnt, i);
+
+ rte_eal_mp_wait_lcore();
+
+ start_coreid = i;
+ /* Test only writer cases */
+ for (; i <= core_cnt[n] * 2; i++)
+ rte_eal_remote_launch(test_rw_writer,
+ (void *)((uintptr_t)start_coreid), i);
+
+ rte_eal_mp_wait_lcore();
+
+ if (reader_faster) {
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gread_cycles) /
+ rte_atomic64_read(&greads);
+ perf_results->read_only[n] = cycles_per_insertion;
+ printf("Reader only: cycles per lookup: %llu\n",
+ cycles_per_insertion);
+ }
+
+ else {
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gwrite_cycles) /
+ rte_atomic64_read(&gwrites);
+ perf_results->write_only[n] = cycles_per_insertion;
+ printf("Writer only: cycles per writes: %llu\n",
+ cycles_per_insertion);
+ }
+
+ rte_atomic64_clear(&greads);
+ rte_atomic64_clear(&gread_cycles);
+ rte_atomic64_clear(&gwrites);
+ rte_atomic64_clear(&gwrite_cycles);
+
+ rte_hash_reset(tbl_rw_test_param.h);
+
+ for (i = 0; i < TOTAL_INSERT / 2; i++) {
+ ret = rte_hash_add_key_data(tbl_rw_test_param.h,
+ tbl_rw_test_param.keys + i,
+ (void *)((uintptr_t)i));
+ if (ret < 0) {
+ printf("Failed to insert half of keys\n");
+ goto err_free;
+ }
+ }
+
+ start_coreid = core_cnt[n] + 1;
+
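+ /* Launch the longer-running, untimed group first so the timed group
+ * (readers when reader_faster is set, writers otherwise) runs
+ * entirely under read-write contention and finishes first.
+ */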
+ if (reader_faster) {
+ for (i = core_cnt[n] + 1; i <= core_cnt[n] * 2; i++)
+ rte_eal_remote_launch(test_rw_writer,
+ (void *)((uintptr_t)start_coreid), i);
+ for (i = 1; i <= core_cnt[n]; i++)
+ rte_eal_remote_launch(test_rw_reader,
+ (void *)(uintptr_t)read_cnt, i);
+ } else {
+ for (i = 1; i <= core_cnt[n]; i++)
+ rte_eal_remote_launch(test_rw_reader,
+ (void *)(uintptr_t)read_cnt, i);
+ for (; i <= core_cnt[n] * 2; i++)
+ rte_eal_remote_launch(test_rw_writer,
+ (void *)((uintptr_t)start_coreid), i);
+ }
+
+ rte_eal_mp_wait_lcore();
+
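+ /* Tally how often each key shows up in the table; a key seen twice
+ * or not at all indicates corruption.
+ */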
+ while (rte_hash_iterate(tbl_rw_test_param.h,
+ &next_key, &next_data, &iter) >= 0) {
+ /* Search for the key in the list of keys added. */
+ i = *(const uint32_t *)next_key;
+ tbl_rw_test_param.found[i]++;
+ }
+
+ for (i = 0; i < tbl_rw_test_param.rounded_tot_insert; i++) {
+ if (tbl_rw_test_param.keys[i] != RTE_RWTEST_FAIL) {
+ if (tbl_rw_test_param.found[i] > 1) {
+ duplicated_keys++;
+ break;
+ }
+ if (tbl_rw_test_param.found[i] == 0) {
+ lost_keys++;
+ printf("key %"PRIu64" is lost\n", i);
+ break;
+ }
+ }
+ }
+
+ if (duplicated_keys > 0) {
+ printf("%d key duplicated\n", duplicated_keys);
+ goto err_free;
+ }
+
+ if (lost_keys > 0) {
+ printf("%d key lost\n", lost_keys);
+ goto err_free;
+ }
+
+ printf("No key corrupted during read-write test.\n");
+
+ if (reader_faster) {
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gread_cycles) /
+ rte_atomic64_read(&greads);
+ perf_results->read_write_r[n] = cycles_per_insertion;
+ printf("Read-write cycles per lookup: %llu\n",
+ cycles_per_insertion);
+ }
+
+ else {
+ unsigned long long int cycles_per_insertion =
+ rte_atomic64_read(&gwrite_cycles) /
+ rte_atomic64_read(&gwrites);
+ perf_results->read_write_w[n] = cycles_per_insertion;
+ printf("Read-write cycles per writes: %llu\n",
+ cycles_per_insertion);
+ }
+ }
+
+finish:
+ rte_free(tbl_rw_test_param.found);
+ rte_free(tbl_rw_test_param.keys);
+ rte_hash_free(tbl_rw_test_param.h);
+ return 0;
+
+err_free:
+ rte_free(tbl_rw_test_param.found);
+ rte_free(tbl_rw_test_param.keys);
+ rte_hash_free(tbl_rw_test_param.h);
+
+err:
+ return -1;
+}
+
+static int
+test_hash_readwrite_main(void)
+{
+ /*
+ * Variables used to choose different tests.
+ * use_htm indicates if hardware transactional memory should be used.
+ * reader_faster indicates if the reader threads should finish earlier
+ * than the writer threads. This selects whether the reader threads
+ * or the writer threads are timed for the performance numbers.
+ */
+ int use_htm, reader_faster;
+
+ if (rte_lcore_count() == 1) {
+ printf("More than one lcore is required "
+ "to do read write test\n");
+ return 0;
+ }
+
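+ /* use the environment's locale so large numbers print readably */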
+ setlocale(LC_NUMERIC, "");
+
+ if (rte_tm_supported()) {
+ printf("Hardware transactional memory (lock elision) "
+ "is supported\n");
+
+ printf("Test read-write with Hardware transactional memory\n");
+
+ use_htm = 1;
+ if (test_hash_readwrite_functional(use_htm) < 0)
+ return -1;
+
+ reader_faster = 1;
+ if (test_hash_readwrite_perf(&htm_results, use_htm,
+ reader_faster) < 0)
+ return -1;
+
+ reader_faster = 0;
+ if (test_hash_readwrite_perf(&htm_results, use_htm,
+ reader_faster) < 0)
+ return -1;
+ } else {
+ printf("Hardware transactional memory (lock elision) "
+ "is NOT supported\n");
+ }
+
+ printf("Test read-write without Hardware transactional memory\n");
+ use_htm = 0;
+ if (test_hash_readwrite_functional(use_htm) < 0)
+ return -1;
+ reader_faster = 1;
+ if (test_hash_readwrite_perf(&non_htm_results, use_htm,
+ reader_faster) < 0)
+ return -1;
+ reader_faster = 0;
+ if (test_hash_readwrite_perf(&non_htm_results, use_htm,
+ reader_faster) < 0)
+ return -1;
+
+ printf("Results summary:\n");
+
+ int i;
+
+ printf("single read: %u\n", htm_results.single_read);
+ printf("single write: %u\n", htm_results.single_write);
+ for (i = 0; i < NUM_TEST; i++) {
+ printf("core_cnt: %u\n", core_cnt[i]);
+ printf("HTM:\n");
+ printf("read only: %u\n", htm_results.read_only[i]);
+ printf("write only: %u\n", htm_results.write_only[i]);
+ printf("read-write read: %u\n", htm_results.read_write_r[i]);
+ printf("read-write write: %u\n", htm_results.read_write_w[i]);
+
+ printf("non HTM:\n");
+ printf("read only: %u\n", non_htm_results.read_only[i]);
+ printf("write only: %u\n", non_htm_results.write_only[i]);
+ printf("read-write read: %u\n",
+ non_htm_results.read_write_r[i]);
+ printf("read-write write: %u\n",
+ non_htm_results.read_write_w[i]);
+ }
+
+ return 0;
+}
+
+REGISTER_TEST_COMMAND(hash_readwrite_autotest, test_hash_readwrite_main);
diff --git a/test/test/test_interrupts.c b/test/test/test_interrupts.c
index 31a70a0c..dc19175d 100644
--- a/test/test/test_interrupts.c
+++ b/test/test/test_interrupts.c
@@ -20,6 +20,7 @@ enum test_interrupt_handle_type {
TEST_INTERRUPT_HANDLE_VALID,
TEST_INTERRUPT_HANDLE_VALID_UIO,
TEST_INTERRUPT_HANDLE_VALID_ALARM,
+ TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT,
TEST_INTERRUPT_HANDLE_CASE1,
TEST_INTERRUPT_HANDLE_MAX
};
@@ -80,6 +81,10 @@ test_interrupt_init(void)
intr_handles[TEST_INTERRUPT_HANDLE_VALID_ALARM].type =
RTE_INTR_HANDLE_ALARM;
+ intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT].fd = pfds.readfd;
+ intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT].type =
+ RTE_INTR_HANDLE_DEV_EVENT;
+
intr_handles[TEST_INTERRUPT_HANDLE_CASE1].fd = pfds.writefd;
intr_handles[TEST_INTERRUPT_HANDLE_CASE1].type = RTE_INTR_HANDLE_UIO;
@@ -250,6 +255,14 @@ test_interrupt_enable(void)
return -1;
}
+ /* enabling a device event intr_handle is expected to fail */
+ test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT];
+ if (rte_intr_enable(&test_intr_handle) == 0) {
+ printf("unexpectedly enable a specific intr_handle "
+ "successfully\n");
+ return -1;
+ }
+
/* check with valid handler and its type */
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1];
if (rte_intr_enable(&test_intr_handle) < 0) {
@@ -306,6 +319,14 @@ test_interrupt_disable(void)
return -1;
}
+ /* disabling a device event intr_handle is expected to fail */
+ test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT];
+ if (rte_intr_disable(&test_intr_handle) == 0) {
+ printf("unexpectedly disable a specific intr_handle "
+ "successfully\n");
+ return -1;
+ }
+
/* check with valid handler and its type */
test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1];
if (rte_intr_disable(&test_intr_handle) < 0) {
@@ -393,9 +414,17 @@ test_interrupt(void)
goto out;
}
+ printf("Check valid device event interrupt full path\n");
+ if (test_interrupt_full_path_check(
+ TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT) < 0) {
+ printf("failure occurred during checking valid device event "
+ "interrupt full path\n");
+ goto out;
+ }
+
printf("Check valid alarm interrupt full path\n");
- if (test_interrupt_full_path_check(TEST_INTERRUPT_HANDLE_VALID_ALARM)
- < 0) {
+ if (test_interrupt_full_path_check(
+ TEST_INTERRUPT_HANDLE_VALID_ALARM) < 0) {
printf("failure occurred during checking valid alarm "
"interrupt full path\n");
goto out;
@@ -513,6 +542,12 @@ out:
rte_intr_callback_unregister(&test_intr_handle,
test_interrupt_callback_1, (void *)-1);
+ test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID_DEV_EVENT];
+ rte_intr_callback_unregister(&test_intr_handle,
+ test_interrupt_callback, (void *)-1);
+ rte_intr_callback_unregister(&test_intr_handle,
+ test_interrupt_callback_1, (void *)-1);
+
rte_delay_ms(2 * TEST_INTERRUPT_CHECK_INTERVAL);
/* deinit */
test_interrupt_deinit();
diff --git a/test/test/test_kni.c b/test/test/test_kni.c
index e4839cdb..1b876719 100644
--- a/test/test/test_kni.c
+++ b/test/test/test_kni.c
@@ -71,11 +71,7 @@ static const struct rte_eth_txconf tx_conf = {
static const struct rte_eth_conf port_conf = {
.rxmode = {
- .header_split = 0,
- .hw_ip_checksum = 0,
- .hw_vlan_filter = 0,
- .jumbo_frame = 0,
- .hw_strip_crc = 1,
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_DCB_NONE,
@@ -357,6 +353,8 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
struct rte_kni_conf conf;
struct rte_eth_dev_info info;
struct rte_kni_ops ops;
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus = NULL;
if (!mp)
return -1;
@@ -366,8 +364,13 @@ test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
memset(&ops, 0, sizeof(ops));
rte_eth_dev_info_get(port_id, &info);
- conf.addr = info.pci_dev->addr;
- conf.id = info.pci_dev->id;
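+ /* dev_info no longer carries pci_dev; look up the bus from the
+ * generic device and only fill in PCI details on a PCI bus
+ */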
+ if (info.device)
+ bus = rte_bus_find_by_device(info.device);
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(info.device);
+ conf.addr = pci_dev->addr;
+ conf.id = pci_dev->id;
+ }
snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT);
/* core id 1 configured for kernel thread */
@@ -465,6 +468,8 @@ test_kni(void)
struct rte_kni_conf conf;
struct rte_eth_dev_info info;
struct rte_kni_ops ops;
+ const struct rte_pci_device *pci_dev;
+ const struct rte_bus *bus;
 /* Initialize KNI subsystem */
rte_kni_init(KNI_TEST_MAX_PORTS);
@@ -480,7 +485,7 @@ test_kni(void)
return -1;
}
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0) {
printf("no supported nic port found\n");
return -1;
@@ -523,8 +528,15 @@ test_kni(void)
memset(&conf, 0, sizeof(conf));
memset(&ops, 0, sizeof(ops));
rte_eth_dev_info_get(port_id, &info);
- conf.addr = info.pci_dev->addr;
- conf.id = info.pci_dev->id;
+ if (info.device)
+ bus = rte_bus_find_by_device(info.device);
+ else
+ bus = NULL;
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(info.device);
+ conf.addr = pci_dev->addr;
+ conf.id = pci_dev->id;
+ }
conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
@@ -552,8 +564,15 @@ test_kni(void)
memset(&info, 0, sizeof(info));
memset(&ops, 0, sizeof(ops));
rte_eth_dev_info_get(port_id, &info);
- conf.addr = info.pci_dev->addr;
- conf.id = info.pci_dev->id;
+ if (info.device)
+ bus = rte_bus_find_by_device(info.device);
+ else
+ bus = NULL;
+ if (bus && !strcmp(bus->name, "pci")) {
+ pci_dev = RTE_DEV_TO_PCI(info.device);
+ conf.addr = pci_dev->addr;
+ conf.id = pci_dev->id;
+ }
conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
diff --git a/test/test/test_link_bonding.c b/test/test/test_link_bonding.c
index 0ffd6509..0fe1d78e 100644
--- a/test/test/test_link_bonding.c
+++ b/test/test/test_link_bonding.c
@@ -43,9 +43,6 @@
#define TX_HTHRESH 0
#define TX_WTHRESH 0
#define TX_RSBIT_THRESH 32
-#define TX_Q_FLAGS (ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOVLANOFFL |\
- ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
- ETH_TXQ_FLAGS_NOXSUMTCP)
#define MBUF_CACHE_SIZE (250)
#define BURST_SIZE (32)
@@ -135,35 +132,11 @@ static uint16_t dst_port_1 = 2024;
static uint16_t vlan_id = 0x100;
-struct rte_eth_rxmode rx_mode = {
- .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled. */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
- .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
- .hw_vlan_strip = 1, /**< VLAN strip enabled. */
- .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
- .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
-};
-
-struct rte_fdir_conf fdir_conf = {
- .mode = RTE_FDIR_MODE_NONE,
- .pballoc = RTE_FDIR_PBALLOC_64K,
- .status = RTE_FDIR_REPORT_STATUS,
- .drop_queue = 127,
-};
-
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
+ .max_rx_pkt_len = ETHER_MAX_LEN,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -189,8 +162,6 @@ static struct rte_eth_txconf tx_conf_default = {
},
.tx_free_thresh = TX_FREE_THRESH,
.tx_rs_thresh = TX_RSBIT_THRESH,
- .txq_flags = TX_Q_FLAGS
-
};
static void free_virtualpmd_tx_queue(void);
diff --git a/test/test/test_link_bonding_mode4.c b/test/test/test_link_bonding_mode4.c
index 426877a2..9163f631 100644
--- a/test/test/test_link_bonding_mode4.c
+++ b/test/test/test_link_bonding_mode4.c
@@ -110,11 +110,7 @@ static struct rte_eth_conf default_pmd_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -425,7 +421,7 @@ test_setup(void)
TEST_ASSERT(retval >= 0,
"Failed to create ring ethdev '%s'\n", name);
- port->port_id = rte_eth_dev_count() - 1;
+ port->port_id = rte_eth_dev_count_avail() - 1;
}
retval = configure_ethdev(port->port_id, 1);
diff --git a/test/test/test_link_bonding_rssconf.c b/test/test/test_link_bonding_rssconf.c
index 4cc08f5a..d82de2ce 100644
--- a/test/test/test_link_bonding_rssconf.c
+++ b/test/test/test_link_bonding_rssconf.c
@@ -83,11 +83,6 @@ static struct rte_eth_conf default_pmd_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -100,11 +95,6 @@ static struct rte_eth_conf rss_pmd_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -521,7 +511,7 @@ test_setup(void)
FOR_EACH_PORT(n, port) {
port = &test_params.slave_ports[n];
- port_id = rte_eth_dev_count();
+ port_id = rte_eth_dev_count_avail();
snprintf(name, sizeof(name), SLAVE_DEV_NAME_FMT, port_id);
retval = rte_vdev_init(name, "size=64,copy=0");
diff --git a/test/test/test_malloc.c b/test/test/test_malloc.c
index d23192cf..4b5abb4e 100644
--- a/test/test/test_malloc.c
+++ b/test/test/test_malloc.c
@@ -12,6 +12,7 @@
#include <rte_common.h>
#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
@@ -378,7 +379,7 @@ test_realloc(void)
printf("NULL pointer returned from rte_zmalloc\n");
return -1;
}
- snprintf(ptr1, size1, "%s" ,hello_str);
+ strlcpy(ptr1, hello_str, size1);
char *ptr2 = rte_realloc(ptr1, size2, RTE_CACHE_LINE_SIZE);
if (!ptr2){
rte_free(ptr1);
@@ -705,20 +706,22 @@ err_return:
return -1;
}
+static int
+check_socket_mem(const struct rte_memseg_list *msl, void *arg)
+{
+ int32_t *socket = arg;
+
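+ /* a non-zero return stops the walk and is propagated to the caller */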
+ return *socket == msl->socket_id;
+}
+
/* Check if memory is available on a specific socket */
static int
is_mem_on_socket(int32_t socket)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- unsigned i;
-
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (socket == ms[i].socket_id)
- return 1;
- }
- return 0;
+ return rte_memseg_list_walk(check_socket_mem, &socket);
}
+
/*
* Find what socket a memory address is on. Only works for addresses within
* memsegs, not heap or stack...
@@ -726,16 +729,9 @@ is_mem_on_socket(int32_t socket)
static int32_t
addr_to_socket(void * addr)
{
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- unsigned i;
+ const struct rte_memseg *ms = rte_mem_virt2memseg(addr, NULL);
+ return ms == NULL ? -1 : ms->socket_id;
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if ((ms[i].addr <= addr) &&
- ((uintptr_t)addr <
- ((uintptr_t)ms[i].addr + (uintptr_t)ms[i].len)))
- return ms[i].socket_id;
- }
- return -1;
}
/* Test using rte_[c|m|zm]alloc_socket() on a specific socket */
diff --git a/test/test/test_memory.c b/test/test/test_memory.c
index 972321f1..b96bca77 100644
--- a/test/test/test_memory.c
+++ b/test/test/test_memory.c
@@ -5,8 +5,11 @@
#include <stdio.h>
#include <stdint.h>
+#include <rte_eal.h>
+#include <rte_eal_memconfig.h>
#include <rte_memory.h>
#include <rte_common.h>
+#include <rte_memzone.h>
#include "test.h"
@@ -23,12 +26,21 @@
*/
static int
+check_mem(const struct rte_memseg_list *msl __rte_unused,
+ const struct rte_memseg *ms, void *arg __rte_unused)
+{
+ volatile uint8_t *mem = (volatile uint8_t *) ms->addr;
+ size_t i, max = ms->len;
+
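+ /* read every byte of the segment (should not segfault) */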
+ for (i = 0; i < max; i++, mem++)
+ *mem;
+ return 0;
+}
+
+static int
test_memory(void)
{
uint64_t s;
- unsigned i;
- size_t j;
- const struct rte_memseg *mem;
/*
* dump the mapped memory: the python-expect script checks
@@ -45,14 +57,7 @@ test_memory(void)
}
/* try to read memory (should not segfault) */
- mem = rte_eal_get_physmem_layout();
- for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) {
-
- /* check memory */
- for (j = 0; j<mem[i].len; j++) {
- *((volatile uint8_t *) mem[i].addr + j);
- }
- }
+ rte_memseg_walk(check_mem, NULL);
return 0;
}
diff --git a/test/test/test_mempool.c b/test/test/test_mempool.c
index 63f921e2..eebb1f24 100644
--- a/test/test/test_mempool.c
+++ b/test/test/test_mempool.c
@@ -327,17 +327,17 @@ test_mempool_sp_sc(void)
}
if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
printf("Cannot lookup mempool from its name\n");
- rte_mempool_free(mp_spsc);
- RET_ERR();
+ ret = -1;
+ goto err;
}
lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
if (lcore_next >= RTE_MAX_LCORE) {
- rte_mempool_free(mp_spsc);
- RET_ERR();
+ ret = -1;
+ goto err;
}
if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
- rte_mempool_free(mp_spsc);
- RET_ERR();
+ ret = -1;
+ goto err;
}
rte_spinlock_init(&scsp_spinlock);
memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
@@ -348,7 +348,10 @@ test_mempool_sp_sc(void)
if (rte_eal_wait_lcore(lcore_next) < 0)
ret = -1;
+
+err:
rte_mempool_free(mp_spsc);
+ mp_spsc = NULL;
return ret;
}
@@ -444,34 +447,6 @@ test_mempool_same_name_twice_creation(void)
return 0;
}
-/*
- * Basic test for mempool_xmem functions.
- */
-static int
-test_mempool_xmem_misc(void)
-{
- uint32_t elt_num, total_size;
- size_t sz;
- ssize_t usz;
-
- elt_num = MAX_KEEP;
- total_size = rte_mempool_calc_obj_size(MEMPOOL_ELT_SIZE, 0, NULL);
- sz = rte_mempool_xmem_size(elt_num, total_size, MEMPOOL_PG_SHIFT_MAX,
- 0);
-
- usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
- MEMPOOL_PG_SHIFT_MAX, 0);
-
- if (sz != (size_t)usz) {
- printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
- "returns: %#zx, while expected: %#zx;\n",
- __func__, elt_num, total_size, sz, (size_t)usz);
- return -1;
- }
-
- return 0;
-}
-
static void
walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
{
@@ -596,9 +571,6 @@ test_mempool(void)
if (test_mempool_same_name_twice_creation() < 0)
goto err;
- if (test_mempool_xmem_misc() < 0)
- goto err;
-
/* test the stack handler */
if (test_mempool_basic(mp_stack, 1) < 0)
goto err;
diff --git a/test/test/test_memzone.c b/test/test/test_memzone.c
index 8ece1ac8..452d7cc5 100644
--- a/test/test/test_memzone.c
+++ b/test/test/test_memzone.c
@@ -104,28 +104,47 @@ test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
return 0;
}
+struct walk_arg {
+ int hugepage_2MB_avail;
+ int hugepage_1GB_avail;
+ int hugepage_16MB_avail;
+ int hugepage_16GB_avail;
+};
+static int
+find_available_pagesz(const struct rte_memseg_list *msl, void *arg)
+{
+ struct walk_arg *wa = arg;
+
+ if (msl->page_sz == RTE_PGSIZE_2M)
+ wa->hugepage_2MB_avail = 1;
+ if (msl->page_sz == RTE_PGSIZE_1G)
+ wa->hugepage_1GB_avail = 1;
+ if (msl->page_sz == RTE_PGSIZE_16M)
+ wa->hugepage_16MB_avail = 1;
+ if (msl->page_sz == RTE_PGSIZE_16G)
+ wa->hugepage_16GB_avail = 1;
+
+ return 0;
+}
+
static int
test_memzone_reserve_flags(void)
{
const struct rte_memzone *mz;
- const struct rte_memseg *ms;
- int hugepage_2MB_avail = 0;
- int hugepage_1GB_avail = 0;
- int hugepage_16MB_avail = 0;
- int hugepage_16GB_avail = 0;
+ struct walk_arg wa;
+ int hugepage_2MB_avail, hugepage_1GB_avail;
+ int hugepage_16MB_avail, hugepage_16GB_avail;
const size_t size = 100;
- int i = 0;
- ms = rte_eal_get_physmem_layout();
- for (i = 0; i < RTE_MAX_MEMSEG; i++) {
- if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
- hugepage_2MB_avail = 1;
- if (ms[i].hugepage_sz == RTE_PGSIZE_1G)
- hugepage_1GB_avail = 1;
- if (ms[i].hugepage_sz == RTE_PGSIZE_16M)
- hugepage_16MB_avail = 1;
- if (ms[i].hugepage_sz == RTE_PGSIZE_16G)
- hugepage_16GB_avail = 1;
- }
+
+ memset(&wa, 0, sizeof(wa));
+
+ rte_memseg_list_walk(find_available_pagesz, &wa);
+
+ hugepage_2MB_avail = wa.hugepage_2MB_avail;
+ hugepage_1GB_avail = wa.hugepage_1GB_avail;
+ hugepage_16MB_avail = wa.hugepage_16MB_avail;
+ hugepage_16GB_avail = wa.hugepage_16GB_avail;
+
/* Display the availability of 2MB ,1GB, 16MB, 16GB pages */
if (hugepage_2MB_avail)
printf("2MB Huge pages available\n");
@@ -448,61 +467,69 @@ test_memzone_reserve_flags(void)
/* Find the heap with the greatest free block size */
static size_t
-find_max_block_free_size(const unsigned _align)
+find_max_block_free_size(unsigned int align, unsigned int socket_id)
{
struct rte_malloc_socket_stats stats;
- unsigned i, align = _align;
- size_t len = 0;
+ size_t len, overhead;
- for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
- rte_malloc_get_socket_stats(i, &stats);
- if (stats.greatest_free_size > len)
- len = stats.greatest_free_size;
- }
+ rte_malloc_get_socket_stats(socket_id, &stats);
- if (align < RTE_CACHE_LINE_SIZE)
- align = RTE_CACHE_LINE_ROUNDUP(align+1);
+ len = stats.greatest_free_size;
+ overhead = MALLOC_ELEM_OVERHEAD;
- if (len <= MALLOC_ELEM_OVERHEAD + align)
+ if (len == 0)
return 0;
- return len - MALLOC_ELEM_OVERHEAD - align;
+ align = RTE_CACHE_LINE_ROUNDUP(align);
+ overhead += align;
+
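+ /* usable size is the largest free block minus the malloc element
+ * header and worst-case alignment padding
+ */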
+ if (len < overhead)
+ return 0;
+
+ return len - overhead;
}
static int
test_memzone_reserve_max(void)
{
- const struct rte_memzone *mz;
- size_t maxlen;
+ unsigned int i;
- maxlen = find_max_block_free_size(0);
+ for (i = 0; i < rte_socket_count(); i++) {
+ const struct rte_memzone *mz;
+ size_t maxlen;
+ int socket;
- if (maxlen == 0) {
- printf("There is no space left!\n");
- return 0;
- }
+ socket = rte_socket_id_by_idx(i);
+ maxlen = find_max_block_free_size(0, socket);
- mz = rte_memzone_reserve(TEST_MEMZONE_NAME("max_zone"), 0,
- SOCKET_ID_ANY, 0);
- if (mz == NULL){
- printf("Failed to reserve a big chunk of memory - %s\n",
- rte_strerror(rte_errno));
- rte_dump_physmem_layout(stdout);
- rte_memzone_dump(stdout);
- return -1;
- }
+ if (maxlen == 0) {
+ printf("There is no space left!\n");
+ return 0;
+ }
- if (mz->len != maxlen) {
- printf("Memzone reserve with 0 size did not return bigest block\n");
- printf("Expected size = %zu, actual size = %zu\n", maxlen, mz->len);
- rte_dump_physmem_layout(stdout);
- rte_memzone_dump(stdout);
- return -1;
- }
+ mz = rte_memzone_reserve(TEST_MEMZONE_NAME("max_zone"), 0,
+ socket, 0);
+ if (mz == NULL) {
+ printf("Failed to reserve a big chunk of memory - %s\n",
+ rte_strerror(rte_errno));
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
- if (rte_memzone_free(mz)) {
- printf("Fail memzone free\n");
- return -1;
+ if (mz->len != maxlen) {
+ printf("Memzone reserve with 0 size did not return bigest block\n");
+ printf("Expected size = %zu, actual size = %zu\n",
+ maxlen, mz->len);
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
}
return 0;
@@ -511,45 +538,62 @@ test_memzone_reserve_max(void)
static int
test_memzone_reserve_max_aligned(void)
{
- const struct rte_memzone *mz;
- size_t maxlen = 0;
+ unsigned int i;
- /* random alignment */
- rte_srand((unsigned)rte_rdtsc());
- const unsigned align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
+ for (i = 0; i < rte_socket_count(); i++) {
+ const struct rte_memzone *mz;
+ size_t maxlen, minlen = 0;
+ int socket;
- maxlen = find_max_block_free_size(align);
+ socket = rte_socket_id_by_idx(i);
- if (maxlen == 0) {
- printf("There is no space left for biggest %u-aligned memzone!\n", align);
- return 0;
- }
+ /* random alignment */
+ rte_srand((unsigned int)rte_rdtsc());
+ const unsigned int align = 1 << ((rte_rand() % 8) + 5); /* from 128 up to 4k alignment */
- mz = rte_memzone_reserve_aligned(TEST_MEMZONE_NAME("max_zone_aligned"),
- 0, SOCKET_ID_ANY, 0, align);
- if (mz == NULL){
- printf("Failed to reserve a big chunk of memory - %s\n",
- rte_strerror(rte_errno));
- rte_dump_physmem_layout(stdout);
- rte_memzone_dump(stdout);
- return -1;
- }
+ /* memzone size may be between size and size - align */
+ minlen = find_max_block_free_size(align, socket);
+ maxlen = find_max_block_free_size(0, socket);
- if (mz->len != maxlen) {
- printf("Memzone reserve with 0 size and alignment %u did not return"
- " bigest block\n", align);
- printf("Expected size = %zu, actual size = %zu\n",
- maxlen, mz->len);
- rte_dump_physmem_layout(stdout);
- rte_memzone_dump(stdout);
- return -1;
- }
+ if (minlen == 0 || maxlen == 0) {
+ printf("There is no space left for biggest %u-aligned memzone!\n",
+ align);
+ return 0;
+ }
- if (rte_memzone_free(mz)) {
- printf("Fail memzone free\n");
- return -1;
- }
+ mz = rte_memzone_reserve_aligned(
+ TEST_MEMZONE_NAME("max_zone_aligned"),
+ 0, socket, 0, align);
+ if (mz == NULL) {
+ printf("Failed to reserve a big chunk of memory - %s\n",
+ rte_strerror(rte_errno));
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+ if (mz->addr != RTE_PTR_ALIGN(mz->addr, align)) {
+ printf("Memzone reserve with 0 size and alignment %u did not return aligned block\n",
+ align);
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+ if (mz->len < minlen || mz->len > maxlen) {
+ printf("Memzone reserve with 0 size and alignment %u did not return"
+ " bigest block\n", align);
+ printf("Expected size = %zu-%zu, actual size = %zu\n",
+ minlen, maxlen, mz->len);
+ rte_dump_physmem_layout(stdout);
+ rte_memzone_dump(stdout);
+ return -1;
+ }
+
+ if (rte_memzone_free(mz)) {
+ printf("Fail memzone free\n");
+ return -1;
+ }
+ }
return 0;
}
@@ -890,7 +934,7 @@ test_memzone_basic(void)
const struct rte_memzone *mz;
int memzone_cnt_after, memzone_cnt_expected;
int memzone_cnt_before =
- rte_eal_get_configuration()->mem_config->memzone_cnt;
+ rte_eal_get_configuration()->mem_config->memzones.count;
memzone1 = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
SOCKET_ID_ANY, 0);
@@ -914,7 +958,7 @@ test_memzone_basic(void)
(memzone3 != NULL) + (memzone4 != NULL);
memzone_cnt_after =
- rte_eal_get_configuration()->mem_config->memzone_cnt;
+ rte_eal_get_configuration()->mem_config->memzones.count;
if (memzone_cnt_after != memzone_cnt_expected)
return -1;
@@ -993,7 +1037,7 @@ test_memzone_basic(void)
}
memzone_cnt_after =
- rte_eal_get_configuration()->mem_config->memzone_cnt;
+ rte_eal_get_configuration()->mem_config->memzones.count;
if (memzone_cnt_after != memzone_cnt_before)
return -1;
@@ -1014,7 +1058,8 @@ static int
test_memzone(void)
{
/* take note of how many memzones were allocated before running */
- int memzone_cnt = rte_eal_get_configuration()->mem_config->memzone_cnt;
+ int memzone_cnt =
+ rte_eal_get_configuration()->mem_config->memzones.count;
printf("test basic memzone API\n");
if (test_memzone_basic() < 0)
diff --git a/test/test/test_meter.c b/test/test/test_meter.c
index 9f6abf99..8bb47e75 100644
--- a/test/test/test_meter.c
+++ b/test/test/test_meter.c
@@ -53,43 +53,43 @@ static inline int
tm_test_srtcm_config(void)
{
#define SRTCM_CFG_MSG "srtcm_config"
- struct rte_meter_srtcm sm;
+ struct rte_meter_srtcm_profile sp;
struct rte_meter_srtcm_params sparams1;
/* invalid parameter test */
- if(rte_meter_srtcm_config(NULL, NULL) == 0)
+ if (rte_meter_srtcm_profile_config(NULL, NULL) == 0)
melog(SRTCM_CFG_MSG);
- if(rte_meter_srtcm_config(&sm, NULL) == 0)
+ if (rte_meter_srtcm_profile_config(&sp, NULL) == 0)
melog(SRTCM_CFG_MSG);
- if(rte_meter_srtcm_config(NULL, &sparams) == 0)
+ if (rte_meter_srtcm_profile_config(NULL, &sparams) == 0)
melog(SRTCM_CFG_MSG);
/* cbs and ebs can't both be zero */
sparams1 = sparams;
sparams1.cbs = 0;
sparams1.ebs = 0;
- if(rte_meter_srtcm_config(&sm, &sparams1) == 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams1) == 0)
melog(SRTCM_CFG_MSG);
/* cir should never be 0 */
sparams1 = sparams;
sparams1.cir = 0;
- if(rte_meter_srtcm_config(&sm, &sparams1) == 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams1) == 0)
melog(SRTCM_CFG_MSG);
/* one of ebs and cbs can be zero, should be successful */
sparams1 = sparams;
sparams1.ebs = 0;
- if(rte_meter_srtcm_config(&sm, &sparams1) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams1) != 0)
melog(SRTCM_CFG_MSG);
sparams1 = sparams;
sparams1.cbs = 0;
- if(rte_meter_srtcm_config(&sm, &sparams1) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams1) != 0)
melog(SRTCM_CFG_MSG);
/* usual parameter, should be successful */
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
melog(SRTCM_CFG_MSG);
return 0;
@@ -102,47 +102,47 @@ tm_test_srtcm_config(void)
static inline int
tm_test_trtcm_config(void)
{
- struct rte_meter_trtcm tm;
+ struct rte_meter_trtcm_profile tp;
struct rte_meter_trtcm_params tparams1;
#define TRTCM_CFG_MSG "trtcm_config"
/* invalid parameter test */
- if(rte_meter_trtcm_config(NULL, NULL) == 0)
+ if (rte_meter_trtcm_profile_config(NULL, NULL) == 0)
melog(TRTCM_CFG_MSG);
- if(rte_meter_trtcm_config(&tm, NULL) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, NULL) == 0)
melog(TRTCM_CFG_MSG);
- if(rte_meter_trtcm_config(NULL, &tparams) == 0)
+ if (rte_meter_trtcm_profile_config(NULL, &tparams) == 0)
melog(TRTCM_CFG_MSG);
/* cir, cbs, pir and pbs never be zero */
tparams1 = tparams;
tparams1.cir = 0;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG);
tparams1 = tparams;
tparams1.cbs = 0;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG);
tparams1 = tparams;
tparams1.pbs = 0;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG);
tparams1 = tparams;
tparams1.pir = 0;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG);
/* pir should be greater or equal to cir */
tparams1 = tparams;
tparams1.pir = tparams1.cir - 1;
- if(rte_meter_trtcm_config(&tm, &tparams1) == 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams1) == 0)
melog(TRTCM_CFG_MSG" pir < cir test");
/* usual parameter, should be successful */
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
melog(TRTCM_CFG_MSG);
return 0;
@@ -155,41 +155,50 @@ static inline int
tm_test_srtcm_color_blind_check(void)
{
#define SRTCM_BLIND_CHECK_MSG "srtcm_blind_check"
+ struct rte_meter_srtcm_profile sp;
struct rte_meter_srtcm sm;
uint64_t time;
uint64_t hz = rte_get_tsc_hz();
/* Test green */
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_blind_check(
- &sm, time, TM_TEST_SRTCM_CBS_DF - 1)
+ if (rte_meter_srtcm_color_blind_check(
+ &sm, &sp, time, TM_TEST_SRTCM_CBS_DF - 1)
!= e_RTE_METER_GREEN)
melog(SRTCM_BLIND_CHECK_MSG" GREEN");
/* Test yellow */
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_blind_check(
- &sm, time, TM_TEST_SRTCM_CBS_DF + 1)
+ if (rte_meter_srtcm_color_blind_check(
+ &sm, &sp, time, TM_TEST_SRTCM_CBS_DF + 1)
!= e_RTE_METER_YELLOW)
melog(SRTCM_BLIND_CHECK_MSG" YELLOW");
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_blind_check(
- &sm, time, (uint32_t)sm.ebs - 1) != e_RTE_METER_YELLOW)
+ if (rte_meter_srtcm_color_blind_check(
+ &sm, &sp, time, (uint32_t)sp.ebs - 1) != e_RTE_METER_YELLOW)
melog(SRTCM_BLIND_CHECK_MSG" YELLOW");
/* Test red */
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_blind_check(
- &sm, time, TM_TEST_SRTCM_EBS_DF + 1)
+ if (rte_meter_srtcm_color_blind_check(
+ &sm, &sp, time, TM_TEST_SRTCM_EBS_DF + 1)
!= e_RTE_METER_RED)
melog(SRTCM_BLIND_CHECK_MSG" RED");
@@ -206,41 +215,50 @@ tm_test_trtcm_color_blind_check(void)
#define TRTCM_BLIND_CHECK_MSG "trtcm_blind_check"
uint64_t time;
+ struct rte_meter_trtcm_profile tp;
struct rte_meter_trtcm tm;
uint64_t hz = rte_get_tsc_hz();
/* Test green */
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_blind_check(
- &tm, time, TM_TEST_TRTCM_CBS_DF - 1)
+ if (rte_meter_trtcm_color_blind_check(
+ &tm, &tp, time, TM_TEST_TRTCM_CBS_DF - 1)
!= e_RTE_METER_GREEN)
melog(TRTCM_BLIND_CHECK_MSG" GREEN");
/* Test yellow */
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_blind_check(
- &tm, time, TM_TEST_TRTCM_CBS_DF + 1)
+ if (rte_meter_trtcm_color_blind_check(
+ &tm, &tp, time, TM_TEST_TRTCM_CBS_DF + 1)
!= e_RTE_METER_YELLOW)
melog(TRTCM_BLIND_CHECK_MSG" YELLOW");
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_blind_check(
- &tm, time, TM_TEST_TRTCM_PBS_DF - 1)
+ if (rte_meter_trtcm_color_blind_check(
+ &tm, &tp, time, TM_TEST_TRTCM_PBS_DF - 1)
!= e_RTE_METER_YELLOW)
melog(TRTCM_BLIND_CHECK_MSG" YELLOW");
/* Test red */
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_BLIND_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_BLIND_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_blind_check(
- &tm, time, TM_TEST_TRTCM_PBS_DF + 1)
+ if (rte_meter_trtcm_color_blind_check(
+ &tm, &tp, time, TM_TEST_TRTCM_PBS_DF + 1)
!= e_RTE_METER_RED)
melog(TRTCM_BLIND_CHECK_MSG" RED");
@@ -262,36 +280,45 @@ tm_test_srtcm_aware_check
(enum rte_meter_color in[4], enum rte_meter_color out[4])
{
#define SRTCM_AWARE_CHECK_MSG "srtcm_aware_check"
+ struct rte_meter_srtcm_profile sp;
struct rte_meter_srtcm sm;
uint64_t time;
uint64_t hz = rte_get_tsc_hz();
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_aware_check(
- &sm, time, TM_TEST_SRTCM_CBS_DF - 1, in[0]) != out[0])
+ if (rte_meter_srtcm_color_aware_check(
+ &sm, &sp, time, TM_TEST_SRTCM_CBS_DF - 1, in[0]) != out[0])
melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[0], out[0]);
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_aware_check(
- &sm, time, TM_TEST_SRTCM_CBS_DF + 1, in[1]) != out[1])
+ if (rte_meter_srtcm_color_aware_check(
+ &sm, &sp, time, TM_TEST_SRTCM_CBS_DF + 1, in[1]) != out[1])
melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[1], out[1]);
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_aware_check(
- &sm, time, TM_TEST_SRTCM_EBS_DF - 1, in[2]) != out[2])
+ if (rte_meter_srtcm_color_aware_check(
+ &sm, &sp, time, TM_TEST_SRTCM_EBS_DF - 1, in[2]) != out[2])
melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[2], out[2]);
- if(rte_meter_srtcm_config(&sm, &sparams) != 0)
+ if (rte_meter_srtcm_profile_config(&sp, &sparams) != 0)
+ melog(SRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_srtcm_config(&sm, &sp) != 0)
melog(SRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_srtcm_color_aware_check(
- &sm, time, TM_TEST_SRTCM_EBS_DF + 1, in[3]) != out[3])
+ if (rte_meter_srtcm_color_aware_check(
+ &sm, &sp, time, TM_TEST_SRTCM_EBS_DF + 1, in[3]) != out[3])
melog(SRTCM_AWARE_CHECK_MSG" %u:%u", in[3], out[3]);
return 0;
@@ -317,7 +344,7 @@ tm_test_srtcm_color_aware_check(void)
out[1] = e_RTE_METER_YELLOW;
out[2] = e_RTE_METER_YELLOW;
out[3] = e_RTE_METER_RED;
- if(tm_test_srtcm_aware_check(in, out) != 0)
+ if (tm_test_srtcm_aware_check(in, out) != 0)
return -1;
/**
@@ -329,7 +356,7 @@ tm_test_srtcm_color_aware_check(void)
out[1] = e_RTE_METER_YELLOW;
out[2] = e_RTE_METER_YELLOW;
out[3] = e_RTE_METER_RED;
- if(tm_test_srtcm_aware_check(in, out) != 0)
+ if (tm_test_srtcm_aware_check(in, out) != 0)
return -1;
/**
@@ -341,7 +368,7 @@ tm_test_srtcm_color_aware_check(void)
out[1] = e_RTE_METER_RED;
out[2] = e_RTE_METER_RED;
out[3] = e_RTE_METER_RED;
- if(tm_test_srtcm_aware_check(in, out) != 0)
+ if (tm_test_srtcm_aware_check(in, out) != 0)
return -1;
return 0;
@@ -360,36 +387,45 @@ tm_test_trtcm_aware_check
(enum rte_meter_color in[4], enum rte_meter_color out[4])
{
#define TRTCM_AWARE_CHECK_MSG "trtcm_aware_check"
+ struct rte_meter_trtcm_profile tp;
struct rte_meter_trtcm tm;
uint64_t time;
uint64_t hz = rte_get_tsc_hz();
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_aware_check(
- &tm, time, TM_TEST_TRTCM_CBS_DF - 1, in[0]) != out[0])
+ if (rte_meter_trtcm_color_aware_check(
+ &tm, &tp, time, TM_TEST_TRTCM_CBS_DF - 1, in[0]) != out[0])
melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[0], out[0]);
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_aware_check(
- &tm, time, TM_TEST_TRTCM_CBS_DF + 1, in[1]) != out[1])
+ if (rte_meter_trtcm_color_aware_check(
+ &tm, &tp, time, TM_TEST_TRTCM_CBS_DF + 1, in[1]) != out[1])
melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[1], out[1]);
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_aware_check(
- &tm, time, TM_TEST_TRTCM_PBS_DF - 1, in[2]) != out[2])
+ if (rte_meter_trtcm_color_aware_check(
+ &tm, &tp, time, TM_TEST_TRTCM_PBS_DF - 1, in[2]) != out[2])
melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[2], out[2]);
- if(rte_meter_trtcm_config(&tm, &tparams) != 0)
+ if (rte_meter_trtcm_profile_config(&tp, &tparams) != 0)
+ melog(TRTCM_AWARE_CHECK_MSG);
+ if (rte_meter_trtcm_config(&tm, &tp) != 0)
melog(TRTCM_AWARE_CHECK_MSG);
time = rte_get_tsc_cycles() + hz;
- if(rte_meter_trtcm_color_aware_check(
- &tm, time, TM_TEST_TRTCM_PBS_DF + 1, in[3]) != out[3])
+ if (rte_meter_trtcm_color_aware_check(
+ &tm, &tp, time, TM_TEST_TRTCM_PBS_DF + 1, in[3]) != out[3])
melog(TRTCM_AWARE_CHECK_MSG" %u:%u", in[3], out[3]);
return 0;
@@ -415,7 +451,7 @@ tm_test_trtcm_color_aware_check(void)
out[1] = e_RTE_METER_YELLOW;
out[2] = e_RTE_METER_YELLOW;
out[3] = e_RTE_METER_RED;
- if(tm_test_trtcm_aware_check(in, out) != 0)
+ if (tm_test_trtcm_aware_check(in, out) != 0)
return -1;
in[0] = in[1] = in[2] = in[3] = e_RTE_METER_YELLOW;
@@ -423,7 +459,7 @@ tm_test_trtcm_color_aware_check(void)
out[1] = e_RTE_METER_YELLOW;
out[2] = e_RTE_METER_YELLOW;
out[3] = e_RTE_METER_RED;
- if(tm_test_trtcm_aware_check(in, out) != 0)
+ if (tm_test_trtcm_aware_check(in, out) != 0)
return -1;
in[0] = in[1] = in[2] = in[3] = e_RTE_METER_RED;
@@ -431,7 +467,7 @@ tm_test_trtcm_color_aware_check(void)
out[1] = e_RTE_METER_RED;
out[2] = e_RTE_METER_RED;
out[3] = e_RTE_METER_RED;
- if(tm_test_trtcm_aware_check(in, out) != 0)
+ if (tm_test_trtcm_aware_check(in, out) != 0)
return -1;
return 0;
@@ -443,22 +479,22 @@ tm_test_trtcm_color_aware_check(void)
static int
test_meter(void)
{
- if(tm_test_srtcm_config() != 0 )
+ if (tm_test_srtcm_config() != 0)
return -1;
- if(tm_test_trtcm_config() != 0 )
+ if (tm_test_trtcm_config() != 0)
return -1;
- if(tm_test_srtcm_color_blind_check() != 0)
+ if (tm_test_srtcm_color_blind_check() != 0)
return -1;
- if(tm_test_trtcm_color_blind_check()!= 0)
+ if (tm_test_trtcm_color_blind_check() != 0)
return -1;
- if(tm_test_srtcm_color_aware_check()!= 0)
+ if (tm_test_srtcm_color_aware_check() != 0)
return -1;
- if(tm_test_trtcm_color_aware_check()!= 0)
+ if (tm_test_trtcm_color_aware_check() != 0)
return -1;
return 0;
diff --git a/test/test/test_mp_secondary.c b/test/test/test_mp_secondary.c
index cc46cf4d..b597dfcd 100644
--- a/test/test/test_mp_secondary.c
+++ b/test/test/test_mp_secondary.c
@@ -50,32 +50,6 @@
#define launch_proc(ARGV) process_dup(ARGV, \
sizeof(ARGV)/(sizeof(ARGV[0])), __func__)
-#ifdef RTE_EXEC_ENV_LINUXAPP
-static char*
-get_current_prefix(char * prefix, int size)
-{
- char path[PATH_MAX] = {0};
- char buf[PATH_MAX] = {0};
-
- /* get file for config (fd is always 3) */
- snprintf(path, sizeof(path), "/proc/self/fd/%d", 3);
-
- /* return NULL on error */
- if (readlink(path, buf, sizeof(buf)) == -1)
- return NULL;
-
- /* get the basename */
- snprintf(buf, sizeof(buf), "%s", basename(buf));
-
- /* copy string all the way from second char up to start of _config */
- snprintf(prefix, size, "%.*s",
- (int)(strnlen(buf, sizeof(buf)) - sizeof("_config")),
- &buf[1]);
-
- return prefix;
-}
-#endif
-
/*
* This function is called in the primary i.e. main test, to spawn off secondary
* processes to run actual mp tests. Uses fork() and exec pair
diff --git a/test/test/test_pmd_perf.c b/test/test/test_pmd_perf.c
index 911dd762..4549965f 100644
--- a/test/test/test_pmd_perf.c
+++ b/test/test/test_pmd_perf.c
@@ -65,14 +65,7 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .hw_vlan_strip = 0, /**< VLAN strip enabled. */
- .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
- .enable_scatter = 0, /**< scatter rx disabled */
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
@@ -97,11 +90,6 @@ static struct rte_eth_txconf tx_conf = {
},
.tx_free_thresh = 32, /* Use PMD default values */
.tx_rs_thresh = 32, /* Use PMD default values */
- .txq_flags = (ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOVLANOFFL |
- ETH_TXQ_FLAGS_NOXSUMSCTP |
- ETH_TXQ_FLAGS_NOXSUMUDP |
- ETH_TXQ_FLAGS_NOXSUMTCP)
};
enum {
@@ -676,7 +664,7 @@ test_pmd_perf(void)
signal(SIGUSR1, signal_handler);
signal(SIGUSR2, signal_handler);
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
if (nb_ports < NB_ETHPORTS_USED) {
printf("At least %u port(s) used for perf. test\n",
NB_ETHPORTS_USED);
@@ -698,7 +686,7 @@ test_pmd_perf(void)
reset_count();
num = 0;
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (socketid == -1) {
socketid = rte_eth_dev_socket_id(portid);
slave_id = alloc_lcore(socketid);
@@ -791,7 +779,7 @@ test_pmd_perf(void)
return -1;
/* port tear down */
- for (portid = 0; portid < nb_ports; portid++) {
+ RTE_ETH_FOREACH_DEV(portid) {
if (socketid != rte_eth_dev_socket_id(portid))
continue;
@@ -808,38 +796,29 @@ test_set_rxtx_conf(cmdline_fixed_string_t mode)
if (!strcmp(mode, "vector")) {
/* vector rx, tx */
- tx_conf.txq_flags = 0xf01;
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.hw_ip_checksum = 0;
- port_conf.rxmode.enable_scatter = 0;
return 0;
} else if (!strcmp(mode, "scalar")) {
/* bulk alloc rx, full-featured tx */
- tx_conf.txq_flags = 0;
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.hw_ip_checksum = 1;
- port_conf.rxmode.enable_scatter = 0;
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "hybrid")) {
/* bulk alloc rx, vector tx
* when vec macro not define,
* using the same rx/tx as scalar
*/
- tx_conf.txq_flags = 0xf01;
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.hw_ip_checksum = 1;
- port_conf.rxmode.enable_scatter = 0;
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
return 0;
} else if (!strcmp(mode, "full")) {
/* full feature rx,tx pair */
- tx_conf.txq_flags = 0x0; /* must condition */
tx_conf.tx_rs_thresh = 32;
tx_conf.tx_free_thresh = 32;
- port_conf.rxmode.hw_ip_checksum = 0;
- port_conf.rxmode.enable_scatter = 1; /* must condition */
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
return 0;
}
diff --git a/test/test/test_pmd_ring.c b/test/test/test_pmd_ring.c
index 4b891014..19d7d20a 100644
--- a/test/test/test_pmd_ring.c
+++ b/test/test/test_pmd_ring.c
@@ -218,6 +218,8 @@ test_pmd_ring_pair_create_attach(int portd, int porte)
struct rte_mbuf buf, *pbuf = &buf;
struct rte_eth_conf null_conf;
+ memset(&null_conf, 0, sizeof(struct rte_eth_conf));
+
if ((rte_eth_dev_configure(portd, 1, 1, &null_conf) < 0)
|| (rte_eth_dev_configure(porte, 1, 1, &null_conf) < 0)) {
printf("Configure failed for port\n");
@@ -399,7 +401,7 @@ test_pmd_ring(void)
int port, cmdl_port0 = -1;
uint8_t nb_ports;
- nb_ports = rte_eth_dev_count();
+ nb_ports = rte_eth_dev_count_avail();
printf("nb_ports=%d\n", (int)nb_ports);
/* create the rings and eth_rings in the test code.
@@ -473,7 +475,7 @@ test_pmd_ring(void)
return -1;
/* find a port created with the --vdev=net_ring0 command line option */
- for (port = 0; port < nb_ports; port++) {
+ RTE_ETH_FOREACH_DEV(port) {
struct rte_eth_dev_info dev_info;
rte_eth_dev_info_get(port, &dev_info);
diff --git a/test/test/test_power_acpi_cpufreq.c b/test/test/test_power_acpi_cpufreq.c
index 3bfd0335..22e541d6 100644
--- a/test/test/test_power_acpi_cpufreq.c
+++ b/test/test/test_power_acpi_cpufreq.c
@@ -7,6 +7,7 @@
#include <unistd.h>
#include <limits.h>
#include <string.h>
+#include <inttypes.h>
#include "test.h"
@@ -19,6 +20,13 @@ test_power_acpi_cpufreq(void)
return TEST_SKIPPED;
}
+static int
+test_power_acpi_caps(void)
+{
+ printf("Power management library not supported, skipping test\n");
+ return TEST_SKIPPED;
+}
+
#else
#include <rte_power.h>
@@ -27,7 +35,7 @@ test_power_acpi_cpufreq(void)
#define TEST_POWER_FREQS_NUM_MAX ((unsigned)RTE_MAX_LCORE_FREQS)
#define TEST_POWER_SYSFILE_CUR_FREQ \
- "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq"
+ "/sys/devices/system/cpu/cpu%u/cpufreq/cpuinfo_cur_freq"
static uint32_t total_freq_num;
static uint32_t freqs[TEST_POWER_FREQS_NUM_MAX];
@@ -517,6 +525,42 @@ fail_all:
rte_power_unset_env();
return -1;
}
+
+static int
+test_power_acpi_caps(void)
+{
+ struct rte_power_core_capabilities caps;
+ int ret;
+
+ ret = rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
+ if (ret) {
+ printf("Error setting ACPI environment\n");
+ return -1;
+ }
+
+ ret = rte_power_init(TEST_POWER_LCORE_ID);
+ if (ret < 0) {
+ printf("Cannot initialise power management for lcore %u, this "
+ "may occur if environment is not configured "
+ "correctly(APCI cpufreq) or operating in another valid "
+ "Power management environment\n", TEST_POWER_LCORE_ID);
+ rte_power_unset_env();
+ return -1;
+ }
+
+ ret = rte_power_get_capabilities(TEST_POWER_LCORE_ID, &caps);
+ if (ret) {
+ printf("ACPI: Error getting capabilities\n");
+ return -1;
+ }
+
+ printf("ACPI: Capabilities %"PRIx64"\n", caps.capabilities);
+
+ rte_power_unset_env();
+ return 0;
+}
+
#endif
REGISTER_TEST_COMMAND(power_acpi_cpufreq_autotest, test_power_acpi_cpufreq);
+REGISTER_TEST_COMMAND(power_acpi_caps_autotest, test_power_acpi_caps);
diff --git a/test/test/test_power_kvm_vm.c b/test/test/test_power_kvm_vm.c
index 91b31c44..bce706de 100644
--- a/test/test/test_power_kvm_vm.c
+++ b/test/test/test_power_kvm_vm.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2010-2018 Intel Corporation
*/
#include <stdio.h>
@@ -98,7 +98,8 @@ test_power_kvm_vm(void)
printf("Cannot initialise power management for lcore %u, this "
"may occur if environment is not configured "
"correctly(KVM VM) or operating in another valid "
- "Power management environment\n", TEST_POWER_VM_LCORE_ID);
+ "Power management environment\n",
+ TEST_POWER_VM_LCORE_ID);
rte_power_unset_env();
return -1;
}
@@ -175,6 +176,22 @@ test_power_kvm_vm(void)
goto fail_all;
}
+ /* Test KVM_VM Enable Turbo of valid core */
+ ret = rte_power_freq_enable_turbo(TEST_POWER_VM_LCORE_ID);
+ if (ret == -1) {
+ printf("rte_power_freq_enable_turbo failed on valid lcore"
+ "%u\n", TEST_POWER_VM_LCORE_ID);
+ goto fail_all;
+ }
+
+ /* Test KVM_VM Disable Turbo of valid core */
+ ret = rte_power_freq_disable_turbo(TEST_POWER_VM_LCORE_ID);
+ if (ret == -1) {
+ printf("rte_power_freq_disable_turbo failed on valid lcore"
+ "%u\n", TEST_POWER_VM_LCORE_ID);
+ goto fail_all;
+ }
+
/* Test frequency up of valid lcore */
ret = rte_power_freq_up(TEST_POWER_VM_LCORE_ID);
if (ret != 1) {
diff --git a/test/test/test_reorder.c b/test/test/test_reorder.c
index 65e4f38b..ccee4d08 100644
--- a/test/test/test_reorder.c
+++ b/test/test/test_reorder.c
@@ -146,11 +146,11 @@ test_reorder_insert(void)
b = rte_reorder_create("test_insert", rte_socket_id(), size);
TEST_ASSERT_NOT_NULL(b, "Failed to create reorder buffer");
- ret = rte_mempool_get_bulk(p, (void *)bufs, num_bufs);
- TEST_ASSERT_SUCCESS(ret, "Error getting mbuf from pool");
-
- for (i = 0; i < num_bufs; i++)
+ for (i = 0; i < num_bufs; i++) {
+ bufs[i] = rte_pktmbuf_alloc(p);
+ TEST_ASSERT_NOT_NULL(bufs[i], "Packet allocation failed\n");
bufs[i]->seqn = i;
+ }
/* This should fill up order buffer:
* reorder_seq = 0
@@ -165,6 +165,7 @@ test_reorder_insert(void)
ret = -1;
goto exit;
}
+ bufs[i] = NULL;
}
/* early packet - should move mbufs to ready buf and move sequence window
@@ -179,6 +180,7 @@ test_reorder_insert(void)
ret = -1;
goto exit;
}
+ bufs[4] = NULL;
/* early packet from current sequence window - full ready buffer */
bufs[5]->seqn = 2 * size;
@@ -189,6 +191,7 @@ test_reorder_insert(void)
ret = -1;
goto exit;
}
+ bufs[5] = NULL;
/* late packet */
bufs[6]->seqn = 3 * size;
@@ -199,11 +202,15 @@ test_reorder_insert(void)
ret = -1;
goto exit;
}
+ bufs[6] = NULL;
ret = 0;
exit:
- rte_mempool_put_bulk(p, (void *)bufs, num_bufs);
rte_reorder_free(b);
+ for (i = 0; i < num_bufs; i++) {
+ if (bufs[i] != NULL)
+ rte_pktmbuf_free(bufs[i]);
+ }
return ret;
}
@@ -219,6 +226,10 @@ test_reorder_drain(void)
int ret = 0;
unsigned i, cnt;
+ /* initialize all robufs to NULL */
+ for (i = 0; i < num_bufs; i++)
+ robufs[i] = NULL;
+
/* This would create a reorder buffer instance consisting of:
* reorder_seq = 0
* ready_buf: RB[size] = {NULL, NULL, NULL, NULL}
@@ -227,9 +238,6 @@ test_reorder_drain(void)
b = rte_reorder_create("test_drain", rte_socket_id(), size);
TEST_ASSERT_NOT_NULL(b, "Failed to create reorder buffer");
- ret = rte_mempool_get_bulk(p, (void *)bufs, num_bufs);
- TEST_ASSERT_SUCCESS(ret, "Error getting mbuf from pool");
-
/* Check no drained packets if reorder is empty */
cnt = rte_reorder_drain(b, robufs, 1);
if (cnt != 0) {
@@ -239,8 +247,11 @@ test_reorder_drain(void)
goto exit;
}
- for (i = 0; i < num_bufs; i++)
+ for (i = 0; i < num_bufs; i++) {
+ bufs[i] = rte_pktmbuf_alloc(p);
+ TEST_ASSERT_NOT_NULL(bufs[i], "Packet allocation failed\n");
bufs[i]->seqn = i;
+ }
/* Insert packet with seqn 1:
* reorder_seq = 0
@@ -248,6 +259,7 @@ test_reorder_drain(void)
* OB[] = {1, NULL, NULL, NULL}
*/
rte_reorder_insert(b, bufs[1]);
+ bufs[1] = NULL;
cnt = rte_reorder_drain(b, robufs, 1);
if (cnt != 1) {
@@ -256,6 +268,8 @@ test_reorder_drain(void)
ret = -1;
goto exit;
}
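+ /* free the single mbuf handed back by the drain above */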
+ if (robufs[0] != NULL)
+ rte_pktmbuf_free(robufs[0]);
/* Insert more packets
* RB[] = {NULL, NULL, NULL, NULL}
@@ -263,18 +277,22 @@ test_reorder_drain(void)
*/
rte_reorder_insert(b, bufs[2]);
rte_reorder_insert(b, bufs[3]);
+ bufs[2] = NULL;
+ bufs[3] = NULL;
/* Insert more packets
* RB[] = {NULL, NULL, NULL, NULL}
* OB[] = {NULL, 2, 3, 4}
*/
rte_reorder_insert(b, bufs[4]);
+ bufs[4] = NULL;
/* Insert more packets
* RB[] = {2, 3, 4, NULL}
* OB[] = {NULL, NULL, 7, NULL}
*/
rte_reorder_insert(b, bufs[7]);
+ bufs[7] = NULL;
/* drained expected packets */
cnt = rte_reorder_drain(b, robufs, 4);
@@ -284,6 +302,10 @@ test_reorder_drain(void)
ret = -1;
goto exit;
}
+ for (i = 0; i < 3; i++) {
+ if (robufs[i] != NULL)
+ rte_pktmbuf_free(robufs[i]);
+ }
/*
* RB[] = {NULL, NULL, NULL, NULL}
@@ -298,8 +320,13 @@ test_reorder_drain(void)
}
ret = 0;
exit:
- rte_mempool_put_bulk(p, (void *)bufs, num_bufs);
rte_reorder_free(b);
+ for (i = 0; i < num_bufs; i++) {
+ if (bufs[i] != NULL)
+ rte_pktmbuf_free(bufs[i]);
+ if (robufs[i] != NULL)
+ rte_pktmbuf_free(robufs[i]);
+ }
return ret;
}
diff --git a/test/test/test_resource.c b/test/test/test_resource.c
index a3a82f13..8f41e3ba 100644
--- a/test/test/test_resource.c
+++ b/test/test/test_resource.c
@@ -1,34 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2016 RehiveTech. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of RehiveTech nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 RehiveTech. All rights reserved.
*/
#include <stdio.h>
diff --git a/test/test/test_service_cores.c b/test/test/test_service_cores.c
index 86b4073d..ec31882d 100644
--- a/test/test/test_service_cores.c
+++ b/test/test/test_service_cores.c
@@ -325,6 +325,78 @@ service_attr_get(void)
return unregister_all();
}
+/* verify service lcore attr get */
+static int
+service_lcore_attr_get(void)
+{
+ /* ensure all services unregistered so cycle counts are zero */
+ unregister_all();
+
+ struct rte_service_spec service;
+ memset(&service, 0, sizeof(struct rte_service_spec));
+ service.callback = dummy_cb;
+ snprintf(service.name, sizeof(service.name), DUMMY_SERVICE_NAME);
+ service.capabilities |= RTE_SERVICE_CAP_MT_SAFE;
+ uint32_t id;
+ TEST_ASSERT_EQUAL(0, rte_service_component_register(&service, &id),
+ "Register of service failed");
+ rte_service_component_runstate_set(id, 1);
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(id, 1),
+ "Error: Service start returned non-zero");
+ rte_service_set_stats_enable(id, 1);
+
+ uint64_t lcore_attr_value = 0xdead;
+ uint32_t lcore_attr_id = UINT32_MAX;
+
+ /* check error return values */
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_lcore_attr_get(UINT32_MAX,
+ lcore_attr_id, &lcore_attr_value),
+ "Invalid lcore_id didn't return -EINVAL");
+ TEST_ASSERT_EQUAL(-ENOTSUP, rte_service_lcore_attr_get(rte_lcore_id(),
+ lcore_attr_id, &lcore_attr_value),
+ "Non-service core didn't return -ENOTSUP");
+
+ /* Start service core to increment loop count */
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
+ "Service core add did not return zero");
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(id, slcore_id, 1),
+ "Enabling valid service and core failed");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_start(slcore_id),
+ "Starting service core failed");
+
+ /* wait for the service lcore to run */
+ rte_delay_ms(200);
+
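+ /* LOOPS counts iterations of the service core's run loop; after
+ * 200 ms of running it must be non-zero.
+ */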
+ lcore_attr_id = RTE_SERVICE_LCORE_ATTR_LOOPS;
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_attr_get(slcore_id,
+ lcore_attr_id, &lcore_attr_value),
+ "Valid lcore_attr_get() call didn't return success");
+ int loops_gt_zero = lcore_attr_value > 0;
+ TEST_ASSERT_EQUAL(1, loops_gt_zero,
+ "lcore_attr_get() failed to get loops "
+ "(expected > zero)");
+
+ lcore_attr_id++; /* invalid lcore attr id */
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_lcore_attr_get(slcore_id,
+ lcore_attr_id, &lcore_attr_value),
+ "Invalid lcore attr didn't return -EINVAL");
+
+ rte_service_lcore_stop(slcore_id);
+
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_attr_reset_all(slcore_id),
+ "Valid lcore_attr_reset_all() didn't return success");
+
+ lcore_attr_id = RTE_SERVICE_LCORE_ATTR_LOOPS;
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_attr_get(slcore_id,
+ lcore_attr_id, &lcore_attr_value),
+ "Valid lcore_attr_get() call didn't return success");
+ TEST_ASSERT_EQUAL(0, lcore_attr_value,
+ "lcore_attr_get() didn't get correct loop count "
+ "(zero)");
+
+ return unregister_all();
+}
+
/* verify service dump */
static int
service_dump(void)
@@ -771,6 +843,48 @@ service_lcore_start_stop(void)
return unregister_all();
}
+/* stop a service and wait for it to become inactive */
+static int
+service_may_be_active(void)
+{
+ const uint32_t sid = 0;
+ int i;
+
+ /* expected failure cases */
+ TEST_ASSERT_EQUAL(-EINVAL, rte_service_may_be_active(10000),
+ "may_be_active() with an invalid service id did not return -EINVAL");
+
+ /* start the service */
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 1),
+ "Starting valid service failed");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_add(slcore_id),
+ "Add service core failed when not in use before");
+ TEST_ASSERT_EQUAL(0, rte_service_map_lcore_set(sid, slcore_id, 1),
+ "Enabling valid service on valid core failed");
+ TEST_ASSERT_EQUAL(0, rte_service_lcore_start(slcore_id),
+ "Service core start after add failed");
+
+ /* ensure the core really is running the service function */
+ TEST_ASSERT_EQUAL(1, service_lcore_running_check(),
+ "Service core expected to poll service but it didn't");
+
+ /* stop the service */
+ TEST_ASSERT_EQUAL(0, rte_service_runstate_set(sid, 0),
+ "Error: Service stop returned non-zero");
+
+ /* give the service 100ms to stop running */
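+ /* 100 iterations of SERVICE_DELAY (1 ms) gives the 100 ms budget */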
+ for (i = 0; i < 100; i++) {
+ if (!rte_service_may_be_active(sid))
+ break;
+ rte_delay_ms(SERVICE_DELAY);
+ }
+
+ TEST_ASSERT_EQUAL(0, rte_service_may_be_active(sid),
+ "Error: Service not stopped after 100ms");
+
+ return unregister_all();
+}
+
static struct unit_test_suite service_tests = {
.suite_name = "service core test suite",
.setup = testsuite_setup,
@@ -781,6 +895,7 @@ static struct unit_test_suite service_tests = {
TEST_CASE_ST(dummy_register, NULL, service_get_by_name),
TEST_CASE_ST(dummy_register, NULL, service_dump),
TEST_CASE_ST(dummy_register, NULL, service_attr_get),
+ TEST_CASE_ST(dummy_register, NULL, service_lcore_attr_get),
TEST_CASE_ST(dummy_register, NULL, service_probe_capability),
TEST_CASE_ST(dummy_register, NULL, service_start_stop),
TEST_CASE_ST(dummy_register, NULL, service_lcore_add_del),
@@ -790,6 +905,7 @@ static struct unit_test_suite service_tests = {
TEST_CASE_ST(dummy_register, NULL, service_mt_safe_poll),
TEST_CASE_ST(dummy_register, NULL, service_app_lcore_mt_safe),
TEST_CASE_ST(dummy_register, NULL, service_app_lcore_mt_unsafe),
+ TEST_CASE_ST(dummy_register, NULL, service_may_be_active),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
diff --git a/test/test/test_table.c b/test/test/test_table.c
index f01652dc..a4b0ed65 100644
--- a/test/test/test_table.c
+++ b/test/test/test_table.c
@@ -54,6 +54,17 @@ uint64_t pipeline_test_hash(void *key,
return signature;
}
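+/* Same toy hash as pipeline_test_hash, but with the 32-bit prototype
+ * (uint32_t seed and return value) that the cuckoo hash table expects
+ * for its f_hash op.
+ */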
+uint32_t pipeline_test_hash_cuckoo(const void *key,
+ __attribute__((unused)) uint32_t key_size,
+ __attribute__((unused)) uint32_t seed)
+{
+ const uint32_t *k32 = key;
+ uint32_t ip_dst = rte_be_to_cpu_32(k32[0]);
+ uint32_t signature = ip_dst;
+
+ return signature;
+}
+
static void
app_free_resources(void) {
int i;
diff --git a/test/test/test_table.h b/test/test/test_table.h
index a4d3ca0c..a66342cb 100644
--- a/test/test/test_table.h
+++ b/test/test/test_table.h
@@ -6,6 +6,7 @@
#include <rte_table_lpm.h>
#include <rte_table_lpm_ipv6.h>
#include <rte_table_hash.h>
+#include <rte_table_hash_cuckoo.h>
#include <rte_table_array.h>
#include <rte_pipeline.h>
@@ -106,6 +107,11 @@ uint64_t pipeline_test_hash(
__attribute__((unused)) uint32_t key_size,
__attribute__((unused)) uint64_t seed);
+uint32_t pipeline_test_hash_cuckoo(
+ const void *key,
+ __attribute__((unused)) uint32_t key_size,
+ __attribute__((unused)) uint32_t seed);
+
/* Extern variables */
extern struct rte_pipeline *p;
extern struct rte_ring *rings_rx[N_PORTS];
diff --git a/test/test/test_table_combined.c b/test/test/test_table_combined.c
index 5e8e119a..73ad0155 100644
--- a/test/test/test_table_combined.c
+++ b/test/test/test_table_combined.c
@@ -778,14 +778,14 @@ test_table_hash_cuckoo_combined(void)
int status, i;
/* Traffic flow */
- struct rte_table_hash_params cuckoo_params = {
+ struct rte_table_hash_cuckoo_params cuckoo_params = {
.name = "TABLE",
.key_size = 32,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
.n_keys = 1 << 16,
.n_buckets = 1 << 16,
- .f_hash = pipeline_test_hash,
+ .f_hash = pipeline_test_hash_cuckoo,
.seed = 0,
};
diff --git a/test/test/test_table_pipeline.c b/test/test/test_table_pipeline.c
index 055a1a4e..441338ac 100644
--- a/test/test/test_table_pipeline.c
+++ b/test/test/test_table_pipeline.c
@@ -69,9 +69,9 @@ rte_pipeline_table_action_handler_hit
table_action_stub_hit(struct rte_pipeline *p, struct rte_mbuf **pkts,
uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
-rte_pipeline_table_action_handler_miss
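+/* Align the stub with the miss-handler prototype (int return, single
+ * default-entry pointer) so the function-pointer casts at the call
+ * sites below can be dropped.
+ */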
+static int
table_action_stub_miss(struct rte_pipeline *p, struct rte_mbuf **pkts,
- uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg);
+ uint64_t pkts_mask, struct rte_pipeline_table_entry *entry, void *arg);
rte_pipeline_table_action_handler_hit
table_action_0x00(__attribute__((unused)) struct rte_pipeline *p,
@@ -101,11 +101,11 @@ table_action_stub_hit(__attribute__((unused)) struct rte_pipeline *p,
return 0;
}
-rte_pipeline_table_action_handler_miss
+static int
table_action_stub_miss(struct rte_pipeline *p,
__attribute__((unused)) struct rte_mbuf **pkts,
uint64_t pkts_mask,
- __attribute__((unused)) struct rte_pipeline_table_entry **entry,
+ __attribute__((unused)) struct rte_pipeline_table_entry *entry,
__attribute__((unused)) void *arg)
{
printf("STUB Table Action Miss - setting mask to 0x%"PRIx64"\n",
@@ -517,8 +517,7 @@ test_table_pipeline(void)
/* TEST - one packet per port */
action_handler_hit = NULL;
- action_handler_miss =
- (rte_pipeline_table_action_handler_miss) table_action_stub_miss;
+ action_handler_miss = table_action_stub_miss;
table_entry_default_action = RTE_PIPELINE_ACTION_PORT;
override_miss_mask = 0x01; /* one packet per port */
setup_pipeline(e_TEST_STUB);
@@ -553,8 +552,7 @@ test_table_pipeline(void)
printf("TEST - two tables, hitmask override to 0x01\n");
connect_miss_action_to_table = 1;
- action_handler_miss =
- (rte_pipeline_table_action_handler_miss)table_action_stub_miss;
+ action_handler_miss = table_action_stub_miss;
override_miss_mask = 0x01;
setup_pipeline(e_TEST_STUB);
if (test_pipeline_single_filter(e_TEST_STUB, 2) < 0)
diff --git a/test/test/test_table_tables.c b/test/test/test_table_tables.c
index a7a69b85..20df2e92 100644
--- a/test/test/test_table_tables.c
+++ b/test/test/test_table_tables.c
@@ -903,14 +903,14 @@ test_table_hash_cuckoo(void)
uint32_t entry_size = 1;
/* Initialize params and create tables */
- struct rte_table_hash_params cuckoo_params = {
+ struct rte_table_hash_cuckoo_params cuckoo_params = {
.name = "TABLE",
.key_size = 32,
.key_offset = APP_METADATA_OFFSET(32),
.key_mask = NULL,
.n_keys = 1 << 16,
.n_buckets = 1 << 16,
- .f_hash = (rte_table_hash_op_hash)pipeline_test_hash,
+ .f_hash = pipeline_test_hash_cuckoo,
.seed = 0,
};
@@ -941,7 +941,7 @@ test_table_hash_cuckoo(void)
if (table != NULL)
return -4;
- cuckoo_params.f_hash = pipeline_test_hash;
+ cuckoo_params.f_hash = pipeline_test_hash_cuckoo;
cuckoo_params.name = NULL;
table = rte_table_hash_cuckoo_ops.f_create(&cuckoo_params,
diff --git a/test/test/virtual_pmd.c b/test/test/virtual_pmd.c
index 2f5b31db..591b3098 100644
--- a/test/test/virtual_pmd.c
+++ b/test/test/virtual_pmd.c
@@ -91,6 +91,7 @@ virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
dev_info->max_tx_queues = (uint16_t)512;
dev_info->min_rx_bufsize = 0;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP;
}
static int
@@ -216,10 +217,11 @@ static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}
-static void
+static int
virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
__rte_unused struct ether_addr *addr)
{
+ return 0;
}
static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
@@ -589,6 +591,8 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
+ rte_eth_dev_probing_finish(eth_dev);
+
return eth_dev->data->port_id;
err:
diff --git a/usertools/cpu_layout.py b/usertools/cpu_layout.py
index d3c8eba7..6f129b1d 100755
--- a/usertools/cpu_layout.py
+++ b/usertools/cpu_layout.py
@@ -1,38 +1,8 @@
#!/usr/bin/env python
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+# Copyright(c) 2017 Cavium, Inc. All rights reserved.
-#
-# BSD LICENSE
-#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# Copyright(c) 2017 Cavium, Inc. All rights reserved.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Intel Corporation nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
from __future__ import print_function
import sys
try:
diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index 18d93860..7d564634 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -22,11 +22,18 @@ cavium_fpa = {'Class': '08', 'Vendor': '177d', 'Device': 'a053',
'SVendor': None, 'SDevice': None}
cavium_pkx = {'Class': '08', 'Vendor': '177d', 'Device': 'a0dd,a049',
'SVendor': None, 'SDevice': None}
+cavium_tim = {'Class': '08', 'Vendor': '177d', 'Device': 'a051',
+ 'SVendor': None, 'SDevice': None}
+cavium_zip = {'Class': '12', 'Vendor': '177d', 'Device': 'a037',
+ 'SVendor': None, 'SDevice': None}
+avp_vnic = {'Class': '05', 'Vendor': '1af4', 'Device': '1110',
+ 'SVendor': None, 'SDevice': None}
-network_devices = [network_class, cavium_pkx]
+network_devices = [network_class, cavium_pkx, avp_vnic]
crypto_devices = [encryption_class, intel_processor_class]
-eventdev_devices = [cavium_sso]
+eventdev_devices = [cavium_sso, cavium_tim]
mempool_devices = [cavium_fpa]
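+# compression devices (currently the Cavium ZIP accelerator)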
+compress_devices = [cavium_zip]
# global dict ethernet devices present. Dictionary indexed by PCI address.
# Each device within this is itself a dictionary of device properties
@@ -75,7 +82,7 @@ Options:
--status-dev:
Print the status of given device group. Supported device groups are:
- "net", "crypto", "event" and "mempool"
+ "net", "crypto", "event", "mempool" and "compress"
-b driver, --bind=driver:
Select the driver to use or \"none\" to unbind the device
@@ -565,6 +572,10 @@ def show_status():
if status_dev == "mempool" or status_dev == "all":
show_device_status(mempool_devices, "Mempool")
+ if status_dev == "compress" or status_dev == "all":
+ show_device_status(compress_devices, "Compress")
+
+
def parse_args():
'''Parses the command-line arguments given by the user and takes the
appropriate action for each'''
@@ -638,6 +649,7 @@ def do_arg_actions():
get_device_details(crypto_devices)
get_device_details(eventdev_devices)
get_device_details(mempool_devices)
+ get_device_details(compress_devices)
show_status()
@@ -650,6 +662,7 @@ def main():
get_device_details(crypto_devices)
get_device_details(eventdev_devices)
get_device_details(mempool_devices)
+ get_device_details(compress_devices)
do_arg_actions()
if __name__ == "__main__":
diff --git a/usertools/dpdk-pmdinfo.py b/usertools/dpdk-pmdinfo.py
index 46c1be08..03623d5b 100755
--- a/usertools/dpdk-pmdinfo.py
+++ b/usertools/dpdk-pmdinfo.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Neil Horman <nhorman@tuxdriver.com>
# -------------------------------------------------------------------------
#